diff options
author | Jonathan Herman <hermanjl@cs.unc.edu> | 2013-01-17 16:15:55 -0500 |
---|---|---|
committer | Jonathan Herman <hermanjl@cs.unc.edu> | 2013-01-17 16:15:55 -0500 |
commit | 8dea78da5cee153b8af9c07a2745f6c55057fe12 (patch) | |
tree | a8f4d49d63b1ecc92f2fddceba0655b2472c5bd9 /drivers/rapidio | |
parent | 406089d01562f1e2bf9f089fd7637009ebaad589 (diff) |
Patched in Tegra support.
Diffstat (limited to 'drivers/rapidio')
-rw-r--r-- | drivers/rapidio/Kconfig | 18 | ||||
-rw-r--r-- | drivers/rapidio/Makefile | 1 | ||||
-rw-r--r-- | drivers/rapidio/devices/Kconfig | 10 | ||||
-rw-r--r-- | drivers/rapidio/devices/Makefile | 8 | ||||
-rw-r--r-- | drivers/rapidio/devices/tsi721.c | 2511 | ||||
-rw-r--r-- | drivers/rapidio/devices/tsi721.h | 852 | ||||
-rw-r--r-- | drivers/rapidio/devices/tsi721_dma.c | 823 | ||||
-rw-r--r-- | drivers/rapidio/rio-scan.c | 347 | ||||
-rw-r--r-- | drivers/rapidio/rio.c | 197 | ||||
-rw-r--r-- | drivers/rapidio/switches/idt_gen2.c | 1 |
10 files changed, 123 insertions, 4645 deletions
diff --git a/drivers/rapidio/Kconfig b/drivers/rapidio/Kconfig index 6194d35ebb9..070211a5955 100644 --- a/drivers/rapidio/Kconfig +++ b/drivers/rapidio/Kconfig | |||
@@ -1,8 +1,6 @@ | |||
1 | # | 1 | # |
2 | # RapidIO configuration | 2 | # RapidIO configuration |
3 | # | 3 | # |
4 | source "drivers/rapidio/devices/Kconfig" | ||
5 | |||
6 | config RAPIDIO_DISC_TIMEOUT | 4 | config RAPIDIO_DISC_TIMEOUT |
7 | int "Discovery timeout duration (seconds)" | 5 | int "Discovery timeout duration (seconds)" |
8 | depends on RAPIDIO | 6 | depends on RAPIDIO |
@@ -22,19 +20,7 @@ config RAPIDIO_ENABLE_RX_TX_PORTS | |||
22 | ports for Input/Output direction to allow other traffic | 20 | ports for Input/Output direction to allow other traffic |
23 | than Maintenance transfers. | 21 | than Maintenance transfers. |
24 | 22 | ||
25 | config RAPIDIO_DMA_ENGINE | 23 | source "drivers/rapidio/switches/Kconfig" |
26 | bool "DMA Engine support for RapidIO" | ||
27 | depends on RAPIDIO | ||
28 | select DMADEVICES | ||
29 | select DMA_ENGINE | ||
30 | help | ||
31 | Say Y here if you want to use DMA Engine frameork for RapidIO data | ||
32 | transfers to/from target RIO devices. RapidIO uses NREAD and | ||
33 | NWRITE (NWRITE_R, SWRITE) requests to transfer data between local | ||
34 | memory and memory on remote target device. You need a DMA controller | ||
35 | capable to perform data transfers to/from RapidIO. | ||
36 | |||
37 | If you are unsure about this, say Y here. | ||
38 | 24 | ||
39 | config RAPIDIO_DEBUG | 25 | config RAPIDIO_DEBUG |
40 | bool "RapidIO subsystem debug messages" | 26 | bool "RapidIO subsystem debug messages" |
@@ -46,5 +32,3 @@ config RAPIDIO_DEBUG | |||
46 | going on. | 32 | going on. |
47 | 33 | ||
48 | If you are unsure about this, say N here. | 34 | If you are unsure about this, say N here. |
49 | |||
50 | source "drivers/rapidio/switches/Kconfig" | ||
diff --git a/drivers/rapidio/Makefile b/drivers/rapidio/Makefile index ec3fb812100..89b8eca825b 100644 --- a/drivers/rapidio/Makefile +++ b/drivers/rapidio/Makefile | |||
@@ -4,6 +4,5 @@ | |||
4 | obj-y += rio.o rio-access.o rio-driver.o rio-scan.o rio-sysfs.o | 4 | obj-y += rio.o rio-access.o rio-driver.o rio-scan.o rio-sysfs.o |
5 | 5 | ||
6 | obj-$(CONFIG_RAPIDIO) += switches/ | 6 | obj-$(CONFIG_RAPIDIO) += switches/ |
7 | obj-$(CONFIG_RAPIDIO) += devices/ | ||
8 | 7 | ||
9 | subdir-ccflags-$(CONFIG_RAPIDIO_DEBUG) := -DDEBUG | 8 | subdir-ccflags-$(CONFIG_RAPIDIO_DEBUG) := -DDEBUG |
diff --git a/drivers/rapidio/devices/Kconfig b/drivers/rapidio/devices/Kconfig deleted file mode 100644 index 12a9d7f7040..00000000000 --- a/drivers/rapidio/devices/Kconfig +++ /dev/null | |||
@@ -1,10 +0,0 @@ | |||
1 | # | ||
2 | # RapidIO master port configuration | ||
3 | # | ||
4 | |||
5 | config RAPIDIO_TSI721 | ||
6 | bool "IDT Tsi721 PCI Express SRIO Controller support" | ||
7 | depends on RAPIDIO && PCIEPORTBUS | ||
8 | default "n" | ||
9 | ---help--- | ||
10 | Include support for IDT Tsi721 PCI Express Serial RapidIO controller. | ||
diff --git a/drivers/rapidio/devices/Makefile b/drivers/rapidio/devices/Makefile deleted file mode 100644 index 7b62860f34f..00000000000 --- a/drivers/rapidio/devices/Makefile +++ /dev/null | |||
@@ -1,8 +0,0 @@ | |||
1 | # | ||
2 | # Makefile for RapidIO devices | ||
3 | # | ||
4 | |||
5 | obj-$(CONFIG_RAPIDIO_TSI721) += tsi721.o | ||
6 | ifeq ($(CONFIG_RAPIDIO_DMA_ENGINE),y) | ||
7 | obj-$(CONFIG_RAPIDIO_TSI721) += tsi721_dma.o | ||
8 | endif | ||
diff --git a/drivers/rapidio/devices/tsi721.c b/drivers/rapidio/devices/tsi721.c deleted file mode 100644 index 6faba406b6e..00000000000 --- a/drivers/rapidio/devices/tsi721.c +++ /dev/null | |||
@@ -1,2511 +0,0 @@ | |||
1 | /* | ||
2 | * RapidIO mport driver for Tsi721 PCIExpress-to-SRIO bridge | ||
3 | * | ||
4 | * Copyright 2011 Integrated Device Technology, Inc. | ||
5 | * Alexandre Bounine <alexandre.bounine@idt.com> | ||
6 | * Chul Kim <chul.kim@idt.com> | ||
7 | * | ||
8 | * This program is free software; you can redistribute it and/or modify it | ||
9 | * under the terms of the GNU General Public License as published by the Free | ||
10 | * Software Foundation; either version 2 of the License, or (at your option) | ||
11 | * any later version. | ||
12 | * | ||
13 | * This program is distributed in the hope that it will be useful, but WITHOUT | ||
14 | * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or | ||
15 | * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for | ||
16 | * more details. | ||
17 | * | ||
18 | * You should have received a copy of the GNU General Public License along with | ||
19 | * this program; if not, write to the Free Software Foundation, Inc., 59 | ||
20 | * Temple Place - Suite 330, Boston, MA 02111-1307, USA. | ||
21 | */ | ||
22 | |||
23 | #include <linux/io.h> | ||
24 | #include <linux/errno.h> | ||
25 | #include <linux/init.h> | ||
26 | #include <linux/ioport.h> | ||
27 | #include <linux/kernel.h> | ||
28 | #include <linux/module.h> | ||
29 | #include <linux/pci.h> | ||
30 | #include <linux/rio.h> | ||
31 | #include <linux/rio_drv.h> | ||
32 | #include <linux/dma-mapping.h> | ||
33 | #include <linux/interrupt.h> | ||
34 | #include <linux/kfifo.h> | ||
35 | #include <linux/delay.h> | ||
36 | |||
37 | #include "tsi721.h" | ||
38 | |||
39 | #define DEBUG_PW /* Inbound Port-Write debugging */ | ||
40 | |||
41 | static void tsi721_omsg_handler(struct tsi721_device *priv, int ch); | ||
42 | static void tsi721_imsg_handler(struct tsi721_device *priv, int ch); | ||
43 | |||
44 | /** | ||
45 | * tsi721_lcread - read from local SREP config space | ||
46 | * @mport: RapidIO master port info | ||
47 | * @index: ID of RapdiIO interface | ||
48 | * @offset: Offset into configuration space | ||
49 | * @len: Length (in bytes) of the maintenance transaction | ||
50 | * @data: Value to be read into | ||
51 | * | ||
52 | * Generates a local SREP space read. Returns %0 on | ||
53 | * success or %-EINVAL on failure. | ||
54 | */ | ||
55 | static int tsi721_lcread(struct rio_mport *mport, int index, u32 offset, | ||
56 | int len, u32 *data) | ||
57 | { | ||
58 | struct tsi721_device *priv = mport->priv; | ||
59 | |||
60 | if (len != sizeof(u32)) | ||
61 | return -EINVAL; /* only 32-bit access is supported */ | ||
62 | |||
63 | *data = ioread32(priv->regs + offset); | ||
64 | |||
65 | return 0; | ||
66 | } | ||
67 | |||
68 | /** | ||
69 | * tsi721_lcwrite - write into local SREP config space | ||
70 | * @mport: RapidIO master port info | ||
71 | * @index: ID of RapdiIO interface | ||
72 | * @offset: Offset into configuration space | ||
73 | * @len: Length (in bytes) of the maintenance transaction | ||
74 | * @data: Value to be written | ||
75 | * | ||
76 | * Generates a local write into SREP configuration space. Returns %0 on | ||
77 | * success or %-EINVAL on failure. | ||
78 | */ | ||
79 | static int tsi721_lcwrite(struct rio_mport *mport, int index, u32 offset, | ||
80 | int len, u32 data) | ||
81 | { | ||
82 | struct tsi721_device *priv = mport->priv; | ||
83 | |||
84 | if (len != sizeof(u32)) | ||
85 | return -EINVAL; /* only 32-bit access is supported */ | ||
86 | |||
87 | iowrite32(data, priv->regs + offset); | ||
88 | |||
89 | return 0; | ||
90 | } | ||
91 | |||
92 | /** | ||
93 | * tsi721_maint_dma - Helper function to generate RapidIO maintenance | ||
94 | * transactions using designated Tsi721 DMA channel. | ||
95 | * @priv: pointer to tsi721 private data | ||
96 | * @sys_size: RapdiIO transport system size | ||
97 | * @destid: Destination ID of transaction | ||
98 | * @hopcount: Number of hops to target device | ||
99 | * @offset: Offset into configuration space | ||
100 | * @len: Length (in bytes) of the maintenance transaction | ||
101 | * @data: Location to be read from or write into | ||
102 | * @do_wr: Operation flag (1 == MAINT_WR) | ||
103 | * | ||
104 | * Generates a RapidIO maintenance transaction (Read or Write). | ||
105 | * Returns %0 on success and %-EINVAL or %-EFAULT on failure. | ||
106 | */ | ||
107 | static int tsi721_maint_dma(struct tsi721_device *priv, u32 sys_size, | ||
108 | u16 destid, u8 hopcount, u32 offset, int len, | ||
109 | u32 *data, int do_wr) | ||
110 | { | ||
111 | void __iomem *regs = priv->regs + TSI721_DMAC_BASE(priv->mdma.ch_id); | ||
112 | struct tsi721_dma_desc *bd_ptr; | ||
113 | u32 rd_count, swr_ptr, ch_stat; | ||
114 | int i, err = 0; | ||
115 | u32 op = do_wr ? MAINT_WR : MAINT_RD; | ||
116 | |||
117 | if (offset > (RIO_MAINT_SPACE_SZ - len) || (len != sizeof(u32))) | ||
118 | return -EINVAL; | ||
119 | |||
120 | bd_ptr = priv->mdma.bd_base; | ||
121 | |||
122 | rd_count = ioread32(regs + TSI721_DMAC_DRDCNT); | ||
123 | |||
124 | /* Initialize DMA descriptor */ | ||
125 | bd_ptr[0].type_id = cpu_to_le32((DTYPE2 << 29) | (op << 19) | destid); | ||
126 | bd_ptr[0].bcount = cpu_to_le32((sys_size << 26) | 0x04); | ||
127 | bd_ptr[0].raddr_lo = cpu_to_le32((hopcount << 24) | offset); | ||
128 | bd_ptr[0].raddr_hi = 0; | ||
129 | if (do_wr) | ||
130 | bd_ptr[0].data[0] = cpu_to_be32p(data); | ||
131 | else | ||
132 | bd_ptr[0].data[0] = 0xffffffff; | ||
133 | |||
134 | mb(); | ||
135 | |||
136 | /* Start DMA operation */ | ||
137 | iowrite32(rd_count + 2, regs + TSI721_DMAC_DWRCNT); | ||
138 | ioread32(regs + TSI721_DMAC_DWRCNT); | ||
139 | i = 0; | ||
140 | |||
141 | /* Wait until DMA transfer is finished */ | ||
142 | while ((ch_stat = ioread32(regs + TSI721_DMAC_STS)) | ||
143 | & TSI721_DMAC_STS_RUN) { | ||
144 | udelay(1); | ||
145 | if (++i >= 5000000) { | ||
146 | dev_dbg(&priv->pdev->dev, | ||
147 | "%s : DMA[%d] read timeout ch_status=%x\n", | ||
148 | __func__, priv->mdma.ch_id, ch_stat); | ||
149 | if (!do_wr) | ||
150 | *data = 0xffffffff; | ||
151 | err = -EIO; | ||
152 | goto err_out; | ||
153 | } | ||
154 | } | ||
155 | |||
156 | if (ch_stat & TSI721_DMAC_STS_ABORT) { | ||
157 | /* If DMA operation aborted due to error, | ||
158 | * reinitialize DMA channel | ||
159 | */ | ||
160 | dev_dbg(&priv->pdev->dev, "%s : DMA ABORT ch_stat=%x\n", | ||
161 | __func__, ch_stat); | ||
162 | dev_dbg(&priv->pdev->dev, "OP=%d : destid=%x hc=%x off=%x\n", | ||
163 | do_wr ? MAINT_WR : MAINT_RD, destid, hopcount, offset); | ||
164 | iowrite32(TSI721_DMAC_INT_ALL, regs + TSI721_DMAC_INT); | ||
165 | iowrite32(TSI721_DMAC_CTL_INIT, regs + TSI721_DMAC_CTL); | ||
166 | udelay(10); | ||
167 | iowrite32(0, regs + TSI721_DMAC_DWRCNT); | ||
168 | udelay(1); | ||
169 | if (!do_wr) | ||
170 | *data = 0xffffffff; | ||
171 | err = -EIO; | ||
172 | goto err_out; | ||
173 | } | ||
174 | |||
175 | if (!do_wr) | ||
176 | *data = be32_to_cpu(bd_ptr[0].data[0]); | ||
177 | |||
178 | /* | ||
179 | * Update descriptor status FIFO RD pointer. | ||
180 | * NOTE: Skipping check and clear FIFO entries because we are waiting | ||
181 | * for transfer to be completed. | ||
182 | */ | ||
183 | swr_ptr = ioread32(regs + TSI721_DMAC_DSWP); | ||
184 | iowrite32(swr_ptr, regs + TSI721_DMAC_DSRP); | ||
185 | err_out: | ||
186 | |||
187 | return err; | ||
188 | } | ||
189 | |||
190 | /** | ||
191 | * tsi721_cread_dma - Generate a RapidIO maintenance read transaction | ||
192 | * using Tsi721 BDMA engine. | ||
193 | * @mport: RapidIO master port control structure | ||
194 | * @index: ID of RapdiIO interface | ||
195 | * @destid: Destination ID of transaction | ||
196 | * @hopcount: Number of hops to target device | ||
197 | * @offset: Offset into configuration space | ||
198 | * @len: Length (in bytes) of the maintenance transaction | ||
199 | * @val: Location to be read into | ||
200 | * | ||
201 | * Generates a RapidIO maintenance read transaction. | ||
202 | * Returns %0 on success and %-EINVAL or %-EFAULT on failure. | ||
203 | */ | ||
204 | static int tsi721_cread_dma(struct rio_mport *mport, int index, u16 destid, | ||
205 | u8 hopcount, u32 offset, int len, u32 *data) | ||
206 | { | ||
207 | struct tsi721_device *priv = mport->priv; | ||
208 | |||
209 | return tsi721_maint_dma(priv, mport->sys_size, destid, hopcount, | ||
210 | offset, len, data, 0); | ||
211 | } | ||
212 | |||
213 | /** | ||
214 | * tsi721_cwrite_dma - Generate a RapidIO maintenance write transaction | ||
215 | * using Tsi721 BDMA engine | ||
216 | * @mport: RapidIO master port control structure | ||
217 | * @index: ID of RapdiIO interface | ||
218 | * @destid: Destination ID of transaction | ||
219 | * @hopcount: Number of hops to target device | ||
220 | * @offset: Offset into configuration space | ||
221 | * @len: Length (in bytes) of the maintenance transaction | ||
222 | * @val: Value to be written | ||
223 | * | ||
224 | * Generates a RapidIO maintenance write transaction. | ||
225 | * Returns %0 on success and %-EINVAL or %-EFAULT on failure. | ||
226 | */ | ||
227 | static int tsi721_cwrite_dma(struct rio_mport *mport, int index, u16 destid, | ||
228 | u8 hopcount, u32 offset, int len, u32 data) | ||
229 | { | ||
230 | struct tsi721_device *priv = mport->priv; | ||
231 | u32 temp = data; | ||
232 | |||
233 | return tsi721_maint_dma(priv, mport->sys_size, destid, hopcount, | ||
234 | offset, len, &temp, 1); | ||
235 | } | ||
236 | |||
237 | /** | ||
238 | * tsi721_pw_handler - Tsi721 inbound port-write interrupt handler | ||
239 | * @mport: RapidIO master port structure | ||
240 | * | ||
241 | * Handles inbound port-write interrupts. Copies PW message from an internal | ||
242 | * buffer into PW message FIFO and schedules deferred routine to process | ||
243 | * queued messages. | ||
244 | */ | ||
245 | static int | ||
246 | tsi721_pw_handler(struct rio_mport *mport) | ||
247 | { | ||
248 | struct tsi721_device *priv = mport->priv; | ||
249 | u32 pw_stat; | ||
250 | u32 pw_buf[TSI721_RIO_PW_MSG_SIZE/sizeof(u32)]; | ||
251 | |||
252 | |||
253 | pw_stat = ioread32(priv->regs + TSI721_RIO_PW_RX_STAT); | ||
254 | |||
255 | if (pw_stat & TSI721_RIO_PW_RX_STAT_PW_VAL) { | ||
256 | pw_buf[0] = ioread32(priv->regs + TSI721_RIO_PW_RX_CAPT(0)); | ||
257 | pw_buf[1] = ioread32(priv->regs + TSI721_RIO_PW_RX_CAPT(1)); | ||
258 | pw_buf[2] = ioread32(priv->regs + TSI721_RIO_PW_RX_CAPT(2)); | ||
259 | pw_buf[3] = ioread32(priv->regs + TSI721_RIO_PW_RX_CAPT(3)); | ||
260 | |||
261 | /* Queue PW message (if there is room in FIFO), | ||
262 | * otherwise discard it. | ||
263 | */ | ||
264 | spin_lock(&priv->pw_fifo_lock); | ||
265 | if (kfifo_avail(&priv->pw_fifo) >= TSI721_RIO_PW_MSG_SIZE) | ||
266 | kfifo_in(&priv->pw_fifo, pw_buf, | ||
267 | TSI721_RIO_PW_MSG_SIZE); | ||
268 | else | ||
269 | priv->pw_discard_count++; | ||
270 | spin_unlock(&priv->pw_fifo_lock); | ||
271 | } | ||
272 | |||
273 | /* Clear pending PW interrupts */ | ||
274 | iowrite32(TSI721_RIO_PW_RX_STAT_PW_DISC | TSI721_RIO_PW_RX_STAT_PW_VAL, | ||
275 | priv->regs + TSI721_RIO_PW_RX_STAT); | ||
276 | |||
277 | schedule_work(&priv->pw_work); | ||
278 | |||
279 | return 0; | ||
280 | } | ||
281 | |||
282 | static void tsi721_pw_dpc(struct work_struct *work) | ||
283 | { | ||
284 | struct tsi721_device *priv = container_of(work, struct tsi721_device, | ||
285 | pw_work); | ||
286 | u32 msg_buffer[RIO_PW_MSG_SIZE/sizeof(u32)]; /* Use full size PW message | ||
287 | buffer for RIO layer */ | ||
288 | |||
289 | /* | ||
290 | * Process port-write messages | ||
291 | */ | ||
292 | while (kfifo_out_spinlocked(&priv->pw_fifo, (unsigned char *)msg_buffer, | ||
293 | TSI721_RIO_PW_MSG_SIZE, &priv->pw_fifo_lock)) { | ||
294 | /* Process one message */ | ||
295 | #ifdef DEBUG_PW | ||
296 | { | ||
297 | u32 i; | ||
298 | pr_debug("%s : Port-Write Message:", __func__); | ||
299 | for (i = 0; i < RIO_PW_MSG_SIZE/sizeof(u32); ) { | ||
300 | pr_debug("0x%02x: %08x %08x %08x %08x", i*4, | ||
301 | msg_buffer[i], msg_buffer[i + 1], | ||
302 | msg_buffer[i + 2], msg_buffer[i + 3]); | ||
303 | i += 4; | ||
304 | } | ||
305 | pr_debug("\n"); | ||
306 | } | ||
307 | #endif | ||
308 | /* Pass the port-write message to RIO core for processing */ | ||
309 | rio_inb_pwrite_handler((union rio_pw_msg *)msg_buffer); | ||
310 | } | ||
311 | } | ||
312 | |||
313 | /** | ||
314 | * tsi721_pw_enable - enable/disable port-write interface init | ||
315 | * @mport: Master port implementing the port write unit | ||
316 | * @enable: 1=enable; 0=disable port-write message handling | ||
317 | */ | ||
318 | static int tsi721_pw_enable(struct rio_mport *mport, int enable) | ||
319 | { | ||
320 | struct tsi721_device *priv = mport->priv; | ||
321 | u32 rval; | ||
322 | |||
323 | rval = ioread32(priv->regs + TSI721_RIO_EM_INT_ENABLE); | ||
324 | |||
325 | if (enable) | ||
326 | rval |= TSI721_RIO_EM_INT_ENABLE_PW_RX; | ||
327 | else | ||
328 | rval &= ~TSI721_RIO_EM_INT_ENABLE_PW_RX; | ||
329 | |||
330 | /* Clear pending PW interrupts */ | ||
331 | iowrite32(TSI721_RIO_PW_RX_STAT_PW_DISC | TSI721_RIO_PW_RX_STAT_PW_VAL, | ||
332 | priv->regs + TSI721_RIO_PW_RX_STAT); | ||
333 | /* Update enable bits */ | ||
334 | iowrite32(rval, priv->regs + TSI721_RIO_EM_INT_ENABLE); | ||
335 | |||
336 | return 0; | ||
337 | } | ||
338 | |||
339 | /** | ||
340 | * tsi721_dsend - Send a RapidIO doorbell | ||
341 | * @mport: RapidIO master port info | ||
342 | * @index: ID of RapidIO interface | ||
343 | * @destid: Destination ID of target device | ||
344 | * @data: 16-bit info field of RapidIO doorbell | ||
345 | * | ||
346 | * Sends a RapidIO doorbell message. Always returns %0. | ||
347 | */ | ||
348 | static int tsi721_dsend(struct rio_mport *mport, int index, | ||
349 | u16 destid, u16 data) | ||
350 | { | ||
351 | struct tsi721_device *priv = mport->priv; | ||
352 | u32 offset; | ||
353 | |||
354 | offset = (((mport->sys_size) ? RIO_TT_CODE_16 : RIO_TT_CODE_8) << 18) | | ||
355 | (destid << 2); | ||
356 | |||
357 | dev_dbg(&priv->pdev->dev, | ||
358 | "Send Doorbell 0x%04x to destID 0x%x\n", data, destid); | ||
359 | iowrite16be(data, priv->odb_base + offset); | ||
360 | |||
361 | return 0; | ||
362 | } | ||
363 | |||
364 | /** | ||
365 | * tsi721_dbell_handler - Tsi721 doorbell interrupt handler | ||
366 | * @mport: RapidIO master port structure | ||
367 | * | ||
368 | * Handles inbound doorbell interrupts. Copies doorbell entry from an internal | ||
369 | * buffer into DB message FIFO and schedules deferred routine to process | ||
370 | * queued DBs. | ||
371 | */ | ||
372 | static int | ||
373 | tsi721_dbell_handler(struct rio_mport *mport) | ||
374 | { | ||
375 | struct tsi721_device *priv = mport->priv; | ||
376 | u32 regval; | ||
377 | |||
378 | /* Disable IDB interrupts */ | ||
379 | regval = ioread32(priv->regs + TSI721_SR_CHINTE(IDB_QUEUE)); | ||
380 | regval &= ~TSI721_SR_CHINT_IDBQRCV; | ||
381 | iowrite32(regval, | ||
382 | priv->regs + TSI721_SR_CHINTE(IDB_QUEUE)); | ||
383 | |||
384 | schedule_work(&priv->idb_work); | ||
385 | |||
386 | return 0; | ||
387 | } | ||
388 | |||
389 | static void tsi721_db_dpc(struct work_struct *work) | ||
390 | { | ||
391 | struct tsi721_device *priv = container_of(work, struct tsi721_device, | ||
392 | idb_work); | ||
393 | struct rio_mport *mport; | ||
394 | struct rio_dbell *dbell; | ||
395 | int found = 0; | ||
396 | u32 wr_ptr, rd_ptr; | ||
397 | u64 *idb_entry; | ||
398 | u32 regval; | ||
399 | union { | ||
400 | u64 msg; | ||
401 | u8 bytes[8]; | ||
402 | } idb; | ||
403 | |||
404 | /* | ||
405 | * Process queued inbound doorbells | ||
406 | */ | ||
407 | mport = priv->mport; | ||
408 | |||
409 | wr_ptr = ioread32(priv->regs + TSI721_IDQ_WP(IDB_QUEUE)) % IDB_QSIZE; | ||
410 | rd_ptr = ioread32(priv->regs + TSI721_IDQ_RP(IDB_QUEUE)) % IDB_QSIZE; | ||
411 | |||
412 | while (wr_ptr != rd_ptr) { | ||
413 | idb_entry = (u64 *)(priv->idb_base + | ||
414 | (TSI721_IDB_ENTRY_SIZE * rd_ptr)); | ||
415 | rd_ptr++; | ||
416 | rd_ptr %= IDB_QSIZE; | ||
417 | idb.msg = *idb_entry; | ||
418 | *idb_entry = 0; | ||
419 | |||
420 | /* Process one doorbell */ | ||
421 | list_for_each_entry(dbell, &mport->dbells, node) { | ||
422 | if ((dbell->res->start <= DBELL_INF(idb.bytes)) && | ||
423 | (dbell->res->end >= DBELL_INF(idb.bytes))) { | ||
424 | found = 1; | ||
425 | break; | ||
426 | } | ||
427 | } | ||
428 | |||
429 | if (found) { | ||
430 | dbell->dinb(mport, dbell->dev_id, DBELL_SID(idb.bytes), | ||
431 | DBELL_TID(idb.bytes), DBELL_INF(idb.bytes)); | ||
432 | } else { | ||
433 | dev_dbg(&priv->pdev->dev, | ||
434 | "spurious inb doorbell, sid %2.2x tid %2.2x" | ||
435 | " info %4.4x\n", DBELL_SID(idb.bytes), | ||
436 | DBELL_TID(idb.bytes), DBELL_INF(idb.bytes)); | ||
437 | } | ||
438 | |||
439 | wr_ptr = ioread32(priv->regs + | ||
440 | TSI721_IDQ_WP(IDB_QUEUE)) % IDB_QSIZE; | ||
441 | } | ||
442 | |||
443 | iowrite32(rd_ptr & (IDB_QSIZE - 1), | ||
444 | priv->regs + TSI721_IDQ_RP(IDB_QUEUE)); | ||
445 | |||
446 | /* Re-enable IDB interrupts */ | ||
447 | regval = ioread32(priv->regs + TSI721_SR_CHINTE(IDB_QUEUE)); | ||
448 | regval |= TSI721_SR_CHINT_IDBQRCV; | ||
449 | iowrite32(regval, | ||
450 | priv->regs + TSI721_SR_CHINTE(IDB_QUEUE)); | ||
451 | |||
452 | wr_ptr = ioread32(priv->regs + TSI721_IDQ_WP(IDB_QUEUE)) % IDB_QSIZE; | ||
453 | if (wr_ptr != rd_ptr) | ||
454 | schedule_work(&priv->idb_work); | ||
455 | } | ||
456 | |||
457 | /** | ||
458 | * tsi721_irqhandler - Tsi721 interrupt handler | ||
459 | * @irq: Linux interrupt number | ||
460 | * @ptr: Pointer to interrupt-specific data (mport structure) | ||
461 | * | ||
462 | * Handles Tsi721 interrupts signaled using MSI and INTA. Checks reported | ||
463 | * interrupt events and calls an event-specific handler(s). | ||
464 | */ | ||
465 | static irqreturn_t tsi721_irqhandler(int irq, void *ptr) | ||
466 | { | ||
467 | struct rio_mport *mport = (struct rio_mport *)ptr; | ||
468 | struct tsi721_device *priv = mport->priv; | ||
469 | u32 dev_int; | ||
470 | u32 dev_ch_int; | ||
471 | u32 intval; | ||
472 | u32 ch_inte; | ||
473 | |||
474 | dev_int = ioread32(priv->regs + TSI721_DEV_INT); | ||
475 | if (!dev_int) | ||
476 | return IRQ_NONE; | ||
477 | |||
478 | dev_ch_int = ioread32(priv->regs + TSI721_DEV_CHAN_INT); | ||
479 | |||
480 | if (dev_int & TSI721_DEV_INT_SR2PC_CH) { | ||
481 | /* Service SR2PC Channel interrupts */ | ||
482 | if (dev_ch_int & TSI721_INT_SR2PC_CHAN(IDB_QUEUE)) { | ||
483 | /* Service Inbound Doorbell interrupt */ | ||
484 | intval = ioread32(priv->regs + | ||
485 | TSI721_SR_CHINT(IDB_QUEUE)); | ||
486 | if (intval & TSI721_SR_CHINT_IDBQRCV) | ||
487 | tsi721_dbell_handler(mport); | ||
488 | else | ||
489 | dev_info(&priv->pdev->dev, | ||
490 | "Unsupported SR_CH_INT %x\n", intval); | ||
491 | |||
492 | /* Clear interrupts */ | ||
493 | iowrite32(intval, | ||
494 | priv->regs + TSI721_SR_CHINT(IDB_QUEUE)); | ||
495 | ioread32(priv->regs + TSI721_SR_CHINT(IDB_QUEUE)); | ||
496 | } | ||
497 | } | ||
498 | |||
499 | if (dev_int & TSI721_DEV_INT_SMSG_CH) { | ||
500 | int ch; | ||
501 | |||
502 | /* | ||
503 | * Service channel interrupts from Messaging Engine | ||
504 | */ | ||
505 | |||
506 | if (dev_ch_int & TSI721_INT_IMSG_CHAN_M) { /* Inbound Msg */ | ||
507 | /* Disable signaled OB MSG Channel interrupts */ | ||
508 | ch_inte = ioread32(priv->regs + TSI721_DEV_CHAN_INTE); | ||
509 | ch_inte &= ~(dev_ch_int & TSI721_INT_IMSG_CHAN_M); | ||
510 | iowrite32(ch_inte, priv->regs + TSI721_DEV_CHAN_INTE); | ||
511 | |||
512 | /* | ||
513 | * Process Inbound Message interrupt for each MBOX | ||
514 | */ | ||
515 | for (ch = 4; ch < RIO_MAX_MBOX + 4; ch++) { | ||
516 | if (!(dev_ch_int & TSI721_INT_IMSG_CHAN(ch))) | ||
517 | continue; | ||
518 | tsi721_imsg_handler(priv, ch); | ||
519 | } | ||
520 | } | ||
521 | |||
522 | if (dev_ch_int & TSI721_INT_OMSG_CHAN_M) { /* Outbound Msg */ | ||
523 | /* Disable signaled OB MSG Channel interrupts */ | ||
524 | ch_inte = ioread32(priv->regs + TSI721_DEV_CHAN_INTE); | ||
525 | ch_inte &= ~(dev_ch_int & TSI721_INT_OMSG_CHAN_M); | ||
526 | iowrite32(ch_inte, priv->regs + TSI721_DEV_CHAN_INTE); | ||
527 | |||
528 | /* | ||
529 | * Process Outbound Message interrupts for each MBOX | ||
530 | */ | ||
531 | |||
532 | for (ch = 0; ch < RIO_MAX_MBOX; ch++) { | ||
533 | if (!(dev_ch_int & TSI721_INT_OMSG_CHAN(ch))) | ||
534 | continue; | ||
535 | tsi721_omsg_handler(priv, ch); | ||
536 | } | ||
537 | } | ||
538 | } | ||
539 | |||
540 | if (dev_int & TSI721_DEV_INT_SRIO) { | ||
541 | /* Service SRIO MAC interrupts */ | ||
542 | intval = ioread32(priv->regs + TSI721_RIO_EM_INT_STAT); | ||
543 | if (intval & TSI721_RIO_EM_INT_STAT_PW_RX) | ||
544 | tsi721_pw_handler(mport); | ||
545 | } | ||
546 | |||
547 | #ifdef CONFIG_RAPIDIO_DMA_ENGINE | ||
548 | if (dev_int & TSI721_DEV_INT_BDMA_CH) { | ||
549 | int ch; | ||
550 | |||
551 | if (dev_ch_int & TSI721_INT_BDMA_CHAN_M) { | ||
552 | dev_dbg(&priv->pdev->dev, | ||
553 | "IRQ from DMA channel 0x%08x\n", dev_ch_int); | ||
554 | |||
555 | for (ch = 0; ch < TSI721_DMA_MAXCH; ch++) { | ||
556 | if (!(dev_ch_int & TSI721_INT_BDMA_CHAN(ch))) | ||
557 | continue; | ||
558 | tsi721_bdma_handler(&priv->bdma[ch]); | ||
559 | } | ||
560 | } | ||
561 | } | ||
562 | #endif | ||
563 | return IRQ_HANDLED; | ||
564 | } | ||
565 | |||
566 | static void tsi721_interrupts_init(struct tsi721_device *priv) | ||
567 | { | ||
568 | u32 intr; | ||
569 | |||
570 | /* Enable IDB interrupts */ | ||
571 | iowrite32(TSI721_SR_CHINT_ALL, | ||
572 | priv->regs + TSI721_SR_CHINT(IDB_QUEUE)); | ||
573 | iowrite32(TSI721_SR_CHINT_IDBQRCV, | ||
574 | priv->regs + TSI721_SR_CHINTE(IDB_QUEUE)); | ||
575 | |||
576 | /* Enable SRIO MAC interrupts */ | ||
577 | iowrite32(TSI721_RIO_EM_DEV_INT_EN_INT, | ||
578 | priv->regs + TSI721_RIO_EM_DEV_INT_EN); | ||
579 | |||
580 | /* Enable interrupts from channels in use */ | ||
581 | #ifdef CONFIG_RAPIDIO_DMA_ENGINE | ||
582 | intr = TSI721_INT_SR2PC_CHAN(IDB_QUEUE) | | ||
583 | (TSI721_INT_BDMA_CHAN_M & | ||
584 | ~TSI721_INT_BDMA_CHAN(TSI721_DMACH_MAINT)); | ||
585 | #else | ||
586 | intr = TSI721_INT_SR2PC_CHAN(IDB_QUEUE); | ||
587 | #endif | ||
588 | iowrite32(intr, priv->regs + TSI721_DEV_CHAN_INTE); | ||
589 | |||
590 | if (priv->flags & TSI721_USING_MSIX) | ||
591 | intr = TSI721_DEV_INT_SRIO; | ||
592 | else | ||
593 | intr = TSI721_DEV_INT_SR2PC_CH | TSI721_DEV_INT_SRIO | | ||
594 | TSI721_DEV_INT_SMSG_CH | TSI721_DEV_INT_BDMA_CH; | ||
595 | |||
596 | iowrite32(intr, priv->regs + TSI721_DEV_INTE); | ||
597 | ioread32(priv->regs + TSI721_DEV_INTE); | ||
598 | } | ||
599 | |||
600 | #ifdef CONFIG_PCI_MSI | ||
601 | /** | ||
602 | * tsi721_omsg_msix - MSI-X interrupt handler for outbound messaging | ||
603 | * @irq: Linux interrupt number | ||
604 | * @ptr: Pointer to interrupt-specific data (mport structure) | ||
605 | * | ||
606 | * Handles outbound messaging interrupts signaled using MSI-X. | ||
607 | */ | ||
608 | static irqreturn_t tsi721_omsg_msix(int irq, void *ptr) | ||
609 | { | ||
610 | struct tsi721_device *priv = ((struct rio_mport *)ptr)->priv; | ||
611 | int mbox; | ||
612 | |||
613 | mbox = (irq - priv->msix[TSI721_VECT_OMB0_DONE].vector) % RIO_MAX_MBOX; | ||
614 | tsi721_omsg_handler(priv, mbox); | ||
615 | return IRQ_HANDLED; | ||
616 | } | ||
617 | |||
618 | /** | ||
619 | * tsi721_imsg_msix - MSI-X interrupt handler for inbound messaging | ||
620 | * @irq: Linux interrupt number | ||
621 | * @ptr: Pointer to interrupt-specific data (mport structure) | ||
622 | * | ||
623 | * Handles inbound messaging interrupts signaled using MSI-X. | ||
624 | */ | ||
625 | static irqreturn_t tsi721_imsg_msix(int irq, void *ptr) | ||
626 | { | ||
627 | struct tsi721_device *priv = ((struct rio_mport *)ptr)->priv; | ||
628 | int mbox; | ||
629 | |||
630 | mbox = (irq - priv->msix[TSI721_VECT_IMB0_RCV].vector) % RIO_MAX_MBOX; | ||
631 | tsi721_imsg_handler(priv, mbox + 4); | ||
632 | return IRQ_HANDLED; | ||
633 | } | ||
634 | |||
635 | /** | ||
636 | * tsi721_srio_msix - Tsi721 MSI-X SRIO MAC interrupt handler | ||
637 | * @irq: Linux interrupt number | ||
638 | * @ptr: Pointer to interrupt-specific data (mport structure) | ||
639 | * | ||
640 | * Handles Tsi721 interrupts from SRIO MAC. | ||
641 | */ | ||
642 | static irqreturn_t tsi721_srio_msix(int irq, void *ptr) | ||
643 | { | ||
644 | struct tsi721_device *priv = ((struct rio_mport *)ptr)->priv; | ||
645 | u32 srio_int; | ||
646 | |||
647 | /* Service SRIO MAC interrupts */ | ||
648 | srio_int = ioread32(priv->regs + TSI721_RIO_EM_INT_STAT); | ||
649 | if (srio_int & TSI721_RIO_EM_INT_STAT_PW_RX) | ||
650 | tsi721_pw_handler((struct rio_mport *)ptr); | ||
651 | |||
652 | return IRQ_HANDLED; | ||
653 | } | ||
654 | |||
655 | /** | ||
656 | * tsi721_sr2pc_ch_msix - Tsi721 MSI-X SR2PC Channel interrupt handler | ||
657 | * @irq: Linux interrupt number | ||
658 | * @ptr: Pointer to interrupt-specific data (mport structure) | ||
659 | * | ||
660 | * Handles Tsi721 interrupts from SR2PC Channel. | ||
661 | * NOTE: At this moment services only one SR2PC channel associated with inbound | ||
662 | * doorbells. | ||
663 | */ | ||
664 | static irqreturn_t tsi721_sr2pc_ch_msix(int irq, void *ptr) | ||
665 | { | ||
666 | struct tsi721_device *priv = ((struct rio_mport *)ptr)->priv; | ||
667 | u32 sr_ch_int; | ||
668 | |||
669 | /* Service Inbound DB interrupt from SR2PC channel */ | ||
670 | sr_ch_int = ioread32(priv->regs + TSI721_SR_CHINT(IDB_QUEUE)); | ||
671 | if (sr_ch_int & TSI721_SR_CHINT_IDBQRCV) | ||
672 | tsi721_dbell_handler((struct rio_mport *)ptr); | ||
673 | |||
674 | /* Clear interrupts */ | ||
675 | iowrite32(sr_ch_int, priv->regs + TSI721_SR_CHINT(IDB_QUEUE)); | ||
676 | /* Read back to ensure that interrupt was cleared */ | ||
677 | sr_ch_int = ioread32(priv->regs + TSI721_SR_CHINT(IDB_QUEUE)); | ||
678 | |||
679 | return IRQ_HANDLED; | ||
680 | } | ||
681 | |||
682 | /** | ||
683 | * tsi721_request_msix - register interrupt service for MSI-X mode. | ||
684 | * @mport: RapidIO master port structure | ||
685 | * | ||
686 | * Registers MSI-X interrupt service routines for interrupts that are active | ||
687 | * immediately after mport initialization. Messaging interrupt service routines | ||
688 | * should be registered during corresponding open requests. | ||
689 | */ | ||
690 | static int tsi721_request_msix(struct rio_mport *mport) | ||
691 | { | ||
692 | struct tsi721_device *priv = mport->priv; | ||
693 | int err = 0; | ||
694 | |||
695 | err = request_irq(priv->msix[TSI721_VECT_IDB].vector, | ||
696 | tsi721_sr2pc_ch_msix, 0, | ||
697 | priv->msix[TSI721_VECT_IDB].irq_name, (void *)mport); | ||
698 | if (err) | ||
699 | goto out; | ||
700 | |||
701 | err = request_irq(priv->msix[TSI721_VECT_PWRX].vector, | ||
702 | tsi721_srio_msix, 0, | ||
703 | priv->msix[TSI721_VECT_PWRX].irq_name, (void *)mport); | ||
704 | if (err) | ||
705 | free_irq( | ||
706 | priv->msix[TSI721_VECT_IDB].vector, | ||
707 | (void *)mport); | ||
708 | out: | ||
709 | return err; | ||
710 | } | ||
711 | |||
/**
 * tsi721_enable_msix - Attempts to enable MSI-X support for Tsi721.
 * @priv: pointer to tsi721 private data
 *
 * Configures MSI-X support for Tsi721. Supports only an exact number
 * of requested vectors.
 *
 * Returns 0 on success, a positive count or negative error code as
 * returned by pci_enable_msix() on failure.
 */
static int tsi721_enable_msix(struct tsi721_device *priv)
{
	struct msix_entry entries[TSI721_VECT_MAX];
	int err;
	int i;

	/* Fixed vectors: inbound doorbell queue and SRIO MAC events */
	entries[TSI721_VECT_IDB].entry = TSI721_MSIX_SR2PC_IDBQ_RCV(IDB_QUEUE);
	entries[TSI721_VECT_PWRX].entry = TSI721_MSIX_SRIO_MAC_INT;

	/*
	 * Initialize MSI-X entries for Messaging Engine:
	 * this driver supports four RIO mailboxes (inbound and outbound)
	 * NOTE: Inbound message MBOX 0...4 use IB channels 4...7. Therefore
	 * offset +4 is added to IB MBOX number.
	 */
	for (i = 0; i < RIO_MAX_MBOX; i++) {
		entries[TSI721_VECT_IMB0_RCV + i].entry =
					TSI721_MSIX_IMSG_DQ_RCV(i + 4);
		entries[TSI721_VECT_IMB0_INT + i].entry =
					TSI721_MSIX_IMSG_INT(i + 4);
		entries[TSI721_VECT_OMB0_DONE + i].entry =
					TSI721_MSIX_OMSG_DONE(i);
		entries[TSI721_VECT_OMB0_INT + i].entry =
					TSI721_MSIX_OMSG_INT(i);
	}

#ifdef CONFIG_RAPIDIO_DMA_ENGINE
	/*
	 * Initialize MSI-X entries for Block DMA Engine:
	 * this driver supports XXX DMA channels
	 * (one is reserved for SRIO maintenance transactions)
	 */
	for (i = 0; i < TSI721_DMA_CHNUM; i++) {
		entries[TSI721_VECT_DMA0_DONE + i].entry =
					TSI721_MSIX_DMACH_DONE(i);
		entries[TSI721_VECT_DMA0_INT + i].entry =
					TSI721_MSIX_DMACH_INT(i);
	}
#endif /* CONFIG_RAPIDIO_DMA_ENGINE */

	/* All-or-nothing: a positive return means fewer vectors were
	 * available than requested, and MSI-X is not used in that case. */
	err = pci_enable_msix(priv->pdev, entries, ARRAY_SIZE(entries));
	if (err) {
		if (err > 0)
			dev_info(&priv->pdev->dev,
				 "Only %d MSI-X vectors available, "
				 "not using MSI-X\n", err);
		else
			dev_err(&priv->pdev->dev,
				"Failed to enable MSI-X (err=%d)\n", err);
		return err;
	}

	/*
	 * Copy MSI-X vector information into tsi721 private structure
	 * and pre-build the per-vector IRQ names used by request_irq().
	 */
	priv->msix[TSI721_VECT_IDB].vector = entries[TSI721_VECT_IDB].vector;
	snprintf(priv->msix[TSI721_VECT_IDB].irq_name, IRQ_DEVICE_NAME_MAX,
		 DRV_NAME "-idb@pci:%s", pci_name(priv->pdev));
	priv->msix[TSI721_VECT_PWRX].vector = entries[TSI721_VECT_PWRX].vector;
	snprintf(priv->msix[TSI721_VECT_PWRX].irq_name, IRQ_DEVICE_NAME_MAX,
		 DRV_NAME "-pwrx@pci:%s", pci_name(priv->pdev));

	for (i = 0; i < RIO_MAX_MBOX; i++) {
		priv->msix[TSI721_VECT_IMB0_RCV + i].vector =
				entries[TSI721_VECT_IMB0_RCV + i].vector;
		snprintf(priv->msix[TSI721_VECT_IMB0_RCV + i].irq_name,
			 IRQ_DEVICE_NAME_MAX, DRV_NAME "-imbr%d@pci:%s",
			 i, pci_name(priv->pdev));

		priv->msix[TSI721_VECT_IMB0_INT + i].vector =
				entries[TSI721_VECT_IMB0_INT + i].vector;
		snprintf(priv->msix[TSI721_VECT_IMB0_INT + i].irq_name,
			 IRQ_DEVICE_NAME_MAX, DRV_NAME "-imbi%d@pci:%s",
			 i, pci_name(priv->pdev));

		priv->msix[TSI721_VECT_OMB0_DONE + i].vector =
				entries[TSI721_VECT_OMB0_DONE + i].vector;
		snprintf(priv->msix[TSI721_VECT_OMB0_DONE + i].irq_name,
			 IRQ_DEVICE_NAME_MAX, DRV_NAME "-ombd%d@pci:%s",
			 i, pci_name(priv->pdev));

		priv->msix[TSI721_VECT_OMB0_INT + i].vector =
				entries[TSI721_VECT_OMB0_INT + i].vector;
		snprintf(priv->msix[TSI721_VECT_OMB0_INT + i].irq_name,
			 IRQ_DEVICE_NAME_MAX, DRV_NAME "-ombi%d@pci:%s",
			 i, pci_name(priv->pdev));
	}

#ifdef CONFIG_RAPIDIO_DMA_ENGINE
	for (i = 0; i < TSI721_DMA_CHNUM; i++) {
		priv->msix[TSI721_VECT_DMA0_DONE + i].vector =
				entries[TSI721_VECT_DMA0_DONE + i].vector;
		snprintf(priv->msix[TSI721_VECT_DMA0_DONE + i].irq_name,
			 IRQ_DEVICE_NAME_MAX, DRV_NAME "-dmad%d@pci:%s",
			 i, pci_name(priv->pdev));

		priv->msix[TSI721_VECT_DMA0_INT + i].vector =
				entries[TSI721_VECT_DMA0_INT + i].vector;
		snprintf(priv->msix[TSI721_VECT_DMA0_INT + i].irq_name,
			 IRQ_DEVICE_NAME_MAX, DRV_NAME "-dmai%d@pci:%s",
			 i, pci_name(priv->pdev));
	}
#endif /* CONFIG_RAPIDIO_DMA_ENGINE */

	return 0;
}
825 | #endif /* CONFIG_PCI_MSI */ | ||
826 | |||
/**
 * tsi721_request_irq - register interrupt service for the mport
 * @mport: RapidIO master port structure
 *
 * Uses per-event MSI-X handlers when TSI721_USING_MSIX was set at probe
 * time; otherwise installs the single combined handler on the PCI IRQ
 * line. IRQF_SHARED is used only for legacy INTA signalling, not MSI.
 * Returns 0 on success or the underlying request error code.
 */
static int tsi721_request_irq(struct rio_mport *mport)
{
	struct tsi721_device *priv = mport->priv;
	int err;

#ifdef CONFIG_PCI_MSI
	if (priv->flags & TSI721_USING_MSIX)
		err = tsi721_request_msix(mport);
	else
#endif
	err = request_irq(priv->pdev->irq, tsi721_irqhandler,
			(priv->flags & TSI721_USING_MSI) ? 0 : IRQF_SHARED,
			DRV_NAME, (void *)mport);

	if (err)
		dev_err(&priv->pdev->dev,
			"Unable to allocate interrupt, Error: %d\n", err);

	return err;
}
847 | |||
848 | /** | ||
849 | * tsi721_init_pc2sr_mapping - initializes outbound (PCIe->SRIO) | ||
850 | * translation regions. | ||
851 | * @priv: pointer to tsi721 private data | ||
852 | * | ||
853 | * Disables SREP translation regions. | ||
854 | */ | ||
855 | static void tsi721_init_pc2sr_mapping(struct tsi721_device *priv) | ||
856 | { | ||
857 | int i; | ||
858 | |||
859 | /* Disable all PC2SR translation windows */ | ||
860 | for (i = 0; i < TSI721_OBWIN_NUM; i++) | ||
861 | iowrite32(0, priv->regs + TSI721_OBWINLB(i)); | ||
862 | } | ||
863 | |||
/**
 * tsi721_rio_map_inb_mem -- Mapping inbound memory region.
 * @mport: RapidIO master port
 * @lstart: Local memory space start address.
 * @rstart: RapidIO space start address.
 * @size: The mapping region size.
 * @flags: Flags for mapping. 0 for using default flags.
 *
 * Return: 0 -- Success.
 *
 * This function will create the inbound mapping
 * from rstart to lstart.
 * Returns -EINVAL for an invalid size/alignment, -EBUSY when no free
 * inbound window remains.
 */
static int tsi721_rio_map_inb_mem(struct rio_mport *mport, dma_addr_t lstart,
		u64 rstart, u32 size, u32 flags)
{
	struct tsi721_device *priv = mport->priv;
	int i;
	u32 regval;

	/* Window size must be a power of two, at least 4 KB, and both
	 * addresses must be size-aligned. */
	if (!is_power_of_2(size) || size < 0x1000 ||
	    ((u64)lstart & (size - 1)) || (rstart & (size - 1)))
		return -EINVAL;

	/* Search for free inbound translation window (WEN bit clear) */
	for (i = 0; i < TSI721_IBWIN_NUM; i++) {
		regval = ioread32(priv->regs + TSI721_IBWIN_LB(i));
		if (!(regval & TSI721_IBWIN_LB_WEN))
			break;
	}

	if (i >= TSI721_IBWIN_NUM) {
		dev_err(&priv->pdev->dev,
			"Unable to find free inbound window\n");
		return -EBUSY;
	}

	iowrite32(TSI721_IBWIN_SIZE(size) << 8,
		priv->regs + TSI721_IBWIN_SZ(i));

	/* Program PCIe-side translation address (upper/lower halves) */
	iowrite32(((u64)lstart >> 32), priv->regs + TSI721_IBWIN_TUA(i));
	iowrite32(((u64)lstart & TSI721_IBWIN_TLA_ADD),
		  priv->regs + TSI721_IBWIN_TLA(i));

	/* Program RIO-side base; writing LB with WEN set enables the
	 * window, so this register is written last. */
	iowrite32(rstart >> 32, priv->regs + TSI721_IBWIN_UB(i));
	iowrite32((rstart & TSI721_IBWIN_LB_BA) | TSI721_IBWIN_LB_WEN,
		priv->regs + TSI721_IBWIN_LB(i));
	dev_dbg(&priv->pdev->dev,
		"Configured IBWIN%d mapping (RIO_0x%llx -> PCIe_0x%llx)\n",
		i, rstart, (unsigned long long)lstart);

	return 0;
}
917 | |||
/**
 * tsi721_rio_unmap_inb_mem -- Unmapping inbound memory region.
 * @mport: RapidIO master port
 * @lstart: Local memory space start address.
 *
 * Scans the inbound windows for an enabled window whose PCIe-side
 * translation address matches @lstart and disables it. A non-matching
 * @lstart is silently ignored.
 */
static void tsi721_rio_unmap_inb_mem(struct rio_mport *mport,
				dma_addr_t lstart)
{
	struct tsi721_device *priv = mport->priv;
	int i;
	u64 addr;
	u32 regval;

	/* Search for matching active inbound translation window */
	for (i = 0; i < TSI721_IBWIN_NUM; i++) {
		regval = ioread32(priv->regs + TSI721_IBWIN_LB(i));
		if (regval & TSI721_IBWIN_LB_WEN) {
			/* Reassemble 64-bit translation address from the
			 * upper/lower address registers */
			regval = ioread32(priv->regs + TSI721_IBWIN_TUA(i));
			addr = (u64)regval << 32;
			regval = ioread32(priv->regs + TSI721_IBWIN_TLA(i));
			addr |= regval & TSI721_IBWIN_TLA_ADD;

			if (addr == (u64)lstart) {
				/* Clearing LB (drops WEN) disables window */
				iowrite32(0, priv->regs + TSI721_IBWIN_LB(i));
				break;
			}
		}
	}
}
947 | |||
948 | /** | ||
949 | * tsi721_init_sr2pc_mapping - initializes inbound (SRIO->PCIe) | ||
950 | * translation regions. | ||
951 | * @priv: pointer to tsi721 private data | ||
952 | * | ||
953 | * Disables inbound windows. | ||
954 | */ | ||
955 | static void tsi721_init_sr2pc_mapping(struct tsi721_device *priv) | ||
956 | { | ||
957 | int i; | ||
958 | |||
959 | /* Disable all SR2PC inbound windows */ | ||
960 | for (i = 0; i < TSI721_IBWIN_NUM; i++) | ||
961 | iowrite32(0, priv->regs + TSI721_IBWIN_LB(i)); | ||
962 | } | ||
963 | |||
/**
 * tsi721_port_write_init - Inbound port write interface init
 * @priv: pointer to tsi721 private data
 *
 * Initializes inbound port write handler.
 * Returns %0 on success or %-ENOMEM on failure.
 */
static int tsi721_port_write_init(struct tsi721_device *priv)
{
	priv->pw_discard_count = 0;
	INIT_WORK(&priv->pw_work, tsi721_pw_dpc);
	spin_lock_init(&priv->pw_fifo_lock);
	/* FIFO buffers up to 32 port-write messages for deferred
	 * processing by the pw_work handler */
	if (kfifo_alloc(&priv->pw_fifo,
			TSI721_RIO_PW_MSG_SIZE * 32, GFP_KERNEL)) {
		dev_err(&priv->pdev->dev, "PW FIFO allocation failed\n");
		return -ENOMEM;
	}

	/* Use reliable port-write capture mode */
	iowrite32(TSI721_RIO_PW_CTL_PWC_REL, priv->regs + TSI721_RIO_PW_CTL);
	return 0;
}
986 | |||
/**
 * tsi721_doorbell_init - initialize inbound doorbell queue processing
 * @priv: pointer to tsi721 private data
 *
 * Allocates the DMA-coherent inbound doorbell queue buffer and programs
 * the IDB queue registers for IDB_QUEUE.
 * Returns %0 on success or %-ENOMEM on allocation failure.
 */
static int tsi721_doorbell_init(struct tsi721_device *priv)
{
	/* Outbound Doorbells do not require any setup.
	 * Tsi721 uses dedicated PCI BAR1 to generate doorbells.
	 * That BAR1 was mapped during the probe routine.
	 */

	/* Initialize Inbound Doorbell processing DPC and queue */
	priv->db_discard_count = 0;
	INIT_WORK(&priv->idb_work, tsi721_db_dpc);

	/* Allocate buffer for inbound doorbells queue */
	priv->idb_base = dma_zalloc_coherent(&priv->pdev->dev,
				IDB_QSIZE * TSI721_IDB_ENTRY_SIZE,
				&priv->idb_dma, GFP_KERNEL);
	if (!priv->idb_base)
		return -ENOMEM;

	dev_dbg(&priv->pdev->dev, "Allocated IDB buffer @ %p (phys = %llx)\n",
		priv->idb_base, (unsigned long long)priv->idb_dma);

	/* Program queue size and 64-bit queue base address */
	iowrite32(TSI721_IDQ_SIZE_VAL(IDB_QSIZE),
		priv->regs + TSI721_IDQ_SIZE(IDB_QUEUE));
	iowrite32(((u64)priv->idb_dma >> 32),
		priv->regs + TSI721_IDQ_BASEU(IDB_QUEUE));
	iowrite32(((u64)priv->idb_dma & TSI721_IDQ_BASEL_ADDR),
		priv->regs + TSI721_IDQ_BASEL(IDB_QUEUE));
	/* Enable accepting all inbound doorbells */
	iowrite32(0, priv->regs + TSI721_IDQ_MASK(IDB_QUEUE));

	iowrite32(TSI721_IDQ_INIT, priv->regs + TSI721_IDQ_CTL(IDB_QUEUE));

	/* Start consuming from the head of the (empty) queue */
	iowrite32(0, priv->regs + TSI721_IDQ_RP(IDB_QUEUE));

	return 0;
}
1023 | |||
1024 | static void tsi721_doorbell_free(struct tsi721_device *priv) | ||
1025 | { | ||
1026 | if (priv->idb_base == NULL) | ||
1027 | return; | ||
1028 | |||
1029 | /* Free buffer allocated for inbound doorbell queue */ | ||
1030 | dma_free_coherent(&priv->pdev->dev, IDB_QSIZE * TSI721_IDB_ENTRY_SIZE, | ||
1031 | priv->idb_base, priv->idb_dma); | ||
1032 | priv->idb_base = NULL; | ||
1033 | } | ||
1034 | |||
/**
 * tsi721_bdma_maint_init - Initialize maintenance request BDMA channel.
 * @priv: pointer to tsi721 private data
 *
 * Initialize BDMA channel allocated for RapidIO maintenance read/write
 * request generation
 * Returns %0 on success or %-ENOMEM on failure.
 */
static int tsi721_bdma_maint_init(struct tsi721_device *priv)
{
	struct tsi721_dma_desc *bd_ptr;
	u64		*sts_ptr;
	dma_addr_t	bd_phys, sts_phys;
	int		sts_size;
	/* Two descriptors: one working slot plus the ring link descriptor */
	int		bd_num = 2;
	void __iomem	*regs;

	dev_dbg(&priv->pdev->dev,
		"Init Block DMA Engine for Maintenance requests, CH%d\n",
		TSI721_DMACH_MAINT);

	/*
	 * Initialize DMA channel for maintenance requests
	 */

	priv->mdma.ch_id = TSI721_DMACH_MAINT;
	regs = priv->regs + TSI721_DMAC_BASE(TSI721_DMACH_MAINT);

	/* Allocate space for DMA descriptors */
	bd_ptr = dma_zalloc_coherent(&priv->pdev->dev,
					bd_num * sizeof(struct tsi721_dma_desc),
					&bd_phys, GFP_KERNEL);
	if (!bd_ptr)
		return -ENOMEM;

	priv->mdma.bd_num = bd_num;
	priv->mdma.bd_phys = bd_phys;
	priv->mdma.bd_base = bd_ptr;

	dev_dbg(&priv->pdev->dev, "DMA descriptors @ %p (phys = %llx)\n",
		bd_ptr, (unsigned long long)bd_phys);

	/* Allocate space for descriptor status FIFO.
	 * FIFO depth must be at least TSI721_DMA_MINSTSSZ and a power
	 * of two. */
	sts_size = (bd_num >= TSI721_DMA_MINSTSSZ) ?
					bd_num : TSI721_DMA_MINSTSSZ;
	sts_size = roundup_pow_of_two(sts_size);
	sts_ptr = dma_zalloc_coherent(&priv->pdev->dev,
				     sts_size * sizeof(struct tsi721_dma_sts),
				     &sts_phys, GFP_KERNEL);
	if (!sts_ptr) {
		/* Free space allocated for DMA descriptors */
		dma_free_coherent(&priv->pdev->dev,
				  bd_num * sizeof(struct tsi721_dma_desc),
				  bd_ptr, bd_phys);
		priv->mdma.bd_base = NULL;
		return -ENOMEM;
	}

	priv->mdma.sts_phys = sts_phys;
	priv->mdma.sts_base = sts_ptr;
	priv->mdma.sts_size = sts_size;

	dev_dbg(&priv->pdev->dev,
		"desc status FIFO @ %p (phys = %llx) size=0x%x\n",
		sts_ptr, (unsigned long long)sts_phys, sts_size);

	/* Initialize DMA descriptors ring: make the last descriptor a
	 * DTYPE3 link descriptor pointing back to the ring start */
	bd_ptr[bd_num - 1].type_id = cpu_to_le32(DTYPE3 << 29);
	bd_ptr[bd_num - 1].next_lo = cpu_to_le32((u64)bd_phys &
						 TSI721_DMAC_DPTRL_MASK);
	bd_ptr[bd_num - 1].next_hi = cpu_to_le32((u64)bd_phys >> 32);

	/* Setup DMA descriptor pointers */
	iowrite32(((u64)bd_phys >> 32), regs + TSI721_DMAC_DPTRH);
	iowrite32(((u64)bd_phys & TSI721_DMAC_DPTRL_MASK),
		  regs + TSI721_DMAC_DPTRL);

	/* Setup descriptor status FIFO */
	iowrite32(((u64)sts_phys >> 32), regs + TSI721_DMAC_DSBH);
	iowrite32(((u64)sts_phys & TSI721_DMAC_DSBL_MASK),
		  regs + TSI721_DMAC_DSBL);
	iowrite32(TSI721_DMAC_DSSZ_SIZE(sts_size),
		  regs + TSI721_DMAC_DSSZ);

	/* Clear interrupt bits */
	iowrite32(TSI721_DMAC_INT_ALL, regs + TSI721_DMAC_INT);

	/* Read back to flush the posted write before init toggle */
	ioread32(regs + TSI721_DMAC_INT);

	/* Toggle DMA channel initialization */
	iowrite32(TSI721_DMAC_CTL_INIT, regs + TSI721_DMAC_CTL);
	ioread32(regs + TSI721_DMAC_CTL);
	udelay(10);

	return 0;
}
1131 | |||
/**
 * tsi721_bdma_maint_free - release maintenance BDMA channel resources
 * @priv: pointer to tsi721 private data
 *
 * Returns %0 on success (or when the channel was never initialized),
 * %-EFAULT if the DMA channel is still running and cannot be torn down.
 */
static int tsi721_bdma_maint_free(struct tsi721_device *priv)
{
	u32 ch_stat;
	struct tsi721_bdma_maint *mdma = &priv->mdma;
	void __iomem *regs = priv->regs + TSI721_DMAC_BASE(mdma->ch_id);

	/* Nothing to do if the channel was never initialized */
	if (mdma->bd_base == NULL)
		return 0;

	/* Check if DMA channel still running */
	ch_stat = ioread32(regs + TSI721_DMAC_STS);
	if (ch_stat & TSI721_DMAC_STS_RUN)
		return -EFAULT;

	/* Put DMA channel into init state */
	iowrite32(TSI721_DMAC_CTL_INIT, regs + TSI721_DMAC_CTL);

	/* Free space allocated for DMA descriptors */
	dma_free_coherent(&priv->pdev->dev,
		mdma->bd_num * sizeof(struct tsi721_dma_desc),
		mdma->bd_base, mdma->bd_phys);
	mdma->bd_base = NULL;

	/* Free space allocated for status FIFO */
	dma_free_coherent(&priv->pdev->dev,
		mdma->sts_size * sizeof(struct tsi721_dma_sts),
		mdma->sts_base, mdma->sts_phys);
	mdma->sts_base = NULL;
	return 0;
}
1162 | |||
1163 | /* Enable Inbound Messaging Interrupts */ | ||
1164 | static void | ||
1165 | tsi721_imsg_interrupt_enable(struct tsi721_device *priv, int ch, | ||
1166 | u32 inte_mask) | ||
1167 | { | ||
1168 | u32 rval; | ||
1169 | |||
1170 | if (!inte_mask) | ||
1171 | return; | ||
1172 | |||
1173 | /* Clear pending Inbound Messaging interrupts */ | ||
1174 | iowrite32(inte_mask, priv->regs + TSI721_IBDMAC_INT(ch)); | ||
1175 | |||
1176 | /* Enable Inbound Messaging interrupts */ | ||
1177 | rval = ioread32(priv->regs + TSI721_IBDMAC_INTE(ch)); | ||
1178 | iowrite32(rval | inte_mask, priv->regs + TSI721_IBDMAC_INTE(ch)); | ||
1179 | |||
1180 | if (priv->flags & TSI721_USING_MSIX) | ||
1181 | return; /* Finished if we are in MSI-X mode */ | ||
1182 | |||
1183 | /* | ||
1184 | * For MSI and INTA interrupt signalling we need to enable next levels | ||
1185 | */ | ||
1186 | |||
1187 | /* Enable Device Channel Interrupt */ | ||
1188 | rval = ioread32(priv->regs + TSI721_DEV_CHAN_INTE); | ||
1189 | iowrite32(rval | TSI721_INT_IMSG_CHAN(ch), | ||
1190 | priv->regs + TSI721_DEV_CHAN_INTE); | ||
1191 | } | ||
1192 | |||
/* Disable Inbound Messaging Interrupts */
static void
tsi721_imsg_interrupt_disable(struct tsi721_device *priv, int ch,
				   u32 inte_mask)
{
	u32 rval;

	if (!inte_mask)
		return;

	/* Clear pending Inbound Messaging interrupts */
	iowrite32(inte_mask, priv->regs + TSI721_IBDMAC_INT(ch));

	/* Disable Inbound Messaging interrupts */
	rval = ioread32(priv->regs + TSI721_IBDMAC_INTE(ch));
	rval &= ~inte_mask;
	iowrite32(rval, priv->regs + TSI721_IBDMAC_INTE(ch));

	if (priv->flags & TSI721_USING_MSIX)
		return; /* Finished if we are in MSI-X mode */

	/*
	 * For MSI and INTA interrupt signalling we need to disable next levels
	 */

	/* Disable Device Channel Interrupt */
	rval = ioread32(priv->regs + TSI721_DEV_CHAN_INTE);
	rval &= ~TSI721_INT_IMSG_CHAN(ch);
	iowrite32(rval, priv->regs + TSI721_DEV_CHAN_INTE);
}
1223 | |||
1224 | /* Enable Outbound Messaging interrupts */ | ||
1225 | static void | ||
1226 | tsi721_omsg_interrupt_enable(struct tsi721_device *priv, int ch, | ||
1227 | u32 inte_mask) | ||
1228 | { | ||
1229 | u32 rval; | ||
1230 | |||
1231 | if (!inte_mask) | ||
1232 | return; | ||
1233 | |||
1234 | /* Clear pending Outbound Messaging interrupts */ | ||
1235 | iowrite32(inte_mask, priv->regs + TSI721_OBDMAC_INT(ch)); | ||
1236 | |||
1237 | /* Enable Outbound Messaging channel interrupts */ | ||
1238 | rval = ioread32(priv->regs + TSI721_OBDMAC_INTE(ch)); | ||
1239 | iowrite32(rval | inte_mask, priv->regs + TSI721_OBDMAC_INTE(ch)); | ||
1240 | |||
1241 | if (priv->flags & TSI721_USING_MSIX) | ||
1242 | return; /* Finished if we are in MSI-X mode */ | ||
1243 | |||
1244 | /* | ||
1245 | * For MSI and INTA interrupt signalling we need to enable next levels | ||
1246 | */ | ||
1247 | |||
1248 | /* Enable Device Channel Interrupt */ | ||
1249 | rval = ioread32(priv->regs + TSI721_DEV_CHAN_INTE); | ||
1250 | iowrite32(rval | TSI721_INT_OMSG_CHAN(ch), | ||
1251 | priv->regs + TSI721_DEV_CHAN_INTE); | ||
1252 | } | ||
1253 | |||
/* Disable Outbound Messaging interrupts */
static void
tsi721_omsg_interrupt_disable(struct tsi721_device *priv, int ch,
				   u32 inte_mask)
{
	u32 rval;

	if (!inte_mask)
		return;

	/* Clear pending Outbound Messaging interrupts */
	iowrite32(inte_mask, priv->regs + TSI721_OBDMAC_INT(ch));

	/* Disable Outbound Messaging interrupts */
	rval = ioread32(priv->regs + TSI721_OBDMAC_INTE(ch));
	rval &= ~inte_mask;
	iowrite32(rval, priv->regs + TSI721_OBDMAC_INTE(ch));

	if (priv->flags & TSI721_USING_MSIX)
		return; /* Finished if we are in MSI-X mode */

	/*
	 * For MSI and INTA interrupt signalling we need to disable next levels
	 */

	/* Disable Device Channel Interrupt */
	rval = ioread32(priv->regs + TSI721_DEV_CHAN_INTE);
	rval &= ~TSI721_INT_OMSG_CHAN(ch);
	iowrite32(rval, priv->regs + TSI721_DEV_CHAN_INTE);
}
1284 | |||
/**
 * tsi721_add_outb_message - Add message to the Tsi721 outbound message queue
 * @mport: Master port with outbound message queue
 * @rdev: Target of outbound message
 * @mbox: Outbound mailbox
 * @buffer: Message to add to outbound queue
 * @len: Length of message
 *
 * Returns %0 on success, %-EINVAL if the mailbox is not initialized or
 * @len is outside the 8..TSI721_MSG_MAX_SIZE range.
 * NOTE(review): no lock protects tx_slot/wr_count here — confirm callers
 * serialize access to a given mailbox.
 */
static int
tsi721_add_outb_message(struct rio_mport *mport, struct rio_dev *rdev, int mbox,
			void *buffer, size_t len)
{
	struct tsi721_device *priv = mport->priv;
	struct tsi721_omsg_desc *desc;
	u32 tx_slot;

	if (!priv->omsg_init[mbox] ||
	    len > TSI721_MSG_MAX_SIZE || len < 8)
		return -EINVAL;

	tx_slot = priv->omsg_ring[mbox].tx_slot;

	/* Copy message into transfer buffer */
	memcpy(priv->omsg_ring[mbox].omq_base[tx_slot], buffer, len);

	/* Round length up to a multiple of 8 (low bits masked off below) */
	if (len & 0x7)
		len += 8;

	/* Build descriptor associated with buffer */
	desc = priv->omsg_ring[mbox].omd_base;
	desc[tx_slot].type_id = cpu_to_le32((DTYPE4 << 29) | rdev->destid);
	/* Set IOF flag on every 4th descriptor — presumably to throttle
	 * completion interrupts; confirm against Tsi721 datasheet */
	if (tx_slot % 4 == 0)
		desc[tx_slot].type_id |= cpu_to_le32(TSI721_OMD_IOF);

	desc[tx_slot].msg_info =
		cpu_to_le32((mport->sys_size << 26) | (mbox << 22) |
			    (0xe << 12) | (len & 0xff8));
	desc[tx_slot].bufptr_lo =
		cpu_to_le32((u64)priv->omsg_ring[mbox].omq_phys[tx_slot] &
			    0xffffffff);
	desc[tx_slot].bufptr_hi =
		cpu_to_le32((u64)priv->omsg_ring[mbox].omq_phys[tx_slot] >> 32);

	priv->omsg_ring[mbox].wr_count++;

	/* Go to next descriptor */
	if (++priv->omsg_ring[mbox].tx_slot == priv->omsg_ring[mbox].size) {
		priv->omsg_ring[mbox].tx_slot = 0;
		/* Move through the ring link descriptor at the end */
		priv->omsg_ring[mbox].wr_count++;
	}

	/* Ensure descriptor writes are visible before updating the
	 * hardware write count */
	mb();

	/* Set new write count value */
	iowrite32(priv->omsg_ring[mbox].wr_count,
		priv->regs + TSI721_OBDMAC_DWRCNT(mbox));
	ioread32(priv->regs + TSI721_OBDMAC_DWRCNT(mbox));

	return 0;
}
1346 | |||
/**
 * tsi721_omsg_handler - Outbound Message Interrupt Handler
 * @priv: pointer to tsi721 private data
 * @ch: number of OB MSG channel to service
 *
 * Services channel interrupts from outbound messaging engine.
 * Scans the descriptor status FIFO for completed transfers, notifies the
 * upper layer via the registered mcback callback, and reinitializes the
 * channel after an error.
 */
static void tsi721_omsg_handler(struct tsi721_device *priv, int ch)
{
	u32 omsg_int;

	spin_lock(&priv->omsg_ring[ch].lock);

	omsg_int = ioread32(priv->regs + TSI721_OBDMAC_INT(ch));

	if (omsg_int & TSI721_OBDMAC_INT_ST_FULL)
		dev_info(&priv->pdev->dev,
			"OB MBOX%d: Status FIFO is full\n", ch);

	if (omsg_int & (TSI721_OBDMAC_INT_DONE | TSI721_OBDMAC_INT_IOF_DONE)) {
		u32 srd_ptr;
		u64 *sts_ptr, last_ptr = 0, prev_ptr = 0;
		int i, j;
		u32 tx_slot;

		/*
		 * Find last successfully processed descriptor
		 */

		/* Check and clear descriptor status FIFO entries.
		 * Entries are consumed in groups of 8 starting at the
		 * software read pointer; a zero entry marks the end of
		 * valid data. */
		srd_ptr = priv->omsg_ring[ch].sts_rdptr;
		sts_ptr = priv->omsg_ring[ch].sts_base;
		j = srd_ptr * 8;
		while (sts_ptr[j]) {
			for (i = 0; i < 8 && sts_ptr[j]; i++, j++) {
				prev_ptr = last_ptr;
				last_ptr = le64_to_cpu(sts_ptr[j]);
				sts_ptr[j] = 0;
			}

			++srd_ptr;
			srd_ptr %= priv->omsg_ring[ch].sts_size;
			j = srd_ptr * 8;
		}

		/* No completed descriptor found in the FIFO */
		if (last_ptr == 0)
			goto no_sts_update;

		priv->omsg_ring[ch].sts_rdptr = srd_ptr;
		iowrite32(srd_ptr, priv->regs + TSI721_OBDMAC_DSRP(ch));

		if (!priv->mport->outb_msg[ch].mcback)
			goto no_sts_update;

		/* Inform upper layer about transfer completion */

		/* Convert the completed descriptor bus address back into
		 * a ring slot index */
		tx_slot = (last_ptr - (u64)priv->omsg_ring[ch].omd_phys)/
						sizeof(struct tsi721_omsg_desc);

		/*
		 * Check if this is a Link Descriptor (LD).
		 * If yes, ignore LD and use descriptor processed
		 * before LD.
		 */
		if (tx_slot == priv->omsg_ring[ch].size) {
			if (prev_ptr)
				tx_slot = (prev_ptr -
					(u64)priv->omsg_ring[ch].omd_phys)/
						sizeof(struct tsi721_omsg_desc);
			else
				goto no_sts_update;
		}

		/* Move slot index to the next message to be sent */
		++tx_slot;
		if (tx_slot == priv->omsg_ring[ch].size)
			tx_slot = 0;
		BUG_ON(tx_slot >= priv->omsg_ring[ch].size);
		priv->mport->outb_msg[ch].mcback(priv->mport,
				priv->omsg_ring[ch].dev_id, ch,
				tx_slot);
	}

no_sts_update:

	if (omsg_int & TSI721_OBDMAC_INT_ERROR) {
		/*
		 * Outbound message operation aborted due to error,
		 * reinitialize OB MSG channel
		 */

		dev_dbg(&priv->pdev->dev, "OB MSG ABORT ch_stat=%x\n",
			ioread32(priv->regs + TSI721_OBDMAC_STS(ch)));

		iowrite32(TSI721_OBDMAC_INT_ERROR,
				priv->regs + TSI721_OBDMAC_INT(ch));
		iowrite32(TSI721_OBDMAC_CTL_INIT,
				priv->regs + TSI721_OBDMAC_CTL(ch));
		ioread32(priv->regs + TSI721_OBDMAC_CTL(ch));

		/* Inform upper level to clear all pending tx slots */
		if (priv->mport->outb_msg[ch].mcback)
			priv->mport->outb_msg[ch].mcback(priv->mport,
					priv->omsg_ring[ch].dev_id, ch,
					priv->omsg_ring[ch].tx_slot);
		/* Synch tx_slot tracking */
		iowrite32(priv->omsg_ring[ch].tx_slot,
			priv->regs + TSI721_OBDMAC_DRDCNT(ch));
		ioread32(priv->regs + TSI721_OBDMAC_DRDCNT(ch));
		priv->omsg_ring[ch].wr_count = priv->omsg_ring[ch].tx_slot;
		priv->omsg_ring[ch].sts_rdptr = 0;
	}

	/* Clear channel interrupts */
	iowrite32(omsg_int, priv->regs + TSI721_OBDMAC_INT(ch));

	if (!(priv->flags & TSI721_USING_MSIX)) {
		u32 ch_inte;

		/* Re-enable channel interrupts */
		ch_inte = ioread32(priv->regs + TSI721_DEV_CHAN_INTE);
		ch_inte |= TSI721_INT_OMSG_CHAN(ch);
		iowrite32(ch_inte, priv->regs + TSI721_DEV_CHAN_INTE);
	}

	spin_unlock(&priv->omsg_ring[ch].lock);
}
1474 | |||
1475 | /** | ||
1476 | * tsi721_open_outb_mbox - Initialize Tsi721 outbound mailbox | ||
1477 | * @mport: Master port implementing Outbound Messaging Engine | ||
1478 | * @dev_id: Device specific pointer to pass on event | ||
1479 | * @mbox: Mailbox to open | ||
1480 | * @entries: Number of entries in the outbound mailbox ring | ||
1481 | */ | ||
1482 | static int tsi721_open_outb_mbox(struct rio_mport *mport, void *dev_id, | ||
1483 | int mbox, int entries) | ||
1484 | { | ||
1485 | struct tsi721_device *priv = mport->priv; | ||
1486 | struct tsi721_omsg_desc *bd_ptr; | ||
1487 | int i, rc = 0; | ||
1488 | |||
1489 | if ((entries < TSI721_OMSGD_MIN_RING_SIZE) || | ||
1490 | (entries > (TSI721_OMSGD_RING_SIZE)) || | ||
1491 | (!is_power_of_2(entries)) || mbox >= RIO_MAX_MBOX) { | ||
1492 | rc = -EINVAL; | ||
1493 | goto out; | ||
1494 | } | ||
1495 | |||
1496 | priv->omsg_ring[mbox].dev_id = dev_id; | ||
1497 | priv->omsg_ring[mbox].size = entries; | ||
1498 | priv->omsg_ring[mbox].sts_rdptr = 0; | ||
1499 | spin_lock_init(&priv->omsg_ring[mbox].lock); | ||
1500 | |||
1501 | /* Outbound Msg Buffer allocation based on | ||
1502 | the number of maximum descriptor entries */ | ||
1503 | for (i = 0; i < entries; i++) { | ||
1504 | priv->omsg_ring[mbox].omq_base[i] = | ||
1505 | dma_alloc_coherent( | ||
1506 | &priv->pdev->dev, TSI721_MSG_BUFFER_SIZE, | ||
1507 | &priv->omsg_ring[mbox].omq_phys[i], | ||
1508 | GFP_KERNEL); | ||
1509 | if (priv->omsg_ring[mbox].omq_base[i] == NULL) { | ||
1510 | dev_dbg(&priv->pdev->dev, | ||
1511 | "Unable to allocate OB MSG data buffer for" | ||
1512 | " MBOX%d\n", mbox); | ||
1513 | rc = -ENOMEM; | ||
1514 | goto out_buf; | ||
1515 | } | ||
1516 | } | ||
1517 | |||
1518 | /* Outbound message descriptor allocation */ | ||
1519 | priv->omsg_ring[mbox].omd_base = dma_alloc_coherent( | ||
1520 | &priv->pdev->dev, | ||
1521 | (entries + 1) * sizeof(struct tsi721_omsg_desc), | ||
1522 | &priv->omsg_ring[mbox].omd_phys, GFP_KERNEL); | ||
1523 | if (priv->omsg_ring[mbox].omd_base == NULL) { | ||
1524 | dev_dbg(&priv->pdev->dev, | ||
1525 | "Unable to allocate OB MSG descriptor memory " | ||
1526 | "for MBOX%d\n", mbox); | ||
1527 | rc = -ENOMEM; | ||
1528 | goto out_buf; | ||
1529 | } | ||
1530 | |||
1531 | priv->omsg_ring[mbox].tx_slot = 0; | ||
1532 | |||
1533 | /* Outbound message descriptor status FIFO allocation */ | ||
1534 | priv->omsg_ring[mbox].sts_size = roundup_pow_of_two(entries + 1); | ||
1535 | priv->omsg_ring[mbox].sts_base = dma_zalloc_coherent(&priv->pdev->dev, | ||
1536 | priv->omsg_ring[mbox].sts_size * | ||
1537 | sizeof(struct tsi721_dma_sts), | ||
1538 | &priv->omsg_ring[mbox].sts_phys, GFP_KERNEL); | ||
1539 | if (priv->omsg_ring[mbox].sts_base == NULL) { | ||
1540 | dev_dbg(&priv->pdev->dev, | ||
1541 | "Unable to allocate OB MSG descriptor status FIFO " | ||
1542 | "for MBOX%d\n", mbox); | ||
1543 | rc = -ENOMEM; | ||
1544 | goto out_desc; | ||
1545 | } | ||
1546 | |||
1547 | /* | ||
1548 | * Configure Outbound Messaging Engine | ||
1549 | */ | ||
1550 | |||
1551 | /* Setup Outbound Message descriptor pointer */ | ||
1552 | iowrite32(((u64)priv->omsg_ring[mbox].omd_phys >> 32), | ||
1553 | priv->regs + TSI721_OBDMAC_DPTRH(mbox)); | ||
1554 | iowrite32(((u64)priv->omsg_ring[mbox].omd_phys & | ||
1555 | TSI721_OBDMAC_DPTRL_MASK), | ||
1556 | priv->regs + TSI721_OBDMAC_DPTRL(mbox)); | ||
1557 | |||
1558 | /* Setup Outbound Message descriptor status FIFO */ | ||
1559 | iowrite32(((u64)priv->omsg_ring[mbox].sts_phys >> 32), | ||
1560 | priv->regs + TSI721_OBDMAC_DSBH(mbox)); | ||
1561 | iowrite32(((u64)priv->omsg_ring[mbox].sts_phys & | ||
1562 | TSI721_OBDMAC_DSBL_MASK), | ||
1563 | priv->regs + TSI721_OBDMAC_DSBL(mbox)); | ||
1564 | iowrite32(TSI721_DMAC_DSSZ_SIZE(priv->omsg_ring[mbox].sts_size), | ||
1565 | priv->regs + (u32)TSI721_OBDMAC_DSSZ(mbox)); | ||
1566 | |||
1567 | /* Enable interrupts */ | ||
1568 | |||
1569 | #ifdef CONFIG_PCI_MSI | ||
1570 | if (priv->flags & TSI721_USING_MSIX) { | ||
1571 | /* Request interrupt service if we are in MSI-X mode */ | ||
1572 | rc = request_irq( | ||
1573 | priv->msix[TSI721_VECT_OMB0_DONE + mbox].vector, | ||
1574 | tsi721_omsg_msix, 0, | ||
1575 | priv->msix[TSI721_VECT_OMB0_DONE + mbox].irq_name, | ||
1576 | (void *)mport); | ||
1577 | |||
1578 | if (rc) { | ||
1579 | dev_dbg(&priv->pdev->dev, | ||
1580 | "Unable to allocate MSI-X interrupt for " | ||
1581 | "OBOX%d-DONE\n", mbox); | ||
1582 | goto out_stat; | ||
1583 | } | ||
1584 | |||
1585 | rc = request_irq(priv->msix[TSI721_VECT_OMB0_INT + mbox].vector, | ||
1586 | tsi721_omsg_msix, 0, | ||
1587 | priv->msix[TSI721_VECT_OMB0_INT + mbox].irq_name, | ||
1588 | (void *)mport); | ||
1589 | |||
1590 | if (rc) { | ||
1591 | dev_dbg(&priv->pdev->dev, | ||
1592 | "Unable to allocate MSI-X interrupt for " | ||
1593 | "MBOX%d-INT\n", mbox); | ||
1594 | free_irq( | ||
1595 | priv->msix[TSI721_VECT_OMB0_DONE + mbox].vector, | ||
1596 | (void *)mport); | ||
1597 | goto out_stat; | ||
1598 | } | ||
1599 | } | ||
1600 | #endif /* CONFIG_PCI_MSI */ | ||
1601 | |||
1602 | tsi721_omsg_interrupt_enable(priv, mbox, TSI721_OBDMAC_INT_ALL); | ||
1603 | |||
1604 | /* Initialize Outbound Message descriptors ring */ | ||
1605 | bd_ptr = priv->omsg_ring[mbox].omd_base; | ||
1606 | bd_ptr[entries].type_id = cpu_to_le32(DTYPE5 << 29); | ||
1607 | bd_ptr[entries].msg_info = 0; | ||
1608 | bd_ptr[entries].next_lo = | ||
1609 | cpu_to_le32((u64)priv->omsg_ring[mbox].omd_phys & | ||
1610 | TSI721_OBDMAC_DPTRL_MASK); | ||
1611 | bd_ptr[entries].next_hi = | ||
1612 | cpu_to_le32((u64)priv->omsg_ring[mbox].omd_phys >> 32); | ||
1613 | priv->omsg_ring[mbox].wr_count = 0; | ||
1614 | mb(); | ||
1615 | |||
1616 | /* Initialize Outbound Message engine */ | ||
1617 | iowrite32(TSI721_OBDMAC_CTL_INIT, priv->regs + TSI721_OBDMAC_CTL(mbox)); | ||
1618 | ioread32(priv->regs + TSI721_OBDMAC_DWRCNT(mbox)); | ||
1619 | udelay(10); | ||
1620 | |||
1621 | priv->omsg_init[mbox] = 1; | ||
1622 | |||
1623 | return 0; | ||
1624 | |||
1625 | #ifdef CONFIG_PCI_MSI | ||
1626 | out_stat: | ||
1627 | dma_free_coherent(&priv->pdev->dev, | ||
1628 | priv->omsg_ring[mbox].sts_size * sizeof(struct tsi721_dma_sts), | ||
1629 | priv->omsg_ring[mbox].sts_base, | ||
1630 | priv->omsg_ring[mbox].sts_phys); | ||
1631 | |||
1632 | priv->omsg_ring[mbox].sts_base = NULL; | ||
1633 | #endif /* CONFIG_PCI_MSI */ | ||
1634 | |||
1635 | out_desc: | ||
1636 | dma_free_coherent(&priv->pdev->dev, | ||
1637 | (entries + 1) * sizeof(struct tsi721_omsg_desc), | ||
1638 | priv->omsg_ring[mbox].omd_base, | ||
1639 | priv->omsg_ring[mbox].omd_phys); | ||
1640 | |||
1641 | priv->omsg_ring[mbox].omd_base = NULL; | ||
1642 | |||
1643 | out_buf: | ||
1644 | for (i = 0; i < priv->omsg_ring[mbox].size; i++) { | ||
1645 | if (priv->omsg_ring[mbox].omq_base[i]) { | ||
1646 | dma_free_coherent(&priv->pdev->dev, | ||
1647 | TSI721_MSG_BUFFER_SIZE, | ||
1648 | priv->omsg_ring[mbox].omq_base[i], | ||
1649 | priv->omsg_ring[mbox].omq_phys[i]); | ||
1650 | |||
1651 | priv->omsg_ring[mbox].omq_base[i] = NULL; | ||
1652 | } | ||
1653 | } | ||
1654 | |||
1655 | out: | ||
1656 | return rc; | ||
1657 | } | ||
1658 | |||
/**
 * tsi721_close_outb_mbox - Close Tsi721 outbound mailbox
 * @mport: Master port implementing the outbound message unit
 * @mbox: Mailbox to close
 *
 * Reverses tsi721_open_outb_mbox(): marks the mailbox uninitialized,
 * masks its interrupts (releasing the MSI-X vectors when they were
 * requested), then frees the descriptor status FIFO, the descriptor
 * ring and the per-entry message buffers.  Calling this on a mailbox
 * that was never opened is a no-op.
 */
static void tsi721_close_outb_mbox(struct rio_mport *mport, int mbox)
{
	struct tsi721_device *priv = mport->priv;
	u32 i;

	if (!priv->omsg_init[mbox])
		return;
	/* Mark closed first so concurrent users see the mailbox as down */
	priv->omsg_init[mbox] = 0;

	/* Disable Interrupts */

	tsi721_omsg_interrupt_disable(priv, mbox, TSI721_OBDMAC_INT_ALL);

#ifdef CONFIG_PCI_MSI
	if (priv->flags & TSI721_USING_MSIX) {
		/* Release both per-mailbox vectors taken at open time */
		free_irq(priv->msix[TSI721_VECT_OMB0_DONE + mbox].vector,
			 (void *)mport);
		free_irq(priv->msix[TSI721_VECT_OMB0_INT + mbox].vector,
			 (void *)mport);
	}
#endif /* CONFIG_PCI_MSI */

	/* Free OMSG Descriptor Status FIFO */
	dma_free_coherent(&priv->pdev->dev,
		priv->omsg_ring[mbox].sts_size * sizeof(struct tsi721_dma_sts),
		priv->omsg_ring[mbox].sts_base,
		priv->omsg_ring[mbox].sts_phys);

	priv->omsg_ring[mbox].sts_base = NULL;

	/*
	 * Free OMSG descriptors.  The ring holds size + 1 entries: the
	 * extra slot is the link descriptor set up at open time.
	 */
	dma_free_coherent(&priv->pdev->dev,
		(priv->omsg_ring[mbox].size + 1) *
		sizeof(struct tsi721_omsg_desc),
		priv->omsg_ring[mbox].omd_base,
		priv->omsg_ring[mbox].omd_phys);

	priv->omsg_ring[mbox].omd_base = NULL;

	/* Free message buffers (slots may be NULL after a failed open) */
	for (i = 0; i < priv->omsg_ring[mbox].size; i++) {
		if (priv->omsg_ring[mbox].omq_base[i]) {
			dma_free_coherent(&priv->pdev->dev,
					  TSI721_MSG_BUFFER_SIZE,
					  priv->omsg_ring[mbox].omq_base[i],
					  priv->omsg_ring[mbox].omq_phys[i]);

			priv->omsg_ring[mbox].omq_base[i] = NULL;
		}
	}
}
1715 | |||
/**
 * tsi721_imsg_handler - Inbound Message Interrupt Handler
 * @priv: pointer to tsi721 private data
 * @ch: inbound message channel number to service
 *
 * Services channel interrupts from inbound messaging engine.
 * Inbound message channels 4..7 serve mailboxes 0..3 (mbox = ch - 4,
 * mirroring ch = mbox + 4 in tsi721_open_inb_mbox()).
 */
static void tsi721_imsg_handler(struct tsi721_device *priv, int ch)
{
	u32 mbox = ch - 4;
	u32 imsg_int;

	spin_lock(&priv->imsg_ring[mbox].lock);

	imsg_int = ioread32(priv->regs + TSI721_IBDMAC_INT(ch));

	/* Error conditions are only reported, not recovered here */
	if (imsg_int & TSI721_IBDMAC_INT_SRTO)
		dev_info(&priv->pdev->dev, "IB MBOX%d SRIO timeout\n",
			mbox);

	if (imsg_int & TSI721_IBDMAC_INT_PC_ERROR)
		dev_info(&priv->pdev->dev, "IB MBOX%d PCIe error\n",
			mbox);

	if (imsg_int & TSI721_IBDMAC_INT_FQ_LOW)
		dev_info(&priv->pdev->dev,
			"IB MBOX%d IB free queue low\n", mbox);

	/* Clear IB channel interrupts by writing back the bits just read */
	iowrite32(imsg_int, priv->regs + TSI721_IBDMAC_INT(ch));

	/* If an IB Msg is received notify the upper layer */
	if (imsg_int & TSI721_IBDMAC_INT_DQ_RCV &&
		priv->mport->inb_msg[mbox].mcback)
		priv->mport->inb_msg[mbox].mcback(priv->mport,
				priv->imsg_ring[mbox].dev_id, mbox, -1);

	if (!(priv->flags & TSI721_USING_MSIX)) {
		u32 ch_inte;

		/*
		 * Re-enable channel interrupts.  NOTE(review): this assumes
		 * the top-level MSI/INTx dispatcher masked this channel in
		 * TSI721_DEV_CHAN_INTE before invoking the handler — confirm
		 * against the device-level interrupt dispatch code.
		 */
		ch_inte = ioread32(priv->regs + TSI721_DEV_CHAN_INTE);
		ch_inte |= TSI721_INT_IMSG_CHAN(ch);
		iowrite32(ch_inte, priv->regs + TSI721_DEV_CHAN_INTE);
	}

	spin_unlock(&priv->imsg_ring[mbox].lock);
}
1764 | |||
/**
 * tsi721_open_inb_mbox - Initialize Tsi721 inbound mailbox
 * @mport: Master port implementing the Inbound Messaging Engine
 * @dev_id: Device specific pointer to pass on event
 * @mbox: Mailbox to open
 * @entries: Number of entries in the inbound mailbox ring
 *
 * Allocates the message buffer area, the circular free-buffer pointer
 * list and the inbound descriptor ring, programs the corresponding
 * messaging channel (ch = mbox + 4) registers, hooks up MSI-X vectors
 * when in MSI-X mode, and finally starts the engine.
 *
 * Returns 0 on success; -EINVAL for an out-of-range @mbox or an
 * @entries count that is not a power of two within the supported
 * range; -ENOMEM (or the request_irq() error) on failure, with all
 * partially-acquired resources released via the goto cleanup chain.
 */
static int tsi721_open_inb_mbox(struct rio_mport *mport, void *dev_id,
				int mbox, int entries)
{
	struct tsi721_device *priv = mport->priv;
	int ch = mbox + 4;	/* IB message channels 4..7 map to mbox 0..3 */
	int i;
	u64 *free_ptr;
	int rc = 0;

	if ((entries < TSI721_IMSGD_MIN_RING_SIZE) ||
	    (entries > TSI721_IMSGD_RING_SIZE) ||
	    (!is_power_of_2(entries)) || mbox >= RIO_MAX_MBOX) {
		rc = -EINVAL;
		goto out;
	}

	/* Initialize IB Messaging Ring */
	priv->imsg_ring[mbox].dev_id = dev_id;
	priv->imsg_ring[mbox].size = entries;
	priv->imsg_ring[mbox].rx_slot = 0;
	priv->imsg_ring[mbox].desc_rdptr = 0;
	priv->imsg_ring[mbox].fq_wrptr = 0;
	for (i = 0; i < priv->imsg_ring[mbox].size; i++)
		priv->imsg_ring[mbox].imq_base[i] = NULL;
	spin_lock_init(&priv->imsg_ring[mbox].lock);

	/* Allocate buffers for incoming messages */
	priv->imsg_ring[mbox].buf_base =
		dma_alloc_coherent(&priv->pdev->dev,
				   entries * TSI721_MSG_BUFFER_SIZE,
				   &priv->imsg_ring[mbox].buf_phys,
				   GFP_KERNEL);

	if (priv->imsg_ring[mbox].buf_base == NULL) {
		dev_err(&priv->pdev->dev,
			"Failed to allocate buffers for IB MBOX%d\n", mbox);
		rc = -ENOMEM;
		goto out;
	}

	/* Allocate memory for circular free list (8 bytes per entry) */
	priv->imsg_ring[mbox].imfq_base =
		dma_alloc_coherent(&priv->pdev->dev,
				   entries * 8,
				   &priv->imsg_ring[mbox].imfq_phys,
				   GFP_KERNEL);

	if (priv->imsg_ring[mbox].imfq_base == NULL) {
		dev_err(&priv->pdev->dev,
			"Failed to allocate free queue for IB MBOX%d\n", mbox);
		rc = -ENOMEM;
		goto out_buf;
	}

	/* Allocate memory for Inbound message descriptors */
	priv->imsg_ring[mbox].imd_base =
		dma_alloc_coherent(&priv->pdev->dev,
				   entries * sizeof(struct tsi721_imsg_desc),
				   &priv->imsg_ring[mbox].imd_phys, GFP_KERNEL);

	if (priv->imsg_ring[mbox].imd_base == NULL) {
		dev_err(&priv->pdev->dev,
			"Failed to allocate descriptor memory for IB MBOX%d\n",
			mbox);
		rc = -ENOMEM;
		goto out_dma;
	}

	/*
	 * Fill free buffer pointer list.  Buffers inside buf_base are
	 * 0x1000 bytes apart; NOTE(review): this matches
	 * TSI721_MSG_BUFFER_SIZE used for the allocation above — consider
	 * using the macro instead of the magic number.
	 */
	free_ptr = priv->imsg_ring[mbox].imfq_base;
	for (i = 0; i < entries; i++)
		free_ptr[i] = cpu_to_le64(
				(u64)(priv->imsg_ring[mbox].buf_phys) +
				i * 0x1000);

	mb();

	/*
	 * For mapping of inbound SRIO Messages into appropriate queues we need
	 * to set Inbound Device ID register in the messaging engine. We do it
	 * once when first inbound mailbox is requested.
	 */
	if (!(priv->flags & TSI721_IMSGID_SET)) {
		iowrite32((u32)priv->mport->host_deviceid,
			priv->regs + TSI721_IB_DEVID);
		priv->flags |= TSI721_IMSGID_SET;
	}

	/*
	 * Configure Inbound Messaging channel (ch = mbox + 4)
	 */

	/* Setup Inbound Message free queue */
	iowrite32(((u64)priv->imsg_ring[mbox].imfq_phys >> 32),
		priv->regs + TSI721_IBDMAC_FQBH(ch));
	iowrite32(((u64)priv->imsg_ring[mbox].imfq_phys &
		TSI721_IBDMAC_FQBL_MASK),
		priv->regs+TSI721_IBDMAC_FQBL(ch));
	iowrite32(TSI721_DMAC_DSSZ_SIZE(entries),
		priv->regs + TSI721_IBDMAC_FQSZ(ch));

	/* Setup Inbound Message descriptor queue */
	iowrite32(((u64)priv->imsg_ring[mbox].imd_phys >> 32),
		priv->regs + TSI721_IBDMAC_DQBH(ch));
	iowrite32(((u32)priv->imsg_ring[mbox].imd_phys &
		   (u32)TSI721_IBDMAC_DQBL_MASK),
		priv->regs+TSI721_IBDMAC_DQBL(ch));
	iowrite32(TSI721_DMAC_DSSZ_SIZE(entries),
		priv->regs + TSI721_IBDMAC_DQSZ(ch));

	/* Enable interrupts */

#ifdef CONFIG_PCI_MSI
	if (priv->flags & TSI721_USING_MSIX) {
		/* Request interrupt service if we are in MSI-X mode */
		rc = request_irq(priv->msix[TSI721_VECT_IMB0_RCV + mbox].vector,
			tsi721_imsg_msix, 0,
			priv->msix[TSI721_VECT_IMB0_RCV + mbox].irq_name,
			(void *)mport);

		if (rc) {
			dev_dbg(&priv->pdev->dev,
				"Unable to allocate MSI-X interrupt for "
				"IBOX%d-DONE\n", mbox);
			goto out_desc;
		}

		rc = request_irq(priv->msix[TSI721_VECT_IMB0_INT + mbox].vector,
			tsi721_imsg_msix, 0,
			priv->msix[TSI721_VECT_IMB0_INT + mbox].irq_name,
			(void *)mport);

		if (rc)	{
			dev_dbg(&priv->pdev->dev,
				"Unable to allocate MSI-X interrupt for "
				"IBOX%d-INT\n", mbox);
			/* Undo the first request_irq() before bailing out */
			free_irq(
				priv->msix[TSI721_VECT_IMB0_RCV + mbox].vector,
				(void *)mport);
			goto out_desc;
		}
	}
#endif /* CONFIG_PCI_MSI */

	tsi721_imsg_interrupt_enable(priv, ch, TSI721_IBDMAC_INT_ALL);

	/* Initialize Inbound Message Engine */
	iowrite32(TSI721_IBDMAC_CTL_INIT, priv->regs + TSI721_IBDMAC_CTL(ch));
	/* Read back to flush posted writes before the settle delay */
	ioread32(priv->regs + TSI721_IBDMAC_CTL(ch));
	udelay(10);
	/* Hand all but one free-queue slot to the hardware */
	priv->imsg_ring[mbox].fq_wrptr = entries - 1;
	iowrite32(entries - 1, priv->regs + TSI721_IBDMAC_FQWP(ch));

	priv->imsg_init[mbox] = 1;
	return 0;

#ifdef CONFIG_PCI_MSI
out_desc:
	dma_free_coherent(&priv->pdev->dev,
		priv->imsg_ring[mbox].size * sizeof(struct tsi721_imsg_desc),
		priv->imsg_ring[mbox].imd_base,
		priv->imsg_ring[mbox].imd_phys);

	priv->imsg_ring[mbox].imd_base = NULL;
#endif /* CONFIG_PCI_MSI */

out_dma:
	dma_free_coherent(&priv->pdev->dev,
		priv->imsg_ring[mbox].size * 8,
		priv->imsg_ring[mbox].imfq_base,
		priv->imsg_ring[mbox].imfq_phys);

	priv->imsg_ring[mbox].imfq_base = NULL;

out_buf:
	dma_free_coherent(&priv->pdev->dev,
		priv->imsg_ring[mbox].size * TSI721_MSG_BUFFER_SIZE,
		priv->imsg_ring[mbox].buf_base,
		priv->imsg_ring[mbox].buf_phys);

	priv->imsg_ring[mbox].buf_base = NULL;

out:
	return rc;
}
1957 | |||
/**
 * tsi721_close_inb_mbox - Shut down Tsi721 inbound mailbox
 * @mport: Master port implementing the Inbound Messaging Engine
 * @mbox: Mailbox to close
 *
 * Reverses tsi721_open_inb_mbox(): disables channel interrupts,
 * releases the MSI-X vectors when used, clears the client buffer
 * queue and frees all coherent DMA allocations.  A mailbox that
 * was never opened is left untouched.
 */
static void tsi721_close_inb_mbox(struct rio_mport *mport, int mbox)
{
	struct tsi721_device *priv = mport->priv;
	u32 rx_slot;
	int ch = mbox + 4;	/* same channel mapping as the open path */

	if (!priv->imsg_init[mbox]) /* mbox isn't initialized yet */
		return;
	priv->imsg_init[mbox] = 0;

	/* Disable Inbound Messaging Engine */

	/*
	 * Disable Interrupts.
	 * NOTE(review): the OBDMAC (outbound) interrupt mask is passed to
	 * the inbound disable helper here — verify this is intentional and
	 * not meant to be TSI721_IBDMAC_INT_MASK.
	 */
	tsi721_imsg_interrupt_disable(priv, ch, TSI721_OBDMAC_INT_MASK);

#ifdef CONFIG_PCI_MSI
	if (priv->flags & TSI721_USING_MSIX) {
		free_irq(priv->msix[TSI721_VECT_IMB0_RCV + mbox].vector,
				(void *)mport);
		free_irq(priv->msix[TSI721_VECT_IMB0_INT + mbox].vector,
				(void *)mport);
	}
#endif /* CONFIG_PCI_MSI */

	/*
	 * Clear Inbound Buffer Queue (client-owned buffers; only the
	 * pointers are dropped, the memory is not freed here)
	 */
	for (rx_slot = 0; rx_slot < priv->imsg_ring[mbox].size; rx_slot++)
		priv->imsg_ring[mbox].imq_base[rx_slot] = NULL;

	/* Free memory allocated for message buffers */
	dma_free_coherent(&priv->pdev->dev,
		priv->imsg_ring[mbox].size * TSI721_MSG_BUFFER_SIZE,
		priv->imsg_ring[mbox].buf_base,
		priv->imsg_ring[mbox].buf_phys);

	priv->imsg_ring[mbox].buf_base = NULL;

	/* Free memory allocated for free pointer list (8 bytes/entry) */
	dma_free_coherent(&priv->pdev->dev,
		priv->imsg_ring[mbox].size * 8,
		priv->imsg_ring[mbox].imfq_base,
		priv->imsg_ring[mbox].imfq_phys);

	priv->imsg_ring[mbox].imfq_base = NULL;

	/* Free memory allocated for RX descriptors */
	dma_free_coherent(&priv->pdev->dev,
		priv->imsg_ring[mbox].size * sizeof(struct tsi721_imsg_desc),
		priv->imsg_ring[mbox].imd_base,
		priv->imsg_ring[mbox].imd_phys);

	priv->imsg_ring[mbox].imd_base = NULL;
}
2015 | |||
2016 | /** | ||
2017 | * tsi721_add_inb_buffer - Add buffer to the Tsi721 inbound message queue | ||
2018 | * @mport: Master port implementing the Inbound Messaging Engine | ||
2019 | * @mbox: Inbound mailbox number | ||
2020 | * @buf: Buffer to add to inbound queue | ||
2021 | */ | ||
2022 | static int tsi721_add_inb_buffer(struct rio_mport *mport, int mbox, void *buf) | ||
2023 | { | ||
2024 | struct tsi721_device *priv = mport->priv; | ||
2025 | u32 rx_slot; | ||
2026 | int rc = 0; | ||
2027 | |||
2028 | rx_slot = priv->imsg_ring[mbox].rx_slot; | ||
2029 | if (priv->imsg_ring[mbox].imq_base[rx_slot]) { | ||
2030 | dev_err(&priv->pdev->dev, | ||
2031 | "Error adding inbound buffer %d, buffer exists\n", | ||
2032 | rx_slot); | ||
2033 | rc = -EINVAL; | ||
2034 | goto out; | ||
2035 | } | ||
2036 | |||
2037 | priv->imsg_ring[mbox].imq_base[rx_slot] = buf; | ||
2038 | |||
2039 | if (++priv->imsg_ring[mbox].rx_slot == priv->imsg_ring[mbox].size) | ||
2040 | priv->imsg_ring[mbox].rx_slot = 0; | ||
2041 | |||
2042 | out: | ||
2043 | return rc; | ||
2044 | } | ||
2045 | |||
/**
 * tsi721_get_inb_message - Fetch inbound message from the Tsi721 MSG Queue
 * @mport: Master port implementing the Inbound Messaging Engine
 * @mbox: Inbound mailbox number
 *
 * Copies the oldest received message (if any) into the next client
 * buffer registered via tsi721_add_inb_buffer(), retires the hardware
 * descriptor and returns the message buffer back to the free queue.
 *
 * Returns pointer to the message on success or NULL on failure.
 */
static void *tsi721_get_inb_message(struct rio_mport *mport, int mbox)
{
	struct tsi721_device *priv = mport->priv;
	struct tsi721_imsg_desc *desc;
	u32 rx_slot;
	void *rx_virt = NULL;
	u64 rx_phys;
	void *buf = NULL;
	u64 *free_ptr;
	int ch = mbox + 4;
	int msg_size;

	if (!priv->imsg_init[mbox])
		return NULL;

	desc = priv->imsg_ring[mbox].imd_base;
	desc += priv->imsg_ring[mbox].desc_rdptr;

	/* HO (handle-owned?) bit clear means no message pending here */
	if (!(le32_to_cpu(desc->msg_info) & TSI721_IMD_HO))
		goto out;

	/*
	 * Find the next occupied client buffer slot.
	 * NOTE(review): this loop does not terminate if no buffer was ever
	 * added to the ring — confirm callers always add buffers before
	 * polling for messages.
	 */
	rx_slot = priv->imsg_ring[mbox].rx_slot;
	while (priv->imsg_ring[mbox].imq_base[rx_slot] == NULL) {
		if (++rx_slot == priv->imsg_ring[mbox].size)
			rx_slot = 0;
	}

	/* DMA address of the received message, as written by hardware */
	rx_phys = ((u64)le32_to_cpu(desc->bufptr_hi) << 32) |
			le32_to_cpu(desc->bufptr_lo);

	/* Translate to the kernel virtual address inside buf_base */
	rx_virt = priv->imsg_ring[mbox].buf_base +
		  (rx_phys - (u64)priv->imsg_ring[mbox].buf_phys);

	buf = priv->imsg_ring[mbox].imq_base[rx_slot];
	msg_size = le32_to_cpu(desc->msg_info) & TSI721_IMD_BCOUNT;
	/* Byte count of 0 encodes the maximum message size */
	if (msg_size == 0)
		msg_size = RIO_MAX_MSG_SIZE;

	memcpy(buf, rx_virt, msg_size);
	priv->imsg_ring[mbox].imq_base[rx_slot] = NULL;

	/* Release the descriptor back to hardware and advance read ptr */
	desc->msg_info &= cpu_to_le32(~TSI721_IMD_HO);
	if (++priv->imsg_ring[mbox].desc_rdptr == priv->imsg_ring[mbox].size)
		priv->imsg_ring[mbox].desc_rdptr = 0;

	iowrite32(priv->imsg_ring[mbox].desc_rdptr,
		priv->regs + TSI721_IBDMAC_DQRP(ch));

	/* Return free buffer into the pointer list */
	free_ptr = priv->imsg_ring[mbox].imfq_base;
	free_ptr[priv->imsg_ring[mbox].fq_wrptr] = cpu_to_le64(rx_phys);

	if (++priv->imsg_ring[mbox].fq_wrptr == priv->imsg_ring[mbox].size)
		priv->imsg_ring[mbox].fq_wrptr = 0;

	iowrite32(priv->imsg_ring[mbox].fq_wrptr,
		priv->regs + TSI721_IBDMAC_FQWP(ch));
out:
	return buf;
}
2113 | |||
/**
 * tsi721_messages_init - Initialization of Messaging Engine
 * @priv: pointer to tsi721 private data
 *
 * Configures Tsi721 messaging engine: clears the ECC and retry
 * counters, programs the SRIO request/response timeout, and resets
 * interrupt/status/ECC-log registers for every inbound channel.
 * Always returns 0.
 */
static int tsi721_messages_init(struct tsi721_device *priv)
{
	int	ch;

	/* Clear ECC log and retry counters */
	iowrite32(0, priv->regs + TSI721_SMSG_ECC_LOG);
	iowrite32(0, priv->regs + TSI721_RETRY_GEN_CNT);
	iowrite32(0, priv->regs + TSI721_RETRY_RX_CNT);

	/* Set SRIO Message Request/Response Timeout */
	iowrite32(TSI721_RQRPTO_VAL, priv->regs + TSI721_RQRPTO);

	/* Initialize Inbound Messaging Engine Registers */
	for (ch = 0; ch < TSI721_IMSG_CHNUM; ch++) {
		/* Clear interrupt bits (write-1-to-clear, all bits) */
		iowrite32(TSI721_IBDMAC_INT_MASK,
			priv->regs + TSI721_IBDMAC_INT(ch));
		/* Clear Status */
		iowrite32(0,
			priv->regs + TSI721_IBDMAC_STS(ch));

		/* Reset per-channel ECC correctable/uncorrectable logs */
		iowrite32(TSI721_SMSG_ECC_COR_LOG_MASK,
			priv->regs + TSI721_SMSG_ECC_COR_LOG(ch));
		iowrite32(TSI721_SMSG_ECC_NCOR_MASK,
			priv->regs + TSI721_SMSG_ECC_NCOR(ch));
	}

	return 0;
}
2147 | |||
/**
 * tsi721_disable_ints - disables all device interrupts
 * @priv: pointer to tsi721 private data
 *
 * Writes zero to every interrupt-enable register of the device:
 * device level, channel level, inbound/outbound messaging, BDMA,
 * SRIO channels, SR2PC/PC2SR, I2C and the SRIO MAC event blocks.
 * Used to bring the device to a quiescent state.
 */
static void tsi721_disable_ints(struct tsi721_device *priv)
{
	int ch;

	/* Disable all device level interrupts */
	iowrite32(0, priv->regs + TSI721_DEV_INTE);

	/* Disable all Device Channel interrupts */
	iowrite32(0, priv->regs + TSI721_DEV_CHAN_INTE);

	/* Disable all Inbound Msg Channel interrupts */
	for (ch = 0; ch < TSI721_IMSG_CHNUM; ch++)
		iowrite32(0, priv->regs + TSI721_IBDMAC_INTE(ch));

	/* Disable all Outbound Msg Channel interrupts */
	for (ch = 0; ch < TSI721_OMSG_CHNUM; ch++)
		iowrite32(0, priv->regs + TSI721_OBDMAC_INTE(ch));

	/* Disable all general messaging interrupts */
	iowrite32(0, priv->regs + TSI721_SMSG_INTE);

	/* Disable all BDMA Channel interrupts */
	for (ch = 0; ch < TSI721_DMA_MAXCH; ch++)
		iowrite32(0,
			priv->regs + TSI721_DMAC_BASE(ch) + TSI721_DMAC_INTE);

	/* Disable all general BDMA interrupts */
	iowrite32(0, priv->regs + TSI721_BDMA_INTE);

	/* Disable all SRIO Channel interrupts */
	for (ch = 0; ch < TSI721_SRIO_MAXCH; ch++)
		iowrite32(0, priv->regs + TSI721_SR_CHINTE(ch));

	/* Disable all general SR2PC interrupts */
	iowrite32(0, priv->regs + TSI721_SR2PC_GEN_INTE);

	/* Disable all PC2SR interrupts */
	iowrite32(0, priv->regs + TSI721_PC2SR_INTE);

	/* Disable all I2C interrupts */
	iowrite32(0, priv->regs + TSI721_I2C_INT_ENABLE);

	/* Disable SRIO MAC interrupts */
	iowrite32(0, priv->regs + TSI721_RIO_EM_INT_ENABLE);
	iowrite32(0, priv->regs + TSI721_RIO_EM_DEV_INT_EN);
}
2198 | |||
2199 | /** | ||
2200 | * tsi721_setup_mport - Setup Tsi721 as RapidIO subsystem master port | ||
2201 | * @priv: pointer to tsi721 private data | ||
2202 | * | ||
2203 | * Configures Tsi721 as RapidIO master port. | ||
2204 | */ | ||
2205 | static int tsi721_setup_mport(struct tsi721_device *priv) | ||
2206 | { | ||
2207 | struct pci_dev *pdev = priv->pdev; | ||
2208 | int err = 0; | ||
2209 | struct rio_ops *ops; | ||
2210 | |||
2211 | struct rio_mport *mport; | ||
2212 | |||
2213 | ops = kzalloc(sizeof(struct rio_ops), GFP_KERNEL); | ||
2214 | if (!ops) { | ||
2215 | dev_dbg(&pdev->dev, "Unable to allocate memory for rio_ops\n"); | ||
2216 | return -ENOMEM; | ||
2217 | } | ||
2218 | |||
2219 | ops->lcread = tsi721_lcread; | ||
2220 | ops->lcwrite = tsi721_lcwrite; | ||
2221 | ops->cread = tsi721_cread_dma; | ||
2222 | ops->cwrite = tsi721_cwrite_dma; | ||
2223 | ops->dsend = tsi721_dsend; | ||
2224 | ops->open_inb_mbox = tsi721_open_inb_mbox; | ||
2225 | ops->close_inb_mbox = tsi721_close_inb_mbox; | ||
2226 | ops->open_outb_mbox = tsi721_open_outb_mbox; | ||
2227 | ops->close_outb_mbox = tsi721_close_outb_mbox; | ||
2228 | ops->add_outb_message = tsi721_add_outb_message; | ||
2229 | ops->add_inb_buffer = tsi721_add_inb_buffer; | ||
2230 | ops->get_inb_message = tsi721_get_inb_message; | ||
2231 | ops->map_inb = tsi721_rio_map_inb_mem; | ||
2232 | ops->unmap_inb = tsi721_rio_unmap_inb_mem; | ||
2233 | |||
2234 | mport = kzalloc(sizeof(struct rio_mport), GFP_KERNEL); | ||
2235 | if (!mport) { | ||
2236 | kfree(ops); | ||
2237 | dev_dbg(&pdev->dev, "Unable to allocate memory for mport\n"); | ||
2238 | return -ENOMEM; | ||
2239 | } | ||
2240 | |||
2241 | mport->ops = ops; | ||
2242 | mport->index = 0; | ||
2243 | mport->sys_size = 0; /* small system */ | ||
2244 | mport->phy_type = RIO_PHY_SERIAL; | ||
2245 | mport->priv = (void *)priv; | ||
2246 | mport->phys_efptr = 0x100; | ||
2247 | priv->mport = mport; | ||
2248 | |||
2249 | INIT_LIST_HEAD(&mport->dbells); | ||
2250 | |||
2251 | rio_init_dbell_res(&mport->riores[RIO_DOORBELL_RESOURCE], 0, 0xffff); | ||
2252 | rio_init_mbox_res(&mport->riores[RIO_INB_MBOX_RESOURCE], 0, 3); | ||
2253 | rio_init_mbox_res(&mport->riores[RIO_OUTB_MBOX_RESOURCE], 0, 3); | ||
2254 | snprintf(mport->name, RIO_MAX_MPORT_NAME, "%s(%s)", | ||
2255 | dev_driver_string(&pdev->dev), dev_name(&pdev->dev)); | ||
2256 | |||
2257 | /* Hook up interrupt handler */ | ||
2258 | |||
2259 | #ifdef CONFIG_PCI_MSI | ||
2260 | if (!tsi721_enable_msix(priv)) | ||
2261 | priv->flags |= TSI721_USING_MSIX; | ||
2262 | else if (!pci_enable_msi(pdev)) | ||
2263 | priv->flags |= TSI721_USING_MSI; | ||
2264 | else | ||
2265 | dev_info(&pdev->dev, | ||
2266 | "MSI/MSI-X is not available. Using legacy INTx.\n"); | ||
2267 | #endif /* CONFIG_PCI_MSI */ | ||
2268 | |||
2269 | err = tsi721_request_irq(mport); | ||
2270 | |||
2271 | if (!err) { | ||
2272 | tsi721_interrupts_init(priv); | ||
2273 | ops->pwenable = tsi721_pw_enable; | ||
2274 | } else { | ||
2275 | dev_err(&pdev->dev, "Unable to get assigned PCI IRQ " | ||
2276 | "vector %02X err=0x%x\n", pdev->irq, err); | ||
2277 | goto err_exit; | ||
2278 | } | ||
2279 | |||
2280 | #ifdef CONFIG_RAPIDIO_DMA_ENGINE | ||
2281 | tsi721_register_dma(priv); | ||
2282 | #endif | ||
2283 | /* Enable SRIO link */ | ||
2284 | iowrite32(ioread32(priv->regs + TSI721_DEVCTL) | | ||
2285 | TSI721_DEVCTL_SRBOOT_CMPL, | ||
2286 | priv->regs + TSI721_DEVCTL); | ||
2287 | |||
2288 | rio_register_mport(mport); | ||
2289 | |||
2290 | if (mport->host_deviceid >= 0) | ||
2291 | iowrite32(RIO_PORT_GEN_HOST | RIO_PORT_GEN_MASTER | | ||
2292 | RIO_PORT_GEN_DISCOVERED, | ||
2293 | priv->regs + (0x100 + RIO_PORT_GEN_CTL_CSR)); | ||
2294 | else | ||
2295 | iowrite32(0, priv->regs + (0x100 + RIO_PORT_GEN_CTL_CSR)); | ||
2296 | |||
2297 | return 0; | ||
2298 | |||
2299 | err_exit: | ||
2300 | kfree(mport); | ||
2301 | kfree(ops); | ||
2302 | return err; | ||
2303 | } | ||
2304 | |||
2305 | static int tsi721_probe(struct pci_dev *pdev, | ||
2306 | const struct pci_device_id *id) | ||
2307 | { | ||
2308 | struct tsi721_device *priv; | ||
2309 | int err; | ||
2310 | |||
2311 | priv = kzalloc(sizeof(struct tsi721_device), GFP_KERNEL); | ||
2312 | if (priv == NULL) { | ||
2313 | dev_err(&pdev->dev, "Failed to allocate memory for device\n"); | ||
2314 | err = -ENOMEM; | ||
2315 | goto err_exit; | ||
2316 | } | ||
2317 | |||
2318 | err = pci_enable_device(pdev); | ||
2319 | if (err) { | ||
2320 | dev_err(&pdev->dev, "Failed to enable PCI device\n"); | ||
2321 | goto err_clean; | ||
2322 | } | ||
2323 | |||
2324 | priv->pdev = pdev; | ||
2325 | |||
2326 | #ifdef DEBUG | ||
2327 | { | ||
2328 | int i; | ||
2329 | for (i = 0; i <= PCI_STD_RESOURCE_END; i++) { | ||
2330 | dev_dbg(&pdev->dev, "res[%d] @ 0x%llx (0x%lx, 0x%lx)\n", | ||
2331 | i, (unsigned long long)pci_resource_start(pdev, i), | ||
2332 | (unsigned long)pci_resource_len(pdev, i), | ||
2333 | pci_resource_flags(pdev, i)); | ||
2334 | } | ||
2335 | } | ||
2336 | #endif | ||
2337 | /* | ||
2338 | * Verify BAR configuration | ||
2339 | */ | ||
2340 | |||
2341 | /* BAR_0 (registers) must be 512KB+ in 32-bit address space */ | ||
2342 | if (!(pci_resource_flags(pdev, BAR_0) & IORESOURCE_MEM) || | ||
2343 | pci_resource_flags(pdev, BAR_0) & IORESOURCE_MEM_64 || | ||
2344 | pci_resource_len(pdev, BAR_0) < TSI721_REG_SPACE_SIZE) { | ||
2345 | dev_err(&pdev->dev, | ||
2346 | "Missing or misconfigured CSR BAR0, aborting.\n"); | ||
2347 | err = -ENODEV; | ||
2348 | goto err_disable_pdev; | ||
2349 | } | ||
2350 | |||
2351 | /* BAR_1 (outbound doorbells) must be 16MB+ in 32-bit address space */ | ||
2352 | if (!(pci_resource_flags(pdev, BAR_1) & IORESOURCE_MEM) || | ||
2353 | pci_resource_flags(pdev, BAR_1) & IORESOURCE_MEM_64 || | ||
2354 | pci_resource_len(pdev, BAR_1) < TSI721_DB_WIN_SIZE) { | ||
2355 | dev_err(&pdev->dev, | ||
2356 | "Missing or misconfigured Doorbell BAR1, aborting.\n"); | ||
2357 | err = -ENODEV; | ||
2358 | goto err_disable_pdev; | ||
2359 | } | ||
2360 | |||
2361 | /* | ||
2362 | * BAR_2 and BAR_4 (outbound translation) must be in 64-bit PCIe address | ||
2363 | * space. | ||
2364 | * NOTE: BAR_2 and BAR_4 are not used by this version of driver. | ||
2365 | * It may be a good idea to keep them disabled using HW configuration | ||
2366 | * to save PCI memory space. | ||
2367 | */ | ||
2368 | if ((pci_resource_flags(pdev, BAR_2) & IORESOURCE_MEM) && | ||
2369 | (pci_resource_flags(pdev, BAR_2) & IORESOURCE_MEM_64)) { | ||
2370 | dev_info(&pdev->dev, "Outbound BAR2 is not used but enabled.\n"); | ||
2371 | } | ||
2372 | |||
2373 | if ((pci_resource_flags(pdev, BAR_4) & IORESOURCE_MEM) && | ||
2374 | (pci_resource_flags(pdev, BAR_4) & IORESOURCE_MEM_64)) { | ||
2375 | dev_info(&pdev->dev, "Outbound BAR4 is not used but enabled.\n"); | ||
2376 | } | ||
2377 | |||
2378 | err = pci_request_regions(pdev, DRV_NAME); | ||
2379 | if (err) { | ||
2380 | dev_err(&pdev->dev, "Cannot obtain PCI resources, " | ||
2381 | "aborting.\n"); | ||
2382 | goto err_disable_pdev; | ||
2383 | } | ||
2384 | |||
2385 | pci_set_master(pdev); | ||
2386 | |||
2387 | priv->regs = pci_ioremap_bar(pdev, BAR_0); | ||
2388 | if (!priv->regs) { | ||
2389 | dev_err(&pdev->dev, | ||
2390 | "Unable to map device registers space, aborting\n"); | ||
2391 | err = -ENOMEM; | ||
2392 | goto err_free_res; | ||
2393 | } | ||
2394 | |||
2395 | priv->odb_base = pci_ioremap_bar(pdev, BAR_1); | ||
2396 | if (!priv->odb_base) { | ||
2397 | dev_err(&pdev->dev, | ||
2398 | "Unable to map outbound doorbells space, aborting\n"); | ||
2399 | err = -ENOMEM; | ||
2400 | goto err_unmap_bars; | ||
2401 | } | ||
2402 | |||
2403 | /* Configure DMA attributes. */ | ||
2404 | if (pci_set_dma_mask(pdev, DMA_BIT_MASK(64))) { | ||
2405 | err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32)); | ||
2406 | if (err) { | ||
2407 | dev_info(&pdev->dev, "Unable to set DMA mask\n"); | ||
2408 | goto err_unmap_bars; | ||
2409 | } | ||
2410 | |||
2411 | if (pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32))) | ||
2412 | dev_info(&pdev->dev, "Unable to set consistent DMA mask\n"); | ||
2413 | } else { | ||
2414 | err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64)); | ||
2415 | if (err) | ||
2416 | dev_info(&pdev->dev, "Unable to set consistent DMA mask\n"); | ||
2417 | } | ||
2418 | |||
2419 | BUG_ON(!pci_is_pcie(pdev)); | ||
2420 | |||
2421 | /* Clear "no snoop" and "relaxed ordering" bits, use default MRRS. */ | ||
2422 | pcie_capability_clear_and_set_word(pdev, PCI_EXP_DEVCTL, | ||
2423 | PCI_EXP_DEVCTL_READRQ | PCI_EXP_DEVCTL_RELAX_EN | | ||
2424 | PCI_EXP_DEVCTL_NOSNOOP_EN, | ||
2425 | 0x2 << MAX_READ_REQUEST_SZ_SHIFT); | ||
2426 | |||
2427 | /* Adjust PCIe completion timeout. */ | ||
2428 | pcie_capability_clear_and_set_word(pdev, PCI_EXP_DEVCTL2, 0xf, 0x2); | ||
2429 | |||
2430 | /* | ||
2431 | * FIXUP: correct offsets of MSI-X tables in the MSI-X Capability Block | ||
2432 | */ | ||
2433 | pci_write_config_dword(pdev, TSI721_PCIECFG_EPCTL, 0x01); | ||
2434 | pci_write_config_dword(pdev, TSI721_PCIECFG_MSIXTBL, | ||
2435 | TSI721_MSIXTBL_OFFSET); | ||
2436 | pci_write_config_dword(pdev, TSI721_PCIECFG_MSIXPBA, | ||
2437 | TSI721_MSIXPBA_OFFSET); | ||
2438 | pci_write_config_dword(pdev, TSI721_PCIECFG_EPCTL, 0); | ||
2439 | /* End of FIXUP */ | ||
2440 | |||
2441 | tsi721_disable_ints(priv); | ||
2442 | |||
2443 | tsi721_init_pc2sr_mapping(priv); | ||
2444 | tsi721_init_sr2pc_mapping(priv); | ||
2445 | |||
2446 | if (tsi721_bdma_maint_init(priv)) { | ||
2447 | dev_err(&pdev->dev, "BDMA initialization failed, aborting\n"); | ||
2448 | err = -ENOMEM; | ||
2449 | goto err_unmap_bars; | ||
2450 | } | ||
2451 | |||
2452 | err = tsi721_doorbell_init(priv); | ||
2453 | if (err) | ||
2454 | goto err_free_bdma; | ||
2455 | |||
2456 | tsi721_port_write_init(priv); | ||
2457 | |||
2458 | err = tsi721_messages_init(priv); | ||
2459 | if (err) | ||
2460 | goto err_free_consistent; | ||
2461 | |||
2462 | err = tsi721_setup_mport(priv); | ||
2463 | if (err) | ||
2464 | goto err_free_consistent; | ||
2465 | |||
2466 | return 0; | ||
2467 | |||
2468 | err_free_consistent: | ||
2469 | tsi721_doorbell_free(priv); | ||
2470 | err_free_bdma: | ||
2471 | tsi721_bdma_maint_free(priv); | ||
2472 | err_unmap_bars: | ||
2473 | if (priv->regs) | ||
2474 | iounmap(priv->regs); | ||
2475 | if (priv->odb_base) | ||
2476 | iounmap(priv->odb_base); | ||
2477 | err_free_res: | ||
2478 | pci_release_regions(pdev); | ||
2479 | pci_clear_master(pdev); | ||
2480 | err_disable_pdev: | ||
2481 | pci_disable_device(pdev); | ||
2482 | err_clean: | ||
2483 | kfree(priv); | ||
2484 | err_exit: | ||
2485 | return err; | ||
2486 | } | ||
2487 | |||
2488 | static DEFINE_PCI_DEVICE_TABLE(tsi721_pci_tbl) = { | ||
2489 | { PCI_DEVICE(PCI_VENDOR_ID_IDT, PCI_DEVICE_ID_TSI721) }, | ||
2490 | { 0, } /* terminate list */ | ||
2491 | }; | ||
2492 | |||
2493 | MODULE_DEVICE_TABLE(pci, tsi721_pci_tbl); | ||
2494 | |||
2495 | static struct pci_driver tsi721_driver = { | ||
2496 | .name = "tsi721", | ||
2497 | .id_table = tsi721_pci_tbl, | ||
2498 | .probe = tsi721_probe, | ||
2499 | }; | ||
2500 | |||
2501 | static int __init tsi721_init(void) | ||
2502 | { | ||
2503 | return pci_register_driver(&tsi721_driver); | ||
2504 | } | ||
2505 | |||
2506 | static void __exit tsi721_exit(void) | ||
2507 | { | ||
2508 | pci_unregister_driver(&tsi721_driver); | ||
2509 | } | ||
2510 | |||
2511 | device_initcall(tsi721_init); | ||
diff --git a/drivers/rapidio/devices/tsi721.h b/drivers/rapidio/devices/tsi721.h deleted file mode 100644 index b4b0d83f9ef..00000000000 --- a/drivers/rapidio/devices/tsi721.h +++ /dev/null | |||
@@ -1,852 +0,0 @@ | |||
1 | /* | ||
2 | * Tsi721 PCIExpress-to-SRIO bridge definitions | ||
3 | * | ||
4 | * Copyright 2011, Integrated Device Technology, Inc. | ||
5 | * | ||
6 | * This program is free software; you can redistribute it and/or modify it | ||
7 | * under the terms of the GNU General Public License as published by the Free | ||
8 | * Software Foundation; either version 2 of the License, or (at your option) | ||
9 | * any later version. | ||
10 | * | ||
11 | * This program is distributed in the hope that it will be useful, but WITHOUT | ||
12 | * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or | ||
13 | * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for | ||
14 | * more details. | ||
15 | * | ||
16 | * You should have received a copy of the GNU General Public License along with | ||
17 | * this program; if not, write to the Free Software Foundation, Inc., 59 | ||
18 | * Temple Place - Suite 330, Boston, MA 02111-1307, USA. | ||
19 | */ | ||
20 | |||
21 | #ifndef __TSI721_H | ||
22 | #define __TSI721_H | ||
23 | |||
24 | #define DRV_NAME "tsi721" | ||
25 | |||
26 | #define DEFAULT_HOPCOUNT 0xff | ||
27 | #define DEFAULT_DESTID 0xff | ||
28 | |||
29 | /* PCI device ID */ | ||
30 | #define PCI_DEVICE_ID_TSI721 0x80ab | ||
31 | |||
32 | #define BAR_0 0 | ||
33 | #define BAR_1 1 | ||
34 | #define BAR_2 2 | ||
35 | #define BAR_4 4 | ||
36 | |||
37 | #define TSI721_PC2SR_BARS 2 | ||
38 | #define TSI721_PC2SR_WINS 8 | ||
39 | #define TSI721_PC2SR_ZONES 8 | ||
40 | #define TSI721_MAINT_WIN 0 /* Window for outbound maintenance requests */ | ||
41 | #define IDB_QUEUE 0 /* Inbound Doorbell Queue to use */ | ||
42 | #define IDB_QSIZE 512 /* Inbound Doorbell Queue size */ | ||
43 | |||
44 | /* Memory space sizes */ | ||
45 | #define TSI721_REG_SPACE_SIZE (512 * 1024) /* 512K */ | ||
46 | #define TSI721_DB_WIN_SIZE (16 * 1024 * 1024) /* 16MB */ | ||
47 | |||
48 | #define RIO_TT_CODE_8 0x00000000 | ||
49 | #define RIO_TT_CODE_16 0x00000001 | ||
50 | |||
51 | #define TSI721_DMA_MAXCH 8 | ||
52 | #define TSI721_DMA_MINSTSSZ 32 | ||
53 | #define TSI721_DMA_STSBLKSZ 8 | ||
54 | |||
55 | #define TSI721_SRIO_MAXCH 8 | ||
56 | |||
57 | #define DBELL_SID(buf) (((u8)buf[2] << 8) | (u8)buf[3]) | ||
58 | #define DBELL_TID(buf) (((u8)buf[4] << 8) | (u8)buf[5]) | ||
59 | #define DBELL_INF(buf) (((u8)buf[0] << 8) | (u8)buf[1]) | ||
60 | |||
61 | #define TSI721_RIO_PW_MSG_SIZE 16 /* Tsi721 saves only 16 bytes of PW msg */ | ||
62 | |||
63 | /* Register definitions */ | ||
64 | |||
65 | /* | ||
66 | * Registers in PCIe configuration space | ||
67 | */ | ||
68 | |||
69 | #define TSI721_PCIECFG_MSIXTBL 0x0a4 | ||
70 | #define TSI721_MSIXTBL_OFFSET 0x2c000 | ||
71 | #define TSI721_PCIECFG_MSIXPBA 0x0a8 | ||
72 | #define TSI721_MSIXPBA_OFFSET 0x2a000 | ||
73 | #define TSI721_PCIECFG_EPCTL 0x400 | ||
74 | |||
75 | #define MAX_READ_REQUEST_SZ_SHIFT 12 | ||
76 | |||
77 | /* | ||
78 | * Event Management Registers | ||
79 | */ | ||
80 | |||
81 | #define TSI721_RIO_EM_INT_STAT 0x10910 | ||
82 | #define TSI721_RIO_EM_INT_STAT_PW_RX 0x00010000 | ||
83 | |||
84 | #define TSI721_RIO_EM_INT_ENABLE 0x10914 | ||
85 | #define TSI721_RIO_EM_INT_ENABLE_PW_RX 0x00010000 | ||
86 | |||
87 | #define TSI721_RIO_EM_DEV_INT_EN 0x10930 | ||
88 | #define TSI721_RIO_EM_DEV_INT_EN_INT 0x00000001 | ||
89 | |||
90 | /* | ||
91 | * Port-Write Block Registers | ||
92 | */ | ||
93 | |||
94 | #define TSI721_RIO_PW_CTL 0x10a04 | ||
95 | #define TSI721_RIO_PW_CTL_PW_TIMER 0xf0000000 | ||
96 | #define TSI721_RIO_PW_CTL_PWT_DIS (0 << 28) | ||
97 | #define TSI721_RIO_PW_CTL_PWT_103 (1 << 28) | ||
98 | #define TSI721_RIO_PW_CTL_PWT_205 (1 << 29) | ||
99 | #define TSI721_RIO_PW_CTL_PWT_410 (1 << 30) | ||
100 | #define TSI721_RIO_PW_CTL_PWT_820 (1 << 31) | ||
101 | #define TSI721_RIO_PW_CTL_PWC_MODE 0x01000000 | ||
102 | #define TSI721_RIO_PW_CTL_PWC_CONT 0x00000000 | ||
103 | #define TSI721_RIO_PW_CTL_PWC_REL 0x01000000 | ||
104 | |||
105 | #define TSI721_RIO_PW_RX_STAT 0x10a10 | ||
106 | #define TSI721_RIO_PW_RX_STAT_WR_SIZE 0x0000f000 | ||
107 | #define TSI_RIO_PW_RX_STAT_WDPTR 0x00000100 | ||
108 | #define TSI721_RIO_PW_RX_STAT_PW_SHORT 0x00000008 | ||
109 | #define TSI721_RIO_PW_RX_STAT_PW_TRUNC 0x00000004 | ||
110 | #define TSI721_RIO_PW_RX_STAT_PW_DISC 0x00000002 | ||
111 | #define TSI721_RIO_PW_RX_STAT_PW_VAL 0x00000001 | ||
112 | |||
113 | #define TSI721_RIO_PW_RX_CAPT(x) (0x10a20 + (x)*4) | ||
114 | |||
115 | /* | ||
116 | * Inbound Doorbells | ||
117 | */ | ||
118 | |||
119 | #define TSI721_IDB_ENTRY_SIZE 64 | ||
120 | |||
121 | #define TSI721_IDQ_CTL(x) (0x20000 + (x) * 0x1000) | ||
122 | #define TSI721_IDQ_SUSPEND 0x00000002 | ||
123 | #define TSI721_IDQ_INIT 0x00000001 | ||
124 | |||
125 | #define TSI721_IDQ_STS(x) (0x20004 + (x) * 0x1000) | ||
126 | #define TSI721_IDQ_RUN 0x00200000 | ||
127 | |||
128 | #define TSI721_IDQ_MASK(x) (0x20008 + (x) * 0x1000) | ||
129 | #define TSI721_IDQ_MASK_MASK 0xffff0000 | ||
130 | #define TSI721_IDQ_MASK_PATT 0x0000ffff | ||
131 | |||
132 | #define TSI721_IDQ_RP(x) (0x2000c + (x) * 0x1000) | ||
133 | #define TSI721_IDQ_RP_PTR 0x0007ffff | ||
134 | |||
135 | #define TSI721_IDQ_WP(x) (0x20010 + (x) * 0x1000) | ||
136 | #define TSI721_IDQ_WP_PTR 0x0007ffff | ||
137 | |||
138 | #define TSI721_IDQ_BASEL(x) (0x20014 + (x) * 0x1000) | ||
139 | #define TSI721_IDQ_BASEL_ADDR 0xffffffc0 | ||
140 | #define TSI721_IDQ_BASEU(x) (0x20018 + (x) * 0x1000) | ||
141 | #define TSI721_IDQ_SIZE(x) (0x2001c + (x) * 0x1000) | ||
142 | #define TSI721_IDQ_SIZE_VAL(size) (__fls(size) - 4) | ||
143 | #define TSI721_IDQ_SIZE_MIN 512 | ||
144 | #define TSI721_IDQ_SIZE_MAX (512 * 1024) | ||
145 | |||
146 | #define TSI721_SR_CHINT(x) (0x20040 + (x) * 0x1000) | ||
147 | #define TSI721_SR_CHINTE(x) (0x20044 + (x) * 0x1000) | ||
148 | #define TSI721_SR_CHINTSET(x) (0x20048 + (x) * 0x1000) | ||
149 | #define TSI721_SR_CHINT_ODBOK 0x00000020 | ||
150 | #define TSI721_SR_CHINT_IDBQRCV 0x00000010 | ||
151 | #define TSI721_SR_CHINT_SUSP 0x00000008 | ||
152 | #define TSI721_SR_CHINT_ODBTO 0x00000004 | ||
153 | #define TSI721_SR_CHINT_ODBRTRY 0x00000002 | ||
154 | #define TSI721_SR_CHINT_ODBERR 0x00000001 | ||
155 | #define TSI721_SR_CHINT_ALL 0x0000003f | ||
156 | |||
157 | #define TSI721_IBWIN_NUM 8 | ||
158 | |||
159 | #define TSI721_IBWIN_LB(x) (0x29000 + (x) * 0x20) | ||
160 | #define TSI721_IBWIN_LB_BA 0xfffff000 | ||
161 | #define TSI721_IBWIN_LB_WEN 0x00000001 | ||
162 | |||
163 | #define TSI721_IBWIN_UB(x) (0x29004 + (x) * 0x20) | ||
164 | #define TSI721_IBWIN_SZ(x) (0x29008 + (x) * 0x20) | ||
165 | #define TSI721_IBWIN_SZ_SIZE 0x00001f00 | ||
166 | #define TSI721_IBWIN_SIZE(size) (__fls(size) - 12) | ||
167 | |||
168 | #define TSI721_IBWIN_TLA(x) (0x2900c + (x) * 0x20) | ||
169 | #define TSI721_IBWIN_TLA_ADD 0xfffff000 | ||
170 | #define TSI721_IBWIN_TUA(x) (0x29010 + (x) * 0x20) | ||
171 | |||
172 | #define TSI721_SR2PC_GEN_INTE 0x29800 | ||
173 | #define TSI721_SR2PC_PWE 0x29804 | ||
174 | #define TSI721_SR2PC_GEN_INT 0x29808 | ||
175 | |||
176 | #define TSI721_DEV_INTE 0x29840 | ||
177 | #define TSI721_DEV_INT 0x29844 | ||
178 | #define TSI721_DEV_INTSET 0x29848 | ||
179 | #define TSI721_DEV_INT_BDMA_CH 0x00002000 | ||
180 | #define TSI721_DEV_INT_BDMA_NCH 0x00001000 | ||
181 | #define TSI721_DEV_INT_SMSG_CH 0x00000800 | ||
182 | #define TSI721_DEV_INT_SMSG_NCH 0x00000400 | ||
183 | #define TSI721_DEV_INT_SR2PC_CH 0x00000200 | ||
184 | #define TSI721_DEV_INT_SRIO 0x00000020 | ||
185 | |||
186 | #define TSI721_DEV_CHAN_INTE 0x2984c | ||
187 | #define TSI721_DEV_CHAN_INT 0x29850 | ||
188 | |||
189 | #define TSI721_INT_SR2PC_CHAN_M 0xff000000 | ||
190 | #define TSI721_INT_SR2PC_CHAN(x) (1 << (24 + (x))) | ||
191 | #define TSI721_INT_IMSG_CHAN_M 0x00ff0000 | ||
192 | #define TSI721_INT_IMSG_CHAN(x) (1 << (16 + (x))) | ||
193 | #define TSI721_INT_OMSG_CHAN_M 0x0000ff00 | ||
194 | #define TSI721_INT_OMSG_CHAN(x) (1 << (8 + (x))) | ||
195 | #define TSI721_INT_BDMA_CHAN_M 0x000000ff | ||
196 | #define TSI721_INT_BDMA_CHAN(x) (1 << (x)) | ||
197 | |||
198 | /* | ||
199 | * PC2SR block registers | ||
200 | */ | ||
201 | #define TSI721_OBWIN_NUM TSI721_PC2SR_WINS | ||
202 | |||
203 | #define TSI721_OBWINLB(x) (0x40000 + (x) * 0x20) | ||
204 | #define TSI721_OBWINLB_BA 0xffff8000 | ||
205 | #define TSI721_OBWINLB_WEN 0x00000001 | ||
206 | |||
207 | #define TSI721_OBWINUB(x) (0x40004 + (x) * 0x20) | ||
208 | |||
209 | #define TSI721_OBWINSZ(x) (0x40008 + (x) * 0x20) | ||
210 | #define TSI721_OBWINSZ_SIZE 0x00001f00 | ||
211 | #define TSI721_OBWIN_SIZE(size) (__fls(size) - 15) | ||
212 | |||
213 | #define TSI721_ZONE_SEL 0x41300 | ||
214 | #define TSI721_ZONE_SEL_RD_WRB 0x00020000 | ||
215 | #define TSI721_ZONE_SEL_GO 0x00010000 | ||
216 | #define TSI721_ZONE_SEL_WIN 0x00000038 | ||
217 | #define TSI721_ZONE_SEL_ZONE 0x00000007 | ||
218 | |||
219 | #define TSI721_LUT_DATA0 0x41304 | ||
220 | #define TSI721_LUT_DATA0_ADD 0xfffff000 | ||
221 | #define TSI721_LUT_DATA0_RDTYPE 0x00000f00 | ||
222 | #define TSI721_LUT_DATA0_NREAD 0x00000100 | ||
223 | #define TSI721_LUT_DATA0_MNTRD 0x00000200 | ||
224 | #define TSI721_LUT_DATA0_RDCRF 0x00000020 | ||
225 | #define TSI721_LUT_DATA0_WRCRF 0x00000010 | ||
226 | #define TSI721_LUT_DATA0_WRTYPE 0x0000000f | ||
227 | #define TSI721_LUT_DATA0_NWR 0x00000001 | ||
228 | #define TSI721_LUT_DATA0_MNTWR 0x00000002 | ||
229 | #define TSI721_LUT_DATA0_NWR_R 0x00000004 | ||
230 | |||
231 | #define TSI721_LUT_DATA1 0x41308 | ||
232 | |||
233 | #define TSI721_LUT_DATA2 0x4130c | ||
234 | #define TSI721_LUT_DATA2_HC 0xff000000 | ||
235 | #define TSI721_LUT_DATA2_ADD65 0x000c0000 | ||
236 | #define TSI721_LUT_DATA2_TT 0x00030000 | ||
237 | #define TSI721_LUT_DATA2_DSTID 0x0000ffff | ||
238 | |||
239 | #define TSI721_PC2SR_INTE 0x41310 | ||
240 | |||
241 | #define TSI721_DEVCTL 0x48004 | ||
242 | #define TSI721_DEVCTL_SRBOOT_CMPL 0x00000004 | ||
243 | |||
244 | #define TSI721_I2C_INT_ENABLE 0x49120 | ||
245 | |||
246 | /* | ||
247 | * Block DMA Engine Registers | ||
248 | * x = 0..7 | ||
249 | */ | ||
250 | |||
251 | #define TSI721_DMAC_BASE(x) (0x51000 + (x) * 0x1000) | ||
252 | |||
253 | #define TSI721_DMAC_DWRCNT 0x000 | ||
254 | #define TSI721_DMAC_DRDCNT 0x004 | ||
255 | |||
256 | #define TSI721_DMAC_CTL 0x008 | ||
257 | #define TSI721_DMAC_CTL_SUSP 0x00000002 | ||
258 | #define TSI721_DMAC_CTL_INIT 0x00000001 | ||
259 | |||
260 | #define TSI721_DMAC_INT 0x00c | ||
261 | #define TSI721_DMAC_INT_STFULL 0x00000010 | ||
262 | #define TSI721_DMAC_INT_DONE 0x00000008 | ||
263 | #define TSI721_DMAC_INT_SUSP 0x00000004 | ||
264 | #define TSI721_DMAC_INT_ERR 0x00000002 | ||
265 | #define TSI721_DMAC_INT_IOFDONE 0x00000001 | ||
266 | #define TSI721_DMAC_INT_ALL 0x0000001f | ||
267 | |||
268 | #define TSI721_DMAC_INTSET 0x010 | ||
269 | |||
270 | #define TSI721_DMAC_STS 0x014 | ||
271 | #define TSI721_DMAC_STS_ABORT 0x00400000 | ||
272 | #define TSI721_DMAC_STS_RUN 0x00200000 | ||
273 | #define TSI721_DMAC_STS_CS 0x001f0000 | ||
274 | |||
275 | #define TSI721_DMAC_INTE 0x018 | ||
276 | |||
277 | #define TSI721_DMAC_DPTRL 0x024 | ||
278 | #define TSI721_DMAC_DPTRL_MASK 0xffffffe0 | ||
279 | |||
280 | #define TSI721_DMAC_DPTRH 0x028 | ||
281 | |||
282 | #define TSI721_DMAC_DSBL 0x02c | ||
283 | #define TSI721_DMAC_DSBL_MASK 0xffffffc0 | ||
284 | |||
285 | #define TSI721_DMAC_DSBH 0x030 | ||
286 | |||
287 | #define TSI721_DMAC_DSSZ 0x034 | ||
288 | #define TSI721_DMAC_DSSZ_SIZE_M 0x0000000f | ||
289 | #define TSI721_DMAC_DSSZ_SIZE(size) (__fls(size) - 4) | ||
290 | |||
291 | #define TSI721_DMAC_DSRP 0x038 | ||
292 | #define TSI721_DMAC_DSRP_MASK 0x0007ffff | ||
293 | |||
294 | #define TSI721_DMAC_DSWP 0x03c | ||
295 | #define TSI721_DMAC_DSWP_MASK 0x0007ffff | ||
296 | |||
297 | #define TSI721_BDMA_INTE 0x5f000 | ||
298 | |||
299 | /* | ||
300 | * Messaging definitions | ||
301 | */ | ||
302 | #define TSI721_MSG_BUFFER_SIZE RIO_MAX_MSG_SIZE | ||
303 | #define TSI721_MSG_MAX_SIZE RIO_MAX_MSG_SIZE | ||
304 | #define TSI721_IMSG_MAXCH 8 | ||
305 | #define TSI721_IMSG_CHNUM TSI721_IMSG_MAXCH | ||
306 | #define TSI721_IMSGD_MIN_RING_SIZE 32 | ||
307 | #define TSI721_IMSGD_RING_SIZE 512 | ||
308 | |||
309 | #define TSI721_OMSG_CHNUM 4 /* One channel per MBOX */ | ||
310 | #define TSI721_OMSGD_MIN_RING_SIZE 32 | ||
311 | #define TSI721_OMSGD_RING_SIZE 512 | ||
312 | |||
313 | /* | ||
314 | * Outbound Messaging Engine Registers | ||
315 | * x = 0..7 | ||
316 | */ | ||
317 | |||
318 | #define TSI721_OBDMAC_DWRCNT(x) (0x61000 + (x) * 0x1000) | ||
319 | |||
320 | #define TSI721_OBDMAC_DRDCNT(x) (0x61004 + (x) * 0x1000) | ||
321 | |||
322 | #define TSI721_OBDMAC_CTL(x) (0x61008 + (x) * 0x1000) | ||
323 | #define TSI721_OBDMAC_CTL_MASK 0x00000007 | ||
324 | #define TSI721_OBDMAC_CTL_RETRY_THR 0x00000004 | ||
325 | #define TSI721_OBDMAC_CTL_SUSPEND 0x00000002 | ||
326 | #define TSI721_OBDMAC_CTL_INIT 0x00000001 | ||
327 | |||
328 | #define TSI721_OBDMAC_INT(x) (0x6100c + (x) * 0x1000) | ||
329 | #define TSI721_OBDMAC_INTSET(x) (0x61010 + (x) * 0x1000) | ||
330 | #define TSI721_OBDMAC_INTE(x) (0x61018 + (x) * 0x1000) | ||
331 | #define TSI721_OBDMAC_INT_MASK 0x0000001F | ||
332 | #define TSI721_OBDMAC_INT_ST_FULL 0x00000010 | ||
333 | #define TSI721_OBDMAC_INT_DONE 0x00000008 | ||
334 | #define TSI721_OBDMAC_INT_SUSPENDED 0x00000004 | ||
335 | #define TSI721_OBDMAC_INT_ERROR 0x00000002 | ||
336 | #define TSI721_OBDMAC_INT_IOF_DONE 0x00000001 | ||
337 | #define TSI721_OBDMAC_INT_ALL TSI721_OBDMAC_INT_MASK | ||
338 | |||
339 | #define TSI721_OBDMAC_STS(x) (0x61014 + (x) * 0x1000) | ||
340 | #define TSI721_OBDMAC_STS_MASK 0x007f0000 | ||
341 | #define TSI721_OBDMAC_STS_ABORT 0x00400000 | ||
342 | #define TSI721_OBDMAC_STS_RUN 0x00200000 | ||
343 | #define TSI721_OBDMAC_STS_CS 0x001f0000 | ||
344 | |||
345 | #define TSI721_OBDMAC_PWE(x) (0x6101c + (x) * 0x1000) | ||
346 | #define TSI721_OBDMAC_PWE_MASK 0x00000002 | ||
347 | #define TSI721_OBDMAC_PWE_ERROR_EN 0x00000002 | ||
348 | |||
349 | #define TSI721_OBDMAC_DPTRL(x) (0x61020 + (x) * 0x1000) | ||
350 | #define TSI721_OBDMAC_DPTRL_MASK 0xfffffff0 | ||
351 | |||
352 | #define TSI721_OBDMAC_DPTRH(x) (0x61024 + (x) * 0x1000) | ||
353 | #define TSI721_OBDMAC_DPTRH_MASK 0xffffffff | ||
354 | |||
355 | #define TSI721_OBDMAC_DSBL(x) (0x61040 + (x) * 0x1000) | ||
356 | #define TSI721_OBDMAC_DSBL_MASK 0xffffffc0 | ||
357 | |||
358 | #define TSI721_OBDMAC_DSBH(x) (0x61044 + (x) * 0x1000) | ||
359 | #define TSI721_OBDMAC_DSBH_MASK 0xffffffff | ||
360 | |||
361 | #define TSI721_OBDMAC_DSSZ(x) (0x61048 + (x) * 0x1000) | ||
362 | #define TSI721_OBDMAC_DSSZ_MASK 0x0000000f | ||
363 | |||
364 | #define TSI721_OBDMAC_DSRP(x) (0x6104c + (x) * 0x1000) | ||
365 | #define TSI721_OBDMAC_DSRP_MASK 0x0007ffff | ||
366 | |||
367 | #define TSI721_OBDMAC_DSWP(x) (0x61050 + (x) * 0x1000) | ||
368 | #define TSI721_OBDMAC_DSWP_MASK 0x0007ffff | ||
369 | |||
370 | #define TSI721_RQRPTO 0x60010 | ||
371 | #define TSI721_RQRPTO_MASK 0x00ffffff | ||
372 | #define TSI721_RQRPTO_VAL 400 /* Response TO value */ | ||
373 | |||
374 | /* | ||
375 | * Inbound Messaging Engine Registers | ||
376 | * x = 0..7 | ||
377 | */ | ||
378 | |||
379 | #define TSI721_IB_DEVID_GLOBAL 0xffff | ||
380 | #define TSI721_IBDMAC_FQBL(x) (0x61200 + (x) * 0x1000) | ||
381 | #define TSI721_IBDMAC_FQBL_MASK 0xffffffc0 | ||
382 | |||
383 | #define TSI721_IBDMAC_FQBH(x) (0x61204 + (x) * 0x1000) | ||
384 | #define TSI721_IBDMAC_FQBH_MASK 0xffffffff | ||
385 | |||
386 | #define TSI721_IBDMAC_FQSZ_ENTRY_INX TSI721_IMSGD_RING_SIZE | ||
387 | #define TSI721_IBDMAC_FQSZ(x) (0x61208 + (x) * 0x1000) | ||
388 | #define TSI721_IBDMAC_FQSZ_MASK 0x0000000f | ||
389 | |||
390 | #define TSI721_IBDMAC_FQRP(x) (0x6120c + (x) * 0x1000) | ||
391 | #define TSI721_IBDMAC_FQRP_MASK 0x0007ffff | ||
392 | |||
393 | #define TSI721_IBDMAC_FQWP(x) (0x61210 + (x) * 0x1000) | ||
394 | #define TSI721_IBDMAC_FQWP_MASK 0x0007ffff | ||
395 | |||
396 | #define TSI721_IBDMAC_FQTH(x) (0x61214 + (x) * 0x1000) | ||
397 | #define TSI721_IBDMAC_FQTH_MASK 0x0007ffff | ||
398 | |||
399 | #define TSI721_IB_DEVID 0x60020 | ||
400 | #define TSI721_IB_DEVID_MASK 0x0000ffff | ||
401 | |||
402 | #define TSI721_IBDMAC_CTL(x) (0x61240 + (x) * 0x1000) | ||
403 | #define TSI721_IBDMAC_CTL_MASK 0x00000003 | ||
404 | #define TSI721_IBDMAC_CTL_SUSPEND 0x00000002 | ||
405 | #define TSI721_IBDMAC_CTL_INIT 0x00000001 | ||
406 | |||
407 | #define TSI721_IBDMAC_STS(x) (0x61244 + (x) * 0x1000) | ||
408 | #define TSI721_IBDMAC_STS_MASK 0x007f0000 | ||
409 | #define TSI721_IBSMAC_STS_ABORT 0x00400000 | ||
410 | #define TSI721_IBSMAC_STS_RUN 0x00200000 | ||
411 | #define TSI721_IBSMAC_STS_CS 0x001f0000 | ||
412 | |||
413 | #define TSI721_IBDMAC_INT(x) (0x61248 + (x) * 0x1000) | ||
414 | #define TSI721_IBDMAC_INTSET(x) (0x6124c + (x) * 0x1000) | ||
415 | #define TSI721_IBDMAC_INTE(x) (0x61250 + (x) * 0x1000) | ||
416 | #define TSI721_IBDMAC_INT_MASK 0x0000100f | ||
417 | #define TSI721_IBDMAC_INT_SRTO 0x00001000 | ||
418 | #define TSI721_IBDMAC_INT_SUSPENDED 0x00000008 | ||
419 | #define TSI721_IBDMAC_INT_PC_ERROR 0x00000004 | ||
420 | #define TSI721_IBDMAC_INT_FQ_LOW 0x00000002 | ||
421 | #define TSI721_IBDMAC_INT_DQ_RCV 0x00000001 | ||
422 | #define TSI721_IBDMAC_INT_ALL TSI721_IBDMAC_INT_MASK | ||
423 | |||
424 | #define TSI721_IBDMAC_PWE(x) (0x61254 + (x) * 0x1000) | ||
425 | #define TSI721_IBDMAC_PWE_MASK 0x00001700 | ||
426 | #define TSI721_IBDMAC_PWE_SRTO 0x00001000 | ||
427 | #define TSI721_IBDMAC_PWE_ILL_FMT 0x00000400 | ||
428 | #define TSI721_IBDMAC_PWE_ILL_DEC 0x00000200 | ||
429 | #define TSI721_IBDMAC_PWE_IMP_SP 0x00000100 | ||
430 | |||
431 | #define TSI721_IBDMAC_DQBL(x) (0x61300 + (x) * 0x1000) | ||
432 | #define TSI721_IBDMAC_DQBL_MASK 0xffffffc0 | ||
433 | #define TSI721_IBDMAC_DQBL_ADDR 0xffffffc0 | ||
434 | |||
435 | #define TSI721_IBDMAC_DQBH(x) (0x61304 + (x) * 0x1000) | ||
436 | #define TSI721_IBDMAC_DQBH_MASK 0xffffffff | ||
437 | |||
438 | #define TSI721_IBDMAC_DQRP(x) (0x61308 + (x) * 0x1000) | ||
439 | #define TSI721_IBDMAC_DQRP_MASK 0x0007ffff | ||
440 | |||
441 | #define TSI721_IBDMAC_DQWR(x) (0x6130c + (x) * 0x1000) | ||
442 | #define TSI721_IBDMAC_DQWR_MASK 0x0007ffff | ||
443 | |||
444 | #define TSI721_IBDMAC_DQSZ(x) (0x61314 + (x) * 0x1000) | ||
445 | #define TSI721_IBDMAC_DQSZ_MASK 0x0000000f | ||
446 | |||
447 | /* | ||
448 | * Messaging Engine Interrupts | ||
449 | */ | ||
450 | |||
451 | #define TSI721_SMSG_PWE 0x6a004 | ||
452 | |||
453 | #define TSI721_SMSG_INTE 0x6a000 | ||
454 | #define TSI721_SMSG_INT 0x6a008 | ||
455 | #define TSI721_SMSG_INTSET 0x6a010 | ||
456 | #define TSI721_SMSG_INT_MASK 0x0086ffff | ||
457 | #define TSI721_SMSG_INT_UNS_RSP 0x00800000 | ||
458 | #define TSI721_SMSG_INT_ECC_NCOR 0x00040000 | ||
459 | #define TSI721_SMSG_INT_ECC_COR 0x00020000 | ||
460 | #define TSI721_SMSG_INT_ECC_NCOR_CH 0x0000ff00 | ||
461 | #define TSI721_SMSG_INT_ECC_COR_CH 0x000000ff | ||
462 | |||
463 | #define TSI721_SMSG_ECC_LOG 0x6a014 | ||
464 | #define TSI721_SMSG_ECC_LOG_MASK 0x00070007 | ||
465 | #define TSI721_SMSG_ECC_LOG_ECC_NCOR_M 0x00070000 | ||
466 | #define TSI721_SMSG_ECC_LOG_ECC_COR_M 0x00000007 | ||
467 | |||
468 | #define TSI721_RETRY_GEN_CNT 0x6a100 | ||
469 | #define TSI721_RETRY_GEN_CNT_MASK 0xffffffff | ||
470 | |||
471 | #define TSI721_RETRY_RX_CNT 0x6a104 | ||
472 | #define TSI721_RETRY_RX_CNT_MASK 0xffffffff | ||
473 | |||
474 | #define TSI721_SMSG_ECC_COR_LOG(x) (0x6a300 + (x) * 4) | ||
475 | #define TSI721_SMSG_ECC_COR_LOG_MASK 0x000000ff | ||
476 | |||
477 | #define TSI721_SMSG_ECC_NCOR(x) (0x6a340 + (x) * 4) | ||
478 | #define TSI721_SMSG_ECC_NCOR_MASK 0x000000ff | ||
479 | |||
480 | /* | ||
481 | * Block DMA Descriptors | ||
482 | */ | ||
483 | |||
484 | struct tsi721_dma_desc { | ||
485 | __le32 type_id; | ||
486 | |||
487 | #define TSI721_DMAD_DEVID 0x0000ffff | ||
488 | #define TSI721_DMAD_CRF 0x00010000 | ||
489 | #define TSI721_DMAD_PRIO 0x00060000 | ||
490 | #define TSI721_DMAD_RTYPE 0x00780000 | ||
491 | #define TSI721_DMAD_IOF 0x08000000 | ||
492 | #define TSI721_DMAD_DTYPE 0xe0000000 | ||
493 | |||
494 | __le32 bcount; | ||
495 | |||
496 | #define TSI721_DMAD_BCOUNT1 0x03ffffff /* if DTYPE == 1 */ | ||
497 | #define TSI721_DMAD_BCOUNT2 0x0000000f /* if DTYPE == 2 */ | ||
498 | #define TSI721_DMAD_TT 0x0c000000 | ||
499 | #define TSI721_DMAD_RADDR0 0xc0000000 | ||
500 | |||
501 | union { | ||
502 | __le32 raddr_lo; /* if DTYPE == (1 || 2) */ | ||
503 | __le32 next_lo; /* if DTYPE == 3 */ | ||
504 | }; | ||
505 | |||
506 | #define TSI721_DMAD_CFGOFF 0x00ffffff | ||
507 | #define TSI721_DMAD_HOPCNT 0xff000000 | ||
508 | |||
509 | union { | ||
510 | __le32 raddr_hi; /* if DTYPE == (1 || 2) */ | ||
511 | __le32 next_hi; /* if DTYPE == 3 */ | ||
512 | }; | ||
513 | |||
514 | union { | ||
515 | struct { /* if DTYPE == 1 */ | ||
516 | __le32 bufptr_lo; | ||
517 | __le32 bufptr_hi; | ||
518 | __le32 s_dist; | ||
519 | __le32 s_size; | ||
520 | } t1; | ||
521 | __le32 data[4]; /* if DTYPE == 2 */ | ||
522 | u32 reserved[4]; /* if DTYPE == 3 */ | ||
523 | }; | ||
524 | } __aligned(32); | ||
525 | |||
526 | /* | ||
527 | * Inbound Messaging Descriptor | ||
528 | */ | ||
529 | struct tsi721_imsg_desc { | ||
530 | __le32 type_id; | ||
531 | |||
532 | #define TSI721_IMD_DEVID 0x0000ffff | ||
533 | #define TSI721_IMD_CRF 0x00010000 | ||
534 | #define TSI721_IMD_PRIO 0x00060000 | ||
535 | #define TSI721_IMD_TT 0x00180000 | ||
536 | #define TSI721_IMD_DTYPE 0xe0000000 | ||
537 | |||
538 | __le32 msg_info; | ||
539 | |||
540 | #define TSI721_IMD_BCOUNT 0x00000ff8 | ||
541 | #define TSI721_IMD_SSIZE 0x0000f000 | ||
542 | #define TSI721_IMD_LETER 0x00030000 | ||
543 | #define TSI721_IMD_XMBOX 0x003c0000 | ||
544 | #define TSI721_IMD_MBOX 0x00c00000 | ||
545 | #define TSI721_IMD_CS 0x78000000 | ||
546 | #define TSI721_IMD_HO 0x80000000 | ||
547 | |||
548 | __le32 bufptr_lo; | ||
549 | __le32 bufptr_hi; | ||
550 | u32 reserved[12]; | ||
551 | |||
552 | } __aligned(64); | ||
553 | |||
554 | /* | ||
555 | * Outbound Messaging Descriptor | ||
556 | */ | ||
557 | struct tsi721_omsg_desc { | ||
558 | __le32 type_id; | ||
559 | |||
560 | #define TSI721_OMD_DEVID 0x0000ffff | ||
561 | #define TSI721_OMD_CRF 0x00010000 | ||
562 | #define TSI721_OMD_PRIO 0x00060000 | ||
563 | #define TSI721_OMD_IOF 0x08000000 | ||
564 | #define TSI721_OMD_DTYPE 0xe0000000 | ||
565 | #define TSI721_OMD_RSRVD 0x17f80000 | ||
566 | |||
567 | __le32 msg_info; | ||
568 | |||
569 | #define TSI721_OMD_BCOUNT 0x00000ff8 | ||
570 | #define TSI721_OMD_SSIZE 0x0000f000 | ||
571 | #define TSI721_OMD_LETER 0x00030000 | ||
572 | #define TSI721_OMD_XMBOX 0x003c0000 | ||
573 | #define TSI721_OMD_MBOX 0x00c00000 | ||
574 | #define TSI721_OMD_TT 0x0c000000 | ||
575 | |||
576 | union { | ||
577 | __le32 bufptr_lo; /* if DTYPE == 4 */ | ||
578 | __le32 next_lo; /* if DTYPE == 5 */ | ||
579 | }; | ||
580 | |||
581 | union { | ||
582 | __le32 bufptr_hi; /* if DTYPE == 4 */ | ||
583 | __le32 next_hi; /* if DTYPE == 5 */ | ||
584 | }; | ||
585 | |||
586 | } __aligned(16); | ||
587 | |||
588 | struct tsi721_dma_sts { | ||
589 | __le64 desc_sts[8]; | ||
590 | } __aligned(64); | ||
591 | |||
592 | struct tsi721_desc_sts_fifo { | ||
593 | union { | ||
594 | __le64 da64; | ||
595 | struct { | ||
596 | __le32 lo; | ||
597 | __le32 hi; | ||
598 | } da32; | ||
599 | } stat[8]; | ||
600 | } __aligned(64); | ||
601 | |||
602 | /* Descriptor types for BDMA and Messaging blocks */ | ||
603 | enum dma_dtype { | ||
604 | DTYPE1 = 1, /* Data Transfer DMA Descriptor */ | ||
605 | DTYPE2 = 2, /* Immediate Data Transfer DMA Descriptor */ | ||
606 | DTYPE3 = 3, /* Block Pointer DMA Descriptor */ | ||
607 | DTYPE4 = 4, /* Outbound Msg DMA Descriptor */ | ||
608 | DTYPE5 = 5, /* OB Messaging Block Pointer Descriptor */ | ||
609 | DTYPE6 = 6 /* Inbound Messaging Descriptor */ | ||
610 | }; | ||
611 | |||
612 | enum dma_rtype { | ||
613 | NREAD = 0, | ||
614 | LAST_NWRITE_R = 1, | ||
615 | ALL_NWRITE = 2, | ||
616 | ALL_NWRITE_R = 3, | ||
617 | MAINT_RD = 4, | ||
618 | MAINT_WR = 5 | ||
619 | }; | ||
620 | |||
621 | /* | ||
622 | * mport Driver Definitions | ||
623 | */ | ||
624 | #define TSI721_DMA_CHNUM TSI721_DMA_MAXCH | ||
625 | |||
626 | #define TSI721_DMACH_MAINT 0 /* DMA channel for maint requests */ | ||
627 | #define TSI721_DMACH_MAINT_NBD 32 /* Number of BDs for maint requests */ | ||
628 | |||
629 | #define TSI721_DMACH_DMA 1 /* DMA channel for data transfers */ | ||
630 | |||
631 | #define MSG_DMA_ENTRY_INX_TO_SIZE(x) ((0x10 << (x)) & 0xFFFF0) | ||
632 | |||
633 | enum tsi721_smsg_int_flag { | ||
634 | SMSG_INT_NONE = 0x00000000, | ||
635 | SMSG_INT_ECC_COR_CH = 0x000000ff, | ||
636 | SMSG_INT_ECC_NCOR_CH = 0x0000ff00, | ||
637 | SMSG_INT_ECC_COR = 0x00020000, | ||
638 | SMSG_INT_ECC_NCOR = 0x00040000, | ||
639 | SMSG_INT_UNS_RSP = 0x00800000, | ||
640 | SMSG_INT_ALL = 0x0006ffff | ||
641 | }; | ||
642 | |||
643 | /* Structures */ | ||
644 | |||
645 | #ifdef CONFIG_RAPIDIO_DMA_ENGINE | ||
646 | |||
647 | struct tsi721_tx_desc { | ||
648 | struct dma_async_tx_descriptor txd; | ||
649 | struct tsi721_dma_desc *hw_desc; | ||
650 | u16 destid; | ||
651 | /* low 64-bits of 66-bit RIO address */ | ||
652 | u64 rio_addr; | ||
653 | /* upper 2-bits of 66-bit RIO address */ | ||
654 | u8 rio_addr_u; | ||
655 | bool interrupt; | ||
656 | struct list_head desc_node; | ||
657 | struct list_head tx_list; | ||
658 | }; | ||
659 | |||
660 | struct tsi721_bdma_chan { | ||
661 | int id; | ||
662 | void __iomem *regs; | ||
663 | int bd_num; /* number of buffer descriptors */ | ||
664 | void *bd_base; /* start of DMA descriptors */ | ||
665 | dma_addr_t bd_phys; | ||
666 | void *sts_base; /* start of DMA BD status FIFO */ | ||
667 | dma_addr_t sts_phys; | ||
668 | int sts_size; | ||
669 | u32 sts_rdptr; | ||
670 | u32 wr_count; | ||
671 | u32 wr_count_next; | ||
672 | |||
673 | struct dma_chan dchan; | ||
674 | struct tsi721_tx_desc *tx_desc; | ||
675 | spinlock_t lock; | ||
676 | struct list_head active_list; | ||
677 | struct list_head queue; | ||
678 | struct list_head free_list; | ||
679 | dma_cookie_t completed_cookie; | ||
680 | struct tasklet_struct tasklet; | ||
681 | }; | ||
682 | |||
683 | #endif /* CONFIG_RAPIDIO_DMA_ENGINE */ | ||
684 | |||
685 | struct tsi721_bdma_maint { | ||
686 | int ch_id; /* BDMA channel number */ | ||
687 | int bd_num; /* number of buffer descriptors */ | ||
688 | void *bd_base; /* start of DMA descriptors */ | ||
689 | dma_addr_t bd_phys; | ||
690 | void *sts_base; /* start of DMA BD status FIFO */ | ||
691 | dma_addr_t sts_phys; | ||
692 | int sts_size; | ||
693 | }; | ||
694 | |||
695 | struct tsi721_imsg_ring { | ||
696 | u32 size; | ||
697 | /* VA/PA of data buffers for incoming messages */ | ||
698 | void *buf_base; | ||
699 | dma_addr_t buf_phys; | ||
700 | /* VA/PA of circular free buffer list */ | ||
701 | void *imfq_base; | ||
702 | dma_addr_t imfq_phys; | ||
703 | /* VA/PA of Inbound message descriptors */ | ||
704 | void *imd_base; | ||
705 | dma_addr_t imd_phys; | ||
706 | /* Inbound Queue buffer pointers */ | ||
707 | void *imq_base[TSI721_IMSGD_RING_SIZE]; | ||
708 | |||
709 | u32 rx_slot; | ||
710 | void *dev_id; | ||
711 | u32 fq_wrptr; | ||
712 | u32 desc_rdptr; | ||
713 | spinlock_t lock; | ||
714 | }; | ||
715 | |||
716 | struct tsi721_omsg_ring { | ||
717 | u32 size; | ||
718 | /* VA/PA of OB Msg descriptors */ | ||
719 | void *omd_base; | ||
720 | dma_addr_t omd_phys; | ||
721 | /* VA/PA of OB Msg data buffers */ | ||
722 | void *omq_base[TSI721_OMSGD_RING_SIZE]; | ||
723 | dma_addr_t omq_phys[TSI721_OMSGD_RING_SIZE]; | ||
724 | /* VA/PA of OB Msg descriptor status FIFO */ | ||
725 | void *sts_base; | ||
726 | dma_addr_t sts_phys; | ||
727 | u32 sts_size; /* # of allocated status entries */ | ||
728 | u32 sts_rdptr; | ||
729 | |||
730 | u32 tx_slot; | ||
731 | void *dev_id; | ||
732 | u32 wr_count; | ||
733 | spinlock_t lock; | ||
734 | }; | ||
735 | |||
736 | enum tsi721_flags { | ||
737 | TSI721_USING_MSI = (1 << 0), | ||
738 | TSI721_USING_MSIX = (1 << 1), | ||
739 | TSI721_IMSGID_SET = (1 << 2), | ||
740 | }; | ||
741 | |||
742 | #ifdef CONFIG_PCI_MSI | ||
743 | /* | ||
744 | * MSI-X Table Entries (0 ... 69) | ||
745 | */ | ||
746 | #define TSI721_MSIX_DMACH_DONE(x) (0 + (x)) | ||
747 | #define TSI721_MSIX_DMACH_INT(x) (8 + (x)) | ||
748 | #define TSI721_MSIX_BDMA_INT 16 | ||
749 | #define TSI721_MSIX_OMSG_DONE(x) (17 + (x)) | ||
750 | #define TSI721_MSIX_OMSG_INT(x) (25 + (x)) | ||
751 | #define TSI721_MSIX_IMSG_DQ_RCV(x) (33 + (x)) | ||
752 | #define TSI721_MSIX_IMSG_INT(x) (41 + (x)) | ||
753 | #define TSI721_MSIX_MSG_INT 49 | ||
754 | #define TSI721_MSIX_SR2PC_IDBQ_RCV(x) (50 + (x)) | ||
755 | #define TSI721_MSIX_SR2PC_CH_INT(x) (58 + (x)) | ||
756 | #define TSI721_MSIX_SR2PC_INT 66 | ||
757 | #define TSI721_MSIX_PC2SR_INT 67 | ||
758 | #define TSI721_MSIX_SRIO_MAC_INT 68 | ||
759 | #define TSI721_MSIX_I2C_INT 69 | ||
760 | |||
761 | /* MSI-X vector and init table entry indexes */ | ||
762 | enum tsi721_msix_vect { | ||
763 | TSI721_VECT_IDB, | ||
764 | TSI721_VECT_PWRX, /* PW_RX is part of SRIO MAC Interrupt reporting */ | ||
765 | TSI721_VECT_OMB0_DONE, | ||
766 | TSI721_VECT_OMB1_DONE, | ||
767 | TSI721_VECT_OMB2_DONE, | ||
768 | TSI721_VECT_OMB3_DONE, | ||
769 | TSI721_VECT_OMB0_INT, | ||
770 | TSI721_VECT_OMB1_INT, | ||
771 | TSI721_VECT_OMB2_INT, | ||
772 | TSI721_VECT_OMB3_INT, | ||
773 | TSI721_VECT_IMB0_RCV, | ||
774 | TSI721_VECT_IMB1_RCV, | ||
775 | TSI721_VECT_IMB2_RCV, | ||
776 | TSI721_VECT_IMB3_RCV, | ||
777 | TSI721_VECT_IMB0_INT, | ||
778 | TSI721_VECT_IMB1_INT, | ||
779 | TSI721_VECT_IMB2_INT, | ||
780 | TSI721_VECT_IMB3_INT, | ||
781 | #ifdef CONFIG_RAPIDIO_DMA_ENGINE | ||
782 | TSI721_VECT_DMA0_DONE, | ||
783 | TSI721_VECT_DMA1_DONE, | ||
784 | TSI721_VECT_DMA2_DONE, | ||
785 | TSI721_VECT_DMA3_DONE, | ||
786 | TSI721_VECT_DMA4_DONE, | ||
787 | TSI721_VECT_DMA5_DONE, | ||
788 | TSI721_VECT_DMA6_DONE, | ||
789 | TSI721_VECT_DMA7_DONE, | ||
790 | TSI721_VECT_DMA0_INT, | ||
791 | TSI721_VECT_DMA1_INT, | ||
792 | TSI721_VECT_DMA2_INT, | ||
793 | TSI721_VECT_DMA3_INT, | ||
794 | TSI721_VECT_DMA4_INT, | ||
795 | TSI721_VECT_DMA5_INT, | ||
796 | TSI721_VECT_DMA6_INT, | ||
797 | TSI721_VECT_DMA7_INT, | ||
798 | #endif /* CONFIG_RAPIDIO_DMA_ENGINE */ | ||
799 | TSI721_VECT_MAX | ||
800 | }; | ||
801 | |||
802 | #define IRQ_DEVICE_NAME_MAX 64 | ||
803 | |||
804 | struct msix_irq { | ||
805 | u16 vector; | ||
806 | char irq_name[IRQ_DEVICE_NAME_MAX]; | ||
807 | }; | ||
808 | #endif /* CONFIG_PCI_MSI */ | ||
809 | |||
810 | struct tsi721_device { | ||
811 | struct pci_dev *pdev; | ||
812 | struct rio_mport *mport; | ||
813 | u32 flags; | ||
814 | void __iomem *regs; | ||
815 | #ifdef CONFIG_PCI_MSI | ||
816 | struct msix_irq msix[TSI721_VECT_MAX]; | ||
817 | #endif | ||
818 | /* Doorbells */ | ||
819 | void __iomem *odb_base; | ||
820 | void *idb_base; | ||
821 | dma_addr_t idb_dma; | ||
822 | struct work_struct idb_work; | ||
823 | u32 db_discard_count; | ||
824 | |||
825 | /* Inbound Port-Write */ | ||
826 | struct work_struct pw_work; | ||
827 | struct kfifo pw_fifo; | ||
828 | spinlock_t pw_fifo_lock; | ||
829 | u32 pw_discard_count; | ||
830 | |||
831 | /* BDMA Engine */ | ||
832 | struct tsi721_bdma_maint mdma; /* Maintenance rd/wr request channel */ | ||
833 | |||
834 | #ifdef CONFIG_RAPIDIO_DMA_ENGINE | ||
835 | struct tsi721_bdma_chan bdma[TSI721_DMA_CHNUM]; | ||
836 | #endif | ||
837 | |||
838 | /* Inbound Messaging */ | ||
839 | int imsg_init[TSI721_IMSG_CHNUM]; | ||
840 | struct tsi721_imsg_ring imsg_ring[TSI721_IMSG_CHNUM]; | ||
841 | |||
842 | /* Outbound Messaging */ | ||
843 | int omsg_init[TSI721_OMSG_CHNUM]; | ||
844 | struct tsi721_omsg_ring omsg_ring[TSI721_OMSG_CHNUM]; | ||
845 | }; | ||
846 | |||
847 | #ifdef CONFIG_RAPIDIO_DMA_ENGINE | ||
848 | extern void tsi721_bdma_handler(struct tsi721_bdma_chan *bdma_chan); | ||
849 | extern int tsi721_register_dma(struct tsi721_device *priv); | ||
850 | #endif | ||
851 | |||
852 | #endif | ||
diff --git a/drivers/rapidio/devices/tsi721_dma.c b/drivers/rapidio/devices/tsi721_dma.c deleted file mode 100644 index 502663f5f7c..00000000000 --- a/drivers/rapidio/devices/tsi721_dma.c +++ /dev/null | |||
@@ -1,823 +0,0 @@ | |||
1 | /* | ||
2 | * DMA Engine support for Tsi721 PCIExpress-to-SRIO bridge | ||
3 | * | ||
4 | * Copyright 2011 Integrated Device Technology, Inc. | ||
5 | * Alexandre Bounine <alexandre.bounine@idt.com> | ||
6 | * | ||
7 | * This program is free software; you can redistribute it and/or modify it | ||
8 | * under the terms of the GNU General Public License as published by the Free | ||
9 | * Software Foundation; either version 2 of the License, or (at your option) | ||
10 | * any later version. | ||
11 | * | ||
12 | * This program is distributed in the hope that it will be useful, but WITHOUT | ||
13 | * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or | ||
14 | * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for | ||
15 | * more details. | ||
16 | * | ||
17 | * You should have received a copy of the GNU General Public License along with | ||
18 | * this program; if not, write to the Free Software Foundation, Inc., 59 | ||
19 | * Temple Place - Suite 330, Boston, MA 02111-1307, USA. | ||
20 | */ | ||
21 | |||
22 | #include <linux/io.h> | ||
23 | #include <linux/errno.h> | ||
24 | #include <linux/init.h> | ||
25 | #include <linux/ioport.h> | ||
26 | #include <linux/kernel.h> | ||
27 | #include <linux/module.h> | ||
28 | #include <linux/pci.h> | ||
29 | #include <linux/rio.h> | ||
30 | #include <linux/rio_drv.h> | ||
31 | #include <linux/dma-mapping.h> | ||
32 | #include <linux/interrupt.h> | ||
33 | #include <linux/kfifo.h> | ||
34 | #include <linux/delay.h> | ||
35 | |||
36 | #include "tsi721.h" | ||
37 | |||
38 | static inline struct tsi721_bdma_chan *to_tsi721_chan(struct dma_chan *chan) | ||
39 | { | ||
40 | return container_of(chan, struct tsi721_bdma_chan, dchan); | ||
41 | } | ||
42 | |||
43 | static inline struct tsi721_device *to_tsi721(struct dma_device *ddev) | ||
44 | { | ||
45 | return container_of(ddev, struct rio_mport, dma)->priv; | ||
46 | } | ||
47 | |||
48 | static inline | ||
49 | struct tsi721_tx_desc *to_tsi721_desc(struct dma_async_tx_descriptor *txd) | ||
50 | { | ||
51 | return container_of(txd, struct tsi721_tx_desc, txd); | ||
52 | } | ||
53 | |||
54 | static inline | ||
55 | struct tsi721_tx_desc *tsi721_dma_first_active( | ||
56 | struct tsi721_bdma_chan *bdma_chan) | ||
57 | { | ||
58 | return list_first_entry(&bdma_chan->active_list, | ||
59 | struct tsi721_tx_desc, desc_node); | ||
60 | } | ||
61 | |||
62 | static int tsi721_bdma_ch_init(struct tsi721_bdma_chan *bdma_chan) | ||
63 | { | ||
64 | struct tsi721_dma_desc *bd_ptr; | ||
65 | struct device *dev = bdma_chan->dchan.device->dev; | ||
66 | u64 *sts_ptr; | ||
67 | dma_addr_t bd_phys; | ||
68 | dma_addr_t sts_phys; | ||
69 | int sts_size; | ||
70 | int bd_num = bdma_chan->bd_num; | ||
71 | |||
72 | dev_dbg(dev, "Init Block DMA Engine, CH%d\n", bdma_chan->id); | ||
73 | |||
74 | /* Allocate space for DMA descriptors */ | ||
75 | bd_ptr = dma_zalloc_coherent(dev, | ||
76 | bd_num * sizeof(struct tsi721_dma_desc), | ||
77 | &bd_phys, GFP_KERNEL); | ||
78 | if (!bd_ptr) | ||
79 | return -ENOMEM; | ||
80 | |||
81 | bdma_chan->bd_phys = bd_phys; | ||
82 | bdma_chan->bd_base = bd_ptr; | ||
83 | |||
84 | dev_dbg(dev, "DMA descriptors @ %p (phys = %llx)\n", | ||
85 | bd_ptr, (unsigned long long)bd_phys); | ||
86 | |||
87 | /* Allocate space for descriptor status FIFO */ | ||
88 | sts_size = (bd_num >= TSI721_DMA_MINSTSSZ) ? | ||
89 | bd_num : TSI721_DMA_MINSTSSZ; | ||
90 | sts_size = roundup_pow_of_two(sts_size); | ||
91 | sts_ptr = dma_zalloc_coherent(dev, | ||
92 | sts_size * sizeof(struct tsi721_dma_sts), | ||
93 | &sts_phys, GFP_KERNEL); | ||
94 | if (!sts_ptr) { | ||
95 | /* Free space allocated for DMA descriptors */ | ||
96 | dma_free_coherent(dev, | ||
97 | bd_num * sizeof(struct tsi721_dma_desc), | ||
98 | bd_ptr, bd_phys); | ||
99 | bdma_chan->bd_base = NULL; | ||
100 | return -ENOMEM; | ||
101 | } | ||
102 | |||
103 | bdma_chan->sts_phys = sts_phys; | ||
104 | bdma_chan->sts_base = sts_ptr; | ||
105 | bdma_chan->sts_size = sts_size; | ||
106 | |||
107 | dev_dbg(dev, | ||
108 | "desc status FIFO @ %p (phys = %llx) size=0x%x\n", | ||
109 | sts_ptr, (unsigned long long)sts_phys, sts_size); | ||
110 | |||
111 | /* Initialize DMA descriptors ring */ | ||
112 | bd_ptr[bd_num - 1].type_id = cpu_to_le32(DTYPE3 << 29); | ||
113 | bd_ptr[bd_num - 1].next_lo = cpu_to_le32((u64)bd_phys & | ||
114 | TSI721_DMAC_DPTRL_MASK); | ||
115 | bd_ptr[bd_num - 1].next_hi = cpu_to_le32((u64)bd_phys >> 32); | ||
116 | |||
117 | /* Setup DMA descriptor pointers */ | ||
118 | iowrite32(((u64)bd_phys >> 32), | ||
119 | bdma_chan->regs + TSI721_DMAC_DPTRH); | ||
120 | iowrite32(((u64)bd_phys & TSI721_DMAC_DPTRL_MASK), | ||
121 | bdma_chan->regs + TSI721_DMAC_DPTRL); | ||
122 | |||
123 | /* Setup descriptor status FIFO */ | ||
124 | iowrite32(((u64)sts_phys >> 32), | ||
125 | bdma_chan->regs + TSI721_DMAC_DSBH); | ||
126 | iowrite32(((u64)sts_phys & TSI721_DMAC_DSBL_MASK), | ||
127 | bdma_chan->regs + TSI721_DMAC_DSBL); | ||
128 | iowrite32(TSI721_DMAC_DSSZ_SIZE(sts_size), | ||
129 | bdma_chan->regs + TSI721_DMAC_DSSZ); | ||
130 | |||
131 | /* Clear interrupt bits */ | ||
132 | iowrite32(TSI721_DMAC_INT_ALL, | ||
133 | bdma_chan->regs + TSI721_DMAC_INT); | ||
134 | |||
135 | ioread32(bdma_chan->regs + TSI721_DMAC_INT); | ||
136 | |||
137 | /* Toggle DMA channel initialization */ | ||
138 | iowrite32(TSI721_DMAC_CTL_INIT, bdma_chan->regs + TSI721_DMAC_CTL); | ||
139 | ioread32(bdma_chan->regs + TSI721_DMAC_CTL); | ||
140 | bdma_chan->wr_count = bdma_chan->wr_count_next = 0; | ||
141 | bdma_chan->sts_rdptr = 0; | ||
142 | udelay(10); | ||
143 | |||
144 | return 0; | ||
145 | } | ||
146 | |||
147 | static int tsi721_bdma_ch_free(struct tsi721_bdma_chan *bdma_chan) | ||
148 | { | ||
149 | u32 ch_stat; | ||
150 | |||
151 | if (bdma_chan->bd_base == NULL) | ||
152 | return 0; | ||
153 | |||
154 | /* Check if DMA channel still running */ | ||
155 | ch_stat = ioread32(bdma_chan->regs + TSI721_DMAC_STS); | ||
156 | if (ch_stat & TSI721_DMAC_STS_RUN) | ||
157 | return -EFAULT; | ||
158 | |||
159 | /* Put DMA channel into init state */ | ||
160 | iowrite32(TSI721_DMAC_CTL_INIT, bdma_chan->regs + TSI721_DMAC_CTL); | ||
161 | |||
162 | /* Free space allocated for DMA descriptors */ | ||
163 | dma_free_coherent(bdma_chan->dchan.device->dev, | ||
164 | bdma_chan->bd_num * sizeof(struct tsi721_dma_desc), | ||
165 | bdma_chan->bd_base, bdma_chan->bd_phys); | ||
166 | bdma_chan->bd_base = NULL; | ||
167 | |||
168 | /* Free space allocated for status FIFO */ | ||
169 | dma_free_coherent(bdma_chan->dchan.device->dev, | ||
170 | bdma_chan->sts_size * sizeof(struct tsi721_dma_sts), | ||
171 | bdma_chan->sts_base, bdma_chan->sts_phys); | ||
172 | bdma_chan->sts_base = NULL; | ||
173 | return 0; | ||
174 | } | ||
175 | |||
176 | static void | ||
177 | tsi721_bdma_interrupt_enable(struct tsi721_bdma_chan *bdma_chan, int enable) | ||
178 | { | ||
179 | if (enable) { | ||
180 | /* Clear pending BDMA channel interrupts */ | ||
181 | iowrite32(TSI721_DMAC_INT_ALL, | ||
182 | bdma_chan->regs + TSI721_DMAC_INT); | ||
183 | ioread32(bdma_chan->regs + TSI721_DMAC_INT); | ||
184 | /* Enable BDMA channel interrupts */ | ||
185 | iowrite32(TSI721_DMAC_INT_ALL, | ||
186 | bdma_chan->regs + TSI721_DMAC_INTE); | ||
187 | } else { | ||
188 | /* Disable BDMA channel interrupts */ | ||
189 | iowrite32(0, bdma_chan->regs + TSI721_DMAC_INTE); | ||
190 | /* Clear pending BDMA channel interrupts */ | ||
191 | iowrite32(TSI721_DMAC_INT_ALL, | ||
192 | bdma_chan->regs + TSI721_DMAC_INT); | ||
193 | } | ||
194 | |||
195 | } | ||
196 | |||
197 | static bool tsi721_dma_is_idle(struct tsi721_bdma_chan *bdma_chan) | ||
198 | { | ||
199 | u32 sts; | ||
200 | |||
201 | sts = ioread32(bdma_chan->regs + TSI721_DMAC_STS); | ||
202 | return ((sts & TSI721_DMAC_STS_RUN) == 0); | ||
203 | } | ||
204 | |||
205 | void tsi721_bdma_handler(struct tsi721_bdma_chan *bdma_chan) | ||
206 | { | ||
207 | /* Disable BDMA channel interrupts */ | ||
208 | iowrite32(0, bdma_chan->regs + TSI721_DMAC_INTE); | ||
209 | |||
210 | tasklet_schedule(&bdma_chan->tasklet); | ||
211 | } | ||
212 | |||
213 | #ifdef CONFIG_PCI_MSI | ||
214 | /** | ||
215 | * tsi721_omsg_msix - MSI-X interrupt handler for BDMA channels | ||
216 | * @irq: Linux interrupt number | ||
217 | * @ptr: Pointer to interrupt-specific data (BDMA channel structure) | ||
218 | * | ||
219 | * Handles BDMA channel interrupts signaled using MSI-X. | ||
220 | */ | ||
221 | static irqreturn_t tsi721_bdma_msix(int irq, void *ptr) | ||
222 | { | ||
223 | struct tsi721_bdma_chan *bdma_chan = ptr; | ||
224 | |||
225 | tsi721_bdma_handler(bdma_chan); | ||
226 | return IRQ_HANDLED; | ||
227 | } | ||
228 | #endif /* CONFIG_PCI_MSI */ | ||
229 | |||
230 | /* Must be called with the spinlock held */ | ||
231 | static void tsi721_start_dma(struct tsi721_bdma_chan *bdma_chan) | ||
232 | { | ||
233 | if (!tsi721_dma_is_idle(bdma_chan)) { | ||
234 | dev_err(bdma_chan->dchan.device->dev, | ||
235 | "BUG: Attempt to start non-idle channel\n"); | ||
236 | return; | ||
237 | } | ||
238 | |||
239 | if (bdma_chan->wr_count == bdma_chan->wr_count_next) { | ||
240 | dev_err(bdma_chan->dchan.device->dev, | ||
241 | "BUG: Attempt to start DMA with no BDs ready\n"); | ||
242 | return; | ||
243 | } | ||
244 | |||
245 | dev_dbg(bdma_chan->dchan.device->dev, | ||
246 | "tx_chan: %p, chan: %d, regs: %p\n", | ||
247 | bdma_chan, bdma_chan->dchan.chan_id, bdma_chan->regs); | ||
248 | |||
249 | iowrite32(bdma_chan->wr_count_next, | ||
250 | bdma_chan->regs + TSI721_DMAC_DWRCNT); | ||
251 | ioread32(bdma_chan->regs + TSI721_DMAC_DWRCNT); | ||
252 | |||
253 | bdma_chan->wr_count = bdma_chan->wr_count_next; | ||
254 | } | ||
255 | |||
256 | static void tsi721_desc_put(struct tsi721_bdma_chan *bdma_chan, | ||
257 | struct tsi721_tx_desc *desc) | ||
258 | { | ||
259 | dev_dbg(bdma_chan->dchan.device->dev, | ||
260 | "Put desc: %p into free list\n", desc); | ||
261 | |||
262 | if (desc) { | ||
263 | spin_lock_bh(&bdma_chan->lock); | ||
264 | list_splice_init(&desc->tx_list, &bdma_chan->free_list); | ||
265 | list_add(&desc->desc_node, &bdma_chan->free_list); | ||
266 | bdma_chan->wr_count_next = bdma_chan->wr_count; | ||
267 | spin_unlock_bh(&bdma_chan->lock); | ||
268 | } | ||
269 | } | ||
270 | |||
271 | static | ||
272 | struct tsi721_tx_desc *tsi721_desc_get(struct tsi721_bdma_chan *bdma_chan) | ||
273 | { | ||
274 | struct tsi721_tx_desc *tx_desc, *_tx_desc; | ||
275 | struct tsi721_tx_desc *ret = NULL; | ||
276 | int i; | ||
277 | |||
278 | spin_lock_bh(&bdma_chan->lock); | ||
279 | list_for_each_entry_safe(tx_desc, _tx_desc, | ||
280 | &bdma_chan->free_list, desc_node) { | ||
281 | if (async_tx_test_ack(&tx_desc->txd)) { | ||
282 | list_del(&tx_desc->desc_node); | ||
283 | ret = tx_desc; | ||
284 | break; | ||
285 | } | ||
286 | dev_dbg(bdma_chan->dchan.device->dev, | ||
287 | "desc %p not ACKed\n", tx_desc); | ||
288 | } | ||
289 | |||
290 | i = bdma_chan->wr_count_next % bdma_chan->bd_num; | ||
291 | if (i == bdma_chan->bd_num - 1) { | ||
292 | i = 0; | ||
293 | bdma_chan->wr_count_next++; /* skip link descriptor */ | ||
294 | } | ||
295 | |||
296 | bdma_chan->wr_count_next++; | ||
297 | tx_desc->txd.phys = bdma_chan->bd_phys + | ||
298 | i * sizeof(struct tsi721_dma_desc); | ||
299 | tx_desc->hw_desc = &((struct tsi721_dma_desc *)bdma_chan->bd_base)[i]; | ||
300 | |||
301 | spin_unlock_bh(&bdma_chan->lock); | ||
302 | |||
303 | return ret; | ||
304 | } | ||
305 | |||
306 | static int | ||
307 | tsi721_fill_desc(struct tsi721_bdma_chan *bdma_chan, | ||
308 | struct tsi721_tx_desc *desc, struct scatterlist *sg, | ||
309 | enum dma_rtype rtype, u32 sys_size) | ||
310 | { | ||
311 | struct tsi721_dma_desc *bd_ptr = desc->hw_desc; | ||
312 | u64 rio_addr; | ||
313 | |||
314 | if (sg_dma_len(sg) > TSI721_DMAD_BCOUNT1 + 1) { | ||
315 | dev_err(bdma_chan->dchan.device->dev, | ||
316 | "SG element is too large\n"); | ||
317 | return -EINVAL; | ||
318 | } | ||
319 | |||
320 | dev_dbg(bdma_chan->dchan.device->dev, | ||
321 | "desc: 0x%llx, addr: 0x%llx len: 0x%x\n", | ||
322 | (u64)desc->txd.phys, (unsigned long long)sg_dma_address(sg), | ||
323 | sg_dma_len(sg)); | ||
324 | |||
325 | dev_dbg(bdma_chan->dchan.device->dev, | ||
326 | "bd_ptr = %p did=%d raddr=0x%llx\n", | ||
327 | bd_ptr, desc->destid, desc->rio_addr); | ||
328 | |||
329 | /* Initialize DMA descriptor */ | ||
330 | bd_ptr->type_id = cpu_to_le32((DTYPE1 << 29) | | ||
331 | (rtype << 19) | desc->destid); | ||
332 | if (desc->interrupt) | ||
333 | bd_ptr->type_id |= cpu_to_le32(TSI721_DMAD_IOF); | ||
334 | bd_ptr->bcount = cpu_to_le32(((desc->rio_addr & 0x3) << 30) | | ||
335 | (sys_size << 26) | sg_dma_len(sg)); | ||
336 | rio_addr = (desc->rio_addr >> 2) | | ||
337 | ((u64)(desc->rio_addr_u & 0x3) << 62); | ||
338 | bd_ptr->raddr_lo = cpu_to_le32(rio_addr & 0xffffffff); | ||
339 | bd_ptr->raddr_hi = cpu_to_le32(rio_addr >> 32); | ||
340 | bd_ptr->t1.bufptr_lo = cpu_to_le32( | ||
341 | (u64)sg_dma_address(sg) & 0xffffffff); | ||
342 | bd_ptr->t1.bufptr_hi = cpu_to_le32((u64)sg_dma_address(sg) >> 32); | ||
343 | bd_ptr->t1.s_dist = 0; | ||
344 | bd_ptr->t1.s_size = 0; | ||
345 | |||
346 | return 0; | ||
347 | } | ||
348 | |||
349 | static void tsi721_dma_chain_complete(struct tsi721_bdma_chan *bdma_chan, | ||
350 | struct tsi721_tx_desc *desc) | ||
351 | { | ||
352 | struct dma_async_tx_descriptor *txd = &desc->txd; | ||
353 | dma_async_tx_callback callback = txd->callback; | ||
354 | void *param = txd->callback_param; | ||
355 | |||
356 | list_splice_init(&desc->tx_list, &bdma_chan->free_list); | ||
357 | list_move(&desc->desc_node, &bdma_chan->free_list); | ||
358 | bdma_chan->completed_cookie = txd->cookie; | ||
359 | |||
360 | if (callback) | ||
361 | callback(param); | ||
362 | } | ||
363 | |||
364 | static void tsi721_dma_complete_all(struct tsi721_bdma_chan *bdma_chan) | ||
365 | { | ||
366 | struct tsi721_tx_desc *desc, *_d; | ||
367 | LIST_HEAD(list); | ||
368 | |||
369 | BUG_ON(!tsi721_dma_is_idle(bdma_chan)); | ||
370 | |||
371 | if (!list_empty(&bdma_chan->queue)) | ||
372 | tsi721_start_dma(bdma_chan); | ||
373 | |||
374 | list_splice_init(&bdma_chan->active_list, &list); | ||
375 | list_splice_init(&bdma_chan->queue, &bdma_chan->active_list); | ||
376 | |||
377 | list_for_each_entry_safe(desc, _d, &list, desc_node) | ||
378 | tsi721_dma_chain_complete(bdma_chan, desc); | ||
379 | } | ||
380 | |||
381 | static void tsi721_clr_stat(struct tsi721_bdma_chan *bdma_chan) | ||
382 | { | ||
383 | u32 srd_ptr; | ||
384 | u64 *sts_ptr; | ||
385 | int i, j; | ||
386 | |||
387 | /* Check and clear descriptor status FIFO entries */ | ||
388 | srd_ptr = bdma_chan->sts_rdptr; | ||
389 | sts_ptr = bdma_chan->sts_base; | ||
390 | j = srd_ptr * 8; | ||
391 | while (sts_ptr[j]) { | ||
392 | for (i = 0; i < 8 && sts_ptr[j]; i++, j++) | ||
393 | sts_ptr[j] = 0; | ||
394 | |||
395 | ++srd_ptr; | ||
396 | srd_ptr %= bdma_chan->sts_size; | ||
397 | j = srd_ptr * 8; | ||
398 | } | ||
399 | |||
400 | iowrite32(srd_ptr, bdma_chan->regs + TSI721_DMAC_DSRP); | ||
401 | bdma_chan->sts_rdptr = srd_ptr; | ||
402 | } | ||
403 | |||
404 | static void tsi721_advance_work(struct tsi721_bdma_chan *bdma_chan) | ||
405 | { | ||
406 | if (list_empty(&bdma_chan->active_list) || | ||
407 | list_is_singular(&bdma_chan->active_list)) { | ||
408 | dev_dbg(bdma_chan->dchan.device->dev, | ||
409 | "%s: Active_list empty\n", __func__); | ||
410 | tsi721_dma_complete_all(bdma_chan); | ||
411 | } else { | ||
412 | dev_dbg(bdma_chan->dchan.device->dev, | ||
413 | "%s: Active_list NOT empty\n", __func__); | ||
414 | tsi721_dma_chain_complete(bdma_chan, | ||
415 | tsi721_dma_first_active(bdma_chan)); | ||
416 | tsi721_start_dma(bdma_chan); | ||
417 | } | ||
418 | } | ||
419 | |||
420 | static void tsi721_dma_tasklet(unsigned long data) | ||
421 | { | ||
422 | struct tsi721_bdma_chan *bdma_chan = (struct tsi721_bdma_chan *)data; | ||
423 | u32 dmac_int, dmac_sts; | ||
424 | |||
425 | dmac_int = ioread32(bdma_chan->regs + TSI721_DMAC_INT); | ||
426 | dev_dbg(bdma_chan->dchan.device->dev, "%s: DMAC%d_INT = 0x%x\n", | ||
427 | __func__, bdma_chan->id, dmac_int); | ||
428 | /* Clear channel interrupts */ | ||
429 | iowrite32(dmac_int, bdma_chan->regs + TSI721_DMAC_INT); | ||
430 | |||
431 | if (dmac_int & TSI721_DMAC_INT_ERR) { | ||
432 | dmac_sts = ioread32(bdma_chan->regs + TSI721_DMAC_STS); | ||
433 | dev_err(bdma_chan->dchan.device->dev, | ||
434 | "%s: DMA ERROR - DMAC%d_STS = 0x%x\n", | ||
435 | __func__, bdma_chan->id, dmac_sts); | ||
436 | } | ||
437 | |||
438 | if (dmac_int & TSI721_DMAC_INT_STFULL) { | ||
439 | dev_err(bdma_chan->dchan.device->dev, | ||
440 | "%s: DMAC%d descriptor status FIFO is full\n", | ||
441 | __func__, bdma_chan->id); | ||
442 | } | ||
443 | |||
444 | if (dmac_int & (TSI721_DMAC_INT_DONE | TSI721_DMAC_INT_IOFDONE)) { | ||
445 | tsi721_clr_stat(bdma_chan); | ||
446 | spin_lock(&bdma_chan->lock); | ||
447 | tsi721_advance_work(bdma_chan); | ||
448 | spin_unlock(&bdma_chan->lock); | ||
449 | } | ||
450 | |||
451 | /* Re-Enable BDMA channel interrupts */ | ||
452 | iowrite32(TSI721_DMAC_INT_ALL, bdma_chan->regs + TSI721_DMAC_INTE); | ||
453 | } | ||
454 | |||
455 | static dma_cookie_t tsi721_tx_submit(struct dma_async_tx_descriptor *txd) | ||
456 | { | ||
457 | struct tsi721_tx_desc *desc = to_tsi721_desc(txd); | ||
458 | struct tsi721_bdma_chan *bdma_chan = to_tsi721_chan(txd->chan); | ||
459 | dma_cookie_t cookie; | ||
460 | |||
461 | spin_lock_bh(&bdma_chan->lock); | ||
462 | |||
463 | cookie = txd->chan->cookie; | ||
464 | if (++cookie < 0) | ||
465 | cookie = 1; | ||
466 | txd->chan->cookie = cookie; | ||
467 | txd->cookie = cookie; | ||
468 | |||
469 | if (list_empty(&bdma_chan->active_list)) { | ||
470 | list_add_tail(&desc->desc_node, &bdma_chan->active_list); | ||
471 | tsi721_start_dma(bdma_chan); | ||
472 | } else { | ||
473 | list_add_tail(&desc->desc_node, &bdma_chan->queue); | ||
474 | } | ||
475 | |||
476 | spin_unlock_bh(&bdma_chan->lock); | ||
477 | return cookie; | ||
478 | } | ||
479 | |||
480 | static int tsi721_alloc_chan_resources(struct dma_chan *dchan) | ||
481 | { | ||
482 | struct tsi721_bdma_chan *bdma_chan = to_tsi721_chan(dchan); | ||
483 | #ifdef CONFIG_PCI_MSI | ||
484 | struct tsi721_device *priv = to_tsi721(dchan->device); | ||
485 | #endif | ||
486 | struct tsi721_tx_desc *desc = NULL; | ||
487 | LIST_HEAD(tmp_list); | ||
488 | int i; | ||
489 | int rc; | ||
490 | |||
491 | if (bdma_chan->bd_base) | ||
492 | return bdma_chan->bd_num - 1; | ||
493 | |||
494 | /* Initialize BDMA channel */ | ||
495 | if (tsi721_bdma_ch_init(bdma_chan)) { | ||
496 | dev_err(dchan->device->dev, "Unable to initialize data DMA" | ||
497 | " channel %d, aborting\n", bdma_chan->id); | ||
498 | return -ENOMEM; | ||
499 | } | ||
500 | |||
501 | /* Alocate matching number of logical descriptors */ | ||
502 | desc = kcalloc((bdma_chan->bd_num - 1), sizeof(struct tsi721_tx_desc), | ||
503 | GFP_KERNEL); | ||
504 | if (!desc) { | ||
505 | dev_err(dchan->device->dev, | ||
506 | "Failed to allocate logical descriptors\n"); | ||
507 | rc = -ENOMEM; | ||
508 | goto err_out; | ||
509 | } | ||
510 | |||
511 | bdma_chan->tx_desc = desc; | ||
512 | |||
513 | for (i = 0; i < bdma_chan->bd_num - 1; i++) { | ||
514 | dma_async_tx_descriptor_init(&desc[i].txd, dchan); | ||
515 | desc[i].txd.tx_submit = tsi721_tx_submit; | ||
516 | desc[i].txd.flags = DMA_CTRL_ACK; | ||
517 | INIT_LIST_HEAD(&desc[i].tx_list); | ||
518 | list_add_tail(&desc[i].desc_node, &tmp_list); | ||
519 | } | ||
520 | |||
521 | spin_lock_bh(&bdma_chan->lock); | ||
522 | list_splice(&tmp_list, &bdma_chan->free_list); | ||
523 | bdma_chan->completed_cookie = dchan->cookie = 1; | ||
524 | spin_unlock_bh(&bdma_chan->lock); | ||
525 | |||
526 | #ifdef CONFIG_PCI_MSI | ||
527 | if (priv->flags & TSI721_USING_MSIX) { | ||
528 | /* Request interrupt service if we are in MSI-X mode */ | ||
529 | rc = request_irq( | ||
530 | priv->msix[TSI721_VECT_DMA0_DONE + | ||
531 | bdma_chan->id].vector, | ||
532 | tsi721_bdma_msix, 0, | ||
533 | priv->msix[TSI721_VECT_DMA0_DONE + | ||
534 | bdma_chan->id].irq_name, | ||
535 | (void *)bdma_chan); | ||
536 | |||
537 | if (rc) { | ||
538 | dev_dbg(dchan->device->dev, | ||
539 | "Unable to allocate MSI-X interrupt for " | ||
540 | "BDMA%d-DONE\n", bdma_chan->id); | ||
541 | goto err_out; | ||
542 | } | ||
543 | |||
544 | rc = request_irq(priv->msix[TSI721_VECT_DMA0_INT + | ||
545 | bdma_chan->id].vector, | ||
546 | tsi721_bdma_msix, 0, | ||
547 | priv->msix[TSI721_VECT_DMA0_INT + | ||
548 | bdma_chan->id].irq_name, | ||
549 | (void *)bdma_chan); | ||
550 | |||
551 | if (rc) { | ||
552 | dev_dbg(dchan->device->dev, | ||
553 | "Unable to allocate MSI-X interrupt for " | ||
554 | "BDMA%d-INT\n", bdma_chan->id); | ||
555 | free_irq( | ||
556 | priv->msix[TSI721_VECT_DMA0_DONE + | ||
557 | bdma_chan->id].vector, | ||
558 | (void *)bdma_chan); | ||
559 | rc = -EIO; | ||
560 | goto err_out; | ||
561 | } | ||
562 | } | ||
563 | #endif /* CONFIG_PCI_MSI */ | ||
564 | |||
565 | tasklet_enable(&bdma_chan->tasklet); | ||
566 | tsi721_bdma_interrupt_enable(bdma_chan, 1); | ||
567 | |||
568 | return bdma_chan->bd_num - 1; | ||
569 | |||
570 | err_out: | ||
571 | kfree(desc); | ||
572 | tsi721_bdma_ch_free(bdma_chan); | ||
573 | return rc; | ||
574 | } | ||
575 | |||
576 | static void tsi721_free_chan_resources(struct dma_chan *dchan) | ||
577 | { | ||
578 | struct tsi721_bdma_chan *bdma_chan = to_tsi721_chan(dchan); | ||
579 | #ifdef CONFIG_PCI_MSI | ||
580 | struct tsi721_device *priv = to_tsi721(dchan->device); | ||
581 | #endif | ||
582 | LIST_HEAD(list); | ||
583 | |||
584 | dev_dbg(dchan->device->dev, "%s: Entry\n", __func__); | ||
585 | |||
586 | if (bdma_chan->bd_base == NULL) | ||
587 | return; | ||
588 | |||
589 | BUG_ON(!list_empty(&bdma_chan->active_list)); | ||
590 | BUG_ON(!list_empty(&bdma_chan->queue)); | ||
591 | |||
592 | tasklet_disable(&bdma_chan->tasklet); | ||
593 | |||
594 | spin_lock_bh(&bdma_chan->lock); | ||
595 | list_splice_init(&bdma_chan->free_list, &list); | ||
596 | spin_unlock_bh(&bdma_chan->lock); | ||
597 | |||
598 | tsi721_bdma_interrupt_enable(bdma_chan, 0); | ||
599 | |||
600 | #ifdef CONFIG_PCI_MSI | ||
601 | if (priv->flags & TSI721_USING_MSIX) { | ||
602 | free_irq(priv->msix[TSI721_VECT_DMA0_DONE + | ||
603 | bdma_chan->id].vector, (void *)bdma_chan); | ||
604 | free_irq(priv->msix[TSI721_VECT_DMA0_INT + | ||
605 | bdma_chan->id].vector, (void *)bdma_chan); | ||
606 | } | ||
607 | #endif /* CONFIG_PCI_MSI */ | ||
608 | |||
609 | tsi721_bdma_ch_free(bdma_chan); | ||
610 | kfree(bdma_chan->tx_desc); | ||
611 | } | ||
612 | |||
613 | static | ||
614 | enum dma_status tsi721_tx_status(struct dma_chan *dchan, dma_cookie_t cookie, | ||
615 | struct dma_tx_state *txstate) | ||
616 | { | ||
617 | struct tsi721_bdma_chan *bdma_chan = to_tsi721_chan(dchan); | ||
618 | dma_cookie_t last_used; | ||
619 | dma_cookie_t last_completed; | ||
620 | int ret; | ||
621 | |||
622 | spin_lock_bh(&bdma_chan->lock); | ||
623 | last_completed = bdma_chan->completed_cookie; | ||
624 | last_used = dchan->cookie; | ||
625 | spin_unlock_bh(&bdma_chan->lock); | ||
626 | |||
627 | ret = dma_async_is_complete(cookie, last_completed, last_used); | ||
628 | |||
629 | dma_set_tx_state(txstate, last_completed, last_used, 0); | ||
630 | |||
631 | dev_dbg(dchan->device->dev, | ||
632 | "%s: exit, ret: %d, last_completed: %d, last_used: %d\n", | ||
633 | __func__, ret, last_completed, last_used); | ||
634 | |||
635 | return ret; | ||
636 | } | ||
637 | |||
638 | static void tsi721_issue_pending(struct dma_chan *dchan) | ||
639 | { | ||
640 | struct tsi721_bdma_chan *bdma_chan = to_tsi721_chan(dchan); | ||
641 | |||
642 | dev_dbg(dchan->device->dev, "%s: Entry\n", __func__); | ||
643 | |||
644 | if (tsi721_dma_is_idle(bdma_chan)) { | ||
645 | spin_lock_bh(&bdma_chan->lock); | ||
646 | tsi721_advance_work(bdma_chan); | ||
647 | spin_unlock_bh(&bdma_chan->lock); | ||
648 | } else | ||
649 | dev_dbg(dchan->device->dev, | ||
650 | "%s: DMA channel still busy\n", __func__); | ||
651 | } | ||
652 | |||
653 | static | ||
654 | struct dma_async_tx_descriptor *tsi721_prep_rio_sg(struct dma_chan *dchan, | ||
655 | struct scatterlist *sgl, unsigned int sg_len, | ||
656 | enum dma_transfer_direction dir, unsigned long flags, | ||
657 | void *tinfo) | ||
658 | { | ||
659 | struct tsi721_bdma_chan *bdma_chan = to_tsi721_chan(dchan); | ||
660 | struct tsi721_tx_desc *desc = NULL; | ||
661 | struct tsi721_tx_desc *first = NULL; | ||
662 | struct scatterlist *sg; | ||
663 | struct rio_dma_ext *rext = tinfo; | ||
664 | u64 rio_addr = rext->rio_addr; /* limited to 64-bit rio_addr for now */ | ||
665 | unsigned int i; | ||
666 | u32 sys_size = dma_to_mport(dchan->device)->sys_size; | ||
667 | enum dma_rtype rtype; | ||
668 | |||
669 | if (!sgl || !sg_len) { | ||
670 | dev_err(dchan->device->dev, "%s: No SG list\n", __func__); | ||
671 | return NULL; | ||
672 | } | ||
673 | |||
674 | if (dir == DMA_DEV_TO_MEM) | ||
675 | rtype = NREAD; | ||
676 | else if (dir == DMA_MEM_TO_DEV) { | ||
677 | switch (rext->wr_type) { | ||
678 | case RDW_ALL_NWRITE: | ||
679 | rtype = ALL_NWRITE; | ||
680 | break; | ||
681 | case RDW_ALL_NWRITE_R: | ||
682 | rtype = ALL_NWRITE_R; | ||
683 | break; | ||
684 | case RDW_LAST_NWRITE_R: | ||
685 | default: | ||
686 | rtype = LAST_NWRITE_R; | ||
687 | break; | ||
688 | } | ||
689 | } else { | ||
690 | dev_err(dchan->device->dev, | ||
691 | "%s: Unsupported DMA direction option\n", __func__); | ||
692 | return NULL; | ||
693 | } | ||
694 | |||
695 | for_each_sg(sgl, sg, sg_len, i) { | ||
696 | int err; | ||
697 | |||
698 | dev_dbg(dchan->device->dev, "%s: sg #%d\n", __func__, i); | ||
699 | desc = tsi721_desc_get(bdma_chan); | ||
700 | if (!desc) { | ||
701 | dev_err(dchan->device->dev, | ||
702 | "Not enough descriptors available\n"); | ||
703 | goto err_desc_get; | ||
704 | } | ||
705 | |||
706 | if (sg_is_last(sg)) | ||
707 | desc->interrupt = (flags & DMA_PREP_INTERRUPT) != 0; | ||
708 | else | ||
709 | desc->interrupt = false; | ||
710 | |||
711 | desc->destid = rext->destid; | ||
712 | desc->rio_addr = rio_addr; | ||
713 | desc->rio_addr_u = 0; | ||
714 | |||
715 | err = tsi721_fill_desc(bdma_chan, desc, sg, rtype, sys_size); | ||
716 | if (err) { | ||
717 | dev_err(dchan->device->dev, | ||
718 | "Failed to build desc: %d\n", err); | ||
719 | goto err_desc_get; | ||
720 | } | ||
721 | |||
722 | rio_addr += sg_dma_len(sg); | ||
723 | |||
724 | if (!first) | ||
725 | first = desc; | ||
726 | else | ||
727 | list_add_tail(&desc->desc_node, &first->tx_list); | ||
728 | } | ||
729 | |||
730 | first->txd.cookie = -EBUSY; | ||
731 | desc->txd.flags = flags; | ||
732 | |||
733 | return &first->txd; | ||
734 | |||
735 | err_desc_get: | ||
736 | tsi721_desc_put(bdma_chan, first); | ||
737 | return NULL; | ||
738 | } | ||
739 | |||
740 | static int tsi721_device_control(struct dma_chan *dchan, enum dma_ctrl_cmd cmd, | ||
741 | unsigned long arg) | ||
742 | { | ||
743 | struct tsi721_bdma_chan *bdma_chan = to_tsi721_chan(dchan); | ||
744 | struct tsi721_tx_desc *desc, *_d; | ||
745 | LIST_HEAD(list); | ||
746 | |||
747 | dev_dbg(dchan->device->dev, "%s: Entry\n", __func__); | ||
748 | |||
749 | if (cmd != DMA_TERMINATE_ALL) | ||
750 | return -ENXIO; | ||
751 | |||
752 | spin_lock_bh(&bdma_chan->lock); | ||
753 | |||
754 | /* make sure to stop the transfer */ | ||
755 | iowrite32(TSI721_DMAC_CTL_SUSP, bdma_chan->regs + TSI721_DMAC_CTL); | ||
756 | |||
757 | list_splice_init(&bdma_chan->active_list, &list); | ||
758 | list_splice_init(&bdma_chan->queue, &list); | ||
759 | |||
760 | list_for_each_entry_safe(desc, _d, &list, desc_node) | ||
761 | tsi721_dma_chain_complete(bdma_chan, desc); | ||
762 | |||
763 | spin_unlock_bh(&bdma_chan->lock); | ||
764 | |||
765 | return 0; | ||
766 | } | ||
767 | |||
768 | int tsi721_register_dma(struct tsi721_device *priv) | ||
769 | { | ||
770 | int i; | ||
771 | int nr_channels = TSI721_DMA_MAXCH; | ||
772 | int err; | ||
773 | struct rio_mport *mport = priv->mport; | ||
774 | |||
775 | mport->dma.dev = &priv->pdev->dev; | ||
776 | mport->dma.chancnt = nr_channels; | ||
777 | |||
778 | INIT_LIST_HEAD(&mport->dma.channels); | ||
779 | |||
780 | for (i = 0; i < nr_channels; i++) { | ||
781 | struct tsi721_bdma_chan *bdma_chan = &priv->bdma[i]; | ||
782 | |||
783 | if (i == TSI721_DMACH_MAINT) | ||
784 | continue; | ||
785 | |||
786 | bdma_chan->bd_num = 64; | ||
787 | bdma_chan->regs = priv->regs + TSI721_DMAC_BASE(i); | ||
788 | |||
789 | bdma_chan->dchan.device = &mport->dma; | ||
790 | bdma_chan->dchan.cookie = 1; | ||
791 | bdma_chan->dchan.chan_id = i; | ||
792 | bdma_chan->id = i; | ||
793 | |||
794 | spin_lock_init(&bdma_chan->lock); | ||
795 | |||
796 | INIT_LIST_HEAD(&bdma_chan->active_list); | ||
797 | INIT_LIST_HEAD(&bdma_chan->queue); | ||
798 | INIT_LIST_HEAD(&bdma_chan->free_list); | ||
799 | |||
800 | tasklet_init(&bdma_chan->tasklet, tsi721_dma_tasklet, | ||
801 | (unsigned long)bdma_chan); | ||
802 | tasklet_disable(&bdma_chan->tasklet); | ||
803 | list_add_tail(&bdma_chan->dchan.device_node, | ||
804 | &mport->dma.channels); | ||
805 | } | ||
806 | |||
807 | dma_cap_zero(mport->dma.cap_mask); | ||
808 | dma_cap_set(DMA_PRIVATE, mport->dma.cap_mask); | ||
809 | dma_cap_set(DMA_SLAVE, mport->dma.cap_mask); | ||
810 | |||
811 | mport->dma.device_alloc_chan_resources = tsi721_alloc_chan_resources; | ||
812 | mport->dma.device_free_chan_resources = tsi721_free_chan_resources; | ||
813 | mport->dma.device_tx_status = tsi721_tx_status; | ||
814 | mport->dma.device_issue_pending = tsi721_issue_pending; | ||
815 | mport->dma.device_prep_slave_sg = tsi721_prep_rio_sg; | ||
816 | mport->dma.device_control = tsi721_device_control; | ||
817 | |||
818 | err = dma_async_device_register(&mport->dma); | ||
819 | if (err) | ||
820 | dev_err(&priv->pdev->dev, "Failed to register DMA device\n"); | ||
821 | |||
822 | return err; | ||
823 | } | ||
diff --git a/drivers/rapidio/rio-scan.c b/drivers/rapidio/rio-scan.c index a965acd3c0e..ebe77dd87da 100644 --- a/drivers/rapidio/rio-scan.c +++ b/drivers/rapidio/rio-scan.c | |||
@@ -31,21 +31,27 @@ | |||
31 | #include <linux/module.h> | 31 | #include <linux/module.h> |
32 | #include <linux/spinlock.h> | 32 | #include <linux/spinlock.h> |
33 | #include <linux/timer.h> | 33 | #include <linux/timer.h> |
34 | #include <linux/sched.h> | ||
35 | #include <linux/jiffies.h> | 34 | #include <linux/jiffies.h> |
36 | #include <linux/slab.h> | 35 | #include <linux/slab.h> |
37 | 36 | ||
38 | #include "rio.h" | 37 | #include "rio.h" |
39 | 38 | ||
40 | LIST_HEAD(rio_devices); | 39 | LIST_HEAD(rio_devices); |
40 | static LIST_HEAD(rio_switches); | ||
41 | |||
42 | static void rio_enum_timeout(unsigned long); | ||
41 | 43 | ||
42 | static void rio_init_em(struct rio_dev *rdev); | 44 | static void rio_init_em(struct rio_dev *rdev); |
43 | 45 | ||
44 | DEFINE_SPINLOCK(rio_global_list_lock); | 46 | DEFINE_SPINLOCK(rio_global_list_lock); |
45 | 47 | ||
46 | static int next_destid = 0; | 48 | static int next_destid = 0; |
49 | static int next_net = 0; | ||
47 | static int next_comptag = 1; | 50 | static int next_comptag = 1; |
48 | 51 | ||
52 | static struct timer_list rio_enum_timer = | ||
53 | TIMER_INITIALIZER(rio_enum_timeout, 0, 0); | ||
54 | |||
49 | static int rio_mport_phys_table[] = { | 55 | static int rio_mport_phys_table[] = { |
50 | RIO_EFB_PAR_EP_ID, | 56 | RIO_EFB_PAR_EP_ID, |
51 | RIO_EFB_PAR_EP_REC_ID, | 57 | RIO_EFB_PAR_EP_REC_ID, |
@@ -54,109 +60,6 @@ static int rio_mport_phys_table[] = { | |||
54 | -1, | 60 | -1, |
55 | }; | 61 | }; |
56 | 62 | ||
57 | |||
58 | /** | ||
59 | * rio_destid_alloc - Allocate next available destID for given network | ||
60 | * @net: RIO network | ||
61 | * | ||
62 | * Returns next available device destination ID for the specified RIO network. | ||
63 | * Marks allocated ID as one in use. | ||
64 | * Returns RIO_INVALID_DESTID if new destID is not available. | ||
65 | */ | ||
66 | static u16 rio_destid_alloc(struct rio_net *net) | ||
67 | { | ||
68 | int destid; | ||
69 | struct rio_id_table *idtab = &net->destid_table; | ||
70 | |||
71 | spin_lock(&idtab->lock); | ||
72 | destid = find_first_zero_bit(idtab->table, idtab->max); | ||
73 | |||
74 | if (destid < idtab->max) { | ||
75 | set_bit(destid, idtab->table); | ||
76 | destid += idtab->start; | ||
77 | } else | ||
78 | destid = RIO_INVALID_DESTID; | ||
79 | |||
80 | spin_unlock(&idtab->lock); | ||
81 | return (u16)destid; | ||
82 | } | ||
83 | |||
84 | /** | ||
85 | * rio_destid_reserve - Reserve the specivied destID | ||
86 | * @net: RIO network | ||
87 | * @destid: destID to reserve | ||
88 | * | ||
89 | * Tries to reserve the specified destID. | ||
90 | * Returns 0 if successfull. | ||
91 | */ | ||
92 | static int rio_destid_reserve(struct rio_net *net, u16 destid) | ||
93 | { | ||
94 | int oldbit; | ||
95 | struct rio_id_table *idtab = &net->destid_table; | ||
96 | |||
97 | destid -= idtab->start; | ||
98 | spin_lock(&idtab->lock); | ||
99 | oldbit = test_and_set_bit(destid, idtab->table); | ||
100 | spin_unlock(&idtab->lock); | ||
101 | return oldbit; | ||
102 | } | ||
103 | |||
104 | /** | ||
105 | * rio_destid_free - free a previously allocated destID | ||
106 | * @net: RIO network | ||
107 | * @destid: destID to free | ||
108 | * | ||
109 | * Makes the specified destID available for use. | ||
110 | */ | ||
111 | static void rio_destid_free(struct rio_net *net, u16 destid) | ||
112 | { | ||
113 | struct rio_id_table *idtab = &net->destid_table; | ||
114 | |||
115 | destid -= idtab->start; | ||
116 | spin_lock(&idtab->lock); | ||
117 | clear_bit(destid, idtab->table); | ||
118 | spin_unlock(&idtab->lock); | ||
119 | } | ||
120 | |||
121 | /** | ||
122 | * rio_destid_first - return first destID in use | ||
123 | * @net: RIO network | ||
124 | */ | ||
125 | static u16 rio_destid_first(struct rio_net *net) | ||
126 | { | ||
127 | int destid; | ||
128 | struct rio_id_table *idtab = &net->destid_table; | ||
129 | |||
130 | spin_lock(&idtab->lock); | ||
131 | destid = find_first_bit(idtab->table, idtab->max); | ||
132 | if (destid >= idtab->max) | ||
133 | destid = RIO_INVALID_DESTID; | ||
134 | else | ||
135 | destid += idtab->start; | ||
136 | spin_unlock(&idtab->lock); | ||
137 | return (u16)destid; | ||
138 | } | ||
139 | |||
140 | /** | ||
141 | * rio_destid_next - return next destID in use | ||
142 | * @net: RIO network | ||
143 | * @from: destination ID from which search shall continue | ||
144 | */ | ||
145 | static u16 rio_destid_next(struct rio_net *net, u16 from) | ||
146 | { | ||
147 | int destid; | ||
148 | struct rio_id_table *idtab = &net->destid_table; | ||
149 | |||
150 | spin_lock(&idtab->lock); | ||
151 | destid = find_next_bit(idtab->table, idtab->max, from); | ||
152 | if (destid >= idtab->max) | ||
153 | destid = RIO_INVALID_DESTID; | ||
154 | else | ||
155 | destid += idtab->start; | ||
156 | spin_unlock(&idtab->lock); | ||
157 | return (u16)destid; | ||
158 | } | ||
159 | |||
160 | /** | 63 | /** |
161 | * rio_get_device_id - Get the base/extended device id for a device | 64 | * rio_get_device_id - Get the base/extended device id for a device |
162 | * @port: RIO master port | 65 | * @port: RIO master port |
@@ -205,15 +108,14 @@ static void rio_local_set_device_id(struct rio_mport *port, u16 did) | |||
205 | 108 | ||
206 | /** | 109 | /** |
207 | * rio_clear_locks- Release all host locks and signal enumeration complete | 110 | * rio_clear_locks- Release all host locks and signal enumeration complete |
208 | * @net: RIO network to run on | 111 | * @port: Master port to issue transaction |
209 | * | 112 | * |
210 | * Marks the component tag CSR on each device with the enumeration | 113 | * Marks the component tag CSR on each device with the enumeration |
211 | * complete flag. When complete, it then release the host locks on | 114 | * complete flag. When complete, it then release the host locks on |
212 | * each device. Returns 0 on success or %-EINVAL on failure. | 115 | * each device. Returns 0 on success or %-EINVAL on failure. |
213 | */ | 116 | */ |
214 | static int rio_clear_locks(struct rio_net *net) | 117 | static int rio_clear_locks(struct rio_mport *port) |
215 | { | 118 | { |
216 | struct rio_mport *port = net->hport; | ||
217 | struct rio_dev *rdev; | 119 | struct rio_dev *rdev; |
218 | u32 result; | 120 | u32 result; |
219 | int ret = 0; | 121 | int ret = 0; |
@@ -228,7 +130,7 @@ static int rio_clear_locks(struct rio_net *net) | |||
228 | result); | 130 | result); |
229 | ret = -EINVAL; | 131 | ret = -EINVAL; |
230 | } | 132 | } |
231 | list_for_each_entry(rdev, &net->devices, net_list) { | 133 | list_for_each_entry(rdev, &rio_devices, global_list) { |
232 | rio_write_config_32(rdev, RIO_HOST_DID_LOCK_CSR, | 134 | rio_write_config_32(rdev, RIO_HOST_DID_LOCK_CSR, |
233 | port->host_deviceid); | 135 | port->host_deviceid); |
234 | rio_read_config_32(rdev, RIO_HOST_DID_LOCK_CSR, &result); | 136 | rio_read_config_32(rdev, RIO_HOST_DID_LOCK_CSR, &result); |
@@ -274,6 +176,10 @@ static int rio_enum_host(struct rio_mport *port) | |||
274 | 176 | ||
275 | /* Set master port destid and init destid ctr */ | 177 | /* Set master port destid and init destid ctr */ |
276 | rio_local_set_device_id(port, port->host_deviceid); | 178 | rio_local_set_device_id(port, port->host_deviceid); |
179 | |||
180 | if (next_destid == port->host_deviceid) | ||
181 | next_destid++; | ||
182 | |||
277 | return 0; | 183 | return 0; |
278 | } | 184 | } |
279 | 185 | ||
@@ -371,7 +277,7 @@ static void rio_switch_init(struct rio_dev *rdev, int do_enum) | |||
371 | * device to the RIO device list. Creates the generic sysfs nodes | 277 | * device to the RIO device list. Creates the generic sysfs nodes |
372 | * for an RIO device. | 278 | * for an RIO device. |
373 | */ | 279 | */ |
374 | static int rio_add_device(struct rio_dev *rdev) | 280 | static int __devinit rio_add_device(struct rio_dev *rdev) |
375 | { | 281 | { |
376 | int err; | 282 | int err; |
377 | 283 | ||
@@ -463,7 +369,7 @@ inline int rio_enable_rx_tx_port(struct rio_mport *port, | |||
463 | * to a RIO device on success or NULL on failure. | 369 | * to a RIO device on success or NULL on failure. |
464 | * | 370 | * |
465 | */ | 371 | */ |
466 | static struct rio_dev *rio_setup_device(struct rio_net *net, | 372 | static struct rio_dev __devinit *rio_setup_device(struct rio_net *net, |
467 | struct rio_mport *port, u16 destid, | 373 | struct rio_mport *port, u16 destid, |
468 | u8 hopcount, int do_enum) | 374 | u8 hopcount, int do_enum) |
469 | { | 375 | { |
@@ -540,8 +446,9 @@ static struct rio_dev *rio_setup_device(struct rio_net *net, | |||
540 | if (rio_device_has_destid(port, rdev->src_ops, rdev->dst_ops)) { | 446 | if (rio_device_has_destid(port, rdev->src_ops, rdev->dst_ops)) { |
541 | if (do_enum) { | 447 | if (do_enum) { |
542 | rio_set_device_id(port, destid, hopcount, next_destid); | 448 | rio_set_device_id(port, destid, hopcount, next_destid); |
543 | rdev->destid = next_destid; | 449 | rdev->destid = next_destid++; |
544 | next_destid = rio_destid_alloc(net); | 450 | if (next_destid == port->host_deviceid) |
451 | next_destid++; | ||
545 | } else | 452 | } else |
546 | rdev->destid = rio_get_device_id(port, destid, hopcount); | 453 | rdev->destid = rio_get_device_id(port, destid, hopcount); |
547 | 454 | ||
@@ -576,7 +483,7 @@ static struct rio_dev *rio_setup_device(struct rio_net *net, | |||
576 | rswitch->clr_table(port, destid, hopcount, | 483 | rswitch->clr_table(port, destid, hopcount, |
577 | RIO_GLOBAL_TABLE); | 484 | RIO_GLOBAL_TABLE); |
578 | 485 | ||
579 | list_add_tail(&rswitch->node, &net->switches); | 486 | list_add_tail(&rswitch->node, &rio_switches); |
580 | 487 | ||
581 | } else { | 488 | } else { |
582 | if (do_enum) | 489 | if (do_enum) |
@@ -609,7 +516,7 @@ static struct rio_dev *rio_setup_device(struct rio_net *net, | |||
609 | return rdev; | 516 | return rdev; |
610 | 517 | ||
611 | cleanup: | 518 | cleanup: |
612 | if (rswitch) | 519 | if (rio_is_switch(rdev)) |
613 | kfree(rswitch->route_table); | 520 | kfree(rswitch->route_table); |
614 | 521 | ||
615 | kfree(rdev); | 522 | kfree(rdev); |
@@ -837,10 +744,15 @@ static u16 rio_get_host_deviceid_lock(struct rio_mport *port, u8 hopcount) | |||
837 | * Recursively enumerates a RIO network. Transactions are sent via the | 744 | * Recursively enumerates a RIO network. Transactions are sent via the |
838 | * master port passed in @port. | 745 | * master port passed in @port. |
839 | */ | 746 | */ |
840 | static int rio_enum_peer(struct rio_net *net, struct rio_mport *port, | 747 | static int __devinit rio_enum_peer(struct rio_net *net, struct rio_mport *port, |
841 | u8 hopcount, struct rio_dev *prev, int prev_port) | 748 | u8 hopcount, struct rio_dev *prev, int prev_port) |
842 | { | 749 | { |
750 | int port_num; | ||
751 | int cur_destid; | ||
752 | int sw_destid; | ||
753 | int sw_inport; | ||
843 | struct rio_dev *rdev; | 754 | struct rio_dev *rdev; |
755 | u16 destid; | ||
844 | u32 regval; | 756 | u32 regval; |
845 | int tmp; | 757 | int tmp; |
846 | 758 | ||
@@ -906,26 +818,19 @@ static int rio_enum_peer(struct rio_net *net, struct rio_mport *port, | |||
906 | return -1; | 818 | return -1; |
907 | 819 | ||
908 | if (rio_is_switch(rdev)) { | 820 | if (rio_is_switch(rdev)) { |
909 | int sw_destid; | ||
910 | int cur_destid; | ||
911 | int sw_inport; | ||
912 | u16 destid; | ||
913 | int port_num; | ||
914 | |||
915 | sw_inport = RIO_GET_PORT_NUM(rdev->swpinfo); | 821 | sw_inport = RIO_GET_PORT_NUM(rdev->swpinfo); |
916 | rio_route_add_entry(rdev, RIO_GLOBAL_TABLE, | 822 | rio_route_add_entry(rdev, RIO_GLOBAL_TABLE, |
917 | port->host_deviceid, sw_inport, 0); | 823 | port->host_deviceid, sw_inport, 0); |
918 | rdev->rswitch->route_table[port->host_deviceid] = sw_inport; | 824 | rdev->rswitch->route_table[port->host_deviceid] = sw_inport; |
919 | 825 | ||
920 | destid = rio_destid_first(net); | 826 | for (destid = 0; destid < next_destid; destid++) { |
921 | while (destid != RIO_INVALID_DESTID && destid < next_destid) { | 827 | if (destid == port->host_deviceid) |
922 | if (destid != port->host_deviceid) { | 828 | continue; |
923 | rio_route_add_entry(rdev, RIO_GLOBAL_TABLE, | 829 | rio_route_add_entry(rdev, RIO_GLOBAL_TABLE, |
924 | destid, sw_inport, 0); | 830 | destid, sw_inport, 0); |
925 | rdev->rswitch->route_table[destid] = sw_inport; | 831 | rdev->rswitch->route_table[destid] = sw_inport; |
926 | } | ||
927 | destid = rio_destid_next(net, destid + 1); | ||
928 | } | 832 | } |
833 | |||
929 | pr_debug( | 834 | pr_debug( |
930 | "RIO: found %s (vid %4.4x did %4.4x) with %d ports\n", | 835 | "RIO: found %s (vid %4.4x did %4.4x) with %d ports\n", |
931 | rio_name(rdev), rdev->vid, rdev->did, | 836 | rio_name(rdev), rdev->vid, rdev->did, |
@@ -934,10 +839,12 @@ static int rio_enum_peer(struct rio_net *net, struct rio_mport *port, | |||
934 | for (port_num = 0; | 839 | for (port_num = 0; |
935 | port_num < RIO_GET_TOTAL_PORTS(rdev->swpinfo); | 840 | port_num < RIO_GET_TOTAL_PORTS(rdev->swpinfo); |
936 | port_num++) { | 841 | port_num++) { |
937 | if (sw_inport == port_num) { | 842 | /*Enable Input Output Port (transmitter reviever)*/ |
938 | rio_enable_rx_tx_port(port, 0, | 843 | rio_enable_rx_tx_port(port, 0, |
939 | RIO_ANY_DESTID(port->sys_size), | 844 | RIO_ANY_DESTID(port->sys_size), |
940 | hopcount, port_num); | 845 | hopcount, port_num); |
846 | |||
847 | if (sw_inport == port_num) { | ||
941 | rdev->rswitch->port_ok |= (1 << port_num); | 848 | rdev->rswitch->port_ok |= (1 << port_num); |
942 | continue; | 849 | continue; |
943 | } | 850 | } |
@@ -950,9 +857,6 @@ static int rio_enum_peer(struct rio_net *net, struct rio_mport *port, | |||
950 | pr_debug( | 857 | pr_debug( |
951 | "RIO: scanning device on port %d\n", | 858 | "RIO: scanning device on port %d\n", |
952 | port_num); | 859 | port_num); |
953 | rio_enable_rx_tx_port(port, 0, | ||
954 | RIO_ANY_DESTID(port->sys_size), | ||
955 | hopcount, port_num); | ||
956 | rdev->rswitch->port_ok |= (1 << port_num); | 860 | rdev->rswitch->port_ok |= (1 << port_num); |
957 | rio_route_add_entry(rdev, RIO_GLOBAL_TABLE, | 861 | rio_route_add_entry(rdev, RIO_GLOBAL_TABLE, |
958 | RIO_ANY_DESTID(port->sys_size), | 862 | RIO_ANY_DESTID(port->sys_size), |
@@ -963,22 +867,19 @@ static int rio_enum_peer(struct rio_net *net, struct rio_mport *port, | |||
963 | return -1; | 867 | return -1; |
964 | 868 | ||
965 | /* Update routing tables */ | 869 | /* Update routing tables */ |
966 | destid = rio_destid_next(net, cur_destid + 1); | 870 | if (next_destid > cur_destid) { |
967 | if (destid != RIO_INVALID_DESTID) { | ||
968 | for (destid = cur_destid; | 871 | for (destid = cur_destid; |
969 | destid < next_destid;) { | 872 | destid < next_destid; destid++) { |
970 | if (destid != port->host_deviceid) { | 873 | if (destid == port->host_deviceid) |
971 | rio_route_add_entry(rdev, | 874 | continue; |
875 | rio_route_add_entry(rdev, | ||
972 | RIO_GLOBAL_TABLE, | 876 | RIO_GLOBAL_TABLE, |
973 | destid, | 877 | destid, |
974 | port_num, | 878 | port_num, |
975 | 0); | 879 | 0); |
976 | rdev->rswitch-> | 880 | rdev->rswitch-> |
977 | route_table[destid] = | 881 | route_table[destid] = |
978 | port_num; | 882 | port_num; |
979 | } | ||
980 | destid = rio_destid_next(net, | ||
981 | destid + 1); | ||
982 | } | 883 | } |
983 | } | 884 | } |
984 | } else { | 885 | } else { |
@@ -1004,8 +905,11 @@ static int rio_enum_peer(struct rio_net *net, struct rio_mport *port, | |||
1004 | rio_init_em(rdev); | 905 | rio_init_em(rdev); |
1005 | 906 | ||
1006 | /* Check for empty switch */ | 907 | /* Check for empty switch */ |
1007 | if (next_destid == sw_destid) | 908 | if (next_destid == sw_destid) { |
1008 | next_destid = rio_destid_alloc(net); | 909 | next_destid++; |
910 | if (next_destid == port->host_deviceid) | ||
911 | next_destid++; | ||
912 | } | ||
1009 | 913 | ||
1010 | rdev->destid = sw_destid; | 914 | rdev->destid = sw_destid; |
1011 | } else | 915 | } else |
@@ -1019,7 +923,7 @@ static int rio_enum_peer(struct rio_net *net, struct rio_mport *port, | |||
1019 | * rio_enum_complete- Tests if enumeration of a network is complete | 923 | * rio_enum_complete- Tests if enumeration of a network is complete |
1020 | * @port: Master port to send transaction | 924 | * @port: Master port to send transaction |
1021 | * | 925 | * |
1022 | * Tests the PGCCSR discovered bit for non-zero value (enumeration | 926 | * Tests the Component Tag CSR for non-zero value (enumeration |
1023 | * complete flag). Return %1 if enumeration is complete or %0 if | 927 | * complete flag). Return %1 if enumeration is complete or %0 if |
1024 | * enumeration is incomplete. | 928 | * enumeration is incomplete. |
1025 | */ | 929 | */ |
@@ -1029,7 +933,7 @@ static int rio_enum_complete(struct rio_mport *port) | |||
1029 | 933 | ||
1030 | rio_local_read_config_32(port, port->phys_efptr + RIO_PORT_GEN_CTL_CSR, | 934 | rio_local_read_config_32(port, port->phys_efptr + RIO_PORT_GEN_CTL_CSR, |
1031 | ®val); | 935 | ®val); |
1032 | return (regval & RIO_PORT_GEN_DISCOVERED) ? 1 : 0; | 936 | return (regval & RIO_PORT_GEN_MASTER) ? 1 : 0; |
1033 | } | 937 | } |
1034 | 938 | ||
1035 | /** | 939 | /** |
@@ -1044,7 +948,7 @@ static int rio_enum_complete(struct rio_mport *port) | |||
1044 | * Recursively discovers a RIO network. Transactions are sent via the | 948 | * Recursively discovers a RIO network. Transactions are sent via the |
1045 | * master port passed in @port. | 949 | * master port passed in @port. |
1046 | */ | 950 | */ |
1047 | static int | 951 | static int __devinit |
1048 | rio_disc_peer(struct rio_net *net, struct rio_mport *port, u16 destid, | 952 | rio_disc_peer(struct rio_net *net, struct rio_mport *port, u16 destid, |
1049 | u8 hopcount, struct rio_dev *prev, int prev_port) | 953 | u8 hopcount, struct rio_dev *prev, int prev_port) |
1050 | { | 954 | { |
@@ -1143,70 +1047,48 @@ static int rio_mport_is_active(struct rio_mport *port) | |||
1143 | /** | 1047 | /** |
1144 | * rio_alloc_net- Allocate and configure a new RIO network | 1048 | * rio_alloc_net- Allocate and configure a new RIO network |
1145 | * @port: Master port associated with the RIO network | 1049 | * @port: Master port associated with the RIO network |
1146 | * @do_enum: Enumeration/Discovery mode flag | ||
1147 | * @start: logical minimal start id for new net | ||
1148 | * | 1050 | * |
1149 | * Allocates a RIO network structure, initializes per-network | 1051 | * Allocates a RIO network structure, initializes per-network |
1150 | * list heads, and adds the associated master port to the | 1052 | * list heads, and adds the associated master port to the |
1151 | * network list of associated master ports. Returns a | 1053 | * network list of associated master ports. Returns a |
1152 | * RIO network pointer on success or %NULL on failure. | 1054 | * RIO network pointer on success or %NULL on failure. |
1153 | */ | 1055 | */ |
1154 | static struct rio_net *rio_alloc_net(struct rio_mport *port, | 1056 | static struct rio_net __devinit *rio_alloc_net(struct rio_mport *port) |
1155 | int do_enum, u16 start) | ||
1156 | { | 1057 | { |
1157 | struct rio_net *net; | 1058 | struct rio_net *net; |
1158 | 1059 | ||
1159 | net = kzalloc(sizeof(struct rio_net), GFP_KERNEL); | 1060 | net = kzalloc(sizeof(struct rio_net), GFP_KERNEL); |
1160 | if (net && do_enum) { | ||
1161 | net->destid_table.table = kcalloc( | ||
1162 | BITS_TO_LONGS(RIO_MAX_ROUTE_ENTRIES(port->sys_size)), | ||
1163 | sizeof(long), | ||
1164 | GFP_KERNEL); | ||
1165 | |||
1166 | if (net->destid_table.table == NULL) { | ||
1167 | pr_err("RIO: failed to allocate destID table\n"); | ||
1168 | kfree(net); | ||
1169 | net = NULL; | ||
1170 | } else { | ||
1171 | net->destid_table.start = start; | ||
1172 | net->destid_table.max = | ||
1173 | RIO_MAX_ROUTE_ENTRIES(port->sys_size); | ||
1174 | spin_lock_init(&net->destid_table.lock); | ||
1175 | } | ||
1176 | } | ||
1177 | |||
1178 | if (net) { | 1061 | if (net) { |
1179 | INIT_LIST_HEAD(&net->node); | 1062 | INIT_LIST_HEAD(&net->node); |
1180 | INIT_LIST_HEAD(&net->devices); | 1063 | INIT_LIST_HEAD(&net->devices); |
1181 | INIT_LIST_HEAD(&net->switches); | ||
1182 | INIT_LIST_HEAD(&net->mports); | 1064 | INIT_LIST_HEAD(&net->mports); |
1183 | list_add_tail(&port->nnode, &net->mports); | 1065 | list_add_tail(&port->nnode, &net->mports); |
1184 | net->hport = port; | 1066 | net->hport = port; |
1185 | net->id = port->id; | 1067 | net->id = next_net++; |
1186 | } | 1068 | } |
1187 | return net; | 1069 | return net; |
1188 | } | 1070 | } |
1189 | 1071 | ||
1190 | /** | 1072 | /** |
1191 | * rio_update_route_tables- Updates route tables in switches | 1073 | * rio_update_route_tables- Updates route tables in switches |
1192 | * @net: RIO network to run update on | 1074 | * @port: Master port associated with the RIO network |
1193 | * | 1075 | * |
1194 | * For each enumerated device, ensure that each switch in a system | 1076 | * For each enumerated device, ensure that each switch in a system |
1195 | * has correct routing entries. Add routes for devices that where | 1077 | * has correct routing entries. Add routes for devices that where |
1196 | * unknown dirung the first enumeration pass through the switch. | 1078 | * unknown dirung the first enumeration pass through the switch. |
1197 | */ | 1079 | */ |
1198 | static void rio_update_route_tables(struct rio_net *net) | 1080 | static void rio_update_route_tables(struct rio_mport *port) |
1199 | { | 1081 | { |
1200 | struct rio_dev *rdev, *swrdev; | 1082 | struct rio_dev *rdev, *swrdev; |
1201 | struct rio_switch *rswitch; | 1083 | struct rio_switch *rswitch; |
1202 | u8 sport; | 1084 | u8 sport; |
1203 | u16 destid; | 1085 | u16 destid; |
1204 | 1086 | ||
1205 | list_for_each_entry(rdev, &net->devices, net_list) { | 1087 | list_for_each_entry(rdev, &rio_devices, global_list) { |
1206 | 1088 | ||
1207 | destid = rdev->destid; | 1089 | destid = rdev->destid; |
1208 | 1090 | ||
1209 | list_for_each_entry(rswitch, &net->switches, node) { | 1091 | list_for_each_entry(rswitch, &rio_switches, node) { |
1210 | 1092 | ||
1211 | if (rio_is_switch(rdev) && (rdev->rswitch == rswitch)) | 1093 | if (rio_is_switch(rdev) && (rdev->rswitch == rswitch)) |
1212 | continue; | 1094 | continue; |
@@ -1266,7 +1148,7 @@ static void rio_pw_enable(struct rio_mport *port, int enable) | |||
1266 | * link, then start recursive peer enumeration. Returns %0 if | 1148 | * link, then start recursive peer enumeration. Returns %0 if |
1267 | * enumeration succeeds or %-EBUSY if enumeration fails. | 1149 | * enumeration succeeds or %-EBUSY if enumeration fails. |
1268 | */ | 1150 | */ |
1269 | int rio_enum_mport(struct rio_mport *mport) | 1151 | int __devinit rio_enum_mport(struct rio_mport *mport) |
1270 | { | 1152 | { |
1271 | struct rio_net *net = NULL; | 1153 | struct rio_net *net = NULL; |
1272 | int rc = 0; | 1154 | int rc = 0; |
@@ -1284,16 +1166,12 @@ int rio_enum_mport(struct rio_mport *mport) | |||
1284 | 1166 | ||
1285 | /* If master port has an active link, allocate net and enum peers */ | 1167 | /* If master port has an active link, allocate net and enum peers */ |
1286 | if (rio_mport_is_active(mport)) { | 1168 | if (rio_mport_is_active(mport)) { |
1287 | net = rio_alloc_net(mport, 1, 0); | 1169 | if (!(net = rio_alloc_net(mport))) { |
1288 | if (!net) { | ||
1289 | printk(KERN_ERR "RIO: failed to allocate new net\n"); | 1170 | printk(KERN_ERR "RIO: failed to allocate new net\n"); |
1290 | rc = -ENOMEM; | 1171 | rc = -ENOMEM; |
1291 | goto out; | 1172 | goto out; |
1292 | } | 1173 | } |
1293 | 1174 | ||
1294 | /* reserve mport destID in new net */ | ||
1295 | rio_destid_reserve(net, mport->host_deviceid); | ||
1296 | |||
1297 | /* Enable Input Output Port (transmitter reviever) */ | 1175 | /* Enable Input Output Port (transmitter reviever) */ |
1298 | rio_enable_rx_tx_port(mport, 1, 0, 0, 0); | 1176 | rio_enable_rx_tx_port(mport, 1, 0, 0, 0); |
1299 | 1177 | ||
@@ -1301,21 +1179,17 @@ int rio_enum_mport(struct rio_mport *mport) | |||
1301 | rio_local_write_config_32(mport, RIO_COMPONENT_TAG_CSR, | 1179 | rio_local_write_config_32(mport, RIO_COMPONENT_TAG_CSR, |
1302 | next_comptag++); | 1180 | next_comptag++); |
1303 | 1181 | ||
1304 | next_destid = rio_destid_alloc(net); | ||
1305 | |||
1306 | if (rio_enum_peer(net, mport, 0, NULL, 0) < 0) { | 1182 | if (rio_enum_peer(net, mport, 0, NULL, 0) < 0) { |
1307 | /* A higher priority host won enumeration, bail. */ | 1183 | /* A higher priority host won enumeration, bail. */ |
1308 | printk(KERN_INFO | 1184 | printk(KERN_INFO |
1309 | "RIO: master port %d device has lost enumeration to a remote host\n", | 1185 | "RIO: master port %d device has lost enumeration to a remote host\n", |
1310 | mport->id); | 1186 | mport->id); |
1311 | rio_clear_locks(net); | 1187 | rio_clear_locks(mport); |
1312 | rc = -EBUSY; | 1188 | rc = -EBUSY; |
1313 | goto out; | 1189 | goto out; |
1314 | } | 1190 | } |
1315 | /* free the last allocated destID (unused) */ | 1191 | rio_update_route_tables(mport); |
1316 | rio_destid_free(net, next_destid); | 1192 | rio_clear_locks(mport); |
1317 | rio_update_route_tables(net); | ||
1318 | rio_clear_locks(net); | ||
1319 | rio_pw_enable(mport, 1); | 1193 | rio_pw_enable(mport, 1); |
1320 | } else { | 1194 | } else { |
1321 | printk(KERN_INFO "RIO: master port %d link inactive\n", | 1195 | printk(KERN_INFO "RIO: master port %d link inactive\n", |
@@ -1329,34 +1203,47 @@ int rio_enum_mport(struct rio_mport *mport) | |||
1329 | 1203 | ||
1330 | /** | 1204 | /** |
1331 | * rio_build_route_tables- Generate route tables from switch route entries | 1205 | * rio_build_route_tables- Generate route tables from switch route entries |
1332 | * @net: RIO network to run route tables scan on | ||
1333 | * | 1206 | * |
1334 | * For each switch device, generate a route table by copying existing | 1207 | * For each switch device, generate a route table by copying existing |
1335 | * route entries from the switch. | 1208 | * route entries from the switch. |
1336 | */ | 1209 | */ |
1337 | static void rio_build_route_tables(struct rio_net *net) | 1210 | static void rio_build_route_tables(void) |
1338 | { | 1211 | { |
1339 | struct rio_switch *rswitch; | ||
1340 | struct rio_dev *rdev; | 1212 | struct rio_dev *rdev; |
1341 | int i; | 1213 | int i; |
1342 | u8 sport; | 1214 | u8 sport; |
1343 | 1215 | ||
1344 | list_for_each_entry(rswitch, &net->switches, node) { | 1216 | list_for_each_entry(rdev, &rio_devices, global_list) |
1345 | rdev = sw_to_rio_dev(rswitch); | 1217 | if (rio_is_switch(rdev)) { |
1218 | rio_lock_device(rdev->net->hport, rdev->destid, | ||
1219 | rdev->hopcount, 1000); | ||
1220 | for (i = 0; | ||
1221 | i < RIO_MAX_ROUTE_ENTRIES(rdev->net->hport->sys_size); | ||
1222 | i++) { | ||
1223 | if (rio_route_get_entry(rdev, | ||
1224 | RIO_GLOBAL_TABLE, i, &sport, 0) < 0) | ||
1225 | continue; | ||
1226 | rdev->rswitch->route_table[i] = sport; | ||
1227 | } | ||
1346 | 1228 | ||
1347 | rio_lock_device(net->hport, rdev->destid, | 1229 | rio_unlock_device(rdev->net->hport, |
1348 | rdev->hopcount, 1000); | 1230 | rdev->destid, |
1349 | for (i = 0; | 1231 | rdev->hopcount); |
1350 | i < RIO_MAX_ROUTE_ENTRIES(net->hport->sys_size); | ||
1351 | i++) { | ||
1352 | if (rio_route_get_entry(rdev, RIO_GLOBAL_TABLE, | ||
1353 | i, &sport, 0) < 0) | ||
1354 | continue; | ||
1355 | rswitch->route_table[i] = sport; | ||
1356 | } | 1232 | } |
1233 | } | ||
1357 | 1234 | ||
1358 | rio_unlock_device(net->hport, rdev->destid, rdev->hopcount); | 1235 | /** |
1359 | } | 1236 | * rio_enum_timeout- Signal that enumeration timed out |
1237 | * @data: Address of timeout flag. | ||
1238 | * | ||
1239 | * When the enumeration complete timer expires, set a flag that | ||
1240 | * signals to the discovery process that enumeration did not | ||
1241 | * complete in a sane amount of time. | ||
1242 | */ | ||
1243 | static void rio_enum_timeout(unsigned long data) | ||
1244 | { | ||
1245 | /* Enumeration timed out, set flag */ | ||
1246 | *(int *)data = 1; | ||
1360 | } | 1247 | } |
1361 | 1248 | ||
1362 | /** | 1249 | /** |
@@ -1369,37 +1256,38 @@ static void rio_build_route_tables(struct rio_net *net) | |||
1369 | * peer discovery. Returns %0 if discovery succeeds or %-EBUSY | 1256 | * peer discovery. Returns %0 if discovery succeeds or %-EBUSY |
1370 | * on failure. | 1257 | * on failure. |
1371 | */ | 1258 | */ |
1372 | int rio_disc_mport(struct rio_mport *mport) | 1259 | int __devinit rio_disc_mport(struct rio_mport *mport) |
1373 | { | 1260 | { |
1374 | struct rio_net *net = NULL; | 1261 | struct rio_net *net = NULL; |
1375 | unsigned long to_end; | 1262 | int enum_timeout_flag = 0; |
1376 | 1263 | ||
1377 | printk(KERN_INFO "RIO: discover master port %d, %s\n", mport->id, | 1264 | printk(KERN_INFO "RIO: discover master port %d, %s\n", mport->id, |
1378 | mport->name); | 1265 | mport->name); |
1379 | 1266 | ||
1380 | /* If master port has an active link, allocate net and discover peers */ | 1267 | /* If master port has an active link, allocate net and discover peers */ |
1381 | if (rio_mport_is_active(mport)) { | 1268 | if (rio_mport_is_active(mport)) { |
1382 | pr_debug("RIO: wait for enumeration to complete...\n"); | 1269 | if (!(net = rio_alloc_net(mport))) { |
1383 | |||
1384 | to_end = jiffies + CONFIG_RAPIDIO_DISC_TIMEOUT * HZ; | ||
1385 | while (time_before(jiffies, to_end)) { | ||
1386 | if (rio_enum_complete(mport)) | ||
1387 | goto enum_done; | ||
1388 | msleep(10); | ||
1389 | } | ||
1390 | |||
1391 | pr_debug("RIO: discovery timeout on mport %d %s\n", | ||
1392 | mport->id, mport->name); | ||
1393 | goto bail; | ||
1394 | enum_done: | ||
1395 | pr_debug("RIO: ... enumeration done\n"); | ||
1396 | |||
1397 | net = rio_alloc_net(mport, 0, 0); | ||
1398 | if (!net) { | ||
1399 | printk(KERN_ERR "RIO: Failed to allocate new net\n"); | 1270 | printk(KERN_ERR "RIO: Failed to allocate new net\n"); |
1400 | goto bail; | 1271 | goto bail; |
1401 | } | 1272 | } |
1402 | 1273 | ||
1274 | pr_debug("RIO: wait for enumeration complete..."); | ||
1275 | |||
1276 | rio_enum_timer.expires = | ||
1277 | jiffies + CONFIG_RAPIDIO_DISC_TIMEOUT * HZ; | ||
1278 | rio_enum_timer.data = (unsigned long)&enum_timeout_flag; | ||
1279 | add_timer(&rio_enum_timer); | ||
1280 | while (!rio_enum_complete(mport)) { | ||
1281 | mdelay(1); | ||
1282 | if (enum_timeout_flag) { | ||
1283 | del_timer_sync(&rio_enum_timer); | ||
1284 | goto timeout; | ||
1285 | } | ||
1286 | } | ||
1287 | del_timer_sync(&rio_enum_timer); | ||
1288 | |||
1289 | pr_debug("done\n"); | ||
1290 | |||
1403 | /* Read DestID assigned by enumerator */ | 1291 | /* Read DestID assigned by enumerator */ |
1404 | rio_local_read_config_32(mport, RIO_DID_CSR, | 1292 | rio_local_read_config_32(mport, RIO_DID_CSR, |
1405 | &mport->host_deviceid); | 1293 | &mport->host_deviceid); |
@@ -1414,10 +1302,13 @@ enum_done: | |||
1414 | goto bail; | 1302 | goto bail; |
1415 | } | 1303 | } |
1416 | 1304 | ||
1417 | rio_build_route_tables(net); | 1305 | rio_build_route_tables(); |
1418 | } | 1306 | } |
1419 | 1307 | ||
1420 | return 0; | 1308 | return 0; |
1421 | bail: | 1309 | |
1310 | timeout: | ||
1311 | pr_debug("timeout\n"); | ||
1312 | bail: | ||
1422 | return -EBUSY; | 1313 | return -EBUSY; |
1423 | } | 1314 | } |
diff --git a/drivers/rapidio/rio.c b/drivers/rapidio/rio.c index d553b5d1372..86c9a091a2f 100644 --- a/drivers/rapidio/rio.c +++ b/drivers/rapidio/rio.c | |||
@@ -33,7 +33,6 @@ | |||
33 | 33 | ||
34 | static LIST_HEAD(rio_mports); | 34 | static LIST_HEAD(rio_mports); |
35 | static unsigned char next_portid; | 35 | static unsigned char next_portid; |
36 | static DEFINE_SPINLOCK(rio_mmap_lock); | ||
37 | 36 | ||
38 | /** | 37 | /** |
39 | * rio_local_get_device_id - Get the base/extended device id for a port | 38 | * rio_local_get_device_id - Get the base/extended device id for a port |
@@ -399,49 +398,6 @@ int rio_release_inb_pwrite(struct rio_dev *rdev) | |||
399 | EXPORT_SYMBOL_GPL(rio_release_inb_pwrite); | 398 | EXPORT_SYMBOL_GPL(rio_release_inb_pwrite); |
400 | 399 | ||
401 | /** | 400 | /** |
402 | * rio_map_inb_region -- Map inbound memory region. | ||
403 | * @mport: Master port. | ||
404 | * @local: physical address of memory region to be mapped | ||
405 | * @rbase: RIO base address assigned to this window | ||
406 | * @size: Size of the memory region | ||
407 | * @rflags: Flags for mapping. | ||
408 | * | ||
409 | * Return: 0 -- Success. | ||
410 | * | ||
411 | * This function will create the mapping from RIO space to local memory. | ||
412 | */ | ||
413 | int rio_map_inb_region(struct rio_mport *mport, dma_addr_t local, | ||
414 | u64 rbase, u32 size, u32 rflags) | ||
415 | { | ||
416 | int rc = 0; | ||
417 | unsigned long flags; | ||
418 | |||
419 | if (!mport->ops->map_inb) | ||
420 | return -1; | ||
421 | spin_lock_irqsave(&rio_mmap_lock, flags); | ||
422 | rc = mport->ops->map_inb(mport, local, rbase, size, rflags); | ||
423 | spin_unlock_irqrestore(&rio_mmap_lock, flags); | ||
424 | return rc; | ||
425 | } | ||
426 | EXPORT_SYMBOL_GPL(rio_map_inb_region); | ||
427 | |||
428 | /** | ||
429 | * rio_unmap_inb_region -- Unmap the inbound memory region | ||
430 | * @mport: Master port | ||
431 | * @lstart: physical address of memory region to be unmapped | ||
432 | */ | ||
433 | void rio_unmap_inb_region(struct rio_mport *mport, dma_addr_t lstart) | ||
434 | { | ||
435 | unsigned long flags; | ||
436 | if (!mport->ops->unmap_inb) | ||
437 | return; | ||
438 | spin_lock_irqsave(&rio_mmap_lock, flags); | ||
439 | mport->ops->unmap_inb(mport, lstart); | ||
440 | spin_unlock_irqrestore(&rio_mmap_lock, flags); | ||
441 | } | ||
442 | EXPORT_SYMBOL_GPL(rio_unmap_inb_region); | ||
443 | |||
444 | /** | ||
445 | * rio_mport_get_physefb - Helper function that returns register offset | 401 | * rio_mport_get_physefb - Helper function that returns register offset |
446 | * for Physical Layer Extended Features Block. | 402 | * for Physical Layer Extended Features Block. |
447 | * @port: Master port to issue transaction | 403 | * @port: Master port to issue transaction |
@@ -1165,92 +1121,11 @@ int rio_std_route_clr_table(struct rio_mport *mport, u16 destid, u8 hopcount, | |||
1165 | return 0; | 1121 | return 0; |
1166 | } | 1122 | } |
1167 | 1123 | ||
1168 | #ifdef CONFIG_RAPIDIO_DMA_ENGINE | ||
1169 | |||
1170 | static bool rio_chan_filter(struct dma_chan *chan, void *arg) | ||
1171 | { | ||
1172 | struct rio_dev *rdev = arg; | ||
1173 | |||
1174 | /* Check that DMA device belongs to the right MPORT */ | ||
1175 | return (rdev->net->hport == | ||
1176 | container_of(chan->device, struct rio_mport, dma)); | ||
1177 | } | ||
1178 | |||
1179 | /** | ||
1180 | * rio_request_dma - request RapidIO capable DMA channel that supports | ||
1181 | * specified target RapidIO device. | ||
1182 | * @rdev: RIO device control structure | ||
1183 | * | ||
1184 | * Returns pointer to allocated DMA channel or NULL if failed. | ||
1185 | */ | ||
1186 | struct dma_chan *rio_request_dma(struct rio_dev *rdev) | ||
1187 | { | ||
1188 | dma_cap_mask_t mask; | ||
1189 | struct dma_chan *dchan; | ||
1190 | |||
1191 | dma_cap_zero(mask); | ||
1192 | dma_cap_set(DMA_SLAVE, mask); | ||
1193 | dchan = dma_request_channel(mask, rio_chan_filter, rdev); | ||
1194 | |||
1195 | return dchan; | ||
1196 | } | ||
1197 | EXPORT_SYMBOL_GPL(rio_request_dma); | ||
1198 | |||
1199 | /** | ||
1200 | * rio_release_dma - release specified DMA channel | ||
1201 | * @dchan: DMA channel to release | ||
1202 | */ | ||
1203 | void rio_release_dma(struct dma_chan *dchan) | ||
1204 | { | ||
1205 | dma_release_channel(dchan); | ||
1206 | } | ||
1207 | EXPORT_SYMBOL_GPL(rio_release_dma); | ||
1208 | |||
1209 | /** | ||
1210 | * rio_dma_prep_slave_sg - RapidIO specific wrapper | ||
1211 | * for device_prep_slave_sg callback defined by DMAENGINE. | ||
1212 | * @rdev: RIO device control structure | ||
1213 | * @dchan: DMA channel to configure | ||
1214 | * @data: RIO specific data descriptor | ||
1215 | * @direction: DMA data transfer direction (TO or FROM the device) | ||
1216 | * @flags: dmaengine defined flags | ||
1217 | * | ||
1218 | * Initializes RapidIO capable DMA channel for the specified data transfer. | ||
1219 | * Uses DMA channel private extension to pass information related to remote | ||
1220 | * target RIO device. | ||
1221 | * Returns pointer to DMA transaction descriptor or NULL if failed. | ||
1222 | */ | ||
1223 | struct dma_async_tx_descriptor *rio_dma_prep_slave_sg(struct rio_dev *rdev, | ||
1224 | struct dma_chan *dchan, struct rio_dma_data *data, | ||
1225 | enum dma_transfer_direction direction, unsigned long flags) | ||
1226 | { | ||
1227 | struct dma_async_tx_descriptor *txd = NULL; | ||
1228 | struct rio_dma_ext rio_ext; | ||
1229 | |||
1230 | if (dchan->device->device_prep_slave_sg == NULL) { | ||
1231 | pr_err("%s: prep_rio_sg == NULL\n", __func__); | ||
1232 | return NULL; | ||
1233 | } | ||
1234 | |||
1235 | rio_ext.destid = rdev->destid; | ||
1236 | rio_ext.rio_addr_u = data->rio_addr_u; | ||
1237 | rio_ext.rio_addr = data->rio_addr; | ||
1238 | rio_ext.wr_type = data->wr_type; | ||
1239 | |||
1240 | txd = dmaengine_prep_rio_sg(dchan, data->sg, data->sg_len, | ||
1241 | direction, flags, &rio_ext); | ||
1242 | |||
1243 | return txd; | ||
1244 | } | ||
1245 | EXPORT_SYMBOL_GPL(rio_dma_prep_slave_sg); | ||
1246 | |||
1247 | #endif /* CONFIG_RAPIDIO_DMA_ENGINE */ | ||
1248 | |||
1249 | static void rio_fixup_device(struct rio_dev *dev) | 1124 | static void rio_fixup_device(struct rio_dev *dev) |
1250 | { | 1125 | { |
1251 | } | 1126 | } |
1252 | 1127 | ||
1253 | static int rio_init(void) | 1128 | static int __devinit rio_init(void) |
1254 | { | 1129 | { |
1255 | struct rio_dev *dev = NULL; | 1130 | struct rio_dev *dev = NULL; |
1256 | 1131 | ||
@@ -1260,83 +1135,17 @@ static int rio_init(void) | |||
1260 | return 0; | 1135 | return 0; |
1261 | } | 1136 | } |
1262 | 1137 | ||
1263 | static struct workqueue_struct *rio_wq; | 1138 | int __devinit rio_init_mports(void) |
1264 | |||
1265 | struct rio_disc_work { | ||
1266 | struct work_struct work; | ||
1267 | struct rio_mport *mport; | ||
1268 | }; | ||
1269 | |||
1270 | static void disc_work_handler(struct work_struct *_work) | ||
1271 | { | ||
1272 | struct rio_disc_work *work; | ||
1273 | |||
1274 | work = container_of(_work, struct rio_disc_work, work); | ||
1275 | pr_debug("RIO: discovery work for mport %d %s\n", | ||
1276 | work->mport->id, work->mport->name); | ||
1277 | rio_disc_mport(work->mport); | ||
1278 | } | ||
1279 | |||
1280 | int rio_init_mports(void) | ||
1281 | { | 1139 | { |
1282 | struct rio_mport *port; | 1140 | struct rio_mport *port; |
1283 | struct rio_disc_work *work; | ||
1284 | int n = 0; | ||
1285 | |||
1286 | if (!next_portid) | ||
1287 | return -ENODEV; | ||
1288 | 1141 | ||
1289 | /* | ||
1290 | * First, run enumerations and check if we need to perform discovery | ||
1291 | * on any of the registered mports. | ||
1292 | */ | ||
1293 | list_for_each_entry(port, &rio_mports, node) { | 1142 | list_for_each_entry(port, &rio_mports, node) { |
1294 | if (port->host_deviceid >= 0) | 1143 | if (port->host_deviceid >= 0) |
1295 | rio_enum_mport(port); | 1144 | rio_enum_mport(port); |
1296 | else | 1145 | else |
1297 | n++; | 1146 | rio_disc_mport(port); |
1298 | } | ||
1299 | |||
1300 | if (!n) | ||
1301 | goto no_disc; | ||
1302 | |||
1303 | /* | ||
1304 | * If we have mports that require discovery schedule a discovery work | ||
1305 | * for each of them. If the code below fails to allocate needed | ||
1306 | * resources, exit without error to keep results of enumeration | ||
1307 | * process (if any). | ||
1308 | * TODO: Implement restart of dicovery process for all or | ||
1309 | * individual discovering mports. | ||
1310 | */ | ||
1311 | rio_wq = alloc_workqueue("riodisc", 0, 0); | ||
1312 | if (!rio_wq) { | ||
1313 | pr_err("RIO: unable allocate rio_wq\n"); | ||
1314 | goto no_disc; | ||
1315 | } | ||
1316 | |||
1317 | work = kcalloc(n, sizeof *work, GFP_KERNEL); | ||
1318 | if (!work) { | ||
1319 | pr_err("RIO: no memory for work struct\n"); | ||
1320 | destroy_workqueue(rio_wq); | ||
1321 | goto no_disc; | ||
1322 | } | ||
1323 | |||
1324 | n = 0; | ||
1325 | list_for_each_entry(port, &rio_mports, node) { | ||
1326 | if (port->host_deviceid < 0) { | ||
1327 | work[n].mport = port; | ||
1328 | INIT_WORK(&work[n].work, disc_work_handler); | ||
1329 | queue_work(rio_wq, &work[n].work); | ||
1330 | n++; | ||
1331 | } | ||
1332 | } | 1147 | } |
1333 | 1148 | ||
1334 | flush_workqueue(rio_wq); | ||
1335 | pr_debug("RIO: destroy discovery workqueue\n"); | ||
1336 | destroy_workqueue(rio_wq); | ||
1337 | kfree(work); | ||
1338 | |||
1339 | no_disc: | ||
1340 | rio_init(); | 1149 | rio_init(); |
1341 | 1150 | ||
1342 | return 0; | 1151 | return 0; |
diff --git a/drivers/rapidio/switches/idt_gen2.c b/drivers/rapidio/switches/idt_gen2.c index 809b7a3336b..043ee3136e4 100644 --- a/drivers/rapidio/switches/idt_gen2.c +++ b/drivers/rapidio/switches/idt_gen2.c | |||
@@ -10,7 +10,6 @@ | |||
10 | * option) any later version. | 10 | * option) any later version. |
11 | */ | 11 | */ |
12 | 12 | ||
13 | #include <linux/stat.h> | ||
14 | #include <linux/rio.h> | 13 | #include <linux/rio.h> |
15 | #include <linux/rio_drv.h> | 14 | #include <linux/rio_drv.h> |
16 | #include <linux/rio_ids.h> | 15 | #include <linux/rio_ids.h> |