aboutsummaryrefslogtreecommitdiffstats
path: root/drivers/vme
diff options
context:
space:
mode:
authorGreg Kroah-Hartman <gregkh@linuxfoundation.org>2012-04-26 15:34:58 -0400
committerGreg Kroah-Hartman <gregkh@linuxfoundation.org>2012-04-26 15:34:58 -0400
commitdb3b9e990e75573402cda22faf933760f076c033 (patch)
tree4e742e97f7bd71adc61ce23938e361d80b61a45d /drivers/vme
parent8176df8e95df4c867457076190cfb18f2d7ff18c (diff)
Staging: VME: move VME drivers out of staging
This moves the VME core, VME board drivers, and VME bridge drivers out of the drivers/staging/vme/ area to drivers/vme/. The VME device drivers have not moved out yet due to some API questions they are still working through, that should happen soon, hopefully. Cc: Martyn Welch <martyn.welch@ge.com> Cc: Manohar Vanga <manohar.vanga@cern.ch> Cc: Vincent Bossier <vincent.bossier@gmail.com> Cc: "Emilio G. Cota" <cota@braap.org> Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Diffstat (limited to 'drivers/vme')
-rw-r--r--drivers/vme/Kconfig19
-rw-r--r--drivers/vme/Makefile7
-rw-r--r--drivers/vme/boards/Kconfig9
-rw-r--r--drivers/vme/boards/Makefile5
-rw-r--r--drivers/vme/boards/vme_vmivme7805.c123
-rw-r--r--drivers/vme/boards/vme_vmivme7805.h37
-rw-r--r--drivers/vme/bridges/Kconfig15
-rw-r--r--drivers/vme/bridges/Makefile2
-rw-r--r--drivers/vme/bridges/vme_ca91cx42.c1959
-rw-r--r--drivers/vme/bridges/vme_ca91cx42.h583
-rw-r--r--drivers/vme/bridges/vme_tsi148.c2691
-rw-r--r--drivers/vme/bridges/vme_tsi148.h1410
-rw-r--r--drivers/vme/vme.c1517
-rw-r--r--drivers/vme/vme_api.txt396
-rw-r--r--drivers/vme/vme_bridge.h174
15 files changed, 8947 insertions, 0 deletions
diff --git a/drivers/vme/Kconfig b/drivers/vme/Kconfig
new file mode 100644
index 00000000000..c5c22465a80
--- /dev/null
+++ b/drivers/vme/Kconfig
@@ -0,0 +1,19 @@
1#
2# VME configuration.
3#
4
5menuconfig VME_BUS
6 tristate "VME bridge support"
7 depends on PCI
8 ---help---
9 If you say Y here you get support for the VME bridge Framework.
10
11if VME_BUS
12
13source "drivers/vme/bridges/Kconfig"
14
15source "drivers/vme/boards/Kconfig"
16
17source "drivers/staging/vme/devices/Kconfig"
18
19endif # VME_BUS
diff --git a/drivers/vme/Makefile b/drivers/vme/Makefile
new file mode 100644
index 00000000000..d7bfcb9fd5a
--- /dev/null
+++ b/drivers/vme/Makefile
@@ -0,0 +1,7 @@
1#
2# Makefile for the VME bridge device drivers.
3#
4obj-$(CONFIG_VME_BUS) += vme.o
5
6obj-y += bridges/
7obj-y += boards/
diff --git a/drivers/vme/boards/Kconfig b/drivers/vme/boards/Kconfig
new file mode 100644
index 00000000000..76163135352
--- /dev/null
+++ b/drivers/vme/boards/Kconfig
@@ -0,0 +1,9 @@
1comment "VME Board Drivers"
2
3config VMIVME_7805
4 tristate "VMIVME-7805"
5 help
6 If you say Y here you get support for the VMIVME-7805 board.
7 This board has an additional control interface to the Universe II
8 chip. This driver has to be included if you want to access VME bus
9 with VMIVME-7805 board.
diff --git a/drivers/vme/boards/Makefile b/drivers/vme/boards/Makefile
new file mode 100644
index 00000000000..43658340885
--- /dev/null
+++ b/drivers/vme/boards/Makefile
@@ -0,0 +1,5 @@
1#
2# Makefile for the VME board drivers.
3#
4
5obj-$(CONFIG_VMIVME_7805) += vme_vmivme7805.o
diff --git a/drivers/vme/boards/vme_vmivme7805.c b/drivers/vme/boards/vme_vmivme7805.c
new file mode 100644
index 00000000000..8e05bb4e135
--- /dev/null
+++ b/drivers/vme/boards/vme_vmivme7805.c
@@ -0,0 +1,123 @@
1/*
2 * Support for the VMIVME-7805 board access to the Universe II bridge.
3 *
4 * Author: Arthur Benilov <arthur.benilov@iba-group.com>
5 * Copyright 2010 Ion Beam Application, Inc.
6 *
7 * This program is free software; you can redistribute it and/or modify it
8 * under the terms of the GNU General Public License as published by the
9 * Free Software Foundation; either version 2 of the License, or (at your
10 * option) any later version.
11 */
12
13#include <linux/module.h>
14#include <linux/types.h>
15#include <linux/errno.h>
16#include <linux/pci.h>
17#include <linux/poll.h>
18#include <linux/io.h>
19
20#include "vme_vmivme7805.h"
21
22static int __init vmic_init(void);
23static int vmic_probe(struct pci_dev *, const struct pci_device_id *);
24static void vmic_remove(struct pci_dev *);
25static void __exit vmic_exit(void);
26
27/** Base address to access FPGA register */
28static void *vmic_base;
29
30static const char driver_name[] = "vmivme_7805";
31
32static DEFINE_PCI_DEVICE_TABLE(vmic_ids) = {
33 { PCI_DEVICE(PCI_VENDOR_ID_VMIC, PCI_DEVICE_ID_VTIMR) },
34 { },
35};
36
37static struct pci_driver vmic_driver = {
38 .name = driver_name,
39 .id_table = vmic_ids,
40 .probe = vmic_probe,
41 .remove = vmic_remove,
42};
43
/* Module entry point: register the PCI driver for the VMIVME-7805
 * control device; probing happens via vmic_probe() on ID match. */
static int __init vmic_init(void)
{
	return pci_register_driver(&vmic_driver);
}
48
/*
 * Probe the VMIVME-7805 control FPGA: enable the PCI device, map the
 * first 16 bytes of BAR 0 (the FPGA register window) and program the
 * VME_CONTROL register to clear any pending bus error and enable the
 * VME interface with byte swapping.
 *
 * Returns 0 on success or a negative errno; on failure all acquired
 * resources are released via the goto-cleanup chain.
 */
static int vmic_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
	int retval;
	u32 data;

	/* Enable the device */
	retval = pci_enable_device(pdev);
	if (retval) {
		dev_err(&pdev->dev, "Unable to enable device\n");
		goto err;
	}

	/* Map Registers */
	retval = pci_request_regions(pdev, driver_name);
	if (retval) {
		dev_err(&pdev->dev, "Unable to reserve resources\n");
		goto err_resource;
	}

	/* Map registers in BAR 0 (16 bytes covers the FPGA VME IF regs) */
	vmic_base = ioremap_nocache(pci_resource_start(pdev, 0), 16);
	if (!vmic_base) {
		dev_err(&pdev->dev, "Unable to remap CRG region\n");
		retval = -EIO;
		goto err_remap;
	}

	/* Clear the FPGA VME IF contents */
	iowrite32(0, vmic_base + VME_CONTROL);

	/* Clear any initial BERR; upper bits are masked off before the
	 * read-modify-write as only [11:0] are control bits */
	data = ioread32(vmic_base + VME_CONTROL) & 0x00000FFF;
	data |= BM_VME_CONTROL_BERRST;
	iowrite32(data, vmic_base + VME_CONTROL);

	/* Enable the vme interface and byte swapping */
	data = ioread32(vmic_base + VME_CONTROL) & 0x00000FFF;
	data = data | BM_VME_CONTROL_MASTER_ENDIAN |
		BM_VME_CONTROL_SLAVE_ENDIAN |
		BM_VME_CONTROL_ABLE |
		BM_VME_CONTROL_BERRI |
		BM_VME_CONTROL_BPENA |
		BM_VME_CONTROL_VBENA;
	iowrite32(data, vmic_base + VME_CONTROL);

	return 0;

err_remap:
	pci_release_regions(pdev);
err_resource:
	pci_disable_device(pdev);
err:
	return retval;
}
103
104static void vmic_remove(struct pci_dev *pdev)
105{
106 iounmap(vmic_base);
107 pci_release_regions(pdev);
108 pci_disable_device(pdev);
109
110}
111
/* Module exit point: unregister the driver; the PCI core invokes
 * vmic_remove() for any still-bound device. */
static void __exit vmic_exit(void)
{
	pci_unregister_driver(&vmic_driver);
}
116
117MODULE_DESCRIPTION("VMIVME-7805 board support driver");
118MODULE_AUTHOR("Arthur Benilov <arthur.benilov@iba-group.com>");
119MODULE_LICENSE("GPL");
120
121module_init(vmic_init);
122module_exit(vmic_exit);
123
diff --git a/drivers/vme/boards/vme_vmivme7805.h b/drivers/vme/boards/vme_vmivme7805.h
new file mode 100644
index 00000000000..44c2c449808
--- /dev/null
+++ b/drivers/vme/boards/vme_vmivme7805.h
@@ -0,0 +1,37 @@
1/*
2 * vmivme_7805.h
3 *
4 * Support for the VMIVME-7805 board access to the Universe II bridge.
5 *
6 * Author: Arthur Benilov <arthur.benilov@iba-group.com>
7 * Copyright 2010 Ion Beam Application, Inc.
8 *
9 * This program is free software; you can redistribute it and/or modify it
10 * under the terms of the GNU General Public License as published by the
11 * Free Software Foundation; either version 2 of the License, or (at your
12 * option) any later version.
13 */
14
15
#ifndef _VMIVME_7805_H
#define _VMIVME_7805_H

/* PCI IDs of the VMIC control FPGA on the VMIVME-7805; guarded in case
 * a future pci_ids.h provides them. */
#ifndef PCI_VENDOR_ID_VMIC
#define PCI_VENDOR_ID_VMIC 0x114A
#endif

#ifndef PCI_DEVICE_ID_VTIMR
#define PCI_DEVICE_ID_VTIMR 0x0004
#endif

/* Offset of the VME interface control register within BAR 0 */
#define VME_CONTROL 0x0000
/* VME_CONTROL bit masks (BM_ = bit mask), as programmed by vmic_probe() */
#define BM_VME_CONTROL_MASTER_ENDIAN 0x0001
#define BM_VME_CONTROL_SLAVE_ENDIAN 0x0002
#define BM_VME_CONTROL_ABLE 0x0004
#define BM_VME_CONTROL_BERRI 0x0040
#define BM_VME_CONTROL_BERRST 0x0080  /* write 1 to clear a bus error */
#define BM_VME_CONTROL_BPENA 0x0400
#define BM_VME_CONTROL_VBENA 0x0800

#endif /* _VMIVME_7805_H */
37
diff --git a/drivers/vme/bridges/Kconfig b/drivers/vme/bridges/Kconfig
new file mode 100644
index 00000000000..9331064e047
--- /dev/null
+++ b/drivers/vme/bridges/Kconfig
@@ -0,0 +1,15 @@
1comment "VME Bridge Drivers"
2
3config VME_CA91CX42
4 tristate "Universe II"
5 depends on VIRT_TO_BUS
6 help
7 If you say Y here you get support for the Tundra CA91C142
8 (Universe II) VME bridge chip.
9
10config VME_TSI148
11 tristate "Tempe"
12 depends on VIRT_TO_BUS
13 help
14 If you say Y here you get support for the Tundra TSI148 VME bridge
15 chip.
diff --git a/drivers/vme/bridges/Makefile b/drivers/vme/bridges/Makefile
new file mode 100644
index 00000000000..59638afcd50
--- /dev/null
+++ b/drivers/vme/bridges/Makefile
@@ -0,0 +1,2 @@
1obj-$(CONFIG_VME_CA91CX42) += vme_ca91cx42.o
2obj-$(CONFIG_VME_TSI148) += vme_tsi148.o
diff --git a/drivers/vme/bridges/vme_ca91cx42.c b/drivers/vme/bridges/vme_ca91cx42.c
new file mode 100644
index 00000000000..a3c0f84e2fa
--- /dev/null
+++ b/drivers/vme/bridges/vme_ca91cx42.c
@@ -0,0 +1,1959 @@
1/*
2 * Support for the Tundra Universe I/II VME-PCI Bridge Chips
3 *
4 * Author: Martyn Welch <martyn.welch@ge.com>
5 * Copyright 2008 GE Intelligent Platforms Embedded Systems, Inc.
6 *
7 * Based on work by Tom Armistead and Ajit Prem
8 * Copyright 2004 Motorola Inc.
9 *
10 * Derived from ca91c042.c by Michael Wyrick
11 *
12 * This program is free software; you can redistribute it and/or modify it
13 * under the terms of the GNU General Public License as published by the
14 * Free Software Foundation; either version 2 of the License, or (at your
15 * option) any later version.
16 */
17
18#include <linux/module.h>
19#include <linux/mm.h>
20#include <linux/types.h>
21#include <linux/errno.h>
22#include <linux/pci.h>
23#include <linux/dma-mapping.h>
24#include <linux/poll.h>
25#include <linux/interrupt.h>
26#include <linux/spinlock.h>
27#include <linux/sched.h>
28#include <linux/slab.h>
29#include <linux/time.h>
30#include <linux/io.h>
31#include <linux/uaccess.h>
32#include <linux/vme.h>
33
34#include "../vme_bridge.h"
35#include "vme_ca91cx42.h"
36
37static int __init ca91cx42_init(void);
38static int ca91cx42_probe(struct pci_dev *, const struct pci_device_id *);
39static void ca91cx42_remove(struct pci_dev *);
40static void __exit ca91cx42_exit(void);
41
42/* Module parameters */
43static int geoid;
44
45static const char driver_name[] = "vme_ca91cx42";
46
47static DEFINE_PCI_DEVICE_TABLE(ca91cx42_ids) = {
48 { PCI_DEVICE(PCI_VENDOR_ID_TUNDRA, PCI_DEVICE_ID_TUNDRA_CA91C142) },
49 { },
50};
51
52static struct pci_driver ca91cx42_driver = {
53 .name = driver_name,
54 .id_table = ca91cx42_ids,
55 .probe = ca91cx42_probe,
56 .remove = ca91cx42_remove,
57};
58
/* DMA-complete interrupt: wake any sleeper on the DMA queue and report
 * the serviced bit back to the main handler. */
static u32 ca91cx42_DMA_irqhandler(struct ca91cx42_driver *bridge)
{
	wake_up(&bridge->dma_queue);

	return CA91CX42_LINT_DMA;
}
65
66static u32 ca91cx42_LM_irqhandler(struct ca91cx42_driver *bridge, u32 stat)
67{
68 int i;
69 u32 serviced = 0;
70
71 for (i = 0; i < 4; i++) {
72 if (stat & CA91CX42_LINT_LM[i]) {
73 /* We only enable interrupts if the callback is set */
74 bridge->lm_callback[i](i);
75 serviced |= CA91CX42_LINT_LM[i];
76 }
77 }
78
79 return serviced;
80}
81
/* Mailbox interrupt: wake every sleeper on the (single, shared) mailbox
 * queue.  XXX This needs to be split into 4 queues — one per mailbox —
 * so only the affected waiter is woken; mbox_mask is currently unused. */
static u32 ca91cx42_MB_irqhandler(struct ca91cx42_driver *bridge, int mbox_mask)
{
	wake_up(&bridge->mbox_queue);

	return CA91CX42_LINT_MBOX;
}
89
/* Software-IACK interrupt: wakes ca91cx42_irq_generate(), which sleeps
 * on iack_queue until the generated VME interrupt is acknowledged. */
static u32 ca91cx42_IACK_irqhandler(struct ca91cx42_driver *bridge)
{
	wake_up(&bridge->iack_queue);

	return CA91CX42_LINT_SW_IACK;
}
96
97static u32 ca91cx42_VERR_irqhandler(struct vme_bridge *ca91cx42_bridge)
98{
99 int val;
100 struct ca91cx42_driver *bridge;
101
102 bridge = ca91cx42_bridge->driver_priv;
103
104 val = ioread32(bridge->base + DGCS);
105
106 if (!(val & 0x00000800)) {
107 dev_err(ca91cx42_bridge->parent, "ca91cx42_VERR_irqhandler DMA "
108 "Read Error DGCS=%08X\n", val);
109 }
110
111 return CA91CX42_LINT_VERR;
112}
113
114static u32 ca91cx42_LERR_irqhandler(struct vme_bridge *ca91cx42_bridge)
115{
116 int val;
117 struct ca91cx42_driver *bridge;
118
119 bridge = ca91cx42_bridge->driver_priv;
120
121 val = ioread32(bridge->base + DGCS);
122
123 if (!(val & 0x00000800))
124 dev_err(ca91cx42_bridge->parent, "ca91cx42_LERR_irqhandler DMA "
125 "Read Error DGCS=%08X\n", val);
126
127 return CA91CX42_LINT_LERR;
128}
129
130
131static u32 ca91cx42_VIRQ_irqhandler(struct vme_bridge *ca91cx42_bridge,
132 int stat)
133{
134 int vec, i, serviced = 0;
135 struct ca91cx42_driver *bridge;
136
137 bridge = ca91cx42_bridge->driver_priv;
138
139
140 for (i = 7; i > 0; i--) {
141 if (stat & (1 << i)) {
142 vec = ioread32(bridge->base +
143 CA91CX42_V_STATID[i]) & 0xff;
144
145 vme_irq_handler(ca91cx42_bridge, i, vec);
146
147 serviced |= (1 << i);
148 }
149 }
150
151 return serviced;
152}
153
/*
 * Top-level (shared) PCI interrupt handler.  Reads the local interrupt
 * status, masks it with the enable register so only unmasked sources
 * are considered, dispatches each pending source to its sub-handler,
 * then acknowledges exactly the serviced bits by writing them back to
 * LINT_STAT.  Returns IRQ_NONE when none of our sources are pending
 * (the line is IRQF_SHARED).
 */
static irqreturn_t ca91cx42_irqhandler(int irq, void *ptr)
{
	u32 stat, enable, serviced = 0;
	struct vme_bridge *ca91cx42_bridge;
	struct ca91cx42_driver *bridge;

	ca91cx42_bridge = ptr;

	bridge = ca91cx42_bridge->driver_priv;

	enable = ioread32(bridge->base + LINT_EN);
	stat = ioread32(bridge->base + LINT_STAT);

	/* Only look at unmasked interrupts */
	stat &= enable;

	if (unlikely(!stat))
		return IRQ_NONE;

	if (stat & CA91CX42_LINT_DMA)
		serviced |= ca91cx42_DMA_irqhandler(bridge);
	if (stat & (CA91CX42_LINT_LM0 | CA91CX42_LINT_LM1 | CA91CX42_LINT_LM2 |
			CA91CX42_LINT_LM3))
		serviced |= ca91cx42_LM_irqhandler(bridge, stat);
	if (stat & CA91CX42_LINT_MBOX)
		serviced |= ca91cx42_MB_irqhandler(bridge, stat);
	if (stat & CA91CX42_LINT_SW_IACK)
		serviced |= ca91cx42_IACK_irqhandler(bridge);
	if (stat & CA91CX42_LINT_VERR)
		serviced |= ca91cx42_VERR_irqhandler(ca91cx42_bridge);
	if (stat & CA91CX42_LINT_LERR)
		serviced |= ca91cx42_LERR_irqhandler(ca91cx42_bridge);
	if (stat & (CA91CX42_LINT_VIRQ1 | CA91CX42_LINT_VIRQ2 |
			CA91CX42_LINT_VIRQ3 | CA91CX42_LINT_VIRQ4 |
			CA91CX42_LINT_VIRQ5 | CA91CX42_LINT_VIRQ6 |
			CA91CX42_LINT_VIRQ7))
		serviced |= ca91cx42_VIRQ_irqhandler(ca91cx42_bridge, stat);

	/* Clear serviced interrupts */
	iowrite32(serviced, bridge->base + LINT_STAT);

	return IRQ_HANDLED;
}
197
/*
 * Bring up interrupt handling for the bridge: quiesce all interrupt
 * sources, install the shared PCI handler (dev_id = ca91cx42_bridge),
 * route everything to PCI interrupt 0 and enable the internally-used
 * sources (DMA, mailboxes, software IACK, bus errors).  VME IRQ levels
 * are enabled later, on demand, by ca91cx42_irq_set().
 *
 * Returns 0 on success or the request_irq() error.
 */
static int ca91cx42_irq_init(struct vme_bridge *ca91cx42_bridge)
{
	int result, tmp;
	struct pci_dev *pdev;
	struct ca91cx42_driver *bridge;

	bridge = ca91cx42_bridge->driver_priv;

	/* Need pdev */
	pdev = container_of(ca91cx42_bridge->parent, struct pci_dev, dev);

	/* Initialise list for VME bus errors */
	INIT_LIST_HEAD(&ca91cx42_bridge->vme_errors);

	mutex_init(&ca91cx42_bridge->irq_mtx);

	/* Disable interrupts from PCI to VME */
	iowrite32(0, bridge->base + VINT_EN);

	/* Disable PCI interrupts */
	iowrite32(0, bridge->base + LINT_EN);
	/* Clear Any Pending PCI Interrupts */
	iowrite32(0x00FFFFFF, bridge->base + LINT_STAT);

	result = request_irq(pdev->irq, ca91cx42_irqhandler, IRQF_SHARED,
			driver_name, ca91cx42_bridge);
	if (result) {
		dev_err(&pdev->dev, "Can't get assigned pci irq vector %02X\n",
			pdev->irq);
		return result;
	}

	/* Ensure all interrupts are mapped to PCI Interrupt 0 */
	iowrite32(0, bridge->base + LINT_MAP0);
	iowrite32(0, bridge->base + LINT_MAP1);
	iowrite32(0, bridge->base + LINT_MAP2);

	/* Enable DMA, mailbox & LM Interrupts */
	tmp = CA91CX42_LINT_MBOX3 | CA91CX42_LINT_MBOX2 | CA91CX42_LINT_MBOX1 |
		CA91CX42_LINT_MBOX0 | CA91CX42_LINT_SW_IACK |
		CA91CX42_LINT_VERR | CA91CX42_LINT_LERR | CA91CX42_LINT_DMA;

	iowrite32(tmp, bridge->base + LINT_EN);

	return 0;
}
244
/* Tear down interrupt handling: quiesce all sources, then release the
 * PCI interrupt line. */
static void ca91cx42_irq_exit(struct ca91cx42_driver *bridge,
	struct pci_dev *pdev)
{
	/* Disable interrupts from PCI to VME */
	iowrite32(0, bridge->base + VINT_EN);

	/* Disable PCI interrupts */
	iowrite32(0, bridge->base + LINT_EN);
	/* Clear Any Pending PCI Interrupts */
	iowrite32(0x00FFFFFF, bridge->base + LINT_STAT);

	/*
	 * NOTE(review): request_irq() in ca91cx42_irq_init() registered
	 * with the vme_bridge pointer as dev_id, but pdev is passed here.
	 * For an IRQF_SHARED line free_irq() matches on dev_id, so this
	 * looks like it will fail to unregister the handler — confirm and
	 * pass the same cookie to both calls (needs the vme_bridge here).
	 */
	free_irq(pdev->irq, pdev);
}
258
259static int ca91cx42_iack_received(struct ca91cx42_driver *bridge, int level)
260{
261 u32 tmp;
262
263 tmp = ioread32(bridge->base + LINT_STAT);
264
265 if (tmp & (1 << level))
266 return 0;
267 else
268 return 1;
269}
270
271/*
272 * Set up an VME interrupt
273 */
/*
 * Enable (state != 0) or disable (state == 0) reception of a VME IRQ
 * level via read-modify-write of LINT_EN.  When disabling with sync
 * set, wait for any in-flight handler on the PCI line to finish so the
 * caller can safely tear down its callback afterwards.
 */
static void ca91cx42_irq_set(struct vme_bridge *ca91cx42_bridge, int level,
	int state, int sync)

{
	struct pci_dev *pdev;
	u32 tmp;
	struct ca91cx42_driver *bridge;

	bridge = ca91cx42_bridge->driver_priv;

	/* Enable IRQ level */
	tmp = ioread32(bridge->base + LINT_EN);

	if (state == 0)
		tmp &= ~CA91CX42_LINT_VIRQ[level];
	else
		tmp |= CA91CX42_LINT_VIRQ[level];

	iowrite32(tmp, bridge->base + LINT_EN);

	if ((state == 0) && (sync != 0)) {
		pdev = container_of(ca91cx42_bridge->parent, struct pci_dev,
			dev);

		synchronize_irq(pdev->irq);
	}
}
301
/*
 * Generate a VME interrupt at the given level with Status/ID 'statid',
 * then sleep until the bus master acknowledges it (software-IACK
 * interrupt wakes iack_queue).  Serialized by bridge->vme_int since
 * the chip has a single software-interrupt generator.
 *
 * Returns 0 on success, -EINVAL for odd vectors (the Universe can only
 * generate even Status/ID values).
 */
static int ca91cx42_irq_generate(struct vme_bridge *ca91cx42_bridge, int level,
	int statid)
{
	u32 tmp;
	struct ca91cx42_driver *bridge;

	bridge = ca91cx42_bridge->driver_priv;

	/* Universe can only generate even vectors */
	if (statid & 1)
		return -EINVAL;

	mutex_lock(&bridge->vme_int);

	tmp = ioread32(bridge->base + VINT_EN);

	/* Set Status/ID */
	iowrite32(statid << 24, bridge->base + STATID);

	/* Assert VMEbus IRQ; level bits live at [30:25] in VINT_EN */
	tmp = tmp | (1 << (level + 24));
	iowrite32(tmp, bridge->base + VINT_EN);

	/* Wait for IACK */
	wait_event_interruptible(bridge->iack_queue,
		ca91cx42_iack_received(bridge, level));

	/* Return interrupt to low state */
	tmp = ioread32(bridge->base + VINT_EN);
	tmp = tmp & ~(1 << (level + 24));
	iowrite32(tmp, bridge->base + VINT_EN);

	mutex_unlock(&bridge->vme_int);

	return 0;
}
338
/*
 * Program slave window 'image->number' (VSI registers): map the VME
 * range [vme_base, vme_base + size) onto PCI starting at pci_base, for
 * the given address space and cycle types.  The window is disabled
 * while the registers are reprogrammed and only re-enabled at the end
 * if 'enabled' is set — the ordering of the register writes matters.
 *
 * Returns 0 on success, -EINVAL for unsupported address spaces or
 * base/bound/offset values not aligned to the window granularity
 * (4 KiB for windows 0 and 4, 64 KiB otherwise).
 *
 * NOTE(review): vme_bound and pci_offset are unsigned int, so 64-bit
 * vme_base/size/pci_base values are silently truncated to 32 bits —
 * presumably acceptable for A16/A24/A32 spaces, but worth confirming.
 */
static int ca91cx42_slave_set(struct vme_slave_resource *image, int enabled,
	unsigned long long vme_base, unsigned long long size,
	dma_addr_t pci_base, u32 aspace, u32 cycle)
{
	unsigned int i, addr = 0, granularity;
	unsigned int temp_ctl = 0;
	unsigned int vme_bound, pci_offset;
	struct vme_bridge *ca91cx42_bridge;
	struct ca91cx42_driver *bridge;

	ca91cx42_bridge = image->parent;

	bridge = ca91cx42_bridge->driver_priv;

	i = image->number;

	switch (aspace) {
	case VME_A16:
		addr |= CA91CX42_VSI_CTL_VAS_A16;
		break;
	case VME_A24:
		addr |= CA91CX42_VSI_CTL_VAS_A24;
		break;
	case VME_A32:
		addr |= CA91CX42_VSI_CTL_VAS_A32;
		break;
	case VME_USER1:
		addr |= CA91CX42_VSI_CTL_VAS_USER1;
		break;
	case VME_USER2:
		addr |= CA91CX42_VSI_CTL_VAS_USER2;
		break;
	case VME_A64:
	case VME_CRCSR:
	case VME_USER3:
	case VME_USER4:
	default:
		dev_err(ca91cx42_bridge->parent, "Invalid address space\n");
		return -EINVAL;
		break;
	}

	/*
	 * Bound address is a valid address for the window, adjust
	 * accordingly
	 */
	vme_bound = vme_base + size;
	pci_offset = pci_base - vme_base;

	/* Windows 0 and 4 have 4 KiB granularity, the rest 64 KiB */
	if ((i == 0) || (i == 4))
		granularity = 0x1000;
	else
		granularity = 0x10000;

	if (vme_base & (granularity - 1)) {
		dev_err(ca91cx42_bridge->parent, "Invalid VME base "
			"alignment\n");
		return -EINVAL;
	}
	if (vme_bound & (granularity - 1)) {
		dev_err(ca91cx42_bridge->parent, "Invalid VME bound "
			"alignment\n");
		return -EINVAL;
	}
	if (pci_offset & (granularity - 1)) {
		dev_err(ca91cx42_bridge->parent, "Invalid PCI Offset "
			"alignment\n");
		return -EINVAL;
	}

	/* Disable while we are mucking around */
	temp_ctl = ioread32(bridge->base + CA91CX42_VSI_CTL[i]);
	temp_ctl &= ~CA91CX42_VSI_CTL_EN;
	iowrite32(temp_ctl, bridge->base + CA91CX42_VSI_CTL[i]);

	/* Setup mapping */
	iowrite32(vme_base, bridge->base + CA91CX42_VSI_BS[i]);
	iowrite32(vme_bound, bridge->base + CA91CX42_VSI_BD[i]);
	iowrite32(pci_offset, bridge->base + CA91CX42_VSI_TO[i]);

	/* Setup address space */
	temp_ctl &= ~CA91CX42_VSI_CTL_VAS_M;
	temp_ctl |= addr;

	/* Setup cycle types */
	temp_ctl &= ~(CA91CX42_VSI_CTL_PGM_M | CA91CX42_VSI_CTL_SUPER_M);
	if (cycle & VME_SUPER)
		temp_ctl |= CA91CX42_VSI_CTL_SUPER_SUPR;
	if (cycle & VME_USER)
		temp_ctl |= CA91CX42_VSI_CTL_SUPER_NPRIV;
	if (cycle & VME_PROG)
		temp_ctl |= CA91CX42_VSI_CTL_PGM_PGM;
	if (cycle & VME_DATA)
		temp_ctl |= CA91CX42_VSI_CTL_PGM_DATA;

	/* Write ctl reg without enable */
	iowrite32(temp_ctl, bridge->base + CA91CX42_VSI_CTL[i]);

	if (enabled)
		temp_ctl |= CA91CX42_VSI_CTL_EN;

	iowrite32(temp_ctl, bridge->base + CA91CX42_VSI_CTL[i]);

	return 0;
}
444
445static int ca91cx42_slave_get(struct vme_slave_resource *image, int *enabled,
446 unsigned long long *vme_base, unsigned long long *size,
447 dma_addr_t *pci_base, u32 *aspace, u32 *cycle)
448{
449 unsigned int i, granularity = 0, ctl = 0;
450 unsigned long long vme_bound, pci_offset;
451 struct ca91cx42_driver *bridge;
452
453 bridge = image->parent->driver_priv;
454
455 i = image->number;
456
457 if ((i == 0) || (i == 4))
458 granularity = 0x1000;
459 else
460 granularity = 0x10000;
461
462 /* Read Registers */
463 ctl = ioread32(bridge->base + CA91CX42_VSI_CTL[i]);
464
465 *vme_base = ioread32(bridge->base + CA91CX42_VSI_BS[i]);
466 vme_bound = ioread32(bridge->base + CA91CX42_VSI_BD[i]);
467 pci_offset = ioread32(bridge->base + CA91CX42_VSI_TO[i]);
468
469 *pci_base = (dma_addr_t)vme_base + pci_offset;
470 *size = (unsigned long long)((vme_bound - *vme_base) + granularity);
471
472 *enabled = 0;
473 *aspace = 0;
474 *cycle = 0;
475
476 if (ctl & CA91CX42_VSI_CTL_EN)
477 *enabled = 1;
478
479 if ((ctl & CA91CX42_VSI_CTL_VAS_M) == CA91CX42_VSI_CTL_VAS_A16)
480 *aspace = VME_A16;
481 if ((ctl & CA91CX42_VSI_CTL_VAS_M) == CA91CX42_VSI_CTL_VAS_A24)
482 *aspace = VME_A24;
483 if ((ctl & CA91CX42_VSI_CTL_VAS_M) == CA91CX42_VSI_CTL_VAS_A32)
484 *aspace = VME_A32;
485 if ((ctl & CA91CX42_VSI_CTL_VAS_M) == CA91CX42_VSI_CTL_VAS_USER1)
486 *aspace = VME_USER1;
487 if ((ctl & CA91CX42_VSI_CTL_VAS_M) == CA91CX42_VSI_CTL_VAS_USER2)
488 *aspace = VME_USER2;
489
490 if (ctl & CA91CX42_VSI_CTL_SUPER_SUPR)
491 *cycle |= VME_SUPER;
492 if (ctl & CA91CX42_VSI_CTL_SUPER_NPRIV)
493 *cycle |= VME_USER;
494 if (ctl & CA91CX42_VSI_CTL_PGM_PGM)
495 *cycle |= VME_PROG;
496 if (ctl & CA91CX42_VSI_CTL_PGM_DATA)
497 *cycle |= VME_DATA;
498
499 return 0;
500}
501
502/*
503 * Allocate and map PCI Resource
504 */
/*
 * Allocate and map PCI Resource
 *
 * Allocate a PCI bus address window of 'size' bytes for a master image
 * and ioremap it into image->kern_base.  If a previous allocation of
 * exactly the right size exists it is reused; otherwise the old
 * mapping/resource/name are torn down and reallocated.  On failure the
 * goto chain unwinds whatever was acquired and returns a negative
 * errno.
 */
static int ca91cx42_alloc_resource(struct vme_master_resource *image,
	unsigned long long size)
{
	unsigned long long existing_size;
	int retval = 0;
	struct pci_dev *pdev;
	struct vme_bridge *ca91cx42_bridge;

	ca91cx42_bridge = image->parent;

	/* Find pci_dev container of dev */
	if (ca91cx42_bridge->parent == NULL) {
		dev_err(ca91cx42_bridge->parent, "Dev entry NULL\n");
		return -EINVAL;
	}
	pdev = container_of(ca91cx42_bridge->parent, struct pci_dev, dev);

	existing_size = (unsigned long long)(image->bus_resource.end -
		image->bus_resource.start);

	/* If the existing size is OK, return (resource end is inclusive,
	 * hence the size - 1 comparison) */
	if (existing_size == (size - 1))
		return 0;

	if (existing_size != 0) {
		iounmap(image->kern_base);
		image->kern_base = NULL;
		kfree(image->bus_resource.name);
		release_resource(&image->bus_resource);
		memset(&image->bus_resource, 0, sizeof(struct resource));
	}

	if (image->bus_resource.name == NULL) {
		/* GFP_ATOMIC: may be reached with the image spinlock held */
		image->bus_resource.name = kmalloc(VMENAMSIZ+3, GFP_ATOMIC);
		if (image->bus_resource.name == NULL) {
			dev_err(ca91cx42_bridge->parent, "Unable to allocate "
				"memory for resource name\n");
			retval = -ENOMEM;
			goto err_name;
		}
	}

	sprintf((char *)image->bus_resource.name, "%s.%d",
		ca91cx42_bridge->name, image->number);

	image->bus_resource.start = 0;
	image->bus_resource.end = (unsigned long)size;
	image->bus_resource.flags = IORESOURCE_MEM;

	retval = pci_bus_alloc_resource(pdev->bus,
		&image->bus_resource, size, size, PCIBIOS_MIN_MEM,
		0, NULL, NULL);
	if (retval) {
		dev_err(ca91cx42_bridge->parent, "Failed to allocate mem "
			"resource for window %d size 0x%lx start 0x%lx\n",
			image->number, (unsigned long)size,
			(unsigned long)image->bus_resource.start);
		goto err_resource;
	}

	image->kern_base = ioremap_nocache(
		image->bus_resource.start, size);
	if (image->kern_base == NULL) {
		dev_err(ca91cx42_bridge->parent, "Failed to remap resource\n");
		retval = -ENOMEM;
		goto err_remap;
	}

	return 0;

err_remap:
	release_resource(&image->bus_resource);
err_resource:
	kfree(image->bus_resource.name);
	memset(&image->bus_resource, 0, sizeof(struct resource));
err_name:
	return retval;
}
583
584/*
585 * Free and unmap PCI Resource
586 */
/*
 * Free and unmap PCI Resource
 *
 * Inverse of ca91cx42_alloc_resource(): unmap the kernel mapping,
 * release the bus resource, free its name and zero the struct so a
 * later alloc sees existing_size == 0.
 */
static void ca91cx42_free_resource(struct vme_master_resource *image)
{
	iounmap(image->kern_base);
	image->kern_base = NULL;
	release_resource(&image->bus_resource);
	kfree(image->bus_resource.name);
	memset(&image->bus_resource, 0, sizeof(struct resource));
}
595
596
/*
 * Program master window 'image->number' (LSI registers): allocate a PCI
 * window of 'size' bytes, map it to the VME range starting at vme_base,
 * and configure address space, cycle types and data width.  The window
 * is disabled during reprogramming and only re-enabled at the end when
 * 'enabled' is set — register write ordering matters here.
 *
 * Returns 0 on success; -EINVAL for bad alignment, data width or
 * address space; -ENOMEM if the PCI window cannot be allocated.
 * Serialized against readers via image->lock.
 */
static int ca91cx42_master_set(struct vme_master_resource *image, int enabled,
	unsigned long long vme_base, unsigned long long size, u32 aspace,
	u32 cycle, u32 dwidth)
{
	int retval = 0;
	unsigned int i, granularity = 0;
	unsigned int temp_ctl = 0;
	unsigned long long pci_bound, vme_offset, pci_base;
	struct vme_bridge *ca91cx42_bridge;
	struct ca91cx42_driver *bridge;

	ca91cx42_bridge = image->parent;

	bridge = ca91cx42_bridge->driver_priv;

	i = image->number;

	/* Windows 0 and 4 have 4 KiB granularity, the rest 64 KiB */
	if ((i == 0) || (i == 4))
		granularity = 0x1000;
	else
		granularity = 0x10000;

	/* Verify input data */
	if (vme_base & (granularity - 1)) {
		dev_err(ca91cx42_bridge->parent, "Invalid VME Window "
			"alignment\n");
		retval = -EINVAL;
		goto err_window;
	}
	if (size & (granularity - 1)) {
		dev_err(ca91cx42_bridge->parent, "Invalid VME Window "
			"alignment\n");
		retval = -EINVAL;
		goto err_window;
	}

	spin_lock(&image->lock);

	/*
	 * Let's allocate the resource here rather than further up the stack as
	 * it avoids pushing loads of bus dependent stuff up the stack
	 */
	retval = ca91cx42_alloc_resource(image, size);
	if (retval) {
		spin_unlock(&image->lock);
		dev_err(ca91cx42_bridge->parent, "Unable to allocate memory "
			"for resource name\n");
		retval = -ENOMEM;
		goto err_res;
	}

	pci_base = (unsigned long long)image->bus_resource.start;

	/*
	 * Bound address is a valid address for the window, adjust
	 * according to window granularity.
	 */
	pci_bound = pci_base + size;
	vme_offset = vme_base - pci_base;

	/* Disable while we are mucking around */
	temp_ctl = ioread32(bridge->base + CA91CX42_LSI_CTL[i]);
	temp_ctl &= ~CA91CX42_LSI_CTL_EN;
	iowrite32(temp_ctl, bridge->base + CA91CX42_LSI_CTL[i]);

	/* Setup cycle types */
	temp_ctl &= ~CA91CX42_LSI_CTL_VCT_M;
	if (cycle & VME_BLT)
		temp_ctl |= CA91CX42_LSI_CTL_VCT_BLT;
	if (cycle & VME_MBLT)
		temp_ctl |= CA91CX42_LSI_CTL_VCT_MBLT;

	/* Setup data width */
	temp_ctl &= ~CA91CX42_LSI_CTL_VDW_M;
	switch (dwidth) {
	case VME_D8:
		temp_ctl |= CA91CX42_LSI_CTL_VDW_D8;
		break;
	case VME_D16:
		temp_ctl |= CA91CX42_LSI_CTL_VDW_D16;
		break;
	case VME_D32:
		temp_ctl |= CA91CX42_LSI_CTL_VDW_D32;
		break;
	case VME_D64:
		temp_ctl |= CA91CX42_LSI_CTL_VDW_D64;
		break;
	default:
		spin_unlock(&image->lock);
		dev_err(ca91cx42_bridge->parent, "Invalid data width\n");
		retval = -EINVAL;
		goto err_dwidth;
		break;
	}

	/* Setup address space */
	temp_ctl &= ~CA91CX42_LSI_CTL_VAS_M;
	switch (aspace) {
	case VME_A16:
		temp_ctl |= CA91CX42_LSI_CTL_VAS_A16;
		break;
	case VME_A24:
		temp_ctl |= CA91CX42_LSI_CTL_VAS_A24;
		break;
	case VME_A32:
		temp_ctl |= CA91CX42_LSI_CTL_VAS_A32;
		break;
	case VME_CRCSR:
		temp_ctl |= CA91CX42_LSI_CTL_VAS_CRCSR;
		break;
	case VME_USER1:
		temp_ctl |= CA91CX42_LSI_CTL_VAS_USER1;
		break;
	case VME_USER2:
		temp_ctl |= CA91CX42_LSI_CTL_VAS_USER2;
		break;
	case VME_A64:
	case VME_USER3:
	case VME_USER4:
	default:
		spin_unlock(&image->lock);
		dev_err(ca91cx42_bridge->parent, "Invalid address space\n");
		retval = -EINVAL;
		goto err_aspace;
		break;
	}

	temp_ctl &= ~(CA91CX42_LSI_CTL_PGM_M | CA91CX42_LSI_CTL_SUPER_M);
	if (cycle & VME_SUPER)
		temp_ctl |= CA91CX42_LSI_CTL_SUPER_SUPR;
	if (cycle & VME_PROG)
		temp_ctl |= CA91CX42_LSI_CTL_PGM_PGM;

	/* Setup mapping */
	iowrite32(pci_base, bridge->base + CA91CX42_LSI_BS[i]);
	iowrite32(pci_bound, bridge->base + CA91CX42_LSI_BD[i]);
	iowrite32(vme_offset, bridge->base + CA91CX42_LSI_TO[i]);

	/* Write ctl reg without enable */
	iowrite32(temp_ctl, bridge->base + CA91CX42_LSI_CTL[i]);

	if (enabled)
		temp_ctl |= CA91CX42_LSI_CTL_EN;

	iowrite32(temp_ctl, bridge->base + CA91CX42_LSI_CTL[i]);

	spin_unlock(&image->lock);
	return 0;

err_aspace:
err_dwidth:
	ca91cx42_free_resource(image);
err_res:
err_window:
	return retval;
}
753
754static int __ca91cx42_master_get(struct vme_master_resource *image,
755 int *enabled, unsigned long long *vme_base, unsigned long long *size,
756 u32 *aspace, u32 *cycle, u32 *dwidth)
757{
758 unsigned int i, ctl;
759 unsigned long long pci_base, pci_bound, vme_offset;
760 struct ca91cx42_driver *bridge;
761
762 bridge = image->parent->driver_priv;
763
764 i = image->number;
765
766 ctl = ioread32(bridge->base + CA91CX42_LSI_CTL[i]);
767
768 pci_base = ioread32(bridge->base + CA91CX42_LSI_BS[i]);
769 vme_offset = ioread32(bridge->base + CA91CX42_LSI_TO[i]);
770 pci_bound = ioread32(bridge->base + CA91CX42_LSI_BD[i]);
771
772 *vme_base = pci_base + vme_offset;
773 *size = (unsigned long long)(pci_bound - pci_base);
774
775 *enabled = 0;
776 *aspace = 0;
777 *cycle = 0;
778 *dwidth = 0;
779
780 if (ctl & CA91CX42_LSI_CTL_EN)
781 *enabled = 1;
782
783 /* Setup address space */
784 switch (ctl & CA91CX42_LSI_CTL_VAS_M) {
785 case CA91CX42_LSI_CTL_VAS_A16:
786 *aspace = VME_A16;
787 break;
788 case CA91CX42_LSI_CTL_VAS_A24:
789 *aspace = VME_A24;
790 break;
791 case CA91CX42_LSI_CTL_VAS_A32:
792 *aspace = VME_A32;
793 break;
794 case CA91CX42_LSI_CTL_VAS_CRCSR:
795 *aspace = VME_CRCSR;
796 break;
797 case CA91CX42_LSI_CTL_VAS_USER1:
798 *aspace = VME_USER1;
799 break;
800 case CA91CX42_LSI_CTL_VAS_USER2:
801 *aspace = VME_USER2;
802 break;
803 }
804
805 /* XXX Not sure howto check for MBLT */
806 /* Setup cycle types */
807 if (ctl & CA91CX42_LSI_CTL_VCT_BLT)
808 *cycle |= VME_BLT;
809 else
810 *cycle |= VME_SCT;
811
812 if (ctl & CA91CX42_LSI_CTL_SUPER_SUPR)
813 *cycle |= VME_SUPER;
814 else
815 *cycle |= VME_USER;
816
817 if (ctl & CA91CX42_LSI_CTL_PGM_PGM)
818 *cycle = VME_PROG;
819 else
820 *cycle = VME_DATA;
821
822 /* Setup data width */
823 switch (ctl & CA91CX42_LSI_CTL_VDW_M) {
824 case CA91CX42_LSI_CTL_VDW_D8:
825 *dwidth = VME_D8;
826 break;
827 case CA91CX42_LSI_CTL_VDW_D16:
828 *dwidth = VME_D16;
829 break;
830 case CA91CX42_LSI_CTL_VDW_D32:
831 *dwidth = VME_D32;
832 break;
833 case CA91CX42_LSI_CTL_VDW_D64:
834 *dwidth = VME_D64;
835 break;
836 }
837
838 return 0;
839}
840
/* Locked wrapper around __ca91cx42_master_get(): takes image->lock so
 * the register read-back is not interleaved with a concurrent
 * ca91cx42_master_set() reprogramming the same window. */
static int ca91cx42_master_get(struct vme_master_resource *image, int *enabled,
	unsigned long long *vme_base, unsigned long long *size, u32 *aspace,
	u32 *cycle, u32 *dwidth)
{
	int retval;

	spin_lock(&image->lock);

	retval = __ca91cx42_master_get(image, enabled, vme_base, size, aspace,
		cycle, dwidth);

	spin_unlock(&image->lock);

	return retval;
}
856
857static ssize_t ca91cx42_master_read(struct vme_master_resource *image,
858 void *buf, size_t count, loff_t offset)
859{
860 ssize_t retval;
861 void *addr = image->kern_base + offset;
862 unsigned int done = 0;
863 unsigned int count32;
864
865 if (count == 0)
866 return 0;
867
868 spin_lock(&image->lock);
869
870 /* The following code handles VME address alignment problem
871 * in order to assure the maximal data width cycle.
872 * We cannot use memcpy_xxx directly here because it
873 * may cut data transfer in 8-bits cycles, thus making
874 * D16 cycle impossible.
875 * From the other hand, the bridge itself assures that
876 * maximal configured data cycle is used and splits it
877 * automatically for non-aligned addresses.
878 */
879 if ((uintptr_t)addr & 0x1) {
880 *(u8 *)buf = ioread8(addr);
881 done += 1;
882 if (done == count)
883 goto out;
884 }
885 if ((uintptr_t)addr & 0x2) {
886 if ((count - done) < 2) {
887 *(u8 *)(buf + done) = ioread8(addr + done);
888 done += 1;
889 goto out;
890 } else {
891 *(u16 *)(buf + done) = ioread16(addr + done);
892 done += 2;
893 }
894 }
895
896 count32 = (count - done) & ~0x3;
897 if (count32 > 0) {
898 memcpy_fromio(buf + done, addr + done, (unsigned int)count);
899 done += count32;
900 }
901
902 if ((count - done) & 0x2) {
903 *(u16 *)(buf + done) = ioread16(addr + done);
904 done += 2;
905 }
906 if ((count - done) & 0x1) {
907 *(u8 *)(buf + done) = ioread8(addr + done);
908 done += 1;
909 }
910out:
911 retval = count;
912 spin_unlock(&image->lock);
913
914 return retval;
915}
916
/* Write a kernel buffer out through a VME master window.
 *
 * Returns 'count'; VME bus errors are reported asynchronously by the
 * bridge, so no short writes are signalled here.
 */
static ssize_t ca91cx42_master_write(struct vme_master_resource *image,
	void *buf, size_t count, loff_t offset)
{
	ssize_t retval;
	void *addr = image->kern_base + offset;
	unsigned int done = 0;
	unsigned int count32;

	if (count == 0)
		return 0;

	spin_lock(&image->lock);

	/* Apply the same strategy as in master_read: hand-align the
	 * transfer so the bridge can issue the maximal (e.g. D16/D32)
	 * data-width cycles instead of falling back to byte cycles.
	 */
	/* Leading byte if the window address is odd */
	if ((uintptr_t)addr & 0x1) {
		iowrite8(*(u8 *)buf, addr);
		done += 1;
		if (done == count)
			goto out;
	}
	/* Leading 16-bit word to reach 32-bit alignment */
	if ((uintptr_t)addr & 0x2) {
		if ((count - done) < 2) {
			iowrite8(*(u8 *)(buf + done), addr + done);
			done += 1;
			goto out;
		} else {
			iowrite16(*(u16 *)(buf + done), addr + done);
			done += 2;
		}
	}

	/* Bulk of the transfer as 32-bit aligned data */
	count32 = (count - done) & ~0x3;
	if (count32 > 0) {
		memcpy_toio(addr + done, buf + done, count32);
		done += count32;
	}

	/* Trailing word and byte */
	if ((count - done) & 0x2) {
		iowrite16(*(u16 *)(buf + done), addr + done);
		done += 2;
	}
	if ((count - done) & 0x1) {
		iowrite8(*(u8 *)(buf + done), addr + done);
		done += 1;
	}
out:
	retval = count;

	spin_unlock(&image->lock);

	return retval;
}
971
/* Perform a VME read-modify-write cycle using the Universe II special
 * cycle generator (SCYC registers).
 *
 * 'mask' selects the bits taking part, 'compare' is matched against the
 * data read and 'swap' supplies replacement bits.  The cycle is kicked
 * off by a plain read through the master window; the value read from the
 * bus is returned.
 *
 * NOTE(review): 'result' is u32 and the return type is unsigned int, yet
 * -EINVAL is stored on the alignment error path - callers cannot tell
 * that value apart from genuine bus data.  Confirm against the vme core
 * master_rmw contract.
 */
static unsigned int ca91cx42_master_rmw(struct vme_master_resource *image,
	unsigned int mask, unsigned int compare, unsigned int swap,
	loff_t offset)
{
	u32 result;
	uintptr_t pci_addr;
	int i;
	struct ca91cx42_driver *bridge;
	struct device *dev;

	bridge = image->parent->driver_priv;
	dev = image->parent->parent;

	/* Find the PCI address that maps to the desired VME address */
	i = image->number;

	/* Locking as we can only do one of these at a time */
	mutex_lock(&bridge->vme_rmw);

	/* Lock image */
	spin_lock(&image->lock);

	pci_addr = (uintptr_t)image->kern_base + offset;

	/* Address must be 4-byte aligned */
	if (pci_addr & 0x3) {
		dev_err(dev, "RMW Address not 4-byte aligned\n");
		result = -EINVAL;
		goto out;
	}

	/* Ensure RMW Disabled whilst configuring */
	iowrite32(0, bridge->base + SCYC_CTL);

	/* Configure registers */
	iowrite32(mask, bridge->base + SCYC_EN);
	iowrite32(compare, bridge->base + SCYC_CMP);
	iowrite32(swap, bridge->base + SCYC_SWP);
	iowrite32(pci_addr, bridge->base + SCYC_ADDR);

	/* Enable RMW */
	iowrite32(CA91CX42_SCYC_CTL_CYC_RMW, bridge->base + SCYC_CTL);

	/* Kick process off with a read to the required address. */
	result = ioread32(image->kern_base + offset);

	/* Disable RMW */
	iowrite32(0, bridge->base + SCYC_CTL);

out:
	spin_unlock(&image->lock);

	mutex_unlock(&bridge->vme_rmw);

	return result;
}
1028
1029static int ca91cx42_dma_list_add(struct vme_dma_list *list,
1030 struct vme_dma_attr *src, struct vme_dma_attr *dest, size_t count)
1031{
1032 struct ca91cx42_dma_entry *entry, *prev;
1033 struct vme_dma_pci *pci_attr;
1034 struct vme_dma_vme *vme_attr;
1035 dma_addr_t desc_ptr;
1036 int retval = 0;
1037 struct device *dev;
1038
1039 dev = list->parent->parent->parent;
1040
1041 /* XXX descriptor must be aligned on 64-bit boundaries */
1042 entry = kmalloc(sizeof(struct ca91cx42_dma_entry), GFP_KERNEL);
1043 if (entry == NULL) {
1044 dev_err(dev, "Failed to allocate memory for dma resource "
1045 "structure\n");
1046 retval = -ENOMEM;
1047 goto err_mem;
1048 }
1049
1050 /* Test descriptor alignment */
1051 if ((unsigned long)&entry->descriptor & CA91CX42_DCPP_M) {
1052 dev_err(dev, "Descriptor not aligned to 16 byte boundary as "
1053 "required: %p\n", &entry->descriptor);
1054 retval = -EINVAL;
1055 goto err_align;
1056 }
1057
1058 memset(&entry->descriptor, 0, sizeof(struct ca91cx42_dma_descriptor));
1059
1060 if (dest->type == VME_DMA_VME) {
1061 entry->descriptor.dctl |= CA91CX42_DCTL_L2V;
1062 vme_attr = dest->private;
1063 pci_attr = src->private;
1064 } else {
1065 vme_attr = src->private;
1066 pci_attr = dest->private;
1067 }
1068
1069 /* Check we can do fulfill required attributes */
1070 if ((vme_attr->aspace & ~(VME_A16 | VME_A24 | VME_A32 | VME_USER1 |
1071 VME_USER2)) != 0) {
1072
1073 dev_err(dev, "Unsupported cycle type\n");
1074 retval = -EINVAL;
1075 goto err_aspace;
1076 }
1077
1078 if ((vme_attr->cycle & ~(VME_SCT | VME_BLT | VME_SUPER | VME_USER |
1079 VME_PROG | VME_DATA)) != 0) {
1080
1081 dev_err(dev, "Unsupported cycle type\n");
1082 retval = -EINVAL;
1083 goto err_cycle;
1084 }
1085
1086 /* Check to see if we can fulfill source and destination */
1087 if (!(((src->type == VME_DMA_PCI) && (dest->type == VME_DMA_VME)) ||
1088 ((src->type == VME_DMA_VME) && (dest->type == VME_DMA_PCI)))) {
1089
1090 dev_err(dev, "Cannot perform transfer with this "
1091 "source-destination combination\n");
1092 retval = -EINVAL;
1093 goto err_direct;
1094 }
1095
1096 /* Setup cycle types */
1097 if (vme_attr->cycle & VME_BLT)
1098 entry->descriptor.dctl |= CA91CX42_DCTL_VCT_BLT;
1099
1100 /* Setup data width */
1101 switch (vme_attr->dwidth) {
1102 case VME_D8:
1103 entry->descriptor.dctl |= CA91CX42_DCTL_VDW_D8;
1104 break;
1105 case VME_D16:
1106 entry->descriptor.dctl |= CA91CX42_DCTL_VDW_D16;
1107 break;
1108 case VME_D32:
1109 entry->descriptor.dctl |= CA91CX42_DCTL_VDW_D32;
1110 break;
1111 case VME_D64:
1112 entry->descriptor.dctl |= CA91CX42_DCTL_VDW_D64;
1113 break;
1114 default:
1115 dev_err(dev, "Invalid data width\n");
1116 return -EINVAL;
1117 }
1118
1119 /* Setup address space */
1120 switch (vme_attr->aspace) {
1121 case VME_A16:
1122 entry->descriptor.dctl |= CA91CX42_DCTL_VAS_A16;
1123 break;
1124 case VME_A24:
1125 entry->descriptor.dctl |= CA91CX42_DCTL_VAS_A24;
1126 break;
1127 case VME_A32:
1128 entry->descriptor.dctl |= CA91CX42_DCTL_VAS_A32;
1129 break;
1130 case VME_USER1:
1131 entry->descriptor.dctl |= CA91CX42_DCTL_VAS_USER1;
1132 break;
1133 case VME_USER2:
1134 entry->descriptor.dctl |= CA91CX42_DCTL_VAS_USER2;
1135 break;
1136 default:
1137 dev_err(dev, "Invalid address space\n");
1138 return -EINVAL;
1139 break;
1140 }
1141
1142 if (vme_attr->cycle & VME_SUPER)
1143 entry->descriptor.dctl |= CA91CX42_DCTL_SUPER_SUPR;
1144 if (vme_attr->cycle & VME_PROG)
1145 entry->descriptor.dctl |= CA91CX42_DCTL_PGM_PGM;
1146
1147 entry->descriptor.dtbc = count;
1148 entry->descriptor.dla = pci_attr->address;
1149 entry->descriptor.dva = vme_attr->address;
1150 entry->descriptor.dcpp = CA91CX42_DCPP_NULL;
1151
1152 /* Add to list */
1153 list_add_tail(&entry->list, &list->entries);
1154
1155 /* Fill out previous descriptors "Next Address" */
1156 if (entry->list.prev != &list->entries) {
1157 prev = list_entry(entry->list.prev, struct ca91cx42_dma_entry,
1158 list);
1159 /* We need the bus address for the pointer */
1160 desc_ptr = virt_to_bus(&entry->descriptor);
1161 prev->descriptor.dcpp = desc_ptr & ~CA91CX42_DCPP_M;
1162 }
1163
1164 return 0;
1165
1166err_cycle:
1167err_aspace:
1168err_direct:
1169err_align:
1170 kfree(entry);
1171err_mem:
1172 return retval;
1173}
1174
1175static int ca91cx42_dma_busy(struct vme_bridge *ca91cx42_bridge)
1176{
1177 u32 tmp;
1178 struct ca91cx42_driver *bridge;
1179
1180 bridge = ca91cx42_bridge->driver_priv;
1181
1182 tmp = ioread32(bridge->base + DGCS);
1183
1184 if (tmp & CA91CX42_DGCS_ACT)
1185 return 0;
1186 else
1187 return 1;
1188}
1189
/* Execute a previously-built DMA list.
 *
 * Marks the list as running on its controller, programs the first
 * descriptor's bus address into the hardware, starts the engine and
 * sleeps until ca91cx42_dma_busy() reports the engine idle.
 *
 * Returns 0; note DMA errors are logged from DGCS but not propagated in
 * 'retval'.  NOTE(review): wait_event_interruptible's return value is
 * ignored, so a signal lets the function proceed as if the transfer had
 * completed - confirm whether that is intended.
 */
static int ca91cx42_dma_list_exec(struct vme_dma_list *list)
{
	struct vme_dma_resource *ctrlr;
	struct ca91cx42_dma_entry *entry;
	int retval = 0;
	dma_addr_t bus_addr;
	u32 val;
	struct device *dev;
	struct ca91cx42_driver *bridge;

	ctrlr = list->parent;

	bridge = ctrlr->parent->driver_priv;
	dev = ctrlr->parent->parent;

	mutex_lock(&ctrlr->mtx);

	if (!(list_empty(&ctrlr->running))) {
		/*
		 * XXX We have an active DMA transfer and currently haven't
		 * sorted out the mechanism for "pending" DMA transfers.
		 * Return busy.
		 */
		/* Need to add to pending here */
		mutex_unlock(&ctrlr->mtx);
		return -EBUSY;
	} else {
		list_add(&list->list, &ctrlr->running);
	}

	/* Get first bus address and write into registers */
	entry = list_first_entry(&list->entries, struct ca91cx42_dma_entry,
		list);

	bus_addr = virt_to_bus(&entry->descriptor);

	/* Mutex dropped here; the list stays claimed via ctrlr->running */
	mutex_unlock(&ctrlr->mtx);

	/* Zero the byte count and point the engine at the first descriptor */
	iowrite32(0, bridge->base + DTBC);
	iowrite32(bus_addr & ~CA91CX42_DCPP_M, bridge->base + DCPP);

	/* Start the operation */
	val = ioread32(bridge->base + DGCS);

	/* XXX Could set VMEbus On and Off Counters here */
	val &= (CA91CX42_DGCS_VON_M | CA91CX42_DGCS_VOFF_M);

	/* Chained mode, stopped/halted interrupts plus all error/done bits */
	val |= (CA91CX42_DGCS_CHAIN | CA91CX42_DGCS_STOP | CA91CX42_DGCS_HALT |
		CA91CX42_DGCS_DONE | CA91CX42_DGCS_LERR | CA91CX42_DGCS_VERR |
		CA91CX42_DGCS_PERR);

	iowrite32(val, bridge->base + DGCS);

	/* Set GO in a second write to kick off the transfer */
	val |= CA91CX42_DGCS_GO;

	iowrite32(val, bridge->base + DGCS);

	/* Sleep until the engine reports idle (woken from the DMA IRQ) */
	wait_event_interruptible(bridge->dma_queue,
		ca91cx42_dma_busy(ctrlr->parent));

	/*
	 * Read status register, this register is valid until we kick off a
	 * new transfer.
	 */
	val = ioread32(bridge->base + DGCS);

	if (val & (CA91CX42_DGCS_LERR | CA91CX42_DGCS_VERR |
		CA91CX42_DGCS_PERR)) {

		dev_err(dev, "ca91c042: DMA Error. DGCS=%08X\n", val);
		val = ioread32(bridge->base + DCTL);
	}

	/* Remove list from running list */
	mutex_lock(&ctrlr->mtx);
	list_del(&list->list);
	mutex_unlock(&ctrlr->mtx);

	return retval;

}
1271
1272static int ca91cx42_dma_list_empty(struct vme_dma_list *list)
1273{
1274 struct list_head *pos, *temp;
1275 struct ca91cx42_dma_entry *entry;
1276
1277 /* detach and free each entry */
1278 list_for_each_safe(pos, temp, &list->entries) {
1279 list_del(pos);
1280 entry = list_entry(pos, struct ca91cx42_dma_entry, list);
1281 kfree(entry);
1282 }
1283
1284 return 0;
1285}
1286
1287/*
1288 * All 4 location monitors reside at the same base - this is therefore a
1289 * system wide configuration.
1290 *
1291 * This does not enable the LM monitor - that should be done when the first
1292 * callback is attached and disabled when the last callback is removed.
1293 */
static int ca91cx42_lm_set(struct vme_lm_resource *lm,
	unsigned long long lm_base, u32 aspace, u32 cycle)
{
	u32 temp_base, lm_ctl = 0;
	int i;
	struct ca91cx42_driver *bridge;
	struct device *dev;

	bridge = lm->parent->driver_priv;
	dev = lm->parent->parent;

	/* Check the alignment of the location monitor */
	temp_base = (u32)lm_base;
	if (temp_base & 0xffff) {
		dev_err(dev, "Location monitor must be aligned to 64KB "
			"boundary");
		return -EINVAL;
	}

	mutex_lock(&lm->mtx);

	/* If we already have a callback attached, we can't move it! */
	for (i = 0; i < lm->monitors; i++) {
		if (bridge->lm_callback[i] != NULL) {
			mutex_unlock(&lm->mtx);
			dev_err(dev, "Location monitor callback attached, "
				"can't reset\n");
			return -EBUSY;
		}
	}

	/* Translate the requested address space into LM_CTL bits */
	switch (aspace) {
	case VME_A16:
		lm_ctl |= CA91CX42_LM_CTL_AS_A16;
		break;
	case VME_A24:
		lm_ctl |= CA91CX42_LM_CTL_AS_A24;
		break;
	case VME_A32:
		lm_ctl |= CA91CX42_LM_CTL_AS_A32;
		break;
	default:
		mutex_unlock(&lm->mtx);
		dev_err(dev, "Invalid address space\n");
		return -EINVAL;
		break;
	}

	/* Cycle qualifiers: supervisor/non-privileged, program/data */
	if (cycle & VME_SUPER)
		lm_ctl |= CA91CX42_LM_CTL_SUPR;
	if (cycle & VME_USER)
		lm_ctl |= CA91CX42_LM_CTL_NPRIV;
	if (cycle & VME_PROG)
		lm_ctl |= CA91CX42_LM_CTL_PGM;
	if (cycle & VME_DATA)
		lm_ctl |= CA91CX42_LM_CTL_DATA;

	/* Program base and control; enabling happens in lm_attach() */
	iowrite32(lm_base, bridge->base + LM_BS);
	iowrite32(lm_ctl, bridge->base + LM_CTL);

	mutex_unlock(&lm->mtx);

	return 0;
}
1358
1359/* Get configuration of the callback monitor and return whether it is enabled
1360 * or disabled.
1361 */
1362static int ca91cx42_lm_get(struct vme_lm_resource *lm,
1363 unsigned long long *lm_base, u32 *aspace, u32 *cycle)
1364{
1365 u32 lm_ctl, enabled = 0;
1366 struct ca91cx42_driver *bridge;
1367
1368 bridge = lm->parent->driver_priv;
1369
1370 mutex_lock(&lm->mtx);
1371
1372 *lm_base = (unsigned long long)ioread32(bridge->base + LM_BS);
1373 lm_ctl = ioread32(bridge->base + LM_CTL);
1374
1375 if (lm_ctl & CA91CX42_LM_CTL_EN)
1376 enabled = 1;
1377
1378 if ((lm_ctl & CA91CX42_LM_CTL_AS_M) == CA91CX42_LM_CTL_AS_A16)
1379 *aspace = VME_A16;
1380 if ((lm_ctl & CA91CX42_LM_CTL_AS_M) == CA91CX42_LM_CTL_AS_A24)
1381 *aspace = VME_A24;
1382 if ((lm_ctl & CA91CX42_LM_CTL_AS_M) == CA91CX42_LM_CTL_AS_A32)
1383 *aspace = VME_A32;
1384
1385 *cycle = 0;
1386 if (lm_ctl & CA91CX42_LM_CTL_SUPR)
1387 *cycle |= VME_SUPER;
1388 if (lm_ctl & CA91CX42_LM_CTL_NPRIV)
1389 *cycle |= VME_USER;
1390 if (lm_ctl & CA91CX42_LM_CTL_PGM)
1391 *cycle |= VME_PROG;
1392 if (lm_ctl & CA91CX42_LM_CTL_DATA)
1393 *cycle |= VME_DATA;
1394
1395 mutex_unlock(&lm->mtx);
1396
1397 return enabled;
1398}
1399
1400/*
1401 * Attach a callback to a specific location monitor.
1402 *
1403 * Callback will be passed the monitor triggered.
1404 */
static int ca91cx42_lm_attach(struct vme_lm_resource *lm, int monitor,
	void (*callback)(int))
{
	u32 lm_ctl, tmp;
	struct ca91cx42_driver *bridge;
	struct device *dev;

	/* NOTE(review): 'monitor' indexes lm_callback[4] and
	 * CA91CX42_LINT_LM[] without a bounds check - confirm the vme core
	 * validates it against lm->monitors before calling.
	 */
	bridge = lm->parent->driver_priv;
	dev = lm->parent->parent;

	mutex_lock(&lm->mtx);

	/* Ensure that the location monitor is configured - need PGM or DATA */
	lm_ctl = ioread32(bridge->base + LM_CTL);
	if ((lm_ctl & (CA91CX42_LM_CTL_PGM | CA91CX42_LM_CTL_DATA)) == 0) {
		mutex_unlock(&lm->mtx);
		dev_err(dev, "Location monitor not properly configured\n");
		return -EINVAL;
	}

	/* Check that a callback isn't already attached */
	if (bridge->lm_callback[monitor] != NULL) {
		mutex_unlock(&lm->mtx);
		dev_err(dev, "Existing callback attached\n");
		return -EBUSY;
	}

	/* Attach callback */
	bridge->lm_callback[monitor] = callback;

	/* Enable Location Monitor interrupt */
	tmp = ioread32(bridge->base + LINT_EN);
	tmp |= CA91CX42_LINT_LM[monitor];
	iowrite32(tmp, bridge->base + LINT_EN);

	/* Ensure that global Location Monitor Enable set */
	if ((lm_ctl & CA91CX42_LM_CTL_EN) == 0) {
		lm_ctl |= CA91CX42_LM_CTL_EN;
		iowrite32(lm_ctl, bridge->base + LM_CTL);
	}

	mutex_unlock(&lm->mtx);

	return 0;
}
1450
1451/*
 * Detach a callback function from a specific location monitor.
1453 */
static int ca91cx42_lm_detach(struct vme_lm_resource *lm, int monitor)
{
	u32 tmp;
	struct ca91cx42_driver *bridge;

	bridge = lm->parent->driver_priv;

	mutex_lock(&lm->mtx);

	/* Disable Location Monitor and ensure previous interrupts are clear */
	tmp = ioread32(bridge->base + LINT_EN);
	tmp &= ~CA91CX42_LINT_LM[monitor];
	iowrite32(tmp, bridge->base + LINT_EN);

	/* Writing the LM bit to LINT_STAT clears any latched interrupt */
	iowrite32(CA91CX42_LINT_LM[monitor],
		 bridge->base + LINT_STAT);

	/* Detach callback */
	bridge->lm_callback[monitor] = NULL;

	/* If all location monitors disabled, disable global Location Monitor */
	if ((tmp & (CA91CX42_LINT_LM0 | CA91CX42_LINT_LM1 | CA91CX42_LINT_LM2 |
			CA91CX42_LINT_LM3)) == 0) {
		tmp = ioread32(bridge->base + LM_CTL);
		tmp &= ~CA91CX42_LM_CTL_EN;
		iowrite32(tmp, bridge->base + LM_CTL);
	}

	mutex_unlock(&lm->mtx);

	return 0;
}
1486
1487static int ca91cx42_slot_get(struct vme_bridge *ca91cx42_bridge)
1488{
1489 u32 slot = 0;
1490 struct ca91cx42_driver *bridge;
1491
1492 bridge = ca91cx42_bridge->driver_priv;
1493
1494 if (!geoid) {
1495 slot = ioread32(bridge->base + VCSR_BS);
1496 slot = ((slot & CA91CX42_VCSR_BS_SLOT_M) >> 27);
1497 } else
1498 slot = geoid;
1499
1500 return (int)slot;
1501
1502}
1503
/* Allocate a coherent DMA buffer on behalf of the vme core.
 *
 * NOTE(review): assumes 'parent' is the embedded struct device of a
 * pci_dev (it is set to &pdev->dev in ca91cx42_probe) - container_of is
 * only valid under that assumption.
 */
void *ca91cx42_alloc_consistent(struct device *parent, size_t size,
	dma_addr_t *dma)
{
	struct pci_dev *pdev;

	/* Find pci_dev container of dev */
	pdev = container_of(parent, struct pci_dev, dev);

	return pci_alloc_consistent(pdev, size, dma);
}
1514
/* Free a coherent DMA buffer obtained via ca91cx42_alloc_consistent().
 * Same precondition as the allocator: 'parent' must be a pci_dev's dev.
 */
void ca91cx42_free_consistent(struct device *parent, size_t size, void *vaddr,
	dma_addr_t dma)
{
	struct pci_dev *pdev;

	/* Find pci_dev container of dev */
	pdev = container_of(parent, struct pci_dev, dev);

	pci_free_consistent(pdev, size, vaddr, dma);
}
1525
/* Module entry point: register the PCI driver for the Universe bridge. */
static int __init ca91cx42_init(void)
{
	return pci_register_driver(&ca91cx42_driver);
}
1530
1531/*
1532 * Configure CR/CSR space
1533 *
1534 * Access to the CR/CSR can be configured at power-up. The location of the
1535 * CR/CSR registers in the CR/CSR address space is determined by the boards
1536 * Auto-ID or Geographic address. This function ensures that the window is
 * enabled at an offset consistent with the board's geographic address.
1538 */
static int ca91cx42_crcsr_init(struct vme_bridge *ca91cx42_bridge,
	struct pci_dev *pdev)
{
	unsigned int crcsr_addr;
	int tmp, slot;
	struct ca91cx42_driver *bridge;

	bridge = ca91cx42_bridge->driver_priv;

	slot = ca91cx42_slot_get(ca91cx42_bridge);

	/* Write CSR Base Address if slot ID is supplied as a module param */
	if (geoid)
		iowrite32(geoid << 27, bridge->base + VCSR_BS);

	dev_info(&pdev->dev, "CR/CSR Offset: %d\n", slot);
	/* Slot 0 means no geographic address is available */
	if (slot == 0) {
		dev_err(&pdev->dev, "Slot number is unset, not configuring "
			"CR/CSR space\n");
		return -EINVAL;
	}

	/* Allocate mem for CR/CSR image */
	bridge->crcsr_kernel = pci_alloc_consistent(pdev, VME_CRCSR_BUF_SIZE,
		&bridge->crcsr_bus);
	if (bridge->crcsr_kernel == NULL) {
		dev_err(&pdev->dev, "Failed to allocate memory for CR/CSR "
			"image\n");
		return -ENOMEM;
	}

	memset(bridge->crcsr_kernel, 0, VME_CRCSR_BUF_SIZE);

	/* Each slot owns a 512KB region of CR/CSR space; program the
	 * translation offset so our region maps onto the buffer, then
	 * enable the VMEbus CSR window.
	 */
	crcsr_addr = slot * (512 * 1024);
	iowrite32(bridge->crcsr_bus - crcsr_addr, bridge->base + VCSR_TO);

	tmp = ioread32(bridge->base + VCSR_CTL);
	tmp |= CA91CX42_VCSR_CTL_EN;
	iowrite32(tmp, bridge->base + VCSR_CTL);

	return 0;
}
1581
/* Disable the CR/CSR window and release its backing buffer. */
static void ca91cx42_crcsr_exit(struct vme_bridge *ca91cx42_bridge,
	struct pci_dev *pdev)
{
	u32 tmp;
	struct ca91cx42_driver *bridge;

	bridge = ca91cx42_bridge->driver_priv;

	/* Turn off CR/CSR space */
	tmp = ioread32(bridge->base + VCSR_CTL);
	tmp &= ~CA91CX42_VCSR_CTL_EN;
	iowrite32(tmp, bridge->base + VCSR_CTL);

	/* Clear the translation offset before freeing the image */
	iowrite32(0, bridge->base + VCSR_TO);

	pci_free_consistent(pdev, VME_CRCSR_BUF_SIZE, bridge->crcsr_kernel,
		bridge->crcsr_bus);
}
1601
/* PCI probe: bring up a Universe II bridge.
 *
 * Allocates the vme_bridge and driver-private structures, enables and
 * maps the PCI device, verifies the chip ID, initialises interrupts,
 * builds the master/slave/DMA/location-monitor resource lists, installs
 * the bridge operation callbacks and registers with the vme core.
 * On failure, unwinds in strict reverse order via the goto chain.
 */
static int ca91cx42_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
	int retval, i;
	u32 data;
	struct list_head *pos = NULL;
	struct vme_bridge *ca91cx42_bridge;
	struct ca91cx42_driver *ca91cx42_device;
	struct vme_master_resource *master_image;
	struct vme_slave_resource *slave_image;
	struct vme_dma_resource *dma_ctrlr;
	struct vme_lm_resource *lm;

	/* We want to support more than one of each bridge so we need to
	 * dynamically allocate the bridge structure
	 */
	ca91cx42_bridge = kzalloc(sizeof(struct vme_bridge), GFP_KERNEL);

	if (ca91cx42_bridge == NULL) {
		dev_err(&pdev->dev, "Failed to allocate memory for device "
			"structure\n");
		retval = -ENOMEM;
		goto err_struct;
	}

	ca91cx42_device = kzalloc(sizeof(struct ca91cx42_driver), GFP_KERNEL);

	if (ca91cx42_device == NULL) {
		dev_err(&pdev->dev, "Failed to allocate memory for device "
			"structure\n");
		retval = -ENOMEM;
		goto err_driver;
	}

	ca91cx42_bridge->driver_priv = ca91cx42_device;

	/* Enable the device */
	retval = pci_enable_device(pdev);
	if (retval) {
		dev_err(&pdev->dev, "Unable to enable device\n");
		goto err_enable;
	}

	/* Map Registers */
	retval = pci_request_regions(pdev, driver_name);
	if (retval) {
		dev_err(&pdev->dev, "Unable to reserve resources\n");
		goto err_resource;
	}

	/* map registers in BAR 0 */
	ca91cx42_device->base = ioremap_nocache(pci_resource_start(pdev, 0),
		4096);
	if (!ca91cx42_device->base) {
		dev_err(&pdev->dev, "Unable to remap CRG region\n");
		retval = -EIO;
		goto err_remap;
	}

	/* Check to see if the mapping worked out */
	data = ioread32(ca91cx42_device->base + CA91CX42_PCI_ID) & 0x0000FFFF;
	if (data != PCI_VENDOR_ID_TUNDRA) {
		dev_err(&pdev->dev, "PCI_ID check failed\n");
		retval = -EIO;
		goto err_test;
	}

	/* Initialize wait queues & mutual exclusion flags */
	init_waitqueue_head(&ca91cx42_device->dma_queue);
	init_waitqueue_head(&ca91cx42_device->iack_queue);
	mutex_init(&ca91cx42_device->vme_int);
	mutex_init(&ca91cx42_device->vme_rmw);

	ca91cx42_bridge->parent = &pdev->dev;
	strcpy(ca91cx42_bridge->name, driver_name);

	/* Setup IRQ */
	retval = ca91cx42_irq_init(ca91cx42_bridge);
	if (retval != 0) {
		dev_err(&pdev->dev, "Chip Initialization failed.\n");
		goto err_irq;
	}

	/* Add master windows to list */
	INIT_LIST_HEAD(&ca91cx42_bridge->master_resources);
	for (i = 0; i < CA91C142_MAX_MASTER; i++) {
		master_image = kmalloc(sizeof(struct vme_master_resource),
			GFP_KERNEL);
		if (master_image == NULL) {
			dev_err(&pdev->dev, "Failed to allocate memory for "
				"master resource structure\n");
			retval = -ENOMEM;
			goto err_master;
		}
		master_image->parent = ca91cx42_bridge;
		spin_lock_init(&master_image->lock);
		master_image->locked = 0;
		master_image->number = i;
		master_image->address_attr = VME_A16 | VME_A24 | VME_A32 |
			VME_CRCSR | VME_USER1 | VME_USER2;
		master_image->cycle_attr = VME_SCT | VME_BLT | VME_MBLT |
			VME_SUPER | VME_USER | VME_PROG | VME_DATA;
		master_image->width_attr = VME_D8 | VME_D16 | VME_D32 | VME_D64;
		memset(&master_image->bus_resource, 0,
			sizeof(struct resource));
		master_image->kern_base  = NULL;
		list_add_tail(&master_image->list,
			&ca91cx42_bridge->master_resources);
	}

	/* Add slave windows to list */
	INIT_LIST_HEAD(&ca91cx42_bridge->slave_resources);
	for (i = 0; i < CA91C142_MAX_SLAVE; i++) {
		slave_image = kmalloc(sizeof(struct vme_slave_resource),
			GFP_KERNEL);
		if (slave_image == NULL) {
			dev_err(&pdev->dev, "Failed to allocate memory for "
				"slave resource structure\n");
			retval = -ENOMEM;
			goto err_slave;
		}
		slave_image->parent = ca91cx42_bridge;
		mutex_init(&slave_image->mtx);
		slave_image->locked = 0;
		slave_image->number = i;
		slave_image->address_attr = VME_A24 | VME_A32 | VME_USER1 |
			VME_USER2;

		/* Only windows 0 and 4 support A16 */
		if (i == 0 || i == 4)
			slave_image->address_attr |= VME_A16;

		slave_image->cycle_attr = VME_SCT | VME_BLT | VME_MBLT |
			VME_SUPER | VME_USER | VME_PROG | VME_DATA;
		list_add_tail(&slave_image->list,
			&ca91cx42_bridge->slave_resources);
	}

	/* Add dma engines to list */
	INIT_LIST_HEAD(&ca91cx42_bridge->dma_resources);
	for (i = 0; i < CA91C142_MAX_DMA; i++) {
		dma_ctrlr = kmalloc(sizeof(struct vme_dma_resource),
			GFP_KERNEL);
		if (dma_ctrlr == NULL) {
			dev_err(&pdev->dev, "Failed to allocate memory for "
				"dma resource structure\n");
			retval = -ENOMEM;
			goto err_dma;
		}
		dma_ctrlr->parent = ca91cx42_bridge;
		mutex_init(&dma_ctrlr->mtx);
		dma_ctrlr->locked = 0;
		dma_ctrlr->number = i;
		dma_ctrlr->route_attr = VME_DMA_VME_TO_MEM |
			VME_DMA_MEM_TO_VME;
		INIT_LIST_HEAD(&dma_ctrlr->pending);
		INIT_LIST_HEAD(&dma_ctrlr->running);
		list_add_tail(&dma_ctrlr->list,
			&ca91cx42_bridge->dma_resources);
	}

	/* Add location monitor to list */
	INIT_LIST_HEAD(&ca91cx42_bridge->lm_resources);
	lm = kmalloc(sizeof(struct vme_lm_resource), GFP_KERNEL);
	if (lm == NULL) {
		dev_err(&pdev->dev, "Failed to allocate memory for "
			"location monitor resource structure\n");
		retval = -ENOMEM;
		goto err_lm;
	}
	lm->parent = ca91cx42_bridge;
	mutex_init(&lm->mtx);
	lm->locked = 0;
	lm->number = 1;
	lm->monitors = 4;
	list_add_tail(&lm->list, &ca91cx42_bridge->lm_resources);

	/* Install the bridge operation callbacks used by the vme core */
	ca91cx42_bridge->slave_get = ca91cx42_slave_get;
	ca91cx42_bridge->slave_set = ca91cx42_slave_set;
	ca91cx42_bridge->master_get = ca91cx42_master_get;
	ca91cx42_bridge->master_set = ca91cx42_master_set;
	ca91cx42_bridge->master_read = ca91cx42_master_read;
	ca91cx42_bridge->master_write = ca91cx42_master_write;
	ca91cx42_bridge->master_rmw = ca91cx42_master_rmw;
	ca91cx42_bridge->dma_list_add = ca91cx42_dma_list_add;
	ca91cx42_bridge->dma_list_exec = ca91cx42_dma_list_exec;
	ca91cx42_bridge->dma_list_empty = ca91cx42_dma_list_empty;
	ca91cx42_bridge->irq_set = ca91cx42_irq_set;
	ca91cx42_bridge->irq_generate = ca91cx42_irq_generate;
	ca91cx42_bridge->lm_set = ca91cx42_lm_set;
	ca91cx42_bridge->lm_get = ca91cx42_lm_get;
	ca91cx42_bridge->lm_attach = ca91cx42_lm_attach;
	ca91cx42_bridge->lm_detach = ca91cx42_lm_detach;
	ca91cx42_bridge->slot_get = ca91cx42_slot_get;
	ca91cx42_bridge->alloc_consistent = ca91cx42_alloc_consistent;
	ca91cx42_bridge->free_consistent = ca91cx42_free_consistent;

	data = ioread32(ca91cx42_device->base + MISC_CTL);
	dev_info(&pdev->dev, "Board is%s the VME system controller\n",
		(data & CA91CX42_MISC_CTL_SYSCON) ? "" : " not");
	dev_info(&pdev->dev, "Slot ID is %d\n",
		ca91cx42_slot_get(ca91cx42_bridge));

	/* CR/CSR failure is non-fatal: the bridge still works without it */
	if (ca91cx42_crcsr_init(ca91cx42_bridge, pdev))
		dev_err(&pdev->dev, "CR/CSR configuration failed.\n");

	/* Need to save ca91cx42_bridge pointer locally in link list for use in
	 * ca91cx42_remove()
	 */
	retval = vme_register_bridge(ca91cx42_bridge);
	if (retval != 0) {
		dev_err(&pdev->dev, "Chip Registration failed.\n");
		goto err_reg;
	}

	pci_set_drvdata(pdev, ca91cx42_bridge);

	return 0;

err_reg:
	ca91cx42_crcsr_exit(ca91cx42_bridge, pdev);
err_lm:
	/* resources are stored in link list */
	list_for_each(pos, &ca91cx42_bridge->lm_resources) {
		lm = list_entry(pos, struct vme_lm_resource, list);
		list_del(pos);
		kfree(lm);
	}
err_dma:
	/* resources are stored in link list */
	list_for_each(pos, &ca91cx42_bridge->dma_resources) {
		dma_ctrlr = list_entry(pos, struct vme_dma_resource, list);
		list_del(pos);
		kfree(dma_ctrlr);
	}
err_slave:
	/* resources are stored in link list */
	list_for_each(pos, &ca91cx42_bridge->slave_resources) {
		slave_image = list_entry(pos, struct vme_slave_resource, list);
		list_del(pos);
		kfree(slave_image);
	}
err_master:
	/* resources are stored in link list */
	list_for_each(pos, &ca91cx42_bridge->master_resources) {
		master_image = list_entry(pos, struct vme_master_resource,
			list);
		list_del(pos);
		kfree(master_image);
	}

	ca91cx42_irq_exit(ca91cx42_device, pdev);
err_irq:
err_test:
	iounmap(ca91cx42_device->base);
err_remap:
	pci_release_regions(pdev);
err_resource:
	pci_disable_device(pdev);
err_enable:
	kfree(ca91cx42_device);
err_driver:
	kfree(ca91cx42_bridge);
err_struct:
	return retval;

}
1868
/* PCI remove: quiesce the hardware (interrupts and all windows off),
 * unregister from the vme core and release every resource acquired in
 * ca91cx42_probe(), in reverse order.
 */
static void ca91cx42_remove(struct pci_dev *pdev)
{
	struct list_head *pos = NULL;
	struct vme_master_resource *master_image;
	struct vme_slave_resource *slave_image;
	struct vme_dma_resource *dma_ctrlr;
	struct vme_lm_resource *lm;
	struct ca91cx42_driver *bridge;
	struct vme_bridge *ca91cx42_bridge = pci_get_drvdata(pdev);

	bridge = ca91cx42_bridge->driver_priv;


	/* Turn off Ints */
	iowrite32(0, bridge->base + LINT_EN);

	/* Turn off the windows */
	iowrite32(0x00800000, bridge->base + LSI0_CTL);
	iowrite32(0x00800000, bridge->base + LSI1_CTL);
	iowrite32(0x00800000, bridge->base + LSI2_CTL);
	iowrite32(0x00800000, bridge->base + LSI3_CTL);
	iowrite32(0x00800000, bridge->base + LSI4_CTL);
	iowrite32(0x00800000, bridge->base + LSI5_CTL);
	iowrite32(0x00800000, bridge->base + LSI6_CTL);
	iowrite32(0x00800000, bridge->base + LSI7_CTL);
	iowrite32(0x00F00000, bridge->base + VSI0_CTL);
	iowrite32(0x00F00000, bridge->base + VSI1_CTL);
	iowrite32(0x00F00000, bridge->base + VSI2_CTL);
	iowrite32(0x00F00000, bridge->base + VSI3_CTL);
	iowrite32(0x00F00000, bridge->base + VSI4_CTL);
	iowrite32(0x00F00000, bridge->base + VSI5_CTL);
	iowrite32(0x00F00000, bridge->base + VSI6_CTL);
	iowrite32(0x00F00000, bridge->base + VSI7_CTL);

	vme_unregister_bridge(ca91cx42_bridge);

	ca91cx42_crcsr_exit(ca91cx42_bridge, pdev);

	/* resources are stored in link list */
	list_for_each(pos, &ca91cx42_bridge->lm_resources) {
		lm = list_entry(pos, struct vme_lm_resource, list);
		list_del(pos);
		kfree(lm);
	}

	/* resources are stored in link list */
	list_for_each(pos, &ca91cx42_bridge->dma_resources) {
		dma_ctrlr = list_entry(pos, struct vme_dma_resource, list);
		list_del(pos);
		kfree(dma_ctrlr);
	}

	/* resources are stored in link list */
	list_for_each(pos, &ca91cx42_bridge->slave_resources) {
		slave_image = list_entry(pos, struct vme_slave_resource, list);
		list_del(pos);
		kfree(slave_image);
	}

	/* resources are stored in link list */
	list_for_each(pos, &ca91cx42_bridge->master_resources) {
		master_image = list_entry(pos, struct vme_master_resource,
			list);
		list_del(pos);
		kfree(master_image);
	}

	ca91cx42_irq_exit(bridge, pdev);

	iounmap(bridge->base);

	pci_release_regions(pdev);

	pci_disable_device(pdev);

	kfree(ca91cx42_bridge);
}
1946
/* Module exit point: unregister the PCI driver. */
static void __exit ca91cx42_exit(void)
{
	pci_unregister_driver(&ca91cx42_driver);
}
1951
1952MODULE_PARM_DESC(geoid, "Override geographical addressing");
1953module_param(geoid, int, 0);
1954
1955MODULE_DESCRIPTION("VME driver for the Tundra Universe II VME bridge");
1956MODULE_LICENSE("GPL");
1957
1958module_init(ca91cx42_init);
1959module_exit(ca91cx42_exit);
diff --git a/drivers/vme/bridges/vme_ca91cx42.h b/drivers/vme/bridges/vme_ca91cx42.h
new file mode 100644
index 00000000000..02a7c794db0
--- /dev/null
+++ b/drivers/vme/bridges/vme_ca91cx42.h
@@ -0,0 +1,583 @@
1/*
2 * ca91c042.h
3 *
4 * Support for the Tundra Universe 1 and Universe II VME bridge chips
5 *
6 * Author: Tom Armistead
7 * Updated by Ajit Prem
8 * Copyright 2004 Motorola Inc.
9 *
10 * Further updated by Martyn Welch <martyn.welch@ge.com>
11 * Copyright 2009 GE Intelligent Platforms Embedded Systems, Inc.
12 *
13 * Derived from ca91c042.h by Michael Wyrick
14 *
15 * This program is free software; you can redistribute it and/or modify it
16 * under the terms of the GNU General Public License as published by the
17 * Free Software Foundation; either version 2 of the License, or (at your
18 * option) any later version.
19 */
20
21#ifndef _CA91CX42_H
22#define _CA91CX42_H
23
24#ifndef PCI_VENDOR_ID_TUNDRA
25#define PCI_VENDOR_ID_TUNDRA 0x10e3
26#endif
27
28#ifndef PCI_DEVICE_ID_TUNDRA_CA91C142
29#define PCI_DEVICE_ID_TUNDRA_CA91C142 0x0000
30#endif
31
32/*
33 * Define the number of each that the CA91C142 supports.
34 */
35#define CA91C142_MAX_MASTER 8 /* Max Master Windows */
36#define CA91C142_MAX_SLAVE 8 /* Max Slave Windows */
37#define CA91C142_MAX_DMA 1 /* Max DMA Controllers */
38#define CA91C142_MAX_MAILBOX 4 /* Max Mail Box registers */
39
/* Structure used to hold driver specific information */
struct ca91cx42_driver {
	void __iomem *base;	/* Base Address of device registers */
	wait_queue_head_t dma_queue;	/* waiters for DMA completion */
	wait_queue_head_t iack_queue;	/* waiters for VME IACK */
	wait_queue_head_t mbox_queue;	/* waiters for mailbox activity */
	void (*lm_callback[4])(int);	/* Called in interrupt handler */
	void *crcsr_kernel;	/* CR/CSR buffer: kernel virtual address */
	dma_addr_t crcsr_bus;	/* CR/CSR buffer: bus (DMA) address */
	struct mutex vme_rmw;	/* Only one RMW cycle at a time */
	struct mutex vme_int;	/*
				 * Only one VME interrupt can be
				 * generated at a time, provide locking
				 */
};
55
/* See Page 2-77 in the Universe User Manual */
/*
 * In-memory DMA linked-list command packet; field order mirrors the
 * 8 x 32-bit word layout documented in the manual reference above.
 */
struct ca91cx42_dma_descriptor {
	unsigned int dctl;	/* DMA Control */
	unsigned int dtbc;	/* Transfer Byte Count */
	unsigned int dla;	/* PCI Address */
	unsigned int res1;	/* Reserved */
	unsigned int dva;	/* Vme Address */
	unsigned int res2;	/* Reserved */
	unsigned int dcpp;	/* Pointer to Numed Cmd Packet with rPN
				 * (sic - likely "Next Cmd Packet"; bit 0 is
				 * the list terminator, cf. CA91CX42_DCPP_NULL)
				 */
	unsigned int res3;	/* Reserved */
};

/* One element of a driver-side DMA command chain. */
struct ca91cx42_dma_entry {
	struct ca91cx42_dma_descriptor descriptor;
	struct list_head list;
};
72
73/* Universe Register Offsets */
74/* general PCI configuration registers */
75#define CA91CX42_PCI_ID 0x000
76#define CA91CX42_PCI_CSR 0x004
77#define CA91CX42_PCI_CLASS 0x008
78#define CA91CX42_PCI_MISC0 0x00C
79#define CA91CX42_PCI_BS 0x010
80#define CA91CX42_PCI_MISC1 0x03C
81
82#define LSI0_CTL 0x0100
83#define LSI0_BS 0x0104
84#define LSI0_BD 0x0108
85#define LSI0_TO 0x010C
86
87#define LSI1_CTL 0x0114
88#define LSI1_BS 0x0118
89#define LSI1_BD 0x011C
90#define LSI1_TO 0x0120
91
92#define LSI2_CTL 0x0128
93#define LSI2_BS 0x012C
94#define LSI2_BD 0x0130
95#define LSI2_TO 0x0134
96
97#define LSI3_CTL 0x013C
98#define LSI3_BS 0x0140
99#define LSI3_BD 0x0144
100#define LSI3_TO 0x0148
101
102#define LSI4_CTL 0x01A0
103#define LSI4_BS 0x01A4
104#define LSI4_BD 0x01A8
105#define LSI4_TO 0x01AC
106
107#define LSI5_CTL 0x01B4
108#define LSI5_BS 0x01B8
109#define LSI5_BD 0x01BC
110#define LSI5_TO 0x01C0
111
112#define LSI6_CTL 0x01C8
113#define LSI6_BS 0x01CC
114#define LSI6_BD 0x01D0
115#define LSI6_TO 0x01D4
116
117#define LSI7_CTL 0x01DC
118#define LSI7_BS 0x01E0
119#define LSI7_BD 0x01E4
120#define LSI7_TO 0x01E8
121
122static const int CA91CX42_LSI_CTL[] = { LSI0_CTL, LSI1_CTL, LSI2_CTL, LSI3_CTL,
123 LSI4_CTL, LSI5_CTL, LSI6_CTL, LSI7_CTL };
124
125static const int CA91CX42_LSI_BS[] = { LSI0_BS, LSI1_BS, LSI2_BS, LSI3_BS,
126 LSI4_BS, LSI5_BS, LSI6_BS, LSI7_BS };
127
128static const int CA91CX42_LSI_BD[] = { LSI0_BD, LSI1_BD, LSI2_BD, LSI3_BD,
129 LSI4_BD, LSI5_BD, LSI6_BD, LSI7_BD };
130
131static const int CA91CX42_LSI_TO[] = { LSI0_TO, LSI1_TO, LSI2_TO, LSI3_TO,
132 LSI4_TO, LSI5_TO, LSI6_TO, LSI7_TO };
133
134#define SCYC_CTL 0x0170
135#define SCYC_ADDR 0x0174
136#define SCYC_EN 0x0178
137#define SCYC_CMP 0x017C
138#define SCYC_SWP 0x0180
139#define LMISC 0x0184
140#define SLSI 0x0188
141#define L_CMDERR 0x018C
142#define LAERR 0x0190
143
144#define DCTL 0x0200
145#define DTBC 0x0204
146#define DLA 0x0208
147#define DVA 0x0210
148#define DCPP 0x0218
149#define DGCS 0x0220
150#define D_LLUE 0x0224
151
152#define LINT_EN 0x0300
153#define LINT_STAT 0x0304
154#define LINT_MAP0 0x0308
155#define LINT_MAP1 0x030C
156#define VINT_EN 0x0310
157#define VINT_STAT 0x0314
158#define VINT_MAP0 0x0318
159#define VINT_MAP1 0x031C
160#define STATID 0x0320
161
162#define V1_STATID 0x0324
163#define V2_STATID 0x0328
164#define V3_STATID 0x032C
165#define V4_STATID 0x0330
166#define V5_STATID 0x0334
167#define V6_STATID 0x0338
168#define V7_STATID 0x033C
169
170static const int CA91CX42_V_STATID[8] = { 0, V1_STATID, V2_STATID, V3_STATID,
171 V4_STATID, V5_STATID, V6_STATID,
172 V7_STATID };
173
174#define LINT_MAP2 0x0340
175#define VINT_MAP2 0x0344
176
177#define MBOX0 0x0348
178#define MBOX1 0x034C
179#define MBOX2 0x0350
180#define MBOX3 0x0354
181#define SEMA0 0x0358
182#define SEMA1 0x035C
183
184#define MAST_CTL 0x0400
185#define MISC_CTL 0x0404
186#define MISC_STAT 0x0408
187#define USER_AM 0x040C
188
189#define VSI0_CTL 0x0F00
190#define VSI0_BS 0x0F04
191#define VSI0_BD 0x0F08
192#define VSI0_TO 0x0F0C
193
194#define VSI1_CTL 0x0F14
195#define VSI1_BS 0x0F18
196#define VSI1_BD 0x0F1C
197#define VSI1_TO 0x0F20
198
199#define VSI2_CTL 0x0F28
200#define VSI2_BS 0x0F2C
201#define VSI2_BD 0x0F30
202#define VSI2_TO 0x0F34
203
204#define VSI3_CTL 0x0F3C
205#define VSI3_BS 0x0F40
206#define VSI3_BD 0x0F44
207#define VSI3_TO 0x0F48
208
209#define LM_CTL 0x0F64
210#define LM_BS 0x0F68
211
212#define VRAI_CTL 0x0F70
213
214#define VRAI_BS 0x0F74
215#define VCSR_CTL 0x0F80
216#define VCSR_TO 0x0F84
217#define V_AMERR 0x0F88
218#define VAERR 0x0F8C
219
220#define VSI4_CTL 0x0F90
221#define VSI4_BS 0x0F94
222#define VSI4_BD 0x0F98
223#define VSI4_TO 0x0F9C
224
225#define VSI5_CTL 0x0FA4
226#define VSI5_BS 0x0FA8
227#define VSI5_BD 0x0FAC
228#define VSI5_TO 0x0FB0
229
230#define VSI6_CTL 0x0FB8
231#define VSI6_BS 0x0FBC
232#define VSI6_BD 0x0FC0
233#define VSI6_TO 0x0FC4
234
235#define VSI7_CTL 0x0FCC
236#define VSI7_BS 0x0FD0
237#define VSI7_BD 0x0FD4
238#define VSI7_TO 0x0FD8
239
240static const int CA91CX42_VSI_CTL[] = { VSI0_CTL, VSI1_CTL, VSI2_CTL, VSI3_CTL,
241 VSI4_CTL, VSI5_CTL, VSI6_CTL, VSI7_CTL };
242
243static const int CA91CX42_VSI_BS[] = { VSI0_BS, VSI1_BS, VSI2_BS, VSI3_BS,
244 VSI4_BS, VSI5_BS, VSI6_BS, VSI7_BS };
245
246static const int CA91CX42_VSI_BD[] = { VSI0_BD, VSI1_BD, VSI2_BD, VSI3_BD,
247 VSI4_BD, VSI5_BD, VSI6_BD, VSI7_BD };
248
249static const int CA91CX42_VSI_TO[] = { VSI0_TO, VSI1_TO, VSI2_TO, VSI3_TO,
250 VSI4_TO, VSI5_TO, VSI6_TO, VSI7_TO };
251
252#define VCSR_CLR 0x0FF4
253#define VCSR_SET 0x0FF8
254#define VCSR_BS 0x0FFC
255
256/*
257 * PCI Class Register
258 * offset 008
259 */
260#define CA91CX42_BM_PCI_CLASS_BASE 0xFF000000
261#define CA91CX42_OF_PCI_CLASS_BASE 24
262#define CA91CX42_BM_PCI_CLASS_SUB 0x00FF0000
263#define CA91CX42_OF_PCI_CLASS_SUB 16
264#define CA91CX42_BM_PCI_CLASS_PROG 0x0000FF00
265#define CA91CX42_OF_PCI_CLASS_PROG 8
266#define CA91CX42_BM_PCI_CLASS_RID 0x000000FF
267#define CA91CX42_OF_PCI_CLASS_RID 0
268
269#define CA91CX42_OF_PCI_CLASS_RID_UNIVERSE_I 0
270#define CA91CX42_OF_PCI_CLASS_RID_UNIVERSE_II 1
271
272/*
273 * PCI Misc Register
274 * offset 00C
275 */
276#define CA91CX42_BM_PCI_MISC0_BISTC 0x80000000
277#define CA91CX42_BM_PCI_MISC0_SBIST 0x60000000
278#define CA91CX42_BM_PCI_MISC0_CCODE 0x0F000000
279#define CA91CX42_BM_PCI_MISC0_MFUNCT 0x00800000
280#define CA91CX42_BM_PCI_MISC0_LAYOUT 0x007F0000
281#define CA91CX42_BM_PCI_MISC0_LTIMER 0x0000FF00
282#define CA91CX42_OF_PCI_MISC0_LTIMER 8
283
284
285/*
286 * LSI Control Register
287 * offset 100
288 */
289#define CA91CX42_LSI_CTL_EN (1<<31)
290#define CA91CX42_LSI_CTL_PWEN (1<<30)
291
292#define CA91CX42_LSI_CTL_VDW_M (3<<22)
293#define CA91CX42_LSI_CTL_VDW_D8 0
294#define CA91CX42_LSI_CTL_VDW_D16 (1<<22)
295#define CA91CX42_LSI_CTL_VDW_D32 (1<<23)
296#define CA91CX42_LSI_CTL_VDW_D64 (3<<22)
297
298#define CA91CX42_LSI_CTL_VAS_M (7<<16)
299#define CA91CX42_LSI_CTL_VAS_A16 0
300#define CA91CX42_LSI_CTL_VAS_A24 (1<<16)
301#define CA91CX42_LSI_CTL_VAS_A32 (1<<17)
302#define CA91CX42_LSI_CTL_VAS_CRCSR (5<<16)
303#define CA91CX42_LSI_CTL_VAS_USER1 (3<<17)
304#define CA91CX42_LSI_CTL_VAS_USER2 (7<<16)
305
306#define CA91CX42_LSI_CTL_PGM_M (1<<14)
307#define CA91CX42_LSI_CTL_PGM_DATA 0
308#define CA91CX42_LSI_CTL_PGM_PGM (1<<14)
309
310#define CA91CX42_LSI_CTL_SUPER_M (1<<12)
311#define CA91CX42_LSI_CTL_SUPER_NPRIV 0
312#define CA91CX42_LSI_CTL_SUPER_SUPR (1<<12)
313
314#define CA91CX42_LSI_CTL_VCT_M (1<<8)
315#define CA91CX42_LSI_CTL_VCT_BLT (1<<8)
316#define CA91CX42_LSI_CTL_VCT_MBLT (1<<8)
317#define CA91CX42_LSI_CTL_LAS (1<<0)
318
319/*
320 * SCYC_CTL Register
321 * offset 178
322 */
323#define CA91CX42_SCYC_CTL_LAS_PCIMEM 0
324#define CA91CX42_SCYC_CTL_LAS_PCIIO (1<<2)
325
326#define CA91CX42_SCYC_CTL_CYC_M (3<<0)
327#define CA91CX42_SCYC_CTL_CYC_RMW (1<<0)
328#define CA91CX42_SCYC_CTL_CYC_ADOH (1<<1)
329
330/*
331 * LMISC Register
332 * offset 184
333 */
334#define CA91CX42_BM_LMISC_CRT 0xF0000000
335#define CA91CX42_OF_LMISC_CRT 28
336#define CA91CX42_BM_LMISC_CWT 0x0F000000
337#define CA91CX42_OF_LMISC_CWT 24
338
339/*
340 * SLSI Register
341 * offset 188
342 */
343#define CA91CX42_BM_SLSI_EN 0x80000000
344#define CA91CX42_BM_SLSI_PWEN 0x40000000
345#define CA91CX42_BM_SLSI_VDW 0x00F00000
346#define CA91CX42_OF_SLSI_VDW 20
347#define CA91CX42_BM_SLSI_PGM 0x0000F000
348#define CA91CX42_OF_SLSI_PGM 12
349#define CA91CX42_BM_SLSI_SUPER 0x00000F00
350#define CA91CX42_OF_SLSI_SUPER 8
351#define CA91CX42_BM_SLSI_BS 0x000000F6
352#define CA91CX42_OF_SLSI_BS 2
353#define CA91CX42_BM_SLSI_LAS 0x00000003
354#define CA91CX42_OF_SLSI_LAS 0
355#define CA91CX42_BM_SLSI_RESERVED 0x3F0F0000
356
/*
 * DCTL Register (DMA Transfer Control)
 * offset 200
 */
#define CA91CX42_DCTL_L2V (1<<31)
/* Fix: CA91CX42_DCTL_VDW_M was defined twice (identical duplicate removed) */
#define CA91CX42_DCTL_VDW_M (3<<22)
#define CA91CX42_DCTL_VDW_D8 0
#define CA91CX42_DCTL_VDW_D16 (1<<22)
#define CA91CX42_DCTL_VDW_D32 (1<<23)
#define CA91CX42_DCTL_VDW_D64 (3<<22)

#define CA91CX42_DCTL_VAS_M (7<<16)
#define CA91CX42_DCTL_VAS_A16 0
#define CA91CX42_DCTL_VAS_A24 (1<<16)
#define CA91CX42_DCTL_VAS_A32 (1<<17)
#define CA91CX42_DCTL_VAS_USER1 (3<<17)
#define CA91CX42_DCTL_VAS_USER2 (7<<16)

#define CA91CX42_DCTL_PGM_M (1<<14)
#define CA91CX42_DCTL_PGM_DATA 0
#define CA91CX42_DCTL_PGM_PGM (1<<14)

#define CA91CX42_DCTL_SUPER_M (1<<12)
#define CA91CX42_DCTL_SUPER_NPRIV 0
#define CA91CX42_DCTL_SUPER_SUPR (1<<12)

#define CA91CX42_DCTL_VCT_M (1<<8)
#define CA91CX42_DCTL_VCT_BLT (1<<8)
#define CA91CX42_DCTL_LD64EN (1<<7)
387
388/*
389 * DCPP Register
390 * offset 218
391 */
392#define CA91CX42_DCPP_M 0xf
393#define CA91CX42_DCPP_NULL (1<<0)
394
395/*
396 * DMA General Control/Status Register (DGCS)
397 * offset 220
398 */
399#define CA91CX42_DGCS_GO (1<<31)
400#define CA91CX42_DGCS_STOP_REQ (1<<30)
401#define CA91CX42_DGCS_HALT_REQ (1<<29)
402#define CA91CX42_DGCS_CHAIN (1<<27)
403
404#define CA91CX42_DGCS_VON_M (7<<20)
405
406#define CA91CX42_DGCS_VOFF_M (0xf<<16)
407
408#define CA91CX42_DGCS_ACT (1<<15)
409#define CA91CX42_DGCS_STOP (1<<14)
410#define CA91CX42_DGCS_HALT (1<<13)
411#define CA91CX42_DGCS_DONE (1<<11)
412#define CA91CX42_DGCS_LERR (1<<10)
413#define CA91CX42_DGCS_VERR (1<<9)
414#define CA91CX42_DGCS_PERR (1<<8)
415#define CA91CX42_DGCS_INT_STOP (1<<6)
416#define CA91CX42_DGCS_INT_HALT (1<<5)
417#define CA91CX42_DGCS_INT_DONE (1<<3)
418#define CA91CX42_DGCS_INT_LERR (1<<2)
419#define CA91CX42_DGCS_INT_VERR (1<<1)
420#define CA91CX42_DGCS_INT_PERR (1<<0)
421
422/*
423 * PCI Interrupt Enable Register
424 * offset 300
425 */
426#define CA91CX42_LINT_LM3 0x00800000
427#define CA91CX42_LINT_LM2 0x00400000
428#define CA91CX42_LINT_LM1 0x00200000
429#define CA91CX42_LINT_LM0 0x00100000
430#define CA91CX42_LINT_MBOX3 0x00080000
431#define CA91CX42_LINT_MBOX2 0x00040000
432#define CA91CX42_LINT_MBOX1 0x00020000
433#define CA91CX42_LINT_MBOX0 0x00010000
434#define CA91CX42_LINT_ACFAIL 0x00008000
435#define CA91CX42_LINT_SYSFAIL 0x00004000
436#define CA91CX42_LINT_SW_INT 0x00002000
437#define CA91CX42_LINT_SW_IACK 0x00001000
438
439#define CA91CX42_LINT_VERR 0x00000400
440#define CA91CX42_LINT_LERR 0x00000200
441#define CA91CX42_LINT_DMA 0x00000100
442#define CA91CX42_LINT_VIRQ7 0x00000080
443#define CA91CX42_LINT_VIRQ6 0x00000040
444#define CA91CX42_LINT_VIRQ5 0x00000020
445#define CA91CX42_LINT_VIRQ4 0x00000010
446#define CA91CX42_LINT_VIRQ3 0x00000008
447#define CA91CX42_LINT_VIRQ2 0x00000004
448#define CA91CX42_LINT_VIRQ1 0x00000002
449#define CA91CX42_LINT_VOWN 0x00000001
450
451static const int CA91CX42_LINT_VIRQ[] = { 0, CA91CX42_LINT_VIRQ1,
452 CA91CX42_LINT_VIRQ2, CA91CX42_LINT_VIRQ3,
453 CA91CX42_LINT_VIRQ4, CA91CX42_LINT_VIRQ5,
454 CA91CX42_LINT_VIRQ6, CA91CX42_LINT_VIRQ7 };
455
456#define CA91CX42_LINT_MBOX 0x000F0000
457
458static const int CA91CX42_LINT_LM[] = { CA91CX42_LINT_LM0, CA91CX42_LINT_LM1,
459 CA91CX42_LINT_LM2, CA91CX42_LINT_LM3 };
460
461/*
462 * MAST_CTL Register
463 * offset 400
464 */
465#define CA91CX42_BM_MAST_CTL_MAXRTRY 0xF0000000
466#define CA91CX42_OF_MAST_CTL_MAXRTRY 28
467#define CA91CX42_BM_MAST_CTL_PWON 0x0F000000
468#define CA91CX42_OF_MAST_CTL_PWON 24
469#define CA91CX42_BM_MAST_CTL_VRL 0x00C00000
470#define CA91CX42_OF_MAST_CTL_VRL 22
471#define CA91CX42_BM_MAST_CTL_VRM 0x00200000
472#define CA91CX42_BM_MAST_CTL_VREL 0x00100000
473#define CA91CX42_BM_MAST_CTL_VOWN 0x00080000
474#define CA91CX42_BM_MAST_CTL_VOWN_ACK 0x00040000
475#define CA91CX42_BM_MAST_CTL_PABS 0x00001000
476#define CA91CX42_BM_MAST_CTL_BUS_NO 0x0000000F
477#define CA91CX42_OF_MAST_CTL_BUS_NO 0
478
479/*
480 * MISC_CTL Register
481 * offset 404
482 */
483#define CA91CX42_MISC_CTL_VBTO 0xF0000000
484#define CA91CX42_MISC_CTL_VARB 0x04000000
485#define CA91CX42_MISC_CTL_VARBTO 0x03000000
486#define CA91CX42_MISC_CTL_SW_LRST 0x00800000
487#define CA91CX42_MISC_CTL_SW_SRST 0x00400000
488#define CA91CX42_MISC_CTL_BI 0x00100000
489#define CA91CX42_MISC_CTL_ENGBI 0x00080000
490#define CA91CX42_MISC_CTL_RESCIND 0x00040000
491#define CA91CX42_MISC_CTL_SYSCON 0x00020000
492#define CA91CX42_MISC_CTL_V64AUTO 0x00010000
493#define CA91CX42_MISC_CTL_RESERVED 0x0820FFFF
494
495#define CA91CX42_OF_MISC_CTL_VARBTO 24
496#define CA91CX42_OF_MISC_CTL_VBTO 28
497
498/*
499 * MISC_STAT Register
500 * offset 408
501 */
502#define CA91CX42_BM_MISC_STAT_ENDIAN 0x80000000
503#define CA91CX42_BM_MISC_STAT_LCLSIZE 0x40000000
504#define CA91CX42_BM_MISC_STAT_DY4AUTO 0x08000000
505#define CA91CX42_BM_MISC_STAT_MYBBSY 0x00200000
506#define CA91CX42_BM_MISC_STAT_DY4DONE 0x00080000
507#define CA91CX42_BM_MISC_STAT_TXFE 0x00040000
508#define CA91CX42_BM_MISC_STAT_RXFE 0x00020000
509#define CA91CX42_BM_MISC_STAT_DY4AUTOID 0x0000FF00
510#define CA91CX42_OF_MISC_STAT_DY4AUTOID 8
511
512/*
513 * VSI Control Register
514 * offset F00
515 */
516#define CA91CX42_VSI_CTL_EN (1<<31)
517#define CA91CX42_VSI_CTL_PWEN (1<<30)
518#define CA91CX42_VSI_CTL_PREN (1<<29)
519
520#define CA91CX42_VSI_CTL_PGM_M (3<<22)
521#define CA91CX42_VSI_CTL_PGM_DATA (1<<22)
522#define CA91CX42_VSI_CTL_PGM_PGM (1<<23)
523
524#define CA91CX42_VSI_CTL_SUPER_M (3<<20)
525#define CA91CX42_VSI_CTL_SUPER_NPRIV (1<<20)
526#define CA91CX42_VSI_CTL_SUPER_SUPR (1<<21)
527
528#define CA91CX42_VSI_CTL_VAS_M (7<<16)
529#define CA91CX42_VSI_CTL_VAS_A16 0
530#define CA91CX42_VSI_CTL_VAS_A24 (1<<16)
531#define CA91CX42_VSI_CTL_VAS_A32 (1<<17)
532#define CA91CX42_VSI_CTL_VAS_USER1 (3<<17)
533#define CA91CX42_VSI_CTL_VAS_USER2 (7<<16)
534
535#define CA91CX42_VSI_CTL_LD64EN (1<<7)
536#define CA91CX42_VSI_CTL_LLRMW (1<<6)
537
538#define CA91CX42_VSI_CTL_LAS_M (3<<0)
539#define CA91CX42_VSI_CTL_LAS_PCI_MS 0
540#define CA91CX42_VSI_CTL_LAS_PCI_IO (1<<0)
541#define CA91CX42_VSI_CTL_LAS_PCI_CONF (1<<1)
542
543/* LM_CTL Register
544 * offset F64
545 */
546#define CA91CX42_LM_CTL_EN (1<<31)
547#define CA91CX42_LM_CTL_PGM (1<<23)
548#define CA91CX42_LM_CTL_DATA (1<<22)
549#define CA91CX42_LM_CTL_SUPR (1<<21)
550#define CA91CX42_LM_CTL_NPRIV (1<<20)
551#define CA91CX42_LM_CTL_AS_M (5<<16)
552#define CA91CX42_LM_CTL_AS_A16 0
553#define CA91CX42_LM_CTL_AS_A24 (1<<16)
554#define CA91CX42_LM_CTL_AS_A32 (1<<17)
555
556/*
557 * VRAI_CTL Register
558 * offset F70
559 */
560#define CA91CX42_BM_VRAI_CTL_EN 0x80000000
561#define CA91CX42_BM_VRAI_CTL_PGM 0x00C00000
562#define CA91CX42_OF_VRAI_CTL_PGM 22
563#define CA91CX42_BM_VRAI_CTL_SUPER 0x00300000
564#define CA91CX42_OF_VRAI_CTL_SUPER 20
565#define CA91CX42_BM_VRAI_CTL_VAS 0x00030000
566#define CA91CX42_OF_VRAI_CTL_VAS 16
567
568/* VCSR_CTL Register
569 * offset F80
570 */
571#define CA91CX42_VCSR_CTL_EN (1<<31)
572
573#define CA91CX42_VCSR_CTL_LAS_M (3<<0)
574#define CA91CX42_VCSR_CTL_LAS_PCI_MS 0
575#define CA91CX42_VCSR_CTL_LAS_PCI_IO (1<<0)
576#define CA91CX42_VCSR_CTL_LAS_PCI_CONF (1<<1)
577
578/* VCSR_BS Register
579 * offset FFC
580 */
581#define CA91CX42_VCSR_BS_SLOT_M (0x1F<<27)
582
583#endif /* _CA91CX42_H */
diff --git a/drivers/vme/bridges/vme_tsi148.c b/drivers/vme/bridges/vme_tsi148.c
new file mode 100644
index 00000000000..081e9c4c5e7
--- /dev/null
+++ b/drivers/vme/bridges/vme_tsi148.c
@@ -0,0 +1,2691 @@
1/*
2 * Support for the Tundra TSI148 VME-PCI Bridge Chip
3 *
4 * Author: Martyn Welch <martyn.welch@ge.com>
5 * Copyright 2008 GE Intelligent Platforms Embedded Systems, Inc.
6 *
7 * Based on work by Tom Armistead and Ajit Prem
8 * Copyright 2004 Motorola Inc.
9 *
10 * This program is free software; you can redistribute it and/or modify it
11 * under the terms of the GNU General Public License as published by the
12 * Free Software Foundation; either version 2 of the License, or (at your
13 * option) any later version.
14 */
15
16#include <linux/module.h>
17#include <linux/moduleparam.h>
18#include <linux/mm.h>
19#include <linux/types.h>
20#include <linux/errno.h>
21#include <linux/proc_fs.h>
22#include <linux/pci.h>
23#include <linux/poll.h>
24#include <linux/dma-mapping.h>
25#include <linux/interrupt.h>
26#include <linux/spinlock.h>
27#include <linux/sched.h>
28#include <linux/slab.h>
29#include <linux/time.h>
30#include <linux/io.h>
31#include <linux/uaccess.h>
32#include <linux/byteorder/generic.h>
33#include <linux/vme.h>
34
35#include "../vme_bridge.h"
36#include "vme_tsi148.h"
37
/* Driver lifecycle hooks - definitions appear later in this file. */
static int __init tsi148_init(void);
static int tsi148_probe(struct pci_dev *, const struct pci_device_id *);
static void tsi148_remove(struct pci_dev *);
static void __exit tsi148_exit(void);


/* Module parameter */
/* err_chk: presumably enables VME bus error checking on transfers -
 * confirm against the MODULE_PARM_DESC later in the file. */
static bool err_chk;
/* geoid: override for geographical (slot) addressing; 0 = use hardware. */
static int geoid;

static const char driver_name[] = "vme_tsi148";

/* PCI devices this bridge driver binds to (Tundra TSI148 only).
 * NOTE(review): DEFINE_PCI_DEVICE_TABLE is deprecated in later kernels in
 * favour of a plain const struct pci_device_id array. */
static DEFINE_PCI_DEVICE_TABLE(tsi148_ids) = {
	{ PCI_DEVICE(PCI_VENDOR_ID_TUNDRA, PCI_DEVICE_ID_TUNDRA_TSI148) },
	{ },
};

static struct pci_driver tsi148_driver = {
	.name = driver_name,
	.id_table = tsi148_ids,
	.probe = tsi148_probe,
	.remove = tsi148_remove,
};
61
/* Combine two 32-bit register halves into a single 64-bit value. */
static void reg_join(unsigned int high, unsigned int low,
	unsigned long long *variable)
{
	unsigned long long joined = (unsigned long long)high;

	joined <<= 32;
	joined |= low;
	*variable = joined;
}
68
/* Split a 64-bit value into its high and low 32-bit register halves. */
static void reg_split(unsigned long long variable, unsigned int *high,
	unsigned int *low)
{
	*high = (unsigned int)(variable >> 32);
	*low = (unsigned int)(variable & 0xFFFFFFFFULL);
}
75
76/*
77 * Wakes up DMA queue.
78 */
79static u32 tsi148_DMA_irqhandler(struct tsi148_driver *bridge,
80 int channel_mask)
81{
82 u32 serviced = 0;
83
84 if (channel_mask & TSI148_LCSR_INTS_DMA0S) {
85 wake_up(&bridge->dma_queue[0]);
86 serviced |= TSI148_LCSR_INTC_DMA0C;
87 }
88 if (channel_mask & TSI148_LCSR_INTS_DMA1S) {
89 wake_up(&bridge->dma_queue[1]);
90 serviced |= TSI148_LCSR_INTC_DMA1C;
91 }
92
93 return serviced;
94}
95
96/*
97 * Wake up location monitor queue
98 */
99static u32 tsi148_LM_irqhandler(struct tsi148_driver *bridge, u32 stat)
100{
101 int i;
102 u32 serviced = 0;
103
104 for (i = 0; i < 4; i++) {
105 if (stat & TSI148_LCSR_INTS_LMS[i]) {
106 /* We only enable interrupts if the callback is set */
107 bridge->lm_callback[i](i);
108 serviced |= TSI148_LCSR_INTC_LMC[i];
109 }
110 }
111
112 return serviced;
113}
114
115/*
116 * Wake up mail box queue.
117 *
118 * XXX This functionality is not exposed up though API.
119 */
120static u32 tsi148_MB_irqhandler(struct vme_bridge *tsi148_bridge, u32 stat)
121{
122 int i;
123 u32 val;
124 u32 serviced = 0;
125 struct tsi148_driver *bridge;
126
127 bridge = tsi148_bridge->driver_priv;
128
129 for (i = 0; i < 4; i++) {
130 if (stat & TSI148_LCSR_INTS_MBS[i]) {
131 val = ioread32be(bridge->base + TSI148_GCSR_MBOX[i]);
132 dev_err(tsi148_bridge->parent, "VME Mailbox %d received"
133 ": 0x%x\n", i, val);
134 serviced |= TSI148_LCSR_INTC_MBC[i];
135 }
136 }
137
138 return serviced;
139}
140
141/*
142 * Display error & status message when PERR (PCI) exception interrupt occurs.
143 */
144static u32 tsi148_PERR_irqhandler(struct vme_bridge *tsi148_bridge)
145{
146 struct tsi148_driver *bridge;
147
148 bridge = tsi148_bridge->driver_priv;
149
150 dev_err(tsi148_bridge->parent, "PCI Exception at address: 0x%08x:%08x, "
151 "attributes: %08x\n",
152 ioread32be(bridge->base + TSI148_LCSR_EDPAU),
153 ioread32be(bridge->base + TSI148_LCSR_EDPAL),
154 ioread32be(bridge->base + TSI148_LCSR_EDPAT));
155
156 dev_err(tsi148_bridge->parent, "PCI-X attribute reg: %08x, PCI-X split "
157 "completion reg: %08x\n",
158 ioread32be(bridge->base + TSI148_LCSR_EDPXA),
159 ioread32be(bridge->base + TSI148_LCSR_EDPXS));
160
161 iowrite32be(TSI148_LCSR_EDPAT_EDPCL, bridge->base + TSI148_LCSR_EDPAT);
162
163 return TSI148_LCSR_INTC_PERRC;
164}
165
/*
 * Save address and status when VME error interrupt occurs.
 *
 * Reads the error capture registers (VEAU/VEAL/VEAT), queues a
 * vme_bus_error record on the bridge's error list (consumed by
 * tsi148_find_error() / tsi148_clear_errors()), then clears the captured
 * status.  Runs in hard-irq context.
 */
static u32 tsi148_VERR_irqhandler(struct vme_bridge *tsi148_bridge)
{
	unsigned int error_addr_high, error_addr_low;
	unsigned long long error_addr;
	u32 error_attrib;
	struct vme_bus_error *error;
	struct tsi148_driver *bridge;

	bridge = tsi148_bridge->driver_priv;

	/* Latch the faulting address (two 32-bit halves) and attributes. */
	error_addr_high = ioread32be(bridge->base + TSI148_LCSR_VEAU);
	error_addr_low = ioread32be(bridge->base + TSI148_LCSR_VEAL);
	error_attrib = ioread32be(bridge->base + TSI148_LCSR_VEAT);

	reg_join(error_addr_high, error_addr_low, &error_addr);

	/* Check for exception register overflow (we have lost error data) */
	if (error_attrib & TSI148_LCSR_VEAT_VEOF) {
		dev_err(tsi148_bridge->parent, "VME Bus Exception Overflow "
			"Occurred\n");
	}

	/* GFP_ATOMIC: interrupt context, must not sleep. */
	error = kmalloc(sizeof(struct vme_bus_error), GFP_ATOMIC);
	if (error) {
		error->address = error_addr;
		error->attributes = error_attrib;
		list_add_tail(&error->list, &tsi148_bridge->vme_errors);
	} else {
		/* Cannot record the error: at least leave a trace in the log */
		dev_err(tsi148_bridge->parent, "Unable to alloc memory for "
			"VMEbus Error reporting\n");
		dev_err(tsi148_bridge->parent, "VME Bus Error at address: "
			"0x%llx, attributes: %08x\n", error_addr, error_attrib);
	}

	/* Clear Status */
	iowrite32be(TSI148_LCSR_VEAT_VESCL, bridge->base + TSI148_LCSR_VEAT);

	return TSI148_LCSR_INTC_VERRC;
}
208
209/*
210 * Wake up IACK queue.
211 */
212static u32 tsi148_IACK_irqhandler(struct tsi148_driver *bridge)
213{
214 wake_up(&bridge->iack_queue);
215
216 return TSI148_LCSR_INTC_IACKC;
217}
218
219/*
220 * Calling VME bus interrupt callback if provided.
221 */
222static u32 tsi148_VIRQ_irqhandler(struct vme_bridge *tsi148_bridge,
223 u32 stat)
224{
225 int vec, i, serviced = 0;
226 struct tsi148_driver *bridge;
227
228 bridge = tsi148_bridge->driver_priv;
229
230 for (i = 7; i > 0; i--) {
231 if (stat & (1 << i)) {
232 /*
233 * Note: Even though the registers are defined as
234 * 32-bits in the spec, we only want to issue 8-bit
235 * IACK cycles on the bus, read from offset 3.
236 */
237 vec = ioread8(bridge->base + TSI148_LCSR_VIACK[i] + 3);
238
239 vme_irq_handler(tsi148_bridge, i, vec);
240
241 serviced |= (1 << i);
242 }
243 }
244
245 return serviced;
246}
247
/*
 * Top level interrupt handler. Clears appropriate interrupt status bits and
 * then calls appropriate sub handler(s).
 *
 * Each subhandler returns the INTC bits it serviced; the accumulated mask
 * is written back to INTC at the end so exactly those sources are cleared.
 */
static irqreturn_t tsi148_irqhandler(int irq, void *ptr)
{
	u32 stat, enable, serviced = 0;
	struct vme_bridge *tsi148_bridge;
	struct tsi148_driver *bridge;

	/* ptr is the dev_id passed to request_irq() in tsi148_irq_init() */
	tsi148_bridge = ptr;

	bridge = tsi148_bridge->driver_priv;

	/* Determine which interrupts are unmasked and set */
	enable = ioread32be(bridge->base + TSI148_LCSR_INTEO);
	stat = ioread32be(bridge->base + TSI148_LCSR_INTS);

	/* Only look at unmasked interrupts */
	stat &= enable;

	/* Line is shared (IRQF_SHARED): report "not ours" if nothing pending */
	if (unlikely(!stat))
		return IRQ_NONE;

	/* Call subhandlers as appropriate */
	/* DMA irqs */
	if (stat & (TSI148_LCSR_INTS_DMA1S | TSI148_LCSR_INTS_DMA0S))
		serviced |= tsi148_DMA_irqhandler(bridge, stat);

	/* Location monitor irqs */
	if (stat & (TSI148_LCSR_INTS_LM3S | TSI148_LCSR_INTS_LM2S |
			TSI148_LCSR_INTS_LM1S | TSI148_LCSR_INTS_LM0S))
		serviced |= tsi148_LM_irqhandler(bridge, stat);

	/* Mail box irqs */
	if (stat & (TSI148_LCSR_INTS_MB3S | TSI148_LCSR_INTS_MB2S |
			TSI148_LCSR_INTS_MB1S | TSI148_LCSR_INTS_MB0S))
		serviced |= tsi148_MB_irqhandler(tsi148_bridge, stat);

	/* PCI bus error */
	if (stat & TSI148_LCSR_INTS_PERRS)
		serviced |= tsi148_PERR_irqhandler(tsi148_bridge);

	/* VME bus error */
	if (stat & TSI148_LCSR_INTS_VERRS)
		serviced |= tsi148_VERR_irqhandler(tsi148_bridge);

	/* IACK irq */
	if (stat & TSI148_LCSR_INTS_IACKS)
		serviced |= tsi148_IACK_irqhandler(bridge);

	/* VME bus irqs */
	if (stat & (TSI148_LCSR_INTS_IRQ7S | TSI148_LCSR_INTS_IRQ6S |
			TSI148_LCSR_INTS_IRQ5S | TSI148_LCSR_INTS_IRQ4S |
			TSI148_LCSR_INTS_IRQ3S | TSI148_LCSR_INTS_IRQ2S |
			TSI148_LCSR_INTS_IRQ1S))
		serviced |= tsi148_VIRQ_irqhandler(tsi148_bridge, stat);

	/* Clear serviced interrupts */
	iowrite32be(serviced, bridge->base + TSI148_LCSR_INTC);

	return IRQ_HANDLED;
}
311
/*
 * Hook the bridge onto the (shared) PCI interrupt line and enable the
 * baseline interrupt sources: DMA channels, mailboxes, PCI/VME bus
 * errors and IACK.
 *
 * Returns 0 on success or the error from request_irq().
 */
static int tsi148_irq_init(struct vme_bridge *tsi148_bridge)
{
	int result;
	unsigned int tmp;
	struct pci_dev *pdev;
	struct tsi148_driver *bridge;

	/* The bridge's parent device is the underlying PCI function. */
	pdev = container_of(tsi148_bridge->parent, struct pci_dev, dev);

	bridge = tsi148_bridge->driver_priv;

	/* Initialise list for VME bus errors */
	INIT_LIST_HEAD(&tsi148_bridge->vme_errors);

	mutex_init(&tsi148_bridge->irq_mtx);

	result = request_irq(pdev->irq,
			     tsi148_irqhandler,
			     IRQF_SHARED,
			     driver_name, tsi148_bridge);
	if (result) {
		dev_err(tsi148_bridge->parent, "Can't get assigned pci irq "
			"vector %02X\n", pdev->irq);
		return result;
	}

	/* Enable and unmask interrupts */
	tmp = TSI148_LCSR_INTEO_DMA1EO | TSI148_LCSR_INTEO_DMA0EO |
		TSI148_LCSR_INTEO_MB3EO | TSI148_LCSR_INTEO_MB2EO |
		TSI148_LCSR_INTEO_MB1EO | TSI148_LCSR_INTEO_MB0EO |
		TSI148_LCSR_INTEO_PERREO | TSI148_LCSR_INTEO_VERREO |
		TSI148_LCSR_INTEO_IACKEO;

	/* This leaves the following interrupts masked.
	 * TSI148_LCSR_INTEO_VIEEO
	 * TSI148_LCSR_INTEO_SYSFLEO
	 * TSI148_LCSR_INTEO_ACFLEO
	 */

	/* Don't enable Location Monitor interrupts here - they will be
	 * enabled when the location monitors are properly configured and
	 * a callback has been attached.
	 * TSI148_LCSR_INTEO_LM0EO
	 * TSI148_LCSR_INTEO_LM1EO
	 * TSI148_LCSR_INTEO_LM2EO
	 * TSI148_LCSR_INTEO_LM3EO
	 */

	/* Don't enable VME interrupts until we add a handler, else the board
	 * will respond to it and we don't want that unless it knows how to
	 * properly deal with it.
	 * TSI148_LCSR_INTEO_IRQ7EO
	 * TSI148_LCSR_INTEO_IRQ6EO
	 * TSI148_LCSR_INTEO_IRQ5EO
	 * TSI148_LCSR_INTEO_IRQ4EO
	 * TSI148_LCSR_INTEO_IRQ3EO
	 * TSI148_LCSR_INTEO_IRQ2EO
	 * TSI148_LCSR_INTEO_IRQ1EO
	 */

	/* Program the same mask into both the out-enable and enable regs. */
	iowrite32be(tmp, bridge->base + TSI148_LCSR_INTEO);
	iowrite32be(tmp, bridge->base + TSI148_LCSR_INTEN);

	return 0;
}
377
378static void tsi148_irq_exit(struct vme_bridge *tsi148_bridge,
379 struct pci_dev *pdev)
380{
381 struct tsi148_driver *bridge = tsi148_bridge->driver_priv;
382
383 /* Turn off interrupts */
384 iowrite32be(0x0, bridge->base + TSI148_LCSR_INTEO);
385 iowrite32be(0x0, bridge->base + TSI148_LCSR_INTEN);
386
387 /* Clear all interrupts */
388 iowrite32be(0xFFFFFFFF, bridge->base + TSI148_LCSR_INTC);
389
390 /* Detach interrupt handler */
391 free_irq(pdev->irq, tsi148_bridge);
392}
393
394/*
395 * Check to see if an IACk has been received, return true (1) or false (0).
396 */
397static int tsi148_iack_received(struct tsi148_driver *bridge)
398{
399 u32 tmp;
400
401 tmp = ioread32be(bridge->base + TSI148_LCSR_VICR);
402
403 if (tmp & TSI148_LCSR_VICR_IRQS)
404 return 0;
405 else
406 return 1;
407}
408
/*
 * Configure VME interrupt
 *
 * Enables (state != 0) or disables (state == 0) the VME IRQ at 'level'
 * (1-7; the register lookup tables are indexed with level - 1).  When
 * disabling with sync set, waits for a possibly in-flight handler on the
 * PCI irq line to finish before returning.
 */
static void tsi148_irq_set(struct vme_bridge *tsi148_bridge, int level,
	int state, int sync)
{
	struct pci_dev *pdev;
	u32 tmp;
	struct tsi148_driver *bridge;

	bridge = tsi148_bridge->driver_priv;

	/* We need to do the ordering differently for enabling and disabling */
	if (state == 0) {
		/* Disable: mask (INTEN) before switching the source off (INTEO) */
		tmp = ioread32be(bridge->base + TSI148_LCSR_INTEN);
		tmp &= ~TSI148_LCSR_INTEN_IRQEN[level - 1];
		iowrite32be(tmp, bridge->base + TSI148_LCSR_INTEN);

		tmp = ioread32be(bridge->base + TSI148_LCSR_INTEO);
		tmp &= ~TSI148_LCSR_INTEO_IRQEO[level - 1];
		iowrite32be(tmp, bridge->base + TSI148_LCSR_INTEO);

		if (sync != 0) {
			pdev = container_of(tsi148_bridge->parent,
				struct pci_dev, dev);

			/* Wait for any running handler to complete. */
			synchronize_irq(pdev->irq);
		}
	} else {
		/* Enable: switch the source on (INTEO) before unmasking (INTEN) */
		tmp = ioread32be(bridge->base + TSI148_LCSR_INTEO);
		tmp |= TSI148_LCSR_INTEO_IRQEO[level - 1];
		iowrite32be(tmp, bridge->base + TSI148_LCSR_INTEO);

		tmp = ioread32be(bridge->base + TSI148_LCSR_INTEN);
		tmp |= TSI148_LCSR_INTEN_IRQEN[level - 1];
		iowrite32be(tmp, bridge->base + TSI148_LCSR_INTEN);
	}
}
447
/*
 * Generate a VME bus interrupt at the requested level & vector. Wait for
 * interrupt to be acked.
 *
 * bridge->vme_int serialises concurrent interrupt generation.  Always
 * returns 0.
 */
static int tsi148_irq_generate(struct vme_bridge *tsi148_bridge, int level,
	int statid)
{
	u32 tmp;
	struct tsi148_driver *bridge;

	bridge = tsi148_bridge->driver_priv;

	mutex_lock(&bridge->vme_int);

	/* Read VICR register */
	tmp = ioread32be(bridge->base + TSI148_LCSR_VICR);

	/* Set Status/ID */
	tmp = (tmp & ~TSI148_LCSR_VICR_STID_M) |
		(statid & TSI148_LCSR_VICR_STID_M);
	iowrite32be(tmp, bridge->base + TSI148_LCSR_VICR);

	/* Assert VMEbus IRQ */
	tmp = tmp | TSI148_LCSR_VICR_IRQL[level];
	iowrite32be(tmp, bridge->base + TSI148_LCSR_VICR);

	/* XXX Consider implementing a timeout? */
	/*
	 * NOTE(review): the return value of wait_event_interruptible() is
	 * ignored - a signal ends the wait early yet 0 is still returned.
	 * Confirm whether callers should see -EINTR instead.
	 */
	wait_event_interruptible(bridge->iack_queue,
		tsi148_iack_received(bridge));

	mutex_unlock(&bridge->vme_int);

	return 0;
}
482
483/*
484 * Find the first error in this address range
485 */
486static struct vme_bus_error *tsi148_find_error(struct vme_bridge *tsi148_bridge,
487 u32 aspace, unsigned long long address, size_t count)
488{
489 struct list_head *err_pos;
490 struct vme_bus_error *vme_err, *valid = NULL;
491 unsigned long long bound;
492
493 bound = address + count;
494
495 /*
496 * XXX We are currently not looking at the address space when parsing
497 * for errors. This is because parsing the Address Modifier Codes
498 * is going to be quite resource intensive to do properly. We
499 * should be OK just looking at the addresses and this is certainly
500 * much better than what we had before.
501 */
502 err_pos = NULL;
503 /* Iterate through errors */
504 list_for_each(err_pos, &tsi148_bridge->vme_errors) {
505 vme_err = list_entry(err_pos, struct vme_bus_error, list);
506 if ((vme_err->address >= address) &&
507 (vme_err->address < bound)) {
508
509 valid = vme_err;
510 break;
511 }
512 }
513
514 return valid;
515}
516
517/*
518 * Clear errors in the provided address range.
519 */
520static void tsi148_clear_errors(struct vme_bridge *tsi148_bridge,
521 u32 aspace, unsigned long long address, size_t count)
522{
523 struct list_head *err_pos, *temp;
524 struct vme_bus_error *vme_err;
525 unsigned long long bound;
526
527 bound = address + count;
528
529 /*
530 * XXX We are currently not looking at the address space when parsing
531 * for errors. This is because parsing the Address Modifier Codes
532 * is going to be quite resource intensive to do properly. We
533 * should be OK just looking at the addresses and this is certainly
534 * much better than what we had before.
535 */
536 err_pos = NULL;
537 /* Iterate through errors */
538 list_for_each_safe(err_pos, temp, &tsi148_bridge->vme_errors) {
539 vme_err = list_entry(err_pos, struct vme_bus_error, list);
540
541 if ((vme_err->address >= address) &&
542 (vme_err->address < bound)) {
543
544 list_del(err_pos);
545 kfree(vme_err);
546 }
547 }
548}
549
550/*
551 * Initialize a slave window with the requested attributes.
552 */
553static int tsi148_slave_set(struct vme_slave_resource *image, int enabled,
554 unsigned long long vme_base, unsigned long long size,
555 dma_addr_t pci_base, u32 aspace, u32 cycle)
556{
557 unsigned int i, addr = 0, granularity = 0;
558 unsigned int temp_ctl = 0;
559 unsigned int vme_base_low, vme_base_high;
560 unsigned int vme_bound_low, vme_bound_high;
561 unsigned int pci_offset_low, pci_offset_high;
562 unsigned long long vme_bound, pci_offset;
563 struct vme_bridge *tsi148_bridge;
564 struct tsi148_driver *bridge;
565
566 tsi148_bridge = image->parent;
567 bridge = tsi148_bridge->driver_priv;
568
569 i = image->number;
570
571 switch (aspace) {
572 case VME_A16:
573 granularity = 0x10;
574 addr |= TSI148_LCSR_ITAT_AS_A16;
575 break;
576 case VME_A24:
577 granularity = 0x1000;
578 addr |= TSI148_LCSR_ITAT_AS_A24;
579 break;
580 case VME_A32:
581 granularity = 0x10000;
582 addr |= TSI148_LCSR_ITAT_AS_A32;
583 break;
584 case VME_A64:
585 granularity = 0x10000;
586 addr |= TSI148_LCSR_ITAT_AS_A64;
587 break;
588 case VME_CRCSR:
589 case VME_USER1:
590 case VME_USER2:
591 case VME_USER3:
592 case VME_USER4:
593 default:
594 dev_err(tsi148_bridge->parent, "Invalid address space\n");
595 return -EINVAL;
596 break;
597 }
598
599 /* Convert 64-bit variables to 2x 32-bit variables */
600 reg_split(vme_base, &vme_base_high, &vme_base_low);
601
602 /*
603 * Bound address is a valid address for the window, adjust
604 * accordingly
605 */
606 vme_bound = vme_base + size - granularity;
607 reg_split(vme_bound, &vme_bound_high, &vme_bound_low);
608 pci_offset = (unsigned long long)pci_base - vme_base;
609 reg_split(pci_offset, &pci_offset_high, &pci_offset_low);
610
611 if (vme_base_low & (granularity - 1)) {
612 dev_err(tsi148_bridge->parent, "Invalid VME base alignment\n");
613 return -EINVAL;
614 }
615 if (vme_bound_low & (granularity - 1)) {
616 dev_err(tsi148_bridge->parent, "Invalid VME bound alignment\n");
617 return -EINVAL;
618 }
619 if (pci_offset_low & (granularity - 1)) {
620 dev_err(tsi148_bridge->parent, "Invalid PCI Offset "
621 "alignment\n");
622 return -EINVAL;
623 }
624
625 /* Disable while we are mucking around */
626 temp_ctl = ioread32be(bridge->base + TSI148_LCSR_IT[i] +
627 TSI148_LCSR_OFFSET_ITAT);
628 temp_ctl &= ~TSI148_LCSR_ITAT_EN;
629 iowrite32be(temp_ctl, bridge->base + TSI148_LCSR_IT[i] +
630 TSI148_LCSR_OFFSET_ITAT);
631
632 /* Setup mapping */
633 iowrite32be(vme_base_high, bridge->base + TSI148_LCSR_IT[i] +
634 TSI148_LCSR_OFFSET_ITSAU);
635 iowrite32be(vme_base_low, bridge->base + TSI148_LCSR_IT[i] +
636 TSI148_LCSR_OFFSET_ITSAL);
637 iowrite32be(vme_bound_high, bridge->base + TSI148_LCSR_IT[i] +
638 TSI148_LCSR_OFFSET_ITEAU);
639 iowrite32be(vme_bound_low, bridge->base + TSI148_LCSR_IT[i] +
640 TSI148_LCSR_OFFSET_ITEAL);
641 iowrite32be(pci_offset_high, bridge->base + TSI148_LCSR_IT[i] +
642 TSI148_LCSR_OFFSET_ITOFU);
643 iowrite32be(pci_offset_low, bridge->base + TSI148_LCSR_IT[i] +
644 TSI148_LCSR_OFFSET_ITOFL);
645
646 /* Setup 2eSST speeds */
647 temp_ctl &= ~TSI148_LCSR_ITAT_2eSSTM_M;
648 switch (cycle & (VME_2eSST160 | VME_2eSST267 | VME_2eSST320)) {
649 case VME_2eSST160:
650 temp_ctl |= TSI148_LCSR_ITAT_2eSSTM_160;
651 break;
652 case VME_2eSST267:
653 temp_ctl |= TSI148_LCSR_ITAT_2eSSTM_267;
654 break;
655 case VME_2eSST320:
656 temp_ctl |= TSI148_LCSR_ITAT_2eSSTM_320;
657 break;
658 }
659
660 /* Setup cycle types */
661 temp_ctl &= ~(0x1F << 7);
662 if (cycle & VME_BLT)
663 temp_ctl |= TSI148_LCSR_ITAT_BLT;
664 if (cycle & VME_MBLT)
665 temp_ctl |= TSI148_LCSR_ITAT_MBLT;
666 if (cycle & VME_2eVME)
667 temp_ctl |= TSI148_LCSR_ITAT_2eVME;
668 if (cycle & VME_2eSST)
669 temp_ctl |= TSI148_LCSR_ITAT_2eSST;
670 if (cycle & VME_2eSSTB)
671 temp_ctl |= TSI148_LCSR_ITAT_2eSSTB;
672
673 /* Setup address space */
674 temp_ctl &= ~TSI148_LCSR_ITAT_AS_M;
675 temp_ctl |= addr;
676
677 temp_ctl &= ~0xF;
678 if (cycle & VME_SUPER)
679 temp_ctl |= TSI148_LCSR_ITAT_SUPR ;
680 if (cycle & VME_USER)
681 temp_ctl |= TSI148_LCSR_ITAT_NPRIV;
682 if (cycle & VME_PROG)
683 temp_ctl |= TSI148_LCSR_ITAT_PGM;
684 if (cycle & VME_DATA)
685 temp_ctl |= TSI148_LCSR_ITAT_DATA;
686
687 /* Write ctl reg without enable */
688 iowrite32be(temp_ctl, bridge->base + TSI148_LCSR_IT[i] +
689 TSI148_LCSR_OFFSET_ITAT);
690
691 if (enabled)
692 temp_ctl |= TSI148_LCSR_ITAT_EN;
693
694 iowrite32be(temp_ctl, bridge->base + TSI148_LCSR_IT[i] +
695 TSI148_LCSR_OFFSET_ITAT);
696
697 return 0;
698}
699
700/*
701 * Get slave window configuration.
702 */
703static int tsi148_slave_get(struct vme_slave_resource *image, int *enabled,
704 unsigned long long *vme_base, unsigned long long *size,
705 dma_addr_t *pci_base, u32 *aspace, u32 *cycle)
706{
707 unsigned int i, granularity = 0, ctl = 0;
708 unsigned int vme_base_low, vme_base_high;
709 unsigned int vme_bound_low, vme_bound_high;
710 unsigned int pci_offset_low, pci_offset_high;
711 unsigned long long vme_bound, pci_offset;
712 struct tsi148_driver *bridge;
713
714 bridge = image->parent->driver_priv;
715
716 i = image->number;
717
718 /* Read registers */
719 ctl = ioread32be(bridge->base + TSI148_LCSR_IT[i] +
720 TSI148_LCSR_OFFSET_ITAT);
721
722 vme_base_high = ioread32be(bridge->base + TSI148_LCSR_IT[i] +
723 TSI148_LCSR_OFFSET_ITSAU);
724 vme_base_low = ioread32be(bridge->base + TSI148_LCSR_IT[i] +
725 TSI148_LCSR_OFFSET_ITSAL);
726 vme_bound_high = ioread32be(bridge->base + TSI148_LCSR_IT[i] +
727 TSI148_LCSR_OFFSET_ITEAU);
728 vme_bound_low = ioread32be(bridge->base + TSI148_LCSR_IT[i] +
729 TSI148_LCSR_OFFSET_ITEAL);
730 pci_offset_high = ioread32be(bridge->base + TSI148_LCSR_IT[i] +
731 TSI148_LCSR_OFFSET_ITOFU);
732 pci_offset_low = ioread32be(bridge->base + TSI148_LCSR_IT[i] +
733 TSI148_LCSR_OFFSET_ITOFL);
734
735 /* Convert 64-bit variables to 2x 32-bit variables */
736 reg_join(vme_base_high, vme_base_low, vme_base);
737 reg_join(vme_bound_high, vme_bound_low, &vme_bound);
738 reg_join(pci_offset_high, pci_offset_low, &pci_offset);
739
740 *pci_base = (dma_addr_t)vme_base + pci_offset;
741
742 *enabled = 0;
743 *aspace = 0;
744 *cycle = 0;
745
746 if (ctl & TSI148_LCSR_ITAT_EN)
747 *enabled = 1;
748
749 if ((ctl & TSI148_LCSR_ITAT_AS_M) == TSI148_LCSR_ITAT_AS_A16) {
750 granularity = 0x10;
751 *aspace |= VME_A16;
752 }
753 if ((ctl & TSI148_LCSR_ITAT_AS_M) == TSI148_LCSR_ITAT_AS_A24) {
754 granularity = 0x1000;
755 *aspace |= VME_A24;
756 }
757 if ((ctl & TSI148_LCSR_ITAT_AS_M) == TSI148_LCSR_ITAT_AS_A32) {
758 granularity = 0x10000;
759 *aspace |= VME_A32;
760 }
761 if ((ctl & TSI148_LCSR_ITAT_AS_M) == TSI148_LCSR_ITAT_AS_A64) {
762 granularity = 0x10000;
763 *aspace |= VME_A64;
764 }
765
766 /* Need granularity before we set the size */
767 *size = (unsigned long long)((vme_bound - *vme_base) + granularity);
768
769
770 if ((ctl & TSI148_LCSR_ITAT_2eSSTM_M) == TSI148_LCSR_ITAT_2eSSTM_160)
771 *cycle |= VME_2eSST160;
772 if ((ctl & TSI148_LCSR_ITAT_2eSSTM_M) == TSI148_LCSR_ITAT_2eSSTM_267)
773 *cycle |= VME_2eSST267;
774 if ((ctl & TSI148_LCSR_ITAT_2eSSTM_M) == TSI148_LCSR_ITAT_2eSSTM_320)
775 *cycle |= VME_2eSST320;
776
777 if (ctl & TSI148_LCSR_ITAT_BLT)
778 *cycle |= VME_BLT;
779 if (ctl & TSI148_LCSR_ITAT_MBLT)
780 *cycle |= VME_MBLT;
781 if (ctl & TSI148_LCSR_ITAT_2eVME)
782 *cycle |= VME_2eVME;
783 if (ctl & TSI148_LCSR_ITAT_2eSST)
784 *cycle |= VME_2eSST;
785 if (ctl & TSI148_LCSR_ITAT_2eSSTB)
786 *cycle |= VME_2eSSTB;
787
788 if (ctl & TSI148_LCSR_ITAT_SUPR)
789 *cycle |= VME_SUPER;
790 if (ctl & TSI148_LCSR_ITAT_NPRIV)
791 *cycle |= VME_USER;
792 if (ctl & TSI148_LCSR_ITAT_PGM)
793 *cycle |= VME_PROG;
794 if (ctl & TSI148_LCSR_ITAT_DATA)
795 *cycle |= VME_DATA;
796
797 return 0;
798}
799
800/*
801 * Allocate and map PCI Resource
802 */
803static int tsi148_alloc_resource(struct vme_master_resource *image,
804 unsigned long long size)
805{
806 unsigned long long existing_size;
807 int retval = 0;
808 struct pci_dev *pdev;
809 struct vme_bridge *tsi148_bridge;
810
811 tsi148_bridge = image->parent;
812
813 pdev = container_of(tsi148_bridge->parent, struct pci_dev, dev);
814
815 existing_size = (unsigned long long)(image->bus_resource.end -
816 image->bus_resource.start);
817
818 /* If the existing size is OK, return */
819 if ((size != 0) && (existing_size == (size - 1)))
820 return 0;
821
822 if (existing_size != 0) {
823 iounmap(image->kern_base);
824 image->kern_base = NULL;
825 kfree(image->bus_resource.name);
826 release_resource(&image->bus_resource);
827 memset(&image->bus_resource, 0, sizeof(struct resource));
828 }
829
830 /* Exit here if size is zero */
831 if (size == 0)
832 return 0;
833
834 if (image->bus_resource.name == NULL) {
835 image->bus_resource.name = kmalloc(VMENAMSIZ+3, GFP_ATOMIC);
836 if (image->bus_resource.name == NULL) {
837 dev_err(tsi148_bridge->parent, "Unable to allocate "
838 "memory for resource name\n");
839 retval = -ENOMEM;
840 goto err_name;
841 }
842 }
843
844 sprintf((char *)image->bus_resource.name, "%s.%d", tsi148_bridge->name,
845 image->number);
846
847 image->bus_resource.start = 0;
848 image->bus_resource.end = (unsigned long)size;
849 image->bus_resource.flags = IORESOURCE_MEM;
850
851 retval = pci_bus_alloc_resource(pdev->bus,
852 &image->bus_resource, size, size, PCIBIOS_MIN_MEM,
853 0, NULL, NULL);
854 if (retval) {
855 dev_err(tsi148_bridge->parent, "Failed to allocate mem "
856 "resource for window %d size 0x%lx start 0x%lx\n",
857 image->number, (unsigned long)size,
858 (unsigned long)image->bus_resource.start);
859 goto err_resource;
860 }
861
862 image->kern_base = ioremap_nocache(
863 image->bus_resource.start, size);
864 if (image->kern_base == NULL) {
865 dev_err(tsi148_bridge->parent, "Failed to remap resource\n");
866 retval = -ENOMEM;
867 goto err_remap;
868 }
869
870 return 0;
871
872err_remap:
873 release_resource(&image->bus_resource);
874err_resource:
875 kfree(image->bus_resource.name);
876 memset(&image->bus_resource, 0, sizeof(struct resource));
877err_name:
878 return retval;
879}
880
881/*
882 * Free and unmap PCI Resource
883 */
884static void tsi148_free_resource(struct vme_master_resource *image)
885{
886 iounmap(image->kern_base);
887 image->kern_base = NULL;
888 release_resource(&image->bus_resource);
889 kfree(image->bus_resource.name);
890 memset(&image->bus_resource, 0, sizeof(struct resource));
891}
892
893/*
894 * Set the attributes of an outbound window.
895 */
896static int tsi148_master_set(struct vme_master_resource *image, int enabled,
897 unsigned long long vme_base, unsigned long long size, u32 aspace,
898 u32 cycle, u32 dwidth)
899{
900 int retval = 0;
901 unsigned int i;
902 unsigned int temp_ctl = 0;
903 unsigned int pci_base_low, pci_base_high;
904 unsigned int pci_bound_low, pci_bound_high;
905 unsigned int vme_offset_low, vme_offset_high;
906 unsigned long long pci_bound, vme_offset, pci_base;
907 struct vme_bridge *tsi148_bridge;
908 struct tsi148_driver *bridge;
909
910 tsi148_bridge = image->parent;
911
912 bridge = tsi148_bridge->driver_priv;
913
914 /* Verify input data */
915 if (vme_base & 0xFFFF) {
916 dev_err(tsi148_bridge->parent, "Invalid VME Window "
917 "alignment\n");
918 retval = -EINVAL;
919 goto err_window;
920 }
921
922 if ((size == 0) && (enabled != 0)) {
923 dev_err(tsi148_bridge->parent, "Size must be non-zero for "
924 "enabled windows\n");
925 retval = -EINVAL;
926 goto err_window;
927 }
928
929 spin_lock(&image->lock);
930
931 /* Let's allocate the resource here rather than further up the stack as
932 * it avoids pushing loads of bus dependent stuff up the stack. If size
933 * is zero, any existing resource will be freed.
934 */
935 retval = tsi148_alloc_resource(image, size);
936 if (retval) {
937 spin_unlock(&image->lock);
938 dev_err(tsi148_bridge->parent, "Unable to allocate memory for "
939 "resource\n");
940 goto err_res;
941 }
942
943 if (size == 0) {
944 pci_base = 0;
945 pci_bound = 0;
946 vme_offset = 0;
947 } else {
948 pci_base = (unsigned long long)image->bus_resource.start;
949
950 /*
951 * Bound address is a valid address for the window, adjust
952 * according to window granularity.
953 */
954 pci_bound = pci_base + (size - 0x10000);
955 vme_offset = vme_base - pci_base;
956 }
957
958 /* Convert 64-bit variables to 2x 32-bit variables */
959 reg_split(pci_base, &pci_base_high, &pci_base_low);
960 reg_split(pci_bound, &pci_bound_high, &pci_bound_low);
961 reg_split(vme_offset, &vme_offset_high, &vme_offset_low);
962
963 if (pci_base_low & 0xFFFF) {
964 spin_unlock(&image->lock);
965 dev_err(tsi148_bridge->parent, "Invalid PCI base alignment\n");
966 retval = -EINVAL;
967 goto err_gran;
968 }
969 if (pci_bound_low & 0xFFFF) {
970 spin_unlock(&image->lock);
971 dev_err(tsi148_bridge->parent, "Invalid PCI bound alignment\n");
972 retval = -EINVAL;
973 goto err_gran;
974 }
975 if (vme_offset_low & 0xFFFF) {
976 spin_unlock(&image->lock);
977 dev_err(tsi148_bridge->parent, "Invalid VME Offset "
978 "alignment\n");
979 retval = -EINVAL;
980 goto err_gran;
981 }
982
983 i = image->number;
984
985 /* Disable while we are mucking around */
986 temp_ctl = ioread32be(bridge->base + TSI148_LCSR_OT[i] +
987 TSI148_LCSR_OFFSET_OTAT);
988 temp_ctl &= ~TSI148_LCSR_OTAT_EN;
989 iowrite32be(temp_ctl, bridge->base + TSI148_LCSR_OT[i] +
990 TSI148_LCSR_OFFSET_OTAT);
991
992 /* Setup 2eSST speeds */
993 temp_ctl &= ~TSI148_LCSR_OTAT_2eSSTM_M;
994 switch (cycle & (VME_2eSST160 | VME_2eSST267 | VME_2eSST320)) {
995 case VME_2eSST160:
996 temp_ctl |= TSI148_LCSR_OTAT_2eSSTM_160;
997 break;
998 case VME_2eSST267:
999 temp_ctl |= TSI148_LCSR_OTAT_2eSSTM_267;
1000 break;
1001 case VME_2eSST320:
1002 temp_ctl |= TSI148_LCSR_OTAT_2eSSTM_320;
1003 break;
1004 }
1005
1006 /* Setup cycle types */
1007 if (cycle & VME_BLT) {
1008 temp_ctl &= ~TSI148_LCSR_OTAT_TM_M;
1009 temp_ctl |= TSI148_LCSR_OTAT_TM_BLT;
1010 }
1011 if (cycle & VME_MBLT) {
1012 temp_ctl &= ~TSI148_LCSR_OTAT_TM_M;
1013 temp_ctl |= TSI148_LCSR_OTAT_TM_MBLT;
1014 }
1015 if (cycle & VME_2eVME) {
1016 temp_ctl &= ~TSI148_LCSR_OTAT_TM_M;
1017 temp_ctl |= TSI148_LCSR_OTAT_TM_2eVME;
1018 }
1019 if (cycle & VME_2eSST) {
1020 temp_ctl &= ~TSI148_LCSR_OTAT_TM_M;
1021 temp_ctl |= TSI148_LCSR_OTAT_TM_2eSST;
1022 }
1023 if (cycle & VME_2eSSTB) {
1024 dev_warn(tsi148_bridge->parent, "Currently not setting "
1025 "Broadcast Select Registers\n");
1026 temp_ctl &= ~TSI148_LCSR_OTAT_TM_M;
1027 temp_ctl |= TSI148_LCSR_OTAT_TM_2eSSTB;
1028 }
1029
1030 /* Setup data width */
1031 temp_ctl &= ~TSI148_LCSR_OTAT_DBW_M;
1032 switch (dwidth) {
1033 case VME_D16:
1034 temp_ctl |= TSI148_LCSR_OTAT_DBW_16;
1035 break;
1036 case VME_D32:
1037 temp_ctl |= TSI148_LCSR_OTAT_DBW_32;
1038 break;
1039 default:
1040 spin_unlock(&image->lock);
1041 dev_err(tsi148_bridge->parent, "Invalid data width\n");
1042 retval = -EINVAL;
1043 goto err_dwidth;
1044 }
1045
1046 /* Setup address space */
1047 temp_ctl &= ~TSI148_LCSR_OTAT_AMODE_M;
1048 switch (aspace) {
1049 case VME_A16:
1050 temp_ctl |= TSI148_LCSR_OTAT_AMODE_A16;
1051 break;
1052 case VME_A24:
1053 temp_ctl |= TSI148_LCSR_OTAT_AMODE_A24;
1054 break;
1055 case VME_A32:
1056 temp_ctl |= TSI148_LCSR_OTAT_AMODE_A32;
1057 break;
1058 case VME_A64:
1059 temp_ctl |= TSI148_LCSR_OTAT_AMODE_A64;
1060 break;
1061 case VME_CRCSR:
1062 temp_ctl |= TSI148_LCSR_OTAT_AMODE_CRCSR;
1063 break;
1064 case VME_USER1:
1065 temp_ctl |= TSI148_LCSR_OTAT_AMODE_USER1;
1066 break;
1067 case VME_USER2:
1068 temp_ctl |= TSI148_LCSR_OTAT_AMODE_USER2;
1069 break;
1070 case VME_USER3:
1071 temp_ctl |= TSI148_LCSR_OTAT_AMODE_USER3;
1072 break;
1073 case VME_USER4:
1074 temp_ctl |= TSI148_LCSR_OTAT_AMODE_USER4;
1075 break;
1076 default:
1077 spin_unlock(&image->lock);
1078 dev_err(tsi148_bridge->parent, "Invalid address space\n");
1079 retval = -EINVAL;
1080 goto err_aspace;
1081 break;
1082 }
1083
1084 temp_ctl &= ~(3<<4);
1085 if (cycle & VME_SUPER)
1086 temp_ctl |= TSI148_LCSR_OTAT_SUP;
1087 if (cycle & VME_PROG)
1088 temp_ctl |= TSI148_LCSR_OTAT_PGM;
1089
1090 /* Setup mapping */
1091 iowrite32be(pci_base_high, bridge->base + TSI148_LCSR_OT[i] +
1092 TSI148_LCSR_OFFSET_OTSAU);
1093 iowrite32be(pci_base_low, bridge->base + TSI148_LCSR_OT[i] +
1094 TSI148_LCSR_OFFSET_OTSAL);
1095 iowrite32be(pci_bound_high, bridge->base + TSI148_LCSR_OT[i] +
1096 TSI148_LCSR_OFFSET_OTEAU);
1097 iowrite32be(pci_bound_low, bridge->base + TSI148_LCSR_OT[i] +
1098 TSI148_LCSR_OFFSET_OTEAL);
1099 iowrite32be(vme_offset_high, bridge->base + TSI148_LCSR_OT[i] +
1100 TSI148_LCSR_OFFSET_OTOFU);
1101 iowrite32be(vme_offset_low, bridge->base + TSI148_LCSR_OT[i] +
1102 TSI148_LCSR_OFFSET_OTOFL);
1103
1104 /* Write ctl reg without enable */
1105 iowrite32be(temp_ctl, bridge->base + TSI148_LCSR_OT[i] +
1106 TSI148_LCSR_OFFSET_OTAT);
1107
1108 if (enabled)
1109 temp_ctl |= TSI148_LCSR_OTAT_EN;
1110
1111 iowrite32be(temp_ctl, bridge->base + TSI148_LCSR_OT[i] +
1112 TSI148_LCSR_OFFSET_OTAT);
1113
1114 spin_unlock(&image->lock);
1115 return 0;
1116
1117err_aspace:
1118err_dwidth:
1119err_gran:
1120 tsi148_free_resource(image);
1121err_res:
1122err_window:
1123 return retval;
1124
1125}
1126
1127/*
1128 * Set the attributes of an outbound window.
1129 *
1130 * XXX Not parsing prefetch information.
1131 */
1132static int __tsi148_master_get(struct vme_master_resource *image, int *enabled,
1133 unsigned long long *vme_base, unsigned long long *size, u32 *aspace,
1134 u32 *cycle, u32 *dwidth)
1135{
1136 unsigned int i, ctl;
1137 unsigned int pci_base_low, pci_base_high;
1138 unsigned int pci_bound_low, pci_bound_high;
1139 unsigned int vme_offset_low, vme_offset_high;
1140
1141 unsigned long long pci_base, pci_bound, vme_offset;
1142 struct tsi148_driver *bridge;
1143
1144 bridge = image->parent->driver_priv;
1145
1146 i = image->number;
1147
1148 ctl = ioread32be(bridge->base + TSI148_LCSR_OT[i] +
1149 TSI148_LCSR_OFFSET_OTAT);
1150
1151 pci_base_high = ioread32be(bridge->base + TSI148_LCSR_OT[i] +
1152 TSI148_LCSR_OFFSET_OTSAU);
1153 pci_base_low = ioread32be(bridge->base + TSI148_LCSR_OT[i] +
1154 TSI148_LCSR_OFFSET_OTSAL);
1155 pci_bound_high = ioread32be(bridge->base + TSI148_LCSR_OT[i] +
1156 TSI148_LCSR_OFFSET_OTEAU);
1157 pci_bound_low = ioread32be(bridge->base + TSI148_LCSR_OT[i] +
1158 TSI148_LCSR_OFFSET_OTEAL);
1159 vme_offset_high = ioread32be(bridge->base + TSI148_LCSR_OT[i] +
1160 TSI148_LCSR_OFFSET_OTOFU);
1161 vme_offset_low = ioread32be(bridge->base + TSI148_LCSR_OT[i] +
1162 TSI148_LCSR_OFFSET_OTOFL);
1163
1164 /* Convert 64-bit variables to 2x 32-bit variables */
1165 reg_join(pci_base_high, pci_base_low, &pci_base);
1166 reg_join(pci_bound_high, pci_bound_low, &pci_bound);
1167 reg_join(vme_offset_high, vme_offset_low, &vme_offset);
1168
1169 *vme_base = pci_base + vme_offset;
1170 *size = (unsigned long long)(pci_bound - pci_base) + 0x10000;
1171
1172 *enabled = 0;
1173 *aspace = 0;
1174 *cycle = 0;
1175 *dwidth = 0;
1176
1177 if (ctl & TSI148_LCSR_OTAT_EN)
1178 *enabled = 1;
1179
1180 /* Setup address space */
1181 if ((ctl & TSI148_LCSR_OTAT_AMODE_M) == TSI148_LCSR_OTAT_AMODE_A16)
1182 *aspace |= VME_A16;
1183 if ((ctl & TSI148_LCSR_OTAT_AMODE_M) == TSI148_LCSR_OTAT_AMODE_A24)
1184 *aspace |= VME_A24;
1185 if ((ctl & TSI148_LCSR_OTAT_AMODE_M) == TSI148_LCSR_OTAT_AMODE_A32)
1186 *aspace |= VME_A32;
1187 if ((ctl & TSI148_LCSR_OTAT_AMODE_M) == TSI148_LCSR_OTAT_AMODE_A64)
1188 *aspace |= VME_A64;
1189 if ((ctl & TSI148_LCSR_OTAT_AMODE_M) == TSI148_LCSR_OTAT_AMODE_CRCSR)
1190 *aspace |= VME_CRCSR;
1191 if ((ctl & TSI148_LCSR_OTAT_AMODE_M) == TSI148_LCSR_OTAT_AMODE_USER1)
1192 *aspace |= VME_USER1;
1193 if ((ctl & TSI148_LCSR_OTAT_AMODE_M) == TSI148_LCSR_OTAT_AMODE_USER2)
1194 *aspace |= VME_USER2;
1195 if ((ctl & TSI148_LCSR_OTAT_AMODE_M) == TSI148_LCSR_OTAT_AMODE_USER3)
1196 *aspace |= VME_USER3;
1197 if ((ctl & TSI148_LCSR_OTAT_AMODE_M) == TSI148_LCSR_OTAT_AMODE_USER4)
1198 *aspace |= VME_USER4;
1199
1200 /* Setup 2eSST speeds */
1201 if ((ctl & TSI148_LCSR_OTAT_2eSSTM_M) == TSI148_LCSR_OTAT_2eSSTM_160)
1202 *cycle |= VME_2eSST160;
1203 if ((ctl & TSI148_LCSR_OTAT_2eSSTM_M) == TSI148_LCSR_OTAT_2eSSTM_267)
1204 *cycle |= VME_2eSST267;
1205 if ((ctl & TSI148_LCSR_OTAT_2eSSTM_M) == TSI148_LCSR_OTAT_2eSSTM_320)
1206 *cycle |= VME_2eSST320;
1207
1208 /* Setup cycle types */
1209 if ((ctl & TSI148_LCSR_OTAT_TM_M) == TSI148_LCSR_OTAT_TM_SCT)
1210 *cycle |= VME_SCT;
1211 if ((ctl & TSI148_LCSR_OTAT_TM_M) == TSI148_LCSR_OTAT_TM_BLT)
1212 *cycle |= VME_BLT;
1213 if ((ctl & TSI148_LCSR_OTAT_TM_M) == TSI148_LCSR_OTAT_TM_MBLT)
1214 *cycle |= VME_MBLT;
1215 if ((ctl & TSI148_LCSR_OTAT_TM_M) == TSI148_LCSR_OTAT_TM_2eVME)
1216 *cycle |= VME_2eVME;
1217 if ((ctl & TSI148_LCSR_OTAT_TM_M) == TSI148_LCSR_OTAT_TM_2eSST)
1218 *cycle |= VME_2eSST;
1219 if ((ctl & TSI148_LCSR_OTAT_TM_M) == TSI148_LCSR_OTAT_TM_2eSSTB)
1220 *cycle |= VME_2eSSTB;
1221
1222 if (ctl & TSI148_LCSR_OTAT_SUP)
1223 *cycle |= VME_SUPER;
1224 else
1225 *cycle |= VME_USER;
1226
1227 if (ctl & TSI148_LCSR_OTAT_PGM)
1228 *cycle |= VME_PROG;
1229 else
1230 *cycle |= VME_DATA;
1231
1232 /* Setup data width */
1233 if ((ctl & TSI148_LCSR_OTAT_DBW_M) == TSI148_LCSR_OTAT_DBW_16)
1234 *dwidth = VME_D16;
1235 if ((ctl & TSI148_LCSR_OTAT_DBW_M) == TSI148_LCSR_OTAT_DBW_32)
1236 *dwidth = VME_D32;
1237
1238 return 0;
1239}
1240
1241
1242static int tsi148_master_get(struct vme_master_resource *image, int *enabled,
1243 unsigned long long *vme_base, unsigned long long *size, u32 *aspace,
1244 u32 *cycle, u32 *dwidth)
1245{
1246 int retval;
1247
1248 spin_lock(&image->lock);
1249
1250 retval = __tsi148_master_get(image, enabled, vme_base, size, aspace,
1251 cycle, dwidth);
1252
1253 spin_unlock(&image->lock);
1254
1255 return retval;
1256}
1257
1258static ssize_t tsi148_master_read(struct vme_master_resource *image, void *buf,
1259 size_t count, loff_t offset)
1260{
1261 int retval, enabled;
1262 unsigned long long vme_base, size;
1263 u32 aspace, cycle, dwidth;
1264 struct vme_bus_error *vme_err = NULL;
1265 struct vme_bridge *tsi148_bridge;
1266
1267 tsi148_bridge = image->parent;
1268
1269 spin_lock(&image->lock);
1270
1271 memcpy_fromio(buf, image->kern_base + offset, (unsigned int)count);
1272 retval = count;
1273
1274 if (!err_chk)
1275 goto skip_chk;
1276
1277 __tsi148_master_get(image, &enabled, &vme_base, &size, &aspace, &cycle,
1278 &dwidth);
1279
1280 vme_err = tsi148_find_error(tsi148_bridge, aspace, vme_base + offset,
1281 count);
1282 if (vme_err != NULL) {
1283 dev_err(image->parent->parent, "First VME read error detected "
1284 "an at address 0x%llx\n", vme_err->address);
1285 retval = vme_err->address - (vme_base + offset);
1286 /* Clear down save errors in this address range */
1287 tsi148_clear_errors(tsi148_bridge, aspace, vme_base + offset,
1288 count);
1289 }
1290
1291skip_chk:
1292 spin_unlock(&image->lock);
1293
1294 return retval;
1295}
1296
1297
1298static ssize_t tsi148_master_write(struct vme_master_resource *image, void *buf,
1299 size_t count, loff_t offset)
1300{
1301 int retval = 0, enabled;
1302 unsigned long long vme_base, size;
1303 u32 aspace, cycle, dwidth;
1304
1305 struct vme_bus_error *vme_err = NULL;
1306 struct vme_bridge *tsi148_bridge;
1307 struct tsi148_driver *bridge;
1308
1309 tsi148_bridge = image->parent;
1310
1311 bridge = tsi148_bridge->driver_priv;
1312
1313 spin_lock(&image->lock);
1314
1315 memcpy_toio(image->kern_base + offset, buf, (unsigned int)count);
1316 retval = count;
1317
1318 /*
1319 * Writes are posted. We need to do a read on the VME bus to flush out
1320 * all of the writes before we check for errors. We can't guarantee
1321 * that reading the data we have just written is safe. It is believed
1322 * that there isn't any read, write re-ordering, so we can read any
1323 * location in VME space, so lets read the Device ID from the tsi148's
1324 * own registers as mapped into CR/CSR space.
1325 *
1326 * We check for saved errors in the written address range/space.
1327 */
1328
1329 if (!err_chk)
1330 goto skip_chk;
1331
1332 /*
1333 * Get window info first, to maximise the time that the buffers may
1334 * fluch on their own
1335 */
1336 __tsi148_master_get(image, &enabled, &vme_base, &size, &aspace, &cycle,
1337 &dwidth);
1338
1339 ioread16(bridge->flush_image->kern_base + 0x7F000);
1340
1341 vme_err = tsi148_find_error(tsi148_bridge, aspace, vme_base + offset,
1342 count);
1343 if (vme_err != NULL) {
1344 dev_warn(tsi148_bridge->parent, "First VME write error detected"
1345 " an at address 0x%llx\n", vme_err->address);
1346 retval = vme_err->address - (vme_base + offset);
1347 /* Clear down save errors in this address range */
1348 tsi148_clear_errors(tsi148_bridge, aspace, vme_base + offset,
1349 count);
1350 }
1351
1352skip_chk:
1353 spin_unlock(&image->lock);
1354
1355 return retval;
1356}
1357
1358/*
1359 * Perform an RMW cycle on the VME bus.
1360 *
1361 * Requires a previously configured master window, returns final value.
1362 */
1363static unsigned int tsi148_master_rmw(struct vme_master_resource *image,
1364 unsigned int mask, unsigned int compare, unsigned int swap,
1365 loff_t offset)
1366{
1367 unsigned long long pci_addr;
1368 unsigned int pci_addr_high, pci_addr_low;
1369 u32 tmp, result;
1370 int i;
1371 struct tsi148_driver *bridge;
1372
1373 bridge = image->parent->driver_priv;
1374
1375 /* Find the PCI address that maps to the desired VME address */
1376 i = image->number;
1377
1378 /* Locking as we can only do one of these at a time */
1379 mutex_lock(&bridge->vme_rmw);
1380
1381 /* Lock image */
1382 spin_lock(&image->lock);
1383
1384 pci_addr_high = ioread32be(bridge->base + TSI148_LCSR_OT[i] +
1385 TSI148_LCSR_OFFSET_OTSAU);
1386 pci_addr_low = ioread32be(bridge->base + TSI148_LCSR_OT[i] +
1387 TSI148_LCSR_OFFSET_OTSAL);
1388
1389 reg_join(pci_addr_high, pci_addr_low, &pci_addr);
1390 reg_split(pci_addr + offset, &pci_addr_high, &pci_addr_low);
1391
1392 /* Configure registers */
1393 iowrite32be(mask, bridge->base + TSI148_LCSR_RMWEN);
1394 iowrite32be(compare, bridge->base + TSI148_LCSR_RMWC);
1395 iowrite32be(swap, bridge->base + TSI148_LCSR_RMWS);
1396 iowrite32be(pci_addr_high, bridge->base + TSI148_LCSR_RMWAU);
1397 iowrite32be(pci_addr_low, bridge->base + TSI148_LCSR_RMWAL);
1398
1399 /* Enable RMW */
1400 tmp = ioread32be(bridge->base + TSI148_LCSR_VMCTRL);
1401 tmp |= TSI148_LCSR_VMCTRL_RMWEN;
1402 iowrite32be(tmp, bridge->base + TSI148_LCSR_VMCTRL);
1403
1404 /* Kick process off with a read to the required address. */
1405 result = ioread32be(image->kern_base + offset);
1406
1407 /* Disable RMW */
1408 tmp = ioread32be(bridge->base + TSI148_LCSR_VMCTRL);
1409 tmp &= ~TSI148_LCSR_VMCTRL_RMWEN;
1410 iowrite32be(tmp, bridge->base + TSI148_LCSR_VMCTRL);
1411
1412 spin_unlock(&image->lock);
1413
1414 mutex_unlock(&bridge->vme_rmw);
1415
1416 return result;
1417}
1418
1419static int tsi148_dma_set_vme_src_attributes(struct device *dev, __be32 *attr,
1420 u32 aspace, u32 cycle, u32 dwidth)
1421{
1422 u32 val;
1423
1424 val = be32_to_cpu(*attr);
1425
1426 /* Setup 2eSST speeds */
1427 switch (cycle & (VME_2eSST160 | VME_2eSST267 | VME_2eSST320)) {
1428 case VME_2eSST160:
1429 val |= TSI148_LCSR_DSAT_2eSSTM_160;
1430 break;
1431 case VME_2eSST267:
1432 val |= TSI148_LCSR_DSAT_2eSSTM_267;
1433 break;
1434 case VME_2eSST320:
1435 val |= TSI148_LCSR_DSAT_2eSSTM_320;
1436 break;
1437 }
1438
1439 /* Setup cycle types */
1440 if (cycle & VME_SCT)
1441 val |= TSI148_LCSR_DSAT_TM_SCT;
1442
1443 if (cycle & VME_BLT)
1444 val |= TSI148_LCSR_DSAT_TM_BLT;
1445
1446 if (cycle & VME_MBLT)
1447 val |= TSI148_LCSR_DSAT_TM_MBLT;
1448
1449 if (cycle & VME_2eVME)
1450 val |= TSI148_LCSR_DSAT_TM_2eVME;
1451
1452 if (cycle & VME_2eSST)
1453 val |= TSI148_LCSR_DSAT_TM_2eSST;
1454
1455 if (cycle & VME_2eSSTB) {
1456 dev_err(dev, "Currently not setting Broadcast Select "
1457 "Registers\n");
1458 val |= TSI148_LCSR_DSAT_TM_2eSSTB;
1459 }
1460
1461 /* Setup data width */
1462 switch (dwidth) {
1463 case VME_D16:
1464 val |= TSI148_LCSR_DSAT_DBW_16;
1465 break;
1466 case VME_D32:
1467 val |= TSI148_LCSR_DSAT_DBW_32;
1468 break;
1469 default:
1470 dev_err(dev, "Invalid data width\n");
1471 return -EINVAL;
1472 }
1473
1474 /* Setup address space */
1475 switch (aspace) {
1476 case VME_A16:
1477 val |= TSI148_LCSR_DSAT_AMODE_A16;
1478 break;
1479 case VME_A24:
1480 val |= TSI148_LCSR_DSAT_AMODE_A24;
1481 break;
1482 case VME_A32:
1483 val |= TSI148_LCSR_DSAT_AMODE_A32;
1484 break;
1485 case VME_A64:
1486 val |= TSI148_LCSR_DSAT_AMODE_A64;
1487 break;
1488 case VME_CRCSR:
1489 val |= TSI148_LCSR_DSAT_AMODE_CRCSR;
1490 break;
1491 case VME_USER1:
1492 val |= TSI148_LCSR_DSAT_AMODE_USER1;
1493 break;
1494 case VME_USER2:
1495 val |= TSI148_LCSR_DSAT_AMODE_USER2;
1496 break;
1497 case VME_USER3:
1498 val |= TSI148_LCSR_DSAT_AMODE_USER3;
1499 break;
1500 case VME_USER4:
1501 val |= TSI148_LCSR_DSAT_AMODE_USER4;
1502 break;
1503 default:
1504 dev_err(dev, "Invalid address space\n");
1505 return -EINVAL;
1506 break;
1507 }
1508
1509 if (cycle & VME_SUPER)
1510 val |= TSI148_LCSR_DSAT_SUP;
1511 if (cycle & VME_PROG)
1512 val |= TSI148_LCSR_DSAT_PGM;
1513
1514 *attr = cpu_to_be32(val);
1515
1516 return 0;
1517}
1518
1519static int tsi148_dma_set_vme_dest_attributes(struct device *dev, __be32 *attr,
1520 u32 aspace, u32 cycle, u32 dwidth)
1521{
1522 u32 val;
1523
1524 val = be32_to_cpu(*attr);
1525
1526 /* Setup 2eSST speeds */
1527 switch (cycle & (VME_2eSST160 | VME_2eSST267 | VME_2eSST320)) {
1528 case VME_2eSST160:
1529 val |= TSI148_LCSR_DDAT_2eSSTM_160;
1530 break;
1531 case VME_2eSST267:
1532 val |= TSI148_LCSR_DDAT_2eSSTM_267;
1533 break;
1534 case VME_2eSST320:
1535 val |= TSI148_LCSR_DDAT_2eSSTM_320;
1536 break;
1537 }
1538
1539 /* Setup cycle types */
1540 if (cycle & VME_SCT)
1541 val |= TSI148_LCSR_DDAT_TM_SCT;
1542
1543 if (cycle & VME_BLT)
1544 val |= TSI148_LCSR_DDAT_TM_BLT;
1545
1546 if (cycle & VME_MBLT)
1547 val |= TSI148_LCSR_DDAT_TM_MBLT;
1548
1549 if (cycle & VME_2eVME)
1550 val |= TSI148_LCSR_DDAT_TM_2eVME;
1551
1552 if (cycle & VME_2eSST)
1553 val |= TSI148_LCSR_DDAT_TM_2eSST;
1554
1555 if (cycle & VME_2eSSTB) {
1556 dev_err(dev, "Currently not setting Broadcast Select "
1557 "Registers\n");
1558 val |= TSI148_LCSR_DDAT_TM_2eSSTB;
1559 }
1560
1561 /* Setup data width */
1562 switch (dwidth) {
1563 case VME_D16:
1564 val |= TSI148_LCSR_DDAT_DBW_16;
1565 break;
1566 case VME_D32:
1567 val |= TSI148_LCSR_DDAT_DBW_32;
1568 break;
1569 default:
1570 dev_err(dev, "Invalid data width\n");
1571 return -EINVAL;
1572 }
1573
1574 /* Setup address space */
1575 switch (aspace) {
1576 case VME_A16:
1577 val |= TSI148_LCSR_DDAT_AMODE_A16;
1578 break;
1579 case VME_A24:
1580 val |= TSI148_LCSR_DDAT_AMODE_A24;
1581 break;
1582 case VME_A32:
1583 val |= TSI148_LCSR_DDAT_AMODE_A32;
1584 break;
1585 case VME_A64:
1586 val |= TSI148_LCSR_DDAT_AMODE_A64;
1587 break;
1588 case VME_CRCSR:
1589 val |= TSI148_LCSR_DDAT_AMODE_CRCSR;
1590 break;
1591 case VME_USER1:
1592 val |= TSI148_LCSR_DDAT_AMODE_USER1;
1593 break;
1594 case VME_USER2:
1595 val |= TSI148_LCSR_DDAT_AMODE_USER2;
1596 break;
1597 case VME_USER3:
1598 val |= TSI148_LCSR_DDAT_AMODE_USER3;
1599 break;
1600 case VME_USER4:
1601 val |= TSI148_LCSR_DDAT_AMODE_USER4;
1602 break;
1603 default:
1604 dev_err(dev, "Invalid address space\n");
1605 return -EINVAL;
1606 break;
1607 }
1608
1609 if (cycle & VME_SUPER)
1610 val |= TSI148_LCSR_DDAT_SUP;
1611 if (cycle & VME_PROG)
1612 val |= TSI148_LCSR_DDAT_PGM;
1613
1614 *attr = cpu_to_be32(val);
1615
1616 return 0;
1617}
1618
1619/*
1620 * Add a link list descriptor to the list
1621 *
1622 * Note: DMA engine expects the DMA descriptor to be big endian.
1623 */
1624static int tsi148_dma_list_add(struct vme_dma_list *list,
1625 struct vme_dma_attr *src, struct vme_dma_attr *dest, size_t count)
1626{
1627 struct tsi148_dma_entry *entry, *prev;
1628 u32 address_high, address_low, val;
1629 struct vme_dma_pattern *pattern_attr;
1630 struct vme_dma_pci *pci_attr;
1631 struct vme_dma_vme *vme_attr;
1632 int retval = 0;
1633 struct vme_bridge *tsi148_bridge;
1634
1635 tsi148_bridge = list->parent->parent;
1636
1637 /* Descriptor must be aligned on 64-bit boundaries */
1638 entry = kmalloc(sizeof(struct tsi148_dma_entry), GFP_KERNEL);
1639 if (entry == NULL) {
1640 dev_err(tsi148_bridge->parent, "Failed to allocate memory for "
1641 "dma resource structure\n");
1642 retval = -ENOMEM;
1643 goto err_mem;
1644 }
1645
1646 /* Test descriptor alignment */
1647 if ((unsigned long)&entry->descriptor & 0x7) {
1648 dev_err(tsi148_bridge->parent, "Descriptor not aligned to 8 "
1649 "byte boundary as required: %p\n",
1650 &entry->descriptor);
1651 retval = -EINVAL;
1652 goto err_align;
1653 }
1654
1655 /* Given we are going to fill out the structure, we probably don't
1656 * need to zero it, but better safe than sorry for now.
1657 */
1658 memset(&entry->descriptor, 0, sizeof(struct tsi148_dma_descriptor));
1659
1660 /* Fill out source part */
1661 switch (src->type) {
1662 case VME_DMA_PATTERN:
1663 pattern_attr = src->private;
1664
1665 entry->descriptor.dsal = cpu_to_be32(pattern_attr->pattern);
1666
1667 val = TSI148_LCSR_DSAT_TYP_PAT;
1668
1669 /* Default behaviour is 32 bit pattern */
1670 if (pattern_attr->type & VME_DMA_PATTERN_BYTE)
1671 val |= TSI148_LCSR_DSAT_PSZ;
1672
1673 /* It seems that the default behaviour is to increment */
1674 if ((pattern_attr->type & VME_DMA_PATTERN_INCREMENT) == 0)
1675 val |= TSI148_LCSR_DSAT_NIN;
1676 entry->descriptor.dsat = cpu_to_be32(val);
1677 break;
1678 case VME_DMA_PCI:
1679 pci_attr = src->private;
1680
1681 reg_split((unsigned long long)pci_attr->address, &address_high,
1682 &address_low);
1683 entry->descriptor.dsau = cpu_to_be32(address_high);
1684 entry->descriptor.dsal = cpu_to_be32(address_low);
1685 entry->descriptor.dsat = cpu_to_be32(TSI148_LCSR_DSAT_TYP_PCI);
1686 break;
1687 case VME_DMA_VME:
1688 vme_attr = src->private;
1689
1690 reg_split((unsigned long long)vme_attr->address, &address_high,
1691 &address_low);
1692 entry->descriptor.dsau = cpu_to_be32(address_high);
1693 entry->descriptor.dsal = cpu_to_be32(address_low);
1694 entry->descriptor.dsat = cpu_to_be32(TSI148_LCSR_DSAT_TYP_VME);
1695
1696 retval = tsi148_dma_set_vme_src_attributes(
1697 tsi148_bridge->parent, &entry->descriptor.dsat,
1698 vme_attr->aspace, vme_attr->cycle, vme_attr->dwidth);
1699 if (retval < 0)
1700 goto err_source;
1701 break;
1702 default:
1703 dev_err(tsi148_bridge->parent, "Invalid source type\n");
1704 retval = -EINVAL;
1705 goto err_source;
1706 break;
1707 }
1708
1709 /* Assume last link - this will be over-written by adding another */
1710 entry->descriptor.dnlau = cpu_to_be32(0);
1711 entry->descriptor.dnlal = cpu_to_be32(TSI148_LCSR_DNLAL_LLA);
1712
1713 /* Fill out destination part */
1714 switch (dest->type) {
1715 case VME_DMA_PCI:
1716 pci_attr = dest->private;
1717
1718 reg_split((unsigned long long)pci_attr->address, &address_high,
1719 &address_low);
1720 entry->descriptor.ddau = cpu_to_be32(address_high);
1721 entry->descriptor.ddal = cpu_to_be32(address_low);
1722 entry->descriptor.ddat = cpu_to_be32(TSI148_LCSR_DDAT_TYP_PCI);
1723 break;
1724 case VME_DMA_VME:
1725 vme_attr = dest->private;
1726
1727 reg_split((unsigned long long)vme_attr->address, &address_high,
1728 &address_low);
1729 entry->descriptor.ddau = cpu_to_be32(address_high);
1730 entry->descriptor.ddal = cpu_to_be32(address_low);
1731 entry->descriptor.ddat = cpu_to_be32(TSI148_LCSR_DDAT_TYP_VME);
1732
1733 retval = tsi148_dma_set_vme_dest_attributes(
1734 tsi148_bridge->parent, &entry->descriptor.ddat,
1735 vme_attr->aspace, vme_attr->cycle, vme_attr->dwidth);
1736 if (retval < 0)
1737 goto err_dest;
1738 break;
1739 default:
1740 dev_err(tsi148_bridge->parent, "Invalid destination type\n");
1741 retval = -EINVAL;
1742 goto err_dest;
1743 break;
1744 }
1745
1746 /* Fill out count */
1747 entry->descriptor.dcnt = cpu_to_be32((u32)count);
1748
1749 /* Add to list */
1750 list_add_tail(&entry->list, &list->entries);
1751
1752 /* Fill out previous descriptors "Next Address" */
1753 if (entry->list.prev != &list->entries) {
1754 prev = list_entry(entry->list.prev, struct tsi148_dma_entry,
1755 list);
1756 /* We need the bus address for the pointer */
1757 entry->dma_handle = dma_map_single(tsi148_bridge->parent,
1758 &entry->descriptor,
1759 sizeof(struct tsi148_dma_descriptor), DMA_TO_DEVICE);
1760
1761 reg_split((unsigned long long)entry->dma_handle, &address_high,
1762 &address_low);
1763 entry->descriptor.dnlau = cpu_to_be32(address_high);
1764 entry->descriptor.dnlal = cpu_to_be32(address_low);
1765
1766 }
1767
1768 return 0;
1769
1770err_dest:
1771err_source:
1772err_align:
1773 kfree(entry);
1774err_mem:
1775 return retval;
1776}
1777
1778/*
1779 * Check to see if the provided DMA channel is busy.
1780 */
1781static int tsi148_dma_busy(struct vme_bridge *tsi148_bridge, int channel)
1782{
1783 u32 tmp;
1784 struct tsi148_driver *bridge;
1785
1786 bridge = tsi148_bridge->driver_priv;
1787
1788 tmp = ioread32be(bridge->base + TSI148_LCSR_DMA[channel] +
1789 TSI148_LCSR_OFFSET_DSTA);
1790
1791 if (tmp & TSI148_LCSR_DSTA_BSY)
1792 return 0;
1793 else
1794 return 1;
1795
1796}
1797
/*
 * Execute a previously generated link list
 *
 * XXX Need to provide control register configuration.
 *
 * Hands the first descriptor's bus address to the channel, kicks off the
 * transfer, and sleeps until the interrupt handler reports the channel
 * idle.  Returns 0 on success, -EBUSY if the channel is already running a
 * list, or -EIO if the hardware flags a VME bus error.
 */
static int tsi148_dma_list_exec(struct vme_dma_list *list)
{
	struct vme_dma_resource *ctrlr;
	int channel, retval = 0;
	struct tsi148_dma_entry *entry;
	u32 bus_addr_high, bus_addr_low;
	u32 val, dctlreg = 0;
	struct vme_bridge *tsi148_bridge;
	struct tsi148_driver *bridge;

	ctrlr = list->parent;

	tsi148_bridge = ctrlr->parent;

	bridge = tsi148_bridge->driver_priv;

	mutex_lock(&ctrlr->mtx);

	channel = ctrlr->number;

	if (!list_empty(&ctrlr->running)) {
		/*
		 * XXX We have an active DMA transfer and currently haven't
		 * sorted out the mechanism for "pending" DMA transfers.
		 * Return busy.
		 */
		/* Need to add to pending here */
		mutex_unlock(&ctrlr->mtx);
		return -EBUSY;
	} else {
		list_add(&list->list, &ctrlr->running);
	}

	/* Get first bus address and write into registers */
	entry = list_first_entry(&list->entries, struct tsi148_dma_entry,
		list);

	/* The chip fetches descriptors by bus address, so map the first one.
	 * NOTE(review): the dma_map_single() result is not checked with
	 * dma_mapping_error() - worth confirming on IOMMU platforms.
	 */
	entry->dma_handle = dma_map_single(tsi148_bridge->parent,
		&entry->descriptor,
		sizeof(struct tsi148_dma_descriptor), DMA_TO_DEVICE);

	mutex_unlock(&ctrlr->mtx);

	reg_split(entry->dma_handle, &bus_addr_high, &bus_addr_low);

	/* Point the channel's "next link address" at the first descriptor */
	iowrite32be(bus_addr_high, bridge->base +
		TSI148_LCSR_DMA[channel] + TSI148_LCSR_OFFSET_DNLAU);
	iowrite32be(bus_addr_low, bridge->base +
		TSI148_LCSR_DMA[channel] + TSI148_LCSR_OFFSET_DNLAL);

	dctlreg = ioread32be(bridge->base + TSI148_LCSR_DMA[channel] +
		TSI148_LCSR_OFFSET_DCTL);

	/* Start the operation */
	iowrite32be(dctlreg | TSI148_LCSR_DCTL_DGO, bridge->base +
		TSI148_LCSR_DMA[channel] + TSI148_LCSR_OFFSET_DCTL);

	/* Woken by the DMA interrupt handler via dma_queue[channel] */
	wait_event_interruptible(bridge->dma_queue[channel],
		tsi148_dma_busy(ctrlr->parent, channel));

	/*
	 * Read status register, this register is valid until we kick off a
	 * new transfer.
	 */
	val = ioread32be(bridge->base + TSI148_LCSR_DMA[channel] +
		TSI148_LCSR_OFFSET_DSTA);

	/* VBE indicates a VME bus error occurred during the transfer */
	if (val & TSI148_LCSR_DSTA_VBE) {
		dev_err(tsi148_bridge->parent, "DMA Error. DSTA=%08X\n", val);
		retval = -EIO;
	}

	/* Remove list from running list */
	mutex_lock(&ctrlr->mtx);
	list_del(&list->list);
	mutex_unlock(&ctrlr->mtx);

	return retval;
}
1882
1883/*
1884 * Clean up a previously generated link list
1885 *
1886 * We have a separate function, don't assume that the chain can't be reused.
1887 */
1888static int tsi148_dma_list_empty(struct vme_dma_list *list)
1889{
1890 struct list_head *pos, *temp;
1891 struct tsi148_dma_entry *entry;
1892
1893 struct vme_bridge *tsi148_bridge = list->parent->parent;
1894
1895 /* detach and free each entry */
1896 list_for_each_safe(pos, temp, &list->entries) {
1897 list_del(pos);
1898 entry = list_entry(pos, struct tsi148_dma_entry, list);
1899
1900 dma_unmap_single(tsi148_bridge->parent, entry->dma_handle,
1901 sizeof(struct tsi148_dma_descriptor), DMA_TO_DEVICE);
1902 kfree(entry);
1903 }
1904
1905 return 0;
1906}
1907
1908/*
1909 * All 4 location monitors reside at the same base - this is therefore a
1910 * system wide configuration.
1911 *
1912 * This does not enable the LM monitor - that should be done when the first
1913 * callback is attached and disabled when the last callback is removed.
1914 */
1915static int tsi148_lm_set(struct vme_lm_resource *lm, unsigned long long lm_base,
1916 u32 aspace, u32 cycle)
1917{
1918 u32 lm_base_high, lm_base_low, lm_ctl = 0;
1919 int i;
1920 struct vme_bridge *tsi148_bridge;
1921 struct tsi148_driver *bridge;
1922
1923 tsi148_bridge = lm->parent;
1924
1925 bridge = tsi148_bridge->driver_priv;
1926
1927 mutex_lock(&lm->mtx);
1928
1929 /* If we already have a callback attached, we can't move it! */
1930 for (i = 0; i < lm->monitors; i++) {
1931 if (bridge->lm_callback[i] != NULL) {
1932 mutex_unlock(&lm->mtx);
1933 dev_err(tsi148_bridge->parent, "Location monitor "
1934 "callback attached, can't reset\n");
1935 return -EBUSY;
1936 }
1937 }
1938
1939 switch (aspace) {
1940 case VME_A16:
1941 lm_ctl |= TSI148_LCSR_LMAT_AS_A16;
1942 break;
1943 case VME_A24:
1944 lm_ctl |= TSI148_LCSR_LMAT_AS_A24;
1945 break;
1946 case VME_A32:
1947 lm_ctl |= TSI148_LCSR_LMAT_AS_A32;
1948 break;
1949 case VME_A64:
1950 lm_ctl |= TSI148_LCSR_LMAT_AS_A64;
1951 break;
1952 default:
1953 mutex_unlock(&lm->mtx);
1954 dev_err(tsi148_bridge->parent, "Invalid address space\n");
1955 return -EINVAL;
1956 break;
1957 }
1958
1959 if (cycle & VME_SUPER)
1960 lm_ctl |= TSI148_LCSR_LMAT_SUPR ;
1961 if (cycle & VME_USER)
1962 lm_ctl |= TSI148_LCSR_LMAT_NPRIV;
1963 if (cycle & VME_PROG)
1964 lm_ctl |= TSI148_LCSR_LMAT_PGM;
1965 if (cycle & VME_DATA)
1966 lm_ctl |= TSI148_LCSR_LMAT_DATA;
1967
1968 reg_split(lm_base, &lm_base_high, &lm_base_low);
1969
1970 iowrite32be(lm_base_high, bridge->base + TSI148_LCSR_LMBAU);
1971 iowrite32be(lm_base_low, bridge->base + TSI148_LCSR_LMBAL);
1972 iowrite32be(lm_ctl, bridge->base + TSI148_LCSR_LMAT);
1973
1974 mutex_unlock(&lm->mtx);
1975
1976 return 0;
1977}
1978
/* Get configuration of the callback monitor and return whether it is enabled
 * or disabled.
 *
 * The decoded base address is written to *lm_base; matching VME_* flags are
 * OR-ed into *aspace and *cycle.
 * NOTE(review): *aspace and *cycle are OR-ed into, never cleared first -
 * callers appear expected to pass zero-initialised values; confirm against
 * the vme.c callers.
 *
 * Returns 1 if the location monitor is globally enabled, 0 otherwise.
 */
static int tsi148_lm_get(struct vme_lm_resource *lm,
	unsigned long long *lm_base, u32 *aspace, u32 *cycle)
{
	u32 lm_base_high, lm_base_low, lm_ctl, enabled = 0;
	struct tsi148_driver *bridge;

	bridge = lm->parent->driver_priv;

	mutex_lock(&lm->mtx);

	/* Read back the base address and attribute registers */
	lm_base_high = ioread32be(bridge->base + TSI148_LCSR_LMBAU);
	lm_base_low = ioread32be(bridge->base + TSI148_LCSR_LMBAL);
	lm_ctl = ioread32be(bridge->base + TSI148_LCSR_LMAT);

	reg_join(lm_base_high, lm_base_low, lm_base);

	if (lm_ctl & TSI148_LCSR_LMAT_EN)
		enabled = 1;

	/* Decode the address-space field back into a VME_* flag */
	if ((lm_ctl & TSI148_LCSR_LMAT_AS_M) == TSI148_LCSR_LMAT_AS_A16)
		*aspace |= VME_A16;

	if ((lm_ctl & TSI148_LCSR_LMAT_AS_M) == TSI148_LCSR_LMAT_AS_A24)
		*aspace |= VME_A24;

	if ((lm_ctl & TSI148_LCSR_LMAT_AS_M) == TSI148_LCSR_LMAT_AS_A32)
		*aspace |= VME_A32;

	if ((lm_ctl & TSI148_LCSR_LMAT_AS_M) == TSI148_LCSR_LMAT_AS_A64)
		*aspace |= VME_A64;

	/* Decode the cycle attribute bits */
	if (lm_ctl & TSI148_LCSR_LMAT_SUPR)
		*cycle |= VME_SUPER;
	if (lm_ctl & TSI148_LCSR_LMAT_NPRIV)
		*cycle |= VME_USER;
	if (lm_ctl & TSI148_LCSR_LMAT_PGM)
		*cycle |= VME_PROG;
	if (lm_ctl & TSI148_LCSR_LMAT_DATA)
		*cycle |= VME_DATA;

	mutex_unlock(&lm->mtx);

	return enabled;
}
2027
/*
 * Attach a callback to a specific location monitor.
 *
 * Callback will be passed the monitor triggered.
 *
 * Requires tsi148_lm_set() to have configured PGM and/or DATA cycles first.
 * Enables the per-monitor interrupt and, if needed, the global location
 * monitor enable.  Returns 0 on success, -EINVAL if the monitor is not
 * configured, or -EBUSY if a callback is already attached.
 */
static int tsi148_lm_attach(struct vme_lm_resource *lm, int monitor,
	void (*callback)(int))
{
	u32 lm_ctl, tmp;
	struct vme_bridge *tsi148_bridge;
	struct tsi148_driver *bridge;

	tsi148_bridge = lm->parent;

	bridge = tsi148_bridge->driver_priv;

	mutex_lock(&lm->mtx);

	/* Ensure that the location monitor is configured - need PGM or DATA */
	lm_ctl = ioread32be(bridge->base + TSI148_LCSR_LMAT);
	if ((lm_ctl & (TSI148_LCSR_LMAT_PGM | TSI148_LCSR_LMAT_DATA)) == 0) {
		mutex_unlock(&lm->mtx);
		dev_err(tsi148_bridge->parent, "Location monitor not properly "
			"configured\n");
		return -EINVAL;
	}

	/* Check that a callback isn't already attached */
	if (bridge->lm_callback[monitor] != NULL) {
		mutex_unlock(&lm->mtx);
		dev_err(tsi148_bridge->parent, "Existing callback attached\n");
		return -EBUSY;
	}

	/* Attach callback */
	bridge->lm_callback[monitor] = callback;

	/* Enable Location Monitor interrupt */
	tmp = ioread32be(bridge->base + TSI148_LCSR_INTEN);
	tmp |= TSI148_LCSR_INTEN_LMEN[monitor];
	iowrite32be(tmp, bridge->base + TSI148_LCSR_INTEN);

	tmp = ioread32be(bridge->base + TSI148_LCSR_INTEO);
	tmp |= TSI148_LCSR_INTEO_LMEO[monitor];
	iowrite32be(tmp, bridge->base + TSI148_LCSR_INTEO);

	/* Ensure that global Location Monitor Enable set */
	if ((lm_ctl & TSI148_LCSR_LMAT_EN) == 0) {
		lm_ctl |= TSI148_LCSR_LMAT_EN;
		iowrite32be(lm_ctl, bridge->base + TSI148_LCSR_LMAT);
	}

	mutex_unlock(&lm->mtx);

	return 0;
}
2084
/*
 * Detach a callback function forn a specific location monitor.
 *
 * Disables the per-monitor interrupt, clears any pending interrupt, drops
 * the callback pointer and, when no monitors remain enabled, clears the
 * global location monitor enable.  Always returns 0.
 */
static int tsi148_lm_detach(struct vme_lm_resource *lm, int monitor)
{
	u32 lm_en, tmp;
	struct tsi148_driver *bridge;

	bridge = lm->parent->driver_priv;

	mutex_lock(&lm->mtx);

	/* Disable Location Monitor and ensure previous interrupts are clear */
	lm_en = ioread32be(bridge->base + TSI148_LCSR_INTEN);
	lm_en &= ~TSI148_LCSR_INTEN_LMEN[monitor];
	iowrite32be(lm_en, bridge->base + TSI148_LCSR_INTEN);

	tmp = ioread32be(bridge->base + TSI148_LCSR_INTEO);
	tmp &= ~TSI148_LCSR_INTEO_LMEO[monitor];
	iowrite32be(tmp, bridge->base + TSI148_LCSR_INTEO);

	/* Acknowledge/clear any pending interrupt for this monitor */
	iowrite32be(TSI148_LCSR_INTC_LMC[monitor],
		bridge->base + TSI148_LCSR_INTC);

	/* Detach callback */
	bridge->lm_callback[monitor] = NULL;

	/* If all location monitors disabled, disable global Location Monitor */
	if ((lm_en & (TSI148_LCSR_INTS_LM0S | TSI148_LCSR_INTS_LM1S |
			TSI148_LCSR_INTS_LM2S | TSI148_LCSR_INTS_LM3S)) == 0) {
		tmp = ioread32be(bridge->base + TSI148_LCSR_LMAT);
		tmp &= ~TSI148_LCSR_LMAT_EN;
		iowrite32be(tmp, bridge->base + TSI148_LCSR_LMAT);
	}

	mutex_unlock(&lm->mtx);

	return 0;
}
2124
2125/*
2126 * Determine Geographical Addressing
2127 */
2128static int tsi148_slot_get(struct vme_bridge *tsi148_bridge)
2129{
2130 u32 slot = 0;
2131 struct tsi148_driver *bridge;
2132
2133 bridge = tsi148_bridge->driver_priv;
2134
2135 if (!geoid) {
2136 slot = ioread32be(bridge->base + TSI148_LCSR_VSTAT);
2137 slot = slot & TSI148_LCSR_VSTAT_GA_M;
2138 } else
2139 slot = geoid;
2140
2141 return (int)slot;
2142}
2143
2144void *tsi148_alloc_consistent(struct device *parent, size_t size,
2145 dma_addr_t *dma)
2146{
2147 struct pci_dev *pdev;
2148
2149 /* Find pci_dev container of dev */
2150 pdev = container_of(parent, struct pci_dev, dev);
2151
2152 return pci_alloc_consistent(pdev, size, dma);
2153}
2154
2155void tsi148_free_consistent(struct device *parent, size_t size, void *vaddr,
2156 dma_addr_t dma)
2157{
2158 struct pci_dev *pdev;
2159
2160 /* Find pci_dev container of dev */
2161 pdev = container_of(parent, struct pci_dev, dev);
2162
2163 pci_free_consistent(pdev, size, vaddr, dma);
2164}
2165
/* Module entry point: register the tsi148 PCI driver with the PCI core. */
static int __init tsi148_init(void)
{
	return pci_register_driver(&tsi148_driver);
}
2170
2171/*
2172 * Configure CR/CSR space
2173 *
2174 * Access to the CR/CSR can be configured at power-up. The location of the
2175 * CR/CSR registers in the CR/CSR address space is determined by the boards
2176 * Auto-ID or Geographic address. This function ensures that the window is
2177 * enabled at an offset consistent with the boards geopgraphic address.
2178 *
2179 * Each board has a 512kB window, with the highest 4kB being used for the
2180 * boards registers, this means there is a fix length 508kB window which must
2181 * be mapped onto PCI memory.
2182 */
2183static int tsi148_crcsr_init(struct vme_bridge *tsi148_bridge,
2184 struct pci_dev *pdev)
2185{
2186 u32 cbar, crat, vstat;
2187 u32 crcsr_bus_high, crcsr_bus_low;
2188 int retval;
2189 struct tsi148_driver *bridge;
2190
2191 bridge = tsi148_bridge->driver_priv;
2192
2193 /* Allocate mem for CR/CSR image */
2194 bridge->crcsr_kernel = pci_alloc_consistent(pdev, VME_CRCSR_BUF_SIZE,
2195 &bridge->crcsr_bus);
2196 if (bridge->crcsr_kernel == NULL) {
2197 dev_err(tsi148_bridge->parent, "Failed to allocate memory for "
2198 "CR/CSR image\n");
2199 return -ENOMEM;
2200 }
2201
2202 memset(bridge->crcsr_kernel, 0, VME_CRCSR_BUF_SIZE);
2203
2204 reg_split(bridge->crcsr_bus, &crcsr_bus_high, &crcsr_bus_low);
2205
2206 iowrite32be(crcsr_bus_high, bridge->base + TSI148_LCSR_CROU);
2207 iowrite32be(crcsr_bus_low, bridge->base + TSI148_LCSR_CROL);
2208
2209 /* Ensure that the CR/CSR is configured at the correct offset */
2210 cbar = ioread32be(bridge->base + TSI148_CBAR);
2211 cbar = (cbar & TSI148_CRCSR_CBAR_M)>>3;
2212
2213 vstat = tsi148_slot_get(tsi148_bridge);
2214
2215 if (cbar != vstat) {
2216 cbar = vstat;
2217 dev_info(tsi148_bridge->parent, "Setting CR/CSR offset\n");
2218 iowrite32be(cbar<<3, bridge->base + TSI148_CBAR);
2219 }
2220 dev_info(tsi148_bridge->parent, "CR/CSR Offset: %d\n", cbar);
2221
2222 crat = ioread32be(bridge->base + TSI148_LCSR_CRAT);
2223 if (crat & TSI148_LCSR_CRAT_EN) {
2224 dev_info(tsi148_bridge->parent, "Enabling CR/CSR space\n");
2225 iowrite32be(crat | TSI148_LCSR_CRAT_EN,
2226 bridge->base + TSI148_LCSR_CRAT);
2227 } else
2228 dev_info(tsi148_bridge->parent, "CR/CSR already enabled\n");
2229
2230 /* If we want flushed, error-checked writes, set up a window
2231 * over the CR/CSR registers. We read from here to safely flush
2232 * through VME writes.
2233 */
2234 if (err_chk) {
2235 retval = tsi148_master_set(bridge->flush_image, 1,
2236 (vstat * 0x80000), 0x80000, VME_CRCSR, VME_SCT,
2237 VME_D16);
2238 if (retval)
2239 dev_err(tsi148_bridge->parent, "Configuring flush image"
2240 " failed\n");
2241 }
2242
2243 return 0;
2244
2245}
2246
2247static void tsi148_crcsr_exit(struct vme_bridge *tsi148_bridge,
2248 struct pci_dev *pdev)
2249{
2250 u32 crat;
2251 struct tsi148_driver *bridge;
2252
2253 bridge = tsi148_bridge->driver_priv;
2254
2255 /* Turn off CR/CSR space */
2256 crat = ioread32be(bridge->base + TSI148_LCSR_CRAT);
2257 iowrite32be(crat & ~TSI148_LCSR_CRAT_EN,
2258 bridge->base + TSI148_LCSR_CRAT);
2259
2260 /* Free image */
2261 iowrite32be(0, bridge->base + TSI148_LCSR_CROU);
2262 iowrite32be(0, bridge->base + TSI148_LCSR_CROL);
2263
2264 pci_free_consistent(pdev, VME_CRCSR_BUF_SIZE, bridge->crcsr_kernel,
2265 bridge->crcsr_bus);
2266}
2267
2268static int tsi148_probe(struct pci_dev *pdev, const struct pci_device_id *id)
2269{
2270 int retval, i, master_num;
2271 u32 data;
2272 struct list_head *pos = NULL;
2273 struct vme_bridge *tsi148_bridge;
2274 struct tsi148_driver *tsi148_device;
2275 struct vme_master_resource *master_image;
2276 struct vme_slave_resource *slave_image;
2277 struct vme_dma_resource *dma_ctrlr;
2278 struct vme_lm_resource *lm;
2279
2280 /* If we want to support more than one of each bridge, we need to
2281 * dynamically generate this so we get one per device
2282 */
2283 tsi148_bridge = kzalloc(sizeof(struct vme_bridge), GFP_KERNEL);
2284 if (tsi148_bridge == NULL) {
2285 dev_err(&pdev->dev, "Failed to allocate memory for device "
2286 "structure\n");
2287 retval = -ENOMEM;
2288 goto err_struct;
2289 }
2290
2291 tsi148_device = kzalloc(sizeof(struct tsi148_driver), GFP_KERNEL);
2292 if (tsi148_device == NULL) {
2293 dev_err(&pdev->dev, "Failed to allocate memory for device "
2294 "structure\n");
2295 retval = -ENOMEM;
2296 goto err_driver;
2297 }
2298
2299 tsi148_bridge->driver_priv = tsi148_device;
2300
2301 /* Enable the device */
2302 retval = pci_enable_device(pdev);
2303 if (retval) {
2304 dev_err(&pdev->dev, "Unable to enable device\n");
2305 goto err_enable;
2306 }
2307
2308 /* Map Registers */
2309 retval = pci_request_regions(pdev, driver_name);
2310 if (retval) {
2311 dev_err(&pdev->dev, "Unable to reserve resources\n");
2312 goto err_resource;
2313 }
2314
2315 /* map registers in BAR 0 */
2316 tsi148_device->base = ioremap_nocache(pci_resource_start(pdev, 0),
2317 4096);
2318 if (!tsi148_device->base) {
2319 dev_err(&pdev->dev, "Unable to remap CRG region\n");
2320 retval = -EIO;
2321 goto err_remap;
2322 }
2323
2324 /* Check to see if the mapping worked out */
2325 data = ioread32(tsi148_device->base + TSI148_PCFS_ID) & 0x0000FFFF;
2326 if (data != PCI_VENDOR_ID_TUNDRA) {
2327 dev_err(&pdev->dev, "CRG region check failed\n");
2328 retval = -EIO;
2329 goto err_test;
2330 }
2331
2332 /* Initialize wait queues & mutual exclusion flags */
2333 init_waitqueue_head(&tsi148_device->dma_queue[0]);
2334 init_waitqueue_head(&tsi148_device->dma_queue[1]);
2335 init_waitqueue_head(&tsi148_device->iack_queue);
2336 mutex_init(&tsi148_device->vme_int);
2337 mutex_init(&tsi148_device->vme_rmw);
2338
2339 tsi148_bridge->parent = &pdev->dev;
2340 strcpy(tsi148_bridge->name, driver_name);
2341
2342 /* Setup IRQ */
2343 retval = tsi148_irq_init(tsi148_bridge);
2344 if (retval != 0) {
2345 dev_err(&pdev->dev, "Chip Initialization failed.\n");
2346 goto err_irq;
2347 }
2348
2349 /* If we are going to flush writes, we need to read from the VME bus.
2350 * We need to do this safely, thus we read the devices own CR/CSR
2351 * register. To do this we must set up a window in CR/CSR space and
2352 * hence have one less master window resource available.
2353 */
2354 master_num = TSI148_MAX_MASTER;
2355 if (err_chk) {
2356 master_num--;
2357
2358 tsi148_device->flush_image =
2359 kmalloc(sizeof(struct vme_master_resource), GFP_KERNEL);
2360 if (tsi148_device->flush_image == NULL) {
2361 dev_err(&pdev->dev, "Failed to allocate memory for "
2362 "flush resource structure\n");
2363 retval = -ENOMEM;
2364 goto err_master;
2365 }
2366 tsi148_device->flush_image->parent = tsi148_bridge;
2367 spin_lock_init(&tsi148_device->flush_image->lock);
2368 tsi148_device->flush_image->locked = 1;
2369 tsi148_device->flush_image->number = master_num;
2370 tsi148_device->flush_image->address_attr = VME_A16 | VME_A24 |
2371 VME_A32 | VME_A64;
2372 tsi148_device->flush_image->cycle_attr = VME_SCT | VME_BLT |
2373 VME_MBLT | VME_2eVME | VME_2eSST | VME_2eSSTB |
2374 VME_2eSST160 | VME_2eSST267 | VME_2eSST320 | VME_SUPER |
2375 VME_USER | VME_PROG | VME_DATA;
2376 tsi148_device->flush_image->width_attr = VME_D16 | VME_D32;
2377 memset(&tsi148_device->flush_image->bus_resource, 0,
2378 sizeof(struct resource));
2379 tsi148_device->flush_image->kern_base = NULL;
2380 }
2381
2382 /* Add master windows to list */
2383 INIT_LIST_HEAD(&tsi148_bridge->master_resources);
2384 for (i = 0; i < master_num; i++) {
2385 master_image = kmalloc(sizeof(struct vme_master_resource),
2386 GFP_KERNEL);
2387 if (master_image == NULL) {
2388 dev_err(&pdev->dev, "Failed to allocate memory for "
2389 "master resource structure\n");
2390 retval = -ENOMEM;
2391 goto err_master;
2392 }
2393 master_image->parent = tsi148_bridge;
2394 spin_lock_init(&master_image->lock);
2395 master_image->locked = 0;
2396 master_image->number = i;
2397 master_image->address_attr = VME_A16 | VME_A24 | VME_A32 |
2398 VME_A64;
2399 master_image->cycle_attr = VME_SCT | VME_BLT | VME_MBLT |
2400 VME_2eVME | VME_2eSST | VME_2eSSTB | VME_2eSST160 |
2401 VME_2eSST267 | VME_2eSST320 | VME_SUPER | VME_USER |
2402 VME_PROG | VME_DATA;
2403 master_image->width_attr = VME_D16 | VME_D32;
2404 memset(&master_image->bus_resource, 0,
2405 sizeof(struct resource));
2406 master_image->kern_base = NULL;
2407 list_add_tail(&master_image->list,
2408 &tsi148_bridge->master_resources);
2409 }
2410
2411 /* Add slave windows to list */
2412 INIT_LIST_HEAD(&tsi148_bridge->slave_resources);
2413 for (i = 0; i < TSI148_MAX_SLAVE; i++) {
2414 slave_image = kmalloc(sizeof(struct vme_slave_resource),
2415 GFP_KERNEL);
2416 if (slave_image == NULL) {
2417 dev_err(&pdev->dev, "Failed to allocate memory for "
2418 "slave resource structure\n");
2419 retval = -ENOMEM;
2420 goto err_slave;
2421 }
2422 slave_image->parent = tsi148_bridge;
2423 mutex_init(&slave_image->mtx);
2424 slave_image->locked = 0;
2425 slave_image->number = i;
2426 slave_image->address_attr = VME_A16 | VME_A24 | VME_A32 |
2427 VME_A64 | VME_CRCSR | VME_USER1 | VME_USER2 |
2428 VME_USER3 | VME_USER4;
2429 slave_image->cycle_attr = VME_SCT | VME_BLT | VME_MBLT |
2430 VME_2eVME | VME_2eSST | VME_2eSSTB | VME_2eSST160 |
2431 VME_2eSST267 | VME_2eSST320 | VME_SUPER | VME_USER |
2432 VME_PROG | VME_DATA;
2433 list_add_tail(&slave_image->list,
2434 &tsi148_bridge->slave_resources);
2435 }
2436
2437 /* Add dma engines to list */
2438 INIT_LIST_HEAD(&tsi148_bridge->dma_resources);
2439 for (i = 0; i < TSI148_MAX_DMA; i++) {
2440 dma_ctrlr = kmalloc(sizeof(struct vme_dma_resource),
2441 GFP_KERNEL);
2442 if (dma_ctrlr == NULL) {
2443 dev_err(&pdev->dev, "Failed to allocate memory for "
2444 "dma resource structure\n");
2445 retval = -ENOMEM;
2446 goto err_dma;
2447 }
2448 dma_ctrlr->parent = tsi148_bridge;
2449 mutex_init(&dma_ctrlr->mtx);
2450 dma_ctrlr->locked = 0;
2451 dma_ctrlr->number = i;
2452 dma_ctrlr->route_attr = VME_DMA_VME_TO_MEM |
2453 VME_DMA_MEM_TO_VME | VME_DMA_VME_TO_VME |
2454 VME_DMA_MEM_TO_MEM | VME_DMA_PATTERN_TO_VME |
2455 VME_DMA_PATTERN_TO_MEM;
2456 INIT_LIST_HEAD(&dma_ctrlr->pending);
2457 INIT_LIST_HEAD(&dma_ctrlr->running);
2458 list_add_tail(&dma_ctrlr->list,
2459 &tsi148_bridge->dma_resources);
2460 }
2461
2462 /* Add location monitor to list */
2463 INIT_LIST_HEAD(&tsi148_bridge->lm_resources);
2464 lm = kmalloc(sizeof(struct vme_lm_resource), GFP_KERNEL);
2465 if (lm == NULL) {
2466 dev_err(&pdev->dev, "Failed to allocate memory for "
2467 "location monitor resource structure\n");
2468 retval = -ENOMEM;
2469 goto err_lm;
2470 }
2471 lm->parent = tsi148_bridge;
2472 mutex_init(&lm->mtx);
2473 lm->locked = 0;
2474 lm->number = 1;
2475 lm->monitors = 4;
2476 list_add_tail(&lm->list, &tsi148_bridge->lm_resources);
2477
2478 tsi148_bridge->slave_get = tsi148_slave_get;
2479 tsi148_bridge->slave_set = tsi148_slave_set;
2480 tsi148_bridge->master_get = tsi148_master_get;
2481 tsi148_bridge->master_set = tsi148_master_set;
2482 tsi148_bridge->master_read = tsi148_master_read;
2483 tsi148_bridge->master_write = tsi148_master_write;
2484 tsi148_bridge->master_rmw = tsi148_master_rmw;
2485 tsi148_bridge->dma_list_add = tsi148_dma_list_add;
2486 tsi148_bridge->dma_list_exec = tsi148_dma_list_exec;
2487 tsi148_bridge->dma_list_empty = tsi148_dma_list_empty;
2488 tsi148_bridge->irq_set = tsi148_irq_set;
2489 tsi148_bridge->irq_generate = tsi148_irq_generate;
2490 tsi148_bridge->lm_set = tsi148_lm_set;
2491 tsi148_bridge->lm_get = tsi148_lm_get;
2492 tsi148_bridge->lm_attach = tsi148_lm_attach;
2493 tsi148_bridge->lm_detach = tsi148_lm_detach;
2494 tsi148_bridge->slot_get = tsi148_slot_get;
2495 tsi148_bridge->alloc_consistent = tsi148_alloc_consistent;
2496 tsi148_bridge->free_consistent = tsi148_free_consistent;
2497
2498 data = ioread32be(tsi148_device->base + TSI148_LCSR_VSTAT);
2499 dev_info(&pdev->dev, "Board is%s the VME system controller\n",
2500 (data & TSI148_LCSR_VSTAT_SCONS) ? "" : " not");
2501 if (!geoid)
2502 dev_info(&pdev->dev, "VME geographical address is %d\n",
2503 data & TSI148_LCSR_VSTAT_GA_M);
2504 else
2505 dev_info(&pdev->dev, "VME geographical address is set to %d\n",
2506 geoid);
2507
2508 dev_info(&pdev->dev, "VME Write and flush and error check is %s\n",
2509 err_chk ? "enabled" : "disabled");
2510
2511 if (tsi148_crcsr_init(tsi148_bridge, pdev)) {
2512 dev_err(&pdev->dev, "CR/CSR configuration failed.\n");
2513 goto err_crcsr;
2514 }
2515
2516 retval = vme_register_bridge(tsi148_bridge);
2517 if (retval != 0) {
2518 dev_err(&pdev->dev, "Chip Registration failed.\n");
2519 goto err_reg;
2520 }
2521
2522 pci_set_drvdata(pdev, tsi148_bridge);
2523
2524 /* Clear VME bus "board fail", and "power-up reset" lines */
2525 data = ioread32be(tsi148_device->base + TSI148_LCSR_VSTAT);
2526 data &= ~TSI148_LCSR_VSTAT_BRDFL;
2527 data |= TSI148_LCSR_VSTAT_CPURST;
2528 iowrite32be(data, tsi148_device->base + TSI148_LCSR_VSTAT);
2529
2530 return 0;
2531
2532err_reg:
2533 tsi148_crcsr_exit(tsi148_bridge, pdev);
2534err_crcsr:
2535err_lm:
2536 /* resources are stored in link list */
2537 list_for_each(pos, &tsi148_bridge->lm_resources) {
2538 lm = list_entry(pos, struct vme_lm_resource, list);
2539 list_del(pos);
2540 kfree(lm);
2541 }
2542err_dma:
2543 /* resources are stored in link list */
2544 list_for_each(pos, &tsi148_bridge->dma_resources) {
2545 dma_ctrlr = list_entry(pos, struct vme_dma_resource, list);
2546 list_del(pos);
2547 kfree(dma_ctrlr);
2548 }
2549err_slave:
2550 /* resources are stored in link list */
2551 list_for_each(pos, &tsi148_bridge->slave_resources) {
2552 slave_image = list_entry(pos, struct vme_slave_resource, list);
2553 list_del(pos);
2554 kfree(slave_image);
2555 }
2556err_master:
2557 /* resources are stored in link list */
2558 list_for_each(pos, &tsi148_bridge->master_resources) {
2559 master_image = list_entry(pos, struct vme_master_resource,
2560 list);
2561 list_del(pos);
2562 kfree(master_image);
2563 }
2564
2565 tsi148_irq_exit(tsi148_bridge, pdev);
2566err_irq:
2567err_test:
2568 iounmap(tsi148_device->base);
2569err_remap:
2570 pci_release_regions(pdev);
2571err_resource:
2572 pci_disable_device(pdev);
2573err_enable:
2574 kfree(tsi148_device);
2575err_driver:
2576 kfree(tsi148_bridge);
2577err_struct:
2578 return retval;
2579
2580}
2581
/*
 * PCI remove callback: quiesce the hardware, unregister the bridge from the
 * VME core and release every resource acquired in tsi148_probe().
 */
static void tsi148_remove(struct pci_dev *pdev)
{
	struct list_head *pos = NULL;
	struct list_head *tmplist;
	struct vme_master_resource *master_image;
	struct vme_slave_resource *slave_image;
	struct vme_dma_resource *dma_ctrlr;
	int i;
	struct tsi148_driver *bridge;
	struct vme_bridge *tsi148_bridge = pci_get_drvdata(pdev);

	bridge = tsi148_bridge->driver_priv;


	dev_dbg(&pdev->dev, "Driver is being unloaded.\n");

	/*
	 * Shutdown all inbound and outbound windows.
	 */
	for (i = 0; i < 8; i++) {
		iowrite32be(0, bridge->base + TSI148_LCSR_IT[i] +
			TSI148_LCSR_OFFSET_ITAT);
		iowrite32be(0, bridge->base + TSI148_LCSR_OT[i] +
			TSI148_LCSR_OFFSET_OTAT);
	}

	/*
	 * Shutdown Location monitor.
	 */
	iowrite32be(0, bridge->base + TSI148_LCSR_LMAT);

	/*
	 * Shutdown CRG map.
	 */
	iowrite32be(0, bridge->base + TSI148_LCSR_CSRAT);

	/*
	 * Clear error status.
	 */
	iowrite32be(0xFFFFFFFF, bridge->base + TSI148_LCSR_EDPAT);
	iowrite32be(0xFFFFFFFF, bridge->base + TSI148_LCSR_VEAT);
	iowrite32be(0x07000700, bridge->base + TSI148_LCSR_PSTAT);

	/*
	 * Remove VIRQ interrupt (if any)
	 */
	if (ioread32be(bridge->base + TSI148_LCSR_VICR) & 0x800)
		iowrite32be(0x8000, bridge->base + TSI148_LCSR_VICR);

	/*
	 * Map all Interrupts to PCI INTA
	 */
	iowrite32be(0x0, bridge->base + TSI148_LCSR_INTM1);
	iowrite32be(0x0, bridge->base + TSI148_LCSR_INTM2);

	tsi148_irq_exit(tsi148_bridge, pdev);

	vme_unregister_bridge(tsi148_bridge);

	tsi148_crcsr_exit(tsi148_bridge, pdev);

	/* resources are stored in link list */
	list_for_each_safe(pos, tmplist, &tsi148_bridge->dma_resources) {
		dma_ctrlr = list_entry(pos, struct vme_dma_resource, list);
		list_del(pos);
		kfree(dma_ctrlr);
	}

	/* resources are stored in link list */
	list_for_each_safe(pos, tmplist, &tsi148_bridge->slave_resources) {
		slave_image = list_entry(pos, struct vme_slave_resource, list);
		list_del(pos);
		kfree(slave_image);
	}

	/* resources are stored in link list */
	list_for_each_safe(pos, tmplist, &tsi148_bridge->master_resources) {
		master_image = list_entry(pos, struct vme_master_resource,
			list);
		list_del(pos);
		kfree(master_image);
	}

	iounmap(bridge->base);

	pci_release_regions(pdev);

	pci_disable_device(pdev);

	kfree(tsi148_bridge->driver_priv);

	kfree(tsi148_bridge);
}
2675
/* Module exit point: unregister the tsi148 PCI driver. */
static void __exit tsi148_exit(void)
{
	pci_unregister_driver(&tsi148_driver);
}
2680
/* Module parameters and metadata */
MODULE_PARM_DESC(err_chk, "Check for VME errors on reads and writes");
module_param(err_chk, bool, 0);

MODULE_PARM_DESC(geoid, "Override geographical addressing");
module_param(geoid, int, 0);

MODULE_DESCRIPTION("VME driver for the Tundra Tempe VME bridge");
MODULE_LICENSE("GPL");

module_init(tsi148_init);
module_exit(tsi148_exit);
diff --git a/drivers/vme/bridges/vme_tsi148.h b/drivers/vme/bridges/vme_tsi148.h
new file mode 100644
index 00000000000..f5ed14382a8
--- /dev/null
+++ b/drivers/vme/bridges/vme_tsi148.h
@@ -0,0 +1,1410 @@
1/*
2 * tsi148.h
3 *
4 * Support for the Tundra TSI148 VME Bridge chip
5 *
6 * Author: Tom Armistead
7 * Updated and maintained by Ajit Prem
8 * Copyright 2004 Motorola Inc.
9 *
10 * This program is free software; you can redistribute it and/or modify it
11 * under the terms of the GNU General Public License as published by the
12 * Free Software Foundation; either version 2 of the License, or (at your
13 * option) any later version.
14 */
15
16#ifndef TSI148_H
17#define TSI148_H
18
19#ifndef PCI_VENDOR_ID_TUNDRA
20#define PCI_VENDOR_ID_TUNDRA 0x10e3
21#endif
22
23#ifndef PCI_DEVICE_ID_TUNDRA_TSI148
24#define PCI_DEVICE_ID_TUNDRA_TSI148 0x148
25#endif
26
27/*
28 * Define the number of each that the Tsi148 supports.
29 */
30#define TSI148_MAX_MASTER 8 /* Max Master Windows */
31#define TSI148_MAX_SLAVE 8 /* Max Slave Windows */
32#define TSI148_MAX_DMA 2 /* Max DMA Controllers */
33#define TSI148_MAX_MAILBOX 4 /* Max Mail Box registers */
34#define TSI148_MAX_SEMAPHORE 8 /* Max Semaphores */
35
/* Structure used to hold driver specific information */
struct tsi148_driver {
	void __iomem *base;	/* Base Address of device registers */
	wait_queue_head_t dma_queue[2];	/* One queue per DMA channel (TSI148_MAX_DMA == 2) */
	wait_queue_head_t iack_queue;	/* NOTE(review): presumably woken when an IACK cycle
					 * completes - confirm against vme_tsi148.c */
	void (*lm_callback[4])(int);	/* Location monitor callbacks; called in interrupt handler */
	void *crcsr_kernel;	/* CR/CSR image - NOTE(review): presumably the kernel-virtual
				 * side of the DMA allocation paired with crcsr_bus; confirm */
	dma_addr_t crcsr_bus;	/* Bus (DMA) address of the CR/CSR image */
	struct vme_master_resource *flush_image;	/* NOTE(review): master window apparently
							 * used to flush posted writes - confirm */
	struct mutex vme_rmw;	/* Only one RMW cycle at a time */
	struct mutex vme_int;	/*
				 * Only one VME interrupt can be
				 * generated at a time, provide locking
				 */
};
51
/*
 * Layout of a DMAC Linked-List Descriptor
 *
 * Note: This structure is accessed via the chip and therefore must be
 * correctly laid out - It must also be aligned on 64-bit boundaries.
 *
 * All fields are big-endian 32-bit values; "u"/"l" suffixed pairs hold
 * the upper/lower halves of 64-bit quantities (matching the register
 * naming convention used throughout this header, e.g. DSAU0/DSAL0).
 */
struct tsi148_dma_descriptor {
	__be32 dsau;	/* Source Address (upper half) */
	__be32 dsal;	/* Source Address (lower half) */
	__be32 ddau;	/* Destination Address (upper half) */
	__be32 ddal;	/* Destination Address (lower half) */
	__be32 dsat;	/* Source attributes */
	__be32 ddat;	/* Destination attributes */
	__be32 dnlau;	/* Next link address (upper half) */
	__be32 dnlal;	/* Next link address (lower half) */
	__be32 dcnt;	/* Byte count */
	__be32 ddbs;	/* 2eSST Broadcast select */
};
70
/* Driver-side wrapper around one hardware DMA descriptor. */
struct tsi148_dma_entry {
	/*
	 * The descriptor needs to be aligned on a 64-bit boundary, we increase
	 * the chance of this by putting it first in the structure.
	 */
	struct tsi148_dma_descriptor descriptor;	/* Hardware-visible descriptor (see above) */
	struct list_head list;		/* Links this entry into a DMA descriptor list */
	dma_addr_t dma_handle;		/* NOTE(review): presumably the bus address of
					 * 'descriptor' - confirm at the mapping site */
};
80
81/*
82 * TSI148 ASIC register structure overlays and bit field definitions.
83 *
84 * Note: Tsi148 Register Group (CRG) consists of the following
85 * combination of registers:
86 * PCFS - PCI Configuration Space Registers
87 * LCSR - Local Control and Status Registers
88 * GCSR - Global Control and Status Registers
89 * CR/CSR - Subset of Configuration ROM /
90 * Control and Status Registers
91 */
92
93
94/*
95 * Command/Status Registers (CRG + $004)
96 */
97#define TSI148_PCFS_ID 0x0
98#define TSI148_PCFS_CSR 0x4
99#define TSI148_PCFS_CLASS 0x8
100#define TSI148_PCFS_MISC0 0xC
101#define TSI148_PCFS_MBARL 0x10
102#define TSI148_PCFS_MBARU 0x14
103
104#define TSI148_PCFS_SUBID 0x28
105
106#define TSI148_PCFS_CAPP 0x34
107
108#define TSI148_PCFS_MISC1 0x3C
109
110#define TSI148_PCFS_XCAPP 0x40
111#define TSI148_PCFS_XSTAT 0x44
112
113/*
114 * LCSR definitions
115 */
116
117/*
118 * Outbound Translations
119 */
120#define TSI148_LCSR_OT0_OTSAU 0x100
121#define TSI148_LCSR_OT0_OTSAL 0x104
122#define TSI148_LCSR_OT0_OTEAU 0x108
123#define TSI148_LCSR_OT0_OTEAL 0x10C
124#define TSI148_LCSR_OT0_OTOFU 0x110
125#define TSI148_LCSR_OT0_OTOFL 0x114
126#define TSI148_LCSR_OT0_OTBS 0x118
127#define TSI148_LCSR_OT0_OTAT 0x11C
128
129#define TSI148_LCSR_OT1_OTSAU 0x120
130#define TSI148_LCSR_OT1_OTSAL 0x124
131#define TSI148_LCSR_OT1_OTEAU 0x128
132#define TSI148_LCSR_OT1_OTEAL 0x12C
133#define TSI148_LCSR_OT1_OTOFU 0x130
134#define TSI148_LCSR_OT1_OTOFL 0x134
135#define TSI148_LCSR_OT1_OTBS 0x138
136#define TSI148_LCSR_OT1_OTAT 0x13C
137
138#define TSI148_LCSR_OT2_OTSAU 0x140
139#define TSI148_LCSR_OT2_OTSAL 0x144
140#define TSI148_LCSR_OT2_OTEAU 0x148
141#define TSI148_LCSR_OT2_OTEAL 0x14C
142#define TSI148_LCSR_OT2_OTOFU 0x150
143#define TSI148_LCSR_OT2_OTOFL 0x154
144#define TSI148_LCSR_OT2_OTBS 0x158
145#define TSI148_LCSR_OT2_OTAT 0x15C
146
147#define TSI148_LCSR_OT3_OTSAU 0x160
148#define TSI148_LCSR_OT3_OTSAL 0x164
149#define TSI148_LCSR_OT3_OTEAU 0x168
150#define TSI148_LCSR_OT3_OTEAL 0x16C
151#define TSI148_LCSR_OT3_OTOFU 0x170
152#define TSI148_LCSR_OT3_OTOFL 0x174
153#define TSI148_LCSR_OT3_OTBS 0x178
154#define TSI148_LCSR_OT3_OTAT 0x17C
155
156#define TSI148_LCSR_OT4_OTSAU 0x180
157#define TSI148_LCSR_OT4_OTSAL 0x184
158#define TSI148_LCSR_OT4_OTEAU 0x188
159#define TSI148_LCSR_OT4_OTEAL 0x18C
160#define TSI148_LCSR_OT4_OTOFU 0x190
161#define TSI148_LCSR_OT4_OTOFL 0x194
162#define TSI148_LCSR_OT4_OTBS 0x198
163#define TSI148_LCSR_OT4_OTAT 0x19C
164
165#define TSI148_LCSR_OT5_OTSAU 0x1A0
166#define TSI148_LCSR_OT5_OTSAL 0x1A4
167#define TSI148_LCSR_OT5_OTEAU 0x1A8
168#define TSI148_LCSR_OT5_OTEAL 0x1AC
169#define TSI148_LCSR_OT5_OTOFU 0x1B0
170#define TSI148_LCSR_OT5_OTOFL 0x1B4
171#define TSI148_LCSR_OT5_OTBS 0x1B8
172#define TSI148_LCSR_OT5_OTAT 0x1BC
173
174#define TSI148_LCSR_OT6_OTSAU 0x1C0
175#define TSI148_LCSR_OT6_OTSAL 0x1C4
176#define TSI148_LCSR_OT6_OTEAU 0x1C8
177#define TSI148_LCSR_OT6_OTEAL 0x1CC
178#define TSI148_LCSR_OT6_OTOFU 0x1D0
179#define TSI148_LCSR_OT6_OTOFL 0x1D4
180#define TSI148_LCSR_OT6_OTBS 0x1D8
181#define TSI148_LCSR_OT6_OTAT 0x1DC
182
183#define TSI148_LCSR_OT7_OTSAU 0x1E0
184#define TSI148_LCSR_OT7_OTSAL 0x1E4
185#define TSI148_LCSR_OT7_OTEAU 0x1E8
186#define TSI148_LCSR_OT7_OTEAL 0x1EC
187#define TSI148_LCSR_OT7_OTOFU 0x1F0
188#define TSI148_LCSR_OT7_OTOFL 0x1F4
189#define TSI148_LCSR_OT7_OTBS 0x1F8
190#define TSI148_LCSR_OT7_OTAT 0x1FC
191
192#define TSI148_LCSR_OT0 0x100
193#define TSI148_LCSR_OT1 0x120
194#define TSI148_LCSR_OT2 0x140
195#define TSI148_LCSR_OT3 0x160
196#define TSI148_LCSR_OT4 0x180
197#define TSI148_LCSR_OT5 0x1A0
198#define TSI148_LCSR_OT6 0x1C0
199#define TSI148_LCSR_OT7 0x1E0
200
/* Base offsets of the eight outbound translation register blocks,
 * indexed by master window number. */
static const int TSI148_LCSR_OT[8] = { TSI148_LCSR_OT0, TSI148_LCSR_OT1,
					 TSI148_LCSR_OT2, TSI148_LCSR_OT3,
					 TSI148_LCSR_OT4, TSI148_LCSR_OT5,
					 TSI148_LCSR_OT6, TSI148_LCSR_OT7 };
205
206#define TSI148_LCSR_OFFSET_OTSAU 0x0
207#define TSI148_LCSR_OFFSET_OTSAL 0x4
208#define TSI148_LCSR_OFFSET_OTEAU 0x8
209#define TSI148_LCSR_OFFSET_OTEAL 0xC
210#define TSI148_LCSR_OFFSET_OTOFU 0x10
211#define TSI148_LCSR_OFFSET_OTOFL 0x14
212#define TSI148_LCSR_OFFSET_OTBS 0x18
213#define TSI148_LCSR_OFFSET_OTAT 0x1C
214
215/*
216 * VMEbus interrupt ack
217 * offset 200
218 */
219#define TSI148_LCSR_VIACK1 0x204
220#define TSI148_LCSR_VIACK2 0x208
221#define TSI148_LCSR_VIACK3 0x20C
222#define TSI148_LCSR_VIACK4 0x210
223#define TSI148_LCSR_VIACK5 0x214
224#define TSI148_LCSR_VIACK6 0x218
225#define TSI148_LCSR_VIACK7 0x21C
226
/* VME interrupt-acknowledge register offsets indexed by IRQ level 1-7;
 * index 0 is a placeholder (there is no level-0 interrupt). */
static const int TSI148_LCSR_VIACK[8] = { 0, TSI148_LCSR_VIACK1,
				TSI148_LCSR_VIACK2, TSI148_LCSR_VIACK3,
				TSI148_LCSR_VIACK4, TSI148_LCSR_VIACK5,
				TSI148_LCSR_VIACK6, TSI148_LCSR_VIACK7 };
231
232/*
233 * RMW
234 * offset 220
235 */
236#define TSI148_LCSR_RMWAU 0x220
237#define TSI148_LCSR_RMWAL 0x224
238#define TSI148_LCSR_RMWEN 0x228
239#define TSI148_LCSR_RMWC 0x22C
240#define TSI148_LCSR_RMWS 0x230
241
242/*
243 * VMEbus control
244 * offset 234
245 */
246#define TSI148_LCSR_VMCTRL 0x234
247#define TSI148_LCSR_VCTRL 0x238
248#define TSI148_LCSR_VSTAT 0x23C
249
250/*
251 * PCI status
252 * offset 240
253 */
254#define TSI148_LCSR_PSTAT 0x240
255
256/*
257 * VME filter.
258 * offset 250
259 */
260#define TSI148_LCSR_VMEFL 0x250
261
262 /*
263 * VME exception.
264 * offset 260
265 */
266#define TSI148_LCSR_VEAU 0x260
267#define TSI148_LCSR_VEAL 0x264
268#define TSI148_LCSR_VEAT 0x268
269
270 /*
271 * PCI error
272 * offset 270
273 */
274#define TSI148_LCSR_EDPAU 0x270
275#define TSI148_LCSR_EDPAL 0x274
276#define TSI148_LCSR_EDPXA 0x278
277#define TSI148_LCSR_EDPXS 0x27C
278#define TSI148_LCSR_EDPAT 0x280
279
280 /*
281 * Inbound Translations
282 * offset 300
283 */
284#define TSI148_LCSR_IT0_ITSAU 0x300
285#define TSI148_LCSR_IT0_ITSAL 0x304
286#define TSI148_LCSR_IT0_ITEAU 0x308
287#define TSI148_LCSR_IT0_ITEAL 0x30C
288#define TSI148_LCSR_IT0_ITOFU 0x310
289#define TSI148_LCSR_IT0_ITOFL 0x314
290#define TSI148_LCSR_IT0_ITAT 0x318
291
292#define TSI148_LCSR_IT1_ITSAU 0x320
293#define TSI148_LCSR_IT1_ITSAL 0x324
294#define TSI148_LCSR_IT1_ITEAU 0x328
295#define TSI148_LCSR_IT1_ITEAL 0x32C
296#define TSI148_LCSR_IT1_ITOFU 0x330
297#define TSI148_LCSR_IT1_ITOFL 0x334
298#define TSI148_LCSR_IT1_ITAT 0x338
299
300#define TSI148_LCSR_IT2_ITSAU 0x340
301#define TSI148_LCSR_IT2_ITSAL 0x344
302#define TSI148_LCSR_IT2_ITEAU 0x348
303#define TSI148_LCSR_IT2_ITEAL 0x34C
304#define TSI148_LCSR_IT2_ITOFU 0x350
305#define TSI148_LCSR_IT2_ITOFL 0x354
306#define TSI148_LCSR_IT2_ITAT 0x358
307
308#define TSI148_LCSR_IT3_ITSAU 0x360
309#define TSI148_LCSR_IT3_ITSAL 0x364
310#define TSI148_LCSR_IT3_ITEAU 0x368
311#define TSI148_LCSR_IT3_ITEAL 0x36C
312#define TSI148_LCSR_IT3_ITOFU 0x370
313#define TSI148_LCSR_IT3_ITOFL 0x374
314#define TSI148_LCSR_IT3_ITAT 0x378
315
316#define TSI148_LCSR_IT4_ITSAU 0x380
317#define TSI148_LCSR_IT4_ITSAL 0x384
318#define TSI148_LCSR_IT4_ITEAU 0x388
319#define TSI148_LCSR_IT4_ITEAL 0x38C
320#define TSI148_LCSR_IT4_ITOFU 0x390
321#define TSI148_LCSR_IT4_ITOFL 0x394
322#define TSI148_LCSR_IT4_ITAT 0x398
323
324#define TSI148_LCSR_IT5_ITSAU 0x3A0
325#define TSI148_LCSR_IT5_ITSAL 0x3A4
326#define TSI148_LCSR_IT5_ITEAU 0x3A8
327#define TSI148_LCSR_IT5_ITEAL 0x3AC
328#define TSI148_LCSR_IT5_ITOFU 0x3B0
329#define TSI148_LCSR_IT5_ITOFL 0x3B4
330#define TSI148_LCSR_IT5_ITAT 0x3B8
331
332#define TSI148_LCSR_IT6_ITSAU 0x3C0
333#define TSI148_LCSR_IT6_ITSAL 0x3C4
334#define TSI148_LCSR_IT6_ITEAU 0x3C8
335#define TSI148_LCSR_IT6_ITEAL 0x3CC
336#define TSI148_LCSR_IT6_ITOFU 0x3D0
337#define TSI148_LCSR_IT6_ITOFL 0x3D4
338#define TSI148_LCSR_IT6_ITAT 0x3D8
339
340#define TSI148_LCSR_IT7_ITSAU 0x3E0
341#define TSI148_LCSR_IT7_ITSAL 0x3E4
342#define TSI148_LCSR_IT7_ITEAU 0x3E8
343#define TSI148_LCSR_IT7_ITEAL 0x3EC
344#define TSI148_LCSR_IT7_ITOFU 0x3F0
345#define TSI148_LCSR_IT7_ITOFL 0x3F4
346#define TSI148_LCSR_IT7_ITAT 0x3F8
347
348
349#define TSI148_LCSR_IT0 0x300
350#define TSI148_LCSR_IT1 0x320
351#define TSI148_LCSR_IT2 0x340
352#define TSI148_LCSR_IT3 0x360
353#define TSI148_LCSR_IT4 0x380
354#define TSI148_LCSR_IT5 0x3A0
355#define TSI148_LCSR_IT6 0x3C0
356#define TSI148_LCSR_IT7 0x3E0
357
/* Base offsets of the eight inbound translation register blocks,
 * indexed by slave window number. */
static const int TSI148_LCSR_IT[8] = { TSI148_LCSR_IT0, TSI148_LCSR_IT1,
					 TSI148_LCSR_IT2, TSI148_LCSR_IT3,
					 TSI148_LCSR_IT4, TSI148_LCSR_IT5,
					 TSI148_LCSR_IT6, TSI148_LCSR_IT7 };
362
363#define TSI148_LCSR_OFFSET_ITSAU 0x0
364#define TSI148_LCSR_OFFSET_ITSAL 0x4
365#define TSI148_LCSR_OFFSET_ITEAU 0x8
366#define TSI148_LCSR_OFFSET_ITEAL 0xC
367#define TSI148_LCSR_OFFSET_ITOFU 0x10
368#define TSI148_LCSR_OFFSET_ITOFL 0x14
369#define TSI148_LCSR_OFFSET_ITAT 0x18
370
371 /*
372 * Inbound Translation GCSR
373 * offset 400
374 */
375#define TSI148_LCSR_GBAU 0x400
376#define TSI148_LCSR_GBAL 0x404
377#define TSI148_LCSR_GCSRAT 0x408
378
379 /*
380 * Inbound Translation CRG
381 * offset 40C
382 */
383#define TSI148_LCSR_CBAU 0x40C
384#define TSI148_LCSR_CBAL 0x410
385#define TSI148_LCSR_CSRAT 0x414
386
387 /*
388 * Inbound Translation CR/CSR
389 * CRG
390 * offset 418
391 */
392#define TSI148_LCSR_CROU 0x418
393#define TSI148_LCSR_CROL 0x41C
394#define TSI148_LCSR_CRAT 0x420
395
396 /*
397 * Inbound Translation Location Monitor
398 * offset 424
399 */
400#define TSI148_LCSR_LMBAU 0x424
401#define TSI148_LCSR_LMBAL 0x428
402#define TSI148_LCSR_LMAT 0x42C
403
404 /*
405 * VMEbus Interrupt Control.
406 * offset 430
407 */
408#define TSI148_LCSR_BCU 0x430
409#define TSI148_LCSR_BCL 0x434
410#define TSI148_LCSR_BPGTR 0x438
411#define TSI148_LCSR_BPCTR 0x43C
412#define TSI148_LCSR_VICR 0x440
413
414 /*
415 * Local Bus Interrupt Control.
416 * offset 448
417 */
418#define TSI148_LCSR_INTEN 0x448
419#define TSI148_LCSR_INTEO 0x44C
420#define TSI148_LCSR_INTS 0x450
421#define TSI148_LCSR_INTC 0x454
422#define TSI148_LCSR_INTM1 0x458
423#define TSI148_LCSR_INTM2 0x45C
424
425 /*
426 * DMA Controllers
427 * offset 500
428 */
429#define TSI148_LCSR_DCTL0 0x500
430#define TSI148_LCSR_DSTA0 0x504
431#define TSI148_LCSR_DCSAU0 0x508
432#define TSI148_LCSR_DCSAL0 0x50C
433#define TSI148_LCSR_DCDAU0 0x510
434#define TSI148_LCSR_DCDAL0 0x514
435#define TSI148_LCSR_DCLAU0 0x518
436#define TSI148_LCSR_DCLAL0 0x51C
437#define TSI148_LCSR_DSAU0 0x520
438#define TSI148_LCSR_DSAL0 0x524
439#define TSI148_LCSR_DDAU0 0x528
440#define TSI148_LCSR_DDAL0 0x52C
441#define TSI148_LCSR_DSAT0 0x530
442#define TSI148_LCSR_DDAT0 0x534
443#define TSI148_LCSR_DNLAU0 0x538
444#define TSI148_LCSR_DNLAL0 0x53C
445#define TSI148_LCSR_DCNT0 0x540
446#define TSI148_LCSR_DDBS0 0x544
447
448#define TSI148_LCSR_DCTL1 0x580
449#define TSI148_LCSR_DSTA1 0x584
450#define TSI148_LCSR_DCSAU1 0x588
451#define TSI148_LCSR_DCSAL1 0x58C
452#define TSI148_LCSR_DCDAU1 0x590
453#define TSI148_LCSR_DCDAL1 0x594
454#define TSI148_LCSR_DCLAU1 0x598
455#define TSI148_LCSR_DCLAL1 0x59C
456#define TSI148_LCSR_DSAU1 0x5A0
457#define TSI148_LCSR_DSAL1 0x5A4
458#define TSI148_LCSR_DDAU1 0x5A8
459#define TSI148_LCSR_DDAL1 0x5AC
460#define TSI148_LCSR_DSAT1 0x5B0
461#define TSI148_LCSR_DDAT1 0x5B4
462#define TSI148_LCSR_DNLAU1 0x5B8
463#define TSI148_LCSR_DNLAL1 0x5BC
464#define TSI148_LCSR_DCNT1 0x5C0
465#define TSI148_LCSR_DDBS1 0x5C4
466
467#define TSI148_LCSR_DMA0 0x500
468#define TSI148_LCSR_DMA1 0x580
469
470
/* Base offsets of the two DMA controller register blocks, indexed by
 * controller number. */
static const int TSI148_LCSR_DMA[TSI148_MAX_DMA] = { TSI148_LCSR_DMA0,
						TSI148_LCSR_DMA1 };
473
474#define TSI148_LCSR_OFFSET_DCTL 0x0
475#define TSI148_LCSR_OFFSET_DSTA 0x4
476#define TSI148_LCSR_OFFSET_DCSAU 0x8
477#define TSI148_LCSR_OFFSET_DCSAL 0xC
478#define TSI148_LCSR_OFFSET_DCDAU 0x10
479#define TSI148_LCSR_OFFSET_DCDAL 0x14
480#define TSI148_LCSR_OFFSET_DCLAU 0x18
481#define TSI148_LCSR_OFFSET_DCLAL 0x1C
482#define TSI148_LCSR_OFFSET_DSAU 0x20
483#define TSI148_LCSR_OFFSET_DSAL 0x24
484#define TSI148_LCSR_OFFSET_DDAU 0x28
485#define TSI148_LCSR_OFFSET_DDAL 0x2C
486#define TSI148_LCSR_OFFSET_DSAT 0x30
487#define TSI148_LCSR_OFFSET_DDAT 0x34
488#define TSI148_LCSR_OFFSET_DNLAU 0x38
489#define TSI148_LCSR_OFFSET_DNLAL 0x3C
490#define TSI148_LCSR_OFFSET_DCNT 0x40
491#define TSI148_LCSR_OFFSET_DDBS 0x44
492
493 /*
494 * GCSR Register Group
495 */
496
497 /*
498 * GCSR CRG
499 * offset 00 600 - DEVI/VENI
500 * offset 04 604 - CTRL/GA/REVID
501 * offset 08 608 - Semaphore3/2/1/0
 502 * offset 0C 60C - Semaphore7/6/5/4
503 */
504#define TSI148_GCSR_ID 0x600
505#define TSI148_GCSR_CSR 0x604
506#define TSI148_GCSR_SEMA0 0x608
507#define TSI148_GCSR_SEMA1 0x60C
508
509 /*
510 * Mail Box
511 * GCSR CRG
512 * offset 10 610 - Mailbox0
513 */
514#define TSI148_GCSR_MBOX0 0x610
515#define TSI148_GCSR_MBOX1 0x614
516#define TSI148_GCSR_MBOX2 0x618
517#define TSI148_GCSR_MBOX3 0x61C
518
/* GCSR mailbox register offsets, indexed by mailbox number. */
static const int TSI148_GCSR_MBOX[4] = { TSI148_GCSR_MBOX0,
					 TSI148_GCSR_MBOX1,
					 TSI148_GCSR_MBOX2,
					 TSI148_GCSR_MBOX3 };
523
524 /*
525 * CR/CSR
526 */
527
528 /*
529 * CR/CSR CRG
530 * offset 7FFF4 FF4 - CSRBCR
531 * offset 7FFF8 FF8 - CSRBSR
532 * offset 7FFFC FFC - CBAR
533 */
534#define TSI148_CSRBCR 0xFF4
535#define TSI148_CSRBSR 0xFF8
536#define TSI148_CBAR 0xFFC
537
538
539
540
541 /*
542 * TSI148 Register Bit Definitions
543 */
544
545 /*
546 * PFCS Register Set
547 */
548#define TSI148_PCFS_CMMD_SERR (1<<8) /* SERR_L out pin ssys err */
549#define TSI148_PCFS_CMMD_PERR (1<<6) /* PERR_L out pin parity */
550#define TSI148_PCFS_CMMD_MSTR (1<<2) /* PCI bus master */
551#define TSI148_PCFS_CMMD_MEMSP (1<<1) /* PCI mem space access */
552#define TSI148_PCFS_CMMD_IOSP (1<<0) /* PCI I/O space enable */
553
554#define TSI148_PCFS_STAT_RCPVE (1<<15) /* Detected Parity Error */
555#define TSI148_PCFS_STAT_SIGSE (1<<14) /* Signalled System Error */
556#define TSI148_PCFS_STAT_RCVMA (1<<13) /* Received Master Abort */
557#define TSI148_PCFS_STAT_RCVTA (1<<12) /* Received Target Abort */
558#define TSI148_PCFS_STAT_SIGTA (1<<11) /* Signalled Target Abort */
559#define TSI148_PCFS_STAT_SELTIM (3<<9) /* DELSEL Timing */
560#define TSI148_PCFS_STAT_DPAR (1<<8) /* Data Parity Err Reported */
561#define TSI148_PCFS_STAT_FAST (1<<7) /* Fast back-to-back Cap */
562#define TSI148_PCFS_STAT_P66M (1<<5) /* 66 MHz Capable */
563#define TSI148_PCFS_STAT_CAPL (1<<4) /* Capab List - address $34 */
564
565/*
566 * Revision ID/Class Code Registers (CRG +$008)
567 */
568#define TSI148_PCFS_CLAS_M (0xFF<<24) /* Class ID */
569#define TSI148_PCFS_SUBCLAS_M (0xFF<<16) /* Sub-Class ID */
570#define TSI148_PCFS_PROGIF_M (0xFF<<8) /* Sub-Class ID */
571#define TSI148_PCFS_REVID_M (0xFF<<0) /* Rev ID */
572
573/*
574 * Cache Line Size/ Master Latency Timer/ Header Type Registers (CRG + $00C)
575 */
576#define TSI148_PCFS_HEAD_M (0xFF<<16) /* Master Lat Timer */
577#define TSI148_PCFS_MLAT_M (0xFF<<8) /* Master Lat Timer */
578#define TSI148_PCFS_CLSZ_M (0xFF<<0) /* Cache Line Size */
579
580/*
581 * Memory Base Address Lower Reg (CRG + $010)
582 */
583#define TSI148_PCFS_MBARL_BASEL_M (0xFFFFF<<12) /* Base Addr Lower Mask */
584#define TSI148_PCFS_MBARL_PRE (1<<3) /* Prefetch */
585#define TSI148_PCFS_MBARL_MTYPE_M (3<<1) /* Memory Type Mask */
586#define TSI148_PCFS_MBARL_IOMEM (1<<0) /* I/O Space Indicator */
587
588/*
589 * Message Signaled Interrupt Capabilities Register (CRG + $040)
590 */
591#define TSI148_PCFS_MSICAP_64BAC (1<<7) /* 64-bit Address Capable */
592#define TSI148_PCFS_MSICAP_MME_M (7<<4) /* Multiple Msg Enable Mask */
593#define TSI148_PCFS_MSICAP_MMC_M (7<<1) /* Multiple Msg Capable Mask */
594#define TSI148_PCFS_MSICAP_MSIEN (1<<0) /* Msg signaled INT Enable */
595
596/*
597 * Message Address Lower Register (CRG +$044)
598 */
599#define TSI148_PCFS_MSIAL_M (0x3FFFFFFF<<2) /* Mask */
600
601/*
602 * Message Data Register (CRG + 4C)
603 */
604#define TSI148_PCFS_MSIMD_M (0xFFFF<<0) /* Mask */
605
606/*
607 * PCI-X Capabilities Register (CRG + $050)
608 */
609#define TSI148_PCFS_PCIXCAP_MOST_M (7<<4) /* Max outstanding Split Tran */
610#define TSI148_PCFS_PCIXCAP_MMRBC_M (3<<2) /* Max Mem Read byte cnt */
611#define TSI148_PCFS_PCIXCAP_ERO (1<<1) /* Enable Relaxed Ordering */
612#define TSI148_PCFS_PCIXCAP_DPERE (1<<0) /* Data Parity Recover Enable */
613
614/*
615 * PCI-X Status Register (CRG +$054)
616 */
617#define TSI148_PCFS_PCIXSTAT_RSCEM (1<<29) /* Received Split Comp Error */
618#define TSI148_PCFS_PCIXSTAT_DMCRS_M (7<<26) /* max Cumulative Read Size */
619#define TSI148_PCFS_PCIXSTAT_DMOST_M (7<<23) /* max outstanding Split Trans
620 */
621#define TSI148_PCFS_PCIXSTAT_DMMRC_M (3<<21) /* max mem read byte count */
622#define TSI148_PCFS_PCIXSTAT_DC (1<<20) /* Device Complexity */
623#define TSI148_PCFS_PCIXSTAT_USC (1<<19) /* Unexpected Split comp */
624#define TSI148_PCFS_PCIXSTAT_SCD (1<<18) /* Split completion discard */
625#define TSI148_PCFS_PCIXSTAT_133C (1<<17) /* 133MHz capable */
626#define TSI148_PCFS_PCIXSTAT_64D (1<<16) /* 64 bit device */
627#define TSI148_PCFS_PCIXSTAT_BN_M (0xFF<<8) /* Bus number */
628#define TSI148_PCFS_PCIXSTAT_DN_M (0x1F<<3) /* Device number */
629#define TSI148_PCFS_PCIXSTAT_FN_M (7<<0) /* Function Number */
630
631/*
632 * LCSR Registers
633 */
634
635/*
636 * Outbound Translation Starting Address Lower
637 */
638#define TSI148_LCSR_OTSAL_M (0xFFFF<<16) /* Mask */
639
640/*
641 * Outbound Translation Ending Address Lower
642 */
643#define TSI148_LCSR_OTEAL_M (0xFFFF<<16) /* Mask */
644
645/*
646 * Outbound Translation Offset Lower
647 */
648#define TSI148_LCSR_OTOFFL_M (0xFFFF<<16) /* Mask */
649
650/*
651 * Outbound Translation 2eSST Broadcast Select
652 */
653#define TSI148_LCSR_OTBS_M (0xFFFFF<<0) /* Mask */
654
655/*
656 * Outbound Translation Attribute
657 */
658#define TSI148_LCSR_OTAT_EN (1<<31) /* Window Enable */
659#define TSI148_LCSR_OTAT_MRPFD (1<<18) /* Prefetch Disable */
660
661#define TSI148_LCSR_OTAT_PFS_M (3<<16) /* Prefetch Size Mask */
662#define TSI148_LCSR_OTAT_PFS_2 (0<<16) /* 2 Cache Lines P Size */
663#define TSI148_LCSR_OTAT_PFS_4 (1<<16) /* 4 Cache Lines P Size */
664#define TSI148_LCSR_OTAT_PFS_8 (2<<16) /* 8 Cache Lines P Size */
665#define TSI148_LCSR_OTAT_PFS_16 (3<<16) /* 16 Cache Lines P Size */
666
667#define TSI148_LCSR_OTAT_2eSSTM_M (7<<11) /* 2eSST Xfer Rate Mask */
668#define TSI148_LCSR_OTAT_2eSSTM_160 (0<<11) /* 160MB/s 2eSST Xfer Rate */
669#define TSI148_LCSR_OTAT_2eSSTM_267 (1<<11) /* 267MB/s 2eSST Xfer Rate */
670#define TSI148_LCSR_OTAT_2eSSTM_320 (2<<11) /* 320MB/s 2eSST Xfer Rate */
671
672#define TSI148_LCSR_OTAT_TM_M (7<<8) /* Xfer Protocol Mask */
673#define TSI148_LCSR_OTAT_TM_SCT (0<<8) /* SCT Xfer Protocol */
674#define TSI148_LCSR_OTAT_TM_BLT (1<<8) /* BLT Xfer Protocol */
675#define TSI148_LCSR_OTAT_TM_MBLT (2<<8) /* MBLT Xfer Protocol */
676#define TSI148_LCSR_OTAT_TM_2eVME (3<<8) /* 2eVME Xfer Protocol */
677#define TSI148_LCSR_OTAT_TM_2eSST (4<<8) /* 2eSST Xfer Protocol */
678#define TSI148_LCSR_OTAT_TM_2eSSTB (5<<8) /* 2eSST Bcast Xfer Protocol */
679
680#define TSI148_LCSR_OTAT_DBW_M (3<<6) /* Max Data Width */
681#define TSI148_LCSR_OTAT_DBW_16 (0<<6) /* 16-bit Data Width */
682#define TSI148_LCSR_OTAT_DBW_32 (1<<6) /* 32-bit Data Width */
683
684#define TSI148_LCSR_OTAT_SUP (1<<5) /* Supervisory Access */
685#define TSI148_LCSR_OTAT_PGM (1<<4) /* Program Access */
686
687#define TSI148_LCSR_OTAT_AMODE_M (0xf<<0) /* Address Mode Mask */
688#define TSI148_LCSR_OTAT_AMODE_A16 (0<<0) /* A16 Address Space */
689#define TSI148_LCSR_OTAT_AMODE_A24 (1<<0) /* A24 Address Space */
690#define TSI148_LCSR_OTAT_AMODE_A32 (2<<0) /* A32 Address Space */
 691#define TSI148_LCSR_OTAT_AMODE_A64 (4<<0) /* A64 Address Space */
692#define TSI148_LCSR_OTAT_AMODE_CRCSR (5<<0) /* CR/CSR Address Space */
693#define TSI148_LCSR_OTAT_AMODE_USER1 (8<<0) /* User1 Address Space */
694#define TSI148_LCSR_OTAT_AMODE_USER2 (9<<0) /* User2 Address Space */
695#define TSI148_LCSR_OTAT_AMODE_USER3 (10<<0) /* User3 Address Space */
696#define TSI148_LCSR_OTAT_AMODE_USER4 (11<<0) /* User4 Address Space */
697
698/*
699 * VME Master Control Register CRG+$234
700 */
701#define TSI148_LCSR_VMCTRL_VSA (1<<27) /* VMEbus Stop Ack */
702#define TSI148_LCSR_VMCTRL_VS (1<<26) /* VMEbus Stop */
703#define TSI148_LCSR_VMCTRL_DHB (1<<25) /* Device Has Bus */
704#define TSI148_LCSR_VMCTRL_DWB (1<<24) /* Device Wants Bus */
705
706#define TSI148_LCSR_VMCTRL_RMWEN (1<<20) /* RMW Enable */
707
708#define TSI148_LCSR_VMCTRL_ATO_M (7<<16) /* Master Access Time-out Mask
709 */
710#define TSI148_LCSR_VMCTRL_ATO_32 (0<<16) /* 32 us */
711#define TSI148_LCSR_VMCTRL_ATO_128 (1<<16) /* 128 us */
712#define TSI148_LCSR_VMCTRL_ATO_512 (2<<16) /* 512 us */
713#define TSI148_LCSR_VMCTRL_ATO_2M (3<<16) /* 2 ms */
714#define TSI148_LCSR_VMCTRL_ATO_8M (4<<16) /* 8 ms */
715#define TSI148_LCSR_VMCTRL_ATO_32M (5<<16) /* 32 ms */
716#define TSI148_LCSR_VMCTRL_ATO_128M (6<<16) /* 128 ms */
717#define TSI148_LCSR_VMCTRL_ATO_DIS (7<<16) /* Disabled */
718
719#define TSI148_LCSR_VMCTRL_VTOFF_M (7<<12) /* VMEbus Master Time off */
720#define TSI148_LCSR_VMCTRL_VTOFF_0 (0<<12) /* 0us */
721#define TSI148_LCSR_VMCTRL_VTOFF_1 (1<<12) /* 1us */
722#define TSI148_LCSR_VMCTRL_VTOFF_2 (2<<12) /* 2us */
723#define TSI148_LCSR_VMCTRL_VTOFF_4 (3<<12) /* 4us */
724#define TSI148_LCSR_VMCTRL_VTOFF_8 (4<<12) /* 8us */
725#define TSI148_LCSR_VMCTRL_VTOFF_16 (5<<12) /* 16us */
726#define TSI148_LCSR_VMCTRL_VTOFF_32 (6<<12) /* 32us */
727#define TSI148_LCSR_VMCTRL_VTOFF_64 (7<<12) /* 64us */
728
729#define TSI148_LCSR_VMCTRL_VTON_M (7<<8) /* VMEbus Master Time On */
 730#define TSI148_LCSR_VMCTRL_VTON_4 (0<<8) /* 4us */
731#define TSI148_LCSR_VMCTRL_VTON_8 (1<<8) /* 8us */
732#define TSI148_LCSR_VMCTRL_VTON_16 (2<<8) /* 16us */
733#define TSI148_LCSR_VMCTRL_VTON_32 (3<<8) /* 32us */
734#define TSI148_LCSR_VMCTRL_VTON_64 (4<<8) /* 64us */
735#define TSI148_LCSR_VMCTRL_VTON_128 (5<<8) /* 128us */
736#define TSI148_LCSR_VMCTRL_VTON_256 (6<<8) /* 256us */
737#define TSI148_LCSR_VMCTRL_VTON_512 (7<<8) /* 512us */
738
739#define TSI148_LCSR_VMCTRL_VREL_M (3<<3) /* VMEbus Master Rel Mode Mask
740 */
741#define TSI148_LCSR_VMCTRL_VREL_T_D (0<<3) /* Time on or Done */
742#define TSI148_LCSR_VMCTRL_VREL_T_R_D (1<<3) /* Time on and REQ or Done */
743#define TSI148_LCSR_VMCTRL_VREL_T_B_D (2<<3) /* Time on and BCLR or Done */
744#define TSI148_LCSR_VMCTRL_VREL_T_D_R (3<<3) /* Time on or Done and REQ */
745
746#define TSI148_LCSR_VMCTRL_VFAIR (1<<2) /* VMEbus Master Fair Mode */
747#define TSI148_LCSR_VMCTRL_VREQL_M (3<<0) /* VMEbus Master Req Level Mask
748 */
749
750/*
751 * VMEbus Control Register CRG+$238
752 */
753#define TSI148_LCSR_VCTRL_LRE (1<<31) /* Late Retry Enable */
754
755#define TSI148_LCSR_VCTRL_DLT_M (0xF<<24) /* Deadlock Timer */
756#define TSI148_LCSR_VCTRL_DLT_OFF (0<<24) /* Deadlock Timer Off */
757#define TSI148_LCSR_VCTRL_DLT_16 (1<<24) /* 16 VCLKS */
758#define TSI148_LCSR_VCTRL_DLT_32 (2<<24) /* 32 VCLKS */
759#define TSI148_LCSR_VCTRL_DLT_64 (3<<24) /* 64 VCLKS */
760#define TSI148_LCSR_VCTRL_DLT_128 (4<<24) /* 128 VCLKS */
761#define TSI148_LCSR_VCTRL_DLT_256 (5<<24) /* 256 VCLKS */
762#define TSI148_LCSR_VCTRL_DLT_512 (6<<24) /* 512 VCLKS */
763#define TSI148_LCSR_VCTRL_DLT_1024 (7<<24) /* 1024 VCLKS */
764#define TSI148_LCSR_VCTRL_DLT_2048 (8<<24) /* 2048 VCLKS */
765#define TSI148_LCSR_VCTRL_DLT_4096 (9<<24) /* 4096 VCLKS */
766#define TSI148_LCSR_VCTRL_DLT_8192 (0xA<<24) /* 8192 VCLKS */
767#define TSI148_LCSR_VCTRL_DLT_16384 (0xB<<24) /* 16384 VCLKS */
768#define TSI148_LCSR_VCTRL_DLT_32768 (0xC<<24) /* 32768 VCLKS */
769
770#define TSI148_LCSR_VCTRL_NERBB (1<<20) /* No Early Release of Bus Busy
771 */
772
773#define TSI148_LCSR_VCTRL_SRESET (1<<17) /* System Reset */
774#define TSI148_LCSR_VCTRL_LRESET (1<<16) /* Local Reset */
775
776#define TSI148_LCSR_VCTRL_SFAILAI (1<<15) /* SYSFAIL Auto Slot ID */
777#define TSI148_LCSR_VCTRL_BID_M (0x1F<<8) /* Broadcast ID Mask */
778
779#define TSI148_LCSR_VCTRL_ATOEN (1<<7) /* Arbiter Time-out Enable */
780#define TSI148_LCSR_VCTRL_ROBIN (1<<6) /* VMEbus Round Robin */
781
782#define TSI148_LCSR_VCTRL_GTO_M (7<<0) /* VMEbus Global Time-out Mask
783 */
784#define TSI148_LCSR_VCTRL_GTO_8 (0<<0) /* 8 us */
785#define TSI148_LCSR_VCTRL_GTO_16 (1<<0) /* 16 us */
786#define TSI148_LCSR_VCTRL_GTO_32 (2<<0) /* 32 us */
787#define TSI148_LCSR_VCTRL_GTO_64 (3<<0) /* 64 us */
788#define TSI148_LCSR_VCTRL_GTO_128 (4<<0) /* 128 us */
789#define TSI148_LCSR_VCTRL_GTO_256 (5<<0) /* 256 us */
790#define TSI148_LCSR_VCTRL_GTO_512 (6<<0) /* 512 us */
791#define TSI148_LCSR_VCTRL_GTO_DIS (7<<0) /* Disabled */
792
793/*
794 * VMEbus Status Register CRG + $23C
795 */
796#define TSI148_LCSR_VSTAT_CPURST (1<<15) /* Clear power up reset */
797#define TSI148_LCSR_VSTAT_BRDFL (1<<14) /* Board fail */
798#define TSI148_LCSR_VSTAT_PURSTS (1<<12) /* Power up reset status */
799#define TSI148_LCSR_VSTAT_BDFAILS (1<<11) /* Board Fail Status */
800#define TSI148_LCSR_VSTAT_SYSFAILS (1<<10) /* System Fail Status */
801#define TSI148_LCSR_VSTAT_ACFAILS (1<<9) /* AC fail status */
802#define TSI148_LCSR_VSTAT_SCONS (1<<8) /* System Cont Status */
803#define TSI148_LCSR_VSTAT_GAP (1<<5) /* Geographic Addr Parity */
804#define TSI148_LCSR_VSTAT_GA_M (0x1F<<0) /* Geographic Addr Mask */
805
806/*
807 * PCI Configuration Status Register CRG+$240
808 */
809#define TSI148_LCSR_PSTAT_REQ64S (1<<6) /* Request 64 status set */
810#define TSI148_LCSR_PSTAT_M66ENS (1<<5) /* M66ENS 66Mhz enable */
811#define TSI148_LCSR_PSTAT_FRAMES (1<<4) /* Frame Status */
812#define TSI148_LCSR_PSTAT_IRDYS (1<<3) /* IRDY status */
813#define TSI148_LCSR_PSTAT_DEVSELS (1<<2) /* DEVL status */
814#define TSI148_LCSR_PSTAT_STOPS (1<<1) /* STOP status */
815#define TSI148_LCSR_PSTAT_TRDYS (1<<0) /* TRDY status */
816
817/*
818 * VMEbus Exception Attributes Register CRG + $268
819 */
820#define TSI148_LCSR_VEAT_VES (1<<31) /* Status */
821#define TSI148_LCSR_VEAT_VEOF (1<<30) /* Overflow */
822#define TSI148_LCSR_VEAT_VESCL (1<<29) /* Status Clear */
823#define TSI148_LCSR_VEAT_2EOT (1<<21) /* 2e Odd Termination */
824#define TSI148_LCSR_VEAT_2EST (1<<20) /* 2e Slave terminated */
825#define TSI148_LCSR_VEAT_BERR (1<<19) /* Bus Error */
826#define TSI148_LCSR_VEAT_LWORD (1<<18) /* LWORD_ signal state */
827#define TSI148_LCSR_VEAT_WRITE (1<<17) /* WRITE_ signal state */
828#define TSI148_LCSR_VEAT_IACK (1<<16) /* IACK_ signal state */
829#define TSI148_LCSR_VEAT_DS1 (1<<15) /* DS1_ signal state */
830#define TSI148_LCSR_VEAT_DS0 (1<<14) /* DS0_ signal state */
831#define TSI148_LCSR_VEAT_AM_M (0x3F<<8) /* Address Mode Mask */
832#define TSI148_LCSR_VEAT_XAM_M (0xFF<<0) /* Master AMode Mask */
833
834
835/*
836 * VMEbus PCI Error Diagnostics PCI/X Attributes Register CRG + $280
837 */
838#define TSI148_LCSR_EDPAT_EDPCL (1<<29)
839
840/*
841 * Inbound Translation Starting Address Lower
842 */
843#define TSI148_LCSR_ITSAL6432_M (0xFFFF<<16) /* Mask */
844#define TSI148_LCSR_ITSAL24_M (0x00FFF<<12) /* Mask */
845#define TSI148_LCSR_ITSAL16_M (0x0000FFF<<4) /* Mask */
846
847/*
848 * Inbound Translation Ending Address Lower
849 */
850#define TSI148_LCSR_ITEAL6432_M (0xFFFF<<16) /* Mask */
851#define TSI148_LCSR_ITEAL24_M (0x00FFF<<12) /* Mask */
852#define TSI148_LCSR_ITEAL16_M (0x0000FFF<<4) /* Mask */
853
854/*
855 * Inbound Translation Offset Lower
856 */
857#define TSI148_LCSR_ITOFFL6432_M (0xFFFF<<16) /* Mask */
858#define TSI148_LCSR_ITOFFL24_M (0xFFFFF<<12) /* Mask */
859#define TSI148_LCSR_ITOFFL16_M (0xFFFFFFF<<4) /* Mask */
860
861/*
862 * Inbound Translation Attribute
863 */
864#define TSI148_LCSR_ITAT_EN (1<<31) /* Window Enable */
865#define TSI148_LCSR_ITAT_TH (1<<18) /* Prefetch Threshold */
866
867#define TSI148_LCSR_ITAT_VFS_M (3<<16) /* Virtual FIFO Size Mask */
868#define TSI148_LCSR_ITAT_VFS_64 (0<<16) /* 64 bytes Virtual FIFO Size */
869#define TSI148_LCSR_ITAT_VFS_128 (1<<16) /* 128 bytes Virtual FIFO Sz */
870#define TSI148_LCSR_ITAT_VFS_256 (2<<16) /* 256 bytes Virtual FIFO Sz */
871#define TSI148_LCSR_ITAT_VFS_512 (3<<16) /* 512 bytes Virtual FIFO Sz */
872
873#define TSI148_LCSR_ITAT_2eSSTM_M (7<<12) /* 2eSST Xfer Rate Mask */
874#define TSI148_LCSR_ITAT_2eSSTM_160 (0<<12) /* 160MB/s 2eSST Xfer Rate */
875#define TSI148_LCSR_ITAT_2eSSTM_267 (1<<12) /* 267MB/s 2eSST Xfer Rate */
876#define TSI148_LCSR_ITAT_2eSSTM_320 (2<<12) /* 320MB/s 2eSST Xfer Rate */
877
878#define TSI148_LCSR_ITAT_2eSSTB (1<<11) /* 2eSST Bcast Xfer Protocol */
879#define TSI148_LCSR_ITAT_2eSST (1<<10) /* 2eSST Xfer Protocol */
880#define TSI148_LCSR_ITAT_2eVME (1<<9) /* 2eVME Xfer Protocol */
881#define TSI148_LCSR_ITAT_MBLT (1<<8) /* MBLT Xfer Protocol */
882#define TSI148_LCSR_ITAT_BLT (1<<7) /* BLT Xfer Protocol */
883
884#define TSI148_LCSR_ITAT_AS_M (7<<4) /* Address Space Mask */
885#define TSI148_LCSR_ITAT_AS_A16 (0<<4) /* A16 Address Space */
886#define TSI148_LCSR_ITAT_AS_A24 (1<<4) /* A24 Address Space */
887#define TSI148_LCSR_ITAT_AS_A32 (2<<4) /* A32 Address Space */
888#define TSI148_LCSR_ITAT_AS_A64 (4<<4) /* A64 Address Space */
889
890#define TSI148_LCSR_ITAT_SUPR (1<<3) /* Supervisor Access */
891#define TSI148_LCSR_ITAT_NPRIV (1<<2) /* Non-Priv (User) Access */
892#define TSI148_LCSR_ITAT_PGM (1<<1) /* Program Access */
893#define TSI148_LCSR_ITAT_DATA (1<<0) /* Data Access */
894
895/*
896 * GCSR Base Address Lower Address CRG +$404
897 */
898#define TSI148_LCSR_GBAL_M (0x7FFFFFF<<5) /* Mask */
899
900/*
901 * GCSR Attribute Register CRG + $408
902 */
903#define TSI148_LCSR_GCSRAT_EN (1<<7) /* Enable access to GCSR */
904
905#define TSI148_LCSR_GCSRAT_AS_M (7<<4) /* Address Space Mask */
906#define TSI148_LCSR_GCSRAT_AS_A16 (0<<4) /* Address Space 16 */
907#define TSI148_LCSR_GCSRAT_AS_A24 (1<<4) /* Address Space 24 */
908#define TSI148_LCSR_GCSRAT_AS_A32 (2<<4) /* Address Space 32 */
909#define TSI148_LCSR_GCSRAT_AS_A64 (4<<4) /* Address Space 64 */
910
911#define TSI148_LCSR_GCSRAT_SUPR (1<<3) /* Sup set -GCSR decoder */
912#define TSI148_LCSR_GCSRAT_NPRIV (1<<2) /* Non-Privliged set - CGSR */
913#define TSI148_LCSR_GCSRAT_PGM (1<<1) /* Program set - GCSR decoder */
914#define TSI148_LCSR_GCSRAT_DATA (1<<0) /* DATA set GCSR decoder */
915
916/*
917 * CRG Base Address Lower Address CRG + $410
918 */
919#define TSI148_LCSR_CBAL_M (0xFFFFF<<12)
920
921/*
922 * CRG Attribute Register CRG + $414
923 */
924#define TSI148_LCSR_CRGAT_EN (1<<7) /* Enable PRG Access */
925
926#define TSI148_LCSR_CRGAT_AS_M (7<<4) /* Address Space */
927#define TSI148_LCSR_CRGAT_AS_A16 (0<<4) /* Address Space 16 */
928#define TSI148_LCSR_CRGAT_AS_A24 (1<<4) /* Address Space 24 */
929#define TSI148_LCSR_CRGAT_AS_A32 (2<<4) /* Address Space 32 */
930#define TSI148_LCSR_CRGAT_AS_A64 (4<<4) /* Address Space 64 */
931
932#define TSI148_LCSR_CRGAT_SUPR (1<<3) /* Supervisor Access */
933#define TSI148_LCSR_CRGAT_NPRIV (1<<2) /* Non-Privliged(User) Access */
934#define TSI148_LCSR_CRGAT_PGM (1<<1) /* Program Access */
935#define TSI148_LCSR_CRGAT_DATA (1<<0) /* Data Access */
936
937/*
938 * CR/CSR Offset Lower Register CRG + $41C
939 */
940#define TSI148_LCSR_CROL_M (0x1FFF<<19) /* Mask */
941
942/*
943 * CR/CSR Attribute register CRG + $420
944 */
945#define TSI148_LCSR_CRAT_EN (1<<7) /* Enable access to CR/CSR */
946
947/*
948 * Location Monitor base address lower register CRG + $428
949 */
950#define TSI148_LCSR_LMBAL_M (0x7FFFFFF<<5) /* Mask */
951
952/*
953 * Location Monitor Attribute Register CRG + $42C
954 */
955#define TSI148_LCSR_LMAT_EN (1<<7) /* Enable Location Monitor */
956
957#define TSI148_LCSR_LMAT_AS_M (7<<4) /* Address Space MASK */
958#define TSI148_LCSR_LMAT_AS_A16 (0<<4) /* A16 */
959#define TSI148_LCSR_LMAT_AS_A24 (1<<4) /* A24 */
960#define TSI148_LCSR_LMAT_AS_A32 (2<<4) /* A32 */
961#define TSI148_LCSR_LMAT_AS_A64 (4<<4) /* A64 */
962
963#define TSI148_LCSR_LMAT_SUPR (1<<3) /* Supervisor Access */
964#define TSI148_LCSR_LMAT_NPRIV (1<<2) /* Non-Priv (User) Access */
965#define TSI148_LCSR_LMAT_PGM (1<<1) /* Program Access */
966#define TSI148_LCSR_LMAT_DATA (1<<0) /* Data Access */
967
968/*
969 * Broadcast Pulse Generator Timer Register CRG + $438
970 */
971#define TSI148_LCSR_BPGTR_BPGT_M (0xFFFF<<0) /* Mask */
972
973/*
974 * Broadcast Programmable Clock Timer Register CRG + $43C
975 */
976#define TSI148_LCSR_BPCTR_BPCT_M (0xFFFFFF<<0) /* Mask */
977
978/*
979 * VMEbus Interrupt Control Register CRG + $43C
980 */
981#define TSI148_LCSR_VICR_CNTS_M (3<<22) /* Cntr Source MASK */
982#define TSI148_LCSR_VICR_CNTS_DIS (1<<22) /* Cntr Disable */
983#define TSI148_LCSR_VICR_CNTS_IRQ1 (2<<22) /* IRQ1 to Cntr */
984#define TSI148_LCSR_VICR_CNTS_IRQ2 (3<<22) /* IRQ2 to Cntr */
985
986#define TSI148_LCSR_VICR_EDGIS_M (3<<20) /* Edge interrupt MASK */
987#define TSI148_LCSR_VICR_EDGIS_DIS (1<<20) /* Edge interrupt Disable */
988#define TSI148_LCSR_VICR_EDGIS_IRQ1 (2<<20) /* IRQ1 to Edge */
989#define TSI148_LCSR_VICR_EDGIS_IRQ2 (3<<20) /* IRQ2 to Edge */
990
991#define TSI148_LCSR_VICR_IRQIF_M (3<<18) /* IRQ1* Function MASK */
992#define TSI148_LCSR_VICR_IRQIF_NORM (1<<18) /* Normal */
993#define TSI148_LCSR_VICR_IRQIF_PULSE (2<<18) /* Pulse Generator */
994#define TSI148_LCSR_VICR_IRQIF_PROG (3<<18) /* Programmable Clock */
995#define TSI148_LCSR_VICR_IRQIF_1U (4<<18) /* 1us Clock */
996
997#define TSI148_LCSR_VICR_IRQ2F_M (3<<16) /* IRQ2* Function MASK */
998#define TSI148_LCSR_VICR_IRQ2F_NORM (1<<16) /* Normal */
999#define TSI148_LCSR_VICR_IRQ2F_PULSE (2<<16) /* Pulse Generator */
1000#define TSI148_LCSR_VICR_IRQ2F_PROG (3<<16) /* Programmable Clock */
1001#define TSI148_LCSR_VICR_IRQ2F_1U (4<<16) /* 1us Clock */
1002
1003#define TSI148_LCSR_VICR_BIP (1<<15) /* Broadcast Interrupt Pulse */
1004
1005#define TSI148_LCSR_VICR_IRQC (1<<12) /* VMEbus IRQ Clear */
1006#define TSI148_LCSR_VICR_IRQS (1<<11) /* VMEbus IRQ Status */
1007
1008#define TSI148_LCSR_VICR_IRQL_M (7<<8) /* VMEbus SW IRQ Level Mask */
1009#define TSI148_LCSR_VICR_IRQL_1 (1<<8) /* VMEbus SW IRQ Level 1 */
1010#define TSI148_LCSR_VICR_IRQL_2 (2<<8) /* VMEbus SW IRQ Level 2 */
1011#define TSI148_LCSR_VICR_IRQL_3 (3<<8) /* VMEbus SW IRQ Level 3 */
1012#define TSI148_LCSR_VICR_IRQL_4 (4<<8) /* VMEbus SW IRQ Level 4 */
1013#define TSI148_LCSR_VICR_IRQL_5 (5<<8) /* VMEbus SW IRQ Level 5 */
1014#define TSI148_LCSR_VICR_IRQL_6 (6<<8) /* VMEbus SW IRQ Level 6 */
1015#define TSI148_LCSR_VICR_IRQL_7 (7<<8) /* VMEbus SW IRQ Level 7 */
1016
1017static const int TSI148_LCSR_VICR_IRQL[8] = { 0, TSI148_LCSR_VICR_IRQL_1,
1018 TSI148_LCSR_VICR_IRQL_2, TSI148_LCSR_VICR_IRQL_3,
1019 TSI148_LCSR_VICR_IRQL_4, TSI148_LCSR_VICR_IRQL_5,
1020 TSI148_LCSR_VICR_IRQL_6, TSI148_LCSR_VICR_IRQL_7 };
1021
1022#define TSI148_LCSR_VICR_STID_M (0xFF<<0) /* Status/ID Mask */
1023
1024/*
1025 * Interrupt Enable Register CRG + $440
1026 */
1027#define TSI148_LCSR_INTEN_DMA1EN (1<<25) /* DMAC 1 */
1028#define TSI148_LCSR_INTEN_DMA0EN (1<<24) /* DMAC 0 */
1029#define TSI148_LCSR_INTEN_LM3EN (1<<23) /* Location Monitor 3 */
1030#define TSI148_LCSR_INTEN_LM2EN (1<<22) /* Location Monitor 2 */
1031#define TSI148_LCSR_INTEN_LM1EN (1<<21) /* Location Monitor 1 */
1032#define TSI148_LCSR_INTEN_LM0EN (1<<20) /* Location Monitor 0 */
1033#define TSI148_LCSR_INTEN_MB3EN (1<<19) /* Mail Box 3 */
1034#define TSI148_LCSR_INTEN_MB2EN (1<<18) /* Mail Box 2 */
1035#define TSI148_LCSR_INTEN_MB1EN (1<<17) /* Mail Box 1 */
1036#define TSI148_LCSR_INTEN_MB0EN (1<<16) /* Mail Box 0 */
1037#define TSI148_LCSR_INTEN_PERREN (1<<13) /* PCI/X Error */
1038#define TSI148_LCSR_INTEN_VERREN (1<<12) /* VMEbus Error */
1039#define TSI148_LCSR_INTEN_VIEEN (1<<11) /* VMEbus IRQ Edge */
1040#define TSI148_LCSR_INTEN_IACKEN (1<<10) /* IACK */
1041#define TSI148_LCSR_INTEN_SYSFLEN (1<<9) /* System Fail */
1042#define TSI148_LCSR_INTEN_ACFLEN (1<<8) /* AC Fail */
1043#define TSI148_LCSR_INTEN_IRQ7EN (1<<7) /* IRQ7 */
1044#define TSI148_LCSR_INTEN_IRQ6EN (1<<6) /* IRQ6 */
1045#define TSI148_LCSR_INTEN_IRQ5EN (1<<5) /* IRQ5 */
1046#define TSI148_LCSR_INTEN_IRQ4EN (1<<4) /* IRQ4 */
1047#define TSI148_LCSR_INTEN_IRQ3EN (1<<3) /* IRQ3 */
1048#define TSI148_LCSR_INTEN_IRQ2EN (1<<2) /* IRQ2 */
1049#define TSI148_LCSR_INTEN_IRQ1EN (1<<1) /* IRQ1 */
1050
1051static const int TSI148_LCSR_INTEN_LMEN[4] = { TSI148_LCSR_INTEN_LM0EN,
1052 TSI148_LCSR_INTEN_LM1EN,
1053 TSI148_LCSR_INTEN_LM2EN,
1054 TSI148_LCSR_INTEN_LM3EN };
1055
1056static const int TSI148_LCSR_INTEN_IRQEN[7] = { TSI148_LCSR_INTEN_IRQ1EN,
1057 TSI148_LCSR_INTEN_IRQ2EN,
1058 TSI148_LCSR_INTEN_IRQ3EN,
1059 TSI148_LCSR_INTEN_IRQ4EN,
1060 TSI148_LCSR_INTEN_IRQ5EN,
1061 TSI148_LCSR_INTEN_IRQ6EN,
1062 TSI148_LCSR_INTEN_IRQ7EN };
1063
1064/*
1065 * Interrupt Enable Out Register CRG + $444
1066 */
1067#define TSI148_LCSR_INTEO_DMA1EO (1<<25) /* DMAC 1 */
1068#define TSI148_LCSR_INTEO_DMA0EO (1<<24) /* DMAC 0 */
1069#define TSI148_LCSR_INTEO_LM3EO (1<<23) /* Loc Monitor 3 */
1070#define TSI148_LCSR_INTEO_LM2EO (1<<22) /* Loc Monitor 2 */
1071#define TSI148_LCSR_INTEO_LM1EO (1<<21) /* Loc Monitor 1 */
1072#define TSI148_LCSR_INTEO_LM0EO (1<<20) /* Location Monitor 0 */
1073#define TSI148_LCSR_INTEO_MB3EO (1<<19) /* Mail Box 3 */
1074#define TSI148_LCSR_INTEO_MB2EO (1<<18) /* Mail Box 2 */
1075#define TSI148_LCSR_INTEO_MB1EO (1<<17) /* Mail Box 1 */
1076#define TSI148_LCSR_INTEO_MB0EO (1<<16) /* Mail Box 0 */
1077#define TSI148_LCSR_INTEO_PERREO (1<<13) /* PCI/X Error */
1078#define TSI148_LCSR_INTEO_VERREO (1<<12) /* VMEbus Error */
1079#define TSI148_LCSR_INTEO_VIEEO (1<<11) /* VMEbus IRQ Edge */
1080#define TSI148_LCSR_INTEO_IACKEO (1<<10) /* IACK */
1081#define TSI148_LCSR_INTEO_SYSFLEO (1<<9) /* System Fail */
1082#define TSI148_LCSR_INTEO_ACFLEO (1<<8) /* AC Fail */
1083#define TSI148_LCSR_INTEO_IRQ7EO (1<<7) /* IRQ7 */
1084#define TSI148_LCSR_INTEO_IRQ6EO (1<<6) /* IRQ6 */
1085#define TSI148_LCSR_INTEO_IRQ5EO (1<<5) /* IRQ5 */
1086#define TSI148_LCSR_INTEO_IRQ4EO (1<<4) /* IRQ4 */
1087#define TSI148_LCSR_INTEO_IRQ3EO (1<<3) /* IRQ3 */
1088#define TSI148_LCSR_INTEO_IRQ2EO (1<<2) /* IRQ2 */
1089#define TSI148_LCSR_INTEO_IRQ1EO (1<<1) /* IRQ1 */
1090
1091static const int TSI148_LCSR_INTEO_LMEO[4] = { TSI148_LCSR_INTEO_LM0EO,
1092 TSI148_LCSR_INTEO_LM1EO,
1093 TSI148_LCSR_INTEO_LM2EO,
1094 TSI148_LCSR_INTEO_LM3EO };
1095
1096static const int TSI148_LCSR_INTEO_IRQEO[7] = { TSI148_LCSR_INTEO_IRQ1EO,
1097 TSI148_LCSR_INTEO_IRQ2EO,
1098 TSI148_LCSR_INTEO_IRQ3EO,
1099 TSI148_LCSR_INTEO_IRQ4EO,
1100 TSI148_LCSR_INTEO_IRQ5EO,
1101 TSI148_LCSR_INTEO_IRQ6EO,
1102 TSI148_LCSR_INTEO_IRQ7EO };
1103
1104/*
1105 * Interrupt Status Register CRG + $448
1106 */
1107#define TSI148_LCSR_INTS_DMA1S (1<<25) /* DMA 1 */
1108#define TSI148_LCSR_INTS_DMA0S (1<<24) /* DMA 0 */
1109#define TSI148_LCSR_INTS_LM3S (1<<23) /* Location Monitor 3 */
1110#define TSI148_LCSR_INTS_LM2S (1<<22) /* Location Monitor 2 */
1111#define TSI148_LCSR_INTS_LM1S (1<<21) /* Location Monitor 1 */
1112#define TSI148_LCSR_INTS_LM0S (1<<20) /* Location Monitor 0 */
1113#define TSI148_LCSR_INTS_MB3S (1<<19) /* Mail Box 3 */
1114#define TSI148_LCSR_INTS_MB2S (1<<18) /* Mail Box 2 */
1115#define TSI148_LCSR_INTS_MB1S (1<<17) /* Mail Box 1 */
1116#define TSI148_LCSR_INTS_MB0S (1<<16) /* Mail Box 0 */
1117#define TSI148_LCSR_INTS_PERRS (1<<13) /* PCI/X Error */
1118#define TSI148_LCSR_INTS_VERRS (1<<12) /* VMEbus Error */
1119#define TSI148_LCSR_INTS_VIES (1<<11) /* VMEbus IRQ Edge */
1120#define TSI148_LCSR_INTS_IACKS (1<<10) /* IACK */
1121#define TSI148_LCSR_INTS_SYSFLS (1<<9) /* System Fail */
1122#define TSI148_LCSR_INTS_ACFLS (1<<8) /* AC Fail */
1123#define TSI148_LCSR_INTS_IRQ7S (1<<7) /* IRQ7 */
1124#define TSI148_LCSR_INTS_IRQ6S (1<<6) /* IRQ6 */
1125#define TSI148_LCSR_INTS_IRQ5S (1<<5) /* IRQ5 */
1126#define TSI148_LCSR_INTS_IRQ4S (1<<4) /* IRQ4 */
1127#define TSI148_LCSR_INTS_IRQ3S (1<<3) /* IRQ3 */
1128#define TSI148_LCSR_INTS_IRQ2S (1<<2) /* IRQ2 */
1129#define TSI148_LCSR_INTS_IRQ1S (1<<1) /* IRQ1 */
1130
1131static const int TSI148_LCSR_INTS_LMS[4] = { TSI148_LCSR_INTS_LM0S,
1132 TSI148_LCSR_INTS_LM1S,
1133 TSI148_LCSR_INTS_LM2S,
1134 TSI148_LCSR_INTS_LM3S };
1135
1136static const int TSI148_LCSR_INTS_MBS[4] = { TSI148_LCSR_INTS_MB0S,
1137 TSI148_LCSR_INTS_MB1S,
1138 TSI148_LCSR_INTS_MB2S,
1139 TSI148_LCSR_INTS_MB3S };
1140
/*
 * Interrupt Clear Register CRG + $44C
 */
#define TSI148_LCSR_INTC_DMA1C (1<<25) /* DMA 1 */
#define TSI148_LCSR_INTC_DMA0C (1<<24) /* DMA 0 */
#define TSI148_LCSR_INTC_LM3C (1<<23) /* Location Monitor 3 */
#define TSI148_LCSR_INTC_LM2C (1<<22) /* Location Monitor 2 */
#define TSI148_LCSR_INTC_LM1C (1<<21) /* Location Monitor 1 */
#define TSI148_LCSR_INTC_LM0C (1<<20) /* Location Monitor 0 */
#define TSI148_LCSR_INTC_MB3C (1<<19) /* Mail Box 3 */
#define TSI148_LCSR_INTC_MB2C (1<<18) /* Mail Box 2 */
#define TSI148_LCSR_INTC_MB1C (1<<17) /* Mail Box 1 */
#define TSI148_LCSR_INTC_MB0C (1<<16) /* Mail Box 0 */
/* Bit meanings fixed to match INTEN/INTEO/INTS: bit 13 PCI/X, bit 12 VME */
#define TSI148_LCSR_INTC_PERRC (1<<13) /* PCI/X Error */
#define TSI148_LCSR_INTC_VERRC (1<<12) /* VMEbus Error */
#define TSI148_LCSR_INTC_VIEC (1<<11) /* VMEbus IRQ Edge */
#define TSI148_LCSR_INTC_IACKC (1<<10) /* IACK */
#define TSI148_LCSR_INTC_SYSFLC (1<<9) /* System Fail */
#define TSI148_LCSR_INTC_ACFLC (1<<8) /* AC Fail */
1160
1161static const int TSI148_LCSR_INTC_LMC[4] = { TSI148_LCSR_INTC_LM0C,
1162 TSI148_LCSR_INTC_LM1C,
1163 TSI148_LCSR_INTC_LM2C,
1164 TSI148_LCSR_INTC_LM3C };
1165
1166static const int TSI148_LCSR_INTC_MBC[4] = { TSI148_LCSR_INTC_MB0C,
1167 TSI148_LCSR_INTC_MB1C,
1168 TSI148_LCSR_INTC_MB2C,
1169 TSI148_LCSR_INTC_MB3C };
1170
1171/*
1172 * Interrupt Map Register 1 CRG + $458
1173 */
1174#define TSI148_LCSR_INTM1_DMA1M_M (3<<18) /* DMA 1 */
1175#define TSI148_LCSR_INTM1_DMA0M_M (3<<16) /* DMA 0 */
1176#define TSI148_LCSR_INTM1_LM3M_M (3<<14) /* Location Monitor 3 */
1177#define TSI148_LCSR_INTM1_LM2M_M (3<<12) /* Location Monitor 2 */
1178#define TSI148_LCSR_INTM1_LM1M_M (3<<10) /* Location Monitor 1 */
1179#define TSI148_LCSR_INTM1_LM0M_M (3<<8) /* Location Monitor 0 */
1180#define TSI148_LCSR_INTM1_MB3M_M (3<<6) /* Mail Box 3 */
1181#define TSI148_LCSR_INTM1_MB2M_M (3<<4) /* Mail Box 2 */
1182#define TSI148_LCSR_INTM1_MB1M_M (3<<2) /* Mail Box 1 */
1183#define TSI148_LCSR_INTM1_MB0M_M (3<<0) /* Mail Box 0 */
1184
1185/*
1186 * Interrupt Map Register 2 CRG + $45C
1187 */
1188#define TSI148_LCSR_INTM2_PERRM_M (3<<26) /* PCI Bus Error */
1189#define TSI148_LCSR_INTM2_VERRM_M (3<<24) /* VMEbus Error */
1190#define TSI148_LCSR_INTM2_VIEM_M (3<<22) /* VMEbus IRQ Edge */
1191#define TSI148_LCSR_INTM2_IACKM_M (3<<20) /* IACK */
1192#define TSI148_LCSR_INTM2_SYSFLM_M (3<<18) /* System Fail */
1193#define TSI148_LCSR_INTM2_ACFLM_M (3<<16) /* AC Fail */
1194#define TSI148_LCSR_INTM2_IRQ7M_M (3<<14) /* IRQ7 */
1195#define TSI148_LCSR_INTM2_IRQ6M_M (3<<12) /* IRQ6 */
1196#define TSI148_LCSR_INTM2_IRQ5M_M (3<<10) /* IRQ5 */
1197#define TSI148_LCSR_INTM2_IRQ4M_M (3<<8) /* IRQ4 */
1198#define TSI148_LCSR_INTM2_IRQ3M_M (3<<6) /* IRQ3 */
1199#define TSI148_LCSR_INTM2_IRQ2M_M (3<<4) /* IRQ2 */
1200#define TSI148_LCSR_INTM2_IRQ1M_M (3<<2) /* IRQ1 */
1201
/*
 * DMA Control (0-1) Registers CRG + $500
 */
#define TSI148_LCSR_DCTL_ABT (1<<27) /* Abort */
#define TSI148_LCSR_DCTL_PAU (1<<26) /* Pause */
#define TSI148_LCSR_DCTL_DGO (1<<25) /* DMA Go */

#define TSI148_LCSR_DCTL_MOD (1<<23) /* Mode */

#define TSI148_LCSR_DCTL_VBKS_M (7<<12) /* VMEbus block Size MASK */
#define TSI148_LCSR_DCTL_VBKS_32 (0<<12) /* VMEbus block Size 32 */
#define TSI148_LCSR_DCTL_VBKS_64 (1<<12) /* VMEbus block Size 64 */
#define TSI148_LCSR_DCTL_VBKS_128 (2<<12) /* VMEbus block Size 128 */
#define TSI148_LCSR_DCTL_VBKS_256 (3<<12) /* VMEbus block Size 256 */
#define TSI148_LCSR_DCTL_VBKS_512 (4<<12) /* VMEbus block Size 512 */
#define TSI148_LCSR_DCTL_VBKS_1024 (5<<12) /* VMEbus block Size 1024 */
#define TSI148_LCSR_DCTL_VBKS_2048 (6<<12) /* VMEbus block Size 2048 */
#define TSI148_LCSR_DCTL_VBKS_4096 (7<<12) /* VMEbus block Size 4096 */

#define TSI148_LCSR_DCTL_VBOT_M (7<<8) /* VMEbus back-off MASK */
#define TSI148_LCSR_DCTL_VBOT_0 (0<<8) /* VMEbus back-off 0us */
#define TSI148_LCSR_DCTL_VBOT_1 (1<<8) /* VMEbus back-off 1us */
#define TSI148_LCSR_DCTL_VBOT_2 (2<<8) /* VMEbus back-off 2us */
#define TSI148_LCSR_DCTL_VBOT_4 (3<<8) /* VMEbus back-off 4us */
#define TSI148_LCSR_DCTL_VBOT_8 (4<<8) /* VMEbus back-off 8us */
#define TSI148_LCSR_DCTL_VBOT_16 (5<<8) /* VMEbus back-off 16us */
#define TSI148_LCSR_DCTL_VBOT_32 (6<<8) /* VMEbus back-off 32us */
#define TSI148_LCSR_DCTL_VBOT_64 (7<<8) /* VMEbus back-off 64us */

#define TSI148_LCSR_DCTL_PBKS_M (7<<4) /* PCI block size MASK */
#define TSI148_LCSR_DCTL_PBKS_32 (0<<4) /* PCI block size 32 bytes */
#define TSI148_LCSR_DCTL_PBKS_64 (1<<4) /* PCI block size 64 bytes */
#define TSI148_LCSR_DCTL_PBKS_128 (2<<4) /* PCI block size 128 bytes */
#define TSI148_LCSR_DCTL_PBKS_256 (3<<4) /* PCI block size 256 bytes */
#define TSI148_LCSR_DCTL_PBKS_512 (4<<4) /* PCI block size 512 bytes */
#define TSI148_LCSR_DCTL_PBKS_1024 (5<<4) /* PCI block size 1024 bytes */
#define TSI148_LCSR_DCTL_PBKS_2048 (6<<4) /* PCI block size 2048 bytes */
#define TSI148_LCSR_DCTL_PBKS_4096 (7<<4) /* PCI block size 4096 bytes */

/* Back-off comments fixed to match the macro names (same encoding as VBOT) */
#define TSI148_LCSR_DCTL_PBOT_M (7<<0) /* PCI back off MASK */
#define TSI148_LCSR_DCTL_PBOT_0 (0<<0) /* PCI back off 0us */
#define TSI148_LCSR_DCTL_PBOT_1 (1<<0) /* PCI back off 1us */
#define TSI148_LCSR_DCTL_PBOT_2 (2<<0) /* PCI back off 2us */
#define TSI148_LCSR_DCTL_PBOT_4 (3<<0) /* PCI back off 4us */
#define TSI148_LCSR_DCTL_PBOT_8 (4<<0) /* PCI back off 8us */
#define TSI148_LCSR_DCTL_PBOT_16 (5<<0) /* PCI back off 16us */
#define TSI148_LCSR_DCTL_PBOT_32 (6<<0) /* PCI back off 32us */
#define TSI148_LCSR_DCTL_PBOT_64 (7<<0) /* PCI back off 64us */
1250
1251/*
1252 * DMA Status Registers (0-1) CRG + $504
1253 */
1254#define TSI148_LCSR_DSTA_SMA (1<<31) /* PCI Signalled Master Abt */
1255#define TSI148_LCSR_DSTA_RTA (1<<30) /* PCI Received Target Abt */
1256#define TSI148_LCSR_DSTA_MRC (1<<29) /* PCI Max Retry Count */
1257#define TSI148_LCSR_DSTA_VBE (1<<28) /* VMEbus error */
1258#define TSI148_LCSR_DSTA_ABT (1<<27) /* Abort */
1259#define TSI148_LCSR_DSTA_PAU (1<<26) /* Pause */
1260#define TSI148_LCSR_DSTA_DON (1<<25) /* Done */
1261#define TSI148_LCSR_DSTA_BSY (1<<24) /* Busy */
1262
1263/*
1264 * DMA Current Link Address Lower (0-1)
1265 */
1266#define TSI148_LCSR_DCLAL_M (0x3FFFFFF<<6) /* Mask */
1267
1268/*
1269 * DMA Source Attribute (0-1) Reg
1270 */
1271#define TSI148_LCSR_DSAT_TYP_M (3<<28) /* Source Bus Type */
1272#define TSI148_LCSR_DSAT_TYP_PCI (0<<28) /* PCI Bus */
1273#define TSI148_LCSR_DSAT_TYP_VME (1<<28) /* VMEbus */
1274#define TSI148_LCSR_DSAT_TYP_PAT (2<<28) /* Data Pattern */
1275
1276#define TSI148_LCSR_DSAT_PSZ (1<<25) /* Pattern Size */
1277#define TSI148_LCSR_DSAT_NIN (1<<24) /* No Increment */
1278
1279#define TSI148_LCSR_DSAT_2eSSTM_M (3<<11) /* 2eSST Trans Rate Mask */
1280#define TSI148_LCSR_DSAT_2eSSTM_160 (0<<11) /* 160 MB/s */
1281#define TSI148_LCSR_DSAT_2eSSTM_267 (1<<11) /* 267 MB/s */
1282#define TSI148_LCSR_DSAT_2eSSTM_320 (2<<11) /* 320 MB/s */
1283
1284#define TSI148_LCSR_DSAT_TM_M (7<<8) /* Bus Transfer Protocol Mask */
1285#define TSI148_LCSR_DSAT_TM_SCT (0<<8) /* SCT */
1286#define TSI148_LCSR_DSAT_TM_BLT (1<<8) /* BLT */
1287#define TSI148_LCSR_DSAT_TM_MBLT (2<<8) /* MBLT */
1288#define TSI148_LCSR_DSAT_TM_2eVME (3<<8) /* 2eVME */
1289#define TSI148_LCSR_DSAT_TM_2eSST (4<<8) /* 2eSST */
1290#define TSI148_LCSR_DSAT_TM_2eSSTB (5<<8) /* 2eSST Broadcast */
1291
1292#define TSI148_LCSR_DSAT_DBW_M (3<<6) /* Max Data Width MASK */
1293#define TSI148_LCSR_DSAT_DBW_16 (0<<6) /* 16 Bits */
1294#define TSI148_LCSR_DSAT_DBW_32 (1<<6) /* 32 Bits */
1295
1296#define TSI148_LCSR_DSAT_SUP (1<<5) /* Supervisory Mode */
1297#define TSI148_LCSR_DSAT_PGM (1<<4) /* Program Mode */
1298
1299#define TSI148_LCSR_DSAT_AMODE_M (0xf<<0) /* Address Space Mask */
1300#define TSI148_LCSR_DSAT_AMODE_A16 (0<<0) /* A16 */
1301#define TSI148_LCSR_DSAT_AMODE_A24 (1<<0) /* A24 */
1302#define TSI148_LCSR_DSAT_AMODE_A32 (2<<0) /* A32 */
1303#define TSI148_LCSR_DSAT_AMODE_A64 (4<<0) /* A64 */
1304#define TSI148_LCSR_DSAT_AMODE_CRCSR (5<<0) /* CR/CSR */
1305#define TSI148_LCSR_DSAT_AMODE_USER1 (8<<0) /* User1 */
1306#define TSI148_LCSR_DSAT_AMODE_USER2 (9<<0) /* User2 */
1307#define TSI148_LCSR_DSAT_AMODE_USER3 (0xa<<0) /* User3 */
1308#define TSI148_LCSR_DSAT_AMODE_USER4 (0xb<<0) /* User4 */
1309
1310/*
1311 * DMA Destination Attribute Registers (0-1)
1312 */
1313#define TSI148_LCSR_DDAT_TYP_PCI (0<<28) /* Destination PCI Bus */
1314#define TSI148_LCSR_DDAT_TYP_VME (1<<28) /* Destination VMEbus */
1315
1316#define TSI148_LCSR_DDAT_2eSSTM_M (3<<11) /* 2eSST Transfer Rate Mask */
1317#define TSI148_LCSR_DDAT_2eSSTM_160 (0<<11) /* 160 MB/s */
1318#define TSI148_LCSR_DDAT_2eSSTM_267 (1<<11) /* 267 MB/s */
1319#define TSI148_LCSR_DDAT_2eSSTM_320 (2<<11) /* 320 MB/s */
1320
1321#define TSI148_LCSR_DDAT_TM_M (7<<8) /* Bus Transfer Protocol Mask */
1322#define TSI148_LCSR_DDAT_TM_SCT (0<<8) /* SCT */
1323#define TSI148_LCSR_DDAT_TM_BLT (1<<8) /* BLT */
1324#define TSI148_LCSR_DDAT_TM_MBLT (2<<8) /* MBLT */
1325#define TSI148_LCSR_DDAT_TM_2eVME (3<<8) /* 2eVME */
1326#define TSI148_LCSR_DDAT_TM_2eSST (4<<8) /* 2eSST */
1327#define TSI148_LCSR_DDAT_TM_2eSSTB (5<<8) /* 2eSST Broadcast */
1328
1329#define TSI148_LCSR_DDAT_DBW_M (3<<6) /* Max Data Width MASK */
1330#define TSI148_LCSR_DDAT_DBW_16 (0<<6) /* 16 Bits */
1331#define TSI148_LCSR_DDAT_DBW_32 (1<<6) /* 32 Bits */
1332
1333#define TSI148_LCSR_DDAT_SUP (1<<5) /* Supervisory/User Access */
1334#define TSI148_LCSR_DDAT_PGM (1<<4) /* Program/Data Access */
1335
1336#define TSI148_LCSR_DDAT_AMODE_M (0xf<<0) /* Address Space Mask */
1337#define TSI148_LCSR_DDAT_AMODE_A16 (0<<0) /* A16 */
1338#define TSI148_LCSR_DDAT_AMODE_A24 (1<<0) /* A24 */
1339#define TSI148_LCSR_DDAT_AMODE_A32 (2<<0) /* A32 */
1340#define TSI148_LCSR_DDAT_AMODE_A64 (4<<0) /* A64 */
1341#define TSI148_LCSR_DDAT_AMODE_CRCSR (5<<0) /* CRC/SR */
1342#define TSI148_LCSR_DDAT_AMODE_USER1 (8<<0) /* User1 */
1343#define TSI148_LCSR_DDAT_AMODE_USER2 (9<<0) /* User2 */
1344#define TSI148_LCSR_DDAT_AMODE_USER3 (0xa<<0) /* User3 */
1345#define TSI148_LCSR_DDAT_AMODE_USER4 (0xb<<0) /* User4 */
1346
1347/*
1348 * DMA Next Link Address Lower
1349 */
1350#define TSI148_LCSR_DNLAL_DNLAL_M (0x3FFFFFF<<6) /* Address Mask */
1351#define TSI148_LCSR_DNLAL_LLA (1<<0) /* Last Link Address Indicator */
1352
1353/*
1354 * DMA 2eSST Broadcast Select
1355 */
1356#define TSI148_LCSR_DBS_M (0x1FFFFF<<0) /* Mask */
1357
1358/*
1359 * GCSR Register Group
1360 */
1361
/*
 * GCSR Control and Status Register CRG + $604
 */
#define TSI148_GCSR_GCTRL_LRST (1<<15) /* Local Reset */
#define TSI148_GCSR_GCTRL_SFAILEN (1<<14) /* System Fail enable */
#define TSI148_GCSR_GCTRL_BDFAILS (1<<13) /* Board Fail Status */
#define TSI148_GCSR_GCTRL_SCON (1<<12) /* System Controller */
#define TSI148_GCSR_GCTRL_MEN (1<<11) /* Module Enable (READY) */

#define TSI148_GCSR_GCTRL_LMI3S (1<<7) /* Loc Monitor 3 Int Status */
#define TSI148_GCSR_GCTRL_LMI2S (1<<6) /* Loc Monitor 2 Int Status */
#define TSI148_GCSR_GCTRL_LMI1S (1<<5) /* Loc Monitor 1 Int Status */
#define TSI148_GCSR_GCTRL_LMI0S (1<<4) /* Loc Monitor 0 Int Status */
#define TSI148_GCSR_GCTRL_MBI3S (1<<3) /* Mail box 3 Int Status */
#define TSI148_GCSR_GCTRL_MBI2S (1<<2) /* Mail box 2 Int Status */
#define TSI148_GCSR_GCTRL_MBI1S (1<<1) /* Mail box 1 Int Status */
#define TSI148_GCSR_GCTRL_MBI0S (1<<0) /* Mail box 0 Int Status */

#define TSI148_GCSR_GAP (1<<5) /* Geographic Addr Parity */
#define TSI148_GCSR_GA_M (0x1F<<0) /* Geographic Address Mask */
1382
1383/*
1384 * CR/CSR Register Group
1385 */
1386
1387/*
1388 * CR/CSR Bit Clear Register CRG + $FF4
1389 */
1390#define TSI148_CRCSR_CSRBCR_LRSTC (1<<7) /* Local Reset Clear */
1391#define TSI148_CRCSR_CSRBCR_SFAILC (1<<6) /* System Fail Enable Clear */
1392#define TSI148_CRCSR_CSRBCR_BDFAILS (1<<5) /* Board Fail Status */
1393#define TSI148_CRCSR_CSRBCR_MENC (1<<4) /* Module Enable Clear */
1394#define TSI148_CRCSR_CSRBCR_BERRSC (1<<3) /* Bus Error Status Clear */
1395
/*
 * CR/CSR Bit Set Register CRG+$FF8
 */
/* Comments fixed: this is the bit SET register (cf. CSRBCR above) */
#define TSI148_CRCSR_CSRBSR_LISTS (1<<7) /* Local Reset Set */
#define TSI148_CRCSR_CSRBSR_SFAILS (1<<6) /* System Fail Enable Set */
#define TSI148_CRCSR_CSRBSR_BDFAILS (1<<5) /* Board Fail Status */
#define TSI148_CRCSR_CSRBSR_MENS (1<<4) /* Module Enable Set */
#define TSI148_CRCSR_CSRBSR_BERRS (1<<3) /* Bus Error Status Set */
1404
1405/*
1406 * CR/CSR Base Address Register CRG + FFC
1407 */
1408#define TSI148_CRCSR_CBAR_M (0x1F<<3) /* Mask */
1409
1410#endif /* TSI148_H */
diff --git a/drivers/vme/vme.c b/drivers/vme/vme.c
new file mode 100644
index 00000000000..95a9f71d793
--- /dev/null
+++ b/drivers/vme/vme.c
@@ -0,0 +1,1517 @@
1/*
2 * VME Bridge Framework
3 *
4 * Author: Martyn Welch <martyn.welch@ge.com>
5 * Copyright 2008 GE Intelligent Platforms Embedded Systems, Inc.
6 *
7 * Based on work by Tom Armistead and Ajit Prem
8 * Copyright 2004 Motorola Inc.
9 *
10 * This program is free software; you can redistribute it and/or modify it
11 * under the terms of the GNU General Public License as published by the
12 * Free Software Foundation; either version 2 of the License, or (at your
13 * option) any later version.
14 */
15
16#include <linux/module.h>
17#include <linux/moduleparam.h>
18#include <linux/mm.h>
19#include <linux/types.h>
20#include <linux/kernel.h>
21#include <linux/errno.h>
22#include <linux/pci.h>
23#include <linux/poll.h>
24#include <linux/highmem.h>
25#include <linux/interrupt.h>
26#include <linux/pagemap.h>
27#include <linux/device.h>
28#include <linux/dma-mapping.h>
29#include <linux/syscalls.h>
30#include <linux/mutex.h>
31#include <linux/spinlock.h>
32#include <linux/slab.h>
33#include <linux/vme.h>
34
35#include "vme_bridge.h"
36
37/* Bitmask and list of registered buses both protected by common mutex */
38static unsigned int vme_bus_numbers;
39static LIST_HEAD(vme_bus_list);
40static DEFINE_MUTEX(vme_buses_lock);
41
42static void __exit vme_exit(void);
43static int __init vme_init(void);
44
/* Convert an embedded struct device pointer back to its struct vme_dev. */
static struct vme_dev *dev_to_vme_dev(struct device *dev)
{
	return container_of(dev, struct vme_dev, dev);
}
49
50/*
51 * Find the bridge that the resource is associated with.
52 */
53static struct vme_bridge *find_bridge(struct vme_resource *resource)
54{
55 /* Get list to search */
56 switch (resource->type) {
57 case VME_MASTER:
58 return list_entry(resource->entry, struct vme_master_resource,
59 list)->parent;
60 break;
61 case VME_SLAVE:
62 return list_entry(resource->entry, struct vme_slave_resource,
63 list)->parent;
64 break;
65 case VME_DMA:
66 return list_entry(resource->entry, struct vme_dma_resource,
67 list)->parent;
68 break;
69 case VME_LM:
70 return list_entry(resource->entry, struct vme_lm_resource,
71 list)->parent;
72 break;
73 default:
74 printk(KERN_ERR "Unknown resource type\n");
75 return NULL;
76 break;
77 }
78}
79
80/*
81 * Allocate a contiguous block of memory for use by the driver. This is used to
82 * create the buffers for the slave windows.
83 */
84void *vme_alloc_consistent(struct vme_resource *resource, size_t size,
85 dma_addr_t *dma)
86{
87 struct vme_bridge *bridge;
88
89 if (resource == NULL) {
90 printk(KERN_ERR "No resource\n");
91 return NULL;
92 }
93
94 bridge = find_bridge(resource);
95 if (bridge == NULL) {
96 printk(KERN_ERR "Can't find bridge\n");
97 return NULL;
98 }
99
100 if (bridge->parent == NULL) {
101 printk(KERN_ERR "Dev entry NULL for bridge %s\n", bridge->name);
102 return NULL;
103 }
104
105 if (bridge->alloc_consistent == NULL) {
106 printk(KERN_ERR "alloc_consistent not supported by bridge %s\n",
107 bridge->name);
108 return NULL;
109 }
110
111 return bridge->alloc_consistent(bridge->parent, size, dma);
112}
113EXPORT_SYMBOL(vme_alloc_consistent);
114
115/*
116 * Free previously allocated contiguous block of memory.
117 */
118void vme_free_consistent(struct vme_resource *resource, size_t size,
119 void *vaddr, dma_addr_t dma)
120{
121 struct vme_bridge *bridge;
122
123 if (resource == NULL) {
124 printk(KERN_ERR "No resource\n");
125 return;
126 }
127
128 bridge = find_bridge(resource);
129 if (bridge == NULL) {
130 printk(KERN_ERR "Can't find bridge\n");
131 return;
132 }
133
134 if (bridge->parent == NULL) {
135 printk(KERN_ERR "Dev entry NULL for bridge %s\n", bridge->name);
136 return;
137 }
138
139 if (bridge->free_consistent == NULL) {
140 printk(KERN_ERR "free_consistent not supported by bridge %s\n",
141 bridge->name);
142 return;
143 }
144
145 bridge->free_consistent(bridge->parent, size, vaddr, dma);
146}
147EXPORT_SYMBOL(vme_free_consistent);
148
149size_t vme_get_size(struct vme_resource *resource)
150{
151 int enabled, retval;
152 unsigned long long base, size;
153 dma_addr_t buf_base;
154 u32 aspace, cycle, dwidth;
155
156 switch (resource->type) {
157 case VME_MASTER:
158 retval = vme_master_get(resource, &enabled, &base, &size,
159 &aspace, &cycle, &dwidth);
160
161 return size;
162 break;
163 case VME_SLAVE:
164 retval = vme_slave_get(resource, &enabled, &base, &size,
165 &buf_base, &aspace, &cycle);
166
167 return size;
168 break;
169 case VME_DMA:
170 return 0;
171 break;
172 default:
173 printk(KERN_ERR "Unknown resource type\n");
174 return 0;
175 break;
176 }
177}
178EXPORT_SYMBOL(vme_get_size);
179
180static int vme_check_window(u32 aspace, unsigned long long vme_base,
181 unsigned long long size)
182{
183 int retval = 0;
184
185 switch (aspace) {
186 case VME_A16:
187 if (((vme_base + size) > VME_A16_MAX) ||
188 (vme_base > VME_A16_MAX))
189 retval = -EFAULT;
190 break;
191 case VME_A24:
192 if (((vme_base + size) > VME_A24_MAX) ||
193 (vme_base > VME_A24_MAX))
194 retval = -EFAULT;
195 break;
196 case VME_A32:
197 if (((vme_base + size) > VME_A32_MAX) ||
198 (vme_base > VME_A32_MAX))
199 retval = -EFAULT;
200 break;
201 case VME_A64:
202 /*
203 * Any value held in an unsigned long long can be used as the
204 * base
205 */
206 break;
207 case VME_CRCSR:
208 if (((vme_base + size) > VME_CRCSR_MAX) ||
209 (vme_base > VME_CRCSR_MAX))
210 retval = -EFAULT;
211 break;
212 case VME_USER1:
213 case VME_USER2:
214 case VME_USER3:
215 case VME_USER4:
216 /* User Defined */
217 break;
218 default:
219 printk(KERN_ERR "Invalid address space\n");
220 retval = -EINVAL;
221 break;
222 }
223
224 return retval;
225}
226
/*
 * Request a slave image with specific attributes, return some unique
 * identifier.
 *
 * Scans the bridge's slave resources for an unlocked window whose address
 * and cycle capabilities are a superset of the requested attributes, locks
 * it, and wraps it in a freshly allocated vme_resource.  Returns NULL if
 * no bridge, no compatible free image, or no memory.  The caller releases
 * the image with vme_slave_free().
 */
struct vme_resource *vme_slave_request(struct vme_dev *vdev, u32 address,
	u32 cycle)
{
	struct vme_bridge *bridge;
	struct list_head *slave_pos = NULL;
	struct vme_slave_resource *allocated_image = NULL;
	struct vme_slave_resource *slave_image = NULL;
	struct vme_resource *resource = NULL;

	bridge = vdev->bridge;
	if (bridge == NULL) {
		printk(KERN_ERR "Can't find VME bus\n");
		goto err_bus;
	}

	/* Loop through slave resources */
	list_for_each(slave_pos, &bridge->slave_resources) {
		slave_image = list_entry(slave_pos,
			struct vme_slave_resource, list);

		if (slave_image == NULL) {
			printk(KERN_ERR "Registered NULL Slave resource\n");
			continue;
		}

		/* Find an unlocked and compatible image */
		mutex_lock(&slave_image->mtx);
		if (((slave_image->address_attr & address) == address) &&
			((slave_image->cycle_attr & cycle) == cycle) &&
			(slave_image->locked == 0)) {

			/* Claim the image while still holding its mutex */
			slave_image->locked = 1;
			mutex_unlock(&slave_image->mtx);
			allocated_image = slave_image;
			break;
		}
		mutex_unlock(&slave_image->mtx);
	}

	/* No free image */
	if (allocated_image == NULL)
		goto err_image;

	resource = kmalloc(sizeof(struct vme_resource), GFP_KERNEL);
	if (resource == NULL) {
		printk(KERN_WARNING "Unable to allocate resource structure\n");
		goto err_alloc;
	}
	resource->type = VME_SLAVE;
	resource->entry = &allocated_image->list;

	return resource;

err_alloc:
	/* Unlock image */
	mutex_lock(&slave_image->mtx);
	slave_image->locked = 0;
	mutex_unlock(&slave_image->mtx);
err_image:
err_bus:
	return NULL;
}
EXPORT_SYMBOL(vme_slave_request);
294
/*
 * Configure a previously requested slave window: enable state, VME base
 * address, window size, backing PCI buffer address and the VME address
 * space / cycle attributes.  Returns 0 on success or a negative errno.
 */
int vme_slave_set(struct vme_resource *resource, int enabled,
	unsigned long long vme_base, unsigned long long size,
	dma_addr_t buf_base, u32 aspace, u32 cycle)
{
	struct vme_bridge *bridge = find_bridge(resource);
	struct vme_slave_resource *image;
	int retval;

	if (resource->type != VME_SLAVE) {
		printk(KERN_ERR "Not a slave resource\n");
		return -EINVAL;
	}

	image = list_entry(resource->entry, struct vme_slave_resource, list);

	if (bridge->slave_set == NULL) {
		printk(KERN_ERR "Function not supported\n");
		return -ENOSYS;
	}

	/* Requested attributes must be a subset of what the image supports */
	if (!(((image->address_attr & aspace) == aspace) &&
		((image->cycle_attr & cycle) == cycle))) {
		printk(KERN_ERR "Invalid attributes\n");
		return -EINVAL;
	}

	/* Reject windows that exceed the bounds of the address space */
	retval = vme_check_window(aspace, vme_base, size);
	if (retval)
		return retval;

	return bridge->slave_set(image, enabled, vme_base, size, buf_base,
		aspace, cycle);
}
EXPORT_SYMBOL(vme_slave_set);
329
/*
 * Read back the current configuration of a slave window into the supplied
 * output parameters.  Returns 0 on success or a negative errno.
 */
int vme_slave_get(struct vme_resource *resource, int *enabled,
	unsigned long long *vme_base, unsigned long long *size,
	dma_addr_t *buf_base, u32 *aspace, u32 *cycle)
{
	struct vme_bridge *bridge = find_bridge(resource);
	struct vme_slave_resource *image;

	if (resource->type != VME_SLAVE) {
		printk(KERN_ERR "Not a slave resource\n");
		return -EINVAL;
	}

	image = list_entry(resource->entry, struct vme_slave_resource, list);

	if (bridge->slave_get == NULL) {
		printk(KERN_ERR "vme_slave_get not supported\n");
		return -EINVAL;
	}

	return bridge->slave_get(image, enabled, vme_base, size, buf_base,
		aspace, cycle);
}
EXPORT_SYMBOL(vme_slave_get);
353
/*
 * Release a slave window obtained via vme_slave_request(): clear the
 * image's locked flag so it can be re-requested, and free the resource
 * handle.
 */
void vme_slave_free(struct vme_resource *resource)
{
	struct vme_slave_resource *slave_image;

	if (resource->type != VME_SLAVE) {
		printk(KERN_ERR "Not a slave resource\n");
		return;
	}

	slave_image = list_entry(resource->entry, struct vme_slave_resource,
		list);
	/* NOTE(review): list_entry is container_of and cannot yield NULL
	 * here, so this check looks like dead code — confirm before removal.
	 */
	if (slave_image == NULL) {
		printk(KERN_ERR "Can't find slave resource\n");
		return;
	}

	/* Unlock image */
	mutex_lock(&slave_image->mtx);
	if (slave_image->locked == 0)
		printk(KERN_ERR "Image is already free\n");

	slave_image->locked = 0;
	mutex_unlock(&slave_image->mtx);

	/* Free up resource memory */
	kfree(resource);
}
EXPORT_SYMBOL(vme_slave_free);
382
/*
 * Request a master image with specific attributes, return some unique
 * identifier.
 *
 * Walks the bridge's list of master images looking for one that is not
 * locked and whose supported address-space, cycle and data-width masks
 * cover the requested attributes.  The image is claimed under its
 * spinlock; a vme_resource handle referring to it is returned, or NULL
 * on failure.
 */
struct vme_resource *vme_master_request(struct vme_dev *vdev, u32 address,
	u32 cycle, u32 dwidth)
{
	struct vme_bridge *bridge;
	struct list_head *master_pos = NULL;
	struct vme_master_resource *allocated_image = NULL;
	struct vme_master_resource *master_image = NULL;
	struct vme_resource *resource = NULL;

	bridge = vdev->bridge;
	if (bridge == NULL) {
		printk(KERN_ERR "Can't find VME bus\n");
		goto err_bus;
	}

	/* Loop through master resources */
	list_for_each(master_pos, &bridge->master_resources) {
		master_image = list_entry(master_pos,
			struct vme_master_resource, list);

		if (master_image == NULL) {
			printk(KERN_WARNING "Registered NULL master resource\n");
			continue;
		}

		/* Find an unlocked and compatible image */
		spin_lock(&master_image->lock);
		if (((master_image->address_attr & address) == address) &&
			((master_image->cycle_attr & cycle) == cycle) &&
			((master_image->width_attr & dwidth) == dwidth) &&
			(master_image->locked == 0)) {

			master_image->locked = 1;
			spin_unlock(&master_image->lock);
			allocated_image = master_image;
			break;
		}
		spin_unlock(&master_image->lock);
	}

	/* Check to see if we found a resource */
	if (allocated_image == NULL) {
		printk(KERN_ERR "Can't find a suitable resource\n");
		goto err_image;
	}

	resource = kmalloc(sizeof(struct vme_resource), GFP_KERNEL);
	if (resource == NULL) {
		printk(KERN_ERR "Unable to allocate resource structure\n");
		goto err_alloc;
	}
	resource->type = VME_MASTER;
	resource->entry = &allocated_image->list;

	return resource;

err_alloc:
	/* Unlock image */
	spin_lock(&master_image->lock);
	master_image->locked = 0;
	spin_unlock(&master_image->lock);
err_image:
err_bus:
	return NULL;
}
EXPORT_SYMBOL(vme_master_request);
453
/*
 * Configure a previously requested master window: enable state, VME base
 * address, window size, address space, cycle type and data width.
 * Returns 0 on success or a negative errno.
 */
int vme_master_set(struct vme_resource *resource, int enabled,
	unsigned long long vme_base, unsigned long long size, u32 aspace,
	u32 cycle, u32 dwidth)
{
	struct vme_bridge *bridge = find_bridge(resource);
	struct vme_master_resource *image;
	int retval;

	if (resource->type != VME_MASTER) {
		printk(KERN_ERR "Not a master resource\n");
		return -EINVAL;
	}

	image = list_entry(resource->entry, struct vme_master_resource, list);

	if (bridge->master_set == NULL) {
		printk(KERN_WARNING "vme_master_set not supported\n");
		return -EINVAL;
	}

	/* Requested attributes must be a subset of what the image supports */
	if (!(((image->address_attr & aspace) == aspace) &&
		((image->cycle_attr & cycle) == cycle) &&
		((image->width_attr & dwidth) == dwidth))) {
		printk(KERN_WARNING "Invalid attributes\n");
		return -EINVAL;
	}

	/* Reject windows that exceed the bounds of the address space */
	retval = vme_check_window(aspace, vme_base, size);
	if (retval)
		return retval;

	return bridge->master_set(image, enabled, vme_base, size, aspace,
		cycle, dwidth);
}
EXPORT_SYMBOL(vme_master_set);
489
490int vme_master_get(struct vme_resource *resource, int *enabled,
491 unsigned long long *vme_base, unsigned long long *size, u32 *aspace,
492 u32 *cycle, u32 *dwidth)
493{
494 struct vme_bridge *bridge = find_bridge(resource);
495 struct vme_master_resource *image;
496
497 if (resource->type != VME_MASTER) {
498 printk(KERN_ERR "Not a master resource\n");
499 return -EINVAL;
500 }
501
502 image = list_entry(resource->entry, struct vme_master_resource, list);
503
504 if (bridge->master_get == NULL) {
505 printk(KERN_WARNING "vme_master_set not supported\n");
506 return -EINVAL;
507 }
508
509 return bridge->master_get(image, enabled, vme_base, size, aspace,
510 cycle, dwidth);
511}
512EXPORT_SYMBOL(vme_master_get);
513
/*
 * Read data out of VME space into a buffer.
 *
 * Reads up to @count bytes starting at @offset within the master window
 * into @buf.  Requests extending past the end of the window are clamped
 * to the window size.  Returns the number of bytes transferred or a
 * negative errno.
 */
ssize_t vme_master_read(struct vme_resource *resource, void *buf, size_t count,
	loff_t offset)
{
	struct vme_bridge *bridge = find_bridge(resource);
	struct vme_master_resource *image;
	size_t length;

	if (bridge->master_read == NULL) {
		printk(KERN_WARNING "Reading from resource not supported\n");
		return -EINVAL;
	}

	if (resource->type != VME_MASTER) {
		printk(KERN_ERR "Not a master resource\n");
		return -EINVAL;
	}

	image = list_entry(resource->entry, struct vme_master_resource, list);

	length = vme_get_size(resource);

	if (offset > length) {
		printk(KERN_WARNING "Invalid Offset\n");
		return -EFAULT;
	}

	/* Clamp the transfer so it stays within the window */
	if ((offset + count) > length)
		count = length - offset;

	return bridge->master_read(image, buf, count, offset);

}
EXPORT_SYMBOL(vme_master_read);
550
/*
 * Write data out to VME space from a buffer.
 *
 * Writes up to @count bytes from @buf starting at @offset within the
 * master window.  Requests extending past the end of the window are
 * clamped to the window size.  Returns the number of bytes transferred
 * or a negative errno.
 */
ssize_t vme_master_write(struct vme_resource *resource, void *buf,
	size_t count, loff_t offset)
{
	struct vme_bridge *bridge = find_bridge(resource);
	struct vme_master_resource *image;
	size_t length;

	if (bridge->master_write == NULL) {
		printk(KERN_WARNING "Writing to resource not supported\n");
		return -EINVAL;
	}

	if (resource->type != VME_MASTER) {
		printk(KERN_ERR "Not a master resource\n");
		return -EINVAL;
	}

	image = list_entry(resource->entry, struct vme_master_resource, list);

	length = vme_get_size(resource);

	if (offset > length) {
		printk(KERN_WARNING "Invalid Offset\n");
		return -EFAULT;
	}

	/* Clamp the transfer so it stays within the window */
	if ((offset + count) > length)
		count = length - offset;

	return bridge->master_write(image, buf, count, offset);
}
EXPORT_SYMBOL(vme_master_write);
586
587/*
588 * Perform RMW cycle to provided location.
589 */
590unsigned int vme_master_rmw(struct vme_resource *resource, unsigned int mask,
591 unsigned int compare, unsigned int swap, loff_t offset)
592{
593 struct vme_bridge *bridge = find_bridge(resource);
594 struct vme_master_resource *image;
595
596 if (bridge->master_rmw == NULL) {
597 printk(KERN_WARNING "Writing to resource not supported\n");
598 return -EINVAL;
599 }
600
601 if (resource->type != VME_MASTER) {
602 printk(KERN_ERR "Not a master resource\n");
603 return -EINVAL;
604 }
605
606 image = list_entry(resource->entry, struct vme_master_resource, list);
607
608 return bridge->master_rmw(image, mask, compare, swap, offset);
609}
610EXPORT_SYMBOL(vme_master_rmw);
611
/*
 * Release a master window obtained via vme_master_request(): clear the
 * image's locked flag so it can be re-requested, and free the resource
 * handle.
 */
void vme_master_free(struct vme_resource *resource)
{
	struct vme_master_resource *master_image;

	if (resource->type != VME_MASTER) {
		printk(KERN_ERR "Not a master resource\n");
		return;
	}

	master_image = list_entry(resource->entry, struct vme_master_resource,
		list);
	/* NOTE(review): list_entry is container_of and cannot yield NULL
	 * here, so this check looks like dead code — confirm before removal.
	 */
	if (master_image == NULL) {
		printk(KERN_ERR "Can't find master resource\n");
		return;
	}

	/* Unlock image */
	spin_lock(&master_image->lock);
	if (master_image->locked == 0)
		printk(KERN_ERR "Image is already free\n");

	master_image->locked = 0;
	spin_unlock(&master_image->lock);

	/* Free up resource memory */
	kfree(resource);
}
EXPORT_SYMBOL(vme_master_free);
640
/*
 * Request a DMA controller with specific attributes, return some unique
 * identifier.
 *
 * Walks the bridge's DMA controller list for an unlocked controller
 * whose route attributes cover @route, claims it under its mutex, and
 * returns a vme_resource handle for it (NULL on failure).
 */
struct vme_resource *vme_dma_request(struct vme_dev *vdev, u32 route)
{
	struct vme_bridge *bridge;
	struct list_head *dma_pos = NULL;
	struct vme_dma_resource *allocated_ctrlr = NULL;
	struct vme_dma_resource *dma_ctrlr = NULL;
	struct vme_resource *resource = NULL;

	/* XXX Not checking resource attributes */
	printk(KERN_ERR "No VME resource Attribute tests done\n");

	bridge = vdev->bridge;
	if (bridge == NULL) {
		printk(KERN_ERR "Can't find VME bus\n");
		goto err_bus;
	}

	/* Loop through DMA resources */
	list_for_each(dma_pos, &bridge->dma_resources) {
		dma_ctrlr = list_entry(dma_pos,
			struct vme_dma_resource, list);

		if (dma_ctrlr == NULL) {
			printk(KERN_ERR "Registered NULL DMA resource\n");
			continue;
		}

		/* Find an unlocked and compatible controller */
		mutex_lock(&dma_ctrlr->mtx);
		if (((dma_ctrlr->route_attr & route) == route) &&
			(dma_ctrlr->locked == 0)) {

			dma_ctrlr->locked = 1;
			mutex_unlock(&dma_ctrlr->mtx);
			allocated_ctrlr = dma_ctrlr;
			break;
		}
		mutex_unlock(&dma_ctrlr->mtx);
	}

	/* Check to see if we found a resource */
	if (allocated_ctrlr == NULL)
		goto err_ctrlr;

	resource = kmalloc(sizeof(struct vme_resource), GFP_KERNEL);
	if (resource == NULL) {
		printk(KERN_WARNING "Unable to allocate resource structure\n");
		goto err_alloc;
	}
	resource->type = VME_DMA;
	resource->entry = &allocated_ctrlr->list;

	return resource;

err_alloc:
	/* Unlock image */
	mutex_lock(&dma_ctrlr->mtx);
	dma_ctrlr->locked = 0;
	mutex_unlock(&dma_ctrlr->mtx);
err_ctrlr:
err_bus:
	return NULL;
}
EXPORT_SYMBOL(vme_dma_request);
709
/*
 * Start new list
 *
 * Allocate and initialise an empty DMA link list bound to the DMA
 * controller behind @resource.  Returns the list or NULL on failure.
 */
struct vme_dma_list *vme_new_dma_list(struct vme_resource *resource)
{
	struct vme_dma_resource *ctrlr;
	struct vme_dma_list *dma_list;

	if (resource->type != VME_DMA) {
		printk(KERN_ERR "Not a DMA resource\n");
		return NULL;
	}

	ctrlr = list_entry(resource->entry, struct vme_dma_resource, list);

	dma_list = kmalloc(sizeof(struct vme_dma_list), GFP_KERNEL);
	if (dma_list == NULL) {
		printk(KERN_ERR "Unable to allocate memory for new dma list\n");
		return NULL;
	}
	INIT_LIST_HEAD(&dma_list->entries);
	dma_list->parent = ctrlr;
	mutex_init(&dma_list->mtx);

	return dma_list;
}
EXPORT_SYMBOL(vme_new_dma_list);
737
/*
 * Create "Pattern" type attributes
 *
 * Allocate a DMA attribute describing a pattern-generator endpoint with
 * the given @pattern value and pattern @type.  Returns the attribute or
 * NULL on allocation failure.  Caller frees with vme_dma_free_attribute().
 */
struct vme_dma_attr *vme_dma_pattern_attribute(u32 pattern, u32 type)
{
	struct vme_dma_attr *attributes;
	struct vme_dma_pattern *pattern_attr;

	attributes = kmalloc(sizeof(struct vme_dma_attr), GFP_KERNEL);
	if (attributes == NULL) {
		printk(KERN_ERR "Unable to allocate memory for attributes structure\n");
		goto err_attr;
	}

	pattern_attr = kmalloc(sizeof(struct vme_dma_pattern), GFP_KERNEL);
	if (pattern_attr == NULL) {
		printk(KERN_ERR "Unable to allocate memory for pattern attributes\n");
		goto err_pat;
	}

	attributes->type = VME_DMA_PATTERN;
	attributes->private = (void *)pattern_attr;

	pattern_attr->pattern = pattern;
	pattern_attr->type = type;

	return attributes;

err_pat:
	kfree(attributes);
err_attr:
	return NULL;
}
EXPORT_SYMBOL(vme_dma_pattern_attribute);
772
773/*
774 * Create "PCI" type attributes
775 */
776struct vme_dma_attr *vme_dma_pci_attribute(dma_addr_t address)
777{
778 struct vme_dma_attr *attributes;
779 struct vme_dma_pci *pci_attr;
780
781 /* XXX Run some sanity checks here */
782
783 attributes = kmalloc(sizeof(struct vme_dma_attr), GFP_KERNEL);
784 if (attributes == NULL) {
785 printk(KERN_ERR "Unable to allocate memory for attributes structure\n");
786 goto err_attr;
787 }
788
789 pci_attr = kmalloc(sizeof(struct vme_dma_pci), GFP_KERNEL);
790 if (pci_attr == NULL) {
791 printk(KERN_ERR "Unable to allocate memory for pci attributes\n");
792 goto err_pci;
793 }
794
795
796
797 attributes->type = VME_DMA_PCI;
798 attributes->private = (void *)pci_attr;
799
800 pci_attr->address = address;
801
802 return attributes;
803
804err_pci:
805 kfree(attributes);
806err_attr:
807 return NULL;
808}
809EXPORT_SYMBOL(vme_dma_pci_attribute);
810
811/*
812 * Create "VME" type attributes
813 */
814struct vme_dma_attr *vme_dma_vme_attribute(unsigned long long address,
815 u32 aspace, u32 cycle, u32 dwidth)
816{
817 struct vme_dma_attr *attributes;
818 struct vme_dma_vme *vme_attr;
819
820 attributes = kmalloc(
821 sizeof(struct vme_dma_attr), GFP_KERNEL);
822 if (attributes == NULL) {
823 printk(KERN_ERR "Unable to allocate memory for attributes structure\n");
824 goto err_attr;
825 }
826
827 vme_attr = kmalloc(sizeof(struct vme_dma_vme), GFP_KERNEL);
828 if (vme_attr == NULL) {
829 printk(KERN_ERR "Unable to allocate memory for vme attributes\n");
830 goto err_vme;
831 }
832
833 attributes->type = VME_DMA_VME;
834 attributes->private = (void *)vme_attr;
835
836 vme_attr->address = address;
837 vme_attr->aspace = aspace;
838 vme_attr->cycle = cycle;
839 vme_attr->dwidth = dwidth;
840
841 return attributes;
842
843err_vme:
844 kfree(attributes);
845err_attr:
846 return NULL;
847}
848EXPORT_SYMBOL(vme_dma_vme_attribute);
849
/*
 * Free attribute
 *
 * Release a DMA attribute created by one of the vme_dma_*_attribute()
 * helpers, including its type-specific private payload.
 */
void vme_dma_free_attribute(struct vme_dma_attr *attributes)
{
	kfree(attributes->private);
	kfree(attributes);
}
EXPORT_SYMBOL(vme_dma_free_attribute);
859
/*
 * Append a transfer of @count bytes from @src to @dest to a DMA link
 * list.  Fails with -EINVAL if the list is already submitted (its mutex
 * is held) or the bridge does not support list building.
 */
int vme_dma_list_add(struct vme_dma_list *list, struct vme_dma_attr *src,
	struct vme_dma_attr *dest, size_t count)
{
	struct vme_bridge *bridge = list->parent->parent;
	int retval;

	if (bridge->dma_list_add == NULL) {
		printk(KERN_WARNING "Link List DMA generation not supported\n");
		return -EINVAL;
	}

	/* trylock: a held mutex means the list is executing — don't block */
	if (!mutex_trylock(&list->mtx)) {
		printk(KERN_ERR "Link List already submitted\n");
		return -EINVAL;
	}

	retval = bridge->dma_list_add(list, src, dest, count);

	mutex_unlock(&list->mtx);

	return retval;
}
EXPORT_SYMBOL(vme_dma_list_add);
883
/*
 * Submit a DMA link list for execution, holding the list mutex for the
 * duration so it cannot be modified while in flight.  Returns the
 * bridge driver's result or -EINVAL if execution is unsupported.
 */
int vme_dma_list_exec(struct vme_dma_list *list)
{
	struct vme_bridge *bridge = list->parent->parent;
	int retval;

	if (bridge->dma_list_exec == NULL) {
		printk(KERN_ERR "Link List DMA execution not supported\n");
		return -EINVAL;
	}

	mutex_lock(&list->mtx);

	retval = bridge->dma_list_exec(list);

	mutex_unlock(&list->mtx);

	return retval;
}
EXPORT_SYMBOL(vme_dma_list_exec);
903
/*
 * Destroy a DMA link list: empty its driver-specific entries via the
 * bridge, then free the list itself.  Fails with -EINVAL if the list is
 * busy, or propagates the bridge's error if emptying fails.
 */
int vme_dma_list_free(struct vme_dma_list *list)
{
	struct vme_bridge *bridge = list->parent->parent;
	int retval;

	if (bridge->dma_list_empty == NULL) {
		printk(KERN_WARNING "Emptying of Link Lists not supported\n");
		return -EINVAL;
	}

	/* trylock: a held mutex means the list is in use — don't block */
	if (!mutex_trylock(&list->mtx)) {
		printk(KERN_ERR "Link List in use\n");
		return -EINVAL;
	}

	/*
	 * Empty out all of the entries from the dma list. We need to go to the
	 * low level driver as dma entries are driver specific.
	 */
	retval = bridge->dma_list_empty(list);
	if (retval) {
		printk(KERN_ERR "Unable to empty link-list entries\n");
		mutex_unlock(&list->mtx);
		return retval;
	}
	mutex_unlock(&list->mtx);
	kfree(list);

	return retval;
}
EXPORT_SYMBOL(vme_dma_list_free);
935
/*
 * Release a DMA controller obtained via vme_dma_request().  Refuses
 * with -EBUSY if the controller is locked by someone else or still has
 * pending/running transfers.
 *
 * NOTE(review): unlike vme_slave_free()/vme_master_free(), this does
 * not kfree() the vme_resource handle — confirm the caller owns that.
 */
int vme_dma_free(struct vme_resource *resource)
{
	struct vme_dma_resource *ctrlr;

	if (resource->type != VME_DMA) {
		printk(KERN_ERR "Not a DMA resource\n");
		return -EINVAL;
	}

	ctrlr = list_entry(resource->entry, struct vme_dma_resource, list);

	if (!mutex_trylock(&ctrlr->mtx)) {
		printk(KERN_ERR "Resource busy, can't free\n");
		return -EBUSY;
	}

	if (!(list_empty(&ctrlr->pending) && list_empty(&ctrlr->running))) {
		printk(KERN_WARNING "Resource still processing transfers\n");
		mutex_unlock(&ctrlr->mtx);
		return -EBUSY;
	}

	ctrlr->locked = 0;

	mutex_unlock(&ctrlr->mtx);

	return 0;
}
EXPORT_SYMBOL(vme_dma_free);
965
966void vme_irq_handler(struct vme_bridge *bridge, int level, int statid)
967{
968 void (*call)(int, int, void *);
969 void *priv_data;
970
971 call = bridge->irq[level - 1].callback[statid].func;
972 priv_data = bridge->irq[level - 1].callback[statid].priv_data;
973
974 if (call != NULL)
975 call(level, statid, priv_data);
976 else
977 printk(KERN_WARNING "Spurilous VME interrupt, level:%x, vector:%x\n",
978 level, statid);
979}
980EXPORT_SYMBOL(vme_irq_handler);
981
/*
 * Register @callback for VME interrupt @level (1-7) with status/ID
 * vector @statid.  The bridge IRQ level is enabled once the handler is
 * installed.  Returns 0, -EINVAL on bad arguments / missing support, or
 * -EBUSY if a handler is already registered for that level/vector.
 */
int vme_irq_request(struct vme_dev *vdev, int level, int statid,
	void (*callback)(int, int, void *),
	void *priv_data)
{
	struct vme_bridge *bridge;

	bridge = vdev->bridge;
	if (bridge == NULL) {
		printk(KERN_ERR "Can't find VME bus\n");
		return -EINVAL;
	}

	if ((level < 1) || (level > 7)) {
		printk(KERN_ERR "Invalid interrupt level\n");
		return -EINVAL;
	}

	if (bridge->irq_set == NULL) {
		printk(KERN_ERR "Configuring interrupts not supported\n");
		return -EINVAL;
	}

	mutex_lock(&bridge->irq_mtx);

	if (bridge->irq[level - 1].callback[statid].func) {
		mutex_unlock(&bridge->irq_mtx);
		printk(KERN_WARNING "VME Interrupt already taken\n");
		return -EBUSY;
	}

	/* Track how many handlers share this level (see vme_irq_free) */
	bridge->irq[level - 1].count++;
	bridge->irq[level - 1].callback[statid].priv_data = priv_data;
	bridge->irq[level - 1].callback[statid].func = callback;

	/* Enable IRQ level */
	bridge->irq_set(bridge, level, 1, 1);

	mutex_unlock(&bridge->irq_mtx);

	return 0;
}
EXPORT_SYMBOL(vme_irq_request);
1024
/*
 * Unregister the handler for VME interrupt @level (1-7) and vector
 * @statid, disabling the bridge IRQ level when no handlers remain on it.
 */
void vme_irq_free(struct vme_dev *vdev, int level, int statid)
{
	struct vme_bridge *bridge;

	bridge = vdev->bridge;
	if (bridge == NULL) {
		printk(KERN_ERR "Can't find VME bus\n");
		return;
	}

	if ((level < 1) || (level > 7)) {
		printk(KERN_ERR "Invalid interrupt level\n");
		return;
	}

	if (bridge->irq_set == NULL) {
		printk(KERN_ERR "Configuring interrupts not supported\n");
		return;
	}

	mutex_lock(&bridge->irq_mtx);

	bridge->irq[level - 1].count--;

	/* Disable IRQ level if no more interrupts attached at this level*/
	if (bridge->irq[level - 1].count == 0)
		bridge->irq_set(bridge, level, 0, 1);

	bridge->irq[level - 1].callback[statid].func = NULL;
	bridge->irq[level - 1].callback[statid].priv_data = NULL;

	mutex_unlock(&bridge->irq_mtx);
}
EXPORT_SYMBOL(vme_irq_free);
1059
/*
 * Generate a VME interrupt at @level (1-7) with status/ID vector
 * @statid via the bridge.  Returns the bridge driver's result or
 * -EINVAL on bad arguments / missing support.
 */
int vme_irq_generate(struct vme_dev *vdev, int level, int statid)
{
	struct vme_bridge *bridge;

	bridge = vdev->bridge;
	if (bridge == NULL) {
		printk(KERN_ERR "Can't find VME bus\n");
		return -EINVAL;
	}

	if ((level < 1) || (level > 7)) {
		printk(KERN_WARNING "Invalid interrupt level\n");
		return -EINVAL;
	}

	if (bridge->irq_generate == NULL) {
		printk(KERN_WARNING "Interrupt generation not supported\n");
		return -EINVAL;
	}

	return bridge->irq_generate(bridge, level, statid);
}
EXPORT_SYMBOL(vme_irq_generate);
1083
/*
 * Request the location monitor, return resource or NULL
 *
 * Walks the bridge's location monitor list for an unlocked monitor,
 * claims it under its mutex, and returns a vme_resource handle for it.
 */
struct vme_resource *vme_lm_request(struct vme_dev *vdev)
{
	struct vme_bridge *bridge;
	struct list_head *lm_pos = NULL;
	struct vme_lm_resource *allocated_lm = NULL;
	struct vme_lm_resource *lm = NULL;
	struct vme_resource *resource = NULL;

	bridge = vdev->bridge;
	if (bridge == NULL) {
		printk(KERN_ERR "Can't find VME bus\n");
		goto err_bus;
	}

	/* Loop through DMA resources */
	list_for_each(lm_pos, &bridge->lm_resources) {
		lm = list_entry(lm_pos,
			struct vme_lm_resource, list);

		if (lm == NULL) {
			printk(KERN_ERR "Registered NULL Location Monitor resource\n");
			continue;
		}

		/* Find an unlocked controller */
		mutex_lock(&lm->mtx);
		if (lm->locked == 0) {
			lm->locked = 1;
			mutex_unlock(&lm->mtx);
			allocated_lm = lm;
			break;
		}
		mutex_unlock(&lm->mtx);
	}

	/* Check to see if we found a resource */
	if (allocated_lm == NULL)
		goto err_lm;

	resource = kmalloc(sizeof(struct vme_resource), GFP_KERNEL);
	if (resource == NULL) {
		printk(KERN_ERR "Unable to allocate resource structure\n");
		goto err_alloc;
	}
	resource->type = VME_LM;
	resource->entry = &allocated_lm->list;

	return resource;

err_alloc:
	/* Unlock image */
	mutex_lock(&lm->mtx);
	lm->locked = 0;
	mutex_unlock(&lm->mtx);
err_lm:
err_bus:
	return NULL;
}
EXPORT_SYMBOL(vme_lm_request);
1146
1147int vme_lm_count(struct vme_resource *resource)
1148{
1149 struct vme_lm_resource *lm;
1150
1151 if (resource->type != VME_LM) {
1152 printk(KERN_ERR "Not a Location Monitor resource\n");
1153 return -EINVAL;
1154 }
1155
1156 lm = list_entry(resource->entry, struct vme_lm_resource, list);
1157
1158 return lm->monitors;
1159}
1160EXPORT_SYMBOL(vme_lm_count);
1161
/*
 * Configure a location monitor: the VME base address it watches plus
 * the address space and cycle attributes.  Returns the bridge driver's
 * result or -EINVAL.
 */
int vme_lm_set(struct vme_resource *resource, unsigned long long lm_base,
	u32 aspace, u32 cycle)
{
	struct vme_bridge *bridge = find_bridge(resource);
	struct vme_lm_resource *lm;

	if (resource->type != VME_LM) {
		printk(KERN_ERR "Not a Location Monitor resource\n");
		return -EINVAL;
	}

	lm = list_entry(resource->entry, struct vme_lm_resource, list);

	if (bridge->lm_set == NULL) {
		printk(KERN_ERR "vme_lm_set not supported\n");
		return -EINVAL;
	}

	return bridge->lm_set(lm, lm_base, aspace, cycle);
}
EXPORT_SYMBOL(vme_lm_set);
1183
/*
 * Read back a location monitor's configuration into the supplied output
 * parameters.  Returns the bridge driver's result or -EINVAL.
 */
int vme_lm_get(struct vme_resource *resource, unsigned long long *lm_base,
	u32 *aspace, u32 *cycle)
{
	struct vme_bridge *bridge = find_bridge(resource);
	struct vme_lm_resource *lm;

	if (resource->type != VME_LM) {
		printk(KERN_ERR "Not a Location Monitor resource\n");
		return -EINVAL;
	}

	lm = list_entry(resource->entry, struct vme_lm_resource, list);

	if (bridge->lm_get == NULL) {
		printk(KERN_ERR "vme_lm_get not supported\n");
		return -EINVAL;
	}

	return bridge->lm_get(lm, lm_base, aspace, cycle);
}
EXPORT_SYMBOL(vme_lm_get);
1205
/*
 * Attach @callback to monitor slot @monitor of the location monitor
 * resource.  Returns the bridge driver's result or -EINVAL.
 */
int vme_lm_attach(struct vme_resource *resource, int monitor,
	void (*callback)(int))
{
	struct vme_bridge *bridge = find_bridge(resource);
	struct vme_lm_resource *lm;

	if (resource->type != VME_LM) {
		printk(KERN_ERR "Not a Location Monitor resource\n");
		return -EINVAL;
	}

	lm = list_entry(resource->entry, struct vme_lm_resource, list);

	if (bridge->lm_attach == NULL) {
		printk(KERN_ERR "vme_lm_attach not supported\n");
		return -EINVAL;
	}

	return bridge->lm_attach(lm, monitor, callback);
}
EXPORT_SYMBOL(vme_lm_attach);
1227
/*
 * Detach the callback from monitor slot @monitor of the location
 * monitor resource.  Returns the bridge driver's result or -EINVAL.
 */
int vme_lm_detach(struct vme_resource *resource, int monitor)
{
	struct vme_bridge *bridge = find_bridge(resource);
	struct vme_lm_resource *lm;

	if (resource->type != VME_LM) {
		printk(KERN_ERR "Not a Location Monitor resource\n");
		return -EINVAL;
	}

	lm = list_entry(resource->entry, struct vme_lm_resource, list);

	if (bridge->lm_detach == NULL) {
		printk(KERN_ERR "vme_lm_detach not supported\n");
		return -EINVAL;
	}

	return bridge->lm_detach(lm, monitor);
}
EXPORT_SYMBOL(vme_lm_detach);
1248
/*
 * Release a location monitor obtained via vme_lm_request(): clear its
 * locked flag and free the resource handle.
 */
void vme_lm_free(struct vme_resource *resource)
{
	struct vme_lm_resource *lm;

	if (resource->type != VME_LM) {
		printk(KERN_ERR "Not a Location Monitor resource\n");
		return;
	}

	lm = list_entry(resource->entry, struct vme_lm_resource, list);

	mutex_lock(&lm->mtx);

	/* XXX
	 * Check to see that there aren't any callbacks still attached, if
	 * there are we should probably be detaching them!
	 */

	lm->locked = 0;

	mutex_unlock(&lm->mtx);

	kfree(resource);
}
EXPORT_SYMBOL(vme_lm_free);
1274
1275int vme_slot_get(struct vme_dev *vdev)
1276{
1277 struct vme_bridge *bridge;
1278
1279 bridge = vdev->bridge;
1280 if (bridge == NULL) {
1281 printk(KERN_ERR "Can't find VME bus\n");
1282 return -EINVAL;
1283 }
1284
1285 if (bridge->slot_get == NULL) {
1286 printk(KERN_WARNING "vme_slot_get not supported\n");
1287 return -EINVAL;
1288 }
1289
1290 return bridge->slot_get(bridge);
1291}
1292EXPORT_SYMBOL(vme_slot_get);
1293
1294
1295/* - Bridge Registration --------------------------------------------------- */
1296
/* Device-model release hook: frees the containing vme_dev when the last
 * reference to its embedded struct device is dropped. */
static void vme_dev_release(struct device *dev)
{
	kfree(dev_to_vme_dev(dev));
}
1301
/*
 * Register a VME bridge with the core: allocate the lowest free bus
 * number from the vme_bus_numbers bitmask and add the bridge to the
 * global bus list.  Returns 0 on success.
 *
 * NOTE(review): returns -1 rather than a negative errno when all bus
 * numbers are exhausted — callers treating the result as an errno
 * should confirm this is intended.
 */
int vme_register_bridge(struct vme_bridge *bridge)
{
	int i;
	int ret = -1;

	mutex_lock(&vme_buses_lock);
	for (i = 0; i < sizeof(vme_bus_numbers) * 8; i++) {
		if ((vme_bus_numbers & (1 << i)) == 0) {
			vme_bus_numbers |= (1 << i);
			bridge->num = i;
			INIT_LIST_HEAD(&bridge->devices);
			list_add_tail(&bridge->bus_list, &vme_bus_list);
			ret = 0;
			break;
		}
	}
	mutex_unlock(&vme_buses_lock);

	return ret;
}
EXPORT_SYMBOL(vme_register_bridge);
1323
/*
 * Unregister a VME bridge: release its bus number, unregister every
 * device still attached to it, and remove it from the global bus list.
 */
void vme_unregister_bridge(struct vme_bridge *bridge)
{
	struct vme_dev *vdev;
	struct vme_dev *tmp;

	mutex_lock(&vme_buses_lock);
	vme_bus_numbers &= ~(1 << bridge->num);
	/* _safe iteration: device_unregister may free each vdev */
	list_for_each_entry_safe(vdev, tmp, &bridge->devices, bridge_list) {
		list_del(&vdev->drv_list);
		list_del(&vdev->bridge_list);
		device_unregister(&vdev->dev);
	}
	list_del(&bridge->bus_list);
	mutex_unlock(&vme_buses_lock);
}
EXPORT_SYMBOL(vme_unregister_bridge);
1340
1341/* - Driver Registration --------------------------------------------------- */
1342
/*
 * Create and register up to @ndevs vme_dev instances for @drv on one
 * @bridge.  On any failure, all devices already created for @drv are
 * torn down.  Returns 0 or a negative errno.
 */
static int __vme_register_driver_bus(struct vme_driver *drv,
	struct vme_bridge *bridge, unsigned int ndevs)
{
	int err;
	unsigned int i;
	struct vme_dev *vdev;
	struct vme_dev *tmp;

	for (i = 0; i < ndevs; i++) {
		vdev = kzalloc(sizeof(struct vme_dev), GFP_KERNEL);
		if (!vdev) {
			err = -ENOMEM;
			goto err_devalloc;
		}
		vdev->num = i;
		vdev->bridge = bridge;
		/* platform_data carries the owning driver; vme_bus_match
		 * clears it when the driver's match() rejects the device */
		vdev->dev.platform_data = drv;
		vdev->dev.release = vme_dev_release;
		vdev->dev.parent = bridge->parent;
		vdev->dev.bus = &vme_bus_type;
		dev_set_name(&vdev->dev, "%s.%u-%u", drv->name, bridge->num,
			vdev->num);

		err = device_register(&vdev->dev);
		if (err)
			goto err_reg;

		if (vdev->dev.platform_data) {
			list_add_tail(&vdev->drv_list, &drv->devices);
			list_add_tail(&vdev->bridge_list, &bridge->devices);
		} else
			/* match() rejected it — drop the device again */
			device_unregister(&vdev->dev);
	}
	return 0;

err_reg:
	kfree(vdev);
err_devalloc:
	/* Unwind every device created so far for this driver */
	list_for_each_entry_safe(vdev, tmp, &drv->devices, drv_list) {
		list_del(&vdev->drv_list);
		list_del(&vdev->bridge_list);
		device_unregister(&vdev->dev);
	}
	return err;
}
1388
/*
 * Register @drv against every bridge currently on the bus, creating up
 * to @ndevs devices per bridge.  Stops at the first failure and returns
 * its error code.  Runs under vme_buses_lock.
 */
static int __vme_register_driver(struct vme_driver *drv, unsigned int ndevs)
{
	struct vme_bridge *bridge;
	int err = 0;

	mutex_lock(&vme_buses_lock);
	list_for_each_entry(bridge, &vme_bus_list, bus_list) {
		/*
		 * This cannot cause trouble as we already have vme_buses_lock
		 * and if the bridge is removed, it will have to go through
		 * vme_unregister_bridge() to do it (which calls remove() on
		 * the bridge which in turn tries to acquire vme_buses_lock and
		 * will have to wait).
		 */
		err = __vme_register_driver_bus(drv, bridge, ndevs);
		if (err)
			break;
	}
	mutex_unlock(&vme_buses_lock);
	return err;
}
1410
/*
 * Public driver-registration entry point: register the embedded
 * device_driver with the VME bus type, then create up to @ndevs devices
 * per bridge.  The driver registration is rolled back if device
 * creation fails.  Returns 0 or a negative errno.
 */
int vme_register_driver(struct vme_driver *drv, unsigned int ndevs)
{
	int err;

	drv->driver.name = drv->name;
	drv->driver.bus = &vme_bus_type;
	INIT_LIST_HEAD(&drv->devices);

	err = driver_register(&drv->driver);
	if (err)
		return err;

	err = __vme_register_driver(drv, ndevs);
	if (err)
		driver_unregister(&drv->driver);

	return err;
}
EXPORT_SYMBOL(vme_register_driver);
1430
/*
 * Public driver-removal entry point: unregister every device created
 * for @drv, then unregister the embedded device_driver.
 */
void vme_unregister_driver(struct vme_driver *drv)
{
	struct vme_dev *dev, *dev_tmp;

	mutex_lock(&vme_buses_lock);
	/* _safe iteration: device_unregister may free each dev */
	list_for_each_entry_safe(dev, dev_tmp, &drv->devices, drv_list) {
		list_del(&dev->drv_list);
		list_del(&dev->bridge_list);
		device_unregister(&dev->dev);
	}
	mutex_unlock(&vme_buses_lock);

	driver_unregister(&drv->driver);
}
EXPORT_SYMBOL(vme_unregister_driver);
1446
1447/* - Bus Registration ------------------------------------------------------ */
1448
/*
 * Bus match callback.  A vme_dev only matches the driver stored in its
 * platform_data at creation time; the driver's own match() then decides
 * whether this device instance should be probed.  On rejection,
 * platform_data is cleared so __vme_register_driver_bus() drops the
 * device.
 */
static int vme_bus_match(struct device *dev, struct device_driver *drv)
{
	struct vme_driver *vme_drv;

	vme_drv = container_of(drv, struct vme_driver, driver);

	if (dev->platform_data == vme_drv) {
		struct vme_dev *vdev = dev_to_vme_dev(dev);

		if (vme_drv->match && vme_drv->match(vdev))
			return 1;

		dev->platform_data = NULL;
	}
	return 0;
}
1465
1466static int vme_bus_probe(struct device *dev)
1467{
1468 int retval = -ENODEV;
1469 struct vme_driver *driver;
1470 struct vme_dev *vdev = dev_to_vme_dev(dev);
1471
1472 driver = dev->platform_data;
1473
1474 if (driver->probe != NULL)
1475 retval = driver->probe(vdev);
1476
1477 return retval;
1478}
1479
1480static int vme_bus_remove(struct device *dev)
1481{
1482 int retval = -ENODEV;
1483 struct vme_driver *driver;
1484 struct vme_dev *vdev = dev_to_vme_dev(dev);
1485
1486 driver = dev->platform_data;
1487
1488 if (driver->remove != NULL)
1489 retval = driver->remove(vdev);
1490
1491 return retval;
1492}
1493
/* The VME bus type: ties the match/probe/remove callbacks above into
 * the driver core. */
struct bus_type vme_bus_type = {
	.name = "vme",
	.match = vme_bus_match,
	.probe = vme_bus_probe,
	.remove = vme_bus_remove,
};
EXPORT_SYMBOL(vme_bus_type);
1501
1502static int __init vme_init(void)
1503{
1504 return bus_register(&vme_bus_type);
1505}
1506
1507static void __exit vme_exit(void)
1508{
1509 bus_unregister(&vme_bus_type);
1510}
1511
1512MODULE_DESCRIPTION("VME bridge driver framework");
1513MODULE_AUTHOR("Martyn Welch <martyn.welch@ge.com");
1514MODULE_LICENSE("GPL");
1515
1516module_init(vme_init);
1517module_exit(vme_exit);
diff --git a/drivers/vme/vme_api.txt b/drivers/vme/vme_api.txt
new file mode 100644
index 00000000000..856efa35f6e
--- /dev/null
+++ b/drivers/vme/vme_api.txt
@@ -0,0 +1,396 @@
1 VME Device Driver API
2 =====================
3
4Driver registration
5===================
6
 7As with other subsystems within the Linux kernel, VME device drivers register
 8with the VME subsystem; registration is typically performed from the device
 9driver's init routine. This is achieved via a call to the following function:
10
11 int vme_register_driver (struct vme_driver *driver);
12
13If driver registration is successful this function returns zero, if an error
14occurred a negative error code will be returned.
15
16A pointer to a structure of type 'vme_driver' must be provided to the
17registration function. The structure is as follows:
18
19 struct vme_driver {
20 struct list_head node;
21 const char *name;
22 int (*match)(struct vme_dev *);
23 int (*probe)(struct vme_dev *);
24 int (*remove)(struct vme_dev *);
25 void (*shutdown)(void);
26 struct device_driver driver;
27 struct list_head devices;
28 unsigned int ndev;
29 };
30
31At the minimum, the '.name', '.match' and '.probe' elements of this structure
32should be correctly set. The '.name' element is a pointer to a string holding
33the device driver's name.
34
35The '.match' function allows controlling the number of devices that need to
36be registered. The match function should return 1 if a device should be
37probed and 0 otherwise. This example match function (from vme_user.c) limits
38the number of devices probed to one:
39
40 #define USER_BUS_MAX 1
41 ...
42 static int vme_user_match(struct vme_dev *vdev)
43 {
44 if (vdev->id.num >= USER_BUS_MAX)
45 return 0;
46 return 1;
47 }
48
49The '.probe' element should contain a pointer to the probe routine. The
50probe routine is passed a 'struct vme_dev' pointer as an argument. The
51'struct vme_dev' structure looks like the following:
52
53 struct vme_dev {
54 int num;
55 struct vme_bridge *bridge;
56 struct device dev;
57 struct list_head drv_list;
58 struct list_head bridge_list;
59 };
60
61Here, the 'num' field refers to the sequential device ID for this specific
62driver. The bridge number (or bus number) can be accessed using
63dev->bridge->num.
64
65A function is also provided to unregister the driver from the VME core and is
66usually called from the device driver's exit routine:
67
68 void vme_unregister_driver (struct vme_driver *driver);
69
70
71Resource management
72===================
73
74Once a driver has registered with the VME core the provided match routine will
75be called the number of times specified during the registration. If a match
76succeeds, a non-zero value should be returned. A zero return value indicates
77failure. For all successful matches, the probe routine of the corresponding
 78driver is called. The probe routine is passed a pointer to the device's
 79device structure. This pointer should be saved, as it will be required for
80requesting VME resources.
81
82The driver can request ownership of one or more master windows, slave windows
83and/or dma channels. Rather than allowing the device driver to request a
84specific window or DMA channel (which may be used by a different driver) this
85driver allows a resource to be assigned based on the required attributes of the
86driver in question:
87
88 struct vme_resource * vme_master_request(struct vme_dev *dev,
89 u32 aspace, u32 cycle, u32 width);
90
91 struct vme_resource * vme_slave_request(struct vme_dev *dev, u32 aspace,
92 u32 cycle);
93
94 struct vme_resource *vme_dma_request(struct vme_dev *dev, u32 route);
95
96For slave windows these attributes are split into the VME address spaces that
97need to be accessed in 'aspace' and VME bus cycle types required in 'cycle'.
98Master windows add a further set of attributes in 'width' specifying the
99required data transfer widths. These attributes are defined as bitmasks and as
100such any combination of the attributes can be requested for a single window,
101the core will assign a window that meets the requirements, returning a pointer
102of type vme_resource that should be used to identify the allocated resource
103when it is used. For DMA controllers, the request function requires the
104potential direction of any transfers to be provided in the route attributes.
105This is typically VME-to-MEM and/or MEM-to-VME, though some hardware can
106support VME-to-VME and MEM-to-MEM transfers as well as test pattern generation.
107If an unallocated window fitting the requirements can not be found a NULL
108pointer will be returned.
109
110Functions are also provided to free window allocations once they are no longer
111required. These functions should be passed the pointer to the resource provided
112during resource allocation:
113
114 void vme_master_free(struct vme_resource *res);
115
116 void vme_slave_free(struct vme_resource *res);
117
118 void vme_dma_free(struct vme_resource *res);
119
120
121Master windows
122==============
123
124Master windows provide access from the local processor[s] out onto the VME bus.
125The number of windows available and the available access modes is dependent on
126the underlying chipset. A window must be configured before it can be used.
127
128
129Master window configuration
130---------------------------
131
132Once a master window has been assigned the following functions can be used to
133configure it and retrieve the current settings:
134
135 int vme_master_set (struct vme_resource *res, int enabled,
136 unsigned long long base, unsigned long long size, u32 aspace,
137 u32 cycle, u32 width);
138
139 int vme_master_get (struct vme_resource *res, int *enabled,
140 unsigned long long *base, unsigned long long *size, u32 *aspace,
141 u32 *cycle, u32 *width);
142
143The address spaces, transfer widths and cycle types are the same as described
144under resource management, however some of the options are mutually exclusive.
145For example, only one address space may be specified.
146
147These functions return 0 on success or an error code should the call fail.
148
149
150Master window access
151--------------------
152
153The following functions can be used to read from and write to configured master
154windows. These functions return the number of bytes copied:
155
156 ssize_t vme_master_read(struct vme_resource *res, void *buf,
157 size_t count, loff_t offset);
158
159 ssize_t vme_master_write(struct vme_resource *res, void *buf,
160 size_t count, loff_t offset);
161
162In addition to simple reads and writes, a function is provided to do a
163read-modify-write transaction. This function returns the original value of the
164VME bus location :
165
166 unsigned int vme_master_rmw (struct vme_resource *res,
167 unsigned int mask, unsigned int compare, unsigned int swap,
168 loff_t offset);
169
170This works by reading the value at the given offset and applying the mask. If
171the bits selected in the mask match the values of the corresponding bits in the
172compare field, the value of swap is written to the specified offset.
173
174
175Slave windows
176=============
177
178Slave windows provide devices on the VME bus access into mapped portions of the
179local memory. The number of windows available and the access modes that can be
180used is dependent on the underlying chipset. A window must be configured before
181it can be used.
182
183
184Slave window configuration
185--------------------------
186
187Once a slave window has been assigned the following functions can be used to
188configure it and retrieve the current settings:
189
190 int vme_slave_set (struct vme_resource *res, int enabled,
191 unsigned long long base, unsigned long long size,
192 dma_addr_t mem, u32 aspace, u32 cycle);
193
194 int vme_slave_get (struct vme_resource *res, int *enabled,
195 unsigned long long *base, unsigned long long *size,
196 dma_addr_t *mem, u32 *aspace, u32 *cycle);
197
198The address spaces, transfer widths and cycle types are the same as described
199under resource management, however some of the options are mutually exclusive.
200For example, only one address space may be specified.
201
202These functions return 0 on success or an error code should the call fail.
203
204
205Slave window buffer allocation
206------------------------------
207
208Functions are provided to allow the user to allocate and free contiguous
209buffers which will be accessible by the VME bridge. These functions do not have
210to be used, other methods can be used to allocate a buffer, though care must be
211taken to ensure that they are contiguous and accessible by the VME bridge:
212
213 void * vme_alloc_consistent(struct vme_resource *res, size_t size,
214 dma_addr_t *mem);
215
216 void vme_free_consistent(struct vme_resource *res, size_t size,
217 void *virt, dma_addr_t mem);
218
219
220Slave window access
221-------------------
222
223Slave windows map local memory onto the VME bus, the standard methods for
224accessing memory should be used.
225
226
227DMA channels
228============
229
230The VME DMA transfer provides the ability to run link-list DMA transfers. The
231API introduces the concept of DMA lists. Each DMA list is a link-list which can
232be passed to a DMA controller. Multiple lists can be created, extended,
233executed, reused and destroyed.
234
235
236List Management
237---------------
238
239The following functions are provided to create and destroy DMA lists. Execution
240of a list will not automatically destroy the list, thus enabling a list to be
241reused for repetitive tasks:
242
243 struct vme_dma_list *vme_new_dma_list(struct vme_resource *res);
244
245 int vme_dma_list_free(struct vme_dma_list *list);
246
247
248List Population
249---------------
250
251An item can be added to a list using the following function (the source and
252destination attributes need to be created before calling this function, this is
253covered under "Transfer Attributes"):
254
255 int vme_dma_list_add(struct vme_dma_list *list,
256 struct vme_dma_attr *src, struct vme_dma_attr *dest,
257 size_t count);
258
259NOTE: The detailed attributes of the transfers source and destination
260 are not checked until an entry is added to a DMA list, the request
261 for a DMA channel purely checks the directions in which the
262 controller is expected to transfer data. As a result it is
263 possible for this call to return an error, for example if the
264 source or destination is in an unsupported VME address space.
265
266Transfer Attributes
267-------------------
268
269The attributes for the source and destination are handled separately from adding
270an item to a list. This is due to the diverse attributes required for each type
271of source and destination. There are functions to create attributes for PCI, VME
272and pattern sources and destinations (where appropriate):
273
274Pattern source:
275
276 struct vme_dma_attr *vme_dma_pattern_attribute(u32 pattern, u32 type);
277
278PCI source or destination:
279
280 struct vme_dma_attr *vme_dma_pci_attribute(dma_addr_t mem);
281
282VME source or destination:
283
284 struct vme_dma_attr *vme_dma_vme_attribute(unsigned long long base,
285 u32 aspace, u32 cycle, u32 width);
286
287The following function should be used to free an attribute:
288
289 void vme_dma_free_attribute(struct vme_dma_attr *attr);
290
291
292List Execution
293--------------
294
295The following function queues a list for execution. The function will return
296once the list has been executed:
297
298 int vme_dma_list_exec(struct vme_dma_list *list);
299
300
301Interrupts
302==========
303
304The VME API provides functions to attach and detach callbacks to specific VME
305level and status ID combinations and for the generation of VME interrupts with
306specific VME level and status IDs.
307
308
309Attaching Interrupt Handlers
310----------------------------
311
312The following functions can be used to attach and free a specific VME level and
313status ID combination. Any given combination can only be assigned a single
314callback function. A void pointer parameter is provided, the value of which is
315passed to the callback function, the use of this pointer is user undefined:
316
317 int vme_irq_request(struct vme_dev *dev, int level, int statid,
318 void (*callback)(int, int, void *), void *priv);
319
320 void vme_irq_free(struct vme_dev *dev, int level, int statid);
321
322The callback parameters are as follows. Care must be taken in writing a callback
323function, callback functions run in interrupt context:
324
325 void callback(int level, int statid, void *priv);
326
327
328Interrupt Generation
329--------------------
330
331The following function can be used to generate a VME interrupt at a given VME
332level and VME status ID:
333
334 int vme_irq_generate(struct vme_dev *dev, int level, int statid);
335
336
337Location monitors
338=================
339
340The VME API provides the following functionality to configure the location
341monitor.
342
343
344Location Monitor Management
345---------------------------
346
347The following functions are provided to request the use of a block of location
348monitors and to free them after they are no longer required:
349
350 struct vme_resource * vme_lm_request(struct vme_dev *dev);
351
352 void vme_lm_free(struct vme_resource * res);
353
354Each block may provide a number of location monitors, monitoring adjacent
355locations. The following function can be used to determine how many locations
356are provided:
357
358 int vme_lm_count(struct vme_resource * res);
359
360
361Location Monitor Configuration
362------------------------------
363
364Once a bank of location monitors has been allocated, the following functions
365are provided to configure the location and mode of the location monitor:
366
367 int vme_lm_set(struct vme_resource *res, unsigned long long base,
368 u32 aspace, u32 cycle);
369
370 int vme_lm_get(struct vme_resource *res, unsigned long long *base,
371 u32 *aspace, u32 *cycle);
372
373
374Location Monitor Use
375--------------------
376
377The following functions allow a callback to be attached and detached from each
378location monitor location. Each location monitor can monitor a number of
379adjacent locations:
380
381 int vme_lm_attach(struct vme_resource *res, int num,
382 void (*callback)(int));
383
384 int vme_lm_detach(struct vme_resource *res, int num);
385
386The callback function is declared as follows:
387
388 void callback(int num);
389
390
391Slot Detection
392==============
393
394This function returns the slot ID of the provided bridge.
395
396 int vme_slot_get(struct vme_dev *dev);
diff --git a/drivers/vme/vme_bridge.h b/drivers/vme/vme_bridge.h
new file mode 100644
index 00000000000..934949abd74
--- /dev/null
+++ b/drivers/vme/vme_bridge.h
@@ -0,0 +1,174 @@
1#ifndef _VME_BRIDGE_H_
2#define _VME_BRIDGE_H_
3
4#define VME_CRCSR_BUF_SIZE (508*1024)
5/*
6 * Resource structures
7 */
/*
 * State for one master window of a bridge.  Master windows map local
 * CPU accesses out onto the VME bus (see vme_api.txt).
 */
 8struct vme_master_resource {
 9	struct list_head list;
10	struct vme_bridge *parent;	/* Owning bridge */
11	/*
12	 * We are likely to need to access the VME bus in interrupt context, so
13	 * protect master routines with a spinlock rather than a mutex.
14	 */
15	spinlock_t lock;
16	int locked;	/* presumably non-zero while claimed by a driver — confirm in vme.c */
17	int number;	/* Window index on this bridge */
18	u32 address_attr;	/* Supported VME address-space attributes (bitmask) */
19	u32 cycle_attr;	/* Supported VME cycle-type attributes (bitmask) */
20	u32 width_attr;	/* Supported data-width attributes (bitmask) */
21	struct resource bus_resource;
22	void __iomem *kern_base;	/* Kernel mapping of the window */
23};
24
/*
 * State for one slave window of a bridge.  Slave windows expose local
 * memory to other masters on the VME bus (see vme_api.txt).
 */
25struct vme_slave_resource {
26	struct list_head list;
27	struct vme_bridge *parent;	/* Owning bridge */
28	struct mutex mtx;
29	int locked;	/* presumably non-zero while claimed by a driver — confirm in vme.c */
30	int number;	/* Window index on this bridge */
31	u32 address_attr;	/* Supported VME address-space attributes (bitmask) */
32	u32 cycle_attr;	/* Supported VME cycle-type attributes (bitmask) */
33};
34
/* Pattern-generator DMA source attributes (cf. vme_dma_pattern_attribute()). */
35struct vme_dma_pattern {
36	u32 pattern;
37	u32 type;
38};
39
/* PCI-side DMA source/destination attributes (cf. vme_dma_pci_attribute()). */
40struct vme_dma_pci {
41	dma_addr_t address;
42};
43
/* VME-side DMA source/destination attributes (cf. vme_dma_vme_attribute()). */
44struct vme_dma_vme {
45	unsigned long long address;	/* VME bus address */
46	u32 aspace;	/* VME address space */
47	u32 cycle;	/* VME cycle type */
48	u32 dwidth;	/* VME data width */
49};
50
/*
 * One DMA link list; entries are added via dma_list_add and the list is
 * run via dma_list_exec (see struct vme_bridge below).
 */
51struct vme_dma_list {
52	struct list_head list;
53	struct vme_dma_resource *parent;	/* DMA channel this list belongs to */
54	struct list_head entries;	/* The linked transfer entries */
55	struct mutex mtx;	/* presumably serialises list manipulation — confirm in vme.c */
56};
57
/* State for one DMA channel/controller of a bridge. */
58struct vme_dma_resource {
59	struct list_head list;
60	struct vme_bridge *parent;	/* Owning bridge */
61	struct mutex mtx;
62	int locked;	/* presumably non-zero while claimed by a driver — confirm in vme.c */
63	int number;	/* Channel index on this bridge */
64	struct list_head pending;	/* presumably lists queued for execution */
65	struct list_head running;	/* presumably lists currently executing */
66	u32 route_attr;	/* Supported transfer routes (e.g. VME-to-MEM), bitmask */
67};
68
/* State for one block of location monitors (see vme_api.txt). */
69struct vme_lm_resource {
70	struct list_head list;
71	struct vme_bridge *parent;	/* Owning bridge */
72	struct mutex mtx;
73	int locked;	/* presumably non-zero while claimed by a driver — confirm in vme.c */
74	int number;	/* Block index on this bridge */
75	int monitors;	/* Number of adjacent locations monitored (cf. vme_lm_count) */
76};
77
/* Record of one error observed on the VME bus (queued on vme_errors). */
78struct vme_bus_error {
79	struct list_head list;
80	unsigned long long address;	/* presumably the faulting VME address */
81	u32 attributes;
82};
83
/*
 * One registered interrupt callback; func is invoked as
 * func(level, statid, priv_data) — cf. vme_irq_request() in vme_api.txt.
 */
84struct vme_callback {
85	void (*func)(int, int, void*);
86	void *priv_data;	/* Opaque pointer passed back to func */
87};
88
/*
 * Callbacks for one VME interrupt level.  callback[] appears to be
 * indexed by status ID — confirm against vme_irq_handler() in vme.c.
 */
89struct vme_irq {
90	int count;
91	struct vme_callback callback[255];
92};
93
94/* Allow 16 characters for name (including null character) */
95#define VMENAMSIZ 16
96
97/* This structure stores all the information about one bridge
98 * The structure should be dynamically allocated by the driver and one instance
99 * of the structure should be present for each VME chip present in the system.
100 */
101struct vme_bridge {
102	char name[VMENAMSIZ];	/* Bridge name (at most VMENAMSIZ-1 chars + NUL) */
103	int num;	/* Bridge (bus) number, cf. dev->bridge->num in vme_api.txt */
104	struct list_head master_resources;	/* struct vme_master_resource list */
105	struct list_head slave_resources;	/* struct vme_slave_resource list */
106	struct list_head dma_resources;	/* struct vme_dma_resource list */
107	struct list_head lm_resources;	/* struct vme_lm_resource list */
108
109	struct list_head vme_errors;	/* List for errors generated on VME */
110	struct list_head devices;	/* List of devices on this bridge */
111
112	/* Bridge Info - XXX Move to private structure? */
113	struct device *parent;	/* Parent device (eg. pdev->dev for PCI) */
114	void *driver_priv;	/* Private pointer for the bridge driver */
115	struct list_head bus_list; /* list of VME buses */
116
117	/* Interrupt callbacks */
118	struct vme_irq irq[7];	/* One entry per VME interrupt level (1-7) */
119	/* Locking for VME irq callback configuration */
120	struct mutex irq_mtx;
121
122	/* Slave Functions */
123	int (*slave_get) (struct vme_slave_resource *, int *,
124		unsigned long long *, unsigned long long *, dma_addr_t *,
125		u32 *, u32 *);
126	int (*slave_set) (struct vme_slave_resource *, int, unsigned long long,
127		unsigned long long, dma_addr_t, u32, u32);
128
129	/* Master Functions */
130	int (*master_get) (struct vme_master_resource *, int *,
131		unsigned long long *, unsigned long long *, u32 *, u32 *,
132		u32 *);
133	int (*master_set) (struct vme_master_resource *, int,
134		unsigned long long, unsigned long long, u32, u32, u32);
135	ssize_t (*master_read) (struct vme_master_resource *, void *, size_t,
136		loff_t);
137	ssize_t (*master_write) (struct vme_master_resource *, void *, size_t,
138		loff_t);
139	unsigned int (*master_rmw) (struct vme_master_resource *, unsigned int,
140		unsigned int, unsigned int, loff_t);
141
142	/* DMA Functions */
143	int (*dma_list_add) (struct vme_dma_list *, struct vme_dma_attr *,
144		struct vme_dma_attr *, size_t);
145	int (*dma_list_exec) (struct vme_dma_list *);
146	int (*dma_list_empty) (struct vme_dma_list *);
147
148	/* Interrupt Functions */
149	void (*irq_set) (struct vme_bridge *, int, int, int);
150	int (*irq_generate) (struct vme_bridge *, int, int);
151
152	/* Location monitor functions */
153	int (*lm_set) (struct vme_lm_resource *, unsigned long long, u32, u32);
154	int (*lm_get) (struct vme_lm_resource *, unsigned long long *, u32 *,
155		u32 *);
156	int (*lm_attach) (struct vme_lm_resource *, int, void (*callback)(int));
157	int (*lm_detach) (struct vme_lm_resource *, int);
158
159	/* CR/CSR space functions */
160	int (*slot_get) (struct vme_bridge *);
161
162	/* Bridge parent interface */
163	void *(*alloc_consistent)(struct device *dev, size_t size,
164		dma_addr_t *dma);
165	void (*free_consistent)(struct device *dev, size_t size,
166		void *vaddr, dma_addr_t dma);
167};
168
/* Called by bridge drivers to report a VME interrupt (level, status ID). */
169void vme_irq_handler(struct vme_bridge *, int, int);
170
/* Register/unregister a bridge with the VME core. */
171int vme_register_bridge(struct vme_bridge *);
172void vme_unregister_bridge(struct vme_bridge *);
173
174#endif /* _VME_BRIDGE_H_ */