aboutsummaryrefslogtreecommitdiffstats
path: root/drivers/staging/vme
diff options
context:
space:
mode:
authorJonathan Herman <hermanjl@cs.unc.edu>2013-01-22 10:38:37 -0500
committerJonathan Herman <hermanjl@cs.unc.edu>2013-01-22 10:38:37 -0500
commitfcc9d2e5a6c89d22b8b773a64fb4ad21ac318446 (patch)
treea57612d1888735a2ec7972891b68c1ac5ec8faea /drivers/staging/vme
parent8dea78da5cee153b8af9c07a2745f6c55057fe12 (diff)
Added missing tegra files.HEADmaster
Diffstat (limited to 'drivers/staging/vme')
-rw-r--r--drivers/staging/vme/Kconfig19
-rw-r--r--drivers/staging/vme/TODO70
-rw-r--r--drivers/staging/vme/boards/Kconfig9
-rw-r--r--drivers/staging/vme/boards/Makefile5
-rw-r--r--drivers/staging/vme/boards/vme_vmivme7805.c123
-rw-r--r--drivers/staging/vme/boards/vme_vmivme7805.h37
-rw-r--r--drivers/staging/vme/bridges/Kconfig15
-rw-r--r--drivers/staging/vme/bridges/Makefile2
-rw-r--r--drivers/staging/vme/bridges/vme_ca91cx42.c1934
-rw-r--r--drivers/staging/vme/bridges/vme_ca91cx42.h583
-rw-r--r--drivers/staging/vme/bridges/vme_tsi148.c2640
-rw-r--r--drivers/staging/vme/bridges/vme_tsi148.h1409
-rw-r--r--drivers/staging/vme/vme.c1534
-rw-r--r--drivers/staging/vme/vme.h170
-rw-r--r--drivers/staging/vme/vme_api.txt383
-rw-r--r--drivers/staging/vme/vme_bridge.h175
16 files changed, 9108 insertions, 0 deletions
diff --git a/drivers/staging/vme/Kconfig b/drivers/staging/vme/Kconfig
new file mode 100644
index 00000000000..6411ae51ed3
--- /dev/null
+++ b/drivers/staging/vme/Kconfig
@@ -0,0 +1,19 @@
1#
2# VME configuration.
3#
4
5menuconfig VME_BUS
6 tristate "VME bridge support"
7 depends on PCI
8 ---help---
9 If you say Y here you get support for the VME bridge Framework.
10
11if VME_BUS
12
13source "drivers/staging/vme/bridges/Kconfig"
14
15source "drivers/staging/vme/devices/Kconfig"
16
17source "drivers/staging/vme/boards/Kconfig"
18
19endif # VME_BUS
diff --git a/drivers/staging/vme/TODO b/drivers/staging/vme/TODO
new file mode 100644
index 00000000000..82c222b4a14
--- /dev/null
+++ b/drivers/staging/vme/TODO
@@ -0,0 +1,70 @@
1 TODO
2 ====
3
4API
5===
6
7Master window broadcast select mask
8-----------------------------------
9
10API currently provides no method to set or get Broadcast Select mask. Suggest
11something like:
12
13 int vme_master_bmsk_set (struct vme_resource *res, int mask);
14 int vme_master_bmsk_get (struct vme_resource *res, int *mask);
15
16
17Interrupt Generation
18--------------------
19
20Add optional timeout when waiting for an IACK.
21
22
23CR/CSR Buffer
24-------------
25
26The VME API provides no functions to access the buffer mapped into the CR/CSR
27space.
28
29
30Mailboxes
31---------
32
33Whilst not part of the VME specification, they are provided by a number of
34chips. They are currently not supported at all by the API.
35
36
37Core
38====
39
40- Improve generic sanity checks (Such as does an offset and size fit within a
41 window and parameter checking).
42
43Bridge Support
44==============
45
46Tempe (tsi148)
47--------------
48
49- 2eSST Broadcast mode.
50- Mailboxes unsupported.
51- Improve error detection.
52- Control of prefetch size, threshold.
53- Arbiter control
54- Requestor control
55
56Universe II (ca91c142)
57----------------------
58
59- Mailboxes unsupported.
60- Error Detection.
61- Control of prefetch size, threshold.
62- Arbiter control
63- Requestor control
64- Slot detection
65
66Universe I (ca91x042)
67---------------------
68
69Currently completely unsupported.
70
diff --git a/drivers/staging/vme/boards/Kconfig b/drivers/staging/vme/boards/Kconfig
new file mode 100644
index 00000000000..76163135352
--- /dev/null
+++ b/drivers/staging/vme/boards/Kconfig
@@ -0,0 +1,9 @@
1comment "VME Board Drivers"
2
3config VMIVME_7805
4 tristate "VMIVME-7805"
5 help
6 If you say Y here you get support for the VMIVME-7805 board.
7 This board has an additional control interface to the Universe II
8 chip. This driver has to be included if you want to access VME bus
9 with VMIVME-7805 board.
diff --git a/drivers/staging/vme/boards/Makefile b/drivers/staging/vme/boards/Makefile
new file mode 100644
index 00000000000..43658340885
--- /dev/null
+++ b/drivers/staging/vme/boards/Makefile
@@ -0,0 +1,5 @@
1#
2# Makefile for the VME board drivers.
3#
4
5obj-$(CONFIG_VMIVME_7805) += vme_vmivme7805.o
diff --git a/drivers/staging/vme/boards/vme_vmivme7805.c b/drivers/staging/vme/boards/vme_vmivme7805.c
new file mode 100644
index 00000000000..8e05bb4e135
--- /dev/null
+++ b/drivers/staging/vme/boards/vme_vmivme7805.c
@@ -0,0 +1,123 @@
1/*
2 * Support for the VMIVME-7805 board access to the Universe II bridge.
3 *
4 * Author: Arthur Benilov <arthur.benilov@iba-group.com>
5 * Copyright 2010 Ion Beam Application, Inc.
6 *
7 * This program is free software; you can redistribute it and/or modify it
8 * under the terms of the GNU General Public License as published by the
9 * Free Software Foundation; either version 2 of the License, or (at your
10 * option) any later version.
11 */
12
13#include <linux/module.h>
14#include <linux/types.h>
15#include <linux/errno.h>
16#include <linux/pci.h>
17#include <linux/poll.h>
18#include <linux/io.h>
19
20#include "vme_vmivme7805.h"
21
/* Forward declarations for the module init/probe/remove/exit hooks. */
static int __init vmic_init(void);
static int vmic_probe(struct pci_dev *, const struct pci_device_id *);
static void vmic_remove(struct pci_dev *);
static void __exit vmic_exit(void);

/** Base address to access FPGA register */
/* NOTE(review): this is assigned from ioremap_nocache() and used with
 * ioread32()/iowrite32(), so it should be "void __iomem *" for sparse
 * checking -- TODO confirm. A single file-scope pointer also means only
 * one board instance can be driven at a time. */
static void *vmic_base;

static const char driver_name[] = "vmivme_7805";

/* PCI IDs this driver binds to (VMIC vendor, VTIMR device). */
static DEFINE_PCI_DEVICE_TABLE(vmic_ids) = {
	{ PCI_DEVICE(PCI_VENDOR_ID_VMIC, PCI_DEVICE_ID_VTIMR) },
	{ },
};

static struct pci_driver vmic_driver = {
	.name = driver_name,
	.id_table = vmic_ids,
	.probe = vmic_probe,
	.remove = vmic_remove,
};
43
/* Module entry point: register the PCI driver with the PCI core. */
static int __init vmic_init(void)
{
	return pci_register_driver(&vmic_driver);
}
48
/*
 * Probe: enable the PCI device, map the board FPGA registers (16 bytes of
 * BAR 0) and bring up the VME interface with byte swapping enabled.
 *
 * Returns 0 on success or a negative errno; error paths unwind the
 * acquired resources in reverse order.
 */
static int vmic_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
	int retval;
	u32 data;

	/* Enable the device */
	retval = pci_enable_device(pdev);
	if (retval) {
		dev_err(&pdev->dev, "Unable to enable device\n");
		goto err;
	}

	/* Map Registers */
	retval = pci_request_regions(pdev, driver_name);
	if (retval) {
		dev_err(&pdev->dev, "Unable to reserve resources\n");
		goto err_resource;
	}

	/* Map registers in BAR 0 */
	vmic_base = ioremap_nocache(pci_resource_start(pdev, 0), 16);
	if (!vmic_base) {
		dev_err(&pdev->dev, "Unable to remap CRG region\n");
		retval = -EIO;
		goto err_remap;
	}

	/* Clear the FPGA VME IF contents */
	iowrite32(0, vmic_base + VME_CONTROL);

	/* Clear any initial BERR */
	/* Only the low 12 bits of VME_CONTROL are preserved on read-modify-
	 * write; BERRST is presumably a write-1-to-clear strobe -- TODO
	 * confirm against the board FPGA documentation. */
	data = ioread32(vmic_base + VME_CONTROL) & 0x00000FFF;
	data |= BM_VME_CONTROL_BERRST;
	iowrite32(data, vmic_base + VME_CONTROL);

	/* Enable the vme interface and byte swapping */
	data = ioread32(vmic_base + VME_CONTROL) & 0x00000FFF;
	data = data | BM_VME_CONTROL_MASTER_ENDIAN |
		BM_VME_CONTROL_SLAVE_ENDIAN |
		BM_VME_CONTROL_ABLE |
		BM_VME_CONTROL_BERRI |
		BM_VME_CONTROL_BPENA |
		BM_VME_CONTROL_VBENA;
	iowrite32(data, vmic_base + VME_CONTROL);

	return 0;

err_remap:
	pci_release_regions(pdev);
err_resource:
	pci_disable_device(pdev);
err:
	return retval;
}
103
/*
 * Remove: unmap the FPGA registers and release the PCI device.
 * Tear-down mirrors vmic_probe() in reverse order.
 */
static void vmic_remove(struct pci_dev *pdev)
{
	iounmap(vmic_base);
	pci_release_regions(pdev);
	pci_disable_device(pdev);

}
111
/* Module exit point: unregister the PCI driver. */
static void __exit vmic_exit(void)
{
	pci_unregister_driver(&vmic_driver);
}
116
117MODULE_DESCRIPTION("VMIVME-7805 board support driver");
118MODULE_AUTHOR("Arthur Benilov <arthur.benilov@iba-group.com>");
119MODULE_LICENSE("GPL");
120
121module_init(vmic_init);
122module_exit(vmic_exit);
123
diff --git a/drivers/staging/vme/boards/vme_vmivme7805.h b/drivers/staging/vme/boards/vme_vmivme7805.h
new file mode 100644
index 00000000000..44c2c449808
--- /dev/null
+++ b/drivers/staging/vme/boards/vme_vmivme7805.h
@@ -0,0 +1,37 @@
1/*
2 * vmivme_7805.h
3 *
4 * Support for the VMIVME-7805 board access to the Universe II bridge.
5 *
6 * Author: Arthur Benilov <arthur.benilov@iba-group.com>
7 * Copyright 2010 Ion Beam Application, Inc.
8 *
9 * This program is free software; you can redistribute it and/or modify it
10 * under the terms of the GNU General Public License as published by the
11 * Free Software Foundation; either version 2 of the License, or (at your
12 * option) any later version.
13 */
14
15
#ifndef _VMIVME_7805_H
#define _VMIVME_7805_H

/* Fallback PCI IDs in case the core PCI ID headers do not define them. */
#ifndef PCI_VENDOR_ID_VMIC
#define PCI_VENDOR_ID_VMIC 0x114A
#endif

#ifndef PCI_DEVICE_ID_VTIMR
#define PCI_DEVICE_ID_VTIMR 0x0004
#endif

/* FPGA VME interface control register (offset within BAR 0) and its
 * bit masks, as used by vmic_probe(). Bit semantics are taken from the
 * names -- verify against the board FPGA documentation before extending. */
#define VME_CONTROL 0x0000
#define BM_VME_CONTROL_MASTER_ENDIAN 0x0001
#define BM_VME_CONTROL_SLAVE_ENDIAN 0x0002
#define BM_VME_CONTROL_ABLE 0x0004
#define BM_VME_CONTROL_BERRI 0x0040
#define BM_VME_CONTROL_BERRST 0x0080
#define BM_VME_CONTROL_BPENA 0x0400
#define BM_VME_CONTROL_VBENA 0x0800

#endif /* _VMIVME_7805_H */
37
diff --git a/drivers/staging/vme/bridges/Kconfig b/drivers/staging/vme/bridges/Kconfig
new file mode 100644
index 00000000000..9331064e047
--- /dev/null
+++ b/drivers/staging/vme/bridges/Kconfig
@@ -0,0 +1,15 @@
1comment "VME Bridge Drivers"
2
3config VME_CA91CX42
4 tristate "Universe II"
5 depends on VIRT_TO_BUS
6 help
7 If you say Y here you get support for the Tundra CA91C142
8 (Universe II) VME bridge chip.
9
10config VME_TSI148
11 tristate "Tempe"
12 depends on VIRT_TO_BUS
13 help
14 If you say Y here you get support for the Tundra TSI148 VME bridge
15 chip.
diff --git a/drivers/staging/vme/bridges/Makefile b/drivers/staging/vme/bridges/Makefile
new file mode 100644
index 00000000000..59638afcd50
--- /dev/null
+++ b/drivers/staging/vme/bridges/Makefile
@@ -0,0 +1,2 @@
1obj-$(CONFIG_VME_CA91CX42) += vme_ca91cx42.o
2obj-$(CONFIG_VME_TSI148) += vme_tsi148.o
diff --git a/drivers/staging/vme/bridges/vme_ca91cx42.c b/drivers/staging/vme/bridges/vme_ca91cx42.c
new file mode 100644
index 00000000000..5122c13a956
--- /dev/null
+++ b/drivers/staging/vme/bridges/vme_ca91cx42.c
@@ -0,0 +1,1934 @@
1/*
2 * Support for the Tundra Universe I/II VME-PCI Bridge Chips
3 *
4 * Author: Martyn Welch <martyn.welch@ge.com>
5 * Copyright 2008 GE Intelligent Platforms Embedded Systems, Inc.
6 *
7 * Based on work by Tom Armistead and Ajit Prem
8 * Copyright 2004 Motorola Inc.
9 *
10 * Derived from ca91c042.c by Michael Wyrick
11 *
12 * This program is free software; you can redistribute it and/or modify it
13 * under the terms of the GNU General Public License as published by the
14 * Free Software Foundation; either version 2 of the License, or (at your
15 * option) any later version.
16 */
17
18#include <linux/module.h>
19#include <linux/mm.h>
20#include <linux/types.h>
21#include <linux/errno.h>
22#include <linux/pci.h>
23#include <linux/dma-mapping.h>
24#include <linux/poll.h>
25#include <linux/interrupt.h>
26#include <linux/spinlock.h>
27#include <linux/sched.h>
28#include <linux/slab.h>
29#include <linux/time.h>
30#include <linux/io.h>
31#include <linux/uaccess.h>
32
33#include "../vme.h"
34#include "../vme_bridge.h"
35#include "vme_ca91cx42.h"
36
/* Forward declarations for the module init/probe/remove/exit hooks. */
static int __init ca91cx42_init(void);
static int ca91cx42_probe(struct pci_dev *, const struct pci_device_id *);
static void ca91cx42_remove(struct pci_dev *);
static void __exit ca91cx42_exit(void);

/* Module parameters */
/* geoid: geographical (slot) address override -- presumably 0 means
 * autodetect; the module_param declaration is outside this chunk, so
 * verify there. */
static int geoid;

static const char driver_name[] = "vme_ca91cx42";

/* PCI IDs this driver binds to (Tundra Universe II, CA91C142). */
static DEFINE_PCI_DEVICE_TABLE(ca91cx42_ids) = {
	{ PCI_DEVICE(PCI_VENDOR_ID_TUNDRA, PCI_DEVICE_ID_TUNDRA_CA91C142) },
	{ },
};

static struct pci_driver ca91cx42_driver = {
	.name = driver_name,
	.id_table = ca91cx42_ids,
	.probe = ca91cx42_probe,
	.remove = ca91cx42_remove,
};
58
/* DMA-complete interrupt: wake anyone sleeping on the DMA queue. */
static u32 ca91cx42_DMA_irqhandler(struct ca91cx42_driver *bridge)
{
	wake_up(&bridge->dma_queue);

	return CA91CX42_LINT_DMA;
}
65
66static u32 ca91cx42_LM_irqhandler(struct ca91cx42_driver *bridge, u32 stat)
67{
68 int i;
69 u32 serviced = 0;
70
71 for (i = 0; i < 4; i++) {
72 if (stat & CA91CX42_LINT_LM[i]) {
73 /* We only enable interrupts if the callback is set */
74 bridge->lm_callback[i](i);
75 serviced |= CA91CX42_LINT_LM[i];
76 }
77 }
78
79 return serviced;
80}
81
/* XXX This needs to be split into 4 queues */
/* Mailbox interrupt: wake waiters on the (shared) mailbox queue.
 * mbox_mask is currently unused -- all four mailboxes share one wait
 * queue, hence the XXX above. */
static u32 ca91cx42_MB_irqhandler(struct ca91cx42_driver *bridge, int mbox_mask)
{
	wake_up(&bridge->mbox_queue);

	return CA91CX42_LINT_MBOX;
}
89
/* Software IACK interrupt: wake ca91cx42_irq_generate() waiters. */
static u32 ca91cx42_IACK_irqhandler(struct ca91cx42_driver *bridge)
{
	wake_up(&bridge->iack_queue);

	return CA91CX42_LINT_SW_IACK;
}
96
97static u32 ca91cx42_VERR_irqhandler(struct vme_bridge *ca91cx42_bridge)
98{
99 int val;
100 struct ca91cx42_driver *bridge;
101
102 bridge = ca91cx42_bridge->driver_priv;
103
104 val = ioread32(bridge->base + DGCS);
105
106 if (!(val & 0x00000800)) {
107 dev_err(ca91cx42_bridge->parent, "ca91cx42_VERR_irqhandler DMA "
108 "Read Error DGCS=%08X\n", val);
109 }
110
111 return CA91CX42_LINT_VERR;
112}
113
/* PCI (local) bus error interrupt.
 * NOTE(review): like ca91cx42_VERR_irqhandler() this inspects DGCS (the
 * DMA status register) and reports "DMA Read Error" -- verify that DGCS
 * bit 11 is the right status to check for a local error. */
static u32 ca91cx42_LERR_irqhandler(struct vme_bridge *ca91cx42_bridge)
{
	int val;
	struct ca91cx42_driver *bridge;

	bridge = ca91cx42_bridge->driver_priv;

	val = ioread32(bridge->base + DGCS);

	if (!(val & 0x00000800))
		dev_err(ca91cx42_bridge->parent, "ca91cx42_LERR_irqhandler DMA "
			"Read Error DGCS=%08X\n", val);

	return CA91CX42_LINT_LERR;
}
129
130
/*
 * VME interrupt (VIRQ1-7) handler: for each pending level, highest
 * priority (7) first, fetch the Status/ID vector from the bridge and
 * dispatch it to the VME core via vme_irq_handler().
 *
 * Returns the mask of levels serviced.
 */
static u32 ca91cx42_VIRQ_irqhandler(struct vme_bridge *ca91cx42_bridge,
	int stat)
{
	int vec, i, serviced = 0;
	struct ca91cx42_driver *bridge;

	bridge = ca91cx42_bridge->driver_priv;


	for (i = 7; i > 0; i--) {
		if (stat & (1 << i)) {
			/* The vector is the low byte of the V_STATID reg */
			vec = ioread32(bridge->base +
				CA91CX42_V_STATID[i]) & 0xff;

			vme_irq_handler(ca91cx42_bridge, i, vec);

			serviced |= (1 << i);
		}
	}

	return serviced;
}
153
/*
 * Top-level PCI interrupt handler: read the enabled, pending local
 * interrupt sources, dispatch each to its sub-handler, then acknowledge
 * exactly what was serviced by writing it back to LINT_STAT.
 */
static irqreturn_t ca91cx42_irqhandler(int irq, void *ptr)
{
	u32 stat, enable, serviced = 0;
	struct vme_bridge *ca91cx42_bridge;
	struct ca91cx42_driver *bridge;

	ca91cx42_bridge = ptr;

	bridge = ca91cx42_bridge->driver_priv;

	enable = ioread32(bridge->base + LINT_EN);
	stat = ioread32(bridge->base + LINT_STAT);

	/* Only look at unmasked interrupts */
	stat &= enable;

	/* Nothing pending for us -- the line is shared (IRQF_SHARED) */
	if (unlikely(!stat))
		return IRQ_NONE;

	if (stat & CA91CX42_LINT_DMA)
		serviced |= ca91cx42_DMA_irqhandler(bridge);
	if (stat & (CA91CX42_LINT_LM0 | CA91CX42_LINT_LM1 | CA91CX42_LINT_LM2 |
			CA91CX42_LINT_LM3))
		serviced |= ca91cx42_LM_irqhandler(bridge, stat);
	if (stat & CA91CX42_LINT_MBOX)
		serviced |= ca91cx42_MB_irqhandler(bridge, stat);
	if (stat & CA91CX42_LINT_SW_IACK)
		serviced |= ca91cx42_IACK_irqhandler(bridge);
	if (stat & CA91CX42_LINT_VERR)
		serviced |= ca91cx42_VERR_irqhandler(ca91cx42_bridge);
	if (stat & CA91CX42_LINT_LERR)
		serviced |= ca91cx42_LERR_irqhandler(ca91cx42_bridge);
	if (stat & (CA91CX42_LINT_VIRQ1 | CA91CX42_LINT_VIRQ2 |
			CA91CX42_LINT_VIRQ3 | CA91CX42_LINT_VIRQ4 |
			CA91CX42_LINT_VIRQ5 | CA91CX42_LINT_VIRQ6 |
			CA91CX42_LINT_VIRQ7))
		serviced |= ca91cx42_VIRQ_irqhandler(ca91cx42_bridge, stat);

	/* Clear serviced interrupts */
	iowrite32(serviced, bridge->base + LINT_STAT);

	return IRQ_HANDLED;
}
197
/*
 * Hook up the bridge's PCI interrupt: quiesce and clear all sources,
 * install the (shared) handler, route all sources to PCI interrupt 0 and
 * enable the sources serviced from init.
 *
 * Returns 0 on success or the error from request_irq().
 */
static int ca91cx42_irq_init(struct vme_bridge *ca91cx42_bridge)
{
	int result, tmp;
	struct pci_dev *pdev;
	struct ca91cx42_driver *bridge;

	bridge = ca91cx42_bridge->driver_priv;

	/* Need pdev */
	pdev = container_of(ca91cx42_bridge->parent, struct pci_dev, dev);

	/* Initialise list for VME bus errors */
	INIT_LIST_HEAD(&ca91cx42_bridge->vme_errors);

	mutex_init(&ca91cx42_bridge->irq_mtx);

	/* Disable interrupts from PCI to VME */
	iowrite32(0, bridge->base + VINT_EN);

	/* Disable PCI interrupts */
	iowrite32(0, bridge->base + LINT_EN);
	/* Clear Any Pending PCI Interrupts */
	iowrite32(0x00FFFFFF, bridge->base + LINT_STAT);

	/* dev_id here is ca91cx42_bridge; the matching free_irq() must use
	 * the same pointer (see ca91cx42_irq_exit()). */
	result = request_irq(pdev->irq, ca91cx42_irqhandler, IRQF_SHARED,
			driver_name, ca91cx42_bridge);
	if (result) {
		dev_err(&pdev->dev, "Can't get assigned pci irq vector %02X\n",
			pdev->irq);
		return result;
	}

	/* Ensure all interrupts are mapped to PCI Interrupt 0 */
	iowrite32(0, bridge->base + LINT_MAP0);
	iowrite32(0, bridge->base + LINT_MAP1);
	iowrite32(0, bridge->base + LINT_MAP2);

	/* Enable DMA, mailbox & LM Interrupts */
	/* NOTE(review): the mask below contains no CA91CX42_LINT_LM* or
	 * VIRQ bits despite the comment; LM and VME-IRQ sources are
	 * presumably enabled on demand (see ca91cx42_irq_set()) -- TODO
	 * confirm. */
	tmp = CA91CX42_LINT_MBOX3 | CA91CX42_LINT_MBOX2 | CA91CX42_LINT_MBOX1 |
		CA91CX42_LINT_MBOX0 | CA91CX42_LINT_SW_IACK |
		CA91CX42_LINT_VERR | CA91CX42_LINT_LERR | CA91CX42_LINT_DMA;

	iowrite32(tmp, bridge->base + LINT_EN);

	return 0;
}
244
/*
 * Tear down the bridge interrupt: mask and clear all sources, then
 * release the PCI interrupt line.
 */
static void ca91cx42_irq_exit(struct ca91cx42_driver *bridge,
	struct pci_dev *pdev)
{
	/* Disable interrupts from PCI to VME */
	iowrite32(0, bridge->base + VINT_EN);

	/* Disable PCI interrupts */
	iowrite32(0, bridge->base + LINT_EN);
	/* Clear Any Pending PCI Interrupts */
	iowrite32(0x00FFFFFF, bridge->base + LINT_STAT);

	/* NOTE(review): BUG -- request_irq() in ca91cx42_irq_init() passed
	 * dev_id == ca91cx42_bridge, but this frees with dev_id == pdev.
	 * On a shared line free_irq() will not match the registered handler
	 * and the IRQ stays installed. Fixing it requires passing the
	 * struct vme_bridge down to this function (signature change). */
	free_irq(pdev->irq, pdev);
}
258
259static int ca91cx42_iack_received(struct ca91cx42_driver *bridge, int level)
260{
261 u32 tmp;
262
263 tmp = ioread32(bridge->base + LINT_STAT);
264
265 if (tmp & (1 << level))
266 return 0;
267 else
268 return 1;
269}
270
/*
 * Enable or disable servicing of a VME interrupt level.
 *
 * @level: VME IRQ level (indexes CA91CX42_LINT_VIRQ[])
 * @state: non-zero enables the level in LINT_EN, 0 masks it
 * @sync:  when disabling, non-zero waits for any in-flight handler on
 *         this PCI interrupt to complete before returning
 */
static void ca91cx42_irq_set(struct vme_bridge *ca91cx42_bridge, int level,
	int state, int sync)

{
	struct pci_dev *pdev;
	u32 tmp;
	struct ca91cx42_driver *bridge;

	bridge = ca91cx42_bridge->driver_priv;

	/* Enable IRQ level */
	tmp = ioread32(bridge->base + LINT_EN);

	if (state == 0)
		tmp &= ~CA91CX42_LINT_VIRQ[level];
	else
		tmp |= CA91CX42_LINT_VIRQ[level];

	iowrite32(tmp, bridge->base + LINT_EN);

	if ((state == 0) && (sync != 0)) {
		pdev = container_of(ca91cx42_bridge->parent, struct pci_dev,
			dev);

		synchronize_irq(pdev->irq);
	}
}
301
/*
 * Generate a VME interrupt at @level with Status/ID vector @statid and
 * sleep (interruptibly) until the interrupt handler observes the IACK.
 *
 * Returns 0 on success, -EINVAL for odd vectors -- the Universe can only
 * generate even Status/ID values. Serialised by bridge->vme_int.
 */
static int ca91cx42_irq_generate(struct vme_bridge *ca91cx42_bridge, int level,
	int statid)
{
	u32 tmp;
	struct ca91cx42_driver *bridge;

	bridge = ca91cx42_bridge->driver_priv;

	/* Universe can only generate even vectors */
	if (statid & 1)
		return -EINVAL;

	mutex_lock(&bridge->vme_int);

	tmp = ioread32(bridge->base + VINT_EN);

	/* Set Status/ID */
	iowrite32(statid << 24, bridge->base + STATID);

	/* Assert VMEbus IRQ */
	tmp = tmp | (1 << (level + 24));
	iowrite32(tmp, bridge->base + VINT_EN);

	/* Wait for IACK */
	/* NOTE(review): the return value of wait_event_interruptible() is
	 * ignored -- a signal lets us de-assert early; confirm that is the
	 * intended behaviour. */
	wait_event_interruptible(bridge->iack_queue,
		ca91cx42_iack_received(bridge, level));

	/* Return interrupt to low state */
	tmp = ioread32(bridge->base + VINT_EN);
	tmp = tmp & ~(1 << (level + 24));
	iowrite32(tmp, bridge->base + VINT_EN);

	mutex_unlock(&bridge->vme_int);

	return 0;
}
338
/*
 * Configure a slave image: respond to a VME address window and translate
 * accesses through to PCI.
 *
 * @image:    slave window resource to program
 * @enabled:  non-zero enables the window after programming
 * @vme_base: VME base address of the window
 * @size:     window size in bytes
 * @pci_base: PCI address the window maps to
 * @aspace:   address space (A16/A24/A32/USER1/USER2 supported)
 * @cycle:    cycle types to respond to (VME_SUPER/USER/PROG/DATA flags)
 *
 * Returns 0 on success, -EINVAL for an unsupported address space or a
 * base/bound/offset not aligned to the image's granularity.
 */
static int ca91cx42_slave_set(struct vme_slave_resource *image, int enabled,
	unsigned long long vme_base, unsigned long long size,
	dma_addr_t pci_base, vme_address_t aspace, vme_cycle_t cycle)
{
	unsigned int i, addr = 0, granularity;
	unsigned int temp_ctl = 0;
	/* NOTE(review): 32-bit locals computed from 64-bit arguments below;
	 * values beyond 32 bits are silently truncated -- TODO confirm
	 * callers stay within range. */
	unsigned int vme_bound, pci_offset;
	struct vme_bridge *ca91cx42_bridge;
	struct ca91cx42_driver *bridge;

	ca91cx42_bridge = image->parent;

	bridge = ca91cx42_bridge->driver_priv;

	i = image->number;

	switch (aspace) {
	case VME_A16:
		addr |= CA91CX42_VSI_CTL_VAS_A16;
		break;
	case VME_A24:
		addr |= CA91CX42_VSI_CTL_VAS_A24;
		break;
	case VME_A32:
		addr |= CA91CX42_VSI_CTL_VAS_A32;
		break;
	case VME_USER1:
		addr |= CA91CX42_VSI_CTL_VAS_USER1;
		break;
	case VME_USER2:
		addr |= CA91CX42_VSI_CTL_VAS_USER2;
		break;
	case VME_A64:
	case VME_CRCSR:
	case VME_USER3:
	case VME_USER4:
	default:
		dev_err(ca91cx42_bridge->parent, "Invalid address space\n");
		return -EINVAL;
		break;
	}

	/*
	 * Bound address is a valid address for the window, adjust
	 * accordingly
	 */
	vme_bound = vme_base + size;
	pci_offset = pci_base - vme_base;

	/* Images 0 and 4 have 4kB granularity, the others 64kB */
	if ((i == 0) || (i == 4))
		granularity = 0x1000;
	else
		granularity = 0x10000;

	if (vme_base & (granularity - 1)) {
		dev_err(ca91cx42_bridge->parent, "Invalid VME base "
			"alignment\n");
		return -EINVAL;
	}
	if (vme_bound & (granularity - 1)) {
		dev_err(ca91cx42_bridge->parent, "Invalid VME bound "
			"alignment\n");
		return -EINVAL;
	}
	if (pci_offset & (granularity - 1)) {
		dev_err(ca91cx42_bridge->parent, "Invalid PCI Offset "
			"alignment\n");
		return -EINVAL;
	}

	/* Disable while we are mucking around */
	temp_ctl = ioread32(bridge->base + CA91CX42_VSI_CTL[i]);
	temp_ctl &= ~CA91CX42_VSI_CTL_EN;
	iowrite32(temp_ctl, bridge->base + CA91CX42_VSI_CTL[i]);

	/* Setup mapping */
	iowrite32(vme_base, bridge->base + CA91CX42_VSI_BS[i]);
	iowrite32(vme_bound, bridge->base + CA91CX42_VSI_BD[i]);
	iowrite32(pci_offset, bridge->base + CA91CX42_VSI_TO[i]);

	/* Setup address space */
	temp_ctl &= ~CA91CX42_VSI_CTL_VAS_M;
	temp_ctl |= addr;

	/* Setup cycle types */
	temp_ctl &= ~(CA91CX42_VSI_CTL_PGM_M | CA91CX42_VSI_CTL_SUPER_M);
	if (cycle & VME_SUPER)
		temp_ctl |= CA91CX42_VSI_CTL_SUPER_SUPR;
	if (cycle & VME_USER)
		temp_ctl |= CA91CX42_VSI_CTL_SUPER_NPRIV;
	if (cycle & VME_PROG)
		temp_ctl |= CA91CX42_VSI_CTL_PGM_PGM;
	if (cycle & VME_DATA)
		temp_ctl |= CA91CX42_VSI_CTL_PGM_DATA;

	/* Write ctl reg without enable */
	iowrite32(temp_ctl, bridge->base + CA91CX42_VSI_CTL[i]);

	if (enabled)
		temp_ctl |= CA91CX42_VSI_CTL_EN;

	iowrite32(temp_ctl, bridge->base + CA91CX42_VSI_CTL[i]);

	return 0;
}
444
445static int ca91cx42_slave_get(struct vme_slave_resource *image, int *enabled,
446 unsigned long long *vme_base, unsigned long long *size,
447 dma_addr_t *pci_base, vme_address_t *aspace, vme_cycle_t *cycle)
448{
449 unsigned int i, granularity = 0, ctl = 0;
450 unsigned long long vme_bound, pci_offset;
451 struct ca91cx42_driver *bridge;
452
453 bridge = image->parent->driver_priv;
454
455 i = image->number;
456
457 if ((i == 0) || (i == 4))
458 granularity = 0x1000;
459 else
460 granularity = 0x10000;
461
462 /* Read Registers */
463 ctl = ioread32(bridge->base + CA91CX42_VSI_CTL[i]);
464
465 *vme_base = ioread32(bridge->base + CA91CX42_VSI_BS[i]);
466 vme_bound = ioread32(bridge->base + CA91CX42_VSI_BD[i]);
467 pci_offset = ioread32(bridge->base + CA91CX42_VSI_TO[i]);
468
469 *pci_base = (dma_addr_t)vme_base + pci_offset;
470 *size = (unsigned long long)((vme_bound - *vme_base) + granularity);
471
472 *enabled = 0;
473 *aspace = 0;
474 *cycle = 0;
475
476 if (ctl & CA91CX42_VSI_CTL_EN)
477 *enabled = 1;
478
479 if ((ctl & CA91CX42_VSI_CTL_VAS_M) == CA91CX42_VSI_CTL_VAS_A16)
480 *aspace = VME_A16;
481 if ((ctl & CA91CX42_VSI_CTL_VAS_M) == CA91CX42_VSI_CTL_VAS_A24)
482 *aspace = VME_A24;
483 if ((ctl & CA91CX42_VSI_CTL_VAS_M) == CA91CX42_VSI_CTL_VAS_A32)
484 *aspace = VME_A32;
485 if ((ctl & CA91CX42_VSI_CTL_VAS_M) == CA91CX42_VSI_CTL_VAS_USER1)
486 *aspace = VME_USER1;
487 if ((ctl & CA91CX42_VSI_CTL_VAS_M) == CA91CX42_VSI_CTL_VAS_USER2)
488 *aspace = VME_USER2;
489
490 if (ctl & CA91CX42_VSI_CTL_SUPER_SUPR)
491 *cycle |= VME_SUPER;
492 if (ctl & CA91CX42_VSI_CTL_SUPER_NPRIV)
493 *cycle |= VME_USER;
494 if (ctl & CA91CX42_VSI_CTL_PGM_PGM)
495 *cycle |= VME_PROG;
496 if (ctl & CA91CX42_VSI_CTL_PGM_DATA)
497 *cycle |= VME_DATA;
498
499 return 0;
500}
501
/*
 * Allocate and map PCI Resource
 *
 * Allocate a @size-byte PCI bus window for a master image and map it into
 * kernel space. An existing window of the right size is reused; one of
 * the wrong size is torn down and reallocated.
 *
 * Returns 0 on success, -EINVAL if the bridge has no parent device,
 * -ENOMEM on allocation/remap failure, or the error from
 * pci_bus_alloc_resource().
 */
static int ca91cx42_alloc_resource(struct vme_master_resource *image,
	unsigned long long size)
{
	unsigned long long existing_size;
	int retval = 0;
	struct pci_dev *pdev;
	struct vme_bridge *ca91cx42_bridge;

	ca91cx42_bridge = image->parent;

	/* Find pci_dev container of dev */
	/* NOTE(review): dev_err() on a NULL parent dereferences NULL --
	 * should log against a valid device or use pr_err(). */
	if (ca91cx42_bridge->parent == NULL) {
		dev_err(ca91cx42_bridge->parent, "Dev entry NULL\n");
		return -EINVAL;
	}
	pdev = container_of(ca91cx42_bridge->parent, struct pci_dev, dev);

	existing_size = (unsigned long long)(image->bus_resource.end -
		image->bus_resource.start);

	/* If the existing size is OK, return */
	/* (resource.end is inclusive, so a span of "size" bytes has
	 * end - start == size - 1) */
	if (existing_size == (size - 1))
		return 0;

	if (existing_size != 0) {
		iounmap(image->kern_base);
		image->kern_base = NULL;
		kfree(image->bus_resource.name);
		release_resource(&image->bus_resource);
		memset(&image->bus_resource, 0, sizeof(struct resource));
	}

	if (image->bus_resource.name == NULL) {
		/* GFP_ATOMIC: ca91cx42_master_set() calls us while holding
		 * the image->lock spinlock */
		image->bus_resource.name = kmalloc(VMENAMSIZ+3, GFP_ATOMIC);
		if (image->bus_resource.name == NULL) {
			dev_err(ca91cx42_bridge->parent, "Unable to allocate "
				"memory for resource name\n");
			retval = -ENOMEM;
			goto err_name;
		}
	}

	sprintf((char *)image->bus_resource.name, "%s.%d",
		ca91cx42_bridge->name, image->number);

	image->bus_resource.start = 0;
	image->bus_resource.end = (unsigned long)size;
	image->bus_resource.flags = IORESOURCE_MEM;

	retval = pci_bus_alloc_resource(pdev->bus,
		&image->bus_resource, size, size, PCIBIOS_MIN_MEM,
		0, NULL, NULL);
	if (retval) {
		dev_err(ca91cx42_bridge->parent, "Failed to allocate mem "
			"resource for window %d size 0x%lx start 0x%lx\n",
			image->number, (unsigned long)size,
			(unsigned long)image->bus_resource.start);
		goto err_resource;
	}

	image->kern_base = ioremap_nocache(
		image->bus_resource.start, size);
	if (image->kern_base == NULL) {
		dev_err(ca91cx42_bridge->parent, "Failed to remap resource\n");
		retval = -ENOMEM;
		goto err_remap;
	}

	return 0;

err_remap:
	release_resource(&image->bus_resource);
err_resource:
	kfree(image->bus_resource.name);
	memset(&image->bus_resource, 0, sizeof(struct resource));
err_name:
	return retval;
}
583
/*
 * Free and unmap PCI Resource
 *
 * Reverse of ca91cx42_alloc_resource(): drop the kernel mapping, return
 * the window to the PCI bus, free the resource name and clear the
 * resource so a later alloc sees existing_size == 0.
 */
static void ca91cx42_free_resource(struct vme_master_resource *image)
{
	iounmap(image->kern_base);
	image->kern_base = NULL;
	release_resource(&image->bus_resource);
	kfree(image->bus_resource.name);
	memset(&image->bus_resource, 0, sizeof(struct resource));
}
595
596
/*
 * Configure a master image: program a PCI window that maps through to
 * the given VME address range.
 *
 * Allocates and maps the backing PCI resource itself (see
 * ca91cx42_alloc_resource()), then programs the LSI control/base/bound/
 * translation-offset registers under image->lock.
 *
 * @enabled:  non-zero enables the window after programming
 * @vme_base: VME base address; @size: window size (both must be aligned
 *            to the image granularity)
 * @aspace:   A16/A24/A32/CRCSR/USER1/USER2 supported
 * @cycle:    SCT plus optional BLT/MBLT, SUPER, PROG flags
 * @dwidth:   D8/D16/D32/D64
 *
 * Returns 0 on success, -EINVAL for bad alignment, data width or address
 * space, -ENOMEM if the PCI window cannot be allocated.
 */
static int ca91cx42_master_set(struct vme_master_resource *image, int enabled,
	unsigned long long vme_base, unsigned long long size,
	vme_address_t aspace, vme_cycle_t cycle, vme_width_t dwidth)
{
	int retval = 0;
	unsigned int i, granularity = 0;
	unsigned int temp_ctl = 0;
	unsigned long long pci_bound, vme_offset, pci_base;
	struct vme_bridge *ca91cx42_bridge;
	struct ca91cx42_driver *bridge;

	ca91cx42_bridge = image->parent;

	bridge = ca91cx42_bridge->driver_priv;

	i = image->number;

	/* Images 0 and 4 have 4kB granularity, the others 64kB */
	if ((i == 0) || (i == 4))
		granularity = 0x1000;
	else
		granularity = 0x10000;

	/* Verify input data */
	if (vme_base & (granularity - 1)) {
		dev_err(ca91cx42_bridge->parent, "Invalid VME Window "
			"alignment\n");
		retval = -EINVAL;
		goto err_window;
	}
	if (size & (granularity - 1)) {
		dev_err(ca91cx42_bridge->parent, "Invalid VME Window "
			"alignment\n");
		retval = -EINVAL;
		goto err_window;
	}

	spin_lock(&image->lock);

	/*
	 * Let's allocate the resource here rather than further up the stack as
	 * it avoids pushing loads of bus dependent stuff up the stack
	 */
	retval = ca91cx42_alloc_resource(image, size);
	if (retval) {
		spin_unlock(&image->lock);
		dev_err(ca91cx42_bridge->parent, "Unable to allocate memory "
			"for resource name\n");
		retval = -ENOMEM;
		goto err_res;
	}

	pci_base = (unsigned long long)image->bus_resource.start;

	/*
	 * Bound address is a valid address for the window, adjust
	 * according to window granularity.
	 */
	pci_bound = pci_base + size;
	vme_offset = vme_base - pci_base;

	/* Disable while we are mucking around */
	temp_ctl = ioread32(bridge->base + CA91CX42_LSI_CTL[i]);
	temp_ctl &= ~CA91CX42_LSI_CTL_EN;
	iowrite32(temp_ctl, bridge->base + CA91CX42_LSI_CTL[i]);

	/* Setup cycle types */
	temp_ctl &= ~CA91CX42_LSI_CTL_VCT_M;
	if (cycle & VME_BLT)
		temp_ctl |= CA91CX42_LSI_CTL_VCT_BLT;
	if (cycle & VME_MBLT)
		temp_ctl |= CA91CX42_LSI_CTL_VCT_MBLT;

	/* Setup data width */
	temp_ctl &= ~CA91CX42_LSI_CTL_VDW_M;
	switch (dwidth) {
	case VME_D8:
		temp_ctl |= CA91CX42_LSI_CTL_VDW_D8;
		break;
	case VME_D16:
		temp_ctl |= CA91CX42_LSI_CTL_VDW_D16;
		break;
	case VME_D32:
		temp_ctl |= CA91CX42_LSI_CTL_VDW_D32;
		break;
	case VME_D64:
		temp_ctl |= CA91CX42_LSI_CTL_VDW_D64;
		break;
	default:
		spin_unlock(&image->lock);
		dev_err(ca91cx42_bridge->parent, "Invalid data width\n");
		retval = -EINVAL;
		goto err_dwidth;
		break;
	}

	/* Setup address space */
	temp_ctl &= ~CA91CX42_LSI_CTL_VAS_M;
	switch (aspace) {
	case VME_A16:
		temp_ctl |= CA91CX42_LSI_CTL_VAS_A16;
		break;
	case VME_A24:
		temp_ctl |= CA91CX42_LSI_CTL_VAS_A24;
		break;
	case VME_A32:
		temp_ctl |= CA91CX42_LSI_CTL_VAS_A32;
		break;
	case VME_CRCSR:
		temp_ctl |= CA91CX42_LSI_CTL_VAS_CRCSR;
		break;
	case VME_USER1:
		temp_ctl |= CA91CX42_LSI_CTL_VAS_USER1;
		break;
	case VME_USER2:
		temp_ctl |= CA91CX42_LSI_CTL_VAS_USER2;
		break;
	case VME_A64:
	case VME_USER3:
	case VME_USER4:
	default:
		spin_unlock(&image->lock);
		dev_err(ca91cx42_bridge->parent, "Invalid address space\n");
		retval = -EINVAL;
		goto err_aspace;
		break;
	}

	temp_ctl &= ~(CA91CX42_LSI_CTL_PGM_M | CA91CX42_LSI_CTL_SUPER_M);
	if (cycle & VME_SUPER)
		temp_ctl |= CA91CX42_LSI_CTL_SUPER_SUPR;
	if (cycle & VME_PROG)
		temp_ctl |= CA91CX42_LSI_CTL_PGM_PGM;

	/* Setup mapping */
	iowrite32(pci_base, bridge->base + CA91CX42_LSI_BS[i]);
	iowrite32(pci_bound, bridge->base + CA91CX42_LSI_BD[i]);
	iowrite32(vme_offset, bridge->base + CA91CX42_LSI_TO[i]);

	/* Write ctl reg without enable */
	iowrite32(temp_ctl, bridge->base + CA91CX42_LSI_CTL[i]);

	if (enabled)
		temp_ctl |= CA91CX42_LSI_CTL_EN;

	iowrite32(temp_ctl, bridge->base + CA91CX42_LSI_CTL[i]);

	spin_unlock(&image->lock);
	return 0;

	/* The window was left disabled above; free the PCI resource we
	 * allocated for it. */
err_aspace:
err_dwidth:
	ca91cx42_free_resource(image);
err_res:
err_window:
	return retval;
}
753
754static int __ca91cx42_master_get(struct vme_master_resource *image,
755 int *enabled, unsigned long long *vme_base, unsigned long long *size,
756 vme_address_t *aspace, vme_cycle_t *cycle, vme_width_t *dwidth)
757{
758 unsigned int i, ctl;
759 unsigned long long pci_base, pci_bound, vme_offset;
760 struct ca91cx42_driver *bridge;
761
762 bridge = image->parent->driver_priv;
763
764 i = image->number;
765
766 ctl = ioread32(bridge->base + CA91CX42_LSI_CTL[i]);
767
768 pci_base = ioread32(bridge->base + CA91CX42_LSI_BS[i]);
769 vme_offset = ioread32(bridge->base + CA91CX42_LSI_TO[i]);
770 pci_bound = ioread32(bridge->base + CA91CX42_LSI_BD[i]);
771
772 *vme_base = pci_base + vme_offset;
773 *size = (unsigned long long)(pci_bound - pci_base);
774
775 *enabled = 0;
776 *aspace = 0;
777 *cycle = 0;
778 *dwidth = 0;
779
780 if (ctl & CA91CX42_LSI_CTL_EN)
781 *enabled = 1;
782
783 /* Setup address space */
784 switch (ctl & CA91CX42_LSI_CTL_VAS_M) {
785 case CA91CX42_LSI_CTL_VAS_A16:
786 *aspace = VME_A16;
787 break;
788 case CA91CX42_LSI_CTL_VAS_A24:
789 *aspace = VME_A24;
790 break;
791 case CA91CX42_LSI_CTL_VAS_A32:
792 *aspace = VME_A32;
793 break;
794 case CA91CX42_LSI_CTL_VAS_CRCSR:
795 *aspace = VME_CRCSR;
796 break;
797 case CA91CX42_LSI_CTL_VAS_USER1:
798 *aspace = VME_USER1;
799 break;
800 case CA91CX42_LSI_CTL_VAS_USER2:
801 *aspace = VME_USER2;
802 break;
803 }
804
805 /* XXX Not sure howto check for MBLT */
806 /* Setup cycle types */
807 if (ctl & CA91CX42_LSI_CTL_VCT_BLT)
808 *cycle |= VME_BLT;
809 else
810 *cycle |= VME_SCT;
811
812 if (ctl & CA91CX42_LSI_CTL_SUPER_SUPR)
813 *cycle |= VME_SUPER;
814 else
815 *cycle |= VME_USER;
816
817 if (ctl & CA91CX42_LSI_CTL_PGM_PGM)
818 *cycle = VME_PROG;
819 else
820 *cycle = VME_DATA;
821
822 /* Setup data width */
823 switch (ctl & CA91CX42_LSI_CTL_VDW_M) {
824 case CA91CX42_LSI_CTL_VDW_D8:
825 *dwidth = VME_D8;
826 break;
827 case CA91CX42_LSI_CTL_VDW_D16:
828 *dwidth = VME_D16;
829 break;
830 case CA91CX42_LSI_CTL_VDW_D32:
831 *dwidth = VME_D32;
832 break;
833 case CA91CX42_LSI_CTL_VDW_D64:
834 *dwidth = VME_D64;
835 break;
836 }
837
838 return 0;
839}
840
841static int ca91cx42_master_get(struct vme_master_resource *image, int *enabled,
842 unsigned long long *vme_base, unsigned long long *size,
843 vme_address_t *aspace, vme_cycle_t *cycle, vme_width_t *dwidth)
844{
845 int retval;
846
847 spin_lock(&image->lock);
848
849 retval = __ca91cx42_master_get(image, enabled, vme_base, size, aspace,
850 cycle, dwidth);
851
852 spin_unlock(&image->lock);
853
854 return retval;
855}
856
857static ssize_t ca91cx42_master_read(struct vme_master_resource *image,
858 void *buf, size_t count, loff_t offset)
859{
860 ssize_t retval;
861 void *addr = image->kern_base + offset;
862 unsigned int done = 0;
863 unsigned int count32;
864
865 if (count == 0)
866 return 0;
867
868 spin_lock(&image->lock);
869
870 /* The following code handles VME address alignment problem
871 * in order to assure the maximal data width cycle.
872 * We cannot use memcpy_xxx directly here because it
873 * may cut data transfer in 8-bits cycles, thus making
874 * D16 cycle impossible.
875 * From the other hand, the bridge itself assures that
876 * maximal configured data cycle is used and splits it
877 * automatically for non-aligned addresses.
878 */
879 if ((int)addr & 0x1) {
880 *(u8 *)buf = ioread8(addr);
881 done += 1;
882 if (done == count)
883 goto out;
884 }
885 if ((int)addr & 0x2) {
886 if ((count - done) < 2) {
887 *(u8 *)(buf + done) = ioread8(addr + done);
888 done += 1;
889 goto out;
890 } else {
891 *(u16 *)(buf + done) = ioread16(addr + done);
892 done += 2;
893 }
894 }
895
896 count32 = (count - done) & ~0x3;
897 if (count32 > 0) {
898 memcpy_fromio(buf + done, addr + done, (unsigned int)count);
899 done += count32;
900 }
901
902 if ((count - done) & 0x2) {
903 *(u16 *)(buf + done) = ioread16(addr + done);
904 done += 2;
905 }
906 if ((count - done) & 0x1) {
907 *(u8 *)(buf + done) = ioread8(addr + done);
908 done += 1;
909 }
910out:
911 retval = count;
912 spin_unlock(&image->lock);
913
914 return retval;
915}
916
/*
 * Write 'count' bytes from 'buf' to a master window.
 *
 * Mirrors ca91cx42_master_read(): leading unaligned bytes are written with
 * 8/16-bit accessors, the aligned bulk with memcpy_toio(), and the tail
 * with 16/8-bit accessors, so the bridge can use the widest configured
 * data cycle.  Always returns 'count' - the MMIO accessors report no error.
 */
static ssize_t ca91cx42_master_write(struct vme_master_resource *image,
	void *buf, size_t count, loff_t offset)
{
	ssize_t retval;
	void *addr = image->kern_base + offset;
	unsigned int done = 0;
	unsigned int count32;

	if (count == 0)
		return 0;

	spin_lock(&image->lock);

	/* Here we apply for the same strategy we do in master_read
	 * function in order to assure D16 cycle when required.
	 */
	/* Leading byte if the start address is odd */
	if ((int)addr & 0x1) {
		iowrite8(*(u8 *)buf, addr);
		done += 1;
		if (done == count)
			goto out;
	}
	/* Align to a 4-byte boundary with a 16-bit (or final 8-bit) write */
	if ((int)addr & 0x2) {
		if ((count - done) < 2) {
			iowrite8(*(u8 *)(buf + done), addr + done);
			done += 1;
			goto out;
		} else {
			iowrite16(*(u16 *)(buf + done), addr + done);
			done += 2;
		}
	}

	/* Bulk of the transfer: the 32-bit aligned middle portion */
	count32 = (count - done) & ~0x3;
	if (count32 > 0) {
		memcpy_toio(addr + done, buf + done, count32);
		done += count32;
	}

	/* Unaligned tail: at most one 16-bit and one 8-bit write */
	if ((count - done) & 0x2) {
		iowrite16(*(u16 *)(buf + done), addr + done);
		done += 2;
	}
	if ((count - done) & 0x1) {
		iowrite8(*(u8 *)(buf + done), addr + done);
		done += 1;
	}
out:
	retval = count;

	spin_unlock(&image->lock);

	return retval;
}
971
/*
 * Perform a hardware read-modify-write cycle through a master window
 * using the Universe II "special cycle" (SCYC) registers.
 *
 * The RMW is triggered by a plain 32-bit read of the configured address
 * once SCYC_CTL is armed; the chip performs the compare/swap on the bus.
 * Returns the value read back from the location.
 *
 * NOTE(review): on alignment failure -EINVAL is stored into the unsigned
 * return value, which a caller cannot distinguish from read data - the
 * interface is kept as-is since callers depend on it.
 */
static unsigned int ca91cx42_master_rmw(struct vme_master_resource *image,
	unsigned int mask, unsigned int compare, unsigned int swap,
	loff_t offset)
{
	u32 pci_addr, result;
	int i;
	struct ca91cx42_driver *bridge;
	struct device *dev;

	bridge = image->parent->driver_priv;
	dev = image->parent->parent;

	/* Find the PCI address that maps to the desired VME address */
	i = image->number;

	/* Locking as we can only do one of these at a time */
	mutex_lock(&bridge->vme_rmw);

	/* Lock image */
	spin_lock(&image->lock);

	pci_addr = (u32)image->kern_base + offset;

	/* Address must be 4-byte aligned */
	if (pci_addr & 0x3) {
		dev_err(dev, "RMW Address not 4-byte aligned\n");
		result = -EINVAL;
		goto out;
	}

	/* Ensure RMW Disabled whilst configuring */
	iowrite32(0, bridge->base + SCYC_CTL);

	/* Configure registers */
	iowrite32(mask, bridge->base + SCYC_EN);
	iowrite32(compare, bridge->base + SCYC_CMP);
	iowrite32(swap, bridge->base + SCYC_SWP);
	iowrite32(pci_addr, bridge->base + SCYC_ADDR);

	/* Enable RMW */
	iowrite32(CA91CX42_SCYC_CTL_CYC_RMW, bridge->base + SCYC_CTL);

	/* Kick process off with a read to the required address. */
	result = ioread32(image->kern_base + offset);

	/* Disable RMW */
	iowrite32(0, bridge->base + SCYC_CTL);

out:
	spin_unlock(&image->lock);

	mutex_unlock(&bridge->vme_rmw);

	return result;
}
1027
1028static int ca91cx42_dma_list_add(struct vme_dma_list *list,
1029 struct vme_dma_attr *src, struct vme_dma_attr *dest, size_t count)
1030{
1031 struct ca91cx42_dma_entry *entry, *prev;
1032 struct vme_dma_pci *pci_attr;
1033 struct vme_dma_vme *vme_attr;
1034 dma_addr_t desc_ptr;
1035 int retval = 0;
1036 struct device *dev;
1037
1038 dev = list->parent->parent->parent;
1039
1040 /* XXX descriptor must be aligned on 64-bit boundaries */
1041 entry = kmalloc(sizeof(struct ca91cx42_dma_entry), GFP_KERNEL);
1042 if (entry == NULL) {
1043 dev_err(dev, "Failed to allocate memory for dma resource "
1044 "structure\n");
1045 retval = -ENOMEM;
1046 goto err_mem;
1047 }
1048
1049 /* Test descriptor alignment */
1050 if ((unsigned long)&entry->descriptor & CA91CX42_DCPP_M) {
1051 dev_err(dev, "Descriptor not aligned to 16 byte boundary as "
1052 "required: %p\n", &entry->descriptor);
1053 retval = -EINVAL;
1054 goto err_align;
1055 }
1056
1057 memset(&entry->descriptor, 0, sizeof(struct ca91cx42_dma_descriptor));
1058
1059 if (dest->type == VME_DMA_VME) {
1060 entry->descriptor.dctl |= CA91CX42_DCTL_L2V;
1061 vme_attr = dest->private;
1062 pci_attr = src->private;
1063 } else {
1064 vme_attr = src->private;
1065 pci_attr = dest->private;
1066 }
1067
1068 /* Check we can do fulfill required attributes */
1069 if ((vme_attr->aspace & ~(VME_A16 | VME_A24 | VME_A32 | VME_USER1 |
1070 VME_USER2)) != 0) {
1071
1072 dev_err(dev, "Unsupported cycle type\n");
1073 retval = -EINVAL;
1074 goto err_aspace;
1075 }
1076
1077 if ((vme_attr->cycle & ~(VME_SCT | VME_BLT | VME_SUPER | VME_USER |
1078 VME_PROG | VME_DATA)) != 0) {
1079
1080 dev_err(dev, "Unsupported cycle type\n");
1081 retval = -EINVAL;
1082 goto err_cycle;
1083 }
1084
1085 /* Check to see if we can fulfill source and destination */
1086 if (!(((src->type == VME_DMA_PCI) && (dest->type == VME_DMA_VME)) ||
1087 ((src->type == VME_DMA_VME) && (dest->type == VME_DMA_PCI)))) {
1088
1089 dev_err(dev, "Cannot perform transfer with this "
1090 "source-destination combination\n");
1091 retval = -EINVAL;
1092 goto err_direct;
1093 }
1094
1095 /* Setup cycle types */
1096 if (vme_attr->cycle & VME_BLT)
1097 entry->descriptor.dctl |= CA91CX42_DCTL_VCT_BLT;
1098
1099 /* Setup data width */
1100 switch (vme_attr->dwidth) {
1101 case VME_D8:
1102 entry->descriptor.dctl |= CA91CX42_DCTL_VDW_D8;
1103 break;
1104 case VME_D16:
1105 entry->descriptor.dctl |= CA91CX42_DCTL_VDW_D16;
1106 break;
1107 case VME_D32:
1108 entry->descriptor.dctl |= CA91CX42_DCTL_VDW_D32;
1109 break;
1110 case VME_D64:
1111 entry->descriptor.dctl |= CA91CX42_DCTL_VDW_D64;
1112 break;
1113 default:
1114 dev_err(dev, "Invalid data width\n");
1115 return -EINVAL;
1116 }
1117
1118 /* Setup address space */
1119 switch (vme_attr->aspace) {
1120 case VME_A16:
1121 entry->descriptor.dctl |= CA91CX42_DCTL_VAS_A16;
1122 break;
1123 case VME_A24:
1124 entry->descriptor.dctl |= CA91CX42_DCTL_VAS_A24;
1125 break;
1126 case VME_A32:
1127 entry->descriptor.dctl |= CA91CX42_DCTL_VAS_A32;
1128 break;
1129 case VME_USER1:
1130 entry->descriptor.dctl |= CA91CX42_DCTL_VAS_USER1;
1131 break;
1132 case VME_USER2:
1133 entry->descriptor.dctl |= CA91CX42_DCTL_VAS_USER2;
1134 break;
1135 default:
1136 dev_err(dev, "Invalid address space\n");
1137 return -EINVAL;
1138 break;
1139 }
1140
1141 if (vme_attr->cycle & VME_SUPER)
1142 entry->descriptor.dctl |= CA91CX42_DCTL_SUPER_SUPR;
1143 if (vme_attr->cycle & VME_PROG)
1144 entry->descriptor.dctl |= CA91CX42_DCTL_PGM_PGM;
1145
1146 entry->descriptor.dtbc = count;
1147 entry->descriptor.dla = pci_attr->address;
1148 entry->descriptor.dva = vme_attr->address;
1149 entry->descriptor.dcpp = CA91CX42_DCPP_NULL;
1150
1151 /* Add to list */
1152 list_add_tail(&entry->list, &list->entries);
1153
1154 /* Fill out previous descriptors "Next Address" */
1155 if (entry->list.prev != &list->entries) {
1156 prev = list_entry(entry->list.prev, struct ca91cx42_dma_entry,
1157 list);
1158 /* We need the bus address for the pointer */
1159 desc_ptr = virt_to_bus(&entry->descriptor);
1160 prev->descriptor.dcpp = desc_ptr & ~CA91CX42_DCPP_M;
1161 }
1162
1163 return 0;
1164
1165err_cycle:
1166err_aspace:
1167err_direct:
1168err_align:
1169 kfree(entry);
1170err_mem:
1171 return retval;
1172}
1173
1174static int ca91cx42_dma_busy(struct vme_bridge *ca91cx42_bridge)
1175{
1176 u32 tmp;
1177 struct ca91cx42_driver *bridge;
1178
1179 bridge = ca91cx42_bridge->driver_priv;
1180
1181 tmp = ioread32(bridge->base + DGCS);
1182
1183 if (tmp & CA91CX42_DGCS_ACT)
1184 return 0;
1185 else
1186 return 1;
1187}
1188
1189static int ca91cx42_dma_list_exec(struct vme_dma_list *list)
1190{
1191 struct vme_dma_resource *ctrlr;
1192 struct ca91cx42_dma_entry *entry;
1193 int retval = 0;
1194 dma_addr_t bus_addr;
1195 u32 val;
1196 struct device *dev;
1197 struct ca91cx42_driver *bridge;
1198
1199 ctrlr = list->parent;
1200
1201 bridge = ctrlr->parent->driver_priv;
1202 dev = ctrlr->parent->parent;
1203
1204 mutex_lock(&ctrlr->mtx);
1205
1206 if (!(list_empty(&ctrlr->running))) {
1207 /*
1208 * XXX We have an active DMA transfer and currently haven't
1209 * sorted out the mechanism for "pending" DMA transfers.
1210 * Return busy.
1211 */
1212 /* Need to add to pending here */
1213 mutex_unlock(&ctrlr->mtx);
1214 return -EBUSY;
1215 } else {
1216 list_add(&list->list, &ctrlr->running);
1217 }
1218
1219 /* Get first bus address and write into registers */
1220 entry = list_first_entry(&list->entries, struct ca91cx42_dma_entry,
1221 list);
1222
1223 bus_addr = virt_to_bus(&entry->descriptor);
1224
1225 mutex_unlock(&ctrlr->mtx);
1226
1227 iowrite32(0, bridge->base + DTBC);
1228 iowrite32(bus_addr & ~CA91CX42_DCPP_M, bridge->base + DCPP);
1229
1230 /* Start the operation */
1231 val = ioread32(bridge->base + DGCS);
1232
1233 /* XXX Could set VMEbus On and Off Counters here */
1234 val &= (CA91CX42_DGCS_VON_M | CA91CX42_DGCS_VOFF_M);
1235
1236 val |= (CA91CX42_DGCS_CHAIN | CA91CX42_DGCS_STOP | CA91CX42_DGCS_HALT |
1237 CA91CX42_DGCS_DONE | CA91CX42_DGCS_LERR | CA91CX42_DGCS_VERR |
1238 CA91CX42_DGCS_PERR);
1239
1240 iowrite32(val, bridge->base + DGCS);
1241
1242 val |= CA91CX42_DGCS_GO;
1243
1244 iowrite32(val, bridge->base + DGCS);
1245
1246 wait_event_interruptible(bridge->dma_queue,
1247 ca91cx42_dma_busy(ctrlr->parent));
1248
1249 /*
1250 * Read status register, this register is valid until we kick off a
1251 * new transfer.
1252 */
1253 val = ioread32(bridge->base + DGCS);
1254
1255 if (val & (CA91CX42_DGCS_LERR | CA91CX42_DGCS_VERR |
1256 CA91CX42_DGCS_PERR)) {
1257
1258 dev_err(dev, "ca91c042: DMA Error. DGCS=%08X\n", val);
1259 val = ioread32(bridge->base + DCTL);
1260 }
1261
1262 /* Remove list from running list */
1263 mutex_lock(&ctrlr->mtx);
1264 list_del(&list->list);
1265 mutex_unlock(&ctrlr->mtx);
1266
1267 return retval;
1268
1269}
1270
1271static int ca91cx42_dma_list_empty(struct vme_dma_list *list)
1272{
1273 struct list_head *pos, *temp;
1274 struct ca91cx42_dma_entry *entry;
1275
1276 /* detach and free each entry */
1277 list_for_each_safe(pos, temp, &list->entries) {
1278 list_del(pos);
1279 entry = list_entry(pos, struct ca91cx42_dma_entry, list);
1280 kfree(entry);
1281 }
1282
1283 return 0;
1284}
1285
1286/*
1287 * All 4 location monitors reside at the same base - this is therefore a
1288 * system wide configuration.
1289 *
1290 * This does not enable the LM monitor - that should be done when the first
1291 * callback is attached and disabled when the last callback is removed.
1292 */
1293static int ca91cx42_lm_set(struct vme_lm_resource *lm,
1294 unsigned long long lm_base, vme_address_t aspace, vme_cycle_t cycle)
1295{
1296 u32 temp_base, lm_ctl = 0;
1297 int i;
1298 struct ca91cx42_driver *bridge;
1299 struct device *dev;
1300
1301 bridge = lm->parent->driver_priv;
1302 dev = lm->parent->parent;
1303
1304 /* Check the alignment of the location monitor */
1305 temp_base = (u32)lm_base;
1306 if (temp_base & 0xffff) {
1307 dev_err(dev, "Location monitor must be aligned to 64KB "
1308 "boundary");
1309 return -EINVAL;
1310 }
1311
1312 mutex_lock(&lm->mtx);
1313
1314 /* If we already have a callback attached, we can't move it! */
1315 for (i = 0; i < lm->monitors; i++) {
1316 if (bridge->lm_callback[i] != NULL) {
1317 mutex_unlock(&lm->mtx);
1318 dev_err(dev, "Location monitor callback attached, "
1319 "can't reset\n");
1320 return -EBUSY;
1321 }
1322 }
1323
1324 switch (aspace) {
1325 case VME_A16:
1326 lm_ctl |= CA91CX42_LM_CTL_AS_A16;
1327 break;
1328 case VME_A24:
1329 lm_ctl |= CA91CX42_LM_CTL_AS_A24;
1330 break;
1331 case VME_A32:
1332 lm_ctl |= CA91CX42_LM_CTL_AS_A32;
1333 break;
1334 default:
1335 mutex_unlock(&lm->mtx);
1336 dev_err(dev, "Invalid address space\n");
1337 return -EINVAL;
1338 break;
1339 }
1340
1341 if (cycle & VME_SUPER)
1342 lm_ctl |= CA91CX42_LM_CTL_SUPR;
1343 if (cycle & VME_USER)
1344 lm_ctl |= CA91CX42_LM_CTL_NPRIV;
1345 if (cycle & VME_PROG)
1346 lm_ctl |= CA91CX42_LM_CTL_PGM;
1347 if (cycle & VME_DATA)
1348 lm_ctl |= CA91CX42_LM_CTL_DATA;
1349
1350 iowrite32(lm_base, bridge->base + LM_BS);
1351 iowrite32(lm_ctl, bridge->base + LM_CTL);
1352
1353 mutex_unlock(&lm->mtx);
1354
1355 return 0;
1356}
1357
/* Get configuration of the location monitor and return whether it is enabled
 * or disabled.
 */
1361static int ca91cx42_lm_get(struct vme_lm_resource *lm,
1362 unsigned long long *lm_base, vme_address_t *aspace, vme_cycle_t *cycle)
1363{
1364 u32 lm_ctl, enabled = 0;
1365 struct ca91cx42_driver *bridge;
1366
1367 bridge = lm->parent->driver_priv;
1368
1369 mutex_lock(&lm->mtx);
1370
1371 *lm_base = (unsigned long long)ioread32(bridge->base + LM_BS);
1372 lm_ctl = ioread32(bridge->base + LM_CTL);
1373
1374 if (lm_ctl & CA91CX42_LM_CTL_EN)
1375 enabled = 1;
1376
1377 if ((lm_ctl & CA91CX42_LM_CTL_AS_M) == CA91CX42_LM_CTL_AS_A16)
1378 *aspace = VME_A16;
1379 if ((lm_ctl & CA91CX42_LM_CTL_AS_M) == CA91CX42_LM_CTL_AS_A24)
1380 *aspace = VME_A24;
1381 if ((lm_ctl & CA91CX42_LM_CTL_AS_M) == CA91CX42_LM_CTL_AS_A32)
1382 *aspace = VME_A32;
1383
1384 *cycle = 0;
1385 if (lm_ctl & CA91CX42_LM_CTL_SUPR)
1386 *cycle |= VME_SUPER;
1387 if (lm_ctl & CA91CX42_LM_CTL_NPRIV)
1388 *cycle |= VME_USER;
1389 if (lm_ctl & CA91CX42_LM_CTL_PGM)
1390 *cycle |= VME_PROG;
1391 if (lm_ctl & CA91CX42_LM_CTL_DATA)
1392 *cycle |= VME_DATA;
1393
1394 mutex_unlock(&lm->mtx);
1395
1396 return enabled;
1397}
1398
1399/*
1400 * Attach a callback to a specific location monitor.
1401 *
1402 * Callback will be passed the monitor triggered.
1403 */
/*
 * Attach 'callback' to location monitor 'monitor' and enable its
 * interrupt; also sets the global LM enable if this is the first user.
 *
 * Returns -EINVAL if the monitor has not been configured via lm_set
 * (neither PGM nor DATA cycles selected), -EBUSY if a callback is
 * already attached.
 */
static int ca91cx42_lm_attach(struct vme_lm_resource *lm, int monitor,
	void (*callback)(int))
{
	u32 lm_ctl, tmp;
	struct ca91cx42_driver *bridge;
	struct device *dev;

	bridge = lm->parent->driver_priv;
	dev = lm->parent->parent;

	mutex_lock(&lm->mtx);

	/* Ensure that the location monitor is configured - need PGM or DATA */
	lm_ctl = ioread32(bridge->base + LM_CTL);
	if ((lm_ctl & (CA91CX42_LM_CTL_PGM | CA91CX42_LM_CTL_DATA)) == 0) {
		mutex_unlock(&lm->mtx);
		dev_err(dev, "Location monitor not properly configured\n");
		return -EINVAL;
	}

	/* Check that a callback isn't already attached */
	if (bridge->lm_callback[monitor] != NULL) {
		mutex_unlock(&lm->mtx);
		dev_err(dev, "Existing callback attached\n");
		return -EBUSY;
	}

	/* Attach callback */
	bridge->lm_callback[monitor] = callback;

	/* Enable Location Monitor interrupt */
	tmp = ioread32(bridge->base + LINT_EN);
	tmp |= CA91CX42_LINT_LM[monitor];
	iowrite32(tmp, bridge->base + LINT_EN);

	/* Ensure that global Location Monitor Enable set */
	if ((lm_ctl & CA91CX42_LM_CTL_EN) == 0) {
		lm_ctl |= CA91CX42_LM_CTL_EN;
		iowrite32(lm_ctl, bridge->base + LM_CTL);
	}

	mutex_unlock(&lm->mtx);

	return 0;
}
1449
/*
 * Detach a callback function from a specific location monitor.
 */
/*
 * Detach the callback from location monitor 'monitor': mask and clear
 * its interrupt, drop the callback pointer, and disable the global LM
 * enable once no monitor interrupts remain enabled.  Always returns 0.
 */
static int ca91cx42_lm_detach(struct vme_lm_resource *lm, int monitor)
{
	u32 tmp;
	struct ca91cx42_driver *bridge;

	bridge = lm->parent->driver_priv;

	mutex_lock(&lm->mtx);

	/* Disable Location Monitor and ensure previous interrupts are clear */
	tmp = ioread32(bridge->base + LINT_EN);
	tmp &= ~CA91CX42_LINT_LM[monitor];
	iowrite32(tmp, bridge->base + LINT_EN);

	/* Write-1-to-clear any latched interrupt for this monitor */
	iowrite32(CA91CX42_LINT_LM[monitor],
		 bridge->base + LINT_STAT);

	/* Detach callback */
	bridge->lm_callback[monitor] = NULL;

	/* If all location monitors disabled, disable global Location Monitor */
	if ((tmp & (CA91CX42_LINT_LM0 | CA91CX42_LINT_LM1 | CA91CX42_LINT_LM2 |
			CA91CX42_LINT_LM3)) == 0) {
		tmp = ioread32(bridge->base + LM_CTL);
		tmp &= ~CA91CX42_LM_CTL_EN;
		iowrite32(tmp, bridge->base + LM_CTL);
	}

	mutex_unlock(&lm->mtx);

	return 0;
}
1485
1486static int ca91cx42_slot_get(struct vme_bridge *ca91cx42_bridge)
1487{
1488 u32 slot = 0;
1489 struct ca91cx42_driver *bridge;
1490
1491 bridge = ca91cx42_bridge->driver_priv;
1492
1493 if (!geoid) {
1494 slot = ioread32(bridge->base + VCSR_BS);
1495 slot = ((slot & CA91CX42_VCSR_BS_SLOT_M) >> 27);
1496 } else
1497 slot = geoid;
1498
1499 return (int)slot;
1500
1501}
1502
/* Module init: register the PCI driver; probe runs per matching device. */
static int __init ca91cx42_init(void)
{
	return pci_register_driver(&ca91cx42_driver);
}
1507
1508/*
1509 * Configure CR/CSR space
1510 *
1511 * Access to the CR/CSR can be configured at power-up. The location of the
 * CR/CSR registers in the CR/CSR address space is determined by the board's
 * Auto-ID or Geographic address. This function ensures that the window is
 * enabled at an offset consistent with the board's geographic address.
1515 */
static int ca91cx42_crcsr_init(struct vme_bridge *ca91cx42_bridge,
	struct pci_dev *pdev)
{
	unsigned int crcsr_addr;
	int tmp, slot;
	struct ca91cx42_driver *bridge;

	bridge = ca91cx42_bridge->driver_priv;

	slot = ca91cx42_slot_get(ca91cx42_bridge);

	/* Write CSR Base Address if slot ID is supplied as a module param */
	if (geoid)
		iowrite32(geoid << 27, bridge->base + VCSR_BS);

	dev_info(&pdev->dev, "CR/CSR Offset: %d\n", slot);
	if (slot == 0) {
		dev_err(&pdev->dev, "Slot number is unset, not configuring "
			"CR/CSR space\n");
		return -EINVAL;
	}

	/* Allocate mem for CR/CSR image */
	bridge->crcsr_kernel = pci_alloc_consistent(pdev, VME_CRCSR_BUF_SIZE,
		&bridge->crcsr_bus);
	if (bridge->crcsr_kernel == NULL) {
		dev_err(&pdev->dev, "Failed to allocate memory for CR/CSR "
			"image\n");
		return -ENOMEM;
	}

	memset(bridge->crcsr_kernel, 0, VME_CRCSR_BUF_SIZE);

	/* Each slot owns a 512KB window of CR/CSR space; offset the
	 * translation so our buffer appears at this board's slot offset.
	 */
	crcsr_addr = slot * (512 * 1024);
	iowrite32(bridge->crcsr_bus - crcsr_addr, bridge->base + VCSR_TO);

	/* Enable the CR/CSR slave window */
	tmp = ioread32(bridge->base + VCSR_CTL);
	tmp |= CA91CX42_VCSR_CTL_EN;
	iowrite32(tmp, bridge->base + VCSR_CTL);

	return 0;
}
1558
/*
 * Tear down CR/CSR space: disable the slave window, clear the
 * translation offset, and free the DMA-coherent image buffer.
 */
static void ca91cx42_crcsr_exit(struct vme_bridge *ca91cx42_bridge,
	struct pci_dev *pdev)
{
	u32 tmp;
	struct ca91cx42_driver *bridge;

	bridge = ca91cx42_bridge->driver_priv;

	/* Turn off CR/CSR space */
	tmp = ioread32(bridge->base + VCSR_CTL);
	tmp &= ~CA91CX42_VCSR_CTL_EN;
	iowrite32(tmp, bridge->base + VCSR_CTL);

	/* Free image */
	iowrite32(0, bridge->base + VCSR_TO);

	pci_free_consistent(pdev, VME_CRCSR_BUF_SIZE, bridge->crcsr_kernel,
		bridge->crcsr_bus);
}
1578
/*
 * PCI probe: bring up one Universe II bridge.
 *
 * Sequence: allocate bridge + driver-private structures, enable the PCI
 * device, map BAR0 registers, sanity-check the vendor ID, initialise
 * IRQs, build the master/slave/DMA/location-monitor resource lists,
 * wire up the vme_bridge operation pointers, configure CR/CSR space,
 * then register with the VME core.  On failure, unwinds in strict
 * reverse order via the labelled error path at the bottom.
 */
static int ca91cx42_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
	int retval, i;
	u32 data;
	struct list_head *pos = NULL;
	struct vme_bridge *ca91cx42_bridge;
	struct ca91cx42_driver *ca91cx42_device;
	struct vme_master_resource *master_image;
	struct vme_slave_resource *slave_image;
	struct vme_dma_resource *dma_ctrlr;
	struct vme_lm_resource *lm;

	/* We want to support more than one of each bridge so we need to
	 * dynamically allocate the bridge structure
	 */
	ca91cx42_bridge = kzalloc(sizeof(struct vme_bridge), GFP_KERNEL);

	if (ca91cx42_bridge == NULL) {
		dev_err(&pdev->dev, "Failed to allocate memory for device "
			"structure\n");
		retval = -ENOMEM;
		goto err_struct;
	}

	ca91cx42_device = kzalloc(sizeof(struct ca91cx42_driver), GFP_KERNEL);

	if (ca91cx42_device == NULL) {
		dev_err(&pdev->dev, "Failed to allocate memory for device "
			"structure\n");
		retval = -ENOMEM;
		goto err_driver;
	}

	ca91cx42_bridge->driver_priv = ca91cx42_device;

	/* Enable the device */
	retval = pci_enable_device(pdev);
	if (retval) {
		dev_err(&pdev->dev, "Unable to enable device\n");
		goto err_enable;
	}

	/* Map Registers */
	retval = pci_request_regions(pdev, driver_name);
	if (retval) {
		dev_err(&pdev->dev, "Unable to reserve resources\n");
		goto err_resource;
	}

	/* map registers in BAR 0 */
	ca91cx42_device->base = ioremap_nocache(pci_resource_start(pdev, 0),
		4096);
	if (!ca91cx42_device->base) {
		dev_err(&pdev->dev, "Unable to remap CRG region\n");
		retval = -EIO;
		goto err_remap;
	}

	/* Check to see if the mapping worked out */
	data = ioread32(ca91cx42_device->base + CA91CX42_PCI_ID) & 0x0000FFFF;
	if (data != PCI_VENDOR_ID_TUNDRA) {
		dev_err(&pdev->dev, "PCI_ID check failed\n");
		retval = -EIO;
		goto err_test;
	}

	/* Initialize wait queues & mutual exclusion flags */
	init_waitqueue_head(&ca91cx42_device->dma_queue);
	init_waitqueue_head(&ca91cx42_device->iack_queue);
	mutex_init(&ca91cx42_device->vme_int);
	mutex_init(&ca91cx42_device->vme_rmw);

	ca91cx42_bridge->parent = &pdev->dev;
	strcpy(ca91cx42_bridge->name, driver_name);

	/* Setup IRQ */
	retval = ca91cx42_irq_init(ca91cx42_bridge);
	if (retval != 0) {
		dev_err(&pdev->dev, "Chip Initialization failed.\n");
		goto err_irq;
	}

	/* Add master windows to list */
	INIT_LIST_HEAD(&ca91cx42_bridge->master_resources);
	for (i = 0; i < CA91C142_MAX_MASTER; i++) {
		master_image = kmalloc(sizeof(struct vme_master_resource),
			GFP_KERNEL);
		if (master_image == NULL) {
			dev_err(&pdev->dev, "Failed to allocate memory for "
			"master resource structure\n");
			retval = -ENOMEM;
			goto err_master;
		}
		master_image->parent = ca91cx42_bridge;
		spin_lock_init(&master_image->lock);
		master_image->locked = 0;
		master_image->number = i;
		master_image->address_attr = VME_A16 | VME_A24 | VME_A32 |
			VME_CRCSR | VME_USER1 | VME_USER2;
		master_image->cycle_attr = VME_SCT | VME_BLT | VME_MBLT |
			VME_SUPER | VME_USER | VME_PROG | VME_DATA;
		master_image->width_attr = VME_D8 | VME_D16 | VME_D32 | VME_D64;
		memset(&master_image->bus_resource, 0,
			sizeof(struct resource));
		master_image->kern_base  = NULL;
		list_add_tail(&master_image->list,
			&ca91cx42_bridge->master_resources);
	}

	/* Add slave windows to list */
	INIT_LIST_HEAD(&ca91cx42_bridge->slave_resources);
	for (i = 0; i < CA91C142_MAX_SLAVE; i++) {
		slave_image = kmalloc(sizeof(struct vme_slave_resource),
			GFP_KERNEL);
		if (slave_image == NULL) {
			dev_err(&pdev->dev, "Failed to allocate memory for "
			"slave resource structure\n");
			retval = -ENOMEM;
			goto err_slave;
		}
		slave_image->parent = ca91cx42_bridge;
		mutex_init(&slave_image->mtx);
		slave_image->locked = 0;
		slave_image->number = i;
		slave_image->address_attr = VME_A24 | VME_A32 | VME_USER1 |
			VME_USER2;

		/* Only windows 0 and 4 support A16 */
		if (i == 0 || i == 4)
			slave_image->address_attr |= VME_A16;

		slave_image->cycle_attr = VME_SCT | VME_BLT | VME_MBLT |
			VME_SUPER | VME_USER | VME_PROG | VME_DATA;
		list_add_tail(&slave_image->list,
			&ca91cx42_bridge->slave_resources);
	}

	/* Add dma engines to list */
	INIT_LIST_HEAD(&ca91cx42_bridge->dma_resources);
	for (i = 0; i < CA91C142_MAX_DMA; i++) {
		dma_ctrlr = kmalloc(sizeof(struct vme_dma_resource),
			GFP_KERNEL);
		if (dma_ctrlr == NULL) {
			dev_err(&pdev->dev, "Failed to allocate memory for "
			"dma resource structure\n");
			retval = -ENOMEM;
			goto err_dma;
		}
		dma_ctrlr->parent = ca91cx42_bridge;
		mutex_init(&dma_ctrlr->mtx);
		dma_ctrlr->locked = 0;
		dma_ctrlr->number = i;
		dma_ctrlr->route_attr = VME_DMA_VME_TO_MEM |
			VME_DMA_MEM_TO_VME;
		INIT_LIST_HEAD(&dma_ctrlr->pending);
		INIT_LIST_HEAD(&dma_ctrlr->running);
		list_add_tail(&dma_ctrlr->list,
			&ca91cx42_bridge->dma_resources);
	}

	/* Add location monitor to list */
	INIT_LIST_HEAD(&ca91cx42_bridge->lm_resources);
	lm = kmalloc(sizeof(struct vme_lm_resource), GFP_KERNEL);
	if (lm == NULL) {
		dev_err(&pdev->dev, "Failed to allocate memory for "
		"location monitor resource structure\n");
		retval = -ENOMEM;
		goto err_lm;
	}
	lm->parent = ca91cx42_bridge;
	mutex_init(&lm->mtx);
	lm->locked = 0;
	lm->number = 1;
	lm->monitors = 4;
	list_add_tail(&lm->list, &ca91cx42_bridge->lm_resources);

	/* Wire the generic VME core operations to this bridge's handlers */
	ca91cx42_bridge->slave_get = ca91cx42_slave_get;
	ca91cx42_bridge->slave_set = ca91cx42_slave_set;
	ca91cx42_bridge->master_get = ca91cx42_master_get;
	ca91cx42_bridge->master_set = ca91cx42_master_set;
	ca91cx42_bridge->master_read = ca91cx42_master_read;
	ca91cx42_bridge->master_write = ca91cx42_master_write;
	ca91cx42_bridge->master_rmw = ca91cx42_master_rmw;
	ca91cx42_bridge->dma_list_add = ca91cx42_dma_list_add;
	ca91cx42_bridge->dma_list_exec = ca91cx42_dma_list_exec;
	ca91cx42_bridge->dma_list_empty = ca91cx42_dma_list_empty;
	ca91cx42_bridge->irq_set = ca91cx42_irq_set;
	ca91cx42_bridge->irq_generate = ca91cx42_irq_generate;
	ca91cx42_bridge->lm_set = ca91cx42_lm_set;
	ca91cx42_bridge->lm_get = ca91cx42_lm_get;
	ca91cx42_bridge->lm_attach = ca91cx42_lm_attach;
	ca91cx42_bridge->lm_detach = ca91cx42_lm_detach;
	ca91cx42_bridge->slot_get = ca91cx42_slot_get;

	data = ioread32(ca91cx42_device->base + MISC_CTL);
	dev_info(&pdev->dev, "Board is%s the VME system controller\n",
		(data & CA91CX42_MISC_CTL_SYSCON) ? "" : " not");
	dev_info(&pdev->dev, "Slot ID is %d\n",
		ca91cx42_slot_get(ca91cx42_bridge));

	/* CR/CSR failure is non-fatal: the bridge is usable without it */
	if (ca91cx42_crcsr_init(ca91cx42_bridge, pdev))
		dev_err(&pdev->dev, "CR/CSR configuration failed.\n");

	/* Need to save ca91cx42_bridge pointer locally in link list for use in
	 * ca91cx42_remove()
	 */
	retval = vme_register_bridge(ca91cx42_bridge);
	if (retval != 0) {
		dev_err(&pdev->dev, "Chip Registration failed.\n");
		goto err_reg;
	}

	pci_set_drvdata(pdev, ca91cx42_bridge);

	return 0;

	/* Error unwind: each label undoes the steps completed before the
	 * corresponding failure point, in reverse order of setup.
	 */
err_reg:
	ca91cx42_crcsr_exit(ca91cx42_bridge, pdev);
err_lm:
	/* resources are stored in link list */
	list_for_each(pos, &ca91cx42_bridge->lm_resources) {
		lm = list_entry(pos, struct vme_lm_resource, list);
		list_del(pos);
		kfree(lm);
	}
err_dma:
	/* resources are stored in link list */
	list_for_each(pos, &ca91cx42_bridge->dma_resources) {
		dma_ctrlr = list_entry(pos, struct vme_dma_resource, list);
		list_del(pos);
		kfree(dma_ctrlr);
	}
err_slave:
	/* resources are stored in link list */
	list_for_each(pos, &ca91cx42_bridge->slave_resources) {
		slave_image = list_entry(pos, struct vme_slave_resource, list);
		list_del(pos);
		kfree(slave_image);
	}
err_master:
	/* resources are stored in link list */
	list_for_each(pos, &ca91cx42_bridge->master_resources) {
		master_image = list_entry(pos, struct vme_master_resource,
			list);
		list_del(pos);
		kfree(master_image);
	}

	ca91cx42_irq_exit(ca91cx42_device, pdev);
err_irq:
err_test:
	iounmap(ca91cx42_device->base);
err_remap:
	pci_release_regions(pdev);
err_resource:
	pci_disable_device(pdev);
err_enable:
	kfree(ca91cx42_device);
err_driver:
	kfree(ca91cx42_bridge);
err_struct:
	return retval;

}
1843
1844static void ca91cx42_remove(struct pci_dev *pdev)
1845{
1846 struct list_head *pos = NULL;
1847 struct vme_master_resource *master_image;
1848 struct vme_slave_resource *slave_image;
1849 struct vme_dma_resource *dma_ctrlr;
1850 struct vme_lm_resource *lm;
1851 struct ca91cx42_driver *bridge;
1852 struct vme_bridge *ca91cx42_bridge = pci_get_drvdata(pdev);
1853
1854 bridge = ca91cx42_bridge->driver_priv;
1855
1856
1857 /* Turn off Ints */
1858 iowrite32(0, bridge->base + LINT_EN);
1859
1860 /* Turn off the windows */
1861 iowrite32(0x00800000, bridge->base + LSI0_CTL);
1862 iowrite32(0x00800000, bridge->base + LSI1_CTL);
1863 iowrite32(0x00800000, bridge->base + LSI2_CTL);
1864 iowrite32(0x00800000, bridge->base + LSI3_CTL);
1865 iowrite32(0x00800000, bridge->base + LSI4_CTL);
1866 iowrite32(0x00800000, bridge->base + LSI5_CTL);
1867 iowrite32(0x00800000, bridge->base + LSI6_CTL);
1868 iowrite32(0x00800000, bridge->base + LSI7_CTL);
1869 iowrite32(0x00F00000, bridge->base + VSI0_CTL);
1870 iowrite32(0x00F00000, bridge->base + VSI1_CTL);
1871 iowrite32(0x00F00000, bridge->base + VSI2_CTL);
1872 iowrite32(0x00F00000, bridge->base + VSI3_CTL);
1873 iowrite32(0x00F00000, bridge->base + VSI4_CTL);
1874 iowrite32(0x00F00000, bridge->base + VSI5_CTL);
1875 iowrite32(0x00F00000, bridge->base + VSI6_CTL);
1876 iowrite32(0x00F00000, bridge->base + VSI7_CTL);
1877
1878 vme_unregister_bridge(ca91cx42_bridge);
1879
1880 ca91cx42_crcsr_exit(ca91cx42_bridge, pdev);
1881
1882 /* resources are stored in link list */
1883 list_for_each(pos, &ca91cx42_bridge->lm_resources) {
1884 lm = list_entry(pos, struct vme_lm_resource, list);
1885 list_del(pos);
1886 kfree(lm);
1887 }
1888
1889 /* resources are stored in link list */
1890 list_for_each(pos, &ca91cx42_bridge->dma_resources) {
1891 dma_ctrlr = list_entry(pos, struct vme_dma_resource, list);
1892 list_del(pos);
1893 kfree(dma_ctrlr);
1894 }
1895
1896 /* resources are stored in link list */
1897 list_for_each(pos, &ca91cx42_bridge->slave_resources) {
1898 slave_image = list_entry(pos, struct vme_slave_resource, list);
1899 list_del(pos);
1900 kfree(slave_image);
1901 }
1902
1903 /* resources are stored in link list */
1904 list_for_each(pos, &ca91cx42_bridge->master_resources) {
1905 master_image = list_entry(pos, struct vme_master_resource,
1906 list);
1907 list_del(pos);
1908 kfree(master_image);
1909 }
1910
1911 ca91cx42_irq_exit(bridge, pdev);
1912
1913 iounmap(bridge->base);
1914
1915 pci_release_regions(pdev);
1916
1917 pci_disable_device(pdev);
1918
1919 kfree(ca91cx42_bridge);
1920}
1921
/* Module exit: unregister the PCI driver; remove runs per bound device. */
static void __exit ca91cx42_exit(void)
{
	pci_unregister_driver(&ca91cx42_driver);
}
1926
/* Module parameter: non-zero 'geoid' forces the slot number instead of
 * using the geographic address read from the hardware.
 */
MODULE_PARM_DESC(geoid, "Override geographical addressing");
module_param(geoid, int, 0);

MODULE_DESCRIPTION("VME driver for the Tundra Universe II VME bridge");
MODULE_LICENSE("GPL");

module_init(ca91cx42_init);
module_exit(ca91cx42_exit);
diff --git a/drivers/staging/vme/bridges/vme_ca91cx42.h b/drivers/staging/vme/bridges/vme_ca91cx42.h
new file mode 100644
index 00000000000..02a7c794db0
--- /dev/null
+++ b/drivers/staging/vme/bridges/vme_ca91cx42.h
@@ -0,0 +1,583 @@
1/*
2 * ca91c042.h
3 *
4 * Support for the Tundra Universe 1 and Universe II VME bridge chips
5 *
6 * Author: Tom Armistead
7 * Updated by Ajit Prem
8 * Copyright 2004 Motorola Inc.
9 *
10 * Further updated by Martyn Welch <martyn.welch@ge.com>
11 * Copyright 2009 GE Intelligent Platforms Embedded Systems, Inc.
12 *
13 * Derived from ca91c042.h by Michael Wyrick
14 *
15 * This program is free software; you can redistribute it and/or modify it
16 * under the terms of the GNU General Public License as published by the
17 * Free Software Foundation; either version 2 of the License, or (at your
18 * option) any later version.
19 */
20
21#ifndef _CA91CX42_H
22#define _CA91CX42_H
23
24#ifndef PCI_VENDOR_ID_TUNDRA
25#define PCI_VENDOR_ID_TUNDRA 0x10e3
26#endif
27
28#ifndef PCI_DEVICE_ID_TUNDRA_CA91C142
29#define PCI_DEVICE_ID_TUNDRA_CA91C142 0x0000
30#endif
31
32/*
33 * Define the number of each that the CA91C142 supports.
34 */
35#define CA91C142_MAX_MASTER 8 /* Max Master Windows */
36#define CA91C142_MAX_SLAVE 8 /* Max Slave Windows */
37#define CA91C142_MAX_DMA 1 /* Max DMA Controllers */
38#define CA91C142_MAX_MAILBOX 4 /* Max Mail Box registers */
39
/*
 * Structure used to hold driver specific information.
 * Stored in the generic vme_bridge's driver_priv field.
 */
struct ca91cx42_driver {
	void __iomem *base;	/* Base Address of device registers */
	wait_queue_head_t dma_queue;	/* Woken on DMA completion interrupt */
	wait_queue_head_t iack_queue;	/* Woken when a VME IACK is received */
	wait_queue_head_t mbox_queue;	/* Woken on mailbox interrupt */
	void (*lm_callback[4])(int); /* Called in interrupt handler */
	void *crcsr_kernel;	/* CR/CSR image buffer (kernel virtual addr) */
	dma_addr_t crcsr_bus;	/* CR/CSR image buffer (bus address) */
	struct mutex vme_rmw; /* Only one RMW cycle at a time */
	struct mutex vme_int; /*
			       * Only one VME interrupt can be
			       * generated at a time, provide locking
			       */
};
55
/*
 * Hardware DMA linked-list descriptor layout.
 * See Page 2-77 in the Universe User Manual.
 */
struct ca91cx42_dma_descriptor {
	unsigned int dctl;	/* DMA Control */
	unsigned int dtbc;	/* Transfer Byte Count */
	unsigned int dla;	/* PCI Address */
	unsigned int res1;	/* Reserved */
	unsigned int dva;	/* Vme Address */
	unsigned int res2;	/* Reserved */
	unsigned int dcpp;	/* Pointer to next command packet (DCPP) */
	unsigned int res3;	/* Reserved */
};
67
/* Driver-side wrapper linking a hardware descriptor into a DMA list */
struct ca91cx42_dma_entry {
	struct ca91cx42_dma_descriptor descriptor;	/* must stay first */
	struct list_head list;
};
72
73/* Universe Register Offsets */
74/* general PCI configuration registers */
75#define CA91CX42_PCI_ID 0x000
76#define CA91CX42_PCI_CSR 0x004
77#define CA91CX42_PCI_CLASS 0x008
78#define CA91CX42_PCI_MISC0 0x00C
79#define CA91CX42_PCI_BS 0x010
80#define CA91CX42_PCI_MISC1 0x03C
81
82#define LSI0_CTL 0x0100
83#define LSI0_BS 0x0104
84#define LSI0_BD 0x0108
85#define LSI0_TO 0x010C
86
87#define LSI1_CTL 0x0114
88#define LSI1_BS 0x0118
89#define LSI1_BD 0x011C
90#define LSI1_TO 0x0120
91
92#define LSI2_CTL 0x0128
93#define LSI2_BS 0x012C
94#define LSI2_BD 0x0130
95#define LSI2_TO 0x0134
96
97#define LSI3_CTL 0x013C
98#define LSI3_BS 0x0140
99#define LSI3_BD 0x0144
100#define LSI3_TO 0x0148
101
102#define LSI4_CTL 0x01A0
103#define LSI4_BS 0x01A4
104#define LSI4_BD 0x01A8
105#define LSI4_TO 0x01AC
106
107#define LSI5_CTL 0x01B4
108#define LSI5_BS 0x01B8
109#define LSI5_BD 0x01BC
110#define LSI5_TO 0x01C0
111
112#define LSI6_CTL 0x01C8
113#define LSI6_BS 0x01CC
114#define LSI6_BD 0x01D0
115#define LSI6_TO 0x01D4
116
117#define LSI7_CTL 0x01DC
118#define LSI7_BS 0x01E0
119#define LSI7_BD 0x01E4
120#define LSI7_TO 0x01E8
121
122static const int CA91CX42_LSI_CTL[] = { LSI0_CTL, LSI1_CTL, LSI2_CTL, LSI3_CTL,
123 LSI4_CTL, LSI5_CTL, LSI6_CTL, LSI7_CTL };
124
125static const int CA91CX42_LSI_BS[] = { LSI0_BS, LSI1_BS, LSI2_BS, LSI3_BS,
126 LSI4_BS, LSI5_BS, LSI6_BS, LSI7_BS };
127
128static const int CA91CX42_LSI_BD[] = { LSI0_BD, LSI1_BD, LSI2_BD, LSI3_BD,
129 LSI4_BD, LSI5_BD, LSI6_BD, LSI7_BD };
130
131static const int CA91CX42_LSI_TO[] = { LSI0_TO, LSI1_TO, LSI2_TO, LSI3_TO,
132 LSI4_TO, LSI5_TO, LSI6_TO, LSI7_TO };
133
134#define SCYC_CTL 0x0170
135#define SCYC_ADDR 0x0174
136#define SCYC_EN 0x0178
137#define SCYC_CMP 0x017C
138#define SCYC_SWP 0x0180
139#define LMISC 0x0184
140#define SLSI 0x0188
141#define L_CMDERR 0x018C
142#define LAERR 0x0190
143
144#define DCTL 0x0200
145#define DTBC 0x0204
146#define DLA 0x0208
147#define DVA 0x0210
148#define DCPP 0x0218
149#define DGCS 0x0220
150#define D_LLUE 0x0224
151
152#define LINT_EN 0x0300
153#define LINT_STAT 0x0304
154#define LINT_MAP0 0x0308
155#define LINT_MAP1 0x030C
156#define VINT_EN 0x0310
157#define VINT_STAT 0x0314
158#define VINT_MAP0 0x0318
159#define VINT_MAP1 0x031C
160#define STATID 0x0320
161
162#define V1_STATID 0x0324
163#define V2_STATID 0x0328
164#define V3_STATID 0x032C
165#define V4_STATID 0x0330
166#define V5_STATID 0x0334
167#define V6_STATID 0x0338
168#define V7_STATID 0x033C
169
170static const int CA91CX42_V_STATID[8] = { 0, V1_STATID, V2_STATID, V3_STATID,
171 V4_STATID, V5_STATID, V6_STATID,
172 V7_STATID };
173
174#define LINT_MAP2 0x0340
175#define VINT_MAP2 0x0344
176
177#define MBOX0 0x0348
178#define MBOX1 0x034C
179#define MBOX2 0x0350
180#define MBOX3 0x0354
181#define SEMA0 0x0358
182#define SEMA1 0x035C
183
184#define MAST_CTL 0x0400
185#define MISC_CTL 0x0404
186#define MISC_STAT 0x0408
187#define USER_AM 0x040C
188
189#define VSI0_CTL 0x0F00
190#define VSI0_BS 0x0F04
191#define VSI0_BD 0x0F08
192#define VSI0_TO 0x0F0C
193
194#define VSI1_CTL 0x0F14
195#define VSI1_BS 0x0F18
196#define VSI1_BD 0x0F1C
197#define VSI1_TO 0x0F20
198
199#define VSI2_CTL 0x0F28
200#define VSI2_BS 0x0F2C
201#define VSI2_BD 0x0F30
202#define VSI2_TO 0x0F34
203
204#define VSI3_CTL 0x0F3C
205#define VSI3_BS 0x0F40
206#define VSI3_BD 0x0F44
207#define VSI3_TO 0x0F48
208
209#define LM_CTL 0x0F64
210#define LM_BS 0x0F68
211
212#define VRAI_CTL 0x0F70
213
214#define VRAI_BS 0x0F74
215#define VCSR_CTL 0x0F80
216#define VCSR_TO 0x0F84
217#define V_AMERR 0x0F88
218#define VAERR 0x0F8C
219
220#define VSI4_CTL 0x0F90
221#define VSI4_BS 0x0F94
222#define VSI4_BD 0x0F98
223#define VSI4_TO 0x0F9C
224
225#define VSI5_CTL 0x0FA4
226#define VSI5_BS 0x0FA8
227#define VSI5_BD 0x0FAC
228#define VSI5_TO 0x0FB0
229
230#define VSI6_CTL 0x0FB8
231#define VSI6_BS 0x0FBC
232#define VSI6_BD 0x0FC0
233#define VSI6_TO 0x0FC4
234
235#define VSI7_CTL 0x0FCC
236#define VSI7_BS 0x0FD0
237#define VSI7_BD 0x0FD4
238#define VSI7_TO 0x0FD8
239
240static const int CA91CX42_VSI_CTL[] = { VSI0_CTL, VSI1_CTL, VSI2_CTL, VSI3_CTL,
241 VSI4_CTL, VSI5_CTL, VSI6_CTL, VSI7_CTL };
242
243static const int CA91CX42_VSI_BS[] = { VSI0_BS, VSI1_BS, VSI2_BS, VSI3_BS,
244 VSI4_BS, VSI5_BS, VSI6_BS, VSI7_BS };
245
246static const int CA91CX42_VSI_BD[] = { VSI0_BD, VSI1_BD, VSI2_BD, VSI3_BD,
247 VSI4_BD, VSI5_BD, VSI6_BD, VSI7_BD };
248
249static const int CA91CX42_VSI_TO[] = { VSI0_TO, VSI1_TO, VSI2_TO, VSI3_TO,
250 VSI4_TO, VSI5_TO, VSI6_TO, VSI7_TO };
251
252#define VCSR_CLR 0x0FF4
253#define VCSR_SET 0x0FF8
254#define VCSR_BS 0x0FFC
255
256/*
257 * PCI Class Register
258 * offset 008
259 */
260#define CA91CX42_BM_PCI_CLASS_BASE 0xFF000000
261#define CA91CX42_OF_PCI_CLASS_BASE 24
262#define CA91CX42_BM_PCI_CLASS_SUB 0x00FF0000
263#define CA91CX42_OF_PCI_CLASS_SUB 16
264#define CA91CX42_BM_PCI_CLASS_PROG 0x0000FF00
265#define CA91CX42_OF_PCI_CLASS_PROG 8
266#define CA91CX42_BM_PCI_CLASS_RID 0x000000FF
267#define CA91CX42_OF_PCI_CLASS_RID 0
268
269#define CA91CX42_OF_PCI_CLASS_RID_UNIVERSE_I 0
270#define CA91CX42_OF_PCI_CLASS_RID_UNIVERSE_II 1
271
272/*
273 * PCI Misc Register
274 * offset 00C
275 */
276#define CA91CX42_BM_PCI_MISC0_BISTC 0x80000000
277#define CA91CX42_BM_PCI_MISC0_SBIST 0x60000000
278#define CA91CX42_BM_PCI_MISC0_CCODE 0x0F000000
279#define CA91CX42_BM_PCI_MISC0_MFUNCT 0x00800000
280#define CA91CX42_BM_PCI_MISC0_LAYOUT 0x007F0000
281#define CA91CX42_BM_PCI_MISC0_LTIMER 0x0000FF00
282#define CA91CX42_OF_PCI_MISC0_LTIMER 8
283
284
285/*
286 * LSI Control Register
287 * offset 100
288 */
289#define CA91CX42_LSI_CTL_EN (1<<31)
290#define CA91CX42_LSI_CTL_PWEN (1<<30)
291
292#define CA91CX42_LSI_CTL_VDW_M (3<<22)
293#define CA91CX42_LSI_CTL_VDW_D8 0
294#define CA91CX42_LSI_CTL_VDW_D16 (1<<22)
295#define CA91CX42_LSI_CTL_VDW_D32 (1<<23)
296#define CA91CX42_LSI_CTL_VDW_D64 (3<<22)
297
298#define CA91CX42_LSI_CTL_VAS_M (7<<16)
299#define CA91CX42_LSI_CTL_VAS_A16 0
300#define CA91CX42_LSI_CTL_VAS_A24 (1<<16)
301#define CA91CX42_LSI_CTL_VAS_A32 (1<<17)
302#define CA91CX42_LSI_CTL_VAS_CRCSR (5<<16)
303#define CA91CX42_LSI_CTL_VAS_USER1 (3<<17)
304#define CA91CX42_LSI_CTL_VAS_USER2 (7<<16)
305
306#define CA91CX42_LSI_CTL_PGM_M (1<<14)
307#define CA91CX42_LSI_CTL_PGM_DATA 0
308#define CA91CX42_LSI_CTL_PGM_PGM (1<<14)
309
310#define CA91CX42_LSI_CTL_SUPER_M (1<<12)
311#define CA91CX42_LSI_CTL_SUPER_NPRIV 0
312#define CA91CX42_LSI_CTL_SUPER_SUPR (1<<12)
313
314#define CA91CX42_LSI_CTL_VCT_M (1<<8)
315#define CA91CX42_LSI_CTL_VCT_BLT (1<<8)
316#define CA91CX42_LSI_CTL_VCT_MBLT (1<<8)
317#define CA91CX42_LSI_CTL_LAS (1<<0)
318
319/*
320 * SCYC_CTL Register
321 * offset 178
322 */
323#define CA91CX42_SCYC_CTL_LAS_PCIMEM 0
324#define CA91CX42_SCYC_CTL_LAS_PCIIO (1<<2)
325
326#define CA91CX42_SCYC_CTL_CYC_M (3<<0)
327#define CA91CX42_SCYC_CTL_CYC_RMW (1<<0)
328#define CA91CX42_SCYC_CTL_CYC_ADOH (1<<1)
329
330/*
331 * LMISC Register
332 * offset 184
333 */
334#define CA91CX42_BM_LMISC_CRT 0xF0000000
335#define CA91CX42_OF_LMISC_CRT 28
336#define CA91CX42_BM_LMISC_CWT 0x0F000000
337#define CA91CX42_OF_LMISC_CWT 24
338
339/*
340 * SLSI Register
341 * offset 188
342 */
343#define CA91CX42_BM_SLSI_EN 0x80000000
344#define CA91CX42_BM_SLSI_PWEN 0x40000000
345#define CA91CX42_BM_SLSI_VDW 0x00F00000
346#define CA91CX42_OF_SLSI_VDW 20
347#define CA91CX42_BM_SLSI_PGM 0x0000F000
348#define CA91CX42_OF_SLSI_PGM 12
349#define CA91CX42_BM_SLSI_SUPER 0x00000F00
350#define CA91CX42_OF_SLSI_SUPER 8
351#define CA91CX42_BM_SLSI_BS 0x000000F6
352#define CA91CX42_OF_SLSI_BS 2
353#define CA91CX42_BM_SLSI_LAS 0x00000003
354#define CA91CX42_OF_SLSI_LAS 0
355#define CA91CX42_BM_SLSI_RESERVED 0x3F0F0000
356
/*
 * DCTL Register
 * offset 200
 *
 * Note: duplicate definition of CA91CX42_DCTL_VDW_M removed (it was
 * previously defined twice with the same value).
 */
#define CA91CX42_DCTL_L2V              (1<<31)

/* VME maximum data width for the DMA transfer */
#define CA91CX42_DCTL_VDW_M            (3<<22)
#define CA91CX42_DCTL_VDW_D8           0
#define CA91CX42_DCTL_VDW_D16          (1<<22)
#define CA91CX42_DCTL_VDW_D32          (1<<23)
#define CA91CX42_DCTL_VDW_D64          (3<<22)

/* VME address space used for the DMA transfer */
#define CA91CX42_DCTL_VAS_M            (7<<16)
#define CA91CX42_DCTL_VAS_A16          0
#define CA91CX42_DCTL_VAS_A24          (1<<16)
#define CA91CX42_DCTL_VAS_A32          (1<<17)
#define CA91CX42_DCTL_VAS_USER1        (3<<17)
#define CA91CX42_DCTL_VAS_USER2        (7<<16)

/* Program/data access mode */
#define CA91CX42_DCTL_PGM_M            (1<<14)
#define CA91CX42_DCTL_PGM_DATA         0
#define CA91CX42_DCTL_PGM_PGM          (1<<14)

/* Supervisor/non-privileged access mode */
#define CA91CX42_DCTL_SUPER_M          (1<<12)
#define CA91CX42_DCTL_SUPER_NPRIV      0
#define CA91CX42_DCTL_SUPER_SUPR       (1<<12)

/* VME cycle type */
#define CA91CX42_DCTL_VCT_M            (1<<8)
#define CA91CX42_DCTL_VCT_BLT          (1<<8)
#define CA91CX42_DCTL_LD64EN           (1<<7)
387
388/*
389 * DCPP Register
390 * offset 218
391 */
392#define CA91CX42_DCPP_M 0xf
393#define CA91CX42_DCPP_NULL (1<<0)
394
395/*
396 * DMA General Control/Status Register (DGCS)
397 * offset 220
398 */
399#define CA91CX42_DGCS_GO (1<<31)
400#define CA91CX42_DGCS_STOP_REQ (1<<30)
401#define CA91CX42_DGCS_HALT_REQ (1<<29)
402#define CA91CX42_DGCS_CHAIN (1<<27)
403
404#define CA91CX42_DGCS_VON_M (7<<20)
405
406#define CA91CX42_DGCS_VOFF_M (0xf<<16)
407
408#define CA91CX42_DGCS_ACT (1<<15)
409#define CA91CX42_DGCS_STOP (1<<14)
410#define CA91CX42_DGCS_HALT (1<<13)
411#define CA91CX42_DGCS_DONE (1<<11)
412#define CA91CX42_DGCS_LERR (1<<10)
413#define CA91CX42_DGCS_VERR (1<<9)
414#define CA91CX42_DGCS_PERR (1<<8)
415#define CA91CX42_DGCS_INT_STOP (1<<6)
416#define CA91CX42_DGCS_INT_HALT (1<<5)
417#define CA91CX42_DGCS_INT_DONE (1<<3)
418#define CA91CX42_DGCS_INT_LERR (1<<2)
419#define CA91CX42_DGCS_INT_VERR (1<<1)
420#define CA91CX42_DGCS_INT_PERR (1<<0)
421
422/*
423 * PCI Interrupt Enable Register
424 * offset 300
425 */
426#define CA91CX42_LINT_LM3 0x00800000
427#define CA91CX42_LINT_LM2 0x00400000
428#define CA91CX42_LINT_LM1 0x00200000
429#define CA91CX42_LINT_LM0 0x00100000
430#define CA91CX42_LINT_MBOX3 0x00080000
431#define CA91CX42_LINT_MBOX2 0x00040000
432#define CA91CX42_LINT_MBOX1 0x00020000
433#define CA91CX42_LINT_MBOX0 0x00010000
434#define CA91CX42_LINT_ACFAIL 0x00008000
435#define CA91CX42_LINT_SYSFAIL 0x00004000
436#define CA91CX42_LINT_SW_INT 0x00002000
437#define CA91CX42_LINT_SW_IACK 0x00001000
438
439#define CA91CX42_LINT_VERR 0x00000400
440#define CA91CX42_LINT_LERR 0x00000200
441#define CA91CX42_LINT_DMA 0x00000100
442#define CA91CX42_LINT_VIRQ7 0x00000080
443#define CA91CX42_LINT_VIRQ6 0x00000040
444#define CA91CX42_LINT_VIRQ5 0x00000020
445#define CA91CX42_LINT_VIRQ4 0x00000010
446#define CA91CX42_LINT_VIRQ3 0x00000008
447#define CA91CX42_LINT_VIRQ2 0x00000004
448#define CA91CX42_LINT_VIRQ1 0x00000002
449#define CA91CX42_LINT_VOWN 0x00000001
450
451static const int CA91CX42_LINT_VIRQ[] = { 0, CA91CX42_LINT_VIRQ1,
452 CA91CX42_LINT_VIRQ2, CA91CX42_LINT_VIRQ3,
453 CA91CX42_LINT_VIRQ4, CA91CX42_LINT_VIRQ5,
454 CA91CX42_LINT_VIRQ6, CA91CX42_LINT_VIRQ7 };
455
456#define CA91CX42_LINT_MBOX 0x000F0000
457
458static const int CA91CX42_LINT_LM[] = { CA91CX42_LINT_LM0, CA91CX42_LINT_LM1,
459 CA91CX42_LINT_LM2, CA91CX42_LINT_LM3 };
460
461/*
462 * MAST_CTL Register
463 * offset 400
464 */
465#define CA91CX42_BM_MAST_CTL_MAXRTRY 0xF0000000
466#define CA91CX42_OF_MAST_CTL_MAXRTRY 28
467#define CA91CX42_BM_MAST_CTL_PWON 0x0F000000
468#define CA91CX42_OF_MAST_CTL_PWON 24
469#define CA91CX42_BM_MAST_CTL_VRL 0x00C00000
470#define CA91CX42_OF_MAST_CTL_VRL 22
471#define CA91CX42_BM_MAST_CTL_VRM 0x00200000
472#define CA91CX42_BM_MAST_CTL_VREL 0x00100000
473#define CA91CX42_BM_MAST_CTL_VOWN 0x00080000
474#define CA91CX42_BM_MAST_CTL_VOWN_ACK 0x00040000
475#define CA91CX42_BM_MAST_CTL_PABS 0x00001000
476#define CA91CX42_BM_MAST_CTL_BUS_NO 0x0000000F
477#define CA91CX42_OF_MAST_CTL_BUS_NO 0
478
479/*
480 * MISC_CTL Register
481 * offset 404
482 */
483#define CA91CX42_MISC_CTL_VBTO 0xF0000000
484#define CA91CX42_MISC_CTL_VARB 0x04000000
485#define CA91CX42_MISC_CTL_VARBTO 0x03000000
486#define CA91CX42_MISC_CTL_SW_LRST 0x00800000
487#define CA91CX42_MISC_CTL_SW_SRST 0x00400000
488#define CA91CX42_MISC_CTL_BI 0x00100000
489#define CA91CX42_MISC_CTL_ENGBI 0x00080000
490#define CA91CX42_MISC_CTL_RESCIND 0x00040000
491#define CA91CX42_MISC_CTL_SYSCON 0x00020000
492#define CA91CX42_MISC_CTL_V64AUTO 0x00010000
493#define CA91CX42_MISC_CTL_RESERVED 0x0820FFFF
494
495#define CA91CX42_OF_MISC_CTL_VARBTO 24
496#define CA91CX42_OF_MISC_CTL_VBTO 28
497
498/*
499 * MISC_STAT Register
500 * offset 408
501 */
502#define CA91CX42_BM_MISC_STAT_ENDIAN 0x80000000
503#define CA91CX42_BM_MISC_STAT_LCLSIZE 0x40000000
504#define CA91CX42_BM_MISC_STAT_DY4AUTO 0x08000000
505#define CA91CX42_BM_MISC_STAT_MYBBSY 0x00200000
506#define CA91CX42_BM_MISC_STAT_DY4DONE 0x00080000
507#define CA91CX42_BM_MISC_STAT_TXFE 0x00040000
508#define CA91CX42_BM_MISC_STAT_RXFE 0x00020000
509#define CA91CX42_BM_MISC_STAT_DY4AUTOID 0x0000FF00
510#define CA91CX42_OF_MISC_STAT_DY4AUTOID 8
511
512/*
513 * VSI Control Register
514 * offset F00
515 */
516#define CA91CX42_VSI_CTL_EN (1<<31)
517#define CA91CX42_VSI_CTL_PWEN (1<<30)
518#define CA91CX42_VSI_CTL_PREN (1<<29)
519
520#define CA91CX42_VSI_CTL_PGM_M (3<<22)
521#define CA91CX42_VSI_CTL_PGM_DATA (1<<22)
522#define CA91CX42_VSI_CTL_PGM_PGM (1<<23)
523
524#define CA91CX42_VSI_CTL_SUPER_M (3<<20)
525#define CA91CX42_VSI_CTL_SUPER_NPRIV (1<<20)
526#define CA91CX42_VSI_CTL_SUPER_SUPR (1<<21)
527
528#define CA91CX42_VSI_CTL_VAS_M (7<<16)
529#define CA91CX42_VSI_CTL_VAS_A16 0
530#define CA91CX42_VSI_CTL_VAS_A24 (1<<16)
531#define CA91CX42_VSI_CTL_VAS_A32 (1<<17)
532#define CA91CX42_VSI_CTL_VAS_USER1 (3<<17)
533#define CA91CX42_VSI_CTL_VAS_USER2 (7<<16)
534
535#define CA91CX42_VSI_CTL_LD64EN (1<<7)
536#define CA91CX42_VSI_CTL_LLRMW (1<<6)
537
538#define CA91CX42_VSI_CTL_LAS_M (3<<0)
539#define CA91CX42_VSI_CTL_LAS_PCI_MS 0
540#define CA91CX42_VSI_CTL_LAS_PCI_IO (1<<0)
541#define CA91CX42_VSI_CTL_LAS_PCI_CONF (1<<1)
542
543/* LM_CTL Register
544 * offset F64
545 */
546#define CA91CX42_LM_CTL_EN (1<<31)
547#define CA91CX42_LM_CTL_PGM (1<<23)
548#define CA91CX42_LM_CTL_DATA (1<<22)
549#define CA91CX42_LM_CTL_SUPR (1<<21)
550#define CA91CX42_LM_CTL_NPRIV (1<<20)
551#define CA91CX42_LM_CTL_AS_M (5<<16)
552#define CA91CX42_LM_CTL_AS_A16 0
553#define CA91CX42_LM_CTL_AS_A24 (1<<16)
554#define CA91CX42_LM_CTL_AS_A32 (1<<17)
555
556/*
557 * VRAI_CTL Register
558 * offset F70
559 */
560#define CA91CX42_BM_VRAI_CTL_EN 0x80000000
561#define CA91CX42_BM_VRAI_CTL_PGM 0x00C00000
562#define CA91CX42_OF_VRAI_CTL_PGM 22
563#define CA91CX42_BM_VRAI_CTL_SUPER 0x00300000
564#define CA91CX42_OF_VRAI_CTL_SUPER 20
565#define CA91CX42_BM_VRAI_CTL_VAS 0x00030000
566#define CA91CX42_OF_VRAI_CTL_VAS 16
567
568/* VCSR_CTL Register
569 * offset F80
570 */
571#define CA91CX42_VCSR_CTL_EN (1<<31)
572
573#define CA91CX42_VCSR_CTL_LAS_M (3<<0)
574#define CA91CX42_VCSR_CTL_LAS_PCI_MS 0
575#define CA91CX42_VCSR_CTL_LAS_PCI_IO (1<<0)
576#define CA91CX42_VCSR_CTL_LAS_PCI_CONF (1<<1)
577
578/* VCSR_BS Register
579 * offset FFC
580 */
581#define CA91CX42_VCSR_BS_SLOT_M (0x1F<<27)
582
583#endif /* _CA91CX42_H */
diff --git a/drivers/staging/vme/bridges/vme_tsi148.c b/drivers/staging/vme/bridges/vme_tsi148.c
new file mode 100644
index 00000000000..9c539513c74
--- /dev/null
+++ b/drivers/staging/vme/bridges/vme_tsi148.c
@@ -0,0 +1,2640 @@
1/*
2 * Support for the Tundra TSI148 VME-PCI Bridge Chip
3 *
4 * Author: Martyn Welch <martyn.welch@ge.com>
5 * Copyright 2008 GE Intelligent Platforms Embedded Systems, Inc.
6 *
7 * Based on work by Tom Armistead and Ajit Prem
8 * Copyright 2004 Motorola Inc.
9 *
10 * This program is free software; you can redistribute it and/or modify it
11 * under the terms of the GNU General Public License as published by the
12 * Free Software Foundation; either version 2 of the License, or (at your
13 * option) any later version.
14 */
15
16#include <linux/module.h>
17#include <linux/moduleparam.h>
18#include <linux/mm.h>
19#include <linux/types.h>
20#include <linux/errno.h>
21#include <linux/proc_fs.h>
22#include <linux/pci.h>
23#include <linux/poll.h>
24#include <linux/dma-mapping.h>
25#include <linux/interrupt.h>
26#include <linux/spinlock.h>
27#include <linux/sched.h>
28#include <linux/slab.h>
29#include <linux/time.h>
30#include <linux/io.h>
31#include <linux/uaccess.h>
32
33#include "../vme.h"
34#include "../vme_bridge.h"
35#include "vme_tsi148.h"
36
/* Forward declarations for the PCI driver callbacks */
static int __init tsi148_init(void);
static int tsi148_probe(struct pci_dev *, const struct pci_device_id *);
static void tsi148_remove(struct pci_dev *);
static void __exit tsi148_exit(void);


/* Module parameters */
static int err_chk;	/* enable VME bus error checking if non-zero */
static int geoid;	/* override geographical (slot-based) addressing */

static const char driver_name[] = "vme_tsi148";

/* PCI IDs this driver binds to: the Tundra TSI148 bridge only */
static DEFINE_PCI_DEVICE_TABLE(tsi148_ids) = {
	{ PCI_DEVICE(PCI_VENDOR_ID_TUNDRA, PCI_DEVICE_ID_TUNDRA_TSI148) },
	{ },
};

static struct pci_driver tsi148_driver = {
	.name = driver_name,
	.id_table = tsi148_ids,
	.probe = tsi148_probe,
	.remove = tsi148_remove,
};
60
/*
 * Combine two 32-bit register halves into one 64-bit value.
 * 'high' supplies bits 63:32 and 'low' supplies bits 31:0 of *variable.
 */
static void reg_join(unsigned int high, unsigned int low,
	unsigned long long *variable)
{
	*variable = ((unsigned long long)high << 32) |
		(unsigned long long)low;
}
67
/*
 * Split a 64-bit value into its two 32-bit register halves:
 * *high receives bits 63:32, *low receives bits 31:0.
 */
static void reg_split(unsigned long long variable, unsigned int *high,
	unsigned int *low)
{
	*high = (unsigned int)(variable >> 32);
	*low = (unsigned int)(variable & 0xFFFFFFFF);
}
74
/*
 * DMA interrupt sub-handler: wakes up the DMA queue for each channel
 * whose status bit is set in channel_mask.
 *
 * Returns the set of interrupt-clear bits to write back to INTC.
 */
static u32 tsi148_DMA_irqhandler(struct tsi148_driver *bridge,
	int channel_mask)
{
	u32 serviced = 0;

	if (channel_mask & TSI148_LCSR_INTS_DMA0S) {
		wake_up(&bridge->dma_queue[0]);
		serviced |= TSI148_LCSR_INTC_DMA0C;
	}
	if (channel_mask & TSI148_LCSR_INTS_DMA1S) {
		wake_up(&bridge->dma_queue[1]);
		serviced |= TSI148_LCSR_INTC_DMA1C;
	}

	return serviced;
}
94
/*
 * Location monitor interrupt sub-handler: invokes the registered callback
 * for each of the four location monitors whose status bit is set.
 *
 * Returns the set of interrupt-clear bits to write back to INTC.
 */
static u32 tsi148_LM_irqhandler(struct tsi148_driver *bridge, u32 stat)
{
	int i;
	u32 serviced = 0;

	for (i = 0; i < 4; i++) {
		if (stat & TSI148_LCSR_INTS_LMS[i]) {
			/* We only enable interrupts if the callback is set */
			bridge->lm_callback[i](i);
			serviced |= TSI148_LCSR_INTC_LMC[i];
		}
	}

	return serviced;
}
113
/*
 * Mailbox interrupt sub-handler: reads and logs the value of each mailbox
 * whose status bit is set.
 *
 * XXX This functionality is not exposed up through the API.
 *
 * Returns the set of interrupt-clear bits to write back to INTC.
 */
static u32 tsi148_MB_irqhandler(struct vme_bridge *tsi148_bridge, u32 stat)
{
	int i;
	u32 val;
	u32 serviced = 0;
	struct tsi148_driver *bridge;

	bridge = tsi148_bridge->driver_priv;

	for (i = 0; i < 4; i++) {
		if (stat & TSI148_LCSR_INTS_MBS[i]) {
			val = ioread32be(bridge->base + TSI148_GCSR_MBOX[i]);
			dev_err(tsi148_bridge->parent, "VME Mailbox %d received"
				": 0x%x\n", i, val);
			serviced |= TSI148_LCSR_INTC_MBC[i];
		}
	}

	return serviced;
}
139
/*
 * Display error & status message when PERR (PCI) exception interrupt occurs,
 * then clear the exception latch so further exceptions can be captured.
 *
 * Returns the PERR interrupt-clear bit for INTC.
 */
static u32 tsi148_PERR_irqhandler(struct vme_bridge *tsi148_bridge)
{
	struct tsi148_driver *bridge;

	bridge = tsi148_bridge->driver_priv;

	dev_err(tsi148_bridge->parent, "PCI Exception at address: 0x%08x:%08x, "
		"attributes: %08x\n",
		ioread32be(bridge->base + TSI148_LCSR_EDPAU),
		ioread32be(bridge->base + TSI148_LCSR_EDPAL),
		ioread32be(bridge->base + TSI148_LCSR_EDPAT));

	dev_err(tsi148_bridge->parent, "PCI-X attribute reg: %08x, PCI-X split "
		"completion reg: %08x\n",
		ioread32be(bridge->base + TSI148_LCSR_EDPXA),
		ioread32be(bridge->base + TSI148_LCSR_EDPXS));

	/* Writing EDPCL clears the latched exception state */
	iowrite32be(TSI148_LCSR_EDPAT_EDPCL, bridge->base + TSI148_LCSR_EDPAT);

	return TSI148_LCSR_INTC_PERRC;
}
164
/*
 * Save address and status when a VME error interrupt occurs.
 *
 * The faulting address and attributes are appended to the bridge's
 * vme_errors list (consumed by tsi148_find_error/tsi148_clear_errors);
 * if the allocation fails the error is only logged.  GFP_ATOMIC is used
 * because this runs in interrupt context.
 *
 * Returns the VERR interrupt-clear bit for INTC.
 */
static u32 tsi148_VERR_irqhandler(struct vme_bridge *tsi148_bridge)
{
	unsigned int error_addr_high, error_addr_low;
	unsigned long long error_addr;
	u32 error_attrib;
	struct vme_bus_error *error;
	struct tsi148_driver *bridge;

	bridge = tsi148_bridge->driver_priv;

	error_addr_high = ioread32be(bridge->base + TSI148_LCSR_VEAU);
	error_addr_low = ioread32be(bridge->base + TSI148_LCSR_VEAL);
	error_attrib = ioread32be(bridge->base + TSI148_LCSR_VEAT);

	reg_join(error_addr_high, error_addr_low, &error_addr);

	/* Check for exception register overflow (we have lost error data) */
	if (error_attrib & TSI148_LCSR_VEAT_VEOF) {
		dev_err(tsi148_bridge->parent, "VME Bus Exception Overflow "
			"Occurred\n");
	}

	error = kmalloc(sizeof(struct vme_bus_error), GFP_ATOMIC);
	if (error) {
		error->address = error_addr;
		error->attributes = error_attrib;
		list_add_tail(&error->list, &tsi148_bridge->vme_errors);
	} else {
		dev_err(tsi148_bridge->parent, "Unable to alloc memory for "
			"VMEbus Error reporting\n");
		dev_err(tsi148_bridge->parent, "VME Bus Error at address: "
			"0x%llx, attributes: %08x\n", error_addr, error_attrib);
	}

	/* Clear Status so the next error can be latched */
	iowrite32be(TSI148_LCSR_VEAT_VESCL, bridge->base + TSI148_LCSR_VEAT);

	return TSI148_LCSR_INTC_VERRC;
}
207
/*
 * IACK interrupt sub-handler: wake up anyone waiting in
 * tsi148_irq_generate() for the generated interrupt to be acknowledged.
 *
 * Returns the IACK interrupt-clear bit for INTC.
 */
static u32 tsi148_IACK_irqhandler(struct tsi148_driver *bridge)
{
	wake_up(&bridge->iack_queue);

	return TSI148_LCSR_INTC_IACKC;
}
217
/*
 * VME bus interrupt sub-handler: issue an IACK cycle for each pending
 * level (highest priority, level 7, first) and dispatch the resulting
 * status/ID vector to the registered VME interrupt callback.
 *
 * Returns the set of serviced level bits.
 */
static u32 tsi148_VIRQ_irqhandler(struct vme_bridge *tsi148_bridge,
	u32 stat)
{
	int vec, i, serviced = 0;
	struct tsi148_driver *bridge;

	bridge = tsi148_bridge->driver_priv;

	for (i = 7; i > 0; i--) {
		if (stat & (1 << i)) {
			/*
			 * Note: Even though the registers are defined as
			 * 32-bits in the spec, we only want to issue 8-bit
			 * IACK cycles on the bus, read from offset 3.
			 */
			vec = ioread8(bridge->base + TSI148_LCSR_VIACK[i] + 3);

			vme_irq_handler(tsi148_bridge, i, vec);

			serviced |= (1 << i);
		}
	}

	return serviced;
}
246
/*
 * Top level interrupt handler. Clears appropriate interrupt status bits and
 * then calls appropriate sub handler(s).
 *
 * Reads the enabled (INTEO) and pending (INTS) interrupt registers, masks
 * pending by enabled, dispatches each group to its sub-handler, then writes
 * the accumulated "serviced" bits to INTC to clear them.
 */
static irqreturn_t tsi148_irqhandler(int irq, void *ptr)
{
	u32 stat, enable, serviced = 0;
	struct vme_bridge *tsi148_bridge;
	struct tsi148_driver *bridge;

	tsi148_bridge = ptr;

	bridge = tsi148_bridge->driver_priv;

	/* Determine which interrupts are unmasked and set */
	enable = ioread32be(bridge->base + TSI148_LCSR_INTEO);
	stat = ioread32be(bridge->base + TSI148_LCSR_INTS);

	/* Only look at unmasked interrupts */
	stat &= enable;

	/* Not ours: the IRQ line is shared (IRQF_SHARED) */
	if (unlikely(!stat))
		return IRQ_NONE;

	/* Call subhandlers as appropriate */
	/* DMA irqs */
	if (stat & (TSI148_LCSR_INTS_DMA1S | TSI148_LCSR_INTS_DMA0S))
		serviced |= tsi148_DMA_irqhandler(bridge, stat);

	/* Location monitor irqs */
	if (stat & (TSI148_LCSR_INTS_LM3S | TSI148_LCSR_INTS_LM2S |
			TSI148_LCSR_INTS_LM1S | TSI148_LCSR_INTS_LM0S))
		serviced |= tsi148_LM_irqhandler(bridge, stat);

	/* Mail box irqs */
	if (stat & (TSI148_LCSR_INTS_MB3S | TSI148_LCSR_INTS_MB2S |
			TSI148_LCSR_INTS_MB1S | TSI148_LCSR_INTS_MB0S))
		serviced |= tsi148_MB_irqhandler(tsi148_bridge, stat);

	/* PCI bus error */
	if (stat & TSI148_LCSR_INTS_PERRS)
		serviced |= tsi148_PERR_irqhandler(tsi148_bridge);

	/* VME bus error */
	if (stat & TSI148_LCSR_INTS_VERRS)
		serviced |= tsi148_VERR_irqhandler(tsi148_bridge);

	/* IACK irq */
	if (stat & TSI148_LCSR_INTS_IACKS)
		serviced |= tsi148_IACK_irqhandler(bridge);

	/* VME bus irqs */
	if (stat & (TSI148_LCSR_INTS_IRQ7S | TSI148_LCSR_INTS_IRQ6S |
			TSI148_LCSR_INTS_IRQ5S | TSI148_LCSR_INTS_IRQ4S |
			TSI148_LCSR_INTS_IRQ3S | TSI148_LCSR_INTS_IRQ2S |
			TSI148_LCSR_INTS_IRQ1S))
		serviced |= tsi148_VIRQ_irqhandler(tsi148_bridge, stat);

	/* Clear serviced interrupts */
	iowrite32be(serviced, bridge->base + TSI148_LCSR_INTC);

	return IRQ_HANDLED;
}
310
/*
 * Register the (shared) PCI interrupt handler and enable the base set of
 * bridge interrupts: DMA, mailbox, PCI error, VME error and IACK.
 *
 * Returns 0 on success or the negative error from request_irq().
 */
static int tsi148_irq_init(struct vme_bridge *tsi148_bridge)
{
	int result;
	unsigned int tmp;
	struct pci_dev *pdev;
	struct tsi148_driver *bridge;

	pdev = container_of(tsi148_bridge->parent, struct pci_dev, dev);

	bridge = tsi148_bridge->driver_priv;

	/* Initialise list for VME bus errors */
	INIT_LIST_HEAD(&tsi148_bridge->vme_errors);

	mutex_init(&tsi148_bridge->irq_mtx);

	result = request_irq(pdev->irq,
			     tsi148_irqhandler,
			     IRQF_SHARED,
			     driver_name, tsi148_bridge);
	if (result) {
		dev_err(tsi148_bridge->parent, "Can't get assigned pci irq "
			"vector %02X\n", pdev->irq);
		return result;
	}

	/* Enable and unmask interrupts */
	tmp = TSI148_LCSR_INTEO_DMA1EO | TSI148_LCSR_INTEO_DMA0EO |
		TSI148_LCSR_INTEO_MB3EO | TSI148_LCSR_INTEO_MB2EO |
		TSI148_LCSR_INTEO_MB1EO | TSI148_LCSR_INTEO_MB0EO |
		TSI148_LCSR_INTEO_PERREO | TSI148_LCSR_INTEO_VERREO |
		TSI148_LCSR_INTEO_IACKEO;

	/* This leaves the following interrupts masked.
	 * TSI148_LCSR_INTEO_VIEEO
	 * TSI148_LCSR_INTEO_SYSFLEO
	 * TSI148_LCSR_INTEO_ACFLEO
	 */

	/* Don't enable Location Monitor interrupts here - they will be
	 * enabled when the location monitors are properly configured and
	 * a callback has been attached.
	 * TSI148_LCSR_INTEO_LM0EO
	 * TSI148_LCSR_INTEO_LM1EO
	 * TSI148_LCSR_INTEO_LM2EO
	 * TSI148_LCSR_INTEO_LM3EO
	 */

	/* Don't enable VME interrupts until we add a handler, else the board
	 * will respond to it and we don't want that unless it knows how to
	 * properly deal with it.
	 * TSI148_LCSR_INTEO_IRQ7EO
	 * TSI148_LCSR_INTEO_IRQ6EO
	 * TSI148_LCSR_INTEO_IRQ5EO
	 * TSI148_LCSR_INTEO_IRQ4EO
	 * TSI148_LCSR_INTEO_IRQ3EO
	 * TSI148_LCSR_INTEO_IRQ2EO
	 * TSI148_LCSR_INTEO_IRQ1EO
	 */

	iowrite32be(tmp, bridge->base + TSI148_LCSR_INTEO);
	iowrite32be(tmp, bridge->base + TSI148_LCSR_INTEN);

	return 0;
}
376
/*
 * Mirror of tsi148_irq_init(): mask and clear every bridge interrupt,
 * then release the PCI interrupt line.
 */
static void tsi148_irq_exit(struct vme_bridge *tsi148_bridge,
	struct pci_dev *pdev)
{
	struct tsi148_driver *bridge = tsi148_bridge->driver_priv;

	/* Turn off interrupts */
	iowrite32be(0x0, bridge->base + TSI148_LCSR_INTEO);
	iowrite32be(0x0, bridge->base + TSI148_LCSR_INTEN);

	/* Clear all interrupts */
	iowrite32be(0xFFFFFFFF, bridge->base + TSI148_LCSR_INTC);

	/* Detach interrupt handler */
	free_irq(pdev->irq, tsi148_bridge);
}
392
393/*
394 * Check to see if an IACk has been received, return true (1) or false (0).
395 */
396static int tsi148_iack_received(struct tsi148_driver *bridge)
397{
398 u32 tmp;
399
400 tmp = ioread32be(bridge->base + TSI148_LCSR_VICR);
401
402 if (tmp & TSI148_LCSR_VICR_IRQS)
403 return 0;
404 else
405 return 1;
406}
407
/*
 * Configure VME interrupt.
 *
 * @level: VME interrupt level 1-7 (indexes the IRQEN/IRQEO arrays at
 *         level - 1).
 * @state: 0 to disable the level, non-zero to enable it.
 * @sync:  when disabling, non-zero waits for in-flight handlers via
 *         synchronize_irq().
 */
static void tsi148_irq_set(struct vme_bridge *tsi148_bridge, int level,
	int state, int sync)
{
	struct pci_dev *pdev;
	u32 tmp;
	struct tsi148_driver *bridge;

	bridge = tsi148_bridge->driver_priv;

	/* We need to do the ordering differently for enabling and disabling */
	if (state == 0) {
		/* Disable: mask (INTEN) before un-routing (INTEO) */
		tmp = ioread32be(bridge->base + TSI148_LCSR_INTEN);
		tmp &= ~TSI148_LCSR_INTEN_IRQEN[level - 1];
		iowrite32be(tmp, bridge->base + TSI148_LCSR_INTEN);

		tmp = ioread32be(bridge->base + TSI148_LCSR_INTEO);
		tmp &= ~TSI148_LCSR_INTEO_IRQEO[level - 1];
		iowrite32be(tmp, bridge->base + TSI148_LCSR_INTEO);

		if (sync != 0) {
			pdev = container_of(tsi148_bridge->parent,
				struct pci_dev, dev);

			synchronize_irq(pdev->irq);
		}
	} else {
		/* Enable: route (INTEO) before unmasking (INTEN) */
		tmp = ioread32be(bridge->base + TSI148_LCSR_INTEO);
		tmp |= TSI148_LCSR_INTEO_IRQEO[level - 1];
		iowrite32be(tmp, bridge->base + TSI148_LCSR_INTEO);

		tmp = ioread32be(bridge->base + TSI148_LCSR_INTEN);
		tmp |= TSI148_LCSR_INTEN_IRQEN[level - 1];
		iowrite32be(tmp, bridge->base + TSI148_LCSR_INTEN);
	}
}
446
/*
 * Generate a VME bus interrupt at the requested level & vector. Wait for
 * interrupt to be acked.
 *
 * Serialised by the vme_int mutex: only one software-generated VME
 * interrupt may be outstanding at a time.  Always returns 0.
 */
static int tsi148_irq_generate(struct vme_bridge *tsi148_bridge, int level,
	int statid)
{
	u32 tmp;
	struct tsi148_driver *bridge;

	bridge = tsi148_bridge->driver_priv;

	mutex_lock(&bridge->vme_int);

	/* Read VICR register */
	tmp = ioread32be(bridge->base + TSI148_LCSR_VICR);

	/* Set Status/ID */
	tmp = (tmp & ~TSI148_LCSR_VICR_STID_M) |
		(statid & TSI148_LCSR_VICR_STID_M);
	iowrite32be(tmp, bridge->base + TSI148_LCSR_VICR);

	/* Assert VMEbus IRQ */
	tmp = tmp | TSI148_LCSR_VICR_IRQL[level];
	iowrite32be(tmp, bridge->base + TSI148_LCSR_VICR);

	/* XXX Consider implementing a timeout? */
	wait_event_interruptible(bridge->iack_queue,
		tsi148_iack_received(bridge));

	mutex_unlock(&bridge->vme_int);

	return 0;
}
481
482/*
483 * Find the first error in this address range
484 */
485static struct vme_bus_error *tsi148_find_error(struct vme_bridge *tsi148_bridge,
486 vme_address_t aspace, unsigned long long address, size_t count)
487{
488 struct list_head *err_pos;
489 struct vme_bus_error *vme_err, *valid = NULL;
490 unsigned long long bound;
491
492 bound = address + count;
493
494 /*
495 * XXX We are currently not looking at the address space when parsing
496 * for errors. This is because parsing the Address Modifier Codes
497 * is going to be quite resource intensive to do properly. We
498 * should be OK just looking at the addresses and this is certainly
499 * much better than what we had before.
500 */
501 err_pos = NULL;
502 /* Iterate through errors */
503 list_for_each(err_pos, &tsi148_bridge->vme_errors) {
504 vme_err = list_entry(err_pos, struct vme_bus_error, list);
505 if ((vme_err->address >= address) &&
506 (vme_err->address < bound)) {
507
508 valid = vme_err;
509 break;
510 }
511 }
512
513 return valid;
514}
515
516/*
517 * Clear errors in the provided address range.
518 */
519static void tsi148_clear_errors(struct vme_bridge *tsi148_bridge,
520 vme_address_t aspace, unsigned long long address, size_t count)
521{
522 struct list_head *err_pos, *temp;
523 struct vme_bus_error *vme_err;
524 unsigned long long bound;
525
526 bound = address + count;
527
528 /*
529 * XXX We are currently not looking at the address space when parsing
530 * for errors. This is because parsing the Address Modifier Codes
531 * is going to be quite resource intensive to do properly. We
532 * should be OK just looking at the addresses and this is certainly
533 * much better than what we had before.
534 */
535 err_pos = NULL;
536 /* Iterate through errors */
537 list_for_each_safe(err_pos, temp, &tsi148_bridge->vme_errors) {
538 vme_err = list_entry(err_pos, struct vme_bus_error, list);
539
540 if ((vme_err->address >= address) &&
541 (vme_err->address < bound)) {
542
543 list_del(err_pos);
544 kfree(vme_err);
545 }
546 }
547}
548
549/*
550 * Initialize a slave window with the requested attributes.
551 */
552static int tsi148_slave_set(struct vme_slave_resource *image, int enabled,
553 unsigned long long vme_base, unsigned long long size,
554 dma_addr_t pci_base, vme_address_t aspace, vme_cycle_t cycle)
555{
556 unsigned int i, addr = 0, granularity = 0;
557 unsigned int temp_ctl = 0;
558 unsigned int vme_base_low, vme_base_high;
559 unsigned int vme_bound_low, vme_bound_high;
560 unsigned int pci_offset_low, pci_offset_high;
561 unsigned long long vme_bound, pci_offset;
562 struct vme_bridge *tsi148_bridge;
563 struct tsi148_driver *bridge;
564
565 tsi148_bridge = image->parent;
566 bridge = tsi148_bridge->driver_priv;
567
568 i = image->number;
569
570 switch (aspace) {
571 case VME_A16:
572 granularity = 0x10;
573 addr |= TSI148_LCSR_ITAT_AS_A16;
574 break;
575 case VME_A24:
576 granularity = 0x1000;
577 addr |= TSI148_LCSR_ITAT_AS_A24;
578 break;
579 case VME_A32:
580 granularity = 0x10000;
581 addr |= TSI148_LCSR_ITAT_AS_A32;
582 break;
583 case VME_A64:
584 granularity = 0x10000;
585 addr |= TSI148_LCSR_ITAT_AS_A64;
586 break;
587 case VME_CRCSR:
588 case VME_USER1:
589 case VME_USER2:
590 case VME_USER3:
591 case VME_USER4:
592 default:
593 dev_err(tsi148_bridge->parent, "Invalid address space\n");
594 return -EINVAL;
595 break;
596 }
597
598 /* Convert 64-bit variables to 2x 32-bit variables */
599 reg_split(vme_base, &vme_base_high, &vme_base_low);
600
601 /*
602 * Bound address is a valid address for the window, adjust
603 * accordingly
604 */
605 vme_bound = vme_base + size - granularity;
606 reg_split(vme_bound, &vme_bound_high, &vme_bound_low);
607 pci_offset = (unsigned long long)pci_base - vme_base;
608 reg_split(pci_offset, &pci_offset_high, &pci_offset_low);
609
610 if (vme_base_low & (granularity - 1)) {
611 dev_err(tsi148_bridge->parent, "Invalid VME base alignment\n");
612 return -EINVAL;
613 }
614 if (vme_bound_low & (granularity - 1)) {
615 dev_err(tsi148_bridge->parent, "Invalid VME bound alignment\n");
616 return -EINVAL;
617 }
618 if (pci_offset_low & (granularity - 1)) {
619 dev_err(tsi148_bridge->parent, "Invalid PCI Offset "
620 "alignment\n");
621 return -EINVAL;
622 }
623
624 /* Disable while we are mucking around */
625 temp_ctl = ioread32be(bridge->base + TSI148_LCSR_IT[i] +
626 TSI148_LCSR_OFFSET_ITAT);
627 temp_ctl &= ~TSI148_LCSR_ITAT_EN;
628 iowrite32be(temp_ctl, bridge->base + TSI148_LCSR_IT[i] +
629 TSI148_LCSR_OFFSET_ITAT);
630
631 /* Setup mapping */
632 iowrite32be(vme_base_high, bridge->base + TSI148_LCSR_IT[i] +
633 TSI148_LCSR_OFFSET_ITSAU);
634 iowrite32be(vme_base_low, bridge->base + TSI148_LCSR_IT[i] +
635 TSI148_LCSR_OFFSET_ITSAL);
636 iowrite32be(vme_bound_high, bridge->base + TSI148_LCSR_IT[i] +
637 TSI148_LCSR_OFFSET_ITEAU);
638 iowrite32be(vme_bound_low, bridge->base + TSI148_LCSR_IT[i] +
639 TSI148_LCSR_OFFSET_ITEAL);
640 iowrite32be(pci_offset_high, bridge->base + TSI148_LCSR_IT[i] +
641 TSI148_LCSR_OFFSET_ITOFU);
642 iowrite32be(pci_offset_low, bridge->base + TSI148_LCSR_IT[i] +
643 TSI148_LCSR_OFFSET_ITOFL);
644
645 /* Setup 2eSST speeds */
646 temp_ctl &= ~TSI148_LCSR_ITAT_2eSSTM_M;
647 switch (cycle & (VME_2eSST160 | VME_2eSST267 | VME_2eSST320)) {
648 case VME_2eSST160:
649 temp_ctl |= TSI148_LCSR_ITAT_2eSSTM_160;
650 break;
651 case VME_2eSST267:
652 temp_ctl |= TSI148_LCSR_ITAT_2eSSTM_267;
653 break;
654 case VME_2eSST320:
655 temp_ctl |= TSI148_LCSR_ITAT_2eSSTM_320;
656 break;
657 }
658
659 /* Setup cycle types */
660 temp_ctl &= ~(0x1F << 7);
661 if (cycle & VME_BLT)
662 temp_ctl |= TSI148_LCSR_ITAT_BLT;
663 if (cycle & VME_MBLT)
664 temp_ctl |= TSI148_LCSR_ITAT_MBLT;
665 if (cycle & VME_2eVME)
666 temp_ctl |= TSI148_LCSR_ITAT_2eVME;
667 if (cycle & VME_2eSST)
668 temp_ctl |= TSI148_LCSR_ITAT_2eSST;
669 if (cycle & VME_2eSSTB)
670 temp_ctl |= TSI148_LCSR_ITAT_2eSSTB;
671
672 /* Setup address space */
673 temp_ctl &= ~TSI148_LCSR_ITAT_AS_M;
674 temp_ctl |= addr;
675
676 temp_ctl &= ~0xF;
677 if (cycle & VME_SUPER)
678 temp_ctl |= TSI148_LCSR_ITAT_SUPR ;
679 if (cycle & VME_USER)
680 temp_ctl |= TSI148_LCSR_ITAT_NPRIV;
681 if (cycle & VME_PROG)
682 temp_ctl |= TSI148_LCSR_ITAT_PGM;
683 if (cycle & VME_DATA)
684 temp_ctl |= TSI148_LCSR_ITAT_DATA;
685
686 /* Write ctl reg without enable */
687 iowrite32be(temp_ctl, bridge->base + TSI148_LCSR_IT[i] +
688 TSI148_LCSR_OFFSET_ITAT);
689
690 if (enabled)
691 temp_ctl |= TSI148_LCSR_ITAT_EN;
692
693 iowrite32be(temp_ctl, bridge->base + TSI148_LCSR_IT[i] +
694 TSI148_LCSR_OFFSET_ITAT);
695
696 return 0;
697}
698
699/*
700 * Get slave window configuration.
701 */
702static int tsi148_slave_get(struct vme_slave_resource *image, int *enabled,
703 unsigned long long *vme_base, unsigned long long *size,
704 dma_addr_t *pci_base, vme_address_t *aspace, vme_cycle_t *cycle)
705{
706 unsigned int i, granularity = 0, ctl = 0;
707 unsigned int vme_base_low, vme_base_high;
708 unsigned int vme_bound_low, vme_bound_high;
709 unsigned int pci_offset_low, pci_offset_high;
710 unsigned long long vme_bound, pci_offset;
711 struct tsi148_driver *bridge;
712
713 bridge = image->parent->driver_priv;
714
715 i = image->number;
716
717 /* Read registers */
718 ctl = ioread32be(bridge->base + TSI148_LCSR_IT[i] +
719 TSI148_LCSR_OFFSET_ITAT);
720
721 vme_base_high = ioread32be(bridge->base + TSI148_LCSR_IT[i] +
722 TSI148_LCSR_OFFSET_ITSAU);
723 vme_base_low = ioread32be(bridge->base + TSI148_LCSR_IT[i] +
724 TSI148_LCSR_OFFSET_ITSAL);
725 vme_bound_high = ioread32be(bridge->base + TSI148_LCSR_IT[i] +
726 TSI148_LCSR_OFFSET_ITEAU);
727 vme_bound_low = ioread32be(bridge->base + TSI148_LCSR_IT[i] +
728 TSI148_LCSR_OFFSET_ITEAL);
729 pci_offset_high = ioread32be(bridge->base + TSI148_LCSR_IT[i] +
730 TSI148_LCSR_OFFSET_ITOFU);
731 pci_offset_low = ioread32be(bridge->base + TSI148_LCSR_IT[i] +
732 TSI148_LCSR_OFFSET_ITOFL);
733
734 /* Convert 64-bit variables to 2x 32-bit variables */
735 reg_join(vme_base_high, vme_base_low, vme_base);
736 reg_join(vme_bound_high, vme_bound_low, &vme_bound);
737 reg_join(pci_offset_high, pci_offset_low, &pci_offset);
738
739 *pci_base = (dma_addr_t)vme_base + pci_offset;
740
741 *enabled = 0;
742 *aspace = 0;
743 *cycle = 0;
744
745 if (ctl & TSI148_LCSR_ITAT_EN)
746 *enabled = 1;
747
748 if ((ctl & TSI148_LCSR_ITAT_AS_M) == TSI148_LCSR_ITAT_AS_A16) {
749 granularity = 0x10;
750 *aspace |= VME_A16;
751 }
752 if ((ctl & TSI148_LCSR_ITAT_AS_M) == TSI148_LCSR_ITAT_AS_A24) {
753 granularity = 0x1000;
754 *aspace |= VME_A24;
755 }
756 if ((ctl & TSI148_LCSR_ITAT_AS_M) == TSI148_LCSR_ITAT_AS_A32) {
757 granularity = 0x10000;
758 *aspace |= VME_A32;
759 }
760 if ((ctl & TSI148_LCSR_ITAT_AS_M) == TSI148_LCSR_ITAT_AS_A64) {
761 granularity = 0x10000;
762 *aspace |= VME_A64;
763 }
764
765 /* Need granularity before we set the size */
766 *size = (unsigned long long)((vme_bound - *vme_base) + granularity);
767
768
769 if ((ctl & TSI148_LCSR_ITAT_2eSSTM_M) == TSI148_LCSR_ITAT_2eSSTM_160)
770 *cycle |= VME_2eSST160;
771 if ((ctl & TSI148_LCSR_ITAT_2eSSTM_M) == TSI148_LCSR_ITAT_2eSSTM_267)
772 *cycle |= VME_2eSST267;
773 if ((ctl & TSI148_LCSR_ITAT_2eSSTM_M) == TSI148_LCSR_ITAT_2eSSTM_320)
774 *cycle |= VME_2eSST320;
775
776 if (ctl & TSI148_LCSR_ITAT_BLT)
777 *cycle |= VME_BLT;
778 if (ctl & TSI148_LCSR_ITAT_MBLT)
779 *cycle |= VME_MBLT;
780 if (ctl & TSI148_LCSR_ITAT_2eVME)
781 *cycle |= VME_2eVME;
782 if (ctl & TSI148_LCSR_ITAT_2eSST)
783 *cycle |= VME_2eSST;
784 if (ctl & TSI148_LCSR_ITAT_2eSSTB)
785 *cycle |= VME_2eSSTB;
786
787 if (ctl & TSI148_LCSR_ITAT_SUPR)
788 *cycle |= VME_SUPER;
789 if (ctl & TSI148_LCSR_ITAT_NPRIV)
790 *cycle |= VME_USER;
791 if (ctl & TSI148_LCSR_ITAT_PGM)
792 *cycle |= VME_PROG;
793 if (ctl & TSI148_LCSR_ITAT_DATA)
794 *cycle |= VME_DATA;
795
796 return 0;
797}
798
799/*
800 * Allocate and map PCI Resource
801 */
802static int tsi148_alloc_resource(struct vme_master_resource *image,
803 unsigned long long size)
804{
805 unsigned long long existing_size;
806 int retval = 0;
807 struct pci_dev *pdev;
808 struct vme_bridge *tsi148_bridge;
809
810 tsi148_bridge = image->parent;
811
812 pdev = container_of(tsi148_bridge->parent, struct pci_dev, dev);
813
814 existing_size = (unsigned long long)(image->bus_resource.end -
815 image->bus_resource.start);
816
817 /* If the existing size is OK, return */
818 if ((size != 0) && (existing_size == (size - 1)))
819 return 0;
820
821 if (existing_size != 0) {
822 iounmap(image->kern_base);
823 image->kern_base = NULL;
824 kfree(image->bus_resource.name);
825 release_resource(&image->bus_resource);
826 memset(&image->bus_resource, 0, sizeof(struct resource));
827 }
828
829 /* Exit here if size is zero */
830 if (size == 0)
831 return 0;
832
833 if (image->bus_resource.name == NULL) {
834 image->bus_resource.name = kmalloc(VMENAMSIZ+3, GFP_ATOMIC);
835 if (image->bus_resource.name == NULL) {
836 dev_err(tsi148_bridge->parent, "Unable to allocate "
837 "memory for resource name\n");
838 retval = -ENOMEM;
839 goto err_name;
840 }
841 }
842
843 sprintf((char *)image->bus_resource.name, "%s.%d", tsi148_bridge->name,
844 image->number);
845
846 image->bus_resource.start = 0;
847 image->bus_resource.end = (unsigned long)size;
848 image->bus_resource.flags = IORESOURCE_MEM;
849
850 retval = pci_bus_alloc_resource(pdev->bus,
851 &image->bus_resource, size, size, PCIBIOS_MIN_MEM,
852 0, NULL, NULL);
853 if (retval) {
854 dev_err(tsi148_bridge->parent, "Failed to allocate mem "
855 "resource for window %d size 0x%lx start 0x%lx\n",
856 image->number, (unsigned long)size,
857 (unsigned long)image->bus_resource.start);
858 goto err_resource;
859 }
860
861 image->kern_base = ioremap_nocache(
862 image->bus_resource.start, size);
863 if (image->kern_base == NULL) {
864 dev_err(tsi148_bridge->parent, "Failed to remap resource\n");
865 retval = -ENOMEM;
866 goto err_remap;
867 }
868
869 return 0;
870
871err_remap:
872 release_resource(&image->bus_resource);
873err_resource:
874 kfree(image->bus_resource.name);
875 memset(&image->bus_resource, 0, sizeof(struct resource));
876err_name:
877 return retval;
878}
879
880/*
881 * Free and unmap PCI Resource
882 */
883static void tsi148_free_resource(struct vme_master_resource *image)
884{
885 iounmap(image->kern_base);
886 image->kern_base = NULL;
887 release_resource(&image->bus_resource);
888 kfree(image->bus_resource.name);
889 memset(&image->bus_resource, 0, sizeof(struct resource));
890}
891
892/*
893 * Set the attributes of an outbound window.
894 */
895static int tsi148_master_set(struct vme_master_resource *image, int enabled,
896 unsigned long long vme_base, unsigned long long size,
897 vme_address_t aspace, vme_cycle_t cycle, vme_width_t dwidth)
898{
899 int retval = 0;
900 unsigned int i;
901 unsigned int temp_ctl = 0;
902 unsigned int pci_base_low, pci_base_high;
903 unsigned int pci_bound_low, pci_bound_high;
904 unsigned int vme_offset_low, vme_offset_high;
905 unsigned long long pci_bound, vme_offset, pci_base;
906 struct vme_bridge *tsi148_bridge;
907 struct tsi148_driver *bridge;
908
909 tsi148_bridge = image->parent;
910
911 bridge = tsi148_bridge->driver_priv;
912
913 /* Verify input data */
914 if (vme_base & 0xFFFF) {
915 dev_err(tsi148_bridge->parent, "Invalid VME Window "
916 "alignment\n");
917 retval = -EINVAL;
918 goto err_window;
919 }
920
921 if ((size == 0) && (enabled != 0)) {
922 dev_err(tsi148_bridge->parent, "Size must be non-zero for "
923 "enabled windows\n");
924 retval = -EINVAL;
925 goto err_window;
926 }
927
928 spin_lock(&image->lock);
929
930 /* Let's allocate the resource here rather than further up the stack as
931 * it avoids pushing loads of bus dependent stuff up the stack. If size
932 * is zero, any existing resource will be freed.
933 */
934 retval = tsi148_alloc_resource(image, size);
935 if (retval) {
936 spin_unlock(&image->lock);
937 dev_err(tsi148_bridge->parent, "Unable to allocate memory for "
938 "resource\n");
939 goto err_res;
940 }
941
942 if (size == 0) {
943 pci_base = 0;
944 pci_bound = 0;
945 vme_offset = 0;
946 } else {
947 pci_base = (unsigned long long)image->bus_resource.start;
948
949 /*
950 * Bound address is a valid address for the window, adjust
951 * according to window granularity.
952 */
953 pci_bound = pci_base + (size - 0x10000);
954 vme_offset = vme_base - pci_base;
955 }
956
957 /* Convert 64-bit variables to 2x 32-bit variables */
958 reg_split(pci_base, &pci_base_high, &pci_base_low);
959 reg_split(pci_bound, &pci_bound_high, &pci_bound_low);
960 reg_split(vme_offset, &vme_offset_high, &vme_offset_low);
961
962 if (pci_base_low & 0xFFFF) {
963 spin_unlock(&image->lock);
964 dev_err(tsi148_bridge->parent, "Invalid PCI base alignment\n");
965 retval = -EINVAL;
966 goto err_gran;
967 }
968 if (pci_bound_low & 0xFFFF) {
969 spin_unlock(&image->lock);
970 dev_err(tsi148_bridge->parent, "Invalid PCI bound alignment\n");
971 retval = -EINVAL;
972 goto err_gran;
973 }
974 if (vme_offset_low & 0xFFFF) {
975 spin_unlock(&image->lock);
976 dev_err(tsi148_bridge->parent, "Invalid VME Offset "
977 "alignment\n");
978 retval = -EINVAL;
979 goto err_gran;
980 }
981
982 i = image->number;
983
984 /* Disable while we are mucking around */
985 temp_ctl = ioread32be(bridge->base + TSI148_LCSR_OT[i] +
986 TSI148_LCSR_OFFSET_OTAT);
987 temp_ctl &= ~TSI148_LCSR_OTAT_EN;
988 iowrite32be(temp_ctl, bridge->base + TSI148_LCSR_OT[i] +
989 TSI148_LCSR_OFFSET_OTAT);
990
991 /* Setup 2eSST speeds */
992 temp_ctl &= ~TSI148_LCSR_OTAT_2eSSTM_M;
993 switch (cycle & (VME_2eSST160 | VME_2eSST267 | VME_2eSST320)) {
994 case VME_2eSST160:
995 temp_ctl |= TSI148_LCSR_OTAT_2eSSTM_160;
996 break;
997 case VME_2eSST267:
998 temp_ctl |= TSI148_LCSR_OTAT_2eSSTM_267;
999 break;
1000 case VME_2eSST320:
1001 temp_ctl |= TSI148_LCSR_OTAT_2eSSTM_320;
1002 break;
1003 }
1004
1005 /* Setup cycle types */
1006 if (cycle & VME_BLT) {
1007 temp_ctl &= ~TSI148_LCSR_OTAT_TM_M;
1008 temp_ctl |= TSI148_LCSR_OTAT_TM_BLT;
1009 }
1010 if (cycle & VME_MBLT) {
1011 temp_ctl &= ~TSI148_LCSR_OTAT_TM_M;
1012 temp_ctl |= TSI148_LCSR_OTAT_TM_MBLT;
1013 }
1014 if (cycle & VME_2eVME) {
1015 temp_ctl &= ~TSI148_LCSR_OTAT_TM_M;
1016 temp_ctl |= TSI148_LCSR_OTAT_TM_2eVME;
1017 }
1018 if (cycle & VME_2eSST) {
1019 temp_ctl &= ~TSI148_LCSR_OTAT_TM_M;
1020 temp_ctl |= TSI148_LCSR_OTAT_TM_2eSST;
1021 }
1022 if (cycle & VME_2eSSTB) {
1023 dev_warn(tsi148_bridge->parent, "Currently not setting "
1024 "Broadcast Select Registers\n");
1025 temp_ctl &= ~TSI148_LCSR_OTAT_TM_M;
1026 temp_ctl |= TSI148_LCSR_OTAT_TM_2eSSTB;
1027 }
1028
1029 /* Setup data width */
1030 temp_ctl &= ~TSI148_LCSR_OTAT_DBW_M;
1031 switch (dwidth) {
1032 case VME_D16:
1033 temp_ctl |= TSI148_LCSR_OTAT_DBW_16;
1034 break;
1035 case VME_D32:
1036 temp_ctl |= TSI148_LCSR_OTAT_DBW_32;
1037 break;
1038 default:
1039 spin_unlock(&image->lock);
1040 dev_err(tsi148_bridge->parent, "Invalid data width\n");
1041 retval = -EINVAL;
1042 goto err_dwidth;
1043 }
1044
1045 /* Setup address space */
1046 temp_ctl &= ~TSI148_LCSR_OTAT_AMODE_M;
1047 switch (aspace) {
1048 case VME_A16:
1049 temp_ctl |= TSI148_LCSR_OTAT_AMODE_A16;
1050 break;
1051 case VME_A24:
1052 temp_ctl |= TSI148_LCSR_OTAT_AMODE_A24;
1053 break;
1054 case VME_A32:
1055 temp_ctl |= TSI148_LCSR_OTAT_AMODE_A32;
1056 break;
1057 case VME_A64:
1058 temp_ctl |= TSI148_LCSR_OTAT_AMODE_A64;
1059 break;
1060 case VME_CRCSR:
1061 temp_ctl |= TSI148_LCSR_OTAT_AMODE_CRCSR;
1062 break;
1063 case VME_USER1:
1064 temp_ctl |= TSI148_LCSR_OTAT_AMODE_USER1;
1065 break;
1066 case VME_USER2:
1067 temp_ctl |= TSI148_LCSR_OTAT_AMODE_USER2;
1068 break;
1069 case VME_USER3:
1070 temp_ctl |= TSI148_LCSR_OTAT_AMODE_USER3;
1071 break;
1072 case VME_USER4:
1073 temp_ctl |= TSI148_LCSR_OTAT_AMODE_USER4;
1074 break;
1075 default:
1076 spin_unlock(&image->lock);
1077 dev_err(tsi148_bridge->parent, "Invalid address space\n");
1078 retval = -EINVAL;
1079 goto err_aspace;
1080 break;
1081 }
1082
1083 temp_ctl &= ~(3<<4);
1084 if (cycle & VME_SUPER)
1085 temp_ctl |= TSI148_LCSR_OTAT_SUP;
1086 if (cycle & VME_PROG)
1087 temp_ctl |= TSI148_LCSR_OTAT_PGM;
1088
1089 /* Setup mapping */
1090 iowrite32be(pci_base_high, bridge->base + TSI148_LCSR_OT[i] +
1091 TSI148_LCSR_OFFSET_OTSAU);
1092 iowrite32be(pci_base_low, bridge->base + TSI148_LCSR_OT[i] +
1093 TSI148_LCSR_OFFSET_OTSAL);
1094 iowrite32be(pci_bound_high, bridge->base + TSI148_LCSR_OT[i] +
1095 TSI148_LCSR_OFFSET_OTEAU);
1096 iowrite32be(pci_bound_low, bridge->base + TSI148_LCSR_OT[i] +
1097 TSI148_LCSR_OFFSET_OTEAL);
1098 iowrite32be(vme_offset_high, bridge->base + TSI148_LCSR_OT[i] +
1099 TSI148_LCSR_OFFSET_OTOFU);
1100 iowrite32be(vme_offset_low, bridge->base + TSI148_LCSR_OT[i] +
1101 TSI148_LCSR_OFFSET_OTOFL);
1102
1103 /* Write ctl reg without enable */
1104 iowrite32be(temp_ctl, bridge->base + TSI148_LCSR_OT[i] +
1105 TSI148_LCSR_OFFSET_OTAT);
1106
1107 if (enabled)
1108 temp_ctl |= TSI148_LCSR_OTAT_EN;
1109
1110 iowrite32be(temp_ctl, bridge->base + TSI148_LCSR_OT[i] +
1111 TSI148_LCSR_OFFSET_OTAT);
1112
1113 spin_unlock(&image->lock);
1114 return 0;
1115
1116err_aspace:
1117err_dwidth:
1118err_gran:
1119 tsi148_free_resource(image);
1120err_res:
1121err_window:
1122 return retval;
1123
1124}
1125
1126/*
1127 * Set the attributes of an outbound window.
1128 *
1129 * XXX Not parsing prefetch information.
1130 */
1131static int __tsi148_master_get(struct vme_master_resource *image, int *enabled,
1132 unsigned long long *vme_base, unsigned long long *size,
1133 vme_address_t *aspace, vme_cycle_t *cycle, vme_width_t *dwidth)
1134{
1135 unsigned int i, ctl;
1136 unsigned int pci_base_low, pci_base_high;
1137 unsigned int pci_bound_low, pci_bound_high;
1138 unsigned int vme_offset_low, vme_offset_high;
1139
1140 unsigned long long pci_base, pci_bound, vme_offset;
1141 struct tsi148_driver *bridge;
1142
1143 bridge = image->parent->driver_priv;
1144
1145 i = image->number;
1146
1147 ctl = ioread32be(bridge->base + TSI148_LCSR_OT[i] +
1148 TSI148_LCSR_OFFSET_OTAT);
1149
1150 pci_base_high = ioread32be(bridge->base + TSI148_LCSR_OT[i] +
1151 TSI148_LCSR_OFFSET_OTSAU);
1152 pci_base_low = ioread32be(bridge->base + TSI148_LCSR_OT[i] +
1153 TSI148_LCSR_OFFSET_OTSAL);
1154 pci_bound_high = ioread32be(bridge->base + TSI148_LCSR_OT[i] +
1155 TSI148_LCSR_OFFSET_OTEAU);
1156 pci_bound_low = ioread32be(bridge->base + TSI148_LCSR_OT[i] +
1157 TSI148_LCSR_OFFSET_OTEAL);
1158 vme_offset_high = ioread32be(bridge->base + TSI148_LCSR_OT[i] +
1159 TSI148_LCSR_OFFSET_OTOFU);
1160 vme_offset_low = ioread32be(bridge->base + TSI148_LCSR_OT[i] +
1161 TSI148_LCSR_OFFSET_OTOFL);
1162
1163 /* Convert 64-bit variables to 2x 32-bit variables */
1164 reg_join(pci_base_high, pci_base_low, &pci_base);
1165 reg_join(pci_bound_high, pci_bound_low, &pci_bound);
1166 reg_join(vme_offset_high, vme_offset_low, &vme_offset);
1167
1168 *vme_base = pci_base + vme_offset;
1169 *size = (unsigned long long)(pci_bound - pci_base) + 0x10000;
1170
1171 *enabled = 0;
1172 *aspace = 0;
1173 *cycle = 0;
1174 *dwidth = 0;
1175
1176 if (ctl & TSI148_LCSR_OTAT_EN)
1177 *enabled = 1;
1178
1179 /* Setup address space */
1180 if ((ctl & TSI148_LCSR_OTAT_AMODE_M) == TSI148_LCSR_OTAT_AMODE_A16)
1181 *aspace |= VME_A16;
1182 if ((ctl & TSI148_LCSR_OTAT_AMODE_M) == TSI148_LCSR_OTAT_AMODE_A24)
1183 *aspace |= VME_A24;
1184 if ((ctl & TSI148_LCSR_OTAT_AMODE_M) == TSI148_LCSR_OTAT_AMODE_A32)
1185 *aspace |= VME_A32;
1186 if ((ctl & TSI148_LCSR_OTAT_AMODE_M) == TSI148_LCSR_OTAT_AMODE_A64)
1187 *aspace |= VME_A64;
1188 if ((ctl & TSI148_LCSR_OTAT_AMODE_M) == TSI148_LCSR_OTAT_AMODE_CRCSR)
1189 *aspace |= VME_CRCSR;
1190 if ((ctl & TSI148_LCSR_OTAT_AMODE_M) == TSI148_LCSR_OTAT_AMODE_USER1)
1191 *aspace |= VME_USER1;
1192 if ((ctl & TSI148_LCSR_OTAT_AMODE_M) == TSI148_LCSR_OTAT_AMODE_USER2)
1193 *aspace |= VME_USER2;
1194 if ((ctl & TSI148_LCSR_OTAT_AMODE_M) == TSI148_LCSR_OTAT_AMODE_USER3)
1195 *aspace |= VME_USER3;
1196 if ((ctl & TSI148_LCSR_OTAT_AMODE_M) == TSI148_LCSR_OTAT_AMODE_USER4)
1197 *aspace |= VME_USER4;
1198
1199 /* Setup 2eSST speeds */
1200 if ((ctl & TSI148_LCSR_OTAT_2eSSTM_M) == TSI148_LCSR_OTAT_2eSSTM_160)
1201 *cycle |= VME_2eSST160;
1202 if ((ctl & TSI148_LCSR_OTAT_2eSSTM_M) == TSI148_LCSR_OTAT_2eSSTM_267)
1203 *cycle |= VME_2eSST267;
1204 if ((ctl & TSI148_LCSR_OTAT_2eSSTM_M) == TSI148_LCSR_OTAT_2eSSTM_320)
1205 *cycle |= VME_2eSST320;
1206
1207 /* Setup cycle types */
1208 if ((ctl & TSI148_LCSR_OTAT_TM_M) == TSI148_LCSR_OTAT_TM_SCT)
1209 *cycle |= VME_SCT;
1210 if ((ctl & TSI148_LCSR_OTAT_TM_M) == TSI148_LCSR_OTAT_TM_BLT)
1211 *cycle |= VME_BLT;
1212 if ((ctl & TSI148_LCSR_OTAT_TM_M) == TSI148_LCSR_OTAT_TM_MBLT)
1213 *cycle |= VME_MBLT;
1214 if ((ctl & TSI148_LCSR_OTAT_TM_M) == TSI148_LCSR_OTAT_TM_2eVME)
1215 *cycle |= VME_2eVME;
1216 if ((ctl & TSI148_LCSR_OTAT_TM_M) == TSI148_LCSR_OTAT_TM_2eSST)
1217 *cycle |= VME_2eSST;
1218 if ((ctl & TSI148_LCSR_OTAT_TM_M) == TSI148_LCSR_OTAT_TM_2eSSTB)
1219 *cycle |= VME_2eSSTB;
1220
1221 if (ctl & TSI148_LCSR_OTAT_SUP)
1222 *cycle |= VME_SUPER;
1223 else
1224 *cycle |= VME_USER;
1225
1226 if (ctl & TSI148_LCSR_OTAT_PGM)
1227 *cycle |= VME_PROG;
1228 else
1229 *cycle |= VME_DATA;
1230
1231 /* Setup data width */
1232 if ((ctl & TSI148_LCSR_OTAT_DBW_M) == TSI148_LCSR_OTAT_DBW_16)
1233 *dwidth = VME_D16;
1234 if ((ctl & TSI148_LCSR_OTAT_DBW_M) == TSI148_LCSR_OTAT_DBW_32)
1235 *dwidth = VME_D32;
1236
1237 return 0;
1238}
1239
1240
1241static int tsi148_master_get(struct vme_master_resource *image, int *enabled,
1242 unsigned long long *vme_base, unsigned long long *size,
1243 vme_address_t *aspace, vme_cycle_t *cycle, vme_width_t *dwidth)
1244{
1245 int retval;
1246
1247 spin_lock(&image->lock);
1248
1249 retval = __tsi148_master_get(image, enabled, vme_base, size, aspace,
1250 cycle, dwidth);
1251
1252 spin_unlock(&image->lock);
1253
1254 return retval;
1255}
1256
1257static ssize_t tsi148_master_read(struct vme_master_resource *image, void *buf,
1258 size_t count, loff_t offset)
1259{
1260 int retval, enabled;
1261 unsigned long long vme_base, size;
1262 vme_address_t aspace;
1263 vme_cycle_t cycle;
1264 vme_width_t dwidth;
1265 struct vme_bus_error *vme_err = NULL;
1266 struct vme_bridge *tsi148_bridge;
1267
1268 tsi148_bridge = image->parent;
1269
1270 spin_lock(&image->lock);
1271
1272 memcpy_fromio(buf, image->kern_base + offset, (unsigned int)count);
1273 retval = count;
1274
1275 if (!err_chk)
1276 goto skip_chk;
1277
1278 __tsi148_master_get(image, &enabled, &vme_base, &size, &aspace, &cycle,
1279 &dwidth);
1280
1281 vme_err = tsi148_find_error(tsi148_bridge, aspace, vme_base + offset,
1282 count);
1283 if (vme_err != NULL) {
1284 dev_err(image->parent->parent, "First VME read error detected "
1285 "an at address 0x%llx\n", vme_err->address);
1286 retval = vme_err->address - (vme_base + offset);
1287 /* Clear down save errors in this address range */
1288 tsi148_clear_errors(tsi148_bridge, aspace, vme_base + offset,
1289 count);
1290 }
1291
1292skip_chk:
1293 spin_unlock(&image->lock);
1294
1295 return retval;
1296}
1297
1298
1299static ssize_t tsi148_master_write(struct vme_master_resource *image, void *buf,
1300 size_t count, loff_t offset)
1301{
1302 int retval = 0, enabled;
1303 unsigned long long vme_base, size;
1304 vme_address_t aspace;
1305 vme_cycle_t cycle;
1306 vme_width_t dwidth;
1307
1308 struct vme_bus_error *vme_err = NULL;
1309 struct vme_bridge *tsi148_bridge;
1310 struct tsi148_driver *bridge;
1311
1312 tsi148_bridge = image->parent;
1313
1314 bridge = tsi148_bridge->driver_priv;
1315
1316 spin_lock(&image->lock);
1317
1318 memcpy_toio(image->kern_base + offset, buf, (unsigned int)count);
1319 retval = count;
1320
1321 /*
1322 * Writes are posted. We need to do a read on the VME bus to flush out
1323 * all of the writes before we check for errors. We can't guarantee
1324 * that reading the data we have just written is safe. It is believed
1325 * that there isn't any read, write re-ordering, so we can read any
1326 * location in VME space, so lets read the Device ID from the tsi148's
1327 * own registers as mapped into CR/CSR space.
1328 *
1329 * We check for saved errors in the written address range/space.
1330 */
1331
1332 if (!err_chk)
1333 goto skip_chk;
1334
1335 /*
1336 * Get window info first, to maximise the time that the buffers may
1337 * fluch on their own
1338 */
1339 __tsi148_master_get(image, &enabled, &vme_base, &size, &aspace, &cycle,
1340 &dwidth);
1341
1342 ioread16(bridge->flush_image->kern_base + 0x7F000);
1343
1344 vme_err = tsi148_find_error(tsi148_bridge, aspace, vme_base + offset,
1345 count);
1346 if (vme_err != NULL) {
1347 dev_warn(tsi148_bridge->parent, "First VME write error detected"
1348 " an at address 0x%llx\n", vme_err->address);
1349 retval = vme_err->address - (vme_base + offset);
1350 /* Clear down save errors in this address range */
1351 tsi148_clear_errors(tsi148_bridge, aspace, vme_base + offset,
1352 count);
1353 }
1354
1355skip_chk:
1356 spin_unlock(&image->lock);
1357
1358 return retval;
1359}
1360
1361/*
1362 * Perform an RMW cycle on the VME bus.
1363 *
1364 * Requires a previously configured master window, returns final value.
1365 */
1366static unsigned int tsi148_master_rmw(struct vme_master_resource *image,
1367 unsigned int mask, unsigned int compare, unsigned int swap,
1368 loff_t offset)
1369{
1370 unsigned long long pci_addr;
1371 unsigned int pci_addr_high, pci_addr_low;
1372 u32 tmp, result;
1373 int i;
1374 struct tsi148_driver *bridge;
1375
1376 bridge = image->parent->driver_priv;
1377
1378 /* Find the PCI address that maps to the desired VME address */
1379 i = image->number;
1380
1381 /* Locking as we can only do one of these at a time */
1382 mutex_lock(&bridge->vme_rmw);
1383
1384 /* Lock image */
1385 spin_lock(&image->lock);
1386
1387 pci_addr_high = ioread32be(bridge->base + TSI148_LCSR_OT[i] +
1388 TSI148_LCSR_OFFSET_OTSAU);
1389 pci_addr_low = ioread32be(bridge->base + TSI148_LCSR_OT[i] +
1390 TSI148_LCSR_OFFSET_OTSAL);
1391
1392 reg_join(pci_addr_high, pci_addr_low, &pci_addr);
1393 reg_split(pci_addr + offset, &pci_addr_high, &pci_addr_low);
1394
1395 /* Configure registers */
1396 iowrite32be(mask, bridge->base + TSI148_LCSR_RMWEN);
1397 iowrite32be(compare, bridge->base + TSI148_LCSR_RMWC);
1398 iowrite32be(swap, bridge->base + TSI148_LCSR_RMWS);
1399 iowrite32be(pci_addr_high, bridge->base + TSI148_LCSR_RMWAU);
1400 iowrite32be(pci_addr_low, bridge->base + TSI148_LCSR_RMWAL);
1401
1402 /* Enable RMW */
1403 tmp = ioread32be(bridge->base + TSI148_LCSR_VMCTRL);
1404 tmp |= TSI148_LCSR_VMCTRL_RMWEN;
1405 iowrite32be(tmp, bridge->base + TSI148_LCSR_VMCTRL);
1406
1407 /* Kick process off with a read to the required address. */
1408 result = ioread32be(image->kern_base + offset);
1409
1410 /* Disable RMW */
1411 tmp = ioread32be(bridge->base + TSI148_LCSR_VMCTRL);
1412 tmp &= ~TSI148_LCSR_VMCTRL_RMWEN;
1413 iowrite32be(tmp, bridge->base + TSI148_LCSR_VMCTRL);
1414
1415 spin_unlock(&image->lock);
1416
1417 mutex_unlock(&bridge->vme_rmw);
1418
1419 return result;
1420}
1421
1422static int tsi148_dma_set_vme_src_attributes(struct device *dev, u32 *attr,
1423 vme_address_t aspace, vme_cycle_t cycle, vme_width_t dwidth)
1424{
1425 /* Setup 2eSST speeds */
1426 switch (cycle & (VME_2eSST160 | VME_2eSST267 | VME_2eSST320)) {
1427 case VME_2eSST160:
1428 *attr |= TSI148_LCSR_DSAT_2eSSTM_160;
1429 break;
1430 case VME_2eSST267:
1431 *attr |= TSI148_LCSR_DSAT_2eSSTM_267;
1432 break;
1433 case VME_2eSST320:
1434 *attr |= TSI148_LCSR_DSAT_2eSSTM_320;
1435 break;
1436 }
1437
1438 /* Setup cycle types */
1439 if (cycle & VME_SCT)
1440 *attr |= TSI148_LCSR_DSAT_TM_SCT;
1441
1442 if (cycle & VME_BLT)
1443 *attr |= TSI148_LCSR_DSAT_TM_BLT;
1444
1445 if (cycle & VME_MBLT)
1446 *attr |= TSI148_LCSR_DSAT_TM_MBLT;
1447
1448 if (cycle & VME_2eVME)
1449 *attr |= TSI148_LCSR_DSAT_TM_2eVME;
1450
1451 if (cycle & VME_2eSST)
1452 *attr |= TSI148_LCSR_DSAT_TM_2eSST;
1453
1454 if (cycle & VME_2eSSTB) {
1455 dev_err(dev, "Currently not setting Broadcast Select "
1456 "Registers\n");
1457 *attr |= TSI148_LCSR_DSAT_TM_2eSSTB;
1458 }
1459
1460 /* Setup data width */
1461 switch (dwidth) {
1462 case VME_D16:
1463 *attr |= TSI148_LCSR_DSAT_DBW_16;
1464 break;
1465 case VME_D32:
1466 *attr |= TSI148_LCSR_DSAT_DBW_32;
1467 break;
1468 default:
1469 dev_err(dev, "Invalid data width\n");
1470 return -EINVAL;
1471 }
1472
1473 /* Setup address space */
1474 switch (aspace) {
1475 case VME_A16:
1476 *attr |= TSI148_LCSR_DSAT_AMODE_A16;
1477 break;
1478 case VME_A24:
1479 *attr |= TSI148_LCSR_DSAT_AMODE_A24;
1480 break;
1481 case VME_A32:
1482 *attr |= TSI148_LCSR_DSAT_AMODE_A32;
1483 break;
1484 case VME_A64:
1485 *attr |= TSI148_LCSR_DSAT_AMODE_A64;
1486 break;
1487 case VME_CRCSR:
1488 *attr |= TSI148_LCSR_DSAT_AMODE_CRCSR;
1489 break;
1490 case VME_USER1:
1491 *attr |= TSI148_LCSR_DSAT_AMODE_USER1;
1492 break;
1493 case VME_USER2:
1494 *attr |= TSI148_LCSR_DSAT_AMODE_USER2;
1495 break;
1496 case VME_USER3:
1497 *attr |= TSI148_LCSR_DSAT_AMODE_USER3;
1498 break;
1499 case VME_USER4:
1500 *attr |= TSI148_LCSR_DSAT_AMODE_USER4;
1501 break;
1502 default:
1503 dev_err(dev, "Invalid address space\n");
1504 return -EINVAL;
1505 break;
1506 }
1507
1508 if (cycle & VME_SUPER)
1509 *attr |= TSI148_LCSR_DSAT_SUP;
1510 if (cycle & VME_PROG)
1511 *attr |= TSI148_LCSR_DSAT_PGM;
1512
1513 return 0;
1514}
1515
1516static int tsi148_dma_set_vme_dest_attributes(struct device *dev, u32 *attr,
1517 vme_address_t aspace, vme_cycle_t cycle, vme_width_t dwidth)
1518{
1519 /* Setup 2eSST speeds */
1520 switch (cycle & (VME_2eSST160 | VME_2eSST267 | VME_2eSST320)) {
1521 case VME_2eSST160:
1522 *attr |= TSI148_LCSR_DDAT_2eSSTM_160;
1523 break;
1524 case VME_2eSST267:
1525 *attr |= TSI148_LCSR_DDAT_2eSSTM_267;
1526 break;
1527 case VME_2eSST320:
1528 *attr |= TSI148_LCSR_DDAT_2eSSTM_320;
1529 break;
1530 }
1531
1532 /* Setup cycle types */
1533 if (cycle & VME_SCT)
1534 *attr |= TSI148_LCSR_DDAT_TM_SCT;
1535
1536 if (cycle & VME_BLT)
1537 *attr |= TSI148_LCSR_DDAT_TM_BLT;
1538
1539 if (cycle & VME_MBLT)
1540 *attr |= TSI148_LCSR_DDAT_TM_MBLT;
1541
1542 if (cycle & VME_2eVME)
1543 *attr |= TSI148_LCSR_DDAT_TM_2eVME;
1544
1545 if (cycle & VME_2eSST)
1546 *attr |= TSI148_LCSR_DDAT_TM_2eSST;
1547
1548 if (cycle & VME_2eSSTB) {
1549 dev_err(dev, "Currently not setting Broadcast Select "
1550 "Registers\n");
1551 *attr |= TSI148_LCSR_DDAT_TM_2eSSTB;
1552 }
1553
1554 /* Setup data width */
1555 switch (dwidth) {
1556 case VME_D16:
1557 *attr |= TSI148_LCSR_DDAT_DBW_16;
1558 break;
1559 case VME_D32:
1560 *attr |= TSI148_LCSR_DDAT_DBW_32;
1561 break;
1562 default:
1563 dev_err(dev, "Invalid data width\n");
1564 return -EINVAL;
1565 }
1566
1567 /* Setup address space */
1568 switch (aspace) {
1569 case VME_A16:
1570 *attr |= TSI148_LCSR_DDAT_AMODE_A16;
1571 break;
1572 case VME_A24:
1573 *attr |= TSI148_LCSR_DDAT_AMODE_A24;
1574 break;
1575 case VME_A32:
1576 *attr |= TSI148_LCSR_DDAT_AMODE_A32;
1577 break;
1578 case VME_A64:
1579 *attr |= TSI148_LCSR_DDAT_AMODE_A64;
1580 break;
1581 case VME_CRCSR:
1582 *attr |= TSI148_LCSR_DDAT_AMODE_CRCSR;
1583 break;
1584 case VME_USER1:
1585 *attr |= TSI148_LCSR_DDAT_AMODE_USER1;
1586 break;
1587 case VME_USER2:
1588 *attr |= TSI148_LCSR_DDAT_AMODE_USER2;
1589 break;
1590 case VME_USER3:
1591 *attr |= TSI148_LCSR_DDAT_AMODE_USER3;
1592 break;
1593 case VME_USER4:
1594 *attr |= TSI148_LCSR_DDAT_AMODE_USER4;
1595 break;
1596 default:
1597 dev_err(dev, "Invalid address space\n");
1598 return -EINVAL;
1599 break;
1600 }
1601
1602 if (cycle & VME_SUPER)
1603 *attr |= TSI148_LCSR_DDAT_SUP;
1604 if (cycle & VME_PROG)
1605 *attr |= TSI148_LCSR_DDAT_PGM;
1606
1607 return 0;
1608}
1609
/*
 * Add a link list descriptor to the list
 *
 * Builds one TSI148 hardware DMA descriptor describing a transfer of
 * 'count' bytes from 'src' to 'dest' and appends it to the software list.
 * If the list already holds entries, the previous descriptor's "next link
 * address" registers are patched to point at this descriptor's bus address.
 *
 * Returns 0 on success, -ENOMEM if the entry cannot be allocated, or
 * -EINVAL for a misaligned descriptor or unsupported source/destination
 * attributes.
 */
static int tsi148_dma_list_add(struct vme_dma_list *list,
	struct vme_dma_attr *src, struct vme_dma_attr *dest, size_t count)
{
	struct tsi148_dma_entry *entry, *prev;
	u32 address_high, address_low;
	struct vme_dma_pattern *pattern_attr;
	struct vme_dma_pci *pci_attr;
	struct vme_dma_vme *vme_attr;
	dma_addr_t desc_ptr;
	int retval = 0;
	struct vme_bridge *tsi148_bridge;

	tsi148_bridge = list->parent->parent;

	/* Descriptor must be aligned on 64-bit boundaries */
	entry = kmalloc(sizeof(struct tsi148_dma_entry), GFP_KERNEL);
	if (entry == NULL) {
		dev_err(tsi148_bridge->parent, "Failed to allocate memory for "
			"dma resource structure\n");
		retval = -ENOMEM;
		goto err_mem;
	}

	/* Test descriptor alignment - the chip reads the descriptor over the
	 * bus, so a misaligned one would be fetched incorrectly. */
	if ((unsigned long)&entry->descriptor & 0x7) {
		dev_err(tsi148_bridge->parent, "Descriptor not aligned to 8 "
			"byte boundary as required: %p\n",
			&entry->descriptor);
		retval = -EINVAL;
		goto err_align;
	}

	/* Given we are going to fill out the structure, we probably don't
	 * need to zero it, but better safe than sorry for now.
	 */
	memset(&entry->descriptor, 0, sizeof(struct tsi148_dma_descriptor));

	/* Fill out source part */
	switch (src->type) {
	case VME_DMA_PATTERN:
		pattern_attr = src->private;

		entry->descriptor.dsal = pattern_attr->pattern;
		entry->descriptor.dsat = TSI148_LCSR_DSAT_TYP_PAT;
		/* Default behaviour is 32 bit pattern */
		if (pattern_attr->type & VME_DMA_PATTERN_BYTE)
			entry->descriptor.dsat |= TSI148_LCSR_DSAT_PSZ;

		/* It seems that the default behaviour is to increment */
		if ((pattern_attr->type & VME_DMA_PATTERN_INCREMENT) == 0)
			entry->descriptor.dsat |= TSI148_LCSR_DSAT_NIN;

		break;
	case VME_DMA_PCI:
		pci_attr = src->private;

		/* 64-bit PCI address split across the upper/lower regs */
		reg_split((unsigned long long)pci_attr->address, &address_high,
			&address_low);
		entry->descriptor.dsau = address_high;
		entry->descriptor.dsal = address_low;
		entry->descriptor.dsat = TSI148_LCSR_DSAT_TYP_PCI;
		break;
	case VME_DMA_VME:
		vme_attr = src->private;

		reg_split((unsigned long long)vme_attr->address, &address_high,
			&address_low);
		entry->descriptor.dsau = address_high;
		entry->descriptor.dsal = address_low;
		entry->descriptor.dsat = TSI148_LCSR_DSAT_TYP_VME;

		retval = tsi148_dma_set_vme_src_attributes(
			tsi148_bridge->parent, &entry->descriptor.dsat,
			vme_attr->aspace, vme_attr->cycle, vme_attr->dwidth);
		if (retval < 0)
			goto err_source;
		break;
	default:
		dev_err(tsi148_bridge->parent, "Invalid source type\n");
		retval = -EINVAL;
		goto err_source;
		break;
	}

	/* Assume last link - this will be over-written by adding another */
	entry->descriptor.dnlau = 0;
	entry->descriptor.dnlal = TSI148_LCSR_DNLAL_LLA;


	/* Fill out destination part */
	switch (dest->type) {
	case VME_DMA_PCI:
		pci_attr = dest->private;

		reg_split((unsigned long long)pci_attr->address, &address_high,
			&address_low);
		entry->descriptor.ddau = address_high;
		entry->descriptor.ddal = address_low;
		entry->descriptor.ddat = TSI148_LCSR_DDAT_TYP_PCI;
		break;
	case VME_DMA_VME:
		vme_attr = dest->private;

		reg_split((unsigned long long)vme_attr->address, &address_high,
			&address_low);
		entry->descriptor.ddau = address_high;
		entry->descriptor.ddal = address_low;
		entry->descriptor.ddat = TSI148_LCSR_DDAT_TYP_VME;

		retval = tsi148_dma_set_vme_dest_attributes(
			tsi148_bridge->parent, &entry->descriptor.ddat,
			vme_attr->aspace, vme_attr->cycle, vme_attr->dwidth);
		if (retval < 0)
			goto err_dest;
		break;
	default:
		dev_err(tsi148_bridge->parent, "Invalid destination type\n");
		retval = -EINVAL;
		goto err_dest;
		break;
	}

	/* Fill out count */
	entry->descriptor.dcnt = (u32)count;

	/* Add to list */
	list_add_tail(&entry->list, &list->entries);

	/* Fill out previous descriptors "Next Address" */
	if (entry->list.prev != &list->entries) {
		prev = list_entry(entry->list.prev, struct tsi148_dma_entry,
			list);
		/* We need the bus address for the pointer.
		 * NOTE(review): virt_to_bus() is deprecated in the kernel;
		 * presumably acceptable on the platforms this staging driver
		 * targets - confirm before moving out of staging.
		 */
		desc_ptr = virt_to_bus(&entry->descriptor);
		reg_split(desc_ptr, &prev->descriptor.dnlau,
			&prev->descriptor.dnlal);
	}

	return 0;

err_dest:
err_source:
err_align:
	kfree(entry);
err_mem:
	return retval;
}
1760
1761/*
1762 * Check to see if the provided DMA channel is busy.
1763 */
1764static int tsi148_dma_busy(struct vme_bridge *tsi148_bridge, int channel)
1765{
1766 u32 tmp;
1767 struct tsi148_driver *bridge;
1768
1769 bridge = tsi148_bridge->driver_priv;
1770
1771 tmp = ioread32be(bridge->base + TSI148_LCSR_DMA[channel] +
1772 TSI148_LCSR_OFFSET_DSTA);
1773
1774 if (tmp & TSI148_LCSR_DSTA_BSY)
1775 return 0;
1776 else
1777 return 1;
1778
1779}
1780
/*
 * Execute a previously generated link list
 *
 * Points the channel's "next link address" registers at the first
 * descriptor, starts the engine and sleeps until the channel reports idle,
 * then checks the status register for a VME bus error.
 *
 * Returns 0 on success, -EBUSY if the channel already has a running list,
 * or -EIO on a VME bus error.
 *
 * XXX Need to provide control register configuration.
 */
static int tsi148_dma_list_exec(struct vme_dma_list *list)
{
	struct vme_dma_resource *ctrlr;
	int channel, retval = 0;
	struct tsi148_dma_entry *entry;
	dma_addr_t bus_addr;
	u32 bus_addr_high, bus_addr_low;
	u32 val, dctlreg = 0;
	struct vme_bridge *tsi148_bridge;
	struct tsi148_driver *bridge;

	ctrlr = list->parent;

	tsi148_bridge = ctrlr->parent;

	bridge = tsi148_bridge->driver_priv;

	mutex_lock(&ctrlr->mtx);

	channel = ctrlr->number;

	if (!list_empty(&ctrlr->running)) {
		/*
		 * XXX We have an active DMA transfer and currently haven't
		 * sorted out the mechanism for "pending" DMA transfers.
		 * Return busy.
		 */
		/* Need to add to pending here */
		mutex_unlock(&ctrlr->mtx);
		return -EBUSY;
	} else {
		list_add(&list->list, &ctrlr->running);
	}

	/* Get first bus address and write into registers */
	entry = list_first_entry(&list->entries, struct tsi148_dma_entry,
		list);

	bus_addr = virt_to_bus(&entry->descriptor);

	mutex_unlock(&ctrlr->mtx);

	reg_split(bus_addr, &bus_addr_high, &bus_addr_low);

	iowrite32be(bus_addr_high, bridge->base +
		TSI148_LCSR_DMA[channel] + TSI148_LCSR_OFFSET_DNLAU);
	iowrite32be(bus_addr_low, bridge->base +
		TSI148_LCSR_DMA[channel] + TSI148_LCSR_OFFSET_DNLAL);

	/* Start the operation - DGO kicks off the linked-list fetch */
	iowrite32be(dctlreg | TSI148_LCSR_DCTL_DGO, bridge->base +
		TSI148_LCSR_DMA[channel] + TSI148_LCSR_OFFSET_DCTL);

	/* NOTE(review): the return value of wait_event_interruptible() is
	 * ignored, so a signal falls through and reads DSTA while the
	 * transfer may still be in flight - confirm this is intended.
	 */
	wait_event_interruptible(bridge->dma_queue[channel],
		tsi148_dma_busy(ctrlr->parent, channel));
	/*
	 * Read status register, this register is valid until we kick off a
	 * new transfer.
	 */
	val = ioread32be(bridge->base + TSI148_LCSR_DMA[channel] +
		TSI148_LCSR_OFFSET_DSTA);

	if (val & TSI148_LCSR_DSTA_VBE) {
		dev_err(tsi148_bridge->parent, "DMA Error. DSTA=%08X\n", val);
		retval = -EIO;
	}

	/* Remove list from running list */
	mutex_lock(&ctrlr->mtx);
	list_del(&list->list);
	mutex_unlock(&ctrlr->mtx);

	return retval;
}
1860
1861/*
1862 * Clean up a previously generated link list
1863 *
1864 * We have a separate function, don't assume that the chain can't be reused.
1865 */
1866static int tsi148_dma_list_empty(struct vme_dma_list *list)
1867{
1868 struct list_head *pos, *temp;
1869 struct tsi148_dma_entry *entry;
1870
1871 /* detach and free each entry */
1872 list_for_each_safe(pos, temp, &list->entries) {
1873 list_del(pos);
1874 entry = list_entry(pos, struct tsi148_dma_entry, list);
1875 kfree(entry);
1876 }
1877
1878 return 0;
1879}
1880
1881/*
1882 * All 4 location monitors reside at the same base - this is therefore a
1883 * system wide configuration.
1884 *
1885 * This does not enable the LM monitor - that should be done when the first
1886 * callback is attached and disabled when the last callback is removed.
1887 */
1888static int tsi148_lm_set(struct vme_lm_resource *lm, unsigned long long lm_base,
1889 vme_address_t aspace, vme_cycle_t cycle)
1890{
1891 u32 lm_base_high, lm_base_low, lm_ctl = 0;
1892 int i;
1893 struct vme_bridge *tsi148_bridge;
1894 struct tsi148_driver *bridge;
1895
1896 tsi148_bridge = lm->parent;
1897
1898 bridge = tsi148_bridge->driver_priv;
1899
1900 mutex_lock(&lm->mtx);
1901
1902 /* If we already have a callback attached, we can't move it! */
1903 for (i = 0; i < lm->monitors; i++) {
1904 if (bridge->lm_callback[i] != NULL) {
1905 mutex_unlock(&lm->mtx);
1906 dev_err(tsi148_bridge->parent, "Location monitor "
1907 "callback attached, can't reset\n");
1908 return -EBUSY;
1909 }
1910 }
1911
1912 switch (aspace) {
1913 case VME_A16:
1914 lm_ctl |= TSI148_LCSR_LMAT_AS_A16;
1915 break;
1916 case VME_A24:
1917 lm_ctl |= TSI148_LCSR_LMAT_AS_A24;
1918 break;
1919 case VME_A32:
1920 lm_ctl |= TSI148_LCSR_LMAT_AS_A32;
1921 break;
1922 case VME_A64:
1923 lm_ctl |= TSI148_LCSR_LMAT_AS_A64;
1924 break;
1925 default:
1926 mutex_unlock(&lm->mtx);
1927 dev_err(tsi148_bridge->parent, "Invalid address space\n");
1928 return -EINVAL;
1929 break;
1930 }
1931
1932 if (cycle & VME_SUPER)
1933 lm_ctl |= TSI148_LCSR_LMAT_SUPR ;
1934 if (cycle & VME_USER)
1935 lm_ctl |= TSI148_LCSR_LMAT_NPRIV;
1936 if (cycle & VME_PROG)
1937 lm_ctl |= TSI148_LCSR_LMAT_PGM;
1938 if (cycle & VME_DATA)
1939 lm_ctl |= TSI148_LCSR_LMAT_DATA;
1940
1941 reg_split(lm_base, &lm_base_high, &lm_base_low);
1942
1943 iowrite32be(lm_base_high, bridge->base + TSI148_LCSR_LMBAU);
1944 iowrite32be(lm_base_low, bridge->base + TSI148_LCSR_LMBAL);
1945 iowrite32be(lm_ctl, bridge->base + TSI148_LCSR_LMAT);
1946
1947 mutex_unlock(&lm->mtx);
1948
1949 return 0;
1950}
1951
1952/* Get configuration of the callback monitor and return whether it is enabled
1953 * or disabled.
1954 */
1955static int tsi148_lm_get(struct vme_lm_resource *lm,
1956 unsigned long long *lm_base, vme_address_t *aspace, vme_cycle_t *cycle)
1957{
1958 u32 lm_base_high, lm_base_low, lm_ctl, enabled = 0;
1959 struct tsi148_driver *bridge;
1960
1961 bridge = lm->parent->driver_priv;
1962
1963 mutex_lock(&lm->mtx);
1964
1965 lm_base_high = ioread32be(bridge->base + TSI148_LCSR_LMBAU);
1966 lm_base_low = ioread32be(bridge->base + TSI148_LCSR_LMBAL);
1967 lm_ctl = ioread32be(bridge->base + TSI148_LCSR_LMAT);
1968
1969 reg_join(lm_base_high, lm_base_low, lm_base);
1970
1971 if (lm_ctl & TSI148_LCSR_LMAT_EN)
1972 enabled = 1;
1973
1974 if ((lm_ctl & TSI148_LCSR_LMAT_AS_M) == TSI148_LCSR_LMAT_AS_A16)
1975 *aspace |= VME_A16;
1976
1977 if ((lm_ctl & TSI148_LCSR_LMAT_AS_M) == TSI148_LCSR_LMAT_AS_A24)
1978 *aspace |= VME_A24;
1979
1980 if ((lm_ctl & TSI148_LCSR_LMAT_AS_M) == TSI148_LCSR_LMAT_AS_A32)
1981 *aspace |= VME_A32;
1982
1983 if ((lm_ctl & TSI148_LCSR_LMAT_AS_M) == TSI148_LCSR_LMAT_AS_A64)
1984 *aspace |= VME_A64;
1985
1986
1987 if (lm_ctl & TSI148_LCSR_LMAT_SUPR)
1988 *cycle |= VME_SUPER;
1989 if (lm_ctl & TSI148_LCSR_LMAT_NPRIV)
1990 *cycle |= VME_USER;
1991 if (lm_ctl & TSI148_LCSR_LMAT_PGM)
1992 *cycle |= VME_PROG;
1993 if (lm_ctl & TSI148_LCSR_LMAT_DATA)
1994 *cycle |= VME_DATA;
1995
1996 mutex_unlock(&lm->mtx);
1997
1998 return enabled;
1999}
2000
/*
 * Attach a callback to a specific location monitor.
 *
 * Callback will be passed the monitor triggered.
 *
 * Returns 0 on success, -EINVAL if the location monitor has not been
 * configured via tsi148_lm_set() first, or -EBUSY if a callback is
 * already attached to this monitor.
 */
static int tsi148_lm_attach(struct vme_lm_resource *lm, int monitor,
	void (*callback)(int))
{
	u32 lm_ctl, tmp;
	struct vme_bridge *tsi148_bridge;
	struct tsi148_driver *bridge;

	tsi148_bridge = lm->parent;

	bridge = tsi148_bridge->driver_priv;

	mutex_lock(&lm->mtx);

	/* Ensure that the location monitor is configured - need PGM or DATA */
	lm_ctl = ioread32be(bridge->base + TSI148_LCSR_LMAT);
	if ((lm_ctl & (TSI148_LCSR_LMAT_PGM | TSI148_LCSR_LMAT_DATA)) == 0) {
		mutex_unlock(&lm->mtx);
		dev_err(tsi148_bridge->parent, "Location monitor not properly "
			"configured\n");
		return -EINVAL;
	}

	/* Check that a callback isn't already attached */
	if (bridge->lm_callback[monitor] != NULL) {
		mutex_unlock(&lm->mtx);
		dev_err(tsi148_bridge->parent, "Existing callback attached\n");
		return -EBUSY;
	}

	/* Attach callback before unmasking the interrupt, so the handler
	 * always finds a valid pointer */
	bridge->lm_callback[monitor] = callback;

	/* Enable Location Monitor interrupt */
	tmp = ioread32be(bridge->base + TSI148_LCSR_INTEN);
	tmp |= TSI148_LCSR_INTEN_LMEN[monitor];
	iowrite32be(tmp, bridge->base + TSI148_LCSR_INTEN);

	tmp = ioread32be(bridge->base + TSI148_LCSR_INTEO);
	tmp |= TSI148_LCSR_INTEO_LMEO[monitor];
	iowrite32be(tmp, bridge->base + TSI148_LCSR_INTEO);

	/* Ensure that global Location Monitor Enable set */
	if ((lm_ctl & TSI148_LCSR_LMAT_EN) == 0) {
		lm_ctl |= TSI148_LCSR_LMAT_EN;
		iowrite32be(lm_ctl, bridge->base + TSI148_LCSR_LMAT);
	}

	mutex_unlock(&lm->mtx);

	return 0;
}
2057
/*
 * Detach a callback function from a specific location monitor.
 *
 * Masks and clears the monitor's interrupt, removes the callback, and
 * drops the global Location Monitor enable when no monitors remain
 * enabled. Always returns 0.
 */
static int tsi148_lm_detach(struct vme_lm_resource *lm, int monitor)
{
	u32 lm_en, tmp;
	struct tsi148_driver *bridge;

	bridge = lm->parent->driver_priv;

	mutex_lock(&lm->mtx);

	/* Disable Location Monitor and ensure previous interrupts are clear */
	lm_en = ioread32be(bridge->base + TSI148_LCSR_INTEN);
	lm_en &= ~TSI148_LCSR_INTEN_LMEN[monitor];
	iowrite32be(lm_en, bridge->base + TSI148_LCSR_INTEN);

	tmp = ioread32be(bridge->base + TSI148_LCSR_INTEO);
	tmp &= ~TSI148_LCSR_INTEO_LMEO[monitor];
	iowrite32be(tmp, bridge->base + TSI148_LCSR_INTEO);

	/* Clear any interrupt already latched for this monitor */
	iowrite32be(TSI148_LCSR_INTC_LMC[monitor],
		bridge->base + TSI148_LCSR_INTC);

	/* Detach callback */
	bridge->lm_callback[monitor] = NULL;

	/* If all location monitors disabled, disable global Location Monitor */
	if ((lm_en & (TSI148_LCSR_INTS_LM0S | TSI148_LCSR_INTS_LM1S |
			TSI148_LCSR_INTS_LM2S | TSI148_LCSR_INTS_LM3S)) == 0) {
		tmp = ioread32be(bridge->base + TSI148_LCSR_LMAT);
		tmp &= ~TSI148_LCSR_LMAT_EN;
		iowrite32be(tmp, bridge->base + TSI148_LCSR_LMAT);
	}

	mutex_unlock(&lm->mtx);

	return 0;
}
2097
2098/*
2099 * Determine Geographical Addressing
2100 */
2101static int tsi148_slot_get(struct vme_bridge *tsi148_bridge)
2102{
2103 u32 slot = 0;
2104 struct tsi148_driver *bridge;
2105
2106 bridge = tsi148_bridge->driver_priv;
2107
2108 if (!geoid) {
2109 slot = ioread32be(bridge->base + TSI148_LCSR_VSTAT);
2110 slot = slot & TSI148_LCSR_VSTAT_GA_M;
2111 } else
2112 slot = geoid;
2113
2114 return (int)slot;
2115}
2116
/* Module entry point: register the PCI driver with the PCI core. */
static int __init tsi148_init(void)
{
	return pci_register_driver(&tsi148_driver);
}
2121
/*
 * Configure CR/CSR space
 *
 * Access to the CR/CSR can be configured at power-up. The location of the
 * CR/CSR registers in the CR/CSR address space is determined by the board's
 * Auto-ID or Geographic address. This function ensures that the window is
 * enabled at an offset consistent with the board's geographic address.
 *
 * Each board has a 512kB window, with the highest 4kB being used for the
 * board's registers; this means there is a fixed-length 508kB window which
 * must be mapped onto PCI memory.
 */
2134static int tsi148_crcsr_init(struct vme_bridge *tsi148_bridge,
2135 struct pci_dev *pdev)
2136{
2137 u32 cbar, crat, vstat;
2138 u32 crcsr_bus_high, crcsr_bus_low;
2139 int retval;
2140 struct tsi148_driver *bridge;
2141
2142 bridge = tsi148_bridge->driver_priv;
2143
2144 /* Allocate mem for CR/CSR image */
2145 bridge->crcsr_kernel = pci_alloc_consistent(pdev, VME_CRCSR_BUF_SIZE,
2146 &bridge->crcsr_bus);
2147 if (bridge->crcsr_kernel == NULL) {
2148 dev_err(tsi148_bridge->parent, "Failed to allocate memory for "
2149 "CR/CSR image\n");
2150 return -ENOMEM;
2151 }
2152
2153 memset(bridge->crcsr_kernel, 0, VME_CRCSR_BUF_SIZE);
2154
2155 reg_split(bridge->crcsr_bus, &crcsr_bus_high, &crcsr_bus_low);
2156
2157 iowrite32be(crcsr_bus_high, bridge->base + TSI148_LCSR_CROU);
2158 iowrite32be(crcsr_bus_low, bridge->base + TSI148_LCSR_CROL);
2159
2160 /* Ensure that the CR/CSR is configured at the correct offset */
2161 cbar = ioread32be(bridge->base + TSI148_CBAR);
2162 cbar = (cbar & TSI148_CRCSR_CBAR_M)>>3;
2163
2164 vstat = tsi148_slot_get(tsi148_bridge);
2165
2166 if (cbar != vstat) {
2167 cbar = vstat;
2168 dev_info(tsi148_bridge->parent, "Setting CR/CSR offset\n");
2169 iowrite32be(cbar<<3, bridge->base + TSI148_CBAR);
2170 }
2171 dev_info(tsi148_bridge->parent, "CR/CSR Offset: %d\n", cbar);
2172
2173 crat = ioread32be(bridge->base + TSI148_LCSR_CRAT);
2174 if (crat & TSI148_LCSR_CRAT_EN) {
2175 dev_info(tsi148_bridge->parent, "Enabling CR/CSR space\n");
2176 iowrite32be(crat | TSI148_LCSR_CRAT_EN,
2177 bridge->base + TSI148_LCSR_CRAT);
2178 } else
2179 dev_info(tsi148_bridge->parent, "CR/CSR already enabled\n");
2180
2181 /* If we want flushed, error-checked writes, set up a window
2182 * over the CR/CSR registers. We read from here to safely flush
2183 * through VME writes.
2184 */
2185 if (err_chk) {
2186 retval = tsi148_master_set(bridge->flush_image, 1,
2187 (vstat * 0x80000), 0x80000, VME_CRCSR, VME_SCT,
2188 VME_D16);
2189 if (retval)
2190 dev_err(tsi148_bridge->parent, "Configuring flush image"
2191 " failed\n");
2192 }
2193
2194 return 0;
2195
2196}
2197
/*
 * Disable the CR/CSR window, clear its bus-address registers and free the
 * DMA-consistent image buffer allocated by tsi148_crcsr_init().
 */
static void tsi148_crcsr_exit(struct vme_bridge *tsi148_bridge,
	struct pci_dev *pdev)
{
	u32 crat;
	struct tsi148_driver *bridge;

	bridge = tsi148_bridge->driver_priv;

	/* Turn off CR/CSR space */
	crat = ioread32be(bridge->base + TSI148_LCSR_CRAT);
	iowrite32be(crat & ~TSI148_LCSR_CRAT_EN,
		bridge->base + TSI148_LCSR_CRAT);

	/* Free image */
	iowrite32be(0, bridge->base + TSI148_LCSR_CROU);
	iowrite32be(0, bridge->base + TSI148_LCSR_CROL);

	pci_free_consistent(pdev, VME_CRCSR_BUF_SIZE, bridge->crcsr_kernel,
		bridge->crcsr_bus);
}
2218
2219static int tsi148_probe(struct pci_dev *pdev, const struct pci_device_id *id)
2220{
2221 int retval, i, master_num;
2222 u32 data;
2223 struct list_head *pos = NULL;
2224 struct vme_bridge *tsi148_bridge;
2225 struct tsi148_driver *tsi148_device;
2226 struct vme_master_resource *master_image;
2227 struct vme_slave_resource *slave_image;
2228 struct vme_dma_resource *dma_ctrlr;
2229 struct vme_lm_resource *lm;
2230
2231 /* If we want to support more than one of each bridge, we need to
2232 * dynamically generate this so we get one per device
2233 */
2234 tsi148_bridge = kzalloc(sizeof(struct vme_bridge), GFP_KERNEL);
2235 if (tsi148_bridge == NULL) {
2236 dev_err(&pdev->dev, "Failed to allocate memory for device "
2237 "structure\n");
2238 retval = -ENOMEM;
2239 goto err_struct;
2240 }
2241
2242 tsi148_device = kzalloc(sizeof(struct tsi148_driver), GFP_KERNEL);
2243 if (tsi148_device == NULL) {
2244 dev_err(&pdev->dev, "Failed to allocate memory for device "
2245 "structure\n");
2246 retval = -ENOMEM;
2247 goto err_driver;
2248 }
2249
2250 tsi148_bridge->driver_priv = tsi148_device;
2251
2252 /* Enable the device */
2253 retval = pci_enable_device(pdev);
2254 if (retval) {
2255 dev_err(&pdev->dev, "Unable to enable device\n");
2256 goto err_enable;
2257 }
2258
2259 /* Map Registers */
2260 retval = pci_request_regions(pdev, driver_name);
2261 if (retval) {
2262 dev_err(&pdev->dev, "Unable to reserve resources\n");
2263 goto err_resource;
2264 }
2265
2266 /* map registers in BAR 0 */
2267 tsi148_device->base = ioremap_nocache(pci_resource_start(pdev, 0),
2268 4096);
2269 if (!tsi148_device->base) {
2270 dev_err(&pdev->dev, "Unable to remap CRG region\n");
2271 retval = -EIO;
2272 goto err_remap;
2273 }
2274
2275 /* Check to see if the mapping worked out */
2276 data = ioread32(tsi148_device->base + TSI148_PCFS_ID) & 0x0000FFFF;
2277 if (data != PCI_VENDOR_ID_TUNDRA) {
2278 dev_err(&pdev->dev, "CRG region check failed\n");
2279 retval = -EIO;
2280 goto err_test;
2281 }
2282
2283 /* Initialize wait queues & mutual exclusion flags */
2284 init_waitqueue_head(&tsi148_device->dma_queue[0]);
2285 init_waitqueue_head(&tsi148_device->dma_queue[1]);
2286 init_waitqueue_head(&tsi148_device->iack_queue);
2287 mutex_init(&tsi148_device->vme_int);
2288 mutex_init(&tsi148_device->vme_rmw);
2289
2290 tsi148_bridge->parent = &pdev->dev;
2291 strcpy(tsi148_bridge->name, driver_name);
2292
2293 /* Setup IRQ */
2294 retval = tsi148_irq_init(tsi148_bridge);
2295 if (retval != 0) {
2296 dev_err(&pdev->dev, "Chip Initialization failed.\n");
2297 goto err_irq;
2298 }
2299
2300 /* If we are going to flush writes, we need to read from the VME bus.
2301 * We need to do this safely, thus we read the devices own CR/CSR
2302 * register. To do this we must set up a window in CR/CSR space and
2303 * hence have one less master window resource available.
2304 */
2305 master_num = TSI148_MAX_MASTER;
2306 if (err_chk) {
2307 master_num--;
2308
2309 tsi148_device->flush_image =
2310 kmalloc(sizeof(struct vme_master_resource), GFP_KERNEL);
2311 if (tsi148_device->flush_image == NULL) {
2312 dev_err(&pdev->dev, "Failed to allocate memory for "
2313 "flush resource structure\n");
2314 retval = -ENOMEM;
2315 goto err_master;
2316 }
2317 tsi148_device->flush_image->parent = tsi148_bridge;
2318 spin_lock_init(&tsi148_device->flush_image->lock);
2319 tsi148_device->flush_image->locked = 1;
2320 tsi148_device->flush_image->number = master_num;
2321 tsi148_device->flush_image->address_attr = VME_A16 | VME_A24 |
2322 VME_A32 | VME_A64;
2323 tsi148_device->flush_image->cycle_attr = VME_SCT | VME_BLT |
2324 VME_MBLT | VME_2eVME | VME_2eSST | VME_2eSSTB |
2325 VME_2eSST160 | VME_2eSST267 | VME_2eSST320 | VME_SUPER |
2326 VME_USER | VME_PROG | VME_DATA;
2327 tsi148_device->flush_image->width_attr = VME_D16 | VME_D32;
2328 memset(&tsi148_device->flush_image->bus_resource, 0,
2329 sizeof(struct resource));
2330 tsi148_device->flush_image->kern_base = NULL;
2331 }
2332
2333 /* Add master windows to list */
2334 INIT_LIST_HEAD(&tsi148_bridge->master_resources);
2335 for (i = 0; i < master_num; i++) {
2336 master_image = kmalloc(sizeof(struct vme_master_resource),
2337 GFP_KERNEL);
2338 if (master_image == NULL) {
2339 dev_err(&pdev->dev, "Failed to allocate memory for "
2340 "master resource structure\n");
2341 retval = -ENOMEM;
2342 goto err_master;
2343 }
2344 master_image->parent = tsi148_bridge;
2345 spin_lock_init(&master_image->lock);
2346 master_image->locked = 0;
2347 master_image->number = i;
2348 master_image->address_attr = VME_A16 | VME_A24 | VME_A32 |
2349 VME_A64;
2350 master_image->cycle_attr = VME_SCT | VME_BLT | VME_MBLT |
2351 VME_2eVME | VME_2eSST | VME_2eSSTB | VME_2eSST160 |
2352 VME_2eSST267 | VME_2eSST320 | VME_SUPER | VME_USER |
2353 VME_PROG | VME_DATA;
2354 master_image->width_attr = VME_D16 | VME_D32;
2355 memset(&master_image->bus_resource, 0,
2356 sizeof(struct resource));
2357 master_image->kern_base = NULL;
2358 list_add_tail(&master_image->list,
2359 &tsi148_bridge->master_resources);
2360 }
2361
2362 /* Add slave windows to list */
2363 INIT_LIST_HEAD(&tsi148_bridge->slave_resources);
2364 for (i = 0; i < TSI148_MAX_SLAVE; i++) {
2365 slave_image = kmalloc(sizeof(struct vme_slave_resource),
2366 GFP_KERNEL);
2367 if (slave_image == NULL) {
2368 dev_err(&pdev->dev, "Failed to allocate memory for "
2369 "slave resource structure\n");
2370 retval = -ENOMEM;
2371 goto err_slave;
2372 }
2373 slave_image->parent = tsi148_bridge;
2374 mutex_init(&slave_image->mtx);
2375 slave_image->locked = 0;
2376 slave_image->number = i;
2377 slave_image->address_attr = VME_A16 | VME_A24 | VME_A32 |
2378 VME_A64 | VME_CRCSR | VME_USER1 | VME_USER2 |
2379 VME_USER3 | VME_USER4;
2380 slave_image->cycle_attr = VME_SCT | VME_BLT | VME_MBLT |
2381 VME_2eVME | VME_2eSST | VME_2eSSTB | VME_2eSST160 |
2382 VME_2eSST267 | VME_2eSST320 | VME_SUPER | VME_USER |
2383 VME_PROG | VME_DATA;
2384 list_add_tail(&slave_image->list,
2385 &tsi148_bridge->slave_resources);
2386 }
2387
2388 /* Add dma engines to list */
2389 INIT_LIST_HEAD(&tsi148_bridge->dma_resources);
2390 for (i = 0; i < TSI148_MAX_DMA; i++) {
2391 dma_ctrlr = kmalloc(sizeof(struct vme_dma_resource),
2392 GFP_KERNEL);
2393 if (dma_ctrlr == NULL) {
2394 dev_err(&pdev->dev, "Failed to allocate memory for "
2395 "dma resource structure\n");
2396 retval = -ENOMEM;
2397 goto err_dma;
2398 }
2399 dma_ctrlr->parent = tsi148_bridge;
2400 mutex_init(&dma_ctrlr->mtx);
2401 dma_ctrlr->locked = 0;
2402 dma_ctrlr->number = i;
2403 dma_ctrlr->route_attr = VME_DMA_VME_TO_MEM |
2404 VME_DMA_MEM_TO_VME | VME_DMA_VME_TO_VME |
2405 VME_DMA_MEM_TO_MEM | VME_DMA_PATTERN_TO_VME |
2406 VME_DMA_PATTERN_TO_MEM;
2407 INIT_LIST_HEAD(&dma_ctrlr->pending);
2408 INIT_LIST_HEAD(&dma_ctrlr->running);
2409 list_add_tail(&dma_ctrlr->list,
2410 &tsi148_bridge->dma_resources);
2411 }
2412
2413 /* Add location monitor to list */
2414 INIT_LIST_HEAD(&tsi148_bridge->lm_resources);
2415 lm = kmalloc(sizeof(struct vme_lm_resource), GFP_KERNEL);
2416 if (lm == NULL) {
2417 dev_err(&pdev->dev, "Failed to allocate memory for "
2418 "location monitor resource structure\n");
2419 retval = -ENOMEM;
2420 goto err_lm;
2421 }
2422 lm->parent = tsi148_bridge;
2423 mutex_init(&lm->mtx);
2424 lm->locked = 0;
2425 lm->number = 1;
2426 lm->monitors = 4;
2427 list_add_tail(&lm->list, &tsi148_bridge->lm_resources);
2428
2429 tsi148_bridge->slave_get = tsi148_slave_get;
2430 tsi148_bridge->slave_set = tsi148_slave_set;
2431 tsi148_bridge->master_get = tsi148_master_get;
2432 tsi148_bridge->master_set = tsi148_master_set;
2433 tsi148_bridge->master_read = tsi148_master_read;
2434 tsi148_bridge->master_write = tsi148_master_write;
2435 tsi148_bridge->master_rmw = tsi148_master_rmw;
2436 tsi148_bridge->dma_list_add = tsi148_dma_list_add;
2437 tsi148_bridge->dma_list_exec = tsi148_dma_list_exec;
2438 tsi148_bridge->dma_list_empty = tsi148_dma_list_empty;
2439 tsi148_bridge->irq_set = tsi148_irq_set;
2440 tsi148_bridge->irq_generate = tsi148_irq_generate;
2441 tsi148_bridge->lm_set = tsi148_lm_set;
2442 tsi148_bridge->lm_get = tsi148_lm_get;
2443 tsi148_bridge->lm_attach = tsi148_lm_attach;
2444 tsi148_bridge->lm_detach = tsi148_lm_detach;
2445 tsi148_bridge->slot_get = tsi148_slot_get;
2446
2447 data = ioread32be(tsi148_device->base + TSI148_LCSR_VSTAT);
2448 dev_info(&pdev->dev, "Board is%s the VME system controller\n",
2449 (data & TSI148_LCSR_VSTAT_SCONS) ? "" : " not");
2450 if (!geoid)
2451 dev_info(&pdev->dev, "VME geographical address is %d\n",
2452 data & TSI148_LCSR_VSTAT_GA_M);
2453 else
2454 dev_info(&pdev->dev, "VME geographical address is set to %d\n",
2455 geoid);
2456
2457 dev_info(&pdev->dev, "VME Write and flush and error check is %s\n",
2458 err_chk ? "enabled" : "disabled");
2459
2460 if (tsi148_crcsr_init(tsi148_bridge, pdev)) {
2461 dev_err(&pdev->dev, "CR/CSR configuration failed.\n");
2462 goto err_crcsr;
2463 }
2464
2465 retval = vme_register_bridge(tsi148_bridge);
2466 if (retval != 0) {
2467 dev_err(&pdev->dev, "Chip Registration failed.\n");
2468 goto err_reg;
2469 }
2470
2471 pci_set_drvdata(pdev, tsi148_bridge);
2472
2473 /* Clear VME bus "board fail", and "power-up reset" lines */
2474 data = ioread32be(tsi148_device->base + TSI148_LCSR_VSTAT);
2475 data &= ~TSI148_LCSR_VSTAT_BRDFL;
2476 data |= TSI148_LCSR_VSTAT_CPURST;
2477 iowrite32be(data, tsi148_device->base + TSI148_LCSR_VSTAT);
2478
2479 return 0;
2480
2481err_reg:
2482 tsi148_crcsr_exit(tsi148_bridge, pdev);
2483err_crcsr:
2484err_lm:
2485 /* resources are stored in link list */
2486 list_for_each(pos, &tsi148_bridge->lm_resources) {
2487 lm = list_entry(pos, struct vme_lm_resource, list);
2488 list_del(pos);
2489 kfree(lm);
2490 }
2491err_dma:
2492 /* resources are stored in link list */
2493 list_for_each(pos, &tsi148_bridge->dma_resources) {
2494 dma_ctrlr = list_entry(pos, struct vme_dma_resource, list);
2495 list_del(pos);
2496 kfree(dma_ctrlr);
2497 }
2498err_slave:
2499 /* resources are stored in link list */
2500 list_for_each(pos, &tsi148_bridge->slave_resources) {
2501 slave_image = list_entry(pos, struct vme_slave_resource, list);
2502 list_del(pos);
2503 kfree(slave_image);
2504 }
2505err_master:
2506 /* resources are stored in link list */
2507 list_for_each(pos, &tsi148_bridge->master_resources) {
2508 master_image = list_entry(pos, struct vme_master_resource,
2509 list);
2510 list_del(pos);
2511 kfree(master_image);
2512 }
2513
2514 tsi148_irq_exit(tsi148_bridge, pdev);
2515err_irq:
2516err_test:
2517 iounmap(tsi148_device->base);
2518err_remap:
2519 pci_release_regions(pdev);
2520err_resource:
2521 pci_disable_device(pdev);
2522err_enable:
2523 kfree(tsi148_device);
2524err_driver:
2525 kfree(tsi148_bridge);
2526err_struct:
2527 return retval;
2528
2529}
2530
/*
 * Remove routine: quiesce the hardware, unregister the bridge from the
 * VME core and release every resource allocated in tsi148_probe().
 */
static void tsi148_remove(struct pci_dev *pdev)
{
	struct list_head *pos = NULL;
	struct list_head *tmplist;
	struct vme_master_resource *master_image;
	struct vme_slave_resource *slave_image;
	struct vme_dma_resource *dma_ctrlr;
	int i;
	struct tsi148_driver *bridge;
	struct vme_bridge *tsi148_bridge = pci_get_drvdata(pdev);

	bridge = tsi148_bridge->driver_priv;


	dev_dbg(&pdev->dev, "Driver is being unloaded.\n");

	/*
	 * Shutdown all inbound and outbound windows.
	 */
	for (i = 0; i < 8; i++) {
		iowrite32be(0, bridge->base + TSI148_LCSR_IT[i] +
			TSI148_LCSR_OFFSET_ITAT);
		iowrite32be(0, bridge->base + TSI148_LCSR_OT[i] +
			TSI148_LCSR_OFFSET_OTAT);
	}

	/*
	 * Shutdown Location monitor.
	 */
	iowrite32be(0, bridge->base + TSI148_LCSR_LMAT);

	/*
	 * Shutdown CRG map.
	 */
	iowrite32be(0, bridge->base + TSI148_LCSR_CSRAT);

	/*
	 * Clear error status.
	 */
	iowrite32be(0xFFFFFFFF, bridge->base + TSI148_LCSR_EDPAT);
	iowrite32be(0xFFFFFFFF, bridge->base + TSI148_LCSR_VEAT);
	iowrite32be(0x07000700, bridge->base + TSI148_LCSR_PSTAT);

	/*
	 * Remove VIRQ interrupt (if any)
	 */
	if (ioread32be(bridge->base + TSI148_LCSR_VICR) & 0x800)
		iowrite32be(0x8000, bridge->base + TSI148_LCSR_VICR);

	/*
	 * Map all Interrupts to PCI INTA
	 */
	iowrite32be(0x0, bridge->base + TSI148_LCSR_INTM1);
	iowrite32be(0x0, bridge->base + TSI148_LCSR_INTM2);

	tsi148_irq_exit(tsi148_bridge, pdev);

	vme_unregister_bridge(tsi148_bridge);

	tsi148_crcsr_exit(tsi148_bridge, pdev);

	/* resources are stored in link list */
	list_for_each_safe(pos, tmplist, &tsi148_bridge->dma_resources) {
		dma_ctrlr = list_entry(pos, struct vme_dma_resource, list);
		list_del(pos);
		kfree(dma_ctrlr);
	}

	/* resources are stored in link list */
	list_for_each_safe(pos, tmplist, &tsi148_bridge->slave_resources) {
		slave_image = list_entry(pos, struct vme_slave_resource, list);
		list_del(pos);
		kfree(slave_image);
	}

	/* resources are stored in link list */
	list_for_each_safe(pos, tmplist, &tsi148_bridge->master_resources) {
		master_image = list_entry(pos, struct vme_master_resource,
			list);
		list_del(pos);
		kfree(master_image);
	}

	/* NOTE(review): bridge->flush_image (allocated when err_chk is set)
	 * and the lm_resources entry do not appear to be freed here -
	 * confirm whether this leak is intentional. */

	iounmap(bridge->base);

	pci_release_regions(pdev);

	pci_disable_device(pdev);

	kfree(tsi148_bridge->driver_priv);

	kfree(tsi148_bridge);
}
2624
/* Module exit point: unregister the PCI driver from the PCI core. */
static void __exit tsi148_exit(void)
{
	pci_unregister_driver(&tsi148_driver);
}
2629
/* Module parameters - the variables themselves are declared earlier in
 * this file. 'err_chk' enables flushed, error-checked VME writes;
 * 'geoid' overrides the geographic slot address read from VSTAT. */
MODULE_PARM_DESC(err_chk, "Check for VME errors on reads and writes");
module_param(err_chk, bool, 0);

MODULE_PARM_DESC(geoid, "Override geographical addressing");
module_param(geoid, int, 0);

MODULE_DESCRIPTION("VME driver for the Tundra Tempe VME bridge");
MODULE_LICENSE("GPL");

module_init(tsi148_init);
module_exit(tsi148_exit);
diff --git a/drivers/staging/vme/bridges/vme_tsi148.h b/drivers/staging/vme/bridges/vme_tsi148.h
new file mode 100644
index 00000000000..a3ac2fe9881
--- /dev/null
+++ b/drivers/staging/vme/bridges/vme_tsi148.h
@@ -0,0 +1,1409 @@
1/*
2 * tsi148.h
3 *
4 * Support for the Tundra TSI148 VME Bridge chip
5 *
6 * Author: Tom Armistead
7 * Updated and maintained by Ajit Prem
8 * Copyright 2004 Motorola Inc.
9 *
10 * This program is free software; you can redistribute it and/or modify it
11 * under the terms of the GNU General Public License as published by the
12 * Free Software Foundation; either version 2 of the License, or (at your
13 * option) any later version.
14 */
15
16#ifndef TSI148_H
17#define TSI148_H
18
19#ifndef PCI_VENDOR_ID_TUNDRA
20#define PCI_VENDOR_ID_TUNDRA 0x10e3
21#endif
22
23#ifndef PCI_DEVICE_ID_TUNDRA_TSI148
24#define PCI_DEVICE_ID_TUNDRA_TSI148 0x148
25#endif
26
27/*
28 * Define the number of each that the Tsi148 supports.
29 */
30#define TSI148_MAX_MASTER 8 /* Max Master Windows */
31#define TSI148_MAX_SLAVE 8 /* Max Slave Windows */
32#define TSI148_MAX_DMA 2 /* Max DMA Controllers */
33#define TSI148_MAX_MAILBOX 4 /* Max Mail Box registers */
34#define TSI148_MAX_SEMAPHORE 8 /* Max Semaphores */
35
/* Structure used to hold driver specific information */
struct tsi148_driver {
	void __iomem *base;	/* Base Address of device registers */
	wait_queue_head_t dma_queue[2];	/* One queue per DMA controller
					 * (TSI148_MAX_DMA == 2)
					 */
	wait_queue_head_t iack_queue;	/* Waiters for a VME IACK cycle */
	void (*lm_callback[4])(int);	/* Called in interrupt handler */
	void *crcsr_kernel;	/* Kernel virtual address of CR/CSR image */
	dma_addr_t crcsr_bus;	/* Bus (DMA) address of the CR/CSR image */
	struct vme_master_resource *flush_image;	/* NOTE(review):
					 * presumably a window reserved for
					 * read-back flushes — confirm
					 */
	struct mutex vme_rmw;	/* Only one RMW cycle at a time */
	struct mutex vme_int;	/*
				 * Only one VME interrupt can be
				 * generated at a time, provide locking
				 */
};
51
/*
 * Layout of a DMAC Linked-List Descriptor
 *
 * Note: This structure is accessed via the chip and therefore must be
 * correctly laid out - It must also be aligned on 64-bit boundaries.
 *
 * Fields follow the file-wide U/L register convention: "...u" holds the
 * upper 32 bits of a 64-bit quantity, "...l" the lower 32 bits.
 */
struct tsi148_dma_descriptor {
	u32 dsau;	/* Source Address (upper 32 bits) */
	u32 dsal;	/* Source Address (lower 32 bits) */
	u32 ddau;	/* Destination Address (upper 32 bits) */
	u32 ddal;	/* Destination Address (lower 32 bits) */
	u32 dsat;	/* Source attributes */
	u32 ddat;	/* Destination attributes */
	u32 dnlau;	/* Next link address (upper 32 bits) */
	u32 dnlal;	/* Next link address (lower 32 bits) */
	u32 dcnt;	/* Byte count */
	u32 ddbs;	/* 2eSST Broadcast select */
};
70
/* One element of a driver-side DMA linked list: the hardware descriptor
 * plus the list linkage used to chain entries together. */
struct tsi148_dma_entry {
	/*
	 * The descriptor needs to be aligned on a 64-bit boundary, we increase
	 * the chance of this by putting it first in the structure.
	 */
	struct tsi148_dma_descriptor descriptor;	/* chip-visible part */
	struct list_head list;	/* linkage within one DMA transfer list */
};
79
80/*
81 * TSI148 ASIC register structure overlays and bit field definitions.
82 *
83 * Note: Tsi148 Register Group (CRG) consists of the following
84 * combination of registers:
85 * PCFS - PCI Configuration Space Registers
86 * LCSR - Local Control and Status Registers
87 * GCSR - Global Control and Status Registers
88 * CR/CSR - Subset of Configuration ROM /
89 * Control and Status Registers
90 */
91
92
93/*
94 * Command/Status Registers (CRG + $004)
95 */
96#define TSI148_PCFS_ID 0x0
97#define TSI148_PCFS_CSR 0x4
98#define TSI148_PCFS_CLASS 0x8
99#define TSI148_PCFS_MISC0 0xC
100#define TSI148_PCFS_MBARL 0x10
101#define TSI148_PCFS_MBARU 0x14
102
103#define TSI148_PCFS_SUBID 0x28
104
105#define TSI148_PCFS_CAPP 0x34
106
107#define TSI148_PCFS_MISC1 0x3C
108
109#define TSI148_PCFS_XCAPP 0x40
110#define TSI148_PCFS_XSTAT 0x44
111
112/*
113 * LCSR definitions
114 */
115
116/*
117 * Outbound Translations
118 */
119#define TSI148_LCSR_OT0_OTSAU 0x100
120#define TSI148_LCSR_OT0_OTSAL 0x104
121#define TSI148_LCSR_OT0_OTEAU 0x108
122#define TSI148_LCSR_OT0_OTEAL 0x10C
123#define TSI148_LCSR_OT0_OTOFU 0x110
124#define TSI148_LCSR_OT0_OTOFL 0x114
125#define TSI148_LCSR_OT0_OTBS 0x118
126#define TSI148_LCSR_OT0_OTAT 0x11C
127
128#define TSI148_LCSR_OT1_OTSAU 0x120
129#define TSI148_LCSR_OT1_OTSAL 0x124
130#define TSI148_LCSR_OT1_OTEAU 0x128
131#define TSI148_LCSR_OT1_OTEAL 0x12C
132#define TSI148_LCSR_OT1_OTOFU 0x130
133#define TSI148_LCSR_OT1_OTOFL 0x134
134#define TSI148_LCSR_OT1_OTBS 0x138
135#define TSI148_LCSR_OT1_OTAT 0x13C
136
137#define TSI148_LCSR_OT2_OTSAU 0x140
138#define TSI148_LCSR_OT2_OTSAL 0x144
139#define TSI148_LCSR_OT2_OTEAU 0x148
140#define TSI148_LCSR_OT2_OTEAL 0x14C
141#define TSI148_LCSR_OT2_OTOFU 0x150
142#define TSI148_LCSR_OT2_OTOFL 0x154
143#define TSI148_LCSR_OT2_OTBS 0x158
144#define TSI148_LCSR_OT2_OTAT 0x15C
145
146#define TSI148_LCSR_OT3_OTSAU 0x160
147#define TSI148_LCSR_OT3_OTSAL 0x164
148#define TSI148_LCSR_OT3_OTEAU 0x168
149#define TSI148_LCSR_OT3_OTEAL 0x16C
150#define TSI148_LCSR_OT3_OTOFU 0x170
151#define TSI148_LCSR_OT3_OTOFL 0x174
152#define TSI148_LCSR_OT3_OTBS 0x178
153#define TSI148_LCSR_OT3_OTAT 0x17C
154
155#define TSI148_LCSR_OT4_OTSAU 0x180
156#define TSI148_LCSR_OT4_OTSAL 0x184
157#define TSI148_LCSR_OT4_OTEAU 0x188
158#define TSI148_LCSR_OT4_OTEAL 0x18C
159#define TSI148_LCSR_OT4_OTOFU 0x190
160#define TSI148_LCSR_OT4_OTOFL 0x194
161#define TSI148_LCSR_OT4_OTBS 0x198
162#define TSI148_LCSR_OT4_OTAT 0x19C
163
164#define TSI148_LCSR_OT5_OTSAU 0x1A0
165#define TSI148_LCSR_OT5_OTSAL 0x1A4
166#define TSI148_LCSR_OT5_OTEAU 0x1A8
167#define TSI148_LCSR_OT5_OTEAL 0x1AC
168#define TSI148_LCSR_OT5_OTOFU 0x1B0
169#define TSI148_LCSR_OT5_OTOFL 0x1B4
170#define TSI148_LCSR_OT5_OTBS 0x1B8
171#define TSI148_LCSR_OT5_OTAT 0x1BC
172
173#define TSI148_LCSR_OT6_OTSAU 0x1C0
174#define TSI148_LCSR_OT6_OTSAL 0x1C4
175#define TSI148_LCSR_OT6_OTEAU 0x1C8
176#define TSI148_LCSR_OT6_OTEAL 0x1CC
177#define TSI148_LCSR_OT6_OTOFU 0x1D0
178#define TSI148_LCSR_OT6_OTOFL 0x1D4
179#define TSI148_LCSR_OT6_OTBS 0x1D8
180#define TSI148_LCSR_OT6_OTAT 0x1DC
181
182#define TSI148_LCSR_OT7_OTSAU 0x1E0
183#define TSI148_LCSR_OT7_OTSAL 0x1E4
184#define TSI148_LCSR_OT7_OTEAU 0x1E8
185#define TSI148_LCSR_OT7_OTEAL 0x1EC
186#define TSI148_LCSR_OT7_OTOFU 0x1F0
187#define TSI148_LCSR_OT7_OTOFL 0x1F4
188#define TSI148_LCSR_OT7_OTBS 0x1F8
189#define TSI148_LCSR_OT7_OTAT 0x1FC
190
191#define TSI148_LCSR_OT0 0x100
192#define TSI148_LCSR_OT1 0x120
193#define TSI148_LCSR_OT2 0x140
194#define TSI148_LCSR_OT3 0x160
195#define TSI148_LCSR_OT4 0x180
196#define TSI148_LCSR_OT5 0x1A0
197#define TSI148_LCSR_OT6 0x1C0
198#define TSI148_LCSR_OT7 0x1E0
199
/* Outbound translation register block base offsets, indexed by window
 * number (0-7).
 * NOTE(review): static const data in a header duplicates this table in
 * every translation unit that includes it. */
static const int TSI148_LCSR_OT[8] = { TSI148_LCSR_OT0, TSI148_LCSR_OT1,
					TSI148_LCSR_OT2, TSI148_LCSR_OT3,
					TSI148_LCSR_OT4, TSI148_LCSR_OT5,
					TSI148_LCSR_OT6, TSI148_LCSR_OT7 };
204
205#define TSI148_LCSR_OFFSET_OTSAU 0x0
206#define TSI148_LCSR_OFFSET_OTSAL 0x4
207#define TSI148_LCSR_OFFSET_OTEAU 0x8
208#define TSI148_LCSR_OFFSET_OTEAL 0xC
209#define TSI148_LCSR_OFFSET_OTOFU 0x10
210#define TSI148_LCSR_OFFSET_OTOFL 0x14
211#define TSI148_LCSR_OFFSET_OTBS 0x18
212#define TSI148_LCSR_OFFSET_OTAT 0x1C
213
214/*
215 * VMEbus interrupt ack
216 * offset 200
217 */
218#define TSI148_LCSR_VIACK1 0x204
219#define TSI148_LCSR_VIACK2 0x208
220#define TSI148_LCSR_VIACK3 0x20C
221#define TSI148_LCSR_VIACK4 0x210
222#define TSI148_LCSR_VIACK5 0x214
223#define TSI148_LCSR_VIACK6 0x218
224#define TSI148_LCSR_VIACK7 0x21C
225
/* VME IACK register offsets indexed by interrupt level 1-7; index 0 is a
 * placeholder (there is no interrupt level 0, hence no VIACK0 register). */
static const int TSI148_LCSR_VIACK[8] = { 0, TSI148_LCSR_VIACK1,
				TSI148_LCSR_VIACK2, TSI148_LCSR_VIACK3,
				TSI148_LCSR_VIACK4, TSI148_LCSR_VIACK5,
				TSI148_LCSR_VIACK6, TSI148_LCSR_VIACK7 };
230
231/*
232 * RMW
233 * offset 220
234 */
235#define TSI148_LCSR_RMWAU 0x220
236#define TSI148_LCSR_RMWAL 0x224
237#define TSI148_LCSR_RMWEN 0x228
238#define TSI148_LCSR_RMWC 0x22C
239#define TSI148_LCSR_RMWS 0x230
240
241/*
242 * VMEbus control
243 * offset 234
244 */
245#define TSI148_LCSR_VMCTRL 0x234
246#define TSI148_LCSR_VCTRL 0x238
247#define TSI148_LCSR_VSTAT 0x23C
248
249/*
250 * PCI status
251 * offset 240
252 */
253#define TSI148_LCSR_PSTAT 0x240
254
255/*
256 * VME filter.
257 * offset 250
258 */
259#define TSI148_LCSR_VMEFL 0x250
260
261 /*
262 * VME exception.
263 * offset 260
264 */
265#define TSI148_LCSR_VEAU 0x260
266#define TSI148_LCSR_VEAL 0x264
267#define TSI148_LCSR_VEAT 0x268
268
269 /*
270 * PCI error
271 * offset 270
272 */
273#define TSI148_LCSR_EDPAU 0x270
274#define TSI148_LCSR_EDPAL 0x274
275#define TSI148_LCSR_EDPXA 0x278
276#define TSI148_LCSR_EDPXS 0x27C
277#define TSI148_LCSR_EDPAT 0x280
278
279 /*
280 * Inbound Translations
281 * offset 300
282 */
283#define TSI148_LCSR_IT0_ITSAU 0x300
284#define TSI148_LCSR_IT0_ITSAL 0x304
285#define TSI148_LCSR_IT0_ITEAU 0x308
286#define TSI148_LCSR_IT0_ITEAL 0x30C
287#define TSI148_LCSR_IT0_ITOFU 0x310
288#define TSI148_LCSR_IT0_ITOFL 0x314
289#define TSI148_LCSR_IT0_ITAT 0x318
290
291#define TSI148_LCSR_IT1_ITSAU 0x320
292#define TSI148_LCSR_IT1_ITSAL 0x324
293#define TSI148_LCSR_IT1_ITEAU 0x328
294#define TSI148_LCSR_IT1_ITEAL 0x32C
295#define TSI148_LCSR_IT1_ITOFU 0x330
296#define TSI148_LCSR_IT1_ITOFL 0x334
297#define TSI148_LCSR_IT1_ITAT 0x338
298
299#define TSI148_LCSR_IT2_ITSAU 0x340
300#define TSI148_LCSR_IT2_ITSAL 0x344
301#define TSI148_LCSR_IT2_ITEAU 0x348
302#define TSI148_LCSR_IT2_ITEAL 0x34C
303#define TSI148_LCSR_IT2_ITOFU 0x350
304#define TSI148_LCSR_IT2_ITOFL 0x354
305#define TSI148_LCSR_IT2_ITAT 0x358
306
307#define TSI148_LCSR_IT3_ITSAU 0x360
308#define TSI148_LCSR_IT3_ITSAL 0x364
309#define TSI148_LCSR_IT3_ITEAU 0x368
310#define TSI148_LCSR_IT3_ITEAL 0x36C
311#define TSI148_LCSR_IT3_ITOFU 0x370
312#define TSI148_LCSR_IT3_ITOFL 0x374
313#define TSI148_LCSR_IT3_ITAT 0x378
314
315#define TSI148_LCSR_IT4_ITSAU 0x380
316#define TSI148_LCSR_IT4_ITSAL 0x384
317#define TSI148_LCSR_IT4_ITEAU 0x388
318#define TSI148_LCSR_IT4_ITEAL 0x38C
319#define TSI148_LCSR_IT4_ITOFU 0x390
320#define TSI148_LCSR_IT4_ITOFL 0x394
321#define TSI148_LCSR_IT4_ITAT 0x398
322
323#define TSI148_LCSR_IT5_ITSAU 0x3A0
324#define TSI148_LCSR_IT5_ITSAL 0x3A4
325#define TSI148_LCSR_IT5_ITEAU 0x3A8
326#define TSI148_LCSR_IT5_ITEAL 0x3AC
327#define TSI148_LCSR_IT5_ITOFU 0x3B0
328#define TSI148_LCSR_IT5_ITOFL 0x3B4
329#define TSI148_LCSR_IT5_ITAT 0x3B8
330
331#define TSI148_LCSR_IT6_ITSAU 0x3C0
332#define TSI148_LCSR_IT6_ITSAL 0x3C4
333#define TSI148_LCSR_IT6_ITEAU 0x3C8
334#define TSI148_LCSR_IT6_ITEAL 0x3CC
335#define TSI148_LCSR_IT6_ITOFU 0x3D0
336#define TSI148_LCSR_IT6_ITOFL 0x3D4
337#define TSI148_LCSR_IT6_ITAT 0x3D8
338
339#define TSI148_LCSR_IT7_ITSAU 0x3E0
340#define TSI148_LCSR_IT7_ITSAL 0x3E4
341#define TSI148_LCSR_IT7_ITEAU 0x3E8
342#define TSI148_LCSR_IT7_ITEAL 0x3EC
343#define TSI148_LCSR_IT7_ITOFU 0x3F0
344#define TSI148_LCSR_IT7_ITOFL 0x3F4
345#define TSI148_LCSR_IT7_ITAT 0x3F8
346
347
348#define TSI148_LCSR_IT0 0x300
349#define TSI148_LCSR_IT1 0x320
350#define TSI148_LCSR_IT2 0x340
351#define TSI148_LCSR_IT3 0x360
352#define TSI148_LCSR_IT4 0x380
353#define TSI148_LCSR_IT5 0x3A0
354#define TSI148_LCSR_IT6 0x3C0
355#define TSI148_LCSR_IT7 0x3E0
356
/* Inbound translation register block base offsets, indexed by window
 * number (0-7). */
static const int TSI148_LCSR_IT[8] = { TSI148_LCSR_IT0, TSI148_LCSR_IT1,
					TSI148_LCSR_IT2, TSI148_LCSR_IT3,
					TSI148_LCSR_IT4, TSI148_LCSR_IT5,
					TSI148_LCSR_IT6, TSI148_LCSR_IT7 };
361
362#define TSI148_LCSR_OFFSET_ITSAU 0x0
363#define TSI148_LCSR_OFFSET_ITSAL 0x4
364#define TSI148_LCSR_OFFSET_ITEAU 0x8
365#define TSI148_LCSR_OFFSET_ITEAL 0xC
366#define TSI148_LCSR_OFFSET_ITOFU 0x10
367#define TSI148_LCSR_OFFSET_ITOFL 0x14
368#define TSI148_LCSR_OFFSET_ITAT 0x18
369
370 /*
371 * Inbound Translation GCSR
372 * offset 400
373 */
374#define TSI148_LCSR_GBAU 0x400
375#define TSI148_LCSR_GBAL 0x404
376#define TSI148_LCSR_GCSRAT 0x408
377
378 /*
379 * Inbound Translation CRG
380 * offset 40C
381 */
382#define TSI148_LCSR_CBAU 0x40C
383#define TSI148_LCSR_CBAL 0x410
384#define TSI148_LCSR_CSRAT 0x414
385
386 /*
387 * Inbound Translation CR/CSR
388 * CRG
389 * offset 418
390 */
391#define TSI148_LCSR_CROU 0x418
392#define TSI148_LCSR_CROL 0x41C
393#define TSI148_LCSR_CRAT 0x420
394
395 /*
396 * Inbound Translation Location Monitor
397 * offset 424
398 */
399#define TSI148_LCSR_LMBAU 0x424
400#define TSI148_LCSR_LMBAL 0x428
401#define TSI148_LCSR_LMAT 0x42C
402
403 /*
404 * VMEbus Interrupt Control.
405 * offset 430
406 */
407#define TSI148_LCSR_BCU 0x430
408#define TSI148_LCSR_BCL 0x434
409#define TSI148_LCSR_BPGTR 0x438
410#define TSI148_LCSR_BPCTR 0x43C
411#define TSI148_LCSR_VICR 0x440
412
413 /*
414 * Local Bus Interrupt Control.
415 * offset 448
416 */
417#define TSI148_LCSR_INTEN 0x448
418#define TSI148_LCSR_INTEO 0x44C
419#define TSI148_LCSR_INTS 0x450
420#define TSI148_LCSR_INTC 0x454
421#define TSI148_LCSR_INTM1 0x458
422#define TSI148_LCSR_INTM2 0x45C
423
424 /*
425 * DMA Controllers
426 * offset 500
427 */
428#define TSI148_LCSR_DCTL0 0x500
429#define TSI148_LCSR_DSTA0 0x504
430#define TSI148_LCSR_DCSAU0 0x508
431#define TSI148_LCSR_DCSAL0 0x50C
432#define TSI148_LCSR_DCDAU0 0x510
433#define TSI148_LCSR_DCDAL0 0x514
434#define TSI148_LCSR_DCLAU0 0x518
435#define TSI148_LCSR_DCLAL0 0x51C
436#define TSI148_LCSR_DSAU0 0x520
437#define TSI148_LCSR_DSAL0 0x524
438#define TSI148_LCSR_DDAU0 0x528
439#define TSI148_LCSR_DDAL0 0x52C
440#define TSI148_LCSR_DSAT0 0x530
441#define TSI148_LCSR_DDAT0 0x534
442#define TSI148_LCSR_DNLAU0 0x538
443#define TSI148_LCSR_DNLAL0 0x53C
444#define TSI148_LCSR_DCNT0 0x540
445#define TSI148_LCSR_DDBS0 0x544
446
447#define TSI148_LCSR_DCTL1 0x580
448#define TSI148_LCSR_DSTA1 0x584
449#define TSI148_LCSR_DCSAU1 0x588
450#define TSI148_LCSR_DCSAL1 0x58C
451#define TSI148_LCSR_DCDAU1 0x590
452#define TSI148_LCSR_DCDAL1 0x594
453#define TSI148_LCSR_DCLAU1 0x598
454#define TSI148_LCSR_DCLAL1 0x59C
455#define TSI148_LCSR_DSAU1 0x5A0
456#define TSI148_LCSR_DSAL1 0x5A4
457#define TSI148_LCSR_DDAU1 0x5A8
458#define TSI148_LCSR_DDAL1 0x5AC
459#define TSI148_LCSR_DSAT1 0x5B0
460#define TSI148_LCSR_DDAT1 0x5B4
461#define TSI148_LCSR_DNLAU1 0x5B8
462#define TSI148_LCSR_DNLAL1 0x5BC
463#define TSI148_LCSR_DCNT1 0x5C0
464#define TSI148_LCSR_DDBS1 0x5C4
465
466#define TSI148_LCSR_DMA0 0x500
467#define TSI148_LCSR_DMA1 0x580
468
469
/* DMA controller register block base offsets, indexed by controller. */
static const int TSI148_LCSR_DMA[TSI148_MAX_DMA] = { TSI148_LCSR_DMA0,
						TSI148_LCSR_DMA1 };
472
473#define TSI148_LCSR_OFFSET_DCTL 0x0
474#define TSI148_LCSR_OFFSET_DSTA 0x4
475#define TSI148_LCSR_OFFSET_DCSAU 0x8
476#define TSI148_LCSR_OFFSET_DCSAL 0xC
477#define TSI148_LCSR_OFFSET_DCDAU 0x10
478#define TSI148_LCSR_OFFSET_DCDAL 0x14
479#define TSI148_LCSR_OFFSET_DCLAU 0x18
480#define TSI148_LCSR_OFFSET_DCLAL 0x1C
481#define TSI148_LCSR_OFFSET_DSAU 0x20
482#define TSI148_LCSR_OFFSET_DSAL 0x24
483#define TSI148_LCSR_OFFSET_DDAU 0x28
484#define TSI148_LCSR_OFFSET_DDAL 0x2C
485#define TSI148_LCSR_OFFSET_DSAT 0x30
486#define TSI148_LCSR_OFFSET_DDAT 0x34
487#define TSI148_LCSR_OFFSET_DNLAU 0x38
488#define TSI148_LCSR_OFFSET_DNLAL 0x3C
489#define TSI148_LCSR_OFFSET_DCNT 0x40
490#define TSI148_LCSR_OFFSET_DDBS 0x44
491
492 /*
493 * GCSR Register Group
494 */
495
 /*
 * GCSR CRG
 * offset 00 600 - DEVI/VENI
 * offset 04 604 - CTRL/GA/REVID
 * offset 08 608 - Semaphore3/2/1/0
 * offset 0C 60C - Semaphore7/6/5/4
 */
#define TSI148_GCSR_ID		0x600
#define TSI148_GCSR_CSR		0x604
#define TSI148_GCSR_SEMA0	0x608
#define TSI148_GCSR_SEMA1	0x60C
507
508 /*
509 * Mail Box
510 * GCSR CRG
511 * offset 10 610 - Mailbox0
512 */
513#define TSI148_GCSR_MBOX0 0x610
514#define TSI148_GCSR_MBOX1 0x614
515#define TSI148_GCSR_MBOX2 0x618
516#define TSI148_GCSR_MBOX3 0x61C
517
/* GCSR mailbox register offsets, indexed by mailbox number (0-3). */
static const int TSI148_GCSR_MBOX[4] = { TSI148_GCSR_MBOX0,
					TSI148_GCSR_MBOX1,
					TSI148_GCSR_MBOX2,
					TSI148_GCSR_MBOX3 };
522
523 /*
524 * CR/CSR
525 */
526
527 /*
528 * CR/CSR CRG
529 * offset 7FFF4 FF4 - CSRBCR
530 * offset 7FFF8 FF8 - CSRBSR
531 * offset 7FFFC FFC - CBAR
532 */
533#define TSI148_CSRBCR 0xFF4
534#define TSI148_CSRBSR 0xFF8
535#define TSI148_CBAR 0xFFC
536
537
538
539
540 /*
541 * TSI148 Register Bit Definitions
542 */
543
544 /*
545 * PFCS Register Set
546 */
547#define TSI148_PCFS_CMMD_SERR (1<<8) /* SERR_L out pin ssys err */
548#define TSI148_PCFS_CMMD_PERR (1<<6) /* PERR_L out pin parity */
549#define TSI148_PCFS_CMMD_MSTR (1<<2) /* PCI bus master */
550#define TSI148_PCFS_CMMD_MEMSP (1<<1) /* PCI mem space access */
551#define TSI148_PCFS_CMMD_IOSP (1<<0) /* PCI I/O space enable */
552
553#define TSI148_PCFS_STAT_RCPVE (1<<15) /* Detected Parity Error */
554#define TSI148_PCFS_STAT_SIGSE (1<<14) /* Signalled System Error */
555#define TSI148_PCFS_STAT_RCVMA (1<<13) /* Received Master Abort */
556#define TSI148_PCFS_STAT_RCVTA (1<<12) /* Received Target Abort */
557#define TSI148_PCFS_STAT_SIGTA (1<<11) /* Signalled Target Abort */
558#define TSI148_PCFS_STAT_SELTIM (3<<9) /* DELSEL Timing */
559#define TSI148_PCFS_STAT_DPAR (1<<8) /* Data Parity Err Reported */
560#define TSI148_PCFS_STAT_FAST (1<<7) /* Fast back-to-back Cap */
561#define TSI148_PCFS_STAT_P66M (1<<5) /* 66 MHz Capable */
562#define TSI148_PCFS_STAT_CAPL (1<<4) /* Capab List - address $34 */
563
/*
 * Revision ID/Class Code Registers (CRG +$008)
 */
#define TSI148_PCFS_CLAS_M	(0xFF<<24)	/* Class ID */
#define TSI148_PCFS_SUBCLAS_M	(0xFF<<16)	/* Sub-Class ID */
#define TSI148_PCFS_PROGIF_M	(0xFF<<8)	/* Programming Interface */
#define TSI148_PCFS_REVID_M	(0xFF<<0)	/* Rev ID */
571
/*
 * Cache Line Size/ Master Latency Timer/ Header Type Registers (CRG + $00C)
 */
#define TSI148_PCFS_HEAD_M	(0xFF<<16)	/* Header Type */
#define TSI148_PCFS_MLAT_M	(0xFF<<8)	/* Master Lat Timer */
#define TSI148_PCFS_CLSZ_M	(0xFF<<0)	/* Cache Line Size */
578
579/*
580 * Memory Base Address Lower Reg (CRG + $010)
581 */
582#define TSI148_PCFS_MBARL_BASEL_M (0xFFFFF<<12) /* Base Addr Lower Mask */
583#define TSI148_PCFS_MBARL_PRE (1<<3) /* Prefetch */
584#define TSI148_PCFS_MBARL_MTYPE_M (3<<1) /* Memory Type Mask */
585#define TSI148_PCFS_MBARL_IOMEM (1<<0) /* I/O Space Indicator */
586
587/*
588 * Message Signaled Interrupt Capabilities Register (CRG + $040)
589 */
590#define TSI148_PCFS_MSICAP_64BAC (1<<7) /* 64-bit Address Capable */
591#define TSI148_PCFS_MSICAP_MME_M (7<<4) /* Multiple Msg Enable Mask */
592#define TSI148_PCFS_MSICAP_MMC_M (7<<1) /* Multiple Msg Capable Mask */
593#define TSI148_PCFS_MSICAP_MSIEN (1<<0) /* Msg signaled INT Enable */
594
595/*
596 * Message Address Lower Register (CRG +$044)
597 */
598#define TSI148_PCFS_MSIAL_M (0x3FFFFFFF<<2) /* Mask */
599
600/*
601 * Message Data Register (CRG + 4C)
602 */
603#define TSI148_PCFS_MSIMD_M (0xFFFF<<0) /* Mask */
604
605/*
606 * PCI-X Capabilities Register (CRG + $050)
607 */
608#define TSI148_PCFS_PCIXCAP_MOST_M (7<<4) /* Max outstanding Split Tran */
609#define TSI148_PCFS_PCIXCAP_MMRBC_M (3<<2) /* Max Mem Read byte cnt */
610#define TSI148_PCFS_PCIXCAP_ERO (1<<1) /* Enable Relaxed Ordering */
611#define TSI148_PCFS_PCIXCAP_DPERE (1<<0) /* Data Parity Recover Enable */
612
613/*
614 * PCI-X Status Register (CRG +$054)
615 */
616#define TSI148_PCFS_PCIXSTAT_RSCEM (1<<29) /* Received Split Comp Error */
617#define TSI148_PCFS_PCIXSTAT_DMCRS_M (7<<26) /* max Cumulative Read Size */
618#define TSI148_PCFS_PCIXSTAT_DMOST_M (7<<23) /* max outstanding Split Trans
619 */
620#define TSI148_PCFS_PCIXSTAT_DMMRC_M (3<<21) /* max mem read byte count */
621#define TSI148_PCFS_PCIXSTAT_DC (1<<20) /* Device Complexity */
622#define TSI148_PCFS_PCIXSTAT_USC (1<<19) /* Unexpected Split comp */
623#define TSI148_PCFS_PCIXSTAT_SCD (1<<18) /* Split completion discard */
624#define TSI148_PCFS_PCIXSTAT_133C (1<<17) /* 133MHz capable */
625#define TSI148_PCFS_PCIXSTAT_64D (1<<16) /* 64 bit device */
626#define TSI148_PCFS_PCIXSTAT_BN_M (0xFF<<8) /* Bus number */
627#define TSI148_PCFS_PCIXSTAT_DN_M (0x1F<<3) /* Device number */
628#define TSI148_PCFS_PCIXSTAT_FN_M (7<<0) /* Function Number */
629
630/*
631 * LCSR Registers
632 */
633
634/*
635 * Outbound Translation Starting Address Lower
636 */
637#define TSI148_LCSR_OTSAL_M (0xFFFF<<16) /* Mask */
638
639/*
640 * Outbound Translation Ending Address Lower
641 */
642#define TSI148_LCSR_OTEAL_M (0xFFFF<<16) /* Mask */
643
644/*
645 * Outbound Translation Offset Lower
646 */
647#define TSI148_LCSR_OTOFFL_M (0xFFFF<<16) /* Mask */
648
649/*
650 * Outbound Translation 2eSST Broadcast Select
651 */
652#define TSI148_LCSR_OTBS_M (0xFFFFF<<0) /* Mask */
653
654/*
655 * Outbound Translation Attribute
656 */
657#define TSI148_LCSR_OTAT_EN (1<<31) /* Window Enable */
658#define TSI148_LCSR_OTAT_MRPFD (1<<18) /* Prefetch Disable */
659
660#define TSI148_LCSR_OTAT_PFS_M (3<<16) /* Prefetch Size Mask */
661#define TSI148_LCSR_OTAT_PFS_2 (0<<16) /* 2 Cache Lines P Size */
662#define TSI148_LCSR_OTAT_PFS_4 (1<<16) /* 4 Cache Lines P Size */
663#define TSI148_LCSR_OTAT_PFS_8 (2<<16) /* 8 Cache Lines P Size */
664#define TSI148_LCSR_OTAT_PFS_16 (3<<16) /* 16 Cache Lines P Size */
665
666#define TSI148_LCSR_OTAT_2eSSTM_M (7<<11) /* 2eSST Xfer Rate Mask */
667#define TSI148_LCSR_OTAT_2eSSTM_160 (0<<11) /* 160MB/s 2eSST Xfer Rate */
668#define TSI148_LCSR_OTAT_2eSSTM_267 (1<<11) /* 267MB/s 2eSST Xfer Rate */
669#define TSI148_LCSR_OTAT_2eSSTM_320 (2<<11) /* 320MB/s 2eSST Xfer Rate */
670
671#define TSI148_LCSR_OTAT_TM_M (7<<8) /* Xfer Protocol Mask */
672#define TSI148_LCSR_OTAT_TM_SCT (0<<8) /* SCT Xfer Protocol */
673#define TSI148_LCSR_OTAT_TM_BLT (1<<8) /* BLT Xfer Protocol */
674#define TSI148_LCSR_OTAT_TM_MBLT (2<<8) /* MBLT Xfer Protocol */
675#define TSI148_LCSR_OTAT_TM_2eVME (3<<8) /* 2eVME Xfer Protocol */
676#define TSI148_LCSR_OTAT_TM_2eSST (4<<8) /* 2eSST Xfer Protocol */
677#define TSI148_LCSR_OTAT_TM_2eSSTB (5<<8) /* 2eSST Bcast Xfer Protocol */
678
679#define TSI148_LCSR_OTAT_DBW_M (3<<6) /* Max Data Width */
680#define TSI148_LCSR_OTAT_DBW_16 (0<<6) /* 16-bit Data Width */
681#define TSI148_LCSR_OTAT_DBW_32 (1<<6) /* 32-bit Data Width */
682
683#define TSI148_LCSR_OTAT_SUP (1<<5) /* Supervisory Access */
684#define TSI148_LCSR_OTAT_PGM (1<<4) /* Program Access */
685
#define TSI148_LCSR_OTAT_AMODE_M	(0xf<<0)	/* Address Mode Mask */
#define TSI148_LCSR_OTAT_AMODE_A16	(0<<0)	/* A16 Address Space */
#define TSI148_LCSR_OTAT_AMODE_A24	(1<<0)	/* A24 Address Space */
#define TSI148_LCSR_OTAT_AMODE_A32	(2<<0)	/* A32 Address Space */
#define TSI148_LCSR_OTAT_AMODE_A64	(4<<0)	/* A64 Address Space */
#define TSI148_LCSR_OTAT_AMODE_CRCSR	(5<<0)	/* CR/CSR Address Space */
#define TSI148_LCSR_OTAT_AMODE_USER1	(8<<0)	/* User1 Address Space */
#define TSI148_LCSR_OTAT_AMODE_USER2	(9<<0)	/* User2 Address Space */
#define TSI148_LCSR_OTAT_AMODE_USER3	(10<<0)	/* User3 Address Space */
#define TSI148_LCSR_OTAT_AMODE_USER4	(11<<0)	/* User4 Address Space */
696
697/*
698 * VME Master Control Register CRG+$234
699 */
700#define TSI148_LCSR_VMCTRL_VSA (1<<27) /* VMEbus Stop Ack */
701#define TSI148_LCSR_VMCTRL_VS (1<<26) /* VMEbus Stop */
702#define TSI148_LCSR_VMCTRL_DHB (1<<25) /* Device Has Bus */
703#define TSI148_LCSR_VMCTRL_DWB (1<<24) /* Device Wants Bus */
704
705#define TSI148_LCSR_VMCTRL_RMWEN (1<<20) /* RMW Enable */
706
707#define TSI148_LCSR_VMCTRL_ATO_M (7<<16) /* Master Access Time-out Mask
708 */
709#define TSI148_LCSR_VMCTRL_ATO_32 (0<<16) /* 32 us */
710#define TSI148_LCSR_VMCTRL_ATO_128 (1<<16) /* 128 us */
711#define TSI148_LCSR_VMCTRL_ATO_512 (2<<16) /* 512 us */
712#define TSI148_LCSR_VMCTRL_ATO_2M (3<<16) /* 2 ms */
713#define TSI148_LCSR_VMCTRL_ATO_8M (4<<16) /* 8 ms */
714#define TSI148_LCSR_VMCTRL_ATO_32M (5<<16) /* 32 ms */
715#define TSI148_LCSR_VMCTRL_ATO_128M (6<<16) /* 128 ms */
716#define TSI148_LCSR_VMCTRL_ATO_DIS (7<<16) /* Disabled */
717
718#define TSI148_LCSR_VMCTRL_VTOFF_M (7<<12) /* VMEbus Master Time off */
719#define TSI148_LCSR_VMCTRL_VTOFF_0 (0<<12) /* 0us */
720#define TSI148_LCSR_VMCTRL_VTOFF_1 (1<<12) /* 1us */
721#define TSI148_LCSR_VMCTRL_VTOFF_2 (2<<12) /* 2us */
722#define TSI148_LCSR_VMCTRL_VTOFF_4 (3<<12) /* 4us */
723#define TSI148_LCSR_VMCTRL_VTOFF_8 (4<<12) /* 8us */
724#define TSI148_LCSR_VMCTRL_VTOFF_16 (5<<12) /* 16us */
725#define TSI148_LCSR_VMCTRL_VTOFF_32 (6<<12) /* 32us */
726#define TSI148_LCSR_VMCTRL_VTOFF_64 (7<<12) /* 64us */
727
#define TSI148_LCSR_VMCTRL_VTON_M	(7<<8)	/* VMEbus Master Time On */
#define TSI148_LCSR_VMCTRL_VTON_4	(0<<8)	/* 4us (per macro name and
						 * TSI148 manual encoding)
						 */
#define TSI148_LCSR_VMCTRL_VTON_8	(1<<8)	/* 8us */
#define TSI148_LCSR_VMCTRL_VTON_16	(2<<8)	/* 16us */
#define TSI148_LCSR_VMCTRL_VTON_32	(3<<8)	/* 32us */
#define TSI148_LCSR_VMCTRL_VTON_64	(4<<8)	/* 64us */
#define TSI148_LCSR_VMCTRL_VTON_128	(5<<8)	/* 128us */
#define TSI148_LCSR_VMCTRL_VTON_256	(6<<8)	/* 256us */
#define TSI148_LCSR_VMCTRL_VTON_512	(7<<8)	/* 512us */
737
738#define TSI148_LCSR_VMCTRL_VREL_M (3<<3) /* VMEbus Master Rel Mode Mask
739 */
740#define TSI148_LCSR_VMCTRL_VREL_T_D (0<<3) /* Time on or Done */
741#define TSI148_LCSR_VMCTRL_VREL_T_R_D (1<<3) /* Time on and REQ or Done */
742#define TSI148_LCSR_VMCTRL_VREL_T_B_D (2<<3) /* Time on and BCLR or Done */
743#define TSI148_LCSR_VMCTRL_VREL_T_D_R (3<<3) /* Time on or Done and REQ */
744
745#define TSI148_LCSR_VMCTRL_VFAIR (1<<2) /* VMEbus Master Fair Mode */
746#define TSI148_LCSR_VMCTRL_VREQL_M (3<<0) /* VMEbus Master Req Level Mask
747 */
748
749/*
750 * VMEbus Control Register CRG+$238
751 */
752#define TSI148_LCSR_VCTRL_LRE (1<<31) /* Late Retry Enable */
753
754#define TSI148_LCSR_VCTRL_DLT_M (0xF<<24) /* Deadlock Timer */
755#define TSI148_LCSR_VCTRL_DLT_OFF (0<<24) /* Deadlock Timer Off */
756#define TSI148_LCSR_VCTRL_DLT_16 (1<<24) /* 16 VCLKS */
757#define TSI148_LCSR_VCTRL_DLT_32 (2<<24) /* 32 VCLKS */
758#define TSI148_LCSR_VCTRL_DLT_64 (3<<24) /* 64 VCLKS */
759#define TSI148_LCSR_VCTRL_DLT_128 (4<<24) /* 128 VCLKS */
760#define TSI148_LCSR_VCTRL_DLT_256 (5<<24) /* 256 VCLKS */
761#define TSI148_LCSR_VCTRL_DLT_512 (6<<24) /* 512 VCLKS */
762#define TSI148_LCSR_VCTRL_DLT_1024 (7<<24) /* 1024 VCLKS */
763#define TSI148_LCSR_VCTRL_DLT_2048 (8<<24) /* 2048 VCLKS */
764#define TSI148_LCSR_VCTRL_DLT_4096 (9<<24) /* 4096 VCLKS */
765#define TSI148_LCSR_VCTRL_DLT_8192 (0xA<<24) /* 8192 VCLKS */
766#define TSI148_LCSR_VCTRL_DLT_16384 (0xB<<24) /* 16384 VCLKS */
767#define TSI148_LCSR_VCTRL_DLT_32768 (0xC<<24) /* 32768 VCLKS */
768
769#define TSI148_LCSR_VCTRL_NERBB (1<<20) /* No Early Release of Bus Busy
770 */
771
772#define TSI148_LCSR_VCTRL_SRESET (1<<17) /* System Reset */
773#define TSI148_LCSR_VCTRL_LRESET (1<<16) /* Local Reset */
774
775#define TSI148_LCSR_VCTRL_SFAILAI (1<<15) /* SYSFAIL Auto Slot ID */
776#define TSI148_LCSR_VCTRL_BID_M (0x1F<<8) /* Broadcast ID Mask */
777
778#define TSI148_LCSR_VCTRL_ATOEN (1<<7) /* Arbiter Time-out Enable */
779#define TSI148_LCSR_VCTRL_ROBIN (1<<6) /* VMEbus Round Robin */
780
781#define TSI148_LCSR_VCTRL_GTO_M (7<<0) /* VMEbus Global Time-out Mask
782 */
783#define TSI148_LCSR_VCTRL_GTO_8 (0<<0) /* 8 us */
784#define TSI148_LCSR_VCTRL_GTO_16 (1<<0) /* 16 us */
785#define TSI148_LCSR_VCTRL_GTO_32 (2<<0) /* 32 us */
786#define TSI148_LCSR_VCTRL_GTO_64 (3<<0) /* 64 us */
787#define TSI148_LCSR_VCTRL_GTO_128 (4<<0) /* 128 us */
788#define TSI148_LCSR_VCTRL_GTO_256 (5<<0) /* 256 us */
789#define TSI148_LCSR_VCTRL_GTO_512 (6<<0) /* 512 us */
790#define TSI148_LCSR_VCTRL_GTO_DIS (7<<0) /* Disabled */
791
792/*
793 * VMEbus Status Register CRG + $23C
794 */
795#define TSI148_LCSR_VSTAT_CPURST (1<<15) /* Clear power up reset */
796#define TSI148_LCSR_VSTAT_BRDFL (1<<14) /* Board fail */
797#define TSI148_LCSR_VSTAT_PURSTS (1<<12) /* Power up reset status */
798#define TSI148_LCSR_VSTAT_BDFAILS (1<<11) /* Board Fail Status */
799#define TSI148_LCSR_VSTAT_SYSFAILS (1<<10) /* System Fail Status */
800#define TSI148_LCSR_VSTAT_ACFAILS (1<<9) /* AC fail status */
801#define TSI148_LCSR_VSTAT_SCONS (1<<8) /* System Cont Status */
802#define TSI148_LCSR_VSTAT_GAP (1<<5) /* Geographic Addr Parity */
803#define TSI148_LCSR_VSTAT_GA_M (0x1F<<0) /* Geographic Addr Mask */
804
805/*
806 * PCI Configuration Status Register CRG+$240
807 */
808#define TSI148_LCSR_PSTAT_REQ64S (1<<6) /* Request 64 status set */
809#define TSI148_LCSR_PSTAT_M66ENS (1<<5) /* M66ENS 66Mhz enable */
810#define TSI148_LCSR_PSTAT_FRAMES (1<<4) /* Frame Status */
811#define TSI148_LCSR_PSTAT_IRDYS (1<<3) /* IRDY status */
812#define TSI148_LCSR_PSTAT_DEVSELS (1<<2) /* DEVL status */
813#define TSI148_LCSR_PSTAT_STOPS (1<<1) /* STOP status */
814#define TSI148_LCSR_PSTAT_TRDYS (1<<0) /* TRDY status */
815
816/*
817 * VMEbus Exception Attributes Register CRG + $268
818 */
819#define TSI148_LCSR_VEAT_VES (1<<31) /* Status */
820#define TSI148_LCSR_VEAT_VEOF (1<<30) /* Overflow */
821#define TSI148_LCSR_VEAT_VESCL (1<<29) /* Status Clear */
822#define TSI148_LCSR_VEAT_2EOT (1<<21) /* 2e Odd Termination */
823#define TSI148_LCSR_VEAT_2EST (1<<20) /* 2e Slave terminated */
824#define TSI148_LCSR_VEAT_BERR (1<<19) /* Bus Error */
825#define TSI148_LCSR_VEAT_LWORD (1<<18) /* LWORD_ signal state */
826#define TSI148_LCSR_VEAT_WRITE (1<<17) /* WRITE_ signal state */
827#define TSI148_LCSR_VEAT_IACK (1<<16) /* IACK_ signal state */
828#define TSI148_LCSR_VEAT_DS1 (1<<15) /* DS1_ signal state */
829#define TSI148_LCSR_VEAT_DS0 (1<<14) /* DS0_ signal state */
830#define TSI148_LCSR_VEAT_AM_M (0x3F<<8) /* Address Mode Mask */
831#define TSI148_LCSR_VEAT_XAM_M (0xFF<<0) /* Master AMode Mask */
832
833
834/*
835 * VMEbus PCI Error Diagnostics PCI/X Attributes Register CRG + $280
836 */
837#define TSI148_LCSR_EDPAT_EDPCL (1<<29)
838
839/*
840 * Inbound Translation Starting Address Lower
841 */
842#define TSI148_LCSR_ITSAL6432_M (0xFFFF<<16) /* Mask */
843#define TSI148_LCSR_ITSAL24_M (0x00FFF<<12) /* Mask */
844#define TSI148_LCSR_ITSAL16_M (0x0000FFF<<4) /* Mask */
845
846/*
847 * Inbound Translation Ending Address Lower
848 */
849#define TSI148_LCSR_ITEAL6432_M (0xFFFF<<16) /* Mask */
850#define TSI148_LCSR_ITEAL24_M (0x00FFF<<12) /* Mask */
851#define TSI148_LCSR_ITEAL16_M (0x0000FFF<<4) /* Mask */
852
853/*
854 * Inbound Translation Offset Lower
855 */
856#define TSI148_LCSR_ITOFFL6432_M (0xFFFF<<16) /* Mask */
857#define TSI148_LCSR_ITOFFL24_M (0xFFFFF<<12) /* Mask */
858#define TSI148_LCSR_ITOFFL16_M (0xFFFFFFF<<4) /* Mask */
859
860/*
861 * Inbound Translation Attribute
862 */
863#define TSI148_LCSR_ITAT_EN (1<<31) /* Window Enable */
864#define TSI148_LCSR_ITAT_TH (1<<18) /* Prefetch Threshold */
865
866#define TSI148_LCSR_ITAT_VFS_M (3<<16) /* Virtual FIFO Size Mask */
867#define TSI148_LCSR_ITAT_VFS_64 (0<<16) /* 64 bytes Virtual FIFO Size */
868#define TSI148_LCSR_ITAT_VFS_128 (1<<16) /* 128 bytes Virtual FIFO Sz */
869#define TSI148_LCSR_ITAT_VFS_256 (2<<16) /* 256 bytes Virtual FIFO Sz */
870#define TSI148_LCSR_ITAT_VFS_512 (3<<16) /* 512 bytes Virtual FIFO Sz */
871
872#define TSI148_LCSR_ITAT_2eSSTM_M (7<<12) /* 2eSST Xfer Rate Mask */
873#define TSI148_LCSR_ITAT_2eSSTM_160 (0<<12) /* 160MB/s 2eSST Xfer Rate */
874#define TSI148_LCSR_ITAT_2eSSTM_267 (1<<12) /* 267MB/s 2eSST Xfer Rate */
875#define TSI148_LCSR_ITAT_2eSSTM_320 (2<<12) /* 320MB/s 2eSST Xfer Rate */
876
877#define TSI148_LCSR_ITAT_2eSSTB (1<<11) /* 2eSST Bcast Xfer Protocol */
878#define TSI148_LCSR_ITAT_2eSST (1<<10) /* 2eSST Xfer Protocol */
879#define TSI148_LCSR_ITAT_2eVME (1<<9) /* 2eVME Xfer Protocol */
880#define TSI148_LCSR_ITAT_MBLT (1<<8) /* MBLT Xfer Protocol */
881#define TSI148_LCSR_ITAT_BLT (1<<7) /* BLT Xfer Protocol */
882
883#define TSI148_LCSR_ITAT_AS_M (7<<4) /* Address Space Mask */
884#define TSI148_LCSR_ITAT_AS_A16 (0<<4) /* A16 Address Space */
885#define TSI148_LCSR_ITAT_AS_A24 (1<<4) /* A24 Address Space */
886#define TSI148_LCSR_ITAT_AS_A32 (2<<4) /* A32 Address Space */
887#define TSI148_LCSR_ITAT_AS_A64 (4<<4) /* A64 Address Space */
888
889#define TSI148_LCSR_ITAT_SUPR (1<<3) /* Supervisor Access */
890#define TSI148_LCSR_ITAT_NPRIV (1<<2) /* Non-Priv (User) Access */
891#define TSI148_LCSR_ITAT_PGM (1<<1) /* Program Access */
892#define TSI148_LCSR_ITAT_DATA (1<<0) /* Data Access */
893
894/*
895 * GCSR Base Address Lower Address CRG +$404
896 */
897#define TSI148_LCSR_GBAL_M (0x7FFFFFF<<5) /* Mask */
898
899/*
900 * GCSR Attribute Register CRG + $408
901 */
902#define TSI148_LCSR_GCSRAT_EN (1<<7) /* Enable access to GCSR */
903
904#define TSI148_LCSR_GCSRAT_AS_M (7<<4) /* Address Space Mask */
905#define TSI148_LCSR_GCSRAT_AS_A16 (0<<4) /* Address Space 16 */
906#define TSI148_LCSR_GCSRAT_AS_A24 (1<<4) /* Address Space 24 */
907#define TSI148_LCSR_GCSRAT_AS_A32 (2<<4) /* Address Space 32 */
908#define TSI148_LCSR_GCSRAT_AS_A64 (4<<4) /* Address Space 64 */
909
910#define TSI148_LCSR_GCSRAT_SUPR (1<<3) /* Sup set -GCSR decoder */
911#define TSI148_LCSR_GCSRAT_NPRIV (1<<2) /* Non-Privileged set - GCSR */
912#define TSI148_LCSR_GCSRAT_PGM (1<<1) /* Program set - GCSR decoder */
913#define TSI148_LCSR_GCSRAT_DATA (1<<0) /* DATA set GCSR decoder */
914
915/*
916 * CRG Base Address Lower Address CRG + $410
917 */
918#define TSI148_LCSR_CBAL_M (0xFFFFF<<12)
919
920/*
921 * CRG Attribute Register CRG + $414
922 */
923#define TSI148_LCSR_CRGAT_EN (1<<7) /* Enable PRG Access */
924
925#define TSI148_LCSR_CRGAT_AS_M (7<<4) /* Address Space */
926#define TSI148_LCSR_CRGAT_AS_A16 (0<<4) /* Address Space 16 */
927#define TSI148_LCSR_CRGAT_AS_A24 (1<<4) /* Address Space 24 */
928#define TSI148_LCSR_CRGAT_AS_A32 (2<<4) /* Address Space 32 */
929#define TSI148_LCSR_CRGAT_AS_A64 (4<<4) /* Address Space 64 */
930
931#define TSI148_LCSR_CRGAT_SUPR (1<<3) /* Supervisor Access */
932#define TSI148_LCSR_CRGAT_NPRIV (1<<2) /* Non-Privileged (User) Access */
933#define TSI148_LCSR_CRGAT_PGM (1<<1) /* Program Access */
934#define TSI148_LCSR_CRGAT_DATA (1<<0) /* Data Access */
935
936/*
937 * CR/CSR Offset Lower Register CRG + $41C
938 */
939#define TSI148_LCSR_CROL_M (0x1FFF<<19) /* Mask */
940
941/*
942 * CR/CSR Attribute register CRG + $420
943 */
944#define TSI148_LCSR_CRAT_EN (1<<7) /* Enable access to CR/CSR */
945
946/*
947 * Location Monitor base address lower register CRG + $428
948 */
949#define TSI148_LCSR_LMBAL_M (0x7FFFFFF<<5) /* Mask */
950
951/*
952 * Location Monitor Attribute Register CRG + $42C
953 */
954#define TSI148_LCSR_LMAT_EN (1<<7) /* Enable Location Monitor */
955
956#define TSI148_LCSR_LMAT_AS_M (7<<4) /* Address Space MASK */
957#define TSI148_LCSR_LMAT_AS_A16 (0<<4) /* A16 */
958#define TSI148_LCSR_LMAT_AS_A24 (1<<4) /* A24 */
959#define TSI148_LCSR_LMAT_AS_A32 (2<<4) /* A32 */
960#define TSI148_LCSR_LMAT_AS_A64 (4<<4) /* A64 */
961
962#define TSI148_LCSR_LMAT_SUPR (1<<3) /* Supervisor Access */
963#define TSI148_LCSR_LMAT_NPRIV (1<<2) /* Non-Priv (User) Access */
964#define TSI148_LCSR_LMAT_PGM (1<<1) /* Program Access */
965#define TSI148_LCSR_LMAT_DATA (1<<0) /* Data Access */
966
967/*
968 * Broadcast Pulse Generator Timer Register CRG + $438
969 */
970#define TSI148_LCSR_BPGTR_BPGT_M (0xFFFF<<0) /* Mask */
971
972/*
973 * Broadcast Programmable Clock Timer Register CRG + $43C
974 */
975#define TSI148_LCSR_BPCTR_BPCT_M (0xFFFFFF<<0) /* Mask */
976
977/*
978 * VMEbus Interrupt Control Register CRG + $43C
979 */
980#define TSI148_LCSR_VICR_CNTS_M (3<<22) /* Cntr Source MASK */
981#define TSI148_LCSR_VICR_CNTS_DIS (1<<22) /* Cntr Disable */
982#define TSI148_LCSR_VICR_CNTS_IRQ1 (2<<22) /* IRQ1 to Cntr */
983#define TSI148_LCSR_VICR_CNTS_IRQ2 (3<<22) /* IRQ2 to Cntr */
984
985#define TSI148_LCSR_VICR_EDGIS_M (3<<20) /* Edge interrupt MASK */
986#define TSI148_LCSR_VICR_EDGIS_DIS (1<<20) /* Edge interrupt Disable */
987#define TSI148_LCSR_VICR_EDGIS_IRQ1 (2<<20) /* IRQ1 to Edge */
988#define TSI148_LCSR_VICR_EDGIS_IRQ2 (3<<20) /* IRQ2 to Edge */
989
990#define TSI148_LCSR_VICR_IRQIF_M (3<<18) /* IRQ1* Function MASK */
991#define TSI148_LCSR_VICR_IRQIF_NORM (1<<18) /* Normal */
992#define TSI148_LCSR_VICR_IRQIF_PULSE (2<<18) /* Pulse Generator */
993#define TSI148_LCSR_VICR_IRQIF_PROG (3<<18) /* Programmable Clock */
994#define TSI148_LCSR_VICR_IRQIF_1U (4<<18) /* 1us Clock */
995
996#define TSI148_LCSR_VICR_IRQ2F_M (3<<16) /* IRQ2* Function MASK */
997#define TSI148_LCSR_VICR_IRQ2F_NORM (1<<16) /* Normal */
998#define TSI148_LCSR_VICR_IRQ2F_PULSE (2<<16) /* Pulse Generator */
999#define TSI148_LCSR_VICR_IRQ2F_PROG (3<<16) /* Programmable Clock */
1000#define TSI148_LCSR_VICR_IRQ2F_1U (4<<16) /* 1us Clock */
1001
1002#define TSI148_LCSR_VICR_BIP (1<<15) /* Broadcast Interrupt Pulse */
1003
1004#define TSI148_LCSR_VICR_IRQC (1<<12) /* VMEbus IRQ Clear */
1005#define TSI148_LCSR_VICR_IRQS (1<<11) /* VMEbus IRQ Status */
1006
1007#define TSI148_LCSR_VICR_IRQL_M (7<<8) /* VMEbus SW IRQ Level Mask */
1008#define TSI148_LCSR_VICR_IRQL_1 (1<<8) /* VMEbus SW IRQ Level 1 */
1009#define TSI148_LCSR_VICR_IRQL_2 (2<<8) /* VMEbus SW IRQ Level 2 */
1010#define TSI148_LCSR_VICR_IRQL_3 (3<<8) /* VMEbus SW IRQ Level 3 */
1011#define TSI148_LCSR_VICR_IRQL_4 (4<<8) /* VMEbus SW IRQ Level 4 */
1012#define TSI148_LCSR_VICR_IRQL_5 (5<<8) /* VMEbus SW IRQ Level 5 */
1013#define TSI148_LCSR_VICR_IRQL_6 (6<<8) /* VMEbus SW IRQ Level 6 */
1014#define TSI148_LCSR_VICR_IRQL_7 (7<<8) /* VMEbus SW IRQ Level 7 */
1015
1016static const int TSI148_LCSR_VICR_IRQL[8] = { 0, TSI148_LCSR_VICR_IRQL_1,
1017 TSI148_LCSR_VICR_IRQL_2, TSI148_LCSR_VICR_IRQL_3,
1018 TSI148_LCSR_VICR_IRQL_4, TSI148_LCSR_VICR_IRQL_5,
1019 TSI148_LCSR_VICR_IRQL_6, TSI148_LCSR_VICR_IRQL_7 };
1020
1021#define TSI148_LCSR_VICR_STID_M (0xFF<<0) /* Status/ID Mask */
1022
1023/*
1024 * Interrupt Enable Register CRG + $440
1025 */
1026#define TSI148_LCSR_INTEN_DMA1EN (1<<25) /* DMAC 1 */
1027#define TSI148_LCSR_INTEN_DMA0EN (1<<24) /* DMAC 0 */
1028#define TSI148_LCSR_INTEN_LM3EN (1<<23) /* Location Monitor 3 */
1029#define TSI148_LCSR_INTEN_LM2EN (1<<22) /* Location Monitor 2 */
1030#define TSI148_LCSR_INTEN_LM1EN (1<<21) /* Location Monitor 1 */
1031#define TSI148_LCSR_INTEN_LM0EN (1<<20) /* Location Monitor 0 */
1032#define TSI148_LCSR_INTEN_MB3EN (1<<19) /* Mail Box 3 */
1033#define TSI148_LCSR_INTEN_MB2EN (1<<18) /* Mail Box 2 */
1034#define TSI148_LCSR_INTEN_MB1EN (1<<17) /* Mail Box 1 */
1035#define TSI148_LCSR_INTEN_MB0EN (1<<16) /* Mail Box 0 */
1036#define TSI148_LCSR_INTEN_PERREN (1<<13) /* PCI/X Error */
1037#define TSI148_LCSR_INTEN_VERREN (1<<12) /* VMEbus Error */
1038#define TSI148_LCSR_INTEN_VIEEN (1<<11) /* VMEbus IRQ Edge */
1039#define TSI148_LCSR_INTEN_IACKEN (1<<10) /* IACK */
1040#define TSI148_LCSR_INTEN_SYSFLEN (1<<9) /* System Fail */
1041#define TSI148_LCSR_INTEN_ACFLEN (1<<8) /* AC Fail */
1042#define TSI148_LCSR_INTEN_IRQ7EN (1<<7) /* IRQ7 */
1043#define TSI148_LCSR_INTEN_IRQ6EN (1<<6) /* IRQ6 */
1044#define TSI148_LCSR_INTEN_IRQ5EN (1<<5) /* IRQ5 */
1045#define TSI148_LCSR_INTEN_IRQ4EN (1<<4) /* IRQ4 */
1046#define TSI148_LCSR_INTEN_IRQ3EN (1<<3) /* IRQ3 */
1047#define TSI148_LCSR_INTEN_IRQ2EN (1<<2) /* IRQ2 */
1048#define TSI148_LCSR_INTEN_IRQ1EN (1<<1) /* IRQ1 */
1049
1050static const int TSI148_LCSR_INTEN_LMEN[4] = { TSI148_LCSR_INTEN_LM0EN,
1051 TSI148_LCSR_INTEN_LM1EN,
1052 TSI148_LCSR_INTEN_LM2EN,
1053 TSI148_LCSR_INTEN_LM3EN };
1054
1055static const int TSI148_LCSR_INTEN_IRQEN[7] = { TSI148_LCSR_INTEN_IRQ1EN,
1056 TSI148_LCSR_INTEN_IRQ2EN,
1057 TSI148_LCSR_INTEN_IRQ3EN,
1058 TSI148_LCSR_INTEN_IRQ4EN,
1059 TSI148_LCSR_INTEN_IRQ5EN,
1060 TSI148_LCSR_INTEN_IRQ6EN,
1061 TSI148_LCSR_INTEN_IRQ7EN };
1062
1063/*
1064 * Interrupt Enable Out Register CRG + $444
1065 */
1066#define TSI148_LCSR_INTEO_DMA1EO (1<<25) /* DMAC 1 */
1067#define TSI148_LCSR_INTEO_DMA0EO (1<<24) /* DMAC 0 */
1068#define TSI148_LCSR_INTEO_LM3EO (1<<23) /* Loc Monitor 3 */
1069#define TSI148_LCSR_INTEO_LM2EO (1<<22) /* Loc Monitor 2 */
1070#define TSI148_LCSR_INTEO_LM1EO (1<<21) /* Loc Monitor 1 */
1071#define TSI148_LCSR_INTEO_LM0EO (1<<20) /* Location Monitor 0 */
1072#define TSI148_LCSR_INTEO_MB3EO (1<<19) /* Mail Box 3 */
1073#define TSI148_LCSR_INTEO_MB2EO (1<<18) /* Mail Box 2 */
1074#define TSI148_LCSR_INTEO_MB1EO (1<<17) /* Mail Box 1 */
1075#define TSI148_LCSR_INTEO_MB0EO (1<<16) /* Mail Box 0 */
1076#define TSI148_LCSR_INTEO_PERREO (1<<13) /* PCI/X Error */
1077#define TSI148_LCSR_INTEO_VERREO (1<<12) /* VMEbus Error */
1078#define TSI148_LCSR_INTEO_VIEEO (1<<11) /* VMEbus IRQ Edge */
1079#define TSI148_LCSR_INTEO_IACKEO (1<<10) /* IACK */
1080#define TSI148_LCSR_INTEO_SYSFLEO (1<<9) /* System Fail */
1081#define TSI148_LCSR_INTEO_ACFLEO (1<<8) /* AC Fail */
1082#define TSI148_LCSR_INTEO_IRQ7EO (1<<7) /* IRQ7 */
1083#define TSI148_LCSR_INTEO_IRQ6EO (1<<6) /* IRQ6 */
1084#define TSI148_LCSR_INTEO_IRQ5EO (1<<5) /* IRQ5 */
1085#define TSI148_LCSR_INTEO_IRQ4EO (1<<4) /* IRQ4 */
1086#define TSI148_LCSR_INTEO_IRQ3EO (1<<3) /* IRQ3 */
1087#define TSI148_LCSR_INTEO_IRQ2EO (1<<2) /* IRQ2 */
1088#define TSI148_LCSR_INTEO_IRQ1EO (1<<1) /* IRQ1 */
1089
1090static const int TSI148_LCSR_INTEO_LMEO[4] = { TSI148_LCSR_INTEO_LM0EO,
1091 TSI148_LCSR_INTEO_LM1EO,
1092 TSI148_LCSR_INTEO_LM2EO,
1093 TSI148_LCSR_INTEO_LM3EO };
1094
1095static const int TSI148_LCSR_INTEO_IRQEO[7] = { TSI148_LCSR_INTEO_IRQ1EO,
1096 TSI148_LCSR_INTEO_IRQ2EO,
1097 TSI148_LCSR_INTEO_IRQ3EO,
1098 TSI148_LCSR_INTEO_IRQ4EO,
1099 TSI148_LCSR_INTEO_IRQ5EO,
1100 TSI148_LCSR_INTEO_IRQ6EO,
1101 TSI148_LCSR_INTEO_IRQ7EO };
1102
1103/*
1104 * Interrupt Status Register CRG + $448
1105 */
1106#define TSI148_LCSR_INTS_DMA1S (1<<25) /* DMA 1 */
1107#define TSI148_LCSR_INTS_DMA0S (1<<24) /* DMA 0 */
1108#define TSI148_LCSR_INTS_LM3S (1<<23) /* Location Monitor 3 */
1109#define TSI148_LCSR_INTS_LM2S (1<<22) /* Location Monitor 2 */
1110#define TSI148_LCSR_INTS_LM1S (1<<21) /* Location Monitor 1 */
1111#define TSI148_LCSR_INTS_LM0S (1<<20) /* Location Monitor 0 */
1112#define TSI148_LCSR_INTS_MB3S (1<<19) /* Mail Box 3 */
1113#define TSI148_LCSR_INTS_MB2S (1<<18) /* Mail Box 2 */
1114#define TSI148_LCSR_INTS_MB1S (1<<17) /* Mail Box 1 */
1115#define TSI148_LCSR_INTS_MB0S (1<<16) /* Mail Box 0 */
1116#define TSI148_LCSR_INTS_PERRS (1<<13) /* PCI/X Error */
1117#define TSI148_LCSR_INTS_VERRS (1<<12) /* VMEbus Error */
1118#define TSI148_LCSR_INTS_VIES (1<<11) /* VMEbus IRQ Edge */
1119#define TSI148_LCSR_INTS_IACKS (1<<10) /* IACK */
1120#define TSI148_LCSR_INTS_SYSFLS (1<<9) /* System Fail */
1121#define TSI148_LCSR_INTS_ACFLS (1<<8) /* AC Fail */
1122#define TSI148_LCSR_INTS_IRQ7S (1<<7) /* IRQ7 */
1123#define TSI148_LCSR_INTS_IRQ6S (1<<6) /* IRQ6 */
1124#define TSI148_LCSR_INTS_IRQ5S (1<<5) /* IRQ5 */
1125#define TSI148_LCSR_INTS_IRQ4S (1<<4) /* IRQ4 */
1126#define TSI148_LCSR_INTS_IRQ3S (1<<3) /* IRQ3 */
1127#define TSI148_LCSR_INTS_IRQ2S (1<<2) /* IRQ2 */
1128#define TSI148_LCSR_INTS_IRQ1S (1<<1) /* IRQ1 */
1129
1130static const int TSI148_LCSR_INTS_LMS[4] = { TSI148_LCSR_INTS_LM0S,
1131 TSI148_LCSR_INTS_LM1S,
1132 TSI148_LCSR_INTS_LM2S,
1133 TSI148_LCSR_INTS_LM3S };
1134
1135static const int TSI148_LCSR_INTS_MBS[4] = { TSI148_LCSR_INTS_MB0S,
1136 TSI148_LCSR_INTS_MB1S,
1137 TSI148_LCSR_INTS_MB2S,
1138 TSI148_LCSR_INTS_MB3S };
1139
1140/*
1141 * Interrupt Clear Register CRG + $44C
1142 */
1143#define TSI148_LCSR_INTC_DMA1C (1<<25) /* DMA 1 */
1144#define TSI148_LCSR_INTC_DMA0C (1<<24) /* DMA 0 */
1145#define TSI148_LCSR_INTC_LM3C (1<<23) /* Location Monitor 3 */
1146#define TSI148_LCSR_INTC_LM2C (1<<22) /* Location Monitor 2 */
1147#define TSI148_LCSR_INTC_LM1C (1<<21) /* Location Monitor 1 */
1148#define TSI148_LCSR_INTC_LM0C (1<<20) /* Location Monitor 0 */
1149#define TSI148_LCSR_INTC_MB3C (1<<19) /* Mail Box 3 */
1150#define TSI148_LCSR_INTC_MB2C (1<<18) /* Mail Box 2 */
1151#define TSI148_LCSR_INTC_MB1C (1<<17) /* Mail Box 1 */
1152#define TSI148_LCSR_INTC_MB0C (1<<16) /* Mail Box 0 */
1153#define TSI148_LCSR_INTC_PERRC (1<<13) /* PCI/X Error */
1154#define TSI148_LCSR_INTC_VERRC (1<<12) /* VMEbus Error */
1155#define TSI148_LCSR_INTC_VIEC (1<<11) /* VMEbus IRQ Edge */
1156#define TSI148_LCSR_INTC_IACKC (1<<10) /* IACK */
1157#define TSI148_LCSR_INTC_SYSFLC (1<<9) /* System Fail */
1158#define TSI148_LCSR_INTC_ACFLC (1<<8) /* AC Fail */
1159
1160static const int TSI148_LCSR_INTC_LMC[4] = { TSI148_LCSR_INTC_LM0C,
1161 TSI148_LCSR_INTC_LM1C,
1162 TSI148_LCSR_INTC_LM2C,
1163 TSI148_LCSR_INTC_LM3C };
1164
1165static const int TSI148_LCSR_INTC_MBC[4] = { TSI148_LCSR_INTC_MB0C,
1166 TSI148_LCSR_INTC_MB1C,
1167 TSI148_LCSR_INTC_MB2C,
1168 TSI148_LCSR_INTC_MB3C };
1169
1170/*
1171 * Interrupt Map Register 1 CRG + $458
1172 */
1173#define TSI148_LCSR_INTM1_DMA1M_M (3<<18) /* DMA 1 */
1174#define TSI148_LCSR_INTM1_DMA0M_M (3<<16) /* DMA 0 */
1175#define TSI148_LCSR_INTM1_LM3M_M (3<<14) /* Location Monitor 3 */
1176#define TSI148_LCSR_INTM1_LM2M_M (3<<12) /* Location Monitor 2 */
1177#define TSI148_LCSR_INTM1_LM1M_M (3<<10) /* Location Monitor 1 */
1178#define TSI148_LCSR_INTM1_LM0M_M (3<<8) /* Location Monitor 0 */
1179#define TSI148_LCSR_INTM1_MB3M_M (3<<6) /* Mail Box 3 */
1180#define TSI148_LCSR_INTM1_MB2M_M (3<<4) /* Mail Box 2 */
1181#define TSI148_LCSR_INTM1_MB1M_M (3<<2) /* Mail Box 1 */
1182#define TSI148_LCSR_INTM1_MB0M_M (3<<0) /* Mail Box 0 */
1183
1184/*
1185 * Interrupt Map Register 2 CRG + $45C
1186 */
1187#define TSI148_LCSR_INTM2_PERRM_M (3<<26) /* PCI Bus Error */
1188#define TSI148_LCSR_INTM2_VERRM_M (3<<24) /* VMEbus Error */
1189#define TSI148_LCSR_INTM2_VIEM_M (3<<22) /* VMEbus IRQ Edge */
1190#define TSI148_LCSR_INTM2_IACKM_M (3<<20) /* IACK */
1191#define TSI148_LCSR_INTM2_SYSFLM_M (3<<18) /* System Fail */
1192#define TSI148_LCSR_INTM2_ACFLM_M (3<<16) /* AC Fail */
1193#define TSI148_LCSR_INTM2_IRQ7M_M (3<<14) /* IRQ7 */
1194#define TSI148_LCSR_INTM2_IRQ6M_M (3<<12) /* IRQ6 */
1195#define TSI148_LCSR_INTM2_IRQ5M_M (3<<10) /* IRQ5 */
1196#define TSI148_LCSR_INTM2_IRQ4M_M (3<<8) /* IRQ4 */
1197#define TSI148_LCSR_INTM2_IRQ3M_M (3<<6) /* IRQ3 */
1198#define TSI148_LCSR_INTM2_IRQ2M_M (3<<4) /* IRQ2 */
1199#define TSI148_LCSR_INTM2_IRQ1M_M (3<<2) /* IRQ1 */
1200
1201/*
1202 * DMA Control (0-1) Registers CRG + $500
1203 */
1204#define TSI148_LCSR_DCTL_ABT (1<<27) /* Abort */
1205#define TSI148_LCSR_DCTL_PAU (1<<26) /* Pause */
1206#define TSI148_LCSR_DCTL_DGO (1<<25) /* DMA Go */
1207
1208#define TSI148_LCSR_DCTL_MOD (1<<23) /* Mode */
1209
1210#define TSI148_LCSR_DCTL_VBKS_M (7<<12) /* VMEbus block Size MASK */
1211#define TSI148_LCSR_DCTL_VBKS_32 (0<<12) /* VMEbus block Size 32 */
1212#define TSI148_LCSR_DCTL_VBKS_64 (1<<12) /* VMEbus block Size 64 */
1213#define TSI148_LCSR_DCTL_VBKS_128 (2<<12) /* VMEbus block Size 128 */
1214#define TSI148_LCSR_DCTL_VBKS_256 (3<<12) /* VMEbus block Size 256 */
1215#define TSI148_LCSR_DCTL_VBKS_512 (4<<12) /* VMEbus block Size 512 */
1216#define TSI148_LCSR_DCTL_VBKS_1024 (5<<12) /* VMEbus block Size 1024 */
1217#define TSI148_LCSR_DCTL_VBKS_2048 (6<<12) /* VMEbus block Size 2048 */
1218#define TSI148_LCSR_DCTL_VBKS_4096 (7<<12) /* VMEbus block Size 4096 */
1219
1220#define TSI148_LCSR_DCTL_VBOT_M (7<<8) /* VMEbus back-off MASK */
1221#define TSI148_LCSR_DCTL_VBOT_0 (0<<8) /* VMEbus back-off 0us */
1222#define TSI148_LCSR_DCTL_VBOT_1 (1<<8) /* VMEbus back-off 1us */
1223#define TSI148_LCSR_DCTL_VBOT_2 (2<<8) /* VMEbus back-off 2us */
1224#define TSI148_LCSR_DCTL_VBOT_4 (3<<8) /* VMEbus back-off 4us */
1225#define TSI148_LCSR_DCTL_VBOT_8 (4<<8) /* VMEbus back-off 8us */
1226#define TSI148_LCSR_DCTL_VBOT_16 (5<<8) /* VMEbus back-off 16us */
1227#define TSI148_LCSR_DCTL_VBOT_32 (6<<8) /* VMEbus back-off 32us */
1228#define TSI148_LCSR_DCTL_VBOT_64 (7<<8) /* VMEbus back-off 64us */
1229
1230#define TSI148_LCSR_DCTL_PBKS_M (7<<4) /* PCI block size MASK */
1231#define TSI148_LCSR_DCTL_PBKS_32 (0<<4) /* PCI block size 32 bytes */
1232#define TSI148_LCSR_DCTL_PBKS_64 (1<<4) /* PCI block size 64 bytes */
1233#define TSI148_LCSR_DCTL_PBKS_128 (2<<4) /* PCI block size 128 bytes */
1234#define TSI148_LCSR_DCTL_PBKS_256 (3<<4) /* PCI block size 256 bytes */
1235#define TSI148_LCSR_DCTL_PBKS_512 (4<<4) /* PCI block size 512 bytes */
1236#define TSI148_LCSR_DCTL_PBKS_1024 (5<<4) /* PCI block size 1024 bytes */
1237#define TSI148_LCSR_DCTL_PBKS_2048 (6<<4) /* PCI block size 2048 bytes */
1238#define TSI148_LCSR_DCTL_PBKS_4096 (7<<4) /* PCI block size 4096 bytes */
1239
1240#define TSI148_LCSR_DCTL_PBOT_M (7<<0) /* PCI back off MASK */
1241#define TSI148_LCSR_DCTL_PBOT_0 (0<<0) /* PCI back off 0us */
1242#define TSI148_LCSR_DCTL_PBOT_1 (1<<0) /* PCI back off 1us */
1243#define TSI148_LCSR_DCTL_PBOT_2 (2<<0) /* PCI back off 2us */
1244#define TSI148_LCSR_DCTL_PBOT_4 (3<<0) /* PCI back off 4us */
1245#define TSI148_LCSR_DCTL_PBOT_8 (4<<0) /* PCI back off 8us */
1246#define TSI148_LCSR_DCTL_PBOT_16 (5<<0) /* PCI back off 16us */
1247#define TSI148_LCSR_DCTL_PBOT_32 (6<<0) /* PCI back off 32us */
1248#define TSI148_LCSR_DCTL_PBOT_64 (7<<0) /* PCI back off 64us */
1249
1250/*
1251 * DMA Status Registers (0-1) CRG + $504
1252 */
1253#define TSI148_LCSR_DSTA_SMA (1<<31) /* PCI Signalled Master Abt */
1254#define TSI148_LCSR_DSTA_RTA (1<<30) /* PCI Received Target Abt */
1255#define TSI148_LCSR_DSTA_MRC (1<<29) /* PCI Max Retry Count */
1256#define TSI148_LCSR_DSTA_VBE (1<<28) /* VMEbus error */
1257#define TSI148_LCSR_DSTA_ABT (1<<27) /* Abort */
1258#define TSI148_LCSR_DSTA_PAU (1<<26) /* Pause */
1259#define TSI148_LCSR_DSTA_DON (1<<25) /* Done */
1260#define TSI148_LCSR_DSTA_BSY (1<<24) /* Busy */
1261
1262/*
1263 * DMA Current Link Address Lower (0-1)
1264 */
1265#define TSI148_LCSR_DCLAL_M (0x3FFFFFF<<6) /* Mask */
1266
1267/*
1268 * DMA Source Attribute (0-1) Reg
1269 */
1270#define TSI148_LCSR_DSAT_TYP_M (3<<28) /* Source Bus Type */
1271#define TSI148_LCSR_DSAT_TYP_PCI (0<<28) /* PCI Bus */
1272#define TSI148_LCSR_DSAT_TYP_VME (1<<28) /* VMEbus */
1273#define TSI148_LCSR_DSAT_TYP_PAT (2<<28) /* Data Pattern */
1274
1275#define TSI148_LCSR_DSAT_PSZ (1<<25) /* Pattern Size */
1276#define TSI148_LCSR_DSAT_NIN (1<<24) /* No Increment */
1277
1278#define TSI148_LCSR_DSAT_2eSSTM_M (3<<11) /* 2eSST Trans Rate Mask */
1279#define TSI148_LCSR_DSAT_2eSSTM_160 (0<<11) /* 160 MB/s */
1280#define TSI148_LCSR_DSAT_2eSSTM_267 (1<<11) /* 267 MB/s */
1281#define TSI148_LCSR_DSAT_2eSSTM_320 (2<<11) /* 320 MB/s */
1282
1283#define TSI148_LCSR_DSAT_TM_M (7<<8) /* Bus Transfer Protocol Mask */
1284#define TSI148_LCSR_DSAT_TM_SCT (0<<8) /* SCT */
1285#define TSI148_LCSR_DSAT_TM_BLT (1<<8) /* BLT */
1286#define TSI148_LCSR_DSAT_TM_MBLT (2<<8) /* MBLT */
1287#define TSI148_LCSR_DSAT_TM_2eVME (3<<8) /* 2eVME */
1288#define TSI148_LCSR_DSAT_TM_2eSST (4<<8) /* 2eSST */
1289#define TSI148_LCSR_DSAT_TM_2eSSTB (5<<8) /* 2eSST Broadcast */
1290
1291#define TSI148_LCSR_DSAT_DBW_M (3<<6) /* Max Data Width MASK */
1292#define TSI148_LCSR_DSAT_DBW_16 (0<<6) /* 16 Bits */
1293#define TSI148_LCSR_DSAT_DBW_32 (1<<6) /* 32 Bits */
1294
1295#define TSI148_LCSR_DSAT_SUP (1<<5) /* Supervisory Mode */
1296#define TSI148_LCSR_DSAT_PGM (1<<4) /* Program Mode */
1297
1298#define TSI148_LCSR_DSAT_AMODE_M (0xf<<0) /* Address Space Mask */
1299#define TSI148_LCSR_DSAT_AMODE_A16 (0<<0) /* A16 */
1300#define TSI148_LCSR_DSAT_AMODE_A24 (1<<0) /* A24 */
1301#define TSI148_LCSR_DSAT_AMODE_A32 (2<<0) /* A32 */
1302#define TSI148_LCSR_DSAT_AMODE_A64 (4<<0) /* A64 */
1303#define TSI148_LCSR_DSAT_AMODE_CRCSR (5<<0) /* CR/CSR */
1304#define TSI148_LCSR_DSAT_AMODE_USER1 (8<<0) /* User1 */
1305#define TSI148_LCSR_DSAT_AMODE_USER2 (9<<0) /* User2 */
1306#define TSI148_LCSR_DSAT_AMODE_USER3 (0xa<<0) /* User3 */
1307#define TSI148_LCSR_DSAT_AMODE_USER4 (0xb<<0) /* User4 */
1308
1309/*
1310 * DMA Destination Attribute Registers (0-1)
1311 */
1312#define TSI148_LCSR_DDAT_TYP_PCI (0<<28) /* Destination PCI Bus */
1313#define TSI148_LCSR_DDAT_TYP_VME (1<<28) /* Destination VMEbus */
1314
1315#define TSI148_LCSR_DDAT_2eSSTM_M (3<<11) /* 2eSST Transfer Rate Mask */
1316#define TSI148_LCSR_DDAT_2eSSTM_160 (0<<11) /* 160 MB/s */
1317#define TSI148_LCSR_DDAT_2eSSTM_267 (1<<11) /* 267 MB/s */
1318#define TSI148_LCSR_DDAT_2eSSTM_320 (2<<11) /* 320 MB/s */
1319
1320#define TSI148_LCSR_DDAT_TM_M (7<<8) /* Bus Transfer Protocol Mask */
1321#define TSI148_LCSR_DDAT_TM_SCT (0<<8) /* SCT */
1322#define TSI148_LCSR_DDAT_TM_BLT (1<<8) /* BLT */
1323#define TSI148_LCSR_DDAT_TM_MBLT (2<<8) /* MBLT */
1324#define TSI148_LCSR_DDAT_TM_2eVME (3<<8) /* 2eVME */
1325#define TSI148_LCSR_DDAT_TM_2eSST (4<<8) /* 2eSST */
1326#define TSI148_LCSR_DDAT_TM_2eSSTB (5<<8) /* 2eSST Broadcast */
1327
1328#define TSI148_LCSR_DDAT_DBW_M (3<<6) /* Max Data Width MASK */
1329#define TSI148_LCSR_DDAT_DBW_16 (0<<6) /* 16 Bits */
1330#define TSI148_LCSR_DDAT_DBW_32 (1<<6) /* 32 Bits */
1331
1332#define TSI148_LCSR_DDAT_SUP (1<<5) /* Supervisory/User Access */
1333#define TSI148_LCSR_DDAT_PGM (1<<4) /* Program/Data Access */
1334
1335#define TSI148_LCSR_DDAT_AMODE_M (0xf<<0) /* Address Space Mask */
1336#define TSI148_LCSR_DDAT_AMODE_A16 (0<<0) /* A16 */
1337#define TSI148_LCSR_DDAT_AMODE_A24 (1<<0) /* A24 */
1338#define TSI148_LCSR_DDAT_AMODE_A32 (2<<0) /* A32 */
1339#define TSI148_LCSR_DDAT_AMODE_A64 (4<<0) /* A64 */
1340#define TSI148_LCSR_DDAT_AMODE_CRCSR (5<<0) /* CRC/SR */
1341#define TSI148_LCSR_DDAT_AMODE_USER1 (8<<0) /* User1 */
1342#define TSI148_LCSR_DDAT_AMODE_USER2 (9<<0) /* User2 */
1343#define TSI148_LCSR_DDAT_AMODE_USER3 (0xa<<0) /* User3 */
1344#define TSI148_LCSR_DDAT_AMODE_USER4 (0xb<<0) /* User4 */
1345
1346/*
1347 * DMA Next Link Address Lower
1348 */
1349#define TSI148_LCSR_DNLAL_DNLAL_M (0x3FFFFFF<<6) /* Address Mask */
1350#define TSI148_LCSR_DNLAL_LLA (1<<0) /* Last Link Address Indicator */
1351
1352/*
1353 * DMA 2eSST Broadcast Select
1354 */
1355#define TSI148_LCSR_DBS_M (0x1FFFFF<<0) /* Mask */
1356
1357/*
1358 * GCSR Register Group
1359 */
1360
1361/*
1362 * GCSR Control and Status Register CRG + $604
1363 */
1364#define TSI148_GCSR_GCTRL_LRST (1<<15) /* Local Reset */
1365#define TSI148_GCSR_GCTRL_SFAILEN (1<<14) /* System Fail enable */
1366#define TSI148_GCSR_GCTRL_BDFAILS (1<<13) /* Board Fail Status */
1367#define TSI148_GCSR_GCTRL_SCON (1<<12) /* System Controller */
1368#define TSI148_GCSR_GCTRL_MEN (1<<11) /* Module Enable (READY) */
1369
1370#define TSI148_GCSR_GCTRL_LMI3S (1<<7) /* Loc Monitor 3 Int Status */
1371#define TSI148_GCSR_GCTRL_LMI2S (1<<6) /* Loc Monitor 2 Int Status */
1372#define TSI148_GCSR_GCTRL_LMI1S (1<<5) /* Loc Monitor 1 Int Status */
1373#define TSI148_GCSR_GCTRL_LMI0S (1<<4) /* Loc Monitor 0 Int Status */
1374#define TSI148_GCSR_GCTRL_MBI3S (1<<3) /* Mail box 3 Int Status */
1375#define TSI148_GCSR_GCTRL_MBI2S (1<<2) /* Mail box 2 Int Status */
1376#define TSI148_GCSR_GCTRL_MBI1S (1<<1) /* Mail box 1 Int Status */
1377#define TSI148_GCSR_GCTRL_MBI0S (1<<0) /* Mail box 0 Int Status */
1378
1379#define TSI148_GCSR_GAP (1<<5) /* Geographic Addr Parity */
1380#define TSI148_GCSR_GA_M (0x1F<<0) /* Geographic Address Mask */
1381
1382/*
1383 * CR/CSR Register Group
1384 */
1385
1386/*
1387 * CR/CSR Bit Clear Register CRG + $FF4
1388 */
1389#define TSI148_CRCSR_CSRBCR_LRSTC (1<<7) /* Local Reset Clear */
1390#define TSI148_CRCSR_CSRBCR_SFAILC (1<<6) /* System Fail Enable Clear */
1391#define TSI148_CRCSR_CSRBCR_BDFAILS (1<<5) /* Board Fail Status */
1392#define TSI148_CRCSR_CSRBCR_MENC (1<<4) /* Module Enable Clear */
1393#define TSI148_CRCSR_CSRBCR_BERRSC (1<<3) /* Bus Error Status Clear */
1394
1395/*
1396 * CR/CSR Bit Set Register CRG+$FF8
1397 */
1398#define TSI148_CRCSR_CSRBSR_LISTS (1<<7) /* Local Reset Set */
1399#define TSI148_CRCSR_CSRBSR_SFAILS (1<<6) /* System Fail Enable Set */
1400#define TSI148_CRCSR_CSRBSR_BDFAILS (1<<5) /* Board Fail Status */
1401#define TSI148_CRCSR_CSRBSR_MENS (1<<4) /* Module Enable Set */
1402#define TSI148_CRCSR_CSRBSR_BERRS (1<<3) /* Bus Error Status Set */
1403
1404/*
1405 * CR/CSR Base Address Register CRG + FFC
1406 */
1407#define TSI148_CRCSR_CBAR_M (0x1F<<3) /* Mask */
1408
1409#endif /* TSI148_H */
diff --git a/drivers/staging/vme/vme.c b/drivers/staging/vme/vme.c
new file mode 100644
index 00000000000..c078ce369df
--- /dev/null
+++ b/drivers/staging/vme/vme.c
@@ -0,0 +1,1534 @@
1/*
2 * VME Bridge Framework
3 *
4 * Author: Martyn Welch <martyn.welch@ge.com>
5 * Copyright 2008 GE Intelligent Platforms Embedded Systems, Inc.
6 *
7 * Based on work by Tom Armistead and Ajit Prem
8 * Copyright 2004 Motorola Inc.
9 *
10 * This program is free software; you can redistribute it and/or modify it
11 * under the terms of the GNU General Public License as published by the
12 * Free Software Foundation; either version 2 of the License, or (at your
13 * option) any later version.
14 */
15
16#include <linux/module.h>
17#include <linux/moduleparam.h>
18#include <linux/mm.h>
19#include <linux/types.h>
20#include <linux/kernel.h>
21#include <linux/errno.h>
22#include <linux/pci.h>
23#include <linux/poll.h>
24#include <linux/highmem.h>
25#include <linux/interrupt.h>
26#include <linux/pagemap.h>
27#include <linux/device.h>
28#include <linux/dma-mapping.h>
29#include <linux/syscalls.h>
30#include <linux/mutex.h>
31#include <linux/spinlock.h>
32#include <linux/slab.h>
33
34#include "vme.h"
35#include "vme_bridge.h"
36
37/* Bitmask and mutex to keep track of bridge numbers */
38static unsigned int vme_bus_numbers;
39static DEFINE_MUTEX(vme_bus_num_mtx);
40
41static void __exit vme_exit(void);
42static int __init vme_init(void);
43
44
45/*
46 * Find the bridge resource associated with a specific device resource
47 */
48static struct vme_bridge *dev_to_bridge(struct device *dev)
49{
50 return dev->platform_data;
51}
52
53/*
54 * Find the bridge that the resource is associated with.
55 */
56static struct vme_bridge *find_bridge(struct vme_resource *resource)
57{
58 /* Get list to search */
59 switch (resource->type) {
60 case VME_MASTER:
61 return list_entry(resource->entry, struct vme_master_resource,
62 list)->parent;
63 break;
64 case VME_SLAVE:
65 return list_entry(resource->entry, struct vme_slave_resource,
66 list)->parent;
67 break;
68 case VME_DMA:
69 return list_entry(resource->entry, struct vme_dma_resource,
70 list)->parent;
71 break;
72 case VME_LM:
73 return list_entry(resource->entry, struct vme_lm_resource,
74 list)->parent;
75 break;
76 default:
77 printk(KERN_ERR "Unknown resource type\n");
78 return NULL;
79 break;
80 }
81}
82
83/*
84 * Allocate a contiguous block of memory for use by the driver. This is used to
85 * create the buffers for the slave windows.
86 *
87 * XXX VME bridges could be available on buses other than PCI. At the momment
88 * this framework only supports PCI devices.
89 */
90void *vme_alloc_consistent(struct vme_resource *resource, size_t size,
91 dma_addr_t *dma)
92{
93 struct vme_bridge *bridge;
94 struct pci_dev *pdev;
95
96 if (resource == NULL) {
97 printk(KERN_ERR "No resource\n");
98 return NULL;
99 }
100
101 bridge = find_bridge(resource);
102 if (bridge == NULL) {
103 printk(KERN_ERR "Can't find bridge\n");
104 return NULL;
105 }
106
107 /* Find pci_dev container of dev */
108 if (bridge->parent == NULL) {
109 printk(KERN_ERR "Dev entry NULL\n");
110 return NULL;
111 }
112 pdev = container_of(bridge->parent, struct pci_dev, dev);
113
114 return pci_alloc_consistent(pdev, size, dma);
115}
116EXPORT_SYMBOL(vme_alloc_consistent);
117
118/*
119 * Free previously allocated contiguous block of memory.
120 *
121 * XXX VME bridges could be available on buses other than PCI. At the momment
122 * this framework only supports PCI devices.
123 */
124void vme_free_consistent(struct vme_resource *resource, size_t size,
125 void *vaddr, dma_addr_t dma)
126{
127 struct vme_bridge *bridge;
128 struct pci_dev *pdev;
129
130 if (resource == NULL) {
131 printk(KERN_ERR "No resource\n");
132 return;
133 }
134
135 bridge = find_bridge(resource);
136 if (bridge == NULL) {
137 printk(KERN_ERR "Can't find bridge\n");
138 return;
139 }
140
141 /* Find pci_dev container of dev */
142 pdev = container_of(bridge->parent, struct pci_dev, dev);
143
144 pci_free_consistent(pdev, size, vaddr, dma);
145}
146EXPORT_SYMBOL(vme_free_consistent);
147
148size_t vme_get_size(struct vme_resource *resource)
149{
150 int enabled, retval;
151 unsigned long long base, size;
152 dma_addr_t buf_base;
153 vme_address_t aspace;
154 vme_cycle_t cycle;
155 vme_width_t dwidth;
156
157 switch (resource->type) {
158 case VME_MASTER:
159 retval = vme_master_get(resource, &enabled, &base, &size,
160 &aspace, &cycle, &dwidth);
161
162 return size;
163 break;
164 case VME_SLAVE:
165 retval = vme_slave_get(resource, &enabled, &base, &size,
166 &buf_base, &aspace, &cycle);
167
168 return size;
169 break;
170 case VME_DMA:
171 return 0;
172 break;
173 default:
174 printk(KERN_ERR "Unknown resource type\n");
175 return 0;
176 break;
177 }
178}
179EXPORT_SYMBOL(vme_get_size);
180
181static int vme_check_window(vme_address_t aspace, unsigned long long vme_base,
182 unsigned long long size)
183{
184 int retval = 0;
185
186 switch (aspace) {
187 case VME_A16:
188 if (((vme_base + size) > VME_A16_MAX) ||
189 (vme_base > VME_A16_MAX))
190 retval = -EFAULT;
191 break;
192 case VME_A24:
193 if (((vme_base + size) > VME_A24_MAX) ||
194 (vme_base > VME_A24_MAX))
195 retval = -EFAULT;
196 break;
197 case VME_A32:
198 if (((vme_base + size) > VME_A32_MAX) ||
199 (vme_base > VME_A32_MAX))
200 retval = -EFAULT;
201 break;
202 case VME_A64:
203 /*
204 * Any value held in an unsigned long long can be used as the
205 * base
206 */
207 break;
208 case VME_CRCSR:
209 if (((vme_base + size) > VME_CRCSR_MAX) ||
210 (vme_base > VME_CRCSR_MAX))
211 retval = -EFAULT;
212 break;
213 case VME_USER1:
214 case VME_USER2:
215 case VME_USER3:
216 case VME_USER4:
217 /* User Defined */
218 break;
219 default:
220 printk(KERN_ERR "Invalid address space\n");
221 retval = -EINVAL;
222 break;
223 }
224
225 return retval;
226}
227
228/*
229 * Request a slave image with specific attributes, return some unique
230 * identifier.
231 */
232struct vme_resource *vme_slave_request(struct device *dev,
233 vme_address_t address, vme_cycle_t cycle)
234{
235 struct vme_bridge *bridge;
236 struct list_head *slave_pos = NULL;
237 struct vme_slave_resource *allocated_image = NULL;
238 struct vme_slave_resource *slave_image = NULL;
239 struct vme_resource *resource = NULL;
240
241 bridge = dev_to_bridge(dev);
242 if (bridge == NULL) {
243 printk(KERN_ERR "Can't find VME bus\n");
244 goto err_bus;
245 }
246
247 /* Loop through slave resources */
248 list_for_each(slave_pos, &bridge->slave_resources) {
249 slave_image = list_entry(slave_pos,
250 struct vme_slave_resource, list);
251
252 if (slave_image == NULL) {
253 printk(KERN_ERR "Registered NULL Slave resource\n");
254 continue;
255 }
256
257 /* Find an unlocked and compatible image */
258 mutex_lock(&slave_image->mtx);
259 if (((slave_image->address_attr & address) == address) &&
260 ((slave_image->cycle_attr & cycle) == cycle) &&
261 (slave_image->locked == 0)) {
262
263 slave_image->locked = 1;
264 mutex_unlock(&slave_image->mtx);
265 allocated_image = slave_image;
266 break;
267 }
268 mutex_unlock(&slave_image->mtx);
269 }
270
271 /* No free image */
272 if (allocated_image == NULL)
273 goto err_image;
274
275 resource = kmalloc(sizeof(struct vme_resource), GFP_KERNEL);
276 if (resource == NULL) {
277 printk(KERN_WARNING "Unable to allocate resource structure\n");
278 goto err_alloc;
279 }
280 resource->type = VME_SLAVE;
281 resource->entry = &allocated_image->list;
282
283 return resource;
284
285err_alloc:
286 /* Unlock image */
287 mutex_lock(&slave_image->mtx);
288 slave_image->locked = 0;
289 mutex_unlock(&slave_image->mtx);
290err_image:
291err_bus:
292 return NULL;
293}
294EXPORT_SYMBOL(vme_slave_request);
295
296int vme_slave_set(struct vme_resource *resource, int enabled,
297 unsigned long long vme_base, unsigned long long size,
298 dma_addr_t buf_base, vme_address_t aspace, vme_cycle_t cycle)
299{
300 struct vme_bridge *bridge = find_bridge(resource);
301 struct vme_slave_resource *image;
302 int retval;
303
304 if (resource->type != VME_SLAVE) {
305 printk(KERN_ERR "Not a slave resource\n");
306 return -EINVAL;
307 }
308
309 image = list_entry(resource->entry, struct vme_slave_resource, list);
310
311 if (bridge->slave_set == NULL) {
312 printk(KERN_ERR "Function not supported\n");
313 return -ENOSYS;
314 }
315
316 if (!(((image->address_attr & aspace) == aspace) &&
317 ((image->cycle_attr & cycle) == cycle))) {
318 printk(KERN_ERR "Invalid attributes\n");
319 return -EINVAL;
320 }
321
322 retval = vme_check_window(aspace, vme_base, size);
323 if (retval)
324 return retval;
325
326 return bridge->slave_set(image, enabled, vme_base, size, buf_base,
327 aspace, cycle);
328}
329EXPORT_SYMBOL(vme_slave_set);
330
331int vme_slave_get(struct vme_resource *resource, int *enabled,
332 unsigned long long *vme_base, unsigned long long *size,
333 dma_addr_t *buf_base, vme_address_t *aspace, vme_cycle_t *cycle)
334{
335 struct vme_bridge *bridge = find_bridge(resource);
336 struct vme_slave_resource *image;
337
338 if (resource->type != VME_SLAVE) {
339 printk(KERN_ERR "Not a slave resource\n");
340 return -EINVAL;
341 }
342
343 image = list_entry(resource->entry, struct vme_slave_resource, list);
344
345 if (bridge->slave_get == NULL) {
346 printk(KERN_ERR "vme_slave_get not supported\n");
347 return -EINVAL;
348 }
349
350 return bridge->slave_get(image, enabled, vme_base, size, buf_base,
351 aspace, cycle);
352}
353EXPORT_SYMBOL(vme_slave_get);
354
355void vme_slave_free(struct vme_resource *resource)
356{
357 struct vme_slave_resource *slave_image;
358
359 if (resource->type != VME_SLAVE) {
360 printk(KERN_ERR "Not a slave resource\n");
361 return;
362 }
363
364 slave_image = list_entry(resource->entry, struct vme_slave_resource,
365 list);
366 if (slave_image == NULL) {
367 printk(KERN_ERR "Can't find slave resource\n");
368 return;
369 }
370
371 /* Unlock image */
372 mutex_lock(&slave_image->mtx);
373 if (slave_image->locked == 0)
374 printk(KERN_ERR "Image is already free\n");
375
376 slave_image->locked = 0;
377 mutex_unlock(&slave_image->mtx);
378
379 /* Free up resource memory */
380 kfree(resource);
381}
382EXPORT_SYMBOL(vme_slave_free);
383
/*
 * Request a master image with specific attributes, return some unique
 * identifier.
 *
 * Scans the bridge's master resources for an image that is unlocked and
 * whose capability masks cover the requested address space, cycle types
 * and data width.  On success the image is marked locked and a resource
 * handle (to be freed with vme_master_free()) is returned; NULL on
 * failure.
 */
struct vme_resource *vme_master_request(struct device *dev,
	vme_address_t address, vme_cycle_t cycle, vme_width_t dwidth)
{
	struct vme_bridge *bridge;
	struct list_head *master_pos = NULL;
	struct vme_master_resource *allocated_image = NULL;
	struct vme_master_resource *master_image = NULL;
	struct vme_resource *resource = NULL;

	bridge = dev_to_bridge(dev);
	if (bridge == NULL) {
		printk(KERN_ERR "Can't find VME bus\n");
		goto err_bus;
	}

	/* Loop through master resources */
	list_for_each(master_pos, &bridge->master_resources) {
		master_image = list_entry(master_pos,
			struct vme_master_resource, list);

		if (master_image == NULL) {
			printk(KERN_WARNING "Registered NULL master resource\n");
			continue;
		}

		/* Find an unlocked and compatible image; the per-image
		 * spinlock guards the locked flag. */
		spin_lock(&master_image->lock);
		if (((master_image->address_attr & address) == address) &&
			((master_image->cycle_attr & cycle) == cycle) &&
			((master_image->width_attr & dwidth) == dwidth) &&
			(master_image->locked == 0)) {

			master_image->locked = 1;
			spin_unlock(&master_image->lock);
			allocated_image = master_image;
			break;
		}
		spin_unlock(&master_image->lock);
	}

	/* Check to see if we found a resource */
	if (allocated_image == NULL) {
		printk(KERN_ERR "Can't find a suitable resource\n");
		goto err_image;
	}

	resource = kmalloc(sizeof(struct vme_resource), GFP_KERNEL);
	if (resource == NULL) {
		printk(KERN_ERR "Unable to allocate resource structure\n");
		goto err_alloc;
	}
	resource->type = VME_MASTER;
	resource->entry = &allocated_image->list;

	return resource;

err_alloc:
	/* Unlock image: at this point master_image == allocated_image, the
	 * image we locked above. */
	spin_lock(&master_image->lock);
	master_image->locked = 0;
	spin_unlock(&master_image->lock);
err_image:
err_bus:
	return NULL;
}
EXPORT_SYMBOL(vme_master_request);
454
455int vme_master_set(struct vme_resource *resource, int enabled,
456 unsigned long long vme_base, unsigned long long size,
457 vme_address_t aspace, vme_cycle_t cycle, vme_width_t dwidth)
458{
459 struct vme_bridge *bridge = find_bridge(resource);
460 struct vme_master_resource *image;
461 int retval;
462
463 if (resource->type != VME_MASTER) {
464 printk(KERN_ERR "Not a master resource\n");
465 return -EINVAL;
466 }
467
468 image = list_entry(resource->entry, struct vme_master_resource, list);
469
470 if (bridge->master_set == NULL) {
471 printk(KERN_WARNING "vme_master_set not supported\n");
472 return -EINVAL;
473 }
474
475 if (!(((image->address_attr & aspace) == aspace) &&
476 ((image->cycle_attr & cycle) == cycle) &&
477 ((image->width_attr & dwidth) == dwidth))) {
478 printk(KERN_WARNING "Invalid attributes\n");
479 return -EINVAL;
480 }
481
482 retval = vme_check_window(aspace, vme_base, size);
483 if (retval)
484 return retval;
485
486 return bridge->master_set(image, enabled, vme_base, size, aspace,
487 cycle, dwidth);
488}
489EXPORT_SYMBOL(vme_master_set);
490
491int vme_master_get(struct vme_resource *resource, int *enabled,
492 unsigned long long *vme_base, unsigned long long *size,
493 vme_address_t *aspace, vme_cycle_t *cycle, vme_width_t *dwidth)
494{
495 struct vme_bridge *bridge = find_bridge(resource);
496 struct vme_master_resource *image;
497
498 if (resource->type != VME_MASTER) {
499 printk(KERN_ERR "Not a master resource\n");
500 return -EINVAL;
501 }
502
503 image = list_entry(resource->entry, struct vme_master_resource, list);
504
505 if (bridge->master_get == NULL) {
506 printk(KERN_WARNING "vme_master_set not supported\n");
507 return -EINVAL;
508 }
509
510 return bridge->master_get(image, enabled, vme_base, size, aspace,
511 cycle, dwidth);
512}
513EXPORT_SYMBOL(vme_master_get);
514
/*
 * Read data out of VME space into a buffer.
 *
 * Reads up to @count bytes at @offset within the master window into
 * @buf.  The transfer is clamped so it does not run past the end of the
 * window (as reported by vme_get_size()); the actual byte count from the
 * bridge driver is returned, or a negative errno.
 */
ssize_t vme_master_read(struct vme_resource *resource, void *buf, size_t count,
	loff_t offset)
{
	struct vme_bridge *bridge = find_bridge(resource);
	struct vme_master_resource *image;
	size_t length;

	if (bridge->master_read == NULL) {
		printk(KERN_WARNING "Reading from resource not supported\n");
		return -EINVAL;
	}

	if (resource->type != VME_MASTER) {
		printk(KERN_ERR "Not a master resource\n");
		return -EINVAL;
	}

	image = list_entry(resource->entry, struct vme_master_resource, list);

	length = vme_get_size(resource);

	/* Offset must fall within the window */
	if (offset > length) {
		printk(KERN_WARNING "Invalid Offset\n");
		return -EFAULT;
	}

	/* Clamp the transfer to the remaining window space */
	if ((offset + count) > length)
		count = length - offset;

	return bridge->master_read(image, buf, count, offset);

}
EXPORT_SYMBOL(vme_master_read);
551
552/*
553 * Write data out to VME space from a buffer.
554 */
555ssize_t vme_master_write(struct vme_resource *resource, void *buf,
556 size_t count, loff_t offset)
557{
558 struct vme_bridge *bridge = find_bridge(resource);
559 struct vme_master_resource *image;
560 size_t length;
561
562 if (bridge->master_write == NULL) {
563 printk(KERN_WARNING "Writing to resource not supported\n");
564 return -EINVAL;
565 }
566
567 if (resource->type != VME_MASTER) {
568 printk(KERN_ERR "Not a master resource\n");
569 return -EINVAL;
570 }
571
572 image = list_entry(resource->entry, struct vme_master_resource, list);
573
574 length = vme_get_size(resource);
575
576 if (offset > length) {
577 printk(KERN_WARNING "Invalid Offset\n");
578 return -EFAULT;
579 }
580
581 if ((offset + count) > length)
582 count = length - offset;
583
584 return bridge->master_write(image, buf, count, offset);
585}
586EXPORT_SYMBOL(vme_master_write);
587
588/*
589 * Perform RMW cycle to provided location.
590 */
591unsigned int vme_master_rmw(struct vme_resource *resource, unsigned int mask,
592 unsigned int compare, unsigned int swap, loff_t offset)
593{
594 struct vme_bridge *bridge = find_bridge(resource);
595 struct vme_master_resource *image;
596
597 if (bridge->master_rmw == NULL) {
598 printk(KERN_WARNING "Writing to resource not supported\n");
599 return -EINVAL;
600 }
601
602 if (resource->type != VME_MASTER) {
603 printk(KERN_ERR "Not a master resource\n");
604 return -EINVAL;
605 }
606
607 image = list_entry(resource->entry, struct vme_master_resource, list);
608
609 return bridge->master_rmw(image, mask, compare, swap, offset);
610}
611EXPORT_SYMBOL(vme_master_rmw);
612
613void vme_master_free(struct vme_resource *resource)
614{
615 struct vme_master_resource *master_image;
616
617 if (resource->type != VME_MASTER) {
618 printk(KERN_ERR "Not a master resource\n");
619 return;
620 }
621
622 master_image = list_entry(resource->entry, struct vme_master_resource,
623 list);
624 if (master_image == NULL) {
625 printk(KERN_ERR "Can't find master resource\n");
626 return;
627 }
628
629 /* Unlock image */
630 spin_lock(&master_image->lock);
631 if (master_image->locked == 0)
632 printk(KERN_ERR "Image is already free\n");
633
634 master_image->locked = 0;
635 spin_unlock(&master_image->lock);
636
637 /* Free up resource memory */
638 kfree(resource);
639}
640EXPORT_SYMBOL(vme_master_free);
641
/*
 * Request a DMA controller with specific attributes, return some unique
 * identifier.
 *
 * Scans the bridge's DMA resources for an unlocked controller that
 * supports the requested transfer route.  On success the controller is
 * marked locked and a resource handle (to be freed with vme_dma_free())
 * is returned; NULL on failure.
 */
struct vme_resource *vme_dma_request(struct device *dev, vme_dma_route_t route)
{
	struct vme_bridge *bridge;
	struct list_head *dma_pos = NULL;
	struct vme_dma_resource *allocated_ctrlr = NULL;
	struct vme_dma_resource *dma_ctrlr = NULL;
	struct vme_resource *resource = NULL;

	/* XXX Not checking resource attributes */
	/* NOTE(review): this printk fires on every request; it looks like a
	 * leftover development reminder rather than a real error. */
	printk(KERN_ERR "No VME resource Attribute tests done\n");

	bridge = dev_to_bridge(dev);
	if (bridge == NULL) {
		printk(KERN_ERR "Can't find VME bus\n");
		goto err_bus;
	}

	/* Loop through DMA resources */
	list_for_each(dma_pos, &bridge->dma_resources) {
		dma_ctrlr = list_entry(dma_pos,
			struct vme_dma_resource, list);

		if (dma_ctrlr == NULL) {
			printk(KERN_ERR "Registered NULL DMA resource\n");
			continue;
		}

		/* Find an unlocked and compatible controller; the mutex
		 * guards the locked flag. */
		mutex_lock(&dma_ctrlr->mtx);
		if (((dma_ctrlr->route_attr & route) == route) &&
			(dma_ctrlr->locked == 0)) {

			dma_ctrlr->locked = 1;
			mutex_unlock(&dma_ctrlr->mtx);
			allocated_ctrlr = dma_ctrlr;
			break;
		}
		mutex_unlock(&dma_ctrlr->mtx);
	}

	/* Check to see if we found a resource */
	if (allocated_ctrlr == NULL)
		goto err_ctrlr;

	resource = kmalloc(sizeof(struct vme_resource), GFP_KERNEL);
	if (resource == NULL) {
		printk(KERN_WARNING "Unable to allocate resource structure\n");
		goto err_alloc;
	}
	resource->type = VME_DMA;
	resource->entry = &allocated_ctrlr->list;

	return resource;

err_alloc:
	/* Unlock image: dma_ctrlr == allocated_ctrlr here */
	mutex_lock(&dma_ctrlr->mtx);
	dma_ctrlr->locked = 0;
	mutex_unlock(&dma_ctrlr->mtx);
err_ctrlr:
err_bus:
	return NULL;
}
EXPORT_SYMBOL(vme_dma_request);
710
711/*
712 * Start new list
713 */
714struct vme_dma_list *vme_new_dma_list(struct vme_resource *resource)
715{
716 struct vme_dma_resource *ctrlr;
717 struct vme_dma_list *dma_list;
718
719 if (resource->type != VME_DMA) {
720 printk(KERN_ERR "Not a DMA resource\n");
721 return NULL;
722 }
723
724 ctrlr = list_entry(resource->entry, struct vme_dma_resource, list);
725
726 dma_list = kmalloc(sizeof(struct vme_dma_list), GFP_KERNEL);
727 if (dma_list == NULL) {
728 printk(KERN_ERR "Unable to allocate memory for new dma list\n");
729 return NULL;
730 }
731 INIT_LIST_HEAD(&dma_list->entries);
732 dma_list->parent = ctrlr;
733 mutex_init(&dma_list->mtx);
734
735 return dma_list;
736}
737EXPORT_SYMBOL(vme_new_dma_list);
738
739/*
740 * Create "Pattern" type attributes
741 */
742struct vme_dma_attr *vme_dma_pattern_attribute(u32 pattern,
743 vme_pattern_t type)
744{
745 struct vme_dma_attr *attributes;
746 struct vme_dma_pattern *pattern_attr;
747
748 attributes = kmalloc(sizeof(struct vme_dma_attr), GFP_KERNEL);
749 if (attributes == NULL) {
750 printk(KERN_ERR "Unable to allocate memory for attributes "
751 "structure\n");
752 goto err_attr;
753 }
754
755 pattern_attr = kmalloc(sizeof(struct vme_dma_pattern), GFP_KERNEL);
756 if (pattern_attr == NULL) {
757 printk(KERN_ERR "Unable to allocate memory for pattern "
758 "attributes\n");
759 goto err_pat;
760 }
761
762 attributes->type = VME_DMA_PATTERN;
763 attributes->private = (void *)pattern_attr;
764
765 pattern_attr->pattern = pattern;
766 pattern_attr->type = type;
767
768 return attributes;
769
770err_pat:
771 kfree(attributes);
772err_attr:
773 return NULL;
774}
775EXPORT_SYMBOL(vme_dma_pattern_attribute);
776
777/*
778 * Create "PCI" type attributes
779 */
780struct vme_dma_attr *vme_dma_pci_attribute(dma_addr_t address)
781{
782 struct vme_dma_attr *attributes;
783 struct vme_dma_pci *pci_attr;
784
785 /* XXX Run some sanity checks here */
786
787 attributes = kmalloc(sizeof(struct vme_dma_attr), GFP_KERNEL);
788 if (attributes == NULL) {
789 printk(KERN_ERR "Unable to allocate memory for attributes "
790 "structure\n");
791 goto err_attr;
792 }
793
794 pci_attr = kmalloc(sizeof(struct vme_dma_pci), GFP_KERNEL);
795 if (pci_attr == NULL) {
796 printk(KERN_ERR "Unable to allocate memory for pci "
797 "attributes\n");
798 goto err_pci;
799 }
800
801
802
803 attributes->type = VME_DMA_PCI;
804 attributes->private = (void *)pci_attr;
805
806 pci_attr->address = address;
807
808 return attributes;
809
810err_pci:
811 kfree(attributes);
812err_attr:
813 return NULL;
814}
815EXPORT_SYMBOL(vme_dma_pci_attribute);
816
817/*
818 * Create "VME" type attributes
819 */
820struct vme_dma_attr *vme_dma_vme_attribute(unsigned long long address,
821 vme_address_t aspace, vme_cycle_t cycle, vme_width_t dwidth)
822{
823 struct vme_dma_attr *attributes;
824 struct vme_dma_vme *vme_attr;
825
826 attributes = kmalloc(
827 sizeof(struct vme_dma_attr), GFP_KERNEL);
828 if (attributes == NULL) {
829 printk(KERN_ERR "Unable to allocate memory for attributes "
830 "structure\n");
831 goto err_attr;
832 }
833
834 vme_attr = kmalloc(sizeof(struct vme_dma_vme), GFP_KERNEL);
835 if (vme_attr == NULL) {
836 printk(KERN_ERR "Unable to allocate memory for vme "
837 "attributes\n");
838 goto err_vme;
839 }
840
841 attributes->type = VME_DMA_VME;
842 attributes->private = (void *)vme_attr;
843
844 vme_attr->address = address;
845 vme_attr->aspace = aspace;
846 vme_attr->cycle = cycle;
847 vme_attr->dwidth = dwidth;
848
849 return attributes;
850
851err_vme:
852 kfree(attributes);
853err_attr:
854 return NULL;
855}
856EXPORT_SYMBOL(vme_dma_vme_attribute);
857
858/*
859 * Free attribute
860 */
861void vme_dma_free_attribute(struct vme_dma_attr *attributes)
862{
863 kfree(attributes->private);
864 kfree(attributes);
865}
866EXPORT_SYMBOL(vme_dma_free_attribute);
867
/*
 * Append a source/destination/count entry to a DMA link list by calling
 * the bridge driver's dma_list_add implementation.
 *
 * NOTE(review): mutex_trylock is used here, so a list merely being
 * manipulated concurrently is reported as "already submitted" — confirm
 * this is the intended semantics.
 */
int vme_dma_list_add(struct vme_dma_list *list, struct vme_dma_attr *src,
	struct vme_dma_attr *dest, size_t count)
{
	struct vme_bridge *bridge = list->parent->parent;
	int retval;

	if (bridge->dma_list_add == NULL) {
		printk(KERN_WARNING "Link List DMA generation not supported\n");
		return -EINVAL;
	}

	if (!mutex_trylock(&list->mtx)) {
		printk(KERN_ERR "Link List already submitted\n");
		return -EINVAL;
	}

	retval = bridge->dma_list_add(list, src, dest, count);

	mutex_unlock(&list->mtx);

	return retval;
}
EXPORT_SYMBOL(vme_dma_list_add);
891
892int vme_dma_list_exec(struct vme_dma_list *list)
893{
894 struct vme_bridge *bridge = list->parent->parent;
895 int retval;
896
897 if (bridge->dma_list_exec == NULL) {
898 printk(KERN_ERR "Link List DMA execution not supported\n");
899 return -EINVAL;
900 }
901
902 mutex_lock(&list->mtx);
903
904 retval = bridge->dma_list_exec(list);
905
906 mutex_unlock(&list->mtx);
907
908 return retval;
909}
910EXPORT_SYMBOL(vme_dma_list_exec);
911
/*
 * Empty a DMA link list via the bridge driver and free the list itself.
 * Returns 0 on success; -EINVAL if the list is in use or emptying is not
 * supported, or the bridge driver's error code if emptying fails (in
 * which case the list is NOT freed).
 */
int vme_dma_list_free(struct vme_dma_list *list)
{
	struct vme_bridge *bridge = list->parent->parent;
	int retval;

	if (bridge->dma_list_empty == NULL) {
		printk(KERN_WARNING "Emptying of Link Lists not supported\n");
		return -EINVAL;
	}

	if (!mutex_trylock(&list->mtx)) {
		printk(KERN_ERR "Link List in use\n");
		return -EINVAL;
	}

	/*
	 * Empty out all of the entries from the dma list. We need to go to the
	 * low level driver as dma entries are driver specific.
	 */
	retval = bridge->dma_list_empty(list);
	if (retval) {
		printk(KERN_ERR "Unable to empty link-list entries\n");
		mutex_unlock(&list->mtx);
		return retval;
	}
	mutex_unlock(&list->mtx);
	kfree(list);

	return retval;
}
EXPORT_SYMBOL(vme_dma_list_free);
943
944int vme_dma_free(struct vme_resource *resource)
945{
946 struct vme_dma_resource *ctrlr;
947
948 if (resource->type != VME_DMA) {
949 printk(KERN_ERR "Not a DMA resource\n");
950 return -EINVAL;
951 }
952
953 ctrlr = list_entry(resource->entry, struct vme_dma_resource, list);
954
955 if (!mutex_trylock(&ctrlr->mtx)) {
956 printk(KERN_ERR "Resource busy, can't free\n");
957 return -EBUSY;
958 }
959
960 if (!(list_empty(&ctrlr->pending) && list_empty(&ctrlr->running))) {
961 printk(KERN_WARNING "Resource still processing transfers\n");
962 mutex_unlock(&ctrlr->mtx);
963 return -EBUSY;
964 }
965
966 ctrlr->locked = 0;
967
968 mutex_unlock(&ctrlr->mtx);
969
970 return 0;
971}
972EXPORT_SYMBOL(vme_dma_free);
973
974void vme_irq_handler(struct vme_bridge *bridge, int level, int statid)
975{
976 void (*call)(int, int, void *);
977 void *priv_data;
978
979 call = bridge->irq[level - 1].callback[statid].func;
980 priv_data = bridge->irq[level - 1].callback[statid].priv_data;
981
982 if (call != NULL)
983 call(level, statid, priv_data);
984 else
985 printk(KERN_WARNING "Spurilous VME interrupt, level:%x, "
986 "vector:%x\n", level, statid);
987}
988EXPORT_SYMBOL(vme_irq_handler);
989
/*
 * Register a callback for a VME interrupt level (1-7) and status/ID
 * vector, and enable that interrupt level on the bridge.  Returns 0 on
 * success, -EINVAL on bad arguments or missing bridge support, -EBUSY if
 * a callback is already registered for this level/vector.
 */
int vme_irq_request(struct device *dev, int level, int statid,
	void (*callback)(int, int, void *),
	void *priv_data)
{
	struct vme_bridge *bridge;

	bridge = dev_to_bridge(dev);
	if (bridge == NULL) {
		printk(KERN_ERR "Can't find VME bus\n");
		return -EINVAL;
	}

	/* VME defines interrupt levels 1 through 7 */
	if ((level < 1) || (level > 7)) {
		printk(KERN_ERR "Invalid interrupt level\n");
		return -EINVAL;
	}

	if (bridge->irq_set == NULL) {
		printk(KERN_ERR "Configuring interrupts not supported\n");
		return -EINVAL;
	}

	/* irq_mtx serialises callback table updates against vme_irq_free() */
	mutex_lock(&bridge->irq_mtx);

	if (bridge->irq[level - 1].callback[statid].func) {
		mutex_unlock(&bridge->irq_mtx);
		printk(KERN_WARNING "VME Interrupt already taken\n");
		return -EBUSY;
	}

	bridge->irq[level - 1].count++;
	bridge->irq[level - 1].callback[statid].priv_data = priv_data;
	bridge->irq[level - 1].callback[statid].func = callback;

	/* Enable IRQ level */
	bridge->irq_set(bridge, level, 1, 1);

	mutex_unlock(&bridge->irq_mtx);

	return 0;
}
EXPORT_SYMBOL(vme_irq_request);
1032
/*
 * Unregister the callback for a VME interrupt level (1-7) and status/ID
 * vector.  The interrupt level is disabled on the bridge once no
 * callbacks remain attached at that level.
 */
void vme_irq_free(struct device *dev, int level, int statid)
{
	struct vme_bridge *bridge;

	bridge = dev_to_bridge(dev);
	if (bridge == NULL) {
		printk(KERN_ERR "Can't find VME bus\n");
		return;
	}

	/* VME defines interrupt levels 1 through 7 */
	if ((level < 1) || (level > 7)) {
		printk(KERN_ERR "Invalid interrupt level\n");
		return;
	}

	if (bridge->irq_set == NULL) {
		printk(KERN_ERR "Configuring interrupts not supported\n");
		return;
	}

	mutex_lock(&bridge->irq_mtx);

	bridge->irq[level - 1].count--;

	/* Disable IRQ level if no more interrupts attached at this level*/
	if (bridge->irq[level - 1].count == 0)
		bridge->irq_set(bridge, level, 0, 1);

	bridge->irq[level - 1].callback[statid].func = NULL;
	bridge->irq[level - 1].callback[statid].priv_data = NULL;

	mutex_unlock(&bridge->irq_mtx);
}
EXPORT_SYMBOL(vme_irq_free);
1067
1068int vme_irq_generate(struct device *dev, int level, int statid)
1069{
1070 struct vme_bridge *bridge;
1071
1072 bridge = dev_to_bridge(dev);
1073 if (bridge == NULL) {
1074 printk(KERN_ERR "Can't find VME bus\n");
1075 return -EINVAL;
1076 }
1077
1078 if ((level < 1) || (level > 7)) {
1079 printk(KERN_WARNING "Invalid interrupt level\n");
1080 return -EINVAL;
1081 }
1082
1083 if (bridge->irq_generate == NULL) {
1084 printk(KERN_WARNING "Interrupt generation not supported\n");
1085 return -EINVAL;
1086 }
1087
1088 return bridge->irq_generate(bridge, level, statid);
1089}
1090EXPORT_SYMBOL(vme_irq_generate);
1091
/*
 * Request the location monitor, return resource or NULL
 *
 * Scans the bridge's location monitor resources for an unlocked one; on
 * success it is marked locked and a resource handle (to be freed with
 * vme_lm_free()) is returned.
 */
struct vme_resource *vme_lm_request(struct device *dev)
{
	struct vme_bridge *bridge;
	struct list_head *lm_pos = NULL;
	struct vme_lm_resource *allocated_lm = NULL;
	struct vme_lm_resource *lm = NULL;
	struct vme_resource *resource = NULL;

	bridge = dev_to_bridge(dev);
	if (bridge == NULL) {
		printk(KERN_ERR "Can't find VME bus\n");
		goto err_bus;
	}

	/* Loop through location monitor resources */
	list_for_each(lm_pos, &bridge->lm_resources) {
		lm = list_entry(lm_pos,
			struct vme_lm_resource, list);

		if (lm == NULL) {
			printk(KERN_ERR "Registered NULL Location Monitor "
				"resource\n");
			continue;
		}

		/* Find an unlocked controller; the mutex guards the locked
		 * flag. */
		mutex_lock(&lm->mtx);
		if (lm->locked == 0) {
			lm->locked = 1;
			mutex_unlock(&lm->mtx);
			allocated_lm = lm;
			break;
		}
		mutex_unlock(&lm->mtx);
	}

	/* Check to see if we found a resource */
	if (allocated_lm == NULL)
		goto err_lm;

	resource = kmalloc(sizeof(struct vme_resource), GFP_KERNEL);
	if (resource == NULL) {
		printk(KERN_ERR "Unable to allocate resource structure\n");
		goto err_alloc;
	}
	resource->type = VME_LM;
	resource->entry = &allocated_lm->list;

	return resource;

err_alloc:
	/* Unlock image: lm == allocated_lm here */
	mutex_lock(&lm->mtx);
	lm->locked = 0;
	mutex_unlock(&lm->mtx);
err_lm:
err_bus:
	return NULL;
}
EXPORT_SYMBOL(vme_lm_request);
1155
1156int vme_lm_count(struct vme_resource *resource)
1157{
1158 struct vme_lm_resource *lm;
1159
1160 if (resource->type != VME_LM) {
1161 printk(KERN_ERR "Not a Location Monitor resource\n");
1162 return -EINVAL;
1163 }
1164
1165 lm = list_entry(resource->entry, struct vme_lm_resource, list);
1166
1167 return lm->monitors;
1168}
1169EXPORT_SYMBOL(vme_lm_count);
1170
1171int vme_lm_set(struct vme_resource *resource, unsigned long long lm_base,
1172 vme_address_t aspace, vme_cycle_t cycle)
1173{
1174 struct vme_bridge *bridge = find_bridge(resource);
1175 struct vme_lm_resource *lm;
1176
1177 if (resource->type != VME_LM) {
1178 printk(KERN_ERR "Not a Location Monitor resource\n");
1179 return -EINVAL;
1180 }
1181
1182 lm = list_entry(resource->entry, struct vme_lm_resource, list);
1183
1184 if (bridge->lm_set == NULL) {
1185 printk(KERN_ERR "vme_lm_set not supported\n");
1186 return -EINVAL;
1187 }
1188
1189 return bridge->lm_set(lm, lm_base, aspace, cycle);
1190}
1191EXPORT_SYMBOL(vme_lm_set);
1192
1193int vme_lm_get(struct vme_resource *resource, unsigned long long *lm_base,
1194 vme_address_t *aspace, vme_cycle_t *cycle)
1195{
1196 struct vme_bridge *bridge = find_bridge(resource);
1197 struct vme_lm_resource *lm;
1198
1199 if (resource->type != VME_LM) {
1200 printk(KERN_ERR "Not a Location Monitor resource\n");
1201 return -EINVAL;
1202 }
1203
1204 lm = list_entry(resource->entry, struct vme_lm_resource, list);
1205
1206 if (bridge->lm_get == NULL) {
1207 printk(KERN_ERR "vme_lm_get not supported\n");
1208 return -EINVAL;
1209 }
1210
1211 return bridge->lm_get(lm, lm_base, aspace, cycle);
1212}
1213EXPORT_SYMBOL(vme_lm_get);
1214
1215int vme_lm_attach(struct vme_resource *resource, int monitor,
1216 void (*callback)(int))
1217{
1218 struct vme_bridge *bridge = find_bridge(resource);
1219 struct vme_lm_resource *lm;
1220
1221 if (resource->type != VME_LM) {
1222 printk(KERN_ERR "Not a Location Monitor resource\n");
1223 return -EINVAL;
1224 }
1225
1226 lm = list_entry(resource->entry, struct vme_lm_resource, list);
1227
1228 if (bridge->lm_attach == NULL) {
1229 printk(KERN_ERR "vme_lm_attach not supported\n");
1230 return -EINVAL;
1231 }
1232
1233 return bridge->lm_attach(lm, monitor, callback);
1234}
1235EXPORT_SYMBOL(vme_lm_attach);
1236
1237int vme_lm_detach(struct vme_resource *resource, int monitor)
1238{
1239 struct vme_bridge *bridge = find_bridge(resource);
1240 struct vme_lm_resource *lm;
1241
1242 if (resource->type != VME_LM) {
1243 printk(KERN_ERR "Not a Location Monitor resource\n");
1244 return -EINVAL;
1245 }
1246
1247 lm = list_entry(resource->entry, struct vme_lm_resource, list);
1248
1249 if (bridge->lm_detach == NULL) {
1250 printk(KERN_ERR "vme_lm_detach not supported\n");
1251 return -EINVAL;
1252 }
1253
1254 return bridge->lm_detach(lm, monitor);
1255}
1256EXPORT_SYMBOL(vme_lm_detach);
1257
/*
 * Release a previously requested location monitor: mark it unlocked for
 * reuse and free the resource handle.
 */
void vme_lm_free(struct vme_resource *resource)
{
	struct vme_lm_resource *lm;

	if (resource->type != VME_LM) {
		printk(KERN_ERR "Not a Location Monitor resource\n");
		return;
	}

	lm = list_entry(resource->entry, struct vme_lm_resource, list);

	mutex_lock(&lm->mtx);

	/* XXX
	 * Check to see that there aren't any callbacks still attached, if
	 * there are we should probably be detaching them!
	 */

	lm->locked = 0;

	mutex_unlock(&lm->mtx);

	kfree(resource);
}
EXPORT_SYMBOL(vme_lm_free);
1283
1284int vme_slot_get(struct device *bus)
1285{
1286 struct vme_bridge *bridge;
1287
1288 bridge = dev_to_bridge(bus);
1289 if (bridge == NULL) {
1290 printk(KERN_ERR "Can't find VME bus\n");
1291 return -EINVAL;
1292 }
1293
1294 if (bridge->slot_get == NULL) {
1295 printk(KERN_WARNING "vme_slot_get not supported\n");
1296 return -EINVAL;
1297 }
1298
1299 return bridge->slot_get(bridge);
1300}
1301EXPORT_SYMBOL(vme_slot_get);
1302
1303
1304/* - Bridge Registration --------------------------------------------------- */
1305
/*
 * Allocate the lowest free bus number from the vme_bus_numbers bitmap.
 *
 * NOTE(review): if every bit is already set, the loop falls through and
 * i equals the bit width of vme_bus_numbers; the caller receives an
 * out-of-range bus number and vme_free_bus_num() would later shift by
 * the full width (undefined behavior) — confirm exhaustion cannot occur
 * in practice or add handling.
 */
static int vme_alloc_bus_num(void)
{
	int i;

	mutex_lock(&vme_bus_num_mtx);
	for (i = 0; i < sizeof(vme_bus_numbers) * 8; i++) {
		if (((vme_bus_numbers >> i) & 0x1) == 0) {
			vme_bus_numbers |= (0x1 << i);
			break;
		}
	}
	mutex_unlock(&vme_bus_num_mtx);

	return i;
}
1321
/* Return a bus number to the vme_bus_numbers bitmap for reuse. */
static void vme_free_bus_num(int bus)
{
	mutex_lock(&vme_bus_num_mtx);
	vme_bus_numbers &= ~(0x1 << bus);
	mutex_unlock(&vme_bus_num_mtx);
}
1328
/*
 * Register a VME bridge: allocate it a bus number and create one device
 * per slot on the VME bus.  On failure, devices registered so far are
 * unregistered and the bus number is released.
 */
int vme_register_bridge(struct vme_bridge *bridge)
{
	struct device *dev;
	int retval;
	int i;

	bridge->num = vme_alloc_bus_num();

	/* This creates 32 vme "slot" devices. This equates to a slot for each
	 * ID available in a system conforming to the ANSI/VITA 1-1994
	 * specification.
	 */
	for (i = 0; i < VME_SLOTS_MAX; i++) {
		dev = &bridge->dev[i];
		memset(dev, 0, sizeof(struct device));

		dev->parent = bridge->parent;
		dev->bus = &vme_bus_type;
		/*
		 * We save a pointer to the bridge in platform_data so that we
		 * can get to it later. We keep driver_data for use by the
		 * driver that binds against the slot
		 */
		dev->platform_data = bridge;
		/* Device names are vme-<bus>.<slot>, slots numbered from 1 */
		dev_set_name(dev, "vme-%x.%x", bridge->num, i + 1);

		retval = device_register(dev);
		if (retval)
			goto err_reg;
	}

	return retval;

err_reg:
	/* Unwind: unregister the devices created before the failure */
	while (--i >= 0) {
		dev = &bridge->dev[i];
		device_unregister(dev);
	}
	vme_free_bus_num(bridge->num);
	return retval;
}
EXPORT_SYMBOL(vme_register_bridge);
1371
1372void vme_unregister_bridge(struct vme_bridge *bridge)
1373{
1374 int i;
1375 struct device *dev;
1376
1377
1378 for (i = 0; i < VME_SLOTS_MAX; i++) {
1379 dev = &bridge->dev[i];
1380 device_unregister(dev);
1381 }
1382 vme_free_bus_num(bridge->num);
1383}
1384EXPORT_SYMBOL(vme_unregister_bridge);
1385
1386
1387/* - Driver Registration --------------------------------------------------- */
1388
1389int vme_register_driver(struct vme_driver *drv)
1390{
1391 drv->driver.name = drv->name;
1392 drv->driver.bus = &vme_bus_type;
1393
1394 return driver_register(&drv->driver);
1395}
1396EXPORT_SYMBOL(vme_register_driver);
1397
/* Unregister a VME device driver from the driver core. */
void vme_unregister_driver(struct vme_driver *drv)
{
	driver_unregister(&drv->driver);
}
EXPORT_SYMBOL(vme_unregister_driver);
1403
1404/* - Bus Registration ------------------------------------------------------ */
1405
1406static int vme_calc_slot(struct device *dev)
1407{
1408 struct vme_bridge *bridge;
1409 int num;
1410
1411 bridge = dev_to_bridge(dev);
1412
1413 /* Determine slot number */
1414 num = 0;
1415 while (num < VME_SLOTS_MAX) {
1416 if (&bridge->dev[num] == dev)
1417 break;
1418
1419 num++;
1420 }
1421 if (num == VME_SLOTS_MAX) {
1422 dev_err(dev, "Failed to identify slot\n");
1423 num = 0;
1424 goto err_dev;
1425 }
1426 num++;
1427
1428err_dev:
1429 return num;
1430}
1431
/* Map a bus device to the vme_driver that contains its device_driver.
 * Logs (but does not otherwise handle) a NULL dev->driver. */
static struct vme_driver *dev_to_vme_driver(struct device *dev)
{
	if (dev->driver == NULL)
		printk(KERN_ERR "Bugger dev->driver is NULL\n");

	return container_of(dev->driver, struct vme_driver, driver);
}
1439
/*
 * Bus match callback: a driver matches a slot device when its bind table
 * contains an entry for this bridge's bus number and either the exact
 * slot, VME_SLOT_ALL, or VME_SLOT_CURRENT (the bridge's own slot).
 *
 * NOTE(review): the bind table is terminated by an entry with both bus
 * and slot zero, so bus 0 / slot 0 cannot itself be bound — confirm this
 * is intended.
 */
static int vme_bus_match(struct device *dev, struct device_driver *drv)
{
	struct vme_bridge *bridge;
	struct vme_driver *driver;
	int i, num;

	bridge = dev_to_bridge(dev);
	driver = container_of(drv, struct vme_driver, driver);

	num = vme_calc_slot(dev);
	if (!num)
		goto err_dev;

	if (driver->bind_table == NULL) {
		dev_err(dev, "Bind table NULL\n");
		goto err_table;
	}

	i = 0;
	while ((driver->bind_table[i].bus != 0) ||
		(driver->bind_table[i].slot != 0)) {

		if (bridge->num == driver->bind_table[i].bus) {
			if (num == driver->bind_table[i].slot)
				return 1;

			if (driver->bind_table[i].slot == VME_SLOT_ALL)
				return 1;

			if ((driver->bind_table[i].slot == VME_SLOT_CURRENT) &&
				(num == vme_slot_get(dev)))
				return 1;
		}
		i++;
	}

err_dev:
err_table:
	return 0;
}
1480
1481static int vme_bus_probe(struct device *dev)
1482{
1483 struct vme_bridge *bridge;
1484 struct vme_driver *driver;
1485 int retval = -ENODEV;
1486
1487 driver = dev_to_vme_driver(dev);
1488 bridge = dev_to_bridge(dev);
1489
1490 if (driver->probe != NULL)
1491 retval = driver->probe(dev, bridge->num, vme_calc_slot(dev));
1492
1493 return retval;
1494}
1495
1496static int vme_bus_remove(struct device *dev)
1497{
1498 struct vme_bridge *bridge;
1499 struct vme_driver *driver;
1500 int retval = -ENODEV;
1501
1502 driver = dev_to_vme_driver(dev);
1503 bridge = dev_to_bridge(dev);
1504
1505 if (driver->remove != NULL)
1506 retval = driver->remove(dev, bridge->num, vme_calc_slot(dev));
1507
1508 return retval;
1509}
1510
/* The VME bus type.  vme_register_bridge() registers one device per slot
 * on this bus; drivers bind to those devices via vme_bus_match(). */
struct bus_type vme_bus_type = {
	.name = "vme",
	.match = vme_bus_match,
	.probe = vme_bus_probe,
	.remove = vme_bus_remove,
};
EXPORT_SYMBOL(vme_bus_type);
1518
/* Module init: register the VME bus type with the driver core. */
static int __init vme_init(void)
{
	return bus_register(&vme_bus_type);
}

/* Module exit: unregister the VME bus type. */
static void __exit vme_exit(void)
{
	bus_unregister(&vme_bus_type);
}
1528
1529MODULE_DESCRIPTION("VME bridge driver framework");
1530MODULE_AUTHOR("Martyn Welch <martyn.welch@ge.com");
1531MODULE_LICENSE("GPL");
1532
1533module_init(vme_init);
1534module_exit(vme_exit);
diff --git a/drivers/staging/vme/vme.h b/drivers/staging/vme/vme.h
new file mode 100644
index 00000000000..4155d8c2a53
--- /dev/null
+++ b/drivers/staging/vme/vme.h
@@ -0,0 +1,170 @@
#ifndef _VME_H_
#define _VME_H_

/* Resource Type */
/* Kinds of bridge resource a struct vme_resource can refer to. */
enum vme_resource_type {
	VME_MASTER,	/* master window: local CPU access out onto the VME bus */
	VME_SLAVE,	/* slave window: VME bus access into local memory */
	VME_DMA,	/* DMA controller channel */
	VME_LM		/* location monitor */
};

/* VME Address Spaces */
/*
 * Address-space attributes are a bitmask; multiple spaces may be OR'd
 * together when requesting a window.
 */
typedef u32 vme_address_t;
#define VME_A16		0x1
#define VME_A24		0x2
#define VME_A32		0x4
#define VME_A64		0x8
#define VME_CRCSR	0x10
#define VME_USER1	0x20
#define VME_USER2	0x40
#define VME_USER3	0x80
#define VME_USER4	0x100

/* One-past-the-end limits of each address space. */
#define VME_A16_MAX	0x10000ULL
#define VME_A24_MAX	0x1000000ULL
#define VME_A32_MAX	0x100000000ULL
/*
 * NOTE(review): 0x10000000000000000ULL is 2^64, one past ULLONG_MAX, and
 * is not representable in unsigned long long - compilers will warn here.
 * Confirm whether users of VME_A64_MAX rely on the (wrapped) value.
 */
#define VME_A64_MAX	0x10000000000000000ULL
#define VME_CRCSR_MAX	0x1000000ULL

30
/* VME Cycle Types */
/*
 * Cycle-type attributes are a bitmask; transfer protocols, 2eSST speeds
 * and access modifiers (supervisor/user, program/data) may be combined.
 */
typedef u32 vme_cycle_t;
#define VME_SCT		0x1	/* single cycle transfer */
#define VME_BLT		0x2	/* block transfer */
#define VME_MBLT	0x4	/* multiplexed block transfer */
#define VME_2eVME	0x8	/* two-edge VME */
#define VME_2eSST	0x10	/* two-edge source synchronous transfer */
#define VME_2eSSTB	0x20	/* 2eSST broadcast */

/* 2eSST transfer rates (MB/s) */
#define VME_2eSST160	0x100
#define VME_2eSST267	0x200
#define VME_2eSST320	0x400

/* Access modifiers */
#define VME_SUPER	0x1000	/* supervisor access */
#define VME_USER	0x2000	/* user (non-privileged) access */
#define VME_PROG	0x4000	/* program access */
#define VME_DATA	0x8000	/* data access */

/* VME Data Widths */
typedef u32 vme_width_t;
#define VME_D8		0x1
#define VME_D16		0x2
#define VME_D32		0x4
#define VME_D64		0x8

/* Arbitration Scheduling Modes */
typedef u32 vme_arbitration_t;
#define VME_R_ROBIN_MODE	0x1
#define VME_PRIORITY_MODE	0x2

/* DMA endpoint types (see struct vme_dma_attr) */
typedef u32 vme_dma_t;
#define VME_DMA_PATTERN			(1<<0)
#define VME_DMA_PCI			(1<<1)
#define VME_DMA_VME			(1<<2)

/* Pattern-source generation modes */
typedef u32 vme_pattern_t;
#define VME_DMA_PATTERN_BYTE		(1<<0)
#define VME_DMA_PATTERN_WORD		(1<<1)
#define VME_DMA_PATTERN_INCREMENT	(1<<2)

/* Directions a DMA channel may be asked to transfer in (bitmask) */
typedef u32 vme_dma_route_t;
#define VME_DMA_VME_TO_MEM		(1<<0)
#define VME_DMA_MEM_TO_VME		(1<<1)
#define VME_DMA_VME_TO_VME		(1<<2)
#define VME_DMA_MEM_TO_MEM		(1<<3)
#define VME_DMA_PATTERN_TO_VME		(1<<4)
#define VME_DMA_PATTERN_TO_MEM		(1<<5)
78
/*
 * Source or destination descriptor for one DMA list entry; created by
 * the vme_dma_*_attribute() helpers, freed by vme_dma_free_attribute().
 */
struct vme_dma_attr {
	vme_dma_t type;		/* PATTERN, PCI or VME endpoint */
	void *private;		/* type-specific attribute data */
};

/* Opaque handle identifying an allocated bridge resource. */
struct vme_resource {
	enum vme_resource_type type;
	struct list_head *entry;	/* entry in the bridge's resource list */
};

extern struct bus_type vme_bus_type;

/* Special slot values usable in struct vme_device_id. */
#define VME_SLOT_CURRENT	-1	/* the slot this bridge occupies */
#define VME_SLOT_ALL		-2	/* match every slot on the bus */

/* One bus/slot position a driver wishes to be probed at. */
struct vme_device_id {
	int bus;
	int slot;
};

/*
 * A VME device driver.  At minimum .name, .probe and .bind_table must
 * be set before calling vme_register_driver().  probe/remove receive
 * the device, bus number and slot number.
 */
struct vme_driver {
	struct list_head node;
	const char *name;
	const struct vme_device_id *bind_table;	/* terminated by {0, 0} */
	int (*probe) (struct device *, int, int);
	int (*remove) (struct device *, int, int);
	void (*shutdown) (void);
	struct device_driver driver;
};
108
/* Allocate/free a DMA-coherent buffer suitable for a slave window. */
void *vme_alloc_consistent(struct vme_resource *, size_t, dma_addr_t *);
void vme_free_consistent(struct vme_resource *, size_t, void *,
	dma_addr_t);

size_t vme_get_size(struct vme_resource *);

/* Slave window request/configure/query/release. */
struct vme_resource *vme_slave_request(struct device *, vme_address_t,
	vme_cycle_t);
int vme_slave_set(struct vme_resource *, int, unsigned long long,
	unsigned long long, dma_addr_t, vme_address_t, vme_cycle_t);
int vme_slave_get(struct vme_resource *, int *, unsigned long long *,
	unsigned long long *, dma_addr_t *, vme_address_t *, vme_cycle_t *);
void vme_slave_free(struct vme_resource *);

/* Master window request/configure/access/release. */
struct vme_resource *vme_master_request(struct device *, vme_address_t,
	vme_cycle_t, vme_width_t);
int vme_master_set(struct vme_resource *, int, unsigned long long,
	unsigned long long, vme_address_t, vme_cycle_t, vme_width_t);
int vme_master_get(struct vme_resource *, int *, unsigned long long *,
	unsigned long long *, vme_address_t *, vme_cycle_t *, vme_width_t *);
ssize_t vme_master_read(struct vme_resource *, void *, size_t, loff_t);
ssize_t vme_master_write(struct vme_resource *, void *, size_t, loff_t);
/* Read-modify-write; returns the original value at the offset. */
unsigned int vme_master_rmw(struct vme_resource *, unsigned int, unsigned int,
	unsigned int, loff_t);
void vme_master_free(struct vme_resource *);

/* DMA channel request, list construction and execution. */
struct vme_resource *vme_dma_request(struct device *, vme_dma_route_t);
struct vme_dma_list *vme_new_dma_list(struct vme_resource *);
struct vme_dma_attr *vme_dma_pattern_attribute(u32, vme_pattern_t);
struct vme_dma_attr *vme_dma_pci_attribute(dma_addr_t);
struct vme_dma_attr *vme_dma_vme_attribute(unsigned long long, vme_address_t,
	vme_cycle_t, vme_width_t);
void vme_dma_free_attribute(struct vme_dma_attr *);
int vme_dma_list_add(struct vme_dma_list *, struct vme_dma_attr *,
	struct vme_dma_attr *, size_t);
int vme_dma_list_exec(struct vme_dma_list *);
int vme_dma_list_free(struct vme_dma_list *);
int vme_dma_free(struct vme_resource *);

/* VME interrupt callback attach/detach and interrupt generation.
 * Arguments are (dev, level, statid); callbacks run in interrupt context. */
int vme_irq_request(struct device *, int, int,
	void (*callback)(int, int, void *), void *);
void vme_irq_free(struct device *, int, int);
int vme_irq_generate(struct device *, int, int);

/* Location monitor request/configure/attach/release. */
struct vme_resource * vme_lm_request(struct device *);
int vme_lm_count(struct vme_resource *);
int vme_lm_set(struct vme_resource *, unsigned long long, vme_address_t,
	vme_cycle_t);
int vme_lm_get(struct vme_resource *, unsigned long long *, vme_address_t *,
	vme_cycle_t *);
int vme_lm_attach(struct vme_resource *, int, void (*callback)(int));
int vme_lm_detach(struct vme_resource *, int);
void vme_lm_free(struct vme_resource *);

/* Slot ID of the bridge owning this device. */
int vme_slot_get(struct device *);

/* Driver registration with the VME core. */
int vme_register_driver(struct vme_driver *);
void vme_unregister_driver(struct vme_driver *);


#endif /* _VME_H_ */
170
diff --git a/drivers/staging/vme/vme_api.txt b/drivers/staging/vme/vme_api.txt
new file mode 100644
index 00000000000..4910e92c52a
--- /dev/null
+++ b/drivers/staging/vme/vme_api.txt
@@ -0,0 +1,383 @@
1 VME Device Driver API
2 =====================
3
4Driver registration
5===================
6
7As with other subsystems within the Linux kernel, VME device drivers register
8with the VME subsystem, typically called from the devices init routine. This is
9achieved via a call to the following function:
10
11 int vme_register_driver (struct vme_driver *driver);
12
13If driver registration is successful this function returns zero, if an error
14occurred a negative error code will be returned.
15
16A pointer to a structure of type 'vme_driver' must be provided to the
17registration function. The structure is as follows:
18
19 struct vme_driver {
20 struct list_head node;
		const char *name;
22 const struct vme_device_id *bind_table;
23 int (*probe) (struct device *, int, int);
24 int (*remove) (struct device *, int, int);
25 void (*shutdown) (void);
26 struct device_driver driver;
27 };
28
29At the minimum, the '.name', '.probe' and '.bind_table' elements of this
30structure should be correctly set. The '.name' element is a pointer to a string
31holding the device driver's name. The '.probe' element should contain a pointer
32to the probe routine.
33
34The arguments of the probe routine are as follows:
35
36 probe(struct device *dev, int bus, int slot);
37
38The '.bind_table' is a pointer to an array of type 'vme_device_id':
39
40 struct vme_device_id {
41 int bus;
42 int slot;
43 };
44
45Each structure in this array should provide a bus and slot number where the core
46should probe, using the driver's probe routine, for a device on the specified
47VME bus.
48
49The VME subsystem supports a single VME driver per 'slot'. There are considered
50to be 32 slots per bus, one for each slot-ID as defined in the ANSI/VITA 1-1994
specification and are analogous to the physical slots on the VME backplane.
52
53A function is also provided to unregister the driver from the VME core and is
54usually called from the device driver's exit routine:
55
56 void vme_unregister_driver (struct vme_driver *driver);
57
58
59Resource management
60===================
61
62Once a driver has registered with the VME core the provided probe routine will
be called for each bus/slot combination that becomes valid as VME buses
64are themselves registered. The probe routine is passed a pointer to the devices
65device structure. This pointer should be saved, it will be required for
66requesting VME resources.
67
68The driver can request ownership of one or more master windows, slave windows
69and/or dma channels. Rather than allowing the device driver to request a
specific window or DMA channel (which may be used by a different driver), the
VME core allows a resource to be assigned based on the required attributes of the
72driver in question:
73
74 struct vme_resource * vme_master_request(struct device *dev,
75 vme_address_t aspace, vme_cycle_t cycle, vme_width_t width);
76
77 struct vme_resource * vme_slave_request(struct device *dev,
78 vme_address_t aspace, vme_cycle_t cycle);
79
80 struct vme_resource *vme_dma_request(struct device *dev,
81 vme_dma_route_t route);
82
83For slave windows these attributes are split into those of type 'vme_address_t'
84and 'vme_cycle_t'. Master windows add a further set of attributes
85'vme_cycle_t'. These attributes are defined as bitmasks and as such any
86combination of the attributes can be requested for a single window, the core
87will assign a window that meets the requirements, returning a pointer of type
88vme_resource that should be used to identify the allocated resource when it is
89used. For DMA controllers, the request function requires the potential
90direction of any transfers to be provided in the route attributes. This is
91typically VME-to-MEM and/or MEM-to-VME, though some hardware can support
92VME-to-VME and MEM-to-MEM transfers as well as test pattern generation. If an
93unallocated window fitting the requirements can not be found a NULL pointer
94will be returned.
95
96Functions are also provided to free window allocations once they are no longer
97required. These functions should be passed the pointer to the resource provided
98during resource allocation:
99
100 void vme_master_free(struct vme_resource *res);
101
102 void vme_slave_free(struct vme_resource *res);
103
104 void vme_dma_free(struct vme_resource *res);
105
106
107Master windows
108==============
109
110Master windows provide access from the local processor[s] out onto the VME bus.
111The number of windows available and the available access modes is dependent on
112the underlying chipset. A window must be configured before it can be used.
113
114
115Master window configuration
116---------------------------
117
118Once a master window has been assigned the following functions can be used to
119configure it and retrieve the current settings:
120
121 int vme_master_set (struct vme_resource *res, int enabled,
122 unsigned long long base, unsigned long long size,
123 vme_address_t aspace, vme_cycle_t cycle, vme_width_t width);
124
125 int vme_master_get (struct vme_resource *res, int *enabled,
126 unsigned long long *base, unsigned long long *size,
127 vme_address_t *aspace, vme_cycle_t *cycle, vme_width_t *width);
128
129The address spaces, transfer widths and cycle types are the same as described
130under resource management, however some of the options are mutually exclusive.
131For example, only one address space may be specified.
132
133These functions return 0 on success or an error code should the call fail.
134
135
136Master window access
137--------------------
138
139The following functions can be used to read from and write to configured master
140windows. These functions return the number of bytes copied:
141
142 ssize_t vme_master_read(struct vme_resource *res, void *buf,
143 size_t count, loff_t offset);
144
145 ssize_t vme_master_write(struct vme_resource *res, void *buf,
146 size_t count, loff_t offset);
147
148In addition to simple reads and writes, a function is provided to do a
149read-modify-write transaction. This function returns the original value of the
150VME bus location :
151
152 unsigned int vme_master_rmw (struct vme_resource *res,
153 unsigned int mask, unsigned int compare, unsigned int swap,
154 loff_t offset);
155
This function reads the value at the given offset and applies the mask. If the
bits selected by the mask match the corresponding bits in the compare field,
the value of swap is written to the specified offset.
159
160
161Slave windows
162=============
163
164Slave windows provide devices on the VME bus access into mapped portions of the
165local memory. The number of windows available and the access modes that can be
166used is dependent on the underlying chipset. A window must be configured before
167it can be used.
168
169
170Slave window configuration
171--------------------------
172
173Once a slave window has been assigned the following functions can be used to
174configure it and retrieve the current settings:
175
176 int vme_slave_set (struct vme_resource *res, int enabled,
177 unsigned long long base, unsigned long long size,
178 dma_addr_t mem, vme_address_t aspace, vme_cycle_t cycle);
179
180 int vme_slave_get (struct vme_resource *res, int *enabled,
181 unsigned long long *base, unsigned long long *size,
182 dma_addr_t *mem, vme_address_t *aspace, vme_cycle_t *cycle);
183
184The address spaces, transfer widths and cycle types are the same as described
185under resource management, however some of the options are mutually exclusive.
186For example, only one address space may be specified.
187
188These functions return 0 on success or an error code should the call fail.
189
190
191Slave window buffer allocation
192------------------------------
193
Functions are provided to allow the user to allocate and free contiguous
buffers which will be accessible by the VME bridge. These functions do not have
196to be used, other methods can be used to allocate a buffer, though care must be
197taken to ensure that they are contiguous and accessible by the VME bridge:
198
199 void * vme_alloc_consistent(struct vme_resource *res, size_t size,
200 dma_addr_t *mem);
201
202 void vme_free_consistent(struct vme_resource *res, size_t size,
203 void *virt, dma_addr_t mem);
204
205
206Slave window access
207-------------------
208
209Slave windows map local memory onto the VME bus, the standard methods for
210accessing memory should be used.
211
212
213DMA channels
214============
215
216The VME DMA transfer provides the ability to run link-list DMA transfers. The
217API introduces the concept of DMA lists. Each DMA list is a link-list which can
218be passed to a DMA controller. Multiple lists can be created, extended,
219executed, reused and destroyed.
220
221
222List Management
223---------------
224
225The following functions are provided to create and destroy DMA lists. Execution
226of a list will not automatically destroy the list, thus enabling a list to be
227reused for repetitive tasks:
228
229 struct vme_dma_list *vme_new_dma_list(struct vme_resource *res);
230
231 int vme_dma_list_free(struct vme_dma_list *list);
232
233
234List Population
235---------------
236
237An item can be added to a list using the following function ( the source and
238destination attributes need to be created before calling this function, this is
239covered under "Transfer Attributes"):
240
241 int vme_dma_list_add(struct vme_dma_list *list,
242 struct vme_dma_attr *src, struct vme_dma_attr *dest,
243 size_t count);
244
245NOTE: The detailed attributes of the transfers source and destination
246 are not checked until an entry is added to a DMA list, the request
247 for a DMA channel purely checks the directions in which the
248 controller is expected to transfer data. As a result it is
249 possible for this call to return an error, for example if the
250 source or destination is in an unsupported VME address space.
251
252Transfer Attributes
253-------------------
254
255The attributes for the source and destination are handled separately from adding
256an item to a list. This is due to the diverse attributes required for each type
257of source and destination. There are functions to create attributes for PCI, VME
258and pattern sources and destinations (where appropriate):
259
260Pattern source:
261
262 struct vme_dma_attr *vme_dma_pattern_attribute(u32 pattern,
263 vme_pattern_t type);
264
265PCI source or destination:
266
267 struct vme_dma_attr *vme_dma_pci_attribute(dma_addr_t mem);
268
269VME source or destination:
270
271 struct vme_dma_attr *vme_dma_vme_attribute(unsigned long long base,
272 vme_address_t aspace, vme_cycle_t cycle, vme_width_t width);
273
274The following function should be used to free an attribute:
275
276 void vme_dma_free_attribute(struct vme_dma_attr *attr);
277
278
279List Execution
280--------------
281
282The following function queues a list for execution. The function will return
283once the list has been executed:
284
285 int vme_dma_list_exec(struct vme_dma_list *list);
286
287
288Interrupts
289==========
290
291The VME API provides functions to attach and detach callbacks to specific VME
292level and status ID combinations and for the generation of VME interrupts with
293specific VME level and status IDs.
294
295
296Attaching Interrupt Handlers
297----------------------------
298
299The following functions can be used to attach and free a specific VME level and
300status ID combination. Any given combination can only be assigned a single
301callback function. A void pointer parameter is provided, the value of which is
302passed to the callback function, the use of this pointer is user undefined:
303
304 int vme_irq_request(struct device *dev, int level, int statid,
305 void (*callback)(int, int, void *), void *priv);
306
307 void vme_irq_free(struct device *dev, int level, int statid);
308
309The callback parameters are as follows. Care must be taken in writing a callback
310function, callback functions run in interrupt context:
311
312 void callback(int level, int statid, void *priv);
313
314
315Interrupt Generation
316--------------------
317
318The following function can be used to generate a VME interrupt at a given VME
319level and VME status ID:
320
321 int vme_irq_generate(struct device *dev, int level, int statid);
322
323
324Location monitors
325=================
326
327The VME API provides the following functionality to configure the location
328monitor.
329
330
331Location Monitor Management
332---------------------------
333
334The following functions are provided to request the use of a block of location
335monitors and to free them after they are no longer required:
336
337 struct vme_resource * vme_lm_request(struct device *dev);
338
339 void vme_lm_free(struct vme_resource * res);
340
341Each block may provide a number of location monitors, monitoring adjacent
342locations. The following function can be used to determine how many locations
343are provided:
344
345 int vme_lm_count(struct vme_resource * res);
346
347
348Location Monitor Configuration
349------------------------------
350
351Once a bank of location monitors has been allocated, the following functions
352are provided to configure the location and mode of the location monitor:
353
354 int vme_lm_set(struct vme_resource *res, unsigned long long base,
355 vme_address_t aspace, vme_cycle_t cycle);
356
357 int vme_lm_get(struct vme_resource *res, unsigned long long *base,
358 vme_address_t *aspace, vme_cycle_t *cycle);
359
360
361Location Monitor Use
362--------------------
363
364The following functions allow a callback to be attached and detached from each
365location monitor location. Each location monitor can monitor a number of
366adjacent locations:
367
368 int vme_lm_attach(struct vme_resource *res, int num,
369 void (*callback)(int));
370
371 int vme_lm_detach(struct vme_resource *res, int num);
372
373The callback function is declared as follows.
374
375 void callback(int num);
376
377
378Slot Detection
379==============
380
381This function returns the slot ID of the provided bridge.
382
383 int vme_slot_get(struct device *dev);
diff --git a/drivers/staging/vme/vme_bridge.h b/drivers/staging/vme/vme_bridge.h
new file mode 100644
index 00000000000..4c6ec31b01d
--- /dev/null
+++ b/drivers/staging/vme/vme_bridge.h
@@ -0,0 +1,175 @@
#ifndef _VME_BRIDGE_H_
#define _VME_BRIDGE_H_

/* Size of the image mapped into CR/CSR space (508 kB). */
#define VME_CRCSR_BUF_SIZE (508*1024)
/* Maximum number of slots per VME bus. */
#define VME_SLOTS_MAX 32
/*
 * Resource structures
 */
/* One master window provided by a bridge. */
struct vme_master_resource {
	struct list_head list;
	struct vme_bridge *parent;
	/*
	 * We are likely to need to access the VME bus in interrupt context, so
	 * protect master routines with a spinlock rather than a mutex.
	 */
	spinlock_t lock;
	int locked;	/* non-zero while this window is owned by a driver */
	int number;	/* window index within the bridge */
	vme_address_t address_attr;	/* address-space attribute bitmask */
	vme_cycle_t cycle_attr;		/* cycle-type attribute bitmask */
	vme_width_t width_attr;		/* data-width attribute bitmask */
	struct resource bus_resource;
	void __iomem *kern_base;	/* kernel mapping of the window */
};

/* One slave window provided by a bridge. */
struct vme_slave_resource {
	struct list_head list;
	struct vme_bridge *parent;
	struct mutex mtx;	/* serialises configuration of this window */
	int locked;
	int number;
	vme_address_t address_attr;
	vme_cycle_t cycle_attr;
};

/* Private data for a VME_DMA_PATTERN vme_dma_attr. */
struct vme_dma_pattern {
	u32 pattern;
	vme_pattern_t type;
};

/* Private data for a VME_DMA_PCI vme_dma_attr. */
struct vme_dma_pci {
	dma_addr_t address;
};

/* Private data for a VME_DMA_VME vme_dma_attr. */
struct vme_dma_vme {
	unsigned long long address;
	vme_address_t aspace;
	vme_cycle_t cycle;
	vme_width_t dwidth;
};

/* A link-list of DMA transfers, built up entry by entry then executed. */
struct vme_dma_list {
	struct list_head list;		/* entry in the channel's pending list */
	struct vme_dma_resource *parent;
	struct list_head entries;	/* the individual transfer descriptors */
	struct mutex mtx;		/* serialises modification/execution */
};
58
/* One DMA controller channel provided by a bridge. */
struct vme_dma_resource {
	struct list_head list;
	struct vme_bridge *parent;
	struct mutex mtx;
	int locked;
	int number;
	struct list_head pending;	/* lists queued for execution */
	struct list_head running;	/* lists currently executing */
	vme_dma_route_t route_attr;	/* supported transfer directions */
};

/* One bank of location monitors provided by a bridge. */
struct vme_lm_resource {
	struct list_head list;
	struct vme_bridge *parent;
	struct mutex mtx;
	int locked;
	int number;
	int monitors;	/* number of adjacent locations in this bank */
};

/* Record of a bus error captured by the bridge. */
struct vme_bus_error {
	struct list_head list;
	unsigned long long address;
	u32 attributes;
};

/* Callback bound to one (level, statid) interrupt combination. */
struct vme_callback {
	void (*func)(int, int, void*);
	void *priv_data;
};

/* Per-level interrupt state: one callback slot per status ID. */
struct vme_irq {
	int count;	/* number of handlers attached at this level */
	struct vme_callback callback[255];
};

/* Allow 16 characters for name (including null character) */
#define VMENAMSIZ 16
97
/* This structure stores all the information about one bridge
 * The structure should be dynamically allocated by the driver and one instance
 * of the structure should be present for each VME chip present in the system.
 *
 * Currently we assume that all chips are PCI-based
 */
struct vme_bridge {
	char name[VMENAMSIZ];
	int num;		/* bus number assigned to this bridge */
	struct list_head master_resources;
	struct list_head slave_resources;
	struct list_head dma_resources;
	struct list_head lm_resources;

	struct list_head vme_errors;	/* List for errors generated on VME */

	/* Bridge Info - XXX Move to private structure? */
	struct device *parent;	/* Generic device struct (pdev->dev for PCI) */
	void *driver_priv;	/* Private pointer for the bridge driver */

	struct device dev[VME_SLOTS_MAX];	/* Device registered with
						 * device model on VME bus
						 */

	/* Interrupt callbacks */
	/* One entry per IRQ level; presumably indexed level-1 — confirm. */
	struct vme_irq irq[7];
	/* Locking for VME irq callback configuration */
	struct mutex irq_mtx;

	/* Slave Functions */
	int (*slave_get) (struct vme_slave_resource *, int *,
		unsigned long long *, unsigned long long *, dma_addr_t *,
		vme_address_t *, vme_cycle_t *);
	int (*slave_set) (struct vme_slave_resource *, int, unsigned long long,
		unsigned long long, dma_addr_t, vme_address_t, vme_cycle_t);

	/* Master Functions */
	int (*master_get) (struct vme_master_resource *, int *,
		unsigned long long *, unsigned long long *, vme_address_t *,
		vme_cycle_t *, vme_width_t *);
	int (*master_set) (struct vme_master_resource *, int,
		unsigned long long, unsigned long long,  vme_address_t,
		vme_cycle_t, vme_width_t);
	ssize_t (*master_read) (struct vme_master_resource *, void *, size_t,
		loff_t);
	ssize_t (*master_write) (struct vme_master_resource *, void *, size_t,
		loff_t);
	unsigned int (*master_rmw) (struct vme_master_resource *, unsigned int,
		unsigned int, unsigned int, loff_t);

	/* DMA Functions */
	int (*dma_list_add) (struct vme_dma_list *, struct vme_dma_attr *,
		struct vme_dma_attr *, size_t);
	int (*dma_list_exec) (struct vme_dma_list *);
	int (*dma_list_empty) (struct vme_dma_list *);

	/* Interrupt Functions */
	void (*irq_set) (struct vme_bridge *, int, int, int);
	int (*irq_generate) (struct vme_bridge *, int, int);

	/* Location monitor functions */
	int (*lm_set) (struct vme_lm_resource *, unsigned long long,
		vme_address_t, vme_cycle_t);
	int (*lm_get) (struct vme_lm_resource *, unsigned long long *,
		vme_address_t *, vme_cycle_t *);
	int (*lm_attach) (struct vme_lm_resource *, int, void (*callback)(int));
	int (*lm_detach) (struct vme_lm_resource *, int);

	/* CR/CSR space functions */
	int (*slot_get) (struct vme_bridge *);
};

/* Called by bridge drivers to dispatch a VME interrupt to its callback. */
void vme_irq_handler(struct vme_bridge *, int, int);

/* Bridge registration with the VME core. */
int vme_register_bridge(struct vme_bridge *);
void vme_unregister_bridge(struct vme_bridge *);

#endif /* _VME_BRIDGE_H_ */