author      Martyn Welch <martyn.welch@ge.com>      2010-02-18 10:13:05 -0500
committer   Greg Kroah-Hartman <gregkh@suse.de>     2010-03-03 19:43:00 -0500
commit      29848ac9f3b33bf171439ae2d66d40e6a71446c4 (patch)
tree        ba2e49f2e3ff4a47188dd5fe48690e1b55d6b3ef
parent      4f723df45d3952c485ee0125fb6797ad615901c3 (diff)
Staging: vme: Enable drivers to handle more than one bridge
At the moment the vme bridge drivers are written in a way that only allows
them to support one bridge at a time. Modify the drivers to enable more than
one bridge to be present per board.

Signed-off-by: Martyn Welch <martyn.welch@ge.com>
Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
-rw-r--r--  drivers/staging/vme/TODO                    |   2
-rw-r--r--  drivers/staging/vme/bridges/vme_ca91cx42.c  | 411
-rw-r--r--  drivers/staging/vme/bridges/vme_ca91cx42.h  |  16
-rw-r--r--  drivers/staging/vme/bridges/vme_tsi148.c    | 634
-rw-r--r--  drivers/staging/vme/bridges/vme_tsi148.h    |  16
-rw-r--r--  drivers/staging/vme/vme.c                   |  10
-rw-r--r--  drivers/staging/vme/vme_bridge.h            |   8
7 files changed, 633 insertions, 464 deletions
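The diff below applies one pattern to both bridge drivers: state that used to live in
file-scope globals (register base, wait queues, mutexes, CR/CSR buffers) moves into a
per-bridge structure reached through the bridge's driver_priv pointer, and the handlers
take the bridge (or its private structure) as a parameter. The following is a minimal
sketch of that pattern only — the example_* names are illustrative and not from the
patch, and kzalloc() stands in for the patch's kmalloc()/memset() pair; driver_priv,
struct vme_bridge and the wait-queue usage match the code being changed.

/*
 * Sketch of the per-bridge state pattern (illustrative names).
 * Each probed bridge gets its own private structure, so two bridges
 * on one board no longer share and clobber the same globals.
 */
#include <linux/slab.h>
#include <linux/wait.h>

#include "../vme_bridge.h"	/* struct vme_bridge, driver_priv */

struct example_bridge_priv {	/* hypothetical; mirrors ca91cx42_driver */
	void *base;			/* registers of *this* bridge only */
	wait_queue_head_t dma_queue;
};

/* Probe allocates one private structure per bridge instance. */
static int example_probe(struct vme_bridge *bridge, void *regs)
{
	struct example_bridge_priv *priv;

	priv = kzalloc(sizeof(*priv), GFP_KERNEL);
	if (!priv)
		return -ENOMEM;

	priv->base = regs;
	init_waitqueue_head(&priv->dma_queue);

	/* Handlers later find their state here instead of in globals. */
	bridge->driver_priv = priv;
	return 0;
}

/*
 * Handlers receive the bridge rather than touching a single static
 * pointer, so each interrupt wakes the queue of the bridge it belongs to.
 */
static void example_dma_done(struct vme_bridge *bridge)
{
	struct example_bridge_priv *priv = bridge->driver_priv;

	wake_up(&priv->dma_queue);
}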
diff --git a/drivers/staging/vme/TODO b/drivers/staging/vme/TODO
index bdc5f6248bcc..83d44cbf9a5f 100644
--- a/drivers/staging/vme/TODO
+++ b/drivers/staging/vme/TODO
@@ -47,7 +47,6 @@ Bridge Support
47Tempe (tsi148) 47Tempe (tsi148)
48-------------- 48--------------
49 49
50- Driver can currently only support a single bridge.
51- 2eSST Broadcast mode. 50- 2eSST Broadcast mode.
52- Mailboxes unsupported. 51- Mailboxes unsupported.
53- Improve error detection. 52- Improve error detection.
@@ -58,7 +57,6 @@ Tempe (tsi148)
58Universe II (ca91c142) 57Universe II (ca91c142)
59---------------------- 58----------------------
60 59
61- Driver can currently only support a single bridge.
62- DMA unsupported. 60- DMA unsupported.
63- RMW transactions unsupported. 61- RMW transactions unsupported.
64- Location Monitors unsupported. 62- Location Monitors unsupported.
diff --git a/drivers/staging/vme/bridges/vme_ca91cx42.c b/drivers/staging/vme/bridges/vme_ca91cx42.c
index 7eaba3511ea8..c2f86a6996d5 100644
--- a/drivers/staging/vme/bridges/vme_ca91cx42.c
+++ b/drivers/staging/vme/bridges/vme_ca91cx42.c
@@ -41,24 +41,6 @@ static void __exit ca91cx42_exit(void);
41/* Module parameters */ 41/* Module parameters */
42static int geoid; 42static int geoid;
43 43
44static struct vme_bridge *ca91cx42_bridge;
45static wait_queue_head_t dma_queue;
46static wait_queue_head_t iack_queue;
47#if 0
48static wait_queue_head_t lm_queue;
49#endif
50static wait_queue_head_t mbox_queue;
51
52static void (*lm_callback[4])(int); /* Called in interrupt handler */
53static void *crcsr_kernel;
54static dma_addr_t crcsr_bus;
55
56static struct mutex vme_rmw; /* Only one RMW cycle at a time */
57static struct mutex vme_int; /*
58 * Only one VME interrupt can be
59 * generated at a time, provide locking
60 */
61
62static char driver_name[] = "vme_ca91cx42"; 44static char driver_name[] = "vme_ca91cx42";
63 45
64static const struct pci_device_id ca91cx42_ids[] = { 46static const struct pci_device_id ca91cx42_ids[] = {
@@ -73,14 +55,14 @@ static struct pci_driver ca91cx42_driver = {
73 .remove = ca91cx42_remove, 55 .remove = ca91cx42_remove,
74}; 56};
75 57
76static u32 ca91cx42_DMA_irqhandler(void) 58static u32 ca91cx42_DMA_irqhandler(struct ca91cx42_driver *bridge)
77{ 59{
78 wake_up(&dma_queue); 60 wake_up(&(bridge->dma_queue));
79 61
80 return CA91CX42_LINT_DMA; 62 return CA91CX42_LINT_DMA;
81} 63}
82 64
83static u32 ca91cx42_LM_irqhandler(u32 stat) 65static u32 ca91cx42_LM_irqhandler(struct ca91cx42_driver *bridge, u32 stat)
84{ 66{
85 int i; 67 int i;
86 u32 serviced = 0; 68 u32 serviced = 0;
@@ -88,7 +70,7 @@ static u32 ca91cx42_LM_irqhandler(u32 stat)
88 for (i = 0; i < 4; i++) { 70 for (i = 0; i < 4; i++) {
89 if (stat & CA91CX42_LINT_LM[i]) { 71 if (stat & CA91CX42_LINT_LM[i]) {
90 /* We only enable interrupts if the callback is set */ 72 /* We only enable interrupts if the callback is set */
91 lm_callback[i](i); 73 bridge->lm_callback[i](i);
92 serviced |= CA91CX42_LINT_LM[i]; 74 serviced |= CA91CX42_LINT_LM[i];
93 } 75 }
94 } 76 }
@@ -97,16 +79,16 @@ static u32 ca91cx42_LM_irqhandler(u32 stat)
97} 79}
98 80
99/* XXX This needs to be split into 4 queues */ 81/* XXX This needs to be split into 4 queues */
100static u32 ca91cx42_MB_irqhandler(int mbox_mask) 82static u32 ca91cx42_MB_irqhandler(struct ca91cx42_driver *bridge, int mbox_mask)
101{ 83{
102 wake_up(&mbox_queue); 84 wake_up(&(bridge->mbox_queue));
103 85
104 return CA91CX42_LINT_MBOX; 86 return CA91CX42_LINT_MBOX;
105} 87}
106 88
107static u32 ca91cx42_IACK_irqhandler(void) 89static u32 ca91cx42_IACK_irqhandler(struct ca91cx42_driver *bridge)
108{ 90{
109 wake_up(&iack_queue); 91 wake_up(&(bridge->iack_queue));
110 92
111 return CA91CX42_LINT_SW_IACK; 93 return CA91CX42_LINT_SW_IACK;
112} 94}
@@ -115,22 +97,22 @@ static u32 ca91cx42_IACK_irqhandler(void)
115int ca91cx42_bus_error_chk(int clrflag) 97int ca91cx42_bus_error_chk(int clrflag)
116{ 98{
117 int tmp; 99 int tmp;
118 tmp = ioread32(ca91cx42_bridge->base + PCI_COMMAND); 100 tmp = ioread32(bridge->base + PCI_COMMAND);
119 if (tmp & 0x08000000) { /* S_TA is Set */ 101 if (tmp & 0x08000000) { /* S_TA is Set */
120 if (clrflag) 102 if (clrflag)
121 iowrite32(tmp | 0x08000000, 103 iowrite32(tmp | 0x08000000,
122 ca91cx42_bridge->base + PCI_COMMAND); 104 bridge->base + PCI_COMMAND);
123 return 1; 105 return 1;
124 } 106 }
125 return 0; 107 return 0;
126} 108}
127#endif 109#endif
128 110
129static u32 ca91cx42_VERR_irqhandler(void) 111static u32 ca91cx42_VERR_irqhandler(struct ca91cx42_driver *bridge)
130{ 112{
131 int val; 113 int val;
132 114
133 val = ioread32(ca91cx42_bridge->base + DGCS); 115 val = ioread32(bridge->base + DGCS);
134 116
135 if (!(val & 0x00000800)) { 117 if (!(val & 0x00000800)) {
136 printk(KERN_ERR "ca91c042: ca91cx42_VERR_irqhandler DMA Read " 118 printk(KERN_ERR "ca91c042: ca91cx42_VERR_irqhandler DMA Read "
@@ -140,11 +122,11 @@ static u32 ca91cx42_VERR_irqhandler(void)
140 return CA91CX42_LINT_VERR; 122 return CA91CX42_LINT_VERR;
141} 123}
142 124
143static u32 ca91cx42_LERR_irqhandler(void) 125static u32 ca91cx42_LERR_irqhandler(struct ca91cx42_driver *bridge)
144{ 126{
145 int val; 127 int val;
146 128
147 val = ioread32(ca91cx42_bridge->base + DGCS); 129 val = ioread32(bridge->base + DGCS);
148 130
149 if (!(val & 0x00000800)) { 131 if (!(val & 0x00000800)) {
150 printk(KERN_ERR "ca91c042: ca91cx42_LERR_irqhandler DMA Read " 132 printk(KERN_ERR "ca91c042: ca91cx42_LERR_irqhandler DMA Read "
@@ -156,13 +138,18 @@ static u32 ca91cx42_LERR_irqhandler(void)
156} 138}
157 139
158 140
159static u32 ca91cx42_VIRQ_irqhandler(int stat) 141static u32 ca91cx42_VIRQ_irqhandler(struct vme_bridge *ca91cx42_bridge,
142 int stat)
160{ 143{
161 int vec, i, serviced = 0; 144 int vec, i, serviced = 0;
145 struct ca91cx42_driver *bridge;
146
147 bridge = ca91cx42_bridge->driver_priv;
148
162 149
163 for (i = 7; i > 0; i--) { 150 for (i = 7; i > 0; i--) {
164 if (stat & (1 << i)) { 151 if (stat & (1 << i)) {
165 vec = ioread32(ca91cx42_bridge->base + 152 vec = ioread32(bridge->base +
166 CA91CX42_V_STATID[i]) & 0xff; 153 CA91CX42_V_STATID[i]) & 0xff;
167 154
168 vme_irq_handler(ca91cx42_bridge, i, vec); 155 vme_irq_handler(ca91cx42_bridge, i, vec);
@@ -174,15 +161,18 @@ static u32 ca91cx42_VIRQ_irqhandler(int stat)
174 return serviced; 161 return serviced;
175} 162}
176 163
177static irqreturn_t ca91cx42_irqhandler(int irq, void *dev_id) 164static irqreturn_t ca91cx42_irqhandler(int irq, void *ptr)
178{ 165{
179 u32 stat, enable, serviced = 0; 166 u32 stat, enable, serviced = 0;
167 struct vme_bridge *ca91cx42_bridge;
168 struct ca91cx42_driver *bridge;
180 169
181 if (dev_id != ca91cx42_bridge->base) 170 ca91cx42_bridge = ptr;
182 return IRQ_NONE;
183 171
184 enable = ioread32(ca91cx42_bridge->base + LINT_EN); 172 bridge = ca91cx42_bridge->driver_priv;
185 stat = ioread32(ca91cx42_bridge->base + LINT_STAT); 173
174 enable = ioread32(bridge->base + LINT_EN);
175 stat = ioread32(bridge->base + LINT_STAT);
186 176
187 /* Only look at unmasked interrupts */ 177 /* Only look at unmasked interrupts */
188 stat &= enable; 178 stat &= enable;
@@ -191,42 +181,45 @@ static irqreturn_t ca91cx42_irqhandler(int irq, void *dev_id)
191 return IRQ_NONE; 181 return IRQ_NONE;
192 182
193 if (stat & CA91CX42_LINT_DMA) 183 if (stat & CA91CX42_LINT_DMA)
194 serviced |= ca91cx42_DMA_irqhandler(); 184 serviced |= ca91cx42_DMA_irqhandler(bridge);
195 if (stat & (CA91CX42_LINT_LM0 | CA91CX42_LINT_LM1 | CA91CX42_LINT_LM2 | 185 if (stat & (CA91CX42_LINT_LM0 | CA91CX42_LINT_LM1 | CA91CX42_LINT_LM2 |
196 CA91CX42_LINT_LM3)) 186 CA91CX42_LINT_LM3))
197 serviced |= ca91cx42_LM_irqhandler(stat); 187 serviced |= ca91cx42_LM_irqhandler(bridge, stat);
198 if (stat & CA91CX42_LINT_MBOX) 188 if (stat & CA91CX42_LINT_MBOX)
199 serviced |= ca91cx42_MB_irqhandler(stat); 189 serviced |= ca91cx42_MB_irqhandler(bridge, stat);
200 if (stat & CA91CX42_LINT_SW_IACK) 190 if (stat & CA91CX42_LINT_SW_IACK)
201 serviced |= ca91cx42_IACK_irqhandler(); 191 serviced |= ca91cx42_IACK_irqhandler(bridge);
202 if (stat & CA91CX42_LINT_VERR) 192 if (stat & CA91CX42_LINT_VERR)
203 serviced |= ca91cx42_VERR_irqhandler(); 193 serviced |= ca91cx42_VERR_irqhandler(bridge);
204 if (stat & CA91CX42_LINT_LERR) 194 if (stat & CA91CX42_LINT_LERR)
205 serviced |= ca91cx42_LERR_irqhandler(); 195 serviced |= ca91cx42_LERR_irqhandler(bridge);
206 if (stat & (CA91CX42_LINT_VIRQ1 | CA91CX42_LINT_VIRQ2 | 196 if (stat & (CA91CX42_LINT_VIRQ1 | CA91CX42_LINT_VIRQ2 |
207 CA91CX42_LINT_VIRQ3 | CA91CX42_LINT_VIRQ4 | 197 CA91CX42_LINT_VIRQ3 | CA91CX42_LINT_VIRQ4 |
208 CA91CX42_LINT_VIRQ5 | CA91CX42_LINT_VIRQ6 | 198 CA91CX42_LINT_VIRQ5 | CA91CX42_LINT_VIRQ6 |
209 CA91CX42_LINT_VIRQ7)) 199 CA91CX42_LINT_VIRQ7))
210 serviced |= ca91cx42_VIRQ_irqhandler(stat); 200 serviced |= ca91cx42_VIRQ_irqhandler(ca91cx42_bridge, stat);
211 201
212 /* Clear serviced interrupts */ 202 /* Clear serviced interrupts */
213 iowrite32(stat, ca91cx42_bridge->base + LINT_STAT); 203 iowrite32(stat, bridge->base + LINT_STAT);
214 204
215 return IRQ_HANDLED; 205 return IRQ_HANDLED;
216} 206}
217 207
218static int ca91cx42_irq_init(struct vme_bridge *bridge) 208static int ca91cx42_irq_init(struct vme_bridge *ca91cx42_bridge)
219{ 209{
220 int result, tmp; 210 int result, tmp;
221 struct pci_dev *pdev; 211 struct pci_dev *pdev;
212 struct ca91cx42_driver *bridge;
213
214 bridge = ca91cx42_bridge->driver_priv;
222 215
223 /* Need pdev */ 216 /* Need pdev */
224 pdev = container_of(bridge->parent, struct pci_dev, dev); 217 pdev = container_of(ca91cx42_bridge->parent, struct pci_dev, dev);
225 218
226 /* Initialise list for VME bus errors */ 219 /* Initialise list for VME bus errors */
227 INIT_LIST_HEAD(&(bridge->vme_errors)); 220 INIT_LIST_HEAD(&(ca91cx42_bridge->vme_errors));
228 221
229 mutex_init(&(bridge->irq_mtx)); 222 mutex_init(&(ca91cx42_bridge->irq_mtx));
230 223
231 /* Disable interrupts from PCI to VME */ 224 /* Disable interrupts from PCI to VME */
232 iowrite32(0, bridge->base + VINT_EN); 225 iowrite32(0, bridge->base + VINT_EN);
@@ -237,7 +230,7 @@ static int ca91cx42_irq_init(struct vme_bridge *bridge)
237 iowrite32(0x00FFFFFF, bridge->base + LINT_STAT); 230 iowrite32(0x00FFFFFF, bridge->base + LINT_STAT);
238 231
239 result = request_irq(pdev->irq, ca91cx42_irqhandler, IRQF_SHARED, 232 result = request_irq(pdev->irq, ca91cx42_irqhandler, IRQF_SHARED,
240 driver_name, pdev); 233 driver_name, ca91cx42_bridge);
241 if (result) { 234 if (result) {
242 dev_err(&pdev->dev, "Can't get assigned pci irq vector %02X\n", 235 dev_err(&pdev->dev, "Can't get assigned pci irq vector %02X\n",
243 pdev->irq); 236 pdev->irq);
@@ -259,15 +252,16 @@ static int ca91cx42_irq_init(struct vme_bridge *bridge)
259 return 0; 252 return 0;
260} 253}
261 254
262static void ca91cx42_irq_exit(struct pci_dev *pdev) 255static void ca91cx42_irq_exit(struct ca91cx42_driver *bridge,
256 struct pci_dev *pdev)
263{ 257{
264 /* Disable interrupts from PCI to VME */ 258 /* Disable interrupts from PCI to VME */
265 iowrite32(0, ca91cx42_bridge->base + VINT_EN); 259 iowrite32(0, bridge->base + VINT_EN);
266 260
267 /* Disable PCI interrupts */ 261 /* Disable PCI interrupts */
268 iowrite32(0, ca91cx42_bridge->base + LINT_EN); 262 iowrite32(0, bridge->base + LINT_EN);
269 /* Clear Any Pending PCI Interrupts */ 263 /* Clear Any Pending PCI Interrupts */
270 iowrite32(0x00FFFFFF, ca91cx42_bridge->base + LINT_STAT); 264 iowrite32(0x00FFFFFF, bridge->base + LINT_STAT);
271 265
272 free_irq(pdev->irq, pdev); 266 free_irq(pdev->irq, pdev);
273} 267}
@@ -275,21 +269,25 @@ static void ca91cx42_irq_exit(struct pci_dev *pdev)
275/* 269/*
276 * Set up an VME interrupt 270 * Set up an VME interrupt
277 */ 271 */
278void ca91cx42_irq_set(int level, int state, int sync) 272void ca91cx42_irq_set(struct vme_bridge *ca91cx42_bridge, int level, int state,
273 int sync)
279 274
280{ 275{
281 struct pci_dev *pdev; 276 struct pci_dev *pdev;
282 u32 tmp; 277 u32 tmp;
278 struct ca91cx42_driver *bridge;
279
280 bridge = ca91cx42_bridge->driver_priv;
283 281
284 /* Enable IRQ level */ 282 /* Enable IRQ level */
285 tmp = ioread32(ca91cx42_bridge->base + LINT_EN); 283 tmp = ioread32(bridge->base + LINT_EN);
286 284
287 if (state == 0) 285 if (state == 0)
288 tmp &= ~CA91CX42_LINT_VIRQ[level]; 286 tmp &= ~CA91CX42_LINT_VIRQ[level];
289 else 287 else
290 tmp |= CA91CX42_LINT_VIRQ[level]; 288 tmp |= CA91CX42_LINT_VIRQ[level];
291 289
292 iowrite32(tmp, ca91cx42_bridge->base + LINT_EN); 290 iowrite32(tmp, bridge->base + LINT_EN);
293 291
294 if ((state == 0) && (sync != 0)) { 292 if ((state == 0) && (sync != 0)) {
295 pdev = container_of(ca91cx42_bridge->parent, struct pci_dev, 293 pdev = container_of(ca91cx42_bridge->parent, struct pci_dev,
@@ -299,34 +297,38 @@ void ca91cx42_irq_set(int level, int state, int sync)
299 } 297 }
300} 298}
301 299
302int ca91cx42_irq_generate(int level, int statid) 300int ca91cx42_irq_generate(struct vme_bridge *ca91cx42_bridge, int level,
301 int statid)
303{ 302{
304 u32 tmp; 303 u32 tmp;
304 struct ca91cx42_driver *bridge;
305
306 bridge = ca91cx42_bridge->driver_priv;
305 307
306 /* Universe can only generate even vectors */ 308 /* Universe can only generate even vectors */
307 if (statid & 1) 309 if (statid & 1)
308 return -EINVAL; 310 return -EINVAL;
309 311
310 mutex_lock(&(vme_int)); 312 mutex_lock(&(bridge->vme_int));
311 313
312 tmp = ioread32(ca91cx42_bridge->base + VINT_EN); 314 tmp = ioread32(bridge->base + VINT_EN);
313 315
314 /* Set Status/ID */ 316 /* Set Status/ID */
315 iowrite32(statid << 24, ca91cx42_bridge->base + STATID); 317 iowrite32(statid << 24, bridge->base + STATID);
316 318
317 /* Assert VMEbus IRQ */ 319 /* Assert VMEbus IRQ */
318 tmp = tmp | (1 << (level + 24)); 320 tmp = tmp | (1 << (level + 24));
319 iowrite32(tmp, ca91cx42_bridge->base + VINT_EN); 321 iowrite32(tmp, bridge->base + VINT_EN);
320 322
321 /* Wait for IACK */ 323 /* Wait for IACK */
322 wait_event_interruptible(iack_queue, 0); 324 wait_event_interruptible(bridge->iack_queue, 0);
323 325
324 /* Return interrupt to low state */ 326 /* Return interrupt to low state */
325 tmp = ioread32(ca91cx42_bridge->base + VINT_EN); 327 tmp = ioread32(bridge->base + VINT_EN);
326 tmp = tmp & ~(1 << (level + 24)); 328 tmp = tmp & ~(1 << (level + 24));
327 iowrite32(tmp, ca91cx42_bridge->base + VINT_EN); 329 iowrite32(tmp, bridge->base + VINT_EN);
328 330
329 mutex_unlock(&(vme_int)); 331 mutex_unlock(&(bridge->vme_int));
330 332
331 return 0; 333 return 0;
332} 334}
@@ -338,6 +340,9 @@ int ca91cx42_slave_set(struct vme_slave_resource *image, int enabled,
338 unsigned int i, addr = 0, granularity = 0; 340 unsigned int i, addr = 0, granularity = 0;
339 unsigned int temp_ctl = 0; 341 unsigned int temp_ctl = 0;
340 unsigned int vme_bound, pci_offset; 342 unsigned int vme_bound, pci_offset;
343 struct ca91cx42_driver *bridge;
344
345 bridge = image->parent->driver_priv;
341 346
342 i = image->number; 347 i = image->number;
343 348
@@ -397,14 +402,14 @@ int ca91cx42_slave_set(struct vme_slave_resource *image, int enabled,
397 } 402 }
398 403
399 /* Disable while we are mucking around */ 404 /* Disable while we are mucking around */
400 temp_ctl = ioread32(ca91cx42_bridge->base + CA91CX42_VSI_CTL[i]); 405 temp_ctl = ioread32(bridge->base + CA91CX42_VSI_CTL[i]);
401 temp_ctl &= ~CA91CX42_VSI_CTL_EN; 406 temp_ctl &= ~CA91CX42_VSI_CTL_EN;
402 iowrite32(temp_ctl, ca91cx42_bridge->base + CA91CX42_VSI_CTL[i]); 407 iowrite32(temp_ctl, bridge->base + CA91CX42_VSI_CTL[i]);
403 408
404 /* Setup mapping */ 409 /* Setup mapping */
405 iowrite32(vme_base, ca91cx42_bridge->base + CA91CX42_VSI_BS[i]); 410 iowrite32(vme_base, bridge->base + CA91CX42_VSI_BS[i]);
406 iowrite32(vme_bound, ca91cx42_bridge->base + CA91CX42_VSI_BD[i]); 411 iowrite32(vme_bound, bridge->base + CA91CX42_VSI_BD[i]);
407 iowrite32(pci_offset, ca91cx42_bridge->base + CA91CX42_VSI_TO[i]); 412 iowrite32(pci_offset, bridge->base + CA91CX42_VSI_TO[i]);
408 413
409/* XXX Prefetch stuff currently unsupported */ 414/* XXX Prefetch stuff currently unsupported */
410#if 0 415#if 0
@@ -434,12 +439,12 @@ int ca91cx42_slave_set(struct vme_slave_resource *image, int enabled,
434 temp_ctl |= CA91CX42_VSI_CTL_PGM_DATA; 439 temp_ctl |= CA91CX42_VSI_CTL_PGM_DATA;
435 440
436 /* Write ctl reg without enable */ 441 /* Write ctl reg without enable */
437 iowrite32(temp_ctl, ca91cx42_bridge->base + CA91CX42_VSI_CTL[i]); 442 iowrite32(temp_ctl, bridge->base + CA91CX42_VSI_CTL[i]);
438 443
439 if (enabled) 444 if (enabled)
440 temp_ctl |= CA91CX42_VSI_CTL_EN; 445 temp_ctl |= CA91CX42_VSI_CTL_EN;
441 446
442 iowrite32(temp_ctl, ca91cx42_bridge->base + CA91CX42_VSI_CTL[i]); 447 iowrite32(temp_ctl, bridge->base + CA91CX42_VSI_CTL[i]);
443 448
444 return 0; 449 return 0;
445} 450}
@@ -450,6 +455,9 @@ int ca91cx42_slave_get(struct vme_slave_resource *image, int *enabled,
450{ 455{
451 unsigned int i, granularity = 0, ctl = 0; 456 unsigned int i, granularity = 0, ctl = 0;
452 unsigned long long vme_bound, pci_offset; 457 unsigned long long vme_bound, pci_offset;
458 struct ca91cx42_driver *bridge;
459
460 bridge = image->parent->driver_priv;
453 461
454 i = image->number; 462 i = image->number;
455 463
@@ -459,11 +467,11 @@ int ca91cx42_slave_get(struct vme_slave_resource *image, int *enabled,
459 granularity = 0x10000; 467 granularity = 0x10000;
460 468
461 /* Read Registers */ 469 /* Read Registers */
462 ctl = ioread32(ca91cx42_bridge->base + CA91CX42_VSI_CTL[i]); 470 ctl = ioread32(bridge->base + CA91CX42_VSI_CTL[i]);
463 471
464 *vme_base = ioread32(ca91cx42_bridge->base + CA91CX42_VSI_BS[i]); 472 *vme_base = ioread32(bridge->base + CA91CX42_VSI_BS[i]);
465 vme_bound = ioread32(ca91cx42_bridge->base + CA91CX42_VSI_BD[i]); 473 vme_bound = ioread32(bridge->base + CA91CX42_VSI_BD[i]);
466 pci_offset = ioread32(ca91cx42_bridge->base + CA91CX42_VSI_TO[i]); 474 pci_offset = ioread32(bridge->base + CA91CX42_VSI_TO[i]);
467 475
468 *pci_base = (dma_addr_t)vme_base + pci_offset; 476 *pci_base = (dma_addr_t)vme_base + pci_offset;
469 *size = (unsigned long long)((vme_bound - *vme_base) + granularity); 477 *size = (unsigned long long)((vme_bound - *vme_base) + granularity);
@@ -507,6 +515,9 @@ static int ca91cx42_alloc_resource(struct vme_master_resource *image,
507 unsigned long long existing_size; 515 unsigned long long existing_size;
508 int retval = 0; 516 int retval = 0;
509 struct pci_dev *pdev; 517 struct pci_dev *pdev;
518 struct vme_bridge *ca91cx42_bridge;
519
520 ca91cx42_bridge = image->parent;
510 521
511 /* Find pci_dev container of dev */ 522 /* Find pci_dev container of dev */
512 if (ca91cx42_bridge->parent == NULL) { 523 if (ca91cx42_bridge->parent == NULL) {
@@ -601,6 +612,9 @@ int ca91cx42_master_set(struct vme_master_resource *image, int enabled,
601 unsigned int i; 612 unsigned int i;
602 unsigned int temp_ctl = 0; 613 unsigned int temp_ctl = 0;
603 unsigned long long pci_bound, vme_offset, pci_base; 614 unsigned long long pci_bound, vme_offset, pci_base;
615 struct ca91cx42_driver *bridge;
616
617 bridge = image->parent->driver_priv;
604 618
605 /* Verify input data */ 619 /* Verify input data */
606 if (vme_base & 0xFFF) { 620 if (vme_base & 0xFFF) {
@@ -644,9 +658,9 @@ int ca91cx42_master_set(struct vme_master_resource *image, int enabled,
644 i = image->number; 658 i = image->number;
645 659
646 /* Disable while we are mucking around */ 660 /* Disable while we are mucking around */
647 temp_ctl = ioread32(ca91cx42_bridge->base + CA91CX42_LSI_CTL[i]); 661 temp_ctl = ioread32(bridge->base + CA91CX42_LSI_CTL[i]);
648 temp_ctl &= ~CA91CX42_LSI_CTL_EN; 662 temp_ctl &= ~CA91CX42_LSI_CTL_EN;
649 iowrite32(temp_ctl, ca91cx42_bridge->base + CA91CX42_LSI_CTL[i]); 663 iowrite32(temp_ctl, bridge->base + CA91CX42_LSI_CTL[i]);
650 664
651/* XXX Prefetch stuff currently unsupported */ 665/* XXX Prefetch stuff currently unsupported */
652#if 0 666#if 0
@@ -723,17 +737,17 @@ int ca91cx42_master_set(struct vme_master_resource *image, int enabled,
723 temp_ctl |= CA91CX42_LSI_CTL_PGM_PGM; 737 temp_ctl |= CA91CX42_LSI_CTL_PGM_PGM;
724 738
725 /* Setup mapping */ 739 /* Setup mapping */
726 iowrite32(pci_base, ca91cx42_bridge->base + CA91CX42_LSI_BS[i]); 740 iowrite32(pci_base, bridge->base + CA91CX42_LSI_BS[i]);
727 iowrite32(pci_bound, ca91cx42_bridge->base + CA91CX42_LSI_BD[i]); 741 iowrite32(pci_bound, bridge->base + CA91CX42_LSI_BD[i]);
728 iowrite32(vme_offset, ca91cx42_bridge->base + CA91CX42_LSI_TO[i]); 742 iowrite32(vme_offset, bridge->base + CA91CX42_LSI_TO[i]);
729 743
730 /* Write ctl reg without enable */ 744 /* Write ctl reg without enable */
731 iowrite32(temp_ctl, ca91cx42_bridge->base + CA91CX42_LSI_CTL[i]); 745 iowrite32(temp_ctl, bridge->base + CA91CX42_LSI_CTL[i]);
732 746
733 if (enabled) 747 if (enabled)
734 temp_ctl |= CA91CX42_LSI_CTL_EN; 748 temp_ctl |= CA91CX42_LSI_CTL_EN;
735 749
736 iowrite32(temp_ctl, ca91cx42_bridge->base + CA91CX42_LSI_CTL[i]); 750 iowrite32(temp_ctl, bridge->base + CA91CX42_LSI_CTL[i]);
737 751
738 spin_unlock(&(image->lock)); 752 spin_unlock(&(image->lock));
739 return 0; 753 return 0;
@@ -752,14 +766,17 @@ int __ca91cx42_master_get(struct vme_master_resource *image, int *enabled,
752{ 766{
753 unsigned int i, ctl; 767 unsigned int i, ctl;
754 unsigned long long pci_base, pci_bound, vme_offset; 768 unsigned long long pci_base, pci_bound, vme_offset;
769 struct ca91cx42_driver *bridge;
770
771 bridge = image->parent->driver_priv;
755 772
756 i = image->number; 773 i = image->number;
757 774
758 ctl = ioread32(ca91cx42_bridge->base + CA91CX42_LSI_CTL[i]); 775 ctl = ioread32(bridge->base + CA91CX42_LSI_CTL[i]);
759 776
760 pci_base = ioread32(ca91cx42_bridge->base + CA91CX42_LSI_BS[i]); 777 pci_base = ioread32(bridge->base + CA91CX42_LSI_BS[i]);
761 vme_offset = ioread32(ca91cx42_bridge->base + CA91CX42_LSI_TO[i]); 778 vme_offset = ioread32(bridge->base + CA91CX42_LSI_TO[i]);
762 pci_bound = ioread32(ca91cx42_bridge->base + CA91CX42_LSI_BD[i]); 779 pci_bound = ioread32(bridge->base + CA91CX42_LSI_BD[i]);
763 780
764 *vme_base = pci_base + vme_offset; 781 *vme_base = pci_base + vme_offset;
765 *size = (pci_bound - pci_base) + 0x1000; 782 *size = (pci_bound - pci_base) + 0x1000;
@@ -882,12 +899,15 @@ ssize_t ca91cx42_master_write(struct vme_master_resource *image, void *buf,
882 return retval; 899 return retval;
883} 900}
884 901
885int ca91cx42_slot_get(void) 902int ca91cx42_slot_get(struct vme_bridge *ca91cx42_bridge)
886{ 903{
887 u32 slot = 0; 904 u32 slot = 0;
905 struct ca91cx42_driver *bridge;
906
907 bridge = ca91cx42_bridge->driver_priv;
888 908
889 if (!geoid) { 909 if (!geoid) {
890 slot = ioread32(ca91cx42_bridge->base + VCSR_BS); 910 slot = ioread32(bridge->base + VCSR_BS);
891 slot = ((slot & CA91CX42_VCSR_BS_SLOT_M) >> 27); 911 slot = ((slot & CA91CX42_VCSR_BS_SLOT_M) >> 27);
892 } else 912 } else
893 slot = geoid; 913 slot = geoid;
@@ -909,19 +929,23 @@ static int __init ca91cx42_init(void)
909 * Auto-ID or Geographic address. This function ensures that the window is 929 * Auto-ID or Geographic address. This function ensures that the window is
910 * enabled at an offset consistent with the boards geopgraphic address. 930 * enabled at an offset consistent with the boards geopgraphic address.
911 */ 931 */
912static int ca91cx42_crcsr_init(struct pci_dev *pdev) 932static int ca91cx42_crcsr_init(struct vme_bridge *ca91cx42_bridge,
933 struct pci_dev *pdev)
913{ 934{
914 unsigned int crcsr_addr; 935 unsigned int crcsr_addr;
915 int tmp, slot; 936 int tmp, slot;
937 struct ca91cx42_driver *bridge;
938
939 bridge = ca91cx42_bridge->driver_priv;
916 940
917/* XXX We may need to set this somehow as the Universe II does not support 941/* XXX We may need to set this somehow as the Universe II does not support
918 * geographical addressing. 942 * geographical addressing.
919 */ 943 */
920#if 0 944#if 0
921 if (vme_slotnum != -1) 945 if (vme_slotnum != -1)
922 iowrite32(vme_slotnum << 27, ca91cx42_bridge->base + VCSR_BS); 946 iowrite32(vme_slotnum << 27, bridge->base + VCSR_BS);
923#endif 947#endif
924 slot = ca91cx42_slot_get(); 948 slot = ca91cx42_slot_get(ca91cx42_bridge);
925 dev_info(&pdev->dev, "CR/CSR Offset: %d\n", slot); 949 dev_info(&pdev->dev, "CR/CSR Offset: %d\n", slot);
926 if (slot == 0) { 950 if (slot == 0) {
927 dev_err(&pdev->dev, "Slot number is unset, not configuring " 951 dev_err(&pdev->dev, "Slot number is unset, not configuring "
@@ -930,39 +954,44 @@ static int ca91cx42_crcsr_init(struct pci_dev *pdev)
930 } 954 }
931 955
932 /* Allocate mem for CR/CSR image */ 956 /* Allocate mem for CR/CSR image */
933 crcsr_kernel = pci_alloc_consistent(pdev, VME_CRCSR_BUF_SIZE, 957 bridge->crcsr_kernel = pci_alloc_consistent(pdev, VME_CRCSR_BUF_SIZE,
934 &crcsr_bus); 958 &(bridge->crcsr_bus));
935 if (crcsr_kernel == NULL) { 959 if (bridge->crcsr_kernel == NULL) {
936 dev_err(&pdev->dev, "Failed to allocate memory for CR/CSR " 960 dev_err(&pdev->dev, "Failed to allocate memory for CR/CSR "
937 "image\n"); 961 "image\n");
938 return -ENOMEM; 962 return -ENOMEM;
939 } 963 }
940 964
941 memset(crcsr_kernel, 0, VME_CRCSR_BUF_SIZE); 965 memset(bridge->crcsr_kernel, 0, VME_CRCSR_BUF_SIZE);
942 966
943 crcsr_addr = slot * (512 * 1024); 967 crcsr_addr = slot * (512 * 1024);
944 iowrite32(crcsr_bus - crcsr_addr, ca91cx42_bridge->base + VCSR_TO); 968 iowrite32(bridge->crcsr_bus - crcsr_addr, bridge->base + VCSR_TO);
945 969
946 tmp = ioread32(ca91cx42_bridge->base + VCSR_CTL); 970 tmp = ioread32(bridge->base + VCSR_CTL);
947 tmp |= CA91CX42_VCSR_CTL_EN; 971 tmp |= CA91CX42_VCSR_CTL_EN;
948 iowrite32(tmp, ca91cx42_bridge->base + VCSR_CTL); 972 iowrite32(tmp, bridge->base + VCSR_CTL);
949 973
950 return 0; 974 return 0;
951} 975}
952 976
953static void ca91cx42_crcsr_exit(struct pci_dev *pdev) 977static void ca91cx42_crcsr_exit(struct vme_bridge *ca91cx42_bridge,
978 struct pci_dev *pdev)
954{ 979{
955 u32 tmp; 980 u32 tmp;
981 struct ca91cx42_driver *bridge;
982
983 bridge = ca91cx42_bridge->driver_priv;
956 984
957 /* Turn off CR/CSR space */ 985 /* Turn off CR/CSR space */
958 tmp = ioread32(ca91cx42_bridge->base + VCSR_CTL); 986 tmp = ioread32(bridge->base + VCSR_CTL);
959 tmp &= ~CA91CX42_VCSR_CTL_EN; 987 tmp &= ~CA91CX42_VCSR_CTL_EN;
960 iowrite32(tmp, ca91cx42_bridge->base + VCSR_CTL); 988 iowrite32(tmp, bridge->base + VCSR_CTL);
961 989
962 /* Free image */ 990 /* Free image */
963 iowrite32(0, ca91cx42_bridge->base + VCSR_TO); 991 iowrite32(0, bridge->base + VCSR_TO);
964 992
965 pci_free_consistent(pdev, VME_CRCSR_BUF_SIZE, crcsr_kernel, crcsr_bus); 993 pci_free_consistent(pdev, VME_CRCSR_BUF_SIZE, bridge->crcsr_kernel,
994 bridge->crcsr_bus);
966} 995}
967 996
968static int ca91cx42_probe(struct pci_dev *pdev, const struct pci_device_id *id) 997static int ca91cx42_probe(struct pci_dev *pdev, const struct pci_device_id *id)
@@ -970,6 +999,8 @@ static int ca91cx42_probe(struct pci_dev *pdev, const struct pci_device_id *id)
970 int retval, i; 999 int retval, i;
971 u32 data; 1000 u32 data;
972 struct list_head *pos = NULL; 1001 struct list_head *pos = NULL;
1002 struct vme_bridge *ca91cx42_bridge;
1003 struct ca91cx42_driver *ca91cx42_device;
973 struct vme_master_resource *master_image; 1004 struct vme_master_resource *master_image;
974 struct vme_slave_resource *slave_image; 1005 struct vme_slave_resource *slave_image;
975#if 0 1006#if 0
@@ -991,6 +1022,19 @@ static int ca91cx42_probe(struct pci_dev *pdev, const struct pci_device_id *id)
991 1022
992 memset(ca91cx42_bridge, 0, sizeof(struct vme_bridge)); 1023 memset(ca91cx42_bridge, 0, sizeof(struct vme_bridge));
993 1024
1025 ca91cx42_device = kmalloc(sizeof(struct ca91cx42_driver), GFP_KERNEL);
1026
1027 if (ca91cx42_device == NULL) {
1028 dev_err(&pdev->dev, "Failed to allocate memory for device "
1029 "structure\n");
1030 retval = -ENOMEM;
1031 goto err_driver;
1032 }
1033
1034 memset(ca91cx42_device, 0, sizeof(struct ca91cx42_driver));
1035
1036 ca91cx42_bridge->driver_priv = ca91cx42_device;
1037
994 /* Enable the device */ 1038 /* Enable the device */
995 retval = pci_enable_device(pdev); 1039 retval = pci_enable_device(pdev);
996 if (retval) { 1040 if (retval) {
@@ -1006,16 +1050,16 @@ static int ca91cx42_probe(struct pci_dev *pdev, const struct pci_device_id *id)
1006 } 1050 }
1007 1051
1008 /* map registers in BAR 0 */ 1052 /* map registers in BAR 0 */
1009 ca91cx42_bridge->base = ioremap_nocache(pci_resource_start(pdev, 0), 1053 ca91cx42_device->base = ioremap_nocache(pci_resource_start(pdev, 0),
1010 4096); 1054 4096);
1011 if (!ca91cx42_bridge->base) { 1055 if (!ca91cx42_device->base) {
1012 dev_err(&pdev->dev, "Unable to remap CRG region\n"); 1056 dev_err(&pdev->dev, "Unable to remap CRG region\n");
1013 retval = -EIO; 1057 retval = -EIO;
1014 goto err_remap; 1058 goto err_remap;
1015 } 1059 }
1016 1060
1017 /* Check to see if the mapping worked out */ 1061 /* Check to see if the mapping worked out */
1018 data = ioread32(ca91cx42_bridge->base + CA91CX42_PCI_ID) & 0x0000FFFF; 1062 data = ioread32(ca91cx42_device->base + CA91CX42_PCI_ID) & 0x0000FFFF;
1019 if (data != PCI_VENDOR_ID_TUNDRA) { 1063 if (data != PCI_VENDOR_ID_TUNDRA) {
1020 dev_err(&pdev->dev, "PCI_ID check failed\n"); 1064 dev_err(&pdev->dev, "PCI_ID check failed\n");
1021 retval = -EIO; 1065 retval = -EIO;
@@ -1023,11 +1067,10 @@ static int ca91cx42_probe(struct pci_dev *pdev, const struct pci_device_id *id)
1023 } 1067 }
1024 1068
1025 /* Initialize wait queues & mutual exclusion flags */ 1069 /* Initialize wait queues & mutual exclusion flags */
1026 /* XXX These need to be moved to the vme_bridge structure */ 1070 init_waitqueue_head(&(ca91cx42_device->dma_queue));
1027 init_waitqueue_head(&dma_queue); 1071 init_waitqueue_head(&(ca91cx42_device->iack_queue));
1028 init_waitqueue_head(&iack_queue); 1072 mutex_init(&(ca91cx42_device->vme_int));
1029 mutex_init(&(vme_int)); 1073 mutex_init(&(ca91cx42_device->vme_rmw));
1030 mutex_init(&(vme_rmw));
1031 1074
1032 ca91cx42_bridge->parent = &(pdev->dev); 1075 ca91cx42_bridge->parent = &(pdev->dev);
1033 strcpy(ca91cx42_bridge->name, driver_name); 1076 strcpy(ca91cx42_bridge->name, driver_name);
@@ -1155,12 +1198,13 @@ static int ca91cx42_probe(struct pci_dev *pdev, const struct pci_device_id *id)
1155#endif 1198#endif
1156 ca91cx42_bridge->slot_get = ca91cx42_slot_get; 1199 ca91cx42_bridge->slot_get = ca91cx42_slot_get;
1157 1200
1158 data = ioread32(ca91cx42_bridge->base + MISC_CTL); 1201 data = ioread32(ca91cx42_device->base + MISC_CTL);
1159 dev_info(&pdev->dev, "Board is%s the VME system controller\n", 1202 dev_info(&pdev->dev, "Board is%s the VME system controller\n",
1160 (data & CA91CX42_MISC_CTL_SYSCON) ? "" : " not"); 1203 (data & CA91CX42_MISC_CTL_SYSCON) ? "" : " not");
1161 dev_info(&pdev->dev, "Slot ID is %d\n", ca91cx42_slot_get()); 1204 dev_info(&pdev->dev, "Slot ID is %d\n",
1205 ca91cx42_slot_get(ca91cx42_bridge));
1162 1206
1163 if (ca91cx42_crcsr_init(pdev)) { 1207 if (ca91cx42_crcsr_init(ca91cx42_bridge, pdev)) {
1164 dev_err(&pdev->dev, "CR/CSR configuration failed.\n"); 1208 dev_err(&pdev->dev, "CR/CSR configuration failed.\n");
1165 retval = -EINVAL; 1209 retval = -EINVAL;
1166#if 0 1210#if 0
@@ -1177,11 +1221,13 @@ static int ca91cx42_probe(struct pci_dev *pdev, const struct pci_device_id *id)
1177 goto err_reg; 1221 goto err_reg;
1178 } 1222 }
1179 1223
1224 pci_set_drvdata(pdev, ca91cx42_bridge);
1225
1180 return 0; 1226 return 0;
1181 1227
1182 vme_unregister_bridge(ca91cx42_bridge); 1228 vme_unregister_bridge(ca91cx42_bridge);
1183err_reg: 1229err_reg:
1184 ca91cx42_crcsr_exit(pdev); 1230 ca91cx42_crcsr_exit(ca91cx42_bridge, pdev);
1185#if 0 1231#if 0
1186err_crcsr: 1232err_crcsr:
1187#endif 1233#endif
@@ -1217,15 +1263,17 @@ err_master:
1217 kfree(master_image); 1263 kfree(master_image);
1218 } 1264 }
1219 1265
1220 ca91cx42_irq_exit(pdev); 1266 ca91cx42_irq_exit(ca91cx42_device, pdev);
1221err_irq: 1267err_irq:
1222err_test: 1268err_test:
1223 iounmap(ca91cx42_bridge->base); 1269 iounmap(ca91cx42_device->base);
1224err_remap: 1270err_remap:
1225 pci_release_regions(pdev); 1271 pci_release_regions(pdev);
1226err_resource: 1272err_resource:
1227 pci_disable_device(pdev); 1273 pci_disable_device(pdev);
1228err_enable: 1274err_enable:
1275 kfree(ca91cx42_device);
1276err_driver:
1229 kfree(ca91cx42_bridge); 1277 kfree(ca91cx42_bridge);
1230err_struct: 1278err_struct:
1231 return retval; 1279 return retval;
@@ -1239,27 +1287,32 @@ void ca91cx42_remove(struct pci_dev *pdev)
1239 struct vme_slave_resource *slave_image; 1287 struct vme_slave_resource *slave_image;
1240 struct vme_dma_resource *dma_ctrlr; 1288 struct vme_dma_resource *dma_ctrlr;
1241 struct vme_lm_resource *lm; 1289 struct vme_lm_resource *lm;
1290 struct ca91cx42_driver *bridge;
1291 struct vme_bridge *ca91cx42_bridge = pci_get_drvdata(pdev);
1292
1293 bridge = ca91cx42_bridge->driver_priv;
1294
1242 1295
1243 /* Turn off Ints */ 1296 /* Turn off Ints */
1244 iowrite32(0, ca91cx42_bridge->base + LINT_EN); 1297 iowrite32(0, bridge->base + LINT_EN);
1245 1298
1246 /* Turn off the windows */ 1299 /* Turn off the windows */
1247 iowrite32(0x00800000, ca91cx42_bridge->base + LSI0_CTL); 1300 iowrite32(0x00800000, bridge->base + LSI0_CTL);
1248 iowrite32(0x00800000, ca91cx42_bridge->base + LSI1_CTL); 1301 iowrite32(0x00800000, bridge->base + LSI1_CTL);
1249 iowrite32(0x00800000, ca91cx42_bridge->base + LSI2_CTL); 1302 iowrite32(0x00800000, bridge->base + LSI2_CTL);
1250 iowrite32(0x00800000, ca91cx42_bridge->base + LSI3_CTL); 1303 iowrite32(0x00800000, bridge->base + LSI3_CTL);
1251 iowrite32(0x00800000, ca91cx42_bridge->base + LSI4_CTL); 1304 iowrite32(0x00800000, bridge->base + LSI4_CTL);
1252 iowrite32(0x00800000, ca91cx42_bridge->base + LSI5_CTL); 1305 iowrite32(0x00800000, bridge->base + LSI5_CTL);
1253 iowrite32(0x00800000, ca91cx42_bridge->base + LSI6_CTL); 1306 iowrite32(0x00800000, bridge->base + LSI6_CTL);
1254 iowrite32(0x00800000, ca91cx42_bridge->base + LSI7_CTL); 1307 iowrite32(0x00800000, bridge->base + LSI7_CTL);
1255 iowrite32(0x00F00000, ca91cx42_bridge->base + VSI0_CTL); 1308 iowrite32(0x00F00000, bridge->base + VSI0_CTL);
1256 iowrite32(0x00F00000, ca91cx42_bridge->base + VSI1_CTL); 1309 iowrite32(0x00F00000, bridge->base + VSI1_CTL);
1257 iowrite32(0x00F00000, ca91cx42_bridge->base + VSI2_CTL); 1310 iowrite32(0x00F00000, bridge->base + VSI2_CTL);
1258 iowrite32(0x00F00000, ca91cx42_bridge->base + VSI3_CTL); 1311 iowrite32(0x00F00000, bridge->base + VSI3_CTL);
1259 iowrite32(0x00F00000, ca91cx42_bridge->base + VSI4_CTL); 1312 iowrite32(0x00F00000, bridge->base + VSI4_CTL);
1260 iowrite32(0x00F00000, ca91cx42_bridge->base + VSI5_CTL); 1313 iowrite32(0x00F00000, bridge->base + VSI5_CTL);
1261 iowrite32(0x00F00000, ca91cx42_bridge->base + VSI6_CTL); 1314 iowrite32(0x00F00000, bridge->base + VSI6_CTL);
1262 iowrite32(0x00F00000, ca91cx42_bridge->base + VSI7_CTL); 1315 iowrite32(0x00F00000, bridge->base + VSI7_CTL);
1263 1316
1264 vme_unregister_bridge(ca91cx42_bridge); 1317 vme_unregister_bridge(ca91cx42_bridge);
1265#if 0 1318#if 0
@@ -1294,9 +1347,9 @@ void ca91cx42_remove(struct pci_dev *pdev)
1294 kfree(master_image); 1347 kfree(master_image);
1295 } 1348 }
1296 1349
1297 ca91cx42_irq_exit(pdev); 1350 ca91cx42_irq_exit(bridge, pdev);
1298 1351
1299 iounmap(ca91cx42_bridge->base); 1352 iounmap(bridge->base);
1300 1353
1301 pci_release_regions(pdev); 1354 pci_release_regions(pdev);
1302 1355
@@ -1346,7 +1399,7 @@ int ca91cx42_master_rmw(vmeRmwCfg_t *vmeRmw)
1346 } 1399 }
1347 /* Find the PCI address that maps to the desired VME address */ 1400 /* Find the PCI address that maps to the desired VME address */
1348 for (i = 0; i < 8; i++) { 1401 for (i = 0; i < 8; i++) {
1349 temp_ctl = ioread32(ca91cx42_bridge->base + 1402 temp_ctl = ioread32(bridge->base +
1350 CA91CX42_LSI_CTL[i]); 1403 CA91CX42_LSI_CTL[i]);
1351 if ((temp_ctl & 0x80000000) == 0) { 1404 if ((temp_ctl & 0x80000000) == 0) {
1352 continue; 1405 continue;
@@ -1357,9 +1410,9 @@ int ca91cx42_master_rmw(vmeRmwCfg_t *vmeRmw)
1357 if (vmeOut.addrSpace != vmeRmw->addrSpace) { 1410 if (vmeOut.addrSpace != vmeRmw->addrSpace) {
1358 continue; 1411 continue;
1359 } 1412 }
1360 tempBS = ioread32(ca91cx42_bridge->base + CA91CX42_LSI_BS[i]); 1413 tempBS = ioread32(bridge->base + CA91CX42_LSI_BS[i]);
1361 tempBD = ioread32(ca91cx42_bridge->base + CA91CX42_LSI_BD[i]); 1414 tempBD = ioread32(bridge->base + CA91CX42_LSI_BD[i]);
1362 tempTO = ioread32(ca91cx42_bridge->base + CA91CX42_LSI_TO[i]); 1415 tempTO = ioread32(bridge->base + CA91CX42_LSI_TO[i]);
1363 vmeBS = tempBS + tempTO; 1416 vmeBS = tempBS + tempTO;
1364 vmeBD = tempBD + tempTO; 1417 vmeBD = tempBD + tempTO;
1365 if ((vmeRmw->targetAddr >= vmeBS) && 1418 if ((vmeRmw->targetAddr >= vmeBS) &&
@@ -1378,13 +1431,13 @@ int ca91cx42_master_rmw(vmeRmwCfg_t *vmeRmw)
1378 return -EINVAL; 1431 return -EINVAL;
1379 } 1432 }
1380 /* Setup the RMW registers. */ 1433 /* Setup the RMW registers. */
1381 iowrite32(0, ca91cx42_bridge->base + SCYC_CTL); 1434 iowrite32(0, bridge->base + SCYC_CTL);
1382 iowrite32(SWIZZLE(vmeRmw->enableMask), ca91cx42_bridge->base + SCYC_EN); 1435 iowrite32(SWIZZLE(vmeRmw->enableMask), bridge->base + SCYC_EN);
1383 iowrite32(SWIZZLE(vmeRmw->compareData), ca91cx42_bridge->base + 1436 iowrite32(SWIZZLE(vmeRmw->compareData), bridge->base +
1384 SCYC_CMP); 1437 SCYC_CMP);
1385 iowrite32(SWIZZLE(vmeRmw->swapData), ca91cx42_bridge->base + SCYC_SWP); 1438 iowrite32(SWIZZLE(vmeRmw->swapData), bridge->base + SCYC_SWP);
1386 iowrite32((int)rmw_pci_data_ptr, ca91cx42_bridge->base + SCYC_ADDR); 1439 iowrite32((int)rmw_pci_data_ptr, bridge->base + SCYC_ADDR);
1387 iowrite32(1, ca91cx42_bridge->base + SCYC_CTL); 1440 iowrite32(1, bridge->base + SCYC_CTL);
1388 1441
1389 /* Run the RMW cycle until either success or max attempts. */ 1442 /* Run the RMW cycle until either success or max attempts. */
1390 vmeRmw->numAttempts = 1; 1443 vmeRmw->numAttempts = 1;
@@ -1393,7 +1446,7 @@ int ca91cx42_master_rmw(vmeRmwCfg_t *vmeRmw)
1393 if ((ioread32(vaDataPtr) & vmeRmw->enableMask) == 1446 if ((ioread32(vaDataPtr) & vmeRmw->enableMask) ==
1394 (vmeRmw->swapData & vmeRmw->enableMask)) { 1447 (vmeRmw->swapData & vmeRmw->enableMask)) {
1395 1448
1396 iowrite32(0, ca91cx42_bridge->base + SCYC_CTL); 1449 iowrite32(0, bridge->base + SCYC_CTL);
1397 break; 1450 break;
1398 1451
1399 } 1452 }
@@ -1478,8 +1531,8 @@ ca91cx42_start_dma(int channel, unsigned int dgcsreg, TDMA_Cmd_Packet *vmeLL)
1478 1531
1479 /* Setup registers as needed for direct or chained. */ 1532 /* Setup registers as needed for direct or chained. */
1480 if (dgcsreg & 0x8000000) { 1533 if (dgcsreg & 0x8000000) {
1481 iowrite32(0, ca91cx42_bridge->base + DTBC); 1534 iowrite32(0, bridge->base + DTBC);
1482 iowrite32((unsigned int)vmeLL, ca91cx42_bridge->base + DCPP); 1535 iowrite32((unsigned int)vmeLL, bridge->base + DCPP);
1483 } else { 1536 } else {
1484#if 0 1537#if 0
1485 printk(KERN_ERR "Starting: DGCS = %08x\n", dgcsreg); 1538 printk(KERN_ERR "Starting: DGCS = %08x\n", dgcsreg);
@@ -1493,17 +1546,17 @@ ca91cx42_start_dma(int channel, unsigned int dgcsreg, TDMA_Cmd_Packet *vmeLL)
1493 ioread32(&vmeLL->dctl)); 1546 ioread32(&vmeLL->dctl));
1494#endif 1547#endif
1495 /* Write registers */ 1548 /* Write registers */
1496 iowrite32(ioread32(&vmeLL->dva), ca91cx42_bridge->base + DVA); 1549 iowrite32(ioread32(&vmeLL->dva), bridge->base + DVA);
1497 iowrite32(ioread32(&vmeLL->dlv), ca91cx42_bridge->base + DLA); 1550 iowrite32(ioread32(&vmeLL->dlv), bridge->base + DLA);
1498 iowrite32(ioread32(&vmeLL->dtbc), ca91cx42_bridge->base + DTBC); 1551 iowrite32(ioread32(&vmeLL->dtbc), bridge->base + DTBC);
1499 iowrite32(ioread32(&vmeLL->dctl), ca91cx42_bridge->base + DCTL); 1552 iowrite32(ioread32(&vmeLL->dctl), bridge->base + DCTL);
1500 iowrite32(0, ca91cx42_bridge->base + DCPP); 1553 iowrite32(0, bridge->base + DCPP);
1501 } 1554 }
1502 1555
1503 /* Start the operation */ 1556 /* Start the operation */
1504 iowrite32(dgcsreg, ca91cx42_bridge->base + DGCS); 1557 iowrite32(dgcsreg, bridge->base + DGCS);
1505 val = get_tbl(); 1558 val = get_tbl();
1506 iowrite32(dgcsreg | 0x8000000F, ca91cx42_bridge->base + DGCS); 1559 iowrite32(dgcsreg | 0x8000000F, bridge->base + DGCS);
1507 return val; 1560 return val;
1508} 1561}
1509 1562
@@ -1704,10 +1757,10 @@ int ca91cx42_do_dma(vmeDmaPacket_t *vmeDma)
1704 } 1757 }
1705 1758
1706 wait_event_interruptible(dma_queue, 1759 wait_event_interruptible(dma_queue,
1707 ioread32(ca91cx42_bridge->base + DGCS) & 0x800); 1760 ioread32(bridge->base + DGCS) & 0x800);
1708 1761
1709 val = ioread32(ca91cx42_bridge->base + DGCS); 1762 val = ioread32(bridge->base + DGCS);
1710 iowrite32(val | 0xF00, ca91cx42_bridge->base + DGCS); 1763 iowrite32(val | 0xF00, bridge->base + DGCS);
1711 1764
1712 vmeDma->vmeDmaStatus = 0; 1765 vmeDma->vmeDmaStatus = 0;
1713 1766
@@ -1715,15 +1768,15 @@ int ca91cx42_do_dma(vmeDmaPacket_t *vmeDma)
1715 vmeDma->vmeDmaStatus = val & 0x700; 1768 vmeDma->vmeDmaStatus = val & 0x700;
1716 printk(KERN_ERR "ca91c042: DMA Error in ca91cx42_DMA_irqhandler" 1769 printk(KERN_ERR "ca91c042: DMA Error in ca91cx42_DMA_irqhandler"
1717 " DGCS=%08X\n", val); 1770 " DGCS=%08X\n", val);
1718 val = ioread32(ca91cx42_bridge->base + DCPP); 1771 val = ioread32(bridge->base + DCPP);
1719 printk(KERN_ERR "ca91c042: DCPP=%08X\n", val); 1772 printk(KERN_ERR "ca91c042: DCPP=%08X\n", val);
1720 val = ioread32(ca91cx42_bridge->base + DCTL); 1773 val = ioread32(bridge->base + DCTL);
1721 printk(KERN_ERR "ca91c042: DCTL=%08X\n", val); 1774 printk(KERN_ERR "ca91c042: DCTL=%08X\n", val);
1722 val = ioread32(ca91cx42_bridge->base + DTBC); 1775 val = ioread32(bridge->base + DTBC);
1723 printk(KERN_ERR "ca91c042: DTBC=%08X\n", val); 1776 printk(KERN_ERR "ca91c042: DTBC=%08X\n", val);
1724 val = ioread32(ca91cx42_bridge->base + DLA); 1777 val = ioread32(bridge->base + DLA);
1725 printk(KERN_ERR "ca91c042: DLA=%08X\n", val); 1778 printk(KERN_ERR "ca91c042: DLA=%08X\n", val);
1726 val = ioread32(ca91cx42_bridge->base + DVA); 1779 val = ioread32(bridge->base + DVA);
1727 printk(KERN_ERR "ca91c042: DVA=%08X\n", val); 1780 printk(KERN_ERR "ca91c042: DVA=%08X\n", val);
1728 1781
1729 } 1782 }
@@ -1766,9 +1819,9 @@ int ca91cx42_lm_set(vmeLmCfg_t *vmeLm)
1766 } 1819 }
1767 1820
1768 /* Disable while we are mucking around */ 1821 /* Disable while we are mucking around */
1769 iowrite32(0x00000000, ca91cx42_bridge->base + LM_CTL); 1822 iowrite32(0x00000000, bridge->base + LM_CTL);
1770 1823
1771 iowrite32(vmeLm->addr, ca91cx42_bridge->base + LM_BS); 1824 iowrite32(vmeLm->addr, bridge->base + LM_BS);
1772 1825
1773 /* Setup CTL register. */ 1826 /* Setup CTL register. */
1774 if (vmeLm->userAccessType & VME_SUPER) 1827 if (vmeLm->userAccessType & VME_SUPER)
@@ -1782,8 +1835,8 @@ int ca91cx42_lm_set(vmeLmCfg_t *vmeLm)
1782 1835
1783 1836
1784 /* Write ctl reg and enable */ 1837 /* Write ctl reg and enable */
1785 iowrite32(0x80000000 | temp_ctl, ca91cx42_bridge->base + LM_CTL); 1838 iowrite32(0x80000000 | temp_ctl, bridge->base + LM_CTL);
1786 temp_ctl = ioread32(ca91cx42_bridge->base + LM_CTL); 1839 temp_ctl = ioread32(bridge->base + LM_CTL);
1787 1840
1788 return 0; 1841 return 0;
1789} 1842}
@@ -1800,7 +1853,7 @@ int ca91cx42_wait_lm(vmeLmCfg_t *vmeLm)
1800 vmeLm->lmWait = 10; 1853 vmeLm->lmWait = 10;
1801 interruptible_sleep_on_timeout(&lm_queue, vmeLm->lmWait); 1854 interruptible_sleep_on_timeout(&lm_queue, vmeLm->lmWait);
1802 } 1855 }
1803 iowrite32(0x00000000, ca91cx42_bridge->base + LM_CTL); 1856 iowrite32(0x00000000, bridge->base + LM_CTL);
1804 1857
1805 return 0; 1858 return 0;
1806} 1859}
@@ -1812,7 +1865,7 @@ int ca91cx42_set_arbiter(vmeArbiterCfg_t *vmeArb)
1812 int temp_ctl = 0; 1865 int temp_ctl = 0;
1813 int vbto = 0; 1866 int vbto = 0;
1814 1867
1815 temp_ctl = ioread32(ca91cx42_bridge->base + MISC_CTL); 1868 temp_ctl = ioread32(bridge->base + MISC_CTL);
1816 temp_ctl &= 0x00FFFFFF; 1869 temp_ctl &= 0x00FFFFFF;
1817 1870
1818 if (vmeArb->globalTimeoutTimer == 0xFFFFFFFF) { 1871 if (vmeArb->globalTimeoutTimer == 0xFFFFFFFF) {
@@ -1834,7 +1887,7 @@ int ca91cx42_set_arbiter(vmeArbiterCfg_t *vmeArb)
1834 if (vmeArb->arbiterTimeoutFlag) 1887 if (vmeArb->arbiterTimeoutFlag)
1835 temp_ctl |= 2 << 24; 1888 temp_ctl |= 2 << 24;
1836 1889
1837 iowrite32(temp_ctl, ca91cx42_bridge->base + MISC_CTL); 1890 iowrite32(temp_ctl, bridge->base + MISC_CTL);
1838 return 0; 1891 return 0;
1839} 1892}
1840 1893
@@ -1843,7 +1896,7 @@ int ca91cx42_get_arbiter(vmeArbiterCfg_t *vmeArb)
1843 int temp_ctl = 0; 1896 int temp_ctl = 0;
1844 int vbto = 0; 1897 int vbto = 0;
1845 1898
1846 temp_ctl = ioread32(ca91cx42_bridge->base + MISC_CTL); 1899 temp_ctl = ioread32(bridge->base + MISC_CTL);
1847 1900
1848 vbto = (temp_ctl >> 28) & 0xF; 1901 vbto = (temp_ctl >> 28) & 0xF;
1849 if (vbto != 0) 1902 if (vbto != 0)
@@ -1864,7 +1917,7 @@ int ca91cx42_set_requestor(vmeRequesterCfg_t *vmeReq)
1864{ 1917{
1865 int temp_ctl = 0; 1918 int temp_ctl = 0;
1866 1919
1867 temp_ctl = ioread32(ca91cx42_bridge->base + MAST_CTL); 1920 temp_ctl = ioread32(bridge->base + MAST_CTL);
1868 temp_ctl &= 0xFF0FFFFF; 1921 temp_ctl &= 0xFF0FFFFF;
1869 1922
1870 if (vmeReq->releaseMode == 1) 1923 if (vmeReq->releaseMode == 1)
@@ -1875,7 +1928,7 @@ int ca91cx42_set_requestor(vmeRequesterCfg_t *vmeReq)
1875 1928
1876 temp_ctl |= (vmeReq->requestLevel << 22); 1929 temp_ctl |= (vmeReq->requestLevel << 22);
1877 1930
1878 iowrite32(temp_ctl, ca91cx42_bridge->base + MAST_CTL); 1931 iowrite32(temp_ctl, bridge->base + MAST_CTL);
1879 return 0; 1932 return 0;
1880} 1933}
1881 1934
@@ -1883,7 +1936,7 @@ int ca91cx42_get_requestor(vmeRequesterCfg_t *vmeReq)
1883{ 1936{
1884 int temp_ctl = 0; 1937 int temp_ctl = 0;
1885 1938
1886 temp_ctl = ioread32(ca91cx42_bridge->base + MAST_CTL); 1939 temp_ctl = ioread32(bridge->base + MAST_CTL);
1887 1940
1888 if (temp_ctl & (1 << 20)) 1941 if (temp_ctl & (1 << 20))
1889 vmeReq->releaseMode = 1; 1942 vmeReq->releaseMode = 1;
diff --git a/drivers/staging/vme/bridges/vme_ca91cx42.h b/drivers/staging/vme/bridges/vme_ca91cx42.h
index 2a9ad1454f4e..df1050297849 100644
--- a/drivers/staging/vme/bridges/vme_ca91cx42.h
+++ b/drivers/staging/vme/bridges/vme_ca91cx42.h
@@ -37,6 +37,22 @@
37#define CA91C142_MAX_DMA 1 /* Max DMA Controllers */ 37#define CA91C142_MAX_DMA 1 /* Max DMA Controllers */
38#define CA91C142_MAX_MAILBOX 4 /* Max Mail Box registers */ 38#define CA91C142_MAX_MAILBOX 4 /* Max Mail Box registers */
39 39
40/* Structure used to hold driver specific information */
41struct ca91cx42_driver {
42 void *base; /* Base Address of device registers */
43 wait_queue_head_t dma_queue;
44 wait_queue_head_t iack_queue;
45 wait_queue_head_t mbox_queue;
46 void (*lm_callback[4])(int); /* Called in interrupt handler */
47 void *crcsr_kernel;
48 dma_addr_t crcsr_bus;
49 struct mutex vme_rmw; /* Only one RMW cycle at a time */
50 struct mutex vme_int; /*
51 * Only one VME interrupt can be
52 * generated at a time, provide locking
53 */
54};
55
40/* See Page 2-77 in the Universe User Manual */ 56/* See Page 2-77 in the Universe User Manual */
41struct ca91cx42_dma_descriptor { 57struct ca91cx42_dma_descriptor {
42 unsigned int dctl; /* DMA Control */ 58 unsigned int dctl; /* DMA Control */
diff --git a/drivers/staging/vme/bridges/vme_tsi148.c b/drivers/staging/vme/bridges/vme_tsi148.c
index e74c4a953b2d..e3d300c9bfba 100644
--- a/drivers/staging/vme/bridges/vme_tsi148.c
+++ b/drivers/staging/vme/bridges/vme_tsi148.c
@@ -59,26 +59,11 @@ int tsi148_dma_list_add (struct vme_dma_list *, struct vme_dma_attr *,
59int tsi148_dma_list_exec(struct vme_dma_list *); 59int tsi148_dma_list_exec(struct vme_dma_list *);
60int tsi148_dma_list_empty(struct vme_dma_list *); 60int tsi148_dma_list_empty(struct vme_dma_list *);
61int tsi148_generate_irq(int, int); 61int tsi148_generate_irq(int, int);
62int tsi148_slot_get(void);
63 62
64/* Modue parameter */ 63/* Module parameter */
65static int err_chk; 64static int err_chk;
66static int geoid; 65static int geoid;
67 66
68/* XXX These should all be in a per device structure */
69static struct vme_bridge *tsi148_bridge;
70static wait_queue_head_t dma_queue[2];
71static wait_queue_head_t iack_queue;
72static void (*lm_callback[4])(int); /* Called in interrupt handler */
73static void *crcsr_kernel;
74static dma_addr_t crcsr_bus;
75static struct vme_master_resource *flush_image;
76static struct mutex vme_rmw; /* Only one RMW cycle at a time */
77static struct mutex vme_int; /*
78 * Only one VME interrupt can be
79 * generated at a time, provide locking
80 */
81
82static char driver_name[] = "vme_tsi148"; 67static char driver_name[] = "vme_tsi148";
83 68
84static const struct pci_device_id tsi148_ids[] = { 69static const struct pci_device_id tsi148_ids[] = {
@@ -110,16 +95,17 @@ static void reg_split(unsigned long long variable, unsigned int *high,
110/* 95/*
111 * Wakes up DMA queue. 96 * Wakes up DMA queue.
112 */ 97 */
113static u32 tsi148_DMA_irqhandler(int channel_mask) 98static u32 tsi148_DMA_irqhandler(struct tsi148_driver *bridge,
99 int channel_mask)
114{ 100{
115 u32 serviced = 0; 101 u32 serviced = 0;
116 102
117 if (channel_mask & TSI148_LCSR_INTS_DMA0S) { 103 if (channel_mask & TSI148_LCSR_INTS_DMA0S) {
118 wake_up(&dma_queue[0]); 104 wake_up(&(bridge->dma_queue[0]));
119 serviced |= TSI148_LCSR_INTC_DMA0C; 105 serviced |= TSI148_LCSR_INTC_DMA0C;
120 } 106 }
121 if (channel_mask & TSI148_LCSR_INTS_DMA1S) { 107 if (channel_mask & TSI148_LCSR_INTS_DMA1S) {
122 wake_up(&dma_queue[1]); 108 wake_up(&(bridge->dma_queue[1]));
123 serviced |= TSI148_LCSR_INTC_DMA1C; 109 serviced |= TSI148_LCSR_INTC_DMA1C;
124 } 110 }
125 111
@@ -129,7 +115,7 @@ static u32 tsi148_DMA_irqhandler(int channel_mask)
129/* 115/*
130 * Wake up location monitor queue 116 * Wake up location monitor queue
131 */ 117 */
132static u32 tsi148_LM_irqhandler(u32 stat) 118static u32 tsi148_LM_irqhandler(struct tsi148_driver *bridge, u32 stat)
133{ 119{
134 int i; 120 int i;
135 u32 serviced = 0; 121 u32 serviced = 0;
@@ -137,7 +123,7 @@ static u32 tsi148_LM_irqhandler(u32 stat)
137 for (i = 0; i < 4; i++) { 123 for (i = 0; i < 4; i++) {
138 if(stat & TSI148_LCSR_INTS_LMS[i]) { 124 if(stat & TSI148_LCSR_INTS_LMS[i]) {
139 /* We only enable interrupts if the callback is set */ 125 /* We only enable interrupts if the callback is set */
140 lm_callback[i](i); 126 bridge->lm_callback[i](i);
141 serviced |= TSI148_LCSR_INTC_LMC[i]; 127 serviced |= TSI148_LCSR_INTC_LMC[i];
142 } 128 }
143 } 129 }
@@ -150,7 +136,7 @@ static u32 tsi148_LM_irqhandler(u32 stat)
150 * 136 *
151 * XXX This functionality is not exposed up though API. 137 * XXX This functionality is not exposed up though API.
152 */ 138 */
153static u32 tsi148_MB_irqhandler(u32 stat) 139static u32 tsi148_MB_irqhandler(struct tsi148_driver *bridge, u32 stat)
154{ 140{
155 int i; 141 int i;
156 u32 val; 142 u32 val;
@@ -158,8 +144,7 @@ static u32 tsi148_MB_irqhandler(u32 stat)
158 144
159 for (i = 0; i < 4; i++) { 145 for (i = 0; i < 4; i++) {
160 if(stat & TSI148_LCSR_INTS_MBS[i]) { 146 if(stat & TSI148_LCSR_INTS_MBS[i]) {
161 val = ioread32be(tsi148_bridge->base + 147 val = ioread32be(bridge->base + TSI148_GCSR_MBOX[i]);
162 TSI148_GCSR_MBOX[i]);
163 printk("VME Mailbox %d received: 0x%x\n", i, val); 148 printk("VME Mailbox %d received: 0x%x\n", i, val);
164 serviced |= TSI148_LCSR_INTC_MBC[i]; 149 serviced |= TSI148_LCSR_INTC_MBC[i];
165 } 150 }
@@ -171,22 +156,21 @@ static u32 tsi148_MB_irqhandler(u32 stat)
171/* 156/*
172 * Display error & status message when PERR (PCI) exception interrupt occurs. 157 * Display error & status message when PERR (PCI) exception interrupt occurs.
173 */ 158 */
174static u32 tsi148_PERR_irqhandler(void) 159static u32 tsi148_PERR_irqhandler(struct tsi148_driver *bridge)
175{ 160{
176 printk(KERN_ERR 161 printk(KERN_ERR
177 "PCI Exception at address: 0x%08x:%08x, attributes: %08x\n", 162 "PCI Exception at address: 0x%08x:%08x, attributes: %08x\n",
178 ioread32be(tsi148_bridge->base + TSI148_LCSR_EDPAU), 163 ioread32be(bridge->base + TSI148_LCSR_EDPAU),
179 ioread32be(tsi148_bridge->base + TSI148_LCSR_EDPAL), 164 ioread32be(bridge->base + TSI148_LCSR_EDPAL),
180 ioread32be(tsi148_bridge->base + TSI148_LCSR_EDPAT) 165 ioread32be(bridge->base + TSI148_LCSR_EDPAT)
181 ); 166 );
182 printk(KERN_ERR 167 printk(KERN_ERR
183 "PCI-X attribute reg: %08x, PCI-X split completion reg: %08x\n", 168 "PCI-X attribute reg: %08x, PCI-X split completion reg: %08x\n",
184 ioread32be(tsi148_bridge->base + TSI148_LCSR_EDPXA), 169 ioread32be(bridge->base + TSI148_LCSR_EDPXA),
185 ioread32be(tsi148_bridge->base + TSI148_LCSR_EDPXS) 170 ioread32be(bridge->base + TSI148_LCSR_EDPXS)
186 ); 171 );
187 172
188 iowrite32be(TSI148_LCSR_EDPAT_EDPCL, 173 iowrite32be(TSI148_LCSR_EDPAT_EDPCL, bridge->base + TSI148_LCSR_EDPAT);
189 tsi148_bridge->base + TSI148_LCSR_EDPAT);
190 174
191 return TSI148_LCSR_INTC_PERRC; 175 return TSI148_LCSR_INTC_PERRC;
192} 176}
@@ -194,16 +178,19 @@ static u32 tsi148_PERR_irqhandler(void)
194/* 178/*
195 * Save address and status when VME error interrupt occurs. 179 * Save address and status when VME error interrupt occurs.
196 */ 180 */
197static u32 tsi148_VERR_irqhandler(void) 181static u32 tsi148_VERR_irqhandler(struct vme_bridge *tsi148_bridge)
198{ 182{
199 unsigned int error_addr_high, error_addr_low; 183 unsigned int error_addr_high, error_addr_low;
200 unsigned long long error_addr; 184 unsigned long long error_addr;
201 u32 error_attrib; 185 u32 error_attrib;
202 struct vme_bus_error *error; 186 struct vme_bus_error *error;
187 struct tsi148_driver *bridge;
188
189 bridge = tsi148_bridge->driver_priv;
203 190
204 error_addr_high = ioread32be(tsi148_bridge->base + TSI148_LCSR_VEAU); 191 error_addr_high = ioread32be(bridge->base + TSI148_LCSR_VEAU);
205 error_addr_low = ioread32be(tsi148_bridge->base + TSI148_LCSR_VEAL); 192 error_addr_low = ioread32be(bridge->base + TSI148_LCSR_VEAL);
206 error_attrib = ioread32be(tsi148_bridge->base + TSI148_LCSR_VEAT); 193 error_attrib = ioread32be(bridge->base + TSI148_LCSR_VEAT);
207 194
208 reg_join(error_addr_high, error_addr_low, &error_addr); 195 reg_join(error_addr_high, error_addr_low, &error_addr);
209 196
@@ -227,8 +214,7 @@ static u32 tsi148_VERR_irqhandler(void)
227 } 214 }
228 215
229 /* Clear Status */ 216 /* Clear Status */
230 iowrite32be(TSI148_LCSR_VEAT_VESCL, 217 iowrite32be(TSI148_LCSR_VEAT_VESCL, bridge->base + TSI148_LCSR_VEAT);
231 tsi148_bridge->base + TSI148_LCSR_VEAT);
232 218
233 return TSI148_LCSR_INTC_VERRC; 219 return TSI148_LCSR_INTC_VERRC;
234} 220}
@@ -236,9 +222,9 @@ static u32 tsi148_VERR_irqhandler(void)
236/* 222/*
237 * Wake up IACK queue. 223 * Wake up IACK queue.
238 */ 224 */
239static u32 tsi148_IACK_irqhandler(void) 225static u32 tsi148_IACK_irqhandler(struct tsi148_driver *bridge)
240{ 226{
241 wake_up(&iack_queue); 227 wake_up(&(bridge->iack_queue));
242 228
243 return TSI148_LCSR_INTC_IACKC; 229 return TSI148_LCSR_INTC_IACKC;
244} 230}
@@ -246,9 +232,13 @@ static u32 tsi148_IACK_irqhandler(void)
246/* 232/*
247 * Calling VME bus interrupt callback if provided. 233 * Calling VME bus interrupt callback if provided.
248 */ 234 */
249static u32 tsi148_VIRQ_irqhandler(u32 stat) 235static u32 tsi148_VIRQ_irqhandler(struct vme_bridge *tsi148_bridge,
236 u32 stat)
250{ 237{
251 int vec, i, serviced = 0; 238 int vec, i, serviced = 0;
239 struct tsi148_driver *bridge;
240
241 bridge = tsi148_bridge->driver_priv;
252 242
253 for (i = 7; i > 0; i--) { 243 for (i = 7; i > 0; i--) {
254 if (stat & (1 << i)) { 244 if (stat & (1 << i)) {
@@ -258,8 +248,7 @@ static u32 tsi148_VIRQ_irqhandler(u32 stat)
258 * 8-bit IACK cycles on the bus, read from offset 248 * 8-bit IACK cycles on the bus, read from offset
259 * 3. 249 * 3.
260 */ 250 */
261 vec = ioread8(tsi148_bridge->base + 251 vec = ioread8(bridge->base + TSI148_LCSR_VIACK[i] + 3);
262 TSI148_LCSR_VIACK[i] + 3);
263 252
264 vme_irq_handler(tsi148_bridge, i, vec); 253 vme_irq_handler(tsi148_bridge, i, vec);
265 254
@@ -274,13 +263,19 @@ static u32 tsi148_VIRQ_irqhandler(u32 stat)
274 * Top level interrupt handler. Clears appropriate interrupt status bits and 263 * Top level interrupt handler. Clears appropriate interrupt status bits and
275 * then calls appropriate sub handler(s). 264 * then calls appropriate sub handler(s).
276 */ 265 */
277static irqreturn_t tsi148_irqhandler(int irq, void *dev_id) 266static irqreturn_t tsi148_irqhandler(int irq, void *ptr)
278{ 267{
279 u32 stat, enable, serviced = 0; 268 u32 stat, enable, serviced = 0;
269 struct vme_bridge *tsi148_bridge;
270 struct tsi148_driver *bridge;
271
272 tsi148_bridge = ptr;
273
274 bridge = tsi148_bridge->driver_priv;
280 275
281 /* Determine which interrupts are unmasked and set */ 276 /* Determine which interrupts are unmasked and set */
282 enable = ioread32be(tsi148_bridge->base + TSI148_LCSR_INTEO); 277 enable = ioread32be(bridge->base + TSI148_LCSR_INTEO);
283 stat = ioread32be(tsi148_bridge->base + TSI148_LCSR_INTS); 278 stat = ioread32be(bridge->base + TSI148_LCSR_INTS);
284 279
285 /* Only look at unmasked interrupts */ 280 /* Only look at unmasked interrupts */
286 stat &= enable; 281 stat &= enable;
@@ -292,61 +287,63 @@ static irqreturn_t tsi148_irqhandler(int irq, void *dev_id)
292 /* Call subhandlers as appropriate */ 287 /* Call subhandlers as appropriate */
293 /* DMA irqs */ 288 /* DMA irqs */
294 if (stat & (TSI148_LCSR_INTS_DMA1S | TSI148_LCSR_INTS_DMA0S)) 289 if (stat & (TSI148_LCSR_INTS_DMA1S | TSI148_LCSR_INTS_DMA0S))
295 serviced |= tsi148_DMA_irqhandler(stat); 290 serviced |= tsi148_DMA_irqhandler(bridge, stat);
296 291
297 /* Location monitor irqs */ 292 /* Location monitor irqs */
298 if (stat & (TSI148_LCSR_INTS_LM3S | TSI148_LCSR_INTS_LM2S | 293 if (stat & (TSI148_LCSR_INTS_LM3S | TSI148_LCSR_INTS_LM2S |
299 TSI148_LCSR_INTS_LM1S | TSI148_LCSR_INTS_LM0S)) 294 TSI148_LCSR_INTS_LM1S | TSI148_LCSR_INTS_LM0S))
300 serviced |= tsi148_LM_irqhandler(stat); 295 serviced |= tsi148_LM_irqhandler(bridge, stat);
301 296
302 /* Mail box irqs */ 297 /* Mail box irqs */
303 if (stat & (TSI148_LCSR_INTS_MB3S | TSI148_LCSR_INTS_MB2S | 298 if (stat & (TSI148_LCSR_INTS_MB3S | TSI148_LCSR_INTS_MB2S |
304 TSI148_LCSR_INTS_MB1S | TSI148_LCSR_INTS_MB0S)) 299 TSI148_LCSR_INTS_MB1S | TSI148_LCSR_INTS_MB0S))
305 serviced |= tsi148_MB_irqhandler(stat); 300 serviced |= tsi148_MB_irqhandler(bridge, stat);
306 301
307 /* PCI bus error */ 302 /* PCI bus error */
308 if (stat & TSI148_LCSR_INTS_PERRS) 303 if (stat & TSI148_LCSR_INTS_PERRS)
309 serviced |= tsi148_PERR_irqhandler(); 304 serviced |= tsi148_PERR_irqhandler(bridge);
310 305
311 /* VME bus error */ 306 /* VME bus error */
312 if (stat & TSI148_LCSR_INTS_VERRS) 307 if (stat & TSI148_LCSR_INTS_VERRS)
313 serviced |= tsi148_VERR_irqhandler(); 308 serviced |= tsi148_VERR_irqhandler(tsi148_bridge);
314 309
315 /* IACK irq */ 310 /* IACK irq */
316 if (stat & TSI148_LCSR_INTS_IACKS) 311 if (stat & TSI148_LCSR_INTS_IACKS)
317 serviced |= tsi148_IACK_irqhandler(); 312 serviced |= tsi148_IACK_irqhandler(bridge);
318 313
319 /* VME bus irqs */ 314 /* VME bus irqs */
320 if (stat & (TSI148_LCSR_INTS_IRQ7S | TSI148_LCSR_INTS_IRQ6S | 315 if (stat & (TSI148_LCSR_INTS_IRQ7S | TSI148_LCSR_INTS_IRQ6S |
321 TSI148_LCSR_INTS_IRQ5S | TSI148_LCSR_INTS_IRQ4S | 316 TSI148_LCSR_INTS_IRQ5S | TSI148_LCSR_INTS_IRQ4S |
322 TSI148_LCSR_INTS_IRQ3S | TSI148_LCSR_INTS_IRQ2S | 317 TSI148_LCSR_INTS_IRQ3S | TSI148_LCSR_INTS_IRQ2S |
323 TSI148_LCSR_INTS_IRQ1S)) 318 TSI148_LCSR_INTS_IRQ1S))
324 serviced |= tsi148_VIRQ_irqhandler(stat); 319 serviced |= tsi148_VIRQ_irqhandler(tsi148_bridge, stat);
325 320
326 /* Clear serviced interrupts */ 321 /* Clear serviced interrupts */
327 iowrite32be(serviced, tsi148_bridge->base + TSI148_LCSR_INTC); 322 iowrite32be(serviced, bridge->base + TSI148_LCSR_INTC);
328 323
329 return IRQ_HANDLED; 324 return IRQ_HANDLED;
330} 325}
331 326
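With no globals left, the top-level handler recovers everything from the void *ptr cookie: it is the struct vme_bridge registered with request_irq() further down, and driver_priv leads from there to the register base and wait queues. Stripped of the sub-handler dispatch, the shape is roughly this sketch:

        static irqreturn_t example_irqhandler(int irq, void *ptr)
        {
                struct vme_bridge *tsi148_bridge = ptr; /* request_irq() cookie */
                struct tsi148_driver *bridge = tsi148_bridge->driver_priv;
                u32 stat, enable, serviced = 0;

                /* Only act on interrupts that are both unmasked and pending. */
                enable = ioread32be(bridge->base + TSI148_LCSR_INTEO);
                stat = ioread32be(bridge->base + TSI148_LCSR_INTS) & enable;

                /* ... call sub-handlers, OR their return values into serviced ... */

                iowrite32be(serviced, bridge->base + TSI148_LCSR_INTC);
                return IRQ_HANDLED;
        }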
332static int tsi148_irq_init(struct vme_bridge *bridge) 327static int tsi148_irq_init(struct vme_bridge *tsi148_bridge)
333{ 328{
334 int result; 329 int result;
335 unsigned int tmp; 330 unsigned int tmp;
336 struct pci_dev *pdev; 331 struct pci_dev *pdev;
332 struct tsi148_driver *bridge;
333
334 pdev = container_of(tsi148_bridge->parent, struct pci_dev, dev);
337 335
338 /* Need pdev */ 336 bridge = tsi148_bridge->driver_priv;
339 pdev = container_of(bridge->parent, struct pci_dev, dev);
340 337
341 /* Initialise list for VME bus errors */ 338 /* Initialise list for VME bus errors */
342 INIT_LIST_HEAD(&(bridge->vme_errors)); 339 INIT_LIST_HEAD(&(tsi148_bridge->vme_errors));
343 340
344 mutex_init(&(bridge->irq_mtx)); 341 mutex_init(&(tsi148_bridge->irq_mtx));
345 342
346 result = request_irq(pdev->irq, 343 result = request_irq(pdev->irq,
347 tsi148_irqhandler, 344 tsi148_irqhandler,
348 IRQF_SHARED, 345 IRQF_SHARED,
349 driver_name, pdev); 346 driver_name, tsi148_bridge);
350 if (result) { 347 if (result) {
351 dev_err(&pdev->dev, "Can't get assigned pci irq vector %02X\n", 348 dev_err(&pdev->dev, "Can't get assigned pci irq vector %02X\n",
352 pdev->irq); 349 pdev->irq);
@@ -360,7 +357,7 @@ static int tsi148_irq_init(struct vme_bridge *bridge)
360 TSI148_LCSR_INTEO_PERREO | TSI148_LCSR_INTEO_VERREO | 357 TSI148_LCSR_INTEO_PERREO | TSI148_LCSR_INTEO_VERREO |
361 TSI148_LCSR_INTEO_IACKEO; 358 TSI148_LCSR_INTEO_IACKEO;
362 359
363 /* XXX This leaves the following interrupts masked. 360 /* This leaves the following interrupts masked.
364 * TSI148_LCSR_INTEO_VIEEO 361 * TSI148_LCSR_INTEO_VIEEO
365 * TSI148_LCSR_INTEO_SYSFLEO 362 * TSI148_LCSR_INTEO_SYSFLEO
366 * TSI148_LCSR_INTEO_ACFLEO 363 * TSI148_LCSR_INTEO_ACFLEO
@@ -393,14 +390,14 @@ static int tsi148_irq_init(struct vme_bridge *bridge)
393 return 0; 390 return 0;
394} 391}
395 392
396static void tsi148_irq_exit(struct pci_dev *pdev) 393static void tsi148_irq_exit(struct tsi148_driver *bridge, struct pci_dev *pdev)
397{ 394{
398 /* Turn off interrupts */ 395 /* Turn off interrupts */
399 iowrite32be(0x0, tsi148_bridge->base + TSI148_LCSR_INTEO); 396 iowrite32be(0x0, bridge->base + TSI148_LCSR_INTEO);
400 iowrite32be(0x0, tsi148_bridge->base + TSI148_LCSR_INTEN); 397 iowrite32be(0x0, bridge->base + TSI148_LCSR_INTEN);
401 398
402 /* Clear all interrupts */ 399 /* Clear all interrupts */
403 iowrite32be(0xFFFFFFFF, tsi148_bridge->base + TSI148_LCSR_INTC); 400 iowrite32be(0xFFFFFFFF, bridge->base + TSI148_LCSR_INTC);
404 401
405 /* Detach interrupt handler */ 402 /* Detach interrupt handler */
406 free_irq(pdev->irq, pdev); 403 free_irq(pdev->irq, pdev);
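On the teardown side, free_irq() identifies a shared handler by its dev_id cookie, so it has to be given the same pointer that was passed to request_irq() above (tsi148_bridge after this change); the context line above still passes pdev, which matched only the old registration. A sketch of the exit path written with a matching cookie:

        /* Sketch: teardown mirrors setup - same line, same dev_id cookie. */
        static void example_irq_exit(struct vme_bridge *tsi148_bridge,
                struct pci_dev *pdev)
        {
                struct tsi148_driver *bridge = tsi148_bridge->driver_priv;

                /* Mask and clear everything before the handler disappears. */
                iowrite32be(0x0, bridge->base + TSI148_LCSR_INTEO);
                iowrite32be(0x0, bridge->base + TSI148_LCSR_INTEN);
                iowrite32be(0xFFFFFFFF, bridge->base + TSI148_LCSR_INTC);

                /* Must match the pointer registered with request_irq(). */
                free_irq(pdev->irq, tsi148_bridge);
        }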
@@ -409,11 +406,11 @@ static void tsi148_irq_exit(struct pci_dev *pdev)
409/* 406/*
410 * Check to see if an IACK has been received, return true (1) or false (0). 407 * Check to see if an IACK has been received, return true (1) or false (0).
411 */ 408 */
412int tsi148_iack_received(void) 409int tsi148_iack_received(struct tsi148_driver *bridge)
413{ 410{
414 u32 tmp; 411 u32 tmp;
415 412
416 tmp = ioread32be(tsi148_bridge->base + TSI148_LCSR_VICR); 413 tmp = ioread32be(bridge->base + TSI148_LCSR_VICR);
417 414
418 if (tmp & TSI148_LCSR_VICR_IRQS) 415 if (tmp & TSI148_LCSR_VICR_IRQS)
419 return 0; 416 return 0;
@@ -424,20 +421,24 @@ int tsi148_iack_received(void)
424/* 421/*
425 * Configure VME interrupt 422 * Configure VME interrupt
426 */ 423 */
427void tsi148_irq_set(int level, int state, int sync) 424void tsi148_irq_set(struct vme_bridge *tsi148_bridge, int level,
425 int state, int sync)
428{ 426{
429 struct pci_dev *pdev; 427 struct pci_dev *pdev;
430 u32 tmp; 428 u32 tmp;
429 struct tsi148_driver *bridge;
430
431 bridge = tsi148_bridge->driver_priv;
431 432
432 /* We need to do the ordering differently for enabling and disabling */ 433 /* We need to do the ordering differently for enabling and disabling */
433 if (state == 0) { 434 if (state == 0) {
434 tmp = ioread32be(tsi148_bridge->base + TSI148_LCSR_INTEN); 435 tmp = ioread32be(bridge->base + TSI148_LCSR_INTEN);
435 tmp &= ~TSI148_LCSR_INTEN_IRQEN[level - 1]; 436 tmp &= ~TSI148_LCSR_INTEN_IRQEN[level - 1];
436 iowrite32be(tmp, tsi148_bridge->base + TSI148_LCSR_INTEN); 437 iowrite32be(tmp, bridge->base + TSI148_LCSR_INTEN);
437 438
438 tmp = ioread32be(tsi148_bridge->base + TSI148_LCSR_INTEO); 439 tmp = ioread32be(bridge->base + TSI148_LCSR_INTEO);
439 tmp &= ~TSI148_LCSR_INTEO_IRQEO[level - 1]; 440 tmp &= ~TSI148_LCSR_INTEO_IRQEO[level - 1];
440 iowrite32be(tmp, tsi148_bridge->base + TSI148_LCSR_INTEO); 441 iowrite32be(tmp, bridge->base + TSI148_LCSR_INTEO);
441 442
442 if (sync != 0) { 443 if (sync != 0) {
443 pdev = container_of(tsi148_bridge->parent, 444 pdev = container_of(tsi148_bridge->parent,
@@ -446,13 +447,13 @@ void tsi148_irq_set(int level, int state, int sync)
446 synchronize_irq(pdev->irq); 447 synchronize_irq(pdev->irq);
447 } 448 }
448 } else { 449 } else {
449 tmp = ioread32be(tsi148_bridge->base + TSI148_LCSR_INTEO); 450 tmp = ioread32be(bridge->base + TSI148_LCSR_INTEO);
450 tmp |= TSI148_LCSR_INTEO_IRQEO[level - 1]; 451 tmp |= TSI148_LCSR_INTEO_IRQEO[level - 1];
451 iowrite32be(tmp, tsi148_bridge->base + TSI148_LCSR_INTEO); 452 iowrite32be(tmp, bridge->base + TSI148_LCSR_INTEO);
452 453
453 tmp = ioread32be(tsi148_bridge->base + TSI148_LCSR_INTEN); 454 tmp = ioread32be(bridge->base + TSI148_LCSR_INTEN);
454 tmp |= TSI148_LCSR_INTEN_IRQEN[level - 1]; 455 tmp |= TSI148_LCSR_INTEN_IRQEN[level - 1];
455 iowrite32be(tmp, tsi148_bridge->base + TSI148_LCSR_INTEN); 456 iowrite32be(tmp, bridge->base + TSI148_LCSR_INTEN);
456 } 457 }
457} 458}
458 459
@@ -460,28 +461,32 @@ void tsi148_irq_set(int level, int state, int sync)
460 * Generate a VME bus interrupt at the requested level & vector. Wait for 461 * Generate a VME bus interrupt at the requested level & vector. Wait for
461 * interrupt to be acked. 462 * interrupt to be acked.
462 */ 463 */
463int tsi148_irq_generate(int level, int statid) 464int tsi148_irq_generate(struct vme_bridge *tsi148_bridge, int level, int statid)
464{ 465{
465 u32 tmp; 466 u32 tmp;
467 struct tsi148_driver *bridge;
468
469 bridge = tsi148_bridge->driver_priv;
466 470
467 mutex_lock(&(vme_int)); 471 mutex_lock(&(bridge->vme_int));
468 472
469 /* Read VICR register */ 473 /* Read VICR register */
470 tmp = ioread32be(tsi148_bridge->base + TSI148_LCSR_VICR); 474 tmp = ioread32be(bridge->base + TSI148_LCSR_VICR);
471 475
472 /* Set Status/ID */ 476 /* Set Status/ID */
473 tmp = (tmp & ~TSI148_LCSR_VICR_STID_M) | 477 tmp = (tmp & ~TSI148_LCSR_VICR_STID_M) |
474 (statid & TSI148_LCSR_VICR_STID_M); 478 (statid & TSI148_LCSR_VICR_STID_M);
475 iowrite32be(tmp, tsi148_bridge->base + TSI148_LCSR_VICR); 479 iowrite32be(tmp, bridge->base + TSI148_LCSR_VICR);
476 480
477 /* Assert VMEbus IRQ */ 481 /* Assert VMEbus IRQ */
478 tmp = tmp | TSI148_LCSR_VICR_IRQL[level]; 482 tmp = tmp | TSI148_LCSR_VICR_IRQL[level];
479 iowrite32be(tmp, tsi148_bridge->base + TSI148_LCSR_VICR); 483 iowrite32be(tmp, bridge->base + TSI148_LCSR_VICR);
480 484
481 /* XXX Consider implementing a timeout? */ 485 /* XXX Consider implementing a timeout? */
482 wait_event_interruptible(iack_queue, tsi148_iack_received()); 486 wait_event_interruptible(bridge->iack_queue,
487 tsi148_iack_received(bridge));
483 488
484 mutex_unlock(&(vme_int)); 489 mutex_unlock(&(bridge->vme_int));
485 490
486 return 0; 491 return 0;
487} 492}
@@ -489,8 +494,8 @@ int tsi148_irq_generate(int level, int statid)
489/* 494/*
490 * Find the first error in this address range 495 * Find the first error in this address range
491 */ 496 */
492static struct vme_bus_error *tsi148_find_error(vme_address_t aspace, 497static struct vme_bus_error *tsi148_find_error(struct vme_bridge *tsi148_bridge,
493 unsigned long long address, size_t count) 498 vme_address_t aspace, unsigned long long address, size_t count)
494{ 499{
495 struct list_head *err_pos; 500 struct list_head *err_pos;
496 struct vme_bus_error *vme_err, *valid = NULL; 501 struct vme_bus_error *vme_err, *valid = NULL;
@@ -521,8 +526,8 @@ static struct vme_bus_error *tsi148_find_error(vme_address_t aspace,
521/* 526/*
522 * Clear errors in the provided address range. 527 * Clear errors in the provided address range.
523 */ 528 */
524static void tsi148_clear_errors(vme_address_t aspace, 529static void tsi148_clear_errors(struct vme_bridge *tsi148_bridge,
525 unsigned long long address, size_t count) 530 vme_address_t aspace, unsigned long long address, size_t count)
526{ 531{
527 struct list_head *err_pos, *temp; 532 struct list_head *err_pos, *temp;
528 struct vme_bus_error *vme_err; 533 struct vme_bus_error *vme_err;
@@ -562,6 +567,9 @@ int tsi148_slave_set(struct vme_slave_resource *image, int enabled,
562 unsigned int vme_bound_low, vme_bound_high; 567 unsigned int vme_bound_low, vme_bound_high;
563 unsigned int pci_offset_low, pci_offset_high; 568 unsigned int pci_offset_low, pci_offset_high;
564 unsigned long long vme_bound, pci_offset; 569 unsigned long long vme_bound, pci_offset;
570 struct tsi148_driver *bridge;
571
572 bridge = image->parent->driver_priv;
565 573
566#if 0 574#if 0
567 printk("Set slave image %d to:\n", image->number); 575 printk("Set slave image %d to:\n", image->number);
@@ -634,24 +642,24 @@ int tsi148_slave_set(struct vme_slave_resource *image, int enabled,
634#endif 642#endif
635 643
636 /* Disable while we are mucking around */ 644 /* Disable while we are mucking around */
637 temp_ctl = ioread32be(tsi148_bridge->base + TSI148_LCSR_IT[i] + 645 temp_ctl = ioread32be(bridge->base + TSI148_LCSR_IT[i] +
638 TSI148_LCSR_OFFSET_ITAT); 646 TSI148_LCSR_OFFSET_ITAT);
639 temp_ctl &= ~TSI148_LCSR_ITAT_EN; 647 temp_ctl &= ~TSI148_LCSR_ITAT_EN;
640 iowrite32be(temp_ctl, tsi148_bridge->base + TSI148_LCSR_IT[i] + 648 iowrite32be(temp_ctl, bridge->base + TSI148_LCSR_IT[i] +
641 TSI148_LCSR_OFFSET_ITAT); 649 TSI148_LCSR_OFFSET_ITAT);
642 650
643 /* Setup mapping */ 651 /* Setup mapping */
644 iowrite32be(vme_base_high, tsi148_bridge->base + TSI148_LCSR_IT[i] + 652 iowrite32be(vme_base_high, bridge->base + TSI148_LCSR_IT[i] +
645 TSI148_LCSR_OFFSET_ITSAU); 653 TSI148_LCSR_OFFSET_ITSAU);
646 iowrite32be(vme_base_low, tsi148_bridge->base + TSI148_LCSR_IT[i] + 654 iowrite32be(vme_base_low, bridge->base + TSI148_LCSR_IT[i] +
647 TSI148_LCSR_OFFSET_ITSAL); 655 TSI148_LCSR_OFFSET_ITSAL);
648 iowrite32be(vme_bound_high, tsi148_bridge->base + TSI148_LCSR_IT[i] + 656 iowrite32be(vme_bound_high, bridge->base + TSI148_LCSR_IT[i] +
649 TSI148_LCSR_OFFSET_ITEAU); 657 TSI148_LCSR_OFFSET_ITEAU);
650 iowrite32be(vme_bound_low, tsi148_bridge->base + TSI148_LCSR_IT[i] + 658 iowrite32be(vme_bound_low, bridge->base + TSI148_LCSR_IT[i] +
651 TSI148_LCSR_OFFSET_ITEAL); 659 TSI148_LCSR_OFFSET_ITEAL);
652 iowrite32be(pci_offset_high, tsi148_bridge->base + TSI148_LCSR_IT[i] + 660 iowrite32be(pci_offset_high, bridge->base + TSI148_LCSR_IT[i] +
653 TSI148_LCSR_OFFSET_ITOFU); 661 TSI148_LCSR_OFFSET_ITOFU);
654 iowrite32be(pci_offset_low, tsi148_bridge->base + TSI148_LCSR_IT[i] + 662 iowrite32be(pci_offset_low, bridge->base + TSI148_LCSR_IT[i] +
655 TSI148_LCSR_OFFSET_ITOFL); 663 TSI148_LCSR_OFFSET_ITOFL);
656 664
657/* XXX Prefetch stuff currently unsupported */ 665/* XXX Prefetch stuff currently unsupported */
@@ -713,13 +721,13 @@ int tsi148_slave_set(struct vme_slave_resource *image, int enabled,
713 temp_ctl |= TSI148_LCSR_ITAT_DATA; 721 temp_ctl |= TSI148_LCSR_ITAT_DATA;
714 722
715 /* Write ctl reg without enable */ 723 /* Write ctl reg without enable */
716 iowrite32be(temp_ctl, tsi148_bridge->base + TSI148_LCSR_IT[i] + 724 iowrite32be(temp_ctl, bridge->base + TSI148_LCSR_IT[i] +
717 TSI148_LCSR_OFFSET_ITAT); 725 TSI148_LCSR_OFFSET_ITAT);
718 726
719 if (enabled) 727 if (enabled)
720 temp_ctl |= TSI148_LCSR_ITAT_EN; 728 temp_ctl |= TSI148_LCSR_ITAT_EN;
721 729
722 iowrite32be(temp_ctl, tsi148_bridge->base + TSI148_LCSR_IT[i] + 730 iowrite32be(temp_ctl, bridge->base + TSI148_LCSR_IT[i] +
723 TSI148_LCSR_OFFSET_ITAT); 731 TSI148_LCSR_OFFSET_ITAT);
724 732
725 return 0; 733 return 0;
@@ -739,25 +747,27 @@ int tsi148_slave_get(struct vme_slave_resource *image, int *enabled,
739 unsigned int vme_bound_low, vme_bound_high; 747 unsigned int vme_bound_low, vme_bound_high;
740 unsigned int pci_offset_low, pci_offset_high; 748 unsigned int pci_offset_low, pci_offset_high;
741 unsigned long long vme_bound, pci_offset; 749 unsigned long long vme_bound, pci_offset;
750 struct tsi148_driver *bridge;
742 751
752 bridge = image->parent->driver_priv;
743 753
744 i = image->number; 754 i = image->number;
745 755
746 /* Read registers */ 756 /* Read registers */
747 ctl = ioread32be(tsi148_bridge->base + TSI148_LCSR_IT[i] + 757 ctl = ioread32be(bridge->base + TSI148_LCSR_IT[i] +
748 TSI148_LCSR_OFFSET_ITAT); 758 TSI148_LCSR_OFFSET_ITAT);
749 759
750 vme_base_high = ioread32be(tsi148_bridge->base + TSI148_LCSR_IT[i] + 760 vme_base_high = ioread32be(bridge->base + TSI148_LCSR_IT[i] +
751 TSI148_LCSR_OFFSET_ITSAU); 761 TSI148_LCSR_OFFSET_ITSAU);
752 vme_base_low = ioread32be(tsi148_bridge->base + TSI148_LCSR_IT[i] + 762 vme_base_low = ioread32be(bridge->base + TSI148_LCSR_IT[i] +
753 TSI148_LCSR_OFFSET_ITSAL); 763 TSI148_LCSR_OFFSET_ITSAL);
754 vme_bound_high = ioread32be(tsi148_bridge->base + TSI148_LCSR_IT[i] + 764 vme_bound_high = ioread32be(bridge->base + TSI148_LCSR_IT[i] +
755 TSI148_LCSR_OFFSET_ITEAU); 765 TSI148_LCSR_OFFSET_ITEAU);
756 vme_bound_low = ioread32be(tsi148_bridge->base + TSI148_LCSR_IT[i] + 766 vme_bound_low = ioread32be(bridge->base + TSI148_LCSR_IT[i] +
757 TSI148_LCSR_OFFSET_ITEAL); 767 TSI148_LCSR_OFFSET_ITEAL);
758 pci_offset_high = ioread32be(tsi148_bridge->base + TSI148_LCSR_IT[i] + 768 pci_offset_high = ioread32be(bridge->base + TSI148_LCSR_IT[i] +
759 TSI148_LCSR_OFFSET_ITOFU); 769 TSI148_LCSR_OFFSET_ITOFU);
760 pci_offset_low = ioread32be(tsi148_bridge->base + TSI148_LCSR_IT[i] + 770 pci_offset_low = ioread32be(bridge->base + TSI148_LCSR_IT[i] +
761 TSI148_LCSR_OFFSET_ITOFL); 771 TSI148_LCSR_OFFSET_ITOFL);
762 772
763 /* Convert 64-bit variables to 2x 32-bit variables */ 773 /* Convert 64-bit variables to 2x 32-bit variables */
@@ -834,6 +844,9 @@ static int tsi148_alloc_resource(struct vme_master_resource *image,
834 unsigned long long existing_size; 844 unsigned long long existing_size;
835 int retval = 0; 845 int retval = 0;
836 struct pci_dev *pdev; 846 struct pci_dev *pdev;
847 struct vme_bridge *tsi148_bridge;
848
849 tsi148_bridge = image->parent;
837 850
838 /* Find pci_dev container of dev */ 851 /* Find pci_dev container of dev */
839 if (tsi148_bridge->parent == NULL) { 852 if (tsi148_bridge->parent == NULL) {
@@ -938,6 +951,9 @@ int tsi148_master_set( struct vme_master_resource *image, int enabled,
938 unsigned int pci_bound_low, pci_bound_high; 951 unsigned int pci_bound_low, pci_bound_high;
939 unsigned int vme_offset_low, vme_offset_high; 952 unsigned int vme_offset_low, vme_offset_high;
940 unsigned long long pci_bound, vme_offset, pci_base; 953 unsigned long long pci_bound, vme_offset, pci_base;
954 struct tsi148_driver *bridge;
955
956 bridge = image->parent->driver_priv;
941 957
942 /* Verify input data */ 958 /* Verify input data */
943 if (vme_base & 0xFFFF) { 959 if (vme_base & 0xFFFF) {
@@ -1008,10 +1024,10 @@ int tsi148_master_set( struct vme_master_resource *image, int enabled,
1008 i = image->number; 1024 i = image->number;
1009 1025
1010 /* Disable while we are mucking around */ 1026 /* Disable while we are mucking around */
1011 temp_ctl = ioread32be(tsi148_bridge->base + TSI148_LCSR_OT[i] + 1027 temp_ctl = ioread32be(bridge->base + TSI148_LCSR_OT[i] +
1012 TSI148_LCSR_OFFSET_OTAT); 1028 TSI148_LCSR_OFFSET_OTAT);
1013 temp_ctl &= ~TSI148_LCSR_OTAT_EN; 1029 temp_ctl &= ~TSI148_LCSR_OTAT_EN;
1014 iowrite32be(temp_ctl, tsi148_bridge->base + TSI148_LCSR_OT[i] + 1030 iowrite32be(temp_ctl, bridge->base + TSI148_LCSR_OT[i] +
1015 TSI148_LCSR_OFFSET_OTAT); 1031 TSI148_LCSR_OFFSET_OTAT);
1016 1032
1017/* XXX Prefetch stuff currently unsupported */ 1033/* XXX Prefetch stuff currently unsupported */
@@ -1127,33 +1143,33 @@ int tsi148_master_set( struct vme_master_resource *image, int enabled,
1127 temp_ctl |= TSI148_LCSR_OTAT_PGM; 1143 temp_ctl |= TSI148_LCSR_OTAT_PGM;
1128 1144
1129 /* Setup mapping */ 1145 /* Setup mapping */
1130 iowrite32be(pci_base_high, tsi148_bridge->base + TSI148_LCSR_OT[i] + 1146 iowrite32be(pci_base_high, bridge->base + TSI148_LCSR_OT[i] +
1131 TSI148_LCSR_OFFSET_OTSAU); 1147 TSI148_LCSR_OFFSET_OTSAU);
1132 iowrite32be(pci_base_low, tsi148_bridge->base + TSI148_LCSR_OT[i] + 1148 iowrite32be(pci_base_low, bridge->base + TSI148_LCSR_OT[i] +
1133 TSI148_LCSR_OFFSET_OTSAL); 1149 TSI148_LCSR_OFFSET_OTSAL);
1134 iowrite32be(pci_bound_high, tsi148_bridge->base + TSI148_LCSR_OT[i] + 1150 iowrite32be(pci_bound_high, bridge->base + TSI148_LCSR_OT[i] +
1135 TSI148_LCSR_OFFSET_OTEAU); 1151 TSI148_LCSR_OFFSET_OTEAU);
1136 iowrite32be(pci_bound_low, tsi148_bridge->base + TSI148_LCSR_OT[i] + 1152 iowrite32be(pci_bound_low, bridge->base + TSI148_LCSR_OT[i] +
1137 TSI148_LCSR_OFFSET_OTEAL); 1153 TSI148_LCSR_OFFSET_OTEAL);
1138 iowrite32be(vme_offset_high, tsi148_bridge->base + TSI148_LCSR_OT[i] + 1154 iowrite32be(vme_offset_high, bridge->base + TSI148_LCSR_OT[i] +
1139 TSI148_LCSR_OFFSET_OTOFU); 1155 TSI148_LCSR_OFFSET_OTOFU);
1140 iowrite32be(vme_offset_low, tsi148_bridge->base + TSI148_LCSR_OT[i] + 1156 iowrite32be(vme_offset_low, bridge->base + TSI148_LCSR_OT[i] +
1141 TSI148_LCSR_OFFSET_OTOFL); 1157 TSI148_LCSR_OFFSET_OTOFL);
1142 1158
1143/* XXX We need to deal with OTBS */ 1159/* XXX We need to deal with OTBS */
1144#if 0 1160#if 0
1145 iowrite32be(vmeOut->bcastSelect2esst, tsi148_bridge->base + 1161 iowrite32be(vmeOut->bcastSelect2esst, bridge->base +
1146 TSI148_LCSR_OT[i] + TSI148_LCSR_OFFSET_OTBS); 1162 TSI148_LCSR_OT[i] + TSI148_LCSR_OFFSET_OTBS);
1147#endif 1163#endif
1148 1164
1149 /* Write ctl reg without enable */ 1165 /* Write ctl reg without enable */
1150 iowrite32be(temp_ctl, tsi148_bridge->base + TSI148_LCSR_OT[i] + 1166 iowrite32be(temp_ctl, bridge->base + TSI148_LCSR_OT[i] +
1151 TSI148_LCSR_OFFSET_OTAT); 1167 TSI148_LCSR_OFFSET_OTAT);
1152 1168
1153 if (enabled) 1169 if (enabled)
1154 temp_ctl |= TSI148_LCSR_OTAT_EN; 1170 temp_ctl |= TSI148_LCSR_OTAT_EN;
1155 1171
1156 iowrite32be(temp_ctl, tsi148_bridge->base + TSI148_LCSR_OT[i] + 1172 iowrite32be(temp_ctl, bridge->base + TSI148_LCSR_OT[i] +
1157 TSI148_LCSR_OFFSET_OTAT); 1173 TSI148_LCSR_OFFSET_OTAT);
1158 1174
1159 spin_unlock(&(image->lock)); 1175 spin_unlock(&(image->lock));
@@ -1184,23 +1200,26 @@ int __tsi148_master_get( struct vme_master_resource *image, int *enabled,
1184 unsigned int vme_offset_low, vme_offset_high; 1200 unsigned int vme_offset_low, vme_offset_high;
1185 1201
1186 unsigned long long pci_base, pci_bound, vme_offset; 1202 unsigned long long pci_base, pci_bound, vme_offset;
1203 struct tsi148_driver *bridge;
1204
1205 bridge = image->parent->driver_priv;
1187 1206
1188 i = image->number; 1207 i = image->number;
1189 1208
1190 ctl = ioread32be(tsi148_bridge->base + TSI148_LCSR_OT[i] + 1209 ctl = ioread32be(bridge->base + TSI148_LCSR_OT[i] +
1191 TSI148_LCSR_OFFSET_OTAT); 1210 TSI148_LCSR_OFFSET_OTAT);
1192 1211
1193 pci_base_high = ioread32be(tsi148_bridge->base + TSI148_LCSR_OT[i] + 1212 pci_base_high = ioread32be(bridge->base + TSI148_LCSR_OT[i] +
1194 TSI148_LCSR_OFFSET_OTSAU); 1213 TSI148_LCSR_OFFSET_OTSAU);
1195 pci_base_low = ioread32be(tsi148_bridge->base + TSI148_LCSR_OT[i] + 1214 pci_base_low = ioread32be(bridge->base + TSI148_LCSR_OT[i] +
1196 TSI148_LCSR_OFFSET_OTSAL); 1215 TSI148_LCSR_OFFSET_OTSAL);
1197 pci_bound_high = ioread32be(tsi148_bridge->base + TSI148_LCSR_OT[i] + 1216 pci_bound_high = ioread32be(bridge->base + TSI148_LCSR_OT[i] +
1198 TSI148_LCSR_OFFSET_OTEAU); 1217 TSI148_LCSR_OFFSET_OTEAU);
1199 pci_bound_low = ioread32be(tsi148_bridge->base + TSI148_LCSR_OT[i] + 1218 pci_bound_low = ioread32be(bridge->base + TSI148_LCSR_OT[i] +
1200 TSI148_LCSR_OFFSET_OTEAL); 1219 TSI148_LCSR_OFFSET_OTEAL);
1201 vme_offset_high = ioread32be(tsi148_bridge->base + TSI148_LCSR_OT[i] + 1220 vme_offset_high = ioread32be(bridge->base + TSI148_LCSR_OT[i] +
1202 TSI148_LCSR_OFFSET_OTOFU); 1221 TSI148_LCSR_OFFSET_OTOFU);
1203 vme_offset_low = ioread32be(tsi148_bridge->base + TSI148_LCSR_OT[i] + 1222 vme_offset_low = ioread32be(bridge->base + TSI148_LCSR_OT[i] +
1204 TSI148_LCSR_OFFSET_OTOFL); 1223 TSI148_LCSR_OFFSET_OTOFL);
1205 1224
1206 /* Convert 64-bit variables to 2x 32-bit variables */ 1225 /* Convert 64-bit variables to 2x 32-bit variables */
@@ -1306,6 +1325,9 @@ ssize_t tsi148_master_read(struct vme_master_resource *image, void *buf,
1306 vme_cycle_t cycle; 1325 vme_cycle_t cycle;
1307 vme_width_t dwidth; 1326 vme_width_t dwidth;
1308 struct vme_bus_error *vme_err = NULL; 1327 struct vme_bus_error *vme_err = NULL;
1328 struct vme_bridge *tsi148_bridge;
1329
1330 tsi148_bridge = image->parent;
1309 1331
1310 spin_lock(&(image->lock)); 1332 spin_lock(&(image->lock));
1311 1333
@@ -1318,13 +1340,15 @@ ssize_t tsi148_master_read(struct vme_master_resource *image, void *buf,
1318 __tsi148_master_get(image, &enabled, &vme_base, &size, &aspace, &cycle, 1340 __tsi148_master_get(image, &enabled, &vme_base, &size, &aspace, &cycle,
1319 &dwidth); 1341 &dwidth);
1320 1342
1321 vme_err = tsi148_find_error(aspace, vme_base + offset, count); 1343 vme_err = tsi148_find_error(tsi148_bridge, aspace, vme_base + offset,
1344 count);
1322 if(vme_err != NULL) { 1345 if(vme_err != NULL) {
1323 dev_err(image->parent->parent, "First VME read error detected " 1346 dev_err(image->parent->parent, "First VME read error detected "
1324 "at address 0x%llx\n", vme_err->address); 1347 "at address 0x%llx\n", vme_err->address);
1325 retval = vme_err->address - (vme_base + offset); 1348 retval = vme_err->address - (vme_base + offset);
1326 /* Clear down saved errors in this address range */ 1349 /* Clear down saved errors in this address range */
1327 tsi148_clear_errors(aspace, vme_base + offset, count); 1350 tsi148_clear_errors(tsi148_bridge, aspace, vme_base + offset,
1351 count);
1328 } 1352 }
1329 1353
1330skip_chk: 1354skip_chk:
@@ -1334,9 +1358,6 @@ skip_chk:
1334} 1358}
1335 1359
1336 1360
1337/* XXX We need to change vme_master_resource->mtx to a spinlock so that read
1338 * and write functions can be used in an interrupt context
1339 */
1340ssize_t tsi148_master_write(struct vme_master_resource *image, void *buf, 1361ssize_t tsi148_master_write(struct vme_master_resource *image, void *buf,
1341 size_t count, loff_t offset) 1362 size_t count, loff_t offset)
1342{ 1363{
@@ -1347,6 +1368,12 @@ ssize_t tsi148_master_write(struct vme_master_resource *image, void *buf,
1347 vme_width_t dwidth; 1368 vme_width_t dwidth;
1348 1369
1349 struct vme_bus_error *vme_err = NULL; 1370 struct vme_bus_error *vme_err = NULL;
1371 struct vme_bridge *tsi148_bridge;
1372 struct tsi148_driver *bridge;
1373
1374 tsi148_bridge = image->parent;
1375
1376 bridge = tsi148_bridge->driver_priv;
1350 1377
1351 spin_lock(&(image->lock)); 1378 spin_lock(&(image->lock));
1352 1379
@@ -1374,15 +1401,17 @@ ssize_t tsi148_master_write(struct vme_master_resource *image, void *buf,
1374 __tsi148_master_get(image, &enabled, &vme_base, &size, &aspace, &cycle, 1401 __tsi148_master_get(image, &enabled, &vme_base, &size, &aspace, &cycle,
1375 &dwidth); 1402 &dwidth);
1376 1403
1377 ioread16(flush_image->kern_base + 0x7F000); 1404 ioread16(bridge->flush_image->kern_base + 0x7F000);
1378 1405
1379 vme_err = tsi148_find_error(aspace, vme_base + offset, count); 1406 vme_err = tsi148_find_error(tsi148_bridge, aspace, vme_base + offset,
1407 count);
1380 if(vme_err != NULL) { 1408 if(vme_err != NULL) {
1381 printk("First VME write error detected at address 0x%llx\n", 1409 printk("First VME write error detected at address 0x%llx\n",
1382 vme_err->address); 1410 vme_err->address);
1383 retval = vme_err->address - (vme_base + offset); 1411 retval = vme_err->address - (vme_base + offset);
1384 /* Clear down saved errors in this address range */ 1412 /* Clear down saved errors in this address range */
1385 tsi148_clear_errors(aspace, vme_base + offset, count); 1413 tsi148_clear_errors(tsi148_bridge, aspace, vme_base + offset,
1414 count);
1386 } 1415 }
1387 1416
1388skip_chk: 1417skip_chk:
@@ -1404,48 +1433,50 @@ unsigned int tsi148_master_rmw(struct vme_master_resource *image,
1404 unsigned int pci_addr_high, pci_addr_low; 1433 unsigned int pci_addr_high, pci_addr_low;
1405 u32 tmp, result; 1434 u32 tmp, result;
1406 int i; 1435 int i;
1436 struct tsi148_driver *bridge;
1407 1437
1438 bridge = image->parent->driver_priv;
1408 1439
1409 /* Find the PCI address that maps to the desired VME address */ 1440 /* Find the PCI address that maps to the desired VME address */
1410 i = image->number; 1441 i = image->number;
1411 1442
1412 /* Locking as we can only do one of these at a time */ 1443 /* Locking as we can only do one of these at a time */
1413 mutex_lock(&(vme_rmw)); 1444 mutex_lock(&(bridge->vme_rmw));
1414 1445
1415 /* Lock image */ 1446 /* Lock image */
1416 spin_lock(&(image->lock)); 1447 spin_lock(&(image->lock));
1417 1448
1418 pci_addr_high = ioread32be(tsi148_bridge->base + TSI148_LCSR_OT[i] + 1449 pci_addr_high = ioread32be(bridge->base + TSI148_LCSR_OT[i] +
1419 TSI148_LCSR_OFFSET_OTSAU); 1450 TSI148_LCSR_OFFSET_OTSAU);
1420 pci_addr_low = ioread32be(tsi148_bridge->base + TSI148_LCSR_OT[i] + 1451 pci_addr_low = ioread32be(bridge->base + TSI148_LCSR_OT[i] +
1421 TSI148_LCSR_OFFSET_OTSAL); 1452 TSI148_LCSR_OFFSET_OTSAL);
1422 1453
1423 reg_join(pci_addr_high, pci_addr_low, &pci_addr); 1454 reg_join(pci_addr_high, pci_addr_low, &pci_addr);
1424 reg_split(pci_addr + offset, &pci_addr_high, &pci_addr_low); 1455 reg_split(pci_addr + offset, &pci_addr_high, &pci_addr_low);
1425 1456
1426 /* Configure registers */ 1457 /* Configure registers */
1427 iowrite32be(mask, tsi148_bridge->base + TSI148_LCSR_RMWEN); 1458 iowrite32be(mask, bridge->base + TSI148_LCSR_RMWEN);
1428 iowrite32be(compare, tsi148_bridge->base + TSI148_LCSR_RMWC); 1459 iowrite32be(compare, bridge->base + TSI148_LCSR_RMWC);
1429 iowrite32be(swap, tsi148_bridge->base + TSI148_LCSR_RMWS); 1460 iowrite32be(swap, bridge->base + TSI148_LCSR_RMWS);
1430 iowrite32be(pci_addr_high, tsi148_bridge->base + TSI148_LCSR_RMWAU); 1461 iowrite32be(pci_addr_high, bridge->base + TSI148_LCSR_RMWAU);
1431 iowrite32be(pci_addr_low, tsi148_bridge->base + TSI148_LCSR_RMWAL); 1462 iowrite32be(pci_addr_low, bridge->base + TSI148_LCSR_RMWAL);
1432 1463
1433 /* Enable RMW */ 1464 /* Enable RMW */
1434 tmp = ioread32be(tsi148_bridge->base + TSI148_LCSR_VMCTRL); 1465 tmp = ioread32be(bridge->base + TSI148_LCSR_VMCTRL);
1435 tmp |= TSI148_LCSR_VMCTRL_RMWEN; 1466 tmp |= TSI148_LCSR_VMCTRL_RMWEN;
1436 iowrite32be(tmp, tsi148_bridge->base + TSI148_LCSR_VMCTRL); 1467 iowrite32be(tmp, bridge->base + TSI148_LCSR_VMCTRL);
1437 1468
1438 /* Kick process off with a read to the required address. */ 1469 /* Kick process off with a read to the required address. */
1439 result = ioread32be(image->kern_base + offset); 1470 result = ioread32be(image->kern_base + offset);
1440 1471
1441 /* Disable RMW */ 1472 /* Disable RMW */
1442 tmp = ioread32be(tsi148_bridge->base + TSI148_LCSR_VMCTRL); 1473 tmp = ioread32be(bridge->base + TSI148_LCSR_VMCTRL);
1443 tmp &= ~TSI148_LCSR_VMCTRL_RMWEN; 1474 tmp &= ~TSI148_LCSR_VMCTRL_RMWEN;
1444 iowrite32be(tmp, tsi148_bridge->base + TSI148_LCSR_VMCTRL); 1475 iowrite32be(tmp, bridge->base + TSI148_LCSR_VMCTRL);
1445 1476
1446 spin_unlock(&(image->lock)); 1477 spin_unlock(&(image->lock));
1447 1478
1448 mutex_unlock(&(vme_rmw)); 1479 mutex_unlock(&(bridge->vme_rmw));
1449 1480
1450 return result; 1481 return result;
1451} 1482}
@@ -1789,11 +1820,14 @@ err_mem:
1789/* 1820/*
1790 * Check to see if the provided DMA channel is busy. 1821 * Check to see if the provided DMA channel is busy.
1791 */ 1822 */
1792static int tsi148_dma_busy(int channel) 1823static int tsi148_dma_busy(struct vme_bridge *tsi148_bridge, int channel)
1793{ 1824{
1794 u32 tmp; 1825 u32 tmp;
1826 struct tsi148_driver *bridge;
1827
1828 bridge = tsi148_bridge->driver_priv;
1795 1829
1796 tmp = ioread32be(tsi148_bridge->base + TSI148_LCSR_DMA[channel] + 1830 tmp = ioread32be(bridge->base + TSI148_LCSR_DMA[channel] +
1797 TSI148_LCSR_OFFSET_DSTA); 1831 TSI148_LCSR_OFFSET_DSTA);
1798 1832
1799 if (tmp & TSI148_LCSR_DSTA_BSY) 1833 if (tmp & TSI148_LCSR_DSTA_BSY)
@@ -1819,9 +1853,12 @@ int tsi148_dma_list_exec(struct vme_dma_list *list)
1819#if 0 1853#if 0
1820 int x; 1854 int x;
1821#endif 1855#endif
1856 struct tsi148_driver *bridge;
1822 1857
1823 ctrlr = list->parent; 1858 ctrlr = list->parent;
1824 1859
1860 bridge = ctrlr->parent->driver_priv;
1861
1825 mutex_lock(&(ctrlr->mtx)); 1862 mutex_lock(&(ctrlr->mtx));
1826 1863
1827 channel = ctrlr->number; 1864 channel = ctrlr->number;
@@ -1891,21 +1928,22 @@ int tsi148_dma_list_exec(struct vme_dma_list *list)
1891 1928
1892 reg_split(bus_addr, &bus_addr_high, &bus_addr_low); 1929 reg_split(bus_addr, &bus_addr_high, &bus_addr_low);
1893 1930
1894 iowrite32be(bus_addr_high, tsi148_bridge->base + 1931 iowrite32be(bus_addr_high, bridge->base +
1895 TSI148_LCSR_DMA[channel] + TSI148_LCSR_OFFSET_DNLAU); 1932 TSI148_LCSR_DMA[channel] + TSI148_LCSR_OFFSET_DNLAU);
1896 iowrite32be(bus_addr_low, tsi148_bridge->base + 1933 iowrite32be(bus_addr_low, bridge->base +
1897 TSI148_LCSR_DMA[channel] + TSI148_LCSR_OFFSET_DNLAL); 1934 TSI148_LCSR_DMA[channel] + TSI148_LCSR_OFFSET_DNLAL);
1898 1935
1899 /* Start the operation */ 1936 /* Start the operation */
1900 iowrite32be(dctlreg | TSI148_LCSR_DCTL_DGO, tsi148_bridge->base + 1937 iowrite32be(dctlreg | TSI148_LCSR_DCTL_DGO, bridge->base +
1901 TSI148_LCSR_DMA[channel] + TSI148_LCSR_OFFSET_DCTL); 1938 TSI148_LCSR_DMA[channel] + TSI148_LCSR_OFFSET_DCTL);
1902 1939
1903 wait_event_interruptible(dma_queue[channel], tsi148_dma_busy(channel)); 1940 wait_event_interruptible(bridge->dma_queue[channel],
1941 tsi148_dma_busy(ctrlr->parent, channel));
1904 /* 1942 /*
1905 * Read status register, this register is valid until we kick off a 1943 * Read status register, this register is valid until we kick off a
1906 * new transfer. 1944 * new transfer.
1907 */ 1945 */
1908 val = ioread32be(tsi148_bridge->base + TSI148_LCSR_DMA[channel] + 1946 val = ioread32be(bridge->base + TSI148_LCSR_DMA[channel] +
1909 TSI148_LCSR_OFFSET_DSTA); 1947 TSI148_LCSR_OFFSET_DSTA);
1910 1948
1911 if (val & TSI148_LCSR_DSTA_VBE) { 1949 if (val & TSI148_LCSR_DSTA_VBE) {
@@ -1953,12 +1991,15 @@ int tsi148_lm_set(struct vme_lm_resource *lm, unsigned long long lm_base,
1953{ 1991{
1954 u32 lm_base_high, lm_base_low, lm_ctl = 0; 1992 u32 lm_base_high, lm_base_low, lm_ctl = 0;
1955 int i; 1993 int i;
1994 struct tsi148_driver *bridge;
1995
1996 bridge = lm->parent->driver_priv;
1956 1997
1957 mutex_lock(&(lm->mtx)); 1998 mutex_lock(&(lm->mtx));
1958 1999
1959 /* If we already have a callback attached, we can't move it! */ 2000 /* If we already have a callback attached, we can't move it! */
1960 for (i = 0; i < lm->monitors; i++) { 2001 for (i = 0; i < lm->monitors; i++) {
1961 if(lm_callback[i] != NULL) { 2002 if (bridge->lm_callback[i] != NULL) {
1962 mutex_unlock(&(lm->mtx)); 2003 mutex_unlock(&(lm->mtx));
1963 printk("Location monitor callback attached, can't " 2004 printk("Location monitor callback attached, can't "
1964 "reset\n"); 2005 "reset\n");
@@ -1997,9 +2038,9 @@ int tsi148_lm_set(struct vme_lm_resource *lm, unsigned long long lm_base,
1997 2038
1998 reg_split(lm_base, &lm_base_high, &lm_base_low); 2039 reg_split(lm_base, &lm_base_high, &lm_base_low);
1999 2040
2000 iowrite32be(lm_base_high, tsi148_bridge->base + TSI148_LCSR_LMBAU); 2041 iowrite32be(lm_base_high, bridge->base + TSI148_LCSR_LMBAU);
2001 iowrite32be(lm_base_low, tsi148_bridge->base + TSI148_LCSR_LMBAL); 2042 iowrite32be(lm_base_low, bridge->base + TSI148_LCSR_LMBAL);
2002 iowrite32be(lm_ctl, tsi148_bridge->base + TSI148_LCSR_LMAT); 2043 iowrite32be(lm_ctl, bridge->base + TSI148_LCSR_LMAT);
2003 2044
2004 mutex_unlock(&(lm->mtx)); 2045 mutex_unlock(&(lm->mtx));
2005 2046
@@ -2013,12 +2054,15 @@ int tsi148_lm_get(struct vme_lm_resource *lm, unsigned long long *lm_base,
2013 vme_address_t *aspace, vme_cycle_t *cycle) 2054 vme_address_t *aspace, vme_cycle_t *cycle)
2014{ 2055{
2015 u32 lm_base_high, lm_base_low, lm_ctl, enabled = 0; 2056 u32 lm_base_high, lm_base_low, lm_ctl, enabled = 0;
2057 struct tsi148_driver *bridge;
2058
2059 bridge = lm->parent->driver_priv;
2016 2060
2017 mutex_lock(&(lm->mtx)); 2061 mutex_lock(&(lm->mtx));
2018 2062
2019 lm_base_high = ioread32be(tsi148_bridge->base + TSI148_LCSR_LMBAU); 2063 lm_base_high = ioread32be(bridge->base + TSI148_LCSR_LMBAU);
2020 lm_base_low = ioread32be(tsi148_bridge->base + TSI148_LCSR_LMBAL); 2064 lm_base_low = ioread32be(bridge->base + TSI148_LCSR_LMBAL);
2021 lm_ctl = ioread32be(tsi148_bridge->base + TSI148_LCSR_LMAT); 2065 lm_ctl = ioread32be(bridge->base + TSI148_LCSR_LMAT);
2022 2066
2023 reg_join(lm_base_high, lm_base_low, lm_base); 2067 reg_join(lm_base_high, lm_base_low, lm_base);
2024 2068
@@ -2061,11 +2105,14 @@ int tsi148_lm_attach(struct vme_lm_resource *lm, int monitor,
2061 void (*callback)(int)) 2105 void (*callback)(int))
2062{ 2106{
2063 u32 lm_ctl, tmp; 2107 u32 lm_ctl, tmp;
2108 struct tsi148_driver *bridge;
2109
2110 bridge = lm->parent->driver_priv;
2064 2111
2065 mutex_lock(&(lm->mtx)); 2112 mutex_lock(&(lm->mtx));
2066 2113
2067 /* Ensure that the location monitor is configured - need PGM or DATA */ 2114 /* Ensure that the location monitor is configured - need PGM or DATA */
2068 lm_ctl = ioread32be(tsi148_bridge->base + TSI148_LCSR_LMAT); 2115 lm_ctl = ioread32be(bridge->base + TSI148_LCSR_LMAT);
2069 if ((lm_ctl & (TSI148_LCSR_LMAT_PGM | TSI148_LCSR_LMAT_DATA)) == 0) { 2116 if ((lm_ctl & (TSI148_LCSR_LMAT_PGM | TSI148_LCSR_LMAT_DATA)) == 0) {
2070 mutex_unlock(&(lm->mtx)); 2117 mutex_unlock(&(lm->mtx));
2071 printk("Location monitor not properly configured\n"); 2118 printk("Location monitor not properly configured\n");
@@ -2073,28 +2120,28 @@ int tsi148_lm_attach(struct vme_lm_resource *lm, int monitor,
2073 } 2120 }
2074 2121
2075 /* Check that a callback isn't already attached */ 2122 /* Check that a callback isn't already attached */
2076 if (lm_callback[monitor] != NULL) { 2123 if (bridge->lm_callback[monitor] != NULL) {
2077 mutex_unlock(&(lm->mtx)); 2124 mutex_unlock(&(lm->mtx));
2078 printk("Existing callback attached\n"); 2125 printk("Existing callback attached\n");
2079 return -EBUSY; 2126 return -EBUSY;
2080 } 2127 }
2081 2128
2082 /* Attach callback */ 2129 /* Attach callback */
2083 lm_callback[monitor] = callback; 2130 bridge->lm_callback[monitor] = callback;
2084 2131
2085 /* Enable Location Monitor interrupt */ 2132 /* Enable Location Monitor interrupt */
2086 tmp = ioread32be(tsi148_bridge->base + TSI148_LCSR_INTEN); 2133 tmp = ioread32be(bridge->base + TSI148_LCSR_INTEN);
2087 tmp |= TSI148_LCSR_INTEN_LMEN[monitor]; 2134 tmp |= TSI148_LCSR_INTEN_LMEN[monitor];
2088 iowrite32be(tmp, tsi148_bridge->base + TSI148_LCSR_INTEN); 2135 iowrite32be(tmp, bridge->base + TSI148_LCSR_INTEN);
2089 2136
2090 tmp = ioread32be(tsi148_bridge->base + TSI148_LCSR_INTEO); 2137 tmp = ioread32be(bridge->base + TSI148_LCSR_INTEO);
2091 tmp |= TSI148_LCSR_INTEO_LMEO[monitor]; 2138 tmp |= TSI148_LCSR_INTEO_LMEO[monitor];
2092 iowrite32be(tmp, tsi148_bridge->base + TSI148_LCSR_INTEO); 2139 iowrite32be(tmp, bridge->base + TSI148_LCSR_INTEO);
2093 2140
2094 /* Ensure that global Location Monitor Enable set */ 2141 /* Ensure that global Location Monitor Enable set */
2095 if ((lm_ctl & TSI148_LCSR_LMAT_EN) == 0) { 2142 if ((lm_ctl & TSI148_LCSR_LMAT_EN) == 0) {
2096 lm_ctl |= TSI148_LCSR_LMAT_EN; 2143 lm_ctl |= TSI148_LCSR_LMAT_EN;
2097 iowrite32be(lm_ctl, tsi148_bridge->base + TSI148_LCSR_LMAT); 2144 iowrite32be(lm_ctl, bridge->base + TSI148_LCSR_LMAT);
2098 } 2145 }
2099 2146
2100 mutex_unlock(&(lm->mtx)); 2147 mutex_unlock(&(lm->mtx));
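The location monitor callbacks move into a per-bridge lm_callback[] array, and the LM interrupt sub-handler dereferences that array directly, so the ordering under lm->mtx matters: attach publishes the callback before unmasking the monitor's interrupt, while detach (next hunk) masks and clears the interrupt before dropping the callback. Condensed from those two functions, as a fragment that assumes bridge, tmp and monitor are in scope:

        /* attach: publish the callback, then unmask its interrupt */
        bridge->lm_callback[monitor] = callback;
        tmp = ioread32be(bridge->base + TSI148_LCSR_INTEN);
        iowrite32be(tmp | TSI148_LCSR_INTEN_LMEN[monitor],
                        bridge->base + TSI148_LCSR_INTEN);

        /* detach: mask and clear the interrupt first, then drop the callback */
        tmp = ioread32be(bridge->base + TSI148_LCSR_INTEN);
        iowrite32be(tmp & ~TSI148_LCSR_INTEN_LMEN[monitor],
                        bridge->base + TSI148_LCSR_INTEN);
        iowrite32be(TSI148_LCSR_INTC_LMC[monitor],
                        bridge->base + TSI148_LCSR_INTC);
        bridge->lm_callback[monitor] = NULL;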
@@ -2108,30 +2155,33 @@ int tsi148_lm_attach(struct vme_lm_resource *lm, int monitor,
2108int tsi148_lm_detach(struct vme_lm_resource *lm, int monitor) 2155int tsi148_lm_detach(struct vme_lm_resource *lm, int monitor)
2109{ 2156{
2110 u32 lm_en, tmp; 2157 u32 lm_en, tmp;
2158 struct tsi148_driver *bridge;
2159
2160 bridge = lm->parent->driver_priv;
2111 2161
2112 mutex_lock(&(lm->mtx)); 2162 mutex_lock(&(lm->mtx));
2113 2163
2114 /* Disable Location Monitor and ensure previous interrupts are clear */ 2164 /* Disable Location Monitor and ensure previous interrupts are clear */
2115 lm_en = ioread32be(tsi148_bridge->base + TSI148_LCSR_INTEN); 2165 lm_en = ioread32be(bridge->base + TSI148_LCSR_INTEN);
2116 lm_en &= ~TSI148_LCSR_INTEN_LMEN[monitor]; 2166 lm_en &= ~TSI148_LCSR_INTEN_LMEN[monitor];
2117 iowrite32be(lm_en, tsi148_bridge->base + TSI148_LCSR_INTEN); 2167 iowrite32be(lm_en, bridge->base + TSI148_LCSR_INTEN);
2118 2168
2119 tmp = ioread32be(tsi148_bridge->base + TSI148_LCSR_INTEO); 2169 tmp = ioread32be(bridge->base + TSI148_LCSR_INTEO);
2120 tmp &= ~TSI148_LCSR_INTEO_LMEO[monitor]; 2170 tmp &= ~TSI148_LCSR_INTEO_LMEO[monitor];
2121 iowrite32be(tmp, tsi148_bridge->base + TSI148_LCSR_INTEO); 2171 iowrite32be(tmp, bridge->base + TSI148_LCSR_INTEO);
2122 2172
2123 iowrite32be(TSI148_LCSR_INTC_LMC[monitor], 2173 iowrite32be(TSI148_LCSR_INTC_LMC[monitor],
2124 tsi148_bridge->base + TSI148_LCSR_INTC); 2174 bridge->base + TSI148_LCSR_INTC);
2125 2175
2126 /* Detach callback */ 2176 /* Detach callback */
2127 lm_callback[monitor] = NULL; 2177 bridge->lm_callback[monitor] = NULL;
2128 2178
2129 /* If all location monitors disabled, disable global Location Monitor */ 2179 /* If all location monitors disabled, disable global Location Monitor */
2130 if ((lm_en & (TSI148_LCSR_INTS_LM0S | TSI148_LCSR_INTS_LM1S | 2180 if ((lm_en & (TSI148_LCSR_INTS_LM0S | TSI148_LCSR_INTS_LM1S |
2131 TSI148_LCSR_INTS_LM2S | TSI148_LCSR_INTS_LM3S)) == 0) { 2181 TSI148_LCSR_INTS_LM2S | TSI148_LCSR_INTS_LM3S)) == 0) {
2132 tmp = ioread32be(tsi148_bridge->base + TSI148_LCSR_LMAT); 2182 tmp = ioread32be(bridge->base + TSI148_LCSR_LMAT);
2133 tmp &= ~TSI148_LCSR_LMAT_EN; 2183 tmp &= ~TSI148_LCSR_LMAT_EN;
2134 iowrite32be(tmp, tsi148_bridge->base + TSI148_LCSR_LMAT); 2184 iowrite32be(tmp, bridge->base + TSI148_LCSR_LMAT);
2135 } 2185 }
2136 2186
2137 mutex_unlock(&(lm->mtx)); 2187 mutex_unlock(&(lm->mtx));
@@ -2142,12 +2192,15 @@ int tsi148_lm_detach(struct vme_lm_resource *lm, int monitor)
2142/* 2192/*
2143 * Determine Geographical Addressing 2193 * Determine Geographical Addressing
2144 */ 2194 */
2145int tsi148_slot_get(void) 2195int tsi148_slot_get(struct vme_bridge *tsi148_bridge)
2146{ 2196{
2147 u32 slot = 0; 2197 u32 slot = 0;
2198 struct tsi148_driver *bridge;
2199
2200 bridge = tsi148_bridge->driver_priv;
2148 2201
2149 if (!geoid) { 2202 if (!geoid) {
2150 slot = ioread32be(tsi148_bridge->base + TSI148_LCSR_VSTAT); 2203 slot = ioread32be(bridge->base + TSI148_LCSR_VSTAT);
2151 slot = slot & TSI148_LCSR_VSTAT_GA_M; 2204 slot = slot & TSI148_LCSR_VSTAT_GA_M;
2152 } else 2205 } else
2153 slot = geoid; 2206 slot = geoid;
@@ -2172,46 +2225,50 @@ static int __init tsi148_init(void)
2172 * board's registers, this means there is a fixed-length 508kB window which must 2225 * board's registers, this means there is a fixed-length 508kB window which must
2173 * be mapped onto PCI memory. 2226 * be mapped onto PCI memory.
2174 */ 2227 */
2175static int tsi148_crcsr_init(struct pci_dev *pdev) 2228static int tsi148_crcsr_init(struct vme_bridge *tsi148_bridge,
2229 struct pci_dev *pdev)
2176{ 2230{
2177 u32 cbar, crat, vstat; 2231 u32 cbar, crat, vstat;
2178 u32 crcsr_bus_high, crcsr_bus_low; 2232 u32 crcsr_bus_high, crcsr_bus_low;
2179 int retval; 2233 int retval;
2234 struct tsi148_driver *bridge;
2235
2236 bridge = tsi148_bridge->driver_priv;
2180 2237
2181 /* Allocate mem for CR/CSR image */ 2238 /* Allocate mem for CR/CSR image */
2182 crcsr_kernel = pci_alloc_consistent(pdev, VME_CRCSR_BUF_SIZE, 2239 bridge->crcsr_kernel = pci_alloc_consistent(pdev, VME_CRCSR_BUF_SIZE,
2183 &crcsr_bus); 2240 &(bridge->crcsr_bus));
2184 if (crcsr_kernel == NULL) { 2241 if (bridge->crcsr_kernel == NULL) {
2185 dev_err(&pdev->dev, "Failed to allocate memory for CR/CSR " 2242 dev_err(&pdev->dev, "Failed to allocate memory for CR/CSR "
2186 "image\n"); 2243 "image\n");
2187 return -ENOMEM; 2244 return -ENOMEM;
2188 } 2245 }
2189 2246
2190 memset(crcsr_kernel, 0, VME_CRCSR_BUF_SIZE); 2247 memset(bridge->crcsr_kernel, 0, VME_CRCSR_BUF_SIZE);
2191 2248
2192 reg_split(crcsr_bus, &crcsr_bus_high, &crcsr_bus_low); 2249 reg_split(bridge->crcsr_bus, &crcsr_bus_high, &crcsr_bus_low);
2193 2250
2194 iowrite32be(crcsr_bus_high, tsi148_bridge->base + TSI148_LCSR_CROU); 2251 iowrite32be(crcsr_bus_high, bridge->base + TSI148_LCSR_CROU);
2195 iowrite32be(crcsr_bus_low, tsi148_bridge->base + TSI148_LCSR_CROL); 2252 iowrite32be(crcsr_bus_low, bridge->base + TSI148_LCSR_CROL);
2196 2253
2197 /* Ensure that the CR/CSR is configured at the correct offset */ 2254 /* Ensure that the CR/CSR is configured at the correct offset */
2198 cbar = ioread32be(tsi148_bridge->base + TSI148_CBAR); 2255 cbar = ioread32be(bridge->base + TSI148_CBAR);
2199 cbar = (cbar & TSI148_CRCSR_CBAR_M)>>3; 2256 cbar = (cbar & TSI148_CRCSR_CBAR_M)>>3;
2200 2257
2201 vstat = tsi148_slot_get(); 2258 vstat = tsi148_slot_get(tsi148_bridge);
2202 2259
2203 if (cbar != vstat) { 2260 if (cbar != vstat) {
2204 cbar = vstat; 2261 cbar = vstat;
2205 dev_info(&pdev->dev, "Setting CR/CSR offset\n"); 2262 dev_info(&pdev->dev, "Setting CR/CSR offset\n");
2206 iowrite32be(cbar<<3, tsi148_bridge->base + TSI148_CBAR); 2263 iowrite32be(cbar<<3, bridge->base + TSI148_CBAR);
2207 } 2264 }
2208 dev_info(&pdev->dev, "CR/CSR Offset: %d\n", cbar); 2265 dev_info(&pdev->dev, "CR/CSR Offset: %d\n", cbar);
2209 2266
2210 crat = ioread32be(tsi148_bridge->base + TSI148_LCSR_CRAT); 2267 crat = ioread32be(bridge->base + TSI148_LCSR_CRAT);
2211 if (crat & TSI148_LCSR_CRAT_EN) { 2268 if (crat & TSI148_LCSR_CRAT_EN) {
2212 dev_info(&pdev->dev, "Enabling CR/CSR space\n"); 2269 dev_info(&pdev->dev, "Enabling CR/CSR space\n");
2213 iowrite32be(crat | TSI148_LCSR_CRAT_EN, 2270 iowrite32be(crat | TSI148_LCSR_CRAT_EN,
2214 tsi148_bridge->base + TSI148_LCSR_CRAT); 2271 bridge->base + TSI148_LCSR_CRAT);
2215 } else 2272 } else
2216 dev_info(&pdev->dev, "CR/CSR already enabled\n"); 2273 dev_info(&pdev->dev, "CR/CSR already enabled\n");
2217 2274
@@ -2220,8 +2277,9 @@ static int tsi148_crcsr_init(struct pci_dev *pdev)
2220 * through VME writes. 2277 * through VME writes.
2221 */ 2278 */
2222 if(err_chk) { 2279 if(err_chk) {
2223 retval = tsi148_master_set(flush_image, 1, (vstat * 0x80000), 2280 retval = tsi148_master_set(bridge->flush_image, 1,
2224 0x80000, VME_CRCSR, VME_SCT, VME_D16); 2281 (vstat * 0x80000), 0x80000, VME_CRCSR, VME_SCT,
2282 VME_D16);
2225 if (retval) 2283 if (retval)
2226 dev_err(&pdev->dev, "Configuring flush image failed\n"); 2284 dev_err(&pdev->dev, "Configuring flush image failed\n");
2227 } 2285 }
@@ -2230,20 +2288,25 @@ static int tsi148_crcsr_init(struct pci_dev *pdev)
2230 2288
2231} 2289}
2232 2290
2233static void tsi148_crcsr_exit(struct pci_dev *pdev) 2291static void tsi148_crcsr_exit(struct vme_bridge *tsi148_bridge,
2292 struct pci_dev *pdev)
2234{ 2293{
2235 u32 crat; 2294 u32 crat;
2295 struct tsi148_driver *bridge;
2296
2297 bridge = tsi148_bridge->driver_priv;
2236 2298
2237 /* Turn off CR/CSR space */ 2299 /* Turn off CR/CSR space */
2238 crat = ioread32be(tsi148_bridge->base + TSI148_LCSR_CRAT); 2300 crat = ioread32be(bridge->base + TSI148_LCSR_CRAT);
2239 iowrite32be(crat & ~TSI148_LCSR_CRAT_EN, 2301 iowrite32be(crat & ~TSI148_LCSR_CRAT_EN,
2240 tsi148_bridge->base + TSI148_LCSR_CRAT); 2302 bridge->base + TSI148_LCSR_CRAT);
2241 2303
2242 /* Free image */ 2304 /* Free image */
2243 iowrite32be(0, tsi148_bridge->base + TSI148_LCSR_CROU); 2305 iowrite32be(0, bridge->base + TSI148_LCSR_CROU);
2244 iowrite32be(0, tsi148_bridge->base + TSI148_LCSR_CROL); 2306 iowrite32be(0, bridge->base + TSI148_LCSR_CROL);
2245 2307
2246 pci_free_consistent(pdev, VME_CRCSR_BUF_SIZE, crcsr_kernel, crcsr_bus); 2308 pci_free_consistent(pdev, VME_CRCSR_BUF_SIZE, bridge->crcsr_kernel,
2309 bridge->crcsr_bus);
2247} 2310}
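The CR/CSR image is a DMA-coherent buffer: pci_alloc_consistent() returns both a kernel virtual address (crcsr_kernel) and the bus address (crcsr_bus) that gets split into CROU/CROL, and pci_free_consistent() in the exit path must be handed back the same pair, now stored per bridge. Reduced to the allocation bookkeeping, the lifecycle is roughly this sketch:

        /* Sketch: per-bridge CR/CSR image lifecycle (error handling trimmed). */
        static int example_crcsr_alloc(struct tsi148_driver *bridge,
                struct pci_dev *pdev)
        {
                u32 hi, lo;

                bridge->crcsr_kernel = pci_alloc_consistent(pdev,
                        VME_CRCSR_BUF_SIZE, &bridge->crcsr_bus);
                if (bridge->crcsr_kernel == NULL)
                        return -ENOMEM;

                memset(bridge->crcsr_kernel, 0, VME_CRCSR_BUF_SIZE);

                /* Tell the bridge where the image lives on the PCI bus. */
                reg_split(bridge->crcsr_bus, &hi, &lo);
                iowrite32be(hi, bridge->base + TSI148_LCSR_CROU);
                iowrite32be(lo, bridge->base + TSI148_LCSR_CROL);
                return 0;
        }

        static void example_crcsr_free(struct tsi148_driver *bridge,
                struct pci_dev *pdev)
        {
                pci_free_consistent(pdev, VME_CRCSR_BUF_SIZE,
                        bridge->crcsr_kernel, bridge->crcsr_bus);
        }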
2248 2311
2249static int tsi148_probe(struct pci_dev *pdev, const struct pci_device_id *id) 2312static int tsi148_probe(struct pci_dev *pdev, const struct pci_device_id *id)
@@ -2251,6 +2314,8 @@ static int tsi148_probe(struct pci_dev *pdev, const struct pci_device_id *id)
2251 int retval, i, master_num; 2314 int retval, i, master_num;
2252 u32 data; 2315 u32 data;
2253 struct list_head *pos = NULL; 2316 struct list_head *pos = NULL;
2317 struct vme_bridge *tsi148_bridge;
2318 struct tsi148_driver *tsi148_device;
2254 struct vme_master_resource *master_image; 2319 struct vme_master_resource *master_image;
2255 struct vme_slave_resource *slave_image; 2320 struct vme_slave_resource *slave_image;
2256 struct vme_dma_resource *dma_ctrlr; 2321 struct vme_dma_resource *dma_ctrlr;
@@ -2270,6 +2335,18 @@ static int tsi148_probe(struct pci_dev *pdev, const struct pci_device_id *id)
2270 2335
2271 memset(tsi148_bridge, 0, sizeof(struct vme_bridge)); 2336 memset(tsi148_bridge, 0, sizeof(struct vme_bridge));
2272 2337
2338 tsi148_device = kmalloc(sizeof(struct tsi148_driver), GFP_KERNEL);
2339 if (tsi148_device == NULL) {
2340 dev_err(&pdev->dev, "Failed to allocate memory for device "
2341 "structure\n");
2342 retval = -ENOMEM;
2343 goto err_driver;
2344 }
2345
2346 memset(tsi148_device, 0, sizeof(struct tsi148_driver));
2347
2348 tsi148_bridge->driver_priv = tsi148_device;
2349
2273 /* Enable the device */ 2350 /* Enable the device */
2274 retval = pci_enable_device(pdev); 2351 retval = pci_enable_device(pdev);
2275 if (retval) { 2352 if (retval) {
@@ -2285,15 +2362,16 @@ static int tsi148_probe(struct pci_dev *pdev, const struct pci_device_id *id)
2285 } 2362 }
2286 2363
2287 /* map registers in BAR 0 */ 2364 /* map registers in BAR 0 */
2288 tsi148_bridge->base = ioremap_nocache(pci_resource_start(pdev, 0), 4096); 2365 tsi148_device->base = ioremap_nocache(pci_resource_start(pdev, 0),
2289 if (!tsi148_bridge->base) { 2366 4096);
2367 if (!tsi148_device->base) {
2290 dev_err(&pdev->dev, "Unable to remap CRG region\n"); 2368 dev_err(&pdev->dev, "Unable to remap CRG region\n");
2291 retval = -EIO; 2369 retval = -EIO;
2292 goto err_remap; 2370 goto err_remap;
2293 } 2371 }
2294 2372
2295 /* Check to see if the mapping worked out */ 2373 /* Check to see if the mapping worked out */
2296 data = ioread32(tsi148_bridge->base + TSI148_PCFS_ID) & 0x0000FFFF; 2374 data = ioread32(tsi148_device->base + TSI148_PCFS_ID) & 0x0000FFFF;
2297 if (data != PCI_VENDOR_ID_TUNDRA) { 2375 if (data != PCI_VENDOR_ID_TUNDRA) {
2298 dev_err(&pdev->dev, "CRG region check failed\n"); 2376 dev_err(&pdev->dev, "CRG region check failed\n");
2299 retval = -EIO; 2377 retval = -EIO;
@@ -2301,12 +2379,11 @@ static int tsi148_probe(struct pci_dev *pdev, const struct pci_device_id *id)
2301 } 2379 }
2302 2380
2303 /* Initialize wait queues & mutual exclusion flags */ 2381 /* Initialize wait queues & mutual exclusion flags */
2304 /* XXX These need to be moved to the vme_bridge structure */ 2382 init_waitqueue_head(&(tsi148_device->dma_queue[0]));
2305 init_waitqueue_head(&dma_queue[0]); 2383 init_waitqueue_head(&(tsi148_device->dma_queue[1]));
2306 init_waitqueue_head(&dma_queue[1]); 2384 init_waitqueue_head(&(tsi148_device->iack_queue));
2307 init_waitqueue_head(&iack_queue); 2385 mutex_init(&(tsi148_device->vme_int));
2308 mutex_init(&(vme_int)); 2386 mutex_init(&(tsi148_device->vme_rmw));
2309 mutex_init(&(vme_rmw));
2310 2387
2311 tsi148_bridge->parent = &(pdev->dev); 2388 tsi148_bridge->parent = &(pdev->dev);
2312 strcpy(tsi148_bridge->name, driver_name); 2389 strcpy(tsi148_bridge->name, driver_name);
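Probe now allocates two objects per PCI function, the generic struct vme_bridge and the chip-specific struct tsi148_driver, and links them through driver_priv before any of the refactored helpers run; the wait queues and mutexes that used to be initialised as module globals are set up on the freshly allocated private structure. In outline (sketch: kzalloc stands in for the kmalloc()/memset() pairs used here, and the real code unwinds through error labels instead of returning directly):

        /* Sketch of the per-device allocation done in tsi148_probe(). */
        tsi148_bridge = kzalloc(sizeof(struct vme_bridge), GFP_KERNEL);
        tsi148_device = kzalloc(sizeof(struct tsi148_driver), GFP_KERNEL);
        if (tsi148_bridge == NULL || tsi148_device == NULL)
                return -ENOMEM;                 /* real code frees and unwinds */

        tsi148_bridge->driver_priv = tsi148_device;     /* link the two */
        tsi148_bridge->parent = &pdev->dev;

        /* Synchronisation state is now per device, not per module. */
        init_waitqueue_head(&tsi148_device->dma_queue[0]);
        init_waitqueue_head(&tsi148_device->dma_queue[1]);
        init_waitqueue_head(&tsi148_device->iack_queue);
        mutex_init(&tsi148_device->vme_int);
        mutex_init(&tsi148_device->vme_rmw);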
@@ -2326,29 +2403,29 @@ static int tsi148_probe(struct pci_dev *pdev, const struct pci_device_id *id)
2326 master_num = TSI148_MAX_MASTER; 2403 master_num = TSI148_MAX_MASTER;
2327 if(err_chk){ 2404 if(err_chk){
2328 master_num--; 2405 master_num--;
2329 /* XXX */ 2406
2330 flush_image = (struct vme_master_resource *)kmalloc( 2407 tsi148_device->flush_image = (struct vme_master_resource *)
2331 sizeof(struct vme_master_resource), GFP_KERNEL); 2408 kmalloc(sizeof(struct vme_master_resource), GFP_KERNEL);
2332 if (flush_image == NULL) { 2409 if (tsi148_device->flush_image == NULL) {
2333 dev_err(&pdev->dev, "Failed to allocate memory for " 2410 dev_err(&pdev->dev, "Failed to allocate memory for "
2334 "flush resource structure\n"); 2411 "flush resource structure\n");
2335 retval = -ENOMEM; 2412 retval = -ENOMEM;
2336 goto err_master; 2413 goto err_master;
2337 } 2414 }
2338 flush_image->parent = tsi148_bridge; 2415 tsi148_device->flush_image->parent = tsi148_bridge;
2339 spin_lock_init(&(flush_image->lock)); 2416 spin_lock_init(&(tsi148_device->flush_image->lock));
2340 flush_image->locked = 1; 2417 tsi148_device->flush_image->locked = 1;
2341 flush_image->number = master_num; 2418 tsi148_device->flush_image->number = master_num;
2342 flush_image->address_attr = VME_A16 | VME_A24 | VME_A32 | 2419 tsi148_device->flush_image->address_attr = VME_A16 | VME_A24 |
2343 VME_A64; 2420 VME_A32 | VME_A64;
2344 flush_image->cycle_attr = VME_SCT | VME_BLT | VME_MBLT | 2421 tsi148_device->flush_image->cycle_attr = VME_SCT | VME_BLT |
2345 VME_2eVME | VME_2eSST | VME_2eSSTB | VME_2eSST160 | 2422 VME_MBLT | VME_2eVME | VME_2eSST | VME_2eSSTB |
2346 VME_2eSST267 | VME_2eSST320 | VME_SUPER | VME_USER | 2423 VME_2eSST160 | VME_2eSST267 | VME_2eSST320 | VME_SUPER |
2347 VME_PROG | VME_DATA; 2424 VME_USER | VME_PROG | VME_DATA;
2348 flush_image->width_attr = VME_D16 | VME_D32; 2425 tsi148_device->flush_image->width_attr = VME_D16 | VME_D32;
2349 memset(&(flush_image->pci_resource), 0, 2426 memset(&(tsi148_device->flush_image->pci_resource), 0,
2350 sizeof(struct resource)); 2427 sizeof(struct resource));
2351 flush_image->kern_base = NULL; 2428 tsi148_device->flush_image->kern_base = NULL;
2352 } 2429 }
2353 2430
2354 /* Add master windows to list */ 2431 /* Add master windows to list */
@@ -2465,45 +2542,42 @@ static int tsi148_probe(struct pci_dev *pdev, const struct pci_device_id *id)
2465 tsi148_bridge->lm_detach = tsi148_lm_detach; 2542 tsi148_bridge->lm_detach = tsi148_lm_detach;
2466 tsi148_bridge->slot_get = tsi148_slot_get; 2543 tsi148_bridge->slot_get = tsi148_slot_get;
2467 2544
2468 data = ioread32be(tsi148_bridge->base + TSI148_LCSR_VSTAT); 2545 data = ioread32be(tsi148_device->base + TSI148_LCSR_VSTAT);
2469 dev_info(&pdev->dev, "Board is%s the VME system controller\n", 2546 dev_info(&pdev->dev, "Board is%s the VME system controller\n",
2470 (data & TSI148_LCSR_VSTAT_SCONS)? "" : " not"); 2547 (data & TSI148_LCSR_VSTAT_SCONS)? "" : " not");
2471 if (!geoid) { 2548 if (!geoid)
2472 dev_info(&pdev->dev, "VME geographical address is %d\n", 2549 dev_info(&pdev->dev, "VME geographical address is %d\n",
2473 data & TSI148_LCSR_VSTAT_GA_M); 2550 data & TSI148_LCSR_VSTAT_GA_M);
2474 } else { 2551 else
2475 dev_info(&pdev->dev, "VME geographical address is set to %d\n", 2552 dev_info(&pdev->dev, "VME geographical address is set to %d\n",
2476 geoid); 2553 geoid);
2477 } 2554
2478 dev_info(&pdev->dev, "VME Write and flush and error check is %s\n", 2555 dev_info(&pdev->dev, "VME Write and flush and error check is %s\n",
2479 err_chk ? "enabled" : "disabled"); 2556 err_chk ? "enabled" : "disabled");
2480 2557
2481 if(tsi148_crcsr_init(pdev)) { 2558 if (tsi148_crcsr_init(tsi148_bridge, pdev))
2482 dev_err(&pdev->dev, "CR/CSR configuration failed.\n"); 2559 dev_err(&pdev->dev, "CR/CSR configuration failed.\n");
2483 goto err_crcsr; 2560 goto err_crcsr;
2484 2561
2485 }
2486
2487 /* Need to save tsi148_bridge pointer locally in link list for use in
2488 * tsi148_remove()
2489 */
2490 retval = vme_register_bridge(tsi148_bridge); 2562 retval = vme_register_bridge(tsi148_bridge);
2491 if (retval != 0) { 2563 if (retval != 0) {
2492 dev_err(&pdev->dev, "Chip Registration failed.\n"); 2564 dev_err(&pdev->dev, "Chip Registration failed.\n");
2493 goto err_reg; 2565 goto err_reg;
2494 } 2566 }
2495 2567
2568 pci_set_drvdata(pdev, tsi148_bridge);
2569
2496 /* Clear VME bus "board fail", and "power-up reset" lines */ 2570 /* Clear VME bus "board fail", and "power-up reset" lines */
2497 data = ioread32be(tsi148_bridge->base + TSI148_LCSR_VSTAT); 2571 data = ioread32be(tsi148_device->base + TSI148_LCSR_VSTAT);
2498 data &= ~TSI148_LCSR_VSTAT_BRDFL; 2572 data &= ~TSI148_LCSR_VSTAT_BRDFL;
2499 data |= TSI148_LCSR_VSTAT_CPURST; 2573 data |= TSI148_LCSR_VSTAT_CPURST;
2500 iowrite32be(data, tsi148_bridge->base + TSI148_LCSR_VSTAT); 2574 iowrite32be(data, tsi148_device->base + TSI148_LCSR_VSTAT);
2501 2575
2502 return 0; 2576 return 0;
2503 2577
2504 vme_unregister_bridge(tsi148_bridge); 2578 vme_unregister_bridge(tsi148_bridge);
2505err_reg: 2579err_reg:
2506 tsi148_crcsr_exit(pdev); 2580 tsi148_crcsr_exit(tsi148_bridge, pdev);
2507err_crcsr: 2581err_crcsr:
2508err_lm: 2582err_lm:
2509 /* resources are stored in link list */ 2583 /* resources are stored in link list */
@@ -2534,15 +2608,17 @@ err_master:
2534 kfree(master_image); 2608 kfree(master_image);
2535 } 2609 }
2536 2610
2537 tsi148_irq_exit(pdev); 2611 tsi148_irq_exit(tsi148_device, pdev);
2538err_irq: 2612err_irq:
2539err_test: 2613err_test:
2540 iounmap(tsi148_bridge->base); 2614 iounmap(tsi148_device->base);
2541err_remap: 2615err_remap:
2542 pci_release_regions(pdev); 2616 pci_release_regions(pdev);
2543err_resource: 2617err_resource:
2544 pci_disable_device(pdev); 2618 pci_disable_device(pdev);
2545err_enable: 2619err_enable:
2620 kfree(tsi148_device);
2621err_driver:
2546 kfree(tsi148_bridge); 2622 kfree(tsi148_bridge);
2547err_struct: 2623err_struct:
2548 return retval; 2624 return retval;
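
The unwind above now has to release two allocations: the generic struct vme_bridge and the driver-private struct tsi148_driver hung off it. A minimal sketch of the allocation order and the matching tail of the error path implied by the err_driver and err_struct labels; the kmalloc()/GFP_KERNEL calls and -ENOMEM values are assumptions, since the allocations themselves sit earlier in probe than this hunk shows, and everything between them is elided.

#include <linux/pci.h>
#include <linux/slab.h>
#include "../vme_bridge.h"
#include "vme_tsi148.h"

/* Sketch only: two allocations, unwound in reverse order on failure. */
static int tsi148_probe_alloc_sketch(struct pci_dev *pdev)
{
	int retval = 0;
	struct vme_bridge *tsi148_bridge;
	struct tsi148_driver *tsi148_device;

	tsi148_bridge = kmalloc(sizeof(struct vme_bridge), GFP_KERNEL);	/* assumed */
	if (tsi148_bridge == NULL) {
		retval = -ENOMEM;
		goto err_struct;
	}

	tsi148_device = kmalloc(sizeof(struct tsi148_driver), GFP_KERNEL);	/* assumed */
	if (tsi148_device == NULL) {
		retval = -ENOMEM;
		goto err_driver;	/* only the outer structure to free */
	}

	/* From here on the private data is reached through the bridge. */
	tsi148_bridge->driver_priv = tsi148_device;

	/* ... PCI enable, BAR mapping, IRQ and window setup elided; later
	 * failures free tsi148_device first, as the labels above show ... */

	return 0;

err_driver:
	kfree(tsi148_bridge);
err_struct:
	return retval;
}
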
@@ -2556,56 +2632,58 @@ static void tsi148_remove(struct pci_dev *pdev)
2556 struct vme_slave_resource *slave_image; 2632 struct vme_slave_resource *slave_image;
2557 struct vme_dma_resource *dma_ctrlr; 2633 struct vme_dma_resource *dma_ctrlr;
2558 int i; 2634 int i;
2635 struct tsi148_driver *bridge;
2636 struct vme_bridge *tsi148_bridge = pci_get_drvdata(pdev);
2637
2638 bridge = tsi148_bridge->driver_priv;
2559 2639
2560 dev_dbg(&pdev->dev, "Driver is being unloaded.\n");
2561 2640
2562 /* XXX We need to find the pdev->dev in the list of vme_bridge->dev's */ 2641 dev_dbg(&pdev->dev, "Driver is being unloaded.\n");
2563 2642
2564 /* 2643 /*
2565 * Shutdown all inbound and outbound windows. 2644 * Shutdown all inbound and outbound windows.
2566 */ 2645 */
2567 for (i = 0; i < 8; i++) { 2646 for (i = 0; i < 8; i++) {
2568 iowrite32be(0, tsi148_bridge->base + TSI148_LCSR_IT[i] + 2647 iowrite32be(0, bridge->base + TSI148_LCSR_IT[i] +
2569 TSI148_LCSR_OFFSET_ITAT); 2648 TSI148_LCSR_OFFSET_ITAT);
2570 iowrite32be(0, tsi148_bridge->base + TSI148_LCSR_OT[i] + 2649 iowrite32be(0, bridge->base + TSI148_LCSR_OT[i] +
2571 TSI148_LCSR_OFFSET_OTAT); 2650 TSI148_LCSR_OFFSET_OTAT);
2572 } 2651 }
2573 2652
2574 /* 2653 /*
2575 * Shutdown Location monitor. 2654 * Shutdown Location monitor.
2576 */ 2655 */
2577 iowrite32be(0, tsi148_bridge->base + TSI148_LCSR_LMAT); 2656 iowrite32be(0, bridge->base + TSI148_LCSR_LMAT);
2578 2657
2579 /* 2658 /*
2580 * Shutdown CRG map. 2659 * Shutdown CRG map.
2581 */ 2660 */
2582 iowrite32be(0, tsi148_bridge->base + TSI148_LCSR_CSRAT); 2661 iowrite32be(0, bridge->base + TSI148_LCSR_CSRAT);
2583 2662
2584 /* 2663 /*
2585 * Clear error status. 2664 * Clear error status.
2586 */ 2665 */
2587 iowrite32be(0xFFFFFFFF, tsi148_bridge->base + TSI148_LCSR_EDPAT); 2666 iowrite32be(0xFFFFFFFF, bridge->base + TSI148_LCSR_EDPAT);
2588 iowrite32be(0xFFFFFFFF, tsi148_bridge->base + TSI148_LCSR_VEAT); 2667 iowrite32be(0xFFFFFFFF, bridge->base + TSI148_LCSR_VEAT);
2589 iowrite32be(0x07000700, tsi148_bridge->base + TSI148_LCSR_PSTAT); 2668 iowrite32be(0x07000700, bridge->base + TSI148_LCSR_PSTAT);
2590 2669
2591 /* 2670 /*
2592 * Remove VIRQ interrupt (if any) 2671 * Remove VIRQ interrupt (if any)
2593 */ 2672 */
2594 if (ioread32be(tsi148_bridge->base + TSI148_LCSR_VICR) & 0x800) { 2673 if (ioread32be(bridge->base + TSI148_LCSR_VICR) & 0x800)
2595 iowrite32be(0x8000, tsi148_bridge->base + TSI148_LCSR_VICR); 2674 iowrite32be(0x8000, bridge->base + TSI148_LCSR_VICR);
2596 }
2597 2675
2598 /* 2676 /*
2599 * Map all Interrupts to PCI INTA 2677 * Map all Interrupts to PCI INTA
2600 */ 2678 */
2601 iowrite32be(0x0, tsi148_bridge->base + TSI148_LCSR_INTM1); 2679 iowrite32be(0x0, bridge->base + TSI148_LCSR_INTM1);
2602 iowrite32be(0x0, tsi148_bridge->base + TSI148_LCSR_INTM2); 2680 iowrite32be(0x0, bridge->base + TSI148_LCSR_INTM2);
2603 2681
2604 tsi148_irq_exit(pdev); 2682 tsi148_irq_exit(bridge, pdev);
2605 2683
2606 vme_unregister_bridge(tsi148_bridge); 2684 vme_unregister_bridge(tsi148_bridge);
2607 2685
2608 tsi148_crcsr_exit(pdev); 2686 tsi148_crcsr_exit(tsi148_bridge, pdev);
2609 2687
2610 /* resources are stored in link list */ 2688 /* resources are stored in link list */
2611 list_for_each(pos, &(tsi148_bridge->dma_resources)) { 2689 list_for_each(pos, &(tsi148_bridge->dma_resources)) {
@@ -2629,14 +2707,16 @@ static void tsi148_remove(struct pci_dev *pdev)
2629 kfree(master_image); 2707 kfree(master_image);
2630 } 2708 }
2631 2709
2632 tsi148_irq_exit(pdev); 2710 tsi148_irq_exit(bridge, pdev);
2633 2711
2634 iounmap(tsi148_bridge->base); 2712 iounmap(bridge->base);
2635 2713
2636 pci_release_regions(pdev); 2714 pci_release_regions(pdev);
2637 2715
2638 pci_disable_device(pdev); 2716 pci_disable_device(pdev);
2639 2717
2718 kfree(tsi148_bridge->driver_priv);
2719
2640 kfree(tsi148_bridge); 2720 kfree(tsi148_bridge);
2641} 2721}
2642 2722
@@ -2724,29 +2804,29 @@ int tsi148_dma_run(struct vme_dma_resource *resource, struct vme_dma_attr src,
2724 } 2804 }
2725 2805
2726 /* Program registers for DMA transfer */ 2806 /* Program registers for DMA transfer */
2727 iowrite32be(dmaLL->dsau, tsi148_bridge->base + 2807 iowrite32be(dmaLL->dsau, tsi148_bridge->driver_priv->base +
2728 TSI148_LCSR_DMA[channel] + TSI148_LCSR_OFFSET_DSAU); 2808 TSI148_LCSR_DMA[channel] + TSI148_LCSR_OFFSET_DSAU);
2729 iowrite32be(dmaLL->dsal, tsi148_bridge->base + 2809 iowrite32be(dmaLL->dsal, tsi148_bridge->driver_priv->base +
2730 TSI148_LCSR_DMA[channel] + TSI148_LCSR_OFFSET_DSAL); 2810 TSI148_LCSR_DMA[channel] + TSI148_LCSR_OFFSET_DSAL);
2731 iowrite32be(dmaLL->ddau, tsi148_bridge->base + 2811 iowrite32be(dmaLL->ddau, tsi148_bridge->driver_priv->base +
2732 TSI148_LCSR_DMA[channel] + TSI148_LCSR_OFFSET_DDAU); 2812 TSI148_LCSR_DMA[channel] + TSI148_LCSR_OFFSET_DDAU);
2733 iowrite32be(dmaLL->ddal, tsi148_bridge->base + 2813 iowrite32be(dmaLL->ddal, tsi148_bridge->driver_priv->base +
2734 TSI148_LCSR_DMA[channel] + TSI148_LCSR_OFFSET_DDAL); 2814 TSI148_LCSR_DMA[channel] + TSI148_LCSR_OFFSET_DDAL);
2735 iowrite32be(dmaLL->dsat, tsi148_bridge->base + 2815 iowrite32be(dmaLL->dsat, tsi148_bridge->driver_priv->base +
2736 TSI148_LCSR_DMA[channel] + TSI148_LCSR_OFFSET_DSAT); 2816 TSI148_LCSR_DMA[channel] + TSI148_LCSR_OFFSET_DSAT);
2737 iowrite32be(dmaLL->ddat, tsi148_bridge->base + 2817 iowrite32be(dmaLL->ddat, tsi148_bridge->driver_priv->base +
2738 TSI148_LCSR_DMA[channel] + TSI148_LCSR_OFFSET_DDAT); 2818 TSI148_LCSR_DMA[channel] + TSI148_LCSR_OFFSET_DDAT);
2739 iowrite32be(dmaLL->dcnt, tsi148_bridge->base + 2819 iowrite32be(dmaLL->dcnt, tsi148_bridge->driver_priv->base +
2740 TSI148_LCSR_DMA[channel] + TSI148_LCSR_OFFSET_DCNT); 2820 TSI148_LCSR_DMA[channel] + TSI148_LCSR_OFFSET_DCNT);
2741 iowrite32be(dmaLL->ddbs, tsi148_bridge->base + 2821 iowrite32be(dmaLL->ddbs, tsi148_bridge->driver_priv->base +
2742 TSI148_LCSR_DMA[channel] + TSI148_LCSR_OFFSET_DDBS); 2822 TSI148_LCSR_DMA[channel] + TSI148_LCSR_OFFSET_DDBS);
2743 2823
2744 /* Start the operation */ 2824 /* Start the operation */
2745 iowrite32be(dctlreg | 0x2000000, tsi148_bridge->base + 2825 iowrite32be(dctlreg | 0x2000000, tsi148_bridge->driver_priv->base +
2746 TSI148_LCSR_DMA[channel] + TSI148_LCSR_OFFSET_DCTL); 2826 TSI148_LCSR_DMA[channel] + TSI148_LCSR_OFFSET_DCTL);
2747 2827
2748 tmp = ioread32be(tsi148_bridge->base + TSI148_LCSR_DMA[channel] + 2828 tmp = ioread32be(tsi148_bridge->driver_priv->base +
2749 TSI148_LCSR_OFFSET_DSTA); 2829 TSI148_LCSR_DMA[channel] + TSI148_LCSR_OFFSET_DSTA);
2750 wait_event_interruptible(dma_queue[channel], (tmp & 0x1000000) == 0); 2830 wait_event_interruptible(dma_queue[channel], (tmp & 0x1000000) == 0);
2751 2831
2752 /* 2832 /*
@@ -2754,8 +2834,8 @@ int tsi148_dma_run(struct vme_dma_resource *resource, struct vme_dma_attr src,
2754 * handler rather than here so that we can be sure we haven't kicked off 2834 * handler rather than here so that we can be sure we haven't kicked off
2755 * another DMA transfer. 2835 * another DMA transfer.
2756 */ 2836 */
2757 val = ioread32be(tsi148_bridge->base + TSI148_LCSR_DMA[channel] + 2837 val = ioread32be(tsi148_bridge->driver_priv->base +
2758 TSI148_LCSR_OFFSET_DSTA); 2838 TSI148_LCSR_DMA[channel] + TSI148_LCSR_OFFSET_DSTA);
2759 2839
2760 vmeDma->vmeDmaStatus = 0; 2840 vmeDma->vmeDmaStatus = 0;
2761 if (val & 0x10000000) { 2841 if (val & 0x10000000) {
@@ -2782,7 +2862,8 @@ int tempe_set_arbiter(vmeArbiterCfg_t * vmeArb)
2782 int temp_ctl = 0; 2862 int temp_ctl = 0;
2783 int gto = 0; 2863 int gto = 0;
2784 2864
2785 temp_ctl = ioread32be(tsi148_bridge->base + TSI148_LCSR_VCTRL); 2865 temp_ctl = ioread32be(tsi148_bridge->driver_priv->base +
2866 TSI148_LCSR_VCTRL);
2786 temp_ctl &= 0xFFEFFF00; 2867 temp_ctl &= 0xFFEFFF00;
2787 2868
2788 if (vmeArb->globalTimeoutTimer == 0xFFFFFFFF) { 2869 if (vmeArb->globalTimeoutTimer == 0xFFFFFFFF) {
@@ -2810,7 +2891,8 @@ int tempe_set_arbiter(vmeArbiterCfg_t * vmeArb)
2810 if (vmeArb->noEarlyReleaseFlag) { 2891 if (vmeArb->noEarlyReleaseFlag) {
2811 temp_ctl |= 1 << 20; 2892 temp_ctl |= 1 << 20;
2812 } 2893 }
2813 iowrite32be(temp_ctl, tsi148_bridge->base + TSI148_LCSR_VCTRL); 2894 iowrite32be(temp_ctl, tsi148_bridge->driver_priv->base +
2895 TSI148_LCSR_VCTRL);
2814 2896
2815 return (0); 2897 return (0);
2816} 2898}
@@ -2824,7 +2906,8 @@ int tempe_get_arbiter(vmeArbiterCfg_t * vmeArb)
2824 int gto = 0; 2906 int gto = 0;
2825 2907
2826 2908
2827 temp_ctl = ioread32be(tsi148_bridge->base + TSI148_LCSR_VCTRL); 2909 temp_ctl = ioread32be(tsi148_bridge->driver_priv->base +
2910 TSI148_LCSR_VCTRL);
2828 2911
2829 gto = temp_ctl & 0xF; 2912 gto = temp_ctl & 0xF;
2830 if (gto != 0) { 2913 if (gto != 0) {
@@ -2855,7 +2938,8 @@ int tempe_set_requestor(vmeRequesterCfg_t * vmeReq)
2855{ 2938{
2856 int temp_ctl = 0; 2939 int temp_ctl = 0;
2857 2940
2858 temp_ctl = ioread32be(tsi148_bridge->base + TSI148_LCSR_VMCTRL); 2941 temp_ctl = ioread32be(tsi148_bridge->driver_priv->base +
2942 TSI148_LCSR_VMCTRL);
2859 temp_ctl &= 0xFFFF0000; 2943 temp_ctl &= 0xFFFF0000;
2860 2944
2861 if (vmeReq->releaseMode == 1) { 2945 if (vmeReq->releaseMode == 1) {
@@ -2870,7 +2954,8 @@ int tempe_set_requestor(vmeRequesterCfg_t * vmeReq)
2870 temp_ctl |= (vmeReq->timeoffTimeoutTimer & 7) << 12; 2954 temp_ctl |= (vmeReq->timeoffTimeoutTimer & 7) << 12;
2871 temp_ctl |= vmeReq->requestLevel; 2955 temp_ctl |= vmeReq->requestLevel;
2872 2956
2873 iowrite32be(temp_ctl, tsi148_bridge->base + TSI148_LCSR_VMCTRL); 2957 iowrite32be(temp_ctl, tsi148_bridge->driver_priv->base +
2958 TSI148_LCSR_VMCTRL);
2874 return (0); 2959 return (0);
2875} 2960}
2876 2961
@@ -2881,7 +2966,8 @@ int tempe_get_requestor(vmeRequesterCfg_t * vmeReq)
2881{ 2966{
2882 int temp_ctl = 0; 2967 int temp_ctl = 0;
2883 2968
2884 temp_ctl = ioread32be(tsi148_bridge->base + TSI148_LCSR_VMCTRL); 2969 temp_ctl = ioread32be(tsi148_bridge->driver_priv->base +
2970 TSI148_LCSR_VMCTRL);
2885 2971
2886 if (temp_ctl & 0x18) { 2972 if (temp_ctl & 0x18) {
2887 vmeReq->releaseMode = 1; 2973 vmeReq->releaseMode = 1;
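
All of the register accesses in the hunks above now reach the device through the private data attached to the generic bridge rather than through file-scope variables. A minimal sketch of that retrieval pattern for a PCI callback, assuming (as the probe and remove hunks show) that pci_set_drvdata() was given the struct vme_bridge and that driver_priv points at the struct tsi148_driver; the function name is illustrative.

#include <linux/pci.h>
#include <linux/io.h>
#include "../vme_bridge.h"
#include "vme_tsi148.h"

/* Sketch: recover per-bridge state from the pci_dev, then use its base. */
static void tsi148_lm_shutdown_sketch(struct pci_dev *pdev)
{
	struct vme_bridge *tsi148_bridge = pci_get_drvdata(pdev);
	struct tsi148_driver *bridge = tsi148_bridge->driver_priv;

	/* Same access pattern as the remove() hunk: disable the location
	 * monitor through the per-device register window. */
	iowrite32be(0, bridge->base + TSI148_LCSR_LMAT);
}

With more than one bridge present, each pci_dev carries its own vme_bridge/tsi148_driver pair, so the same code path services every board without interference.
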
diff --git a/drivers/staging/vme/bridges/vme_tsi148.h b/drivers/staging/vme/bridges/vme_tsi148.h
index 6f0f705ce6be..9e5f7fa1d744 100644
--- a/drivers/staging/vme/bridges/vme_tsi148.h
+++ b/drivers/staging/vme/bridges/vme_tsi148.h
@@ -33,6 +33,22 @@
33#define TSI148_MAX_MAILBOX 4 /* Max Mail Box registers */ 33#define TSI148_MAX_MAILBOX 4 /* Max Mail Box registers */
34#define TSI148_MAX_SEMAPHORE 8 /* Max Semaphores */ 34#define TSI148_MAX_SEMAPHORE 8 /* Max Semaphores */
35 35
36/* Structure used to hold driver specific information */
37struct tsi148_driver {
38 void *base; /* Base Address of device registers */
39 wait_queue_head_t dma_queue[2];
40 wait_queue_head_t iack_queue;
41 void (*lm_callback[4])(int); /* Called in interrupt handler */
42 void *crcsr_kernel;
43 dma_addr_t crcsr_bus;
44 struct vme_master_resource *flush_image;
45 struct mutex vme_rmw; /* Only one RMW cycle at a time */
46 struct mutex vme_int; /*
47 * Only one VME interrupt can be
48 * generated at a time, provide locking
49 */
50};
51
36/* 52/*
37 * Layout of a DMAC Linked-List Descriptor 53 * Layout of a DMAC Linked-List Descriptor
38 * 54 *
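
The structure above gathers what used to be module-scope state, so each physical bridge now carries its own wait queues, locks and CR/CSR image. A short sketch of how the members might be initialised once the structure has been allocated and attached; init_waitqueue_head() and mutex_init() are the standard kernel initialisers, but the exact place and order in the real probe is an assumption here.

#include <linux/wait.h>
#include <linux/mutex.h>
#include "../vme_bridge.h"
#include "vme_tsi148.h"

/* Sketch: per-bridge initialisation of the former globals. */
static void tsi148_device_init_sketch(struct tsi148_driver *tsi148_device)
{
	int i;

	init_waitqueue_head(&tsi148_device->dma_queue[0]);
	init_waitqueue_head(&tsi148_device->dma_queue[1]);
	init_waitqueue_head(&tsi148_device->iack_queue);

	mutex_init(&tsi148_device->vme_rmw);	/* one RMW cycle at a time */
	mutex_init(&tsi148_device->vme_int);	/* one generated VME IRQ at a time */

	for (i = 0; i < 4; i++)
		tsi148_device->lm_callback[i] = NULL;

	/* base, crcsr_kernel/crcsr_bus and flush_image are filled in by the
	 * BAR mapping, CR/CSR and master-window setup respectively. */
}
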
diff --git a/drivers/staging/vme/vme.c b/drivers/staging/vme/vme.c
index 79c501dac5f9..ea2eee3f0834 100644
--- a/drivers/staging/vme/vme.c
+++ b/drivers/staging/vme/vme.c
@@ -993,7 +993,7 @@ void vme_irq_handler(struct vme_bridge *bridge, int level, int statid)
993EXPORT_SYMBOL(vme_irq_handler); 993EXPORT_SYMBOL(vme_irq_handler);
994 994
995int vme_irq_request(struct device *dev, int level, int statid, 995int vme_irq_request(struct device *dev, int level, int statid,
996 void (*callback)(int level, int vector, void *priv_data), 996 void (*callback)(int, int, void *),
997 void *priv_data) 997 void *priv_data)
998{ 998{
999 struct vme_bridge *bridge; 999 struct vme_bridge *bridge;
@@ -1027,7 +1027,7 @@ int vme_irq_request(struct device *dev, int level, int statid,
1027 bridge->irq[level - 1].callback[statid].func = callback; 1027 bridge->irq[level - 1].callback[statid].func = callback;
1028 1028
1029 /* Enable IRQ level */ 1029 /* Enable IRQ level */
1030 bridge->irq_set(level, 1, 1); 1030 bridge->irq_set(bridge, level, 1, 1);
1031 1031
1032 mutex_unlock(&(bridge->irq_mtx)); 1032 mutex_unlock(&(bridge->irq_mtx));
1033 1033
@@ -1061,7 +1061,7 @@ void vme_irq_free(struct device *dev, int level, int statid)
1061 1061
1062 /* Disable IRQ level if no more interrupts attached at this level*/ 1062 /* Disable IRQ level if no more interrupts attached at this level*/
1063 if (bridge->irq[level - 1].count == 0) 1063 if (bridge->irq[level - 1].count == 0)
1064 bridge->irq_set(level, 0, 1); 1064 bridge->irq_set(bridge, level, 0, 1);
1065 1065
1066 bridge->irq[level - 1].callback[statid].func = NULL; 1066 bridge->irq[level - 1].callback[statid].func = NULL;
1067 bridge->irq[level - 1].callback[statid].priv_data = NULL; 1067 bridge->irq[level - 1].callback[statid].priv_data = NULL;
@@ -1090,7 +1090,7 @@ int vme_irq_generate(struct device *dev, int level, int statid)
1090 return -EINVAL; 1090 return -EINVAL;
1091 } 1091 }
1092 1092
1093 return bridge->irq_generate(level, statid); 1093 return bridge->irq_generate(bridge, level, statid);
1094} 1094}
1095EXPORT_SYMBOL(vme_irq_generate); 1095EXPORT_SYMBOL(vme_irq_generate);
1096 1096
@@ -1303,7 +1303,7 @@ int vme_slot_get(struct device *bus)
1303 return -EINVAL; 1303 return -EINVAL;
1304 } 1304 }
1305 1305
1306 return bridge->slot_get(); 1306 return bridge->slot_get(bridge);
1307} 1307}
1308EXPORT_SYMBOL(vme_slot_get); 1308EXPORT_SYMBOL(vme_slot_get);
1309 1309
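
On the driver side, each of these callbacks now receives the struct vme_bridge the core already holds, so an implementation finds its own registers through driver_priv instead of a module-wide pointer. A sketch of an irq_set implementation under the new prototype; the register offset and bit layout are placeholders rather than the real tsi148 interrupt registers, and handling of the sync argument is omitted.

#include <linux/io.h>
#include "../vme_bridge.h"
#include "vme_tsi148.h"

#define EXAMPLE_IRQ_ENABLE	0x100	/* placeholder register offset */

/* Sketch: callback matching void (*irq_set)(struct vme_bridge *, int, int,
 * int) from vme_bridge.h. */
static void example_irq_set(struct vme_bridge *vme_bridge_ptr, int level,
	int state, int sync)
{
	u32 tmp;
	struct tsi148_driver *bridge = vme_bridge_ptr->driver_priv;

	tmp = ioread32be(bridge->base + EXAMPLE_IRQ_ENABLE);
	if (state)
		tmp |= 1 << level;
	else
		tmp &= ~(1 << level);
	iowrite32be(tmp, bridge->base + EXAMPLE_IRQ_ENABLE);
}

The core then invokes it as bridge->irq_set(bridge, level, 1, 1), exactly as the vme_irq_request() hunk above shows.
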
diff --git a/drivers/staging/vme/vme_bridge.h b/drivers/staging/vme/vme_bridge.h
index f8ead21c94fb..6dc472f613e2 100644
--- a/drivers/staging/vme/vme_bridge.h
+++ b/drivers/staging/vme/vme_bridge.h
@@ -113,7 +113,7 @@ struct vme_bridge {
113 113
114 /* Bridge Info - XXX Move to private structure? */ 114 /* Bridge Info - XXX Move to private structure? */
115 struct device *parent; /* Generic device struct (pdev->dev for PCI) */ 115 struct device *parent; /* Generic device struct (pdev->dev for PCI) */
116 void *base; /* Base Address of device registers */ 116 void *driver_priv; /* Private pointer for the bridge driver */
117 117
118 struct device dev[VME_SLOTS_MAX]; /* Device registered with 118 struct device dev[VME_SLOTS_MAX]; /* Device registered with
119 * device model on VME bus 119 * device model on VME bus
@@ -152,8 +152,8 @@ struct vme_bridge {
152 int (*dma_list_empty) (struct vme_dma_list *); 152 int (*dma_list_empty) (struct vme_dma_list *);
153 153
154 /* Interrupt Functions */ 154 /* Interrupt Functions */
155 void (*irq_set) (int, int, int); 155 void (*irq_set) (struct vme_bridge *, int, int, int);
156 int (*irq_generate) (int, int); 156 int (*irq_generate) (struct vme_bridge *, int, int);
157 157
158 /* Location monitor functions */ 158 /* Location monitor functions */
159 int (*lm_set) (struct vme_lm_resource *, unsigned long long, 159 int (*lm_set) (struct vme_lm_resource *, unsigned long long,
@@ -164,7 +164,7 @@ struct vme_bridge {
164 int (*lm_detach) (struct vme_lm_resource *, int); 164 int (*lm_detach) (struct vme_lm_resource *, int);
165 165
166 /* CR/CSR space functions */ 166 /* CR/CSR space functions */
167 int (*slot_get) (void); 167 int (*slot_get) (struct vme_bridge *);
168 /* Use standard master read and write functions to access CR/CSR */ 168 /* Use standard master read and write functions to access CR/CSR */
169 169
170#if 0 170#if 0
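
A sketch of a matching slot_get implementation under the new int (*slot_get)(struct vme_bridge *) prototype, reusing the VSTAT geographical-address read already visible in the tsi148 probe hunk; whether the real tsi148_slot_get also consults the geoid module parameter first is left aside here.

#include <linux/io.h>
#include "../vme_bridge.h"
#include "vme_tsi148.h"

/* Sketch: read the slot number from the per-bridge register window. */
static int tsi148_slot_get_sketch(struct vme_bridge *tsi148_bridge)
{
	u32 slot;
	struct tsi148_driver *bridge = tsi148_bridge->driver_priv;

	slot = ioread32be(bridge->base + TSI148_LCSR_VSTAT);
	return slot & TSI148_LCSR_VSTAT_GA_M;
}

vme_slot_get() in vme.c then simply forwards the bridge pointer: return bridge->slot_get(bridge);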