path: root/drivers/rapidio/devices/tsi721.c
author	Alexandre Bounine <alexandre.bounine@idt.com>	2011-11-02 16:39:09 -0400
committer	Linus Torvalds <torvalds@linux-foundation.org>	2011-11-02 19:07:01 -0400
commit	48618fb4e522d9d02e217ac05f52749545c1af20 (patch)
tree	4699179c3eacc0ec63d03ac631fae6822fa10969 /drivers/rapidio/devices/tsi721.c
parent	e80dd9a7bca4057d5a09d1ba94a7ba0791e7426a (diff)
RapidIO: add mport driver for Tsi721 bridge
Add a RapidIO mport driver for the IDT TSI721 PCI Express-to-SRIO bridge device. The driver provides a full set of the callback functions defined for mport devices in the RapidIO subsystem. It is also compatible with the current version of the RIONET driver (Ethernet over RapidIO messaging services). This patch is applicable to kernel versions starting from 2.6.39.

Signed-off-by: Alexandre Bounine <alexandre.bounine@idt.com>
Signed-off-by: Chul Kim <chul.kim@idt.com>
Cc: Kumar Gala <galak@kernel.crashing.org>
Cc: Matt Porter <mporter@kernel.crashing.org>
Cc: Li Yang <leoli@freescale.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
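The callbacks this driver implements are handed to the RapidIO core through a struct rio_ops table. A minimal sketch of that wiring, assuming the 2.6.39-era rio_ops layout (the literal setup code lives later in tsi721.c, outside the excerpt below):

	struct rio_ops *ops = kzalloc(sizeof(*ops), GFP_KERNEL);

	ops->lcread = tsi721_lcread;		/* local config space read */
	ops->lcwrite = tsi721_lcwrite;		/* local config space write */
	ops->cread = tsi721_cread_dma;		/* maintenance read via BDMA */
	ops->cwrite = tsi721_cwrite_dma;	/* maintenance write via BDMA */
	ops->dsend = tsi721_dsend;		/* outbound doorbell */
	ops->pwenable = tsi721_pw_enable;	/* port-write rx control */
	ops->open_outb_mbox = tsi721_open_outb_mbox;
	ops->close_outb_mbox = tsi721_close_outb_mbox;
	ops->open_inb_mbox = tsi721_open_inb_mbox;
	ops->close_inb_mbox = tsi721_close_inb_mbox;
	ops->add_outb_message = tsi721_add_outb_message;
	/* ... attach ops to a populated struct rio_mport, then: */
	rio_register_mport(mport);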
Diffstat (limited to 'drivers/rapidio/devices/tsi721.c')
-rw-r--r--	drivers/rapidio/devices/tsi721.c	2360
1 file changed, 2360 insertions(+), 0 deletions(-)
diff --git a/drivers/rapidio/devices/tsi721.c b/drivers/rapidio/devices/tsi721.c
new file mode 100644
index 000000000000..5225930a10cd
--- /dev/null
+++ b/drivers/rapidio/devices/tsi721.c
@@ -0,0 +1,2360 @@
1/*
2 * RapidIO mport driver for Tsi721 PCI Express-to-SRIO bridge
3 *
4 * Copyright 2011 Integrated Device Technology, Inc.
5 * Alexandre Bounine <alexandre.bounine@idt.com>
6 * Chul Kim <chul.kim@idt.com>
7 *
8 * This program is free software; you can redistribute it and/or modify it
9 * under the terms of the GNU General Public License as published by the Free
10 * Software Foundation; either version 2 of the License, or (at your option)
11 * any later version.
12 *
13 * This program is distributed in the hope that it will be useful, but WITHOUT
14 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
15 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
16 * more details.
17 *
18 * You should have received a copy of the GNU General Public License along with
19 * this program; if not, write to the Free Software Foundation, Inc., 59
20 * Temple Place - Suite 330, Boston, MA 02111-1307, USA.
21 */
22
23#include <linux/io.h>
24#include <linux/errno.h>
25#include <linux/init.h>
26#include <linux/ioport.h>
27#include <linux/kernel.h>
28#include <linux/module.h>
29#include <linux/pci.h>
30#include <linux/rio.h>
31#include <linux/rio_drv.h>
32#include <linux/dma-mapping.h>
33#include <linux/interrupt.h>
34#include <linux/kfifo.h>
35#include <linux/delay.h>
36
37#include "tsi721.h"
38
39#define DEBUG_PW /* Inbound Port-Write debugging */
40
41static void tsi721_omsg_handler(struct tsi721_device *priv, int ch);
42static void tsi721_imsg_handler(struct tsi721_device *priv, int ch);
43
44/**
45 * tsi721_lcread - read from local SREP config space
46 * @mport: RapidIO master port info
47 * @index: ID of RapidIO interface
48 * @offset: Offset into configuration space
49 * @len: Length (in bytes) of the maintenance transaction
50 * @data: Location to store the read value
51 *
52 * Generates a local SREP space read. Returns %0 on
53 * success or %-EINVAL on failure.
54 */
55static int tsi721_lcread(struct rio_mport *mport, int index, u32 offset,
56 int len, u32 *data)
57{
58 struct tsi721_device *priv = mport->priv;
59
60 if (len != sizeof(u32))
61 return -EINVAL; /* only 32-bit access is supported */
62
63 *data = ioread32(priv->regs + offset);
64
65 return 0;
66}
67
68/**
69 * tsi721_lcwrite - write into local SREP config space
70 * @mport: RapidIO master port info
71 * @index: ID of RapidIO interface
72 * @offset: Offset into configuration space
73 * @len: Length (in bytes) of the maintenance transaction
74 * @data: Value to be written
75 *
76 * Generates a local write into SREP configuration space. Returns %0 on
77 * success or %-EINVAL on failure.
78 */
79static int tsi721_lcwrite(struct rio_mport *mport, int index, u32 offset,
80 int len, u32 data)
81{
82 struct tsi721_device *priv = mport->priv;
83
84 if (len != sizeof(u32))
85 return -EINVAL; /* only 32-bit access is supported */
86
87 iowrite32(data, priv->regs + offset);
88
89 return 0;
90}
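/*
 * Usage sketch (not code from this file): the RIO core reaches the two
 * accessors above through the registered rio_ops, e.g.
 *
 *	u32 did;
 *	rio_local_read_config_32(mport, RIO_DEV_ID_CAR, &did);
 *
 * which lands in tsi721_lcread() with offset == RIO_DEV_ID_CAR.
 */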
91
92/**
93 * tsi721_maint_dma - Helper function to generate RapidIO maintenance
94 * transactions using designated Tsi721 DMA channel.
95 * @priv: pointer to tsi721 private data
96 * @sys_size: RapidIO transport system size
97 * @destid: Destination ID of transaction
98 * @hopcount: Number of hops to target device
99 * @offset: Offset into configuration space
100 * @len: Length (in bytes) of the maintenance transaction
101 * @data: Location to be read from or write into
102 * @do_wr: Operation flag (1 == MAINT_WR)
103 *
104 * Generates a RapidIO maintenance transaction (Read or Write).
105 * Returns %0 on success and %-EINVAL or %-EIO on failure.
106 */
107static int tsi721_maint_dma(struct tsi721_device *priv, u32 sys_size,
108 u16 destid, u8 hopcount, u32 offset, int len,
109 u32 *data, int do_wr)
110{
111 struct tsi721_dma_desc *bd_ptr;
112 u32 rd_count, swr_ptr, ch_stat;
113 int i, err = 0;
114 u32 op = do_wr ? MAINT_WR : MAINT_RD;
115
116 if (offset > (RIO_MAINT_SPACE_SZ - len) || (len != sizeof(u32)))
117 return -EINVAL;
118
119 bd_ptr = priv->bdma[TSI721_DMACH_MAINT].bd_base;
120
121 rd_count = ioread32(
122 priv->regs + TSI721_DMAC_DRDCNT(TSI721_DMACH_MAINT));
123
124 /* Initialize DMA descriptor */
125 bd_ptr[0].type_id = cpu_to_le32((DTYPE2 << 29) | (op << 19) | destid);
126 bd_ptr[0].bcount = cpu_to_le32((sys_size << 26) | 0x04);
127 bd_ptr[0].raddr_lo = cpu_to_le32((hopcount << 24) | offset);
128 bd_ptr[0].raddr_hi = 0;
129 if (do_wr)
130 bd_ptr[0].data[0] = cpu_to_be32p(data);
131 else
132 bd_ptr[0].data[0] = 0xffffffff;
133
134 mb();
135
136 /* Start DMA operation */
137 iowrite32(rd_count + 2,
138 priv->regs + TSI721_DMAC_DWRCNT(TSI721_DMACH_MAINT));
139 ioread32(priv->regs + TSI721_DMAC_DWRCNT(TSI721_DMACH_MAINT));
140 i = 0;
141
142 /* Wait until DMA transfer is finished */
143 while ((ch_stat = ioread32(priv->regs +
144 TSI721_DMAC_STS(TSI721_DMACH_MAINT))) & TSI721_DMAC_STS_RUN) {
145 udelay(1);
146 if (++i >= 5000000) {
147 dev_dbg(&priv->pdev->dev,
148 "%s : DMA[%d] read timeout ch_status=%x\n",
149 __func__, TSI721_DMACH_MAINT, ch_stat);
150 if (!do_wr)
151 *data = 0xffffffff;
152 err = -EIO;
153 goto err_out;
154 }
155 }
156
157 if (ch_stat & TSI721_DMAC_STS_ABORT) {
158 /* If DMA operation aborted due to error,
159 * reinitialize DMA channel
160 */
161 dev_dbg(&priv->pdev->dev, "%s : DMA ABORT ch_stat=%x\n",
162 __func__, ch_stat);
163 dev_dbg(&priv->pdev->dev, "OP=%d : destid=%x hc=%x off=%x\n",
164 do_wr ? MAINT_WR : MAINT_RD, destid, hopcount, offset);
165 iowrite32(TSI721_DMAC_INT_ALL,
166 priv->regs + TSI721_DMAC_INT(TSI721_DMACH_MAINT));
167 iowrite32(TSI721_DMAC_CTL_INIT,
168 priv->regs + TSI721_DMAC_CTL(TSI721_DMACH_MAINT));
169 udelay(10);
170 iowrite32(0, priv->regs +
171 TSI721_DMAC_DWRCNT(TSI721_DMACH_MAINT));
172 udelay(1);
173 if (!do_wr)
174 *data = 0xffffffff;
175 err = -EIO;
176 goto err_out;
177 }
178
179 if (!do_wr)
180 *data = be32_to_cpu(bd_ptr[0].data[0]);
181
182 /*
183 * Update descriptor status FIFO RD pointer.
184 * NOTE: Skipping check and clear FIFO entries because we are waiting
185 * for transfer to be completed.
186 */
187 swr_ptr = ioread32(priv->regs + TSI721_DMAC_DSWP(TSI721_DMACH_MAINT));
188 iowrite32(swr_ptr, priv->regs + TSI721_DMAC_DSRP(TSI721_DMACH_MAINT));
189err_out:
190
191 return err;
192}
193
194/**
195 * tsi721_cread_dma - Generate a RapidIO maintenance read transaction
196 * using Tsi721 BDMA engine.
197 * @mport: RapidIO master port control structure
198 * @index: ID of RapidIO interface
199 * @destid: Destination ID of transaction
200 * @hopcount: Number of hops to target device
201 * @offset: Offset into configuration space
202 * @len: Length (in bytes) of the maintenance transaction
203 * @data: Location to store the read value
204 *
205 * Generates a RapidIO maintenance read transaction.
206 * Returns %0 on success and %-EINVAL or %-EIO on failure.
207 */
208static int tsi721_cread_dma(struct rio_mport *mport, int index, u16 destid,
209 u8 hopcount, u32 offset, int len, u32 *data)
210{
211 struct tsi721_device *priv = mport->priv;
212
213 return tsi721_maint_dma(priv, mport->sys_size, destid, hopcount,
214 offset, len, data, 0);
215}
216
217/**
218 * tsi721_cwrite_dma - Generate a RapidIO maintenance write transaction
219 * using Tsi721 BDMA engine
220 * @mport: RapidIO master port control structure
221 * @index: ID of RapidIO interface
222 * @destid: Destination ID of transaction
223 * @hopcount: Number of hops to target device
224 * @offset: Offset into configuration space
225 * @len: Length (in bytes) of the maintenance transaction
226 * @data: Value to be written
227 *
228 * Generates a RapidIO maintenance write transaction.
229 * Returns %0 on success and %-EINVAL or %-EIO on failure.
230 */
231static int tsi721_cwrite_dma(struct rio_mport *mport, int index, u16 destid,
232 u8 hopcount, u32 offset, int len, u32 data)
233{
234 struct tsi721_device *priv = mport->priv;
235 u32 temp = data;
236
237 return tsi721_maint_dma(priv, mport->sys_size, destid, hopcount,
238 offset, len, &temp, 1);
239}
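/*
 * Usage sketch (not code from this file): remote maintenance accesses are
 * funneled here by the RIO core, e.g.
 *
 *	u32 did;
 *	rio_mport_read_config_32(mport, destid, hopcount,
 *				 RIO_DEV_ID_CAR, &did);
 *
 * which calls tsi721_cread_dma() and, in turn, tsi721_maint_dma() with
 * do_wr == 0.
 */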
240
241/**
242 * tsi721_pw_handler - Tsi721 inbound port-write interrupt handler
243 * @mport: RapidIO master port structure
244 *
245 * Handles inbound port-write interrupts. Copies PW message from an internal
246 * buffer into PW message FIFO and schedules deferred routine to process
247 * queued messages.
248 */
249static int
250tsi721_pw_handler(struct rio_mport *mport)
251{
252 struct tsi721_device *priv = mport->priv;
253 u32 pw_stat;
254 u32 pw_buf[TSI721_RIO_PW_MSG_SIZE/sizeof(u32)];
255
256
257 pw_stat = ioread32(priv->regs + TSI721_RIO_PW_RX_STAT);
258
259 if (pw_stat & TSI721_RIO_PW_RX_STAT_PW_VAL) {
260 pw_buf[0] = ioread32(priv->regs + TSI721_RIO_PW_RX_CAPT(0));
261 pw_buf[1] = ioread32(priv->regs + TSI721_RIO_PW_RX_CAPT(1));
262 pw_buf[2] = ioread32(priv->regs + TSI721_RIO_PW_RX_CAPT(2));
263 pw_buf[3] = ioread32(priv->regs + TSI721_RIO_PW_RX_CAPT(3));
264
265 /* Queue PW message (if there is room in FIFO),
266 * otherwise discard it.
267 */
268 spin_lock(&priv->pw_fifo_lock);
269 if (kfifo_avail(&priv->pw_fifo) >= TSI721_RIO_PW_MSG_SIZE)
270 kfifo_in(&priv->pw_fifo, pw_buf,
271 TSI721_RIO_PW_MSG_SIZE);
272 else
273 priv->pw_discard_count++;
274 spin_unlock(&priv->pw_fifo_lock);
275 }
276
277 /* Clear pending PW interrupts */
278 iowrite32(TSI721_RIO_PW_RX_STAT_PW_DISC | TSI721_RIO_PW_RX_STAT_PW_VAL,
279 priv->regs + TSI721_RIO_PW_RX_STAT);
280
281 schedule_work(&priv->pw_work);
282
283 return 0;
284}
285
286static void tsi721_pw_dpc(struct work_struct *work)
287{
288 struct tsi721_device *priv = container_of(work, struct tsi721_device,
289 pw_work);
290 u32 msg_buffer[RIO_PW_MSG_SIZE/sizeof(u32)]; /* Use full size PW message
291 buffer for RIO layer */
292
293 /*
294 * Process port-write messages
295 */
296 while (kfifo_out_spinlocked(&priv->pw_fifo, (unsigned char *)msg_buffer,
297 TSI721_RIO_PW_MSG_SIZE, &priv->pw_fifo_lock)) {
298 /* Process one message */
299#ifdef DEBUG_PW
300 {
301 u32 i;
302 pr_debug("%s : Port-Write Message:", __func__);
303 for (i = 0; i < RIO_PW_MSG_SIZE/sizeof(u32); ) {
304 pr_debug("0x%02x: %08x %08x %08x %08x", i*4,
305 msg_buffer[i], msg_buffer[i + 1],
306 msg_buffer[i + 2], msg_buffer[i + 3]);
307 i += 4;
308 }
309 pr_debug("\n");
310 }
311#endif
312 /* Pass the port-write message to RIO core for processing */
313 rio_inb_pwrite_handler((union rio_pw_msg *)msg_buffer);
314 }
315}
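/*
 * Downstream of this deferred handler (generic RIO core flow, not code
 * from this file): rio_inb_pwrite_handler() identifies the reporting
 * device from the port-write payload and forwards the message to the
 * callback that the device's driver registered with
 * rio_request_inb_pwrite().
 */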
316
317/**
318 * tsi721_pw_enable - enable/disable port-write interface
319 * @mport: Master port implementing the port write unit
320 * @enable: 1=enable; 0=disable port-write message handling
321 */
322static int tsi721_pw_enable(struct rio_mport *mport, int enable)
323{
324 struct tsi721_device *priv = mport->priv;
325 u32 rval;
326
327 rval = ioread32(priv->regs + TSI721_RIO_EM_INT_ENABLE);
328
329 if (enable)
330 rval |= TSI721_RIO_EM_INT_ENABLE_PW_RX;
331 else
332 rval &= ~TSI721_RIO_EM_INT_ENABLE_PW_RX;
333
334 /* Clear pending PW interrupts */
335 iowrite32(TSI721_RIO_PW_RX_STAT_PW_DISC | TSI721_RIO_PW_RX_STAT_PW_VAL,
336 priv->regs + TSI721_RIO_PW_RX_STAT);
337 /* Update enable bits */
338 iowrite32(rval, priv->regs + TSI721_RIO_EM_INT_ENABLE);
339
340 return 0;
341}
342
343/**
344 * tsi721_dsend - Send a RapidIO doorbell
345 * @mport: RapidIO master port info
346 * @index: ID of RapidIO interface
347 * @destid: Destination ID of target device
348 * @data: 16-bit info field of RapidIO doorbell
349 *
350 * Sends a RapidIO doorbell message. Always returns %0.
351 */
352static int tsi721_dsend(struct rio_mport *mport, int index,
353 u16 destid, u16 data)
354{
355 struct tsi721_device *priv = mport->priv;
356 u32 offset;
357
358 offset = (((mport->sys_size) ? RIO_TT_CODE_16 : RIO_TT_CODE_8) << 18) |
359 (destid << 2);
360
361 dev_dbg(&priv->pdev->dev,
362 "Send Doorbell 0x%04x to destID 0x%x\n", data, destid);
363 iowrite16be(data, priv->odb_base + offset);
364
365 return 0;
366}
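/*
 * Usage sketch (driver-facing API, not code from this file): enumerated
 * devices send doorbells through the core helper
 *
 *	rio_send_doorbell(rdev, info);
 *
 * which resolves to mport->ops->dsend(), i.e. tsi721_dsend() here, with
 * @data carrying the 16-bit info field.
 */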
367
368/**
369 * tsi721_dbell_handler - Tsi721 doorbell interrupt handler
370 * @mport: RapidIO master port structure
371 *
372 * Handles inbound doorbell interrupts: masks further doorbell-receive
373 * interrupts for the IDB queue and schedules the deferred routine that
374 * processes queued doorbells.
375 */
376static int
377tsi721_dbell_handler(struct rio_mport *mport)
378{
379 struct tsi721_device *priv = mport->priv;
380 u32 regval;
381
382 /* Disable IDB interrupts */
383 regval = ioread32(priv->regs + TSI721_SR_CHINTE(IDB_QUEUE));
384 regval &= ~TSI721_SR_CHINT_IDBQRCV;
385 iowrite32(regval,
386 priv->regs + TSI721_SR_CHINTE(IDB_QUEUE));
387
388 schedule_work(&priv->idb_work);
389
390 return 0;
391}
392
393static void tsi721_db_dpc(struct work_struct *work)
394{
395 struct tsi721_device *priv = container_of(work, struct tsi721_device,
396 idb_work);
397 struct rio_mport *mport;
398 struct rio_dbell *dbell;
399 int found = 0;
400 u32 wr_ptr, rd_ptr;
401 u64 *idb_entry;
402 u32 regval;
403 union {
404 u64 msg;
405 u8 bytes[8];
406 } idb;
407
408 /*
409 * Process queued inbound doorbells
410 */
411 mport = priv->mport;
412
413 wr_ptr = ioread32(priv->regs + TSI721_IDQ_WP(IDB_QUEUE));
414 rd_ptr = ioread32(priv->regs + TSI721_IDQ_RP(IDB_QUEUE));
415
416 while (wr_ptr != rd_ptr) {
417 idb_entry = (u64 *)(priv->idb_base +
418 (TSI721_IDB_ENTRY_SIZE * rd_ptr));
419 rd_ptr++;
 rd_ptr %= IDB_QSIZE; /* keep the read pointer inside the queue */
420 idb.msg = *idb_entry;
421 *idb_entry = 0;
422
423 /* Process one doorbell */
424 list_for_each_entry(dbell, &mport->dbells, node) {
425 if ((dbell->res->start <= DBELL_INF(idb.bytes)) &&
426 (dbell->res->end >= DBELL_INF(idb.bytes))) {
427 found = 1;
428 break;
429 }
430 }
431
432 if (found) {
433 dbell->dinb(mport, dbell->dev_id, DBELL_SID(idb.bytes),
434 DBELL_TID(idb.bytes), DBELL_INF(idb.bytes));
435 } else {
436 dev_dbg(&priv->pdev->dev,
437 "spurious inb doorbell, sid %2.2x tid %2.2x"
438 " info %4.4x\n", DBELL_SID(idb.bytes),
439 DBELL_TID(idb.bytes), DBELL_INF(idb.bytes));
440 }
 found = 0; /* reset match flag before processing the next doorbell */
441 }
442
443 iowrite32(rd_ptr & (IDB_QSIZE - 1),
444 priv->regs + TSI721_IDQ_RP(IDB_QUEUE));
445
446 /* Re-enable IDB interrupts */
447 regval = ioread32(priv->regs + TSI721_SR_CHINTE(IDB_QUEUE));
448 regval |= TSI721_SR_CHINT_IDBQRCV;
449 iowrite32(regval,
450 priv->regs + TSI721_SR_CHINTE(IDB_QUEUE));
451}
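/*
 * Consumer-side sketch (typical usage, not code from this driver): the
 * dbell->dinb() callback invoked above is registered by an interested
 * driver for a doorbell info range, e.g.
 *
 *	static void my_dbell_cb(struct rio_mport *mport, void *dev_id,
 *				u16 src, u16 dst, u16 info)
 *	{
 *		... handle doorbell 'info' from device 'src' ...
 *	}
 *
 *	rio_request_inb_dbell(mport, dev_id, 0x0000, 0xffff, my_dbell_cb);
 */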
452
453/**
454 * tsi721_irqhandler - Tsi721 interrupt handler
455 * @irq: Linux interrupt number
456 * @ptr: Pointer to interrupt-specific data (mport structure)
457 *
458 * Handles Tsi721 interrupts signaled using MSI and INTA. Checks reported
459 * interrupt events and calls the matching event-specific handlers.
460 */
461static irqreturn_t tsi721_irqhandler(int irq, void *ptr)
462{
463 struct rio_mport *mport = (struct rio_mport *)ptr;
464 struct tsi721_device *priv = mport->priv;
465 u32 dev_int;
466 u32 dev_ch_int;
467 u32 intval;
468 u32 ch_inte;
469
470 dev_int = ioread32(priv->regs + TSI721_DEV_INT);
471 if (!dev_int)
472 return IRQ_NONE;
473
474 dev_ch_int = ioread32(priv->regs + TSI721_DEV_CHAN_INT);
475
476 if (dev_int & TSI721_DEV_INT_SR2PC_CH) {
477 /* Service SR2PC Channel interrupts */
478 if (dev_ch_int & TSI721_INT_SR2PC_CHAN(IDB_QUEUE)) {
479 /* Service Inbound Doorbell interrupt */
480 intval = ioread32(priv->regs +
481 TSI721_SR_CHINT(IDB_QUEUE));
482 if (intval & TSI721_SR_CHINT_IDBQRCV)
483 tsi721_dbell_handler(mport);
484 else
485 dev_info(&priv->pdev->dev,
486 "Unsupported SR_CH_INT %x\n", intval);
487
488 /* Clear interrupts */
489 iowrite32(intval,
490 priv->regs + TSI721_SR_CHINT(IDB_QUEUE));
491 ioread32(priv->regs + TSI721_SR_CHINT(IDB_QUEUE));
492 }
493 }
494
495 if (dev_int & TSI721_DEV_INT_SMSG_CH) {
496 int ch;
497
498 /*
499 * Service channel interrupts from Messaging Engine
500 */
501
502 if (dev_ch_int & TSI721_INT_IMSG_CHAN_M) { /* Inbound Msg */
503 /* Disable signaled IB MSG Channel interrupts */
504 ch_inte = ioread32(priv->regs + TSI721_DEV_CHAN_INTE);
505 ch_inte &= ~(dev_ch_int & TSI721_INT_IMSG_CHAN_M);
506 iowrite32(ch_inte, priv->regs + TSI721_DEV_CHAN_INTE);
507
508 /*
509 * Process Inbound Message interrupt for each MBOX
510 */
511 for (ch = 4; ch < RIO_MAX_MBOX + 4; ch++) {
512 if (!(dev_ch_int & TSI721_INT_IMSG_CHAN(ch)))
513 continue;
514 tsi721_imsg_handler(priv, ch);
515 }
516 }
517
518 if (dev_ch_int & TSI721_INT_OMSG_CHAN_M) { /* Outbound Msg */
519 /* Disable signaled OB MSG Channel interrupts */
520 ch_inte = ioread32(priv->regs + TSI721_DEV_CHAN_INTE);
521 ch_inte &= ~(dev_ch_int & TSI721_INT_OMSG_CHAN_M);
522 iowrite32(ch_inte, priv->regs + TSI721_DEV_CHAN_INTE);
523
524 /*
525 * Process Outbound Message interrupts for each MBOX
526 */
527
528 for (ch = 0; ch < RIO_MAX_MBOX; ch++) {
529 if (!(dev_ch_int & TSI721_INT_OMSG_CHAN(ch)))
530 continue;
531 tsi721_omsg_handler(priv, ch);
532 }
533 }
534 }
535
536 if (dev_int & TSI721_DEV_INT_SRIO) {
537 /* Service SRIO MAC interrupts */
538 intval = ioread32(priv->regs + TSI721_RIO_EM_INT_STAT);
539 if (intval & TSI721_RIO_EM_INT_STAT_PW_RX)
540 tsi721_pw_handler(mport);
541 }
542
543 return IRQ_HANDLED;
544}
545
546static void tsi721_interrupts_init(struct tsi721_device *priv)
547{
548 u32 intr;
549
550 /* Enable IDB interrupts */
551 iowrite32(TSI721_SR_CHINT_ALL,
552 priv->regs + TSI721_SR_CHINT(IDB_QUEUE));
553 iowrite32(TSI721_SR_CHINT_IDBQRCV,
554 priv->regs + TSI721_SR_CHINTE(IDB_QUEUE));
555 iowrite32(TSI721_INT_SR2PC_CHAN(IDB_QUEUE),
556 priv->regs + TSI721_DEV_CHAN_INTE);
557
558 /* Enable SRIO MAC interrupts */
559 iowrite32(TSI721_RIO_EM_DEV_INT_EN_INT,
560 priv->regs + TSI721_RIO_EM_DEV_INT_EN);
561
562 if (priv->flags & TSI721_USING_MSIX)
563 intr = TSI721_DEV_INT_SRIO;
564 else
565 intr = TSI721_DEV_INT_SR2PC_CH | TSI721_DEV_INT_SRIO |
566 TSI721_DEV_INT_SMSG_CH;
567
568 iowrite32(intr, priv->regs + TSI721_DEV_INTE);
569 ioread32(priv->regs + TSI721_DEV_INTE);
570}
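/*
 * A note on the interrupt hierarchy visible above (derived from the
 * register usage in this file): with MSI or INTA signalling, interrupts
 * cascade through three enable levels --
 *
 *	TSI721_DEV_INTE          device-level enables (SR2PC, SRIO, SMSG)
 *	TSI721_DEV_CHAN_INTE     per-channel group enables
 *	TSI721_*_INTE(ch)        individual channel enables
 *
 * In MSI-X mode most events have dedicated vectors, so only the SRIO MAC
 * device-level enable is set here; the per-channel enables are managed by
 * the mailbox open/close and enable/disable helpers below.
 */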
571
572#ifdef CONFIG_PCI_MSI
573/**
574 * tsi721_omsg_msix - MSI-X interrupt handler for outbound messaging
575 * @irq: Linux interrupt number
576 * @ptr: Pointer to interrupt-specific data (mport structure)
577 *
578 * Handles outbound messaging interrupts signaled using MSI-X.
579 */
580static irqreturn_t tsi721_omsg_msix(int irq, void *ptr)
581{
582 struct tsi721_device *priv = ((struct rio_mport *)ptr)->priv;
583 int mbox;
584
585 mbox = (irq - priv->msix[TSI721_VECT_OMB0_DONE].vector) % RIO_MAX_MBOX;
586 tsi721_omsg_handler(priv, mbox);
587 return IRQ_HANDLED;
588}
589
590/**
591 * tsi721_imsg_msix - MSI-X interrupt handler for inbound messaging
592 * @irq: Linux interrupt number
593 * @ptr: Pointer to interrupt-specific data (mport structure)
594 *
595 * Handles inbound messaging interrupts signaled using MSI-X.
596 */
597static irqreturn_t tsi721_imsg_msix(int irq, void *ptr)
598{
599 struct tsi721_device *priv = ((struct rio_mport *)ptr)->priv;
600 int mbox;
601
602 mbox = (irq - priv->msix[TSI721_VECT_IMB0_RCV].vector) % RIO_MAX_MBOX;
603 tsi721_imsg_handler(priv, mbox + 4);
604 return IRQ_HANDLED;
605}
606
607/**
608 * tsi721_srio_msix - Tsi721 MSI-X SRIO MAC interrupt handler
609 * @irq: Linux interrupt number
610 * @ptr: Pointer to interrupt-specific data (mport structure)
611 *
612 * Handles Tsi721 interrupts from SRIO MAC.
613 */
614static irqreturn_t tsi721_srio_msix(int irq, void *ptr)
615{
616 struct tsi721_device *priv = ((struct rio_mport *)ptr)->priv;
617 u32 srio_int;
618
619 /* Service SRIO MAC interrupts */
620 srio_int = ioread32(priv->regs + TSI721_RIO_EM_INT_STAT);
621 if (srio_int & TSI721_RIO_EM_INT_STAT_PW_RX)
622 tsi721_pw_handler((struct rio_mport *)ptr);
623
624 return IRQ_HANDLED;
625}
626
627/**
628 * tsi721_sr2pc_ch_msix - Tsi721 MSI-X SR2PC Channel interrupt handler
629 * @irq: Linux interrupt number
630 * @ptr: Pointer to interrupt-specific data (mport structure)
631 *
632 * Handles Tsi721 interrupts from SR2PC Channel.
633 * NOTE: At this moment services only one SR2PC channel associated with inbound
634 * doorbells.
635 */
636static irqreturn_t tsi721_sr2pc_ch_msix(int irq, void *ptr)
637{
638 struct tsi721_device *priv = ((struct rio_mport *)ptr)->priv;
639 u32 sr_ch_int;
640
641 /* Service Inbound DB interrupt from SR2PC channel */
642 sr_ch_int = ioread32(priv->regs + TSI721_SR_CHINT(IDB_QUEUE));
643 if (sr_ch_int & TSI721_SR_CHINT_IDBQRCV)
644 tsi721_dbell_handler((struct rio_mport *)ptr);
645
646 /* Clear interrupts */
647 iowrite32(sr_ch_int, priv->regs + TSI721_SR_CHINT(IDB_QUEUE));
648 /* Read back to ensure that interrupt was cleared */
649 sr_ch_int = ioread32(priv->regs + TSI721_SR_CHINT(IDB_QUEUE));
650
651 return IRQ_HANDLED;
652}
653
654/**
655 * tsi721_request_msix - register interrupt service for MSI-X mode.
656 * @mport: RapidIO master port structure
657 *
658 * Registers MSI-X interrupt service routines for interrupts that are active
659 * immediately after mport initialization. Messaging interrupt service routines
660 * should be registered during corresponding open requests.
661 */
662static int tsi721_request_msix(struct rio_mport *mport)
663{
664 struct tsi721_device *priv = mport->priv;
665 int err = 0;
666
667 err = request_irq(priv->msix[TSI721_VECT_IDB].vector,
668 tsi721_sr2pc_ch_msix, 0,
669 priv->msix[TSI721_VECT_IDB].irq_name, (void *)mport);
670 if (err)
671 goto out;
672
673 err = request_irq(priv->msix[TSI721_VECT_PWRX].vector,
674 tsi721_srio_msix, 0,
675 priv->msix[TSI721_VECT_PWRX].irq_name, (void *)mport);
676 if (err)
677 free_irq(
678 priv->msix[TSI721_VECT_IDB].vector,
679 (void *)mport);
680out:
681 return err;
682}
683
684/**
685 * tsi721_enable_msix - Attempts to enable MSI-X support for Tsi721.
686 * @priv: pointer to tsi721 private data
687 *
688 * Configures MSI-X support for Tsi721. MSI-X is used only if the exact
689 * number of requested vectors can be allocated (no partial allocation).
690 */
691static int tsi721_enable_msix(struct tsi721_device *priv)
692{
693 struct msix_entry entries[TSI721_VECT_MAX];
694 int err;
695 int i;
696
697 entries[TSI721_VECT_IDB].entry = TSI721_MSIX_SR2PC_IDBQ_RCV(IDB_QUEUE);
698 entries[TSI721_VECT_PWRX].entry = TSI721_MSIX_SRIO_MAC_INT;
699
700 /*
701 * Initialize MSI-X entries for Messaging Engine:
702 * this driver supports four RIO mailboxes (inbound and outbound)
703 * NOTE: Inbound message MBOX 0...3 use IB channels 4...7. Therefore an
704 * offset of +4 is added to the IB MBOX number.
705 */
706 for (i = 0; i < RIO_MAX_MBOX; i++) {
707 entries[TSI721_VECT_IMB0_RCV + i].entry =
708 TSI721_MSIX_IMSG_DQ_RCV(i + 4);
709 entries[TSI721_VECT_IMB0_INT + i].entry =
710 TSI721_MSIX_IMSG_INT(i + 4);
711 entries[TSI721_VECT_OMB0_DONE + i].entry =
712 TSI721_MSIX_OMSG_DONE(i);
713 entries[TSI721_VECT_OMB0_INT + i].entry =
714 TSI721_MSIX_OMSG_INT(i);
715 }
716
717 err = pci_enable_msix(priv->pdev, entries, ARRAY_SIZE(entries));
718 if (err) {
719 if (err > 0)
720 dev_info(&priv->pdev->dev,
721 "Only %d MSI-X vectors available, "
722 "not using MSI-X\n", err);
723 return err;
724 }
725
726 /*
727 * Copy MSI-X vector information into tsi721 private structure
728 */
729 priv->msix[TSI721_VECT_IDB].vector = entries[TSI721_VECT_IDB].vector;
730 snprintf(priv->msix[TSI721_VECT_IDB].irq_name, IRQ_DEVICE_NAME_MAX,
731 DRV_NAME "-idb@pci:%s", pci_name(priv->pdev));
732 priv->msix[TSI721_VECT_PWRX].vector = entries[TSI721_VECT_PWRX].vector;
733 snprintf(priv->msix[TSI721_VECT_PWRX].irq_name, IRQ_DEVICE_NAME_MAX,
734 DRV_NAME "-pwrx@pci:%s", pci_name(priv->pdev));
735
736 for (i = 0; i < RIO_MAX_MBOX; i++) {
737 priv->msix[TSI721_VECT_IMB0_RCV + i].vector =
738 entries[TSI721_VECT_IMB0_RCV + i].vector;
739 snprintf(priv->msix[TSI721_VECT_IMB0_RCV + i].irq_name,
740 IRQ_DEVICE_NAME_MAX, DRV_NAME "-imbr%d@pci:%s",
741 i, pci_name(priv->pdev));
742
743 priv->msix[TSI721_VECT_IMB0_INT + i].vector =
744 entries[TSI721_VECT_IMB0_INT + i].vector;
745 snprintf(priv->msix[TSI721_VECT_IMB0_INT + i].irq_name,
746 IRQ_DEVICE_NAME_MAX, DRV_NAME "-imbi%d@pci:%s",
747 i, pci_name(priv->pdev));
748
749 priv->msix[TSI721_VECT_OMB0_DONE + i].vector =
750 entries[TSI721_VECT_OMB0_DONE + i].vector;
751 snprintf(priv->msix[TSI721_VECT_OMB0_DONE + i].irq_name,
752 IRQ_DEVICE_NAME_MAX, DRV_NAME "-ombd%d@pci:%s",
753 i, pci_name(priv->pdev));
754
755 priv->msix[TSI721_VECT_OMB0_INT + i].vector =
756 entries[TSI721_VECT_OMB0_INT + i].vector;
757 snprintf(priv->msix[TSI721_VECT_OMB0_INT + i].irq_name,
758 IRQ_DEVICE_NAME_MAX, DRV_NAME "-ombi%d@pci:%s",
759 i, pci_name(priv->pdev));
760 }
761
762 return 0;
763}
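/*
 * Note on pci_enable_msix() semantics (as of this kernel generation): a
 * return of 0 means all requested vectors were allocated; a positive
 * return is the number of vectors that could have been allocated. This
 * driver needs the exact set, so any non-zero return is treated as
 * failure and the caller falls back to MSI or INTA signalling.
 */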
764#endif /* CONFIG_PCI_MSI */
765
766static int tsi721_request_irq(struct rio_mport *mport)
767{
768 struct tsi721_device *priv = mport->priv;
769 int err;
770
771#ifdef CONFIG_PCI_MSI
772 if (priv->flags & TSI721_USING_MSIX)
773 err = tsi721_request_msix(mport);
774 else
775#endif
776 err = request_irq(priv->pdev->irq, tsi721_irqhandler,
777 (priv->flags & TSI721_USING_MSI) ? 0 : IRQF_SHARED,
778 DRV_NAME, (void *)mport);
779
780 if (err)
781 dev_err(&priv->pdev->dev,
782 "Unable to allocate interrupt, Error: %d\n", err);
783
784 return err;
785}
786
787/**
788 * tsi721_init_pc2sr_mapping - initializes outbound (PCIe->SRIO)
789 * translation regions.
790 * @priv: pointer to tsi721 private data
791 *
792 * Disables SREP translation regions.
793 */
794static void tsi721_init_pc2sr_mapping(struct tsi721_device *priv)
795{
796 int i;
797
798 /* Disable all PC2SR translation windows */
799 for (i = 0; i < TSI721_OBWIN_NUM; i++)
800 iowrite32(0, priv->regs + TSI721_OBWINLB(i));
801}
802
803/**
804 * tsi721_init_sr2pc_mapping - initializes inbound (SRIO->PCIe)
805 * translation regions.
806 * @priv: pointer to tsi721 private data
807 *
808 * Disables inbound windows.
809 */
810static void tsi721_init_sr2pc_mapping(struct tsi721_device *priv)
811{
812 int i;
813
814 /* Disable all SR2PC inbound windows */
815 for (i = 0; i < TSI721_IBWIN_NUM; i++)
816 iowrite32(0, priv->regs + TSI721_IBWINLB(i));
817}
818
819/**
820 * tsi721_port_write_init - Inbound port write interface init
821 * @priv: pointer to tsi721 private data
822 *
823 * Initializes inbound port write handler.
824 * Returns %0 on success or %-ENOMEM on failure.
825 */
826static int tsi721_port_write_init(struct tsi721_device *priv)
827{
828 priv->pw_discard_count = 0;
829 INIT_WORK(&priv->pw_work, tsi721_pw_dpc);
830 spin_lock_init(&priv->pw_fifo_lock);
831 if (kfifo_alloc(&priv->pw_fifo,
832 TSI721_RIO_PW_MSG_SIZE * 32, GFP_KERNEL)) {
833 dev_err(&priv->pdev->dev, "PW FIFO allocation failed\n");
834 return -ENOMEM;
835 }
836
837 /* Use reliable port-write capture mode */
838 iowrite32(TSI721_RIO_PW_CTL_PWC_REL, priv->regs + TSI721_RIO_PW_CTL);
839 return 0;
840}
841
842static int tsi721_doorbell_init(struct tsi721_device *priv)
843{
844 /* Outbound Doorbells do not require any setup.
845 * Tsi721 uses dedicated PCI BAR1 to generate doorbells.
846 * That BAR1 was mapped during the probe routine.
847 */
848
849 /* Initialize Inbound Doorbell processing DPC and queue */
850 priv->db_discard_count = 0;
851 INIT_WORK(&priv->idb_work, tsi721_db_dpc);
852
853 /* Allocate buffer for inbound doorbells queue */
854 priv->idb_base = dma_alloc_coherent(&priv->pdev->dev,
855 IDB_QSIZE * TSI721_IDB_ENTRY_SIZE,
856 &priv->idb_dma, GFP_KERNEL);
857 if (!priv->idb_base)
858 return -ENOMEM;
859
860 memset(priv->idb_base, 0, IDB_QSIZE * TSI721_IDB_ENTRY_SIZE);
861
862 dev_dbg(&priv->pdev->dev, "Allocated IDB buffer @ %p (phys = %llx)\n",
863 priv->idb_base, (unsigned long long)priv->idb_dma);
864
865 iowrite32(TSI721_IDQ_SIZE_VAL(IDB_QSIZE),
866 priv->regs + TSI721_IDQ_SIZE(IDB_QUEUE));
867 iowrite32(((u64)priv->idb_dma >> 32),
868 priv->regs + TSI721_IDQ_BASEU(IDB_QUEUE));
869 iowrite32(((u64)priv->idb_dma & TSI721_IDQ_BASEL_ADDR),
870 priv->regs + TSI721_IDQ_BASEL(IDB_QUEUE));
871 /* Enable accepting all inbound doorbells */
872 iowrite32(0, priv->regs + TSI721_IDQ_MASK(IDB_QUEUE));
873
874 iowrite32(TSI721_IDQ_INIT, priv->regs + TSI721_IDQ_CTL(IDB_QUEUE));
875
876 iowrite32(0, priv->regs + TSI721_IDQ_RP(IDB_QUEUE));
877
878 return 0;
879}
880
881static void tsi721_doorbell_free(struct tsi721_device *priv)
882{
883 if (priv->idb_base == NULL)
884 return;
885
886 /* Free buffer allocated for inbound doorbell queue */
887 dma_free_coherent(&priv->pdev->dev, IDB_QSIZE * TSI721_IDB_ENTRY_SIZE,
888 priv->idb_base, priv->idb_dma);
889 priv->idb_base = NULL;
890}
891
892static int tsi721_bdma_ch_init(struct tsi721_device *priv, int chnum)
893{
894 struct tsi721_dma_desc *bd_ptr;
895 u64 *sts_ptr;
896 dma_addr_t bd_phys, sts_phys;
897 int sts_size;
898 int bd_num = priv->bdma[chnum].bd_num;
899
900 dev_dbg(&priv->pdev->dev, "Init Block DMA Engine, CH%d\n", chnum);
901
902 /*
903 * Initialize DMA channel for maintenance requests
904 */
905
906 /* Allocate space for DMA descriptors */
907 bd_ptr = dma_alloc_coherent(&priv->pdev->dev,
908 bd_num * sizeof(struct tsi721_dma_desc),
909 &bd_phys, GFP_KERNEL);
910 if (!bd_ptr)
911 return -ENOMEM;
912
913 priv->bdma[chnum].bd_phys = bd_phys;
914 priv->bdma[chnum].bd_base = bd_ptr;
915
916 memset(bd_ptr, 0, bd_num * sizeof(struct tsi721_dma_desc));
917
918 dev_dbg(&priv->pdev->dev, "DMA descriptors @ %p (phys = %llx)\n",
919 bd_ptr, (unsigned long long)bd_phys);
920
921 /* Allocate space for descriptor status FIFO */
922 sts_size = (bd_num >= TSI721_DMA_MINSTSSZ) ?
923 bd_num : TSI721_DMA_MINSTSSZ;
924 sts_size = roundup_pow_of_two(sts_size);
925 sts_ptr = dma_alloc_coherent(&priv->pdev->dev,
926 sts_size * sizeof(struct tsi721_dma_sts),
927 &sts_phys, GFP_KERNEL);
928 if (!sts_ptr) {
929 /* Free space allocated for DMA descriptors */
930 dma_free_coherent(&priv->pdev->dev,
931 bd_num * sizeof(struct tsi721_dma_desc),
932 bd_ptr, bd_phys);
933 priv->bdma[chnum].bd_base = NULL;
934 return -ENOMEM;
935 }
936
937 priv->bdma[chnum].sts_phys = sts_phys;
938 priv->bdma[chnum].sts_base = sts_ptr;
939 priv->bdma[chnum].sts_size = sts_size;
940
941 memset(sts_ptr, 0, sts_size * sizeof(struct tsi721_dma_sts));
942
943 dev_dbg(&priv->pdev->dev,
944 "desc status FIFO @ %p (phys = %llx) size=0x%x\n",
945 sts_ptr, (unsigned long long)sts_phys, sts_size);
946
947 /* Initialize DMA descriptors ring */
948 bd_ptr[bd_num - 1].type_id = cpu_to_le32(DTYPE3 << 29);
949 bd_ptr[bd_num - 1].next_lo = cpu_to_le32((u64)bd_phys &
950 TSI721_DMAC_DPTRL_MASK);
951 bd_ptr[bd_num - 1].next_hi = cpu_to_le32((u64)bd_phys >> 32);
952
953 /* Setup DMA descriptor pointers */
954 iowrite32(((u64)bd_phys >> 32),
955 priv->regs + TSI721_DMAC_DPTRH(chnum));
956 iowrite32(((u64)bd_phys & TSI721_DMAC_DPTRL_MASK),
957 priv->regs + TSI721_DMAC_DPTRL(chnum));
958
959 /* Setup descriptor status FIFO */
960 iowrite32(((u64)sts_phys >> 32),
961 priv->regs + TSI721_DMAC_DSBH(chnum));
962 iowrite32(((u64)sts_phys & TSI721_DMAC_DSBL_MASK),
963 priv->regs + TSI721_DMAC_DSBL(chnum));
964 iowrite32(TSI721_DMAC_DSSZ_SIZE(sts_size),
965 priv->regs + TSI721_DMAC_DSSZ(chnum));
966
967 /* Clear interrupt bits */
968 iowrite32(TSI721_DMAC_INT_ALL,
969 priv->regs + TSI721_DMAC_INT(chnum));
970
971 ioread32(priv->regs + TSI721_DMAC_INT(chnum));
972
973 /* Toggle DMA channel initialization */
974 iowrite32(TSI721_DMAC_CTL_INIT, priv->regs + TSI721_DMAC_CTL(chnum));
975 ioread32(priv->regs + TSI721_DMAC_CTL(chnum));
976 udelay(10);
977
978 return 0;
979}
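/*
 * Resulting channel layout (summary of the setup above): bd_num
 * descriptors in a contiguous ring whose last entry is a DTYPE3 link
 * descriptor pointing back to the ring base, plus a separate status FIFO
 * of sts_size entries that the hardware fills as descriptors complete:
 *
 *	bd_ptr[0] .. bd_ptr[bd_num - 2]    data descriptors
 *	bd_ptr[bd_num - 1]                 DTYPE3 link -> bd_phys
 *	sts_ptr[0 .. sts_size - 1]         descriptor status FIFO
 */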
980
981static int tsi721_bdma_ch_free(struct tsi721_device *priv, int chnum)
982{
983 u32 ch_stat;
984
985 if (priv->bdma[chnum].bd_base == NULL)
986 return 0;
987
988 /* Check if DMA channel still running */
989 ch_stat = ioread32(priv->regs + TSI721_DMAC_STS(chnum));
990 if (ch_stat & TSI721_DMAC_STS_RUN)
991 return -EFAULT;
992
993 /* Put DMA channel into init state */
994 iowrite32(TSI721_DMAC_CTL_INIT,
995 priv->regs + TSI721_DMAC_CTL(chnum));
996
997 /* Free space allocated for DMA descriptors */
998 dma_free_coherent(&priv->pdev->dev,
999 priv->bdma[chnum].bd_num * sizeof(struct tsi721_dma_desc),
1000 priv->bdma[chnum].bd_base, priv->bdma[chnum].bd_phys);
1001 priv->bdma[chnum].bd_base = NULL;
1002
1003 /* Free space allocated for status FIFO */
1004 dma_free_coherent(&priv->pdev->dev,
1005 priv->bdma[chnum].sts_size * sizeof(struct tsi721_dma_sts),
1006 priv->bdma[chnum].sts_base, priv->bdma[chnum].sts_phys);
1007 priv->bdma[chnum].sts_base = NULL;
1008 return 0;
1009}
1010
1011static int tsi721_bdma_init(struct tsi721_device *priv)
1012{
1013 /* Initialize BDMA channel allocated for RapidIO maintenance read/write
1014 * request generation
1015 */
1016 priv->bdma[TSI721_DMACH_MAINT].bd_num = 2;
1017 if (tsi721_bdma_ch_init(priv, TSI721_DMACH_MAINT)) {
1018 dev_err(&priv->pdev->dev, "Unable to initialize maintenance DMA"
1019 " channel %d, aborting\n", TSI721_DMACH_MAINT);
1020 return -ENOMEM;
1021 }
1022
1023 return 0;
1024}
1025
1026static void tsi721_bdma_free(struct tsi721_device *priv)
1027{
1028 tsi721_bdma_ch_free(priv, TSI721_DMACH_MAINT);
1029}
1030
1031/* Enable Inbound Messaging Interrupts */
1032static void
1033tsi721_imsg_interrupt_enable(struct tsi721_device *priv, int ch,
1034 u32 inte_mask)
1035{
1036 u32 rval;
1037
1038 if (!inte_mask)
1039 return;
1040
1041 /* Clear pending Inbound Messaging interrupts */
1042 iowrite32(inte_mask, priv->regs + TSI721_IBDMAC_INT(ch));
1043
1044 /* Enable Inbound Messaging interrupts */
1045 rval = ioread32(priv->regs + TSI721_IBDMAC_INTE(ch));
1046 iowrite32(rval | inte_mask, priv->regs + TSI721_IBDMAC_INTE(ch));
1047
1048 if (priv->flags & TSI721_USING_MSIX)
1049 return; /* Finished if we are in MSI-X mode */
1050
1051 /*
1052 * For MSI and INTA interrupt signalling we need to enable next levels
1053 */
1054
1055 /* Enable Device Channel Interrupt */
1056 rval = ioread32(priv->regs + TSI721_DEV_CHAN_INTE);
1057 iowrite32(rval | TSI721_INT_IMSG_CHAN(ch),
1058 priv->regs + TSI721_DEV_CHAN_INTE);
1059}
1060
1061/* Disable Inbound Messaging Interrupts */
1062static void
1063tsi721_imsg_interrupt_disable(struct tsi721_device *priv, int ch,
1064 u32 inte_mask)
1065{
1066 u32 rval;
1067
1068 if (!inte_mask)
1069 return;
1070
1071 /* Clear pending Inbound Messaging interrupts */
1072 iowrite32(inte_mask, priv->regs + TSI721_IBDMAC_INT(ch));
1073
1074 /* Disable Inbound Messaging interrupts */
1075 rval = ioread32(priv->regs + TSI721_IBDMAC_INTE(ch));
1076 rval &= ~inte_mask;
1077 iowrite32(rval, priv->regs + TSI721_IBDMAC_INTE(ch));
1078
1079 if (priv->flags & TSI721_USING_MSIX)
1080 return; /* Finished if we are in MSI-X mode */
1081
1082 /*
1083 * For MSI and INTA interrupt signalling we need to disable next levels
1084 */
1085
1086 /* Disable Device Channel Interrupt */
1087 rval = ioread32(priv->regs + TSI721_DEV_CHAN_INTE);
1088 rval &= ~TSI721_INT_IMSG_CHAN(ch);
1089 iowrite32(rval, priv->regs + TSI721_DEV_CHAN_INTE);
1090}
1091
1092/* Enable Outbound Messaging interrupts */
1093static void
1094tsi721_omsg_interrupt_enable(struct tsi721_device *priv, int ch,
1095 u32 inte_mask)
1096{
1097 u32 rval;
1098
1099 if (!inte_mask)
1100 return;
1101
1102 /* Clear pending Outbound Messaging interrupts */
1103 iowrite32(inte_mask, priv->regs + TSI721_OBDMAC_INT(ch));
1104
1105 /* Enable Outbound Messaging channel interrupts */
1106 rval = ioread32(priv->regs + TSI721_OBDMAC_INTE(ch));
1107 iowrite32(rval | inte_mask, priv->regs + TSI721_OBDMAC_INTE(ch));
1108
1109 if (priv->flags & TSI721_USING_MSIX)
1110 return; /* Finished if we are in MSI-X mode */
1111
1112 /*
1113 * For MSI and INTA interrupt signalling we need to enable next levels
1114 */
1115
1116 /* Enable Device Channel Interrupt */
1117 rval = ioread32(priv->regs + TSI721_DEV_CHAN_INTE);
1118 iowrite32(rval | TSI721_INT_OMSG_CHAN(ch),
1119 priv->regs + TSI721_DEV_CHAN_INTE);
1120}
1121
1122/* Disable Outbound Messaging interrupts */
1123static void
1124tsi721_omsg_interrupt_disable(struct tsi721_device *priv, int ch,
1125 u32 inte_mask)
1126{
1127 u32 rval;
1128
1129 if (!inte_mask)
1130 return;
1131
1132 /* Clear pending Outbound Messaging interrupts */
1133 iowrite32(inte_mask, priv->regs + TSI721_OBDMAC_INT(ch));
1134
1135 /* Disable Outbound Messaging interrupts */
1136 rval = ioread32(priv->regs + TSI721_OBDMAC_INTE(ch));
1137 rval &= ~inte_mask;
1138 iowrite32(rval, priv->regs + TSI721_OBDMAC_INTE(ch));
1139
1140 if (priv->flags & TSI721_USING_MSIX)
1141 return; /* Finished if we are in MSI-X mode */
1142
1143 /*
1144 * For MSI and INTA interrupt signalling we need to disable next levels
1145 */
1146
1147 /* Disable Device Channel Interrupt */
1148 rval = ioread32(priv->regs + TSI721_DEV_CHAN_INTE);
1149 rval &= ~TSI721_INT_OMSG_CHAN(ch);
1150 iowrite32(rval, priv->regs + TSI721_DEV_CHAN_INTE);
1151}
1152
1153/**
1154 * tsi721_add_outb_message - Add message to the Tsi721 outbound message queue
1155 * @mport: Master port with outbound message queue
1156 * @rdev: Target of outbound message
1157 * @mbox: Outbound mailbox
1158 * @buffer: Message to add to outbound queue
1159 * @len: Length of message
1160 */
1161static int
1162tsi721_add_outb_message(struct rio_mport *mport, struct rio_dev *rdev, int mbox,
1163 void *buffer, size_t len)
1164{
1165 struct tsi721_device *priv = mport->priv;
1166 struct tsi721_omsg_desc *desc;
1167 u32 tx_slot;
1168
1169 if (!priv->omsg_init[mbox] ||
1170 len > TSI721_MSG_MAX_SIZE || len < 8)
1171 return -EINVAL;
1172
1173 tx_slot = priv->omsg_ring[mbox].tx_slot;
1174
1175 /* Copy message into transfer buffer */
1176 memcpy(priv->omsg_ring[mbox].omq_base[tx_slot], buffer, len);
1177
1178 if (len & 0x7)
1179 len += 8;
1180
1181 /* Build descriptor associated with buffer */
1182 desc = priv->omsg_ring[mbox].omd_base;
1183 desc[tx_slot].type_id = cpu_to_le32((DTYPE4 << 29) | rdev->destid);
1184 if (tx_slot % 4 == 0)
1185 desc[tx_slot].type_id |= cpu_to_le32(TSI721_OMD_IOF);
1186
1187 desc[tx_slot].msg_info =
1188 cpu_to_le32((mport->sys_size << 26) | (mbox << 22) |
1189 (0xe << 12) | (len & 0xff8));
1190 desc[tx_slot].bufptr_lo =
1191 cpu_to_le32((u64)priv->omsg_ring[mbox].omq_phys[tx_slot] &
1192 0xffffffff);
1193 desc[tx_slot].bufptr_hi =
1194 cpu_to_le32((u64)priv->omsg_ring[mbox].omq_phys[tx_slot] >> 32);
1195
1196 priv->omsg_ring[mbox].wr_count++;
1197
1198 /* Go to next descriptor */
1199 if (++priv->omsg_ring[mbox].tx_slot == priv->omsg_ring[mbox].size) {
1200 priv->omsg_ring[mbox].tx_slot = 0;
1201 /* Move through the ring link descriptor at the end */
1202 priv->omsg_ring[mbox].wr_count++;
1203 }
1204
1205 mb();
1206
1207 /* Set new write count value */
1208 iowrite32(priv->omsg_ring[mbox].wr_count,
1209 priv->regs + TSI721_OBDMAC_DWRCNT(mbox));
1210 ioread32(priv->regs + TSI721_OBDMAC_DWRCNT(mbox));
1211
1212 return 0;
1213}
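/*
 * Usage sketch (RIONET-style consumer, not code from this file):
 *
 *	rio_request_outb_mbox(mport, dev_id, 0, 64, my_tx_done_cb);
 *	rio_add_outb_message(mport, rdev, 0, buf, len);
 *
 * rio_add_outb_message() resolves to tsi721_add_outb_message() above;
 * my_tx_done_cb() is later invoked from tsi721_omsg_handler() with the
 * slot index of the next message to be sent.
 */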
1214
1215/**
1216 * tsi721_omsg_handler - Outbound Message Interrupt Handler
1217 * @priv: pointer to tsi721 private data
1218 * @ch: number of OB MSG channel to service
1219 *
1220 * Services channel interrupts from outbound messaging engine.
1221 */
1222static void tsi721_omsg_handler(struct tsi721_device *priv, int ch)
1223{
1224 u32 omsg_int;
1225
1226 spin_lock(&priv->omsg_ring[ch].lock);
1227
1228 omsg_int = ioread32(priv->regs + TSI721_OBDMAC_INT(ch));
1229
1230 if (omsg_int & TSI721_OBDMAC_INT_ST_FULL)
1231 dev_info(&priv->pdev->dev,
1232 "OB MBOX%d: Status FIFO is full\n", ch);
1233
1234 if (omsg_int & (TSI721_OBDMAC_INT_DONE | TSI721_OBDMAC_INT_IOF_DONE)) {
1235 u32 srd_ptr;
1236 u64 *sts_ptr, last_ptr = 0, prev_ptr = 0;
1237 int i, j;
1238 u32 tx_slot;
1239
1240 /*
1241 * Find last successfully processed descriptor
1242 */
1243
1244 /* Check and clear descriptor status FIFO entries */
1245 srd_ptr = priv->omsg_ring[ch].sts_rdptr;
1246 sts_ptr = priv->omsg_ring[ch].sts_base;
1247 j = srd_ptr * 8;
1248 while (sts_ptr[j]) {
1249 for (i = 0; i < 8 && sts_ptr[j]; i++, j++) {
1250 prev_ptr = last_ptr;
1251 last_ptr = le64_to_cpu(sts_ptr[j]);
1252 sts_ptr[j] = 0;
1253 }
1254
1255 ++srd_ptr;
1256 srd_ptr %= priv->omsg_ring[ch].sts_size;
1257 j = srd_ptr * 8;
1258 }
1259
1260 if (last_ptr == 0)
1261 goto no_sts_update;
1262
1263 priv->omsg_ring[ch].sts_rdptr = srd_ptr;
1264 iowrite32(srd_ptr, priv->regs + TSI721_OBDMAC_DSRP(ch));
1265
1266 if (!priv->mport->outb_msg[ch].mcback)
1267 goto no_sts_update;
1268
1269 /* Inform upper layer about transfer completion */
1270
1271 tx_slot = (last_ptr - (u64)priv->omsg_ring[ch].omd_phys)/
1272 sizeof(struct tsi721_omsg_desc);
1273
1274 /*
1275 * Check if this is a Link Descriptor (LD).
1276 * If yes, ignore LD and use descriptor processed
1277 * before LD.
1278 */
1279 if (tx_slot == priv->omsg_ring[ch].size) {
1280 if (prev_ptr)
1281 tx_slot = (prev_ptr -
1282 (u64)priv->omsg_ring[ch].omd_phys)/
1283 sizeof(struct tsi721_omsg_desc);
1284 else
1285 goto no_sts_update;
1286 }
1287
1288 /* Move slot index to the next message to be sent */
1289 ++tx_slot;
1290 if (tx_slot == priv->omsg_ring[ch].size)
1291 tx_slot = 0;
1292 BUG_ON(tx_slot >= priv->omsg_ring[ch].size);
1293 priv->mport->outb_msg[ch].mcback(priv->mport,
1294 priv->omsg_ring[ch].dev_id, ch,
1295 tx_slot);
1296 }
1297
1298no_sts_update:
1299
1300 if (omsg_int & TSI721_OBDMAC_INT_ERROR) {
1301 /*
1302 * Outbound message operation aborted due to error,
1303 * reinitialize OB MSG channel
1304 */
1305
1306 dev_dbg(&priv->pdev->dev, "OB MSG ABORT ch_stat=%x\n",
1307 ioread32(priv->regs + TSI721_OBDMAC_STS(ch)));
1308
1309 iowrite32(TSI721_OBDMAC_INT_ERROR,
1310 priv->regs + TSI721_OBDMAC_INT(ch));
1311 iowrite32(TSI721_OBDMAC_CTL_INIT,
1312 priv->regs + TSI721_OBDMAC_CTL(ch));
1313 ioread32(priv->regs + TSI721_OBDMAC_CTL(ch));
1314
1315 /* Inform upper level to clear all pending tx slots */
1316 if (priv->mport->outb_msg[ch].mcback)
1317 priv->mport->outb_msg[ch].mcback(priv->mport,
1318 priv->omsg_ring[ch].dev_id, ch,
1319 priv->omsg_ring[ch].tx_slot);
1320 /* Synch tx_slot tracking */
1321 iowrite32(priv->omsg_ring[ch].tx_slot,
1322 priv->regs + TSI721_OBDMAC_DRDCNT(ch));
1323 ioread32(priv->regs + TSI721_OBDMAC_DRDCNT(ch));
1324 priv->omsg_ring[ch].wr_count = priv->omsg_ring[ch].tx_slot;
1325 priv->omsg_ring[ch].sts_rdptr = 0;
1326 }
1327
1328 /* Clear channel interrupts */
1329 iowrite32(omsg_int, priv->regs + TSI721_OBDMAC_INT(ch));
1330
1331 if (!(priv->flags & TSI721_USING_MSIX)) {
1332 u32 ch_inte;
1333
1334 /* Re-enable channel interrupts */
1335 ch_inte = ioread32(priv->regs + TSI721_DEV_CHAN_INTE);
1336 ch_inte |= TSI721_INT_OMSG_CHAN(ch);
1337 iowrite32(ch_inte, priv->regs + TSI721_DEV_CHAN_INTE);
1338 }
1339
1340 spin_unlock(&priv->omsg_ring[ch].lock);
1341}
1342
1343/**
1344 * tsi721_open_outb_mbox - Initialize Tsi721 outbound mailbox
1345 * @mport: Master port implementing Outbound Messaging Engine
1346 * @dev_id: Device specific pointer to pass on event
1347 * @mbox: Mailbox to open
1348 * @entries: Number of entries in the outbound mailbox ring
1349 */
1350static int tsi721_open_outb_mbox(struct rio_mport *mport, void *dev_id,
1351 int mbox, int entries)
1352{
1353 struct tsi721_device *priv = mport->priv;
1354 struct tsi721_omsg_desc *bd_ptr;
1355 int i, rc = 0;
1356
1357 if ((entries < TSI721_OMSGD_MIN_RING_SIZE) ||
1358 (entries > (TSI721_OMSGD_RING_SIZE)) ||
1359 (!is_power_of_2(entries)) || mbox >= RIO_MAX_MBOX) {
1360 rc = -EINVAL;
1361 goto out;
1362 }
1363
1364 priv->omsg_ring[mbox].dev_id = dev_id;
1365 priv->omsg_ring[mbox].size = entries;
1366 priv->omsg_ring[mbox].sts_rdptr = 0;
1367 spin_lock_init(&priv->omsg_ring[mbox].lock);
1368
1369 /* Allocate outbound message buffers, one per
1370 descriptor ring entry */
1371 for (i = 0; i < entries; i++) {
1372 priv->omsg_ring[mbox].omq_base[i] =
1373 dma_alloc_coherent(
1374 &priv->pdev->dev, TSI721_MSG_BUFFER_SIZE,
1375 &priv->omsg_ring[mbox].omq_phys[i],
1376 GFP_KERNEL);
1377 if (priv->omsg_ring[mbox].omq_base[i] == NULL) {
1378 dev_dbg(&priv->pdev->dev,
1379 "Unable to allocate OB MSG data buffer for"
1380 " MBOX%d\n", mbox);
1381 rc = -ENOMEM;
1382 goto out_buf;
1383 }
1384 }
1385
1386 /* Outbound message descriptor allocation */
1387 priv->omsg_ring[mbox].omd_base = dma_alloc_coherent(
1388 &priv->pdev->dev,
1389 (entries + 1) * sizeof(struct tsi721_omsg_desc),
1390 &priv->omsg_ring[mbox].omd_phys, GFP_KERNEL);
1391 if (priv->omsg_ring[mbox].omd_base == NULL) {
1392 dev_dbg(&priv->pdev->dev,
1393 "Unable to allocate OB MSG descriptor memory "
1394 "for MBOX%d\n", mbox);
1395 rc = -ENOMEM;
1396 goto out_buf;
1397 }
1398
1399 priv->omsg_ring[mbox].tx_slot = 0;
1400
1401 /* Outbound message descriptor status FIFO allocation */
1402 priv->omsg_ring[mbox].sts_size = roundup_pow_of_two(entries + 1);
1403 priv->omsg_ring[mbox].sts_base = dma_alloc_coherent(&priv->pdev->dev,
1404 priv->omsg_ring[mbox].sts_size *
1405 sizeof(struct tsi721_dma_sts),
1406 &priv->omsg_ring[mbox].sts_phys, GFP_KERNEL);
1407 if (priv->omsg_ring[mbox].sts_base == NULL) {
1408 dev_dbg(&priv->pdev->dev,
1409 "Unable to allocate OB MSG descriptor status FIFO "
1410 "for MBOX%d\n", mbox);
1411 rc = -ENOMEM;
1412 goto out_desc;
1413 }
1414
1415 memset(priv->omsg_ring[mbox].sts_base, 0,
1416 priv->omsg_ring[mbox].sts_size * sizeof(struct tsi721_dma_sts));
1417
1418 /*
1419 * Configure Outbound Messaging Engine
1420 */
1421
1422 /* Setup Outbound Message descriptor pointer */
1423 iowrite32(((u64)priv->omsg_ring[mbox].omd_phys >> 32),
1424 priv->regs + TSI721_OBDMAC_DPTRH(mbox));
1425 iowrite32(((u64)priv->omsg_ring[mbox].omd_phys &
1426 TSI721_OBDMAC_DPTRL_MASK),
1427 priv->regs + TSI721_OBDMAC_DPTRL(mbox));
1428
1429 /* Setup Outbound Message descriptor status FIFO */
1430 iowrite32(((u64)priv->omsg_ring[mbox].sts_phys >> 32),
1431 priv->regs + TSI721_OBDMAC_DSBH(mbox));
1432 iowrite32(((u64)priv->omsg_ring[mbox].sts_phys &
1433 TSI721_OBDMAC_DSBL_MASK),
1434 priv->regs + TSI721_OBDMAC_DSBL(mbox));
1435 iowrite32(TSI721_DMAC_DSSZ_SIZE(priv->omsg_ring[mbox].sts_size),
1436 priv->regs + (u32)TSI721_OBDMAC_DSSZ(mbox));
1437
1438 /* Enable interrupts */
1439
1440#ifdef CONFIG_PCI_MSI
1441 if (priv->flags & TSI721_USING_MSIX) {
1442 /* Request interrupt service if we are in MSI-X mode */
1443 rc = request_irq(
1444 priv->msix[TSI721_VECT_OMB0_DONE + mbox].vector,
1445 tsi721_omsg_msix, 0,
1446 priv->msix[TSI721_VECT_OMB0_DONE + mbox].irq_name,
1447 (void *)mport);
1448
1449 if (rc) {
1450 dev_dbg(&priv->pdev->dev,
1451 "Unable to allocate MSI-X interrupt for "
1452 "OBOX%d-DONE\n", mbox);
1453 goto out_stat;
1454 }
1455
1456 rc = request_irq(priv->msix[TSI721_VECT_OMB0_INT + mbox].vector,
1457 tsi721_omsg_msix, 0,
1458 priv->msix[TSI721_VECT_OMB0_INT + mbox].irq_name,
1459 (void *)mport);
1460
1461 if (rc) {
1462 dev_dbg(&priv->pdev->dev,
1463 "Unable to allocate MSI-X interrupt for "
1464 "MBOX%d-INT\n", mbox);
1465 free_irq(
1466 priv->msix[TSI721_VECT_OMB0_DONE + mbox].vector,
1467 (void *)mport);
1468 goto out_stat;
1469 }
1470 }
1471#endif /* CONFIG_PCI_MSI */
1472
1473 tsi721_omsg_interrupt_enable(priv, mbox, TSI721_OBDMAC_INT_ALL);
1474
1475 /* Initialize Outbound Message descriptors ring */
1476 bd_ptr = priv->omsg_ring[mbox].omd_base;
1477 bd_ptr[entries].type_id = cpu_to_le32(DTYPE5 << 29);
1478 bd_ptr[entries].msg_info = 0;
1479 bd_ptr[entries].next_lo =
1480 cpu_to_le32((u64)priv->omsg_ring[mbox].omd_phys &
1481 TSI721_OBDMAC_DPTRL_MASK);
1482 bd_ptr[entries].next_hi =
1483 cpu_to_le32((u64)priv->omsg_ring[mbox].omd_phys >> 32);
1484 priv->omsg_ring[mbox].wr_count = 0;
1485 mb();
1486
1487 /* Initialize Outbound Message engine */
1488 iowrite32(TSI721_OBDMAC_CTL_INIT, priv->regs + TSI721_OBDMAC_CTL(mbox));
1489 ioread32(priv->regs + TSI721_OBDMAC_DWRCNT(mbox));
1490 udelay(10);
1491
1492 priv->omsg_init[mbox] = 1;
1493
1494 return 0;
1495
1496#ifdef CONFIG_PCI_MSI
1497out_stat:
1498 dma_free_coherent(&priv->pdev->dev,
1499 priv->omsg_ring[mbox].sts_size * sizeof(struct tsi721_dma_sts),
1500 priv->omsg_ring[mbox].sts_base,
1501 priv->omsg_ring[mbox].sts_phys);
1502
1503 priv->omsg_ring[mbox].sts_base = NULL;
1504#endif /* CONFIG_PCI_MSI */
1505
1506out_desc:
1507 dma_free_coherent(&priv->pdev->dev,
1508 (entries + 1) * sizeof(struct tsi721_omsg_desc),
1509 priv->omsg_ring[mbox].omd_base,
1510 priv->omsg_ring[mbox].omd_phys);
1511
1512 priv->omsg_ring[mbox].omd_base = NULL;
1513
1514out_buf:
1515 for (i = 0; i < priv->omsg_ring[mbox].size; i++) {
1516 if (priv->omsg_ring[mbox].omq_base[i]) {
1517 dma_free_coherent(&priv->pdev->dev,
1518 TSI721_MSG_BUFFER_SIZE,
1519 priv->omsg_ring[mbox].omq_base[i],
1520 priv->omsg_ring[mbox].omq_phys[i]);
1521
1522 priv->omsg_ring[mbox].omq_base[i] = NULL;
1523 }
1524 }
1525
1526out:
1527 return rc;
1528}
1529
1530/**
1531 * tsi721_close_outb_mbox - Close Tsi721 outbound mailbox
1532 * @mport: Master port implementing the outbound message unit
1533 * @mbox: Mailbox to close
1534 */
1535static void tsi721_close_outb_mbox(struct rio_mport *mport, int mbox)
1536{
1537 struct tsi721_device *priv = mport->priv;
1538 u32 i;
1539
1540 if (!priv->omsg_init[mbox])
1541 return;
1542 priv->omsg_init[mbox] = 0;
1543
1544 /* Disable Interrupts */
1545
1546 tsi721_omsg_interrupt_disable(priv, mbox, TSI721_OBDMAC_INT_ALL);
1547
1548#ifdef CONFIG_PCI_MSI
1549 if (priv->flags & TSI721_USING_MSIX) {
1550 free_irq(priv->msix[TSI721_VECT_OMB0_DONE + mbox].vector,
1551 (void *)mport);
1552 free_irq(priv->msix[TSI721_VECT_OMB0_INT + mbox].vector,
1553 (void *)mport);
1554 }
1555#endif /* CONFIG_PCI_MSI */
1556
1557 /* Free OMSG Descriptor Status FIFO */
1558 dma_free_coherent(&priv->pdev->dev,
1559 priv->omsg_ring[mbox].sts_size * sizeof(struct tsi721_dma_sts),
1560 priv->omsg_ring[mbox].sts_base,
1561 priv->omsg_ring[mbox].sts_phys);
1562
1563 priv->omsg_ring[mbox].sts_base = NULL;
1564
1565 /* Free OMSG descriptors */
1566 dma_free_coherent(&priv->pdev->dev,
1567 (priv->omsg_ring[mbox].size + 1) *
1568 sizeof(struct tsi721_omsg_desc),
1569 priv->omsg_ring[mbox].omd_base,
1570 priv->omsg_ring[mbox].omd_phys);
1571
1572 priv->omsg_ring[mbox].omd_base = NULL;
1573
1574 /* Free message buffers */
1575 for (i = 0; i < priv->omsg_ring[mbox].size; i++) {
1576 if (priv->omsg_ring[mbox].omq_base[i]) {
1577 dma_free_coherent(&priv->pdev->dev,
1578 TSI721_MSG_BUFFER_SIZE,
1579 priv->omsg_ring[mbox].omq_base[i],
1580 priv->omsg_ring[mbox].omq_phys[i]);
1581
1582 priv->omsg_ring[mbox].omq_base[i] = NULL;
1583 }
1584 }
1585}
1586
1587/**
1588 * tsi721_imsg_handler - Inbound Message Interrupt Handler
1589 * @priv: pointer to tsi721 private data
1590 * @ch: inbound message channel number to service
1591 *
1592 * Services channel interrupts from inbound messaging engine.
1593 */
1594static void tsi721_imsg_handler(struct tsi721_device *priv, int ch)
1595{
1596 u32 mbox = ch - 4;
1597 u32 imsg_int;
1598
1599 spin_lock(&priv->imsg_ring[mbox].lock);
1600
1601 imsg_int = ioread32(priv->regs + TSI721_IBDMAC_INT(ch));
1602
1603 if (imsg_int & TSI721_IBDMAC_INT_SRTO)
1604 dev_info(&priv->pdev->dev, "IB MBOX%d SRIO timeout\n",
1605 mbox);
1606
1607 if (imsg_int & TSI721_IBDMAC_INT_PC_ERROR)
1608 dev_info(&priv->pdev->dev, "IB MBOX%d PCIe error\n",
1609 mbox);
1610
1611 if (imsg_int & TSI721_IBDMAC_INT_FQ_LOW)
1612 dev_info(&priv->pdev->dev,
1613 "IB MBOX%d IB free queue low\n", mbox);
1614
1615 /* Clear IB channel interrupts */
1616 iowrite32(imsg_int, priv->regs + TSI721_IBDMAC_INT(ch));
1617
1618 /* If an IB Msg is received notify the upper layer */
1619 if (imsg_int & TSI721_IBDMAC_INT_DQ_RCV &&
1620 priv->mport->inb_msg[mbox].mcback)
1621 priv->mport->inb_msg[mbox].mcback(priv->mport,
1622 priv->imsg_ring[mbox].dev_id, mbox, -1);
1623
1624 if (!(priv->flags & TSI721_USING_MSIX)) {
1625 u32 ch_inte;
1626
1627 /* Re-enable channel interrupts */
1628 ch_inte = ioread32(priv->regs + TSI721_DEV_CHAN_INTE);
1629 ch_inte |= TSI721_INT_IMSG_CHAN(ch);
1630 iowrite32(ch_inte, priv->regs + TSI721_DEV_CHAN_INTE);
1631 }
1632
1633 spin_unlock(&priv->imsg_ring[mbox].lock);
1634}
1635
1636/**
1637 * tsi721_open_inb_mbox - Initialize Tsi721 inbound mailbox
1638 * @mport: Master port implementing the Inbound Messaging Engine
1639 * @dev_id: Device specific pointer to pass on event
1640 * @mbox: Mailbox to open
1641 * @entries: Number of entries in the inbound mailbox ring
1642 */
1643static int tsi721_open_inb_mbox(struct rio_mport *mport, void *dev_id,
1644 int mbox, int entries)
1645{
1646 struct tsi721_device *priv = mport->priv;
1647 int ch = mbox + 4;
1648 int i;
1649 u64 *free_ptr;
1650 int rc = 0;
1651
1652 if ((entries < TSI721_IMSGD_MIN_RING_SIZE) ||
1653 (entries > TSI721_IMSGD_RING_SIZE) ||
1654 (!is_power_of_2(entries)) || mbox >= RIO_MAX_MBOX) {
1655 rc = -EINVAL;
1656 goto out;
1657 }
1658
1659 /* Initialize IB Messaging Ring */
1660 priv->imsg_ring[mbox].dev_id = dev_id;
1661 priv->imsg_ring[mbox].size = entries;
1662 priv->imsg_ring[mbox].rx_slot = 0;
1663 priv->imsg_ring[mbox].desc_rdptr = 0;
1664 priv->imsg_ring[mbox].fq_wrptr = 0;
1665 for (i = 0; i < priv->imsg_ring[mbox].size; i++)
1666 priv->imsg_ring[mbox].imq_base[i] = NULL;
1667 spin_lock_init(&priv->imsg_ring[mbox].lock);
1668
1669 /* Allocate buffers for incoming messages */
1670 priv->imsg_ring[mbox].buf_base =
1671 dma_alloc_coherent(&priv->pdev->dev,
1672 entries * TSI721_MSG_BUFFER_SIZE,
1673 &priv->imsg_ring[mbox].buf_phys,
1674 GFP_KERNEL);
1675
1676 if (priv->imsg_ring[mbox].buf_base == NULL) {
1677 dev_err(&priv->pdev->dev,
1678 "Failed to allocate buffers for IB MBOX%d\n", mbox);
1679 rc = -ENOMEM;
1680 goto out;
1681 }
1682
1683 /* Allocate memory for circular free list */
1684 priv->imsg_ring[mbox].imfq_base =
1685 dma_alloc_coherent(&priv->pdev->dev,
1686 entries * 8,
1687 &priv->imsg_ring[mbox].imfq_phys,
1688 GFP_KERNEL);
1689
1690 if (priv->imsg_ring[mbox].imfq_base == NULL) {
1691 dev_err(&priv->pdev->dev,
1692 "Failed to allocate free queue for IB MBOX%d\n", mbox);
1693 rc = -ENOMEM;
1694 goto out_buf;
1695 }
1696
1697 /* Allocate memory for Inbound message descriptors */
1698 priv->imsg_ring[mbox].imd_base =
1699 dma_alloc_coherent(&priv->pdev->dev,
1700 entries * sizeof(struct tsi721_imsg_desc),
1701 &priv->imsg_ring[mbox].imd_phys, GFP_KERNEL);
1702
1703 if (priv->imsg_ring[mbox].imd_base == NULL) {
1704 dev_err(&priv->pdev->dev,
1705 "Failed to allocate descriptor memory for IB MBOX%d\n",
1706 mbox);
1707 rc = -ENOMEM;
1708 goto out_dma;
1709 }
1710
1711 /* Fill free buffer pointer list */
1712 free_ptr = priv->imsg_ring[mbox].imfq_base;
1713 for (i = 0; i < entries; i++)
1714 free_ptr[i] = cpu_to_le64(
1715 (u64)(priv->imsg_ring[mbox].buf_phys) +
1716 i * 0x1000);
1717
1718 mb();
1719
1720 /*
1721 * For mapping of inbound SRIO Messages into appropriate queues we need
1722 * to set Inbound Device ID register in the messaging engine. We do it
1723 * once when first inbound mailbox is requested.
1724 */
1725 if (!(priv->flags & TSI721_IMSGID_SET)) {
1726 iowrite32((u32)priv->mport->host_deviceid,
1727 priv->regs + TSI721_IB_DEVID);
1728 priv->flags |= TSI721_IMSGID_SET;
1729 }
1730
1731 /*
1732 * Configure Inbound Messaging channel (ch = mbox + 4)
1733 */
1734
1735 /* Setup Inbound Message free queue */
1736 iowrite32(((u64)priv->imsg_ring[mbox].imfq_phys >> 32),
1737 priv->regs + TSI721_IBDMAC_FQBH(ch));
1738 iowrite32(((u64)priv->imsg_ring[mbox].imfq_phys &
1739 TSI721_IBDMAC_FQBL_MASK),
1740 priv->regs+TSI721_IBDMAC_FQBL(ch));
1741 iowrite32(TSI721_DMAC_DSSZ_SIZE(entries),
1742 priv->regs + TSI721_IBDMAC_FQSZ(ch));
1743
1744 /* Setup Inbound Message descriptor queue */
1745 iowrite32(((u64)priv->imsg_ring[mbox].imd_phys >> 32),
1746 priv->regs + TSI721_IBDMAC_DQBH(ch));
1747 iowrite32(((u32)priv->imsg_ring[mbox].imd_phys &
1748 (u32)TSI721_IBDMAC_DQBL_MASK),
1749 priv->regs+TSI721_IBDMAC_DQBL(ch));
1750 iowrite32(TSI721_DMAC_DSSZ_SIZE(entries),
1751 priv->regs + TSI721_IBDMAC_DQSZ(ch));
1752
1753 /* Enable interrupts */
1754
1755#ifdef CONFIG_PCI_MSI
1756 if (priv->flags & TSI721_USING_MSIX) {
1757 /* Request interrupt service if we are in MSI-X mode */
1758 rc = request_irq(priv->msix[TSI721_VECT_IMB0_RCV + mbox].vector,
1759 tsi721_imsg_msix, 0,
1760 priv->msix[TSI721_VECT_IMB0_RCV + mbox].irq_name,
1761 (void *)mport);
1762
1763 if (rc) {
1764 dev_dbg(&priv->pdev->dev,
1765 "Unable to allocate MSI-X interrupt for "
1766				"IB MBOX%d-DONE\n", mbox);
1767 goto out_desc;
1768 }
1769
1770 rc = request_irq(priv->msix[TSI721_VECT_IMB0_INT + mbox].vector,
1771 tsi721_imsg_msix, 0,
1772 priv->msix[TSI721_VECT_IMB0_INT + mbox].irq_name,
1773 (void *)mport);
1774
1775 if (rc) {
1776 dev_dbg(&priv->pdev->dev,
1777 "Unable to allocate MSI-X interrupt for "
1778				"IB MBOX%d-INT\n", mbox);
1779 free_irq(
1780 priv->msix[TSI721_VECT_IMB0_RCV + mbox].vector,
1781 (void *)mport);
1782 goto out_desc;
1783 }
1784 }
1785#endif /* CONFIG_PCI_MSI */
1786
1787 tsi721_imsg_interrupt_enable(priv, ch, TSI721_IBDMAC_INT_ALL);
1788
1789 /* Initialize Inbound Message Engine */
1790 iowrite32(TSI721_IBDMAC_CTL_INIT, priv->regs + TSI721_IBDMAC_CTL(ch));
1791 ioread32(priv->regs + TSI721_IBDMAC_CTL(ch));
1792 udelay(10);
1793 priv->imsg_ring[mbox].fq_wrptr = entries - 1;
1794 iowrite32(entries - 1, priv->regs + TSI721_IBDMAC_FQWP(ch));
1795
1796 priv->imsg_init[mbox] = 1;
1797 return 0;
1798
1799#ifdef CONFIG_PCI_MSI
1800out_desc:
1801 dma_free_coherent(&priv->pdev->dev,
1802 priv->imsg_ring[mbox].size * sizeof(struct tsi721_imsg_desc),
1803 priv->imsg_ring[mbox].imd_base,
1804 priv->imsg_ring[mbox].imd_phys);
1805
1806 priv->imsg_ring[mbox].imd_base = NULL;
1807#endif /* CONFIG_PCI_MSI */
1808
1809out_dma:
1810 dma_free_coherent(&priv->pdev->dev,
1811 priv->imsg_ring[mbox].size * 8,
1812 priv->imsg_ring[mbox].imfq_base,
1813 priv->imsg_ring[mbox].imfq_phys);
1814
1815 priv->imsg_ring[mbox].imfq_base = NULL;
1816
1817out_buf:
1818 dma_free_coherent(&priv->pdev->dev,
1819 priv->imsg_ring[mbox].size * TSI721_MSG_BUFFER_SIZE,
1820 priv->imsg_ring[mbox].buf_base,
1821 priv->imsg_ring[mbox].buf_phys);
1822
1823 priv->imsg_ring[mbox].buf_base = NULL;
1824
1825out:
1826 return rc;
1827}
1828
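/*
 * Usage sketch (editor's illustration, not part of the driver): clients
 * such as RIONET reach tsi721_open_inb_mbox() through the generic
 * rio_request_inb_mbox() wrapper; the callback name, device pointer and
 * ring size below are hypothetical.
 *
 *	static void my_inb_msg_event(struct rio_mport *mport, void *dev_id,
 *				     int mbox, int slot)
 *	{
 *		// schedule deferred work that drains the mailbox
 *	}
 *
 *	// 128 entries: a power of 2 within the TSI721_IMSGD_MIN_RING_SIZE..
 *	// TSI721_IMSGD_RING_SIZE limits enforced above
 *	err = rio_request_inb_mbox(mport, my_dev, 0, 128, my_inb_msg_event);
 *	...
 *	rio_release_inb_mbox(mport, 0);	// tears down via tsi721_close_inb_mbox()
 */
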
1829/**
1830 * tsi721_close_inb_mbox - Shut down Tsi721 inbound mailbox
1831 * @mport: Master port implementing the Inbound Messaging Engine
1832 * @mbox: Mailbox to close
1833 */
1834static void tsi721_close_inb_mbox(struct rio_mport *mport, int mbox)
1835{
1836 struct tsi721_device *priv = mport->priv;
1837 u32 rx_slot;
1838 int ch = mbox + 4;
1839
1840 if (!priv->imsg_init[mbox]) /* mbox isn't initialized yet */
1841 return;
1842 priv->imsg_init[mbox] = 0;
1843
1844 /* Disable Inbound Messaging Engine */
1845
1846 /* Disable Interrupts */
1847	tsi721_imsg_interrupt_disable(priv, ch, TSI721_IBDMAC_INT_MASK);
1848
1849#ifdef CONFIG_PCI_MSI
1850 if (priv->flags & TSI721_USING_MSIX) {
1851 free_irq(priv->msix[TSI721_VECT_IMB0_RCV + mbox].vector,
1852 (void *)mport);
1853 free_irq(priv->msix[TSI721_VECT_IMB0_INT + mbox].vector,
1854 (void *)mport);
1855 }
1856#endif /* CONFIG_PCI_MSI */
1857
1858 /* Clear Inbound Buffer Queue */
1859 for (rx_slot = 0; rx_slot < priv->imsg_ring[mbox].size; rx_slot++)
1860 priv->imsg_ring[mbox].imq_base[rx_slot] = NULL;
1861
1862 /* Free memory allocated for message buffers */
1863 dma_free_coherent(&priv->pdev->dev,
1864 priv->imsg_ring[mbox].size * TSI721_MSG_BUFFER_SIZE,
1865 priv->imsg_ring[mbox].buf_base,
1866 priv->imsg_ring[mbox].buf_phys);
1867
1868 priv->imsg_ring[mbox].buf_base = NULL;
1869
1870	/* Free memory allocated for free pointer list */
1871 dma_free_coherent(&priv->pdev->dev,
1872 priv->imsg_ring[mbox].size * 8,
1873 priv->imsg_ring[mbox].imfq_base,
1874 priv->imsg_ring[mbox].imfq_phys);
1875
1876 priv->imsg_ring[mbox].imfq_base = NULL;
1877
1878 /* Free memory allocated for RX descriptors */
1879 dma_free_coherent(&priv->pdev->dev,
1880 priv->imsg_ring[mbox].size * sizeof(struct tsi721_imsg_desc),
1881 priv->imsg_ring[mbox].imd_base,
1882 priv->imsg_ring[mbox].imd_phys);
1883
1884 priv->imsg_ring[mbox].imd_base = NULL;
1885}
1886
1887/**
1888 * tsi721_add_inb_buffer - Add buffer to the Tsi721 inbound message queue
1889 * @mport: Master port implementing the Inbound Messaging Engine
1890 * @mbox: Inbound mailbox number
1891 * @buf: Buffer to add to inbound queue
1892 *
 * Returns %0 on success or %-EINVAL if the target ring slot is still
 * occupied.
 */
1893static int tsi721_add_inb_buffer(struct rio_mport *mport, int mbox, void *buf)
1894{
1895 struct tsi721_device *priv = mport->priv;
1896 u32 rx_slot;
1897 int rc = 0;
1898
1899 rx_slot = priv->imsg_ring[mbox].rx_slot;
1900 if (priv->imsg_ring[mbox].imq_base[rx_slot]) {
1901 dev_err(&priv->pdev->dev,
1902 "Error adding inbound buffer %d, buffer exists\n",
1903 rx_slot);
1904 rc = -EINVAL;
1905 goto out;
1906 }
1907
1908 priv->imsg_ring[mbox].imq_base[rx_slot] = buf;
1909
1910 if (++priv->imsg_ring[mbox].rx_slot == priv->imsg_ring[mbox].size)
1911 priv->imsg_ring[mbox].rx_slot = 0;
1912
1913out:
1914 return rc;
1915}
1916
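/*
 * Usage sketch (editor's illustration): after the mailbox is opened, a
 * client pre-posts one receive buffer per ring slot through the generic
 * rio_add_inb_buffer() wrapper; the loop below is hypothetical.
 *
 *	for (i = 0; i < 128; i++) {
 *		void *buf = kmalloc(RIO_MAX_MSG_SIZE, GFP_KERNEL);
 *
 *		if (!buf)
 *			break;
 *		if (rio_add_inb_buffer(mport, 0, buf)) {
 *			kfree(buf);	// slot already occupied
 *			break;
 *		}
 *	}
 */
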
1917/**
1918 * tsi721_get_inb_message - Fetch inbound message from the Tsi721 MSG Queue
1919 * @mport: Master port implementing the Inbound Messaging Engine
1920 * @mbox: Inbound mailbox number
1921 *
1922 * Returns pointer to the message on success or %NULL on failure.
1923 */
1924static void *tsi721_get_inb_message(struct rio_mport *mport, int mbox)
1925{
1926 struct tsi721_device *priv = mport->priv;
1927 struct tsi721_imsg_desc *desc;
1928 u32 rx_slot;
1929 void *rx_virt = NULL;
1930 u64 rx_phys;
1931 void *buf = NULL;
1932 u64 *free_ptr;
1933 int ch = mbox + 4;
1934 int msg_size;
1935
1936 if (!priv->imsg_init[mbox])
1937 return NULL;
1938
1939 desc = priv->imsg_ring[mbox].imd_base;
1940 desc += priv->imsg_ring[mbox].desc_rdptr;
1941
1942 if (!(le32_to_cpu(desc->msg_info) & TSI721_IMD_HO))
1943 goto out;
1944
1945 rx_slot = priv->imsg_ring[mbox].rx_slot;
1946 while (priv->imsg_ring[mbox].imq_base[rx_slot] == NULL) {
1947 if (++rx_slot == priv->imsg_ring[mbox].size)
1948 rx_slot = 0;
1949 }
1950
1951 rx_phys = ((u64)le32_to_cpu(desc->bufptr_hi) << 32) |
1952 le32_to_cpu(desc->bufptr_lo);
1953
1954 rx_virt = priv->imsg_ring[mbox].buf_base +
1955 (rx_phys - (u64)priv->imsg_ring[mbox].buf_phys);
1956
1957 buf = priv->imsg_ring[mbox].imq_base[rx_slot];
1958 msg_size = le32_to_cpu(desc->msg_info) & TSI721_IMD_BCOUNT;
1959 if (msg_size == 0)
1960 msg_size = RIO_MAX_MSG_SIZE;
1961
1962 memcpy(buf, rx_virt, msg_size);
1963 priv->imsg_ring[mbox].imq_base[rx_slot] = NULL;
1964
1965 desc->msg_info &= cpu_to_le32(~TSI721_IMD_HO);
1966 if (++priv->imsg_ring[mbox].desc_rdptr == priv->imsg_ring[mbox].size)
1967 priv->imsg_ring[mbox].desc_rdptr = 0;
1968
1969 iowrite32(priv->imsg_ring[mbox].desc_rdptr,
1970 priv->regs + TSI721_IBDMAC_DQRP(ch));
1971
1972 /* Return free buffer into the pointer list */
1973 free_ptr = priv->imsg_ring[mbox].imfq_base;
1974 free_ptr[priv->imsg_ring[mbox].fq_wrptr] = cpu_to_le64(rx_phys);
1975
1976 if (++priv->imsg_ring[mbox].fq_wrptr == priv->imsg_ring[mbox].size)
1977 priv->imsg_ring[mbox].fq_wrptr = 0;
1978
1979 iowrite32(priv->imsg_ring[mbox].fq_wrptr,
1980 priv->regs + TSI721_IBDMAC_FQWP(ch));
1981out:
1982 return buf;
1983}
1984
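/*
 * Usage sketch (editor's illustration): the inbound message callback
 * registered with rio_request_inb_mbox() typically drains the ring until
 * this routine (via the generic rio_get_inb_message() wrapper) returns
 * NULL, recycling each buffer once its payload has been consumed.
 *
 *	void *msg;
 *
 *	while ((msg = rio_get_inb_message(mport, 0)) != NULL) {
 *		consume(msg);				// hypothetical consumer
 *		rio_add_inb_buffer(mport, 0, msg);	// recycle the buffer
 *	}
 */
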
1985/**
1986 * tsi721_messages_init - Initialization of Messaging Engine
1987 * @priv: pointer to tsi721 private data
1988 *
1989 * Configures Tsi721 messaging engine.
1990 */
1991static int tsi721_messages_init(struct tsi721_device *priv)
1992{
1993 int ch;
1994
1995 iowrite32(0, priv->regs + TSI721_SMSG_ECC_LOG);
1996 iowrite32(0, priv->regs + TSI721_RETRY_GEN_CNT);
1997 iowrite32(0, priv->regs + TSI721_RETRY_RX_CNT);
1998
1999 /* Set SRIO Message Request/Response Timeout */
2000 iowrite32(TSI721_RQRPTO_VAL, priv->regs + TSI721_RQRPTO);
2001
2002 /* Initialize Inbound Messaging Engine Registers */
2003 for (ch = 0; ch < TSI721_IMSG_CHNUM; ch++) {
2004 /* Clear interrupt bits */
2005 iowrite32(TSI721_IBDMAC_INT_MASK,
2006 priv->regs + TSI721_IBDMAC_INT(ch));
2007 /* Clear Status */
2008 iowrite32(0, priv->regs + TSI721_IBDMAC_STS(ch));
2009
2010 iowrite32(TSI721_SMSG_ECC_COR_LOG_MASK,
2011 priv->regs + TSI721_SMSG_ECC_COR_LOG(ch));
2012 iowrite32(TSI721_SMSG_ECC_NCOR_MASK,
2013 priv->regs + TSI721_SMSG_ECC_NCOR(ch));
2014 }
2015
2016 return 0;
2017}
2018
2019/**
2020 * tsi721_disable_ints - disables all device interrupts
2021 * @priv: pointer to tsi721 private data
2022 */
2023static void tsi721_disable_ints(struct tsi721_device *priv)
2024{
2025 int ch;
2026
2027 /* Disable all device level interrupts */
2028 iowrite32(0, priv->regs + TSI721_DEV_INTE);
2029
2030 /* Disable all Device Channel interrupts */
2031 iowrite32(0, priv->regs + TSI721_DEV_CHAN_INTE);
2032
2033 /* Disable all Inbound Msg Channel interrupts */
2034 for (ch = 0; ch < TSI721_IMSG_CHNUM; ch++)
2035 iowrite32(0, priv->regs + TSI721_IBDMAC_INTE(ch));
2036
2037 /* Disable all Outbound Msg Channel interrupts */
2038 for (ch = 0; ch < TSI721_OMSG_CHNUM; ch++)
2039 iowrite32(0, priv->regs + TSI721_OBDMAC_INTE(ch));
2040
2041 /* Disable all general messaging interrupts */
2042 iowrite32(0, priv->regs + TSI721_SMSG_INTE);
2043
2044 /* Disable all BDMA Channel interrupts */
2045 for (ch = 0; ch < TSI721_DMA_MAXCH; ch++)
2046 iowrite32(0, priv->regs + TSI721_DMAC_INTE(ch));
2047
2048 /* Disable all general BDMA interrupts */
2049 iowrite32(0, priv->regs + TSI721_BDMA_INTE);
2050
2051 /* Disable all SRIO Channel interrupts */
2052 for (ch = 0; ch < TSI721_SRIO_MAXCH; ch++)
2053 iowrite32(0, priv->regs + TSI721_SR_CHINTE(ch));
2054
2055 /* Disable all general SR2PC interrupts */
2056 iowrite32(0, priv->regs + TSI721_SR2PC_GEN_INTE);
2057
2058 /* Disable all PC2SR interrupts */
2059 iowrite32(0, priv->regs + TSI721_PC2SR_INTE);
2060
2061 /* Disable all I2C interrupts */
2062 iowrite32(0, priv->regs + TSI721_I2C_INT_ENABLE);
2063
2064 /* Disable SRIO MAC interrupts */
2065 iowrite32(0, priv->regs + TSI721_RIO_EM_INT_ENABLE);
2066 iowrite32(0, priv->regs + TSI721_RIO_EM_DEV_INT_EN);
2067}
2068
2069/**
2070 * tsi721_setup_mport - Setup Tsi721 as RapidIO subsystem master port
2071 * @priv: pointer to tsi721 private data
2072 *
2073 * Configures Tsi721 as RapidIO master port.
2074 */
2075static int __devinit tsi721_setup_mport(struct tsi721_device *priv)
2076{
2077 struct pci_dev *pdev = priv->pdev;
2078 int err = 0;
2079 struct rio_ops *ops;
2080
2081 struct rio_mport *mport;
2082
2083 ops = kzalloc(sizeof(struct rio_ops), GFP_KERNEL);
2084 if (!ops) {
2085 dev_dbg(&pdev->dev, "Unable to allocate memory for rio_ops\n");
2086 return -ENOMEM;
2087 }
2088
2089 ops->lcread = tsi721_lcread;
2090 ops->lcwrite = tsi721_lcwrite;
2091 ops->cread = tsi721_cread_dma;
2092 ops->cwrite = tsi721_cwrite_dma;
2093 ops->dsend = tsi721_dsend;
2094 ops->open_inb_mbox = tsi721_open_inb_mbox;
2095 ops->close_inb_mbox = tsi721_close_inb_mbox;
2096 ops->open_outb_mbox = tsi721_open_outb_mbox;
2097 ops->close_outb_mbox = tsi721_close_outb_mbox;
2098 ops->add_outb_message = tsi721_add_outb_message;
2099 ops->add_inb_buffer = tsi721_add_inb_buffer;
2100 ops->get_inb_message = tsi721_get_inb_message;
2101
2102 mport = kzalloc(sizeof(struct rio_mport), GFP_KERNEL);
2103 if (!mport) {
2104 kfree(ops);
2105 dev_dbg(&pdev->dev, "Unable to allocate memory for mport\n");
2106 return -ENOMEM;
2107 }
2108
2109 mport->ops = ops;
2110 mport->index = 0;
2111 mport->sys_size = 0; /* small system */
2112 mport->phy_type = RIO_PHY_SERIAL;
2113 mport->priv = (void *)priv;
2114 mport->phys_efptr = 0x100;
2115
2116 INIT_LIST_HEAD(&mport->dbells);
2117
2118 rio_init_dbell_res(&mport->riores[RIO_DOORBELL_RESOURCE], 0, 0xffff);
2119 rio_init_mbox_res(&mport->riores[RIO_INB_MBOX_RESOURCE], 0, 0);
2120 rio_init_mbox_res(&mport->riores[RIO_OUTB_MBOX_RESOURCE], 0, 0);
2121 strcpy(mport->name, "Tsi721 mport");
2122
2123 /* Hook up interrupt handler */
2124
2125#ifdef CONFIG_PCI_MSI
2126 if (!tsi721_enable_msix(priv))
2127 priv->flags |= TSI721_USING_MSIX;
2128 else if (!pci_enable_msi(pdev))
2129 priv->flags |= TSI721_USING_MSI;
2130 else
2131 dev_info(&pdev->dev,
2132 "MSI/MSI-X is not available. Using legacy INTx.\n");
2133#endif /* CONFIG_PCI_MSI */
2134
2135 err = tsi721_request_irq(mport);
2136
2137 if (!err) {
2138 tsi721_interrupts_init(priv);
2139 ops->pwenable = tsi721_pw_enable;
2140	} else {
2141		dev_err(&pdev->dev, "Unable to get assigned PCI IRQ "
2142			"vector %02X err=0x%x\n", pdev->irq, err);
	}
2143
2144 /* Enable SRIO link */
2145 iowrite32(ioread32(priv->regs + TSI721_DEVCTL) |
2146 TSI721_DEVCTL_SRBOOT_CMPL,
2147 priv->regs + TSI721_DEVCTL);
2148
2149 rio_register_mport(mport);
2150 priv->mport = mport;
2151
2152 if (mport->host_deviceid >= 0)
2153 iowrite32(RIO_PORT_GEN_HOST | RIO_PORT_GEN_MASTER |
2154 RIO_PORT_GEN_DISCOVERED,
2155 priv->regs + (0x100 + RIO_PORT_GEN_CTL_CSR));
2156 else
2157 iowrite32(0, priv->regs + (0x100 + RIO_PORT_GEN_CTL_CSR));
2158
2159 return 0;
2160}
2161
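/*
 * Usage sketch (editor's illustration): once rio_register_mport() has
 * published this mport, the ops table wired up above is exercised through
 * the generic RapidIO accessors; e.g. a local CSR read lands in
 * tsi721_lcread():
 *
 *	u32 comptag;
 *
 *	if (!rio_local_read_config_32(mport, RIO_COMPONENT_TAG_CSR, &comptag))
 *		pr_debug("component tag: 0x%08x\n", comptag);
 */
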
2162static int __devinit tsi721_probe(struct pci_dev *pdev,
2163 const struct pci_device_id *id)
2164{
2165 struct tsi721_device *priv;
2166 int i;
2167 int err;
2168 u32 regval;
2169
2170 priv = kzalloc(sizeof(struct tsi721_device), GFP_KERNEL);
2171 if (priv == NULL) {
2172 dev_err(&pdev->dev, "Failed to allocate memory for device\n");
2173 err = -ENOMEM;
2174 goto err_exit;
2175 }
2176
2177 err = pci_enable_device(pdev);
2178 if (err) {
2179 dev_err(&pdev->dev, "Failed to enable PCI device\n");
2180 goto err_clean;
2181 }
2182
2183 priv->pdev = pdev;
2184
2185#ifdef DEBUG
2186 for (i = 0; i <= PCI_STD_RESOURCE_END; i++) {
2187 dev_dbg(&pdev->dev, "res[%d] @ 0x%llx (0x%lx, 0x%lx)\n",
2188 i, (unsigned long long)pci_resource_start(pdev, i),
2189 (unsigned long)pci_resource_len(pdev, i),
2190 pci_resource_flags(pdev, i));
2191 }
2192#endif
2193 /*
2194 * Verify BAR configuration
2195 */
2196
2197 /* BAR_0 (registers) must be 512KB+ in 32-bit address space */
2198 if (!(pci_resource_flags(pdev, BAR_0) & IORESOURCE_MEM) ||
2199 pci_resource_flags(pdev, BAR_0) & IORESOURCE_MEM_64 ||
2200 pci_resource_len(pdev, BAR_0) < TSI721_REG_SPACE_SIZE) {
2201 dev_err(&pdev->dev,
2202 "Missing or misconfigured CSR BAR0, aborting.\n");
2203 err = -ENODEV;
2204 goto err_disable_pdev;
2205 }
2206
2207 /* BAR_1 (outbound doorbells) must be 16MB+ in 32-bit address space */
2208 if (!(pci_resource_flags(pdev, BAR_1) & IORESOURCE_MEM) ||
2209 pci_resource_flags(pdev, BAR_1) & IORESOURCE_MEM_64 ||
2210 pci_resource_len(pdev, BAR_1) < TSI721_DB_WIN_SIZE) {
2211 dev_err(&pdev->dev,
2212 "Missing or misconfigured Doorbell BAR1, aborting.\n");
2213 err = -ENODEV;
2214 goto err_disable_pdev;
2215 }
2216
2217 /*
2218 * BAR_2 and BAR_4 (outbound translation) must be in 64-bit PCIe address
2219 * space.
2220	 * NOTE: BAR_2 and BAR_4 are not used by this version of the driver.
2221 * It may be a good idea to keep them disabled using HW configuration
2222 * to save PCI memory space.
2223 */
2224 if ((pci_resource_flags(pdev, BAR_2) & IORESOURCE_MEM) &&
2225 (pci_resource_flags(pdev, BAR_2) & IORESOURCE_MEM_64)) {
2226 dev_info(&pdev->dev, "Outbound BAR2 is not used but enabled.\n");
2227 }
2228
2229 if ((pci_resource_flags(pdev, BAR_4) & IORESOURCE_MEM) &&
2230 (pci_resource_flags(pdev, BAR_4) & IORESOURCE_MEM_64)) {
2231 dev_info(&pdev->dev, "Outbound BAR4 is not used but enabled.\n");
2232 }
2233
2234 err = pci_request_regions(pdev, DRV_NAME);
2235 if (err) {
2236 dev_err(&pdev->dev, "Cannot obtain PCI resources, "
2237 "aborting.\n");
2238 goto err_disable_pdev;
2239 }
2240
2241 pci_set_master(pdev);
2242
2243 priv->regs = pci_ioremap_bar(pdev, BAR_0);
2244 if (!priv->regs) {
2245 dev_err(&pdev->dev,
2246			"Unable to map device register space, aborting\n");
2247 err = -ENOMEM;
2248 goto err_free_res;
2249 }
2250
2251 priv->odb_base = pci_ioremap_bar(pdev, BAR_1);
2252 if (!priv->odb_base) {
2253 dev_err(&pdev->dev,
2254			"Unable to map outbound doorbell space, aborting\n");
2255 err = -ENOMEM;
2256 goto err_unmap_bars;
2257 }
2258
2259 /* Configure DMA attributes. */
2260 if (pci_set_dma_mask(pdev, DMA_BIT_MASK(64))) {
2261		/* capture the error so a failed probe is not reported as success */
		err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
		if (err) {
2262			dev_info(&pdev->dev, "Unable to set DMA mask\n");
2263			goto err_unmap_bars;
2264 }
2265
2266 if (pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32)))
2267 dev_info(&pdev->dev, "Unable to set consistent DMA mask\n");
2268 } else {
2269 err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
2270 if (err)
2271 dev_info(&pdev->dev, "Unable to set consistent DMA mask\n");
2272 }
2273
2274 /* Clear "no snoop" and "relaxed ordering" bits. */
2275 pci_read_config_dword(pdev, 0x40 + PCI_EXP_DEVCTL, &regval);
2276 regval &= ~(PCI_EXP_DEVCTL_RELAX_EN | PCI_EXP_DEVCTL_NOSNOOP_EN);
2277 pci_write_config_dword(pdev, 0x40 + PCI_EXP_DEVCTL, regval);
2278
2279 /*
2280 * FIXUP: correct offsets of MSI-X tables in the MSI-X Capability Block
2281 */
2282 pci_write_config_dword(pdev, TSI721_PCIECFG_EPCTL, 0x01);
2283 pci_write_config_dword(pdev, TSI721_PCIECFG_MSIXTBL,
2284 TSI721_MSIXTBL_OFFSET);
2285 pci_write_config_dword(pdev, TSI721_PCIECFG_MSIXPBA,
2286 TSI721_MSIXPBA_OFFSET);
2287 pci_write_config_dword(pdev, TSI721_PCIECFG_EPCTL, 0);
2288 /* End of FIXUP */
2289
2290 tsi721_disable_ints(priv);
2291
2292 tsi721_init_pc2sr_mapping(priv);
2293 tsi721_init_sr2pc_mapping(priv);
2294
2295 if (tsi721_bdma_init(priv)) {
2296 dev_err(&pdev->dev, "BDMA initialization failed, aborting\n");
2297 err = -ENOMEM;
2298 goto err_unmap_bars;
2299 }
2300
2301 err = tsi721_doorbell_init(priv);
2302 if (err)
2303 goto err_free_bdma;
2304
2305 tsi721_port_write_init(priv);
2306
2307 err = tsi721_messages_init(priv);
2308 if (err)
2309 goto err_free_consistent;
2310
2311 err = tsi721_setup_mport(priv);
2312 if (err)
2313 goto err_free_consistent;
2314
2315 return 0;
2316
2317err_free_consistent:
2318 tsi721_doorbell_free(priv);
2319err_free_bdma:
2320 tsi721_bdma_free(priv);
2321err_unmap_bars:
2322 if (priv->regs)
2323 iounmap(priv->regs);
2324 if (priv->odb_base)
2325 iounmap(priv->odb_base);
2326err_free_res:
2327 pci_release_regions(pdev);
2328 pci_clear_master(pdev);
2329err_disable_pdev:
2330 pci_disable_device(pdev);
2331err_clean:
2332 kfree(priv);
2333err_exit:
2334 return err;
2335}
2336
2337static DEFINE_PCI_DEVICE_TABLE(tsi721_pci_tbl) = {
2338 { PCI_DEVICE(PCI_VENDOR_ID_IDT, PCI_DEVICE_ID_TSI721) },
2339 { 0, } /* terminate list */
2340};
2341
2342MODULE_DEVICE_TABLE(pci, tsi721_pci_tbl);
2343
2344static struct pci_driver tsi721_driver = {
2345 .name = "tsi721",
2346 .id_table = tsi721_pci_tbl,
2347 .probe = tsi721_probe,
2348};
2349
2350static int __init tsi721_init(void)
2351{
2352 return pci_register_driver(&tsi721_driver);
2353}
2354
2355/*
 * No module_exit() is provided, so the driver cannot be unloaded once
 * loaded; tsi721_exit() is currently unreferenced.
 */
static void __exit tsi721_exit(void)
2356{
2357 pci_unregister_driver(&tsi721_driver);
2358}
2359
2360device_initcall(tsi721_init);