Diffstat (limited to 'drivers/char')
-rw-r--r--  drivers/char/Kconfig        14
-rw-r--r--  drivers/char/Makefile        3
-rw-r--r--  drivers/char/mbcs.c        849
-rw-r--r--  drivers/char/mbcs.h        553
-rw-r--r--  drivers/char/snsc.c          8
-rw-r--r--  drivers/char/snsc.h         40
-rw-r--r--  drivers/char/snsc_event.c  304
7 files changed, 1770 insertions(+), 1 deletion(-)
diff --git a/drivers/char/Kconfig b/drivers/char/Kconfig
index 096a1202ea07..97ac4edf4655 100644
--- a/drivers/char/Kconfig
+++ b/drivers/char/Kconfig
@@ -399,6 +399,20 @@ config SGI_SNSC
 	  controller communication from user space (you want this!),
 	  say Y.  Otherwise, say N.
 
+config SGI_TIOCX
+	bool "SGI TIO CX driver support"
+	depends on (IA64_SGI_SN2 || IA64_GENERIC)
+	help
+	  If you have an SGI Altix and you have fpga devices attached
+	  to your TIO, say Y here, otherwise say N.
+
+config SGI_MBCS
+	tristate "SGI FPGA Core Services driver support"
+	depends on (IA64_SGI_SN2 || IA64_GENERIC)
+	help
+	  If you have an SGI Altix with an attached SABrick
+	  say Y or M here, otherwise say N.
+
 source "drivers/serial/Kconfig"
 
 config UNIX98_PTYS
diff --git a/drivers/char/Makefile b/drivers/char/Makefile
index 54ed76af1a47..e3f5c32aac55 100644
--- a/drivers/char/Makefile
+++ b/drivers/char/Makefile
@@ -42,11 +42,12 @@ obj-$(CONFIG_SX) += sx.o generic_serial.o
 obj-$(CONFIG_RIO) += rio/ generic_serial.o
 obj-$(CONFIG_HVC_CONSOLE) += hvc_console.o hvsi.o
 obj-$(CONFIG_RAW_DRIVER) += raw.o
-obj-$(CONFIG_SGI_SNSC) += snsc.o
+obj-$(CONFIG_SGI_SNSC) += snsc.o snsc_event.o
 obj-$(CONFIG_MMTIMER) += mmtimer.o
 obj-$(CONFIG_VIOCONS) += viocons.o
 obj-$(CONFIG_VIOTAPE) += viotape.o
 obj-$(CONFIG_HVCS) += hvcs.o
+obj-$(CONFIG_SGI_MBCS) += mbcs.o
 
 obj-$(CONFIG_PRINTER) += lp.o
 obj-$(CONFIG_TIPAR) += tipar.o
diff --git a/drivers/char/mbcs.c b/drivers/char/mbcs.c
new file mode 100644
index 000000000000..ec7100556c50
--- /dev/null
+++ b/drivers/char/mbcs.c
@@ -0,0 +1,849 @@
1/*
2 * This file is subject to the terms and conditions of the GNU General Public
3 * License. See the file "COPYING" in the main directory of this archive
4 * for more details.
5 *
6 * Copyright (c) 2005 Silicon Graphics, Inc. All rights reserved.
7 */
8
9/*
10 * MOATB Core Services driver.
11 */
12
13#include <linux/config.h>
14#include <linux/interrupt.h>
15#include <linux/module.h>
16#include <linux/moduleparam.h>
17#include <linux/types.h>
18#include <linux/ioport.h>
19#include <linux/notifier.h>
20#include <linux/reboot.h>
21#include <linux/init.h>
22#include <linux/fs.h>
23#include <linux/delay.h>
24#include <linux/device.h>
25#include <linux/mm.h>
26#include <linux/uio.h>
27#include <asm/io.h>
28#include <asm/uaccess.h>
29#include <asm/system.h>
30#include <asm/pgtable.h>
31#include <asm/sn/addrs.h>
32#include <asm/sn/intr.h>
33#include <asm/sn/tiocx.h>
34#include "mbcs.h"
35
36#define MBCS_DEBUG 0
37#if MBCS_DEBUG
38#define DBG(fmt...) printk(KERN_ALERT fmt)
39#else
40#define DBG(fmt...)
41#endif
42int mbcs_major;
43
44LIST_HEAD(soft_list);
45
46/*
47 * file operations
48 */
49struct file_operations mbcs_ops = {
50 .open = mbcs_open,
51 .llseek = mbcs_sram_llseek,
52 .read = mbcs_sram_read,
53 .write = mbcs_sram_write,
54 .mmap = mbcs_gscr_mmap,
55};
56
57struct mbcs_callback_arg {
58 int minor;
59 struct cx_dev *cx_dev;
60};
61
62static inline void mbcs_getdma_init(struct getdma *gdma)
63{
64 memset(gdma, 0, sizeof(struct getdma));
65 gdma->DoneIntEnable = 1;
66}
67
68static inline void mbcs_putdma_init(struct putdma *pdma)
69{
70 memset(pdma, 0, sizeof(struct putdma));
71 pdma->DoneIntEnable = 1;
72}
73
74static inline void mbcs_algo_init(struct algoblock *algo_soft)
75{
76 memset(algo_soft, 0, sizeof(struct algoblock));
77}
78
79static inline void mbcs_getdma_set(void *mmr,
80 uint64_t hostAddr,
81 uint64_t localAddr,
82 uint64_t localRamSel,
83 uint64_t numPkts,
84 uint64_t amoEnable,
85 uint64_t intrEnable,
86 uint64_t peerIO,
87 uint64_t amoHostDest,
88 uint64_t amoModType, uint64_t intrHostDest,
89 uint64_t intrVector)
90{
91 union dma_control rdma_control;
92 union dma_amo_dest amo_dest;
93 union intr_dest intr_dest;
94 union dma_localaddr local_addr;
95 union dma_hostaddr host_addr;
96
97 rdma_control.dma_control_reg = 0;
98 amo_dest.dma_amo_dest_reg = 0;
99 intr_dest.intr_dest_reg = 0;
100 local_addr.dma_localaddr_reg = 0;
101 host_addr.dma_hostaddr_reg = 0;
102
103 host_addr.dma_sys_addr = hostAddr;
104 MBCS_MMR_SET(mmr, MBCS_RD_DMA_SYS_ADDR, host_addr.dma_hostaddr_reg);
105
106 local_addr.dma_ram_addr = localAddr;
107 local_addr.dma_ram_sel = localRamSel;
108 MBCS_MMR_SET(mmr, MBCS_RD_DMA_LOC_ADDR, local_addr.dma_localaddr_reg);
109
110 rdma_control.dma_op_length = numPkts;
111 rdma_control.done_amo_en = amoEnable;
112 rdma_control.done_int_en = intrEnable;
113 rdma_control.pio_mem_n = peerIO;
114 MBCS_MMR_SET(mmr, MBCS_RD_DMA_CTRL, rdma_control.dma_control_reg);
115
116 amo_dest.dma_amo_sys_addr = amoHostDest;
117 amo_dest.dma_amo_mod_type = amoModType;
118 MBCS_MMR_SET(mmr, MBCS_RD_DMA_AMO_DEST, amo_dest.dma_amo_dest_reg);
119
120 intr_dest.address = intrHostDest;
121 intr_dest.int_vector = intrVector;
122 MBCS_MMR_SET(mmr, MBCS_RD_DMA_INT_DEST, intr_dest.intr_dest_reg);
123
124}
125
126static inline void mbcs_putdma_set(void *mmr,
127 uint64_t hostAddr,
128 uint64_t localAddr,
129 uint64_t localRamSel,
130 uint64_t numPkts,
131 uint64_t amoEnable,
132 uint64_t intrEnable,
133 uint64_t peerIO,
134 uint64_t amoHostDest,
135 uint64_t amoModType,
136 uint64_t intrHostDest, uint64_t intrVector)
137{
138 union dma_control wdma_control;
139 union dma_amo_dest amo_dest;
140 union intr_dest intr_dest;
141 union dma_localaddr local_addr;
142 union dma_hostaddr host_addr;
143
144 wdma_control.dma_control_reg = 0;
145 amo_dest.dma_amo_dest_reg = 0;
146 intr_dest.intr_dest_reg = 0;
147 local_addr.dma_localaddr_reg = 0;
148 host_addr.dma_hostaddr_reg = 0;
149
150 host_addr.dma_sys_addr = hostAddr;
151 MBCS_MMR_SET(mmr, MBCS_WR_DMA_SYS_ADDR, host_addr.dma_hostaddr_reg);
152
153 local_addr.dma_ram_addr = localAddr;
154 local_addr.dma_ram_sel = localRamSel;
155 MBCS_MMR_SET(mmr, MBCS_WR_DMA_LOC_ADDR, local_addr.dma_localaddr_reg);
156
157 wdma_control.dma_op_length = numPkts;
158 wdma_control.done_amo_en = amoEnable;
159 wdma_control.done_int_en = intrEnable;
160 wdma_control.pio_mem_n = peerIO;
161 MBCS_MMR_SET(mmr, MBCS_WR_DMA_CTRL, wdma_control.dma_control_reg);
162
163 amo_dest.dma_amo_sys_addr = amoHostDest;
164 amo_dest.dma_amo_mod_type = amoModType;
165 MBCS_MMR_SET(mmr, MBCS_WR_DMA_AMO_DEST, amo_dest.dma_amo_dest_reg);
166
167 intr_dest.address = intrHostDest;
168 intr_dest.int_vector = intrVector;
169 MBCS_MMR_SET(mmr, MBCS_WR_DMA_INT_DEST, intr_dest.intr_dest_reg);
170
171}
172
173static inline void mbcs_algo_set(void *mmr,
174 uint64_t amoHostDest,
175 uint64_t amoModType,
176 uint64_t intrHostDest,
177 uint64_t intrVector, uint64_t algoStepCount)
178{
179 union dma_amo_dest amo_dest;
180 union intr_dest intr_dest;
181 union algo_step step;
182
183 step.algo_step_reg = 0;
184 intr_dest.intr_dest_reg = 0;
185 amo_dest.dma_amo_dest_reg = 0;
186
187 amo_dest.dma_amo_sys_addr = amoHostDest;
188 amo_dest.dma_amo_mod_type = amoModType;
189 MBCS_MMR_SET(mmr, MBCS_ALG_AMO_DEST, amo_dest.dma_amo_dest_reg);
190
191 intr_dest.address = intrHostDest;
192 intr_dest.int_vector = intrVector;
193 MBCS_MMR_SET(mmr, MBCS_ALG_INT_DEST, intr_dest.intr_dest_reg);
194
195 step.alg_step_cnt = algoStepCount;
196 MBCS_MMR_SET(mmr, MBCS_ALG_STEP, step.algo_step_reg);
197}
198
199static inline int mbcs_getdma_start(struct mbcs_soft *soft)
200{
201 void *mmr_base;
202 struct getdma *gdma;
203 uint64_t numPkts;
204 union cm_control cm_control;
205
206 mmr_base = soft->mmr_base;
207 gdma = &soft->getdma;
208
209 /* check that host address got setup */
210 if (!gdma->hostAddr)
211 return -1;
212
213 numPkts =
214 (gdma->bytes + (MBCS_CACHELINE_SIZE - 1)) / MBCS_CACHELINE_SIZE;
215
216 /* program engine */
217 mbcs_getdma_set(mmr_base, tiocx_dma_addr(gdma->hostAddr),
218 gdma->localAddr,
219 (gdma->localAddr < MB2) ? 0 :
220 (gdma->localAddr < MB4) ? 1 :
221 (gdma->localAddr < MB6) ? 2 : 3,
222 numPkts,
223 gdma->DoneAmoEnable,
224 gdma->DoneIntEnable,
225 gdma->peerIO,
226 gdma->amoHostDest,
227 gdma->amoModType,
228 gdma->intrHostDest, gdma->intrVector);
229
230 /* start engine */
231 cm_control.cm_control_reg = MBCS_MMR_GET(mmr_base, MBCS_CM_CONTROL);
232 cm_control.rd_dma_go = 1;
233 MBCS_MMR_SET(mmr_base, MBCS_CM_CONTROL, cm_control.cm_control_reg);
234
235 return 0;
236
237}
238
239static inline int mbcs_putdma_start(struct mbcs_soft *soft)
240{
241 void *mmr_base;
242 struct putdma *pdma;
243 uint64_t numPkts;
244 union cm_control cm_control;
245
246 mmr_base = soft->mmr_base;
247 pdma = &soft->putdma;
248
249 /* check that host address got setup */
250 if (!pdma->hostAddr)
251 return -1;
252
253 numPkts =
254 (pdma->bytes + (MBCS_CACHELINE_SIZE - 1)) / MBCS_CACHELINE_SIZE;
255
256 /* program engine */
257 mbcs_putdma_set(mmr_base, tiocx_dma_addr(pdma->hostAddr),
258 pdma->localAddr,
259 (pdma->localAddr < MB2) ? 0 :
260 (pdma->localAddr < MB4) ? 1 :
261 (pdma->localAddr < MB6) ? 2 : 3,
262 numPkts,
263 pdma->DoneAmoEnable,
264 pdma->DoneIntEnable,
265 pdma->peerIO,
266 pdma->amoHostDest,
267 pdma->amoModType,
268 pdma->intrHostDest, pdma->intrVector);
269
270 /* start engine */
271 cm_control.cm_control_reg = MBCS_MMR_GET(mmr_base, MBCS_CM_CONTROL);
272 cm_control.wr_dma_go = 1;
273 MBCS_MMR_SET(mmr_base, MBCS_CM_CONTROL, cm_control.cm_control_reg);
274
275 return 0;
276
277}
278
279static inline int mbcs_algo_start(struct mbcs_soft *soft)
280{
281 struct algoblock *algo_soft = &soft->algo;
282 void *mmr_base = soft->mmr_base;
283 union cm_control cm_control;
284
285 if (down_interruptible(&soft->algolock))
286 return -ERESTARTSYS;
287
288 atomic_set(&soft->algo_done, 0);
289
290 mbcs_algo_set(mmr_base,
291 algo_soft->amoHostDest,
292 algo_soft->amoModType,
293 algo_soft->intrHostDest,
294 algo_soft->intrVector, algo_soft->algoStepCount);
295
296 /* start algorithm */
297 cm_control.cm_control_reg = MBCS_MMR_GET(mmr_base, MBCS_CM_CONTROL);
298 cm_control.alg_done_int_en = 1;
299 cm_control.alg_go = 1;
300 MBCS_MMR_SET(mmr_base, MBCS_CM_CONTROL, cm_control.cm_control_reg);
301
302 up(&soft->algolock);
303
304 return 0;
305}
306
307static inline ssize_t
308do_mbcs_sram_dmawrite(struct mbcs_soft *soft, uint64_t hostAddr,
309 size_t len, loff_t * off)
310{
311 int rv = 0;
312
313 if (down_interruptible(&soft->dmawritelock))
314 return -ERESTARTSYS;
315
316 atomic_set(&soft->dmawrite_done, 0);
317
318 soft->putdma.hostAddr = hostAddr;
319 soft->putdma.localAddr = *off;
320 soft->putdma.bytes = len;
321
322 if (mbcs_putdma_start(soft) < 0) {
323 DBG(KERN_ALERT "do_mbcs_sram_dmawrite: "
324 "mbcs_putdma_start failed\n");
325 rv = -EAGAIN;
326 goto dmawrite_exit;
327 }
328
329 if (wait_event_interruptible(soft->dmawrite_queue,
330 atomic_read(&soft->dmawrite_done))) {
331 rv = -ERESTARTSYS;
332 goto dmawrite_exit;
333 }
334
335 rv = len;
336 *off += len;
337
338dmawrite_exit:
339 up(&soft->dmawritelock);
340
341 return rv;
342}
343
344static inline ssize_t
345do_mbcs_sram_dmaread(struct mbcs_soft *soft, uint64_t hostAddr,
346 size_t len, loff_t * off)
347{
348 int rv = 0;
349
350 if (down_interruptible(&soft->dmareadlock))
351 return -ERESTARTSYS;
352
353 atomic_set(&soft->dmaread_done, 0);
354
355 soft->getdma.hostAddr = hostAddr;
356 soft->getdma.localAddr = *off;
357 soft->getdma.bytes = len;
358
359 if (mbcs_getdma_start(soft) < 0) {
360 DBG(KERN_ALERT "mbcs_strategy: mbcs_getdma_start failed\n");
361 rv = -EAGAIN;
362 goto dmaread_exit;
363 }
364
365 if (wait_event_interruptible(soft->dmaread_queue,
366 atomic_read(&soft->dmaread_done))) {
367 rv = -ERESTARTSYS;
368 goto dmaread_exit;
369 }
370
371 rv = len;
372 *off += len;
373
374dmaread_exit:
375 up(&soft->dmareadlock);
376
377 return rv;
378}
379
380int mbcs_open(struct inode *ip, struct file *fp)
381{
382 struct mbcs_soft *soft;
383 int minor;
384
385 minor = iminor(ip);
386
387 list_for_each_entry(soft, &soft_list, list) {
388 if (soft->nasid == minor) {
389 fp->private_data = soft->cxdev;
390 return 0;
391 }
392 }
393
394 return -ENODEV;
395}
396
397ssize_t mbcs_sram_read(struct file * fp, char *buf, size_t len, loff_t * off)
398{
399 struct cx_dev *cx_dev = fp->private_data;
400 struct mbcs_soft *soft = cx_dev->soft;
401 uint64_t hostAddr;
402 int rv = 0;
403
404 hostAddr = __get_dma_pages(GFP_KERNEL, get_order(len));
405 if (hostAddr == 0)
406 return -ENOMEM;
407
408 rv = do_mbcs_sram_dmawrite(soft, hostAddr, len, off);
409 if (rv < 0)
410 goto exit;
411
412 if (copy_to_user(buf, (void *)hostAddr, len))
413 rv = -EFAULT;
414
415 exit:
416 free_pages(hostAddr, get_order(len));
417
418 return rv;
419}
420
421ssize_t
422mbcs_sram_write(struct file * fp, const char *buf, size_t len, loff_t * off)
423{
424 struct cx_dev *cx_dev = fp->private_data;
425 struct mbcs_soft *soft = cx_dev->soft;
426 uint64_t hostAddr;
427 int rv = 0;
428
429 hostAddr = __get_dma_pages(GFP_KERNEL, get_order(len));
430 if (hostAddr == 0)
431 return -ENOMEM;
432
433 if (copy_from_user((void *)hostAddr, buf, len)) {
434 rv = -EFAULT;
435 goto exit;
436 }
437
438 rv = do_mbcs_sram_dmaread(soft, hostAddr, len, off);
439
440 exit:
441 free_pages(hostAddr, get_order(len));
442
443 return rv;
444}
445
446loff_t mbcs_sram_llseek(struct file * filp, loff_t off, int whence)
447{
448 loff_t newpos;
449
450 switch (whence) {
451 case 0: /* SEEK_SET */
452 newpos = off;
453 break;
454
455 case 1: /* SEEK_CUR */
456 newpos = filp->f_pos + off;
457 break;
458
459 case 2: /* SEEK_END */
460 newpos = MBCS_SRAM_SIZE + off;
461 break;
462
463 default: /* can't happen */
464 return -EINVAL;
465 }
466
467 if (newpos < 0)
468 return -EINVAL;
469
470 filp->f_pos = newpos;
471
472 return newpos;
473}
474
475static uint64_t mbcs_pioaddr(struct mbcs_soft *soft, uint64_t offset)
476{
477 uint64_t mmr_base;
478
479 mmr_base = (uint64_t) (soft->mmr_base + offset);
480
481 return mmr_base;
482}
483
484static void mbcs_debug_pioaddr_set(struct mbcs_soft *soft)
485{
486 soft->debug_addr = mbcs_pioaddr(soft, MBCS_DEBUG_START);
487}
488
489static void mbcs_gscr_pioaddr_set(struct mbcs_soft *soft)
490{
491 soft->gscr_addr = mbcs_pioaddr(soft, MBCS_GSCR_START);
492}
493
494int mbcs_gscr_mmap(struct file *fp, struct vm_area_struct *vma)
495{
496 struct cx_dev *cx_dev = fp->private_data;
497 struct mbcs_soft *soft = cx_dev->soft;
498
499 if (vma->vm_pgoff != 0)
500 return -EINVAL;
501
502 vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
503
504 /* Remap-pfn-range will mark the range VM_IO and VM_RESERVED */
505 if (remap_pfn_range(vma,
506 vma->vm_start,
507 __pa(soft->gscr_addr) >> PAGE_SHIFT,
508 PAGE_SIZE,
509 vma->vm_page_prot))
510 return -EAGAIN;
511
512 return 0;
513}
514
515/**
516 * mbcs_completion_intr_handler - Primary completion handler.
517 * @irq: irq
518 * @arg: soft struct for device
519 * @ep: regs
520 *
521 */
522static irqreturn_t
523mbcs_completion_intr_handler(int irq, void *arg, struct pt_regs *ep)
524{
525 struct mbcs_soft *soft = (struct mbcs_soft *)arg;
526 void *mmr_base;
527 union cm_status cm_status;
528 union cm_control cm_control;
529
530 mmr_base = soft->mmr_base;
531 cm_status.cm_status_reg = MBCS_MMR_GET(mmr_base, MBCS_CM_STATUS);
532
533 if (cm_status.rd_dma_done) {
534 /* stop dma-read engine, clear status */
535 cm_control.cm_control_reg =
536 MBCS_MMR_GET(mmr_base, MBCS_CM_CONTROL);
537 cm_control.rd_dma_clr = 1;
538 MBCS_MMR_SET(mmr_base, MBCS_CM_CONTROL,
539 cm_control.cm_control_reg);
540 atomic_set(&soft->dmaread_done, 1);
541 wake_up(&soft->dmaread_queue);
542 }
543 if (cm_status.wr_dma_done) {
544 /* stop dma-write engine, clear status */
545 cm_control.cm_control_reg =
546 MBCS_MMR_GET(mmr_base, MBCS_CM_CONTROL);
547 cm_control.wr_dma_clr = 1;
548 MBCS_MMR_SET(mmr_base, MBCS_CM_CONTROL,
549 cm_control.cm_control_reg);
550 atomic_set(&soft->dmawrite_done, 1);
551 wake_up(&soft->dmawrite_queue);
552 }
553 if (cm_status.alg_done) {
554 /* clear status */
555 cm_control.cm_control_reg =
556 MBCS_MMR_GET(mmr_base, MBCS_CM_CONTROL);
557 cm_control.alg_done_clr = 1;
558 MBCS_MMR_SET(mmr_base, MBCS_CM_CONTROL,
559 cm_control.cm_control_reg);
560 atomic_set(&soft->algo_done, 1);
561 wake_up(&soft->algo_queue);
562 }
563
564 return IRQ_HANDLED;
565}
566
567/**
568 * mbcs_intr_alloc - Allocate interrupts.
569 * @dev: device pointer
570 *
571 */
572static int mbcs_intr_alloc(struct cx_dev *dev)
573{
574 struct sn_irq_info *sn_irq;
575 struct mbcs_soft *soft;
576 struct getdma *getdma;
577 struct putdma *putdma;
578 struct algoblock *algo;
579
580 soft = dev->soft;
581 getdma = &soft->getdma;
582 putdma = &soft->putdma;
583 algo = &soft->algo;
584
585 soft->get_sn_irq = NULL;
586 soft->put_sn_irq = NULL;
587 soft->algo_sn_irq = NULL;
588
589 sn_irq = tiocx_irq_alloc(dev->cx_id.nasid, TIOCX_CORELET, -1, -1, -1);
590 if (sn_irq == NULL)
591 return -EAGAIN;
592 soft->get_sn_irq = sn_irq;
593 getdma->intrHostDest = sn_irq->irq_xtalkaddr;
594 getdma->intrVector = sn_irq->irq_irq;
595 if (request_irq(sn_irq->irq_irq,
596 (void *)mbcs_completion_intr_handler, SA_SHIRQ,
597 "MBCS get intr", (void *)soft)) {
598 tiocx_irq_free(soft->get_sn_irq);
599 return -EAGAIN;
600 }
601
602 sn_irq = tiocx_irq_alloc(dev->cx_id.nasid, TIOCX_CORELET, -1, -1, -1);
603 if (sn_irq == NULL) {
604 free_irq(soft->get_sn_irq->irq_irq, soft);
605 tiocx_irq_free(soft->get_sn_irq);
606 return -EAGAIN;
607 }
608 soft->put_sn_irq = sn_irq;
609 putdma->intrHostDest = sn_irq->irq_xtalkaddr;
610 putdma->intrVector = sn_irq->irq_irq;
611 if (request_irq(sn_irq->irq_irq,
612 (void *)mbcs_completion_intr_handler, SA_SHIRQ,
613 "MBCS put intr", (void *)soft)) {
614 tiocx_irq_free(soft->put_sn_irq);
615 free_irq(soft->get_sn_irq->irq_irq, soft);
616 tiocx_irq_free(soft->get_sn_irq);
617 return -EAGAIN;
618 }
619
620 sn_irq = tiocx_irq_alloc(dev->cx_id.nasid, TIOCX_CORELET, -1, -1, -1);
621 if (sn_irq == NULL) {
622 free_irq(soft->put_sn_irq->irq_irq, soft);
623 tiocx_irq_free(soft->put_sn_irq);
624 free_irq(soft->get_sn_irq->irq_irq, soft);
625 tiocx_irq_free(soft->get_sn_irq);
626 return -EAGAIN;
627 }
628 soft->algo_sn_irq = sn_irq;
629 algo->intrHostDest = sn_irq->irq_xtalkaddr;
630 algo->intrVector = sn_irq->irq_irq;
631 if (request_irq(sn_irq->irq_irq,
632 (void *)mbcs_completion_intr_handler, SA_SHIRQ,
633 "MBCS algo intr", (void *)soft)) {
634 tiocx_irq_free(soft->algo_sn_irq);
635 free_irq(soft->put_sn_irq->irq_irq, soft);
636 tiocx_irq_free(soft->put_sn_irq);
637 free_irq(soft->get_sn_irq->irq_irq, soft);
638 tiocx_irq_free(soft->get_sn_irq);
639 return -EAGAIN;
640 }
641
642 return 0;
643}
644
645/**
646 * mbcs_intr_dealloc - Remove interrupts.
647 * @dev: device pointer
648 *
649 */
650static void mbcs_intr_dealloc(struct cx_dev *dev)
651{
652 struct mbcs_soft *soft;
653
654 soft = dev->soft;
655
656 free_irq(soft->get_sn_irq->irq_irq, soft);
657 tiocx_irq_free(soft->get_sn_irq);
658 free_irq(soft->put_sn_irq->irq_irq, soft);
659 tiocx_irq_free(soft->put_sn_irq);
660 free_irq(soft->algo_sn_irq->irq_irq, soft);
661 tiocx_irq_free(soft->algo_sn_irq);
662}
663
664static inline int mbcs_hw_init(struct mbcs_soft *soft)
665{
666 void *mmr_base = soft->mmr_base;
667 union cm_control cm_control;
668 union cm_req_timeout cm_req_timeout;
669 uint64_t err_stat;
670
671 cm_req_timeout.cm_req_timeout_reg =
672 MBCS_MMR_GET(mmr_base, MBCS_CM_REQ_TOUT);
673
674 cm_req_timeout.time_out = MBCS_CM_CONTROL_REQ_TOUT_MASK;
675 MBCS_MMR_SET(mmr_base, MBCS_CM_REQ_TOUT,
676 cm_req_timeout.cm_req_timeout_reg);
677
678 mbcs_gscr_pioaddr_set(soft);
679 mbcs_debug_pioaddr_set(soft);
680
681 /* clear errors */
682 err_stat = MBCS_MMR_GET(mmr_base, MBCS_CM_ERR_STAT);
683 MBCS_MMR_SET(mmr_base, MBCS_CM_CLR_ERR_STAT, err_stat);
684 MBCS_MMR_ZERO(mmr_base, MBCS_CM_ERROR_DETAIL1);
685
686 /* enable interrupts */
687 /* turn off 2^23 (INT_EN_PIO_REQ_ADDR_INV) */
688 MBCS_MMR_SET(mmr_base, MBCS_CM_ERR_INT_EN, 0x3ffffff7e00ffUL);
689
690 /* arm status regs and clear engines */
691 cm_control.cm_control_reg = MBCS_MMR_GET(mmr_base, MBCS_CM_CONTROL);
692 cm_control.rearm_stat_regs = 1;
693 cm_control.alg_clr = 1;
694 cm_control.wr_dma_clr = 1;
695 cm_control.rd_dma_clr = 1;
696
697 MBCS_MMR_SET(mmr_base, MBCS_CM_CONTROL, cm_control.cm_control_reg);
698
699 return 0;
700}
701
702static ssize_t show_algo(struct device *dev, char *buf)
703{
704 struct cx_dev *cx_dev = to_cx_dev(dev);
705 struct mbcs_soft *soft = cx_dev->soft;
706 uint64_t debug0;
707
708 /*
709 * By convention, the first debug register contains the
710 * algorithm number and revision.
711 */
712 debug0 = *(uint64_t *) soft->debug_addr;
713
714 return sprintf(buf, "0x%lx 0x%lx\n",
715 (debug0 >> 32), (debug0 & 0xffffffff));
716}
717
718static ssize_t store_algo(struct device *dev, const char *buf, size_t count)
719{
720 int n;
721 struct cx_dev *cx_dev = to_cx_dev(dev);
722 struct mbcs_soft *soft = cx_dev->soft;
723
724 if (count <= 0)
725 return 0;
726
727 n = simple_strtoul(buf, NULL, 0);
728
729 if (n == 1) {
730 mbcs_algo_start(soft);
731 if (wait_event_interruptible(soft->algo_queue,
732 atomic_read(&soft->algo_done)))
733 return -ERESTARTSYS;
734 }
735
736 return count;
737}
738
739DEVICE_ATTR(algo, 0644, show_algo, store_algo);
740
741/**
742 * mbcs_probe - Initialize for device
743 * @dev: device pointer
744 * @device_id: id table pointer
745 *
746 */
747static int mbcs_probe(struct cx_dev *dev, const struct cx_device_id *id)
748{
749 struct mbcs_soft *soft;
750
751 dev->soft = NULL;
752
753 soft = kcalloc(1, sizeof(struct mbcs_soft), GFP_KERNEL);
754 if (soft == NULL)
755 return -ENOMEM;
756
757 soft->nasid = dev->cx_id.nasid;
758 list_add(&soft->list, &soft_list);
759 soft->mmr_base = (void *)tiocx_swin_base(dev->cx_id.nasid);
760 dev->soft = soft;
761 soft->cxdev = dev;
762
763 init_waitqueue_head(&soft->dmawrite_queue);
764 init_waitqueue_head(&soft->dmaread_queue);
765 init_waitqueue_head(&soft->algo_queue);
766
767 init_MUTEX(&soft->dmawritelock);
768 init_MUTEX(&soft->dmareadlock);
769 init_MUTEX(&soft->algolock);
770
771 mbcs_getdma_init(&soft->getdma);
772 mbcs_putdma_init(&soft->putdma);
773 mbcs_algo_init(&soft->algo);
774
775 mbcs_hw_init(soft);
776
777 /* Allocate interrupts */
778 mbcs_intr_alloc(dev);
779
780 device_create_file(&dev->dev, &dev_attr_algo);
781
782 return 0;
783}
784
785static int mbcs_remove(struct cx_dev *dev)
786{
787 if (dev->soft) {
788 mbcs_intr_dealloc(dev);
789 kfree(dev->soft);
790 }
791
792 device_remove_file(&dev->dev, &dev_attr_algo);
793
794 return 0;
795}
796
797const struct cx_device_id __devinitdata mbcs_id_table[] = {
798 {
799 .part_num = MBCS_PART_NUM,
800 .mfg_num = MBCS_MFG_NUM,
801 },
802 {
803 .part_num = MBCS_PART_NUM_ALG0,
804 .mfg_num = MBCS_MFG_NUM,
805 },
806 {0, 0}
807};
808
809MODULE_DEVICE_TABLE(cx, mbcs_id_table);
810
811struct cx_drv mbcs_driver = {
812 .name = DEVICE_NAME,
813 .id_table = mbcs_id_table,
814 .probe = mbcs_probe,
815 .remove = mbcs_remove,
816};
817
818static void __exit mbcs_exit(void)
819{
820 int rv;
821
822 rv = unregister_chrdev(mbcs_major, DEVICE_NAME);
823 if (rv < 0)
824 DBG(KERN_ALERT "Error in unregister_chrdev: %d\n", rv);
825
826 cx_driver_unregister(&mbcs_driver);
827}
828
829static int __init mbcs_init(void)
830{
831 int rv;
832
833 // Put driver into chrdevs[]. Get major number.
834 rv = register_chrdev(mbcs_major, DEVICE_NAME, &mbcs_ops);
835 if (rv < 0) {
836 DBG(KERN_ALERT "mbcs_init: can't get major number. %d\n", rv);
837 return rv;
838 }
839 mbcs_major = rv;
840
841 return cx_driver_register(&mbcs_driver);
842}
843
844module_init(mbcs_init);
845module_exit(mbcs_exit);
846
847MODULE_AUTHOR("Bruce Losure <blosure@sgi.com>");
848MODULE_DESCRIPTION("Driver for MOATB Core Services");
849MODULE_LICENSE("GPL");
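
The file_operations table at the top of mbcs.c routes open/llseek/read/write to the SRAM DMA paths and mmap to the GSCR page. As a rough illustration of how user space might drive the device (a sketch only: the /dev/mbcs0 node name is an assumption, since the driver takes a dynamic major and mbcs_open() matches the minor number against the corelet's nasid, so the node has to be created by hand with mknod):

/* Hypothetical user-space client: DMA 4 KB out of FPGA SRAM and map the
 * GSCR registers.  Assumes "mknod /dev/mbcs0 c <dynamic major> <nasid>"
 * was done beforehand.
 */
#include <fcntl.h>
#include <stdint.h>
#include <stdio.h>
#include <sys/mman.h>
#include <unistd.h>

int main(void)
{
	char buf[4096];
	int fd = open("/dev/mbcs0", O_RDWR);

	if (fd < 0) {
		perror("open");
		return 1;
	}

	/* mbcs_sram_llseek() treats the file offset as an SRAM address */
	if (lseek(fd, 0, SEEK_SET) < 0 || read(fd, buf, sizeof(buf)) < 0)
		perror("sram read");	/* read() uses the card's write-DMA engine (card to host) */

	/* mbcs_gscr_mmap() remaps one uncached page of GSCR space */
	volatile uint64_t *gscr = mmap(NULL, 4096, PROT_READ | PROT_WRITE,
				       MAP_SHARED, fd, 0);
	if (gscr == MAP_FAILED)
		perror("mmap");
	else
		printf("gscr[0] = 0x%llx\n", (unsigned long long)gscr[0]);

	close(fd);
	return 0;
}

Note that each read or write bounces through pages from __get_dma_pages(GFP_KERNEL, get_order(len)), so an oversized transfer simply fails with -ENOMEM.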
diff --git a/drivers/char/mbcs.h b/drivers/char/mbcs.h
new file mode 100644
index 000000000000..844644d201c5
--- /dev/null
+++ b/drivers/char/mbcs.h
@@ -0,0 +1,553 @@
1/*
2 * This file is subject to the terms and conditions of the GNU General Public
3 * License. See the file "COPYING" in the main directory of this archive
4 * for more details.
5 *
6 * Copyright (c) 2005 Silicon Graphics, Inc. All rights reserved.
7 */
8
9#ifndef __MBCS_H__
10#define __MBCS_H__
11
12/*
13 * General macros
14 */
15#define MB (1024*1024)
16#define MB2 (2*MB)
17#define MB4 (4*MB)
18#define MB6 (6*MB)
19
20/*
21 * Offsets and masks
22 */
23#define MBCS_CM_ID 0x0000 /* Identification */
24#define MBCS_CM_STATUS 0x0008 /* Status */
25#define MBCS_CM_ERROR_DETAIL1 0x0010 /* Error Detail1 */
26#define MBCS_CM_ERROR_DETAIL2 0x0018 /* Error Detail2 */
27#define MBCS_CM_CONTROL 0x0020 /* Control */
28#define MBCS_CM_REQ_TOUT 0x0028 /* Request Time-out */
29#define MBCS_CM_ERR_INT_DEST 0x0038 /* Error Interrupt Destination */
30#define MBCS_CM_TARG_FL 0x0050 /* Target Flush */
31#define MBCS_CM_ERR_STAT 0x0060 /* Error Status */
32#define MBCS_CM_CLR_ERR_STAT 0x0068 /* Clear Error Status */
33#define MBCS_CM_ERR_INT_EN 0x0070 /* Error Interrupt Enable */
34#define MBCS_RD_DMA_SYS_ADDR 0x0100 /* Read DMA System Address */
35#define MBCS_RD_DMA_LOC_ADDR 0x0108 /* Read DMA Local Address */
36#define MBCS_RD_DMA_CTRL 0x0110 /* Read DMA Control */
37#define MBCS_RD_DMA_AMO_DEST 0x0118 /* Read DMA AMO Destination */
38#define MBCS_RD_DMA_INT_DEST 0x0120 /* Read DMA Interrupt Destination */
39#define MBCS_RD_DMA_AUX_STAT 0x0130 /* Read DMA Auxiliary Status */
40#define MBCS_WR_DMA_SYS_ADDR 0x0200 /* Write DMA System Address */
41#define MBCS_WR_DMA_LOC_ADDR 0x0208 /* Write DMA Local Address */
42#define MBCS_WR_DMA_CTRL 0x0210 /* Write DMA Control */
43#define MBCS_WR_DMA_AMO_DEST 0x0218 /* Write DMA AMO Destination */
44#define MBCS_WR_DMA_INT_DEST 0x0220 /* Write DMA Interrupt Destination */
45#define MBCS_WR_DMA_AUX_STAT 0x0230 /* Write DMA Auxiliary Status */
46#define MBCS_ALG_AMO_DEST 0x0300 /* Algorithm AMO Destination */
47#define MBCS_ALG_INT_DEST 0x0308 /* Algorithm Interrupt Destination */
48#define MBCS_ALG_OFFSETS 0x0310
49#define MBCS_ALG_STEP 0x0318 /* Algorithm Step */
50
51#define MBCS_GSCR_START 0x0000000
52#define MBCS_DEBUG_START 0x0100000
53#define MBCS_RAM0_START 0x0200000
54#define MBCS_RAM1_START 0x0400000
55#define MBCS_RAM2_START 0x0600000
56
57#define MBCS_CM_CONTROL_REQ_TOUT_MASK 0x0000000000ffffffUL
58//#define PIO_BASE_ADDR_BASE_OFFSET_MASK 0x00fffffffff00000UL
59
60#define MBCS_SRAM_SIZE (1024*1024)
61#define MBCS_CACHELINE_SIZE 128
62
63/*
64 * MMR get's and put's
65 */
66#define MBCS_MMR_ADDR(mmr_base, offset)((uint64_t *)(mmr_base + offset))
67#define MBCS_MMR_SET(mmr_base, offset, value) { \
68 uint64_t *mbcs_mmr_set_u64p, readback; \
69 mbcs_mmr_set_u64p = (uint64_t *)(mmr_base + offset); \
70 *mbcs_mmr_set_u64p = value; \
71 readback = *mbcs_mmr_set_u64p; \
72}
73#define MBCS_MMR_GET(mmr_base, offset) *(uint64_t *)(mmr_base + offset)
74#define MBCS_MMR_ZERO(mmr_base, offset) MBCS_MMR_SET(mmr_base, offset, 0)
75
76/*
77 * MBCS mmr structures
78 */
79union cm_id {
80 uint64_t cm_id_reg;
81 struct {
82 uint64_t always_one:1, // 0
83 mfg_id:11, // 11:1
84 part_num:16, // 27:12
85 bitstream_rev:8, // 35:28
86 :28; // 63:36
87 };
88};
89
90union cm_status {
91 uint64_t cm_status_reg;
92 struct {
93 uint64_t pending_reads:8, // 7:0
94 pending_writes:8, // 15:8
95 ice_rsp_credits:8, // 23:16
96 ice_req_credits:8, // 31:24
97 cm_req_credits:8, // 39:32
98 :1, // 40
99 rd_dma_in_progress:1, // 41
100 rd_dma_done:1, // 42
101 :1, // 43
102 wr_dma_in_progress:1, // 44
103 wr_dma_done:1, // 45
104 alg_waiting:1, // 46
105 alg_pipe_running:1, // 47
106 alg_done:1, // 48
107 :3, // 51:49
108 pending_int_reqs:8, // 59:52
109 :3, // 62:60
110 alg_half_speed_sel:1; // 63
111 };
112};
113
114union cm_error_detail1 {
115 uint64_t cm_error_detail1_reg;
116 struct {
117 uint64_t packet_type:4, // 3:0
118 source_id:2, // 5:4
119 data_size:2, // 7:6
120 tnum:8, // 15:8
121 byte_enable:8, // 23:16
122 gfx_cred:8, // 31:24
123 read_type:2, // 33:32
124 pio_or_memory:1, // 34
125 head_cw_error:1, // 35
126 :12, // 47:36
127 head_error_bit:1, // 48
128 data_error_bit:1, // 49
129 :13, // 62:50
130 valid:1; // 63
131 };
132};
133
134union cm_error_detail2 {
135 uint64_t cm_error_detail2_reg;
136 struct {
137 uint64_t address:56, // 55:0
138 :8; // 63:56
139 };
140};
141
142union cm_control {
143 uint64_t cm_control_reg;
144 struct {
145 uint64_t cm_id:2, // 1:0
146 :2, // 3:2
147 max_trans:5, // 8:4
148 :3, // 11:9
149 address_mode:1, // 12
150 :7, // 19:13
151 credit_limit:8, // 27:20
152 :5, // 32:28
153 rearm_stat_regs:1, // 33
154 prescalar_byp:1, // 34
155 force_gap_war:1, // 35
156 rd_dma_go:1, // 36
157 wr_dma_go:1, // 37
158 alg_go:1, // 38
159 rd_dma_clr:1, // 39
160 wr_dma_clr:1, // 40
161 alg_clr:1, // 41
162 :2, // 43:42
163 alg_wait_step:1, // 44
164 alg_done_amo_en:1, // 45
165 alg_done_int_en:1, // 46
166 :1, // 47
167 alg_sram0_locked:1, // 48
168 alg_sram1_locked:1, // 49
169 alg_sram2_locked:1, // 50
170 alg_done_clr:1, // 51
171 :12; // 63:52
172 };
173};
174
175union cm_req_timeout {
176 uint64_t cm_req_timeout_reg;
177 struct {
178 uint64_t time_out:24, // 23:0
179 :40; // 63:24
180 };
181};
182
183union intr_dest {
184 uint64_t intr_dest_reg;
185 struct {
186 uint64_t address:56, // 55:0
187 int_vector:8; // 63:56
188 };
189};
190
191union cm_error_status {
192 uint64_t cm_error_status_reg;
193 struct {
194 uint64_t ecc_sbe:1, // 0
195 ecc_mbe:1, // 1
196 unsupported_req:1, // 2
197 unexpected_rsp:1, // 3
198 bad_length:1, // 4
199 bad_datavalid:1, // 5
200 buffer_overflow:1, // 6
201 request_timeout:1, // 7
202 :8, // 15:8
203 head_inv_data_size:1, // 16
204 rsp_pactype_inv:1, // 17
205 head_sb_err:1, // 18
206 missing_head:1, // 19
207 head_inv_rd_type:1, // 20
208 head_cmd_err_bit:1, // 21
209 req_addr_align_inv:1, // 22
210 pio_req_addr_inv:1, // 23
211 req_range_dsize_inv:1, // 24
212 early_term:1, // 25
213 early_tail:1, // 26
214 missing_tail:1, // 27
215 data_flit_sb_err:1, // 28
216 cm2hcm_req_cred_of:1, // 29
217 cm2hcm_rsp_cred_of:1, // 30
218 rx_bad_didn:1, // 31
219 rd_dma_err_rsp:1, // 32
220 rd_dma_tnum_tout:1, // 33
221 rd_dma_multi_tnum_tou:1, // 34
222 wr_dma_err_rsp:1, // 35
223 wr_dma_tnum_tout:1, // 36
224 wr_dma_multi_tnum_tou:1, // 37
225 alg_data_overflow:1, // 38
226 alg_data_underflow:1, // 39
227 ram0_access_conflict:1, // 40
228 ram1_access_conflict:1, // 41
229 ram2_access_conflict:1, // 42
230 ram0_perr:1, // 43
231 ram1_perr:1, // 44
232 ram2_perr:1, // 45
233 int_gen_rsp_err:1, // 46
234 int_gen_tnum_tout:1, // 47
235 rd_dma_prog_err:1, // 48
236 wr_dma_prog_err:1, // 49
237 :14; // 63:50
238 };
239};
240
241union cm_clr_error_status {
242 uint64_t cm_clr_error_status_reg;
243 struct {
244 uint64_t clr_ecc_sbe:1, // 0
245 clr_ecc_mbe:1, // 1
246 clr_unsupported_req:1, // 2
247 clr_unexpected_rsp:1, // 3
248 clr_bad_length:1, // 4
249 clr_bad_datavalid:1, // 5
250 clr_buffer_overflow:1, // 6
251 clr_request_timeout:1, // 7
252 :8, // 15:8
253 clr_head_inv_data_siz:1, // 16
254 clr_rsp_pactype_inv:1, // 17
255 clr_head_sb_err:1, // 18
256 clr_missing_head:1, // 19
257 clr_head_inv_rd_type:1, // 20
258 clr_head_cmd_err_bit:1, // 21
259 clr_req_addr_align_in:1, // 22
260 clr_pio_req_addr_inv:1, // 23
261 clr_req_range_dsize_i:1, // 24
262 clr_early_term:1, // 25
263 clr_early_tail:1, // 26
264 clr_missing_tail:1, // 27
265 clr_data_flit_sb_err:1, // 28
266 clr_cm2hcm_req_cred_o:1, // 29
267 clr_cm2hcm_rsp_cred_o:1, // 30
268 clr_rx_bad_didn:1, // 31
269 clr_rd_dma_err_rsp:1, // 32
270 clr_rd_dma_tnum_tout:1, // 33
271 clr_rd_dma_multi_tnum:1, // 34
272 clr_wr_dma_err_rsp:1, // 35
273 clr_wr_dma_tnum_tout:1, // 36
274 clr_wr_dma_multi_tnum:1, // 37
275 clr_alg_data_overflow:1, // 38
276 clr_alg_data_underflo:1, // 39
277 clr_ram0_access_confl:1, // 40
278 clr_ram1_access_confl:1, // 41
279 clr_ram2_access_confl:1, // 42
280 clr_ram0_perr:1, // 43
281 clr_ram1_perr:1, // 44
282 clr_ram2_perr:1, // 45
283 clr_int_gen_rsp_err:1, // 46
284 clr_int_gen_tnum_tout:1, // 47
285 clr_rd_dma_prog_err:1, // 48
286 clr_wr_dma_prog_err:1, // 49
287 :14; // 63:50
288 };
289};
290
291union cm_error_intr_enable {
292 uint64_t cm_error_intr_enable_reg;
293 struct {
294 uint64_t int_en_ecc_sbe:1, // 0
295 int_en_ecc_mbe:1, // 1
296 int_en_unsupported_re:1, // 2
297 int_en_unexpected_rsp:1, // 3
298 int_en_bad_length:1, // 4
299 int_en_bad_datavalid:1, // 5
300 int_en_buffer_overflo:1, // 6
301 int_en_request_timeou:1, // 7
302 :8, // 15:8
303 int_en_head_inv_data_:1, // 16
304 int_en_rsp_pactype_in:1, // 17
305 int_en_head_sb_err:1, // 18
306 int_en_missing_head:1, // 19
307 int_en_head_inv_rd_ty:1, // 20
308 int_en_head_cmd_err_b:1, // 21
309 int_en_req_addr_align:1, // 22
310 int_en_pio_req_addr_i:1, // 23
311 int_en_req_range_dsiz:1, // 24
312 int_en_early_term:1, // 25
313 int_en_early_tail:1, // 26
314 int_en_missing_tail:1, // 27
315 int_en_data_flit_sb_e:1, // 28
316 int_en_cm2hcm_req_cre:1, // 29
317 int_en_cm2hcm_rsp_cre:1, // 30
318 int_en_rx_bad_didn:1, // 31
319 int_en_rd_dma_err_rsp:1, // 32
320 int_en_rd_dma_tnum_to:1, // 33
321 int_en_rd_dma_multi_t:1, // 34
322 int_en_wr_dma_err_rsp:1, // 35
323 int_en_wr_dma_tnum_to:1, // 36
324 int_en_wr_dma_multi_t:1, // 37
325 int_en_alg_data_overf:1, // 38
326 int_en_alg_data_under:1, // 39
327 int_en_ram0_access_co:1, // 40
328 int_en_ram1_access_co:1, // 41
329 int_en_ram2_access_co:1, // 42
330 int_en_ram0_perr:1, // 43
331 int_en_ram1_perr:1, // 44
332 int_en_ram2_perr:1, // 45
333 int_en_int_gen_rsp_er:1, // 46
334 int_en_int_gen_tnum_t:1, // 47
335 int_en_rd_dma_prog_er:1, // 48
336 int_en_wr_dma_prog_er:1, // 49
337 :14; // 63:50
338 };
339};
340
341struct cm_mmr {
342 union cm_id id;
343 union cm_status status;
344 union cm_error_detail1 err_detail1;
345 union cm_error_detail2 err_detail2;
346 union cm_control control;
347 union cm_req_timeout req_timeout;
348 uint64_t reserved1[1];
349 union intr_dest int_dest;
350 uint64_t reserved2[2];
351 uint64_t targ_flush;
352 uint64_t reserved3[1];
353 union cm_error_status err_status;
354 union cm_clr_error_status clr_err_status;
355 union cm_error_intr_enable int_enable;
356};
357
358union dma_hostaddr {
359 uint64_t dma_hostaddr_reg;
360 struct {
361 uint64_t dma_sys_addr:56, // 55:0
362 :8; // 63:56
363 };
364};
365
366union dma_localaddr {
367 uint64_t dma_localaddr_reg;
368 struct {
369 uint64_t dma_ram_addr:21, // 20:0
370 dma_ram_sel:2, // 22:21
371 :41; // 63:23
372 };
373};
374
375union dma_control {
376 uint64_t dma_control_reg;
377 struct {
378 uint64_t dma_op_length:16, // 15:0
379 :18, // 33:16
380 done_amo_en:1, // 34
381 done_int_en:1, // 35
382 :1, // 36
383 pio_mem_n:1, // 37
384 :26; // 63:38
385 };
386};
387
388union dma_amo_dest {
389 uint64_t dma_amo_dest_reg;
390 struct {
391 uint64_t dma_amo_sys_addr:56, // 55:0
392 dma_amo_mod_type:3, // 58:56
393 :5; // 63:59
394 };
395};
396
397union rdma_aux_status {
398 uint64_t rdma_aux_status_reg;
399 struct {
400 uint64_t op_num_pacs_left:17, // 16:0
401 :5, // 21:17
402 lrsp_buff_empty:1, // 22
403 :17, // 39:23
404 pending_reqs_left:6, // 45:40
405 :18; // 63:46
406 };
407};
408
409struct rdma_mmr {
410 union dma_hostaddr host_addr;
411 union dma_localaddr local_addr;
412 union dma_control control;
413 union dma_amo_dest amo_dest;
414 union intr_dest intr_dest;
415 union rdma_aux_status aux_status;
416};
417
418union wdma_aux_status {
419 uint64_t wdma_aux_status_reg;
420 struct {
421 uint64_t op_num_pacs_left:17, // 16:0
422 :4, // 20:17
423 lreq_buff_empty:1, // 21
424 :18, // 39:22
425 pending_reqs_left:6, // 45:40
426 :18; // 63:46
427 };
428};
429
430struct wdma_mmr {
431 union dma_hostaddr host_addr;
432 union dma_localaddr local_addr;
433 union dma_control control;
434 union dma_amo_dest amo_dest;
435 union intr_dest intr_dest;
436 union wdma_aux_status aux_status;
437};
438
439union algo_step {
440 uint64_t algo_step_reg;
441 struct {
442 uint64_t alg_step_cnt:16, // 15:0
443 :48; // 63:16
444 };
445};
446
447struct algo_mmr {
448 union dma_amo_dest amo_dest;
449 union intr_dest intr_dest;
450 union {
451 uint64_t algo_offset_reg;
452 struct {
453 uint64_t sram0_offset:7, // 6:0
454 reserved0:1, // 7
455 sram1_offset:7, // 14:8
456 reserved1:1, // 15
457 sram2_offset:7, // 22:16
458 reserved2:14; // 63:23
459 };
460 } sram_offset;
461 union algo_step step;
462};
463
464struct mbcs_mmr {
465 struct cm_mmr cm;
466 uint64_t reserved1[17];
467 struct rdma_mmr rdDma;
468 uint64_t reserved2[25];
469 struct wdma_mmr wrDma;
470 uint64_t reserved3[25];
471 struct algo_mmr algo;
472 uint64_t reserved4[156];
473};
474
475/*
476 * defines
477 */
478#define DEVICE_NAME "mbcs"
479#define MBCS_PART_NUM 0xfff0
480#define MBCS_PART_NUM_ALG0 0xf001
481#define MBCS_MFG_NUM 0x1
482
483struct algoblock {
484 uint64_t amoHostDest;
485 uint64_t amoModType;
486 uint64_t intrHostDest;
487 uint64_t intrVector;
488 uint64_t algoStepCount;
489};
490
491struct getdma {
492 uint64_t hostAddr;
493 uint64_t localAddr;
494 uint64_t bytes;
495 uint64_t DoneAmoEnable;
496 uint64_t DoneIntEnable;
497 uint64_t peerIO;
498 uint64_t amoHostDest;
499 uint64_t amoModType;
500 uint64_t intrHostDest;
501 uint64_t intrVector;
502};
503
504struct putdma {
505 uint64_t hostAddr;
506 uint64_t localAddr;
507 uint64_t bytes;
508 uint64_t DoneAmoEnable;
509 uint64_t DoneIntEnable;
510 uint64_t peerIO;
511 uint64_t amoHostDest;
512 uint64_t amoModType;
513 uint64_t intrHostDest;
514 uint64_t intrVector;
515};
516
517struct mbcs_soft {
518 struct list_head list;
519 struct cx_dev *cxdev;
520 int major;
521 int nasid;
522 void *mmr_base;
523 wait_queue_head_t dmawrite_queue;
524 wait_queue_head_t dmaread_queue;
525 wait_queue_head_t algo_queue;
526 struct sn_irq_info *get_sn_irq;
527 struct sn_irq_info *put_sn_irq;
528 struct sn_irq_info *algo_sn_irq;
529 struct getdma getdma;
530 struct putdma putdma;
531 struct algoblock algo;
532 uint64_t gscr_addr; // pio addr
533 uint64_t ram0_addr; // pio addr
534 uint64_t ram1_addr; // pio addr
535 uint64_t ram2_addr; // pio addr
536 uint64_t debug_addr; // pio addr
537 atomic_t dmawrite_done;
538 atomic_t dmaread_done;
539 atomic_t algo_done;
540 struct semaphore dmawritelock;
541 struct semaphore dmareadlock;
542 struct semaphore algolock;
543};
544
545extern int mbcs_open(struct inode *ip, struct file *fp);
546extern ssize_t mbcs_sram_read(struct file *fp, char *buf, size_t len,
547 loff_t * off);
548extern ssize_t mbcs_sram_write(struct file *fp, const char *buf, size_t len,
549 loff_t * off);
550extern loff_t mbcs_sram_llseek(struct file *filp, loff_t off, int whence);
551extern int mbcs_gscr_mmap(struct file *fp, struct vm_area_struct *vma);
552
553#endif // __MBCS_H__
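
Each MMR in this header is described twice: as a raw 64-bit register and as a named bitfield view inside a union, with MBCS_MMR_GET/MBCS_MMR_SET (note the readback that flushes the PIO write) as the only accessors. mbcs.c applies them in a read-modify-write pattern throughout; a minimal sketch of the idiom, lifted from the rd_dma_go handling in mbcs_getdma_start():

#include "mbcs.h"

/* Start the read-DMA engine: fetch CM_CONTROL, set the "go" bit by name
 * through the bitfield view, and write the whole register back.
 */
static void mbcs_kick_rd_dma(void *mmr_base)
{
	union cm_control cm_control;

	cm_control.cm_control_reg = MBCS_MMR_GET(mmr_base, MBCS_CM_CONTROL);
	cm_control.rd_dma_go = 1;	/* bit 36 per the layout above */
	MBCS_MMR_SET(mmr_base, MBCS_CM_CONTROL, cm_control.cm_control_reg);
}

The same pattern clears the engines (rd_dma_clr, wr_dma_clr, alg_clr, alg_done_clr) in mbcs_hw_init() and in the completion interrupt handler.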
diff --git a/drivers/char/snsc.c b/drivers/char/snsc.c
index ffb9143376bb..e3c0b52d943f 100644
--- a/drivers/char/snsc.c
+++ b/drivers/char/snsc.c
@@ -374,6 +374,7 @@ scdrv_init(void)
 	void *salbuf;
 	struct class_simple *snsc_class;
 	dev_t first_dev, dev;
+	nasid_t event_nasid = ia64_sn_get_console_nasid();
 
 	if (alloc_chrdev_region(&first_dev, 0, numionodes,
 				SYSCTL_BASENAME) < 0) {
@@ -441,6 +442,13 @@ scdrv_init(void)
 			ia64_sn_irtr_intr_enable(scd->scd_nasid,
 						 0 /*ignored */ ,
 						 SAL_IROUTER_INTR_RECV);
+
+			/* on the console nasid, prepare to receive
+			 * system controller environmental events
+			 */
+			if(scd->scd_nasid == event_nasid) {
+				scdrv_event_init(scd);
+			}
 	}
 	return 0;
 }
diff --git a/drivers/char/snsc.h b/drivers/char/snsc.h
index c22c6c55e254..a9efc13cc858 100644
--- a/drivers/char/snsc.h
+++ b/drivers/char/snsc.h
@@ -47,4 +47,44 @@ struct sysctl_data_s {
 	nasid_t scd_nasid;	/* Node on which subchannels are opened. */
 };
 
+
+/* argument types */
+#define IR_ARG_INT		0x00	/* 4-byte integer (big-endian)  */
+#define IR_ARG_ASCII		0x01	/* null-terminated ASCII string */
+#define IR_ARG_UNKNOWN		0x80	/* unknown data type.  The low
+					 * 7 bits will contain the data
+					 * length. */
+#define IR_ARG_UNKNOWN_LENGTH_MASK	0x7f
+
+
+/* system controller event codes */
+#define EV_CLASS_MASK		0xf000ul
+#define EV_SEVERITY_MASK	0x0f00ul
+#define EV_COMPONENT_MASK	0x00fful
+
+#define EV_CLASS_POWER		0x1000ul
+#define EV_CLASS_FAN		0x2000ul
+#define EV_CLASS_TEMP		0x3000ul
+#define EV_CLASS_ENV		0x4000ul
+#define EV_CLASS_TEST_FAULT	0x5000ul
+#define EV_CLASS_TEST_WARNING	0x6000ul
+#define EV_CLASS_PWRD_NOTIFY	0x8000ul
+
+#define EV_SEVERITY_POWER_STABLE	0x0000ul
+#define EV_SEVERITY_POWER_LOW_WARNING	0x0100ul
+#define EV_SEVERITY_POWER_HIGH_WARNING	0x0200ul
+#define EV_SEVERITY_POWER_HIGH_FAULT	0x0300ul
+#define EV_SEVERITY_POWER_LOW_FAULT	0x0400ul
+
+#define EV_SEVERITY_FAN_STABLE		0x0000ul
+#define EV_SEVERITY_FAN_WARNING		0x0100ul
+#define EV_SEVERITY_FAN_FAULT		0x0200ul
+
+#define EV_SEVERITY_TEMP_STABLE		0x0000ul
+#define EV_SEVERITY_TEMP_ADVISORY	0x0100ul
+#define EV_SEVERITY_TEMP_CRITICAL	0x0200ul
+#define EV_SEVERITY_TEMP_FAULT		0x0300ul
+
+void scdrv_event_init(struct sysctl_data_s *);
+
 #endif /* _SN_SYSCTL_H_ */
diff --git a/drivers/char/snsc_event.c b/drivers/char/snsc_event.c
new file mode 100644
index 000000000000..d692af57213a
--- /dev/null
+++ b/drivers/char/snsc_event.c
@@ -0,0 +1,304 @@
1/*
2 * SN Platform system controller communication support
3 *
4 * This file is subject to the terms and conditions of the GNU General Public
5 * License. See the file "COPYING" in the main directory of this archive
6 * for more details.
7 *
8 * Copyright (C) 2004 Silicon Graphics, Inc. All rights reserved.
9 */
10
11/*
12 * System controller event handler
13 *
14 * These routines deal with environmental events arriving from the
15 * system controllers.
16 */
17
18#include <linux/interrupt.h>
19#include <linux/sched.h>
20#include <linux/byteorder/generic.h>
21#include <asm/sn/sn_sal.h>
22#include "snsc.h"
23
24static struct subch_data_s *event_sd;
25
26void scdrv_event(unsigned long);
27DECLARE_TASKLET(sn_sysctl_event, scdrv_event, 0);
28
29/*
30 * scdrv_event_interrupt
31 *
32 * Pull incoming environmental events off the physical link to the
33 * system controller and put them in a temporary holding area in SAL.
34 * Schedule scdrv_event() to move them along to their ultimate
35 * destination.
36 */
37static irqreturn_t
38scdrv_event_interrupt(int irq, void *subch_data, struct pt_regs *regs)
39{
40 struct subch_data_s *sd = subch_data;
41 unsigned long flags;
42 int status;
43
44 spin_lock_irqsave(&sd->sd_rlock, flags);
45 status = ia64_sn_irtr_intr(sd->sd_nasid, sd->sd_subch);
46
47 if ((status > 0) && (status & SAL_IROUTER_INTR_RECV)) {
48 tasklet_schedule(&sn_sysctl_event);
49 }
50 spin_unlock_irqrestore(&sd->sd_rlock, flags);
51 return IRQ_HANDLED;
52}
53
54
55/*
56 * scdrv_parse_event
57 *
58 * Break an event (as read from SAL) into useful pieces so we can decide
59 * what to do with it.
60 */
61static int
62scdrv_parse_event(char *event, int *src, int *code, int *esp_code, char *desc)
63{
64 char *desc_end;
65
66 /* record event source address */
67 *src = be32_to_cpup((__be32 *)event);
68 event += 4; /* move on to event code */
69
70 /* record the system controller's event code */
71 *code = be32_to_cpup((__be32 *)event);
72 event += 4; /* move on to event arguments */
73
74 /* how many arguments are in the packet? */
75 if (*event++ != 2) {
76 /* if not 2, give up */
77 return -1;
78 }
79
80 /* parse out the ESP code */
81 if (*event++ != IR_ARG_INT) {
82 /* not an integer argument, so give up */
83 return -1;
84 }
85 *esp_code = be32_to_cpup((__be32 *)event);
86 event += 4;
87
88 /* parse out the event description */
89 if (*event++ != IR_ARG_ASCII) {
90 /* not an ASCII string, so give up */
91 return -1;
92 }
93 event[CHUNKSIZE-1] = '\0'; /* ensure this string ends! */
94 event += 2; /* skip leading CR/LF */
95 desc_end = desc + sprintf(desc, "%s", event);
96
97 /* strip trailing CR/LF (if any) */
98 for (desc_end--;
99 (desc_end != desc) && ((*desc_end == 0xd) || (*desc_end == 0xa));
100 desc_end--) {
101 *desc_end = '\0';
102 }
103
104 return 0;
105}
106
107
108/*
109 * scdrv_event_severity
110 *
111 * Figure out how urgent a message we should write to the console/syslog
112 * via printk.
113 */
114static char *
115scdrv_event_severity(int code)
116{
117 int ev_class = (code & EV_CLASS_MASK);
118 int ev_severity = (code & EV_SEVERITY_MASK);
119 char *pk_severity = KERN_NOTICE;
120
121 switch (ev_class) {
122 case EV_CLASS_POWER:
123 switch (ev_severity) {
124 case EV_SEVERITY_POWER_LOW_WARNING:
125 case EV_SEVERITY_POWER_HIGH_WARNING:
126 pk_severity = KERN_WARNING;
127 break;
128 case EV_SEVERITY_POWER_HIGH_FAULT:
129 case EV_SEVERITY_POWER_LOW_FAULT:
130 pk_severity = KERN_ALERT;
131 break;
132 }
133 break;
134 case EV_CLASS_FAN:
135 switch (ev_severity) {
136 case EV_SEVERITY_FAN_WARNING:
137 pk_severity = KERN_WARNING;
138 break;
139 case EV_SEVERITY_FAN_FAULT:
140 pk_severity = KERN_CRIT;
141 break;
142 }
143 break;
144 case EV_CLASS_TEMP:
145 switch (ev_severity) {
146 case EV_SEVERITY_TEMP_ADVISORY:
147 pk_severity = KERN_WARNING;
148 break;
149 case EV_SEVERITY_TEMP_CRITICAL:
150 pk_severity = KERN_CRIT;
151 break;
152 case EV_SEVERITY_TEMP_FAULT:
153 pk_severity = KERN_ALERT;
154 break;
155 }
156 break;
157 case EV_CLASS_ENV:
158 pk_severity = KERN_ALERT;
159 break;
160 case EV_CLASS_TEST_FAULT:
161 pk_severity = KERN_ALERT;
162 break;
163 case EV_CLASS_TEST_WARNING:
164 pk_severity = KERN_WARNING;
165 break;
166 case EV_CLASS_PWRD_NOTIFY:
167 pk_severity = KERN_ALERT;
168 break;
169 }
170
171 return pk_severity;
172}
173
174
175/*
176 * scdrv_dispatch_event
177 *
178 * Do the right thing with an incoming event. That's often nothing
179 * more than printing it to the system log. For power-down notifications
180 * we start a graceful shutdown.
181 */
182static void
183scdrv_dispatch_event(char *event, int len)
184{
185 int code, esp_code, src;
186 char desc[CHUNKSIZE];
187 char *severity;
188
189 if (scdrv_parse_event(event, &src, &code, &esp_code, desc) < 0) {
190 /* ignore uninterpretable event */
191 return;
192 }
193
194 /* how urgent is the message? */
195 severity = scdrv_event_severity(code);
196
197 if ((code & EV_CLASS_MASK) == EV_CLASS_PWRD_NOTIFY) {
198 struct task_struct *p;
199
200 /* give a SIGPWR signal to init proc */
201
202 /* first find init's task */
203 read_lock(&tasklist_lock);
204 for_each_process(p) {
205 if (p->pid == 1)
206 break;
207 }
208 if (p) { /* we found init's task */
209 printk(KERN_EMERG "Power off indication received. Initiating power fail sequence...\n");
210 force_sig(SIGPWR, p);
211 } else { /* failed to find init's task - just give message(s) */
212 printk(KERN_WARNING "Failed to find init proc to handle power off!\n");
213 printk("%s|$(0x%x)%s\n", severity, esp_code, desc);
214 }
215 read_unlock(&tasklist_lock);
216 } else {
217 /* print to system log */
218 printk("%s|$(0x%x)%s\n", severity, esp_code, desc);
219 }
220}
221
222
223/*
224 * scdrv_event
225 *
226 * Called as a tasklet when an event arrives from the L1. Read the event
227 * from where it's temporarily stored in SAL and call scdrv_dispatch_event()
228 * to send it on its way. Keep trying to read events until SAL indicates
229 * that there are no more immediately available.
230 */
231void
232scdrv_event(unsigned long dummy)
233{
234 int status;
235 int len;
236 unsigned long flags;
237 struct subch_data_s *sd = event_sd;
238
239 /* anything to read? */
240 len = CHUNKSIZE;
241 spin_lock_irqsave(&sd->sd_rlock, flags);
242 status = ia64_sn_irtr_recv(sd->sd_nasid, sd->sd_subch,
243 sd->sd_rb, &len);
244
245 while (!(status < 0)) {
246 spin_unlock_irqrestore(&sd->sd_rlock, flags);
247 scdrv_dispatch_event(sd->sd_rb, len);
248 len = CHUNKSIZE;
249 spin_lock_irqsave(&sd->sd_rlock, flags);
250 status = ia64_sn_irtr_recv(sd->sd_nasid, sd->sd_subch,
251 sd->sd_rb, &len);
252 }
253 spin_unlock_irqrestore(&sd->sd_rlock, flags);
254}
255
256
257/*
258 * scdrv_event_init
259 *
260 * Sets up a system controller subchannel to begin receiving event
261 * messages. This is sort of a specialized version of scdrv_open()
262 * in drivers/char/sn_sysctl.c.
263 */
264void
265scdrv_event_init(struct sysctl_data_s *scd)
266{
267 int rv;
268
269 event_sd = kmalloc(sizeof (struct subch_data_s), GFP_KERNEL);
270 if (event_sd == NULL) {
271 printk(KERN_WARNING "%s: couldn't allocate subchannel info"
272 " for event monitoring\n", __FUNCTION__);
273 return;
274 }
275
276 /* initialize subch_data_s fields */
277 memset(event_sd, 0, sizeof (struct subch_data_s));
278 event_sd->sd_nasid = scd->scd_nasid;
279 spin_lock_init(&event_sd->sd_rlock);
280
281 /* ask the system controllers to send events to this node */
282 event_sd->sd_subch = ia64_sn_sysctl_event_init(scd->scd_nasid);
283
284 if (event_sd->sd_subch < 0) {
285 kfree(event_sd);
286 printk(KERN_WARNING "%s: couldn't open event subchannel\n",
287 __FUNCTION__);
288 return;
289 }
290
291 /* hook event subchannel up to the system controller interrupt */
292 rv = request_irq(SGI_UART_VECTOR, scdrv_event_interrupt,
293 SA_SHIRQ | SA_INTERRUPT,
294 "system controller events", event_sd);
295 if (rv) {
296 printk(KERN_WARNING "%s: irq request failed (%d)\n",
297 __FUNCTION__, rv);
298 ia64_sn_irtr_close(event_sd->sd_nasid, event_sd->sd_subch);
299 kfree(event_sd);
300 return;
301 }
302}
303
304
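
scdrv_parse_event() above pins down the event layout it expects from the L1: a big-endian source address, a big-endian event code, an argument count that must be 2, an IR_ARG_INT ESP code, and an IR_ARG_ASCII description whose first two bytes (a CR/LF pair) are skipped. A hypothetical, hand-built packet matching that layout (the values are illustrative only) serves mainly as documentation of the format:

#include <linux/string.h>
#include <linux/types.h>
#include <linux/byteorder/generic.h>
#include "snsc.h"

/* Build a sample event buffer in the shape scdrv_parse_event() decodes.
 * Returns the number of bytes written; the contents are made up.
 */
static int build_sample_event(char *buf)
{
	__be32 word;
	char *p = buf;

	word = cpu_to_be32(0x01020304);			/* event source */
	memcpy(p, &word, 4);
	p += 4;
	word = cpu_to_be32(EV_CLASS_TEMP | EV_SEVERITY_TEMP_ADVISORY);
	memcpy(p, &word, 4);				/* event code */
	p += 4;
	*p++ = 2;					/* exactly two arguments */
	*p++ = IR_ARG_INT;				/* arg 1: ESP code */
	word = cpu_to_be32(0x1234);
	memcpy(p, &word, 4);
	p += 4;
	*p++ = IR_ARG_ASCII;				/* arg 2: description */
	*p++ = 0xd;					/* leading CR/LF the parser skips */
	*p++ = 0xa;
	strcpy(p, "node temperature advisory");
	return (p - buf) + strlen(p) + 1;
}

In the driver, scdrv_event() reads packets of this shape into sd->sd_rb (at most CHUNKSIZE bytes) before handing them to scdrv_dispatch_event().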