Diffstat (limited to 'drivers/parisc')
-rw-r--r--  drivers/parisc/Kconfig             |  169
-rw-r--r--  drivers/parisc/Makefile            |   27
-rw-r--r--  drivers/parisc/README.dino         |   28
-rw-r--r--  drivers/parisc/asp.c               |  132
-rw-r--r--  drivers/parisc/ccio-dma.c          | 1593
-rw-r--r--  drivers/parisc/ccio-rm-dma.c       |  201
-rw-r--r--  drivers/parisc/dino.c              | 1044
-rw-r--r--  drivers/parisc/eisa.c              |  464
-rw-r--r--  drivers/parisc/eisa_eeprom.c       |  134
-rw-r--r--  drivers/parisc/eisa_enumerator.c   |  521
-rw-r--r--  drivers/parisc/gsc.c               |  245
-rw-r--r--  drivers/parisc/gsc.h               |   47
-rw-r--r--  drivers/parisc/hppb.c              |  109
-rw-r--r--  drivers/parisc/iommu-helpers.h     |  171
-rw-r--r--  drivers/parisc/iosapic.c           |  921
-rw-r--r--  drivers/parisc/iosapic_private.h   |  188
-rw-r--r--  drivers/parisc/lasi.c              |  240
-rw-r--r--  drivers/parisc/lba_pci.c           | 1649
-rw-r--r--  drivers/parisc/led.c               |  760
-rw-r--r--  drivers/parisc/pdc_stable.c        |  735
-rw-r--r--  drivers/parisc/power.c             |  278
-rw-r--r--  drivers/parisc/sba_iommu.c         | 2165
-rw-r--r--  drivers/parisc/superio.c           |  508
-rw-r--r--  drivers/parisc/wax.c               |  140
24 files changed, 12469 insertions(+), 0 deletions(-)
diff --git a/drivers/parisc/Kconfig b/drivers/parisc/Kconfig
new file mode 100644
index 000000000000..3f5de867acd7
--- /dev/null
+++ b/drivers/parisc/Kconfig
@@ -0,0 +1,169 @@
1menu "Bus options (PCI, PCMCIA, EISA, GSC, ISA)"
2
3config GSC
4 bool "VSC/GSC/HSC bus support"
5 default y
6 help
7 The VSC, GSC and HSC busses were used from the earliest 700-series
8 workstations up to and including the C360/J2240 workstations. They
9 were also used in servers from the E-class to the K-class. They
10 are not found in B1000, C3000, J5000, A500, L1000, N4000 and upwards.
11 If in doubt, say "Y".
12
13config HPPB
14 bool "HP-PB bus support"
15 depends on GSC
16 help
17 The HP-PB bus was used in the Nova class and K-class servers.
18	  If in doubt, say "Y".
19
20config IOMMU_CCIO
21 bool "U2/Uturn I/O MMU"
22 depends on GSC
23 help
24	  Say Y here to enable DMA management routines for the first
25	  generation of PA-RISC cache-coherent machines. This driver programs
26	  the U2/Uturn chip in "Virtual Mode" and uses the I/O MMU.
27
28config GSC_LASI
29 bool "Lasi I/O support"
30 depends on GSC
31 help
32 Say Y here to support the Lasi multifunction chip found in
33 many PA-RISC workstations & servers. It includes interfaces
34 for a parallel port, serial port, NCR 53c710 SCSI, Apricot
35 Ethernet, Harmony audio, PS/2 keyboard & mouse, ISDN, telephony
36 and floppy. Note that you must still enable all the individual
37 drivers for these chips.
38
39config GSC_WAX
40 bool "Wax I/O support"
41 depends on GSC
42 help
43 Say Y here to support the Wax multifunction chip found in some
44 older systems, including B/C/D/R class and 715/64, 715/80 and
45 715/100. Wax includes an EISA adapter, a serial port (not always
46 used), a HIL interface chip and is also known to be used as the
47 GSC bridge for an X.25 GSC card.
48
49config EISA
50 bool "EISA support"
51 depends on GSC
52 help
53 Say Y here if you have an EISA bus in your machine. This code
54 supports both the Mongoose & Wax EISA adapters. It is sadly
55 incomplete and lacks support for card-to-host DMA.
56
57source "drivers/eisa/Kconfig"
58
59config ISA
60 bool "ISA support"
61 depends on EISA
62 help
63 If you want to plug an ISA card into your EISA bus, say Y here.
64 Most people should say N.
65
66config PCI
67 bool "PCI support"
68 help
69 All recent HP machines have PCI slots, and you should say Y here
70 if you have a recent machine. If you are convinced you do not have
71 PCI slots in your machine (eg a 712), then you may say "N" here.
72 Beware that some GSC cards have a Dino onboard and PCI inside them,
73 so it may be safest to say "Y" anyway.
74
75source "drivers/pci/Kconfig"
76
77config GSC_DINO
78 bool "GSCtoPCI/Dino PCI support"
79 depends on PCI && GSC
80 help
81 Say Y here to support the Dino & Cujo GSC to PCI bridges found in
82 machines from the B132 to the C360, the J2240 and the A180. Some
83 GSC/HSC cards (eg gigabit & dual 100 Mbit Ethernet) have a Dino on
84 the card, and you also need to say Y here if you have such a card.
85 Note that Dino also supplies one of the serial ports on certain
86 machines. If in doubt, say Y.
87
88config PCI_LBA
89 bool "LBA/Elroy PCI support"
90 depends on PCI
91 help
92 Say Y here to support the Elroy PCI Lower Bus Adapter. This is
93 present on B, C, J, L and N-class machines with 4-digit model
94 numbers and the A400/A500.
95
96config IOSAPIC
97 bool
98 depends on PCI_LBA
99 default PCI_LBA
100
101config IOMMU_SBA
102 bool
103 depends on PCI_LBA
104 default PCI_LBA
105
106#config PCI_EPIC
107# bool "EPIC/SAGA PCI support"
108# depends on PCI
109# default y
110# help
111# Say Y here for V-class PCI, DMA/IOMMU, IRQ subsystem support.
112
113source "drivers/pcmcia/Kconfig"
114
115source "drivers/pci/hotplug/Kconfig"
116
117endmenu
118
119menu "PA-RISC specific drivers"
120
121config SUPERIO
122 bool "SuperIO (SuckyIO) support"
123 depends on PCI_LBA
124 default y
125 help
126 Say Y here to support the SuperIO chip found in Bxxxx, C3xxx and
127 J5xxx+ machines. This enables IDE, Floppy, Parallel Port, and
128 Serial port on those machines.
129
130config CHASSIS_LCD_LED
131 bool "Chassis LCD and LED support"
132 default y
133 help
134 Say Y here if you want to enable support for the Heartbeat,
135	  Disk/Network activity LEDs on some PA-RISC machines,
136	  or support for the LCD that can be found on more recent machines.
137
138 This has nothing to do with LED State support for A and E class.
139
140 If unsure, say Y.
141
142config PDC_CHASSIS
143 bool "PDC chassis State Panel support"
144 default y
145 help
146 Say Y here if you want to enable support for the LED State front
147 panel as found on E class, and support for the GSP Virtual Front
148	  Panel (LED State and message logging) as found on high-end
149 servers such as A, L and N-class.
150
151 This has nothing to do with Chassis LCD and LED support.
152
153 If unsure, say Y.
154
155config PDC_STABLE
156 tristate "PDC Stable Storage support"
157 depends on SYSFS
158 default y
159 help
160 Say Y here if you want to enable support for accessing Stable Storage
161	  variables (PDC non-volatile variables such as Primary Boot Path,
162 Console Path, Autoboot, Autosearch, etc) through SysFS.
163
164 If unsure, say Y.
165
166 To compile this driver as a module, choose M here.
167 The module will be called pdc_stable.
168
169endmenu
diff --git a/drivers/parisc/Makefile b/drivers/parisc/Makefile
new file mode 100644
index 000000000000..f95cab57133a
--- /dev/null
+++ b/drivers/parisc/Makefile
@@ -0,0 +1,27 @@
1#
2# Makefile for most of the non-PCI devices in PA-RISC machines
3#
4
5# I/O SAPIC is also on IA64 platforms.
6# The two could be merged into a common source some day.
7obj-$(CONFIG_IOSAPIC) += iosapic.o
8obj-$(CONFIG_IOMMU_SBA) += sba_iommu.o
9obj-$(CONFIG_PCI_LBA) += lba_pci.o
10
11# Only use one of them: ccio-rm-dma is for PCX-W systems *only*
12# obj-$(CONFIG_IOMMU_CCIO) += ccio-rm-dma.o
13obj-$(CONFIG_IOMMU_CCIO) += ccio-dma.o
14
15obj-$(CONFIG_GSC) += gsc.o
16
17obj-$(CONFIG_HPPB) += hppb.o
18obj-$(CONFIG_GSC_DINO) += dino.o
19obj-$(CONFIG_GSC_LASI) += lasi.o asp.o
20obj-$(CONFIG_GSC_WAX) += wax.o
21obj-$(CONFIG_EISA) += eisa.o eisa_enumerator.o eisa_eeprom.o
22
23obj-$(CONFIG_SUPERIO) += superio.o
24obj-$(CONFIG_CHASSIS_LCD_LED) += led.o
25obj-$(CONFIG_PDC_STABLE) += pdc_stable.o
26obj-y += power.o
27
diff --git a/drivers/parisc/README.dino b/drivers/parisc/README.dino
new file mode 100644
index 000000000000..097324f34bbe
--- /dev/null
+++ b/drivers/parisc/README.dino
@@ -0,0 +1,28 @@
1/*
2** HP VISUALIZE Workstation PCI Bus Defect
3**
4** "HP has discovered a potential system defect that can affect
5** the behavior of five models of HP VISUALIZE workstations when
6** equipped with third-party or customer-installed PCI I/O expansion
7** cards. The defect is limited to the HP C180, C160, C160L, B160L,
8** and B132L VISUALIZE workstations, and will only be encountered
9** when data is transmitted through PCI I/O expansion cards on the
10** PCI bus. HP-supplied graphics cards that utilize the PCI bus are
11** not affected."
12**
13** REVISIT: "go/pci_defect" link below is stale.
14** HP Internal can use <http://hpfcdma.fc.hp.com:80/Dino/>
15**
16** Product First Good Serial Number
17** C200/C240 (US) US67350000
18** B132L+/B180 (US) US67390000
19** C200 (Europe) 3713G01000
20** B180L (Europe) 3720G01000
21**
22** Note that many boards were fixed/replaced under a free replacement
23** program. Assume a machine is only "suspect" until proven otherwise.
24**
25** "The pci_check program will also be available as application
26** patch PHSS_12295"
27*/
28
diff --git a/drivers/parisc/asp.c b/drivers/parisc/asp.c
new file mode 100644
index 000000000000..388609967133
--- /dev/null
+++ b/drivers/parisc/asp.c
@@ -0,0 +1,132 @@
1/*
2 * ASP Device Driver
3 *
4 * (c) Copyright 2000 The Puffin Group Inc.
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation; either version 2 of the License, or
9 * (at your option) any later version.
10 *
11 * by Helge Deller <deller@gmx.de>
12 */
13
14#include <linux/errno.h>
15#include <linux/init.h>
16#include <linux/interrupt.h>
17#include <linux/module.h>
18#include <linux/slab.h>
19#include <linux/types.h>
20#include <asm/io.h>
21#include <asm/led.h>
22
23#include "gsc.h"
24
25#define ASP_GSC_IRQ 3 /* hardcoded interrupt for GSC */
26
27#define ASP_VER_OFFSET 0x20 /* offset of ASP version */
28
29#define ASP_LED_ADDR 0xf0800020
30
31#define VIPER_INT_WORD 0xFFFBF088 /* addr of viper interrupt word */
32
33static struct gsc_asic asp;
34
35static void asp_choose_irq(struct parisc_device *dev, void *ctrl)
36{
37 int irq;
38
39 switch (dev->id.sversion) {
40 case 0x71: irq = 9; break; /* SCSI */
41 case 0x72: irq = 8; break; /* LAN */
42 case 0x73: irq = 1; break; /* HIL */
43 case 0x74: irq = 7; break; /* Centronics */
44 case 0x75: irq = (dev->hw_path == 4) ? 5 : 6; break; /* RS232 */
45 case 0x76: irq = 10; break; /* EISA BA */
46 case 0x77: irq = 11; break; /* Graphics1 */
47 case 0x7a: irq = 13; break; /* Audio (Bushmaster) */
48 case 0x7b: irq = 13; break; /* Audio (Scorpio) */
49 case 0x7c: irq = 3; break; /* FW SCSI */
50 case 0x7d: irq = 4; break; /* FDDI */
51 case 0x7f: irq = 13; break; /* Audio (Outfield) */
52 default: return; /* Unknown */
53 }
54
55 gsc_asic_assign_irq(ctrl, irq, &dev->irq);
56
57 switch (dev->id.sversion) {
58 case 0x73: irq = 2; break; /* i8042 High-priority */
59 case 0x76: irq = 0; break; /* EISA BA */
60 default: return; /* Other */
61 }
62
63 gsc_asic_assign_irq(ctrl, irq, &dev->aux_irq);
64}
65
66/* There are two register ranges we're interested in. Interrupt /
67 * Status / LED are at 0xf080xxxx and Asp special registers are at
68 * 0xf082fxxx. PDC only tells us that Asp is at 0xf082f000, so for
69 * the purposes of interrupt handling, we have to tell other bits of
70 * the kernel to look at the other registers.
71 */
72#define ASP_INTERRUPT_ADDR 0xf0800000
73
74int __init
75asp_init_chip(struct parisc_device *dev)
76{
77 struct gsc_irq gsc_irq;
78 int ret;
79
80 asp.version = gsc_readb(dev->hpa + ASP_VER_OFFSET) & 0xf;
81 asp.name = (asp.version == 1) ? "Asp" : "Cutoff";
82 asp.hpa = ASP_INTERRUPT_ADDR;
83
84 printk(KERN_INFO "%s version %d at 0x%lx found.\n",
85 asp.name, asp.version, dev->hpa);
86
87 /* the IRQ ASP should use */
88 ret = -EBUSY;
89 dev->irq = gsc_claim_irq(&gsc_irq, ASP_GSC_IRQ);
90 if (dev->irq < 0) {
91 printk(KERN_ERR "%s(): cannot get GSC irq\n", __FUNCTION__);
92 goto out;
93 }
94
95 asp.eim = ((u32) gsc_irq.txn_addr) | gsc_irq.txn_data;
96
97 ret = request_irq(gsc_irq.irq, gsc_asic_intr, 0, "asp", &asp);
98 if (ret < 0)
99 goto out;
100
101 /* Program VIPER to interrupt on the ASP irq */
102 gsc_writel((1 << (31 - ASP_GSC_IRQ)),VIPER_INT_WORD);
103
104 /* Done init'ing, register this driver */
105 ret = gsc_common_setup(dev, &asp);
106 if (ret)
107 goto out;
108
109 gsc_fixup_irqs(dev, &asp, asp_choose_irq);
110 /* Mongoose is a sibling of Asp, not a child... */
111 gsc_fixup_irqs(parisc_parent(dev), &asp, asp_choose_irq);
112
113 /* initialize the chassis LEDs */
114#ifdef CONFIG_CHASSIS_LCD_LED
115 register_led_driver(DISPLAY_MODEL_OLD_ASP, LED_CMD_REG_NONE,
116 ASP_LED_ADDR);
117#endif
118
119 out:
120 return ret;
121}
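
/*
** Worked example of the VIPER write above: ASP_GSC_IRQ is 3, so
** 1 << (31 - 3) sets bit 28 of the word, i.e. the value 0x10000000
** is written to VIPER_INT_WORD so that VIPER raises the ASP irq.
*/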
122
123static struct parisc_device_id asp_tbl[] = {
124 { HPHW_BA, HVERSION_REV_ANY_ID, HVERSION_ANY_ID, 0x00070 },
125 { 0, }
126};
127
128struct parisc_driver asp_driver = {
129 .name = "Asp",
130 .id_table = asp_tbl,
131 .probe = asp_init_chip,
132};
diff --git a/drivers/parisc/ccio-dma.c b/drivers/parisc/ccio-dma.c
new file mode 100644
index 000000000000..0e98a9d9834c
--- /dev/null
+++ b/drivers/parisc/ccio-dma.c
@@ -0,0 +1,1593 @@
1/*
2** ccio-dma.c:
3** DMA management routines for first generation cache-coherent machines.
4** Program U2/Uturn in "Virtual Mode" and use the I/O MMU.
5**
6** (c) Copyright 2000 Grant Grundler
7** (c) Copyright 2000 Ryan Bradetich
8** (c) Copyright 2000 Hewlett-Packard Company
9**
10** This program is free software; you can redistribute it and/or modify
11** it under the terms of the GNU General Public License as published by
12** the Free Software Foundation; either version 2 of the License, or
13** (at your option) any later version.
14**
15**
16** "Real Mode" operation refers to U2/Uturn chip operation.
17** U2/Uturn were designed to perform coherency checks w/o using
18** the I/O MMU - basically what x86 does.
19**
20** Philipp Rumpf has a "Real Mode" driver for PCX-W machines at:
21** CVSROOT=:pserver:anonymous@198.186.203.37:/cvsroot/linux-parisc
22** cvs -z3 co linux/arch/parisc/kernel/dma-rm.c
23**
24** I've rewritten his code to work under TPG's tree. See ccio-rm-dma.c.
25**
26** Drawbacks of using Real Mode are:
27** o outbound DMA is slower - U2 won't prefetch data (GSC+ XQL signal).
28** o Inbound DMA less efficient - U2 can't use DMA_FAST attribute.
29** o Ability to do scatter/gather in HW is lost.
30** o Doesn't work under PCX-U/U+ machines since they didn't follow
31** the coherency design originally worked out. Only PCX-W does.
32*/
33
34#include <linux/config.h>
35#include <linux/types.h>
36#include <linux/init.h>
37#include <linux/mm.h>
38#include <linux/spinlock.h>
39#include <linux/slab.h>
40#include <linux/string.h>
41#include <linux/pci.h>
42#include <linux/reboot.h>
43
44#include <asm/byteorder.h>
45#include <asm/cache.h> /* for L1_CACHE_BYTES */
46#include <asm/uaccess.h>
47#include <asm/page.h>
48#include <asm/dma.h>
49#include <asm/io.h>
50#include <asm/hardware.h> /* for register_module() */
51#include <asm/parisc-device.h>
52
53/*
54** Choose "ccio" since that's what HP-UX calls it.
55** Make it easier for folks to migrate from one to the other :^)
56*/
57#define MODULE_NAME "ccio"
58
59#undef DEBUG_CCIO_RES
60#undef DEBUG_CCIO_RUN
61#undef DEBUG_CCIO_INIT
62#undef DEBUG_CCIO_RUN_SG
63
64#ifdef CONFIG_PROC_FS
65/*
66 * CCIO_SEARCH_TIME can help measure how fast the bitmap search is.
67 * It impacts performance though - ditch it if you don't use it.
68 */
69#define CCIO_SEARCH_TIME
70#undef CCIO_MAP_STATS
71#else
72#undef CCIO_SEARCH_TIME
73#undef CCIO_MAP_STATS
74#endif
75
76#include <linux/proc_fs.h>
77#include <asm/runway.h> /* for proc_runway_root */
78
79#ifdef DEBUG_CCIO_INIT
80#define DBG_INIT(x...) printk(x)
81#else
82#define DBG_INIT(x...)
83#endif
84
85#ifdef DEBUG_CCIO_RUN
86#define DBG_RUN(x...) printk(x)
87#else
88#define DBG_RUN(x...)
89#endif
90
91#ifdef DEBUG_CCIO_RES
92#define DBG_RES(x...) printk(x)
93#else
94#define DBG_RES(x...)
95#endif
96
97#ifdef DEBUG_CCIO_RUN_SG
98#define DBG_RUN_SG(x...) printk(x)
99#else
100#define DBG_RUN_SG(x...)
101#endif
102
103#define CCIO_INLINE /* inline */
104#define WRITE_U32(value, addr) gsc_writel(value, (u32 *)(addr))
105#define READ_U32(addr) gsc_readl((u32 *)(addr))
106
107#define U2_IOA_RUNWAY 0x580
108#define U2_BC_GSC 0x501
109#define UTURN_IOA_RUNWAY 0x581
110#define UTURN_BC_GSC 0x502
111
112#define IOA_NORMAL_MODE 0x00020080 /* IO_CONTROL to turn on CCIO */
113#define CMD_TLB_DIRECT_WRITE 35 /* IO_COMMAND for I/O TLB Writes */
114#define CMD_TLB_PURGE 33 /* IO_COMMAND to Purge I/O TLB entry */
115
116struct ioa_registers {
117 /* Runway Supervisory Set */
118 volatile int32_t unused1[12];
119 volatile uint32_t io_command; /* Offset 12 */
120 volatile uint32_t io_status; /* Offset 13 */
121 volatile uint32_t io_control; /* Offset 14 */
122 volatile int32_t unused2[1];
123
124 /* Runway Auxiliary Register Set */
125 volatile uint32_t io_err_resp; /* Offset 0 */
126 volatile uint32_t io_err_info; /* Offset 1 */
127 volatile uint32_t io_err_req; /* Offset 2 */
128 volatile uint32_t io_err_resp_hi; /* Offset 3 */
129 volatile uint32_t io_tlb_entry_m; /* Offset 4 */
130 volatile uint32_t io_tlb_entry_l; /* Offset 5 */
131 volatile uint32_t unused3[1];
132 volatile uint32_t io_pdir_base; /* Offset 7 */
133 volatile uint32_t io_io_low_hv; /* Offset 8 */
134 volatile uint32_t io_io_high_hv; /* Offset 9 */
135 volatile uint32_t unused4[1];
136 volatile uint32_t io_chain_id_mask; /* Offset 11 */
137 volatile uint32_t unused5[2];
138 volatile uint32_t io_io_low; /* Offset 14 */
139 volatile uint32_t io_io_high; /* Offset 15 */
140};
141
142/*
143** IOA Registers
144** -------------
145**
146** Runway IO_CONTROL Register (+0x38)
147**
148** The Runway IO_CONTROL register controls the forwarding of transactions.
149**
150** | 0 ... 13 | 14 15 | 16 ... 21 | 22 | 23 24 | 25 ... 31 |
151** | HV | TLB | reserved | HV | mode | reserved |
152**
153** o mode field indicates the address translation of transactions
154** forwarded from Runway to GSC+:
155** Mode Name Value Definition
156** Off (default) 0 Opaque to matching addresses.
157** Include 1 Transparent for matching addresses.
158** Peek 3 Map matching addresses.
159**
160** + "Off" mode: Runway transactions which match the I/O range
161** specified by the IO_IO_LOW/IO_IO_HIGH registers will be ignored.
162** + "Include" mode: all addresses within the I/O range specified
163** by the IO_IO_LOW and IO_IO_HIGH registers are transparently
164** forwarded. This is the I/O Adapter's normal operating mode.
165** + "Peek" mode: used during system configuration to initialize the
166** GSC+ bus. Runway Write_Shorts in the address range specified by
167** IO_IO_LOW and IO_IO_HIGH are forwarded through the I/O Adapter
168** *AND* the GSC+ address is remapped to the Broadcast Physical
169** Address space by setting the 14 high order address bits of the
170** 32 bit GSC+ address to ones.
171**
172** o TLB field affects transactions which are forwarded from GSC+ to Runway.
173** "Real" mode is the poweron default.
174**
175** TLB Mode Value Description
176** Real 0 No TLB translation. Address is directly mapped and the
177** virtual address is composed of selected physical bits.
178** Error 1 Software fills the TLB manually.
179** Normal 2 IOA fetches IO TLB misses from IO PDIR (in host memory).
180**
181**
182** IO_IO_LOW_HV +0x60 (HV dependent)
183** IO_IO_HIGH_HV +0x64 (HV dependent)
184** IO_IO_LOW +0x78 (Architected register)
185** IO_IO_HIGH +0x7c (Architected register)
186**
187** IO_IO_LOW and IO_IO_HIGH set the lower and upper bounds of the
188** I/O Adapter address space, respectively.
189**
190** 0 ... 7 | 8 ... 15 | 16 ... 31 |
191** 11111111 | 11111111 | address |
192**
193** Each LOW/HIGH pair describes a disjoint address space region.
194** (2 per GSC+ port). Each incoming Runway transaction address is compared
195** with both sets of LOW/HIGH registers. If the address is in the range
196** greater than or equal to IO_IO_LOW and less than IO_IO_HIGH, the transaction
197** is forwarded to the respective GSC+ bus.
198** Specify IO_IO_LOW equal to or greater than IO_IO_HIGH to avoid specifying
199** an address space region.
200**
201** In order for a Runway address to reside within GSC+ extended address space:
202** Runway Address [0:7] must identically compare to 8'b11111111
203** Runway Address [8:11] must be equal to IO_IO_LOW(_HV)[16:19]
204** Runway Address [12:23] must be greater than or equal to
205** IO_IO_LOW(_HV)[20:31] and less than IO_IO_HIGH(_HV)[20:31].
206** Runway Address [24:39] is not used in the comparison.
207**
208** When the Runway transaction is forwarded to GSC+, the GSC+ address is
209** as follows:
210** GSC+ Address[0:3] 4'b1111
211** GSC+ Address[4:29] Runway Address[12:37]
212** GSC+ Address[30:31] 2'b00
213**
214** All 4 Low/High registers must be initialized (by PDC) once the lower bus
215** is interrogated and address space is defined. The operating system will
216** modify the architectural IO_IO_LOW and IO_IO_HIGH registers following
217** the PDC initialization. However, the hardware version dependent IO_IO_LOW
218** and IO_IO_HIGH registers should not be subsequently altered by the OS.
219**
220** Writes to both sets of registers will take effect immediately, bypassing
221** the queues, which ensures that subsequent Runway transactions are checked
222** against the updated bounds values. However reads are queued, introducing
223** the possibility of a read being bypassed by a subsequent write to the same
224** register. This sequence can be avoided by having software wait for read
225** returns before issuing subsequent writes.
226*/
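
/*
** A minimal sketch (not part of the driver) of the IO_IO_LOW/IO_IO_HIGH
** bounds rule described above, assuming the comparison fields of the
** Runway address and of both registers have already been extracted into
** plain 32-bit values. The IOA hardware performs this check itself;
** this only makes the rule concrete.
*/
static inline int runway_in_ioa_range(u32 addr_bits, u32 io_io_low,
				      u32 io_io_high)
{
	/* forward iff IO_IO_LOW <= addr < IO_IO_HIGH; programming
	** IO_IO_LOW >= IO_IO_HIGH therefore disables the region */
	return (addr_bits >= io_io_low) && (addr_bits < io_io_high);
}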
227
228struct ioc {
229 struct ioa_registers *ioc_hpa; /* I/O MMU base address */
230 u8 *res_map; /* resource map, bit == pdir entry */
231 u64 *pdir_base; /* physical base address */
232 u32 pdir_size; /* bytes, function of IOV Space size */
233 u32 res_hint; /* next available IOVP -
234 circular search */
235 u32 res_size; /* size of resource map in bytes */
236 spinlock_t res_lock;
237
238#ifdef CCIO_SEARCH_TIME
239#define CCIO_SEARCH_SAMPLE 0x100
240 unsigned long avg_search[CCIO_SEARCH_SAMPLE];
241 unsigned long avg_idx; /* current index into avg_search */
242#endif
243#ifdef CCIO_MAP_STATS
244 unsigned long used_pages;
245 unsigned long msingle_calls;
246 unsigned long msingle_pages;
247 unsigned long msg_calls;
248 unsigned long msg_pages;
249 unsigned long usingle_calls;
250 unsigned long usingle_pages;
251 unsigned long usg_calls;
252 unsigned long usg_pages;
253#endif
254 unsigned short cujo20_bug;
255
256 /* STUFF We don't need in performance path */
257 u32 chainid_shift; /* specify bit location of chain_id */
258 struct ioc *next; /* Linked list of discovered iocs */
259 const char *name; /* device name from firmware */
260	unsigned int hw_path;           /* the hardware path this ioc is associated with */
261 struct pci_dev *fake_pci_dev; /* the fake pci_dev for non-pci devs */
262 struct resource mmio_region[2]; /* The "routed" MMIO regions */
263};
264
265static struct ioc *ioc_list;
266static int ioc_count;
267
268/**************************************************************
269*
270* I/O Pdir Resource Management
271*
272* Bits set in the resource map are in use.
273* Each bit can represent a number of pages.
274* LSbs represent lower addresses (IOVA's).
275*
276* This was copied from sba_iommu.c. Don't try to unify
277* the two resource managers unless a way to have different
278* allocation policies is also adjusted. We'd like to avoid
279* I/O TLB thrashing by having resource allocation policy
280* match the I/O TLB replacement policy.
281*
282***************************************************************/
283#define IOVP_SIZE PAGE_SIZE
284#define IOVP_SHIFT PAGE_SHIFT
285#define IOVP_MASK PAGE_MASK
286
287/* Convert from IOVP to IOVA and vice versa. */
288#define CCIO_IOVA(iovp,offset) ((iovp) | (offset))
289#define CCIO_IOVP(iova) ((iova) & IOVP_MASK)
290
291#define PDIR_INDEX(iovp) ((iovp)>>IOVP_SHIFT)
292#define MKIOVP(pdir_idx) ((long)(pdir_idx) << IOVP_SHIFT)
293#define MKIOVA(iovp,offset) (dma_addr_t)((long)iovp | (long)offset)
294#define ROUNDUP(x,y) ((x + ((y)-1)) & ~((y)-1))
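
/*
** Worked example of the macros above, assuming 4kB pages: for iova
** 0x00123456, CCIO_IOVP() masks off the page offset to give iovp
** 0x00123000, PDIR_INDEX() selects pdir slot 0x123, and
** CCIO_IOVA(0x00123000, 0x456) reassembles the original address.
*/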
295
296/*
297** Don't worry about the 150% average search length on a miss.
298** If the search wraps around, and passes the res_hint, it will
299** cause the kernel to panic anyhow.
300*/
301#define CCIO_SEARCH_LOOP(ioc, res_idx, mask, size) \
302 for(; res_ptr < res_end; ++res_ptr) { \
303 if(0 == (*res_ptr & mask)) { \
304 *res_ptr |= mask; \
305 res_idx = (unsigned int)((unsigned long)res_ptr - (unsigned long)ioc->res_map); \
306 ioc->res_hint = res_idx + (size >> 3); \
307 goto resource_found; \
308 } \
309 }
310
311#define CCIO_FIND_FREE_MAPPING(ioa, res_idx, mask, size) \
312 u##size *res_ptr = (u##size *)&((ioc)->res_map[ioa->res_hint & ~((size >> 3) - 1)]); \
313 u##size *res_end = (u##size *)&(ioc)->res_map[ioa->res_size]; \
314 CCIO_SEARCH_LOOP(ioc, res_idx, mask, size); \
315 res_ptr = (u##size *)&(ioc)->res_map[0]; \
316 CCIO_SEARCH_LOOP(ioa, res_idx, mask, size);
317
318/*
319** Find available bit in this ioa's resource map.
320** Use a "circular" search:
321** o Most IOVA's are "temporary" - avg search time should be small.
322** o keep a history of what happened for debugging
323** o KISS.
324**
325** Perf optimizations:
326** o search for log2(size) bits at a time.
327** o search for available resource bits using byte/word/whatever.
328** o use different search for "large" (eg > 4 pages) or "very large"
329** (eg > 16 pages) mappings.
330*/
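
/*
** An equivalent sketch (hypothetical helper, not a drop-in replacement)
** of the circular search that CCIO_FIND_FREE_MAPPING expands to, shown
** for the byte-granular case where one free byte == 8 free pages:
*/
static int ccio_search_sketch(struct ioc *ioc)
{
	unsigned int i, start = ioc->res_hint;

	for (i = 0; i < ioc->res_size; i++) {
		unsigned int idx = (start + i) % ioc->res_size;

		if (ioc->res_map[idx] == 0) {		/* 8 free pages */
			ioc->res_map[idx] = 0xff;	/* claim them */
			ioc->res_hint = idx + 1;	/* resume here next time */
			return idx << 3;		/* bit address */
		}
	}
	return -1;	/* the real code panics here instead */
}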
331
332/**
333 * ccio_alloc_range - Allocate pages in the ioc's resource map.
334 * @ioc: The I/O Controller.
335 * @pages_needed: The requested number of pages to be mapped into the
336 * I/O Pdir...
337 *
338 * This function searches the resource map of the ioc to locate a range
339 * of available pages for the requested size.
340 */
341static int
342ccio_alloc_range(struct ioc *ioc, size_t size)
343{
344 unsigned int pages_needed = size >> IOVP_SHIFT;
345 unsigned int res_idx;
346#ifdef CCIO_SEARCH_TIME
347 unsigned long cr_start = mfctl(16);
348#endif
349
350 BUG_ON(pages_needed == 0);
351 BUG_ON((pages_needed * IOVP_SIZE) > DMA_CHUNK_SIZE);
352
353 DBG_RES("%s() size: %d pages_needed %d\n",
354 __FUNCTION__, size, pages_needed);
355
356 /*
357 ** "seek and ye shall find"...praying never hurts either...
358 ** ggg sacrifices another 710 to the computer gods.
359 */
360
361 if (pages_needed <= 8) {
362 /*
363 * LAN traffic will not thrash the TLB IFF the same NIC
364	 * uses 8 adjacent pages to map separate payload data.
365 * ie the same byte in the resource bit map.
366 */
367#if 0
368	/* FIXME: bit search should shift its way through
369 * an unsigned long - not byte at a time. As it is now,
370 * we effectively allocate this byte to this mapping.
371 */
372 unsigned long mask = ~(~0UL >> pages_needed);
373 CCIO_FIND_FREE_MAPPING(ioc, res_idx, mask, 8);
374#else
375 CCIO_FIND_FREE_MAPPING(ioc, res_idx, 0xff, 8);
376#endif
377 } else if (pages_needed <= 16) {
378 CCIO_FIND_FREE_MAPPING(ioc, res_idx, 0xffff, 16);
379 } else if (pages_needed <= 32) {
380 CCIO_FIND_FREE_MAPPING(ioc, res_idx, ~(unsigned int)0, 32);
381#ifdef __LP64__
382 } else if (pages_needed <= 64) {
383 CCIO_FIND_FREE_MAPPING(ioc, res_idx, ~0UL, 64);
384#endif
385 } else {
386 panic("%s: %s() Too many pages to map. pages_needed: %u\n",
387 __FILE__, __FUNCTION__, pages_needed);
388 }
389
390 panic("%s: %s() I/O MMU is out of mapping resources.\n", __FILE__,
391 __FUNCTION__);
392
393resource_found:
394
395 DBG_RES("%s() res_idx %d res_hint: %d\n",
396 __FUNCTION__, res_idx, ioc->res_hint);
397
398#ifdef CCIO_SEARCH_TIME
399 {
400 unsigned long cr_end = mfctl(16);
401 unsigned long tmp = cr_end - cr_start;
402 /* check for roll over */
403 cr_start = (cr_end < cr_start) ? -(tmp) : (tmp);
404 }
405 ioc->avg_search[ioc->avg_idx++] = cr_start;
406 ioc->avg_idx &= CCIO_SEARCH_SAMPLE - 1;
407#endif
408#ifdef CCIO_MAP_STATS
409 ioc->used_pages += pages_needed;
410#endif
411 /*
412 ** return the bit address.
413 */
414 return res_idx << 3;
415}
416
417#define CCIO_FREE_MAPPINGS(ioc, res_idx, mask, size) \
418 u##size *res_ptr = (u##size *)&((ioc)->res_map[res_idx]); \
419 BUG_ON((*res_ptr & mask) != mask); \
420 *res_ptr &= ~(mask);
421
422/**
423 * ccio_free_range - Free pages from the ioc's resource map.
424 * @ioc: The I/O Controller.
425 * @iova: The I/O Virtual Address.
426 * @pages_mapped: The requested number of pages to be freed from the
427 * I/O Pdir.
428 *
429 * This function frees the resources allocated for the iova.
430 */
431static void
432ccio_free_range(struct ioc *ioc, dma_addr_t iova, unsigned long pages_mapped)
433{
434 unsigned long iovp = CCIO_IOVP(iova);
435 unsigned int res_idx = PDIR_INDEX(iovp) >> 3;
436
437 BUG_ON(pages_mapped == 0);
438 BUG_ON((pages_mapped * IOVP_SIZE) > DMA_CHUNK_SIZE);
439 BUG_ON(pages_mapped > BITS_PER_LONG);
440
441 DBG_RES("%s(): res_idx: %d pages_mapped %d\n",
442 __FUNCTION__, res_idx, pages_mapped);
443
444#ifdef CCIO_MAP_STATS
445 ioc->used_pages -= pages_mapped;
446#endif
447
448 if(pages_mapped <= 8) {
449#if 0
450 /* see matching comments in alloc_range */
451 unsigned long mask = ~(~0UL >> pages_mapped);
452 CCIO_FREE_MAPPINGS(ioc, res_idx, mask, 8);
453#else
454 CCIO_FREE_MAPPINGS(ioc, res_idx, 0xff, 8);
455#endif
456 } else if(pages_mapped <= 16) {
457 CCIO_FREE_MAPPINGS(ioc, res_idx, 0xffff, 16);
458 } else if(pages_mapped <= 32) {
459 CCIO_FREE_MAPPINGS(ioc, res_idx, ~(unsigned int)0, 32);
460#ifdef __LP64__
461 } else if(pages_mapped <= 64) {
462 CCIO_FREE_MAPPINGS(ioc, res_idx, ~0UL, 64);
463#endif
464 } else {
465 panic("%s:%s() Too many pages to unmap.\n", __FILE__,
466 __FUNCTION__);
467 }
468}
469
470/****************************************************************
471**
472** CCIO dma_ops support routines
473**
474*****************************************************************/
475
476typedef unsigned long space_t;
477#define KERNEL_SPACE 0
478
479/*
480** DMA "Page Type" and Hints
481** o if SAFE_DMA isn't set, mapping is for FAST_DMA. SAFE_DMA should be
482** set for subcacheline DMA transfers since we don't want to damage the
483** other part of a cacheline.
484** o SAFE_DMA must be set for "memory" allocated via pci_alloc_consistent().
485** This bit tells U2 to do R/M/W for partial cachelines. "Streaming"
486** data can avoid this if the mapping covers full cache lines.
487** o STOP_MOST is needed for atomicity across cachelines.
488** Apparently only "some EISA devices" need this.
489** Using CONFIG_ISA is a hack. Only the IOA with EISA under it needs
490** to use this hint iff the EISA device needs this feature.
491** According to the U2 ERS, STOP_MOST enabled pages hurt performance.
492** o PREFETCH should *not* be set for cases like Multiple PCI devices
493** behind GSCtoPCI (dino) bus converter. Only one cacheline per GSC
494** device can be fetched and multiple DMA streams will thrash the
495** prefetch buffer and burn memory bandwidth. See 6.7.3 "Prefetch Rules
496** and Invalidation of Prefetch Entries".
497**
498** FIXME: the default hints need to be per GSC device - not global.
499**
500** HP-UX dorks: the linux device driver programming model is totally
501** different from HP-UX's. HP-UX always sets HINT_PREFETCH since its
502** drivers do special things to work on non-coherent platforms...linux has to
503** be much more careful with this.
504*/
505#define IOPDIR_VALID 0x01UL
506#define HINT_SAFE_DMA 0x02UL /* used for pci_alloc_consistent() pages */
507#ifdef CONFIG_EISA
508#define HINT_STOP_MOST 0x04UL /* LSL support */
509#else
510#define HINT_STOP_MOST 0x00UL /* only needed for "some EISA devices" */
511#endif
512#define HINT_UDPATE_ENB 0x08UL /* not used/supported by U2 */
513#define HINT_PREFETCH 0x10UL /* for outbound pages which are not SAFE */
514
515
516/*
517** Use direction (ie PCI_DMA_TODEVICE) to pick hint.
518** ccio_alloc_consistent() depends on this to get SAFE_DMA
519** when it passes in BIDIRECTIONAL flag.
520*/
521static u32 hint_lookup[] = {
522 [PCI_DMA_BIDIRECTIONAL] = HINT_STOP_MOST | HINT_SAFE_DMA | IOPDIR_VALID,
523 [PCI_DMA_TODEVICE] = HINT_STOP_MOST | HINT_PREFETCH | IOPDIR_VALID,
524 [PCI_DMA_FROMDEVICE] = HINT_STOP_MOST | IOPDIR_VALID,
525};
526
527/**
528 * ccio_io_pdir_entry - Initialize an I/O Pdir.
529 * @pdir_ptr: A pointer into I/O Pdir.
530 * @sid: The Space Identifier.
531 * @vba: The virtual address.
532 * @hints: The DMA Hint.
533 *
534 * Given a virtual address (vba, arg2) and space id, (sid, arg1),
535 * load the I/O PDIR entry pointed to by pdir_ptr (arg0). Each IO Pdir
536 * entry consists of 8 bytes as shown below (MSB == bit 0):
537 *
538 *
539 * WORD 0:
540 * +------+----------------+-----------------------------------------------+
541 * | Phys | Virtual Index | Phys |
542 * | 0:3 | 0:11 | 4:19 |
543 * |4 bits| 12 bits | 16 bits |
544 * +------+----------------+-----------------------------------------------+
545 * WORD 1:
546 * +-----------------------+-----------------------------------------------+
547 * | Phys | Rsvd | Prefetch |Update |Rsvd |Lock |Safe |Valid |
548 * | 20:39 | | Enable |Enable | |Enable|DMA | |
549 * | 20 bits | 5 bits | 1 bit |1 bit |2 bits|1 bit |1 bit |1 bit |
550 * +-----------------------+-----------------------------------------------+
551 *
552 * The virtual index field is filled with the results of the LCI
553 * (Load Coherence Index) instruction. The 8 bits used for the virtual
554 * index are bits 12:19 of the value returned by LCI.
555 */
556void CCIO_INLINE
557ccio_io_pdir_entry(u64 *pdir_ptr, space_t sid, unsigned long vba,
558 unsigned long hints)
559{
560 register unsigned long pa;
561 register unsigned long ci; /* coherent index */
562
563 /* We currently only support kernel addresses */
564 BUG_ON(sid != KERNEL_SPACE);
565
566 mtsp(sid,1);
567
568 /*
569 ** WORD 1 - low order word
570 ** "hints" parm includes the VALID bit!
571 ** "dep" clobbers the physical address offset bits as well.
572 */
573 pa = virt_to_phys(vba);
574 asm volatile("depw %1,31,12,%0" : "+r" (pa) : "r" (hints));
575 ((u32 *)pdir_ptr)[1] = (u32) pa;
576
577 /*
578 ** WORD 0 - high order word
579 */
580
581#ifdef __LP64__
582 /*
583 ** get bits 12:15 of physical address
584 ** shift bits 16:31 of physical address
585 ** and deposit them
586 */
587 asm volatile ("extrd,u %1,15,4,%0" : "=r" (ci) : "r" (pa));
588 asm volatile ("extrd,u %1,31,16,%0" : "+r" (pa) : "r" (pa));
589 asm volatile ("depd %1,35,4,%0" : "+r" (pa) : "r" (ci));
590#else
591 pa = 0;
592#endif
593 /*
594 ** get CPU coherency index bits
595 ** Grab virtual index [0:11]
596 ** Deposit virt_idx bits into I/O PDIR word
597 */
598 asm volatile ("lci 0(%%sr1, %1), %0" : "=r" (ci) : "r" (vba));
599 asm volatile ("extru %1,19,12,%0" : "+r" (ci) : "r" (ci));
600 asm volatile ("depw %1,15,12,%0" : "+r" (pa) : "r" (ci));
601
602 ((u32 *)pdir_ptr)[0] = (u32) pa;
603
604
605 /* FIXME: PCX_W platforms don't need FDC/SYNC. (eg C360)
606 ** PCX-U/U+ do. (eg C200/C240)
607 ** PCX-T'? Don't know. (eg C110 or similar K-class)
608 **
609 ** See PDC_MODEL/option 0/SW_CAP word for "Non-coherent IO-PDIR bit".
610 ** Hopefully we can patch (NOP) these out at boot time somehow.
611 **
612 ** "Since PCX-U employs an offset hash that is incompatible with
613 ** the real mode coherence index generation of U2, the PDIR entry
614 ** must be flushed to memory to retain coherence."
615 */
616 asm volatile("fdc 0(%0)" : : "r" (pdir_ptr));
617 asm volatile("sync");
618}
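
/*
** For readers not fluent in PA-RISC assembly: the word-1 store above is
** equivalent to this portable sketch. "depw %1,31,12,%0" deposits the
** low 12 bits of the hints into the low 12 bits of the physical
** address, i.e. the page-offset bits are replaced by the hint/valid
** bits before the low word of the pdir entry is written:
*/
static inline u32 pdir_word1_sketch(unsigned long pa, unsigned long hints)
{
	return (u32)((pa & ~0xfffUL) | (hints & 0xfffUL));
}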
619
620/**
621 * ccio_clear_io_tlb - Remove stale entries from the I/O TLB.
622 * @ioc: The I/O Controller.
623 * @iovp: The I/O Virtual Page.
624 * @byte_cnt: The requested number of bytes to be freed from the I/O Pdir.
625 *
626 * Purge invalid I/O PDIR entries from the I/O TLB.
627 *
628 * FIXME: Can we change the byte_cnt to pages_mapped?
629 */
630static CCIO_INLINE void
631ccio_clear_io_tlb(struct ioc *ioc, dma_addr_t iovp, size_t byte_cnt)
632{
633 u32 chain_size = 1 << ioc->chainid_shift;
634
635 iovp &= IOVP_MASK; /* clear offset bits, just want pagenum */
636 byte_cnt += chain_size;
637
638 while(byte_cnt > chain_size) {
639 WRITE_U32(CMD_TLB_PURGE | iovp, &ioc->ioc_hpa->io_command);
640 iovp += chain_size;
641 byte_cnt -= chain_size;
642 }
643}
644
645/**
646 * ccio_mark_invalid - Mark the I/O Pdir entries invalid.
647 * @ioc: The I/O Controller.
648 * @iova: The I/O Virtual Address.
649 * @byte_cnt: The requested number of bytes to be freed from the I/O Pdir.
650 *
651 * Mark the I/O Pdir entries invalid and blow away the corresponding I/O
652 * TLB entries.
653 *
654 * FIXME: at some threshold it might be "cheaper" to just blow
655 * away the entire I/O TLB instead of individual entries.
656 *
657 * FIXME: Uturn has 256 TLB entries. We don't need to purge every
658 * PDIR entry - just once for each possible TLB entry.
659 * (We do need to mark I/O PDIR entries invalid regardless).
660 *
661 * FIXME: Can we change byte_cnt to pages_mapped?
662 */
663static CCIO_INLINE void
664ccio_mark_invalid(struct ioc *ioc, dma_addr_t iova, size_t byte_cnt)
665{
666 u32 iovp = (u32)CCIO_IOVP(iova);
667 size_t saved_byte_cnt;
668
669 /* round up to nearest page size */
670 saved_byte_cnt = byte_cnt = ROUNDUP(byte_cnt, IOVP_SIZE);
671
672 while(byte_cnt > 0) {
673 /* invalidate one page at a time */
674 unsigned int idx = PDIR_INDEX(iovp);
675 char *pdir_ptr = (char *) &(ioc->pdir_base[idx]);
676
677 BUG_ON(idx >= (ioc->pdir_size / sizeof(u64)));
678 pdir_ptr[7] = 0; /* clear only VALID bit */
679 /*
680 ** FIXME: PCX_W platforms don't need FDC/SYNC. (eg C360)
681 ** PCX-U/U+ do. (eg C200/C240)
682 ** See PDC_MODEL/option 0/SW_CAP for "Non-coherent IO-PDIR bit".
683 **
684 ** Hopefully someone figures out how to patch (NOP) the
685 ** FDC/SYNC out at boot time.
686 */
687 asm volatile("fdc 0(%0)" : : "r" (pdir_ptr[7]));
688
689 iovp += IOVP_SIZE;
690 byte_cnt -= IOVP_SIZE;
691 }
692
693 asm volatile("sync");
694 ccio_clear_io_tlb(ioc, CCIO_IOVP(iova), saved_byte_cnt);
695}
696
697/****************************************************************
698**
699** CCIO dma_ops
700**
701*****************************************************************/
702
703/**
704 * ccio_dma_supported - Verify the IOMMU supports the DMA address range.
705 * @dev: The PCI device.
706 * @mask: A bit mask describing the DMA address range of the device.
707 *
708 * This function implements the pci_dma_supported function.
709 */
710static int
711ccio_dma_supported(struct device *dev, u64 mask)
712{
713 if(dev == NULL) {
714 printk(KERN_ERR MODULE_NAME ": EISA/ISA/et al not supported\n");
715 BUG();
716 return 0;
717 }
718
719 /* only support 32-bit devices (ie PCI/GSC) */
720 return (int)(mask == 0xffffffffUL);
721}
722
723/**
724 * ccio_map_single - Map an address range into the IOMMU.
725 * @dev: The PCI device.
726 * @addr: The start address of the DMA region.
727 * @size: The length of the DMA region.
728 * @direction: The direction of the DMA transaction (to/from device).
729 *
730 * This function implements the pci_map_single function.
731 */
732static dma_addr_t
733ccio_map_single(struct device *dev, void *addr, size_t size,
734 enum dma_data_direction direction)
735{
736 int idx;
737 struct ioc *ioc;
738 unsigned long flags;
739 dma_addr_t iovp;
740 dma_addr_t offset;
741 u64 *pdir_start;
742 unsigned long hint = hint_lookup[(int)direction];
743
744 BUG_ON(!dev);
745 ioc = GET_IOC(dev);
746
747 BUG_ON(size <= 0);
748
749 /* save offset bits */
750 offset = ((unsigned long) addr) & ~IOVP_MASK;
751
752 /* round up to nearest IOVP_SIZE */
753 size = ROUNDUP(size + offset, IOVP_SIZE);
754 spin_lock_irqsave(&ioc->res_lock, flags);
755
756#ifdef CCIO_MAP_STATS
757 ioc->msingle_calls++;
758 ioc->msingle_pages += size >> IOVP_SHIFT;
759#endif
760
761 idx = ccio_alloc_range(ioc, size);
762 iovp = (dma_addr_t)MKIOVP(idx);
763
764 pdir_start = &(ioc->pdir_base[idx]);
765
766	DBG_RUN("%s() 0x%p -> 0x%lx size: 0x%x\n",
767 __FUNCTION__, addr, (long)iovp | offset, size);
768
769 /* If not cacheline aligned, force SAFE_DMA on the whole mess */
770 if((size % L1_CACHE_BYTES) || ((unsigned long)addr % L1_CACHE_BYTES))
771 hint |= HINT_SAFE_DMA;
772
773 while(size > 0) {
774 ccio_io_pdir_entry(pdir_start, KERNEL_SPACE, (unsigned long)addr, hint);
775
776 DBG_RUN(" pdir %p %08x%08x\n",
777 pdir_start,
778 (u32) (((u32 *) pdir_start)[0]),
779 (u32) (((u32 *) pdir_start)[1]));
780 ++pdir_start;
781 addr += IOVP_SIZE;
782 size -= IOVP_SIZE;
783 }
784
785 spin_unlock_irqrestore(&ioc->res_lock, flags);
786
787 /* form complete address */
788 return CCIO_IOVA(iovp, offset);
789}
790
791/**
792 * ccio_unmap_single - Unmap an address range from the IOMMU.
793 * @dev: The PCI device.
794 * @addr: The start address of the DMA region.
795 * @size: The length of the DMA region.
796 * @direction: The direction of the DMA transaction (to/from device).
797 *
798 * This function implements the pci_unmap_single function.
799 */
800static void
801ccio_unmap_single(struct device *dev, dma_addr_t iova, size_t size,
802 enum dma_data_direction direction)
803{
804 struct ioc *ioc;
805 unsigned long flags;
806 dma_addr_t offset = iova & ~IOVP_MASK;
807
808 BUG_ON(!dev);
809 ioc = GET_IOC(dev);
810
811 DBG_RUN("%s() iovp 0x%lx/%x\n",
812 __FUNCTION__, (long)iova, size);
813
814 iova ^= offset; /* clear offset bits */
815 size += offset;
816 size = ROUNDUP(size, IOVP_SIZE);
817
818 spin_lock_irqsave(&ioc->res_lock, flags);
819
820#ifdef CCIO_MAP_STATS
821 ioc->usingle_calls++;
822 ioc->usingle_pages += size >> IOVP_SHIFT;
823#endif
824
825 ccio_mark_invalid(ioc, iova, size);
826 ccio_free_range(ioc, iova, (size >> IOVP_SHIFT));
827 spin_unlock_irqrestore(&ioc->res_lock, flags);
828}
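
/*
** Illustrative (hypothetical) driver-side use of the two routines
** above through the generic DMA API of this era; ccio_map_single()
** and ccio_unmap_single() sit behind pci_map_single() and
** pci_unmap_single() on machines routed through U2/Uturn:
*/
static void ccio_map_usage_sketch(struct pci_dev *pdev, void *buf, size_t len)
{
	dma_addr_t bus_addr;

	/* map 'buf' through the I/O MMU; the device DMAs to bus_addr */
	bus_addr = pci_map_single(pdev, buf, len, PCI_DMA_TODEVICE);

	/* ... hand bus_addr to the device and wait for the DMA ... */

	pci_unmap_single(pdev, bus_addr, len, PCI_DMA_TODEVICE);
}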
829
830/**
831 * ccio_alloc_consistent - Allocate a consistent DMA mapping.
832 * @dev: The PCI device.
833 * @size: The length of the DMA region.
834 * @dma_handle: The DMA address handed back to the device (not the cpu).
835 *
836 * This function implements the pci_alloc_consistent function.
837 */
838static void *
839ccio_alloc_consistent(struct device *dev, size_t size, dma_addr_t *dma_handle, int flag)
840{
841 void *ret;
842#if 0
843/* GRANT Need to establish hierarchy for non-PCI devs as well
844** and then provide matching gsc_map_xxx() functions for them as well.
845*/
846 if(!hwdev) {
847 /* only support PCI */
848 *dma_handle = 0;
849 return 0;
850 }
851#endif
852 ret = (void *) __get_free_pages(flag, get_order(size));
853
854 if (ret) {
855 memset(ret, 0, size);
856 *dma_handle = ccio_map_single(dev, ret, size, PCI_DMA_BIDIRECTIONAL);
857 }
858
859 return ret;
860}
861
862/**
863 * ccio_free_consistent - Free a consistent DMA mapping.
864 * @dev: The PCI device.
865 * @size: The length of the DMA region.
866 * @cpu_addr: The cpu address returned from the ccio_alloc_consistent.
867 * @dma_handle: The device address returned from the ccio_alloc_consistent.
868 *
869 * This function implements the pci_free_consistent function.
870 */
871static void
872ccio_free_consistent(struct device *dev, size_t size, void *cpu_addr,
873 dma_addr_t dma_handle)
874{
875 ccio_unmap_single(dev, dma_handle, size, 0);
876 free_pages((unsigned long)cpu_addr, get_order(size));
877}
878
879/*
880** Since 0 is a valid pdir_base index value, we can't use that
881** to determine if a value is valid or not. Use a flag to indicate
882** the SG list entry contains a valid pdir index.
883*/
884#define PIDE_FLAG 0x80000000UL
885
886#ifdef CCIO_MAP_STATS
887#define IOMMU_MAP_STATS
888#endif
889#include "iommu-helpers.h"
890
891/**
892 * ccio_map_sg - Map the scatter/gather list into the IOMMU.
893 * @dev: The PCI device.
894 * @sglist: The scatter/gather list to be mapped in the IOMMU.
895 * @nents: The number of entries in the scatter/gather list.
896 * @direction: The direction of the DMA transaction (to/from device).
897 *
898 * This function implements the pci_map_sg function.
899 */
900static int
901ccio_map_sg(struct device *dev, struct scatterlist *sglist, int nents,
902 enum dma_data_direction direction)
903{
904 struct ioc *ioc;
905 int coalesced, filled = 0;
906 unsigned long flags;
907 unsigned long hint = hint_lookup[(int)direction];
908 unsigned long prev_len = 0, current_len = 0;
909 int i;
910
911 BUG_ON(!dev);
912 ioc = GET_IOC(dev);
913
914 DBG_RUN_SG("%s() START %d entries\n", __FUNCTION__, nents);
915
916 /* Fast path single entry scatterlists. */
917 if (nents == 1) {
918 sg_dma_address(sglist) = ccio_map_single(dev,
919 (void *)sg_virt_addr(sglist), sglist->length,
920 direction);
921 sg_dma_len(sglist) = sglist->length;
922 return 1;
923 }
924
925 for(i = 0; i < nents; i++)
926 prev_len += sglist[i].length;
927
928 spin_lock_irqsave(&ioc->res_lock, flags);
929
930#ifdef CCIO_MAP_STATS
931 ioc->msg_calls++;
932#endif
933
934 /*
935 ** First coalesce the chunks and allocate I/O pdir space
936 **
937 ** If this is one DMA stream, we can properly map using the
938 ** correct virtual address associated with each DMA page.
939 ** w/o this association, we wouldn't have coherent DMA!
940 ** Access to the virtual address is what forces a two pass algorithm.
941 */
942 coalesced = iommu_coalesce_chunks(ioc, sglist, nents, ccio_alloc_range);
943
944 /*
945 ** Program the I/O Pdir
946 **
947 ** map the virtual addresses to the I/O Pdir
948 ** o dma_address will contain the pdir index
949 ** o dma_len will contain the number of bytes to map
950 ** o page/offset contain the virtual address.
951 */
952 filled = iommu_fill_pdir(ioc, sglist, nents, hint, ccio_io_pdir_entry);
953
954 spin_unlock_irqrestore(&ioc->res_lock, flags);
955
956 BUG_ON(coalesced != filled);
957
958 DBG_RUN_SG("%s() DONE %d mappings\n", __FUNCTION__, filled);
959
960 for (i = 0; i < filled; i++)
961 current_len += sg_dma_len(sglist + i);
962
963 BUG_ON(current_len != prev_len);
964
965 return filled;
966}
967
968/**
969 * ccio_unmap_sg - Unmap the scatter/gather list from the IOMMU.
970 * @dev: The PCI device.
971 * @sglist: The scatter/gather list to be unmapped from the IOMMU.
972 * @nents: The number of entries in the scatter/gather list.
973 * @direction: The direction of the DMA transaction (to/from device).
974 *
975 * This function implements the pci_unmap_sg function.
976 */
977static void
978ccio_unmap_sg(struct device *dev, struct scatterlist *sglist, int nents,
979 enum dma_data_direction direction)
980{
981 struct ioc *ioc;
982
983 BUG_ON(!dev);
984 ioc = GET_IOC(dev);
985
986 DBG_RUN_SG("%s() START %d entries, %08lx,%x\n",
987 __FUNCTION__, nents, sg_virt_addr(sglist), sglist->length);
988
989#ifdef CCIO_MAP_STATS
990 ioc->usg_calls++;
991#endif
992
993 while(sg_dma_len(sglist) && nents--) {
994
995#ifdef CCIO_MAP_STATS
996 ioc->usg_pages += sg_dma_len(sglist) >> PAGE_SHIFT;
997#endif
998 ccio_unmap_single(dev, sg_dma_address(sglist),
999 sg_dma_len(sglist), direction);
1000 ++sglist;
1001 }
1002
1003 DBG_RUN_SG("%s() DONE (nents %d)\n", __FUNCTION__, nents);
1004}
1005
1006static struct hppa_dma_ops ccio_ops = {
1007 .dma_supported = ccio_dma_supported,
1008 .alloc_consistent = ccio_alloc_consistent,
1009 .alloc_noncoherent = ccio_alloc_consistent,
1010 .free_consistent = ccio_free_consistent,
1011 .map_single = ccio_map_single,
1012 .unmap_single = ccio_unmap_single,
1013 .map_sg = ccio_map_sg,
1014 .unmap_sg = ccio_unmap_sg,
1015 .dma_sync_single_for_cpu = NULL, /* NOP for U2/Uturn */
1016 .dma_sync_single_for_device = NULL, /* NOP for U2/Uturn */
1017 .dma_sync_sg_for_cpu = NULL, /* ditto */
1018 .dma_sync_sg_for_device = NULL, /* ditto */
1019};
1020
1021#ifdef CONFIG_PROC_FS
1022static int proc_append(char *src, int len, char **dst, off_t *offset, int *max)
1023{
1024 if (len < *offset) {
1025 *offset -= len;
1026 return 0;
1027 }
1028 if (*offset > 0) {
1029 src += *offset;
1030 len -= *offset;
1031 *offset = 0;
1032 }
1033 if (len > *max) {
1034 len = *max;
1035 }
1036 memcpy(*dst, src, len);
1037 *dst += len;
1038 *max -= len;
1039 return (*max == 0);
1040}
1041
1042static int ccio_proc_info(char *buf, char **start, off_t offset, int count,
1043 int *eof, void *data)
1044{
1045 int max = count;
1046 char tmp[80]; /* width of an ANSI-standard terminal */
1047 struct ioc *ioc = ioc_list;
1048
1049 while (ioc != NULL) {
1050 unsigned int total_pages = ioc->res_size << 3;
1051 unsigned long avg = 0, min, max;
1052 int j, len;
1053
1054 len = sprintf(tmp, "%s\n", ioc->name);
1055 if (proc_append(tmp, len, &buf, &offset, &count))
1056 break;
1057
1058 len = sprintf(tmp, "Cujo 2.0 bug : %s\n",
1059 (ioc->cujo20_bug ? "yes" : "no"));
1060 if (proc_append(tmp, len, &buf, &offset, &count))
1061 break;
1062
1063 len = sprintf(tmp, "IO PDIR size : %d bytes (%d entries)\n",
1064 total_pages * 8, total_pages);
1065 if (proc_append(tmp, len, &buf, &offset, &count))
1066 break;
1067#ifdef CCIO_MAP_STATS
1068 len = sprintf(tmp, "IO PDIR entries : %ld free %ld used (%d%%)\n",
1069 total_pages - ioc->used_pages, ioc->used_pages,
1070 (int)(ioc->used_pages * 100 / total_pages));
1071 if (proc_append(tmp, len, &buf, &offset, &count))
1072 break;
1073#endif
1074 len = sprintf(tmp, "Resource bitmap : %d bytes (%d pages)\n",
1075 ioc->res_size, total_pages);
1076 if (proc_append(tmp, len, &buf, &offset, &count))
1077 break;
1078#ifdef CCIO_SEARCH_TIME
1079 min = max = ioc->avg_search[0];
1080 for(j = 0; j < CCIO_SEARCH_SAMPLE; ++j) {
1081 avg += ioc->avg_search[j];
1082 if(ioc->avg_search[j] > max)
1083 max = ioc->avg_search[j];
1084 if(ioc->avg_search[j] < min)
1085 min = ioc->avg_search[j];
1086 }
1087 avg /= CCIO_SEARCH_SAMPLE;
1088 len = sprintf(tmp, " Bitmap search : %ld/%ld/%ld (min/avg/max CPU Cycles)\n",
1089 min, avg, max);
1090 if (proc_append(tmp, len, &buf, &offset, &count))
1091 break;
1092#endif
1093#ifdef CCIO_MAP_STATS
1094 len = sprintf(tmp, "pci_map_single(): %8ld calls %8ld pages (avg %d/1000)\n",
1095 ioc->msingle_calls, ioc->msingle_pages,
1096 (int)((ioc->msingle_pages * 1000)/ioc->msingle_calls));
1097 if (proc_append(tmp, len, &buf, &offset, &count))
1098 break;
1099
1100
1101 /* KLUGE - unmap_sg calls unmap_single for each mapped page */
1102 min = ioc->usingle_calls - ioc->usg_calls;
1103 max = ioc->usingle_pages - ioc->usg_pages;
1104 len = sprintf(tmp, "pci_unmap_single: %8ld calls %8ld pages (avg %d/1000)\n",
1105 min, max, (int)((max * 1000)/min));
1106 if (proc_append(tmp, len, &buf, &offset, &count))
1107 break;
1108
1109 len = sprintf(tmp, "pci_map_sg() : %8ld calls %8ld pages (avg %d/1000)\n",
1110 ioc->msg_calls, ioc->msg_pages,
1111 (int)((ioc->msg_pages * 1000)/ioc->msg_calls));
1112 if (proc_append(tmp, len, &buf, &offset, &count))
1113 break;
1114 len = sprintf(tmp, "pci_unmap_sg() : %8ld calls %8ld pages (avg %d/1000)\n\n\n",
1115 ioc->usg_calls, ioc->usg_pages,
1116 (int)((ioc->usg_pages * 1000)/ioc->usg_calls));
1117 if (proc_append(tmp, len, &buf, &offset, &count))
1118 break;
1119#endif /* CCIO_MAP_STATS */
1120 ioc = ioc->next;
1121 }
1122
1123 if (count == 0) {
1124 *eof = 1;
1125 }
1126 return (max - count);
1127}
1128
1129static int ccio_resource_map(char *buf, char **start, off_t offset, int len,
1130 int *eof, void *data)
1131{
1132 struct ioc *ioc = ioc_list;
1133
1134 buf[0] = '\0';
1135 while (ioc != NULL) {
1136 u32 *res_ptr = (u32 *)ioc->res_map;
1137 int j;
1138
1139 for (j = 0; j < (ioc->res_size / sizeof(u32)); j++) {
1140 if ((j & 7) == 0)
1141 strcat(buf,"\n ");
1142			sprintf(buf + strlen(buf), " %08x", *res_ptr);
1143 res_ptr++;
1144 }
1145 strcat(buf, "\n\n");
1146 ioc = ioc->next;
1147 break; /* XXX - remove me */
1148 }
1149
1150 return strlen(buf);
1151}
1152#endif
1153
1154/**
1155 * ccio_find_ioc - Find the ioc in the ioc_list
1156 * @hw_path: The hardware path of the ioc.
1157 *
1158 * This function searches the ioc_list for an ioc that matches
1159 * the provided hardware path.
1160 */
1161static struct ioc * ccio_find_ioc(int hw_path)
1162{
1163 int i;
1164 struct ioc *ioc;
1165
1166 ioc = ioc_list;
1167 for (i = 0; i < ioc_count; i++) {
1168 if (ioc->hw_path == hw_path)
1169 return ioc;
1170
1171 ioc = ioc->next;
1172 }
1173
1174 return NULL;
1175}
1176
1177/**
1178 * ccio_get_iommu - Find the iommu which controls this device
1179 * @dev: The parisc device.
1180 *
1181 * This function searches through the registered IOMMU's and returns
1182 * the appropriate IOMMU for the device based on its hardware path.
1183 */
1184void * ccio_get_iommu(const struct parisc_device *dev)
1185{
1186 dev = find_pa_parent_type(dev, HPHW_IOA);
1187 if (!dev)
1188 return NULL;
1189
1190 return ccio_find_ioc(dev->hw_path);
1191}
1192
1193#define CUJO_20_STEP 0x10000000 /* inc upper nibble */
1194
1195/* Cujo 2.0 has a bug which will silently corrupt data being transferred
1196 * to/from certain pages. To avoid this happening, we mark these pages
1197 * as `used', and ensure that nothing will try to allocate from them.
1198 */
1199void ccio_cujo20_fixup(struct parisc_device *cujo, u32 iovp)
1200{
1201 unsigned int idx;
1202 struct parisc_device *dev = parisc_parent(cujo);
1203 struct ioc *ioc = ccio_get_iommu(dev);
1204 u8 *res_ptr;
1205
1206 ioc->cujo20_bug = 1;
1207 res_ptr = ioc->res_map;
1208 idx = PDIR_INDEX(iovp) >> 3;
1209
1210 while (idx < ioc->res_size) {
1211 res_ptr[idx] |= 0xff;
1212 idx += PDIR_INDEX(CUJO_20_STEP) >> 3;
1213 }
1214}
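
/*
** Worked example, assuming 4kB pages: CUJO_20_STEP is 0x10000000, so
** PDIR_INDEX(CUJO_20_STEP) >> 3 = (0x10000000 >> 12) >> 3 = 8192, and
** the loop above reserves one byte (8 pages) of res_map at the start
** of every 256MB of IOVA space.
*/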
1215
1216#if 0
1217/* GRANT - is this needed for U2 or not? */
1218
1219/*
1220** Get the size of the I/O TLB for this I/O MMU.
1221**
1222** If spa_shift is non-zero (ie probably U2),
1223** then calculate the I/O TLB size using spa_shift.
1224**
1225** Otherwise we are supposed to get the IODC entry point ENTRY TLB
1226** and execute it. However, both U2 and Uturn firmware supplies spa_shift.
1227** I think only Java (K/D/R-class too?) systems don't do this.
1228*/
1229static int
1230ccio_get_iotlb_size(struct parisc_device *dev)
1231{
1232 if (dev->spa_shift == 0) {
1233 panic("%s() : Can't determine I/O TLB size.\n", __FUNCTION__);
1234 }
1235 return (1 << dev->spa_shift);
1236}
1237#else
1238
1239/* Uturn supports 256 TLB entries */
1240#define CCIO_CHAINID_SHIFT 8
1241#define CCIO_CHAINID_MASK 0xff
1242#endif /* 0 */
1243
1244/* We *can't* support JAVA (T600). Venture there at your own risk. */
1245static struct parisc_device_id ccio_tbl[] = {
1246 { HPHW_IOA, HVERSION_REV_ANY_ID, U2_IOA_RUNWAY, 0xb }, /* U2 */
1247 { HPHW_IOA, HVERSION_REV_ANY_ID, UTURN_IOA_RUNWAY, 0xb }, /* UTurn */
1248 { 0, }
1249};
1250
1251static int ccio_probe(struct parisc_device *dev);
1252
1253static struct parisc_driver ccio_driver = {
1254 .name = "U2:Uturn",
1255 .id_table = ccio_tbl,
1256 .probe = ccio_probe,
1257};
1258
1259/**
1260 * ccio_ioc_init - Initialize the I/O Controller
1261 * @ioc: The I/O Controller.
1262 *
1263 * Initialize the I/O Controller, which includes setting up the
1264 * I/O Page Directory, the resource map, and initializing the
1265 * U2/Uturn chip into virtual mode.
1266 */
1267static void
1268ccio_ioc_init(struct ioc *ioc)
1269{
1270 int i;
1271 unsigned int iov_order;
1272 u32 iova_space_size;
1273
1274 /*
1275 ** Determine IOVA Space size from memory size.
1276 **
1277 ** Ideally, PCI drivers would register the maximum number
1278 ** of DMA they can have outstanding for each device they
1279 ** own. Next best thing would be to guess how much DMA
1280 ** can be outstanding based on PCI Class/sub-class. Both
1281 ** methods still require some "extra" to support PCI
1282 ** Hot-Plug/Removal of PCI cards. (aka PCI OLARD).
1283 */
1284
1285 iova_space_size = (u32) (num_physpages / count_parisc_driver(&ccio_driver));
1286
1287 /* limit IOVA space size to 1MB-1GB */
1288
1289 if (iova_space_size < (1 << (20 - PAGE_SHIFT))) {
1290 iova_space_size = 1 << (20 - PAGE_SHIFT);
1291#ifdef __LP64__
1292 } else if (iova_space_size > (1 << (30 - PAGE_SHIFT))) {
1293 iova_space_size = 1 << (30 - PAGE_SHIFT);
1294#endif
1295 }
1296
1297 /*
1298 ** iova space must be log2() in size.
1299 ** thus, pdir/res_map will also be log2().
1300 */
1301
1302 /* We could use larger page sizes in order to *decrease* the number
1303 ** of mappings needed. (ie 8k pages means 1/2 the mappings).
1304 **
1305	** Note: Grant Grundler says "Using 8k I/O pages isn't trivial either
1306 ** since the pages must also be physically contiguous - typically
1307 ** this is the case under linux."
1308 */
1309
1310 iov_order = get_order(iova_space_size << PAGE_SHIFT);
1311
1312 /* iova_space_size is now bytes, not pages */
1313 iova_space_size = 1 << (iov_order + PAGE_SHIFT);
1314
1315 ioc->pdir_size = (iova_space_size / IOVP_SIZE) * sizeof(u64);
1316
1317 BUG_ON(ioc->pdir_size >= 4 * 1024 * 1024); /* max pdir size < 4MB */
1318
1319 /* Verify it's a power of two */
1320 BUG_ON((1 << get_order(ioc->pdir_size)) != (ioc->pdir_size >> PAGE_SHIFT));
1321
1322 DBG_INIT("%s() hpa 0x%lx mem %luMB IOV %dMB (%d bits)\n",
1323 __FUNCTION__,
1324 ioc->ioc_hpa,
1325 (unsigned long) num_physpages >> (20 - PAGE_SHIFT),
1326 iova_space_size>>20,
1327 iov_order + PAGE_SHIFT);
1328
1329 ioc->pdir_base = (u64 *)__get_free_pages(GFP_KERNEL,
1330 get_order(ioc->pdir_size));
1331 if(NULL == ioc->pdir_base) {
1332 panic("%s:%s() could not allocate I/O Page Table\n", __FILE__,
1333 __FUNCTION__);
1334 }
1335 memset(ioc->pdir_base, 0, ioc->pdir_size);
1336
1337 BUG_ON((((unsigned long)ioc->pdir_base) & PAGE_MASK) != (unsigned long)ioc->pdir_base);
1338 DBG_INIT(" base %p", ioc->pdir_base);
1339
1340 /* resource map size dictated by pdir_size */
1341 ioc->res_size = (ioc->pdir_size / sizeof(u64)) >> 3;
1342 DBG_INIT("%s() res_size 0x%x\n", __FUNCTION__, ioc->res_size);
1343
1344 ioc->res_map = (u8 *)__get_free_pages(GFP_KERNEL,
1345 get_order(ioc->res_size));
1346 if(NULL == ioc->res_map) {
1347 panic("%s:%s() could not allocate resource map\n", __FILE__,
1348 __FUNCTION__);
1349 }
1350 memset(ioc->res_map, 0, ioc->res_size);
1351
1352 /* Initialize the res_hint to 16 */
1353 ioc->res_hint = 16;
1354
1355 /* Initialize the spinlock */
1356 spin_lock_init(&ioc->res_lock);
1357
1358 /*
1359 ** Chainid is the upper most bits of an IOVP used to determine
1360 ** which TLB entry an IOVP will use.
1361 */
1362 ioc->chainid_shift = get_order(iova_space_size) + PAGE_SHIFT - CCIO_CHAINID_SHIFT;
1363 DBG_INIT(" chainid_shift 0x%x\n", ioc->chainid_shift);
1364
1365 /*
1366 ** Initialize IOA hardware
1367 */
1368 WRITE_U32(CCIO_CHAINID_MASK << ioc->chainid_shift,
1369 &ioc->ioc_hpa->io_chain_id_mask);
1370
1371 WRITE_U32(virt_to_phys(ioc->pdir_base),
1372 &ioc->ioc_hpa->io_pdir_base);
1373
1374 /*
1375 ** Go to "Virtual Mode"
1376 */
1377 WRITE_U32(IOA_NORMAL_MODE, &ioc->ioc_hpa->io_control);
1378
1379 /*
1380 ** Initialize all I/O TLB entries to 0 (Valid bit off).
1381 */
1382 WRITE_U32(0, &ioc->ioc_hpa->io_tlb_entry_m);
1383 WRITE_U32(0, &ioc->ioc_hpa->io_tlb_entry_l);
1384
1385 for(i = 1 << CCIO_CHAINID_SHIFT; i ; i--) {
1386 WRITE_U32((CMD_TLB_DIRECT_WRITE | (i << ioc->chainid_shift)),
1387 &ioc->ioc_hpa->io_command);
1388 }
1389}
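To make the chainid arithmetic above concrete, here is a stand-alone sketch (ordinary user-space C; the helper name and the 4KB-page / 256MB-IOVA figures are illustrative assumptions, not values taken from the driver) of how chainid_shift ends up selecting a TLB chain for a given IOVP:

#include <stdio.h>
#include <assert.h>

#define CCIO_CHAINID_SHIFT  8           /* 256 TLB chains, as above */
#define CCIO_CHAINID_MASK   0xff

/* log2 of a power-of-two byte count; stands in for
 * get_order(iova_space_size) + PAGE_SHIFT in the driver */
static int log2_bytes(unsigned long bytes)
{
	int n = 0;
	while ((1UL << n) < bytes)
		n++;
	return n;
}

int main(void)
{
	unsigned long iova_space_size = 256UL << 20;    /* assume 256 MB */
	int chainid_shift = log2_bytes(iova_space_size) - CCIO_CHAINID_SHIFT;

	/* 2^28 bytes of IOVA space: the top 8 bits of an IOVP pick the chain */
	assert(chainid_shift == 20);

	unsigned long iovp = 0x01234000;
	printf("IOVP 0x%08lx uses TLB chain %lu\n",
	       iovp, (iovp >> chainid_shift) & CCIO_CHAINID_MASK);
	return 0;
}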
1390
1391static void
1392ccio_init_resource(struct resource *res, char *name, unsigned long ioaddr)
1393{
1394 int result;
1395
1396 res->parent = NULL;
1397 res->flags = IORESOURCE_MEM;
1398 res->start = (unsigned long)(signed) __raw_readl(ioaddr) << 16;
1399 res->end = (unsigned long)(signed) (__raw_readl(ioaddr + 4) << 16) - 1;
1400 res->name = name;
1401 if (res->end + 1 == res->start)
1402 return;
1403 result = request_resource(&iomem_resource, res);
1404 if (result < 0) {
1405 printk(KERN_ERR "%s: failed to claim CCIO bus address space (%08lx,%08lx)\n",
1406 __FILE__, res->start, res->end);
1407 }
1408}
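The reads above are one half of an encoding shared with ccio_allocate_resource() further down: the IOA registers keep only bits 31:16 of each window bound, and the upper bits of the 64-bit address come back via sign extension. A stand-alone round-trip check (plain C; the helper names and the sample address are illustrative, and the window is assumed 64KB-aligned and F_EXTENDed):

#include <stdio.h>
#include <stdint.h>
#include <assert.h>

/* pack as ccio_allocate_resource() does before writing io_io_low */
static uint32_t encode(uint64_t addr)
{
	return (uint32_t)(addr >> 16) | 0xffff0000u;
}

/* unpack as ccio_init_resource() does after reading io_io_low */
static uint64_t decode(uint32_t reg)
{
	return (uint64_t)(int64_t)(int32_t)reg << 16;
}

int main(void)
{
	uint64_t start = 0xfffffffff0800000ull;  /* an F_EXTENDed GSC window */

	assert(decode(encode(start)) == start);
	printf("reg 0x%08x -> start 0x%016llx\n",
	       encode(start), (unsigned long long)decode(encode(start)));
	return 0;
}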
1409
1410static void __init ccio_init_resources(struct ioc *ioc)
1411{
1412 struct resource *res = ioc->mmio_region;
1413 char *name = kmalloc(14, GFP_KERNEL);
1414
1415 sprintf(name, "GSC Bus [%d/]", ioc->hw_path);
1416
1417 ccio_init_resource(res, name, (unsigned long)&ioc->ioc_hpa->io_io_low);
1418 ccio_init_resource(res + 1, name,
1419 (unsigned long)&ioc->ioc_hpa->io_io_low_hv);
1420}
1421
1422static int new_ioc_area(struct resource *res, unsigned long size,
1423 unsigned long min, unsigned long max, unsigned long align)
1424{
1425 if (max <= min)
1426 return -EBUSY;
1427
1428 res->start = (max - size + 1) &~ (align - 1);
1429 res->end = res->start + size;
1430 if (!request_resource(&iomem_resource, res))
1431 return 0;
1432
1433 return new_ioc_area(res, size, min, max - size, align);
1434}
1435
1436static int expand_ioc_area(struct resource *res, unsigned long size,
1437 unsigned long min, unsigned long max, unsigned long align)
1438{
1439 unsigned long start, len;
1440
1441 if (!res->parent)
1442 return new_ioc_area(res, size, min, max, align);
1443
1444 start = (res->start - size) &~ (align - 1);
1445 len = res->end - start + 1;
1446 if (start >= min) {
1447 if (!adjust_resource(res, start, len))
1448 return 0;
1449 }
1450
1451 start = res->start;
1452 len = ((size + res->end + align) &~ (align - 1)) - start;
1453 if (start + len <= max) {
1454 if (!adjust_resource(res, start, len))
1455 return 0;
1456 }
1457
1458 return -EBUSY;
1459}
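The two attempts above are plain alignment arithmetic: grow the window downward to an aligned start, or grow it upward past the old end and round the length up to the alignment. A stand-alone sketch with illustrative numbers (an 8MB window grown by 8MB at 8MB alignment, the geometry Dino requests in dino_card_setup() later in this commit):

#include <stdio.h>

int main(void)
{
	unsigned long align = 0x800000;                 /* 8 MB */
	unsigned long size  = 0x800000;
	unsigned long start = 0xf0800000, end = 0xf0ffffff;

	/* grow down: new start is aligned, old end is kept */
	unsigned long down = (start - size) & ~(align - 1);
	printf("grow down: [0x%lx, 0x%lx]\n", down, end);

	/* grow up: old start kept, new length reaches size bytes past the
	 * old end, rounded up to the alignment */
	unsigned long len = ((size + end + align) & ~(align - 1)) - start;
	printf("grow up:   [0x%lx, 0x%lx]\n", start, start + len - 1);
	return 0;
}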
1460
1461/*
1462 * Dino calls this function. Beware that we may get called on systems
1463 * which have no IOC (725, B180, C160L, etc) but do have a Dino.
1464 * So it's legal to find no parent IOC.
1465 *
1466 * Some other issues: one of the resources in the ioc may be unassigned.
1467 */
1468int ccio_allocate_resource(const struct parisc_device *dev,
1469 struct resource *res, unsigned long size,
1470 unsigned long min, unsigned long max, unsigned long align)
1471{
1472 struct resource *parent = &iomem_resource;
1473 struct ioc *ioc = ccio_get_iommu(dev);
1474 if (!ioc)
1475 goto out;
1476
1477 parent = ioc->mmio_region;
1478 if (parent->parent &&
1479 !allocate_resource(parent, res, size, min, max, align, NULL, NULL))
1480 return 0;
1481
1482 if ((parent + 1)->parent &&
1483 !allocate_resource(parent + 1, res, size, min, max, align,
1484 NULL, NULL))
1485 return 0;
1486
1487 if (!expand_ioc_area(parent, size, min, max, align)) {
1488 __raw_writel(((parent->start)>>16) | 0xffff0000,
1489 (unsigned long)&(ioc->ioc_hpa->io_io_low));
1490 __raw_writel(((parent->end)>>16) | 0xffff0000,
1491 (unsigned long)&(ioc->ioc_hpa->io_io_high));
1492 } else if (!expand_ioc_area(parent + 1, size, min, max, align)) {
1493 parent++;
1494 __raw_writel(((parent->start)>>16) | 0xffff0000,
1495 (unsigned long)&(ioc->ioc_hpa->io_io_low_hv));
1496 __raw_writel(((parent->end)>>16) | 0xffff0000,
1497 (unsigned long)&(ioc->ioc_hpa->io_io_high_hv));
1498 } else {
1499 return -EBUSY;
1500 }
1501
1502 out:
1503	return allocate_resource(parent, res, size, min, max, align, NULL, NULL);
1504}
1505
1506int ccio_request_resource(const struct parisc_device *dev,
1507 struct resource *res)
1508{
1509 struct resource *parent;
1510 struct ioc *ioc = ccio_get_iommu(dev);
1511
1512 if (!ioc) {
1513 parent = &iomem_resource;
1514 } else if ((ioc->mmio_region->start <= res->start) &&
1515 (res->end <= ioc->mmio_region->end)) {
1516 parent = ioc->mmio_region;
1517 } else if (((ioc->mmio_region + 1)->start <= res->start) &&
1518 (res->end <= (ioc->mmio_region + 1)->end)) {
1519 parent = ioc->mmio_region + 1;
1520 } else {
1521 return -EBUSY;
1522 }
1523
1524 return request_resource(parent, res);
1525}
1526
1527/**
1528 * ccio_probe - Determine if ccio should claim this device.
1529 * @dev: The device which has been found
1530 *
1531 * Determine if ccio should claim this chip (return 0) or not (return 1).
1532 * If so, initialize the chip and tell other partners in crime they
1533 * have work to do.
1534 */
1535static int ccio_probe(struct parisc_device *dev)
1536{
1537 int i;
1538 struct ioc *ioc, **ioc_p = &ioc_list;
1539
1540 ioc = kmalloc(sizeof(struct ioc), GFP_KERNEL);
1541 if (ioc == NULL) {
1542 printk(KERN_ERR MODULE_NAME ": memory allocation failure\n");
1543 return 1;
1544 }
1545 memset(ioc, 0, sizeof(struct ioc));
1546
1547 ioc->name = dev->id.hversion == U2_IOA_RUNWAY ? "U2" : "UTurn";
1548
1549 printk(KERN_INFO "Found %s at 0x%lx\n", ioc->name, dev->hpa);
1550
1551 for (i = 0; i < ioc_count; i++) {
1552 ioc_p = &(*ioc_p)->next;
1553 }
1554 *ioc_p = ioc;
1555
1556 ioc->hw_path = dev->hw_path;
1557 ioc->ioc_hpa = (struct ioa_registers *)dev->hpa;
1558 ccio_ioc_init(ioc);
1559 ccio_init_resources(ioc);
1560 hppa_dma_ops = &ccio_ops;
1561 dev->dev.platform_data = kmalloc(sizeof(struct pci_hba_data), GFP_KERNEL);
1562
1563 /* if this fails, no I/O cards will work, so may as well bug */
1564 BUG_ON(dev->dev.platform_data == NULL);
1565 HBA_DATA(dev->dev.platform_data)->iommu = ioc;
1566
1567
1568 if (ioc_count == 0) {
1569 /* FIXME: Create separate entries for each ioc */
1570 create_proc_read_entry(MODULE_NAME, S_IRWXU, proc_runway_root,
1571 ccio_proc_info, NULL);
1572 create_proc_read_entry(MODULE_NAME"-bitmap", S_IRWXU,
1573 proc_runway_root, ccio_resource_map, NULL);
1574 }
1575
1576 ioc_count++;
1577
1578 parisc_vmerge_boundary = IOVP_SIZE;
1579 parisc_vmerge_max_size = BITS_PER_LONG * IOVP_SIZE;
1580 parisc_has_iommu();
1581 return 0;
1582}
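The list append in the probe above is the pointer-to-pointer idiom: walk a pointer to the next field forward, then store through it, so an empty list needs no special case. A stand-alone sketch (illustrative types and names; the driver counts with ioc_count, but walking to the NULL tail is equivalent):

#include <stdio.h>
#include <stdlib.h>

struct node { int id; struct node *next; };

static struct node *head;       /* plays the role of ioc_list */

static void append(struct node *n)
{
	struct node **pp = &head;

	/* advance to the NULL tail pointer, then patch it */
	while (*pp)
		pp = &(*pp)->next;
	n->next = NULL;
	*pp = n;
}

int main(void)
{
	for (int i = 0; i < 3; i++) {
		struct node *n = malloc(sizeof(*n));
		n->id = i;
		append(n);
	}
	for (struct node *n = head; n; n = n->next)
		printf("node %d\n", n->id);
	return 0;
}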
1583
1584/**
1585 * ccio_init - ccio initialization procedure.
1586 *
1587 * Register this driver.
1588 */
1589void __init ccio_init(void)
1590{
1591 register_parisc_driver(&ccio_driver);
1592}
1593
diff --git a/drivers/parisc/ccio-rm-dma.c b/drivers/parisc/ccio-rm-dma.c
new file mode 100644
index 000000000000..57e6385976e2
--- /dev/null
+++ b/drivers/parisc/ccio-rm-dma.c
@@ -0,0 +1,201 @@
1/*
2 * ccio-rm-dma.c:
3 * DMA management routines for first generation cache-coherent machines.
4 * "Real Mode" operation refers to U2/Uturn chip operation. The chip
5 * can perform coherency checks w/o using the I/O MMU. That suffices
6 * until support for more than 4GB of physical memory is needed.
7 *
8 * This is the trivial case - basically what x86 does.
9 *
10 * Drawbacks of using Real Mode are:
11 * o outbound DMA is slower since one isn't using the prefetching
12 * U2 can do for outbound DMA.
13 * o Ability to do scatter/gather in HW is also lost.
14 * o only known to work with PCX-W processor. (eg C360)
15 * (PCX-U/U+ are not coherent with U2 in real mode.)
16 *
17 *
18 * This program is free software; you can redistribute it and/or modify
19 * it under the terms of the GNU General Public License as published by
20 * the Free Software Foundation; either version 2 of the License, or
21 * (at your option) any later version.
22 *
23 *
24 * Original version/author:
25 * CVSROOT=:pserver:anonymous@198.186.203.37:/cvsroot/linux-parisc
26 * cvs -z3 co linux/arch/parisc/kernel/dma-rm.c
27 *
28 * (C) Copyright 2000 Philipp Rumpf <prumpf@tux.org>
29 *
30 *
31 * Adapted for The Puffin Group's parisc-linux port by Grant Grundler.
32 * (C) Copyright 2000 Grant Grundler <grundler@puffin.external.hp.com>
33 *
34 */
35
36#include <linux/types.h>
37#include <linux/init.h>
38#include <linux/mm.h>
39#include <linux/string.h>
40#include <linux/pci.h>
41
42#include <asm/uaccess.h>
43
44#include <asm/io.h>
45#include <asm/hardware.h>
46#include <asm/page.h>
47
48/* We chose "ccio" only because that's what HP-UX calls it....
49** Make it easier for folks to migrate from one to the other :^)
50*/
51#define MODULE_NAME "ccio"
52
53#define U2_IOA_RUNWAY 0x580
54#define U2_BC_GSC 0x501
55#define UTURN_IOA_RUNWAY 0x581
56#define UTURN_BC_GSC 0x502
57
58#define IS_U2(id) ( \
59 (((id)->hw_type == HPHW_IOA) && ((id)->hversion == U2_IOA_RUNWAY)) || \
60 (((id)->hw_type == HPHW_BCPORT) && ((id)->hversion == U2_BC_GSC)) \
61)
62
63#define IS_UTURN(id) ( \
64 (((id)->hw_type == HPHW_IOA) && ((id)->hversion == UTURN_IOA_RUNWAY)) || \
65 (((id)->hw_type == HPHW_BCPORT) && ((id)->hversion == UTURN_BC_GSC)) \
66)
67
68static int ccio_dma_supported( struct pci_dev *dev, u64 mask)
69{
70 if (dev == NULL) {
71 printk(KERN_ERR MODULE_NAME ": EISA/ISA/et al not supported\n");
72 BUG();
73 return(0);
74 }
75
76 /* only support 32-bit devices (ie PCI/GSC) */
77 return((int) (mask >= 0xffffffffUL));
78}
79
80
81static void *ccio_alloc_consistent(struct pci_dev *dev, size_t size,
82 dma_addr_t *handle)
83{
84 void *ret;
85
86 ret = (void *)__get_free_pages(GFP_ATOMIC, get_order(size));
87
88 if (ret != NULL) {
89 memset(ret, 0, size);
90 *handle = virt_to_phys(ret);
91 }
92 return ret;
93}
94
95static void ccio_free_consistent(struct pci_dev *dev, size_t size,
96 void *vaddr, dma_addr_t handle)
97{
98 free_pages((unsigned long)vaddr, get_order(size));
99}
100
101static dma_addr_t ccio_map_single(struct pci_dev *dev, void *ptr, size_t size,
102 int direction)
103{
104 return virt_to_phys(ptr);
105}
106
107static void ccio_unmap_single(struct pci_dev *dev, dma_addr_t dma_addr,
108 size_t size, int direction)
109{
110 /* Nothing to do */
111}
112
113
114static int ccio_map_sg(struct pci_dev *dev, struct scatterlist *sglist, int nents, int direction)
115{
116 int tmp = nents;
117
118 /* KISS: map each buffer separately. */
119 while (nents) {
120 sg_dma_address(sglist) = ccio_map_single(dev, sglist->address, sglist->length, direction);
121 sg_dma_len(sglist) = sglist->length;
122 nents--;
123 sglist++;
124 }
125
126 return tmp;
127}
128
129
130static void ccio_unmap_sg(struct pci_dev *dev, struct scatterlist *sglist, int nents, int direction)
131{
132#if 0
133 while (nents) {
134 ccio_unmap_single(dev, sg_dma_address(sglist), sg_dma_len(sglist), direction);
135 nents--;
136 sglist++;
137 }
138 return;
139#else
140 /* Do nothing (copied from current ccio_unmap_single() :^) */
141#endif
142}
143
144
145static struct pci_dma_ops ccio_ops = {
146 ccio_dma_supported,
147 ccio_alloc_consistent,
148 ccio_free_consistent,
149 ccio_map_single,
150 ccio_unmap_single,
151 ccio_map_sg,
152 ccio_unmap_sg,
153 NULL, /* dma_sync_single_for_cpu : NOP for U2 */
154 NULL, /* dma_sync_single_for_device : NOP for U2 */
155 NULL, /* dma_sync_sg_for_cpu : ditto */
156 NULL, /* dma_sync_sg_for_device : ditto */
157};
158
159
160/*
161** Determine if u2 should claim this chip (return 0) or not (return 1).
162** If so, initialize the chip and tell other partners in crime they
163** have work to do.
164*/
165static int
166ccio_probe(struct parisc_device *dev)
167{
168 printk(KERN_INFO "%s found %s at 0x%lx\n", MODULE_NAME,
169 dev->id.hversion == U2_BC_GSC ? "U2" : "UTurn",
170 dev->hpa);
171
172/*
173** FIXME - should check U2 registers to verify it's really running
174** in "Real Mode".
175*/
176
177#if 0
178/* will need this for "Virtual Mode" operation */
179 ccio_hw_init(ccio_dev);
180 ccio_common_init(ccio_dev);
181#endif
182 hppa_dma_ops = &ccio_ops;
183 return 0;
184}
185
186static struct parisc_device_id ccio_tbl[] = {
187 { HPHW_BCPORT, HVERSION_REV_ANY_ID, U2_BC_GSC, 0xc },
188 { HPHW_BCPORT, HVERSION_REV_ANY_ID, UTURN_BC_GSC, 0xc },
189 { 0, }
190};
191
192static struct parisc_driver ccio_driver = {
193 .name = "U2/Uturn",
194 .id_table = ccio_tbl,
195 .probe = ccio_probe,
196};
197
198void __init ccio_init(void)
199{
200 register_parisc_driver(&ccio_driver);
201}
diff --git a/drivers/parisc/dino.c b/drivers/parisc/dino.c
new file mode 100644
index 000000000000..b0d2a73d1d47
--- /dev/null
+++ b/drivers/parisc/dino.c
@@ -0,0 +1,1044 @@
1/*
2** DINO manager
3**
4** (c) Copyright 1999 Red Hat Software
5** (c) Copyright 1999 SuSE GmbH
6** (c) Copyright 1999,2000 Hewlett-Packard Company
7** (c) Copyright 2000 Grant Grundler
8**
9** This program is free software; you can redistribute it and/or modify
10** it under the terms of the GNU General Public License as published by
11** the Free Software Foundation; either version 2 of the License, or
12** (at your option) any later version.
13**
14** This module provides access to Dino PCI bus (config/IOport spaces)
15** and helps manage Dino IRQ lines.
16**
17** Dino interrupt handling is a bit complicated.
18** Dino always writes to the broadcast EIR via irr0 for now.
19** (BIG WARNING: using broadcast EIR is a really bad thing for SMP!)
20** Only one processor interrupt is used for the 11 IRQ line
21** inputs to dino.
22**
23** The difference between Built-in Dino and Card-Mode
24** Dino lies in chip initialization and PCI device initialization.
25**
26** Linux drivers can only use Card-Mode Dino if the PCI devices' I/O port
27** BARs are configured and used by the driver. Programming MMIO addresses
28** requires substantial knowledge of the available Host I/O address ranges
29** and is currently not supported. Port/Config accessor functions are the
30** same. "BIOS" differences are handled within the existing routines.
31*/
32
33/* Changes :
34** 2001-06-14 : Clement Moyroud (moyroudc@esiee.fr)
35** - added support for the integrated RS232.
36*/
37
38/*
39** TODO: create a virtual address for each Dino HPA.
40** GSC code might be able to do this since IODC data tells us
41** how many pages are used. PCI subsystem could (must?) do this
42** for PCI drivers devices which implement/use MMIO registers.
43*/
44
45#include <linux/config.h>
46#include <linux/delay.h>
47#include <linux/types.h>
48#include <linux/kernel.h>
49#include <linux/pci.h>
50#include <linux/init.h>
51#include <linux/ioport.h>
52#include <linux/slab.h>
53#include <linux/interrupt.h> /* for struct irqaction */
54#include <linux/spinlock.h> /* for spinlock_t and prototypes */
55
56#include <asm/pdc.h>
57#include <asm/page.h>
58#include <asm/system.h>
59#include <asm/io.h>
60#include <asm/hardware.h>
61
62#include "gsc.h"
63
64#undef DINO_DEBUG
65
66#ifdef DINO_DEBUG
67#define DBG(x...) printk(x)
68#else
69#define DBG(x...)
70#endif
71
72/*
73** Config accessor functions only pass in the 8-bit bus number
74** and not the 8-bit "PCI Segment" number. Each Dino will be
75** assigned a PCI bus number based on "when" it's discovered.
76**
77** The "secondary" bus number is set to this before calling
78** pci_scan_bus(). If any PPB's are present, the scan will
79** discover them and update the "secondary" and "subordinate"
80** fields in Dino's pci_bus structure.
81**
82** Changes in the configuration *will* result in a different
83** bus number for each dino.
84*/
85
86#define is_card_dino(id) ((id)->hw_type == HPHW_A_DMA)
87
88#define DINO_IAR0 0x004
89#define DINO_IODC_ADDR 0x008
90#define DINO_IODC_DATA_0 0x008
91#define DINO_IODC_DATA_1 0x008
92#define DINO_IRR0 0x00C
93#define DINO_IAR1 0x010
94#define DINO_IRR1 0x014
95#define DINO_IMR 0x018
96#define DINO_IPR 0x01C
97#define DINO_TOC_ADDR 0x020
98#define DINO_ICR 0x024
99#define DINO_ILR 0x028
100#define DINO_IO_COMMAND 0x030
101#define DINO_IO_STATUS 0x034
102#define DINO_IO_CONTROL 0x038
103#define DINO_IO_GSC_ERR_RESP 0x040
104#define DINO_IO_ERR_INFO 0x044
105#define DINO_IO_PCI_ERR_RESP 0x048
106#define DINO_IO_FBB_EN 0x05c
107#define DINO_IO_ADDR_EN 0x060
108#define DINO_PCI_ADDR 0x064
109#define DINO_CONFIG_DATA 0x068
110#define DINO_IO_DATA 0x06c
111#define DINO_MEM_DATA 0x070 /* Dino 3.x only */
112#define DINO_GSC2X_CONFIG 0x7b4
113#define DINO_GMASK 0x800
114#define DINO_PAMR 0x804
115#define DINO_PAPR 0x808
116#define DINO_DAMODE 0x80c
117#define DINO_PCICMD 0x810
118#define DINO_PCISTS 0x814
119#define DINO_MLTIM 0x81c
120#define DINO_BRDG_FEAT 0x820
121#define DINO_PCIROR 0x824
122#define DINO_PCIWOR 0x828
123#define DINO_TLTIM 0x830
124
125#define DINO_IRQS 11 /* bits 0-10 are architected */
126#define DINO_IRR_MASK 0x5ff /* only 10 bits are implemented */
127
128#define DINO_MASK_IRQ(x) (1<<(x))
129
130#define PCIINTA 0x001
131#define PCIINTB 0x002
132#define PCIINTC 0x004
133#define PCIINTD 0x008
134#define PCIINTE 0x010
135#define PCIINTF 0x020
136#define GSCEXTINT 0x040
137/* #define xxx 0x080 - bit 7 is "default" */
138/* #define xxx 0x100 - bit 8 not used */
139/* #define xxx 0x200 - bit 9 not used */
140#define RS232INT 0x400
141
142struct dino_device
143{
144 struct pci_hba_data hba; /* 'C' inheritance - must be first */
145 spinlock_t dinosaur_pen;
146 unsigned long txn_addr; /* EIR addr to generate interrupt */
147 u32 txn_data; /* EIR data assign to each dino */
148 u32 imr; /* IRQ's which are enabled */
149 int global_irq[12]; /* map IMR bit to global irq */
150#ifdef DINO_DEBUG
151 unsigned int dino_irr0; /* save most recent IRQ line stat */
152#endif
153};
154
155/* Looks nice and keeps the compiler happy */
156#define DINO_DEV(d) ((struct dino_device *) d)
157
158
159/*
160 * Dino Configuration Space Accessor Functions
161 */
162
163#define DINO_CFG_TOK(bus,dfn,pos) ((u32) ((bus)<<16 | (dfn)<<8 | (pos)))
164
165/*
166 * keep the current highest bus count to assist in allocating busses. This
167 * tries to keep a global bus count total so that when we discover an
168 * entirely new bus, it can be given a unique bus number.
169 */
170static int dino_current_bus = 0;
171
172static int dino_cfg_read(struct pci_bus *bus, unsigned int devfn, int where,
173 int size, u32 *val)
174{
175 struct dino_device *d = DINO_DEV(parisc_walk_tree(bus->bridge));
176 u32 local_bus = (bus->parent == NULL) ? 0 : bus->secondary;
177 u32 v = DINO_CFG_TOK(local_bus, devfn, where & ~3);
178 void __iomem *base_addr = d->hba.base_addr;
179 unsigned long flags;
180
181 spin_lock_irqsave(&d->dinosaur_pen, flags);
182
183 /* tell HW which CFG address */
184 __raw_writel(v, base_addr + DINO_PCI_ADDR);
185
186 /* generate cfg read cycle */
187 if (size == 1) {
188 *val = readb(base_addr + DINO_CONFIG_DATA + (where & 3));
189 } else if (size == 2) {
190 *val = readw(base_addr + DINO_CONFIG_DATA + (where & 2));
191 } else if (size == 4) {
192 *val = readl(base_addr + DINO_CONFIG_DATA);
193 }
194
195 spin_unlock_irqrestore(&d->dinosaur_pen, flags);
196 return 0;
197}
198
199/*
200 * Dino address stepping "feature":
201 * When address stepping, Dino attempts to drive the bus one cycle too soon
202 * even though the type of cycle (config vs. MMIO) might be different.
203 * The read of Ven/Prod ID is harmless and avoids Dino's address stepping.
204 */
205static int dino_cfg_write(struct pci_bus *bus, unsigned int devfn, int where,
206 int size, u32 val)
207{
208 struct dino_device *d = DINO_DEV(parisc_walk_tree(bus->bridge));
209 u32 local_bus = (bus->parent == NULL) ? 0 : bus->secondary;
210 u32 v = DINO_CFG_TOK(local_bus, devfn, where & ~3);
211 void __iomem *base_addr = d->hba.base_addr;
212 unsigned long flags;
213
214 spin_lock_irqsave(&d->dinosaur_pen, flags);
215
216 /* avoid address stepping feature */
217 __raw_writel(v & 0xffffff00, base_addr + DINO_PCI_ADDR);
218 __raw_readl(base_addr + DINO_CONFIG_DATA);
219
220 /* tell HW which CFG address */
221 __raw_writel(v, base_addr + DINO_PCI_ADDR);
222	/* generate cfg write cycle */
223 if (size == 1) {
224 writeb(val, base_addr + DINO_CONFIG_DATA + (where & 3));
225 } else if (size == 2) {
226 writew(val, base_addr + DINO_CONFIG_DATA + (where & 2));
227 } else if (size == 4) {
228 writel(val, base_addr + DINO_CONFIG_DATA);
229 }
230
231 spin_unlock_irqrestore(&d->dinosaur_pen, flags);
232 return 0;
233}
234
235static struct pci_ops dino_cfg_ops = {
236 .read = dino_cfg_read,
237 .write = dino_cfg_write,
238};
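DINO_CFG_TOK (defined above) packs a conventional config-cycle address: bus in bits 23:16, devfn in bits 15:8, and the dword-aligned register offset in bits 7:0. A stand-alone sketch (the macro copies and the bus/slot values are illustrative):

#include <stdio.h>
#include <assert.h>

#define CFG_TOK(bus, dfn, pos)  ((unsigned)((bus) << 16 | (dfn) << 8 | (pos)))
#define DEVFN(slot, fn)         ((slot) << 3 | (fn))

int main(void)
{
	/* bus 0, slot 3 function 0, reading PCI_VENDOR_ID (offset 0) */
	unsigned tok = CFG_TOK(0, DEVFN(3, 0), 0x00 & ~3);

	assert(tok == 0x1800);
	printf("config token: 0x%06x\n", tok);
	return 0;
}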
239
240
241/*
242 * Dino "I/O Port" Space Accessor Functions
243 *
244 * Many PCI devices don't require use of I/O port space (eg Tulip,
245 * NCR720) since they export the same registers to both MMIO and
246 * I/O port space. Performance is going to stink if drivers use
247 * I/O port instead of MMIO.
248 */
249
250#define DINO_PORT_IN(type, size, mask) \
251static u##size dino_in##size (struct pci_hba_data *d, u16 addr) \
252{ \
253 u##size v; \
254 unsigned long flags; \
255 spin_lock_irqsave(&(DINO_DEV(d)->dinosaur_pen), flags); \
256 /* tell HW which IO Port address */ \
257 __raw_writel((u32) addr, d->base_addr + DINO_PCI_ADDR); \
258 /* generate I/O PORT read cycle */ \
259 v = read##type(d->base_addr+DINO_IO_DATA+(addr&mask)); \
260 spin_unlock_irqrestore(&(DINO_DEV(d)->dinosaur_pen), flags); \
261 return v; \
262}
263
264DINO_PORT_IN(b, 8, 3)
265DINO_PORT_IN(w, 16, 2)
266DINO_PORT_IN(l, 32, 0)
267
268#define DINO_PORT_OUT(type, size, mask) \
269static void dino_out##size (struct pci_hba_data *d, u16 addr, u##size val) \
270{ \
271 unsigned long flags; \
272 spin_lock_irqsave(&(DINO_DEV(d)->dinosaur_pen), flags); \
273 /* tell HW which IO port address */ \
274 __raw_writel((u32) addr, d->base_addr + DINO_PCI_ADDR); \
275	/* generate I/O port write cycle */ \
276 write##type(val, d->base_addr+DINO_IO_DATA+(addr&mask)); \
277 spin_unlock_irqrestore(&(DINO_DEV(d)->dinosaur_pen), flags); \
278}
279
280DINO_PORT_OUT(b, 8, 3)
281DINO_PORT_OUT(w, 16, 2)
282DINO_PORT_OUT(l, 32, 0)
283
284struct pci_port_ops dino_port_ops = {
285 .inb = dino_in8,
286 .inw = dino_in16,
287 .inl = dino_in32,
288 .outb = dino_out8,
289 .outw = dino_out16,
290 .outl = dino_out32
291};
292
293static void dino_disable_irq(unsigned int irq)
294{
295 struct dino_device *dino_dev = irq_desc[irq].handler_data;
296 int local_irq = gsc_find_local_irq(irq, dino_dev->global_irq, irq);
297
298	DBG(KERN_WARNING "%s(0x%p, %d)\n", __FUNCTION__, dino_dev, irq);
299
300 /* Clear the matching bit in the IMR register */
301 dino_dev->imr &= ~(DINO_MASK_IRQ(local_irq));
302 __raw_writel(dino_dev->imr, dino_dev->hba.base_addr+DINO_IMR);
303}
304
305static void dino_enable_irq(unsigned int irq)
306{
307 struct dino_device *dino_dev = irq_desc[irq].handler_data;
308 int local_irq = gsc_find_local_irq(irq, dino_dev->global_irq, irq);
309 u32 tmp;
310
311	DBG(KERN_WARNING "%s(0x%p, %d)\n", __FUNCTION__, dino_dev, irq);
312
313 /*
314 ** clear pending IRQ bits
315 **
316 ** This does NOT change ILR state!
317 ** See comment below for ILR usage.
318 */
319 __raw_readl(dino_dev->hba.base_addr+DINO_IPR);
320
321 /* set the matching bit in the IMR register */
322 dino_dev->imr |= DINO_MASK_IRQ(local_irq); /* used in dino_isr() */
323 __raw_writel( dino_dev->imr, dino_dev->hba.base_addr+DINO_IMR);
324
325 /* Emulate "Level Triggered" Interrupt
326 ** Basically, a driver is blowing it if the IRQ line is asserted
327 ** while the IRQ is disabled. But tulip.c seems to do that....
328 ** Give 'em a kluge award and a nice round of applause!
329 **
330 ** The gsc_write will generate an interrupt which invokes dino_isr().
331 ** dino_isr() will read IPR and find nothing. But then catch this
332 ** when it also checks ILR.
333 */
334 tmp = __raw_readl(dino_dev->hba.base_addr+DINO_ILR);
335 if (tmp & DINO_MASK_IRQ(local_irq)) {
336 DBG(KERN_WARNING "%s(): IRQ asserted! (ILR 0x%x)\n",
337 __FUNCTION__, tmp);
338 gsc_writel(dino_dev->txn_data, dino_dev->txn_addr);
339 }
340}
341
342static unsigned int dino_startup_irq(unsigned int irq)
343{
344 dino_enable_irq(irq);
345 return 0;
346}
347
348static struct hw_interrupt_type dino_interrupt_type = {
349 .typename = "GSC-PCI",
350 .startup = dino_startup_irq,
351 .shutdown = dino_disable_irq,
352 .enable = dino_enable_irq,
353 .disable = dino_disable_irq,
354 .ack = no_ack_irq,
355 .end = no_end_irq,
356};
357
358
359/*
360 * Handle a Processor interrupt generated by Dino.
361 *
362 * ilr_loop counter is a kluge to prevent a "stuck" IRQ line from
363 * wedging the CPU. Could be removed or made optional at some point.
364 */
365static irqreturn_t
366dino_isr(int irq, void *intr_dev, struct pt_regs *regs)
367{
368 struct dino_device *dino_dev = intr_dev;
369 u32 mask;
370 int ilr_loop = 100;
371
372 /* read and acknowledge pending interrupts */
373#ifdef DINO_DEBUG
374 dino_dev->dino_irr0 =
375#endif
376 mask = __raw_readl(dino_dev->hba.base_addr+DINO_IRR0) & DINO_IRR_MASK;
377
378 if (mask == 0)
379 return IRQ_NONE;
380
381ilr_again:
382 do {
383 int local_irq = __ffs(mask);
384 int irq = dino_dev->global_irq[local_irq];
385 DBG(KERN_DEBUG "%s(%d, %p) mask 0x%x\n",
386 __FUNCTION__, irq, intr_dev, mask);
387 __do_IRQ(irq, regs);
388 mask &= ~(1 << local_irq);
389 } while (mask);
390
391 /* Support for level triggered IRQ lines.
392 **
393 ** Dropping this support would make this routine *much* faster.
394 ** But since PCI requires level triggered IRQ line to share lines...
395 ** device drivers may assume lines are level triggered (and not
396 ** edge triggered like EISA/ISA can be).
397 */
398 mask = __raw_readl(dino_dev->hba.base_addr+DINO_ILR) & dino_dev->imr;
399 if (mask) {
400 if (--ilr_loop > 0)
401 goto ilr_again;
402 printk(KERN_ERR "Dino 0x%p: stuck interrupt %d\n",
403 dino_dev->hba.base_addr, mask);
404 return IRQ_NONE;
405 }
406 return IRQ_HANDLED;
407}
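The dispatch loop above walks the set bits of the pending mask lowest-first. A stand-alone sketch of the same pattern (plain C; POSIX ffs() returns a 1-based bit index, so it stands in for the kernel's __ffs() after subtracting one):

#include <stdio.h>
#include <strings.h>        /* ffs() */

int main(void)
{
	unsigned int mask = 0x403;  /* pending local IRQ bits: 0, 1, 10 */

	while (mask) {
		int bit = ffs(mask) - 1;    /* lowest set bit */
		printf("dispatch local IRQ %d\n", bit);
		mask &= ~(1u << bit);
	}
	return 0;
}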
408
409static void dino_assign_irq(struct dino_device *dino, int local_irq, int *irqp)
410{
411 int irq = gsc_assign_irq(&dino_interrupt_type, dino);
412 if (irq == NO_IRQ)
413 return;
414
415 *irqp = irq;
416 dino->global_irq[local_irq] = irq;
417}
418
419static void dino_choose_irq(struct parisc_device *dev, void *ctrl)
420{
421 int irq;
422 struct dino_device *dino = ctrl;
423
424 switch (dev->id.sversion) {
425 case 0x00084: irq = 8; break; /* PS/2 */
426 case 0x0008c: irq = 10; break; /* RS232 */
427 case 0x00096: irq = 8; break; /* PS/2 */
428 default: return; /* Unknown */
429 }
430
431 dino_assign_irq(dino, irq, &dev->irq);
432}
433
434static void __init
435dino_bios_init(void)
436{
437 DBG("dino_bios_init\n");
438}
439
440/*
441 * dino_card_setup - Set up the memory space for a Dino in card mode.
442 * @bus: the bus under this dino
443 *
444 * Claim an 8MB chunk of unused IO space and call the generic PCI routines
445 * to set up the addresses of the devices on this bus.
446 */
447#define _8MB 0x00800000UL
448static void __init
449dino_card_setup(struct pci_bus *bus, void __iomem *base_addr)
450{
451 int i;
452 struct dino_device *dino_dev = DINO_DEV(parisc_walk_tree(bus->bridge));
453 struct resource *res;
454 char name[128];
455 int size;
456
457 res = &dino_dev->hba.lmmio_space;
458 res->flags = IORESOURCE_MEM;
459 size = scnprintf(name, sizeof(name), "Dino LMMIO (%s)",
460 bus->bridge->bus_id);
461 res->name = kmalloc(size+1, GFP_KERNEL);
462 if(res->name)
463 strcpy((char *)res->name, name);
464 else
465 res->name = dino_dev->hba.lmmio_space.name;
466
467
468 if (ccio_allocate_resource(dino_dev->hba.dev, res, _8MB,
469 F_EXTEND(0xf0000000UL) | _8MB,
470 F_EXTEND(0xffffffffUL) &~ _8MB, _8MB) < 0) {
471 struct list_head *ln, *tmp_ln;
472
473 printk(KERN_ERR "Dino: cannot attach bus %s\n",
474 bus->bridge->bus_id);
475 /* kill the bus, we can't do anything with it */
476 list_for_each_safe(ln, tmp_ln, &bus->devices) {
477 struct pci_dev *dev = pci_dev_b(ln);
478
479 list_del(&dev->global_list);
480 list_del(&dev->bus_list);
481 }
482
483 return;
484 }
485 bus->resource[1] = res;
486 bus->resource[0] = &(dino_dev->hba.io_space);
487
488 /* Now tell dino what range it has */
489 for (i = 1; i < 31; i++) {
490 if (res->start == F_EXTEND(0xf0000000UL | (i * _8MB)))
491 break;
492 }
493 DBG("DINO GSC WRITE i=%d, start=%lx, dino addr = %lx\n",
494 i, res->start, base_addr + DINO_IO_ADDR_EN);
495 __raw_writel(1 << i, base_addr + DINO_IO_ADDR_EN);
496}
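The loop above recovers the window index by comparing addresses; equivalently, bit i of IO_ADDR_EN corresponds to the 8MB LMMIO window at 0xf0000000 + i*8MB, so the index is just bits 27:23 of the start address. A stand-alone check (illustrative values):

#include <stdio.h>
#include <assert.h>

#define _8MB 0x00800000UL

int main(void)
{
	unsigned long start = 0xf0000000UL | (3 * _8MB);

	/* window index = bits 27:23 of the LMMIO address */
	int i = (start >> 23) & 0x1f;

	assert(i == 3);
	printf("start 0x%lx -> IO_ADDR_EN bit %d (mask 0x%08x)\n",
	       start, i, 1u << i);
	return 0;
}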
497
498static void __init
499dino_card_fixup(struct pci_dev *dev)
500{
501 u32 irq_pin;
502
503 /*
504 ** REVISIT: card-mode PCI-PCI expansion chassis do exist.
505 ** Not sure they were ever productized.
506 ** Die here since we'll die later in dino_inb() anyway.
507 */
508 if ((dev->class >> 8) == PCI_CLASS_BRIDGE_PCI) {
509 panic("Card-Mode Dino: PCI-PCI Bridge not supported\n");
510 }
511
512 /*
513 ** Set Latency Timer to 0xff (not a shared bus)
514 ** Set CACHELINE_SIZE.
515 */
516 dino_cfg_write(dev->bus, dev->devfn,
517 PCI_CACHE_LINE_SIZE, 2, 0xff00 | L1_CACHE_BYTES/4);
518
519 /*
520 ** Program INT_LINE for card-mode devices.
521 ** The cards are hardwired according to this algorithm.
522 ** And it doesn't matter if PPB's are present or not since
523 ** the IRQ lines bypass the PPB.
524 **
525 ** "-1" converts INTA-D (1-4) to PCIINTA-D (0-3) range.
526 ** The additional "-1" adjusts for skewing the IRQ<->slot.
527 */
528 dino_cfg_read(dev->bus, dev->devfn, PCI_INTERRUPT_PIN, 1, &irq_pin);
529 dev->irq = (irq_pin + PCI_SLOT(dev->devfn) - 1) % 4 ;
530
531 /* Shouldn't really need to do this but it's in case someone tries
532 ** to bypass PCI services and look at the card themselves.
533 */
534 dino_cfg_write(dev->bus, dev->devfn, PCI_INTERRUPT_LINE, 1, dev->irq);
535}
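The swizzle just programmed is easy to tabulate. A stand-alone sketch (plain C; slots and pins chosen purely for illustration) printing the wired-down pin-to-PCIINT mapping used above:

#include <stdio.h>

int main(void)
{
	/* pin: 1 = INTA .. 4 = INTD, as read from PCI_INTERRUPT_PIN */
	for (int slot = 0; slot < 4; slot++)
		for (int pin = 1; pin <= 4; pin++)
			printf("slot %d INT%c -> PCIINT%c\n",
			       slot, 'A' + pin - 1,
			       'A' + (pin + slot - 1) % 4);
	return 0;
}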
536
537/* The alignment constraints for PCI bridges under dino */
538#define DINO_BRIDGE_ALIGN 0x100000
539
540
541static void __init
542dino_fixup_bus(struct pci_bus *bus)
543{
544 struct list_head *ln;
545 struct pci_dev *dev;
546 struct dino_device *dino_dev = DINO_DEV(parisc_walk_tree(bus->bridge));
547 int port_base = HBA_PORT_BASE(dino_dev->hba.hba_num);
548
549 DBG(KERN_WARNING "%s(0x%p) bus %d platform_data 0x%p\n",
550 __FUNCTION__, bus, bus->secondary,
551 bus->bridge->platform_data);
552
553 /* Firmware doesn't set up card-mode dino, so we have to */
554 if (is_card_dino(&dino_dev->hba.dev->id)) {
555 dino_card_setup(bus, dino_dev->hba.base_addr);
556 } else if(bus->parent == NULL) {
557 /* must have a dino above it, reparent the resources
558 * into the dino window */
559 int i;
560 struct resource *res = &dino_dev->hba.lmmio_space;
561
562 bus->resource[0] = &(dino_dev->hba.io_space);
563 for(i = 0; i < DINO_MAX_LMMIO_RESOURCES; i++) {
564 if(res[i].flags == 0)
565 break;
566 bus->resource[i+1] = &res[i];
567 }
568
569 } else if(bus->self) {
570 int i;
571
572 pci_read_bridge_bases(bus);
573
574
575 for(i = PCI_BRIDGE_RESOURCES; i < PCI_NUM_RESOURCES; i++) {
576 if((bus->self->resource[i].flags &
577 (IORESOURCE_IO | IORESOURCE_MEM)) == 0)
578 continue;
579
580 if(bus->self->resource[i].flags & IORESOURCE_MEM) {
581 /* There's a quirk to alignment of
582 * bridge memory resources: the start
583 * is the alignment and start-end is
584 * the size. However, firmware will
585 * have assigned start and end, so we
586 * need to take this into account */
587 bus->self->resource[i].end = bus->self->resource[i].end - bus->self->resource[i].start + DINO_BRIDGE_ALIGN;
588 bus->self->resource[i].start = DINO_BRIDGE_ALIGN;
589
590 }
591
592 DBG("DEBUG %s assigning %d [0x%lx,0x%lx]\n",
593 bus->self->dev.bus_id, i,
594 bus->self->resource[i].start,
595 bus->self->resource[i].end);
596 pci_assign_resource(bus->self, i);
597 DBG("DEBUG %s after assign %d [0x%lx,0x%lx]\n",
598 bus->self->dev.bus_id, i,
599 bus->self->resource[i].start,
600 bus->self->resource[i].end);
601 }
602 }
603
604
605 list_for_each(ln, &bus->devices) {
606 int i;
607
608 dev = pci_dev_b(ln);
609 if (is_card_dino(&dino_dev->hba.dev->id))
610 dino_card_fixup(dev);
611
612 /*
613 ** P2PB's only have 2 BARs, no IRQs.
614 ** I'd like to just ignore them for now.
615 */
616 if ((dev->class >> 8) == PCI_CLASS_BRIDGE_PCI)
617 continue;
618
619 /* Adjust the I/O Port space addresses */
620 for (i = 0; i < PCI_NUM_RESOURCES; i++) {
621 struct resource *res = &dev->resource[i];
622 if (res->flags & IORESOURCE_IO) {
623 res->start |= port_base;
624 res->end |= port_base;
625 }
626#ifdef __LP64__
627 /* Sign Extend MMIO addresses */
628 else if (res->flags & IORESOURCE_MEM) {
629 res->start |= F_EXTEND(0UL);
630 res->end |= F_EXTEND(0UL);
631 }
632#endif
633 }
634 /* null out the ROM resource if there is one (we don't
635 * care about an expansion rom on parisc, since it
636 * usually contains (x86) bios code) */
637 dev->resource[PCI_ROM_RESOURCE].flags = 0;
638
639 if(dev->irq == 255) {
640
641#define DINO_FIX_UNASSIGNED_INTERRUPTS
642#ifdef DINO_FIX_UNASSIGNED_INTERRUPTS
643
644 /* This code tries to assign an unassigned
645 * interrupt. Leave it disabled unless you
646 * *really* know what you're doing since the
647 * pin<->interrupt line mapping varies by bus
648 * and machine */
649
650 u32 irq_pin;
651
652 dino_cfg_read(dev->bus, dev->devfn,
653 PCI_INTERRUPT_PIN, 1, &irq_pin);
654 irq_pin = (irq_pin + PCI_SLOT(dev->devfn) - 1) % 4 ;
655 printk(KERN_WARNING "Device %s has undefined IRQ, "
656 "setting to %d\n", pci_name(dev), irq_pin);
657 dino_cfg_write(dev->bus, dev->devfn,
658 PCI_INTERRUPT_LINE, 1, irq_pin);
659 dino_assign_irq(dino_dev, irq_pin, &dev->irq);
660#else
661 dev->irq = 65535;
662 printk(KERN_WARNING "Device %s has unassigned IRQ\n", pci_name(dev));
663#endif
664 } else {
665
666	/* Adjust INT_LINE for that bus's region */
667 dino_assign_irq(dino_dev, dev->irq, &dev->irq);
668 }
669 }
670}
671
672
673struct pci_bios_ops dino_bios_ops = {
674 .init = dino_bios_init,
675 .fixup_bus = dino_fixup_bus
676};
677
678
679/*
680 * Initialise a DINO controller chip
681 */
682static void __init
683dino_card_init(struct dino_device *dino_dev)
684{
685 u32 brdg_feat = 0x00784e05;
686
687 __raw_writel(0x00000000, dino_dev->hba.base_addr+DINO_GMASK);
688 __raw_writel(0x00000001, dino_dev->hba.base_addr+DINO_IO_FBB_EN);
689 __raw_writel(0x00000000, dino_dev->hba.base_addr+DINO_ICR);
690
691#if 1
692/* REVISIT - should be a runtime check (eg if (CPU_IS_PCX_L) ...) */
693 /*
694 ** PCX-L processors don't support XQL like Dino wants it.
695	** PCX-L2 ignores the XQL signal, so it doesn't matter.
696 */
697 brdg_feat &= ~0x4; /* UXQL */
698#endif
699 __raw_writel( brdg_feat, dino_dev->hba.base_addr+DINO_BRDG_FEAT);
700
701 /*
702 ** Don't enable address decoding until we know which I/O range
703 ** currently is available from the host. Only affects MMIO
704 ** and not I/O port space.
705 */
706 __raw_writel(0x00000000, dino_dev->hba.base_addr+DINO_IO_ADDR_EN);
707
708 __raw_writel(0x00000000, dino_dev->hba.base_addr+DINO_DAMODE);
709 __raw_writel(0x00222222, dino_dev->hba.base_addr+DINO_PCIROR);
710 __raw_writel(0x00222222, dino_dev->hba.base_addr+DINO_PCIWOR);
711
712 __raw_writel(0x00000040, dino_dev->hba.base_addr+DINO_MLTIM);
713 __raw_writel(0x00000080, dino_dev->hba.base_addr+DINO_IO_CONTROL);
714 __raw_writel(0x0000008c, dino_dev->hba.base_addr+DINO_TLTIM);
715
716 /* Disable PAMR before writing PAPR */
717 __raw_writel(0x0000007e, dino_dev->hba.base_addr+DINO_PAMR);
718 __raw_writel(0x0000007f, dino_dev->hba.base_addr+DINO_PAPR);
719 __raw_writel(0x00000000, dino_dev->hba.base_addr+DINO_PAMR);
720
721 /*
722 ** Dino ERS encourages enabling FBB (0x6f).
723 ** We can't until we know *all* devices below us can support it.
724 ** (Something in device configuration header tells us).
725 */
726 __raw_writel(0x0000004f, dino_dev->hba.base_addr+DINO_PCICMD);
727
728 /* Somewhere, the PCI spec says give devices 1 second
729 ** to recover from the #RESET being de-asserted.
730 ** Experience shows most devices only need 10ms.
731 ** This short-cut speeds up booting significantly.
732 */
733 mdelay(pci_post_reset_delay);
734}
735
736static int __init
737dino_bridge_init(struct dino_device *dino_dev, const char *name)
738{
739 unsigned long io_addr;
740 int result, i, count=0;
741 struct resource *res, *prevres = NULL;
742 /*
743 * Decoding IO_ADDR_EN only works for Built-in Dino
744 * since PDC has already initialized this.
745 */
746
747 io_addr = __raw_readl(dino_dev->hba.base_addr + DINO_IO_ADDR_EN);
748 if (io_addr == 0) {
749 printk(KERN_WARNING "%s: No PCI devices enabled.\n", name);
750 return -ENODEV;
751 }
752
753 res = &dino_dev->hba.lmmio_space;
754 for (i = 0; i < 32; i++) {
755 unsigned long start, end;
756
757 if((io_addr & (1 << i)) == 0)
758 continue;
759
760 start = (unsigned long)(signed int)(0xf0000000 | (i << 23));
761 end = start + 8 * 1024 * 1024 - 1;
762
763 DBG("DINO RANGE %d is at 0x%lx-0x%lx\n", count,
764 start, end);
765
766 if(prevres && prevres->end + 1 == start) {
767 prevres->end = end;
768 } else {
769 if(count >= DINO_MAX_LMMIO_RESOURCES) {
770 printk(KERN_ERR "%s is out of resource windows for range %d (0x%lx-0x%lx)\n", name, count, start, end);
771 break;
772 }
773 prevres = res;
774 res->start = start;
775 res->end = end;
776 res->flags = IORESOURCE_MEM;
777 res->name = kmalloc(64, GFP_KERNEL);
778 if(res->name)
779 snprintf((char *)res->name, 64, "%s LMMIO %d",
780 name, count);
781 res++;
782 count++;
783 }
784 }
785
786 res = &dino_dev->hba.lmmio_space;
787
788 for(i = 0; i < DINO_MAX_LMMIO_RESOURCES; i++) {
789 if(res[i].flags == 0)
790 break;
791
792 result = ccio_request_resource(dino_dev->hba.dev, &res[i]);
793 if (result < 0) {
794 printk(KERN_ERR "%s: failed to claim PCI Bus address space %d (0x%lx-0x%lx)!\n", name, i, res[i].start, res[i].end);
795 return result;
796 }
797 }
798 return 0;
799}
800
801static int __init dino_common_init(struct parisc_device *dev,
802 struct dino_device *dino_dev, const char *name)
803{
804 int status;
805 u32 eim;
806 struct gsc_irq gsc_irq;
807 struct resource *res;
808
809 pcibios_register_hba(&dino_dev->hba);
810
811 pci_bios = &dino_bios_ops; /* used by pci_scan_bus() */
812 pci_port = &dino_port_ops;
813
814 /*
815 ** Note: SMP systems can make use of IRR1/IAR1 registers
816 ** But it won't buy much performance except in very
817 ** specific applications/configurations. Note Dino
818 ** still only has 11 IRQ input lines - just map some of them
819 ** to a different processor.
820 */
821 dev->irq = gsc_alloc_irq(&gsc_irq);
822 dino_dev->txn_addr = gsc_irq.txn_addr;
823 dino_dev->txn_data = gsc_irq.txn_data;
824 eim = ((u32) gsc_irq.txn_addr) | gsc_irq.txn_data;
825
826 /*
827 ** Dino needs a PA "IRQ" to get a processor's attention.
828 ** arch/parisc/kernel/irq.c returns an EIRR bit.
829 */
830 if (dev->irq < 0) {
831 printk(KERN_WARNING "%s: gsc_alloc_irq() failed\n", name);
832 return 1;
833 }
834
835 status = request_irq(dev->irq, dino_isr, 0, name, dino_dev);
836 if (status) {
837 printk(KERN_WARNING "%s: request_irq() failed with %d\n",
838 name, status);
839 return 1;
840 }
841
842 /* Support the serial port which is sometimes attached on built-in
843 * Dino / Cujo chips.
844 */
845
846 gsc_fixup_irqs(dev, dino_dev, dino_choose_irq);
847
848 /*
849 ** This enables DINO to generate interrupts when it sees
850 ** any of its inputs *change*. Just asserting an IRQ
851 ** before it's enabled (ie unmasked) isn't good enough.
852 */
853 __raw_writel(eim, dino_dev->hba.base_addr+DINO_IAR0);
854
855 /*
856 ** Some platforms don't clear Dino's IRR0 register at boot time.
857 ** Reading will clear it now.
858 */
859 __raw_readl(dino_dev->hba.base_addr+DINO_IRR0);
860
861 /* allocate I/O Port resource region */
862 res = &dino_dev->hba.io_space;
863 if (dev->id.hversion == 0x680 || is_card_dino(&dev->id)) {
864 res->name = "Dino I/O Port";
865 } else {
866 res->name = "Cujo I/O Port";
867 }
868 res->start = HBA_PORT_BASE(dino_dev->hba.hba_num);
869 res->end = res->start + (HBA_PORT_SPACE_SIZE - 1);
870 res->flags = IORESOURCE_IO; /* do not mark it busy ! */
871 if (request_resource(&ioport_resource, res) < 0) {
872 printk(KERN_ERR "%s: request I/O Port region failed "
873 "0x%lx/%lx (hpa 0x%p)\n",
874 name, res->start, res->end, dino_dev->hba.base_addr);
875 return 1;
876 }
877
878 return 0;
879}
880
881#define CUJO_RAVEN_ADDR F_EXTEND(0xf1000000UL)
882#define CUJO_FIREHAWK_ADDR F_EXTEND(0xf1604000UL)
883#define CUJO_RAVEN_BADPAGE 0x01003000UL
884#define CUJO_FIREHAWK_BADPAGE 0x01607000UL
885
886static const char *dino_vers[] = {
887 "2.0",
888 "2.1",
889 "3.0",
890 "3.1"
891};
892
893static const char *cujo_vers[] = {
894 "1.0",
895 "2.0"
896};
897
898void ccio_cujo20_fixup(struct parisc_device *dev, u32 iovp);
899
900/*
901** Determine if dino should claim this chip (return 0) or not (return 1).
902** If so, initialize the chip appropriately (card-mode vs bridge mode).
903** Much of the initialization is common though.
904*/
905static int __init
906dino_driver_callback(struct parisc_device *dev)
907{
908 struct dino_device *dino_dev; // Dino specific control struct
909 const char *version = "unknown";
910 char *name;
911 int is_cujo = 0;
912 struct pci_bus *bus;
913
914 name = "Dino";
915 if (is_card_dino(&dev->id)) {
916 version = "3.x (card mode)";
917 } else {
918 if(dev->id.hversion == 0x680) {
919 if (dev->id.hversion_rev < 4) {
920 version = dino_vers[dev->id.hversion_rev];
921 }
922 } else {
923 name = "Cujo";
924 is_cujo = 1;
925 if (dev->id.hversion_rev < 2) {
926 version = cujo_vers[dev->id.hversion_rev];
927 }
928 }
929 }
930
931 printk("%s version %s found at 0x%lx\n", name, version, dev->hpa);
932
933 if (!request_mem_region(dev->hpa, PAGE_SIZE, name)) {
934 printk(KERN_ERR "DINO: Hey! Someone took my MMIO space (0x%ld)!\n",
935 dev->hpa);
936 return 1;
937 }
938
939 /* Check for bugs */
940 if (is_cujo && dev->id.hversion_rev == 1) {
941#ifdef CONFIG_IOMMU_CCIO
942 printk(KERN_WARNING "Enabling Cujo 2.0 bug workaround\n");
943 if (dev->hpa == (unsigned long)CUJO_RAVEN_ADDR) {
944 ccio_cujo20_fixup(dev, CUJO_RAVEN_BADPAGE);
945 } else if (dev->hpa == (unsigned long)CUJO_FIREHAWK_ADDR) {
946 ccio_cujo20_fixup(dev, CUJO_FIREHAWK_BADPAGE);
947 } else {
948 printk("Don't recognise Cujo at address 0x%lx, not enabling workaround\n", dev->hpa);
949 }
950#endif
951 } else if (!is_cujo && !is_card_dino(&dev->id) &&
952 dev->id.hversion_rev < 3) {
953 printk(KERN_WARNING
954"The GSCtoPCI (Dino hrev %d) bus converter found may exhibit\n"
955"data corruption. See Service Note Numbers: A4190A-01, A4191A-01.\n"
956"Systems shipped after Aug 20, 1997 will not exhibit this problem.\n"
957"Models affected: C180, C160, C160L, B160L, and B132L workstations.\n\n",
958 dev->id.hversion_rev);
959/* REVISIT: why are C200/C240 listed in the README table but not
960** "Models affected"? Could be an omission in the original literature.
961*/
962 }
963
964 dino_dev = kmalloc(sizeof(struct dino_device), GFP_KERNEL);
965 if (!dino_dev) {
966 printk("dino_init_chip - couldn't alloc dino_device\n");
967 return 1;
968 }
969
970 memset(dino_dev, 0, sizeof(struct dino_device));
971
972 dino_dev->hba.dev = dev;
973 dino_dev->hba.base_addr = ioremap(dev->hpa, 4096); /* faster access */
974 dino_dev->hba.lmmio_space_offset = 0; /* CPU addrs == bus addrs */
975 spin_lock_init(&dino_dev->dinosaur_pen);
976 dino_dev->hba.iommu = ccio_get_iommu(dev);
977
978 if (is_card_dino(&dev->id)) {
979 dino_card_init(dino_dev);
980 } else {
981 dino_bridge_init(dino_dev, name);
982 }
983
984 if (dino_common_init(dev, dino_dev, name))
985 return 1;
986
987 dev->dev.platform_data = dino_dev;
988
989 /*
990 ** It's not used to avoid chicken/egg problems
991 ** with configuration accessor functions.
992 */
993 bus = pci_scan_bus_parented(&dev->dev, dino_current_bus,
994 &dino_cfg_ops, NULL);
995 if(bus) {
996 /* This code *depends* on scanning being single threaded
997 * if it isn't, this global bus number count will fail
998 */
999 dino_current_bus = bus->subordinate + 1;
1000 pci_bus_assign_resources(bus);
1001 } else {
1002 printk(KERN_ERR "ERROR: failed to scan PCI bus on %s (probably duplicate bus number %d)\n", dev->dev.bus_id, dino_current_bus);
1003 /* increment the bus number in case of duplicates */
1004 dino_current_bus++;
1005 }
1006 dino_dev->hba.hba_bus = bus;
1007 return 0;
1008}
1009
1010/*
1011 * Normally, we would just test sversion. But the Elroy PCI adapter has
1012 * the same sversion as Dino, so we have to check hversion as well.
1013 * Unfortunately, the J2240 PDC reports the wrong hversion for the first
1014 * Dino, so we have to test for Dino, Cujo and Dino-in-a-J2240.
1015 * For card-mode Dino, most machines report an sversion of 9D. But 715
1016 * and 725 firmware misreport it as 0x08080 for no adequately explained
1017 * reason.
1018 */
1019static struct parisc_device_id dino_tbl[] = {
1020 { HPHW_A_DMA, HVERSION_REV_ANY_ID, 0x004, 0x0009D },/* Card-mode Dino */
1021 { HPHW_A_DMA, HVERSION_REV_ANY_ID, HVERSION_ANY_ID, 0x08080 }, /* XXX */
1022 { HPHW_BRIDGE, HVERSION_REV_ANY_ID, 0x680, 0xa }, /* Bridge-mode Dino */
1023 { HPHW_BRIDGE, HVERSION_REV_ANY_ID, 0x682, 0xa }, /* Bridge-mode Cujo */
1024 { HPHW_BRIDGE, HVERSION_REV_ANY_ID, 0x05d, 0xa }, /* Dino in a J2240 */
1025 { 0, }
1026};
1027
1028static struct parisc_driver dino_driver = {
1029 .name = "Dino",
1030 .id_table = dino_tbl,
1031 .probe = dino_driver_callback,
1032};
1033
1034/*
1035 * One time initialization to let the world know Dino is here.
1036 * This is the only routine which is NOT static.
1037 * Must be called exactly once before pci_init().
1038 */
1039int __init dino_init(void)
1040{
1041 register_parisc_driver(&dino_driver);
1042 return 0;
1043}
1044
diff --git a/drivers/parisc/eisa.c b/drivers/parisc/eisa.c
new file mode 100644
index 000000000000..043d47aea75b
--- /dev/null
+++ b/drivers/parisc/eisa.c
@@ -0,0 +1,464 @@
1/*
2 * eisa.c - provide support for EISA adapters in PA-RISC machines
3 *
4 * This program is free software; you can redistribute it and/or
5 * modify it under the terms of the GNU General Public License
6 * as published by the Free Software Foundation; either version
7 * 2 of the License, or (at your option) any later version.
8 *
9 * Copyright (c) 2001 Matthew Wilcox for Hewlett Packard
10 * Copyright (c) 2001 Daniel Engstrom <5116@telia.com>
11 *
12 * There are two distinct EISA adapters. Mongoose is found in machines
13 * before the 712; then the Wax ASIC is used. To complicate matters, the
14 * Wax ASIC also includes a PS/2 and RS-232 controller, but those are
15 * dealt with elsewhere; this file is concerned only with the EISA portions
16 * of Wax.
17 *
18 *
19 * HINT:
20 * -----
21 * To allow an ISA card to work properly in the EISA slot you need to
22 * set an edge trigger level. This may be done on the palo command line
23 * by adding the kernel parameter "eisa_irq_edge=n,n2[,...]", with
24 * n and n2 as the IRQ numbers you want to use.
25 *
26 * Example: "eisa_irq_edge=10,11" allows ISA cards to operate at
27 * irq levels 10 and 11.
28 */
29
30#include <linux/init.h>
31#include <linux/ioport.h>
32#include <linux/interrupt.h>
33#include <linux/kernel.h>
34#include <linux/module.h>
35#include <linux/pci.h>
36#include <linux/sched.h>
37#include <linux/spinlock.h>
38#include <linux/eisa.h>
39
40#include <asm/byteorder.h>
41#include <asm/io.h>
42#include <asm/hardware.h>
43#include <asm/processor.h>
44#include <asm/parisc-device.h>
45#include <asm/delay.h>
46#include <asm/eisa_bus.h>
47#include <asm/eisa_eeprom.h>
48
49#if 0
50#define EISA_DBG(msg, arg... ) printk(KERN_DEBUG "eisa: " msg , ## arg )
51#else
52#define EISA_DBG(msg, arg... )
53#endif
54
55#define SNAKES_EEPROM_BASE_ADDR 0xF0810400
56#define MIRAGE_EEPROM_BASE_ADDR 0xF00C0400
57
58static DEFINE_SPINLOCK(eisa_irq_lock);
59
60void __iomem *eisa_eeprom_addr;
61
62/* We can only have one EISA adapter in the system because neither
63 * implementation supports more than one adapter instance.
64 */
65static struct eisa_ba {
66 struct pci_hba_data hba;
67 unsigned long eeprom_addr;
68 struct eisa_root_device root;
69} eisa_dev;
70
71/* Port ops */
72
73static inline unsigned long eisa_permute(unsigned short port)
74{
75 if (port & 0x300) {
76 return 0xfc000000 | ((port & 0xfc00) >> 6)
77 | ((port & 0x3f8) << 9) | (port & 7);
78 } else {
79 return 0xfc000000 | port;
80 }
81}
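The permutation above interleaves EISA slot I/O space into the GSC window. A stand-alone copy of the function with two sample ports (values chosen for illustration: 0x20 is the 8259 master, 0x3f8 the usual COM1 base):

#include <stdio.h>

static unsigned long eisa_permute(unsigned short port)
{
	if (port & 0x300)
		return 0xfc000000ul | ((port & 0xfc00) >> 6)
			| ((port & 0x3f8) << 9) | (port & 7);
	return 0xfc000000ul | port;
}

int main(void)
{
	/* 0x20 takes the direct path; 0x3f8 is permuted into the window */
	printf("port 0x020 -> 0x%08lx\n", eisa_permute(0x020));
	printf("port 0x3f8 -> 0x%08lx\n", eisa_permute(0x3f8));
	return 0;
}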
82
83unsigned char eisa_in8(unsigned short port)
84{
85 if (EISA_bus)
86 return gsc_readb(eisa_permute(port));
87 return 0xff;
88}
89
90unsigned short eisa_in16(unsigned short port)
91{
92 if (EISA_bus)
93 return le16_to_cpu(gsc_readw(eisa_permute(port)));
94 return 0xffff;
95}
96
97unsigned int eisa_in32(unsigned short port)
98{
99 if (EISA_bus)
100 return le32_to_cpu(gsc_readl(eisa_permute(port)));
101 return 0xffffffff;
102}
103
104void eisa_out8(unsigned char data, unsigned short port)
105{
106 if (EISA_bus)
107 gsc_writeb(data, eisa_permute(port));
108}
109
110void eisa_out16(unsigned short data, unsigned short port)
111{
112 if (EISA_bus)
113 gsc_writew(cpu_to_le16(data), eisa_permute(port));
114}
115
116void eisa_out32(unsigned int data, unsigned short port)
117{
118 if (EISA_bus)
119 gsc_writel(cpu_to_le32(data), eisa_permute(port));
120}
121
122#ifndef CONFIG_PCI
123/* We call these directly without PCI. See asm/io.h. */
124EXPORT_SYMBOL(eisa_in8);
125EXPORT_SYMBOL(eisa_in16);
126EXPORT_SYMBOL(eisa_in32);
127EXPORT_SYMBOL(eisa_out8);
128EXPORT_SYMBOL(eisa_out16);
129EXPORT_SYMBOL(eisa_out32);
130#endif
131
132/* Interrupt handling */
133
134/* cached interrupt mask registers */
135static int master_mask;
136static int slave_mask;
137
138/* The trigger level can be set with the
139 * eisa_irq_edge=n,n,n command-line parameter.
140 * We should really read this from the EEPROM
141 * in the future.
142 */
143/* irq 13,8,2,1,0 must be edge */
144static unsigned int eisa_irq_level; /* default to edge triggered */
145
146
147/* called by free irq */
148static void eisa_disable_irq(unsigned int irq)
149{
150 unsigned long flags;
151
152 EISA_DBG("disable irq %d\n", irq);
153 /* just mask for now */
154 spin_lock_irqsave(&eisa_irq_lock, flags);
155 if (irq & 8) {
156 slave_mask |= (1 << (irq&7));
157 eisa_out8(slave_mask, 0xa1);
158 } else {
159 master_mask |= (1 << (irq&7));
160 eisa_out8(master_mask, 0x21);
161 }
162 spin_unlock_irqrestore(&eisa_irq_lock, flags);
163 EISA_DBG("pic0 mask %02x\n", eisa_in8(0x21));
164 EISA_DBG("pic1 mask %02x\n", eisa_in8(0xa1));
165}
166
167/* called by request irq */
168static void eisa_enable_irq(unsigned int irq)
169{
170 unsigned long flags;
171 EISA_DBG("enable irq %d\n", irq);
172
173 spin_lock_irqsave(&eisa_irq_lock, flags);
174 if (irq & 8) {
175 slave_mask &= ~(1 << (irq&7));
176 eisa_out8(slave_mask, 0xa1);
177 } else {
178 master_mask &= ~(1 << (irq&7));
179 eisa_out8(master_mask, 0x21);
180 }
181 spin_unlock_irqrestore(&eisa_irq_lock, flags);
182 EISA_DBG("pic0 mask %02x\n", eisa_in8(0x21));
183 EISA_DBG("pic1 mask %02x\n", eisa_in8(0xa1));
184}
185
186static unsigned int eisa_startup_irq(unsigned int irq)
187{
188 eisa_enable_irq(irq);
189 return 0;
190}
191
192static struct hw_interrupt_type eisa_interrupt_type = {
193 .typename = "EISA",
194 .startup = eisa_startup_irq,
195 .shutdown = eisa_disable_irq,
196 .enable = eisa_enable_irq,
197 .disable = eisa_disable_irq,
198 .ack = no_ack_irq,
199 .end = no_end_irq,
200};
201
202static irqreturn_t eisa_irq(int wax_irq, void *intr_dev, struct pt_regs *regs)
203{
204 int irq = gsc_readb(0xfc01f000); /* EISA supports 16 irqs */
205 unsigned long flags;
206
207 spin_lock_irqsave(&eisa_irq_lock, flags);
208 /* read IRR command */
209 eisa_out8(0x0a, 0x20);
210 eisa_out8(0x0a, 0xa0);
211
212 EISA_DBG("irq IAR %02x 8259-1 irr %02x 8259-2 irr %02x\n",
213 irq, eisa_in8(0x20), eisa_in8(0xa0));
214
215 /* read ISR command */
216 eisa_out8(0x0a, 0x20);
217 eisa_out8(0x0a, 0xa0);
218 EISA_DBG("irq 8259-1 isr %02x imr %02x 8259-2 isr %02x imr %02x\n",
219 eisa_in8(0x20), eisa_in8(0x21), eisa_in8(0xa0), eisa_in8(0xa1));
220
221 irq &= 0xf;
222
223 /* mask irq and write eoi */
224 if (irq & 8) {
225 slave_mask |= (1 << (irq&7));
226 eisa_out8(slave_mask, 0xa1);
227 eisa_out8(0x60 | (irq&7),0xa0);/* 'Specific EOI' to slave */
228 eisa_out8(0x62,0x20); /* 'Specific EOI' to master-IRQ2 */
229
230 } else {
231 master_mask |= (1 << (irq&7));
232 eisa_out8(master_mask, 0x21);
233 eisa_out8(0x60|irq,0x20); /* 'Specific EOI' to master */
234 }
235 spin_unlock_irqrestore(&eisa_irq_lock, flags);
236
237 __do_IRQ(irq, regs);
238
239 spin_lock_irqsave(&eisa_irq_lock, flags);
240 /* unmask */
241 if (irq & 8) {
242 slave_mask &= ~(1 << (irq&7));
243 eisa_out8(slave_mask, 0xa1);
244 } else {
245 master_mask &= ~(1 << (irq&7));
246 eisa_out8(master_mask, 0x21);
247 }
248 spin_unlock_irqrestore(&eisa_irq_lock, flags);
249 return IRQ_HANDLED;
250}
251
252static irqreturn_t dummy_irq2_handler(int _, void *dev, struct pt_regs *regs)
253{
254 printk(KERN_ALERT "eisa: uhh, irq2?\n");
255 return IRQ_HANDLED;
256}
257
258static struct irqaction irq2_action = {
259 .handler = dummy_irq2_handler,
260 .name = "cascade",
261};
262
263static void init_eisa_pic(void)
264{
265 unsigned long flags;
266
267 spin_lock_irqsave(&eisa_irq_lock, flags);
268
269 eisa_out8(0xff, 0x21); /* mask during init */
270 eisa_out8(0xff, 0xa1); /* mask during init */
271
272 /* master pic */
273 eisa_out8(0x11,0x20); /* ICW1 */
274 eisa_out8(0x00,0x21); /* ICW2 */
275 eisa_out8(0x04,0x21); /* ICW3 */
276 eisa_out8(0x01,0x21); /* ICW4 */
277 eisa_out8(0x40,0x20); /* OCW2 */
278
279 /* slave pic */
280 eisa_out8(0x11,0xa0); /* ICW1 */
281 eisa_out8(0x08,0xa1); /* ICW2 */
282 eisa_out8(0x02,0xa1); /* ICW3 */
283 eisa_out8(0x01,0xa1); /* ICW4 */
284 eisa_out8(0x40,0xa0); /* OCW2 */
285
286 udelay(100);
287
288 slave_mask = 0xff;
289 master_mask = 0xfb;
290 eisa_out8(slave_mask, 0xa1); /* OCW1 */
291 eisa_out8(master_mask, 0x21); /* OCW1 */
292
293 /* setup trig level */
294 EISA_DBG("EISA edge/level %04x\n", eisa_irq_level);
295
296	eisa_out8(eisa_irq_level&0xff, 0x4d0); /* program edge/level mask */
297 eisa_out8((eisa_irq_level >> 8) & 0xff, 0x4d1);
298
299 EISA_DBG("pic0 mask %02x\n", eisa_in8(0x21));
300 EISA_DBG("pic1 mask %02x\n", eisa_in8(0xa1));
301 EISA_DBG("pic0 edge/level %02x\n", eisa_in8(0x4d0));
302 EISA_DBG("pic1 edge/level %02x\n", eisa_in8(0x4d1));
303
304 spin_unlock_irqrestore(&eisa_irq_lock, flags);
305}
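The trigger word written above is simply split across the two edge/level control registers, low byte to port 0x4d0 and high byte to 0x4d1. A stand-alone sketch (illustrative mask with IRQs 10 and 11 marked level-triggered; a set bit means level, as eisa_make_irq_level() below arranges):

#include <stdio.h>

int main(void)
{
	unsigned int level_mask = (1 << 10) | (1 << 11);

	printf("port 0x4d0 <- 0x%02x\n", level_mask & 0xff);
	printf("port 0x4d1 <- 0x%02x\n", (level_mask >> 8) & 0xff);
	return 0;
}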
306
307/* Device initialisation */
308
309#define is_mongoose(dev) (dev->id.sversion == 0x00076)
310
311static int __devinit eisa_probe(struct parisc_device *dev)
312{
313 int i, result;
314
315 char *name = is_mongoose(dev) ? "Mongoose" : "Wax";
316
317 printk(KERN_INFO "%s EISA Adapter found at 0x%08lx\n",
318 name, dev->hpa);
319
320 eisa_dev.hba.dev = dev;
321 eisa_dev.hba.iommu = ccio_get_iommu(dev);
322
323 eisa_dev.hba.lmmio_space.name = "EISA";
324 eisa_dev.hba.lmmio_space.start = F_EXTEND(0xfc000000);
325 eisa_dev.hba.lmmio_space.end = F_EXTEND(0xffbfffff);
326 eisa_dev.hba.lmmio_space.flags = IORESOURCE_MEM;
327 result = ccio_request_resource(dev, &eisa_dev.hba.lmmio_space);
328 if (result < 0) {
329 printk(KERN_ERR "EISA: failed to claim EISA Bus address space!\n");
330 return result;
331 }
332 eisa_dev.hba.io_space.name = "EISA";
333 eisa_dev.hba.io_space.start = 0;
334 eisa_dev.hba.io_space.end = 0xffff;
335	eisa_dev.hba.io_space.flags = IORESOURCE_IO;
336 result = request_resource(&ioport_resource, &eisa_dev.hba.io_space);
337 if (result < 0) {
338 printk(KERN_ERR "EISA: failed to claim EISA Bus port space!\n");
339 return result;
340 }
341 pcibios_register_hba(&eisa_dev.hba);
342
343 result = request_irq(dev->irq, eisa_irq, SA_SHIRQ, "EISA", &eisa_dev);
344 if (result) {
345 printk(KERN_ERR "EISA: request_irq failed!\n");
346 return result;
347 }
348
349 /* Reserve IRQ2 */
350 irq_desc[2].action = &irq2_action;
351
352 for (i = 0; i < 16; i++) {
353 irq_desc[i].handler = &eisa_interrupt_type;
354 }
355
356 EISA_bus = 1;
357
358 if (dev->num_addrs) {
359		/* newer firmware hands out the EEPROM address */
360 eisa_dev.eeprom_addr = dev->addr[0];
361 } else {
362 /* old firmware, need to figure out the box */
363 if (is_mongoose(dev)) {
364 eisa_dev.eeprom_addr = SNAKES_EEPROM_BASE_ADDR;
365 } else {
366 eisa_dev.eeprom_addr = MIRAGE_EEPROM_BASE_ADDR;
367 }
368 }
369 eisa_eeprom_addr = ioremap(eisa_dev.eeprom_addr, HPEE_MAX_LENGTH);
370 result = eisa_enumerator(eisa_dev.eeprom_addr, &eisa_dev.hba.io_space,
371 &eisa_dev.hba.lmmio_space);
372 init_eisa_pic();
373
374 if (result >= 0) {
375 /* FIXME : Don't enumerate the bus twice. */
376 eisa_dev.root.dev = &dev->dev;
377 dev->dev.driver_data = &eisa_dev.root;
378 eisa_dev.root.bus_base_addr = 0;
379 eisa_dev.root.res = &eisa_dev.hba.io_space;
380 eisa_dev.root.slots = result;
381 eisa_dev.root.dma_mask = 0xffffffff; /* wild guess */
382 if (eisa_root_register (&eisa_dev.root)) {
383 printk(KERN_ERR "EISA: Failed to register EISA root\n");
384 return -1;
385 }
386 }
387
388 return 0;
389}
390
391static struct parisc_device_id eisa_tbl[] = {
392 { HPHW_BA, HVERSION_REV_ANY_ID, HVERSION_ANY_ID, 0x00076 }, /* Mongoose */
393 { HPHW_BA, HVERSION_REV_ANY_ID, HVERSION_ANY_ID, 0x00090 }, /* Wax EISA */
394 { 0, }
395};
396
397MODULE_DEVICE_TABLE(parisc, eisa_tbl);
398
399static struct parisc_driver eisa_driver = {
400 .name = "EISA Bus Adapter",
401 .id_table = eisa_tbl,
402 .probe = eisa_probe,
403};
404
405void __init eisa_init(void)
406{
407 register_parisc_driver(&eisa_driver);
408}
409
410
411static unsigned int eisa_irq_configured;
412void eisa_make_irq_level(int num)
413{
414	if (eisa_irq_configured & (1 << num)) {
415 printk(KERN_WARNING
416 "IRQ %d polarity configured twice (last to level)\n",
417 num);
418 }
419 eisa_irq_level |= (1<<num); /* set the corresponding bit */
420 eisa_irq_configured |= (1<<num); /* set the corresponding bit */
421}
422
423void eisa_make_irq_edge(int num)
424{
425	if (eisa_irq_configured & (1 << num)) {
426 printk(KERN_WARNING
427 "IRQ %d polarity configured twice (last to edge)\n",
428 num);
429 }
430 eisa_irq_level &= ~(1<<num); /* clear the corresponding bit */
431 eisa_irq_configured |= (1<<num); /* set the corresponding bit */
432}
433
434static int __init eisa_irq_setup(char *str)
435{
436 char *cur = str;
437 int val;
438
439 EISA_DBG("IRQ setup\n");
440 while (cur != NULL) {
441 char *pe;
442
443 val = (int) simple_strtoul(cur, &pe, 0);
444		if (val > 15 || val < 0) {
445			printk(KERN_ERR "eisa: EISA irq values must be 0-15\n");
446			break;	/* bail out; don't re-parse the same token forever */
447		}
448 if (val == 2) {
449 val = 9;
450 }
451 eisa_make_irq_edge(val); /* clear the corresponding bit */
452 EISA_DBG("setting IRQ %d to edge-triggered mode\n", val);
453
454 if ((cur = strchr(cur, ','))) {
455 cur++;
456 } else {
457 break;
458 }
459 }
460 return 1;
461}
462
463__setup("eisa_irq_edge=", eisa_irq_setup);
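
[ For example, booting with

	eisa_irq_edge=10,11

  marks IRQ 10 and IRQ 11 as edge-triggered; a value of 2 is rewritten to 9 because
  IRQ 2 is the cascade line of the 8259 pair. ]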
464
diff --git a/drivers/parisc/eisa_eeprom.c b/drivers/parisc/eisa_eeprom.c
new file mode 100644
index 000000000000..3a1b4826e5c1
--- /dev/null
+++ b/drivers/parisc/eisa_eeprom.c
@@ -0,0 +1,134 @@
1/*
2 * EISA "eeprom" support routines
3 *
4 * Copyright (C) 2001 Thomas Bogendoerfer <tsbogend at parisc-linux.org>
5 *
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License as published by
9 * the Free Software Foundation; either version 2 of the License, or
10 * (at your option) any later version.
11 *
12 * This program is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 * GNU General Public License for more details.
16 *
17 * You should have received a copy of the GNU General Public License
18 * along with this program; if not, write to the Free Software
19 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
20 */
21
22#include <linux/module.h>
23#include <linux/init.h>
24#include <linux/kernel.h>
25#include <linux/miscdevice.h>
26#include <linux/slab.h>
27#include <linux/fs.h>
28#include <asm/io.h>
29#include <asm/uaccess.h>
30#include <asm/eisa_eeprom.h>
31
32#define EISA_EEPROM_MINOR 241
33
34static loff_t eisa_eeprom_llseek(struct file *file, loff_t offset, int origin )
35{
36 switch (origin) {
37 case 0:
38 /* nothing to do */
39 break;
40 case 1:
41 offset += file->f_pos;
42 break;
43 case 2:
44 offset += HPEE_MAX_LENGTH;
45 break;
46 }
47 return (offset >= 0 && offset < HPEE_MAX_LENGTH) ? (file->f_pos = offset) : -EINVAL;
48}
49
50static ssize_t eisa_eeprom_read(struct file * file,
51 char *buf, size_t count, loff_t *ppos )
52{
53 unsigned char *tmp;
54 ssize_t ret;
55 int i;
56
57 if (*ppos >= HPEE_MAX_LENGTH)
58 return 0;
59
60 count = *ppos + count < HPEE_MAX_LENGTH ? count : HPEE_MAX_LENGTH - *ppos;
61 tmp = kmalloc(count, GFP_KERNEL);
62 if (tmp) {
63 for (i = 0; i < count; i++)
64 tmp[i] = readb(eisa_eeprom_addr+(*ppos)++);
65
66 if (copy_to_user (buf, tmp, count))
67 ret = -EFAULT;
68 else
69 ret = count;
70 kfree (tmp);
71 } else
72 ret = -ENOMEM;
73
74 return ret;
75}
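
[ Taken together, llseek/read give a plain byte-window onto the EEPROM. A hedged
  user-space sketch (the /dev/eisa_eeprom node name is an assumption; it depends on
  how the misc minor is populated on the target system):

	/* user-space sketch: dump the EISA EEPROM to stdout */
	#include <fcntl.h>
	#include <unistd.h>

	int dump_eeprom(void)
	{
		char buf[256];
		ssize_t n;
		int fd = open("/dev/eisa_eeprom", O_RDONLY); /* write mode is rejected */

		if (fd < 0)
			return -1;
		while ((n = read(fd, buf, sizeof(buf))) > 0)
			write(1, buf, n);	/* copy to stdout */
		close(fd);
		return 0;
	}
]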
76
77static int eisa_eeprom_ioctl(struct inode *inode, struct file *file,
78 unsigned int cmd,
79 unsigned long arg)
80{
81 return -ENOTTY;
82}
83
84static int eisa_eeprom_open(struct inode *inode, struct file *file)
85{
86	if (file->f_mode & FMODE_WRITE)	/* the EEPROM is read-only */
87 return -EINVAL;
88
89 return 0;
90}
91
92static int eisa_eeprom_release(struct inode *inode, struct file *file)
93{
94 return 0;
95}
96
97/*
98 * The various file operations we support.
99 */
100static struct file_operations eisa_eeprom_fops = {
101 .owner = THIS_MODULE,
102 .llseek = eisa_eeprom_llseek,
103 .read = eisa_eeprom_read,
104 .ioctl = eisa_eeprom_ioctl,
105 .open = eisa_eeprom_open,
106 .release = eisa_eeprom_release,
107};
108
109static struct miscdevice eisa_eeprom_dev = {
110 EISA_EEPROM_MINOR,
111 "eisa_eeprom",
112 &eisa_eeprom_fops
113};
114
115static int __init eisa_eeprom_init(void)
116{
117 int retval;
118
119 if (!eisa_eeprom_addr)
120 return -ENODEV;
121
122 retval = misc_register(&eisa_eeprom_dev);
123 if (retval < 0) {
124 printk(KERN_ERR "EISA EEPROM: cannot register misc device.\n");
125 return retval;
126 }
127
128 printk(KERN_INFO "EISA EEPROM at 0x%p\n", eisa_eeprom_addr);
129 return 0;
130}
131
132MODULE_LICENSE("GPL");
133
134module_init(eisa_eeprom_init);
diff --git a/drivers/parisc/eisa_enumerator.c b/drivers/parisc/eisa_enumerator.c
new file mode 100644
index 000000000000..6d8aae003f6c
--- /dev/null
+++ b/drivers/parisc/eisa_enumerator.c
@@ -0,0 +1,521 @@
1/*
2 * eisa_enumerator.c - provide support for EISA adapters in PA-RISC machines
3 *
4 * This program is free software; you can redistribute it and/or
5 * modify it under the terms of the GNU General Public License
6 * as published by the Free Software Foundation; either version
7 * 2 of the License, or (at your option) any later version.
8 *
9 * Copyright (c) 2002 Daniel Engstrom <5116@telia.com>
10 *
11 */
12
13#include <linux/ioport.h>
14#include <linux/init.h>
15#include <linux/kernel.h>
16#include <linux/slab.h>
17#include <asm/io.h>
18#include <asm/uaccess.h>
19#include <asm/byteorder.h>
20
21#include <asm/eisa_bus.h>
22#include <asm/eisa_eeprom.h>
23
24
25/*
26 * Todo:
27 *
28 * PORT init with MASK attr and other size than byte
29 * MEMORY with other decode than 20 bit
30 * CRC stuff
31 * FREEFORM stuff
32 */
33
34#define EPI 0xc80
35#define NUM_SLOT 16
36#define SLOT2PORT(x) (x<<12)
37
38
39/* macros to handle unaligned accesses and
40 * byte swapping. The data in the EEPROM is
41 * little-endian on the big-endian PA-RISC */
42#define get_8(x) (*(u_int8_t*)(x))
43
44static inline u_int16_t get_16(const unsigned char *x)
45{
46 return (x[1] << 8) | x[0];
47}
48
49static inline u_int32_t get_32(const unsigned char *x)
50{
51 return (x[3] << 24) | (x[2] << 16) | (x[1] << 8) | x[0];
52}
53
54static inline u_int32_t get_24(const unsigned char *x)
55{
56 return (x[2] << 24) | (x[1] << 16) | (x[0] << 8);
57}
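
[ Note the extra 8-bit shift in get_24(): the three stored bytes do not form a plain
  24-bit value but, judging from their use in configure_memory() below, a memory base
  address in 256-byte units, so the result is already a byte address. For example:

	/* stored bytes 0x00 0x00 0x0d (LSB first) */
	get_24(buf) == (0x0d << 24) | (0x00 << 16) | (0x00 << 8) == 0x0d000000
]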
58
59static void print_eisa_id(char *s, u_int32_t id)
60{
61 char vendor[4];
62 int rev;
63 int device;
64
65 rev = id & 0xff;
66 id >>= 8;
67 device = id & 0xff;
68 id >>= 8;
69 vendor[3] = '\0';
70 vendor[2] = '@' + (id & 0x1f);
71 id >>= 5;
72 vendor[1] = '@' + (id & 0x1f);
73 id >>= 5;
74 vendor[0] = '@' + (id & 0x1f);
75 id >>= 5;
76
77 sprintf(s, "%s%02X%02X", vendor, device, rev);
78}
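
[ The 32-bit EISA ID packs a three-letter vendor code as three 5-bit fields ('A'
  encoded as 1, hence the '@' + n decoding above), followed by a device byte and a
  revision byte. Worked example:

	/* id = 0x22f02010:
	 *   vendor bits 0x22f0 -> 8, 23, 16 -> "HWP"
	 *   device 0x20, revision 0x10
	 * print_eisa_id() renders this as "HWP2010".
	 */
]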
79
80static int configure_memory(const unsigned char *buf,
81 struct resource *mem_parent,
82 char *name)
83{
84 int len;
85 u_int8_t c;
86 int i;
87 struct resource *res;
88
89 len=0;
90
91 for (i=0;i<HPEE_MEMORY_MAX_ENT;i++) {
92 c = get_8(buf+len);
93
94 if (NULL != (res = kmalloc(sizeof(struct resource), GFP_KERNEL))) {
95 int result;
96
97 res->name = name;
98 res->start = mem_parent->start + get_24(buf+len+2);
99 res->end = res->start + get_16(buf+len+5)*1024;
100 res->flags = IORESOURCE_MEM;
101 printk("memory %lx-%lx ", res->start, res->end);
102 result = request_resource(mem_parent, res);
103 if (result < 0) {
104 printk("\n" KERN_ERR "EISA Enumerator: failed to claim EISA Bus address space!\n");
105 return result;
106 }
107 }
108
109 len+=7;
110
111 if (!(c & HPEE_MEMORY_MORE)) {
112 break;
113 }
114 }
115
116 return len;
117}
118
119
120static int configure_irq(const unsigned char *buf)
121{
122 int len;
123 u_int8_t c;
124 int i;
125
126 len=0;
127
128 for (i=0;i<HPEE_IRQ_MAX_ENT;i++) {
129 c = get_8(buf+len);
130
131 printk("IRQ %d ", c & HPEE_IRQ_CHANNEL_MASK);
132 if (c & HPEE_IRQ_TRIG_LEVEL) {
133 eisa_make_irq_level(c & HPEE_IRQ_CHANNEL_MASK);
134 } else {
135 eisa_make_irq_edge(c & HPEE_IRQ_CHANNEL_MASK);
136 }
137
138 len+=2;
139 /* hpux seems to allow for
140 * two bytes of irq data but only defines one of
141 * them, I think */
142 if (!(c & HPEE_IRQ_MORE)) {
143 break;
144 }
145 }
146
147 return len;
148}
149
150
151static int configure_dma(const unsigned char *buf)
152{
153 int len;
154 u_int8_t c;
155 int i;
156
157 len=0;
158
159 for (i=0;i<HPEE_DMA_MAX_ENT;i++) {
160 c = get_8(buf+len);
161 printk("DMA %d ", c&HPEE_DMA_CHANNEL_MASK);
162		/* FIXME: maybe initialize the DMA channel with the timing? */
163 len+=2;
164 if (!(c & HPEE_DMA_MORE)) {
165 break;
166 }
167 }
168
169 return len;
170}
171
172static int configure_port(const unsigned char *buf, struct resource *io_parent,
173 char *board)
174{
175 int len;
176 u_int8_t c;
177 int i;
178 struct resource *res;
179 int result;
180
181 len=0;
182
183 for (i=0;i<HPEE_PORT_MAX_ENT;i++) {
184 c = get_8(buf+len);
185
186 if (NULL != (res = kmalloc(sizeof(struct resource), GFP_KERNEL))) {
187 res->name = board;
188 res->start = get_16(buf+len+1);
189 res->end = get_16(buf+len+1)+(c&HPEE_PORT_SIZE_MASK)+1;
190 res->flags = IORESOURCE_IO;
191 printk("ioports %lx-%lx ", res->start, res->end);
192 result = request_resource(io_parent, res);
193 if (result < 0) {
194 printk("\n" KERN_ERR "EISA Enumerator: failed to claim EISA Bus address space!\n");
195 return result;
196 }
197 }
198
199 len+=3;
200 if (!(c & HPEE_PORT_MORE)) {
201 break;
202 }
203 }
204
205 return len;
206}
207
208
209/* bytes 1 and 2 hold the port number to write to,
210 * and the value to write starts at byte 3.
211 * I assume that there are AND- and OR-masks
212 * here when HPEE_PORT_INIT_MASK is set, but I have
213 * not yet encountered this. */
214static int configure_port_init(const unsigned char *buf)
215{
216 int len=0;
217 u_int8_t c;
218
219 while (len<HPEE_PORT_INIT_MAX_LEN) {
220 int s=0;
221 c = get_8(buf+len);
222
223 switch (c & HPEE_PORT_INIT_WIDTH_MASK) {
224 case HPEE_PORT_INIT_WIDTH_BYTE:
225 s=1;
226 if (c & HPEE_PORT_INIT_MASK) {
227 printk("\n" KERN_WARNING "port_init: unverified mask attribute\n");
228				outb((inb(get_16(buf+len+1)) &
229					 get_8(buf+len+3)) |
230					 get_8(buf+len+4), get_16(buf+len+1));
231
232 } else {
233 outb(get_8(buf+len+3), get_16(buf+len+1));
234
235 }
236 break;
237 case HPEE_PORT_INIT_WIDTH_WORD:
238 s=2;
239 if (c & HPEE_PORT_INIT_MASK) {
240 printk(KERN_WARNING "port_init: unverified mask attribute\n");
241 outw((inw(get_16(buf+len+1)) &
242 get_16(buf+len+3)) |
243 get_16(buf+len+5),
244 get_16(buf+len+1));
245 } else {
246 outw(cpu_to_le16(get_16(buf+len+3)), get_16(buf+len+1));
247 }
248 break;
249 case HPEE_PORT_INIT_WIDTH_DWORD:
250 s=4;
251 if (c & HPEE_PORT_INIT_MASK) {
252 printk("\n" KERN_WARNING "port_init: unverified mask attribute\n");
253				outl((inl(get_16(buf+len+1)) &
254					 get_32(buf+len+3)) |
255					 get_32(buf+len+7), get_16(buf+len+1));
256 } else {
257 outl(cpu_to_le32(get_32(buf+len+3)), get_16(buf+len+1));
258 }
259
260 break;
261 default:
262 printk("\n" KERN_ERR "Invalid port init word %02x\n", c);
263 return 0;
264 }
265
266 if (c & HPEE_PORT_INIT_MASK) {
267 s*=2;
268 }
269
270 len+=s+3;
271 if (!(c & HPEE_PORT_INIT_MORE)) {
272 break;
273 }
274 }
275
276 return len;
277}
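
[ Record size bookkeeping in configure_port_init(): each entry consumes 1 type byte +
  2 port-address bytes + s value bytes, where s is 1/2/4 for byte/word/dword writes and
  doubles when HPEE_PORT_INIT_MASK supplies an AND mask ahead of the OR value; hence the
  "len += s + 3" above. For instance, a masked word write carries 2 + 2 data bytes, so
  the whole entry is 2*2 + 3 = 7 bytes long. ]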
278
279static int configure_choise(const unsigned char *buf, u_int8_t *info)
280{
281 int len;
282
283	/* this record contains the values of the function's
284	 * configuration choices and an info byte which
285	 * describes which other records to expect in this
286	 * function */
287 len = get_8(buf);
288 *info=get_8(buf+len+1);
289
290 return len+2;
291}
292
293static int configure_type_string(const unsigned char *buf)
294{
295 int len;
296
297 /* just skip past the type field */
298 len = get_8(buf);
299 if (len > 80) {
300 printk("\n" KERN_ERR "eisa_enumerator: type info field too long (%d, max is 80)\n", len);
301 }
302
303 return 1+len;
304}
305
306static int configure_function(const unsigned char *buf, int *more)
307{
308	/* the first field seems to be a two-byte length field
309	 * which is non-zero if another function follows;
310	 * I think it is the length of the function definition
311	 */
312 *more = get_16(buf);
313
314 return 2;
315}
316
317static int parse_slot_config(int slot,
318 const unsigned char *buf,
319 struct eeprom_eisa_slot_info *es,
320 struct resource *io_parent,
321 struct resource *mem_parent)
322{
323 int res=0;
324 int function_len;
325 unsigned int pos=0;
326 unsigned int maxlen;
327 int num_func=0;
328 u_int8_t flags;
329 int p0;
330
331 char *board;
332 int id_string_used=0;
333
334 if (NULL == (board = kmalloc(8, GFP_KERNEL))) {
335 return -1;
336 }
337 print_eisa_id(board, es->eisa_slot_id);
338 printk(KERN_INFO "EISA slot %d: %s %s ",
339 slot, board, es->flags&HPEE_FLAG_BOARD_IS_ISA ? "ISA" : "EISA");
340
341 maxlen = es->config_data_length < HPEE_MAX_LENGTH ?
342 es->config_data_length : HPEE_MAX_LENGTH;
343 while ((pos < maxlen) && (num_func <= es->num_functions)) {
344 pos+=configure_function(buf+pos, &function_len);
345
346 if (!function_len) {
347 break;
348 }
349 num_func++;
350 p0 = pos;
351 pos += configure_choise(buf+pos, &flags);
352
353 if (flags & HPEE_FUNCTION_INFO_F_DISABLED) {
354 /* function disabled, skip silently */
355 pos = p0 + function_len;
356 continue;
357 }
358 if (flags & HPEE_FUNCTION_INFO_CFG_FREE_FORM) {
359 /* I have no idea how to handle this */
360			printk("function %d has free-form configuration, skipping ",
361 num_func);
362 pos = p0 + function_len;
363 continue;
364 }
365
366		/* the ordering of the sections needs
367		 * more investigation.
368		 * Currently I think that memory comes before IRQ.
369		 * I assume the order is LSB to MSB in the
370		 * info flags,
371		 * eg type, memory, irq, dma, port, HPEE_PORT_init
372		 */
373
374 if (flags & HPEE_FUNCTION_INFO_HAVE_TYPE) {
375 pos += configure_type_string(buf+pos);
376 }
377
378 if (flags & HPEE_FUNCTION_INFO_HAVE_MEMORY) {
379 id_string_used=1;
380 pos += configure_memory(buf+pos, mem_parent, board);
381 }
382
383 if (flags & HPEE_FUNCTION_INFO_HAVE_IRQ) {
384 pos += configure_irq(buf+pos);
385 }
386
387 if (flags & HPEE_FUNCTION_INFO_HAVE_DMA) {
388 pos += configure_dma(buf+pos);
389 }
390
391 if (flags & HPEE_FUNCTION_INFO_HAVE_PORT) {
392 id_string_used=1;
393 pos += configure_port(buf+pos, io_parent, board);
394 }
395
396 if (flags & HPEE_FUNCTION_INFO_HAVE_PORT_INIT) {
397 pos += configure_port_init(buf+pos);
398 }
399
400 if (p0 + function_len < pos) {
401			printk("\n" KERN_ERR "eisa_enumerator: function %d length mismatch "
402 "got %d, expected %d\n",
403 num_func, pos-p0, function_len);
404 res=-1;
405 break;
406 }
407 pos = p0 + function_len;
408 }
409 printk("\n");
410 if (!id_string_used) {
411 kfree(board);
412 }
413
414 if (pos != es->config_data_length) {
415		printk(KERN_ERR "eisa_enumerator: config data length mismatch: got %d, expected %d\n",
416 pos, es->config_data_length);
417 res=-1;
418 }
419
420 if (num_func != es->num_functions) {
421		printk(KERN_ERR "eisa_enumerator: number of functions mismatch: got %d, expected %d\n",
422 num_func, es->num_functions);
423 res=-2;
424 }
425
426 return res;
427
428}
429
430static int init_slot(int slot, struct eeprom_eisa_slot_info *es)
431{
432 unsigned int id;
433
434 char id_string[8];
435
436 if (!(es->slot_info&HPEE_SLOT_INFO_NO_READID)) {
437 /* try to read the id of the board in the slot */
438 id = le32_to_cpu(inl(SLOT2PORT(slot)+EPI));
439
440 if (0xffffffff == id) {
441 /* Maybe we didn't expect a card to be here... */
442 if (es->eisa_slot_id == 0xffffffff)
443 return -1;
444
445 /* this board is not here or it does not
446 * support readid
447 */
448			printk(KERN_ERR "EISA slot %d: a configured board was not detected (",
449 slot);
450
451 print_eisa_id(id_string, es->eisa_slot_id);
452 printk(" expected %s)\n", id_string);
453
454 return -1;
455
456 }
457 if (es->eisa_slot_id != id) {
458 print_eisa_id(id_string, id);
459			printk(KERN_ERR "EISA slot %d id mismatch: got %s",
460 slot, id_string);
461
462 print_eisa_id(id_string, es->eisa_slot_id);
463			printk(" expected %s\n", id_string);
464
465 return -1;
466
467 }
468 }
469
470	/* now: we need to enable the board if
471	 * it supports enabling, run through
472	 * the port init section if present,
473	 * and finally record any interrupt polarity
474	 */
475 if (es->slot_features & HPEE_SLOT_FEATURES_ENABLE) {
476 /* enable board */
477 outb(0x01| inb(SLOT2PORT(slot)+EPI+4),
478 SLOT2PORT(slot)+EPI+4);
479 }
480
481 return 0;
482}
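
[ The ID probe above uses the architected EISA product-identifier window: each slot's
  I/O range starts at slot << 12, and the ID registers sit at offset 0xc80 (EPI).
  A sketch of the probe in isolation:

	/* sketch: read a slot's 4-byte EISA ID; 0xffffffff usually means
	 * an empty slot or a board without readid support */
	static u_int32_t read_slot_id(int slot)
	{
		return le32_to_cpu(inl(SLOT2PORT(slot) + EPI));
	}
]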
483
484
485int eisa_enumerator(unsigned long eeprom_addr,
486 struct resource *io_parent, struct resource *mem_parent)
487{
488 int i;
489 struct eeprom_header *eh;
490 static char eeprom_buf[HPEE_MAX_LENGTH];
491
492 for (i=0; i < HPEE_MAX_LENGTH; i++) {
493 eeprom_buf[i] = gsc_readb(eeprom_addr+i);
494 }
495
496 printk(KERN_INFO "Enumerating EISA bus\n");
497
498 eh = (struct eeprom_header*)(eeprom_buf);
499 for (i=0;i<eh->num_slots;i++) {
500 struct eeprom_eisa_slot_info *es;
501
502 es = (struct eeprom_eisa_slot_info*)
503 (&eeprom_buf[HPEE_SLOT_INFO(i)]);
504
505 if (-1==init_slot(i+1, es)) {
506 continue;
507 }
508
509 if (es->config_data_offset < HPEE_MAX_LENGTH) {
510 if (parse_slot_config(i+1, &eeprom_buf[es->config_data_offset],
511 es, io_parent, mem_parent)) {
512 return -1;
513 }
514 } else {
515			printk(KERN_WARNING "EISA EEPROM offset 0x%x out of range\n", es->config_data_offset);
516 return -1;
517 }
518 }
519 return eh->num_slots;
520}
521
diff --git a/drivers/parisc/gsc.c b/drivers/parisc/gsc.c
new file mode 100644
index 000000000000..af5e02526a18
--- /dev/null
+++ b/drivers/parisc/gsc.c
@@ -0,0 +1,245 @@
1/*
2 * Interrupt management for most GSC and related devices.
3 *
4 * (c) Copyright 1999 Alex deVries for The Puffin Group
5 * (c) Copyright 1999 Grant Grundler for Hewlett-Packard
6 * (c) Copyright 1999 Matthew Wilcox
7 * (c) Copyright 2000 Helge Deller
8 * (c) Copyright 2001 Matthew Wilcox for Hewlett-Packard
9 *
10 * This program is free software; you can redistribute it and/or modify
11 * it under the terms of the GNU General Public License as published by
12 * the Free Software Foundation; either version 2 of the License, or
13 * (at your option) any later version.
14 */
15
16#include <linux/bitops.h>
17#include <linux/config.h>
18#include <linux/errno.h>
19#include <linux/init.h>
20#include <linux/interrupt.h>
21#include <linux/ioport.h>
22#include <linux/module.h>
23#include <linux/slab.h>
24#include <linux/types.h>
25
26#include <asm/hardware.h>
27#include <asm/io.h>
28
29#include "gsc.h"
30
31#undef DEBUG
32
33#ifdef DEBUG
34#define DEBPRINTK printk
35#else
36#define DEBPRINTK(x,...)
37#endif
38
39int gsc_alloc_irq(struct gsc_irq *i)
40{
41 int irq = txn_alloc_irq(GSC_EIM_WIDTH);
42 if (irq < 0) {
43		printk(KERN_ERR "cannot get irq\n");
44 return irq;
45 }
46
47 i->txn_addr = txn_alloc_addr(irq);
48 i->txn_data = txn_alloc_data(irq);
49 i->irq = irq;
50
51 return irq;
52}
53
54int gsc_claim_irq(struct gsc_irq *i, int irq)
55{
56 int c = irq;
57
58 irq += CPU_IRQ_BASE; /* virtualize the IRQ first */
59
60 irq = txn_claim_irq(irq);
61 if (irq < 0) {
62		printk(KERN_ERR "cannot claim irq %d\n", c);
63 return irq;
64 }
65
66 i->txn_addr = txn_alloc_addr(irq);
67 i->txn_data = txn_alloc_data(irq);
68 i->irq = irq;
69
70 return irq;
71}
72
73EXPORT_SYMBOL(gsc_alloc_irq);
74EXPORT_SYMBOL(gsc_claim_irq);
75
76/* Common interrupt demultiplexer used by Asp, Lasi & Wax. */
77irqreturn_t gsc_asic_intr(int gsc_asic_irq, void *dev, struct pt_regs *regs)
78{
79 unsigned long irr;
80 struct gsc_asic *gsc_asic = dev;
81
82 irr = gsc_readl(gsc_asic->hpa + OFFSET_IRR);
83 if (irr == 0)
84 return IRQ_NONE;
85
86 DEBPRINTK("%s intr, mask=0x%x\n", gsc_asic->name, irr);
87
88 do {
89 int local_irq = __ffs(irr);
90 unsigned int irq = gsc_asic->global_irq[local_irq];
91 __do_IRQ(irq, regs);
92 irr &= ~(1 << local_irq);
93 } while (irr);
94
95 return IRQ_HANDLED;
96}
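
[ The demux loop above dispatches every bit set in the IRR, lowest first. For example,
  irr == 0x12 fires local IRQs 1 and 4 in that order, i.e. __do_IRQ(global_irq[1]) then
  __do_IRQ(global_irq[4]), clearing each bit as it is handled. ]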
97
98int gsc_find_local_irq(unsigned int irq, int *global_irqs, int limit)
99{
100 int local_irq;
101
102 for (local_irq = 0; local_irq < limit; local_irq++) {
103 if (global_irqs[local_irq] == irq)
104 return local_irq;
105 }
106
107 return NO_IRQ;
108}
109
110static void gsc_asic_disable_irq(unsigned int irq)
111{
112 struct gsc_asic *irq_dev = irq_desc[irq].handler_data;
113 int local_irq = gsc_find_local_irq(irq, irq_dev->global_irq, 32);
114 u32 imr;
115
116	/* Disable the IRQ line by clearing the bit in the IMR */
117	imr = gsc_readl(irq_dev->hpa + OFFSET_IMR);
118
119	DEBPRINTK(KERN_DEBUG "%s(%d) %s: IMR 0x%x\n", __FUNCTION__, irq,
120			irq_dev->name, imr);
121	imr &= ~(1 << local_irq);
122 gsc_writel(imr, irq_dev->hpa + OFFSET_IMR);
123}
124
125static void gsc_asic_enable_irq(unsigned int irq)
126{
127 struct gsc_asic *irq_dev = irq_desc[irq].handler_data;
128 int local_irq = gsc_find_local_irq(irq, irq_dev->global_irq, 32);
129 u32 imr;
130
131	/* Enable the IRQ line by setting the bit in the IMR */
132	imr = gsc_readl(irq_dev->hpa + OFFSET_IMR);
133
134	DEBPRINTK(KERN_DEBUG "%s(%d) %s: IMR 0x%x\n", __FUNCTION__, irq,
135			irq_dev->name, imr);
136	imr |= 1 << local_irq;
137 gsc_writel(imr, irq_dev->hpa + OFFSET_IMR);
138 /*
139 * FIXME: read IPR to make sure the IRQ isn't already pending.
140 * If so, we need to read IRR and manually call do_irq().
141 */
142}
143
144static unsigned int gsc_asic_startup_irq(unsigned int irq)
145{
146 gsc_asic_enable_irq(irq);
147 return 0;
148}
149
150static struct hw_interrupt_type gsc_asic_interrupt_type = {
151 .typename = "GSC-ASIC",
152 .startup = gsc_asic_startup_irq,
153 .shutdown = gsc_asic_disable_irq,
154 .enable = gsc_asic_enable_irq,
155 .disable = gsc_asic_disable_irq,
156 .ack = no_ack_irq,
157 .end = no_end_irq,
158};
159
160int gsc_assign_irq(struct hw_interrupt_type *type, void *data)
161{
162 static int irq = GSC_IRQ_BASE;
163
164 if (irq > GSC_IRQ_MAX)
165 return NO_IRQ;
166
167 irq_desc[irq].handler = type;
168 irq_desc[irq].handler_data = data;
169 return irq++;
170}
171
172void gsc_asic_assign_irq(struct gsc_asic *asic, int local_irq, int *irqp)
173{
174 int irq = asic->global_irq[local_irq];
175
176 if (irq <= 0) {
177 irq = gsc_assign_irq(&gsc_asic_interrupt_type, asic);
178 if (irq == NO_IRQ)
179 return;
180
181 asic->global_irq[local_irq] = irq;
182 }
183 *irqp = irq;
184}
185
186void gsc_fixup_irqs(struct parisc_device *parent, void *ctrl,
187 void (*choose_irq)(struct parisc_device *, void *))
188{
189 struct device *dev;
190
191 list_for_each_entry(dev, &parent->dev.children, node) {
192 struct parisc_device *padev = to_parisc_device(dev);
193
194 /* work-around for 715/64 and others which have parent
195 at path [5] and children at path [5/0/x] */
196 if (padev->id.hw_type == HPHW_FAULTY)
197 return gsc_fixup_irqs(padev, ctrl, choose_irq);
198 choose_irq(padev, ctrl);
199 }
200}
201
202int gsc_common_setup(struct parisc_device *parent, struct gsc_asic *gsc_asic)
203{
204 struct resource *res;
205 int i;
206
207 gsc_asic->gsc = parent;
208
209 /* Initialise local irq -> global irq mapping */
210 for (i = 0; i < 32; i++) {
211 gsc_asic->global_irq[i] = NO_IRQ;
212 }
213
214 /* allocate resource region */
215 res = request_mem_region(gsc_asic->hpa, 0x100000, gsc_asic->name);
216 if (res) {
217 res->flags = IORESOURCE_MEM; /* do not mark it busy ! */
218 }
219
220#if 0
221 printk(KERN_WARNING "%s IRQ %d EIM 0x%x", gsc_asic->name,
222 parent->irq, gsc_asic->eim);
223 if (gsc_readl(gsc_asic->hpa + OFFSET_IMR))
224 printk(" IMR is non-zero! (0x%x)",
225 gsc_readl(gsc_asic->hpa + OFFSET_IMR));
226 printk("\n");
227#endif
228
229 return 0;
230}
231
232extern struct parisc_driver lasi_driver;
233extern struct parisc_driver asp_driver;
234extern struct parisc_driver wax_driver;
235
236void __init gsc_init(void)
237{
238#ifdef CONFIG_GSC_LASI
239 register_parisc_driver(&lasi_driver);
240 register_parisc_driver(&asp_driver);
241#endif
242#ifdef CONFIG_GSC_WAX
243 register_parisc_driver(&wax_driver);
244#endif
245}
diff --git a/drivers/parisc/gsc.h b/drivers/parisc/gsc.h
new file mode 100644
index 000000000000..a3dc456709d7
--- /dev/null
+++ b/drivers/parisc/gsc.h
@@ -0,0 +1,47 @@
1/*
2 * drivers/parisc/gsc.h
3 * Declarations for functions in gsc.c
4 * Copyright (c) 2000-2002 Helge Deller, Matthew Wilcox
5 *
6 * Distributed under the terms of the GPL, version 2
7 */
8
9#include <linux/interrupt.h>
10#include <asm/hardware.h>
11#include <asm/parisc-device.h>
12
13#define OFFSET_IRR 0x0000 /* Interrupt request register */
14#define OFFSET_IMR 0x0004 /* Interrupt mask register */
15#define OFFSET_IPR 0x0008 /* Interrupt pending register */
16#define OFFSET_ICR 0x000C /* Interrupt control register */
17#define OFFSET_IAR 0x0010 /* Interrupt address register */
18
19/* PA I/O Architected devices support at least 5 bits in the EIM register. */
20#define GSC_EIM_WIDTH 5
21
22struct gsc_irq {
23 unsigned long txn_addr; /* IRQ "target" */
24 int txn_data; /* HW "IRQ" */
25 int irq; /* virtual IRQ */
26};
27
28struct gsc_asic {
29 struct parisc_device *gsc;
30 unsigned long hpa;
31 char *name;
32 int version;
33 int type;
34 int eim;
35 int global_irq[32];
36};
37
38int gsc_common_setup(struct parisc_device *parent, struct gsc_asic *gsc_asic);
39int gsc_alloc_irq(struct gsc_irq *dev); /* dev needs an irq */
40int gsc_claim_irq(struct gsc_irq *dev, int irq); /* dev needs this irq */
41int gsc_assign_irq(struct hw_interrupt_type *type, void *data);
42int gsc_find_local_irq(unsigned int irq, int *global_irq, int limit);
43void gsc_fixup_irqs(struct parisc_device *parent, void *ctrl,
44 void (*choose)(struct parisc_device *child, void *ctrl));
45void gsc_asic_assign_irq(struct gsc_asic *asic, int local_irq, int *irqp);
46
47irqreturn_t gsc_asic_intr(int irq, void *dev, struct pt_regs *regs);
diff --git a/drivers/parisc/hppb.c b/drivers/parisc/hppb.c
new file mode 100644
index 000000000000..e869c6020370
--- /dev/null
+++ b/drivers/parisc/hppb.c
@@ -0,0 +1,109 @@
1/*
2** hppb.c:
3** HP-PB bus driver for the NOVA and K-Class systems.
4**
5** (c) Copyright 2002 Ryan Bradetich
6** (c) Copyright 2002 Hewlett-Packard Company
7**
8** This program is free software; you can redistribute it and/or modify
9** it under the terms of the GNU General Public License as published by
10** the Free Software Foundation; either version 2 of the License, or
11** (at your option) any later version.
12**
13** This driver currently only registers the HP-PB bus and claims
14** its MMIO region; the devices on the bus are handled by their
15** own drivers.
16**
17*/
18
19#include <linux/types.h>
20#include <linux/init.h>
21#include <linux/mm.h>
22#include <linux/slab.h>
23#include <linux/ioport.h>
24
25#include <asm/io.h>
26#include <asm/hardware.h>
27#include <asm/parisc-device.h>
28
29#include <linux/pci.h>
30
31struct hppb_card {
32 unsigned long hpa;
33 struct resource mmio_region;
34 struct hppb_card *next;
35};
36
37struct hppb_card hppb_card_head = {
38 .hpa = 0,
39 .next = NULL,
40};
41
42#define IO_IO_LOW offsetof(struct bc_module, io_io_low)
43#define IO_IO_HIGH offsetof(struct bc_module, io_io_high)
44
45/**
46 * hppb_probe - Determine if the hppb driver should claim this device.
47 * @dev: The device which has been found
48 *
49 * Determine if hppb driver should claim this chip (return 0) or not
50 * (return 1). If so, initialize the chip and tell other partners in crime
51 * they have work to do.
52 */
53static int hppb_probe(struct parisc_device *dev)
54{
55 int status;
56 struct hppb_card *card = &hppb_card_head;
57
58 while(card->next) {
59 card = card->next;
60 }
61
62 if(card->hpa) {
63 card->next = kmalloc(sizeof(struct hppb_card), GFP_KERNEL);
64 if(!card->next) {
65 printk(KERN_ERR "HP-PB: Unable to allocate memory.\n");
66 return 1;
67 }
68 memset(card->next, '\0', sizeof(struct hppb_card));
69 card = card->next;
70 }
71 printk(KERN_INFO "Found GeckoBoa at 0x%lx\n", dev->hpa);
72
73 card->hpa = dev->hpa;
74 card->mmio_region.name = "HP-PB Bus";
75 card->mmio_region.flags = IORESOURCE_MEM;
76
77 card->mmio_region.start = __raw_readl(dev->hpa + IO_IO_LOW);
78 card->mmio_region.end = __raw_readl(dev->hpa + IO_IO_HIGH) - 1;
79
80 status = ccio_request_resource(dev, &card->mmio_region);
81 if(status < 0) {
82 printk(KERN_ERR "%s: failed to claim HP-PB bus space (%08lx, %08lx)\n",
83 __FILE__, card->mmio_region.start, card->mmio_region.end);
84 }
85
86 return 0;
87}
88
89
90static struct parisc_device_id hppb_tbl[] = {
91 { HPHW_BCPORT, HVERSION_REV_ANY_ID, 0x500, 0xc },
92 { 0, }
93};
94
95static struct parisc_driver hppb_driver = {
96 .name = "Gecko Boa",
97 .id_table = hppb_tbl,
98 .probe = hppb_probe,
99};
100
101/**
102 * hppb_init - HP-PB bus initialization procedure.
103 *
104 * Register this driver.
105 */
106void __init hppb_init(void)
107{
108 register_parisc_driver(&hppb_driver);
109}
diff --git a/drivers/parisc/iommu-helpers.h b/drivers/parisc/iommu-helpers.h
new file mode 100644
index 000000000000..38d9e1aba1d0
--- /dev/null
+++ b/drivers/parisc/iommu-helpers.h
@@ -0,0 +1,171 @@
1/**
2 * iommu_fill_pdir - Insert coalesced scatter/gather chunks into the I/O Pdir.
3 * @ioc: The I/O Controller.
4 * @startsg: The scatter/gather list of coalesced chunks.
5 * @nents: The number of entries in the scatter/gather list.
6 * @hint: The DMA Hint.
7 *
8 * This function inserts the coalesced scatter/gather list chunks into the
9 * I/O Controller's I/O Pdir.
10 */
11static inline unsigned int
12iommu_fill_pdir(struct ioc *ioc, struct scatterlist *startsg, int nents,
13 unsigned long hint,
14 void (*iommu_io_pdir_entry)(u64 *, space_t, unsigned long,
15 unsigned long))
16{
17 struct scatterlist *dma_sg = startsg; /* pointer to current DMA */
18 unsigned int n_mappings = 0;
19 unsigned long dma_offset = 0, dma_len = 0;
20 u64 *pdirp = NULL;
21
22 /* Horrible hack. For efficiency's sake, dma_sg starts one
23 * entry below the true start (it is immediately incremented
24 * in the loop) */
25 dma_sg--;
26
27 while (nents-- > 0) {
28 unsigned long vaddr;
29 long size;
30
31 DBG_RUN_SG(" %d : %08lx/%05x %08lx/%05x\n", nents,
32			(unsigned long)sg_dma_address(startsg), sg_dma_len(startsg),
33 sg_virt_addr(startsg), startsg->length
34 );
35
36
37 /*
38 ** Look for the start of a new DMA stream
39 */
40
41 if (sg_dma_address(startsg) & PIDE_FLAG) {
42 u32 pide = sg_dma_address(startsg) & ~PIDE_FLAG;
43
44 BUG_ON(pdirp && (dma_len != sg_dma_len(dma_sg)));
45
46 dma_sg++;
47
48 dma_len = sg_dma_len(startsg);
49 sg_dma_len(startsg) = 0;
50 dma_offset = (unsigned long) pide & ~IOVP_MASK;
51 n_mappings++;
52#if defined(ZX1_SUPPORT)
53 /* Pluto IOMMU IO Virt Address is not zero based */
54 sg_dma_address(dma_sg) = pide | ioc->ibase;
55#else
56 /* SBA, ccio, and dino are zero based.
57 * Trying to save a few CPU cycles for most users.
58 */
59 sg_dma_address(dma_sg) = pide;
60#endif
61 pdirp = &(ioc->pdir_base[pide >> IOVP_SHIFT]);
62 prefetchw(pdirp);
63 }
64
65 BUG_ON(pdirp == NULL);
66
67 vaddr = sg_virt_addr(startsg);
68 sg_dma_len(dma_sg) += startsg->length;
69 size = startsg->length + dma_offset;
70 dma_offset = 0;
71#ifdef IOMMU_MAP_STATS
72 ioc->msg_pages += startsg->length >> IOVP_SHIFT;
73#endif
74 do {
75 iommu_io_pdir_entry(pdirp, KERNEL_SPACE,
76 vaddr, hint);
77 vaddr += IOVP_SIZE;
78 size -= IOVP_SIZE;
79 pdirp++;
80 } while(unlikely(size > 0));
81 startsg++;
82 }
83 return(n_mappings);
84}
85
86
87/*
88** First pass is to walk the SG list and determine where the breaks are
89** in the DMA stream. Allocates PDIR entries but does not fill them.
90** Returns the number of DMA chunks.
91**
92** Doing the fill separate from the coalescing/allocation keeps the
93** code simpler. Future enhancement could make one pass through
94** the sglist do both.
95*/
96
97static inline unsigned int
98iommu_coalesce_chunks(struct ioc *ioc, struct scatterlist *startsg, int nents,
99 int (*iommu_alloc_range)(struct ioc *, size_t))
100{
101 struct scatterlist *contig_sg; /* contig chunk head */
102 unsigned long dma_offset, dma_len; /* start/len of DMA stream */
103 unsigned int n_mappings = 0;
104
105 while (nents > 0) {
106
107 /*
108 ** Prepare for first/next DMA stream
109 */
110 contig_sg = startsg;
111 dma_len = startsg->length;
112 dma_offset = sg_virt_addr(startsg) & ~IOVP_MASK;
113
114 /* PARANOID: clear entries */
115 sg_dma_address(startsg) = 0;
116 sg_dma_len(startsg) = 0;
117
118 /*
119 ** This loop terminates one iteration "early" since
120 ** it's always looking one "ahead".
121 */
122 while(--nents > 0) {
123 unsigned long prevstartsg_end, startsg_end;
124
125 prevstartsg_end = sg_virt_addr(startsg) +
126 startsg->length;
127
128 startsg++;
129 startsg_end = sg_virt_addr(startsg) +
130 startsg->length;
131
132 /* PARANOID: clear entries */
133 sg_dma_address(startsg) = 0;
134 sg_dma_len(startsg) = 0;
135
136 /*
137 ** First make sure current dma stream won't
138 ** exceed DMA_CHUNK_SIZE if we coalesce the
139 ** next entry.
140 */
141 if(unlikely(ROUNDUP(dma_len + dma_offset + startsg->length,
142 IOVP_SIZE) > DMA_CHUNK_SIZE))
143 break;
144
145 /*
146		** Next see if we can append the next chunk (i.e. the previous
147		** chunk must end on a page boundary and this one begin on one)
148 */
149 if (unlikely(((prevstartsg_end | sg_virt_addr(startsg)) & ~PAGE_MASK) != 0))
150 break;
151
152 dma_len += startsg->length;
153 }
154
155 /*
156 ** End of DMA Stream
157 ** Terminate last VCONTIG block.
158 ** Allocate space for DMA stream.
159 */
160 sg_dma_len(contig_sg) = dma_len;
161 dma_len = ROUNDUP(dma_len + dma_offset, IOVP_SIZE);
162 sg_dma_address(contig_sg) =
163 PIDE_FLAG
164 | (iommu_alloc_range(ioc, dma_len) << IOVP_SHIFT)
165 | dma_offset;
166 n_mappings++;
167 }
168
169 return n_mappings;
170}
171
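
[ These two helpers are meant to be called back-to-back from an IOMMU driver's map_sg
  path: one pass to coalesce and reserve pdir space, one pass to fill the entries.
  A hedged sketch of the calling pattern (the per-IOMMU callback names
  my_alloc_range/my_io_pdir_entry are illustrative, not real symbols):

	/* sketch of a map_sg path built on the helpers above */
	static int my_map_sg(struct ioc *ioc, struct scatterlist *sglist,
			     int nents, unsigned long hint)
	{
		int chunks;

		/* pass 1: find DMA-stream breaks, allocate pdir ranges */
		chunks = iommu_coalesce_chunks(ioc, sglist, nents,
					       my_alloc_range);
		/* pass 2: write the pdir entries for every page */
		iommu_fill_pdir(ioc, sglist, nents, hint, my_io_pdir_entry);

		return chunks;	/* number of coalesced DMA chunks */
	}
]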
diff --git a/drivers/parisc/iosapic.c b/drivers/parisc/iosapic.c
new file mode 100644
index 000000000000..91df0bf181dd
--- /dev/null
+++ b/drivers/parisc/iosapic.c
@@ -0,0 +1,921 @@
1/*
2** I/O Sapic Driver - PCI interrupt line support
3**
4** (c) Copyright 1999 Grant Grundler
5** (c) Copyright 1999 Hewlett-Packard Company
6**
7** This program is free software; you can redistribute it and/or modify
8** it under the terms of the GNU General Public License as published by
9** the Free Software Foundation; either version 2 of the License, or
10** (at your option) any later version.
11**
12** The I/O sapic driver manages the Interrupt Redirection Table which is
13** the control logic to convert PCI line based interrupts into a Message
14** Signaled Interrupt (aka Transaction Based Interrupt, TBI).
15**
16** Acronyms
17** --------
18** HPA Hard Physical Address (aka MMIO address)
19** IRQ Interrupt ReQuest. Implies Line based interrupt.
20** IRT Interrupt Routing Table (provided by PAT firmware)
21** IRdT Interrupt Redirection Table. IRQ line to TXN ADDR/DATA
22** table which is implemented in I/O SAPIC.
23** ISR Interrupt Service Routine. aka Interrupt handler.
24** MSI Message Signaled Interrupt. PCI 2.2 functionality.
25** aka Transaction Based Interrupt (or TBI).
26** PA Precision Architecture. HP's RISC architecture.
27** RISC Reduced Instruction Set Computer.
28**
29**
30** What's a Message Signaled Interrupt?
31** -------------------------------------
32** MSI is a write transaction which targets a processor and is similar
33** to a processor write to memory or MMIO. MSIs can be generated by I/O
34** devices as well as processors and require *architecture* to work.
35**
36** PA only supports MSI. So I/O subsystems must either natively generate
37** MSIs (e.g. GSC or HP-PB) or convert line based interrupts into MSIs
38** (e.g. PCI and EISA). IA64 supports MSIs via a "local SAPIC" which
39** acts on behalf of a processor.
40**
41** MSI allows any I/O device to interrupt any processor. This makes
42** load balancing of the interrupt processing possible on an SMP platform.
43** Interrupts are also ordered with respect to DMA data. It's possible on I/O
44** coherent systems to completely eliminate PIO reads from the interrupt
45** path. The device and driver must be designed and implemented to
46** guarantee all DMA has been issued (issues about atomicity here)
47** before the MSI is issued. I/O status can then safely be read from
48** DMA'd data by the ISR.
49**
50**
51** PA Firmware
52** -----------
53** PA-RISC platforms have two fundamentally different types of firmware.
54** For PCI devices, "Legacy" PDC initializes the "INTERRUPT_LINE" register
55** and BARs similar to a traditional PC BIOS.
56** The newer "PAT" firmware supports PDC calls which return tables.
57** PAT firmware only initializes PCI Console and Boot interface.
58** With these tables, the OS can program all other PCI devices.
59**
60** One such PAT PDC call returns the "Interrupt Routing Table" (IRT).
61** The IRT maps each PCI slot's INTA-D "output" line to an I/O SAPIC
62** input line. If the IRT is not available, this driver assumes
63** INTERRUPT_LINE register has been programmed by firmware. The latter
64** case also means online addition of PCI cards can NOT be supported
65** even if HW support is present.
66**
67** All platforms with PAT firmware to date (Oct 1999) use one Interrupt
68** Routing Table for the entire platform.
69**
70** Where's the iosapic?
71** --------------------
72** I/O sapic is part of the "Core Electronics Complex". And on HP platforms
73** it's integrated as part of the PCI bus adapter, "lba". So no bus walk
74** will discover I/O Sapic. I/O Sapic driver learns about each device
75** when lba driver advertises the presence of the I/O sapic by calling
76** iosapic_register().
77**
78**
79** IRQ handling notes
80** ------------------
81** The IO-SAPIC can indicate to the CPU which interrupt was asserted.
82** So, unlike the GSC-ASIC and Dino, we allocate one CPU interrupt per
83** IO-SAPIC interrupt and call the device driver's handler directly.
84** The IO-SAPIC driver hijacks the CPU interrupt handler so it can
85** issue the End Of Interrupt command to the IO-SAPIC.
86**
87** Overview of exported iosapic functions
88** --------------------------------------
89** (caveat: code isn't finished yet - this is just the plan)
90**
91** iosapic_init:
92** o initialize globals (lock, etc)
93** o try to read IRT. Presence of IRT determines if this is
94** a PAT platform or not.
95**
96** iosapic_register():
97** o create iosapic_info instance data structure
98** o allocate vector_info array for this iosapic
99** o initialize vector_info - read corresponding IRdT?
100**
101** iosapic_xlate_pin: (only called by fixup_irq for PAT platform)
102** o intr_pin = read cfg (INTERRUPT_PIN);
103** o if (device under PCI-PCI bridge)
104** translate slot/pin
105**
106** iosapic_fixup_irq:
107** o if PAT platform (IRT present)
108** intr_pin = iosapic_xlate_pin(isi,pcidev):
109** intr_line = find IRT entry(isi, PCI_SLOT(pcidev), intr_pin)
110** save IRT entry into vector_info later
111** write cfg INTERRUPT_LINE (with intr_line)?
112** else
113** intr_line = pcidev->irq
114** IRT pointer = NULL
115** endif
116** o locate vector_info (needs: isi, intr_line)
117** o allocate processor "irq" and get txn_addr/data
118** o request_irq(processor_irq, iosapic_interrupt, vector_info,...)
119**
120** iosapic_enable_irq:
121** o clear any pending IRQ on that line
122** o enable IRdT - call enable_irq(vector[line]->processor_irq)
123** o write EOI in case line is already asserted.
124**
125** iosapic_disable_irq:
126** o disable IRdT - call disable_irq(vector[line]->processor_irq)
127*/
128
129
130/* FIXME: determine which include files are really needed */
131#include <linux/types.h>
132#include <linux/kernel.h>
133#include <linux/spinlock.h>
134#include <linux/pci.h>
135#include <linux/init.h>
136#include <linux/slab.h>
137#include <linux/interrupt.h>
138
139#include <asm/byteorder.h> /* get in-line asm for swab */
140#include <asm/pdc.h>
141#include <asm/pdcpat.h>
142#include <asm/page.h>
143#include <asm/system.h>
144#include <asm/io.h> /* read/write functions */
145#ifdef CONFIG_SUPERIO
146#include <asm/superio.h>
147#endif
148
149#include <asm/iosapic.h>
150#include "./iosapic_private.h"
151
152#define MODULE_NAME "iosapic"
153
154/* "local" compile flags */
155#undef PCI_BRIDGE_FUNCS
156#undef DEBUG_IOSAPIC
157#undef DEBUG_IOSAPIC_IRT
158
159
160#ifdef DEBUG_IOSAPIC
161#define DBG(x...) printk(x)
162#else /* DEBUG_IOSAPIC */
163#define DBG(x...)
164#endif /* DEBUG_IOSAPIC */
165
166#ifdef DEBUG_IOSAPIC_IRT
167#define DBG_IRT(x...) printk(x)
168#else
169#define DBG_IRT(x...)
170#endif
171
172#ifdef CONFIG_64BIT
173#define COMPARE_IRTE_ADDR(irte, hpa) ((irte)->dest_iosapic_addr == (hpa))
174#else
175#define COMPARE_IRTE_ADDR(irte, hpa) \
176 ((irte)->dest_iosapic_addr == ((hpa) | 0xffffffff00000000ULL))
177#endif
178
179#define IOSAPIC_REG_SELECT 0x00
180#define IOSAPIC_REG_WINDOW 0x10
181#define IOSAPIC_REG_EOI 0x40
182
183#define IOSAPIC_REG_VERSION 0x1
184
185#define IOSAPIC_IRDT_ENTRY(idx) (0x10+(idx)*2)
186#define IOSAPIC_IRDT_ENTRY_HI(idx) (0x11+(idx)*2)
187
188static inline unsigned int iosapic_read(void __iomem *iosapic, unsigned int reg)
189{
190 writel(reg, iosapic + IOSAPIC_REG_SELECT);
191 return readl(iosapic + IOSAPIC_REG_WINDOW);
192}
193
194static inline void iosapic_write(void __iomem *iosapic, unsigned int reg, u32 val)
195{
196 writel(reg, iosapic + IOSAPIC_REG_SELECT);
197 writel(val, iosapic + IOSAPIC_REG_WINDOW);
198}
199
200#define IOSAPIC_VERSION_MASK 0x000000ff
201#define IOSAPIC_VERSION(ver) ((int) (ver & IOSAPIC_VERSION_MASK))
202
203#define IOSAPIC_MAX_ENTRY_MASK 0x00ff0000
204#define IOSAPIC_MAX_ENTRY_SHIFT 0x10
205#define IOSAPIC_IRDT_MAX_ENTRY(ver) \
206 (int) (((ver) & IOSAPIC_MAX_ENTRY_MASK) >> IOSAPIC_MAX_ENTRY_SHIFT)
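
[ All I/O SAPIC register traffic goes through the select/window pair above: write the
  register index to IOSAPIC_REG_SELECT, then read or write IOSAPIC_REG_WINDOW. A usage
  sketch combining this with the version macros (assuming the max-entry field holds the
  highest valid IRdT index, so the entry count is that value plus one):

	/* sketch: number of IRdT entries behind this I/O SAPIC */
	static int iosapic_irdt_entries(void __iomem *addr)
	{
		u32 ver = iosapic_read(addr, IOSAPIC_REG_VERSION);

		return IOSAPIC_IRDT_MAX_ENTRY(ver) + 1;
	}
]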
207
208/* bits in the "low" I/O Sapic IRdT entry */
209#define IOSAPIC_IRDT_ENABLE 0x10000
210#define IOSAPIC_IRDT_PO_LOW 0x02000
211#define IOSAPIC_IRDT_LEVEL_TRIG 0x08000
212#define IOSAPIC_IRDT_MODE_LPRI 0x00100
213
214/* bits in the "high" I/O Sapic IRdT entry */
215#define IOSAPIC_IRDT_ID_EID_SHIFT 0x10
216
217
218static spinlock_t iosapic_lock = SPIN_LOCK_UNLOCKED;
219
220static inline void iosapic_eoi(void __iomem *addr, unsigned int data)
221{
222 __raw_writel(data, addr);
223}
224
225/*
226** REVISIT: future platforms may have more than one IRT.
227** If so, the following three fields form a structure which
228** would then be linked into a list. Names are chosen to make searching
229** for them easy - not necessarily accurate (eg "cell").
230**
231** Alternative: iosapic_info could point to the IRT it's in.
232** iosapic_register() could search a list of IRT's.
233*/
234static struct irt_entry *irt_cell;
235static size_t irt_num_entry;
236
237static struct irt_entry *iosapic_alloc_irt(int num_entries)
238{
239 unsigned long a;
240
241 /* The IRT needs to be 8-byte aligned for the PDC call.
242 * Normally kmalloc would guarantee larger alignment, but
243 * if CONFIG_DEBUG_SLAB is enabled, then we can get only
244 * 4-byte alignment on 32-bit kernels
245 */
246 a = (unsigned long)kmalloc(sizeof(struct irt_entry) * num_entries + 8, GFP_KERNEL);
247 a = (a + 7) & ~7;
248 return (struct irt_entry *)a;
249}
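
[ Note that the rounded-up pointer returned here can be up to 7 bytes past the address
  kmalloc() handed back, so it must never be passed to kfree(); that is harmless in
  practice because the IRT is allocated once at boot and kept for the life of the
  system. ]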
250
251/**
252 * iosapic_load_irt - Fill in the interrupt routing table
253 * @cell_num: The cell number of the CPU we're currently executing on
254 * @irt: The address to place the new IRT at
255 * @return The number of entries found
256 *
257 * The "Get PCI INT Routing Table Size" option returns the number of
258 * entries in the PCI interrupt routing table for the cell specified
259 * in the cell_number argument. The cell number must be for a cell
260 * within the caller's protection domain.
261 *
262 * The "Get PCI INT Routing Table" option returns, for the cell
263 * specified in the cell_number argument, the PCI interrupt routing
264 * table in the caller allocated memory pointed to by mem_addr.
265 * We assume the IRT only contains entries for I/O SAPIC and
266 * calculate the size based on the size of I/O sapic entries.
267 *
268 * The PCI interrupt routing table entry format is derived from the
269 * IA64 SAL Specification 2.4. The PCI interrupt routing table defines
270 * the routing of PCI interrupt signals between the PCI device output
271 * "pins" and the IO SAPICs' input "lines" (including core I/O PCI
272 * devices). This table does NOT include information for devices/slots
273 * behind PCI to PCI bridges. See PCI to PCI Bridge Architecture Spec.
274 * for the architected method of routing of IRQ's behind PPB's.
275 */
276
277
278static int __init
279iosapic_load_irt(unsigned long cell_num, struct irt_entry **irt)
280{
281 long status; /* PDC return value status */
282 struct irt_entry *table; /* start of interrupt routing tbl */
283 unsigned long num_entries = 0UL;
284
285 BUG_ON(!irt);
286
287 if (is_pdc_pat()) {
288 /* Use pat pdc routine to get interrupt routing table size */
289 DBG("calling get_irt_size (cell %ld)\n", cell_num);
290 status = pdc_pat_get_irt_size(&num_entries, cell_num);
291 DBG("get_irt_size: %ld\n", status);
292
293 BUG_ON(status != PDC_OK);
294 BUG_ON(num_entries == 0);
295
296 /*
297 ** allocate memory for interrupt routing table
298 ** This interface isn't really right. We are assuming
299 ** the contents of the table are exclusively
300 ** for I/O sapic devices.
301 */
302 table = iosapic_alloc_irt(num_entries);
303 if (table == NULL) {
304			printk(KERN_WARNING MODULE_NAME ": read_irt: cannot "
305				"allocate memory for IRT\n");
306 return 0;
307 }
308
309 /* get PCI INT routing table */
310 status = pdc_pat_get_irt(table, cell_num);
311 DBG("pdc_pat_get_irt: %ld\n", status);
312 WARN_ON(status != PDC_OK);
313 } else {
314 /*
315 ** C3000/J5000 (and similar) platforms with Sprockets PDC
316 ** will return exactly one IRT for all iosapics.
317 ** So if we have one, don't need to get it again.
318 */
319 if (irt_cell)
320 return 0;
321
322 /* Should be using the Elroy's HPA, but it's ignored anyway */
323 status = pdc_pci_irt_size(&num_entries, 0);
324 DBG("pdc_pci_irt_size: %ld\n", status);
325
326 if (status != PDC_OK) {
327 /* Not a "legacy" system with I/O SAPIC either */
328 return 0;
329 }
330
331 BUG_ON(num_entries == 0);
332
333 table = iosapic_alloc_irt(num_entries);
334 if (!table) {
335			printk(KERN_WARNING MODULE_NAME ": read_irt: cannot "
336				"allocate memory for IRT\n");
337 return 0;
338 }
339
340 /* HPA ignored by this call too. */
341 status = pdc_pci_irt(num_entries, 0, table);
342 BUG_ON(status != PDC_OK);
343 }
344
345 /* return interrupt table address */
346 *irt = table;
347
348#ifdef DEBUG_IOSAPIC_IRT
349{
350 struct irt_entry *p = table;
351 int i;
352
353 printk(MODULE_NAME " Interrupt Routing Table (cell %ld)\n", cell_num);
354 printk(MODULE_NAME " start = 0x%p num_entries %ld entry_size %d\n",
355 table,
356 num_entries,
357 (int) sizeof(struct irt_entry));
358
359 for (i = 0 ; i < num_entries ; i++, p++) {
360 printk(MODULE_NAME " %02x %02x %02x %02x %02x %02x %02x %02x %08x%08x\n",
361 p->entry_type, p->entry_length, p->interrupt_type,
362 p->polarity_trigger, p->src_bus_irq_devno, p->src_bus_id,
363 p->src_seg_id, p->dest_iosapic_intin,
364 ((u32 *) p)[2],
365 ((u32 *) p)[3]
366 );
367 }
368}
369#endif /* DEBUG_IOSAPIC_IRT */
370
371 return num_entries;
372}
373
374
375
376void __init iosapic_init(void)
377{
378 unsigned long cell = 0;
379
380 DBG("iosapic_init()\n");
381
382#ifdef __LP64__
383 if (is_pdc_pat()) {
384 int status;
385 struct pdc_pat_cell_num cell_info;
386
387 status = pdc_pat_cell_get_number(&cell_info);
388 if (status == PDC_OK) {
389 cell = cell_info.cell_num;
390 }
391 }
392#endif
393
394 /* get interrupt routing table for this cell */
395 irt_num_entry = iosapic_load_irt(cell, &irt_cell);
396 if (irt_num_entry == 0)
397 irt_cell = NULL; /* old PDC w/o iosapic */
398}
399
400
401/*
402** Return the IRT entry in case we need to look something else up.
403*/
404static struct irt_entry *
405irt_find_irqline(struct iosapic_info *isi, u8 slot, u8 intr_pin)
406{
407 struct irt_entry *i = irt_cell;
408 int cnt; /* track how many entries we've looked at */
409 u8 irq_devno = (slot << IRT_DEV_SHIFT) | (intr_pin-1);
410
411 DBG_IRT("irt_find_irqline() SLOT %d pin %d\n", slot, intr_pin);
412
413 for (cnt=0; cnt < irt_num_entry; cnt++, i++) {
414
415 /*
416 ** Validate: entry_type, entry_length, interrupt_type
417 **
418 ** Difference between validate vs compare is the former
419 ** should print debug info and is not expected to "fail"
420 ** on current platforms.
421 */
422 if (i->entry_type != IRT_IOSAPIC_TYPE) {
423 DBG_IRT(KERN_WARNING MODULE_NAME ":find_irqline(0x%p): skipping entry %d type %d\n", i, cnt, i->entry_type);
424 continue;
425 }
426
427 if (i->entry_length != IRT_IOSAPIC_LENGTH) {
428 DBG_IRT(KERN_WARNING MODULE_NAME ":find_irqline(0x%p): skipping entry %d length %d\n", i, cnt, i->entry_length);
429 continue;
430 }
431
432 if (i->interrupt_type != IRT_VECTORED_INTR) {
433 DBG_IRT(KERN_WARNING MODULE_NAME ":find_irqline(0x%p): skipping entry %d interrupt_type %d\n", i, cnt, i->interrupt_type);
434 continue;
435 }
436
437 if (!COMPARE_IRTE_ADDR(i, isi->isi_hpa))
438 continue;
439
440 if ((i->src_bus_irq_devno & IRT_IRQ_DEVNO_MASK) != irq_devno)
441 continue;
442
443 /*
444		** Ignore: src_bus_id and src_seg_id correlate with
445 ** iosapic_info->isi_hpa on HP platforms.
446 ** If needed, pass in "PFA" (aka config space addr)
447 ** instead of slot.
448 */
449
450 /* Found it! */
451 return i;
452 }
453
454 printk(KERN_WARNING MODULE_NAME ": 0x%lx : no IRT entry for slot %d, pin %d\n",
455 isi->isi_hpa, slot, intr_pin);
456 return NULL;
457}
458
459
460/*
461** xlate_pin() supports the skewing of IRQ lines done by subsidiary bridges.
462** Legacy PDC already does this translation for us and stores it in INTR_LINE.
463**
464** PAT PDC needs to basically do what legacy PDC does:
465** o read PIN
466** o adjust PIN in case device is "behind" a PPB
467** (eg 4-port 100BT and SCSI/LAN "Combo Card")
468** o convert slot/pin to I/O SAPIC input line.
469**
470** HP platforms only support:
471** o one level of skewing for any number of PPBs
472** o only support PCI-PCI Bridges.
473*/
474static struct irt_entry *
475iosapic_xlate_pin(struct iosapic_info *isi, struct pci_dev *pcidev)
476{
477 u8 intr_pin, intr_slot;
478
479 pci_read_config_byte(pcidev, PCI_INTERRUPT_PIN, &intr_pin);
480
481 DBG_IRT("iosapic_xlate_pin(%s) SLOT %d pin %d\n",
482 pcidev->slot_name, PCI_SLOT(pcidev->devfn), intr_pin);
483
484 if (intr_pin == 0) {
485 /* The device does NOT support/use IRQ lines. */
486 return NULL;
487 }
488
489 /* Check if pcidev behind a PPB */
490 if (NULL != pcidev->bus->self) {
491 /* Convert pcidev INTR_PIN into something we
492 ** can lookup in the IRT.
493 */
494#ifdef PCI_BRIDGE_FUNCS
495 /*
496 ** Proposal #1:
497 **
498 ** call implementation specific translation function
499 ** This is architecturally "cleaner". HP-UX doesn't
500 ** support other secondary bus types (eg. E/ISA) directly.
501 ** May be needed for other processor (eg IA64) architectures
502		** or by some ambitious soul who wants to watch TV.
503 */
504 if (pci_bridge_funcs->xlate_intr_line) {
505 intr_pin = pci_bridge_funcs->xlate_intr_line(pcidev);
506 }
507#else /* PCI_BRIDGE_FUNCS */
508 struct pci_bus *p = pcidev->bus;
509 /*
510 ** Proposal #2:
511 ** The "pin" is skewed ((pin + dev - 1) % 4).
512 **
513 ** This isn't very clean since I/O SAPIC must assume:
514 ** - all platforms only have PCI busses.
515 ** - only PCI-PCI bridge (eg not PCI-EISA, PCI-PCMCIA)
516 ** - IRQ routing is only skewed once regardless of
517 ** the number of PPB's between iosapic and device.
518 ** (Bit3 expansion chassis follows this rule)
519 **
520 ** Advantage is it's really easy to implement.
521 */
522 intr_pin = ((intr_pin-1)+PCI_SLOT(pcidev->devfn)) % 4;
523 intr_pin++; /* convert back to INTA-D (1-4) */
524#endif /* PCI_BRIDGE_FUNCS */
525
526 /*
527		** Locate the host slot of the PPB nearest the Host bus
528 ** adapter.
529 */
530 while (NULL != p->parent->self)
531 p = p->parent;
532
533 intr_slot = PCI_SLOT(p->self->devfn);
534 } else {
535 intr_slot = PCI_SLOT(pcidev->devfn);
536 }
537 DBG_IRT("iosapic_xlate_pin: bus %d slot %d pin %d\n",
538 pcidev->bus->secondary, intr_slot, intr_pin);
539
540 return irt_find_irqline(isi, intr_slot, intr_pin);
541}
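
[ A worked example of the swizzle above: a device in slot 3 behind one PPB that reports
  INTB (pin 2) yields ((2 - 1) + 3) % 4 = 0, and the increment converts that back to
  pin 1, i.e. INTA at the bridge; the slot then used for the IRT lookup is the slot of
  the PPB nearest the host bus adapter. ]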
542
543static void iosapic_rd_irt_entry(struct vector_info *vi , u32 *dp0, u32 *dp1)
544{
545 struct iosapic_info *isp = vi->iosapic;
546 u8 idx = vi->irqline;
547
548 *dp0 = iosapic_read(isp->addr, IOSAPIC_IRDT_ENTRY(idx));
549 *dp1 = iosapic_read(isp->addr, IOSAPIC_IRDT_ENTRY_HI(idx));
550}
551
552
553static void iosapic_wr_irt_entry(struct vector_info *vi, u32 dp0, u32 dp1)
554{
555 struct iosapic_info *isp = vi->iosapic;
556
557 DBG_IRT("iosapic_wr_irt_entry(): irq %d hpa %lx 0x%x 0x%x\n",
558 vi->irqline, isp->isi_hpa, dp0, dp1);
559
560 iosapic_write(isp->addr, IOSAPIC_IRDT_ENTRY(vi->irqline), dp0);
561
562 /* Read the window register to flush the writes down to HW */
563 dp0 = readl(isp->addr+IOSAPIC_REG_WINDOW);
564
565 iosapic_write(isp->addr, IOSAPIC_IRDT_ENTRY_HI(vi->irqline), dp1);
566
567 /* Read the window register to flush the writes down to HW */
568 dp1 = readl(isp->addr+IOSAPIC_REG_WINDOW);
569}
570
571/*
572** set_irt prepares the data (dp0, dp1) according to the vector_info
573** and target cpu (id_eid). dp0/dp1 are then used to program I/O SAPIC
574** IRdT for the given "vector" (aka IRQ line).
575*/
576static void
577iosapic_set_irt_data( struct vector_info *vi, u32 *dp0, u32 *dp1)
578{
579 u32 mode = 0;
580 struct irt_entry *p = vi->irte;
581
582 if ((p->polarity_trigger & IRT_PO_MASK) == IRT_ACTIVE_LO)
583 mode |= IOSAPIC_IRDT_PO_LOW;
584
585 if (((p->polarity_trigger >> IRT_EL_SHIFT) & IRT_EL_MASK) == IRT_LEVEL_TRIG)
586 mode |= IOSAPIC_IRDT_LEVEL_TRIG;
587
588 /*
589 ** IA64 REVISIT
590 ** PA doesn't support EXTINT or LPRIO bits.
591 */
592
593 *dp0 = mode | (u32) vi->txn_data;
594
595 /*
596 ** Extracting id_eid isn't a real clean way of getting it.
597 ** But the encoding is the same for both PA and IA64 platforms.
598 */
599 if (is_pdc_pat()) {
600 /*
601 ** PAT PDC just hands it to us "right".
602 ** txn_addr comes from cpu_data[x].txn_addr.
603 */
604 *dp1 = (u32) (vi->txn_addr);
605 } else {
606 /*
607	** eg if base_addr == 0xfffa0000,
608 ** we want to get 0xa0ff0000.
609 **
610 ** eid 0x0ff00000 -> 0x00ff0000
611 ** id 0x000ff000 -> 0xff000000
612 */
613 *dp1 = (((u32)vi->txn_addr & 0x0ff00000) >> 4) |
614 (((u32)vi->txn_addr & 0x000ff000) << 12);
615 }
616 DBG_IRT("iosapic_set_irt_data(): 0x%x 0x%x\n", *dp0, *dp1);
617}
618
619
620static struct vector_info *iosapic_get_vector(unsigned int irq)
621{
622 return irq_desc[irq].handler_data;
623}
624
625static void iosapic_disable_irq(unsigned int irq)
626{
627 unsigned long flags;
628 struct vector_info *vi = iosapic_get_vector(irq);
629 u32 d0, d1;
630
631 spin_lock_irqsave(&iosapic_lock, flags);
632 iosapic_rd_irt_entry(vi, &d0, &d1);
633 d0 |= IOSAPIC_IRDT_ENABLE;
634 iosapic_wr_irt_entry(vi, d0, d1);
635 spin_unlock_irqrestore(&iosapic_lock, flags);
636}
637
638static void iosapic_enable_irq(unsigned int irq)
639{
640 struct vector_info *vi = iosapic_get_vector(irq);
641 u32 d0, d1;
642
643 /* data is initialized by fixup_irq */
644 WARN_ON(vi->txn_irq == 0);
645
646 iosapic_set_irt_data(vi, &d0, &d1);
647 iosapic_wr_irt_entry(vi, d0, d1);
648
649#ifdef DEBUG_IOSAPIC_IRT
650{
651 u32 *t = (u32 *) ((ulong) vi->eoi_addr & ~0xffUL);
652 printk("iosapic_enable_irq(): regs %p", vi->eoi_addr);
653 for ( ; t < vi->eoi_addr; t++)
654 printk(" %x", readl(t));
655 printk("\n");
656}
657
658printk("iosapic_enable_irq(): sel ");
659{
660 struct iosapic_info *isp = vi->iosapic;
661
662 for (d0=0x10; d0<0x1e; d0++) {
663 d1 = iosapic_read(isp->addr, d0);
664 printk(" %x", d1);
665 }
666}
667printk("\n");
668#endif
669
670 /*
671 * Issuing I/O SAPIC an EOI causes an interrupt IFF IRQ line is
672 * asserted. IRQ generally should not be asserted when a driver
673	 * enables its IRQ. It can lead to "interesting" race conditions
674 * in the driver initialization sequence.
675 */
676 DBG(KERN_DEBUG "enable_irq(%d): eoi(%p, 0x%x)\n", irq,
677 vi->eoi_addr, vi->eoi_data);
678 iosapic_eoi(vi->eoi_addr, vi->eoi_data);
679}
680
681/*
682 * PARISC only supports PCI devices below I/O SAPIC.
683 * PCI only supports level triggered in order to share IRQ lines.
684 * ergo I/O SAPIC must always issue EOI on parisc.
685 *
686 * i386/ia64 support ISA devices and have to deal with
687 * edge-triggered interrupts too.
688 */
689static void iosapic_end_irq(unsigned int irq)
690{
691 struct vector_info *vi = iosapic_get_vector(irq);
692 DBG(KERN_DEBUG "end_irq(%d): eoi(%p, 0x%x)\n", irq,
693 vi->eoi_addr, vi->eoi_data);
694 iosapic_eoi(vi->eoi_addr, vi->eoi_data);
695}
696
697static unsigned int iosapic_startup_irq(unsigned int irq)
698{
699 iosapic_enable_irq(irq);
700 return 0;
701}
702
703static struct hw_interrupt_type iosapic_interrupt_type = {
704 .typename = "IO-SAPIC-level",
705 .startup = iosapic_startup_irq,
706 .shutdown = iosapic_disable_irq,
707 .enable = iosapic_enable_irq,
708 .disable = iosapic_disable_irq,
709 .ack = no_ack_irq,
710 .end = iosapic_end_irq,
711// .set_affinity = iosapic_set_affinity_irq,
712};
713
714int iosapic_fixup_irq(void *isi_obj, struct pci_dev *pcidev)
715{
716 struct iosapic_info *isi = isi_obj;
717 struct irt_entry *irte = NULL; /* only used if PAT PDC */
718 struct vector_info *vi;
719 int isi_line; /* line used by device */
720
721 if (!isi) {
722 printk(KERN_WARNING MODULE_NAME ": hpa not registered for %s\n",
723 pci_name(pcidev));
724 return -1;
725 }
726
727#ifdef CONFIG_SUPERIO
728 /*
729 * HACK ALERT! (non-compliant PCI device support)
730 *
731	 * All SuckyIO interrupts are routed through the PICs on function 1.
732 * But SuckyIO OHCI USB controller gets an IRT entry anyway because
733 * it advertises INT D for INT_PIN. Use that IRT entry to get the
734 * SuckyIO interrupt routing for PICs on function 1 (*BLEECCHH*).
735 */
736 if (is_superio_device(pcidev)) {
737 /* We must call superio_fixup_irq() to register the pdev */
738 pcidev->irq = superio_fixup_irq(pcidev);
739
740 /* Don't return if need to program the IOSAPIC's IRT... */
741 if (PCI_FUNC(pcidev->devfn) != SUPERIO_USB_FN)
742 return pcidev->irq;
743 }
744#endif /* CONFIG_SUPERIO */
745
746 /* lookup IRT entry for isi/slot/pin set */
747 irte = iosapic_xlate_pin(isi, pcidev);
748 if (!irte) {
749 printk("iosapic: no IRTE for %s (IRQ not connected?)\n",
750 pci_name(pcidev));
751 return -1;
752 }
753 DBG_IRT("iosapic_fixup_irq(): irte %p %x %x %x %x %x %x %x %x\n",
754 irte,
755 irte->entry_type,
756 irte->entry_length,
757 irte->polarity_trigger,
758 irte->src_bus_irq_devno,
759 irte->src_bus_id,
760 irte->src_seg_id,
761 irte->dest_iosapic_intin,
762 (u32) irte->dest_iosapic_addr);
763 isi_line = irte->dest_iosapic_intin;
764
765 /* get vector info for this input line */
766 vi = isi->isi_vector + isi_line;
767 DBG_IRT("iosapic_fixup_irq: line %d vi 0x%p\n", isi_line, vi);
768
769 /* If this IRQ line has already been setup, skip it */
770 if (vi->irte)
771 goto out;
772
773 vi->irte = irte;
774
775 /*
776 * Allocate processor IRQ
777 *
778 * XXX/FIXME The txn_alloc_irq() code and related code should be
779 * moved to enable_irq(). That way we only allocate processor IRQ
780 * bits for devices that actually have drivers claiming them.
781 * Right now we assign an IRQ to every PCI device present,
782 * regardless of whether it's used or not.
783 */
784 vi->txn_irq = txn_alloc_irq(8);
785
786 if (vi->txn_irq < 0)
787 panic("I/O sapic: couldn't get TXN IRQ\n");
788
789 /* enable_irq() will use txn_* to program IRdT */
790 vi->txn_addr = txn_alloc_addr(vi->txn_irq);
791 vi->txn_data = txn_alloc_data(vi->txn_irq);
792
793 vi->eoi_addr = isi->addr + IOSAPIC_REG_EOI;
794 vi->eoi_data = cpu_to_le32(vi->txn_data);
795
796 cpu_claim_irq(vi->txn_irq, &iosapic_interrupt_type, vi);
797
798 out:
799 pcidev->irq = vi->txn_irq;
800
801 DBG_IRT("iosapic_fixup_irq() %d:%d %x %x line %d irq %d\n",
802 PCI_SLOT(pcidev->devfn), PCI_FUNC(pcidev->devfn),
803 pcidev->vendor, pcidev->device, isi_line, pcidev->irq);
804
805 return pcidev->irq;
806}
807
808
809/*
810** squirrel away the I/O Sapic Version
811*/
812static unsigned int
813iosapic_rd_version(struct iosapic_info *isi)
814{
815 return iosapic_read(isi->addr, IOSAPIC_REG_VERSION);
816}
817
818
819/*
820** iosapic_register() is called by "drivers" with an integrated I/O SAPIC.
821** Caller must be certain they have an I/O SAPIC and know its MMIO address.
822**
823** o allocate iosapic_info and add it to the list
824** o read iosapic version and squirrel that away
825** o read size of IRdT.
826** o allocate and initialize isi_vector[]
827** o allocate irq region
828*/
829void *iosapic_register(unsigned long hpa)
830{
831 struct iosapic_info *isi = NULL;
832 struct irt_entry *irte = irt_cell;
833 struct vector_info *vip;
834 int cnt; /* track how many entries we've looked at */
835
836 /*
837 * Astro based platforms can only support PCI OLARD if they implement
838 * PAT PDC. Legacy PDC omits LBAs with no PCI devices from the IRT.
839 * Search the IRT and ignore iosapic's which aren't in the IRT.
840 */
841 for (cnt=0; cnt < irt_num_entry; cnt++, irte++) {
842 WARN_ON(IRT_IOSAPIC_TYPE != irte->entry_type);
843 if (COMPARE_IRTE_ADDR(irte, hpa))
844 break;
845 }
846
847 if (cnt >= irt_num_entry) {
848 DBG("iosapic_register() ignoring 0x%lx (NOT FOUND)\n", hpa);
849 return NULL;
850 }
851
852 isi = (struct iosapic_info *)kmalloc(sizeof(struct iosapic_info), GFP_KERNEL);
853 if (!isi) {
854 BUG();
855 return NULL;
856 }
857
858 memset(isi, 0, sizeof(struct iosapic_info));
859
860 isi->addr = ioremap(hpa, 4096);
861 isi->isi_hpa = hpa;
862 isi->isi_version = iosapic_rd_version(isi);
863 isi->isi_num_vectors = IOSAPIC_IRDT_MAX_ENTRY(isi->isi_version) + 1;
864
865 vip = isi->isi_vector = (struct vector_info *)
866 kmalloc(sizeof(struct vector_info) * isi->isi_num_vectors, GFP_KERNEL);
867 if (vip == NULL) {
868 kfree(isi);
869 return NULL;
870 }
871
872 memset(vip, 0, sizeof(struct vector_info) * isi->isi_num_vectors);
873
874 for (cnt=0; cnt < isi->isi_num_vectors; cnt++, vip++) {
875 vip->irqline = (unsigned char) cnt;
876 vip->iosapic = isi;
877 }
878 return isi;
879}
880
881
882#ifdef DEBUG_IOSAPIC
883
884static void
885iosapic_prt_irt(void *irt, long num_entry)
886{
887 unsigned int i, *irp = (unsigned int *) irt;
888
889
890 printk(KERN_DEBUG MODULE_NAME ": Interrupt Routing Table (%lx entries)\n", num_entry);
891
892 for (i=0; i<num_entry; i++, irp += 4) {
893 printk(KERN_DEBUG "%p : %2d %.8x %.8x %.8x %.8x\n",
894 irp, i, irp[0], irp[1], irp[2], irp[3]);
895 }
896}
897
898
899static void
900iosapic_prt_vi(struct vector_info *vi)
901{
902 printk(KERN_DEBUG MODULE_NAME ": vector_info[%d] is at %p\n", vi->irqline, vi);
903 printk(KERN_DEBUG "\t\tstatus: %.4x\n", vi->status);
904 printk(KERN_DEBUG "\t\ttxn_irq: %d\n", vi->txn_irq);
905 printk(KERN_DEBUG "\t\ttxn_addr: %lx\n", vi->txn_addr);
906 printk(KERN_DEBUG "\t\ttxn_data: %lx\n", vi->txn_data);
907 printk(KERN_DEBUG "\t\teoi_addr: %p\n", vi->eoi_addr);
908 printk(KERN_DEBUG "\t\teoi_data: %x\n", vi->eoi_data);
909}
910
911
912static void
913iosapic_prt_isi(struct iosapic_info *isi)
914{
915 printk(KERN_DEBUG MODULE_NAME ": io_sapic_info at %p\n", isi);
916 printk(KERN_DEBUG "\t\tisi_hpa: %lx\n", isi->isi_hpa);
917 printk(KERN_DEBUG "\t\tisi_status: %x\n", isi->isi_status);
918 printk(KERN_DEBUG "\t\tisi_version: %x\n", isi->isi_version);
919 printk(KERN_DEBUG "\t\tisi_vector: %p\n", isi->isi_vector);
920}
921#endif /* DEBUG_IOSAPIC */
diff --git a/drivers/parisc/iosapic_private.h b/drivers/parisc/iosapic_private.h
new file mode 100644
index 000000000000..41e7ec2a44aa
--- /dev/null
+++ b/drivers/parisc/iosapic_private.h
@@ -0,0 +1,188 @@
1/*
2 * Private structs/constants for PARISC IOSAPIC support
3 *
4 * Copyright (C) 2000 Hewlett Packard (Grant Grundler)
5 * Copyright (C) 2000,2003 Grant Grundler (grundler at parisc-linux.org)
6 * Copyright (C) 2002 Matthew Wilcox (willy at parisc-linux.org)
7 *
8 *
9 * This program is free software; you can redistribute it and/or modify
10 * it under the terms of the GNU General Public License as published by
11 * the Free Software Foundation; either version 2 of the License, or
12 * (at your option) any later version.
13 *
14 * This program is distributed in the hope that it will be useful,
15 * but WITHOUT ANY WARRANTY; without even the implied warranty of
16 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
17 * GNU General Public License for more details.
18 *
19 * You should have received a copy of the GNU General Public License
20 * along with this program; if not, write to the Free Software
21 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
22 */
23
24/*
25** This file is private to iosapic driver.
26** If stuff needs to be used by another driver, move it to a common file.
27**
28** WARNING: fields in most data structures here are ordered to make sure
29** they pack nicely for 64-bit compilation. (ie sizeof(long) == 8)
30*/
31
32
33/*
34** Interrupt Routing Stuff
35** -----------------------
36** The interrupt routing table consists of entries derived from
37** MP Specification Draft 1.5. There is one interrupt routing
38** table per cell. N- and L-class consist of a single cell.
39*/
40struct irt_entry {
41
42 /* Entry Type 139 identifies an I/O SAPIC interrupt entry */
43 u8 entry_type;
44
45 /* Entry Length 16 indicates entry is 16 bytes long */
46 u8 entry_length;
47
48 /*
49 ** Interrupt Type of 0 indicates a vectored interrupt,
50 ** all other values are reserved
51 */
52 u8 interrupt_type;
53
54 /*
55 ** PO and EL
56 ** Polarity of SAPIC I/O input signals:
57 ** 00 = Reserved
58 ** 01 = Active high
59 ** 10 = Reserved
60 ** 11 = Active low
61 ** Trigger mode of SAPIC I/O input signals:
62 ** 00 = Reserved
63 ** 01 = Edge-triggered
64 ** 10 = Reserved
65 ** 11 = Level-triggered
66 */
67 u8 polarity_trigger;
68
69 /*
70 ** IRQ and DEVNO
71 ** irq identifies PCI interrupt signal where
72 ** 0x0 corresponds to INT_A#,
73 ** 0x1 corresponds to INT_B#,
74 ** 0x2 corresponds to INT_C#
75 ** 0x3 corresponds to INT_D#
76 ** PCI device number where interrupt originates
77 */
78 u8 src_bus_irq_devno;
79
80 /* Source Bus ID identifies the bus where interrupt signal comes from */
81 u8 src_bus_id;
82
83 /*
84 ** Segment ID is unique across a protection domain and
85 ** identifies a segment of PCI buses (reserved in
86 ** MP Specification Draft 1.5)
87 */
88 u8 src_seg_id;
89
90 /*
91 ** Destination I/O SAPIC INTIN# identifies the INTIN n pin
92 ** to which the signal is connected
93 */
94 u8 dest_iosapic_intin;
95
96 /*
97 ** Destination I/O SAPIC Address identifies the I/O SAPIC
98 ** to which the signal is connected
99 */
100 u64 dest_iosapic_addr;
101};
102
103#define IRT_IOSAPIC_TYPE 139
104#define IRT_IOSAPIC_LENGTH 16
105
106#define IRT_VECTORED_INTR 0
107
108#define IRT_PO_MASK 0x3
109#define IRT_ACTIVE_HI 1
110#define IRT_ACTIVE_LO 3
111
112#define IRT_EL_MASK 0x3
113#define IRT_EL_SHIFT 2
114#define IRT_EDGE_TRIG 1
115#define IRT_LEVEL_TRIG 3
116
117#define IRT_IRQ_MASK 0x3
118#define IRT_DEV_MASK 0x1f
119#define IRT_DEV_SHIFT 2
120
121#define IRT_IRQ_DEVNO_MASK ((IRT_DEV_MASK << IRT_DEV_SHIFT) | IRT_IRQ_MASK)
122
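/*
** Illustrative decoding helpers (not part of this header) showing how
** the masks above unpack src_bus_irq_devno. A value of 0x0b, for
** example, decodes to PCI slot 2 asserting INT_D# (pin code 0x3).
*/
static inline u8 irt_pin(const struct irt_entry *p)
{
	/* 0x0 = INT_A# ... 0x3 = INT_D# */
	return p->src_bus_irq_devno & IRT_IRQ_MASK;
}

static inline u8 irt_devno(const struct irt_entry *p)
{
	return (p->src_bus_irq_devno >> IRT_DEV_SHIFT) & IRT_DEV_MASK;
}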
123#ifdef SUPPORT_MULTI_CELL
124struct iosapic_irt {
125 struct iosapic_irt *irt_next; /* next routing table */
126 struct irt_entry *irt_base; /* intr routing table address */
127 size_t irte_count; /* number of entries in the table */
128 size_t irte_size; /* size (bytes) of each entry */
129};
130#endif
131
132struct vector_info {
133 struct iosapic_info *iosapic; /* I/O SAPIC this vector is on */
134 struct irt_entry *irte; /* IRT entry */
135 u32 *eoi_addr; /* precalculate EOI reg address */
136 u32 eoi_data; /* IA64: ? PA: swapped txn_data */
137 int txn_irq; /* virtual IRQ number for processor */
138 ulong txn_addr; /* IA64: id_eid PA: partial HPA */
139 u32 txn_data; /* CPU interrupt bit */
140 u8 status; /* status/flags */
141 u8 irqline; /* INTINn(IRQ) */
142};
143
144
145struct iosapic_info {
146 struct iosapic_info * isi_next; /* list of I/O SAPIC */
147 void __iomem * addr; /* remapped address */
148 unsigned long isi_hpa; /* physical base address */
149 struct vector_info * isi_vector; /* IRdT (IRQ line) array */
150 int isi_num_vectors; /* size of IRdT array */
151 int isi_status; /* status/flags */
152 unsigned int isi_version; /* DEBUG: data fr version reg */
153};
154
155
156
157#ifdef __IA64__
158/*
159** PA-RISC does NOT have any local sapics. IA64 does.
160** PIB (Processor Interrupt Block) is handled by Astro or Dew (Stretch CEC).
161**
162** PA: Get id_eid from IRT and hardcode PIB to 0xfeeNNNN0
163** Emulate the data on PAT platforms.
164*/
165struct local_sapic_info {
166 struct local_sapic_info *lsi_next; /* point to next CPU info */
167 int *lsi_cpu_id; /* point to logical CPU id */
168 unsigned long *lsi_id_eid; /* point to IA-64 CPU id */
169 int *lsi_status; /* point to CPU status */
170 void *lsi_private; /* point to special info */
171};
172
173/*
174** "root" data structure which ties everything together.
175** Should always be able to start with sapic_root and locate
176** the desired information.
177*/
178struct sapic_info {
179 struct sapic_info *si_next; /* info is per cell */
180 int si_cellid; /* cell id */
181 unsigned int si_status; /* status */
182 char *si_pib_base; /* intr blk base address */
183 local_sapic_info_t *si_local_info;
184 io_sapic_info_t *si_io_info;
185 extint_info_t *si_extint_info;/* External Intr info */
186};
187#endif
188
diff --git a/drivers/parisc/lasi.c b/drivers/parisc/lasi.c
new file mode 100644
index 000000000000..731855053392
--- /dev/null
+++ b/drivers/parisc/lasi.c
@@ -0,0 +1,240 @@
1/*
2 * LASI Device Driver
3 *
4 * (c) Copyright 1999 Red Hat Software
5 * Portions (c) Copyright 1999 The Puffin Group Inc.
6 * Portions (c) Copyright 1999 Hewlett-Packard
7 *
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License as published by
10 * the Free Software Foundation; either version 2 of the License, or
11 * (at your option) any later version.
12 *
13 * by Alan Cox <alan@redhat.com> and
14 * Alex deVries <alex@onefishtwo.ca>
15 */
16
17#include <linux/errno.h>
18#include <linux/init.h>
19#include <linux/interrupt.h>
20#include <linux/slab.h>
21#include <linux/module.h>
22#include <linux/pm.h>
23#include <linux/slab.h>
24#include <linux/types.h>
25
26#include <asm/io.h>
27#include <asm/hardware.h>
28#include <asm/led.h>
29
30#include "gsc.h"
31
32
33#define LASI_VER 0xC008 /* LASI Version */
34
35#define LASI_IO_CONF 0x7FFFE /* LASI primary configuration register */
36#define LASI_IO_CONF2 0x7FFFF /* LASI secondary configuration register */
37
38static void lasi_choose_irq(struct parisc_device *dev, void *ctrl)
39{
40 int irq;
41
42 switch (dev->id.sversion) {
43 case 0x74: irq = 7; break; /* Centronics */
44 case 0x7B: irq = 13; break; /* Audio */
45 case 0x81: irq = 14; break; /* Lasi itself */
46 case 0x82: irq = 9; break; /* SCSI */
47 case 0x83: irq = 20; break; /* Floppy */
48 case 0x84: irq = 26; break; /* PS/2 Keyboard */
49 case 0x87: irq = 18; break; /* ISDN */
50 case 0x8A: irq = 8; break; /* LAN */
51 case 0x8C: irq = 5; break; /* RS232 */
52 case 0x8D: irq = (dev->hw_path == 13) ? 16 : 17; break;
53 /* Telephone */
54 default: return; /* unknown */
55 }
56
57 gsc_asic_assign_irq(ctrl, irq, &dev->irq);
58}
59
60static void __init
61lasi_init_irq(struct gsc_asic *this_lasi)
62{
63 unsigned long lasi_base = this_lasi->hpa;
64
65 /* Stop LASI barking for a bit */
66 gsc_writel(0x00000000, lasi_base+OFFSET_IMR);
67
68 /* clear pending interrupts */
69 gsc_readl(lasi_base+OFFSET_IRR);
70
71 /* We're not really convinced we want to reset the onboard
72 * devices. Firmware does it for us...
73 */
74
75 /* Resets */
76 /* gsc_writel(0xFFFFFFFF, lasi_base+0x2000);*/ /* Parallel */
77 if(pdc_add_valid(lasi_base+0x4004) == PDC_OK)
78 gsc_writel(0xFFFFFFFF, lasi_base+0x4004); /* Audio */
79 /* gsc_writel(0xFFFFFFFF, lasi_base+0x5000);*/ /* Serial */
80 /* gsc_writel(0xFFFFFFFF, lasi_base+0x6000);*/ /* SCSI */
81 gsc_writel(0xFFFFFFFF, lasi_base+0x7000); /* LAN */
82 gsc_writel(0xFFFFFFFF, lasi_base+0x8000); /* Keyboard */
83 gsc_writel(0xFFFFFFFF, lasi_base+0xA000); /* FDC */
84
85 /* Ok we hit it on the head with a hammer, our Dog is now
86 ** comatose and muzzled. Devices will now unmask LASI
87 ** interrupts as they are registered as irq's in the LASI range.
88 */
89 /* XXX: I thought it was `awks that got `it on the `ead with an
90 * `ammer. -- willy
91 */
92}
93
94
95/*
96 ** lasi_led_init()
97 **
98 ** lasi_led_init() initializes the LED controller on the LASI.
99 **
100 ** Since Mirage and Electra machines use a different LED
101 ** address register, we need to check for these machines
102 ** explicitly.
103 */
104
105#ifndef CONFIG_CHASSIS_LCD_LED
106
107#define lasi_led_init(x) /* nothing */
108
109#else
110
111void __init lasi_led_init(unsigned long lasi_hpa)
112{
113 unsigned long datareg;
114
115 switch (CPU_HVERSION) {
116 /* Gecko machines have only one single LED, which can be permanently
117 turned on by writing a zero into the power control register. */
118 case 0x600: /* Gecko (712/60) */
119 case 0x601: /* Gecko (712/80) */
120 case 0x602: /* Gecko (712/100) */
121 case 0x603: /* Anole 64 (743/64) */
122 case 0x604: /* Anole 100 (743/100) */
123 case 0x605: /* Gecko (712/120) */
124 datareg = lasi_hpa + 0x0000C000;
125 gsc_writeb(0, datareg);
126 return; /* no need to register the LED interrupt-function */
127
128 /* Mirage and Electra machines need special offsets */
129 case 0x60A: /* Mirage Jr (715/64) */
130 case 0x60B: /* Mirage 100 */
131 case 0x60C: /* Mirage 100+ */
132 case 0x60D: /* Electra 100 */
133 case 0x60E: /* Electra 120 */
134 datareg = lasi_hpa - 0x00020000;
135 break;
136
137 default:
138 datareg = lasi_hpa + 0x0000C000;
139 break;
140 }
141
142 register_led_driver(DISPLAY_MODEL_LASI, LED_CMD_REG_NONE, datareg);
143}
144#endif
145
146/*
147 * lasi_power_off
148 *
149 * Function for lasi to turn off the power. This is accomplished by setting a
150 * 1 to PWR_ON_L in the Power Control Register
151 *
152 */
153
154static unsigned long lasi_power_off_hpa;
155
156static void lasi_power_off(void)
157{
158 unsigned long datareg;
159
160 /* calculate addr of the Power Control Register */
161 datareg = lasi_power_off_hpa + 0x0000C000;
162
163 /* Power down the machine */
164 gsc_writel(0x02, datareg);
165}
166
167int __init
168lasi_init_chip(struct parisc_device *dev)
169{
170 struct gsc_asic *lasi;
171 struct gsc_irq gsc_irq;
172 int ret;
173
174 lasi = kmalloc(sizeof(*lasi), GFP_KERNEL);
175 if (!lasi)
176 return -ENOMEM;
177
178 lasi->name = "Lasi";
179 lasi->hpa = dev->hpa;
180
181 /* Check the 4-bit (yes, only 4) version register */
182 lasi->version = gsc_readl(lasi->hpa + LASI_VER) & 0xf;
183 printk(KERN_INFO "%s version %d at 0x%lx found.\n",
184 lasi->name, lasi->version, lasi->hpa);
185
186 /* initialize the chassis LEDs really early */
187 lasi_led_init(lasi->hpa);
188
189 /* Stop LASI barking for a bit */
190 lasi_init_irq(lasi);
191
192 /* the IRQ lasi should use */
193 dev->irq = gsc_alloc_irq(&gsc_irq);
194 if (dev->irq < 0) {
195 printk(KERN_ERR "%s(): cannot get GSC irq\n",
196 __FUNCTION__);
197 kfree(lasi);
198 return -EBUSY;
199 }
200
201 lasi->eim = ((u32) gsc_irq.txn_addr) | gsc_irq.txn_data;
202
203 ret = request_irq(gsc_irq.irq, gsc_asic_intr, 0, "lasi", lasi);
204 if (ret < 0) {
205 kfree(lasi);
206 return ret;
207 }
208
209 /* enable IRQ's for devices below LASI */
210 gsc_writel(lasi->eim, lasi->hpa + OFFSET_IAR);
211
212 /* Done init'ing, register this driver */
213 ret = gsc_common_setup(dev, lasi);
214 if (ret) {
215 kfree(lasi);
216 return ret;
217 }
218
219 gsc_fixup_irqs(dev, lasi, lasi_choose_irq);
220
221 /* initialize the power off function */
222 /* FIXME: Record the LASI HPA for the power off function. This should
223 * ensure that only the first LASI (the one controlling the power off)
224 * should set the HPA here */
225 lasi_power_off_hpa = lasi->hpa;
226 pm_power_off = lasi_power_off;
227
228 return ret;
229}
230
231static struct parisc_device_id lasi_tbl[] = {
232 { HPHW_BA, HVERSION_REV_ANY_ID, HVERSION_ANY_ID, 0x00081 },
233 { 0, }
234};
235
236struct parisc_driver lasi_driver = {
237 .name = "Lasi",
238 .id_table = lasi_tbl,
239 .probe = lasi_init_chip,
240};
diff --git a/drivers/parisc/lba_pci.c b/drivers/parisc/lba_pci.c
new file mode 100644
index 000000000000..dc838804c0dd
--- /dev/null
+++ b/drivers/parisc/lba_pci.c
@@ -0,0 +1,1649 @@
1/*
2**
3** PCI Lower Bus Adapter (LBA) manager
4**
5** (c) Copyright 1999,2000 Grant Grundler
6** (c) Copyright 1999,2000 Hewlett-Packard Company
7**
8** This program is free software; you can redistribute it and/or modify
9** it under the terms of the GNU General Public License as published by
10** the Free Software Foundation; either version 2 of the License, or
11** (at your option) any later version.
12**
13**
14** This module primarily provides access to PCI bus (config/IOport
15** spaces) on platforms with an SBA/LBA chipset. A/B/C/J/L/N-class
16** with 4 digit model numbers - eg C3000 (and A400...sigh).
17**
18** LBA driver isn't as simple as the Dino driver because:
19** (a) this chip has substantial bug fixes between revisions
20** (Only one Dino bug has a software workaround :^( )
21** (b) has more options which we don't (yet) support (DMA hints, OLARD)
22** (c) IRQ support lives in the I/O SAPIC driver (not with PCI driver)
23** (d) must play nicely with both PAT and "Legacy" PA-RISC firmware (PDC).
24** (dino only deals with "Legacy" PDC)
25**
26** LBA driver passes the I/O SAPIC HPA to the I/O SAPIC driver.
27** (I/O SAPIC is integrated in the LBA chip).
28**
29** FIXME: Add support to SBA and LBA drivers for DMA hint sets
30** FIXME: Add support for PCI card hot-plug (OLARD).
31*/
32
33#include <linux/delay.h>
34#include <linux/types.h>
35#include <linux/kernel.h>
36#include <linux/spinlock.h>
37#include <linux/init.h> /* for __init and __devinit */
38#include <linux/pci.h>
39#include <linux/ioport.h>
40#include <linux/slab.h>
41#include <linux/smp_lock.h>
42
43#include <asm/byteorder.h>
44#include <asm/pdc.h>
45#include <asm/pdcpat.h>
46#include <asm/page.h>
47#include <asm/system.h>
48
49#include <asm/hardware.h> /* for register_parisc_driver() stuff */
50#include <asm/parisc-device.h>
51#include <asm/iosapic.h> /* for iosapic_register() */
52#include <asm/io.h> /* read/write stuff */
53
54#undef DEBUG_LBA /* general stuff */
55#undef DEBUG_LBA_PORT /* debug I/O Port access */
56#undef DEBUG_LBA_CFG /* debug Config Space Access (ie PCI Bus walk) */
57#undef DEBUG_LBA_PAT /* debug PCI Resource Mgt code - PDC PAT only */
58
59#undef FBB_SUPPORT /* Fast Back-Back xfers - NOT READY YET */
60
61
62#ifdef DEBUG_LBA
63#define DBG(x...) printk(x)
64#else
65#define DBG(x...)
66#endif
67
68#ifdef DEBUG_LBA_PORT
69#define DBG_PORT(x...) printk(x)
70#else
71#define DBG_PORT(x...)
72#endif
73
74#ifdef DEBUG_LBA_CFG
75#define DBG_CFG(x...) printk(x)
76#else
77#define DBG_CFG(x...)
78#endif
79
80#ifdef DEBUG_LBA_PAT
81#define DBG_PAT(x...) printk(x)
82#else
83#define DBG_PAT(x...)
84#endif
85
86
87/*
88** Config accessor functions only pass in the 8-bit bus number and not
89** the 8-bit "PCI Segment" number. Each LBA will be assigned a PCI bus
90** number based on what firmware wrote into the scratch register.
91**
92** The "secondary" bus number is set to this before calling
93** pci_register_ops(). If any PPB's are present, the scan will
94** discover them and update the "secondary" and "subordinate"
95** fields in the pci_bus structure.
96**
97** Changes in the configuration *may* result in a different
98** bus number for each LBA depending on what firmware does.
99*/
100
101#define MODULE_NAME "LBA"
102
103#define LBA_FUNC_ID 0x0000 /* function id */
104#define LBA_FCLASS 0x0008 /* function class, bist, header, rev... */
105#define LBA_CAPABLE 0x0030 /* capabilities register */
106
107#define LBA_PCI_CFG_ADDR 0x0040 /* poke CFG address here */
108#define LBA_PCI_CFG_DATA 0x0048 /* read or write data here */
109
110#define LBA_PMC_MTLT 0x0050 /* Firmware sets this - read only. */
111#define LBA_FW_SCRATCH 0x0058 /* Firmware writes the PCI bus number here. */
112#define LBA_ERROR_ADDR 0x0070 /* On error, address gets logged here */
113
114#define LBA_ARB_MASK 0x0080 /* bit 0 enable arbitration. PAT/PDC enables */
115#define LBA_ARB_PRI 0x0088 /* firmware sets this. */
116#define LBA_ARB_MODE 0x0090 /* firmware sets this. */
117#define LBA_ARB_MTLT 0x0098 /* firmware sets this. */
118
119#define LBA_MOD_ID 0x0100 /* Module ID. PDC_PAT_CELL reports 4 */
120
121#define LBA_STAT_CTL 0x0108 /* Status & Control */
122#define LBA_BUS_RESET 0x01 /* Deassert PCI Bus Reset Signal */
123#define CLEAR_ERRLOG 0x10 /* "Clear Error Log" cmd */
124#define CLEAR_ERRLOG_ENABLE 0x20 /* "Clear Error Log" Enable */
125#define HF_ENABLE 0x40 /* enable HF mode (default is -1 mode) */
126
127#define LBA_LMMIO_BASE 0x0200 /* < 4GB I/O address range */
128#define LBA_LMMIO_MASK 0x0208
129
130#define LBA_GMMIO_BASE 0x0210 /* > 4GB I/O address range */
131#define LBA_GMMIO_MASK 0x0218
132
133#define LBA_WLMMIO_BASE 0x0220 /* All < 4GB ranges under the same *SBA* */
134#define LBA_WLMMIO_MASK 0x0228
135
136#define LBA_WGMMIO_BASE 0x0230 /* All > 4GB ranges under the same *SBA* */
137#define LBA_WGMMIO_MASK 0x0238
138
139#define LBA_IOS_BASE 0x0240 /* I/O port space for this LBA */
140#define LBA_IOS_MASK 0x0248
141
142#define LBA_ELMMIO_BASE 0x0250 /* Extra LMMIO range */
143#define LBA_ELMMIO_MASK 0x0258
144
145#define LBA_EIOS_BASE 0x0260 /* Extra I/O port space */
146#define LBA_EIOS_MASK 0x0268
147
148#define LBA_GLOBAL_MASK 0x0270 /* Mercury only: Global Address Mask */
149#define LBA_DMA_CTL 0x0278 /* firmware sets this */
150
151#define LBA_IBASE 0x0300 /* SBA DMA support */
152#define LBA_IMASK 0x0308
153
154/* FIXME: ignore DMA Hint stuff until we can measure performance */
155#define LBA_HINT_CFG 0x0310
156#define LBA_HINT_BASE 0x0380 /* 14 registers at every 8 bytes. */
157
158#define LBA_BUS_MODE 0x0620
159
160/* ERROR regs are needed for config cycle kluges */
161#define LBA_ERROR_CONFIG 0x0680
162#define LBA_SMART_MODE 0x20
163#define LBA_ERROR_STATUS 0x0688
164#define LBA_ROPE_CTL 0x06A0
165
166#define LBA_IOSAPIC_BASE 0x800 /* Offset of IRQ logic */
167
168/* non-postable I/O port space, densely packed */
169#define LBA_PORT_BASE (PCI_F_EXTEND | 0xfee00000UL)
170static void __iomem *astro_iop_base;
171
172#define ELROY_HVERS 0x782
173#define MERCURY_HVERS 0x783
174#define QUICKSILVER_HVERS 0x784
175
176static inline int IS_ELROY(struct parisc_device *d)
177{
178 return (d->id.hversion == ELROY_HVERS);
179}
180
181static inline int IS_MERCURY(struct parisc_device *d)
182{
183 return (d->id.hversion == MERCURY_HVERS);
184}
185
186static inline int IS_QUICKSILVER(struct parisc_device *d)
187{
188 return (d->id.hversion == QUICKSILVER_HVERS);
189}
190
191
192/*
193** lba_device: Per instance Elroy data structure
194*/
195struct lba_device {
196 struct pci_hba_data hba;
197
198 spinlock_t lba_lock;
199 void *iosapic_obj;
200
201#ifdef CONFIG_64BIT
202 void __iomem * iop_base; /* PA_VIEW - for IO port accessor funcs */
203#endif
204
205 int flags; /* state/functionality enabled */
206 int hw_rev; /* HW revision of chip */
207};
208
209
210static u32 lba_t32;
211
212/* lba flags */
213#define LBA_FLAG_SKIP_PROBE 0x10
214
215#define LBA_SKIP_PROBE(d) ((d)->flags & LBA_FLAG_SKIP_PROBE)
216
217
218/* Looks nice and keeps the compiler happy */
219#define LBA_DEV(d) ((struct lba_device *) (d))
220
221
222/*
223** Only allow 8 subsidiary busses per LBA
224** Problem is the PCI bus numbering is globally shared.
225*/
226#define LBA_MAX_NUM_BUSES 8
227
228/************************************
229 * LBA register read and write support
230 *
231 * BE WARNED: register writes are posted.
232 * (ie follow writes which must reach HW with a read)
233 */
234#define READ_U8(addr) __raw_readb(addr)
235#define READ_U16(addr) __raw_readw(addr)
236#define READ_U32(addr) __raw_readl(addr)
237#define WRITE_U8(value, addr) __raw_writeb(value, addr)
238#define WRITE_U16(value, addr) __raw_writew(value, addr)
239#define WRITE_U32(value, addr) __raw_writel(value, addr)
240
241#define READ_REG8(addr) readb(addr)
242#define READ_REG16(addr) readw(addr)
243#define READ_REG32(addr) readl(addr)
244#define READ_REG64(addr) readq(addr)
245#define WRITE_REG8(value, addr) writeb(value, addr)
246#define WRITE_REG16(value, addr) writew(value, addr)
247#define WRITE_REG32(value, addr) writel(value, addr)
248
249
250#define LBA_CFG_TOK(bus,dfn) ((u32) ((bus)<<16 | (dfn)<<8))
251#define LBA_CFG_BUS(tok) ((u8) ((tok)>>16))
252#define LBA_CFG_DEV(tok) ((u8) ((tok)>>11) & 0x1f)
253#define LBA_CFG_FUNC(tok) ((u8) ((tok)>>8 ) & 0x7)
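/*
** Example (illustrative): bus 1, device 4, function 2 gives
** devfn == 0x22, so LBA_CFG_TOK(1, 0x22) == 0x00012200.
** LBA_CFG_BUS/LBA_CFG_DEV/LBA_CFG_FUNC recover 1, 4 and 2, since
** devfn carries the slot in bits 7:3 and the function in bits 2:0.
*/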
254
255
256/*
257** Extract LBA (Rope) number from HPA
258** REVISIT: 16 ropes for Stretch/Ike?
259*/
260#define ROPES_PER_IOC 8
261#define LBA_NUM(x) ((((unsigned long) x) >> 13) & (ROPES_PER_IOC-1))
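/*
** e.g. (hypothetical HPA) an LBA at 0xfed32000:
**	LBA_NUM(0xfed32000) == (0xfed32000 >> 13) & 7 == 1
** i.e. rope 1 of its IOC.
*/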
262
263
264static void
265lba_dump_res(struct resource *r, int d)
266{
267 int i;
268
269 if (NULL == r)
270 return;
271
272 printk(KERN_DEBUG "(%p)", r->parent);
273 for (i = d; i ; --i) printk(" ");
274 printk(KERN_DEBUG "%p [%lx,%lx]/%lx\n", r, r->start, r->end, r->flags);
275 lba_dump_res(r->child, d+2);
276 lba_dump_res(r->sibling, d);
277}
278
279
280/*
281** LBA rev 2.0, 2.1, 2.2, and 3.0 bus walks require a complex
282** workaround for cfg cycles:
283** -- preserve LBA state
284** -- prevent any DMA from occurring
285** -- turn on smart mode
286** -- probe with config writes before doing config reads
287** -- check ERROR_STATUS
288** -- clear ERROR_STATUS
289** -- restore LBA state
290**
291** The workaround is only used for device discovery.
292*/
293
294static int lba_device_present(u8 bus, u8 dfn, struct lba_device *d)
295{
296 u8 first_bus = d->hba.hba_bus->secondary;
297 u8 last_sub_bus = d->hba.hba_bus->subordinate;
298
299 if ((bus < first_bus) ||
300 (bus > last_sub_bus) ||
301 ((bus - first_bus) >= LBA_MAX_NUM_BUSES)) {
302 return 0;
303 }
304
305 return 1;
306}
307
308
309
310#define LBA_CFG_SETUP(d, tok) { \
311 /* Save contents of error config register. */ \
312 error_config = READ_REG32(d->hba.base_addr + LBA_ERROR_CONFIG); \
313\
314 /* Save contents of status control register. */ \
315 status_control = READ_REG32(d->hba.base_addr + LBA_STAT_CTL); \
316\
317 /* For LBA rev 2.0, 2.1, 2.2, and 3.0, we must disable DMA \
318 ** arbitration for full bus walks. \
319 */ \
320 /* Save contents of arb mask register. */ \
321 arb_mask = READ_REG32(d->hba.base_addr + LBA_ARB_MASK); \
322\
323 /* \
324 * Turn off all device arbitration bits (i.e. everything \
325 * except arbitration enable bit). \
326 */ \
327 WRITE_REG32(0x1, d->hba.base_addr + LBA_ARB_MASK); \
328\
329 /* \
330 * Set the smart mode bit so that master aborts don't cause \
331 * LBA to go into PCI fatal mode (required). \
332 */ \
333 WRITE_REG32(error_config | LBA_SMART_MODE, d->hba.base_addr + LBA_ERROR_CONFIG); \
334}
335
336
337#define LBA_CFG_PROBE(d, tok) { \
338 /* \
339 * Setup Vendor ID write and read back the address register \
340 * to make sure that LBA is the bus master. \
341 */ \
342 WRITE_REG32(tok | PCI_VENDOR_ID, (d)->hba.base_addr + LBA_PCI_CFG_ADDR);\
343 /* \
344 * Read address register to ensure that LBA is the bus master, \
345 * which implies that DMA traffic has stopped when DMA arb is off. \
346 */ \
347 lba_t32 = READ_REG32((d)->hba.base_addr + LBA_PCI_CFG_ADDR); \
348 /* \
349	 * Generate a cfg write cycle (will have no effect on \
350 * Vendor ID register since read-only). \
351 */ \
352 WRITE_REG32(~0, (d)->hba.base_addr + LBA_PCI_CFG_DATA); \
353 /* \
354 * Make sure write has completed before proceeding further, \
355 * i.e. before setting clear enable. \
356 */ \
357 lba_t32 = READ_REG32((d)->hba.base_addr + LBA_PCI_CFG_ADDR); \
358}
359
360
361/*
362 * HPREVISIT:
363 * -- Can't tell if config cycle got the error.
364 *
365 * OV bit is broken until rev 4.0, so can't use OV bit and
366 * LBA_ERROR_LOG_ADDR to tell if error belongs to config cycle.
367 *
368 * As of rev 4.0, no longer need the error check.
369 *
370 * -- Even if we could tell, we still want to return -1
371 * for **ANY** error (not just master abort).
372 *
373 * -- Only clear non-fatal errors (we don't want to bring
374 * LBA out of pci-fatal mode).
375 *
376 * Actually, there is still a race in which
377 * we could be clearing a fatal error. We will
378 * live with this during our initial bus walk
379 * until rev 4.0 (no driver activity during
380 * initial bus walk). The initial bus walk
381 * has race conditions concerning the use of
382 * smart mode as well.
383 */
384
385#define LBA_MASTER_ABORT_ERROR 0xc
386#define LBA_FATAL_ERROR 0x10
387
388#define LBA_CFG_MASTER_ABORT_CHECK(d, base, tok, error) { \
389 u32 error_status = 0; \
390 /* \
391 * Set clear enable (CE) bit. Unset by HW when new \
392 * errors are logged -- LBA HW ERS section 14.3.3). \
393 */ \
394 WRITE_REG32(status_control | CLEAR_ERRLOG_ENABLE, base + LBA_STAT_CTL); \
395 error_status = READ_REG32(base + LBA_ERROR_STATUS); \
396 if ((error_status & 0x1f) != 0) { \
397 /* \
398 * Fail the config read request. \
399 */ \
400 error = 1; \
401 if ((error_status & LBA_FATAL_ERROR) == 0) { \
402 /* \
403 * Clear error status (if fatal bit not set) by setting \
404 * clear error log bit (CL). \
405 */ \
406 WRITE_REG32(status_control | CLEAR_ERRLOG, base + LBA_STAT_CTL); \
407 } \
408 } \
409}
410
411#define LBA_CFG_TR4_ADDR_SETUP(d, addr) \
412 WRITE_REG32(((addr) & ~3), (d)->hba.base_addr + LBA_PCI_CFG_ADDR);
413
414#define LBA_CFG_ADDR_SETUP(d, addr) { \
415 WRITE_REG32(((addr) & ~3), (d)->hba.base_addr + LBA_PCI_CFG_ADDR); \
416 /* \
417 * Read address register to ensure that LBA is the bus master, \
418 * which implies that DMA traffic has stopped when DMA arb is off. \
419 */ \
420 lba_t32 = READ_REG32((d)->hba.base_addr + LBA_PCI_CFG_ADDR); \
421}
422
423
424#define LBA_CFG_RESTORE(d, base) { \
425 /* \
426 * Restore status control register (turn off clear enable). \
427 */ \
428 WRITE_REG32(status_control, base + LBA_STAT_CTL); \
429 /* \
430 * Restore error config register (turn off smart mode). \
431 */ \
432 WRITE_REG32(error_config, base + LBA_ERROR_CONFIG); \
433 /* \
434 * Restore arb mask register (reenables DMA arbitration). \
435 */ \
436 WRITE_REG32(arb_mask, base + LBA_ARB_MASK); \
437}
438
439
440
441static unsigned int
442lba_rd_cfg(struct lba_device *d, u32 tok, u8 reg, u32 size)
443{
444 u32 data = ~0U;
445 int error = 0;
446 u32 arb_mask = 0; /* used by LBA_CFG_SETUP/RESTORE */
447 u32 error_config = 0; /* used by LBA_CFG_SETUP/RESTORE */
448 u32 status_control = 0; /* used by LBA_CFG_SETUP/RESTORE */
449
450 LBA_CFG_SETUP(d, tok);
451 LBA_CFG_PROBE(d, tok);
452 LBA_CFG_MASTER_ABORT_CHECK(d, d->hba.base_addr, tok, error);
453 if (!error) {
454 void __iomem *data_reg = d->hba.base_addr + LBA_PCI_CFG_DATA;
455
456 LBA_CFG_ADDR_SETUP(d, tok | reg);
457 switch (size) {
458 case 1: data = (u32) READ_REG8(data_reg + (reg & 3)); break;
459	case 2: data = (u32) READ_REG16(data_reg + (reg & 2)); break;
460 case 4: data = READ_REG32(data_reg); break;
461 }
462 }
463 LBA_CFG_RESTORE(d, d->hba.base_addr);
464 return(data);
465}
466
467
468static int elroy_cfg_read(struct pci_bus *bus, unsigned int devfn, int pos, int size, u32 *data)
469{
470 struct lba_device *d = LBA_DEV(parisc_walk_tree(bus->bridge));
471 u32 local_bus = (bus->parent == NULL) ? 0 : bus->secondary;
472 u32 tok = LBA_CFG_TOK(local_bus, devfn);
473 void __iomem *data_reg = d->hba.base_addr + LBA_PCI_CFG_DATA;
474
475 if ((pos > 255) || (devfn > 255))
476 return -EINVAL;
477
478/* FIXME: B2K/C3600 workaround is always use old method... */
479 /* if (!LBA_SKIP_PROBE(d)) */ {
480 /* original - Generate config cycle on broken elroy
481 with risk we will miss PCI bus errors. */
482 *data = lba_rd_cfg(d, tok, pos, size);
483 DBG_CFG("%s(%x+%2x) -> 0x%x (a)\n", __FUNCTION__, tok, pos, *data);
484 return 0;
485 }
486
487 if (LBA_SKIP_PROBE(d) && !lba_device_present(bus->secondary, devfn, d)) {
488 DBG_CFG("%s(%x+%2x) -> -1 (b)\n", __FUNCTION__, tok, pos);
489 /* either don't want to look or know device isn't present. */
490 *data = ~0U;
491 return(0);
492 }
493
494 /* Basic Algorithm
495 ** Should only get here on fully working LBA rev.
496 ** This is how simple the code should have been.
497 */
498 LBA_CFG_ADDR_SETUP(d, tok | pos);
499 switch(size) {
500 case 1: *data = READ_REG8 (data_reg + (pos & 3)); break;
501 case 2: *data = READ_REG16(data_reg + (pos & 2)); break;
502 case 4: *data = READ_REG32(data_reg); break;
503 }
504 DBG_CFG("%s(%x+%2x) -> 0x%x (c)\n", __FUNCTION__, tok, pos, *data);
505 return 0;
506}
507
508
509static void
510lba_wr_cfg(struct lba_device *d, u32 tok, u8 reg, u32 data, u32 size)
511{
512 int error = 0;
513 u32 arb_mask = 0;
514 u32 error_config = 0;
515 u32 status_control = 0;
516 void __iomem *data_reg = d->hba.base_addr + LBA_PCI_CFG_DATA;
517
518 LBA_CFG_SETUP(d, tok);
519 LBA_CFG_ADDR_SETUP(d, tok | reg);
520 switch (size) {
521 case 1: WRITE_REG8 (data, data_reg + (reg & 3)); break;
522 case 2: WRITE_REG16(data, data_reg + (reg & 2)); break;
523 case 4: WRITE_REG32(data, data_reg); break;
524 }
525 LBA_CFG_MASTER_ABORT_CHECK(d, d->hba.base_addr, tok, error);
526 LBA_CFG_RESTORE(d, d->hba.base_addr);
527}
528
529
530/*
531 * LBA 4.0 config write code implements non-postable semantics
532 * by doing a read of CONFIG ADDR after the write.
533 */
534
535static int elroy_cfg_write(struct pci_bus *bus, unsigned int devfn, int pos, int size, u32 data)
536{
537 struct lba_device *d = LBA_DEV(parisc_walk_tree(bus->bridge));
538 u32 local_bus = (bus->parent == NULL) ? 0 : bus->secondary;
539 u32 tok = LBA_CFG_TOK(local_bus,devfn);
540
541 if ((pos > 255) || (devfn > 255))
542 return -EINVAL;
543
544 if (!LBA_SKIP_PROBE(d)) {
545 /* Original Workaround */
546 lba_wr_cfg(d, tok, pos, (u32) data, size);
547 DBG_CFG("%s(%x+%2x) = 0x%x (a)\n", __FUNCTION__, tok, pos,data);
548 return 0;
549 }
550
551 if (LBA_SKIP_PROBE(d) && (!lba_device_present(bus->secondary, devfn, d))) {
552 DBG_CFG("%s(%x+%2x) = 0x%x (b)\n", __FUNCTION__, tok, pos,data);
553 return 1; /* New Workaround */
554 }
555
556 DBG_CFG("%s(%x+%2x) = 0x%x (c)\n", __FUNCTION__, tok, pos, data);
557
558 /* Basic Algorithm */
559 LBA_CFG_ADDR_SETUP(d, tok | pos);
560 switch(size) {
561 case 1: WRITE_REG8 (data, d->hba.base_addr + LBA_PCI_CFG_DATA + (pos & 3));
562 break;
563 case 2: WRITE_REG16(data, d->hba.base_addr + LBA_PCI_CFG_DATA + (pos & 2));
564 break;
565 case 4: WRITE_REG32(data, d->hba.base_addr + LBA_PCI_CFG_DATA);
566 break;
567 }
568 /* flush posted write */
569 lba_t32 = READ_REG32(d->hba.base_addr + LBA_PCI_CFG_ADDR);
570 return 0;
571}
572
573
574static struct pci_ops elroy_cfg_ops = {
575 .read = elroy_cfg_read,
576 .write = elroy_cfg_write,
577};
578
579/*
580 * The mercury_cfg_ops are slightly misnamed; they're also used for Elroy
581 * TR4.0, as no additional bugs were found in this area between Elroy and
582 * Mercury.
583 */
584
585static int mercury_cfg_read(struct pci_bus *bus, unsigned int devfn, int pos, int size, u32 *data)
586{
587 struct lba_device *d = LBA_DEV(parisc_walk_tree(bus->bridge));
588 u32 local_bus = (bus->parent == NULL) ? 0 : bus->secondary;
589 u32 tok = LBA_CFG_TOK(local_bus, devfn);
590 void __iomem *data_reg = d->hba.base_addr + LBA_PCI_CFG_DATA;
591
592 if ((pos > 255) || (devfn > 255))
593 return -EINVAL;
594
595 LBA_CFG_TR4_ADDR_SETUP(d, tok | pos);
596 switch(size) {
597 case 1:
598 *data = READ_REG8(data_reg + (pos & 3));
599 break;
600 case 2:
601 *data = READ_REG16(data_reg + (pos & 2));
602 break;
603 case 4:
604		*data = READ_REG32(data_reg);
605		break;
606 }
607
608 DBG_CFG("mercury_cfg_read(%x+%2x) -> 0x%x\n", tok, pos, *data);
609 return 0;
610}
611
612/*
613 * LBA 4.0 config write code implements non-postable semantics
614 * by doing a read of CONFIG ADDR after the write.
615 */
616
617static int mercury_cfg_write(struct pci_bus *bus, unsigned int devfn, int pos, int size, u32 data)
618{
619 struct lba_device *d = LBA_DEV(parisc_walk_tree(bus->bridge));
620 void __iomem *data_reg = d->hba.base_addr + LBA_PCI_CFG_DATA;
621 u32 local_bus = (bus->parent == NULL) ? 0 : bus->secondary;
622 u32 tok = LBA_CFG_TOK(local_bus,devfn);
623
624 if ((pos > 255) || (devfn > 255))
625 return -EINVAL;
626
627 DBG_CFG("%s(%x+%2x) <- 0x%x (c)\n", __FUNCTION__, tok, pos, data);
628
629 LBA_CFG_TR4_ADDR_SETUP(d, tok | pos);
630 switch(size) {
631 case 1:
632 WRITE_REG8 (data, data_reg + (pos & 3));
633 break;
634 case 2:
635 WRITE_REG16(data, data_reg + (pos & 2));
636 break;
637 case 4:
638 WRITE_REG32(data, data_reg);
639 break;
640 }
641
642 /* flush posted write */
643 lba_t32 = READ_U32(d->hba.base_addr + LBA_PCI_CFG_ADDR);
644 return 0;
645}
646
647static struct pci_ops mercury_cfg_ops = {
648 .read = mercury_cfg_read,
649 .write = mercury_cfg_write,
650};
651
652
653static void
654lba_bios_init(void)
655{
656 DBG(MODULE_NAME ": lba_bios_init\n");
657}
658
659
660#ifdef CONFIG_64BIT
661
662/*
663** Determine if a device is already configured.
664** If so, reserve its resources.
665**
666** Read PCI cfg command register and see if I/O or MMIO is enabled.
667** PAT has to enable the devices it's using.
668**
669** Note: resources are fixed up before we try to claim them.
670*/
671static void
672lba_claim_dev_resources(struct pci_dev *dev)
673{
674 u16 cmd;
675 int i, srch_flags;
676
677 (void) pci_read_config_word(dev, PCI_COMMAND, &cmd);
678
679 srch_flags = (cmd & PCI_COMMAND_IO) ? IORESOURCE_IO : 0;
680 if (cmd & PCI_COMMAND_MEMORY)
681 srch_flags |= IORESOURCE_MEM;
682
683 if (!srch_flags)
684 return;
685
686 for (i = 0; i <= PCI_ROM_RESOURCE; i++) {
687 if (dev->resource[i].flags & srch_flags) {
688 pci_claim_resource(dev, i);
689 DBG(" claimed %s %d [%lx,%lx]/%lx\n",
690 pci_name(dev), i,
691 dev->resource[i].start,
692 dev->resource[i].end,
693 dev->resource[i].flags
694 );
695 }
696 }
697}
698#else
699#define lba_claim_dev_resources(dev)
700#endif
701
702
703/*
704** The algorithm is generic code.
705** But it needs to access local data structures to get the IRQ base.
706** Could make this a "pci_fixup_irq(bus, region)" but not sure
707** it's worth it.
708**
709** Called by do_pci_scan_bus() immediately after each PCI bus is walked.
710** Resources aren't allocated until recursive buswalk below HBA is completed.
711*/
712static void
713lba_fixup_bus(struct pci_bus *bus)
714{
715 struct list_head *ln;
716#ifdef FBB_SUPPORT
717 u16 status;
718#endif
719 struct lba_device *ldev = LBA_DEV(parisc_walk_tree(bus->bridge));
720 int lba_portbase = HBA_PORT_BASE(ldev->hba.hba_num);
721
722 DBG("lba_fixup_bus(0x%p) bus %d platform_data 0x%p\n",
723 bus, bus->secondary, bus->bridge->platform_data);
724
725 /*
726 ** Properly Setup MMIO resources for this bus.
727 ** pci_alloc_primary_bus() mangles this.
728 */
729 if (bus->self) {
730 /* PCI-PCI Bridge */
731 pci_read_bridge_bases(bus);
732 } else {
733 /* Host-PCI Bridge */
734 int err, i;
735
736 DBG("lba_fixup_bus() %s [%lx/%lx]/%lx\n",
737 ldev->hba.io_space.name,
738 ldev->hba.io_space.start, ldev->hba.io_space.end,
739 ldev->hba.io_space.flags);
740 DBG("lba_fixup_bus() %s [%lx/%lx]/%lx\n",
741 ldev->hba.lmmio_space.name,
742 ldev->hba.lmmio_space.start, ldev->hba.lmmio_space.end,
743 ldev->hba.lmmio_space.flags);
744
745 err = request_resource(&ioport_resource, &(ldev->hba.io_space));
746 if (err < 0) {
747 lba_dump_res(&ioport_resource, 2);
748 BUG();
749 }
750
751 if (ldev->hba.elmmio_space.start) {
752 err = request_resource(&iomem_resource,
753 &(ldev->hba.elmmio_space));
754 if (err < 0) {
755
756 printk("FAILED: lba_fixup_bus() request for "
757 "elmmio_space [%lx/%lx]\n",
758 ldev->hba.elmmio_space.start,
759 ldev->hba.elmmio_space.end);
760
761 /* lba_dump_res(&iomem_resource, 2); */
762 /* BUG(); */
763 }
764 }
765
766 err = request_resource(&iomem_resource, &(ldev->hba.lmmio_space));
767 if (err < 0) {
768 /* FIXME overlaps with elmmio will fail here.
769 * Need to prune (or disable) the distributed range.
770 *
771 * BEWARE: conflicts with this lmmio range may be
772 * elmmio range which is pointing down another rope.
773 */
774
775 printk("FAILED: lba_fixup_bus() request for "
776 "lmmio_space [%lx/%lx]\n",
777 ldev->hba.lmmio_space.start,
778 ldev->hba.lmmio_space.end);
779 /* lba_dump_res(&iomem_resource, 2); */
780 }
781
782#ifdef CONFIG_64BIT
783	/* GMMIO is a distributed range. Every LBA/Rope gets part of it. */
784 if (ldev->hba.gmmio_space.flags) {
785 err = request_resource(&iomem_resource, &(ldev->hba.gmmio_space));
786 if (err < 0) {
787 printk("FAILED: lba_fixup_bus() request for "
788 "gmmio_space [%lx/%lx]\n",
789 ldev->hba.gmmio_space.start,
790 ldev->hba.gmmio_space.end);
791 lba_dump_res(&iomem_resource, 2);
792 BUG();
793 }
794 }
795#endif
796
797	/* advertise Host bridge resources to PCI bus */
798 bus->resource[0] = &(ldev->hba.io_space);
799 bus->resource[1] = &(ldev->hba.lmmio_space);
800 i=2;
801 if (ldev->hba.elmmio_space.start)
802 bus->resource[i++] = &(ldev->hba.elmmio_space);
803 if (ldev->hba.gmmio_space.start)
804 bus->resource[i++] = &(ldev->hba.gmmio_space);
805
806 }
807
808 list_for_each(ln, &bus->devices) {
809 int i;
810 struct pci_dev *dev = pci_dev_b(ln);
811
812 DBG("lba_fixup_bus() %s\n", pci_name(dev));
813
814 /* Virtualize Device/Bridge Resources. */
815 for (i = 0; i < PCI_BRIDGE_RESOURCES; i++) {
816 struct resource *res = &dev->resource[i];
817
818 /* If resource not allocated - skip it */
819 if (!res->start)
820 continue;
821
822 if (res->flags & IORESOURCE_IO) {
823 DBG("lba_fixup_bus() I/O Ports [%lx/%lx] -> ",
824 res->start, res->end);
825 res->start |= lba_portbase;
826 res->end |= lba_portbase;
827 DBG("[%lx/%lx]\n", res->start, res->end);
828 } else if (res->flags & IORESOURCE_MEM) {
829 /*
830 ** Convert PCI (IO_VIEW) addresses to
831 ** processor (PA_VIEW) addresses
832 */
833 DBG("lba_fixup_bus() MMIO [%lx/%lx] -> ",
834 res->start, res->end);
835 res->start = PCI_HOST_ADDR(HBA_DATA(ldev), res->start);
836 res->end = PCI_HOST_ADDR(HBA_DATA(ldev), res->end);
837 DBG("[%lx/%lx]\n", res->start, res->end);
838 } else {
839 DBG("lba_fixup_bus() WTF? 0x%lx [%lx/%lx] XXX",
840 res->flags, res->start, res->end);
841 }
842 }
843
844#ifdef FBB_SUPPORT
845 /*
846 ** If one device does not support FBB transfers,
847	** no one on the bus can be allowed to use them.
848 */
849 (void) pci_read_config_word(dev, PCI_STATUS, &status);
850 bus->bridge_ctl &= ~(status & PCI_STATUS_FAST_BACK);
851#endif
852
853 if (is_pdc_pat()) {
854 /* Claim resources for PDC's devices */
855 lba_claim_dev_resources(dev);
856 }
857
858 /*
859 ** P2PB's have no IRQs. ignore them.
860 */
861 if ((dev->class >> 8) == PCI_CLASS_BRIDGE_PCI)
862 continue;
863
864 /* Adjust INTERRUPT_LINE for this dev */
865 iosapic_fixup_irq(ldev->iosapic_obj, dev);
866 }
867
868#ifdef FBB_SUPPORT
869/* FIXME/REVISIT - finish figuring out how to set FBB on both
870** pci_setup_bridge() clobbers PCI_BRIDGE_CONTROL.
871** Can't fixup here anyway....garr...
872*/
873 if (fbb_enable) {
874 if (bus->self) {
875 u8 control;
876 /* enable on PPB */
877 (void) pci_read_config_byte(bus->self, PCI_BRIDGE_CONTROL, &control);
878 (void) pci_write_config_byte(bus->self, PCI_BRIDGE_CONTROL, control | PCI_STATUS_FAST_BACK);
879
880 } else {
881 /* enable on LBA */
882 }
883 fbb_enable = PCI_COMMAND_FAST_BACK;
884 }
885
886 /* Lastly enable FBB/PERR/SERR on all devices too */
887 list_for_each(ln, &bus->devices) {
888 (void) pci_read_config_word(dev, PCI_COMMAND, &status);
889 status |= PCI_COMMAND_PARITY | PCI_COMMAND_SERR | fbb_enable;
890 (void) pci_write_config_word(dev, PCI_COMMAND, status);
891 }
892#endif
893}
894
895
896struct pci_bios_ops lba_bios_ops = {
897 .init = lba_bios_init,
898 .fixup_bus = lba_fixup_bus,
899};
900
901
902
903
904/*******************************************************
905**
906** LBA Sprockets "I/O Port" Space Accessor Functions
907**
908** This set of accessor functions is intended for use with
909** "legacy firmware" (ie Sprockets on Allegro/Forte boxes).
910**
911** Many PCI devices don't require use of I/O port space (eg Tulip,
912** NCR720) since they export the same registers to both MMIO and
913** I/O port space. In general I/O port space is slower than
914** MMIO since drivers are designed so PIO writes can be posted.
915**
916********************************************************/
917
918#define LBA_PORT_IN(size, mask) \
919static u##size lba_astro_in##size (struct pci_hba_data *d, u16 addr) \
920{ \
921 u##size t; \
922 t = READ_REG##size(astro_iop_base + addr); \
923 DBG_PORT(" 0x%x\n", t); \
924 return (t); \
925}
926
927LBA_PORT_IN( 8, 3)
928LBA_PORT_IN(16, 2)
929LBA_PORT_IN(32, 0)
930
931
932
933/*
934** BUG X4107: Ordering broken - DMA RD return can bypass PIO WR
935**
936** Fixed in Elroy 2.2. The READ_U32(..., LBA_FUNC_ID) below is
937 ** to guarantee non-postable completion semantics - not to avoid X4107.
938 ** The READ_U32 only guarantees the write data gets to elroy but
939 ** not out to the PCI bus. We can't read stuff from I/O port space
940** since we don't know what has side-effects. Attempting to read
941** from configuration space would be suicidal given the number of
942** bugs in that elroy functionality.
943**
944** Description:
945** DMA read results can improperly pass PIO writes (X4107). The
946** result of this bug is that if a processor modifies a location in
947** memory after having issued PIO writes, the PIO writes are not
948** guaranteed to be completed before a PCI device is allowed to see
949** the modified data in a DMA read.
950**
951** Note that IKE bug X3719 in TR1 IKEs will result in the same
952** symptom.
953**
954** Workaround:
955** The workaround for this bug is to always follow a PIO write with
956** a PIO read to the same bus before starting DMA on that PCI bus.
957**
958*/
959#define LBA_PORT_OUT(size, mask) \
960static void lba_astro_out##size (struct pci_hba_data *d, u16 addr, u##size val) \
961{ \
962 DBG_PORT("%s(0x%p, 0x%x, 0x%x)\n", __FUNCTION__, d, addr, val); \
963 WRITE_REG##size(val, astro_iop_base + addr); \
964 if (LBA_DEV(d)->hw_rev < 3) \
965 lba_t32 = READ_U32(d->base_addr + LBA_FUNC_ID); \
966}
967
968LBA_PORT_OUT( 8, 3)
969LBA_PORT_OUT(16, 2)
970LBA_PORT_OUT(32, 0)
971
972
973static struct pci_port_ops lba_astro_port_ops = {
974 .inb = lba_astro_in8,
975 .inw = lba_astro_in16,
976 .inl = lba_astro_in32,
977 .outb = lba_astro_out8,
978 .outw = lba_astro_out16,
979 .outl = lba_astro_out32
980};
981
982
983#ifdef CONFIG_64BIT
984#define PIOP_TO_GMMIO(lba, addr) \
985 ((lba)->iop_base + (((addr)&0xFFFC)<<10) + ((addr)&3))
986
987/*******************************************************
988**
989** LBA PAT "I/O Port" Space Accessor Functions
990**
991** This set of accessor functions is intended for use with
992** "PAT PDC" firmware (ie Prelude/Rhapsody/Piranha boxes).
993**
994** This uses the PIOP space located in the first 64MB of GMMIO.
995** Each rope gets a full 64*KB* (ie 4 bytes per page) this way.
996** bits 1:0 stay the same. bits 15:2 become 25:12.
997** Then add the base and we can generate an I/O Port cycle.
998********************************************************/
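/*
** Example (illustrative): I/O port address 0x1234 maps to
**	iop_base + ((0x1234 & 0xFFFC) << 10) + (0x1234 & 3)
**	== iop_base + 0x48d000
** i.e. bits 15:2 of the port address land in bits 25:12 of the
** GMMIO offset, exactly as described above.
*/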
999#undef LBA_PORT_IN
1000#define LBA_PORT_IN(size, mask) \
1001static u##size lba_pat_in##size (struct pci_hba_data *l, u16 addr) \
1002{ \
1003 u##size t; \
1004 DBG_PORT("%s(0x%p, 0x%x) ->", __FUNCTION__, l, addr); \
1005 t = READ_REG##size(PIOP_TO_GMMIO(LBA_DEV(l), addr)); \
1006 DBG_PORT(" 0x%x\n", t); \
1007 return (t); \
1008}
1009
1010LBA_PORT_IN( 8, 3)
1011LBA_PORT_IN(16, 2)
1012LBA_PORT_IN(32, 0)
1013
1014
1015#undef LBA_PORT_OUT
1016#define LBA_PORT_OUT(size, mask) \
1017static void lba_pat_out##size (struct pci_hba_data *l, u16 addr, u##size val) \
1018{ \
1019 void *where = (void *) PIOP_TO_GMMIO(LBA_DEV(l), addr); \
1020 DBG_PORT("%s(0x%p, 0x%x, 0x%x)\n", __FUNCTION__, l, addr, val); \
1021 WRITE_REG##size(val, where); \
1022 /* flush the I/O down to the elroy at least */ \
1023 lba_t32 = READ_U32(l->base_addr + LBA_FUNC_ID); \
1024}
1025
1026LBA_PORT_OUT( 8, 3)
1027LBA_PORT_OUT(16, 2)
1028LBA_PORT_OUT(32, 0)
1029
1030
1031static struct pci_port_ops lba_pat_port_ops = {
1032 .inb = lba_pat_in8,
1033 .inw = lba_pat_in16,
1034 .inl = lba_pat_in32,
1035 .outb = lba_pat_out8,
1036 .outw = lba_pat_out16,
1037 .outl = lba_pat_out32
1038};
1039
1040
1041
1042/*
1043** make range information from PDC available to PCI subsystem.
1044** We make the PDC call here in order to get the PCI bus range
1045** numbers. The rest will get forwarded in pcibios_fixup_bus().
1046** We don't have a struct pci_bus assigned to us yet.
1047*/
1048static void
1049lba_pat_resources(struct parisc_device *pa_dev, struct lba_device *lba_dev)
1050{
1051 unsigned long bytecnt;
1052 pdc_pat_cell_mod_maddr_block_t pa_pdc_cell; /* PA_VIEW */
1053 pdc_pat_cell_mod_maddr_block_t io_pdc_cell; /* IO_VIEW */
1054 long io_count;
1055 long status; /* PDC return status */
1056 long pa_count;
1057 int i;
1058
1059 /* return cell module (IO view) */
1060 status = pdc_pat_cell_module(&bytecnt, pa_dev->pcell_loc, pa_dev->mod_index,
1061 PA_VIEW, & pa_pdc_cell);
1062 pa_count = pa_pdc_cell.mod[1];
1063
1064 status |= pdc_pat_cell_module(&bytecnt, pa_dev->pcell_loc, pa_dev->mod_index,
1065 IO_VIEW, &io_pdc_cell);
1066 io_count = io_pdc_cell.mod[1];
1067
1068 /* We've already done this once for device discovery...*/
1069 if (status != PDC_OK) {
1070 panic("pdc_pat_cell_module() call failed for LBA!\n");
1071 }
1072
1073 if (PAT_GET_ENTITY(pa_pdc_cell.mod_info) != PAT_ENTITY_LBA) {
1074 panic("pdc_pat_cell_module() entity returned != PAT_ENTITY_LBA!\n");
1075 }
1076
1077 /*
1078 ** Inspect the resources PAT tells us about
1079 */
1080 for (i = 0; i < pa_count; i++) {
1081 struct {
1082 unsigned long type;
1083 unsigned long start;
1084 unsigned long end; /* aka finish */
1085 } *p, *io;
1086 struct resource *r;
1087
1088 p = (void *) &(pa_pdc_cell.mod[2+i*3]);
1089 io = (void *) &(io_pdc_cell.mod[2+i*3]);
1090
1091 /* Convert the PAT range data to PCI "struct resource" */
1092 switch(p->type & 0xff) {
1093 case PAT_PBNUM:
1094 lba_dev->hba.bus_num.start = p->start;
1095 lba_dev->hba.bus_num.end = p->end;
1096 break;
1097
1098 case PAT_LMMIO:
1099 /* used to fix up pre-initialized MEM BARs */
1100 if (!lba_dev->hba.lmmio_space.start) {
1101 sprintf(lba_dev->hba.lmmio_name,
1102 "PCI%02lx LMMIO",
1103 lba_dev->hba.bus_num.start);
1104 lba_dev->hba.lmmio_space_offset = p->start -
1105 io->start;
1106 r = &lba_dev->hba.lmmio_space;
1107 r->name = lba_dev->hba.lmmio_name;
1108 } else if (!lba_dev->hba.elmmio_space.start) {
1109 sprintf(lba_dev->hba.elmmio_name,
1110 "PCI%02lx ELMMIO",
1111 lba_dev->hba.bus_num.start);
1112 r = &lba_dev->hba.elmmio_space;
1113 r->name = lba_dev->hba.elmmio_name;
1114 } else {
1115 printk(KERN_WARNING MODULE_NAME
1116 " only supports 2 LMMIO resources!\n");
1117 break;
1118 }
1119
1120 r->start = p->start;
1121 r->end = p->end;
1122 r->flags = IORESOURCE_MEM;
1123 r->parent = r->sibling = r->child = NULL;
1124 break;
1125
1126 case PAT_GMMIO:
1127 /* MMIO space > 4GB phys addr; for 64-bit BAR */
1128 sprintf(lba_dev->hba.gmmio_name, "PCI%02lx GMMIO",
1129 lba_dev->hba.bus_num.start);
1130 r = &lba_dev->hba.gmmio_space;
1131 r->name = lba_dev->hba.gmmio_name;
1132 r->start = p->start;
1133 r->end = p->end;
1134 r->flags = IORESOURCE_MEM;
1135 r->parent = r->sibling = r->child = NULL;
1136 break;
1137
1138 case PAT_NPIOP:
1139 printk(KERN_WARNING MODULE_NAME
1140 " range[%d] : ignoring NPIOP (0x%lx)\n",
1141 i, p->start);
1142 break;
1143
1144 case PAT_PIOP:
1145 /*
1146 ** Postable I/O port space is per PCI host adapter.
1147 ** base of 64MB PIOP region
1148 */
1149 lba_dev->iop_base = ioremap(p->start, 64 * 1024 * 1024);
1150
1151 sprintf(lba_dev->hba.io_name, "PCI%02lx Ports",
1152 lba_dev->hba.bus_num.start);
1153 r = &lba_dev->hba.io_space;
1154 r->name = lba_dev->hba.io_name;
1155 r->start = HBA_PORT_BASE(lba_dev->hba.hba_num);
1156 r->end = r->start + HBA_PORT_SPACE_SIZE - 1;
1157 r->flags = IORESOURCE_IO;
1158 r->parent = r->sibling = r->child = NULL;
1159 break;
1160
1161 default:
1162 printk(KERN_WARNING MODULE_NAME
1163 " range[%d] : unknown pat range type (0x%lx)\n",
1164 i, p->type & 0xff);
1165 break;
1166 }
1167 }
1168}
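/*
** Worked example (illustrative values, not real firmware output) of the
** mod[] layout the loop above parses: with pa_count == 2,
**
**   mod[1] = 2                  number of (type, start, end) ranges
**   mod[2], mod[3], mod[4]      range[0]: type, start, end
**   mod[5], mod[6], mod[7]      range[1]: type, start, end
**
** i.e. range i lives at &mod[2 + i*3], which is exactly what the cast
** to the anonymous { type; start; end; } struct relies on.
*/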
1169#else
1170/* keep compiler from complaining about missing declarations */
1171#define lba_pat_port_ops lba_astro_port_ops
1172#define lba_pat_resources(pa_dev, lba_dev)
1173#endif /* CONFIG_64BIT */
1174
1175
1176extern void sba_distributed_lmmio(struct parisc_device *, struct resource *);
1177extern void sba_directed_lmmio(struct parisc_device *, struct resource *);
1178
1179
1180static void
1181lba_legacy_resources(struct parisc_device *pa_dev, struct lba_device *lba_dev)
1182{
1183 struct resource *r;
1184 int lba_num;
1185
1186 lba_dev->hba.lmmio_space_offset = PCI_F_EXTEND;
1187
1188 /*
1189 ** With "legacy" firmware, the lowest byte of FW_SCRATCH
1190 ** represents bus->secondary and the second byte represents
1191 ** bus->subsidiary (i.e. highest PPB programmed by firmware).
1192 ** PCI bus walk *should* end up with the same result.
1193 ** FIXME: But we don't have sanity checks in PCI or LBA.
1194 */
1195 lba_num = READ_REG32(lba_dev->hba.base_addr + LBA_FW_SCRATCH);
1196 r = &(lba_dev->hba.bus_num);
1197 r->name = "LBA PCI Busses";
1198 r->start = lba_num & 0xff;
1199 r->end = (lba_num>>8) & 0xff;
1200
1201 /* Set up local PCI Bus resources - we don't need them for
1202 ** Legacy boxes but it's nice to see in /proc/iomem.
1203 */
1204 r = &(lba_dev->hba.lmmio_space);
1205 sprintf(lba_dev->hba.lmmio_name, "PCI%02lx LMMIO",
1206 lba_dev->hba.bus_num.start);
1207 r->name = lba_dev->hba.lmmio_name;
1208
1209#if 1
1210 /* We want the CPU -> IO routing of addresses.
1211 * The SBA BASE/MASK registers control CPU -> IO routing.
1212 * Ask SBA what is routed to this rope/LBA.
1213 */
1214 sba_distributed_lmmio(pa_dev, r);
1215#else
1216 /*
1217 * The LBA BASE/MASK registers control IO -> System routing.
1218 *
1219 * The following code works but doesn't get us what we want.
1220 * Well, only because firmware (v5.0) on C3000 doesn't program
1221	 * the LBA BASE/MASK registers to be the exact inverse of
1222 * the corresponding SBA registers. Other Astro/Pluto
1223 * based platform firmware may do it right.
1224 *
1225 * Should someone want to mess with MSI, they may need to
1226 * reprogram LBA BASE/MASK registers. Thus preserve the code
1227 * below until MSI is known to work on C3000/A500/N4000/RP3440.
1228 *
1229 * Using the code below, /proc/iomem shows:
1230 * ...
1231 * f0000000-f0ffffff : PCI00 LMMIO
1232 * f05d0000-f05d0000 : lcd_data
1233 * f05d0008-f05d0008 : lcd_cmd
1234 * f1000000-f1ffffff : PCI01 LMMIO
1235 * f4000000-f4ffffff : PCI02 LMMIO
1236 * f4000000-f4001fff : sym53c8xx
1237 * f4002000-f4003fff : sym53c8xx
1238 * f4004000-f40043ff : sym53c8xx
1239 * f4005000-f40053ff : sym53c8xx
1240 * f4007000-f4007fff : ohci_hcd
1241 * f4008000-f40083ff : tulip
1242 * f6000000-f6ffffff : PCI03 LMMIO
1243 * f8000000-fbffffff : PCI00 ELMMIO
1244 * fa100000-fa4fffff : stifb mmio
1245 * fb000000-fb1fffff : stifb fb
1246 *
1247 * But everything listed under PCI02 actually lives under PCI00.
1248 * This is clearly wrong.
1249 *
1250 * Asking SBA how things are routed tells the correct story:
1251 * LMMIO_BASE/MASK/ROUTE f4000001 fc000000 00000000
1252 * DIR0_BASE/MASK/ROUTE fa000001 fe000000 00000006
1253 * DIR1_BASE/MASK/ROUTE f9000001 ff000000 00000004
1254 * DIR2_BASE/MASK/ROUTE f0000000 fc000000 00000000
1255 * DIR3_BASE/MASK/ROUTE f0000000 fc000000 00000000
1256 *
1257 * Which looks like this in /proc/iomem:
1258 * f4000000-f47fffff : PCI00 LMMIO
1259 * f4000000-f4001fff : sym53c8xx
1260	 * ...[deleted core devices - same as above]...
1261 * f4008000-f40083ff : tulip
1262 * f4800000-f4ffffff : PCI01 LMMIO
1263 * f6000000-f67fffff : PCI02 LMMIO
1264 * f7000000-f77fffff : PCI03 LMMIO
1265 * f9000000-f9ffffff : PCI02 ELMMIO
1266 * fa000000-fbffffff : PCI03 ELMMIO
1267 * fa100000-fa4fffff : stifb mmio
1268 * fb000000-fb1fffff : stifb fb
1269 *
1270	 * i.e. all built-in core devices are now correctly under PCI00.
1271 * The "PCI02 ELMMIO" directed range is for:
1272 * +-[02]---03.0 3Dfx Interactive, Inc. Voodoo 2
1273 *
1274 * All is well now.
1275 */
1276 r->start = READ_REG32(lba_dev->hba.base_addr + LBA_LMMIO_BASE);
1277 if (r->start & 1) {
1278 unsigned long rsize;
1279
1280 r->flags = IORESOURCE_MEM;
1281 /* mmio_mask also clears Enable bit */
1282 r->start &= mmio_mask;
1283 r->start = PCI_HOST_ADDR(HBA_DATA(lba_dev), r->start);
1284 rsize = ~ READ_REG32(lba_dev->hba.base_addr + LBA_LMMIO_MASK);
1285
1286 /*
1287 ** Each rope only gets part of the distributed range.
1288 ** Adjust "window" for this rope.
1289 */
1290 rsize /= ROPES_PER_IOC;
1291 r->start += (rsize + 1) * LBA_NUM(pa_dev->hpa);
1292 r->end = r->start + rsize;
1293 } else {
1294 r->end = r->start = 0; /* Not enabled. */
1295 }
1296#endif
1297
1298 /*
1299 ** "Directed" ranges are used when the "distributed range" isn't
1300 ** sufficient for all devices below a given LBA. Typically devices
1301 ** like graphics cards or X25 may need a directed range when the
1302	** bus has multiple slots (i.e. multiple devices) or the device
1303 ** needs more than the typical 4 or 8MB a distributed range offers.
1304 **
1305	** The main reason for ignoring it for now is the frigging
1306	** complications. Directed ranges may overlap (and take precedence
1307	** over) distributed ranges. Or a distributed range assigned to an
1308	** unused rope may be used by a directed range on a different rope.
1309 ** Support for graphics devices may require fixing this
1310 ** since they may be assigned a directed range which overlaps
1311 ** an existing (but unused portion of) distributed range.
1312 */
1313 r = &(lba_dev->hba.elmmio_space);
1314 sprintf(lba_dev->hba.elmmio_name, "PCI%02lx ELMMIO",
1315 lba_dev->hba.bus_num.start);
1316 r->name = lba_dev->hba.elmmio_name;
1317
1318#if 1
1319 /* See comment which precedes call to sba_directed_lmmio() */
1320 sba_directed_lmmio(pa_dev, r);
1321#else
1322 r->start = READ_REG32(lba_dev->hba.base_addr + LBA_ELMMIO_BASE);
1323
1324 if (r->start & 1) {
1325 unsigned long rsize;
1326 r->flags = IORESOURCE_MEM;
1327 /* mmio_mask also clears Enable bit */
1328 r->start &= mmio_mask;
1329 r->start = PCI_HOST_ADDR(HBA_DATA(lba_dev), r->start);
1330 rsize = READ_REG32(lba_dev->hba.base_addr + LBA_ELMMIO_MASK);
1331 r->end = r->start + ~rsize;
1332 }
1333#endif
1334
1335 r = &(lba_dev->hba.io_space);
1336 sprintf(lba_dev->hba.io_name, "PCI%02lx Ports",
1337 lba_dev->hba.bus_num.start);
1338 r->name = lba_dev->hba.io_name;
1339 r->flags = IORESOURCE_IO;
1340 r->start = READ_REG32(lba_dev->hba.base_addr + LBA_IOS_BASE) & ~1L;
1341 r->end = r->start + (READ_REG32(lba_dev->hba.base_addr + LBA_IOS_MASK) ^ (HBA_PORT_SPACE_SIZE - 1));
1342
1343 /* Virtualize the I/O Port space ranges */
1344 lba_num = HBA_PORT_BASE(lba_dev->hba.hba_num);
1345 r->start |= lba_num;
1346 r->end |= lba_num;
1347}
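/*
** A minimal sketch of the port-space virtualization done just above.
** The macro values here are an assumption (the real definitions live
** in the arch headers), sized so that each HBA gets a 64k window:
**
**   #define HBA_PORT_SPACE_BITS 16
**   #define HBA_PORT_BASE(h)    ((h) << HBA_PORT_SPACE_BITS)
**
** Under that assumption, legacy port 0x3f8 behind HBA 2 becomes
** kernel-visible port 0x23f8: OR-ing in HBA_PORT_BASE(hba_num) gives
** every host bridge a private, non-overlapping slice of the global
** I/O port number space.
*/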
1348
1349
1350/**************************************************************************
1351**
1352** LBA initialization code (HW and SW)
1353**
1354** o identify LBA chip itself
1355** o initialize LBA chip modes (HardFail)
1356** o FIXME: initialize DMA hints for reasonable defaults
1357** o enable configuration functions
1358** o call pci_register_ops() to discover devs (fixup/fixup_bus get invoked)
1359**
1360**************************************************************************/
1361
1362static int __init
1363lba_hw_init(struct lba_device *d)
1364{
1365 u32 stat;
1366 u32 bus_reset; /* PDC_PAT_BUG */
1367
1368#if 0
1369 printk(KERN_DEBUG "LBA %lx STAT_CTL %Lx ERROR_CFG %Lx STATUS %Lx DMA_CTL %Lx\n",
1370 d->hba.base_addr,
1371 READ_REG64(d->hba.base_addr + LBA_STAT_CTL),
1372 READ_REG64(d->hba.base_addr + LBA_ERROR_CONFIG),
1373 READ_REG64(d->hba.base_addr + LBA_ERROR_STATUS),
1374 READ_REG64(d->hba.base_addr + LBA_DMA_CTL) );
1375 printk(KERN_DEBUG " ARB mask %Lx pri %Lx mode %Lx mtlt %Lx\n",
1376 READ_REG64(d->hba.base_addr + LBA_ARB_MASK),
1377 READ_REG64(d->hba.base_addr + LBA_ARB_PRI),
1378 READ_REG64(d->hba.base_addr + LBA_ARB_MODE),
1379 READ_REG64(d->hba.base_addr + LBA_ARB_MTLT) );
1380 printk(KERN_DEBUG " HINT cfg 0x%Lx\n",
1381 READ_REG64(d->hba.base_addr + LBA_HINT_CFG));
1382 printk(KERN_DEBUG " HINT reg ");
1383 { int i;
1384 for (i=LBA_HINT_BASE; i< (14*8 + LBA_HINT_BASE); i+=8)
1385 printk(" %Lx", READ_REG64(d->hba.base_addr + i));
1386 }
1387 printk("\n");
1388#endif /* DEBUG_LBA_PAT */
1389
1390#ifdef CONFIG_64BIT
1391/*
1392 * FIXME add support for PDC_PAT_IO "Get slot status" - OLAR support
1393 * Only N-Class and up can really make use of Get slot status.
1394 * maybe L-class too but I've never played with it there.
1395 */
1396#endif
1397
1398 /* PDC_PAT_BUG: exhibited in rev 40.48 on L2000 */
1399 bus_reset = READ_REG32(d->hba.base_addr + LBA_STAT_CTL + 4) & 1;
1400 if (bus_reset) {
1401 printk(KERN_DEBUG "NOTICE: PCI bus reset still asserted! (clearing)\n");
1402 }
1403
1404 stat = READ_REG32(d->hba.base_addr + LBA_ERROR_CONFIG);
1405 if (stat & LBA_SMART_MODE) {
1406 printk(KERN_DEBUG "NOTICE: LBA in SMART mode! (cleared)\n");
1407 stat &= ~LBA_SMART_MODE;
1408 WRITE_REG32(stat, d->hba.base_addr + LBA_ERROR_CONFIG);
1409 }
1410
1411 /* Set HF mode as the default (vs. -1 mode). */
1412 stat = READ_REG32(d->hba.base_addr + LBA_STAT_CTL);
1413 WRITE_REG32(stat | HF_ENABLE, d->hba.base_addr + LBA_STAT_CTL);
1414
1415 /*
1416 ** Writing a zero to STAT_CTL.rf (bit 0) will clear reset signal
1417 ** if it's not already set. If we just cleared the PCI Bus Reset
1418 ** signal, wait a bit for the PCI devices to recover and setup.
1419 */
1420 if (bus_reset)
1421 mdelay(pci_post_reset_delay);
1422
1423 if (0 == READ_REG32(d->hba.base_addr + LBA_ARB_MASK)) {
1424 /*
1425 ** PDC_PAT_BUG: PDC rev 40.48 on L2000.
1426 ** B2000/C3600/J6000 also have this problem?
1427 **
1428 ** Elroys with hot pluggable slots don't get configured
1429 ** correctly if the slot is empty. ARB_MASK is set to 0
1430	** and we can't master transactions on the bus unless it's
1431	** at least 1. 0x3 enables the elroy and the first slot.
1432 */
1433 printk(KERN_DEBUG "NOTICE: Enabling PCI Arbitration\n");
1434 WRITE_REG32(0x3, d->hba.base_addr + LBA_ARB_MASK);
1435 }
1436
1437 /*
1438 ** FIXME: Hint registers are programmed with default hint
1439 ** values by firmware. Hints should be sane even if we
1440 ** can't reprogram them the way drivers want.
1441 */
1442 return 0;
1443}
1444
1445
1446
1447/*
1448** Determine if lba should claim this chip (return 0) or not (return 1).
1449** If so, initialize the chip and tell other partners in crime they
1450** have work to do.
1451*/
1452static int __init
1453lba_driver_probe(struct parisc_device *dev)
1454{
1455 struct lba_device *lba_dev;
1456 struct pci_bus *lba_bus;
1457 struct pci_ops *cfg_ops;
1458 u32 func_class;
1459 void *tmp_obj;
1460 char *version;
1461 void __iomem *addr = ioremap(dev->hpa, 4096);
1462
1463 /* Read HW Rev First */
1464 func_class = READ_REG32(addr + LBA_FCLASS);
1465
1466 if (IS_ELROY(dev)) {
1467 func_class &= 0xf;
1468 switch (func_class) {
1469 case 0: version = "TR1.0"; break;
1470 case 1: version = "TR2.0"; break;
1471 case 2: version = "TR2.1"; break;
1472 case 3: version = "TR2.2"; break;
1473 case 4: version = "TR3.0"; break;
1474 case 5: version = "TR4.0"; break;
1475 default: version = "TR4+";
1476 }
1477
1478 printk(KERN_INFO "%s version %s (0x%x) found at 0x%lx\n",
1479 MODULE_NAME, version, func_class & 0xf, dev->hpa);
1480
1481 if (func_class < 2) {
1482 printk(KERN_WARNING "Can't support LBA older than "
1483 "TR2.1 - continuing under adversity.\n");
1484 }
1485
1486#if 0
1487/* Elroy TR4.0 should work with simple algorithm.
1488 But it doesn't. Still missing something. *sigh*
1489*/
1490 if (func_class > 4) {
1491 cfg_ops = &mercury_cfg_ops;
1492 } else
1493#endif
1494 {
1495 cfg_ops = &elroy_cfg_ops;
1496 }
1497
1498 } else if (IS_MERCURY(dev) || IS_QUICKSILVER(dev)) {
1499 func_class &= 0xff;
1500 version = kmalloc(6, GFP_KERNEL);
1501 sprintf(version,"TR%d.%d",(func_class >> 4),(func_class & 0xf));
1502 /* We could use one printk for both Elroy and Mercury,
1503		 * were it not for the different mask applied to func_class.
1504 */
1505 printk(KERN_INFO "%s version %s (0x%x) found at 0x%lx\n",
1506 MODULE_NAME, version, func_class & 0xff, dev->hpa);
1507 cfg_ops = &mercury_cfg_ops;
1508 } else {
1509 printk(KERN_ERR "Unknown LBA found at 0x%lx\n", dev->hpa);
1510 return -ENODEV;
1511 }
1512
1513 /*
1514	** Tell the I/O SAPIC driver we have an IRQ handler/region.
1515 */
1516 tmp_obj = iosapic_register(dev->hpa + LBA_IOSAPIC_BASE);
1517
1518 /* NOTE: PCI devices (e.g. 103c:1005 graphics card) which don't
1519 ** have an IRT entry will get NULL back from iosapic code.
1520 */
1521
1522 lba_dev = kmalloc(sizeof(struct lba_device), GFP_KERNEL);
1523 if (!lba_dev) {
1524 printk(KERN_ERR "lba_init_chip - couldn't alloc lba_device\n");
1525 return(1);
1526 }
1527
1528 memset(lba_dev, 0, sizeof(struct lba_device));
1529
1530
1531 /* ---------- First : initialize data we already have --------- */
1532
1533 lba_dev->hw_rev = func_class;
1534 lba_dev->hba.base_addr = addr;
1535 lba_dev->hba.dev = dev;
1536 lba_dev->iosapic_obj = tmp_obj; /* save interrupt handle */
1537 lba_dev->hba.iommu = sba_get_iommu(dev); /* get iommu data */
1538
1539 /* ------------ Second : initialize common stuff ---------- */
1540 pci_bios = &lba_bios_ops;
1541 pcibios_register_hba(HBA_DATA(lba_dev));
1542 spin_lock_init(&lba_dev->lba_lock);
1543
1544 if (lba_hw_init(lba_dev))
1545 return(1);
1546
1547 /* ---------- Third : setup I/O Port and MMIO resources --------- */
1548
1549 if (is_pdc_pat()) {
1550 /* PDC PAT firmware uses PIOP region of GMMIO space. */
1551 pci_port = &lba_pat_port_ops;
1552 /* Go ask PDC PAT what resources this LBA has */
1553 lba_pat_resources(dev, lba_dev);
1554 } else {
1555 if (!astro_iop_base) {
1556 /* Sprockets PDC uses NPIOP region */
1557 astro_iop_base = ioremap(LBA_PORT_BASE, 64 * 1024);
1558 pci_port = &lba_astro_port_ops;
1559 }
1560
1561 /* Poke the chip a bit for /proc output */
1562 lba_legacy_resources(dev, lba_dev);
1563 }
1564
1565 /*
1566 ** Tell PCI support another PCI bus was found.
1567 ** Walks PCI bus for us too.
1568 */
1569 dev->dev.platform_data = lba_dev;
1570 lba_bus = lba_dev->hba.hba_bus =
1571 pci_scan_bus_parented(&dev->dev, lba_dev->hba.bus_num.start,
1572 cfg_ops, NULL);
1573
1574 /* This is in lieu of calling pci_assign_unassigned_resources() */
1575 if (is_pdc_pat()) {
1576 /* assign resources to un-initialized devices */
1577
1578 DBG_PAT("LBA pci_bus_size_bridges()\n");
1579 pci_bus_size_bridges(lba_bus);
1580
1581 DBG_PAT("LBA pci_bus_assign_resources()\n");
1582 pci_bus_assign_resources(lba_bus);
1583
1584#ifdef DEBUG_LBA_PAT
1585 DBG_PAT("\nLBA PIOP resource tree\n");
1586 lba_dump_res(&lba_dev->hba.io_space, 2);
1587 DBG_PAT("\nLBA LMMIO resource tree\n");
1588 lba_dump_res(&lba_dev->hba.lmmio_space, 2);
1589#endif
1590 }
1591 pci_enable_bridges(lba_bus);
1592
1593
1594 /*
1595 ** Once PCI register ops has walked the bus, access to config
1596 ** space is restricted. Avoids master aborts on config cycles.
1597 ** Early LBA revs go fatal on *any* master abort.
1598 */
1599 if (cfg_ops == &elroy_cfg_ops) {
1600 lba_dev->flags |= LBA_FLAG_SKIP_PROBE;
1601 }
1602
1603 /* Whew! Finally done! Tell services we got this one covered. */
1604 return 0;
1605}
1606
1607static struct parisc_device_id lba_tbl[] = {
1608 { HPHW_BRIDGE, HVERSION_REV_ANY_ID, ELROY_HVERS, 0xa },
1609 { HPHW_BRIDGE, HVERSION_REV_ANY_ID, MERCURY_HVERS, 0xa },
1610 { HPHW_BRIDGE, HVERSION_REV_ANY_ID, QUICKSILVER_HVERS, 0xa },
1611 { 0, }
1612};
1613
1614static struct parisc_driver lba_driver = {
1615 .name = MODULE_NAME,
1616 .id_table = lba_tbl,
1617 .probe = lba_driver_probe,
1618};
1619
1620/*
1621** One time initialization to let the world know the LBA was found.
1622** Must be called exactly once before pci_init().
1623*/
1624void __init lba_init(void)
1625{
1626 register_parisc_driver(&lba_driver);
1627}
1628
1629/*
1630** Initialize the IBASE/IMASK registers for LBA (Elroy).
1631** Only called from sba_iommu.c in order to route ranges (MMIO vs DMA).
1632** sba_iommu is responsible for locking (none needed at init time).
1633*/
1634void lba_set_iregs(struct parisc_device *lba, u32 ibase, u32 imask)
1635{
1636 void __iomem * base_addr = ioremap(lba->hpa, 4096);
1637
1638 imask <<= 2; /* adjust for hints - 2 more bits */
1639
1640 /* Make sure we aren't trying to set bits that aren't writeable. */
1641 WARN_ON((ibase & 0x001fffff) != 0);
1642 WARN_ON((imask & 0x001fffff) != 0);
1643
1644 DBG("%s() ibase 0x%x imask 0x%x\n", __FUNCTION__, ibase, imask);
1645 WRITE_REG32( imask, base_addr + LBA_IMASK);
1646 WRITE_REG32( ibase, base_addr + LBA_IBASE);
1647 iounmap(base_addr);
1648}
1649
diff --git a/drivers/parisc/led.c b/drivers/parisc/led.c
new file mode 100644
index 000000000000..e90fb72a6962
--- /dev/null
+++ b/drivers/parisc/led.c
@@ -0,0 +1,760 @@
1/*
2 * Chassis LCD/LED driver for HP-PARISC workstations
3 *
4 * (c) Copyright 2000 Red Hat Software
5 * (c) Copyright 2000 Helge Deller <hdeller@redhat.com>
6 * (c) Copyright 2001-2004 Helge Deller <deller@gmx.de>
7 * (c) Copyright 2001 Randolph Chung <tausq@debian.org>
8 *
9 * This program is free software; you can redistribute it and/or modify
10 * it under the terms of the GNU General Public License as published by
11 * the Free Software Foundation; either version 2 of the License, or
12 * (at your option) any later version.
13 *
14 * TODO:
15 * - speed up calculations with inlined assembler
16 * - interface to write to second row of LCD from /proc (if technically possible)
17 *
18 * Changes:
19 * - Audit copy_from_user in led_proc_write.
20 * Daniele Bellucci <bellucda@tiscali.it>
21 */
22
23#include <linux/config.h>
24#include <linux/module.h>
25#include <linux/stddef.h> /* for offsetof() */
26#include <linux/init.h>
27#include <linux/types.h>
28#include <linux/ioport.h>
29#include <linux/utsname.h>
30#include <linux/delay.h>
31#include <linux/netdevice.h>
32#include <linux/inetdevice.h>
33#include <linux/in.h>
34#include <linux/interrupt.h>
35#include <linux/kernel_stat.h>
36#include <linux/reboot.h>
37#include <linux/proc_fs.h>
38#include <linux/ctype.h>
39#include <linux/blkdev.h>
40#include <asm/io.h>
41#include <asm/processor.h>
42#include <asm/hardware.h>
43#include <asm/param.h> /* HZ */
44#include <asm/led.h>
45#include <asm/pdc.h>
46#include <asm/uaccess.h>
47
48/* The control of the LEDs and LCDs on PARISC machines has to be done
49   completely in software. The necessary calculations are done in a tasklet
50   which is scheduled at every timer interrupt, and since these calculations
51   may consume a fair amount of CPU time, some of them can be
52   turned off with the following variables (controlled via procfs) */
53
54static int led_type = -1;
55static int led_heartbeat = 1;
56static int led_diskio = 1;
57static int led_lanrxtx = 1;
58static char lcd_text[32];
59static char lcd_text_default[32];
60
61#if 0
62#define DPRINTK(x) printk x
63#else
64#define DPRINTK(x)
65#endif
66
67
68struct lcd_block {
69 unsigned char command; /* stores the command byte */
70 unsigned char on; /* value for turning LED on */
71 unsigned char off; /* value for turning LED off */
72};
73
74/* Structure returned by PDC_RETURN_CHASSIS_INFO */
75/* NOTE: we use unsigned long:16 two times, since the following member
76 lcd_cmd_reg_addr needs to be 64bit aligned on 64bit PA2.0-machines */
77struct pdc_chassis_lcd_info_ret_block {
78 unsigned long model:16; /* DISPLAY_MODEL_XXXX */
79 unsigned long lcd_width:16; /* width of the LCD in chars (DISPLAY_MODEL_LCD only) */
80 unsigned long lcd_cmd_reg_addr; /* ptr to LCD cmd-register & data ptr for LED */
81 unsigned long lcd_data_reg_addr; /* ptr to LCD data-register (LCD only) */
82 unsigned int min_cmd_delay; /* delay in uS after cmd-write (LCD only) */
83 unsigned char reset_cmd1; /* command #1 for writing LCD string (LCD only) */
84 unsigned char reset_cmd2; /* command #2 for writing LCD string (LCD only) */
85 unsigned char act_enable; /* 0 = no activity (LCD only) */
86 struct lcd_block heartbeat;
87 struct lcd_block disk_io;
88 struct lcd_block lan_rcv;
89 struct lcd_block lan_tx;
90 char _pad;
91};
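/* A hypothetical compile-time check (not part of this driver) of the
 * alignment constraint described in the NOTE above; offsetof() comes
 * from linux/stddef.h, which is already included. The two 16-bit
 * bitfields share a single unsigned long, so lcd_cmd_reg_addr starts
 * on a native word boundary on both 32- and 64-bit kernels. */
static inline void pdc_lcd_layout_check(void)
{
	BUILD_BUG_ON(offsetof(struct pdc_chassis_lcd_info_ret_block,
			      lcd_cmd_reg_addr) % sizeof(unsigned long));
}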
92
93
94/* LCD_CMD and LCD_DATA for KittyHawk machines */
95#define KITTYHAWK_LCD_CMD F_EXTEND(0xf0190000UL) /* 64bit-ready */
96#define KITTYHAWK_LCD_DATA (KITTYHAWK_LCD_CMD+1)
97
98/* lcd_info is pre-initialized to the values needed to program KittyHawk LCDs.
99 * HP seems to have used Sharp/Hitachi HD44780 LCDs most of the time. */
100static struct pdc_chassis_lcd_info_ret_block
101lcd_info __attribute__((aligned(8))) =
102{
103 .model = DISPLAY_MODEL_LCD,
104 .lcd_width = 16,
105 .lcd_cmd_reg_addr = KITTYHAWK_LCD_CMD,
106 .lcd_data_reg_addr = KITTYHAWK_LCD_DATA,
107 .min_cmd_delay = 40,
108 .reset_cmd1 = 0x80,
109 .reset_cmd2 = 0xc0,
110};
111
112
113/* direct access to some of the lcd_info variables */
114#define LCD_CMD_REG lcd_info.lcd_cmd_reg_addr
115#define LCD_DATA_REG lcd_info.lcd_data_reg_addr
116#define LED_DATA_REG lcd_info.lcd_cmd_reg_addr /* LASI & ASP only */
117
118
119/* ptr to LCD/LED-specific function */
120static void (*led_func_ptr) (unsigned char);
121
122#define LED_HASLCD 1
123#define LED_NOLCD 0
124#ifdef CONFIG_PROC_FS
125static int led_proc_read(char *page, char **start, off_t off, int count,
126 int *eof, void *data)
127{
128 char *out = page;
129 int len;
130
131 switch ((long)data)
132 {
133 case LED_NOLCD:
134 out += sprintf(out, "Heartbeat: %d\n", led_heartbeat);
135 out += sprintf(out, "Disk IO: %d\n", led_diskio);
136 out += sprintf(out, "LAN Rx/Tx: %d\n", led_lanrxtx);
137 break;
138 case LED_HASLCD:
139 out += sprintf(out, "%s\n", lcd_text);
140 break;
141 default:
142 *eof = 1;
143 return 0;
144 }
145
146 len = out - page - off;
147 if (len < count) {
148 *eof = 1;
149 if (len <= 0) return 0;
150 } else {
151 len = count;
152 }
153 *start = page + off;
154 return len;
155}
156
157static int led_proc_write(struct file *file, const char *buf,
158 unsigned long count, void *data)
159{
160 char *cur, lbuf[count + 1];
161 int d;
162
163 if (!capable(CAP_SYS_ADMIN))
164 return -EACCES;
165
166 memset(lbuf, 0, count + 1);
167
168 if (copy_from_user(lbuf, buf, count))
169 return -EFAULT;
170
171 cur = lbuf;
172
173 /* skip initial spaces */
174 while (*cur && isspace(*cur))
175 {
176 cur++;
177 }
178
179 switch ((long)data)
180 {
181 case LED_NOLCD:
182 d = *cur++ - '0';
183 if (d != 0 && d != 1) goto parse_error;
184 led_heartbeat = d;
185
186 if (*cur++ != ' ') goto parse_error;
187
188 d = *cur++ - '0';
189 if (d != 0 && d != 1) goto parse_error;
190 led_diskio = d;
191
192 if (*cur++ != ' ') goto parse_error;
193
194 d = *cur++ - '0';
195 if (d != 0 && d != 1) goto parse_error;
196 led_lanrxtx = d;
197
198 break;
199 case LED_HASLCD:
200 if (*cur && cur[strlen(cur)-1] == '\n')
201 cur[strlen(cur)-1] = 0;
202 if (*cur == 0)
203 cur = lcd_text_default;
204 lcd_print(cur);
205 break;
206 default:
207 return 0;
208 }
209
210 return count;
211
212parse_error:
213 if ((long)data == LED_NOLCD)
214 printk(KERN_CRIT "Parse error: expect \"n n n\" (n == 0 or 1) for heartbeat,\ndisk io and lan tx/rx indicators\n");
215 return -EINVAL;
216}
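/* Usage sketch from userland (as root), via the /proc entries that
 * led_create_procfs() below registers:
 *
 *   echo "1 1 0" > /proc/pdc/led   # heartbeat on, disk io on, lan off
 *   echo "hello world" > /proc/pdc/lcd
 *   echo "" > /proc/pdc/lcd        # falls back to lcd_text_default
 */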
217
218static int __init led_create_procfs(void)
219{
220 struct proc_dir_entry *proc_pdc_root = NULL;
221 struct proc_dir_entry *ent;
222
223 if (led_type == -1) return -1;
224
225 proc_pdc_root = proc_mkdir("pdc", 0);
226 if (!proc_pdc_root) return -1;
227 proc_pdc_root->owner = THIS_MODULE;
228 ent = create_proc_entry("led", S_IFREG|S_IRUGO|S_IWUSR, proc_pdc_root);
229 if (!ent) return -1;
230 ent->nlink = 1;
231 ent->data = (void *)LED_NOLCD; /* LED */
232 ent->read_proc = led_proc_read;
233 ent->write_proc = led_proc_write;
234 ent->owner = THIS_MODULE;
235
236 if (led_type == LED_HASLCD)
237 {
238 ent = create_proc_entry("lcd", S_IFREG|S_IRUGO|S_IWUSR, proc_pdc_root);
239 if (!ent) return -1;
240 ent->nlink = 1;
241 ent->data = (void *)LED_HASLCD; /* LCD */
242 ent->read_proc = led_proc_read;
243 ent->write_proc = led_proc_write;
244 ent->owner = THIS_MODULE;
245 }
246
247 return 0;
248}
249#endif
250
251/*
252 **
253 ** led_ASP_driver()
254 **
255 */
256#define LED_DATA 0x01 /* data to shift (0:on 1:off) */
257#define LED_STROBE 0x02 /* strobe to clock data */
258static void led_ASP_driver(unsigned char leds)
259{
260 int i;
261
262 leds = ~leds;
263 for (i = 0; i < 8; i++) {
264 unsigned char value;
265 value = (leds & 0x80) >> 7;
266 gsc_writeb( value, LED_DATA_REG );
267 gsc_writeb( value | LED_STROBE, LED_DATA_REG );
268 leds <<= 1;
269 }
270}
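/*
 ** Worked example of the shift sequence above (illustrative): for
 ** leds == 0x01 (only LED0 on), ~leds == 0xfe is clocked out MSB
 ** first, each bit written once with STROBE low and once with it high:
 **
 **   bits 7..1: value 1 (off, active low) -> writes 0x01 then 0x03
 **   bit 0:     value 0 (on)              -> writes 0x00 then 0x02
 */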
271
272
273/*
274 **
275 ** led_LASI_driver()
276 **
277 */
278static void led_LASI_driver(unsigned char leds)
279{
280 leds = ~leds;
281 gsc_writeb( leds, LED_DATA_REG );
282}
283
284
285/*
286 **
287 ** led_LCD_driver()
288 **
289 ** The logic of the LCD driver is that at every scheduled call we write
290 ** to only one of the LCD_CMD_REG _or_ LCD_DATA_REG registers.
291 ** That way we don't need to let this tasklet busywait for min_cmd_delay
292 ** microseconds.
293 **
294 ** TODO: check the value of "min_cmd_delay" against the value of HZ.
295 **
296 */
297static void led_LCD_driver(unsigned char leds)
298{
299 static int last_index; /* 0:heartbeat, 1:disk, 2:lan_in, 3:lan_out */
300 static int last_was_cmd;/* 0: CMD was written last, 1: DATA was last */
301 struct lcd_block *block_ptr;
302 int value;
303
304 switch (last_index) {
305 case 0: block_ptr = &lcd_info.heartbeat;
306 value = leds & LED_HEARTBEAT;
307 break;
308 case 1: block_ptr = &lcd_info.disk_io;
309 value = leds & LED_DISK_IO;
310 break;
311 case 2: block_ptr = &lcd_info.lan_rcv;
312 value = leds & LED_LAN_RCV;
313 break;
314 case 3: block_ptr = &lcd_info.lan_tx;
315 value = leds & LED_LAN_TX;
316 break;
317 default: /* should never happen: */
318 return;
319 }
320
321 if (last_was_cmd) {
322 /* write the value to the LCD data port */
323 gsc_writeb( value ? block_ptr->on : block_ptr->off, LCD_DATA_REG );
324 } else {
325 /* write the command-byte to the LCD command register */
326 gsc_writeb( block_ptr->command, LCD_CMD_REG );
327 }
328
329 /* now update the vars for the next interrupt iteration */
330 if (++last_was_cmd == 2) { /* switch between cmd & data */
331 last_was_cmd = 0;
332 if (++last_index == 4)
333 last_index = 0; /* switch back to heartbeat index */
334 }
335}
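/*
 ** Illustrative sequence of successive calls into led_LCD_driver():
 **
 **   call 0: heartbeat command byte -> LCD_CMD_REG
 **   call 1: heartbeat on/off value -> LCD_DATA_REG
 **   call 2: disk_io command byte   -> LCD_CMD_REG
 **   call 3: disk_io on/off value   -> LCD_DATA_REG
 **   ... then lan_rcv and lan_tx, and back to heartbeat.
 **
 ** Splitting command and data across calls keeps each call to a single
 ** register write, so min_cmd_delay elapses between the two for free.
 */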
336
337
338/*
339 **
340 ** led_get_net_activity()
341 **
342 ** calculate if there was TX- or RX-throughput on the network interfaces
343 ** (analogous to dev_get_info() from net/core/dev.c)
344 **
345 */
346static __inline__ int led_get_net_activity(void)
347{
348#ifndef CONFIG_NET
349 return 0;
350#else
351 static unsigned long rx_total_last, tx_total_last;
352 unsigned long rx_total, tx_total;
353 struct net_device *dev;
354 int retval;
355
356 rx_total = tx_total = 0;
357
358 /* we are running as tasklet, so locking dev_base
359 * for reading should be OK */
360 read_lock(&dev_base_lock);
361 for (dev = dev_base; dev; dev = dev->next) {
362 struct net_device_stats *stats;
363 struct in_device *in_dev = __in_dev_get(dev);
364 if (!in_dev || !in_dev->ifa_list)
365 continue;
366 if (LOOPBACK(in_dev->ifa_list->ifa_local))
367 continue;
368 if (!dev->get_stats)
369 continue;
370 stats = dev->get_stats(dev);
371 rx_total += stats->rx_packets;
372 tx_total += stats->tx_packets;
373 }
374 read_unlock(&dev_base_lock);
375
376 retval = 0;
377
378 if (rx_total != rx_total_last) {
379 rx_total_last = rx_total;
380 retval |= LED_LAN_RCV;
381 }
382
383 if (tx_total != tx_total_last) {
384 tx_total_last = tx_total;
385 retval |= LED_LAN_TX;
386 }
387
388 return retval;
389#endif
390}
391
392
393/*
394 **
395 ** led_get_diskio_activity()
396 **
397 ** calculate if there was disk-io in the system
398 **
399 */
400static __inline__ int led_get_diskio_activity(void)
401{
402 static unsigned long last_pgpgin, last_pgpgout;
403 struct page_state pgstat;
404 int changed;
405
406 get_full_page_state(&pgstat); /* get no of sectors in & out */
407
408 /* Just use a very simple calculation here. Do not care about overflow,
409 since we only want to know if there was activity or not. */
410 changed = (pgstat.pgpgin != last_pgpgin) || (pgstat.pgpgout != last_pgpgout);
411 last_pgpgin = pgstat.pgpgin;
412 last_pgpgout = pgstat.pgpgout;
413
414 return (changed ? LED_DISK_IO : 0);
415}
416
417
418
419/*
420 ** led_tasklet_func()
421 **
422 ** is scheduled at every timer interrupt from time.c and
423 ** updates the chassis LCD/LED
424
425 TODO:
426 - display load average (older machines like the 715/64 have 4 "free" LEDs for that)
427 - optimizations
428 */
429
430#define HEARTBEAT_LEN (HZ*6/100)
431#define HEARTBEAT_2ND_RANGE_START (HZ*22/100)
432#define HEARTBEAT_2ND_RANGE_END (HEARTBEAT_2ND_RANGE_START + HEARTBEAT_LEN)
433
434#define NORMALIZED_COUNT(count) (count/(HZ/100))
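/* Worked example, assuming HZ == 100: HEARTBEAT_LEN == 6 ticks (60ms)
 * and the second pulse spans ticks 22..27, so within every second the
 * heartbeat LED is lit for ticks 0-5 and 22-27: two short beats, then
 * a long pause. NORMALIZED_COUNT(count) == count in that case, so the
 * LAN scan below runs on ticks where count % 8 == 4 and the disk scan
 * on ticks where count % 8 == 0, keeping the two apart. */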
435
436static void led_tasklet_func(unsigned long unused)
437{
438 static unsigned char lastleds;
439 unsigned char currentleds; /* stores current value of the LEDs */
440 static unsigned long count; /* static incremented value, not wrapped */
441 static unsigned long count_HZ; /* counter in range 0..HZ */
442
443 /* exit if not initialized */
444 if (!led_func_ptr)
445 return;
446
447 /* increment the local counters */
448 ++count;
449 if (++count_HZ == HZ)
450 count_HZ = 0;
451
452 currentleds = lastleds;
453
454 if (led_heartbeat)
455 {
456 /* flash heartbeat-LED like a real heart (2 x short then a long delay) */
457 if (count_HZ<HEARTBEAT_LEN ||
458 (count_HZ>=HEARTBEAT_2ND_RANGE_START && count_HZ<HEARTBEAT_2ND_RANGE_END))
459 currentleds |= LED_HEARTBEAT;
460 else
461 currentleds &= ~LED_HEARTBEAT;
462 }
463
464 /* look for network activity and flash LEDs respectively */
465 if (led_lanrxtx && ((NORMALIZED_COUNT(count)+(8/2)) & 7) == 0)
466 {
467 currentleds &= ~(LED_LAN_RCV | LED_LAN_TX);
468 currentleds |= led_get_net_activity();
469 }
470
471	/* avoid calculating diskio-stats in the same irq as the netio-stats */
472 if (led_diskio && (NORMALIZED_COUNT(count) & 7) == 0)
473 {
474 currentleds &= ~LED_DISK_IO;
475 currentleds |= led_get_diskio_activity();
476 }
477
478 /* blink all LEDs twice a second if we got an Oops (HPMC) */
479 if (oops_in_progress) {
480 currentleds = (count_HZ<=(HZ/2)) ? 0 : 0xff;
481 }
482
483 /* update the LCD/LEDs */
484 if (currentleds != lastleds) {
485 led_func_ptr(currentleds);
486 lastleds = currentleds;
487 }
488}
489
490/* main led tasklet struct (scheduled from time.c) */
491DECLARE_TASKLET_DISABLED(led_tasklet, led_tasklet_func, 0);
492
493
494/*
495 ** led_halt()
496 **
497 ** called by the reboot notifier chain at shutdown and stops all
498 ** LED/LCD activities.
499 **
500 */
501
502static int led_halt(struct notifier_block *, unsigned long, void *);
503
504static struct notifier_block led_notifier = {
505 .notifier_call = led_halt,
506};
507
508static int led_halt(struct notifier_block *nb, unsigned long event, void *buf)
509{
510 char *txt;
511
512 switch (event) {
513 case SYS_RESTART: txt = "SYSTEM RESTART";
514 break;
515 case SYS_HALT: txt = "SYSTEM HALT";
516 break;
517 case SYS_POWER_OFF: txt = "SYSTEM POWER OFF";
518 break;
519 default: return NOTIFY_DONE;
520 }
521
522 /* completely stop the LED/LCD tasklet */
523 tasklet_disable(&led_tasklet);
524
525 if (lcd_info.model == DISPLAY_MODEL_LCD)
526 lcd_print(txt);
527 else
528 if (led_func_ptr)
529 led_func_ptr(0xff); /* turn all LEDs ON */
530
531 unregister_reboot_notifier(&led_notifier);
532 return NOTIFY_OK;
533}
534
535/*
536 ** register_led_driver()
537 **
538 ** registers an external LED or LCD for usage by this driver.
539 ** Currently only LCD-, LASI- and ASP-style LCDs/LEDs are supported.
540 **
541 */
542
543int __init register_led_driver(int model, unsigned long cmd_reg, unsigned long data_reg)
544{
545 static int initialized;
546
547 if (initialized || !data_reg)
548 return 1;
549
550 lcd_info.model = model; /* store the values */
551 LCD_CMD_REG = (cmd_reg == LED_CMD_REG_NONE) ? 0 : cmd_reg;
552
553 switch (lcd_info.model) {
554 case DISPLAY_MODEL_LCD:
555 LCD_DATA_REG = data_reg;
556 printk(KERN_INFO "LCD display at %lx,%lx registered\n",
557 LCD_CMD_REG , LCD_DATA_REG);
558 led_func_ptr = led_LCD_driver;
559 lcd_print( lcd_text_default );
560 led_type = LED_HASLCD;
561 break;
562
563 case DISPLAY_MODEL_LASI:
564 LED_DATA_REG = data_reg;
565 led_func_ptr = led_LASI_driver;
566 printk(KERN_INFO "LED display at %lx registered\n", LED_DATA_REG);
567 led_type = LED_NOLCD;
568 break;
569
570 case DISPLAY_MODEL_OLD_ASP:
571 LED_DATA_REG = data_reg;
572 led_func_ptr = led_ASP_driver;
573 printk(KERN_INFO "LED (ASP-style) display at %lx registered\n",
574 LED_DATA_REG);
575 led_type = LED_NOLCD;
576 break;
577
578 default:
579 printk(KERN_ERR "%s: Wrong LCD/LED model %d !\n",
580 __FUNCTION__, lcd_info.model);
581 return 1;
582 }
583
584 /* mark the LCD/LED driver now as initialized and
585 * register to the reboot notifier chain */
586 initialized++;
587 register_reboot_notifier(&led_notifier);
588
589 /* start the led tasklet for the first time */
590 tasklet_enable(&led_tasklet);
591
592 return 0;
593}
594
595/*
596 ** register_led_regions()
597 **
598 ** register_led_regions() registers the LCD/LED memory regions in /proc/iomem.
599 ** At bootup, when the initialisation of the LCD/LED normally happens,
600 ** not all internal structures of request_region() are properly set up,
601 ** so we delay the region registration until after busdevices_init()
602 ** has been executed.
603 **
604 */
605
606void __init register_led_regions(void)
607{
608 switch (lcd_info.model) {
609 case DISPLAY_MODEL_LCD:
610 request_mem_region((unsigned long)LCD_CMD_REG, 1, "lcd_cmd");
611 request_mem_region((unsigned long)LCD_DATA_REG, 1, "lcd_data");
612 break;
613 case DISPLAY_MODEL_LASI:
614 case DISPLAY_MODEL_OLD_ASP:
615 request_mem_region((unsigned long)LED_DATA_REG, 1, "led_data");
616 break;
617 }
618}
619
620
621/*
622 **
623 ** lcd_print()
624 **
625 ** Displays the given string on the LCD-Display of newer machines.
626 ** lcd_print() disables the timer-based led tasklet during its
627 ** execution and re-enables it afterwards.
628 **
629 */
630int lcd_print( char *str )
631{
632 int i;
633
634 if (!led_func_ptr || lcd_info.model != DISPLAY_MODEL_LCD)
635 return 0;
636
637 /* temporarily disable the led tasklet */
638 tasklet_disable(&led_tasklet);
639
640 /* copy display string to buffer for procfs */
641 strlcpy(lcd_text, str, sizeof(lcd_text));
642
643 /* Set LCD Cursor to 1st character */
644 gsc_writeb(lcd_info.reset_cmd1, LCD_CMD_REG);
645 udelay(lcd_info.min_cmd_delay);
646
647 /* Print the string */
648 for (i=0; i < lcd_info.lcd_width; i++) {
649 if (str && *str)
650 gsc_writeb(*str++, LCD_DATA_REG);
651 else
652 gsc_writeb(' ', LCD_DATA_REG);
653 udelay(lcd_info.min_cmd_delay);
654 }
655
656 /* re-enable the led tasklet */
657 tasklet_enable(&led_tasklet);
658
659 return lcd_info.lcd_width;
660}
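/* Usage sketch: lcd_print("Linux booted"); writes the string to the
 * first row and pads the remaining columns with spaces, so a short
 * string always overwrites whatever was displayed before. Returns
 * lcd_info.lcd_width, or 0 when no LCD is registered. */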
661
662/*
663 ** led_init()
664 **
665 ** led_init() is called very early in the bootup-process from setup.c
666 ** and asks the PDC for a usable chassis LCD or LED.
667 ** If the PDC doesn't return any info, then the LED
668 ** is detected by lasi.c or asp.c and registered with the
669 ** above functions lasi_led_init() or asp_led_init().
670 ** KittyHawk machines often have a buggy PDC, so
671 ** we explicitly check for those machines here.
672 */
673
674int __init led_init(void)
675{
676 struct pdc_chassis_info chassis_info;
677 int ret;
678
679 snprintf(lcd_text_default, sizeof(lcd_text_default),
680 "Linux %s", system_utsname.release);
681
682 /* Work around the buggy PDC of KittyHawk-machines */
683 switch (CPU_HVERSION) {
684 case 0x580: /* KittyHawk DC2-100 (K100) */
685 case 0x581: /* KittyHawk DC3-120 (K210) */
686 case 0x582: /* KittyHawk DC3 100 (K400) */
687 case 0x583: /* KittyHawk DC3 120 (K410) */
688 case 0x58B: /* KittyHawk DC2 100 (K200) */
689 printk(KERN_INFO "%s: KittyHawk-Machine (hversion 0x%x) found, "
690 "LED detection skipped.\n", __FILE__, CPU_HVERSION);
691 goto found; /* use the preinitialized values of lcd_info */
692 }
693
694 /* initialize the struct, so that we can check for valid return values */
695 lcd_info.model = DISPLAY_MODEL_NONE;
696 chassis_info.actcnt = chassis_info.maxcnt = 0;
697
698 ret = pdc_chassis_info(&chassis_info, &lcd_info, sizeof(lcd_info));
699 if (ret == PDC_OK) {
700 DPRINTK((KERN_INFO "%s: chassis info: model=%d (%s), "
701 "lcd_width=%d, cmd_delay=%u,\n"
702 "%s: sizecnt=%d, actcnt=%ld, maxcnt=%ld\n",
703 __FILE__, lcd_info.model,
704 (lcd_info.model==DISPLAY_MODEL_LCD) ? "LCD" :
705 (lcd_info.model==DISPLAY_MODEL_LASI) ? "LED" : "unknown",
706 lcd_info.lcd_width, lcd_info.min_cmd_delay,
707 __FILE__, sizeof(lcd_info),
708 chassis_info.actcnt, chassis_info.maxcnt));
709 DPRINTK((KERN_INFO "%s: cmd=%p, data=%p, reset1=%x, reset2=%x, act_enable=%d\n",
710 __FILE__, lcd_info.lcd_cmd_reg_addr,
711 lcd_info.lcd_data_reg_addr, lcd_info.reset_cmd1,
712 lcd_info.reset_cmd2, lcd_info.act_enable ));
713
714 /* check the results. Some machines have a buggy PDC */
715 if (chassis_info.actcnt <= 0 || chassis_info.actcnt != chassis_info.maxcnt)
716 goto not_found;
717
718 switch (lcd_info.model) {
719 case DISPLAY_MODEL_LCD: /* LCD display */
720 if (chassis_info.actcnt <
721 offsetof(struct pdc_chassis_lcd_info_ret_block, _pad)-1)
722 goto not_found;
723 if (!lcd_info.act_enable) {
724 DPRINTK((KERN_INFO "PDC prohibited usage of the LCD.\n"));
725 goto not_found;
726 }
727 break;
728
729 case DISPLAY_MODEL_NONE: /* no LED or LCD available */
730 printk(KERN_INFO "PDC reported no LCD or LED.\n");
731 goto not_found;
732
733 case DISPLAY_MODEL_LASI: /* Lasi style 8 bit LED display */
734 if (chassis_info.actcnt != 8 && chassis_info.actcnt != 32)
735 goto not_found;
736 break;
737
738 default:
739 printk(KERN_WARNING "PDC reported unknown LCD/LED model %d\n",
740 lcd_info.model);
741 goto not_found;
742 } /* switch() */
743
744found:
745 /* register the LCD/LED driver */
746 register_led_driver(lcd_info.model, LCD_CMD_REG, LCD_DATA_REG);
747 return 0;
748
749 } else { /* if() */
750 DPRINTK((KERN_INFO "pdc_chassis_info call failed with retval = %d\n", ret));
751 }
752
753not_found:
754 lcd_info.model = DISPLAY_MODEL_NONE;
755 return 1;
756}
757
758#ifdef CONFIG_PROC_FS
759module_init(led_create_procfs)
760#endif
diff --git a/drivers/parisc/pdc_stable.c b/drivers/parisc/pdc_stable.c
new file mode 100644
index 000000000000..67c8f3b44848
--- /dev/null
+++ b/drivers/parisc/pdc_stable.c
@@ -0,0 +1,735 @@
1/*
2 * Interfaces to retrieve and set PDC Stable options (firmware)
3 *
4 * Copyright (C) 2005 Thibaut VARENE <varenet@parisc-linux.org>
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation; either version 2 of the License, or
9 * (at your option) any later version.
10 *
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
15 *
16 * You should have received a copy of the GNU General Public License
17 * along with this program; if not, write to the Free Software
18 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
19 *
20 *
21 * DEV NOTE: the PDC Procedures reference states that:
22 * "A minimum of 96 bytes of Stable Storage is required. Providing more than
23 * 96 bytes of Stable Storage is optional [...]. Failure to provide the
24 * optional locations from 96 to 192 results in the loss of certain
25 * functionality during boot."
26 *
27 * Since locations between 96 and 192 are the various paths, most (if not
28 * all) PA-RISC machines should have them. Anyway, for safety reasons, the
29 * following code can deal with only 96 bytes of Stable Storage, and all
30 * sizes between 96 and 192 bytes (provided they are multiples of the
31 * struct device_path size, e.g. 128, 160 and 192) to provide full information.
32 * The code makes no use of data above 192 bytes. One last word: there's one
33 * path we can always count on: the primary path.
34 */
35
36#undef PDCS_DEBUG
37#ifdef PDCS_DEBUG
38#define DPRINTK(fmt, args...) printk(KERN_DEBUG fmt, ## args)
39#else
40#define DPRINTK(fmt, args...)
41#endif
42
43#include <linux/module.h>
44#include <linux/init.h>
45#include <linux/sched.h> /* for capable() */
46#include <linux/kernel.h>
47#include <linux/string.h>
48#include <linux/ctype.h>
49#include <linux/sysfs.h>
50#include <linux/kobject.h>
51#include <linux/device.h>
52#include <linux/errno.h>
53
54#include <asm/pdc.h>
55#include <asm/page.h>
56#include <asm/uaccess.h>
57#include <asm/hardware.h>
58
59#define PDCS_VERSION "0.09"
60
61#define PDCS_ADDR_PPRI 0x00
62#define PDCS_ADDR_OSID 0x40
63#define PDCS_ADDR_FSIZ 0x5C
64#define PDCS_ADDR_PCON 0x60
65#define PDCS_ADDR_PALT 0x80
66#define PDCS_ADDR_PKBD 0xA0
67
68MODULE_AUTHOR("Thibaut VARENE <varenet@parisc-linux.org>");
69MODULE_DESCRIPTION("sysfs interface to HP PDC Stable Storage data");
70MODULE_LICENSE("GPL");
71MODULE_VERSION(PDCS_VERSION);
72
73static unsigned long pdcs_size = 0;
74
75/* This struct defines what we need to deal with a parisc pdc path entry */
76struct pdcspath_entry {
77 short ready; /* entry record is valid if != 0 */
78 unsigned long addr; /* entry address in stable storage */
79 char *name; /* entry name */
80 struct device_path devpath; /* device path in parisc representation */
81 struct device *dev; /* corresponding device */
82 struct kobject kobj;
83};
84
85struct pdcspath_attribute {
86 struct attribute attr;
87 ssize_t (*show)(struct pdcspath_entry *entry, char *buf);
88 ssize_t (*store)(struct pdcspath_entry *entry, const char *buf, size_t count);
89};
90
91#define PDCSPATH_ENTRY(_addr, _name) \
92struct pdcspath_entry pdcspath_entry_##_name = { \
93 .ready = 0, \
94 .addr = _addr, \
95 .name = __stringify(_name), \
96};
97
98#define PDCS_ATTR(_name, _mode, _show, _store) \
99struct subsys_attribute pdcs_attr_##_name = { \
100 .attr = {.name = __stringify(_name), .mode = _mode, .owner = THIS_MODULE}, \
101 .show = _show, \
102 .store = _store, \
103};
104
105#define PATHS_ATTR(_name, _mode, _show, _store) \
106struct pdcspath_attribute paths_attr_##_name = { \
107 .attr = {.name = __stringify(_name), .mode = _mode, .owner = THIS_MODULE}, \
108 .show = _show, \
109 .store = _store, \
110};
111
112#define to_pdcspath_attribute(_attr) container_of(_attr, struct pdcspath_attribute, attr)
113#define to_pdcspath_entry(obj) container_of(obj, struct pdcspath_entry, kobj)
114
115/**
116 * pdcspath_fetch - This function populates the path entry structs.
117 * @entry: A pointer to an allocated pdcspath_entry.
118 *
119 * The general idea is that you don't read from the Stable Storage every time
120 * you access the files provided by the facilities. We store a copy of the
121 * content of the stable storage WRT various paths in these structs. We read
122 * these structs when reading the files, and we write to these structs when
123 * writing to the files, and only then write them back to the Stable Storage.
124 */
125static int
126pdcspath_fetch(struct pdcspath_entry *entry)
127{
128 struct device_path *devpath;
129
130 if (!entry)
131 return -EINVAL;
132
133 devpath = &entry->devpath;
134
135 DPRINTK("%s: fetch: 0x%p, 0x%p, addr: 0x%lx\n", __func__,
136 entry, devpath, entry->addr);
137
138 /* addr, devpath and count must be word aligned */
139 if (pdc_stable_read(entry->addr, devpath, sizeof(*devpath)) != PDC_OK)
140 return -EIO;
141
142 /* Find the matching device.
143 NOTE: hardware_path overlays with device_path, so the nice cast can
144 be used */
145 entry->dev = hwpath_to_device((struct hardware_path *)devpath);
146
147 entry->ready = 1;
148
149 DPRINTK("%s: device: 0x%p\n", __func__, entry->dev);
150
151 return 0;
152}
153
154/**
155 * pdcspath_store - This function writes a path to stable storage.
156 * @entry: A pointer to an allocated pdcspath_entry.
157 *
158 * It can be used in two ways: either by passing it a preset devpath struct
159 * containing an already computed hardware path, or by passing it a device
160 * pointer, from which it'll find out the corresponding hardware path.
161 * For now we do not handle the case where there's an error in writing to the
162 * Stable Storage area, so you'd better not mess up the data :P
163 */
164static int
165pdcspath_store(struct pdcspath_entry *entry)
166{
167 struct device_path *devpath;
168
169 if (!entry)
170 return -EINVAL;
171
172 devpath = &entry->devpath;
173
174 /* We expect the caller to set the ready flag to 0 if the hardware
175 path struct provided is invalid, so that we know we have to fill it.
176 First case, we don't have a preset hwpath... */
177 if (!entry->ready) {
178 /* ...but we have a device, map it */
179 if (entry->dev)
180 device_to_hwpath(entry->dev, (struct hardware_path *)devpath);
181 else
182 return -EINVAL;
183 }
184 /* else, we expect the provided hwpath to be valid. */
185
186 DPRINTK("%s: store: 0x%p, 0x%p, addr: 0x%lx\n", __func__,
187 entry, devpath, entry->addr);
188
189 /* addr, devpath and count must be word aligned */
190 if (pdc_stable_write(entry->addr, devpath, sizeof(*devpath)) != PDC_OK) {
191		printk(KERN_ERR "%s: an error occurred when writing to PDC.\n"
192 "It is likely that the Stable Storage data has been corrupted.\n"
193 "Please check it carefully upon next reboot.\n", __func__);
194 return -EIO;
195 }
196
197 entry->ready = 1;
198
199 DPRINTK("%s: device: 0x%p\n", __func__, entry->dev);
200
201 return 0;
202}
203
204/**
205 * pdcspath_hwpath_read - This function handles hardware path pretty printing.
206 * @entry: An allocated and populated pdscpath_entry struct.
207 * @buf: The output buffer to write to.
208 *
209 * We will call this function to format the output of the hwpath attribute file.
210 */
211static ssize_t
212pdcspath_hwpath_read(struct pdcspath_entry *entry, char *buf)
213{
214 char *out = buf;
215 struct device_path *devpath;
216 unsigned short i;
217
218 if (!entry || !buf)
219 return -EINVAL;
220
221 devpath = &entry->devpath;
222
223 if (!entry->ready)
224 return -ENODATA;
225
226 for (i = 0; i < 6; i++) {
227 if (devpath->bc[i] >= 128)
228 continue;
229 out += sprintf(out, "%u/", (unsigned char)devpath->bc[i]);
230 }
231 out += sprintf(out, "%u\n", (unsigned char)devpath->mod);
232
233 return out - buf;
234}
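/*
 * Example output (illustrative values): a devpath with
 * bc == { 0xff, 0xff, 0xff, 10, 0, 1 } and mod == 4 prints as
 * "10/0/1/4\n"; bc entries >= 128 are treated as unused levels
 * and skipped by the loop above.
 */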
235
236/**
237 * pdcspath_hwpath_write - This function handles hardware path modifying.
238 * @entry: An allocated and populated pdscpath_entry struct.
239 * @buf: The input buffer to read from.
240 * @count: The number of bytes to be read.
241 *
242 * We will call this function to change the current hardware path.
243 * Hardware paths are to be given '/'-delimited, without brackets.
244 * We take care to make sure that the provided path actually maps to an existing
245 * device, BUT nothing would prevent some foolish user from setting the path
246 * to some PCI bridge or even a CPU...
247 * A better workaround would be to make sure we are at the end of a device
248 * tree, for instance, but that would be IMHO beyond the simple scope of this driver.
249 * The aim is to provide a facility. Data correctness is left to userland.
250 */
251static ssize_t
252pdcspath_hwpath_write(struct pdcspath_entry *entry, const char *buf, size_t count)
253{
254 struct hardware_path hwpath;
255 unsigned short i;
256 char in[count+1], *temp;
257 struct device *dev;
258
259 if (!entry || !buf || !count)
260 return -EINVAL;
261
262 /* We'll use a local copy of buf */
263 memset(in, 0, count+1);
264 strncpy(in, buf, count);
265
266 /* Let's clean up the target. 0xff is a blank pattern */
267 memset(&hwpath, 0xff, sizeof(hwpath));
268
269 /* First, pick the mod field (the last one of the input string) */
270 if (!(temp = strrchr(in, '/')))
271 return -EINVAL;
272
273 hwpath.mod = simple_strtoul(temp+1, NULL, 10);
274 in[temp-in] = '\0'; /* truncate the remaining string. just precaution */
275 DPRINTK("%s: mod: %d\n", __func__, hwpath.mod);
276
277	/* Then, loop over each delimiter, making sure we don't have too many.
278	   We write the bc fields bottom-up. No matter what, we stop
279	   before writing the last field. If there are too many fields anyway,
280	   then the user is a moron and it'll be caught later when we
281	   check the consistency of the given hwpath. */
282 for (i=5; ((temp = strrchr(in, '/'))) && (temp-in > 0) && (likely(i)); i--) {
283 hwpath.bc[i] = simple_strtoul(temp+1, NULL, 10);
284 in[temp-in] = '\0';
285 DPRINTK("%s: bc[%d]: %d\n", __func__, i, hwpath.bc[i]);
286 }
287
288 /* Store the final field */
289 hwpath.bc[i] = simple_strtoul(in, NULL, 10);
290 DPRINTK("%s: bc[%d]: %d\n", __func__, i, hwpath.bc[i]);
291
292 /* Now we check that the user isn't trying to lure us */
293 if (!(dev = hwpath_to_device((struct hardware_path *)&hwpath))) {
294 printk(KERN_WARNING "%s: attempt to set invalid \"%s\" "
295 "hardware path: %s\n", __func__, entry->name, buf);
296 return -EINVAL;
297 }
298
299 /* So far so good, let's get in deep */
300 entry->ready = 0;
301 entry->dev = dev;
302
303 /* Now, dive in. Write back to the hardware */
304 WARN_ON(pdcspath_store(entry)); /* this warn should *NEVER* happen */
305
306 /* Update the symlink to the real device */
307 sysfs_remove_link(&entry->kobj, "device");
308 sysfs_create_link(&entry->kobj, &entry->dev->kobj, "device");
309
310 printk(KERN_INFO "PDC Stable Storage: changed \"%s\" path to \"%s\"\n",
311 entry->name, buf);
312
313 return count;
314}
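/*
 * Usage sketch from userland; the sysfs location is an assumption
 * here, based on the subsystem registration done later in this file:
 *
 *   echo "10/0/1/4" > /sys/firmware/stable/primary/hwpath
 *
 * The write fails with -EINVAL unless the given path maps to a device
 * the kernel has actually discovered.
 */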
315
316/**
317 * pdcspath_layer_read - Extended layer (eg. SCSI ids) pretty printing.
318 * @entry: An allocated and populated pdscpath_entry struct.
319 * @buf: The output buffer to write to.
320 *
321 * We will call this function to format the output of the layer attribute file.
322 */
323static ssize_t
324pdcspath_layer_read(struct pdcspath_entry *entry, char *buf)
325{
326 char *out = buf;
327 struct device_path *devpath;
328 unsigned short i;
329
330 if (!entry || !buf)
331 return -EINVAL;
332
333 devpath = &entry->devpath;
334
335 if (!entry->ready)
336 return -ENODATA;
337
338 for (i = 0; devpath->layers[i] && (likely(i < 6)); i++)
339 out += sprintf(out, "%u ", devpath->layers[i]);
340
341 out += sprintf(out, "\n");
342
343 return out - buf;
344}
345
346/**
347 * pdcspath_layer_write - This function handles extended layer modifying.
348 * @entry: An allocated and populated pdscpath_entry struct.
349 * @buf: The input buffer to read from.
350 * @count: The number of bytes to be read.
351 *
352 * We will call this function to change the current layer value.
353 * Layers are to be given '.'-delimited, without brackets.
354 * XXX beware: we check the input data far less strictly here than for hwpath.
355 * Potential harm can be done, since there's no way to check the validity of
356 * the layer fields.
357 */
358static ssize_t
359pdcspath_layer_write(struct pdcspath_entry *entry, const char *buf, size_t count)
360{
361 unsigned int layers[6]; /* device-specific info (ctlr#, unit#, ...) */
362 unsigned short i;
363 char in[count+1], *temp;
364
365 if (!entry || !buf || !count)
366 return -EINVAL;
367
368 /* We'll use a local copy of buf */
369 memset(in, 0, count+1);
370 strncpy(in, buf, count);
371
372 /* Let's clean up the target. 0 is a blank pattern */
373 memset(&layers, 0, sizeof(layers));
374
375 /* First, pick the first layer */
376 if (unlikely(!isdigit(*in)))
377 return -EINVAL;
378 layers[0] = simple_strtoul(in, NULL, 10);
379 DPRINTK("%s: layer[0]: %d\n", __func__, layers[0]);
380
381 temp = in;
382 for (i=1; ((temp = strchr(temp, '.'))) && (likely(i<6)); i++) {
383 if (unlikely(!isdigit(*(++temp))))
384 return -EINVAL;
385 layers[i] = simple_strtoul(temp, NULL, 10);
386 DPRINTK("%s: layer[%d]: %d\n", __func__, i, layers[i]);
387 }
388
389 /* So far so good, let's get in deep */
390
391 /* First, overwrite the current layers with the new ones, not touching
392 the hardware path. */
393 memcpy(&entry->devpath.layers, &layers, sizeof(layers));
394
395 /* Now, dive in. Write back to the hardware */
396 WARN_ON(pdcspath_store(entry)); /* this warn should *NEVER* happen */
397
398 printk(KERN_INFO "PDC Stable Storage: changed \"%s\" layers to \"%s\"\n",
399 entry->name, buf);
400
401 return count;
402}
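/*
 * Usage sketch, mirroring the hwpath example above (sysfs path again
 * an assumption): echo "6.0" > /sys/firmware/stable/primary/layer
 * sets layers[0] = 6 and layers[1] = 0 (e.g. a SCSI target and LUN);
 * fields not given stay at the blank pattern 0.
 */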
403
404/**
405 * pdcspath_attr_show - Generic read function call wrapper.
406 * @kobj: The kobject to get info from.
407 * @attr: The attribute looked upon.
408 * @buf: The output buffer.
409 */
410static ssize_t
411pdcspath_attr_show(struct kobject *kobj, struct attribute *attr, char *buf)
412{
413 struct pdcspath_entry *entry = to_pdcspath_entry(kobj);
414 struct pdcspath_attribute *pdcs_attr = to_pdcspath_attribute(attr);
415 ssize_t ret = 0;
416
417 if (!capable(CAP_SYS_ADMIN))
418 return -EACCES;
419
420 if (pdcs_attr->show)
421 ret = pdcs_attr->show(entry, buf);
422
423 return ret;
424}
425
426/**
427 * pdcspath_attr_store - Generic write function call wrapper.
428 * @kobj: The kobject to write info to.
429 * @attr: The attribute to be modified.
430 * @buf: The input buffer.
431 * @count: The size of the buffer.
432 */
433static ssize_t
434pdcspath_attr_store(struct kobject *kobj, struct attribute *attr,
435 const char *buf, size_t count)
436{
437 struct pdcspath_entry *entry = to_pdcspath_entry(kobj);
438 struct pdcspath_attribute *pdcs_attr = to_pdcspath_attribute(attr);
439 ssize_t ret = 0;
440
441 if (!capable(CAP_SYS_ADMIN))
442 return -EACCES;
443
444 if (pdcs_attr->store)
445 ret = pdcs_attr->store(entry, buf, count);
446
447 return ret;
448}
449
450static struct sysfs_ops pdcspath_attr_ops = {
451 .show = pdcspath_attr_show,
452 .store = pdcspath_attr_store,
453};
454
455/* These are the two attributes of any PDC path. */
456static PATHS_ATTR(hwpath, 0600, pdcspath_hwpath_read, pdcspath_hwpath_write);
457static PATHS_ATTR(layer, 0600, pdcspath_layer_read, pdcspath_layer_write);
458
459static struct attribute *paths_subsys_attrs[] = {
460 &paths_attr_hwpath.attr,
461 &paths_attr_layer.attr,
462 NULL,
463};
464
465/* Specific kobject type for our PDC paths */
466static struct kobj_type ktype_pdcspath = {
467 .sysfs_ops = &pdcspath_attr_ops,
468 .default_attrs = paths_subsys_attrs,
469};
470
471/* We hard define the 4 types of path we expect to find */
472static PDCSPATH_ENTRY(PDCS_ADDR_PPRI, primary);
473static PDCSPATH_ENTRY(PDCS_ADDR_PCON, console);
474static PDCSPATH_ENTRY(PDCS_ADDR_PALT, alternative);
475static PDCSPATH_ENTRY(PDCS_ADDR_PKBD, keyboard);
476
477/* An array containing all PDC paths we will deal with */
478static struct pdcspath_entry *pdcspath_entries[] = {
479 &pdcspath_entry_primary,
480 &pdcspath_entry_alternative,
481 &pdcspath_entry_console,
482 &pdcspath_entry_keyboard,
483 NULL,
484};
485
486/**
487 * pdcs_info_read - Pretty printing of the remaining useful data.
488 * @entry: An allocated and populated subsystem struct. We don't use it, though.
489 * @buf: The output buffer to write to.
490 *
491 * We will call this function to format the output of the 'info' attribute file.
492 * Please refer to PDC Procedures documentation, section PDC_STABLE to get a
493 * better insight into what we're doing here.
494 */
495static ssize_t
496pdcs_info_read(struct subsystem *entry, char *buf)
497{
498 char *out = buf;
499 __u32 result;
500 struct device_path devpath;
501 char *tmpstr = NULL;
502
503 if (!entry || !buf)
504 return -EINVAL;
505
506 /* show the size of the stable storage */
507 out += sprintf(out, "Stable Storage size: %ld bytes\n", pdcs_size);
508
509 /* deal with flags */
510 if (pdc_stable_read(PDCS_ADDR_PPRI, &devpath, sizeof(devpath)) != PDC_OK)
511 return -EIO;
512
513 out += sprintf(out, "Autoboot: %s\n", (devpath.flags & PF_AUTOBOOT) ? "On" : "Off");
514 out += sprintf(out, "Autosearch: %s\n", (devpath.flags & PF_AUTOSEARCH) ? "On" : "Off");
515 out += sprintf(out, "Timer: %u s\n", (devpath.flags & PF_TIMER) ? (1 << (devpath.flags & PF_TIMER)) : 0);
516
517 /* get OSID */
518 if (pdc_stable_read(PDCS_ADDR_OSID, &result, sizeof(result)) != PDC_OK)
519 return -EIO;
520
521 /* the actual result is 16 bits away */
522 switch (result >> 16) {
523 case 0x0000: tmpstr = "No OS-dependent data"; break;
524 case 0x0001: tmpstr = "HP-UX dependent data"; break;
525 case 0x0002: tmpstr = "MPE-iX dependent data"; break;
526 case 0x0003: tmpstr = "OSF dependent data"; break;
527 case 0x0004: tmpstr = "HP-RT dependent data"; break;
528 case 0x0005: tmpstr = "Novell Netware dependent data"; break;
529 default: tmpstr = "Unknown"; break;
530 }
531 out += sprintf(out, "OS ID: %s (0x%.4x)\n", tmpstr, (result >> 16));
532
533 /* get fast-size */
534 if (pdc_stable_read(PDCS_ADDR_FSIZ, &result, sizeof(result)) != PDC_OK)
535 return -EIO;
536
537 out += sprintf(out, "Memory tested: ");
538 if ((result & 0x0F) < 0x0E)
539 out += sprintf(out, "%.3f MB", 0.256*(1<<(result & 0x0F)));
540 else
541 out += sprintf(out, "All");
542 out += sprintf(out, "\n");
543
544 return out - buf;
545}
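The fast-size decode above is compact; here is a minimal user-space sketch of the same computation, assuming nothing beyond what pdcs_info_read() does (the formula is taken verbatim from the function; the sample input value is illustrative):

	#include <stdio.h>

	/* Decode the low nibble of PDCS_ADDR_FSIZ the way pdcs_info_read() does. */
	static void print_memory_tested(unsigned int result)
	{
		unsigned int n = result & 0x0F;

		if (n < 0x0E)
			printf("Memory tested: %.3f MB\n", 0.256 * (1 << n));
		else
			printf("Memory tested: All\n");
	}

	int main(void)
	{
		print_memory_tested(0x3);	/* prints "Memory tested: 2.048 MB" */
		return 0;
	}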
546
547/**
548 * pdcs_info_write - This function handles boot flag modifying.
549 * @entry: An allocated and populated subsystem struct. We don't use it, though.
550 * @buf: The input buffer to read from.
551 * @count: The number of bytes to be read.
552 *
553 * We will call this function to change the current boot flags.
554 * We expect a precise syntax:
555 * \"n n\" (n == 0 or 1) to toggle respectively AutoBoot and AutoSearch
556 *
557 * For now there is no incentive to provide more "knobs" for this interface:
558 * modifying the rest of the data is pretty meaningless while the machine is
559 * running, given the expected use of this facility, such as PALO setting up
560 * the boot disk when installing a Linux distribution...
561 */
562static ssize_t
563pdcs_info_write(struct subsystem *entry, const char *buf, size_t count)
564{
565 struct pdcspath_entry *pathentry;
566 unsigned char flags;
567 char in[count+1], *temp;
568 char c;
569
570 if (!capable(CAP_SYS_ADMIN))
571 return -EACCES;
572
573 if (!entry || !buf || !count)
574 return -EINVAL;
575
576 /* We'll use a local copy of buf */
577 memset(in, 0, count+1);
578 strncpy(in, buf, count);
579
580 /* Current flags are stored in primary boot path entry */
581 pathentry = &pdcspath_entry_primary;
582
583 /* Be nice to the existing flag record */
584 flags = pathentry->devpath.flags;
585
586 DPRINTK("%s: flags before: 0x%X\n", __func__, flags);
587
588 temp = in;
589
590 while (*temp && isspace(*temp))
591 temp++;
592
593 c = *temp++ - '0';
594 if ((c != 0) && (c != 1))
595 goto parse_error;
596 if (c == 0)
597 flags &= ~PF_AUTOBOOT;
598 else
599 flags |= PF_AUTOBOOT;
600
601 if (*temp++ != ' ')
602 goto parse_error;
603
604 c = *temp++ - '0';
605 if ((c != 0) && (c != 1))
606 goto parse_error;
607 if (c == 0)
608 flags &= ~PF_AUTOSEARCH;
609 else
610 flags |= PF_AUTOSEARCH;
611
612 DPRINTK("%s: flags after: 0x%X\n", __func__, flags);
613
614 /* So far so good, let's get in deep */
615
616 /* Change the path entry flags first */
617 pathentry->devpath.flags = flags;
618
619 /* Now, dive in. Write back to the hardware */
620 WARN_ON(pdcspath_store(pathentry)); /* this warn should *NEVER* happen */
621
622 printk(KERN_INFO "PDC Stable Storage: changed flags to \"%s\"\n", buf);
623
624 return count;
625
626parse_error:
627 printk(KERN_WARNING "%s: Parse error: expect \"n n\" (n == 0 or 1) for AB and AS\n", __func__);
628 return -EINVAL;
629}
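For reference, a hedged stand-alone sketch of the "n n" parser above, which can be used to check its accept/reject behaviour in user space (the PF_* values below are placeholders for illustration, not the real header constants, and the PDC write-back is omitted):

	#include <ctype.h>
	#include <stdio.h>

	#define PF_AUTOBOOT	0x01	/* placeholder value for illustration */
	#define PF_AUTOSEARCH	0x02	/* placeholder value for illustration */

	/* Parse "n n" (n == 0 or 1) into AutoBoot/AutoSearch flag bits.
	 * Returns 0 on success, -1 on a parse error. */
	static int parse_flags(const char *in, unsigned char *flags)
	{
		char c;

		while (*in && isspace(*in))
			in++;

		c = *in++ - '0';
		if (c != 0 && c != 1)
			return -1;
		*flags = c ? (*flags | PF_AUTOBOOT) : (*flags & ~PF_AUTOBOOT);

		if (*in++ != ' ')
			return -1;

		c = *in++ - '0';
		if (c != 0 && c != 1)
			return -1;
		*flags = c ? (*flags | PF_AUTOSEARCH) : (*flags & ~PF_AUTOSEARCH);

		return 0;
	}

	int main(void)
	{
		unsigned char flags = 0;

		if (parse_flags("1 0", &flags) == 0)
			printf("flags: 0x%X\n", (unsigned int)flags);	/* prints 0x1 */
		return 0;
	}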
630
631/* The last attribute (the 'root' one actually) with all remaining data. */
632static PDCS_ATTR(info, 0600, pdcs_info_read, pdcs_info_write);
633
634static struct subsys_attribute *pdcs_subsys_attrs[] = {
635 &pdcs_attr_info,
636 NULL, /* maybe more in the future? */
637};
638
639static decl_subsys(paths, &ktype_pdcspath, NULL);
640static decl_subsys(pdc, NULL, NULL);
641
642/**
643 * pdcs_register_pathentries - Prepares path entries kobjects for sysfs usage.
644 *
645 * It creates kobjects corresponding to each path entry with nice sysfs
646 * links to the real device. This is where the magic takes place: when
647 * registering the subsystem attributes during module init, each kobject hereby
648 * created will show up in the sysfs tree as a directory containing files as
649 * defined by paths_subsys_attrs[].
650 */
651static inline int __init
652pdcs_register_pathentries(void)
653{
654 unsigned short i;
655 struct pdcspath_entry *entry;
656
657 for (i = 0; (entry = pdcspath_entries[i]); i++) {
658 if (pdcspath_fetch(entry) < 0)
659 continue;
660
661 kobject_set_name(&entry->kobj, "%s", entry->name);
662 kobj_set_kset_s(entry, paths_subsys);
663 kobject_register(&entry->kobj);
664
665 if (!entry->dev)
666 continue;
667
668 /* Add a nice symlink to the real device */
669 sysfs_create_link(&entry->kobj, &entry->dev->kobj, "device");
670 }
671
672 return 0;
673}
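Assuming the firmware subsystem ends up under /sys/firmware (the conventional location; the exact mount point is an assumption here), the registrations in this function, together with pdc_stable_init() below, should yield roughly this tree:

	/sys/firmware/pdc/
	    info
	    paths/
	        primary/      hwpath  layer  device -> (symlink to the real device)
	        console/
	        alternative/
	        keyboard/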
674
675/**
676 * pdcs_unregister_pathentries - Routine called when unregistering the module.
677 */
678static inline void __exit
679pdcs_unregister_pathentries(void)
680{
681 unsigned short i;
682 struct pdcspath_entry *entry;
683
684 for (i = 0; (entry = pdcspath_entries[i]); i++)
685 if (entry->ready)
686 kobject_unregister(&entry->kobj);
687}
688
689/*
690 * For now we register the pdc subsystem with the firmware subsystem
691 * and the paths subsystem with the pdc subsystem.
692 */
693static int __init
694pdc_stable_init(void)
695{
696 struct subsys_attribute *attr;
697 int i, rc = 0, error = 0;
698
699 /* find the size of the stable storage */
700 if (pdc_stable_get_size(&pdcs_size) != PDC_OK)
701 return -ENODEV;
702
703 printk(KERN_INFO "PDC Stable Storage facility v%s\n", PDCS_VERSION);
704
705 /* For now we'll register the pdc subsys within this driver */
706 if ((rc = firmware_register(&pdc_subsys)))
707 return rc;
708
709 /* Don't forget the info entry */
710 for (i = 0; (attr = pdcs_subsys_attrs[i]) && !error; i++)
711 if (attr->show)
712 error = subsys_create_file(&pdc_subsys, attr);
713
714 /* register the paths subsys as a subsystem of pdc subsys */
715 kset_set_kset_s(&paths_subsys, pdc_subsys);
716 subsystem_register(&paths_subsys);
717
718 /* now we create all "files" for the paths subsys */
719 pdcs_register_pathentries();
720
721 return 0;
722}
723
724static void __exit
725pdc_stable_exit(void)
726{
727 pdcs_unregister_pathentries();
728 subsystem_unregister(&paths_subsys);
729
730 firmware_unregister(&pdc_subsys);
731}
732
733
734module_init(pdc_stable_init);
735module_exit(pdc_stable_exit);
diff --git a/drivers/parisc/power.c b/drivers/parisc/power.c
new file mode 100644
index 000000000000..ff75e9296df9
--- /dev/null
+++ b/drivers/parisc/power.c
@@ -0,0 +1,278 @@
1/*
2 * linux/arch/parisc/kernel/power.c
3 * HP PARISC soft power switch support driver
4 *
5 * Copyright (c) 2001-2002 Helge Deller <deller@gmx.de>
6 * All rights reserved.
7 *
8 *
9 * Redistribution and use in source and binary forms, with or without
10 * modification, are permitted provided that the following conditions
11 * are met:
12 * 1. Redistributions of source code must retain the above copyright
13 * notice, this list of conditions, and the following disclaimer,
14 * without modification.
15 * 2. The name of the author may not be used to endorse or promote products
16 * derived from this software without specific prior written permission.
17 *
18 * Alternatively, this software may be distributed under the terms of the
19 * GNU General Public License ("GPL").
20 *
21 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
22 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
23 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
24 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR
25 * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
26 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
27 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
28 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
29 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
30 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
31 * SUCH DAMAGE.
32 *
33 * HINT:
34 * Support of the soft power switch button may be enabled or disabled at
35 * runtime through the "/proc/sys/kernel/power" procfs entry.
36 */
37
38#include <linux/config.h>
39#include <linux/module.h>
40#include <linux/init.h>
41#include <linux/kernel.h>
42#include <linux/string.h>
43#include <linux/notifier.h>
44#include <linux/reboot.h>
45#include <linux/sched.h>
46#include <linux/interrupt.h>
47#include <linux/workqueue.h>
48
49#include <asm/pdc.h>
50#include <asm/io.h>
51#include <asm/led.h>
52#include <asm/uaccess.h>
53
54
55#ifdef DEBUG
56# define DPRINTK(x...) printk(x)
57#else
58# define DPRINTK(x...)
59#endif
60
61
62/* filename in /proc which can be used to enable/disable the power switch */
63#define SYSCTL_FILENAME "sys/kernel/power"
64
65
66#define DIAG_CODE(code) (0x14000000 + ((code)<<5))
67
68/* this will go to processor.h or any other place... */
69/* taken from PCXL ERS page 82 */
70#define MFCPU_X(rDiagReg, t_ch, t_th, code) \
71 (DIAG_CODE(code) + ((rDiagReg)<<21) + ((t_ch)<<16) + ((t_th)<<0) )
72
73#define MTCPU(dr, gr) MFCPU_X(dr, gr, 0, 0x12) /* move value of gr to dr[dr] */
74#define MFCPU_C(dr, gr) MFCPU_X(dr, gr, 0, 0x30) /* for dr0 and dr8 only ! */
75#define MFCPU_T(dr, gr) MFCPU_X(dr, 0, gr, 0xa0) /* all dr except dr0 and dr8 */
76
77#define __getDIAG(dr) ( { \
78 register unsigned long __res asm("r28");\
79 __asm__ __volatile__ ( \
80 ".word %1\n nop\n" : "=&r" (__res) : "i" (MFCPU_T(dr,28)) \
81 ); \
82 __res; \
83} )
84
85
86static void deferred_poweroff(void *dummy)
87{
88 extern int cad_pid; /* from kernel/sys.c */
89 if (kill_proc(cad_pid, SIGINT, 1)) {
90 /* just in case killing init process failed */
91 machine_power_off();
92 }
93}
94
95/*
96 * This function gets called from interrupt context.
97 * Since we must not sleep there, the actual shutdown is
98 * deferred to process context via schedule_work().
99 */
100
101static DECLARE_WORK(poweroff_work, deferred_poweroff, NULL);
102
103static void poweroff(void)
104{
105 static int powering_off;
106
107 if (powering_off)
108 return;
109
110 powering_off++;
111 schedule_work(&poweroff_work);
112}
113
114
115/* local time-counter for shutdown */
116static int shutdown_timer;
117
118/* check, give feedback and start shutdown after one second */
119static void process_shutdown(void)
120{
121 if (shutdown_timer == 0)
122 DPRINTK(KERN_INFO "Shutdown requested...\n");
123
124 shutdown_timer++;
125
126 /* wait until the button was pressed for 1 second */
127 if (shutdown_timer == HZ) {
128#if defined (DEBUG) || defined(CONFIG_CHASSIS_LCD_LED)
129 static char msg[] = "Shutting down...";
130#endif
131 DPRINTK(KERN_INFO "%s\n", msg);
132 lcd_print(msg);
133 poweroff();
134 }
135}
136
137
138/* main power switch tasklet struct (scheduled from time.c) */
139DECLARE_TASKLET_DISABLED(power_tasklet, NULL, 0);
140
141/* soft power switch enabled/disabled */
142int pwrsw_enabled = 1;
143
144/*
145 * On gecko style machines (e.g. 712/xx and 715/xx)
146 * the power switch status is stored in Bit 0 ("the highest bit")
147 * of CPU diagnose register 25.
148 *
149 */
150static void gecko_tasklet_func(unsigned long unused)
151{
152 if (!pwrsw_enabled)
153 return;
154
155 if (__getDIAG(25) & 0x80000000) {
156 /* power switch button not pressed or released again */
157 /* Warning: Some machines never reset this DIAG flag! */
158 shutdown_timer = 0;
159 } else {
160 process_shutdown();
161 }
162}
163
164
165
166/*
167 * Check the power switch status which is read from the
168 * real I/O location at soft_power_reg.
169 * Bit 31 ("the lowest bit") is the status of the power switch.
170 */
171
172static void polling_tasklet_func(unsigned long soft_power_reg)
173{
174 unsigned long current_status;
175
176 if (!pwrsw_enabled)
177 return;
178
179 current_status = gsc_readl(soft_power_reg);
180 if (current_status & 0x1) {
181 /* power switch button not pressed */
182 shutdown_timer = 0;
183 } else {
184 process_shutdown();
185 }
186}
187
188
189/*
190 * powerfail interruption handler (irq IRQ_FROM_REGION(CPU_IRQ_REGION)+2)
191 */
192#if 0
193static void powerfail_interrupt(int code, void *x, struct pt_regs *regs)
194{
195 printk(KERN_CRIT "POWERFAIL INTERRUPTION !\n");
196 poweroff();
197}
198#endif
199
200
201
202
203/* parisc_panic_event() is called by the panic handler.
204 * As soon as a panic occurs, our tasklets above will not be
205 * executed any longer. This function then re-enables the
206 * soft-power switch and allows the user to switch off the system
207 */
208static int parisc_panic_event(struct notifier_block *this,
209 unsigned long event, void *ptr)
210{
211 /* re-enable the soft-power switch */
212 pdc_soft_power_button(0);
213 return NOTIFY_DONE;
214}
215
216static struct notifier_block parisc_panic_block = {
217 .notifier_call = parisc_panic_event,
218 .priority = INT_MAX,
219};
220
221
222static int __init power_init(void)
223{
224 unsigned long ret;
225 unsigned long soft_power_reg = 0;
226
227#if 0
228 request_irq( IRQ_FROM_REGION(CPU_IRQ_REGION)+2, &powerfail_interrupt,
229 0, "powerfail", NULL);
230#endif
231
232 /* enable the soft power switch if possible */
233 ret = pdc_soft_power_info(&soft_power_reg);
234 if (ret == PDC_OK)
235 ret = pdc_soft_power_button(1);
236 if (ret != PDC_OK)
237 soft_power_reg = -1UL;
238
239 switch (soft_power_reg) {
240 case 0: printk(KERN_INFO "Gecko-style soft power switch enabled.\n");
241 power_tasklet.func = gecko_tasklet_func;
242 break;
243
244 case -1UL: printk(KERN_INFO "Soft power switch support not available.\n");
245 return -ENODEV;
246
247 default: printk(KERN_INFO "Soft power switch enabled, polling @ 0x%08lx.\n",
248 soft_power_reg);
249 power_tasklet.data = soft_power_reg;
250 power_tasklet.func = polling_tasklet_func;
251 }
252
253 /* Register a call for panic conditions. */
254 notifier_chain_register(&panic_notifier_list, &parisc_panic_block);
255
256 tasklet_enable(&power_tasklet);
257
258 return 0;
259}
260
261static void __exit power_exit(void)
262{
263 if (!power_tasklet.func)
264 return;
265
266 tasklet_disable(&power_tasklet);
267 notifier_chain_unregister(&panic_notifier_list, &parisc_panic_block);
268 power_tasklet.func = NULL;
269 pdc_soft_power_button(0);
270}
271
272module_init(power_init);
273module_exit(power_exit);
274
275
276MODULE_AUTHOR("Helge Deller");
277MODULE_DESCRIPTION("Soft power switch driver");
278MODULE_LICENSE("Dual BSD/GPL");
diff --git a/drivers/parisc/sba_iommu.c b/drivers/parisc/sba_iommu.c
new file mode 100644
index 000000000000..82ea68b55df4
--- /dev/null
+++ b/drivers/parisc/sba_iommu.c
@@ -0,0 +1,2165 @@
1/*
2** System Bus Adapter (SBA) I/O MMU manager
3**
4** (c) Copyright 2000-2004 Grant Grundler <grundler @ parisc-linux x org>
5** (c) Copyright 2004 Naresh Kumar Inna <knaresh at india x hp x com>
6** (c) Copyright 2000-2004 Hewlett-Packard Company
7**
8** Portions (c) 1999 Dave S. Miller (from sparc64 I/O MMU code)
9**
10** This program is free software; you can redistribute it and/or modify
11** it under the terms of the GNU General Public License as published by
12** the Free Software Foundation; either version 2 of the License, or
13** (at your option) any later version.
14**
15**
16** This module initializes the IOC (I/O Controller) found on B1000/C3000/
17** J5000/J7000/N-class/L-class machines and their successors.
18**
19** FIXME: add DMA hint support programming in both sba and lba modules.
20*/
21
22#include <linux/config.h>
23#include <linux/types.h>
24#include <linux/kernel.h>
25#include <linux/spinlock.h>
26#include <linux/slab.h>
27#include <linux/init.h>
28
29#include <linux/mm.h>
30#include <linux/string.h>
31#include <linux/pci.h>
32
33#include <asm/byteorder.h>
34#include <asm/io.h>
35#include <asm/dma.h> /* for DMA_CHUNK_SIZE */
36
37#include <asm/hardware.h> /* for register_parisc_driver() stuff */
38
39#include <linux/proc_fs.h>
40#include <asm/runway.h> /* for proc_runway_root */
41#include <asm/pdc.h> /* for PDC_MODEL_* */
42#include <asm/pdcpat.h> /* for is_pdc_pat() */
43#include <asm/parisc-device.h>
44
45
46/* declared in arch/parisc/kernel/setup.c */
47extern struct proc_dir_entry * proc_mckinley_root;
48
49#define MODULE_NAME "SBA"
50
51#ifdef CONFIG_PROC_FS
52/* depends on proc fs support. But costs CPU performance */
53#undef SBA_COLLECT_STATS
54#endif
55
56/*
57** The number of debug flags is a clue - this code is fragile.
58** Don't even think about messing with it unless you have
59** plenty of 710's to sacrifice to the computer gods. :^)
60*/
61#undef DEBUG_SBA_INIT
62#undef DEBUG_SBA_RUN
63#undef DEBUG_SBA_RUN_SG
64#undef DEBUG_SBA_RESOURCE
65#undef ASSERT_PDIR_SANITY
66#undef DEBUG_LARGE_SG_ENTRIES
67#undef DEBUG_DMB_TRAP
68
69#ifdef DEBUG_SBA_INIT
70#define DBG_INIT(x...) printk(x)
71#else
72#define DBG_INIT(x...)
73#endif
74
75#ifdef DEBUG_SBA_RUN
76#define DBG_RUN(x...) printk(x)
77#else
78#define DBG_RUN(x...)
79#endif
80
81#ifdef DEBUG_SBA_RUN_SG
82#define DBG_RUN_SG(x...) printk(x)
83#else
84#define DBG_RUN_SG(x...)
85#endif
86
87
88#ifdef DEBUG_SBA_RESOURCE
89#define DBG_RES(x...) printk(x)
90#else
91#define DBG_RES(x...)
92#endif
93
94#if defined(__LP64__) && !defined(CONFIG_PDC_NARROW)
95/* "low end" PA8800 machines use ZX1 chipset */
96#define ZX1_SUPPORT
97#endif
98
99#define SBA_INLINE __inline__
100
101
102/*
103** The number of pdir entries to "free" before issuing
104** a read to the PCOM register to flush out PCOM writes.
105** Interacts with allocation granularity (i.e. 4 or 8 entries
106** allocated and free'd/purged at a time might make this
107** less interesting).
108*/
109#define DELAYED_RESOURCE_CNT 16
110
111#define DEFAULT_DMA_HINT_REG 0
112
113#define ASTRO_RUNWAY_PORT 0x582
114#define IKE_MERCED_PORT 0x803
115#define REO_MERCED_PORT 0x804
116#define REOG_MERCED_PORT 0x805
117#define PLUTO_MCKINLEY_PORT 0x880
118
119#define SBA_FUNC_ID 0x0000 /* function id */
120#define SBA_FCLASS 0x0008 /* function class, bist, header, rev... */
121
122#define IS_ASTRO(id) ((id)->hversion == ASTRO_RUNWAY_PORT)
123#define IS_IKE(id) ((id)->hversion == IKE_MERCED_PORT)
124#define IS_PLUTO(id) ((id)->hversion == PLUTO_MCKINLEY_PORT)
125
126#define SBA_FUNC_SIZE 4096 /* SBA configuration function reg set */
127
128#define ASTRO_IOC_OFFSET (32 * SBA_FUNC_SIZE)
129#define PLUTO_IOC_OFFSET (1 * SBA_FUNC_SIZE)
130/* Ike's IOC's occupy functions 2 and 3 */
131#define IKE_IOC_OFFSET(p) ((p+2) * SBA_FUNC_SIZE)
132
133#define IOC_CTRL 0x8 /* IOC_CTRL offset */
134#define IOC_CTRL_TC (1 << 0) /* TOC Enable */
135#define IOC_CTRL_CE (1 << 1) /* Coalesce Enable */
136#define IOC_CTRL_DE (1 << 2) /* Dillon Enable */
137#define IOC_CTRL_RM (1 << 8) /* Real Mode */
138#define IOC_CTRL_NC (1 << 9) /* Non Coherent Mode */
139#define IOC_CTRL_D4 (1 << 11) /* Disable 4-byte coalescing */
140#define IOC_CTRL_DD (1 << 13) /* Disable distr. LMMIO range coalescing */
141
142#define MAX_IOC 2 /* per Ike. Pluto/Astro only have 1. */
143
144#define ROPES_PER_IOC 8 /* per Ike half or Pluto/Astro */
145
146
147/*
148** Offsets into MBIB (Function 0 on Ike and hopefully Astro)
149** Firmware programs this stuff. Don't touch it.
150*/
151#define LMMIO_DIRECT0_BASE 0x300
152#define LMMIO_DIRECT0_MASK 0x308
153#define LMMIO_DIRECT0_ROUTE 0x310
154
155#define LMMIO_DIST_BASE 0x360
156#define LMMIO_DIST_MASK 0x368
157#define LMMIO_DIST_ROUTE 0x370
158
159#define IOS_DIST_BASE 0x390
160#define IOS_DIST_MASK 0x398
161#define IOS_DIST_ROUTE 0x3A0
162
163#define IOS_DIRECT_BASE 0x3C0
164#define IOS_DIRECT_MASK 0x3C8
165#define IOS_DIRECT_ROUTE 0x3D0
166
167/*
168** Offsets into I/O TLB (Function 2 and 3 on Ike)
169*/
170#define ROPE0_CTL 0x200 /* "regbus pci0" */
171#define ROPE1_CTL 0x208
172#define ROPE2_CTL 0x210
173#define ROPE3_CTL 0x218
174#define ROPE4_CTL 0x220
175#define ROPE5_CTL 0x228
176#define ROPE6_CTL 0x230
177#define ROPE7_CTL 0x238
178
179#define HF_ENABLE 0x40
180
181
182#define IOC_IBASE 0x300 /* IO TLB */
183#define IOC_IMASK 0x308
184#define IOC_PCOM 0x310
185#define IOC_TCNFG 0x318
186#define IOC_PDIR_BASE 0x320
187
188/* AGP GART driver looks for this */
189#define SBA_IOMMU_COOKIE 0x0000badbadc0ffeeUL
190
191
192/*
193** IOC supports 4/8/16/64KB page sizes (see TCNFG register)
194** It's safer (avoid memory corruption) to keep DMA page mappings
195** equivalently sized to VM PAGE_SIZE.
196**
197** We really can't avoid generating a new mapping for each
198** page since the Virtual Coherence Index has to be generated
199** and updated for each page.
200**
201** PAGE_SIZE may be greater than IOVP_SIZE, but not the other way around.
202*/
203#define IOVP_SIZE PAGE_SIZE
204#define IOVP_SHIFT PAGE_SHIFT
205#define IOVP_MASK PAGE_MASK
206
207#define SBA_PERF_CFG 0x708 /* Performance Counter stuff */
208#define SBA_PERF_MASK1 0x718
209#define SBA_PERF_MASK2 0x730
210
211
212/*
213** Offsets into PCI Performance Counters (functions 12 and 13)
214** Controlled by PERF registers in function 2 & 3 respectively.
215*/
216#define SBA_PERF_CNT1 0x200
217#define SBA_PERF_CNT2 0x208
218#define SBA_PERF_CNT3 0x210
219
220
221struct ioc {
222 void __iomem *ioc_hpa; /* I/O MMU base address */
223 char *res_map; /* resource map, bit == pdir entry */
224 u64 *pdir_base; /* physical base address */
225 unsigned long ibase; /* pdir IOV Space base - shared w/lba_pci */
226 unsigned long imask; /* pdir IOV Space mask - shared w/lba_pci */
227#ifdef ZX1_SUPPORT
228 unsigned long iovp_mask; /* help convert IOVA to IOVP */
229#endif
230 unsigned long *res_hint; /* next avail IOVP - circular search */
231 spinlock_t res_lock;
232 unsigned int res_bitshift; /* from the LEFT! */
233 unsigned int res_size; /* size of resource map in bytes */
234#if SBA_HINT_SUPPORT
235/* FIXME : DMA HINTs not used */
236 unsigned long hint_mask_pdir; /* bits used for DMA hints */
237 unsigned int hint_shift_pdir;
238#endif
239#if DELAYED_RESOURCE_CNT > 0
240 int saved_cnt;
241 struct sba_dma_pair {
242 dma_addr_t iova;
243 size_t size;
244 } saved[DELAYED_RESOURCE_CNT];
245#endif
246
247#ifdef SBA_COLLECT_STATS
248#define SBA_SEARCH_SAMPLE 0x100
249 unsigned long avg_search[SBA_SEARCH_SAMPLE];
250 unsigned long avg_idx; /* current index into avg_search */
251 unsigned long used_pages;
252 unsigned long msingle_calls;
253 unsigned long msingle_pages;
254 unsigned long msg_calls;
255 unsigned long msg_pages;
256 unsigned long usingle_calls;
257 unsigned long usingle_pages;
258 unsigned long usg_calls;
259 unsigned long usg_pages;
260#endif
261
262 /* STUFF We don't need in performance path */
263 unsigned int pdir_size; /* in bytes, determined by IOV Space size */
264};
265
266struct sba_device {
267 struct sba_device *next; /* list of SBA's in system */
268 struct parisc_device *dev; /* dev found in bus walk */
269 struct parisc_device_id *iodc; /* data about dev from firmware */
270 const char *name;
271 void __iomem *sba_hpa; /* base address */
272 spinlock_t sba_lock;
273 unsigned int flags; /* state/functionality enabled */
274 unsigned int hw_rev; /* HW revision of chip */
275
276 struct resource chip_resv; /* MMIO reserved for chip */
277 struct resource iommu_resv; /* MMIO reserved for iommu */
278
279 unsigned int num_ioc; /* number of on-board IOC's */
280 struct ioc ioc[MAX_IOC];
281};
282
283
284static struct sba_device *sba_list;
285
286static unsigned long ioc_needs_fdc = 0;
287
288/* global count of IOMMUs in the system */
289static unsigned int global_ioc_cnt = 0;
290
291/* PA8700 (Piranha 2.2) bug workaround */
292static unsigned long piranha_bad_128k = 0;
293
294/* Looks nice and keeps the compiler happy */
295#define SBA_DEV(d) ((struct sba_device *) (d))
296
297#if SBA_AGP_SUPPORT
298static int reserve_sba_gart = 1;
299#endif
300
301#define ROUNDUP(x,y) ((x + ((y)-1)) & ~((y)-1))
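A brief worked example of ROUNDUP(), which rounds x up to the next multiple of y for power-of-two y (values chosen for illustration):

	/* ROUNDUP(x, y) == smallest multiple of y that is >= x, e.g.:
	 *   ROUNDUP(0x1234, 0x1000) == 0x2000
	 *   ROUNDUP(0x1000, 0x1000) == 0x1000
	 */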
302
303
304/************************************
305** SBA register read and write support
306**
307** BE WARNED: register writes are posted.
308** (i.e. follow writes which must reach HW with a read)
309**
310** Superdome (in particular, REO) allows only 64-bit CSR accesses.
311*/
312#define READ_REG32(addr) le32_to_cpu(__raw_readl(addr))
313#define READ_REG64(addr) le64_to_cpu(__raw_readq(addr))
314#define WRITE_REG32(val, addr) __raw_writel(cpu_to_le32(val), addr)
315#define WRITE_REG64(val, addr) __raw_writeq(cpu_to_le64(val), addr)
316
317#ifdef __LP64__
318#define READ_REG(addr) READ_REG64(addr)
319#define WRITE_REG(value, addr) WRITE_REG64(value, addr)
320#else
321#define READ_REG(addr) READ_REG32(addr)
322#define WRITE_REG(value, addr) WRITE_REG32(value, addr)
323#endif
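Because writes are posted, code elsewhere in this file follows any write that must actually reach the hardware with a read from the same device. A sketch of the idiom, using the register names defined above (the PCOM purge shown is the one issued at init time):

	/* Posted-write flush: the read stalls until the preceding
	 * write has actually reached the device. */
	WRITE_REG(ioc->ibase | 31, ioc->ioc_hpa + IOC_PCOM);
	READ_REG(ioc->ioc_hpa + IOC_PCOM);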
324
325#ifdef DEBUG_SBA_INIT
326
327/* NOTE: When __LP64__ isn't defined, READ_REG64() is two 32-bit reads */
328
329/**
330 * sba_dump_ranges - debugging only - print ranges assigned to this IOA
331 * @hpa: base address of the sba
332 *
333 * Print the MMIO and IO Port address ranges forwarded by an Astro/Ike/RIO
334 * IO Adapter (aka Bus Converter).
335 */
336static void
337sba_dump_ranges(void __iomem *hpa)
338{
339 DBG_INIT("SBA at 0x%p\n", hpa);
340 DBG_INIT("IOS_DIST_BASE : %Lx\n", READ_REG64(hpa+IOS_DIST_BASE));
341 DBG_INIT("IOS_DIST_MASK : %Lx\n", READ_REG64(hpa+IOS_DIST_MASK));
342 DBG_INIT("IOS_DIST_ROUTE : %Lx\n", READ_REG64(hpa+IOS_DIST_ROUTE));
343 DBG_INIT("\n");
344 DBG_INIT("IOS_DIRECT_BASE : %Lx\n", READ_REG64(hpa+IOS_DIRECT_BASE));
345 DBG_INIT("IOS_DIRECT_MASK : %Lx\n", READ_REG64(hpa+IOS_DIRECT_MASK));
346 DBG_INIT("IOS_DIRECT_ROUTE: %Lx\n", READ_REG64(hpa+IOS_DIRECT_ROUTE));
347}
348
349/**
350 * sba_dump_tlb - debugging only - print IOMMU operating parameters
351 * @hpa: base address of the IOMMU
352 *
353 * Print the size/location of the IO MMU PDIR.
354 */
355static void sba_dump_tlb(void __iomem *hpa)
356{
357 DBG_INIT("IO TLB at 0x%p\n", hpa);
358 DBG_INIT("IOC_IBASE : 0x%Lx\n", READ_REG64(hpa+IOC_IBASE));
359 DBG_INIT("IOC_IMASK : 0x%Lx\n", READ_REG64(hpa+IOC_IMASK));
360 DBG_INIT("IOC_TCNFG : 0x%Lx\n", READ_REG64(hpa+IOC_TCNFG));
361 DBG_INIT("IOC_PDIR_BASE: 0x%Lx\n", READ_REG64(hpa+IOC_PDIR_BASE));
362 DBG_INIT("\n");
363}
364#else
365#define sba_dump_ranges(x)
366#define sba_dump_tlb(x)
367#endif
368
369
370#ifdef ASSERT_PDIR_SANITY
371
372/**
373 * sba_dump_pdir_entry - debugging only - print one IOMMU PDIR entry
374 * @ioc: IO MMU structure which owns the pdir we are interested in.
375 * @msg: text to print on the output line.
376 * @pide: pdir index.
377 *
378 * Print one entry of the IO MMU PDIR in human readable form.
379 */
380static void
381sba_dump_pdir_entry(struct ioc *ioc, char *msg, uint pide)
382{
383 /* start printing from lowest pde in rval */
384 u64 *ptr = &(ioc->pdir_base[pide & ~(BITS_PER_LONG - 1)]);
385 unsigned long *rptr = (unsigned long *) &(ioc->res_map[(pide >>3) & ~(sizeof(unsigned long) - 1)]);
386 uint rcnt;
387
388 printk(KERN_DEBUG "SBA: %s rp %p bit %d rval 0x%lx\n",
389 msg,
390 rptr, pide & (BITS_PER_LONG - 1), *rptr);
391
392 rcnt = 0;
393 while (rcnt < BITS_PER_LONG) {
394 printk(KERN_DEBUG "%s %2d %p %016Lx\n",
395 (rcnt == (pide & (BITS_PER_LONG - 1)))
396 ? " -->" : " ",
397 rcnt, ptr, *ptr );
398 rcnt++;
399 ptr++;
400 }
401 printk(KERN_DEBUG "%s", msg);
402}
403
404
405/**
406 * sba_check_pdir - debugging only - consistency checker
407 * @ioc: IO MMU structure which owns the pdir we are interested in.
408 * @msg: text to print on the output line.
409 *
410 * Verify that the resource map and pdir state are consistent.
411 */
412static int
413sba_check_pdir(struct ioc *ioc, char *msg)
414{
415 u32 *rptr_end = (u32 *) &(ioc->res_map[ioc->res_size]);
416 u32 *rptr = (u32 *) ioc->res_map; /* resource map ptr */
417 u64 *pptr = ioc->pdir_base; /* pdir ptr */
418 uint pide = 0;
419
420 while (rptr < rptr_end) {
421 u32 rval = *rptr;
422 int rcnt = 32; /* number of bits we might check */
423
424 while (rcnt) {
425 /* Get last byte and highest bit from that */
426 u32 pde = ((u32) (((char *)pptr)[7])) << 24;
427 if ((rval ^ pde) & 0x80000000)
428 {
429 /*
430 ** BUMMER! -- res_map != pdir --
431 ** Dump rval and matching pdir entries
432 */
433 sba_dump_pdir_entry(ioc, msg, pide);
434 return(1);
435 }
436 rcnt--;
437 rval <<= 1; /* try the next bit */
438 pptr++;
439 pide++;
440 }
441 rptr++; /* look at next word of res_map */
442 }
443 /* It'd be nice if we always got here :^) */
444 return 0;
445}
446
447
448/**
449 * sba_dump_sg - debugging only - print Scatter-Gather list
450 * @ioc: IO MMU structure which owns the pdir we are interested in.
451 * @startsg: head of the SG list
452 * @nents: number of entries in SG list
453 *
454 * print the SG list so we can verify it's correct by hand.
455 */
456static void
457sba_dump_sg( struct ioc *ioc, struct scatterlist *startsg, int nents)
458{
459 while (nents-- > 0) {
460 printk(KERN_DEBUG " %d : %08lx/%05x %p/%05x\n",
461 nents,
462 (unsigned long) sg_dma_address(startsg),
463 sg_dma_len(startsg),
464 sg_virt_addr(startsg), startsg->length);
465 startsg++;
466 }
467}
468
469#endif /* ASSERT_PDIR_SANITY */
470
471
472
473
474/**************************************************************
475*
476* I/O Pdir Resource Management
477*
478* Bits set in the resource map are in use.
479* Each bit can represent a number of pages.
480* LSbs represent lower addresses (IOVA's).
481*
482***************************************************************/
483#define PAGES_PER_RANGE 1 /* could increase this to 4 or 8 if needed */
484
485/* Convert from IOVP to IOVA and vice versa. */
486
487#ifdef ZX1_SUPPORT
488/* Pluto (aka ZX1) boxes need to set or clear the ibase bits appropriately */
489#define SBA_IOVA(ioc,iovp,offset,hint_reg) ((ioc->ibase) | (iovp) | (offset))
490#define SBA_IOVP(ioc,iova) ((iova) & (ioc)->iovp_mask)
491#else
492/* only support Astro and ancestors. Saves a few cycles in key places */
493#define SBA_IOVA(ioc,iovp,offset,hint_reg) ((iovp) | (offset))
494#define SBA_IOVP(ioc,iova) (iova)
495#endif
496
497#define PDIR_INDEX(iovp) ((iovp)>>IOVP_SHIFT)
498
499#define RESMAP_MASK(n) (~0UL << (BITS_PER_LONG - (n)))
500#define RESMAP_IDX_MASK (sizeof(unsigned long) - 1)
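Since the resource map is scanned from the most-significant bit down (note that ioc->res_bitshift counts "from the LEFT"), RESMAP_MASK() builds its mask at the top of the word. For example:

	/* RESMAP_MASK(n) sets the n most-significant bits of a word; with
	 * BITS_PER_LONG == 64:
	 *   RESMAP_MASK(1) == 0x8000000000000000UL
	 *   RESMAP_MASK(4) == 0xF000000000000000UL
	 * RESMAP_IDX_MASK rounds a byte index down to a word boundary.
	 */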
501
502
503/**
504 * sba_search_bitmap - find free space in IO PDIR resource bitmap
505 * @ioc: IO MMU structure which owns the pdir we are interested in.
506 * @bits_wanted: number of entries we need.
507 *
508 * Find consecutive free bits in resource bitmap.
509 * Each bit represents one entry in the IO Pdir.
510 * Cool perf optimization: search for log2(size) bits at a time.
511 */
512static SBA_INLINE unsigned long
513sba_search_bitmap(struct ioc *ioc, unsigned long bits_wanted)
514{
515 unsigned long *res_ptr = ioc->res_hint;
516 unsigned long *res_end = (unsigned long *) &(ioc->res_map[ioc->res_size]);
517 unsigned long pide = ~0UL;
518
519 if (bits_wanted > (BITS_PER_LONG/2)) {
520 /* Search word at a time - no mask needed */
521 for(; res_ptr < res_end; ++res_ptr) {
522 if (*res_ptr == 0) {
523 *res_ptr = RESMAP_MASK(bits_wanted);
524 pide = ((unsigned long)res_ptr - (unsigned long)ioc->res_map);
525 pide <<= 3; /* convert to bit address */
526 break;
527 }
528 }
529 /* point to the next word on next pass */
530 res_ptr++;
531 ioc->res_bitshift = 0;
532 } else {
533 /*
534 ** Search the resource bit map on well-aligned values.
535 ** "o" is the alignment.
536 ** We need the alignment to invalidate I/O TLB using
537 ** SBA HW features in the unmap path.
538 */
539 unsigned long o = 1 << get_order(bits_wanted << PAGE_SHIFT);
540 uint bitshiftcnt = ROUNDUP(ioc->res_bitshift, o);
541 unsigned long mask;
542
543 if (bitshiftcnt >= BITS_PER_LONG) {
544 bitshiftcnt = 0;
545 res_ptr++;
546 }
547 mask = RESMAP_MASK(bits_wanted) >> bitshiftcnt;
548
549 DBG_RES("%s() o %ld %p", __FUNCTION__, o, res_ptr);
550 while(res_ptr < res_end)
551 {
552 DBG_RES(" %p %lx %lx\n", res_ptr, mask, *res_ptr);
553 WARN_ON(mask == 0);
554 if(((*res_ptr) & mask) == 0) {
555 *res_ptr |= mask; /* mark resources busy! */
556 pide = ((unsigned long)res_ptr - (unsigned long)ioc->res_map);
557 pide <<= 3; /* convert to bit address */
558 pide += bitshiftcnt;
559 break;
560 }
561 mask >>= o;
562 bitshiftcnt += o;
563 if (mask == 0) {
564 mask = RESMAP_MASK(bits_wanted);
565 bitshiftcnt=0;
566 res_ptr++;
567 }
568 }
569 /* look in the same word on the next pass */
570 ioc->res_bitshift = bitshiftcnt + bits_wanted;
571 }
572
573 /* wrapped ? */
574 if (res_end <= res_ptr) {
575 ioc->res_hint = (unsigned long *) ioc->res_map;
576 ioc->res_bitshift = 0;
577 } else {
578 ioc->res_hint = res_ptr;
579 }
580 return (pide);
581}
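A condensed user-space sketch of the aligned-search idea above: first-fit of n contiguous bits on a boundary of n rounded up to a power of two, scanning each word from its most-significant bit. This is a simplification (no hint pointer, no wrap-around, no word-at-a-time fast path), not the driver's exact algorithm:

	#include <stdio.h>

	#define BITS_PER_LONG	(8 * sizeof(unsigned long))
	#define RESMAP_MASK(n)	(~0UL << (BITS_PER_LONG - (n)))

	/* Round n up to the next power of two (n >= 1). */
	static unsigned long roundup_pow2(unsigned long n)
	{
		unsigned long o = 1;
		while (o < n)
			o <<= 1;
		return o;
	}

	/* Find and mark 'wanted' contiguous bits, aligned to roundup_pow2(wanted).
	 * Returns the bit index (MSB of word 0 is bit 0), or ~0UL if full. */
	static unsigned long alloc_bits(unsigned long *map, int nwords, int wanted)
	{
		unsigned long o = roundup_pow2(wanted);
		int w, shift;

		for (w = 0; w < nwords; w++) {
			unsigned long mask = RESMAP_MASK(wanted);
			for (shift = 0; shift < (int)BITS_PER_LONG; shift += o) {
				if ((map[w] & (mask >> shift)) == 0) {
					map[w] |= mask >> shift;	/* mark busy */
					return w * BITS_PER_LONG + shift;
				}
			}
		}
		return ~0UL;
	}

	int main(void)
	{
		unsigned long map[2] = { 0, 0 };

		printf("%lu\n", alloc_bits(map, 2, 3));	/* 0: aligned to 4 */
		printf("%lu\n", alloc_bits(map, 2, 3));	/* 4: next 4-aligned slot */
		return 0;
	}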
582
583
584/**
585 * sba_alloc_range - find free bits and mark them in IO PDIR resource bitmap
586 * @ioc: IO MMU structure which owns the pdir we are interested in.
587 * @size: number of bytes to create a mapping for
588 *
589 * Given a size, find that many consecutive unmarked bits, then mark
590 * them in the resource bit map.
591 */
592static int
593sba_alloc_range(struct ioc *ioc, size_t size)
594{
595 unsigned int pages_needed = size >> IOVP_SHIFT;
596#ifdef SBA_COLLECT_STATS
597 unsigned long cr_start = mfctl(16);
598#endif
599 unsigned long pide;
600
601 pide = sba_search_bitmap(ioc, pages_needed);
602 if (pide >= (ioc->res_size << 3)) {
603 pide = sba_search_bitmap(ioc, pages_needed);
604 if (pide >= (ioc->res_size << 3))
605 panic("%s: I/O MMU @ %p is out of mapping resources\n",
606 __FILE__, ioc->ioc_hpa);
607 }
608
609#ifdef ASSERT_PDIR_SANITY
610 /* verify the first enable bit is clear */
611 if(0x00 != ((u8 *) ioc->pdir_base)[pide*sizeof(u64) + 7]) {
612 sba_dump_pdir_entry(ioc, "sba_search_bitmap() botched it?", pide);
613 }
614#endif
615
616 DBG_RES("%s(%x) %d -> %lx hint %x/%x\n",
617 __FUNCTION__, size, pages_needed, pide,
618 (uint) ((unsigned long) ioc->res_hint - (unsigned long) ioc->res_map),
619 ioc->res_bitshift );
620
621#ifdef SBA_COLLECT_STATS
622 {
623 unsigned long cr_end = mfctl(16);
624 unsigned long tmp = cr_end - cr_start;
625 /* check for roll over */
626 cr_start = (cr_end < cr_start) ? -(tmp) : (tmp);
627 }
628 ioc->avg_search[ioc->avg_idx++] = cr_start;
629 ioc->avg_idx &= SBA_SEARCH_SAMPLE - 1;
630
631 ioc->used_pages += pages_needed;
632#endif
633
634 return (pide);
635}
636
637
638/**
639 * sba_free_range - unmark bits in IO PDIR resource bitmap
640 * @ioc: IO MMU structure which owns the pdir we are interested in.
641 * @iova: IO virtual address which was previously allocated.
642 * @size: number of bytes to create a mapping for
643 *
644 * clear bits in the ioc's resource map
645 */
646static SBA_INLINE void
647sba_free_range(struct ioc *ioc, dma_addr_t iova, size_t size)
648{
649 unsigned long iovp = SBA_IOVP(ioc, iova);
650 unsigned int pide = PDIR_INDEX(iovp);
651 unsigned int ridx = pide >> 3; /* convert bit to byte address */
652 unsigned long *res_ptr = (unsigned long *) &((ioc)->res_map[ridx & ~RESMAP_IDX_MASK]);
653
654 int bits_not_wanted = size >> IOVP_SHIFT;
655
656 /* 3-bits "bit" address plus 2 (or 3) bits for "byte" == bit in word */
657 unsigned long m = RESMAP_MASK(bits_not_wanted) >> (pide & (BITS_PER_LONG - 1));
658
659 DBG_RES("%s( ,%x,%x) %x/%lx %x %p %lx\n",
660 __FUNCTION__, (uint) iova, size,
661 bits_not_wanted, m, pide, res_ptr, *res_ptr);
662
663#ifdef SBA_COLLECT_STATS
664 ioc->used_pages -= bits_not_wanted;
665#endif
666
667 *res_ptr &= ~m;
668}
669
670
671/**************************************************************
672*
673* "Dynamic DMA Mapping" support (aka "Coherent I/O")
674*
675***************************************************************/
676
677#if SBA_HINT_SUPPORT
678#define SBA_DMA_HINT(ioc, val) ((val) << (ioc)->hint_shift_pdir)
679#endif
680
681typedef unsigned long space_t;
682#define KERNEL_SPACE 0
683
684/**
685 * sba_io_pdir_entry - fill in one IO PDIR entry
686 * @pdir_ptr: pointer to IO PDIR entry
687 * @sid: process Space ID - currently only support KERNEL_SPACE
688 * @vba: Virtual CPU address of buffer to map
689 * @hint: DMA hint set to use for this mapping
690 *
691 * SBA Mapping Routine
692 *
693 * Given a virtual address (vba, arg2) and space id, (sid, arg1)
694 * sba_io_pdir_entry() loads the I/O PDIR entry pointed to by
695 * pdir_ptr (arg0).
696 * Using the bass-ackwards HP bit numbering, each IO Pdir entry
697 * for Astro/Ike looks like:
698 *
699 *
700 * 0 19 51 55 63
701 * +-+---------------------+----------------------------------+----+--------+
702 * |V| U | PPN[43:12] | U | VI |
703 * +-+---------------------+----------------------------------+----+--------+
704 *
705 * Pluto is basically identical but supports fewer physical address bits:
706 *
707 * 0 23 51 55 63
708 * +-+------------------------+-------------------------------+----+--------+
709 * |V| U | PPN[39:12] | U | VI |
710 * +-+------------------------+-------------------------------+----+--------+
711 *
712 * V == Valid Bit (Most Significant Bit is bit 0)
713 * U == Unused
714 * PPN == Physical Page Number
715 * VI == Virtual Index (aka Coherent Index)
716 *
717 * LPA instruction output is put into PPN field.
718 * LCI (Load Coherence Index) instruction provides the "VI" bits.
719 *
720 * We pre-swap the bytes since PCX-W is Big Endian and the
721 * IOMMU uses little endian for the pdir.
722 */
723
724void SBA_INLINE
725sba_io_pdir_entry(u64 *pdir_ptr, space_t sid, unsigned long vba,
726 unsigned long hint)
727{
728 u64 pa; /* physical address */
729 register unsigned ci; /* coherent index */
730
731 pa = virt_to_phys(vba);
732 pa &= IOVP_MASK;
733
734 mtsp(sid,1);
735 asm("lci 0(%%sr1, %1), %0" : "=r" (ci) : "r" (vba));
736 pa |= (ci >> 12) & 0xff; /* move CI (8 bits) into lowest byte */
737
738 pa |= 0x8000000000000000ULL; /* set "valid" bit */
739 *pdir_ptr = cpu_to_le64(pa); /* swap and store into I/O Pdir */
740
741 /*
742 * If the PDC_MODEL capabilities has Non-coherent IO-PDIR bit set
743 * (bit #61, big endian), we have to flush and sync every time
744 * IO-PDIR is changed in Ike/Astro.
745 */
746 if (ioc_needs_fdc) {
747 asm volatile("fdc 0(%%sr1,%0)\n\tsync" : : "r" (pdir_ptr));
748 }
749}
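To make the layout above concrete, here is a hedged user-space sketch that packs the three fields the same way sba_io_pdir_entry() does, substituting a dummy coherence index for the LCI instruction and omitting the cpu_to_le64() byte swap (the 4 KB page assumption and sample inputs are illustrative):

	#include <stdio.h>
	#include <stdint.h>

	#define IOVP_MASK	(~0xFFFUL)	/* assumes 4 KB pages */

	/* Pack a pdir entry: valid bit | PPN (page-aligned phys addr) | CI byte. */
	static uint64_t pack_pdir_entry(uint64_t pa, unsigned int ci)
	{
		uint64_t e = pa & IOVP_MASK;		/* PPN field */

		e |= (ci >> 12) & 0xff;			/* CI into lowest byte */
		e |= 0x8000000000000000ULL;		/* valid bit (HP bit 0) */
		return e;
	}

	int main(void)
	{
		/* prints 0x80000000123450ab */
		printf("0x%016llx\n",
		       (unsigned long long)pack_pdir_entry(0x12345678, 0xABCDE));
		return 0;
	}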
750
751
752/**
753 * sba_mark_invalid - invalidate one or more IO PDIR entries
754 * @ioc: IO MMU structure which owns the pdir we are interested in.
755 * @iova: IO Virtual Address mapped earlier
756 * @byte_cnt: number of bytes this mapping covers.
757 *
758 * Mark the IO PDIR entry(ies) as Invalid and invalidate the
759 * corresponding IO TLB entry. The Ike PCOM (Purge Command Register)
760 * is used to purge stale entries in the IO TLB when unmapping.
761 *
762 * The PCOM register supports purging of multiple pages, with a minimum
763 * of 1 page and a maximum of 2GB. Hardware requires the address be
764 * aligned to the size of the range being purged. The size of the range
765 * must be a power of 2. The "Cool perf optimization" in the
766 * allocation routine helps keep that true.
767 */
768static SBA_INLINE void
769sba_mark_invalid(struct ioc *ioc, dma_addr_t iova, size_t byte_cnt)
770{
771 u32 iovp = (u32) SBA_IOVP(ioc,iova);
772
773 /* Even though this is a big-endian machine, the entries
774 ** in the iopdir are little endian. That's why we clear the byte
775 ** at +7 instead of at +0.
776 */
777 int off = PDIR_INDEX(iovp)*sizeof(u64)+7;
778
779#ifdef ASSERT_PDIR_SANITY
780 /* Assert first pdir entry is set */
781 if (0x80 != (((u8 *) ioc->pdir_base)[off])) {
782 sba_dump_pdir_entry(ioc,"sba_mark_invalid()", PDIR_INDEX(iovp));
783 }
784#endif
785
786 if (byte_cnt <= IOVP_SIZE)
787 {
788 iovp |= IOVP_SHIFT; /* set "size" field for PCOM */
789
790 /*
791 ** clear I/O PDIR entry "valid" bit
792 ** Do NOT clear the rest - save it for debugging.
793 ** We should only clear bits that have previously
794 ** been enabled.
795 */
796 ((u8 *)(ioc->pdir_base))[off] = 0;
797 } else {
798 u32 t = get_order(byte_cnt) + PAGE_SHIFT;
799
800 iovp |= t;
801 do {
802 /* clear I/O Pdir entry "valid" bit first */
803 ((u8 *)(ioc->pdir_base))[off] = 0;
804 off += sizeof(u64);
805 byte_cnt -= IOVP_SIZE;
806 } while (byte_cnt > 0);
807 }
808
809 WRITE_REG( SBA_IOVA(ioc, iovp, 0, 0), ioc->ioc_hpa+IOC_PCOM);
810}
811
812/**
813 * sba_dma_supported - PCI driver can query DMA support
814 * @dev: instance of PCI owned by the driver that's asking
815 * @mask: number of address bits this PCI device can handle
816 *
817 * See Documentation/DMA-mapping.txt
818 */
819static int sba_dma_supported( struct device *dev, u64 mask)
820{
821 struct ioc *ioc;
822 if (dev == NULL) {
823 printk(KERN_ERR MODULE_NAME ": EISA/ISA/et al not supported\n");
824 BUG();
825 return(0);
826 }
827
828 ioc = GET_IOC(dev);
829
830 /* check if mask is > than the largest IO Virt Address */
831
832 return((int) (mask >= (ioc->ibase +
833 (ioc->pdir_size / sizeof(u64) * IOVP_SIZE) )));
834}
835
836
837/**
838 * sba_map_single - map one buffer and return IOVA for DMA
839 * @dev: instance of PCI owned by the driver that's asking.
840 * @addr: driver buffer to map.
841 * @size: number of bytes to map in driver buffer.
842 * @direction: R/W or both.
843 *
844 * See Documentation/DMA-mapping.txt
845 */
846static dma_addr_t
847sba_map_single(struct device *dev, void *addr, size_t size,
848 enum dma_data_direction direction)
849{
850 struct ioc *ioc;
851 unsigned long flags;
852 dma_addr_t iovp;
853 dma_addr_t offset;
854 u64 *pdir_start;
855 int pide;
856
857 ioc = GET_IOC(dev);
858
859 /* save offset bits */
860 offset = ((dma_addr_t) (long) addr) & ~IOVP_MASK;
861
862 /* round up to nearest IOVP_SIZE */
863 size = (size + offset + ~IOVP_MASK) & IOVP_MASK;
864
865 spin_lock_irqsave(&ioc->res_lock, flags);
866#ifdef ASSERT_PDIR_SANITY
867 sba_check_pdir(ioc,"Check before sba_map_single()");
868#endif
869
870#ifdef SBA_COLLECT_STATS
871 ioc->msingle_calls++;
872 ioc->msingle_pages += size >> IOVP_SHIFT;
873#endif
874 pide = sba_alloc_range(ioc, size);
875 iovp = (dma_addr_t) pide << IOVP_SHIFT;
876
877 DBG_RUN("%s() 0x%p -> 0x%lx\n",
878 __FUNCTION__, addr, (long) iovp | offset);
879
880 pdir_start = &(ioc->pdir_base[pide]);
881
882 while (size > 0) {
883 sba_io_pdir_entry(pdir_start, KERNEL_SPACE, (unsigned long) addr, 0);
884
885 DBG_RUN(" pdir 0x%p %02x%02x%02x%02x%02x%02x%02x%02x\n",
886 pdir_start,
887 (u8) (((u8 *) pdir_start)[7]),
888 (u8) (((u8 *) pdir_start)[6]),
889 (u8) (((u8 *) pdir_start)[5]),
890 (u8) (((u8 *) pdir_start)[4]),
891 (u8) (((u8 *) pdir_start)[3]),
892 (u8) (((u8 *) pdir_start)[2]),
893 (u8) (((u8 *) pdir_start)[1]),
894 (u8) (((u8 *) pdir_start)[0])
895 );
896
897 addr += IOVP_SIZE;
898 size -= IOVP_SIZE;
899 pdir_start++;
900 }
901 /* form complete address */
902#ifdef ASSERT_PDIR_SANITY
903 sba_check_pdir(ioc,"Check after sba_map_single()");
904#endif
905 spin_unlock_irqrestore(&ioc->res_lock, flags);
906 return SBA_IOVA(ioc, iovp, offset, DEFAULT_DMA_HINT_REG);
907}
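A worked example of the offset/size arithmetic at the top of sba_map_single(), assuming 4 KB pages (IOVP_MASK == ~0xFFF; the addresses are illustrative):

	/*
	 * addr = 0x40321234, size = 0x100:
	 *   offset = addr & ~IOVP_MASK                   = 0x234
	 *   size   = (0x100 + 0x234 + 0xFFF) & IOVP_MASK = 0x1000  (one page)
	 * and the returned IOVA is (pide << IOVP_SHIFT) | 0x234.
	 */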
908
909
910/**
911 * sba_unmap_single - unmap one IOVA and free resources
912 * @dev: instance of PCI owned by the driver that's asking.
913 * @iova: IOVA of driver buffer previously mapped.
914 * @size: number of bytes mapped in driver buffer.
915 * @direction: R/W or both.
916 *
917 * See Documentation/DMA-mapping.txt
918 */
919static void
920sba_unmap_single(struct device *dev, dma_addr_t iova, size_t size,
921 enum dma_data_direction direction)
922{
923 struct ioc *ioc;
924#if DELAYED_RESOURCE_CNT > 0
925 struct sba_dma_pair *d;
926#endif
927 unsigned long flags;
928 dma_addr_t offset;
929
930 DBG_RUN("%s() iovp 0x%lx/%x\n", __FUNCTION__, (long) iova, size);
931
932 ioc = GET_IOC(dev);
933 offset = iova & ~IOVP_MASK;
934 iova ^= offset; /* clear offset bits */
935 size += offset;
936 size = ROUNDUP(size, IOVP_SIZE);
937
938 spin_lock_irqsave(&ioc->res_lock, flags);
939
940#ifdef SBA_COLLECT_STATS
941 ioc->usingle_calls++;
942 ioc->usingle_pages += size >> IOVP_SHIFT;
943#endif
944
945 sba_mark_invalid(ioc, iova, size);
946
947#if DELAYED_RESOURCE_CNT > 0
948 /* Delaying when we re-use an IO Pdir entry reduces the number
949 * of MMIO reads needed to flush writes to the PCOM register.
950 */
951 d = &(ioc->saved[ioc->saved_cnt]);
952 d->iova = iova;
953 d->size = size;
954 if (++(ioc->saved_cnt) >= DELAYED_RESOURCE_CNT) {
955 int cnt = ioc->saved_cnt;
956 while (cnt--) {
957 sba_free_range(ioc, d->iova, d->size);
958 d--;
959 }
960 ioc->saved_cnt = 0;
961 READ_REG(ioc->ioc_hpa+IOC_PCOM); /* flush purges */
962 }
963#else /* DELAYED_RESOURCE_CNT == 0 */
964 sba_free_range(ioc, iova, size);
965 READ_REG(ioc->ioc_hpa+IOC_PCOM); /* flush purges */
966#endif /* DELAYED_RESOURCE_CNT == 0 */
967 spin_unlock_irqrestore(&ioc->res_lock, flags);
968
969 /* XXX REVISIT for 2.5 Linux - need syncdma for zero-copy support.
970 ** For Astro based systems this isn't a big deal WRT performance.
971 ** As long as 2.4 kernels copyin/copyout data from/to userspace,
972 ** we don't need the syncdma. The issue here is I/O MMU cachelines
973 ** are *not* coherent in all cases. May be hwrev dependent.
974 ** Need to investigate more.
975 asm volatile("syncdma");
976 */
977}
978
979
980/**
981 * sba_alloc_consistent - allocate/map shared mem for DMA
982 * @hwdev: instance of PCI owned by the driver that's asking.
983 * @size: number of bytes mapped in driver buffer.
984 * @dma_handle: IOVA of new buffer.
985 *
986 * See Documentation/DMA-mapping.txt
987 */
988static void *sba_alloc_consistent(struct device *hwdev, size_t size,
989 dma_addr_t *dma_handle, int gfp)
990{
991 void *ret;
992
993 if (!hwdev) {
994 /* only support PCI */
995 *dma_handle = 0;
996 return 0;
997 }
998
999 ret = (void *) __get_free_pages(gfp, get_order(size));
1000
1001 if (ret) {
1002 memset(ret, 0, size);
1003 *dma_handle = sba_map_single(hwdev, ret, size, 0);
1004 }
1005
1006 return ret;
1007}
1008
1009
1010/**
1011 * sba_free_consistent - free/unmap shared mem for DMA
1012 * @hwdev: instance of PCI owned by the driver that's asking.
1013 * @size: number of bytes mapped in driver buffer.
1014 * @vaddr: virtual address IOVA of "consistent" buffer.
1015 * @dma_handler: IO virtual address of "consistent" buffer.
1016 *
1017 * See Documentation/DMA-mapping.txt
1018 */
1019static void
1020sba_free_consistent(struct device *hwdev, size_t size, void *vaddr,
1021 dma_addr_t dma_handle)
1022{
1023 sba_unmap_single(hwdev, dma_handle, size, 0);
1024 free_pages((unsigned long) vaddr, get_order(size));
1025}
1026
1027
1028/*
1029** Since 0 is a valid pdir_base index value, we can't use it
1030** to determine whether a value is valid. Use a flag to indicate
1031** that the SG list entry contains a valid pdir index.
1032*/
1033#define PIDE_FLAG 0x80000000UL
1034
1035#ifdef SBA_COLLECT_STATS
1036#define IOMMU_MAP_STATS
1037#endif
1038#include "iommu-helpers.h"
1039
1040#ifdef DEBUG_LARGE_SG_ENTRIES
1041int dump_run_sg = 0;
1042#endif
1043
1044
1045/**
1046 * sba_map_sg - map Scatter/Gather list
1047 * @dev: instance of PCI owned by the driver that's asking.
1048 * @sglist: array of buffer/length pairs
1049 * @nents: number of entries in list
1050 * @direction: R/W or both.
1051 *
1052 * See Documentation/DMA-mapping.txt
1053 */
1054static int
1055sba_map_sg(struct device *dev, struct scatterlist *sglist, int nents,
1056 enum dma_data_direction direction)
1057{
1058 struct ioc *ioc;
1059 int coalesced, filled = 0;
1060 unsigned long flags;
1061
1062 DBG_RUN_SG("%s() START %d entries\n", __FUNCTION__, nents);
1063
1064 ioc = GET_IOC(dev);
1065
1066 /* Fast path single entry scatterlists. */
1067 if (nents == 1) {
1068 sg_dma_address(sglist) = sba_map_single(dev,
1069 (void *)sg_virt_addr(sglist),
1070 sglist->length, direction);
1071 sg_dma_len(sglist) = sglist->length;
1072 return 1;
1073 }
1074
1075 spin_lock_irqsave(&ioc->res_lock, flags);
1076
1077#ifdef ASSERT_PDIR_SANITY
1078 if (sba_check_pdir(ioc,"Check before sba_map_sg()"))
1079 {
1080 sba_dump_sg(ioc, sglist, nents);
1081 panic("Check before sba_map_sg()");
1082 }
1083#endif
1084
1085#ifdef SBA_COLLECT_STATS
1086 ioc->msg_calls++;
1087#endif
1088
1089 /*
1090 ** First coalesce the chunks and allocate I/O pdir space
1091 **
1092 ** If this is one DMA stream, we can properly map using the
1093 ** correct virtual address associated with each DMA page.
1094 ** Without this association, we wouldn't have coherent DMA!
1095 ** Access to the virtual address is what forces a two-pass algorithm.
1096 */
1097 coalesced = iommu_coalesce_chunks(ioc, sglist, nents, sba_alloc_range);
1098
1099 /*
1100 ** Program the I/O Pdir
1101 **
1102 ** map the virtual addresses to the I/O Pdir
1103 ** o dma_address will contain the pdir index
1104 ** o dma_len will contain the number of bytes to map
1105 ** o address contains the virtual address.
1106 */
1107 filled = iommu_fill_pdir(ioc, sglist, nents, 0, sba_io_pdir_entry);
1108
1109#ifdef ASSERT_PDIR_SANITY
1110 if (sba_check_pdir(ioc,"Check after sba_map_sg()"))
1111 {
1112 sba_dump_sg(ioc, sglist, nents);
1113 panic("Check after sba_map_sg()\n");
1114 }
1115#endif
1116
1117 spin_unlock_irqrestore(&ioc->res_lock, flags);
1118
1119 DBG_RUN_SG("%s() DONE %d mappings\n", __FUNCTION__, filled);
1120
1121 return filled;
1122}
1123
1124
1125/**
1126 * sba_unmap_sg - unmap Scatter/Gather list
1127 * @dev: instance of PCI owned by the driver that's asking.
1128 * @sglist: array of buffer/length pairs
1129 * @nents: number of entries in list
1130 * @direction: R/W or both.
1131 *
1132 * See Documentation/DMA-mapping.txt
1133 */
1134static void
1135sba_unmap_sg(struct device *dev, struct scatterlist *sglist, int nents,
1136 enum dma_data_direction direction)
1137{
1138 struct ioc *ioc;
1139#ifdef ASSERT_PDIR_SANITY
1140 unsigned long flags;
1141#endif
1142
1143 DBG_RUN_SG("%s() START %d entries, %p,%x\n",
1144 __FUNCTION__, nents, sg_virt_addr(sglist), sglist->length);
1145
1146 ioc = GET_IOC(dev);
1147
1148#ifdef SBA_COLLECT_STATS
1149 ioc->usg_calls++;
1150#endif
1151
1152#ifdef ASSERT_PDIR_SANITY
1153 spin_lock_irqsave(&ioc->res_lock, flags);
1154 sba_check_pdir(ioc,"Check before sba_unmap_sg()");
1155 spin_unlock_irqrestore(&ioc->res_lock, flags);
1156#endif
1157
1158 while (sg_dma_len(sglist) && nents--) {
1159
1160 sba_unmap_single(dev, sg_dma_address(sglist), sg_dma_len(sglist), direction);
1161#ifdef SBA_COLLECT_STATS
1162 ioc->usg_pages += ((sg_dma_address(sglist) & ~IOVP_MASK) + sg_dma_len(sglist) + IOVP_SIZE - 1) >> PAGE_SHIFT;
1163 ioc->usingle_calls--; /* kluge since call is unmap_sg() */
1164#endif
1165 ++sglist;
1166 }
1167
1168 DBG_RUN_SG("%s() DONE (nents %d)\n", __FUNCTION__, nents);
1169
1170#ifdef ASSERT_PDIR_SANITY
1171 spin_lock_irqsave(&ioc->res_lock, flags);
1172 sba_check_pdir(ioc,"Check after sba_unmap_sg()");
1173 spin_unlock_irqrestore(&ioc->res_lock, flags);
1174#endif
1175
1176}
1177
1178static struct hppa_dma_ops sba_ops = {
1179 .dma_supported = sba_dma_supported,
1180 .alloc_consistent = sba_alloc_consistent,
1181 .alloc_noncoherent = sba_alloc_consistent,
1182 .free_consistent = sba_free_consistent,
1183 .map_single = sba_map_single,
1184 .unmap_single = sba_unmap_single,
1185 .map_sg = sba_map_sg,
1186 .unmap_sg = sba_unmap_sg,
1187 .dma_sync_single_for_cpu = NULL,
1188 .dma_sync_single_for_device = NULL,
1189 .dma_sync_sg_for_cpu = NULL,
1190 .dma_sync_sg_for_device = NULL,
1191};
1192
1193
1194/**************************************************************************
1195**
1196** SBA PAT PDC support
1197**
1198** o call pdc_pat_cell_module()
1199** o store ranges in PCI "resource" structures
1200**
1201**************************************************************************/
1202
1203static void
1204sba_get_pat_resources(struct sba_device *sba_dev)
1205{
1206#if 0
1207/*
1208** TODO/REVISIT/FIXME: support for directed ranges requires calls to
1209** PAT PDC to program the SBA/LBA directed range registers...this
1210** burden may fall on the LBA code since it directly supports the
1211** PCI subsystem. It's not clear yet. - ggg
1212*/
1213PAT_MOD(mod)->mod_info.mod_pages = PAT_GET_MOD_PAGES(temp);
1214 FIXME : ???
1215PAT_MOD(mod)->mod_info.dvi = PAT_GET_DVI(temp);
1216 Tells where the dvi bits are located in the address.
1217PAT_MOD(mod)->mod_info.ioc = PAT_GET_IOC(temp);
1218 FIXME : ???
1219#endif
1220}
1221
1222
1223/**************************************************************
1224*
1225* Initialization and claim
1226*
1227***************************************************************/
1228#define PIRANHA_ADDR_MASK 0x00160000UL /* bit 17,18,20 */
1229#define PIRANHA_ADDR_VAL 0x00060000UL /* bit 17,18 on */
1230static void *
1231sba_alloc_pdir(unsigned int pdir_size)
1232{
1233 unsigned long pdir_base;
1234 unsigned long pdir_order = get_order(pdir_size);
1235
1236 pdir_base = __get_free_pages(GFP_KERNEL, pdir_order);
1237 if (NULL == (void *) pdir_base)
1238 panic("sba_ioc_init() could not allocate I/O Page Table\n");
1239
1240 /* If this is not PA8700 (PCX-W2)
1241 ** OR newer than ver 2.2
1242 ** OR in a system that doesn't need VINDEX bits from SBA,
1243 **
1244 ** then we aren't exposed to the HW bug.
1245 */
1246 if ( ((boot_cpu_data.pdc.cpuid >> 5) & 0x7f) != 0x13
1247 || (boot_cpu_data.pdc.versions > 0x202)
1248 || (boot_cpu_data.pdc.capabilities & 0x08L) )
1249 return (void *) pdir_base;
1250
1251 /*
1252 * PA8700 (PCX-W2, aka piranha) silent data corruption fix
1253 *
1254 * An interaction between PA8700 CPU (Ver 2.2 or older) and
1255 * Ike/Astro can cause silent data corruption. This is only
1256 * a problem if the I/O PDIR is located in memory such that
1257 * (little-endian) bits 17 and 18 are on and bit 20 is off.
1258 *
1259 * Since the max IO Pdir size is 2MB, by cleverly allocating the
1260 * right physical address, we can either avoid (IOPDIR <= 1MB)
1261 * or minimize (2MB IO Pdir) the problem if we restrict the
1262 * IO Pdir to a maximum size of 2MB-128K (1902K).
1263 *
1264 * Because we always allocate 2^N sized IO pdirs, either of the
1265 * "bad" regions will be the last 128K if at all. That's easy
1266 * to test for.
1267 *
1268 */
1269 if (pdir_order <= (19-12)) {
1270 if (((virt_to_phys(pdir_base)+pdir_size-1) & PIRANHA_ADDR_MASK) == PIRANHA_ADDR_VAL) {
1271 /* allocate a new one on 512k alignment */
1272 unsigned long new_pdir = __get_free_pages(GFP_KERNEL, (19-12));
1273 /* release original */
1274 free_pages(pdir_base, pdir_order);
1275
1276 pdir_base = new_pdir;
1277
1278 /* release excess */
1279 while (pdir_order < (19-12)) {
1280 new_pdir += pdir_size;
1281 free_pages(new_pdir, pdir_order);
1282 pdir_order +=1;
1283 pdir_size <<=1;
1284 }
1285 }
1286 } else {
1287 /*
1288 ** 1MB or 2MB Pdir
1289 ** Needs to be aligned on an "odd" 1MB boundary.
1290 */
1291 unsigned long new_pdir = __get_free_pages(GFP_KERNEL, pdir_order+1); /* 2 or 4MB */
1292
1293 /* release original */
1294 free_pages( pdir_base, pdir_order);
1295
1296 /* release first 1MB */
1297 free_pages(new_pdir, 20-12);
1298
1299 pdir_base = new_pdir + 1024*1024;
1300
1301 if (pdir_order > (20-12)) {
1302 /*
1303 ** 2MB Pdir.
1304 **
1305 ** Flag tells init_bitmap() to mark bad 128k as used
1306 ** and to reduce the size by 128k.
1307 */
1308 piranha_bad_128k = 1;
1309
1310 new_pdir += 3*1024*1024;
1311 /* release last 1MB */
1312 free_pages(new_pdir, 20-12);
1313
1314 /* release unusable 128KB */
1315 free_pages(new_pdir - 128*1024 , 17-12);
1316
1317 pdir_size -= 128*1024;
1318 }
1319 }
1320
1321 memset((void *) pdir_base, 0, pdir_size);
1322 return (void *) pdir_base;
1323}
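The workaround above hinges on one address test; here is a self-contained sketch of that check in isolation (bits 17 and 18 set while bit 20 is clear marks the problematic region; sample addresses are illustrative):

	#include <stdio.h>

	#define PIRANHA_ADDR_MASK	0x00160000UL	/* bits 17, 18, 20 */
	#define PIRANHA_ADDR_VAL	0x00060000UL	/* bits 17, 18 on */

	/* Return nonzero if [phys, phys + size) ends in a PA8700-unsafe region. */
	static int pdir_is_piranha_bad(unsigned long phys, unsigned long size)
	{
		return ((phys + size - 1) & PIRANHA_ADDR_MASK) == PIRANHA_ADDR_VAL;
	}

	int main(void)
	{
		printf("%d\n", pdir_is_piranha_bad(0x00060000UL, 0x1000)); /* 1 */
		printf("%d\n", pdir_is_piranha_bad(0x00160000UL, 0x1000)); /* 0: bit 20 set */
		return 0;
	}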
1324
1325/* setup Mercury or Elroy IBASE/IMASK registers. */
1326static void setup_ibase_imask(struct parisc_device *sba, struct ioc *ioc, int ioc_num)
1327{
1328 /* lba_set_iregs() is in drivers/parisc/lba_pci.c */
1329 extern void lba_set_iregs(struct parisc_device *, u32, u32);
1330 struct device *dev;
1331
1332 list_for_each_entry(dev, &sba->dev.children, node) {
1333 struct parisc_device *lba = to_parisc_device(dev);
1334 int rope_num = (lba->hpa >> 13) & 0xf;
1335 if (rope_num >> 3 == ioc_num)
1336 lba_set_iregs(lba, ioc->ibase, ioc->imask);
1337 }
1338}
1339
1340static void
1341sba_ioc_init_pluto(struct parisc_device *sba, struct ioc *ioc, int ioc_num)
1342{
1343 u32 iova_space_mask;
1344 u32 iova_space_size;
1345 int iov_order, tcnfg;
1346#if SBA_AGP_SUPPORT
1347 int agp_found = 0;
1348#endif
1349 /*
1350 ** Firmware programs the base and size of a "safe IOVA space"
1351 ** (one that doesn't overlap memory or LMMIO space) in the
1352 ** IBASE and IMASK registers.
1353 */
1354 ioc->ibase = READ_REG(ioc->ioc_hpa + IOC_IBASE);
1355 iova_space_size = ~(READ_REG(ioc->ioc_hpa + IOC_IMASK) & 0xFFFFFFFFUL) + 1;
1356
1357 if ((ioc->ibase < 0xfed00000UL) && ((ioc->ibase + iova_space_size) > 0xfee00000UL)) {
1358 printk("WARNING: IOV space overlaps local config and interrupt message, truncating\n");
1359 iova_space_size /= 2;
1360 }
1361
1362 /*
1363 ** iov_order is always based on a 1GB IOVA space since we want to
1364 ** turn on the other half for AGP GART.
1365 */
1366 iov_order = get_order(iova_space_size >> (IOVP_SHIFT - PAGE_SHIFT));
1367 ioc->pdir_size = (iova_space_size / IOVP_SIZE) * sizeof(u64);
1368
1369 DBG_INIT("%s() hpa 0x%lx IOV %dMB (%d bits)\n",
1370 __FUNCTION__, ioc->ioc_hpa, iova_space_size >> 20,
1371 iov_order + PAGE_SHIFT);
1372
1373 ioc->pdir_base = (void *) __get_free_pages(GFP_KERNEL,
1374 get_order(ioc->pdir_size));
1375 if (!ioc->pdir_base)
1376 panic("Couldn't allocate I/O Page Table\n");
1377
1378 memset(ioc->pdir_base, 0, ioc->pdir_size);
1379
1380 DBG_INIT("%s() pdir %p size %x\n",
1381 __FUNCTION__, ioc->pdir_base, ioc->pdir_size);
1382
1383#if SBA_HINT_SUPPORT
1384 ioc->hint_shift_pdir = iov_order + PAGE_SHIFT;
1385 ioc->hint_mask_pdir = ~(0x3 << (iov_order + PAGE_SHIFT));
1386
1387 DBG_INIT(" hint_shift_pdir %x hint_mask_pdir %lx\n",
1388 ioc->hint_shift_pdir, ioc->hint_mask_pdir);
1389#endif
1390
1391 WARN_ON((((unsigned long) ioc->pdir_base) & PAGE_MASK) != (unsigned long) ioc->pdir_base);
1392 WRITE_REG(virt_to_phys(ioc->pdir_base), ioc->ioc_hpa + IOC_PDIR_BASE);
1393
1394 /* build IMASK for IOC and Elroy */
1395 iova_space_mask = 0xffffffff;
1396 iova_space_mask <<= (iov_order + PAGE_SHIFT);
1397 ioc->imask = iova_space_mask;
1398#ifdef ZX1_SUPPORT
1399 ioc->iovp_mask = ~(iova_space_mask + PAGE_SIZE - 1);
1400#endif
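	/*
	** Sanity example (assuming 4K pages, i.e. IOVP_SHIFT == 12):
	** a 1GB IOVA space gives iov_order + PAGE_SHIFT == 30, so
	** imask = 0xffffffff << 30 = 0xc0000000 and
	** pdir_size = (1GB / 4K) * sizeof(u64) = 2MB of I/O pdir.
	*/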
1401 sba_dump_tlb(ioc->ioc_hpa);
1402
1403 setup_ibase_imask(sba, ioc, ioc_num);
1404
1405 WRITE_REG(ioc->imask, ioc->ioc_hpa + IOC_IMASK);
1406
1407#ifdef __LP64__
1408 /*
1409 ** Setting the upper bits makes checking for bypass addresses
1410 ** a little faster later on.
1411 */
1412 ioc->imask |= 0xFFFFFFFF00000000UL;
1413#endif
1414
1415 /* Set I/O PDIR Page size to system page size */
1416 switch (PAGE_SHIFT) {
1417 case 12: tcnfg = 0; break; /* 4K */
1418 case 13: tcnfg = 1; break; /* 8K */
1419 case 14: tcnfg = 2; break; /* 16K */
1420 case 16: tcnfg = 3; break; /* 64K */
1421 default:
1422		panic(__FILE__ ": unsupported system page size %d",
1423 1 << PAGE_SHIFT);
1424 break;
1425 }
1426 WRITE_REG(tcnfg, ioc->ioc_hpa + IOC_TCNFG);
1427
1428 /*
1429 ** Program the IOC's ibase and enable IOVA translation
1430 ** Bit zero == enable bit.
1431 */
1432 WRITE_REG(ioc->ibase | 1, ioc->ioc_hpa + IOC_IBASE);
1433
1434 /*
1435 ** Clear I/O TLB of any possible entries.
1436 ** (Yes. This is a bit paranoid...but so what)
1437 */
1438 WRITE_REG(ioc->ibase | 31, ioc->ioc_hpa + IOC_PCOM);
1439
1440#if SBA_AGP_SUPPORT
1441 /*
1442 ** If an AGP device is present, only use half of the IOV space
1443 ** for PCI DMA. Unfortunately we can't know ahead of time
1444 ** whether GART support will actually be used, for now we
1445 ** can just key on any AGP device found in the system.
1446 ** We program the next pdir index after we stop w/ a key for
1447 ** the GART code to handshake on.
1448 */
1449	/* Walk the SBA's children looking for a Quicksilver (AGP)
1450	** rope, the same way setup_ibase_imask() does above.
1451	*/
1452	list_for_each_entry(dev, &sba->dev.children, node) {
1453		struct parisc_device *child = to_parisc_device(dev);
		if (IS_QUICKSILVER(child)) {
			lba = child;
			break;
		}
	}
1454
1455 if (lba) {
1456 DBG_INIT("%s: Reserving half of IOVA space for AGP GART support\n", __FUNCTION__);
1457 ioc->pdir_size /= 2;
1458 ((u64 *)ioc->pdir_base)[PDIR_INDEX(iova_space_size/2)] = SBA_IOMMU_COOKIE;
1459 } else {
1460 DBG_INIT("%s: No GART needed - no AGP controller found\n", __FUNCTION__);
1461 }
1462#endif /* SBA_AGP_SUPPORT */
1463
1464}
1465
1466static void
1467sba_ioc_init(struct parisc_device *sba, struct ioc *ioc, int ioc_num)
1468{
1469 u32 iova_space_size, iova_space_mask;
1470 unsigned int pdir_size, iov_order;
1471
1472 /*
1473 ** Determine IOVA Space size from memory size.
1474 **
1475 ** Ideally, PCI drivers would register the maximum number
1476 ** of DMA they can have outstanding for each device they
1477 ** own. Next best thing would be to guess how much DMA
1478 ** can be outstanding based on PCI Class/sub-class. Both
1479 ** methods still require some "extra" to support PCI
1480 ** Hot-Plug/Removal of PCI cards. (aka PCI OLARD).
1481 **
1482	** While we have 32 bits of "IOVA" space, the top two bits are used
1483 ** for DMA hints - ergo only 30 bits max.
1484 */
1485
1486 iova_space_size = (u32) (num_physpages/global_ioc_cnt);
1487
1488 /* limit IOVA space size to 1MB-1GB */
1489 if (iova_space_size < (1 << (20 - PAGE_SHIFT))) {
1490 iova_space_size = 1 << (20 - PAGE_SHIFT);
1491 }
1492#ifdef __LP64__
1493 else if (iova_space_size > (1 << (30 - PAGE_SHIFT))) {
1494 iova_space_size = 1 << (30 - PAGE_SHIFT);
1495 }
1496#endif
1497
1498 /*
1499 ** iova space must be log2() in size.
1500 ** thus, pdir/res_map will also be log2().
1501 ** PIRANHA BUG: Exception is when IO Pdir is 2MB (gets reduced)
1502 */
1503 iov_order = get_order(iova_space_size << PAGE_SHIFT);
1504
1505 /* iova_space_size is now bytes, not pages */
1506 iova_space_size = 1 << (iov_order + PAGE_SHIFT);
1507
1508 ioc->pdir_size = pdir_size = (iova_space_size/IOVP_SIZE) * sizeof(u64);
1509
1510 DBG_INIT("%s() hpa 0x%lx mem %ldMB IOV %dMB (%d bits)\n",
1511 __FUNCTION__,
1512 ioc->ioc_hpa,
1513 (unsigned long) num_physpages >> (20 - PAGE_SHIFT),
1514 iova_space_size>>20,
1515 iov_order + PAGE_SHIFT);
1516
1517 ioc->pdir_base = sba_alloc_pdir(pdir_size);
1518
1519 DBG_INIT("%s() pdir %p size %x\n",
1520 __FUNCTION__, ioc->pdir_base, pdir_size);
1521
1522#if SBA_HINT_SUPPORT
1523 /* FIXME : DMA HINTs not used */
1524 ioc->hint_shift_pdir = iov_order + PAGE_SHIFT;
1525 ioc->hint_mask_pdir = ~(0x3 << (iov_order + PAGE_SHIFT));
1526
1527 DBG_INIT(" hint_shift_pdir %x hint_mask_pdir %lx\n",
1528 ioc->hint_shift_pdir, ioc->hint_mask_pdir);
1529#endif
1530
1531 WRITE_REG64(virt_to_phys(ioc->pdir_base), ioc->ioc_hpa + IOC_PDIR_BASE);
1532
1533 /* build IMASK for IOC and Elroy */
1534 iova_space_mask = 0xffffffff;
1535 iova_space_mask <<= (iov_order + PAGE_SHIFT);
1536
1537 /*
1538 ** On C3000 w/512MB mem, HP-UX 10.20 reports:
1539 ** ibase=0, imask=0xFE000000, size=0x2000000.
1540 */
1541 ioc->ibase = 0;
1542 ioc->imask = iova_space_mask; /* save it */
1543#ifdef ZX1_SUPPORT
1544 ioc->iovp_mask = ~(iova_space_mask + PAGE_SIZE - 1);
1545#endif
1546
1547 DBG_INIT("%s() IOV base 0x%lx mask 0x%0lx\n",
1548 __FUNCTION__, ioc->ibase, ioc->imask);
1549
1550 /*
1551 ** FIXME: Hint registers are programmed with default hint
1552 ** values during boot, so hints should be sane even if we
1553 ** can't reprogram them the way drivers want.
1554 */
1555
1556 setup_ibase_imask(sba, ioc, ioc_num);
1557
1558 /*
1559 ** Program the IOC's ibase and enable IOVA translation
1560 */
1561 WRITE_REG(ioc->ibase | 1, ioc->ioc_hpa+IOC_IBASE);
1562 WRITE_REG(ioc->imask, ioc->ioc_hpa+IOC_IMASK);
1563
1564 /* Set I/O PDIR Page size to 4K */
1565 WRITE_REG(0, ioc->ioc_hpa+IOC_TCNFG);
1566
1567 /*
1568 ** Clear I/O TLB of any possible entries.
1569 ** (Yes. This is a bit paranoid...but so what)
1570 */
1571 WRITE_REG(0 | 31, ioc->ioc_hpa+IOC_PCOM);
1572
1573 ioc->ibase = 0; /* used by SBA_IOVA and related macros */
1574
1575 DBG_INIT("%s() DONE\n", __FUNCTION__);
1576}
1577
1578
1579
1580/**************************************************************************
1581**
1582** SBA initialization code (HW and SW)
1583**
1584** o identify SBA chip itself
1585** o initialize SBA chip modes (HardFail)
1587** o FIXME: initialize DMA hints for reasonable defaults
1588**
1589**************************************************************************/
1590
1591static void __iomem *ioc_remap(struct sba_device *sba_dev, int offset)
1592{
1593 return ioremap(sba_dev->dev->hpa + offset, SBA_FUNC_SIZE);
1594}
1595
1596static void sba_hw_init(struct sba_device *sba_dev)
1597{
1598 int i;
1599 int num_ioc;
1600 u64 ioc_ctl;
1601
1602 if (!is_pdc_pat()) {
1603 /* Shutdown the USB controller on Astro-based workstations.
1604 ** Once we reprogram the IOMMU, the next DMA performed by
1605 ** USB will HPMC the box. USB is only enabled if a
1606 ** keyboard is present and found.
1607 **
1608 ** With serial console, j6k v5.0 firmware says:
1609 ** mem_kbd hpa 0xfee003f8 sba 0x0 pad 0x0 cl_class 0x7
1610 **
1611 ** FIXME: Using GFX+USB console at power up but direct
1612 ** linux to serial console is still broken.
1613 ** USB could generate DMA so we must reset USB.
1614 ** The proper sequence would be:
1615 ** o block console output
1616 ** o reset USB device
1617 ** o reprogram serial port
1618 ** o unblock console output
1619 */
1620 if (PAGE0->mem_kbd.cl_class == CL_KEYBD) {
1621 pdc_io_reset_devices();
1622 }
1623
1624 }
1625
1626
1627#if 0
1628printk("sba_hw_init(): mem_boot 0x%x 0x%x 0x%x 0x%x\n", PAGE0->mem_boot.hpa,
1629 PAGE0->mem_boot.spa, PAGE0->mem_boot.pad, PAGE0->mem_boot.cl_class);
1630
1631 /*
1632 ** Need to deal with DMA from LAN.
1633 ** Maybe use page zero boot device as a handle to talk
1634 ** to PDC about which device to shutdown.
1635 **
1636 ** Netbooting, j6k v5.0 firmware says:
1637 ** mem_boot hpa 0xf4008000 sba 0x0 pad 0x0 cl_class 0x1002
1638 ** ARGH! invalid class.
1639 */
1640 if ((PAGE0->mem_boot.cl_class != CL_RANDOM)
1641 && (PAGE0->mem_boot.cl_class != CL_SEQU)) {
1642 pdc_io_reset();
1643 }
1644#endif
1645
1646 if (!IS_PLUTO(sba_dev->iodc)) {
1647 ioc_ctl = READ_REG(sba_dev->sba_hpa+IOC_CTRL);
1648 DBG_INIT("%s() hpa 0x%lx ioc_ctl 0x%Lx ->",
1649 __FUNCTION__, sba_dev->sba_hpa, ioc_ctl);
1650 ioc_ctl &= ~(IOC_CTRL_RM | IOC_CTRL_NC | IOC_CTRL_CE);
1651 ioc_ctl |= IOC_CTRL_DD | IOC_CTRL_D4 | IOC_CTRL_TC;
1652 /* j6700 v1.6 firmware sets 0x294f */
1653 /* A500 firmware sets 0x4d */
1654
1655 WRITE_REG(ioc_ctl, sba_dev->sba_hpa+IOC_CTRL);
1656
1657#ifdef DEBUG_SBA_INIT
1658 ioc_ctl = READ_REG64(sba_dev->sba_hpa+IOC_CTRL);
1659 DBG_INIT(" 0x%Lx\n", ioc_ctl);
1660#endif
1661 } /* if !PLUTO */
1662
1663 if (IS_ASTRO(sba_dev->iodc)) {
1664 int err;
1665 /* PAT_PDC (L-class) also reports the same goofy base */
1666 sba_dev->ioc[0].ioc_hpa = ioc_remap(sba_dev, ASTRO_IOC_OFFSET);
1667 num_ioc = 1;
1668
1669 sba_dev->chip_resv.name = "Astro Intr Ack";
1670 sba_dev->chip_resv.start = PCI_F_EXTEND | 0xfef00000UL;
1671 sba_dev->chip_resv.end = PCI_F_EXTEND | (0xff000000UL - 1) ;
1672 err = request_resource(&iomem_resource, &(sba_dev->chip_resv));
1673 if (err < 0) {
1674 BUG();
1675 }
1676
1677 } else if (IS_PLUTO(sba_dev->iodc)) {
1678 int err;
1679
1680 /* We use a negative value for IOC HPA so it gets
1681 * corrected when we add it with IKE's IOC offset.
1682 * Doesn't look clean, but it's less code.
1683 */
1684 sba_dev->ioc[0].ioc_hpa = ioc_remap(sba_dev, PLUTO_IOC_OFFSET);
1685 num_ioc = 1;
1686
1687 sba_dev->chip_resv.name = "Pluto Intr/PIOP/VGA";
1688 sba_dev->chip_resv.start = PCI_F_EXTEND | 0xfee00000UL;
1689 sba_dev->chip_resv.end = PCI_F_EXTEND | (0xff200000UL - 1);
1690 err = request_resource(&iomem_resource, &(sba_dev->chip_resv));
1691 WARN_ON(err < 0);
1692
1693 sba_dev->iommu_resv.name = "IOVA Space";
1694 sba_dev->iommu_resv.start = 0x40000000UL;
1695 sba_dev->iommu_resv.end = 0x50000000UL - 1;
1696 err = request_resource(&iomem_resource, &(sba_dev->iommu_resv));
1697 WARN_ON(err < 0);
1698 } else {
1699 /* IS_IKE (ie N-class, L3000, L1500) */
1700 sba_dev->ioc[0].ioc_hpa = ioc_remap(sba_dev, IKE_IOC_OFFSET(0));
1701 sba_dev->ioc[1].ioc_hpa = ioc_remap(sba_dev, IKE_IOC_OFFSET(1));
1702 num_ioc = 2;
1703
1704 /* TODO - LOOKUP Ike/Stretch chipset mem map */
1705 }
1706 /* XXX: What about Reo? */
1707
1708 sba_dev->num_ioc = num_ioc;
1709 for (i = 0; i < num_ioc; i++) {
1710 /*
1711 ** Make sure the box crashes if we get any errors on a rope.
1712 */
1713 WRITE_REG(HF_ENABLE, sba_dev->ioc[i].ioc_hpa + ROPE0_CTL);
1714 WRITE_REG(HF_ENABLE, sba_dev->ioc[i].ioc_hpa + ROPE1_CTL);
1715 WRITE_REG(HF_ENABLE, sba_dev->ioc[i].ioc_hpa + ROPE2_CTL);
1716 WRITE_REG(HF_ENABLE, sba_dev->ioc[i].ioc_hpa + ROPE3_CTL);
1717 WRITE_REG(HF_ENABLE, sba_dev->ioc[i].ioc_hpa + ROPE4_CTL);
1718 WRITE_REG(HF_ENABLE, sba_dev->ioc[i].ioc_hpa + ROPE5_CTL);
1719 WRITE_REG(HF_ENABLE, sba_dev->ioc[i].ioc_hpa + ROPE6_CTL);
1720 WRITE_REG(HF_ENABLE, sba_dev->ioc[i].ioc_hpa + ROPE7_CTL);
1721
1722 /* flush out the writes */
1723 READ_REG(sba_dev->ioc[i].ioc_hpa + ROPE7_CTL);
1724
1725 DBG_INIT(" ioc[%d] ROPE_CFG 0x%Lx ROPE_DBG 0x%Lx\n",
1726 i,
1727 READ_REG(sba_dev->ioc[i].ioc_hpa + 0x40),
1728 READ_REG(sba_dev->ioc[i].ioc_hpa + 0x50)
1729 );
1730 DBG_INIT(" STATUS_CONTROL 0x%Lx FLUSH_CTRL 0x%Lx\n",
1731 READ_REG(sba_dev->ioc[i].ioc_hpa + 0x108),
1732 READ_REG(sba_dev->ioc[i].ioc_hpa + 0x400)
1733 );
1734
1735 if (IS_PLUTO(sba_dev->iodc)) {
1736 sba_ioc_init_pluto(sba_dev->dev, &(sba_dev->ioc[i]), i);
1737 } else {
1738 sba_ioc_init(sba_dev->dev, &(sba_dev->ioc[i]), i);
1739 }
1740 }
1741}
1742
1743static void
1744sba_common_init(struct sba_device *sba_dev)
1745{
1746 int i;
1747
1748 /* add this one to the head of the list (order doesn't matter)
1749 ** This will be useful for debugging - especially if we get coredumps
1750 */
1751 sba_dev->next = sba_list;
1752 sba_list = sba_dev;
1753
1754 for(i=0; i< sba_dev->num_ioc; i++) {
1755 int res_size;
1756#ifdef DEBUG_DMB_TRAP
1757 extern void iterate_pages(unsigned long , unsigned long ,
1758 void (*)(pte_t * , unsigned long),
1759 unsigned long );
1760 void set_data_memory_break(pte_t * , unsigned long);
1761#endif
1762 /* resource map size dictated by pdir_size */
1763 res_size = sba_dev->ioc[i].pdir_size/sizeof(u64); /* entries */
1764
1765 /* Second part of PIRANHA BUG */
1766 if (piranha_bad_128k) {
1767 res_size -= (128*1024)/sizeof(u64);
1768 }
1769
1770 res_size >>= 3; /* convert bit count to byte count */
1771 DBG_INIT("%s() res_size 0x%x\n",
1772 __FUNCTION__, res_size);
1773
1774 sba_dev->ioc[i].res_size = res_size;
1775 sba_dev->ioc[i].res_map = (char *) __get_free_pages(GFP_KERNEL, get_order(res_size));
1776
1777#ifdef DEBUG_DMB_TRAP
1778 iterate_pages( sba_dev->ioc[i].res_map, res_size,
1779 set_data_memory_break, 0);
1780#endif
1781
1782 if (NULL == sba_dev->ioc[i].res_map)
1783 {
1784 panic("%s:%s() could not allocate resource map\n",
1785 __FILE__, __FUNCTION__ );
1786 }
1787
1788 memset(sba_dev->ioc[i].res_map, 0, res_size);
1789 /* next available IOVP - circular search */
1790 sba_dev->ioc[i].res_hint = (unsigned long *)
1791 &(sba_dev->ioc[i].res_map[L1_CACHE_BYTES]);
1792
1793#ifdef ASSERT_PDIR_SANITY
1794 /* Mark first bit busy - ie no IOVA 0 */
1795 sba_dev->ioc[i].res_map[0] = 0x80;
1796 sba_dev->ioc[i].pdir_base[0] = 0xeeffc0addbba0080ULL;
1797#endif
1798
1799 /* Third (and last) part of PIRANHA BUG */
1800 if (piranha_bad_128k) {
1801		/* region from +1408K to +1536K is unusable. */
1802
1803 int idx_start = (1408*1024/sizeof(u64)) >> 3;
1804 int idx_end = (1536*1024/sizeof(u64)) >> 3;
1805 long *p_start = (long *) &(sba_dev->ioc[i].res_map[idx_start]);
1806 long *p_end = (long *) &(sba_dev->ioc[i].res_map[idx_end]);
1807
1808 /* mark that part of the io pdir busy */
1809 while (p_start < p_end)
1810 *p_start++ = -1;
1811
1812 }
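		/* Putting numbers on the 2MB-pdir case: res_size starts
		** as 2MB / 8 = 262144 entries, the Piranha adjustment
		** above subtracts 128K / 8 = 16384 of them, and the >>3
		** turns the remaining 245760 bits into a 30720 byte
		** bitmap.  The +1408K..+1536K marking then spans bitmap
		** bytes 22528-24575, i.e. the idx_start/idx_end just
		** computed.
		*/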
1813
1814#ifdef DEBUG_DMB_TRAP
1815 iterate_pages( sba_dev->ioc[i].res_map, res_size,
1816 set_data_memory_break, 0);
1817 iterate_pages( sba_dev->ioc[i].pdir_base, sba_dev->ioc[i].pdir_size,
1818 set_data_memory_break, 0);
1819#endif
1820
1821 DBG_INIT("%s() %d res_map %x %p\n",
1822 __FUNCTION__, i, res_size, sba_dev->ioc[i].res_map);
1823 }
1824
1825 spin_lock_init(&sba_dev->sba_lock);
1826 ioc_needs_fdc = boot_cpu_data.pdc.capabilities & PDC_MODEL_IOPDIR_FDC;
1827
1828#ifdef DEBUG_SBA_INIT
1829 /*
1830 * If the PDC_MODEL capabilities has Non-coherent IO-PDIR bit set
1831 * (bit #61, big endian), we have to flush and sync every time
1832 * IO-PDIR is changed in Ike/Astro.
1833 */
1834 if (boot_cpu_data.pdc.capabilities & PDC_MODEL_IOPDIR_FDC) {
1835 printk(KERN_INFO MODULE_NAME " FDC/SYNC required.\n");
1836 } else {
1837 printk(KERN_INFO MODULE_NAME " IOC has cache coherent PDIR.\n");
1838 }
1839#endif
1840}
1841
1842#ifdef CONFIG_PROC_FS
1843static int sba_proc_info(char *buf, char **start, off_t offset, int len)
1844{
1845 struct sba_device *sba_dev = sba_list;
1846 struct ioc *ioc = &sba_dev->ioc[0]; /* FIXME: Multi-IOC support! */
1847 int total_pages = (int) (ioc->res_size << 3); /* 8 bits per byte */
1848 unsigned long i;
1849#ifdef SBA_COLLECT_STATS
1850 unsigned long avg = 0, min, max;
1851#endif
1852
1853 sprintf(buf, "%s rev %d.%d\n",
1854 sba_dev->name,
1855 (sba_dev->hw_rev & 0x7) + 1,
1856 (sba_dev->hw_rev & 0x18) >> 3
1857 );
1858 sprintf(buf, "%sIO PDIR size : %d bytes (%d entries)\n",
1859 buf,
1860 (int) ((ioc->res_size << 3) * sizeof(u64)), /* 8 bits/byte */
1861 total_pages);
1862
1863 sprintf(buf, "%sResource bitmap : %d bytes (%d pages)\n",
1864 buf, ioc->res_size, ioc->res_size << 3); /* 8 bits per byte */
1865
1866 sprintf(buf, "%sLMMIO_BASE/MASK/ROUTE %08x %08x %08x\n",
1867 buf,
1868 READ_REG32(sba_dev->sba_hpa + LMMIO_DIST_BASE),
1869 READ_REG32(sba_dev->sba_hpa + LMMIO_DIST_MASK),
1870 READ_REG32(sba_dev->sba_hpa + LMMIO_DIST_ROUTE)
1871 );
1872
1873 for (i=0; i<4; i++)
1874 sprintf(buf, "%sDIR%ld_BASE/MASK/ROUTE %08x %08x %08x\n",
1875 buf, i,
1876 READ_REG32(sba_dev->sba_hpa + LMMIO_DIRECT0_BASE + i*0x18),
1877 READ_REG32(sba_dev->sba_hpa + LMMIO_DIRECT0_MASK + i*0x18),
1878 READ_REG32(sba_dev->sba_hpa + LMMIO_DIRECT0_ROUTE + i*0x18)
1879 );
1880
1881#ifdef SBA_COLLECT_STATS
1882 sprintf(buf, "%sIO PDIR entries : %ld free %ld used (%d%%)\n", buf,
1883 total_pages - ioc->used_pages, ioc->used_pages,
1884 (int) (ioc->used_pages * 100 / total_pages));
1885
1886 min = max = ioc->avg_search[0];
1887 for (i = 0; i < SBA_SEARCH_SAMPLE; i++) {
1888 avg += ioc->avg_search[i];
1889 if (ioc->avg_search[i] > max) max = ioc->avg_search[i];
1890 if (ioc->avg_search[i] < min) min = ioc->avg_search[i];
1891 }
1892 avg /= SBA_SEARCH_SAMPLE;
1893 sprintf(buf, "%s Bitmap search : %ld/%ld/%ld (min/avg/max CPU Cycles)\n",
1894 buf, min, avg, max);
1895
1896 sprintf(buf, "%spci_map_single(): %12ld calls %12ld pages (avg %d/1000)\n",
1897 buf, ioc->msingle_calls, ioc->msingle_pages,
1898 (int) ((ioc->msingle_pages * 1000)/ioc->msingle_calls));
1899
1900 /* KLUGE - unmap_sg calls unmap_single for each mapped page */
1901 min = ioc->usingle_calls;
1902 max = ioc->usingle_pages - ioc->usg_pages;
1903 sprintf(buf, "%spci_unmap_single: %12ld calls %12ld pages (avg %d/1000)\n",
1904 buf, min, max,
1905 (int) ((max * 1000)/min));
1906
1907 sprintf(buf, "%spci_map_sg() : %12ld calls %12ld pages (avg %d/1000)\n",
1908 buf, ioc->msg_calls, ioc->msg_pages,
1909 (int) ((ioc->msg_pages * 1000)/ioc->msg_calls));
1910
1911 sprintf(buf, "%spci_unmap_sg() : %12ld calls %12ld pages (avg %d/1000)\n",
1912 buf, ioc->usg_calls, ioc->usg_pages,
1913 (int) ((ioc->usg_pages * 1000)/ioc->usg_calls));
1914#endif
1915
1916 return strlen(buf);
1917}
1918
1919#if 0
1920/* XXX too much output - exceeds 4k limit and needs to be re-written */
1921static int
1922sba_resource_map(char *buf, char **start, off_t offset, int len)
1923{
1924 struct sba_device *sba_dev = sba_list;
1925	struct ioc *ioc = &sba_dev->ioc[0];	/* FIXME: Multi-IOC support! */
1926 unsigned int *res_ptr = (unsigned int *)ioc->res_map;
1927 int i;
1928
1929 buf[0] = '\0';
1930 for(i = 0; i < (ioc->res_size / sizeof(unsigned int)); ++i, ++res_ptr) {
1931 if ((i & 7) == 0)
1932 strcat(buf,"\n ");
1933 sprintf(buf, "%s %08x", buf, *res_ptr);
1934 }
1935 strcat(buf, "\n");
1936
1937 return strlen(buf);
1938}
1939#endif /* 0 */
1940#endif /* CONFIG_PROC_FS */
1941
1942static struct parisc_device_id sba_tbl[] = {
1943 { HPHW_IOA, HVERSION_REV_ANY_ID, ASTRO_RUNWAY_PORT, 0xb },
1944 { HPHW_BCPORT, HVERSION_REV_ANY_ID, IKE_MERCED_PORT, 0xc },
1945 { HPHW_BCPORT, HVERSION_REV_ANY_ID, REO_MERCED_PORT, 0xc },
1946 { HPHW_BCPORT, HVERSION_REV_ANY_ID, REOG_MERCED_PORT, 0xc },
1947 { HPHW_IOA, HVERSION_REV_ANY_ID, PLUTO_MCKINLEY_PORT, 0xc },
1948 { 0, }
1949};
1950
1951int sba_driver_callback(struct parisc_device *);
1952
1953static struct parisc_driver sba_driver = {
1954 .name = MODULE_NAME,
1955 .id_table = sba_tbl,
1956 .probe = sba_driver_callback,
1957};
1958
1959/*
1960** Determine if sba should claim this chip (return 0) or not (return 1).
1961** If so, initialize the chip and tell other partners in crime they
1962** have work to do.
1963*/
1964int
1965sba_driver_callback(struct parisc_device *dev)
1966{
1967 struct sba_device *sba_dev;
1968 u32 func_class;
1969 int i;
1970 char *version;
1971 void __iomem *sba_addr = ioremap(dev->hpa, SBA_FUNC_SIZE);
1972
1973 sba_dump_ranges(sba_addr);
1974
1975 /* Read HW Rev First */
1976 func_class = READ_REG(sba_addr + SBA_FCLASS);
1977
1978 if (IS_ASTRO(&dev->id)) {
1979 unsigned long fclass;
1980 static char astro_rev[]="Astro ?.?";
1981
1982 /* Astro is broken...Read HW Rev First */
1983 fclass = READ_REG(sba_addr);
1984
1985 astro_rev[6] = '1' + (char) (fclass & 0x7);
1986 astro_rev[8] = '0' + (char) ((fclass & 0x18) >> 3);
1987 version = astro_rev;
1988
1989 } else if (IS_IKE(&dev->id)) {
1990 static char ike_rev[] = "Ike rev ?";
1991 ike_rev[8] = '0' + (char) (func_class & 0xff);
1992 version = ike_rev;
1993 } else if (IS_PLUTO(&dev->id)) {
1994 static char pluto_rev[]="Pluto ?.?";
1995 pluto_rev[6] = '0' + (char) ((func_class & 0xf0) >> 4);
1996 pluto_rev[8] = '0' + (char) (func_class & 0x0f);
1997 version = pluto_rev;
1998 } else {
1999 static char reo_rev[] = "REO rev ?";
2000 reo_rev[8] = '0' + (char) (func_class & 0xff);
2001 version = reo_rev;
2002 }
2003
2004 if (!global_ioc_cnt) {
2005 global_ioc_cnt = count_parisc_driver(&sba_driver);
2006
2007 /* Astro and Pluto have one IOC per SBA */
2008		if ((!IS_ASTRO(&dev->id)) && (!IS_PLUTO(&dev->id)))
2009 global_ioc_cnt *= 2;
2010 }
2011
2012 printk(KERN_INFO "%s found %s at 0x%lx\n",
2013 MODULE_NAME, version, dev->hpa);
2014
2015 sba_dev = kmalloc(sizeof(struct sba_device), GFP_KERNEL);
2016 if (!sba_dev) {
2017 printk(KERN_ERR MODULE_NAME " - couldn't alloc sba_device\n");
2018 return -ENOMEM;
2019 }
2020
2021 parisc_set_drvdata(dev, sba_dev);
2022 memset(sba_dev, 0, sizeof(struct sba_device));
2023
2024 for(i=0; i<MAX_IOC; i++)
2025 spin_lock_init(&(sba_dev->ioc[i].res_lock));
2026
2027 sba_dev->dev = dev;
2028 sba_dev->hw_rev = func_class;
2029 sba_dev->iodc = &dev->id;
2030 sba_dev->name = dev->name;
2031 sba_dev->sba_hpa = sba_addr;
2032
2033 sba_get_pat_resources(sba_dev);
2034 sba_hw_init(sba_dev);
2035 sba_common_init(sba_dev);
2036
2037 hppa_dma_ops = &sba_ops;
2038
2039#ifdef CONFIG_PROC_FS
2040 if (IS_ASTRO(&dev->id)) {
2041 create_proc_info_entry("Astro", 0, proc_runway_root, sba_proc_info);
2042 } else if (IS_IKE(&dev->id)) {
2043 create_proc_info_entry("Ike", 0, proc_runway_root, sba_proc_info);
2044 } else if (IS_PLUTO(&dev->id)) {
2045 create_proc_info_entry("Pluto", 0, proc_mckinley_root, sba_proc_info);
2046 } else {
2047 create_proc_info_entry("Reo", 0, proc_runway_root, sba_proc_info);
2048 }
2049#if 0
2050 create_proc_info_entry("bitmap", 0, proc_runway_root, sba_resource_map);
2051#endif
2052#endif
2053 parisc_vmerge_boundary = IOVP_SIZE;
2054 parisc_vmerge_max_size = IOVP_SIZE * BITS_PER_LONG;
2055 parisc_has_iommu();
2056 return 0;
2057}
2058
2059/*
2060** One time initialization to let the world know the SBA was found.
2061** This is the only routine which is NOT static.
2062** Must be called exactly once before pci_init().
2063*/
2064void __init sba_init(void)
2065{
2066 register_parisc_driver(&sba_driver);
2067}
2068
2069
2070/**
2071 * sba_get_iommu - Assign the iommu pointer for the pci bus controller.
2072 * @pci_hba: The parisc device.
2073 *
2074 * Returns the appropriate IOMMU data for the given parisc PCI controller.
2075 * This is cached and used later for PCI DMA Mapping.
2076 */
2077void * sba_get_iommu(struct parisc_device *pci_hba)
2078{
2079 struct parisc_device *sba_dev = parisc_parent(pci_hba);
2080 struct sba_device *sba = sba_dev->dev.driver_data;
2081 char t = sba_dev->id.hw_type;
2082 int iocnum = (pci_hba->hw_path >> 3); /* rope # */
2083
2084 WARN_ON((t != HPHW_IOA) && (t != HPHW_BCPORT));
2085
2086 return &(sba->ioc[iocnum]);
2087}
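/*
** Example with a hypothetical path: a PCI controller at hw_path 14
** sits on rope 14; 14 >> 3 == 1 selects ioc[1], which only exists on
** the two-IOC chips (Ike/REO).  Astro and Pluto ropes are numbered
** 0-7, so they always resolve to ioc[0].
*/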
2088
2089
2090/**
2091 * sba_directed_lmmio - return first directed LMMIO range routed to rope
2092 * @pci_hba: The parisc device.
2093 * @r: resource PCI host controller wants start/end fields assigned.
2094 *
2095 * For the given parisc PCI controller, determine if any direct ranges
2096 * are routed down the corresponding rope.
2097 */
2098void sba_directed_lmmio(struct parisc_device *pci_hba, struct resource *r)
2099{
2100 struct parisc_device *sba_dev = parisc_parent(pci_hba);
2101 struct sba_device *sba = sba_dev->dev.driver_data;
2102 char t = sba_dev->id.hw_type;
2103 int i;
2104 int rope = (pci_hba->hw_path & (ROPES_PER_IOC-1)); /* rope # */
2105
2106 if ((t!=HPHW_IOA) && (t!=HPHW_BCPORT))
2107 BUG();
2108
2109 r->start = r->end = 0;
2110
2111 /* Astro has 4 directed ranges. Not sure about Ike/Pluto/et al */
2112 for (i=0; i<4; i++) {
2113 int base, size;
2114 void __iomem *reg = sba->sba_hpa + i*0x18;
2115
2116 base = READ_REG32(reg + LMMIO_DIRECT0_BASE);
2117 if ((base & 1) == 0)
2118 continue; /* not enabled */
2119
2120 size = READ_REG32(reg + LMMIO_DIRECT0_ROUTE);
2121
2122 if ((size & (ROPES_PER_IOC-1)) != rope)
2123 continue; /* directed down different rope */
2124
2125 r->start = (base & ~1UL) | PCI_F_EXTEND;
2126 size = ~ READ_REG32(reg + LMMIO_DIRECT0_MASK);
2127 r->end = r->start + size;
2128 }
2129}
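/*
** Illustrative (made-up) register values: DIRECT0_BASE = 0xf8000001
** means "enabled, base 0xf8000000".  If DIRECT0_ROUTE & 7 matches
** this rope, DIRECT0_MASK = 0xff000000 yields size = ~mask =
** 0x00ffffff, i.e. a 16MB window at PCI_F_EXTEND | 0xf8000000.
*/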
2130
2131
2132/**
2133 * sba_distributed_lmmio - return portion of distributed LMMIO range
2134 * @pci_hba: The parisc device.
2135 * @r: resource PCI host controller wants start/end fields assigned.
2136 *
2137 * For the given parisc PCI controller, return portion of distributed LMMIO
2138 * range. The distributed LMMIO is always present and it's just a question
2139 * of the base address and size of the range.
2140 */
2141void sba_distributed_lmmio(struct parisc_device *pci_hba, struct resource *r)
2142{
2143 struct parisc_device *sba_dev = parisc_parent(pci_hba);
2144 struct sba_device *sba = sba_dev->dev.driver_data;
2145 char t = sba_dev->id.hw_type;
2146 int base, size;
2147 int rope = (pci_hba->hw_path & (ROPES_PER_IOC-1)); /* rope # */
2148
2149 if ((t!=HPHW_IOA) && (t!=HPHW_BCPORT))
2150 BUG();
2151
2152 r->start = r->end = 0;
2153
2154 base = READ_REG32(sba->sba_hpa + LMMIO_DIST_BASE);
2155 if ((base & 1) == 0) {
2156 BUG(); /* Gah! Distr Range wasn't enabled! */
2157 return;
2158 }
2159
2160 r->start = (base & ~1UL) | PCI_F_EXTEND;
2161
2162 size = (~READ_REG32(sba->sba_hpa + LMMIO_DIST_MASK)) / ROPES_PER_IOC;
2163 r->start += rope * (size + 1); /* adjust base for this rope */
2164 r->end = r->start + size;
2165}
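/*
** Worked example (made-up registers): DIST_BASE = 0xfa000001 (enabled,
** base 0xfa000000) with DIST_MASK = 0xfe000000 describes a 32MB
** window.  size = 0x01ffffff / 8 ropes = 0x003fffff, so rope 3 gets
** r->start = 0xfa000000 + 3 * 0x400000 = 0xfac00000 (F-extended) and
** r->end = 0xfaffffff -- a 4MB slice per rope.
*/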
diff --git a/drivers/parisc/superio.c b/drivers/parisc/superio.c
new file mode 100644
index 000000000000..e0efed796b92
--- /dev/null
+++ b/drivers/parisc/superio.c
@@ -0,0 +1,508 @@
1/* National Semiconductor NS87560UBD Super I/O controller used in
2 * HP [BCJ]x000 workstations.
3 *
4 * This chip is a horrid piece of engineering, and National
5 * denies any knowledge of its existence. Thus no datasheet is
6 * available off www.national.com.
7 *
8 * (C) Copyright 2000 Linuxcare, Inc.
9 * (C) Copyright 2000 Linuxcare Canada, Inc.
10 * (C) Copyright 2000 Martin K. Petersen <mkp@linuxcare.com>
11 * (C) Copyright 2000 Alex deVries <alex@onefishtwo.ca>
12 * (C) Copyright 2001 John Marvin <jsm fc hp com>
13 * (C) Copyright 2003 Grant Grundler <grundler parisc-linux org>
14 *
15 * This program is free software; you can redistribute it and/or
16 * modify it under the terms of the GNU General Public License as
17 * published by the Free Software Foundation; either version 2 of
18 * the License, or (at your option) any later version.
19 *
20 * The initial version of this is by Martin Petersen. Alex deVries
21 * has spent a bit of time trying to coax it into working.
22 *
23 * Major changes to get basic interrupt infrastructure working to
24 * hopefully be able to support all SuperIO devices. Currently
25 * works with serial. -- John Marvin <jsm@fc.hp.com>
26 */
27
28
29/* NOTES:
30 *
31 * Function 0 is an IDE controller. It is identical to a PC87415 IDE
32 * controller (and identifies itself as such).
33 *
34 * Function 1 is a "Legacy I/O" controller. Under this function is a
35 * whole mess of legacy I/O peripherals. Of course, HP hasn't enabled
36 * all the functionality in hardware, but the following is available:
37 *
38 * Two 16550A compatible serial controllers
39 * An IEEE 1284 compatible parallel port
40 * A floppy disk controller
41 *
42 * Function 2 is a USB controller.
43 *
44 * We must be incredibly careful during initialization. Since all
45 * interrupts are routed through function 1 (which is not allowed by
46 * the PCI spec), we need to program the PICs on the legacy I/O port
47 * *before* we attempt to set up IDE and USB. @#$!&
48 *
49 * According to HP, devices are only enabled by firmware if they have
50 * a physical device connected.
51 *
52 * Configuration register bits:
53 * 0x5A: FDC, SP1, IDE1, SP2, IDE2, PAR, Reserved, P92
54 * 0x5B: RTC, 8259, 8254, DMA1, DMA2, KBC, P61, APM
55 *
56 */
57
58#include <linux/errno.h>
59#include <linux/init.h>
60#include <linux/module.h>
61#include <linux/types.h>
62#include <linux/interrupt.h>
63#include <linux/ioport.h>
64#include <linux/serial.h>
65#include <linux/pci.h>
66#include <linux/parport.h>
67#include <linux/parport_pc.h>
68#include <linux/termios.h>
69#include <linux/tty.h>
70#include <linux/serial_core.h>
71#include <linux/delay.h>
72
73#include <asm/io.h>
74#include <asm/hardware.h>
75#include <asm/superio.h>
76
77static struct superio_device sio_dev;
78
79
80#undef DEBUG_SUPERIO_INIT
81
82#ifdef DEBUG_SUPERIO_INIT
83#define DBG_INIT(x...) printk(x)
84#else
85#define DBG_INIT(x...)
86#endif
87
88static irqreturn_t
89superio_interrupt(int parent_irq, void *devp, struct pt_regs *regs)
90{
91 u8 results;
92 u8 local_irq;
93
94 /* Poll the 8259 to see if there's an interrupt. */
95 outb (OCW3_POLL,IC_PIC1+0);
96
97 results = inb(IC_PIC1+0);
98
99 /*
100 * Bit 7: 1 = active Interrupt; 0 = no Interrupt pending
101 * Bits 6-3: zero
102 * Bits 2-0: highest priority, active requesting interrupt ID (0-7)
103 */
104 if ((results & 0x80) == 0) {
105		/* I suspect "spurious" interrupts are from unmasking an IRQ.
106		 * Nothing is actually pending in that case, so there is no
107		 * device handler to call; report the interrupt as not ours.
108		 */
109 return IRQ_NONE;
110 }
111
112 /* Check to see which device is interrupting */
113 local_irq = results & 0x0f;
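	/* For example, a poll result of 0x83 decodes to local_irq 3,
	 * which the CFG_IR_SER programming in superio_init() routes
	 * to serial port 1.
	 */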
114
115 if (local_irq == 2 || local_irq > 7) {
116 printk(KERN_ERR "SuperIO: slave interrupted!\n");
117 return IRQ_HANDLED;
118 }
119
120 if (local_irq == 7) {
121
122 /* Could be spurious. Check in service bits */
123
124 outb(OCW3_ISR,IC_PIC1+0);
125 results = inb(IC_PIC1+0);
126 if ((results & 0x80) == 0) { /* if ISR7 not set: spurious */
127 printk(KERN_WARNING "SuperIO: spurious interrupt!\n");
128 return IRQ_HANDLED;
129 }
130 }
131
132 /* Call the appropriate device's interrupt */
133 __do_IRQ(local_irq, regs);
134
135 /* set EOI - forces a new interrupt if a lower priority device
136 * still needs service.
137 */
138 outb((OCW2_SEOI|local_irq),IC_PIC1 + 0);
139 return IRQ_HANDLED;
140}
141
142/* Initialize Super I/O device */
143
144static void __devinit
145superio_init(struct superio_device *sio)
146{
147 struct pci_dev *pdev = sio->lio_pdev;
148 u16 word;
149
150 if (sio->suckyio_irq_enabled)
151 return;
152
153 if (!pdev) BUG();
154 if (!sio->usb_pdev) BUG();
155
156 /* use the IRQ iosapic found for USB INT D... */
157 pdev->irq = sio->usb_pdev->irq;
158
159 /* ...then properly fixup the USB to point at suckyio PIC */
160 sio->usb_pdev->irq = superio_fixup_irq(sio->usb_pdev);
161
162	printk (KERN_INFO "SuperIO: Found NS87560 Legacy I/O device at %s (IRQ %i)\n",
163		pci_name(pdev), pdev->irq);
164
165 pci_read_config_dword (pdev, SIO_SP1BAR, &sio->sp1_base);
166 sio->sp1_base &= ~1;
167 printk (KERN_INFO "SuperIO: Serial port 1 at 0x%x\n", sio->sp1_base);
168
169 pci_read_config_dword (pdev, SIO_SP2BAR, &sio->sp2_base);
170 sio->sp2_base &= ~1;
171 printk (KERN_INFO "SuperIO: Serial port 2 at 0x%x\n", sio->sp2_base);
172
173 pci_read_config_dword (pdev, SIO_PPBAR, &sio->pp_base);
174 sio->pp_base &= ~1;
175 printk (KERN_INFO "SuperIO: Parallel port at 0x%x\n", sio->pp_base);
176
177 pci_read_config_dword (pdev, SIO_FDCBAR, &sio->fdc_base);
178 sio->fdc_base &= ~1;
179 printk (KERN_INFO "SuperIO: Floppy controller at 0x%x\n", sio->fdc_base);
180 pci_read_config_dword (pdev, SIO_ACPIBAR, &sio->acpi_base);
181 sio->acpi_base &= ~1;
182 printk (KERN_INFO "SuperIO: ACPI at 0x%x\n", sio->acpi_base);
183
184 request_region (IC_PIC1, 0x1f, "pic1");
185 request_region (IC_PIC2, 0x1f, "pic2");
186 request_region (sio->acpi_base, 0x1f, "acpi");
187
188 /* Enable the legacy I/O function */
189 pci_read_config_word (pdev, PCI_COMMAND, &word);
190 word |= PCI_COMMAND_SERR | PCI_COMMAND_PARITY | PCI_COMMAND_IO;
191 pci_write_config_word (pdev, PCI_COMMAND, word);
192
193	pci_enable_device(pdev);
194	pci_set_master (pdev);
195
196 /*
197 * Next project is programming the onboard interrupt controllers.
198 * PDC hasn't done this for us, since it's using polled I/O.
199 *
200 * XXX Use dword writes to avoid bugs in Elroy or Suckyio Config
201 * space access. PCI is by nature a 32-bit bus and config
202 * space can be sensitive to that.
203 */
204
205 /* 0x64 - 0x67 :
206 DMA Rtg 2
207 DMA Rtg 3
208 DMA Chan Ctl
209 TRIGGER_1 == 0x82 USB & IDE level triggered, rest to edge
210 */
211 pci_write_config_dword (pdev, 0x64, 0x82000000U);
212
213 /* 0x68 - 0x6b :
214 TRIGGER_2 == 0x00 all edge triggered (not used)
215 CFG_IR_SER == 0x43 SerPort1 = IRQ3, SerPort2 = IRQ4
216 CFG_IR_PF == 0x65 ParPort = IRQ5, FloppyCtlr = IRQ6
217 CFG_IR_IDE == 0x07 IDE1 = IRQ7, reserved
218 */
219 pci_write_config_dword (pdev, TRIGGER_2, 0x07654300U);
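	/* The dword above packs the four byte-wide registers listed,
	 * least-significant byte first: 0x68 = 0x00 (TRIGGER_2),
	 * 0x69 = 0x43 (CFG_IR_SER), 0x6a = 0x65 (CFG_IR_PF),
	 * 0x6b = 0x07 (CFG_IR_IDE) -- hence the value 0x07654300.
	 */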
220
221 /* 0x6c - 0x6f :
222 CFG_IR_INTAB == 0x00
223 CFG_IR_INTCD == 0x10 USB = IRQ1
224 CFG_IR_PS2 == 0x00
225 CFG_IR_FXBUS == 0x00
226 */
227 pci_write_config_dword (pdev, CFG_IR_INTAB, 0x00001000U);
228
229 /* 0x70 - 0x73 :
230 CFG_IR_USB == 0x00 not used. USB is connected to INTD.
231 CFG_IR_ACPI == 0x00 not used.
232 DMA Priority == 0x4c88 Power on default value. NFC.
233 */
234 pci_write_config_dword (pdev, CFG_IR_USB, 0x4c880000U);
235
236 /* PIC1 Initialization Command Word register programming */
237 outb (0x11,IC_PIC1+0); /* ICW1: ICW4 write req | ICW1 */
238 outb (0x00,IC_PIC1+1); /* ICW2: interrupt vector table - not used */
239 outb (0x04,IC_PIC1+1); /* ICW3: Cascade */
240 outb (0x01,IC_PIC1+1); /* ICW4: x86 mode */
241
242 /* PIC1 Program Operational Control Words */
243 outb (0xff,IC_PIC1+1); /* OCW1: Mask all interrupts */
244 outb (0xc2,IC_PIC1+0); /* OCW2: priority (3-7,0-2) */
245
246 /* PIC2 Initialization Command Word register programming */
247 outb (0x11,IC_PIC2+0); /* ICW1: ICW4 write req | ICW1 */
248 outb (0x00,IC_PIC2+1); /* ICW2: N/A */
249 outb (0x02,IC_PIC2+1); /* ICW3: Slave ID code */
250 outb (0x01,IC_PIC2+1); /* ICW4: x86 mode */
251
252 /* Program Operational Control Words */
253 outb (0xff,IC_PIC1+1); /* OCW1: Mask all interrupts */
254 outb (0x68,IC_PIC1+0); /* OCW3: OCW3 select | ESMM | SMM */
255
256 /* Write master mask reg */
257 outb (0xff,IC_PIC1+1);
258
259 /* Setup USB power regulation */
260 outb(1, sio->acpi_base + USB_REG_CR);
261 if (inb(sio->acpi_base + USB_REG_CR) & 1)
262 printk(KERN_INFO "SuperIO: USB regulator enabled\n");
263 else
264 printk(KERN_ERR "USB regulator not initialized!\n");
265
266 if (request_irq(pdev->irq, superio_interrupt, SA_INTERRUPT,
267 "SuperIO", (void *)sio)) {
268
269 printk(KERN_ERR "SuperIO: could not get irq\n");
270 BUG();
271 return;
272 }
273
274 sio->suckyio_irq_enabled = 1;
275}
276
277
278static void superio_disable_irq(unsigned int irq)
279{
280 u8 r8;
281
282 if ((irq < 1) || (irq == 2) || (irq > 7)) {
283		printk(KERN_ERR "SuperIO: Illegal irq number (%d).\n", irq);
284 BUG();
285 return;
286 }
287
288 /* Mask interrupt */
289
290 r8 = inb(IC_PIC1+1);
291 r8 |= (1 << irq);
292 outb (r8,IC_PIC1+1);
293}
294
295static void superio_enable_irq(unsigned int irq)
296{
297 u8 r8;
298
299 if ((irq < 1) || (irq == 2) || (irq > 7)) {
300 printk(KERN_ERR "SuperIO: Illegal irq number (%d).\n", irq);
301 BUG();
302 return;
303 }
304
305 /* Unmask interrupt */
306 r8 = inb(IC_PIC1+1);
307 r8 &= ~(1 << irq);
308 outb (r8,IC_PIC1+1);
309}
310
311static unsigned int superio_startup_irq(unsigned int irq)
312{
313 superio_enable_irq(irq);
314 return 0;
315}
316
317static struct hw_interrupt_type superio_interrupt_type = {
318 .typename = "SuperIO",
319 .startup = superio_startup_irq,
320 .shutdown = superio_disable_irq,
321 .enable = superio_enable_irq,
322 .disable = superio_disable_irq,
323 .ack = no_ack_irq,
324 .end = no_end_irq,
325};
326
327#ifdef DEBUG_SUPERIO_INIT
328static unsigned short expected_device[3] = {
329 PCI_DEVICE_ID_NS_87415,
330 PCI_DEVICE_ID_NS_87560_LIO,
331 PCI_DEVICE_ID_NS_87560_USB
332};
333#endif
334
335int superio_fixup_irq(struct pci_dev *pcidev)
336{
337 int local_irq, i;
338
339#ifdef DEBUG_SUPERIO_INIT
340 int fn;
341 fn = PCI_FUNC(pcidev->devfn);
342
343 /* Verify the function number matches the expected device id. */
344 if (expected_device[fn] != pcidev->device) {
345 BUG();
346 return -1;
347 }
348 printk("superio_fixup_irq(%s) ven 0x%x dev 0x%x from %p\n",
349 pci_name(pcidev),
350 pcidev->vendor, pcidev->device,
351 __builtin_return_address(0));
352#endif
353
354 for (i = 0; i < 16; i++) {
355 irq_desc[i].handler = &superio_interrupt_type;
356 }
357
358 /*
359 * We don't allocate a SuperIO irq for the legacy IO function,
360 * since it is a "bridge". Instead, we will allocate irq's for
361 * each legacy device as they are initialized.
362 */
363
364 switch(pcidev->device) {
365 case PCI_DEVICE_ID_NS_87415: /* Function 0 */
366 local_irq = IDE_IRQ;
367 break;
368 case PCI_DEVICE_ID_NS_87560_LIO: /* Function 1 */
369 sio_dev.lio_pdev = pcidev; /* save for superio_init() */
370 return -1;
371 case PCI_DEVICE_ID_NS_87560_USB: /* Function 2 */
372 sio_dev.usb_pdev = pcidev; /* save for superio_init() */
373 local_irq = USB_IRQ;
374 break;
375 default:
376 local_irq = -1;
377 BUG();
378 break;
379 }
380
381 return local_irq;
382}
383
384static struct uart_port serial[] = {
385 {
386 .iotype = UPIO_PORT,
387 .line = 0,
388 .type = PORT_16550A,
389 .uartclk = 115200*16,
390 .fifosize = 16,
391 },
392 {
393 .iotype = UPIO_PORT,
394 .line = 1,
395 .type = PORT_16550A,
396 .uartclk = 115200*16,
397 .fifosize = 16,
398 }
399};
400
401static void __devinit superio_serial_init(void)
402{
403#ifdef CONFIG_SERIAL_8250
404 int retval;
405
406 serial[0].iobase = sio_dev.sp1_base;
407 serial[0].irq = SP1_IRQ;
408
409 retval = early_serial_setup(&serial[0]);
410 if (retval < 0) {
411 printk(KERN_WARNING "SuperIO: Register Serial #0 failed.\n");
412 return;
413 }
414
415 serial[1].iobase = sio_dev.sp2_base;
416 serial[1].irq = SP2_IRQ;
417 retval = early_serial_setup(&serial[1]);
418
419 if (retval < 0)
420 printk(KERN_WARNING "SuperIO: Register Serial #1 failed.\n");
421#endif /* CONFIG_SERIAL_8250 */
422}
423
424
425static void __devinit superio_parport_init(void)
426{
427#ifdef CONFIG_PARPORT_PC
428 if (!parport_pc_probe_port(sio_dev.pp_base,
429 0 /*base_hi*/,
430 PAR_IRQ,
431 PARPORT_DMA_NONE /* dma */,
432 NULL /*struct pci_dev* */) )
433
434 printk(KERN_WARNING "SuperIO: Probing parallel port failed.\n");
435#endif /* CONFIG_PARPORT_PC */
436}
437
438
439static void superio_fixup_pci(struct pci_dev *pdev)
440{
441 u8 prog;
442
443 pdev->class |= 0x5;
444 pci_write_config_byte(pdev, PCI_CLASS_PROG, pdev->class);
445
446 pci_read_config_byte(pdev, PCI_CLASS_PROG, &prog);
447 printk("PCI: Enabled native mode for NS87415 (pif=0x%x)\n", prog);
448}
449DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_NS, PCI_DEVICE_ID_NS_87415, superio_fixup_pci);
450
451
452static int __devinit superio_probe(struct pci_dev *dev, const struct pci_device_id *id)
453{
454
455 /*
456 ** superio_probe(00:0e.0) ven 0x100b dev 0x2 sv 0x0 sd 0x0 class 0x1018a
457 ** superio_probe(00:0e.1) ven 0x100b dev 0xe sv 0x0 sd 0x0 class 0x68000
458 ** superio_probe(00:0e.2) ven 0x100b dev 0x12 sv 0x0 sd 0x0 class 0xc0310
459 */
460 DBG_INIT("superio_probe(%s) ven 0x%x dev 0x%x sv 0x%x sd 0x%x class 0x%x\n",
461 pci_name(dev),
462 dev->vendor, dev->device,
463 dev->subsystem_vendor, dev->subsystem_device,
464 dev->class);
465
466 superio_init(&sio_dev);
467
468 if (dev->device == PCI_DEVICE_ID_NS_87560_LIO) { /* Function 1 */
469 superio_parport_init();
470 superio_serial_init();
471 /* REVISIT XXX : superio_fdc_init() ? */
472 return 0;
473 } else if (dev->device == PCI_DEVICE_ID_NS_87415) { /* Function 0 */
474 DBG_INIT("superio_probe: ignoring IDE 87415\n");
475 } else if (dev->device == PCI_DEVICE_ID_NS_87560_USB) { /* Function 2 */
476 DBG_INIT("superio_probe: ignoring USB OHCI controller\n");
477 } else {
478 DBG_INIT("superio_probe: WTF? Fire Extinguisher?\n");
479 }
480
481 /* Let appropriate other driver claim this device. */
482 return -ENODEV;
483}
484
485static struct pci_device_id superio_tbl[] = {
486 { PCI_VENDOR_ID_NS, PCI_ANY_ID, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 },
487 { 0, }
488};
489
490static struct pci_driver superio_driver = {
491 .name = "SuperIO",
492 .id_table = superio_tbl,
493 .probe = superio_probe,
494};
495
496static int __init superio_modinit(void)
497{
498 return pci_register_driver(&superio_driver);
499}
500
501static void __exit superio_exit(void)
502{
503 pci_unregister_driver(&superio_driver);
504}
505
506
507module_init(superio_modinit);
508module_exit(superio_exit);
diff --git a/drivers/parisc/wax.c b/drivers/parisc/wax.c
new file mode 100644
index 000000000000..e547d7d024d8
--- /dev/null
+++ b/drivers/parisc/wax.c
@@ -0,0 +1,140 @@
1/*
2 * WAX Device Driver
3 *
4 * (c) Copyright 2000 The Puffin Group Inc.
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation; either version 2 of the License, or
9 * (at your option) any later version.
10 *
11 * by Helge Deller <deller@gmx.de>
12 */
13
14#include <linux/errno.h>
15#include <linux/init.h>
16#include <linux/interrupt.h>
17#include <linux/ioport.h>
18#include <linux/slab.h>
19#include <linux/module.h>
20#include <linux/types.h>
21
22#include <asm/io.h>
23#include <asm/hardware.h>
24
25#include "gsc.h"
26
27#define WAX_GSC_IRQ 7 /* Hardcoded Interrupt for GSC */
28
29static void wax_choose_irq(struct parisc_device *dev, void *ctrl)
30{
31 int irq;
32
33 switch (dev->id.sversion) {
34 case 0x73: irq = 1; break; /* i8042 General */
35 case 0x8c: irq = 6; break; /* Serial */
36 case 0x90: irq = 10; break; /* EISA */
37 default: return; /* Unknown */
38 }
39
40 gsc_asic_assign_irq(ctrl, irq, &dev->irq);
41
42 switch (dev->id.sversion) {
43 case 0x73: irq = 2; break; /* i8042 High-priority */
44 case 0x90: irq = 0; break; /* EISA NMI */
45 default: return; /* No secondary IRQ */
46 }
47
48 gsc_asic_assign_irq(ctrl, irq, &dev->aux_irq);
49}
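/* Worked example: the i8042 child (sversion 0x73) comes away with
 * dev->irq = 1 (general) and dev->aux_irq = 2 (high-priority), while
 * the EISA adapter (0x90) gets irq 10 plus aux_irq 0 for its NMI.
 */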
50
51static void __init
52wax_init_irq(struct gsc_asic *wax)
53{
54 unsigned long base = wax->hpa;
55
56 /* Wax-off */
57 gsc_writel(0x00000000, base+OFFSET_IMR);
58
59 /* clear pending interrupts */
60 gsc_readl(base+OFFSET_IRR);
61
62 /* We're not really convinced we want to reset the onboard
63 * devices. Firmware does it for us...
64 */
65
66 /* Resets */
67// gsc_writel(0xFFFFFFFF, base+0x1000); /* HIL */
68// gsc_writel(0xFFFFFFFF, base+0x2000); /* RS232-B on Wax */
69}
70
71int __init
72wax_init_chip(struct parisc_device *dev)
73{
74 struct gsc_asic *wax;
75 struct parisc_device *parent;
76 struct gsc_irq gsc_irq;
77 int ret;
78
79 wax = kmalloc(sizeof(*wax), GFP_KERNEL);
80 if (!wax)
81 return -ENOMEM;
82
83 wax->name = "wax";
84 wax->hpa = dev->hpa;
85
86 wax->version = 0; /* gsc_readb(wax->hpa+WAX_VER); */
87 printk(KERN_INFO "%s at 0x%lx found.\n", wax->name, wax->hpa);
88
89 /* Stop wax hissing for a bit */
90 wax_init_irq(wax);
91
92 /* the IRQ wax should use */
93 dev->irq = gsc_claim_irq(&gsc_irq, WAX_GSC_IRQ);
94 if (dev->irq < 0) {
95 printk(KERN_ERR "%s(): cannot get GSC irq\n",
96 __FUNCTION__);
97 kfree(wax);
98 return -EBUSY;
99 }
100
101 wax->eim = ((u32) gsc_irq.txn_addr) | gsc_irq.txn_data;
102
103 ret = request_irq(gsc_irq.irq, gsc_asic_intr, 0, "wax", wax);
104 if (ret < 0) {
105 kfree(wax);
106 return ret;
107 }
108
109 /* enable IRQ's for devices below WAX */
110 gsc_writel(wax->eim, wax->hpa + OFFSET_IAR);
111
112 /* Done init'ing, register this driver */
113 ret = gsc_common_setup(dev, wax);
114 if (ret) {
115 kfree(wax);
116 return ret;
117 }
118
119 gsc_fixup_irqs(dev, wax, wax_choose_irq);
120 /* On 715-class machines, Wax EISA is a sibling of Wax, not a child. */
121 parent = parisc_parent(dev);
122 if (parent->id.hw_type != HPHW_IOA) {
123 gsc_fixup_irqs(parent, wax, wax_choose_irq);
124 }
125
126 return ret;
127}
128
129static struct parisc_device_id wax_tbl[] = {
130 { HPHW_BA, HVERSION_REV_ANY_ID, HVERSION_ANY_ID, 0x0008e },
131 { 0, }
132};
133
134MODULE_DEVICE_TABLE(parisc, wax_tbl);
135
136struct parisc_driver wax_driver = {
137 .name = "wax",
138 .id_table = wax_tbl,
139 .probe = wax_init_chip,
140};