Diffstat (limited to 'drivers/ata/sata_inic162x.c')

-rw-r--r--  drivers/ata/sata_inic162x.c | 646
1 file changed, 417 insertions(+), 229 deletions(-)
diff --git a/drivers/ata/sata_inic162x.c b/drivers/ata/sata_inic162x.c
index d27bb9a2568..3ead02fe379 100644
--- a/drivers/ata/sata_inic162x.c
+++ b/drivers/ata/sata_inic162x.c
@@ -10,13 +10,33 @@
  * right.  Documentation is available at initio's website but it only
  * documents registers (not programming model).
  *
- * - ATA disks work.
- * - Hotplug works.
- * - ATAPI read works but burning doesn't.  This thing is really
- *   peculiar about ATAPI and I couldn't figure out how ATAPI PIO and
- *   ATAPI DMA WRITE should be programmed.  If you've got a clue, be
- *   my guest.
- * - Both STR and STD work.
+ * This driver has interesting history.  The first version was written
+ * from the documentation and a 2.4 IDE driver posted on a Taiwan
+ * company, which didn't use any IDMA features and couldn't handle
+ * LBA48.  The resulting driver couldn't handle LBA48 devices either
+ * making it pretty useless.
+ *
+ * After a while, initio picked the driver up, renamed it to
+ * sata_initio162x, updated it to use IDMA for ATA DMA commands and
+ * posted it on their website.  It only used ATA_PROT_DMA for IDMA and
+ * attaching both devices and issuing IDMA and !IDMA commands
+ * simultaneously broke it due to PIRQ masking interaction but it did
+ * show how to use the IDMA (ADMA + some initio specific twists)
+ * engine.
+ *
+ * Then, I picked up their changes again and here's the usable driver
+ * which uses IDMA for everything.  Everything works now including
+ * LBA48, CD/DVD burning, suspend/resume and hotplug.  There are some
+ * issues though.  Result TF is not reported properly, NCQ isn't
+ * supported yet and CD/DVD writing works with DMA assisted PIO
+ * protocol (which, for native SATA devices, shouldn't cause any
+ * noticeable difference).
+ *
+ * Anyways, so, here's finally a working driver for inic162x.  Enjoy!
+ *
+ * initio: If you guys wanna improve the driver regarding result TF
+ * access and other stuff, please feel free to contact me.  I'll be
+ * happy to assist.
  */
 
 #include <linux/kernel.h>
@@ -28,13 +48,19 @@
 #include <scsi/scsi_device.h>
 
 #define DRV_NAME	"sata_inic162x"
-#define DRV_VERSION	"0.3"
+#define DRV_VERSION	"0.4"
 
 enum {
-	MMIO_BAR		= 5,
+	MMIO_BAR_PCI		= 5,
+	MMIO_BAR_CARDBUS	= 1,
 
 	NR_PORTS		= 2,
 
+	IDMA_CPB_TBL_SIZE	= 4 * 32,
+
+	INIC_DMA_BOUNDARY	= 0xffffff,
+
+	HOST_ACTRL		= 0x08,
 	HOST_CTL		= 0x7c,
 	HOST_STAT		= 0x7e,
 	HOST_IRQ_STAT		= 0xbc,
@@ -43,22 +69,37 @@ enum {
 	PORT_SIZE		= 0x40,
 
 	/* registers for ATA TF operation */
-	PORT_TF			= 0x00,
-	PORT_ALT_STAT		= 0x08,
+	PORT_TF_DATA		= 0x00,
+	PORT_TF_FEATURE		= 0x01,
+	PORT_TF_NSECT		= 0x02,
+	PORT_TF_LBAL		= 0x03,
+	PORT_TF_LBAM		= 0x04,
+	PORT_TF_LBAH		= 0x05,
+	PORT_TF_DEVICE		= 0x06,
+	PORT_TF_COMMAND		= 0x07,
+	PORT_TF_ALT_STAT	= 0x08,
 	PORT_IRQ_STAT		= 0x09,
 	PORT_IRQ_MASK		= 0x0a,
 	PORT_PRD_CTL		= 0x0b,
 	PORT_PRD_ADDR		= 0x0c,
 	PORT_PRD_XFERLEN	= 0x10,
+	PORT_CPB_CPBLAR		= 0x18,
+	PORT_CPB_PTQFIFO	= 0x1c,
 
 	/* IDMA register */
 	PORT_IDMA_CTL		= 0x14,
+	PORT_IDMA_STAT		= 0x16,
+
+	PORT_RPQ_FIFO		= 0x1e,
+	PORT_RPQ_CNT		= 0x1f,
 
 	PORT_SCR		= 0x20,
 
 	/* HOST_CTL bits */
 	HCTL_IRQOFF		= (1 << 8),  /* global IRQ off */
-	HCTL_PWRDWN		= (1 << 13), /* power down PHYs */
+	HCTL_FTHD0		= (1 << 10), /* fifo threshold 0 */
+	HCTL_FTHD1		= (1 << 11), /* fifo threshold 1 */
+	HCTL_PWRDWN		= (1 << 12), /* power down PHYs */
 	HCTL_SOFTRST		= (1 << 13), /* global reset (no phy reset) */
 	HCTL_RPGSEL		= (1 << 15), /* register page select */
 
@@ -81,9 +122,7 @@ enum {
 	PIRQ_PENDING		= (1 << 7),  /* port IRQ pending (STAT only) */
 
 	PIRQ_ERR		= PIRQ_OFFLINE | PIRQ_ONLINE | PIRQ_FATAL,
-
-	PIRQ_MASK_DMA_READ	= PIRQ_REPLY | PIRQ_ATA,
-	PIRQ_MASK_OTHER		= PIRQ_REPLY | PIRQ_COMPLETE,
+	PIRQ_MASK_DEFAULT	= PIRQ_REPLY | PIRQ_ATA,
 	PIRQ_MASK_FREEZE	= 0xff,
 
 	/* PORT_PRD_CTL bits */
@@ -96,20 +135,104 @@ enum {
 	IDMA_CTL_RST_IDMA	= (1 << 5),  /* reset IDMA machinary */
 	IDMA_CTL_GO		= (1 << 7),  /* IDMA mode go */
 	IDMA_CTL_ATA_NIEN	= (1 << 8),  /* ATA IRQ disable */
+
+	/* PORT_IDMA_STAT bits */
+	IDMA_STAT_PERR		= (1 << 0),  /* PCI ERROR MODE */
+	IDMA_STAT_CPBERR	= (1 << 1),  /* ADMA CPB error */
+	IDMA_STAT_LGCY		= (1 << 3),  /* ADMA legacy */
+	IDMA_STAT_UIRQ		= (1 << 4),  /* ADMA unsolicited irq */
+	IDMA_STAT_STPD		= (1 << 5),  /* ADMA stopped */
+	IDMA_STAT_PSD		= (1 << 6),  /* ADMA pause */
+	IDMA_STAT_DONE		= (1 << 7),  /* ADMA done */
+
+	IDMA_STAT_ERR		= IDMA_STAT_PERR | IDMA_STAT_CPBERR,
+
+	/* CPB Control Flags */
+	CPB_CTL_VALID		= (1 << 0),  /* CPB valid */
+	CPB_CTL_QUEUED		= (1 << 1),  /* queued command */
+	CPB_CTL_DATA		= (1 << 2),  /* data, rsvd in datasheet */
+	CPB_CTL_IEN		= (1 << 3),  /* PCI interrupt enable */
+	CPB_CTL_DEVDIR		= (1 << 4),  /* device direction control */
+
+	/* CPB Response Flags */
+	CPB_RESP_DONE		= (1 << 0),  /* ATA command complete */
+	CPB_RESP_REL		= (1 << 1),  /* ATA release */
+	CPB_RESP_IGNORED	= (1 << 2),  /* CPB ignored */
+	CPB_RESP_ATA_ERR	= (1 << 3),  /* ATA command error */
+	CPB_RESP_SPURIOUS	= (1 << 4),  /* ATA spurious interrupt error */
+	CPB_RESP_UNDERFLOW	= (1 << 5),  /* APRD deficiency length error */
+	CPB_RESP_OVERFLOW	= (1 << 6),  /* APRD excess length error */
+	CPB_RESP_CPB_ERR	= (1 << 7),  /* CPB error flag */
+
+	/* PRD Control Flags */
+	PRD_DRAIN		= (1 << 1),  /* ignore data excess */
+	PRD_CDB			= (1 << 2),  /* atapi packet command pointer */
+	PRD_DIRECT_INTR		= (1 << 3),  /* direct interrupt */
+	PRD_DMA			= (1 << 4),  /* data transfer method */
+	PRD_WRITE		= (1 << 5),  /* data dir, rsvd in datasheet */
+	PRD_IOM			= (1 << 6),  /* io/memory transfer */
+	PRD_END			= (1 << 7),  /* APRD chain end */
 };
 
+/* Command Parameter Block */
+struct inic_cpb {
+	u8		resp_flags;	/* Response Flags */
+	u8		error;		/* ATA Error */
+	u8		status;		/* ATA Status */
+	u8		ctl_flags;	/* Control Flags */
+	__le32		len;		/* Total Transfer Length */
+	__le32		prd;		/* First PRD pointer */
+	u8		rsvd[4];
+	/* 16 bytes */
+	u8		feature;	/* ATA Feature */
+	u8		hob_feature;	/* ATA Ex. Feature */
+	u8		device;		/* ATA Device/Head */
+	u8		mirctl;		/* Mirror Control */
+	u8		nsect;		/* ATA Sector Count */
+	u8		hob_nsect;	/* ATA Ex. Sector Count */
+	u8		lbal;		/* ATA Sector Number */
+	u8		hob_lbal;	/* ATA Ex. Sector Number */
+	u8		lbam;		/* ATA Cylinder Low */
+	u8		hob_lbam;	/* ATA Ex. Cylinder Low */
+	u8		lbah;		/* ATA Cylinder High */
+	u8		hob_lbah;	/* ATA Ex. Cylinder High */
+	u8		command;	/* ATA Command */
+	u8		ctl;		/* ATA Control */
+	u8		slave_error;	/* Slave ATA Error */
+	u8		slave_status;	/* Slave ATA Status */
+	/* 32 bytes */
+} __packed;
+
+/* Physical Region Descriptor */
+struct inic_prd {
+	__le32		mad;		/* Physical Memory Address */
+	__le16		len;		/* Transfer Length */
+	u8		rsvd;
+	u8		flags;		/* Control Flags */
+} __packed;
+
+struct inic_pkt {
+	struct inic_cpb	cpb;
+	struct inic_prd	prd[LIBATA_MAX_PRD + 1];	/* + 1 for cdb */
+	u8		cdb[ATAPI_CDB_LEN];
+} __packed;
+
 struct inic_host_priv {
-	u16	cached_hctl;
+	void __iomem	*mmio_base;
+	u16		cached_hctl;
 };
 
 struct inic_port_priv {
-	u8	dfl_prdctl;
-	u8	cached_prdctl;
-	u8	cached_pirq_mask;
+	struct inic_pkt	*pkt;
+	dma_addr_t	pkt_dma;
+	u32		*cpb_tbl;
+	dma_addr_t	cpb_tbl_dma;
 };
 
 static struct scsi_host_template inic_sht = {
-	ATA_BMDMA_SHT(DRV_NAME),
+	ATA_BASE_SHT(DRV_NAME),
+	.sg_tablesize	= LIBATA_MAX_PRD,	/* maybe it can be larger? */
+	.dma_boundary	= INIC_DMA_BOUNDARY,
 };
 
 static const int scr_map[] = {
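[Annotation, not part of the patch] The CPB added above is a hardware-defined, byte-exact layout; the "16 bytes" / "32 bytes" markers inside the struct are load-bearing. A minimal standalone C11 sketch to sanity-check those sizes follows. The typedefs stand in for the kernel's u8/__le16/__le32, and the field grouping is compacted but byte-identical to the structs in the hunk:

/* Sanity-check sketch (illustration only): the packed layouts above
 * must hit the byte counts noted in their comments.
 */
#include <stddef.h>
#include <stdint.h>

typedef uint8_t  u8;
typedef uint16_t __le16;
typedef uint32_t __le32;

struct chk_cpb {
	u8	resp_flags, error, status, ctl_flags;
	__le32	len;
	__le32	prd;
	u8	rsvd[4];				/* 16 bytes so far */
	u8	feature, hob_feature, device, mirctl;
	u8	nsect, hob_nsect, lbal, hob_lbal;
	u8	lbam, hob_lbam, lbah, hob_lbah;
	u8	command, ctl, slave_error, slave_status;
} __attribute__((packed));

struct chk_prd {
	__le32	mad;
	__le16	len;
	u8	rsvd;
	u8	flags;
} __attribute__((packed));

_Static_assert(offsetof(struct chk_cpb, feature) == 16,
	       "ATA TF section must start at byte 16");
_Static_assert(sizeof(struct chk_cpb) == 32, "CPB must be 32 bytes");
_Static_assert(sizeof(struct chk_prd) == 8, "PRD must be 8 bytes");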
@@ -120,54 +243,34 @@ static const int scr_map[] = {
 
 static void __iomem *inic_port_base(struct ata_port *ap)
 {
-	return ap->host->iomap[MMIO_BAR] + ap->port_no * PORT_SIZE;
-}
-
-static void __inic_set_pirq_mask(struct ata_port *ap, u8 mask)
-{
-	void __iomem *port_base = inic_port_base(ap);
-	struct inic_port_priv *pp = ap->private_data;
+	struct inic_host_priv *hpriv = ap->host->private_data;
 
-	writeb(mask, port_base + PORT_IRQ_MASK);
-	pp->cached_pirq_mask = mask;
-}
-
-static void inic_set_pirq_mask(struct ata_port *ap, u8 mask)
-{
-	struct inic_port_priv *pp = ap->private_data;
-
-	if (pp->cached_pirq_mask != mask)
-		__inic_set_pirq_mask(ap, mask);
+	return hpriv->mmio_base + ap->port_no * PORT_SIZE;
 }
 
 static void inic_reset_port(void __iomem *port_base)
 {
 	void __iomem *idma_ctl = port_base + PORT_IDMA_CTL;
-	u16 ctl;
 
-	ctl = readw(idma_ctl);
-	ctl &= ~(IDMA_CTL_RST_IDMA | IDMA_CTL_ATA_NIEN | IDMA_CTL_GO);
+	/* stop IDMA engine */
+	readw(idma_ctl); /* flush */
+	msleep(1);
 
 	/* mask IRQ and assert reset */
-	writew(ctl | IDMA_CTL_RST_IDMA | IDMA_CTL_ATA_NIEN, idma_ctl);
+	writew(IDMA_CTL_RST_IDMA, idma_ctl);
 	readw(idma_ctl); /* flush */
-
-	/* give it some time */
 	msleep(1);
 
 	/* release reset */
-	writew(ctl | IDMA_CTL_ATA_NIEN, idma_ctl);
+	writew(0, idma_ctl);
 
 	/* clear irq */
 	writeb(0xff, port_base + PORT_IRQ_STAT);
-
-	/* reenable ATA IRQ, turn off IDMA mode */
-	writew(ctl, idma_ctl);
 }
 
 static int inic_scr_read(struct ata_port *ap, unsigned sc_reg, u32 *val)
 {
-	void __iomem *scr_addr = ap->ioaddr.scr_addr;
+	void __iomem *scr_addr = inic_port_base(ap) + PORT_SCR;
 	void __iomem *addr;
 
 	if (unlikely(sc_reg >= ARRAY_SIZE(scr_map)))
@@ -184,120 +287,126 @@ static int inic_scr_read(struct ata_port *ap, unsigned sc_reg, u32 *val)
 
 static int inic_scr_write(struct ata_port *ap, unsigned sc_reg, u32 val)
 {
-	void __iomem *scr_addr = ap->ioaddr.scr_addr;
-	void __iomem *addr;
+	void __iomem *scr_addr = inic_port_base(ap) + PORT_SCR;
 
 	if (unlikely(sc_reg >= ARRAY_SIZE(scr_map)))
 		return -EINVAL;
 
-	addr = scr_addr + scr_map[sc_reg] * 4;
 	writel(val, scr_addr + scr_map[sc_reg] * 4);
 	return 0;
 }
 
-/*
- * In TF mode, inic162x is very similar to SFF device.  TF registers
- * function the same.  DMA engine behaves similary using the same PRD
- * format as BMDMA but different command register, interrupt and event
- * notification methods are used.  The following inic_bmdma_*()
- * functions do the impedance matching.
- */
-static void inic_bmdma_setup(struct ata_queued_cmd *qc)
+static void inic_stop_idma(struct ata_port *ap)
 {
-	struct ata_port *ap = qc->ap;
-	struct inic_port_priv *pp = ap->private_data;
 	void __iomem *port_base = inic_port_base(ap);
-	int rw = qc->tf.flags & ATA_TFLAG_WRITE;
-
-	/* make sure device sees PRD table writes */
-	wmb();
-
-	/* load transfer length */
-	writel(qc->nbytes, port_base + PORT_PRD_XFERLEN);
-
-	/* turn on DMA and specify data direction */
-	pp->cached_prdctl = pp->dfl_prdctl | PRD_CTL_DMAEN;
-	if (!rw)
-		pp->cached_prdctl |= PRD_CTL_WR;
-	writeb(pp->cached_prdctl, port_base + PORT_PRD_CTL);
 
-	/* issue r/w command */
-	ap->ops->sff_exec_command(ap, &qc->tf);
+	readb(port_base + PORT_RPQ_FIFO);
+	readb(port_base + PORT_RPQ_CNT);
+	writew(0, port_base + PORT_IDMA_CTL);
 }
 
-static void inic_bmdma_start(struct ata_queued_cmd *qc)
+static void inic_host_err_intr(struct ata_port *ap, u8 irq_stat, u16 idma_stat)
 {
-	struct ata_port *ap = qc->ap;
+	struct ata_eh_info *ehi = &ap->link.eh_info;
 	struct inic_port_priv *pp = ap->private_data;
-	void __iomem *port_base = inic_port_base(ap);
+	struct inic_cpb *cpb = &pp->pkt->cpb;
+	bool freeze = false;
 
-	/* start host DMA transaction */
-	pp->cached_prdctl |= PRD_CTL_START;
-	writeb(pp->cached_prdctl, port_base + PORT_PRD_CTL);
-}
+	ata_ehi_clear_desc(ehi);
+	ata_ehi_push_desc(ehi, "irq_stat=0x%x idma_stat=0x%x",
+			  irq_stat, idma_stat);
 
-static void inic_bmdma_stop(struct ata_queued_cmd *qc)
-{
-	struct ata_port *ap = qc->ap;
-	struct inic_port_priv *pp = ap->private_data;
-	void __iomem *port_base = inic_port_base(ap);
+	inic_stop_idma(ap);
 
-	/* stop DMA engine */
-	writeb(pp->dfl_prdctl, port_base + PORT_PRD_CTL);
-}
+	if (irq_stat & (PIRQ_OFFLINE | PIRQ_ONLINE)) {
+		ata_ehi_push_desc(ehi, "hotplug");
+		ata_ehi_hotplugged(ehi);
+		freeze = true;
+	}
 
-static u8 inic_bmdma_status(struct ata_port *ap)
-{
-	/* event is already verified by the interrupt handler */
-	return ATA_DMA_INTR;
+	if (idma_stat & IDMA_STAT_PERR) {
+		ata_ehi_push_desc(ehi, "PCI error");
+		freeze = true;
+	}
+
+	if (idma_stat & IDMA_STAT_CPBERR) {
+		ata_ehi_push_desc(ehi, "CPB error");
+
+		if (cpb->resp_flags & CPB_RESP_IGNORED) {
+			__ata_ehi_push_desc(ehi, " ignored");
+			ehi->err_mask |= AC_ERR_INVALID;
+			freeze = true;
+		}
+
+		if (cpb->resp_flags & CPB_RESP_ATA_ERR)
+			ehi->err_mask |= AC_ERR_DEV;
+
+		if (cpb->resp_flags & CPB_RESP_SPURIOUS) {
+			__ata_ehi_push_desc(ehi, " spurious-intr");
+			ehi->err_mask |= AC_ERR_HSM;
+			freeze = true;
+		}
+
+		if (cpb->resp_flags &
+		    (CPB_RESP_UNDERFLOW | CPB_RESP_OVERFLOW)) {
+			__ata_ehi_push_desc(ehi, " data-over/underflow");
+			ehi->err_mask |= AC_ERR_HSM;
+			freeze = true;
+		}
+	}
+
+	if (freeze)
+		ata_port_freeze(ap);
+	else
+		ata_port_abort(ap);
 }
 
 static void inic_host_intr(struct ata_port *ap)
 {
 	void __iomem *port_base = inic_port_base(ap);
-	struct ata_eh_info *ehi = &ap->link.eh_info;
+	struct ata_queued_cmd *qc = ata_qc_from_tag(ap, ap->link.active_tag);
 	u8 irq_stat;
+	u16 idma_stat;
 
-	/* fetch and clear irq */
+	/* read and clear IRQ status */
 	irq_stat = readb(port_base + PORT_IRQ_STAT);
 	writeb(irq_stat, port_base + PORT_IRQ_STAT);
+	idma_stat = readw(port_base + PORT_IDMA_STAT);
 
-	if (likely(!(irq_stat & PIRQ_ERR))) {
-		struct ata_queued_cmd *qc =
-			ata_qc_from_tag(ap, ap->link.active_tag);
+	if (unlikely((irq_stat & PIRQ_ERR) || (idma_stat & IDMA_STAT_ERR)))
+		inic_host_err_intr(ap, irq_stat, idma_stat);
 
-		if (unlikely(!qc || (qc->tf.flags & ATA_TFLAG_POLLING))) {
-			ap->ops->sff_check_status(ap); /* clear ATA interrupt */
-			return;
-		}
+	if (unlikely(!qc))
+		goto spurious;
 
-		if (likely(ata_sff_host_intr(ap, qc)))
-			return;
+	if (likely(idma_stat & IDMA_STAT_DONE)) {
+		inic_stop_idma(ap);
 
-		ap->ops->sff_check_status(ap); /* clear ATA interrupt */
-		ata_port_printk(ap, KERN_WARNING, "unhandled "
-				"interrupt, irq_stat=%x\n", irq_stat);
+		/* Depending on circumstances, device error
+		 * isn't reported by IDMA, check it explicitly.
+		 */
+		if (unlikely(readb(port_base + PORT_TF_COMMAND) &
+			     (ATA_DF | ATA_ERR)))
+			qc->err_mask |= AC_ERR_DEV;
+
+		ata_qc_complete(qc);
 		return;
 	}
 
-	/* error */
-	ata_ehi_push_desc(ehi, "irq_stat=0x%x", irq_stat);
-
-	if (irq_stat & (PIRQ_OFFLINE | PIRQ_ONLINE)) {
-		ata_ehi_hotplugged(ehi);
-		ata_port_freeze(ap);
-	} else
-		ata_port_abort(ap);
+ spurious:
+	ata_port_printk(ap, KERN_WARNING, "unhandled interrupt: "
+			"cmd=0x%x irq_stat=0x%x idma_stat=0x%x\n",
+			qc ? qc->tf.command : 0xff, irq_stat, idma_stat);
 }
 
 static irqreturn_t inic_interrupt(int irq, void *dev_instance)
 {
 	struct ata_host *host = dev_instance;
-	void __iomem *mmio_base = host->iomap[MMIO_BAR];
+	struct inic_host_priv *hpriv = host->private_data;
 	u16 host_irq_stat;
 	int i, handled = 0;;
 
-	host_irq_stat = readw(mmio_base + HOST_IRQ_STAT);
+	host_irq_stat = readw(hpriv->mmio_base + HOST_IRQ_STAT);
 
 	if (unlikely(!(host_irq_stat & HIRQ_GLOBAL)))
 		goto out;
@@ -327,60 +436,173 @@ static irqreturn_t inic_interrupt(int irq, void *dev_instance)
 	return IRQ_RETVAL(handled);
 }
 
+static int inic_check_atapi_dma(struct ata_queued_cmd *qc)
+{
+	/* For some reason ATAPI_PROT_DMA doesn't work for some
+	 * commands including writes and other misc ops.  Use PIO
+	 * protocol instead, which BTW is driven by the DMA engine
+	 * anyway, so it shouldn't make much difference for native
+	 * SATA devices.
+	 */
+	if (atapi_cmd_type(qc->cdb[0]) == READ)
+		return 0;
+	return 1;
+}
+
+static void inic_fill_sg(struct inic_prd *prd, struct ata_queued_cmd *qc)
+{
+	struct scatterlist *sg;
+	unsigned int si;
+	u8 flags = 0;
+
+	if (qc->tf.flags & ATA_TFLAG_WRITE)
+		flags |= PRD_WRITE;
+
+	if (ata_is_dma(qc->tf.protocol))
+		flags |= PRD_DMA;
+
+	for_each_sg(qc->sg, sg, qc->n_elem, si) {
+		prd->mad = cpu_to_le32(sg_dma_address(sg));
+		prd->len = cpu_to_le16(sg_dma_len(sg));
+		prd->flags = flags;
+		prd++;
+	}
+
+	WARN_ON(!si);
+	prd[-1].flags |= PRD_END;
+}
+
+static void inic_qc_prep(struct ata_queued_cmd *qc)
+{
+	struct inic_port_priv *pp = qc->ap->private_data;
+	struct inic_pkt *pkt = pp->pkt;
+	struct inic_cpb *cpb = &pkt->cpb;
+	struct inic_prd *prd = pkt->prd;
+	bool is_atapi = ata_is_atapi(qc->tf.protocol);
+	bool is_data = ata_is_data(qc->tf.protocol);
+	unsigned int cdb_len = 0;
+
+	VPRINTK("ENTER\n");
+
+	if (is_atapi)
+		cdb_len = qc->dev->cdb_len;
+
+	/* prepare packet, based on initio driver */
+	memset(pkt, 0, sizeof(struct inic_pkt));
+
+	cpb->ctl_flags = CPB_CTL_VALID | CPB_CTL_IEN;
+	if (is_atapi || is_data)
+		cpb->ctl_flags |= CPB_CTL_DATA;
+
+	cpb->len = cpu_to_le32(qc->nbytes + cdb_len);
+	cpb->prd = cpu_to_le32(pp->pkt_dma + offsetof(struct inic_pkt, prd));
+
+	cpb->device = qc->tf.device;
+	cpb->feature = qc->tf.feature;
+	cpb->nsect = qc->tf.nsect;
+	cpb->lbal = qc->tf.lbal;
+	cpb->lbam = qc->tf.lbam;
+	cpb->lbah = qc->tf.lbah;
+
+	if (qc->tf.flags & ATA_TFLAG_LBA48) {
+		cpb->hob_feature = qc->tf.hob_feature;
+		cpb->hob_nsect = qc->tf.hob_nsect;
+		cpb->hob_lbal = qc->tf.hob_lbal;
+		cpb->hob_lbam = qc->tf.hob_lbam;
+		cpb->hob_lbah = qc->tf.hob_lbah;
+	}
+
+	cpb->command = qc->tf.command;
+	/* don't load ctl - dunno why.  it's like that in the initio driver */
+
+	/* setup PRD for CDB */
+	if (is_atapi) {
+		memcpy(pkt->cdb, qc->cdb, ATAPI_CDB_LEN);
+		prd->mad = cpu_to_le32(pp->pkt_dma +
+				       offsetof(struct inic_pkt, cdb));
+		prd->len = cpu_to_le16(cdb_len);
+		prd->flags = PRD_CDB | PRD_WRITE;
+		if (!is_data)
+			prd->flags |= PRD_END;
+		prd++;
+	}
+
+	/* setup sg table */
+	if (is_data)
+		inic_fill_sg(prd, qc);
+
+	pp->cpb_tbl[0] = pp->pkt_dma;
+}
+
 static unsigned int inic_qc_issue(struct ata_queued_cmd *qc)
 {
 	struct ata_port *ap = qc->ap;
+	void __iomem *port_base = inic_port_base(ap);
 
-	/* ATA IRQ doesn't wait for DMA transfer completion and vice
-	 * versa.  Mask IRQ selectively to detect command completion.
-	 * Without it, ATA DMA read command can cause data corruption.
-	 *
-	 * Something similar might be needed for ATAPI writes.  I
-	 * tried a lot of combinations but couldn't find the solution.
-	 */
-	if (qc->tf.protocol == ATA_PROT_DMA &&
-	    !(qc->tf.flags & ATA_TFLAG_WRITE))
-		inic_set_pirq_mask(ap, PIRQ_MASK_DMA_READ);
-	else
-		inic_set_pirq_mask(ap, PIRQ_MASK_OTHER);
+	/* fire up the ADMA engine */
+	writew(HCTL_FTHD0, port_base + HOST_CTL);
+	writew(IDMA_CTL_GO, port_base + PORT_IDMA_CTL);
+	writeb(0, port_base + PORT_CPB_PTQFIFO);
+
+	return 0;
+}
+
+static void inic_tf_read(struct ata_port *ap, struct ata_taskfile *tf)
+{
+	void __iomem *port_base = inic_port_base(ap);
+
+	tf->feature = readb(port_base + PORT_TF_FEATURE);
+	tf->nsect = readb(port_base + PORT_TF_NSECT);
+	tf->lbal = readb(port_base + PORT_TF_LBAL);
+	tf->lbam = readb(port_base + PORT_TF_LBAM);
+	tf->lbah = readb(port_base + PORT_TF_LBAH);
+	tf->device = readb(port_base + PORT_TF_DEVICE);
+	tf->command = readb(port_base + PORT_TF_COMMAND);
+}
 
-	/* Issuing a command to yet uninitialized port locks up the
-	 * controller.  Most of the time, this happens for the first
-	 * command after reset which are ATA and ATAPI IDENTIFYs.
-	 * Fast fail if stat is 0x7f or 0xff for those commands.
+static bool inic_qc_fill_rtf(struct ata_queued_cmd *qc)
+{
+	struct ata_taskfile *rtf = &qc->result_tf;
+	struct ata_taskfile tf;
+
+	/* FIXME: Except for status and error, result TF access
+	 * doesn't work.  I tried reading from BAR0/2, CPB and BAR5.
+	 * None works regardless of which command interface is used.
+	 * For now return true iff status indicates device error.
+	 * This means that we're reporting bogus sector for RW
+	 * failures.  Eeekk....
 	 */
-	if (unlikely(qc->tf.command == ATA_CMD_ID_ATA ||
-		     qc->tf.command == ATA_CMD_ID_ATAPI)) {
-		u8 stat = ap->ops->sff_check_status(ap);
-		if (stat == 0x7f || stat == 0xff)
-			return AC_ERR_HSM;
-	}
+	inic_tf_read(qc->ap, &tf);
 
-	return ata_sff_qc_issue(qc);
+	if (!(tf.command & ATA_ERR))
+		return false;
+
+	rtf->command = tf.command;
+	rtf->feature = tf.feature;
+	return true;
 }
 
 static void inic_freeze(struct ata_port *ap)
 {
 	void __iomem *port_base = inic_port_base(ap);
 
-	__inic_set_pirq_mask(ap, PIRQ_MASK_FREEZE);
-
-	ap->ops->sff_check_status(ap);
+	writeb(PIRQ_MASK_FREEZE, port_base + PORT_IRQ_MASK);
 	writeb(0xff, port_base + PORT_IRQ_STAT);
-
-	readb(port_base + PORT_IRQ_STAT); /* flush */
 }
 
 static void inic_thaw(struct ata_port *ap)
 {
 	void __iomem *port_base = inic_port_base(ap);
 
-	ap->ops->sff_check_status(ap);
 	writeb(0xff, port_base + PORT_IRQ_STAT);
+	writeb(PIRQ_MASK_DEFAULT, port_base + PORT_IRQ_MASK);
+}
 
-	__inic_set_pirq_mask(ap, PIRQ_MASK_OTHER);
+static int inic_check_ready(struct ata_link *link)
+{
+	void __iomem *port_base = inic_port_base(link->ap);
 
-	readb(port_base + PORT_IRQ_STAT); /* flush */
+	return ata_check_ready(readb(port_base + PORT_TF_COMMAND));
 }
 
 /*
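[Annotation, not part of the patch] To summarize the command path above: inic_qc_prep() packs the CPB, the PRD chain and any ATAPI CDB into one DMA-coherent inic_pkt whose CPB points back into the same packet for its PRD list, and inic_qc_issue() then kicks the ADMA engine and pushes CPB slot 0 into the PTQFIFO. Below is a distilled, hypothetical sketch of the simplest case, a single non-ATAPI DMA segment; it assumes the struct and enum definitions added earlier in this patch plus the usual kernel helpers (memset, offsetof, cpu_to_le32/cpu_to_le16), and the buffer address and length are illustrative placeholders:

/* Illustrative sketch only -- mirrors what inic_qc_prep() does for
 * one DMA data segment with no CDB.
 */
static void example_one_segment_cpb(struct inic_pkt *pkt, dma_addr_t pkt_dma,
				    dma_addr_t buf_dma, u16 buf_len)
{
	struct inic_cpb *cpb = &pkt->cpb;
	struct inic_prd *prd = &pkt->prd[0];

	memset(pkt, 0, sizeof(*pkt));

	/* valid CPB with data, raise a PCI interrupt on completion */
	cpb->ctl_flags = CPB_CTL_VALID | CPB_CTL_IEN | CPB_CTL_DATA;
	cpb->len = cpu_to_le32(buf_len);
	/* the CPB's PRD pointer aims back into the same packet */
	cpb->prd = cpu_to_le32(pkt_dma + offsetof(struct inic_pkt, prd));

	/* single APRD entry: DMA transfer, end of chain */
	prd->mad = cpu_to_le32(buf_dma);
	prd->len = cpu_to_le16(buf_len);
	prd->flags = PRD_DMA | PRD_END;
}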
@@ -394,17 +616,15 @@ static int inic_hardreset(struct ata_link *link, unsigned int *class,
 	void __iomem *port_base = inic_port_base(ap);
 	void __iomem *idma_ctl = port_base + PORT_IDMA_CTL;
 	const unsigned long *timing = sata_ehc_deb_timing(&link->eh_context);
-	u16 val;
 	int rc;
 
 	/* hammer it into sane state */
 	inic_reset_port(port_base);
 
-	val = readw(idma_ctl);
-	writew(val | IDMA_CTL_RST_ATA, idma_ctl);
+	writew(IDMA_CTL_RST_ATA, idma_ctl);
 	readw(idma_ctl); /* flush */
 	msleep(1);
-	writew(val & ~IDMA_CTL_RST_ATA, idma_ctl);
+	writew(0, idma_ctl);
 
 	rc = sata_link_resume(link, timing, deadline);
 	if (rc) {
@@ -418,7 +638,7 @@ static int inic_hardreset(struct ata_link *link, unsigned int *class,
 		struct ata_taskfile tf;
 
 		/* wait for link to become ready */
-		rc = ata_sff_wait_after_reset(link, 1, deadline);
+		rc = ata_wait_after_reset(link, deadline, inic_check_ready);
 		/* link occupied, -ENODEV too is an error */
 		if (rc) {
 			ata_link_printk(link, KERN_WARNING, "device not ready "
@@ -426,7 +646,7 @@ static int inic_hardreset(struct ata_link *link, unsigned int *class,
 			return rc;
 		}
 
-		ata_sff_tf_read(ap, &tf);
+		inic_tf_read(ap, &tf);
 		*class = ata_dev_classify(&tf);
 	}
 
@@ -436,18 +656,8 @@
 static void inic_error_handler(struct ata_port *ap)
 {
 	void __iomem *port_base = inic_port_base(ap);
-	struct inic_port_priv *pp = ap->private_data;
-	unsigned long flags;
 
-	/* reset PIO HSM and stop DMA engine */
 	inic_reset_port(port_base);
-
-	spin_lock_irqsave(ap->lock, flags);
-	ap->hsm_task_state = HSM_ST_IDLE;
-	writeb(pp->dfl_prdctl, port_base + PORT_PRD_CTL);
-	spin_unlock_irqrestore(ap->lock, flags);
-
-	/* PIO and DMA engines have been stopped, perform recovery */
 	ata_std_error_handler(ap);
 }
 
@@ -458,26 +668,18 @@ static void inic_post_internal_cmd(struct ata_queued_cmd *qc)
 	inic_reset_port(inic_port_base(qc->ap));
 }
 
-static void inic_dev_config(struct ata_device *dev)
-{
-	/* inic can only handle upto LBA28 max sectors */
-	if (dev->max_sectors > ATA_MAX_SECTORS)
-		dev->max_sectors = ATA_MAX_SECTORS;
-
-	if (dev->n_sectors >= 1 << 28) {
-		ata_dev_printk(dev, KERN_ERR,
-	"ERROR: This driver doesn't support LBA48 yet and may cause\n"
-	"       data corruption on such devices.  Disabling.\n");
-		ata_dev_disable(dev);
-	}
-}
-
 static void init_port(struct ata_port *ap)
 {
 	void __iomem *port_base = inic_port_base(ap);
+	struct inic_port_priv *pp = ap->private_data;
 
-	/* Setup PRD address */
+	/* clear packet and CPB table */
+	memset(pp->pkt, 0, sizeof(struct inic_pkt));
+	memset(pp->cpb_tbl, 0, IDMA_CPB_TBL_SIZE);
+
+	/* setup PRD and CPB lookup table addresses */
 	writel(ap->prd_dma, port_base + PORT_PRD_ADDR);
+	writel(pp->cpb_tbl_dma, port_base + PORT_CPB_CPBLAR);
 }
 
 static int inic_port_resume(struct ata_port *ap)
@@ -488,28 +690,30 @@ static int inic_port_resume(struct ata_port *ap)
 
 static int inic_port_start(struct ata_port *ap)
 {
-	void __iomem *port_base = inic_port_base(ap);
+	struct device *dev = ap->host->dev;
 	struct inic_port_priv *pp;
-	u8 tmp;
 	int rc;
 
 	/* alloc and initialize private data */
-	pp = devm_kzalloc(ap->host->dev, sizeof(*pp), GFP_KERNEL);
+	pp = devm_kzalloc(dev, sizeof(*pp), GFP_KERNEL);
 	if (!pp)
 		return -ENOMEM;
 	ap->private_data = pp;
 
-	/* default PRD_CTL value, DMAEN, WR and START off */
-	tmp = readb(port_base + PORT_PRD_CTL);
-	tmp &= ~(PRD_CTL_DMAEN | PRD_CTL_WR | PRD_CTL_START);
-	pp->dfl_prdctl = tmp;
-
 	/* Alloc resources */
 	rc = ata_port_start(ap);
-	if (rc) {
-		kfree(pp);
+	if (rc)
 		return rc;
-	}
+
+	pp->pkt = dmam_alloc_coherent(dev, sizeof(struct inic_pkt),
+				      &pp->pkt_dma, GFP_KERNEL);
+	if (!pp->pkt)
+		return -ENOMEM;
+
+	pp->cpb_tbl = dmam_alloc_coherent(dev, IDMA_CPB_TBL_SIZE,
+					  &pp->cpb_tbl_dma, GFP_KERNEL);
+	if (!pp->cpb_tbl)
+		return -ENOMEM;
 
 	init_port(ap);
 
@@ -517,21 +721,18 @@ static int inic_port_start(struct ata_port *ap)
 }
 
 static struct ata_port_operations inic_port_ops = {
-	.inherits		= &ata_sff_port_ops,
+	.inherits		= &sata_port_ops,
 
-	.bmdma_setup		= inic_bmdma_setup,
-	.bmdma_start		= inic_bmdma_start,
-	.bmdma_stop		= inic_bmdma_stop,
-	.bmdma_status		= inic_bmdma_status,
+	.check_atapi_dma	= inic_check_atapi_dma,
+	.qc_prep		= inic_qc_prep,
 	.qc_issue		= inic_qc_issue,
+	.qc_fill_rtf		= inic_qc_fill_rtf,
 
 	.freeze			= inic_freeze,
 	.thaw			= inic_thaw,
-	.softreset		= ATA_OP_NULL,	/* softreset is broken */
 	.hardreset		= inic_hardreset,
 	.error_handler		= inic_error_handler,
 	.post_internal_cmd	= inic_post_internal_cmd,
-	.dev_config		= inic_dev_config,
 
 	.scr_read		= inic_scr_read,
 	.scr_write		= inic_scr_write,
@@ -541,12 +742,6 @@ static struct ata_port_operations inic_port_ops = {
 };
 
 static struct ata_port_info inic_port_info = {
-	/* For some reason, ATAPI_PROT_PIO is broken on this
-	 * controller, and no, PIO_POLLING does't fix it.  It somehow
-	 * manages to report the wrong ireason and ignoring ireason
-	 * results in machine lock up.  Tell libata to always prefer
-	 * DMA.
-	 */
 	.flags			= ATA_FLAG_SATA | ATA_FLAG_PIO_DMA,
 	.pio_mask		= 0x1f,	/* pio0-4 */
 	.mwdma_mask		= 0x07,	/* mwdma0-2 */
@@ -599,7 +794,6 @@ static int inic_pci_device_resume(struct pci_dev *pdev)
 {
 	struct ata_host *host = dev_get_drvdata(&pdev->dev);
 	struct inic_host_priv *hpriv = host->private_data;
-	void __iomem *mmio_base = host->iomap[MMIO_BAR];
 	int rc;
 
 	rc = ata_pci_device_do_resume(pdev);
@@ -607,7 +801,7 @@ static int inic_pci_device_resume(struct pci_dev *pdev)
 		return rc;
 
 	if (pdev->dev.power.power_state.event == PM_EVENT_SUSPEND) {
-		rc = init_controller(mmio_base, hpriv->cached_hctl);
+		rc = init_controller(hpriv->mmio_base, hpriv->cached_hctl);
 		if (rc)
 			return rc;
 	}
@@ -625,6 +819,7 @@ static int inic_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
 	struct ata_host *host;
 	struct inic_host_priv *hpriv;
 	void __iomem * const *iomap;
+	int mmio_bar;
 	int i, rc;
 
 	if (!printed_version++)
@@ -638,38 +833,31 @@ static int inic_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
 
 	host->private_data = hpriv;
 
-	/* acquire resources and fill host */
+	/* Acquire resources and fill host.  Note that PCI and cardbus
+	 * use different BARs.
+	 */
 	rc = pcim_enable_device(pdev);
 	if (rc)
 		return rc;
 
-	rc = pcim_iomap_regions(pdev, 0x3f, DRV_NAME);
+	if (pci_resource_flags(pdev, MMIO_BAR_PCI) & IORESOURCE_MEM)
+		mmio_bar = MMIO_BAR_PCI;
+	else
+		mmio_bar = MMIO_BAR_CARDBUS;
+
+	rc = pcim_iomap_regions(pdev, 1 << mmio_bar, DRV_NAME);
 	if (rc)
 		return rc;
 	host->iomap = iomap = pcim_iomap_table(pdev);
+	hpriv->mmio_base = iomap[mmio_bar];
+	hpriv->cached_hctl = readw(hpriv->mmio_base + HOST_CTL);
 
 	for (i = 0; i < NR_PORTS; i++) {
 		struct ata_port *ap = host->ports[i];
-		struct ata_ioports *port = &ap->ioaddr;
-		unsigned int offset = i * PORT_SIZE;
-
-		port->cmd_addr = iomap[2 * i];
-		port->altstatus_addr =
-		port->ctl_addr = (void __iomem *)
-			((unsigned long)iomap[2 * i + 1] | ATA_PCI_CTL_OFS);
-		port->scr_addr = iomap[MMIO_BAR] + offset + PORT_SCR;
-
-		ata_sff_std_ports(port);
-
-		ata_port_pbar_desc(ap, MMIO_BAR, -1, "mmio");
-		ata_port_pbar_desc(ap, MMIO_BAR, offset, "port");
-		ata_port_desc(ap, "cmd 0x%llx ctl 0x%llx",
-			(unsigned long long)pci_resource_start(pdev, 2 * i),
-			(unsigned long long)pci_resource_start(pdev, (2 * i + 1)) |
-			ATA_PCI_CTL_OFS);
-	}
 
-	hpriv->cached_hctl = readw(iomap[MMIO_BAR] + HOST_CTL);
+		ata_port_pbar_desc(ap, mmio_bar, -1, "mmio");
+		ata_port_pbar_desc(ap, mmio_bar, i * PORT_SIZE, "port");
+	}
 
 	/* Set dma_mask.  This devices doesn't support 64bit addressing. */
 	rc = pci_set_dma_mask(pdev, DMA_32BIT_MASK);
@@ -698,7 +886,7 @@ static int inic_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
 		return rc;
 	}
 
-	rc = init_controller(iomap[MMIO_BAR], hpriv->cached_hctl);
+	rc = init_controller(hpriv->mmio_base, hpriv->cached_hctl);
 	if (rc) {
 		dev_printk(KERN_ERR, &pdev->dev,
 			   "failed to initialize controller\n");