Diffstat (limited to 'drivers')

 drivers/ata/libata-sff.c |   2 +-
 drivers/ata/sata_nv.c    | 934 ++++++++++++++++++++++++++++++++++++++++++++++-
 2 files changed, 924 insertions(+), 12 deletions(-)
diff --git a/drivers/ata/libata-sff.c b/drivers/ata/libata-sff.c
index 7645f2b30ccf..e178d6ae8b80 100644
--- a/drivers/ata/libata-sff.c
+++ b/drivers/ata/libata-sff.c
@@ -732,7 +732,7 @@ void ata_bmdma_drive_eh(struct ata_port *ap, ata_prereset_fn_t prereset,
 		   qc->tf.protocol == ATA_PROT_ATAPI_DMA)) {
 		u8 host_stat;
 
-		host_stat = ata_bmdma_status(ap);
+		host_stat = ap->ops->bmdma_status(ap);
 
 		ata_ehi_push_desc(&ehc->i, "BMDMA stat 0x%x", host_stat);
 
diff --git a/drivers/ata/sata_nv.c b/drivers/ata/sata_nv.c
index d65ebfd7c7b2..e5615be21565 100644
--- a/drivers/ata/sata_nv.c
+++ b/drivers/ata/sata_nv.c
@@ -29,6 +29,11 @@
  * NV-specific details such as register offsets, SATA phy location,
  * hotplug info, etc.
  *
+ * CK804/MCP04 controllers support an alternate programming interface
+ * similar to the ADMA specification (with some modifications).
+ * This allows the use of NCQ. Non-DMA-mapped ATA commands are still
+ * sent through the legacy interface.
+ *
  */
 
 #include <linux/kernel.h>
@@ -40,10 +45,13 @@
 #include <linux/interrupt.h>
 #include <linux/device.h>
 #include <scsi/scsi_host.h>
+#include <scsi/scsi_device.h>
 #include <linux/libata.h>
 
 #define DRV_NAME "sata_nv"
-#define DRV_VERSION "2.0"
+#define DRV_VERSION "3.1"
+
+#define NV_ADMA_DMA_BOUNDARY 0xffffffffUL
 
 enum {
 	NV_PORTS = 2,
@@ -78,8 +86,137 @@ enum {
 	// For PCI config register 20
 	NV_MCP_SATA_CFG_20 = 0x50,
 	NV_MCP_SATA_CFG_20_SATA_SPACE_EN = 0x04,
+	NV_MCP_SATA_CFG_20_PORT0_EN = (1 << 17),
+	NV_MCP_SATA_CFG_20_PORT1_EN = (1 << 16),
+	NV_MCP_SATA_CFG_20_PORT0_PWB_EN = (1 << 14),
+	NV_MCP_SATA_CFG_20_PORT1_PWB_EN = (1 << 12),
+
+	NV_ADMA_MAX_CPBS = 32,
+	NV_ADMA_CPB_SZ = 128,
+	NV_ADMA_APRD_SZ = 16,
+	NV_ADMA_SGTBL_LEN = (1024 - NV_ADMA_CPB_SZ) /
+			     NV_ADMA_APRD_SZ,
+	NV_ADMA_SGTBL_TOTAL_LEN = NV_ADMA_SGTBL_LEN + 5,
+	NV_ADMA_SGTBL_SZ = NV_ADMA_SGTBL_LEN * NV_ADMA_APRD_SZ,
+	NV_ADMA_PORT_PRIV_DMA_SZ = NV_ADMA_MAX_CPBS *
+				    (NV_ADMA_CPB_SZ + NV_ADMA_SGTBL_SZ),
+
+	/* BAR5 offset to ADMA general registers */
+	NV_ADMA_GEN = 0x400,
+	NV_ADMA_GEN_CTL = 0x00,
+	NV_ADMA_NOTIFIER_CLEAR = 0x30,
+
+	/* BAR5 offset to ADMA ports */
+	NV_ADMA_PORT = 0x480,
+
+	/* size of ADMA port register space */
+	NV_ADMA_PORT_SIZE = 0x100,
+
+	/* ADMA port registers */
+	NV_ADMA_CTL = 0x40,
+	NV_ADMA_CPB_COUNT = 0x42,
+	NV_ADMA_NEXT_CPB_IDX = 0x43,
+	NV_ADMA_STAT = 0x44,
+	NV_ADMA_CPB_BASE_LOW = 0x48,
+	NV_ADMA_CPB_BASE_HIGH = 0x4C,
+	NV_ADMA_APPEND = 0x50,
+	NV_ADMA_NOTIFIER = 0x68,
+	NV_ADMA_NOTIFIER_ERROR = 0x6C,
+
+	/* NV_ADMA_CTL register bits */
+	NV_ADMA_CTL_HOTPLUG_IEN = (1 << 0),
+	NV_ADMA_CTL_CHANNEL_RESET = (1 << 5),
+	NV_ADMA_CTL_GO = (1 << 7),
+	NV_ADMA_CTL_AIEN = (1 << 8),
+	NV_ADMA_CTL_READ_NON_COHERENT = (1 << 11),
+	NV_ADMA_CTL_WRITE_NON_COHERENT = (1 << 12),
+
+	/* CPB response flag bits */
+	NV_CPB_RESP_DONE = (1 << 0),
+	NV_CPB_RESP_ATA_ERR = (1 << 3),
+	NV_CPB_RESP_CMD_ERR = (1 << 4),
+	NV_CPB_RESP_CPB_ERR = (1 << 7),
+
+	/* CPB control flag bits */
+	NV_CPB_CTL_CPB_VALID = (1 << 0),
+	NV_CPB_CTL_QUEUE = (1 << 1),
+	NV_CPB_CTL_APRD_VALID = (1 << 2),
+	NV_CPB_CTL_IEN = (1 << 3),
+	NV_CPB_CTL_FPDMA = (1 << 4),
+
+	/* APRD flags */
+	NV_APRD_WRITE = (1 << 1),
+	NV_APRD_END = (1 << 2),
+	NV_APRD_CONT = (1 << 3),
+
+	/* NV_ADMA_STAT flags */
+	NV_ADMA_STAT_TIMEOUT = (1 << 0),
+	NV_ADMA_STAT_HOTUNPLUG = (1 << 1),
+	NV_ADMA_STAT_HOTPLUG = (1 << 2),
+	NV_ADMA_STAT_CPBERR = (1 << 4),
+	NV_ADMA_STAT_SERROR = (1 << 5),
+	NV_ADMA_STAT_CMD_COMPLETE = (1 << 6),
+	NV_ADMA_STAT_IDLE = (1 << 8),
+	NV_ADMA_STAT_LEGACY = (1 << 9),
+	NV_ADMA_STAT_STOPPED = (1 << 10),
+	NV_ADMA_STAT_DONE = (1 << 12),
+	NV_ADMA_STAT_ERR = NV_ADMA_STAT_CPBERR |
+			    NV_ADMA_STAT_TIMEOUT,
+
+	/* port flags */
+	NV_ADMA_PORT_REGISTER_MODE = (1 << 0),
+
+};
+
+/* ADMA Physical Region Descriptor - one SG segment */
+struct nv_adma_prd {
+	__le64 addr;
+	__le32 len;
+	u8 flags;
+	u8 packet_len;
+	__le16 reserved;
+};
+
+enum nv_adma_regbits {
+	CMDEND = (1 << 15),	/* end of command list */
+	WNB = (1 << 14),	/* wait-not-BSY */
+	IGN = (1 << 13),	/* ignore this entry */
+	CS1n = (1 << (4 + 8)),	/* std. PATA signals follow... */
+	DA2 = (1 << (2 + 8)),
+	DA1 = (1 << (1 + 8)),
+	DA0 = (1 << (0 + 8)),
+};
+
+/* ADMA Command Parameter Block
+   The first 5 SG segments are stored inside the Command Parameter Block itself.
+   If there are more than 5 segments the remainder are stored in a separate
+   memory area indicated by next_aprd. */
+struct nv_adma_cpb {
+	u8 resp_flags;		/* 0 */
+	u8 reserved1;		/* 1 */
+	u8 ctl_flags;		/* 2 */
+	/* len is length of taskfile in 64 bit words */
+	u8 len;			/* 3 */
+	u8 tag;			/* 4 */
+	u8 next_cpb_idx;	/* 5 */
+	__le16 reserved2;	/* 6-7 */
+	__le16 tf[12];		/* 8-31 */
+	struct nv_adma_prd aprd[5];	/* 32-111 */
+	__le64 next_aprd;	/* 112-119 */
+	__le64 reserved3;	/* 120-127 */
 };
 
+
+struct nv_adma_port_priv {
+	struct nv_adma_cpb *cpb;
+	dma_addr_t cpb_dma;
+	struct nv_adma_prd *aprd;
+	dma_addr_t aprd_dma;
+	u8 flags;
+};
+
+#define NV_ADMA_CHECK_INTR(GCTL, PORT) ((GCTL) & ( 1 << (19 + (12 * (PORT)))))
+
 static int nv_init_one (struct pci_dev *pdev, const struct pci_device_id *ent);
 static void nv_ck804_host_stop(struct ata_host *host);
 static irqreturn_t nv_generic_interrupt(int irq, void *dev_instance);
@@ -93,13 +230,27 @@ static void nv_nf2_thaw(struct ata_port *ap);
 static void nv_ck804_freeze(struct ata_port *ap);
 static void nv_ck804_thaw(struct ata_port *ap);
 static void nv_error_handler(struct ata_port *ap);
+static int nv_adma_slave_config(struct scsi_device *sdev);
+static void nv_adma_qc_prep(struct ata_queued_cmd *qc);
+static unsigned int nv_adma_qc_issue(struct ata_queued_cmd *qc);
+static irqreturn_t nv_adma_interrupt(int irq, void *dev_instance);
+static void nv_adma_irq_clear(struct ata_port *ap);
+static int nv_adma_port_start(struct ata_port *ap);
+static void nv_adma_port_stop(struct ata_port *ap);
+static void nv_adma_error_handler(struct ata_port *ap);
+static void nv_adma_host_stop(struct ata_host *host);
+static void nv_adma_bmdma_setup(struct ata_queued_cmd *qc);
+static void nv_adma_bmdma_start(struct ata_queued_cmd *qc);
+static void nv_adma_bmdma_stop(struct ata_queued_cmd *qc);
+static u8 nv_adma_bmdma_status(struct ata_port *ap);
 
 enum nv_host_type
 {
 	GENERIC,
 	NFORCE2,
 	NFORCE3 = NFORCE2,	/* NF2 == NF3 as far as sata_nv is concerned */
-	CK804
+	CK804,
+	ADMA
 };
 
 static const struct pci_device_id nv_pci_tbl[] = {
@@ -160,6 +311,25 @@ static struct scsi_host_template nv_sht = {
 	.bios_param = ata_std_bios_param,
 };
 
+static struct scsi_host_template nv_adma_sht = {
+	.module = THIS_MODULE,
+	.name = DRV_NAME,
+	.ioctl = ata_scsi_ioctl,
+	.queuecommand = ata_scsi_queuecmd,
+	.can_queue = NV_ADMA_MAX_CPBS,
+	.this_id = ATA_SHT_THIS_ID,
+	.sg_tablesize = NV_ADMA_SGTBL_TOTAL_LEN,
+	.max_sectors = ATA_MAX_SECTORS,
+	.cmd_per_lun = ATA_SHT_CMD_PER_LUN,
+	.emulated = ATA_SHT_EMULATED,
+	.use_clustering = ATA_SHT_USE_CLUSTERING,
+	.proc_name = DRV_NAME,
+	.dma_boundary = NV_ADMA_DMA_BOUNDARY,
+	.slave_configure = nv_adma_slave_config,
+	.slave_destroy = ata_scsi_slave_destroy,
+	.bios_param = ata_std_bios_param,
+};
+
 static const struct ata_port_operations nv_generic_ops = {
 	.port_disable = ata_port_disable,
 	.tf_load = ata_tf_load,
@@ -241,6 +411,33 @@ static const struct ata_port_operations nv_ck804_ops = {
 	.host_stop = nv_ck804_host_stop,
 };
 
+static const struct ata_port_operations nv_adma_ops = {
+	.port_disable = ata_port_disable,
+	.tf_load = ata_tf_load,
+	.tf_read = ata_tf_read,
+	.exec_command = ata_exec_command,
+	.check_status = ata_check_status,
+	.dev_select = ata_std_dev_select,
+	.bmdma_setup = nv_adma_bmdma_setup,
+	.bmdma_start = nv_adma_bmdma_start,
+	.bmdma_stop = nv_adma_bmdma_stop,
+	.bmdma_status = nv_adma_bmdma_status,
+	.qc_prep = nv_adma_qc_prep,
+	.qc_issue = nv_adma_qc_issue,
+	.freeze = nv_ck804_freeze,
+	.thaw = nv_ck804_thaw,
+	.error_handler = nv_adma_error_handler,
+	.post_internal_cmd = nv_adma_bmdma_stop,
+	.data_xfer = ata_mmio_data_xfer,
+	.irq_handler = nv_adma_interrupt,
+	.irq_clear = nv_adma_irq_clear,
+	.scr_read = nv_scr_read,
+	.scr_write = nv_scr_write,
+	.port_start = nv_adma_port_start,
+	.port_stop = nv_adma_port_stop,
+	.host_stop = nv_adma_host_stop,
+};
+
 static struct ata_port_info nv_port_info[] = {
 	/* generic */
 	{
@@ -269,6 +466,16 @@ static struct ata_port_info nv_port_info[] = {
 		.udma_mask = NV_UDMA_MASK,
 		.port_ops = &nv_ck804_ops,
 	},
+	/* ADMA */
+	{
+		.sht = &nv_adma_sht,
+		.flags = ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY |
+			 ATA_FLAG_MMIO | ATA_FLAG_NCQ,
+		.pio_mask = NV_PIO_MASK,
+		.mwdma_mask = NV_MWDMA_MASK,
+		.udma_mask = NV_UDMA_MASK,
+		.port_ops = &nv_adma_ops,
+	},
 };
 
 MODULE_AUTHOR("NVIDIA");
@@ -277,6 +484,614 @@ MODULE_LICENSE("GPL");
 MODULE_DEVICE_TABLE(pci, nv_pci_tbl);
 MODULE_VERSION(DRV_VERSION);
 
+static int adma_enabled = 1;
+
+static int nv_adma_slave_config(struct scsi_device *sdev)
+{
+	struct ata_port *ap = ata_shost_to_port(sdev->host);
+	u64 bounce_limit;
+	unsigned long segment_boundary;
+	unsigned short sg_tablesize;
+	int rc;
+
+	rc = ata_scsi_slave_config(sdev);
+
+	if (sdev->id >= ATA_MAX_DEVICES || sdev->channel || sdev->lun)
+		/* Not a proper libata device, ignore */
+		return rc;
+
+	if (ap->device[sdev->id].class == ATA_DEV_ATAPI) {
+		/*
+		 * NVIDIA reports that ADMA mode does not support ATAPI commands.
+		 * Therefore ATAPI commands are sent through the legacy interface.
+		 * However, the legacy interface only supports 32-bit DMA.
+		 * Restrict DMA parameters as required by the legacy interface
+		 * when an ATAPI device is connected.
+		 */
+		bounce_limit = ATA_DMA_MASK;
+		segment_boundary = ATA_DMA_BOUNDARY;
+		/* Subtract 1 since an extra entry may be needed for padding, see
+		   libata-scsi.c */
+		sg_tablesize = LIBATA_MAX_PRD - 1;
+	}
+	else {
+		bounce_limit = *ap->dev->dma_mask;
+		segment_boundary = NV_ADMA_DMA_BOUNDARY;
+		sg_tablesize = NV_ADMA_SGTBL_TOTAL_LEN;
+	}
+
+	blk_queue_bounce_limit(sdev->request_queue, bounce_limit);
+	blk_queue_segment_boundary(sdev->request_queue, segment_boundary);
+	blk_queue_max_hw_segments(sdev->request_queue, sg_tablesize);
+	ata_port_printk(ap, KERN_INFO,
+		"bounce limit 0x%llX, segment boundary 0x%lX, hw segs %hu\n",
+		(unsigned long long)bounce_limit, segment_boundary, sg_tablesize);
+	return rc;
+}
+
+static unsigned int nv_adma_tf_to_cpb(struct ata_taskfile *tf, u16 *cpb)
+{
+	unsigned int idx = 0;
+
+	cpb[idx++] = cpu_to_le16((ATA_REG_DEVICE << 8) | tf->device | WNB);
+
+	if ((tf->flags & ATA_TFLAG_LBA48) == 0) {
+		cpb[idx++] = cpu_to_le16(IGN);
+		cpb[idx++] = cpu_to_le16(IGN);
+		cpb[idx++] = cpu_to_le16(IGN);
+		cpb[idx++] = cpu_to_le16(IGN);
+		cpb[idx++] = cpu_to_le16(IGN);
+	}
+	else {
+		cpb[idx++] = cpu_to_le16((ATA_REG_ERR << 8) | tf->hob_feature);
+		cpb[idx++] = cpu_to_le16((ATA_REG_NSECT << 8) | tf->hob_nsect);
+		cpb[idx++] = cpu_to_le16((ATA_REG_LBAL << 8) | tf->hob_lbal);
+		cpb[idx++] = cpu_to_le16((ATA_REG_LBAM << 8) | tf->hob_lbam);
+		cpb[idx++] = cpu_to_le16((ATA_REG_LBAH << 8) | tf->hob_lbah);
+	}
+	cpb[idx++] = cpu_to_le16((ATA_REG_ERR << 8) | tf->feature);
+	cpb[idx++] = cpu_to_le16((ATA_REG_NSECT << 8) | tf->nsect);
+	cpb[idx++] = cpu_to_le16((ATA_REG_LBAL << 8) | tf->lbal);
+	cpb[idx++] = cpu_to_le16((ATA_REG_LBAM << 8) | tf->lbam);
+	cpb[idx++] = cpu_to_le16((ATA_REG_LBAH << 8) | tf->lbah);
+
+	cpb[idx++] = cpu_to_le16((ATA_REG_CMD << 8) | tf->command | CMDEND);
+
+	return idx;
+}
+
+static inline void __iomem *__nv_adma_ctl_block(void __iomem *mmio,
+						unsigned int port_no)
+{
+	mmio += NV_ADMA_PORT + port_no * NV_ADMA_PORT_SIZE;
+	return mmio;
+}
+
+static inline void __iomem *nv_adma_ctl_block(struct ata_port *ap)
+{
+	return __nv_adma_ctl_block(ap->host->mmio_base, ap->port_no);
+}
+
+static inline void __iomem *nv_adma_gen_block(struct ata_port *ap)
+{
+	return (ap->host->mmio_base + NV_ADMA_GEN);
+}
+
+static inline void __iomem *nv_adma_notifier_clear_block(struct ata_port *ap)
+{
+	return (nv_adma_gen_block(ap) + NV_ADMA_NOTIFIER_CLEAR + (4 * ap->port_no));
+}
+
+static void nv_adma_check_cpb(struct ata_port *ap, int cpb_num, int force_err)
+{
+	struct nv_adma_port_priv *pp = ap->private_data;
+	int complete = 0, have_err = 0;
+	u16 flags = pp->cpb[cpb_num].resp_flags;
+
+	VPRINTK("CPB %d, flags=0x%x\n", cpb_num, flags);
+
+	if (flags & NV_CPB_RESP_DONE) {
+		VPRINTK("CPB flags done, flags=0x%x\n", flags);
+		complete = 1;
+	}
+	if (flags & NV_CPB_RESP_ATA_ERR) {
+		ata_port_printk(ap, KERN_ERR, "CPB flags ATA err, flags=0x%x\n", flags);
+		have_err = 1;
+		complete = 1;
+	}
+	if (flags & NV_CPB_RESP_CMD_ERR) {
+		ata_port_printk(ap, KERN_ERR, "CPB flags CMD err, flags=0x%x\n", flags);
+		have_err = 1;
+		complete = 1;
+	}
+	if (flags & NV_CPB_RESP_CPB_ERR) {
+		ata_port_printk(ap, KERN_ERR, "CPB flags CPB err, flags=0x%x\n", flags);
+		have_err = 1;
+		complete = 1;
+	}
+	if(complete || force_err)
+	{
+		struct ata_queued_cmd *qc = ata_qc_from_tag(ap, cpb_num);
+		if(likely(qc)) {
+			u8 ata_status = 0;
+			/* Only use the ATA port status for non-NCQ commands.
+			   For NCQ commands the current status may have nothing to do with
+			   the command just completed. */
+			if(qc->tf.protocol != ATA_PROT_NCQ)
+				ata_status = readb(nv_adma_ctl_block(ap) + (ATA_REG_STATUS * 4));
+
+			if(have_err || force_err)
+				ata_status |= ATA_ERR;
+
+			qc->err_mask |= ac_err_mask(ata_status);
+			DPRINTK("Completing qc from tag %d with err_mask %u\n",cpb_num,
+				qc->err_mask);
+			ata_qc_complete(qc);
+		}
+	}
+}
+
+static irqreturn_t nv_adma_interrupt(int irq, void *dev_instance)
+{
+	struct ata_host *host = dev_instance;
+	int i, handled = 0;
+
+	spin_lock(&host->lock);
+
+	for (i = 0; i < host->n_ports; i++) {
+		struct ata_port *ap = host->ports[i];
+
+		if (ap && !(ap->flags & ATA_FLAG_DISABLED)) {
+			struct nv_adma_port_priv *pp = ap->private_data;
+			void __iomem *mmio = nv_adma_ctl_block(ap);
+			u16 status;
+			u32 gen_ctl;
+			int have_global_err = 0;
+			u32 notifier, notifier_error;
+
+			/* if in ATA register mode, use standard ata interrupt handler */
+			if (pp->flags & NV_ADMA_PORT_REGISTER_MODE) {
+				struct ata_queued_cmd *qc;
+				VPRINTK("in ATA register mode\n");
+				qc = ata_qc_from_tag(ap, ap->active_tag);
+				if (qc && (!(qc->tf.flags & ATA_TFLAG_POLLING)))
+					handled += ata_host_intr(ap, qc);
+				else {
+					/* No request pending? Clear interrupt status
+					   anyway, in case there's one pending. */
+					ap->ops->check_status(ap);
+					handled++;
+				}
+				continue;
+			}
+
+			notifier = readl(mmio + NV_ADMA_NOTIFIER);
+			notifier_error = readl(mmio + NV_ADMA_NOTIFIER_ERROR);
+
+			gen_ctl = readl(nv_adma_gen_block(ap) + NV_ADMA_GEN_CTL);
+
+			/* Seems necessary to clear notifiers even when they were 0.
+			   Otherwise we seem to stop receiving further interrupts.
+			   Unsure why. */
+			writel(notifier | notifier_error, nv_adma_notifier_clear_block(ap));
+
+			if( !NV_ADMA_CHECK_INTR(gen_ctl, ap->port_no) && !notifier &&
+			    !notifier_error)
+				/* Nothing to do */
+				continue;
+
+			status = readw(mmio + NV_ADMA_STAT);
+
+			/* Clear status. Ensure the controller sees the clearing before we start
+			   looking at any of the CPB statuses, so that any CPB completions after
+			   this point in the handler will raise another interrupt. */
+			writew(status, mmio + NV_ADMA_STAT);
+			readw(mmio + NV_ADMA_STAT); /* flush posted write */
+			rmb();
+
+			/* freeze if hotplugged */
+			if (unlikely(status & (NV_ADMA_STAT_HOTPLUG | NV_ADMA_STAT_HOTUNPLUG))) {
+				ata_port_printk(ap, KERN_NOTICE, "Hotplug event, freezing\n");
+				ata_port_freeze(ap);
+				handled++;
+				continue;
+			}
+
+			if (status & NV_ADMA_STAT_TIMEOUT) {
+				ata_port_printk(ap, KERN_ERR, "timeout, stat=0x%x\n", status);
+				have_global_err = 1;
+			}
+			if (status & NV_ADMA_STAT_CPBERR) {
+				ata_port_printk(ap, KERN_ERR, "CPB error, stat=0x%x\n", status);
+				have_global_err = 1;
+			}
+			if ((status & NV_ADMA_STAT_DONE) || have_global_err) {
+				/** Check CPBs for completed commands */
+
+				if(ata_tag_valid(ap->active_tag))
+					/* Non-NCQ command */
+					nv_adma_check_cpb(ap, ap->active_tag, have_global_err ||
+						(notifier_error & (1 << ap->active_tag)));
+				else {
+					int pos;
+					u32 active = ap->sactive;
+					while( (pos = ffs(active)) ) {
+						pos--;
+						nv_adma_check_cpb(ap, pos, have_global_err ||
+							(notifier_error & (1 << pos)) );
+						active &= ~(1 << pos );
+					}
+				}
+			}
+
+			handled++; /* irq handled if we got here */
+		}
+	}
+
+	spin_unlock(&host->lock);
+
+	return IRQ_RETVAL(handled);
+}
+
+static void nv_adma_irq_clear(struct ata_port *ap)
+{
+	void __iomem *mmio = nv_adma_ctl_block(ap);
+	u16 status = readw(mmio + NV_ADMA_STAT);
+	u32 notifier = readl(mmio + NV_ADMA_NOTIFIER);
+	u32 notifier_error = readl(mmio + NV_ADMA_NOTIFIER_ERROR);
+
+	/* clear ADMA status */
+	writew(status, mmio + NV_ADMA_STAT);
+	writel(notifier | notifier_error,
+	       nv_adma_notifier_clear_block(ap));
+
+	/** clear legacy status */
+	ap->flags &= ~ATA_FLAG_MMIO;
+	ata_bmdma_irq_clear(ap);
+	ap->flags |= ATA_FLAG_MMIO;
+}
+
+static void nv_adma_bmdma_setup(struct ata_queued_cmd *qc)
+{
+	struct nv_adma_port_priv *pp = qc->ap->private_data;
+
+	if(pp->flags & NV_ADMA_PORT_REGISTER_MODE) {
+		WARN_ON(1);
+		return;
+	}
+
+	qc->ap->flags &= ~ATA_FLAG_MMIO;
+	ata_bmdma_setup(qc);
+	qc->ap->flags |= ATA_FLAG_MMIO;
+}
+
+static void nv_adma_bmdma_start(struct ata_queued_cmd *qc)
+{
+	struct nv_adma_port_priv *pp = qc->ap->private_data;
+
+	if(pp->flags & NV_ADMA_PORT_REGISTER_MODE) {
+		WARN_ON(1);
+		return;
+	}
+
+	qc->ap->flags &= ~ATA_FLAG_MMIO;
+	ata_bmdma_start(qc);
+	qc->ap->flags |= ATA_FLAG_MMIO;
+}
+
+static void nv_adma_bmdma_stop(struct ata_queued_cmd *qc)
+{
+	struct nv_adma_port_priv *pp = qc->ap->private_data;
+
+	if(pp->flags & NV_ADMA_PORT_REGISTER_MODE)
+		return;
+
+	qc->ap->flags &= ~ATA_FLAG_MMIO;
+	ata_bmdma_stop(qc);
+	qc->ap->flags |= ATA_FLAG_MMIO;
+}
+
+static u8 nv_adma_bmdma_status(struct ata_port *ap)
+{
+	u8 status;
+	struct nv_adma_port_priv *pp = ap->private_data;
+
+	WARN_ON(pp->flags & NV_ADMA_PORT_REGISTER_MODE);
+
+	ap->flags &= ~ATA_FLAG_MMIO;
+	status = ata_bmdma_status(ap);
+	ap->flags |= ATA_FLAG_MMIO;
+	return status;
+}
+
+static void nv_adma_register_mode(struct ata_port *ap)
+{
+	void __iomem *mmio = nv_adma_ctl_block(ap);
+	struct nv_adma_port_priv *pp = ap->private_data;
+	u16 tmp;
+
+	if (pp->flags & NV_ADMA_PORT_REGISTER_MODE)
+		return;
+
+	tmp = readw(mmio + NV_ADMA_CTL);
+	writew(tmp & ~NV_ADMA_CTL_GO, mmio + NV_ADMA_CTL);
+
+	pp->flags |= NV_ADMA_PORT_REGISTER_MODE;
+}
+
+static void nv_adma_mode(struct ata_port *ap)
+{
+	void __iomem *mmio = nv_adma_ctl_block(ap);
+	struct nv_adma_port_priv *pp = ap->private_data;
+	u16 tmp;
+
+	if (!(pp->flags & NV_ADMA_PORT_REGISTER_MODE))
+		return;
+
+	tmp = readw(mmio + NV_ADMA_CTL);
+	writew(tmp | NV_ADMA_CTL_GO, mmio + NV_ADMA_CTL);
+
+	pp->flags &= ~NV_ADMA_PORT_REGISTER_MODE;
+}
+
+static int nv_adma_port_start(struct ata_port *ap)
+{
+	struct device *dev = ap->host->dev;
+	struct nv_adma_port_priv *pp;
+	int rc;
+	void *mem;
+	dma_addr_t mem_dma;
+	void __iomem *mmio = nv_adma_ctl_block(ap);
+	u16 tmp;
+
+	VPRINTK("ENTER\n");
+
+	rc = ata_port_start(ap);
+	if (rc)
+		return rc;
+
+	pp = kzalloc(sizeof(*pp), GFP_KERNEL);
+	if (!pp) {
+		rc = -ENOMEM;
+		goto err_out;
+	}
+
+	mem = dma_alloc_coherent(dev, NV_ADMA_PORT_PRIV_DMA_SZ,
+				 &mem_dma, GFP_KERNEL);
+
+	if (!mem) {
+		rc = -ENOMEM;
+		goto err_out_kfree;
+	}
+	memset(mem, 0, NV_ADMA_PORT_PRIV_DMA_SZ);
+
+	/*
+	 * First item in chunk of DMA memory:
+	 * 128-byte command parameter block (CPB)
+	 * one for each command tag
+	 */
+	pp->cpb = mem;
+	pp->cpb_dma = mem_dma;
+
+	writel(mem_dma & 0xFFFFFFFF, mmio + NV_ADMA_CPB_BASE_LOW);
+	writel((mem_dma >> 16 ) >> 16, mmio + NV_ADMA_CPB_BASE_HIGH);
+
+	mem += NV_ADMA_MAX_CPBS * NV_ADMA_CPB_SZ;
+	mem_dma += NV_ADMA_MAX_CPBS * NV_ADMA_CPB_SZ;
+
+	/*
+	 * Second item: block of ADMA_SGTBL_LEN s/g entries
+	 */
+	pp->aprd = mem;
+	pp->aprd_dma = mem_dma;
+
+	ap->private_data = pp;
+
+	/* clear any outstanding interrupt conditions */
+	writew(0xffff, mmio + NV_ADMA_STAT);
+
+	/* initialize port variables */
+	pp->flags = NV_ADMA_PORT_REGISTER_MODE;
+
+	/* clear CPB fetch count */
+	writew(0, mmio + NV_ADMA_CPB_COUNT);
+
+	/* clear GO for register mode */
+	tmp = readw(mmio + NV_ADMA_CTL);
+	writew(tmp & ~NV_ADMA_CTL_GO, mmio + NV_ADMA_CTL);
+
+	tmp = readw(mmio + NV_ADMA_CTL);
+	writew(tmp | NV_ADMA_CTL_CHANNEL_RESET, mmio + NV_ADMA_CTL);
+	readl( mmio + NV_ADMA_CTL );	/* flush posted write */
+	udelay(1);
+	writew(tmp & ~NV_ADMA_CTL_CHANNEL_RESET, mmio + NV_ADMA_CTL);
+	readl( mmio + NV_ADMA_CTL );	/* flush posted write */
+
+	return 0;
+
+err_out_kfree:
+	kfree(pp);
+err_out:
+	ata_port_stop(ap);
+	return rc;
+}
+
+static void nv_adma_port_stop(struct ata_port *ap)
+{
+	struct device *dev = ap->host->dev;
+	struct nv_adma_port_priv *pp = ap->private_data;
+	void __iomem *mmio = nv_adma_ctl_block(ap);
+
+	VPRINTK("ENTER\n");
+
+	writew(0, mmio + NV_ADMA_CTL);
+
+	ap->private_data = NULL;
+	dma_free_coherent(dev, NV_ADMA_PORT_PRIV_DMA_SZ, pp->cpb, pp->cpb_dma);
+	kfree(pp);
+	ata_port_stop(ap);
+}
+
+
+static void nv_adma_setup_port(struct ata_probe_ent *probe_ent, unsigned int port)
+{
+	void __iomem *mmio = probe_ent->mmio_base;
+	struct ata_ioports *ioport = &probe_ent->port[port];
+
+	VPRINTK("ENTER\n");
+
+	mmio += NV_ADMA_PORT + port * NV_ADMA_PORT_SIZE;
+
+	ioport->cmd_addr = (unsigned long) mmio;
+	ioport->data_addr = (unsigned long) mmio + (ATA_REG_DATA * 4);
+	ioport->error_addr =
+	ioport->feature_addr = (unsigned long) mmio + (ATA_REG_ERR * 4);
+	ioport->nsect_addr = (unsigned long) mmio + (ATA_REG_NSECT * 4);
+	ioport->lbal_addr = (unsigned long) mmio + (ATA_REG_LBAL * 4);
+	ioport->lbam_addr = (unsigned long) mmio + (ATA_REG_LBAM * 4);
+	ioport->lbah_addr = (unsigned long) mmio + (ATA_REG_LBAH * 4);
+	ioport->device_addr = (unsigned long) mmio + (ATA_REG_DEVICE * 4);
+	ioport->status_addr =
+	ioport->command_addr = (unsigned long) mmio + (ATA_REG_STATUS * 4);
+	ioport->altstatus_addr =
+	ioport->ctl_addr = (unsigned long) mmio + 0x20;
+}
+
+static int nv_adma_host_init(struct ata_probe_ent *probe_ent)
+{
+	struct pci_dev *pdev = to_pci_dev(probe_ent->dev);
+	unsigned int i;
+	u32 tmp32;
+
+	VPRINTK("ENTER\n");
+
+	/* enable ADMA on the ports */
+	pci_read_config_dword(pdev, NV_MCP_SATA_CFG_20, &tmp32);
+	tmp32 |= NV_MCP_SATA_CFG_20_PORT0_EN |
+		 NV_MCP_SATA_CFG_20_PORT0_PWB_EN |
+		 NV_MCP_SATA_CFG_20_PORT1_EN |
+		 NV_MCP_SATA_CFG_20_PORT1_PWB_EN;
+
+	pci_write_config_dword(pdev, NV_MCP_SATA_CFG_20, tmp32);
+
+	for (i = 0; i < probe_ent->n_ports; i++)
+		nv_adma_setup_port(probe_ent, i);
+
+	for (i = 0; i < probe_ent->n_ports; i++) {
+		void __iomem *mmio = __nv_adma_ctl_block(probe_ent->mmio_base, i);
+		u16 tmp;
+
+		/* enable interrupt, clear reset if not already clear */
+		tmp = readw(mmio + NV_ADMA_CTL);
+		writew(tmp | NV_ADMA_CTL_AIEN, mmio + NV_ADMA_CTL);
+	}
+
+	return 0;
+}
+
+static void nv_adma_fill_aprd(struct ata_queued_cmd *qc,
+			      struct scatterlist *sg,
+			      int idx,
+			      struct nv_adma_prd *aprd)
+{
+	u32 flags;
+
+	memset(aprd, 0, sizeof(struct nv_adma_prd));
+
+	flags = 0;
+	if (qc->tf.flags & ATA_TFLAG_WRITE)
+		flags |= NV_APRD_WRITE;
+	if (idx == qc->n_elem - 1)
+		flags |= NV_APRD_END;
+	else if (idx != 4)
+		flags |= NV_APRD_CONT;
+
+	aprd->addr = cpu_to_le64(((u64)sg_dma_address(sg)));
+	aprd->len = cpu_to_le32(((u32)sg_dma_len(sg))); /* len in bytes */
+	aprd->flags = cpu_to_le32(flags);
+}
+
+static void nv_adma_fill_sg(struct ata_queued_cmd *qc, struct nv_adma_cpb *cpb)
+{
+	struct nv_adma_port_priv *pp = qc->ap->private_data;
+	unsigned int idx;
+	struct nv_adma_prd *aprd;
+	struct scatterlist *sg;
+
+	VPRINTK("ENTER\n");
+
+	idx = 0;
+
+	ata_for_each_sg(sg, qc) {
+		aprd = (idx < 5) ? &cpb->aprd[idx] : &pp->aprd[NV_ADMA_SGTBL_LEN * qc->tag + (idx-5)];
+		nv_adma_fill_aprd(qc, sg, idx, aprd);
+		idx++;
+	}
+	if (idx > 5)
+		cpb->next_aprd = cpu_to_le64(((u64)(pp->aprd_dma + NV_ADMA_SGTBL_SZ * qc->tag)));
+}
+
+static void nv_adma_qc_prep(struct ata_queued_cmd *qc)
+{
+	struct nv_adma_port_priv *pp = qc->ap->private_data;
+	struct nv_adma_cpb *cpb = &pp->cpb[qc->tag];
+	u8 ctl_flags = NV_CPB_CTL_CPB_VALID |
+		       NV_CPB_CTL_APRD_VALID |
+		       NV_CPB_CTL_IEN;
+
+	VPRINTK("qc->flags = 0x%lx\n", qc->flags);
+
+	if (!(qc->flags & ATA_QCFLAG_DMAMAP) ||
+	     qc->tf.protocol == ATA_PROT_ATAPI_DMA) {
+		ata_qc_prep(qc);
+		return;
+	}
+
+	memset(cpb, 0, sizeof(struct nv_adma_cpb));
+
+	cpb->len = 3;
+	cpb->tag = qc->tag;
+	cpb->next_cpb_idx = 0;
+
+	/* turn on NCQ flags for NCQ commands */
+	if (qc->tf.protocol == ATA_PROT_NCQ)
+		ctl_flags |= NV_CPB_CTL_QUEUE | NV_CPB_CTL_FPDMA;
+
+	nv_adma_tf_to_cpb(&qc->tf, cpb->tf);
+
+	nv_adma_fill_sg(qc, cpb);
+
+	/* Be paranoid and don't let the device see NV_CPB_CTL_CPB_VALID until we are
+	   finished filling in all of the contents */
+	wmb();
+	cpb->ctl_flags = ctl_flags;
+}
+
+static unsigned int nv_adma_qc_issue(struct ata_queued_cmd *qc)
+{
+	void __iomem *mmio = nv_adma_ctl_block(qc->ap);
+
+	VPRINTK("ENTER\n");
+
+	if (!(qc->flags & ATA_QCFLAG_DMAMAP) ||
+	     qc->tf.protocol == ATA_PROT_ATAPI_DMA) {
+		/* use ATA register mode */
+		VPRINTK("no dmamap or ATAPI, using ATA register mode: 0x%lx\n", qc->flags);
+		nv_adma_register_mode(qc->ap);
+		return ata_qc_issue_prot(qc);
+	} else
+		nv_adma_mode(qc->ap);
+
+	/* write append register, command tag in lower 8 bits
+	   and (number of cpbs to append -1) in top 8 bits */
+	wmb();
+	writew(qc->tag, mmio + NV_ADMA_APPEND);
+
+	DPRINTK("Issued tag %u\n",qc->tag);
+
+	return 0;
+}
+
 static irqreturn_t nv_generic_interrupt(int irq, void *dev_instance)
 {
 	struct ata_host *host = dev_instance;
@@ -466,6 +1281,56 @@ static void nv_error_handler(struct ata_port *ap)
 			   nv_hardreset, ata_std_postreset);
 }
 
+static void nv_adma_error_handler(struct ata_port *ap)
+{
+	struct nv_adma_port_priv *pp = ap->private_data;
+	if(!(pp->flags & NV_ADMA_PORT_REGISTER_MODE)) {
+		void __iomem *mmio = nv_adma_ctl_block(ap);
+		int i;
+		u16 tmp;
+
+		u32 notifier = readl(mmio + NV_ADMA_NOTIFIER);
+		u32 notifier_error = readl(mmio + NV_ADMA_NOTIFIER_ERROR);
+		u32 gen_ctl = readl(nv_adma_gen_block(ap) + NV_ADMA_GEN_CTL);
+		u32 status = readw(mmio + NV_ADMA_STAT);
+
+		ata_port_printk(ap, KERN_ERR, "EH in ADMA mode, notifier 0x%X "
+			"notifier_error 0x%X gen_ctl 0x%X status 0x%X\n",
+			notifier, notifier_error, gen_ctl, status);
+
+		for( i=0;i<NV_ADMA_MAX_CPBS;i++) {
+			struct nv_adma_cpb *cpb = &pp->cpb[i];
+			if( cpb->ctl_flags || cpb->resp_flags )
+				ata_port_printk(ap, KERN_ERR,
+					"CPB %d: ctl_flags 0x%x, resp_flags 0x%x\n",
+					i, cpb->ctl_flags, cpb->resp_flags);
+		}
+
+		/* Push us back into port register mode for error handling. */
+		nv_adma_register_mode(ap);
+
+		ata_port_printk(ap, KERN_ERR, "Resetting port\n");
+
+		/* Mark all of the CPBs as invalid to prevent them from being executed */
+		for( i=0;i<NV_ADMA_MAX_CPBS;i++)
+			pp->cpb[i].ctl_flags &= ~NV_CPB_CTL_CPB_VALID;
+
+		/* clear CPB fetch count */
+		writew(0, mmio + NV_ADMA_CPB_COUNT);
+
+		/* Reset channel */
+		tmp = readw(mmio + NV_ADMA_CTL);
+		writew(tmp | NV_ADMA_CTL_CHANNEL_RESET, mmio + NV_ADMA_CTL);
+		readl( mmio + NV_ADMA_CTL );	/* flush posted write */
+		udelay(1);
+		writew(tmp & ~NV_ADMA_CTL_CHANNEL_RESET, mmio + NV_ADMA_CTL);
+		readl( mmio + NV_ADMA_CTL );	/* flush posted write */
+	}
+
+	ata_bmdma_drive_eh(ap, ata_std_prereset, ata_std_softreset,
+			   nv_hardreset, ata_std_postreset);
+}
+
 static int nv_init_one (struct pci_dev *pdev, const struct pci_device_id *ent)
 {
 	static int printed_version = 0;
@@ -475,6 +1340,8 @@ static int nv_init_one (struct pci_dev *pdev, const struct pci_device_id *ent)
 	int rc;
 	u32 bar;
 	unsigned long base;
+	unsigned long type = ent->driver_data;
+	int mask_set = 0;
 
 	// Make sure this is a SATA controller by counting the number of bars
 	// (NVIDIA SATA controllers will always have six bars). Otherwise,
@@ -483,7 +1350,7 @@ static int nv_init_one (struct pci_dev *pdev, const struct pci_device_id *ent)
 		if (pci_resource_start(pdev, bar) == 0)
 			return -ENODEV;
 
-	if (!printed_version++)
+	if ( !printed_version++)
 		dev_printk(KERN_DEBUG, &pdev->dev, "version " DRV_VERSION "\n");
 
 	rc = pci_enable_device(pdev);
@@ -496,16 +1363,26 @@ static int nv_init_one (struct pci_dev *pdev, const struct pci_device_id *ent)
 		goto err_out_disable;
 	}
 
-	rc = pci_set_dma_mask(pdev, ATA_DMA_MASK);
-	if (rc)
-		goto err_out_regions;
-	rc = pci_set_consistent_dma_mask(pdev, ATA_DMA_MASK);
-	if (rc)
-		goto err_out_regions;
+	if(type >= CK804 && adma_enabled) {
+		dev_printk(KERN_NOTICE, &pdev->dev, "Using ADMA mode\n");
+		type = ADMA;
+		if(!pci_set_dma_mask(pdev, DMA_64BIT_MASK) &&
+		   !pci_set_consistent_dma_mask(pdev, DMA_64BIT_MASK))
+			mask_set = 1;
+	}
+
+	if(!mask_set) {
+		rc = pci_set_dma_mask(pdev, ATA_DMA_MASK);
+		if (rc)
+			goto err_out_regions;
+		rc = pci_set_consistent_dma_mask(pdev, ATA_DMA_MASK);
+		if (rc)
+			goto err_out_regions;
+	}
 
 	rc = -ENOMEM;
 
-	ppi[0] = ppi[1] = &nv_port_info[ent->driver_data];
+	ppi[0] = ppi[1] = &nv_port_info[type];
 	probe_ent = ata_pci_init_native_mode(pdev, ppi, ATA_PORT_PRIMARY | ATA_PORT_SECONDARY);
 	if (!probe_ent)
 		goto err_out_regions;
@@ -522,7 +1399,7 @@ static int nv_init_one (struct pci_dev *pdev, const struct pci_device_id *ent)
 	probe_ent->port[1].scr_addr = base + NV_PORT1_SCR_REG_OFFSET;
 
 	/* enable SATA space for CK804 */
-	if (ent->driver_data == CK804) {
+	if (type >= CK804) {
 		u8 regval;
 
 		pci_read_config_byte(pdev, NV_MCP_SATA_CFG_20, &regval);
@@ -532,6 +1409,12 @@ static int nv_init_one (struct pci_dev *pdev, const struct pci_device_id *ent)
 
 	pci_set_master(pdev);
 
+	if (type == ADMA) {
+		rc = nv_adma_host_init(probe_ent);
+		if (rc)
+			goto err_out_iounmap;
+	}
+
 	rc = ata_device_add(probe_ent);
 	if (rc != NV_PORTS)
 		goto err_out_iounmap;
@@ -566,6 +1449,33 @@ static void nv_ck804_host_stop(struct ata_host *host)
 	ata_pci_host_stop(host);
 }
 
+static void nv_adma_host_stop(struct ata_host *host)
+{
+	struct pci_dev *pdev = to_pci_dev(host->dev);
+	int i;
+	u32 tmp32;
+
+	for (i = 0; i < host->n_ports; i++) {
+		void __iomem *mmio = __nv_adma_ctl_block(host->mmio_base, i);
+		u16 tmp;
+
+		/* disable interrupt */
+		tmp = readw(mmio + NV_ADMA_CTL);
+		writew(tmp & ~NV_ADMA_CTL_AIEN, mmio + NV_ADMA_CTL);
+	}
+
+	/* disable ADMA on the ports */
+	pci_read_config_dword(pdev, NV_MCP_SATA_CFG_20, &tmp32);
+	tmp32 &= ~(NV_MCP_SATA_CFG_20_PORT0_EN |
+		   NV_MCP_SATA_CFG_20_PORT0_PWB_EN |
+		   NV_MCP_SATA_CFG_20_PORT1_EN |
+		   NV_MCP_SATA_CFG_20_PORT1_PWB_EN);
+
+	pci_write_config_dword(pdev, NV_MCP_SATA_CFG_20, tmp32);
+
+	nv_ck804_host_stop(host);
+}
+
 static int __init nv_init(void)
 {
 	return pci_register_driver(&nv_pci_driver);
@@ -578,3 +1488,5 @@ static void __exit nv_exit(void)
 
 module_init(nv_init);
 module_exit(nv_exit);
+module_param_named(adma, adma_enabled, bool, 0444);
+MODULE_PARM_DESC(adma, "Enable use of ADMA (Default: true)");