author     Mark Lord <mlord@pobox.com>            2009-02-25 15:13:03 -0500
committer  Jeff Garzik <jgarzik@redhat.com>       2009-03-24 22:02:40 -0400
commit     08da175937a35d34a83eaefbb3458472eb1a89d4 (patch)
tree       32e8be09d7fc0c878b479a65666a18b0341b7529 /drivers/ata/sata_mv.c
parent     a5bfc4714b3f01365aef89a92673f2ceb1ccf246 (diff)
[libata] sata_mv: cache frequently-accessed registers
Maintain a local (mv_port_priv) cache of frequently accessed registers,
to avoid having to re-read them (very slow) on every transition
between EDMA and non-EDMA modes. This speeds up things like
flushing the drive write cache, and anything using basic DMA transfers.
Signed-off-by: Mark Lord <mlord@pobox.com>
Signed-off-by: Jeff Garzik <jgarzik@redhat.com>
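
The idea behind the patch can be shown in isolation. Below is a minimal, self-contained sketch of the same caching pattern, using an ordinary variable in place of a slow MMIO register; the names (fake_reg, cached_reg, write_cached_reg) are illustrative only and do not come from sata_mv.c.

/*
 * Minimal sketch of the register-caching idea, assuming a plain
 * userspace build; fake_reg stands in for a slow MMIO register.
 */
#include <stdint.h>
#include <stdio.h>

static volatile uint32_t fake_reg;	/* pretend every access here is expensive */
static uint32_t cached_reg;		/* last value known to be in fake_reg */

/* Write through the cache: touch the "hardware" only when the value changes. */
static void write_cached_reg(volatile uint32_t *addr, uint32_t *old, uint32_t val)
{
	if (val != *old) {
		*old = val;
		*addr = val;
		printf("slow register write: 0x%08x\n", val);
	}
}

int main(void)
{
	cached_reg = fake_reg;				/* one-time snapshot, analogous to mv_save_cached_regs() */
	write_cached_reg(&fake_reg, &cached_reg, 0x100);	/* hits the register */
	write_cached_reg(&fake_reg, &cached_reg, 0x100);	/* skipped: value unchanged */
	write_cached_reg(&fake_reg, &cached_reg, 0x180);	/* hits the register again */
	return 0;
}

Skipping the redundant write is a bonus; the main saving, per the commit message, is that the cached copy replaces the slow register reads that previously happened on every EDMA/non-EDMA transition.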
Diffstat (limited to 'drivers/ata/sata_mv.c')
-rw-r--r--   drivers/ata/sata_mv.c | 91
1 file changed, 70 insertions, 21 deletions
diff --git a/drivers/ata/sata_mv.c b/drivers/ata/sata_mv.c
index 1f14b1b52340..146b8e67c44f 100644
--- a/drivers/ata/sata_mv.c
+++ b/drivers/ata/sata_mv.c
@@ -438,6 +438,17 @@ struct mv_sg {
 	__le32			reserved;
 };
 
+/*
+ * We keep a local cache of a few frequently accessed port
+ * registers here, to avoid having to read them (very slow)
+ * when switching between EDMA and non-EDMA modes.
+ */
+struct mv_cached_regs {
+	u32			fiscfg;
+	u32			ltmode;
+	u32			haltcond;
+};
+
 struct mv_port_priv {
 	struct mv_crqb		*crqb;
 	dma_addr_t		crqb_dma;
@@ -450,6 +461,7 @@ struct mv_port_priv {
 	unsigned int		resp_idx;
 
 	u32			pp_flags;
+	struct mv_cached_regs	cached;
 	unsigned int		delayed_eh_pmp_map;
 };
 
@@ -812,6 +824,43 @@ static inline int mv_get_hc_count(unsigned long port_flags)
 	return ((port_flags & MV_FLAG_DUAL_HC) ? 2 : 1);
 }
 
+/**
+ *	mv_save_cached_regs - (re-)initialize cached port registers
+ *	@ap: the port whose registers we are caching
+ *
+ *	Initialize the local cache of port registers,
+ *	so that reading them over and over again can
+ *	be avoided on the hotter paths of this driver.
+ *	This saves a few microseconds each time we switch
+ *	to/from EDMA mode to perform (eg.) a drive cache flush.
+ */
+static void mv_save_cached_regs(struct ata_port *ap)
+{
+	void __iomem *port_mmio = mv_ap_base(ap);
+	struct mv_port_priv *pp = ap->private_data;
+
+	pp->cached.fiscfg = readl(port_mmio + FISCFG_OFS);
+	pp->cached.ltmode = readl(port_mmio + LTMODE_OFS);
+	pp->cached.haltcond = readl(port_mmio + EDMA_HALTCOND_OFS);
+}
+
+/**
+ *	mv_write_cached_reg - write to a cached port register
+ *	@addr: hardware address of the register
+ *	@old: pointer to cached value of the register
+ *	@new: new value for the register
+ *
+ *	Write a new value to a cached register,
+ *	but only if the value is different from before.
+ */
+static inline void mv_write_cached_reg(void __iomem *addr, u32 *old, u32 new)
+{
+	if (new != *old) {
+		*old = new;
+		writel(new, addr);
+	}
+}
+
 static void mv_set_edma_ptrs(void __iomem *port_mmio,
 			     struct mv_host_priv *hpriv,
 			     struct mv_port_priv *pp)
@@ -1159,35 +1208,33 @@ static int mv_qc_defer(struct ata_queued_cmd *qc)
 		return ATA_DEFER_PORT;
 }
 
-static void mv_config_fbs(void __iomem *port_mmio, int want_ncq, int want_fbs)
+static void mv_config_fbs(struct ata_port *ap, int want_ncq, int want_fbs)
 {
-	u32 new_fiscfg, old_fiscfg;
-	u32 new_ltmode, old_ltmode;
-	u32 new_haltcond, old_haltcond;
+	struct mv_port_priv *pp = ap->private_data;
+	void __iomem *port_mmio;
 
-	old_fiscfg   = readl(port_mmio + FISCFG_OFS);
-	old_ltmode   = readl(port_mmio + LTMODE_OFS);
-	old_haltcond = readl(port_mmio + EDMA_HALTCOND_OFS);
+	u32 fiscfg,   *old_fiscfg   = &pp->cached.fiscfg;
+	u32 ltmode,   *old_ltmode   = &pp->cached.ltmode;
+	u32 haltcond, *old_haltcond = &pp->cached.haltcond;
 
-	new_fiscfg   = old_fiscfg & ~(FISCFG_SINGLE_SYNC | FISCFG_WAIT_DEV_ERR);
-	new_ltmode   = old_ltmode & ~LTMODE_BIT8;
-	new_haltcond = old_haltcond | EDMA_ERR_DEV;
+	ltmode   = *old_ltmode & ~LTMODE_BIT8;
+	haltcond = *old_haltcond | EDMA_ERR_DEV;
 
 	if (want_fbs) {
-		new_fiscfg = old_fiscfg | FISCFG_SINGLE_SYNC;
-		new_ltmode = old_ltmode | LTMODE_BIT8;
+		fiscfg = *old_fiscfg | FISCFG_SINGLE_SYNC;
+		ltmode = *old_ltmode | LTMODE_BIT8;
 		if (want_ncq)
-			new_haltcond &= ~EDMA_ERR_DEV;
+			haltcond &= ~EDMA_ERR_DEV;
 		else
-			new_fiscfg |= FISCFG_WAIT_DEV_ERR;
+			fiscfg |= FISCFG_WAIT_DEV_ERR;
+	} else {
+		fiscfg = *old_fiscfg & ~(FISCFG_SINGLE_SYNC | FISCFG_WAIT_DEV_ERR);
 	}
 
-	if (new_fiscfg != old_fiscfg)
-		writelfl(new_fiscfg, port_mmio + FISCFG_OFS);
-	if (new_ltmode != old_ltmode)
-		writelfl(new_ltmode, port_mmio + LTMODE_OFS);
-	if (new_haltcond != old_haltcond)
-		writelfl(new_haltcond, port_mmio + EDMA_HALTCOND_OFS);
+	port_mmio = mv_ap_base(ap);
+	mv_write_cached_reg(port_mmio + FISCFG_OFS, old_fiscfg, fiscfg);
+	mv_write_cached_reg(port_mmio + LTMODE_OFS, old_ltmode, ltmode);
+	mv_write_cached_reg(port_mmio + EDMA_HALTCOND_OFS, old_haltcond, haltcond);
 }
 
 static void mv_60x1_errata_sata25(struct ata_port *ap, int want_ncq)
@@ -1235,7 +1282,7 @@ static void mv_edma_cfg(struct ata_port *ap, int want_ncq, int want_edma)
 	 */
 	want_fbs &= want_ncq;
 
-	mv_config_fbs(port_mmio, want_ncq, want_fbs);
+	mv_config_fbs(ap, want_ncq, want_fbs);
 
 	if (want_fbs) {
 		pp->pp_flags |= MV_PP_FLAG_FBS_EN;
@@ -1339,6 +1386,7 @@ static int mv_port_start(struct ata_port *ap)
 			pp->sg_tbl_dma[tag] = pp->sg_tbl_dma[0];
 		}
 	}
+	mv_save_cached_regs(ap);
 	mv_edma_cfg(ap, 0, 0);
 	return 0;
 
@@ -2997,6 +3045,7 @@ static int mv_hardreset(struct ata_link *link, unsigned int *class,
 			extra = HZ; /* only extend it once, max */
 		}
 	} while (sstatus != 0x0 && sstatus != 0x113 && sstatus != 0x123);
+	mv_save_cached_regs(ap);
 	mv_edma_cfg(ap, 0, 0);
 
 	return rc;