author     Thomas Bogendoerfer <tsbogend@alpha.franken.de>   2008-01-26 18:25:53 -0500
committer  James Bottomley <James.Bottomley@HansenPartnership.com>   2008-01-30 14:03:39 -0500
commit     2adbfa333ad2c365bd27f3cf21ae464501d9619d (patch)
tree       dcc55d2c5b1d18edbde355a8e7f8e401ccc5037c /drivers/scsi/sgiwd93.c
parent     9d058ecfd444d247b7448e0ef44647514d91a4f2 (diff)
[SCSI] sgiwd93: use cached memory access to make driver work on IP28
SGI IP28 machines would need special treatment (enabling additional wait
states) when accessing memory uncached. To avoid this pain I changed the
driver to use only cached access to memory.
Signed-off-by: Thomas Bogendoerfer <tsbogend@alpha.franken.de>
Signed-off-by: James Bottomley <James.Bottomley@HansenPartnership.com>
Diffstat (limited to 'drivers/scsi/sgiwd93.c')
-rw-r--r--  drivers/scsi/sgiwd93.c  64
1 file changed, 39 insertions, 25 deletions
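The change boils down to one pattern: instead of keeping the HPC descriptor chain in coherent (uncached) memory via dma_alloc_coherent(), the driver allocates it with dma_alloc_noncoherent() so the CPU writes it through the cache, and then writes the finished descriptors back with dma_cache_sync() before the DMA engine fetches them. The following is a minimal sketch of that pattern against the 2.6.24-era DMA API; the demo_* struct and function names are illustrative only and do not appear in sgiwd93.c.

/* Sketch of the cached-descriptor pattern adopted by this patch.
 * All demo_* identifiers are made up for illustration. */
#include <linux/types.h>
#include <linux/errno.h>
#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <asm/page.h>

struct demo_desc {                      /* stand-in for a hardware DMA descriptor */
        u32 pbuf;
        u32 cntinfo;
        u32 pnext;
        u32 _padding;
};

struct demo_ring {
        struct demo_desc *cpu;          /* cached kernel address of the ring */
        dma_addr_t dma;                 /* bus address programmed into the device */
        struct device *dev;
};

static int demo_ring_alloc(struct demo_ring *ring, struct device *dev)
{
        ring->dev = dev;
        /* Cached mapping for the CPU; the device still uses ring->dma. */
        ring->cpu = dma_alloc_noncoherent(dev, PAGE_SIZE, &ring->dma, GFP_KERNEL);
        return ring->cpu ? 0 : -ENOMEM;
}

static void demo_ring_publish(struct demo_ring *ring, unsigned int ndesc)
{
        /* The descriptors were written through the cache; flush them to
         * memory before the DMA engine is started. */
        dma_cache_sync(ring->dev, ring->cpu, ndesc * sizeof(struct demo_desc),
                       DMA_TO_DEVICE);
}

static void demo_ring_free(struct demo_ring *ring)
{
        dma_free_noncoherent(ring->dev, PAGE_SIZE, ring->cpu, ring->dma);
}

On IP22/IP28 this trades the uncached coherent mapping for ordinary cached accesses plus an explicit writeback, which is what the probe, fill_hpc_entries and free hunks in the diff below switch to.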
diff --git a/drivers/scsi/sgiwd93.c b/drivers/scsi/sgiwd93.c
index d4ebe8c67ba9..26cfc56c7091 100644
--- a/drivers/scsi/sgiwd93.c
+++ b/drivers/scsi/sgiwd93.c
@@ -33,10 +33,9 @@
 
 struct ip22_hostdata {
         struct WD33C93_hostdata wh;
-        struct hpc_data {
-                dma_addr_t dma;
-                void *cpu;
-        } hd;
+        dma_addr_t dma;
+        void *cpu;
+        struct device *dev;
 };
 
 #define host_to_hostdata(host) ((struct ip22_hostdata *)((host)->hostdata))
@@ -46,6 +45,11 @@ struct hpc_chunk {
         u32 _padding;   /* align to quadword boundary */
 };
 
+/* space for hpc dma descriptors */
+#define HPC_DMA_SIZE            PAGE_SIZE
+
+#define DMA_DIR(d)   ((d == DATA_OUT_DIR) ? DMA_TO_DEVICE : DMA_FROM_DEVICE)
+
 static irqreturn_t sgiwd93_intr(int irq, void *dev_id)
 {
         struct Scsi_Host * host = dev_id;
@@ -59,15 +63,17 @@ static irqreturn_t sgiwd93_intr(int irq, void *dev_id)
 }
 
 static inline
-void fill_hpc_entries(struct hpc_chunk *hcp, struct scsi_cmnd *cmd, int datainp)
+void fill_hpc_entries(struct ip22_hostdata *hd, struct scsi_cmnd *cmd, int din)
 {
         unsigned long len = cmd->SCp.this_residual;
         void *addr = cmd->SCp.ptr;
         dma_addr_t physaddr;
         unsigned long count;
+        struct hpc_chunk *hcp;
 
-        physaddr = dma_map_single(NULL, addr, len, cmd->sc_data_direction);
+        physaddr = dma_map_single(hd->dev, addr, len, DMA_DIR(din));
         cmd->SCp.dma_handle = physaddr;
+        hcp = hd->cpu;
 
         while (len) {
                 /*
@@ -89,6 +95,9 @@ void fill_hpc_entries(struct hpc_chunk *hcp, struct scsi_cmnd *cmd, int datainp)
          */
         hcp->desc.pbuf = 0;
         hcp->desc.cntinfo = HPCDMA_EOX;
+        dma_cache_sync(hd->dev, hd->cpu,
+                       (unsigned long)(hcp + 1) - (unsigned long)hd->cpu,
+                       DMA_TO_DEVICE);
 }
 
 static int dma_setup(struct scsi_cmnd *cmd, int datainp)
@@ -96,9 +105,8 @@ static int dma_setup(struct scsi_cmnd *cmd, int datainp)
         struct ip22_hostdata *hdata = host_to_hostdata(cmd->device->host);
         struct hpc3_scsiregs *hregs =
                 (struct hpc3_scsiregs *) cmd->device->host->base;
-        struct hpc_chunk *hcp = (struct hpc_chunk *) hdata->hd.cpu;
 
-        pr_debug("dma_setup: datainp<%d> hcp<%p> ", datainp, hcp);
+        pr_debug("dma_setup: datainp<%d> hcp<%p> ", datainp, hdata->cpu);
 
         hdata->wh.dma_dir = datainp;
 
@@ -111,12 +119,12 @@ static int dma_setup(struct scsi_cmnd *cmd, int datainp)
         if (cmd->SCp.ptr == NULL || cmd->SCp.this_residual == 0)
                 return 1;
 
-        fill_hpc_entries(hcp, cmd, datainp);
+        fill_hpc_entries(hdata, cmd, datainp);
 
         pr_debug(" HPCGO\n");
 
         /* Start up the HPC. */
-        hregs->ndptr = hdata->hd.dma;
+        hregs->ndptr = hdata->dma;
         if (datainp)
                 hregs->ctrl = HPC3_SCTRL_ACTIVE;
         else
@@ -134,6 +142,9 @@ static void dma_stop(struct Scsi_Host *instance, struct scsi_cmnd *SCpnt,
         if (!SCpnt)
                 return;
 
+        if (SCpnt->SCp.ptr == NULL || SCpnt->SCp.this_residual == 0)
+                return;
+
         hregs = (struct hpc3_scsiregs *) SCpnt->device->host->base;
 
         pr_debug("dma_stop: status<%d> ", status);
@@ -145,8 +156,9 @@ static void dma_stop(struct Scsi_Host *instance, struct scsi_cmnd *SCpnt,
                 barrier();
         }
         hregs->ctrl = 0;
-        dma_unmap_single(NULL, SCpnt->SCp.dma_handle, SCpnt->SCp.this_residual,
-                         SCpnt->sc_data_direction);
+        dma_unmap_single(hdata->dev, SCpnt->SCp.dma_handle,
+                         SCpnt->SCp.this_residual,
+                         DMA_DIR(hdata->wh.dma_dir));
 
         pr_debug("\n");
 }
@@ -161,22 +173,23 @@ void sgiwd93_reset(unsigned long base)
 }
 EXPORT_SYMBOL_GPL(sgiwd93_reset);
 
-static inline void init_hpc_chain(struct hpc_data *hd)
+static inline void init_hpc_chain(struct ip22_hostdata *hdata)
 {
-        struct hpc_chunk *hcp = (struct hpc_chunk *) hd->cpu;
-        struct hpc_chunk *dma = (struct hpc_chunk *) hd->dma;
+        struct hpc_chunk *hcp = (struct hpc_chunk *)hdata->cpu;
+        dma_addr_t dma = hdata->dma;
         unsigned long start, end;
 
         start = (unsigned long) hcp;
-        end = start + PAGE_SIZE;
+        end = start + HPC_DMA_SIZE;
         while (start < end) {
-                hcp->desc.pnext = (u32) (dma + 1);
+                hcp->desc.pnext = (u32) (dma + sizeof(struct hpc_chunk));
                 hcp->desc.cntinfo = HPCDMA_EOX;
-                hcp++; dma++;
+                hcp++;
+                dma += sizeof(struct hpc_chunk);
                 start += sizeof(struct hpc_chunk);
         };
         hcp--;
-        hcp->desc.pnext = hd->dma;
+        hcp->desc.pnext = hdata->dma;
 }
 
 static int sgiwd93_bus_reset(struct scsi_cmnd *cmd)
@@ -235,16 +248,17 @@ static int __init sgiwd93_probe(struct platform_device *pdev)
         host->irq = irq;
 
         hdata = host_to_hostdata(host);
-        hdata->hd.cpu = dma_alloc_coherent(&pdev->dev, PAGE_SIZE,
-                                           &hdata->hd.dma, GFP_KERNEL);
-        if (!hdata->hd.cpu) {
+        hdata->dev = &pdev->dev;
+        hdata->cpu = dma_alloc_noncoherent(&pdev->dev, HPC_DMA_SIZE,
+                                           &hdata->dma, GFP_KERNEL);
+        if (!hdata->cpu) {
                 printk(KERN_WARNING "sgiwd93: Could not allocate memory for "
                        "host %d buffer.\n", unit);
                 err = -ENOMEM;
                 goto out_put;
         }
 
-        init_hpc_chain(&hdata->hd);
+        init_hpc_chain(hdata);
 
         regs.SASR = wdregs + 3;
         regs.SCMD = wdregs + 7;
@@ -274,7 +288,7 @@ static int __init sgiwd93_probe(struct platform_device *pdev)
 out_irq:
         free_irq(irq, host);
 out_free:
-        dma_free_coherent(NULL, PAGE_SIZE, hdata->hd.cpu, hdata->hd.dma);
+        dma_free_noncoherent(&pdev->dev, HPC_DMA_SIZE, hdata->cpu, hdata->dma);
 out_put:
         scsi_host_put(host);
 out:
@@ -290,7 +304,7 @@ static void __exit sgiwd93_remove(struct platform_device *pdev)
 
         scsi_remove_host(host);
         free_irq(pd->irq, host);
-        dma_free_coherent(&pdev->dev, PAGE_SIZE, hdata->hd.cpu, hdata->hd.dma);
+        dma_free_noncoherent(&pdev->dev, HPC_DMA_SIZE, hdata->cpu, hdata->dma);
         scsi_host_put(host);
 }
 