author	Linus Torvalds <torvalds@g5.osdl.org>	2005-08-14 18:43:39 -0400
committer	Linus Torvalds <torvalds@g5.osdl.org>	2005-08-14 18:43:39 -0400
commit	b4b08e581fac8e0ba9ae348bdc13246c9798c99e (patch)
tree	b09ea9a12a65f0a03fa7a15b0e661756913a0262 /drivers
parent	27876d02b30aa34bb1ad35b81ccc40c174282f31 (diff)
Revert "dc395x: Fix support for highmem"
It introduces a repeatable oops in the driver, which is a bigger problem
than the patch tries to solve.

From the original description:

    Author: Jamie Lenehan <lenehan@twibble.org>
    Date:   Thu Mar 3 14:41:40 2005 +0200

    [PATCH] dc395x: Fix support for highmem

    From: Guennadi Liakhovetski <g.liakhovetski@gmx.de>

    Removes the page_to_virt and maps sg lists dynamically. This makes
    the driver work with highmem pages.

    Signed-off-by: Guennadi Liakhovetski <g.liakhovetski@gmx.de>
    Signed-off-by: Jamie Lenehan <lenehan@twibble.org>
    Signed-off-by: James Bottomley <James.Bottomley@SteelEye.com>

Signed-off-by: Guennadi Liakhovetski <g.liakhovetski@gmx.de>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
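For background on what the revert undoes: the original patch replaced direct
page_address() lookups (valid only for lowmem pages) with kmap()/kunmap() so
that scatterlist pages living in highmem could be addressed. Below is a
minimal illustrative sketch of the two approaches, not code from dc395x.c;
the helper names are made up, while page_address(), kmap()/kunmap(), and the
2.6-era scatterlist layout (with a .page member) are the ones the diff below
touches.

/*
 * Illustrative sketch only -- not taken from dc395x.c.
 */
#include <linux/mm.h>
#include <linux/highmem.h>
#include <asm/scatterlist.h>

/*
 * Lowmem-only: valid only for pages with a permanent kernel mapping.
 * For a highmem page without such a mapping, page_address() returns NULL.
 */
static void *sg_virt_lowmem_only(struct scatterlist *sg)
{
	return page_address(sg->page) + sg->offset;
}

/*
 * Highmem-safe: kmap() creates a (possibly temporary) kernel mapping and
 * may sleep; the caller must balance it with kunmap(sg->page) when done,
 * which is what the reverted patch did in pci_unmap_srb().
 */
static void *sg_virt_with_kmap(struct scatterlist *sg)
{
	return kmap(sg->page) + sg->offset;
}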
Diffstat (limited to 'drivers')
-rw-r--r--	drivers/scsi/dc395x.c	48
1 files changed, 13 insertions, 35 deletions
diff --git a/drivers/scsi/dc395x.c b/drivers/scsi/dc395x.c
index 929170dcd3cb..600ba1202864 100644
--- a/drivers/scsi/dc395x.c
+++ b/drivers/scsi/dc395x.c
@@ -183,7 +183,7 @@
  * cross a page boundy.
  */
 #define SEGMENTX_LEN	(sizeof(struct SGentry)*DC395x_MAX_SG_LISTENTRY)
-#define VIRTX_LEN	(sizeof(void *) * DC395x_MAX_SG_LISTENTRY)
+
 
 struct SGentry {
 	u32 address;		/* bus! address */
@@ -235,7 +235,6 @@ struct ScsiReqBlk {
 	u8 sg_count;			/* No of HW sg entries for this request */
 	u8 sg_index;			/* Index of HW sg entry for this request */
 	u32 total_xfer_length;		/* Total number of bytes remaining to be transfered */
-	void **virt_map;
 	unsigned char *virt_addr;	/* Virtual address of current transfer position */
 
 	/*
@@ -1022,14 +1021,14 @@ static void build_srb(struct scsi_cmnd *cmd, struct DeviceCtlBlk *dcb,
 			reqlen, cmd->request_buffer, cmd->use_sg,
 			srb->sg_count);
 
+		srb->virt_addr = page_address(sl->page);
 		for (i = 0; i < srb->sg_count; i++) {
-			u32 seglen = (u32)sg_dma_len(sl + i);
-			sgp[i].address = (u32)sg_dma_address(sl + i);
+			u32 busaddr = (u32)sg_dma_address(&sl[i]);
+			u32 seglen = (u32)sl[i].length;
+			sgp[i].address = busaddr;
 			sgp[i].length = seglen;
 			srb->total_xfer_length += seglen;
-			srb->virt_map[i] = kmap(sl[i].page);
 		}
-		srb->virt_addr = srb->virt_map[0];
 		sgp += srb->sg_count - 1;
 
 		/*
@@ -1976,7 +1975,6 @@ static void sg_update_list(struct ScsiReqBlk *srb, u32 left)
 	int segment = cmd->use_sg;
 	u32 xferred = srb->total_xfer_length - left; /* bytes transfered */
 	struct SGentry *psge = srb->segment_x + srb->sg_index;
-	void **virt = srb->virt_map;
 
 	dprintkdbg(DBG_0,
 		"sg_update_list: Transfered %i of %i bytes, %i remain\n",
@@ -2016,16 +2014,16 @@ static void sg_update_list(struct ScsiReqBlk *srb, u32 left)
 
 	/* We have to walk the scatterlist to find it */
 	sg = (struct scatterlist *)cmd->request_buffer;
-	idx = 0;
 	while (segment--) {
 		unsigned long mask =
 			~((unsigned long)sg->length - 1) & PAGE_MASK;
 		if ((sg_dma_address(sg) & mask) == (psge->address & mask)) {
-			srb->virt_addr = virt[idx] + (psge->address & ~PAGE_MASK);
+			srb->virt_addr = (page_address(sg->page)
+					  + psge->address -
+					  (psge->address & PAGE_MASK));
 			return;
 		}
 		++sg;
-		++idx;
 	}
 
 	dprintkl(KERN_ERR, "sg_update_list: sg_to_virt failed\n");
@@ -2151,7 +2149,7 @@ static void data_out_phase0(struct AdapterCtlBlk *acb, struct ScsiReqBlk *srb,
 			DC395x_read32(acb, TRM_S1040_DMA_CXCNT));
 	}
 	/*
-	 * calculate all the residue data that not yet transfered
+	 * calculate all the residue data that not yet tranfered
 	 * SCSI transfer counter + left in SCSI FIFO data
 	 *
 	 * .....TRM_S1040_SCSI_COUNTER (24bits)
@@ -3269,7 +3267,6 @@ static void pci_unmap_srb(struct AdapterCtlBlk *acb, struct ScsiReqBlk *srb)
 	struct scsi_cmnd *cmd = srb->cmd;
 	enum dma_data_direction dir = cmd->sc_data_direction;
 	if (cmd->use_sg && dir != PCI_DMA_NONE) {
-		int i;
 		/* unmap DC395x SG list */
 		dprintkdbg(DBG_SG, "pci_unmap_srb: list=%08x(%05x)\n",
 			srb->sg_bus_addr, SEGMENTX_LEN);
@@ -3279,8 +3276,6 @@ static void pci_unmap_srb(struct AdapterCtlBlk *acb, struct ScsiReqBlk *srb)
 		dprintkdbg(DBG_SG, "pci_unmap_srb: segs=%i buffer=%p\n",
 			cmd->use_sg, cmd->request_buffer);
 		/* unmap the sg segments */
-		for (i = 0; i < srb->sg_count; i++)
-			kunmap(virt_to_page(srb->virt_map[i]));
 		pci_unmap_sg(acb->dev,
 			(struct scatterlist *)cmd->request_buffer,
 			cmd->use_sg, dir);
@@ -3327,7 +3322,7 @@ static void srb_done(struct AdapterCtlBlk *acb, struct DeviceCtlBlk *dcb,
 
 	if (cmd->use_sg) {
 		struct scatterlist* sg = (struct scatterlist *)cmd->request_buffer;
-		ptr = (struct ScsiInqData *)(srb->virt_map[0] + sg->offset);
+		ptr = (struct ScsiInqData *)(page_address(sg->page) + sg->offset);
 	} else {
 		ptr = (struct ScsiInqData *)(cmd->request_buffer);
 	}
@@ -4262,9 +4257,8 @@ static void adapter_sg_tables_free(struct AdapterCtlBlk *acb)
 	const unsigned srbs_per_page = PAGE_SIZE/SEGMENTX_LEN;
 
 	for (i = 0; i < DC395x_MAX_SRB_CNT; i += srbs_per_page)
-		kfree(acb->srb_array[i].segment_x);
-
-	vfree(acb->srb_array[0].virt_map);
+		if (acb->srb_array[i].segment_x)
+			kfree(acb->srb_array[i].segment_x);
 }
 
 
@@ -4280,12 +4274,9 @@ static int __devinit adapter_sg_tables_alloc(struct AdapterCtlBlk *acb)
 	int srb_idx = 0;
 	unsigned i = 0;
 	struct SGentry *ptr;
-	void **virt_array;
 
-	for (i = 0; i < DC395x_MAX_SRB_CNT; i++) {
+	for (i = 0; i < DC395x_MAX_SRB_CNT; i++)
 		acb->srb_array[i].segment_x = NULL;
-		acb->srb_array[i].virt_map = NULL;
-	}
 
 	dprintkdbg(DBG_1, "Allocate %i pages for SG tables\n", pages);
 	while (pages--) {
@@ -4306,19 +4297,6 @@ static int __devinit adapter_sg_tables_alloc(struct AdapterCtlBlk *acb)
 				ptr + (i * DC395x_MAX_SG_LISTENTRY);
 		else
 			dprintkl(KERN_DEBUG, "No space for tmsrb SG table reserved?!\n");
-
-	virt_array = vmalloc((DC395x_MAX_SRB_CNT + 1) * DC395x_MAX_SG_LISTENTRY * sizeof(void*));
-
-	if (!virt_array) {
-		adapter_sg_tables_free(acb);
-		return 1;
-	}
-
-	for (i = 0; i < DC395x_MAX_SRB_CNT + 1; i++) {
-		acb->srb_array[i].virt_map = virt_array;
-		virt_array += DC395x_MAX_SG_LISTENTRY;
-	}
-
 	return 0;
 }
 