author    Jack Hammer <jack_hammer@adaptec.com>    2005-10-25 14:13:03 -0400
committer James Bottomley <jejb@mulgrave.(none)>   2005-10-29 11:39:41 -0400
commit    a3632fa3ecefe50d88fc70af90610f79b99e0715 (patch)
tree      86dd98ba0e08220039d1f615f6667d27796e8ca0 /drivers
parent    eb66fff7d94199f80378bc0b51a06e62ce379b53 (diff)
[SCSI] ips: Fix up for correct scatter/gather processing
Added kmap_atomic/kunmap_atomic. Added protection of the KM_IRQ0 slot with
local_irq_save()/local_irq_restore(), and comments.

Signed-off-by: James Bottomley <James.Bottomley@SteelEye.com>
Diffstat (limited to 'drivers')
-rw-r--r--  drivers/scsi/ips.c | 49
1 file changed, 36 insertions(+), 13 deletions(-)
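The access pattern this patch introduces is worth seeing in isolation. Below is a minimal, hypothetical sketch (not part of the patch) of how a driver of this era copies into one scatterlist element: the page is mapped with the two-argument kmap_atomic() into the KM_IRQ0 fixmap slot, and since that per-CPU slot is shared with interrupt context, interrupts are disabled around the map/copy/unmap with local_irq_save()/local_irq_restore(). The helper name sg_copy_one() is invented for illustration; sg->page, sg->offset and the km_type-based kmap_atomic() match the old API used in the diff below.

#include <linux/string.h>	/* memcpy()                              */
#include <linux/highmem.h>	/* kmap_atomic(), kunmap_atomic()        */
#include <asm/system.h>		/* local_irq_save()/restore() (old tree) */
#include <asm/scatterlist.h>	/* struct scatterlist (old layout)       */

/* Hypothetical helper, for illustration only: copy len bytes of src
 * into a single scatterlist element, using the same KM_IRQ0 +
 * local_irq_save() pattern as the patch below. */
static void sg_copy_one(struct scatterlist *sg, const void *src,
			unsigned int len)
{
	unsigned long flags;
	unsigned char *buffer;

	/* KM_IRQ0 is a per-CPU atomic-kmap slot also used from IRQ
	 * context, so the map/unmap pair must run with IRQs off. */
	local_irq_save(flags);
	buffer = kmap_atomic(sg->page, KM_IRQ0) + sg->offset;
	memcpy(buffer, src, len);
	kunmap_atomic(buffer - sg->offset, KM_IRQ0);	/* unmap the page base */
	local_irq_restore(flags);
}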
diff --git a/drivers/scsi/ips.c b/drivers/scsi/ips.c
index eeae7ccb0dd2..0a252e7aca6e 100644
--- a/drivers/scsi/ips.c
+++ b/drivers/scsi/ips.c
@@ -219,15 +219,12 @@ module_param(ips, charp, 0);
 #if LINUX_VERSION_CODE <= KERNEL_VERSION(2,5,0)
 #include <linux/blk.h>
 #include "sd.h"
-#define IPS_SG_ADDRESS(sg) ((sg)->address)
 #define IPS_LOCK_SAVE(lock,flags) spin_lock_irqsave(&io_request_lock,flags)
 #define IPS_UNLOCK_RESTORE(lock,flags) spin_unlock_irqrestore(&io_request_lock,flags)
 #ifndef __devexit_p
 #define __devexit_p(x) x
 #endif
 #else
-#define IPS_SG_ADDRESS(sg) (page_address((sg)->page) ? \
-                            page_address((sg)->page)+(sg)->offset : NULL)
 #define IPS_LOCK_SAVE(lock,flags) do{spin_lock(lock);(void)flags;}while(0)
 #define IPS_UNLOCK_RESTORE(lock,flags) do{spin_unlock(lock);(void)flags;}while(0)
 #endif
@@ -1605,6 +1602,8 @@ ips_proc_info(struct Scsi_Host *host, char *buffer, char **start, off_t offset,
 static int
 ips_is_passthru(Scsi_Cmnd * SC)
 {
+	unsigned long flags;
+
 	METHOD_TRACE("ips_is_passthru", 1);
 
 	if (!SC)
@@ -1622,10 +1621,20 @@ ips_is_passthru(Scsi_Cmnd * SC)
 			return 1;
 		else if (SC->use_sg) {
 			struct scatterlist *sg = SC->request_buffer;
-			char *buffer = IPS_SG_ADDRESS(sg);
+			char *buffer;
+
+			/* kmap_atomic() ensures addressability of the user buffer.*/
+			/* local_irq_save() protects the KM_IRQ0 address slot.     */
+			local_irq_save(flags);
+			buffer = kmap_atomic(sg->page, KM_IRQ0) + sg->offset;
 			if (buffer && buffer[0] == 'C' && buffer[1] == 'O' &&
-			    buffer[2] == 'P' && buffer[3] == 'P')
+			    buffer[2] == 'P' && buffer[3] == 'P') {
+				kunmap_atomic(buffer - sg->offset, KM_IRQ0);
+				local_irq_restore(flags);
 				return 1;
+			}
+			kunmap_atomic(buffer - sg->offset, KM_IRQ0);
+			local_irq_restore(flags);
 		}
 	}
 	return 0;
@@ -3656,14 +3665,21 @@ ips_scmd_buf_write(Scsi_Cmnd * scmd, void *data, unsigned
 	int i;
 	unsigned int min_cnt, xfer_cnt;
 	char *cdata = (char *) data;
+	unsigned char *buffer;
+	unsigned long flags;
 	struct scatterlist *sg = scmd->request_buffer;
 	for (i = 0, xfer_cnt = 0;
 	     (i < scmd->use_sg) && (xfer_cnt < count); i++) {
-		if (!IPS_SG_ADDRESS(&sg[i]))
-			return;
 		min_cnt = min(count - xfer_cnt, sg[i].length);
-		memcpy(IPS_SG_ADDRESS(&sg[i]), &cdata[xfer_cnt],
-		       min_cnt);
+
+		/* kmap_atomic() ensures addressability of the data buffer.*/
+		/* local_irq_save() protects the KM_IRQ0 address slot.     */
+		local_irq_save(flags);
+		buffer = kmap_atomic(sg[i].page, KM_IRQ0) + sg[i].offset;
+		memcpy(buffer, &cdata[xfer_cnt], min_cnt);
+		kunmap_atomic(buffer - sg[i].offset, KM_IRQ0);
+		local_irq_restore(flags);
+
 		xfer_cnt += min_cnt;
 	}
 
@@ -3688,14 +3704,21 @@ ips_scmd_buf_read(Scsi_Cmnd * scmd, void *data, unsigned
 	int i;
 	unsigned int min_cnt, xfer_cnt;
 	char *cdata = (char *) data;
+	unsigned char *buffer;
+	unsigned long flags;
 	struct scatterlist *sg = scmd->request_buffer;
 	for (i = 0, xfer_cnt = 0;
 	     (i < scmd->use_sg) && (xfer_cnt < count); i++) {
-		if (!IPS_SG_ADDRESS(&sg[i]))
-			return;
 		min_cnt = min(count - xfer_cnt, sg[i].length);
-		memcpy(&cdata[xfer_cnt], IPS_SG_ADDRESS(&sg[i]),
-		       min_cnt);
+
+		/* kmap_atomic() ensures addressability of the data buffer.*/
+		/* local_irq_save() protects the KM_IRQ0 address slot.     */
+		local_irq_save(flags);
+		buffer = kmap_atomic(sg[i].page, KM_IRQ0) + sg[i].offset;
+		memcpy(&cdata[xfer_cnt], buffer, min_cnt);
+		kunmap_atomic(buffer - sg[i].offset, KM_IRQ0);
+		local_irq_restore(flags);
+
 		xfer_cnt += min_cnt;
 	}
 