author		Arthur Benilov <arthur.benilov@gmail.com>	2010-09-24 13:26:13 -0400
committer	Greg Kroah-Hartman <gregkh@suse.de>	2010-09-30 07:33:26 -0400
commit		53059aa05988761a738fa8bc082bbf3c5d4462d1
tree		1d260afd6dce5e55389516d444570e4e6bc42c37 /drivers/staging/vme
parent		0abd242839eac8437521101c9361c095728eded3
Staging: vme: Assure D16 cycle if required in master_read and master_write
From a95892fc2246d6dc45f57b7dd68f32b9b28bd0f7 Mon Sep 17 00:00:00 2001
From: Arthur Benilov <arthur.benilov@gmail.com>
Date: Fri, 24 Sep 2010 13:51:07 +0200
Subject: [PATCH] Staging: vme: Assure D16 cycle if required in master_read and master_write

memcpy_fromio() and memcpy_toio() fall back internally on __memcpy(), which
performs the transfer in 32-bit or 8-bit chunks (at least on x86). That makes
it impossible to generate a D16 cycle with the ca91cx42 bridge. This change
splits the transfer into 32-, 16-, and 8-bit accesses so that the widest
configured data cycle is actually used on the bus.

Signed-off-by: Arthur Benilov <arthur.benilov@iba-group.com>
Signed-off-by: Martyn Welch <martyn.welch@ge.com>
Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
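The splitting strategy the message describes is easier to follow outside the driver. The sketch below is illustrative only, not the patched code: the read8()/read16() helpers are hypothetical stand-ins for ioread8()/ioread16(), plain memcpy() stands in for memcpy_fromio() on the 32-bit aligned body, and the head/tail handling mirrors the pattern the patch adds to ca91cx42_master_read().

#include <stdint.h>
#include <stddef.h>
#include <string.h>
#include <stdio.h>

/* Stand-ins for ioread8()/ioread16(); the real driver uses the kernel MMIO
 * accessors against the mapped VME window (image->kern_base + offset). */
static uint8_t read8(const volatile void *p)
{
	return *(const volatile uint8_t *)p;
}

static uint16_t read16(const volatile void *p)
{
	return *(const volatile uint16_t *)p;
}

/* Copy 'count' bytes from a bus window without issuing an access narrower
 * than the current alignment allows: an odd leading byte, a 16-bit word to
 * reach 32-bit alignment, a 32-bit aligned body, then 16-bit and 8-bit tail
 * accesses for whatever remains. */
static size_t aligned_read(void *buf, const volatile uint8_t *addr, size_t count)
{
	size_t done = 0;
	uint16_t w;

	if (count == 0)
		return 0;

	if ((uintptr_t)addr & 0x1) {			/* leading odd byte */
		*(uint8_t *)buf = read8(addr);
		done += 1;
		if (done == count)
			return done;
	}
	if (((uintptr_t)(addr + done) & 0x2) && count - done >= 2) {
		w = read16(addr + done);		/* reach 32-bit alignment */
		memcpy((uint8_t *)buf + done, &w, 2);
		done += 2;
	}

	size_t count32 = (count - done) & ~(size_t)0x3;	/* 32-bit aligned body */
	memcpy((uint8_t *)buf + done, (const void *)(addr + done), count32);
	done += count32;

	if ((count - done) & 0x2) {			/* 16-bit tail */
		w = read16(addr + done);
		memcpy((uint8_t *)buf + done, &w, 2);
		done += 2;
	}
	if ((count - done) & 0x1) {			/* final odd byte */
		*((uint8_t *)buf + done) = read8(addr + done);
		done += 1;
	}
	return done;
}

int main(void)
{
	uint8_t src[32], dst[32] = { 0 };

	for (size_t i = 0; i < sizeof(src); i++)
		src[i] = (uint8_t)i;

	/* Start at an odd source offset to exercise the head and tail paths. */
	size_t n = aligned_read(dst, src + 1, 13);
	printf("copied %zu bytes, match=%d\n", n, memcmp(dst, src + 1, n) == 0);
	return 0;
}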
Diffstat (limited to 'drivers/staging/vme')
 drivers/staging/vme/bridges/vme_ca91cx42.c | 94
 1 file changed, 89 insertions(+), 5 deletions(-)
diff --git a/drivers/staging/vme/bridges/vme_ca91cx42.c b/drivers/staging/vme/bridges/vme_ca91cx42.c
index 06bd793c52b..4d745623211 100644
--- a/drivers/staging/vme/bridges/vme_ca91cx42.c
+++ b/drivers/staging/vme/bridges/vme_ca91cx42.c
@@ -848,12 +848,57 @@ ssize_t ca91cx42_master_read(struct vme_master_resource *image, void *buf,
 		size_t count, loff_t offset)
 {
 	ssize_t retval;
+	void *addr = image->kern_base + offset;
+	unsigned int done = 0;
+	unsigned int count32;
+
+	if (count == 0)
+		return 0;
 
 	spin_lock(&(image->lock));
 
-	memcpy_fromio(buf, image->kern_base + offset, (unsigned int)count);
-	retval = count;
+	/* The following code handles VME address alignment problem
+	 * in order to assure the maximal data width cycle.
+	 * We cannot use memcpy_xxx directly here because it
+	 * may cut data transfer in 8-bits cycles, thus making
+	 * D16 cycle impossible.
+	 * From the other hand, the bridge itself assures that
+	 * maximal configured data cycle is used and splits it
+	 * automatically for non-aligned addresses.
+	 */
+	if ((int)addr & 0x1) {
+		*(u8 *)buf = ioread8(addr);
+		done += 1;
+		if (done == count)
+			goto out;
+	}
+	if ((int)addr & 0x2) {
+		if ((count - done) < 2) {
+			*(u8 *)(buf + done) = ioread8(addr + done);
+			done += 1;
+			goto out;
+		} else {
+			*(u16 *)(buf + done) = ioread16(addr + done);
+			done += 2;
+		}
+	}
 
+	count32 = (count - done) & ~0x3;
+	if (count32 > 0) {
+		memcpy_fromio(buf + done, addr + done, (unsigned int)count);
+		done += count32;
+	}
+
+	if ((count - done) & 0x2) {
+		*(u16 *)(buf + done) = ioread16(addr + done);
+		done += 2;
+	}
+	if ((count - done) & 0x1) {
+		*(u8 *)(buf + done) = ioread8(addr + done);
+		done += 1;
+	}
+out:
+	retval = count;
 	spin_unlock(&(image->lock));
 
 	return retval;
@@ -862,15 +907,54 @@ ssize_t ca91cx42_master_read(struct vme_master_resource *image, void *buf,
 ssize_t ca91cx42_master_write(struct vme_master_resource *image, void *buf,
 	size_t count, loff_t offset)
 {
-	int retval = 0;
+	ssize_t retval;
+	void *addr = image->kern_base + offset;
+	unsigned int done = 0;
+	unsigned int count32;
+
+	if (count == 0)
+		return 0;
 
 	spin_lock(&(image->lock));
 
-	memcpy_toio(image->kern_base + offset, buf, (unsigned int)count);
+	/* Here we apply for the same strategy we do in master_read
+	 * function in order to assure D16 cycle when required.
+	 */
+	if ((int)addr & 0x1) {
+		iowrite8(*(u8 *)buf, addr);
+		done += 1;
+		if (done == count)
+			goto out;
+	}
+	if ((int)addr & 0x2) {
+		if ((count - done) < 2) {
+			iowrite8(*(u8 *)(buf + done), addr + done);
+			done += 1;
+			goto out;
+		} else {
+			iowrite16(*(u16 *)(buf + done), addr + done);
+			done += 2;
+		}
+	}
+
+	count32 = (count - done) & ~0x3;
+	if (count32 > 0) {
+		memcpy_toio(addr + done, buf + done, count32);
+		done += count32;
+	}
+
+	if ((count - done) & 0x2) {
+		iowrite16(*(u16 *)(buf + done), addr + done);
+		done += 2;
+	}
+	if ((count - done) & 0x1) {
+		iowrite8(*(u8 *)(buf + done), addr + done);
+		done += 1;
+	}
+out:
 	retval = count;
 
 	spin_unlock(&(image->lock));
-
 	return retval;
 }
 
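For context, ca91cx42_master_read()/ca91cx42_master_write() are only reached through the VME core's master-window API, and the D16 requirement comes from the data width requested for that window. The sketch below shows a hypothetical caller; the function names (vme_master_request(), vme_master_set(), vme_master_read(), vme_master_free()) and VME_* constants are those of the staging VME framework of this era, but the exact prototypes, the header path, and the window parameters used here are assumptions to be checked against drivers/staging/vme/vme.h.

/*
 * Hypothetical consumer of a D16 master window. All prototypes and the
 * VME_A24/VME_SCT/VME_D16 flags are assumptions based on the staging VME
 * API of this period; adjust to match drivers/staging/vme/vme.h.
 */
#include <linux/device.h>
#include <linux/errno.h>
#include "vme.h"	/* assumed include path for the staging VME header */

static int d16_read_example(struct device *dev)
{
	struct vme_resource *res;
	u8 buf[64];
	ssize_t got;

	/* Ask for an A24 master window that issues single-cycle D16 transfers. */
	res = vme_master_request(dev, VME_A24, VME_SCT, VME_D16);
	if (!res)
		return -ENODEV;

	/* Map 64 KiB of VME space at an example base address of 0xC00000. */
	if (vme_master_set(res, 1, 0xC00000, 0x10000, VME_A24, VME_SCT, VME_D16)) {
		vme_master_free(res);
		return -EIO;
	}

	/*
	 * With this patch applied, the read is split into 8/16/32-bit
	 * accesses by ca91cx42_master_read(), so the bridge can keep the
	 * requested D16 data width on the bus.
	 */
	got = vme_master_read(res, buf, sizeof(buf), 0x100);

	vme_master_free(res);
	return got < 0 ? (int)got : 0;
}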