aboutsummaryrefslogtreecommitdiffstats
path: root/drivers/infiniband/hw/ipath/ipath_file_ops.c
diff options
context:
space:
mode:
authorBryan O'Sullivan <bos@pathscale.com>2006-07-01 07:36:03 -0400
committerLinus Torvalds <torvalds@g5.osdl.org>2006-07-01 12:56:00 -0400
commitf37bda92461313ad3bbfbf5660adc849c69718bf (patch)
treea5fe4737ca6b8fcbe2cf9b58466d6340ee12fe56 /drivers/infiniband/hw/ipath/ipath_file_ops.c
parent06993ca6bc46419027b45198a58447f4f05c14f6 (diff)
[PATCH] IB/ipath: memory management cleanups
Made in-memory rcvhdrq tail update be in dma_alloc'ed memory, not random user or special kernel (needed for ppc, also "just the right thing to do"). Some cleanups to make unexpected link transitions less likely to produce complaints about packet errors, and also to not leave SMA packets stuck and unable to go out. A few other random debug and comment cleanups. Always init rcvhdrq head/tail registers to 0, to avoid race conditions (should have been that way some time ago). Signed-off-by: Dave Olson <dave.olson@qlogic.com> Signed-off-by: Bryan O'Sullivan <bryan.osullivan@qlogic.com> Cc: "Michael S. Tsirkin" <mst@mellanox.co.il> Cc: Roland Dreier <rolandd@cisco.com> Signed-off-by: Andrew Morton <akpm@osdl.org> Signed-off-by: Linus Torvalds <torvalds@osdl.org>
Diffstat (limited to 'drivers/infiniband/hw/ipath/ipath_file_ops.c')
-rw-r--r--drivers/infiniband/hw/ipath/ipath_file_ops.c290
1 files changed, 115 insertions, 175 deletions
diff --git a/drivers/infiniband/hw/ipath/ipath_file_ops.c b/drivers/infiniband/hw/ipath/ipath_file_ops.c
index 03689dbe1a9..e89d3a17acd 100644
--- a/drivers/infiniband/hw/ipath/ipath_file_ops.c
+++ b/drivers/infiniband/hw/ipath/ipath_file_ops.c
@@ -123,6 +123,7 @@ static int ipath_get_base_info(struct ipath_portdata *pd,
123 * on to yet another method of dealing with this 123 * on to yet another method of dealing with this
124 */ 124 */
125 kinfo->spi_rcvhdr_base = (u64) pd->port_rcvhdrq_phys; 125 kinfo->spi_rcvhdr_base = (u64) pd->port_rcvhdrq_phys;
126 kinfo->spi_rcvhdr_tailaddr = (u64)pd->port_rcvhdrqtailaddr_phys;
126 kinfo->spi_rcv_egrbufs = (u64) pd->port_rcvegr_phys; 127 kinfo->spi_rcv_egrbufs = (u64) pd->port_rcvegr_phys;
127 kinfo->spi_pioavailaddr = (u64) dd->ipath_pioavailregs_phys; 128 kinfo->spi_pioavailaddr = (u64) dd->ipath_pioavailregs_phys;
128 kinfo->spi_status = (u64) kinfo->spi_pioavailaddr + 129 kinfo->spi_status = (u64) kinfo->spi_pioavailaddr +
@@ -785,11 +786,12 @@ static int ipath_create_user_egr(struct ipath_portdata *pd)
785 786
786bail_rcvegrbuf_phys: 787bail_rcvegrbuf_phys:
787 for (e = 0; e < pd->port_rcvegrbuf_chunks && 788 for (e = 0; e < pd->port_rcvegrbuf_chunks &&
788 pd->port_rcvegrbuf[e]; e++) 789 pd->port_rcvegrbuf[e]; e++) {
789 dma_free_coherent(&dd->pcidev->dev, size, 790 dma_free_coherent(&dd->pcidev->dev, size,
790 pd->port_rcvegrbuf[e], 791 pd->port_rcvegrbuf[e],
791 pd->port_rcvegrbuf_phys[e]); 792 pd->port_rcvegrbuf_phys[e]);
792 793
794 }
793 vfree(pd->port_rcvegrbuf_phys); 795 vfree(pd->port_rcvegrbuf_phys);
794 pd->port_rcvegrbuf_phys = NULL; 796 pd->port_rcvegrbuf_phys = NULL;
795bail_rcvegrbuf: 797bail_rcvegrbuf:
@@ -804,10 +806,7 @@ static int ipath_do_user_init(struct ipath_portdata *pd,
804{ 806{
805 int ret = 0; 807 int ret = 0;
806 struct ipath_devdata *dd = pd->port_dd; 808 struct ipath_devdata *dd = pd->port_dd;
807 u64 physaddr, uaddr, off, atmp;
808 struct page *pagep;
809 u32 head32; 809 u32 head32;
810 u64 head;
811 810
812 /* for now, if major version is different, bail */ 811 /* for now, if major version is different, bail */
813 if ((uinfo->spu_userversion >> 16) != IPATH_USER_SWMAJOR) { 812 if ((uinfo->spu_userversion >> 16) != IPATH_USER_SWMAJOR) {
@@ -832,54 +831,6 @@ static int ipath_do_user_init(struct ipath_portdata *pd,
832 831
833 /* for now we do nothing with rcvhdrcnt: uinfo->spu_rcvhdrcnt */ 832 /* for now we do nothing with rcvhdrcnt: uinfo->spu_rcvhdrcnt */
834 833
835 /* set up for the rcvhdr Q tail register writeback to user memory */
836 if (!uinfo->spu_rcvhdraddr ||
837 !access_ok(VERIFY_WRITE, (u64 __user *) (unsigned long)
838 uinfo->spu_rcvhdraddr, sizeof(u64))) {
839 ipath_dbg("Port %d rcvhdrtail addr %llx not valid\n",
840 pd->port_port,
841 (unsigned long long) uinfo->spu_rcvhdraddr);
842 ret = -EINVAL;
843 goto done;
844 }
845
846 off = offset_in_page(uinfo->spu_rcvhdraddr);
847 uaddr = PAGE_MASK & (unsigned long) uinfo->spu_rcvhdraddr;
848 ret = ipath_get_user_pages_nocopy(uaddr, &pagep);
849 if (ret) {
850 dev_info(&dd->pcidev->dev, "Failed to lookup and lock "
851 "address %llx for rcvhdrtail: errno %d\n",
852 (unsigned long long) uinfo->spu_rcvhdraddr, -ret);
853 goto done;
854 }
855 ipath_stats.sps_pagelocks++;
856 pd->port_rcvhdrtail_uaddr = uaddr;
857 pd->port_rcvhdrtail_pagep = pagep;
858 pd->port_rcvhdrtail_kvaddr =
859 page_address(pagep);
860 pd->port_rcvhdrtail_kvaddr += off;
861 physaddr = page_to_phys(pagep) + off;
862 ipath_cdbg(VERBOSE, "port %d user addr %llx hdrtailaddr, %llx "
863 "physical (off=%llx)\n",
864 pd->port_port,
865 (unsigned long long) uinfo->spu_rcvhdraddr,
866 (unsigned long long) physaddr, (unsigned long long) off);
867 ipath_write_kreg_port(dd, dd->ipath_kregs->kr_rcvhdrtailaddr,
868 pd->port_port, physaddr);
869 atmp = ipath_read_kreg64_port(dd,
870 dd->ipath_kregs->kr_rcvhdrtailaddr,
871 pd->port_port);
872 if (physaddr != atmp) {
873 ipath_dev_err(dd,
874 "Catastrophic software error, "
875 "RcvHdrTailAddr%u written as %llx, "
876 "read back as %llx\n", pd->port_port,
877 (unsigned long long) physaddr,
878 (unsigned long long) atmp);
879 ret = -EINVAL;
880 goto done;
881 }
882
883 /* for right now, kernel piobufs are at end, so port 1 is at 0 */ 834 /* for right now, kernel piobufs are at end, so port 1 is at 0 */
884 pd->port_piobufs = dd->ipath_piobufbase + 835 pd->port_piobufs = dd->ipath_piobufbase +
885 dd->ipath_pbufsport * (pd->port_port - 836 dd->ipath_pbufsport * (pd->port_port -
@@ -898,26 +849,18 @@ static int ipath_do_user_init(struct ipath_portdata *pd,
898 ret = ipath_create_user_egr(pd); 849 ret = ipath_create_user_egr(pd);
899 if (ret) 850 if (ret)
900 goto done; 851 goto done;
901 /* enable receives now */
902 /* atomically set enable bit for this port */
903 set_bit(INFINIPATH_R_PORTENABLE_SHIFT + pd->port_port,
904 &dd->ipath_rcvctrl);
905 852
906 /* 853 /*
907 * set the head registers for this port to the current values 854 * set the eager head register for this port to the current values
908 * of the tail pointers, since we don't know if they were 855 * of the tail pointers, since we don't know if they were
909 * updated on last use of the port. 856 * updated on last use of the port.
910 */ 857 */
911 head32 = ipath_read_ureg32(dd, ur_rcvhdrtail, pd->port_port);
912 head = (u64) head32;
913 ipath_write_ureg(dd, ur_rcvhdrhead, head, pd->port_port);
914 head32 = ipath_read_ureg32(dd, ur_rcvegrindextail, pd->port_port); 858 head32 = ipath_read_ureg32(dd, ur_rcvegrindextail, pd->port_port);
915 ipath_write_ureg(dd, ur_rcvegrindexhead, head32, pd->port_port); 859 ipath_write_ureg(dd, ur_rcvegrindexhead, head32, pd->port_port);
916 dd->ipath_lastegrheads[pd->port_port] = -1; 860 dd->ipath_lastegrheads[pd->port_port] = -1;
917 dd->ipath_lastrcvhdrqtails[pd->port_port] = -1; 861 dd->ipath_lastrcvhdrqtails[pd->port_port] = -1;
918 ipath_cdbg(VERBOSE, "Wrote port%d head %llx, egrhead %x from " 862 ipath_cdbg(VERBOSE, "Wrote port%d egrhead %x from tail regs\n",
919 "tail regs\n", pd->port_port, 863 pd->port_port, head32);
920 (unsigned long long) head, head32);
921 pd->port_tidcursor = 0; /* start at beginning after open */ 864 pd->port_tidcursor = 0; /* start at beginning after open */
922 /* 865 /*
923 * now enable the port; the tail registers will be written to memory 866 * now enable the port; the tail registers will be written to memory
@@ -926,24 +869,76 @@ static int ipath_do_user_init(struct ipath_portdata *pd,
926 * transition from 0 to 1, so clear it first, then set it as part of 869 * transition from 0 to 1, so clear it first, then set it as part of
927 * enabling the port. This will (very briefly) affect any other 870 * enabling the port. This will (very briefly) affect any other
928 * open ports, but it shouldn't be long enough to be an issue. 871 * open ports, but it shouldn't be long enough to be an issue.
 872 * We explicitly set the in-memory copy to 0 beforehand, so we don't
873 * have to wait to be sure the DMA update has happened.
929 */ 874 */
875 *pd->port_rcvhdrtail_kvaddr = 0ULL;
876 set_bit(INFINIPATH_R_PORTENABLE_SHIFT + pd->port_port,
877 &dd->ipath_rcvctrl);
930 ipath_write_kreg(dd, dd->ipath_kregs->kr_rcvctrl, 878 ipath_write_kreg(dd, dd->ipath_kregs->kr_rcvctrl,
931 dd->ipath_rcvctrl & ~INFINIPATH_R_TAILUPD); 879 dd->ipath_rcvctrl & ~INFINIPATH_R_TAILUPD);
932 ipath_write_kreg(dd, dd->ipath_kregs->kr_rcvctrl, 880 ipath_write_kreg(dd, dd->ipath_kregs->kr_rcvctrl,
933 dd->ipath_rcvctrl); 881 dd->ipath_rcvctrl);
934
935done: 882done:
936 return ret; 883 return ret;
937} 884}
938 885
886
887/* common code for the mappings on dma_alloc_coherent mem */
888static int ipath_mmap_mem(struct vm_area_struct *vma,
889 struct ipath_portdata *pd, unsigned len,
890 int write_ok, dma_addr_t addr, char *what)
891{
892 struct ipath_devdata *dd = pd->port_dd;
893 unsigned pfn = (unsigned long)addr >> PAGE_SHIFT;
894 int ret;
895
896 if ((vma->vm_end - vma->vm_start) > len) {
897 dev_info(&dd->pcidev->dev,
898 "FAIL on %s: len %lx > %x\n", what,
899 vma->vm_end - vma->vm_start, len);
900 ret = -EFAULT;
901 goto bail;
902 }
903
904 if (!write_ok) {
905 if (vma->vm_flags & VM_WRITE) {
906 dev_info(&dd->pcidev->dev,
907 "%s must be mapped readonly\n", what);
908 ret = -EPERM;
909 goto bail;
910 }
911
912 /* don't allow them to later change with mprotect */
913 vma->vm_flags &= ~VM_MAYWRITE;
914 }
915
916 ret = remap_pfn_range(vma, vma->vm_start, pfn,
917 len, vma->vm_page_prot);
918 if (ret)
919 dev_info(&dd->pcidev->dev,
920 "%s port%u mmap of %lx, %x bytes r%c failed: %d\n",
921 what, pd->port_port, (unsigned long)addr, len,
922 write_ok?'w':'o', ret);
923 else
924 ipath_cdbg(VERBOSE, "%s port%u mmaped %lx, %x bytes r%c\n",
925 what, pd->port_port, (unsigned long)addr, len,
926 write_ok?'w':'o');
927bail:
928 return ret;
929}
930
939static int mmap_ureg(struct vm_area_struct *vma, struct ipath_devdata *dd, 931static int mmap_ureg(struct vm_area_struct *vma, struct ipath_devdata *dd,
940 u64 ureg) 932 u64 ureg)
941{ 933{
942 unsigned long phys; 934 unsigned long phys;
943 int ret; 935 int ret;
944 936
945 /* it's the real hardware, so io_remap works */ 937 /*
946 938 * This is real hardware, so use io_remap. This is the mechanism
939 * for the user process to update the head registers for their port
940 * in the chip.
941 */
947 if ((vma->vm_end - vma->vm_start) > PAGE_SIZE) { 942 if ((vma->vm_end - vma->vm_start) > PAGE_SIZE) {
948 dev_info(&dd->pcidev->dev, "FAIL mmap userreg: reqlen " 943 dev_info(&dd->pcidev->dev, "FAIL mmap userreg: reqlen "
949 "%lx > PAGE\n", vma->vm_end - vma->vm_start); 944 "%lx > PAGE\n", vma->vm_end - vma->vm_start);
@@ -969,10 +964,11 @@ static int mmap_piobufs(struct vm_area_struct *vma,
969 int ret; 964 int ret;
970 965
971 /* 966 /*
972 * When we map the PIO buffers, we want to map them as writeonly, no 967 * When we map the PIO buffers in the chip, we want to map them as
973 * read possible. 968 * writeonly, no read possible. This prevents access to previous
969 * process data, and catches users who might try to read the i/o
970 * space due to a bug.
974 */ 971 */
975
976 if ((vma->vm_end - vma->vm_start) > 972 if ((vma->vm_end - vma->vm_start) >
977 (dd->ipath_pbufsport * dd->ipath_palign)) { 973 (dd->ipath_pbufsport * dd->ipath_palign)) {
978 dev_info(&dd->pcidev->dev, "FAIL mmap piobufs: " 974 dev_info(&dd->pcidev->dev, "FAIL mmap piobufs: "
@@ -983,11 +979,10 @@ static int mmap_piobufs(struct vm_area_struct *vma,
983 } 979 }
984 980
985 phys = dd->ipath_physaddr + pd->port_piobufs; 981 phys = dd->ipath_physaddr + pd->port_piobufs;
982
986 /* 983 /*
987 * Do *NOT* mark this as non-cached (PWT bit), or we don't get the 984 * Don't mark this as non-cached, or we don't get the
988 * write combining behavior we want on the PIO buffers! 985 * write combining behavior we want on the PIO buffers!
989 * vma->vm_page_prot =
990 * pgprot_noncached(vma->vm_page_prot);
991 */ 986 */
992 987
993 if (vma->vm_flags & VM_READ) { 988 if (vma->vm_flags & VM_READ) {
@@ -999,8 +994,7 @@ static int mmap_piobufs(struct vm_area_struct *vma,
999 } 994 }
1000 995
1001 /* don't allow them to later change to readable with mprotect */ 996 /* don't allow them to later change to readable with mprotect */
1002 997 vma->vm_flags &= ~VM_MAYREAD;
1003 vma->vm_flags &= ~VM_MAYWRITE;
1004 vma->vm_flags |= VM_DONTCOPY | VM_DONTEXPAND; 998 vma->vm_flags |= VM_DONTCOPY | VM_DONTEXPAND;
1005 999
1006 ret = io_remap_pfn_range(vma, vma->vm_start, phys >> PAGE_SHIFT, 1000 ret = io_remap_pfn_range(vma, vma->vm_start, phys >> PAGE_SHIFT,
@@ -1019,11 +1013,6 @@ static int mmap_rcvegrbufs(struct vm_area_struct *vma,
1019 dma_addr_t *phys; 1013 dma_addr_t *phys;
1020 int ret; 1014 int ret;
1021 1015
1022 if (!pd->port_rcvegrbuf) {
1023 ret = -EFAULT;
1024 goto bail;
1025 }
1026
1027 size = pd->port_rcvegrbuf_size; 1016 size = pd->port_rcvegrbuf_size;
1028 total_size = pd->port_rcvegrbuf_chunks * size; 1017 total_size = pd->port_rcvegrbuf_chunks * size;
1029 if ((vma->vm_end - vma->vm_start) > total_size) { 1018 if ((vma->vm_end - vma->vm_start) > total_size) {
@@ -1041,13 +1030,12 @@ static int mmap_rcvegrbufs(struct vm_area_struct *vma,
1041 ret = -EPERM; 1030 ret = -EPERM;
1042 goto bail; 1031 goto bail;
1043 } 1032 }
1033 /* don't allow them to later change to writeable with mprotect */
1034 vma->vm_flags &= ~VM_MAYWRITE;
1044 1035
1045 start = vma->vm_start; 1036 start = vma->vm_start;
1046 phys = pd->port_rcvegrbuf_phys; 1037 phys = pd->port_rcvegrbuf_phys;
1047 1038
1048 /* don't allow them to later change to writeable with mprotect */
1049 vma->vm_flags &= ~VM_MAYWRITE;
1050
1051 for (i = 0; i < pd->port_rcvegrbuf_chunks; i++, start += size) { 1039 for (i = 0; i < pd->port_rcvegrbuf_chunks; i++, start += size) {
1052 ret = remap_pfn_range(vma, start, phys[i] >> PAGE_SHIFT, 1040 ret = remap_pfn_range(vma, start, phys[i] >> PAGE_SHIFT,
1053 size, vma->vm_page_prot); 1041 size, vma->vm_page_prot);
@@ -1060,78 +1048,6 @@ bail:
1060 return ret; 1048 return ret;
1061} 1049}
1062 1050
1063static int mmap_rcvhdrq(struct vm_area_struct *vma,
1064 struct ipath_portdata *pd)
1065{
1066 struct ipath_devdata *dd = pd->port_dd;
1067 size_t total_size;
1068 int ret;
1069
1070 /*
1071 * kmalloc'ed memory, physically contiguous; this is from
1072 * spi_rcvhdr_base; we allow user to map read-write so they can
1073 * write hdrq entries to allow protocol code to directly poll
1074 * whether a hdrq entry has been written.
1075 */
1076 total_size = ALIGN(dd->ipath_rcvhdrcnt * dd->ipath_rcvhdrentsize *
1077 sizeof(u32), PAGE_SIZE);
1078 if ((vma->vm_end - vma->vm_start) > total_size) {
1079 dev_info(&dd->pcidev->dev,
1080 "FAIL on rcvhdrq: reqlen %lx > actual %lx\n",
1081 vma->vm_end - vma->vm_start,
1082 (unsigned long) total_size);
1083 ret = -EFAULT;
1084 goto bail;
1085 }
1086
1087 ret = remap_pfn_range(vma, vma->vm_start,
1088 pd->port_rcvhdrq_phys >> PAGE_SHIFT,
1089 vma->vm_end - vma->vm_start,
1090 vma->vm_page_prot);
1091bail:
1092 return ret;
1093}
1094
1095static int mmap_pioavailregs(struct vm_area_struct *vma,
1096 struct ipath_portdata *pd)
1097{
1098 struct ipath_devdata *dd = pd->port_dd;
1099 int ret;
1100
1101 /*
1102 * when we map the PIO bufferavail registers, we want to map them as
1103 * readonly, no write possible.
1104 *
1105 * kmalloc'ed memory, physically contiguous, one page only, readonly
1106 */
1107
1108 if ((vma->vm_end - vma->vm_start) > PAGE_SIZE) {
1109 dev_info(&dd->pcidev->dev, "FAIL on pioavailregs_dma: "
1110 "reqlen %lx > actual %lx\n",
1111 vma->vm_end - vma->vm_start,
1112 (unsigned long) PAGE_SIZE);
1113 ret = -EFAULT;
1114 goto bail;
1115 }
1116
1117 if (vma->vm_flags & VM_WRITE) {
1118 dev_info(&dd->pcidev->dev,
1119 "Can't map pioavailregs as writable (flags=%lx)\n",
1120 vma->vm_flags);
1121 ret = -EPERM;
1122 goto bail;
1123 }
1124
1125 /* don't allow them to later change with mprotect */
1126 vma->vm_flags &= ~VM_MAYWRITE;
1127
1128 ret = remap_pfn_range(vma, vma->vm_start,
1129 dd->ipath_pioavailregs_phys >> PAGE_SHIFT,
1130 PAGE_SIZE, vma->vm_page_prot);
1131bail:
1132 return ret;
1133}
1134
1135/** 1051/**
1136 * ipath_mmap - mmap various structures into user space 1052 * ipath_mmap - mmap various structures into user space
1137 * @fp: the file pointer 1053 * @fp: the file pointer
@@ -1151,6 +1067,7 @@ static int ipath_mmap(struct file *fp, struct vm_area_struct *vma)
1151 1067
1152 pd = port_fp(fp); 1068 pd = port_fp(fp);
1153 dd = pd->port_dd; 1069 dd = pd->port_dd;
1070
1154 /* 1071 /*
1155 * This is the ipath_do_user_init() code, mapping the shared buffers 1072 * This is the ipath_do_user_init() code, mapping the shared buffers
1156 * into the user process. The address referred to by vm_pgoff is the 1073 * into the user process. The address referred to by vm_pgoff is the
@@ -1160,29 +1077,59 @@ static int ipath_mmap(struct file *fp, struct vm_area_struct *vma)
1160 pgaddr = vma->vm_pgoff << PAGE_SHIFT; 1077 pgaddr = vma->vm_pgoff << PAGE_SHIFT;
1161 1078
1162 /* 1079 /*
1163 * note that ureg does *NOT* have the kregvirt as part of it, to be 1080 * Must fit in 40 bits for our hardware; some checked elsewhere,
1164 * sure that for 32 bit programs, we don't end up trying to map a > 1081 * but we'll be paranoid. Check for 0 is mostly in case one of the
1165 * 44 address. Has to match ipath_get_base_info() code that sets 1082 * allocations failed, but user called mmap anyway. We want to catch
1166 * __spi_uregbase 1083 * that before it can match.
1167 */ 1084 */
1085 if (!pgaddr || pgaddr >= (1ULL<<40)) {
1086 ipath_dev_err(dd, "Bad phys addr %llx, start %lx, end %lx\n",
1087 (unsigned long long)pgaddr, vma->vm_start, vma->vm_end);
1088 return -EINVAL;
1089 }
1168 1090
1091 /* just the offset of the port user registers, not physical addr */
1169 ureg = dd->ipath_uregbase + dd->ipath_palign * pd->port_port; 1092 ureg = dd->ipath_uregbase + dd->ipath_palign * pd->port_port;
1170 1093
1171 ipath_cdbg(MM, "pgaddr %llx vm_start=%lx len %lx port %u:%u\n", 1094 ipath_cdbg(MM, "ushare: pgaddr %llx vm_start=%lx, vmlen %lx\n",
1172 (unsigned long long) pgaddr, vma->vm_start, 1095 (unsigned long long) pgaddr, vma->vm_start,
1173 vma->vm_end - vma->vm_start, dd->ipath_unit, 1096 vma->vm_end - vma->vm_start);
1174 pd->port_port);
1175 1097
1176 if (pgaddr == ureg) 1098 if (vma->vm_start & (PAGE_SIZE-1)) {
1099 ipath_dev_err(dd,
1100 "vm_start not aligned: %lx, end=%lx phys %lx\n",
1101 vma->vm_start, vma->vm_end, (unsigned long)pgaddr);
1102 ret = -EINVAL;
1103 }
1104 else if (pgaddr == ureg)
1177 ret = mmap_ureg(vma, dd, ureg); 1105 ret = mmap_ureg(vma, dd, ureg);
1178 else if (pgaddr == pd->port_piobufs) 1106 else if (pgaddr == pd->port_piobufs)
1179 ret = mmap_piobufs(vma, dd, pd); 1107 ret = mmap_piobufs(vma, dd, pd);
1180 else if (pgaddr == (u64) pd->port_rcvegr_phys) 1108 else if (pgaddr == (u64) pd->port_rcvegr_phys)
1181 ret = mmap_rcvegrbufs(vma, pd); 1109 ret = mmap_rcvegrbufs(vma, pd);
1182 else if (pgaddr == (u64) pd->port_rcvhdrq_phys) 1110 else if (pgaddr == (u64) pd->port_rcvhdrq_phys) {
1183 ret = mmap_rcvhdrq(vma, pd); 1111 /*
1112 * The rcvhdrq itself; readonly except on HT-400 (so have
1113 * to allow writable mapping), multiple pages, contiguous
1114 * from an i/o perspective.
1115 */
1116 unsigned total_size =
1117 ALIGN(dd->ipath_rcvhdrcnt * dd->ipath_rcvhdrentsize
1118 * sizeof(u32), PAGE_SIZE);
1119 ret = ipath_mmap_mem(vma, pd, total_size, 1,
1120 pd->port_rcvhdrq_phys,
1121 "rcvhdrq");
1122 }
1123 else if (pgaddr == (u64)pd->port_rcvhdrqtailaddr_phys)
1124 /* in-memory copy of rcvhdrq tail register */
1125 ret = ipath_mmap_mem(vma, pd, PAGE_SIZE, 0,
1126 pd->port_rcvhdrqtailaddr_phys,
1127 "rcvhdrq tail");
1184 else if (pgaddr == dd->ipath_pioavailregs_phys) 1128 else if (pgaddr == dd->ipath_pioavailregs_phys)
1185 ret = mmap_pioavailregs(vma, pd); 1129 /* in-memory copy of pioavail registers */
1130 ret = ipath_mmap_mem(vma, pd, PAGE_SIZE, 0,
1131 dd->ipath_pioavailregs_phys,
1132 "pioavail registers");
1186 else 1133 else
1187 ret = -EINVAL; 1134 ret = -EINVAL;
1188 1135
@@ -1539,14 +1486,6 @@ static int ipath_close(struct inode *in, struct file *fp)
1539 } 1486 }
1540 1487
1541 if (dd->ipath_kregbase) { 1488 if (dd->ipath_kregbase) {
1542 if (pd->port_rcvhdrtail_uaddr) {
1543 pd->port_rcvhdrtail_uaddr = 0;
1544 pd->port_rcvhdrtail_kvaddr = NULL;
1545 ipath_release_user_pages_on_close(
1546 &pd->port_rcvhdrtail_pagep, 1);
1547 pd->port_rcvhdrtail_pagep = NULL;
1548 ipath_stats.sps_pageunlocks++;
1549 }
1550 ipath_write_kreg_port( 1489 ipath_write_kreg_port(
1551 dd, dd->ipath_kregs->kr_rcvhdrtailaddr, 1490 dd, dd->ipath_kregs->kr_rcvhdrtailaddr,
1552 port, 0ULL); 1491 port, 0ULL);
@@ -1583,9 +1522,9 @@ static int ipath_close(struct inode *in, struct file *fp)
1583 1522
1584 dd->ipath_f_clear_tids(dd, pd->port_port); 1523 dd->ipath_f_clear_tids(dd, pd->port_port);
1585 1524
1586 ipath_free_pddata(dd, pd->port_port, 0); 1525 dd->ipath_pd[pd->port_port] = NULL; /* before releasing mutex */
1587
1588 mutex_unlock(&ipath_mutex); 1526 mutex_unlock(&ipath_mutex);
1527 ipath_free_pddata(dd, pd); /* after releasing the mutex */
1589 1528
1590 return ret; 1529 return ret;
1591} 1530}
@@ -1905,3 +1844,4 @@ void ipath_user_remove(struct ipath_devdata *dd)
1905bail: 1844bail:
1906 return; 1845 return;
1907} 1846}
1847