author     Heiko Carstens <heiko.carstens@de.ibm.com>       2006-09-18 16:28:49 -0400
committer  James Bottomley <jejb@mulgrave.il.steeleye.com>  2006-09-23 18:54:24 -0400
commit     dd52e0eaf891cd85bf2ca057c15ed6bfd76db4e6 (patch)
tree       dc457fe1b732716b715c05864ab02be767414cb4 /drivers/s390/scsi/zfcp_fsf.c
parent     d136205182b1ea4897da31e325a296f8831a6796 (diff)
[SCSI] zfcp: create private slab caches to guarantee proper data alignment
Create private slab caches in order to guarantee proper alignment of
data structures that get passed to hardware.

Sidenote: with this patch slab cache debugging will finally work on
s390 (at least no known problems left).

Furthermore this patch does some minor cleanups:
- store ptr for transport template in struct zfcp_data

Signed-off-by: Heiko Carstens <heiko.carstens@de.ibm.com>
Signed-off-by: Andreas Herrmann <aherrman@de.ibm.com>

Compile fix ups and
Signed-off-by: James Bottomley <James.Bottomley@SteelEye.com>
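For orientation, a minimal sketch (not code from this patch) of how such a private cache could be created in the driver's init path. The cache name string and the alignment value are illustrative assumptions; zfcp_data.fsf_req_qtcb_cache and struct zfcp_fsf_req_qtcb are the names the diff below actually uses, and the six-argument kmem_cache_create() matches kernels of this vintage (constructor/destructor pointers were still part of the signature).

/* Sketch only -- assumed to run once during module initialisation.
 * A dedicated cache lets the allocator honour the alignment that the
 * hardware expects for the QTCB embedded in each request.
 */
zfcp_data.fsf_req_qtcb_cache =
	kmem_cache_create("zfcp_fsf_req_qtcb",		/* illustrative name */
			  sizeof(struct zfcp_fsf_req_qtcb),
			  8,				/* assumed alignment */
			  0,				/* no slab flags */
			  NULL, NULL);			/* no ctor/dtor */
if (!zfcp_data.fsf_req_qtcb_cache)
	return -ENOMEM;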
Diffstat (limited to 'drivers/s390/scsi/zfcp_fsf.c')
-rw-r--r--  drivers/s390/scsi/zfcp_fsf.c | 33
1 files changed, 22 insertions, 11 deletions
diff --git a/drivers/s390/scsi/zfcp_fsf.c b/drivers/s390/scsi/zfcp_fsf.c
index ff2eacf5ec8c..4913ffbb2fc8 100644
--- a/drivers/s390/scsi/zfcp_fsf.c
+++ b/drivers/s390/scsi/zfcp_fsf.c
@@ -100,14 +100,19 @@ zfcp_fsf_req_alloc(mempool_t *pool, int req_flags)
 	if (req_flags & ZFCP_REQ_NO_QTCB)
 		size = sizeof(struct zfcp_fsf_req);
 	else
-		size = sizeof(struct zfcp_fsf_req_pool_element);
+		size = sizeof(struct zfcp_fsf_req_qtcb);
 
-	if (likely(pool != NULL))
+	if (likely(pool))
 		ptr = mempool_alloc(pool, GFP_ATOMIC);
-	else
-		ptr = kmalloc(size, GFP_ATOMIC);
+	else {
+		if (req_flags & ZFCP_REQ_NO_QTCB)
+			ptr = kmalloc(size, GFP_ATOMIC);
+		else
+			ptr = kmem_cache_alloc(zfcp_data.fsf_req_qtcb_cache,
+					       SLAB_ATOMIC);
+	}
 
-	if (unlikely(NULL == ptr))
+	if (unlikely(!ptr))
 		goto out;
 
 	memset(ptr, 0, size);
@@ -115,9 +120,8 @@ zfcp_fsf_req_alloc(mempool_t *pool, int req_flags)
 	if (req_flags & ZFCP_REQ_NO_QTCB) {
 		fsf_req = (struct zfcp_fsf_req *) ptr;
 	} else {
-		fsf_req = &((struct zfcp_fsf_req_pool_element *) ptr)->fsf_req;
-		fsf_req->qtcb =
-			&((struct zfcp_fsf_req_pool_element *) ptr)->qtcb;
+		fsf_req = &((struct zfcp_fsf_req_qtcb *) ptr)->fsf_req;
+		fsf_req->qtcb = &((struct zfcp_fsf_req_qtcb *) ptr)->qtcb;
 	}
 
 	fsf_req->pool = pool;
@@ -139,10 +143,17 @@ zfcp_fsf_req_alloc(mempool_t *pool, int req_flags)
 void
 zfcp_fsf_req_free(struct zfcp_fsf_req *fsf_req)
 {
-	if (likely(fsf_req->pool != NULL))
+	if (likely(fsf_req->pool)) {
 		mempool_free(fsf_req, fsf_req->pool);
-	else
-		kfree(fsf_req);
+		return;
+	}
+
+	if (fsf_req->qtcb) {
+		kmem_cache_free(zfcp_data.fsf_req_qtcb_cache, fsf_req);
+		return;
+	}
+
+	kfree(fsf_req);
 }
 
 /**
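For reference when reading the casts in the allocation path above, the container type is presumably a simple pairing of the request with its QTCB in one slab object. A sketch of that layout, assuming struct zfcp_fsf_req_qtcb is a straight rename of the old struct zfcp_fsf_req_pool_element (its real definition lives outside this file and is not part of this diff):

/* Assumed layout of the cache element: request header followed by the
 * QTCB, so one allocation from zfcp_data.fsf_req_qtcb_cache yields both
 * with the alignment the private cache guarantees.
 */
struct zfcp_fsf_req_qtcb {
	struct zfcp_fsf_req fsf_req;
	struct fsf_qtcb qtcb;
};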