author     Christof Schmitt <christof.schmitt@de.ibm.com>   2011-02-22 13:54:40 -0500
committer  James Bottomley <James.Bottomley@suse.de>        2011-02-25 12:01:59 -0500
commit     c7b279ae51942c14529bf2806685e9c658f28611 (patch)
tree       76d48640ccd62f7f375180cb9179f03079ac8c0c /drivers/s390
parent     7c35e77b96b2f0af8c278c13d484d42dad3c7422 (diff)
[SCSI] zfcp: Replace kmem_cache for "status read" data
zfcp requires a mempool for the status read data blocks to resubmit
the "status read" requests at any time. Each status read data block
has the size of a page (4096 bytes) and needs to be placed in one
page.
Instead of having a kmem_cache for allocating page-sized chunks, use
mempool_create_page_pool to create a mempool that returns pages, and
remove the zfcp kmem_cache.
Signed-off-by: Christof Schmitt <christof.schmitt@de.ibm.com>
Signed-off-by: Steffen Maier <maier@linux.vnet.ibm.com>
Signed-off-by: James Bottomley <James.Bottomley@suse.de>
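As background (not part of the patch), the page-pool pattern adopted here looks roughly like the sketch below; all example_* names and the buffer layout are hypothetical. mempool_create_page_pool() hands out struct page pointers, so callers map an element with page_address() and convert it back with virt_to_page() before returning it to the pool, which is what the zfcp_fsf.c hunks below do with sr_buf.

/*
 * Illustrative sketch only -- not zfcp code.  It shows the pattern the
 * patch switches to: a mempool of order-0 pages instead of a kmem_cache
 * of page-sized objects.  All example_* names are made up.
 */
#include <linux/bug.h>
#include <linux/errno.h>
#include <linux/mempool.h>
#include <linux/mm.h>

#define EXAMPLE_POOL_MIN 16		/* plays the role of FSF_STATUS_READS_RECOM */

struct example_buffer {			/* must fit into a single page */
	char payload[512];
};

static mempool_t *example_pool;

static int example_pool_create(void)
{
	BUILD_BUG_ON(sizeof(struct example_buffer) > PAGE_SIZE);

	/* second argument is the page order; 0 means one page per element */
	example_pool = mempool_create_page_pool(EXAMPLE_POOL_MIN, 0);
	return example_pool ? 0 : -ENOMEM;
}

static struct example_buffer *example_buffer_get(void)
{
	/* the pool returns struct page pointers, not buffer pointers */
	struct page *page = mempool_alloc(example_pool, GFP_ATOMIC);

	return page ? page_address(page) : NULL;
}

static void example_buffer_put(struct example_buffer *buf)
{
	/* convert the buffer address back to its page before freeing */
	mempool_free(virt_to_page(buf), example_pool);
}

The BUILD_BUG_ON mirrors the check the patch adds in zfcp_allocate_low_mem_buffers(): it fails the build if the status read buffer ever outgrows a single page.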
Diffstat (limited to 'drivers/s390')
-rw-r--r--  drivers/s390/scsi/zfcp_aux.c  20
-rw-r--r--  drivers/s390/scsi/zfcp_def.h   3
-rw-r--r--  drivers/s390/scsi/zfcp_erp.c   2
-rw-r--r--  drivers/s390/scsi/zfcp_fsf.c  12
4 files changed, 15 insertions(+), 22 deletions(-)
diff --git a/drivers/s390/scsi/zfcp_aux.c b/drivers/s390/scsi/zfcp_aux.c
index 51c666fb67a4..81e185602bb2 100644
--- a/drivers/s390/scsi/zfcp_aux.c
+++ b/drivers/s390/scsi/zfcp_aux.c
@@ -132,11 +132,6 @@ static int __init zfcp_module_init(void)
 	if (!zfcp_data.qtcb_cache)
 		goto out_qtcb_cache;
 
-	zfcp_data.sr_buffer_cache = zfcp_cache_hw_align("zfcp_sr",
-					sizeof(struct fsf_status_read_buffer));
-	if (!zfcp_data.sr_buffer_cache)
-		goto out_sr_cache;
-
 	zfcp_data.gid_pn_cache = zfcp_cache_hw_align("zfcp_gid",
 					sizeof(struct zfcp_fc_gid_pn));
 	if (!zfcp_data.gid_pn_cache)
@@ -181,8 +176,6 @@ out_transport:
 out_adisc_cache:
 	kmem_cache_destroy(zfcp_data.gid_pn_cache);
 out_gid_cache:
-	kmem_cache_destroy(zfcp_data.sr_buffer_cache);
-out_sr_cache:
 	kmem_cache_destroy(zfcp_data.qtcb_cache);
 out_qtcb_cache:
 	kmem_cache_destroy(zfcp_data.gpn_ft_cache);
@@ -199,7 +192,6 @@ static void __exit zfcp_module_exit(void)
 	fc_release_transport(zfcp_data.scsi_transport_template);
 	kmem_cache_destroy(zfcp_data.adisc_cache);
 	kmem_cache_destroy(zfcp_data.gid_pn_cache);
-	kmem_cache_destroy(zfcp_data.sr_buffer_cache);
 	kmem_cache_destroy(zfcp_data.qtcb_cache);
 	kmem_cache_destroy(zfcp_data.gpn_ft_cache);
 }
@@ -264,10 +256,10 @@ static int zfcp_allocate_low_mem_buffers(struct zfcp_adapter *adapter)
 	if (!adapter->pool.qtcb_pool)
 		return -ENOMEM;
 
-	adapter->pool.status_read_data =
-		mempool_create_slab_pool(FSF_STATUS_READS_RECOM,
-					 zfcp_data.sr_buffer_cache);
-	if (!adapter->pool.status_read_data)
+	BUILD_BUG_ON(sizeof(struct fsf_status_read_buffer) > PAGE_SIZE);
+	adapter->pool.sr_data =
+		mempool_create_page_pool(FSF_STATUS_READS_RECOM, 0);
+	if (!adapter->pool.sr_data)
 		return -ENOMEM;
 
 	adapter->pool.gid_pn =
@@ -290,8 +282,8 @@ static void zfcp_free_low_mem_buffers(struct zfcp_adapter *adapter)
 	mempool_destroy(adapter->pool.qtcb_pool);
 	if (adapter->pool.status_read_req)
 		mempool_destroy(adapter->pool.status_read_req);
-	if (adapter->pool.status_read_data)
-		mempool_destroy(adapter->pool.status_read_data);
+	if (adapter->pool.sr_data)
+		mempool_destroy(adapter->pool.sr_data);
 	if (adapter->pool.gid_pn)
 		mempool_destroy(adapter->pool.gid_pn);
 }
diff --git a/drivers/s390/scsi/zfcp_def.h b/drivers/s390/scsi/zfcp_def.h
index 89e43e172918..93ce500f8978 100644
--- a/drivers/s390/scsi/zfcp_def.h
+++ b/drivers/s390/scsi/zfcp_def.h
@@ -107,7 +107,7 @@ struct zfcp_adapter_mempool {
 	mempool_t *scsi_req;
 	mempool_t *scsi_abort;
 	mempool_t *status_read_req;
-	mempool_t *status_read_data;
+	mempool_t *sr_data;
 	mempool_t *gid_pn;
 	mempool_t *qtcb_pool;
 };
@@ -319,7 +319,6 @@ struct zfcp_data {
 	struct scsi_transport_template *scsi_transport_template;
 	struct kmem_cache *gpn_ft_cache;
 	struct kmem_cache *qtcb_cache;
-	struct kmem_cache *sr_buffer_cache;
 	struct kmem_cache *gid_pn_cache;
 	struct kmem_cache *adisc_cache;
 };
diff --git a/drivers/s390/scsi/zfcp_erp.c b/drivers/s390/scsi/zfcp_erp.c
index e003e306f870..6c1cddf0d0a0 100644
--- a/drivers/s390/scsi/zfcp_erp.c
+++ b/drivers/s390/scsi/zfcp_erp.c
@@ -732,7 +732,7 @@ static int zfcp_erp_adapter_strategy_open_fsf(struct zfcp_erp_action *act)
 	if (zfcp_erp_adapter_strategy_open_fsf_xport(act) == ZFCP_ERP_FAILED)
 		return ZFCP_ERP_FAILED;
 
-	if (mempool_resize(act->adapter->pool.status_read_data,
+	if (mempool_resize(act->adapter->pool.sr_data,
 			   act->adapter->stat_read_buf_num, GFP_KERNEL))
 		return ZFCP_ERP_FAILED;
 
diff --git a/drivers/s390/scsi/zfcp_fsf.c b/drivers/s390/scsi/zfcp_fsf.c
index 6efaea9207ca..a2b0e8435fc3 100644
--- a/drivers/s390/scsi/zfcp_fsf.c
+++ b/drivers/s390/scsi/zfcp_fsf.c
@@ -212,7 +212,7 @@ static void zfcp_fsf_status_read_handler(struct zfcp_fsf_req *req)
 
 	if (req->status & ZFCP_STATUS_FSFREQ_DISMISSED) {
 		zfcp_dbf_hba_fsf_uss("fssrh_1", req);
-		mempool_free(sr_buf, adapter->pool.status_read_data);
+		mempool_free(virt_to_page(sr_buf), adapter->pool.sr_data);
 		zfcp_fsf_req_free(req);
 		return;
 	}
@@ -265,7 +265,7 @@ static void zfcp_fsf_status_read_handler(struct zfcp_fsf_req *req)
 		break;
 	}
 
-	mempool_free(sr_buf, adapter->pool.status_read_data);
+	mempool_free(virt_to_page(sr_buf), adapter->pool.sr_data);
 	zfcp_fsf_req_free(req);
 
 	atomic_inc(&adapter->stat_miss);
@@ -723,6 +723,7 @@ int zfcp_fsf_status_read(struct zfcp_qdio *qdio)
 	struct zfcp_adapter *adapter = qdio->adapter;
 	struct zfcp_fsf_req *req;
 	struct fsf_status_read_buffer *sr_buf;
+	struct page *page;
 	int retval = -EIO;
 
 	spin_lock_irq(&qdio->req_q_lock);
@@ -736,11 +737,12 @@ int zfcp_fsf_status_read(struct zfcp_qdio *qdio)
 		goto out;
 	}
 
-	sr_buf = mempool_alloc(adapter->pool.status_read_data, GFP_ATOMIC);
-	if (!sr_buf) {
+	page = mempool_alloc(adapter->pool.sr_data, GFP_ATOMIC);
+	if (!page) {
 		retval = -ENOMEM;
 		goto failed_buf;
 	}
+	sr_buf = page_address(page);
 	memset(sr_buf, 0, sizeof(*sr_buf));
 	req->data = sr_buf;
 
@@ -755,7 +757,7 @@ int zfcp_fsf_status_read(struct zfcp_qdio *qdio)
 
 failed_req_send:
 	req->data = NULL;
-	mempool_free(sr_buf, adapter->pool.status_read_data);
+	mempool_free(virt_to_page(sr_buf), adapter->pool.sr_data);
 failed_buf:
 	zfcp_dbf_hba_fsf_uss("fssr__1", req);
 	zfcp_fsf_req_free(req);