author     Christof Schmitt <christof.schmitt@de.ibm.com>   2011-02-22 13:54:40 -0500
committer  James Bottomley <James.Bottomley@suse.de>        2011-02-25 12:01:59 -0500
commit     c7b279ae51942c14529bf2806685e9c658f28611 (patch)
tree       76d48640ccd62f7f375180cb9179f03079ac8c0c /drivers/s390/scsi/zfcp_fsf.c
parent     7c35e77b96b2f0af8c278c13d484d42dad3c7422 (diff)
[SCSI] zfcp: Replace kmem_cache for "status read" data
zfcp requires a mempool for the status read data blocks so that the
"status read" requests can be resubmitted at any time. Each status read
data block is the size of a page (4096 bytes) and must fit within a
single page.
Instead of using a kmem_cache to allocate page-sized chunks, use
mempool_create_page_pool to create a mempool that returns pages, and
remove the zfcp kmem_cache.
Signed-off-by: Christof Schmitt <christof.schmitt@de.ibm.com>
Signed-off-by: Steffen Maier <maier@linux.vnet.ibm.com>
Signed-off-by: James Bottomley <James.Bottomley@suse.de>
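For readers unfamiliar with page-backed mempools, the sketch below shows the pattern this patch adopts: the pool is created with mempool_create_page_pool(), hands out struct page pointers, and callers translate between pages and kernel addresses with page_address()/virt_to_page(). This is a minimal, hypothetical sketch: the pool name, minimum element count, and the demo_* helpers are illustrative assumptions and not zfcp code; in zfcp the pool lives in adapter->pool.sr_data, as the diff below shows.

	/*
	 * Hypothetical sketch of the mempool_create_page_pool() pattern;
	 * names and the minimum element count are illustrative only.
	 */
	#include <linux/mempool.h>
	#include <linux/mm.h>
	#include <linux/gfp.h>
	#include <linux/errno.h>

	static mempool_t *demo_pool;

	static int demo_setup(void)
	{
		/* Pool guaranteeing at least 4 order-0 pages (4096 bytes each). */
		demo_pool = mempool_create_page_pool(4, 0);
		if (!demo_pool)
			return -ENOMEM;
		return 0;
	}

	static void *demo_get_buffer(void)
	{
		/* The pool hands out struct page pointers, not kernel addresses. */
		struct page *page = mempool_alloc(demo_pool, GFP_ATOMIC);

		if (!page)
			return NULL;
		return page_address(page);
	}

	static void demo_put_buffer(void *buf)
	{
		/* Convert the kernel address back to its struct page for the pool. */
		mempool_free(virt_to_page(buf), demo_pool);
	}

	static void demo_teardown(void)
	{
		mempool_destroy(demo_pool);
	}

This struct page round-trip is why the diff wraps sr_buf in virt_to_page() on every mempool_free() and calls page_address() immediately after mempool_alloc().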
Diffstat (limited to 'drivers/s390/scsi/zfcp_fsf.c')
-rw-r--r--   drivers/s390/scsi/zfcp_fsf.c | 12
1 file changed, 7 insertions(+), 5 deletions(-)
diff --git a/drivers/s390/scsi/zfcp_fsf.c b/drivers/s390/scsi/zfcp_fsf.c
index 6efaea9207ca..a2b0e8435fc3 100644
--- a/drivers/s390/scsi/zfcp_fsf.c
+++ b/drivers/s390/scsi/zfcp_fsf.c
@@ -212,7 +212,7 @@ static void zfcp_fsf_status_read_handler(struct zfcp_fsf_req *req)
 
 	if (req->status & ZFCP_STATUS_FSFREQ_DISMISSED) {
 		zfcp_dbf_hba_fsf_uss("fssrh_1", req);
-		mempool_free(sr_buf, adapter->pool.status_read_data);
+		mempool_free(virt_to_page(sr_buf), adapter->pool.sr_data);
 		zfcp_fsf_req_free(req);
 		return;
 	}
@@ -265,7 +265,7 @@ static void zfcp_fsf_status_read_handler(struct zfcp_fsf_req *req)
 		break;
 	}
 
-	mempool_free(sr_buf, adapter->pool.status_read_data);
+	mempool_free(virt_to_page(sr_buf), adapter->pool.sr_data);
 	zfcp_fsf_req_free(req);
 
 	atomic_inc(&adapter->stat_miss);
@@ -723,6 +723,7 @@ int zfcp_fsf_status_read(struct zfcp_qdio *qdio)
 	struct zfcp_adapter *adapter = qdio->adapter;
 	struct zfcp_fsf_req *req;
 	struct fsf_status_read_buffer *sr_buf;
+	struct page *page;
 	int retval = -EIO;
 
 	spin_lock_irq(&qdio->req_q_lock);
@@ -736,11 +737,12 @@ int zfcp_fsf_status_read(struct zfcp_qdio *qdio)
 		goto out;
 	}
 
-	sr_buf = mempool_alloc(adapter->pool.status_read_data, GFP_ATOMIC);
-	if (!sr_buf) {
+	page = mempool_alloc(adapter->pool.sr_data, GFP_ATOMIC);
+	if (!page) {
 		retval = -ENOMEM;
 		goto failed_buf;
 	}
+	sr_buf = page_address(page);
 	memset(sr_buf, 0, sizeof(*sr_buf));
 	req->data = sr_buf;
 
@@ -755,7 +757,7 @@ int zfcp_fsf_status_read(struct zfcp_qdio *qdio)
 
 failed_req_send:
 	req->data = NULL;
-	mempool_free(sr_buf, adapter->pool.status_read_data);
+	mempool_free(virt_to_page(sr_buf), adapter->pool.sr_data);
 failed_buf:
 	zfcp_dbf_hba_fsf_uss("fssr__1", req);
 	zfcp_fsf_req_free(req);