[SCSI] zfcp: Replace kmem_cache for "status read" data
author     Christof Schmitt <christof.schmitt@de.ibm.com>
           Tue, 22 Feb 2011 18:54:40 +0000 (19:54 +0100)
committer  James Bottomley <James.Bottomley@suse.de>
           Fri, 25 Feb 2011 17:01:59 +0000 (12:01 -0500)
zfcp requires a mempool for the status read data blocks so that
"status read" requests can be resubmitted at any time, even under
memory pressure. Each status read data block is the size of a page
(4096 bytes) and must be contained within a single page.
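
The fit-in-a-single-page assumption is enforced at compile time where
the new pool is created (see the zfcp_aux.c hunk below):

    BUILD_BUG_ON(sizeof(struct fsf_status_read_buffer) > PAGE_SIZE);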

Instead of keeping a kmem_cache around just to allocate page-sized
chunks, use mempool_create_page_pool to create a mempool that hands
out whole pages, and remove the zfcp kmem_cache for the status read
buffers.
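
As a minimal sketch of the pattern this change switches to (the
mempool calls are the standard kernel API; the pool size of 16 and the
helper names are illustrative only, the driver itself uses
FSF_STATUS_READS_RECOM and its adapter pool):

    /* Page-backed mempool: mempool_alloc() returns a struct page *, so the
     * caller converts to and from a virtual address with page_address()
     * and virt_to_page().  Sketch only; driver context is omitted.
     */
    #include <linux/mempool.h>
    #include <linux/mm.h>
    #include <linux/gfp.h>
    #include <linux/errno.h>

    static mempool_t *sr_pool;      /* stand-in for adapter->pool.sr_data */

    static int sr_pool_create(void)
    {
            /* order 0 = one page (4096 bytes) per element; 16 elements are
             * preallocated so GFP_ATOMIC allocations still succeed under
             * memory pressure.
             */
            sr_pool = mempool_create_page_pool(16, 0);
            return sr_pool ? 0 : -ENOMEM;
    }

    static void *sr_buf_get(void)
    {
            struct page *page = mempool_alloc(sr_pool, GFP_ATOMIC);

            return page ? page_address(page) : NULL;
    }

    static void sr_buf_put(void *sr_buf)
    {
            mempool_free(virt_to_page(sr_buf), sr_pool);
    }

Because mempool_alloc() on a page pool hands back a struct page *, the
allocation path below uses page_address() and the free paths convert
back with virt_to_page().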

Signed-off-by: Christof Schmitt <christof.schmitt@de.ibm.com>
Signed-off-by: Steffen Maier <maier@linux.vnet.ibm.com>
Signed-off-by: James Bottomley <James.Bottomley@suse.de>
drivers/s390/scsi/zfcp_aux.c
drivers/s390/scsi/zfcp_def.h
drivers/s390/scsi/zfcp_erp.c
drivers/s390/scsi/zfcp_fsf.c

diff --git a/drivers/s390/scsi/zfcp_aux.c b/drivers/s390/scsi/zfcp_aux.c
index 51c666f..81e1856 100644
@@ -132,11 +132,6 @@ static int __init zfcp_module_init(void)
        if (!zfcp_data.qtcb_cache)
                goto out_qtcb_cache;
 
-       zfcp_data.sr_buffer_cache = zfcp_cache_hw_align("zfcp_sr",
-                                       sizeof(struct fsf_status_read_buffer));
-       if (!zfcp_data.sr_buffer_cache)
-               goto out_sr_cache;
-
        zfcp_data.gid_pn_cache = zfcp_cache_hw_align("zfcp_gid",
                                        sizeof(struct zfcp_fc_gid_pn));
        if (!zfcp_data.gid_pn_cache)
@@ -181,8 +176,6 @@ out_transport:
 out_adisc_cache:
        kmem_cache_destroy(zfcp_data.gid_pn_cache);
 out_gid_cache:
-       kmem_cache_destroy(zfcp_data.sr_buffer_cache);
-out_sr_cache:
        kmem_cache_destroy(zfcp_data.qtcb_cache);
 out_qtcb_cache:
        kmem_cache_destroy(zfcp_data.gpn_ft_cache);
@@ -199,7 +192,6 @@ static void __exit zfcp_module_exit(void)
        fc_release_transport(zfcp_data.scsi_transport_template);
        kmem_cache_destroy(zfcp_data.adisc_cache);
        kmem_cache_destroy(zfcp_data.gid_pn_cache);
-       kmem_cache_destroy(zfcp_data.sr_buffer_cache);
        kmem_cache_destroy(zfcp_data.qtcb_cache);
        kmem_cache_destroy(zfcp_data.gpn_ft_cache);
 }
@@ -264,10 +256,10 @@ static int zfcp_allocate_low_mem_buffers(struct zfcp_adapter *adapter)
        if (!adapter->pool.qtcb_pool)
                return -ENOMEM;
 
-       adapter->pool.status_read_data =
-               mempool_create_slab_pool(FSF_STATUS_READS_RECOM,
-                                        zfcp_data.sr_buffer_cache);
-       if (!adapter->pool.status_read_data)
+       BUILD_BUG_ON(sizeof(struct fsf_status_read_buffer) > PAGE_SIZE);
+       adapter->pool.sr_data =
+               mempool_create_page_pool(FSF_STATUS_READS_RECOM, 0);
+       if (!adapter->pool.sr_data)
                return -ENOMEM;
 
        adapter->pool.gid_pn =
@@ -290,8 +282,8 @@ static void zfcp_free_low_mem_buffers(struct zfcp_adapter *adapter)
                mempool_destroy(adapter->pool.qtcb_pool);
        if (adapter->pool.status_read_req)
                mempool_destroy(adapter->pool.status_read_req);
-       if (adapter->pool.status_read_data)
-               mempool_destroy(adapter->pool.status_read_data);
+       if (adapter->pool.sr_data)
+               mempool_destroy(adapter->pool.sr_data);
        if (adapter->pool.gid_pn)
                mempool_destroy(adapter->pool.gid_pn);
 }
diff --git a/drivers/s390/scsi/zfcp_def.h b/drivers/s390/scsi/zfcp_def.h
index 89e43e1..93ce500 100644
@@ -107,7 +107,7 @@ struct zfcp_adapter_mempool {
        mempool_t *scsi_req;
        mempool_t *scsi_abort;
        mempool_t *status_read_req;
-       mempool_t *status_read_data;
+       mempool_t *sr_data;
        mempool_t *gid_pn;
        mempool_t *qtcb_pool;
 };
@@ -319,7 +319,6 @@ struct zfcp_data {
        struct scsi_transport_template *scsi_transport_template;
        struct kmem_cache       *gpn_ft_cache;
        struct kmem_cache       *qtcb_cache;
-       struct kmem_cache       *sr_buffer_cache;
        struct kmem_cache       *gid_pn_cache;
        struct kmem_cache       *adisc_cache;
 };
diff --git a/drivers/s390/scsi/zfcp_erp.c b/drivers/s390/scsi/zfcp_erp.c
index e003e30..6c1cddf 100644
@@ -732,7 +732,7 @@ static int zfcp_erp_adapter_strategy_open_fsf(struct zfcp_erp_action *act)
        if (zfcp_erp_adapter_strategy_open_fsf_xport(act) == ZFCP_ERP_FAILED)
                return ZFCP_ERP_FAILED;
 
-       if (mempool_resize(act->adapter->pool.status_read_data,
+       if (mempool_resize(act->adapter->pool.sr_data,
                           act->adapter->stat_read_buf_num, GFP_KERNEL))
                return ZFCP_ERP_FAILED;
 
diff --git a/drivers/s390/scsi/zfcp_fsf.c b/drivers/s390/scsi/zfcp_fsf.c
index 6efaea9..a2b0e84 100644
@@ -212,7 +212,7 @@ static void zfcp_fsf_status_read_handler(struct zfcp_fsf_req *req)
 
        if (req->status & ZFCP_STATUS_FSFREQ_DISMISSED) {
                zfcp_dbf_hba_fsf_uss("fssrh_1", req);
-               mempool_free(sr_buf, adapter->pool.status_read_data);
+               mempool_free(virt_to_page(sr_buf), adapter->pool.sr_data);
                zfcp_fsf_req_free(req);
                return;
        }
@@ -265,7 +265,7 @@ static void zfcp_fsf_status_read_handler(struct zfcp_fsf_req *req)
                break;
        }
 
-       mempool_free(sr_buf, adapter->pool.status_read_data);
+       mempool_free(virt_to_page(sr_buf), adapter->pool.sr_data);
        zfcp_fsf_req_free(req);
 
        atomic_inc(&adapter->stat_miss);
@@ -723,6 +723,7 @@ int zfcp_fsf_status_read(struct zfcp_qdio *qdio)
        struct zfcp_adapter *adapter = qdio->adapter;
        struct zfcp_fsf_req *req;
        struct fsf_status_read_buffer *sr_buf;
+       struct page *page;
        int retval = -EIO;
 
        spin_lock_irq(&qdio->req_q_lock);
@@ -736,11 +737,12 @@ int zfcp_fsf_status_read(struct zfcp_qdio *qdio)
                goto out;
        }
 
-       sr_buf = mempool_alloc(adapter->pool.status_read_data, GFP_ATOMIC);
-       if (!sr_buf) {
+       page = mempool_alloc(adapter->pool.sr_data, GFP_ATOMIC);
+       if (!page) {
                retval = -ENOMEM;
                goto failed_buf;
        }
+       sr_buf = page_address(page);
        memset(sr_buf, 0, sizeof(*sr_buf));
        req->data = sr_buf;
 
@@ -755,7 +757,7 @@ int zfcp_fsf_status_read(struct zfcp_qdio *qdio)
 
 failed_req_send:
        req->data = NULL;
-       mempool_free(sr_buf, adapter->pool.status_read_data);
+       mempool_free(virt_to_page(sr_buf), adapter->pool.sr_data);
 failed_buf:
        zfcp_dbf_hba_fsf_uss("fssr__1", req);
        zfcp_fsf_req_free(req);