aboutsummaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
authorLars Ellenberg <lars.ellenberg@linbit.com>2011-02-23 06:39:46 -0500
committerPhilipp Reisner <philipp.reisner@linbit.com>2012-05-09 09:17:02 -0400
commit4281808fb3580c381a23cceb0a29ced92d570a5f (patch)
treeb561a60dc382305bba499ae0e12c6e0573a3df06
parent0e8488ade26b4b16a9745aa15ecb88c3fb1cb953 (diff)
drbd: add page pool to be used for meta data IO
Signed-off-by: Philipp Reisner <philipp.reisner@linbit.com>
Signed-off-by: Lars Ellenberg <lars.ellenberg@linbit.com>
-rw-r--r--drivers/block/drbd/drbd_int.h23
-rw-r--r--drivers/block/drbd/drbd_main.c9
2 files changed, 31 insertions, 1 deletions
diff --git a/drivers/block/drbd/drbd_int.h b/drivers/block/drbd/drbd_int.h
index 6586053429b..685ed4cca17 100644
--- a/drivers/block/drbd/drbd_int.h
+++ b/drivers/block/drbd/drbd_int.h
@@ -1496,11 +1496,32 @@ extern struct kmem_cache *drbd_al_ext_cache; /* activity log extents */
1496extern mempool_t *drbd_request_mempool; 1496extern mempool_t *drbd_request_mempool;
1497extern mempool_t *drbd_ee_mempool; 1497extern mempool_t *drbd_ee_mempool;
1498 1498
1499extern struct page *drbd_pp_pool; /* drbd's page pool */ 1499/* drbd's page pool, used to buffer data received from the peer,
1500 * or data requested by the peer.
1501 *
1502 * This does not have an emergency reserve.
1503 *
1504 * When allocating from this pool, it first takes pages from the pool.
1505 * Only if the pool is depleted will it try to allocate from the system.
1506 *
1507 * The assumption is that pages taken from this pool will be processed,
1508 * and given back, "quickly", and then can be recycled, so we can avoid
1509 * frequent calls to alloc_page(), and still will be able to make progress even
1510 * under memory pressure.
1511 */
1512extern struct page *drbd_pp_pool;
1500extern spinlock_t drbd_pp_lock; 1513extern spinlock_t drbd_pp_lock;
1501extern int drbd_pp_vacant; 1514extern int drbd_pp_vacant;
1502extern wait_queue_head_t drbd_pp_wait; 1515extern wait_queue_head_t drbd_pp_wait;
1503 1516
1517/* We also need a standard (emergency-reserve backed) page pool
1518 * for meta data IO (activity log, bitmap).
1519 * We can keep it global, as long as it is used as "N pages at a time".
1520 * 128 should be plenty, currently we probably can get away with as few as 1.
1521 */
1522#define DRBD_MIN_POOL_PAGES 128
1523extern mempool_t *drbd_md_io_page_pool;
1524
1504extern rwlock_t global_state_lock; 1525extern rwlock_t global_state_lock;
1505 1526
1506extern struct drbd_conf *drbd_new_device(unsigned int minor); 1527extern struct drbd_conf *drbd_new_device(unsigned int minor);
diff --git a/drivers/block/drbd/drbd_main.c b/drivers/block/drbd/drbd_main.c
index d830116781f..ed264c92c3a 100644
--- a/drivers/block/drbd/drbd_main.c
+++ b/drivers/block/drbd/drbd_main.c
@@ -139,6 +139,7 @@ struct kmem_cache *drbd_bm_ext_cache; /* bitmap extents */
139struct kmem_cache *drbd_al_ext_cache; /* activity log extents */ 139struct kmem_cache *drbd_al_ext_cache; /* activity log extents */
140mempool_t *drbd_request_mempool; 140mempool_t *drbd_request_mempool;
141mempool_t *drbd_ee_mempool; 141mempool_t *drbd_ee_mempool;
142mempool_t *drbd_md_io_page_pool;
142 143
143/* I do not use a standard mempool, because: 144/* I do not use a standard mempool, because:
144 1) I want to hand out the pre-allocated objects first. 145 1) I want to hand out the pre-allocated objects first.
@@ -3264,6 +3265,8 @@ static void drbd_destroy_mempools(void)
3264 3265
3265 /* D_ASSERT(atomic_read(&drbd_pp_vacant)==0); */ 3266 /* D_ASSERT(atomic_read(&drbd_pp_vacant)==0); */
3266 3267
3268 if (drbd_md_io_page_pool)
3269 mempool_destroy(drbd_md_io_page_pool);
3267 if (drbd_ee_mempool) 3270 if (drbd_ee_mempool)
3268 mempool_destroy(drbd_ee_mempool); 3271 mempool_destroy(drbd_ee_mempool);
3269 if (drbd_request_mempool) 3272 if (drbd_request_mempool)
@@ -3277,6 +3280,7 @@ static void drbd_destroy_mempools(void)
3277 if (drbd_al_ext_cache) 3280 if (drbd_al_ext_cache)
3278 kmem_cache_destroy(drbd_al_ext_cache); 3281 kmem_cache_destroy(drbd_al_ext_cache);
3279 3282
3283 drbd_md_io_page_pool = NULL;
3280 drbd_ee_mempool = NULL; 3284 drbd_ee_mempool = NULL;
3281 drbd_request_mempool = NULL; 3285 drbd_request_mempool = NULL;
3282 drbd_ee_cache = NULL; 3286 drbd_ee_cache = NULL;
@@ -3300,6 +3304,7 @@ static int drbd_create_mempools(void)
3300 drbd_bm_ext_cache = NULL; 3304 drbd_bm_ext_cache = NULL;
3301 drbd_al_ext_cache = NULL; 3305 drbd_al_ext_cache = NULL;
3302 drbd_pp_pool = NULL; 3306 drbd_pp_pool = NULL;
3307 drbd_md_io_page_pool = NULL;
3303 3308
3304 /* caches */ 3309 /* caches */
3305 drbd_request_cache = kmem_cache_create( 3310 drbd_request_cache = kmem_cache_create(
@@ -3323,6 +3328,10 @@ static int drbd_create_mempools(void)
3323 goto Enomem; 3328 goto Enomem;
3324 3329
3325 /* mempools */ 3330 /* mempools */
3331 drbd_md_io_page_pool = mempool_create_page_pool(DRBD_MIN_POOL_PAGES, 0);
3332 if (drbd_md_io_page_pool == NULL)
3333 goto Enomem;
3334
3326 drbd_request_mempool = mempool_create(number, 3335 drbd_request_mempool = mempool_create(number,
3327 mempool_alloc_slab, mempool_free_slab, drbd_request_cache); 3336 mempool_alloc_slab, mempool_free_slab, drbd_request_cache);
3328 if (drbd_request_mempool == NULL) 3337 if (drbd_request_mempool == NULL)