summaryrefslogtreecommitdiffstats
path: root/drivers/md
diff options
context:
space:
mode:
authorJoe Thornber <ejt@redhat.com>2014-10-10 11:42:10 -0400
committerMike Snitzer <snitzer@redhat.com>2014-11-10 15:25:28 -0500
commitac4c3f34a9af63092b3fbfafe34c3e966fbd96c5 (patch)
tree2ec28fb1bb3888f092acdc4957e7b06f3751dc82 /drivers/md
parent23ca2bb6c6104db9d4cff4e33cbabee303c49d4d (diff)
dm thin: sort the deferred cells
Sort the cells in logical block order before processing each cell in process_thin_deferred_cells(). This significantly improves the ondisk layout on rotational storage, thereby improving read performance. Signed-off-by: Joe Thornber <ejt@redhat.com> Signed-off-by: Mike Snitzer <snitzer@redhat.com>
Diffstat (limited to 'drivers/md')
-rw-r--r--drivers/md/dm-thin.c88
1 file changed, 68 insertions, 20 deletions
diff --git a/drivers/md/dm-thin.c b/drivers/md/dm-thin.c
index 3f3a66124d46..b9d25026ab84 100644
--- a/drivers/md/dm-thin.c
+++ b/drivers/md/dm-thin.c
@@ -17,6 +17,7 @@
17#include <linux/init.h> 17#include <linux/init.h>
18#include <linux/module.h> 18#include <linux/module.h>
19#include <linux/slab.h> 19#include <linux/slab.h>
20#include <linux/sort.h>
20#include <linux/rbtree.h> 21#include <linux/rbtree.h>
21 22
22#define DM_MSG_PREFIX "thin" 23#define DM_MSG_PREFIX "thin"
@@ -205,6 +206,8 @@ typedef void (*process_bio_fn)(struct thin_c *tc, struct bio *bio);
205typedef void (*process_cell_fn)(struct thin_c *tc, struct dm_bio_prison_cell *cell); 206typedef void (*process_cell_fn)(struct thin_c *tc, struct dm_bio_prison_cell *cell);
206typedef void (*process_mapping_fn)(struct dm_thin_new_mapping *m); 207typedef void (*process_mapping_fn)(struct dm_thin_new_mapping *m);
207 208
209#define CELL_SORT_ARRAY_SIZE 8192
210
208struct pool { 211struct pool {
209 struct list_head list; 212 struct list_head list;
210 struct dm_target *ti; /* Only set if a pool target is bound */ 213 struct dm_target *ti; /* Only set if a pool target is bound */
@@ -252,6 +255,8 @@ struct pool {
252 255
253 process_mapping_fn process_prepared_mapping; 256 process_mapping_fn process_prepared_mapping;
254 process_mapping_fn process_prepared_discard; 257 process_mapping_fn process_prepared_discard;
258
259 struct dm_bio_prison_cell *cell_sort_array[CELL_SORT_ARRAY_SIZE];
255}; 260};
256 261
257static enum pool_mode get_pool_mode(struct pool *pool); 262static enum pool_mode get_pool_mode(struct pool *pool);
@@ -1800,12 +1805,48 @@ static void process_thin_deferred_bios(struct thin_c *tc)
1800 blk_finish_plug(&plug); 1805 blk_finish_plug(&plug);
1801} 1806}
1802 1807
1808static int cmp_cells(const void *lhs, const void *rhs)
1809{
1810 struct dm_bio_prison_cell *lhs_cell = *((struct dm_bio_prison_cell **) lhs);
1811 struct dm_bio_prison_cell *rhs_cell = *((struct dm_bio_prison_cell **) rhs);
1812
1813 BUG_ON(!lhs_cell->holder);
1814 BUG_ON(!rhs_cell->holder);
1815
1816 if (lhs_cell->holder->bi_iter.bi_sector < rhs_cell->holder->bi_iter.bi_sector)
1817 return -1;
1818
1819 if (lhs_cell->holder->bi_iter.bi_sector > rhs_cell->holder->bi_iter.bi_sector)
1820 return 1;
1821
1822 return 0;
1823}
1824
1825static unsigned sort_cells(struct pool *pool, struct list_head *cells)
1826{
1827 unsigned count = 0;
1828 struct dm_bio_prison_cell *cell, *tmp;
1829
1830 list_for_each_entry_safe(cell, tmp, cells, user_list) {
1831 if (count >= CELL_SORT_ARRAY_SIZE)
1832 break;
1833
1834 pool->cell_sort_array[count++] = cell;
1835 list_del(&cell->user_list);
1836 }
1837
1838 sort(pool->cell_sort_array, count, sizeof(cell), cmp_cells, NULL);
1839
1840 return count;
1841}
1842
1803static void process_thin_deferred_cells(struct thin_c *tc) 1843static void process_thin_deferred_cells(struct thin_c *tc)
1804{ 1844{
1805 struct pool *pool = tc->pool; 1845 struct pool *pool = tc->pool;
1806 unsigned long flags; 1846 unsigned long flags;
1807 struct list_head cells; 1847 struct list_head cells;
1808 struct dm_bio_prison_cell *cell, *tmp; 1848 struct dm_bio_prison_cell *cell;
1849 unsigned i, j, count;
1809 1850
1810 INIT_LIST_HEAD(&cells); 1851 INIT_LIST_HEAD(&cells);
1811 1852
@@ -1816,27 +1857,34 @@ static void process_thin_deferred_cells(struct thin_c *tc)
1816 if (list_empty(&cells)) 1857 if (list_empty(&cells))
1817 return; 1858 return;
1818 1859
1819 list_for_each_entry_safe(cell, tmp, &cells, user_list) { 1860 do {
1820 BUG_ON(!cell->holder); 1861 count = sort_cells(tc->pool, &cells);
1821 1862
1822 /* 1863 for (i = 0; i < count; i++) {
1823 * If we've got no free new_mapping structs, and processing 1864 cell = pool->cell_sort_array[i];
1824 * this bio might require one, we pause until there are some 1865 BUG_ON(!cell->holder);
1825 * prepared mappings to process.
1826 */
1827 if (ensure_next_mapping(pool)) {
1828 spin_lock_irqsave(&tc->lock, flags);
1829 list_add(&cell->user_list, &tc->deferred_cells);
1830 list_splice(&cells, &tc->deferred_cells);
1831 spin_unlock_irqrestore(&tc->lock, flags);
1832 break;
1833 }
1834 1866
1835 if (cell->holder->bi_rw & REQ_DISCARD) 1867 /*
1836 pool->process_discard_cell(tc, cell); 1868 * If we've got no free new_mapping structs, and processing
1837 else 1869 * this bio might require one, we pause until there are some
1838 pool->process_cell(tc, cell); 1870 * prepared mappings to process.
1839 } 1871 */
1872 if (ensure_next_mapping(pool)) {
1873 for (j = i; j < count; j++)
1874 list_add(&pool->cell_sort_array[j]->user_list, &cells);
1875
1876 spin_lock_irqsave(&tc->lock, flags);
1877 list_splice(&cells, &tc->deferred_cells);
1878 spin_unlock_irqrestore(&tc->lock, flags);
1879 return;
1880 }
1881
1882 if (cell->holder->bi_rw & REQ_DISCARD)
1883 pool->process_discard_cell(tc, cell);
1884 else
1885 pool->process_cell(tc, cell);
1886 }
1887 } while (!list_empty(&cells));
1840} 1888}
1841 1889
1842static void thin_get(struct thin_c *tc); 1890static void thin_get(struct thin_c *tc);