Diffstat (limited to 'drivers/mtd/devices')
-rw-r--r--    drivers/mtd/devices/block2mtd.c    57
1 file changed, 4 insertions(+), 53 deletions(-)
diff --git a/drivers/mtd/devices/block2mtd.c b/drivers/mtd/devices/block2mtd.c
index 3bc92153e2b6..ce47544dc120 100644
--- a/drivers/mtd/devices/block2mtd.c
+++ b/drivers/mtd/devices/block2mtd.c
@@ -40,56 +40,9 @@ struct block2mtd_dev {
 static LIST_HEAD(blkmtd_device_list);
 
 
-#define PAGE_READAHEAD 64
-static void cache_readahead(struct address_space *mapping, int index)
+static struct page* page_read(struct address_space *mapping, int index)
 {
 	filler_t *filler = (filler_t*)mapping->a_ops->readpage;
-	int i, pagei;
-	unsigned ret = 0;
-	unsigned long end_index;
-	struct page *page;
-	LIST_HEAD(page_pool);
-	struct inode *inode = mapping->host;
-	loff_t isize = i_size_read(inode);
-
-	if (!isize) {
-		INFO("iSize=0 in cache_readahead\n");
-		return;
-	}
-
-	end_index = ((isize - 1) >> PAGE_CACHE_SHIFT);
-
-	read_lock_irq(&mapping->tree_lock);
-	for (i = 0; i < PAGE_READAHEAD; i++) {
-		pagei = index + i;
-		if (pagei > end_index) {
-			INFO("Overrun end of disk in cache readahead\n");
-			break;
-		}
-		page = radix_tree_lookup(&mapping->page_tree, pagei);
-		if (page && (!i))
-			break;
-		if (page)
-			continue;
-		read_unlock_irq(&mapping->tree_lock);
-		page = page_cache_alloc_cold(mapping);
-		read_lock_irq(&mapping->tree_lock);
-		if (!page)
-			break;
-		page->index = pagei;
-		list_add(&page->lru, &page_pool);
-		ret++;
-	}
-	read_unlock_irq(&mapping->tree_lock);
-	if (ret)
-		read_cache_pages(mapping, &page_pool, filler, NULL);
-}
-
-
-static struct page* page_readahead(struct address_space *mapping, int index)
-{
-	filler_t *filler = (filler_t*)mapping->a_ops->readpage;
-	cache_readahead(mapping, index);
 	return read_cache_page(mapping, index, filler, NULL);
 }
 
@@ -105,7 +58,7 @@ static int _block2mtd_erase(struct block2mtd_dev *dev, loff_t to, size_t len)
 	u_long *max;
 
 	while (pages) {
-		page = page_readahead(mapping, index);
+		page = page_read(mapping, index);
 		if (!page)
 			return -ENOMEM;
 		if (IS_ERR(page))
@@ -174,8 +127,7 @@ static int block2mtd_read(struct mtd_info *mtd, loff_t from, size_t len,
 	cpylen = len;	// this page
 	len = len - cpylen;
 
-	// Get page
-	page = page_readahead(dev->blkdev->bd_inode->i_mapping, index);
+	page = page_read(dev->blkdev->bd_inode->i_mapping, index);
 	if (!page)
 		return -ENOMEM;
 	if (IS_ERR(page))
@@ -213,8 +165,7 @@ static int _block2mtd_write(struct block2mtd_dev *dev, const u_char *buf,
 	cpylen = len;	// this page
 	len = len - cpylen;
 
-	// Get page
-	page = page_readahead(mapping, index);
+	page = page_read(mapping, index);
 	if (!page)
 		return -ENOMEM;
 	if (IS_ERR(page))
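
For reference, a minimal sketch of the helper as it reads once this patch is applied, reconstructed purely from the '+' lines above (not copied from a checked-out tree):

static struct page *page_read(struct address_space *mapping, int index)
{
	/* The open-coded readahead loop is gone; read_cache_page() fills
	 * the page via the mapping's ->readpage and returns it locked in
	 * the page cache (or an ERR_PTR on failure). */
	filler_t *filler = (filler_t *)mapping->a_ops->readpage;

	return read_cache_page(mapping, index, filler, NULL);
}

All three callers (_block2mtd_erase, block2mtd_read, _block2mtd_write) keep the same error handling: a NULL return maps to -ENOMEM and IS_ERR(page) is checked before touching the page contents.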