Diffstat (limited to 'arch/arm26/mm/small_page.c')
-rw-r--r--  arch/arm26/mm/small_page.c  194
1 file changed, 194 insertions, 0 deletions
diff --git a/arch/arm26/mm/small_page.c b/arch/arm26/mm/small_page.c
new file mode 100644
index 000000000000..77be86cca789
--- /dev/null
+++ b/arch/arm26/mm/small_page.c
@@ -0,0 +1,194 @@
/*
 *  linux/arch/arm26/mm/small_page.c
 *
 *  Copyright (C) 1996  Russell King
 *  Copyright (C) 2003  Ian Molton
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 *  Changelog:
 *   26/01/1996   RMK   Cleaned up various areas to make it a little more generic
 *   07/02/1999   RMK   Support added for 16K and 32K page sizes
 *                      containing 8K blocks
 *   23/05/2004   IM    Fixed to use struct page->lru (thanks wli)
 *
 */
#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/ptrace.h>
#include <linux/mman.h>
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/smp.h>
#include <linux/bitops.h>

#include <asm/pgtable.h>

#define PEDANTIC

/*
 * Requirement:
 *  We need to be able to allocate naturally aligned memory of finer
 *  granularity than the page size.  This is typically used for the
 *  second level page tables on 32-bit ARMs.
 *
 * Theory:
 *  We "misuse" the Linux memory management system.  We use alloc_page
 *  to allocate a page and then mark it as reserved.  The Linux memory
 *  management system will then ignore the "index" and "lru" entries in
 *  the mem_map for this page.
 *
 *  We then use a bitstring in the "index" field to mark which segments
 *  of the page are in use, and manipulate this as required during the
 *  allocation and freeing of these small pages.
 *
 *  We also maintain a queue of pages being used for this purpose using
 *  the "lru" entry of mem_map.
 */
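
/*
 * Example of the bookkeeping above: with a 32K page split into four 8K
 * blocks, USED_MAP starts at 0x0.  The first two allocations set bits 0
 * and 1 (0x1, then 0x3); once USED_MAP reaches all_used (0xf) the page
 * drops off the queue.  Freeing block 0 puts the page back on the queue
 * and clears bit 0 (leaving 0x2); when the map returns to 0x0 the whole
 * page is handed back to the page allocator.
 */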

struct order {
        struct list_head queue;
        unsigned int mask;              /* (1 << shift) - 1       */
        unsigned int shift;             /* (1 << shift) is the block size */
        unsigned int block_mask;        /* nr_blocks - 1          */
        unsigned int all_used;          /* (1 << nr_blocks) - 1   */
};


static struct order orders[] = {
#if PAGE_SIZE == 32768
        { LIST_HEAD_INIT(orders[0].queue), 2047, 11, 15, 0x0000ffff },
        { LIST_HEAD_INIT(orders[1].queue), 8191, 13,  3, 0x0000000f }
#else
#error unsupported page size (ARGH!)
#endif
};
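
/*
 * Deriving the entries above for PAGE_SIZE == 32768: the block size is
 * 1 << shift, mask is the block size minus one, the number of blocks per
 * page is PAGE_SIZE >> shift, block_mask is that count minus one, and
 * all_used has one bit set per block.  For 2K blocks that gives
 * shift = 11, mask = 2047, 16 blocks, block_mask = 15, all_used = 0x0000ffff;
 * for 8K blocks, shift = 13, mask = 8191, 4 blocks, block_mask = 3,
 * all_used = 0x0000000f.
 */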

#define USED_MAP(pg)                    ((pg)->index)
#define TEST_AND_CLEAR_USED(pg,off)     (test_and_clear_bit(off, &USED_MAP(pg)))
#define SET_USED(pg,off)                (set_bit(off, &USED_MAP(pg)))

static DEFINE_SPINLOCK(small_page_lock);

static unsigned long __get_small_page(int priority, struct order *order)
{
        unsigned long flags;
        struct page *page;
        int offset;

        do {
                spin_lock_irqsave(&small_page_lock, flags);

                if (list_empty(&order->queue))
                        goto need_new_page;

                page = list_entry(order->queue.next, struct page, lru);
again:
#ifdef PEDANTIC
                if (USED_MAP(page) & ~order->all_used)
                        PAGE_BUG(page);
#endif
                offset = ffz(USED_MAP(page));
                SET_USED(page, offset);
                if (USED_MAP(page) == order->all_used)
                        list_del_init(&page->lru);
                spin_unlock_irqrestore(&small_page_lock, flags);

                return (unsigned long) page_address(page) + (offset << order->shift);

need_new_page:
                spin_unlock_irqrestore(&small_page_lock, flags);
                page = alloc_page(priority);
                spin_lock_irqsave(&small_page_lock, flags);

                /* the lock was dropped across alloc_page, so recheck the queue */
                if (list_empty(&order->queue)) {
                        if (!page)
                                goto no_page;
                        SetPageReserved(page);
                        USED_MAP(page) = 0;
                        list_add(&page->lru, &order->queue);
                        goto again;
                }

                /* someone else refilled the queue first; drop the new page */
                spin_unlock_irqrestore(&small_page_lock, flags);
                __free_page(page);
        } while (1);

no_page:
        spin_unlock_irqrestore(&small_page_lock, flags);
        return 0;
}
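
/*
 * The address handed back above is page_address(page) + (offset << shift):
 * for example, a container page mapped at 0xc2000000 with offset 2 in the
 * 8K order (shift 13) yields 0xc2004000, which is naturally 8K aligned.
 */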

static void __free_small_page(unsigned long spage, struct order *order)
{
        unsigned long flags;
        struct page *page;

        if (virt_addr_valid(spage)) {
                page = virt_to_page(spage);

                /*
                 * The container-page must be marked Reserved
                 */
                if (!PageReserved(page) || spage & order->mask)
                        goto non_small;

#ifdef PEDANTIC
                if (USED_MAP(page) & ~order->all_used)
                        PAGE_BUG(page);
#endif

                spage = spage >> order->shift;
                spage &= order->block_mask;

                /*
                 * the following must be atomic wrt get_page
                 */
                spin_lock_irqsave(&small_page_lock, flags);

                if (USED_MAP(page) == order->all_used)
                        list_add(&page->lru, &order->queue);

                if (!TEST_AND_CLEAR_USED(page, spage))
                        goto already_free;

                if (USED_MAP(page) == 0)
                        goto free_page;

                spin_unlock_irqrestore(&small_page_lock, flags);
        }
        return;

free_page:
        /*
         * unlink the page from the small page queue and free it
         */
        list_del_init(&page->lru);
        spin_unlock_irqrestore(&small_page_lock, flags);
        ClearPageReserved(page);
        __free_page(page);
        return;

non_small:
        printk("Trying to free non-small page from %p\n", __builtin_return_address(0));
        return;
already_free:
        printk("Trying to free free small page from %p\n", __builtin_return_address(0));
}

unsigned long get_page_8k(int priority)
{
        return __get_small_page(priority, orders+1);
}

void free_page_8k(unsigned long spage)
{
        __free_small_page(spage, orders+1);
}
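
/*
 * Usage sketch for the two exported helpers above.  The caller below is
 * hypothetical (example_table, example_alloc_table and example_free_table
 * are illustrative names only); the priority argument is passed straight
 * through to alloc_page, so a normal gfp mask such as GFP_KERNEL is what
 * callers would hand in.
 */
static unsigned long example_table;

static int example_alloc_table(void)
{
        example_table = get_page_8k(GFP_KERNEL);        /* naturally aligned 8K block, or 0 */
        if (!example_table)
                return -ENOMEM;
        memset((void *)example_table, 0, 8192);         /* start with an empty table */
        return 0;
}

static void example_free_table(void)
{
        free_page_8k(example_table);                    /* hand the block back to the pool */
        example_table = 0;
}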