author     Dean Nelson <dcn@sgi.com>          2006-06-28 14:50:09 -0400
committer  Tony Luck <tony.luck@intel.com>    2006-08-04 13:27:27 -0400
commit     eca7994f60eb6550d9e9b36d3b641a5a0e18a7c1 (patch)
tree       124cf66b19d5e3d8e32985a15ac2a3f45b863e15 /arch
parent     efe78cda3596f8a6d1c2d4a6b1a221bafa3e1a48 (diff)
[IA64] make uncached allocator more node aware
The uncached allocator has a function, uncached_get_new_chunk(), that needs
to be serialized on a per node basis. It also has a global variable,
allocated_granules, which should be defined on a per node basis and
protected by that serialization. Additionally, all error returns from
functions called (like ia64_pal_mc_drain()) should be handled appropriately.

Signed-off-by: Dean Nelson <dcn@sgi.com>
Acked-by: Jes Sorenson <jes@sgi.com>
Signed-off-by: Tony Luck <tony.luck@intel.com>
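The heart of the change is a "snapshot, lock, recheck" idiom that serializes
chunk conversion per node. Below is a condensed sketch of just that idiom,
with the actual granule conversion and its error paths elided; the
identifiers are the ones the patch introduces, but this is a sketch, not the
patch code itself:

	static int add_chunk_sketch(struct uncached_pool *uc_pool, int nid)
	{
		int nchunks_added = uc_pool->nchunks_added; /* pre-lock snapshot */

		if (mutex_lock_interruptible(&uc_pool->add_chunk_mutex) != 0)
			return -1;	/* interrupted by a signal */

		if (uc_pool->nchunks_added > nchunks_added) {
			/*
			 * Another thread converted a chunk while we slept on
			 * the mutex: report success so the caller retries
			 * gen_pool_alloc() instead of needlessly converting a
			 * second granule.
			 */
			mutex_unlock(&uc_pool->add_chunk_mutex);
			return 0;
		}

		if (uc_pool->nchunks_added >= MAX_CONVERTED_CHUNKS_PER_NODE) {
			mutex_unlock(&uc_pool->add_chunk_mutex);
			return -1;	/* per-node limit reached */
		}

		/* ... allocate, convert, and gen_pool_add() a granule ... */

		uc_pool->nchunks_added++;
		mutex_unlock(&uc_pool->add_chunk_mutex);
		return 0;
	}

Returning 0 on the lost race is what makes uncached_alloc_page()'s retry
loop simply call gen_pool_alloc() again.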
Diffstat (limited to 'arch')
-rw-r--r--  arch/ia64/kernel/uncached.c | 86
1 file changed, 57 insertions(+), 29 deletions(-)
diff --git a/arch/ia64/kernel/uncached.c b/arch/ia64/kernel/uncached.c
index 5f03b9e524dd..4c73a6763669 100644
--- a/arch/ia64/kernel/uncached.c
+++ b/arch/ia64/kernel/uncached.c
@@ -32,32 +32,38 @@
 
 extern void __init efi_memmap_walk_uc(efi_freemem_callback_t, void *);
 
-#define MAX_UNCACHED_GRANULES	5
-static int allocated_granules;
+struct uncached_pool {
+	struct gen_pool *pool;
+	struct mutex add_chunk_mutex;	/* serialize adding a converted chunk */
+	int nchunks_added;	/* #of converted chunks added to pool */
+	atomic_t status;	/* smp called function's return status*/
+};
+
+#define MAX_CONVERTED_CHUNKS_PER_NODE	2
 
-struct gen_pool *uncached_pool[MAX_NUMNODES];
+struct uncached_pool uncached_pools[MAX_NUMNODES];
 
 
 static void uncached_ipi_visibility(void *data)
 {
 	int status;
+	struct uncached_pool *uc_pool = (struct uncached_pool *)data;
 
 	status = ia64_pal_prefetch_visibility(PAL_VISIBILITY_PHYSICAL);
 	if ((status != PAL_VISIBILITY_OK) &&
 	    (status != PAL_VISIBILITY_OK_REMOTE_NEEDED))
-		printk(KERN_DEBUG "pal_prefetch_visibility() returns %i on "
-		       "CPU %i\n", status, raw_smp_processor_id());
+		atomic_inc(&uc_pool->status);
 }
 
 
 static void uncached_ipi_mc_drain(void *data)
 {
 	int status;
+	struct uncached_pool *uc_pool = (struct uncached_pool *)data;
 
 	status = ia64_pal_mc_drain();
-	if (status)
-		printk(KERN_WARNING "ia64_pal_mc_drain() failed with %i on "
-		       "CPU %i\n", status, raw_smp_processor_id());
+	if (status != PAL_STATUS_SUCCESS)
+		atomic_inc(&uc_pool->status);
 }
 
 
@@ -70,21 +76,34 @@ static void uncached_ipi_mc_drain(void *data)
  * This is accomplished by first allocating a granule of cached memory pages
  * and then converting them to uncached memory pages.
  */
-static int uncached_add_chunk(struct gen_pool *pool, int nid)
+static int uncached_add_chunk(struct uncached_pool *uc_pool, int nid)
 {
 	struct page *page;
-	int status, i;
+	int status, i, nchunks_added = uc_pool->nchunks_added;
 	unsigned long c_addr, uc_addr;
 
-	if (allocated_granules >= MAX_UNCACHED_GRANULES)
+	if (mutex_lock_interruptible(&uc_pool->add_chunk_mutex) != 0)
+		return -1;	/* interrupted by a signal */
+
+	if (uc_pool->nchunks_added > nchunks_added) {
+		/* someone added a new chunk while we were waiting */
+		mutex_unlock(&uc_pool->add_chunk_mutex);
+		return 0;
+	}
+
+	if (uc_pool->nchunks_added >= MAX_CONVERTED_CHUNKS_PER_NODE) {
+		mutex_unlock(&uc_pool->add_chunk_mutex);
 		return -1;
+	}
 
 	/* attempt to allocate a granule's worth of cached memory pages */
 
 	page = alloc_pages_node(nid, GFP_KERNEL | __GFP_ZERO,
 				IA64_GRANULE_SHIFT-PAGE_SHIFT);
-	if (!page)
+	if (!page) {
+		mutex_unlock(&uc_pool->add_chunk_mutex);
 		return -1;
+	}
 
 	/* convert the memory pages from cached to uncached */
 
@@ -102,11 +121,14 @@ static int uncached_add_chunk(struct gen_pool *pool, int nid)
 	flush_tlb_kernel_range(uc_addr, uc_adddr + IA64_GRANULE_SIZE);
 
 	status = ia64_pal_prefetch_visibility(PAL_VISIBILITY_PHYSICAL);
-	if (!status) {
-		status = smp_call_function(uncached_ipi_visibility, NULL, 0, 1);
-		if (status)
+	if (status == PAL_VISIBILITY_OK_REMOTE_NEEDED) {
+		atomic_set(&uc_pool->status, 0);
+		status = smp_call_function(uncached_ipi_visibility, uc_pool,
+					   0, 1);
+		if (status || atomic_read(&uc_pool->status))
 			goto failed;
-	}
+	} else if (status != PAL_VISIBILITY_OK)
+		goto failed;
 
 	preempt_disable();
 
@@ -120,20 +142,24 @@ static int uncached_add_chunk(struct gen_pool *pool, int nid)
 
 	preempt_enable();
 
-	ia64_pal_mc_drain();
-	status = smp_call_function(uncached_ipi_mc_drain, NULL, 0, 1);
-	if (status)
+	status = ia64_pal_mc_drain();
+	if (status != PAL_STATUS_SUCCESS)
+		goto failed;
+	atomic_set(&uc_pool->status, 0);
+	status = smp_call_function(uncached_ipi_mc_drain, uc_pool, 0, 1);
+	if (status || atomic_read(&uc_pool->status))
 		goto failed;
 
 	/*
 	 * The chunk of memory pages has been converted to uncached so now we
 	 * can add it to the pool.
 	 */
-	status = gen_pool_add(pool, uc_addr, IA64_GRANULE_SIZE, nid);
+	status = gen_pool_add(uc_pool->pool, uc_addr, IA64_GRANULE_SIZE, nid);
 	if (status)
 		goto failed;
 
-	allocated_granules++;
+	uc_pool->nchunks_added++;
+	mutex_unlock(&uc_pool->add_chunk_mutex);
 	return 0;
 
 	/* failed to convert or add the chunk so give it back to the kernel */
@@ -142,6 +168,7 @@ failed:
 		ClearPageUncached(&page[i]);
 
 	free_pages(c_addr, IA64_GRANULE_SHIFT-PAGE_SHIFT);
+	mutex_unlock(&uc_pool->add_chunk_mutex);
 	return -1;
 }
 
@@ -158,7 +185,7 @@ failed:
 unsigned long uncached_alloc_page(int starting_nid)
 {
 	unsigned long uc_addr;
-	struct gen_pool *pool;
+	struct uncached_pool *uc_pool;
 	int nid;
 
 	if (unlikely(starting_nid >= MAX_NUMNODES))
@@ -171,14 +198,14 @@ unsigned long uncached_alloc_page(int starting_nid)
 	do {
 		if (!node_online(nid))
 			continue;
-		pool = uncached_pool[nid];
-		if (pool == NULL)
+		uc_pool = &uncached_pools[nid];
+		if (uc_pool->pool == NULL)
 			continue;
 		do {
-			uc_addr = gen_pool_alloc(pool, PAGE_SIZE);
+			uc_addr = gen_pool_alloc(uc_pool->pool, PAGE_SIZE);
 			if (uc_addr != 0)
 				return uc_addr;
-		} while (uncached_add_chunk(pool, nid) == 0);
+		} while (uncached_add_chunk(uc_pool, nid) == 0);
 
 	} while ((nid = (nid + 1) % MAX_NUMNODES) != starting_nid);
 
@@ -197,7 +224,7 @@ EXPORT_SYMBOL(uncached_alloc_page);
 void uncached_free_page(unsigned long uc_addr)
 {
 	int nid = paddr_to_nid(uc_addr - __IA64_UNCACHED_OFFSET);
-	struct gen_pool *pool = uncached_pool[nid];
+	struct gen_pool *pool = uncached_pools[nid].pool;
 
 	if (unlikely(pool == NULL))
 		return;
@@ -224,7 +251,7 @@ static int __init uncached_build_memmap(unsigned long uc_start,
 					 unsigned long uc_end, void *arg)
 {
 	int nid = paddr_to_nid(uc_start - __IA64_UNCACHED_OFFSET);
-	struct gen_pool *pool = uncached_pool[nid];
+	struct gen_pool *pool = uncached_pools[nid].pool;
 	size_t size = uc_end - uc_start;
 
 	touch_softlockup_watchdog();
@@ -242,7 +269,8 @@ static int __init uncached_init(void)
 	int nid;
 
 	for_each_online_node(nid) {
-		uncached_pool[nid] = gen_pool_create(PAGE_SHIFT, nid);
+		uncached_pools[nid].pool = gen_pool_create(PAGE_SHIFT, nid);
+		mutex_init(&uncached_pools[nid].add_chunk_mutex);
 	}
 
 	efi_memmap_walk_uc(uncached_build_memmap, NULL);
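
A second pattern recurs above: smp_call_function() only reports whether the
cross-CPU calls were delivered, not whether the PAL call inside each handler
succeeded, so the patch routes remote failures back through the pool's
atomic status counter. A minimal sketch of that pattern (PAL details and the
surrounding conversion logic elided; sketch only, not the patch code):

	static void ipi_drain_sketch(void *data)
	{
		struct uncached_pool *uc_pool = data;

		if (ia64_pal_mc_drain() != PAL_STATUS_SUCCESS)
			atomic_inc(&uc_pool->status);	/* flag remote failure */
	}

	static int drain_all_cpus_sketch(struct uncached_pool *uc_pool)
	{
		int status;

		if (ia64_pal_mc_drain() != PAL_STATUS_SUCCESS)
			return -1;	/* local CPU failed */

		atomic_set(&uc_pool->status, 0);	/* reset before the IPIs */
		status = smp_call_function(ipi_drain_sketch, uc_pool, 0, 1);
		if (status || atomic_read(&uc_pool->status))
			return -1;	/* delivery error or remote PAL failure */

		return 0;
	}

Zeroing the counter while holding add_chunk_mutex, as the patch does, is
what makes the shared atomic safe: only one conversion, and hence one round
of IPIs, can be in flight per node at a time.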