Diffstat (limited to 'arch/ia64/mm')
 -rw-r--r--  arch/ia64/mm/discontig.c | 57
 -rw-r--r--  arch/ia64/mm/fault.c     | 36
 -rw-r--r--  arch/ia64/mm/init.c      |  5
3 files changed, 77 insertions, 21 deletions
diff --git a/arch/ia64/mm/discontig.c b/arch/ia64/mm/discontig.c
index b6bcc9fa3603..525b082eb661 100644
--- a/arch/ia64/mm/discontig.c
+++ b/arch/ia64/mm/discontig.c
@@ -33,7 +33,6 @@
  */
 struct early_node_data {
         struct ia64_node_data *node_data;
-        pg_data_t *pgdat;
         unsigned long pernode_addr;
         unsigned long pernode_size;
         struct bootmem_data bootmem_data;
@@ -46,6 +45,8 @@ struct early_node_data {
 static struct early_node_data mem_data[MAX_NUMNODES] __initdata;
 static nodemask_t memory_less_mask __initdata;
 
+static pg_data_t *pgdat_list[MAX_NUMNODES];
+
 /*
  * To prevent cache aliasing effects, align per-node structures so that they
  * start at addresses that are strided by node number.
@@ -99,7 +100,7 @@ static int __init build_node_maps(unsigned long start, unsigned long len,
  * acpi_boot_init() (which builds the node_to_cpu_mask array) hasn't been
  * called yet. Note that node 0 will also count all non-existent cpus.
  */
-static int __init early_nr_cpus_node(int node)
+static int __meminit early_nr_cpus_node(int node)
 {
         int cpu, n = 0;
 
@@ -114,7 +115,7 @@ static int __init early_nr_cpus_node(int node)
  * compute_pernodesize - compute size of pernode data
  * @node: the node id.
  */
-static unsigned long __init compute_pernodesize(int node)
+static unsigned long __meminit compute_pernodesize(int node)
 {
         unsigned long pernodesize = 0, cpus;
 
@@ -175,13 +176,13 @@ static void __init fill_pernode(int node, unsigned long pernode,
         pernode += PERCPU_PAGE_SIZE * cpus;
         pernode += node * L1_CACHE_BYTES;
 
-        mem_data[node].pgdat = __va(pernode);
+        pgdat_list[node] = __va(pernode);
         pernode += L1_CACHE_ALIGN(sizeof(pg_data_t));
 
         mem_data[node].node_data = __va(pernode);
         pernode += L1_CACHE_ALIGN(sizeof(struct ia64_node_data));
 
-        mem_data[node].pgdat->bdata = bdp;
+        pgdat_list[node]->bdata = bdp;
         pernode += L1_CACHE_ALIGN(sizeof(pg_data_t));
 
         cpu_data = per_cpu_node_setup(cpu_data, node);
@@ -268,7 +269,7 @@ static int __init find_pernode_space(unsigned long start, unsigned long len,
 static int __init free_node_bootmem(unsigned long start, unsigned long len,
                                     int node)
 {
-        free_bootmem_node(mem_data[node].pgdat, start, len);
+        free_bootmem_node(pgdat_list[node], start, len);
 
         return 0;
 }
@@ -287,7 +288,7 @@ static void __init reserve_pernode_space(void)
         int node;
 
         for_each_online_node(node) {
-                pg_data_t *pdp = mem_data[node].pgdat;
+                pg_data_t *pdp = pgdat_list[node];
 
                 if (node_isset(node, memory_less_mask))
                         continue;
@@ -307,6 +308,17 @@
         }
 }
 
+static void __meminit scatter_node_data(void)
+{
+        pg_data_t **dst;
+        int node;
+
+        for_each_online_node(node) {
+                dst = LOCAL_DATA_ADDR(pgdat_list[node])->pg_data_ptrs;
+                memcpy(dst, pgdat_list, sizeof(pgdat_list));
+        }
+}
+
 /**
  * initialize_pernode_data - fixup per-cpu & per-node pointers
  *
@@ -317,17 +329,10 @@
  */
 static void __init initialize_pernode_data(void)
 {
-        pg_data_t *pgdat_list[MAX_NUMNODES];
         int cpu, node;
 
-        for_each_online_node(node)
-                pgdat_list[node] = mem_data[node].pgdat;
+        scatter_node_data();
 
-        /* Copy the pg_data_t list to each node and init the node field */
-        for_each_online_node(node) {
-                memcpy(mem_data[node].node_data->pg_data_ptrs, pgdat_list,
-                       sizeof(pgdat_list));
-        }
 #ifdef CONFIG_SMP
         /* Set the node_data pointer for each per-cpu struct */
         for (cpu = 0; cpu < NR_CPUS; cpu++) {
@@ -372,7 +377,7 @@ static void __init *memory_less_node_alloc(int nid, unsigned long pernodesize)
         if (bestnode == -1)
                 bestnode = anynode;
 
-        ptr = __alloc_bootmem_node(mem_data[bestnode].pgdat, pernodesize,
+        ptr = __alloc_bootmem_node(pgdat_list[bestnode], pernodesize,
                 PERCPU_PAGE_SIZE, __pa(MAX_DMA_ADDRESS));
 
         return ptr;
@@ -476,7 +481,7 @@ void __init find_memory(void)
                 pernodesize = mem_data[node].pernode_size;
                 map = pernode + pernodesize;
 
-                init_bootmem_node(mem_data[node].pgdat,
+                init_bootmem_node(pgdat_list[node],
                                   map>>PAGE_SHIFT,
                                   bdp->node_boot_start>>PAGE_SHIFT,
                                   bdp->node_low_pfn);
@@ -786,3 +791,21 @@ void __init paging_init(void)
 
         zero_page_memmap_ptr = virt_to_page(ia64_imva(empty_zero_page));
 }
+
+pg_data_t *arch_alloc_nodedata(int nid)
+{
+        unsigned long size = compute_pernodesize(nid);
+
+        return kzalloc(size, GFP_KERNEL);
+}
+
+void arch_free_nodedata(pg_data_t *pgdat)
+{
+        kfree(pgdat);
+}
+
+void arch_refresh_nodedata(int update_node, pg_data_t *update_pgdat)
+{
+        pgdat_list[update_node] = update_pgdat;
+        scatter_node_data();
+}
diff --git a/arch/ia64/mm/fault.c b/arch/ia64/mm/fault.c
index d98ec49570b8..14ef7cceb208 100644
--- a/arch/ia64/mm/fault.c
+++ b/arch/ia64/mm/fault.c
@@ -19,6 +19,40 @@
 
 extern void die (char *, struct pt_regs *, long);
 
+#ifdef CONFIG_KPROBES
+ATOMIC_NOTIFIER_HEAD(notify_page_fault_chain);
+
+/* Hook to register for page fault notifications */
+int register_page_fault_notifier(struct notifier_block *nb)
+{
+        return atomic_notifier_chain_register(&notify_page_fault_chain, nb);
+}
+
+int unregister_page_fault_notifier(struct notifier_block *nb)
+{
+        return atomic_notifier_chain_unregister(&notify_page_fault_chain, nb);
+}
+
+static inline int notify_page_fault(enum die_val val, const char *str,
+                        struct pt_regs *regs, long err, int trap, int sig)
+{
+        struct die_args args = {
+                .regs = regs,
+                .str = str,
+                .err = err,
+                .trapnr = trap,
+                .signr = sig
+        };
+        return atomic_notifier_call_chain(&notify_page_fault_chain, val, &args);
+}
+#else
+static inline int notify_page_fault(enum die_val val, const char *str,
+                        struct pt_regs *regs, long err, int trap, int sig)
+{
+        return NOTIFY_DONE;
+}
+#endif
+
 /*
  * Return TRUE if ADDRESS points at a page in the kernel's mapped segment
  * (inside region 5, on ia64) and that page is present.
@@ -84,7 +118,7 @@ ia64_do_page_fault (unsigned long address, unsigned long isr, struct pt_regs *re
         /*
          * This is to handle the kprobes on user space access instructions
          */
-        if (notify_die(DIE_PAGE_FAULT, "page fault", regs, code, TRAP_BRKPT,
+        if (notify_page_fault(DIE_PAGE_FAULT, "page fault", regs, code, TRAP_BRKPT,
                 SIGSEGV) == NOTIFY_STOP)
                 return;
 
diff --git a/arch/ia64/mm/init.c b/arch/ia64/mm/init.c
index 11f08001f8c2..38306e98f04b 100644
--- a/arch/ia64/mm/init.c
+++ b/arch/ia64/mm/init.c
@@ -652,7 +652,7 @@ void online_page(struct page *page)
         num_physpages++;
 }
 
-int add_memory(u64 start, u64 size)
+int arch_add_memory(int nid, u64 start, u64 size)
 {
         pg_data_t *pgdat;
         struct zone *zone;
@@ -660,7 +660,7 @@ int add_memory(u64 start, u64 size)
         unsigned long nr_pages = size >> PAGE_SHIFT;
         int ret;
 
-        pgdat = NODE_DATA(0);
+        pgdat = NODE_DATA(nid);
 
         zone = pgdat->node_zones + ZONE_NORMAL;
         ret = __add_pages(zone, start_pfn, nr_pages);
@@ -671,7 +671,6 @@
 
         return ret;
 }
-EXPORT_SYMBOL_GPL(add_memory);
 
 int remove_memory(u64 start, u64 size)
 {