Diffstat (limited to 'arch/x86_64/mm')
-rw-r--r--	arch/x86_64/mm/fault.c	|  7
-rw-r--r--	arch/x86_64/mm/numa.c	| 50
-rw-r--r--	arch/x86_64/mm/srat.c	| 22
3 files changed, 44 insertions, 35 deletions
diff --git a/arch/x86_64/mm/fault.c b/arch/x86_64/mm/fault.c
index 2f187986f940..493819e543a5 100644
--- a/arch/x86_64/mm/fault.c
+++ b/arch/x86_64/mm/fault.c
@@ -23,7 +23,6 @@
 #include <linux/vt_kern.h>		/* For unblank_screen() */
 #include <linux/compiler.h>
 #include <linux/module.h>
-#include <linux/kprobes.h>
 
 #include <asm/system.h>
 #include <asm/uaccess.h>
@@ -440,13 +439,13 @@ good_area:
 	 * the fault.
 	 */
 	switch (handle_mm_fault(mm, vma, address, write)) {
-	case 1:
+	case VM_FAULT_MINOR:
 		tsk->min_flt++;
 		break;
-	case 2:
+	case VM_FAULT_MAJOR:
 		tsk->maj_flt++;
 		break;
-	case 0:
+	case VM_FAULT_SIGBUS:
 		goto do_sigbus;
 	default:
 		goto out_of_memory;
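
The fault.c hunks above drop the <linux/kprobes.h> include and replace the hard-coded handle_mm_fault() return values with the symbolic VM_FAULT_* names, so the accounting logic reads as intent rather than magic numbers. As a stand-alone illustration of that dispatch pattern, here is a minimal user-space sketch; the enum values, struct counters, and account_fault() are made-up stand-ins, not the kernel's definitions.

#include <stdio.h>

/* Stand-in result codes; NOT the kernel's VM_FAULT_* values. */
enum fault_result { FAULT_MINOR, FAULT_MAJOR, FAULT_SIGBUS, FAULT_OOM };

struct counters { unsigned long min_flt, maj_flt; };

static void account_fault(struct counters *c, enum fault_result r)
{
	switch (r) {
	case FAULT_MINOR:	/* resolved without I/O */
		c->min_flt++;
		break;
	case FAULT_MAJOR:	/* the page had to be brought in */
		c->maj_flt++;
		break;
	case FAULT_SIGBUS:	/* caller would raise SIGBUS */
	default:		/* treat anything else as out-of-memory */
		break;
	}
}

int main(void)
{
	struct counters c = { 0, 0 };

	account_fault(&c, FAULT_MINOR);
	account_fault(&c, FAULT_MAJOR);
	printf("min_flt=%lu maj_flt=%lu\n", c.min_flt, c.maj_flt);
	return 0;
}
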
diff --git a/arch/x86_64/mm/numa.c b/arch/x86_64/mm/numa.c
index ac61c186eb02..70cb2904a90f 100644
--- a/arch/x86_64/mm/numa.c
+++ b/arch/x86_64/mm/numa.c
@@ -36,34 +36,36 @@ int numa_off __initdata;
 int __init compute_hash_shift(struct node *nodes, int numnodes)
 {
 	int i;
-	int shift = 24;
-	u64 addr;
+	int shift = 20;
+	unsigned long addr,maxend=0;
 
-	/* When in doubt use brute force. */
-	while (shift < 48) {
-		memset(memnodemap,0xff,sizeof(*memnodemap) * NODEMAPSIZE);
-		for (i = 0; i < numnodes; i++) {
-			if (nodes[i].start == nodes[i].end)
-				continue;
-			for (addr = nodes[i].start;
-			     addr < nodes[i].end;
-			     addr += (1UL << shift)) {
-				if (memnodemap[addr >> shift] != 0xff &&
-				    memnodemap[addr >> shift] != i) {
-					printk(KERN_INFO
-					"node %d shift %d addr %Lx conflict %d\n",
-					 i, shift, addr, memnodemap[addr>>shift]);
-					goto next;
-				}
-				memnodemap[addr >> shift] = i;
+	for (i = 0; i < numnodes; i++)
+		if ((nodes[i].start != nodes[i].end) && (nodes[i].end > maxend))
+			maxend = nodes[i].end;
+
+	while ((1UL << shift) < (maxend / NODEMAPSIZE))
+		shift++;
+
+	printk (KERN_DEBUG"Using %d for the hash shift. Max adder is %lx \n",
+			shift,maxend);
+	memset(memnodemap,0xff,sizeof(*memnodemap) * NODEMAPSIZE);
+	for (i = 0; i < numnodes; i++) {
+		if (nodes[i].start == nodes[i].end)
+			continue;
+		for (addr = nodes[i].start;
+		     addr < nodes[i].end;
+		     addr += (1UL << shift)) {
+			if (memnodemap[addr >> shift] != 0xff) {
+				printk(KERN_INFO
+			"Your memory is not aligned you need to rebuild your kernel "
+			"with a bigger NODEMAPSIZE shift=%d adder=%lu\n",
+					shift,addr);
+				return -1;
 			}
+			memnodemap[addr >> shift] = i;
 		}
-		return shift;
-	next:
-		shift++;
 	}
-	memset(memnodemap,0,sizeof(*memnodemap) * NODEMAPSIZE);
-	return -1;
+	return shift;
 }
 
 #ifdef CONFIG_SPARSEMEM
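
In the new compute_hash_shift() above, the shift is no longer found by brute-force retry: it is the smallest value (starting at 20) for which NODEMAPSIZE chunks of (1 << shift) bytes reach the highest node end address, and a node range that does not line up with that chunk size now hits the "memory is not aligned" error path and returns -1 instead of retrying with a bigger shift. The stand-alone sketch below reproduces just that shift calculation; the NODEMAPSIZE value and the two-node layout are assumptions for illustration, not taken from the patch.

#include <stdio.h>

#define NODEMAPSIZE 0xff	/* assumed memnodemap size, for illustration only */

struct node { unsigned long start, end; };

static int compute_shift(struct node *nodes, int numnodes)
{
	int i, shift = 20;
	unsigned long maxend = 0;

	/* Highest end address of any populated node. */
	for (i = 0; i < numnodes; i++)
		if (nodes[i].start != nodes[i].end && nodes[i].end > maxend)
			maxend = nodes[i].end;

	/* Smallest shift so NODEMAPSIZE chunks of (1 << shift) bytes reach maxend. */
	while ((1UL << shift) < (maxend / NODEMAPSIZE))
		shift++;
	return shift;
}

int main(void)
{
	/* Hypothetical layout: two 4 GB nodes back to back. */
	struct node nodes[] = {
		{ 0x000000000UL, 0x100000000UL },
		{ 0x100000000UL, 0x200000000UL },
	};

	printf("shift = %d\n", compute_shift(nodes, 2));	/* prints 26 with these values */
	return 0;
}
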
diff --git a/arch/x86_64/mm/srat.c b/arch/x86_64/mm/srat.c
index 5d01b31472e1..8e3d097a9ddd 100644
--- a/arch/x86_64/mm/srat.c
+++ b/arch/x86_64/mm/srat.c
@@ -20,6 +20,9 @@
 
 static struct acpi_table_slit *acpi_slit;
 
+/* Internal processor count */
+static unsigned int __initdata num_processors = 0;
+
 static nodemask_t nodes_parsed __initdata;
 static nodemask_t nodes_found __initdata;
 static struct node nodes[MAX_NUMNODES] __initdata;
@@ -101,16 +104,18 @@ acpi_numa_processor_affinity_init(struct acpi_table_processor_affinity *pa)
 		bad_srat();
 		return;
 	}
-	if (pa->apic_id >= NR_CPUS) {
-		printk(KERN_ERR "SRAT: lapic %u too large.\n",
-		       pa->apic_id);
+	if (num_processors >= NR_CPUS) {
+		printk(KERN_ERR "SRAT: Processor #%d (lapic %u) INVALID. (Max ID: %d).\n",
+			num_processors, pa->apic_id, NR_CPUS);
 		bad_srat();
 		return;
 	}
-	cpu_to_node[pa->apic_id] = node;
+	cpu_to_node[num_processors] = node;
 	acpi_numa = 1;
-	printk(KERN_INFO "SRAT: PXM %u -> APIC %u -> Node %u\n",
-	       pxm, pa->apic_id, node);
+	printk(KERN_INFO "SRAT: PXM %u -> APIC %u -> CPU %u -> Node %u\n",
+		pxm, pa->apic_id, num_processors, node);
+
+	num_processors++;
 }
 
 /* Callback for parsing of the Proximity Domain <-> Memory Area mappings */
@@ -124,7 +129,6 @@ acpi_numa_memory_affinity_init(struct acpi_table_memory_affinity *ma)
 
 	if (srat_disabled() || ma->flags.enabled == 0)
 		return;
-	/* hotplug bit is ignored for now */
 	pxm = ma->proximity_domain;
 	node = setup_node(pxm);
 	if (node < 0) {
@@ -134,6 +138,10 @@ acpi_numa_memory_affinity_init(struct acpi_table_memory_affinity *ma)
 	}
 	start = ma->base_addr_lo | ((u64)ma->base_addr_hi << 32);
 	end = start + (ma->length_lo | ((u64)ma->length_hi << 32));
+	/* It is fine to add this area to the nodes data it will be used later*/
+	if (ma->flags.hot_pluggable == 1)
+		printk(KERN_INFO "SRAT: hot plug zone found %lx - %lx \n",
+			start, end);
 	i = conflicting_nodes(start, end);
 	if (i >= 0) {
 		printk(KERN_ERR
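
The srat.c changes above make processor affinity entries take consecutive indices from the new num_processors counter rather than being indexed by APIC ID, check that counter (not the APIC ID) against NR_CPUS, and log hot-pluggable memory zones instead of silently ignoring the flag. A minimal stand-alone sketch of the counter-based bookkeeping follows; srat_cpu_entry, record_cpu_affinity(), and the NR_CPUS value here are hypothetical stand-ins, not kernel code.

#include <stdio.h>

#define NR_CPUS 8			/* example limit, not the kernel's */

struct srat_cpu_entry { unsigned int apic_id; int node; };

static int cpu_to_node[NR_CPUS];
static unsigned int num_processors;

/* Returns the CPU index assigned to this SRAT entry, or -1 once full. */
static int record_cpu_affinity(const struct srat_cpu_entry *e)
{
	if (num_processors >= NR_CPUS)
		return -1;		/* more processors listed than we support */
	cpu_to_node[num_processors] = e->node;
	return num_processors++;
}

int main(void)
{
	struct srat_cpu_entry e = { .apic_id = 0x10, .node = 1 };	/* made-up entry */

	printf("cpu index %d -> node %d\n", record_cpu_affinity(&e), e.node);
	return 0;
}

With this scheme an entry whose APIC ID happens to be numerically larger than NR_CPUS is no longer rejected outright, as the old pa->apic_id >= NR_CPUS check did; only running out of CPU slots trips the bad_srat() path.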