author	Linus Torvalds <torvalds@ppc970.osdl.org>	2005-04-16 18:20:36 -0400
committer	Linus Torvalds <torvalds@ppc970.osdl.org>	2005-04-16 18:20:36 -0400
commit	1da177e4c3f41524e886b7f1b8a0c1fc7321cac2 (patch)
tree	0bba044c4ce775e45a88a51686b5d9f90697ea9d /include/asm-i386/mmzone.h
Linux-2.6.12-rc2 (tag: v2.6.12-rc2)
Initial git repository build. I'm not bothering with the full history, even though we have it. We can create a separate "historical" git archive of that later if we want to, and in the meantime it's about 3.2GB when imported into git - space that would just make the early git days unnecessarily complicated, when we don't have a lot of good infrastructure for it. Let it rip!
Diffstat (limited to 'include/asm-i386/mmzone.h')
-rw-r--r--	include/asm-i386/mmzone.h	147
1 file changed, 147 insertions(+), 0 deletions(-)
diff --git a/include/asm-i386/mmzone.h b/include/asm-i386/mmzone.h
new file mode 100644
index 000000000000..13830ae67cac
--- /dev/null
+++ b/include/asm-i386/mmzone.h
@@ -0,0 +1,147 @@
/*
 * Written by Pat Gaughen (gone@us.ibm.com) Mar 2002
 *
 */

#ifndef _ASM_MMZONE_H_
#define _ASM_MMZONE_H_

#include <asm/smp.h>

#ifdef CONFIG_DISCONTIGMEM

#ifdef CONFIG_NUMA
	#ifdef CONFIG_X86_NUMAQ
		#include <asm/numaq.h>
	#else	/* summit or generic arch */
		#include <asm/srat.h>
	#endif
#else /* !CONFIG_NUMA */
	#define get_memcfg_numa get_memcfg_numa_flat
	#define get_zholes_size(n) (0)
#endif /* CONFIG_NUMA */

extern struct pglist_data *node_data[];
#define NODE_DATA(nid)	(node_data[nid])

/*
 * generic node memory support, the following assumptions apply:
 *
 * 1) memory comes in 256MB contiguous chunks which are either present or not
 * 2) we will not have more than 64GB in total
 *
 * for now, assume 64GB is the maximum amount of RAM for the whole system
 *    64GB / 4096 bytes/page = 16777216 pages
 */
#define MAX_NR_PAGES 16777216
#define MAX_ELEMENTS 256
#define PAGES_PER_ELEMENT (MAX_NR_PAGES/MAX_ELEMENTS)

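/*
 * Each physnode_map entry covers PAGES_PER_ELEMENT pages:
 * 16777216 pages / 256 elements = 65536 pages = 256MB per entry,
 * i.e. one entry per 256MB chunk from assumption (1) above, recording
 * which node that chunk belongs to.
 */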
extern s8 physnode_map[];

static inline int pfn_to_nid(unsigned long pfn)
{
#ifdef CONFIG_NUMA
	return((int) physnode_map[(pfn) / PAGES_PER_ELEMENT]);
#else
	return 0;
#endif
}

/*
 * Following are macros that are specific to this numa platform.
 */
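/*
 * All of these route bootmem allocations and reservations to node 0's
 * pg_data_t via NODE_DATA(0); the *_node variants ignore the node
 * argument they are given.
 */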
#define reserve_bootmem(addr, size) \
	reserve_bootmem_node(NODE_DATA(0), (addr), (size))
#define alloc_bootmem(x) \
	__alloc_bootmem_node(NODE_DATA(0), (x), SMP_CACHE_BYTES, __pa(MAX_DMA_ADDRESS))
#define alloc_bootmem_low(x) \
	__alloc_bootmem_node(NODE_DATA(0), (x), SMP_CACHE_BYTES, 0)
#define alloc_bootmem_pages(x) \
	__alloc_bootmem_node(NODE_DATA(0), (x), PAGE_SIZE, __pa(MAX_DMA_ADDRESS))
#define alloc_bootmem_low_pages(x) \
	__alloc_bootmem_node(NODE_DATA(0), (x), PAGE_SIZE, 0)
#define alloc_bootmem_node(ignore, x) \
	__alloc_bootmem_node(NODE_DATA(0), (x), SMP_CACHE_BYTES, __pa(MAX_DMA_ADDRESS))
#define alloc_bootmem_pages_node(ignore, x) \
	__alloc_bootmem_node(NODE_DATA(0), (x), PAGE_SIZE, __pa(MAX_DMA_ADDRESS))
#define alloc_bootmem_low_pages_node(ignore, x) \
	__alloc_bootmem_node(NODE_DATA(0), (x), PAGE_SIZE, 0)

#define node_localnr(pfn, nid)	((pfn) - node_data[nid]->node_start_pfn)

/*
 * Following are macros that each numa implementation must define.
 */

/*
 * Given a kernel address, find the home node of the underlying memory.
 */
#define kvaddr_to_nid(kaddr)	pfn_to_nid(__pa(kaddr) >> PAGE_SHIFT)

#define node_mem_map(nid)	(NODE_DATA(nid)->node_mem_map)
#define node_start_pfn(nid)	(NODE_DATA(nid)->node_start_pfn)
#define node_end_pfn(nid) \
({ \
	pg_data_t *__pgdat = NODE_DATA(nid); \
	__pgdat->node_start_pfn + __pgdat->node_spanned_pages; \
})

#define local_mapnr(kvaddr) \
({ \
	unsigned long __pfn = __pa(kvaddr) >> PAGE_SHIFT; \
	(__pfn - node_start_pfn(pfn_to_nid(__pfn))); \
})

/* XXX: FIXME -- wli */
#define kern_addr_valid(kaddr)	(0)

#define pfn_to_page(pfn) \
({ \
	unsigned long __pfn = pfn; \
	int __node = pfn_to_nid(__pfn); \
	&node_mem_map(__node)[node_localnr(__pfn,__node)]; \
})

#define page_to_pfn(pg) \
({ \
	struct page *__page = pg; \
	struct zone *__zone = page_zone(__page); \
	(unsigned long)(__page - __zone->zone_mem_map) \
		+ __zone->zone_start_pfn; \
})

#ifdef CONFIG_X86_NUMAQ /* we have contiguous memory on NUMA-Q */
#define pfn_valid(pfn)	((pfn) < num_physpages)
#else
static inline int pfn_valid(int pfn)
{
	int nid = pfn_to_nid(pfn);

	if (nid >= 0)
		return (pfn < node_end_pfn(nid));
	return 0;
}
#endif

extern int get_memcfg_numa_flat(void);
/*
 * This allows any one NUMA architecture to be compiled
 * for, while still falling back to the flat function if it
 * fails.
 */
static inline void get_memcfg_numa(void)
{
#ifdef CONFIG_X86_NUMAQ
	if (get_memcfg_numaq())
		return;
#elif CONFIG_ACPI_SRAT
	if (get_memcfg_from_srat())
		return;
#endif

	get_memcfg_numa_flat();
}

#endif /* CONFIG_DISCONTIGMEM */
#endif /* _ASM_MMZONE_H_ */