author		Linus Torvalds <torvalds@g5.osdl.org>	2005-11-14 22:56:02 -0500
committer	Linus Torvalds <torvalds@g5.osdl.org>	2005-11-14 22:56:02 -0500
commit		4060994c3e337b40e0f6fa8ce2cc178e021baf3d
tree		980297c1747ca89354bc879cc5d17903eacb19e2 /include/asm-x86_64
parent		0174f72f848dfe7dc7488799776303c81b181b16
parent		d3ee871e63d0a0c70413dc0aa5534b8d6cd6ec37
Merge x86-64 update from Andi
Diffstat (limited to 'include/asm-x86_64')
-rw-r--r--	include/asm-x86_64/apic.h      |   2
-rw-r--r--	include/asm-x86_64/cache.h     |   2
-rw-r--r--	include/asm-x86_64/desc.h      |   3
-rw-r--r--	include/asm-x86_64/dma.h       |  11
-rw-r--r--	include/asm-x86_64/hpet.h      |  35
-rw-r--r--	include/asm-x86_64/hw_irq.h    |   2
-rw-r--r--	include/asm-x86_64/ia32.h      |   5
-rw-r--r--	include/asm-x86_64/mce.h       |  10
-rw-r--r--	include/asm-x86_64/mmzone.h    |   9
-rw-r--r--	include/asm-x86_64/mpspec.h    |   7
-rw-r--r--	include/asm-x86_64/msr.h       |   2
-rw-r--r--	include/asm-x86_64/numa.h      |   2
-rw-r--r--	include/asm-x86_64/page.h      |   2
-rw-r--r--	include/asm-x86_64/pda.h       |   1
-rw-r--r--	include/asm-x86_64/pgtable.h   |   5
-rw-r--r--	include/asm-x86_64/processor.h |   4
-rw-r--r--	include/asm-x86_64/proto.h     |   4
-rw-r--r--	include/asm-x86_64/rwsem.h     | 283
-rw-r--r--	include/asm-x86_64/smp.h       |   3
-rw-r--r--	include/asm-x86_64/spinlock.h  |  12
-rw-r--r--	include/asm-x86_64/topology.h  |   2
-rw-r--r--	include/asm-x86_64/unistd.h    |   3
22 files changed, 86 insertions(+), 323 deletions(-)
diff --git a/include/asm-x86_64/apic.h b/include/asm-x86_64/apic.h
index 6c5d5ca8383a..5647b7de1749 100644
--- a/include/asm-x86_64/apic.h
+++ b/include/asm-x86_64/apic.h
@@ -111,6 +111,8 @@ extern unsigned int nmi_watchdog;
 
 extern int disable_timer_pin_1;
 
+extern void setup_threshold_lvt(unsigned long lvt_off);
+
 #endif /* CONFIG_X86_LOCAL_APIC */
 
 extern unsigned boot_cpu_id;
diff --git a/include/asm-x86_64/cache.h b/include/asm-x86_64/cache.h
index eda62bae1240..33e53424128b 100644
--- a/include/asm-x86_64/cache.h
+++ b/include/asm-x86_64/cache.h
@@ -9,6 +9,6 @@
 /* L1 cache line size */
 #define L1_CACHE_SHIFT	(CONFIG_X86_L1_CACHE_SHIFT)
 #define L1_CACHE_BYTES	(1 << L1_CACHE_SHIFT)
-#define L1_CACHE_SHIFT_MAX 6	/* largest L1 which this arch supports */
+#define L1_CACHE_SHIFT_MAX 7	/* largest L1 which this arch supports */
 
 #endif
diff --git a/include/asm-x86_64/desc.h b/include/asm-x86_64/desc.h
index b837820c9073..33764869387b 100644
--- a/include/asm-x86_64/desc.h
+++ b/include/asm-x86_64/desc.h
@@ -98,16 +98,19 @@ static inline void _set_gate(void *adr, unsigned type, unsigned long func, unsig
 
 static inline void set_intr_gate(int nr, void *func)
 {
+	BUG_ON((unsigned)nr > 0xFF);
 	_set_gate(&idt_table[nr], GATE_INTERRUPT, (unsigned long) func, 0, 0);
 }
 
 static inline void set_intr_gate_ist(int nr, void *func, unsigned ist)
 {
+	BUG_ON((unsigned)nr > 0xFF);
 	_set_gate(&idt_table[nr], GATE_INTERRUPT, (unsigned long) func, 0, ist);
 }
 
 static inline void set_system_gate(int nr, void *func)
 {
+	BUG_ON((unsigned)nr > 0xFF);
 	_set_gate(&idt_table[nr], GATE_INTERRUPT, (unsigned long) func, 3, 0);
 }
 
diff --git a/include/asm-x86_64/dma.h b/include/asm-x86_64/dma.h
index 16fa3a064d0c..6f2a817b6a7c 100644
--- a/include/asm-x86_64/dma.h
+++ b/include/asm-x86_64/dma.h
@@ -72,8 +72,15 @@
 
 #define MAX_DMA_CHANNELS	8
 
-/* The maximum address that we can perform a DMA transfer to on this platform */
-#define MAX_DMA_ADDRESS      (PAGE_OFFSET+0x1000000)
+
+/* 16MB ISA DMA zone */
+#define MAX_DMA_PFN   ((16*1024*1024) >> PAGE_SHIFT)
+
+/* 4GB broken PCI/AGP hardware bus master zone */
+#define MAX_DMA32_PFN ((4UL*1024*1024*1024) >> PAGE_SHIFT)
+
+/* Compat define for old dma zone */
+#define MAX_DMA_ADDRESS ((unsigned long)__va(MAX_DMA_PFN << PAGE_SHIFT))
 
 /* 8237 DMA controllers */
 #define IO_DMA1_BASE	0x00	/* 8 bit slave DMA, channels 0..3 */
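
The new constants express the DMA zone boundaries as page frame numbers rather than a single virtual address limit. A minimal standalone sketch of the arithmetic, assuming 4KB pages (PAGE_SHIFT = 12) purely for illustration:

#include <stdio.h>

#define PAGE_SHIFT    12                                    /* illustrative value */
#define MAX_DMA_PFN   ((16*1024*1024) >> PAGE_SHIFT)        /* 16MB ISA DMA zone */
#define MAX_DMA32_PFN ((4UL*1024*1024*1024) >> PAGE_SHIFT)  /* 4GB zone for 32-bit-only bus masters */

int main(void)
{
	/* 16MB / 4KB = 4096 frames; 4GB / 4KB = 1048576 frames */
	printf("MAX_DMA_PFN   = %d\n", (int)MAX_DMA_PFN);
	printf("MAX_DMA32_PFN = %lu\n", (unsigned long)MAX_DMA32_PFN);
	return 0;
}
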
diff --git a/include/asm-x86_64/hpet.h b/include/asm-x86_64/hpet.h
index a3877f570998..c20c28f5c7a0 100644
--- a/include/asm-x86_64/hpet.h
+++ b/include/asm-x86_64/hpet.h
@@ -14,18 +14,18 @@
 #define HPET_CFG	0x010
 #define HPET_STATUS	0x020
 #define HPET_COUNTER	0x0f0
-#define HPET_T0_CFG	0x100
-#define HPET_T0_CMP	0x108
-#define HPET_T0_ROUTE	0x110
-#define HPET_T1_CFG	0x120
-#define HPET_T1_CMP	0x128
-#define HPET_T1_ROUTE	0x130
-#define HPET_T2_CFG	0x140
-#define HPET_T2_CMP	0x148
-#define HPET_T2_ROUTE	0x150
+#define HPET_Tn_OFFSET	0x20
+#define HPET_Tn_CFG(n)	 (0x100 + (n) * HPET_Tn_OFFSET)
+#define HPET_Tn_ROUTE(n) (0x104 + (n) * HPET_Tn_OFFSET)
+#define HPET_Tn_CMP(n)	 (0x108 + (n) * HPET_Tn_OFFSET)
+#define HPET_T0_CFG	HPET_Tn_CFG(0)
+#define HPET_T0_CMP	HPET_Tn_CMP(0)
+#define HPET_T1_CFG	HPET_Tn_CFG(1)
+#define HPET_T1_CMP	HPET_Tn_CMP(1)
 
 #define HPET_ID_VENDOR	0xffff0000
 #define HPET_ID_LEGSUP	0x00008000
+#define HPET_ID_64BIT	0x00002000
 #define HPET_ID_NUMBER	0x00001f00
 #define HPET_ID_REV	0x000000ff
 #define HPET_ID_NUMBER_SHIFT	8
@@ -38,11 +38,18 @@
 #define HPET_LEGACY_8254	2
 #define HPET_LEGACY_RTC	8
 
-#define HPET_TN_ENABLE		0x004
-#define HPET_TN_PERIODIC	0x008
-#define HPET_TN_PERIODIC_CAP	0x010
-#define HPET_TN_SETVAL		0x040
-#define HPET_TN_32BIT		0x100
+#define HPET_TN_LEVEL		0x0002
+#define HPET_TN_ENABLE		0x0004
+#define HPET_TN_PERIODIC	0x0008
+#define HPET_TN_PERIODIC_CAP	0x0010
+#define HPET_TN_64BIT_CAP	0x0020
+#define HPET_TN_SETVAL		0x0040
+#define HPET_TN_32BIT		0x0100
+#define HPET_TN_ROUTE		0x3e00
+#define HPET_TN_FSB		0x4000
+#define HPET_TN_FSB_CAP	0x8000
+
+#define HPET_TN_ROUTE_SHIFT	9
 
 extern int is_hpet_enabled(void);
 extern int hpet_rtc_timer_init(void);
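
The per-timer register constants are now generated from one parameterized family instead of being spelled out for timers 0-2. The sketch below is a standalone program using only macro definitions taken from this hunk; it prints the offsets the macros compute, and shows that HPET_T0_CFG/HPET_T0_CMP and the timer-1 equivalents expand to the same values as the old fixed defines:

#include <stdio.h>

#define HPET_Tn_OFFSET	0x20
#define HPET_Tn_CFG(n)	(0x100 + (n) * HPET_Tn_OFFSET)
#define HPET_Tn_CMP(n)	(0x108 + (n) * HPET_Tn_OFFSET)
#define HPET_T0_CFG	HPET_Tn_CFG(0)
#define HPET_T0_CMP	HPET_Tn_CMP(0)

int main(void)
{
	int n;

	/* timer 0: CFG 0x100, CMP 0x108; timer 1: CFG 0x120, CMP 0x128; ... */
	for (n = 0; n < 3; n++)
		printf("timer %d: CFG=0x%03x CMP=0x%03x\n",
		       n, HPET_Tn_CFG(n), HPET_Tn_CMP(n));
	printf("HPET_T0_CFG=0x%03x HPET_T0_CMP=0x%03x\n", HPET_T0_CFG, HPET_T0_CMP);
	return 0;
}
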
diff --git a/include/asm-x86_64/hw_irq.h b/include/asm-x86_64/hw_irq.h
index dc97668ea0f9..c14a8c7267a6 100644
--- a/include/asm-x86_64/hw_irq.h
+++ b/include/asm-x86_64/hw_irq.h
@@ -55,7 +55,7 @@ struct hw_interrupt_type;
 #define CALL_FUNCTION_VECTOR	0xfc
 #define KDB_VECTOR	0xfb	/* reserved for KDB */
 #define THERMAL_APIC_VECTOR	0xfa
-/* 0xf9 free */
+#define THRESHOLD_APIC_VECTOR   0xf9
 #define INVALIDATE_TLB_VECTOR_END	0xf8
 #define INVALIDATE_TLB_VECTOR_START	0xf0	/* f0-f8 used for TLB flush */
 
diff --git a/include/asm-x86_64/ia32.h b/include/asm-x86_64/ia32.h
index 6efa00fe4e7b..c7bc9c0525ba 100644
--- a/include/asm-x86_64/ia32.h
+++ b/include/asm-x86_64/ia32.h
@@ -165,6 +165,11 @@ struct siginfo_t;
 int do_get_thread_area(struct thread_struct *t, struct user_desc __user *info);
 int do_set_thread_area(struct thread_struct *t, struct user_desc __user *info);
 int ia32_child_tls(struct task_struct *p, struct pt_regs *childregs);
+
+struct linux_binprm;
+extern int ia32_setup_arg_pages(struct linux_binprm *bprm,
+				unsigned long stack_top, int exec_stack);
+
 #endif
 
 #endif /* !CONFIG_IA32_SUPPORT */
diff --git a/include/asm-x86_64/mce.h b/include/asm-x86_64/mce.h
index 869249db6795..5d298b799a9f 100644
--- a/include/asm-x86_64/mce.h
+++ b/include/asm-x86_64/mce.h
@@ -67,6 +67,8 @@ struct mce_log {
 /* Software defined banks */
 #define MCE_EXTENDED_BANK	128
 #define MCE_THERMAL_BANK	MCE_EXTENDED_BANK + 0
+#define MCE_THRESHOLD_BASE      MCE_EXTENDED_BANK + 1 /* MCE_AMD */
+#define MCE_THRESHOLD_DRAM_ECC  MCE_THRESHOLD_BASE + 4
 
 void mce_log(struct mce *m);
 #ifdef CONFIG_X86_MCE_INTEL
@@ -77,4 +79,12 @@ static inline void mce_intel_feature_init(struct cpuinfo_x86 *c)
 }
 #endif
 
+#ifdef CONFIG_X86_MCE_AMD
+void mce_amd_feature_init(struct cpuinfo_x86 *c);
+#else
+static inline void mce_amd_feature_init(struct cpuinfo_x86 *c)
+{
+}
+#endif
+
 #endif
diff --git a/include/asm-x86_64/mmzone.h b/include/asm-x86_64/mmzone.h
index b40c661f111e..69baaa8a3ce0 100644
--- a/include/asm-x86_64/mmzone.h
+++ b/include/asm-x86_64/mmzone.h
@@ -17,16 +17,15 @@
 /* Simple perfect hash to map physical addresses to node numbers */
 extern int memnode_shift;
 extern u8  memnodemap[NODEMAPSIZE];
-extern int maxnode;
 
 extern struct pglist_data *node_data[];
 
 static inline __attribute__((pure)) int phys_to_nid(unsigned long addr)
 {
-	int nid;
+	unsigned nid;
 	VIRTUAL_BUG_ON((addr >> memnode_shift) >= NODEMAPSIZE);
 	nid = memnodemap[addr >> memnode_shift];
-	VIRTUAL_BUG_ON(nid > maxnode);
+	VIRTUAL_BUG_ON(nid >= MAX_NUMNODES || !node_data[nid]);
 	return nid;
 }
 
@@ -41,9 +40,7 @@ static inline __attribute__((pure)) int phys_to_nid(unsigned long addr)
 #define pfn_to_nid(pfn) phys_to_nid((unsigned long)(pfn) << PAGE_SHIFT)
 #define kvaddr_to_nid(kaddr)	phys_to_nid(__pa(kaddr))
 
-/* AK: this currently doesn't deal with invalid addresses. We'll see
-   if the 2.5 kernel doesn't pass them
-   (2.4 used to). */
+/* Requires pfn_valid(pfn) to be true */
 #define pfn_to_page(pfn) ({ \
 	int nid = phys_to_nid(((unsigned long)(pfn)) << PAGE_SHIFT); \
 	((pfn) - node_start_pfn(nid)) + NODE_DATA(nid)->node_mem_map; \
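
The "simple perfect hash" in phys_to_nid() is just a shift and a byte-table lookup. A standalone sketch with a hypothetical two-node layout (1GB hash granularity and an arbitrary table size, not the kernel's memnode_shift or NODEMAPSIZE):

#include <stdio.h>

static int memnode_shift = 30;                        /* hypothetical: 1GB granularity */
static unsigned char memnodemap[8] = { 0, 0, 1, 1 };  /* first 2GB -> node 0, next 2GB -> node 1 */

static int phys_to_nid(unsigned long addr)
{
	return memnodemap[addr >> memnode_shift];
}

int main(void)
{
	printf("0x00000000 -> node %d\n", phys_to_nid(0x00000000UL));  /* node 0 */
	printf("0x80000000 -> node %d\n", phys_to_nid(0x80000000UL));  /* 2GB boundary -> node 1 */
	return 0;
}
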
diff --git a/include/asm-x86_64/mpspec.h b/include/asm-x86_64/mpspec.h
index f267e10c023d..6f8a17d105ab 100644
--- a/include/asm-x86_64/mpspec.h
+++ b/include/asm-x86_64/mpspec.h
@@ -16,7 +16,7 @@
 /*
  * A maximum of 255 APICs with the current APIC ID architecture.
  */
-#define MAX_APICS 128
+#define MAX_APICS 255
 
 struct intel_mp_floating
 {
@@ -157,7 +157,8 @@ struct mpc_config_lintsrc
  */
 
 #define MAX_MP_BUSSES 256
-#define MAX_IRQ_SOURCES 256
+/* Each PCI slot may be a combo card with its own bus.  4 IRQ pins per slot. */
+#define MAX_IRQ_SOURCES (MAX_MP_BUSSES * 4)
 enum mp_bustype {
 	MP_BUS_ISA = 1,
 	MP_BUS_EISA,
@@ -172,7 +173,7 @@ extern int smp_found_config;
 extern void find_smp_config (void);
 extern void get_smp_config (void);
 extern int nr_ioapics;
-extern int apic_version [MAX_APICS];
+extern unsigned char apic_version [MAX_APICS];
 extern int mp_irq_entries;
 extern struct mpc_config_intsrc mp_irqs [MAX_IRQ_SOURCES];
 extern int mpc_default_type;
diff --git a/include/asm-x86_64/msr.h b/include/asm-x86_64/msr.h
index 5a7fe3c6c3d8..24dc39651bc4 100644
--- a/include/asm-x86_64/msr.h
+++ b/include/asm-x86_64/msr.h
@@ -19,7 +19,7 @@
 		     : "=a" (a__), "=d" (b__) \
 		     : "c" (msr)); \
 	val = a__ | (b__<<32); \
-} while(0);
+} while(0)
 
 #define wrmsr(msr,val1,val2) \
      __asm__ __volatile__("wrmsr" \
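
Dropping the trailing semicolon turns the macro into the standard do { ... } while (0) idiom. With the semicolon baked into the expansion, a call site followed by its own semicolon leaves a stray empty statement, which breaks if/else chains. A minimal illustration with hypothetical macros (not the rdmsrl() body):

#define FIXED()   do { } while (0)     /* call sites supply the ';' themselves */
#define BROKEN()  do { } while (0);    /* the extra ';' ends the if early */

void example(int x)
{
	if (x)
		FIXED();        /* expands to a single well-formed statement */
	else
		FIXED();

#if 0	/* would not compile: BROKEN()'s own ';' terminates the if,
	   leaving the following "else" without a matching "if" */
	if (x)
		BROKEN();
	else
		BROKEN();
#endif
}
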
diff --git a/include/asm-x86_64/numa.h b/include/asm-x86_64/numa.h
index bcf55c3f7f7f..d51e56fdc3da 100644
--- a/include/asm-x86_64/numa.h
+++ b/include/asm-x86_64/numa.h
@@ -17,6 +17,8 @@ extern void numa_add_cpu(int cpu);
 extern void numa_init_array(void);
 extern int numa_off;
 
+extern void numa_set_node(int cpu, int node);
+
 extern unsigned char apicid_to_node[256];
 
 #define NUMA_NO_NODE 0xff
diff --git a/include/asm-x86_64/page.h b/include/asm-x86_64/page.h
index e5ab4d231f2c..06e489f32472 100644
--- a/include/asm-x86_64/page.h
+++ b/include/asm-x86_64/page.h
@@ -11,7 +11,7 @@
 #define PAGE_SIZE	(1UL << PAGE_SHIFT)
 #endif
 #define PAGE_MASK	(~(PAGE_SIZE-1))
-#define PHYSICAL_PAGE_MASK	(~(PAGE_SIZE-1) & (__PHYSICAL_MASK << PAGE_SHIFT))
+#define PHYSICAL_PAGE_MASK	(~(PAGE_SIZE-1) & __PHYSICAL_MASK)
 
 #define THREAD_ORDER 1
 #ifdef __ASSEMBLY__
diff --git a/include/asm-x86_64/pda.h b/include/asm-x86_64/pda.h
index bbf89aa8a1af..8733ccfa442e 100644
--- a/include/asm-x86_64/pda.h
+++ b/include/asm-x86_64/pda.h
@@ -15,6 +15,7 @@ struct x8664_pda {
 	int irqcount;		    /* Irq nesting counter. Starts with -1 */
 	int cpunumber;		    /* Logical CPU number */
 	char *irqstackptr;	/* top of irqstack */
+	int nodenumber;		    /* number of current node */
 	unsigned int __softirq_pending;
 	unsigned int __nmi_count;	/* number of NMI on this CPUs */
 	struct mm_struct *active_mm;
diff --git a/include/asm-x86_64/pgtable.h b/include/asm-x86_64/pgtable.h
index 7309fffeec9a..ecf58c7c1650 100644
--- a/include/asm-x86_64/pgtable.h
+++ b/include/asm-x86_64/pgtable.h
@@ -16,6 +16,7 @@ extern pud_t level3_physmem_pgt[512];
 extern pud_t level3_ident_pgt[512];
 extern pmd_t level2_kernel_pgt[512];
 extern pgd_t init_level4_pgt[];
+extern pgd_t boot_level4_pgt[];
 extern unsigned long __supported_pte_mask;
 
 #define swapper_pg_dir init_level4_pgt
@@ -247,7 +248,7 @@ static inline unsigned long pud_bad(pud_t pud)
 #define pages_to_mb(x) ((x) >> (20-PAGE_SHIFT))	/* FIXME: is this
 						   right? */
 #define pte_page(x)	pfn_to_page(pte_pfn(x))
-#define pte_pfn(x)  ((pte_val(x) >> PAGE_SHIFT) & __PHYSICAL_MASK)
+#define pte_pfn(x)  ((pte_val(x) & __PHYSICAL_MASK) >> PAGE_SHIFT)
 
 static inline pte_t pfn_pte(unsigned long page_nr, pgprot_t pgprot)
 {
@@ -354,7 +355,7 @@ static inline pud_t *__pud_offset_k(pud_t *pud, unsigned long address)
 #define pmd_clear(xp)	do { set_pmd(xp, __pmd(0)); } while (0)
 #define	pmd_bad(x)	((pmd_val(x) & (~PTE_MASK & ~_PAGE_USER)) != _KERNPG_TABLE )
 #define pfn_pmd(nr,prot) (__pmd(((nr) << PAGE_SHIFT) | pgprot_val(prot)))
-#define pmd_pfn(x)  ((pmd_val(x) >> PAGE_SHIFT) & __PHYSICAL_MASK)
+#define pmd_pfn(x)  ((pmd_val(x) & __PHYSICAL_MASK) >> PAGE_SHIFT)
 
 #define pte_to_pgoff(pte) ((pte_val(pte) & PHYSICAL_PAGE_MASK) >> PAGE_SHIFT)
 #define pgoff_to_pte(off) ((pte_t) { ((off) << PAGE_SHIFT) | _PAGE_FILE })
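
The pte_pfn()/pmd_pfn() change masks the entry down to its physical-address field before shifting, instead of masking the already-shifted value. The difference matters once bits above the address field (NX, software bits) are set in an entry. A worked standalone example; the 46-bit mask and the bit layout are illustrative stand-ins for __PHYSICAL_MASK and the real PTE format:

#include <stdio.h>

#define PAGE_SHIFT 12
#define PHYS_MASK  ((1UL << 46) - 1)   /* illustrative stand-in for __PHYSICAL_MASK */

int main(void)
{
	/* frame at 0x123456000, some low flag bits, plus a high software bit (bit 52) */
	unsigned long pte = (1UL << 52) | 0x123456000UL | 0x63UL;

	unsigned long old_pfn = (pte >> PAGE_SHIFT) & PHYS_MASK;  /* old ordering */
	unsigned long new_pfn = (pte & PHYS_MASK) >> PAGE_SHIFT;  /* new ordering */

	printf("old: 0x%lx\n", old_pfn);  /* 0x10000123456 - the high bit leaks into the PFN */
	printf("new: 0x%lx\n", new_pfn);  /* 0x123456      - just the frame number */
	return 0;
}
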
diff --git a/include/asm-x86_64/processor.h b/include/asm-x86_64/processor.h
index 03837d34fba0..4861246548f7 100644
--- a/include/asm-x86_64/processor.h
+++ b/include/asm-x86_64/processor.h
@@ -61,10 +61,12 @@ struct cpuinfo_x86 {
 	int	x86_cache_alignment;
 	int	x86_tlbsize;	/* number of 4K pages in DTLB/ITLB combined(in pages)*/
 	__u8	x86_virt_bits, x86_phys_bits;
-	__u8	x86_num_cores;
+	__u8	x86_max_cores;	/* cpuid returned max cores value */
 	__u32	x86_power;
 	__u32	extended_cpuid_level;	/* Max extended CPUID function supported */
 	unsigned long loops_per_jiffy;
+	__u8	apicid;
+	__u8	booted_cores;	/* number of cores as seen by OS */
 } ____cacheline_aligned;
 
 #define X86_VENDOR_INTEL 0
diff --git a/include/asm-x86_64/proto.h b/include/asm-x86_64/proto.h
index dbb37b0adb43..34501086afef 100644
--- a/include/asm-x86_64/proto.h
+++ b/include/asm-x86_64/proto.h
@@ -11,6 +11,8 @@ struct pt_regs;
 extern void start_kernel(void);
 extern void pda_init(int);
 
+extern void zap_low_mappings(int cpu);
+
 extern void early_idt_handler(void);
 
 extern void mcheck_init(struct cpuinfo_x86 *c);
@@ -22,6 +24,8 @@ extern void mtrr_bp_init(void);
 #define mtrr_bp_init() do {} while (0)
 #endif
 extern void init_memory_mapping(unsigned long start, unsigned long end);
+extern void size_zones(unsigned long *z, unsigned long *h,
+			unsigned long start_pfn, unsigned long end_pfn);
 
 extern void system_call(void);
 extern int kernel_syscall(void);
diff --git a/include/asm-x86_64/rwsem.h b/include/asm-x86_64/rwsem.h
deleted file mode 100644
index 46077e9c1910..000000000000
--- a/include/asm-x86_64/rwsem.h
+++ /dev/null
@@ -1,283 +0,0 @@
-/* rwsem.h: R/W semaphores implemented using XADD/CMPXCHG for x86_64+
- *
- * Written by David Howells (dhowells@redhat.com).
- * Ported by Andi Kleen <ak@suse.de> to x86-64.
- *
- * Derived from asm-i386/semaphore.h and asm-i386/rwsem.h
- *
- *
- * The MSW of the count is the negated number of active writers and waiting
- * lockers, and the LSW is the total number of active locks
- *
- * The lock count is initialized to 0 (no active and no waiting lockers).
- *
- * When a writer subtracts WRITE_BIAS, it'll get 0xffff0001 for the case of an
- * uncontended lock. This can be determined because XADD returns the old value.
- * Readers increment by 1 and see a positive value when uncontended, negative
- * if there are writers (and maybe) readers waiting (in which case it goes to
- * sleep).
- *
- * The value of WAITING_BIAS supports up to 32766 waiting processes. This can
- * be extended to 65534 by manually checking the whole MSW rather than relying
- * on the S flag.
- *
- * The value of ACTIVE_BIAS supports up to 65535 active processes.
- *
- * This should be totally fair - if anything is waiting, a process that wants a
- * lock will go to the back of the queue. When the currently active lock is
- * released, if there's a writer at the front of the queue, then that and only
- * that will be woken up; if there's a bunch of consecutive readers at the
- * front, then they'll all be woken up, but no other readers will be.
- */
-
-#ifndef _X8664_RWSEM_H
-#define _X8664_RWSEM_H
-
-#ifndef _LINUX_RWSEM_H
-#error "please don't include asm/rwsem.h directly, use linux/rwsem.h instead"
-#endif
-
-#ifdef __KERNEL__
-
-#include <linux/list.h>
-#include <linux/spinlock.h>
-
-struct rwsem_waiter;
-
-extern struct rw_semaphore *rwsem_down_read_failed(struct rw_semaphore *sem);
-extern struct rw_semaphore *rwsem_down_write_failed(struct rw_semaphore *sem);
-extern struct rw_semaphore *rwsem_wake(struct rw_semaphore *);
-extern struct rw_semaphore *rwsem_downgrade_wake(struct rw_semaphore *sem);
-
-/*
- * the semaphore definition
- */
-struct rw_semaphore {
-	signed int		count;
-#define RWSEM_UNLOCKED_VALUE		0x00000000
-#define RWSEM_ACTIVE_BIAS		0x00000001
-#define RWSEM_ACTIVE_MASK		0x0000ffff
-#define RWSEM_WAITING_BIAS		(-0x00010000)
-#define RWSEM_ACTIVE_READ_BIAS		RWSEM_ACTIVE_BIAS
-#define RWSEM_ACTIVE_WRITE_BIAS		(RWSEM_WAITING_BIAS + RWSEM_ACTIVE_BIAS)
-	spinlock_t		wait_lock;
-	struct list_head	wait_list;
-#if RWSEM_DEBUG
-	int			debug;
-#endif
-};
-
-/*
- * initialisation
- */
-#if RWSEM_DEBUG
-#define __RWSEM_DEBUG_INIT      , 0
-#else
-#define __RWSEM_DEBUG_INIT	/* */
-#endif
-
-#define __RWSEM_INITIALIZER(name) \
-{ RWSEM_UNLOCKED_VALUE, SPIN_LOCK_UNLOCKED, LIST_HEAD_INIT((name).wait_list) \
-	__RWSEM_DEBUG_INIT }
-
-#define DECLARE_RWSEM(name) \
-	struct rw_semaphore name = __RWSEM_INITIALIZER(name)
-
-static inline void init_rwsem(struct rw_semaphore *sem)
-{
-	sem->count = RWSEM_UNLOCKED_VALUE;
-	spin_lock_init(&sem->wait_lock);
-	INIT_LIST_HEAD(&sem->wait_list);
-#if RWSEM_DEBUG
-	sem->debug = 0;
-#endif
-}
-
-/*
- * lock for reading
- */
-static inline void __down_read(struct rw_semaphore *sem)
-{
-	__asm__ __volatile__(
-		"# beginning down_read\n\t"
-LOCK_PREFIX	" incl (%%rdi)\n\t" /* adds 0x00000001, returns the old value */
-		" js 2f\n\t" /* jump if we weren't granted the lock */
-		"1:\n\t"
-		LOCK_SECTION_START("") \
-		"2:\n\t"
-		" call rwsem_down_read_failed_thunk\n\t"
-		" jmp 1b\n"
-		LOCK_SECTION_END \
-		"# ending down_read\n\t"
-		: "+m"(sem->count)
-		: "D"(sem)
-		: "memory", "cc");
-}
-
-
-/*
- * trylock for reading -- returns 1 if successful, 0 if contention
- */
-static inline int __down_read_trylock(struct rw_semaphore *sem)
-{
-	__s32 result, tmp;
-	__asm__ __volatile__(
-		"# beginning __down_read_trylock\n\t"
-		" movl %0,%1\n\t"
-		"1:\n\t"
-		" movl %1,%2\n\t"
-		" addl %3,%2\n\t"
-		" jle 2f\n\t"
-LOCK_PREFIX	" cmpxchgl %2,%0\n\t"
-		" jnz 1b\n\t"
-		"2:\n\t"
-		"# ending __down_read_trylock\n\t"
-		: "+m"(sem->count), "=&a"(result), "=&r"(tmp)
-		: "i"(RWSEM_ACTIVE_READ_BIAS)
-		: "memory", "cc");
-	return result>=0 ? 1 : 0;
-}
-
-
-/*
- * lock for writing
- */
-static inline void __down_write(struct rw_semaphore *sem)
-{
-	int tmp;
-
-	tmp = RWSEM_ACTIVE_WRITE_BIAS;
-	__asm__ __volatile__(
-		"# beginning down_write\n\t"
-LOCK_PREFIX	" xaddl %0,(%%rdi)\n\t" /* subtract 0x0000ffff, returns the old value */
-		" testl %0,%0\n\t" /* was the count 0 before? */
-		" jnz 2f\n\t" /* jump if we weren't granted the lock */
-		"1:\n\t"
-		LOCK_SECTION_START("")
-		"2:\n\t"
-		" call rwsem_down_write_failed_thunk\n\t"
-		" jmp 1b\n"
-		LOCK_SECTION_END
-		"# ending down_write"
-		: "=&r" (tmp)
-		: "0"(tmp), "D"(sem)
-		: "memory", "cc");
-}
-
-/*
- * trylock for writing -- returns 1 if successful, 0 if contention
- */
-static inline int __down_write_trylock(struct rw_semaphore *sem)
-{
-	signed long ret = cmpxchg(&sem->count,
-				  RWSEM_UNLOCKED_VALUE,
-				  RWSEM_ACTIVE_WRITE_BIAS);
-	if (ret == RWSEM_UNLOCKED_VALUE)
-		return 1;
-	return 0;
-}
-
-/*
- * unlock after reading
- */
-static inline void __up_read(struct rw_semaphore *sem)
-{
-	__s32 tmp = -RWSEM_ACTIVE_READ_BIAS;
-	__asm__ __volatile__(
-		"# beginning __up_read\n\t"
-LOCK_PREFIX	" xaddl %[tmp],(%%rdi)\n\t" /* subtracts 1, returns the old value */
-		" js 2f\n\t" /* jump if the lock is being waited upon */
-		"1:\n\t"
-		LOCK_SECTION_START("")
-		"2:\n\t"
-		" decw %w[tmp]\n\t" /* do nothing if still outstanding active readers */
-		" jnz 1b\n\t"
-		" call rwsem_wake_thunk\n\t"
-		" jmp 1b\n"
-		LOCK_SECTION_END
-		"# ending __up_read\n"
-		: "+m"(sem->count), [tmp] "+r" (tmp)
-		: "D"(sem)
-		: "memory", "cc");
-}
-
-/*
- * unlock after writing
- */
-static inline void __up_write(struct rw_semaphore *sem)
-{
-	unsigned tmp;
-	__asm__ __volatile__(
-		"# beginning __up_write\n\t"
-		" movl %[bias],%[tmp]\n\t"
-LOCK_PREFIX	" xaddl %[tmp],(%%rdi)\n\t" /* tries to transition 0xffff0001 -> 0x00000000 */
-		" jnz 2f\n\t" /* jump if the lock is being waited upon */
-		"1:\n\t"
-		LOCK_SECTION_START("")
-		"2:\n\t"
-		" decw %w[tmp]\n\t" /* did the active count reduce to 0? */
-		" jnz 1b\n\t" /* jump back if not */
-		" call rwsem_wake_thunk\n\t"
-		" jmp 1b\n"
-		LOCK_SECTION_END
-		"# ending __up_write\n"
-		: "+m"(sem->count), [tmp] "=r" (tmp)
-		: "D"(sem), [bias] "i"(-RWSEM_ACTIVE_WRITE_BIAS)
-		: "memory", "cc");
-}
-
-/*
- * downgrade write lock to read lock
- */
-static inline void __downgrade_write(struct rw_semaphore *sem)
-{
-	__asm__ __volatile__(
-		"# beginning __downgrade_write\n\t"
-LOCK_PREFIX	" addl %[bias],(%%rdi)\n\t" /* transitions 0xZZZZ0001 -> 0xYYYY0001 */
-		" js 2f\n\t" /* jump if the lock is being waited upon */
-		"1:\n\t"
-		LOCK_SECTION_START("")
-		"2:\n\t"
-		" call rwsem_downgrade_thunk\n"
-		" jmp 1b\n"
-		LOCK_SECTION_END
-		"# ending __downgrade_write\n"
-		: "=m"(sem->count)
-		: "D"(sem), [bias] "i"(-RWSEM_WAITING_BIAS), "m"(sem->count)
-		: "memory", "cc");
-}
-
-/*
- * implement atomic add functionality
- */
-static inline void rwsem_atomic_add(int delta, struct rw_semaphore *sem)
-{
-	__asm__ __volatile__(
-LOCK_PREFIX	"addl %1,%0"
-		:"=m"(sem->count)
-		:"ir"(delta), "m"(sem->count));
-}
-
-/*
- * implement exchange and add functionality
- */
-static inline int rwsem_atomic_update(int delta, struct rw_semaphore *sem)
-{
-	int tmp = delta;
-
-	__asm__ __volatile__(
-LOCK_PREFIX	"xaddl %0,(%2)"
-		: "=r"(tmp), "=m"(sem->count)
-		: "r"(sem), "m"(sem->count), "0" (tmp)
-		: "memory");
-
-	return tmp+delta;
-}
-
-static inline int rwsem_is_locked(struct rw_semaphore *sem)
-{
-	return (sem->count != 0);
-}
-
-#endif /* __KERNEL__ */
-#endif /* _X8664_RWSEM_H */
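
The deleted header's comment describes the count encoding: the low 16 bits count active lock holders, and the high bits go negative while writers or waiters are present. A standalone sketch of just that arithmetic, reusing the bias constants from the deleted file (plain additions here; the real code used LOCK-prefixed xadd/cmpxchg):

#include <stdio.h>

#define RWSEM_UNLOCKED_VALUE		0x00000000
#define RWSEM_ACTIVE_BIAS		0x00000001
#define RWSEM_WAITING_BIAS		(-0x00010000)
#define RWSEM_ACTIVE_READ_BIAS		RWSEM_ACTIVE_BIAS
#define RWSEM_ACTIVE_WRITE_BIAS		(RWSEM_WAITING_BIAS + RWSEM_ACTIVE_BIAS)

int main(void)
{
	int count = RWSEM_UNLOCKED_VALUE;

	count += RWSEM_ACTIVE_READ_BIAS;                   /* uncontended down_read */
	printf("one reader: 0x%08x\n", (unsigned)count);   /* 0x00000001, stays positive */
	count -= RWSEM_ACTIVE_READ_BIAS;                   /* up_read */

	count += RWSEM_ACTIVE_WRITE_BIAS;                  /* uncontended down_write */
	printf("one writer: 0x%08x\n", (unsigned)count);   /* 0xffff0001, negative => readers wait */
	return 0;
}
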
diff --git a/include/asm-x86_64/smp.h b/include/asm-x86_64/smp.h
index b9fb2173ef99..d030409a8fb5 100644
--- a/include/asm-x86_64/smp.h
+++ b/include/asm-x86_64/smp.h
@@ -47,7 +47,6 @@ extern void lock_ipi_call_lock(void);
 extern void unlock_ipi_call_lock(void);
 extern int smp_num_siblings;
 extern void smp_send_reschedule(int cpu);
-extern void zap_low_mappings(void);
 void smp_stop_cpu(void);
 extern int smp_call_function_single(int cpuid, void (*func) (void *info),
 				void *info, int retry, int wait);
@@ -82,6 +81,8 @@ extern int safe_smp_processor_id(void);
 extern int __cpu_disable(void);
 extern void __cpu_die(unsigned int cpu);
 extern void prefill_possible_map(void);
+extern unsigned num_processors;
+extern unsigned disabled_cpus;
 
 #endif /* !ASSEMBLY */
 
diff --git a/include/asm-x86_64/spinlock.h b/include/asm-x86_64/spinlock.h
index 69636831ad2f..fe484a699cc3 100644
--- a/include/asm-x86_64/spinlock.h
+++ b/include/asm-x86_64/spinlock.h
@@ -18,22 +18,22 @@
  */
 
 #define __raw_spin_is_locked(x) \
-		(*(volatile signed char *)(&(x)->slock) <= 0)
+		(*(volatile signed int *)(&(x)->slock) <= 0)
 
 #define __raw_spin_lock_string \
 	"\n1:\t" \
-	"lock ; decb %0\n\t" \
+	"lock ; decl %0\n\t" \
 	"js 2f\n" \
 	LOCK_SECTION_START("") \
 	"2:\t" \
 	"rep;nop\n\t" \
-	"cmpb $0,%0\n\t" \
+	"cmpl $0,%0\n\t" \
 	"jle 2b\n\t" \
 	"jmp 1b\n" \
 	LOCK_SECTION_END
 
 #define __raw_spin_unlock_string \
-	"movb $1,%0" \
+	"movl $1,%0" \
 		:"=m" (lock->slock) : : "memory"
 
 static inline void __raw_spin_lock(raw_spinlock_t *lock)
@@ -47,10 +47,10 @@ static inline void __raw_spin_lock(raw_spinlock_t *lock)
 
 static inline int __raw_spin_trylock(raw_spinlock_t *lock)
 {
-	char oldval;
+	int oldval;
 
 	__asm__ __volatile__(
-		"xchgb %b0,%1"
+		"xchgl %0,%1"
 		:"=q" (oldval), "=m" (lock->slock)
 		:"0" (0) : "memory");
 
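
The lock protocol itself is unchanged by the byte-to-int conversion: positive means free, zero or negative means held; lock is an atomic decrement with a spin-and-retry slow path, trylock exchanges in a zero, unlock stores 1. A C sketch of that protocol, using GCC atomic builtins in place of the LOCK-prefixed instructions (illustrative only, not the kernel's implementation):

typedef struct { volatile int slock; } raw_spinlock_t;   /* 1 = free, <= 0 = held */

static void raw_spin_lock(raw_spinlock_t *lock)
{
	for (;;) {
		/* "lock ; decl %0" + "js 2f": a negative result means it was already taken */
		if (__atomic_sub_fetch(&lock->slock, 1, __ATOMIC_ACQUIRE) >= 0)
			return;
		/* "cmpl $0,%0; jle 2b": spin until the owner stores 1 again, then retry */
		while (lock->slock <= 0)
			;
	}
}

static int raw_spin_trylock(raw_spinlock_t *lock)
{
	/* "xchgl %0,%1" with 0: acquired iff the old value was positive */
	return __atomic_exchange_n(&lock->slock, 0, __ATOMIC_ACQUIRE) > 0;
}

static void raw_spin_unlock(raw_spinlock_t *lock)
{
	/* "movl $1,%0" */
	__atomic_store_n(&lock->slock, 1, __ATOMIC_RELEASE);
}
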
diff --git a/include/asm-x86_64/topology.h b/include/asm-x86_64/topology.h
index 1c603cd7e4d0..d39ebd5263ed 100644
--- a/include/asm-x86_64/topology.h
+++ b/include/asm-x86_64/topology.h
@@ -28,6 +28,8 @@ extern int __node_distance(int, int);
 #define pcibus_to_node(bus)	((long)(bus->sysdata))
 #define pcibus_to_cpumask(bus)	node_to_cpumask(pcibus_to_node(bus));
 
+#define numa_node_id()	read_pda(nodenumber)
+
 /* sched_domains SD_NODE_INIT for x86_64 machines */
 #define SD_NODE_INIT (struct sched_domain) { \
 	.span			= CPU_MASK_NONE, \
diff --git a/include/asm-x86_64/unistd.h b/include/asm-x86_64/unistd.h
index 3c494b65d33a..2c42150bce0c 100644
--- a/include/asm-x86_64/unistd.h
+++ b/include/asm-x86_64/unistd.h
@@ -462,7 +462,7 @@ __SYSCALL(__NR_fremovexattr, sys_fremovexattr)
 #define __NR_tkill	200
 __SYSCALL(__NR_tkill, sys_tkill)
 #define __NR_time	201
-__SYSCALL(__NR_time, sys_time64)
+__SYSCALL(__NR_time, sys_time)
 #define __NR_futex	202
 __SYSCALL(__NR_futex, sys_futex)
 #define __NR_sched_setaffinity	203
@@ -608,6 +608,7 @@ do { \
 #define __ARCH_WANT_SYS_SIGPENDING
 #define __ARCH_WANT_SYS_SIGPROCMASK
 #define __ARCH_WANT_SYS_RT_SIGACTION
+#define __ARCH_WANT_SYS_TIME
 #define __ARCH_WANT_COMPAT_SYS_TIME
 #endif
 