Diffstat (limited to 'include')
-rw-r--r--  include/asm-x86/desc.h           | 15
-rw-r--r--  include/asm-x86/paravirt.h       | 20
-rw-r--r--  include/asm-x86/smp.h            | 34
-rw-r--r--  include/asm-x86/spinlock.h       |  9
-rw-r--r--  include/asm-x86/tlbflush.h       | 10
-rw-r--r--  include/asm-x86/xen/hypervisor.h | 14
-rw-r--r--  include/linux/kernel.h           |  2
-rw-r--r--  include/linux/mm.h               |  6
-rw-r--r--  include/linux/mm_types.h         | 10
-rw-r--r--  include/linux/sched.h            |  6
-rw-r--r--  include/xen/balloon.h            | 61
-rw-r--r--  include/xen/events.h             |  2
12 files changed, 104 insertions, 85 deletions
diff --git a/include/asm-x86/desc.h b/include/asm-x86/desc.h
index b73fea54def2..ebc307817e98 100644
--- a/include/asm-x86/desc.h
+++ b/include/asm-x86/desc.h
@@ -24,6 +24,11 @@ static inline void fill_ldt(struct desc_struct *desc,
 	desc->d = info->seg_32bit;
 	desc->g = info->limit_in_pages;
 	desc->base2 = (info->base_addr & 0xff000000) >> 24;
+	/*
+	 * Don't allow setting of the lm bit. It is useless anyway
+	 * because 64bit system calls require __USER_CS:
+	 */
+	desc->l = 0;
 }
 
 extern struct desc_ptr idt_descr;
@@ -97,7 +102,15 @@ static inline int desc_empty(const void *ptr)
 	native_write_gdt_entry(dt, entry, desc, type)
 #define write_idt_entry(dt, entry, g)		\
 	native_write_idt_entry(dt, entry, g)
-#endif
+
+static inline void paravirt_alloc_ldt(struct desc_struct *ldt, unsigned entries)
+{
+}
+
+static inline void paravirt_free_ldt(struct desc_struct *ldt, unsigned entries)
+{
+}
+#endif /* CONFIG_PARAVIRT */
 
 static inline void native_write_idt_entry(gate_desc *idt, int entry,
 					   const gate_desc *gate)
diff --git a/include/asm-x86/paravirt.h b/include/asm-x86/paravirt.h
index d7d358a43996..8d6ae2f760d0 100644
--- a/include/asm-x86/paravirt.h
+++ b/include/asm-x86/paravirt.h
@@ -124,6 +124,9 @@ struct pv_cpu_ops {
 				int entrynum, const void *desc, int size);
 	void (*write_idt_entry)(gate_desc *,
 				int entrynum, const gate_desc *gate);
+	void (*alloc_ldt)(struct desc_struct *ldt, unsigned entries);
+	void (*free_ldt)(struct desc_struct *ldt, unsigned entries);
+
 	void (*load_sp0)(struct tss_struct *tss, struct thread_struct *t);
 
 	void (*set_iopl_mask)(unsigned mask);
@@ -325,6 +328,7 @@ struct pv_lock_ops {
 	int (*spin_is_locked)(struct raw_spinlock *lock);
 	int (*spin_is_contended)(struct raw_spinlock *lock);
 	void (*spin_lock)(struct raw_spinlock *lock);
+	void (*spin_lock_flags)(struct raw_spinlock *lock, unsigned long flags);
 	int (*spin_trylock)(struct raw_spinlock *lock);
 	void (*spin_unlock)(struct raw_spinlock *lock);
 };
@@ -830,6 +834,16 @@ do { \
 	(aux) = __aux;			\
 } while (0)
 
+static inline void paravirt_alloc_ldt(struct desc_struct *ldt, unsigned entries)
+{
+	PVOP_VCALL2(pv_cpu_ops.alloc_ldt, ldt, entries);
+}
+
+static inline void paravirt_free_ldt(struct desc_struct *ldt, unsigned entries)
+{
+	PVOP_VCALL2(pv_cpu_ops.free_ldt, ldt, entries);
+}
+
 static inline void load_TR_desc(void)
 {
 	PVOP_VCALL0(pv_cpu_ops.load_tr_desc);
@@ -1394,6 +1408,12 @@ static __always_inline void __raw_spin_lock(struct raw_spinlock *lock)
 	PVOP_VCALL1(pv_lock_ops.spin_lock, lock);
 }
 
+static __always_inline void __raw_spin_lock_flags(struct raw_spinlock *lock,
+						  unsigned long flags)
+{
+	PVOP_VCALL2(pv_lock_ops.spin_lock_flags, lock, flags);
+}
+
 static __always_inline int __raw_spin_trylock(struct raw_spinlock *lock)
 {
 	return PVOP_CALL1(int, pv_lock_ops.spin_trylock, lock);
diff --git a/include/asm-x86/smp.h b/include/asm-x86/smp.h
index 29324c103341..6df2615f9138 100644
--- a/include/asm-x86/smp.h
+++ b/include/asm-x86/smp.h
@@ -50,12 +50,16 @@ extern struct {
 struct smp_ops {
 	void (*smp_prepare_boot_cpu)(void);
 	void (*smp_prepare_cpus)(unsigned max_cpus);
-	int (*cpu_up)(unsigned cpu);
 	void (*smp_cpus_done)(unsigned max_cpus);
 
 	void (*smp_send_stop)(void);
 	void (*smp_send_reschedule)(int cpu);
 
+	int (*cpu_up)(unsigned cpu);
+	int (*cpu_disable)(void);
+	void (*cpu_die)(unsigned int cpu);
+	void (*play_dead)(void);
+
 	void (*send_call_func_ipi)(cpumask_t mask);
 	void (*send_call_func_single_ipi)(int cpu);
 };
@@ -94,6 +98,21 @@ static inline int __cpu_up(unsigned int cpu)
 	return smp_ops.cpu_up(cpu);
 }
 
+static inline int __cpu_disable(void)
+{
+	return smp_ops.cpu_disable();
+}
+
+static inline void __cpu_die(unsigned int cpu)
+{
+	smp_ops.cpu_die(cpu);
+}
+
+static inline void play_dead(void)
+{
+	smp_ops.play_dead();
+}
+
 static inline void smp_send_reschedule(int cpu)
 {
 	smp_ops.smp_send_reschedule(cpu);
@@ -109,16 +128,19 @@ static inline void arch_send_call_function_ipi(cpumask_t mask)
 	smp_ops.send_call_func_ipi(mask);
 }
 
+void cpu_disable_common(void);
 void native_smp_prepare_boot_cpu(void);
 void native_smp_prepare_cpus(unsigned int max_cpus);
 void native_smp_cpus_done(unsigned int max_cpus);
 int native_cpu_up(unsigned int cpunum);
+int native_cpu_disable(void);
+void native_cpu_die(unsigned int cpu);
+void native_play_dead(void);
+void play_dead_common(void);
+
 void native_send_call_func_ipi(cpumask_t mask);
 void native_send_call_func_single_ipi(int cpu);
 
-extern int __cpu_disable(void);
-extern void __cpu_die(unsigned int cpu);
-
 void smp_store_cpu_info(int id);
 #define cpu_physical_id(cpu)	per_cpu(x86_cpu_to_apicid, cpu)
 
@@ -205,9 +227,5 @@ static inline int hard_smp_processor_id(void)
 
 #endif /* CONFIG_X86_LOCAL_APIC */
 
-#ifdef CONFIG_HOTPLUG_CPU
-extern void cpu_uninit(void);
-#endif
-
 #endif /* __ASSEMBLY__ */
 #endif /* ASM_X86__SMP_H */
diff --git a/include/asm-x86/spinlock.h b/include/asm-x86/spinlock.h
index 93adae338ac6..8badab09146b 100644
--- a/include/asm-x86/spinlock.h
+++ b/include/asm-x86/spinlock.h
@@ -182,8 +182,6 @@ static __always_inline void __ticket_spin_unlock(raw_spinlock_t *lock)
 }
 #endif
 
-#define __raw_spin_lock_flags(lock, flags) __raw_spin_lock(lock)
-
 #ifdef CONFIG_PARAVIRT
 /*
  * Define virtualization-friendly old-style lock byte lock, for use in
@@ -272,6 +270,13 @@ static __always_inline void __raw_spin_unlock(raw_spinlock_t *lock)
 {
 	__ticket_spin_unlock(lock);
 }
+
+static __always_inline void __raw_spin_lock_flags(raw_spinlock_t *lock,
+						  unsigned long flags)
+{
+	__raw_spin_lock(lock);
+}
+
 #endif	/* CONFIG_PARAVIRT */
 
 static inline void __raw_spin_unlock_wait(raw_spinlock_t *lock)
diff --git a/include/asm-x86/tlbflush.h b/include/asm-x86/tlbflush.h
index ef68b76dc3c5..3cdd08b5bdb7 100644
--- a/include/asm-x86/tlbflush.h
+++ b/include/asm-x86/tlbflush.h
@@ -119,6 +119,10 @@ static inline void native_flush_tlb_others(const cpumask_t *cpumask,
 {
 }
 
+static inline void reset_lazy_tlbstate(void)
+{
+}
+
 #else	/* SMP */
 
 #include <asm/smp.h>
@@ -151,6 +155,12 @@ struct tlb_state {
 	char __cacheline_padding[L1_CACHE_BYTES-8];
 };
 DECLARE_PER_CPU(struct tlb_state, cpu_tlbstate);
+
+void reset_lazy_tlbstate(void);
+#else
+static inline void reset_lazy_tlbstate(void)
+{
+}
 #endif
 
 #endif	/* SMP */
diff --git a/include/asm-x86/xen/hypervisor.h b/include/asm-x86/xen/hypervisor.h
index 0ef3a88b869d..445a24759560 100644
--- a/include/asm-x86/xen/hypervisor.h
+++ b/include/asm-x86/xen/hypervisor.h
@@ -54,7 +54,6 @@
 /* arch/i386/kernel/setup.c */
 extern struct shared_info *HYPERVISOR_shared_info;
 extern struct start_info *xen_start_info;
-#define is_initial_xendomain()	(xen_start_info->flags & SIF_INITDOMAIN)
 
 /* arch/i386/mach-xen/evtchn.c */
 /* Force a proper event-channel callback from Xen. */
@@ -67,6 +66,17 @@ u64 jiffies_to_st(unsigned long jiffies);
 #define MULTI_UVMFLAGS_INDEX 3
 #define MULTI_UVMDOMID_INDEX 4
 
-#define is_running_on_xen()	(xen_start_info ? 1 : 0)
+enum xen_domain_type {
+	XEN_NATIVE,
+	XEN_PV_DOMAIN,
+	XEN_HVM_DOMAIN,
+};
+
+extern enum xen_domain_type xen_domain_type;
+
+#define xen_domain()		(xen_domain_type != XEN_NATIVE)
+#define xen_pv_domain()		(xen_domain_type == XEN_PV_DOMAIN)
+#define xen_initial_domain()	(xen_pv_domain() && xen_start_info->flags & SIF_INITDOMAIN)
+#define xen_hvm_domain()	(xen_domain_type == XEN_HVM_DOMAIN)
 
 #endif /* ASM_X86__XEN__HYPERVISOR_H */
diff --git a/include/linux/kernel.h b/include/linux/kernel.h
index 2651f805ba6d..75d81f157d2e 100644
--- a/include/linux/kernel.h
+++ b/include/linux/kernel.h
@@ -182,7 +182,7 @@ extern int vsscanf(const char *, const char *, va_list)
 
 extern int get_option(char **str, int *pint);
 extern char *get_options(const char *str, int nints, int *ints);
-extern unsigned long long memparse(char *ptr, char **retptr);
+extern unsigned long long memparse(const char *ptr, char **retptr);
 
 extern int core_kernel_text(unsigned long addr);
 extern int __kernel_text_address(unsigned long addr);
diff --git a/include/linux/mm.h b/include/linux/mm.h
index 72a15dc26bbf..4194bf8e4f6c 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -919,7 +919,7 @@ static inline pmd_t *pmd_alloc(struct mm_struct *mm, pud_t *pud, unsigned long a
 }
 #endif /* CONFIG_MMU && !__ARCH_HAS_4LEVEL_HACK */
 
-#if NR_CPUS >= CONFIG_SPLIT_PTLOCK_CPUS
+#if USE_SPLIT_PTLOCKS
 /*
  * We tuck a spinlock to guard each pagetable page into its struct page,
  * at page->private, with BUILD_BUG_ON to make sure that this will not
@@ -932,14 +932,14 @@ static inline pmd_t *pmd_alloc(struct mm_struct *mm, pud_t *pud, unsigned long a
 } while (0)
 #define pte_lock_deinit(page)	((page)->mapping = NULL)
 #define pte_lockptr(mm, pmd)	({(void)(mm); __pte_lockptr(pmd_page(*(pmd)));})
-#else
+#else	/* !USE_SPLIT_PTLOCKS */
 /*
  * We use mm->page_table_lock to guard all pagetable pages of the mm.
  */
 #define pte_lock_init(page)	do {} while (0)
 #define pte_lock_deinit(page)	do {} while (0)
 #define pte_lockptr(mm, pmd)	({(void)(pmd); &(mm)->page_table_lock;})
-#endif /* NR_CPUS < CONFIG_SPLIT_PTLOCK_CPUS */
+#endif /* USE_SPLIT_PTLOCKS */
 
 static inline void pgtable_page_ctor(struct page *page)
 {
diff --git a/include/linux/mm_types.h b/include/linux/mm_types.h
index bf334138c7c1..9d49fa36bbef 100644
--- a/include/linux/mm_types.h
+++ b/include/linux/mm_types.h
@@ -21,11 +21,13 @@
 
 struct address_space;
 
-#if NR_CPUS >= CONFIG_SPLIT_PTLOCK_CPUS
+#define USE_SPLIT_PTLOCKS	(NR_CPUS >= CONFIG_SPLIT_PTLOCK_CPUS)
+
+#if USE_SPLIT_PTLOCKS
 typedef atomic_long_t mm_counter_t;
-#else  /* NR_CPUS < CONFIG_SPLIT_PTLOCK_CPUS */
+#else  /* !USE_SPLIT_PTLOCKS */
 typedef unsigned long mm_counter_t;
-#endif /* NR_CPUS < CONFIG_SPLIT_PTLOCK_CPUS */
+#endif /* !USE_SPLIT_PTLOCKS */
 
 /*
  * Each physical page in the system has a struct page associated with
@@ -65,7 +67,7 @@ struct page {
 	 * see PAGE_MAPPING_ANON below.
 	 */
 	};
-#if NR_CPUS >= CONFIG_SPLIT_PTLOCK_CPUS
+#if USE_SPLIT_PTLOCKS
 	    spinlock_t ptl;
 #endif
 	    struct kmem_cache *slab;	/* SLUB: Pointer to slab */
diff --git a/include/linux/sched.h b/include/linux/sched.h
index 5d0819ee442a..c226c7b82946 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -352,7 +352,7 @@ arch_get_unmapped_area_topdown(struct file *filp, unsigned long addr,
 extern void arch_unmap_area(struct mm_struct *, unsigned long);
 extern void arch_unmap_area_topdown(struct mm_struct *, unsigned long);
 
-#if NR_CPUS >= CONFIG_SPLIT_PTLOCK_CPUS
+#if USE_SPLIT_PTLOCKS
 /*
  * The mm counters are not protected by its page_table_lock,
  * so must be incremented atomically.
@@ -363,7 +363,7 @@ extern void arch_unmap_area_topdown(struct mm_struct *, unsigned long);
 #define inc_mm_counter(mm, member) atomic_long_inc(&(mm)->_##member)
 #define dec_mm_counter(mm, member) atomic_long_dec(&(mm)->_##member)
 
-#else  /* NR_CPUS < CONFIG_SPLIT_PTLOCK_CPUS */
+#else  /* !USE_SPLIT_PTLOCKS */
 /*
  * The mm counters are protected by its page_table_lock,
  * so can be incremented directly.
@@ -374,7 +374,7 @@ extern void arch_unmap_area_topdown(struct mm_struct *, unsigned long);
 #define inc_mm_counter(mm, member) (mm)->_##member++
 #define dec_mm_counter(mm, member) (mm)->_##member--
 
-#endif /* NR_CPUS < CONFIG_SPLIT_PTLOCK_CPUS */
+#endif /* !USE_SPLIT_PTLOCKS */
 
 #define get_mm_rss(mm)					\
 	(get_mm_counter(mm, file_rss) + get_mm_counter(mm, anon_rss))
diff --git a/include/xen/balloon.h b/include/xen/balloon.h
deleted file mode 100644
index fe43b0f3c86a..000000000000
--- a/include/xen/balloon.h
+++ /dev/null
@@ -1,61 +0,0 @@
-/******************************************************************************
- * balloon.h
- *
- * Xen balloon driver - enables returning/claiming memory to/from Xen.
- *
- * Copyright (c) 2003, B Dragovic
- * Copyright (c) 2003-2004, M Williamson, K Fraser
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License version 2
- * as published by the Free Software Foundation; or, when distributed
- * separately from the Linux kernel or incorporated into other
- * software packages, subject to the following license:
- *
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this source file (the "Software"), to deal in the Software without
- * restriction, including without limitation the rights to use, copy, modify,
- * merge, publish, distribute, sublicense, and/or sell copies of the Software,
- * and to permit persons to whom the Software is furnished to do so, subject to
- * the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
- * IN THE SOFTWARE.
- */
-
-#ifndef __XEN_BALLOON_H__
-#define __XEN_BALLOON_H__
-
-#include <linux/spinlock.h>
-
-#if 0
-/*
- * Inform the balloon driver that it should allow some slop for device-driver
- * memory activities.
- */
-void balloon_update_driver_allowance(long delta);
-
-/* Allocate/free a set of empty pages in low memory (i.e., no RAM mapped). */
-struct page **alloc_empty_pages_and_pagevec(int nr_pages);
-void free_empty_pages_and_pagevec(struct page **pagevec, int nr_pages);
-
-void balloon_release_driver_page(struct page *page);
-
-/*
- * Prevent the balloon driver from changing the memory reservation during
- * a driver critical region.
- */
-extern spinlock_t balloon_lock;
-#define balloon_lock(__flags)	spin_lock_irqsave(&balloon_lock, __flags)
-#define balloon_unlock(__flags)	spin_unlock_irqrestore(&balloon_lock, __flags)
-#endif
-
-#endif /* __XEN_BALLOON_H__ */
diff --git a/include/xen/events.h b/include/xen/events.h
index 4680ff3fbc91..0d5f1adc0363 100644
--- a/include/xen/events.h
+++ b/include/xen/events.h
@@ -46,6 +46,8 @@ extern void xen_irq_resume(void);
 
 /* Clear an irq's pending state, in preparation for polling on it */
 void xen_clear_irq_pending(int irq);
+void xen_set_irq_pending(int irq);
+bool xen_test_irq_pending(int irq);
 
 /* Poll waiting for an irq to become pending.  In the usual case, the
    irq will be disabled so it won't deliver an interrupt. */