Diffstat (limited to 'drivers/xen')
-rw-r--r-- | drivers/xen/Kconfig | 34
-rw-r--r-- | drivers/xen/Makefile | 3
-rw-r--r-- | drivers/xen/events.c | 115
-rw-r--r-- | drivers/xen/evtchn.c | 14
-rw-r--r-- | drivers/xen/grant-table.c | 2
-rw-r--r-- | drivers/xen/pcpu.c | 35
-rw-r--r-- | drivers/xen/tmem.c | 2
-rw-r--r-- | drivers/xen/xen-acpi-cpuhotplug.c | 471
-rw-r--r-- | drivers/xen/xen-acpi-memhotplug.c | 483
-rw-r--r-- | drivers/xen/xen-stub.c | 101
-rw-r--r-- | drivers/xen/xenbus/xenbus_probe.c | 2
11 files changed, 1209 insertions, 53 deletions
diff --git a/drivers/xen/Kconfig b/drivers/xen/Kconfig
index cabfa97f4674..5a32232cf7c1 100644
--- a/drivers/xen/Kconfig
+++ b/drivers/xen/Kconfig
@@ -180,6 +180,40 @@ config XEN_PRIVCMD | |||
180 | depends on XEN | 180 | depends on XEN |
181 | default m | 181 | default m |
182 | 182 | ||
183 | config XEN_STUB | ||
184 | bool "Xen stub drivers" | ||
185 | depends on XEN && X86_64 | ||
186 | default n | ||
187 | help | ||
188 | Allow the kernel to install stub drivers, to reserve space for the Xen | ||
189 | drivers (i.e. memory hotplug and cpu hotplug) and to block the native | ||
190 | drivers from loading, so that the real Xen drivers can be modular. | ||
191 | |||
192 | To enable Xen features like cpu and memory hotplug, select Y here. | ||
193 | |||
194 | config XEN_ACPI_HOTPLUG_MEMORY | ||
195 | tristate "Xen ACPI memory hotplug" | ||
196 | depends on XEN_DOM0 && XEN_STUB && ACPI | ||
197 | default n | ||
198 | help | ||
199 | This is the Xen ACPI memory hotplug driver. | ||
200 | |||
201 | Currently Xen only supports ACPI memory hot-add. If you want | ||
202 | to hot-add memory at runtime (the hot-added memory cannot be | ||
203 | removed until the machine stops), select Y/M here, otherwise select N. | ||
204 | |||
205 | config XEN_ACPI_HOTPLUG_CPU | ||
206 | tristate "Xen ACPI cpu hotplug" | ||
207 | depends on XEN_DOM0 && XEN_STUB && ACPI | ||
208 | select ACPI_CONTAINER | ||
209 | default n | ||
210 | help | ||
211 | Xen ACPI cpu enumeration and hotplugging | ||
212 | |||
213 | For hotplugging, currently Xen only supports ACPI cpu hot-add. | ||
214 | If you want to hot-add a cpu at runtime (the hot-added cpu cannot | ||
215 | be removed until the machine stops), select Y/M here. | ||
216 | |||
183 | config XEN_ACPI_PROCESSOR | 217 | config XEN_ACPI_PROCESSOR |
184 | tristate "Xen ACPI processor" | 218 | tristate "Xen ACPI processor" |
185 | depends on XEN && X86 && ACPI_PROCESSOR && CPU_FREQ | 219 | depends on XEN && X86 && ACPI_PROCESSOR && CPU_FREQ |
diff --git a/drivers/xen/Makefile b/drivers/xen/Makefile
index fb213cf81a7b..eabd0ee1c2bc 100644
--- a/drivers/xen/Makefile
+++ b/drivers/xen/Makefile
@@ -30,6 +30,9 @@ obj-$(CONFIG_SWIOTLB_XEN) += swiotlb-xen.o | |||
30 | obj-$(CONFIG_XEN_MCE_LOG) += mcelog.o | 30 | obj-$(CONFIG_XEN_MCE_LOG) += mcelog.o |
31 | obj-$(CONFIG_XEN_PCIDEV_BACKEND) += xen-pciback/ | 31 | obj-$(CONFIG_XEN_PCIDEV_BACKEND) += xen-pciback/ |
32 | obj-$(CONFIG_XEN_PRIVCMD) += xen-privcmd.o | 32 | obj-$(CONFIG_XEN_PRIVCMD) += xen-privcmd.o |
33 | obj-$(CONFIG_XEN_STUB) += xen-stub.o | ||
34 | obj-$(CONFIG_XEN_ACPI_HOTPLUG_MEMORY) += xen-acpi-memhotplug.o | ||
35 | obj-$(CONFIG_XEN_ACPI_HOTPLUG_CPU) += xen-acpi-cpuhotplug.o | ||
33 | obj-$(CONFIG_XEN_ACPI_PROCESSOR) += xen-acpi-processor.o | 36 | obj-$(CONFIG_XEN_ACPI_PROCESSOR) += xen-acpi-processor.o |
34 | xen-evtchn-y := evtchn.o | 37 | xen-evtchn-y := evtchn.o |
35 | xen-gntdev-y := gntdev.o | 38 | xen-gntdev-y := gntdev.o |
diff --git a/drivers/xen/events.c b/drivers/xen/events.c
index 22f77c5f6012..d17aa41a9041 100644
--- a/drivers/xen/events.c
+++ b/drivers/xen/events.c
@@ -120,7 +120,22 @@ static unsigned long *pirq_eoi_map; | |||
120 | #endif | 120 | #endif |
121 | static bool (*pirq_needs_eoi)(unsigned irq); | 121 | static bool (*pirq_needs_eoi)(unsigned irq); |
122 | 122 | ||
123 | static DEFINE_PER_CPU(unsigned long [NR_EVENT_CHANNELS/BITS_PER_LONG], | 123 | /* |
124 | * Note sizeof(xen_ulong_t) can be more than sizeof(unsigned long). Be | ||
125 | * careful to only use bitops which allow for this (e.g. | ||
126 | * test_bit/find_first_bit and friends but not __ffs) and to pass | ||
127 | * BITS_PER_EVTCHN_WORD as the bitmask length. | ||
128 | */ | ||
129 | #define BITS_PER_EVTCHN_WORD (sizeof(xen_ulong_t)*8) | ||
130 | /* | ||
131 | * Make a bitmask (i.e. unsigned long *) of a xen_ulong_t | ||
132 | * array. Primarily to avoid long lines (hence the terse name). | ||
133 | */ | ||
134 | #define BM(x) (unsigned long *)(x) | ||
135 | /* Find the first set bit in an evtchn mask */ | ||
136 | #define EVTCHN_FIRST_BIT(w) find_first_bit(BM(&(w)), BITS_PER_EVTCHN_WORD) | ||
137 | |||
138 | static DEFINE_PER_CPU(xen_ulong_t [NR_EVENT_CHANNELS/BITS_PER_EVTCHN_WORD], | ||
124 | cpu_evtchn_mask); | 139 | cpu_evtchn_mask); |
125 | 140 | ||
126 | /* Xen will never allocate port zero for any purpose. */ | 141 | /* Xen will never allocate port zero for any purpose. */ |
@@ -294,9 +309,9 @@ static bool pirq_needs_eoi_flag(unsigned irq) | |||
294 | return info->u.pirq.flags & PIRQ_NEEDS_EOI; | 309 | return info->u.pirq.flags & PIRQ_NEEDS_EOI; |
295 | } | 310 | } |
296 | 311 | ||
297 | static inline unsigned long active_evtchns(unsigned int cpu, | 312 | static inline xen_ulong_t active_evtchns(unsigned int cpu, |
298 | struct shared_info *sh, | 313 | struct shared_info *sh, |
299 | unsigned int idx) | 314 | unsigned int idx) |
300 | { | 315 | { |
301 | return sh->evtchn_pending[idx] & | 316 | return sh->evtchn_pending[idx] & |
302 | per_cpu(cpu_evtchn_mask, cpu)[idx] & | 317 | per_cpu(cpu_evtchn_mask, cpu)[idx] & |
@@ -312,8 +327,8 @@ static void bind_evtchn_to_cpu(unsigned int chn, unsigned int cpu) | |||
312 | cpumask_copy(irq_to_desc(irq)->irq_data.affinity, cpumask_of(cpu)); | 327 | cpumask_copy(irq_to_desc(irq)->irq_data.affinity, cpumask_of(cpu)); |
313 | #endif | 328 | #endif |
314 | 329 | ||
315 | clear_bit(chn, per_cpu(cpu_evtchn_mask, cpu_from_irq(irq))); | 330 | clear_bit(chn, BM(per_cpu(cpu_evtchn_mask, cpu_from_irq(irq)))); |
316 | set_bit(chn, per_cpu(cpu_evtchn_mask, cpu)); | 331 | set_bit(chn, BM(per_cpu(cpu_evtchn_mask, cpu))); |
317 | 332 | ||
318 | info_for_irq(irq)->cpu = cpu; | 333 | info_for_irq(irq)->cpu = cpu; |
319 | } | 334 | } |
@@ -339,19 +354,19 @@ static void init_evtchn_cpu_bindings(void) | |||
339 | static inline void clear_evtchn(int port) | 354 | static inline void clear_evtchn(int port) |
340 | { | 355 | { |
341 | struct shared_info *s = HYPERVISOR_shared_info; | 356 | struct shared_info *s = HYPERVISOR_shared_info; |
342 | sync_clear_bit(port, &s->evtchn_pending[0]); | 357 | sync_clear_bit(port, BM(&s->evtchn_pending[0])); |
343 | } | 358 | } |
344 | 359 | ||
345 | static inline void set_evtchn(int port) | 360 | static inline void set_evtchn(int port) |
346 | { | 361 | { |
347 | struct shared_info *s = HYPERVISOR_shared_info; | 362 | struct shared_info *s = HYPERVISOR_shared_info; |
348 | sync_set_bit(port, &s->evtchn_pending[0]); | 363 | sync_set_bit(port, BM(&s->evtchn_pending[0])); |
349 | } | 364 | } |
350 | 365 | ||
351 | static inline int test_evtchn(int port) | 366 | static inline int test_evtchn(int port) |
352 | { | 367 | { |
353 | struct shared_info *s = HYPERVISOR_shared_info; | 368 | struct shared_info *s = HYPERVISOR_shared_info; |
354 | return sync_test_bit(port, &s->evtchn_pending[0]); | 369 | return sync_test_bit(port, BM(&s->evtchn_pending[0])); |
355 | } | 370 | } |
356 | 371 | ||
357 | 372 | ||
@@ -375,7 +390,7 @@ EXPORT_SYMBOL_GPL(notify_remote_via_irq); | |||
375 | static void mask_evtchn(int port) | 390 | static void mask_evtchn(int port) |
376 | { | 391 | { |
377 | struct shared_info *s = HYPERVISOR_shared_info; | 392 | struct shared_info *s = HYPERVISOR_shared_info; |
378 | sync_set_bit(port, &s->evtchn_mask[0]); | 393 | sync_set_bit(port, BM(&s->evtchn_mask[0])); |
379 | } | 394 | } |
380 | 395 | ||
381 | static void unmask_evtchn(int port) | 396 | static void unmask_evtchn(int port) |
@@ -389,7 +404,7 @@ static void unmask_evtchn(int port) | |||
389 | if (unlikely((cpu != cpu_from_evtchn(port)))) | 404 | if (unlikely((cpu != cpu_from_evtchn(port)))) |
390 | do_hypercall = 1; | 405 | do_hypercall = 1; |
391 | else | 406 | else |
392 | evtchn_pending = sync_test_bit(port, &s->evtchn_pending[0]); | 407 | evtchn_pending = sync_test_bit(port, BM(&s->evtchn_pending[0])); |
393 | 408 | ||
394 | if (unlikely(evtchn_pending && xen_hvm_domain())) | 409 | if (unlikely(evtchn_pending && xen_hvm_domain())) |
395 | do_hypercall = 1; | 410 | do_hypercall = 1; |
@@ -403,7 +418,7 @@ static void unmask_evtchn(int port) | |||
403 | } else { | 418 | } else { |
404 | struct vcpu_info *vcpu_info = __this_cpu_read(xen_vcpu); | 419 | struct vcpu_info *vcpu_info = __this_cpu_read(xen_vcpu); |
405 | 420 | ||
406 | sync_clear_bit(port, &s->evtchn_mask[0]); | 421 | sync_clear_bit(port, BM(&s->evtchn_mask[0])); |
407 | 422 | ||
408 | /* | 423 | /* |
409 | * The following is basically the equivalent of | 424 | * The following is basically the equivalent of |
@@ -411,8 +426,8 @@ static void unmask_evtchn(int port) | |||
411 | * the interrupt edge' if the channel is masked. | 426 | * the interrupt edge' if the channel is masked. |
412 | */ | 427 | */ |
413 | if (evtchn_pending && | 428 | if (evtchn_pending && |
414 | !sync_test_and_set_bit(port / BITS_PER_LONG, | 429 | !sync_test_and_set_bit(port / BITS_PER_EVTCHN_WORD, |
415 | &vcpu_info->evtchn_pending_sel)) | 430 | BM(&vcpu_info->evtchn_pending_sel))) |
416 | vcpu_info->evtchn_upcall_pending = 1; | 431 | vcpu_info->evtchn_upcall_pending = 1; |
417 | } | 432 | } |
418 | 433 | ||
@@ -1189,7 +1204,7 @@ irqreturn_t xen_debug_interrupt(int irq, void *dev_id) | |||
1189 | { | 1204 | { |
1190 | struct shared_info *sh = HYPERVISOR_shared_info; | 1205 | struct shared_info *sh = HYPERVISOR_shared_info; |
1191 | int cpu = smp_processor_id(); | 1206 | int cpu = smp_processor_id(); |
1192 | unsigned long *cpu_evtchn = per_cpu(cpu_evtchn_mask, cpu); | 1207 | xen_ulong_t *cpu_evtchn = per_cpu(cpu_evtchn_mask, cpu); |
1193 | int i; | 1208 | int i; |
1194 | unsigned long flags; | 1209 | unsigned long flags; |
1195 | static DEFINE_SPINLOCK(debug_lock); | 1210 | static DEFINE_SPINLOCK(debug_lock); |
@@ -1205,7 +1220,7 @@ irqreturn_t xen_debug_interrupt(int irq, void *dev_id) | |||
1205 | pending = (get_irq_regs() && i == cpu) | 1220 | pending = (get_irq_regs() && i == cpu) |
1206 | ? xen_irqs_disabled(get_irq_regs()) | 1221 | ? xen_irqs_disabled(get_irq_regs()) |
1207 | : v->evtchn_upcall_mask; | 1222 | : v->evtchn_upcall_mask; |
1208 | printk("%d: masked=%d pending=%d event_sel %0*lx\n ", i, | 1223 | printk("%d: masked=%d pending=%d event_sel %0*"PRI_xen_ulong"\n ", i, |
1209 | pending, v->evtchn_upcall_pending, | 1224 | pending, v->evtchn_upcall_pending, |
1210 | (int)(sizeof(v->evtchn_pending_sel)*2), | 1225 | (int)(sizeof(v->evtchn_pending_sel)*2), |
1211 | v->evtchn_pending_sel); | 1226 | v->evtchn_pending_sel); |
@@ -1214,49 +1229,52 @@ irqreturn_t xen_debug_interrupt(int irq, void *dev_id) | |||
1214 | 1229 | ||
1215 | printk("\npending:\n "); | 1230 | printk("\npending:\n "); |
1216 | for (i = ARRAY_SIZE(sh->evtchn_pending)-1; i >= 0; i--) | 1231 | for (i = ARRAY_SIZE(sh->evtchn_pending)-1; i >= 0; i--) |
1217 | printk("%0*lx%s", (int)sizeof(sh->evtchn_pending[0])*2, | 1232 | printk("%0*"PRI_xen_ulong"%s", |
1233 | (int)sizeof(sh->evtchn_pending[0])*2, | ||
1218 | sh->evtchn_pending[i], | 1234 | sh->evtchn_pending[i], |
1219 | i % 8 == 0 ? "\n " : " "); | 1235 | i % 8 == 0 ? "\n " : " "); |
1220 | printk("\nglobal mask:\n "); | 1236 | printk("\nglobal mask:\n "); |
1221 | for (i = ARRAY_SIZE(sh->evtchn_mask)-1; i >= 0; i--) | 1237 | for (i = ARRAY_SIZE(sh->evtchn_mask)-1; i >= 0; i--) |
1222 | printk("%0*lx%s", | 1238 | printk("%0*"PRI_xen_ulong"%s", |
1223 | (int)(sizeof(sh->evtchn_mask[0])*2), | 1239 | (int)(sizeof(sh->evtchn_mask[0])*2), |
1224 | sh->evtchn_mask[i], | 1240 | sh->evtchn_mask[i], |
1225 | i % 8 == 0 ? "\n " : " "); | 1241 | i % 8 == 0 ? "\n " : " "); |
1226 | 1242 | ||
1227 | printk("\nglobally unmasked:\n "); | 1243 | printk("\nglobally unmasked:\n "); |
1228 | for (i = ARRAY_SIZE(sh->evtchn_mask)-1; i >= 0; i--) | 1244 | for (i = ARRAY_SIZE(sh->evtchn_mask)-1; i >= 0; i--) |
1229 | printk("%0*lx%s", (int)(sizeof(sh->evtchn_mask[0])*2), | 1245 | printk("%0*"PRI_xen_ulong"%s", |
1246 | (int)(sizeof(sh->evtchn_mask[0])*2), | ||
1230 | sh->evtchn_pending[i] & ~sh->evtchn_mask[i], | 1247 | sh->evtchn_pending[i] & ~sh->evtchn_mask[i], |
1231 | i % 8 == 0 ? "\n " : " "); | 1248 | i % 8 == 0 ? "\n " : " "); |
1232 | 1249 | ||
1233 | printk("\nlocal cpu%d mask:\n ", cpu); | 1250 | printk("\nlocal cpu%d mask:\n ", cpu); |
1234 | for (i = (NR_EVENT_CHANNELS/BITS_PER_LONG)-1; i >= 0; i--) | 1251 | for (i = (NR_EVENT_CHANNELS/BITS_PER_EVTCHN_WORD)-1; i >= 0; i--) |
1235 | printk("%0*lx%s", (int)(sizeof(cpu_evtchn[0])*2), | 1252 | printk("%0*"PRI_xen_ulong"%s", (int)(sizeof(cpu_evtchn[0])*2), |
1236 | cpu_evtchn[i], | 1253 | cpu_evtchn[i], |
1237 | i % 8 == 0 ? "\n " : " "); | 1254 | i % 8 == 0 ? "\n " : " "); |
1238 | 1255 | ||
1239 | printk("\nlocally unmasked:\n "); | 1256 | printk("\nlocally unmasked:\n "); |
1240 | for (i = ARRAY_SIZE(sh->evtchn_mask)-1; i >= 0; i--) { | 1257 | for (i = ARRAY_SIZE(sh->evtchn_mask)-1; i >= 0; i--) { |
1241 | unsigned long pending = sh->evtchn_pending[i] | 1258 | xen_ulong_t pending = sh->evtchn_pending[i] |
1242 | & ~sh->evtchn_mask[i] | 1259 | & ~sh->evtchn_mask[i] |
1243 | & cpu_evtchn[i]; | 1260 | & cpu_evtchn[i]; |
1244 | printk("%0*lx%s", (int)(sizeof(sh->evtchn_mask[0])*2), | 1261 | printk("%0*"PRI_xen_ulong"%s", |
1262 | (int)(sizeof(sh->evtchn_mask[0])*2), | ||
1245 | pending, i % 8 == 0 ? "\n " : " "); | 1263 | pending, i % 8 == 0 ? "\n " : " "); |
1246 | } | 1264 | } |
1247 | 1265 | ||
1248 | printk("\npending list:\n"); | 1266 | printk("\npending list:\n"); |
1249 | for (i = 0; i < NR_EVENT_CHANNELS; i++) { | 1267 | for (i = 0; i < NR_EVENT_CHANNELS; i++) { |
1250 | if (sync_test_bit(i, sh->evtchn_pending)) { | 1268 | if (sync_test_bit(i, BM(sh->evtchn_pending))) { |
1251 | int word_idx = i / BITS_PER_LONG; | 1269 | int word_idx = i / BITS_PER_EVTCHN_WORD; |
1252 | printk(" %d: event %d -> irq %d%s%s%s\n", | 1270 | printk(" %d: event %d -> irq %d%s%s%s\n", |
1253 | cpu_from_evtchn(i), i, | 1271 | cpu_from_evtchn(i), i, |
1254 | evtchn_to_irq[i], | 1272 | evtchn_to_irq[i], |
1255 | sync_test_bit(word_idx, &v->evtchn_pending_sel) | 1273 | sync_test_bit(word_idx, BM(&v->evtchn_pending_sel)) |
1256 | ? "" : " l2-clear", | 1274 | ? "" : " l2-clear", |
1257 | !sync_test_bit(i, sh->evtchn_mask) | 1275 | !sync_test_bit(i, BM(sh->evtchn_mask)) |
1258 | ? "" : " globally-masked", | 1276 | ? "" : " globally-masked", |
1259 | sync_test_bit(i, cpu_evtchn) | 1277 | sync_test_bit(i, BM(cpu_evtchn)) |
1260 | ? "" : " locally-masked"); | 1278 | ? "" : " locally-masked"); |
1261 | } | 1279 | } |
1262 | } | 1280 | } |
@@ -1273,7 +1291,7 @@ static DEFINE_PER_CPU(unsigned int, current_bit_idx); | |||
1273 | /* | 1291 | /* |
1274 | * Mask out the i least significant bits of w | 1292 | * Mask out the i least significant bits of w |
1275 | */ | 1293 | */ |
1276 | #define MASK_LSBS(w, i) (w & ((~0UL) << i)) | 1294 | #define MASK_LSBS(w, i) (w & ((~((xen_ulong_t)0UL)) << i)) |
1277 | 1295 | ||
1278 | /* | 1296 | /* |
1279 | * Search the CPUs pending events bitmasks. For each one found, map | 1297 | * Search the CPUs pending events bitmasks. For each one found, map |
@@ -1295,18 +1313,19 @@ static void __xen_evtchn_do_upcall(void) | |||
1295 | unsigned count; | 1313 | unsigned count; |
1296 | 1314 | ||
1297 | do { | 1315 | do { |
1298 | unsigned long pending_words; | 1316 | xen_ulong_t pending_words; |
1299 | 1317 | ||
1300 | vcpu_info->evtchn_upcall_pending = 0; | 1318 | vcpu_info->evtchn_upcall_pending = 0; |
1301 | 1319 | ||
1302 | if (__this_cpu_inc_return(xed_nesting_count) - 1) | 1320 | if (__this_cpu_inc_return(xed_nesting_count) - 1) |
1303 | goto out; | 1321 | goto out; |
1304 | 1322 | ||
1305 | #ifndef CONFIG_X86 /* No need for a barrier -- XCHG is a barrier on x86. */ | 1323 | /* |
1306 | /* Clear master flag /before/ clearing selector flag. */ | 1324 | * Master flag must be cleared /before/ clearing |
1307 | wmb(); | 1325 | * selector flag. xchg_xen_ulong must contain an |
1308 | #endif | 1326 | * appropriate barrier. |
1309 | pending_words = xchg(&vcpu_info->evtchn_pending_sel, 0); | 1327 | */ |
1328 | pending_words = xchg_xen_ulong(&vcpu_info->evtchn_pending_sel, 0); | ||
1310 | 1329 | ||
1311 | start_word_idx = __this_cpu_read(current_word_idx); | 1330 | start_word_idx = __this_cpu_read(current_word_idx); |
1312 | start_bit_idx = __this_cpu_read(current_bit_idx); | 1331 | start_bit_idx = __this_cpu_read(current_bit_idx); |
@@ -1314,8 +1333,8 @@ static void __xen_evtchn_do_upcall(void) | |||
1314 | word_idx = start_word_idx; | 1333 | word_idx = start_word_idx; |
1315 | 1334 | ||
1316 | for (i = 0; pending_words != 0; i++) { | 1335 | for (i = 0; pending_words != 0; i++) { |
1317 | unsigned long pending_bits; | 1336 | xen_ulong_t pending_bits; |
1318 | unsigned long words; | 1337 | xen_ulong_t words; |
1319 | 1338 | ||
1320 | words = MASK_LSBS(pending_words, word_idx); | 1339 | words = MASK_LSBS(pending_words, word_idx); |
1321 | 1340 | ||
@@ -1327,7 +1346,7 @@ static void __xen_evtchn_do_upcall(void) | |||
1327 | bit_idx = 0; | 1346 | bit_idx = 0; |
1328 | continue; | 1347 | continue; |
1329 | } | 1348 | } |
1330 | word_idx = __ffs(words); | 1349 | word_idx = EVTCHN_FIRST_BIT(words); |
1331 | 1350 | ||
1332 | pending_bits = active_evtchns(cpu, s, word_idx); | 1351 | pending_bits = active_evtchns(cpu, s, word_idx); |
1333 | bit_idx = 0; /* usually scan entire word from start */ | 1352 | bit_idx = 0; /* usually scan entire word from start */ |
@@ -1342,7 +1361,7 @@ static void __xen_evtchn_do_upcall(void) | |||
1342 | } | 1361 | } |
1343 | 1362 | ||
1344 | do { | 1363 | do { |
1345 | unsigned long bits; | 1364 | xen_ulong_t bits; |
1346 | int port, irq; | 1365 | int port, irq; |
1347 | struct irq_desc *desc; | 1366 | struct irq_desc *desc; |
1348 | 1367 | ||
@@ -1352,10 +1371,10 @@ static void __xen_evtchn_do_upcall(void) | |||
1352 | if (bits == 0) | 1371 | if (bits == 0) |
1353 | break; | 1372 | break; |
1354 | 1373 | ||
1355 | bit_idx = __ffs(bits); | 1374 | bit_idx = EVTCHN_FIRST_BIT(bits); |
1356 | 1375 | ||
1357 | /* Process port. */ | 1376 | /* Process port. */ |
1358 | port = (word_idx * BITS_PER_LONG) + bit_idx; | 1377 | port = (word_idx * BITS_PER_EVTCHN_WORD) + bit_idx; |
1359 | irq = evtchn_to_irq[port]; | 1378 | irq = evtchn_to_irq[port]; |
1360 | 1379 | ||
1361 | if (irq != -1) { | 1380 | if (irq != -1) { |
@@ -1364,12 +1383,12 @@ static void __xen_evtchn_do_upcall(void) | |||
1364 | generic_handle_irq_desc(irq, desc); | 1383 | generic_handle_irq_desc(irq, desc); |
1365 | } | 1384 | } |
1366 | 1385 | ||
1367 | bit_idx = (bit_idx + 1) % BITS_PER_LONG; | 1386 | bit_idx = (bit_idx + 1) % BITS_PER_EVTCHN_WORD; |
1368 | 1387 | ||
1369 | /* Next caller starts at last processed + 1 */ | 1388 | /* Next caller starts at last processed + 1 */ |
1370 | __this_cpu_write(current_word_idx, | 1389 | __this_cpu_write(current_word_idx, |
1371 | bit_idx ? word_idx : | 1390 | bit_idx ? word_idx : |
1372 | (word_idx+1) % BITS_PER_LONG); | 1391 | (word_idx+1) % BITS_PER_EVTCHN_WORD); |
1373 | __this_cpu_write(current_bit_idx, bit_idx); | 1392 | __this_cpu_write(current_bit_idx, bit_idx); |
1374 | } while (bit_idx != 0); | 1393 | } while (bit_idx != 0); |
1375 | 1394 | ||
@@ -1377,7 +1396,7 @@ static void __xen_evtchn_do_upcall(void) | |||
1377 | if ((word_idx != start_word_idx) || (i != 0)) | 1396 | if ((word_idx != start_word_idx) || (i != 0)) |
1378 | pending_words &= ~(1UL << word_idx); | 1397 | pending_words &= ~(1UL << word_idx); |
1379 | 1398 | ||
1380 | word_idx = (word_idx + 1) % BITS_PER_LONG; | 1399 | word_idx = (word_idx + 1) % BITS_PER_EVTCHN_WORD; |
1381 | } | 1400 | } |
1382 | 1401 | ||
1383 | BUG_ON(!irqs_disabled()); | 1402 | BUG_ON(!irqs_disabled()); |
@@ -1487,8 +1506,8 @@ int resend_irq_on_evtchn(unsigned int irq) | |||
1487 | if (!VALID_EVTCHN(evtchn)) | 1506 | if (!VALID_EVTCHN(evtchn)) |
1488 | return 1; | 1507 | return 1; |
1489 | 1508 | ||
1490 | masked = sync_test_and_set_bit(evtchn, s->evtchn_mask); | 1509 | masked = sync_test_and_set_bit(evtchn, BM(s->evtchn_mask)); |
1491 | sync_set_bit(evtchn, s->evtchn_pending); | 1510 | sync_set_bit(evtchn, BM(s->evtchn_pending)); |
1492 | if (!masked) | 1511 | if (!masked) |
1493 | unmask_evtchn(evtchn); | 1512 | unmask_evtchn(evtchn); |
1494 | 1513 | ||
@@ -1536,8 +1555,8 @@ static int retrigger_dynirq(struct irq_data *data) | |||
1536 | if (VALID_EVTCHN(evtchn)) { | 1555 | if (VALID_EVTCHN(evtchn)) { |
1537 | int masked; | 1556 | int masked; |
1538 | 1557 | ||
1539 | masked = sync_test_and_set_bit(evtchn, sh->evtchn_mask); | 1558 | masked = sync_test_and_set_bit(evtchn, BM(sh->evtchn_mask)); |
1540 | sync_set_bit(evtchn, sh->evtchn_pending); | 1559 | sync_set_bit(evtchn, BM(sh->evtchn_pending)); |
1541 | if (!masked) | 1560 | if (!masked) |
1542 | unmask_evtchn(evtchn); | 1561 | unmask_evtchn(evtchn); |
1543 | ret = 1; | 1562 | ret = 1; |
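Why events.c now wraps every bitmap in BM() and counts bits with BITS_PER_EVTCHN_WORD: xen_ulong_t can be wider than the kernel's unsigned long (for example 64-bit event words on a 32-bit guest), so a plain __ffs() on a long could silently ignore the upper half of a word. A standalone sketch of the idea in C, using stand-in types instead of the real Xen headers (illustrative only, not kernel code):

	#include <stdint.h>
	#include <stdio.h>

	typedef uint64_t xen_ulong_t;	/* may be wider than unsigned long */
	#define BITS_PER_EVTCHN_WORD (sizeof(xen_ulong_t) * 8)

	/* Poor man's find_first_bit() over one event-channel word. */
	static unsigned int evtchn_first_bit(xen_ulong_t w)
	{
		unsigned int bit;

		for (bit = 0; bit < BITS_PER_EVTCHN_WORD; bit++)
			if (w & ((xen_ulong_t)1 << bit))
				return bit;
		return BITS_PER_EVTCHN_WORD;	/* no bit set */
	}

	int main(void)
	{
		xen_ulong_t pending = (xen_ulong_t)1 << 40;	/* port 40 pending */

		/* Truncating this word to a 32-bit long would lose the bit entirely. */
		printf("first pending bit: %u\n", evtchn_first_bit(pending));
		return 0;
	}

The real driver keeps the kernel's generic find_first_bit()/test_bit() and only adds the BM() cast so those bitops accept a xen_ulong_t array scanned in BITS_PER_EVTCHN_WORD-sized words.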
diff --git a/drivers/xen/evtchn.c b/drivers/xen/evtchn.c
index b1f60a0c0bea..45c8efaa6b3e 100644
--- a/drivers/xen/evtchn.c
+++ b/drivers/xen/evtchn.c
@@ -269,6 +269,14 @@ static int evtchn_bind_to_user(struct per_user_data *u, int port) | |||
269 | u->name, (void *)(unsigned long)port); | 269 | u->name, (void *)(unsigned long)port); |
270 | if (rc >= 0) | 270 | if (rc >= 0) |
271 | rc = evtchn_make_refcounted(port); | 271 | rc = evtchn_make_refcounted(port); |
272 | else { | ||
273 | /* bind failed, should close the port now */ | ||
274 | struct evtchn_close close; | ||
275 | close.port = port; | ||
276 | if (HYPERVISOR_event_channel_op(EVTCHNOP_close, &close) != 0) | ||
277 | BUG(); | ||
278 | set_port_user(port, NULL); | ||
279 | } | ||
272 | 280 | ||
273 | return rc; | 281 | return rc; |
274 | } | 282 | } |
@@ -277,6 +285,8 @@ static void evtchn_unbind_from_user(struct per_user_data *u, int port) | |||
277 | { | 285 | { |
278 | int irq = irq_from_evtchn(port); | 286 | int irq = irq_from_evtchn(port); |
279 | 287 | ||
288 | BUG_ON(irq < 0); | ||
289 | |||
280 | unbind_from_irqhandler(irq, (void *)(unsigned long)port); | 290 | unbind_from_irqhandler(irq, (void *)(unsigned long)port); |
281 | 291 | ||
282 | set_port_user(port, NULL); | 292 | set_port_user(port, NULL); |
@@ -534,10 +544,10 @@ static int __init evtchn_init(void) | |||
534 | 544 | ||
535 | spin_lock_init(&port_user_lock); | 545 | spin_lock_init(&port_user_lock); |
536 | 546 | ||
537 | /* Create '/dev/misc/evtchn'. */ | 547 | /* Create '/dev/xen/evtchn'. */ |
538 | err = misc_register(&evtchn_miscdev); | 548 | err = misc_register(&evtchn_miscdev); |
539 | if (err != 0) { | 549 | if (err != 0) { |
540 | printk(KERN_ALERT "Could not register /dev/misc/evtchn\n"); | 550 | printk(KERN_ERR "Could not register /dev/xen/evtchn\n"); |
541 | return err; | 551 | return err; |
542 | } | 552 | } |
543 | 553 | ||
diff --git a/drivers/xen/grant-table.c b/drivers/xen/grant-table.c
index 157c0ccda3ef..04c1b2d9b775 100644
--- a/drivers/xen/grant-table.c
+++ b/drivers/xen/grant-table.c
@@ -1147,7 +1147,7 @@ static int gnttab_setup(void) | |||
1147 | return gnttab_map(0, nr_grant_frames - 1); | 1147 | return gnttab_map(0, nr_grant_frames - 1); |
1148 | 1148 | ||
1149 | if (gnttab_shared.addr == NULL) { | 1149 | if (gnttab_shared.addr == NULL) { |
1150 | gnttab_shared.addr = ioremap(xen_hvm_resume_frames, | 1150 | gnttab_shared.addr = xen_remap(xen_hvm_resume_frames, |
1151 | PAGE_SIZE * max_nr_gframes); | 1151 | PAGE_SIZE * max_nr_gframes); |
1152 | if (gnttab_shared.addr == NULL) { | 1152 | if (gnttab_shared.addr == NULL) { |
1153 | printk(KERN_WARNING | 1153 | printk(KERN_WARNING |
diff --git a/drivers/xen/pcpu.c b/drivers/xen/pcpu.c
index 5a27a4599a4a..6536d5ab1697 100644
--- a/drivers/xen/pcpu.c
+++ b/drivers/xen/pcpu.c
@@ -332,6 +332,41 @@ static irqreturn_t xen_pcpu_interrupt(int irq, void *dev_id) | |||
332 | return IRQ_HANDLED; | 332 | return IRQ_HANDLED; |
333 | } | 333 | } |
334 | 334 | ||
335 | /* Sync with the Xen hypervisor after a cpu is hot-added */ | ||
336 | void xen_pcpu_hotplug_sync(void) | ||
337 | { | ||
338 | schedule_work(&xen_pcpu_work); | ||
339 | } | ||
340 | EXPORT_SYMBOL_GPL(xen_pcpu_hotplug_sync); | ||
341 | |||
342 | /* | ||
343 | * For a cpu presented by the hypervisor, return the logical cpu id; | ||
344 | * for a cpu not presented by the hypervisor, return -ENODEV. | ||
345 | */ | ||
346 | int xen_pcpu_id(uint32_t acpi_id) | ||
347 | { | ||
348 | int cpu_id = 0, max_id = 0; | ||
349 | struct xen_platform_op op; | ||
350 | |||
351 | op.cmd = XENPF_get_cpuinfo; | ||
352 | while (cpu_id <= max_id) { | ||
353 | op.u.pcpu_info.xen_cpuid = cpu_id; | ||
354 | if (HYPERVISOR_dom0_op(&op)) { | ||
355 | cpu_id++; | ||
356 | continue; | ||
357 | } | ||
358 | |||
359 | if (acpi_id == op.u.pcpu_info.acpi_id) | ||
360 | return cpu_id; | ||
361 | if (op.u.pcpu_info.max_present > max_id) | ||
362 | max_id = op.u.pcpu_info.max_present; | ||
363 | cpu_id++; | ||
364 | } | ||
365 | |||
366 | return -ENODEV; | ||
367 | } | ||
368 | EXPORT_SYMBOL_GPL(xen_pcpu_id); | ||
369 | |||
335 | static int __init xen_pcpu_init(void) | 370 | static int __init xen_pcpu_init(void) |
336 | { | 371 | { |
337 | int irq, ret; | 372 | int irq, ret; |
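One non-obvious detail in the xen_pcpu_id() loop above: physical cpu ids may be sparse, and each successful XENPF_get_cpuinfo query also reports the highest present id, so the loop keeps raising its upper bound (max_id) while it scans and simply steps over holes. A self-contained model of that walk against a fake cpu table (stand-in data and helper names, not the real hypercall interface):

	#include <stdint.h>
	#include <stdio.h>

	struct pcpu_info { int present; uint32_t acpi_id; int max_present; };

	/* Fake "hypervisor" view: cpu ids 0, 1 and 5 exist, 2-4 are holes. */
	static const struct pcpu_info table[6] = {
		[0] = { 1, 0x10, 5 },
		[1] = { 1, 0x11, 5 },
		[5] = { 1, 0x15, 5 },
	};

	/* Mimics XENPF_get_cpuinfo: non-zero return means "no such cpu id". */
	static int get_cpuinfo(int cpu_id, struct pcpu_info *out)
	{
		if (cpu_id >= 6 || !table[cpu_id].present)
			return -1;
		*out = table[cpu_id];
		return 0;
	}

	static int pcpu_id_for_acpi_id(uint32_t acpi_id)
	{
		int cpu_id = 0, max_id = 0;
		struct pcpu_info info;

		while (cpu_id <= max_id) {
			if (get_cpuinfo(cpu_id, &info)) {
				cpu_id++;		/* hole: keep scanning */
				continue;
			}
			if (info.acpi_id == acpi_id)
				return cpu_id;
			if (info.max_present > max_id)
				max_id = info.max_present;	/* extend the bound */
			cpu_id++;
		}
		return -1;	/* the driver returns -ENODEV here */
	}

	int main(void)
	{
		printf("acpi_id 0x15 -> pcpu %d\n", pcpu_id_for_acpi_id(0x15));
		return 0;
	}

The xen-acpi-cpuhotplug driver added later in this patch calls xen_pcpu_id() first and only falls back to a XENPF_cpu_hotadd hypercall (followed by xen_pcpu_hotplug_sync()) when the lookup comes back negative.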
diff --git a/drivers/xen/tmem.c b/drivers/xen/tmem.c
index 144564e5eb29..3ee836d42581 100644
--- a/drivers/xen/tmem.c
+++ b/drivers/xen/tmem.c
@@ -385,7 +385,7 @@ static int __init xen_tmem_init(void) | |||
385 | if (old_ops.init != NULL) | 385 | if (old_ops.init != NULL) |
386 | s = " (WARNING: frontswap_ops overridden)"; | 386 | s = " (WARNING: frontswap_ops overridden)"; |
387 | printk(KERN_INFO "frontswap enabled, RAM provided by " | 387 | printk(KERN_INFO "frontswap enabled, RAM provided by " |
388 | "Xen Transcendent Memory\n"); | 388 | "Xen Transcendent Memory%s\n", s); |
389 | } | 389 | } |
390 | #endif | 390 | #endif |
391 | #ifdef CONFIG_CLEANCACHE | 391 | #ifdef CONFIG_CLEANCACHE |
diff --git a/drivers/xen/xen-acpi-cpuhotplug.c b/drivers/xen/xen-acpi-cpuhotplug.c
new file mode 100644
index 000000000000..757827966e34
--- /dev/null
+++ b/drivers/xen/xen-acpi-cpuhotplug.c
@@ -0,0 +1,471 @@ | |||
1 | /* | ||
2 | * Copyright (C) 2012 Intel Corporation | ||
3 | * Author: Liu Jinsong <jinsong.liu@intel.com> | ||
4 | * Author: Jiang Yunhong <yunhong.jiang@intel.com> | ||
5 | * | ||
6 | * This program is free software; you can redistribute it and/or modify | ||
7 | * it under the terms of the GNU General Public License as published by | ||
8 | * the Free Software Foundation; either version 2 of the License, or (at | ||
9 | * your option) any later version. | ||
10 | * | ||
11 | * This program is distributed in the hope that it will be useful, but | ||
12 | * WITHOUT ANY WARRANTY; without even the implied warranty of | ||
13 | * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or | ||
14 | * NON INFRINGEMENT. See the GNU General Public License for more | ||
15 | * details. | ||
16 | */ | ||
17 | |||
18 | #include <linux/kernel.h> | ||
19 | #include <linux/module.h> | ||
20 | #include <linux/init.h> | ||
21 | #include <linux/types.h> | ||
22 | #include <linux/cpu.h> | ||
23 | #include <linux/acpi.h> | ||
24 | #include <linux/uaccess.h> | ||
25 | #include <acpi/acpi_bus.h> | ||
26 | #include <acpi/acpi_drivers.h> | ||
27 | #include <acpi/processor.h> | ||
28 | |||
29 | #include <xen/acpi.h> | ||
30 | #include <xen/interface/platform.h> | ||
31 | #include <asm/xen/hypercall.h> | ||
32 | |||
33 | #define PREFIX "ACPI:xen_cpu_hotplug:" | ||
34 | |||
35 | #define INSTALL_NOTIFY_HANDLER 0 | ||
36 | #define UNINSTALL_NOTIFY_HANDLER 1 | ||
37 | |||
38 | static acpi_status xen_acpi_cpu_hotadd(struct acpi_processor *pr); | ||
39 | |||
40 | /* -------------------------------------------------------------------------- | ||
41 | Driver Interface | ||
42 | -------------------------------------------------------------------------- */ | ||
43 | |||
44 | static int xen_acpi_processor_enable(struct acpi_device *device) | ||
45 | { | ||
46 | acpi_status status = 0; | ||
47 | unsigned long long value; | ||
48 | union acpi_object object = { 0 }; | ||
49 | struct acpi_buffer buffer = { sizeof(union acpi_object), &object }; | ||
50 | struct acpi_processor *pr; | ||
51 | |||
52 | pr = acpi_driver_data(device); | ||
53 | if (!pr) { | ||
54 | pr_err(PREFIX "Cannot find driver data\n"); | ||
55 | return -EINVAL; | ||
56 | } | ||
57 | |||
58 | if (!strcmp(acpi_device_hid(device), ACPI_PROCESSOR_OBJECT_HID)) { | ||
59 | /* Declared with "Processor" statement; match ProcessorID */ | ||
60 | status = acpi_evaluate_object(pr->handle, NULL, NULL, &buffer); | ||
61 | if (ACPI_FAILURE(status)) { | ||
62 | pr_err(PREFIX "Evaluating processor object\n"); | ||
63 | return -ENODEV; | ||
64 | } | ||
65 | |||
66 | pr->acpi_id = object.processor.proc_id; | ||
67 | } else { | ||
68 | /* Declared with "Device" statement; match _UID */ | ||
69 | status = acpi_evaluate_integer(pr->handle, METHOD_NAME__UID, | ||
70 | NULL, &value); | ||
71 | if (ACPI_FAILURE(status)) { | ||
72 | pr_err(PREFIX "Evaluating processor _UID\n"); | ||
73 | return -ENODEV; | ||
74 | } | ||
75 | |||
76 | pr->acpi_id = value; | ||
77 | } | ||
78 | |||
79 | pr->id = xen_pcpu_id(pr->acpi_id); | ||
80 | |||
81 | if ((int)pr->id < 0) | ||
82 | /* This cpu is not yet known to the hypervisor; try to hot-add it */ | ||
83 | if (ACPI_FAILURE(xen_acpi_cpu_hotadd(pr))) { | ||
84 | pr_err(PREFIX "Hotadd CPU (acpi_id = %d) failed.\n", | ||
85 | pr->acpi_id); | ||
86 | return -ENODEV; | ||
87 | } | ||
88 | |||
89 | return 0; | ||
90 | } | ||
91 | |||
92 | static int __cpuinit xen_acpi_processor_add(struct acpi_device *device) | ||
93 | { | ||
94 | int ret; | ||
95 | struct acpi_processor *pr; | ||
96 | |||
97 | if (!device) | ||
98 | return -EINVAL; | ||
99 | |||
100 | pr = kzalloc(sizeof(struct acpi_processor), GFP_KERNEL); | ||
101 | if (!pr) | ||
102 | return -ENOMEM; | ||
103 | |||
104 | pr->handle = device->handle; | ||
105 | strcpy(acpi_device_name(device), ACPI_PROCESSOR_DEVICE_NAME); | ||
106 | strcpy(acpi_device_class(device), ACPI_PROCESSOR_CLASS); | ||
107 | device->driver_data = pr; | ||
108 | |||
109 | ret = xen_acpi_processor_enable(device); | ||
110 | if (ret) | ||
111 | pr_err(PREFIX "Error when enabling Xen processor\n"); | ||
112 | |||
113 | return ret; | ||
114 | } | ||
115 | |||
116 | static int xen_acpi_processor_remove(struct acpi_device *device) | ||
117 | { | ||
118 | struct acpi_processor *pr; | ||
119 | |||
120 | if (!device) | ||
121 | return -EINVAL; | ||
122 | |||
123 | pr = acpi_driver_data(device); | ||
124 | if (!pr) | ||
125 | return -EINVAL; | ||
126 | |||
127 | kfree(pr); | ||
128 | return 0; | ||
129 | } | ||
130 | |||
131 | /*-------------------------------------------------------------- | ||
132 | Acpi processor hotplug support | ||
133 | --------------------------------------------------------------*/ | ||
134 | |||
135 | static int is_processor_present(acpi_handle handle) | ||
136 | { | ||
137 | acpi_status status; | ||
138 | unsigned long long sta = 0; | ||
139 | |||
140 | |||
141 | status = acpi_evaluate_integer(handle, "_STA", NULL, &sta); | ||
142 | |||
143 | if (ACPI_SUCCESS(status) && (sta & ACPI_STA_DEVICE_PRESENT)) | ||
144 | return 1; | ||
145 | |||
146 | /* | ||
147 | * _STA is mandatory for a processor that supports hot plug | ||
148 | */ | ||
149 | if (status == AE_NOT_FOUND) | ||
150 | pr_info(PREFIX "Processor does not support hot plug\n"); | ||
151 | else | ||
152 | pr_info(PREFIX "Processor Device is not present"); | ||
153 | return 0; | ||
154 | } | ||
155 | |||
156 | static int xen_apic_id(acpi_handle handle) | ||
157 | { | ||
158 | struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL }; | ||
159 | union acpi_object *obj; | ||
160 | struct acpi_madt_local_apic *lapic; | ||
161 | int apic_id; | ||
162 | |||
163 | if (ACPI_FAILURE(acpi_evaluate_object(handle, "_MAT", NULL, &buffer))) | ||
164 | return -EINVAL; | ||
165 | |||
166 | if (!buffer.length || !buffer.pointer) | ||
167 | return -EINVAL; | ||
168 | |||
169 | obj = buffer.pointer; | ||
170 | if (obj->type != ACPI_TYPE_BUFFER || | ||
171 | obj->buffer.length < sizeof(*lapic)) { | ||
172 | kfree(buffer.pointer); | ||
173 | return -EINVAL; | ||
174 | } | ||
175 | |||
176 | lapic = (struct acpi_madt_local_apic *)obj->buffer.pointer; | ||
177 | |||
178 | if (lapic->header.type != ACPI_MADT_TYPE_LOCAL_APIC || | ||
179 | !(lapic->lapic_flags & ACPI_MADT_ENABLED)) { | ||
180 | kfree(buffer.pointer); | ||
181 | return -EINVAL; | ||
182 | } | ||
183 | |||
184 | apic_id = (uint32_t)lapic->id; | ||
185 | kfree(buffer.pointer); | ||
186 | buffer.length = ACPI_ALLOCATE_BUFFER; | ||
187 | buffer.pointer = NULL; | ||
188 | |||
189 | return apic_id; | ||
190 | } | ||
191 | |||
192 | static int xen_hotadd_cpu(struct acpi_processor *pr) | ||
193 | { | ||
194 | int cpu_id, apic_id, pxm; | ||
195 | struct xen_platform_op op; | ||
196 | |||
197 | apic_id = xen_apic_id(pr->handle); | ||
198 | if (apic_id < 0) { | ||
199 | pr_err(PREFIX "Failed to get apic_id for acpi_id %d\n", | ||
200 | pr->acpi_id); | ||
201 | return -ENODEV; | ||
202 | } | ||
203 | |||
204 | pxm = xen_acpi_get_pxm(pr->handle); | ||
205 | if (pxm < 0) { | ||
206 | pr_err(PREFIX "Failed to get _PXM for acpi_id %d\n", | ||
207 | pr->acpi_id); | ||
208 | return pxm; | ||
209 | } | ||
210 | |||
211 | op.cmd = XENPF_cpu_hotadd; | ||
212 | op.u.cpu_add.apic_id = apic_id; | ||
213 | op.u.cpu_add.acpi_id = pr->acpi_id; | ||
214 | op.u.cpu_add.pxm = pxm; | ||
215 | |||
216 | cpu_id = HYPERVISOR_dom0_op(&op); | ||
217 | if (cpu_id < 0) | ||
218 | pr_err(PREFIX "Failed to hotadd CPU for acpi_id %d\n", | ||
219 | pr->acpi_id); | ||
220 | |||
221 | return cpu_id; | ||
222 | } | ||
223 | |||
224 | static acpi_status xen_acpi_cpu_hotadd(struct acpi_processor *pr) | ||
225 | { | ||
226 | if (!is_processor_present(pr->handle)) | ||
227 | return AE_ERROR; | ||
228 | |||
229 | pr->id = xen_hotadd_cpu(pr); | ||
230 | if ((int)pr->id < 0) | ||
231 | return AE_ERROR; | ||
232 | |||
233 | /* | ||
234 | * Sync with Xen hypervisor, providing new /sys/.../xen_cpuX | ||
235 | * interface after cpu hotadded. | ||
236 | */ | ||
237 | xen_pcpu_hotplug_sync(); | ||
238 | |||
239 | return AE_OK; | ||
240 | } | ||
241 | |||
242 | static | ||
243 | int acpi_processor_device_add(acpi_handle handle, struct acpi_device **device) | ||
244 | { | ||
245 | acpi_handle phandle; | ||
246 | struct acpi_device *pdev; | ||
247 | |||
248 | if (acpi_get_parent(handle, &phandle)) | ||
249 | return -ENODEV; | ||
250 | |||
251 | if (acpi_bus_get_device(phandle, &pdev)) | ||
252 | return -ENODEV; | ||
253 | |||
254 | if (acpi_bus_scan(handle)) | ||
255 | return -ENODEV; | ||
256 | |||
257 | return 0; | ||
258 | } | ||
259 | |||
260 | static int acpi_processor_device_remove(struct acpi_device *device) | ||
261 | { | ||
262 | pr_debug(PREFIX "Xen does not support CPU hotremove\n"); | ||
263 | |||
264 | return -ENOSYS; | ||
265 | } | ||
266 | |||
267 | static void acpi_processor_hotplug_notify(acpi_handle handle, | ||
268 | u32 event, void *data) | ||
269 | { | ||
270 | struct acpi_processor *pr; | ||
271 | struct acpi_device *device = NULL; | ||
272 | u32 ost_code = ACPI_OST_SC_NON_SPECIFIC_FAILURE; /* default */ | ||
273 | int result; | ||
274 | |||
275 | switch (event) { | ||
276 | case ACPI_NOTIFY_BUS_CHECK: | ||
277 | case ACPI_NOTIFY_DEVICE_CHECK: | ||
278 | ACPI_DEBUG_PRINT((ACPI_DB_INFO, | ||
279 | "Processor driver received %s event\n", | ||
280 | (event == ACPI_NOTIFY_BUS_CHECK) ? | ||
281 | "ACPI_NOTIFY_BUS_CHECK" : "ACPI_NOTIFY_DEVICE_CHECK")); | ||
282 | |||
283 | if (!is_processor_present(handle)) | ||
284 | break; | ||
285 | |||
286 | if (!acpi_bus_get_device(handle, &device)) | ||
287 | break; | ||
288 | |||
289 | result = acpi_processor_device_add(handle, &device); | ||
290 | if (result) { | ||
291 | pr_err(PREFIX "Unable to add the device\n"); | ||
292 | break; | ||
293 | } | ||
294 | |||
295 | ost_code = ACPI_OST_SC_SUCCESS; | ||
296 | break; | ||
297 | |||
298 | case ACPI_NOTIFY_EJECT_REQUEST: | ||
299 | ACPI_DEBUG_PRINT((ACPI_DB_INFO, | ||
300 | "received ACPI_NOTIFY_EJECT_REQUEST\n")); | ||
301 | |||
302 | if (acpi_bus_get_device(handle, &device)) { | ||
303 | pr_err(PREFIX "Device don't exist, dropping EJECT\n"); | ||
304 | break; | ||
305 | } | ||
306 | pr = acpi_driver_data(device); | ||
307 | if (!pr) { | ||
308 | pr_err(PREFIX "Driver data is NULL, dropping EJECT\n"); | ||
309 | break; | ||
310 | } | ||
311 | |||
312 | /* | ||
313 | * TBD: implement acpi_processor_device_remove if Xen supports | ||
314 | * CPU hotremove in the future. | ||
315 | */ | ||
316 | acpi_processor_device_remove(device); | ||
317 | break; | ||
318 | |||
319 | default: | ||
320 | ACPI_DEBUG_PRINT((ACPI_DB_INFO, | ||
321 | "Unsupported event [0x%x]\n", event)); | ||
322 | |||
323 | /* non-hotplug event; possibly handled by other handler */ | ||
324 | return; | ||
325 | } | ||
326 | |||
327 | (void) acpi_evaluate_hotplug_ost(handle, event, ost_code, NULL); | ||
328 | return; | ||
329 | } | ||
330 | |||
331 | static acpi_status is_processor_device(acpi_handle handle) | ||
332 | { | ||
333 | struct acpi_device_info *info; | ||
334 | char *hid; | ||
335 | acpi_status status; | ||
336 | |||
337 | status = acpi_get_object_info(handle, &info); | ||
338 | if (ACPI_FAILURE(status)) | ||
339 | return status; | ||
340 | |||
341 | if (info->type == ACPI_TYPE_PROCESSOR) { | ||
342 | kfree(info); | ||
343 | return AE_OK; /* found a processor object */ | ||
344 | } | ||
345 | |||
346 | if (!(info->valid & ACPI_VALID_HID)) { | ||
347 | kfree(info); | ||
348 | return AE_ERROR; | ||
349 | } | ||
350 | |||
351 | hid = info->hardware_id.string; | ||
352 | if ((hid == NULL) || strcmp(hid, ACPI_PROCESSOR_DEVICE_HID)) { | ||
353 | kfree(info); | ||
354 | return AE_ERROR; | ||
355 | } | ||
356 | |||
357 | kfree(info); | ||
358 | return AE_OK; /* found a processor device object */ | ||
359 | } | ||
360 | |||
361 | static acpi_status | ||
362 | processor_walk_namespace_cb(acpi_handle handle, | ||
363 | u32 lvl, void *context, void **rv) | ||
364 | { | ||
365 | acpi_status status; | ||
366 | int *action = context; | ||
367 | |||
368 | status = is_processor_device(handle); | ||
369 | if (ACPI_FAILURE(status)) | ||
370 | return AE_OK; /* not a processor; continue to walk */ | ||
371 | |||
372 | switch (*action) { | ||
373 | case INSTALL_NOTIFY_HANDLER: | ||
374 | acpi_install_notify_handler(handle, | ||
375 | ACPI_SYSTEM_NOTIFY, | ||
376 | acpi_processor_hotplug_notify, | ||
377 | NULL); | ||
378 | break; | ||
379 | case UNINSTALL_NOTIFY_HANDLER: | ||
380 | acpi_remove_notify_handler(handle, | ||
381 | ACPI_SYSTEM_NOTIFY, | ||
382 | acpi_processor_hotplug_notify); | ||
383 | break; | ||
384 | default: | ||
385 | break; | ||
386 | } | ||
387 | |||
388 | /* found a processor; skip walking underneath */ | ||
389 | return AE_CTRL_DEPTH; | ||
390 | } | ||
391 | |||
392 | static | ||
393 | void acpi_processor_install_hotplug_notify(void) | ||
394 | { | ||
395 | int action = INSTALL_NOTIFY_HANDLER; | ||
396 | acpi_walk_namespace(ACPI_TYPE_ANY, | ||
397 | ACPI_ROOT_OBJECT, | ||
398 | ACPI_UINT32_MAX, | ||
399 | processor_walk_namespace_cb, NULL, &action, NULL); | ||
400 | } | ||
401 | |||
402 | static | ||
403 | void acpi_processor_uninstall_hotplug_notify(void) | ||
404 | { | ||
405 | int action = UNINSTALL_NOTIFY_HANDLER; | ||
406 | acpi_walk_namespace(ACPI_TYPE_ANY, | ||
407 | ACPI_ROOT_OBJECT, | ||
408 | ACPI_UINT32_MAX, | ||
409 | processor_walk_namespace_cb, NULL, &action, NULL); | ||
410 | } | ||
411 | |||
412 | static const struct acpi_device_id processor_device_ids[] = { | ||
413 | {ACPI_PROCESSOR_OBJECT_HID, 0}, | ||
414 | {ACPI_PROCESSOR_DEVICE_HID, 0}, | ||
415 | {"", 0}, | ||
416 | }; | ||
417 | MODULE_DEVICE_TABLE(acpi, processor_device_ids); | ||
418 | |||
419 | static struct acpi_driver xen_acpi_processor_driver = { | ||
420 | .name = "processor", | ||
421 | .class = ACPI_PROCESSOR_CLASS, | ||
422 | .ids = processor_device_ids, | ||
423 | .ops = { | ||
424 | .add = xen_acpi_processor_add, | ||
425 | .remove = xen_acpi_processor_remove, | ||
426 | }, | ||
427 | }; | ||
428 | |||
429 | static int __init xen_acpi_processor_init(void) | ||
430 | { | ||
431 | int result = 0; | ||
432 | |||
433 | if (!xen_initial_domain()) | ||
434 | return -ENODEV; | ||
435 | |||
436 | /* unregister the stub, which is only used to reserve driver space */ | ||
437 | xen_stub_processor_exit(); | ||
438 | |||
439 | result = acpi_bus_register_driver(&xen_acpi_processor_driver); | ||
440 | if (result < 0) { | ||
441 | xen_stub_processor_init(); | ||
442 | return result; | ||
443 | } | ||
444 | |||
445 | acpi_processor_install_hotplug_notify(); | ||
446 | return 0; | ||
447 | } | ||
448 | |||
449 | static void __exit xen_acpi_processor_exit(void) | ||
450 | { | ||
451 | if (!xen_initial_domain()) | ||
452 | return; | ||
453 | |||
454 | acpi_processor_uninstall_hotplug_notify(); | ||
455 | |||
456 | acpi_bus_unregister_driver(&xen_acpi_processor_driver); | ||
457 | |||
458 | /* | ||
459 | * the stub reserves the space again to prevent any chance of the | ||
460 | * native driver loading. | ||
461 | */ | ||
462 | xen_stub_processor_init(); | ||
463 | return; | ||
464 | } | ||
465 | |||
466 | module_init(xen_acpi_processor_init); | ||
467 | module_exit(xen_acpi_processor_exit); | ||
468 | ACPI_MODULE_NAME("xen-acpi-cpuhotplug"); | ||
469 | MODULE_AUTHOR("Liu Jinsong <jinsong.liu@intel.com>"); | ||
470 | MODULE_DESCRIPTION("Xen Hotplug CPU Driver"); | ||
471 | MODULE_LICENSE("GPL"); | ||
diff --git a/drivers/xen/xen-acpi-memhotplug.c b/drivers/xen/xen-acpi-memhotplug.c
new file mode 100644
index 000000000000..853b12dba5bb
--- /dev/null
+++ b/drivers/xen/xen-acpi-memhotplug.c
@@ -0,0 +1,483 @@ | |||
1 | /* | ||
2 | * Copyright (C) 2012 Intel Corporation | ||
3 | * Author: Liu Jinsong <jinsong.liu@intel.com> | ||
4 | * Author: Jiang Yunhong <yunhong.jiang@intel.com> | ||
5 | * | ||
6 | * This program is free software; you can redistribute it and/or modify | ||
7 | * it under the terms of the GNU General Public License as published by | ||
8 | * the Free Software Foundation; either version 2 of the License, or (at | ||
9 | * your option) any later version. | ||
10 | * | ||
11 | * This program is distributed in the hope that it will be useful, but | ||
12 | * WITHOUT ANY WARRANTY; without even the implied warranty of | ||
13 | * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or | ||
14 | * NON INFRINGEMENT. See the GNU General Public License for more | ||
15 | * details. | ||
16 | */ | ||
17 | |||
18 | #include <linux/kernel.h> | ||
19 | #include <linux/module.h> | ||
20 | #include <linux/init.h> | ||
21 | #include <linux/types.h> | ||
22 | #include <linux/acpi.h> | ||
23 | #include <acpi/acpi_drivers.h> | ||
24 | #include <xen/acpi.h> | ||
25 | #include <xen/interface/platform.h> | ||
26 | #include <asm/xen/hypercall.h> | ||
27 | |||
28 | #define PREFIX "ACPI:xen_memory_hotplug:" | ||
29 | |||
30 | struct acpi_memory_info { | ||
31 | struct list_head list; | ||
32 | u64 start_addr; /* Memory Range start physical addr */ | ||
33 | u64 length; /* Memory Range length */ | ||
34 | unsigned short caching; /* memory cache attribute */ | ||
35 | unsigned short write_protect; /* memory read/write attribute */ | ||
36 | /* copied from the buffer returned by _CRS */ | ||
37 | unsigned int enabled:1; | ||
38 | }; | ||
39 | |||
40 | struct acpi_memory_device { | ||
41 | struct acpi_device *device; | ||
42 | struct list_head res_list; | ||
43 | }; | ||
44 | |||
45 | static bool acpi_hotmem_initialized __read_mostly; | ||
46 | |||
47 | static int xen_hotadd_memory(int pxm, struct acpi_memory_info *info) | ||
48 | { | ||
49 | int rc; | ||
50 | struct xen_platform_op op; | ||
51 | |||
52 | op.cmd = XENPF_mem_hotadd; | ||
53 | op.u.mem_add.spfn = info->start_addr >> PAGE_SHIFT; | ||
54 | op.u.mem_add.epfn = (info->start_addr + info->length) >> PAGE_SHIFT; | ||
55 | op.u.mem_add.pxm = pxm; | ||
56 | |||
57 | rc = HYPERVISOR_dom0_op(&op); | ||
58 | if (rc) | ||
59 | pr_err(PREFIX "Xen Hotplug Memory Add failed on " | ||
60 | "0x%lx -> 0x%lx, _PXM: %d, error: %d\n", | ||
61 | (unsigned long)info->start_addr, | ||
62 | (unsigned long)(info->start_addr + info->length), | ||
63 | pxm, rc); | ||
64 | |||
65 | return rc; | ||
66 | } | ||
67 | |||
68 | static int xen_acpi_memory_enable_device(struct acpi_memory_device *mem_device) | ||
69 | { | ||
70 | int pxm, result; | ||
71 | int num_enabled = 0; | ||
72 | struct acpi_memory_info *info; | ||
73 | |||
74 | if (!mem_device) | ||
75 | return -EINVAL; | ||
76 | |||
77 | pxm = xen_acpi_get_pxm(mem_device->device->handle); | ||
78 | if (pxm < 0) | ||
79 | return pxm; | ||
80 | |||
81 | list_for_each_entry(info, &mem_device->res_list, list) { | ||
82 | if (info->enabled) { /* just sanity check...*/ | ||
83 | num_enabled++; | ||
84 | continue; | ||
85 | } | ||
86 | |||
87 | if (!info->length) | ||
88 | continue; | ||
89 | |||
90 | result = xen_hotadd_memory(pxm, info); | ||
91 | if (result) | ||
92 | continue; | ||
93 | info->enabled = 1; | ||
94 | num_enabled++; | ||
95 | } | ||
96 | |||
97 | if (!num_enabled) | ||
98 | return -ENODEV; | ||
99 | |||
100 | return 0; | ||
101 | } | ||
102 | |||
103 | static acpi_status | ||
104 | acpi_memory_get_resource(struct acpi_resource *resource, void *context) | ||
105 | { | ||
106 | struct acpi_memory_device *mem_device = context; | ||
107 | struct acpi_resource_address64 address64; | ||
108 | struct acpi_memory_info *info, *new; | ||
109 | acpi_status status; | ||
110 | |||
111 | status = acpi_resource_to_address64(resource, &address64); | ||
112 | if (ACPI_FAILURE(status) || | ||
113 | (address64.resource_type != ACPI_MEMORY_RANGE)) | ||
114 | return AE_OK; | ||
115 | |||
116 | list_for_each_entry(info, &mem_device->res_list, list) { | ||
117 | if ((info->caching == address64.info.mem.caching) && | ||
118 | (info->write_protect == address64.info.mem.write_protect) && | ||
119 | (info->start_addr + info->length == address64.minimum)) { | ||
120 | info->length += address64.address_length; | ||
121 | return AE_OK; | ||
122 | } | ||
123 | } | ||
124 | |||
125 | new = kzalloc(sizeof(struct acpi_memory_info), GFP_KERNEL); | ||
126 | if (!new) | ||
127 | return AE_ERROR; | ||
128 | |||
129 | INIT_LIST_HEAD(&new->list); | ||
130 | new->caching = address64.info.mem.caching; | ||
131 | new->write_protect = address64.info.mem.write_protect; | ||
132 | new->start_addr = address64.minimum; | ||
133 | new->length = address64.address_length; | ||
134 | list_add_tail(&new->list, &mem_device->res_list); | ||
135 | |||
136 | return AE_OK; | ||
137 | } | ||
138 | |||
139 | static int | ||
140 | acpi_memory_get_device_resources(struct acpi_memory_device *mem_device) | ||
141 | { | ||
142 | acpi_status status; | ||
143 | struct acpi_memory_info *info, *n; | ||
144 | |||
145 | if (!list_empty(&mem_device->res_list)) | ||
146 | return 0; | ||
147 | |||
148 | status = acpi_walk_resources(mem_device->device->handle, | ||
149 | METHOD_NAME__CRS, acpi_memory_get_resource, mem_device); | ||
150 | |||
151 | if (ACPI_FAILURE(status)) { | ||
152 | list_for_each_entry_safe(info, n, &mem_device->res_list, list) | ||
153 | kfree(info); | ||
154 | INIT_LIST_HEAD(&mem_device->res_list); | ||
155 | return -EINVAL; | ||
156 | } | ||
157 | |||
158 | return 0; | ||
159 | } | ||
160 | |||
161 | static int | ||
162 | acpi_memory_get_device(acpi_handle handle, | ||
163 | struct acpi_memory_device **mem_device) | ||
164 | { | ||
165 | acpi_status status; | ||
166 | acpi_handle phandle; | ||
167 | struct acpi_device *device = NULL; | ||
168 | struct acpi_device *pdevice = NULL; | ||
169 | int result; | ||
170 | |||
171 | if (!acpi_bus_get_device(handle, &device) && device) | ||
172 | goto end; | ||
173 | |||
174 | status = acpi_get_parent(handle, &phandle); | ||
175 | if (ACPI_FAILURE(status)) { | ||
176 | pr_warn(PREFIX "Cannot find acpi parent\n"); | ||
177 | return -EINVAL; | ||
178 | } | ||
179 | |||
180 | /* Get the parent device */ | ||
181 | result = acpi_bus_get_device(phandle, &pdevice); | ||
182 | if (result) { | ||
183 | pr_warn(PREFIX "Cannot get acpi bus device\n"); | ||
184 | return -EINVAL; | ||
185 | } | ||
186 | |||
187 | /* | ||
188 | * Now add the notified device. This creates the acpi_device | ||
189 | * and invokes its .add() function. | ||
190 | */ | ||
191 | result = acpi_bus_scan(handle); | ||
192 | if (result) { | ||
193 | pr_warn(PREFIX "Cannot add acpi bus\n"); | ||
194 | return -EINVAL; | ||
195 | } | ||
196 | |||
197 | end: | ||
198 | *mem_device = acpi_driver_data(device); | ||
199 | if (!(*mem_device)) { | ||
200 | pr_err(PREFIX "Driver data not found\n"); | ||
201 | return -ENODEV; | ||
202 | } | ||
203 | |||
204 | return 0; | ||
205 | } | ||
206 | |||
207 | static int acpi_memory_check_device(struct acpi_memory_device *mem_device) | ||
208 | { | ||
209 | unsigned long long current_status; | ||
210 | |||
211 | /* Get device present/absent information from the _STA */ | ||
212 | if (ACPI_FAILURE(acpi_evaluate_integer(mem_device->device->handle, | ||
213 | "_STA", NULL, ¤t_status))) | ||
214 | return -ENODEV; | ||
215 | /* | ||
216 | * Check for device status. Device should be | ||
217 | * present/enabled/functioning. | ||
218 | */ | ||
219 | if (!((current_status & ACPI_STA_DEVICE_PRESENT) | ||
220 | && (current_status & ACPI_STA_DEVICE_ENABLED) | ||
221 | && (current_status & ACPI_STA_DEVICE_FUNCTIONING))) | ||
222 | return -ENODEV; | ||
223 | |||
224 | return 0; | ||
225 | } | ||
226 | |||
227 | static int acpi_memory_disable_device(struct acpi_memory_device *mem_device) | ||
228 | { | ||
229 | pr_debug(PREFIX "Xen does not support memory hotremove\n"); | ||
230 | |||
231 | return -ENOSYS; | ||
232 | } | ||
233 | |||
234 | static void acpi_memory_device_notify(acpi_handle handle, u32 event, void *data) | ||
235 | { | ||
236 | struct acpi_memory_device *mem_device; | ||
237 | struct acpi_device *device; | ||
238 | u32 ost_code = ACPI_OST_SC_NON_SPECIFIC_FAILURE; /* default */ | ||
239 | |||
240 | switch (event) { | ||
241 | case ACPI_NOTIFY_BUS_CHECK: | ||
242 | ACPI_DEBUG_PRINT((ACPI_DB_INFO, | ||
243 | "\nReceived BUS CHECK notification for device\n")); | ||
244 | /* Fall Through */ | ||
245 | case ACPI_NOTIFY_DEVICE_CHECK: | ||
246 | if (event == ACPI_NOTIFY_DEVICE_CHECK) | ||
247 | ACPI_DEBUG_PRINT((ACPI_DB_INFO, | ||
248 | "\nReceived DEVICE CHECK notification for device\n")); | ||
249 | |||
250 | if (acpi_memory_get_device(handle, &mem_device)) { | ||
251 | pr_err(PREFIX "Cannot find driver data\n"); | ||
252 | break; | ||
253 | } | ||
254 | |||
255 | ost_code = ACPI_OST_SC_SUCCESS; | ||
256 | break; | ||
257 | |||
258 | case ACPI_NOTIFY_EJECT_REQUEST: | ||
259 | ACPI_DEBUG_PRINT((ACPI_DB_INFO, | ||
260 | "\nReceived EJECT REQUEST notification for device\n")); | ||
261 | |||
262 | if (acpi_bus_get_device(handle, &device)) { | ||
263 | pr_err(PREFIX "Device doesn't exist\n"); | ||
264 | break; | ||
265 | } | ||
266 | mem_device = acpi_driver_data(device); | ||
267 | if (!mem_device) { | ||
268 | pr_err(PREFIX "Driver Data is NULL\n"); | ||
269 | break; | ||
270 | } | ||
271 | |||
272 | /* | ||
273 | * TBD: implement acpi_memory_disable_device and invoke | ||
274 | * acpi_bus_remove if Xen supports hotremove in the future | ||
275 | */ | ||
276 | acpi_memory_disable_device(mem_device); | ||
277 | break; | ||
278 | |||
279 | default: | ||
280 | ACPI_DEBUG_PRINT((ACPI_DB_INFO, | ||
281 | "Unsupported event [0x%x]\n", event)); | ||
282 | /* non-hotplug event; possibly handled by other handler */ | ||
283 | return; | ||
284 | } | ||
285 | |||
286 | (void) acpi_evaluate_hotplug_ost(handle, event, ost_code, NULL); | ||
287 | return; | ||
288 | } | ||
289 | |||
290 | static int xen_acpi_memory_device_add(struct acpi_device *device) | ||
291 | { | ||
292 | int result; | ||
293 | struct acpi_memory_device *mem_device = NULL; | ||
294 | |||
295 | |||
296 | if (!device) | ||
297 | return -EINVAL; | ||
298 | |||
299 | mem_device = kzalloc(sizeof(struct acpi_memory_device), GFP_KERNEL); | ||
300 | if (!mem_device) | ||
301 | return -ENOMEM; | ||
302 | |||
303 | INIT_LIST_HEAD(&mem_device->res_list); | ||
304 | mem_device->device = device; | ||
305 | sprintf(acpi_device_name(device), "%s", ACPI_MEMORY_DEVICE_NAME); | ||
306 | sprintf(acpi_device_class(device), "%s", ACPI_MEMORY_DEVICE_CLASS); | ||
307 | device->driver_data = mem_device; | ||
308 | |||
309 | /* Get the range from the _CRS */ | ||
310 | result = acpi_memory_get_device_resources(mem_device); | ||
311 | if (result) { | ||
312 | kfree(mem_device); | ||
313 | return result; | ||
314 | } | ||
315 | |||
316 | /* | ||
317 | * For memory devices present at boot, early boot code has already | ||
318 | * recognized the memory area via EFI/E820, so if the DSDT shows these | ||
319 | * memory devices at boot, hotplug is not necessary for them. | ||
320 | * For memory devices hot-added at runtime, a hypercall to the Xen | ||
321 | * hypervisor is needed to add the memory. | ||
322 | */ | ||
323 | if (!acpi_hotmem_initialized) | ||
324 | return 0; | ||
325 | |||
326 | if (!acpi_memory_check_device(mem_device)) | ||
327 | result = xen_acpi_memory_enable_device(mem_device); | ||
328 | |||
329 | return result; | ||
330 | } | ||
331 | |||
332 | static int xen_acpi_memory_device_remove(struct acpi_device *device) | ||
333 | { | ||
334 | struct acpi_memory_device *mem_device = NULL; | ||
335 | |||
336 | if (!device || !acpi_driver_data(device)) | ||
337 | return -EINVAL; | ||
338 | |||
339 | mem_device = acpi_driver_data(device); | ||
340 | kfree(mem_device); | ||
341 | |||
342 | return 0; | ||
343 | } | ||
344 | |||
345 | /* | ||
346 | * Helper function to check for memory device | ||
347 | */ | ||
348 | static acpi_status is_memory_device(acpi_handle handle) | ||
349 | { | ||
350 | char *hardware_id; | ||
351 | acpi_status status; | ||
352 | struct acpi_device_info *info; | ||
353 | |||
354 | status = acpi_get_object_info(handle, &info); | ||
355 | if (ACPI_FAILURE(status)) | ||
356 | return status; | ||
357 | |||
358 | if (!(info->valid & ACPI_VALID_HID)) { | ||
359 | kfree(info); | ||
360 | return AE_ERROR; | ||
361 | } | ||
362 | |||
363 | hardware_id = info->hardware_id.string; | ||
364 | if ((hardware_id == NULL) || | ||
365 | (strcmp(hardware_id, ACPI_MEMORY_DEVICE_HID))) | ||
366 | status = AE_ERROR; | ||
367 | |||
368 | kfree(info); | ||
369 | return status; | ||
370 | } | ||
371 | |||
372 | static acpi_status | ||
373 | acpi_memory_register_notify_handler(acpi_handle handle, | ||
374 | u32 level, void *ctxt, void **retv) | ||
375 | { | ||
376 | acpi_status status; | ||
377 | |||
378 | status = is_memory_device(handle); | ||
379 | if (ACPI_FAILURE(status)) | ||
380 | return AE_OK; /* continue */ | ||
381 | |||
382 | status = acpi_install_notify_handler(handle, ACPI_SYSTEM_NOTIFY, | ||
383 | acpi_memory_device_notify, NULL); | ||
384 | /* continue */ | ||
385 | return AE_OK; | ||
386 | } | ||
387 | |||
388 | static acpi_status | ||
389 | acpi_memory_deregister_notify_handler(acpi_handle handle, | ||
390 | u32 level, void *ctxt, void **retv) | ||
391 | { | ||
392 | acpi_status status; | ||
393 | |||
394 | status = is_memory_device(handle); | ||
395 | if (ACPI_FAILURE(status)) | ||
396 | return AE_OK; /* continue */ | ||
397 | |||
398 | status = acpi_remove_notify_handler(handle, | ||
399 | ACPI_SYSTEM_NOTIFY, | ||
400 | acpi_memory_device_notify); | ||
401 | |||
402 | return AE_OK; /* continue */ | ||
403 | } | ||
404 | |||
405 | static const struct acpi_device_id memory_device_ids[] = { | ||
406 | {ACPI_MEMORY_DEVICE_HID, 0}, | ||
407 | {"", 0}, | ||
408 | }; | ||
409 | MODULE_DEVICE_TABLE(acpi, memory_device_ids); | ||
410 | |||
411 | static struct acpi_driver xen_acpi_memory_device_driver = { | ||
412 | .name = "acpi_memhotplug", | ||
413 | .class = ACPI_MEMORY_DEVICE_CLASS, | ||
414 | .ids = memory_device_ids, | ||
415 | .ops = { | ||
416 | .add = xen_acpi_memory_device_add, | ||
417 | .remove = xen_acpi_memory_device_remove, | ||
418 | }, | ||
419 | }; | ||
420 | |||
421 | static int __init xen_acpi_memory_device_init(void) | ||
422 | { | ||
423 | int result; | ||
424 | acpi_status status; | ||
425 | |||
426 | if (!xen_initial_domain()) | ||
427 | return -ENODEV; | ||
428 | |||
429 | /* unregister the stub, which is only used to reserve driver space */ | ||
430 | xen_stub_memory_device_exit(); | ||
431 | |||
432 | result = acpi_bus_register_driver(&xen_acpi_memory_device_driver); | ||
433 | if (result < 0) { | ||
434 | xen_stub_memory_device_init(); | ||
435 | return -ENODEV; | ||
436 | } | ||
437 | |||
438 | status = acpi_walk_namespace(ACPI_TYPE_DEVICE, ACPI_ROOT_OBJECT, | ||
439 | ACPI_UINT32_MAX, | ||
440 | acpi_memory_register_notify_handler, | ||
441 | NULL, NULL, NULL); | ||
442 | |||
443 | if (ACPI_FAILURE(status)) { | ||
444 | pr_warn(PREFIX "walk_namespace failed\n"); | ||
445 | acpi_bus_unregister_driver(&xen_acpi_memory_device_driver); | ||
446 | xen_stub_memory_device_init(); | ||
447 | return -ENODEV; | ||
448 | } | ||
449 | |||
450 | acpi_hotmem_initialized = true; | ||
451 | return 0; | ||
452 | } | ||
453 | |||
454 | static void __exit xen_acpi_memory_device_exit(void) | ||
455 | { | ||
456 | acpi_status status; | ||
457 | |||
458 | if (!xen_initial_domain()) | ||
459 | return; | ||
460 | |||
461 | status = acpi_walk_namespace(ACPI_TYPE_DEVICE, ACPI_ROOT_OBJECT, | ||
462 | ACPI_UINT32_MAX, | ||
463 | acpi_memory_deregister_notify_handler, | ||
464 | NULL, NULL, NULL); | ||
465 | if (ACPI_FAILURE(status)) | ||
466 | pr_warn(PREFIX "walk_namespace failed\n"); | ||
467 | |||
468 | acpi_bus_unregister_driver(&xen_acpi_memory_device_driver); | ||
469 | |||
470 | /* | ||
471 | * the stub reserves the space again to prevent any chance of the | ||
472 | * native driver loading. | ||
473 | */ | ||
474 | xen_stub_memory_device_init(); | ||
475 | return; | ||
476 | } | ||
477 | |||
478 | module_init(xen_acpi_memory_device_init); | ||
479 | module_exit(xen_acpi_memory_device_exit); | ||
480 | ACPI_MODULE_NAME("xen-acpi-memhotplug"); | ||
481 | MODULE_AUTHOR("Liu Jinsong <jinsong.liu@intel.com>"); | ||
482 | MODULE_DESCRIPTION("Xen Hotplug Mem Driver"); | ||
483 | MODULE_LICENSE("GPL"); | ||
diff --git a/drivers/xen/xen-stub.c b/drivers/xen/xen-stub.c
new file mode 100644
index 000000000000..d85e411cbf89
--- /dev/null
+++ b/drivers/xen/xen-stub.c
@@ -0,0 +1,101 @@ | |||
1 | /* | ||
2 | * xen-stub.c - stub drivers to reserve space for Xen | ||
3 | * | ||
4 | * Copyright (C) 2012 Intel Corporation | ||
5 | * Author: Liu Jinsong <jinsong.liu@intel.com> | ||
6 | * Author: Jiang Yunhong <yunhong.jiang@intel.com> | ||
7 | * | ||
8 | * Copyright (C) 2012 Oracle Inc | ||
9 | * Author: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com> | ||
10 | * | ||
11 | * This program is free software; you can redistribute it and/or modify | ||
12 | * it under the terms of the GNU General Public License as published by | ||
13 | * the Free Software Foundation; either version 2 of the License, or (at | ||
14 | * your option) any later version. | ||
15 | * | ||
16 | * This program is distributed in the hope that it will be useful, but | ||
17 | * WITHOUT ANY WARRANTY; without even the implied warranty of | ||
18 | * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or | ||
19 | * NON INFRINGEMENT. See the GNU General Public License for more | ||
20 | * details. | ||
21 | */ | ||
22 | |||
23 | #include <linux/kernel.h> | ||
24 | #include <linux/init.h> | ||
25 | #include <linux/export.h> | ||
26 | #include <linux/types.h> | ||
27 | #include <linux/acpi.h> | ||
28 | #include <acpi/acpi_drivers.h> | ||
29 | #include <xen/acpi.h> | ||
30 | |||
31 | #ifdef CONFIG_ACPI | ||
32 | |||
33 | /*-------------------------------------------- | ||
34 | stub driver for Xen memory hotplug | ||
35 | --------------------------------------------*/ | ||
36 | |||
37 | static const struct acpi_device_id memory_device_ids[] = { | ||
38 | {ACPI_MEMORY_DEVICE_HID, 0}, | ||
39 | {"", 0}, | ||
40 | }; | ||
41 | |||
42 | static struct acpi_driver xen_stub_memory_device_driver = { | ||
43 | /* same name as the native memory driver, to block it from loading */ | ||
44 | .name = "acpi_memhotplug", | ||
45 | .class = ACPI_MEMORY_DEVICE_CLASS, | ||
46 | .ids = memory_device_ids, | ||
47 | }; | ||
48 | |||
49 | int xen_stub_memory_device_init(void) | ||
50 | { | ||
51 | if (!xen_initial_domain()) | ||
52 | return -ENODEV; | ||
53 | |||
54 | /* just reserve the space for Xen, blocking the native driver from loading */ | ||
55 | return acpi_bus_register_driver(&xen_stub_memory_device_driver); | ||
56 | } | ||
57 | EXPORT_SYMBOL_GPL(xen_stub_memory_device_init); | ||
58 | subsys_initcall(xen_stub_memory_device_init); | ||
59 | |||
60 | void xen_stub_memory_device_exit(void) | ||
61 | { | ||
62 | acpi_bus_unregister_driver(&xen_stub_memory_device_driver); | ||
63 | } | ||
64 | EXPORT_SYMBOL_GPL(xen_stub_memory_device_exit); | ||
65 | |||
66 | |||
67 | /*-------------------------------------------- | ||
68 | stub driver for Xen cpu hotplug | ||
69 | --------------------------------------------*/ | ||
70 | |||
71 | static const struct acpi_device_id processor_device_ids[] = { | ||
72 | {ACPI_PROCESSOR_OBJECT_HID, 0}, | ||
73 | {ACPI_PROCESSOR_DEVICE_HID, 0}, | ||
74 | {"", 0}, | ||
75 | }; | ||
76 | |||
77 | static struct acpi_driver xen_stub_processor_driver = { | ||
78 | /* same name as the native processor driver, to block it from loading */ | ||
79 | .name = "processor", | ||
80 | .class = ACPI_PROCESSOR_CLASS, | ||
81 | .ids = processor_device_ids, | ||
82 | }; | ||
83 | |||
84 | int xen_stub_processor_init(void) | ||
85 | { | ||
86 | if (!xen_initial_domain()) | ||
87 | return -ENODEV; | ||
88 | |||
89 | /* just reserve the space for Xen, blocking the native driver from loading */ | ||
90 | return acpi_bus_register_driver(&xen_stub_processor_driver); | ||
91 | } | ||
92 | EXPORT_SYMBOL_GPL(xen_stub_processor_init); | ||
93 | subsys_initcall(xen_stub_processor_init); | ||
94 | |||
95 | void xen_stub_processor_exit(void) | ||
96 | { | ||
97 | acpi_bus_unregister_driver(&xen_stub_processor_driver); | ||
98 | } | ||
99 | EXPORT_SYMBOL_GPL(xen_stub_processor_exit); | ||
100 | |||
101 | #endif | ||
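The stub driver above is a reservation trick: the driver core refuses to register a second driver with the same name on a bus, so xen-stub registers early (at subsys_initcall time) under the native drivers' names ("processor", "acpi_memhotplug") to keep the native ACPI hotplug drivers from binding, and the real Xen modules later swap themselves in, restoring the stub on failure or unload. A toy model of that name-ownership handoff, using stand-in functions rather than the ACPI bus API (illustrative only):

	#include <stdio.h>

	/* Which driver object currently holds the contested driver name. */
	static const char *name_holder;

	static int bus_register_driver(const char *who)
	{
		if (name_holder)
			return -16;	/* -EBUSY: the name is already taken */
		name_holder = who;
		return 0;
	}

	static void bus_unregister_driver(void)
	{
		name_holder = NULL;
	}

	int main(void)
	{
		/* subsys_initcall(): the stub claims the name early in boot. */
		bus_register_driver("xen-stub (registered as \"acpi_memhotplug\")");

		/* The native driver, probing later, is refused. */
		printf("native load: %d\n",
		       bus_register_driver("native acpi_memhotplug"));

		/* module_init() of the real Xen driver: stub out, real driver in. */
		bus_unregister_driver();
		if (bus_register_driver("xen-acpi-memhotplug") < 0)
			bus_register_driver("xen-stub");	/* failure: restore the stub */

		printf("name holder: %s\n", name_holder);
		return 0;
	}

xen-acpi-cpuhotplug.c and xen-acpi-memhotplug.c follow exactly this sequence in their module init and exit paths.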
diff --git a/drivers/xen/xenbus/xenbus_probe.c b/drivers/xen/xenbus/xenbus_probe.c
index 038b71dbf03c..3325884c693f 100644
--- a/drivers/xen/xenbus/xenbus_probe.c
+++ b/drivers/xen/xenbus/xenbus_probe.c
@@ -769,7 +769,7 @@ static int __init xenbus_init(void) | |||
769 | goto out_error; | 769 | goto out_error; |
770 | xen_store_mfn = (unsigned long)v; | 770 | xen_store_mfn = (unsigned long)v; |
771 | xen_store_interface = | 771 | xen_store_interface = |
772 | ioremap(xen_store_mfn << PAGE_SHIFT, PAGE_SIZE); | 772 | xen_remap(xen_store_mfn << PAGE_SHIFT, PAGE_SIZE); |
773 | break; | 773 | break; |
774 | default: | 774 | default: |
775 | pr_warn("Xenstore state unknown\n"); | 775 | pr_warn("Xenstore state unknown\n"); |