path: root/arch
Diffstat (limited to 'arch')
-rw-r--r--  arch/alpha/include/asm/machvec.h | 2
-rw-r--r--  arch/alpha/include/asm/pci.h | 14
-rw-r--r--  arch/alpha/include/asm/system.h | 547
-rw-r--r--  arch/alpha/include/asm/types.h | 5
-rw-r--r--  arch/alpha/include/asm/uaccess.h | 12
-rw-r--r--  arch/alpha/include/asm/xchg.h | 258
-rw-r--r--  arch/alpha/kernel/Makefile | 2
-rw-r--r--  arch/alpha/kernel/err_ev6.c | 4
-rw-r--r--  arch/alpha/kernel/err_ev7.c | 6
-rw-r--r--  arch/alpha/kernel/err_marvel.c | 40
-rw-r--r--  arch/alpha/kernel/err_titan.c | 28
-rw-r--r--  arch/alpha/kernel/pci-sysfs.c | 366
-rw-r--r--  arch/alpha/kernel/pci.c | 2
-rw-r--r--  arch/alpha/kernel/pci_iommu.c | 34
-rw-r--r--  arch/alpha/kernel/proto.h | 16
-rw-r--r--  arch/alpha/kernel/setup.c | 2
-rw-r--r--  arch/alpha/kernel/smc37c669.c | 4
-rw-r--r--  arch/alpha/kernel/sys_jensen.c | 3
-rw-r--r--  arch/alpha/kernel/sys_sable.c | 4
-rw-r--r--  arch/alpha/kernel/traps.c | 2
-rw-r--r--  arch/avr32/mm/fault.c | 18
-rw-r--r--  arch/ia64/hp/sim/simserial.c | 49
-rw-r--r--  arch/ia64/include/asm/intrinsics.h | 6
-rw-r--r--  arch/ia64/include/asm/mmu_context.h | 6
-rw-r--r--  arch/ia64/include/asm/module.h | 6
-rw-r--r--  arch/ia64/include/asm/native/inst.h | 13
-rw-r--r--  arch/ia64/include/asm/native/patchlist.h | 38
-rw-r--r--  arch/ia64/include/asm/native/pvchk_inst.h | 8
-rw-r--r--  arch/ia64/include/asm/paravirt.h | 65
-rw-r--r--  arch/ia64/include/asm/paravirt_patch.h | 143
-rw-r--r--  arch/ia64/include/asm/paravirt_privop.h | 365
-rw-r--r--  arch/ia64/include/asm/smp.h | 3
-rw-r--r--  arch/ia64/include/asm/timex.h | 1
-rw-r--r--  arch/ia64/include/asm/topology.h | 5
-rw-r--r--  arch/ia64/include/asm/xen/hypervisor.h | 39
-rw-r--r--  arch/ia64/include/asm/xen/inst.h | 28
-rw-r--r--  arch/ia64/include/asm/xen/interface.h | 9
-rw-r--r--  arch/ia64/include/asm/xen/minstate.h | 11
-rw-r--r--  arch/ia64/include/asm/xen/patchlist.h | 38
-rw-r--r--  arch/ia64/include/asm/xen/privop.h | 8
-rw-r--r--  arch/ia64/kernel/Makefile | 39
-rw-r--r--  arch/ia64/kernel/Makefile.gate | 27
-rw-r--r--  arch/ia64/kernel/acpi.c | 8
-rw-r--r--  arch/ia64/kernel/asm-offsets.c | 2
-rw-r--r--  arch/ia64/kernel/efi.c | 1
-rw-r--r--  arch/ia64/kernel/entry.S | 4
-rw-r--r--  arch/ia64/kernel/fsys.S | 35
-rw-r--r--  arch/ia64/kernel/gate.S | 171
-rw-r--r--  arch/ia64/kernel/gate.lds.S | 17
-rw-r--r--  arch/ia64/kernel/head.S | 10
-rw-r--r--  arch/ia64/kernel/ivt.S | 2
-rw-r--r--  arch/ia64/kernel/mca.c | 6
-rw-r--r--  arch/ia64/kernel/module.c | 35
-rw-r--r--  arch/ia64/kernel/paravirt.c | 539
-rw-r--r--  arch/ia64/kernel/paravirt_patch.c | 514
-rw-r--r--  arch/ia64/kernel/paravirt_patchlist.c | 79
-rw-r--r--  arch/ia64/kernel/paravirt_patchlist.h | 28
-rw-r--r--  arch/ia64/kernel/paravirtentry.S | 99
-rw-r--r--  arch/ia64/kernel/patch.c | 40
-rw-r--r--  arch/ia64/kernel/perfmon.c | 4
-rw-r--r--  arch/ia64/kernel/salinfo.c | 6
-rw-r--r--  arch/ia64/kernel/setup.c | 9
-rw-r--r--  arch/ia64/kernel/smp.c | 6
-rw-r--r--  arch/ia64/kernel/smpboot.c | 17
-rw-r--r--  arch/ia64/kernel/time.c | 25
-rw-r--r--  arch/ia64/kernel/vmlinux.lds.S | 30
-rw-r--r--  arch/ia64/kvm/kvm-ia64.c | 2
-rw-r--r--  arch/ia64/kvm/vcpu.c | 2
-rw-r--r--  arch/ia64/kvm/vtlb.c | 2
-rw-r--r--  arch/ia64/mm/init.c | 12
-rw-r--r--  arch/ia64/mm/tlb.c | 2
-rw-r--r--  arch/ia64/scripts/pvcheck.sed | 1
-rw-r--r--  arch/ia64/sn/kernel/io_common.c | 15
-rw-r--r--  arch/ia64/sn/kernel/io_init.c | 12
-rw-r--r--  arch/ia64/sn/kernel/setup.c | 5
-rw-r--r--  arch/ia64/sn/kernel/sn2/sn2_smp.c | 12
-rw-r--r--  arch/ia64/sn/kernel/sn2/sn_hwperf.c | 8
-rw-r--r--  arch/ia64/sn/pci/pcibr/pcibr_dma.c | 4
-rw-r--r--  arch/ia64/xen/Makefile | 19
-rw-r--r--  arch/ia64/xen/gate-data.S | 3
-rw-r--r--  arch/ia64/xen/hypercall.S | 2
-rw-r--r--  arch/ia64/xen/time.c | 48
-rw-r--r--  arch/ia64/xen/xen_pv_ops.c | 800
-rw-r--r--  arch/mips/include/asm/mach-bcm47xx/gpio.h | 20
-rw-r--r--  arch/mips/mm/highmem.c | 2
-rw-r--r--  arch/parisc/kernel/time.c | 7
-rw-r--r--  arch/powerpc/Kconfig | 3
-rw-r--r--  arch/powerpc/Kconfig.debug | 1
-rw-r--r--  arch/powerpc/boot/dts/mpc832x_rdb.dts | 24
-rw-r--r--  arch/powerpc/include/asm/highmem.h | 2
-rw-r--r--  arch/powerpc/include/asm/pci.h | 4
-rw-r--r--  arch/powerpc/include/asm/suspend.h | 3
-rw-r--r--  arch/powerpc/kernel/msi.c | 5
-rw-r--r--  arch/powerpc/platforms/83xx/mpc832x_rdb.c | 123
-rw-r--r--  arch/powerpc/sysdev/fsl_soc.c | 109
-rw-r--r--  arch/powerpc/sysdev/fsl_soc.h | 7
-rw-r--r--  arch/s390/Kconfig | 3
-rw-r--r--  arch/s390/Kconfig.debug | 1
-rw-r--r--  arch/s390/hypfs/hypfs_diag.c | 2
-rw-r--r--  arch/s390/include/asm/cio.h | 2
-rw-r--r--  arch/sparc/Kconfig | 3
-rw-r--r--  arch/sparc/Kconfig.debug | 3
-rw-r--r--  arch/sparc/mm/highmem.c | 1
-rw-r--r--  arch/um/drivers/pcap_user.h | 10
-rw-r--r--  arch/um/drivers/port.h | 10
-rw-r--r--  arch/um/drivers/ssl.h | 10
-rw-r--r--  arch/um/drivers/stdio_console.h | 10
-rw-r--r--  arch/um/drivers/ubd_kern.c | 17
-rw-r--r--  arch/um/drivers/xterm.h | 10
-rw-r--r--  arch/um/include/asm/irq_vectors.h | 10
-rw-r--r--  arch/um/include/asm/mmu.h | 10
-rw-r--r--  arch/um/include/asm/pda.h | 10
-rw-r--r--  arch/um/include/asm/pgalloc.h | 10
-rw-r--r--  arch/um/include/asm/pgtable-3level.h | 10
-rw-r--r--  arch/um/include/shared/frame_kern.h | 10
-rw-r--r--  arch/um/include/shared/initrd.h | 10
-rw-r--r--  arch/um/include/shared/irq_kern.h | 10
-rw-r--r--  arch/um/include/shared/mem_kern.h | 10
-rw-r--r--  arch/um/include/shared/ubd_user.h | 10
-rw-r--r--  arch/um/kernel/Makefile | 6
-rw-r--r--  arch/um/kernel/config.c.in | 18
-rw-r--r--  arch/um/os-Linux/start_up.c | 8
-rw-r--r--  arch/um/sys-i386/asm/archparam.h | 10
-rw-r--r--  arch/um/sys-i386/shared/sysdep/checksum.h | 10
-rw-r--r--  arch/um/sys-ia64/sysdep/ptrace.h | 10
-rw-r--r--  arch/um/sys-ia64/sysdep/sigcontext.h | 10
-rw-r--r--  arch/um/sys-ia64/sysdep/syscalls.h | 10
-rw-r--r--  arch/um/sys-ppc/miscthings.c | 11
-rw-r--r--  arch/um/sys-ppc/ptrace.c | 10
-rw-r--r--  arch/um/sys-ppc/ptrace_user.c | 10
-rw-r--r--  arch/um/sys-ppc/shared/sysdep/ptrace.h | 10
-rw-r--r--  arch/um/sys-ppc/shared/sysdep/sigcontext.h | 10
-rw-r--r--  arch/um/sys-ppc/shared/sysdep/syscalls.h | 10
-rw-r--r--  arch/um/sys-ppc/sigcontext.c | 10
-rw-r--r--  arch/um/sys-x86_64/asm/archparam.h | 10
-rw-r--r--  arch/um/sys-x86_64/asm/module.h | 10
-rw-r--r--  arch/um/sys-x86_64/mem.c | 9
-rw-r--r--  arch/x86/Kconfig | 3
-rw-r--r--  arch/x86/Kconfig.debug | 1
-rw-r--r--  arch/x86/boot/memory.c | 39
-rw-r--r--  arch/x86/include/asm/pci.h | 3
-rw-r--r--  arch/x86/include/asm/suspend_32.h | 24
-rw-r--r--  arch/x86/kernel/apic/io_apic.c | 4
-rw-r--r--  arch/x86/kernel/asm-offsets_32.c | 1
-rw-r--r--  arch/x86/kernel/asm-offsets_64.c | 1
-rw-r--r--  arch/x86/kernel/pci-dma.c | 3
-rw-r--r--  arch/x86/mm/highmem_32.c | 46
-rw-r--r--  arch/x86/mm/iomap_32.c | 2
-rw-r--r--  arch/x86/pci/early.c | 19
-rw-r--r--  arch/x86/pci/fixup.c | 20
-rw-r--r--  arch/x86/pci/legacy.c | 3
-rw-r--r--  arch/x86/pci/mmconfig-shared.c | 227
-rw-r--r--  arch/x86/pci/mmconfig_64.c | 17
-rw-r--r--  arch/x86/power/cpu_32.c | 1
-rw-r--r--  arch/x86/power/cpu_64.c | 1
-rw-r--r--  arch/x86/power/hibernate_64.c | 1
-rw-r--r--  arch/xtensa/platforms/iss/console.c | 29
157 files changed, 4470 insertions, 1561 deletions
diff --git a/arch/alpha/include/asm/machvec.h b/arch/alpha/include/asm/machvec.h
index fea4ea75b79..13cd4274381 100644
--- a/arch/alpha/include/asm/machvec.h
+++ b/arch/alpha/include/asm/machvec.h
@@ -80,7 +80,7 @@ struct alpha_machine_vector
80 void (*update_irq_hw)(unsigned long, unsigned long, int); 80 void (*update_irq_hw)(unsigned long, unsigned long, int);
81 void (*ack_irq)(unsigned long); 81 void (*ack_irq)(unsigned long);
82 void (*device_interrupt)(unsigned long vector); 82 void (*device_interrupt)(unsigned long vector);
83 void (*machine_check)(u64 vector, u64 la); 83 void (*machine_check)(unsigned long vector, unsigned long la);
84 84
85 void (*smp_callin)(void); 85 void (*smp_callin)(void);
86 void (*init_arch)(void); 86 void (*init_arch)(void);
diff --git a/arch/alpha/include/asm/pci.h b/arch/alpha/include/asm/pci.h
index 2a14302c17a..cb04eaa6ba3 100644
--- a/arch/alpha/include/asm/pci.h
+++ b/arch/alpha/include/asm/pci.h
@@ -273,4 +273,18 @@ struct pci_dev *alpha_gendev_to_pci(struct device *dev);
273 273
274extern struct pci_dev *isa_bridge; 274extern struct pci_dev *isa_bridge;
275 275
276extern int pci_legacy_read(struct pci_bus *bus, loff_t port, u32 *val,
277 size_t count);
278extern int pci_legacy_write(struct pci_bus *bus, loff_t port, u32 val,
279 size_t count);
280extern int pci_mmap_legacy_page_range(struct pci_bus *bus,
281 struct vm_area_struct *vma,
282 enum pci_mmap_state mmap_state);
283extern void pci_adjust_legacy_attr(struct pci_bus *bus,
284 enum pci_mmap_state mmap_type);
285#define HAVE_PCI_LEGACY 1
286
287extern int pci_create_resource_files(struct pci_dev *dev);
288extern void pci_remove_resource_files(struct pci_dev *dev);
289
276#endif /* __ALPHA_PCI_H */ 290#endif /* __ALPHA_PCI_H */
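The declarations added above hook Alpha into the kernel's generic legacy PCI support: once HAVE_PCI_LEGACY is defined, the PCI core exposes per-bus legacy_io/legacy_mem sysfs files whose read, write and mmap paths land in the routines declared here. Below is a minimal userspace sketch of exercising pci_legacy_read() through that interface; the bus address and port number are illustrative assumptions, not taken from this patch.

/* Read one byte from an I/O port via the legacy_io file (example only). */
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	unsigned char b;
	int fd = open("/sys/class/pci_bus/0000:00/legacy_io", O_RDONLY);

	if (fd < 0)
		return perror("open"), 1;
	/* The file offset selects the port; the access size selects inb/inw/inl. */
	if (pread(fd, &b, 1, 0x61) != 1)
		return perror("pread"), 1;
	printf("port 0x61 = 0x%02x\n", b);
	close(fd);
	return 0;
}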
diff --git a/arch/alpha/include/asm/system.h b/arch/alpha/include/asm/system.h
index afe20fa58c9..5aa40cca4f2 100644
--- a/arch/alpha/include/asm/system.h
+++ b/arch/alpha/include/asm/system.h
@@ -309,518 +309,71 @@ extern int __min_ipl;
309#define tbia() __tbi(-2, /* no second argument */) 309#define tbia() __tbi(-2, /* no second argument */)
310 310
311/* 311/*
312 * Atomic exchange. 312 * Atomic exchange routines.
313 * Since it can be used to implement critical sections
314 * it must clobber "memory" (also for interrupts in UP).
315 */ 313 */
316 314
317static inline unsigned long 315#define __ASM__MB
318__xchg_u8(volatile char *m, unsigned long val) 316#define ____xchg(type, args...) __xchg ## type ## _local(args)
319{ 317#define ____cmpxchg(type, args...) __cmpxchg ## type ## _local(args)
320 unsigned long ret, tmp, addr64; 318#include <asm/xchg.h>
321
322 __asm__ __volatile__(
323 " andnot %4,7,%3\n"
324 " insbl %1,%4,%1\n"
325 "1: ldq_l %2,0(%3)\n"
326 " extbl %2,%4,%0\n"
327 " mskbl %2,%4,%2\n"
328 " or %1,%2,%2\n"
329 " stq_c %2,0(%3)\n"
330 " beq %2,2f\n"
331#ifdef CONFIG_SMP
332 " mb\n"
333#endif
334 ".subsection 2\n"
335 "2: br 1b\n"
336 ".previous"
337 : "=&r" (ret), "=&r" (val), "=&r" (tmp), "=&r" (addr64)
338 : "r" ((long)m), "1" (val) : "memory");
339
340 return ret;
341}
342
343static inline unsigned long
344__xchg_u16(volatile short *m, unsigned long val)
345{
346 unsigned long ret, tmp, addr64;
347
348 __asm__ __volatile__(
349 " andnot %4,7,%3\n"
350 " inswl %1,%4,%1\n"
351 "1: ldq_l %2,0(%3)\n"
352 " extwl %2,%4,%0\n"
353 " mskwl %2,%4,%2\n"
354 " or %1,%2,%2\n"
355 " stq_c %2,0(%3)\n"
356 " beq %2,2f\n"
357#ifdef CONFIG_SMP
358 " mb\n"
359#endif
360 ".subsection 2\n"
361 "2: br 1b\n"
362 ".previous"
363 : "=&r" (ret), "=&r" (val), "=&r" (tmp), "=&r" (addr64)
364 : "r" ((long)m), "1" (val) : "memory");
365
366 return ret;
367}
368
369static inline unsigned long
370__xchg_u32(volatile int *m, unsigned long val)
371{
372 unsigned long dummy;
373
374 __asm__ __volatile__(
375 "1: ldl_l %0,%4\n"
376 " bis $31,%3,%1\n"
377 " stl_c %1,%2\n"
378 " beq %1,2f\n"
379#ifdef CONFIG_SMP
380 " mb\n"
381#endif
382 ".subsection 2\n"
383 "2: br 1b\n"
384 ".previous"
385 : "=&r" (val), "=&r" (dummy), "=m" (*m)
386 : "rI" (val), "m" (*m) : "memory");
387
388 return val;
389}
390
391static inline unsigned long
392__xchg_u64(volatile long *m, unsigned long val)
393{
394 unsigned long dummy;
395
396 __asm__ __volatile__(
397 "1: ldq_l %0,%4\n"
398 " bis $31,%3,%1\n"
399 " stq_c %1,%2\n"
400 " beq %1,2f\n"
401#ifdef CONFIG_SMP
402 " mb\n"
403#endif
404 ".subsection 2\n"
405 "2: br 1b\n"
406 ".previous"
407 : "=&r" (val), "=&r" (dummy), "=m" (*m)
408 : "rI" (val), "m" (*m) : "memory");
409 319
410 return val; 320#define xchg_local(ptr,x) \
411} 321 ({ \
412 322 __typeof__(*(ptr)) _x_ = (x); \
413/* This function doesn't exist, so you'll get a linker error 323 (__typeof__(*(ptr))) __xchg_local((ptr), (unsigned long)_x_, \
414 if something tries to do an invalid xchg(). */ 324 sizeof(*(ptr))); \
415extern void __xchg_called_with_bad_pointer(void);
416
417#define __xchg(ptr, x, size) \
418({ \
419 unsigned long __xchg__res; \
420 volatile void *__xchg__ptr = (ptr); \
421 switch (size) { \
422 case 1: __xchg__res = __xchg_u8(__xchg__ptr, x); break; \
423 case 2: __xchg__res = __xchg_u16(__xchg__ptr, x); break; \
424 case 4: __xchg__res = __xchg_u32(__xchg__ptr, x); break; \
425 case 8: __xchg__res = __xchg_u64(__xchg__ptr, x); break; \
426 default: __xchg_called_with_bad_pointer(); __xchg__res = x; \
427 } \
428 __xchg__res; \
429})
430
431#define xchg(ptr,x) \
432 ({ \
433 __typeof__(*(ptr)) _x_ = (x); \
434 (__typeof__(*(ptr))) __xchg((ptr), (unsigned long)_x_, sizeof(*(ptr))); \
435 }) 325 })
436 326
437static inline unsigned long 327#define cmpxchg_local(ptr, o, n) \
438__xchg_u8_local(volatile char *m, unsigned long val) 328 ({ \
439{ 329 __typeof__(*(ptr)) _o_ = (o); \
440 unsigned long ret, tmp, addr64; 330 __typeof__(*(ptr)) _n_ = (n); \
441 331 (__typeof__(*(ptr))) __cmpxchg_local((ptr), (unsigned long)_o_, \
442 __asm__ __volatile__( 332 (unsigned long)_n_, \
443 " andnot %4,7,%3\n" 333 sizeof(*(ptr))); \
444 " insbl %1,%4,%1\n"
445 "1: ldq_l %2,0(%3)\n"
446 " extbl %2,%4,%0\n"
447 " mskbl %2,%4,%2\n"
448 " or %1,%2,%2\n"
449 " stq_c %2,0(%3)\n"
450 " beq %2,2f\n"
451 ".subsection 2\n"
452 "2: br 1b\n"
453 ".previous"
454 : "=&r" (ret), "=&r" (val), "=&r" (tmp), "=&r" (addr64)
455 : "r" ((long)m), "1" (val) : "memory");
456
457 return ret;
458}
459
460static inline unsigned long
461__xchg_u16_local(volatile short *m, unsigned long val)
462{
463 unsigned long ret, tmp, addr64;
464
465 __asm__ __volatile__(
466 " andnot %4,7,%3\n"
467 " inswl %1,%4,%1\n"
468 "1: ldq_l %2,0(%3)\n"
469 " extwl %2,%4,%0\n"
470 " mskwl %2,%4,%2\n"
471 " or %1,%2,%2\n"
472 " stq_c %2,0(%3)\n"
473 " beq %2,2f\n"
474 ".subsection 2\n"
475 "2: br 1b\n"
476 ".previous"
477 : "=&r" (ret), "=&r" (val), "=&r" (tmp), "=&r" (addr64)
478 : "r" ((long)m), "1" (val) : "memory");
479
480 return ret;
481}
482
483static inline unsigned long
484__xchg_u32_local(volatile int *m, unsigned long val)
485{
486 unsigned long dummy;
487
488 __asm__ __volatile__(
489 "1: ldl_l %0,%4\n"
490 " bis $31,%3,%1\n"
491 " stl_c %1,%2\n"
492 " beq %1,2f\n"
493 ".subsection 2\n"
494 "2: br 1b\n"
495 ".previous"
496 : "=&r" (val), "=&r" (dummy), "=m" (*m)
497 : "rI" (val), "m" (*m) : "memory");
498
499 return val;
500}
501
502static inline unsigned long
503__xchg_u64_local(volatile long *m, unsigned long val)
504{
505 unsigned long dummy;
506
507 __asm__ __volatile__(
508 "1: ldq_l %0,%4\n"
509 " bis $31,%3,%1\n"
510 " stq_c %1,%2\n"
511 " beq %1,2f\n"
512 ".subsection 2\n"
513 "2: br 1b\n"
514 ".previous"
515 : "=&r" (val), "=&r" (dummy), "=m" (*m)
516 : "rI" (val), "m" (*m) : "memory");
517
518 return val;
519}
520
521#define __xchg_local(ptr, x, size) \
522({ \
523 unsigned long __xchg__res; \
524 volatile void *__xchg__ptr = (ptr); \
525 switch (size) { \
526 case 1: __xchg__res = __xchg_u8_local(__xchg__ptr, x); break; \
527 case 2: __xchg__res = __xchg_u16_local(__xchg__ptr, x); break; \
528 case 4: __xchg__res = __xchg_u32_local(__xchg__ptr, x); break; \
529 case 8: __xchg__res = __xchg_u64_local(__xchg__ptr, x); break; \
530 default: __xchg_called_with_bad_pointer(); __xchg__res = x; \
531 } \
532 __xchg__res; \
533})
534
535#define xchg_local(ptr,x) \
536 ({ \
537 __typeof__(*(ptr)) _x_ = (x); \
538 (__typeof__(*(ptr))) __xchg_local((ptr), (unsigned long)_x_, \
539 sizeof(*(ptr))); \
540 }) 334 })
541 335
542/* 336#define cmpxchg64_local(ptr, o, n) \
543 * Atomic compare and exchange. Compare OLD with MEM, if identical, 337 ({ \
544 * store NEW in MEM. Return the initial value in MEM. Success is 338 BUILD_BUG_ON(sizeof(*(ptr)) != 8); \
545 * indicated by comparing RETURN with OLD. 339 cmpxchg_local((ptr), (o), (n)); \
546 * 340 })
547 * The memory barrier should be placed in SMP only when we actually
548 * make the change. If we don't change anything (so if the returned
549 * prev is equal to old) then we aren't acquiring anything new and
550 * we don't need any memory barrier as far I can tell.
551 */
552
553#define __HAVE_ARCH_CMPXCHG 1
554
555static inline unsigned long
556__cmpxchg_u8(volatile char *m, long old, long new)
557{
558 unsigned long prev, tmp, cmp, addr64;
559
560 __asm__ __volatile__(
561 " andnot %5,7,%4\n"
562 " insbl %1,%5,%1\n"
563 "1: ldq_l %2,0(%4)\n"
564 " extbl %2,%5,%0\n"
565 " cmpeq %0,%6,%3\n"
566 " beq %3,2f\n"
567 " mskbl %2,%5,%2\n"
568 " or %1,%2,%2\n"
569 " stq_c %2,0(%4)\n"
570 " beq %2,3f\n"
571#ifdef CONFIG_SMP
572 " mb\n"
573#endif
574 "2:\n"
575 ".subsection 2\n"
576 "3: br 1b\n"
577 ".previous"
578 : "=&r" (prev), "=&r" (new), "=&r" (tmp), "=&r" (cmp), "=&r" (addr64)
579 : "r" ((long)m), "Ir" (old), "1" (new) : "memory");
580
581 return prev;
582}
583
584static inline unsigned long
585__cmpxchg_u16(volatile short *m, long old, long new)
586{
587 unsigned long prev, tmp, cmp, addr64;
588
589 __asm__ __volatile__(
590 " andnot %5,7,%4\n"
591 " inswl %1,%5,%1\n"
592 "1: ldq_l %2,0(%4)\n"
593 " extwl %2,%5,%0\n"
594 " cmpeq %0,%6,%3\n"
595 " beq %3,2f\n"
596 " mskwl %2,%5,%2\n"
597 " or %1,%2,%2\n"
598 " stq_c %2,0(%4)\n"
599 " beq %2,3f\n"
600#ifdef CONFIG_SMP
601 " mb\n"
602#endif
603 "2:\n"
604 ".subsection 2\n"
605 "3: br 1b\n"
606 ".previous"
607 : "=&r" (prev), "=&r" (new), "=&r" (tmp), "=&r" (cmp), "=&r" (addr64)
608 : "r" ((long)m), "Ir" (old), "1" (new) : "memory");
609
610 return prev;
611}
612
613static inline unsigned long
614__cmpxchg_u32(volatile int *m, int old, int new)
615{
616 unsigned long prev, cmp;
617
618 __asm__ __volatile__(
619 "1: ldl_l %0,%5\n"
620 " cmpeq %0,%3,%1\n"
621 " beq %1,2f\n"
622 " mov %4,%1\n"
623 " stl_c %1,%2\n"
624 " beq %1,3f\n"
625#ifdef CONFIG_SMP
626 " mb\n"
627#endif
628 "2:\n"
629 ".subsection 2\n"
630 "3: br 1b\n"
631 ".previous"
632 : "=&r"(prev), "=&r"(cmp), "=m"(*m)
633 : "r"((long) old), "r"(new), "m"(*m) : "memory");
634
635 return prev;
636}
637 341
638static inline unsigned long
639__cmpxchg_u64(volatile long *m, unsigned long old, unsigned long new)
640{
641 unsigned long prev, cmp;
642
643 __asm__ __volatile__(
644 "1: ldq_l %0,%5\n"
645 " cmpeq %0,%3,%1\n"
646 " beq %1,2f\n"
647 " mov %4,%1\n"
648 " stq_c %1,%2\n"
649 " beq %1,3f\n"
650#ifdef CONFIG_SMP 342#ifdef CONFIG_SMP
651 " mb\n" 343#undef __ASM__MB
344#define __ASM__MB "\tmb\n"
652#endif 345#endif
653 "2:\n" 346#undef ____xchg
654 ".subsection 2\n" 347#undef ____cmpxchg
655 "3: br 1b\n" 348#define ____xchg(type, args...) __xchg ##type(args)
656 ".previous" 349#define ____cmpxchg(type, args...) __cmpxchg ##type(args)
657 : "=&r"(prev), "=&r"(cmp), "=m"(*m) 350#include <asm/xchg.h>
658 : "r"((long) old), "r"(new), "m"(*m) : "memory"); 351
659 352#define xchg(ptr,x) \
660 return prev; 353 ({ \
661} 354 __typeof__(*(ptr)) _x_ = (x); \
662 355 (__typeof__(*(ptr))) __xchg((ptr), (unsigned long)_x_, \
663/* This function doesn't exist, so you'll get a linker error 356 sizeof(*(ptr))); \
664 if something tries to do an invalid cmpxchg(). */
665extern void __cmpxchg_called_with_bad_pointer(void);
666
667static __always_inline unsigned long
668__cmpxchg(volatile void *ptr, unsigned long old, unsigned long new, int size)
669{
670 switch (size) {
671 case 1:
672 return __cmpxchg_u8(ptr, old, new);
673 case 2:
674 return __cmpxchg_u16(ptr, old, new);
675 case 4:
676 return __cmpxchg_u32(ptr, old, new);
677 case 8:
678 return __cmpxchg_u64(ptr, old, new);
679 }
680 __cmpxchg_called_with_bad_pointer();
681 return old;
682}
683
684#define cmpxchg(ptr, o, n) \
685 ({ \
686 __typeof__(*(ptr)) _o_ = (o); \
687 __typeof__(*(ptr)) _n_ = (n); \
688 (__typeof__(*(ptr))) __cmpxchg((ptr), (unsigned long)_o_, \
689 (unsigned long)_n_, sizeof(*(ptr))); \
690 }) 357 })
691#define cmpxchg64(ptr, o, n) \
692 ({ \
693 BUILD_BUG_ON(sizeof(*(ptr)) != 8); \
694 cmpxchg((ptr), (o), (n)); \
695 })
696
697static inline unsigned long
698__cmpxchg_u8_local(volatile char *m, long old, long new)
699{
700 unsigned long prev, tmp, cmp, addr64;
701
702 __asm__ __volatile__(
703 " andnot %5,7,%4\n"
704 " insbl %1,%5,%1\n"
705 "1: ldq_l %2,0(%4)\n"
706 " extbl %2,%5,%0\n"
707 " cmpeq %0,%6,%3\n"
708 " beq %3,2f\n"
709 " mskbl %2,%5,%2\n"
710 " or %1,%2,%2\n"
711 " stq_c %2,0(%4)\n"
712 " beq %2,3f\n"
713 "2:\n"
714 ".subsection 2\n"
715 "3: br 1b\n"
716 ".previous"
717 : "=&r" (prev), "=&r" (new), "=&r" (tmp), "=&r" (cmp), "=&r" (addr64)
718 : "r" ((long)m), "Ir" (old), "1" (new) : "memory");
719
720 return prev;
721}
722
723static inline unsigned long
724__cmpxchg_u16_local(volatile short *m, long old, long new)
725{
726 unsigned long prev, tmp, cmp, addr64;
727
728 __asm__ __volatile__(
729 " andnot %5,7,%4\n"
730 " inswl %1,%5,%1\n"
731 "1: ldq_l %2,0(%4)\n"
732 " extwl %2,%5,%0\n"
733 " cmpeq %0,%6,%3\n"
734 " beq %3,2f\n"
735 " mskwl %2,%5,%2\n"
736 " or %1,%2,%2\n"
737 " stq_c %2,0(%4)\n"
738 " beq %2,3f\n"
739 "2:\n"
740 ".subsection 2\n"
741 "3: br 1b\n"
742 ".previous"
743 : "=&r" (prev), "=&r" (new), "=&r" (tmp), "=&r" (cmp), "=&r" (addr64)
744 : "r" ((long)m), "Ir" (old), "1" (new) : "memory");
745
746 return prev;
747}
748
749static inline unsigned long
750__cmpxchg_u32_local(volatile int *m, int old, int new)
751{
752 unsigned long prev, cmp;
753
754 __asm__ __volatile__(
755 "1: ldl_l %0,%5\n"
756 " cmpeq %0,%3,%1\n"
757 " beq %1,2f\n"
758 " mov %4,%1\n"
759 " stl_c %1,%2\n"
760 " beq %1,3f\n"
761 "2:\n"
762 ".subsection 2\n"
763 "3: br 1b\n"
764 ".previous"
765 : "=&r"(prev), "=&r"(cmp), "=m"(*m)
766 : "r"((long) old), "r"(new), "m"(*m) : "memory");
767
768 return prev;
769}
770
771static inline unsigned long
772__cmpxchg_u64_local(volatile long *m, unsigned long old, unsigned long new)
773{
774 unsigned long prev, cmp;
775
776 __asm__ __volatile__(
777 "1: ldq_l %0,%5\n"
778 " cmpeq %0,%3,%1\n"
779 " beq %1,2f\n"
780 " mov %4,%1\n"
781 " stq_c %1,%2\n"
782 " beq %1,3f\n"
783 "2:\n"
784 ".subsection 2\n"
785 "3: br 1b\n"
786 ".previous"
787 : "=&r"(prev), "=&r"(cmp), "=m"(*m)
788 : "r"((long) old), "r"(new), "m"(*m) : "memory");
789
790 return prev;
791}
792
793static __always_inline unsigned long
794__cmpxchg_local(volatile void *ptr, unsigned long old, unsigned long new,
795 int size)
796{
797 switch (size) {
798 case 1:
799 return __cmpxchg_u8_local(ptr, old, new);
800 case 2:
801 return __cmpxchg_u16_local(ptr, old, new);
802 case 4:
803 return __cmpxchg_u32_local(ptr, old, new);
804 case 8:
805 return __cmpxchg_u64_local(ptr, old, new);
806 }
807 __cmpxchg_called_with_bad_pointer();
808 return old;
809}
810 358
811#define cmpxchg_local(ptr, o, n) \ 359#define cmpxchg(ptr, o, n) \
812 ({ \ 360 ({ \
813 __typeof__(*(ptr)) _o_ = (o); \ 361 __typeof__(*(ptr)) _o_ = (o); \
814 __typeof__(*(ptr)) _n_ = (n); \ 362 __typeof__(*(ptr)) _n_ = (n); \
815 (__typeof__(*(ptr))) __cmpxchg_local((ptr), (unsigned long)_o_, \ 363 (__typeof__(*(ptr))) __cmpxchg((ptr), (unsigned long)_o_, \
816 (unsigned long)_n_, sizeof(*(ptr))); \ 364 (unsigned long)_n_, sizeof(*(ptr)));\
817 }) 365 })
818#define cmpxchg64_local(ptr, o, n) \ 366
819 ({ \ 367#define cmpxchg64(ptr, o, n) \
820 BUILD_BUG_ON(sizeof(*(ptr)) != 8); \ 368 ({ \
821 cmpxchg_local((ptr), (o), (n)); \ 369 BUILD_BUG_ON(sizeof(*(ptr)) != 8); \
370 cmpxchg((ptr), (o), (n)); \
822 }) 371 })
823 372
373#undef __ASM__MB
374#undef ____cmpxchg
375
376#define __HAVE_ARCH_CMPXCHG 1
824 377
825#endif /* __ASSEMBLY__ */ 378#endif /* __ASSEMBLY__ */
826 379
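The rewritten system.h above keeps only the wrapper macros and generates both flavours of the exchange primitives by including asm/xchg.h twice: the first pass defines __ASM__MB as empty and maps ____xchg()/____cmpxchg() onto the *_local names, the second pass defines __ASM__MB as an mb instruction on SMP and maps them onto the plain names. Below is a stand-alone sketch of the same double-include technique; the names (ops.h, fetch_add_*) are invented for the example and are not part of the kernel.

/* ops.h -- generates one variant per inclusion; the includer supplies
 * OPS_SUFFIX (pasted into the function name) and OPS_FENCE (barrier code). */
#define OPS_PASTE2(a, b) a##b
#define OPS_PASTE(a, b) OPS_PASTE2(a, b)

static inline int OPS_PASTE(fetch_add, OPS_SUFFIX)(int *p, int v)
{
	int old = *p;

	*p = old + v;
	OPS_FENCE;
	return old;
}

#undef OPS_PASTE
#undef OPS_PASTE2

/* main.c -- include the template twice, once per variant. */
#include <stdio.h>

#define OPS_SUFFIX _local
#define OPS_FENCE do { } while (0)
#include "ops.h"
#undef OPS_SUFFIX
#undef OPS_FENCE

#define OPS_SUFFIX _mb
#define OPS_FENCE __sync_synchronize()	/* GCC full-barrier builtin */
#include "ops.h"
#undef OPS_SUFFIX
#undef OPS_FENCE

int main(void)
{
	int x = 1;
	int a = fetch_add_local(&x, 2);	/* returns 1, x becomes 3 */
	int b = fetch_add_mb(&x, 3);	/* returns 3, x becomes 6 */

	printf("%d %d %d\n", a, b, x);	/* 1 3 6 */
	return 0;
}

system.h does the same thing with ____xchg()/____cmpxchg(), which also paste the size suffix (_u8, _u16, ...), so a single copy of the assembly in xchg.h yields both the barrier and the barrier-free families.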
diff --git a/arch/alpha/include/asm/types.h b/arch/alpha/include/asm/types.h
index c1541353cce..f072f344497 100644
--- a/arch/alpha/include/asm/types.h
+++ b/arch/alpha/include/asm/types.h
@@ -8,7 +8,12 @@
8 * not a major issue. However, for interoperability, libraries still 8 * not a major issue. However, for interoperability, libraries still
9 * need to be careful to avoid a name clashes. 9 * need to be careful to avoid a name clashes.
10 */ 10 */
11
12#ifdef __KERNEL__
13#include <asm-generic/int-ll64.h>
14#else
11#include <asm-generic/int-l64.h> 15#include <asm-generic/int-l64.h>
16#endif
12 17
13#ifndef __ASSEMBLY__ 18#ifndef __ASSEMBLY__
14 19
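This is the switch that drives most of the printk format changes later in the series: under asm-generic/int-ll64.h, u64 becomes unsigned long long even on 64-bit Alpha, so 64-bit values now need %llx/%llu rather than %lx/%lu. A tiny userspace illustration of the same format rule, with ordinary printf standing in for printk:

#include <stdio.h>

typedef unsigned long long u64;	/* int-ll64.h convention */

int main(void)
{
	u64 addr = 0xfffffc0000310000ULL;

	/* printf("0x%016lx\n", addr); would now draw a -Wformat warning */
	printf("Address: 0x%016llx\n", addr);
	return 0;
}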
diff --git a/arch/alpha/include/asm/uaccess.h b/arch/alpha/include/asm/uaccess.h
index 22de3b434a2..163f3053001 100644
--- a/arch/alpha/include/asm/uaccess.h
+++ b/arch/alpha/include/asm/uaccess.h
@@ -498,13 +498,13 @@ struct exception_table_entry
498}; 498};
499 499
500/* Returns the new pc */ 500/* Returns the new pc */
501#define fixup_exception(map_reg, fixup, pc) \ 501#define fixup_exception(map_reg, _fixup, pc) \
502({ \ 502({ \
503 if ((fixup)->fixup.bits.valreg != 31) \ 503 if ((_fixup)->fixup.bits.valreg != 31) \
504 map_reg((fixup)->fixup.bits.valreg) = 0; \ 504 map_reg((_fixup)->fixup.bits.valreg) = 0; \
505 if ((fixup)->fixup.bits.errreg != 31) \ 505 if ((_fixup)->fixup.bits.errreg != 31) \
506 map_reg((fixup)->fixup.bits.errreg) = -EFAULT; \ 506 map_reg((_fixup)->fixup.bits.errreg) = -EFAULT; \
507 (pc) + (fixup)->fixup.bits.nextinsn; \ 507 (pc) + (_fixup)->fixup.bits.nextinsn; \
508}) 508})
509 509
510 510
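Renaming the macro parameter from fixup to _fixup keeps the parameter from capturing the struct field of the same name: the preprocessor substitutes every matching token in the body, including the one after "->", so the original macro only worked when the caller's argument happened to be spelled fixup. A small stand-alone illustration of the hazard (the struct and macro names are invented for the example):

#include <stdio.h>

struct entry { int fixup; };

#define BROKEN_GET(fixup)	((fixup)->fixup)	/* both tokens get substituted */
#define FIXED_GET(_fixup)	((_fixup)->fixup)	/* field name is left alone */

int main(void)
{
	struct entry e = { .fixup = 42 };
	struct entry *ent = &e;

	/* BROKEN_GET(ent) would expand to ((ent)->ent) and fail to compile. */
	printf("%d\n", FIXED_GET(ent));
	return 0;
}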
diff --git a/arch/alpha/include/asm/xchg.h b/arch/alpha/include/asm/xchg.h
new file mode 100644
index 00000000000..beba1b803e0
--- /dev/null
+++ b/arch/alpha/include/asm/xchg.h
@@ -0,0 +1,258 @@
1#ifndef __ALPHA_SYSTEM_H
2#error Do not include xchg.h directly!
3#else
4/*
5 * xchg/xchg_local and cmpxchg/cmpxchg_local share the same code
6 * except that local version do not have the expensive memory barrier.
7 * So this file is included twice from asm/system.h.
8 */
9
10/*
11 * Atomic exchange.
12 * Since it can be used to implement critical sections
13 * it must clobber "memory" (also for interrupts in UP).
14 */
15
16static inline unsigned long
17____xchg(_u8, volatile char *m, unsigned long val)
18{
19 unsigned long ret, tmp, addr64;
20
21 __asm__ __volatile__(
22 " andnot %4,7,%3\n"
23 " insbl %1,%4,%1\n"
24 "1: ldq_l %2,0(%3)\n"
25 " extbl %2,%4,%0\n"
26 " mskbl %2,%4,%2\n"
27 " or %1,%2,%2\n"
28 " stq_c %2,0(%3)\n"
29 " beq %2,2f\n"
30 __ASM__MB
31 ".subsection 2\n"
32 "2: br 1b\n"
33 ".previous"
34 : "=&r" (ret), "=&r" (val), "=&r" (tmp), "=&r" (addr64)
35 : "r" ((long)m), "1" (val) : "memory");
36
37 return ret;
38}
39
40static inline unsigned long
41____xchg(_u16, volatile short *m, unsigned long val)
42{
43 unsigned long ret, tmp, addr64;
44
45 __asm__ __volatile__(
46 " andnot %4,7,%3\n"
47 " inswl %1,%4,%1\n"
48 "1: ldq_l %2,0(%3)\n"
49 " extwl %2,%4,%0\n"
50 " mskwl %2,%4,%2\n"
51 " or %1,%2,%2\n"
52 " stq_c %2,0(%3)\n"
53 " beq %2,2f\n"
54 __ASM__MB
55 ".subsection 2\n"
56 "2: br 1b\n"
57 ".previous"
58 : "=&r" (ret), "=&r" (val), "=&r" (tmp), "=&r" (addr64)
59 : "r" ((long)m), "1" (val) : "memory");
60
61 return ret;
62}
63
64static inline unsigned long
65____xchg(_u32, volatile int *m, unsigned long val)
66{
67 unsigned long dummy;
68
69 __asm__ __volatile__(
70 "1: ldl_l %0,%4\n"
71 " bis $31,%3,%1\n"
72 " stl_c %1,%2\n"
73 " beq %1,2f\n"
74 __ASM__MB
75 ".subsection 2\n"
76 "2: br 1b\n"
77 ".previous"
78 : "=&r" (val), "=&r" (dummy), "=m" (*m)
79 : "rI" (val), "m" (*m) : "memory");
80
81 return val;
82}
83
84static inline unsigned long
85____xchg(_u64, volatile long *m, unsigned long val)
86{
87 unsigned long dummy;
88
89 __asm__ __volatile__(
90 "1: ldq_l %0,%4\n"
91 " bis $31,%3,%1\n"
92 " stq_c %1,%2\n"
93 " beq %1,2f\n"
94 __ASM__MB
95 ".subsection 2\n"
96 "2: br 1b\n"
97 ".previous"
98 : "=&r" (val), "=&r" (dummy), "=m" (*m)
99 : "rI" (val), "m" (*m) : "memory");
100
101 return val;
102}
103
104/* This function doesn't exist, so you'll get a linker error
105 if something tries to do an invalid xchg(). */
106extern void __xchg_called_with_bad_pointer(void);
107
108static __always_inline unsigned long
109____xchg(, volatile void *ptr, unsigned long x, int size)
110{
111 switch (size) {
112 case 1:
113 return ____xchg(_u8, ptr, x);
114 case 2:
115 return ____xchg(_u16, ptr, x);
116 case 4:
117 return ____xchg(_u32, ptr, x);
118 case 8:
119 return ____xchg(_u64, ptr, x);
120 }
121 __xchg_called_with_bad_pointer();
122 return x;
123}
124
125/*
126 * Atomic compare and exchange. Compare OLD with MEM, if identical,
127 * store NEW in MEM. Return the initial value in MEM. Success is
128 * indicated by comparing RETURN with OLD.
129 *
130 * The memory barrier should be placed in SMP only when we actually
131 * make the change. If we don't change anything (so if the returned
132 * prev is equal to old) then we aren't acquiring anything new and
133 * we don't need any memory barrier as far I can tell.
134 */
135
136static inline unsigned long
137____cmpxchg(_u8, volatile char *m, unsigned char old, unsigned char new)
138{
139 unsigned long prev, tmp, cmp, addr64;
140
141 __asm__ __volatile__(
142 " andnot %5,7,%4\n"
143 " insbl %1,%5,%1\n"
144 "1: ldq_l %2,0(%4)\n"
145 " extbl %2,%5,%0\n"
146 " cmpeq %0,%6,%3\n"
147 " beq %3,2f\n"
148 " mskbl %2,%5,%2\n"
149 " or %1,%2,%2\n"
150 " stq_c %2,0(%4)\n"
151 " beq %2,3f\n"
152 __ASM__MB
153 "2:\n"
154 ".subsection 2\n"
155 "3: br 1b\n"
156 ".previous"
157 : "=&r" (prev), "=&r" (new), "=&r" (tmp), "=&r" (cmp), "=&r" (addr64)
158 : "r" ((long)m), "Ir" (old), "1" (new) : "memory");
159
160 return prev;
161}
162
163static inline unsigned long
164____cmpxchg(_u16, volatile short *m, unsigned short old, unsigned short new)
165{
166 unsigned long prev, tmp, cmp, addr64;
167
168 __asm__ __volatile__(
169 " andnot %5,7,%4\n"
170 " inswl %1,%5,%1\n"
171 "1: ldq_l %2,0(%4)\n"
172 " extwl %2,%5,%0\n"
173 " cmpeq %0,%6,%3\n"
174 " beq %3,2f\n"
175 " mskwl %2,%5,%2\n"
176 " or %1,%2,%2\n"
177 " stq_c %2,0(%4)\n"
178 " beq %2,3f\n"
179 __ASM__MB
180 "2:\n"
181 ".subsection 2\n"
182 "3: br 1b\n"
183 ".previous"
184 : "=&r" (prev), "=&r" (new), "=&r" (tmp), "=&r" (cmp), "=&r" (addr64)
185 : "r" ((long)m), "Ir" (old), "1" (new) : "memory");
186
187 return prev;
188}
189
190static inline unsigned long
191____cmpxchg(_u32, volatile int *m, int old, int new)
192{
193 unsigned long prev, cmp;
194
195 __asm__ __volatile__(
196 "1: ldl_l %0,%5\n"
197 " cmpeq %0,%3,%1\n"
198 " beq %1,2f\n"
199 " mov %4,%1\n"
200 " stl_c %1,%2\n"
201 " beq %1,3f\n"
202 __ASM__MB
203 "2:\n"
204 ".subsection 2\n"
205 "3: br 1b\n"
206 ".previous"
207 : "=&r"(prev), "=&r"(cmp), "=m"(*m)
208 : "r"((long) old), "r"(new), "m"(*m) : "memory");
209
210 return prev;
211}
212
213static inline unsigned long
214____cmpxchg(_u64, volatile long *m, unsigned long old, unsigned long new)
215{
216 unsigned long prev, cmp;
217
218 __asm__ __volatile__(
219 "1: ldq_l %0,%5\n"
220 " cmpeq %0,%3,%1\n"
221 " beq %1,2f\n"
222 " mov %4,%1\n"
223 " stq_c %1,%2\n"
224 " beq %1,3f\n"
225 __ASM__MB
226 "2:\n"
227 ".subsection 2\n"
228 "3: br 1b\n"
229 ".previous"
230 : "=&r"(prev), "=&r"(cmp), "=m"(*m)
231 : "r"((long) old), "r"(new), "m"(*m) : "memory");
232
233 return prev;
234}
235
236/* This function doesn't exist, so you'll get a linker error
237 if something tries to do an invalid cmpxchg(). */
238extern void __cmpxchg_called_with_bad_pointer(void);
239
240static __always_inline unsigned long
241____cmpxchg(, volatile void *ptr, unsigned long old, unsigned long new,
242 int size)
243{
244 switch (size) {
245 case 1:
246 return ____cmpxchg(_u8, ptr, old, new);
247 case 2:
248 return ____cmpxchg(_u16, ptr, old, new);
249 case 4:
250 return ____cmpxchg(_u32, ptr, old, new);
251 case 8:
252 return ____cmpxchg(_u64, ptr, old, new);
253 }
254 __cmpxchg_called_with_bad_pointer();
255 return old;
256}
257
258#endif
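The comment in the new header spells out the cmpxchg() calling convention: the routines return whatever was in memory, and the caller decides success by comparing that return value with the old value it supplied. Below is a portable userspace sketch of the resulting retry loop, using a GCC builtin with the same return-the-old-value convention in place of the kernel's cmpxchg():

#include <stdio.h>

static long counter;

/* Add delta only while the counter is even; retry if another thread raced us. */
static void add_if_even(long delta)
{
	long old, seen;

	do {
		old = counter;
		if (old & 1)
			return;
		seen = __sync_val_compare_and_swap(&counter, old, old + delta);
	} while (seen != old);	/* value changed under us: try again */
}

int main(void)
{
	add_if_even(10);
	printf("%ld\n", counter);	/* prints 10 */
	return 0;
}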
diff --git a/arch/alpha/kernel/Makefile b/arch/alpha/kernel/Makefile
index b4697759a12..a427538252f 100644
--- a/arch/alpha/kernel/Makefile
+++ b/arch/alpha/kernel/Makefile
@@ -12,7 +12,7 @@ obj-y := entry.o traps.o process.o init_task.o osf_sys.o irq.o \
12 12
13obj-$(CONFIG_VGA_HOSE) += console.o 13obj-$(CONFIG_VGA_HOSE) += console.o
14obj-$(CONFIG_SMP) += smp.o 14obj-$(CONFIG_SMP) += smp.o
15obj-$(CONFIG_PCI) += pci.o pci_iommu.o 15obj-$(CONFIG_PCI) += pci.o pci_iommu.o pci-sysfs.o
16obj-$(CONFIG_SRM_ENV) += srm_env.o 16obj-$(CONFIG_SRM_ENV) += srm_env.o
17obj-$(CONFIG_MODULES) += module.o 17obj-$(CONFIG_MODULES) += module.o
18 18
diff --git a/arch/alpha/kernel/err_ev6.c b/arch/alpha/kernel/err_ev6.c
index 11aee012a8a..985e5c1681a 100644
--- a/arch/alpha/kernel/err_ev6.c
+++ b/arch/alpha/kernel/err_ev6.c
@@ -157,8 +157,8 @@ ev6_parse_cbox(u64 c_addr, u64 c1_syn, u64 c2_syn,
157 err_print_prefix, 157 err_print_prefix,
158 streamname[stream], bitsname[bits], sourcename[source]); 158 streamname[stream], bitsname[bits], sourcename[source]);
159 159
160 printk("%s Address: 0x%016lx\n" 160 printk("%s Address: 0x%016llx\n"
161 " Syndrome[upper.lower]: %02lx.%02lx\n", 161 " Syndrome[upper.lower]: %02llx.%02llx\n",
162 err_print_prefix, 162 err_print_prefix,
163 c_addr, 163 c_addr,
164 c2_syn, c1_syn); 164 c2_syn, c1_syn);
diff --git a/arch/alpha/kernel/err_ev7.c b/arch/alpha/kernel/err_ev7.c
index 68cd493f54c..73770c6ca01 100644
--- a/arch/alpha/kernel/err_ev7.c
+++ b/arch/alpha/kernel/err_ev7.c
@@ -246,13 +246,13 @@ ev7_process_pal_subpacket(struct el_subpacket *header)
246 246
247 switch(header->type) { 247 switch(header->type) {
248 case EL_TYPE__PAL__LOGOUT_FRAME: 248 case EL_TYPE__PAL__LOGOUT_FRAME:
249 printk("%s*** MCHK occurred on LPID %ld (RBOX %lx)\n", 249 printk("%s*** MCHK occurred on LPID %ld (RBOX %llx)\n",
250 err_print_prefix, 250 err_print_prefix,
251 packet->by_type.logout.whami, 251 packet->by_type.logout.whami,
252 packet->by_type.logout.rbox_whami); 252 packet->by_type.logout.rbox_whami);
253 el_print_timestamp(&packet->by_type.logout.timestamp); 253 el_print_timestamp(&packet->by_type.logout.timestamp);
254 printk("%s EXC_ADDR: %016lx\n" 254 printk("%s EXC_ADDR: %016llx\n"
255 " HALT_CODE: %lx\n", 255 " HALT_CODE: %llx\n",
256 err_print_prefix, 256 err_print_prefix,
257 packet->by_type.logout.exc_addr, 257 packet->by_type.logout.exc_addr,
258 packet->by_type.logout.halt_code); 258 packet->by_type.logout.halt_code);
diff --git a/arch/alpha/kernel/err_marvel.c b/arch/alpha/kernel/err_marvel.c
index 413bf37eb09..6bfd243efba 100644
--- a/arch/alpha/kernel/err_marvel.c
+++ b/arch/alpha/kernel/err_marvel.c
@@ -129,7 +129,7 @@ marvel_print_po7_crrct_sym(u64 crrct_sym)
129 129
130 130
131 printk("%s Correctable Error Symptoms:\n" 131 printk("%s Correctable Error Symptoms:\n"
132 "%s Syndrome: 0x%lx\n", 132 "%s Syndrome: 0x%llx\n",
133 err_print_prefix, 133 err_print_prefix,
134 err_print_prefix, EXTRACT(crrct_sym, IO7__PO7_CRRCT_SYM__SYN)); 134 err_print_prefix, EXTRACT(crrct_sym, IO7__PO7_CRRCT_SYM__SYN));
135 marvel_print_err_cyc(EXTRACT(crrct_sym, IO7__PO7_CRRCT_SYM__ERR_CYC)); 135 marvel_print_err_cyc(EXTRACT(crrct_sym, IO7__PO7_CRRCT_SYM__ERR_CYC));
@@ -186,7 +186,7 @@ marvel_print_po7_uncrr_sym(u64 uncrr_sym, u64 valid_mask)
186 uncrr_sym &= valid_mask; 186 uncrr_sym &= valid_mask;
187 187
188 if (EXTRACT(valid_mask, IO7__PO7_UNCRR_SYM__SYN)) 188 if (EXTRACT(valid_mask, IO7__PO7_UNCRR_SYM__SYN))
189 printk("%s Syndrome: 0x%lx\n", 189 printk("%s Syndrome: 0x%llx\n",
190 err_print_prefix, 190 err_print_prefix,
191 EXTRACT(uncrr_sym, IO7__PO7_UNCRR_SYM__SYN)); 191 EXTRACT(uncrr_sym, IO7__PO7_UNCRR_SYM__SYN));
192 192
@@ -307,7 +307,7 @@ marvel_print_po7_ugbge_sym(u64 ugbge_sym)
307 sprintf(opcode_str, "BlkIO"); 307 sprintf(opcode_str, "BlkIO");
308 break; 308 break;
309 default: 309 default:
310 sprintf(opcode_str, "0x%lx\n", 310 sprintf(opcode_str, "0x%llx\n",
311 EXTRACT(ugbge_sym, IO7__PO7_UGBGE_SYM__UPH_OPCODE)); 311 EXTRACT(ugbge_sym, IO7__PO7_UGBGE_SYM__UPH_OPCODE));
312 break; 312 break;
313 } 313 }
@@ -321,7 +321,7 @@ marvel_print_po7_ugbge_sym(u64 ugbge_sym)
321 opcode_str); 321 opcode_str);
322 322
323 if (0xC5 != EXTRACT(ugbge_sym, IO7__PO7_UGBGE_SYM__UPH_OPCODE)) 323 if (0xC5 != EXTRACT(ugbge_sym, IO7__PO7_UGBGE_SYM__UPH_OPCODE))
324 printk("%s Packet Offset 0x%08lx\n", 324 printk("%s Packet Offset 0x%08llx\n",
325 err_print_prefix, 325 err_print_prefix,
326 EXTRACT(ugbge_sym, IO7__PO7_UGBGE_SYM__UPH_PKT_OFF)); 326 EXTRACT(ugbge_sym, IO7__PO7_UGBGE_SYM__UPH_PKT_OFF));
327} 327}
@@ -480,8 +480,8 @@ marvel_print_po7_err_sum(struct ev7_pal_io_subpacket *io)
480 printk("%s Lost Error\n", err_print_prefix); 480 printk("%s Lost Error\n", err_print_prefix);
481 481
482 printk("%s Failing Packet:\n" 482 printk("%s Failing Packet:\n"
483 "%s Cycle 1: %016lx\n" 483 "%s Cycle 1: %016llx\n"
484 "%s Cycle 2: %016lx\n", 484 "%s Cycle 2: %016llx\n",
485 err_print_prefix, 485 err_print_prefix,
486 err_print_prefix, io->po7_err_pkt0, 486 err_print_prefix, io->po7_err_pkt0,
487 err_print_prefix, io->po7_err_pkt1); 487 err_print_prefix, io->po7_err_pkt1);
@@ -515,9 +515,9 @@ marvel_print_pox_tlb_err(u64 tlb_err)
515 if (!(tlb_err & IO7__POX_TLBERR__ERR_VALID)) 515 if (!(tlb_err & IO7__POX_TLBERR__ERR_VALID))
516 return; 516 return;
517 517
518 printk("%s TLB Error on index 0x%lx:\n" 518 printk("%s TLB Error on index 0x%llx:\n"
519 "%s - %s\n" 519 "%s - %s\n"
520 "%s - Addr: 0x%016lx\n", 520 "%s - Addr: 0x%016llx\n",
521 err_print_prefix, 521 err_print_prefix,
522 EXTRACT(tlb_err, IO7__POX_TLBERR__ERR_TLB_PTR), 522 EXTRACT(tlb_err, IO7__POX_TLBERR__ERR_TLB_PTR),
523 err_print_prefix, 523 err_print_prefix,
@@ -579,7 +579,7 @@ marvel_print_pox_spl_cmplt(u64 spl_cmplt)
579 sprintf(message, "Uncorrectable Split Write Data Error"); 579 sprintf(message, "Uncorrectable Split Write Data Error");
580 break; 580 break;
581 default: 581 default:
582 sprintf(message, "%08lx\n", 582 sprintf(message, "%08llx\n",
583 EXTRACT(spl_cmplt, IO7__POX_SPLCMPLT__MESSAGE)); 583 EXTRACT(spl_cmplt, IO7__POX_SPLCMPLT__MESSAGE));
584 break; 584 break;
585 } 585 }
@@ -620,9 +620,9 @@ marvel_print_pox_trans_sum(u64 trans_sum)
620 return; 620 return;
621 621
622 printk("%s Transaction Summary:\n" 622 printk("%s Transaction Summary:\n"
623 "%s Command: 0x%lx - %s\n" 623 "%s Command: 0x%llx - %s\n"
624 "%s Address: 0x%016lx%s\n" 624 "%s Address: 0x%016llx%s\n"
625 "%s PCI-X Master Slot: 0x%lx\n", 625 "%s PCI-X Master Slot: 0x%llx\n",
626 err_print_prefix, 626 err_print_prefix,
627 err_print_prefix, 627 err_print_prefix,
628 EXTRACT(trans_sum, IO7__POX_TRANSUM__PCIX_CMD), 628 EXTRACT(trans_sum, IO7__POX_TRANSUM__PCIX_CMD),
@@ -964,12 +964,12 @@ marvel_process_io_error(struct ev7_lf_subpackets *lf_subpackets, int print)
964 964
965#if 0 965#if 0
966 printk("%s PORT 7 ERROR:\n" 966 printk("%s PORT 7 ERROR:\n"
967 "%s PO7_ERROR_SUM: %016lx\n" 967 "%s PO7_ERROR_SUM: %016llx\n"
968 "%s PO7_UNCRR_SYM: %016lx\n" 968 "%s PO7_UNCRR_SYM: %016llx\n"
969 "%s PO7_CRRCT_SYM: %016lx\n" 969 "%s PO7_CRRCT_SYM: %016llx\n"
970 "%s PO7_UGBGE_SYM: %016lx\n" 970 "%s PO7_UGBGE_SYM: %016llx\n"
971 "%s PO7_ERR_PKT0: %016lx\n" 971 "%s PO7_ERR_PKT0: %016llx\n"
972 "%s PO7_ERR_PKT1: %016lx\n", 972 "%s PO7_ERR_PKT1: %016llx\n",
973 err_print_prefix, 973 err_print_prefix,
974 err_print_prefix, io->po7_error_sum, 974 err_print_prefix, io->po7_error_sum,
975 err_print_prefix, io->po7_uncrr_sym, 975 err_print_prefix, io->po7_uncrr_sym,
@@ -987,12 +987,12 @@ marvel_process_io_error(struct ev7_lf_subpackets *lf_subpackets, int print)
987 if (!MARVEL_IO_ERR_VALID(io->ports[i].pox_err_sum)) 987 if (!MARVEL_IO_ERR_VALID(io->ports[i].pox_err_sum))
988 continue; 988 continue;
989 989
990 printk("%s PID %u PORT %d POx_ERR_SUM: %016lx\n", 990 printk("%s PID %u PORT %d POx_ERR_SUM: %016llx\n",
991 err_print_prefix, 991 err_print_prefix,
992 lf_subpackets->io_pid, i, io->ports[i].pox_err_sum); 992 lf_subpackets->io_pid, i, io->ports[i].pox_err_sum);
993 marvel_print_pox_err(io->ports[i].pox_err_sum, &io->ports[i]); 993 marvel_print_pox_err(io->ports[i].pox_err_sum, &io->ports[i]);
994 994
995 printk("%s [ POx_FIRST_ERR: %016lx ]\n", 995 printk("%s [ POx_FIRST_ERR: %016llx ]\n",
996 err_print_prefix, io->ports[i].pox_first_err); 996 err_print_prefix, io->ports[i].pox_first_err);
997 marvel_print_pox_err(io->ports[i].pox_first_err, 997 marvel_print_pox_err(io->ports[i].pox_first_err,
998 &io->ports[i]); 998 &io->ports[i]);
diff --git a/arch/alpha/kernel/err_titan.c b/arch/alpha/kernel/err_titan.c
index 257449ed15e..c7e28a88d6e 100644
--- a/arch/alpha/kernel/err_titan.c
+++ b/arch/alpha/kernel/err_titan.c
@@ -107,12 +107,12 @@ titan_parse_p_serror(int which, u64 serror, int print)
107 if (!print) 107 if (!print)
108 return status; 108 return status;
109 109
110 printk("%s PChip %d SERROR: %016lx\n", 110 printk("%s PChip %d SERROR: %016llx\n",
111 err_print_prefix, which, serror); 111 err_print_prefix, which, serror);
112 if (serror & TITAN__PCHIP_SERROR__ECCMASK) { 112 if (serror & TITAN__PCHIP_SERROR__ECCMASK) {
113 printk("%s %sorrectable ECC Error:\n" 113 printk("%s %sorrectable ECC Error:\n"
114 " Source: %-6s Command: %-8s Syndrome: 0x%08x\n" 114 " Source: %-6s Command: %-8s Syndrome: 0x%08x\n"
115 " Address: 0x%lx\n", 115 " Address: 0x%llx\n",
116 err_print_prefix, 116 err_print_prefix,
117 (serror & TITAN__PCHIP_SERROR__UECC) ? "Unc" : "C", 117 (serror & TITAN__PCHIP_SERROR__UECC) ? "Unc" : "C",
118 serror_src[EXTRACT(serror, TITAN__PCHIP_SERROR__SRC)], 118 serror_src[EXTRACT(serror, TITAN__PCHIP_SERROR__SRC)],
@@ -223,7 +223,7 @@ titan_parse_p_perror(int which, int port, u64 perror, int print)
223 if (!print) 223 if (!print)
224 return status; 224 return status;
225 225
226 printk("%s PChip %d %cPERROR: %016lx\n", 226 printk("%s PChip %d %cPERROR: %016llx\n",
227 err_print_prefix, which, 227 err_print_prefix, which,
228 port ? 'A' : 'G', perror); 228 port ? 'A' : 'G', perror);
229 if (perror & TITAN__PCHIP_PERROR__IPTPW) 229 if (perror & TITAN__PCHIP_PERROR__IPTPW)
@@ -316,7 +316,7 @@ titan_parse_p_agperror(int which, u64 agperror, int print)
316 addr = EXTRACT(agperror, TITAN__PCHIP_AGPERROR__ADDR) << 3; 316 addr = EXTRACT(agperror, TITAN__PCHIP_AGPERROR__ADDR) << 3;
317 len = EXTRACT(agperror, TITAN__PCHIP_AGPERROR__LEN); 317 len = EXTRACT(agperror, TITAN__PCHIP_AGPERROR__LEN);
318 318
319 printk("%s PChip %d AGPERROR: %016lx\n", err_print_prefix, 319 printk("%s PChip %d AGPERROR: %016llx\n", err_print_prefix,
320 which, agperror); 320 which, agperror);
321 if (agperror & TITAN__PCHIP_AGPERROR__NOWINDOW) 321 if (agperror & TITAN__PCHIP_AGPERROR__NOWINDOW)
322 printk("%s No Window\n", err_print_prefix); 322 printk("%s No Window\n", err_print_prefix);
@@ -597,16 +597,16 @@ privateer_process_680_frame(struct el_common *mchk_header, int print)
597 return status; 597 return status;
598 598
599 /* TODO - decode instead of just dumping... */ 599 /* TODO - decode instead of just dumping... */
600 printk("%s Summary Flags: %016lx\n" 600 printk("%s Summary Flags: %016llx\n"
601 " CChip DIRx: %016lx\n" 601 " CChip DIRx: %016llx\n"
602 " System Management IR: %016lx\n" 602 " System Management IR: %016llx\n"
603 " CPU IR: %016lx\n" 603 " CPU IR: %016llx\n"
604 " Power Supply IR: %016lx\n" 604 " Power Supply IR: %016llx\n"
605 " LM78 Fault Status: %016lx\n" 605 " LM78 Fault Status: %016llx\n"
606 " System Doors: %016lx\n" 606 " System Doors: %016llx\n"
607 " Temperature Warning: %016lx\n" 607 " Temperature Warning: %016llx\n"
608 " Fan Control: %016lx\n" 608 " Fan Control: %016llx\n"
609 " Fatal Power Down Code: %016lx\n", 609 " Fatal Power Down Code: %016llx\n",
610 err_print_prefix, 610 err_print_prefix,
611 emchk->summary, 611 emchk->summary,
612 emchk->c_dirx, 612 emchk->c_dirx,
diff --git a/arch/alpha/kernel/pci-sysfs.c b/arch/alpha/kernel/pci-sysfs.c
new file mode 100644
index 00000000000..6ea822e7f72
--- /dev/null
+++ b/arch/alpha/kernel/pci-sysfs.c
@@ -0,0 +1,366 @@
1/*
2 * arch/alpha/kernel/pci-sysfs.c
3 *
4 * Copyright (C) 2009 Ivan Kokshaysky
5 *
6 * Alpha PCI resource files.
7 *
8 * Loosely based on generic HAVE_PCI_MMAP implementation in
9 * drivers/pci/pci-sysfs.c
10 */
11
12#include <linux/sched.h>
13#include <linux/pci.h>
14
15static int hose_mmap_page_range(struct pci_controller *hose,
16 struct vm_area_struct *vma,
17 enum pci_mmap_state mmap_type, int sparse)
18{
19 unsigned long base;
20
21 if (mmap_type == pci_mmap_mem)
22 base = sparse ? hose->sparse_mem_base : hose->dense_mem_base;
23 else
24 base = sparse ? hose->sparse_io_base : hose->dense_io_base;
25
26 vma->vm_pgoff += base >> PAGE_SHIFT;
27 vma->vm_flags |= (VM_IO | VM_RESERVED);
28
29 return io_remap_pfn_range(vma, vma->vm_start, vma->vm_pgoff,
30 vma->vm_end - vma->vm_start,
31 vma->vm_page_prot);
32}
33
34static int __pci_mmap_fits(struct pci_dev *pdev, int num,
35 struct vm_area_struct *vma, int sparse)
36{
37 unsigned long nr, start, size;
38 int shift = sparse ? 5 : 0;
39
40 nr = (vma->vm_end - vma->vm_start) >> PAGE_SHIFT;
41 start = vma->vm_pgoff;
42 size = ((pci_resource_len(pdev, num) - 1) >> (PAGE_SHIFT - shift)) + 1;
43
44 if (start < size && size - start >= nr)
45 return 1;
46 WARN(1, "process \"%s\" tried to map%s 0x%08lx-0x%08lx on %s BAR %d "
47 "(size 0x%08lx)\n",
48 current->comm, sparse ? " sparse" : "", start, start + nr,
49 pci_name(pdev), num, size);
50 return 0;
51}
52
53/**
54 * pci_mmap_resource - map a PCI resource into user memory space
55 * @kobj: kobject for mapping
56 * @attr: struct bin_attribute for the file being mapped
57 * @vma: struct vm_area_struct passed into the mmap
58 * @sparse: address space type
59 *
60 * Use the bus mapping routines to map a PCI resource into userspace.
61 */
62static int pci_mmap_resource(struct kobject *kobj, struct bin_attribute *attr,
63 struct vm_area_struct *vma, int sparse)
64{
65 struct pci_dev *pdev = to_pci_dev(container_of(kobj,
66 struct device, kobj));
67 struct resource *res = (struct resource *)attr->private;
68 enum pci_mmap_state mmap_type;
69 struct pci_bus_region bar;
70 int i;
71
72 for (i = 0; i < PCI_ROM_RESOURCE; i++)
73 if (res == &pdev->resource[i])
74 break;
75 if (i >= PCI_ROM_RESOURCE)
76 return -ENODEV;
77
78 if (!__pci_mmap_fits(pdev, i, vma, sparse))
79 return -EINVAL;
80
81 if (iomem_is_exclusive(res->start))
82 return -EINVAL;
83
84 pcibios_resource_to_bus(pdev, &bar, res);
85 vma->vm_pgoff += bar.start >> (PAGE_SHIFT - (sparse ? 5 : 0));
86 mmap_type = res->flags & IORESOURCE_MEM ? pci_mmap_mem : pci_mmap_io;
87
88 return hose_mmap_page_range(pdev->sysdata, vma, mmap_type, sparse);
89}
90
91static int pci_mmap_resource_sparse(struct kobject *kobj,
92 struct bin_attribute *attr,
93 struct vm_area_struct *vma)
94{
95 return pci_mmap_resource(kobj, attr, vma, 1);
96}
97
98static int pci_mmap_resource_dense(struct kobject *kobj,
99 struct bin_attribute *attr,
100 struct vm_area_struct *vma)
101{
102 return pci_mmap_resource(kobj, attr, vma, 0);
103}
104
105/**
106 * pci_remove_resource_files - cleanup resource files
107 * @dev: dev to cleanup
108 *
109 * If we created resource files for @dev, remove them from sysfs and
110 * free their resources.
111 */
112void pci_remove_resource_files(struct pci_dev *pdev)
113{
114 int i;
115
116 for (i = 0; i < PCI_ROM_RESOURCE; i++) {
117 struct bin_attribute *res_attr;
118
119 res_attr = pdev->res_attr[i];
120 if (res_attr) {
121 sysfs_remove_bin_file(&pdev->dev.kobj, res_attr);
122 kfree(res_attr);
123 }
124
125 res_attr = pdev->res_attr_wc[i];
126 if (res_attr) {
127 sysfs_remove_bin_file(&pdev->dev.kobj, res_attr);
128 kfree(res_attr);
129 }
130 }
131}
132
133static int sparse_mem_mmap_fits(struct pci_dev *pdev, int num)
134{
135 struct pci_bus_region bar;
136 struct pci_controller *hose = pdev->sysdata;
137 long dense_offset;
138 unsigned long sparse_size;
139
140 pcibios_resource_to_bus(pdev, &bar, &pdev->resource[num]);
141
142 /* All core logic chips have 4G sparse address space, except
143 CIA which has 16G (see xxx_SPARSE_MEM and xxx_DENSE_MEM
144 definitions in asm/core_xxx.h files). This corresponds
145 to 128M or 512M of the bus space. */
146 dense_offset = (long)(hose->dense_mem_base - hose->sparse_mem_base);
147 sparse_size = dense_offset >= 0x400000000UL ? 0x20000000 : 0x8000000;
148
149 return bar.end < sparse_size;
150}
151
152static int pci_create_one_attr(struct pci_dev *pdev, int num, char *name,
153 char *suffix, struct bin_attribute *res_attr,
154 unsigned long sparse)
155{
156 size_t size = pci_resource_len(pdev, num);
157
158 sprintf(name, "resource%d%s", num, suffix);
159 res_attr->mmap = sparse ? pci_mmap_resource_sparse :
160 pci_mmap_resource_dense;
161 res_attr->attr.name = name;
162 res_attr->attr.mode = S_IRUSR | S_IWUSR;
163 res_attr->size = sparse ? size << 5 : size;
164 res_attr->private = &pdev->resource[num];
165 return sysfs_create_bin_file(&pdev->dev.kobj, res_attr);
166}
167
168static int pci_create_attr(struct pci_dev *pdev, int num)
169{
170 /* allocate attribute structure, piggyback attribute name */
171 int retval, nlen1, nlen2 = 0, res_count = 1;
172 unsigned long sparse_base, dense_base;
173 struct bin_attribute *attr;
174 struct pci_controller *hose = pdev->sysdata;
175 char *suffix, *attr_name;
176
177 suffix = ""; /* Assume bwx machine, normal resourceN files. */
178 nlen1 = 10;
179
180 if (pdev->resource[num].flags & IORESOURCE_MEM) {
181 sparse_base = hose->sparse_mem_base;
182 dense_base = hose->dense_mem_base;
183 if (sparse_base && !sparse_mem_mmap_fits(pdev, num)) {
184 sparse_base = 0;
185 suffix = "_dense";
186 nlen1 = 16; /* resourceN_dense */
187 }
188 } else {
189 sparse_base = hose->sparse_io_base;
190 dense_base = hose->dense_io_base;
191 }
192
193 if (sparse_base) {
194 suffix = "_sparse";
195 nlen1 = 17;
196 if (dense_base) {
197 nlen2 = 16; /* resourceN_dense */
198 res_count = 2;
199 }
200 }
201
202 attr = kzalloc(sizeof(*attr) * res_count + nlen1 + nlen2, GFP_ATOMIC);
203 if (!attr)
204 return -ENOMEM;
205
206 /* Create bwx, sparse or single dense file */
207 attr_name = (char *)(attr + res_count);
208 pdev->res_attr[num] = attr;
209 retval = pci_create_one_attr(pdev, num, attr_name, suffix, attr,
210 sparse_base);
211 if (retval || res_count == 1)
212 return retval;
213
214 /* Create dense file */
215 attr_name += nlen1;
216 attr++;
217 pdev->res_attr_wc[num] = attr;
218 return pci_create_one_attr(pdev, num, attr_name, "_dense", attr, 0);
219}
220
221/**
222 * pci_create_resource_files - create resource files in sysfs for @dev
223 * @dev: dev in question
224 *
225 * Walk the resources in @dev creating files for each resource available.
226 */
227int pci_create_resource_files(struct pci_dev *pdev)
228{
229 int i;
230 int retval;
231
232 /* Expose the PCI resources from this device as files */
233 for (i = 0; i < PCI_ROM_RESOURCE; i++) {
234
235 /* skip empty resources */
236 if (!pci_resource_len(pdev, i))
237 continue;
238
239 retval = pci_create_attr(pdev, i);
240 if (retval) {
241 pci_remove_resource_files(pdev);
242 return retval;
243 }
244 }
245 return 0;
246}
247
248/* Legacy I/O bus mapping stuff. */
249
250static int __legacy_mmap_fits(struct pci_controller *hose,
251 struct vm_area_struct *vma,
252 unsigned long res_size, int sparse)
253{
254 unsigned long nr, start, size;
255
256 nr = (vma->vm_end - vma->vm_start) >> PAGE_SHIFT;
257 start = vma->vm_pgoff;
258 size = ((res_size - 1) >> PAGE_SHIFT) + 1;
259
260 if (start < size && size - start >= nr)
261 return 1;
262 WARN(1, "process \"%s\" tried to map%s 0x%08lx-0x%08lx on hose %d "
263 "(size 0x%08lx)\n",
264 current->comm, sparse ? " sparse" : "", start, start + nr,
265 hose->index, size);
266 return 0;
267}
268
269static inline int has_sparse(struct pci_controller *hose,
270 enum pci_mmap_state mmap_type)
271{
272 unsigned long base;
273
274 base = (mmap_type == pci_mmap_mem) ? hose->sparse_mem_base :
275 hose->sparse_io_base;
276
277 return base != 0;
278}
279
280int pci_mmap_legacy_page_range(struct pci_bus *bus, struct vm_area_struct *vma,
281 enum pci_mmap_state mmap_type)
282{
283 struct pci_controller *hose = bus->sysdata;
284 int sparse = has_sparse(hose, mmap_type);
285 unsigned long res_size;
286
287 res_size = (mmap_type == pci_mmap_mem) ? bus->legacy_mem->size :
288 bus->legacy_io->size;
289 if (!__legacy_mmap_fits(hose, vma, res_size, sparse))
290 return -EINVAL;
291
292 return hose_mmap_page_range(hose, vma, mmap_type, sparse);
293}
294
295/**
296 * pci_adjust_legacy_attr - adjustment of legacy file attributes
297 * @b: bus to create files under
298 * @mmap_type: I/O port or memory
299 *
300 * Adjust file name and size for sparse mappings.
301 */
302void pci_adjust_legacy_attr(struct pci_bus *bus, enum pci_mmap_state mmap_type)
303{
304 struct pci_controller *hose = bus->sysdata;
305
306 if (!has_sparse(hose, mmap_type))
307 return;
308
309 if (mmap_type == pci_mmap_mem) {
310 bus->legacy_mem->attr.name = "legacy_mem_sparse";
311 bus->legacy_mem->size <<= 5;
312 } else {
313 bus->legacy_io->attr.name = "legacy_io_sparse";
314 bus->legacy_io->size <<= 5;
315 }
316 return;
317}
318
319/* Legacy I/O bus read/write functions */
320int pci_legacy_read(struct pci_bus *bus, loff_t port, u32 *val, size_t size)
321{
322 struct pci_controller *hose = bus->sysdata;
323
324 port += hose->io_space->start;
325
326 switch(size) {
327 case 1:
328 *((u8 *)val) = inb(port);
329 return 1;
330 case 2:
331 if (port & 1)
332 return -EINVAL;
333 *((u16 *)val) = inw(port);
334 return 2;
335 case 4:
336 if (port & 3)
337 return -EINVAL;
338 *((u32 *)val) = inl(port);
339 return 4;
340 }
341 return -EINVAL;
342}
343
344int pci_legacy_write(struct pci_bus *bus, loff_t port, u32 val, size_t size)
345{
346 struct pci_controller *hose = bus->sysdata;
347
348 port += hose->io_space->start;
349
350 switch(size) {
351 case 1:
352 outb(port, val);
353 return 1;
354 case 2:
355 if (port & 1)
356 return -EINVAL;
357 outw(port, val);
358 return 2;
359 case 4:
360 if (port & 3)
361 return -EINVAL;
362 outl(port, val);
363 return 4;
364 }
365 return -EINVAL;
366}
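The new pci-sysfs.c exposes each BAR as resourceN on bwx machines, or as resourceN_sparse/resourceN_dense where the hose has sparse address space, and the mmap handlers above translate the file offset into the hose's sparse or dense window. A userspace sketch of consuming one of these files follows; the device address is an assumption for illustration, and the BAR is assumed to be at least one page of dense space.

#include <fcntl.h>
#include <stdio.h>
#include <sys/mman.h>
#include <unistd.h>

int main(void)
{
	const char *path = "/sys/bus/pci/devices/0000:00:05.0/resource0_dense";
	volatile unsigned int *bar;
	int fd = open(path, O_RDWR);

	if (fd < 0)
		return perror("open"), 1;
	/* Offset 0 maps the start of the BAR through hose_mmap_page_range(). */
	bar = mmap(NULL, 4096, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
	if (bar == MAP_FAILED)
		return perror("mmap"), 1;
	printf("first register: 0x%08x\n", bar[0]);
	munmap((void *)bar, 4096);
	close(fd);
	return 0;
}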
diff --git a/arch/alpha/kernel/pci.c b/arch/alpha/kernel/pci.c
index a3b93881140..a91ba28999b 100644
--- a/arch/alpha/kernel/pci.c
+++ b/arch/alpha/kernel/pci.c
@@ -168,7 +168,7 @@ pcibios_align_resource(void *data, struct resource *res,
168 */ 168 */
169 169
170 /* Align to multiple of size of minimum base. */ 170 /* Align to multiple of size of minimum base. */
171 alignto = max(0x1000UL, align); 171 alignto = max_t(resource_size_t, 0x1000, align);
172 start = ALIGN(start, alignto); 172 start = ALIGN(start, alignto);
173 if (hose->sparse_mem_base && size <= 7 * 16*MB) { 173 if (hose->sparse_mem_base && size <= 7 * 16*MB) {
174 if (((start / (16*MB)) & 0x7) == 0) { 174 if (((start / (16*MB)) & 0x7) == 0) {
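The kernel's max() refuses operands of different C types, and with u64 now unsigned long long, the resource_size_t argument presumably no longer matches the unsigned long constant 0x1000UL, so the hunk above switches to max_t(), which casts both operands to an explicitly named type first. A simplified stand-alone sketch of the two macros (names mirror the kernel's, bodies are reduced for illustration):

#include <stdio.h>

#define max(a, b) ({					\
	__typeof__(a) _a = (a);				\
	__typeof__(b) _b = (b);				\
	(void)(&_a == &_b);	/* warns if a and b differ in type */ \
	_a > _b ? _a : _b; })

#define max_t(type, a, b) ({				\
	type _ta = (a);					\
	type _tb = (b);					\
	_ta > _tb ? _ta : _tb; })

int main(void)
{
	unsigned long long align = 0x200000;	/* resource_size_t-like */

	/* max(0x1000UL, align) triggers "comparison of distinct pointer types" */
	printf("%#llx\n", max_t(unsigned long long, 0x1000, align));
	return 0;
}

max_t(resource_size_t, 0x1000, align) in the patch does the same thing with the kernel's real definitions.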
diff --git a/arch/alpha/kernel/pci_iommu.c b/arch/alpha/kernel/pci_iommu.c
index b9094da05d7..bfb880af959 100644
--- a/arch/alpha/kernel/pci_iommu.c
+++ b/arch/alpha/kernel/pci_iommu.c
@@ -247,7 +247,7 @@ pci_map_single_1(struct pci_dev *pdev, void *cpu_addr, size_t size,
247 && paddr + size <= __direct_map_size) { 247 && paddr + size <= __direct_map_size) {
248 ret = paddr + __direct_map_base; 248 ret = paddr + __direct_map_base;
249 249
250 DBGA2("pci_map_single: [%p,%lx] -> direct %lx from %p\n", 250 DBGA2("pci_map_single: [%p,%zx] -> direct %llx from %p\n",
251 cpu_addr, size, ret, __builtin_return_address(0)); 251 cpu_addr, size, ret, __builtin_return_address(0));
252 252
253 return ret; 253 return ret;
@@ -258,7 +258,7 @@ pci_map_single_1(struct pci_dev *pdev, void *cpu_addr, size_t size,
258 if (dac_allowed) { 258 if (dac_allowed) {
259 ret = paddr + alpha_mv.pci_dac_offset; 259 ret = paddr + alpha_mv.pci_dac_offset;
260 260
261 DBGA2("pci_map_single: [%p,%lx] -> DAC %lx from %p\n", 261 DBGA2("pci_map_single: [%p,%zx] -> DAC %llx from %p\n",
262 cpu_addr, size, ret, __builtin_return_address(0)); 262 cpu_addr, size, ret, __builtin_return_address(0));
263 263
264 return ret; 264 return ret;
@@ -299,7 +299,7 @@ pci_map_single_1(struct pci_dev *pdev, void *cpu_addr, size_t size,
299 ret = arena->dma_base + dma_ofs * PAGE_SIZE; 299 ret = arena->dma_base + dma_ofs * PAGE_SIZE;
300 ret += (unsigned long)cpu_addr & ~PAGE_MASK; 300 ret += (unsigned long)cpu_addr & ~PAGE_MASK;
301 301
302 DBGA2("pci_map_single: [%p,%lx] np %ld -> sg %lx from %p\n", 302 DBGA2("pci_map_single: [%p,%zx] np %ld -> sg %llx from %p\n",
303 cpu_addr, size, npages, ret, __builtin_return_address(0)); 303 cpu_addr, size, npages, ret, __builtin_return_address(0));
304 304
305 return ret; 305 return ret;
@@ -355,14 +355,14 @@ pci_unmap_single(struct pci_dev *pdev, dma_addr_t dma_addr, size_t size,
355 && dma_addr < __direct_map_base + __direct_map_size) { 355 && dma_addr < __direct_map_base + __direct_map_size) {
356 /* Nothing to do. */ 356 /* Nothing to do. */
357 357
358 DBGA2("pci_unmap_single: direct [%lx,%lx] from %p\n", 358 DBGA2("pci_unmap_single: direct [%llx,%zx] from %p\n",
359 dma_addr, size, __builtin_return_address(0)); 359 dma_addr, size, __builtin_return_address(0));
360 360
361 return; 361 return;
362 } 362 }
363 363
364 if (dma_addr > 0xffffffff) { 364 if (dma_addr > 0xffffffff) {
365 DBGA2("pci64_unmap_single: DAC [%lx,%lx] from %p\n", 365 DBGA2("pci64_unmap_single: DAC [%llx,%zx] from %p\n",
366 dma_addr, size, __builtin_return_address(0)); 366 dma_addr, size, __builtin_return_address(0));
367 return; 367 return;
368 } 368 }
@@ -373,9 +373,9 @@ pci_unmap_single(struct pci_dev *pdev, dma_addr_t dma_addr, size_t size,
373 373
374 dma_ofs = (dma_addr - arena->dma_base) >> PAGE_SHIFT; 374 dma_ofs = (dma_addr - arena->dma_base) >> PAGE_SHIFT;
375 if (dma_ofs * PAGE_SIZE >= arena->size) { 375 if (dma_ofs * PAGE_SIZE >= arena->size) {
376 printk(KERN_ERR "Bogus pci_unmap_single: dma_addr %lx " 376 printk(KERN_ERR "Bogus pci_unmap_single: dma_addr %llx "
377 " base %lx size %x\n", dma_addr, arena->dma_base, 377 " base %llx size %x\n",
378 arena->size); 378 dma_addr, arena->dma_base, arena->size);
379 return; 379 return;
380 BUG(); 380 BUG();
381 } 381 }
@@ -394,7 +394,7 @@ pci_unmap_single(struct pci_dev *pdev, dma_addr_t dma_addr, size_t size,
394 394
395 spin_unlock_irqrestore(&arena->lock, flags); 395 spin_unlock_irqrestore(&arena->lock, flags);
396 396
397 DBGA2("pci_unmap_single: sg [%lx,%lx] np %ld from %p\n", 397 DBGA2("pci_unmap_single: sg [%llx,%zx] np %ld from %p\n",
398 dma_addr, size, npages, __builtin_return_address(0)); 398 dma_addr, size, npages, __builtin_return_address(0));
399} 399}
400EXPORT_SYMBOL(pci_unmap_single); 400EXPORT_SYMBOL(pci_unmap_single);
@@ -444,7 +444,7 @@ try_again:
444 goto try_again; 444 goto try_again;
445 } 445 }
446 446
447 DBGA2("pci_alloc_consistent: %lx -> [%p,%x] from %p\n", 447 DBGA2("pci_alloc_consistent: %zx -> [%p,%llx] from %p\n",
448 size, cpu_addr, *dma_addrp, __builtin_return_address(0)); 448 size, cpu_addr, *dma_addrp, __builtin_return_address(0));
449 449
450 return cpu_addr; 450 return cpu_addr;
@@ -464,7 +464,7 @@ pci_free_consistent(struct pci_dev *pdev, size_t size, void *cpu_addr,
464 pci_unmap_single(pdev, dma_addr, size, PCI_DMA_BIDIRECTIONAL); 464 pci_unmap_single(pdev, dma_addr, size, PCI_DMA_BIDIRECTIONAL);
465 free_pages((unsigned long)cpu_addr, get_order(size)); 465 free_pages((unsigned long)cpu_addr, get_order(size));
466 466
467 DBGA2("pci_free_consistent: [%x,%lx] from %p\n", 467 DBGA2("pci_free_consistent: [%llx,%zx] from %p\n",
468 dma_addr, size, __builtin_return_address(0)); 468 dma_addr, size, __builtin_return_address(0));
469} 469}
470EXPORT_SYMBOL(pci_free_consistent); 470EXPORT_SYMBOL(pci_free_consistent);
@@ -551,7 +551,7 @@ sg_fill(struct device *dev, struct scatterlist *leader, struct scatterlist *end,
551 out->dma_address = paddr + __direct_map_base; 551 out->dma_address = paddr + __direct_map_base;
552 out->dma_length = size; 552 out->dma_length = size;
553 553
554 DBGA(" sg_fill: [%p,%lx] -> direct %lx\n", 554 DBGA(" sg_fill: [%p,%lx] -> direct %llx\n",
555 __va(paddr), size, out->dma_address); 555 __va(paddr), size, out->dma_address);
556 556
557 return 0; 557 return 0;
@@ -563,7 +563,7 @@ sg_fill(struct device *dev, struct scatterlist *leader, struct scatterlist *end,
563 out->dma_address = paddr + alpha_mv.pci_dac_offset; 563 out->dma_address = paddr + alpha_mv.pci_dac_offset;
564 out->dma_length = size; 564 out->dma_length = size;
565 565
566 DBGA(" sg_fill: [%p,%lx] -> DAC %lx\n", 566 DBGA(" sg_fill: [%p,%lx] -> DAC %llx\n",
567 __va(paddr), size, out->dma_address); 567 __va(paddr), size, out->dma_address);
568 568
569 return 0; 569 return 0;
@@ -589,7 +589,7 @@ sg_fill(struct device *dev, struct scatterlist *leader, struct scatterlist *end,
589 out->dma_address = arena->dma_base + dma_ofs*PAGE_SIZE + paddr; 589 out->dma_address = arena->dma_base + dma_ofs*PAGE_SIZE + paddr;
590 out->dma_length = size; 590 out->dma_length = size;
591 591
592 DBGA(" sg_fill: [%p,%lx] -> sg %lx np %ld\n", 592 DBGA(" sg_fill: [%p,%lx] -> sg %llx np %ld\n",
593 __va(paddr), size, out->dma_address, npages); 593 __va(paddr), size, out->dma_address, npages);
594 594
595 /* All virtually contiguous. We need to find the length of each 595 /* All virtually contiguous. We need to find the length of each
@@ -752,7 +752,7 @@ pci_unmap_sg(struct pci_dev *pdev, struct scatterlist *sg, int nents,
752 752
753 if (addr > 0xffffffff) { 753 if (addr > 0xffffffff) {
754 /* It's a DAC address -- nothing to do. */ 754 /* It's a DAC address -- nothing to do. */
755 DBGA(" (%ld) DAC [%lx,%lx]\n", 755 DBGA(" (%ld) DAC [%llx,%zx]\n",
756 sg - end + nents, addr, size); 756 sg - end + nents, addr, size);
757 continue; 757 continue;
758 } 758 }
@@ -760,12 +760,12 @@ pci_unmap_sg(struct pci_dev *pdev, struct scatterlist *sg, int nents,
760 if (addr >= __direct_map_base 760 if (addr >= __direct_map_base
761 && addr < __direct_map_base + __direct_map_size) { 761 && addr < __direct_map_base + __direct_map_size) {
762 /* Nothing to do. */ 762 /* Nothing to do. */
763 DBGA(" (%ld) direct [%lx,%lx]\n", 763 DBGA(" (%ld) direct [%llx,%zx]\n",
764 sg - end + nents, addr, size); 764 sg - end + nents, addr, size);
765 continue; 765 continue;
766 } 766 }
767 767
768 DBGA(" (%ld) sg [%lx,%lx]\n", 768 DBGA(" (%ld) sg [%llx,%zx]\n",
769 sg - end + nents, addr, size); 769 sg - end + nents, addr, size);
770 770
771 npages = iommu_num_pages(addr, size, PAGE_SIZE); 771 npages = iommu_num_pages(addr, size, PAGE_SIZE);
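The DBGA/DBGA2 format-string fixes above follow the usual printk conventions once dma_addr_t is 64-bit: size_t arguments take %zx/%zu and dma_addr_t is printed with %llx. A hedged sketch of the pattern (dbg_mapping is an illustrative name, not part of this patch):

	/* Print a size_t with %zx and a dma_addr_t with %llx; the cast keeps
	 * the format portable if dma_addr_t is narrower than u64 elsewhere. */
	static void dbg_mapping(void *cpu_addr, size_t size, dma_addr_t dma)
	{
		printk(KERN_DEBUG "map [%p,%zx] -> %llx\n",
		       cpu_addr, size, (unsigned long long)dma);
	}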
diff --git a/arch/alpha/kernel/proto.h b/arch/alpha/kernel/proto.h
index fe14c6747cd..567f2598d09 100644
--- a/arch/alpha/kernel/proto.h
+++ b/arch/alpha/kernel/proto.h
@@ -20,7 +20,7 @@ struct pci_controller;
20extern struct pci_ops apecs_pci_ops; 20extern struct pci_ops apecs_pci_ops;
21extern void apecs_init_arch(void); 21extern void apecs_init_arch(void);
22extern void apecs_pci_clr_err(void); 22extern void apecs_pci_clr_err(void);
23extern void apecs_machine_check(u64, u64); 23extern void apecs_machine_check(unsigned long vector, unsigned long la_ptr);
24extern void apecs_pci_tbi(struct pci_controller *, dma_addr_t, dma_addr_t); 24extern void apecs_pci_tbi(struct pci_controller *, dma_addr_t, dma_addr_t);
25 25
26/* core_cia.c */ 26/* core_cia.c */
@@ -29,7 +29,7 @@ extern void cia_init_pci(void);
29extern void cia_init_arch(void); 29extern void cia_init_arch(void);
30extern void pyxis_init_arch(void); 30extern void pyxis_init_arch(void);
31extern void cia_kill_arch(int); 31extern void cia_kill_arch(int);
32extern void cia_machine_check(u64, u64); 32extern void cia_machine_check(unsigned long vector, unsigned long la_ptr);
33extern void cia_pci_tbi(struct pci_controller *, dma_addr_t, dma_addr_t); 33extern void cia_pci_tbi(struct pci_controller *, dma_addr_t, dma_addr_t);
34 34
35/* core_irongate.c */ 35/* core_irongate.c */
@@ -42,7 +42,7 @@ extern void irongate_machine_check(u64, u64);
42/* core_lca.c */ 42/* core_lca.c */
43extern struct pci_ops lca_pci_ops; 43extern struct pci_ops lca_pci_ops;
44extern void lca_init_arch(void); 44extern void lca_init_arch(void);
45extern void lca_machine_check(u64, u64); 45extern void lca_machine_check(unsigned long vector, unsigned long la_ptr);
46extern void lca_pci_tbi(struct pci_controller *, dma_addr_t, dma_addr_t); 46extern void lca_pci_tbi(struct pci_controller *, dma_addr_t, dma_addr_t);
47 47
48/* core_marvel.c */ 48/* core_marvel.c */
@@ -64,7 +64,7 @@ void io7_clear_errors(struct io7 *io7);
64extern struct pci_ops mcpcia_pci_ops; 64extern struct pci_ops mcpcia_pci_ops;
65extern void mcpcia_init_arch(void); 65extern void mcpcia_init_arch(void);
66extern void mcpcia_init_hoses(void); 66extern void mcpcia_init_hoses(void);
67extern void mcpcia_machine_check(u64, u64); 67extern void mcpcia_machine_check(unsigned long vector, unsigned long la_ptr);
68extern void mcpcia_pci_tbi(struct pci_controller *, dma_addr_t, dma_addr_t); 68extern void mcpcia_pci_tbi(struct pci_controller *, dma_addr_t, dma_addr_t);
69 69
70/* core_polaris.c */ 70/* core_polaris.c */
@@ -72,14 +72,14 @@ extern struct pci_ops polaris_pci_ops;
72extern int polaris_read_config_dword(struct pci_dev *, int, u32 *); 72extern int polaris_read_config_dword(struct pci_dev *, int, u32 *);
73extern int polaris_write_config_dword(struct pci_dev *, int, u32); 73extern int polaris_write_config_dword(struct pci_dev *, int, u32);
74extern void polaris_init_arch(void); 74extern void polaris_init_arch(void);
75extern void polaris_machine_check(u64, u64); 75extern void polaris_machine_check(unsigned long vector, unsigned long la_ptr);
76#define polaris_pci_tbi ((void *)0) 76#define polaris_pci_tbi ((void *)0)
77 77
78/* core_t2.c */ 78/* core_t2.c */
79extern struct pci_ops t2_pci_ops; 79extern struct pci_ops t2_pci_ops;
80extern void t2_init_arch(void); 80extern void t2_init_arch(void);
81extern void t2_kill_arch(int); 81extern void t2_kill_arch(int);
82extern void t2_machine_check(u64, u64); 82extern void t2_machine_check(unsigned long vector, unsigned long la_ptr);
83extern void t2_pci_tbi(struct pci_controller *, dma_addr_t, dma_addr_t); 83extern void t2_pci_tbi(struct pci_controller *, dma_addr_t, dma_addr_t);
84 84
85/* core_titan.c */ 85/* core_titan.c */
@@ -94,14 +94,14 @@ extern struct _alpha_agp_info *titan_agp_info(void);
94extern struct pci_ops tsunami_pci_ops; 94extern struct pci_ops tsunami_pci_ops;
95extern void tsunami_init_arch(void); 95extern void tsunami_init_arch(void);
96extern void tsunami_kill_arch(int); 96extern void tsunami_kill_arch(int);
97extern void tsunami_machine_check(u64, u64); 97extern void tsunami_machine_check(unsigned long vector, unsigned long la_ptr);
98extern void tsunami_pci_tbi(struct pci_controller *, dma_addr_t, dma_addr_t); 98extern void tsunami_pci_tbi(struct pci_controller *, dma_addr_t, dma_addr_t);
99 99
100/* core_wildfire.c */ 100/* core_wildfire.c */
101extern struct pci_ops wildfire_pci_ops; 101extern struct pci_ops wildfire_pci_ops;
102extern void wildfire_init_arch(void); 102extern void wildfire_init_arch(void);
103extern void wildfire_kill_arch(int); 103extern void wildfire_kill_arch(int);
104extern void wildfire_machine_check(u64, u64); 104extern void wildfire_machine_check(unsigned long vector, unsigned long la_ptr);
105extern void wildfire_pci_tbi(struct pci_controller *, dma_addr_t, dma_addr_t); 105extern void wildfire_pci_tbi(struct pci_controller *, dma_addr_t, dma_addr_t);
106extern int wildfire_pa_to_nid(unsigned long); 106extern int wildfire_pa_to_nid(unsigned long);
107extern int wildfire_cpuid_to_nid(int); 107extern int wildfire_cpuid_to_nid(int);
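Giving every *_machine_check() handler the identical (unsigned long vector, unsigned long la_ptr) signature, as above, means they can all sit behind one function-pointer type. A small sketch under that assumption (the typedef and variable are illustrative, not from this patch):

	/* One pointer type now fits any of the handlers declared above. */
	typedef void (*machine_check_fn)(unsigned long vector,
					 unsigned long la_ptr);
	static machine_check_fn mcheck = cia_machine_check;	/* example pick */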
diff --git a/arch/alpha/kernel/setup.c b/arch/alpha/kernel/setup.c
index 02bee6983ce..80df86cd746 100644
--- a/arch/alpha/kernel/setup.c
+++ b/arch/alpha/kernel/setup.c
@@ -1255,7 +1255,7 @@ show_cpuinfo(struct seq_file *f, void *slot)
1255 platform_string(), nr_processors); 1255 platform_string(), nr_processors);
1256 1256
1257#ifdef CONFIG_SMP 1257#ifdef CONFIG_SMP
1258 seq_printf(f, "cpus active\t\t: %d\n" 1258 seq_printf(f, "cpus active\t\t: %u\n"
1259 "cpu active mask\t\t: %016lx\n", 1259 "cpu active mask\t\t: %016lx\n",
1260 num_online_cpus(), cpus_addr(cpu_possible_map)[0]); 1260 num_online_cpus(), cpus_addr(cpu_possible_map)[0]);
1261#endif 1261#endif
diff --git a/arch/alpha/kernel/smc37c669.c b/arch/alpha/kernel/smc37c669.c
index fd467b207f0..bca5bda90cd 100644
--- a/arch/alpha/kernel/smc37c669.c
+++ b/arch/alpha/kernel/smc37c669.c
@@ -2542,8 +2542,8 @@ void __init SMC669_Init ( int index )
2542 SMC37c669_display_device_info( ); 2542 SMC37c669_display_device_info( );
2543#endif 2543#endif
2544 local_irq_restore(flags); 2544 local_irq_restore(flags);
2545 printk( "SMC37c669 Super I/O Controller found @ 0x%lx\n", 2545 printk( "SMC37c669 Super I/O Controller found @ 0x%p\n",
2546 (unsigned long) SMC_base ); 2546 SMC_base );
2547 } 2547 }
2548 else { 2548 else {
2549 local_irq_restore(flags); 2549 local_irq_restore(flags);
diff --git a/arch/alpha/kernel/sys_jensen.c b/arch/alpha/kernel/sys_jensen.c
index e2516f9a896..2b5caf3d9b1 100644
--- a/arch/alpha/kernel/sys_jensen.c
+++ b/arch/alpha/kernel/sys_jensen.c
@@ -244,12 +244,11 @@ jensen_init_arch(void)
244} 244}
245 245
246static void 246static void
247jensen_machine_check (u64 vector, u64 la) 247jensen_machine_check(unsigned long vector, unsigned long la)
248{ 248{
249 printk(KERN_CRIT "Machine check\n"); 249 printk(KERN_CRIT "Machine check\n");
250} 250}
251 251
252
253/* 252/*
254 * The System Vector 253 * The System Vector
255 */ 254 */
diff --git a/arch/alpha/kernel/sys_sable.c b/arch/alpha/kernel/sys_sable.c
index d232e42be01..9e263256a42 100644
--- a/arch/alpha/kernel/sys_sable.c
+++ b/arch/alpha/kernel/sys_sable.c
@@ -453,7 +453,7 @@ sable_lynx_enable_irq(unsigned int irq)
453 sable_lynx_irq_swizzle->update_irq_hw(bit, mask); 453 sable_lynx_irq_swizzle->update_irq_hw(bit, mask);
454 spin_unlock(&sable_lynx_irq_lock); 454 spin_unlock(&sable_lynx_irq_lock);
455#if 0 455#if 0
456 printk("%s: mask 0x%lx bit 0x%x irq 0x%x\n", 456 printk("%s: mask 0x%lx bit 0x%lx irq 0x%x\n",
457 __func__, mask, bit, irq); 457 __func__, mask, bit, irq);
458#endif 458#endif
459} 459}
@@ -469,7 +469,7 @@ sable_lynx_disable_irq(unsigned int irq)
469 sable_lynx_irq_swizzle->update_irq_hw(bit, mask); 469 sable_lynx_irq_swizzle->update_irq_hw(bit, mask);
470 spin_unlock(&sable_lynx_irq_lock); 470 spin_unlock(&sable_lynx_irq_lock);
471#if 0 471#if 0
472 printk("%s: mask 0x%lx bit 0x%x irq 0x%x\n", 472 printk("%s: mask 0x%lx bit 0x%lx irq 0x%x\n",
473 __func__, mask, bit, irq); 473 __func__, mask, bit, irq);
474#endif 474#endif
475} 475}
diff --git a/arch/alpha/kernel/traps.c b/arch/alpha/kernel/traps.c
index cefc5a355ef..6ee7655b756 100644
--- a/arch/alpha/kernel/traps.c
+++ b/arch/alpha/kernel/traps.c
@@ -623,7 +623,7 @@ do_entUna(void * va, unsigned long opcode, unsigned long reg,
623 } 623 }
624 624
625 lock_kernel(); 625 lock_kernel();
626 printk("Bad unaligned kernel access at %016lx: %p %lx %ld\n", 626 printk("Bad unaligned kernel access at %016lx: %p %lx %lu\n",
627 pc, va, opcode, reg); 627 pc, va, opcode, reg);
628 do_exit(SIGSEGV); 628 do_exit(SIGSEGV);
629 629
diff --git a/arch/avr32/mm/fault.c b/arch/avr32/mm/fault.c
index ce4e4296b95..62d4abbaa65 100644
--- a/arch/avr32/mm/fault.c
+++ b/arch/avr32/mm/fault.c
@@ -250,21 +250,3 @@ asmlinkage void do_bus_error(unsigned long addr, int write_access,
250 dump_dtlb(); 250 dump_dtlb();
251 die("Bus Error", regs, SIGKILL); 251 die("Bus Error", regs, SIGKILL);
252} 252}
253
254/*
255 * This functionality is currently not possible to implement because
256 * we're using segmentation to ensure a fixed mapping of the kernel
257 * virtual address space.
258 *
259 * It would be possible to implement this, but it would require us to
260 * disable segmentation at startup and load the kernel mappings into
261 * the TLB like any other pages. There will be lots of trickery to
262 * avoid recursive invocation of the TLB miss handler, though...
263 */
264#ifdef CONFIG_DEBUG_PAGEALLOC
265void kernel_map_pages(struct page *page, int numpages, int enable)
266{
267
268}
269EXPORT_SYMBOL(kernel_map_pages);
270#endif
diff --git a/arch/ia64/hp/sim/simserial.c b/arch/ia64/hp/sim/simserial.c
index 24b1ad5334c..2bef5261d96 100644
--- a/arch/ia64/hp/sim/simserial.c
+++ b/arch/ia64/hp/sim/simserial.c
@@ -24,6 +24,7 @@
24#include <linux/major.h> 24#include <linux/major.h>
25#include <linux/fcntl.h> 25#include <linux/fcntl.h>
26#include <linux/mm.h> 26#include <linux/mm.h>
27#include <linux/seq_file.h>
27#include <linux/slab.h> 28#include <linux/slab.h>
28#include <linux/capability.h> 29#include <linux/capability.h>
29#include <linux/console.h> 30#include <linux/console.h>
@@ -848,38 +849,36 @@ static int rs_open(struct tty_struct *tty, struct file * filp)
848 * /proc fs routines.... 849 * /proc fs routines....
849 */ 850 */
850 851
851static inline int line_info(char *buf, struct serial_state *state) 852static inline void line_info(struct seq_file *m, struct serial_state *state)
852{ 853{
853 return sprintf(buf, "%d: uart:%s port:%lX irq:%d\n", 854 seq_printf(m, "%d: uart:%s port:%lX irq:%d\n",
854 state->line, uart_config[state->type].name, 855 state->line, uart_config[state->type].name,
855 state->port, state->irq); 856 state->port, state->irq);
856} 857}
857 858
858static int rs_read_proc(char *page, char **start, off_t off, int count, 859static int rs_proc_show(struct seq_file *m, void *v)
859 int *eof, void *data)
860{ 860{
861 int i, len = 0, l; 861 int i;
862 off_t begin = 0; 862
863 863 seq_printf(m, "simserinfo:1.0 driver:%s\n", serial_version);
864 len += sprintf(page, "simserinfo:1.0 driver:%s\n", serial_version); 864 for (i = 0; i < NR_PORTS; i++)
865 for (i = 0; i < NR_PORTS && len < 4000; i++) { 865 line_info(m, &rs_table[i]);
866 l = line_info(page + len, &rs_table[i]); 866 return 0;
867 len += l;
868 if (len+begin > off+count)
869 goto done;
870 if (len+begin < off) {
871 begin += len;
872 len = 0;
873 }
874 }
875 *eof = 1;
876done:
877 if (off >= len+begin)
878 return 0;
879 *start = page + (begin-off);
880 return ((count < begin+len-off) ? count : begin+len-off);
881} 867}
882 868
869static int rs_proc_open(struct inode *inode, struct file *file)
870{
871 return single_open(file, rs_proc_show, NULL);
872}
873
874static const struct file_operations rs_proc_fops = {
875 .owner = THIS_MODULE,
876 .open = rs_proc_open,
877 .read = seq_read,
878 .llseek = seq_lseek,
879 .release = single_release,
880};
881
883/* 882/*
884 * --------------------------------------------------------------------- 883 * ---------------------------------------------------------------------
885 * rs_init() and friends 884 * rs_init() and friends
@@ -917,7 +916,7 @@ static const struct tty_operations hp_ops = {
917 .start = rs_start, 916 .start = rs_start,
918 .hangup = rs_hangup, 917 .hangup = rs_hangup,
919 .wait_until_sent = rs_wait_until_sent, 918 .wait_until_sent = rs_wait_until_sent,
920 .read_proc = rs_read_proc, 919 .proc_fops = &rs_proc_fops,
921}; 920};
922 921
923/* 922/*
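The simserial change above is the standard seq_file/single_open() conversion that replaces the old read_proc page-buffer interface. For reference, the same shape for a hypothetical read-only /proc entry (all demo_* names and the "demo" path are placeholders, not from this patch):

	#include <linux/module.h>
	#include <linux/proc_fs.h>
	#include <linux/seq_file.h>

	static int demo_proc_show(struct seq_file *m, void *v)
	{
		seq_printf(m, "demoinfo:1.0\n");	/* emit the whole report */
		return 0;
	}

	static int demo_proc_open(struct inode *inode, struct file *file)
	{
		return single_open(file, demo_proc_show, NULL);
	}

	static const struct file_operations demo_proc_fops = {
		.owner		= THIS_MODULE,
		.open		= demo_proc_open,
		.read		= seq_read,
		.llseek		= seq_lseek,
		.release	= single_release,
	};

	/* registered from an init path, e.g.:
	 *	proc_create("demo", 0444, NULL, &demo_proc_fops);
	 */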
diff --git a/arch/ia64/include/asm/intrinsics.h b/arch/ia64/include/asm/intrinsics.h
index c47830e26cb..111ed522289 100644
--- a/arch/ia64/include/asm/intrinsics.h
+++ b/arch/ia64/include/asm/intrinsics.h
@@ -202,7 +202,11 @@ extern long ia64_cmpxchg_called_with_bad_pointer (void);
202 202
203#ifndef __ASSEMBLY__ 203#ifndef __ASSEMBLY__
204#if defined(CONFIG_PARAVIRT) && defined(__KERNEL__) 204#if defined(CONFIG_PARAVIRT) && defined(__KERNEL__)
205#define IA64_INTRINSIC_API(name) pv_cpu_ops.name 205#ifdef ASM_SUPPORTED
206# define IA64_INTRINSIC_API(name) paravirt_ ## name
207#else
208# define IA64_INTRINSIC_API(name) pv_cpu_ops.name
209#endif
206#define IA64_INTRINSIC_MACRO(name) paravirt_ ## name 210#define IA64_INTRINSIC_MACRO(name) paravirt_ ## name
207#else 211#else
208#define IA64_INTRINSIC_API(name) ia64_native_ ## name 212#define IA64_INTRINSIC_API(name) ia64_native_ ## name
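After the hunk above, the same intrinsic name resolves to a binary-patchable stub when ASM_SUPPORTED is defined and to an indirect call through pv_cpu_ops otherwise. A schematic caller (probe_thash is hypothetical; ia64_thash is the existing intrinsic macro):

	/* The macro picks the backend at compile time. */
	static unsigned long probe_thash(unsigned long va)
	{
		return ia64_thash(va);	/* paravirt_thash(), pv_cpu_ops.thash(),
					 * or ia64_native_thash(), per the
					 * #ifdefs above */
	}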
diff --git a/arch/ia64/include/asm/mmu_context.h b/arch/ia64/include/asm/mmu_context.h
index 040bc87db93..7f2a456603c 100644
--- a/arch/ia64/include/asm/mmu_context.h
+++ b/arch/ia64/include/asm/mmu_context.h
@@ -87,7 +87,7 @@ get_mmu_context (struct mm_struct *mm)
87 /* re-check, now that we've got the lock: */ 87 /* re-check, now that we've got the lock: */
88 context = mm->context; 88 context = mm->context;
89 if (context == 0) { 89 if (context == 0) {
90 cpus_clear(mm->cpu_vm_mask); 90 cpumask_clear(mm_cpumask(mm));
91 if (ia64_ctx.next >= ia64_ctx.limit) { 91 if (ia64_ctx.next >= ia64_ctx.limit) {
92 ia64_ctx.next = find_next_zero_bit(ia64_ctx.bitmap, 92 ia64_ctx.next = find_next_zero_bit(ia64_ctx.bitmap,
93 ia64_ctx.max_ctx, ia64_ctx.next); 93 ia64_ctx.max_ctx, ia64_ctx.next);
@@ -166,8 +166,8 @@ activate_context (struct mm_struct *mm)
166 166
167 do { 167 do {
168 context = get_mmu_context(mm); 168 context = get_mmu_context(mm);
169 if (!cpu_isset(smp_processor_id(), mm->cpu_vm_mask)) 169 if (!cpumask_test_cpu(smp_processor_id(), mm_cpumask(mm)))
170 cpu_set(smp_processor_id(), mm->cpu_vm_mask); 170 cpumask_set_cpu(smp_processor_id(), mm_cpumask(mm));
171 reload_context(context); 171 reload_context(context);
172 /* 172 /*
173 * in the unlikely event of a TLB-flush by another thread, 173 * in the unlikely event of a TLB-flush by another thread,
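The mmu_context.h hunks above move from manipulating mm->cpu_vm_mask directly to the cpumask accessors that work on the struct cpumask returned by mm_cpumask(). The same conversion pattern in isolation (mark_mm_on_cpu is a hypothetical helper, not from this patch):

	/* Old: cpu_isset(cpu, mm->cpu_vm_mask) / cpu_set(cpu, mm->cpu_vm_mask)
	 * New: test and set through mm_cpumask(mm), as in the hunk above. */
	static inline void mark_mm_on_cpu(struct mm_struct *mm, int cpu)
	{
		if (!cpumask_test_cpu(cpu, mm_cpumask(mm)))
			cpumask_set_cpu(cpu, mm_cpumask(mm));
	}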
diff --git a/arch/ia64/include/asm/module.h b/arch/ia64/include/asm/module.h
index d2da61e4c49..908eaef42a0 100644
--- a/arch/ia64/include/asm/module.h
+++ b/arch/ia64/include/asm/module.h
@@ -16,6 +16,12 @@ struct mod_arch_specific {
16 struct elf64_shdr *got; /* global offset table */ 16 struct elf64_shdr *got; /* global offset table */
17 struct elf64_shdr *opd; /* official procedure descriptors */ 17 struct elf64_shdr *opd; /* official procedure descriptors */
18 struct elf64_shdr *unwind; /* unwind-table section */ 18 struct elf64_shdr *unwind; /* unwind-table section */
19#ifdef CONFIG_PARAVIRT
20 struct elf64_shdr *paravirt_bundles;
21 /* paravirt_alt_bundle_patch table */
22 struct elf64_shdr *paravirt_insts;
23 /* paravirt_alt_inst_patch table */
24#endif
19 unsigned long gp; /* global-pointer for module */ 25 unsigned long gp; /* global-pointer for module */
20 26
21 void *core_unw_table; /* core unwind-table cookie returned by unwinder */ 27 void *core_unw_table; /* core unwind-table cookie returned by unwinder */
diff --git a/arch/ia64/include/asm/native/inst.h b/arch/ia64/include/asm/native/inst.h
index 0a1026cca4f..d2d46efb3e6 100644
--- a/arch/ia64/include/asm/native/inst.h
+++ b/arch/ia64/include/asm/native/inst.h
@@ -30,6 +30,9 @@
30#define __paravirt_work_processed_syscall_target \ 30#define __paravirt_work_processed_syscall_target \
31 ia64_work_processed_syscall 31 ia64_work_processed_syscall
32 32
33#define paravirt_fsyscall_table ia64_native_fsyscall_table
34#define paravirt_fsys_bubble_down ia64_native_fsys_bubble_down
35
33#ifdef CONFIG_PARAVIRT_GUEST_ASM_CLOBBER_CHECK 36#ifdef CONFIG_PARAVIRT_GUEST_ASM_CLOBBER_CHECK
34# define PARAVIRT_POISON 0xdeadbeefbaadf00d 37# define PARAVIRT_POISON 0xdeadbeefbaadf00d
35# define CLOBBER(clob) \ 38# define CLOBBER(clob) \
@@ -74,6 +77,11 @@
74(pred) mov reg = psr \ 77(pred) mov reg = psr \
75 CLOBBER(clob) 78 CLOBBER(clob)
76 79
80#define MOV_FROM_ITC(pred, pred_clob, reg, clob) \
81(pred) mov reg = ar.itc \
82 CLOBBER(clob) \
83 CLOBBER_PRED(pred_clob)
84
77#define MOV_TO_IFA(reg, clob) \ 85#define MOV_TO_IFA(reg, clob) \
78 mov cr.ifa = reg \ 86 mov cr.ifa = reg \
79 CLOBBER(clob) 87 CLOBBER(clob)
@@ -158,6 +166,11 @@
158#define RSM_PSR_DT \ 166#define RSM_PSR_DT \
159 rsm psr.dt 167 rsm psr.dt
160 168
169#define RSM_PSR_BE_I(clob0, clob1) \
170 rsm psr.be | psr.i \
171 CLOBBER(clob0) \
172 CLOBBER(clob1)
173
161#define SSM_PSR_DT_AND_SRLZ_I \ 174#define SSM_PSR_DT_AND_SRLZ_I \
162 ssm psr.dt \ 175 ssm psr.dt \
163 ;; \ 176 ;; \
diff --git a/arch/ia64/include/asm/native/patchlist.h b/arch/ia64/include/asm/native/patchlist.h
new file mode 100644
index 00000000000..be16ca9311b
--- /dev/null
+++ b/arch/ia64/include/asm/native/patchlist.h
@@ -0,0 +1,38 @@
1/******************************************************************************
 2 * arch/ia64/include/asm/native/patchlist.h
3 *
4 * Copyright (c) 2008 Isaku Yamahata <yamahata at valinux co jp>
5 * VA Linux Systems Japan K.K.
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License as published by
9 * the Free Software Foundation; either version 2 of the License, or
10 * (at your option) any later version.
11 *
12 * This program is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 * GNU General Public License for more details.
16 *
17 * You should have received a copy of the GNU General Public License
18 * along with this program; if not, write to the Free Software
19 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
20 *
21 */
22
23#define __paravirt_start_gate_fsyscall_patchlist \
24 __ia64_native_start_gate_fsyscall_patchlist
25#define __paravirt_end_gate_fsyscall_patchlist \
26 __ia64_native_end_gate_fsyscall_patchlist
27#define __paravirt_start_gate_brl_fsys_bubble_down_patchlist \
28 __ia64_native_start_gate_brl_fsys_bubble_down_patchlist
29#define __paravirt_end_gate_brl_fsys_bubble_down_patchlist \
30 __ia64_native_end_gate_brl_fsys_bubble_down_patchlist
31#define __paravirt_start_gate_vtop_patchlist \
32 __ia64_native_start_gate_vtop_patchlist
33#define __paravirt_end_gate_vtop_patchlist \
34 __ia64_native_end_gate_vtop_patchlist
35#define __paravirt_start_gate_mckinley_e9_patchlist \
36 __ia64_native_start_gate_mckinley_e9_patchlist
37#define __paravirt_end_gate_mckinley_e9_patchlist \
38 __ia64_native_end_gate_mckinley_e9_patchlist
diff --git a/arch/ia64/include/asm/native/pvchk_inst.h b/arch/ia64/include/asm/native/pvchk_inst.h
index b8e6eb1090d..8d72962ec83 100644
--- a/arch/ia64/include/asm/native/pvchk_inst.h
+++ b/arch/ia64/include/asm/native/pvchk_inst.h
@@ -180,6 +180,11 @@
180 IS_PRED_IN(pred) \ 180 IS_PRED_IN(pred) \
181 IS_RREG_OUT(reg) \ 181 IS_RREG_OUT(reg) \
182 IS_RREG_CLOB(clob) 182 IS_RREG_CLOB(clob)
183#define MOV_FROM_ITC(pred, pred_clob, reg, clob) \
184 IS_PRED_IN(pred) \
185 IS_PRED_CLOB(pred_clob) \
186 IS_RREG_OUT(reg) \
187 IS_RREG_CLOB(clob)
183#define MOV_TO_IFA(reg, clob) \ 188#define MOV_TO_IFA(reg, clob) \
184 IS_RREG_IN(reg) \ 189 IS_RREG_IN(reg) \
185 IS_RREG_CLOB(clob) 190 IS_RREG_CLOB(clob)
@@ -246,6 +251,9 @@
246 IS_RREG_CLOB(clob2) 251 IS_RREG_CLOB(clob2)
247#define RSM_PSR_DT \ 252#define RSM_PSR_DT \
248 nop 0 253 nop 0
254#define RSM_PSR_BE_I(clob0, clob1) \
255 IS_RREG_CLOB(clob0) \
256 IS_RREG_CLOB(clob1)
249#define SSM_PSR_DT_AND_SRLZ_I \ 257#define SSM_PSR_DT_AND_SRLZ_I \
250 nop 0 258 nop 0
251#define BSW_0(clob0, clob1, clob2) \ 259#define BSW_0(clob0, clob1, clob2) \
diff --git a/arch/ia64/include/asm/paravirt.h b/arch/ia64/include/asm/paravirt.h
index 2bf3636473f..2eb0a981a09 100644
--- a/arch/ia64/include/asm/paravirt.h
+++ b/arch/ia64/include/asm/paravirt.h
@@ -22,6 +22,56 @@
22#ifndef __ASM_PARAVIRT_H 22#ifndef __ASM_PARAVIRT_H
23#define __ASM_PARAVIRT_H 23#define __ASM_PARAVIRT_H
24 24
25#ifndef __ASSEMBLY__
26/******************************************************************************
27 * fsys related addresses
28 */
29struct pv_fsys_data {
30 unsigned long *fsyscall_table;
31 void *fsys_bubble_down;
32};
33
34extern struct pv_fsys_data pv_fsys_data;
35
36unsigned long *paravirt_get_fsyscall_table(void);
37char *paravirt_get_fsys_bubble_down(void);
38
39/******************************************************************************
40 * patchlist addresses for gate page
41 */
42enum pv_gate_patchlist {
43 PV_GATE_START_FSYSCALL,
44 PV_GATE_END_FSYSCALL,
45
46 PV_GATE_START_BRL_FSYS_BUBBLE_DOWN,
47 PV_GATE_END_BRL_FSYS_BUBBLE_DOWN,
48
49 PV_GATE_START_VTOP,
50 PV_GATE_END_VTOP,
51
52 PV_GATE_START_MCKINLEY_E9,
53 PV_GATE_END_MCKINLEY_E9,
54};
55
56struct pv_patchdata {
57 unsigned long start_fsyscall_patchlist;
58 unsigned long end_fsyscall_patchlist;
59 unsigned long start_brl_fsys_bubble_down_patchlist;
60 unsigned long end_brl_fsys_bubble_down_patchlist;
61 unsigned long start_vtop_patchlist;
62 unsigned long end_vtop_patchlist;
63 unsigned long start_mckinley_e9_patchlist;
64 unsigned long end_mckinley_e9_patchlist;
65
66 void *gate_section;
67};
68
69extern struct pv_patchdata pv_patchdata;
70
71unsigned long paravirt_get_gate_patchlist(enum pv_gate_patchlist type);
72void *paravirt_get_gate_section(void);
73#endif
74
25#ifdef CONFIG_PARAVIRT_GUEST 75#ifdef CONFIG_PARAVIRT_GUEST
26 76
27#define PARAVIRT_HYPERVISOR_TYPE_DEFAULT 0 77#define PARAVIRT_HYPERVISOR_TYPE_DEFAULT 0
@@ -68,6 +118,14 @@ struct pv_init_ops {
68 int (*arch_setup_nomca)(void); 118 int (*arch_setup_nomca)(void);
69 119
70 void (*post_smp_prepare_boot_cpu)(void); 120 void (*post_smp_prepare_boot_cpu)(void);
121
122#ifdef ASM_SUPPORTED
123 unsigned long (*patch_bundle)(void *sbundle, void *ebundle,
124 unsigned long type);
125 unsigned long (*patch_inst)(unsigned long stag, unsigned long etag,
126 unsigned long type);
127#endif
128 void (*patch_branch)(unsigned long tag, unsigned long type);
71}; 129};
72 130
73extern struct pv_init_ops pv_init_ops; 131extern struct pv_init_ops pv_init_ops;
@@ -210,6 +268,8 @@ struct pv_time_ops {
210 int (*do_steal_accounting)(unsigned long *new_itm); 268 int (*do_steal_accounting)(unsigned long *new_itm);
211 269
212 void (*clocksource_resume)(void); 270 void (*clocksource_resume)(void);
271
272 unsigned long long (*sched_clock)(void);
213}; 273};
214 274
215extern struct pv_time_ops pv_time_ops; 275extern struct pv_time_ops pv_time_ops;
@@ -227,6 +287,11 @@ paravirt_do_steal_accounting(unsigned long *new_itm)
227 return pv_time_ops.do_steal_accounting(new_itm); 287 return pv_time_ops.do_steal_accounting(new_itm);
228} 288}
229 289
290static inline unsigned long long paravirt_sched_clock(void)
291{
292 return pv_time_ops.sched_clock();
293}
294
230#endif /* !__ASSEMBLY__ */ 295#endif /* !__ASSEMBLY__ */
231 296
232#else 297#else
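The new sched_clock hook lets each pv flavor supply its own scheduler clock, with generic code dispatching through pv_time_ops. A sketch of how a backend could be wired, assuming only the declarations visible in the hunks above (not the full patch):

	/* Native backend supplies the hook declared in asm/timex.h ... */
	struct pv_time_ops pv_time_ops = {
		/* ...other hooks... */
		.sched_clock = ia64_native_sched_clock,
	};

	/* ...and the arch sched_clock() simply dispatches through it. */
	unsigned long long sched_clock(void)
	{
		return paravirt_sched_clock();
	}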
diff --git a/arch/ia64/include/asm/paravirt_patch.h b/arch/ia64/include/asm/paravirt_patch.h
new file mode 100644
index 00000000000..128ff5db6e6
--- /dev/null
+++ b/arch/ia64/include/asm/paravirt_patch.h
@@ -0,0 +1,143 @@
1/******************************************************************************
2 * Copyright (c) 2008 Isaku Yamahata <yamahata at valinux co jp>
3 * VA Linux Systems Japan K.K.
4 *
5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License as published by
7 * the Free Software Foundation; either version 2 of the License, or
8 * (at your option) any later version.
9 *
10 * This program is distributed in the hope that it will be useful,
11 * but WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 * GNU General Public License for more details.
14 *
15 * You should have received a copy of the GNU General Public License
16 * along with this program; if not, write to the Free Software
17 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
18 *
19 */
20
21#ifndef __ASM_PARAVIRT_PATCH_H
22#define __ASM_PARAVIRT_PATCH_H
23
24#ifdef __ASSEMBLY__
25
26 .section .paravirt_branches, "a"
27 .previous
28#define PARAVIRT_PATCH_SITE_BR(type) \
29 { \
30 [1:] ; \
31 br.cond.sptk.many 2f ; \
32 nop.b 0 ; \
33 nop.b 0;; ; \
34 } ; \
35 2: \
36 .xdata8 ".paravirt_branches", 1b, type
37
38#else
39
40#include <linux/stringify.h>
41#include <asm/intrinsics.h>
42
43/* for binary patch */
44struct paravirt_patch_site_bundle {
45 void *sbundle;
46 void *ebundle;
47 unsigned long type;
48};
49
 50/* the label marks the beginning of a new bundle */
51#define paravirt_alt_bundle(instr, privop) \
52 "\t998:\n" \
53 "\t" instr "\n" \
54 "\t999:\n" \
55 "\t.pushsection .paravirt_bundles, \"a\"\n" \
56 "\t.popsection\n" \
57 "\t.xdata8 \".paravirt_bundles\", 998b, 999b, " \
58 __stringify(privop) "\n"
59
60
61struct paravirt_patch_bundle_elem {
62 const void *sbundle;
63 const void *ebundle;
64 unsigned long type;
65};
66
67
68struct paravirt_patch_site_inst {
69 unsigned long stag;
70 unsigned long etag;
71 unsigned long type;
72};
73
74#define paravirt_alt_inst(instr, privop) \
75 "\t[998:]\n" \
76 "\t" instr "\n" \
77 "\t[999:]\n" \
78 "\t.pushsection .paravirt_insts, \"a\"\n" \
79 "\t.popsection\n" \
80 "\t.xdata8 \".paravirt_insts\", 998b, 999b, " \
81 __stringify(privop) "\n"
82
83struct paravirt_patch_site_branch {
84 unsigned long tag;
85 unsigned long type;
86};
87
88struct paravirt_patch_branch_target {
89 const void *entry;
90 unsigned long type;
91};
92
93void
94__paravirt_patch_apply_branch(
95 unsigned long tag, unsigned long type,
96 const struct paravirt_patch_branch_target *entries,
97 unsigned int nr_entries);
98
99void
100paravirt_patch_reloc_br(unsigned long tag, const void *target);
101
102void
103paravirt_patch_reloc_brl(unsigned long tag, const void *target);
104
105
106#if defined(ASM_SUPPORTED) && defined(CONFIG_PARAVIRT)
107unsigned long
108ia64_native_patch_bundle(void *sbundle, void *ebundle, unsigned long type);
109
110unsigned long
111__paravirt_patch_apply_bundle(void *sbundle, void *ebundle, unsigned long type,
112 const struct paravirt_patch_bundle_elem *elems,
113 unsigned long nelems,
114 const struct paravirt_patch_bundle_elem **found);
115
116void
117paravirt_patch_apply_bundle(const struct paravirt_patch_site_bundle *start,
118 const struct paravirt_patch_site_bundle *end);
119
120void
121paravirt_patch_apply_inst(const struct paravirt_patch_site_inst *start,
122 const struct paravirt_patch_site_inst *end);
123
124void paravirt_patch_apply(void);
125#else
126#define paravirt_patch_apply_bundle(start, end) do { } while (0)
127#define paravirt_patch_apply_inst(start, end) do { } while (0)
128#define paravirt_patch_apply() do { } while (0)
129#endif
130
 131#endif /* !__ASSEMBLY__ */
132
133#endif /* __ASM_PARAVIRT_PATCH_H */
134
135/*
136 * Local variables:
137 * mode: C
138 * c-set-style: "linux"
139 * c-basic-offset: 8
140 * tab-width: 8
141 * indent-tabs-mode: t
142 * End:
143 */
diff --git a/arch/ia64/include/asm/paravirt_privop.h b/arch/ia64/include/asm/paravirt_privop.h
index 33c8e55f577..3d2951130b5 100644
--- a/arch/ia64/include/asm/paravirt_privop.h
+++ b/arch/ia64/include/asm/paravirt_privop.h
@@ -33,7 +33,7 @@
33 */ 33 */
34 34
35struct pv_cpu_ops { 35struct pv_cpu_ops {
36 void (*fc)(unsigned long addr); 36 void (*fc)(void *addr);
37 unsigned long (*thash)(unsigned long addr); 37 unsigned long (*thash)(unsigned long addr);
38 unsigned long (*get_cpuid)(int index); 38 unsigned long (*get_cpuid)(int index);
39 unsigned long (*get_pmd)(int index); 39 unsigned long (*get_pmd)(int index);
@@ -60,12 +60,18 @@ extern unsigned long ia64_native_getreg_func(int regnum);
60/* Instructions paravirtualized for performance */ 60/* Instructions paravirtualized for performance */
61/************************************************/ 61/************************************************/
62 62
63#ifndef ASM_SUPPORTED
64#define paravirt_ssm_i() pv_cpu_ops.ssm_i()
65#define paravirt_rsm_i() pv_cpu_ops.rsm_i()
66#define __paravirt_getreg() pv_cpu_ops.getreg()
67#endif
68
 63/* mask for ia64_native_ssm/rsm() must be constant ("i" constraint). 69/* mask for ia64_native_ssm/rsm() must be constant ("i" constraint).
64 * static inline function doesn't satisfy it. */ 70 * static inline function doesn't satisfy it. */
65#define paravirt_ssm(mask) \ 71#define paravirt_ssm(mask) \
66 do { \ 72 do { \
67 if ((mask) == IA64_PSR_I) \ 73 if ((mask) == IA64_PSR_I) \
68 pv_cpu_ops.ssm_i(); \ 74 paravirt_ssm_i(); \
69 else \ 75 else \
70 ia64_native_ssm(mask); \ 76 ia64_native_ssm(mask); \
71 } while (0) 77 } while (0)
@@ -73,7 +79,7 @@ extern unsigned long ia64_native_getreg_func(int regnum);
73#define paravirt_rsm(mask) \ 79#define paravirt_rsm(mask) \
74 do { \ 80 do { \
75 if ((mask) == IA64_PSR_I) \ 81 if ((mask) == IA64_PSR_I) \
76 pv_cpu_ops.rsm_i(); \ 82 paravirt_rsm_i(); \
77 else \ 83 else \
78 ia64_native_rsm(mask); \ 84 ia64_native_rsm(mask); \
79 } while (0) 85 } while (0)
@@ -86,7 +92,7 @@ extern unsigned long ia64_native_getreg_func(int regnum);
86 if ((reg) == _IA64_REG_IP) \ 92 if ((reg) == _IA64_REG_IP) \
87 res = ia64_native_getreg(_IA64_REG_IP); \ 93 res = ia64_native_getreg(_IA64_REG_IP); \
88 else \ 94 else \
89 res = pv_cpu_ops.getreg(reg); \ 95 res = __paravirt_getreg(reg); \
90 res; \ 96 res; \
91 }) 97 })
92 98
@@ -112,6 +118,12 @@ void paravirt_cpu_asm_init(const struct pv_cpu_asm_switch *cpu_asm_switch);
112 118
113#endif /* CONFIG_PARAVIRT */ 119#endif /* CONFIG_PARAVIRT */
114 120
121#if defined(CONFIG_PARAVIRT) && defined(ASM_SUPPORTED)
122#define paravirt_dv_serialize_data() ia64_dv_serialize_data()
123#else
124#define paravirt_dv_serialize_data() /* nothing */
125#endif
126
115/* these routines utilize privilege-sensitive or performance-sensitive 127/* these routines utilize privilege-sensitive or performance-sensitive
116 * privileged instructions so the code must be replaced with 128 * privileged instructions so the code must be replaced with
117 * paravirtualized versions */ 129 * paravirtualized versions */
@@ -121,4 +133,349 @@ void paravirt_cpu_asm_init(const struct pv_cpu_asm_switch *cpu_asm_switch);
121 IA64_PARAVIRT_ASM_FUNC(work_processed_syscall) 133 IA64_PARAVIRT_ASM_FUNC(work_processed_syscall)
122#define ia64_leave_kernel IA64_PARAVIRT_ASM_FUNC(leave_kernel) 134#define ia64_leave_kernel IA64_PARAVIRT_ASM_FUNC(leave_kernel)
123 135
136
137#if defined(CONFIG_PARAVIRT)
138/******************************************************************************
139 * binary patching infrastructure
140 */
141#define PARAVIRT_PATCH_TYPE_FC 1
142#define PARAVIRT_PATCH_TYPE_THASH 2
143#define PARAVIRT_PATCH_TYPE_GET_CPUID 3
144#define PARAVIRT_PATCH_TYPE_GET_PMD 4
145#define PARAVIRT_PATCH_TYPE_PTCGA 5
146#define PARAVIRT_PATCH_TYPE_GET_RR 6
147#define PARAVIRT_PATCH_TYPE_SET_RR 7
148#define PARAVIRT_PATCH_TYPE_SET_RR0_TO_RR4 8
149#define PARAVIRT_PATCH_TYPE_SSM_I 9
150#define PARAVIRT_PATCH_TYPE_RSM_I 10
151#define PARAVIRT_PATCH_TYPE_GET_PSR_I 11
152#define PARAVIRT_PATCH_TYPE_INTRIN_LOCAL_IRQ_RESTORE 12
153
 154/* PARAVIRT_PATCH_TYPE_[GS]ETREG + _IA64_REG_xxx */
155#define PARAVIRT_PATCH_TYPE_GETREG 0x10000000
156#define PARAVIRT_PATCH_TYPE_SETREG 0x20000000
157
158/*
159 * struct task_struct* (*ia64_switch_to)(void* next_task);
160 * void *ia64_leave_syscall;
161 * void *ia64_work_processed_syscall
162 * void *ia64_leave_kernel;
163 */
164
165#define PARAVIRT_PATCH_TYPE_BR_START 0x30000000
166#define PARAVIRT_PATCH_TYPE_BR_SWITCH_TO \
167 (PARAVIRT_PATCH_TYPE_BR_START + 0)
168#define PARAVIRT_PATCH_TYPE_BR_LEAVE_SYSCALL \
169 (PARAVIRT_PATCH_TYPE_BR_START + 1)
170#define PARAVIRT_PATCH_TYPE_BR_WORK_PROCESSED_SYSCALL \
171 (PARAVIRT_PATCH_TYPE_BR_START + 2)
172#define PARAVIRT_PATCH_TYPE_BR_LEAVE_KERNEL \
173 (PARAVIRT_PATCH_TYPE_BR_START + 3)
174
175#ifdef ASM_SUPPORTED
176#include <asm/paravirt_patch.h>
177
178/*
179 * pv_cpu_ops calling stub.
 180 * a normal function calling convention can't be written in gcc
181 * inline assembly.
182 *
183 * from the caller's point of view,
184 * the following registers will be clobbered.
185 * r2, r3
186 * r8-r15
187 * r16, r17
188 * b6, b7
189 * p6-p15
190 * ar.ccv
191 *
 192 * from the callee's point of view,
193 * the following registers can be used.
194 * r2, r3: scratch
195 * r8: scratch, input argument0 and return value
196 * r0-r15: scratch, input argument1-5
197 * b6: return pointer
198 * b7: scratch
199 * p6-p15: scratch
200 * ar.ccv: scratch
201 *
202 * other registers must not be changed. especially
203 * b0: rp: preserved. gcc ignores b0 in clobbered register.
204 * r16: saved gp
205 */
206/* 5 bundles */
207#define __PARAVIRT_BR \
208 ";;\n" \
209 "{ .mlx\n" \
210 "nop 0\n" \
211 "movl r2 = %[op_addr]\n"/* get function pointer address */ \
212 ";;\n" \
213 "}\n" \
214 "1:\n" \
215 "{ .mii\n" \
216 "ld8 r2 = [r2]\n" /* load function descriptor address */ \
217 "mov r17 = ip\n" /* get ip to calc return address */ \
218 "mov r16 = gp\n" /* save gp */ \
219 ";;\n" \
220 "}\n" \
221 "{ .mii\n" \
222 "ld8 r3 = [r2], 8\n" /* load entry address */ \
223 "adds r17 = 1f - 1b, r17\n" /* calculate return address */ \
224 ";;\n" \
225 "mov b7 = r3\n" /* set entry address */ \
226 "}\n" \
227 "{ .mib\n" \
228 "ld8 gp = [r2]\n" /* load gp value */ \
229 "mov b6 = r17\n" /* set return address */ \
230 "br.cond.sptk.few b7\n" /* intrinsics are very short isns */ \
231 "}\n" \
232 "1:\n" \
233 "{ .mii\n" \
234 "mov gp = r16\n" /* restore gp value */ \
235 "nop 0\n" \
236 "nop 0\n" \
237 ";;\n" \
238 "}\n"
239
240#define PARAVIRT_OP(op) \
241 [op_addr] "i"(&pv_cpu_ops.op)
242
243#define PARAVIRT_TYPE(type) \
244 PARAVIRT_PATCH_TYPE_ ## type
245
246#define PARAVIRT_REG_CLOBBERS0 \
247 "r2", "r3", /*"r8",*/ "r9", "r10", "r11", "r14", \
248 "r15", "r16", "r17"
249
250#define PARAVIRT_REG_CLOBBERS1 \
251 "r2","r3", /*"r8",*/ "r9", "r10", "r11", "r14", \
252 "r15", "r16", "r17"
253
254#define PARAVIRT_REG_CLOBBERS2 \
255 "r2", "r3", /*"r8", "r9",*/ "r10", "r11", "r14", \
256 "r15", "r16", "r17"
257
258#define PARAVIRT_REG_CLOBBERS5 \
259 "r2", "r3", /*"r8", "r9", "r10", "r11", "r14",*/ \
260 "r15", "r16", "r17"
261
262#define PARAVIRT_BR_CLOBBERS \
263 "b6", "b7"
264
265#define PARAVIRT_PR_CLOBBERS \
266 "p6", "p7", "p8", "p9", "p10", "p11", "p12", "p13", "p14", "p15"
267
268#define PARAVIRT_AR_CLOBBERS \
269 "ar.ccv"
270
271#define PARAVIRT_CLOBBERS0 \
272 PARAVIRT_REG_CLOBBERS0, \
273 PARAVIRT_BR_CLOBBERS, \
274 PARAVIRT_PR_CLOBBERS, \
275 PARAVIRT_AR_CLOBBERS, \
276 "memory"
277
278#define PARAVIRT_CLOBBERS1 \
279 PARAVIRT_REG_CLOBBERS1, \
280 PARAVIRT_BR_CLOBBERS, \
281 PARAVIRT_PR_CLOBBERS, \
282 PARAVIRT_AR_CLOBBERS, \
283 "memory"
284
285#define PARAVIRT_CLOBBERS2 \
286 PARAVIRT_REG_CLOBBERS2, \
287 PARAVIRT_BR_CLOBBERS, \
288 PARAVIRT_PR_CLOBBERS, \
289 PARAVIRT_AR_CLOBBERS, \
290 "memory"
291
292#define PARAVIRT_CLOBBERS5 \
293 PARAVIRT_REG_CLOBBERS5, \
294 PARAVIRT_BR_CLOBBERS, \
295 PARAVIRT_PR_CLOBBERS, \
296 PARAVIRT_AR_CLOBBERS, \
297 "memory"
298
299#define PARAVIRT_BR0(op, type) \
300 register unsigned long ia64_clobber asm ("r8"); \
301 asm volatile (paravirt_alt_bundle(__PARAVIRT_BR, \
302 PARAVIRT_TYPE(type)) \
303 : "=r"(ia64_clobber) \
304 : PARAVIRT_OP(op) \
305 : PARAVIRT_CLOBBERS0)
306
307#define PARAVIRT_BR0_RET(op, type) \
308 register unsigned long ia64_intri_res asm ("r8"); \
309 asm volatile (paravirt_alt_bundle(__PARAVIRT_BR, \
310 PARAVIRT_TYPE(type)) \
311 : "=r"(ia64_intri_res) \
312 : PARAVIRT_OP(op) \
313 : PARAVIRT_CLOBBERS0)
314
315#define PARAVIRT_BR1(op, type, arg1) \
316 register unsigned long __##arg1 asm ("r8") = arg1; \
317 register unsigned long ia64_clobber asm ("r8"); \
318 asm volatile (paravirt_alt_bundle(__PARAVIRT_BR, \
319 PARAVIRT_TYPE(type)) \
320 : "=r"(ia64_clobber) \
321 : PARAVIRT_OP(op), "0"(__##arg1) \
322 : PARAVIRT_CLOBBERS1)
323
324#define PARAVIRT_BR1_RET(op, type, arg1) \
325 register unsigned long ia64_intri_res asm ("r8"); \
326 register unsigned long __##arg1 asm ("r8") = arg1; \
327 asm volatile (paravirt_alt_bundle(__PARAVIRT_BR, \
328 PARAVIRT_TYPE(type)) \
329 : "=r"(ia64_intri_res) \
330 : PARAVIRT_OP(op), "0"(__##arg1) \
331 : PARAVIRT_CLOBBERS1)
332
333#define PARAVIRT_BR1_VOID(op, type, arg1) \
334 register void *__##arg1 asm ("r8") = arg1; \
335 register unsigned long ia64_clobber asm ("r8"); \
336 asm volatile (paravirt_alt_bundle(__PARAVIRT_BR, \
337 PARAVIRT_TYPE(type)) \
338 : "=r"(ia64_clobber) \
339 : PARAVIRT_OP(op), "0"(__##arg1) \
340 : PARAVIRT_CLOBBERS1)
341
342#define PARAVIRT_BR2(op, type, arg1, arg2) \
343 register unsigned long __##arg1 asm ("r8") = arg1; \
344 register unsigned long __##arg2 asm ("r9") = arg2; \
345 register unsigned long ia64_clobber1 asm ("r8"); \
346 register unsigned long ia64_clobber2 asm ("r9"); \
347 asm volatile (paravirt_alt_bundle(__PARAVIRT_BR, \
348 PARAVIRT_TYPE(type)) \
349 : "=r"(ia64_clobber1), "=r"(ia64_clobber2) \
350 : PARAVIRT_OP(op), "0"(__##arg1), "1"(__##arg2) \
351 : PARAVIRT_CLOBBERS2)
352
353
354#define PARAVIRT_DEFINE_CPU_OP0(op, type) \
355 static inline void \
356 paravirt_ ## op (void) \
357 { \
358 PARAVIRT_BR0(op, type); \
359 }
360
361#define PARAVIRT_DEFINE_CPU_OP0_RET(op, type) \
362 static inline unsigned long \
363 paravirt_ ## op (void) \
364 { \
365 PARAVIRT_BR0_RET(op, type); \
366 return ia64_intri_res; \
367 }
368
369#define PARAVIRT_DEFINE_CPU_OP1_VOID(op, type) \
370 static inline void \
371 paravirt_ ## op (void *arg1) \
372 { \
373 PARAVIRT_BR1_VOID(op, type, arg1); \
374 }
375
376#define PARAVIRT_DEFINE_CPU_OP1(op, type) \
377 static inline void \
378 paravirt_ ## op (unsigned long arg1) \
379 { \
380 PARAVIRT_BR1(op, type, arg1); \
381 }
382
383#define PARAVIRT_DEFINE_CPU_OP1_RET(op, type) \
384 static inline unsigned long \
385 paravirt_ ## op (unsigned long arg1) \
386 { \
387 PARAVIRT_BR1_RET(op, type, arg1); \
388 return ia64_intri_res; \
389 }
390
391#define PARAVIRT_DEFINE_CPU_OP2(op, type) \
392 static inline void \
393 paravirt_ ## op (unsigned long arg1, \
394 unsigned long arg2) \
395 { \
396 PARAVIRT_BR2(op, type, arg1, arg2); \
397 }
398
399
400PARAVIRT_DEFINE_CPU_OP1_VOID(fc, FC);
401PARAVIRT_DEFINE_CPU_OP1_RET(thash, THASH)
402PARAVIRT_DEFINE_CPU_OP1_RET(get_cpuid, GET_CPUID)
403PARAVIRT_DEFINE_CPU_OP1_RET(get_pmd, GET_PMD)
404PARAVIRT_DEFINE_CPU_OP2(ptcga, PTCGA)
405PARAVIRT_DEFINE_CPU_OP1_RET(get_rr, GET_RR)
406PARAVIRT_DEFINE_CPU_OP2(set_rr, SET_RR)
407PARAVIRT_DEFINE_CPU_OP0(ssm_i, SSM_I)
408PARAVIRT_DEFINE_CPU_OP0(rsm_i, RSM_I)
409PARAVIRT_DEFINE_CPU_OP0_RET(get_psr_i, GET_PSR_I)
410PARAVIRT_DEFINE_CPU_OP1(intrin_local_irq_restore, INTRIN_LOCAL_IRQ_RESTORE)
411
412static inline void
413paravirt_set_rr0_to_rr4(unsigned long val0, unsigned long val1,
414 unsigned long val2, unsigned long val3,
415 unsigned long val4)
416{
417 register unsigned long __val0 asm ("r8") = val0;
418 register unsigned long __val1 asm ("r9") = val1;
419 register unsigned long __val2 asm ("r10") = val2;
420 register unsigned long __val3 asm ("r11") = val3;
421 register unsigned long __val4 asm ("r14") = val4;
422
423 register unsigned long ia64_clobber0 asm ("r8");
424 register unsigned long ia64_clobber1 asm ("r9");
425 register unsigned long ia64_clobber2 asm ("r10");
426 register unsigned long ia64_clobber3 asm ("r11");
427 register unsigned long ia64_clobber4 asm ("r14");
428
429 asm volatile (paravirt_alt_bundle(__PARAVIRT_BR,
430 PARAVIRT_TYPE(SET_RR0_TO_RR4))
431 : "=r"(ia64_clobber0),
432 "=r"(ia64_clobber1),
433 "=r"(ia64_clobber2),
434 "=r"(ia64_clobber3),
435 "=r"(ia64_clobber4)
436 : PARAVIRT_OP(set_rr0_to_rr4),
437 "0"(__val0), "1"(__val1), "2"(__val2),
438 "3"(__val3), "4"(__val4)
439 : PARAVIRT_CLOBBERS5);
440}
441
442/* unsigned long paravirt_getreg(int reg) */
443#define __paravirt_getreg(reg) \
444 ({ \
445 register unsigned long ia64_intri_res asm ("r8"); \
446 register unsigned long __reg asm ("r8") = (reg); \
447 \
448 BUILD_BUG_ON(!__builtin_constant_p(reg)); \
449 asm volatile (paravirt_alt_bundle(__PARAVIRT_BR, \
450 PARAVIRT_TYPE(GETREG) \
451 + (reg)) \
452 : "=r"(ia64_intri_res) \
453 : PARAVIRT_OP(getreg), "0"(__reg) \
454 : PARAVIRT_CLOBBERS1); \
455 \
456 ia64_intri_res; \
457 })
458
459/* void paravirt_setreg(int reg, unsigned long val) */
460#define paravirt_setreg(reg, val) \
461 do { \
462 register unsigned long __val asm ("r8") = val; \
463 register unsigned long __reg asm ("r9") = reg; \
464 register unsigned long ia64_clobber1 asm ("r8"); \
465 register unsigned long ia64_clobber2 asm ("r9"); \
466 \
467 BUILD_BUG_ON(!__builtin_constant_p(reg)); \
468 asm volatile (paravirt_alt_bundle(__PARAVIRT_BR, \
469 PARAVIRT_TYPE(SETREG) \
470 + (reg)) \
471 : "=r"(ia64_clobber1), \
472 "=r"(ia64_clobber2) \
473 : PARAVIRT_OP(setreg), \
474 "1"(__reg), "0"(__val) \
475 : PARAVIRT_CLOBBERS2); \
476 } while (0)
477
478#endif /* ASM_SUPPORTED */
 479#endif /* CONFIG_PARAVIRT && ASM_SUPPORTED */
480
124#endif /* _ASM_IA64_PARAVIRT_PRIVOP_H */ 481#endif /* _ASM_IA64_PARAVIRT_PRIVOP_H */
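The PARAVIRT_DEFINE_CPU_OP* macros above generate tiny C wrappers whose only body is the five-bundle branch stub. Written out, one such expansion looks roughly like this (derived mechanically from the macros shown, for illustration only):

	/* Approximate expansion of PARAVIRT_DEFINE_CPU_OP1_RET(thash, THASH). */
	static inline unsigned long
	paravirt_thash(unsigned long arg1)
	{
		register unsigned long ia64_intri_res asm ("r8");
		register unsigned long __arg1 asm ("r8") = arg1;
		asm volatile (paravirt_alt_bundle(__PARAVIRT_BR,
						  PARAVIRT_PATCH_TYPE_THASH)
			      : "=r"(ia64_intri_res)
			      : PARAVIRT_OP(thash), "0"(__arg1)
			      : PARAVIRT_CLOBBERS1);
		return ia64_intri_res;
	}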
diff --git a/arch/ia64/include/asm/smp.h b/arch/ia64/include/asm/smp.h
index 21c402365d0..59840833625 100644
--- a/arch/ia64/include/asm/smp.h
+++ b/arch/ia64/include/asm/smp.h
@@ -126,7 +126,8 @@ extern void identify_siblings (struct cpuinfo_ia64 *);
126extern int is_multithreading_enabled(void); 126extern int is_multithreading_enabled(void);
127 127
128extern void arch_send_call_function_single_ipi(int cpu); 128extern void arch_send_call_function_single_ipi(int cpu);
129extern void arch_send_call_function_ipi(cpumask_t mask); 129extern void arch_send_call_function_ipi_mask(const struct cpumask *mask);
130#define arch_send_call_function_ipi_mask arch_send_call_function_ipi_mask
130 131
131#else /* CONFIG_SMP */ 132#else /* CONFIG_SMP */
132 133
diff --git a/arch/ia64/include/asm/timex.h b/arch/ia64/include/asm/timex.h
index 4e03cfe74a0..86c7db86118 100644
--- a/arch/ia64/include/asm/timex.h
+++ b/arch/ia64/include/asm/timex.h
@@ -40,5 +40,6 @@ get_cycles (void)
40} 40}
41 41
42extern void ia64_cpu_local_tick (void); 42extern void ia64_cpu_local_tick (void);
43extern unsigned long long ia64_native_sched_clock (void);
43 44
44#endif /* _ASM_IA64_TIMEX_H */ 45#endif /* _ASM_IA64_TIMEX_H */
diff --git a/arch/ia64/include/asm/topology.h b/arch/ia64/include/asm/topology.h
index f260dcf2151..7b4c8c70b2d 100644
--- a/arch/ia64/include/asm/topology.h
+++ b/arch/ia64/include/asm/topology.h
@@ -112,11 +112,6 @@ void build_cpu_to_node_map(void);
112 112
113extern void arch_fix_phys_package_id(int num, u32 slot); 113extern void arch_fix_phys_package_id(int num, u32 slot);
114 114
115#define pcibus_to_cpumask(bus) (pcibus_to_node(bus) == -1 ? \
116 CPU_MASK_ALL : \
117 node_to_cpumask(pcibus_to_node(bus)) \
118 )
119
120#define cpumask_of_pcibus(bus) (pcibus_to_node(bus) == -1 ? \ 115#define cpumask_of_pcibus(bus) (pcibus_to_node(bus) == -1 ? \
121 cpu_all_mask : \ 116 cpu_all_mask : \
122 cpumask_of_node(pcibus_to_node(bus))) 117 cpumask_of_node(pcibus_to_node(bus)))
diff --git a/arch/ia64/include/asm/xen/hypervisor.h b/arch/ia64/include/asm/xen/hypervisor.h
index 7a804e80fc6..e425227a418 100644
--- a/arch/ia64/include/asm/xen/hypervisor.h
+++ b/arch/ia64/include/asm/xen/hypervisor.h
@@ -33,9 +33,6 @@
33#ifndef _ASM_IA64_XEN_HYPERVISOR_H 33#ifndef _ASM_IA64_XEN_HYPERVISOR_H
34#define _ASM_IA64_XEN_HYPERVISOR_H 34#define _ASM_IA64_XEN_HYPERVISOR_H
35 35
36#ifdef CONFIG_XEN
37
38#include <linux/init.h>
39#include <xen/interface/xen.h> 36#include <xen/interface/xen.h>
40#include <xen/interface/version.h> /* to compile feature.c */ 37#include <xen/interface/version.h> /* to compile feature.c */
 41#include <xen/features.h> /* to compile xen-netfront.c */ 38#include <xen/features.h> /* to compile xen-netfront.c */
@@ -43,22 +40,32 @@
43 40
44/* xen_domain_type is set before executing any C code by early_xen_setup */ 41/* xen_domain_type is set before executing any C code by early_xen_setup */
45enum xen_domain_type { 42enum xen_domain_type {
46 XEN_NATIVE, 43 XEN_NATIVE, /* running on bare hardware */
47 XEN_PV_DOMAIN, 44 XEN_PV_DOMAIN, /* running in a PV domain */
 48 XEN_HVM_DOMAIN, 45 XEN_HVM_DOMAIN, /* running in a Xen hvm domain */
49}; 46};
50 47
48#ifdef CONFIG_XEN
51extern enum xen_domain_type xen_domain_type; 49extern enum xen_domain_type xen_domain_type;
50#else
51#define xen_domain_type XEN_NATIVE
52#endif
52 53
53#define xen_domain() (xen_domain_type != XEN_NATIVE) 54#define xen_domain() (xen_domain_type != XEN_NATIVE)
54#define xen_pv_domain() (xen_domain_type == XEN_PV_DOMAIN) 55#define xen_pv_domain() (xen_domain() && \
55#define xen_initial_domain() (xen_pv_domain() && \ 56 xen_domain_type == XEN_PV_DOMAIN)
57#define xen_hvm_domain() (xen_domain() && \
58 xen_domain_type == XEN_HVM_DOMAIN)
59
60#ifdef CONFIG_XEN_DOM0
61#define xen_initial_domain() (xen_pv_domain() && \
56 (xen_start_info->flags & SIF_INITDOMAIN)) 62 (xen_start_info->flags & SIF_INITDOMAIN))
57#define xen_hvm_domain() (xen_domain_type == XEN_HVM_DOMAIN) 63#else
64#define xen_initial_domain() (0)
65#endif
58 66
59/* deprecated. remove this */
60#define is_running_on_xen() (xen_domain_type == XEN_PV_DOMAIN)
61 67
68#ifdef CONFIG_XEN
62extern struct shared_info *HYPERVISOR_shared_info; 69extern struct shared_info *HYPERVISOR_shared_info;
63extern struct start_info *xen_start_info; 70extern struct start_info *xen_start_info;
64 71
@@ -74,16 +81,6 @@ void force_evtchn_callback(void);
74 81
75/* For setup_arch() in arch/ia64/kernel/setup.c */ 82/* For setup_arch() in arch/ia64/kernel/setup.c */
76void xen_ia64_enable_opt_feature(void); 83void xen_ia64_enable_opt_feature(void);
77
78#else /* CONFIG_XEN */
79
80#define xen_domain() (0)
81#define xen_pv_domain() (0)
82#define xen_initial_domain() (0)
83#define xen_hvm_domain() (0)
84#define is_running_on_xen() (0) /* deprecated. remove this */
85#endif 84#endif
86 85
87#define is_initial_xendomain() (0) /* deprecated. remove this */
88
89#endif /* _ASM_IA64_XEN_HYPERVISOR_H */ 86#endif /* _ASM_IA64_XEN_HYPERVISOR_H */
diff --git a/arch/ia64/include/asm/xen/inst.h b/arch/ia64/include/asm/xen/inst.h
index 19c2ae1d878..c53a4761120 100644
--- a/arch/ia64/include/asm/xen/inst.h
+++ b/arch/ia64/include/asm/xen/inst.h
@@ -33,6 +33,9 @@
33#define __paravirt_work_processed_syscall_target \ 33#define __paravirt_work_processed_syscall_target \
34 xen_work_processed_syscall 34 xen_work_processed_syscall
35 35
36#define paravirt_fsyscall_table xen_fsyscall_table
37#define paravirt_fsys_bubble_down xen_fsys_bubble_down
38
36#define MOV_FROM_IFA(reg) \ 39#define MOV_FROM_IFA(reg) \
37 movl reg = XSI_IFA; \ 40 movl reg = XSI_IFA; \
38 ;; \ 41 ;; \
@@ -110,6 +113,27 @@
110.endm 113.endm
111#define MOV_FROM_PSR(pred, reg, clob) __MOV_FROM_PSR pred, reg, clob 114#define MOV_FROM_PSR(pred, reg, clob) __MOV_FROM_PSR pred, reg, clob
112 115
 116/* assuming ar.itc is read with interrupts disabled. */
117#define MOV_FROM_ITC(pred, pred_clob, reg, clob) \
118(pred) movl clob = XSI_ITC_OFFSET; \
119 ;; \
120(pred) ld8 clob = [clob]; \
121(pred) mov reg = ar.itc; \
122 ;; \
123(pred) add reg = reg, clob; \
124 ;; \
125(pred) movl clob = XSI_ITC_LAST; \
126 ;; \
127(pred) ld8 clob = [clob]; \
128 ;; \
129(pred) cmp.geu.unc pred_clob, p0 = clob, reg; \
130 ;; \
131(pred_clob) add reg = 1, clob; \
132 ;; \
133(pred) movl clob = XSI_ITC_LAST; \
134 ;; \
135(pred) st8 [clob] = reg
136
113 137
114#define MOV_TO_IFA(reg, clob) \ 138#define MOV_TO_IFA(reg, clob) \
115 movl clob = XSI_IFA; \ 139 movl clob = XSI_IFA; \
@@ -362,6 +386,10 @@
362#define RSM_PSR_DT \ 386#define RSM_PSR_DT \
363 XEN_HYPER_RSM_PSR_DT 387 XEN_HYPER_RSM_PSR_DT
364 388
389#define RSM_PSR_BE_I(clob0, clob1) \
390 RSM_PSR_I(p0, clob0, clob1); \
391 rum psr.be
392
365#define SSM_PSR_DT_AND_SRLZ_I \ 393#define SSM_PSR_DT_AND_SRLZ_I \
366 XEN_HYPER_SSM_PSR_DT 394 XEN_HYPER_SSM_PSR_DT
367 395
diff --git a/arch/ia64/include/asm/xen/interface.h b/arch/ia64/include/asm/xen/interface.h
index f00fab40854..e951e740bdf 100644
--- a/arch/ia64/include/asm/xen/interface.h
+++ b/arch/ia64/include/asm/xen/interface.h
@@ -209,6 +209,15 @@ struct mapped_regs {
209 unsigned long krs[8]; /* kernel registers */ 209 unsigned long krs[8]; /* kernel registers */
210 unsigned long tmp[16]; /* temp registers 210 unsigned long tmp[16]; /* temp registers
211 (e.g. for hyperprivops) */ 211 (e.g. for hyperprivops) */
212
213 /* itc paravirtualization
214 * vAR.ITC = mAR.ITC + itc_offset
 215 * itc_last is the value most recently handed to
 216 * the guest OS, used to keep the virtual ITC
 217 * from going backwards.
218 */
219 unsigned long itc_offset;
220 unsigned long itc_last;
212 }; 221 };
213 }; 222 };
214}; 223};
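The two new mapped_regs fields implement a virtual ITC that is offset from the machine ITC and kept monotonic: vAR.ITC = mAR.ITC + itc_offset, clamped so it never falls behind itc_last. The MOV_FROM_ITC assembly in xen/inst.h encodes exactly this; a C sketch of the same logic, assuming interrupts are already disabled as that macro requires (xen_virtual_itc is an illustrative name, not kernel code):

	static unsigned long xen_virtual_itc(struct mapped_regs *r)
	{
		unsigned long now = ia64_getreg(_IA64_REG_AR_ITC) + r->itc_offset;

		if (now <= r->itc_last)		/* never hand out a smaller value */
			now = r->itc_last + 1;
		r->itc_last = now;		/* remember what the guest last saw */
		return now;
	}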
diff --git a/arch/ia64/include/asm/xen/minstate.h b/arch/ia64/include/asm/xen/minstate.h
index 4d92d9bbda7..c57fa910f2c 100644
--- a/arch/ia64/include/asm/xen/minstate.h
+++ b/arch/ia64/include/asm/xen/minstate.h
@@ -1,3 +1,12 @@
1
2#ifdef CONFIG_VIRT_CPU_ACCOUNTING
3/* read ar.itc in advance, and use it before leaving bank 0 */
4#define XEN_ACCOUNT_GET_STAMP \
5 MOV_FROM_ITC(pUStk, p6, r20, r2);
6#else
7#define XEN_ACCOUNT_GET_STAMP
8#endif
9
1/* 10/*
2 * DO_SAVE_MIN switches to the kernel stacks (if necessary) and saves 11 * DO_SAVE_MIN switches to the kernel stacks (if necessary) and saves
3 * the minimum state necessary that allows us to turn psr.ic back 12 * the minimum state necessary that allows us to turn psr.ic back
@@ -123,7 +132,7 @@
123 ;; \ 132 ;; \
124.mem.offset 0,0; st8.spill [r16]=r2,16; \ 133.mem.offset 0,0; st8.spill [r16]=r2,16; \
125.mem.offset 8,0; st8.spill [r17]=r3,16; \ 134.mem.offset 8,0; st8.spill [r17]=r3,16; \
126 ACCOUNT_GET_STAMP \ 135 XEN_ACCOUNT_GET_STAMP \
127 adds r2=IA64_PT_REGS_R16_OFFSET,r1; \ 136 adds r2=IA64_PT_REGS_R16_OFFSET,r1; \
128 ;; \ 137 ;; \
129 EXTRA; \ 138 EXTRA; \
diff --git a/arch/ia64/include/asm/xen/patchlist.h b/arch/ia64/include/asm/xen/patchlist.h
new file mode 100644
index 00000000000..eae944e8884
--- /dev/null
+++ b/arch/ia64/include/asm/xen/patchlist.h
@@ -0,0 +1,38 @@
1/******************************************************************************
2 * arch/ia64/include/asm/xen/patchlist.h
3 *
4 * Copyright (c) 2008 Isaku Yamahata <yamahata at valinux co jp>
5 * VA Linux Systems Japan K.K.
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License as published by
9 * the Free Software Foundation; either version 2 of the License, or
10 * (at your option) any later version.
11 *
12 * This program is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 * GNU General Public License for more details.
16 *
17 * You should have received a copy of the GNU General Public License
18 * along with this program; if not, write to the Free Software
19 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
20 *
21 */
22
23#define __paravirt_start_gate_fsyscall_patchlist \
24 __xen_start_gate_fsyscall_patchlist
25#define __paravirt_end_gate_fsyscall_patchlist \
26 __xen_end_gate_fsyscall_patchlist
27#define __paravirt_start_gate_brl_fsys_bubble_down_patchlist \
28 __xen_start_gate_brl_fsys_bubble_down_patchlist
29#define __paravirt_end_gate_brl_fsys_bubble_down_patchlist \
30 __xen_end_gate_brl_fsys_bubble_down_patchlist
31#define __paravirt_start_gate_vtop_patchlist \
32 __xen_start_gate_vtop_patchlist
33#define __paravirt_end_gate_vtop_patchlist \
34 __xen_end_gate_vtop_patchlist
35#define __paravirt_start_gate_mckinley_e9_patchlist \
36 __xen_start_gate_mckinley_e9_patchlist
37#define __paravirt_end_gate_mckinley_e9_patchlist \
38 __xen_end_gate_mckinley_e9_patchlist
diff --git a/arch/ia64/include/asm/xen/privop.h b/arch/ia64/include/asm/xen/privop.h
index 71ec7546e10..fb4ec5e0b06 100644
--- a/arch/ia64/include/asm/xen/privop.h
+++ b/arch/ia64/include/asm/xen/privop.h
@@ -55,6 +55,8 @@
55#define XSI_BANK1_R16 (XSI_BASE + XSI_BANK1_R16_OFS) 55#define XSI_BANK1_R16 (XSI_BASE + XSI_BANK1_R16_OFS)
56#define XSI_BANKNUM (XSI_BASE + XSI_BANKNUM_OFS) 56#define XSI_BANKNUM (XSI_BASE + XSI_BANKNUM_OFS)
57#define XSI_IHA (XSI_BASE + XSI_IHA_OFS) 57#define XSI_IHA (XSI_BASE + XSI_IHA_OFS)
58#define XSI_ITC_OFFSET (XSI_BASE + XSI_ITC_OFFSET_OFS)
59#define XSI_ITC_LAST (XSI_BASE + XSI_ITC_LAST_OFS)
58#endif 60#endif
59 61
60#ifndef __ASSEMBLY__ 62#ifndef __ASSEMBLY__
@@ -67,7 +69,7 @@
67 * may have different semantics depending on whether they are executed 69 * may have different semantics depending on whether they are executed
68 * at PL0 vs PL!=0. When paravirtualized, these instructions mustn't 70 * at PL0 vs PL!=0. When paravirtualized, these instructions mustn't
69 * be allowed to execute directly, lest incorrect semantics result. */ 71 * be allowed to execute directly, lest incorrect semantics result. */
70extern void xen_fc(unsigned long addr); 72extern void xen_fc(void *addr);
71extern unsigned long xen_thash(unsigned long addr); 73extern unsigned long xen_thash(unsigned long addr);
72 74
73/* Note that "ttag" and "cover" are also privilege-sensitive; "ttag" 75/* Note that "ttag" and "cover" are also privilege-sensitive; "ttag"
@@ -80,8 +82,10 @@ extern unsigned long xen_thash(unsigned long addr);
80extern unsigned long xen_get_cpuid(int index); 82extern unsigned long xen_get_cpuid(int index);
81extern unsigned long xen_get_pmd(int index); 83extern unsigned long xen_get_pmd(int index);
82 84
85#ifndef ASM_SUPPORTED
83extern unsigned long xen_get_eflag(void); /* see xen_ia64_getreg */ 86extern unsigned long xen_get_eflag(void); /* see xen_ia64_getreg */
84extern void xen_set_eflag(unsigned long); /* see xen_ia64_setreg */ 87extern void xen_set_eflag(unsigned long); /* see xen_ia64_setreg */
88#endif
85 89
86/************************************************/ 90/************************************************/
87/* Instructions paravirtualized for performance */ 91/* Instructions paravirtualized for performance */
@@ -106,6 +110,7 @@ extern void xen_set_eflag(unsigned long); /* see xen_ia64_setreg */
106#define xen_get_virtual_pend() \ 110#define xen_get_virtual_pend() \
107 (*(((uint8_t *)XEN_MAPPEDREGS->interrupt_mask_addr) - 1)) 111 (*(((uint8_t *)XEN_MAPPEDREGS->interrupt_mask_addr) - 1))
108 112
113#ifndef ASM_SUPPORTED
109/* Although all privileged operations can be left to trap and will 114/* Although all privileged operations can be left to trap and will
110 * be properly handled by Xen, some are frequent enough that we use 115 * be properly handled by Xen, some are frequent enough that we use
111 * hyperprivops for performance. */ 116 * hyperprivops for performance. */
@@ -123,6 +128,7 @@ extern void xen_set_rr0_to_rr4(unsigned long val0, unsigned long val1,
123 unsigned long val4); 128 unsigned long val4);
124extern void xen_set_kr(unsigned long index, unsigned long val); 129extern void xen_set_kr(unsigned long index, unsigned long val);
125extern void xen_ptcga(unsigned long addr, unsigned long size); 130extern void xen_ptcga(unsigned long addr, unsigned long size);
131#endif /* !ASM_SUPPORTED */
126 132
127#endif /* !__ASSEMBLY__ */ 133#endif /* !__ASSEMBLY__ */
128 134
diff --git a/arch/ia64/kernel/Makefile b/arch/ia64/kernel/Makefile
index f2778f2c4fd..5628e9a990a 100644
--- a/arch/ia64/kernel/Makefile
+++ b/arch/ia64/kernel/Makefile
@@ -5,7 +5,7 @@
5extra-y := head.o init_task.o vmlinux.lds 5extra-y := head.o init_task.o vmlinux.lds
6 6
7obj-y := acpi.o entry.o efi.o efi_stub.o gate-data.o fsys.o ia64_ksyms.o irq.o irq_ia64.o \ 7obj-y := acpi.o entry.o efi.o efi_stub.o gate-data.o fsys.o ia64_ksyms.o irq.o irq_ia64.o \
8 irq_lsapic.o ivt.o machvec.o pal.o patch.o process.o perfmon.o ptrace.o sal.o \ 8 irq_lsapic.o ivt.o machvec.o pal.o paravirt_patchlist.o patch.o process.o perfmon.o ptrace.o sal.o \
9 salinfo.o setup.o signal.o sys_ia64.o time.o traps.o unaligned.o \ 9 salinfo.o setup.o signal.o sys_ia64.o time.o traps.o unaligned.o \
10 unwind.o mca.o mca_asm.o topology.o dma-mapping.o 10 unwind.o mca.o mca_asm.o topology.o dma-mapping.o
11 11
@@ -36,7 +36,8 @@ obj-$(CONFIG_PCI_MSI) += msi_ia64.o
36mca_recovery-y += mca_drv.o mca_drv_asm.o 36mca_recovery-y += mca_drv.o mca_drv_asm.o
37obj-$(CONFIG_IA64_MC_ERR_INJECT)+= err_inject.o 37obj-$(CONFIG_IA64_MC_ERR_INJECT)+= err_inject.o
38 38
39obj-$(CONFIG_PARAVIRT) += paravirt.o paravirtentry.o 39obj-$(CONFIG_PARAVIRT) += paravirt.o paravirtentry.o \
40 paravirt_patch.o
40 41
41obj-$(CONFIG_IA64_ESI) += esi.o 42obj-$(CONFIG_IA64_ESI) += esi.o
42ifneq ($(CONFIG_IA64_ESI),) 43ifneq ($(CONFIG_IA64_ESI),)
@@ -45,35 +46,13 @@ endif
45obj-$(CONFIG_DMAR) += pci-dma.o 46obj-$(CONFIG_DMAR) += pci-dma.o
46obj-$(CONFIG_SWIOTLB) += pci-swiotlb.o 47obj-$(CONFIG_SWIOTLB) += pci-swiotlb.o
47 48
48# The gate DSO image is built using a special linker script.
49targets += gate.so gate-syms.o
50
51extra-y += gate.so gate-syms.o gate.lds gate.o
52
53# fp_emulate() expects f2-f5,f16-f31 to contain the user-level state. 49# fp_emulate() expects f2-f5,f16-f31 to contain the user-level state.
54CFLAGS_traps.o += -mfixed-range=f2-f5,f16-f31 50CFLAGS_traps.o += -mfixed-range=f2-f5,f16-f31
55 51
56CPPFLAGS_gate.lds := -P -C -U$(ARCH) 52# The gate DSO image is built using a special linker script.
57 53include $(srctree)/arch/ia64/kernel/Makefile.gate
 58quiet_cmd_gate = GATE $@				 54 # tell gate.lds it is being built for the native case
59 cmd_gate = $(CC) -nostdlib $(GATECFLAGS_$(@F)) -Wl,-T,$(filter-out FORCE,$^) -o $@ 55CPPFLAGS_gate.lds += -D__IA64_GATE_PARAVIRTUALIZED_NATIVE
60
61GATECFLAGS_gate.so = -shared -s -Wl,-soname=linux-gate.so.1 \
62 $(call ld-option, -Wl$(comma)--hash-style=sysv)
63$(obj)/gate.so: $(obj)/gate.lds $(obj)/gate.o FORCE
64 $(call if_changed,gate)
65
66$(obj)/built-in.o: $(obj)/gate-syms.o
67$(obj)/built-in.o: ld_flags += -R $(obj)/gate-syms.o
68
69GATECFLAGS_gate-syms.o = -r
70$(obj)/gate-syms.o: $(obj)/gate.lds $(obj)/gate.o FORCE
71 $(call if_changed,gate)
72
73# gate-data.o contains the gate DSO image as data in section .data.gate.
74# We must build gate.so before we can assemble it.
75# Note: kbuild does not track this dependency due to usage of .incbin
76$(obj)/gate-data.o: $(obj)/gate.so
77 56
78# Calculate NR_IRQ = max(IA64_NATIVE_NR_IRQS, XEN_NR_IRQS, ...) based on config 57# Calculate NR_IRQ = max(IA64_NATIVE_NR_IRQS, XEN_NR_IRQS, ...) based on config
79define sed-y 58define sed-y
@@ -109,9 +88,9 @@ include/asm-ia64/nr-irqs.h: arch/$(SRCARCH)/kernel/nr-irqs.s
109clean-files += $(objtree)/include/asm-ia64/nr-irqs.h 88clean-files += $(objtree)/include/asm-ia64/nr-irqs.h
110 89
111# 90#
112# native ivt.S and entry.S 91# native ivt.S, entry.S and fsys.S
113# 92#
114ASM_PARAVIRT_OBJS = ivt.o entry.o 93ASM_PARAVIRT_OBJS = ivt.o entry.o fsys.o
115define paravirtualized_native 94define paravirtualized_native
116AFLAGS_$(1) += -D__IA64_ASM_PARAVIRTUALIZED_NATIVE 95AFLAGS_$(1) += -D__IA64_ASM_PARAVIRTUALIZED_NATIVE
117AFLAGS_pvchk-sed-$(1) += -D__IA64_ASM_PARAVIRTUALIZED_PVCHECK 96AFLAGS_pvchk-sed-$(1) += -D__IA64_ASM_PARAVIRTUALIZED_PVCHECK
diff --git a/arch/ia64/kernel/Makefile.gate b/arch/ia64/kernel/Makefile.gate
new file mode 100644
index 00000000000..1d87f84069b
--- /dev/null
+++ b/arch/ia64/kernel/Makefile.gate
@@ -0,0 +1,27 @@
1# The gate DSO image is built using a special linker script.
2
3targets += gate.so gate-syms.o
4
5extra-y += gate.so gate-syms.o gate.lds gate.o
6
7CPPFLAGS_gate.lds := -P -C -U$(ARCH)
8
9quiet_cmd_gate = GATE $@
10 cmd_gate = $(CC) -nostdlib $(GATECFLAGS_$(@F)) -Wl,-T,$(filter-out FORCE,$^) -o $@
11
12GATECFLAGS_gate.so = -shared -s -Wl,-soname=linux-gate.so.1 \
13 $(call ld-option, -Wl$(comma)--hash-style=sysv)
14$(obj)/gate.so: $(obj)/gate.lds $(obj)/gate.o FORCE
15 $(call if_changed,gate)
16
17$(obj)/built-in.o: $(obj)/gate-syms.o
18$(obj)/built-in.o: ld_flags += -R $(obj)/gate-syms.o
19
20GATECFLAGS_gate-syms.o = -r
21$(obj)/gate-syms.o: $(obj)/gate.lds $(obj)/gate.o FORCE
22 $(call if_changed,gate)
23
24# gate-data.o contains the gate DSO image as data in section .data.gate.
25# We must build gate.so before we can assemble it.
26# Note: kbuild does not track this dependency due to usage of .incbin
27$(obj)/gate-data.o: $(obj)/gate.so
diff --git a/arch/ia64/kernel/acpi.c b/arch/ia64/kernel/acpi.c
index bdef2ce38c8..5510317db37 100644
--- a/arch/ia64/kernel/acpi.c
+++ b/arch/ia64/kernel/acpi.c
@@ -890,7 +890,7 @@ __init void prefill_possible_map(void)
890 possible, max((possible - available_cpus), 0)); 890 possible, max((possible - available_cpus), 0));
891 891
892 for (i = 0; i < possible; i++) 892 for (i = 0; i < possible; i++)
893 cpu_set(i, cpu_possible_map); 893 set_cpu_possible(i, true);
894} 894}
895 895
896int acpi_map_lsapic(acpi_handle handle, int *pcpu) 896int acpi_map_lsapic(acpi_handle handle, int *pcpu)
@@ -928,9 +928,9 @@ int acpi_map_lsapic(acpi_handle handle, int *pcpu)
928 buffer.length = ACPI_ALLOCATE_BUFFER; 928 buffer.length = ACPI_ALLOCATE_BUFFER;
929 buffer.pointer = NULL; 929 buffer.pointer = NULL;
930 930
931 cpus_complement(tmp_map, cpu_present_map); 931 cpumask_complement(&tmp_map, cpu_present_mask);
932 cpu = first_cpu(tmp_map); 932 cpu = cpumask_first(&tmp_map);
933 if (cpu >= NR_CPUS) 933 if (cpu >= nr_cpu_ids)
934 return -EINVAL; 934 return -EINVAL;
935 935
936 acpi_map_cpu2node(handle, cpu, physid); 936 acpi_map_cpu2node(handle, cpu, physid);
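The hunk above replaces the old cpus_complement()/first_cpu() pair with the cpumask_* API and bounds the result by nr_cpu_ids. Restated as a small self-contained helper (a sketch only; acpi_map_lsapic() keeps this logic inline, and pick_unused_cpu is an illustrative name):

#include <linux/cpumask.h>
#include <linux/errno.h>

/* Pick the lowest CPU id that is not yet present, or -EINVAL if none. */
static int pick_unused_cpu(void)
{
	cpumask_t tmp_map;
	int cpu;

	cpumask_complement(&tmp_map, cpu_present_mask);
	cpu = cpumask_first(&tmp_map);
	if (cpu >= nr_cpu_ids)
		return -EINVAL;
	return cpu;
}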
diff --git a/arch/ia64/kernel/asm-offsets.c b/arch/ia64/kernel/asm-offsets.c
index 742dbb1d5a4..af565016904 100644
--- a/arch/ia64/kernel/asm-offsets.c
+++ b/arch/ia64/kernel/asm-offsets.c
@@ -316,5 +316,7 @@ void foo(void)
316 DEFINE_MAPPED_REG_OFS(XSI_BANK1_R16_OFS, bank1_regs[0]); 316 DEFINE_MAPPED_REG_OFS(XSI_BANK1_R16_OFS, bank1_regs[0]);
317 DEFINE_MAPPED_REG_OFS(XSI_B0NATS_OFS, vbnat); 317 DEFINE_MAPPED_REG_OFS(XSI_B0NATS_OFS, vbnat);
318 DEFINE_MAPPED_REG_OFS(XSI_B1NATS_OFS, vnat); 318 DEFINE_MAPPED_REG_OFS(XSI_B1NATS_OFS, vnat);
319 DEFINE_MAPPED_REG_OFS(XSI_ITC_OFFSET_OFS, itc_offset);
320 DEFINE_MAPPED_REG_OFS(XSI_ITC_LAST_OFS, itc_last);
319#endif /* CONFIG_XEN */ 321#endif /* CONFIG_XEN */
320} 322}
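The two DEFINE_MAPPED_REG_OFS() additions make the new struct fields visible to assembly: asm-offsets.c emits each offset as a magic marker in its assembler output, which kbuild turns into the XSI_ITC_OFFSET_OFS/XSI_ITC_LAST_OFS constants consumed by privop.h. A hedged sketch of that mechanism with illustrative stand-ins (mapped_regs_sketch, generate_offsets), not the exact macros:

#include <stddef.h>

/* Stand-in for the kbuild DEFINE() trick used by asm-offsets.c. */
#define DEFINE(sym, val) \
	asm volatile("\n->" #sym " %0 " #val : : "i" (val))

struct mapped_regs_sketch {
	unsigned long itc_offset;
	unsigned long itc_last;
};

void generate_offsets(void)
{
	DEFINE(XSI_ITC_OFFSET_OFS, offsetof(struct mapped_regs_sketch, itc_offset));
	DEFINE(XSI_ITC_LAST_OFS, offsetof(struct mapped_regs_sketch, itc_last));
}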
diff --git a/arch/ia64/kernel/efi.c b/arch/ia64/kernel/efi.c
index efaff15d8cf..7ef80e8161c 100644
--- a/arch/ia64/kernel/efi.c
+++ b/arch/ia64/kernel/efi.c
@@ -456,6 +456,7 @@ efi_map_pal_code (void)
456 GRANULEROUNDDOWN((unsigned long) pal_vaddr), 456 GRANULEROUNDDOWN((unsigned long) pal_vaddr),
457 pte_val(pfn_pte(__pa(pal_vaddr) >> PAGE_SHIFT, PAGE_KERNEL)), 457 pte_val(pfn_pte(__pa(pal_vaddr) >> PAGE_SHIFT, PAGE_KERNEL)),
458 IA64_GRANULE_SHIFT); 458 IA64_GRANULE_SHIFT);
459 paravirt_dv_serialize_data();
459 ia64_set_psr(psr); /* restore psr */ 460 ia64_set_psr(psr); /* restore psr */
460} 461}
461 462
diff --git a/arch/ia64/kernel/entry.S b/arch/ia64/kernel/entry.S
index e5341e2c117..ccfdeee9d89 100644
--- a/arch/ia64/kernel/entry.S
+++ b/arch/ia64/kernel/entry.S
@@ -735,7 +735,7 @@ GLOBAL_ENTRY(__paravirt_leave_syscall)
735__paravirt_work_processed_syscall: 735__paravirt_work_processed_syscall:
736#ifdef CONFIG_VIRT_CPU_ACCOUNTING 736#ifdef CONFIG_VIRT_CPU_ACCOUNTING
737 adds r2=PT(LOADRS)+16,r12 737 adds r2=PT(LOADRS)+16,r12
738(pUStk) mov.m r22=ar.itc // fetch time at leave 738 MOV_FROM_ITC(pUStk, p9, r22, r19) // fetch time at leave
739 adds r18=TI_FLAGS+IA64_TASK_SIZE,r13 739 adds r18=TI_FLAGS+IA64_TASK_SIZE,r13
740 ;; 740 ;;
741(p6) ld4 r31=[r18] // load current_thread_info()->flags 741(p6) ld4 r31=[r18] // load current_thread_info()->flags
@@ -984,7 +984,7 @@ GLOBAL_ENTRY(__paravirt_leave_kernel)
984#ifdef CONFIG_VIRT_CPU_ACCOUNTING 984#ifdef CONFIG_VIRT_CPU_ACCOUNTING
985 .pred.rel.mutex pUStk,pKStk 985 .pred.rel.mutex pUStk,pKStk
986 MOV_FROM_PSR(pKStk, r22, r29) // M2 read PSR now that interrupts are disabled 986 MOV_FROM_PSR(pKStk, r22, r29) // M2 read PSR now that interrupts are disabled
987(pUStk) mov.m r22=ar.itc // M fetch time at leave 987 MOV_FROM_ITC(pUStk, p9, r22, r29) // M fetch time at leave
988 nop.i 0 988 nop.i 0
989 ;; 989 ;;
990#else 990#else
diff --git a/arch/ia64/kernel/fsys.S b/arch/ia64/kernel/fsys.S
index c1625c7e177..3567d54f8ce 100644
--- a/arch/ia64/kernel/fsys.S
+++ b/arch/ia64/kernel/fsys.S
@@ -25,6 +25,7 @@
25#include <asm/unistd.h> 25#include <asm/unistd.h>
26 26
27#include "entry.h" 27#include "entry.h"
28#include "paravirt_inst.h"
28 29
29/* 30/*
30 * See Documentation/ia64/fsys.txt for details on fsyscalls. 31 * See Documentation/ia64/fsys.txt for details on fsyscalls.
@@ -279,7 +280,7 @@ ENTRY(fsys_gettimeofday)
279(p9) cmp.eq p13,p0 = 0,r30 // if mmio_ptr, clear p13 jitter control 280(p9) cmp.eq p13,p0 = 0,r30 // if mmio_ptr, clear p13 jitter control
280 ;; 281 ;;
281 .pred.rel.mutex p8,p9 282 .pred.rel.mutex p8,p9
282(p8) mov r2 = ar.itc // CPU_TIMER. 36 clocks latency!!! 283 MOV_FROM_ITC(p8, p6, r2, r10) // CPU_TIMER. 36 clocks latency!!!
283(p9) ld8 r2 = [r30] // MMIO_TIMER. Could also have latency issues.. 284(p9) ld8 r2 = [r30] // MMIO_TIMER. Could also have latency issues..
284(p13) ld8 r25 = [r19] // get itc_lastcycle value 285(p13) ld8 r25 = [r19] // get itc_lastcycle value
285 ld8 r9 = [r22],IA64_TIMESPEC_TV_NSEC_OFFSET // tv_sec 286 ld8 r9 = [r22],IA64_TIMESPEC_TV_NSEC_OFFSET // tv_sec
@@ -418,7 +419,7 @@ EX(.fail_efault, ld8 r14=[r33]) // r14 <- *set
418 mov r17=(1 << (SIGKILL - 1)) | (1 << (SIGSTOP - 1)) 419 mov r17=(1 << (SIGKILL - 1)) | (1 << (SIGSTOP - 1))
419 ;; 420 ;;
420 421
421 rsm psr.i // mask interrupt delivery 422 RSM_PSR_I(p0, r18, r19) // mask interrupt delivery
422 mov ar.ccv=0 423 mov ar.ccv=0
423 andcm r14=r14,r17 // filter out SIGKILL & SIGSTOP 424 andcm r14=r14,r17 // filter out SIGKILL & SIGSTOP
424 425
@@ -491,7 +492,7 @@ EX(.fail_efault, ld8 r14=[r33]) // r14 <- *set
491#ifdef CONFIG_SMP 492#ifdef CONFIG_SMP
492 st4.rel [r31]=r0 // release the lock 493 st4.rel [r31]=r0 // release the lock
493#endif 494#endif
494 ssm psr.i 495 SSM_PSR_I(p0, p9, r31)
495 ;; 496 ;;
496 497
497 srlz.d // ensure psr.i is set again 498 srlz.d // ensure psr.i is set again
@@ -513,7 +514,7 @@ EX(.fail_efault, (p15) st8 [r34]=r3)
513#ifdef CONFIG_SMP 514#ifdef CONFIG_SMP
514 st4.rel [r31]=r0 // release the lock 515 st4.rel [r31]=r0 // release the lock
515#endif 516#endif
516 ssm psr.i 517 SSM_PSR_I(p0, p9, r17)
517 ;; 518 ;;
518 srlz.d 519 srlz.d
519 br.sptk.many fsys_fallback_syscall // with signal pending, do the heavy-weight syscall 520 br.sptk.many fsys_fallback_syscall // with signal pending, do the heavy-weight syscall
@@ -521,7 +522,7 @@ EX(.fail_efault, (p15) st8 [r34]=r3)
521#ifdef CONFIG_SMP 522#ifdef CONFIG_SMP
522.lock_contention: 523.lock_contention:
523 /* Rather than spinning here, fall back on doing a heavy-weight syscall. */ 524 /* Rather than spinning here, fall back on doing a heavy-weight syscall. */
524 ssm psr.i 525 SSM_PSR_I(p0, p9, r17)
525 ;; 526 ;;
526 srlz.d 527 srlz.d
527 br.sptk.many fsys_fallback_syscall 528 br.sptk.many fsys_fallback_syscall
@@ -592,17 +593,17 @@ ENTRY(fsys_fallback_syscall)
592 adds r17=-1024,r15 593 adds r17=-1024,r15
593 movl r14=sys_call_table 594 movl r14=sys_call_table
594 ;; 595 ;;
595 rsm psr.i 596 RSM_PSR_I(p0, r26, r27)
596 shladd r18=r17,3,r14 597 shladd r18=r17,3,r14
597 ;; 598 ;;
598 ld8 r18=[r18] // load normal (heavy-weight) syscall entry-point 599 ld8 r18=[r18] // load normal (heavy-weight) syscall entry-point
599 mov r29=psr // read psr (12 cyc load latency) 600 MOV_FROM_PSR(p0, r29, r26) // read psr (12 cyc load latency)
600 mov r27=ar.rsc 601 mov r27=ar.rsc
601 mov r21=ar.fpsr 602 mov r21=ar.fpsr
602 mov r26=ar.pfs 603 mov r26=ar.pfs
603END(fsys_fallback_syscall) 604END(fsys_fallback_syscall)
604 /* FALL THROUGH */ 605 /* FALL THROUGH */
605GLOBAL_ENTRY(fsys_bubble_down) 606GLOBAL_ENTRY(paravirt_fsys_bubble_down)
606 .prologue 607 .prologue
607 .altrp b6 608 .altrp b6
608 .body 609 .body
@@ -640,7 +641,7 @@ GLOBAL_ENTRY(fsys_bubble_down)
640 * 641 *
641 * PSR.BE : already is turned off in __kernel_syscall_via_epc() 642 * PSR.BE : already is turned off in __kernel_syscall_via_epc()
642 * PSR.AC : don't care (kernel normally turns PSR.AC on) 643 * PSR.AC : don't care (kernel normally turns PSR.AC on)
643 * PSR.I : already turned off by the time fsys_bubble_down gets 644 * PSR.I : already turned off by the time paravirt_fsys_bubble_down gets
644 * invoked 645 * invoked
645 * PSR.DFL: always 0 (kernel never turns it on) 646 * PSR.DFL: always 0 (kernel never turns it on)
646 * PSR.DFH: don't care --- kernel never touches f32-f127 on its own 647 * PSR.DFH: don't care --- kernel never touches f32-f127 on its own
@@ -650,7 +651,7 @@ GLOBAL_ENTRY(fsys_bubble_down)
650 * PSR.DB : don't care --- kernel never enables kernel-level 651 * PSR.DB : don't care --- kernel never enables kernel-level
651 * breakpoints 652 * breakpoints
652 * PSR.TB : must be 0 already; if it wasn't zero on entry to 653 * PSR.TB : must be 0 already; if it wasn't zero on entry to
653 * __kernel_syscall_via_epc, the branch to fsys_bubble_down 654 * __kernel_syscall_via_epc, the branch to paravirt_fsys_bubble_down
654 * will trigger a taken branch; the taken-trap-handler then 655 * will trigger a taken branch; the taken-trap-handler then
655 * converts the syscall into a break-based system-call. 656 * converts the syscall into a break-based system-call.
656 */ 657 */
@@ -683,7 +684,7 @@ GLOBAL_ENTRY(fsys_bubble_down)
683 ;; 684 ;;
684 mov ar.rsc=0 // M2 set enforced lazy mode, pl 0, LE, loadrs=0 685 mov ar.rsc=0 // M2 set enforced lazy mode, pl 0, LE, loadrs=0
685#ifdef CONFIG_VIRT_CPU_ACCOUNTING 686#ifdef CONFIG_VIRT_CPU_ACCOUNTING
686 mov.m r30=ar.itc // M get cycle for accounting 687 MOV_FROM_ITC(p0, p6, r30, r23) // M get cycle for accounting
687#else 688#else
688 nop.m 0 689 nop.m 0
689#endif 690#endif
@@ -734,21 +735,21 @@ GLOBAL_ENTRY(fsys_bubble_down)
734 mov rp=r14 // I0 set the real return addr 735 mov rp=r14 // I0 set the real return addr
735 and r3=_TIF_SYSCALL_TRACEAUDIT,r3 // A 736 and r3=_TIF_SYSCALL_TRACEAUDIT,r3 // A
736 ;; 737 ;;
737 ssm psr.i // M2 we're on kernel stacks now, reenable irqs 738 SSM_PSR_I(p0, p6, r22) // M2 we're on kernel stacks now, reenable irqs
738 cmp.eq p8,p0=r3,r0 // A 739 cmp.eq p8,p0=r3,r0 // A
739(p10) br.cond.spnt.many ia64_ret_from_syscall // B return if bad call-frame or r15 is a NaT 740(p10) br.cond.spnt.many ia64_ret_from_syscall // B return if bad call-frame or r15 is a NaT
740 741
741 nop.m 0 742 nop.m 0
742(p8) br.call.sptk.many b6=b6 // B (ignore return address) 743(p8) br.call.sptk.many b6=b6 // B (ignore return address)
743 br.cond.spnt ia64_trace_syscall // B 744 br.cond.spnt ia64_trace_syscall // B
744END(fsys_bubble_down) 745END(paravirt_fsys_bubble_down)
745 746
746 .rodata 747 .rodata
747 .align 8 748 .align 8
748 .globl fsyscall_table 749 .globl paravirt_fsyscall_table
749 750
750 data8 fsys_bubble_down 751 data8 paravirt_fsys_bubble_down
751fsyscall_table: 752paravirt_fsyscall_table:
752 data8 fsys_ni_syscall 753 data8 fsys_ni_syscall
753 data8 0 // exit // 1025 754 data8 0 // exit // 1025
754 data8 0 // read 755 data8 0 // read
@@ -1033,4 +1034,4 @@ fsyscall_table:
1033 1034
1034 // fill in zeros for the remaining entries 1035 // fill in zeros for the remaining entries
1035 .zero: 1036 .zero:
1036 .space fsyscall_table + 8*NR_syscalls - .zero, 0 1037 .space paravirt_fsyscall_table + 8*NR_syscalls - .zero, 0
diff --git a/arch/ia64/kernel/gate.S b/arch/ia64/kernel/gate.S
index 74b1ccce4e8..cf5e0a105e1 100644
--- a/arch/ia64/kernel/gate.S
+++ b/arch/ia64/kernel/gate.S
@@ -13,6 +13,7 @@
13#include <asm/sigcontext.h> 13#include <asm/sigcontext.h>
14#include <asm/system.h> 14#include <asm/system.h>
15#include <asm/unistd.h> 15#include <asm/unistd.h>
16#include "paravirt_inst.h"
16 17
17/* 18/*
18 * We can't easily refer to symbols inside the kernel. To avoid full runtime relocation, 19 * We can't easily refer to symbols inside the kernel. To avoid full runtime relocation,
@@ -48,87 +49,6 @@ GLOBAL_ENTRY(__kernel_syscall_via_break)
48} 49}
49END(__kernel_syscall_via_break) 50END(__kernel_syscall_via_break)
50 51
51/*
52 * On entry:
53 * r11 = saved ar.pfs
54 * r15 = system call #
55 * b0 = saved return address
56 * b6 = return address
57 * On exit:
58 * r11 = saved ar.pfs
59 * r15 = system call #
60 * b0 = saved return address
61 * all other "scratch" registers: undefined
62 * all "preserved" registers: same as on entry
63 */
64
65GLOBAL_ENTRY(__kernel_syscall_via_epc)
66 .prologue
67 .altrp b6
68 .body
69{
70 /*
71 * Note: the kernel cannot assume that the first two instructions in this
72 * bundle get executed. The remaining code must be safe even if
73 * they do not get executed.
74 */
75 adds r17=-1024,r15 // A
76 mov r10=0 // A default to successful syscall execution
77 epc // B causes split-issue
78}
79 ;;
80 rsm psr.be | psr.i // M2 (5 cyc to srlz.d)
81 LOAD_FSYSCALL_TABLE(r14) // X
82 ;;
83 mov r16=IA64_KR(CURRENT) // M2 (12 cyc)
84 shladd r18=r17,3,r14 // A
85 mov r19=NR_syscalls-1 // A
86 ;;
87 lfetch [r18] // M0|1
88 mov r29=psr // M2 (12 cyc)
89 // If r17 is a NaT, p6 will be zero
90 cmp.geu p6,p7=r19,r17 // A (sysnr > 0 && sysnr < 1024+NR_syscalls)?
91 ;;
92 mov r21=ar.fpsr // M2 (12 cyc)
93 tnat.nz p10,p9=r15 // I0
94 mov.i r26=ar.pfs // I0 (would stall anyhow due to srlz.d...)
95 ;;
96 srlz.d // M0 (forces split-issue) ensure PSR.BE==0
97(p6) ld8 r18=[r18] // M0|1
98 nop.i 0
99 ;;
100 nop.m 0
101(p6) tbit.z.unc p8,p0=r18,0 // I0 (dual-issues with "mov b7=r18"!)
102 nop.i 0
103 ;;
104(p8) ssm psr.i
105(p6) mov b7=r18 // I0
106(p8) br.dptk.many b7 // B
107
108 mov r27=ar.rsc // M2 (12 cyc)
109/*
110 * brl.cond doesn't work as intended because the linker would convert this branch
111 * into a branch to a PLT. Perhaps there will be a way to avoid this with some
112 * future version of the linker. In the meantime, we just use an indirect branch
113 * instead.
114 */
115#ifdef CONFIG_ITANIUM
116(p6) add r14=-8,r14 // r14 <- addr of fsys_bubble_down entry
117 ;;
118(p6) ld8 r14=[r14] // r14 <- fsys_bubble_down
119 ;;
120(p6) mov b7=r14
121(p6) br.sptk.many b7
122#else
123 BRL_COND_FSYS_BUBBLE_DOWN(p6)
124#endif
125 ssm psr.i
126 mov r10=-1
127(p10) mov r8=EINVAL
128(p9) mov r8=ENOSYS
129 FSYS_RETURN
130END(__kernel_syscall_via_epc)
131
132# define ARG0_OFF (16 + IA64_SIGFRAME_ARG0_OFFSET) 52# define ARG0_OFF (16 + IA64_SIGFRAME_ARG0_OFFSET)
133# define ARG1_OFF (16 + IA64_SIGFRAME_ARG1_OFFSET) 53# define ARG1_OFF (16 + IA64_SIGFRAME_ARG1_OFFSET)
134# define ARG2_OFF (16 + IA64_SIGFRAME_ARG2_OFFSET) 54# define ARG2_OFF (16 + IA64_SIGFRAME_ARG2_OFFSET)
@@ -374,3 +294,92 @@ restore_rbs:
374 // invala not necessary as that will happen when returning to user-mode 294 // invala not necessary as that will happen when returning to user-mode
375 br.cond.sptk back_from_restore_rbs 295 br.cond.sptk back_from_restore_rbs
376END(__kernel_sigtramp) 296END(__kernel_sigtramp)
297
298/*
299 * On entry:
300 * r11 = saved ar.pfs
301 * r15 = system call #
302 * b0 = saved return address
303 * b6 = return address
304 * On exit:
305 * r11 = saved ar.pfs
306 * r15 = system call #
307 * b0 = saved return address
308 * all other "scratch" registers: undefined
309 * all "preserved" registers: same as on entry
310 */
311
312GLOBAL_ENTRY(__kernel_syscall_via_epc)
313 .prologue
314 .altrp b6
315 .body
316{
317 /*
318 * Note: the kernel cannot assume that the first two instructions in this
319 * bundle get executed. The remaining code must be safe even if
320 * they do not get executed.
321 */
322 adds r17=-1024,r15 // A
323 mov r10=0 // A default to successful syscall execution
324 epc // B causes split-issue
325}
326 ;;
327 RSM_PSR_BE_I(r20, r22) // M2 (5 cyc to srlz.d)
328 LOAD_FSYSCALL_TABLE(r14) // X
329 ;;
330 mov r16=IA64_KR(CURRENT) // M2 (12 cyc)
331 shladd r18=r17,3,r14 // A
332 mov r19=NR_syscalls-1 // A
333 ;;
334 lfetch [r18] // M0|1
335 MOV_FROM_PSR(p0, r29, r8) // M2 (12 cyc)
336 // If r17 is a NaT, p6 will be zero
337 cmp.geu p6,p7=r19,r17 // A (sysnr > 0 && sysnr < 1024+NR_syscalls)?
338 ;;
339 mov r21=ar.fpsr // M2 (12 cyc)
340 tnat.nz p10,p9=r15 // I0
341 mov.i r26=ar.pfs // I0 (would stall anyhow due to srlz.d...)
342 ;;
343 srlz.d // M0 (forces split-issue) ensure PSR.BE==0
344(p6) ld8 r18=[r18] // M0|1
345 nop.i 0
346 ;;
347 nop.m 0
348(p6) tbit.z.unc p8,p0=r18,0 // I0 (dual-issues with "mov b7=r18"!)
349 nop.i 0
350 ;;
351 SSM_PSR_I(p8, p14, r25)
352(p6) mov b7=r18 // I0
353(p8) br.dptk.many b7 // B
354
355 mov r27=ar.rsc // M2 (12 cyc)
356/*
357 * brl.cond doesn't work as intended because the linker would convert this branch
358 * into a branch to a PLT. Perhaps there will be a way to avoid this with some
359 * future version of the linker. In the meantime, we just use an indirect branch
360 * instead.
361 */
362#ifdef CONFIG_ITANIUM
363(p6) add r14=-8,r14 // r14 <- addr of fsys_bubble_down entry
364 ;;
365(p6) ld8 r14=[r14] // r14 <- fsys_bubble_down
366 ;;
367(p6) mov b7=r14
368(p6) br.sptk.many b7
369#else
370 BRL_COND_FSYS_BUBBLE_DOWN(p6)
371#endif
372 SSM_PSR_I(p0, p14, r10)
373 mov r10=-1
374(p10) mov r8=EINVAL
375(p9) mov r8=ENOSYS
376 FSYS_RETURN
377
378#ifdef CONFIG_PARAVIRT
379 /*
 380	 * pad to make the size of this symbol constant
381 * independent of paravirtualization.
382 */
383 .align PAGE_SIZE / 8
384#endif
385END(__kernel_syscall_via_epc)
diff --git a/arch/ia64/kernel/gate.lds.S b/arch/ia64/kernel/gate.lds.S
index 3cb1abc00e2..88c64ed47c3 100644
--- a/arch/ia64/kernel/gate.lds.S
+++ b/arch/ia64/kernel/gate.lds.S
@@ -7,6 +7,7 @@
7 7
8 8
9#include <asm/system.h> 9#include <asm/system.h>
10#include "paravirt_patchlist.h"
10 11
11SECTIONS 12SECTIONS
12{ 13{
@@ -33,21 +34,21 @@ SECTIONS
33 . = GATE_ADDR + 0x600; 34 . = GATE_ADDR + 0x600;
34 35
35 .data.patch : { 36 .data.patch : {
36 __start_gate_mckinley_e9_patchlist = .; 37 __paravirt_start_gate_mckinley_e9_patchlist = .;
37 *(.data.patch.mckinley_e9) 38 *(.data.patch.mckinley_e9)
38 __end_gate_mckinley_e9_patchlist = .; 39 __paravirt_end_gate_mckinley_e9_patchlist = .;
39 40
40 __start_gate_vtop_patchlist = .; 41 __paravirt_start_gate_vtop_patchlist = .;
41 *(.data.patch.vtop) 42 *(.data.patch.vtop)
42 __end_gate_vtop_patchlist = .; 43 __paravirt_end_gate_vtop_patchlist = .;
43 44
44 __start_gate_fsyscall_patchlist = .; 45 __paravirt_start_gate_fsyscall_patchlist = .;
45 *(.data.patch.fsyscall_table) 46 *(.data.patch.fsyscall_table)
46 __end_gate_fsyscall_patchlist = .; 47 __paravirt_end_gate_fsyscall_patchlist = .;
47 48
48 __start_gate_brl_fsys_bubble_down_patchlist = .; 49 __paravirt_start_gate_brl_fsys_bubble_down_patchlist = .;
49 *(.data.patch.brl_fsys_bubble_down) 50 *(.data.patch.brl_fsys_bubble_down)
50 __end_gate_brl_fsys_bubble_down_patchlist = .; 51 __paravirt_end_gate_brl_fsys_bubble_down_patchlist = .;
51 } :readable 52 } :readable
52 53
53 .IA_64.unwind_info : { *(.IA_64.unwind_info*) } 54 .IA_64.unwind_info : { *(.IA_64.unwind_info*) }
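With the __paravirt_ prefix, the same gate.lds.S can bracket the patch lists for either flavor, and patchlist.h maps those symbol names per build. A hedged sketch of how one start/end pair is consumed from C, assuming the usual ia64 patch-list layout of self-relative s32 entries (arch/ia64/kernel/patch.c walks the native lists this way; walk_vtop_patchlist is an illustrative name):

#include <linux/types.h>

extern char __paravirt_start_gate_vtop_patchlist[];
extern char __paravirt_end_gate_vtop_patchlist[];

static void walk_vtop_patchlist(void (*apply)(unsigned long insn_addr))
{
	s32 *offp = (s32 *)__paravirt_start_gate_vtop_patchlist;
	s32 *end = (s32 *)__paravirt_end_gate_vtop_patchlist;

	while (offp < end) {
		/* each entry points at an instruction to patch */
		apply((unsigned long)((char *)offp + *offp));
		offp++;
	}
}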
diff --git a/arch/ia64/kernel/head.S b/arch/ia64/kernel/head.S
index 59301c47280..23f846de62d 100644
--- a/arch/ia64/kernel/head.S
+++ b/arch/ia64/kernel/head.S
@@ -1050,7 +1050,7 @@ END(ia64_delay_loop)
1050 * except that the multiplication and the shift are done with 128-bit 1050 * except that the multiplication and the shift are done with 128-bit
1051 * intermediate precision so that we can produce a full 64-bit result. 1051 * intermediate precision so that we can produce a full 64-bit result.
1052 */ 1052 */
1053GLOBAL_ENTRY(sched_clock) 1053GLOBAL_ENTRY(ia64_native_sched_clock)
1054 addl r8=THIS_CPU(cpu_info) + IA64_CPUINFO_NSEC_PER_CYC_OFFSET,r0 1054 addl r8=THIS_CPU(cpu_info) + IA64_CPUINFO_NSEC_PER_CYC_OFFSET,r0
1055 mov.m r9=ar.itc // fetch cycle-counter (35 cyc) 1055 mov.m r9=ar.itc // fetch cycle-counter (35 cyc)
1056 ;; 1056 ;;
@@ -1066,7 +1066,13 @@ GLOBAL_ENTRY(sched_clock)
1066 ;; 1066 ;;
1067 shrp r8=r9,r8,IA64_NSEC_PER_CYC_SHIFT 1067 shrp r8=r9,r8,IA64_NSEC_PER_CYC_SHIFT
1068 br.ret.sptk.many rp 1068 br.ret.sptk.many rp
1069END(sched_clock) 1069END(ia64_native_sched_clock)
1070#ifndef CONFIG_PARAVIRT
1071 //unsigned long long
1072 //sched_clock(void) __attribute__((alias("ia64_native_sched_clock")));
1073 .global sched_clock
1074sched_clock = ia64_native_sched_clock
1075#endif
1070 1076
1071#ifdef CONFIG_VIRT_CPU_ACCOUNTING 1077#ifdef CONFIG_VIRT_CPU_ACCOUNTING
1072GLOBAL_ENTRY(cycle_to_cputime) 1078GLOBAL_ENTRY(cycle_to_cputime)
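The assembler alias above does in gas what the commented-out C would do: when CONFIG_PARAVIRT is off, sched_clock is simply another name for ia64_native_sched_clock. A C-level sketch of the same alias attribute (the stand-in body is illustrative; the real routine is the assembly above):

unsigned long long ia64_native_sched_clock(void)
{
	return 0;	/* real version scales ar.itc by nsec_per_cyc */
}

unsigned long long sched_clock(void)
	__attribute__((alias("ia64_native_sched_clock")));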
diff --git a/arch/ia64/kernel/ivt.S b/arch/ia64/kernel/ivt.S
index f675d8e3385..ec9a5fdfa1b 100644
--- a/arch/ia64/kernel/ivt.S
+++ b/arch/ia64/kernel/ivt.S
@@ -804,7 +804,7 @@ ENTRY(break_fault)
804/////////////////////////////////////////////////////////////////////// 804///////////////////////////////////////////////////////////////////////
805 st1 [r16]=r0 // M2|3 clear current->thread.on_ustack flag 805 st1 [r16]=r0 // M2|3 clear current->thread.on_ustack flag
806#ifdef CONFIG_VIRT_CPU_ACCOUNTING 806#ifdef CONFIG_VIRT_CPU_ACCOUNTING
807 mov.m r30=ar.itc // M get cycle for accounting 807 MOV_FROM_ITC(p0, p14, r30, r18) // M get cycle for accounting
808#else 808#else
809 mov b6=r30 // I0 setup syscall handler branch reg early 809 mov b6=r30 // I0 setup syscall handler branch reg early
810#endif 810#endif
diff --git a/arch/ia64/kernel/mca.c b/arch/ia64/kernel/mca.c
index bab1de2d2f6..8f33a884042 100644
--- a/arch/ia64/kernel/mca.c
+++ b/arch/ia64/kernel/mca.c
@@ -1456,9 +1456,9 @@ ia64_mca_cmc_int_caller(int cmc_irq, void *arg)
1456 1456
1457 ia64_mca_cmc_int_handler(cmc_irq, arg); 1457 ia64_mca_cmc_int_handler(cmc_irq, arg);
1458 1458
1459 for (++cpuid ; cpuid < NR_CPUS && !cpu_online(cpuid) ; cpuid++); 1459 cpuid = cpumask_next(cpuid+1, cpu_online_mask);
1460 1460
1461 if (cpuid < NR_CPUS) { 1461 if (cpuid < nr_cpu_ids) {
1462 platform_send_ipi(cpuid, IA64_CMCP_VECTOR, IA64_IPI_DM_INT, 0); 1462 platform_send_ipi(cpuid, IA64_CMCP_VECTOR, IA64_IPI_DM_INT, 0);
1463 } else { 1463 } else {
1464 /* If no log record, switch out of polling mode */ 1464 /* If no log record, switch out of polling mode */
@@ -1525,7 +1525,7 @@ ia64_mca_cpe_int_caller(int cpe_irq, void *arg)
1525 1525
1526 ia64_mca_cpe_int_handler(cpe_irq, arg); 1526 ia64_mca_cpe_int_handler(cpe_irq, arg);
1527 1527
1528 for (++cpuid ; cpuid < NR_CPUS && !cpu_online(cpuid) ; cpuid++); 1528 cpuid = cpumask_next(cpuid+1, cpu_online_mask);
1529 1529
1530 if (cpuid < NR_CPUS) { 1530 if (cpuid < NR_CPUS) {
1531 platform_send_ipi(cpuid, IA64_CPEP_VECTOR, IA64_IPI_DM_INT, 0); 1531 platform_send_ipi(cpuid, IA64_CPEP_VECTOR, IA64_IPI_DM_INT, 0);
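The mca.c hunks replace the open-coded "scan for the next online CPU" loop with cpumask_next(). Restating the new idiom as a tiny helper that mirrors the hunk above (next_polling_cpu is an illustrative name; the real code stays inline in the interrupt callers):

#include <linux/cpumask.h>

/* Hand the poller on to another online CPU, or return -1 so the caller
 * can switch out of polling mode. */
static int next_polling_cpu(int cpuid)
{
	cpuid = cpumask_next(cpuid + 1, cpu_online_mask);
	return cpuid < nr_cpu_ids ? cpuid : -1;
}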
diff --git a/arch/ia64/kernel/module.c b/arch/ia64/kernel/module.c
index aaa7d901521..da3b0cf495a 100644
--- a/arch/ia64/kernel/module.c
+++ b/arch/ia64/kernel/module.c
@@ -446,6 +446,14 @@ module_frob_arch_sections (Elf_Ehdr *ehdr, Elf_Shdr *sechdrs, char *secstrings,
446 mod->arch.opd = s; 446 mod->arch.opd = s;
447 else if (strcmp(".IA_64.unwind", secstrings + s->sh_name) == 0) 447 else if (strcmp(".IA_64.unwind", secstrings + s->sh_name) == 0)
448 mod->arch.unwind = s; 448 mod->arch.unwind = s;
449#ifdef CONFIG_PARAVIRT
450 else if (strcmp(".paravirt_bundles",
451 secstrings + s->sh_name) == 0)
452 mod->arch.paravirt_bundles = s;
453 else if (strcmp(".paravirt_insts",
454 secstrings + s->sh_name) == 0)
455 mod->arch.paravirt_insts = s;
456#endif
449 457
450 if (!mod->arch.core_plt || !mod->arch.init_plt || !mod->arch.got || !mod->arch.opd) { 458 if (!mod->arch.core_plt || !mod->arch.init_plt || !mod->arch.got || !mod->arch.opd) {
451 printk(KERN_ERR "%s: sections missing\n", mod->name); 459 printk(KERN_ERR "%s: sections missing\n", mod->name);
@@ -525,8 +533,7 @@ get_ltoff (struct module *mod, uint64_t value, int *okp)
525 goto found; 533 goto found;
526 534
527 /* Not enough GOT entries? */ 535 /* Not enough GOT entries? */
528 if (e >= (struct got_entry *) (mod->arch.got->sh_addr + mod->arch.got->sh_size)) 536 BUG_ON(e >= (struct got_entry *) (mod->arch.got->sh_addr + mod->arch.got->sh_size));
529 BUG();
530 537
531 e->val = value; 538 e->val = value;
532 ++mod->arch.next_got_entry; 539 ++mod->arch.next_got_entry;
@@ -921,6 +928,30 @@ module_finalize (const Elf_Ehdr *hdr, const Elf_Shdr *sechdrs, struct module *mo
921 DEBUGP("%s: init: entry=%p\n", __func__, mod->init); 928 DEBUGP("%s: init: entry=%p\n", __func__, mod->init);
922 if (mod->arch.unwind) 929 if (mod->arch.unwind)
923 register_unwind_table(mod); 930 register_unwind_table(mod);
931#ifdef CONFIG_PARAVIRT
932 if (mod->arch.paravirt_bundles) {
933 struct paravirt_patch_site_bundle *start =
934 (struct paravirt_patch_site_bundle *)
935 mod->arch.paravirt_bundles->sh_addr;
936 struct paravirt_patch_site_bundle *end =
937 (struct paravirt_patch_site_bundle *)
938 (mod->arch.paravirt_bundles->sh_addr +
939 mod->arch.paravirt_bundles->sh_size);
940
941 paravirt_patch_apply_bundle(start, end);
942 }
943 if (mod->arch.paravirt_insts) {
944 struct paravirt_patch_site_inst *start =
945 (struct paravirt_patch_site_inst *)
946 mod->arch.paravirt_insts->sh_addr;
947 struct paravirt_patch_site_inst *end =
948 (struct paravirt_patch_site_inst *)
949 (mod->arch.paravirt_insts->sh_addr +
950 mod->arch.paravirt_insts->sh_size);
951
952 paravirt_patch_apply_inst(start, end);
953 }
954#endif
924 return 0; 955 return 0;
925} 956}
926 957
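module_finalize() above gives modules the same binary patching the core kernel gets at boot: each optional paravirt section is treated as an array of patch-site records bounded by sh_addr and sh_addr + sh_size. A condensed sketch of that pattern (apply_paravirt_section is an illustrative helper; the real code passes typed struct paravirt_patch_site_* pointers straight to paravirt_patch_apply_bundle()/_inst()):

#include <linux/elf.h>

static void apply_paravirt_section(Elf_Shdr *sec,
				   void (*apply)(void *start, void *end))
{
	if (!sec)	/* a module may not carry this section */
		return;
	apply((void *)sec->sh_addr,
	      (void *)(sec->sh_addr + sec->sh_size));
}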
diff --git a/arch/ia64/kernel/paravirt.c b/arch/ia64/kernel/paravirt.c
index 9f14c16f636..a21d7bb9c69 100644
--- a/arch/ia64/kernel/paravirt.c
+++ b/arch/ia64/kernel/paravirt.c
@@ -46,13 +46,23 @@ struct pv_info pv_info = {
46 * initialization hooks. 46 * initialization hooks.
47 */ 47 */
48 48
49struct pv_init_ops pv_init_ops; 49static void __init
50ia64_native_patch_branch(unsigned long tag, unsigned long type);
51
52struct pv_init_ops pv_init_ops =
53{
54#ifdef ASM_SUPPORTED
55 .patch_bundle = ia64_native_patch_bundle,
56#endif
57 .patch_branch = ia64_native_patch_branch,
58};
50 59
51/*************************************************************************** 60/***************************************************************************
52 * pv_cpu_ops 61 * pv_cpu_ops
53 * intrinsics hooks. 62 * intrinsics hooks.
54 */ 63 */
55 64
65#ifndef ASM_SUPPORTED
56/* ia64_native_xxx are macros so that we have to make them real functions */ 66/* ia64_native_xxx are macros so that we have to make them real functions */
57 67
58#define DEFINE_VOID_FUNC1(name) \ 68#define DEFINE_VOID_FUNC1(name) \
@@ -60,7 +70,14 @@ struct pv_init_ops pv_init_ops;
60 ia64_native_ ## name ## _func(unsigned long arg) \ 70 ia64_native_ ## name ## _func(unsigned long arg) \
61 { \ 71 { \
62 ia64_native_ ## name(arg); \ 72 ia64_native_ ## name(arg); \
63 } \ 73 }
74
75#define DEFINE_VOID_FUNC1_VOID(name) \
76 static void \
77 ia64_native_ ## name ## _func(void *arg) \
78 { \
79 ia64_native_ ## name(arg); \
80 }
64 81
65#define DEFINE_VOID_FUNC2(name) \ 82#define DEFINE_VOID_FUNC2(name) \
66 static void \ 83 static void \
@@ -68,7 +85,7 @@ struct pv_init_ops pv_init_ops;
68 unsigned long arg1) \ 85 unsigned long arg1) \
69 { \ 86 { \
70 ia64_native_ ## name(arg0, arg1); \ 87 ia64_native_ ## name(arg0, arg1); \
71 } \ 88 }
72 89
73#define DEFINE_FUNC0(name) \ 90#define DEFINE_FUNC0(name) \
74 static unsigned long \ 91 static unsigned long \
@@ -84,7 +101,7 @@ struct pv_init_ops pv_init_ops;
84 return ia64_native_ ## name(arg); \ 101 return ia64_native_ ## name(arg); \
85 } \ 102 } \
86 103
87DEFINE_VOID_FUNC1(fc); 104DEFINE_VOID_FUNC1_VOID(fc);
88DEFINE_VOID_FUNC1(intrin_local_irq_restore); 105DEFINE_VOID_FUNC1(intrin_local_irq_restore);
89 106
90DEFINE_VOID_FUNC2(ptcga); 107DEFINE_VOID_FUNC2(ptcga);
@@ -274,6 +291,266 @@ ia64_native_setreg_func(int regnum, unsigned long val)
274 break; 291 break;
275 } 292 }
276} 293}
294#else
295
296#define __DEFINE_FUNC(name, code) \
297 extern const char ia64_native_ ## name ## _direct_start[]; \
298 extern const char ia64_native_ ## name ## _direct_end[]; \
299 asm (".align 32\n" \
300 ".proc ia64_native_" #name "_func\n" \
301 "ia64_native_" #name "_func:\n" \
302 "ia64_native_" #name "_direct_start:\n" \
303 code \
304 "ia64_native_" #name "_direct_end:\n" \
305 "br.cond.sptk.many b6\n" \
306 ".endp ia64_native_" #name "_func\n")
307
308#define DEFINE_VOID_FUNC0(name, code) \
309 extern void \
310 ia64_native_ ## name ## _func(void); \
311 __DEFINE_FUNC(name, code)
312
313#define DEFINE_VOID_FUNC1(name, code) \
314 extern void \
315 ia64_native_ ## name ## _func(unsigned long arg); \
316 __DEFINE_FUNC(name, code)
317
318#define DEFINE_VOID_FUNC1_VOID(name, code) \
319 extern void \
320 ia64_native_ ## name ## _func(void *arg); \
321 __DEFINE_FUNC(name, code)
322
323#define DEFINE_VOID_FUNC2(name, code) \
324 extern void \
325 ia64_native_ ## name ## _func(unsigned long arg0, \
326 unsigned long arg1); \
327 __DEFINE_FUNC(name, code)
328
329#define DEFINE_FUNC0(name, code) \
330 extern unsigned long \
331 ia64_native_ ## name ## _func(void); \
332 __DEFINE_FUNC(name, code)
333
334#define DEFINE_FUNC1(name, type, code) \
335 extern unsigned long \
336 ia64_native_ ## name ## _func(type arg); \
337 __DEFINE_FUNC(name, code)
338
339DEFINE_VOID_FUNC1_VOID(fc,
340 "fc r8\n");
341DEFINE_VOID_FUNC1(intrin_local_irq_restore,
342 ";;\n"
343 " cmp.ne p6, p7 = r8, r0\n"
344 ";;\n"
345 "(p6) ssm psr.i\n"
346 "(p7) rsm psr.i\n"
347 ";;\n"
348 "(p6) srlz.d\n");
349
350DEFINE_VOID_FUNC2(ptcga,
351 "ptc.ga r8, r9\n");
352DEFINE_VOID_FUNC2(set_rr,
353 "mov rr[r8] = r9\n");
354
355/* ia64_native_getreg(_IA64_REG_PSR) & IA64_PSR_I */
356DEFINE_FUNC0(get_psr_i,
357 "mov r2 = " __stringify(1 << IA64_PSR_I_BIT) "\n"
358 "mov r8 = psr\n"
359 ";;\n"
360 "and r8 = r2, r8\n");
361
362DEFINE_FUNC1(thash, unsigned long,
363 "thash r8 = r8\n");
364DEFINE_FUNC1(get_cpuid, int,
365 "mov r8 = cpuid[r8]\n");
366DEFINE_FUNC1(get_pmd, int,
367 "mov r8 = pmd[r8]\n");
368DEFINE_FUNC1(get_rr, unsigned long,
369 "mov r8 = rr[r8]\n");
370
371DEFINE_VOID_FUNC0(ssm_i,
372 "ssm psr.i\n");
373DEFINE_VOID_FUNC0(rsm_i,
374 "rsm psr.i\n");
375
376extern void
377ia64_native_set_rr0_to_rr4_func(unsigned long val0, unsigned long val1,
378 unsigned long val2, unsigned long val3,
379 unsigned long val4);
380__DEFINE_FUNC(set_rr0_to_rr4,
381 "mov rr[r0] = r8\n"
382 "movl r2 = 0x2000000000000000\n"
383 ";;\n"
384 "mov rr[r2] = r9\n"
385 "shl r3 = r2, 1\n" /* movl r3 = 0x4000000000000000 */
386 ";;\n"
387 "add r2 = r2, r3\n" /* movl r2 = 0x6000000000000000 */
388 "mov rr[r3] = r10\n"
389 ";;\n"
390 "mov rr[r2] = r11\n"
391 "shl r3 = r3, 1\n" /* movl r3 = 0x8000000000000000 */
392 ";;\n"
393 "mov rr[r3] = r14\n");
394
395extern unsigned long ia64_native_getreg_func(int regnum);
396asm(".global ia64_native_getreg_func\n");
397#define __DEFINE_GET_REG(id, reg) \
398 "mov r2 = " __stringify(_IA64_REG_ ## id) "\n" \
399 ";;\n" \
400 "cmp.eq p6, p0 = r2, r8\n" \
401 ";;\n" \
402 "(p6) mov r8 = " #reg "\n" \
403 "(p6) br.cond.sptk.many b6\n" \
404 ";;\n"
405#define __DEFINE_GET_AR(id, reg) __DEFINE_GET_REG(AR_ ## id, ar.reg)
406#define __DEFINE_GET_CR(id, reg) __DEFINE_GET_REG(CR_ ## id, cr.reg)
407
408__DEFINE_FUNC(getreg,
409 __DEFINE_GET_REG(GP, gp)
410 /*__DEFINE_GET_REG(IP, ip)*/ /* returned ip value shouldn't be constant */
411 __DEFINE_GET_REG(PSR, psr)
412 __DEFINE_GET_REG(TP, tp)
413 __DEFINE_GET_REG(SP, sp)
414
415 __DEFINE_GET_REG(AR_KR0, ar0)
416 __DEFINE_GET_REG(AR_KR1, ar1)
417 __DEFINE_GET_REG(AR_KR2, ar2)
418 __DEFINE_GET_REG(AR_KR3, ar3)
419 __DEFINE_GET_REG(AR_KR4, ar4)
420 __DEFINE_GET_REG(AR_KR5, ar5)
421 __DEFINE_GET_REG(AR_KR6, ar6)
422 __DEFINE_GET_REG(AR_KR7, ar7)
423 __DEFINE_GET_AR(RSC, rsc)
424 __DEFINE_GET_AR(BSP, bsp)
425 __DEFINE_GET_AR(BSPSTORE, bspstore)
426 __DEFINE_GET_AR(RNAT, rnat)
427 __DEFINE_GET_AR(FCR, fcr)
428 __DEFINE_GET_AR(EFLAG, eflag)
429 __DEFINE_GET_AR(CSD, csd)
430 __DEFINE_GET_AR(SSD, ssd)
431 __DEFINE_GET_REG(AR_CFLAG, ar27)
432 __DEFINE_GET_AR(FSR, fsr)
433 __DEFINE_GET_AR(FIR, fir)
434 __DEFINE_GET_AR(FDR, fdr)
435 __DEFINE_GET_AR(CCV, ccv)
436 __DEFINE_GET_AR(UNAT, unat)
437 __DEFINE_GET_AR(FPSR, fpsr)
438 __DEFINE_GET_AR(ITC, itc)
439 __DEFINE_GET_AR(PFS, pfs)
440 __DEFINE_GET_AR(LC, lc)
441 __DEFINE_GET_AR(EC, ec)
442
443 __DEFINE_GET_CR(DCR, dcr)
444 __DEFINE_GET_CR(ITM, itm)
445 __DEFINE_GET_CR(IVA, iva)
446 __DEFINE_GET_CR(PTA, pta)
447 __DEFINE_GET_CR(IPSR, ipsr)
448 __DEFINE_GET_CR(ISR, isr)
449 __DEFINE_GET_CR(IIP, iip)
450 __DEFINE_GET_CR(IFA, ifa)
451 __DEFINE_GET_CR(ITIR, itir)
452 __DEFINE_GET_CR(IIPA, iipa)
453 __DEFINE_GET_CR(IFS, ifs)
454 __DEFINE_GET_CR(IIM, iim)
455 __DEFINE_GET_CR(IHA, iha)
456 __DEFINE_GET_CR(LID, lid)
457 __DEFINE_GET_CR(IVR, ivr)
458 __DEFINE_GET_CR(TPR, tpr)
459 __DEFINE_GET_CR(EOI, eoi)
460 __DEFINE_GET_CR(IRR0, irr0)
461 __DEFINE_GET_CR(IRR1, irr1)
462 __DEFINE_GET_CR(IRR2, irr2)
463 __DEFINE_GET_CR(IRR3, irr3)
464 __DEFINE_GET_CR(ITV, itv)
465 __DEFINE_GET_CR(PMV, pmv)
466 __DEFINE_GET_CR(CMCV, cmcv)
467 __DEFINE_GET_CR(LRR0, lrr0)
468 __DEFINE_GET_CR(LRR1, lrr1)
469
470 "mov r8 = -1\n" /* unsupported case */
471 );
472
473extern void ia64_native_setreg_func(int regnum, unsigned long val);
474asm(".global ia64_native_setreg_func\n");
475#define __DEFINE_SET_REG(id, reg) \
476 "mov r2 = " __stringify(_IA64_REG_ ## id) "\n" \
477 ";;\n" \
478 "cmp.eq p6, p0 = r2, r9\n" \
479 ";;\n" \
480 "(p6) mov " #reg " = r8\n" \
481 "(p6) br.cond.sptk.many b6\n" \
482 ";;\n"
483#define __DEFINE_SET_AR(id, reg) __DEFINE_SET_REG(AR_ ## id, ar.reg)
484#define __DEFINE_SET_CR(id, reg) __DEFINE_SET_REG(CR_ ## id, cr.reg)
485__DEFINE_FUNC(setreg,
486 "mov r2 = " __stringify(_IA64_REG_PSR_L) "\n"
487 ";;\n"
488 "cmp.eq p6, p0 = r2, r9\n"
489 ";;\n"
490 "(p6) mov psr.l = r8\n"
491#ifdef HAVE_SERIALIZE_DIRECTIVE
492 ".serialize.data\n"
493#endif
494 "(p6) br.cond.sptk.many b6\n"
495 __DEFINE_SET_REG(GP, gp)
496 __DEFINE_SET_REG(SP, sp)
497
498 __DEFINE_SET_REG(AR_KR0, ar0)
499 __DEFINE_SET_REG(AR_KR1, ar1)
500 __DEFINE_SET_REG(AR_KR2, ar2)
501 __DEFINE_SET_REG(AR_KR3, ar3)
502 __DEFINE_SET_REG(AR_KR4, ar4)
503 __DEFINE_SET_REG(AR_KR5, ar5)
504 __DEFINE_SET_REG(AR_KR6, ar6)
505 __DEFINE_SET_REG(AR_KR7, ar7)
506 __DEFINE_SET_AR(RSC, rsc)
507 __DEFINE_SET_AR(BSP, bsp)
508 __DEFINE_SET_AR(BSPSTORE, bspstore)
509 __DEFINE_SET_AR(RNAT, rnat)
510 __DEFINE_SET_AR(FCR, fcr)
511 __DEFINE_SET_AR(EFLAG, eflag)
512 __DEFINE_SET_AR(CSD, csd)
513 __DEFINE_SET_AR(SSD, ssd)
514 __DEFINE_SET_REG(AR_CFLAG, ar27)
515 __DEFINE_SET_AR(FSR, fsr)
516 __DEFINE_SET_AR(FIR, fir)
517 __DEFINE_SET_AR(FDR, fdr)
518 __DEFINE_SET_AR(CCV, ccv)
519 __DEFINE_SET_AR(UNAT, unat)
520 __DEFINE_SET_AR(FPSR, fpsr)
521 __DEFINE_SET_AR(ITC, itc)
522 __DEFINE_SET_AR(PFS, pfs)
523 __DEFINE_SET_AR(LC, lc)
524 __DEFINE_SET_AR(EC, ec)
525
526 __DEFINE_SET_CR(DCR, dcr)
527 __DEFINE_SET_CR(ITM, itm)
528 __DEFINE_SET_CR(IVA, iva)
529 __DEFINE_SET_CR(PTA, pta)
530 __DEFINE_SET_CR(IPSR, ipsr)
531 __DEFINE_SET_CR(ISR, isr)
532 __DEFINE_SET_CR(IIP, iip)
533 __DEFINE_SET_CR(IFA, ifa)
534 __DEFINE_SET_CR(ITIR, itir)
535 __DEFINE_SET_CR(IIPA, iipa)
536 __DEFINE_SET_CR(IFS, ifs)
537 __DEFINE_SET_CR(IIM, iim)
538 __DEFINE_SET_CR(IHA, iha)
539 __DEFINE_SET_CR(LID, lid)
540 __DEFINE_SET_CR(IVR, ivr)
541 __DEFINE_SET_CR(TPR, tpr)
542 __DEFINE_SET_CR(EOI, eoi)
543 __DEFINE_SET_CR(IRR0, irr0)
544 __DEFINE_SET_CR(IRR1, irr1)
545 __DEFINE_SET_CR(IRR2, irr2)
546 __DEFINE_SET_CR(IRR3, irr3)
547 __DEFINE_SET_CR(ITV, itv)
548 __DEFINE_SET_CR(PMV, pmv)
549 __DEFINE_SET_CR(CMCV, cmcv)
550 __DEFINE_SET_CR(LRR0, lrr0)
551 __DEFINE_SET_CR(LRR1, lrr1)
552 );
553#endif
277 554
278struct pv_cpu_ops pv_cpu_ops = { 555struct pv_cpu_ops pv_cpu_ops = {
279 .fc = ia64_native_fc_func, 556 .fc = ia64_native_fc_func,
@@ -366,4 +643,258 @@ ia64_native_do_steal_accounting(unsigned long *new_itm)
366 643
367struct pv_time_ops pv_time_ops = { 644struct pv_time_ops pv_time_ops = {
368 .do_steal_accounting = ia64_native_do_steal_accounting, 645 .do_steal_accounting = ia64_native_do_steal_accounting,
646 .sched_clock = ia64_native_sched_clock,
647};
648
649/***************************************************************************
 650 * binary patching
651 * pv_init_ops.patch_bundle
652 */
653
654#ifdef ASM_SUPPORTED
655#define IA64_NATIVE_PATCH_DEFINE_GET_REG(name, reg) \
656 __DEFINE_FUNC(get_ ## name, \
657 ";;\n" \
658 "mov r8 = " #reg "\n" \
659 ";;\n")
660
661#define IA64_NATIVE_PATCH_DEFINE_SET_REG(name, reg) \
662 __DEFINE_FUNC(set_ ## name, \
663 ";;\n" \
664 "mov " #reg " = r8\n" \
665 ";;\n")
666
667#define IA64_NATIVE_PATCH_DEFINE_REG(name, reg) \
668 IA64_NATIVE_PATCH_DEFINE_GET_REG(name, reg); \
669 IA64_NATIVE_PATCH_DEFINE_SET_REG(name, reg) \
670
671#define IA64_NATIVE_PATCH_DEFINE_AR(name, reg) \
672 IA64_NATIVE_PATCH_DEFINE_REG(ar_ ## name, ar.reg)
673
674#define IA64_NATIVE_PATCH_DEFINE_CR(name, reg) \
675 IA64_NATIVE_PATCH_DEFINE_REG(cr_ ## name, cr.reg)
676
677
678IA64_NATIVE_PATCH_DEFINE_GET_REG(psr, psr);
679IA64_NATIVE_PATCH_DEFINE_GET_REG(tp, tp);
680
681/* IA64_NATIVE_PATCH_DEFINE_SET_REG(psr_l, psr.l); */
682__DEFINE_FUNC(set_psr_l,
683 ";;\n"
684 "mov psr.l = r8\n"
685#ifdef HAVE_SERIALIZE_DIRECTIVE
686 ".serialize.data\n"
687#endif
688 ";;\n");
689
690IA64_NATIVE_PATCH_DEFINE_REG(gp, gp);
691IA64_NATIVE_PATCH_DEFINE_REG(sp, sp);
692
693IA64_NATIVE_PATCH_DEFINE_REG(kr0, ar0);
694IA64_NATIVE_PATCH_DEFINE_REG(kr1, ar1);
695IA64_NATIVE_PATCH_DEFINE_REG(kr2, ar2);
696IA64_NATIVE_PATCH_DEFINE_REG(kr3, ar3);
697IA64_NATIVE_PATCH_DEFINE_REG(kr4, ar4);
698IA64_NATIVE_PATCH_DEFINE_REG(kr5, ar5);
699IA64_NATIVE_PATCH_DEFINE_REG(kr6, ar6);
700IA64_NATIVE_PATCH_DEFINE_REG(kr7, ar7);
701
702IA64_NATIVE_PATCH_DEFINE_AR(rsc, rsc);
703IA64_NATIVE_PATCH_DEFINE_AR(bsp, bsp);
704IA64_NATIVE_PATCH_DEFINE_AR(bspstore, bspstore);
705IA64_NATIVE_PATCH_DEFINE_AR(rnat, rnat);
706IA64_NATIVE_PATCH_DEFINE_AR(fcr, fcr);
707IA64_NATIVE_PATCH_DEFINE_AR(eflag, eflag);
708IA64_NATIVE_PATCH_DEFINE_AR(csd, csd);
709IA64_NATIVE_PATCH_DEFINE_AR(ssd, ssd);
710IA64_NATIVE_PATCH_DEFINE_REG(ar27, ar27);
711IA64_NATIVE_PATCH_DEFINE_AR(fsr, fsr);
712IA64_NATIVE_PATCH_DEFINE_AR(fir, fir);
713IA64_NATIVE_PATCH_DEFINE_AR(fdr, fdr);
714IA64_NATIVE_PATCH_DEFINE_AR(ccv, ccv);
715IA64_NATIVE_PATCH_DEFINE_AR(unat, unat);
716IA64_NATIVE_PATCH_DEFINE_AR(fpsr, fpsr);
717IA64_NATIVE_PATCH_DEFINE_AR(itc, itc);
718IA64_NATIVE_PATCH_DEFINE_AR(pfs, pfs);
719IA64_NATIVE_PATCH_DEFINE_AR(lc, lc);
720IA64_NATIVE_PATCH_DEFINE_AR(ec, ec);
721
722IA64_NATIVE_PATCH_DEFINE_CR(dcr, dcr);
723IA64_NATIVE_PATCH_DEFINE_CR(itm, itm);
724IA64_NATIVE_PATCH_DEFINE_CR(iva, iva);
725IA64_NATIVE_PATCH_DEFINE_CR(pta, pta);
726IA64_NATIVE_PATCH_DEFINE_CR(ipsr, ipsr);
727IA64_NATIVE_PATCH_DEFINE_CR(isr, isr);
728IA64_NATIVE_PATCH_DEFINE_CR(iip, iip);
729IA64_NATIVE_PATCH_DEFINE_CR(ifa, ifa);
730IA64_NATIVE_PATCH_DEFINE_CR(itir, itir);
731IA64_NATIVE_PATCH_DEFINE_CR(iipa, iipa);
732IA64_NATIVE_PATCH_DEFINE_CR(ifs, ifs);
733IA64_NATIVE_PATCH_DEFINE_CR(iim, iim);
734IA64_NATIVE_PATCH_DEFINE_CR(iha, iha);
735IA64_NATIVE_PATCH_DEFINE_CR(lid, lid);
736IA64_NATIVE_PATCH_DEFINE_CR(ivr, ivr);
737IA64_NATIVE_PATCH_DEFINE_CR(tpr, tpr);
738IA64_NATIVE_PATCH_DEFINE_CR(eoi, eoi);
739IA64_NATIVE_PATCH_DEFINE_CR(irr0, irr0);
740IA64_NATIVE_PATCH_DEFINE_CR(irr1, irr1);
741IA64_NATIVE_PATCH_DEFINE_CR(irr2, irr2);
742IA64_NATIVE_PATCH_DEFINE_CR(irr3, irr3);
743IA64_NATIVE_PATCH_DEFINE_CR(itv, itv);
744IA64_NATIVE_PATCH_DEFINE_CR(pmv, pmv);
745IA64_NATIVE_PATCH_DEFINE_CR(cmcv, cmcv);
746IA64_NATIVE_PATCH_DEFINE_CR(lrr0, lrr0);
747IA64_NATIVE_PATCH_DEFINE_CR(lrr1, lrr1);
748
749static const struct paravirt_patch_bundle_elem ia64_native_patch_bundle_elems[]
750__initdata_or_module =
751{
752#define IA64_NATIVE_PATCH_BUNDLE_ELEM(name, type) \
753 { \
754 (void*)ia64_native_ ## name ## _direct_start, \
755 (void*)ia64_native_ ## name ## _direct_end, \
756 PARAVIRT_PATCH_TYPE_ ## type, \
757 }
758
759 IA64_NATIVE_PATCH_BUNDLE_ELEM(fc, FC),
760 IA64_NATIVE_PATCH_BUNDLE_ELEM(thash, THASH),
761 IA64_NATIVE_PATCH_BUNDLE_ELEM(get_cpuid, GET_CPUID),
762 IA64_NATIVE_PATCH_BUNDLE_ELEM(get_pmd, GET_PMD),
763 IA64_NATIVE_PATCH_BUNDLE_ELEM(ptcga, PTCGA),
764 IA64_NATIVE_PATCH_BUNDLE_ELEM(get_rr, GET_RR),
765 IA64_NATIVE_PATCH_BUNDLE_ELEM(set_rr, SET_RR),
766 IA64_NATIVE_PATCH_BUNDLE_ELEM(set_rr0_to_rr4, SET_RR0_TO_RR4),
767 IA64_NATIVE_PATCH_BUNDLE_ELEM(ssm_i, SSM_I),
768 IA64_NATIVE_PATCH_BUNDLE_ELEM(rsm_i, RSM_I),
769 IA64_NATIVE_PATCH_BUNDLE_ELEM(get_psr_i, GET_PSR_I),
770 IA64_NATIVE_PATCH_BUNDLE_ELEM(intrin_local_irq_restore,
771 INTRIN_LOCAL_IRQ_RESTORE),
772
773#define IA64_NATIVE_PATCH_BUNDLE_ELEM_GETREG(name, reg) \
774 { \
775 (void*)ia64_native_get_ ## name ## _direct_start, \
776 (void*)ia64_native_get_ ## name ## _direct_end, \
777 PARAVIRT_PATCH_TYPE_GETREG + _IA64_REG_ ## reg, \
778 }
779
780#define IA64_NATIVE_PATCH_BUNDLE_ELEM_SETREG(name, reg) \
781 { \
782 (void*)ia64_native_set_ ## name ## _direct_start, \
783 (void*)ia64_native_set_ ## name ## _direct_end, \
784 PARAVIRT_PATCH_TYPE_SETREG + _IA64_REG_ ## reg, \
785 }
786
787#define IA64_NATIVE_PATCH_BUNDLE_ELEM_REG(name, reg) \
788 IA64_NATIVE_PATCH_BUNDLE_ELEM_GETREG(name, reg), \
789 IA64_NATIVE_PATCH_BUNDLE_ELEM_SETREG(name, reg) \
790
791#define IA64_NATIVE_PATCH_BUNDLE_ELEM_AR(name, reg) \
792 IA64_NATIVE_PATCH_BUNDLE_ELEM_REG(ar_ ## name, AR_ ## reg)
793
794#define IA64_NATIVE_PATCH_BUNDLE_ELEM_CR(name, reg) \
795 IA64_NATIVE_PATCH_BUNDLE_ELEM_REG(cr_ ## name, CR_ ## reg)
796
797 IA64_NATIVE_PATCH_BUNDLE_ELEM_GETREG(psr, PSR),
798 IA64_NATIVE_PATCH_BUNDLE_ELEM_GETREG(tp, TP),
799
800 IA64_NATIVE_PATCH_BUNDLE_ELEM_SETREG(psr_l, PSR_L),
801
802 IA64_NATIVE_PATCH_BUNDLE_ELEM_REG(gp, GP),
803 IA64_NATIVE_PATCH_BUNDLE_ELEM_REG(sp, SP),
804
805 IA64_NATIVE_PATCH_BUNDLE_ELEM_REG(kr0, AR_KR0),
806 IA64_NATIVE_PATCH_BUNDLE_ELEM_REG(kr1, AR_KR1),
807 IA64_NATIVE_PATCH_BUNDLE_ELEM_REG(kr2, AR_KR2),
808 IA64_NATIVE_PATCH_BUNDLE_ELEM_REG(kr3, AR_KR3),
809 IA64_NATIVE_PATCH_BUNDLE_ELEM_REG(kr4, AR_KR4),
810 IA64_NATIVE_PATCH_BUNDLE_ELEM_REG(kr5, AR_KR5),
811 IA64_NATIVE_PATCH_BUNDLE_ELEM_REG(kr6, AR_KR6),
812 IA64_NATIVE_PATCH_BUNDLE_ELEM_REG(kr7, AR_KR7),
813
814 IA64_NATIVE_PATCH_BUNDLE_ELEM_AR(rsc, RSC),
815 IA64_NATIVE_PATCH_BUNDLE_ELEM_AR(bsp, BSP),
816 IA64_NATIVE_PATCH_BUNDLE_ELEM_AR(bspstore, BSPSTORE),
817 IA64_NATIVE_PATCH_BUNDLE_ELEM_AR(rnat, RNAT),
818 IA64_NATIVE_PATCH_BUNDLE_ELEM_AR(fcr, FCR),
819 IA64_NATIVE_PATCH_BUNDLE_ELEM_AR(eflag, EFLAG),
820 IA64_NATIVE_PATCH_BUNDLE_ELEM_AR(csd, CSD),
821 IA64_NATIVE_PATCH_BUNDLE_ELEM_AR(ssd, SSD),
822 IA64_NATIVE_PATCH_BUNDLE_ELEM_REG(ar27, AR_CFLAG),
823 IA64_NATIVE_PATCH_BUNDLE_ELEM_AR(fsr, FSR),
824 IA64_NATIVE_PATCH_BUNDLE_ELEM_AR(fir, FIR),
825 IA64_NATIVE_PATCH_BUNDLE_ELEM_AR(fdr, FDR),
826 IA64_NATIVE_PATCH_BUNDLE_ELEM_AR(ccv, CCV),
827 IA64_NATIVE_PATCH_BUNDLE_ELEM_AR(unat, UNAT),
828 IA64_NATIVE_PATCH_BUNDLE_ELEM_AR(fpsr, FPSR),
829 IA64_NATIVE_PATCH_BUNDLE_ELEM_AR(itc, ITC),
830 IA64_NATIVE_PATCH_BUNDLE_ELEM_AR(pfs, PFS),
831 IA64_NATIVE_PATCH_BUNDLE_ELEM_AR(lc, LC),
832 IA64_NATIVE_PATCH_BUNDLE_ELEM_AR(ec, EC),
833
834 IA64_NATIVE_PATCH_BUNDLE_ELEM_CR(dcr, DCR),
835 IA64_NATIVE_PATCH_BUNDLE_ELEM_CR(itm, ITM),
836 IA64_NATIVE_PATCH_BUNDLE_ELEM_CR(iva, IVA),
837 IA64_NATIVE_PATCH_BUNDLE_ELEM_CR(pta, PTA),
838 IA64_NATIVE_PATCH_BUNDLE_ELEM_CR(ipsr, IPSR),
839 IA64_NATIVE_PATCH_BUNDLE_ELEM_CR(isr, ISR),
840 IA64_NATIVE_PATCH_BUNDLE_ELEM_CR(iip, IIP),
841 IA64_NATIVE_PATCH_BUNDLE_ELEM_CR(ifa, IFA),
842 IA64_NATIVE_PATCH_BUNDLE_ELEM_CR(itir, ITIR),
843 IA64_NATIVE_PATCH_BUNDLE_ELEM_CR(iipa, IIPA),
844 IA64_NATIVE_PATCH_BUNDLE_ELEM_CR(ifs, IFS),
845 IA64_NATIVE_PATCH_BUNDLE_ELEM_CR(iim, IIM),
846 IA64_NATIVE_PATCH_BUNDLE_ELEM_CR(iha, IHA),
847 IA64_NATIVE_PATCH_BUNDLE_ELEM_CR(lid, LID),
848 IA64_NATIVE_PATCH_BUNDLE_ELEM_CR(ivr, IVR),
849 IA64_NATIVE_PATCH_BUNDLE_ELEM_CR(tpr, TPR),
850 IA64_NATIVE_PATCH_BUNDLE_ELEM_CR(eoi, EOI),
851 IA64_NATIVE_PATCH_BUNDLE_ELEM_CR(irr0, IRR0),
852 IA64_NATIVE_PATCH_BUNDLE_ELEM_CR(irr1, IRR1),
853 IA64_NATIVE_PATCH_BUNDLE_ELEM_CR(irr2, IRR2),
854 IA64_NATIVE_PATCH_BUNDLE_ELEM_CR(irr3, IRR3),
855 IA64_NATIVE_PATCH_BUNDLE_ELEM_CR(itv, ITV),
856 IA64_NATIVE_PATCH_BUNDLE_ELEM_CR(pmv, PMV),
857 IA64_NATIVE_PATCH_BUNDLE_ELEM_CR(cmcv, CMCV),
858 IA64_NATIVE_PATCH_BUNDLE_ELEM_CR(lrr0, LRR0),
859 IA64_NATIVE_PATCH_BUNDLE_ELEM_CR(lrr1, LRR1),
369}; 860};
861
862unsigned long __init_or_module
863ia64_native_patch_bundle(void *sbundle, void *ebundle, unsigned long type)
864{
865 const unsigned long nelems = sizeof(ia64_native_patch_bundle_elems) /
866 sizeof(ia64_native_patch_bundle_elems[0]);
867
868 return __paravirt_patch_apply_bundle(sbundle, ebundle, type,
869 ia64_native_patch_bundle_elems,
870 nelems, NULL);
871}
 872#endif /* ASM_SUPPORTED */
873
874extern const char ia64_native_switch_to[];
875extern const char ia64_native_leave_syscall[];
876extern const char ia64_native_work_processed_syscall[];
877extern const char ia64_native_leave_kernel[];
878
879const struct paravirt_patch_branch_target ia64_native_branch_target[]
880__initconst = {
881#define PARAVIRT_BR_TARGET(name, type) \
882 { \
883 ia64_native_ ## name, \
884 PARAVIRT_PATCH_TYPE_BR_ ## type, \
885 }
886 PARAVIRT_BR_TARGET(switch_to, SWITCH_TO),
887 PARAVIRT_BR_TARGET(leave_syscall, LEAVE_SYSCALL),
888 PARAVIRT_BR_TARGET(work_processed_syscall, WORK_PROCESSED_SYSCALL),
889 PARAVIRT_BR_TARGET(leave_kernel, LEAVE_KERNEL),
890};
891
892static void __init
893ia64_native_patch_branch(unsigned long tag, unsigned long type)
894{
895 const unsigned long nelem =
896 sizeof(ia64_native_branch_target) /
897 sizeof(ia64_native_branch_target[0]);
898 __paravirt_patch_apply_branch(tag, type,
899 ia64_native_branch_target, nelem);
900}
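ia64_native_patch_branch() above hands a table of {entry address, patch type} pairs to __paravirt_patch_apply_branch(), which rewrites each tagged call site into a branch to the matching entry. A hedged sketch of the lookup half of that job; the struct layout mirrors the initializers above (address first, then type), and branch_target_sketch/find_branch_target are illustrative names:

struct branch_target_sketch {
	const void	*entry;
	unsigned long	type;
};

static const void *
find_branch_target(unsigned long type,
		   const struct branch_target_sketch *tbl, unsigned long nelem)
{
	unsigned long i;

	for (i = 0; i < nelem; i++)
		if (tbl[i].type == type)
			return tbl[i].entry;
	return NULL;	/* unknown type: leave the call site unpatched */
}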
diff --git a/arch/ia64/kernel/paravirt_patch.c b/arch/ia64/kernel/paravirt_patch.c
new file mode 100644
index 00000000000..bfdfef1b1ff
--- /dev/null
+++ b/arch/ia64/kernel/paravirt_patch.c
@@ -0,0 +1,514 @@
1/******************************************************************************
 2 * linux/arch/ia64/kernel/paravirt_patch.c
3 *
4 * Copyright (c) 2008 Isaku Yamahata <yamahata at valinux co jp>
5 * VA Linux Systems Japan K.K.
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License as published by
9 * the Free Software Foundation; either version 2 of the License, or
10 * (at your option) any later version.
11 *
12 * This program is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 * GNU General Public License for more details.
16 *
17 * You should have received a copy of the GNU General Public License
18 * along with this program; if not, write to the Free Software
19 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
20 *
21 */
22
23#include <linux/init.h>
24#include <asm/intrinsics.h>
25#include <asm/kprobes.h>
26#include <asm/paravirt.h>
27#include <asm/paravirt_patch.h>
28
29typedef union ia64_inst {
30 struct {
31 unsigned long long qp : 6;
32 unsigned long long : 31;
33 unsigned long long opcode : 4;
34 unsigned long long reserved : 23;
35 } generic;
36 unsigned long long l;
37} ia64_inst_t;
38
39/*
40 * flush_icache_range() can't be used here.
 41 * We may run before cpu_init(), which initializes
 42 * ia64_i_cache_stride_shift, and flush_icache_range() relies on it.
43 */
44void __init_or_module
45paravirt_flush_i_cache_range(const void *instr, unsigned long size)
46{
47 extern void paravirt_fc_i(const void *addr);
48 unsigned long i;
49
50 for (i = 0; i < size; i += sizeof(bundle_t))
51 paravirt_fc_i(instr + i);
52}
53
54bundle_t* __init_or_module
55paravirt_get_bundle(unsigned long tag)
56{
57 return (bundle_t *)(tag & ~3UL);
58}
59
60unsigned long __init_or_module
61paravirt_get_slot(unsigned long tag)
62{
63 return tag & 3UL;
64}
65
66unsigned long __init_or_module
67paravirt_get_num_inst(unsigned long stag, unsigned long etag)
68{
69 bundle_t *sbundle = paravirt_get_bundle(stag);
70 unsigned long sslot = paravirt_get_slot(stag);
71 bundle_t *ebundle = paravirt_get_bundle(etag);
72 unsigned long eslot = paravirt_get_slot(etag);
73
74 return (ebundle - sbundle) * 3 + eslot - sslot + 1;
75}
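/* Worked example of the tag arithmetic above (addresses illustrative).
 * A tag is the 16-byte bundle address with the slot (0-2) in its low
 * two bits, as paravirt_get_bundle()/paravirt_get_slot() show.
 * stag: bundle 0xa000000000001000, slot 1; etag: bundle
 * 0xa000000000001020, slot 0. The bundles are 0x20 / 16 = 2 apart, so
 * the formula gives 2 * 3 + 0 - 1 + 1 = 6 instructions: slots 1,2 of
 * the first bundle, 0,1,2 of the second and slot 0 of the third. */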
76
77unsigned long __init_or_module
78paravirt_get_next_tag(unsigned long tag)
79{
80 unsigned long slot = paravirt_get_slot(tag);
81
82 switch (slot) {
83 case 0:
84 case 1:
85 return tag + 1;
86 case 2: {
87 bundle_t *bundle = paravirt_get_bundle(tag);
88 return (unsigned long)(bundle + 1);
89 }
90 default:
91 BUG();
92 }
93 /* NOTREACHED */
94}
95
96ia64_inst_t __init_or_module
97paravirt_read_slot0(const bundle_t *bundle)
98{
99 ia64_inst_t inst;
100 inst.l = bundle->quad0.slot0;
101 return inst;
102}
103
104ia64_inst_t __init_or_module
105paravirt_read_slot1(const bundle_t *bundle)
106{
107 ia64_inst_t inst;
108 inst.l = bundle->quad0.slot1_p0 |
109 ((unsigned long long)bundle->quad1.slot1_p1 << 18UL);
110 return inst;
111}
112
113ia64_inst_t __init_or_module
114paravirt_read_slot2(const bundle_t *bundle)
115{
116 ia64_inst_t inst;
117 inst.l = bundle->quad1.slot2;
118 return inst;
119}
120
121ia64_inst_t __init_or_module
122paravirt_read_inst(unsigned long tag)
123{
124 bundle_t *bundle = paravirt_get_bundle(tag);
125 unsigned long slot = paravirt_get_slot(tag);
126
127 switch (slot) {
128 case 0:
129 return paravirt_read_slot0(bundle);
130 case 1:
131 return paravirt_read_slot1(bundle);
132 case 2:
133 return paravirt_read_slot2(bundle);
134 default:
135 BUG();
136 }
137 /* NOTREACHED */
138}
139
140void __init_or_module
141paravirt_write_slot0(bundle_t *bundle, ia64_inst_t inst)
142{
143 bundle->quad0.slot0 = inst.l;
144}
145
146void __init_or_module
147paravirt_write_slot1(bundle_t *bundle, ia64_inst_t inst)
148{
149 bundle->quad0.slot1_p0 = inst.l;
150 bundle->quad1.slot1_p1 = inst.l >> 18UL;
151}
152
153void __init_or_module
154paravirt_write_slot2(bundle_t *bundle, ia64_inst_t inst)
155{
156 bundle->quad1.slot2 = inst.l;
157}
158
159void __init_or_module
160paravirt_write_inst(unsigned long tag, ia64_inst_t inst)
161{
162 bundle_t *bundle = paravirt_get_bundle(tag);
163 unsigned long slot = paravirt_get_slot(tag);
164
165 switch (slot) {
166 case 0:
167 paravirt_write_slot0(bundle, inst);
168 break;
169 case 1:
170 paravirt_write_slot1(bundle, inst);
171 break;
172 case 2:
173 paravirt_write_slot2(bundle, inst);
174 break;
175 default:
176 BUG();
177 break;
178 }
179 paravirt_flush_i_cache_range(bundle, sizeof(*bundle));
180}
181
182/* for debug */
183void
184paravirt_print_bundle(const bundle_t *bundle)
185{
186 const unsigned long *quad = (const unsigned long *)bundle;
187 ia64_inst_t slot0 = paravirt_read_slot0(bundle);
188 ia64_inst_t slot1 = paravirt_read_slot1(bundle);
189 ia64_inst_t slot2 = paravirt_read_slot2(bundle);
190
191 printk(KERN_DEBUG
192 "bundle 0x%p 0x%016lx 0x%016lx\n", bundle, quad[0], quad[1]);
193 printk(KERN_DEBUG
194 "bundle template 0x%x\n",
195 bundle->quad0.template);
196 printk(KERN_DEBUG
197 "slot0 0x%lx slot1_p0 0x%lx slot1_p1 0x%lx slot2 0x%lx\n",
198 (unsigned long)bundle->quad0.slot0,
199 (unsigned long)bundle->quad0.slot1_p0,
200 (unsigned long)bundle->quad1.slot1_p1,
201 (unsigned long)bundle->quad1.slot2);
202 printk(KERN_DEBUG
203 "slot0 0x%016llx slot1 0x%016llx slot2 0x%016llx\n",
204 slot0.l, slot1.l, slot2.l);
205}
206
207static int noreplace_paravirt __init_or_module = 0;
208
209static int __init setup_noreplace_paravirt(char *str)
210{
211 noreplace_paravirt = 1;
212 return 1;
213}
214__setup("noreplace-paravirt", setup_noreplace_paravirt);
215
216#ifdef ASM_SUPPORTED
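/* pad the space between sbundle and ebundle with nop bundles */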
217static void __init_or_module
218fill_nop_bundle(void *sbundle, void *ebundle)
219{
220 extern const char paravirt_nop_bundle[];
221 extern const unsigned long paravirt_nop_bundle_size;
222
223 void *bundle = sbundle;
224
225 BUG_ON((((unsigned long)sbundle) % sizeof(bundle_t)) != 0);
226 BUG_ON((((unsigned long)ebundle) % sizeof(bundle_t)) != 0);
227
228 while (bundle < ebundle) {
229 memcpy(bundle, paravirt_nop_bundle, paravirt_nop_bundle_size);
230
231 bundle += paravirt_nop_bundle_size;
232 }
233}
234
235/* helper: copy in the replacement bundles for @type and return the number of bytes used; 0 means nothing was patched */
236unsigned long __init_or_module
237__paravirt_patch_apply_bundle(void *sbundle, void *ebundle, unsigned long type,
238 const struct paravirt_patch_bundle_elem *elems,
239 unsigned long nelems,
240 const struct paravirt_patch_bundle_elem **found)
241{
242 unsigned long used = 0;
243 unsigned long i;
244
245 BUG_ON((((unsigned long)sbundle) % sizeof(bundle_t)) != 0);
246 BUG_ON((((unsigned long)ebundle) % sizeof(bundle_t)) != 0);
247
248 if (found != NULL) *found = NULL;
249 for (i = 0; i < nelems; i++) {
250 const struct paravirt_patch_bundle_elem *p = &elems[i];
251 if (p->type == type) {
252 unsigned long need = p->ebundle - p->sbundle;
253 unsigned long room = ebundle - sbundle;
254
255 if (found != NULL)
256 *found = p;
257
258 if (room < need) {
259 /* no room to replace. skip it */
260 printk(KERN_DEBUG
261 "the space is too small to put "
262 "bundles. type %ld need %ld room %ld\n",
263 type, need, room);
264 break;
265 }
266
267 used = need;
268 memcpy(sbundle, p->sbundle, used);
269 break;
270 }
271 }
272
273 return used;
274}
275
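/*
 * For each bundle patch site, let the pv backend install its
 * replacement bundles, pad the unused tail with nop bundles and
 * flush the icache.
 */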
276void __init_or_module
277paravirt_patch_apply_bundle(const struct paravirt_patch_site_bundle *start,
278 const struct paravirt_patch_site_bundle *end)
279{
280 const struct paravirt_patch_site_bundle *p;
281
282 if (noreplace_paravirt)
283 return;
284 if (pv_init_ops.patch_bundle == NULL)
285 return;
286
287 for (p = start; p < end; p++) {
288 unsigned long used;
289
290 used = (*pv_init_ops.patch_bundle)(p->sbundle, p->ebundle,
291 p->type);
292 if (used == 0)
293 continue;
294
295 fill_nop_bundle(p->sbundle + used, p->ebundle);
296 paravirt_flush_i_cache_range(p->sbundle,
297 p->ebundle - p->sbundle);
298 }
299 ia64_sync_i();
300 ia64_srlz_i();
301}
302
303/*
304 * nop.i, nop.m and nop.f instructions share the same format,
305 * but nop.b has a different format.
306 * nop.b is not supported here for now.
307 */
308static void __init_or_module
309fill_nop_inst(unsigned long stag, unsigned long etag)
310{
311 extern const bundle_t paravirt_nop_mfi_inst_bundle[];
312 unsigned long tag;
313 const ia64_inst_t nop_inst =
314 paravirt_read_slot0(paravirt_nop_mfi_inst_bundle);
315
316 for (tag = stag; tag < etag; tag = paravirt_get_next_tag(tag))
317 paravirt_write_inst(tag, nop_inst);
318}
319
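/* like paravirt_patch_apply_bundle(), but for single-instruction patch sites */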
320void __init_or_module
321paravirt_patch_apply_inst(const struct paravirt_patch_site_inst *start,
322 const struct paravirt_patch_site_inst *end)
323{
324 const struct paravirt_patch_site_inst *p;
325
326 if (noreplace_paravirt)
327 return;
328 if (pv_init_ops.patch_inst == NULL)
329 return;
330
331 for (p = start; p < end; p++) {
332 unsigned long tag;
333 bundle_t *sbundle;
334 bundle_t *ebundle;
335
336 tag = (*pv_init_ops.patch_inst)(p->stag, p->etag, p->type);
337 if (tag == p->stag)
338 continue;
339
340 fill_nop_inst(tag, p->etag);
341 sbundle = paravirt_get_bundle(p->stag);
342 ebundle = paravirt_get_bundle(p->etag) + 1;
343 paravirt_flush_i_cache_range(sbundle, (ebundle - sbundle) *
344 sizeof(bundle_t));
345 }
346 ia64_sync_i();
347 ia64_srlz_i();
348}
349#endif /* ASM_SUPPORTED */
350
351/* brl.cond.sptk.many <target64> X3 */
352typedef union inst_x3_op {
353 ia64_inst_t inst;
354 struct {
355 unsigned long qp: 6;
356 unsigned long btyp: 3;
357 unsigned long unused: 3;
358 unsigned long p: 1;
359 unsigned long imm20b: 20;
360 unsigned long wh: 2;
361 unsigned long d: 1;
362 unsigned long i: 1;
363 unsigned long opcode: 4;
364 };
365 unsigned long l;
366} inst_x3_op_t;
367
368typedef union inst_x3_imm {
369 ia64_inst_t inst;
370 struct {
371 unsigned long unused: 2;
372 unsigned long imm39: 39;
373 };
374 unsigned long l;
375} inst_x3_imm_t;
376
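/*
 * Patch the 60-bit IP-relative displacement of a brl in an MLX bundle:
 * the tag points at slot 1, which carries the imm39 part of the
 * immediate, and the following slot 2 holds the brl opcode with imm20b
 * and the most significant bit i.  The displacement is counted in
 * 16-byte bundles, hence the >> 4 below.
 */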
377void __init_or_module
378paravirt_patch_reloc_brl(unsigned long tag, const void *target)
379{
380 unsigned long tag_op = paravirt_get_next_tag(tag);
381 unsigned long tag_imm = tag;
382 bundle_t *bundle = paravirt_get_bundle(tag);
383
384 ia64_inst_t inst_op = paravirt_read_inst(tag_op);
385 ia64_inst_t inst_imm = paravirt_read_inst(tag_imm);
386
387 inst_x3_op_t inst_x3_op = { .l = inst_op.l };
388 inst_x3_imm_t inst_x3_imm = { .l = inst_imm.l };
389
390 unsigned long imm60 =
391 ((unsigned long)target - (unsigned long)bundle) >> 4;
392
393 BUG_ON(paravirt_get_slot(tag) != 1); /* MLX */
394 BUG_ON(((unsigned long)target & (sizeof(bundle_t) - 1)) != 0);
395
396 /* imm60[59] 1bit */
397 inst_x3_op.i = (imm60 >> 59) & 1;
398 /* imm60[19:0] 20bit */
399 inst_x3_op.imm20b = imm60 & ((1UL << 20) - 1);
400 /* imm60[58:20] 39bit */
401 inst_x3_imm.imm39 = (imm60 >> 20) & ((1UL << 39) - 1);
402
403 inst_op.l = inst_x3_op.l;
404 inst_imm.l = inst_x3_imm.l;
405
406 paravirt_write_inst(tag_op, inst_op);
407 paravirt_write_inst(tag_imm, inst_imm);
408}
409
410/* br.cond.sptk.many <target25> B1 */
411typedef union inst_b1 {
412 ia64_inst_t inst;
413 struct {
414 unsigned long qp: 6;
415 unsigned long btype: 3;
416 unsigned long unused: 3;
417 unsigned long p: 1;
418 unsigned long imm20b: 20;
419 unsigned long wh: 2;
420 unsigned long d: 1;
421 unsigned long s: 1;
422 unsigned long opcode: 4;
423 };
424 unsigned long l;
425} inst_b1_t;
426
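/*
 * Patch the 25-bit IP-relative displacement of a short br: s is the
 * sign of the displacement, imm20b the displacement in 16-byte bundle
 * units (again >> 4).
 */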
427void __init
428paravirt_patch_reloc_br(unsigned long tag, const void *target)
429{
430 bundle_t *bundle = paravirt_get_bundle(tag);
431 ia64_inst_t inst = paravirt_read_inst(tag);
432 unsigned long target25 = (unsigned long)target - (unsigned long)bundle;
433 inst_b1_t inst_b1;
434
435 BUG_ON(((unsigned long)target & (sizeof(bundle_t) - 1)) != 0);
436
437 inst_b1.l = inst.l;
438 if (target25 & (1UL << 63))
439 inst_b1.s = 1;
440 else
441 inst_b1.s = 0;
442
443 inst_b1.imm20b = target25 >> 4;
444 inst.l = inst_b1.l;
445
446 paravirt_write_inst(tag, inst);
447}
448
449void __init
450__paravirt_patch_apply_branch(
451 unsigned long tag, unsigned long type,
452 const struct paravirt_patch_branch_target *entries,
453 unsigned int nr_entries)
454{
455 unsigned int i;
456 for (i = 0; i < nr_entries; i++) {
457 if (entries[i].type == type) {
458 paravirt_patch_reloc_br(tag, entries[i].entry);
459 break;
460 }
461 }
462}
463
464static void __init
465paravirt_patch_apply_branch(const struct paravirt_patch_site_branch *start,
466 const struct paravirt_patch_site_branch *end)
467{
468 const struct paravirt_patch_site_branch *p;
469
470 if (noreplace_paravirt)
471 return;
472 if (pv_init_ops.patch_branch == NULL)
473 return;
474
475 for (p = start; p < end; p++)
476 (*pv_init_ops.patch_branch)(p->tag, p->type);
477
478 ia64_sync_i();
479 ia64_srlz_i();
480}
481
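/*
 * Apply every patch site the linker collected into the
 * .paravirt_bundles, .paravirt_insts and .paravirt_branches sections.
 */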
482void __init
483paravirt_patch_apply(void)
484{
485 extern const char __start_paravirt_bundles[];
486 extern const char __stop_paravirt_bundles[];
487 extern const char __start_paravirt_insts[];
488 extern const char __stop_paravirt_insts[];
489 extern const char __start_paravirt_branches[];
490 extern const char __stop_paravirt_branches[];
491
492 paravirt_patch_apply_bundle((const struct paravirt_patch_site_bundle *)
493 __start_paravirt_bundles,
494 (const struct paravirt_patch_site_bundle *)
495 __stop_paravirt_bundles);
496 paravirt_patch_apply_inst((const struct paravirt_patch_site_inst *)
497 __start_paravirt_insts,
498 (const struct paravirt_patch_site_inst *)
499 __stop_paravirt_insts);
500 paravirt_patch_apply_branch((const struct paravirt_patch_site_branch *)
501 __start_paravirt_branches,
502 (const struct paravirt_patch_site_branch *)
503 __stop_paravirt_branches);
504}
505
506/*
507 * Local variables:
508 * mode: C
509 * c-set-style: "linux"
510 * c-basic-offset: 8
511 * tab-width: 8
512 * indent-tabs-mode: t
513 * End:
514 */
diff --git a/arch/ia64/kernel/paravirt_patchlist.c b/arch/ia64/kernel/paravirt_patchlist.c
new file mode 100644
index 00000000000..b28082a95d4
--- /dev/null
+++ b/arch/ia64/kernel/paravirt_patchlist.c
@@ -0,0 +1,79 @@
1/******************************************************************************
2 * Copyright (c) 2008 Isaku Yamahata <yamahata at valinux co jp>
3 * VA Linux Systems Japan K.K.
4 *
5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License as published by
7 * the Free Software Foundation; either version 2 of the License, or
8 * (at your option) any later version.
9 *
10 * This program is distributed in the hope that it will be useful,
11 * but WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 * GNU General Public License for more details.
14 *
15 * You should have received a copy of the GNU General Public License
16 * along with this program; if not, write to the Free Software
17 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
18 *
19 */
20
21#include <linux/bug.h>
22#include <asm/paravirt.h>
23
24#define DECLARE(name) \
25 extern unsigned long \
26 __ia64_native_start_gate_##name##_patchlist[]; \
27 extern unsigned long \
28 __ia64_native_end_gate_##name##_patchlist[]
29
30DECLARE(fsyscall);
31DECLARE(brl_fsys_bubble_down);
32DECLARE(vtop);
33DECLARE(mckinley_e9);
34
35extern unsigned long __start_gate_section[];
36
37#define ASSIGN(name) \
38 .start_##name##_patchlist = \
39 (unsigned long)__ia64_native_start_gate_##name##_patchlist, \
40 .end_##name##_patchlist = \
41 (unsigned long)__ia64_native_end_gate_##name##_patchlist
42
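/* gate page patch lists and gate section, initialized with the native values */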
43struct pv_patchdata pv_patchdata __initdata = {
44 ASSIGN(fsyscall),
45 ASSIGN(brl_fsys_bubble_down),
46 ASSIGN(vtop),
47 ASSIGN(mckinley_e9),
48
49 .gate_section = (void*)__start_gate_section,
50};
51
52
53unsigned long __init
54paravirt_get_gate_patchlist(enum pv_gate_patchlist type)
55{
56
57#define CASE(NAME, name) \
58 case PV_GATE_START_##NAME: \
59 return pv_patchdata.start_##name##_patchlist; \
60 case PV_GATE_END_##NAME: \
61 return pv_patchdata.end_##name##_patchlist; \
62
63 switch (type) {
64 CASE(FSYSCALL, fsyscall);
65 CASE(BRL_FSYS_BUBBLE_DOWN, brl_fsys_bubble_down);
66 CASE(VTOP, vtop);
67 CASE(MCKINLEY_E9, mckinley_e9);
68 default:
69 BUG();
70 break;
71 }
72 return 0;
73}
74
75void * __init
76paravirt_get_gate_section(void)
77{
78 return pv_patchdata.gate_section;
79}
diff --git a/arch/ia64/kernel/paravirt_patchlist.h b/arch/ia64/kernel/paravirt_patchlist.h
new file mode 100644
index 00000000000..0684aa6c650
--- /dev/null
+++ b/arch/ia64/kernel/paravirt_patchlist.h
@@ -0,0 +1,28 @@
1/******************************************************************************
2 * linux/arch/ia64/kernel/paravirt_patchlist.h
3 *
4 * Copyright (c) 2008 Isaku Yamahata <yamahata at valinux co jp>
5 * VA Linux Systems Japan K.K.
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License as published by
9 * the Free Software Foundation; either version 2 of the License, or
10 * (at your option) any later version.
11 *
12 * This program is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 * GNU General Public License for more details.
16 *
17 * You should have received a copy of the GNU General Public License
18 * along with this program; if not, write to the Free Software
19 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
20 *
21 */
22
23#if defined(__IA64_GATE_PARAVIRTUALIZED_XEN)
24#include <asm/xen/patchlist.h>
25#else
26#include <asm/native/patchlist.h>
27#endif
28
diff --git a/arch/ia64/kernel/paravirtentry.S b/arch/ia64/kernel/paravirtentry.S
index 2f42fcb9776..6158560d7f1 100644
--- a/arch/ia64/kernel/paravirtentry.S
+++ b/arch/ia64/kernel/paravirtentry.S
@@ -20,8 +20,11 @@
20 * 20 *
21 */ 21 */
22 22
23#include <linux/init.h>
23#include <asm/asmmacro.h> 24#include <asm/asmmacro.h>
24#include <asm/asm-offsets.h> 25#include <asm/asm-offsets.h>
26#include <asm/paravirt_privop.h>
27#include <asm/paravirt_patch.h>
25#include "entry.h" 28#include "entry.h"
26 29
27#define DATA8(sym, init_value) \ 30#define DATA8(sym, init_value) \
@@ -32,29 +35,87 @@
32 data8 init_value ; \ 35 data8 init_value ; \
33 .popsection 36 .popsection
34 37
35#define BRANCH(targ, reg, breg) \ 38#define BRANCH(targ, reg, breg, type) \
36 movl reg=targ ; \ 39 PARAVIRT_PATCH_SITE_BR(PARAVIRT_PATCH_TYPE_BR_ ## type) ; \
37 ;; \ 40 ;; \
38 ld8 reg=[reg] ; \ 41 movl reg=targ ; \
39 ;; \ 42 ;; \
40 mov breg=reg ; \ 43 ld8 reg=[reg] ; \
44 ;; \
45 mov breg=reg ; \
41 br.cond.sptk.many breg 46 br.cond.sptk.many breg
42 47
43#define BRANCH_PROC(sym, reg, breg) \ 48#define BRANCH_PROC(sym, reg, breg, type) \
44 DATA8(paravirt_ ## sym ## _targ, ia64_native_ ## sym) ; \ 49 DATA8(paravirt_ ## sym ## _targ, ia64_native_ ## sym) ; \
45 GLOBAL_ENTRY(paravirt_ ## sym) ; \ 50 GLOBAL_ENTRY(paravirt_ ## sym) ; \
46 BRANCH(paravirt_ ## sym ## _targ, reg, breg) ; \ 51 BRANCH(paravirt_ ## sym ## _targ, reg, breg, type) ; \
47 END(paravirt_ ## sym) 52 END(paravirt_ ## sym)
48 53
49#define BRANCH_PROC_UNWINFO(sym, reg, breg) \ 54#define BRANCH_PROC_UNWINFO(sym, reg, breg, type) \
50 DATA8(paravirt_ ## sym ## _targ, ia64_native_ ## sym) ; \ 55 DATA8(paravirt_ ## sym ## _targ, ia64_native_ ## sym) ; \
51 GLOBAL_ENTRY(paravirt_ ## sym) ; \ 56 GLOBAL_ENTRY(paravirt_ ## sym) ; \
52 PT_REGS_UNWIND_INFO(0) ; \ 57 PT_REGS_UNWIND_INFO(0) ; \
53 BRANCH(paravirt_ ## sym ## _targ, reg, breg) ; \ 58 BRANCH(paravirt_ ## sym ## _targ, reg, breg, type) ; \
54 END(paravirt_ ## sym) 59 END(paravirt_ ## sym)
55 60
56 61
57BRANCH_PROC(switch_to, r22, b7) 62BRANCH_PROC(switch_to, r22, b7, SWITCH_TO)
58BRANCH_PROC_UNWINFO(leave_syscall, r22, b7) 63BRANCH_PROC_UNWINFO(leave_syscall, r22, b7, LEAVE_SYSCALL)
59BRANCH_PROC(work_processed_syscall, r2, b7) 64BRANCH_PROC(work_processed_syscall, r2, b7, WORK_PROCESSED_SYSCALL)
60BRANCH_PROC_UNWINFO(leave_kernel, r22, b7) 65BRANCH_PROC_UNWINFO(leave_kernel, r22, b7, LEAVE_KERNEL)
66
67
68#ifdef CONFIG_MODULES
69#define __INIT_OR_MODULE .text
70#define __INITDATA_OR_MODULE .data
71#else
72#define __INIT_OR_MODULE __INIT
73#define __INITDATA_OR_MODULE __INITDATA
74#endif /* CONFIG_MODULES */
75
76 __INIT_OR_MODULE
77 GLOBAL_ENTRY(paravirt_fc_i)
78 fc.i r32
79 br.ret.sptk.many rp
80 END(paravirt_fc_i)
81 __FINIT
82
83 __INIT_OR_MODULE
84 .align 32
85 GLOBAL_ENTRY(paravirt_nop_b_inst_bundle)
86 {
87 nop.b 0
88 nop.b 0
89 nop.b 0
90 }
91 END(paravirt_nop_b_inst_bundle)
92 __FINIT
93
94 /* NOTE: the nop.[mfi] forms all share the same format */
95 __INIT_OR_MODULE
96 GLOBAL_ENTRY(paravirt_nop_mfi_inst_bundle)
97 {
98 nop.m 0
99 nop.f 0
100 nop.i 0
101 }
102 END(paravirt_nop_mfi_inst_bundle)
103 __FINIT
104
105 __INIT_OR_MODULE
106 GLOBAL_ENTRY(paravirt_nop_bundle)
107paravirt_nop_bundle_start:
108 {
109 nop 0
110 nop 0
111 nop 0
112 }
113paravirt_nop_bundle_end:
114 END(paravirt_nop_bundle)
115 __FINIT
116
117 __INITDATA_OR_MODULE
118 .align 8
119 .global paravirt_nop_bundle_size
120paravirt_nop_bundle_size:
121 data8 paravirt_nop_bundle_end - paravirt_nop_bundle_start
diff --git a/arch/ia64/kernel/patch.c b/arch/ia64/kernel/patch.c
index b83b2c51600..68a1311db80 100644
--- a/arch/ia64/kernel/patch.c
+++ b/arch/ia64/kernel/patch.c
@@ -7,6 +7,7 @@
7#include <linux/init.h> 7#include <linux/init.h>
8#include <linux/string.h> 8#include <linux/string.h>
9 9
10#include <asm/paravirt.h>
10#include <asm/patch.h> 11#include <asm/patch.h>
11#include <asm/processor.h> 12#include <asm/processor.h>
12#include <asm/sections.h> 13#include <asm/sections.h>
@@ -169,16 +170,35 @@ ia64_patch_mckinley_e9 (unsigned long start, unsigned long end)
169 ia64_srlz_i(); 170 ia64_srlz_i();
170} 171}
171 172
173extern unsigned long ia64_native_fsyscall_table[NR_syscalls];
174extern char ia64_native_fsys_bubble_down[];
175struct pv_fsys_data pv_fsys_data __initdata = {
176 .fsyscall_table = (unsigned long *)ia64_native_fsyscall_table,
177 .fsys_bubble_down = (void *)ia64_native_fsys_bubble_down,
178};
179
180unsigned long * __init
181paravirt_get_fsyscall_table(void)
182{
183 return pv_fsys_data.fsyscall_table;
184}
185
186char * __init
187paravirt_get_fsys_bubble_down(void)
188{
189 return pv_fsys_data.fsys_bubble_down;
190}
191
172static void __init 192static void __init
173patch_fsyscall_table (unsigned long start, unsigned long end) 193patch_fsyscall_table (unsigned long start, unsigned long end)
174{ 194{
175 extern unsigned long fsyscall_table[NR_syscalls]; 195 u64 fsyscall_table = (u64)paravirt_get_fsyscall_table();
176 s32 *offp = (s32 *) start; 196 s32 *offp = (s32 *) start;
177 u64 ip; 197 u64 ip;
178 198
179 while (offp < (s32 *) end) { 199 while (offp < (s32 *) end) {
180 ip = (u64) ia64_imva((char *) offp + *offp); 200 ip = (u64) ia64_imva((char *) offp + *offp);
181 ia64_patch_imm64(ip, (u64) fsyscall_table); 201 ia64_patch_imm64(ip, fsyscall_table);
182 ia64_fc((void *) ip); 202 ia64_fc((void *) ip);
183 ++offp; 203 ++offp;
184 } 204 }
@@ -189,7 +209,7 @@ patch_fsyscall_table (unsigned long start, unsigned long end)
189static void __init 209static void __init
190patch_brl_fsys_bubble_down (unsigned long start, unsigned long end) 210patch_brl_fsys_bubble_down (unsigned long start, unsigned long end)
191{ 211{
192 extern char fsys_bubble_down[]; 212 u64 fsys_bubble_down = (u64)paravirt_get_fsys_bubble_down();
193 s32 *offp = (s32 *) start; 213 s32 *offp = (s32 *) start;
194 u64 ip; 214 u64 ip;
195 215
@@ -207,13 +227,13 @@ patch_brl_fsys_bubble_down (unsigned long start, unsigned long end)
207void __init 227void __init
208ia64_patch_gate (void) 228ia64_patch_gate (void)
209{ 229{
210# define START(name) ((unsigned long) __start_gate_##name##_patchlist) 230# define START(name) paravirt_get_gate_patchlist(PV_GATE_START_##name)
211# define END(name) ((unsigned long)__end_gate_##name##_patchlist) 231# define END(name) paravirt_get_gate_patchlist(PV_GATE_END_##name)
212 232
213 patch_fsyscall_table(START(fsyscall), END(fsyscall)); 233 patch_fsyscall_table(START(FSYSCALL), END(FSYSCALL));
214 patch_brl_fsys_bubble_down(START(brl_fsys_bubble_down), END(brl_fsys_bubble_down)); 234 patch_brl_fsys_bubble_down(START(BRL_FSYS_BUBBLE_DOWN), END(BRL_FSYS_BUBBLE_DOWN));
215 ia64_patch_vtop(START(vtop), END(vtop)); 235 ia64_patch_vtop(START(VTOP), END(VTOP));
216 ia64_patch_mckinley_e9(START(mckinley_e9), END(mckinley_e9)); 236 ia64_patch_mckinley_e9(START(MCKINLEY_E9), END(MCKINLEY_E9));
217} 237}
218 238
219void ia64_patch_phys_stack_reg(unsigned long val) 239void ia64_patch_phys_stack_reg(unsigned long val)
@@ -229,7 +249,7 @@ void ia64_patch_phys_stack_reg(unsigned long val)
229 while (offp < end) { 249 while (offp < end) {
230 ip = (u64) offp + *offp; 250 ip = (u64) offp + *offp;
231 ia64_patch(ip, mask, imm); 251 ia64_patch(ip, mask, imm);
232 ia64_fc(ip); 252 ia64_fc((void *)ip);
233 ++offp; 253 ++offp;
234 } 254 }
235 ia64_sync_i(); 255 ia64_sync_i();
diff --git a/arch/ia64/kernel/perfmon.c b/arch/ia64/kernel/perfmon.c
index 5c0f408cfd7..8a06dc48059 100644
--- a/arch/ia64/kernel/perfmon.c
+++ b/arch/ia64/kernel/perfmon.c
@@ -5603,7 +5603,7 @@ pfm_interrupt_handler(int irq, void *arg)
5603 * /proc/perfmon interface, for debug only 5603 * /proc/perfmon interface, for debug only
5604 */ 5604 */
5605 5605
5606#define PFM_PROC_SHOW_HEADER ((void *)NR_CPUS+1) 5606#define PFM_PROC_SHOW_HEADER ((void *)nr_cpu_ids+1)
5607 5607
5608static void * 5608static void *
5609pfm_proc_start(struct seq_file *m, loff_t *pos) 5609pfm_proc_start(struct seq_file *m, loff_t *pos)
@@ -5612,7 +5612,7 @@ pfm_proc_start(struct seq_file *m, loff_t *pos)
5612 return PFM_PROC_SHOW_HEADER; 5612 return PFM_PROC_SHOW_HEADER;
5613 } 5613 }
5614 5614
5615 while (*pos <= NR_CPUS) { 5615 while (*pos <= nr_cpu_ids) {
5616 if (cpu_online(*pos - 1)) { 5616 if (cpu_online(*pos - 1)) {
5617 return (void *)*pos; 5617 return (void *)*pos;
5618 } 5618 }
diff --git a/arch/ia64/kernel/salinfo.c b/arch/ia64/kernel/salinfo.c
index ecb9eb78d68..7053c55b764 100644
--- a/arch/ia64/kernel/salinfo.c
+++ b/arch/ia64/kernel/salinfo.c
@@ -317,7 +317,7 @@ retry:
317 } 317 }
318 318
319 n = data->cpu_check; 319 n = data->cpu_check;
320 for (i = 0; i < NR_CPUS; i++) { 320 for (i = 0; i < nr_cpu_ids; i++) {
321 if (cpu_isset(n, data->cpu_event)) { 321 if (cpu_isset(n, data->cpu_event)) {
322 if (!cpu_online(n)) { 322 if (!cpu_online(n)) {
323 cpu_clear(n, data->cpu_event); 323 cpu_clear(n, data->cpu_event);
@@ -326,7 +326,7 @@ retry:
326 cpu = n; 326 cpu = n;
327 break; 327 break;
328 } 328 }
329 if (++n == NR_CPUS) 329 if (++n == nr_cpu_ids)
330 n = 0; 330 n = 0;
331 } 331 }
332 332
@@ -337,7 +337,7 @@ retry:
337 337
338 /* for next read, start checking at next CPU */ 338 /* for next read, start checking at next CPU */
339 data->cpu_check = cpu; 339 data->cpu_check = cpu;
340 if (++data->cpu_check == NR_CPUS) 340 if (++data->cpu_check == nr_cpu_ids)
341 data->cpu_check = 0; 341 data->cpu_check = 0;
342 342
343 snprintf(cmd, sizeof(cmd), "read %d\n", cpu); 343 snprintf(cmd, sizeof(cmd), "read %d\n", cpu);
diff --git a/arch/ia64/kernel/setup.c b/arch/ia64/kernel/setup.c
index 865af27c773..714066aeda7 100644
--- a/arch/ia64/kernel/setup.c
+++ b/arch/ia64/kernel/setup.c
@@ -52,6 +52,7 @@
52#include <asm/meminit.h> 52#include <asm/meminit.h>
53#include <asm/page.h> 53#include <asm/page.h>
54#include <asm/paravirt.h> 54#include <asm/paravirt.h>
55#include <asm/paravirt_patch.h>
55#include <asm/patch.h> 56#include <asm/patch.h>
56#include <asm/pgtable.h> 57#include <asm/pgtable.h>
57#include <asm/processor.h> 58#include <asm/processor.h>
@@ -537,6 +538,7 @@ setup_arch (char **cmdline_p)
537 paravirt_arch_setup_early(); 538 paravirt_arch_setup_early();
538 539
539 ia64_patch_vtop((u64) __start___vtop_patchlist, (u64) __end___vtop_patchlist); 540 ia64_patch_vtop((u64) __start___vtop_patchlist, (u64) __end___vtop_patchlist);
541 paravirt_patch_apply();
540 542
541 *cmdline_p = __va(ia64_boot_param->command_line); 543 *cmdline_p = __va(ia64_boot_param->command_line);
542 strlcpy(boot_command_line, *cmdline_p, COMMAND_LINE_SIZE); 544 strlcpy(boot_command_line, *cmdline_p, COMMAND_LINE_SIZE);
@@ -730,10 +732,10 @@ static void *
730c_start (struct seq_file *m, loff_t *pos) 732c_start (struct seq_file *m, loff_t *pos)
731{ 733{
732#ifdef CONFIG_SMP 734#ifdef CONFIG_SMP
733 while (*pos < NR_CPUS && !cpu_isset(*pos, cpu_online_map)) 735 while (*pos < nr_cpu_ids && !cpu_online(*pos))
734 ++*pos; 736 ++*pos;
735#endif 737#endif
736 return *pos < NR_CPUS ? cpu_data(*pos) : NULL; 738 return *pos < nr_cpu_ids ? cpu_data(*pos) : NULL;
737} 739}
738 740
739static void * 741static void *
@@ -1016,8 +1018,7 @@ cpu_init (void)
1016 | IA64_DCR_DA | IA64_DCR_DD | IA64_DCR_LC)); 1018 | IA64_DCR_DA | IA64_DCR_DD | IA64_DCR_LC));
1017 atomic_inc(&init_mm.mm_count); 1019 atomic_inc(&init_mm.mm_count);
1018 current->active_mm = &init_mm; 1020 current->active_mm = &init_mm;
1019 if (current->mm) 1021 BUG_ON(current->mm);
1020 BUG();
1021 1022
1022 ia64_mmu_init(ia64_imva(cpu_data)); 1023 ia64_mmu_init(ia64_imva(cpu_data));
1023 ia64_mca_cpu_init(ia64_imva(cpu_data)); 1024 ia64_mca_cpu_init(ia64_imva(cpu_data));
diff --git a/arch/ia64/kernel/smp.c b/arch/ia64/kernel/smp.c
index da8f020d82c..2ea4199d9c5 100644
--- a/arch/ia64/kernel/smp.c
+++ b/arch/ia64/kernel/smp.c
@@ -166,11 +166,11 @@ send_IPI_allbutself (int op)
166 * Called with preemption disabled. 166 * Called with preemption disabled.
167 */ 167 */
168static inline void 168static inline void
169send_IPI_mask(cpumask_t mask, int op) 169send_IPI_mask(const struct cpumask *mask, int op)
170{ 170{
171 unsigned int cpu; 171 unsigned int cpu;
172 172
173 for_each_cpu_mask(cpu, mask) { 173 for_each_cpu(cpu, mask) {
174 send_IPI_single(cpu, op); 174 send_IPI_single(cpu, op);
175 } 175 }
176} 176}
@@ -316,7 +316,7 @@ void arch_send_call_function_single_ipi(int cpu)
316 send_IPI_single(cpu, IPI_CALL_FUNC_SINGLE); 316 send_IPI_single(cpu, IPI_CALL_FUNC_SINGLE);
317} 317}
318 318
319void arch_send_call_function_ipi(cpumask_t mask) 319void arch_send_call_function_ipi_mask(const struct cpumask *mask)
320{ 320{
321 send_IPI_mask(mask, IPI_CALL_FUNC); 321 send_IPI_mask(mask, IPI_CALL_FUNC);
322} 322}
diff --git a/arch/ia64/kernel/smpboot.c b/arch/ia64/kernel/smpboot.c
index 52290547c85..7700e23034b 100644
--- a/arch/ia64/kernel/smpboot.c
+++ b/arch/ia64/kernel/smpboot.c
@@ -581,14 +581,14 @@ smp_build_cpu_map (void)
581 581
582 ia64_cpu_to_sapicid[0] = boot_cpu_id; 582 ia64_cpu_to_sapicid[0] = boot_cpu_id;
583 cpus_clear(cpu_present_map); 583 cpus_clear(cpu_present_map);
584 cpu_set(0, cpu_present_map); 584 set_cpu_present(0, true);
585 cpu_set(0, cpu_possible_map); 585 set_cpu_possible(0, true);
586 for (cpu = 1, i = 0; i < smp_boot_data.cpu_count; i++) { 586 for (cpu = 1, i = 0; i < smp_boot_data.cpu_count; i++) {
587 sapicid = smp_boot_data.cpu_phys_id[i]; 587 sapicid = smp_boot_data.cpu_phys_id[i];
588 if (sapicid == boot_cpu_id) 588 if (sapicid == boot_cpu_id)
589 continue; 589 continue;
590 cpu_set(cpu, cpu_present_map); 590 set_cpu_present(cpu, true);
591 cpu_set(cpu, cpu_possible_map); 591 set_cpu_possible(cpu, true);
592 ia64_cpu_to_sapicid[cpu] = sapicid; 592 ia64_cpu_to_sapicid[cpu] = sapicid;
593 cpu++; 593 cpu++;
594 } 594 }
@@ -626,12 +626,9 @@ smp_prepare_cpus (unsigned int max_cpus)
626 */ 626 */
627 if (!max_cpus) { 627 if (!max_cpus) {
628 printk(KERN_INFO "SMP mode deactivated.\n"); 628 printk(KERN_INFO "SMP mode deactivated.\n");
629 cpus_clear(cpu_online_map); 629 init_cpu_online(cpumask_of(0));
630 cpus_clear(cpu_present_map); 630 init_cpu_present(cpumask_of(0));
631 cpus_clear(cpu_possible_map); 631 init_cpu_possible(cpumask_of(0));
632 cpu_set(0, cpu_online_map);
633 cpu_set(0, cpu_present_map);
634 cpu_set(0, cpu_possible_map);
635 return; 632 return;
636 } 633 }
637} 634}
diff --git a/arch/ia64/kernel/time.c b/arch/ia64/kernel/time.c
index f0ebb342409..641c8b61c4f 100644
--- a/arch/ia64/kernel/time.c
+++ b/arch/ia64/kernel/time.c
@@ -20,6 +20,7 @@
20#include <linux/efi.h> 20#include <linux/efi.h>
21#include <linux/timex.h> 21#include <linux/timex.h>
22#include <linux/clocksource.h> 22#include <linux/clocksource.h>
23#include <linux/platform_device.h>
23 24
24#include <asm/machvec.h> 25#include <asm/machvec.h>
25#include <asm/delay.h> 26#include <asm/delay.h>
@@ -50,6 +51,15 @@ EXPORT_SYMBOL(last_cli_ip);
50#endif 51#endif
51 52
52#ifdef CONFIG_PARAVIRT 53#ifdef CONFIG_PARAVIRT
54/* We need to define a real function for sched_clock, to override the
55 weak default version */
56unsigned long long sched_clock(void)
57{
58 return paravirt_sched_clock();
59}
60#endif
61
62#ifdef CONFIG_PARAVIRT
53static void 63static void
54paravirt_clocksource_resume(void) 64paravirt_clocksource_resume(void)
55{ 65{
@@ -405,6 +415,21 @@ static struct irqaction timer_irqaction = {
405 .name = "timer" 415 .name = "timer"
406}; 416};
407 417
418static struct platform_device rtc_efi_dev = {
419 .name = "rtc-efi",
420 .id = -1,
421};
422
423static int __init rtc_init(void)
424{
425 if (platform_device_register(&rtc_efi_dev) < 0)
426 printk(KERN_ERR "unable to register rtc device...\n");
427
428 /* not necessarily an error */
429 return 0;
430}
431module_init(rtc_init);
432
408void __init 433void __init
409time_init (void) 434time_init (void)
410{ 435{
diff --git a/arch/ia64/kernel/vmlinux.lds.S b/arch/ia64/kernel/vmlinux.lds.S
index 3765efc5f96..4a95e86b9ac 100644
--- a/arch/ia64/kernel/vmlinux.lds.S
+++ b/arch/ia64/kernel/vmlinux.lds.S
@@ -169,6 +169,30 @@ SECTIONS
169 __end___mckinley_e9_bundles = .; 169 __end___mckinley_e9_bundles = .;
170 } 170 }
171 171
172#if defined(CONFIG_PARAVIRT)
173 . = ALIGN(16);
174 .paravirt_bundles : AT(ADDR(.paravirt_bundles) - LOAD_OFFSET)
175 {
176 __start_paravirt_bundles = .;
177 *(.paravirt_bundles)
178 __stop_paravirt_bundles = .;
179 }
180 . = ALIGN(16);
181 .paravirt_insts : AT(ADDR(.paravirt_insts) - LOAD_OFFSET)
182 {
183 __start_paravirt_insts = .;
184 *(.paravirt_insts)
185 __stop_paravirt_insts = .;
186 }
187 . = ALIGN(16);
188 .paravirt_branches : AT(ADDR(.paravirt_branches) - LOAD_OFFSET)
189 {
190 __start_paravirt_branches = .;
191 *(.paravirt_branches)
192 __stop_paravirt_branches = .;
193 }
194#endif
195
172#if defined(CONFIG_IA64_GENERIC) 196#if defined(CONFIG_IA64_GENERIC)
173 /* Machine Vector */ 197 /* Machine Vector */
174 . = ALIGN(16); 198 . = ALIGN(16);
@@ -201,6 +225,12 @@ SECTIONS
201 __start_gate_section = .; 225 __start_gate_section = .;
202 *(.data.gate) 226 *(.data.gate)
203 __stop_gate_section = .; 227 __stop_gate_section = .;
228#ifdef CONFIG_XEN
229 . = ALIGN(PAGE_SIZE);
230 __xen_start_gate_section = .;
231 *(.data.gate.xen)
232 __xen_stop_gate_section = .;
233#endif
204 } 234 }
205 . = ALIGN(PAGE_SIZE); /* make sure the gate page doesn't expose 235 . = ALIGN(PAGE_SIZE); /* make sure the gate page doesn't expose
206 * kernel data 236 * kernel data
diff --git a/arch/ia64/kvm/kvm-ia64.c b/arch/ia64/kvm/kvm-ia64.c
index 076b00d1dbf..28af6a731bb 100644
--- a/arch/ia64/kvm/kvm-ia64.c
+++ b/arch/ia64/kvm/kvm-ia64.c
@@ -70,7 +70,7 @@ static void kvm_flush_icache(unsigned long start, unsigned long len)
70 int l; 70 int l;
71 71
72 for (l = 0; l < (len + 32); l += 32) 72 for (l = 0; l < (len + 32); l += 32)
73 ia64_fc(start + l); 73 ia64_fc((void *)(start + l));
74 74
75 ia64_sync_i(); 75 ia64_sync_i();
76 ia64_srlz_i(); 76 ia64_srlz_i();
diff --git a/arch/ia64/kvm/vcpu.c b/arch/ia64/kvm/vcpu.c
index d4d28050587..a18ee17b919 100644
--- a/arch/ia64/kvm/vcpu.c
+++ b/arch/ia64/kvm/vcpu.c
@@ -386,7 +386,7 @@ void set_rse_reg(struct kvm_pt_regs *regs, unsigned long r1,
386 else 386 else
387 *rnat_addr = (*rnat_addr) & (~nat_mask); 387 *rnat_addr = (*rnat_addr) & (~nat_mask);
388 388
389 ia64_setreg(_IA64_REG_AR_BSPSTORE, bspstore); 389 ia64_setreg(_IA64_REG_AR_BSPSTORE, (unsigned long)bspstore);
390 ia64_setreg(_IA64_REG_AR_RNAT, rnat); 390 ia64_setreg(_IA64_REG_AR_RNAT, rnat);
391 } 391 }
392 local_irq_restore(psr); 392 local_irq_restore(psr);
diff --git a/arch/ia64/kvm/vtlb.c b/arch/ia64/kvm/vtlb.c
index 38232b37668..2c2501f1315 100644
--- a/arch/ia64/kvm/vtlb.c
+++ b/arch/ia64/kvm/vtlb.c
@@ -210,6 +210,7 @@ void thash_vhpt_insert(struct kvm_vcpu *v, u64 pte, u64 itir, u64 va, int type)
210 phy_pte &= ~PAGE_FLAGS_RV_MASK; 210 phy_pte &= ~PAGE_FLAGS_RV_MASK;
211 psr = ia64_clear_ic(); 211 psr = ia64_clear_ic();
212 ia64_itc(type, va, phy_pte, itir_ps(itir)); 212 ia64_itc(type, va, phy_pte, itir_ps(itir));
213 paravirt_dv_serialize_data();
213 ia64_set_psr(psr); 214 ia64_set_psr(psr);
214 } 215 }
215 216
@@ -456,6 +457,7 @@ void thash_purge_and_insert(struct kvm_vcpu *v, u64 pte, u64 itir,
456 phy_pte &= ~PAGE_FLAGS_RV_MASK; 457 phy_pte &= ~PAGE_FLAGS_RV_MASK;
457 psr = ia64_clear_ic(); 458 psr = ia64_clear_ic();
458 ia64_itc(type, ifa, phy_pte, ps); 459 ia64_itc(type, ifa, phy_pte, ps);
460 paravirt_dv_serialize_data();
459 ia64_set_psr(psr); 461 ia64_set_psr(psr);
460 } 462 }
461 if (!(pte&VTLB_PTE_IO)) 463 if (!(pte&VTLB_PTE_IO))
diff --git a/arch/ia64/mm/init.c b/arch/ia64/mm/init.c
index 56e12903973..c0f3bee6904 100644
--- a/arch/ia64/mm/init.c
+++ b/arch/ia64/mm/init.c
@@ -35,6 +35,7 @@
35#include <asm/uaccess.h> 35#include <asm/uaccess.h>
36#include <asm/unistd.h> 36#include <asm/unistd.h>
37#include <asm/mca.h> 37#include <asm/mca.h>
38#include <asm/paravirt.h>
38 39
39DEFINE_PER_CPU(struct mmu_gather, mmu_gathers); 40DEFINE_PER_CPU(struct mmu_gather, mmu_gathers);
40 41
@@ -259,6 +260,7 @@ put_kernel_page (struct page *page, unsigned long address, pgprot_t pgprot)
259static void __init 260static void __init
260setup_gate (void) 261setup_gate (void)
261{ 262{
263 void *gate_section;
262 struct page *page; 264 struct page *page;
263 265
264 /* 266 /*
@@ -266,10 +268,11 @@ setup_gate (void)
266 * headers etc. and once execute-only page to enable 268 * headers etc. and once execute-only page to enable
267 * privilege-promotion via "epc": 269 * privilege-promotion via "epc":
268 */ 270 */
269 page = virt_to_page(ia64_imva(__start_gate_section)); 271 gate_section = paravirt_get_gate_section();
272 page = virt_to_page(ia64_imva(gate_section));
270 put_kernel_page(page, GATE_ADDR, PAGE_READONLY); 273 put_kernel_page(page, GATE_ADDR, PAGE_READONLY);
271#ifdef HAVE_BUGGY_SEGREL 274#ifdef HAVE_BUGGY_SEGREL
272 page = virt_to_page(ia64_imva(__start_gate_section + PAGE_SIZE)); 275 page = virt_to_page(ia64_imva(gate_section + PAGE_SIZE));
273 put_kernel_page(page, GATE_ADDR + PAGE_SIZE, PAGE_GATE); 276 put_kernel_page(page, GATE_ADDR + PAGE_SIZE, PAGE_GATE);
274#else 277#else
275 put_kernel_page(page, GATE_ADDR + PERCPU_PAGE_SIZE, PAGE_GATE); 278 put_kernel_page(page, GATE_ADDR + PERCPU_PAGE_SIZE, PAGE_GATE);
@@ -633,8 +636,7 @@ mem_init (void)
633#endif 636#endif
634 637
635#ifdef CONFIG_FLATMEM 638#ifdef CONFIG_FLATMEM
636 if (!mem_map) 639 BUG_ON(!mem_map);
637 BUG();
638 max_mapnr = max_low_pfn; 640 max_mapnr = max_low_pfn;
639#endif 641#endif
640 642
@@ -667,8 +669,8 @@ mem_init (void)
667 * code can tell them apart. 669 * code can tell them apart.
668 */ 670 */
669 for (i = 0; i < NR_syscalls; ++i) { 671 for (i = 0; i < NR_syscalls; ++i) {
670 extern unsigned long fsyscall_table[NR_syscalls];
671 extern unsigned long sys_call_table[NR_syscalls]; 672 extern unsigned long sys_call_table[NR_syscalls];
673 unsigned long *fsyscall_table = paravirt_get_fsyscall_table();
672 674
673 if (!fsyscall_table[i] || nolwsys) 675 if (!fsyscall_table[i] || nolwsys)
674 fsyscall_table[i] = sys_call_table[i] | 1; 676 fsyscall_table[i] = sys_call_table[i] | 1;
diff --git a/arch/ia64/mm/tlb.c b/arch/ia64/mm/tlb.c
index bd9818a36b4..b9f3d7bbb33 100644
--- a/arch/ia64/mm/tlb.c
+++ b/arch/ia64/mm/tlb.c
@@ -309,7 +309,7 @@ flush_tlb_range (struct vm_area_struct *vma, unsigned long start,
309 309
310 preempt_disable(); 310 preempt_disable();
311#ifdef CONFIG_SMP 311#ifdef CONFIG_SMP
312 if (mm != current->active_mm || cpus_weight(mm->cpu_vm_mask) != 1) { 312 if (mm != current->active_mm || cpumask_weight(mm_cpumask(mm)) != 1) {
313 platform_global_tlb_purge(mm, start, end, nbits); 313 platform_global_tlb_purge(mm, start, end, nbits);
314 preempt_enable(); 314 preempt_enable();
315 return; 315 return;
diff --git a/arch/ia64/scripts/pvcheck.sed b/arch/ia64/scripts/pvcheck.sed
index ba66ac2e4c6..e59809a3fc0 100644
--- a/arch/ia64/scripts/pvcheck.sed
+++ b/arch/ia64/scripts/pvcheck.sed
@@ -17,6 +17,7 @@ s/mov.*=.*cr\.iip/.warning \"cr.iip should not used directly\"/g
17s/mov.*=.*cr\.ivr/.warning \"cr.ivr should not used directly\"/g 17s/mov.*=.*cr\.ivr/.warning \"cr.ivr should not used directly\"/g
18s/mov.*=[^\.]*psr/.warning \"psr should not used directly\"/g # avoid ar.fpsr 18s/mov.*=[^\.]*psr/.warning \"psr should not used directly\"/g # avoid ar.fpsr
19s/mov.*=.*ar\.eflags/.warning \"ar.eflags should not used directly\"/g 19s/mov.*=.*ar\.eflags/.warning \"ar.eflags should not used directly\"/g
20s/mov.*=.*ar\.itc.*/.warning \"ar.itc should not used directly\"/g
20s/mov.*cr\.ifa.*=.*/.warning \"cr.ifa should not used directly\"/g 21s/mov.*cr\.ifa.*=.*/.warning \"cr.ifa should not used directly\"/g
21s/mov.*cr\.itir.*=.*/.warning \"cr.itir should not used directly\"/g 22s/mov.*cr\.itir.*=.*/.warning \"cr.itir should not used directly\"/g
22s/mov.*cr\.iha.*=.*/.warning \"cr.iha should not used directly\"/g 23s/mov.*cr\.iha.*=.*/.warning \"cr.iha should not used directly\"/g
diff --git a/arch/ia64/sn/kernel/io_common.c b/arch/ia64/sn/kernel/io_common.c
index 0d4ffa4da1d..57f280dd9de 100644
--- a/arch/ia64/sn/kernel/io_common.c
+++ b/arch/ia64/sn/kernel/io_common.c
@@ -135,8 +135,7 @@ static s64 sn_device_fixup_war(u64 nasid, u64 widget, int device,
135 } 135 }
136 136
137 war_list = kzalloc(DEV_PER_WIDGET * sizeof(*war_list), GFP_KERNEL); 137 war_list = kzalloc(DEV_PER_WIDGET * sizeof(*war_list), GFP_KERNEL);
138 if (!war_list) 138 BUG_ON(!war_list);
139 BUG();
140 139
141 SAL_CALL_NOLOCK(isrv, SN_SAL_IOIF_GET_WIDGET_DMAFLUSH_LIST, 140 SAL_CALL_NOLOCK(isrv, SN_SAL_IOIF_GET_WIDGET_DMAFLUSH_LIST,
142 nasid, widget, __pa(war_list), 0, 0, 0 ,0); 141 nasid, widget, __pa(war_list), 0, 0, 0 ,0);
@@ -180,23 +179,20 @@ sn_common_hubdev_init(struct hubdev_info *hubdev)
180 sizeof(struct sn_flush_device_kernel *); 179 sizeof(struct sn_flush_device_kernel *);
181 hubdev->hdi_flush_nasid_list.widget_p = 180 hubdev->hdi_flush_nasid_list.widget_p =
182 kzalloc(size, GFP_KERNEL); 181 kzalloc(size, GFP_KERNEL);
183 if (!hubdev->hdi_flush_nasid_list.widget_p) 182 BUG_ON(!hubdev->hdi_flush_nasid_list.widget_p);
184 BUG();
185 183
186 for (widget = 0; widget <= HUB_WIDGET_ID_MAX; widget++) { 184 for (widget = 0; widget <= HUB_WIDGET_ID_MAX; widget++) {
187 size = DEV_PER_WIDGET * 185 size = DEV_PER_WIDGET *
188 sizeof(struct sn_flush_device_kernel); 186 sizeof(struct sn_flush_device_kernel);
189 sn_flush_device_kernel = kzalloc(size, GFP_KERNEL); 187 sn_flush_device_kernel = kzalloc(size, GFP_KERNEL);
190 if (!sn_flush_device_kernel) 188 BUG_ON(!sn_flush_device_kernel);
191 BUG();
192 189
193 dev_entry = sn_flush_device_kernel; 190 dev_entry = sn_flush_device_kernel;
194 for (device = 0; device < DEV_PER_WIDGET; 191 for (device = 0; device < DEV_PER_WIDGET;
195 device++, dev_entry++) { 192 device++, dev_entry++) {
196 size = sizeof(struct sn_flush_device_common); 193 size = sizeof(struct sn_flush_device_common);
197 dev_entry->common = kzalloc(size, GFP_KERNEL); 194 dev_entry->common = kzalloc(size, GFP_KERNEL);
198 if (!dev_entry->common) 195 BUG_ON(!dev_entry->common);
199 BUG();
200 if (sn_prom_feature_available(PRF_DEVICE_FLUSH_LIST)) 196 if (sn_prom_feature_available(PRF_DEVICE_FLUSH_LIST))
201 status = sal_get_device_dmaflush_list( 197 status = sal_get_device_dmaflush_list(
202 hubdev->hdi_nasid, widget, device, 198 hubdev->hdi_nasid, widget, device,
@@ -326,8 +322,7 @@ sn_common_bus_fixup(struct pci_bus *bus,
326 */ 322 */
327 controller->platform_data = kzalloc(sizeof(struct sn_platform_data), 323 controller->platform_data = kzalloc(sizeof(struct sn_platform_data),
328 GFP_KERNEL); 324 GFP_KERNEL);
329 if (controller->platform_data == NULL) 325 BUG_ON(controller->platform_data == NULL);
330 BUG();
331 sn_platform_data = 326 sn_platform_data =
332 (struct sn_platform_data *) controller->platform_data; 327 (struct sn_platform_data *) controller->platform_data;
333 sn_platform_data->provider_soft = provider_soft; 328 sn_platform_data->provider_soft = provider_soft;
diff --git a/arch/ia64/sn/kernel/io_init.c b/arch/ia64/sn/kernel/io_init.c
index e2eb2da60f9..ee774c366a0 100644
--- a/arch/ia64/sn/kernel/io_init.c
+++ b/arch/ia64/sn/kernel/io_init.c
@@ -128,8 +128,7 @@ sn_legacy_pci_window_fixup(struct pci_controller *controller,
128{ 128{
129 controller->window = kcalloc(2, sizeof(struct pci_window), 129 controller->window = kcalloc(2, sizeof(struct pci_window),
130 GFP_KERNEL); 130 GFP_KERNEL);
131 if (controller->window == NULL) 131 BUG_ON(controller->window == NULL);
132 BUG();
133 controller->window[0].offset = legacy_io; 132 controller->window[0].offset = legacy_io;
134 controller->window[0].resource.name = "legacy_io"; 133 controller->window[0].resource.name = "legacy_io";
135 controller->window[0].resource.flags = IORESOURCE_IO; 134 controller->window[0].resource.flags = IORESOURCE_IO;
@@ -168,8 +167,7 @@ sn_pci_window_fixup(struct pci_dev *dev, unsigned int count,
168 idx = controller->windows; 167 idx = controller->windows;
169 new_count = controller->windows + count; 168 new_count = controller->windows + count;
170 new_window = kcalloc(new_count, sizeof(struct pci_window), GFP_KERNEL); 169 new_window = kcalloc(new_count, sizeof(struct pci_window), GFP_KERNEL);
171 if (new_window == NULL) 170 BUG_ON(new_window == NULL);
172 BUG();
173 if (controller->window) { 171 if (controller->window) {
174 memcpy(new_window, controller->window, 172 memcpy(new_window, controller->window,
175 sizeof(struct pci_window) * controller->windows); 173 sizeof(struct pci_window) * controller->windows);
@@ -222,8 +220,7 @@ sn_io_slot_fixup(struct pci_dev *dev)
222 (u64) __pa(pcidev_info), 220 (u64) __pa(pcidev_info),
223 (u64) __pa(sn_irq_info)); 221 (u64) __pa(sn_irq_info));
224 222
225 if (status) 223 BUG_ON(status); /* Cannot get platform pci device information */
226 BUG(); /* Cannot get platform pci device information */
227 224
228 225
229 /* Copy over PIO Mapped Addresses */ 226 /* Copy over PIO Mapped Addresses */
@@ -307,8 +304,7 @@ sn_pci_controller_fixup(int segment, int busnum, struct pci_bus *bus)
307 prom_bussoft_ptr = __va(prom_bussoft_ptr); 304 prom_bussoft_ptr = __va(prom_bussoft_ptr);
308 305
309 controller = kzalloc(sizeof(*controller), GFP_KERNEL); 306 controller = kzalloc(sizeof(*controller), GFP_KERNEL);
310 if (!controller) 307 BUG_ON(!controller);
311 BUG();
312 controller->segment = segment; 308 controller->segment = segment;
313 309
314 /* 310 /*
diff --git a/arch/ia64/sn/kernel/setup.c b/arch/ia64/sn/kernel/setup.c
index 02c5b8a9fb6..e456f062f24 100644
--- a/arch/ia64/sn/kernel/setup.c
+++ b/arch/ia64/sn/kernel/setup.c
@@ -732,8 +732,7 @@ void __init build_cnode_tables(void)
732 kl_config_hdr_t *klgraph_header; 732 kl_config_hdr_t *klgraph_header;
733 nasid = cnodeid_to_nasid(node); 733 nasid = cnodeid_to_nasid(node);
734 klgraph_header = ia64_sn_get_klconfig_addr(nasid); 734 klgraph_header = ia64_sn_get_klconfig_addr(nasid);
735 if (klgraph_header == NULL) 735 BUG_ON(klgraph_header == NULL);
736 BUG();
737 brd = NODE_OFFSET_TO_LBOARD(nasid, klgraph_header->ch_board_info); 736 brd = NODE_OFFSET_TO_LBOARD(nasid, klgraph_header->ch_board_info);
738 while (brd) { 737 while (brd) {
739 if (board_needs_cnode(brd->brd_type) && physical_node_map[brd->brd_nasid] < 0) { 738 if (board_needs_cnode(brd->brd_type) && physical_node_map[brd->brd_nasid] < 0) {
@@ -750,7 +749,7 @@ nasid_slice_to_cpuid(int nasid, int slice)
750{ 749{
751 long cpu; 750 long cpu;
752 751
753 for (cpu = 0; cpu < NR_CPUS; cpu++) 752 for (cpu = 0; cpu < nr_cpu_ids; cpu++)
754 if (cpuid_to_nasid(cpu) == nasid && 753 if (cpuid_to_nasid(cpu) == nasid &&
755 cpuid_to_slice(cpu) == slice) 754 cpuid_to_slice(cpu) == slice)
756 return cpu; 755 return cpu;
diff --git a/arch/ia64/sn/kernel/sn2/sn2_smp.c b/arch/ia64/sn/kernel/sn2/sn2_smp.c
index e585f9a2afb..1176506b2ba 100644
--- a/arch/ia64/sn/kernel/sn2/sn2_smp.c
+++ b/arch/ia64/sn/kernel/sn2/sn2_smp.c
@@ -133,7 +133,7 @@ sn2_ipi_flush_all_tlb(struct mm_struct *mm)
133 unsigned long itc; 133 unsigned long itc;
134 134
135 itc = ia64_get_itc(); 135 itc = ia64_get_itc();
136 smp_flush_tlb_cpumask(mm->cpu_vm_mask); 136 smp_flush_tlb_cpumask(*mm_cpumask(mm));
137 itc = ia64_get_itc() - itc; 137 itc = ia64_get_itc() - itc;
138 __get_cpu_var(ptcstats).shub_ipi_flushes_itc_clocks += itc; 138 __get_cpu_var(ptcstats).shub_ipi_flushes_itc_clocks += itc;
139 __get_cpu_var(ptcstats).shub_ipi_flushes++; 139 __get_cpu_var(ptcstats).shub_ipi_flushes++;
@@ -182,7 +182,7 @@ sn2_global_tlb_purge(struct mm_struct *mm, unsigned long start,
182 nodes_clear(nodes_flushed); 182 nodes_clear(nodes_flushed);
183 i = 0; 183 i = 0;
184 184
185 for_each_cpu_mask(cpu, mm->cpu_vm_mask) { 185 for_each_cpu(cpu, mm_cpumask(mm)) {
186 cnode = cpu_to_node(cpu); 186 cnode = cpu_to_node(cpu);
187 node_set(cnode, nodes_flushed); 187 node_set(cnode, nodes_flushed);
188 lcpu = cpu; 188 lcpu = cpu;
@@ -461,7 +461,7 @@ bool sn_cpu_disable_allowed(int cpu)
461 461
462static void *sn2_ptc_seq_start(struct seq_file *file, loff_t * offset) 462static void *sn2_ptc_seq_start(struct seq_file *file, loff_t * offset)
463{ 463{
464 if (*offset < NR_CPUS) 464 if (*offset < nr_cpu_ids)
465 return offset; 465 return offset;
466 return NULL; 466 return NULL;
467} 467}
@@ -469,7 +469,7 @@ static void *sn2_ptc_seq_start(struct seq_file *file, loff_t * offset)
469static void *sn2_ptc_seq_next(struct seq_file *file, void *data, loff_t * offset) 469static void *sn2_ptc_seq_next(struct seq_file *file, void *data, loff_t * offset)
470{ 470{
471 (*offset)++; 471 (*offset)++;
472 if (*offset < NR_CPUS) 472 if (*offset < nr_cpu_ids)
473 return offset; 473 return offset;
474 return NULL; 474 return NULL;
475} 475}
@@ -491,7 +491,7 @@ static int sn2_ptc_seq_show(struct seq_file *file, void *data)
491 seq_printf(file, "# ptctest %d, flushopt %d\n", sn2_ptctest, sn2_flush_opt); 491 seq_printf(file, "# ptctest %d, flushopt %d\n", sn2_ptctest, sn2_flush_opt);
492 } 492 }
493 493
494 if (cpu < NR_CPUS && cpu_online(cpu)) { 494 if (cpu < nr_cpu_ids && cpu_online(cpu)) {
495 stat = &per_cpu(ptcstats, cpu); 495 stat = &per_cpu(ptcstats, cpu);
496 seq_printf(file, "cpu %d %ld %ld %ld %ld %ld %ld %ld %ld %ld %ld %ld %ld\n", cpu, stat->ptc_l, 496 seq_printf(file, "cpu %d %ld %ld %ld %ld %ld %ld %ld %ld %ld %ld %ld %ld\n", cpu, stat->ptc_l,
497 stat->change_rid, stat->shub_ptc_flushes, stat->nodes_flushed, 497 stat->change_rid, stat->shub_ptc_flushes, stat->nodes_flushed,
@@ -554,7 +554,7 @@ static int __init sn2_ptc_init(void)
554 554
555 proc_sn2_ptc = proc_create(PTC_BASENAME, 0444, 555 proc_sn2_ptc = proc_create(PTC_BASENAME, 0444,
556 NULL, &proc_sn2_ptc_operations); 556 NULL, &proc_sn2_ptc_operations);
557 if (!&proc_sn2_ptc_operations) { 557 if (!proc_sn2_ptc) {
558 printk(KERN_ERR "unable to create %s proc entry", PTC_BASENAME); 558 printk(KERN_ERR "unable to create %s proc entry", PTC_BASENAME);
559 return -EINVAL; 559 return -EINVAL;
560 } 560 }
diff --git a/arch/ia64/sn/kernel/sn2/sn_hwperf.c b/arch/ia64/sn/kernel/sn2/sn_hwperf.c
index be339477f90..9e6491cf72b 100644
--- a/arch/ia64/sn/kernel/sn2/sn_hwperf.c
+++ b/arch/ia64/sn/kernel/sn2/sn_hwperf.c
@@ -275,8 +275,7 @@ static int sn_hwperf_get_nearest_node_objdata(struct sn_hwperf_object_info *objb
275 275
276 /* get it's interconnect topology */ 276 /* get it's interconnect topology */
277 sz = op->ports * sizeof(struct sn_hwperf_port_info); 277 sz = op->ports * sizeof(struct sn_hwperf_port_info);
278 if (sz > sizeof(ptdata)) 278 BUG_ON(sz > sizeof(ptdata));
279 BUG();
280 e = ia64_sn_hwperf_op(sn_hwperf_master_nasid, 279 e = ia64_sn_hwperf_op(sn_hwperf_master_nasid,
281 SN_HWPERF_ENUM_PORTS, nodeobj->id, sz, 280 SN_HWPERF_ENUM_PORTS, nodeobj->id, sz,
282 (u64)&ptdata, 0, 0, NULL); 281 (u64)&ptdata, 0, 0, NULL);
@@ -310,8 +309,7 @@ static int sn_hwperf_get_nearest_node_objdata(struct sn_hwperf_object_info *objb
310 if (router && (!found_cpu || !found_mem)) { 309 if (router && (!found_cpu || !found_mem)) {
311 /* search for a node connected to the same router */ 310 /* search for a node connected to the same router */
312 sz = router->ports * sizeof(struct sn_hwperf_port_info); 311 sz = router->ports * sizeof(struct sn_hwperf_port_info);
313 if (sz > sizeof(ptdata)) 312 BUG_ON(sz > sizeof(ptdata));
314 BUG();
315 e = ia64_sn_hwperf_op(sn_hwperf_master_nasid, 313 e = ia64_sn_hwperf_op(sn_hwperf_master_nasid,
316 SN_HWPERF_ENUM_PORTS, router->id, sz, 314 SN_HWPERF_ENUM_PORTS, router->id, sz,
317 (u64)&ptdata, 0, 0, NULL); 315 (u64)&ptdata, 0, 0, NULL);
@@ -612,7 +610,7 @@ static int sn_hwperf_op_cpu(struct sn_hwperf_op_info *op_info)
612 op_info->a->arg &= SN_HWPERF_ARG_OBJID_MASK; 610 op_info->a->arg &= SN_HWPERF_ARG_OBJID_MASK;
613 611
614 if (cpu != SN_HWPERF_ARG_ANY_CPU) { 612 if (cpu != SN_HWPERF_ARG_ANY_CPU) {
615 if (cpu >= NR_CPUS || !cpu_online(cpu)) { 613 if (cpu >= nr_cpu_ids || !cpu_online(cpu)) {
616 r = -EINVAL; 614 r = -EINVAL;
617 goto out; 615 goto out;
618 } 616 }
diff --git a/arch/ia64/sn/pci/pcibr/pcibr_dma.c b/arch/ia64/sn/pci/pcibr/pcibr_dma.c
index 060df4aa991..c659ad5613a 100644
--- a/arch/ia64/sn/pci/pcibr/pcibr_dma.c
+++ b/arch/ia64/sn/pci/pcibr/pcibr_dma.c
@@ -256,9 +256,7 @@ void sn_dma_flush(u64 addr)
256 256
257 hubinfo = (NODEPDA(nasid_to_cnodeid(nasid)))->pdinfo; 257 hubinfo = (NODEPDA(nasid_to_cnodeid(nasid)))->pdinfo;
258 258
259 if (!hubinfo) { 259 BUG_ON(!hubinfo);
260 BUG();
261 }
262 260
263 flush_nasid_list = &hubinfo->hdi_flush_nasid_list; 261 flush_nasid_list = &hubinfo->hdi_flush_nasid_list;
264 if (flush_nasid_list->widget_p == NULL) 262 if (flush_nasid_list->widget_p == NULL)
diff --git a/arch/ia64/xen/Makefile b/arch/ia64/xen/Makefile
index 0ad0224693d..e6f4a0a7422 100644
--- a/arch/ia64/xen/Makefile
+++ b/arch/ia64/xen/Makefile
@@ -3,14 +3,29 @@
3# 3#
4 4
5obj-y := hypercall.o xenivt.o xensetup.o xen_pv_ops.o irq_xen.o \ 5obj-y := hypercall.o xenivt.o xensetup.o xen_pv_ops.o irq_xen.o \
6 hypervisor.o xencomm.o xcom_hcall.o grant-table.o time.o suspend.o 6 hypervisor.o xencomm.o xcom_hcall.o grant-table.o time.o suspend.o \
7 gate-data.o
7 8
8obj-$(CONFIG_IA64_GENERIC) += machvec.o 9obj-$(CONFIG_IA64_GENERIC) += machvec.o
9 10
11# The gate DSO image is built using a special linker script.
12include $(srctree)/arch/ia64/kernel/Makefile.gate
13
14# mark these objects as compiled for Xen
15CPPFLAGS_gate.lds += -D__IA64_GATE_PARAVIRTUALIZED_XEN
16AFLAGS_gate.o += -D__IA64_ASM_PARAVIRTUALIZED_XEN -D__IA64_GATE_PARAVIRTUALIZED_XEN
17
18# reuse the same source files as the native build.
19$(obj)/gate.o: $(src)/../kernel/gate.S FORCE
20 $(call if_changed_dep,as_o_S)
21$(obj)/gate.lds: $(src)/../kernel/gate.lds.S FORCE
22 $(call if_changed_dep,cpp_lds_S)
23
24
10AFLAGS_xenivt.o += -D__IA64_ASM_PARAVIRTUALIZED_XEN 25AFLAGS_xenivt.o += -D__IA64_ASM_PARAVIRTUALIZED_XEN
11 26
12# xen multi compile 27# xen multi compile
13ASM_PARAVIRT_MULTI_COMPILE_SRCS = ivt.S entry.S 28ASM_PARAVIRT_MULTI_COMPILE_SRCS = ivt.S entry.S fsys.S
14ASM_PARAVIRT_OBJS = $(addprefix xen-,$(ASM_PARAVIRT_MULTI_COMPILE_SRCS:.S=.o)) 29ASM_PARAVIRT_OBJS = $(addprefix xen-,$(ASM_PARAVIRT_MULTI_COMPILE_SRCS:.S=.o))
15obj-y += $(ASM_PARAVIRT_OBJS) 30obj-y += $(ASM_PARAVIRT_OBJS)
16define paravirtualized_xen 31define paravirtualized_xen
diff --git a/arch/ia64/xen/gate-data.S b/arch/ia64/xen/gate-data.S
new file mode 100644
index 00000000000..7d4830afc91
--- /dev/null
+++ b/arch/ia64/xen/gate-data.S
@@ -0,0 +1,3 @@
1 .section .data.gate.xen, "aw"
2
3 .incbin "arch/ia64/xen/gate.so"
diff --git a/arch/ia64/xen/hypercall.S b/arch/ia64/xen/hypercall.S
index 45e02bb64a9..e32dae444dd 100644
--- a/arch/ia64/xen/hypercall.S
+++ b/arch/ia64/xen/hypercall.S
@@ -9,6 +9,7 @@
9#include <asm/intrinsics.h> 9#include <asm/intrinsics.h>
10#include <asm/xen/privop.h> 10#include <asm/xen/privop.h>
11 11
12#ifdef __INTEL_COMPILER
12/* 13/*
13 * Hypercalls without parameter. 14 * Hypercalls without parameter.
14 */ 15 */
@@ -72,6 +73,7 @@ GLOBAL_ENTRY(xen_set_rr0_to_rr4)
72 br.ret.sptk.many rp 73 br.ret.sptk.many rp
73 ;; 74 ;;
74END(xen_set_rr0_to_rr4) 75END(xen_set_rr0_to_rr4)
76#endif
75 77
76GLOBAL_ENTRY(xen_send_ipi) 78GLOBAL_ENTRY(xen_send_ipi)
77 mov r14=r32 79 mov r14=r32
diff --git a/arch/ia64/xen/time.c b/arch/ia64/xen/time.c
index 68d6204c3f1..fb833269017 100644
--- a/arch/ia64/xen/time.c
+++ b/arch/ia64/xen/time.c
@@ -175,10 +175,58 @@ static void xen_itc_jitter_data_reset(void)
175 } while (unlikely(ret != lcycle)); 175 } while (unlikely(ret != lcycle));
176} 176}
177 177
178/* based on xen_sched_clock() in arch/x86/xen/time.c. */
179/*
180 * This relies on HAVE_UNSTABLE_SCHED_CLOCK. If that can't be defined,
181 * similar logic would have to be implemented here.
182 */
183/*
184 * Xen sched_clock implementation. Returns the number of unstolen
185 * nanoseconds, i.e. the nanoseconds the VCPU has spent in the
186 * RUNNING and BLOCKED states.
187 */
188static unsigned long long xen_sched_clock(void)
189{
190 struct vcpu_runstate_info runstate;
191
192 unsigned long long now;
193 unsigned long long offset;
194 unsigned long long ret;
195
196 /*
197 * Ideally sched_clock should be called on a per-cpu basis
198 * anyway, so preempt should already be disabled, but that's
199 * not current practice at the moment.
200 */
201 preempt_disable();
202
203 /*
204 * Both ia64_native_sched_clock() and Xen's runstate are
205 * based on mAR.ITC, so taking their difference makes sense.
206 */
207 now = ia64_native_sched_clock();
208
209 get_runstate_snapshot(&runstate);
210
211 WARN_ON(runstate.state != RUNSTATE_running);
212
213 offset = 0;
214 if (now > runstate.state_entry_time)
215 offset = now - runstate.state_entry_time;
216 ret = runstate.time[RUNSTATE_blocked] +
217 runstate.time[RUNSTATE_running] +
218 offset;
219
220 preempt_enable();
221
222 return ret;
223}
224
178struct pv_time_ops xen_time_ops __initdata = { 225struct pv_time_ops xen_time_ops __initdata = {
179 .init_missing_ticks_accounting = xen_init_missing_ticks_accounting, 226 .init_missing_ticks_accounting = xen_init_missing_ticks_accounting,
180 .do_steal_accounting = xen_do_steal_accounting, 227 .do_steal_accounting = xen_do_steal_accounting,
181 .clocksource_resume = xen_itc_jitter_data_reset, 228 .clocksource_resume = xen_itc_jitter_data_reset,
229 .sched_clock = xen_sched_clock,
182}; 230};
183 231
184/* Called after suspend, to resume time. */ 232/* Called after suspend, to resume time. */
diff --git a/arch/ia64/xen/xen_pv_ops.c b/arch/ia64/xen/xen_pv_ops.c
index 936cff3c96e..5e2270a999f 100644
--- a/arch/ia64/xen/xen_pv_ops.c
+++ b/arch/ia64/xen/xen_pv_ops.c
@@ -24,6 +24,7 @@
24#include <linux/irq.h> 24#include <linux/irq.h>
25#include <linux/kernel.h> 25#include <linux/kernel.h>
26#include <linux/pm.h> 26#include <linux/pm.h>
27#include <linux/unistd.h>
27 28
28#include <asm/xen/hypervisor.h> 29#include <asm/xen/hypervisor.h>
29#include <asm/xen/xencomm.h> 30#include <asm/xen/xencomm.h>
@@ -153,6 +154,13 @@ xen_post_smp_prepare_boot_cpu(void)
153 xen_setup_vcpu_info_placement(); 154 xen_setup_vcpu_info_placement();
154} 155}
155 156
157#ifdef ASM_SUPPORTED
158static unsigned long __init_or_module
159xen_patch_bundle(void *sbundle, void *ebundle, unsigned long type);
160#endif
161static void __init
162xen_patch_branch(unsigned long tag, unsigned long type);
163
156static const struct pv_init_ops xen_init_ops __initconst = { 164static const struct pv_init_ops xen_init_ops __initconst = {
157 .banner = xen_banner, 165 .banner = xen_banner,
158 166
@@ -163,6 +171,53 @@ static const struct pv_init_ops xen_init_ops __initconst = {
163 .arch_setup_nomca = xen_arch_setup_nomca, 171 .arch_setup_nomca = xen_arch_setup_nomca,
164 172
165 .post_smp_prepare_boot_cpu = xen_post_smp_prepare_boot_cpu, 173 .post_smp_prepare_boot_cpu = xen_post_smp_prepare_boot_cpu,
174#ifdef ASM_SUPPORTED
175 .patch_bundle = xen_patch_bundle,
176#endif
177 .patch_branch = xen_patch_branch,
178};
179
180/***************************************************************************
181 * pv_fsys_data
182 * addresses for fsys
183 */
184
185extern unsigned long xen_fsyscall_table[NR_syscalls];
186extern char xen_fsys_bubble_down[];
187struct pv_fsys_data xen_fsys_data __initdata = {
188 .fsyscall_table = (unsigned long *)xen_fsyscall_table,
189 .fsys_bubble_down = (void *)xen_fsys_bubble_down,
190};
191
192/***************************************************************************
193 * pv_patchdata
194 * patchdata addresses
195 */
196
197#define DECLARE(name) \
198 extern unsigned long __xen_start_gate_##name##_patchlist[]; \
199 extern unsigned long __xen_end_gate_##name##_patchlist[]
200
201DECLARE(fsyscall);
202DECLARE(brl_fsys_bubble_down);
203DECLARE(vtop);
204DECLARE(mckinley_e9);
205
206extern unsigned long __xen_start_gate_section[];
207
208#define ASSIGN(name) \
209 .start_##name##_patchlist = \
210 (unsigned long)__xen_start_gate_##name##_patchlist, \
211 .end_##name##_patchlist = \
212 (unsigned long)__xen_end_gate_##name##_patchlist
213
214static struct pv_patchdata xen_patchdata __initdata = {
215 ASSIGN(fsyscall),
216 ASSIGN(brl_fsys_bubble_down),
217 ASSIGN(vtop),
218 ASSIGN(mckinley_e9),
219
220 .gate_section = (void*)__xen_start_gate_section,
166}; 221};
167 222
168/*************************************************************************** 223/***************************************************************************
@@ -170,6 +225,76 @@ static const struct pv_init_ops xen_init_ops __initconst = {
170 * intrinsics hooks. 225 * intrinsics hooks.
171 */ 226 */
172 227
228#ifndef ASM_SUPPORTED
229static void
230xen_set_itm_with_offset(unsigned long val)
231{
232	/* ia64_cpu_local_tick() calls this with interrupts enabled. */
233 /* WARN_ON(!irqs_disabled()); */
234 xen_set_itm(val - XEN_MAPPEDREGS->itc_offset);
235}
236
237static unsigned long
238xen_get_itm_with_offset(void)
239{
240 /* unused at this moment */
241 printk(KERN_DEBUG "%s is called.\n", __func__);
242
243 WARN_ON(!irqs_disabled());
244 return ia64_native_getreg(_IA64_REG_CR_ITM) +
245 XEN_MAPPEDREGS->itc_offset;
246}
247
248/* ia64_set_itc() is only called by
249 * cpu_init() with ia64_set_itc(0) and ia64_sync_itc().
250 * So XEN_MAPPEDREGS->itc_offset can be considered almost constant.
251 */
252static void
253xen_set_itc(unsigned long val)
254{
255 unsigned long mitc;
256
257 WARN_ON(!irqs_disabled());
258 mitc = ia64_native_getreg(_IA64_REG_AR_ITC);
259 XEN_MAPPEDREGS->itc_offset = val - mitc;
260 XEN_MAPPEDREGS->itc_last = val;
261}
262
263static unsigned long
264xen_get_itc(void)
265{
266 unsigned long res;
267 unsigned long itc_offset;
268 unsigned long itc_last;
269 unsigned long ret_itc_last;
270
271 itc_offset = XEN_MAPPEDREGS->itc_offset;
272 do {
273 itc_last = XEN_MAPPEDREGS->itc_last;
274 res = ia64_native_getreg(_IA64_REG_AR_ITC);
275 res += itc_offset;
276 if (itc_last >= res)
277 res = itc_last + 1;
278 ret_itc_last = cmpxchg(&XEN_MAPPEDREGS->itc_last,
279 itc_last, res);
280 } while (unlikely(ret_itc_last != itc_last));
281 return res;
282
283#if 0
284 /* ia64_itc_udelay() calls ia64_get_itc() with interrupt enabled.
285 Should it be paravirtualized instead? */
286 WARN_ON(!irqs_disabled());
287 itc_offset = XEN_MAPPEDREGS->itc_offset;
288 itc_last = XEN_MAPPEDREGS->itc_last;
289 res = ia64_native_getreg(_IA64_REG_AR_ITC);
290 res += itc_offset;
291 if (itc_last >= res)
292 res = itc_last + 1;
293 XEN_MAPPEDREGS->itc_last = res;
294 return res;
295#endif
296}
297
173static void xen_setreg(int regnum, unsigned long val) 298static void xen_setreg(int regnum, unsigned long val)
174{ 299{
175 switch (regnum) { 300 switch (regnum) {
@@ -181,11 +306,14 @@ static void xen_setreg(int regnum, unsigned long val)
181 xen_set_eflag(val); 306 xen_set_eflag(val);
182 break; 307 break;
183#endif 308#endif
309 case _IA64_REG_AR_ITC:
310 xen_set_itc(val);
311 break;
184 case _IA64_REG_CR_TPR: 312 case _IA64_REG_CR_TPR:
185 xen_set_tpr(val); 313 xen_set_tpr(val);
186 break; 314 break;
187 case _IA64_REG_CR_ITM: 315 case _IA64_REG_CR_ITM:
188 xen_set_itm(val); 316 xen_set_itm_with_offset(val);
189 break; 317 break;
190 case _IA64_REG_CR_EOI: 318 case _IA64_REG_CR_EOI:
191 xen_eoi(val); 319 xen_eoi(val);
@@ -209,6 +337,12 @@ static unsigned long xen_getreg(int regnum)
209 res = xen_get_eflag(); 337 res = xen_get_eflag();
210 break; 338 break;
211#endif 339#endif
340 case _IA64_REG_AR_ITC:
341 res = xen_get_itc();
342 break;
343 case _IA64_REG_CR_ITM:
344 res = xen_get_itm_with_offset();
345 break;
212 case _IA64_REG_CR_IVR: 346 case _IA64_REG_CR_IVR:
213 res = xen_get_ivr(); 347 res = xen_get_ivr();
214 break; 348 break;
@@ -259,8 +393,417 @@ xen_intrin_local_irq_restore(unsigned long mask)
259 else 393 else
260 xen_rsm_i(); 394 xen_rsm_i();
261} 395}
396#else
397#define __DEFINE_FUNC(name, code) \
398 extern const char xen_ ## name ## _direct_start[]; \
399 extern const char xen_ ## name ## _direct_end[]; \
400 asm (".align 32\n" \
401 ".proc xen_" #name "\n" \
402 "xen_" #name ":\n" \
403 "xen_" #name "_direct_start:\n" \
404 code \
405 "xen_" #name "_direct_end:\n" \
406 "br.cond.sptk.many b6\n" \
407 ".endp xen_" #name "\n")
408
409#define DEFINE_VOID_FUNC0(name, code) \
410 extern void \
411 xen_ ## name (void); \
412 __DEFINE_FUNC(name, code)
413
414#define DEFINE_VOID_FUNC1(name, code) \
415 extern void \
416 xen_ ## name (unsigned long arg); \
417 __DEFINE_FUNC(name, code)
418
419#define DEFINE_VOID_FUNC1_VOID(name, code) \
420 extern void \
421 xen_ ## name (void *arg); \
422 __DEFINE_FUNC(name, code)
423
424#define DEFINE_VOID_FUNC2(name, code) \
425 extern void \
426 xen_ ## name (unsigned long arg0, \
427 unsigned long arg1); \
428 __DEFINE_FUNC(name, code)
262 429
263static const struct pv_cpu_ops xen_cpu_ops __initdata = { 430#define DEFINE_FUNC0(name, code) \
431 extern unsigned long \
432 xen_ ## name (void); \
433 __DEFINE_FUNC(name, code)
434
435#define DEFINE_FUNC1(name, type, code) \
436 extern unsigned long \
437 xen_ ## name (type arg); \
438 __DEFINE_FUNC(name, code)
439
440#define XEN_PSR_I_ADDR_ADDR (XSI_BASE + XSI_PSR_I_ADDR_OFS)
441
442/*
443 * static void xen_set_itm_with_offset(unsigned long val)
444 * xen_set_itm(val - XEN_MAPPEDREGS->itc_offset);
445 */
446/* 2 bundles */
447DEFINE_VOID_FUNC1(set_itm_with_offset,
448 "mov r2 = " __stringify(XSI_BASE) " + "
449 __stringify(XSI_ITC_OFFSET_OFS) "\n"
450 ";;\n"
451 "ld8 r3 = [r2]\n"
452 ";;\n"
453 "sub r8 = r8, r3\n"
454 "break " __stringify(HYPERPRIVOP_SET_ITM) "\n");
455
456/*
457 * static unsigned long xen_get_itm_with_offset(void)
458 * return ia64_native_getreg(_IA64_REG_CR_ITM) + XEN_MAPPEDREGS->itc_offset;
459 */
460/* 2 bundles */
461DEFINE_FUNC0(get_itm_with_offset,
462 "mov r2 = " __stringify(XSI_BASE) " + "
463 __stringify(XSI_ITC_OFFSET_OFS) "\n"
464 ";;\n"
465 "ld8 r3 = [r2]\n"
466 "mov r8 = cr.itm\n"
467 ";;\n"
468 "add r8 = r8, r2\n");
469
470/*
471 * static void xen_set_itc(unsigned long val)
472 * unsigned long mitc;
473 *
474 * WARN_ON(!irqs_disabled());
475 * mitc = ia64_native_getreg(_IA64_REG_AR_ITC);
476 * XEN_MAPPEDREGS->itc_offset = val - mitc;
477 * XEN_MAPPEDREGS->itc_last = val;
478 */
479/* 2 bundles */
480DEFINE_VOID_FUNC1(set_itc,
481 "mov r2 = " __stringify(XSI_BASE) " + "
482 __stringify(XSI_ITC_LAST_OFS) "\n"
483 "mov r3 = ar.itc\n"
484 ";;\n"
485 "sub r3 = r8, r3\n"
486 "st8 [r2] = r8, "
487 __stringify(XSI_ITC_LAST_OFS) " - "
488 __stringify(XSI_ITC_OFFSET_OFS) "\n"
489 ";;\n"
490 "st8 [r2] = r3\n");
491
492/*
493 * static unsigned long xen_get_itc(void)
494 * unsigned long res;
495 * unsigned long itc_offset;
496 * unsigned long itc_last;
497 * unsigned long ret_itc_last;
498 *
499 * itc_offset = XEN_MAPPEDREGS->itc_offset;
500 * do {
501 * itc_last = XEN_MAPPEDREGS->itc_last;
502 * res = ia64_native_getreg(_IA64_REG_AR_ITC);
503 * res += itc_offset;
504 * if (itc_last >= res)
505 * res = itc_last + 1;
506 * ret_itc_last = cmpxchg(&XEN_MAPPEDREGS->itc_last,
507 * itc_last, res);
508 * } while (unlikely(ret_itc_last != itc_last));
509 * return res;
510 */
511/* 5 bundles */
512DEFINE_FUNC0(get_itc,
513 "mov r2 = " __stringify(XSI_BASE) " + "
514 __stringify(XSI_ITC_OFFSET_OFS) "\n"
515 ";;\n"
516 "ld8 r9 = [r2], " __stringify(XSI_ITC_LAST_OFS) " - "
517 __stringify(XSI_ITC_OFFSET_OFS) "\n"
518 /* r9 = itc_offset */
519 /* r2 = XSI_ITC_OFFSET */
520 "888:\n"
521 "mov r8 = ar.itc\n" /* res = ar.itc */
522 ";;\n"
523 "ld8 r3 = [r2]\n" /* r3 = itc_last */
524 "add r8 = r8, r9\n" /* res = ar.itc + itc_offset */
525 ";;\n"
526 "cmp.gtu p6, p0 = r3, r8\n"
527 ";;\n"
528 "(p6) add r8 = 1, r3\n" /* if (itc_last > res) itc_last + 1 */
529 ";;\n"
530 "mov ar.ccv = r8\n"
531 ";;\n"
532 "cmpxchg8.acq r10 = [r2], r8, ar.ccv\n"
533 ";;\n"
534 "cmp.ne p6, p0 = r10, r3\n"
535 "(p6) hint @pause\n"
536 "(p6) br.cond.spnt 888b\n");
537
538DEFINE_VOID_FUNC1_VOID(fc,
539 "break " __stringify(HYPERPRIVOP_FC) "\n");
540
541/*
542 * psr_i_addr_addr = XEN_PSR_I_ADDR_ADDR
543 * masked_addr = *psr_i_addr_addr
544 * pending_intr_addr = masked_addr - 1
545 * if (val & IA64_PSR_I) {
546 * masked = *masked_addr
547 * *masked_addr = 0:xen_set_virtual_psr_i(1)
548 * compiler barrier
549 * if (masked) {
550 * uint8_t pending = *pending_intr_addr;
551 * if (pending)
552 * XEN_HYPER_SSM_I
553 * }
554 * } else {
555 * *masked_addr = 1:xen_set_virtual_psr_i(0)
556 * }
557 */
558/* 6 bundles */
559DEFINE_VOID_FUNC1(intrin_local_irq_restore,
560 /* r8 = input value: 0 or IA64_PSR_I
561 * p6 = (flags & IA64_PSR_I)
562 * = if clause
563 * p7 = !(flags & IA64_PSR_I)
564 * = else clause
565 */
566 "cmp.ne p6, p7 = r8, r0\n"
567 "mov r9 = " __stringify(XEN_PSR_I_ADDR_ADDR) "\n"
568 ";;\n"
569 /* r9 = XEN_PSR_I_ADDR */
570 "ld8 r9 = [r9]\n"
571 ";;\n"
572
573 /* r10 = masked previous value */
574 "(p6) ld1.acq r10 = [r9]\n"
575 ";;\n"
576
577 /* p8 = !masked interrupt masked previously? */
578 "(p6) cmp.ne.unc p8, p0 = r10, r0\n"
579
580 /* p7 = else clause */
581 "(p7) mov r11 = 1\n"
582 ";;\n"
583 /* masked = 1 */
584 "(p7) st1.rel [r9] = r11\n"
585
586 /* p6 = if clause */
587 /* masked = 0
588 * r9 = masked_addr - 1
589 * = pending_intr_addr
590 */
591 "(p8) st1.rel [r9] = r0, -1\n"
592 ";;\n"
593 /* r8 = pending_intr */
594 "(p8) ld1.acq r11 = [r9]\n"
595 ";;\n"
596 /* p9 = interrupt pending? */
597 "(p8) cmp.ne.unc p9, p10 = r11, r0\n"
598 ";;\n"
599 "(p10) mf\n"
600 /* issue hypercall to trigger interrupt */
601 "(p9) break " __stringify(HYPERPRIVOP_SSM_I) "\n");
602
603DEFINE_VOID_FUNC2(ptcga,
604 "break " __stringify(HYPERPRIVOP_PTC_GA) "\n");
605DEFINE_VOID_FUNC2(set_rr,
606 "break " __stringify(HYPERPRIVOP_SET_RR) "\n");
607
608/*
609 * tmp = XEN_MAPPEDREGS->interrupt_mask_addr = XEN_PSR_I_ADDR_ADDR;
610 * tmp = *tmp
611 * tmp = *tmp;
612 * psr_i = tmp? 0: IA64_PSR_I;
613 */
614/* 4 bundles */
615DEFINE_FUNC0(get_psr_i,
616 "mov r9 = " __stringify(XEN_PSR_I_ADDR_ADDR) "\n"
617 ";;\n"
618 "ld8 r9 = [r9]\n" /* r9 = XEN_PSR_I_ADDR */
619 "mov r8 = 0\n" /* psr_i = 0 */
620 ";;\n"
621 "ld1.acq r9 = [r9]\n" /* r9 = XEN_PSR_I */
622 ";;\n"
623 "cmp.eq.unc p6, p0 = r9, r0\n" /* p6 = (XEN_PSR_I != 0) */
624 ";;\n"
625 "(p6) mov r8 = " __stringify(1 << IA64_PSR_I_BIT) "\n");
626
627DEFINE_FUNC1(thash, unsigned long,
628 "break " __stringify(HYPERPRIVOP_THASH) "\n");
629DEFINE_FUNC1(get_cpuid, int,
630 "break " __stringify(HYPERPRIVOP_GET_CPUID) "\n");
631DEFINE_FUNC1(get_pmd, int,
632 "break " __stringify(HYPERPRIVOP_GET_PMD) "\n");
633DEFINE_FUNC1(get_rr, unsigned long,
634 "break " __stringify(HYPERPRIVOP_GET_RR) "\n");
635
636/*
637 * void xen_privop_ssm_i(void)
638 *
639 * int masked = !xen_get_virtual_psr_i();
640 * // masked = *(*XEN_MAPPEDREGS->interrupt_mask_addr)
641 * xen_set_virtual_psr_i(1)
642 * // *(*XEN_MAPPEDREGS->interrupt_mask_addr) = 0
643 * // compiler barrier
644 * if (masked) {
645 * uint8_t* pend_int_addr =
646 * (uint8_t*)(*XEN_MAPPEDREGS->interrupt_mask_addr) - 1;
647 * uint8_t pending = *pend_int_addr;
648 * if (pending)
649 * XEN_HYPER_SSM_I
650 * }
651 */
652/* 4 bundles */
653DEFINE_VOID_FUNC0(ssm_i,
654 "mov r8 = " __stringify(XEN_PSR_I_ADDR_ADDR) "\n"
655 ";;\n"
656 "ld8 r8 = [r8]\n" /* r8 = XEN_PSR_I_ADDR */
657 ";;\n"
658 "ld1.acq r9 = [r8]\n" /* r9 = XEN_PSR_I */
659 ";;\n"
660 "st1.rel [r8] = r0, -1\n" /* psr_i = 0. enable interrupt
661 * r8 = XEN_PSR_I_ADDR - 1
662 * = pend_int_addr
663 */
664 "cmp.eq.unc p0, p6 = r9, r0\n"/* p6 = !XEN_PSR_I
665 * previously interrupt
666 * masked?
667 */
668 ";;\n"
669 "(p6) ld1.acq r8 = [r8]\n" /* r8 = xen_pend_int */
670 ";;\n"
671 "(p6) cmp.eq.unc p6, p7 = r8, r0\n" /*interrupt pending?*/
672 ";;\n"
673 /* issue hypercall to get interrupt */
674 "(p7) break " __stringify(HYPERPRIVOP_SSM_I) "\n"
675 ";;\n");
676
677/*
678 * psr_i_addr_addr = XEN_MAPPEDREGS->interrupt_mask_addr
679 * = XEN_PSR_I_ADDR_ADDR;
680 * psr_i_addr = *psr_i_addr_addr;
681 * *psr_i_addr = 1;
682 */
683/* 2 bundles */
684DEFINE_VOID_FUNC0(rsm_i,
685 "mov r8 = " __stringify(XEN_PSR_I_ADDR_ADDR) "\n"
686 /* r8 = XEN_PSR_I_ADDR */
687 "mov r9 = 1\n"
688 ";;\n"
689 "ld8 r8 = [r8]\n" /* r8 = XEN_PSR_I */
690 ";;\n"
691 "st1.rel [r8] = r9\n"); /* XEN_PSR_I = 1 */
692
693extern void
694xen_set_rr0_to_rr4(unsigned long val0, unsigned long val1,
695 unsigned long val2, unsigned long val3,
696 unsigned long val4);
697__DEFINE_FUNC(set_rr0_to_rr4,
698 "break " __stringify(HYPERPRIVOP_SET_RR0_TO_RR4) "\n");
699
700
701extern unsigned long xen_getreg(int regnum);
702#define __DEFINE_GET_REG(id, privop) \
703 "mov r2 = " __stringify(_IA64_REG_ ## id) "\n" \
704 ";;\n" \
705 "cmp.eq p6, p0 = r2, r8\n" \
706 ";;\n" \
707 "(p6) break " __stringify(HYPERPRIVOP_GET_ ## privop) "\n" \
708 "(p6) br.cond.sptk.many b6\n" \
709 ";;\n"
710
711__DEFINE_FUNC(getreg,
712 __DEFINE_GET_REG(PSR, PSR)
713#ifdef CONFIG_IA32_SUPPORT
714 __DEFINE_GET_REG(AR_EFLAG, EFLAG)
715#endif
716
717 /* get_itc */
718 "mov r2 = " __stringify(_IA64_REG_AR_ITC) "\n"
719 ";;\n"
720 "cmp.eq p6, p0 = r2, r8\n"
721 ";;\n"
722 "(p6) br.cond.spnt xen_get_itc\n"
723 ";;\n"
724
725 /* get itm */
726 "mov r2 = " __stringify(_IA64_REG_CR_ITM) "\n"
727 ";;\n"
728 "cmp.eq p6, p0 = r2, r8\n"
729 ";;\n"
730 "(p6) br.cond.spnt xen_get_itm_with_offset\n"
731 ";;\n"
732
733 __DEFINE_GET_REG(CR_IVR, IVR)
734 __DEFINE_GET_REG(CR_TPR, TPR)
735
736 /* fall back */
737 "movl r2 = ia64_native_getreg_func\n"
738 ";;\n"
739 "mov b7 = r2\n"
740 ";;\n"
741 "br.cond.sptk.many b7\n");
742
743extern void xen_setreg(int regnum, unsigned long val);
744#define __DEFINE_SET_REG(id, privop) \
745 "mov r2 = " __stringify(_IA64_REG_ ## id) "\n" \
746 ";;\n" \
747 "cmp.eq p6, p0 = r2, r9\n" \
748 ";;\n" \
749 "(p6) break " __stringify(HYPERPRIVOP_ ## privop) "\n" \
750 "(p6) br.cond.sptk.many b6\n" \
751 ";;\n"
752
753__DEFINE_FUNC(setreg,
754 /* kr0 .. kr 7*/
755 /*
756 * if (_IA64_REG_AR_KR0 <= regnum &&
757 * regnum <= _IA64_REG_AR_KR7) {
758 * register __index asm ("r8") = regnum - _IA64_REG_AR_KR0
759 * register __val asm ("r9") = val
760 * "break HYPERPRIVOP_SET_KR"
761 * }
762 */
763 "mov r17 = r9\n"
764 "mov r2 = " __stringify(_IA64_REG_AR_KR0) "\n"
765 ";;\n"
766 "cmp.ge p6, p0 = r9, r2\n"
767 "sub r17 = r17, r2\n"
768 ";;\n"
769 "(p6) cmp.ge.unc p7, p0 = "
770 __stringify(_IA64_REG_AR_KR7) " - " __stringify(_IA64_REG_AR_KR0)
771 ", r17\n"
772 ";;\n"
773 "(p7) mov r9 = r8\n"
774 ";;\n"
775 "(p7) mov r8 = r17\n"
776 "(p7) break " __stringify(HYPERPRIVOP_SET_KR) "\n"
777
778 /* set itm */
779 "mov r2 = " __stringify(_IA64_REG_CR_ITM) "\n"
780 ";;\n"
781 "cmp.eq p6, p0 = r2, r8\n"
782 ";;\n"
783 "(p6) br.cond.spnt xen_set_itm_with_offset\n"
784
785 /* set itc */
786 "mov r2 = " __stringify(_IA64_REG_AR_ITC) "\n"
787 ";;\n"
788 "cmp.eq p6, p0 = r2, r8\n"
789 ";;\n"
790 "(p6) br.cond.spnt xen_set_itc\n"
791
792#ifdef CONFIG_IA32_SUPPORT
793 __DEFINE_SET_REG(AR_EFLAG, SET_EFLAG)
794#endif
795 __DEFINE_SET_REG(CR_TPR, SET_TPR)
796 __DEFINE_SET_REG(CR_EOI, EOI)
797
798 /* fall back */
799 "movl r2 = ia64_native_setreg_func\n"
800 ";;\n"
801 "mov b7 = r2\n"
802 ";;\n"
803 "br.cond.sptk.many b7\n");
804#endif
805
806static const struct pv_cpu_ops xen_cpu_ops __initconst = {
264 .fc = xen_fc, 807 .fc = xen_fc,
265 .thash = xen_thash, 808 .thash = xen_thash,
266 .get_cpuid = xen_get_cpuid, 809 .get_cpuid = xen_get_cpuid,
@@ -337,7 +880,7 @@ xen_iosapic_write(char __iomem *iosapic, unsigned int reg, u32 val)
337 HYPERVISOR_physdev_op(PHYSDEVOP_apic_write, &apic_op); 880 HYPERVISOR_physdev_op(PHYSDEVOP_apic_write, &apic_op);
338} 881}
339 882
340static const struct pv_iosapic_ops xen_iosapic_ops __initconst = { 883static struct pv_iosapic_ops xen_iosapic_ops __initdata = {
341 .pcat_compat_init = xen_pcat_compat_init, 884 .pcat_compat_init = xen_pcat_compat_init,
342 .__get_irq_chip = xen_iosapic_get_irq_chip, 885 .__get_irq_chip = xen_iosapic_get_irq_chip,
343 886
@@ -355,6 +898,8 @@ xen_setup_pv_ops(void)
355 xen_info_init(); 898 xen_info_init();
356 pv_info = xen_info; 899 pv_info = xen_info;
357 pv_init_ops = xen_init_ops; 900 pv_init_ops = xen_init_ops;
901 pv_fsys_data = xen_fsys_data;
902 pv_patchdata = xen_patchdata;
358 pv_cpu_ops = xen_cpu_ops; 903 pv_cpu_ops = xen_cpu_ops;
359 pv_iosapic_ops = xen_iosapic_ops; 904 pv_iosapic_ops = xen_iosapic_ops;
360 pv_irq_ops = xen_irq_ops; 905 pv_irq_ops = xen_irq_ops;
@@ -362,3 +907,252 @@ xen_setup_pv_ops(void)
362 907
363 paravirt_cpu_asm_init(&xen_cpu_asm_switch); 908 paravirt_cpu_asm_init(&xen_cpu_asm_switch);
364} 909}
910
911#ifdef ASM_SUPPORTED
912/***************************************************************************
913 * binary patching
914 * pv_init_ops.patch_bundle
915 */
916
917#define DEFINE_FUNC_GETREG(name, privop) \
918 DEFINE_FUNC0(get_ ## name, \
919 "break "__stringify(HYPERPRIVOP_GET_ ## privop) "\n")
920
921DEFINE_FUNC_GETREG(psr, PSR);
922DEFINE_FUNC_GETREG(eflag, EFLAG);
923DEFINE_FUNC_GETREG(ivr, IVR);
924DEFINE_FUNC_GETREG(tpr, TPR);
925
926#define DEFINE_FUNC_SET_KR(n) \
927 DEFINE_VOID_FUNC0(set_kr ## n, \
928 ";;\n" \
929 "mov r9 = r8\n" \
930 "mov r8 = " #n "\n" \
931 "break " __stringify(HYPERPRIVOP_SET_KR) "\n")
932
933DEFINE_FUNC_SET_KR(0);
934DEFINE_FUNC_SET_KR(1);
935DEFINE_FUNC_SET_KR(2);
936DEFINE_FUNC_SET_KR(3);
937DEFINE_FUNC_SET_KR(4);
938DEFINE_FUNC_SET_KR(5);
939DEFINE_FUNC_SET_KR(6);
940DEFINE_FUNC_SET_KR(7);
941
942#define __DEFINE_FUNC_SETREG(name, privop) \
943 DEFINE_VOID_FUNC0(name, \
944 "break "__stringify(HYPERPRIVOP_ ## privop) "\n")
945
946#define DEFINE_FUNC_SETREG(name, privop) \
947 __DEFINE_FUNC_SETREG(set_ ## name, SET_ ## privop)
948
949DEFINE_FUNC_SETREG(eflag, EFLAG);
950DEFINE_FUNC_SETREG(tpr, TPR);
951__DEFINE_FUNC_SETREG(eoi, EOI);
952
953extern const char xen_check_events[];
954extern const char __xen_intrin_local_irq_restore_direct_start[];
955extern const char __xen_intrin_local_irq_restore_direct_end[];
956extern const unsigned long __xen_intrin_local_irq_restore_direct_reloc;
957
958asm (
959 ".align 32\n"
960 ".proc xen_check_events\n"
961 "xen_check_events:\n"
962 /* masked = 0
963 * r9 = masked_addr - 1
964 * = pending_intr_addr
965 */
966 "st1.rel [r9] = r0, -1\n"
967 ";;\n"
968 /* r8 = pending_intr */
969 "ld1.acq r11 = [r9]\n"
970 ";;\n"
971 /* p9 = interrupt pending? */
972 "cmp.ne p9, p10 = r11, r0\n"
973 ";;\n"
974 "(p10) mf\n"
975 /* issue hypercall to trigger interrupt */
976 "(p9) break " __stringify(HYPERPRIVOP_SSM_I) "\n"
977 "br.cond.sptk.many b6\n"
978 ".endp xen_check_events\n"
979 "\n"
980 ".align 32\n"
981 ".proc __xen_intrin_local_irq_restore_direct\n"
982 "__xen_intrin_local_irq_restore_direct:\n"
983 "__xen_intrin_local_irq_restore_direct_start:\n"
984 "1:\n"
985 "{\n"
986 "cmp.ne p6, p7 = r8, r0\n"
987 "mov r17 = ip\n" /* get ip to calc return address */
988 "mov r9 = "__stringify(XEN_PSR_I_ADDR_ADDR) "\n"
989 ";;\n"
990 "}\n"
991 "{\n"
992 /* r9 = XEN_PSR_I_ADDR */
993 "ld8 r9 = [r9]\n"
994 ";;\n"
995 /* r10 = masked previous value */
996 "(p6) ld1.acq r10 = [r9]\n"
997 "adds r17 = 1f - 1b, r17\n" /* calculate return address */
998 ";;\n"
999 "}\n"
1000 "{\n"
1001 /* p8 = !masked interrupt masked previously? */
1002 "(p6) cmp.ne.unc p8, p0 = r10, r0\n"
1003 "\n"
1004 /* p7 = else clause */
1005 "(p7) mov r11 = 1\n"
1006 ";;\n"
1007 "(p8) mov b6 = r17\n" /* set return address */
1008 "}\n"
1009 "{\n"
1010 /* masked = 1 */
1011 "(p7) st1.rel [r9] = r11\n"
1012 "\n"
1013 "[99:]\n"
1014 "(p8) brl.cond.dptk.few xen_check_events\n"
1015 "}\n"
1016 /* pv calling stub is 5 bundles. fill nop to adjust return address */
1017 "{\n"
1018 "nop 0\n"
1019 "nop 0\n"
1020 "nop 0\n"
1021 "}\n"
1022 "1:\n"
1023 "__xen_intrin_local_irq_restore_direct_end:\n"
1024 ".endp __xen_intrin_local_irq_restore_direct\n"
1025 "\n"
1026 ".align 8\n"
1027 "__xen_intrin_local_irq_restore_direct_reloc:\n"
1028 "data8 99b\n"
1029);
1030
1031static struct paravirt_patch_bundle_elem xen_patch_bundle_elems[]
1032__initdata_or_module =
1033{
1034#define XEN_PATCH_BUNDLE_ELEM(name, type) \
1035 { \
1036 (void*)xen_ ## name ## _direct_start, \
1037 (void*)xen_ ## name ## _direct_end, \
1038 PARAVIRT_PATCH_TYPE_ ## type, \
1039 }
1040
1041 XEN_PATCH_BUNDLE_ELEM(fc, FC),
1042 XEN_PATCH_BUNDLE_ELEM(thash, THASH),
1043 XEN_PATCH_BUNDLE_ELEM(get_cpuid, GET_CPUID),
1044 XEN_PATCH_BUNDLE_ELEM(get_pmd, GET_PMD),
1045 XEN_PATCH_BUNDLE_ELEM(ptcga, PTCGA),
1046 XEN_PATCH_BUNDLE_ELEM(get_rr, GET_RR),
1047 XEN_PATCH_BUNDLE_ELEM(set_rr, SET_RR),
1048 XEN_PATCH_BUNDLE_ELEM(set_rr0_to_rr4, SET_RR0_TO_RR4),
1049 XEN_PATCH_BUNDLE_ELEM(ssm_i, SSM_I),
1050 XEN_PATCH_BUNDLE_ELEM(rsm_i, RSM_I),
1051 XEN_PATCH_BUNDLE_ELEM(get_psr_i, GET_PSR_I),
1052 {
1053 (void*)__xen_intrin_local_irq_restore_direct_start,
1054 (void*)__xen_intrin_local_irq_restore_direct_end,
1055 PARAVIRT_PATCH_TYPE_INTRIN_LOCAL_IRQ_RESTORE,
1056 },
1057
1058#define XEN_PATCH_BUNDLE_ELEM_GETREG(name, reg) \
1059 { \
1060 xen_get_ ## name ## _direct_start, \
1061 xen_get_ ## name ## _direct_end, \
1062 PARAVIRT_PATCH_TYPE_GETREG + _IA64_REG_ ## reg, \
1063 }
1064
1065 XEN_PATCH_BUNDLE_ELEM_GETREG(psr, PSR),
1066 XEN_PATCH_BUNDLE_ELEM_GETREG(eflag, AR_EFLAG),
1067
1068 XEN_PATCH_BUNDLE_ELEM_GETREG(ivr, CR_IVR),
1069 XEN_PATCH_BUNDLE_ELEM_GETREG(tpr, CR_TPR),
1070
1071 XEN_PATCH_BUNDLE_ELEM_GETREG(itc, AR_ITC),
1072 XEN_PATCH_BUNDLE_ELEM_GETREG(itm_with_offset, CR_ITM),
1073
1074
1075#define __XEN_PATCH_BUNDLE_ELEM_SETREG(name, reg) \
1076 { \
1077 xen_ ## name ## _direct_start, \
1078 xen_ ## name ## _direct_end, \
1079 PARAVIRT_PATCH_TYPE_SETREG + _IA64_REG_ ## reg, \
1080 }
1081
1082#define XEN_PATCH_BUNDLE_ELEM_SETREG(name, reg) \
1083 __XEN_PATCH_BUNDLE_ELEM_SETREG(set_ ## name, reg)
1084
1085 XEN_PATCH_BUNDLE_ELEM_SETREG(kr0, AR_KR0),
1086 XEN_PATCH_BUNDLE_ELEM_SETREG(kr1, AR_KR1),
1087 XEN_PATCH_BUNDLE_ELEM_SETREG(kr2, AR_KR2),
1088 XEN_PATCH_BUNDLE_ELEM_SETREG(kr3, AR_KR3),
1089 XEN_PATCH_BUNDLE_ELEM_SETREG(kr4, AR_KR4),
1090 XEN_PATCH_BUNDLE_ELEM_SETREG(kr5, AR_KR5),
1091 XEN_PATCH_BUNDLE_ELEM_SETREG(kr6, AR_KR6),
1092 XEN_PATCH_BUNDLE_ELEM_SETREG(kr7, AR_KR7),
1093
1094 XEN_PATCH_BUNDLE_ELEM_SETREG(eflag, AR_EFLAG),
1095 XEN_PATCH_BUNDLE_ELEM_SETREG(tpr, CR_TPR),
1096 __XEN_PATCH_BUNDLE_ELEM_SETREG(eoi, CR_EOI),
1097
1098 XEN_PATCH_BUNDLE_ELEM_SETREG(itc, AR_ITC),
1099 XEN_PATCH_BUNDLE_ELEM_SETREG(itm_with_offset, CR_ITM),
1100};
1101
1102static unsigned long __init_or_module
1103xen_patch_bundle(void *sbundle, void *ebundle, unsigned long type)
1104{
1105 const unsigned long nelems = sizeof(xen_patch_bundle_elems) /
1106 sizeof(xen_patch_bundle_elems[0]);
1107 unsigned long used;
1108 const struct paravirt_patch_bundle_elem *found;
1109
1110 used = __paravirt_patch_apply_bundle(sbundle, ebundle, type,
1111 xen_patch_bundle_elems, nelems,
1112 &found);
1113
1114 if (found == NULL)
1115 /* fallback */
1116 return ia64_native_patch_bundle(sbundle, ebundle, type);
1117 if (used == 0)
1118 return used;
1119
1120 /* relocation */
1121 switch (type) {
1122 case PARAVIRT_PATCH_TYPE_INTRIN_LOCAL_IRQ_RESTORE: {
1123 unsigned long reloc =
1124 __xen_intrin_local_irq_restore_direct_reloc;
1125 unsigned long reloc_offset = reloc - (unsigned long)
1126 __xen_intrin_local_irq_restore_direct_start;
1127 unsigned long tag = (unsigned long)sbundle + reloc_offset;
1128 paravirt_patch_reloc_brl(tag, xen_check_events);
1129 break;
1130 }
1131 default:
1132 /* nothing */
1133 break;
1134 }
1135 return used;
1136}
1137#endif /* ASM_SUPPORTED */
1138
1139const struct paravirt_patch_branch_target xen_branch_target[]
1140__initconst = {
1141#define PARAVIRT_BR_TARGET(name, type) \
1142 { \
1143 &xen_ ## name, \
1144 PARAVIRT_PATCH_TYPE_BR_ ## type, \
1145 }
1146 PARAVIRT_BR_TARGET(switch_to, SWITCH_TO),
1147 PARAVIRT_BR_TARGET(leave_syscall, LEAVE_SYSCALL),
1148 PARAVIRT_BR_TARGET(work_processed_syscall, WORK_PROCESSED_SYSCALL),
1149 PARAVIRT_BR_TARGET(leave_kernel, LEAVE_KERNEL),
1150};
1151
1152static void __init
1153xen_patch_branch(unsigned long tag, unsigned long type)
1154{
1155 const unsigned long nelem =
1156 sizeof(xen_branch_target) / sizeof(xen_branch_target[0]);
1157 __paravirt_patch_apply_branch(tag, type, xen_branch_target, nelem);
1158}
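Both the C xen_get_itc() and the hand-written get_itc bundle above use the same trick to keep the paravirtualized ar.itc monotonic: compute raw counter + per-VCPU offset, clamp it to at least itc_last + 1, and publish it with cmpxchg so concurrent readers never see the value move backwards. Below is a user-space sketch of that loop using C11 atomics; raw_counter(), itc_offset and monotonic_counter() are stand-ins for illustration, not kernel interfaces.

#include <stdatomic.h>
#include <stdint.h>
#include <stdio.h>
#include <time.h>

static _Atomic uint64_t itc_last;       /* last value handed out to any caller */
static uint64_t itc_offset = 12345;     /* per-VCPU offset; effectively constant */

/* stand-in for "mov r8 = ar.itc": any raw, possibly per-CPU counter */
static uint64_t raw_counter(void)
{
	struct timespec ts;

	clock_gettime(CLOCK_MONOTONIC, &ts);
	return (uint64_t)ts.tv_sec * 1000000000ULL + (uint64_t)ts.tv_nsec;
}

static uint64_t monotonic_counter(void)
{
	uint64_t last, res;

	do {
		last = atomic_load(&itc_last);
		res = raw_counter() + itc_offset;
		if (last >= res)        /* would appear to go backwards */
			res = last + 1;
		/* publish res only if itc_last is still the value we sampled */
	} while (!atomic_compare_exchange_weak(&itc_last, &last, res));

	return res;
}

int main(void)
{
	uint64_t prev = 0;

	for (int i = 0; i < 5; i++) {
		uint64_t now = monotonic_counter();

		printf("%llu%s\n", (unsigned long long)now,
		       now > prev ? "" : "  <-- never happens");
		prev = now;
	}
	return 0;
}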
diff --git a/arch/mips/include/asm/mach-bcm47xx/gpio.h b/arch/mips/include/asm/mach-bcm47xx/gpio.h
index d8ff4cd89ab..1784fde2e28 100644
--- a/arch/mips/include/asm/mach-bcm47xx/gpio.h
+++ b/arch/mips/include/asm/mach-bcm47xx/gpio.h
@@ -31,24 +31,28 @@ static inline void gpio_set_value(unsigned gpio, int value)
31 31
32static inline int gpio_direction_input(unsigned gpio) 32static inline int gpio_direction_input(unsigned gpio)
33{ 33{
34 return ssb_gpio_outen(&ssb_bcm47xx, 1 << gpio, 0); 34 ssb_gpio_outen(&ssb_bcm47xx, 1 << gpio, 0);
35 return 0;
35} 36}
36 37
37static inline int gpio_direction_output(unsigned gpio, int value) 38static inline int gpio_direction_output(unsigned gpio, int value)
38{ 39{
39 return ssb_gpio_outen(&ssb_bcm47xx, 1 << gpio, 1 << gpio); 40 ssb_gpio_outen(&ssb_bcm47xx, 1 << gpio, 1 << gpio);
41 return 0;
40} 42}
41 43
42static int gpio_intmask(unsigned gpio, int value) 44static inline int gpio_intmask(unsigned gpio, int value)
43{ 45{
44 return ssb_gpio_intmask(&ssb_bcm47xx, 1 << gpio, 46 ssb_gpio_intmask(&ssb_bcm47xx, 1 << gpio,
45 value ? 1 << gpio : 0); 47 value ? 1 << gpio : 0);
48 return 0;
46} 49}
47 50
48static int gpio_polarity(unsigned gpio, int value) 51static inline int gpio_polarity(unsigned gpio, int value)
49{ 52{
50 return ssb_gpio_polarity(&ssb_bcm47xx, 1 << gpio, 53 ssb_gpio_polarity(&ssb_bcm47xx, 1 << gpio,
51 value ? 1 << gpio : 0); 54 value ? 1 << gpio : 0);
55 return 0;
52} 56}
53 57
54 58
diff --git a/arch/mips/mm/highmem.c b/arch/mips/mm/highmem.c
index 060d28dca8a..4481656d106 100644
--- a/arch/mips/mm/highmem.c
+++ b/arch/mips/mm/highmem.c
@@ -42,6 +42,7 @@ void *__kmap_atomic(struct page *page, enum km_type type)
42 if (!PageHighMem(page)) 42 if (!PageHighMem(page))
43 return page_address(page); 43 return page_address(page);
44 44
45 debug_kmap_atomic(type);
45 idx = type + KM_TYPE_NR*smp_processor_id(); 46 idx = type + KM_TYPE_NR*smp_processor_id();
46 vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx); 47 vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
47#ifdef CONFIG_DEBUG_HIGHMEM 48#ifdef CONFIG_DEBUG_HIGHMEM
@@ -88,6 +89,7 @@ void *kmap_atomic_pfn(unsigned long pfn, enum km_type type)
88 89
89 pagefault_disable(); 90 pagefault_disable();
90 91
92 debug_kmap_atomic(type);
91 idx = type + KM_TYPE_NR*smp_processor_id(); 93 idx = type + KM_TYPE_NR*smp_processor_id();
92 vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx); 94 vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
93 set_pte(kmap_pte-idx, pfn_pte(pfn, kmap_prot)); 95 set_pte(kmap_pte-idx, pfn_pte(pfn, kmap_prot));
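The kmap_atomic paths touched above pick their fixmap slot as type + KM_TYPE_NR * smp_processor_id(), giving every CPU a private block of KM_TYPE_NR slots so atomic mappings on different CPUs cannot collide, while the added debug_kmap_atomic(type) only sanity-checks the km_type against the calling context. A small sketch of that slot arithmetic follows; the constants are hypothetical, not the real fixmap layout.

#include <stdio.h>

#define KM_TYPES    8              /* stand-in for KM_TYPE_NR */
#define NR_CPUS     4
#define PAGE_SHIFT  12
#define FIXADDR_TOP 0xfffff000UL   /* hypothetical top of the fixmap area */

/* same shape as __fix_to_virt(): a higher index maps to a lower virtual address */
static unsigned long fix_to_virt(unsigned int idx)
{
	return FIXADDR_TOP - ((unsigned long)idx << PAGE_SHIFT);
}

int main(void)
{
	/* slot 2 stands in for one km_type (e.g. KM_USER0) */
	for (unsigned int cpu = 0; cpu < NR_CPUS; cpu++) {
		unsigned int idx = 2 + KM_TYPES * cpu;

		printf("cpu%u -> idx %2u, vaddr %#lx\n",
		       cpu, idx, fix_to_virt(idx));
	}
	return 0;
}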
diff --git a/arch/parisc/kernel/time.c b/arch/parisc/kernel/time.c
index 9d46c43a415..e75cae6072c 100644
--- a/arch/parisc/kernel/time.c
+++ b/arch/parisc/kernel/time.c
@@ -216,17 +216,14 @@ void __init start_cpu_itimer(void)
216 per_cpu(cpu_data, cpu).it_value = next_tick; 216 per_cpu(cpu_data, cpu).it_value = next_tick;
217} 217}
218 218
219struct platform_device rtc_parisc_dev = { 219static struct platform_device rtc_parisc_dev = {
220 .name = "rtc-parisc", 220 .name = "rtc-parisc",
221 .id = -1, 221 .id = -1,
222}; 222};
223 223
224static int __init rtc_init(void) 224static int __init rtc_init(void)
225{ 225{
226 int ret; 226 if (platform_device_register(&rtc_parisc_dev) < 0)
227
228 ret = platform_device_register(&rtc_parisc_dev);
229 if (ret < 0)
230 printk(KERN_ERR "unable to register rtc device...\n"); 227 printk(KERN_ERR "unable to register rtc device...\n");
231 228
232 /* not necessarily an error */ 229 /* not necessarily an error */
diff --git a/arch/powerpc/Kconfig b/arch/powerpc/Kconfig
index ad6b1c084fe..45192dce65c 100644
--- a/arch/powerpc/Kconfig
+++ b/arch/powerpc/Kconfig
@@ -228,6 +228,9 @@ config PPC_OF_PLATFORM_PCI
228 depends on PPC64 # not supported on 32 bits yet 228 depends on PPC64 # not supported on 32 bits yet
229 default n 229 default n
230 230
231config ARCH_SUPPORTS_DEBUG_PAGEALLOC
232 def_bool y
233
231source "init/Kconfig" 234source "init/Kconfig"
232 235
233source "kernel/Kconfig.freezer" 236source "kernel/Kconfig.freezer"
diff --git a/arch/powerpc/Kconfig.debug b/arch/powerpc/Kconfig.debug
index 22091bbfdc9..6aa0b5e087c 100644
--- a/arch/powerpc/Kconfig.debug
+++ b/arch/powerpc/Kconfig.debug
@@ -30,6 +30,7 @@ config DEBUG_STACK_USAGE
30config DEBUG_PAGEALLOC 30config DEBUG_PAGEALLOC
31 bool "Debug page memory allocations" 31 bool "Debug page memory allocations"
32 depends on DEBUG_KERNEL && !HIBERNATION 32 depends on DEBUG_KERNEL && !HIBERNATION
33 depends on ARCH_SUPPORTS_DEBUG_PAGEALLOC
33 help 34 help
34 Unmap pages from the kernel linear mapping after free_pages(). 35 Unmap pages from the kernel linear mapping after free_pages().
35 This results in a large slowdown, but helps to find certain types 36 This results in a large slowdown, but helps to find certain types
diff --git a/arch/powerpc/boot/dts/mpc832x_rdb.dts b/arch/powerpc/boot/dts/mpc832x_rdb.dts
index dea30910c13..4319bd70a58 100644
--- a/arch/powerpc/boot/dts/mpc832x_rdb.dts
+++ b/arch/powerpc/boot/dts/mpc832x_rdb.dts
@@ -152,10 +152,21 @@
152 }; 152 };
153 153
154 par_io@1400 { 154 par_io@1400 {
155 #address-cells = <1>;
156 #size-cells = <1>;
155 reg = <0x1400 0x100>; 157 reg = <0x1400 0x100>;
158 ranges = <3 0x1448 0x18>;
159 compatible = "fsl,mpc8323-qe-pario";
156 device_type = "par_io"; 160 device_type = "par_io";
157 num-ports = <7>; 161 num-ports = <7>;
158 162
163 qe_pio_d: gpio-controller@1448 {
164 #gpio-cells = <2>;
165 compatible = "fsl,mpc8323-qe-pario-bank";
166 reg = <3 0x18>;
167 gpio-controller;
168 };
169
159 ucc2pio:ucc_pin@02 { 170 ucc2pio:ucc_pin@02 {
160 pio-map = < 171 pio-map = <
161 /* port pin dir open_drain assignment has_irq */ 172 /* port pin dir open_drain assignment has_irq */
@@ -225,12 +236,25 @@
225 }; 236 };
226 237
227 spi@4c0 { 238 spi@4c0 {
239 #address-cells = <1>;
240 #size-cells = <0>;
228 cell-index = <0>; 241 cell-index = <0>;
229 compatible = "fsl,spi"; 242 compatible = "fsl,spi";
230 reg = <0x4c0 0x40>; 243 reg = <0x4c0 0x40>;
231 interrupts = <2>; 244 interrupts = <2>;
232 interrupt-parent = <&qeic>; 245 interrupt-parent = <&qeic>;
246 gpios = <&qe_pio_d 13 0>;
233 mode = "cpu-qe"; 247 mode = "cpu-qe";
248
249 mmc-slot@0 {
250 compatible = "fsl,mpc8323rdb-mmc-slot",
251 "mmc-spi-slot";
252 reg = <0>;
253 gpios = <&qe_pio_d 14 1
254 &qe_pio_d 15 0>;
255 voltage-ranges = <3300 3300>;
256 spi-max-frequency = <50000000>;
257 };
234 }; 258 };
235 259
236 spi@500 { 260 spi@500 {
diff --git a/arch/powerpc/include/asm/highmem.h b/arch/powerpc/include/asm/highmem.h
index 545028f8648..684a73f4324 100644
--- a/arch/powerpc/include/asm/highmem.h
+++ b/arch/powerpc/include/asm/highmem.h
@@ -24,6 +24,7 @@
24 24
25#include <linux/init.h> 25#include <linux/init.h>
26#include <linux/interrupt.h> 26#include <linux/interrupt.h>
27#include <linux/highmem.h>
27#include <asm/kmap_types.h> 28#include <asm/kmap_types.h>
28#include <asm/tlbflush.h> 29#include <asm/tlbflush.h>
29#include <asm/page.h> 30#include <asm/page.h>
@@ -94,6 +95,7 @@ static inline void *kmap_atomic_prot(struct page *page, enum km_type type, pgpro
94 if (!PageHighMem(page)) 95 if (!PageHighMem(page))
95 return page_address(page); 96 return page_address(page);
96 97
98 debug_kmap_atomic(type);
97 idx = type + KM_TYPE_NR*smp_processor_id(); 99 idx = type + KM_TYPE_NR*smp_processor_id();
98 vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx); 100 vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
99#ifdef CONFIG_DEBUG_HIGHMEM 101#ifdef CONFIG_DEBUG_HIGHMEM
diff --git a/arch/powerpc/include/asm/pci.h b/arch/powerpc/include/asm/pci.h
index 3548159a1be..ba17d5d90a4 100644
--- a/arch/powerpc/include/asm/pci.h
+++ b/arch/powerpc/include/asm/pci.h
@@ -114,6 +114,10 @@ extern int pci_domain_nr(struct pci_bus *bus);
114/* Decide whether to display the domain number in /proc */ 114/* Decide whether to display the domain number in /proc */
115extern int pci_proc_domain(struct pci_bus *bus); 115extern int pci_proc_domain(struct pci_bus *bus);
116 116
117/* MSI arch hooks */
118#define arch_setup_msi_irqs arch_setup_msi_irqs
119#define arch_teardown_msi_irqs arch_teardown_msi_irqs
120#define arch_msi_check_device arch_msi_check_device
117 121
118struct vm_area_struct; 122struct vm_area_struct;
119/* Map a range of PCI memory or I/O space for a device into user space */ 123/* Map a range of PCI memory or I/O space for a device into user space */
diff --git a/arch/powerpc/include/asm/suspend.h b/arch/powerpc/include/asm/suspend.h
index cbf2c9404c3..c6efc3466aa 100644
--- a/arch/powerpc/include/asm/suspend.h
+++ b/arch/powerpc/include/asm/suspend.h
@@ -3,7 +3,4 @@
3 3
4static inline int arch_prepare_suspend(void) { return 0; } 4static inline int arch_prepare_suspend(void) { return 0; }
5 5
6void save_processor_state(void);
7void restore_processor_state(void);
8
9#endif /* __ASM_POWERPC_SUSPEND_H */ 6#endif /* __ASM_POWERPC_SUSPEND_H */
diff --git a/arch/powerpc/kernel/msi.c b/arch/powerpc/kernel/msi.c
index 3bb7d3dd28b..8bbc12d20f5 100644
--- a/arch/powerpc/kernel/msi.c
+++ b/arch/powerpc/kernel/msi.c
@@ -9,6 +9,7 @@
9 9
10#include <linux/kernel.h> 10#include <linux/kernel.h>
11#include <linux/msi.h> 11#include <linux/msi.h>
12#include <linux/pci.h>
12 13
13#include <asm/machdep.h> 14#include <asm/machdep.h>
14 15
@@ -19,6 +20,10 @@ int arch_msi_check_device(struct pci_dev* dev, int nvec, int type)
19 return -ENOSYS; 20 return -ENOSYS;
20 } 21 }
21 22
23 /* PowerPC doesn't support multiple MSI yet */
24 if (type == PCI_CAP_ID_MSI && nvec > 1)
25 return 1;
26
22 if (ppc_md.msi_check_device) { 27 if (ppc_md.msi_check_device) {
23 pr_debug("msi: Using platform check routine.\n"); 28 pr_debug("msi: Using platform check routine.\n");
24 return ppc_md.msi_check_device(dev, nvec, type); 29 return ppc_md.msi_check_device(dev, nvec, type);
diff --git a/arch/powerpc/platforms/83xx/mpc832x_rdb.c b/arch/powerpc/platforms/83xx/mpc832x_rdb.c
index 2a1295f1983..567ded7c3b9 100644
--- a/arch/powerpc/platforms/83xx/mpc832x_rdb.c
+++ b/arch/powerpc/platforms/83xx/mpc832x_rdb.c
@@ -20,6 +20,7 @@
20#include <linux/spi/mmc_spi.h> 20#include <linux/spi/mmc_spi.h>
21#include <linux/mmc/host.h> 21#include <linux/mmc/host.h>
22#include <linux/of_platform.h> 22#include <linux/of_platform.h>
23#include <linux/fsl_devices.h>
23 24
24#include <asm/time.h> 25#include <asm/time.h>
25#include <asm/ipic.h> 26#include <asm/ipic.h>
@@ -39,16 +40,116 @@
39#endif 40#endif
40 41
41#ifdef CONFIG_QUICC_ENGINE 42#ifdef CONFIG_QUICC_ENGINE
42static void mpc83xx_spi_activate_cs(u8 cs, u8 polarity) 43static int __init of_fsl_spi_probe(char *type, char *compatible, u32 sysclk,
44 struct spi_board_info *board_infos,
45 unsigned int num_board_infos,
46 void (*cs_control)(struct spi_device *dev,
47 bool on))
43{ 48{
44 pr_debug("%s %d %d\n", __func__, cs, polarity); 49 struct device_node *np;
45 par_io_data_set(3, 13, polarity); 50 unsigned int i = 0;
51
52 for_each_compatible_node(np, type, compatible) {
53 int ret;
54 unsigned int j;
55 const void *prop;
56 struct resource res[2];
57 struct platform_device *pdev;
58 struct fsl_spi_platform_data pdata = {
59 .cs_control = cs_control,
60 };
61
62 memset(res, 0, sizeof(res));
63
64 pdata.sysclk = sysclk;
65
66 prop = of_get_property(np, "reg", NULL);
67 if (!prop)
68 goto err;
69 pdata.bus_num = *(u32 *)prop;
70
71 prop = of_get_property(np, "cell-index", NULL);
72 if (prop)
73 i = *(u32 *)prop;
74
75 prop = of_get_property(np, "mode", NULL);
76 if (prop && !strcmp(prop, "cpu-qe"))
77 pdata.qe_mode = 1;
78
79 for (j = 0; j < num_board_infos; j++) {
80 if (board_infos[j].bus_num == pdata.bus_num)
81 pdata.max_chipselect++;
82 }
83
84 if (!pdata.max_chipselect)
85 continue;
86
87 ret = of_address_to_resource(np, 0, &res[0]);
88 if (ret)
89 goto err;
90
91 ret = of_irq_to_resource(np, 0, &res[1]);
92 if (ret == NO_IRQ)
93 goto err;
94
95 pdev = platform_device_alloc("mpc83xx_spi", i);
96 if (!pdev)
97 goto err;
98
99 ret = platform_device_add_data(pdev, &pdata, sizeof(pdata));
100 if (ret)
101 goto unreg;
102
103 ret = platform_device_add_resources(pdev, res,
104 ARRAY_SIZE(res));
105 if (ret)
106 goto unreg;
107
108 ret = platform_device_add(pdev);
109 if (ret)
110 goto unreg;
111
112 goto next;
113unreg:
114 platform_device_del(pdev);
115err:
116 pr_err("%s: registration failed\n", np->full_name);
117next:
118 i++;
119 }
120
121 return i;
46} 122}
47 123
48static void mpc83xx_spi_deactivate_cs(u8 cs, u8 polarity) 124static int __init fsl_spi_init(struct spi_board_info *board_infos,
125 unsigned int num_board_infos,
126 void (*cs_control)(struct spi_device *spi,
127 bool on))
49{ 128{
50 pr_debug("%s %d %d\n", __func__, cs, polarity); 129 u32 sysclk = -1;
51 par_io_data_set(3, 13, !polarity); 130 int ret;
131
132 /* SPI controller is either clocked from QE or SoC clock */
133 sysclk = get_brgfreq();
134 if (sysclk == -1) {
135 sysclk = fsl_get_sys_freq();
136 if (sysclk == -1)
137 return -ENODEV;
138 }
139
140 ret = of_fsl_spi_probe(NULL, "fsl,spi", sysclk, board_infos,
141 num_board_infos, cs_control);
142 if (!ret)
143 of_fsl_spi_probe("spi", "fsl_spi", sysclk, board_infos,
144 num_board_infos, cs_control);
145
146 return spi_register_board_info(board_infos, num_board_infos);
147}
148
149static void mpc83xx_spi_cs_control(struct spi_device *spi, bool on)
150{
151 pr_debug("%s %d %d\n", __func__, spi->chip_select, on);
152 par_io_data_set(3, 13, on);
52} 153}
53 154
54static struct mmc_spi_platform_data mpc832x_mmc_pdata = { 155static struct mmc_spi_platform_data mpc832x_mmc_pdata = {
@@ -74,9 +175,13 @@ static int __init mpc832x_spi_init(void)
74 par_io_config_pin(3, 14, 2, 0, 0, 0); /* SD_INSERT, I */ 175 par_io_config_pin(3, 14, 2, 0, 0, 0); /* SD_INSERT, I */
75 par_io_config_pin(3, 15, 2, 0, 0, 0); /* SD_PROTECT,I */ 176 par_io_config_pin(3, 15, 2, 0, 0, 0); /* SD_PROTECT,I */
76 177
77 return fsl_spi_init(&mpc832x_spi_boardinfo, 1, 178 /*
78 mpc83xx_spi_activate_cs, 179 * Don't bother with legacy stuff when device tree contains
79 mpc83xx_spi_deactivate_cs); 180 * mmc-spi-slot node.
181 */
182 if (of_find_compatible_node(NULL, NULL, "mmc-spi-slot"))
183 return 0;
184 return fsl_spi_init(&mpc832x_spi_boardinfo, 1, mpc83xx_spi_cs_control);
80} 185}
81machine_device_initcall(mpc832x_rdb, mpc832x_spi_init); 186machine_device_initcall(mpc832x_rdb, mpc832x_spi_init);
82#endif /* CONFIG_QUICC_ENGINE */ 187#endif /* CONFIG_QUICC_ENGINE */
diff --git a/arch/powerpc/sysdev/fsl_soc.c b/arch/powerpc/sysdev/fsl_soc.c
index a01c89d3f9b..afe8dbc964a 100644
--- a/arch/powerpc/sysdev/fsl_soc.c
+++ b/arch/powerpc/sysdev/fsl_soc.c
@@ -417,115 +417,6 @@ err:
417 417
418arch_initcall(fsl_usb_of_init); 418arch_initcall(fsl_usb_of_init);
419 419
420static int __init of_fsl_spi_probe(char *type, char *compatible, u32 sysclk,
421 struct spi_board_info *board_infos,
422 unsigned int num_board_infos,
423 void (*activate_cs)(u8 cs, u8 polarity),
424 void (*deactivate_cs)(u8 cs, u8 polarity))
425{
426 struct device_node *np;
427 unsigned int i = 0;
428
429 for_each_compatible_node(np, type, compatible) {
430 int ret;
431 unsigned int j;
432 const void *prop;
433 struct resource res[2];
434 struct platform_device *pdev;
435 struct fsl_spi_platform_data pdata = {
436 .activate_cs = activate_cs,
437 .deactivate_cs = deactivate_cs,
438 };
439
440 memset(res, 0, sizeof(res));
441
442 pdata.sysclk = sysclk;
443
444 prop = of_get_property(np, "reg", NULL);
445 if (!prop)
446 goto err;
447 pdata.bus_num = *(u32 *)prop;
448
449 prop = of_get_property(np, "cell-index", NULL);
450 if (prop)
451 i = *(u32 *)prop;
452
453 prop = of_get_property(np, "mode", NULL);
454 if (prop && !strcmp(prop, "cpu-qe"))
455 pdata.qe_mode = 1;
456
457 for (j = 0; j < num_board_infos; j++) {
458 if (board_infos[j].bus_num == pdata.bus_num)
459 pdata.max_chipselect++;
460 }
461
462 if (!pdata.max_chipselect)
463 continue;
464
465 ret = of_address_to_resource(np, 0, &res[0]);
466 if (ret)
467 goto err;
468
469 ret = of_irq_to_resource(np, 0, &res[1]);
470 if (ret == NO_IRQ)
471 goto err;
472
473 pdev = platform_device_alloc("mpc83xx_spi", i);
474 if (!pdev)
475 goto err;
476
477 ret = platform_device_add_data(pdev, &pdata, sizeof(pdata));
478 if (ret)
479 goto unreg;
480
481 ret = platform_device_add_resources(pdev, res,
482 ARRAY_SIZE(res));
483 if (ret)
484 goto unreg;
485
486 ret = platform_device_add(pdev);
487 if (ret)
488 goto unreg;
489
490 goto next;
491unreg:
492 platform_device_del(pdev);
493err:
494 pr_err("%s: registration failed\n", np->full_name);
495next:
496 i++;
497 }
498
499 return i;
500}
501
502int __init fsl_spi_init(struct spi_board_info *board_infos,
503 unsigned int num_board_infos,
504 void (*activate_cs)(u8 cs, u8 polarity),
505 void (*deactivate_cs)(u8 cs, u8 polarity))
506{
507 u32 sysclk = -1;
508 int ret;
509
510#ifdef CONFIG_QUICC_ENGINE
511 /* SPI controller is either clocked from QE or SoC clock */
512 sysclk = get_brgfreq();
513#endif
514 if (sysclk == -1) {
515 sysclk = fsl_get_sys_freq();
516 if (sysclk == -1)
517 return -ENODEV;
518 }
519
520 ret = of_fsl_spi_probe(NULL, "fsl,spi", sysclk, board_infos,
521 num_board_infos, activate_cs, deactivate_cs);
522 if (!ret)
523 of_fsl_spi_probe("spi", "fsl_spi", sysclk, board_infos,
524 num_board_infos, activate_cs, deactivate_cs);
525
526 return spi_register_board_info(board_infos, num_board_infos);
527}
528
529#if defined(CONFIG_PPC_85xx) || defined(CONFIG_PPC_86xx) 420#if defined(CONFIG_PPC_85xx) || defined(CONFIG_PPC_86xx)
530static __be32 __iomem *rstcr; 421static __be32 __iomem *rstcr;
531 422
diff --git a/arch/powerpc/sysdev/fsl_soc.h b/arch/powerpc/sysdev/fsl_soc.h
index 9c744e4285a..42381bb6cd5 100644
--- a/arch/powerpc/sysdev/fsl_soc.h
+++ b/arch/powerpc/sysdev/fsl_soc.h
@@ -4,6 +4,8 @@
4 4
5#include <asm/mmu.h> 5#include <asm/mmu.h>
6 6
7struct spi_device;
8
7extern phys_addr_t get_immrbase(void); 9extern phys_addr_t get_immrbase(void);
8#if defined(CONFIG_CPM2) || defined(CONFIG_QUICC_ENGINE) || defined(CONFIG_8xx) 10#if defined(CONFIG_CPM2) || defined(CONFIG_QUICC_ENGINE) || defined(CONFIG_8xx)
9extern u32 get_brgfreq(void); 11extern u32 get_brgfreq(void);
@@ -17,11 +19,6 @@ extern u32 fsl_get_sys_freq(void);
17struct spi_board_info; 19struct spi_board_info;
18struct device_node; 20struct device_node;
19 21
20extern int fsl_spi_init(struct spi_board_info *board_infos,
21 unsigned int num_board_infos,
22 void (*activate_cs)(u8 cs, u8 polarity),
23 void (*deactivate_cs)(u8 cs, u8 polarity));
24
25extern void fsl_rstcr_restart(char *cmd); 22extern void fsl_rstcr_restart(char *cmd);
26 23
27#if defined(CONFIG_FB_FSL_DIU) || defined(CONFIG_FB_FSL_DIU_MODULE) 24#if defined(CONFIG_FB_FSL_DIU) || defined(CONFIG_FB_FSL_DIU_MODULE)
diff --git a/arch/s390/Kconfig b/arch/s390/Kconfig
index 2a8af5e1634..dcb667c4375 100644
--- a/arch/s390/Kconfig
+++ b/arch/s390/Kconfig
@@ -72,6 +72,9 @@ config PGSTE
72config VIRT_CPU_ACCOUNTING 72config VIRT_CPU_ACCOUNTING
73 def_bool y 73 def_bool y
74 74
75config ARCH_SUPPORTS_DEBUG_PAGEALLOC
76 def_bool y
77
75mainmenu "Linux Kernel Configuration" 78mainmenu "Linux Kernel Configuration"
76 79
77config S390 80config S390
diff --git a/arch/s390/Kconfig.debug b/arch/s390/Kconfig.debug
index 4599fa06bd8..7e297a3cde3 100644
--- a/arch/s390/Kconfig.debug
+++ b/arch/s390/Kconfig.debug
@@ -9,6 +9,7 @@ source "lib/Kconfig.debug"
9config DEBUG_PAGEALLOC 9config DEBUG_PAGEALLOC
10 bool "Debug page memory allocations" 10 bool "Debug page memory allocations"
11 depends on DEBUG_KERNEL 11 depends on DEBUG_KERNEL
12 depends on ARCH_SUPPORTS_DEBUG_PAGEALLOC
12 help 13 help
13 Unmap pages from the kernel linear mapping after free_pages(). 14 Unmap pages from the kernel linear mapping after free_pages().
14 This results in a slowdown, but helps to find certain types of 15 This results in a slowdown, but helps to find certain types of
diff --git a/arch/s390/hypfs/hypfs_diag.c b/arch/s390/hypfs/hypfs_diag.c
index b1e892a4381..704dd396257 100644
--- a/arch/s390/hypfs/hypfs_diag.c
+++ b/arch/s390/hypfs/hypfs_diag.c
@@ -12,6 +12,8 @@
12 12
13#include <linux/types.h> 13#include <linux/types.h>
14#include <linux/errno.h> 14#include <linux/errno.h>
15#include <linux/gfp.h>
16#include <linux/slab.h>
15#include <linux/string.h> 17#include <linux/string.h>
16#include <linux/vmalloc.h> 18#include <linux/vmalloc.h>
17#include <asm/ebcdic.h> 19#include <asm/ebcdic.h>
diff --git a/arch/s390/include/asm/cio.h b/arch/s390/include/asm/cio.h
index 6dccb071aec..619bf94b11f 100644
--- a/arch/s390/include/asm/cio.h
+++ b/arch/s390/include/asm/cio.h
@@ -456,6 +456,8 @@ struct ciw {
456#define CIO_OPER 0x0004 456#define CIO_OPER 0x0004
457/* Sick revalidation of device. */ 457/* Sick revalidation of device. */
458#define CIO_REVALIDATE 0x0008 458#define CIO_REVALIDATE 0x0008
459/* Device did not respond in time. */
460#define CIO_BOXED 0x0010
459 461
460/** 462/**
461 * struct ccw_dev_id - unique identifier for ccw devices 463 * struct ccw_dev_id - unique identifier for ccw devices
diff --git a/arch/sparc/Kconfig b/arch/sparc/Kconfig
index c3ea215334f..cc12cd48bbc 100644
--- a/arch/sparc/Kconfig
+++ b/arch/sparc/Kconfig
@@ -124,6 +124,9 @@ config ARCH_NO_VIRT_TO_BUS
124config OF 124config OF
125 def_bool y 125 def_bool y
126 126
127config ARCH_SUPPORTS_DEBUG_PAGEALLOC
128 def_bool y if SPARC64
129
127source "init/Kconfig" 130source "init/Kconfig"
128 131
129source "kernel/Kconfig.freezer" 132source "kernel/Kconfig.freezer"
diff --git a/arch/sparc/Kconfig.debug b/arch/sparc/Kconfig.debug
index b8a15e271bf..d001b42041a 100644
--- a/arch/sparc/Kconfig.debug
+++ b/arch/sparc/Kconfig.debug
@@ -24,7 +24,8 @@ config STACK_DEBUG
24 24
25config DEBUG_PAGEALLOC 25config DEBUG_PAGEALLOC
26 bool "Debug page memory allocations" 26 bool "Debug page memory allocations"
27 depends on SPARC64 && DEBUG_KERNEL && !HIBERNATION 27 depends on DEBUG_KERNEL && !HIBERNATION
28 depends on ARCH_SUPPORTS_DEBUG_PAGEALLOC
28 help 29 help
29 Unmap pages from the kernel linear mapping after free_pages(). 30 Unmap pages from the kernel linear mapping after free_pages().
30 This results in a large slowdown, but helps to find certain types 31 This results in a large slowdown, but helps to find certain types
diff --git a/arch/sparc/mm/highmem.c b/arch/sparc/mm/highmem.c
index 752d0c9fb54..7916feba6e4 100644
--- a/arch/sparc/mm/highmem.c
+++ b/arch/sparc/mm/highmem.c
@@ -39,6 +39,7 @@ void *kmap_atomic(struct page *page, enum km_type type)
39 if (!PageHighMem(page)) 39 if (!PageHighMem(page))
40 return page_address(page); 40 return page_address(page);
41 41
42 debug_kmap_atomic(type);
42 idx = type + KM_TYPE_NR*smp_processor_id(); 43 idx = type + KM_TYPE_NR*smp_processor_id();
43 vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx); 44 vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
44 45
diff --git a/arch/um/drivers/pcap_user.h b/arch/um/drivers/pcap_user.h
index 96b80b565ee..d8ba6153f91 100644
--- a/arch/um/drivers/pcap_user.h
+++ b/arch/um/drivers/pcap_user.h
@@ -19,13 +19,3 @@ extern const struct net_user_info pcap_user_info;
19 19
20extern int pcap_user_read(int fd, void *buf, int len, struct pcap_data *pri); 20extern int pcap_user_read(int fd, void *buf, int len, struct pcap_data *pri);
21 21
22/*
23 * Overrides for Emacs so that we follow Linus's tabbing style.
24 * Emacs will notice this stuff at the end of the file and automatically
25 * adjust the settings for this buffer only. This must remain at the end
26 * of the file.
27 * ---------------------------------------------------------------------------
28 * Local variables:
29 * c-file-style: "linux"
30 * End:
31 */
diff --git a/arch/um/drivers/port.h b/arch/um/drivers/port.h
index 9117609a575..372a80c0556 100644
--- a/arch/um/drivers/port.h
+++ b/arch/um/drivers/port.h
@@ -18,13 +18,3 @@ extern void port_remove_dev(void *d);
18 18
19#endif 19#endif
20 20
21/*
22 * Overrides for Emacs so that we follow Linus's tabbing style.
23 * Emacs will notice this stuff at the end of the file and automatically
24 * adjust the settings for this buffer only. This must remain at the end
25 * of the file.
26 * ---------------------------------------------------------------------------
27 * Local variables:
28 * c-file-style: "linux"
29 * End:
30 */
diff --git a/arch/um/drivers/ssl.h b/arch/um/drivers/ssl.h
index 98412aa6660..314d17725ce 100644
--- a/arch/um/drivers/ssl.h
+++ b/arch/um/drivers/ssl.h
@@ -11,13 +11,3 @@ extern void ssl_receive_char(int line, char ch);
11 11
12#endif 12#endif
13 13
14/*
15 * Overrides for Emacs so that we follow Linus's tabbing style.
16 * Emacs will notice this stuff at the end of the file and automatically
17 * adjust the settings for this buffer only. This must remain at the end
18 * of the file.
19 * ---------------------------------------------------------------------------
20 * Local variables:
21 * c-file-style: "linux"
22 * End:
23 */
diff --git a/arch/um/drivers/stdio_console.h b/arch/um/drivers/stdio_console.h
index 505a3d5bea5..6d8275f71fd 100644
--- a/arch/um/drivers/stdio_console.h
+++ b/arch/um/drivers/stdio_console.h
@@ -9,13 +9,3 @@
9extern void save_console_flags(void); 9extern void save_console_flags(void);
10#endif 10#endif
11 11
12/*
13 * Overrides for Emacs so that we follow Linus's tabbing style.
14 * Emacs will notice this stuff at the end of the file and automatically
15 * adjust the settings for this buffer only. This must remain at the end
16 * of the file.
17 * ---------------------------------------------------------------------------
18 * Local variables:
19 * c-file-style: "linux"
20 * End:
21 */
diff --git a/arch/um/drivers/ubd_kern.c b/arch/um/drivers/ubd_kern.c
index 0a868118cf0..d42f826a8ab 100644
--- a/arch/um/drivers/ubd_kern.c
+++ b/arch/um/drivers/ubd_kern.c
@@ -17,7 +17,6 @@
17 * James McMechan 17 * James McMechan
18 */ 18 */
19 19
20#define MAJOR_NR UBD_MAJOR
21#define UBD_SHIFT 4 20#define UBD_SHIFT 4
22 21
23#include "linux/kernel.h" 22#include "linux/kernel.h"
@@ -115,7 +114,7 @@ static struct block_device_operations ubd_blops = {
115}; 114};
116 115
117/* Protected by ubd_lock */ 116/* Protected by ubd_lock */
118static int fake_major = MAJOR_NR; 117static int fake_major = UBD_MAJOR;
119static struct gendisk *ubd_gendisk[MAX_DEV]; 118static struct gendisk *ubd_gendisk[MAX_DEV];
120static struct gendisk *fake_gendisk[MAX_DEV]; 119static struct gendisk *fake_gendisk[MAX_DEV];
121 120
@@ -299,7 +298,7 @@ static int ubd_setup_common(char *str, int *index_out, char **error_out)
299 } 298 }
300 299
301 mutex_lock(&ubd_lock); 300 mutex_lock(&ubd_lock);
302 if(fake_major != MAJOR_NR){ 301 if (fake_major != UBD_MAJOR) {
303 *error_out = "Can't assign a fake major twice"; 302 *error_out = "Can't assign a fake major twice";
304 goto out1; 303 goto out1;
305 } 304 }
@@ -818,13 +817,13 @@ static int ubd_disk_register(int major, u64 size, int unit,
818 disk->first_minor = unit << UBD_SHIFT; 817 disk->first_minor = unit << UBD_SHIFT;
819 disk->fops = &ubd_blops; 818 disk->fops = &ubd_blops;
820 set_capacity(disk, size / 512); 819 set_capacity(disk, size / 512);
821 if(major == MAJOR_NR) 820 if (major == UBD_MAJOR)
822 sprintf(disk->disk_name, "ubd%c", 'a' + unit); 821 sprintf(disk->disk_name, "ubd%c", 'a' + unit);
823 else 822 else
824 sprintf(disk->disk_name, "ubd_fake%d", unit); 823 sprintf(disk->disk_name, "ubd_fake%d", unit);
825 824
826 /* sysfs register (not for ide fake devices) */ 825 /* sysfs register (not for ide fake devices) */
827 if (major == MAJOR_NR) { 826 if (major == UBD_MAJOR) {
828 ubd_devs[unit].pdev.id = unit; 827 ubd_devs[unit].pdev.id = unit;
829 ubd_devs[unit].pdev.name = DRIVER_NAME; 828 ubd_devs[unit].pdev.name = DRIVER_NAME;
830 ubd_devs[unit].pdev.dev.release = ubd_device_release; 829 ubd_devs[unit].pdev.dev.release = ubd_device_release;
@@ -871,13 +870,13 @@ static int ubd_add(int n, char **error_out)
871 ubd_dev->queue->queuedata = ubd_dev; 870 ubd_dev->queue->queuedata = ubd_dev;
872 871
873 blk_queue_max_hw_segments(ubd_dev->queue, MAX_SG); 872 blk_queue_max_hw_segments(ubd_dev->queue, MAX_SG);
874 err = ubd_disk_register(MAJOR_NR, ubd_dev->size, n, &ubd_gendisk[n]); 873 err = ubd_disk_register(UBD_MAJOR, ubd_dev->size, n, &ubd_gendisk[n]);
875 if(err){ 874 if(err){
876 *error_out = "Failed to register device"; 875 *error_out = "Failed to register device";
877 goto out_cleanup; 876 goto out_cleanup;
878 } 877 }
879 878
880 if(fake_major != MAJOR_NR) 879 if (fake_major != UBD_MAJOR)
881 ubd_disk_register(fake_major, ubd_dev->size, n, 880 ubd_disk_register(fake_major, ubd_dev->size, n,
882 &fake_gendisk[n]); 881 &fake_gendisk[n]);
883 882
@@ -1059,10 +1058,10 @@ static int __init ubd_init(void)
1059 char *error; 1058 char *error;
1060 int i, err; 1059 int i, err;
1061 1060
1062 if (register_blkdev(MAJOR_NR, "ubd")) 1061 if (register_blkdev(UBD_MAJOR, "ubd"))
1063 return -1; 1062 return -1;
1064 1063
1065 if (fake_major != MAJOR_NR) { 1064 if (fake_major != UBD_MAJOR) {
1066 char name[sizeof("ubd_nnn\0")]; 1065 char name[sizeof("ubd_nnn\0")];
1067 1066
1068 snprintf(name, sizeof(name), "ubd_%d", fake_major); 1067 snprintf(name, sizeof(name), "ubd_%d", fake_major);
diff --git a/arch/um/drivers/xterm.h b/arch/um/drivers/xterm.h
index f33a6e77b18..56b9c4aba42 100644
--- a/arch/um/drivers/xterm.h
+++ b/arch/um/drivers/xterm.h
@@ -10,13 +10,3 @@ extern int xterm_fd(int socket, int *pid_out);
10 10
11#endif 11#endif
12 12
13/*
14 * Overrides for Emacs so that we follow Linus's tabbing style.
15 * Emacs will notice this stuff at the end of the file and automatically
16 * adjust the settings for this buffer only. This must remain at the end
17 * of the file.
18 * ---------------------------------------------------------------------------
19 * Local variables:
20 * c-file-style: "linux"
21 * End:
22 */
diff --git a/arch/um/include/asm/irq_vectors.h b/arch/um/include/asm/irq_vectors.h
index 62ddba6fc73..272a81e0ce1 100644
--- a/arch/um/include/asm/irq_vectors.h
+++ b/arch/um/include/asm/irq_vectors.h
@@ -8,13 +8,3 @@
8 8
9#endif 9#endif
10 10
11/*
12 * Overrides for Emacs so that we follow Linus's tabbing style.
13 * Emacs will notice this stuff at the end of the file and automatically
14 * adjust the settings for this buffer only. This must remain at the end
15 * of the file.
16 * ---------------------------------------------------------------------------
17 * Local variables:
18 * c-file-style: "linux"
19 * End:
20 */
diff --git a/arch/um/include/asm/mmu.h b/arch/um/include/asm/mmu.h
index 2cf35c21d69..cf259de5153 100644
--- a/arch/um/include/asm/mmu.h
+++ b/arch/um/include/asm/mmu.h
@@ -10,13 +10,3 @@
10 10
11#endif 11#endif
12 12
13/*
14 * Overrides for Emacs so that we follow Linus's tabbing style.
15 * Emacs will notice this stuff at the end of the file and automatically
16 * adjust the settings for this buffer only. This must remain at the end
17 * of the file.
18 * ---------------------------------------------------------------------------
19 * Local variables:
20 * c-file-style: "linux"
21 * End:
22 */
diff --git a/arch/um/include/asm/pda.h b/arch/um/include/asm/pda.h
index 0d8bf33ffd4..ddcd774fc2a 100644
--- a/arch/um/include/asm/pda.h
+++ b/arch/um/include/asm/pda.h
@@ -19,13 +19,3 @@ extern struct foo me;
19 19
20#endif 20#endif
21 21
22/*
23 * Overrides for Emacs so that we follow Linus's tabbing style.
24 * Emacs will notice this stuff at the end of the file and automatically
25 * adjust the settings for this buffer only. This must remain at the end
26 * of the file.
27 * ---------------------------------------------------------------------------
28 * Local variables:
29 * c-file-style: "linux"
30 * End:
31 */
diff --git a/arch/um/include/asm/pgalloc.h b/arch/um/include/asm/pgalloc.h
index 9062a6e7224..718984359f8 100644
--- a/arch/um/include/asm/pgalloc.h
+++ b/arch/um/include/asm/pgalloc.h
@@ -60,13 +60,3 @@ static inline void pmd_free(struct mm_struct *mm, pmd_t *pmd)
60 60
61#endif 61#endif
62 62
63/*
64 * Overrides for Emacs so that we follow Linus's tabbing style.
65 * Emacs will notice this stuff at the end of the file and automatically
66 * adjust the settings for this buffer only. This must remain at the end
67 * of the file.
68 * ---------------------------------------------------------------------------
69 * Local variables:
70 * c-file-style: "linux"
71 * End:
72 */
diff --git a/arch/um/include/asm/pgtable-3level.h b/arch/um/include/asm/pgtable-3level.h
index 0446f456b42..084de4a9fc7 100644
--- a/arch/um/include/asm/pgtable-3level.h
+++ b/arch/um/include/asm/pgtable-3level.h
@@ -134,13 +134,3 @@ static inline pmd_t pfn_pmd(pfn_t page_nr, pgprot_t pgprot)
134 134
135#endif 135#endif
136 136
137/*
138 * Overrides for Emacs so that we follow Linus's tabbing style.
139 * Emacs will notice this stuff at the end of the file and automatically
140 * adjust the settings for this buffer only. This must remain at the end
141 * of the file.
142 * ---------------------------------------------------------------------------
143 * Local variables:
144 * c-file-style: "linux"
145 * End:
146 */
diff --git a/arch/um/include/shared/frame_kern.h b/arch/um/include/shared/frame_kern.h
index ce9514f5721..76078490c25 100644
--- a/arch/um/include/shared/frame_kern.h
+++ b/arch/um/include/shared/frame_kern.h
@@ -20,13 +20,3 @@ extern int setup_signal_stack_si(unsigned long stack_top, int sig,
20 20
21#endif 21#endif
22 22
23/*
24 * Overrides for Emacs so that we follow Linus's tabbing style.
25 * Emacs will notice this stuff at the end of the file and automatically
26 * adjust the settings for this buffer only. This must remain at the end
27 * of the file.
28 * ---------------------------------------------------------------------------
29 * Local variables:
30 * c-file-style: "linux"
31 * End:
32 */
diff --git a/arch/um/include/shared/initrd.h b/arch/um/include/shared/initrd.h
index 439b9a81498..22673bcc273 100644
--- a/arch/um/include/shared/initrd.h
+++ b/arch/um/include/shared/initrd.h
@@ -10,13 +10,3 @@ extern int load_initrd(char *filename, void *buf, int size);
10 10
11#endif 11#endif
12 12
13/*
14 * Overrides for Emacs so that we follow Linus's tabbing style.
15 * Emacs will notice this stuff at the end of the file and automatically
16 * adjust the settings for this buffer only. This must remain at the end
17 * of the file.
18 * ---------------------------------------------------------------------------
19 * Local variables:
20 * c-file-style: "linux"
21 * End:
22 */
diff --git a/arch/um/include/shared/irq_kern.h b/arch/um/include/shared/irq_kern.h
index fba3895274f..b05d22f3d84 100644
--- a/arch/um/include/shared/irq_kern.h
+++ b/arch/um/include/shared/irq_kern.h
@@ -16,13 +16,3 @@ extern int um_request_irq(unsigned int irq, int fd, int type,
16 16
17#endif 17#endif
18 18
19/*
20 * Overrides for Emacs so that we follow Linus's tabbing style.
21 * Emacs will notice this stuff at the end of the file and automatically
22 * adjust the settings for this buffer only. This must remain at the end
23 * of the file.
24 * ---------------------------------------------------------------------------
25 * Local variables:
26 * c-file-style: "linux"
27 * End:
28 */
diff --git a/arch/um/include/shared/mem_kern.h b/arch/um/include/shared/mem_kern.h
index cb7e196d366..69be0fd0ce4 100644
--- a/arch/um/include/shared/mem_kern.h
+++ b/arch/um/include/shared/mem_kern.h
@@ -18,13 +18,3 @@ extern void register_remapper(struct remapper *info);
18 18
19#endif 19#endif
20 20
21/*
22 * Overrides for Emacs so that we follow Linus's tabbing style.
23 * Emacs will notice this stuff at the end of the file and automatically
24 * adjust the settings for this buffer only. This must remain at the end
25 * of the file.
26 * ---------------------------------------------------------------------------
27 * Local variables:
28 * c-file-style: "linux"
29 * End:
30 */
diff --git a/arch/um/include/shared/ubd_user.h b/arch/um/include/shared/ubd_user.h
index bb66517f073..3845051f1b1 100644
--- a/arch/um/include/shared/ubd_user.h
+++ b/arch/um/include/shared/ubd_user.h
@@ -14,13 +14,3 @@ extern int kernel_fd;
14 14
15#endif 15#endif
16 16
17/*
18 * Overrides for Emacs so that we follow Linus's tabbing style.
19 * Emacs will notice this stuff at the end of the file and automatically
20 * adjust the settings for this buffer only. This must remain at the end
21 * of the file.
22 * ---------------------------------------------------------------------------
23 * Local variables:
24 * c-file-style: "linux"
25 * End:
26 */
diff --git a/arch/um/kernel/Makefile b/arch/um/kernel/Makefile
index 499e5e95e60..388ec0a3ea9 100644
--- a/arch/um/kernel/Makefile
+++ b/arch/um/kernel/Makefile
@@ -28,7 +28,7 @@ $(obj)/config.tmp: $(objtree)/.config FORCE
28 $(call if_changed,quote1) 28 $(call if_changed,quote1)
29 29
30quiet_cmd_quote1 = QUOTE $@ 30quiet_cmd_quote1 = QUOTE $@
31 cmd_quote1 = sed -e 's/"/\\"/g' -e 's/^/"/' -e 's/$$/\\n"/' \ 31 cmd_quote1 = sed -e 's/"/\\"/g' -e 's/^/"/' -e 's/$$/\\n",/' \
32 $< > $@ 32 $< > $@
33 33
34$(obj)/config.c: $(src)/config.c.in $(obj)/config.tmp FORCE 34$(obj)/config.c: $(src)/config.c.in $(obj)/config.tmp FORCE
@@ -36,9 +36,9 @@ $(obj)/config.c: $(src)/config.c.in $(obj)/config.tmp FORCE
36 36
37quiet_cmd_quote2 = QUOTE $@ 37quiet_cmd_quote2 = QUOTE $@
38 cmd_quote2 = sed -e '/CONFIG/{' \ 38 cmd_quote2 = sed -e '/CONFIG/{' \
39 -e 's/"CONFIG"\;/""/' \ 39 -e 's/"CONFIG"//' \
40 -e 'r $(obj)/config.tmp' \ 40 -e 'r $(obj)/config.tmp' \
41 -e 'a \' \ 41 -e 'a \' \
42 -e '""\;' \ 42 -e '""' \
43 -e '}' \ 43 -e '}' \
44 $< > $@ 44 $< > $@
diff --git a/arch/um/kernel/config.c.in b/arch/um/kernel/config.c.in
index c062cbfe386..b7a43feafde 100644
--- a/arch/um/kernel/config.c.in
+++ b/arch/um/kernel/config.c.in
@@ -7,11 +7,15 @@
7#include <stdlib.h> 7#include <stdlib.h>
8#include "init.h" 8#include "init.h"
9 9
10static __initdata char *config = "CONFIG"; 10static __initdata const char *config[] = {
11"CONFIG"
12};
11 13
12static int __init print_config(char *line, int *add) 14static int __init print_config(char *line, int *add)
13{ 15{
14 printf("%s", config); 16 int i;
17 for (i = 0; i < sizeof(config)/sizeof(config[0]); i++)
18 printf("%s", config[i]);
15 exit(0); 19 exit(0);
16} 20}
17 21
@@ -20,13 +24,3 @@ __uml_setup("--showconfig", print_config,
20" Prints the config file that this UML binary was generated from.\n\n" 24" Prints the config file that this UML binary was generated from.\n\n"
21); 25);
22 26
23/*
24 * Overrides for Emacs so that we follow Linus's tabbing style.
25 * Emacs will notice this stuff at the end of the file and automatically
26 * adjust the settings for this buffer only. This must remain at the end
27 * of the file.
28 * ---------------------------------------------------------------------------
29 * Local variables:
30 * c-file-style: "linux"
31 * End:
32 */
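
Note on the two um hunks above: cmd_quote1 now emits each .config line as a quoted string ending in a comma, and cmd_quote2 splices that list plus a terminating "" into config.c.in, so the embedded configuration becomes an array of string literals rather than one literal (presumably so a large .config no longer has to fit in a single string). A minimal standalone sketch of the shape of the generated file, with hypothetical .config lines:

#include <stdio.h>

/* Illustrative only -- not the real generated config.c.  The two quoted
 * lines stand in for whatever the sed pipeline produced from .config;
 * the trailing "" is the terminator appended by cmd_quote2. */
static const char *config[] = {
"# Example of a generated configuration\n",
"CONFIG_EXAMPLE=y\n",
""
};

int main(void)
{
	unsigned int i;

	/* Same loop as the new print_config(), minus the UML setup glue. */
	for (i = 0; i < sizeof(config) / sizeof(config[0]); i++)
		printf("%s", config[i]);
	return 0;
}
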
diff --git a/arch/um/os-Linux/start_up.c b/arch/um/os-Linux/start_up.c
index 183db26d01b..02ee9adff54 100644
--- a/arch/um/os-Linux/start_up.c
+++ b/arch/um/os-Linux/start_up.c
@@ -244,7 +244,7 @@ static void __init check_sysemu(void)
244 244
245 if ((ptrace(PTRACE_OLDSETOPTIONS, pid, 0, 245 if ((ptrace(PTRACE_OLDSETOPTIONS, pid, 0,
246 (void *) PTRACE_O_TRACESYSGOOD) < 0)) 246 (void *) PTRACE_O_TRACESYSGOOD) < 0))
247 fatal_perror("check_ptrace: PTRACE_OLDSETOPTIONS failed"); 247 fatal_perror("check_sysemu: PTRACE_OLDSETOPTIONS failed");
248 248
249 while (1) { 249 while (1) {
250 count++; 250 count++;
@@ -252,12 +252,12 @@ static void __init check_sysemu(void)
252 goto fail; 252 goto fail;
253 CATCH_EINTR(n = waitpid(pid, &status, WUNTRACED)); 253 CATCH_EINTR(n = waitpid(pid, &status, WUNTRACED));
254 if (n < 0) 254 if (n < 0)
255 fatal_perror("check_ptrace : wait failed"); 255 fatal_perror("check_sysemu: wait failed");
256 256
257 if (WIFSTOPPED(status) && 257 if (WIFSTOPPED(status) &&
258 (WSTOPSIG(status) == (SIGTRAP|0x80))) { 258 (WSTOPSIG(status) == (SIGTRAP|0x80))) {
259 if (!count) { 259 if (!count) {
260 non_fatal("check_ptrace : SYSEMU_SINGLESTEP " 260 non_fatal("check_sysemu: SYSEMU_SINGLESTEP "
261 "doesn't singlestep"); 261 "doesn't singlestep");
262 goto fail; 262 goto fail;
263 } 263 }
@@ -271,7 +271,7 @@ static void __init check_sysemu(void)
271 else if (WIFSTOPPED(status) && (WSTOPSIG(status) == SIGTRAP)) 271 else if (WIFSTOPPED(status) && (WSTOPSIG(status) == SIGTRAP))
272 count++; 272 count++;
273 else { 273 else {
274 non_fatal("check_ptrace : expected SIGTRAP or " 274 non_fatal("check_sysemu: expected SIGTRAP or "
275 "(SIGTRAP | 0x80), got status = %d\n", 275 "(SIGTRAP | 0x80), got status = %d\n",
276 status); 276 status);
277 goto fail; 277 goto fail;
diff --git a/arch/um/sys-i386/asm/archparam.h b/arch/um/sys-i386/asm/archparam.h
index 93fd723344e..2a18a884ca1 100644
--- a/arch/um/sys-i386/asm/archparam.h
+++ b/arch/um/sys-i386/asm/archparam.h
@@ -14,13 +14,3 @@
14 14
15#endif 15#endif
16 16
17/*
18 * Overrides for Emacs so that we follow Linus's tabbing style.
19 * Emacs will notice this stuff at the end of the file and automatically
20 * adjust the settings for this buffer only. This must remain at the end
21 * of the file.
22 * ---------------------------------------------------------------------------
23 * Local variables:
24 * c-file-style: "linux"
25 * End:
26 */
diff --git a/arch/um/sys-i386/shared/sysdep/checksum.h b/arch/um/sys-i386/shared/sysdep/checksum.h
index 0cb4645cbeb..ed47445f390 100644
--- a/arch/um/sys-i386/shared/sysdep/checksum.h
+++ b/arch/um/sys-i386/shared/sysdep/checksum.h
@@ -199,13 +199,3 @@ static __inline__ __wsum csum_and_copy_to_user(const void *src,
199 199
200#endif 200#endif
201 201
202/*
203 * Overrides for Emacs so that we follow Linus's tabbing style.
204 * Emacs will notice this stuff at the end of the file and automatically
205 * adjust the settings for this buffer only. This must remain at the end
206 * of the file.
207 * ---------------------------------------------------------------------------
208 * Local variables:
209 * c-file-style: "linux"
210 * End:
211 */
diff --git a/arch/um/sys-ia64/sysdep/ptrace.h b/arch/um/sys-ia64/sysdep/ptrace.h
index 42dd8fb6f2f..0f0f4e6fd33 100644
--- a/arch/um/sys-ia64/sysdep/ptrace.h
+++ b/arch/um/sys-ia64/sysdep/ptrace.h
@@ -14,13 +14,3 @@ struct sys_pt_regs {
14 14
15#endif 15#endif
16 16
17/*
18 * Overrides for Emacs so that we follow Linus's tabbing style.
19 * Emacs will notice this stuff at the end of the file and automatically
20 * adjust the settings for this buffer only. This must remain at the end
21 * of the file.
22 * ---------------------------------------------------------------------------
23 * Local variables:
24 * c-file-style: "linux"
25 * End:
26 */
diff --git a/arch/um/sys-ia64/sysdep/sigcontext.h b/arch/um/sys-ia64/sysdep/sigcontext.h
index f15fb25260b..76b43161e77 100644
--- a/arch/um/sys-ia64/sysdep/sigcontext.h
+++ b/arch/um/sys-ia64/sysdep/sigcontext.h
@@ -8,13 +8,3 @@
8 8
9#endif 9#endif
10 10
11/*
12 * Overrides for Emacs so that we follow Linus's tabbing style.
13 * Emacs will notice this stuff at the end of the file and automatically
14 * adjust the settings for this buffer only. This must remain at the end
15 * of the file.
16 * ---------------------------------------------------------------------------
17 * Local variables:
18 * c-file-style: "linux"
19 * End:
20 */
diff --git a/arch/um/sys-ia64/sysdep/syscalls.h b/arch/um/sys-ia64/sysdep/syscalls.h
index 4a1f46ef1eb..5f6700c4155 100644
--- a/arch/um/sys-ia64/sysdep/syscalls.h
+++ b/arch/um/sys-ia64/sysdep/syscalls.h
@@ -8,13 +8,3 @@
8 8
9#endif 9#endif
10 10
11/*
12 * Overrides for Emacs so that we follow Linus's tabbing style.
13 * Emacs will notice this stuff at the end of the file and automatically
14 * adjust the settings for this buffer only. This must remain at the end
15 * of the file.
16 * ---------------------------------------------------------------------------
17 * Local variables:
18 * c-file-style: "linux"
19 * End:
20 */
diff --git a/arch/um/sys-ppc/miscthings.c b/arch/um/sys-ppc/miscthings.c
index 373061c5012..1c11aed9c71 100644
--- a/arch/um/sys-ppc/miscthings.c
+++ b/arch/um/sys-ppc/miscthings.c
@@ -40,14 +40,3 @@ void shove_aux_table(unsigned long sp)
40} 40}
41/* END stuff taken from arch/ppc/kernel/process.c */ 41/* END stuff taken from arch/ppc/kernel/process.c */
42 42
43
44/*
45 * Overrides for Emacs so that we follow Linus's tabbing style.
46 * Emacs will notice this stuff at the end of the file and automatically
47 * adjust the settings for this buffer only. This must remain at the end
48 * of the file.
49 * ---------------------------------------------------------------------------
50 * Local variables:
51 * c-file-style: "linux"
52 * End:
53 */
diff --git a/arch/um/sys-ppc/ptrace.c b/arch/um/sys-ppc/ptrace.c
index 8e71b47f2b8..66ef155248f 100644
--- a/arch/um/sys-ppc/ptrace.c
+++ b/arch/um/sys-ppc/ptrace.c
@@ -56,13 +56,3 @@ int peek_user(struct task_struct *child, long addr, long data)
56 return put_user(tmp, (unsigned long *) data); 56 return put_user(tmp, (unsigned long *) data);
57} 57}
58 58
59/*
60 * Overrides for Emacs so that we follow Linus's tabbing style.
61 * Emacs will notice this stuff at the end of the file and automatically
62 * adjust the settings for this buffer only. This must remain at the end
63 * of the file.
64 * ---------------------------------------------------------------------------
65 * Local variables:
66 * c-file-style: "linux"
67 * End:
68 */
diff --git a/arch/um/sys-ppc/ptrace_user.c b/arch/um/sys-ppc/ptrace_user.c
index ff0b9c077a1..224d2403c37 100644
--- a/arch/um/sys-ppc/ptrace_user.c
+++ b/arch/um/sys-ppc/ptrace_user.c
@@ -27,13 +27,3 @@ int ptrace_setregs(long pid, unsigned long *regs_in)
27 } 27 }
28 return 0; 28 return 0;
29} 29}
30/*
31 * Overrides for Emacs so that we follow Linus's tabbing style.
32 * Emacs will notice this stuff at the end of the file and automatically
33 * adjust the settings for this buffer only. This must remain at the end
34 * of the file.
35 * ---------------------------------------------------------------------------
36 * Local variables:
37 * c-file-style: "linux"
38 * End:
39 */
diff --git a/arch/um/sys-ppc/shared/sysdep/ptrace.h b/arch/um/sys-ppc/shared/sysdep/ptrace.h
index df2397dba3e..0e3230e937e 100644
--- a/arch/um/sys-ppc/shared/sysdep/ptrace.h
+++ b/arch/um/sys-ppc/shared/sysdep/ptrace.h
@@ -91,13 +91,3 @@ extern void shove_aux_table(unsigned long sp);
91 91
92#endif 92#endif
93 93
94/*
95 * Overrides for Emacs so that we follow Linus's tabbing style.
96 * Emacs will notice this stuff at the end of the file and automatically
97 * adjust the settings for this buffer only. This must remain at the end
98 * of the file.
99 * ---------------------------------------------------------------------------
100 * Local variables:
101 * c-file-style: "linux"
102 * End:
103 */
diff --git a/arch/um/sys-ppc/shared/sysdep/sigcontext.h b/arch/um/sys-ppc/shared/sysdep/sigcontext.h
index f20d965de9c..b7286f0a1e0 100644
--- a/arch/um/sys-ppc/shared/sysdep/sigcontext.h
+++ b/arch/um/sys-ppc/shared/sysdep/sigcontext.h
@@ -50,13 +50,3 @@
50 50
51#endif 51#endif
52 52
53/*
54 * Overrides for Emacs so that we follow Linus's tabbing style.
55 * Emacs will notice this stuff at the end of the file and automatically
56 * adjust the settings for this buffer only. This must remain at the end
57 * of the file.
58 * ---------------------------------------------------------------------------
59 * Local variables:
60 * c-file-style: "linux"
61 * End:
62 */
diff --git a/arch/um/sys-ppc/shared/sysdep/syscalls.h b/arch/um/sys-ppc/shared/sysdep/syscalls.h
index 679df351e19..1ff81552251 100644
--- a/arch/um/sys-ppc/shared/sysdep/syscalls.h
+++ b/arch/um/sys-ppc/shared/sysdep/syscalls.h
@@ -41,13 +41,3 @@ int old_mmap(unsigned long addr, unsigned long len,
41 41
42#define LAST_ARCH_SYSCALL __NR_fadvise64 42#define LAST_ARCH_SYSCALL __NR_fadvise64
43 43
44/*
45 * Overrides for Emacs so that we follow Linus's tabbing style.
46 * Emacs will notice this stuff at the end of the file and automatically
47 * adjust the settings for this buffer only. This must remain at the end
48 * of the file.
49 * ---------------------------------------------------------------------------
50 * Local variables:
51 * c-file-style: "linux"
52 * End:
53 */
diff --git a/arch/um/sys-ppc/sigcontext.c b/arch/um/sys-ppc/sigcontext.c
index 4bdc15c89ed..40694d0f3d1 100644
--- a/arch/um/sys-ppc/sigcontext.c
+++ b/arch/um/sys-ppc/sigcontext.c
@@ -2,13 +2,3 @@
2#include "asm/sigcontext.h" 2#include "asm/sigcontext.h"
3#include "sysdep/ptrace.h" 3#include "sysdep/ptrace.h"
4 4
5/*
6 * Overrides for Emacs so that we follow Linus's tabbing style.
7 * Emacs will notice this stuff at the end of the file and automatically
8 * adjust the settings for this buffer only. This must remain at the end
9 * of the file.
10 * ---------------------------------------------------------------------------
11 * Local variables:
12 * c-file-style: "linux"
13 * End:
14 */
diff --git a/arch/um/sys-x86_64/asm/archparam.h b/arch/um/sys-x86_64/asm/archparam.h
index 270ed9586b6..6c083663b8d 100644
--- a/arch/um/sys-x86_64/asm/archparam.h
+++ b/arch/um/sys-x86_64/asm/archparam.h
@@ -14,13 +14,3 @@
14 14
15#endif 15#endif
16 16
17/*
18 * Overrides for Emacs so that we follow Linus's tabbing style.
19 * Emacs will notice this stuff at the end of the file and automatically
20 * adjust the settings for this buffer only. This must remain at the end
21 * of the file.
22 * ---------------------------------------------------------------------------
23 * Local variables:
24 * c-file-style: "linux"
25 * End:
26 */
diff --git a/arch/um/sys-x86_64/asm/module.h b/arch/um/sys-x86_64/asm/module.h
index 35b5491d3e9..8eb79c2d07d 100644
--- a/arch/um/sys-x86_64/asm/module.h
+++ b/arch/um/sys-x86_64/asm/module.h
@@ -18,13 +18,3 @@ struct mod_arch_specific
18 18
19#endif 19#endif
20 20
21/*
22 * Overrides for Emacs so that we follow Linus's tabbing style.
23 * Emacs will notice this stuff at the end of the file and automatically
24 * adjust the settings for this buffer only. This must remain at the end
25 * of the file.
26 * ---------------------------------------------------------------------------
27 * Local variables:
28 * c-file-style: "linux"
29 * End:
30 */
diff --git a/arch/um/sys-x86_64/mem.c b/arch/um/sys-x86_64/mem.c
index 3f59a0a4f15..3f8df8abf34 100644
--- a/arch/um/sys-x86_64/mem.c
+++ b/arch/um/sys-x86_64/mem.c
@@ -14,12 +14,3 @@ unsigned long vm_data_default_flags = __VM_DATA_DEFAULT_FLAGS;
14unsigned long vm_data_default_flags32 = __VM_DATA_DEFAULT_FLAGS; 14unsigned long vm_data_default_flags32 = __VM_DATA_DEFAULT_FLAGS;
15unsigned long vm_force_exec32 = PROT_EXEC; 15unsigned long vm_force_exec32 = PROT_EXEC;
16 16
17/* Overrides for Emacs so that we follow Linus's tabbing style.
18 * Emacs will notice this stuff at the end of the file and automatically
19 * adjust the settings for this buffer only. This must remain at the end
20 * of the file.
21 * ---------------------------------------------------------------------------
22 * Local variables:
23 * c-file-style: "linux"
24 * End:
25 */
diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
index 45161b81631..748e50a1a15 100644
--- a/arch/x86/Kconfig
+++ b/arch/x86/Kconfig
@@ -165,6 +165,9 @@ config AUDIT_ARCH
165config ARCH_SUPPORTS_OPTIMIZED_INLINING 165config ARCH_SUPPORTS_OPTIMIZED_INLINING
166 def_bool y 166 def_bool y
167 167
168config ARCH_SUPPORTS_DEBUG_PAGEALLOC
169 def_bool y
170
168# Use the generic interrupt handling code in kernel/irq/: 171# Use the generic interrupt handling code in kernel/irq/:
169config GENERIC_HARDIRQS 172config GENERIC_HARDIRQS
170 bool 173 bool
diff --git a/arch/x86/Kconfig.debug b/arch/x86/Kconfig.debug
index fdb45df608b..a345cb5447a 100644
--- a/arch/x86/Kconfig.debug
+++ b/arch/x86/Kconfig.debug
@@ -75,6 +75,7 @@ config DEBUG_STACK_USAGE
75config DEBUG_PAGEALLOC 75config DEBUG_PAGEALLOC
76 bool "Debug page memory allocations" 76 bool "Debug page memory allocations"
77 depends on DEBUG_KERNEL 77 depends on DEBUG_KERNEL
78 depends on ARCH_SUPPORTS_DEBUG_PAGEALLOC
78 ---help--- 79 ---help---
79 Unmap pages from the kernel linear mapping after free_pages(). 80 Unmap pages from the kernel linear mapping after free_pages().
80 This results in a large slowdown, but helps to find certain types 81 This results in a large slowdown, but helps to find certain types
diff --git a/arch/x86/boot/memory.c b/arch/x86/boot/memory.c
index 8c3c25f3557..5054c2ddd1a 100644
--- a/arch/x86/boot/memory.c
+++ b/arch/x86/boot/memory.c
@@ -2,6 +2,7 @@
2 * 2 *
3 * Copyright (C) 1991, 1992 Linus Torvalds 3 * Copyright (C) 1991, 1992 Linus Torvalds
4 * Copyright 2007 rPath, Inc. - All Rights Reserved 4 * Copyright 2007 rPath, Inc. - All Rights Reserved
5 * Copyright 2009 Intel Corporation; author H. Peter Anvin
5 * 6 *
6 * This file is part of the Linux kernel, and is made available under 7 * This file is part of the Linux kernel, and is made available under
7 * the terms of the GNU General Public License version 2. 8 * the terms of the GNU General Public License version 2.
@@ -16,24 +17,38 @@
16 17
17#define SMAP 0x534d4150 /* ASCII "SMAP" */ 18#define SMAP 0x534d4150 /* ASCII "SMAP" */
18 19
20struct e820_ext_entry {
21 struct e820entry std;
22 u32 ext_flags;
23} __attribute__((packed));
24
19static int detect_memory_e820(void) 25static int detect_memory_e820(void)
20{ 26{
21 int count = 0; 27 int count = 0;
22 u32 next = 0; 28 u32 next = 0;
23 u32 size, id; 29 u32 size, id, edi;
24 u8 err; 30 u8 err;
25 struct e820entry *desc = boot_params.e820_map; 31 struct e820entry *desc = boot_params.e820_map;
32 static struct e820_ext_entry buf; /* static so it is zeroed */
33
34 /*
35 * Set this here so that if the BIOS doesn't change this field
36 * but still doesn't change %ecx, we're still okay...
37 */
38 buf.ext_flags = 1;
26 39
27 do { 40 do {
28 size = sizeof(struct e820entry); 41 size = sizeof buf;
29 42
30 /* Important: %edx is clobbered by some BIOSes, 43 /* Important: %edx and %esi are clobbered by some BIOSes,
31 so it must be either used for the error output 44 so they must be either used for the error output
32 or explicitly marked clobbered. */ 45 or explicitly marked clobbered. Given that, assume there
33 asm("int $0x15; setc %0" 46 is something out there clobbering %ebp and %edi, too. */
47 asm("pushl %%ebp; int $0x15; popl %%ebp; setc %0"
34 : "=d" (err), "+b" (next), "=a" (id), "+c" (size), 48 : "=d" (err), "+b" (next), "=a" (id), "+c" (size),
35 "=m" (*desc) 49 "=D" (edi), "+m" (buf)
36 : "D" (desc), "d" (SMAP), "a" (0xe820)); 50 : "D" (&buf), "d" (SMAP), "a" (0xe820)
51 : "esi");
37 52
38 /* BIOSes which terminate the chain with CF = 1 as opposed 53 /* BIOSes which terminate the chain with CF = 1 as opposed
39 to %ebx = 0 don't always report the SMAP signature on 54 to %ebx = 0 don't always report the SMAP signature on
@@ -51,8 +66,14 @@ static int detect_memory_e820(void)
51 break; 66 break;
52 } 67 }
53 68
69 /* ACPI 3.0 added the extended flags support. If bit 0
70 in the extended flags is zero, we're supposed to simply
71 ignore the entry -- a backwards incompatible change! */
72 if (size > 20 && !(buf.ext_flags & 1))
73 continue;
74
75 *desc++ = buf.std;
54 count++; 76 count++;
55 desc++;
56 } while (next && count < ARRAY_SIZE(boot_params.e820_map)); 77 } while (next && count < ARRAY_SIZE(boot_params.e820_map));
57 78
58 return boot_params.e820_entries = count; 79 return boot_params.e820_entries = count;
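
The boot/memory.c hunk above switches the E820 query to the ACPI 3.0 extended form: the BIOS is handed a 24-byte buffer whose ext_flags is preset to 1, and any entry that comes back with bit 0 of ext_flags clear is dropped instead of being copied into boot_params.e820_map. A standalone sketch of that filtering step; the struct layout and the size/flag test mirror the hunk, while the sample data is invented:

#include <stdio.h>
#include <stdint.h>

struct e820entry {              /* classic 20-byte BIOS entry */
	uint64_t addr;
	uint64_t size;
	uint32_t type;
} __attribute__((packed));

struct e820_ext_entry {         /* ACPI 3.0 extended form, as in the hunk */
	struct e820entry std;
	uint32_t ext_flags;     /* bit 0 clear => "ignore this entry" */
} __attribute__((packed));

int main(void)
{
	/* Made-up sample of what successive INT 15h/E820 calls might return. */
	struct e820_ext_entry sample[] = {
		{ { 0x00000000, 0x0009fc00, 1 }, 1 },
		{ { 0x000f0000, 0x00010000, 2 }, 0 },  /* flagged "ignore" */
		{ { 0x00100000, 0x3fef0000, 1 }, 1 },
	};
	struct e820entry map[8];
	uint32_t size = sizeof(struct e820_ext_entry); /* size the BIOS reported */
	int i, n = sizeof(sample) / sizeof(sample[0]), count = 0;

	for (i = 0; i < n; i++) {
		/* Same test as the hunk: only honour ext_flags when the BIOS
		 * actually filled more than the classic 20 bytes. */
		if (size > 20 && !(sample[i].ext_flags & 1))
			continue;
		map[count++] = sample[i].std;
	}

	for (i = 0; i < count; i++)
		printf("%#018llx +%#llx type %u\n",
		       (unsigned long long)map[i].addr,
		       (unsigned long long)map[i].size, map[i].type);
	return 0;
}
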
diff --git a/arch/x86/include/asm/pci.h b/arch/x86/include/asm/pci.h
index a977de23cb4..a0301bfeb95 100644
--- a/arch/x86/include/asm/pci.h
+++ b/arch/x86/include/asm/pci.h
@@ -86,6 +86,9 @@ static inline void early_quirks(void) { }
86 86
87extern void pci_iommu_alloc(void); 87extern void pci_iommu_alloc(void);
88 88
89/* MSI arch hook */
90#define arch_setup_msi_irqs arch_setup_msi_irqs
91
89#endif /* __KERNEL__ */ 92#endif /* __KERNEL__ */
90 93
91#ifdef CONFIG_X86_32 94#ifdef CONFIG_X86_32
diff --git a/arch/x86/include/asm/suspend_32.h b/arch/x86/include/asm/suspend_32.h
index a5074bd0f8b..48dcfa62ea0 100644
--- a/arch/x86/include/asm/suspend_32.h
+++ b/arch/x86/include/asm/suspend_32.h
@@ -24,28 +24,4 @@ struct saved_context {
24 unsigned long return_address; 24 unsigned long return_address;
25} __attribute__((packed)); 25} __attribute__((packed));
26 26
27#ifdef CONFIG_ACPI
28extern unsigned long saved_eip;
29extern unsigned long saved_esp;
30extern unsigned long saved_ebp;
31extern unsigned long saved_ebx;
32extern unsigned long saved_esi;
33extern unsigned long saved_edi;
34
35static inline void acpi_save_register_state(unsigned long return_point)
36{
37 saved_eip = return_point;
38 asm volatile("movl %%esp,%0" : "=m" (saved_esp));
39 asm volatile("movl %%ebp,%0" : "=m" (saved_ebp));
40 asm volatile("movl %%ebx,%0" : "=m" (saved_ebx));
41 asm volatile("movl %%edi,%0" : "=m" (saved_edi));
42 asm volatile("movl %%esi,%0" : "=m" (saved_esi));
43}
44
45#define acpi_restore_register_state() do {} while (0)
46
47/* routines for saving/restoring kernel state */
48extern int acpi_save_state_mem(void);
49#endif
50
51#endif /* _ASM_X86_SUSPEND_32_H */ 27#endif /* _ASM_X86_SUSPEND_32_H */
diff --git a/arch/x86/kernel/apic/io_apic.c b/arch/x86/kernel/apic/io_apic.c
index da99ffcdfde..1bb5c6cee3e 100644
--- a/arch/x86/kernel/apic/io_apic.c
+++ b/arch/x86/kernel/apic/io_apic.c
@@ -3468,6 +3468,10 @@ int arch_setup_msi_irqs(struct pci_dev *dev, int nvec, int type)
3468 struct intel_iommu *iommu = NULL; 3468 struct intel_iommu *iommu = NULL;
3469 int index = 0; 3469 int index = 0;
3470 3470
3471 /* x86 doesn't support multiple MSI yet */
3472 if (type == PCI_CAP_ID_MSI && nvec > 1)
3473 return 1;
3474
3471 irq_want = nr_irqs_gsi; 3475 irq_want = nr_irqs_gsi;
3472 sub_handle = 0; 3476 sub_handle = 0;
3473 list_for_each_entry(msidesc, &dev->msi_list, list) { 3477 list_for_each_entry(msidesc, &dev->msi_list, list) {
diff --git a/arch/x86/kernel/asm-offsets_32.c b/arch/x86/kernel/asm-offsets_32.c
index fbf2f33e308..5a6aa1c1162 100644
--- a/arch/x86/kernel/asm-offsets_32.c
+++ b/arch/x86/kernel/asm-offsets_32.c
@@ -18,6 +18,7 @@
18#include <asm/thread_info.h> 18#include <asm/thread_info.h>
19#include <asm/bootparam.h> 19#include <asm/bootparam.h>
20#include <asm/elf.h> 20#include <asm/elf.h>
21#include <asm/suspend.h>
21 22
22#include <xen/interface/xen.h> 23#include <xen/interface/xen.h>
23 24
diff --git a/arch/x86/kernel/asm-offsets_64.c b/arch/x86/kernel/asm-offsets_64.c
index 8793ab33e2c..e72f062fb4b 100644
--- a/arch/x86/kernel/asm-offsets_64.c
+++ b/arch/x86/kernel/asm-offsets_64.c
@@ -16,6 +16,7 @@
16#include <asm/thread_info.h> 16#include <asm/thread_info.h>
17#include <asm/ia32.h> 17#include <asm/ia32.h>
18#include <asm/bootparam.h> 18#include <asm/bootparam.h>
19#include <asm/suspend.h>
19 20
20#include <xen/interface/xen.h> 21#include <xen/interface/xen.h>
21 22
diff --git a/arch/x86/kernel/pci-dma.c b/arch/x86/kernel/pci-dma.c
index c7c4776ff63..90f5b9ef5de 100644
--- a/arch/x86/kernel/pci-dma.c
+++ b/arch/x86/kernel/pci-dma.c
@@ -300,8 +300,7 @@ fs_initcall(pci_iommu_init);
300static __devinit void via_no_dac(struct pci_dev *dev) 300static __devinit void via_no_dac(struct pci_dev *dev)
301{ 301{
302 if ((dev->class >> 8) == PCI_CLASS_BRIDGE_PCI && forbid_dac == 0) { 302 if ((dev->class >> 8) == PCI_CLASS_BRIDGE_PCI && forbid_dac == 0) {
303 printk(KERN_INFO 303 dev_info(&dev->dev, "disabling DAC on VIA PCI bridge\n");
304 "PCI: VIA PCI bridge detected. Disabling DAC.\n");
305 forbid_dac = 1; 304 forbid_dac = 1;
306 } 305 }
307} 306}
diff --git a/arch/x86/mm/highmem_32.c b/arch/x86/mm/highmem_32.c
index 522db5e3d0b..5bc5d1688c1 100644
--- a/arch/x86/mm/highmem_32.c
+++ b/arch/x86/mm/highmem_32.c
@@ -19,49 +19,6 @@ void kunmap(struct page *page)
19 kunmap_high(page); 19 kunmap_high(page);
20} 20}
21 21
22static void debug_kmap_atomic_prot(enum km_type type)
23{
24#ifdef CONFIG_DEBUG_HIGHMEM
25 static unsigned warn_count = 10;
26
27 if (unlikely(warn_count == 0))
28 return;
29
30 if (unlikely(in_interrupt())) {
31 if (in_irq()) {
32 if (type != KM_IRQ0 && type != KM_IRQ1 &&
33 type != KM_BIO_SRC_IRQ && type != KM_BIO_DST_IRQ &&
34 type != KM_BOUNCE_READ) {
35 WARN_ON(1);
36 warn_count--;
37 }
38 } else if (!irqs_disabled()) { /* softirq */
39 if (type != KM_IRQ0 && type != KM_IRQ1 &&
40 type != KM_SOFTIRQ0 && type != KM_SOFTIRQ1 &&
41 type != KM_SKB_SUNRPC_DATA &&
42 type != KM_SKB_DATA_SOFTIRQ &&
43 type != KM_BOUNCE_READ) {
44 WARN_ON(1);
45 warn_count--;
46 }
47 }
48 }
49
50 if (type == KM_IRQ0 || type == KM_IRQ1 || type == KM_BOUNCE_READ ||
51 type == KM_BIO_SRC_IRQ || type == KM_BIO_DST_IRQ) {
52 if (!irqs_disabled()) {
53 WARN_ON(1);
54 warn_count--;
55 }
56 } else if (type == KM_SOFTIRQ0 || type == KM_SOFTIRQ1) {
57 if (irq_count() == 0 && !irqs_disabled()) {
58 WARN_ON(1);
59 warn_count--;
60 }
61 }
62#endif
63}
64
65/* 22/*
66 * kmap_atomic/kunmap_atomic is significantly faster than kmap/kunmap because 23 * kmap_atomic/kunmap_atomic is significantly faster than kmap/kunmap because
67 * no global lock is needed and because the kmap code must perform a global TLB 24 * no global lock is needed and because the kmap code must perform a global TLB
@@ -81,8 +38,9 @@ void *kmap_atomic_prot(struct page *page, enum km_type type, pgprot_t prot)
81 if (!PageHighMem(page)) 38 if (!PageHighMem(page))
82 return page_address(page); 39 return page_address(page);
83 40
84 debug_kmap_atomic_prot(type); 41 debug_kmap_atomic(type);
85 42
43 debug_kmap_atomic(type);
86 idx = type + KM_TYPE_NR*smp_processor_id(); 44 idx = type + KM_TYPE_NR*smp_processor_id();
87 vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx); 45 vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
88 BUG_ON(!pte_none(*(kmap_pte-idx))); 46 BUG_ON(!pte_none(*(kmap_pte-idx)));
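
For context on the kmap_atomic_prot() path touched above: each CPU owns a block of KM_TYPE_NR fixmap slots, and the slot index is type + KM_TYPE_NR * cpu, mapped downward from the top of the fixmap region. A rough userspace model of that index arithmetic; KM_TYPE_NR, NR_CPUS and the fixmap top below are stand-in values, not the kernel's:

#include <stdio.h>

#define KM_TYPE_NR   20              /* stand-in: slots per CPU */
#define NR_CPUS       4              /* stand-in */
#define PAGE_SHIFT   12
#define FIXADDR_TOP  0xfffff000UL    /* stand-in fixmap top */

/* Model of __fix_to_virt(): fixmap indices grow downward from the top. */
static unsigned long fix_to_virt(unsigned int idx)
{
	return FIXADDR_TOP - ((unsigned long)idx << PAGE_SHIFT);
}

int main(void)
{
	int cpu, type = 5;           /* some km_type value */

	for (cpu = 0; cpu < NR_CPUS; cpu++) {
		unsigned int idx = type + KM_TYPE_NR * cpu;
		printf("cpu %d type %d -> slot %u vaddr %#lx\n",
		       cpu, type, idx, fix_to_virt(idx));
	}
	return 0;
}
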
diff --git a/arch/x86/mm/iomap_32.c b/arch/x86/mm/iomap_32.c
index 699c9b2895a..bff0c9032f8 100644
--- a/arch/x86/mm/iomap_32.c
+++ b/arch/x86/mm/iomap_32.c
@@ -19,6 +19,7 @@
19#include <asm/iomap.h> 19#include <asm/iomap.h>
20#include <asm/pat.h> 20#include <asm/pat.h>
21#include <linux/module.h> 21#include <linux/module.h>
22#include <linux/highmem.h>
22 23
23int is_io_mapping_possible(resource_size_t base, unsigned long size) 24int is_io_mapping_possible(resource_size_t base, unsigned long size)
24{ 25{
@@ -71,6 +72,7 @@ iounmap_atomic(void *kvaddr, enum km_type type)
71 unsigned long vaddr = (unsigned long) kvaddr & PAGE_MASK; 72 unsigned long vaddr = (unsigned long) kvaddr & PAGE_MASK;
72 enum fixed_addresses idx = type + KM_TYPE_NR*smp_processor_id(); 73 enum fixed_addresses idx = type + KM_TYPE_NR*smp_processor_id();
73 74
75 debug_kmap_atomic(type);
74 /* 76 /*
75 * Force other mappings to Oops if they'll try to access this pte 77 * Force other mappings to Oops if they'll try to access this pte
76 * without first remap it. Keeping stale mappings around is a bad idea 78 * without first remap it. Keeping stale mappings around is a bad idea
diff --git a/arch/x86/pci/early.c b/arch/x86/pci/early.c
index f6adf2c6d75..aaf26ae58cd 100644
--- a/arch/x86/pci/early.c
+++ b/arch/x86/pci/early.c
@@ -69,11 +69,12 @@ void early_dump_pci_device(u8 bus, u8 slot, u8 func)
69 int j; 69 int j;
70 u32 val; 70 u32 val;
71 71
72 printk(KERN_INFO "PCI: %02x:%02x:%02x", bus, slot, func); 72 printk(KERN_INFO "pci 0000:%02x:%02x.%d config space:",
73 bus, slot, func);
73 74
74 for (i = 0; i < 256; i += 4) { 75 for (i = 0; i < 256; i += 4) {
75 if (!(i & 0x0f)) 76 if (!(i & 0x0f))
76 printk("\n%04x:",i); 77 printk("\n %02x:",i);
77 78
78 val = read_pci_config(bus, slot, func, i); 79 val = read_pci_config(bus, slot, func, i);
79 for (j = 0; j < 4; j++) { 80 for (j = 0; j < 4; j++) {
@@ -96,20 +97,22 @@ void early_dump_pci_devices(void)
96 for (func = 0; func < 8; func++) { 97 for (func = 0; func < 8; func++) {
97 u32 class; 98 u32 class;
98 u8 type; 99 u8 type;
100
99 class = read_pci_config(bus, slot, func, 101 class = read_pci_config(bus, slot, func,
100 PCI_CLASS_REVISION); 102 PCI_CLASS_REVISION);
101 if (class == 0xffffffff) 103 if (class == 0xffffffff)
102 break; 104 continue;
103 105
104 early_dump_pci_device(bus, slot, func); 106 early_dump_pci_device(bus, slot, func);
105 107
106 /* No multi-function device? */ 108 if (func == 0) {
107 type = read_pci_config_byte(bus, slot, func, 109 type = read_pci_config_byte(bus, slot,
110 func,
108 PCI_HEADER_TYPE); 111 PCI_HEADER_TYPE);
109 if (!(type & 0x80)) 112 if (!(type & 0x80))
110 break; 113 break;
114 }
111 } 115 }
112 } 116 }
113 } 117 }
114} 118}
115
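
The early-PCI scan above now skips just the offending function when the class/revision read returns 0xffffffff (continue instead of break), and consults PCI_HEADER_TYPE only on function 0 to decide whether the slot is multi-function. A standalone sketch of that scan order over fabricated config-space contents:

#include <stdio.h>
#include <stdint.h>

/* Fabricated stand-ins for read_pci_config()/read_pci_config_byte():
 * slot 0 is multi-function with a hole at function 1, slot 3 is
 * single-function, everything else is empty. */
static uint32_t fake_class_rev(int slot, int func)
{
	if (slot == 0 && (func == 0 || func == 2))
		return 0x06040001;              /* made-up bridge class/rev */
	if (slot == 3 && func == 0)
		return 0x02000010;              /* made-up NIC class/rev */
	return 0xffffffff;
}

static uint8_t fake_header_type(int slot)
{
	return slot == 0 ? 0x80 : 0x00;         /* bit 7 = multi-function */
}

int main(void)
{
	int slot, func;

	for (slot = 0; slot < 4; slot++) {
		for (func = 0; func < 8; func++) {
			if (fake_class_rev(slot, func) == 0xffffffff)
				continue;       /* skip just this function */

			printf("probed 00:%02x.%d\n", slot, func);

			/* Only function 0's header type decides whether the
			 * remaining functions are worth probing at all. */
			if (func == 0 && !(fake_header_type(slot) & 0x80))
				break;
		}
	}
	return 0;
}
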
diff --git a/arch/x86/pci/fixup.c b/arch/x86/pci/fixup.c
index 9c49919e4d1..6dd89555fbf 100644
--- a/arch/x86/pci/fixup.c
+++ b/arch/x86/pci/fixup.c
@@ -495,26 +495,6 @@ DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_SIEMENS, 0x0015,
495 pci_siemens_interrupt_controller); 495 pci_siemens_interrupt_controller);
496 496
497/* 497/*
498 * Regular PCI devices have 256 bytes, but AMD Family 10h/11h CPUs have
499 * 4096 bytes configuration space for each function of their processor
500 * configuration space.
501 */
502static void amd_cpu_pci_cfg_space_size(struct pci_dev *dev)
503{
504 dev->cfg_size = pci_cfg_space_size_ext(dev);
505}
506DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_AMD, 0x1200, amd_cpu_pci_cfg_space_size);
507DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_AMD, 0x1201, amd_cpu_pci_cfg_space_size);
508DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_AMD, 0x1202, amd_cpu_pci_cfg_space_size);
509DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_AMD, 0x1203, amd_cpu_pci_cfg_space_size);
510DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_AMD, 0x1204, amd_cpu_pci_cfg_space_size);
511DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_AMD, 0x1300, amd_cpu_pci_cfg_space_size);
512DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_AMD, 0x1301, amd_cpu_pci_cfg_space_size);
513DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_AMD, 0x1302, amd_cpu_pci_cfg_space_size);
514DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_AMD, 0x1303, amd_cpu_pci_cfg_space_size);
515DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_AMD, 0x1304, amd_cpu_pci_cfg_space_size);
516
517/*
518 * SB600: Disable BAR1 on device 14.0 to avoid HPET resources from 498 * SB600: Disable BAR1 on device 14.0 to avoid HPET resources from
519 * confusing the PCI engine: 499 * confusing the PCI engine:
520 */ 500 */
diff --git a/arch/x86/pci/legacy.c b/arch/x86/pci/legacy.c
index f1065b129e9..4061bb0f267 100644
--- a/arch/x86/pci/legacy.c
+++ b/arch/x86/pci/legacy.c
@@ -50,8 +50,6 @@ static int __init pci_legacy_init(void)
50 if (pci_root_bus) 50 if (pci_root_bus)
51 pci_bus_add_devices(pci_root_bus); 51 pci_bus_add_devices(pci_root_bus);
52 52
53 pcibios_fixup_peer_bridges();
54
55 return 0; 53 return 0;
56} 54}
57 55
@@ -67,6 +65,7 @@ int __init pci_subsys_init(void)
67 pci_visws_init(); 65 pci_visws_init();
68#endif 66#endif
69 pci_legacy_init(); 67 pci_legacy_init();
68 pcibios_fixup_peer_bridges();
70 pcibios_irq_init(); 69 pcibios_irq_init();
71 pcibios_init(); 70 pcibios_init();
72 71
diff --git a/arch/x86/pci/mmconfig-shared.c b/arch/x86/pci/mmconfig-shared.c
index 89bf9242c80..905bb526b13 100644
--- a/arch/x86/pci/mmconfig-shared.c
+++ b/arch/x86/pci/mmconfig-shared.c
@@ -14,6 +14,7 @@
14#include <linux/init.h> 14#include <linux/init.h>
15#include <linux/acpi.h> 15#include <linux/acpi.h>
16#include <linux/bitmap.h> 16#include <linux/bitmap.h>
17#include <linux/sort.h>
17#include <asm/e820.h> 18#include <asm/e820.h>
18#include <asm/pci_x86.h> 19#include <asm/pci_x86.h>
19 20
@@ -24,24 +25,49 @@
24/* Indicate if the mmcfg resources have been placed into the resource table. */ 25/* Indicate if the mmcfg resources have been placed into the resource table. */
25static int __initdata pci_mmcfg_resources_inserted; 26static int __initdata pci_mmcfg_resources_inserted;
26 27
28static __init int extend_mmcfg(int num)
29{
30 struct acpi_mcfg_allocation *new;
31 int new_num = pci_mmcfg_config_num + num;
32
33 new = kzalloc(sizeof(pci_mmcfg_config[0]) * new_num, GFP_KERNEL);
34 if (!new)
35 return -1;
36
37 if (pci_mmcfg_config) {
38 memcpy(new, pci_mmcfg_config,
39 sizeof(pci_mmcfg_config[0]) * new_num);
40 kfree(pci_mmcfg_config);
41 }
42 pci_mmcfg_config = new;
43
44 return 0;
45}
46
47static __init void fill_one_mmcfg(u64 addr, int segment, int start, int end)
48{
49 int i = pci_mmcfg_config_num;
50
51 pci_mmcfg_config_num++;
52 pci_mmcfg_config[i].address = addr;
53 pci_mmcfg_config[i].pci_segment = segment;
54 pci_mmcfg_config[i].start_bus_number = start;
55 pci_mmcfg_config[i].end_bus_number = end;
56}
57
27static const char __init *pci_mmcfg_e7520(void) 58static const char __init *pci_mmcfg_e7520(void)
28{ 59{
29 u32 win; 60 u32 win;
30 raw_pci_ops->read(0, 0, PCI_DEVFN(0, 0), 0xce, 2, &win); 61 raw_pci_ops->read(0, 0, PCI_DEVFN(0, 0), 0xce, 2, &win);
31 62
32 win = win & 0xf000; 63 win = win & 0xf000;
33 if(win == 0x0000 || win == 0xf000) 64 if (win == 0x0000 || win == 0xf000)
34 pci_mmcfg_config_num = 0; 65 return NULL;
35 else { 66
36 pci_mmcfg_config_num = 1; 67 if (extend_mmcfg(1) == -1)
37 pci_mmcfg_config = kzalloc(sizeof(pci_mmcfg_config[0]), GFP_KERNEL); 68 return NULL;
38 if (!pci_mmcfg_config) 69
39 return NULL; 70 fill_one_mmcfg(win << 16, 0, 0, 255);
40 pci_mmcfg_config[0].address = win << 16;
41 pci_mmcfg_config[0].pci_segment = 0;
42 pci_mmcfg_config[0].start_bus_number = 0;
43 pci_mmcfg_config[0].end_bus_number = 255;
44 }
45 71
46 return "Intel Corporation E7520 Memory Controller Hub"; 72 return "Intel Corporation E7520 Memory Controller Hub";
47} 73}
@@ -50,13 +76,11 @@ static const char __init *pci_mmcfg_intel_945(void)
50{ 76{
51 u32 pciexbar, mask = 0, len = 0; 77 u32 pciexbar, mask = 0, len = 0;
52 78
53 pci_mmcfg_config_num = 1;
54
55 raw_pci_ops->read(0, 0, PCI_DEVFN(0, 0), 0x48, 4, &pciexbar); 79 raw_pci_ops->read(0, 0, PCI_DEVFN(0, 0), 0x48, 4, &pciexbar);
56 80
57 /* Enable bit */ 81 /* Enable bit */
58 if (!(pciexbar & 1)) 82 if (!(pciexbar & 1))
59 pci_mmcfg_config_num = 0; 83 return NULL;
60 84
61 /* Size bits */ 85 /* Size bits */
62 switch ((pciexbar >> 1) & 3) { 86 switch ((pciexbar >> 1) & 3) {
@@ -73,28 +97,23 @@ static const char __init *pci_mmcfg_intel_945(void)
73 len = 0x04000000U; 97 len = 0x04000000U;
74 break; 98 break;
75 default: 99 default:
76 pci_mmcfg_config_num = 0; 100 return NULL;
77 } 101 }
78 102
79 /* Errata #2, things break when not aligned on a 256Mb boundary */ 103 /* Errata #2, things break when not aligned on a 256Mb boundary */
80 /* Can only happen in 64M/128M mode */ 104 /* Can only happen in 64M/128M mode */
81 105
82 if ((pciexbar & mask) & 0x0fffffffU) 106 if ((pciexbar & mask) & 0x0fffffffU)
83 pci_mmcfg_config_num = 0; 107 return NULL;
84 108
85 /* Don't hit the APIC registers and their friends */ 109 /* Don't hit the APIC registers and their friends */
86 if ((pciexbar & mask) >= 0xf0000000U) 110 if ((pciexbar & mask) >= 0xf0000000U)
87 pci_mmcfg_config_num = 0; 111 return NULL;
88 112
89 if (pci_mmcfg_config_num) { 113 if (extend_mmcfg(1) == -1)
90 pci_mmcfg_config = kzalloc(sizeof(pci_mmcfg_config[0]), GFP_KERNEL); 114 return NULL;
91 if (!pci_mmcfg_config) 115
92 return NULL; 116 fill_one_mmcfg(pciexbar & mask, 0, 0, (len >> 20) - 1);
93 pci_mmcfg_config[0].address = pciexbar & mask;
94 pci_mmcfg_config[0].pci_segment = 0;
95 pci_mmcfg_config[0].start_bus_number = 0;
96 pci_mmcfg_config[0].end_bus_number = (len >> 20) - 1;
97 }
98 117
99 return "Intel Corporation 945G/GZ/P/PL Express Memory Controller Hub"; 118 return "Intel Corporation 945G/GZ/P/PL Express Memory Controller Hub";
100} 119}
@@ -138,22 +157,77 @@ static const char __init *pci_mmcfg_amd_fam10h(void)
138 busnbits = 8; 157 busnbits = 8;
139 } 158 }
140 159
141 pci_mmcfg_config_num = (1 << segnbits); 160 if (extend_mmcfg(1 << segnbits) == -1)
142 pci_mmcfg_config = kzalloc(sizeof(pci_mmcfg_config[0]) *
143 pci_mmcfg_config_num, GFP_KERNEL);
144 if (!pci_mmcfg_config)
145 return NULL; 161 return NULL;
146 162
147 for (i = 0; i < (1 << segnbits); i++) { 163 for (i = 0; i < (1 << segnbits); i++)
148 pci_mmcfg_config[i].address = base + (1<<28) * i; 164 fill_one_mmcfg(base + (1<<28) * i, i, 0, (1 << busnbits) - 1);
149 pci_mmcfg_config[i].pci_segment = i;
150 pci_mmcfg_config[i].start_bus_number = 0;
151 pci_mmcfg_config[i].end_bus_number = (1 << busnbits) - 1;
152 }
153 165
154 return "AMD Family 10h NB"; 166 return "AMD Family 10h NB";
155} 167}
156 168
169static bool __initdata mcp55_checked;
170static const char __init *pci_mmcfg_nvidia_mcp55(void)
171{
172 int bus;
173 int mcp55_mmconf_found = 0;
174
175 static const u32 extcfg_regnum = 0x90;
176 static const u32 extcfg_regsize = 4;
177 static const u32 extcfg_enable_mask = 1<<31;
178 static const u32 extcfg_start_mask = 0xff<<16;
179 static const int extcfg_start_shift = 16;
180 static const u32 extcfg_size_mask = 0x3<<28;
181 static const int extcfg_size_shift = 28;
182 static const int extcfg_sizebus[] = {0x100, 0x80, 0x40, 0x20};
183 static const u32 extcfg_base_mask[] = {0x7ff8, 0x7ffc, 0x7ffe, 0x7fff};
184 static const int extcfg_base_lshift = 25;
185
186 /*
187 * do check if amd fam10h already took over
188 */
189 if (!acpi_disabled || pci_mmcfg_config_num || mcp55_checked)
190 return NULL;
191
192 mcp55_checked = true;
193 for (bus = 0; bus < 256; bus++) {
194 u64 base;
195 u32 l, extcfg;
196 u16 vendor, device;
197 int start, size_index, end;
198
199 raw_pci_ops->read(0, bus, PCI_DEVFN(0, 0), 0, 4, &l);
200 vendor = l & 0xffff;
201 device = (l >> 16) & 0xffff;
202
203 if (PCI_VENDOR_ID_NVIDIA != vendor || 0x0369 != device)
204 continue;
205
206 raw_pci_ops->read(0, bus, PCI_DEVFN(0, 0), extcfg_regnum,
207 extcfg_regsize, &extcfg);
208
209 if (!(extcfg & extcfg_enable_mask))
210 continue;
211
212 if (extend_mmcfg(1) == -1)
213 continue;
214
215 size_index = (extcfg & extcfg_size_mask) >> extcfg_size_shift;
216 base = extcfg & extcfg_base_mask[size_index];
217 /* base could > 4G */
218 base <<= extcfg_base_lshift;
219 start = (extcfg & extcfg_start_mask) >> extcfg_start_shift;
220 end = start + extcfg_sizebus[size_index] - 1;
221 fill_one_mmcfg(base, 0, start, end);
222 mcp55_mmconf_found++;
223 }
224
225 if (!mcp55_mmconf_found)
226 return NULL;
227
228 return "nVidia MCP55";
229}
230
157struct pci_mmcfg_hostbridge_probe { 231struct pci_mmcfg_hostbridge_probe {
158 u32 bus; 232 u32 bus;
159 u32 devfn; 233 u32 devfn;
@@ -171,8 +245,52 @@ static struct pci_mmcfg_hostbridge_probe pci_mmcfg_probes[] __initdata = {
171 0x1200, pci_mmcfg_amd_fam10h }, 245 0x1200, pci_mmcfg_amd_fam10h },
172 { 0xff, PCI_DEVFN(0, 0), PCI_VENDOR_ID_AMD, 246 { 0xff, PCI_DEVFN(0, 0), PCI_VENDOR_ID_AMD,
173 0x1200, pci_mmcfg_amd_fam10h }, 247 0x1200, pci_mmcfg_amd_fam10h },
248 { 0, PCI_DEVFN(0, 0), PCI_VENDOR_ID_NVIDIA,
249 0x0369, pci_mmcfg_nvidia_mcp55 },
174}; 250};
175 251
252static int __init cmp_mmcfg(const void *x1, const void *x2)
253{
254 const typeof(pci_mmcfg_config[0]) *m1 = x1;
255 const typeof(pci_mmcfg_config[0]) *m2 = x2;
256 int start1, start2;
257
258 start1 = m1->start_bus_number;
259 start2 = m2->start_bus_number;
260
261 return start1 - start2;
262}
263
264static void __init pci_mmcfg_check_end_bus_number(void)
265{
266 int i;
267 typeof(pci_mmcfg_config[0]) *cfg, *cfgx;
268
269 /* sort them at first */
270 sort(pci_mmcfg_config, pci_mmcfg_config_num,
271 sizeof(pci_mmcfg_config[0]), cmp_mmcfg, NULL);
272
273 /* last one*/
274 if (pci_mmcfg_config_num > 0) {
275 i = pci_mmcfg_config_num - 1;
276 cfg = &pci_mmcfg_config[i];
277 if (cfg->end_bus_number < cfg->start_bus_number)
278 cfg->end_bus_number = 255;
279 }
280
281 /* don't overlap please */
282 for (i = 0; i < pci_mmcfg_config_num - 1; i++) {
283 cfg = &pci_mmcfg_config[i];
284 cfgx = &pci_mmcfg_config[i+1];
285
286 if (cfg->end_bus_number < cfg->start_bus_number)
287 cfg->end_bus_number = 255;
288
289 if (cfg->end_bus_number >= cfgx->start_bus_number)
290 cfg->end_bus_number = cfgx->start_bus_number - 1;
291 }
292}
293
176static int __init pci_mmcfg_check_hostbridge(void) 294static int __init pci_mmcfg_check_hostbridge(void)
177{ 295{
178 u32 l; 296 u32 l;
@@ -186,31 +304,33 @@ static int __init pci_mmcfg_check_hostbridge(void)
186 304
187 pci_mmcfg_config_num = 0; 305 pci_mmcfg_config_num = 0;
188 pci_mmcfg_config = NULL; 306 pci_mmcfg_config = NULL;
189 name = NULL;
190 307
191 for (i = 0; !name && i < ARRAY_SIZE(pci_mmcfg_probes); i++) { 308 for (i = 0; i < ARRAY_SIZE(pci_mmcfg_probes); i++) {
192 bus = pci_mmcfg_probes[i].bus; 309 bus = pci_mmcfg_probes[i].bus;
193 devfn = pci_mmcfg_probes[i].devfn; 310 devfn = pci_mmcfg_probes[i].devfn;
194 raw_pci_ops->read(0, bus, devfn, 0, 4, &l); 311 raw_pci_ops->read(0, bus, devfn, 0, 4, &l);
195 vendor = l & 0xffff; 312 vendor = l & 0xffff;
196 device = (l >> 16) & 0xffff; 313 device = (l >> 16) & 0xffff;
197 314
315 name = NULL;
198 if (pci_mmcfg_probes[i].vendor == vendor && 316 if (pci_mmcfg_probes[i].vendor == vendor &&
199 pci_mmcfg_probes[i].device == device) 317 pci_mmcfg_probes[i].device == device)
200 name = pci_mmcfg_probes[i].probe(); 318 name = pci_mmcfg_probes[i].probe();
201 }
202 319
203 if (name) { 320 if (name)
204 printk(KERN_INFO "PCI: Found %s %s MMCONFIG support.\n", 321 printk(KERN_INFO "PCI: Found %s with MMCONFIG support.\n",
205 name, pci_mmcfg_config_num ? "with" : "without"); 322 name);
206 } 323 }
207 324
208 return name != NULL; 325 /* some end_bus_number is crazy, fix it */
326 pci_mmcfg_check_end_bus_number();
327
328 return pci_mmcfg_config_num != 0;
209} 329}
210 330
211static void __init pci_mmcfg_insert_resources(void) 331static void __init pci_mmcfg_insert_resources(void)
212{ 332{
213#define PCI_MMCFG_RESOURCE_NAME_LEN 19 333#define PCI_MMCFG_RESOURCE_NAME_LEN 24
214 int i; 334 int i;
215 struct resource *res; 335 struct resource *res;
216 char *names; 336 char *names;
@@ -228,9 +348,10 @@ static void __init pci_mmcfg_insert_resources(void)
228 struct acpi_mcfg_allocation *cfg = &pci_mmcfg_config[i]; 348 struct acpi_mcfg_allocation *cfg = &pci_mmcfg_config[i];
229 num_buses = cfg->end_bus_number - cfg->start_bus_number + 1; 349 num_buses = cfg->end_bus_number - cfg->start_bus_number + 1;
230 res->name = names; 350 res->name = names;
231 snprintf(names, PCI_MMCFG_RESOURCE_NAME_LEN, "PCI MMCONFIG %u", 351 snprintf(names, PCI_MMCFG_RESOURCE_NAME_LEN,
232 cfg->pci_segment); 352 "PCI MMCONFIG %u [%02x-%02x]", cfg->pci_segment,
233 res->start = cfg->address; 353 cfg->start_bus_number, cfg->end_bus_number);
354 res->start = cfg->address + (cfg->start_bus_number << 20);
234 res->end = res->start + (num_buses << 20) - 1; 355 res->end = res->start + (num_buses << 20) - 1;
235 res->flags = IORESOURCE_MEM | IORESOURCE_BUSY; 356 res->flags = IORESOURCE_MEM | IORESOURCE_BUSY;
236 insert_resource(&iomem_resource, res); 357 insert_resource(&iomem_resource, res);
@@ -354,8 +475,6 @@ static void __init pci_mmcfg_reject_broken(int early)
354 (pci_mmcfg_config[0].address == 0)) 475 (pci_mmcfg_config[0].address == 0))
355 return; 476 return;
356 477
357 cfg = &pci_mmcfg_config[0];
358
359 for (i = 0; i < pci_mmcfg_config_num; i++) { 478 for (i = 0; i < pci_mmcfg_config_num; i++) {
360 int valid = 0; 479 int valid = 0;
361 u64 addr, size; 480 u64 addr, size;
@@ -423,10 +542,10 @@ static void __init __pci_mmcfg_init(int early)
423 known_bridge = 1; 542 known_bridge = 1;
424 } 543 }
425 544
426 if (!known_bridge) { 545 if (!known_bridge)
427 acpi_table_parse(ACPI_SIG_MCFG, acpi_parse_mcfg); 546 acpi_table_parse(ACPI_SIG_MCFG, acpi_parse_mcfg);
428 pci_mmcfg_reject_broken(early); 547
429 } 548 pci_mmcfg_reject_broken(early);
430 549
431 if ((pci_mmcfg_config_num == 0) || 550 if ((pci_mmcfg_config_num == 0) ||
432 (pci_mmcfg_config == NULL) || 551 (pci_mmcfg_config == NULL) ||
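
The new pci_mmcfg_check_end_bus_number() above sorts the accumulated MMCONFIG entries by starting bus and then clamps bogus or overlapping end_bus_number values. A standalone model of that pass over made-up entries, using qsort() in place of the kernel's sort() and a local struct in place of acpi_mcfg_allocation:

#include <stdio.h>
#include <stdlib.h>

struct mcfg {
	unsigned long long address;
	int pci_segment;
	int start_bus_number;
	int end_bus_number;
};

static int cmp_mcfg(const void *a, const void *b)
{
	const struct mcfg *m1 = a, *m2 = b;
	return m1->start_bus_number - m2->start_bus_number;
}

int main(void)
{
	/* Made-up entries: one with end < start, two that overlap. */
	struct mcfg cfg[] = {
		{ 0xe0000000ULL, 0, 0x40, 0x00 },
		{ 0xd0000000ULL, 0, 0x00, 0x7f },
		{ 0xf0000000ULL, 0, 0x60, 0xff },
	};
	int i, n = sizeof(cfg) / sizeof(cfg[0]);

	qsort(cfg, n, sizeof(cfg[0]), cmp_mcfg);

	/* Same fixups as pci_mmcfg_check_end_bus_number(): repair the last
	 * entry, then clamp each entry against its successor. */
	if (cfg[n - 1].end_bus_number < cfg[n - 1].start_bus_number)
		cfg[n - 1].end_bus_number = 255;

	for (i = 0; i < n - 1; i++) {
		if (cfg[i].end_bus_number < cfg[i].start_bus_number)
			cfg[i].end_bus_number = 255;
		if (cfg[i].end_bus_number >= cfg[i + 1].start_bus_number)
			cfg[i].end_bus_number = cfg[i + 1].start_bus_number - 1;
	}

	for (i = 0; i < n; i++)
		printf("%#llx: buses %02x-%02x\n", cfg[i].address,
		       cfg[i].start_bus_number, cfg[i].end_bus_number);
	return 0;
}
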
diff --git a/arch/x86/pci/mmconfig_64.c b/arch/x86/pci/mmconfig_64.c
index 30007ffc8e1..94349f8b2f9 100644
--- a/arch/x86/pci/mmconfig_64.c
+++ b/arch/x86/pci/mmconfig_64.c
@@ -112,13 +112,18 @@ static struct pci_raw_ops pci_mmcfg = {
112static void __iomem * __init mcfg_ioremap(struct acpi_mcfg_allocation *cfg) 112static void __iomem * __init mcfg_ioremap(struct acpi_mcfg_allocation *cfg)
113{ 113{
114 void __iomem *addr; 114 void __iomem *addr;
115 u32 size; 115 u64 start, size;
116 116
117 size = (cfg->end_bus_number + 1) << 20; 117 start = cfg->start_bus_number;
118 addr = ioremap_nocache(cfg->address, size); 118 start <<= 20;
119 start += cfg->address;
120 size = cfg->end_bus_number + 1 - cfg->start_bus_number;
121 size <<= 20;
122 addr = ioremap_nocache(start, size);
119 if (addr) { 123 if (addr) {
120 printk(KERN_INFO "PCI: Using MMCONFIG at %Lx - %Lx\n", 124 printk(KERN_INFO "PCI: Using MMCONFIG at %Lx - %Lx\n",
121 cfg->address, cfg->address + size - 1); 125 start, start + size - 1);
126 addr -= cfg->start_bus_number << 20;
122 } 127 }
123 return addr; 128 return addr;
124} 129}
@@ -157,7 +162,7 @@ void __init pci_mmcfg_arch_free(void)
157 162
158 for (i = 0; i < pci_mmcfg_config_num; ++i) { 163 for (i = 0; i < pci_mmcfg_config_num; ++i) {
159 if (pci_mmcfg_virt[i].virt) { 164 if (pci_mmcfg_virt[i].virt) {
160 iounmap(pci_mmcfg_virt[i].virt); 165 iounmap(pci_mmcfg_virt[i].virt + (pci_mmcfg_virt[i].cfg->start_bus_number << 20));
161 pci_mmcfg_virt[i].virt = NULL; 166 pci_mmcfg_virt[i].virt = NULL;
162 pci_mmcfg_virt[i].cfg = NULL; 167 pci_mmcfg_virt[i].cfg = NULL;
163 } 168 }
diff --git a/arch/x86/power/cpu_32.c b/arch/x86/power/cpu_32.c
index 274d06082f4..ce702c5b3a2 100644
--- a/arch/x86/power/cpu_32.c
+++ b/arch/x86/power/cpu_32.c
@@ -12,6 +12,7 @@
12#include <asm/mtrr.h> 12#include <asm/mtrr.h>
13#include <asm/mce.h> 13#include <asm/mce.h>
14#include <asm/xcr.h> 14#include <asm/xcr.h>
15#include <asm/suspend.h>
15 16
16static struct saved_context saved_context; 17static struct saved_context saved_context;
17 18
diff --git a/arch/x86/power/cpu_64.c b/arch/x86/power/cpu_64.c
index e3b6cf70d62..5343540f260 100644
--- a/arch/x86/power/cpu_64.c
+++ b/arch/x86/power/cpu_64.c
@@ -15,6 +15,7 @@
15#include <asm/pgtable.h> 15#include <asm/pgtable.h>
16#include <asm/mtrr.h> 16#include <asm/mtrr.h>
17#include <asm/xcr.h> 17#include <asm/xcr.h>
18#include <asm/suspend.h>
18 19
19static void fix_processor_context(void); 20static void fix_processor_context(void);
20 21
diff --git a/arch/x86/power/hibernate_64.c b/arch/x86/power/hibernate_64.c
index 6dd000dd793..65fdc86e923 100644
--- a/arch/x86/power/hibernate_64.c
+++ b/arch/x86/power/hibernate_64.c
@@ -14,6 +14,7 @@
14#include <asm/page.h> 14#include <asm/page.h>
15#include <asm/pgtable.h> 15#include <asm/pgtable.h>
16#include <asm/mtrr.h> 16#include <asm/mtrr.h>
17#include <asm/suspend.h>
17 18
18/* References to section boundaries */ 19/* References to section boundaries */
19extern const void __nosave_begin, __nosave_end; 20extern const void __nosave_begin, __nosave_end;
diff --git a/arch/xtensa/platforms/iss/console.c b/arch/xtensa/platforms/iss/console.c
index 25d46c84eb0..4c559cf7da2 100644
--- a/arch/xtensa/platforms/iss/console.c
+++ b/arch/xtensa/platforms/iss/console.c
@@ -18,6 +18,7 @@
18#include <linux/mm.h> 18#include <linux/mm.h>
19#include <linux/major.h> 19#include <linux/major.h>
20#include <linux/param.h> 20#include <linux/param.h>
21#include <linux/seq_file.h>
21#include <linux/serial.h> 22#include <linux/serial.h>
22#include <linux/serialP.h> 23#include <linux/serialP.h>
23 24
@@ -176,22 +177,24 @@ static void rs_wait_until_sent(struct tty_struct *tty, int timeout)
176 /* Stub, once again.. */ 177 /* Stub, once again.. */
177} 178}
178 179
179static int rs_read_proc(char *page, char **start, off_t off, int count, 180static int rs_proc_show(struct seq_file *m, void *v)
180 int *eof, void *data)
181{ 181{
182 int len = 0; 182 seq_printf(m, "serinfo:1.0 driver:%s\n", serial_version);
183 off_t begin = 0; 183 return 0;
184 184}
185 len += sprintf(page, "serinfo:1.0 driver:%s\n", serial_version);
186 *eof = 1;
187
188 if (off >= len + begin)
189 return 0;
190 185
191 *start = page + (off - begin); 186static int rs_proc_open(struct inode *inode, struct file *file)
192 return ((count < begin + len - off) ? count : begin + len - off); 187{
188 return single_open(file, rs_proc_show, NULL);
193} 189}
194 190
191static const struct file_operations rs_proc_fops = {
192 .owner = THIS_MODULE,
193 .open = rs_proc_open,
194 .read = seq_read,
195 .llseek = seq_lseek,
196 .release = single_release,
197};
195 198
196static struct tty_operations serial_ops = { 199static struct tty_operations serial_ops = {
197 .open = rs_open, 200 .open = rs_open,
@@ -203,7 +206,7 @@ static struct tty_operations serial_ops = {
203 .chars_in_buffer = rs_chars_in_buffer, 206 .chars_in_buffer = rs_chars_in_buffer,
204 .hangup = rs_hangup, 207 .hangup = rs_hangup,
205 .wait_until_sent = rs_wait_until_sent, 208 .wait_until_sent = rs_wait_until_sent,
206 .read_proc = rs_read_proc 209 .proc_fops = &rs_proc_fops,
207}; 210};
208 211
209int __init rs_init(void) 212int __init rs_init(void)
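
The xtensa console change above is the usual read_proc-to-seq_file conversion: the show callback prints into a struct seq_file, single_open() wires it up, and the driver exposes proc_fops instead of read_proc. A minimal sketch of the same pattern as a standalone /proc entry, built as a module against a kernel tree of roughly this vintage; the entry name and output line are made up:

#include <linux/module.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>

static int demo_proc_show(struct seq_file *m, void *v)
{
	seq_printf(m, "serinfo:1.0 driver:%s\n", "demo");
	return 0;
}

static int demo_proc_open(struct inode *inode, struct file *file)
{
	return single_open(file, demo_proc_show, NULL);
}

static const struct file_operations demo_proc_fops = {
	.owner   = THIS_MODULE,
	.open    = demo_proc_open,
	.read    = seq_read,
	.llseek  = seq_lseek,
	.release = single_release,
};

static int __init demo_init(void)
{
	proc_create("seqfile_demo", 0, NULL, &demo_proc_fops);
	return 0;
}

static void __exit demo_exit(void)
{
	remove_proc_entry("seqfile_demo", NULL);
}

module_init(demo_init);
module_exit(demo_exit);
MODULE_LICENSE("GPL");
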