author     Linus Torvalds <torvalds@woody.linux-foundation.org>   2007-10-14 11:59:48 -0400
committer  Linus Torvalds <torvalds@woody.linux-foundation.org>   2007-10-14 11:59:48 -0400
commit     cbe619b162121577bc1e8ed4384dfb85f19e43d8
tree       a26287f4fcb2ae732ac3f5ee1e18473de8a2247a
parent     5dd80d5d096f58b7a83ae493a20bfc9d0de30226
parent     45b3f4cc6025bdb14beb65b8a3a732bf55d1bc41
Merge branch 'master' of master.kernel.org:/pub/scm/linux/kernel/git/davem/sparc-2.6
* 'master' of master.kernel.org:/pub/scm/linux/kernel/git/davem/sparc-2.6: (23 commits)
[SPARC64]: virt_to_real_irq_table --> virt_irq_table
[SPARC64]: virt_irq --> bucket mapping no longer necessary
[SPARC64]: Kill ugly __bucket() macro.
[SPARC64]: Kill ugly __irq_ino() macro.
[SPARC64]: Only use bypass accesses to INO buckets.
[SPARC64]: Update defconfig.
[SPARC64]: Use sun4v VIRQ interfaces as intended.
[SPARC64]: Allocate ivector_table dynamically.
[SPARC64]: Access ivector_table[] using physical addresses.
[SPARC64]: Make IVEC pointers 64-bit.
[SPARC64]: Fix register usage in xor_raid_4().
[SPARC64]: Kill pci_memspace_mask.
[SPARC64]: Consolidate MSI support code.
[SPARC/64]: Move of_platform_driver initialisations: arch/sparc{,64}.
[SPARC64]: Fix bugs in SYSV IPC handling in 64-bit processes.
[SPARC/64]: Prepare to remove of_platform_driver name.
[SPARC32]: Add irqflags.h to sparc32 and use it from generic code.
[SPARC64]: beautify vmlinux.lds
[SPARC]: beautify vmlinux.lds
[SPARC64]: Enable MSI on sun4u Fire PCI-E controllers.
...
33 files changed, 1760 insertions, 1148 deletions
diff --git a/Documentation/sparc/sbus_drivers.txt b/Documentation/sparc/sbus_drivers.txt
index 8418d35484fc..eb1e28ad8822 100644
--- a/Documentation/sparc/sbus_drivers.txt
+++ b/Documentation/sparc/sbus_drivers.txt
@@ -67,10 +67,12 @@ probe in an SBUS driver under Linux: | |||
67 | MODULE_DEVICE_TABLE(of, mydevice_match); | 67 | MODULE_DEVICE_TABLE(of, mydevice_match); |
68 | 68 | ||
69 | static struct of_platform_driver mydevice_driver = { | 69 | static struct of_platform_driver mydevice_driver = { |
70 | .name = "mydevice", | ||
71 | .match_table = mydevice_match, | 70 | .match_table = mydevice_match, |
72 | .probe = mydevice_probe, | 71 | .probe = mydevice_probe, |
73 | .remove = __devexit_p(mydevice_remove), | 72 | .remove = __devexit_p(mydevice_remove), |
73 | .driver = { | ||
74 | .name = "mydevice", | ||
75 | }, | ||
74 | }; | 76 | }; |
75 | 77 | ||
76 | static int __init mydevice_init(void) | 78 | static int __init mydevice_init(void) |
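The hunk above is part of this merge's of_platform_driver conversion ("[SPARC/64]: Prepare to remove of_platform_driver name"): the driver name moves from the legacy top-level .name field into the embedded struct device_driver. A minimal sketch of the registration pattern the updated documentation describes, with "mydevice", its match table, and its callbacks taken as the documentation's placeholders rather than a real driver:

```c
/* Sketch of the updated registration style; "mydevice" and its
 * callbacks are placeholder names from the documentation example.
 * The name now lives in the embedded struct device_driver, and
 * of_register_driver() falls back to the old top-level .name only
 * when .driver.name is left unset (see the of_device.c hunk below).
 */
static struct of_device_id mydevice_match[] = {
	{ .name = "mydevice", },
	{},
};
MODULE_DEVICE_TABLE(of, mydevice_match);

static struct of_platform_driver mydevice_driver = {
	.match_table	= mydevice_match,
	.probe		= mydevice_probe,
	.remove		= __devexit_p(mydevice_remove),
	.driver		= {
		.name	= "mydevice",
	},
};
```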
diff --git a/arch/sparc/Kconfig.debug b/arch/sparc/Kconfig.debug
index 120f6b529348..87dd496f15eb 100644
--- a/arch/sparc/Kconfig.debug
+++ b/arch/sparc/Kconfig.debug
@@ -1,5 +1,9 @@ | |||
1 | menu "Kernel hacking" | 1 | menu "Kernel hacking" |
2 | 2 | ||
3 | config TRACE_IRQFLAGS_SUPPORT | ||
4 | bool | ||
5 | default y | ||
6 | |||
3 | source "lib/Kconfig.debug" | 7 | source "lib/Kconfig.debug" |
4 | 8 | ||
5 | config DEBUG_STACK_USAGE | 9 | config DEBUG_STACK_USAGE |
diff --git a/arch/sparc/kernel/irq.c b/arch/sparc/kernel/irq.c
index b76dc03fc318..722d67d32961 100644
--- a/arch/sparc/kernel/irq.c
+++ b/arch/sparc/kernel/irq.c
@@ -56,7 +56,7 @@ | |||
56 | #define SMP_NOP2 | 56 | #define SMP_NOP2 |
57 | #define SMP_NOP3 | 57 | #define SMP_NOP3 |
58 | #endif /* SMP */ | 58 | #endif /* SMP */ |
59 | unsigned long __local_irq_save(void) | 59 | unsigned long __raw_local_irq_save(void) |
60 | { | 60 | { |
61 | unsigned long retval; | 61 | unsigned long retval; |
62 | unsigned long tmp; | 62 | unsigned long tmp; |
@@ -74,7 +74,7 @@ unsigned long __local_irq_save(void) | |||
74 | return retval; | 74 | return retval; |
75 | } | 75 | } |
76 | 76 | ||
77 | void local_irq_enable(void) | 77 | void raw_local_irq_enable(void) |
78 | { | 78 | { |
79 | unsigned long tmp; | 79 | unsigned long tmp; |
80 | 80 | ||
@@ -89,7 +89,7 @@ void local_irq_enable(void) | |||
89 | : "memory"); | 89 | : "memory"); |
90 | } | 90 | } |
91 | 91 | ||
92 | void local_irq_restore(unsigned long old_psr) | 92 | void raw_local_irq_restore(unsigned long old_psr) |
93 | { | 93 | { |
94 | unsigned long tmp; | 94 | unsigned long tmp; |
95 | 95 | ||
@@ -105,9 +105,9 @@ void local_irq_restore(unsigned long old_psr) | |||
105 | : "memory"); | 105 | : "memory"); |
106 | } | 106 | } |
107 | 107 | ||
108 | EXPORT_SYMBOL(__local_irq_save); | 108 | EXPORT_SYMBOL(__raw_local_irq_save); |
109 | EXPORT_SYMBOL(local_irq_enable); | 109 | EXPORT_SYMBOL(raw_local_irq_enable); |
110 | EXPORT_SYMBOL(local_irq_restore); | 110 | EXPORT_SYMBOL(raw_local_irq_restore); |
111 | 111 | ||
112 | /* | 112 | /* |
113 | * Dave Redman (djhr@tadpole.co.uk) | 113 | * Dave Redman (djhr@tadpole.co.uk) |
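The renames in the hunks above back the "[SPARC32]: Add irqflags.h to sparc32 and use it from generic code" entry and the new TRACE_IRQFLAGS_SUPPORT symbol added to Kconfig.debug: the architecture now exports only the raw_local_irq_* primitives, and generic code layers the public local_irq_* operations plus lockdep's IRQ-state tracing on top of them. A simplified sketch of that layering, assuming CONFIG_TRACE_IRQFLAGS; the real include/linux/irqflags.h macros differ in detail:

```c
/* Simplified sketch of how the generic irqflags layer wraps the
 * arch's raw primitives when IRQ-flags tracing is enabled; this is
 * not the verbatim include/linux/irqflags.h implementation.
 */
#define local_irq_enable() \
	do { trace_hardirqs_on(); raw_local_irq_enable(); } while (0)

#define local_irq_save(flags) \
	do { \
		(flags) = __raw_local_irq_save(); \
		trace_hardirqs_off(); \
	} while (0)

#define local_irq_restore(flags) \
	do { \
		if (raw_irqs_disabled_flags(flags)) { \
			raw_local_irq_restore(flags); \
			trace_hardirqs_off(); \
		} else { \
			trace_hardirqs_on(); \
			raw_local_irq_restore(flags); \
		} \
	} while (0)
```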
diff --git a/arch/sparc/kernel/of_device.c b/arch/sparc/kernel/of_device.c
index 36383f73d685..fb2caef79cec 100644
--- a/arch/sparc/kernel/of_device.c
+++ b/arch/sparc/kernel/of_device.c
@@ -588,7 +588,10 @@ __setup("of_debug=", of_debug); | |||
588 | int of_register_driver(struct of_platform_driver *drv, struct bus_type *bus) | 588 | int of_register_driver(struct of_platform_driver *drv, struct bus_type *bus) |
589 | { | 589 | { |
590 | /* initialize common driver fields */ | 590 | /* initialize common driver fields */ |
591 | drv->driver.name = drv->name; | 591 | if (!drv->driver.name) |
592 | drv->driver.name = drv->name; | ||
593 | if (!drv->driver.owner) | ||
594 | drv->driver.owner = drv->owner; | ||
592 | drv->driver.bus = bus; | 595 | drv->driver.bus = bus; |
593 | 596 | ||
594 | /* register with core */ | 597 | /* register with core */ |
diff --git a/arch/sparc/kernel/time.c b/arch/sparc/kernel/time.c
index 6a2513321620..4bf78a5e8e0f 100644
--- a/arch/sparc/kernel/time.c
+++ b/arch/sparc/kernel/time.c
@@ -347,9 +347,11 @@ static struct of_device_id clock_match[] = { | |||
347 | }; | 347 | }; |
348 | 348 | ||
349 | static struct of_platform_driver clock_driver = { | 349 | static struct of_platform_driver clock_driver = { |
350 | .name = "clock", | ||
351 | .match_table = clock_match, | 350 | .match_table = clock_match, |
352 | .probe = clock_probe, | 351 | .probe = clock_probe, |
352 | .driver = { | ||
353 | .name = "clock", | ||
354 | }, | ||
353 | }; | 355 | }; |
354 | 356 | ||
355 | 357 | ||
diff --git a/arch/sparc/kernel/vmlinux.lds.S b/arch/sparc/kernel/vmlinux.lds.S
index 15109c156e83..a8b4200f9cc3 100644
--- a/arch/sparc/kernel/vmlinux.lds.S
+++ b/arch/sparc/kernel/vmlinux.lds.S
@@ -1,6 +1,7 @@ | |||
1 | /* ld script to make SparcLinux kernel */ | 1 | /* ld script to make SparcLinux kernel */ |
2 | 2 | ||
3 | #include <asm-generic/vmlinux.lds.h> | 3 | #include <asm-generic/vmlinux.lds.h> |
4 | #include <asm/page.h> | ||
4 | 5 | ||
5 | OUTPUT_FORMAT("elf32-sparc", "elf32-sparc", "elf32-sparc") | 6 | OUTPUT_FORMAT("elf32-sparc", "elf32-sparc", "elf32-sparc") |
6 | OUTPUT_ARCH(sparc) | 7 | OUTPUT_ARCH(sparc) |
@@ -8,84 +9,104 @@ ENTRY(_start) | |||
8 | jiffies = jiffies_64 + 4; | 9 | jiffies = jiffies_64 + 4; |
9 | SECTIONS | 10 | SECTIONS |
10 | { | 11 | { |
11 | . = 0x10000 + SIZEOF_HEADERS; | 12 | . = 0x10000 + SIZEOF_HEADERS; |
12 | .text 0xf0004000 : | 13 | .text 0xf0004000 : |
13 | { | 14 | { |
14 | _text = .; | 15 | _text = .; |
15 | TEXT_TEXT | 16 | TEXT_TEXT |
16 | SCHED_TEXT | 17 | SCHED_TEXT |
17 | LOCK_TEXT | 18 | LOCK_TEXT |
18 | *(.gnu.warning) | 19 | *(.gnu.warning) |
19 | } =0 | 20 | } = 0 |
20 | _etext = .; | 21 | _etext = .; |
21 | PROVIDE (etext = .); | 22 | PROVIDE (etext = .); |
22 | RODATA | 23 | RODATA |
23 | .data : | 24 | .data : { |
24 | { | 25 | DATA_DATA |
25 | DATA_DATA | 26 | CONSTRUCTORS |
26 | CONSTRUCTORS | 27 | } |
27 | } | 28 | .data1 : { |
28 | .data1 : { *(.data1) } | 29 | *(.data1) |
29 | _edata = .; | 30 | } |
30 | PROVIDE (edata = .); | 31 | _edata = .; |
31 | __start___fixup = .; | 32 | PROVIDE (edata = .); |
32 | .fixup : { *(.fixup) } | ||
33 | __stop___fixup = .; | ||
34 | __start___ex_table = .; | ||
35 | __ex_table : { *(__ex_table) } | ||
36 | __stop___ex_table = .; | ||
37 | 33 | ||
38 | NOTES | 34 | .fixup : { |
35 | __start___fixup = .; | ||
36 | *(.fixup) | ||
37 | __stop___fixup = .; | ||
38 | } | ||
39 | __ex_table : { | ||
40 | __start___ex_table = .; | ||
41 | *(__ex_table) | ||
42 | __stop___ex_table = .; | ||
43 | } | ||
39 | 44 | ||
40 | . = ALIGN(4096); | 45 | NOTES |
41 | __init_begin = .; | 46 | |
42 | _sinittext = .; | 47 | . = ALIGN(PAGE_SIZE); |
43 | .init.text : { | 48 | __init_begin = .; |
44 | *(.init.text) | 49 | .init.text : { |
45 | } | 50 | _sinittext = .; |
46 | _einittext = .; | 51 | *(.init.text) |
47 | __init_text_end = .; | 52 | _einittext = .; |
48 | .init.data : { *(.init.data) } | 53 | } |
49 | . = ALIGN(16); | 54 | __init_text_end = .; |
50 | __setup_start = .; | 55 | .init.data : { |
51 | .init.setup : { *(.init.setup) } | 56 | *(.init.data) |
52 | __setup_end = .; | 57 | } |
53 | __initcall_start = .; | 58 | . = ALIGN(16); |
54 | .initcall.init : { | 59 | .init.setup : { |
55 | INITCALLS | 60 | __setup_start = .; |
56 | } | 61 | *(.init.setup) |
57 | __initcall_end = .; | 62 | __setup_end = .; |
58 | __con_initcall_start = .; | 63 | } |
59 | .con_initcall.init : { *(.con_initcall.init) } | 64 | .initcall.init : { |
60 | __con_initcall_end = .; | 65 | __initcall_start = .; |
61 | SECURITY_INIT | 66 | INITCALLS |
67 | __initcall_end = .; | ||
68 | } | ||
69 | .con_initcall.init : { | ||
70 | __con_initcall_start = .; | ||
71 | *(.con_initcall.init) | ||
72 | __con_initcall_end = .; | ||
73 | } | ||
74 | SECURITY_INIT | ||
62 | 75 | ||
63 | #ifdef CONFIG_BLK_DEV_INITRD | 76 | #ifdef CONFIG_BLK_DEV_INITRD |
64 | . = ALIGN(4096); | 77 | . = ALIGN(PAGE_SIZE); |
65 | __initramfs_start = .; | 78 | .init.ramfs : { |
66 | .init.ramfs : { *(.init.ramfs) } | 79 | __initramfs_start = .; |
67 | __initramfs_end = .; | 80 | *(.init.ramfs) |
81 | __initramfs_end = .; | ||
82 | } | ||
68 | #endif | 83 | #endif |
69 | 84 | ||
70 | PERCPU(4096) | 85 | PERCPU(PAGE_SIZE) |
71 | . = ALIGN(4096); | 86 | . = ALIGN(PAGE_SIZE); |
72 | __init_end = .; | 87 | __init_end = .; |
73 | . = ALIGN(32); | 88 | . = ALIGN(32); |
74 | .data.cacheline_aligned : { *(.data.cacheline_aligned) } | 89 | .data.cacheline_aligned : { |
75 | 90 | *(.data.cacheline_aligned) | |
76 | __bss_start = .; | 91 | } |
77 | .sbss : { *(.sbss) *(.scommon) } | ||
78 | .bss : | ||
79 | { | ||
80 | *(.dynbss) | ||
81 | *(.bss) | ||
82 | *(COMMON) | ||
83 | } | ||
84 | _end = . ; | ||
85 | PROVIDE (end = .); | ||
86 | /DISCARD/ : { *(.exit.text) *(.exit.data) *(.exitcall.exit) } | ||
87 | 92 | ||
88 | STABS_DEBUG | 93 | __bss_start = .; |
94 | .sbss : { | ||
95 | *(.sbss) | ||
96 | *(.scommon) } | ||
97 | .bss : { | ||
98 | *(.dynbss) | ||
99 | *(.bss) | ||
100 | *(COMMON) | ||
101 | } | ||
102 | _end = . ; | ||
103 | PROVIDE (end = .); | ||
104 | /DISCARD/ : { | ||
105 | *(.exit.text) | ||
106 | *(.exit.data) | ||
107 | *(.exitcall.exit) | ||
108 | } | ||
89 | 109 | ||
90 | DWARF_DEBUG | 110 | STABS_DEBUG |
111 | DWARF_DEBUG | ||
91 | } | 112 | } |
diff --git a/arch/sparc64/defconfig b/arch/sparc64/defconfig
index 7d07297db878..1aa2c4048e4b 100644
--- a/arch/sparc64/defconfig
+++ b/arch/sparc64/defconfig
@@ -1,7 +1,7 @@ | |||
1 | # | 1 | # |
2 | # Automatically generated make config: don't edit | 2 | # Automatically generated make config: don't edit |
3 | # Linux kernel version: 2.6.23-rc6 | 3 | # Linux kernel version: 2.6.23 |
4 | # Sun Sep 16 09:52:11 2007 | 4 | # Sat Oct 13 21:53:54 2007 |
5 | # | 5 | # |
6 | CONFIG_SPARC=y | 6 | CONFIG_SPARC=y |
7 | CONFIG_SPARC64=y | 7 | CONFIG_SPARC64=y |
@@ -69,7 +69,6 @@ CONFIG_FUTEX=y | |||
69 | CONFIG_ANON_INODES=y | 69 | CONFIG_ANON_INODES=y |
70 | CONFIG_EPOLL=y | 70 | CONFIG_EPOLL=y |
71 | CONFIG_SIGNALFD=y | 71 | CONFIG_SIGNALFD=y |
72 | CONFIG_TIMERFD=y | ||
73 | CONFIG_EVENTFD=y | 72 | CONFIG_EVENTFD=y |
74 | CONFIG_SHMEM=y | 73 | CONFIG_SHMEM=y |
75 | CONFIG_VM_EVENT_COUNTERS=y | 74 | CONFIG_VM_EVENT_COUNTERS=y |
@@ -89,6 +88,7 @@ CONFIG_KMOD=y | |||
89 | CONFIG_BLOCK=y | 88 | CONFIG_BLOCK=y |
90 | CONFIG_BLK_DEV_IO_TRACE=y | 89 | CONFIG_BLK_DEV_IO_TRACE=y |
91 | CONFIG_BLK_DEV_BSG=y | 90 | CONFIG_BLK_DEV_BSG=y |
91 | CONFIG_BLOCK_COMPAT=y | ||
92 | 92 | ||
93 | # | 93 | # |
94 | # IO Schedulers | 94 | # IO Schedulers |
@@ -111,6 +111,7 @@ CONFIG_GENERIC_HARDIRQS=y | |||
111 | CONFIG_TICK_ONESHOT=y | 111 | CONFIG_TICK_ONESHOT=y |
112 | CONFIG_NO_HZ=y | 112 | CONFIG_NO_HZ=y |
113 | CONFIG_HIGH_RES_TIMERS=y | 113 | CONFIG_HIGH_RES_TIMERS=y |
114 | CONFIG_GENERIC_CLOCKEVENTS_BUILD=y | ||
114 | # CONFIG_SMP is not set | 115 | # CONFIG_SMP is not set |
115 | CONFIG_CPU_FREQ=y | 116 | CONFIG_CPU_FREQ=y |
116 | CONFIG_CPU_FREQ_TABLE=m | 117 | CONFIG_CPU_FREQ_TABLE=m |
@@ -119,6 +120,8 @@ CONFIG_CPU_FREQ_STAT=m | |||
119 | CONFIG_CPU_FREQ_STAT_DETAILS=y | 120 | CONFIG_CPU_FREQ_STAT_DETAILS=y |
120 | CONFIG_CPU_FREQ_DEFAULT_GOV_PERFORMANCE=y | 121 | CONFIG_CPU_FREQ_DEFAULT_GOV_PERFORMANCE=y |
121 | # CONFIG_CPU_FREQ_DEFAULT_GOV_USERSPACE is not set | 122 | # CONFIG_CPU_FREQ_DEFAULT_GOV_USERSPACE is not set |
123 | # CONFIG_CPU_FREQ_DEFAULT_GOV_ONDEMAND is not set | ||
124 | # CONFIG_CPU_FREQ_DEFAULT_GOV_CONSERVATIVE is not set | ||
122 | CONFIG_CPU_FREQ_GOV_PERFORMANCE=y | 125 | CONFIG_CPU_FREQ_GOV_PERFORMANCE=y |
123 | CONFIG_CPU_FREQ_GOV_POWERSAVE=m | 126 | CONFIG_CPU_FREQ_GOV_POWERSAVE=m |
124 | CONFIG_CPU_FREQ_GOV_USERSPACE=m | 127 | CONFIG_CPU_FREQ_GOV_USERSPACE=m |
@@ -213,6 +216,7 @@ CONFIG_INET_TUNNEL=y | |||
213 | CONFIG_INET_XFRM_MODE_TRANSPORT=y | 216 | CONFIG_INET_XFRM_MODE_TRANSPORT=y |
214 | CONFIG_INET_XFRM_MODE_TUNNEL=y | 217 | CONFIG_INET_XFRM_MODE_TUNNEL=y |
215 | CONFIG_INET_XFRM_MODE_BEET=y | 218 | CONFIG_INET_XFRM_MODE_BEET=y |
219 | CONFIG_INET_LRO=y | ||
216 | CONFIG_INET_DIAG=y | 220 | CONFIG_INET_DIAG=y |
217 | CONFIG_INET_TCP_DIAG=y | 221 | CONFIG_INET_TCP_DIAG=y |
218 | # CONFIG_TCP_CONG_ADVANCED is not set | 222 | # CONFIG_TCP_CONG_ADVANCED is not set |
@@ -304,6 +308,7 @@ CONFIG_NET_TCPPROBE=m | |||
304 | # | 308 | # |
305 | # Generic Driver Options | 309 | # Generic Driver Options |
306 | # | 310 | # |
311 | CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug" | ||
307 | CONFIG_STANDALONE=y | 312 | CONFIG_STANDALONE=y |
308 | # CONFIG_PREVENT_FIRMWARE_BUILD is not set | 313 | # CONFIG_PREVENT_FIRMWARE_BUILD is not set |
309 | CONFIG_FW_LOADER=y | 314 | CONFIG_FW_LOADER=y |
@@ -355,6 +360,11 @@ CONFIG_IDE_PROC_FS=y | |||
355 | # IDE chipset support/bugfixes | 360 | # IDE chipset support/bugfixes |
356 | # | 361 | # |
357 | CONFIG_IDE_GENERIC=y | 362 | CONFIG_IDE_GENERIC=y |
363 | # CONFIG_BLK_DEV_PLATFORM is not set | ||
364 | |||
365 | # | ||
366 | # PCI IDE chipsets support | ||
367 | # | ||
358 | CONFIG_BLK_DEV_IDEPCI=y | 368 | CONFIG_BLK_DEV_IDEPCI=y |
359 | # CONFIG_IDEPCI_SHARE_IRQ is not set | 369 | # CONFIG_IDEPCI_SHARE_IRQ is not set |
360 | CONFIG_IDEPCI_PCIBUS_ORDER=y | 370 | CONFIG_IDEPCI_PCIBUS_ORDER=y |
@@ -391,7 +401,6 @@ CONFIG_BLK_DEV_ALI15X3=y | |||
391 | # CONFIG_BLK_DEV_TC86C001 is not set | 401 | # CONFIG_BLK_DEV_TC86C001 is not set |
392 | # CONFIG_IDE_ARM is not set | 402 | # CONFIG_IDE_ARM is not set |
393 | CONFIG_BLK_DEV_IDEDMA=y | 403 | CONFIG_BLK_DEV_IDEDMA=y |
394 | # CONFIG_IDEDMA_IVB is not set | ||
395 | # CONFIG_BLK_DEV_HD is not set | 404 | # CONFIG_BLK_DEV_HD is not set |
396 | 405 | ||
397 | # | 406 | # |
@@ -505,6 +514,8 @@ CONFIG_DUMMY=m | |||
505 | # CONFIG_MACVLAN is not set | 514 | # CONFIG_MACVLAN is not set |
506 | # CONFIG_EQUALIZER is not set | 515 | # CONFIG_EQUALIZER is not set |
507 | # CONFIG_TUN is not set | 516 | # CONFIG_TUN is not set |
517 | # CONFIG_VETH is not set | ||
518 | # CONFIG_IP1000 is not set | ||
508 | # CONFIG_ARCNET is not set | 519 | # CONFIG_ARCNET is not set |
509 | # CONFIG_PHYLIB is not set | 520 | # CONFIG_PHYLIB is not set |
510 | CONFIG_NET_ETHERNET=y | 521 | CONFIG_NET_ETHERNET=y |
@@ -518,13 +529,16 @@ CONFIG_CASSINI=m | |||
518 | # CONFIG_NET_VENDOR_3COM is not set | 529 | # CONFIG_NET_VENDOR_3COM is not set |
519 | # CONFIG_NET_TULIP is not set | 530 | # CONFIG_NET_TULIP is not set |
520 | # CONFIG_HP100 is not set | 531 | # CONFIG_HP100 is not set |
532 | # CONFIG_IBM_NEW_EMAC_ZMII is not set | ||
533 | # CONFIG_IBM_NEW_EMAC_RGMII is not set | ||
534 | # CONFIG_IBM_NEW_EMAC_TAH is not set | ||
535 | # CONFIG_IBM_NEW_EMAC_EMAC4 is not set | ||
521 | CONFIG_NET_PCI=y | 536 | CONFIG_NET_PCI=y |
522 | # CONFIG_PCNET32 is not set | 537 | # CONFIG_PCNET32 is not set |
523 | # CONFIG_AMD8111_ETH is not set | 538 | # CONFIG_AMD8111_ETH is not set |
524 | # CONFIG_ADAPTEC_STARFIRE is not set | 539 | # CONFIG_ADAPTEC_STARFIRE is not set |
525 | # CONFIG_B44 is not set | 540 | # CONFIG_B44 is not set |
526 | # CONFIG_FORCEDETH is not set | 541 | # CONFIG_FORCEDETH is not set |
527 | # CONFIG_DGRS is not set | ||
528 | # CONFIG_EEPRO100 is not set | 542 | # CONFIG_EEPRO100 is not set |
529 | # CONFIG_E100 is not set | 543 | # CONFIG_E100 is not set |
530 | # CONFIG_FEALNX is not set | 544 | # CONFIG_FEALNX is not set |
@@ -543,6 +557,7 @@ CONFIG_NETDEV_1000=y | |||
543 | CONFIG_E1000=m | 557 | CONFIG_E1000=m |
544 | CONFIG_E1000_NAPI=y | 558 | CONFIG_E1000_NAPI=y |
545 | # CONFIG_E1000_DISABLE_PACKET_SPLIT is not set | 559 | # CONFIG_E1000_DISABLE_PACKET_SPLIT is not set |
560 | # CONFIG_E1000E is not set | ||
546 | # CONFIG_MYRI_SBUS is not set | 561 | # CONFIG_MYRI_SBUS is not set |
547 | # CONFIG_NS83820 is not set | 562 | # CONFIG_NS83820 is not set |
548 | # CONFIG_HAMACHI is not set | 563 | # CONFIG_HAMACHI is not set |
@@ -560,11 +575,14 @@ CONFIG_BNX2=m | |||
560 | CONFIG_NETDEV_10000=y | 575 | CONFIG_NETDEV_10000=y |
561 | # CONFIG_CHELSIO_T1 is not set | 576 | # CONFIG_CHELSIO_T1 is not set |
562 | # CONFIG_CHELSIO_T3 is not set | 577 | # CONFIG_CHELSIO_T3 is not set |
578 | # CONFIG_IXGBE is not set | ||
563 | # CONFIG_IXGB is not set | 579 | # CONFIG_IXGB is not set |
564 | # CONFIG_S2IO is not set | 580 | # CONFIG_S2IO is not set |
565 | # CONFIG_MYRI10GE is not set | 581 | # CONFIG_MYRI10GE is not set |
566 | # CONFIG_NETXEN_NIC is not set | 582 | # CONFIG_NETXEN_NIC is not set |
583 | # CONFIG_NIU is not set | ||
567 | # CONFIG_MLX4_CORE is not set | 584 | # CONFIG_MLX4_CORE is not set |
585 | # CONFIG_TEHUTI is not set | ||
568 | # CONFIG_TR is not set | 586 | # CONFIG_TR is not set |
569 | 587 | ||
570 | # | 588 | # |
@@ -820,6 +838,12 @@ CONFIG_HWMON=y | |||
820 | # CONFIG_HWMON_DEBUG_CHIP is not set | 838 | # CONFIG_HWMON_DEBUG_CHIP is not set |
821 | 839 | ||
822 | # | 840 | # |
841 | # Sonics Silicon Backplane | ||
842 | # | ||
843 | CONFIG_SSB_POSSIBLE=y | ||
844 | # CONFIG_SSB is not set | ||
845 | |||
846 | # | ||
823 | # Multifunction device drivers | 847 | # Multifunction device drivers |
824 | # | 848 | # |
825 | # CONFIG_MFD_SM501 is not set | 849 | # CONFIG_MFD_SM501 is not set |
@@ -1399,6 +1423,7 @@ CONFIG_ASYNC_MEMCPY=m | |||
1399 | CONFIG_ASYNC_XOR=m | 1423 | CONFIG_ASYNC_XOR=m |
1400 | CONFIG_CRYPTO=y | 1424 | CONFIG_CRYPTO=y |
1401 | CONFIG_CRYPTO_ALGAPI=y | 1425 | CONFIG_CRYPTO_ALGAPI=y |
1426 | CONFIG_CRYPTO_AEAD=m | ||
1402 | CONFIG_CRYPTO_BLKCIPHER=y | 1427 | CONFIG_CRYPTO_BLKCIPHER=y |
1403 | CONFIG_CRYPTO_HASH=y | 1428 | CONFIG_CRYPTO_HASH=y |
1404 | CONFIG_CRYPTO_MANAGER=y | 1429 | CONFIG_CRYPTO_MANAGER=y |
@@ -1417,6 +1442,7 @@ CONFIG_CRYPTO_ECB=m | |||
1417 | CONFIG_CRYPTO_CBC=y | 1442 | CONFIG_CRYPTO_CBC=y |
1418 | CONFIG_CRYPTO_PCBC=m | 1443 | CONFIG_CRYPTO_PCBC=m |
1419 | CONFIG_CRYPTO_LRW=m | 1444 | CONFIG_CRYPTO_LRW=m |
1445 | CONFIG_CRYPTO_XTS=m | ||
1420 | # CONFIG_CRYPTO_CRYPTD is not set | 1446 | # CONFIG_CRYPTO_CRYPTD is not set |
1421 | CONFIG_CRYPTO_DES=y | 1447 | CONFIG_CRYPTO_DES=y |
1422 | CONFIG_CRYPTO_FCRYPT=m | 1448 | CONFIG_CRYPTO_FCRYPT=m |
@@ -1431,11 +1457,13 @@ CONFIG_CRYPTO_TEA=m | |||
1431 | CONFIG_CRYPTO_ARC4=m | 1457 | CONFIG_CRYPTO_ARC4=m |
1432 | CONFIG_CRYPTO_KHAZAD=m | 1458 | CONFIG_CRYPTO_KHAZAD=m |
1433 | CONFIG_CRYPTO_ANUBIS=m | 1459 | CONFIG_CRYPTO_ANUBIS=m |
1460 | CONFIG_CRYPTO_SEED=m | ||
1434 | CONFIG_CRYPTO_DEFLATE=y | 1461 | CONFIG_CRYPTO_DEFLATE=y |
1435 | CONFIG_CRYPTO_MICHAEL_MIC=m | 1462 | CONFIG_CRYPTO_MICHAEL_MIC=m |
1436 | CONFIG_CRYPTO_CRC32C=m | 1463 | CONFIG_CRYPTO_CRC32C=m |
1437 | CONFIG_CRYPTO_CAMELLIA=m | 1464 | CONFIG_CRYPTO_CAMELLIA=m |
1438 | CONFIG_CRYPTO_TEST=m | 1465 | CONFIG_CRYPTO_TEST=m |
1466 | CONFIG_CRYPTO_AUTHENC=m | ||
1439 | CONFIG_CRYPTO_HW=y | 1467 | CONFIG_CRYPTO_HW=y |
1440 | 1468 | ||
1441 | # | 1469 | # |
diff --git a/arch/sparc64/kernel/Makefile b/arch/sparc64/kernel/Makefile
index 40d2f3aae91e..112c46e66578 100644
--- a/arch/sparc64/kernel/Makefile
+++ b/arch/sparc64/kernel/Makefile
@@ -18,6 +18,7 @@ obj-$(CONFIG_STACKTRACE) += stacktrace.o | |||
18 | obj-$(CONFIG_PCI) += ebus.o isa.o pci_common.o \ | 18 | obj-$(CONFIG_PCI) += ebus.o isa.o pci_common.o \ |
19 | pci_psycho.o pci_sabre.o pci_schizo.o \ | 19 | pci_psycho.o pci_sabre.o pci_schizo.o \ |
20 | pci_sun4v.o pci_sun4v_asm.o pci_fire.o | 20 | pci_sun4v.o pci_sun4v_asm.o pci_fire.o |
21 | obj-$(CONFIG_PCI_MSI) += pci_msi.o | ||
21 | obj-$(CONFIG_SMP) += smp.o trampoline.o hvtramp.o | 22 | obj-$(CONFIG_SMP) += smp.o trampoline.o hvtramp.o |
22 | obj-$(CONFIG_SPARC32_COMPAT) += sys32.o sys_sparc32.o signal32.o | 23 | obj-$(CONFIG_SPARC32_COMPAT) += sys32.o sys_sparc32.o signal32.o |
23 | obj-$(CONFIG_BINFMT_ELF32) += binfmt_elf32.o | 24 | obj-$(CONFIG_BINFMT_ELF32) += binfmt_elf32.o |
diff --git a/arch/sparc64/kernel/auxio.c b/arch/sparc64/kernel/auxio.c
index 7b379761e9f8..c55f0293eacd 100644
--- a/arch/sparc64/kernel/auxio.c
+++ b/arch/sparc64/kernel/auxio.c
@@ -148,9 +148,11 @@ static int __devinit auxio_probe(struct of_device *dev, const struct of_device_i | |||
148 | } | 148 | } |
149 | 149 | ||
150 | static struct of_platform_driver auxio_driver = { | 150 | static struct of_platform_driver auxio_driver = { |
151 | .name = "auxio", | ||
152 | .match_table = auxio_match, | 151 | .match_table = auxio_match, |
153 | .probe = auxio_probe, | 152 | .probe = auxio_probe, |
153 | .driver = { | ||
154 | .name = "auxio", | ||
155 | }, | ||
154 | }; | 156 | }; |
155 | 157 | ||
156 | static int __init auxio_init(void) | 158 | static int __init auxio_init(void) |
diff --git a/arch/sparc64/kernel/entry.S b/arch/sparc64/kernel/entry.S
index 8059531bf0ac..c9b0d7af64ae 100644
--- a/arch/sparc64/kernel/entry.S
+++ b/arch/sparc64/kernel/entry.S
@@ -429,16 +429,16 @@ do_ivec: | |||
429 | stxa %g0, [%g0] ASI_INTR_RECEIVE | 429 | stxa %g0, [%g0] ASI_INTR_RECEIVE |
430 | membar #Sync | 430 | membar #Sync |
431 | 431 | ||
432 | sethi %hi(ivector_table), %g2 | 432 | sethi %hi(ivector_table_pa), %g2 |
433 | sllx %g3, 3, %g3 | 433 | ldx [%g2 + %lo(ivector_table_pa)], %g2 |
434 | or %g2, %lo(ivector_table), %g2 | 434 | sllx %g3, 4, %g3 |
435 | add %g2, %g3, %g3 | 435 | add %g2, %g3, %g3 |
436 | 436 | ||
437 | TRAP_LOAD_IRQ_WORK(%g6, %g1) | 437 | TRAP_LOAD_IRQ_WORK_PA(%g6, %g1) |
438 | 438 | ||
439 | lduw [%g6], %g5 /* g5 = irq_work(cpu) */ | 439 | ldx [%g6], %g5 |
440 | stw %g5, [%g3 + 0x00] /* bucket->irq_chain = g5 */ | 440 | stxa %g5, [%g3] ASI_PHYS_USE_EC |
441 | stw %g3, [%g6] /* irq_work(cpu) = bucket */ | 441 | stx %g3, [%g6] |
442 | wr %g0, 1 << PIL_DEVICE_IRQ, %set_softint | 442 | wr %g0, 1 << PIL_DEVICE_IRQ, %set_softint |
443 | retry | 443 | retry |
444 | do_ivec_xcall: | 444 | do_ivec_xcall: |
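The do_ivec rewrite above pairs with the ivector_table rework in the sparc64 irq.c diff that follows: ino_bucket entries grow to 16 bytes, the table is addressed through ivector_table_pa, and pending buckets are chained onto the per-cpu irq_worklist_pa with bypass (ASI_PHYS_USE_EC) stores so the trap handler cannot take a TLB miss on them. A rough C-level paraphrase of the assembler's fast path; phys_store64() is a hypothetical stand-in for the stxa bypass store:

```c
/* Rough C paraphrase of the rewritten do_ivec fast path; the real
 * code is the hand-written assembler above.  phys_store64() is a
 * hypothetical stand-in for "stxa ... ASI_PHYS_USE_EC", a store by
 * physical address that cannot fault inside the trap handler.
 */
struct ino_bucket {
	unsigned long	__irq_chain_pa;	/* 0x00 */
	unsigned int	__virt_irq;	/* 0x08 */
	unsigned int	__pad;		/* 0x0c */
};

extern unsigned long ivector_table_pa;
extern void phys_store64(unsigned long pa, unsigned long val);

/* worklist_head points at trap_block[cpu].irq_worklist_pa, which the
 * assembler reaches via TRAP_LOAD_IRQ_WORK_PA().
 */
static void do_ivec_sketch(unsigned long ino, unsigned long *worklist_head)
{
	/* Buckets are 16 bytes now, hence "sllx %g3, 4". */
	unsigned long bucket_pa = ivector_table_pa + (ino << 4);

	/* bucket->__irq_chain_pa = old list head (bypass store) ... */
	phys_store64(bucket_pa, *worklist_head);
	/* ... and the bucket becomes the new head of the worklist. */
	*worklist_head = bucket_pa;

	/* Followed by: wr %g0, 1 << PIL_DEVICE_IRQ, %set_softint */
}
```

The consumer side is the new handler_irq() loop in the irq.c hunk below, which atomically snapshots this per-cpu chain and walks it by physical address.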
diff --git a/arch/sparc64/kernel/irq.c b/arch/sparc64/kernel/irq.c
index 23956096b3bf..f3922e5a89f6 100644
--- a/arch/sparc64/kernel/irq.c
+++ b/arch/sparc64/kernel/irq.c
@@ -21,7 +21,6 @@ | |||
21 | #include <linux/seq_file.h> | 21 | #include <linux/seq_file.h> |
22 | #include <linux/bootmem.h> | 22 | #include <linux/bootmem.h> |
23 | #include <linux/irq.h> | 23 | #include <linux/irq.h> |
24 | #include <linux/msi.h> | ||
25 | 24 | ||
26 | #include <asm/ptrace.h> | 25 | #include <asm/ptrace.h> |
27 | #include <asm/processor.h> | 26 | #include <asm/processor.h> |
@@ -43,6 +42,7 @@ | |||
43 | #include <asm/auxio.h> | 42 | #include <asm/auxio.h> |
44 | #include <asm/head.h> | 43 | #include <asm/head.h> |
45 | #include <asm/hypervisor.h> | 44 | #include <asm/hypervisor.h> |
45 | #include <asm/cacheflush.h> | ||
46 | 46 | ||
47 | /* UPA nodes send interrupt packet to UltraSparc with first data reg | 47 | /* UPA nodes send interrupt packet to UltraSparc with first data reg |
48 | * value low 5 (7 on Starfire) bits holding the IRQ identifier being | 48 | * value low 5 (7 on Starfire) bits holding the IRQ identifier being |
@@ -52,86 +52,128 @@ | |||
52 | * To make processing these packets efficient and race free we use | 52 | * To make processing these packets efficient and race free we use |
53 | * an array of irq buckets below. The interrupt vector handler in | 53 | * an array of irq buckets below. The interrupt vector handler in |
54 | * entry.S feeds incoming packets into per-cpu pil-indexed lists. | 54 | * entry.S feeds incoming packets into per-cpu pil-indexed lists. |
55 | * The IVEC handler does not need to act atomically, the PIL dispatch | ||
56 | * code uses CAS to get an atomic snapshot of the list and clear it | ||
57 | * at the same time. | ||
58 | * | 55 | * |
59 | * If you make changes to ino_bucket, please update hand coded assembler | 56 | * If you make changes to ino_bucket, please update hand coded assembler |
60 | * of the vectored interrupt trap handler(s) in entry.S and sun4v_ivec.S | 57 | * of the vectored interrupt trap handler(s) in entry.S and sun4v_ivec.S |
61 | */ | 58 | */ |
62 | struct ino_bucket { | 59 | struct ino_bucket { |
63 | /* Next handler in per-CPU IRQ worklist. We know that | 60 | /*0x00*/unsigned long __irq_chain_pa; |
64 | * bucket pointers have the high 32-bits clear, so to | ||
65 | * save space we only store the bits we need. | ||
66 | */ | ||
67 | /*0x00*/unsigned int irq_chain; | ||
68 | 61 | ||
69 | /* Virtual interrupt number assigned to this INO. */ | 62 | /* Virtual interrupt number assigned to this INO. */ |
70 | /*0x04*/unsigned int virt_irq; | 63 | /*0x08*/unsigned int __virt_irq; |
64 | /*0x0c*/unsigned int __pad; | ||
71 | }; | 65 | }; |
72 | 66 | ||
73 | #define NUM_IVECS (IMAP_INR + 1) | 67 | #define NUM_IVECS (IMAP_INR + 1) |
74 | struct ino_bucket ivector_table[NUM_IVECS] __attribute__ ((aligned (SMP_CACHE_BYTES))); | 68 | struct ino_bucket *ivector_table; |
75 | 69 | unsigned long ivector_table_pa; | |
76 | #define __irq_ino(irq) \ | 70 | |
77 | (((struct ino_bucket *)(unsigned long)(irq)) - &ivector_table[0]) | 71 | /* On several sun4u processors, it is illegal to mix bypass and |
78 | #define __bucket(irq) ((struct ino_bucket *)(unsigned long)(irq)) | 72 | * non-bypass accesses. Therefore we access all INO buckets |
79 | #define __irq(bucket) ((unsigned int)(unsigned long)(bucket)) | 73 | * using bypass accesses only. |
80 | |||
81 | /* This has to be in the main kernel image, it cannot be | ||
82 | * turned into per-cpu data. The reason is that the main | ||
83 | * kernel image is locked into the TLB and this structure | ||
84 | * is accessed from the vectored interrupt trap handler. If | ||
85 | * access to this structure takes a TLB miss it could cause | ||
86 | * the 5-level sparc v9 trap stack to overflow. | ||
87 | */ | 74 | */ |
88 | #define irq_work(__cpu) &(trap_block[(__cpu)].irq_worklist) | 75 | static unsigned long bucket_get_chain_pa(unsigned long bucket_pa) |
76 | { | ||
77 | unsigned long ret; | ||
78 | |||
79 | __asm__ __volatile__("ldxa [%1] %2, %0" | ||
80 | : "=&r" (ret) | ||
81 | : "r" (bucket_pa + | ||
82 | offsetof(struct ino_bucket, | ||
83 | __irq_chain_pa)), | ||
84 | "i" (ASI_PHYS_USE_EC)); | ||
85 | |||
86 | return ret; | ||
87 | } | ||
88 | |||
89 | static void bucket_clear_chain_pa(unsigned long bucket_pa) | ||
90 | { | ||
91 | __asm__ __volatile__("stxa %%g0, [%0] %1" | ||
92 | : /* no outputs */ | ||
93 | : "r" (bucket_pa + | ||
94 | offsetof(struct ino_bucket, | ||
95 | __irq_chain_pa)), | ||
96 | "i" (ASI_PHYS_USE_EC)); | ||
97 | } | ||
98 | |||
99 | static unsigned int bucket_get_virt_irq(unsigned long bucket_pa) | ||
100 | { | ||
101 | unsigned int ret; | ||
102 | |||
103 | __asm__ __volatile__("lduwa [%1] %2, %0" | ||
104 | : "=&r" (ret) | ||
105 | : "r" (bucket_pa + | ||
106 | offsetof(struct ino_bucket, | ||
107 | __virt_irq)), | ||
108 | "i" (ASI_PHYS_USE_EC)); | ||
109 | |||
110 | return ret; | ||
111 | } | ||
112 | |||
113 | static void bucket_set_virt_irq(unsigned long bucket_pa, | ||
114 | unsigned int virt_irq) | ||
115 | { | ||
116 | __asm__ __volatile__("stwa %0, [%1] %2" | ||
117 | : /* no outputs */ | ||
118 | : "r" (virt_irq), | ||
119 | "r" (bucket_pa + | ||
120 | offsetof(struct ino_bucket, | ||
121 | __virt_irq)), | ||
122 | "i" (ASI_PHYS_USE_EC)); | ||
123 | } | ||
124 | |||
125 | #define irq_work_pa(__cpu) &(trap_block[(__cpu)].irq_worklist_pa) | ||
89 | 126 | ||
90 | static struct { | 127 | static struct { |
91 | unsigned int irq; | ||
92 | unsigned int dev_handle; | 128 | unsigned int dev_handle; |
93 | unsigned int dev_ino; | 129 | unsigned int dev_ino; |
94 | } virt_to_real_irq_table[NR_IRQS]; | 130 | unsigned int in_use; |
131 | } virt_irq_table[NR_IRQS]; | ||
132 | static DEFINE_SPINLOCK(virt_irq_alloc_lock); | ||
95 | 133 | ||
96 | static unsigned char virt_irq_alloc(unsigned int real_irq) | 134 | unsigned char virt_irq_alloc(unsigned int dev_handle, |
135 | unsigned int dev_ino) | ||
97 | { | 136 | { |
137 | unsigned long flags; | ||
98 | unsigned char ent; | 138 | unsigned char ent; |
99 | 139 | ||
100 | BUILD_BUG_ON(NR_IRQS >= 256); | 140 | BUILD_BUG_ON(NR_IRQS >= 256); |
101 | 141 | ||
142 | spin_lock_irqsave(&virt_irq_alloc_lock, flags); | ||
143 | |||
102 | for (ent = 1; ent < NR_IRQS; ent++) { | 144 | for (ent = 1; ent < NR_IRQS; ent++) { |
103 | if (!virt_to_real_irq_table[ent].irq) | 145 | if (!virt_irq_table[ent].in_use) |
104 | break; | 146 | break; |
105 | } | 147 | } |
106 | if (ent >= NR_IRQS) { | 148 | if (ent >= NR_IRQS) { |
107 | printk(KERN_ERR "IRQ: Out of virtual IRQs.\n"); | 149 | printk(KERN_ERR "IRQ: Out of virtual IRQs.\n"); |
108 | return 0; | 150 | ent = 0; |
151 | } else { | ||
152 | virt_irq_table[ent].dev_handle = dev_handle; | ||
153 | virt_irq_table[ent].dev_ino = dev_ino; | ||
154 | virt_irq_table[ent].in_use = 1; | ||
109 | } | 155 | } |
110 | 156 | ||
111 | virt_to_real_irq_table[ent].irq = real_irq; | 157 | spin_unlock_irqrestore(&virt_irq_alloc_lock, flags); |
112 | 158 | ||
113 | return ent; | 159 | return ent; |
114 | } | 160 | } |
115 | 161 | ||
116 | #ifdef CONFIG_PCI_MSI | 162 | #ifdef CONFIG_PCI_MSI |
117 | static void virt_irq_free(unsigned int virt_irq) | 163 | void virt_irq_free(unsigned int virt_irq) |
118 | { | 164 | { |
119 | unsigned int real_irq; | 165 | unsigned long flags; |
120 | 166 | ||
121 | if (virt_irq >= NR_IRQS) | 167 | if (virt_irq >= NR_IRQS) |
122 | return; | 168 | return; |
123 | 169 | ||
124 | real_irq = virt_to_real_irq_table[virt_irq].irq; | 170 | spin_lock_irqsave(&virt_irq_alloc_lock, flags); |
125 | virt_to_real_irq_table[virt_irq].irq = 0; | ||
126 | 171 | ||
127 | __bucket(real_irq)->virt_irq = 0; | 172 | virt_irq_table[virt_irq].in_use = 0; |
128 | } | ||
129 | #endif | ||
130 | 173 | ||
131 | static unsigned int virt_to_real_irq(unsigned char virt_irq) | 174 | spin_unlock_irqrestore(&virt_irq_alloc_lock, flags); |
132 | { | ||
133 | return virt_to_real_irq_table[virt_irq].irq; | ||
134 | } | 175 | } |
176 | #endif | ||
135 | 177 | ||
136 | /* | 178 | /* |
137 | * /proc/interrupts printing: | 179 | * /proc/interrupts printing: |
@@ -217,38 +259,8 @@ struct irq_handler_data { | |||
217 | void (*pre_handler)(unsigned int, void *, void *); | 259 | void (*pre_handler)(unsigned int, void *, void *); |
218 | void *pre_handler_arg1; | 260 | void *pre_handler_arg1; |
219 | void *pre_handler_arg2; | 261 | void *pre_handler_arg2; |
220 | |||
221 | u32 msi; | ||
222 | }; | 262 | }; |
223 | 263 | ||
224 | void sparc64_set_msi(unsigned int virt_irq, u32 msi) | ||
225 | { | ||
226 | struct irq_handler_data *data = get_irq_chip_data(virt_irq); | ||
227 | |||
228 | if (data) | ||
229 | data->msi = msi; | ||
230 | } | ||
231 | |||
232 | u32 sparc64_get_msi(unsigned int virt_irq) | ||
233 | { | ||
234 | struct irq_handler_data *data = get_irq_chip_data(virt_irq); | ||
235 | |||
236 | if (data) | ||
237 | return data->msi; | ||
238 | return 0xffffffff; | ||
239 | } | ||
240 | |||
241 | static inline struct ino_bucket *virt_irq_to_bucket(unsigned int virt_irq) | ||
242 | { | ||
243 | unsigned int real_irq = virt_to_real_irq(virt_irq); | ||
244 | struct ino_bucket *bucket = NULL; | ||
245 | |||
246 | if (likely(real_irq)) | ||
247 | bucket = __bucket(real_irq); | ||
248 | |||
249 | return bucket; | ||
250 | } | ||
251 | |||
252 | #ifdef CONFIG_SMP | 264 | #ifdef CONFIG_SMP |
253 | static int irq_choose_cpu(unsigned int virt_irq) | 265 | static int irq_choose_cpu(unsigned int virt_irq) |
254 | { | 266 | { |
@@ -348,201 +360,152 @@ static void sun4u_irq_end(unsigned int virt_irq) | |||
348 | 360 | ||
349 | static void sun4v_irq_enable(unsigned int virt_irq) | 361 | static void sun4v_irq_enable(unsigned int virt_irq) |
350 | { | 362 | { |
351 | struct ino_bucket *bucket = virt_irq_to_bucket(virt_irq); | 363 | unsigned int ino = virt_irq_table[virt_irq].dev_ino; |
352 | unsigned int ino = bucket - &ivector_table[0]; | 364 | unsigned long cpuid = irq_choose_cpu(virt_irq); |
353 | 365 | int err; | |
354 | if (likely(bucket)) { | ||
355 | unsigned long cpuid; | ||
356 | int err; | ||
357 | 366 | ||
358 | cpuid = irq_choose_cpu(virt_irq); | 367 | err = sun4v_intr_settarget(ino, cpuid); |
359 | 368 | if (err != HV_EOK) | |
360 | err = sun4v_intr_settarget(ino, cpuid); | 369 | printk(KERN_ERR "sun4v_intr_settarget(%x,%lu): " |
361 | if (err != HV_EOK) | 370 | "err(%d)\n", ino, cpuid, err); |
362 | printk(KERN_ERR "sun4v_intr_settarget(%x,%lu): " | 371 | err = sun4v_intr_setstate(ino, HV_INTR_STATE_IDLE); |
363 | "err(%d)\n", ino, cpuid, err); | 372 | if (err != HV_EOK) |
364 | err = sun4v_intr_setstate(ino, HV_INTR_STATE_IDLE); | 373 | printk(KERN_ERR "sun4v_intr_setstate(%x): " |
365 | if (err != HV_EOK) | 374 | "err(%d)\n", ino, err); |
366 | printk(KERN_ERR "sun4v_intr_setstate(%x): " | 375 | err = sun4v_intr_setenabled(ino, HV_INTR_ENABLED); |
367 | "err(%d)\n", ino, err); | 376 | if (err != HV_EOK) |
368 | err = sun4v_intr_setenabled(ino, HV_INTR_ENABLED); | 377 | printk(KERN_ERR "sun4v_intr_setenabled(%x): err(%d)\n", |
369 | if (err != HV_EOK) | 378 | ino, err); |
370 | printk(KERN_ERR "sun4v_intr_setenabled(%x): err(%d)\n", | ||
371 | ino, err); | ||
372 | } | ||
373 | } | 379 | } |
374 | 380 | ||
375 | static void sun4v_set_affinity(unsigned int virt_irq, cpumask_t mask) | 381 | static void sun4v_set_affinity(unsigned int virt_irq, cpumask_t mask) |
376 | { | 382 | { |
377 | struct ino_bucket *bucket = virt_irq_to_bucket(virt_irq); | 383 | unsigned int ino = virt_irq_table[virt_irq].dev_ino; |
378 | unsigned int ino = bucket - &ivector_table[0]; | 384 | unsigned long cpuid = irq_choose_cpu(virt_irq); |
385 | int err; | ||
379 | 386 | ||
380 | if (likely(bucket)) { | 387 | err = sun4v_intr_settarget(ino, cpuid); |
381 | unsigned long cpuid; | 388 | if (err != HV_EOK) |
382 | int err; | 389 | printk(KERN_ERR "sun4v_intr_settarget(%x,%lu): " |
383 | 390 | "err(%d)\n", ino, cpuid, err); | |
384 | cpuid = irq_choose_cpu(virt_irq); | ||
385 | |||
386 | err = sun4v_intr_settarget(ino, cpuid); | ||
387 | if (err != HV_EOK) | ||
388 | printk(KERN_ERR "sun4v_intr_settarget(%x,%lu): " | ||
389 | "err(%d)\n", ino, cpuid, err); | ||
390 | } | ||
391 | } | 391 | } |
392 | 392 | ||
393 | static void sun4v_irq_disable(unsigned int virt_irq) | 393 | static void sun4v_irq_disable(unsigned int virt_irq) |
394 | { | 394 | { |
395 | struct ino_bucket *bucket = virt_irq_to_bucket(virt_irq); | 395 | unsigned int ino = virt_irq_table[virt_irq].dev_ino; |
396 | unsigned int ino = bucket - &ivector_table[0]; | 396 | int err; |
397 | |||
398 | if (likely(bucket)) { | ||
399 | int err; | ||
400 | |||
401 | err = sun4v_intr_setenabled(ino, HV_INTR_DISABLED); | ||
402 | if (err != HV_EOK) | ||
403 | printk(KERN_ERR "sun4v_intr_setenabled(%x): " | ||
404 | "err(%d)\n", ino, err); | ||
405 | } | ||
406 | } | ||
407 | |||
408 | #ifdef CONFIG_PCI_MSI | ||
409 | static void sun4v_msi_enable(unsigned int virt_irq) | ||
410 | { | ||
411 | sun4v_irq_enable(virt_irq); | ||
412 | unmask_msi_irq(virt_irq); | ||
413 | } | ||
414 | 397 | ||
415 | static void sun4v_msi_disable(unsigned int virt_irq) | 398 | err = sun4v_intr_setenabled(ino, HV_INTR_DISABLED); |
416 | { | 399 | if (err != HV_EOK) |
417 | mask_msi_irq(virt_irq); | 400 | printk(KERN_ERR "sun4v_intr_setenabled(%x): " |
418 | sun4v_irq_disable(virt_irq); | 401 | "err(%d)\n", ino, err); |
419 | } | 402 | } |
420 | #endif | ||
421 | 403 | ||
422 | static void sun4v_irq_end(unsigned int virt_irq) | 404 | static void sun4v_irq_end(unsigned int virt_irq) |
423 | { | 405 | { |
424 | struct ino_bucket *bucket = virt_irq_to_bucket(virt_irq); | 406 | unsigned int ino = virt_irq_table[virt_irq].dev_ino; |
425 | unsigned int ino = bucket - &ivector_table[0]; | ||
426 | struct irq_desc *desc = irq_desc + virt_irq; | 407 | struct irq_desc *desc = irq_desc + virt_irq; |
408 | int err; | ||
427 | 409 | ||
428 | if (unlikely(desc->status & (IRQ_DISABLED|IRQ_INPROGRESS))) | 410 | if (unlikely(desc->status & (IRQ_DISABLED|IRQ_INPROGRESS))) |
429 | return; | 411 | return; |
430 | 412 | ||
431 | if (likely(bucket)) { | 413 | err = sun4v_intr_setstate(ino, HV_INTR_STATE_IDLE); |
432 | int err; | 414 | if (err != HV_EOK) |
433 | 415 | printk(KERN_ERR "sun4v_intr_setstate(%x): " | |
434 | err = sun4v_intr_setstate(ino, HV_INTR_STATE_IDLE); | 416 | "err(%d)\n", ino, err); |
435 | if (err != HV_EOK) | ||
436 | printk(KERN_ERR "sun4v_intr_setstate(%x): " | ||
437 | "err(%d)\n", ino, err); | ||
438 | } | ||
439 | } | 417 | } |
440 | 418 | ||
441 | static void sun4v_virq_enable(unsigned int virt_irq) | 419 | static void sun4v_virq_enable(unsigned int virt_irq) |
442 | { | 420 | { |
443 | struct ino_bucket *bucket = virt_irq_to_bucket(virt_irq); | 421 | unsigned long cpuid, dev_handle, dev_ino; |
444 | 422 | int err; | |
445 | if (likely(bucket)) { | 423 | |
446 | unsigned long cpuid, dev_handle, dev_ino; | 424 | cpuid = irq_choose_cpu(virt_irq); |
447 | int err; | 425 | |
448 | 426 | dev_handle = virt_irq_table[virt_irq].dev_handle; | |
449 | cpuid = irq_choose_cpu(virt_irq); | 427 | dev_ino = virt_irq_table[virt_irq].dev_ino; |
450 | 428 | ||
451 | dev_handle = virt_to_real_irq_table[virt_irq].dev_handle; | 429 | err = sun4v_vintr_set_target(dev_handle, dev_ino, cpuid); |
452 | dev_ino = virt_to_real_irq_table[virt_irq].dev_ino; | 430 | if (err != HV_EOK) |
453 | 431 | printk(KERN_ERR "sun4v_vintr_set_target(%lx,%lx,%lu): " | |
454 | err = sun4v_vintr_set_target(dev_handle, dev_ino, cpuid); | 432 | "err(%d)\n", |
455 | if (err != HV_EOK) | 433 | dev_handle, dev_ino, cpuid, err); |
456 | printk(KERN_ERR "sun4v_vintr_set_target(%lx,%lx,%lu): " | 434 | err = sun4v_vintr_set_state(dev_handle, dev_ino, |
457 | "err(%d)\n", | 435 | HV_INTR_STATE_IDLE); |
458 | dev_handle, dev_ino, cpuid, err); | 436 | if (err != HV_EOK) |
459 | err = sun4v_vintr_set_state(dev_handle, dev_ino, | 437 | printk(KERN_ERR "sun4v_vintr_set_state(%lx,%lx," |
460 | HV_INTR_STATE_IDLE); | 438 | "HV_INTR_STATE_IDLE): err(%d)\n", |
461 | if (err != HV_EOK) | 439 | dev_handle, dev_ino, err); |
462 | printk(KERN_ERR "sun4v_vintr_set_state(%lx,%lx," | 440 | err = sun4v_vintr_set_valid(dev_handle, dev_ino, |
463 | "HV_INTR_STATE_IDLE): err(%d)\n", | 441 | HV_INTR_ENABLED); |
464 | dev_handle, dev_ino, err); | 442 | if (err != HV_EOK) |
465 | err = sun4v_vintr_set_valid(dev_handle, dev_ino, | 443 | printk(KERN_ERR "sun4v_vintr_set_state(%lx,%lx," |
466 | HV_INTR_ENABLED); | 444 | "HV_INTR_ENABLED): err(%d)\n", |
467 | if (err != HV_EOK) | 445 | dev_handle, dev_ino, err); |
468 | printk(KERN_ERR "sun4v_vintr_set_state(%lx,%lx," | ||
469 | "HV_INTR_ENABLED): err(%d)\n", | ||
470 | dev_handle, dev_ino, err); | ||
471 | } | ||
472 | } | 446 | } |
473 | 447 | ||
474 | static void sun4v_virt_set_affinity(unsigned int virt_irq, cpumask_t mask) | 448 | static void sun4v_virt_set_affinity(unsigned int virt_irq, cpumask_t mask) |
475 | { | 449 | { |
476 | struct ino_bucket *bucket = virt_irq_to_bucket(virt_irq); | 450 | unsigned long cpuid, dev_handle, dev_ino; |
451 | int err; | ||
477 | 452 | ||
478 | if (likely(bucket)) { | 453 | cpuid = irq_choose_cpu(virt_irq); |
479 | unsigned long cpuid, dev_handle, dev_ino; | ||
480 | int err; | ||
481 | 454 | ||
482 | cpuid = irq_choose_cpu(virt_irq); | 455 | dev_handle = virt_irq_table[virt_irq].dev_handle; |
456 | dev_ino = virt_irq_table[virt_irq].dev_ino; | ||
483 | 457 | ||
484 | dev_handle = virt_to_real_irq_table[virt_irq].dev_handle; | 458 | err = sun4v_vintr_set_target(dev_handle, dev_ino, cpuid); |
485 | dev_ino = virt_to_real_irq_table[virt_irq].dev_ino; | 459 | if (err != HV_EOK) |
486 | 460 | printk(KERN_ERR "sun4v_vintr_set_target(%lx,%lx,%lu): " | |
487 | err = sun4v_vintr_set_target(dev_handle, dev_ino, cpuid); | 461 | "err(%d)\n", |
488 | if (err != HV_EOK) | 462 | dev_handle, dev_ino, cpuid, err); |
489 | printk(KERN_ERR "sun4v_vintr_set_target(%lx,%lx,%lu): " | ||
490 | "err(%d)\n", | ||
491 | dev_handle, dev_ino, cpuid, err); | ||
492 | } | ||
493 | } | 463 | } |
494 | 464 | ||
495 | static void sun4v_virq_disable(unsigned int virt_irq) | 465 | static void sun4v_virq_disable(unsigned int virt_irq) |
496 | { | 466 | { |
497 | struct ino_bucket *bucket = virt_irq_to_bucket(virt_irq); | 467 | unsigned long dev_handle, dev_ino; |
468 | int err; | ||
498 | 469 | ||
499 | if (likely(bucket)) { | 470 | dev_handle = virt_irq_table[virt_irq].dev_handle; |
500 | unsigned long dev_handle, dev_ino; | 471 | dev_ino = virt_irq_table[virt_irq].dev_ino; |
501 | int err; | ||
502 | 472 | ||
503 | dev_handle = virt_to_real_irq_table[virt_irq].dev_handle; | 473 | err = sun4v_vintr_set_valid(dev_handle, dev_ino, |
504 | dev_ino = virt_to_real_irq_table[virt_irq].dev_ino; | 474 | HV_INTR_DISABLED); |
505 | 475 | if (err != HV_EOK) | |
506 | err = sun4v_vintr_set_valid(dev_handle, dev_ino, | 476 | printk(KERN_ERR "sun4v_vintr_set_state(%lx,%lx," |
507 | HV_INTR_DISABLED); | 477 | "HV_INTR_DISABLED): err(%d)\n", |
508 | if (err != HV_EOK) | 478 | dev_handle, dev_ino, err); |
509 | printk(KERN_ERR "sun4v_vintr_set_state(%lx,%lx," | ||
510 | "HV_INTR_DISABLED): err(%d)\n", | ||
511 | dev_handle, dev_ino, err); | ||
512 | } | ||
513 | } | 479 | } |
514 | 480 | ||
515 | static void sun4v_virq_end(unsigned int virt_irq) | 481 | static void sun4v_virq_end(unsigned int virt_irq) |
516 | { | 482 | { |
517 | struct ino_bucket *bucket = virt_irq_to_bucket(virt_irq); | ||
518 | struct irq_desc *desc = irq_desc + virt_irq; | 483 | struct irq_desc *desc = irq_desc + virt_irq; |
484 | unsigned long dev_handle, dev_ino; | ||
485 | int err; | ||
519 | 486 | ||
520 | if (unlikely(desc->status & (IRQ_DISABLED|IRQ_INPROGRESS))) | 487 | if (unlikely(desc->status & (IRQ_DISABLED|IRQ_INPROGRESS))) |
521 | return; | 488 | return; |
522 | 489 | ||
523 | if (likely(bucket)) { | 490 | dev_handle = virt_irq_table[virt_irq].dev_handle; |
524 | unsigned long dev_handle, dev_ino; | 491 | dev_ino = virt_irq_table[virt_irq].dev_ino; |
525 | int err; | ||
526 | 492 | ||
527 | dev_handle = virt_to_real_irq_table[virt_irq].dev_handle; | 493 | err = sun4v_vintr_set_state(dev_handle, dev_ino, |
528 | dev_ino = virt_to_real_irq_table[virt_irq].dev_ino; | 494 | HV_INTR_STATE_IDLE); |
529 | 495 | if (err != HV_EOK) | |
530 | err = sun4v_vintr_set_state(dev_handle, dev_ino, | 496 | printk(KERN_ERR "sun4v_vintr_set_state(%lx,%lx," |
531 | HV_INTR_STATE_IDLE); | 497 | "HV_INTR_STATE_IDLE): err(%d)\n", |
532 | if (err != HV_EOK) | 498 | dev_handle, dev_ino, err); |
533 | printk(KERN_ERR "sun4v_vintr_set_state(%lx,%lx," | ||
534 | "HV_INTR_STATE_IDLE): err(%d)\n", | ||
535 | dev_handle, dev_ino, err); | ||
536 | } | ||
537 | } | 499 | } |
538 | 500 | ||
539 | static void run_pre_handler(unsigned int virt_irq) | 501 | static void run_pre_handler(unsigned int virt_irq) |
540 | { | 502 | { |
541 | struct ino_bucket *bucket = virt_irq_to_bucket(virt_irq); | ||
542 | struct irq_handler_data *data = get_irq_chip_data(virt_irq); | 503 | struct irq_handler_data *data = get_irq_chip_data(virt_irq); |
504 | unsigned int ino; | ||
543 | 505 | ||
506 | ino = virt_irq_table[virt_irq].dev_ino; | ||
544 | if (likely(data->pre_handler)) { | 507 | if (likely(data->pre_handler)) { |
545 | data->pre_handler(__irq_ino(__irq(bucket)), | 508 | data->pre_handler(ino, |
546 | data->pre_handler_arg1, | 509 | data->pre_handler_arg1, |
547 | data->pre_handler_arg2); | 510 | data->pre_handler_arg2); |
548 | } | 511 | } |
@@ -573,28 +536,6 @@ static struct irq_chip sun4v_irq = { | |||
573 | .set_affinity = sun4v_set_affinity, | 536 | .set_affinity = sun4v_set_affinity, |
574 | }; | 537 | }; |
575 | 538 | ||
576 | static struct irq_chip sun4v_irq_ack = { | ||
577 | .typename = "sun4v+ack", | ||
578 | .enable = sun4v_irq_enable, | ||
579 | .disable = sun4v_irq_disable, | ||
580 | .ack = run_pre_handler, | ||
581 | .end = sun4v_irq_end, | ||
582 | .set_affinity = sun4v_set_affinity, | ||
583 | }; | ||
584 | |||
585 | #ifdef CONFIG_PCI_MSI | ||
586 | static struct irq_chip sun4v_msi = { | ||
587 | .typename = "sun4v+msi", | ||
588 | .mask = mask_msi_irq, | ||
589 | .unmask = unmask_msi_irq, | ||
590 | .enable = sun4v_msi_enable, | ||
591 | .disable = sun4v_msi_disable, | ||
592 | .ack = run_pre_handler, | ||
593 | .end = sun4v_irq_end, | ||
594 | .set_affinity = sun4v_set_affinity, | ||
595 | }; | ||
596 | #endif | ||
597 | |||
598 | static struct irq_chip sun4v_virq = { | 539 | static struct irq_chip sun4v_virq = { |
599 | .typename = "vsun4v", | 540 | .typename = "vsun4v", |
600 | .enable = sun4v_virq_enable, | 541 | .enable = sun4v_virq_enable, |
@@ -603,59 +544,48 @@ static struct irq_chip sun4v_virq = { | |||
603 | .set_affinity = sun4v_virt_set_affinity, | 544 | .set_affinity = sun4v_virt_set_affinity, |
604 | }; | 545 | }; |
605 | 546 | ||
606 | static struct irq_chip sun4v_virq_ack = { | ||
607 | .typename = "vsun4v+ack", | ||
608 | .enable = sun4v_virq_enable, | ||
609 | .disable = sun4v_virq_disable, | ||
610 | .ack = run_pre_handler, | ||
611 | .end = sun4v_virq_end, | ||
612 | .set_affinity = sun4v_virt_set_affinity, | ||
613 | }; | ||
614 | |||
615 | void irq_install_pre_handler(int virt_irq, | 547 | void irq_install_pre_handler(int virt_irq, |
616 | void (*func)(unsigned int, void *, void *), | 548 | void (*func)(unsigned int, void *, void *), |
617 | void *arg1, void *arg2) | 549 | void *arg1, void *arg2) |
618 | { | 550 | { |
619 | struct irq_handler_data *data = get_irq_chip_data(virt_irq); | 551 | struct irq_handler_data *data = get_irq_chip_data(virt_irq); |
620 | struct irq_chip *chip; | 552 | struct irq_chip *chip = get_irq_chip(virt_irq); |
553 | |||
554 | if (WARN_ON(chip == &sun4v_irq || chip == &sun4v_virq)) { | ||
555 | printk(KERN_ERR "IRQ: Trying to install pre-handler on " | ||
556 | "sun4v irq %u\n", virt_irq); | ||
557 | return; | ||
558 | } | ||
621 | 559 | ||
622 | data->pre_handler = func; | 560 | data->pre_handler = func; |
623 | data->pre_handler_arg1 = arg1; | 561 | data->pre_handler_arg1 = arg1; |
624 | data->pre_handler_arg2 = arg2; | 562 | data->pre_handler_arg2 = arg2; |
625 | 563 | ||
626 | chip = get_irq_chip(virt_irq); | 564 | if (chip == &sun4u_irq_ack) |
627 | if (chip == &sun4u_irq_ack || | ||
628 | chip == &sun4v_irq_ack || | ||
629 | chip == &sun4v_virq_ack | ||
630 | #ifdef CONFIG_PCI_MSI | ||
631 | || chip == &sun4v_msi | ||
632 | #endif | ||
633 | ) | ||
634 | return; | 565 | return; |
635 | 566 | ||
636 | chip = (chip == &sun4u_irq ? | 567 | set_irq_chip(virt_irq, &sun4u_irq_ack); |
637 | &sun4u_irq_ack : | ||
638 | (chip == &sun4v_irq ? | ||
639 | &sun4v_irq_ack : &sun4v_virq_ack)); | ||
640 | set_irq_chip(virt_irq, chip); | ||
641 | } | 568 | } |
642 | 569 | ||
643 | unsigned int build_irq(int inofixup, unsigned long iclr, unsigned long imap) | 570 | unsigned int build_irq(int inofixup, unsigned long iclr, unsigned long imap) |
644 | { | 571 | { |
645 | struct ino_bucket *bucket; | 572 | struct ino_bucket *bucket; |
646 | struct irq_handler_data *data; | 573 | struct irq_handler_data *data; |
574 | unsigned int virt_irq; | ||
647 | int ino; | 575 | int ino; |
648 | 576 | ||
649 | BUG_ON(tlb_type == hypervisor); | 577 | BUG_ON(tlb_type == hypervisor); |
650 | 578 | ||
651 | ino = (upa_readq(imap) & (IMAP_IGN | IMAP_INO)) + inofixup; | 579 | ino = (upa_readq(imap) & (IMAP_IGN | IMAP_INO)) + inofixup; |
652 | bucket = &ivector_table[ino]; | 580 | bucket = &ivector_table[ino]; |
653 | if (!bucket->virt_irq) { | 581 | virt_irq = bucket_get_virt_irq(__pa(bucket)); |
654 | bucket->virt_irq = virt_irq_alloc(__irq(bucket)); | 582 | if (!virt_irq) { |
655 | set_irq_chip(bucket->virt_irq, &sun4u_irq); | 583 | virt_irq = virt_irq_alloc(0, ino); |
584 | bucket_set_virt_irq(__pa(bucket), virt_irq); | ||
585 | set_irq_chip(virt_irq, &sun4u_irq); | ||
656 | } | 586 | } |
657 | 587 | ||
658 | data = get_irq_chip_data(bucket->virt_irq); | 588 | data = get_irq_chip_data(virt_irq); |
659 | if (unlikely(data)) | 589 | if (unlikely(data)) |
660 | goto out; | 590 | goto out; |
661 | 591 | ||
@@ -664,13 +594,13 @@ unsigned int build_irq(int inofixup, unsigned long iclr, unsigned long imap) | |||
664 | prom_printf("IRQ: kzalloc(irq_handler_data) failed.\n"); | 594 | prom_printf("IRQ: kzalloc(irq_handler_data) failed.\n"); |
665 | prom_halt(); | 595 | prom_halt(); |
666 | } | 596 | } |
667 | set_irq_chip_data(bucket->virt_irq, data); | 597 | set_irq_chip_data(virt_irq, data); |
668 | 598 | ||
669 | data->imap = imap; | 599 | data->imap = imap; |
670 | data->iclr = iclr; | 600 | data->iclr = iclr; |
671 | 601 | ||
672 | out: | 602 | out: |
673 | return bucket->virt_irq; | 603 | return virt_irq; |
674 | } | 604 | } |
675 | 605 | ||
676 | static unsigned int sun4v_build_common(unsigned long sysino, | 606 | static unsigned int sun4v_build_common(unsigned long sysino, |
@@ -678,16 +608,19 @@ static unsigned int sun4v_build_common(unsigned long sysino, | |||
678 | { | 608 | { |
679 | struct ino_bucket *bucket; | 609 | struct ino_bucket *bucket; |
680 | struct irq_handler_data *data; | 610 | struct irq_handler_data *data; |
611 | unsigned int virt_irq; | ||
681 | 612 | ||
682 | BUG_ON(tlb_type != hypervisor); | 613 | BUG_ON(tlb_type != hypervisor); |
683 | 614 | ||
684 | bucket = &ivector_table[sysino]; | 615 | bucket = &ivector_table[sysino]; |
685 | if (!bucket->virt_irq) { | 616 | virt_irq = bucket_get_virt_irq(__pa(bucket)); |
686 | bucket->virt_irq = virt_irq_alloc(__irq(bucket)); | 617 | if (!virt_irq) { |
687 | set_irq_chip(bucket->virt_irq, chip); | 618 | virt_irq = virt_irq_alloc(0, sysino); |
619 | bucket_set_virt_irq(__pa(bucket), virt_irq); | ||
620 | set_irq_chip(virt_irq, chip); | ||
688 | } | 621 | } |
689 | 622 | ||
690 | data = get_irq_chip_data(bucket->virt_irq); | 623 | data = get_irq_chip_data(virt_irq); |
691 | if (unlikely(data)) | 624 | if (unlikely(data)) |
692 | goto out; | 625 | goto out; |
693 | 626 | ||
@@ -696,7 +629,7 @@ static unsigned int sun4v_build_common(unsigned long sysino, | |||
696 | prom_printf("IRQ: kzalloc(irq_handler_data) failed.\n"); | 629 | prom_printf("IRQ: kzalloc(irq_handler_data) failed.\n"); |
697 | prom_halt(); | 630 | prom_halt(); |
698 | } | 631 | } |
699 | set_irq_chip_data(bucket->virt_irq, data); | 632 | set_irq_chip_data(virt_irq, data); |
700 | 633 | ||
701 | /* Catch accidental accesses to these things. IMAP/ICLR handling | 634 | /* Catch accidental accesses to these things. IMAP/ICLR handling |
702 | * is done by hypervisor calls on sun4v platforms, not by direct | 635 | * is done by hypervisor calls on sun4v platforms, not by direct |
@@ -706,7 +639,7 @@ static unsigned int sun4v_build_common(unsigned long sysino, | |||
706 | data->iclr = ~0UL; | 639 | data->iclr = ~0UL; |
707 | 640 | ||
708 | out: | 641 | out: |
709 | return bucket->virt_irq; | 642 | return virt_irq; |
710 | } | 643 | } |
711 | 644 | ||
712 | unsigned int sun4v_build_irq(u32 devhandle, unsigned int devino) | 645 | unsigned int sun4v_build_irq(u32 devhandle, unsigned int devino) |
@@ -718,86 +651,52 @@ unsigned int sun4v_build_irq(u32 devhandle, unsigned int devino) | |||
718 | 651 | ||
719 | unsigned int sun4v_build_virq(u32 devhandle, unsigned int devino) | 652 | unsigned int sun4v_build_virq(u32 devhandle, unsigned int devino) |
720 | { | 653 | { |
721 | unsigned long sysino, hv_err; | ||
722 | unsigned int virq; | ||
723 | |||
724 | BUG_ON(devhandle & devino); | ||
725 | |||
726 | sysino = devhandle | devino; | ||
727 | BUG_ON(sysino & ~(IMAP_IGN | IMAP_INO)); | ||
728 | |||
729 | hv_err = sun4v_vintr_set_cookie(devhandle, devino, sysino); | ||
730 | if (hv_err) { | ||
731 | prom_printf("IRQ: Fatal, cannot set cookie for [%x:%x] " | ||
732 | "err=%lu\n", devhandle, devino, hv_err); | ||
733 | prom_halt(); | ||
734 | } | ||
735 | |||
736 | virq = sun4v_build_common(sysino, &sun4v_virq); | ||
737 | |||
738 | virt_to_real_irq_table[virq].dev_handle = devhandle; | ||
739 | virt_to_real_irq_table[virq].dev_ino = devino; | ||
740 | |||
741 | return virq; | ||
742 | } | ||
743 | |||
744 | #ifdef CONFIG_PCI_MSI | ||
745 | unsigned int sun4v_build_msi(u32 devhandle, unsigned int *virt_irq_p, | ||
746 | unsigned int msi_start, unsigned int msi_end) | ||
747 | { | ||
748 | struct ino_bucket *bucket; | ||
749 | struct irq_handler_data *data; | 654 | struct irq_handler_data *data; |
750 | unsigned long sysino; | 655 | struct ino_bucket *bucket; |
751 | unsigned int devino; | 656 | unsigned long hv_err, cookie; |
752 | 657 | unsigned int virt_irq; | |
753 | BUG_ON(tlb_type != hypervisor); | ||
754 | |||
755 | /* Find a free devino in the given range. */ | ||
756 | for (devino = msi_start; devino < msi_end; devino++) { | ||
757 | sysino = sun4v_devino_to_sysino(devhandle, devino); | ||
758 | bucket = &ivector_table[sysino]; | ||
759 | if (!bucket->virt_irq) | ||
760 | break; | ||
761 | } | ||
762 | if (devino >= msi_end) | ||
763 | return -ENOSPC; | ||
764 | 658 | ||
765 | sysino = sun4v_devino_to_sysino(devhandle, devino); | 659 | bucket = kzalloc(sizeof(struct ino_bucket), GFP_ATOMIC); |
766 | bucket = &ivector_table[sysino]; | 660 | if (unlikely(!bucket)) |
767 | bucket->virt_irq = virt_irq_alloc(__irq(bucket)); | 661 | return 0; |
768 | *virt_irq_p = bucket->virt_irq; | 662 | __flush_dcache_range((unsigned long) bucket, |
769 | set_irq_chip(bucket->virt_irq, &sun4v_msi); | 663 | ((unsigned long) bucket + |
664 | sizeof(struct ino_bucket))); | ||
770 | 665 | ||
771 | data = get_irq_chip_data(bucket->virt_irq); | 666 | virt_irq = virt_irq_alloc(devhandle, devino); |
772 | if (unlikely(data)) | 667 | bucket_set_virt_irq(__pa(bucket), virt_irq); |
773 | return devino; | 668 | set_irq_chip(virt_irq, &sun4v_virq); |
774 | 669 | ||
775 | data = kzalloc(sizeof(struct irq_handler_data), GFP_ATOMIC); | 670 | data = kzalloc(sizeof(struct irq_handler_data), GFP_ATOMIC); |
776 | if (unlikely(!data)) { | 671 | if (unlikely(!data)) |
777 | virt_irq_free(*virt_irq_p); | 672 | return 0; |
778 | return -ENOMEM; | 673 | |
779 | } | 674 | set_irq_chip_data(virt_irq, data); |
780 | set_irq_chip_data(bucket->virt_irq, data); | ||
781 | 675 | ||
676 | /* Catch accidental accesses to these things. IMAP/ICLR handling | ||
677 | * is done by hypervisor calls on sun4v platforms, not by direct | ||
678 | * register accesses. | ||
679 | */ | ||
782 | data->imap = ~0UL; | 680 | data->imap = ~0UL; |
783 | data->iclr = ~0UL; | 681 | data->iclr = ~0UL; |
784 | 682 | ||
785 | return devino; | 683 | cookie = ~__pa(bucket); |
786 | } | 684 | hv_err = sun4v_vintr_set_cookie(devhandle, devino, cookie); |
685 | if (hv_err) { | ||
686 | prom_printf("IRQ: Fatal, cannot set cookie for [%x:%x] " | ||
687 | "err=%lu\n", devhandle, devino, hv_err); | ||
688 | prom_halt(); | ||
689 | } | ||
787 | 690 | ||
788 | void sun4v_destroy_msi(unsigned int virt_irq) | 691 | return virt_irq; |
789 | { | ||
790 | virt_irq_free(virt_irq); | ||
791 | } | 692 | } |
792 | #endif | ||
793 | 693 | ||
794 | void ack_bad_irq(unsigned int virt_irq) | 694 | void ack_bad_irq(unsigned int virt_irq) |
795 | { | 695 | { |
796 | struct ino_bucket *bucket = virt_irq_to_bucket(virt_irq); | 696 | unsigned int ino = virt_irq_table[virt_irq].dev_ino; |
797 | unsigned int ino = 0xdeadbeef; | ||
798 | 697 | ||
799 | if (bucket) | 698 | if (!ino) |
800 | ino = bucket - &ivector_table[0]; | 699 | ino = 0xdeadbeef; |
801 | 700 | ||
802 | printk(KERN_CRIT "Unexpected IRQ from ino[%x] virt_irq[%u]\n", | 701 | printk(KERN_CRIT "Unexpected IRQ from ino[%x] virt_irq[%u]\n", |
803 | ino, virt_irq); | 702 | ino, virt_irq); |
@@ -805,7 +704,7 @@ void ack_bad_irq(unsigned int virt_irq) | |||
805 | 704 | ||
806 | void handler_irq(int irq, struct pt_regs *regs) | 705 | void handler_irq(int irq, struct pt_regs *regs) |
807 | { | 706 | { |
808 | struct ino_bucket *bucket; | 707 | unsigned long pstate, bucket_pa; |
809 | struct pt_regs *old_regs; | 708 | struct pt_regs *old_regs; |
810 | 709 | ||
811 | clear_softint(1 << irq); | 710 | clear_softint(1 << irq); |
@@ -813,15 +712,28 @@ void handler_irq(int irq, struct pt_regs *regs) | |||
813 | old_regs = set_irq_regs(regs); | 712 | old_regs = set_irq_regs(regs); |
814 | irq_enter(); | 713 | irq_enter(); |
815 | 714 | ||
816 | /* Sliiiick... */ | 715 | /* Grab an atomic snapshot of the pending IVECs. */ |
817 | bucket = __bucket(xchg32(irq_work(smp_processor_id()), 0)); | 716 | __asm__ __volatile__("rdpr %%pstate, %0\n\t" |
818 | while (bucket) { | 717 | "wrpr %0, %3, %%pstate\n\t" |
819 | struct ino_bucket *next = __bucket(bucket->irq_chain); | 718 | "ldx [%2], %1\n\t" |
719 | "stx %%g0, [%2]\n\t" | ||
720 | "wrpr %0, 0x0, %%pstate\n\t" | ||
721 | : "=&r" (pstate), "=&r" (bucket_pa) | ||
722 | : "r" (irq_work_pa(smp_processor_id())), | ||
723 | "i" (PSTATE_IE) | ||
724 | : "memory"); | ||
725 | |||
726 | while (bucket_pa) { | ||
727 | unsigned long next_pa; | ||
728 | unsigned int virt_irq; | ||
820 | 729 | ||
821 | bucket->irq_chain = 0; | 730 | next_pa = bucket_get_chain_pa(bucket_pa); |
822 | __do_IRQ(bucket->virt_irq); | 731 | virt_irq = bucket_get_virt_irq(bucket_pa); |
732 | bucket_clear_chain_pa(bucket_pa); | ||
823 | 733 | ||
824 | bucket = next; | 734 | __do_IRQ(virt_irq); |
735 | |||
736 | bucket_pa = next_pa; | ||
825 | } | 737 | } |
826 | 738 | ||
827 | irq_exit(); | 739 | irq_exit(); |
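The rewritten handler_irq() swaps the per-cpu work list head with zero while PSTATE_IE is cleared, then walks the detached chain at leisure; nothing in the loop can race with the trap handler because the whole list was taken in one shot. A rough user-space model of that pattern using C11 atomics; the bucket and work-list names here are illustrative, not the kernel's, and a pointer stands in for the chain physical address:

    #include <stdatomic.h>
    #include <stdio.h>

    struct bucket {
        struct bucket *next;        /* stands in for bucket_get_chain_pa() */
        unsigned int virt_irq;
    };

    /* Per-cpu pending list head, filled by the "trap handler" side. */
    static _Atomic(struct bucket *) irq_worklist;

    static void handle_pending(void)
    {
        /* Atomically detach the whole chain, like the ldx/stx %%g0 pair
         * executed with interrupts disabled in the real handler. */
        struct bucket *b = atomic_exchange(&irq_worklist, NULL);

        while (b) {
            struct bucket *next = b->next;

            b->next = NULL;                 /* bucket_clear_chain_pa() analogue */
            printf("dispatch virt_irq %u\n", b->virt_irq);
            b = next;
        }
    }

    int main(void)
    {
        static struct bucket b1 = { .virt_irq = 5 }, b2 = { .virt_irq = 9 };

        b2.next = &b1;
        atomic_store(&irq_worklist, &b2);
        handle_pending();
        return 0;
    }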
@@ -921,7 +833,7 @@ void init_irqwork_curcpu(void) | |||
921 | { | 833 | { |
922 | int cpu = hard_smp_processor_id(); | 834 | int cpu = hard_smp_processor_id(); |
923 | 835 | ||
924 | trap_block[cpu].irq_worklist = 0; | 836 | trap_block[cpu].irq_worklist_pa = 0UL; |
925 | } | 837 | } |
926 | 838 | ||
927 | /* Please be very careful with register_one_mondo() and | 839 | /* Please be very careful with register_one_mondo() and |
@@ -1035,9 +947,21 @@ static struct irqaction timer_irq_action = { | |||
1035 | /* Only invoked on boot processor. */ | 947 | /* Only invoked on boot processor. */ |
1036 | void __init init_IRQ(void) | 948 | void __init init_IRQ(void) |
1037 | { | 949 | { |
950 | unsigned long size; | ||
951 | |||
1038 | map_prom_timers(); | 952 | map_prom_timers(); |
1039 | kill_prom_timer(); | 953 | kill_prom_timer(); |
1040 | memset(&ivector_table[0], 0, sizeof(ivector_table)); | 954 | |
955 | size = sizeof(struct ino_bucket) * NUM_IVECS; | ||
956 | ivector_table = alloc_bootmem_low(size); | ||
957 | if (!ivector_table) { | ||
958 | prom_printf("Fatal error, cannot allocate ivector_table\n"); | ||
959 | prom_halt(); | ||
960 | } | ||
961 | __flush_dcache_range((unsigned long) ivector_table, | ||
962 | ((unsigned long) ivector_table) + size); | ||
963 | |||
964 | ivector_table_pa = __pa(ivector_table); | ||
1041 | 965 | ||
1042 | if (tlb_type == hypervisor) | 966 | if (tlb_type == hypervisor) |
1043 | sun4v_init_mondo_queues(); | 967 | sun4v_init_mondo_queues(); |
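init_IRQ() now sizes ivector_table at run time instead of relying on a statically sized array: the table is NUM_IVECS buckets long, flushed from the D-cache, and its physical address is cached in ivector_table_pa for the trap-time code, which works only with physical addresses. A quick sanity check of the allocation size, using illustrative stand-ins for NUM_IVECS and the bucket layout (the real values come from the sparc64 headers, not from this sketch):

    #include <stdio.h>

    /* Illustrative stand-ins; the kernel takes these from asm/irq.h. */
    #define NUM_IVECS    (1 << 11)    /* hypothetical: 2048 INO buckets */

    struct ino_bucket {
        unsigned long irq_chain_pa;
        unsigned int virt_irq;
        unsigned int __pad;
    };

    int main(void)
    {
        unsigned long size = sizeof(struct ino_bucket) * NUM_IVECS;

        /* On an LP64 build this is 16 bytes x 2048 = 32 KiB handed to
         * alloc_bootmem_low() in the kernel. */
        printf("ivector_table needs %lu bytes (%lu KiB)\n", size, size / 1024);
        return 0;
    }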
diff --git a/arch/sparc64/kernel/of_device.c b/arch/sparc64/kernel/of_device.c index 4cc77485f536..42d779866fba 100644 --- a/arch/sparc64/kernel/of_device.c +++ b/arch/sparc64/kernel/of_device.c | |||
@@ -872,7 +872,10 @@ __setup("of_debug=", of_debug); | |||
872 | int of_register_driver(struct of_platform_driver *drv, struct bus_type *bus) | 872 | int of_register_driver(struct of_platform_driver *drv, struct bus_type *bus) |
873 | { | 873 | { |
874 | /* initialize common driver fields */ | 874 | /* initialize common driver fields */ |
875 | drv->driver.name = drv->name; | 875 | if (!drv->driver.name) |
876 | drv->driver.name = drv->name; | ||
877 | if (!drv->driver.owner) | ||
878 | drv->driver.owner = drv->owner; | ||
876 | drv->driver.bus = bus; | 879 | drv->driver.bus = bus; |
877 | 880 | ||
878 | /* register with core */ | 881 | /* register with core */ |
diff --git a/arch/sparc64/kernel/pci.c b/arch/sparc64/kernel/pci.c index e8dac81d8a0d..9b808640a193 100644 --- a/arch/sparc64/kernel/pci.c +++ b/arch/sparc64/kernel/pci.c | |||
@@ -29,8 +29,6 @@ | |||
29 | 29 | ||
30 | #include "pci_impl.h" | 30 | #include "pci_impl.h" |
31 | 31 | ||
32 | unsigned long pci_memspace_mask = 0xffffffffUL; | ||
33 | |||
34 | #ifndef CONFIG_PCI | 32 | #ifndef CONFIG_PCI |
35 | /* A "nop" PCI implementation. */ | 33 | /* A "nop" PCI implementation. */ |
36 | asmlinkage int sys_pciconfig_read(unsigned long bus, unsigned long dfn, | 34 | asmlinkage int sys_pciconfig_read(unsigned long bus, unsigned long dfn, |
@@ -1066,8 +1064,8 @@ static int __pci_mmap_make_offset_bus(struct pci_dev *pdev, struct vm_area_struc | |||
1066 | return 0; | 1064 | return 0; |
1067 | } | 1065 | } |
1068 | 1066 | ||
1069 | /* Adjust vm_pgoff of VMA such that it is the physical page offset corresponding | 1067 | /* Adjust vm_pgoff of VMA such that it is the physical page offset |
1070 | * to the 32-bit pci bus offset for DEV requested by the user. | 1068 | * corresponding to the 32-bit pci bus offset for DEV requested by the user. |
1071 | * | 1069 | * |
1072 | * Basically, the user finds the base address for his device which he wishes | 1070 | * Basically, the user finds the base address for his device which he wishes |
1073 | * to mmap. They read the 32-bit value from the config space base register, | 1071 | * to mmap. They read the 32-bit value from the config space base register, |
@@ -1076,21 +1074,35 @@ static int __pci_mmap_make_offset_bus(struct pci_dev *pdev, struct vm_area_struc | |||
1076 | * | 1074 | * |
1077 | * Returns negative error code on failure, zero on success. | 1075 | * Returns negative error code on failure, zero on success. |
1078 | */ | 1076 | */ |
1079 | static int __pci_mmap_make_offset(struct pci_dev *dev, struct vm_area_struct *vma, | 1077 | static int __pci_mmap_make_offset(struct pci_dev *pdev, |
1078 | struct vm_area_struct *vma, | ||
1080 | enum pci_mmap_state mmap_state) | 1079 | enum pci_mmap_state mmap_state) |
1081 | { | 1080 | { |
1082 | unsigned long user_offset = vma->vm_pgoff << PAGE_SHIFT; | 1081 | unsigned long user_paddr, user_size; |
1083 | unsigned long user32 = user_offset & pci_memspace_mask; | 1082 | int i, err; |
1084 | unsigned long largest_base, this_base, addr32; | ||
1085 | int i; | ||
1086 | 1083 | ||
1087 | if ((dev->class >> 8) == PCI_CLASS_BRIDGE_HOST) | 1084 | /* First compute the physical address in vma->vm_pgoff, |
1088 | return __pci_mmap_make_offset_bus(dev, vma, mmap_state); | 1085 | * making sure the user offset is within range in the |
1086 | * appropriate PCI space. | ||
1087 | */ | ||
1088 | err = __pci_mmap_make_offset_bus(pdev, vma, mmap_state); | ||
1089 | if (err) | ||
1090 | return err; | ||
1091 | |||
1092 | /* If this is a mapping on a host bridge, any address | ||
1093 | * is OK. | ||
1094 | */ | ||
1095 | if ((pdev->class >> 8) == PCI_CLASS_BRIDGE_HOST) | ||
1096 | return err; | ||
1097 | |||
1098 | /* Otherwise make sure it's in the range for one of the | ||
1099 | * device's resources. | ||
1100 | */ | ||
1101 | user_paddr = vma->vm_pgoff << PAGE_SHIFT; | ||
1102 | user_size = vma->vm_end - vma->vm_start; | ||
1089 | 1103 | ||
1090 | /* Figure out which base address this is for. */ | ||
1091 | largest_base = 0UL; | ||
1092 | for (i = 0; i <= PCI_ROM_RESOURCE; i++) { | 1104 | for (i = 0; i <= PCI_ROM_RESOURCE; i++) { |
1093 | struct resource *rp = &dev->resource[i]; | 1105 | struct resource *rp = &pdev->resource[i]; |
1094 | 1106 | ||
1095 | /* Active? */ | 1107 | /* Active? */ |
1096 | if (!rp->flags) | 1108 | if (!rp->flags) |
@@ -1108,26 +1120,14 @@ static int __pci_mmap_make_offset(struct pci_dev *dev, struct vm_area_struct *vm | |||
1108 | continue; | 1120 | continue; |
1109 | } | 1121 | } |
1110 | 1122 | ||
1111 | this_base = rp->start; | 1123 | if ((rp->start <= user_paddr) && |
1112 | 1124 | (user_paddr + user_size) <= (rp->end + 1UL)) | |
1113 | addr32 = (this_base & PAGE_MASK) & pci_memspace_mask; | 1125 | break; |
1114 | |||
1115 | if (mmap_state == pci_mmap_io) | ||
1116 | addr32 &= 0xffffff; | ||
1117 | |||
1118 | if (addr32 <= user32 && this_base > largest_base) | ||
1119 | largest_base = this_base; | ||
1120 | } | 1126 | } |
1121 | 1127 | ||
1122 | if (largest_base == 0UL) | 1128 | if (i > PCI_ROM_RESOURCE) |
1123 | return -EINVAL; | 1129 | return -EINVAL; |
1124 | 1130 | ||
1125 | /* Now construct the final physical address. */ | ||
1126 | if (mmap_state == pci_mmap_io) | ||
1127 | vma->vm_pgoff = (((largest_base & ~0xffffffUL) | user32) >> PAGE_SHIFT); | ||
1128 | else | ||
1129 | vma->vm_pgoff = (((largest_base & ~(pci_memspace_mask)) | user32) >> PAGE_SHIFT); | ||
1130 | |||
1131 | return 0; | 1131 | return 0; |
1132 | } | 1132 | } |
1133 | 1133 | ||
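The rewritten __pci_mmap_make_offset() no longer reconstructs a physical address from a 32-bit bus offset and pci_memspace_mask; the caller now supplies the full physical offset, and the kernel only verifies that the requested [paddr, paddr+size) window sits entirely inside one of the device's resources. A compact, self-contained model of that containment test; the resource values below are made up:

    #include <stdbool.h>
    #include <stdio.h>

    struct resource { unsigned long start, end; };  /* end is inclusive, as in Linux */

    static bool mmap_range_ok(const struct resource *res, int nres,
                              unsigned long paddr, unsigned long size)
    {
        int i;

        for (i = 0; i < nres; i++) {
            if (res[i].start <= paddr &&
                paddr + size <= res[i].end + 1UL)
                return true;
        }
        return false;           /* mirrors the -EINVAL case in the kernel */
    }

    int main(void)
    {
        struct resource bars[] = {
            { 0x7f000000UL, 0x7f00ffffUL },         /* hypothetical 64 KiB BAR */
        };

        printf("%d\n", mmap_range_ok(bars, 1, 0x7f004000UL, 0x2000UL)); /* 1 */
        printf("%d\n", mmap_range_ok(bars, 1, 0x7f00f000UL, 0x2000UL)); /* 0 */
        return 0;
    }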
diff --git a/arch/sparc64/kernel/pci_fire.c b/arch/sparc64/kernel/pci_fire.c index 14d67fe21ab2..fef3b37487bf 100644 --- a/arch/sparc64/kernel/pci_fire.c +++ b/arch/sparc64/kernel/pci_fire.c | |||
@@ -6,9 +6,12 @@ | |||
6 | #include <linux/pci.h> | 6 | #include <linux/pci.h> |
7 | #include <linux/slab.h> | 7 | #include <linux/slab.h> |
8 | #include <linux/init.h> | 8 | #include <linux/init.h> |
9 | #include <linux/msi.h> | ||
10 | #include <linux/irq.h> | ||
9 | 11 | ||
10 | #include <asm/oplib.h> | 12 | #include <asm/oplib.h> |
11 | #include <asm/prom.h> | 13 | #include <asm/prom.h> |
14 | #include <asm/irq.h> | ||
12 | 15 | ||
13 | #include "pci_impl.h" | 16 | #include "pci_impl.h" |
14 | 17 | ||
@@ -84,6 +87,266 @@ static int pci_fire_pbm_iommu_init(struct pci_pbm_info *pbm) | |||
84 | return 0; | 87 | return 0; |
85 | } | 88 | } |
86 | 89 | ||
90 | #ifdef CONFIG_PCI_MSI | ||
91 | struct pci_msiq_entry { | ||
92 | u64 word0; | ||
93 | #define MSIQ_WORD0_RESV 0x8000000000000000UL | ||
94 | #define MSIQ_WORD0_FMT_TYPE 0x7f00000000000000UL | ||
95 | #define MSIQ_WORD0_FMT_TYPE_SHIFT 56 | ||
96 | #define MSIQ_WORD0_LEN 0x00ffc00000000000UL | ||
97 | #define MSIQ_WORD0_LEN_SHIFT 46 | ||
98 | #define MSIQ_WORD0_ADDR0 0x00003fff00000000UL | ||
99 | #define MSIQ_WORD0_ADDR0_SHIFT 32 | ||
100 | #define MSIQ_WORD0_RID 0x00000000ffff0000UL | ||
101 | #define MSIQ_WORD0_RID_SHIFT 16 | ||
102 | #define MSIQ_WORD0_DATA0 0x000000000000ffffUL | ||
103 | #define MSIQ_WORD0_DATA0_SHIFT 0 | ||
104 | |||
105 | #define MSIQ_TYPE_MSG 0x6 | ||
106 | #define MSIQ_TYPE_MSI32 0xb | ||
107 | #define MSIQ_TYPE_MSI64 0xf | ||
108 | |||
109 | u64 word1; | ||
110 | #define MSIQ_WORD1_ADDR1 0xffffffffffff0000UL | ||
111 | #define MSIQ_WORD1_ADDR1_SHIFT 16 | ||
112 | #define MSIQ_WORD1_DATA1 0x000000000000ffffUL | ||
113 | #define MSIQ_WORD1_DATA1_SHIFT 0 | ||
114 | |||
115 | u64 resv[6]; | ||
116 | }; | ||
117 | |||
118 | /* All MSI registers are offset from pbm->pbm_regs */ | ||
119 | #define EVENT_QUEUE_BASE_ADDR_REG 0x010000UL | ||
120 | #define EVENT_QUEUE_BASE_ADDR_ALL_ONES 0xfffc000000000000UL | ||
121 | |||
122 | #define EVENT_QUEUE_CONTROL_SET(EQ) (0x011000UL + (EQ) * 0x8UL) | ||
123 | #define EVENT_QUEUE_CONTROL_SET_OFLOW 0x0200000000000000UL | ||
124 | #define EVENT_QUEUE_CONTROL_SET_EN 0x0000100000000000UL | ||
125 | |||
126 | #define EVENT_QUEUE_CONTROL_CLEAR(EQ) (0x011200UL + (EQ) * 0x8UL) | ||
127 | #define EVENT_QUEUE_CONTROL_CLEAR_OF 0x0200000000000000UL | ||
128 | #define EVENT_QUEUE_CONTROL_CLEAR_E2I 0x0000800000000000UL | ||
129 | #define EVENT_QUEUE_CONTROL_CLEAR_DIS 0x0000100000000000UL | ||
130 | |||
131 | #define EVENT_QUEUE_STATE(EQ) (0x011400UL + (EQ) * 0x8UL) | ||
132 | #define EVENT_QUEUE_STATE_MASK 0x0000000000000007UL | ||
133 | #define EVENT_QUEUE_STATE_IDLE 0x0000000000000001UL | ||
134 | #define EVENT_QUEUE_STATE_ACTIVE 0x0000000000000002UL | ||
135 | #define EVENT_QUEUE_STATE_ERROR 0x0000000000000004UL | ||
136 | |||
137 | #define EVENT_QUEUE_TAIL(EQ) (0x011600UL + (EQ) * 0x8UL) | ||
138 | #define EVENT_QUEUE_TAIL_OFLOW 0x0200000000000000UL | ||
139 | #define EVENT_QUEUE_TAIL_VAL 0x000000000000007fUL | ||
140 | |||
141 | #define EVENT_QUEUE_HEAD(EQ) (0x011800UL + (EQ) * 0x8UL) | ||
142 | #define EVENT_QUEUE_HEAD_VAL 0x000000000000007fUL | ||
143 | |||
144 | #define MSI_MAP(MSI) (0x020000UL + (MSI) * 0x8UL) | ||
145 | #define MSI_MAP_VALID 0x8000000000000000UL | ||
146 | #define MSI_MAP_EQWR_N 0x4000000000000000UL | ||
147 | #define MSI_MAP_EQNUM 0x000000000000003fUL | ||
148 | |||
149 | #define MSI_CLEAR(MSI) (0x028000UL + (MSI) * 0x8UL) | ||
150 | #define MSI_CLEAR_EQWR_N 0x4000000000000000UL | ||
151 | |||
152 | #define IMONDO_DATA0 0x02C000UL | ||
153 | #define IMONDO_DATA0_DATA 0xffffffffffffffc0UL | ||
154 | |||
155 | #define IMONDO_DATA1 0x02C008UL | ||
156 | #define IMONDO_DATA1_DATA 0xffffffffffffffffUL | ||
157 | |||
158 | #define MSI_32BIT_ADDR 0x034000UL | ||
159 | #define MSI_32BIT_ADDR_VAL 0x00000000ffff0000UL | ||
160 | |||
161 | #define MSI_64BIT_ADDR 0x034008UL | ||
162 | #define MSI_64BIT_ADDR_VAL 0xffffffffffff0000UL | ||
163 | |||
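The Fire MSIQ entry packs several fields into word0, and the driver extracts them with the mask-and-shift pairs defined above. The pattern is easy to verify in isolation, for example for the format/type and data fields; the sample word0 value below is fabricated:

    #include <stdint.h>
    #include <stdio.h>

    #define MSIQ_WORD0_FMT_TYPE          0x7f00000000000000ULL
    #define MSIQ_WORD0_FMT_TYPE_SHIFT    56
    #define MSIQ_WORD0_DATA0             0x000000000000ffffULL
    #define MSIQ_WORD0_DATA0_SHIFT       0

    #define MSIQ_TYPE_MSI64              0xf

    int main(void)
    {
        /* Fabricated entry: fmt/type field 0x78 (type MSI64), data 0x002a. */
        uint64_t word0 = ((uint64_t)0x78 << MSIQ_WORD0_FMT_TYPE_SHIFT) | 0x2a;
        uint64_t type_fmt, type, msi;

        type_fmt = (word0 & MSIQ_WORD0_FMT_TYPE) >> MSIQ_WORD0_FMT_TYPE_SHIFT;
        type = type_fmt >> 3;            /* low 3 bits of type_fmt are the format */
        msi = (word0 & MSIQ_WORD0_DATA0) >> MSIQ_WORD0_DATA0_SHIFT;

        printf("type=%#llx (MSI64? %d) msi=%llu\n",
               (unsigned long long)type, type == MSIQ_TYPE_MSI64,
               (unsigned long long)msi);
        return 0;
    }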
164 | static int pci_fire_get_head(struct pci_pbm_info *pbm, unsigned long msiqid, | ||
165 | unsigned long *head) | ||
166 | { | ||
167 | *head = fire_read(pbm->pbm_regs + EVENT_QUEUE_HEAD(msiqid)); | ||
168 | return 0; | ||
169 | } | ||
170 | |||
171 | static int pci_fire_dequeue_msi(struct pci_pbm_info *pbm, unsigned long msiqid, | ||
172 | unsigned long *head, unsigned long *msi) | ||
173 | { | ||
174 | unsigned long type_fmt, type, msi_num; | ||
175 | struct pci_msiq_entry *base, *ep; | ||
176 | |||
177 | base = (pbm->msi_queues + ((msiqid - pbm->msiq_first) * 8192)); | ||
178 | ep = &base[*head]; | ||
179 | |||
180 | if ((ep->word0 & MSIQ_WORD0_FMT_TYPE) == 0) | ||
181 | return 0; | ||
182 | |||
183 | type_fmt = ((ep->word0 & MSIQ_WORD0_FMT_TYPE) >> | ||
184 | MSIQ_WORD0_FMT_TYPE_SHIFT); | ||
185 | type = (type_fmt >> 3); | ||
186 | if (unlikely(type != MSIQ_TYPE_MSI32 && | ||
187 | type != MSIQ_TYPE_MSI64)) | ||
188 | return -EINVAL; | ||
189 | |||
190 | *msi = msi_num = ((ep->word0 & MSIQ_WORD0_DATA0) >> | ||
191 | MSIQ_WORD0_DATA0_SHIFT); | ||
192 | |||
193 | fire_write(pbm->pbm_regs + MSI_CLEAR(msi_num), | ||
194 | MSI_CLEAR_EQWR_N); | ||
195 | |||
196 | /* Clear the entry. */ | ||
197 | ep->word0 &= ~MSIQ_WORD0_FMT_TYPE; | ||
198 | |||
199 | /* Go to next entry in ring. */ | ||
200 | (*head)++; | ||
201 | if (*head >= pbm->msiq_ent_count) | ||
202 | *head = 0; | ||
203 | |||
204 | return 1; | ||
205 | } | ||
206 | |||
207 | static int pci_fire_set_head(struct pci_pbm_info *pbm, unsigned long msiqid, | ||
208 | unsigned long head) | ||
209 | { | ||
210 | fire_write(pbm->pbm_regs + EVENT_QUEUE_HEAD(msiqid), head); | ||
211 | return 0; | ||
212 | } | ||
213 | |||
214 | static int pci_fire_msi_setup(struct pci_pbm_info *pbm, unsigned long msiqid, | ||
215 | unsigned long msi, int is_msi64) | ||
216 | { | ||
217 | u64 val; | ||
218 | |||
219 | val = fire_read(pbm->pbm_regs + MSI_MAP(msi)); | ||
220 | val &= ~(MSI_MAP_EQNUM); | ||
221 | val |= msiqid; | ||
222 | fire_write(pbm->pbm_regs + MSI_MAP(msi), val); | ||
223 | |||
224 | fire_write(pbm->pbm_regs + MSI_CLEAR(msi), | ||
225 | MSI_CLEAR_EQWR_N); | ||
226 | |||
227 | val = fire_read(pbm->pbm_regs + MSI_MAP(msi)); | ||
228 | val |= MSI_MAP_VALID; | ||
229 | fire_write(pbm->pbm_regs + MSI_MAP(msi), val); | ||
230 | |||
231 | return 0; | ||
232 | } | ||
233 | |||
234 | static int pci_fire_msi_teardown(struct pci_pbm_info *pbm, unsigned long msi) | ||
235 | { | ||
236 | unsigned long msiqid; | ||
237 | u64 val; | ||
238 | |||
239 | val = fire_read(pbm->pbm_regs + MSI_MAP(msi)); | ||
240 | msiqid = (val & MSI_MAP_EQNUM); | ||
241 | |||
242 | val &= ~MSI_MAP_VALID; | ||
243 | |||
244 | fire_write(pbm->pbm_regs + MSI_MAP(msi), val); | ||
245 | |||
246 | return 0; | ||
247 | } | ||
248 | |||
249 | static int pci_fire_msiq_alloc(struct pci_pbm_info *pbm) | ||
250 | { | ||
251 | unsigned long pages, order, i; | ||
252 | |||
253 | order = get_order(512 * 1024); | ||
254 | pages = __get_free_pages(GFP_KERNEL | __GFP_COMP, order); | ||
255 | if (pages == 0UL) { | ||
256 | printk(KERN_ERR "MSI: Cannot allocate MSI queues (o=%lu).\n", | ||
257 | order); | ||
258 | return -ENOMEM; | ||
259 | } | ||
260 | memset((char *)pages, 0, PAGE_SIZE << order); | ||
261 | pbm->msi_queues = (void *) pages; | ||
262 | |||
263 | fire_write(pbm->pbm_regs + EVENT_QUEUE_BASE_ADDR_REG, | ||
264 | (EVENT_QUEUE_BASE_ADDR_ALL_ONES | | ||
265 | __pa(pbm->msi_queues))); | ||
266 | |||
267 | fire_write(pbm->pbm_regs + IMONDO_DATA0, | ||
268 | pbm->portid << 6); | ||
269 | fire_write(pbm->pbm_regs + IMONDO_DATA1, 0); | ||
270 | |||
271 | fire_write(pbm->pbm_regs + MSI_32BIT_ADDR, | ||
272 | pbm->msi32_start); | ||
273 | fire_write(pbm->pbm_regs + MSI_64BIT_ADDR, | ||
274 | pbm->msi64_start); | ||
275 | |||
276 | for (i = 0; i < pbm->msiq_num; i++) { | ||
277 | fire_write(pbm->pbm_regs + EVENT_QUEUE_HEAD(i), 0); | ||
278 | fire_write(pbm->pbm_regs + EVENT_QUEUE_TAIL(i), 0); | ||
279 | } | ||
280 | |||
281 | return 0; | ||
282 | } | ||
283 | |||
284 | static void pci_fire_msiq_free(struct pci_pbm_info *pbm) | ||
285 | { | ||
286 | unsigned long pages, order; | ||
287 | |||
288 | order = get_order(512 * 1024); | ||
289 | pages = (unsigned long) pbm->msi_queues; | ||
290 | |||
291 | free_pages(pages, order); | ||
292 | |||
293 | pbm->msi_queues = NULL; | ||
294 | } | ||
295 | |||
296 | static int pci_fire_msiq_build_irq(struct pci_pbm_info *pbm, | ||
297 | unsigned long msiqid, | ||
298 | unsigned long devino) | ||
299 | { | ||
300 | unsigned long cregs = (unsigned long) pbm->pbm_regs; | ||
301 | unsigned long imap_reg, iclr_reg, int_ctrlr; | ||
302 | unsigned int virt_irq; | ||
303 | int fixup; | ||
304 | u64 val; | ||
305 | |||
306 | imap_reg = cregs + (0x001000UL + (devino * 0x08UL)); | ||
307 | iclr_reg = cregs + (0x001400UL + (devino * 0x08UL)); | ||
308 | |||
309 | /* XXX iterate amongst the 4 IRQ controllers XXX */ | ||
310 | int_ctrlr = (1UL << 6); | ||
311 | |||
312 | val = fire_read(imap_reg); | ||
313 | val |= (1UL << 63) | int_ctrlr; | ||
314 | fire_write(imap_reg, val); | ||
315 | |||
316 | fixup = ((pbm->portid << 6) | devino) - int_ctrlr; | ||
317 | |||
318 | virt_irq = build_irq(fixup, iclr_reg, imap_reg); | ||
319 | if (!virt_irq) | ||
320 | return -ENOMEM; | ||
321 | |||
322 | fire_write(pbm->pbm_regs + | ||
323 | EVENT_QUEUE_CONTROL_SET(msiqid), | ||
324 | EVENT_QUEUE_CONTROL_SET_EN); | ||
325 | |||
326 | return virt_irq; | ||
327 | } | ||
328 | |||
329 | static const struct sparc64_msiq_ops pci_fire_msiq_ops = { | ||
330 | .get_head = pci_fire_get_head, | ||
331 | .dequeue_msi = pci_fire_dequeue_msi, | ||
332 | .set_head = pci_fire_set_head, | ||
333 | .msi_setup = pci_fire_msi_setup, | ||
334 | .msi_teardown = pci_fire_msi_teardown, | ||
335 | .msiq_alloc = pci_fire_msiq_alloc, | ||
336 | .msiq_free = pci_fire_msiq_free, | ||
337 | .msiq_build_irq = pci_fire_msiq_build_irq, | ||
338 | }; | ||
339 | |||
340 | static void pci_fire_msi_init(struct pci_pbm_info *pbm) | ||
341 | { | ||
342 | sparc64_pbm_msi_init(pbm, &pci_fire_msiq_ops); | ||
343 | } | ||
344 | #else /* CONFIG_PCI_MSI */ | ||
345 | static void pci_fire_msi_init(struct pci_pbm_info *pbm) | ||
346 | { | ||
347 | } | ||
348 | #endif /* !(CONFIG_PCI_MSI) */ | ||
349 | |||
87 | /* Based at pbm->controller_regs */ | 350 | /* Based at pbm->controller_regs */ |
88 | #define FIRE_PARITY_CONTROL 0x470010UL | 351 | #define FIRE_PARITY_CONTROL 0x470010UL |
89 | #define FIRE_PARITY_ENAB 0x8000000000000000UL | 352 | #define FIRE_PARITY_ENAB 0x8000000000000000UL |
@@ -176,6 +439,7 @@ static int pci_fire_pbm_init(struct pci_controller_info *p, | |||
176 | { | 439 | { |
177 | const struct linux_prom64_registers *regs; | 440 | const struct linux_prom64_registers *regs; |
178 | struct pci_pbm_info *pbm; | 441 | struct pci_pbm_info *pbm; |
442 | int err; | ||
179 | 443 | ||
180 | if ((portid & 1) == 0) | 444 | if ((portid & 1) == 0) |
181 | pbm = &p->pbm_A; | 445 | pbm = &p->pbm_A; |
@@ -208,7 +472,13 @@ static int pci_fire_pbm_init(struct pci_controller_info *p, | |||
208 | 472 | ||
209 | pci_fire_hw_init(pbm); | 473 | pci_fire_hw_init(pbm); |
210 | 474 | ||
211 | return pci_fire_pbm_iommu_init(pbm); | 475 | err = pci_fire_pbm_iommu_init(pbm); |
476 | if (err) | ||
477 | return err; | ||
478 | |||
479 | pci_fire_msi_init(pbm); | ||
480 | |||
481 | return 0; | ||
212 | } | 482 | } |
213 | 483 | ||
214 | static inline int portid_compare(u32 x, u32 y) | 484 | static inline int portid_compare(u32 x, u32 y) |
@@ -249,13 +519,6 @@ void fire_pci_init(struct device_node *dp, const char *model_name) | |||
249 | 519 | ||
250 | p->pbm_B.iommu = iommu; | 520 | p->pbm_B.iommu = iommu; |
251 | 521 | ||
252 | /* XXX MSI support XXX */ | ||
253 | |||
254 | /* Like PSYCHO and SCHIZO we have a 2GB aligned area | ||
255 | * for memory space. | ||
256 | */ | ||
257 | pci_memspace_mask = 0x7fffffffUL; | ||
258 | |||
259 | if (pci_fire_pbm_init(p, dp, portid)) | 522 | if (pci_fire_pbm_init(p, dp, portid)) |
260 | goto fatal_memory_error; | 523 | goto fatal_memory_error; |
261 | 524 | ||
diff --git a/arch/sparc64/kernel/pci_impl.h b/arch/sparc64/kernel/pci_impl.h index f660c2b685eb..4a50da13ce48 100644 --- a/arch/sparc64/kernel/pci_impl.h +++ b/arch/sparc64/kernel/pci_impl.h | |||
@@ -29,6 +29,33 @@ | |||
29 | #define PCI_STC_FLUSHFLAG_SET(STC) \ | 29 | #define PCI_STC_FLUSHFLAG_SET(STC) \ |
30 | (*((STC)->strbuf_flushflag) != 0UL) | 30 | (*((STC)->strbuf_flushflag) != 0UL) |
31 | 31 | ||
32 | #ifdef CONFIG_PCI_MSI | ||
33 | struct pci_pbm_info; | ||
34 | struct sparc64_msiq_ops { | ||
35 | int (*get_head)(struct pci_pbm_info *pbm, unsigned long msiqid, | ||
36 | unsigned long *head); | ||
37 | int (*dequeue_msi)(struct pci_pbm_info *pbm, unsigned long msiqid, | ||
38 | unsigned long *head, unsigned long *msi); | ||
39 | int (*set_head)(struct pci_pbm_info *pbm, unsigned long msiqid, | ||
40 | unsigned long head); | ||
41 | int (*msi_setup)(struct pci_pbm_info *pbm, unsigned long msiqid, | ||
42 | unsigned long msi, int is_msi64); | ||
43 | int (*msi_teardown)(struct pci_pbm_info *pbm, unsigned long msi); | ||
44 | int (*msiq_alloc)(struct pci_pbm_info *pbm); | ||
45 | void (*msiq_free)(struct pci_pbm_info *pbm); | ||
46 | int (*msiq_build_irq)(struct pci_pbm_info *pbm, unsigned long msiqid, | ||
47 | unsigned long devino); | ||
48 | }; | ||
49 | |||
50 | extern void sparc64_pbm_msi_init(struct pci_pbm_info *pbm, | ||
51 | const struct sparc64_msiq_ops *ops); | ||
52 | |||
53 | struct sparc64_msiq_cookie { | ||
54 | struct pci_pbm_info *pbm; | ||
55 | unsigned long msiqid; | ||
56 | }; | ||
57 | #endif | ||
58 | |||
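This ops table is the heart of the consolidation: each controller backend (Fire, sun4v) supplies only the hardware-specific hooks and the shared pci_msi.c layer drives them. As a shape-only illustration of a backend, a do-nothing stub might look like the fragment below; it reuses the field names declared just above, is purely hypothetical, and would only compile inside the kernel tree:

    /* Hypothetical stub backend, for illustration only. */
    static int stub_get_head(struct pci_pbm_info *pbm, unsigned long msiqid,
                             unsigned long *head)
    {
        *head = 0;
        return 0;
    }

    static int stub_dequeue_msi(struct pci_pbm_info *pbm, unsigned long msiqid,
                                unsigned long *head, unsigned long *msi)
    {
        return 0;               /* "queue empty" */
    }

    static const struct sparc64_msiq_ops stub_msiq_ops = {
        .get_head     = stub_get_head,
        .dequeue_msi  = stub_dequeue_msi,
        /* .set_head, .msi_setup, .msi_teardown, .msiq_alloc,
         * .msiq_free and .msiq_build_irq would be filled in the
         * same way by a real controller driver. */
    };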
32 | struct pci_controller_info; | 59 | struct pci_controller_info; |
33 | 60 | ||
34 | struct pci_pbm_info { | 61 | struct pci_pbm_info { |
@@ -90,6 +117,8 @@ struct pci_pbm_info { | |||
90 | u32 msiq_ent_count; | 117 | u32 msiq_ent_count; |
91 | u32 msiq_first; | 118 | u32 msiq_first; |
92 | u32 msiq_first_devino; | 119 | u32 msiq_first_devino; |
120 | u32 msiq_rotor; | ||
121 | struct sparc64_msiq_cookie *msiq_irq_cookies; | ||
93 | u32 msi_num; | 122 | u32 msi_num; |
94 | u32 msi_first; | 123 | u32 msi_first; |
95 | u32 msi_data_mask; | 124 | u32 msi_data_mask; |
@@ -100,9 +129,11 @@ struct pci_pbm_info { | |||
100 | u32 msi64_len; | 129 | u32 msi64_len; |
101 | void *msi_queues; | 130 | void *msi_queues; |
102 | unsigned long *msi_bitmap; | 131 | unsigned long *msi_bitmap; |
132 | unsigned int *msi_irq_table; | ||
103 | int (*setup_msi_irq)(unsigned int *virt_irq_p, struct pci_dev *pdev, | 133 | int (*setup_msi_irq)(unsigned int *virt_irq_p, struct pci_dev *pdev, |
104 | struct msi_desc *entry); | 134 | struct msi_desc *entry); |
105 | void (*teardown_msi_irq)(unsigned int virt_irq, struct pci_dev *pdev); | 135 | void (*teardown_msi_irq)(unsigned int virt_irq, struct pci_dev *pdev); |
136 | const struct sparc64_msiq_ops *msi_ops; | ||
106 | #endif /* !(CONFIG_PCI_MSI) */ | 137 | #endif /* !(CONFIG_PCI_MSI) */ |
107 | 138 | ||
108 | /* This PBM's streaming buffer. */ | 139 | /* This PBM's streaming buffer. */ |
@@ -126,7 +157,6 @@ struct pci_controller_info { | |||
126 | }; | 157 | }; |
127 | 158 | ||
128 | extern struct pci_pbm_info *pci_pbm_root; | 159 | extern struct pci_pbm_info *pci_pbm_root; |
129 | extern unsigned long pci_memspace_mask; | ||
130 | 160 | ||
131 | extern int pci_num_pbms; | 161 | extern int pci_num_pbms; |
132 | 162 | ||
diff --git a/arch/sparc64/kernel/pci_msi.c b/arch/sparc64/kernel/pci_msi.c new file mode 100644 index 000000000000..31a165fd3e48 --- /dev/null +++ b/arch/sparc64/kernel/pci_msi.c | |||
@@ -0,0 +1,433 @@ | |||
1 | /* pci_msi.c: Sparc64 MSI support common layer. | ||
2 | * | ||
3 | * Copyright (C) 2007 David S. Miller (davem@davemloft.net) | ||
4 | */ | ||
5 | #include <linux/kernel.h> | ||
6 | #include <linux/interrupt.h> | ||
7 | #include <linux/irq.h> | ||
8 | |||
9 | #include "pci_impl.h" | ||
10 | |||
11 | static irqreturn_t sparc64_msiq_interrupt(int irq, void *cookie) | ||
12 | { | ||
13 | struct sparc64_msiq_cookie *msiq_cookie = cookie; | ||
14 | struct pci_pbm_info *pbm = msiq_cookie->pbm; | ||
15 | unsigned long msiqid = msiq_cookie->msiqid; | ||
16 | const struct sparc64_msiq_ops *ops; | ||
17 | unsigned long orig_head, head; | ||
18 | int err; | ||
19 | |||
20 | ops = pbm->msi_ops; | ||
21 | |||
22 | err = ops->get_head(pbm, msiqid, &head); | ||
23 | if (unlikely(err < 0)) | ||
24 | goto err_get_head; | ||
25 | |||
26 | orig_head = head; | ||
27 | for (;;) { | ||
28 | unsigned long msi; | ||
29 | |||
30 | err = ops->dequeue_msi(pbm, msiqid, &head, &msi); | ||
31 | if (likely(err > 0)) | ||
32 | __do_IRQ(pbm->msi_irq_table[msi - pbm->msi_first]); | ||
33 | |||
34 | if (unlikely(err < 0)) | ||
35 | goto err_dequeue; | ||
36 | |||
37 | if (err == 0) | ||
38 | break; | ||
39 | } | ||
40 | if (likely(head != orig_head)) { | ||
41 | err = ops->set_head(pbm, msiqid, head); | ||
42 | if (unlikely(err < 0)) | ||
43 | goto err_set_head; | ||
44 | } | ||
45 | return IRQ_HANDLED; | ||
46 | |||
47 | err_get_head: | ||
48 | printk(KERN_EMERG "MSI: Get head on msiqid[%lu] gives error %d\n", | ||
49 | msiqid, err); | ||
50 | goto err_out; | ||
51 | |||
52 | err_dequeue: | ||
53 | printk(KERN_EMERG "MSI: Dequeue head[%lu] from msiqid[%lu] " | ||
54 | "gives error %d\n", | ||
55 | head, msiqid, err); | ||
56 | goto err_out; | ||
57 | |||
58 | err_set_head: | ||
59 | printk(KERN_EMERG "MSI: Set head[%lu] on msiqid[%lu] " | ||
60 | "gives error %d\n", | ||
61 | head, msiqid, err); | ||
62 | goto err_out; | ||
63 | |||
64 | err_out: | ||
65 | return IRQ_NONE; | ||
66 | } | ||
67 | |||
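The interrupt handler above is a generic "read head, drain entries, write head back" loop; the backend decides how the head is represented (an entry index for Fire, a byte offset for sun4v). A self-contained model of the same control flow, with a trivial in-memory ring standing in for the hardware event queue:

    #include <stdio.h>

    #define QLEN 4

    static unsigned long queue[QLEN] = { 0, 42, 43, 0 };    /* 0 == empty slot */
    static unsigned long hw_head = 1;                        /* pretend hardware head */

    static int get_head(unsigned long *head) { *head = hw_head; return 0; }
    static int set_head(unsigned long head)  { hw_head = head; return 0; }

    /* Returns 1 and advances *head if an entry was consumed, 0 if empty. */
    static int dequeue(unsigned long *head, unsigned long *msi)
    {
        if (!queue[*head])
            return 0;
        *msi = queue[*head];
        queue[*head] = 0;
        *head = (*head + 1) % QLEN;
        return 1;
    }

    int main(void)
    {
        unsigned long head, orig_head, msi;

        get_head(&head);
        orig_head = head;
        while (dequeue(&head, &msi))
            printf("dispatch MSI %lu\n", msi);
        if (head != orig_head)
            set_head(head);     /* ACK consumed entries, as ops->set_head() does */
        return 0;
    }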
68 | static u32 pick_msiq(struct pci_pbm_info *pbm) | ||
69 | { | ||
70 | static DEFINE_SPINLOCK(rotor_lock); | ||
71 | unsigned long flags; | ||
72 | u32 ret, rotor; | ||
73 | |||
74 | spin_lock_irqsave(&rotor_lock, flags); | ||
75 | |||
76 | rotor = pbm->msiq_rotor; | ||
77 | ret = pbm->msiq_first + rotor; | ||
78 | |||
79 | if (++rotor >= pbm->msiq_num) | ||
80 | rotor = 0; | ||
81 | pbm->msiq_rotor = rotor; | ||
82 | |||
83 | spin_unlock_irqrestore(&rotor_lock, flags); | ||
84 | |||
85 | return ret; | ||
86 | } | ||
87 | |||
88 | |||
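pick_msiq() spreads newly allocated MSIs across the available event queues with a simple rotor under a spinlock, so successive allocations do not all pile onto the first queue. Stripped of the locking, the arithmetic is just the following (the msiq_first/msiq_num values are illustrative):

    #include <stdio.h>

    static unsigned int msiq_first = 24;    /* illustrative values only */
    static unsigned int msiq_num = 4;
    static unsigned int msiq_rotor;

    static unsigned int pick_msiq(void)
    {
        unsigned int ret = msiq_first + msiq_rotor;

        if (++msiq_rotor >= msiq_num)
            msiq_rotor = 0;
        return ret;
    }

    int main(void)
    {
        int i;

        for (i = 0; i < 6; i++)             /* prints 24 25 26 27 24 25 */
            printf("%u ", pick_msiq());
        printf("\n");
        return 0;
    }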
89 | static int alloc_msi(struct pci_pbm_info *pbm) | ||
90 | { | ||
91 | int i; | ||
92 | |||
93 | for (i = 0; i < pbm->msi_num; i++) { | ||
94 | if (!test_and_set_bit(i, pbm->msi_bitmap)) | ||
95 | return i + pbm->msi_first; | ||
96 | } | ||
97 | |||
98 | return -ENOENT; | ||
99 | } | ||
100 | |||
101 | static void free_msi(struct pci_pbm_info *pbm, int msi_num) | ||
102 | { | ||
103 | msi_num -= pbm->msi_first; | ||
104 | clear_bit(msi_num, pbm->msi_bitmap); | ||
105 | } | ||
106 | |||
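alloc_msi()/free_msi() manage MSI numbers with a plain bitmap offset by msi_first, so the bitmap index is always zero-based even though the hardware MSI numbers start higher. The same idea in portable C; test_and_set_bit() is modelled with a non-atomic helper here, which is only safe single-threaded, and the constants are hypothetical:

    #include <limits.h>
    #include <stdio.h>

    #define MSI_FIRST    64        /* hypothetical first hardware MSI number */
    #define MSI_NUM      128
    #define BPL          (sizeof(unsigned long) * CHAR_BIT)

    static unsigned long bitmap[MSI_NUM / BPL];

    static int alloc_msi(void)
    {
        int i;

        for (i = 0; i < MSI_NUM; i++) {
            unsigned long mask = 1UL << (i % BPL);
            unsigned long *word = &bitmap[i / BPL];

            if (!(*word & mask)) {          /* non-atomic test_and_set_bit() */
                *word |= mask;
                return i + MSI_FIRST;
            }
        }
        return -1;                          /* -ENOENT in the kernel */
    }

    static void free_msi(int msi)
    {
        msi -= MSI_FIRST;
        bitmap[msi / BPL] &= ~(1UL << (msi % BPL));
    }

    int main(void)
    {
        int a = alloc_msi(), b = alloc_msi();

        printf("%d %d\n", a, b);            /* 64 65 */
        free_msi(a);
        printf("%d\n", alloc_msi());        /* 64 again */
        return 0;
    }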
107 | static struct irq_chip msi_irq = { | ||
108 | .typename = "PCI-MSI", | ||
109 | .mask = mask_msi_irq, | ||
110 | .unmask = unmask_msi_irq, | ||
111 | .enable = unmask_msi_irq, | ||
112 | .disable = mask_msi_irq, | ||
113 | /* XXX affinity XXX */ | ||
114 | }; | ||
115 | |||
116 | int sparc64_setup_msi_irq(unsigned int *virt_irq_p, | ||
117 | struct pci_dev *pdev, | ||
118 | struct msi_desc *entry) | ||
119 | { | ||
120 | struct pci_pbm_info *pbm = pdev->dev.archdata.host_controller; | ||
121 | const struct sparc64_msiq_ops *ops = pbm->msi_ops; | ||
122 | struct msi_msg msg; | ||
123 | int msi, err; | ||
124 | u32 msiqid; | ||
125 | |||
126 | *virt_irq_p = virt_irq_alloc(0, 0); | ||
127 | err = -ENOMEM; | ||
128 | if (!*virt_irq_p) | ||
129 | goto out_err; | ||
130 | |||
131 | set_irq_chip(*virt_irq_p, &msi_irq); | ||
132 | |||
133 | err = alloc_msi(pbm); | ||
134 | if (unlikely(err < 0)) | ||
135 | goto out_virt_irq_free; | ||
136 | |||
137 | msi = err; | ||
138 | |||
139 | msiqid = pick_msiq(pbm); | ||
140 | |||
141 | err = ops->msi_setup(pbm, msiqid, msi, | ||
142 | (entry->msi_attrib.is_64 ? 1 : 0)); | ||
143 | if (err) | ||
144 | goto out_msi_free; | ||
145 | |||
146 | pbm->msi_irq_table[msi - pbm->msi_first] = *virt_irq_p; | ||
147 | |||
148 | if (entry->msi_attrib.is_64) { | ||
149 | msg.address_hi = pbm->msi64_start >> 32; | ||
150 | msg.address_lo = pbm->msi64_start & 0xffffffff; | ||
151 | } else { | ||
152 | msg.address_hi = 0; | ||
153 | msg.address_lo = pbm->msi32_start; | ||
154 | } | ||
155 | msg.data = msi; | ||
156 | |||
157 | set_irq_msi(*virt_irq_p, entry); | ||
158 | write_msi_msg(*virt_irq_p, &msg); | ||
159 | |||
160 | return 0; | ||
161 | |||
162 | out_msi_free: | ||
163 | free_msi(pbm, msi); | ||
164 | |||
165 | out_virt_irq_free: | ||
166 | set_irq_chip(*virt_irq_p, NULL); | ||
167 | virt_irq_free(*virt_irq_p); | ||
168 | *virt_irq_p = 0; | ||
169 | |||
170 | out_err: | ||
171 | return err; | ||
172 | } | ||
173 | |||
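Once the MSI has been bound to a queue, the only device-visible part is the MSI message: the address selects the PBM's 32-bit or 64-bit MSI window and the data payload is simply the allocated MSI number. A small standalone model of that composition; the window addresses below are invented, not real hardware values:

    #include <stdint.h>
    #include <stdio.h>

    struct msi_msg { uint32_t address_hi, address_lo, data; };

    static struct msi_msg compose_msg(uint64_t msi64_start, uint32_t msi32_start,
                                      int is_64bit, unsigned int msi)
    {
        struct msi_msg msg;

        if (is_64bit) {
            msg.address_hi = msi64_start >> 32;
            msg.address_lo = msi64_start & 0xffffffff;
        } else {
            msg.address_hi = 0;
            msg.address_lo = msi32_start;
        }
        msg.data = msi;
        return msg;
    }

    int main(void)
    {
        /* Hypothetical windows, loosely shaped like the OF properties. */
        struct msi_msg m = compose_msg(0x0000007f00000000ULL, 0xffff0000U, 1, 70);

        printf("hi=%#x lo=%#x data=%u\n", m.address_hi, m.address_lo, m.data);
        return 0;
    }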
174 | void sparc64_teardown_msi_irq(unsigned int virt_irq, | ||
175 | struct pci_dev *pdev) | ||
176 | { | ||
177 | struct pci_pbm_info *pbm = pdev->dev.archdata.host_controller; | ||
178 | const struct sparc64_msiq_ops *ops = pbm->msi_ops; | ||
179 | unsigned int msi_num; | ||
180 | int i, err; | ||
181 | |||
182 | for (i = 0; i < pbm->msi_num; i++) { | ||
183 | if (pbm->msi_irq_table[i] == virt_irq) | ||
184 | break; | ||
185 | } | ||
186 | if (i >= pbm->msi_num) { | ||
187 | printk(KERN_ERR "%s: teardown: No MSI for irq %u\n", | ||
188 | pbm->name, virt_irq); | ||
189 | return; | ||
190 | } | ||
191 | |||
192 | msi_num = pbm->msi_first + i; | ||
193 | pbm->msi_irq_table[i] = ~0U; | ||
194 | |||
195 | err = ops->msi_teardown(pbm, msi_num); | ||
196 | if (err) { | ||
197 | printk(KERN_ERR "%s: teardown: ops->teardown() on MSI %u, " | ||
198 | "irq %u, gives error %d\n", | ||
199 | pbm->name, msi_num, virt_irq, err); | ||
200 | return; | ||
201 | } | ||
202 | |||
203 | free_msi(pbm, msi_num); | ||
204 | |||
205 | set_irq_chip(virt_irq, NULL); | ||
206 | virt_irq_free(virt_irq); | ||
207 | } | ||
208 | |||
209 | static int msi_bitmap_alloc(struct pci_pbm_info *pbm) | ||
210 | { | ||
211 | unsigned long size, bits_per_ulong; | ||
212 | |||
213 | bits_per_ulong = sizeof(unsigned long) * 8; | ||
214 | size = (pbm->msi_num + (bits_per_ulong - 1)) & ~(bits_per_ulong - 1); | ||
215 | size /= 8; | ||
216 | BUG_ON(size % sizeof(unsigned long)); | ||
217 | |||
218 | pbm->msi_bitmap = kzalloc(size, GFP_KERNEL); | ||
219 | if (!pbm->msi_bitmap) | ||
220 | return -ENOMEM; | ||
221 | |||
222 | return 0; | ||
223 | } | ||
224 | |||
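msi_bitmap_alloc() rounds the MSI count up to a whole number of unsigned longs before converting bits to bytes, which is exactly what the BUG_ON double-checks. Worked through for one illustrative count:

    #include <stdio.h>

    int main(void)
    {
        unsigned long msi_num = 100;    /* illustrative; comes from the "#msi" property */
        unsigned long bits_per_ulong = sizeof(unsigned long) * 8;
        unsigned long size;

        size = (msi_num + (bits_per_ulong - 1)) & ~(bits_per_ulong - 1);
        size /= 8;                      /* bits -> bytes */

        /* With 64-bit longs: 100 -> 128 bits -> 16 bytes, i.e. 2 longs. */
        printf("bitmap size = %lu bytes\n", size);
        return 0;
    }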
225 | static void msi_bitmap_free(struct pci_pbm_info *pbm) | ||
226 | { | ||
227 | kfree(pbm->msi_bitmap); | ||
228 | pbm->msi_bitmap = NULL; | ||
229 | } | ||
230 | |||
231 | static int msi_table_alloc(struct pci_pbm_info *pbm) | ||
232 | { | ||
233 | int size, i; | ||
234 | |||
235 | size = pbm->msiq_num * sizeof(struct sparc64_msiq_cookie); | ||
236 | pbm->msiq_irq_cookies = kzalloc(size, GFP_KERNEL); | ||
237 | if (!pbm->msiq_irq_cookies) | ||
238 | return -ENOMEM; | ||
239 | |||
240 | for (i = 0; i < pbm->msiq_num; i++) { | ||
241 | struct sparc64_msiq_cookie *p; | ||
242 | |||
243 | p = &pbm->msiq_irq_cookies[i]; | ||
244 | p->pbm = pbm; | ||
245 | p->msiqid = pbm->msiq_first + i; | ||
246 | } | ||
247 | |||
248 | size = pbm->msi_num * sizeof(unsigned int); | ||
249 | pbm->msi_irq_table = kzalloc(size, GFP_KERNEL); | ||
250 | if (!pbm->msi_irq_table) { | ||
251 | kfree(pbm->msiq_irq_cookies); | ||
252 | pbm->msiq_irq_cookies = NULL; | ||
253 | return -ENOMEM; | ||
254 | } | ||
255 | |||
256 | return 0; | ||
257 | } | ||
258 | |||
259 | static void msi_table_free(struct pci_pbm_info *pbm) | ||
260 | { | ||
261 | kfree(pbm->msiq_irq_cookies); | ||
262 | pbm->msiq_irq_cookies = NULL; | ||
263 | |||
264 | kfree(pbm->msi_irq_table); | ||
265 | pbm->msi_irq_table = NULL; | ||
266 | } | ||
267 | |||
268 | static int bringup_one_msi_queue(struct pci_pbm_info *pbm, | ||
269 | const struct sparc64_msiq_ops *ops, | ||
270 | unsigned long msiqid, | ||
271 | unsigned long devino) | ||
272 | { | ||
273 | int irq = ops->msiq_build_irq(pbm, msiqid, devino); | ||
274 | int err; | ||
275 | |||
276 | if (irq < 0) | ||
277 | return irq; | ||
278 | |||
279 | err = request_irq(irq, sparc64_msiq_interrupt, 0, | ||
280 | "MSIQ", | ||
281 | &pbm->msiq_irq_cookies[msiqid - pbm->msiq_first]); | ||
282 | if (err) | ||
283 | return err; | ||
284 | |||
285 | return 0; | ||
286 | } | ||
287 | |||
288 | static int sparc64_bringup_msi_queues(struct pci_pbm_info *pbm, | ||
289 | const struct sparc64_msiq_ops *ops) | ||
290 | { | ||
291 | int i; | ||
292 | |||
293 | for (i = 0; i < pbm->msiq_num; i++) { | ||
294 | unsigned long msiqid = i + pbm->msiq_first; | ||
295 | unsigned long devino = i + pbm->msiq_first_devino; | ||
296 | int err; | ||
297 | |||
298 | err = bringup_one_msi_queue(pbm, ops, msiqid, devino); | ||
299 | if (err) | ||
300 | return err; | ||
301 | } | ||
302 | |||
303 | return 0; | ||
304 | } | ||
305 | |||
306 | void sparc64_pbm_msi_init(struct pci_pbm_info *pbm, | ||
307 | const struct sparc64_msiq_ops *ops) | ||
308 | { | ||
309 | const u32 *val; | ||
310 | int len; | ||
311 | |||
312 | val = of_get_property(pbm->prom_node, "#msi-eqs", &len); | ||
313 | if (!val || len != 4) | ||
314 | goto no_msi; | ||
315 | pbm->msiq_num = *val; | ||
316 | if (pbm->msiq_num) { | ||
317 | const struct msiq_prop { | ||
318 | u32 first_msiq; | ||
319 | u32 num_msiq; | ||
320 | u32 first_devino; | ||
321 | } *mqp; | ||
322 | const struct msi_range_prop { | ||
323 | u32 first_msi; | ||
324 | u32 num_msi; | ||
325 | } *mrng; | ||
326 | const struct addr_range_prop { | ||
327 | u32 msi32_high; | ||
328 | u32 msi32_low; | ||
329 | u32 msi32_len; | ||
330 | u32 msi64_high; | ||
331 | u32 msi64_low; | ||
332 | u32 msi64_len; | ||
333 | } *arng; | ||
334 | |||
335 | val = of_get_property(pbm->prom_node, "msi-eq-size", &len); | ||
336 | if (!val || len != 4) | ||
337 | goto no_msi; | ||
338 | |||
339 | pbm->msiq_ent_count = *val; | ||
340 | |||
341 | mqp = of_get_property(pbm->prom_node, | ||
342 | "msi-eq-to-devino", &len); | ||
343 | if (!mqp) | ||
344 | mqp = of_get_property(pbm->prom_node, | ||
345 | "msi-eq-devino", &len); | ||
346 | if (!mqp || len != sizeof(struct msiq_prop)) | ||
347 | goto no_msi; | ||
348 | |||
349 | pbm->msiq_first = mqp->first_msiq; | ||
350 | pbm->msiq_first_devino = mqp->first_devino; | ||
351 | |||
352 | val = of_get_property(pbm->prom_node, "#msi", &len); | ||
353 | if (!val || len != 4) | ||
354 | goto no_msi; | ||
355 | pbm->msi_num = *val; | ||
356 | |||
357 | mrng = of_get_property(pbm->prom_node, "msi-ranges", &len); | ||
358 | if (!mrng || len != sizeof(struct msi_range_prop)) | ||
359 | goto no_msi; | ||
360 | pbm->msi_first = mrng->first_msi; | ||
361 | |||
362 | val = of_get_property(pbm->prom_node, "msi-data-mask", &len); | ||
363 | if (!val || len != 4) | ||
364 | goto no_msi; | ||
365 | pbm->msi_data_mask = *val; | ||
366 | |||
367 | val = of_get_property(pbm->prom_node, "msix-data-width", &len); | ||
368 | if (!val || len != 4) | ||
369 | goto no_msi; | ||
370 | pbm->msix_data_width = *val; | ||
371 | |||
372 | arng = of_get_property(pbm->prom_node, "msi-address-ranges", | ||
373 | &len); | ||
374 | if (!arng || len != sizeof(struct addr_range_prop)) | ||
375 | goto no_msi; | ||
376 | pbm->msi32_start = ((u64)arng->msi32_high << 32) | | ||
377 | (u64) arng->msi32_low; | ||
378 | pbm->msi64_start = ((u64)arng->msi64_high << 32) | | ||
379 | (u64) arng->msi64_low; | ||
380 | pbm->msi32_len = arng->msi32_len; | ||
381 | pbm->msi64_len = arng->msi64_len; | ||
382 | |||
383 | if (msi_bitmap_alloc(pbm)) | ||
384 | goto no_msi; | ||
385 | |||
386 | if (msi_table_alloc(pbm)) { | ||
387 | msi_bitmap_free(pbm); | ||
388 | goto no_msi; | ||
389 | } | ||
390 | |||
391 | if (ops->msiq_alloc(pbm)) { | ||
392 | msi_table_free(pbm); | ||
393 | msi_bitmap_free(pbm); | ||
394 | goto no_msi; | ||
395 | } | ||
396 | |||
397 | if (sparc64_bringup_msi_queues(pbm, ops)) { | ||
398 | ops->msiq_free(pbm); | ||
399 | msi_table_free(pbm); | ||
400 | msi_bitmap_free(pbm); | ||
401 | goto no_msi; | ||
402 | } | ||
403 | |||
404 | printk(KERN_INFO "%s: MSI Queue first[%u] num[%u] count[%u] " | ||
405 | "devino[0x%x]\n", | ||
406 | pbm->name, | ||
407 | pbm->msiq_first, pbm->msiq_num, | ||
408 | pbm->msiq_ent_count, | ||
409 | pbm->msiq_first_devino); | ||
410 | printk(KERN_INFO "%s: MSI first[%u] num[%u] mask[0x%x] " | ||
411 | "width[%u]\n", | ||
412 | pbm->name, | ||
413 | pbm->msi_first, pbm->msi_num, pbm->msi_data_mask, | ||
414 | pbm->msix_data_width); | ||
415 | printk(KERN_INFO "%s: MSI addr32[0x%lx:0x%x] " | ||
416 | "addr64[0x%lx:0x%x]\n", | ||
417 | pbm->name, | ||
418 | pbm->msi32_start, pbm->msi32_len, | ||
419 | pbm->msi64_start, pbm->msi64_len); | ||
420 | printk(KERN_INFO "%s: MSI queues at RA [%016lx]\n", | ||
421 | pbm->name, | ||
422 | __pa(pbm->msi_queues)); | ||
423 | |||
424 | pbm->msi_ops = ops; | ||
425 | pbm->setup_msi_irq = sparc64_setup_msi_irq; | ||
426 | pbm->teardown_msi_irq = sparc64_teardown_msi_irq; | ||
427 | } | ||
428 | return; | ||
429 | |||
430 | no_msi: | ||
431 | pbm->msiq_num = 0; | ||
432 | printk(KERN_INFO "%s: No MSI support.\n", pbm->name); | ||
433 | } | ||
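sparc64_pbm_msi_init() reads the controller's MSI geometry from OpenFirmware properties; the "msi-address-ranges" property stores each 64-bit window base as two 32-bit cells, recombined with a shift-and-or. That recombination in isolation, with fabricated cell values:

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        /* Two fabricated cells from a hypothetical "msi-address-ranges". */
        uint32_t msi64_high = 0x0000007f;
        uint32_t msi64_low  = 0xf0000000;
        uint64_t msi64_start = ((uint64_t)msi64_high << 32) | (uint64_t)msi64_low;

        printf("msi64_start = %#llx\n", (unsigned long long)msi64_start);
        return 0;
    }

The error paths in the function above also show the teardown order being the reverse of setup: queues, then tables, then the bitmap.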
diff --git a/arch/sparc64/kernel/pci_psycho.c b/arch/sparc64/kernel/pci_psycho.c index b6b4cfea5b5f..d27ee5d528a2 100644 --- a/arch/sparc64/kernel/pci_psycho.c +++ b/arch/sparc64/kernel/pci_psycho.c | |||
@@ -1058,12 +1058,6 @@ void psycho_init(struct device_node *dp, char *model_name) | |||
1058 | p->pbm_A.config_space = p->pbm_B.config_space = | 1058 | p->pbm_A.config_space = p->pbm_B.config_space = |
1059 | (pr_regs[2].phys_addr + PSYCHO_CONFIGSPACE); | 1059 | (pr_regs[2].phys_addr + PSYCHO_CONFIGSPACE); |
1060 | 1060 | ||
1061 | /* | ||
1062 | * Psycho's PCI MEM space is mapped to a 2GB aligned area, so | ||
1063 | * we need to adjust our MEM space mask. | ||
1064 | */ | ||
1065 | pci_memspace_mask = 0x7fffffffUL; | ||
1066 | |||
1067 | psycho_controller_hwinit(&p->pbm_A); | 1061 | psycho_controller_hwinit(&p->pbm_A); |
1068 | 1062 | ||
1069 | if (psycho_iommu_init(&p->pbm_A)) | 1063 | if (psycho_iommu_init(&p->pbm_A)) |
diff --git a/arch/sparc64/kernel/pci_schizo.c b/arch/sparc64/kernel/pci_schizo.c index 3c30bfa1f3a3..9546ba9f5dee 100644 --- a/arch/sparc64/kernel/pci_schizo.c +++ b/arch/sparc64/kernel/pci_schizo.c | |||
@@ -1464,9 +1464,6 @@ static void __schizo_init(struct device_node *dp, char *model_name, int chip_typ | |||
1464 | 1464 | ||
1465 | p->pbm_B.iommu = iommu; | 1465 | p->pbm_B.iommu = iommu; |
1466 | 1466 | ||
1467 | /* Like PSYCHO we have a 2GB aligned area for memory space. */ | ||
1468 | pci_memspace_mask = 0x7fffffffUL; | ||
1469 | |||
1470 | if (schizo_pbm_init(p, dp, portid, chip_type)) | 1467 | if (schizo_pbm_init(p, dp, portid, chip_type)) |
1471 | goto fatal_memory_error; | 1468 | goto fatal_memory_error; |
1472 | 1469 | ||
diff --git a/arch/sparc64/kernel/pci_sun4v.c b/arch/sparc64/kernel/pci_sun4v.c index da724b13e89e..95de1444ee67 100644 --- a/arch/sparc64/kernel/pci_sun4v.c +++ b/arch/sparc64/kernel/pci_sun4v.c | |||
@@ -748,111 +748,102 @@ struct pci_sun4v_msiq_entry { | |||
748 | u64 reserved2; | 748 | u64 reserved2; |
749 | }; | 749 | }; |
750 | 750 | ||
751 | /* For now this just runs as a pre-handler for the real interrupt handler. | 751 | static int pci_sun4v_get_head(struct pci_pbm_info *pbm, unsigned long msiqid, |
752 | * So we just walk through the queue and ACK all the entries, update the | 752 | unsigned long *head) |
753 | * head pointer, and return. | ||
754 | * | ||
755 | * In the longer term it would be nice to do something more integrated | ||
756 | * wherein we can pass in some of this MSI info to the drivers. This | ||
757 | * would be most useful for PCIe fabric error messages, although we could | ||
758 | * invoke those directly from the loop here in order to pass the info around. | ||
759 | */ | ||
760 | static void pci_sun4v_msi_prehandler(unsigned int ino, void *data1, void *data2) | ||
761 | { | 753 | { |
762 | struct pci_pbm_info *pbm = data1; | 754 | unsigned long err, limit; |
763 | struct pci_sun4v_msiq_entry *base, *ep; | ||
764 | unsigned long msiqid, orig_head, head, type, err; | ||
765 | |||
766 | msiqid = (unsigned long) data2; | ||
767 | 755 | ||
768 | head = 0xdeadbeef; | 756 | err = pci_sun4v_msiq_gethead(pbm->devhandle, msiqid, head); |
769 | err = pci_sun4v_msiq_gethead(pbm->devhandle, msiqid, &head); | ||
770 | if (unlikely(err)) | 757 | if (unlikely(err)) |
771 | goto hv_error_get; | 758 | return -ENXIO; |
772 | |||
773 | if (unlikely(head >= (pbm->msiq_ent_count * sizeof(struct pci_sun4v_msiq_entry)))) | ||
774 | goto bad_offset; | ||
775 | |||
776 | head /= sizeof(struct pci_sun4v_msiq_entry); | ||
777 | orig_head = head; | ||
778 | base = (pbm->msi_queues + ((msiqid - pbm->msiq_first) * | ||
779 | (pbm->msiq_ent_count * | ||
780 | sizeof(struct pci_sun4v_msiq_entry)))); | ||
781 | ep = &base[head]; | ||
782 | while ((ep->version_type & MSIQ_TYPE_MASK) != 0) { | ||
783 | type = (ep->version_type & MSIQ_TYPE_MASK) >> MSIQ_TYPE_SHIFT; | ||
784 | if (unlikely(type != MSIQ_TYPE_MSI32 && | ||
785 | type != MSIQ_TYPE_MSI64)) | ||
786 | goto bad_type; | ||
787 | |||
788 | pci_sun4v_msi_setstate(pbm->devhandle, | ||
789 | ep->msi_data /* msi_num */, | ||
790 | HV_MSISTATE_IDLE); | ||
791 | |||
792 | /* Clear the entry. */ | ||
793 | ep->version_type &= ~MSIQ_TYPE_MASK; | ||
794 | |||
795 | /* Go to next entry in ring. */ | ||
796 | head++; | ||
797 | if (head >= pbm->msiq_ent_count) | ||
798 | head = 0; | ||
799 | ep = &base[head]; | ||
800 | } | ||
801 | 759 | ||
802 | if (likely(head != orig_head)) { | 760 | limit = pbm->msiq_ent_count * sizeof(struct pci_sun4v_msiq_entry); |
803 | /* ACK entries by updating head pointer. */ | 761 | if (unlikely(*head >= limit)) |
804 | head *= sizeof(struct pci_sun4v_msiq_entry); | 762 | return -EFBIG; |
805 | err = pci_sun4v_msiq_sethead(pbm->devhandle, msiqid, head); | ||
806 | if (unlikely(err)) | ||
807 | goto hv_error_set; | ||
808 | } | ||
809 | return; | ||
810 | 763 | ||
811 | hv_error_set: | 764 | return 0; |
812 | printk(KERN_EMERG "MSI: Hypervisor set head gives error %lu\n", err); | 765 | } |
813 | goto hv_error_cont; | ||
814 | 766 | ||
815 | hv_error_get: | 767 | static int pci_sun4v_dequeue_msi(struct pci_pbm_info *pbm, |
816 | printk(KERN_EMERG "MSI: Hypervisor get head gives error %lu\n", err); | 768 | unsigned long msiqid, unsigned long *head, |
769 | unsigned long *msi) | ||
770 | { | ||
771 | struct pci_sun4v_msiq_entry *ep; | ||
772 | unsigned long err, type; | ||
817 | 773 | ||
818 | hv_error_cont: | 774 | /* Note: void pointer arithmetic, 'head' is a byte offset */ |
819 | printk(KERN_EMERG "MSI: devhandle[%x] msiqid[%lx] head[%lu]\n", | 775 | ep = (pbm->msi_queues + ((msiqid - pbm->msiq_first) * |
820 | pbm->devhandle, msiqid, head); | 776 | (pbm->msiq_ent_count * |
821 | return; | 777 | sizeof(struct pci_sun4v_msiq_entry))) + |
778 | *head); | ||
822 | 779 | ||
823 | bad_offset: | 780 | if ((ep->version_type & MSIQ_TYPE_MASK) == 0) |
824 | printk(KERN_EMERG "MSI: Hypervisor gives bad offset %lx max(%lx)\n", | 781 | return 0; |
825 | head, pbm->msiq_ent_count * sizeof(struct pci_sun4v_msiq_entry)); | ||
826 | return; | ||
827 | 782 | ||
828 | bad_type: | 783 | type = (ep->version_type & MSIQ_TYPE_MASK) >> MSIQ_TYPE_SHIFT; |
829 | printk(KERN_EMERG "MSI: Entry has bad type %lx\n", type); | 784 | if (unlikely(type != MSIQ_TYPE_MSI32 && |
830 | return; | 785 | type != MSIQ_TYPE_MSI64)) |
786 | return -EINVAL; | ||
787 | |||
788 | *msi = ep->msi_data; | ||
789 | |||
790 | err = pci_sun4v_msi_setstate(pbm->devhandle, | ||
791 | ep->msi_data /* msi_num */, | ||
792 | HV_MSISTATE_IDLE); | ||
793 | if (unlikely(err)) | ||
794 | return -ENXIO; | ||
795 | |||
796 | /* Clear the entry. */ | ||
797 | ep->version_type &= ~MSIQ_TYPE_MASK; | ||
798 | |||
799 | (*head) += sizeof(struct pci_sun4v_msiq_entry); | ||
800 | if (*head >= | ||
801 | (pbm->msiq_ent_count * sizeof(struct pci_sun4v_msiq_entry))) | ||
802 | *head = 0; | ||
803 | |||
804 | return 1; | ||
831 | } | 805 | } |
832 | 806 | ||
833 | static int msi_bitmap_alloc(struct pci_pbm_info *pbm) | 807 | static int pci_sun4v_set_head(struct pci_pbm_info *pbm, unsigned long msiqid, |
808 | unsigned long head) | ||
834 | { | 809 | { |
835 | unsigned long size, bits_per_ulong; | 810 | unsigned long err; |
836 | 811 | ||
837 | bits_per_ulong = sizeof(unsigned long) * 8; | 812 | err = pci_sun4v_msiq_sethead(pbm->devhandle, msiqid, head); |
838 | size = (pbm->msi_num + (bits_per_ulong - 1)) & ~(bits_per_ulong - 1); | 813 | if (unlikely(err)) |
839 | size /= 8; | 814 | return -EINVAL; |
840 | BUG_ON(size % sizeof(unsigned long)); | ||
841 | 815 | ||
842 | pbm->msi_bitmap = kzalloc(size, GFP_KERNEL); | 816 | return 0; |
843 | if (!pbm->msi_bitmap) | 817 | } |
844 | return -ENOMEM; | ||
845 | 818 | ||
819 | static int pci_sun4v_msi_setup(struct pci_pbm_info *pbm, unsigned long msiqid, | ||
820 | unsigned long msi, int is_msi64) | ||
821 | { | ||
822 | if (pci_sun4v_msi_setmsiq(pbm->devhandle, msi, msiqid, | ||
823 | (is_msi64 ? | ||
824 | HV_MSITYPE_MSI64 : HV_MSITYPE_MSI32))) | ||
825 | return -ENXIO; | ||
826 | if (pci_sun4v_msi_setstate(pbm->devhandle, msi, HV_MSISTATE_IDLE)) | ||
827 | return -ENXIO; | ||
828 | if (pci_sun4v_msi_setvalid(pbm->devhandle, msi, HV_MSIVALID_VALID)) | ||
829 | return -ENXIO; | ||
846 | return 0; | 830 | return 0; |
847 | } | 831 | } |
848 | 832 | ||
849 | static void msi_bitmap_free(struct pci_pbm_info *pbm) | 833 | static int pci_sun4v_msi_teardown(struct pci_pbm_info *pbm, unsigned long msi) |
850 | { | 834 | { |
851 | kfree(pbm->msi_bitmap); | 835 | unsigned long err, msiqid; |
852 | pbm->msi_bitmap = NULL; | 836 | |
837 | err = pci_sun4v_msi_getmsiq(pbm->devhandle, msi, &msiqid); | ||
838 | if (err) | ||
839 | return -ENXIO; | ||
840 | |||
841 | pci_sun4v_msi_setvalid(pbm->devhandle, msi, HV_MSIVALID_INVALID); | ||
842 | |||
843 | return 0; | ||
853 | } | 844 | } |
854 | 845 | ||
855 | static int msi_queue_alloc(struct pci_pbm_info *pbm) | 846 | static int pci_sun4v_msiq_alloc(struct pci_pbm_info *pbm) |
856 | { | 847 | { |
857 | unsigned long q_size, alloc_size, pages, order; | 848 | unsigned long q_size, alloc_size, pages, order; |
858 | int i; | 849 | int i; |
@@ -906,232 +897,59 @@ h_error: | |||
906 | return -EINVAL; | 897 | return -EINVAL; |
907 | } | 898 | } |
908 | 899 | ||
909 | 900 | static void pci_sun4v_msiq_free(struct pci_pbm_info *pbm) | |
910 | static int alloc_msi(struct pci_pbm_info *pbm) | ||
911 | { | 901 | { |
902 | unsigned long q_size, alloc_size, pages, order; | ||
912 | int i; | 903 | int i; |
913 | 904 | ||
914 | for (i = 0; i < pbm->msi_num; i++) { | 905 | for (i = 0; i < pbm->msiq_num; i++) { |
915 | if (!test_and_set_bit(i, pbm->msi_bitmap)) | 906 | unsigned long msiqid = pbm->msiq_first + i; |
916 | return i + pbm->msi_first; | ||
917 | } | ||
918 | |||
919 | return -ENOENT; | ||
920 | } | ||
921 | |||
922 | static void free_msi(struct pci_pbm_info *pbm, int msi_num) | ||
923 | { | ||
924 | msi_num -= pbm->msi_first; | ||
925 | clear_bit(msi_num, pbm->msi_bitmap); | ||
926 | } | ||
927 | |||
928 | static int pci_sun4v_setup_msi_irq(unsigned int *virt_irq_p, | ||
929 | struct pci_dev *pdev, | ||
930 | struct msi_desc *entry) | ||
931 | { | ||
932 | struct pci_pbm_info *pbm = pdev->dev.archdata.host_controller; | ||
933 | unsigned long devino, msiqid; | ||
934 | struct msi_msg msg; | ||
935 | int msi_num, err; | ||
936 | |||
937 | *virt_irq_p = 0; | ||
938 | |||
939 | msi_num = alloc_msi(pbm); | ||
940 | if (msi_num < 0) | ||
941 | return msi_num; | ||
942 | |||
943 | err = sun4v_build_msi(pbm->devhandle, virt_irq_p, | ||
944 | pbm->msiq_first_devino, | ||
945 | (pbm->msiq_first_devino + | ||
946 | pbm->msiq_num)); | ||
947 | if (err < 0) | ||
948 | goto out_err; | ||
949 | devino = err; | ||
950 | |||
951 | msiqid = ((devino - pbm->msiq_first_devino) + | ||
952 | pbm->msiq_first); | ||
953 | |||
954 | err = -EINVAL; | ||
955 | if (pci_sun4v_msiq_setstate(pbm->devhandle, msiqid, HV_MSIQSTATE_IDLE)) | ||
956 | if (err) | ||
957 | goto out_err; | ||
958 | |||
959 | if (pci_sun4v_msiq_setvalid(pbm->devhandle, msiqid, HV_MSIQ_VALID)) | ||
960 | goto out_err; | ||
961 | |||
962 | if (pci_sun4v_msi_setmsiq(pbm->devhandle, | ||
963 | msi_num, msiqid, | ||
964 | (entry->msi_attrib.is_64 ? | ||
965 | HV_MSITYPE_MSI64 : HV_MSITYPE_MSI32))) | ||
966 | goto out_err; | ||
967 | |||
968 | if (pci_sun4v_msi_setstate(pbm->devhandle, msi_num, HV_MSISTATE_IDLE)) | ||
969 | goto out_err; | ||
970 | |||
971 | if (pci_sun4v_msi_setvalid(pbm->devhandle, msi_num, HV_MSIVALID_VALID)) | ||
972 | goto out_err; | ||
973 | |||
974 | sparc64_set_msi(*virt_irq_p, msi_num); | ||
975 | 907 | ||
976 | if (entry->msi_attrib.is_64) { | 908 | (void) pci_sun4v_msiq_conf(pbm->devhandle, msiqid, 0UL, 0); |
977 | msg.address_hi = pbm->msi64_start >> 32; | ||
978 | msg.address_lo = pbm->msi64_start & 0xffffffff; | ||
979 | } else { | ||
980 | msg.address_hi = 0; | ||
981 | msg.address_lo = pbm->msi32_start; | ||
982 | } | 909 | } |
983 | msg.data = msi_num; | ||
984 | |||
985 | set_irq_msi(*virt_irq_p, entry); | ||
986 | write_msi_msg(*virt_irq_p, &msg); | ||
987 | 910 | ||
988 | irq_install_pre_handler(*virt_irq_p, | 911 | q_size = pbm->msiq_ent_count * sizeof(struct pci_sun4v_msiq_entry); |
989 | pci_sun4v_msi_prehandler, | 912 | alloc_size = (pbm->msiq_num * q_size); |
990 | pbm, (void *) msiqid); | 913 | order = get_order(alloc_size); |
991 | 914 | ||
992 | return 0; | 915 | pages = (unsigned long) pbm->msi_queues; |
993 | 916 | ||
994 | out_err: | 917 | free_pages(pages, order); |
995 | free_msi(pbm, msi_num); | ||
996 | return err; | ||
997 | 918 | ||
919 | pbm->msi_queues = NULL; | ||
998 | } | 920 | } |
999 | 921 | ||
1000 | static void pci_sun4v_teardown_msi_irq(unsigned int virt_irq, | 922 | static int pci_sun4v_msiq_build_irq(struct pci_pbm_info *pbm, |
1001 | struct pci_dev *pdev) | 923 | unsigned long msiqid, |
924 | unsigned long devino) | ||
1002 | { | 925 | { |
1003 | struct pci_pbm_info *pbm = pdev->dev.archdata.host_controller; | 926 | unsigned int virt_irq = sun4v_build_irq(pbm->devhandle, devino); |
1004 | unsigned long msiqid, err; | ||
1005 | unsigned int msi_num; | ||
1006 | |||
1007 | msi_num = sparc64_get_msi(virt_irq); | ||
1008 | err = pci_sun4v_msi_getmsiq(pbm->devhandle, msi_num, &msiqid); | ||
1009 | if (err) { | ||
1010 | printk(KERN_ERR "%s: getmsiq gives error %lu\n", | ||
1011 | pbm->name, err); | ||
1012 | return; | ||
1013 | } | ||
1014 | 927 | ||
1015 | pci_sun4v_msi_setvalid(pbm->devhandle, msi_num, HV_MSIVALID_INVALID); | 928 | if (!virt_irq) |
1016 | pci_sun4v_msiq_setvalid(pbm->devhandle, msiqid, HV_MSIQ_INVALID); | 929 | return -ENOMEM; |
1017 | 930 | ||
1018 | free_msi(pbm, msi_num); | 931 | if (pci_sun4v_msiq_setstate(pbm->devhandle, msiqid, HV_MSIQSTATE_IDLE)) |
932 | return -EINVAL; | ||
933 | if (pci_sun4v_msiq_setvalid(pbm->devhandle, msiqid, HV_MSIQ_VALID)) | ||
934 | return -EINVAL; | ||
1019 | 935 | ||
1020 | /* The sun4v_destroy_msi() will liberate the devino and thus the MSIQ | 936 | return virt_irq; |
1021 | * allocation. | ||
1022 | */ | ||
1023 | sun4v_destroy_msi(virt_irq); | ||
1024 | } | 937 | } |
1025 | 938 | ||
939 | static const struct sparc64_msiq_ops pci_sun4v_msiq_ops = { | ||
940 | .get_head = pci_sun4v_get_head, | ||
941 | .dequeue_msi = pci_sun4v_dequeue_msi, | ||
942 | .set_head = pci_sun4v_set_head, | ||
943 | .msi_setup = pci_sun4v_msi_setup, | ||
944 | .msi_teardown = pci_sun4v_msi_teardown, | ||
945 | .msiq_alloc = pci_sun4v_msiq_alloc, | ||
946 | .msiq_free = pci_sun4v_msiq_free, | ||
947 | .msiq_build_irq = pci_sun4v_msiq_build_irq, | ||
948 | }; | ||
949 | |||
1026 | static void pci_sun4v_msi_init(struct pci_pbm_info *pbm) | 950 | static void pci_sun4v_msi_init(struct pci_pbm_info *pbm) |
1027 | { | 951 | { |
1028 | const u32 *val; | 952 | sparc64_pbm_msi_init(pbm, &pci_sun4v_msiq_ops); |
1029 | int len; | ||
1030 | |||
1031 | val = of_get_property(pbm->prom_node, "#msi-eqs", &len); | ||
1032 | if (!val || len != 4) | ||
1033 | goto no_msi; | ||
1034 | pbm->msiq_num = *val; | ||
1035 | if (pbm->msiq_num) { | ||
1036 | const struct msiq_prop { | ||
1037 | u32 first_msiq; | ||
1038 | u32 num_msiq; | ||
1039 | u32 first_devino; | ||
1040 | } *mqp; | ||
1041 | const struct msi_range_prop { | ||
1042 | u32 first_msi; | ||
1043 | u32 num_msi; | ||
1044 | } *mrng; | ||
1045 | const struct addr_range_prop { | ||
1046 | u32 msi32_high; | ||
1047 | u32 msi32_low; | ||
1048 | u32 msi32_len; | ||
1049 | u32 msi64_high; | ||
1050 | u32 msi64_low; | ||
1051 | u32 msi64_len; | ||
1052 | } *arng; | ||
1053 | |||
1054 | val = of_get_property(pbm->prom_node, "msi-eq-size", &len); | ||
1055 | if (!val || len != 4) | ||
1056 | goto no_msi; | ||
1057 | |||
1058 | pbm->msiq_ent_count = *val; | ||
1059 | |||
1060 | mqp = of_get_property(pbm->prom_node, | ||
1061 | "msi-eq-to-devino", &len); | ||
1062 | if (!mqp || len != sizeof(struct msiq_prop)) | ||
1063 | goto no_msi; | ||
1064 | |||
1065 | pbm->msiq_first = mqp->first_msiq; | ||
1066 | pbm->msiq_first_devino = mqp->first_devino; | ||
1067 | |||
1068 | val = of_get_property(pbm->prom_node, "#msi", &len); | ||
1069 | if (!val || len != 4) | ||
1070 | goto no_msi; | ||
1071 | pbm->msi_num = *val; | ||
1072 | |||
1073 | mrng = of_get_property(pbm->prom_node, "msi-ranges", &len); | ||
1074 | if (!mrng || len != sizeof(struct msi_range_prop)) | ||
1075 | goto no_msi; | ||
1076 | pbm->msi_first = mrng->first_msi; | ||
1077 | |||
1078 | val = of_get_property(pbm->prom_node, "msi-data-mask", &len); | ||
1079 | if (!val || len != 4) | ||
1080 | goto no_msi; | ||
1081 | pbm->msi_data_mask = *val; | ||
1082 | |||
1083 | val = of_get_property(pbm->prom_node, "msix-data-width", &len); | ||
1084 | if (!val || len != 4) | ||
1085 | goto no_msi; | ||
1086 | pbm->msix_data_width = *val; | ||
1087 | |||
1088 | arng = of_get_property(pbm->prom_node, "msi-address-ranges", | ||
1089 | &len); | ||
1090 | if (!arng || len != sizeof(struct addr_range_prop)) | ||
1091 | goto no_msi; | ||
1092 | pbm->msi32_start = ((u64)arng->msi32_high << 32) | | ||
1093 | (u64) arng->msi32_low; | ||
1094 | pbm->msi64_start = ((u64)arng->msi64_high << 32) | | ||
1095 | (u64) arng->msi64_low; | ||
1096 | pbm->msi32_len = arng->msi32_len; | ||
1097 | pbm->msi64_len = arng->msi64_len; | ||
1098 | |||
1099 | if (msi_bitmap_alloc(pbm)) | ||
1100 | goto no_msi; | ||
1101 | |||
1102 | if (msi_queue_alloc(pbm)) { | ||
1103 | msi_bitmap_free(pbm); | ||
1104 | goto no_msi; | ||
1105 | } | ||
1106 | |||
1107 | printk(KERN_INFO "%s: MSI Queue first[%u] num[%u] count[%u] " | ||
1108 | "devino[0x%x]\n", | ||
1109 | pbm->name, | ||
1110 | pbm->msiq_first, pbm->msiq_num, | ||
1111 | pbm->msiq_ent_count, | ||
1112 | pbm->msiq_first_devino); | ||
1113 | printk(KERN_INFO "%s: MSI first[%u] num[%u] mask[0x%x] " | ||
1114 | "width[%u]\n", | ||
1115 | pbm->name, | ||
1116 | pbm->msi_first, pbm->msi_num, pbm->msi_data_mask, | ||
1117 | pbm->msix_data_width); | ||
1118 | printk(KERN_INFO "%s: MSI addr32[0x%lx:0x%x] " | ||
1119 | "addr64[0x%lx:0x%x]\n", | ||
1120 | pbm->name, | ||
1121 | pbm->msi32_start, pbm->msi32_len, | ||
1122 | pbm->msi64_start, pbm->msi64_len); | ||
1123 | printk(KERN_INFO "%s: MSI queues at RA [%p]\n", | ||
1124 | pbm->name, | ||
1125 | pbm->msi_queues); | ||
1126 | } | ||
1127 | pbm->setup_msi_irq = pci_sun4v_setup_msi_irq; | ||
1128 | pbm->teardown_msi_irq = pci_sun4v_teardown_msi_irq; | ||
1129 | |||
1130 | return; | ||
1131 | |||
1132 | no_msi: | ||
1133 | pbm->msiq_num = 0; | ||
1134 | printk(KERN_INFO "%s: No MSI support.\n", pbm->name); | ||
1135 | } | 953 | } |
1136 | #else /* CONFIG_PCI_MSI */ | 954 | #else /* CONFIG_PCI_MSI */ |
1137 | static void pci_sun4v_msi_init(struct pci_pbm_info *pbm) | 955 | static void pci_sun4v_msi_init(struct pci_pbm_info *pbm) |
@@ -1237,11 +1055,6 @@ void __init sun4v_pci_init(struct device_node *dp, char *model_name) | |||
1237 | 1055 | ||
1238 | p->pbm_B.iommu = iommu; | 1056 | p->pbm_B.iommu = iommu; |
1239 | 1057 | ||
1240 | /* Like PSYCHO and SCHIZO we have a 2GB aligned area | ||
1241 | * for memory space. | ||
1242 | */ | ||
1243 | pci_memspace_mask = 0x7fffffffUL; | ||
1244 | |||
1245 | pci_sun4v_pbm_init(p, dp, devhandle); | 1058 | pci_sun4v_pbm_init(p, dp, devhandle); |
1246 | return; | 1059 | return; |
1247 | 1060 | ||
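The MSI probing in pci_sun4v_msi_init() above repeats one pattern per OpenFirmware property: fetch it, check the reported length, cache the value in the pbm, and jump to no_msi on any mismatch. Condensed to a single property it looks roughly like the sketch below ("#msi" is one of the properties actually read above; the surrounding function provides pbm and the no_msi label):

	/* Sketch of the property-parsing pattern used above; not extra code. */
	const u32 *val;
	int len;

	val = of_get_property(pbm->prom_node, "#msi", &len);
	if (!val || len != 4)		/* missing, or not one 32-bit cell */
		goto no_msi;		/* fall back to "No MSI support"   */
	pbm->msi_num = *val;		/* cache the cell in the PBM       */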
diff --git a/arch/sparc64/kernel/power.c b/arch/sparc64/kernel/power.c index 881a09ee4c4c..850cdffdd69c 100644 --- a/arch/sparc64/kernel/power.c +++ b/arch/sparc64/kernel/power.c | |||
@@ -105,9 +105,11 @@ static struct of_device_id power_match[] = { | |||
105 | }; | 105 | }; |
106 | 106 | ||
107 | static struct of_platform_driver power_driver = { | 107 | static struct of_platform_driver power_driver = { |
108 | .name = "power", | ||
109 | .match_table = power_match, | 108 | .match_table = power_match, |
110 | .probe = power_probe, | 109 | .probe = power_probe, |
110 | .driver = { | ||
111 | .name = "power", | ||
112 | }, | ||
111 | }; | 113 | }; |
112 | 114 | ||
113 | void __init power_init(void) | 115 | void __init power_init(void) |
diff --git a/arch/sparc64/kernel/sun4v_ivec.S b/arch/sparc64/kernel/sun4v_ivec.S index 574bc248bca6..e2f8e1b4882a 100644 --- a/arch/sparc64/kernel/sun4v_ivec.S +++ b/arch/sparc64/kernel/sun4v_ivec.S | |||
@@ -96,19 +96,21 @@ sun4v_dev_mondo: | |||
96 | stxa %g2, [%g4] ASI_QUEUE | 96 | stxa %g2, [%g4] ASI_QUEUE |
97 | membar #Sync | 97 | membar #Sync |
98 | 98 | ||
99 | /* Get &__irq_work[smp_processor_id()] into %g1. */ | 99 | TRAP_LOAD_IRQ_WORK_PA(%g1, %g4) |
100 | TRAP_LOAD_IRQ_WORK(%g1, %g4) | ||
101 | 100 | ||
102 | /* Get &ivector_table[IVEC] into %g4. */ | 101 | /* For VIRQs, cookie is encoded as ~bucket_phys_addr */ |
103 | sethi %hi(ivector_table), %g4 | 102 | brlz,pt %g3, 1f |
104 | sllx %g3, 3, %g3 | 103 | xnor %g3, %g0, %g4 |
105 | or %g4, %lo(ivector_table), %g4 | 104 | |
105 | /* Get __pa(&ivector_table[IVEC]) into %g4. */ | ||
106 | sethi %hi(ivector_table_pa), %g4 | ||
107 | ldx [%g4 + %lo(ivector_table_pa)], %g4 | ||
108 | sllx %g3, 4, %g3 | ||
106 | add %g4, %g3, %g4 | 109 | add %g4, %g3, %g4 |
107 | 110 | ||
108 | /* Insert ivector_table[] entry into __irq_work[] queue. */ | 111 | 1: ldx [%g1], %g2 |
109 | lduw [%g1], %g2 /* g2 = irq_work(cpu) */ | 112 | stxa %g2, [%g4] ASI_PHYS_USE_EC |
110 | stw %g2, [%g4 + 0x00] /* bucket->irq_chain = g2 */ | 113 | stx %g4, [%g1] |
111 | stw %g4, [%g1] /* irq_work(cpu) = bucket */ | ||
112 | 114 | ||
113 | /* Signal the interrupt by setting (1 << pil) in %softint. */ | 115 | /* Signal the interrupt by setting (1 << pil) in %softint. */ |
114 | wr %g0, 1 << PIL_DEVICE_IRQ, %set_softint | 116 | wr %g0, 1 << PIL_DEVICE_IRQ, %set_softint |
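The rewritten dev-mondo path above relies on how sun4v VIRQ cookies are built on the C side of this series: the cookie registered with the hypervisor is the bitwise complement of the bucket's physical address, so its sign bit distinguishes it from a plain INO and a single xnor recovers the address. A rough C rendering of the decode done by the assembly (the 16-byte bucket stride is taken from the sllx-by-4 above; illustration only):

	/* Sketch only: how the mondo payload in %g3 is interpreted above. */
	static unsigned long bucket_pa_from_mondo(unsigned long data,
						  unsigned long ivector_table_pa)
	{
		if ((long) data < 0)		/* negative: a VIRQ cookie */
			return ~data;		/* xnor %g3, %g0, %g4      */
		return ivector_table_pa + data * 16;	/* plain INO index */
	}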
diff --git a/arch/sparc64/kernel/sys_sparc.c b/arch/sparc64/kernel/sys_sparc.c index d108eeb0734f..0d5c50264945 100644 --- a/arch/sparc64/kernel/sys_sparc.c +++ b/arch/sparc64/kernel/sys_sparc.c | |||
@@ -436,7 +436,7 @@ out: | |||
436 | asmlinkage long sys_ipc(unsigned int call, int first, unsigned long second, | 436 | asmlinkage long sys_ipc(unsigned int call, int first, unsigned long second, |
437 | unsigned long third, void __user *ptr, long fifth) | 437 | unsigned long third, void __user *ptr, long fifth) |
438 | { | 438 | { |
439 | int err; | 439 | long err; |
440 | 440 | ||
441 | /* No need for backward compatibility. We can start fresh... */ | 441 | /* No need for backward compatibility. We can start fresh... */ |
442 | if (call <= SEMCTL) { | 442 | if (call <= SEMCTL) { |
@@ -453,16 +453,9 @@ asmlinkage long sys_ipc(unsigned int call, int first, unsigned long second, | |||
453 | err = sys_semget(first, (int)second, (int)third); | 453 | err = sys_semget(first, (int)second, (int)third); |
454 | goto out; | 454 | goto out; |
455 | case SEMCTL: { | 455 | case SEMCTL: { |
456 | union semun fourth; | 456 | err = sys_semctl(first, third, |
457 | err = -EINVAL; | 457 | (int)second | IPC_64, |
458 | if (!ptr) | 458 | (union semun) ptr); |
459 | goto out; | ||
460 | err = -EFAULT; | ||
461 | if (get_user(fourth.__pad, | ||
462 | (void __user * __user *) ptr)) | ||
463 | goto out; | ||
464 | err = sys_semctl(first, (int)second | IPC_64, | ||
465 | (int)third, fourth); | ||
466 | goto out; | 459 | goto out; |
467 | } | 460 | } |
468 | default: | 461 | default: |
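For reference, the kernel entry point called above is sys_semctl(semid, semnum, cmd, arg). The old dispatch swapped semnum and cmd and fetched the union semun value through get_user(), but a 64-bit sparc process passes that value directly in the syscall argument register, so the hunk now hands ptr straight through. The corrected call, mirroring the new code above (sketch only):

	/* Corrected SEMCTL dispatch, as in the hunk above. */
	err = sys_semctl(first,			/* semid                    */
			 third,			/* semnum                   */
			 (int)second | IPC_64,	/* cmd, 64-bit structures   */
			 (union semun) ptr);	/* value passed by register */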
diff --git a/arch/sparc64/kernel/time.c b/arch/sparc64/kernel/time.c index 69cad1b653c1..cd8c740cba1d 100644 --- a/arch/sparc64/kernel/time.c +++ b/arch/sparc64/kernel/time.c | |||
@@ -764,9 +764,11 @@ static struct of_device_id clock_match[] = { | |||
764 | }; | 764 | }; |
765 | 765 | ||
766 | static struct of_platform_driver clock_driver = { | 766 | static struct of_platform_driver clock_driver = { |
767 | .name = "clock", | ||
768 | .match_table = clock_match, | 767 | .match_table = clock_match, |
769 | .probe = clock_probe, | 768 | .probe = clock_probe, |
769 | .driver = { | ||
770 | .name = "clock", | ||
771 | }, | ||
770 | }; | 772 | }; |
771 | 773 | ||
772 | static int __init clock_init(void) | 774 | static int __init clock_init(void) |
diff --git a/arch/sparc64/kernel/traps.c b/arch/sparc64/kernel/traps.c index 6ef42b8e53d8..34573a55b6e5 100644 --- a/arch/sparc64/kernel/traps.c +++ b/arch/sparc64/kernel/traps.c | |||
@@ -2569,8 +2569,8 @@ void __init trap_init(void) | |||
2569 | offsetof(struct trap_per_cpu, tsb_huge)) || | 2569 | offsetof(struct trap_per_cpu, tsb_huge)) || |
2570 | (TRAP_PER_CPU_TSB_HUGE_TEMP != | 2570 | (TRAP_PER_CPU_TSB_HUGE_TEMP != |
2571 | offsetof(struct trap_per_cpu, tsb_huge_temp)) || | 2571 | offsetof(struct trap_per_cpu, tsb_huge_temp)) || |
2572 | (TRAP_PER_CPU_IRQ_WORKLIST != | 2572 | (TRAP_PER_CPU_IRQ_WORKLIST_PA != |
2573 | offsetof(struct trap_per_cpu, irq_worklist)) || | 2573 | offsetof(struct trap_per_cpu, irq_worklist_pa)) || |
2574 | (TRAP_PER_CPU_CPU_MONDO_QMASK != | 2574 | (TRAP_PER_CPU_CPU_MONDO_QMASK != |
2575 | offsetof(struct trap_per_cpu, cpu_mondo_qmask)) || | 2575 | offsetof(struct trap_per_cpu, cpu_mondo_qmask)) || |
2576 | (TRAP_PER_CPU_DEV_MONDO_QMASK != | 2576 | (TRAP_PER_CPU_DEV_MONDO_QMASK != |
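The renamed constant above is one entry in trap_init()'s boot-time consistency check: every assembler-visible TRAP_PER_CPU_* offset is compared with offsetof() on the C structure so the hand-written trap entry code and the C layout cannot drift apart. Reduced to the renamed member, the shape of the check is (illustrative only; the real code treats any mismatch as a fatal boot error):

	/* Sketch of the offset cross-check pattern used in trap_init(). */
	if (TRAP_PER_CPU_IRQ_WORKLIST_PA !=
	    offsetof(struct trap_per_cpu, irq_worklist_pa)) {
		/* mismatch: the asm constant no longer matches the C layout */
	}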
diff --git a/arch/sparc64/kernel/vmlinux.lds.S b/arch/sparc64/kernel/vmlinux.lds.S index b982fa3dd748..9fcd503bc04a 100644 --- a/arch/sparc64/kernel/vmlinux.lds.S +++ b/arch/sparc64/kernel/vmlinux.lds.S | |||
@@ -10,105 +10,138 @@ ENTRY(_start) | |||
10 | jiffies = jiffies_64; | 10 | jiffies = jiffies_64; |
11 | SECTIONS | 11 | SECTIONS |
12 | { | 12 | { |
13 | swapper_low_pmd_dir = 0x0000000000402000; | 13 | swapper_low_pmd_dir = 0x0000000000402000; |
14 | . = 0x4000; | 14 | . = 0x4000; |
15 | .text 0x0000000000404000 : | 15 | .text 0x0000000000404000 : { |
16 | { | 16 | _text = .; |
17 | _text = .; | 17 | TEXT_TEXT |
18 | TEXT_TEXT | 18 | SCHED_TEXT |
19 | SCHED_TEXT | 19 | LOCK_TEXT |
20 | LOCK_TEXT | 20 | KPROBES_TEXT |
21 | KPROBES_TEXT | 21 | *(.gnu.warning) |
22 | *(.gnu.warning) | 22 | } = 0 |
23 | } =0 | 23 | _etext = .; |
24 | _etext = .; | 24 | PROVIDE (etext = .); |
25 | PROVIDE (etext = .); | ||
26 | 25 | ||
27 | RO_DATA(PAGE_SIZE) | 26 | RO_DATA(PAGE_SIZE) |
27 | .data : { | ||
28 | DATA_DATA | ||
29 | CONSTRUCTORS | ||
30 | } | ||
31 | .data1 : { | ||
32 | *(.data1) | ||
33 | } | ||
34 | . = ALIGN(64); | ||
35 | .data.cacheline_aligned : { | ||
36 | *(.data.cacheline_aligned) | ||
37 | } | ||
38 | . = ALIGN(64); | ||
39 | .data.read_mostly : { | ||
40 | *(.data.read_mostly) | ||
41 | } | ||
42 | _edata = .; | ||
43 | PROVIDE (edata = .); | ||
44 | .fixup : { | ||
45 | *(.fixup) | ||
46 | } | ||
47 | . = ALIGN(16); | ||
48 | __ex_table : { | ||
49 | __start___ex_table = .; | ||
50 | *(__ex_table) | ||
51 | __stop___ex_table = .; | ||
52 | } | ||
53 | NOTES | ||
28 | 54 | ||
29 | .data : | 55 | . = ALIGN(PAGE_SIZE); |
30 | { | 56 | .init.text : { |
31 | DATA_DATA | 57 | __init_begin = .; |
32 | CONSTRUCTORS | 58 | _sinittext = .; |
33 | } | 59 | *(.init.text) |
34 | .data1 : { *(.data1) } | 60 | _einittext = .; |
35 | . = ALIGN(64); | 61 | } |
36 | .data.cacheline_aligned : { *(.data.cacheline_aligned) } | 62 | .init.data : { |
37 | . = ALIGN(64); | 63 | *(.init.data) |
38 | .data.read_mostly : { *(.data.read_mostly) } | 64 | } |
39 | _edata = .; | 65 | . = ALIGN(16); |
40 | PROVIDE (edata = .); | 66 | .init.setup : { |
41 | .fixup : { *(.fixup) } | 67 | __setup_start = .; |
68 | *(.init.setup) | ||
69 | __setup_end = .; | ||
70 | } | ||
71 | .initcall.init : { | ||
72 | __initcall_start = .; | ||
73 | INITCALLS | ||
74 | __initcall_end = .; | ||
75 | } | ||
76 | .con_initcall.init : { | ||
77 | __con_initcall_start = .; | ||
78 | *(.con_initcall.init) | ||
79 | __con_initcall_end = .; | ||
80 | } | ||
81 | SECURITY_INIT | ||
42 | 82 | ||
43 | . = ALIGN(16); | 83 | . = ALIGN(4); |
44 | __start___ex_table = .; | 84 | .tsb_ldquad_phys_patch : { |
45 | __ex_table : { *(__ex_table) } | 85 | __tsb_ldquad_phys_patch = .; |
46 | __stop___ex_table = .; | 86 | *(.tsb_ldquad_phys_patch) |
87 | __tsb_ldquad_phys_patch_end = .; | ||
88 | } | ||
47 | 89 | ||
48 | NOTES | 90 | .tsb_phys_patch : { |
91 | __tsb_phys_patch = .; | ||
92 | *(.tsb_phys_patch) | ||
93 | __tsb_phys_patch_end = .; | ||
94 | } | ||
49 | 95 | ||
50 | . = ALIGN(PAGE_SIZE); | 96 | .cpuid_patch : { |
51 | __init_begin = .; | 97 | __cpuid_patch = .; |
52 | .init.text : { | 98 | *(.cpuid_patch) |
53 | _sinittext = .; | 99 | __cpuid_patch_end = .; |
54 | *(.init.text) | 100 | } |
55 | _einittext = .; | 101 | |
56 | } | 102 | .sun4v_1insn_patch : { |
57 | .init.data : { *(.init.data) } | 103 | __sun4v_1insn_patch = .; |
58 | . = ALIGN(16); | 104 | *(.sun4v_1insn_patch) |
59 | __setup_start = .; | 105 | __sun4v_1insn_patch_end = .; |
60 | .init.setup : { *(.init.setup) } | 106 | } |
61 | __setup_end = .; | 107 | .sun4v_2insn_patch : { |
62 | __initcall_start = .; | 108 | __sun4v_2insn_patch = .; |
63 | .initcall.init : { | 109 | *(.sun4v_2insn_patch) |
64 | INITCALLS | 110 | __sun4v_2insn_patch_end = .; |
65 | } | 111 | } |
66 | __initcall_end = .; | ||
67 | __con_initcall_start = .; | ||
68 | .con_initcall.init : { *(.con_initcall.init) } | ||
69 | __con_initcall_end = .; | ||
70 | SECURITY_INIT | ||
71 | . = ALIGN(4); | ||
72 | __tsb_ldquad_phys_patch = .; | ||
73 | .tsb_ldquad_phys_patch : { *(.tsb_ldquad_phys_patch) } | ||
74 | __tsb_ldquad_phys_patch_end = .; | ||
75 | __tsb_phys_patch = .; | ||
76 | .tsb_phys_patch : { *(.tsb_phys_patch) } | ||
77 | __tsb_phys_patch_end = .; | ||
78 | __cpuid_patch = .; | ||
79 | .cpuid_patch : { *(.cpuid_patch) } | ||
80 | __cpuid_patch_end = .; | ||
81 | __sun4v_1insn_patch = .; | ||
82 | .sun4v_1insn_patch : { *(.sun4v_1insn_patch) } | ||
83 | __sun4v_1insn_patch_end = .; | ||
84 | __sun4v_2insn_patch = .; | ||
85 | .sun4v_2insn_patch : { *(.sun4v_2insn_patch) } | ||
86 | __sun4v_2insn_patch_end = .; | ||
87 | 112 | ||
88 | #ifdef CONFIG_BLK_DEV_INITRD | 113 | #ifdef CONFIG_BLK_DEV_INITRD |
89 | . = ALIGN(PAGE_SIZE); | 114 | . = ALIGN(PAGE_SIZE); |
90 | __initramfs_start = .; | 115 | .init.ramfs : { |
91 | .init.ramfs : { *(.init.ramfs) } | 116 | __initramfs_start = .; |
92 | __initramfs_end = .; | 117 | *(.init.ramfs) |
118 | __initramfs_end = .; | ||
119 | } | ||
93 | #endif | 120 | #endif |
94 | 121 | ||
95 | PERCPU(PAGE_SIZE) | 122 | PERCPU(PAGE_SIZE) |
96 | 123 | ||
97 | . = ALIGN(PAGE_SIZE); | 124 | . = ALIGN(PAGE_SIZE); |
98 | __init_end = .; | 125 | __init_end = .; |
99 | __bss_start = .; | 126 | __bss_start = .; |
100 | .sbss : { *(.sbss) *(.scommon) } | 127 | .sbss : { |
101 | .bss : | 128 | *(.sbss) |
102 | { | 129 | *(.scommon) |
103 | *(.dynbss) | 130 | } |
104 | *(.bss) | 131 | .bss : { |
105 | *(COMMON) | 132 | *(.dynbss) |
106 | } | 133 | *(.bss) |
107 | _end = . ; | 134 | *(COMMON) |
108 | PROVIDE (end = .); | 135 | } |
109 | /DISCARD/ : { *(.exit.text) *(.exit.data) *(.exitcall.exit) } | 136 | _end = . ; |
137 | PROVIDE (end = .); | ||
110 | 138 | ||
111 | STABS_DEBUG | 139 | /DISCARD/ : { |
140 | *(.exit.text) | ||
141 | *(.exit.data) | ||
142 | *(.exitcall.exit) | ||
143 | } | ||
112 | 144 | ||
113 | DWARF_DEBUG | 145 | STABS_DEBUG |
146 | DWARF_DEBUG | ||
114 | } | 147 | } |
diff --git a/arch/sparc64/lib/xor.S b/arch/sparc64/lib/xor.S index a79c8888170d..f44f58f40234 100644 --- a/arch/sparc64/lib/xor.S +++ b/arch/sparc64/lib/xor.S | |||
@@ -491,12 +491,12 @@ xor_niagara_4: /* %o0=bytes, %o1=dest, %o2=src1, %o3=src2, %o4=src3 */ | |||
491 | ldda [%i1 + 0x10] %asi, %i2 /* %i2/%i3 = src1 + 0x10 */ | 491 | ldda [%i1 + 0x10] %asi, %i2 /* %i2/%i3 = src1 + 0x10 */ |
492 | xor %g2, %i4, %g2 | 492 | xor %g2, %i4, %g2 |
493 | xor %g3, %i5, %g3 | 493 | xor %g3, %i5, %g3 |
494 | ldda [%i7 + 0x10] %asi, %i4 /* %i4/%i5 = src2 + 0x10 */ | 494 | ldda [%l7 + 0x10] %asi, %i4 /* %i4/%i5 = src2 + 0x10 */ |
495 | xor %l0, %g2, %l0 | 495 | xor %l0, %g2, %l0 |
496 | xor %l1, %g3, %l1 | 496 | xor %l1, %g3, %l1 |
497 | stxa %l0, [%i0 + 0x00] %asi | 497 | stxa %l0, [%i0 + 0x00] %asi |
498 | stxa %l1, [%i0 + 0x08] %asi | 498 | stxa %l1, [%i0 + 0x08] %asi |
499 | ldda [%i6 + 0x10] %asi, %g2 /* %g2/%g3 = src3 + 0x10 */ | 499 | ldda [%l6 + 0x10] %asi, %g2 /* %g2/%g3 = src3 + 0x10 */ |
500 | ldda [%i0 + 0x10] %asi, %l0 /* %l0/%l1 = dest + 0x10 */ | 500 | ldda [%i0 + 0x10] %asi, %l0 /* %l0/%l1 = dest + 0x10 */ |
501 | 501 | ||
502 | xor %i4, %i2, %i4 | 502 | xor %i4, %i2, %i4 |
@@ -504,12 +504,12 @@ xor_niagara_4: /* %o0=bytes, %o1=dest, %o2=src1, %o3=src2, %o4=src3 */ | |||
504 | ldda [%i1 + 0x20] %asi, %i2 /* %i2/%i3 = src1 + 0x20 */ | 504 | ldda [%i1 + 0x20] %asi, %i2 /* %i2/%i3 = src1 + 0x20 */ |
505 | xor %g2, %i4, %g2 | 505 | xor %g2, %i4, %g2 |
506 | xor %g3, %i5, %g3 | 506 | xor %g3, %i5, %g3 |
507 | ldda [%i7 + 0x20] %asi, %i4 /* %i4/%i5 = src2 + 0x20 */ | 507 | ldda [%l7 + 0x20] %asi, %i4 /* %i4/%i5 = src2 + 0x20 */ |
508 | xor %l0, %g2, %l0 | 508 | xor %l0, %g2, %l0 |
509 | xor %l1, %g3, %l1 | 509 | xor %l1, %g3, %l1 |
510 | stxa %l0, [%i0 + 0x10] %asi | 510 | stxa %l0, [%i0 + 0x10] %asi |
511 | stxa %l1, [%i0 + 0x18] %asi | 511 | stxa %l1, [%i0 + 0x18] %asi |
512 | ldda [%i6 + 0x20] %asi, %g2 /* %g2/%g3 = src3 + 0x20 */ | 512 | ldda [%l6 + 0x20] %asi, %g2 /* %g2/%g3 = src3 + 0x20 */ |
513 | ldda [%i0 + 0x20] %asi, %l0 /* %l0/%l1 = dest + 0x20 */ | 513 | ldda [%i0 + 0x20] %asi, %l0 /* %l0/%l1 = dest + 0x20 */ |
514 | 514 | ||
515 | xor %i4, %i2, %i4 | 515 | xor %i4, %i2, %i4 |
@@ -517,12 +517,12 @@ xor_niagara_4: /* %o0=bytes, %o1=dest, %o2=src1, %o3=src2, %o4=src3 */ | |||
517 | ldda [%i1 + 0x30] %asi, %i2 /* %i2/%i3 = src1 + 0x30 */ | 517 | ldda [%i1 + 0x30] %asi, %i2 /* %i2/%i3 = src1 + 0x30 */ |
518 | xor %g2, %i4, %g2 | 518 | xor %g2, %i4, %g2 |
519 | xor %g3, %i5, %g3 | 519 | xor %g3, %i5, %g3 |
520 | ldda [%i7 + 0x30] %asi, %i4 /* %i4/%i5 = src2 + 0x30 */ | 520 | ldda [%l7 + 0x30] %asi, %i4 /* %i4/%i5 = src2 + 0x30 */ |
521 | xor %l0, %g2, %l0 | 521 | xor %l0, %g2, %l0 |
522 | xor %l1, %g3, %l1 | 522 | xor %l1, %g3, %l1 |
523 | stxa %l0, [%i0 + 0x20] %asi | 523 | stxa %l0, [%i0 + 0x20] %asi |
524 | stxa %l1, [%i0 + 0x28] %asi | 524 | stxa %l1, [%i0 + 0x28] %asi |
525 | ldda [%i6 + 0x30] %asi, %g2 /* %g2/%g3 = src3 + 0x30 */ | 525 | ldda [%l6 + 0x30] %asi, %g2 /* %g2/%g3 = src3 + 0x30 */ |
526 | ldda [%i0 + 0x30] %asi, %l0 /* %l0/%l1 = dest + 0x30 */ | 526 | ldda [%i0 + 0x30] %asi, %l0 /* %l0/%l1 = dest + 0x30 */ |
527 | 527 | ||
528 | prefetch [%i1 + 0x40], #one_read | 528 | prefetch [%i1 + 0x40], #one_read |
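The register substitutions above matter because, inside a SPARC register window, %i6 is the frame pointer and %i7 the return address; the loads mistakenly used them instead of %l6/%l7, which hold the src3/src2 pointers, so the XOR pulled data from bogus addresses. The data path itself is unchanged and still computes a four-way XOR into the destination, roughly equivalent to:

	/* C-level equivalent of the 4-source XOR loop above; sketch only. */
	static void xor_4(unsigned long bytes, unsigned long *dest,
			  unsigned long *s1, unsigned long *s2, unsigned long *s3)
	{
		unsigned long i;

		for (i = 0; i < bytes / sizeof(unsigned long); i++)
			dest[i] ^= s1[i] ^ s2[i] ^ s3[i];
	}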
diff --git a/arch/sparc64/mm/init.c b/arch/sparc64/mm/init.c index 3010227fe243..f0ab9aab308f 100644 --- a/arch/sparc64/mm/init.c +++ b/arch/sparc64/mm/init.c | |||
@@ -631,7 +631,6 @@ void prom_world(int enter) | |||
631 | __asm__ __volatile__("flushw"); | 631 | __asm__ __volatile__("flushw"); |
632 | } | 632 | } |
633 | 633 | ||
634 | #ifdef DCACHE_ALIASING_POSSIBLE | ||
635 | void __flush_dcache_range(unsigned long start, unsigned long end) | 634 | void __flush_dcache_range(unsigned long start, unsigned long end) |
636 | { | 635 | { |
637 | unsigned long va; | 636 | unsigned long va; |
@@ -655,7 +654,6 @@ void __flush_dcache_range(unsigned long start, unsigned long end) | |||
655 | "i" (ASI_DCACHE_INVALIDATE)); | 654 | "i" (ASI_DCACHE_INVALIDATE)); |
656 | } | 655 | } |
657 | } | 656 | } |
658 | #endif /* DCACHE_ALIASING_POSSIBLE */ | ||
659 | 657 | ||
660 | /* get_new_mmu_context() uses "cache + 1". */ | 658 | /* get_new_mmu_context() uses "cache + 1". */ |
661 | DEFINE_SPINLOCK(ctx_alloc_lock); | 659 | DEFINE_SPINLOCK(ctx_alloc_lock); |
diff --git a/drivers/video/cg6.c b/drivers/video/cg6.c index ee9046db9c7d..549891d76ef5 100644 --- a/drivers/video/cg6.c +++ b/drivers/video/cg6.c | |||
@@ -19,7 +19,6 @@ | |||
19 | #include <linux/mm.h> | 19 | #include <linux/mm.h> |
20 | 20 | ||
21 | #include <asm/io.h> | 21 | #include <asm/io.h> |
22 | #include <asm/prom.h> | ||
23 | #include <asm/of_device.h> | 22 | #include <asm/of_device.h> |
24 | #include <asm/fbio.h> | 23 | #include <asm/fbio.h> |
25 | 24 | ||
@@ -38,6 +37,7 @@ static void cg6_fillrect(struct fb_info *, const struct fb_fillrect *); | |||
38 | static int cg6_sync(struct fb_info *); | 37 | static int cg6_sync(struct fb_info *); |
39 | static int cg6_mmap(struct fb_info *, struct vm_area_struct *); | 38 | static int cg6_mmap(struct fb_info *, struct vm_area_struct *); |
40 | static int cg6_ioctl(struct fb_info *, unsigned int, unsigned long); | 39 | static int cg6_ioctl(struct fb_info *, unsigned int, unsigned long); |
40 | static void cg6_copyarea(struct fb_info *info, const struct fb_copyarea *area); | ||
41 | 41 | ||
42 | /* | 42 | /* |
43 | * Frame buffer operations | 43 | * Frame buffer operations |
@@ -48,7 +48,7 @@ static struct fb_ops cg6_ops = { | |||
48 | .fb_setcolreg = cg6_setcolreg, | 48 | .fb_setcolreg = cg6_setcolreg, |
49 | .fb_blank = cg6_blank, | 49 | .fb_blank = cg6_blank, |
50 | .fb_fillrect = cg6_fillrect, | 50 | .fb_fillrect = cg6_fillrect, |
51 | .fb_copyarea = cfb_copyarea, | 51 | .fb_copyarea = cg6_copyarea, |
52 | .fb_imageblit = cg6_imageblit, | 52 | .fb_imageblit = cg6_imageblit, |
53 | .fb_sync = cg6_sync, | 53 | .fb_sync = cg6_sync, |
54 | .fb_mmap = cg6_mmap, | 54 | .fb_mmap = cg6_mmap, |
@@ -65,41 +65,41 @@ static struct fb_ops cg6_ops = { | |||
65 | * The FBC could be the frame buffer control | 65 | * The FBC could be the frame buffer control |
66 | * The FHC could be the frame buffer hardware control. | 66 | * The FHC could be the frame buffer hardware control. |
67 | */ | 67 | */ |
68 | #define CG6_ROM_OFFSET 0x0UL | 68 | #define CG6_ROM_OFFSET 0x0UL |
69 | #define CG6_BROOKTREE_OFFSET 0x200000UL | 69 | #define CG6_BROOKTREE_OFFSET 0x200000UL |
70 | #define CG6_DHC_OFFSET 0x240000UL | 70 | #define CG6_DHC_OFFSET 0x240000UL |
71 | #define CG6_ALT_OFFSET 0x280000UL | 71 | #define CG6_ALT_OFFSET 0x280000UL |
72 | #define CG6_FHC_OFFSET 0x300000UL | 72 | #define CG6_FHC_OFFSET 0x300000UL |
73 | #define CG6_THC_OFFSET 0x301000UL | 73 | #define CG6_THC_OFFSET 0x301000UL |
74 | #define CG6_FBC_OFFSET 0x700000UL | 74 | #define CG6_FBC_OFFSET 0x700000UL |
75 | #define CG6_TEC_OFFSET 0x701000UL | 75 | #define CG6_TEC_OFFSET 0x701000UL |
76 | #define CG6_RAM_OFFSET 0x800000UL | 76 | #define CG6_RAM_OFFSET 0x800000UL |
77 | 77 | ||
78 | /* FHC definitions */ | 78 | /* FHC definitions */ |
79 | #define CG6_FHC_FBID_SHIFT 24 | 79 | #define CG6_FHC_FBID_SHIFT 24 |
80 | #define CG6_FHC_FBID_MASK 255 | 80 | #define CG6_FHC_FBID_MASK 255 |
81 | #define CG6_FHC_REV_SHIFT 20 | 81 | #define CG6_FHC_REV_SHIFT 20 |
82 | #define CG6_FHC_REV_MASK 15 | 82 | #define CG6_FHC_REV_MASK 15 |
83 | #define CG6_FHC_FROP_DISABLE (1 << 19) | 83 | #define CG6_FHC_FROP_DISABLE (1 << 19) |
84 | #define CG6_FHC_ROW_DISABLE (1 << 18) | 84 | #define CG6_FHC_ROW_DISABLE (1 << 18) |
85 | #define CG6_FHC_SRC_DISABLE (1 << 17) | 85 | #define CG6_FHC_SRC_DISABLE (1 << 17) |
86 | #define CG6_FHC_DST_DISABLE (1 << 16) | 86 | #define CG6_FHC_DST_DISABLE (1 << 16) |
87 | #define CG6_FHC_RESET (1 << 15) | 87 | #define CG6_FHC_RESET (1 << 15) |
88 | #define CG6_FHC_LITTLE_ENDIAN (1 << 13) | 88 | #define CG6_FHC_LITTLE_ENDIAN (1 << 13) |
89 | #define CG6_FHC_RES_MASK (3 << 11) | 89 | #define CG6_FHC_RES_MASK (3 << 11) |
90 | #define CG6_FHC_1024 (0 << 11) | 90 | #define CG6_FHC_1024 (0 << 11) |
91 | #define CG6_FHC_1152 (1 << 11) | 91 | #define CG6_FHC_1152 (1 << 11) |
92 | #define CG6_FHC_1280 (2 << 11) | 92 | #define CG6_FHC_1280 (2 << 11) |
93 | #define CG6_FHC_1600 (3 << 11) | 93 | #define CG6_FHC_1600 (3 << 11) |
94 | #define CG6_FHC_CPU_MASK (3 << 9) | 94 | #define CG6_FHC_CPU_MASK (3 << 9) |
95 | #define CG6_FHC_CPU_SPARC (0 << 9) | 95 | #define CG6_FHC_CPU_SPARC (0 << 9) |
96 | #define CG6_FHC_CPU_68020 (1 << 9) | 96 | #define CG6_FHC_CPU_68020 (1 << 9) |
97 | #define CG6_FHC_CPU_386 (2 << 9) | 97 | #define CG6_FHC_CPU_386 (2 << 9) |
98 | #define CG6_FHC_TEST (1 << 8) | 98 | #define CG6_FHC_TEST (1 << 8) |
99 | #define CG6_FHC_TEST_X_SHIFT 4 | 99 | #define CG6_FHC_TEST_X_SHIFT 4 |
100 | #define CG6_FHC_TEST_X_MASK 15 | 100 | #define CG6_FHC_TEST_X_MASK 15 |
101 | #define CG6_FHC_TEST_Y_SHIFT 0 | 101 | #define CG6_FHC_TEST_Y_SHIFT 0 |
102 | #define CG6_FHC_TEST_Y_MASK 15 | 102 | #define CG6_FHC_TEST_Y_MASK 15 |
103 | 103 | ||
104 | /* FBC mode definitions */ | 104 | /* FBC mode definitions */ |
105 | #define CG6_FBC_BLIT_IGNORE 0x00000000 | 105 | #define CG6_FBC_BLIT_IGNORE 0x00000000 |
@@ -150,17 +150,17 @@ static struct fb_ops cg6_ops = { | |||
150 | #define CG6_FBC_INDEX_MASK 0x00000030 | 150 | #define CG6_FBC_INDEX_MASK 0x00000030 |
151 | 151 | ||
152 | /* THC definitions */ | 152 | /* THC definitions */ |
153 | #define CG6_THC_MISC_REV_SHIFT 16 | 153 | #define CG6_THC_MISC_REV_SHIFT 16 |
154 | #define CG6_THC_MISC_REV_MASK 15 | 154 | #define CG6_THC_MISC_REV_MASK 15 |
155 | #define CG6_THC_MISC_RESET (1 << 12) | 155 | #define CG6_THC_MISC_RESET (1 << 12) |
156 | #define CG6_THC_MISC_VIDEO (1 << 10) | 156 | #define CG6_THC_MISC_VIDEO (1 << 10) |
157 | #define CG6_THC_MISC_SYNC (1 << 9) | 157 | #define CG6_THC_MISC_SYNC (1 << 9) |
158 | #define CG6_THC_MISC_VSYNC (1 << 8) | 158 | #define CG6_THC_MISC_VSYNC (1 << 8) |
159 | #define CG6_THC_MISC_SYNC_ENAB (1 << 7) | 159 | #define CG6_THC_MISC_SYNC_ENAB (1 << 7) |
160 | #define CG6_THC_MISC_CURS_RES (1 << 6) | 160 | #define CG6_THC_MISC_CURS_RES (1 << 6) |
161 | #define CG6_THC_MISC_INT_ENAB (1 << 5) | 161 | #define CG6_THC_MISC_INT_ENAB (1 << 5) |
162 | #define CG6_THC_MISC_INT (1 << 4) | 162 | #define CG6_THC_MISC_INT (1 << 4) |
163 | #define CG6_THC_MISC_INIT 0x9f | 163 | #define CG6_THC_MISC_INIT 0x9f |
164 | 164 | ||
165 | /* The contents are unknown */ | 165 | /* The contents are unknown */ |
166 | struct cg6_tec { | 166 | struct cg6_tec { |
@@ -170,25 +170,25 @@ struct cg6_tec { | |||
170 | }; | 170 | }; |
171 | 171 | ||
172 | struct cg6_thc { | 172 | struct cg6_thc { |
173 | u32 thc_pad0[512]; | 173 | u32 thc_pad0[512]; |
174 | u32 thc_hs; /* hsync timing */ | 174 | u32 thc_hs; /* hsync timing */ |
175 | u32 thc_hsdvs; | 175 | u32 thc_hsdvs; |
176 | u32 thc_hd; | 176 | u32 thc_hd; |
177 | u32 thc_vs; /* vsync timing */ | 177 | u32 thc_vs; /* vsync timing */ |
178 | u32 thc_vd; | 178 | u32 thc_vd; |
179 | u32 thc_refresh; | 179 | u32 thc_refresh; |
180 | u32 thc_misc; | 180 | u32 thc_misc; |
181 | u32 thc_pad1[56]; | 181 | u32 thc_pad1[56]; |
182 | u32 thc_cursxy; /* cursor x,y position (16 bits each) */ | 182 | u32 thc_cursxy; /* cursor x,y position (16 bits each) */ |
183 | u32 thc_cursmask[32]; /* cursor mask bits */ | 183 | u32 thc_cursmask[32]; /* cursor mask bits */ |
184 | u32 thc_cursbits[32]; /* what to show where mask enabled */ | 184 | u32 thc_cursbits[32]; /* what to show where mask enabled */ |
185 | }; | 185 | }; |
186 | 186 | ||
187 | struct cg6_fbc { | 187 | struct cg6_fbc { |
188 | u32 xxx0[1]; | 188 | u32 xxx0[1]; |
189 | u32 mode; | 189 | u32 mode; |
190 | u32 clip; | 190 | u32 clip; |
191 | u32 xxx1[1]; | 191 | u32 xxx1[1]; |
192 | u32 s; | 192 | u32 s; |
193 | u32 draw; | 193 | u32 draw; |
194 | u32 blit; | 194 | u32 blit; |
@@ -243,10 +243,10 @@ struct cg6_fbc { | |||
243 | }; | 243 | }; |
244 | 244 | ||
245 | struct bt_regs { | 245 | struct bt_regs { |
246 | u32 addr; | 246 | u32 addr; |
247 | u32 color_map; | 247 | u32 color_map; |
248 | u32 control; | 248 | u32 control; |
249 | u32 cursor; | 249 | u32 cursor; |
250 | }; | 250 | }; |
251 | 251 | ||
252 | struct cg6_par { | 252 | struct cg6_par { |
@@ -267,7 +267,7 @@ struct cg6_par { | |||
267 | 267 | ||
268 | static int cg6_sync(struct fb_info *info) | 268 | static int cg6_sync(struct fb_info *info) |
269 | { | 269 | { |
270 | struct cg6_par *par = (struct cg6_par *) info->par; | 270 | struct cg6_par *par = (struct cg6_par *)info->par; |
271 | struct cg6_fbc __iomem *fbc = par->fbc; | 271 | struct cg6_fbc __iomem *fbc = par->fbc; |
272 | int limit = 10000; | 272 | int limit = 10000; |
273 | 273 | ||
@@ -281,24 +281,24 @@ static int cg6_sync(struct fb_info *info) | |||
281 | } | 281 | } |
282 | 282 | ||
283 | /** | 283 | /** |
284 | * cg6_fillrect - REQUIRED function. Can use generic routines if | 284 | * cg6_fillrect - Draws a rectangle on the screen. |
285 | * non acclerated hardware and packed pixel based. | ||
286 | * Draws a rectangle on the screen. | ||
287 | * | 285 | * |
288 | * @info: frame buffer structure that represents a single frame buffer | 286 | * @info: frame buffer structure that represents a single frame buffer |
289 | * @rect: structure defining the rectangle and operation. | 287 | * @rect: structure defining the rectangle and operation. |
290 | */ | 288 | */ |
291 | static void cg6_fillrect(struct fb_info *info, const struct fb_fillrect *rect) | 289 | static void cg6_fillrect(struct fb_info *info, const struct fb_fillrect *rect) |
292 | { | 290 | { |
293 | struct cg6_par *par = (struct cg6_par *) info->par; | 291 | struct cg6_par *par = (struct cg6_par *)info->par; |
294 | struct cg6_fbc __iomem *fbc = par->fbc; | 292 | struct cg6_fbc __iomem *fbc = par->fbc; |
295 | unsigned long flags; | 293 | unsigned long flags; |
296 | s32 val; | 294 | s32 val; |
297 | 295 | ||
298 | /* XXX doesn't handle ROP_XOR */ | 296 | /* CG6 doesn't handle ROP_XOR */ |
299 | 297 | ||
300 | spin_lock_irqsave(&par->lock, flags); | 298 | spin_lock_irqsave(&par->lock, flags); |
299 | |||
301 | cg6_sync(info); | 300 | cg6_sync(info); |
301 | |||
302 | sbus_writel(rect->color, &fbc->fg); | 302 | sbus_writel(rect->color, &fbc->fg); |
303 | sbus_writel(~(u32)0, &fbc->pixelm); | 303 | sbus_writel(~(u32)0, &fbc->pixelm); |
304 | sbus_writel(0xea80ff00, &fbc->alu); | 304 | sbus_writel(0xea80ff00, &fbc->alu); |
@@ -316,16 +316,56 @@ static void cg6_fillrect(struct fb_info *info, const struct fb_fillrect *rect) | |||
316 | } | 316 | } |
317 | 317 | ||
318 | /** | 318 | /** |
319 | * cg6_imageblit - REQUIRED function. Can use generic routines if | 319 | * cg6_copyarea - Copies one area of the screen to another area. |
320 | * non acclerated hardware and packed pixel based. | 320 | * |
321 | * Copies a image from system memory to the screen. | 321 | * @info: frame buffer structure that represents a single frame buffer |
322 | * @area: Structure providing the data to copy the framebuffer contents | ||
323 | * from one region to another. | ||
324 | * | ||
325 | * This drawing operation copies a rectangular area from one area of the | ||
326 | * screen to another area. | ||
327 | */ | ||
328 | static void cg6_copyarea(struct fb_info *info, const struct fb_copyarea *area) | ||
329 | { | ||
330 | struct cg6_par *par = (struct cg6_par *)info->par; | ||
331 | struct cg6_fbc __iomem *fbc = par->fbc; | ||
332 | unsigned long flags; | ||
333 | int i; | ||
334 | |||
335 | spin_lock_irqsave(&par->lock, flags); | ||
336 | |||
337 | cg6_sync(info); | ||
338 | |||
339 | sbus_writel(0xff, &fbc->fg); | ||
340 | sbus_writel(0x00, &fbc->bg); | ||
341 | sbus_writel(~0, &fbc->pixelm); | ||
342 | sbus_writel(0xe880cccc, &fbc->alu); | ||
343 | sbus_writel(0, &fbc->s); | ||
344 | sbus_writel(0, &fbc->clip); | ||
345 | |||
346 | sbus_writel(area->sy, &fbc->y0); | ||
347 | sbus_writel(area->sx, &fbc->x0); | ||
348 | sbus_writel(area->sy + area->height - 1, &fbc->y1); | ||
349 | sbus_writel(area->sx + area->width - 1, &fbc->x1); | ||
350 | sbus_writel(area->dy, &fbc->y2); | ||
351 | sbus_writel(area->dx, &fbc->x2); | ||
352 | sbus_writel(area->dy + area->height - 1, &fbc->y3); | ||
353 | sbus_writel(area->dx + area->width - 1, &fbc->x3); | ||
354 | do { | ||
355 | i = sbus_readl(&fbc->blit); | ||
356 | } while (i < 0 && (i & 0x20000000)); | ||
357 | spin_unlock_irqrestore(&par->lock, flags); | ||
358 | } | ||
359 | |||
360 | /** | ||
361 | * cg6_imageblit - Copies an image from system memory to the screen. | ||
322 | * | 362 | * |
323 | * @info: frame buffer structure that represents a single frame buffer | 363 | * @info: frame buffer structure that represents a single frame buffer |
324 | * @image: structure defining the image. | 364 | * @image: structure defining the image. |
325 | */ | 365 | */ |
326 | static void cg6_imageblit(struct fb_info *info, const struct fb_image *image) | 366 | static void cg6_imageblit(struct fb_info *info, const struct fb_image *image) |
327 | { | 367 | { |
328 | struct cg6_par *par = (struct cg6_par *) info->par; | 368 | struct cg6_par *par = (struct cg6_par *)info->par; |
329 | struct cg6_fbc __iomem *fbc = par->fbc; | 369 | struct cg6_fbc __iomem *fbc = par->fbc; |
330 | const u8 *data = image->data; | 370 | const u8 *data = image->data; |
331 | unsigned long flags; | 371 | unsigned long flags; |
@@ -363,7 +403,7 @@ static void cg6_imageblit(struct fb_info *info, const struct fb_image *image) | |||
363 | sbus_writel(y, &fbc->y0); | 403 | sbus_writel(y, &fbc->y0); |
364 | sbus_writel(x, &fbc->x0); | 404 | sbus_writel(x, &fbc->x0); |
365 | sbus_writel(x + 32 - 1, &fbc->x1); | 405 | sbus_writel(x + 32 - 1, &fbc->x1); |
366 | 406 | ||
367 | val = ((u32)data[0] << 24) | | 407 | val = ((u32)data[0] << 24) | |
368 | ((u32)data[1] << 16) | | 408 | ((u32)data[1] << 16) | |
369 | ((u32)data[2] << 8) | | 409 | ((u32)data[2] << 8) | |
@@ -404,19 +444,20 @@ static void cg6_imageblit(struct fb_info *info, const struct fb_image *image) | |||
404 | } | 444 | } |
405 | 445 | ||
406 | /** | 446 | /** |
407 | * cg6_setcolreg - Optional function. Sets a color register. | 447 | * cg6_setcolreg - Sets a color register. |
408 | * @regno: boolean, 0 copy local, 1 get_user() function | 448 | * |
409 | * @red: frame buffer colormap structure | 449 | * @regno: boolean, 0 copy local, 1 get_user() function |
410 | * @green: The green value which can be up to 16 bits wide | 450 | * @red: frame buffer colormap structure |
411 | * @blue: The blue value which can be up to 16 bits wide. | 451 | * @green: The green value which can be up to 16 bits wide |
412 | * @transp: If supported the alpha value which can be up to 16 bits wide. | 452 | * @blue: The blue value which can be up to 16 bits wide. |
413 | * @info: frame buffer info structure | 453 | * @transp: If supported the alpha value which can be up to 16 bits wide. |
454 | * @info: frame buffer info structure | ||
414 | */ | 455 | */ |
415 | static int cg6_setcolreg(unsigned regno, | 456 | static int cg6_setcolreg(unsigned regno, |
416 | unsigned red, unsigned green, unsigned blue, | 457 | unsigned red, unsigned green, unsigned blue, |
417 | unsigned transp, struct fb_info *info) | 458 | unsigned transp, struct fb_info *info) |
418 | { | 459 | { |
419 | struct cg6_par *par = (struct cg6_par *) info->par; | 460 | struct cg6_par *par = (struct cg6_par *)info->par; |
420 | struct bt_regs __iomem *bt = par->bt; | 461 | struct bt_regs __iomem *bt = par->bt; |
421 | unsigned long flags; | 462 | unsigned long flags; |
422 | 463 | ||
@@ -440,25 +481,24 @@ static int cg6_setcolreg(unsigned regno, | |||
440 | } | 481 | } |
441 | 482 | ||
442 | /** | 483 | /** |
443 | * cg6_blank - Optional function. Blanks the display. | 484 | * cg6_blank - Blanks the display. |
444 | * @blank_mode: the blank mode we want. | 485 | * |
445 | * @info: frame buffer structure that represents a single frame buffer | 486 | * @blank_mode: the blank mode we want. |
487 | * @info: frame buffer structure that represents a single frame buffer | ||
446 | */ | 488 | */ |
447 | static int | 489 | static int cg6_blank(int blank, struct fb_info *info) |
448 | cg6_blank(int blank, struct fb_info *info) | ||
449 | { | 490 | { |
450 | struct cg6_par *par = (struct cg6_par *) info->par; | 491 | struct cg6_par *par = (struct cg6_par *)info->par; |
451 | struct cg6_thc __iomem *thc = par->thc; | 492 | struct cg6_thc __iomem *thc = par->thc; |
452 | unsigned long flags; | 493 | unsigned long flags; |
453 | u32 val; | 494 | u32 val; |
454 | 495 | ||
455 | spin_lock_irqsave(&par->lock, flags); | 496 | spin_lock_irqsave(&par->lock, flags); |
497 | val = sbus_readl(&thc->thc_misc); | ||
456 | 498 | ||
457 | switch (blank) { | 499 | switch (blank) { |
458 | case FB_BLANK_UNBLANK: /* Unblanking */ | 500 | case FB_BLANK_UNBLANK: /* Unblanking */ |
459 | val = sbus_readl(&thc->thc_misc); | ||
460 | val |= CG6_THC_MISC_VIDEO; | 501 | val |= CG6_THC_MISC_VIDEO; |
461 | sbus_writel(val, &thc->thc_misc); | ||
462 | par->flags &= ~CG6_FLAG_BLANKED; | 502 | par->flags &= ~CG6_FLAG_BLANKED; |
463 | break; | 503 | break; |
464 | 504 | ||
@@ -466,13 +506,12 @@ cg6_blank(int blank, struct fb_info *info) | |||
466 | case FB_BLANK_VSYNC_SUSPEND: /* VESA blank (vsync off) */ | 506 | case FB_BLANK_VSYNC_SUSPEND: /* VESA blank (vsync off) */ |
467 | case FB_BLANK_HSYNC_SUSPEND: /* VESA blank (hsync off) */ | 507 | case FB_BLANK_HSYNC_SUSPEND: /* VESA blank (hsync off) */ |
468 | case FB_BLANK_POWERDOWN: /* Poweroff */ | 508 | case FB_BLANK_POWERDOWN: /* Poweroff */ |
469 | val = sbus_readl(&thc->thc_misc); | ||
470 | val &= ~CG6_THC_MISC_VIDEO; | 509 | val &= ~CG6_THC_MISC_VIDEO; |
471 | sbus_writel(val, &thc->thc_misc); | ||
472 | par->flags |= CG6_FLAG_BLANKED; | 510 | par->flags |= CG6_FLAG_BLANKED; |
473 | break; | 511 | break; |
474 | } | 512 | } |
475 | 513 | ||
514 | sbus_writel(val, &thc->thc_misc); | ||
476 | spin_unlock_irqrestore(&par->lock, flags); | 515 | spin_unlock_irqrestore(&par->lock, flags); |
477 | 516 | ||
478 | return 0; | 517 | return 0; |
@@ -533,7 +572,7 @@ static int cg6_mmap(struct fb_info *info, struct vm_area_struct *vma) | |||
533 | 572 | ||
534 | static int cg6_ioctl(struct fb_info *info, unsigned int cmd, unsigned long arg) | 573 | static int cg6_ioctl(struct fb_info *info, unsigned int cmd, unsigned long arg) |
535 | { | 574 | { |
536 | struct cg6_par *par = (struct cg6_par *) info->par; | 575 | struct cg6_par *par = (struct cg6_par *)info->par; |
537 | 576 | ||
538 | return sbusfb_ioctl_helper(cmd, arg, info, | 577 | return sbusfb_ioctl_helper(cmd, arg, info, |
539 | FBTYPE_SUNFAST_COLOR, 8, par->fbsize); | 578 | FBTYPE_SUNFAST_COLOR, 8, par->fbsize); |
@@ -543,15 +582,14 @@ static int cg6_ioctl(struct fb_info *info, unsigned int cmd, unsigned long arg) | |||
543 | * Initialisation | 582 | * Initialisation |
544 | */ | 583 | */ |
545 | 584 | ||
546 | static void | 585 | static void __devinit cg6_init_fix(struct fb_info *info, int linebytes) |
547 | cg6_init_fix(struct fb_info *info, int linebytes) | ||
548 | { | 586 | { |
549 | struct cg6_par *par = (struct cg6_par *)info->par; | 587 | struct cg6_par *par = (struct cg6_par *)info->par; |
550 | const char *cg6_cpu_name, *cg6_card_name; | 588 | const char *cg6_cpu_name, *cg6_card_name; |
551 | u32 conf; | 589 | u32 conf; |
552 | 590 | ||
553 | conf = sbus_readl(par->fhc); | 591 | conf = sbus_readl(par->fhc); |
554 | switch(conf & CG6_FHC_CPU_MASK) { | 592 | switch (conf & CG6_FHC_CPU_MASK) { |
555 | case CG6_FHC_CPU_SPARC: | 593 | case CG6_FHC_CPU_SPARC: |
556 | cg6_cpu_name = "sparc"; | 594 | cg6_cpu_name = "sparc"; |
557 | break; | 595 | break; |
@@ -563,21 +601,19 @@ cg6_init_fix(struct fb_info *info, int linebytes) | |||
563 | break; | 601 | break; |
564 | }; | 602 | }; |
565 | if (((conf >> CG6_FHC_REV_SHIFT) & CG6_FHC_REV_MASK) >= 11) { | 603 | if (((conf >> CG6_FHC_REV_SHIFT) & CG6_FHC_REV_MASK) >= 11) { |
566 | if (par->fbsize <= 0x100000) { | 604 | if (par->fbsize <= 0x100000) |
567 | cg6_card_name = "TGX"; | 605 | cg6_card_name = "TGX"; |
568 | } else { | 606 | else |
569 | cg6_card_name = "TGX+"; | 607 | cg6_card_name = "TGX+"; |
570 | } | ||
571 | } else { | 608 | } else { |
572 | if (par->fbsize <= 0x100000) { | 609 | if (par->fbsize <= 0x100000) |
573 | cg6_card_name = "GX"; | 610 | cg6_card_name = "GX"; |
574 | } else { | 611 | else |
575 | cg6_card_name = "GX+"; | 612 | cg6_card_name = "GX+"; |
576 | } | ||
577 | } | 613 | } |
578 | 614 | ||
579 | sprintf(info->fix.id, "%s %s", cg6_card_name, cg6_cpu_name); | 615 | sprintf(info->fix.id, "%s %s", cg6_card_name, cg6_cpu_name); |
580 | info->fix.id[sizeof(info->fix.id)-1] = 0; | 616 | info->fix.id[sizeof(info->fix.id) - 1] = 0; |
581 | 617 | ||
582 | info->fix.type = FB_TYPE_PACKED_PIXELS; | 618 | info->fix.type = FB_TYPE_PACKED_PIXELS; |
583 | info->fix.visual = FB_VISUAL_PSEUDOCOLOR; | 619 | info->fix.visual = FB_VISUAL_PSEUDOCOLOR; |
@@ -588,28 +624,28 @@ cg6_init_fix(struct fb_info *info, int linebytes) | |||
588 | } | 624 | } |
589 | 625 | ||
590 | /* Initialize Brooktree DAC */ | 626 | /* Initialize Brooktree DAC */ |
591 | static void cg6_bt_init(struct cg6_par *par) | 627 | static void __devinit cg6_bt_init(struct cg6_par *par) |
592 | { | 628 | { |
593 | struct bt_regs __iomem *bt = par->bt; | 629 | struct bt_regs __iomem *bt = par->bt; |
594 | 630 | ||
595 | sbus_writel(0x04 << 24, &bt->addr); /* color planes */ | 631 | sbus_writel(0x04 << 24, &bt->addr); /* color planes */ |
596 | sbus_writel(0xff << 24, &bt->control); | 632 | sbus_writel(0xff << 24, &bt->control); |
597 | sbus_writel(0x05 << 24, &bt->addr); | 633 | sbus_writel(0x05 << 24, &bt->addr); |
598 | sbus_writel(0x00 << 24, &bt->control); | 634 | sbus_writel(0x00 << 24, &bt->control); |
599 | sbus_writel(0x06 << 24, &bt->addr); /* overlay plane */ | 635 | sbus_writel(0x06 << 24, &bt->addr); /* overlay plane */ |
600 | sbus_writel(0x73 << 24, &bt->control); | 636 | sbus_writel(0x73 << 24, &bt->control); |
601 | sbus_writel(0x07 << 24, &bt->addr); | 637 | sbus_writel(0x07 << 24, &bt->addr); |
602 | sbus_writel(0x00 << 24, &bt->control); | 638 | sbus_writel(0x00 << 24, &bt->control); |
603 | } | 639 | } |
604 | 640 | ||
605 | static void cg6_chip_init(struct fb_info *info) | 641 | static void __devinit cg6_chip_init(struct fb_info *info) |
606 | { | 642 | { |
607 | struct cg6_par *par = (struct cg6_par *) info->par; | 643 | struct cg6_par *par = (struct cg6_par *)info->par; |
608 | struct cg6_tec __iomem *tec = par->tec; | 644 | struct cg6_tec __iomem *tec = par->tec; |
609 | struct cg6_fbc __iomem *fbc = par->fbc; | 645 | struct cg6_fbc __iomem *fbc = par->fbc; |
610 | u32 rev, conf, mode; | 646 | u32 rev, conf, mode; |
611 | int i; | 647 | int i; |
612 | 648 | ||
613 | /* Turn off stuff in the Transform Engine. */ | 649 | /* Turn off stuff in the Transform Engine. */ |
614 | sbus_writel(0, &tec->tec_matrix); | 650 | sbus_writel(0, &tec->tec_matrix); |
615 | sbus_writel(0, &tec->tec_clip); | 651 | sbus_writel(0, &tec->tec_clip); |
@@ -635,13 +671,13 @@ static void cg6_chip_init(struct fb_info *info) | |||
635 | i = sbus_readl(&fbc->s); | 671 | i = sbus_readl(&fbc->s); |
636 | } while (i & 0x10000000); | 672 | } while (i & 0x10000000); |
637 | mode &= ~(CG6_FBC_BLIT_MASK | CG6_FBC_MODE_MASK | | 673 | mode &= ~(CG6_FBC_BLIT_MASK | CG6_FBC_MODE_MASK | |
638 | CG6_FBC_DRAW_MASK | CG6_FBC_BWRITE0_MASK | | 674 | CG6_FBC_DRAW_MASK | CG6_FBC_BWRITE0_MASK | |
639 | CG6_FBC_BWRITE1_MASK | CG6_FBC_BREAD_MASK | | 675 | CG6_FBC_BWRITE1_MASK | CG6_FBC_BREAD_MASK | |
640 | CG6_FBC_BDISP_MASK); | 676 | CG6_FBC_BDISP_MASK); |
641 | mode |= (CG6_FBC_BLIT_SRC | CG6_FBC_MODE_COLOR8 | | 677 | mode |= (CG6_FBC_BLIT_SRC | CG6_FBC_MODE_COLOR8 | |
642 | CG6_FBC_DRAW_RENDER | CG6_FBC_BWRITE0_ENABLE | | 678 | CG6_FBC_DRAW_RENDER | CG6_FBC_BWRITE0_ENABLE | |
643 | CG6_FBC_BWRITE1_DISABLE | CG6_FBC_BREAD_0 | | 679 | CG6_FBC_BWRITE1_DISABLE | CG6_FBC_BREAD_0 | |
644 | CG6_FBC_BDISP_0); | 680 | CG6_FBC_BDISP_0); |
645 | sbus_writel(mode, &fbc->mode); | 681 | sbus_writel(mode, &fbc->mode); |
646 | 682 | ||
647 | sbus_writel(0, &fbc->clip); | 683 | sbus_writel(0, &fbc->clip); |
@@ -671,7 +707,8 @@ static void cg6_unmap_regs(struct of_device *op, struct fb_info *info, | |||
671 | of_iounmap(&op->resource[0], info->screen_base, par->fbsize); | 707 | of_iounmap(&op->resource[0], info->screen_base, par->fbsize); |
672 | } | 708 | } |
673 | 709 | ||
674 | static int __devinit cg6_probe(struct of_device *op, const struct of_device_id *match) | 710 | static int __devinit cg6_probe(struct of_device *op, |
711 | const struct of_device_id *match) | ||
675 | { | 712 | { |
676 | struct device_node *dp = op->node; | 713 | struct device_node *dp = op->node; |
677 | struct fb_info *info; | 714 | struct fb_info *info; |
@@ -705,22 +742,23 @@ static int __devinit cg6_probe(struct of_device *op, const struct of_device_id * | |||
705 | par->fbsize *= 4; | 742 | par->fbsize *= 4; |
706 | 743 | ||
707 | par->fbc = of_ioremap(&op->resource[0], CG6_FBC_OFFSET, | 744 | par->fbc = of_ioremap(&op->resource[0], CG6_FBC_OFFSET, |
708 | 4096, "cgsix fbc"); | 745 | 4096, "cgsix fbc"); |
709 | par->tec = of_ioremap(&op->resource[0], CG6_TEC_OFFSET, | 746 | par->tec = of_ioremap(&op->resource[0], CG6_TEC_OFFSET, |
710 | sizeof(struct cg6_tec), "cgsix tec"); | 747 | sizeof(struct cg6_tec), "cgsix tec"); |
711 | par->thc = of_ioremap(&op->resource[0], CG6_THC_OFFSET, | 748 | par->thc = of_ioremap(&op->resource[0], CG6_THC_OFFSET, |
712 | sizeof(struct cg6_thc), "cgsix thc"); | 749 | sizeof(struct cg6_thc), "cgsix thc"); |
713 | par->bt = of_ioremap(&op->resource[0], CG6_BROOKTREE_OFFSET, | 750 | par->bt = of_ioremap(&op->resource[0], CG6_BROOKTREE_OFFSET, |
714 | sizeof(struct bt_regs), "cgsix dac"); | 751 | sizeof(struct bt_regs), "cgsix dac"); |
715 | par->fhc = of_ioremap(&op->resource[0], CG6_FHC_OFFSET, | 752 | par->fhc = of_ioremap(&op->resource[0], CG6_FHC_OFFSET, |
716 | sizeof(u32), "cgsix fhc"); | 753 | sizeof(u32), "cgsix fhc"); |
717 | 754 | ||
718 | info->flags = FBINFO_DEFAULT | FBINFO_HWACCEL_IMAGEBLIT | | 755 | info->flags = FBINFO_DEFAULT | FBINFO_HWACCEL_IMAGEBLIT | |
719 | FBINFO_HWACCEL_COPYAREA | FBINFO_HWACCEL_FILLRECT; | 756 | FBINFO_HWACCEL_COPYAREA | FBINFO_HWACCEL_FILLRECT | |
757 | FBINFO_READS_FAST; | ||
720 | info->fbops = &cg6_ops; | 758 | info->fbops = &cg6_ops; |
721 | 759 | ||
722 | info->screen_base = of_ioremap(&op->resource[0], CG6_RAM_OFFSET, | 760 | info->screen_base = of_ioremap(&op->resource[0], CG6_RAM_OFFSET, |
723 | par->fbsize, "cgsix ram"); | 761 | par->fbsize, "cgsix ram"); |
724 | if (!par->fbc || !par->tec || !par->thc || | 762 | if (!par->fbc || !par->tec || !par->thc || |
725 | !par->bt || !par->fhc || !info->screen_base) | 763 | !par->bt || !par->fhc || !info->screen_base) |
726 | goto out_unmap_regs; | 764 | goto out_unmap_regs; |
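With cg6_copyarea wired into cg6_ops and FBINFO_READS_FAST added above, console scrolling can be handled by the GX blitter instead of the software cfb_copyarea path. Purely as an illustration (the geometry values are made up and a struct fb_info *info is assumed to be in scope), the fbdev layer drives the new hook through the standard struct fb_copyarea:

	/* Hypothetical caller-side sketch; fbcon fills these in for real. */
	struct fb_copyarea area = {
		.dx = 0,  .dy = 0,	/* destination origin                */
		.sx = 0,  .sy = 16,	/* source origin, one text row lower */
		.width	= 1152,
		.height	= 884,
	};

	info->fbops->fb_copyarea(info, &area);	/* ends up in cg6_copyarea() */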
diff --git a/drivers/video/ffb.c b/drivers/video/ffb.c index 4b520b573911..d7e24889650e 100644 --- a/drivers/video/ffb.c +++ b/drivers/video/ffb.c | |||
@@ -171,17 +171,17 @@ static struct fb_ops ffb_ops = { | |||
171 | #define FFB_PPC_CS_VAR 0x000002 | 171 | #define FFB_PPC_CS_VAR 0x000002 |
172 | #define FFB_PPC_CS_CONST 0x000003 | 172 | #define FFB_PPC_CS_CONST 0x000003 |
173 | 173 | ||
174 | #define FFB_ROP_NEW 0x83 | 174 | #define FFB_ROP_NEW 0x83 |
175 | #define FFB_ROP_OLD 0x85 | 175 | #define FFB_ROP_OLD 0x85 |
176 | #define FFB_ROP_NEW_XOR_OLD 0x86 | 176 | #define FFB_ROP_NEW_XOR_OLD 0x86 |
177 | 177 | ||
178 | #define FFB_UCSR_FIFO_MASK 0x00000fff | 178 | #define FFB_UCSR_FIFO_MASK 0x00000fff |
179 | #define FFB_UCSR_FB_BUSY 0x01000000 | 179 | #define FFB_UCSR_FB_BUSY 0x01000000 |
180 | #define FFB_UCSR_RP_BUSY 0x02000000 | 180 | #define FFB_UCSR_RP_BUSY 0x02000000 |
181 | #define FFB_UCSR_ALL_BUSY (FFB_UCSR_RP_BUSY|FFB_UCSR_FB_BUSY) | 181 | #define FFB_UCSR_ALL_BUSY (FFB_UCSR_RP_BUSY|FFB_UCSR_FB_BUSY) |
182 | #define FFB_UCSR_READ_ERR 0x40000000 | 182 | #define FFB_UCSR_READ_ERR 0x40000000 |
183 | #define FFB_UCSR_FIFO_OVFL 0x80000000 | 183 | #define FFB_UCSR_FIFO_OVFL 0x80000000 |
184 | #define FFB_UCSR_ALL_ERRORS (FFB_UCSR_READ_ERR|FFB_UCSR_FIFO_OVFL) | 184 | #define FFB_UCSR_ALL_ERRORS (FFB_UCSR_READ_ERR|FFB_UCSR_FIFO_OVFL) |
185 | 185 | ||
186 | struct ffb_fbc { | 186 | struct ffb_fbc { |
187 | /* Next vertex registers */ | 187 | /* Next vertex registers */ |
@@ -197,7 +197,7 @@ struct ffb_fbc { | |||
197 | u32 ryf; | 197 | u32 ryf; |
198 | u32 rxf; | 198 | u32 rxf; |
199 | u32 xxx3[2]; | 199 | u32 xxx3[2]; |
200 | 200 | ||
201 | u32 dmyf; | 201 | u32 dmyf; |
202 | u32 dmxf; | 202 | u32 dmxf; |
203 | u32 xxx4[2]; | 203 | u32 xxx4[2]; |
@@ -211,13 +211,13 @@ struct ffb_fbc { | |||
211 | u32 bh; | 211 | u32 bh; |
212 | u32 bw; | 212 | u32 bw; |
213 | u32 xxx6[2]; | 213 | u32 xxx6[2]; |
214 | 214 | ||
215 | u32 xxx7[32]; | 215 | u32 xxx7[32]; |
216 | 216 | ||
217 | /* Setup unit vertex state register */ | 217 | /* Setup unit vertex state register */ |
218 | u32 suvtx; | 218 | u32 suvtx; |
219 | u32 xxx8[63]; | 219 | u32 xxx8[63]; |
220 | 220 | ||
221 | /* Control registers */ | 221 | /* Control registers */ |
222 | u32 ppc; | 222 | u32 ppc; |
223 | u32 wid; | 223 | u32 wid; |
@@ -235,7 +235,7 @@ struct ffb_fbc { | |||
235 | u32 dcsb; | 235 | u32 dcsb; |
236 | u32 dczf; | 236 | u32 dczf; |
237 | u32 dczb; | 237 | u32 dczb; |
238 | 238 | ||
239 | u32 xxx9; | 239 | u32 xxx9; |
240 | u32 blendc; | 240 | u32 blendc; |
241 | u32 blendc1; | 241 | u32 blendc1; |
@@ -252,7 +252,7 @@ struct ffb_fbc { | |||
252 | u32 fbcfg1; | 252 | u32 fbcfg1; |
253 | u32 fbcfg2; | 253 | u32 fbcfg2; |
254 | u32 fbcfg3; | 254 | u32 fbcfg3; |
255 | 255 | ||
256 | u32 ppcfg; | 256 | u32 ppcfg; |
257 | u32 pick; | 257 | u32 pick; |
258 | u32 fillmode; | 258 | u32 fillmode; |
@@ -269,7 +269,7 @@ struct ffb_fbc { | |||
269 | u32 clip2max; | 269 | u32 clip2max; |
270 | u32 clip3min; | 270 | u32 clip3min; |
271 | u32 clip3max; | 271 | u32 clip3max; |
272 | 272 | ||
273 | /* New 3dRAM III support regs */ | 273 | /* New 3dRAM III support regs */ |
274 | u32 rawblend2; | 274 | u32 rawblend2; |
275 | u32 rawpreblend; | 275 | u32 rawpreblend; |
@@ -287,7 +287,7 @@ struct ffb_fbc { | |||
287 | u32 rawcmp; | 287 | u32 rawcmp; |
288 | u32 rawwac; | 288 | u32 rawwac; |
289 | u32 fbramid; | 289 | u32 fbramid; |
290 | 290 | ||
291 | u32 drawop; | 291 | u32 drawop; |
292 | u32 xxx10[2]; | 292 | u32 xxx10[2]; |
293 | u32 fontlpat; | 293 | u32 fontlpat; |
@@ -302,7 +302,7 @@ struct ffb_fbc { | |||
302 | u32 stencil; | 302 | u32 stencil; |
303 | u32 stencilctl; | 303 | u32 stencilctl; |
304 | 304 | ||
305 | u32 xxx13[4]; | 305 | u32 xxx13[4]; |
306 | u32 dcss1; | 306 | u32 dcss1; |
307 | u32 dcss2; | 307 | u32 dcss2; |
308 | u32 dcss3; | 308 | u32 dcss3; |
@@ -315,17 +315,17 @@ struct ffb_fbc { | |||
315 | u32 dcd3; | 315 | u32 dcd3; |
316 | u32 dcd4; | 316 | u32 dcd4; |
317 | u32 xxx15; | 317 | u32 xxx15; |
318 | 318 | ||
319 | u32 pattern[32]; | 319 | u32 pattern[32]; |
320 | 320 | ||
321 | u32 xxx16[256]; | 321 | u32 xxx16[256]; |
322 | 322 | ||
323 | u32 devid; | 323 | u32 devid; |
324 | u32 xxx17[63]; | 324 | u32 xxx17[63]; |
325 | 325 | ||
326 | u32 ucsr; | 326 | u32 ucsr; |
327 | u32 xxx18[31]; | 327 | u32 xxx18[31]; |
328 | 328 | ||
329 | u32 mer; | 329 | u32 mer; |
330 | }; | 330 | }; |
331 | 331 | ||
@@ -336,20 +336,20 @@ struct ffb_dac { | |||
336 | u32 value2; | 336 | u32 value2; |
337 | }; | 337 | }; |
338 | 338 | ||
339 | #define FFB_DAC_UCTRL 0x1001 /* User Control */ | 339 | #define FFB_DAC_UCTRL 0x1001 /* User Control */ |
340 | #define FFB_DAC_UCTRL_MANREV 0x00000f00 /* 4-bit Manufacturing Revision */ | 340 | #define FFB_DAC_UCTRL_MANREV 0x00000f00 /* 4-bit Manufacturing Revision */ |
341 | #define FFB_DAC_UCTRL_MANREV_SHIFT 8 | 341 | #define FFB_DAC_UCTRL_MANREV_SHIFT 8 |
342 | #define FFB_DAC_TGEN 0x6000 /* Timing Generator */ | 342 | #define FFB_DAC_TGEN 0x6000 /* Timing Generator */ |
343 | #define FFB_DAC_TGEN_VIDE 0x00000001 /* Video Enable */ | 343 | #define FFB_DAC_TGEN_VIDE 0x00000001 /* Video Enable */ |
344 | #define FFB_DAC_DID 0x8000 /* Device Identification */ | 344 | #define FFB_DAC_DID 0x8000 /* Device Identification */ |
345 | #define FFB_DAC_DID_PNUM 0x0ffff000 /* Device Part Number */ | 345 | #define FFB_DAC_DID_PNUM 0x0ffff000 /* Device Part Number */ |
346 | #define FFB_DAC_DID_PNUM_SHIFT 12 | 346 | #define FFB_DAC_DID_PNUM_SHIFT 12 |
347 | #define FFB_DAC_DID_REV 0xf0000000 /* Device Revision */ | 347 | #define FFB_DAC_DID_REV 0xf0000000 /* Device Revision */ |
348 | #define FFB_DAC_DID_REV_SHIFT 28 | 348 | #define FFB_DAC_DID_REV_SHIFT 28 |
349 | 349 | ||
350 | #define FFB_DAC_CUR_CTRL 0x100 | 350 | #define FFB_DAC_CUR_CTRL 0x100 |
351 | #define FFB_DAC_CUR_CTRL_P0 0x00000001 | 351 | #define FFB_DAC_CUR_CTRL_P0 0x00000001 |
352 | #define FFB_DAC_CUR_CTRL_P1 0x00000002 | 352 | #define FFB_DAC_CUR_CTRL_P1 0x00000002 |
353 | 353 | ||
354 | struct ffb_par { | 354 | struct ffb_par { |
355 | spinlock_t lock; | 355 | spinlock_t lock; |
@@ -382,7 +382,9 @@ static void FFBFifo(struct ffb_par *par, int n) | |||
382 | 382 | ||
383 | if (cache - n < 0) { | 383 | if (cache - n < 0) { |
384 | fbc = par->fbc; | 384 | fbc = par->fbc; |
385 | do { cache = (upa_readl(&fbc->ucsr) & FFB_UCSR_FIFO_MASK) - 8; | 385 | do { |
386 | cache = (upa_readl(&fbc->ucsr) & FFB_UCSR_FIFO_MASK); | ||
387 | cache -= 8; | ||
386 | } while (cache - n < 0); | 388 | } while (cache - n < 0); |
387 | } | 389 | } |
388 | par->fifo_cache = cache - n; | 390 | par->fifo_cache = cache - n; |
@@ -401,12 +403,12 @@ static void FFBWait(struct ffb_par *par) | |||
401 | upa_writel(FFB_UCSR_ALL_ERRORS, &fbc->ucsr); | 403 | upa_writel(FFB_UCSR_ALL_ERRORS, &fbc->ucsr); |
402 | } | 404 | } |
403 | udelay(10); | 405 | udelay(10); |
404 | } while(--limit > 0); | 406 | } while (--limit > 0); |
405 | } | 407 | } |
406 | 408 | ||
407 | static int ffb_sync(struct fb_info *p) | 409 | static int ffb_sync(struct fb_info *p) |
408 | { | 410 | { |
409 | struct ffb_par *par = (struct ffb_par *) p->par; | 411 | struct ffb_par *par = (struct ffb_par *)p->par; |
410 | 412 | ||
411 | FFBWait(par); | 413 | FFBWait(par); |
412 | return 0; | 414 | return 0; |
@@ -431,8 +433,8 @@ static void ffb_switch_from_graph(struct ffb_par *par) | |||
431 | FFBWait(par); | 433 | FFBWait(par); |
432 | par->fifo_cache = 0; | 434 | par->fifo_cache = 0; |
433 | FFBFifo(par, 7); | 435 | FFBFifo(par, 7); |
434 | upa_writel(FFB_PPC_VCE_DISABLE|FFB_PPC_TBE_OPAQUE| | 436 | upa_writel(FFB_PPC_VCE_DISABLE | FFB_PPC_TBE_OPAQUE | |
435 | FFB_PPC_APE_DISABLE|FFB_PPC_CS_CONST, | 437 | FFB_PPC_APE_DISABLE | FFB_PPC_CS_CONST, |
436 | &fbc->ppc); | 438 | &fbc->ppc); |
437 | upa_writel(0x2000707f, &fbc->fbc); | 439 | upa_writel(0x2000707f, &fbc->fbc); |
438 | upa_writel(par->rop_cache, &fbc->rop); | 440 | upa_writel(par->rop_cache, &fbc->rop); |
@@ -455,7 +457,7 @@ static void ffb_switch_from_graph(struct ffb_par *par) | |||
455 | 457 | ||
456 | static int ffb_pan_display(struct fb_var_screeninfo *var, struct fb_info *info) | 458 | static int ffb_pan_display(struct fb_var_screeninfo *var, struct fb_info *info) |
457 | { | 459 | { |
458 | struct ffb_par *par = (struct ffb_par *) info->par; | 460 | struct ffb_par *par = (struct ffb_par *)info->par; |
459 | 461 | ||
460 | /* We just use this to catch switches out of | 462 | /* We just use this to catch switches out of |
461 | * graphics mode. | 463 | * graphics mode. |
@@ -468,16 +470,14 @@ static int ffb_pan_display(struct fb_var_screeninfo *var, struct fb_info *info) | |||
468 | } | 470 | } |
469 | 471 | ||
470 | /** | 472 | /** |
471 | * ffb_fillrect - REQUIRED function. Can use generic routines if | 473 | * ffb_fillrect - Draws a rectangle on the screen. |
472 | * non acclerated hardware and packed pixel based. | ||
473 | * Draws a rectangle on the screen. | ||
474 | * | 474 | * |
475 | * @info: frame buffer structure that represents a single frame buffer | 475 | * @info: frame buffer structure that represents a single frame buffer |
476 | * @rect: structure defining the rectangle and operation. | 476 | * @rect: structure defining the rectangle and operation. |
477 | */ | 477 | */ |
478 | static void ffb_fillrect(struct fb_info *info, const struct fb_fillrect *rect) | 478 | static void ffb_fillrect(struct fb_info *info, const struct fb_fillrect *rect) |
479 | { | 479 | { |
480 | struct ffb_par *par = (struct ffb_par *) info->par; | 480 | struct ffb_par *par = (struct ffb_par *)info->par; |
481 | struct ffb_fbc __iomem *fbc = par->fbc; | 481 | struct ffb_fbc __iomem *fbc = par->fbc; |
482 | unsigned long flags; | 482 | unsigned long flags; |
483 | u32 fg; | 483 | u32 fg; |
@@ -494,9 +494,9 @@ static void ffb_fillrect(struct fb_info *info, const struct fb_fillrect *rect) | |||
494 | par->fg_cache = fg; | 494 | par->fg_cache = fg; |
495 | } | 495 | } |
496 | 496 | ||
497 | ffb_rop(par, (rect->rop == ROP_COPY ? | 497 | ffb_rop(par, rect->rop == ROP_COPY ? |
498 | FFB_ROP_NEW : | 498 | FFB_ROP_NEW : |
499 | FFB_ROP_NEW_XOR_OLD)); | 499 | FFB_ROP_NEW_XOR_OLD); |
500 | 500 | ||
501 | FFBFifo(par, 5); | 501 | FFBFifo(par, 5); |
502 | upa_writel(FFB_DRAWOP_RECTANGLE, &fbc->drawop); | 502 | upa_writel(FFB_DRAWOP_RECTANGLE, &fbc->drawop); |
@@ -509,18 +509,15 @@ static void ffb_fillrect(struct fb_info *info, const struct fb_fillrect *rect) | |||
509 | } | 509 | } |
510 | 510 | ||
511 | /** | 511 | /** |
512 | * ffb_copyarea - REQUIRED function. Can use generic routines if | 512 | * ffb_copyarea - Copies one area of the screen to another area. |
513 | * non acclerated hardware and packed pixel based. | ||
514 | * Copies on area of the screen to another area. | ||
515 | * | 513 | * |
516 | * @info: frame buffer structure that represents a single frame buffer | 514 | * @info: frame buffer structure that represents a single frame buffer |
517 | * @area: structure defining the source and destination. | 515 | * @area: structure defining the source and destination. |
518 | */ | 516 | */ |
519 | 517 | ||
520 | static void | 518 | static void ffb_copyarea(struct fb_info *info, const struct fb_copyarea *area) |
521 | ffb_copyarea(struct fb_info *info, const struct fb_copyarea *area) | ||
522 | { | 519 | { |
523 | struct ffb_par *par = (struct ffb_par *) info->par; | 520 | struct ffb_par *par = (struct ffb_par *)info->par; |
524 | struct ffb_fbc __iomem *fbc = par->fbc; | 521 | struct ffb_fbc __iomem *fbc = par->fbc; |
525 | unsigned long flags; | 522 | unsigned long flags; |
526 | 523 | ||
@@ -547,16 +544,14 @@ ffb_copyarea(struct fb_info *info, const struct fb_copyarea *area) | |||
547 | } | 544 | } |
548 | 545 | ||
549 | /** | 546 | /** |
550 | * ffb_imageblit - REQUIRED function. Can use generic routines if | 547 | * ffb_imageblit - Copies an image from system memory to the screen. |
551 | * non acclerated hardware and packed pixel based. | ||
552 | * Copies a image from system memory to the screen. | ||
553 | * | 548 | * |
554 | * @info: frame buffer structure that represents a single frame buffer | 549 | * @info: frame buffer structure that represents a single frame buffer |
555 | * @image: structure defining the image. | 550 | * @image: structure defining the image. |
556 | */ | 551 | */ |
557 | static void ffb_imageblit(struct fb_info *info, const struct fb_image *image) | 552 | static void ffb_imageblit(struct fb_info *info, const struct fb_image *image) |
558 | { | 553 | { |
559 | struct ffb_par *par = (struct ffb_par *) info->par; | 554 | struct ffb_par *par = (struct ffb_par *)info->par; |
560 | struct ffb_fbc __iomem *fbc = par->fbc; | 555 | struct ffb_fbc __iomem *fbc = par->fbc; |
561 | const u8 *data = image->data; | 556 | const u8 *data = image->data; |
562 | unsigned long flags; | 557 | unsigned long flags; |
@@ -644,13 +639,14 @@ static void ffb_fixup_var_rgb(struct fb_var_screeninfo *var) | |||
644 | } | 639 | } |
645 | 640 | ||
646 | /** | 641 | /** |
647 | * ffb_setcolreg - Optional function. Sets a color register. | 642 | * ffb_setcolreg - Sets a color register. |
648 | * @regno: boolean, 0 copy local, 1 get_user() function | 643 | * |
649 | * @red: frame buffer colormap structure | 644 | * @regno: index of the palette (CLUT) register to program |
650 | * @green: The green value which can be up to 16 bits wide | 645 | * @red: The red value which can be up to 16 bits wide |
651 | * @blue: The blue value which can be up to 16 bits wide. | 646 | * @green: The green value which can be up to 16 bits wide |
652 | * @transp: If supported the alpha value which can be up to 16 bits wide. | 647 | * @blue: The blue value which can be up to 16 bits wide. |
653 | * @info: frame buffer info structure | 648 | * @transp: If supported, the alpha value which can be up to 16 bits wide. |
649 | * @info: frame buffer info structure | ||
654 | */ | 650 | */ |
655 | static int ffb_setcolreg(unsigned regno, | 651 | static int ffb_setcolreg(unsigned regno, |
656 | unsigned red, unsigned green, unsigned blue, | 652 | unsigned red, unsigned green, unsigned blue, |
@@ -672,14 +668,13 @@ static int ffb_setcolreg(unsigned regno, | |||
672 | } | 668 | } |
673 | 669 | ||
674 | /** | 670 | /** |
675 | * ffb_blank - Optional function. Blanks the display. | 671 | * ffb_blank - Optional function. Blanks the display. |
676 | * @blank_mode: the blank mode we want. | 672 | * @blank: the blank mode we want. |
677 | * @info: frame buffer structure that represents a single frame buffer | 673 | * @info: frame buffer structure that represents a single frame buffer |
678 | */ | 674 | */ |
679 | static int | 675 | static int ffb_blank(int blank, struct fb_info *info) |
680 | ffb_blank(int blank, struct fb_info *info) | ||
681 | { | 676 | { |
682 | struct ffb_par *par = (struct ffb_par *) info->par; | 677 | struct ffb_par *par = (struct ffb_par *)info->par; |
683 | struct ffb_dac __iomem *dac = par->dac; | 678 | struct ffb_dac __iomem *dac = par->dac; |
684 | unsigned long flags; | 679 | unsigned long flags; |
685 | u32 val; | 680 | u32 val; |
@@ -867,7 +862,7 @@ static int ffb_mmap(struct fb_info *info, struct vm_area_struct *vma) | |||
867 | 862 | ||
868 | static int ffb_ioctl(struct fb_info *info, unsigned int cmd, unsigned long arg) | 863 | static int ffb_ioctl(struct fb_info *info, unsigned int cmd, unsigned long arg) |
869 | { | 864 | { |
870 | struct ffb_par *par = (struct ffb_par *) info->par; | 865 | struct ffb_par *par = (struct ffb_par *)info->par; |
871 | 866 | ||
872 | return sbusfb_ioctl_helper(cmd, arg, info, | 867 | return sbusfb_ioctl_helper(cmd, arg, info, |
873 | FBTYPE_CREATOR, 24, par->fbsize); | 868 | FBTYPE_CREATOR, 24, par->fbsize); |
@@ -877,8 +872,7 @@ static int ffb_ioctl(struct fb_info *info, unsigned int cmd, unsigned long arg) | |||
877 | * Initialisation | 872 | * Initialisation |
878 | */ | 873 | */ |
879 | 874 | ||
880 | static void | 875 | static void ffb_init_fix(struct fb_info *info) |
881 | ffb_init_fix(struct fb_info *info) | ||
882 | { | 876 | { |
883 | struct ffb_par *par = (struct ffb_par *)info->par; | 877 | struct ffb_par *par = (struct ffb_par *)info->par; |
884 | const char *ffb_type_name; | 878 | const char *ffb_type_name; |
@@ -902,7 +896,8 @@ ffb_init_fix(struct fb_info *info) | |||
902 | info->fix.accel = FB_ACCEL_SUN_CREATOR; | 896 | info->fix.accel = FB_ACCEL_SUN_CREATOR; |
903 | } | 897 | } |
904 | 898 | ||
905 | static int __devinit ffb_probe(struct of_device *op, const struct of_device_id *match) | 899 | static int __devinit ffb_probe(struct of_device *op, |
900 | const struct of_device_id *match) | ||
906 | { | 901 | { |
907 | struct device_node *dp = op->node; | 902 | struct device_node *dp = op->node; |
908 | struct ffb_fbc __iomem *fbc; | 903 | struct ffb_fbc __iomem *fbc; |
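The kernel-doc comments above cover the Creator/FFB drawing and palette entry points (ffb_fillrect, ffb_copyarea, ffb_imageblit, ffb_setcolreg, ffb_blank). For orientation, here is a minimal sketch of how such routines are typically wired into the framebuffer core via struct fb_ops; the actual initializer in drivers/video/ffb.c may list further hooks (for example fb_sync or compat ioctl handling), so treat this as illustrative only:

#include <linux/fb.h>
#include <linux/module.h>

/* Illustrative only: registering the documented entry points with the
 * framebuffer layer.  Not copied from the patch. */
static struct fb_ops ffb_ops = {
	.owner		= THIS_MODULE,
	.fb_setcolreg	= ffb_setcolreg,
	.fb_blank	= ffb_blank,
	.fb_fillrect	= ffb_fillrect,
	.fb_copyarea	= ffb_copyarea,
	.fb_imageblit	= ffb_imageblit,
	.fb_mmap	= ffb_mmap,
	.fb_ioctl	= ffb_ioctl,
};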
diff --git a/include/asm-sparc/irqflags.h b/include/asm-sparc/irqflags.h new file mode 100644 index 000000000000..db398fb32826 --- /dev/null +++ b/include/asm-sparc/irqflags.h | |||
@@ -0,0 +1,39 @@ | |||
1 | /* | ||
2 | * include/asm-sparc/irqflags.h | ||
3 | * | ||
4 | * IRQ flags handling | ||
5 | * | ||
6 | * This file gets included from lowlevel asm headers too, to provide | ||
7 | * wrapped versions of the local_irq_*() APIs, based on the | ||
8 | * raw_local_irq_*() functions from the lowlevel headers. | ||
9 | */ | ||
10 | #ifndef _ASM_IRQFLAGS_H | ||
11 | #define _ASM_IRQFLAGS_H | ||
12 | |||
13 | #ifndef __ASSEMBLY__ | ||
14 | |||
15 | extern void raw_local_irq_restore(unsigned long); | ||
16 | extern unsigned long __raw_local_irq_save(void); | ||
17 | extern void raw_local_irq_enable(void); | ||
18 | |||
19 | static inline unsigned long getipl(void) | ||
20 | { | ||
21 | unsigned long retval; | ||
22 | |||
23 | __asm__ __volatile__("rd %%psr, %0" : "=r" (retval)); | ||
24 | return retval; | ||
25 | } | ||
26 | |||
27 | #define raw_local_save_flags(flags) ((flags) = getipl()) | ||
28 | #define raw_local_irq_save(flags) ((flags) = __raw_local_irq_save()) | ||
29 | #define raw_local_irq_disable() ((void) __raw_local_irq_save()) | ||
30 | #define raw_irqs_disabled() ((getipl() & PSR_PIL) != 0) | ||
31 | |||
32 | static inline int raw_irqs_disabled_flags(unsigned long flags) | ||
33 | { | ||
34 | return ((flags & PSR_PIL) != 0); | ||
35 | } | ||
36 | |||
37 | #endif /* (__ASSEMBLY__) */ | ||
38 | |||
39 | #endif /* !(_ASM_IRQFLAGS_H) */ | ||
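The header comment in the new include/asm-sparc/irqflags.h says the raw_local_irq_*() primitives are meant to be wrapped by the generic &lt;linux/irqflags.h&gt; machinery, which asm-sparc/system.h now pulls in (see the hunk below). A minimal sketch of what a caller sees after the switch; the function name is illustrative, not from the patch:

#include <linux/irqflags.h>

/* Minimal sketch: callers keep using the generic local_irq_*() macros,
 * which now expand to the raw_* primitives declared above, plus lockdep
 * IRQ-state tracing when CONFIG_TRACE_IRQFLAGS is enabled. */
static void example_critical_section(void)
{
	unsigned long flags;

	local_irq_save(flags);		/* raw_local_irq_save() underneath */
	/* ... touch data shared with an interrupt handler ... */
	local_irq_restore(flags);	/* raw_local_irq_restore() underneath */
}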
diff --git a/include/asm-sparc/system.h b/include/asm-sparc/system.h index d1a2572e3f55..8c259de02614 100644 --- a/include/asm-sparc/system.h +++ b/include/asm-sparc/system.h | |||
@@ -15,6 +15,8 @@ | |||
15 | 15 | ||
16 | #ifndef __ASSEMBLY__ | 16 | #ifndef __ASSEMBLY__ |
17 | 17 | ||
18 | #include <linux/irqflags.h> | ||
19 | |||
18 | /* | 20 | /* |
19 | * Sparc (general) CPU types | 21 | * Sparc (general) CPU types |
20 | */ | 22 | */ |
@@ -164,26 +166,6 @@ extern void fpsave(unsigned long *fpregs, unsigned long *fsr, | |||
164 | "o0", "o1", "o2", "o3", "o7"); \ | 166 | "o0", "o1", "o2", "o3", "o7"); \ |
165 | } while(0) | 167 | } while(0) |
166 | 168 | ||
167 | /* | ||
168 | * Changing the IRQ level on the Sparc. | ||
169 | */ | ||
170 | extern void local_irq_restore(unsigned long); | ||
171 | extern unsigned long __local_irq_save(void); | ||
172 | extern void local_irq_enable(void); | ||
173 | |||
174 | static inline unsigned long getipl(void) | ||
175 | { | ||
176 | unsigned long retval; | ||
177 | |||
178 | __asm__ __volatile__("rd %%psr, %0" : "=r" (retval)); | ||
179 | return retval; | ||
180 | } | ||
181 | |||
182 | #define local_save_flags(flags) ((flags) = getipl()) | ||
183 | #define local_irq_save(flags) ((flags) = __local_irq_save()) | ||
184 | #define local_irq_disable() ((void) __local_irq_save()) | ||
185 | #define irqs_disabled() ((getipl() & PSR_PIL) != 0) | ||
186 | |||
187 | /* XXX Change this if we ever use a PSO mode kernel. */ | 169 | /* XXX Change this if we ever use a PSO mode kernel. */ |
188 | #define mb() __asm__ __volatile__ ("" : : : "memory") | 170 | #define mb() __asm__ __volatile__ ("" : : : "memory") |
189 | #define rmb() mb() | 171 | #define rmb() mb() |
diff --git a/include/asm-sparc64/cpudata.h b/include/asm-sparc64/cpudata.h index 98a6e609163e..542421460a12 100644 --- a/include/asm-sparc64/cpudata.h +++ b/include/asm-sparc64/cpudata.h | |||
@@ -75,12 +75,11 @@ struct trap_per_cpu { | |||
75 | unsigned long tsb_huge_temp; | 75 | unsigned long tsb_huge_temp; |
76 | 76 | ||
77 | /* Dcache line 8: IRQ work list, and keep trap_block a power-of-2 in size. */ | 77 | /* Dcache line 8: IRQ work list, and keep trap_block a power-of-2 in size. */ |
78 | unsigned int irq_worklist; | 78 | unsigned long irq_worklist_pa; |
79 | unsigned int cpu_mondo_qmask; | 79 | unsigned int cpu_mondo_qmask; |
80 | unsigned int dev_mondo_qmask; | 80 | unsigned int dev_mondo_qmask; |
81 | unsigned int resum_qmask; | 81 | unsigned int resum_qmask; |
82 | unsigned int nonresum_qmask; | 82 | unsigned int nonresum_qmask; |
83 | unsigned int __pad2[1]; | ||
84 | void *hdesc; | 83 | void *hdesc; |
85 | } __attribute__((aligned(64))); | 84 | } __attribute__((aligned(64))); |
86 | extern struct trap_per_cpu trap_block[NR_CPUS]; | 85 | extern struct trap_per_cpu trap_block[NR_CPUS]; |
@@ -128,11 +127,11 @@ extern struct sun4v_2insn_patch_entry __sun4v_2insn_patch, | |||
128 | #define TRAP_PER_CPU_CPU_LIST_PA 0xc8 | 127 | #define TRAP_PER_CPU_CPU_LIST_PA 0xc8 |
129 | #define TRAP_PER_CPU_TSB_HUGE 0xd0 | 128 | #define TRAP_PER_CPU_TSB_HUGE 0xd0 |
130 | #define TRAP_PER_CPU_TSB_HUGE_TEMP 0xd8 | 129 | #define TRAP_PER_CPU_TSB_HUGE_TEMP 0xd8 |
131 | #define TRAP_PER_CPU_IRQ_WORKLIST 0xe0 | 130 | #define TRAP_PER_CPU_IRQ_WORKLIST_PA 0xe0 |
132 | #define TRAP_PER_CPU_CPU_MONDO_QMASK 0xe4 | 131 | #define TRAP_PER_CPU_CPU_MONDO_QMASK 0xe8 |
133 | #define TRAP_PER_CPU_DEV_MONDO_QMASK 0xe8 | 132 | #define TRAP_PER_CPU_DEV_MONDO_QMASK 0xec |
134 | #define TRAP_PER_CPU_RESUM_QMASK 0xec | 133 | #define TRAP_PER_CPU_RESUM_QMASK 0xf0 |
135 | #define TRAP_PER_CPU_NONRESUM_QMASK 0xf0 | 134 | #define TRAP_PER_CPU_NONRESUM_QMASK 0xf4 |
136 | 135 | ||
137 | #define TRAP_BLOCK_SZ_SHIFT 8 | 136 | #define TRAP_BLOCK_SZ_SHIFT 8 |
138 | 137 | ||
@@ -184,9 +183,9 @@ extern struct sun4v_2insn_patch_entry __sun4v_2insn_patch, | |||
184 | ldx [DEST + TRAP_PER_CPU_PGD_PADDR], DEST; | 183 | ldx [DEST + TRAP_PER_CPU_PGD_PADDR], DEST; |
185 | 184 | ||
186 | /* Clobbers TMP, loads local processor's IRQ work area into DEST. */ | 185 | /* Clobbers TMP, loads local processor's IRQ work area into DEST. */ |
187 | #define TRAP_LOAD_IRQ_WORK(DEST, TMP) \ | 186 | #define TRAP_LOAD_IRQ_WORK_PA(DEST, TMP) \ |
188 | TRAP_LOAD_TRAP_BLOCK(DEST, TMP) \ | 187 | TRAP_LOAD_TRAP_BLOCK(DEST, TMP) \ |
189 | add DEST, TRAP_PER_CPU_IRQ_WORKLIST, DEST; | 188 | add DEST, TRAP_PER_CPU_IRQ_WORKLIST_PA, DEST; |
190 | 189 | ||
191 | /* Clobbers TMP, loads DEST with current thread info pointer. */ | 190 | /* Clobbers TMP, loads DEST with current thread info pointer. */ |
192 | #define TRAP_LOAD_THREAD_REG(DEST, TMP) \ | 191 | #define TRAP_LOAD_THREAD_REG(DEST, TMP) \ |
@@ -223,9 +222,9 @@ extern struct sun4v_2insn_patch_entry __sun4v_2insn_patch, | |||
223 | ldx [DEST + TRAP_PER_CPU_PGD_PADDR], DEST; | 222 | ldx [DEST + TRAP_PER_CPU_PGD_PADDR], DEST; |
224 | 223 | ||
225 | /* Clobbers TMP, loads local processor's IRQ work area into DEST. */ | 224 | /* Clobbers TMP, loads local processor's IRQ work area into DEST. */ |
226 | #define TRAP_LOAD_IRQ_WORK(DEST, TMP) \ | 225 | #define TRAP_LOAD_IRQ_WORK_PA(DEST, TMP) \ |
227 | TRAP_LOAD_TRAP_BLOCK(DEST, TMP) \ | 226 | TRAP_LOAD_TRAP_BLOCK(DEST, TMP) \ |
228 | add DEST, TRAP_PER_CPU_IRQ_WORKLIST, DEST; | 227 | add DEST, TRAP_PER_CPU_IRQ_WORKLIST_PA, DEST; |
229 | 228 | ||
230 | #define TRAP_LOAD_THREAD_REG(DEST, TMP) \ | 229 | #define TRAP_LOAD_THREAD_REG(DEST, TMP) \ |
231 | TRAP_LOAD_TRAP_BLOCK(DEST, TMP) \ | 230 | TRAP_LOAD_TRAP_BLOCK(DEST, TMP) \ |
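Widening irq_worklist from unsigned int to an unsigned long physical address (irq_worklist_pa) pushes every following assembly offset up by four bytes (0xe4 becomes 0xe8, and so on) and drops the old padding word so struct trap_per_cpu keeps its power-of-two size. Below is a hedged sketch of the sort of compile-time cross-check that keeps hand-maintained offsets like these in sync with the C layout; sparc64 performs similar checks at trap initialisation, though the exact form there may differ:

#include <linux/kernel.h>	/* BUILD_BUG_ON() */
#include <linux/stddef.h>	/* offsetof() */
#include <asm/cpudata.h>

/* Illustrative only: fail the build if the asm constants drift from the
 * C layout of struct trap_per_cpu. */
static inline void check_trap_per_cpu_layout(void)
{
	BUILD_BUG_ON(offsetof(struct trap_per_cpu, irq_worklist_pa) !=
		     TRAP_PER_CPU_IRQ_WORKLIST_PA);
	BUILD_BUG_ON(offsetof(struct trap_per_cpu, cpu_mondo_qmask) !=
		     TRAP_PER_CPU_CPU_MONDO_QMASK);
	BUILD_BUG_ON(offsetof(struct trap_per_cpu, nonresum_qmask) !=
		     TRAP_PER_CPU_NONRESUM_QMASK);
	BUILD_BUG_ON(sizeof(struct trap_per_cpu) != (1 << TRAP_BLOCK_SZ_SHIFT));
}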
diff --git a/include/asm-sparc64/irq.h b/include/asm-sparc64/irq.h index c00ad152771b..182dba05c702 100644 --- a/include/asm-sparc64/irq.h +++ b/include/asm-sparc64/irq.h | |||
@@ -51,10 +51,19 @@ extern unsigned int sun4v_build_msi(u32 devhandle, unsigned int *virt_irq_p, | |||
51 | unsigned int msi_devino_start, | 51 | unsigned int msi_devino_start, |
52 | unsigned int msi_devino_end); | 52 | unsigned int msi_devino_end); |
53 | extern void sun4v_destroy_msi(unsigned int virt_irq); | 53 | extern void sun4v_destroy_msi(unsigned int virt_irq); |
54 | extern unsigned int sun4u_build_msi(u32 portid, unsigned int *virt_irq_p, | ||
55 | unsigned int msi_devino_start, | ||
56 | unsigned int msi_devino_end, | ||
57 | unsigned long imap_base, | ||
58 | unsigned long iclr_base); | ||
59 | extern void sun4u_destroy_msi(unsigned int virt_irq); | ||
54 | extern unsigned int sbus_build_irq(void *sbus, unsigned int ino); | 60 | extern unsigned int sbus_build_irq(void *sbus, unsigned int ino); |
55 | 61 | ||
56 | extern void sparc64_set_msi(unsigned int virt_irq, u32 msi); | 62 | extern unsigned char virt_irq_alloc(unsigned int dev_handle, |
57 | extern u32 sparc64_get_msi(unsigned int virt_irq); | 63 | unsigned int dev_ino); |
64 | #ifdef CONFIG_PCI_MSI | ||
65 | extern void virt_irq_free(unsigned int virt_irq); | ||
66 | #endif | ||
58 | 67 | ||
59 | extern void fixup_irqs(void); | 68 | extern void fixup_irqs(void); |
60 | 69 | ||
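With the MSI support consolidated, the interrupt code obtains virtual IRQ numbers through virt_irq_alloc(), and virt_irq_free() is only built when CONFIG_PCI_MSI is set. A hypothetical sketch of how the pair might be used on an error path follows; example_program_msi_hw() is a stand-in helper invented for illustration, and treating a zero return from virt_irq_alloc() as allocation failure is an assumption, not something stated by the patch:

#include <linux/errno.h>
#include <linux/types.h>
#include <asm/irq.h>

#ifdef CONFIG_PCI_MSI
/* Stand-in for the real MSI hardware programming (hypothetical). */
static int example_program_msi_hw(unsigned int virt_irq,
				  u32 dev_handle, unsigned int dev_ino)
{
	/* The real code would program IMAP/ICLR registers or sun4v queues. */
	return 0;
}

/* Hypothetical caller: pair virt_irq_alloc() with virt_irq_free() so a
 * failed setup does not leak a virtual IRQ slot. */
static int example_setup_one_msi(u32 dev_handle, unsigned int dev_ino)
{
	unsigned int virt_irq = virt_irq_alloc(dev_handle, dev_ino);
	int err;

	if (!virt_irq)		/* assumption: 0 means the table is full */
		return -ENOMEM;

	err = example_program_msi_hw(virt_irq, dev_handle, dev_ino);
	if (err) {
		virt_irq_free(virt_irq);
		return err;
	}
	return virt_irq;
}
#endif /* CONFIG_PCI_MSI */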