aboutsummaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
-rw-r--r--Documentation/IPMI.txt4
-rw-r--r--Documentation/filesystems/configfs/configfs_example.c19
-rw-r--r--Documentation/sysctl/vm.txt13
-rw-r--r--MAINTAINERS14
-rw-r--r--arch/arm/mm/mm-armv.c2
-rw-r--r--arch/i386/kernel/msr.c9
-rw-r--r--arch/i386/mach-voyager/voyager_smp.c6
-rw-r--r--arch/i386/mm/pgtable.c14
-rw-r--r--arch/ia64/Kconfig5
-rw-r--r--arch/s390/appldata/appldata_base.c1
-rw-r--r--arch/s390/appldata/appldata_mem.c23
-rw-r--r--arch/sparc/kernel/ioport.c17
-rw-r--r--arch/sparc/kernel/of_device.c477
-rw-r--r--arch/sparc/kernel/prom.c30
-rw-r--r--arch/sparc/kernel/sys_sunos.c2
-rw-r--r--arch/sparc/kernel/time.c109
-rw-r--r--arch/sparc64/kernel/auxio.c78
-rw-r--r--arch/sparc64/kernel/ebus.c150
-rw-r--r--arch/sparc64/kernel/irq.c4
-rw-r--r--arch/sparc64/kernel/isa.c101
-rw-r--r--arch/sparc64/kernel/of_device.c689
-rw-r--r--arch/sparc64/kernel/pci.c8
-rw-r--r--arch/sparc64/kernel/pci_common.c291
-rw-r--r--arch/sparc64/kernel/pci_psycho.c165
-rw-r--r--arch/sparc64/kernel/pci_sabre.c158
-rw-r--r--arch/sparc64/kernel/pci_schizo.c311
-rw-r--r--arch/sparc64/kernel/pci_sun4v.c10
-rw-r--r--arch/sparc64/kernel/power.c59
-rw-r--r--arch/sparc64/kernel/prom.c787
-rw-r--r--arch/sparc64/kernel/sbus.c6
-rw-r--r--arch/sparc64/kernel/starfire.c4
-rw-r--r--arch/sparc64/kernel/sys_sunos32.c2
-rw-r--r--arch/sparc64/kernel/time.c258
-rw-r--r--arch/sparc64/kernel/unaligned.c4
-rw-r--r--arch/um/Makefile-x86_647
-rw-r--r--arch/um/drivers/stderr_console.c22
-rw-r--r--arch/um/kernel/skas/mmu.c2
-rw-r--r--arch/um/kernel/time.c172
-rw-r--r--arch/um/kernel/time_kern.c32
-rw-r--r--arch/um/kernel/vmlinux.lds.S2
-rw-r--r--arch/um/os-Linux/mem.c21
-rw-r--r--arch/um/sys-i386/sys_call_table.S2
-rw-r--r--arch/um/sys-x86_64/syscall_table.c6
-rw-r--r--arch/x86_64/kernel/functionlist1
-rw-r--r--block/ll_rw_blk.c4
-rw-r--r--drivers/acorn/block/Kconfig2
-rw-r--r--drivers/atm/he.c2
-rw-r--r--drivers/atm/idt77105.c2
-rw-r--r--drivers/atm/idt77105.h2
-rw-r--r--drivers/atm/iphase.c4
-rw-r--r--drivers/atm/suni.c2
-rw-r--r--drivers/base/node.c63
-rw-r--r--drivers/cdrom/cm206.c9
-rw-r--r--drivers/char/istallion.c17
-rw-r--r--drivers/char/pc8736x_gpio.c5
-rw-r--r--drivers/char/scx200_gpio.c6
-rw-r--r--drivers/edac/amd76x_edac.c98
-rw-r--r--drivers/edac/e752x_edac.c344
-rw-r--r--drivers/edac/e7xxx_edac.c175
-rw-r--r--drivers/edac/edac_mc.c589
-rw-r--r--drivers/edac/edac_mc.h18
-rw-r--r--drivers/edac/i82860_edac.c131
-rw-r--r--drivers/edac/i82875p_edac.c219
-rw-r--r--drivers/edac/r82600_edac.c142
-rw-r--r--drivers/i2c/busses/i2c-i801.c4
-rw-r--r--drivers/infiniband/ulp/iser/iscsi_iser.c1
-rw-r--r--drivers/input/serio/i8042-sparcio.h108
-rw-r--r--drivers/net/bnx2.c32
-rw-r--r--drivers/net/bnx2.h1
-rw-r--r--drivers/net/irda/Kconfig2
-rw-r--r--drivers/net/irda/mcs7780.c2
-rw-r--r--drivers/net/sunhme.c10
-rw-r--r--drivers/parisc/led.c11
-rw-r--r--drivers/serial/sunsab.c299
-rw-r--r--drivers/serial/sunsu.c497
-rw-r--r--drivers/serial/sunzilog.c793
-rw-r--r--drivers/usb/core/devio.c6
-rw-r--r--drivers/usb/core/inode.c2
-rw-r--r--drivers/usb/core/usb.h1
-rw-r--r--drivers/video/bw2.c213
-rw-r--r--drivers/video/cg14.c326
-rw-r--r--drivers/video/cg3.c217
-rw-r--r--drivers/video/cg6.c337
-rw-r--r--drivers/video/ffb.c466
-rw-r--r--drivers/video/imacfb.c4
-rw-r--r--drivers/video/leo.c294
-rw-r--r--drivers/video/p9100.c251
-rw-r--r--drivers/video/tcx.c224
-rw-r--r--fs/Kconfig12
-rw-r--r--fs/buffer.c2
-rw-r--r--fs/configfs/dir.c6
-rw-r--r--fs/configfs/symlink.c2
-rw-r--r--fs/fs-writeback.c4
-rw-r--r--fs/inode.c9
-rw-r--r--fs/ioprio.c29
-rw-r--r--fs/ncpfs/mmap.c2
-rw-r--r--fs/nfs/pagelist.c1
-rw-r--r--fs/nfs/write.c9
-rw-r--r--fs/nfsd/export.c2
-rw-r--r--fs/nfsd/nfs4state.c32
-rw-r--r--fs/nfsd/nfsfh.c27
-rw-r--r--fs/nfsd/vfs.c14
-rw-r--r--fs/ocfs2/aops.c9
-rw-r--r--fs/ocfs2/cluster/heartbeat.c20
-rw-r--r--fs/ocfs2/cluster/masklog.h22
-rw-r--r--fs/ocfs2/cluster/ocfs2_heartbeat.h1
-rw-r--r--fs/ocfs2/cluster/tcp.c14
-rw-r--r--fs/ocfs2/dir.c6
-rw-r--r--fs/ocfs2/dlm/dlmcommon.h2
-rw-r--r--fs/ocfs2/dlm/dlmdomain.c9
-rw-r--r--fs/ocfs2/dlm/dlmrecovery.c8
-rw-r--r--fs/ocfs2/dlmglue.c3
-rw-r--r--fs/ocfs2/extent_map.c29
-rw-r--r--fs/ocfs2/journal.c5
-rw-r--r--fs/ocfs2/mmap.c4
-rw-r--r--fs/ocfs2/ocfs2.h4
-rw-r--r--fs/ocfs2/slot_map.c2
-rw-r--r--fs/ocfs2/super.c49
-rw-r--r--fs/ocfs2/symlink.c2
-rw-r--r--fs/proc/proc_misc.c23
-rw-r--r--include/asm-alpha/socket.h1
-rw-r--r--include/asm-arm/socket.h1
-rw-r--r--include/asm-arm26/socket.h1
-rw-r--r--include/asm-cris/socket.h1
-rw-r--r--include/asm-frv/socket.h1
-rw-r--r--include/asm-h8300/socket.h1
-rw-r--r--include/asm-i386/socket.h1
-rw-r--r--include/asm-ia64/socket.h1
-rw-r--r--include/asm-m32r/socket.h1
-rw-r--r--include/asm-m68k/socket.h1
-rw-r--r--include/asm-mips/socket.h1
-rw-r--r--include/asm-parisc/socket.h1
-rw-r--r--include/asm-powerpc/socket.h1
-rw-r--r--include/asm-s390/pgtable.h7
-rw-r--r--include/asm-s390/socket.h1
-rw-r--r--include/asm-sh/socket.h1
-rw-r--r--include/asm-sparc/of_device.h20
-rw-r--r--include/asm-sparc/prom.h12
-rw-r--r--include/asm-sparc/socket.h1
-rw-r--r--include/asm-sparc64/of_device.h20
-rw-r--r--include/asm-sparc64/pbm.h5
-rw-r--r--include/asm-sparc64/prom.h20
-rw-r--r--include/asm-sparc64/sbus.h1
-rw-r--r--include/asm-sparc64/socket.h1
-rw-r--r--include/asm-sparc64/starfire.h2
-rw-r--r--include/asm-um/io.h5
-rw-r--r--include/asm-v850/socket.h1
-rw-r--r--include/asm-x86_64/socket.h1
-rw-r--r--include/asm-xtensa/socket.h1
-rw-r--r--include/linux/atmdev.h4
-rw-r--r--include/linux/mm.h6
-rw-r--r--include/linux/mmzone.h38
-rw-r--r--include/linux/net.h1
-rw-r--r--include/linux/netdevice.h18
-rw-r--r--include/linux/page-flags.h149
-rw-r--r--include/linux/pagemap.h45
-rw-r--r--include/linux/rcupdate.h24
-rw-r--r--include/linux/rtc.h4
-rw-r--r--include/linux/sched.h2
-rw-r--r--include/linux/security.h51
-rw-r--r--include/linux/skbuff.h9
-rw-r--r--include/linux/smp.h2
-rw-r--r--include/linux/sunrpc/svc.h4
-rw-r--r--include/linux/swap.h1
-rw-r--r--include/linux/vmstat.h215
-rw-r--r--include/net/af_unix.h6
-rw-r--r--include/net/pkt_sched.h18
-rw-r--r--include/net/protocol.h3
-rw-r--r--include/net/scm.h17
-rw-r--r--include/net/sock.h3
-rw-r--r--include/net/tcp.h2
-rw-r--r--include/net/tcp_ecn.h6
-rw-r--r--init/Kconfig13
-rw-r--r--init/main.c7
-rw-r--r--kernel/audit.c8
-rw-r--r--kernel/sched.c25
-rw-r--r--kernel/signal.c7
-rw-r--r--kernel/sysctl.c9
-rw-r--r--mm/Makefile2
-rw-r--r--mm/filemap.c8
-rw-r--r--mm/highmem.c6
-rw-r--r--mm/memory.c8
-rw-r--r--mm/mempolicy.c6
-rw-r--r--mm/mmap.c2
-rw-r--r--mm/nommu.c2
-rw-r--r--mm/page-writeback.c93
-rw-r--r--mm/page_alloc.c472
-rw-r--r--mm/page_io.c4
-rw-r--r--mm/rmap.c7
-rw-r--r--mm/shmem.c4
-rw-r--r--mm/slab.c124
-rw-r--r--mm/swap.c4
-rw-r--r--mm/swap_state.c4
-rw-r--r--mm/vmscan.c68
-rw-r--r--mm/vmstat.c614
-rw-r--r--net/atm/Makefile2
-rw-r--r--net/atm/atm_sysfs.c176
-rw-r--r--net/atm/common.c7
-rw-r--r--net/atm/common.h2
-rw-r--r--net/atm/resources.c22
-rw-r--r--net/atm/resources.h2
-rw-r--r--net/bridge/br_device.c4
-rw-r--r--net/bridge/br_if.c3
-rw-r--r--net/core/dev.c39
-rw-r--r--net/core/rtnetlink.c2
-rw-r--r--net/core/skbuff.c7
-rw-r--r--net/core/sock.c11
-rw-r--r--net/decnet/netfilter/dn_rtmsg.c2
-rw-r--r--net/ipv4/af_inet.c6
-rw-r--r--net/ipv4/netfilter/Kconfig2
-rw-r--r--net/ipv4/netfilter/arp_tables.c3
-rw-r--r--net/ipv4/netfilter/ip_queue.c14
-rw-r--r--net/ipv4/netfilter/ip_tables.c3
-rw-r--r--net/ipv4/tcp.c8
-rw-r--r--net/ipv4/tcp_diag.c5
-rw-r--r--net/ipv4/tcp_input.c4
-rw-r--r--net/ipv4/tcp_ipv4.c3
-rw-r--r--net/ipv4/tcp_minisocks.c2
-rw-r--r--net/ipv4/tcp_output.c2
-rw-r--r--net/ipv6/netfilter/ip6_queue.c2
-rw-r--r--net/ipv6/netfilter/ip6_tables.c3
-rw-r--r--net/ipv6/tcp_ipv6.c3
-rw-r--r--net/irda/irlan/irlan_client.c3
-rw-r--r--net/netfilter/Kconfig5
-rw-r--r--net/netfilter/nf_conntrack_netlink.c1
-rw-r--r--net/netfilter/nf_conntrack_proto_sctp.c2
-rw-r--r--net/netfilter/nfnetlink.c2
-rw-r--r--net/netfilter/nfnetlink_queue.c12
-rw-r--r--net/netfilter/xt_sctp.c2
-rw-r--r--net/netfilter/xt_tcpudp.c2
-rw-r--r--net/netlink/genetlink.c2
-rw-r--r--net/sunrpc/auth_gss/gss_mech_switch.c6
-rw-r--r--net/sunrpc/auth_gss/svcauth_gss.c271
-rw-r--r--net/sunrpc/svc.c3
-rw-r--r--net/tipc/core.c3
-rw-r--r--net/tipc/link.c11
-rw-r--r--net/tipc/node.c26
-rw-r--r--net/tipc/zone.h4
-rw-r--r--net/unix/af_unix.c27
-rw-r--r--net/xfrm/xfrm_state.c2
-rw-r--r--net/xfrm/xfrm_user.c2
-rw-r--r--security/commoncap.c4
-rw-r--r--security/dummy.c16
-rw-r--r--security/selinux/hooks.c63
244 files changed, 7383 insertions, 6057 deletions
diff --git a/Documentation/IPMI.txt b/Documentation/IPMI.txt
index bf1cf98d2a27..0256805b548f 100644
--- a/Documentation/IPMI.txt
+++ b/Documentation/IPMI.txt
@@ -10,7 +10,7 @@ standard for controlling intelligent devices that monitor a system.
10It provides for dynamic discovery of sensors in the system and the 10It provides for dynamic discovery of sensors in the system and the
11ability to monitor the sensors and be informed when the sensor's 11ability to monitor the sensors and be informed when the sensor's
12values change or go outside certain boundaries. It also has a 12values change or go outside certain boundaries. It also has a
13standardized database for field-replacable units (FRUs) and a watchdog 13standardized database for field-replaceable units (FRUs) and a watchdog
14timer. 14timer.
15 15
16To use this, you need an interface to an IPMI controller in your 16To use this, you need an interface to an IPMI controller in your
@@ -64,7 +64,7 @@ situation, you need to read the section below named 'The SI Driver' or
64IPMI defines a standard watchdog timer. You can enable this with the 64IPMI defines a standard watchdog timer. You can enable this with the
65'IPMI Watchdog Timer' config option. If you compile the driver into 65'IPMI Watchdog Timer' config option. If you compile the driver into
66the kernel, then via a kernel command-line option you can have the 66the kernel, then via a kernel command-line option you can have the
67watchdog timer start as soon as it intitializes. It also have a lot 67watchdog timer start as soon as it initializes. It also have a lot
68of other options, see the 'Watchdog' section below for more details. 68of other options, see the 'Watchdog' section below for more details.
69Note that you can also have the watchdog continue to run if it is 69Note that you can also have the watchdog continue to run if it is
70closed (by default it is disabled on close). Go into the 'Watchdog 70closed (by default it is disabled on close). Go into the 'Watchdog
diff --git a/Documentation/filesystems/configfs/configfs_example.c b/Documentation/filesystems/configfs/configfs_example.c
index 3d4713a6c207..2d6a14a463e0 100644
--- a/Documentation/filesystems/configfs/configfs_example.c
+++ b/Documentation/filesystems/configfs/configfs_example.c
@@ -264,6 +264,15 @@ static struct config_item_type simple_child_type = {
264}; 264};
265 265
266 266
267struct simple_children {
268 struct config_group group;
269};
270
271static inline struct simple_children *to_simple_children(struct config_item *item)
272{
273 return item ? container_of(to_config_group(item), struct simple_children, group) : NULL;
274}
275
267static struct config_item *simple_children_make_item(struct config_group *group, const char *name) 276static struct config_item *simple_children_make_item(struct config_group *group, const char *name)
268{ 277{
269 struct simple_child *simple_child; 278 struct simple_child *simple_child;
@@ -304,7 +313,13 @@ static ssize_t simple_children_attr_show(struct config_item *item,
304"items have only one attribute that is readable and writeable.\n"); 313"items have only one attribute that is readable and writeable.\n");
305} 314}
306 315
316static void simple_children_release(struct config_item *item)
317{
318 kfree(to_simple_children(item));
319}
320
307static struct configfs_item_operations simple_children_item_ops = { 321static struct configfs_item_operations simple_children_item_ops = {
322 .release = simple_children_release,
308 .show_attribute = simple_children_attr_show, 323 .show_attribute = simple_children_attr_show,
309}; 324};
310 325
@@ -345,10 +360,6 @@ static struct configfs_subsystem simple_children_subsys = {
345 * children of its own. 360 * children of its own.
346 */ 361 */
347 362
348struct simple_children {
349 struct config_group group;
350};
351
352static struct config_group *group_children_make_group(struct config_group *group, const char *name) 363static struct config_group *group_children_make_group(struct config_group *group, const char *name)
353{ 364{
354 struct simple_children *simple_children; 365 struct simple_children *simple_children;
diff --git a/Documentation/sysctl/vm.txt b/Documentation/sysctl/vm.txt
index 2dc246af4885..86754eb390da 100644
--- a/Documentation/sysctl/vm.txt
+++ b/Documentation/sysctl/vm.txt
@@ -28,7 +28,6 @@ Currently, these files are in /proc/sys/vm:
28- block_dump 28- block_dump
29- drop-caches 29- drop-caches
30- zone_reclaim_mode 30- zone_reclaim_mode
31- zone_reclaim_interval
32- panic_on_oom 31- panic_on_oom
33 32
34============================================================== 33==============================================================
@@ -167,18 +166,6 @@ use of files and builds up large slab caches. However, the slab
167shrink operation is global, may take a long time and free slabs 166shrink operation is global, may take a long time and free slabs
168in all nodes of the system. 167in all nodes of the system.
169 168
170================================================================
171
172zone_reclaim_interval:
173
174The time allowed for off node allocations after zone reclaim
175has failed to reclaim enough pages to allow a local allocation.
176
177Time is set in seconds and set by default to 30 seconds.
178
179Reduce the interval if undesired off node allocations occur. However, too
180frequent scans will have a negative impact onoff node allocation performance.
181
182============================================================= 169=============================================================
183 170
184panic_on_oom 171panic_on_oom
diff --git a/MAINTAINERS b/MAINTAINERS
index 31a13720f23c..7e5a7ff2a86c 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -925,23 +925,21 @@ S: Maintained
925 925
926EDAC-CORE 926EDAC-CORE
927P: Doug Thompson 927P: Doug Thompson
928M: norsk5@xmission.com, dthompson@linuxnetworx.com 928M: norsk5@xmission.com
929P: Dave Peterson
930M: dsp@llnl.gov, dave_peterson@pobox.com
931L: bluesmoke-devel@lists.sourceforge.net 929L: bluesmoke-devel@lists.sourceforge.net
932W: bluesmoke.sourceforge.net 930W: bluesmoke.sourceforge.net
933S: Maintained 931S: Supported
934 932
935EDAC-E752X 933EDAC-E752X
936P: Dave Peterson 934P: Mark Gross
937M: dsp@llnl.gov, dave_peterson@pobox.com 935M: mark.gross@intel.com
938L: bluesmoke-devel@lists.sourceforge.net 936L: bluesmoke-devel@lists.sourceforge.net
939W: bluesmoke.sourceforge.net 937W: bluesmoke.sourceforge.net
940S: Maintained 938S: Maintained
941 939
942EDAC-E7XXX 940EDAC-E7XXX
943P: Dave Peterson 941P: Doug Thompson
944M: dsp@llnl.gov, dave_peterson@pobox.com 942M: norsk5@xmission.com
945L: bluesmoke-devel@lists.sourceforge.net 943L: bluesmoke-devel@lists.sourceforge.net
946W: bluesmoke.sourceforge.net 944W: bluesmoke.sourceforge.net
947S: Maintained 945S: Maintained
diff --git a/arch/arm/mm/mm-armv.c b/arch/arm/mm/mm-armv.c
index 95273de4f772..931be1798122 100644
--- a/arch/arm/mm/mm-armv.c
+++ b/arch/arm/mm/mm-armv.c
@@ -227,7 +227,7 @@ void free_pgd_slow(pgd_t *pgd)
227 227
228 pte = pmd_page(*pmd); 228 pte = pmd_page(*pmd);
229 pmd_clear(pmd); 229 pmd_clear(pmd);
230 dec_page_state(nr_page_table_pages); 230 dec_zone_page_state(virt_to_page((unsigned long *)pgd), NR_PAGETABLE);
231 pte_lock_deinit(pte); 231 pte_lock_deinit(pte);
232 pte_free(pte); 232 pte_free(pte);
233 pmd_free(pmd); 233 pmd_free(pmd);
diff --git a/arch/i386/kernel/msr.c b/arch/i386/kernel/msr.c
index d022cb8fd725..5c29a9fb4a44 100644
--- a/arch/i386/kernel/msr.c
+++ b/arch/i386/kernel/msr.c
@@ -251,7 +251,9 @@ static int msr_class_device_create(int i)
251 return err; 251 return err;
252} 252}
253 253
254static int msr_class_cpu_callback(struct notifier_block *nfb, unsigned long action, void *hcpu) 254#ifdef CONFIG_HOTPLUG_CPU
255static int msr_class_cpu_callback(struct notifier_block *nfb,
256 unsigned long action, void *hcpu)
255{ 257{
256 unsigned int cpu = (unsigned long)hcpu; 258 unsigned int cpu = (unsigned long)hcpu;
257 259
@@ -270,6 +272,7 @@ static struct notifier_block __cpuinitdata msr_class_cpu_notifier =
270{ 272{
271 .notifier_call = msr_class_cpu_callback, 273 .notifier_call = msr_class_cpu_callback,
272}; 274};
275#endif
273 276
274static int __init msr_init(void) 277static int __init msr_init(void)
275{ 278{
@@ -292,7 +295,7 @@ static int __init msr_init(void)
292 if (err != 0) 295 if (err != 0)
293 goto out_class; 296 goto out_class;
294 } 297 }
295 register_cpu_notifier(&msr_class_cpu_notifier); 298 register_hotcpu_notifier(&msr_class_cpu_notifier);
296 299
297 err = 0; 300 err = 0;
298 goto out; 301 goto out;
@@ -315,7 +318,7 @@ static void __exit msr_exit(void)
315 class_device_destroy(msr_class, MKDEV(MSR_MAJOR, cpu)); 318 class_device_destroy(msr_class, MKDEV(MSR_MAJOR, cpu));
316 class_destroy(msr_class); 319 class_destroy(msr_class);
317 unregister_chrdev(MSR_MAJOR, "cpu/msr"); 320 unregister_chrdev(MSR_MAJOR, "cpu/msr");
318 unregister_cpu_notifier(&msr_class_cpu_notifier); 321 unregister_hotcpu_notifier(&msr_class_cpu_notifier);
319} 322}
320 323
321module_init(msr_init); 324module_init(msr_init);
diff --git a/arch/i386/mach-voyager/voyager_smp.c b/arch/i386/mach-voyager/voyager_smp.c
index 5b8b579a079f..6e9e494c6c3d 100644
--- a/arch/i386/mach-voyager/voyager_smp.c
+++ b/arch/i386/mach-voyager/voyager_smp.c
@@ -1938,3 +1938,9 @@ smp_cpus_done(unsigned int max_cpus)
1938{ 1938{
1939 zap_low_mappings(); 1939 zap_low_mappings();
1940} 1940}
1941
1942void __init
1943smp_setup_processor_id(void)
1944{
1945 current_thread_info()->cpu = hard_smp_processor_id();
1946}
diff --git a/arch/i386/mm/pgtable.c b/arch/i386/mm/pgtable.c
index 2889567e21a1..5e735ff90e8a 100644
--- a/arch/i386/mm/pgtable.c
+++ b/arch/i386/mm/pgtable.c
@@ -30,7 +30,6 @@ void show_mem(void)
30 struct page *page; 30 struct page *page;
31 pg_data_t *pgdat; 31 pg_data_t *pgdat;
32 unsigned long i; 32 unsigned long i;
33 struct page_state ps;
34 unsigned long flags; 33 unsigned long flags;
35 34
36 printk(KERN_INFO "Mem-info:\n"); 35 printk(KERN_INFO "Mem-info:\n");
@@ -58,12 +57,13 @@ void show_mem(void)
58 printk(KERN_INFO "%d pages shared\n", shared); 57 printk(KERN_INFO "%d pages shared\n", shared);
59 printk(KERN_INFO "%d pages swap cached\n", cached); 58 printk(KERN_INFO "%d pages swap cached\n", cached);
60 59
61 get_page_state(&ps); 60 printk(KERN_INFO "%lu pages dirty\n", global_page_state(NR_FILE_DIRTY));
62 printk(KERN_INFO "%lu pages dirty\n", ps.nr_dirty); 61 printk(KERN_INFO "%lu pages writeback\n",
63 printk(KERN_INFO "%lu pages writeback\n", ps.nr_writeback); 62 global_page_state(NR_WRITEBACK));
64 printk(KERN_INFO "%lu pages mapped\n", ps.nr_mapped); 63 printk(KERN_INFO "%lu pages mapped\n", global_page_state(NR_FILE_MAPPED));
65 printk(KERN_INFO "%lu pages slab\n", ps.nr_slab); 64 printk(KERN_INFO "%lu pages slab\n", global_page_state(NR_SLAB));
66 printk(KERN_INFO "%lu pages pagetables\n", ps.nr_page_table_pages); 65 printk(KERN_INFO "%lu pages pagetables\n",
66 global_page_state(NR_PAGETABLE));
67} 67}
68 68
69/* 69/*
diff --git a/arch/ia64/Kconfig b/arch/ia64/Kconfig
index b487e227a1f7..47de9ee6bcd6 100644
--- a/arch/ia64/Kconfig
+++ b/arch/ia64/Kconfig
@@ -70,6 +70,11 @@ config DMA_IS_DMA32
70 bool 70 bool
71 default y 71 default y
72 72
73config DMA_IS_NORMAL
74 bool
75 depends on IA64_SGI_SN2
76 default y
77
73choice 78choice
74 prompt "System type" 79 prompt "System type"
75 default IA64_GENERIC 80 default IA64_GENERIC
diff --git a/arch/s390/appldata/appldata_base.c b/arch/s390/appldata/appldata_base.c
index 61bc44626c04..2476ca739c1e 100644
--- a/arch/s390/appldata/appldata_base.c
+++ b/arch/s390/appldata/appldata_base.c
@@ -766,7 +766,6 @@ unsigned long nr_iowait(void)
766#endif /* MODULE */ 766#endif /* MODULE */
767EXPORT_SYMBOL_GPL(si_swapinfo); 767EXPORT_SYMBOL_GPL(si_swapinfo);
768EXPORT_SYMBOL_GPL(nr_threads); 768EXPORT_SYMBOL_GPL(nr_threads);
769EXPORT_SYMBOL_GPL(get_full_page_state);
770EXPORT_SYMBOL_GPL(nr_running); 769EXPORT_SYMBOL_GPL(nr_running);
771EXPORT_SYMBOL_GPL(nr_iowait); 770EXPORT_SYMBOL_GPL(nr_iowait);
772//EXPORT_SYMBOL_GPL(nr_context_switches); 771//EXPORT_SYMBOL_GPL(nr_context_switches);
diff --git a/arch/s390/appldata/appldata_mem.c b/arch/s390/appldata/appldata_mem.c
index 7915a197d96d..4811e2dac864 100644
--- a/arch/s390/appldata/appldata_mem.c
+++ b/arch/s390/appldata/appldata_mem.c
@@ -107,21 +107,21 @@ static void appldata_get_mem_data(void *data)
107 * serialized through the appldata_ops_lock and can use static 107 * serialized through the appldata_ops_lock and can use static
108 */ 108 */
109 static struct sysinfo val; 109 static struct sysinfo val;
110 static struct page_state ps; 110 unsigned long ev[NR_VM_EVENT_ITEMS];
111 struct appldata_mem_data *mem_data; 111 struct appldata_mem_data *mem_data;
112 112
113 mem_data = data; 113 mem_data = data;
114 mem_data->sync_count_1++; 114 mem_data->sync_count_1++;
115 115
116 get_full_page_state(&ps); 116 all_vm_events(ev);
117 mem_data->pgpgin = ps.pgpgin >> 1; 117 mem_data->pgpgin = ev[PGPGIN] >> 1;
118 mem_data->pgpgout = ps.pgpgout >> 1; 118 mem_data->pgpgout = ev[PGPGOUT] >> 1;
119 mem_data->pswpin = ps.pswpin; 119 mem_data->pswpin = ev[PSWPIN];
120 mem_data->pswpout = ps.pswpout; 120 mem_data->pswpout = ev[PSWPOUT];
121 mem_data->pgalloc = ps.pgalloc_high + ps.pgalloc_normal + 121 mem_data->pgalloc = ev[PGALLOC_HIGH] + ev[PGALLOC_NORMAL] +
122 ps.pgalloc_dma; 122 ev[PGALLOC_DMA];
123 mem_data->pgfault = ps.pgfault; 123 mem_data->pgfault = ev[PGFAULT];
124 mem_data->pgmajfault = ps.pgmajfault; 124 mem_data->pgmajfault = ev[PGMAJFAULT];
125 125
126 si_meminfo(&val); 126 si_meminfo(&val);
127 mem_data->sharedram = val.sharedram; 127 mem_data->sharedram = val.sharedram;
@@ -130,7 +130,8 @@ static void appldata_get_mem_data(void *data)
130 mem_data->totalhigh = P2K(val.totalhigh); 130 mem_data->totalhigh = P2K(val.totalhigh);
131 mem_data->freehigh = P2K(val.freehigh); 131 mem_data->freehigh = P2K(val.freehigh);
132 mem_data->bufferram = P2K(val.bufferram); 132 mem_data->bufferram = P2K(val.bufferram);
133 mem_data->cached = P2K(atomic_read(&nr_pagecache) - val.bufferram); 133 mem_data->cached = P2K(global_page_state(NR_FILE_PAGES)
134 - val.bufferram);
134 135
135 si_swapinfo(&val); 136 si_swapinfo(&val);
136 mem_data->totalswap = P2K(val.totalswap); 137 mem_data->totalswap = P2K(val.totalswap);
diff --git a/arch/sparc/kernel/ioport.c b/arch/sparc/kernel/ioport.c
index 79d177149fdb..8654b446ac9e 100644
--- a/arch/sparc/kernel/ioport.c
+++ b/arch/sparc/kernel/ioport.c
@@ -26,6 +26,7 @@
26 */ 26 */
27 27
28#include <linux/config.h> 28#include <linux/config.h>
29#include <linux/module.h>
29#include <linux/sched.h> 30#include <linux/sched.h>
30#include <linux/kernel.h> 31#include <linux/kernel.h>
31#include <linux/errno.h> 32#include <linux/errno.h>
@@ -40,6 +41,7 @@
40#include <asm/vaddrs.h> 41#include <asm/vaddrs.h>
41#include <asm/oplib.h> 42#include <asm/oplib.h>
42#include <asm/prom.h> 43#include <asm/prom.h>
44#include <asm/of_device.h>
43#include <asm/sbus.h> 45#include <asm/sbus.h>
44#include <asm/page.h> 46#include <asm/page.h>
45#include <asm/pgalloc.h> 47#include <asm/pgalloc.h>
@@ -143,6 +145,21 @@ void __iomem *sbus_ioremap(struct resource *phyres, unsigned long offset,
143 phyres->start + offset, size, name); 145 phyres->start + offset, size, name);
144} 146}
145 147
148void __iomem *of_ioremap(struct resource *res, unsigned long offset,
149 unsigned long size, char *name)
150{
151 return _sparc_alloc_io(res->flags & 0xF,
152 res->start + offset,
153 size, name);
154}
155EXPORT_SYMBOL(of_ioremap);
156
157void of_iounmap(void __iomem *base, unsigned long size)
158{
159 iounmap(base);
160}
161EXPORT_SYMBOL(of_iounmap);
162
146/* 163/*
147 */ 164 */
148void sbus_iounmap(volatile void __iomem *addr, unsigned long size) 165void sbus_iounmap(volatile void __iomem *addr, unsigned long size)
diff --git a/arch/sparc/kernel/of_device.c b/arch/sparc/kernel/of_device.c
index 80a809478781..bc956c530376 100644
--- a/arch/sparc/kernel/of_device.c
+++ b/arch/sparc/kernel/of_device.c
@@ -129,6 +129,26 @@ static int of_device_resume(struct device * dev)
129 return error; 129 return error;
130} 130}
131 131
132static int node_match(struct device *dev, void *data)
133{
134 struct of_device *op = to_of_device(dev);
135 struct device_node *dp = data;
136
137 return (op->node == dp);
138}
139
140struct of_device *of_find_device_by_node(struct device_node *dp)
141{
142 struct device *dev = bus_find_device(&of_bus_type, NULL,
143 dp, node_match);
144
145 if (dev)
146 return to_of_device(dev);
147
148 return NULL;
149}
150EXPORT_SYMBOL(of_find_device_by_node);
151
132#ifdef CONFIG_PCI 152#ifdef CONFIG_PCI
133struct bus_type ebus_bus_type = { 153struct bus_type ebus_bus_type = {
134 .name = "ebus", 154 .name = "ebus",
@@ -153,10 +173,459 @@ struct bus_type sbus_bus_type = {
153EXPORT_SYMBOL(sbus_bus_type); 173EXPORT_SYMBOL(sbus_bus_type);
154#endif 174#endif
155 175
176struct bus_type of_bus_type = {
177 .name = "of",
178 .match = of_platform_bus_match,
179 .probe = of_device_probe,
180 .remove = of_device_remove,
181 .suspend = of_device_suspend,
182 .resume = of_device_resume,
183};
184EXPORT_SYMBOL(of_bus_type);
185
186static inline u64 of_read_addr(u32 *cell, int size)
187{
188 u64 r = 0;
189 while (size--)
190 r = (r << 32) | *(cell++);
191 return r;
192}
193
194static void __init get_cells(struct device_node *dp,
195 int *addrc, int *sizec)
196{
197 if (addrc)
198 *addrc = of_n_addr_cells(dp);
199 if (sizec)
200 *sizec = of_n_size_cells(dp);
201}
202
203/* Max address size we deal with */
204#define OF_MAX_ADDR_CELLS 4
205
206struct of_bus {
207 const char *name;
208 const char *addr_prop_name;
209 int (*match)(struct device_node *parent);
210 void (*count_cells)(struct device_node *child,
211 int *addrc, int *sizec);
212 u64 (*map)(u32 *addr, u32 *range, int na, int ns, int pna);
213 int (*translate)(u32 *addr, u64 offset, int na);
214 unsigned int (*get_flags)(u32 *addr);
215};
216
217/*
218 * Default translator (generic bus)
219 */
220
221static void of_bus_default_count_cells(struct device_node *dev,
222 int *addrc, int *sizec)
223{
224 get_cells(dev, addrc, sizec);
225}
226
227static u64 of_bus_default_map(u32 *addr, u32 *range, int na, int ns, int pna)
228{
229 u64 cp, s, da;
230
231 cp = of_read_addr(range, na);
232 s = of_read_addr(range + na + pna, ns);
233 da = of_read_addr(addr, na);
234
235 if (da < cp || da >= (cp + s))
236 return OF_BAD_ADDR;
237 return da - cp;
238}
239
240static int of_bus_default_translate(u32 *addr, u64 offset, int na)
241{
242 u64 a = of_read_addr(addr, na);
243 memset(addr, 0, na * 4);
244 a += offset;
245 if (na > 1)
246 addr[na - 2] = a >> 32;
247 addr[na - 1] = a & 0xffffffffu;
248
249 return 0;
250}
251
252static unsigned int of_bus_default_get_flags(u32 *addr)
253{
254 return IORESOURCE_MEM;
255}
256
257
258/*
259 * PCI bus specific translator
260 */
261
262static int of_bus_pci_match(struct device_node *np)
263{
264 return !strcmp(np->type, "pci") || !strcmp(np->type, "pciex");
265}
266
267static void of_bus_pci_count_cells(struct device_node *np,
268 int *addrc, int *sizec)
269{
270 if (addrc)
271 *addrc = 3;
272 if (sizec)
273 *sizec = 2;
274}
275
276static u64 of_bus_pci_map(u32 *addr, u32 *range, int na, int ns, int pna)
277{
278 u64 cp, s, da;
279
280 /* Check address type match */
281 if ((addr[0] ^ range[0]) & 0x03000000)
282 return OF_BAD_ADDR;
283
284 /* Read address values, skipping high cell */
285 cp = of_read_addr(range + 1, na - 1);
286 s = of_read_addr(range + na + pna, ns);
287 da = of_read_addr(addr + 1, na - 1);
288
289 if (da < cp || da >= (cp + s))
290 return OF_BAD_ADDR;
291 return da - cp;
292}
293
294static int of_bus_pci_translate(u32 *addr, u64 offset, int na)
295{
296 return of_bus_default_translate(addr + 1, offset, na - 1);
297}
298
299static unsigned int of_bus_pci_get_flags(u32 *addr)
300{
301 unsigned int flags = 0;
302 u32 w = addr[0];
303
304 switch((w >> 24) & 0x03) {
305 case 0x01:
306 flags |= IORESOURCE_IO;
307 case 0x02: /* 32 bits */
308 case 0x03: /* 64 bits */
309 flags |= IORESOURCE_MEM;
310 }
311 if (w & 0x40000000)
312 flags |= IORESOURCE_PREFETCH;
313 return flags;
314}
315
316/*
317 * SBUS bus specific translator
318 */
319
320static int of_bus_sbus_match(struct device_node *np)
321{
322 return !strcmp(np->name, "sbus") ||
323 !strcmp(np->name, "sbi");
324}
325
326static void of_bus_sbus_count_cells(struct device_node *child,
327 int *addrc, int *sizec)
328{
329 if (addrc)
330 *addrc = 2;
331 if (sizec)
332 *sizec = 1;
333}
334
335static u64 of_bus_sbus_map(u32 *addr, u32 *range, int na, int ns, int pna)
336{
337 return of_bus_default_map(addr, range, na, ns, pna);
338}
339
340static int of_bus_sbus_translate(u32 *addr, u64 offset, int na)
341{
342 return of_bus_default_translate(addr, offset, na);
343}
344
345static unsigned int of_bus_sbus_get_flags(u32 *addr)
346{
347 return IORESOURCE_MEM;
348}
349
350
351/*
352 * Array of bus specific translators
353 */
354
355static struct of_bus of_busses[] = {
356 /* PCI */
357 {
358 .name = "pci",
359 .addr_prop_name = "assigned-addresses",
360 .match = of_bus_pci_match,
361 .count_cells = of_bus_pci_count_cells,
362 .map = of_bus_pci_map,
363 .translate = of_bus_pci_translate,
364 .get_flags = of_bus_pci_get_flags,
365 },
366 /* SBUS */
367 {
368 .name = "sbus",
369 .addr_prop_name = "reg",
370 .match = of_bus_sbus_match,
371 .count_cells = of_bus_sbus_count_cells,
372 .map = of_bus_sbus_map,
373 .translate = of_bus_sbus_translate,
374 .get_flags = of_bus_sbus_get_flags,
375 },
376 /* Default */
377 {
378 .name = "default",
379 .addr_prop_name = "reg",
380 .match = NULL,
381 .count_cells = of_bus_default_count_cells,
382 .map = of_bus_default_map,
383 .translate = of_bus_default_translate,
384 .get_flags = of_bus_default_get_flags,
385 },
386};
387
388static struct of_bus *of_match_bus(struct device_node *np)
389{
390 int i;
391
392 for (i = 0; i < ARRAY_SIZE(of_busses); i ++)
393 if (!of_busses[i].match || of_busses[i].match(np))
394 return &of_busses[i];
395 BUG();
396 return NULL;
397}
398
399static int __init build_one_resource(struct device_node *parent,
400 struct of_bus *bus,
401 struct of_bus *pbus,
402 u32 *addr,
403 int na, int ns, int pna)
404{
405 u32 *ranges;
406 unsigned int rlen;
407 int rone;
408 u64 offset = OF_BAD_ADDR;
409
410 ranges = of_get_property(parent, "ranges", &rlen);
411 if (ranges == NULL || rlen == 0) {
412 offset = of_read_addr(addr, na);
413 memset(addr, 0, pna * 4);
414 goto finish;
415 }
416
417 /* Now walk through the ranges */
418 rlen /= 4;
419 rone = na + pna + ns;
420 for (; rlen >= rone; rlen -= rone, ranges += rone) {
421 offset = bus->map(addr, ranges, na, ns, pna);
422 if (offset != OF_BAD_ADDR)
423 break;
424 }
425 if (offset == OF_BAD_ADDR)
426 return 1;
427
428 memcpy(addr, ranges + na, 4 * pna);
429
430finish:
431 /* Translate it into parent bus space */
432 return pbus->translate(addr, offset, pna);
433}
434
/* Fill in op->resource[] for device 'op' by translating each entry of
 * its address property ("reg" or bus-specific) up the device tree into
 * CPU physical addresses.  Nothing is done for the root (no parent).
 */
static void __init build_device_resources(struct of_device *op,
					  struct device *parent)
{
	struct of_device *p_op;
	struct of_bus *bus;
	int na, ns;
	int index, num_reg;
	void *preg;

	if (!parent)
		return;

	/* The encoding of our registers is dictated by the bus the
	 * *parent* node sits on.
	 */
	p_op = to_of_device(parent);
	bus = of_match_bus(p_op->node);
	bus->count_cells(op->node, &na, &ns);

	preg = of_get_property(op->node, bus->addr_prop_name, &num_reg);
	if (!preg || num_reg == 0)
		return;

	/* Convert to num-cells. */
	num_reg /= 4;

	/* Convert to num-entries. */
	num_reg /= na + ns;

	for (index = 0; index < num_reg; index++) {
		struct resource *r = &op->resource[index];
		u32 addr[OF_MAX_ADDR_CELLS];
		u32 *reg = (preg + (index * ((na + ns) * 4)));
		struct device_node *dp = op->node;
		struct device_node *pp = p_op->node;
		struct of_bus *pbus;
		u64 size, result = OF_BAD_ADDR;
		unsigned long flags;
		int dna, dns;
		int pna, pns;

		size = of_read_addr(reg + na, ns);
		flags = bus->get_flags(reg);

		memcpy(addr, reg, na * 4);

		/* If the immediate parent has no ranges property to apply,
		 * just use a 1<->1 mapping.
		 */
		if (of_find_property(pp, "ranges", NULL) == NULL) {
			result = of_read_addr(addr, na);
			goto build_res;
		}

		dna = na;
		dns = ns;

		/* Walk upward one bus level at a time; when we hit the
		 * root, the dna cells in 'addr' hold the final address.
		 * A failed translation ends the walk with result still
		 * OF_BAD_ADDR.
		 */
		while (1) {
			dp = pp;
			pp = dp->parent;
			if (!pp) {
				result = of_read_addr(addr, dna);
				break;
			}

			pbus = of_match_bus(pp);
			pbus->count_cells(dp, &pna, &pns);

			if (build_one_resource(dp, bus, pbus, addr, dna, dns, pna))
				break;

			dna = pna;
			dns = pns;
			bus = pbus;
		}

	build_res:
		memset(r, 0, sizeof(*r));
		if (result != OF_BAD_ADDR) {
			/* NOTE(review): bits 32..39 of the translated
			 * address are folded into r->flags while r->end
			 * uses the full 64-bit result -- confirm this is
			 * how sparc resource consumers decode it.
			 */
			r->start = result & 0xffffffff;
			r->end = result + size - 1;
			r->flags = flags | ((result >> 32ULL) & 0xffUL);
		} else {
			/* Mark untranslatable entries as invalid. */
			r->start = ~0UL;
			r->end = ~0UL;
		}
		r->name = op->node->name;
	}
}
521
/* Allocate, populate and register one of_device for device tree node
 * 'dp'.  Returns the new device, or NULL on allocation/registration
 * failure.
 */
static struct of_device * __init scan_one_device(struct device_node *dp,
						 struct device *parent)
{
	struct of_device *op = kzalloc(sizeof(*op), GFP_KERNEL);
	struct linux_prom_irqs *intr;
	int len, i;

	if (!op)
		return NULL;

	op->node = dp;

	/* 25MHz is the fallback when the PROM exports no
	 * clock-frequency property.
	 */
	op->clock_freq = of_getintprop_default(dp, "clock-frequency",
					       (25*1000*1000));
	/* Prefer "upa-portid"; fall back to plain "portid". */
	op->portid = of_getintprop_default(dp, "upa-portid", -1);
	if (op->portid == -1)
		op->portid = of_getintprop_default(dp, "portid", -1);

	/* IRQs come either from an "intr" property (array of
	 * linux_prom_irqs) or from a flat "interrupts" cell list.
	 */
	intr = of_get_property(dp, "intr", &len);
	if (intr) {
		op->num_irqs = len / sizeof(struct linux_prom_irqs);
		for (i = 0; i < op->num_irqs; i++)
			op->irqs[i] = intr[i].pri;
	} else {
		unsigned int *irq = of_get_property(dp, "interrupts", &len);

		if (irq) {
			op->num_irqs = len / sizeof(unsigned int);
			for (i = 0; i < op->num_irqs; i++)
				op->irqs[i] = irq[i];
		} else {
			op->num_irqs = 0;
		}
	}
	/* sun4d encodes board number, SBUS level and slot into the
	 * final interrupt number.
	 */
	if (sparc_cpu_model == sun4d) {
		static int pil_to_sbus[] = {
			0, 0, 1, 2, 0, 3, 0, 4, 0, 5, 0, 6, 0, 7, 0, 0,
		};
		struct device_node *busp = dp->parent;
		struct linux_prom_registers *regs;
		int board = of_getintprop_default(busp, "board#", 0);
		int slot;

		/* NOTE(review): 'regs' is dereferenced without a NULL
		 * check -- assumes every sun4d node reaching here has a
		 * "reg" property; confirm against the PROM trees.
		 */
		regs = of_get_property(dp, "reg", NULL);
		slot = regs->which_io;

		for (i = 0; i < op->num_irqs; i++) {
			int this_irq = op->irqs[i];
			int sbusl = pil_to_sbus[this_irq];

			if (sbusl)
				this_irq = (((board + 1) << 5) +
					    (sbusl << 2) +
					    slot);

			op->irqs[i] = this_irq;
		}
	}

	build_device_resources(op, parent);

	op->dev.parent = parent;
	op->dev.bus = &of_bus_type;
	/* Only the root node lacks a parent; it gets the fixed id. */
	if (!parent)
		strcpy(op->dev.bus_id, "root");
	else
		strcpy(op->dev.bus_id, dp->path_component_name);

	if (of_device_register(op)) {
		printk("%s: Could not register of device.\n",
		       dp->full_name);
		/* NOTE(review): kfree() after a failed registration --
		 * confirm of_device_register() cannot have taken a
		 * reference that outlives this free.
		 */
		kfree(op);
		op = NULL;
	}

	return op;
}
599
600static void __init scan_tree(struct device_node *dp, struct device *parent)
601{
602 while (dp) {
603 struct of_device *op = scan_one_device(dp, parent);
604
605 if (op)
606 scan_tree(dp->child, &op->dev);
607
608 dp = dp->sibling;
609 }
610}
611
612static void __init scan_of_devices(void)
613{
614 struct device_node *root = of_find_node_by_path("/");
615 struct of_device *parent;
616
617 parent = scan_one_device(root, NULL);
618 if (!parent)
619 return;
620
621 scan_tree(root->child, &parent->dev);
622}
623
156static int __init of_bus_driver_init(void) 624static int __init of_bus_driver_init(void)
157{ 625{
158 int err = 0; 626 int err;
159 627
628 err = bus_register(&of_bus_type);
160#ifdef CONFIG_PCI 629#ifdef CONFIG_PCI
161 if (!err) 630 if (!err)
162 err = bus_register(&ebus_bus_type); 631 err = bus_register(&ebus_bus_type);
@@ -165,7 +634,11 @@ static int __init of_bus_driver_init(void)
165 if (!err) 634 if (!err)
166 err = bus_register(&sbus_bus_type); 635 err = bus_register(&sbus_bus_type);
167#endif 636#endif
168 return 0; 637
638 if (!err)
639 scan_of_devices();
640
641 return err;
169} 642}
170 643
171postcore_initcall(of_bus_driver_init); 644postcore_initcall(of_bus_driver_init);
diff --git a/arch/sparc/kernel/prom.c b/arch/sparc/kernel/prom.c
index 946ce6d15819..4b06dcb00ebd 100644
--- a/arch/sparc/kernel/prom.c
+++ b/arch/sparc/kernel/prom.c
@@ -190,6 +190,36 @@ int of_getintprop_default(struct device_node *np, const char *name, int def)
190} 190}
191EXPORT_SYMBOL(of_getintprop_default); 191EXPORT_SYMBOL(of_getintprop_default);
192 192
/* Return the number of 32-bit cells encoding an address for devices
 * under 'np'.  "#address-cells" lives on the *parent* bus node, hence
 * the step up before each lookup; we keep climbing until some ancestor
 * carries the property.
 */
int of_n_addr_cells(struct device_node *np)
{
	int* ip;
	do {
		if (np->parent)
			np = np->parent;
		ip = of_get_property(np, "#address-cells", NULL);
		if (ip != NULL)
			return *ip;
	} while (np->parent);
	/* No #address-cells property for the root node, default to 2 */
	return 2;
}
EXPORT_SYMBOL(of_n_addr_cells);
207
/* Return the number of 32-bit cells encoding a size for devices under
 * 'np'.  Mirrors of_n_addr_cells(): "#size-cells" belongs to the
 * parent bus, so step up before each lookup and climb until found.
 */
int of_n_size_cells(struct device_node *np)
{
	int* ip;
	do {
		if (np->parent)
			np = np->parent;
		ip = of_get_property(np, "#size-cells", NULL);
		if (ip != NULL)
			return *ip;
	} while (np->parent);
	/* No #size-cells property for the root node, default to 1 */
	return 1;
}
EXPORT_SYMBOL(of_n_size_cells);
222
193int of_set_property(struct device_node *dp, const char *name, void *val, int len) 223int of_set_property(struct device_node *dp, const char *name, void *val, int len)
194{ 224{
195 struct property **prevp; 225 struct property **prevp;
diff --git a/arch/sparc/kernel/sys_sunos.c b/arch/sparc/kernel/sys_sunos.c
index 288de276d9ff..aa0fb2efb615 100644
--- a/arch/sparc/kernel/sys_sunos.c
+++ b/arch/sparc/kernel/sys_sunos.c
@@ -196,7 +196,7 @@ asmlinkage int sunos_brk(unsigned long brk)
196 * simple, it hopefully works in most obvious cases.. Easy to 196 * simple, it hopefully works in most obvious cases.. Easy to
197 * fool it, but this should catch most mistakes. 197 * fool it, but this should catch most mistakes.
198 */ 198 */
199 freepages = get_page_cache_size(); 199 freepages = global_page_state(NR_FILE_PAGES);
200 freepages >>= 1; 200 freepages >>= 1;
201 freepages += nr_free_pages(); 201 freepages += nr_free_pages();
202 freepages += nr_swap_pages; 202 freepages += nr_swap_pages;
diff --git a/arch/sparc/kernel/time.c b/arch/sparc/kernel/time.c
index 7dadcdb4ca42..9631e8f4ae60 100644
--- a/arch/sparc/kernel/time.c
+++ b/arch/sparc/kernel/time.c
@@ -42,6 +42,7 @@
42#include <asm/sun4paddr.h> 42#include <asm/sun4paddr.h>
43#include <asm/page.h> 43#include <asm/page.h>
44#include <asm/pcic.h> 44#include <asm/pcic.h>
45#include <asm/of_device.h>
45 46
46extern unsigned long wall_jiffies; 47extern unsigned long wall_jiffies;
47 48
@@ -273,83 +274,31 @@ static __inline__ void sun4_clock_probe(void)
273#endif 274#endif
274} 275}
275 276
276/* Probe for the mostek real time clock chip. */ 277static int __devinit clock_probe(struct of_device *op, const struct of_device_id *match)
277static __inline__ void clock_probe(void)
278{ 278{
279 struct linux_prom_registers clk_reg[2]; 279 struct device_node *dp = op->node;
280 char model[128]; 280 char *model = of_get_property(dp, "model", NULL);
281 register int node, cpuunit, bootbus;
282 struct resource r;
283
284 cpuunit = bootbus = 0;
285 memset(&r, 0, sizeof(r));
286
287 /* Determine the correct starting PROM node for the probe. */
288 node = prom_getchild(prom_root_node);
289 switch (sparc_cpu_model) {
290 case sun4c:
291 break;
292 case sun4m:
293 node = prom_getchild(prom_searchsiblings(node, "obio"));
294 break;
295 case sun4d:
296 node = prom_getchild(bootbus = prom_searchsiblings(prom_getchild(cpuunit = prom_searchsiblings(node, "cpu-unit")), "bootbus"));
297 break;
298 default:
299 prom_printf("CLOCK: Unsupported architecture!\n");
300 prom_halt();
301 }
302 281
303 /* Find the PROM node describing the real time clock. */ 282 if (!model)
304 sp_clock_typ = MSTK_INVALID; 283 return -ENODEV;
305 node = prom_searchsiblings(node,"eeprom");
306 if (!node) {
307 prom_printf("CLOCK: No clock found!\n");
308 prom_halt();
309 }
310 284
311 /* Get the model name and setup everything up. */ 285 if (!strcmp(model, "mk48t02")) {
312 model[0] = '\0';
313 prom_getstring(node, "model", model, sizeof(model));
314 if (strcmp(model, "mk48t02") == 0) {
315 sp_clock_typ = MSTK48T02; 286 sp_clock_typ = MSTK48T02;
316 if (prom_getproperty(node, "reg", (char *) clk_reg, sizeof(clk_reg)) == -1) { 287
317 prom_printf("clock_probe: FAILED!\n");
318 prom_halt();
319 }
320 if (sparc_cpu_model == sun4d)
321 prom_apply_generic_ranges (bootbus, cpuunit, clk_reg, 1);
322 else
323 prom_apply_obio_ranges(clk_reg, 1);
324 /* Map the clock register io area read-only */ 288 /* Map the clock register io area read-only */
325 r.flags = clk_reg[0].which_io; 289 mstk48t02_regs = of_ioremap(&op->resource[0], 0,
326 r.start = clk_reg[0].phys_addr; 290 sizeof(struct mostek48t02),
327 mstk48t02_regs = sbus_ioremap(&r, 0, 291 "mk48t02");
328 sizeof(struct mostek48t02), "mk48t02");
329 mstk48t08_regs = NULL; /* To catch weirdness */ 292 mstk48t08_regs = NULL; /* To catch weirdness */
330 } else if (strcmp(model, "mk48t08") == 0) { 293 } else if (!strcmp(model, "mk48t08")) {
331 sp_clock_typ = MSTK48T08; 294 sp_clock_typ = MSTK48T08;
332 if(prom_getproperty(node, "reg", (char *) clk_reg, 295 mstk48t08_regs = of_ioremap(&op->resource[0], 0,
333 sizeof(clk_reg)) == -1) { 296 sizeof(struct mostek48t08),
334 prom_printf("clock_probe: FAILED!\n"); 297 "mk48t08");
335 prom_halt();
336 }
337 if (sparc_cpu_model == sun4d)
338 prom_apply_generic_ranges (bootbus, cpuunit, clk_reg, 1);
339 else
340 prom_apply_obio_ranges(clk_reg, 1);
341 /* Map the clock register io area read-only */
342 /* XXX r/o attribute is somewhere in r.flags */
343 r.flags = clk_reg[0].which_io;
344 r.start = clk_reg[0].phys_addr;
345 mstk48t08_regs = sbus_ioremap(&r, 0,
346 sizeof(struct mostek48t08), "mk48t08");
347 298
348 mstk48t02_regs = &mstk48t08_regs->regs; 299 mstk48t02_regs = &mstk48t08_regs->regs;
349 } else { 300 } else
350 prom_printf("CLOCK: Unknown model name '%s'\n",model); 301 return -ENODEV;
351 prom_halt();
352 }
353 302
354 /* Report a low battery voltage condition. */ 303 /* Report a low battery voltage condition. */
355 if (has_low_battery()) 304 if (has_low_battery())
@@ -358,6 +307,28 @@ static __inline__ void clock_probe(void)
358 /* Kick start the clock if it is completely stopped. */ 307 /* Kick start the clock if it is completely stopped. */
359 if (mostek_read(mstk48t02_regs + MOSTEK_SEC) & MSTK_STOP) 308 if (mostek_read(mstk48t02_regs + MOSTEK_SEC) & MSTK_STOP)
360 kick_start_clock(); 309 kick_start_clock();
310
311 return 0;
312}
313
314static struct of_device_id clock_match[] = {
315 {
316 .name = "eeprom",
317 },
318 {},
319};
320
321static struct of_platform_driver clock_driver = {
322 .name = "clock",
323 .match_table = clock_match,
324 .probe = clock_probe,
325};
326
327
328/* Probe for the mostek real time clock chip. */
329static void clock_init(void)
330{
331 of_register_driver(&clock_driver, &of_bus_type);
361} 332}
362 333
363void __init sbus_time_init(void) 334void __init sbus_time_init(void)
@@ -376,7 +347,7 @@ void __init sbus_time_init(void)
376 if (ARCH_SUN4) 347 if (ARCH_SUN4)
377 sun4_clock_probe(); 348 sun4_clock_probe();
378 else 349 else
379 clock_probe(); 350 clock_init();
380 351
381 sparc_init_timers(timer_interrupt); 352 sparc_init_timers(timer_interrupt);
382 353
diff --git a/arch/sparc64/kernel/auxio.c b/arch/sparc64/kernel/auxio.c
index c2c69c167d18..718350aba1ec 100644
--- a/arch/sparc64/kernel/auxio.c
+++ b/arch/sparc64/kernel/auxio.c
@@ -11,10 +11,9 @@
11#include <linux/init.h> 11#include <linux/init.h>
12#include <linux/ioport.h> 12#include <linux/ioport.h>
13 13
14#include <asm/oplib.h> 14#include <asm/prom.h>
15#include <asm/of_device.h>
15#include <asm/io.h> 16#include <asm/io.h>
16#include <asm/sbus.h>
17#include <asm/ebus.h>
18#include <asm/auxio.h> 17#include <asm/auxio.h>
19 18
20void __iomem *auxio_register = NULL; 19void __iomem *auxio_register = NULL;
@@ -111,12 +110,6 @@ void auxio_set_lte(int on)
111 } 110 }
112} 111}
113 112
114static void __devinit auxio_report_dev(struct device_node *dp)
115{
116 printk(KERN_INFO "AUXIO: Found device at %s\n",
117 dp->full_name);
118}
119
120static struct of_device_id auxio_match[] = { 113static struct of_device_id auxio_match[] = {
121 { 114 {
122 .name = "auxio", 115 .name = "auxio",
@@ -126,67 +119,48 @@ static struct of_device_id auxio_match[] = {
126 119
127MODULE_DEVICE_TABLE(of, auxio_match); 120MODULE_DEVICE_TABLE(of, auxio_match);
128 121
129#ifdef CONFIG_SBUS 122static int __devinit auxio_probe(struct of_device *dev, const struct of_device_id *match)
130static int __devinit auxio_sbus_probe(struct of_device *dev, const struct of_device_id *match)
131{ 123{
132 struct sbus_dev *sdev = to_sbus_device(&dev->dev); 124 struct device_node *dp = dev->node;
133 125 unsigned long size;
134 auxio_devtype = AUXIO_TYPE_SBUS; 126
135 auxio_register = sbus_ioremap(&sdev->resource[0], 0, 127 if (!strcmp(dp->parent->name, "ebus")) {
136 sdev->reg_addrs[0].reg_size, 128 auxio_devtype = AUXIO_TYPE_EBUS;
137 "auxiliaryIO"); 129 size = sizeof(u32);
138 if (!auxio_register) 130 } else if (!strcmp(dp->parent->name, "sbus")) {
131 auxio_devtype = AUXIO_TYPE_SBUS;
132 size = 1;
133 } else {
134 printk("auxio: Unknown parent bus type [%s]\n",
135 dp->parent->name);
139 return -ENODEV; 136 return -ENODEV;
140 137 }
141 auxio_report_dev(dev->node); 138 auxio_register = of_ioremap(&dev->resource[0], 0, size, "auxio");
142 return 0;
143}
144
145static struct of_platform_driver auxio_sbus_driver = {
146 .name = "auxio",
147 .match_table = auxio_match,
148 .probe = auxio_sbus_probe,
149};
150#endif
151
152#ifdef CONFIG_PCI
153static int __devinit auxio_ebus_probe(struct of_device *dev, const struct of_device_id *match)
154{
155 struct linux_ebus_device *edev = to_ebus_device(&dev->dev);
156
157 auxio_devtype = AUXIO_TYPE_EBUS;
158 auxio_register = ioremap(edev->resource[0].start, sizeof(u32));
159 if (!auxio_register) 139 if (!auxio_register)
160 return -ENODEV; 140 return -ENODEV;
161 141
162 auxio_report_dev(dev->node); 142 printk(KERN_INFO "AUXIO: Found device at %s\n",
143 dp->full_name);
163 144
164 auxio_set_led(AUXIO_LED_ON); 145 if (auxio_devtype == AUXIO_TYPE_EBUS)
146 auxio_set_led(AUXIO_LED_ON);
165 147
166 return 0; 148 return 0;
167} 149}
168 150
169static struct of_platform_driver auxio_ebus_driver = { 151static struct of_platform_driver auxio_driver = {
170 .name = "auxio", 152 .name = "auxio",
171 .match_table = auxio_match, 153 .match_table = auxio_match,
172 .probe = auxio_ebus_probe, 154 .probe = auxio_probe,
173}; 155};
174#endif
175 156
176static int __init auxio_probe(void) 157static int __init auxio_init(void)
177{ 158{
178#ifdef CONFIG_SBUS 159 return of_register_driver(&auxio_driver, &of_bus_type);
179 of_register_driver(&auxio_sbus_driver, &sbus_bus_type);
180#endif
181#ifdef CONFIG_PCI
182 of_register_driver(&auxio_ebus_driver, &ebus_bus_type);
183#endif
184
185 return 0;
186} 160}
187 161
188/* Must be after subsys_initcall() so that busses are probed. Must 162/* Must be after subsys_initcall() so that busses are probed. Must
189 * be before device_initcall() because things like the floppy driver 163 * be before device_initcall() because things like the floppy driver
190 * need to use the AUXIO register. 164 * need to use the AUXIO register.
191 */ 165 */
192fs_initcall(auxio_probe); 166fs_initcall(auxio_init);
diff --git a/arch/sparc64/kernel/ebus.c b/arch/sparc64/kernel/ebus.c
index 98e0a8cbeecd..aac014d15ad3 100644
--- a/arch/sparc64/kernel/ebus.c
+++ b/arch/sparc64/kernel/ebus.c
@@ -20,6 +20,8 @@
20#include <asm/pbm.h> 20#include <asm/pbm.h>
21#include <asm/ebus.h> 21#include <asm/ebus.h>
22#include <asm/oplib.h> 22#include <asm/oplib.h>
23#include <asm/prom.h>
24#include <asm/of_device.h>
23#include <asm/bpp.h> 25#include <asm/bpp.h>
24#include <asm/irq.h> 26#include <asm/irq.h>
25 27
@@ -279,45 +281,12 @@ static inline void *ebus_alloc(size_t size)
279 return mem; 281 return mem;
280} 282}
281 283
282int __init ebus_intmap_match(struct linux_ebus *ebus, 284static void __init fill_ebus_child(struct device_node *dp,
283 struct linux_prom_registers *reg, 285 struct linux_ebus_child *dev,
284 int *interrupt) 286 int non_standard_regs)
285{
286 struct linux_prom_ebus_intmap *imap;
287 struct linux_prom_ebus_intmask *imask;
288 unsigned int hi, lo, irq;
289 int i, len, n_imap;
290
291 imap = of_get_property(ebus->prom_node, "interrupt-map", &len);
292 if (!imap)
293 return 0;
294 n_imap = len / sizeof(imap[0]);
295
296 imask = of_get_property(ebus->prom_node, "interrupt-map-mask", NULL);
297 if (!imask)
298 return 0;
299
300 hi = reg->which_io & imask->phys_hi;
301 lo = reg->phys_addr & imask->phys_lo;
302 irq = *interrupt & imask->interrupt;
303 for (i = 0; i < n_imap; i++) {
304 if ((imap[i].phys_hi == hi) &&
305 (imap[i].phys_lo == lo) &&
306 (imap[i].interrupt == irq)) {
307 *interrupt = imap[i].cinterrupt;
308 return 0;
309 }
310 }
311 return -1;
312}
313
314void __init fill_ebus_child(struct device_node *dp,
315 struct linux_prom_registers *preg,
316 struct linux_ebus_child *dev,
317 int non_standard_regs)
318{ 287{
288 struct of_device *op;
319 int *regs; 289 int *regs;
320 int *irqs;
321 int i, len; 290 int i, len;
322 291
323 dev->prom_node = dp; 292 dev->prom_node = dp;
@@ -354,12 +323,16 @@ void __init fill_ebus_child(struct device_node *dp,
354 } 323 }
355 } 324 }
356 325
357 for (i = 0; i < PROMINTR_MAX; i++) 326 op = of_find_device_by_node(dp);
358 dev->irqs[i] = PCI_IRQ_NONE; 327 if (!op) {
359
360 irqs = of_get_property(dp, "interrupts", &len);
361 if (!irqs) {
362 dev->num_irqs = 0; 328 dev->num_irqs = 0;
329 } else {
330 dev->num_irqs = op->num_irqs;
331 for (i = 0; i < dev->num_irqs; i++)
332 dev->irqs[i] = op->irqs[i];
333 }
334
335 if (!dev->num_irqs) {
363 /* 336 /*
364 * Oh, well, some PROMs don't export interrupts 337 * Oh, well, some PROMs don't export interrupts
365 * property to children of EBus devices... 338 * property to children of EBus devices...
@@ -375,23 +348,6 @@ void __init fill_ebus_child(struct device_node *dp,
375 dev->irqs[0] = dev->parent->irqs[1]; 348 dev->irqs[0] = dev->parent->irqs[1];
376 } 349 }
377 } 350 }
378 } else {
379 dev->num_irqs = len / sizeof(irqs[0]);
380 for (i = 0; i < dev->num_irqs; i++) {
381 struct pci_pbm_info *pbm = dev->bus->parent;
382 struct pci_controller_info *p = pbm->parent;
383
384 if (ebus_intmap_match(dev->bus, preg, &irqs[i]) != -1) {
385 dev->irqs[i] = p->irq_build(pbm,
386 dev->bus->self,
387 irqs[i]);
388 } else {
389 /* If we get a bogus interrupt property, just
390 * record the raw value instead of punting.
391 */
392 dev->irqs[i] = irqs[i];
393 }
394 }
395 } 351 }
396} 352}
397 353
@@ -403,72 +359,32 @@ static int __init child_regs_nonstandard(struct linux_ebus_device *dev)
403 return 0; 359 return 0;
404} 360}
405 361
406void __init fill_ebus_device(struct device_node *dp, struct linux_ebus_device *dev) 362static void __init fill_ebus_device(struct device_node *dp, struct linux_ebus_device *dev)
407{ 363{
408 struct linux_prom_registers *regs;
409 struct linux_ebus_child *child; 364 struct linux_ebus_child *child;
410 int *irqs; 365 struct of_device *op;
411 int i, n, len; 366 int i, len;
412 367
413 dev->prom_node = dp; 368 dev->prom_node = dp;
414 369
415 printk(" [%s", dp->name); 370 printk(" [%s", dp->name);
416 371
417 regs = of_get_property(dp, "reg", &len); 372 op = of_find_device_by_node(dp);
418 if (!regs) { 373 if (!op) {
419 dev->num_addrs = 0; 374 dev->num_addrs = 0;
420 goto probe_interrupts;
421 }
422
423 if (len % sizeof(struct linux_prom_registers)) {
424 prom_printf("UGH: proplen for %s was %d, need multiple of %d\n",
425 dev->prom_node->name, len,
426 (int)sizeof(struct linux_prom_registers));
427 prom_halt();
428 }
429 dev->num_addrs = len / sizeof(struct linux_prom_registers);
430
431 for (i = 0; i < dev->num_addrs; i++) {
432 /* XXX Learn how to interpret ebus ranges... -DaveM */
433 if (regs[i].which_io >= 0x10)
434 n = (regs[i].which_io - 0x10) >> 2;
435 else
436 n = regs[i].which_io;
437
438 dev->resource[i].start = dev->bus->self->resource[n].start;
439 dev->resource[i].start += (unsigned long)regs[i].phys_addr;
440 dev->resource[i].end =
441 (dev->resource[i].start + (unsigned long)regs[i].reg_size - 1UL);
442 dev->resource[i].flags = IORESOURCE_MEM;
443 dev->resource[i].name = dev->prom_node->name;
444 request_resource(&dev->bus->self->resource[n],
445 &dev->resource[i]);
446 }
447
448probe_interrupts:
449 for (i = 0; i < PROMINTR_MAX; i++)
450 dev->irqs[i] = PCI_IRQ_NONE;
451
452 irqs = of_get_property(dp, "interrupts", &len);
453 if (!irqs) {
454 dev->num_irqs = 0; 375 dev->num_irqs = 0;
455 } else { 376 } else {
456 dev->num_irqs = len / sizeof(irqs[0]); 377 (void) of_get_property(dp, "reg", &len);
457 for (i = 0; i < dev->num_irqs; i++) { 378 dev->num_addrs = len / sizeof(struct linux_prom_registers);
458 struct pci_pbm_info *pbm = dev->bus->parent; 379
459 struct pci_controller_info *p = pbm->parent; 380 for (i = 0; i < dev->num_addrs; i++)
460 381 memcpy(&dev->resource[i],
461 if (ebus_intmap_match(dev->bus, &regs[0], &irqs[i]) != -1) { 382 &op->resource[i],
462 dev->irqs[i] = p->irq_build(pbm, 383 sizeof(struct resource));
463 dev->bus->self, 384
464 irqs[i]); 385 dev->num_irqs = op->num_irqs;
465 } else { 386 for (i = 0; i < dev->num_irqs; i++)
466 /* If we get a bogus interrupt property, just 387 dev->irqs[i] = op->irqs[i];
467 * record the raw value instead of punting.
468 */
469 dev->irqs[i] = irqs[i];
470 }
471 }
472 } 388 }
473 389
474 dev->ofdev.node = dp; 390 dev->ofdev.node = dp;
@@ -490,7 +406,7 @@ probe_interrupts:
490 child->next = NULL; 406 child->next = NULL;
491 child->parent = dev; 407 child->parent = dev;
492 child->bus = dev->bus; 408 child->bus = dev->bus;
493 fill_ebus_child(dp, regs, child, 409 fill_ebus_child(dp, child,
494 child_regs_nonstandard(dev)); 410 child_regs_nonstandard(dev));
495 411
496 while ((dp = dp->sibling) != NULL) { 412 while ((dp = dp->sibling) != NULL) {
@@ -500,7 +416,7 @@ probe_interrupts:
500 child->next = NULL; 416 child->next = NULL;
501 child->parent = dev; 417 child->parent = dev;
502 child->bus = dev->bus; 418 child->bus = dev->bus;
503 fill_ebus_child(dp, regs, child, 419 fill_ebus_child(dp, child,
504 child_regs_nonstandard(dev)); 420 child_regs_nonstandard(dev));
505 } 421 }
506 } 422 }
diff --git a/arch/sparc64/kernel/irq.c b/arch/sparc64/kernel/irq.c
index ab9e640df228..eebe02f3f4cb 100644
--- a/arch/sparc64/kernel/irq.c
+++ b/arch/sparc64/kernel/irq.c
@@ -414,6 +414,10 @@ void irq_install_pre_handler(int virt_irq,
414 data->pre_handler_arg1 = arg1; 414 data->pre_handler_arg1 = arg1;
415 data->pre_handler_arg2 = arg2; 415 data->pre_handler_arg2 = arg2;
416 416
417 if (desc->chip == &sun4u_irq_ack ||
418 desc->chip == &sun4v_irq_ack)
419 return;
420
417 desc->chip = (desc->chip == &sun4u_irq ? 421 desc->chip = (desc->chip == &sun4u_irq ?
418 &sun4u_irq_ack : &sun4v_irq_ack); 422 &sun4u_irq_ack : &sun4v_irq_ack);
419} 423}
diff --git a/arch/sparc64/kernel/isa.c b/arch/sparc64/kernel/isa.c
index 6f16dee280a8..0f3aec72ef5f 100644
--- a/arch/sparc64/kernel/isa.c
+++ b/arch/sparc64/kernel/isa.c
@@ -3,6 +3,8 @@
3#include <linux/pci.h> 3#include <linux/pci.h>
4#include <linux/slab.h> 4#include <linux/slab.h>
5#include <asm/oplib.h> 5#include <asm/oplib.h>
6#include <asm/prom.h>
7#include <asm/of_device.h>
6#include <asm/isa.h> 8#include <asm/isa.h>
7 9
8struct sparc_isa_bridge *isa_chain; 10struct sparc_isa_bridge *isa_chain;
@@ -46,107 +48,16 @@ isa_dev_get_resource(struct sparc_isa_device *isa_dev)
46 return pregs; 48 return pregs;
47} 49}
48 50
49/* I can't believe they didn't put a real INO in the isa device
50 * interrupts property. The whole point of the OBP properties
51 * is to shield the kernel from IRQ routing details.
52 *
53 * The P1275 standard for ISA devices seems to also have been
54 * totally ignored.
55 *
56 * On later systems, an interrupt-map and interrupt-map-mask scheme
57 * akin to EBUS is used.
58 */
59static struct {
60 int obp_irq;
61 int pci_ino;
62} grover_irq_table[] = {
63 { 1, 0x00 }, /* dma, unknown ino at this point */
64 { 2, 0x27 }, /* floppy */
65 { 3, 0x22 }, /* parallel */
66 { 4, 0x2b }, /* serial */
67 { 5, 0x25 }, /* acpi power management */
68
69 { 0, 0x00 } /* end of table */
70};
71
72static int __init isa_dev_get_irq_using_imap(struct sparc_isa_device *isa_dev,
73 struct sparc_isa_bridge *isa_br,
74 int *interrupt,
75 struct linux_prom_registers *reg)
76{
77 struct linux_prom_ebus_intmap *imap;
78 struct linux_prom_ebus_intmap *imask;
79 unsigned int hi, lo, irq;
80 int i, len, n_imap;
81
82 imap = of_get_property(isa_br->prom_node, "interrupt-map", &len);
83 if (!imap)
84 return 0;
85 n_imap = len / sizeof(imap[0]);
86
87 imask = of_get_property(isa_br->prom_node, "interrupt-map-mask", NULL);
88 if (!imask)
89 return 0;
90
91 hi = reg->which_io & imask->phys_hi;
92 lo = reg->phys_addr & imask->phys_lo;
93 irq = *interrupt & imask->interrupt;
94 for (i = 0; i < n_imap; i++) {
95 if ((imap[i].phys_hi == hi) &&
96 (imap[i].phys_lo == lo) &&
97 (imap[i].interrupt == irq)) {
98 *interrupt = imap[i].cinterrupt;
99 return 0;
100 }
101 }
102 return -1;
103}
104
105static void __init isa_dev_get_irq(struct sparc_isa_device *isa_dev, 51static void __init isa_dev_get_irq(struct sparc_isa_device *isa_dev,
106 struct linux_prom_registers *pregs) 52 struct linux_prom_registers *pregs)
107{ 53{
108 int irq_prop; 54 struct of_device *op = of_find_device_by_node(isa_dev->prom_node);
109 55
110 irq_prop = of_getintprop_default(isa_dev->prom_node, 56 if (!op || !op->num_irqs) {
111 "interrupts", -1); 57 isa_dev->irq = PCI_IRQ_NONE;
112 if (irq_prop <= 0) {
113 goto no_irq;
114 } else { 58 } else {
115 struct pci_controller_info *pcic; 59 isa_dev->irq = op->irqs[0];
116 struct pci_pbm_info *pbm;
117 int i;
118
119 if (of_find_property(isa_dev->bus->prom_node,
120 "interrupt-map", NULL)) {
121 if (!isa_dev_get_irq_using_imap(isa_dev,
122 isa_dev->bus,
123 &irq_prop,
124 pregs))
125 goto route_irq;
126 }
127
128 for (i = 0; grover_irq_table[i].obp_irq != 0; i++) {
129 if (grover_irq_table[i].obp_irq == irq_prop) {
130 int ino = grover_irq_table[i].pci_ino;
131
132 if (ino == 0)
133 goto no_irq;
134
135 irq_prop = ino;
136 goto route_irq;
137 }
138 }
139 goto no_irq;
140
141route_irq:
142 pbm = isa_dev->bus->parent;
143 pcic = pbm->parent;
144 isa_dev->irq = pcic->irq_build(pbm, NULL, irq_prop);
145 return;
146 } 60 }
147
148no_irq:
149 isa_dev->irq = PCI_IRQ_NONE;
150} 61}
151 62
152static void __init isa_fill_children(struct sparc_isa_device *parent_isa_dev) 63static void __init isa_fill_children(struct sparc_isa_device *parent_isa_dev)
diff --git a/arch/sparc64/kernel/of_device.c b/arch/sparc64/kernel/of_device.c
index 768475bbce82..3670dc8a7d5f 100644
--- a/arch/sparc64/kernel/of_device.c
+++ b/arch/sparc64/kernel/of_device.c
@@ -129,6 +129,43 @@ static int of_device_resume(struct device * dev)
129 return error; 129 return error;
130} 130}
131 131
/* "Map" a device resource for CPU access.  On sparc64 the resource
 * start is already a usable physical address, so this only reserves
 * the region and returns res->start + offset as an __iomem cookie;
 * no page tables are touched.  Returns NULL (0) when the region is
 * already claimed by someone else.
 */
void __iomem *of_ioremap(struct resource *res, unsigned long offset, unsigned long size, char *name)
{
	unsigned long ret = res->start + offset;

	if (!request_region(ret, size, name))
		ret = 0;

	return (void __iomem *) ret;
}
EXPORT_SYMBOL(of_ioremap);
142
143void of_iounmap(void __iomem *base, unsigned long size)
144{
145 release_region((unsigned long) base, size);
146}
147EXPORT_SYMBOL(of_iounmap);
148
149static int node_match(struct device *dev, void *data)
150{
151 struct of_device *op = to_of_device(dev);
152 struct device_node *dp = data;
153
154 return (op->node == dp);
155}
156
/* Look up the of_device registered for device tree node 'dp', or NULL
 * if none exists on of_bus_type.
 *
 * NOTE(review): bus_find_device() takes a reference on the matched
 * device which is never dropped here -- confirm callers treat the
 * result as borrowed for the lifetime of the system.
 */
struct of_device *of_find_device_by_node(struct device_node *dp)
{
	struct device *dev = bus_find_device(&of_bus_type, NULL,
					     dp, node_match);

	if (dev)
		return to_of_device(dev);

	return NULL;
}
EXPORT_SYMBOL(of_find_device_by_node);
168
132#ifdef CONFIG_PCI 169#ifdef CONFIG_PCI
133struct bus_type isa_bus_type = { 170struct bus_type isa_bus_type = {
134 .name = "isa", 171 .name = "isa",
@@ -163,10 +200,654 @@ struct bus_type sbus_bus_type = {
163EXPORT_SYMBOL(sbus_bus_type); 200EXPORT_SYMBOL(sbus_bus_type);
164#endif 201#endif
165 202
203struct bus_type of_bus_type = {
204 .name = "of",
205 .match = of_platform_bus_match,
206 .probe = of_device_probe,
207 .remove = of_device_remove,
208 .suspend = of_device_suspend,
209 .resume = of_device_resume,
210};
211EXPORT_SYMBOL(of_bus_type);
212
213static inline u64 of_read_addr(u32 *cell, int size)
214{
215 u64 r = 0;
216 while (size--)
217 r = (r << 32) | *(cell++);
218 return r;
219}
220
221static void __init get_cells(struct device_node *dp,
222 int *addrc, int *sizec)
223{
224 if (addrc)
225 *addrc = of_n_addr_cells(dp);
226 if (sizec)
227 *sizec = of_n_size_cells(dp);
228}
229
230/* Max address size we deal with */
231#define OF_MAX_ADDR_CELLS 4
232
233struct of_bus {
234 const char *name;
235 const char *addr_prop_name;
236 int (*match)(struct device_node *parent);
237 void (*count_cells)(struct device_node *child,
238 int *addrc, int *sizec);
239 u64 (*map)(u32 *addr, u32 *range, int na, int ns, int pna);
240 int (*translate)(u32 *addr, u64 offset, int na);
241 unsigned int (*get_flags)(u32 *addr);
242};
243
244/*
245 * Default translator (generic bus)
246 */
247
248static void of_bus_default_count_cells(struct device_node *dev,
249 int *addrc, int *sizec)
250{
251 get_cells(dev, addrc, sizec);
252}
253
254static u64 of_bus_default_map(u32 *addr, u32 *range, int na, int ns, int pna)
255{
256 u64 cp, s, da;
257
258 cp = of_read_addr(range, na);
259 s = of_read_addr(range + na + pna, ns);
260 da = of_read_addr(addr, na);
261
262 if (da < cp || da >= (cp + s))
263 return OF_BAD_ADDR;
264 return da - cp;
265}
266
267static int of_bus_default_translate(u32 *addr, u64 offset, int na)
268{
269 u64 a = of_read_addr(addr, na);
270 memset(addr, 0, na * 4);
271 a += offset;
272 if (na > 1)
273 addr[na - 2] = a >> 32;
274 addr[na - 1] = a & 0xffffffffu;
275
276 return 0;
277}
278
279static unsigned int of_bus_default_get_flags(u32 *addr)
280{
281 return IORESOURCE_MEM;
282}
283
284/*
285 * PCI bus specific translator
286 */
287
288static int of_bus_pci_match(struct device_node *np)
289{
290 return !strcmp(np->type, "pci") || !strcmp(np->type, "pciex");
291}
292
293static void of_bus_pci_count_cells(struct device_node *np,
294 int *addrc, int *sizec)
295{
296 if (addrc)
297 *addrc = 3;
298 if (sizec)
299 *sizec = 2;
300}
301
302static u64 of_bus_pci_map(u32 *addr, u32 *range, int na, int ns, int pna)
303{
304 u64 cp, s, da;
305
306 /* Check address type match */
307 if ((addr[0] ^ range[0]) & 0x03000000)
308 return OF_BAD_ADDR;
309
310 /* Read address values, skipping high cell */
311 cp = of_read_addr(range + 1, na - 1);
312 s = of_read_addr(range + na + pna, ns);
313 da = of_read_addr(addr + 1, na - 1);
314
315 if (da < cp || da >= (cp + s))
316 return OF_BAD_ADDR;
317 return da - cp;
318}
319
320static int of_bus_pci_translate(u32 *addr, u64 offset, int na)
321{
322 return of_bus_default_translate(addr + 1, offset, na - 1);
323}
324
325static unsigned int of_bus_pci_get_flags(u32 *addr)
326{
327 unsigned int flags = 0;
328 u32 w = addr[0];
329
330 switch((w >> 24) & 0x03) {
331 case 0x01:
332 flags |= IORESOURCE_IO;
333 case 0x02: /* 32 bits */
334 case 0x03: /* 64 bits */
335 flags |= IORESOURCE_MEM;
336 }
337 if (w & 0x40000000)
338 flags |= IORESOURCE_PREFETCH;
339 return flags;
340}
341
342/*
343 * ISA bus specific translator
344 */
345
346static int of_bus_isa_match(struct device_node *np)
347{
348 return !strcmp(np->name, "isa");
349}
350
351static void of_bus_isa_count_cells(struct device_node *child,
352 int *addrc, int *sizec)
353{
354 if (addrc)
355 *addrc = 2;
356 if (sizec)
357 *sizec = 1;
358}
359
360static u64 of_bus_isa_map(u32 *addr, u32 *range, int na, int ns, int pna)
361{
362 u64 cp, s, da;
363
364 /* Check address type match */
365 if ((addr[0] ^ range[0]) & 0x00000001)
366 return OF_BAD_ADDR;
367
368 /* Read address values, skipping high cell */
369 cp = of_read_addr(range + 1, na - 1);
370 s = of_read_addr(range + na + pna, ns);
371 da = of_read_addr(addr + 1, na - 1);
372
373 if (da < cp || da >= (cp + s))
374 return OF_BAD_ADDR;
375 return da - cp;
376}
377
378static int of_bus_isa_translate(u32 *addr, u64 offset, int na)
379{
380 return of_bus_default_translate(addr + 1, offset, na - 1);
381}
382
383static unsigned int of_bus_isa_get_flags(u32 *addr)
384{
385 unsigned int flags = 0;
386 u32 w = addr[0];
387
388 if (w & 1)
389 flags |= IORESOURCE_IO;
390 else
391 flags |= IORESOURCE_MEM;
392 return flags;
393}
394
395/*
396 * SBUS bus specific translator
397 */
398
399static int of_bus_sbus_match(struct device_node *np)
400{
401 return !strcmp(np->name, "sbus") ||
402 !strcmp(np->name, "sbi");
403}
404
405static void of_bus_sbus_count_cells(struct device_node *child,
406 int *addrc, int *sizec)
407{
408 if (addrc)
409 *addrc = 2;
410 if (sizec)
411 *sizec = 1;
412}
413
414static u64 of_bus_sbus_map(u32 *addr, u32 *range, int na, int ns, int pna)
415{
416 return of_bus_default_map(addr, range, na, ns, pna);
417}
418
419static int of_bus_sbus_translate(u32 *addr, u64 offset, int na)
420{
421 return of_bus_default_translate(addr, offset, na);
422}
423
424static unsigned int of_bus_sbus_get_flags(u32 *addr)
425{
426 return IORESOURCE_MEM;
427}
428
429
430/*
431 * Array of bus specific translators
432 */
433
434static struct of_bus of_busses[] = {
435 /* PCI */
436 {
437 .name = "pci",
438 .addr_prop_name = "assigned-addresses",
439 .match = of_bus_pci_match,
440 .count_cells = of_bus_pci_count_cells,
441 .map = of_bus_pci_map,
442 .translate = of_bus_pci_translate,
443 .get_flags = of_bus_pci_get_flags,
444 },
445 /* ISA */
446 {
447 .name = "isa",
448 .addr_prop_name = "reg",
449 .match = of_bus_isa_match,
450 .count_cells = of_bus_isa_count_cells,
451 .map = of_bus_isa_map,
452 .translate = of_bus_isa_translate,
453 .get_flags = of_bus_isa_get_flags,
454 },
455 /* SBUS */
456 {
457 .name = "sbus",
458 .addr_prop_name = "reg",
459 .match = of_bus_sbus_match,
460 .count_cells = of_bus_sbus_count_cells,
461 .map = of_bus_sbus_map,
462 .translate = of_bus_sbus_translate,
463 .get_flags = of_bus_sbus_get_flags,
464 },
465 /* Default */
466 {
467 .name = "default",
468 .addr_prop_name = "reg",
469 .match = NULL,
470 .count_cells = of_bus_default_count_cells,
471 .map = of_bus_default_map,
472 .translate = of_bus_default_translate,
473 .get_flags = of_bus_default_get_flags,
474 },
475};
476
477static struct of_bus *of_match_bus(struct device_node *np)
478{
479 int i;
480
481 for (i = 0; i < ARRAY_SIZE(of_busses); i ++)
482 if (!of_busses[i].match || of_busses[i].match(np))
483 return &of_busses[i];
484 BUG();
485 return NULL;
486}
487
488static int __init build_one_resource(struct device_node *parent,
489 struct of_bus *bus,
490 struct of_bus *pbus,
491 u32 *addr,
492 int na, int ns, int pna)
493{
494 u32 *ranges;
495 unsigned int rlen;
496 int rone;
497 u64 offset = OF_BAD_ADDR;
498
499 ranges = of_get_property(parent, "ranges", &rlen);
500 if (ranges == NULL || rlen == 0) {
501 offset = of_read_addr(addr, na);
502 memset(addr, 0, pna * 4);
503 goto finish;
504 }
505
506 /* Now walk through the ranges */
507 rlen /= 4;
508 rone = na + pna + ns;
509 for (; rlen >= rone; rlen -= rone, ranges += rone) {
510 offset = bus->map(addr, ranges, na, ns, pna);
511 if (offset != OF_BAD_ADDR)
512 break;
513 }
514 if (offset == OF_BAD_ADDR)
515 return 1;
516
517 memcpy(addr, ranges + na, 4 * pna);
518
519finish:
520 /* Translate it into parent bus space */
521 return pbus->translate(addr, offset, pna);
522}
523
524static void __init build_device_resources(struct of_device *op,
525 struct device *parent)
526{
527 struct of_device *p_op;
528 struct of_bus *bus;
529 int na, ns;
530 int index, num_reg;
531 void *preg;
532
533 if (!parent)
534 return;
535
536 p_op = to_of_device(parent);
537 bus = of_match_bus(p_op->node);
538 bus->count_cells(op->node, &na, &ns);
539
540 preg = of_get_property(op->node, bus->addr_prop_name, &num_reg);
541 if (!preg || num_reg == 0)
542 return;
543
544 /* Convert to num-cells. */
545 num_reg /= 4;
546
 547	/* Convert to num-entries. */
548 num_reg /= na + ns;
549
550 for (index = 0; index < num_reg; index++) {
551 struct resource *r = &op->resource[index];
552 u32 addr[OF_MAX_ADDR_CELLS];
553 u32 *reg = (preg + (index * ((na + ns) * 4)));
554 struct device_node *dp = op->node;
555 struct device_node *pp = p_op->node;
556 struct of_bus *pbus;
557 u64 size, result = OF_BAD_ADDR;
558 unsigned long flags;
559 int dna, dns;
560 int pna, pns;
561
562 size = of_read_addr(reg + na, ns);
563 flags = bus->get_flags(reg);
564
565 memcpy(addr, reg, na * 4);
566
567 /* If the immediate parent has no ranges property to apply,
568 * just use a 1<->1 mapping. Unless it is the 'dma' child
569 * of an isa bus, which must be passed up towards the root.
570 *
571 * Also, don't try to translate PMU bus device registers.
572 */
573 if ((of_find_property(pp, "ranges", NULL) == NULL &&
574 strcmp(pp->name, "dma") != 0) ||
575 !strcmp(pp->name, "pmu")) {
576 result = of_read_addr(addr, na);
577 goto build_res;
578 }
579
580 dna = na;
581 dns = ns;
582
583 while (1) {
584 dp = pp;
585 pp = dp->parent;
586 if (!pp) {
587 result = of_read_addr(addr, dna);
588 break;
589 }
590
591 pbus = of_match_bus(pp);
592 pbus->count_cells(dp, &pna, &pns);
593
594 if (build_one_resource(dp, bus, pbus, addr, dna, dns, pna))
595 break;
596
597 dna = pna;
598 dns = pns;
599 bus = pbus;
600 }
601
602 build_res:
603 memset(r, 0, sizeof(*r));
604 if (result != OF_BAD_ADDR) {
605 r->start = result;
606 r->end = result + size - 1;
607 r->flags = flags;
608 } else {
609 r->start = ~0UL;
610 r->end = ~0UL;
611 }
612 r->name = op->node->name;
613 }
614}
615
616static struct device_node * __init
617apply_interrupt_map(struct device_node *dp, struct device_node *pp,
618 u32 *imap, int imlen, u32 *imask,
619 unsigned int *irq_p)
620{
621 struct device_node *cp;
622 unsigned int irq = *irq_p;
623 struct of_bus *bus;
624 phandle handle;
625 u32 *reg;
626 int na, num_reg, i;
627
628 bus = of_match_bus(pp);
629 bus->count_cells(dp, &na, NULL);
630
631 reg = of_get_property(dp, "reg", &num_reg);
632 if (!reg || !num_reg)
633 return NULL;
634
635 imlen /= ((na + 3) * 4);
636 handle = 0;
637 for (i = 0; i < imlen; i++) {
638 int j;
639
640 for (j = 0; j < na; j++) {
641 if ((reg[j] & imask[j]) != imap[j])
642 goto next;
643 }
644 if (imap[na] == irq) {
645 handle = imap[na + 1];
646 irq = imap[na + 2];
647 break;
648 }
649
650 next:
651 imap += (na + 3);
652 }
653 if (i == imlen)
654 return NULL;
655
656 *irq_p = irq;
657 cp = of_find_node_by_phandle(handle);
658
659 return cp;
660}
661
662static unsigned int __init pci_irq_swizzle(struct device_node *dp,
663 struct device_node *pp,
664 unsigned int irq)
665{
666 struct linux_prom_pci_registers *regs;
667 unsigned int devfn, slot, ret;
668
669 if (irq < 1 || irq > 4)
670 return irq;
671
672 regs = of_get_property(dp, "reg", NULL);
673 if (!regs)
674 return irq;
675
676 devfn = (regs->phys_hi >> 8) & 0xff;
677 slot = (devfn >> 3) & 0x1f;
678
679 ret = ((irq - 1 + (slot & 3)) & 3) + 1;
680
681 return ret;
682}
683
684static unsigned int __init build_one_device_irq(struct of_device *op,
685 struct device *parent,
686 unsigned int irq)
687{
688 struct device_node *dp = op->node;
689 struct device_node *pp, *ip;
690 unsigned int orig_irq = irq;
691
692 if (irq == 0xffffffff)
693 return irq;
694
695 if (dp->irq_trans) {
696 irq = dp->irq_trans->irq_build(dp, irq,
697 dp->irq_trans->data);
698#if 1
699 printk("%s: direct translate %x --> %x\n",
700 dp->full_name, orig_irq, irq);
701#endif
702 return irq;
703 }
704
705 /* Something more complicated. Walk up to the root, applying
706 * interrupt-map or bus specific translations, until we hit
707 * an IRQ translator.
708 *
709 * If we hit a bus type or situation we cannot handle, we
710 * stop and assume that the original IRQ number was in a
 711	 * format which has special meaning to its immediate parent.
712 */
713 pp = dp->parent;
714 ip = NULL;
715 while (pp) {
716 void *imap, *imsk;
717 int imlen;
718
719 imap = of_get_property(pp, "interrupt-map", &imlen);
720 imsk = of_get_property(pp, "interrupt-map-mask", NULL);
721 if (imap && imsk) {
722 struct device_node *iret;
723 int this_orig_irq = irq;
724
725 iret = apply_interrupt_map(dp, pp,
726 imap, imlen, imsk,
727 &irq);
728#if 1
729 printk("%s: Apply [%s:%x] imap --> [%s:%x]\n",
730 op->node->full_name,
731 pp->full_name, this_orig_irq,
732 (iret ? iret->full_name : "NULL"), irq);
733#endif
734 if (!iret)
735 break;
736
737 if (iret->irq_trans) {
738 ip = iret;
739 break;
740 }
741 } else {
742 if (!strcmp(pp->type, "pci") ||
743 !strcmp(pp->type, "pciex")) {
744 unsigned int this_orig_irq = irq;
745
746 irq = pci_irq_swizzle(dp, pp, irq);
747#if 1
748 printk("%s: PCI swizzle [%s] %x --> %x\n",
749 op->node->full_name,
750 pp->full_name, this_orig_irq, irq);
751#endif
752 }
753
754 if (pp->irq_trans) {
755 ip = pp;
756 break;
757 }
758 }
759 dp = pp;
760 pp = pp->parent;
761 }
762 if (!ip)
763 return orig_irq;
764
765 irq = ip->irq_trans->irq_build(op->node, irq,
766 ip->irq_trans->data);
767#if 1
768 printk("%s: Apply IRQ trans [%s] %x --> %x\n",
769 op->node->full_name, ip->full_name, orig_irq, irq);
770#endif
771
772 return irq;
773}
774
775static struct of_device * __init scan_one_device(struct device_node *dp,
776 struct device *parent)
777{
778 struct of_device *op = kzalloc(sizeof(*op), GFP_KERNEL);
779 unsigned int *irq;
780 int len, i;
781
782 if (!op)
783 return NULL;
784
785 op->node = dp;
786
787 op->clock_freq = of_getintprop_default(dp, "clock-frequency",
788 (25*1000*1000));
789 op->portid = of_getintprop_default(dp, "upa-portid", -1);
790 if (op->portid == -1)
791 op->portid = of_getintprop_default(dp, "portid", -1);
792
793 irq = of_get_property(dp, "interrupts", &len);
794 if (irq) {
795 memcpy(op->irqs, irq, len);
796 op->num_irqs = len / 4;
797 } else {
798 op->num_irqs = 0;
799 }
800
801 build_device_resources(op, parent);
802 for (i = 0; i < op->num_irqs; i++)
803 op->irqs[i] = build_one_device_irq(op, parent, op->irqs[i]);
804
805 op->dev.parent = parent;
806 op->dev.bus = &of_bus_type;
807 if (!parent)
808 strcpy(op->dev.bus_id, "root");
809 else
810 strcpy(op->dev.bus_id, dp->path_component_name);
811
812 if (of_device_register(op)) {
813 printk("%s: Could not register of device.\n",
814 dp->full_name);
815 kfree(op);
816 op = NULL;
817 }
818
819 return op;
820}
821
822static void __init scan_tree(struct device_node *dp, struct device *parent)
823{
824 while (dp) {
825 struct of_device *op = scan_one_device(dp, parent);
826
827 if (op)
828 scan_tree(dp->child, &op->dev);
829
830 dp = dp->sibling;
831 }
832}
833
834static void __init scan_of_devices(void)
835{
836 struct device_node *root = of_find_node_by_path("/");
837 struct of_device *parent;
838
839 parent = scan_one_device(root, NULL);
840 if (!parent)
841 return;
842
843 scan_tree(root->child, &parent->dev);
844}
845
166static int __init of_bus_driver_init(void) 846static int __init of_bus_driver_init(void)
167{ 847{
168 int err = 0; 848 int err;
169 849
850 err = bus_register(&of_bus_type);
170#ifdef CONFIG_PCI 851#ifdef CONFIG_PCI
171 if (!err) 852 if (!err)
172 err = bus_register(&isa_bus_type); 853 err = bus_register(&isa_bus_type);
@@ -177,7 +858,11 @@ static int __init of_bus_driver_init(void)
177 if (!err) 858 if (!err)
178 err = bus_register(&sbus_bus_type); 859 err = bus_register(&sbus_bus_type);
179#endif 860#endif
180 return 0; 861
862 if (!err)
863 scan_of_devices();
864
865 return err;
181} 866}
182 867
183postcore_initcall(of_bus_driver_init); 868postcore_initcall(of_bus_driver_init);
diff --git a/arch/sparc64/kernel/pci.c b/arch/sparc64/kernel/pci.c
index 20ca9ec8fd3b..04ea6c2eb7a1 100644
--- a/arch/sparc64/kernel/pci.c
+++ b/arch/sparc64/kernel/pci.c
@@ -307,7 +307,6 @@ static void __init pci_scan_each_controller_bus(void)
307 p->scan_bus(p); 307 p->scan_bus(p);
308} 308}
309 309
310extern void clock_probe(void);
311extern void power_init(void); 310extern void power_init(void);
312 311
313static int __init pcibios_init(void) 312static int __init pcibios_init(void)
@@ -320,7 +319,6 @@ static int __init pcibios_init(void)
320 319
321 isa_init(); 320 isa_init();
322 ebus_init(); 321 ebus_init();
323 clock_probe();
324 power_init(); 322 power_init();
325 323
326 return 0; 324 return 0;
@@ -406,14 +404,8 @@ void pcibios_bus_to_resource(struct pci_dev *pdev, struct resource *res,
406} 404}
407EXPORT_SYMBOL(pcibios_bus_to_resource); 405EXPORT_SYMBOL(pcibios_bus_to_resource);
408 406
409extern int pci_irq_verbose;
410
411char * __init pcibios_setup(char *str) 407char * __init pcibios_setup(char *str)
412{ 408{
413 if (!strcmp(str, "irq_verbose")) {
414 pci_irq_verbose = 1;
415 return NULL;
416 }
417 return str; 409 return str;
418} 410}
419 411
diff --git a/arch/sparc64/kernel/pci_common.c b/arch/sparc64/kernel/pci_common.c
index b06a2955bf5f..7a59cc72c844 100644
--- a/arch/sparc64/kernel/pci_common.c
+++ b/arch/sparc64/kernel/pci_common.c
@@ -10,12 +10,10 @@
10 10
11#include <asm/pbm.h> 11#include <asm/pbm.h>
12#include <asm/prom.h> 12#include <asm/prom.h>
13#include <asm/of_device.h>
13 14
14#include "pci_impl.h" 15#include "pci_impl.h"
15 16
16/* Pass "pci=irq_verbose" on the kernel command line to enable this. */
17int pci_irq_verbose;
18
19/* Fix self device of BUS and hook it into BUS->self. 17/* Fix self device of BUS and hook it into BUS->self.
20 * The pci_scan_bus does not do this for the host bridge. 18 * The pci_scan_bus does not do this for the host bridge.
21 */ 19 */
@@ -169,6 +167,7 @@ static void __init pdev_cookie_fillin(struct pci_pbm_info *pbm,
169 } 167 }
170 pcp->pbm = pbm; 168 pcp->pbm = pbm;
171 pcp->prom_node = dp; 169 pcp->prom_node = dp;
170 pcp->op = of_find_device_by_node(dp);
172 memcpy(pcp->prom_regs, pregs, 171 memcpy(pcp->prom_regs, pregs,
173 nregs * sizeof(struct linux_prom_pci_registers)); 172 nregs * sizeof(struct linux_prom_pci_registers));
174 pcp->num_prom_regs = nregs; 173 pcp->num_prom_regs = nregs;
@@ -549,296 +548,18 @@ void __init pci_assign_unassigned(struct pci_pbm_info *pbm,
549 pci_assign_unassigned(pbm, bus); 548 pci_assign_unassigned(pbm, bus);
550} 549}
551 550
552static inline unsigned int pci_slot_swivel(struct pci_pbm_info *pbm,
553 struct pci_dev *toplevel_pdev,
554 struct pci_dev *pdev,
555 unsigned int interrupt)
556{
557 unsigned int ret;
558
559 if (unlikely(interrupt < 1 || interrupt > 4)) {
560 printk("%s: Device %s interrupt value of %u is strange.\n",
561 pbm->name, pci_name(pdev), interrupt);
562 return interrupt;
563 }
564
565 ret = ((interrupt - 1 + (PCI_SLOT(pdev->devfn) & 3)) & 3) + 1;
566
567 if (pci_irq_verbose)
568 printk("%s: %s IRQ Swivel %s [%x:%x] -> [%x]\n",
569 pbm->name, pci_name(toplevel_pdev), pci_name(pdev),
570 interrupt, PCI_SLOT(pdev->devfn), ret);
571
572 return ret;
573}
574
575static inline unsigned int pci_apply_intmap(struct pci_pbm_info *pbm,
576 struct pci_dev *toplevel_pdev,
577 struct pci_dev *pbus,
578 struct pci_dev *pdev,
579 unsigned int interrupt,
580 struct device_node **cnode)
581{
582 struct linux_prom_pci_intmap *imap;
583 struct linux_prom_pci_intmask *imask;
584 struct pcidev_cookie *pbus_pcp = pbus->sysdata;
585 struct pcidev_cookie *pdev_pcp = pdev->sysdata;
586 struct linux_prom_pci_registers *pregs = pdev_pcp->prom_regs;
587 struct property *prop;
588 int plen, num_imap, i;
589 unsigned int hi, mid, lo, irq, orig_interrupt;
590
591 *cnode = pbus_pcp->prom_node;
592
593 prop = of_find_property(pbus_pcp->prom_node, "interrupt-map", &plen);
594 if (!prop ||
595 (plen % sizeof(struct linux_prom_pci_intmap)) != 0) {
596 printk("%s: Device %s interrupt-map has bad len %d\n",
597 pbm->name, pci_name(pbus), plen);
598 goto no_intmap;
599 }
600 imap = prop->value;
601 num_imap = plen / sizeof(struct linux_prom_pci_intmap);
602
603 prop = of_find_property(pbus_pcp->prom_node, "interrupt-map-mask", &plen);
604 if (!prop ||
605 (plen % sizeof(struct linux_prom_pci_intmask)) != 0) {
606 printk("%s: Device %s interrupt-map-mask has bad len %d\n",
607 pbm->name, pci_name(pbus), plen);
608 goto no_intmap;
609 }
610 imask = prop->value;
611
612 orig_interrupt = interrupt;
613
614 hi = pregs->phys_hi & imask->phys_hi;
615 mid = pregs->phys_mid & imask->phys_mid;
616 lo = pregs->phys_lo & imask->phys_lo;
617 irq = interrupt & imask->interrupt;
618
619 for (i = 0; i < num_imap; i++) {
620 if (imap[i].phys_hi == hi &&
621 imap[i].phys_mid == mid &&
622 imap[i].phys_lo == lo &&
623 imap[i].interrupt == irq) {
624 *cnode = of_find_node_by_phandle(imap[i].cnode);
625 interrupt = imap[i].cinterrupt;
626 }
627 }
628
629 if (pci_irq_verbose)
630 printk("%s: %s MAP BUS %s DEV %s [%x] -> [%x]\n",
631 pbm->name, pci_name(toplevel_pdev),
632 pci_name(pbus), pci_name(pdev),
633 orig_interrupt, interrupt);
634
635no_intmap:
636 return interrupt;
637}
638
639/* For each PCI bus on the way to the root:
640 * 1) If it has an interrupt-map property, apply it.
641 * 2) Else, swivel the interrupt number based upon the PCI device number.
642 *
643 * Return the "IRQ controller" node. If this is the PBM's device node,
644 * all interrupt translations are complete, else we should use that node's
645 * "reg" property to apply the PBM's "interrupt-{map,mask}" to the interrupt.
646 */
647static struct device_node * __init
648pci_intmap_match_to_root(struct pci_pbm_info *pbm,
649 struct pci_dev *pdev,
650 unsigned int *interrupt)
651{
652 struct pci_dev *toplevel_pdev = pdev;
653 struct pcidev_cookie *toplevel_pcp = toplevel_pdev->sysdata;
654 struct device_node *cnode = toplevel_pcp->prom_node;
655
656 while (pdev->bus->number != pbm->pci_first_busno) {
657 struct pci_dev *pbus = pdev->bus->self;
658 struct pcidev_cookie *pcp = pbus->sysdata;
659 struct property *prop;
660
661 prop = of_find_property(pcp->prom_node, "interrupt-map", NULL);
662 if (!prop) {
663 *interrupt = pci_slot_swivel(pbm, toplevel_pdev,
664 pdev, *interrupt);
665 cnode = pcp->prom_node;
666 } else {
667 *interrupt = pci_apply_intmap(pbm, toplevel_pdev,
668 pbus, pdev,
669 *interrupt, &cnode);
670
671 while (pcp->prom_node != cnode &&
672 pbus->bus->number != pbm->pci_first_busno) {
673 pbus = pbus->bus->self;
674 pcp = pbus->sysdata;
675 }
676 }
677 pdev = pbus;
678
679 if (cnode == pbm->prom_node)
680 break;
681 }
682
683 return cnode;
684}
685
686static int __init pci_intmap_match(struct pci_dev *pdev, unsigned int *interrupt)
687{
688 struct pcidev_cookie *dev_pcp = pdev->sysdata;
689 struct pci_pbm_info *pbm = dev_pcp->pbm;
690 struct linux_prom_pci_registers *reg;
691 struct device_node *cnode;
692 struct property *prop;
693 unsigned int hi, mid, lo, irq;
694 int i, plen;
695
696 cnode = pci_intmap_match_to_root(pbm, pdev, interrupt);
697 if (cnode == pbm->prom_node)
698 goto success;
699
700 prop = of_find_property(cnode, "reg", &plen);
701 if (!prop ||
702 (plen % sizeof(struct linux_prom_pci_registers)) != 0) {
703 printk("%s: OBP node %s reg property has bad len %d\n",
704 pbm->name, cnode->full_name, plen);
705 goto fail;
706 }
707 reg = prop->value;
708
709 hi = reg[0].phys_hi & pbm->pbm_intmask->phys_hi;
710 mid = reg[0].phys_mid & pbm->pbm_intmask->phys_mid;
711 lo = reg[0].phys_lo & pbm->pbm_intmask->phys_lo;
712 irq = *interrupt & pbm->pbm_intmask->interrupt;
713
714 for (i = 0; i < pbm->num_pbm_intmap; i++) {
715 struct linux_prom_pci_intmap *intmap;
716
717 intmap = &pbm->pbm_intmap[i];
718
719 if (intmap->phys_hi == hi &&
720 intmap->phys_mid == mid &&
721 intmap->phys_lo == lo &&
722 intmap->interrupt == irq) {
723 *interrupt = intmap->cinterrupt;
724 goto success;
725 }
726 }
727
728fail:
729 return 0;
730
731success:
732 if (pci_irq_verbose)
733 printk("%s: Routing bus[%2x] slot[%2x] to INO[%02x]\n",
734 pbm->name,
735 pdev->bus->number, PCI_SLOT(pdev->devfn),
736 *interrupt);
737 return 1;
738}
739
740static void __init pdev_fixup_irq(struct pci_dev *pdev) 551static void __init pdev_fixup_irq(struct pci_dev *pdev)
741{ 552{
742 struct pcidev_cookie *pcp = pdev->sysdata; 553 struct pcidev_cookie *pcp = pdev->sysdata;
743 struct pci_pbm_info *pbm = pcp->pbm; 554 struct of_device *op = pcp->op;
744 struct pci_controller_info *p = pbm->parent;
745 unsigned int portid = pbm->portid;
746 unsigned int prom_irq;
747 struct device_node *dp = pcp->prom_node;
748 struct property *prop;
749
750 /* If this is an empty EBUS device, sometimes OBP fails to
751 * give it a valid fully specified interrupts property.
752 * The EBUS hooked up to SunHME on PCI I/O boards of
753 * Ex000 systems is one such case.
754 *
755 * The interrupt is not important so just ignore it.
756 */
757 if (pdev->vendor == PCI_VENDOR_ID_SUN &&
758 pdev->device == PCI_DEVICE_ID_SUN_EBUS &&
759 !dp->child) {
760 pdev->irq = 0;
761 return;
762 }
763 555
764 prop = of_find_property(dp, "interrupts", NULL); 556 if (op->irqs[0] == 0xffffffff) {
765 if (!prop) { 557 pdev->irq = PCI_IRQ_NONE;
766 pdev->irq = 0;
767 return; 558 return;
768 } 559 }
769 prom_irq = *(unsigned int *) prop->value;
770
771 if (tlb_type != hypervisor) {
772 /* Fully specified already? */
773 if (((prom_irq & PCI_IRQ_IGN) >> 6) == portid) {
774 pdev->irq = p->irq_build(pbm, pdev, prom_irq);
775 goto have_irq;
776 }
777
778 /* An onboard device? (bit 5 set) */
779 if ((prom_irq & PCI_IRQ_INO) & 0x20) {
780 pdev->irq = p->irq_build(pbm, pdev, (portid << 6 | prom_irq));
781 goto have_irq;
782 }
783 }
784
785 /* Can we find a matching entry in the interrupt-map? */
786 if (pci_intmap_match(pdev, &prom_irq)) {
787 pdev->irq = p->irq_build(pbm, pdev, (portid << 6) | prom_irq);
788 goto have_irq;
789 }
790
791 /* Ok, we have to do it the hard way. */
792 {
793 unsigned int bus, slot, line;
794
795 bus = (pbm == &pbm->parent->pbm_B) ? (1 << 4) : 0;
796
797 /* If we have a legal interrupt property, use it as
798 * the IRQ line.
799 */
800 if (prom_irq > 0 && prom_irq < 5) {
801 line = ((prom_irq - 1) & 3);
802 } else {
803 u8 pci_irq_line;
804 560
805 /* Else just directly consult PCI config space. */ 561 pdev->irq = op->irqs[0];
806 pci_read_config_byte(pdev, PCI_INTERRUPT_PIN, &pci_irq_line);
807 line = ((pci_irq_line - 1) & 3);
808 }
809
810 /* Now figure out the slot.
811 *
812 * Basically, device number zero on the top-level bus is
813 * always the PCI host controller. Slot 0 is then device 1.
814 * PBM A supports two external slots (0 and 1), and PBM B
815 * supports 4 external slots (0, 1, 2, and 3). On-board PCI
816 * devices are wired to device numbers outside of these
817 * ranges. -DaveM
818 */
819 if (pdev->bus->number == pbm->pci_first_busno) {
820 slot = PCI_SLOT(pdev->devfn) - pbm->pci_first_slot;
821 } else {
822 struct pci_dev *bus_dev;
823
824 /* Underneath a bridge, use slot number of parent
825 * bridge which is closest to the PBM.
826 */
827 bus_dev = pdev->bus->self;
828 while (bus_dev->bus &&
829 bus_dev->bus->number != pbm->pci_first_busno)
830 bus_dev = bus_dev->bus->self;
831
832 slot = PCI_SLOT(bus_dev->devfn) - pbm->pci_first_slot;
833 }
834 slot = slot << 2;
835
836 pdev->irq = p->irq_build(pbm, pdev,
837 ((portid << 6) & PCI_IRQ_IGN) |
838 (bus | slot | line));
839 }
840 562
841have_irq:
842 pci_write_config_byte(pdev, PCI_INTERRUPT_LINE, 563 pci_write_config_byte(pdev, PCI_INTERRUPT_LINE,
843 pdev->irq & PCI_IRQ_INO); 564 pdev->irq & PCI_IRQ_INO);
844} 565}
diff --git a/arch/sparc64/kernel/pci_psycho.c b/arch/sparc64/kernel/pci_psycho.c
index 5b2261ebda6f..bf7b32b36705 100644
--- a/arch/sparc64/kernel/pci_psycho.c
+++ b/arch/sparc64/kernel/pci_psycho.c
@@ -18,6 +18,7 @@
18#include <asm/irq.h> 18#include <asm/irq.h>
19#include <asm/starfire.h> 19#include <asm/starfire.h>
20#include <asm/prom.h> 20#include <asm/prom.h>
21#include <asm/of_device.h>
21 22
22#include "pci_impl.h" 23#include "pci_impl.h"
23#include "iommu_common.h" 24#include "iommu_common.h"
@@ -208,110 +209,6 @@ static struct pci_ops psycho_ops = {
208 .write = psycho_write_pci_cfg, 209 .write = psycho_write_pci_cfg,
209}; 210};
210 211
211/* PSYCHO interrupt mapping support. */
212#define PSYCHO_IMAP_A_SLOT0 0x0c00UL
213#define PSYCHO_IMAP_B_SLOT0 0x0c20UL
214static unsigned long psycho_pcislot_imap_offset(unsigned long ino)
215{
216 unsigned int bus = (ino & 0x10) >> 4;
217 unsigned int slot = (ino & 0x0c) >> 2;
218
219 if (bus == 0)
220 return PSYCHO_IMAP_A_SLOT0 + (slot * 8);
221 else
222 return PSYCHO_IMAP_B_SLOT0 + (slot * 8);
223}
224
225#define PSYCHO_IMAP_SCSI 0x1000UL
226#define PSYCHO_IMAP_ETH 0x1008UL
227#define PSYCHO_IMAP_BPP 0x1010UL
228#define PSYCHO_IMAP_AU_REC 0x1018UL
229#define PSYCHO_IMAP_AU_PLAY 0x1020UL
230#define PSYCHO_IMAP_PFAIL 0x1028UL
231#define PSYCHO_IMAP_KMS 0x1030UL
232#define PSYCHO_IMAP_FLPY 0x1038UL
233#define PSYCHO_IMAP_SHW 0x1040UL
234#define PSYCHO_IMAP_KBD 0x1048UL
235#define PSYCHO_IMAP_MS 0x1050UL
236#define PSYCHO_IMAP_SER 0x1058UL
237#define PSYCHO_IMAP_TIM0 0x1060UL
238#define PSYCHO_IMAP_TIM1 0x1068UL
239#define PSYCHO_IMAP_UE 0x1070UL
240#define PSYCHO_IMAP_CE 0x1078UL
241#define PSYCHO_IMAP_A_ERR 0x1080UL
242#define PSYCHO_IMAP_B_ERR 0x1088UL
243#define PSYCHO_IMAP_PMGMT 0x1090UL
244#define PSYCHO_IMAP_GFX 0x1098UL
245#define PSYCHO_IMAP_EUPA 0x10a0UL
246
247static unsigned long __onboard_imap_off[] = {
248/*0x20*/ PSYCHO_IMAP_SCSI,
249/*0x21*/ PSYCHO_IMAP_ETH,
250/*0x22*/ PSYCHO_IMAP_BPP,
251/*0x23*/ PSYCHO_IMAP_AU_REC,
252/*0x24*/ PSYCHO_IMAP_AU_PLAY,
253/*0x25*/ PSYCHO_IMAP_PFAIL,
254/*0x26*/ PSYCHO_IMAP_KMS,
255/*0x27*/ PSYCHO_IMAP_FLPY,
256/*0x28*/ PSYCHO_IMAP_SHW,
257/*0x29*/ PSYCHO_IMAP_KBD,
258/*0x2a*/ PSYCHO_IMAP_MS,
259/*0x2b*/ PSYCHO_IMAP_SER,
260/*0x2c*/ PSYCHO_IMAP_TIM0,
261/*0x2d*/ PSYCHO_IMAP_TIM1,
262/*0x2e*/ PSYCHO_IMAP_UE,
263/*0x2f*/ PSYCHO_IMAP_CE,
264/*0x30*/ PSYCHO_IMAP_A_ERR,
265/*0x31*/ PSYCHO_IMAP_B_ERR,
266/*0x32*/ PSYCHO_IMAP_PMGMT
267};
268#define PSYCHO_ONBOARD_IRQ_BASE 0x20
269#define PSYCHO_ONBOARD_IRQ_LAST 0x32
270#define psycho_onboard_imap_offset(__ino) \
271 __onboard_imap_off[(__ino) - PSYCHO_ONBOARD_IRQ_BASE]
272
273#define PSYCHO_ICLR_A_SLOT0 0x1400UL
274#define PSYCHO_ICLR_SCSI 0x1800UL
275
276#define psycho_iclr_offset(ino) \
277 ((ino & 0x20) ? (PSYCHO_ICLR_SCSI + (((ino) & 0x1f) << 3)) : \
278 (PSYCHO_ICLR_A_SLOT0 + (((ino) & 0x1f)<<3)))
279
280static unsigned int psycho_irq_build(struct pci_pbm_info *pbm,
281 struct pci_dev *pdev,
282 unsigned int ino)
283{
284 unsigned long imap, iclr;
285 unsigned long imap_off, iclr_off;
286 int inofixup = 0;
287
288 ino &= PCI_IRQ_INO;
289 if (ino < PSYCHO_ONBOARD_IRQ_BASE) {
290 /* PCI slot */
291 imap_off = psycho_pcislot_imap_offset(ino);
292 } else {
293 /* Onboard device */
294 if (ino > PSYCHO_ONBOARD_IRQ_LAST) {
295 prom_printf("psycho_irq_build: Wacky INO [%x]\n", ino);
296 prom_halt();
297 }
298 imap_off = psycho_onboard_imap_offset(ino);
299 }
300
301 /* Now build the IRQ bucket. */
302 imap = pbm->controller_regs + imap_off;
303 imap += 4;
304
305 iclr_off = psycho_iclr_offset(ino);
306 iclr = pbm->controller_regs + iclr_off;
307 iclr += 4;
308
309 if ((ino & 0x20) == 0)
310 inofixup = ino & 0x03;
311
312 return build_irq(inofixup, iclr, imap);
313}
314
315/* PSYCHO error handling support. */ 212/* PSYCHO error handling support. */
316enum psycho_error_type { 213enum psycho_error_type {
317 UE_ERR, CE_ERR, PCI_ERR 214 UE_ERR, CE_ERR, PCI_ERR
@@ -944,51 +841,34 @@ static irqreturn_t psycho_pcierr_intr(int irq, void *dev_id, struct pt_regs *reg
944#define PSYCHO_ECCCTRL_EE 0x8000000000000000UL /* Enable ECC Checking */ 841#define PSYCHO_ECCCTRL_EE 0x8000000000000000UL /* Enable ECC Checking */
945#define PSYCHO_ECCCTRL_UE 0x4000000000000000UL /* Enable UE Interrupts */ 842#define PSYCHO_ECCCTRL_UE 0x4000000000000000UL /* Enable UE Interrupts */
946#define PSYCHO_ECCCTRL_CE 0x2000000000000000UL /* Enable CE INterrupts */ 843#define PSYCHO_ECCCTRL_CE 0x2000000000000000UL /* Enable CE INterrupts */
947#define PSYCHO_UE_INO 0x2e
948#define PSYCHO_CE_INO 0x2f
949#define PSYCHO_PCIERR_A_INO 0x30
950#define PSYCHO_PCIERR_B_INO 0x31
951static void psycho_register_error_handlers(struct pci_controller_info *p) 844static void psycho_register_error_handlers(struct pci_controller_info *p)
952{ 845{
953 struct pci_pbm_info *pbm = &p->pbm_A; /* arbitrary */ 846 struct pci_pbm_info *pbm = &p->pbm_A; /* arbitrary */
847 struct of_device *op = of_find_device_by_node(pbm->prom_node);
954 unsigned long base = p->pbm_A.controller_regs; 848 unsigned long base = p->pbm_A.controller_regs;
955 unsigned int irq, portid = pbm->portid;
956 u64 tmp; 849 u64 tmp;
957 850
958 /* Build IRQs and register handlers. */ 851 if (!op)
959 irq = psycho_irq_build(pbm, NULL, (portid << 6) | PSYCHO_UE_INO); 852 return;
960 if (request_irq(irq, psycho_ue_intr,
961 SA_SHIRQ, "PSYCHO UE", p) < 0) {
962 prom_printf("PSYCHO%d: Cannot register UE interrupt.\n",
963 p->index);
964 prom_halt();
965 }
966 853
967 irq = psycho_irq_build(pbm, NULL, (portid << 6) | PSYCHO_CE_INO); 854 /* Psycho interrupt property order is:
968 if (request_irq(irq, psycho_ce_intr, 855 * 0: PCIERR PBM B INO
969 SA_SHIRQ, "PSYCHO CE", p) < 0) { 856 * 1: UE ERR
970 prom_printf("PSYCHO%d: Cannot register CE interrupt.\n", 857 * 2: CE ERR
971 p->index); 858 * 3: POWER FAIL
972 prom_halt(); 859 * 4: SPARE HARDWARE
973 } 860 * 5: PCIERR PBM A INO
861 */
974 862
975 pbm = &p->pbm_A; 863 if (op->num_irqs < 6)
976 irq = psycho_irq_build(pbm, NULL, (portid << 6) | PSYCHO_PCIERR_A_INO); 864 return;
977 if (request_irq(irq, psycho_pcierr_intr,
978 SA_SHIRQ, "PSYCHO PCIERR", &p->pbm_A) < 0) {
979 prom_printf("PSYCHO%d(PBMA): Cannot register PciERR interrupt.\n",
980 p->index);
981 prom_halt();
982 }
983 865
984 pbm = &p->pbm_B; 866 request_irq(op->irqs[1], psycho_ue_intr, SA_SHIRQ, "PSYCHO UE", p);
985 irq = psycho_irq_build(pbm, NULL, (portid << 6) | PSYCHO_PCIERR_B_INO); 867 request_irq(op->irqs[2], psycho_ce_intr, SA_SHIRQ, "PSYCHO CE", p);
986 if (request_irq(irq, psycho_pcierr_intr, 868 request_irq(op->irqs[5], psycho_pcierr_intr, SA_SHIRQ,
987 SA_SHIRQ, "PSYCHO PCIERR", &p->pbm_B) < 0) { 869 "PSYCHO PCIERR-A", &p->pbm_A);
988 prom_printf("PSYCHO%d(PBMB): Cannot register PciERR interrupt.\n", 870 request_irq(op->irqs[0], psycho_pcierr_intr, SA_SHIRQ,
989 p->index); 871 "PSYCHO PCIERR-B", &p->pbm_B);
990 prom_halt();
991 }
992 872
993 /* Enable UE and CE interrupts for controller. */ 873 /* Enable UE and CE interrupts for controller. */
994 psycho_write(base + PSYCHO_ECC_CTRL, 874 psycho_write(base + PSYCHO_ECC_CTRL,
@@ -1171,9 +1051,7 @@ static void psycho_iommu_init(struct pci_controller_info *p)
1171 1051
1172 /* If necessary, hook us up for starfire IRQ translations. */ 1052 /* If necessary, hook us up for starfire IRQ translations. */
1173 if (this_is_starfire) 1053 if (this_is_starfire)
1174 p->starfire_cookie = starfire_hookup(p->pbm_A.portid); 1054 starfire_hookup(p->pbm_A.portid);
1175 else
1176 p->starfire_cookie = NULL;
1177} 1055}
1178 1056
1179#define PSYCHO_IRQ_RETRY 0x1a00UL 1057#define PSYCHO_IRQ_RETRY 0x1a00UL
@@ -1408,7 +1286,6 @@ void psycho_init(struct device_node *dp, char *model_name)
1408 p->index = pci_num_controllers++; 1286 p->index = pci_num_controllers++;
1409 p->pbms_same_domain = 0; 1287 p->pbms_same_domain = 0;
1410 p->scan_bus = psycho_scan_bus; 1288 p->scan_bus = psycho_scan_bus;
1411 p->irq_build = psycho_irq_build;
1412 p->base_address_update = psycho_base_address_update; 1289 p->base_address_update = psycho_base_address_update;
1413 p->resource_adjust = psycho_resource_adjust; 1290 p->resource_adjust = psycho_resource_adjust;
1414 p->pci_ops = &psycho_ops; 1291 p->pci_ops = &psycho_ops;
diff --git a/arch/sparc64/kernel/pci_sabre.c b/arch/sparc64/kernel/pci_sabre.c
index 26f194ce4400..5e087b0fb4c9 100644
--- a/arch/sparc64/kernel/pci_sabre.c
+++ b/arch/sparc64/kernel/pci_sabre.c
@@ -485,114 +485,6 @@ static struct pci_ops sabre_ops = {
485 .write = sabre_write_pci_cfg, 485 .write = sabre_write_pci_cfg,
486}; 486};
487 487
488static unsigned long sabre_pcislot_imap_offset(unsigned long ino)
489{
490 unsigned int bus = (ino & 0x10) >> 4;
491 unsigned int slot = (ino & 0x0c) >> 2;
492
493 if (bus == 0)
494 return SABRE_IMAP_A_SLOT0 + (slot * 8);
495 else
496 return SABRE_IMAP_B_SLOT0 + (slot * 8);
497}
498
499static unsigned long __onboard_imap_off[] = {
500/*0x20*/ SABRE_IMAP_SCSI,
501/*0x21*/ SABRE_IMAP_ETH,
502/*0x22*/ SABRE_IMAP_BPP,
503/*0x23*/ SABRE_IMAP_AU_REC,
504/*0x24*/ SABRE_IMAP_AU_PLAY,
505/*0x25*/ SABRE_IMAP_PFAIL,
506/*0x26*/ SABRE_IMAP_KMS,
507/*0x27*/ SABRE_IMAP_FLPY,
508/*0x28*/ SABRE_IMAP_SHW,
509/*0x29*/ SABRE_IMAP_KBD,
510/*0x2a*/ SABRE_IMAP_MS,
511/*0x2b*/ SABRE_IMAP_SER,
512/*0x2c*/ 0 /* reserved */,
513/*0x2d*/ 0 /* reserved */,
514/*0x2e*/ SABRE_IMAP_UE,
515/*0x2f*/ SABRE_IMAP_CE,
516/*0x30*/ SABRE_IMAP_PCIERR,
517};
518#define SABRE_ONBOARD_IRQ_BASE 0x20
519#define SABRE_ONBOARD_IRQ_LAST 0x30
520#define sabre_onboard_imap_offset(__ino) \
521 __onboard_imap_off[(__ino) - SABRE_ONBOARD_IRQ_BASE]
522
523#define sabre_iclr_offset(ino) \
524 ((ino & 0x20) ? (SABRE_ICLR_SCSI + (((ino) & 0x1f) << 3)) : \
525 (SABRE_ICLR_A_SLOT0 + (((ino) & 0x1f)<<3)))
526
527/* When a device lives behind a bridge deeper in the PCI bus topology
528 * than APB, a special sequence must run to make sure all pending DMA
529 * transfers at the time of IRQ delivery are visible in the coherency
530 * domain by the cpu. This sequence is to perform a read on the far
531 * side of the non-APB bridge, then perform a read of Sabre's DMA
532 * write-sync register.
533 */
534static void sabre_wsync_handler(unsigned int ino, void *_arg1, void *_arg2)
535{
536 struct pci_dev *pdev = _arg1;
537 unsigned long sync_reg = (unsigned long) _arg2;
538 u16 _unused;
539
540 pci_read_config_word(pdev, PCI_VENDOR_ID, &_unused);
541 sabre_read(sync_reg);
542}
543
544static unsigned int sabre_irq_build(struct pci_pbm_info *pbm,
545 struct pci_dev *pdev,
546 unsigned int ino)
547{
548 unsigned long imap, iclr;
549 unsigned long imap_off, iclr_off;
550 int inofixup = 0;
551 int virt_irq;
552
553 ino &= PCI_IRQ_INO;
554 if (ino < SABRE_ONBOARD_IRQ_BASE) {
555 /* PCI slot */
556 imap_off = sabre_pcislot_imap_offset(ino);
557 } else {
558 /* onboard device */
559 if (ino > SABRE_ONBOARD_IRQ_LAST) {
560 prom_printf("sabre_irq_build: Wacky INO [%x]\n", ino);
561 prom_halt();
562 }
563 imap_off = sabre_onboard_imap_offset(ino);
564 }
565
566 /* Now build the IRQ bucket. */
567 imap = pbm->controller_regs + imap_off;
568 imap += 4;
569
570 iclr_off = sabre_iclr_offset(ino);
571 iclr = pbm->controller_regs + iclr_off;
572 iclr += 4;
573
574 if ((ino & 0x20) == 0)
575 inofixup = ino & 0x03;
576
577 virt_irq = build_irq(inofixup, iclr, imap);
578
579 if (pdev) {
580 struct pcidev_cookie *pcp = pdev->sysdata;
581
582 if (pdev->bus->number != pcp->pbm->pci_first_busno) {
583 struct pci_controller_info *p = pcp->pbm->parent;
584
585 irq_install_pre_handler(virt_irq,
586 sabre_wsync_handler,
587 pdev,
588 (void *)
589 p->pbm_A.controller_regs +
590 SABRE_WRSYNC);
591 }
592 }
593 return virt_irq;
594}
595
596/* SABRE error handling support. */ 488/* SABRE error handling support. */
597static void sabre_check_iommu_error(struct pci_controller_info *p, 489static void sabre_check_iommu_error(struct pci_controller_info *p,
598 unsigned long afsr, 490 unsigned long afsr,
@@ -929,17 +821,30 @@ static irqreturn_t sabre_pcierr_intr(int irq, void *dev_id, struct pt_regs *regs
929 return IRQ_HANDLED; 821 return IRQ_HANDLED;
930} 822}
931 823
932/* XXX What about PowerFail/PowerManagement??? -DaveM */
933#define SABRE_UE_INO 0x2e
934#define SABRE_CE_INO 0x2f
935#define SABRE_PCIERR_INO 0x30
936static void sabre_register_error_handlers(struct pci_controller_info *p) 824static void sabre_register_error_handlers(struct pci_controller_info *p)
937{ 825{
938 struct pci_pbm_info *pbm = &p->pbm_A; /* arbitrary */ 826 struct pci_pbm_info *pbm = &p->pbm_A; /* arbitrary */
827 struct device_node *dp = pbm->prom_node;
828 struct of_device *op;
939 unsigned long base = pbm->controller_regs; 829 unsigned long base = pbm->controller_regs;
940 unsigned long irq, portid = pbm->portid;
941 u64 tmp; 830 u64 tmp;
942 831
832 if (pbm->chip_type == PBM_CHIP_TYPE_SABRE)
833 dp = dp->parent;
834
835 op = of_find_device_by_node(dp);
836 if (!op)
837 return;
838
839 /* Sabre/Hummingbird IRQ property layout is:
840 * 0: PCI ERR
841 * 1: UE ERR
842 * 2: CE ERR
843 * 3: POWER FAIL
844 */
845 if (op->num_irqs < 4)
846 return;
847
943 /* We clear the error bits in the appropriate AFSR before 848 /* We clear the error bits in the appropriate AFSR before
944 * registering the handler so that we don't get spurious 849 * registering the handler so that we don't get spurious
945 * interrupts. 850 * interrupts.
@@ -948,32 +853,16 @@ static void sabre_register_error_handlers(struct pci_controller_info *p)
948 (SABRE_UEAFSR_PDRD | SABRE_UEAFSR_PDWR | 853 (SABRE_UEAFSR_PDRD | SABRE_UEAFSR_PDWR |
949 SABRE_UEAFSR_SDRD | SABRE_UEAFSR_SDWR | 854 SABRE_UEAFSR_SDRD | SABRE_UEAFSR_SDWR |
950 SABRE_UEAFSR_SDTE | SABRE_UEAFSR_PDTE)); 855 SABRE_UEAFSR_SDTE | SABRE_UEAFSR_PDTE));
951 irq = sabre_irq_build(pbm, NULL, (portid << 6) | SABRE_UE_INO); 856
952 if (request_irq(irq, sabre_ue_intr, 857 request_irq(op->irqs[1], sabre_ue_intr, SA_SHIRQ, "SABRE UE", p);
953 SA_SHIRQ, "SABRE UE", p) < 0) {
954 prom_printf("SABRE%d: Cannot register UE interrupt.\n",
955 p->index);
956 prom_halt();
957 }
958 858
959 sabre_write(base + SABRE_CE_AFSR, 859 sabre_write(base + SABRE_CE_AFSR,
960 (SABRE_CEAFSR_PDRD | SABRE_CEAFSR_PDWR | 860 (SABRE_CEAFSR_PDRD | SABRE_CEAFSR_PDWR |
961 SABRE_CEAFSR_SDRD | SABRE_CEAFSR_SDWR)); 861 SABRE_CEAFSR_SDRD | SABRE_CEAFSR_SDWR));
962 irq = sabre_irq_build(pbm, NULL, (portid << 6) | SABRE_CE_INO);
963 if (request_irq(irq, sabre_ce_intr,
964 SA_SHIRQ, "SABRE CE", p) < 0) {
965 prom_printf("SABRE%d: Cannot register CE interrupt.\n",
966 p->index);
967 prom_halt();
968 }
969 862
970 irq = sabre_irq_build(pbm, NULL, (portid << 6) | SABRE_PCIERR_INO); 863 request_irq(op->irqs[2], sabre_ce_intr, SA_SHIRQ, "SABRE CE", p);
971 if (request_irq(irq, sabre_pcierr_intr, 864 request_irq(op->irqs[0], sabre_pcierr_intr, SA_SHIRQ,
972 SA_SHIRQ, "SABRE PCIERR", p) < 0) { 865 "SABRE PCIERR", p);
973 prom_printf("SABRE%d: Cannot register PciERR interrupt.\n",
974 p->index);
975 prom_halt();
976 }
977 866
978 tmp = sabre_read(base + SABRE_PCICTRL); 867 tmp = sabre_read(base + SABRE_PCICTRL);
979 tmp |= SABRE_PCICTRL_ERREN; 868 tmp |= SABRE_PCICTRL_ERREN;
@@ -1492,7 +1381,6 @@ void sabre_init(struct device_node *dp, char *model_name)
1492 p->index = pci_num_controllers++; 1381 p->index = pci_num_controllers++;
1493 p->pbms_same_domain = 1; 1382 p->pbms_same_domain = 1;
1494 p->scan_bus = sabre_scan_bus; 1383 p->scan_bus = sabre_scan_bus;
1495 p->irq_build = sabre_irq_build;
1496 p->base_address_update = sabre_base_address_update; 1384 p->base_address_update = sabre_base_address_update;
1497 p->resource_adjust = sabre_resource_adjust; 1385 p->resource_adjust = sabre_resource_adjust;
1498 p->pci_ops = &sabre_ops; 1386 p->pci_ops = &sabre_ops;
diff --git a/arch/sparc64/kernel/pci_schizo.c b/arch/sparc64/kernel/pci_schizo.c
index f16449ccd7bc..5c6e2a9b91f8 100644
--- a/arch/sparc64/kernel/pci_schizo.c
+++ b/arch/sparc64/kernel/pci_schizo.c
@@ -217,116 +217,6 @@ static struct pci_ops schizo_ops = {
217 .write = schizo_write_pci_cfg, 217 .write = schizo_write_pci_cfg,
218}; 218};
219 219
220/* SCHIZO interrupt mapping support. Unlike Psycho, for this controller the
221 * imap/iclr registers are per-PBM.
222 */
223#define SCHIZO_IMAP_BASE 0x1000UL
224#define SCHIZO_ICLR_BASE 0x1400UL
225
226static unsigned long schizo_imap_offset(unsigned long ino)
227{
228 return SCHIZO_IMAP_BASE + (ino * 8UL);
229}
230
231static unsigned long schizo_iclr_offset(unsigned long ino)
232{
233 return SCHIZO_ICLR_BASE + (ino * 8UL);
234}
235
236static void tomatillo_wsync_handler(unsigned int ino, void *_arg1, void *_arg2)
237{
238 unsigned long sync_reg = (unsigned long) _arg2;
239 u64 mask = 1UL << (ino & IMAP_INO);
240 u64 val;
241 int limit;
242
243 schizo_write(sync_reg, mask);
244
245 limit = 100000;
246 val = 0;
247 while (--limit) {
248 val = schizo_read(sync_reg);
249 if (!(val & mask))
250 break;
251 }
252 if (limit <= 0) {
253 printk("tomatillo_wsync_handler: DMA won't sync [%lx:%lx]\n",
254 val, mask);
255 }
256
257 if (_arg1) {
258 static unsigned char cacheline[64]
259 __attribute__ ((aligned (64)));
260
261 __asm__ __volatile__("rd %%fprs, %0\n\t"
262 "or %0, %4, %1\n\t"
263 "wr %1, 0x0, %%fprs\n\t"
264 "stda %%f0, [%5] %6\n\t"
265 "wr %0, 0x0, %%fprs\n\t"
266 "membar #Sync"
267 : "=&r" (mask), "=&r" (val)
268 : "0" (mask), "1" (val),
269 "i" (FPRS_FEF), "r" (&cacheline[0]),
270 "i" (ASI_BLK_COMMIT_P));
271 }
272}
273
274static unsigned long schizo_ino_to_iclr(struct pci_pbm_info *pbm,
275 unsigned int ino)
276{
277 ino &= PCI_IRQ_INO;
278 return pbm->pbm_regs + schizo_iclr_offset(ino) + 4;
279}
280
281static unsigned long schizo_ino_to_imap(struct pci_pbm_info *pbm,
282 unsigned int ino)
283{
284 ino &= PCI_IRQ_INO;
285 return pbm->pbm_regs + schizo_imap_offset(ino) + 4;
286}
287
288static unsigned int schizo_irq_build(struct pci_pbm_info *pbm,
289 struct pci_dev *pdev,
290 unsigned int ino)
291{
292 unsigned long imap, iclr;
293 int ign_fixup;
294 int virt_irq;
295
296 ino &= PCI_IRQ_INO;
297
298 /* Now build the IRQ bucket. */
299 imap = schizo_ino_to_imap(pbm, ino);
300 iclr = schizo_ino_to_iclr(pbm, ino);
301
302 /* On Schizo, no inofixup occurs. This is because each
303 * INO has it's own IMAP register. On Psycho and Sabre
304 * there is only one IMAP register for each PCI slot even
305 * though four different INOs can be generated by each
306 * PCI slot.
307 *
308 * But, for JBUS variants (essentially, Tomatillo), we have
309 * to fixup the lowest bit of the interrupt group number.
310 */
311 ign_fixup = 0;
312 if (pbm->chip_type == PBM_CHIP_TYPE_TOMATILLO) {
313 if (pbm->portid & 1)
314 ign_fixup = (1 << 6);
315 }
316
317 virt_irq = build_irq(ign_fixup, iclr, imap);
318
319 if (pdev && pbm->chip_type == PBM_CHIP_TYPE_TOMATILLO) {
320 irq_install_pre_handler(virt_irq,
321 tomatillo_wsync_handler,
322 ((pbm->chip_version <= 4) ?
323 (void *) 1 : (void *) 0),
324 (void *) pbm->sync_reg);
325 }
326
327 return virt_irq;
328}
329
330/* SCHIZO error handling support. */ 220/* SCHIZO error handling support. */
331enum schizo_error_type { 221enum schizo_error_type {
332 UE_ERR, CE_ERR, PCI_ERR, SAFARI_ERR 222 UE_ERR, CE_ERR, PCI_ERR, SAFARI_ERR
@@ -362,34 +252,6 @@ struct pci_pbm_info *pbm_for_ino(struct pci_controller_info *p, u32 ino)
362 return &p->pbm_A; 252 return &p->pbm_A;
363} 253}
364 254
365static void schizo_clear_other_err_intr(struct pci_controller_info *p, int irq)
366{
367 struct pci_pbm_info *pbm;
368 unsigned long iclr;
369
370 /* Do not clear the interrupt for the other PCI bus.
371 *
372 * This "ACK both PBM IRQs" only needs to be performed
373 * for chip-wide error interrupts.
374 */
375 if ((irq & IMAP_INO) == SCHIZO_PCIERR_A_INO ||
376 (irq & IMAP_INO) == SCHIZO_PCIERR_B_INO)
377 return;
378
379 pbm = pbm_for_ino(p, irq);
380 if (pbm == &p->pbm_A)
381 pbm = &p->pbm_B;
382 else
383 pbm = &p->pbm_A;
384
385 schizo_irq_build(pbm, NULL,
386 (pbm->portid << 6) | (irq & IMAP_INO));
387
388 iclr = schizo_ino_to_iclr(pbm,
389 (pbm->portid << 6) | (irq & IMAP_INO));
390 upa_writel(ICLR_IDLE, iclr);
391}
392
393#define SCHIZO_STC_ERR 0xb800UL /* --> 0xba00 */ 255#define SCHIZO_STC_ERR 0xb800UL /* --> 0xba00 */
394#define SCHIZO_STC_TAG 0xba00UL /* --> 0xba80 */ 256#define SCHIZO_STC_TAG 0xba00UL /* --> 0xba80 */
395#define SCHIZO_STC_LINE 0xbb00UL /* --> 0xbb80 */ 257#define SCHIZO_STC_LINE 0xbb00UL /* --> 0xbb80 */
@@ -720,8 +582,6 @@ static irqreturn_t schizo_ue_intr(int irq, void *dev_id, struct pt_regs *regs)
720 /* Interrogate IOMMU for error status. */ 582 /* Interrogate IOMMU for error status. */
721 schizo_check_iommu_error(p, UE_ERR); 583 schizo_check_iommu_error(p, UE_ERR);
722 584
723 schizo_clear_other_err_intr(p, irq);
724
725 return IRQ_HANDLED; 585 return IRQ_HANDLED;
726} 586}
727 587
@@ -811,8 +671,6 @@ static irqreturn_t schizo_ce_intr(int irq, void *dev_id, struct pt_regs *regs)
811 printk("(none)"); 671 printk("(none)");
812 printk("]\n"); 672 printk("]\n");
813 673
814 schizo_clear_other_err_intr(p, irq);
815
816 return IRQ_HANDLED; 674 return IRQ_HANDLED;
817} 675}
818 676
@@ -1033,8 +891,6 @@ static irqreturn_t schizo_pcierr_intr(int irq, void *dev_id, struct pt_regs *reg
1033 if (error_bits & (SCHIZO_PCIAFSR_PPERR | SCHIZO_PCIAFSR_SPERR)) 891 if (error_bits & (SCHIZO_PCIAFSR_PPERR | SCHIZO_PCIAFSR_SPERR))
1034 pci_scan_for_parity_error(p, pbm, pbm->pci_bus); 892 pci_scan_for_parity_error(p, pbm, pbm->pci_bus);
1035 893
1036 schizo_clear_other_err_intr(p, irq);
1037
1038 return IRQ_HANDLED; 894 return IRQ_HANDLED;
1039} 895}
1040 896
@@ -1090,7 +946,6 @@ static irqreturn_t schizo_safarierr_intr(int irq, void *dev_id, struct pt_regs *
1090 printk("PCI%d: Unexpected Safari/JBUS error interrupt, errlog[%016lx]\n", 946 printk("PCI%d: Unexpected Safari/JBUS error interrupt, errlog[%016lx]\n",
1091 p->index, errlog); 947 p->index, errlog);
1092 948
1093 schizo_clear_other_err_intr(p, irq);
1094 return IRQ_HANDLED; 949 return IRQ_HANDLED;
1095 } 950 }
1096 951
@@ -1098,7 +953,6 @@ static irqreturn_t schizo_safarierr_intr(int irq, void *dev_id, struct pt_regs *
1098 p->index); 953 p->index);
1099 schizo_check_iommu_error(p, SAFARI_ERR); 954 schizo_check_iommu_error(p, SAFARI_ERR);
1100 955
1101 schizo_clear_other_err_intr(p, irq);
1102 return IRQ_HANDLED; 956 return IRQ_HANDLED;
1103} 957}
1104 958
@@ -1130,74 +984,47 @@ static irqreturn_t schizo_safarierr_intr(int irq, void *dev_id, struct pt_regs *
1130static void tomatillo_register_error_handlers(struct pci_controller_info *p) 984static void tomatillo_register_error_handlers(struct pci_controller_info *p)
1131{ 985{
1132 struct pci_pbm_info *pbm; 986 struct pci_pbm_info *pbm;
1133 unsigned int irq; 987 struct of_device *op;
1134 u64 tmp, err_mask, err_no_mask; 988 u64 tmp, err_mask, err_no_mask;
1135 989
1136 /* Build IRQs and register handlers. */ 990 /* Tomatillo IRQ property layout is:
991 * 0: PCIERR
992 * 1: UE ERR
993 * 2: CE ERR
994 * 3: SERR
995 * 4: POWER FAIL?
996 */
997
1137 pbm = pbm_for_ino(p, SCHIZO_UE_INO); 998 pbm = pbm_for_ino(p, SCHIZO_UE_INO);
1138 irq = schizo_irq_build(pbm, NULL, (pbm->portid << 6) | SCHIZO_UE_INO); 999 op = of_find_device_by_node(pbm->prom_node);
1139 if (request_irq(irq, schizo_ue_intr, 1000 if (op)
1140 SA_SHIRQ, "TOMATILLO UE", p) < 0) { 1001 request_irq(op->irqs[1], schizo_ue_intr, SA_SHIRQ,
1141 prom_printf("%s: Cannot register UE interrupt.\n", 1002 "TOMATILLO_UE", p);
1142 pbm->name);
1143 prom_halt();
1144 }
1145 tmp = upa_readl(schizo_ino_to_imap(pbm, (pbm->portid << 6) | SCHIZO_UE_INO));
1146 upa_writel(tmp, (pbm->pbm_regs +
1147 schizo_imap_offset(SCHIZO_UE_INO) + 4));
1148 1003
1149 pbm = pbm_for_ino(p, SCHIZO_CE_INO); 1004 pbm = pbm_for_ino(p, SCHIZO_CE_INO);
1150 irq = schizo_irq_build(pbm, NULL, (pbm->portid << 6) | SCHIZO_CE_INO); 1005 op = of_find_device_by_node(pbm->prom_node);
1151 if (request_irq(irq, schizo_ce_intr, 1006 if (op)
1152 SA_SHIRQ, "TOMATILLO CE", p) < 0) { 1007 request_irq(op->irqs[2], schizo_ce_intr, SA_SHIRQ,
1153 prom_printf("%s: Cannot register CE interrupt.\n", 1008 "TOMATILLO CE", p);
1154 pbm->name);
1155 prom_halt();
1156 }
1157 tmp = upa_readl(schizo_ino_to_imap(pbm, (pbm->portid << 6) | SCHIZO_CE_INO));
1158 upa_writel(tmp, (pbm->pbm_regs +
1159 schizo_imap_offset(SCHIZO_CE_INO) + 4));
1160 1009
1161 pbm = pbm_for_ino(p, SCHIZO_PCIERR_A_INO); 1010 pbm = pbm_for_ino(p, SCHIZO_PCIERR_A_INO);
1162 irq = schizo_irq_build(pbm, NULL, ((pbm->portid << 6) | 1011 op = of_find_device_by_node(pbm->prom_node);
1163 SCHIZO_PCIERR_A_INO)); 1012 if (op)
1164 if (request_irq(irq, schizo_pcierr_intr, 1013 request_irq(op->irqs[0], schizo_pcierr_intr, SA_SHIRQ,
1165 SA_SHIRQ, "TOMATILLO PCIERR", pbm) < 0) { 1014 "TOMATILLO PCIERR-A", pbm);
1166 prom_printf("%s: Cannot register PBM A PciERR interrupt.\n", 1015
1167 pbm->name);
1168 prom_halt();
1169 }
1170 tmp = upa_readl(schizo_ino_to_imap(pbm, ((pbm->portid << 6) |
1171 SCHIZO_PCIERR_A_INO)));
1172 upa_writel(tmp, (pbm->pbm_regs +
1173 schizo_imap_offset(SCHIZO_PCIERR_A_INO) + 4));
1174 1016
1175 pbm = pbm_for_ino(p, SCHIZO_PCIERR_B_INO); 1017 pbm = pbm_for_ino(p, SCHIZO_PCIERR_B_INO);
1176 irq = schizo_irq_build(pbm, NULL, ((pbm->portid << 6) | 1018 op = of_find_device_by_node(pbm->prom_node);
1177 SCHIZO_PCIERR_B_INO)); 1019 if (op)
1178 if (request_irq(irq, schizo_pcierr_intr, 1020 request_irq(op->irqs[0], schizo_pcierr_intr, SA_SHIRQ,
1179 SA_SHIRQ, "TOMATILLO PCIERR", pbm) < 0) { 1021 "TOMATILLO PCIERR-B", pbm);
1180 prom_printf("%s: Cannot register PBM B PciERR interrupt.\n",
1181 pbm->name);
1182 prom_halt();
1183 }
1184 tmp = upa_readl(schizo_ino_to_imap(pbm, ((pbm->portid << 6) |
1185 SCHIZO_PCIERR_B_INO)));
1186 upa_writel(tmp, (pbm->pbm_regs +
1187 schizo_imap_offset(SCHIZO_PCIERR_B_INO) + 4));
1188 1022
1189 pbm = pbm_for_ino(p, SCHIZO_SERR_INO); 1023 pbm = pbm_for_ino(p, SCHIZO_SERR_INO);
1190 irq = schizo_irq_build(pbm, NULL, (pbm->portid << 6) | SCHIZO_SERR_INO); 1024 op = of_find_device_by_node(pbm->prom_node);
1191 if (request_irq(irq, schizo_safarierr_intr, 1025 if (op)
1192 SA_SHIRQ, "TOMATILLO SERR", p) < 0) { 1026 request_irq(op->irqs[3], schizo_safarierr_intr, SA_SHIRQ,
1193 prom_printf("%s: Cannot register SafariERR interrupt.\n", 1027 "TOMATILLO SERR", p);
1194 pbm->name);
1195 prom_halt();
1196 }
1197 tmp = upa_readl(schizo_ino_to_imap(pbm, ((pbm->portid << 6) |
1198 SCHIZO_SERR_INO)));
1199 upa_writel(tmp, (pbm->pbm_regs +
1200 schizo_imap_offset(SCHIZO_SERR_INO) + 4));
1201 1028
1202 /* Enable UE and CE interrupts for controller. */ 1029 /* Enable UE and CE interrupts for controller. */
1203 schizo_write(p->pbm_A.controller_regs + SCHIZO_ECC_CTRL, 1030 schizo_write(p->pbm_A.controller_regs + SCHIZO_ECC_CTRL,
@@ -1265,64 +1092,47 @@ static void tomatillo_register_error_handlers(struct pci_controller_info *p)
1265static void schizo_register_error_handlers(struct pci_controller_info *p) 1092static void schizo_register_error_handlers(struct pci_controller_info *p)
1266{ 1093{
1267 struct pci_pbm_info *pbm; 1094 struct pci_pbm_info *pbm;
1268 unsigned int irq; 1095 struct of_device *op;
1269 u64 tmp, err_mask, err_no_mask; 1096 u64 tmp, err_mask, err_no_mask;
1270 1097
1271 /* Build IRQs and register handlers. */ 1098 /* Schizo IRQ property layout is:
1099 * 0: PCIERR
1100 * 1: UE ERR
1101 * 2: CE ERR
1102 * 3: SERR
1103 * 4: POWER FAIL?
1104 */
1105
1272 pbm = pbm_for_ino(p, SCHIZO_UE_INO); 1106 pbm = pbm_for_ino(p, SCHIZO_UE_INO);
1273 irq = schizo_irq_build(pbm, NULL, (pbm->portid << 6) | SCHIZO_UE_INO); 1107 op = of_find_device_by_node(pbm->prom_node);
1274 if (request_irq(irq, schizo_ue_intr, 1108 if (op)
1275 SA_SHIRQ, "SCHIZO UE", p) < 0) { 1109 request_irq(op->irqs[1], schizo_ue_intr, SA_SHIRQ,
1276 prom_printf("%s: Cannot register UE interrupt.\n", 1110 "SCHIZO_UE", p);
1277 pbm->name);
1278 prom_halt();
1279 }
1280 tmp = upa_readl(schizo_ino_to_imap(pbm, (pbm->portid << 6) | SCHIZO_UE_INO));
1281 upa_writel(tmp, (pbm->pbm_regs + schizo_imap_offset(SCHIZO_UE_INO) + 4));
1282 1111
1283 pbm = pbm_for_ino(p, SCHIZO_CE_INO); 1112 pbm = pbm_for_ino(p, SCHIZO_CE_INO);
1284 irq = schizo_irq_build(pbm, NULL, (pbm->portid << 6) | SCHIZO_CE_INO); 1113 op = of_find_device_by_node(pbm->prom_node);
1285 if (request_irq(irq, schizo_ce_intr, 1114 if (op)
1286 SA_SHIRQ, "SCHIZO CE", p) < 0) { 1115 request_irq(op->irqs[2], schizo_ce_intr, SA_SHIRQ,
1287 prom_printf("%s: Cannot register CE interrupt.\n", 1116 "SCHIZO CE", p);
1288 pbm->name);
1289 prom_halt();
1290 }
1291 tmp = upa_readl(schizo_ino_to_imap(pbm, (pbm->portid << 6) | SCHIZO_CE_INO));
1292 upa_writel(tmp, (pbm->pbm_regs + schizo_imap_offset(SCHIZO_CE_INO) + 4));
1293 1117
1294 pbm = pbm_for_ino(p, SCHIZO_PCIERR_A_INO); 1118 pbm = pbm_for_ino(p, SCHIZO_PCIERR_A_INO);
1295 irq = schizo_irq_build(pbm, NULL, (pbm->portid << 6) | SCHIZO_PCIERR_A_INO); 1119 op = of_find_device_by_node(pbm->prom_node);
1296 if (request_irq(irq, schizo_pcierr_intr, 1120 if (op)
1297 SA_SHIRQ, "SCHIZO PCIERR", pbm) < 0) { 1121 request_irq(op->irqs[0], schizo_pcierr_intr, SA_SHIRQ,
1298 prom_printf("%s: Cannot register PBM A PciERR interrupt.\n", 1122 "SCHIZO PCIERR-A", pbm);
1299 pbm->name); 1123
1300 prom_halt();
1301 }
1302 tmp = upa_readl(schizo_ino_to_imap(pbm, (pbm->portid << 6) | SCHIZO_PCIERR_A_INO));
1303 upa_writel(tmp, (pbm->pbm_regs + schizo_imap_offset(SCHIZO_PCIERR_A_INO) + 4));
1304 1124
1305 pbm = pbm_for_ino(p, SCHIZO_PCIERR_B_INO); 1125 pbm = pbm_for_ino(p, SCHIZO_PCIERR_B_INO);
1306 irq = schizo_irq_build(pbm, NULL, (pbm->portid << 6) | SCHIZO_PCIERR_B_INO); 1126 op = of_find_device_by_node(pbm->prom_node);
1307 if (request_irq(irq, schizo_pcierr_intr, 1127 if (op)
1308 SA_SHIRQ, "SCHIZO PCIERR", &p->pbm_B) < 0) { 1128 request_irq(op->irqs[0], schizo_pcierr_intr, SA_SHIRQ,
1309 prom_printf("%s: Cannot register PBM B PciERR interrupt.\n", 1129 "SCHIZO PCIERR-B", pbm);
1310 pbm->name);
1311 prom_halt();
1312 }
1313 tmp = upa_readl(schizo_ino_to_imap(pbm, (pbm->portid << 6) | SCHIZO_PCIERR_B_INO));
1314 upa_writel(tmp, (pbm->pbm_regs + schizo_imap_offset(SCHIZO_PCIERR_B_INO) + 4));
1315 1130
1316 pbm = pbm_for_ino(p, SCHIZO_SERR_INO); 1131 pbm = pbm_for_ino(p, SCHIZO_SERR_INO);
1317 irq = schizo_irq_build(pbm, NULL, (pbm->portid << 6) | SCHIZO_SERR_INO); 1132 op = of_find_device_by_node(pbm->prom_node);
1318 if (request_irq(irq, schizo_safarierr_intr, 1133 if (op)
1319 SA_SHIRQ, "SCHIZO SERR", p) < 0) { 1134 request_irq(op->irqs[3], schizo_safarierr_intr, SA_SHIRQ,
1320 prom_printf("%s: Cannot register SafariERR interrupt.\n", 1135 "SCHIZO SERR", p);
1321 pbm->name);
1322 prom_halt();
1323 }
1324 tmp = upa_readl(schizo_ino_to_imap(pbm, (pbm->portid << 6) | SCHIZO_SERR_INO));
1325 upa_writel(tmp, (pbm->pbm_regs + schizo_imap_offset(SCHIZO_SERR_INO) + 4));
1326 1136
1327 /* Enable UE and CE interrupts for controller. */ 1137 /* Enable UE and CE interrupts for controller. */
1328 schizo_write(p->pbm_A.controller_regs + SCHIZO_ECC_CTRL, 1138 schizo_write(p->pbm_A.controller_regs + SCHIZO_ECC_CTRL,
@@ -2022,7 +1832,6 @@ static void __schizo_init(struct device_node *dp, char *model_name, int chip_typ
2022 p->scan_bus = (chip_type == PBM_CHIP_TYPE_TOMATILLO ? 1832 p->scan_bus = (chip_type == PBM_CHIP_TYPE_TOMATILLO ?
2023 tomatillo_scan_bus : 1833 tomatillo_scan_bus :
2024 schizo_scan_bus); 1834 schizo_scan_bus);
2025 p->irq_build = schizo_irq_build;
2026 p->base_address_update = schizo_base_address_update; 1835 p->base_address_update = schizo_base_address_update;
2027 p->resource_adjust = schizo_resource_adjust; 1836 p->resource_adjust = schizo_resource_adjust;
2028 p->pci_ops = &schizo_ops; 1837 p->pci_ops = &schizo_ops;
diff --git a/arch/sparc64/kernel/pci_sun4v.c b/arch/sparc64/kernel/pci_sun4v.c
index b69e2270a721..03ad4c06758e 100644
--- a/arch/sparc64/kernel/pci_sun4v.c
+++ b/arch/sparc64/kernel/pci_sun4v.c
@@ -843,15 +843,6 @@ static void pci_sun4v_scan_bus(struct pci_controller_info *p)
843 /* XXX register error interrupt handlers XXX */ 843 /* XXX register error interrupt handlers XXX */
844} 844}
845 845
846static unsigned int pci_sun4v_irq_build(struct pci_pbm_info *pbm,
847 struct pci_dev *pdev,
848 unsigned int devino)
849{
850 u32 devhandle = pbm->devhandle;
851
852 return sun4v_build_irq(devhandle, devino);
853}
854
855static void pci_sun4v_base_address_update(struct pci_dev *pdev, int resource) 846static void pci_sun4v_base_address_update(struct pci_dev *pdev, int resource)
856{ 847{
857 struct pcidev_cookie *pcp = pdev->sysdata; 848 struct pcidev_cookie *pcp = pdev->sysdata;
@@ -1200,7 +1191,6 @@ void sun4v_pci_init(struct device_node *dp, char *model_name)
1200 p->pbms_same_domain = 0; 1191 p->pbms_same_domain = 0;
1201 1192
1202 p->scan_bus = pci_sun4v_scan_bus; 1193 p->scan_bus = pci_sun4v_scan_bus;
1203 p->irq_build = pci_sun4v_irq_build;
1204 p->base_address_update = pci_sun4v_base_address_update; 1194 p->base_address_update = pci_sun4v_base_address_update;
1205 p->resource_adjust = pci_sun4v_resource_adjust; 1195 p->resource_adjust = pci_sun4v_resource_adjust;
1206 p->pci_ops = &pci_sun4v_ops; 1196 p->pci_ops = &pci_sun4v_ops;
diff --git a/arch/sparc64/kernel/power.c b/arch/sparc64/kernel/power.c
index 9496c7734014..4febeda958a3 100644
--- a/arch/sparc64/kernel/power.c
+++ b/arch/sparc64/kernel/power.c
@@ -17,9 +17,10 @@
17#include <linux/pm.h> 17#include <linux/pm.h>
18 18
19#include <asm/system.h> 19#include <asm/system.h>
20#include <asm/ebus.h>
21#include <asm/isa.h>
22#include <asm/auxio.h> 20#include <asm/auxio.h>
21#include <asm/prom.h>
22#include <asm/of_device.h>
23#include <asm/io.h>
23 24
24#include <linux/unistd.h> 25#include <linux/unistd.h>
25 26
@@ -30,6 +31,7 @@
30int scons_pwroff = 1; 31int scons_pwroff = 1;
31 32
32#ifdef CONFIG_PCI 33#ifdef CONFIG_PCI
34#include <linux/pci.h>
33static void __iomem *power_reg; 35static void __iomem *power_reg;
34 36
35static DECLARE_WAIT_QUEUE_HEAD(powerd_wait); 37static DECLARE_WAIT_QUEUE_HEAD(powerd_wait);
@@ -115,27 +117,33 @@ static int __init has_button_interrupt(unsigned int irq, struct device_node *dp)
115 return 1; 117 return 1;
116} 118}
117 119
118static void __devinit power_probe_common(struct of_device *dev, struct resource *res, unsigned int irq) 120static int __devinit power_probe(struct of_device *op, const struct of_device_id *match)
119{ 121{
120 power_reg = ioremap(res->start, 0x4); 122 struct resource *res = &op->resource[0];
123 unsigned int irq= op->irqs[0];
121 124
122 printk("power: Control reg at %p ... ", power_reg); 125 power_reg = of_ioremap(res, 0, 0x4, "power");
126
127 printk("%s: Control reg at %lx ... ",
128 op->node->name, res->start);
123 129
124 poweroff_method = machine_halt; /* able to use the standard halt */ 130 poweroff_method = machine_halt; /* able to use the standard halt */
125 131
126 if (has_button_interrupt(irq, dev->node)) { 132 if (has_button_interrupt(irq, op->node)) {
127 if (kernel_thread(powerd, NULL, CLONE_FS) < 0) { 133 if (kernel_thread(powerd, NULL, CLONE_FS) < 0) {
128 printk("Failed to start power daemon.\n"); 134 printk("Failed to start power daemon.\n");
129 return; 135 return 0;
130 } 136 }
131 printk("powerd running.\n"); 137 printk("powerd running.\n");
132 138
133 if (request_irq(irq, 139 if (request_irq(irq,
134 power_handler, SA_SHIRQ, "power", NULL) < 0) 140 power_handler, 0, "power", NULL) < 0)
135 printk("power: Error, cannot register IRQ handler.\n"); 141 printk("power: Error, cannot register IRQ handler.\n");
136 } else { 142 } else {
137 printk("not using powerd.\n"); 143 printk("not using powerd.\n");
138 } 144 }
145
146 return 0;
139} 147}
140 148
141static struct of_device_id power_match[] = { 149static struct of_device_id power_match[] = {
@@ -145,44 +153,15 @@ static struct of_device_id power_match[] = {
145 {}, 153 {},
146}; 154};
147 155
148static int __devinit ebus_power_probe(struct of_device *dev, const struct of_device_id *match) 156static struct of_platform_driver power_driver = {
149{
150 struct linux_ebus_device *edev = to_ebus_device(&dev->dev);
151 struct resource *res = &edev->resource[0];
152 unsigned int irq = edev->irqs[0];
153
154 power_probe_common(dev, res,irq);
155
156 return 0;
157}
158
159static struct of_platform_driver ebus_power_driver = {
160 .name = "power",
161 .match_table = power_match,
162 .probe = ebus_power_probe,
163};
164
165static int __devinit isa_power_probe(struct of_device *dev, const struct of_device_id *match)
166{
167 struct sparc_isa_device *idev = to_isa_device(&dev->dev);
168 struct resource *res = &idev->resource;
169 unsigned int irq = idev->irq;
170
171 power_probe_common(dev, res,irq);
172
173 return 0;
174}
175
176static struct of_platform_driver isa_power_driver = {
177 .name = "power", 157 .name = "power",
178 .match_table = power_match, 158 .match_table = power_match,
179 .probe = isa_power_probe, 159 .probe = power_probe,
180}; 160};
181 161
182void __init power_init(void) 162void __init power_init(void)
183{ 163{
184 of_register_driver(&ebus_power_driver, &ebus_bus_type); 164 of_register_driver(&power_driver, &of_bus_type);
185 of_register_driver(&isa_power_driver, &isa_bus_type);
186 return; 165 return;
187} 166}
188#endif /* CONFIG_PCI */ 167#endif /* CONFIG_PCI */
diff --git a/arch/sparc64/kernel/prom.c b/arch/sparc64/kernel/prom.c
index 8e87e7ea0325..8a70c52c0447 100644
--- a/arch/sparc64/kernel/prom.c
+++ b/arch/sparc64/kernel/prom.c
@@ -15,6 +15,7 @@
15 * 2 of the License, or (at your option) any later version. 15 * 2 of the License, or (at your option) any later version.
16 */ 16 */
17 17
18#include <linux/config.h>
18#include <linux/kernel.h> 19#include <linux/kernel.h>
19#include <linux/types.h> 20#include <linux/types.h>
20#include <linux/string.h> 21#include <linux/string.h>
@@ -23,7 +24,11 @@
23#include <linux/module.h> 24#include <linux/module.h>
24 25
25#include <asm/prom.h> 26#include <asm/prom.h>
27#include <asm/of_device.h>
26#include <asm/oplib.h> 28#include <asm/oplib.h>
29#include <asm/irq.h>
30#include <asm/asi.h>
31#include <asm/upa.h>
27 32
28static struct device_node *allnodes; 33static struct device_node *allnodes;
29 34
@@ -190,6 +195,36 @@ int of_getintprop_default(struct device_node *np, const char *name, int def)
190} 195}
191EXPORT_SYMBOL(of_getintprop_default); 196EXPORT_SYMBOL(of_getintprop_default);
192 197
198int of_n_addr_cells(struct device_node *np)
199{
200 int* ip;
201 do {
202 if (np->parent)
203 np = np->parent;
204 ip = of_get_property(np, "#address-cells", NULL);
205 if (ip != NULL)
206 return *ip;
207 } while (np->parent);
208 /* No #address-cells property for the root node, default to 2 */
209 return 2;
210}
211EXPORT_SYMBOL(of_n_addr_cells);
212
213int of_n_size_cells(struct device_node *np)
214{
215 int* ip;
216 do {
217 if (np->parent)
218 np = np->parent;
219 ip = of_get_property(np, "#size-cells", NULL);
220 if (ip != NULL)
221 return *ip;
222 } while (np->parent);
223 /* No #size-cells property for the root node, default to 1 */
224 return 1;
225}
226EXPORT_SYMBOL(of_n_size_cells);
227
193int of_set_property(struct device_node *dp, const char *name, void *val, int len) 228int of_set_property(struct device_node *dp, const char *name, void *val, int len)
194{ 229{
195 struct property **prevp; 230 struct property **prevp;
@@ -253,6 +288,754 @@ static void * __init prom_early_alloc(unsigned long size)
253 return ret; 288 return ret;
254} 289}
255 290
291#ifdef CONFIG_PCI
292/* PSYCHO interrupt mapping support. */
293#define PSYCHO_IMAP_A_SLOT0 0x0c00UL
294#define PSYCHO_IMAP_B_SLOT0 0x0c20UL
295static unsigned long psycho_pcislot_imap_offset(unsigned long ino)
296{
297 unsigned int bus = (ino & 0x10) >> 4;
298 unsigned int slot = (ino & 0x0c) >> 2;
299
300 if (bus == 0)
301 return PSYCHO_IMAP_A_SLOT0 + (slot * 8);
302 else
303 return PSYCHO_IMAP_B_SLOT0 + (slot * 8);
304}
305
306#define PSYCHO_IMAP_SCSI 0x1000UL
307#define PSYCHO_IMAP_ETH 0x1008UL
308#define PSYCHO_IMAP_BPP 0x1010UL
309#define PSYCHO_IMAP_AU_REC 0x1018UL
310#define PSYCHO_IMAP_AU_PLAY 0x1020UL
311#define PSYCHO_IMAP_PFAIL 0x1028UL
312#define PSYCHO_IMAP_KMS 0x1030UL
313#define PSYCHO_IMAP_FLPY 0x1038UL
314#define PSYCHO_IMAP_SHW 0x1040UL
315#define PSYCHO_IMAP_KBD 0x1048UL
316#define PSYCHO_IMAP_MS 0x1050UL
317#define PSYCHO_IMAP_SER 0x1058UL
318#define PSYCHO_IMAP_TIM0 0x1060UL
319#define PSYCHO_IMAP_TIM1 0x1068UL
320#define PSYCHO_IMAP_UE 0x1070UL
321#define PSYCHO_IMAP_CE 0x1078UL
322#define PSYCHO_IMAP_A_ERR 0x1080UL
323#define PSYCHO_IMAP_B_ERR 0x1088UL
324#define PSYCHO_IMAP_PMGMT 0x1090UL
325#define PSYCHO_IMAP_GFX 0x1098UL
326#define PSYCHO_IMAP_EUPA 0x10a0UL
327
328static unsigned long __psycho_onboard_imap_off[] = {
329/*0x20*/ PSYCHO_IMAP_SCSI,
330/*0x21*/ PSYCHO_IMAP_ETH,
331/*0x22*/ PSYCHO_IMAP_BPP,
332/*0x23*/ PSYCHO_IMAP_AU_REC,
333/*0x24*/ PSYCHO_IMAP_AU_PLAY,
334/*0x25*/ PSYCHO_IMAP_PFAIL,
335/*0x26*/ PSYCHO_IMAP_KMS,
336/*0x27*/ PSYCHO_IMAP_FLPY,
337/*0x28*/ PSYCHO_IMAP_SHW,
338/*0x29*/ PSYCHO_IMAP_KBD,
339/*0x2a*/ PSYCHO_IMAP_MS,
340/*0x2b*/ PSYCHO_IMAP_SER,
341/*0x2c*/ PSYCHO_IMAP_TIM0,
342/*0x2d*/ PSYCHO_IMAP_TIM1,
343/*0x2e*/ PSYCHO_IMAP_UE,
344/*0x2f*/ PSYCHO_IMAP_CE,
345/*0x30*/ PSYCHO_IMAP_A_ERR,
346/*0x31*/ PSYCHO_IMAP_B_ERR,
347/*0x32*/ PSYCHO_IMAP_PMGMT
348};
349#define PSYCHO_ONBOARD_IRQ_BASE 0x20
350#define PSYCHO_ONBOARD_IRQ_LAST 0x32
351#define psycho_onboard_imap_offset(__ino) \
352 __psycho_onboard_imap_off[(__ino) - PSYCHO_ONBOARD_IRQ_BASE]
353
354#define PSYCHO_ICLR_A_SLOT0 0x1400UL
355#define PSYCHO_ICLR_SCSI 0x1800UL
356
357#define psycho_iclr_offset(ino) \
358 ((ino & 0x20) ? (PSYCHO_ICLR_SCSI + (((ino) & 0x1f) << 3)) : \
359 (PSYCHO_ICLR_A_SLOT0 + (((ino) & 0x1f)<<3)))
360
361static unsigned int psycho_irq_build(struct device_node *dp,
362 unsigned int ino,
363 void *_data)
364{
365 unsigned long controller_regs = (unsigned long) _data;
366 unsigned long imap, iclr;
367 unsigned long imap_off, iclr_off;
368 int inofixup = 0;
369
370 ino &= 0x3f;
371 if (ino < PSYCHO_ONBOARD_IRQ_BASE) {
372 /* PCI slot */
373 imap_off = psycho_pcislot_imap_offset(ino);
374 } else {
375 /* Onboard device */
376 if (ino > PSYCHO_ONBOARD_IRQ_LAST) {
377 prom_printf("psycho_irq_build: Wacky INO [%x]\n", ino);
378 prom_halt();
379 }
380 imap_off = psycho_onboard_imap_offset(ino);
381 }
382
383 /* Now build the IRQ bucket. */
384 imap = controller_regs + imap_off;
385 imap += 4;
386
387 iclr_off = psycho_iclr_offset(ino);
388 iclr = controller_regs + iclr_off;
389 iclr += 4;
390
391 if ((ino & 0x20) == 0)
392 inofixup = ino & 0x03;
393
394 return build_irq(inofixup, iclr, imap);
395}
396
397static void psycho_irq_trans_init(struct device_node *dp)
398{
399 struct linux_prom64_registers *regs;
400
401 dp->irq_trans = prom_early_alloc(sizeof(struct of_irq_controller));
402 dp->irq_trans->irq_build = psycho_irq_build;
403
404 regs = of_get_property(dp, "reg", NULL);
405 dp->irq_trans->data = (void *) regs[2].phys_addr;
406}
407
408#define sabre_read(__reg) \
409({ u64 __ret; \
410 __asm__ __volatile__("ldxa [%1] %2, %0" \
411 : "=r" (__ret) \
412 : "r" (__reg), "i" (ASI_PHYS_BYPASS_EC_E) \
413 : "memory"); \
414 __ret; \
415})
416
417struct sabre_irq_data {
418 unsigned long controller_regs;
419 unsigned int pci_first_busno;
420};
421#define SABRE_CONFIGSPACE 0x001000000UL
422#define SABRE_WRSYNC 0x1c20UL
423
424#define SABRE_CONFIG_BASE(CONFIG_SPACE) \
425 (CONFIG_SPACE | (1UL << 24))
426#define SABRE_CONFIG_ENCODE(BUS, DEVFN, REG) \
427 (((unsigned long)(BUS) << 16) | \
428 ((unsigned long)(DEVFN) << 8) | \
429 ((unsigned long)(REG)))
430
431/* When a device lives behind a bridge deeper in the PCI bus topology
432 * than APB, a special sequence must run to make sure all pending DMA
433 * transfers at the time of IRQ delivery are visible in the coherency
434 * domain by the cpu. This sequence is to perform a read on the far
435 * side of the non-APB bridge, then perform a read of Sabre's DMA
436 * write-sync register.
437 */
438static void sabre_wsync_handler(unsigned int ino, void *_arg1, void *_arg2)
439{
440 unsigned int phys_hi = (unsigned int) (unsigned long) _arg1;
441 struct sabre_irq_data *irq_data = _arg2;
442 unsigned long controller_regs = irq_data->controller_regs;
443 unsigned long sync_reg = controller_regs + SABRE_WRSYNC;
444 unsigned long config_space = controller_regs + SABRE_CONFIGSPACE;
445 unsigned int bus, devfn;
446 u16 _unused;
447
448 config_space = SABRE_CONFIG_BASE(config_space);
449
450 bus = (phys_hi >> 16) & 0xff;
451 devfn = (phys_hi >> 8) & 0xff;
452
453 config_space |= SABRE_CONFIG_ENCODE(bus, devfn, 0x00);
454
455 __asm__ __volatile__("membar #Sync\n\t"
456 "lduha [%1] %2, %0\n\t"
457 "membar #Sync"
458 : "=r" (_unused)
459 : "r" ((u16 *) config_space),
460 "i" (ASI_PHYS_BYPASS_EC_E_L)
461 : "memory");
462
463 sabre_read(sync_reg);
464}
465
466#define SABRE_IMAP_A_SLOT0 0x0c00UL
467#define SABRE_IMAP_B_SLOT0 0x0c20UL
468#define SABRE_IMAP_SCSI 0x1000UL
469#define SABRE_IMAP_ETH 0x1008UL
470#define SABRE_IMAP_BPP 0x1010UL
471#define SABRE_IMAP_AU_REC 0x1018UL
472#define SABRE_IMAP_AU_PLAY 0x1020UL
473#define SABRE_IMAP_PFAIL 0x1028UL
474#define SABRE_IMAP_KMS 0x1030UL
475#define SABRE_IMAP_FLPY 0x1038UL
476#define SABRE_IMAP_SHW 0x1040UL
477#define SABRE_IMAP_KBD 0x1048UL
478#define SABRE_IMAP_MS 0x1050UL
479#define SABRE_IMAP_SER 0x1058UL
480#define SABRE_IMAP_UE 0x1070UL
481#define SABRE_IMAP_CE 0x1078UL
482#define SABRE_IMAP_PCIERR 0x1080UL
483#define SABRE_IMAP_GFX 0x1098UL
484#define SABRE_IMAP_EUPA 0x10a0UL
485#define SABRE_ICLR_A_SLOT0 0x1400UL
486#define SABRE_ICLR_B_SLOT0 0x1480UL
487#define SABRE_ICLR_SCSI 0x1800UL
488#define SABRE_ICLR_ETH 0x1808UL
489#define SABRE_ICLR_BPP 0x1810UL
490#define SABRE_ICLR_AU_REC 0x1818UL
491#define SABRE_ICLR_AU_PLAY 0x1820UL
492#define SABRE_ICLR_PFAIL 0x1828UL
493#define SABRE_ICLR_KMS 0x1830UL
494#define SABRE_ICLR_FLPY 0x1838UL
495#define SABRE_ICLR_SHW 0x1840UL
496#define SABRE_ICLR_KBD 0x1848UL
497#define SABRE_ICLR_MS 0x1850UL
498#define SABRE_ICLR_SER 0x1858UL
499#define SABRE_ICLR_UE 0x1870UL
500#define SABRE_ICLR_CE 0x1878UL
501#define SABRE_ICLR_PCIERR 0x1880UL
502
503static unsigned long sabre_pcislot_imap_offset(unsigned long ino)
504{
505 unsigned int bus = (ino & 0x10) >> 4;
506 unsigned int slot = (ino & 0x0c) >> 2;
507
508 if (bus == 0)
509 return SABRE_IMAP_A_SLOT0 + (slot * 8);
510 else
511 return SABRE_IMAP_B_SLOT0 + (slot * 8);
512}
513
514static unsigned long __sabre_onboard_imap_off[] = {
515/*0x20*/ SABRE_IMAP_SCSI,
516/*0x21*/ SABRE_IMAP_ETH,
517/*0x22*/ SABRE_IMAP_BPP,
518/*0x23*/ SABRE_IMAP_AU_REC,
519/*0x24*/ SABRE_IMAP_AU_PLAY,
520/*0x25*/ SABRE_IMAP_PFAIL,
521/*0x26*/ SABRE_IMAP_KMS,
522/*0x27*/ SABRE_IMAP_FLPY,
523/*0x28*/ SABRE_IMAP_SHW,
524/*0x29*/ SABRE_IMAP_KBD,
525/*0x2a*/ SABRE_IMAP_MS,
526/*0x2b*/ SABRE_IMAP_SER,
527/*0x2c*/ 0 /* reserved */,
528/*0x2d*/ 0 /* reserved */,
529/*0x2e*/ SABRE_IMAP_UE,
530/*0x2f*/ SABRE_IMAP_CE,
531/*0x30*/ SABRE_IMAP_PCIERR,
532};
533#define SABRE_ONBOARD_IRQ_BASE 0x20
534#define SABRE_ONBOARD_IRQ_LAST 0x30
535#define sabre_onboard_imap_offset(__ino) \
536 __sabre_onboard_imap_off[(__ino) - SABRE_ONBOARD_IRQ_BASE]
537
538#define sabre_iclr_offset(ino) \
539 ((ino & 0x20) ? (SABRE_ICLR_SCSI + (((ino) & 0x1f) << 3)) : \
540 (SABRE_ICLR_A_SLOT0 + (((ino) & 0x1f)<<3)))
541
542static unsigned int sabre_irq_build(struct device_node *dp,
543 unsigned int ino,
544 void *_data)
545{
546 struct sabre_irq_data *irq_data = _data;
547 unsigned long controller_regs = irq_data->controller_regs;
548 struct linux_prom_pci_registers *regs;
549 unsigned long imap, iclr;
550 unsigned long imap_off, iclr_off;
551 int inofixup = 0;
552 int virt_irq;
553
554 ino &= 0x3f;
555 if (ino < SABRE_ONBOARD_IRQ_BASE) {
556 /* PCI slot */
557 imap_off = sabre_pcislot_imap_offset(ino);
558 } else {
559 /* onboard device */
560 if (ino > SABRE_ONBOARD_IRQ_LAST) {
561 prom_printf("sabre_irq_build: Wacky INO [%x]\n", ino);
562 prom_halt();
563 }
564 imap_off = sabre_onboard_imap_offset(ino);
565 }
566
567 /* Now build the IRQ bucket. */
568 imap = controller_regs + imap_off;
569 imap += 4;
570
571 iclr_off = sabre_iclr_offset(ino);
572 iclr = controller_regs + iclr_off;
573 iclr += 4;
574
575 if ((ino & 0x20) == 0)
576 inofixup = ino & 0x03;
577
578 virt_irq = build_irq(inofixup, iclr, imap);
579
580 regs = of_get_property(dp, "reg", NULL);
581 if (regs &&
582 ((regs->phys_hi >> 16) & 0xff) != irq_data->pci_first_busno) {
583 irq_install_pre_handler(virt_irq,
584 sabre_wsync_handler,
585 (void *) (long) regs->phys_hi,
586 (void *)
587 controller_regs +
588 SABRE_WRSYNC);
589 }
590
591 return virt_irq;
592}
593
594static void sabre_irq_trans_init(struct device_node *dp)
595{
596 struct linux_prom64_registers *regs;
597 struct sabre_irq_data *irq_data;
598 u32 *busrange;
599
600 dp->irq_trans = prom_early_alloc(sizeof(struct of_irq_controller));
601 dp->irq_trans->irq_build = sabre_irq_build;
602
603 irq_data = prom_early_alloc(sizeof(struct sabre_irq_data));
604
605 regs = of_get_property(dp, "reg", NULL);
606 irq_data->controller_regs = regs[0].phys_addr;
607
608 busrange = of_get_property(dp, "bus-range", NULL);
609 irq_data->pci_first_busno = busrange[0];
610
611 dp->irq_trans->data = irq_data;
612}
613
614/* SCHIZO interrupt mapping support. Unlike Psycho, for this controller the
615 * imap/iclr registers are per-PBM.
616 */
617#define SCHIZO_IMAP_BASE 0x1000UL
618#define SCHIZO_ICLR_BASE 0x1400UL
619
620static unsigned long schizo_imap_offset(unsigned long ino)
621{
622 return SCHIZO_IMAP_BASE + (ino * 8UL);
623}
624
625static unsigned long schizo_iclr_offset(unsigned long ino)
626{
627 return SCHIZO_ICLR_BASE + (ino * 8UL);
628}
629
630static unsigned long schizo_ino_to_iclr(unsigned long pbm_regs,
631 unsigned int ino)
632{
633 return pbm_regs + schizo_iclr_offset(ino) + 4;
634}
635
636static unsigned long schizo_ino_to_imap(unsigned long pbm_regs,
637 unsigned int ino)
638{
639 return pbm_regs + schizo_imap_offset(ino) + 4;
640}
641
642#define schizo_read(__reg) \
643({ u64 __ret; \
644 __asm__ __volatile__("ldxa [%1] %2, %0" \
645 : "=r" (__ret) \
646 : "r" (__reg), "i" (ASI_PHYS_BYPASS_EC_E) \
647 : "memory"); \
648 __ret; \
649})
650#define schizo_write(__reg, __val) \
651 __asm__ __volatile__("stxa %0, [%1] %2" \
652 : /* no outputs */ \
653 : "r" (__val), "r" (__reg), \
654 "i" (ASI_PHYS_BYPASS_EC_E) \
655 : "memory")
656
657static void tomatillo_wsync_handler(unsigned int ino, void *_arg1, void *_arg2)
658{
659 unsigned long sync_reg = (unsigned long) _arg2;
660 u64 mask = 1UL << (ino & IMAP_INO);
661 u64 val;
662 int limit;
663
664 schizo_write(sync_reg, mask);
665
666 limit = 100000;
667 val = 0;
668 while (--limit) {
669 val = schizo_read(sync_reg);
670 if (!(val & mask))
671 break;
672 }
673 if (limit <= 0) {
674 printk("tomatillo_wsync_handler: DMA won't sync [%lx:%lx]\n",
675 val, mask);
676 }
677
678 if (_arg1) {
679 static unsigned char cacheline[64]
680 __attribute__ ((aligned (64)));
681
682 __asm__ __volatile__("rd %%fprs, %0\n\t"
683 "or %0, %4, %1\n\t"
684 "wr %1, 0x0, %%fprs\n\t"
685 "stda %%f0, [%5] %6\n\t"
686 "wr %0, 0x0, %%fprs\n\t"
687 "membar #Sync"
688 : "=&r" (mask), "=&r" (val)
689 : "0" (mask), "1" (val),
690 "i" (FPRS_FEF), "r" (&cacheline[0]),
691 "i" (ASI_BLK_COMMIT_P));
692 }
693}
694
695struct schizo_irq_data {
696 unsigned long pbm_regs;
697 unsigned long sync_reg;
698 u32 portid;
699 int chip_version;
700};
701
702static unsigned int schizo_irq_build(struct device_node *dp,
703 unsigned int ino,
704 void *_data)
705{
706 struct schizo_irq_data *irq_data = _data;
707 unsigned long pbm_regs = irq_data->pbm_regs;
708 unsigned long imap, iclr;
709 int ign_fixup;
710 int virt_irq;
711 int is_tomatillo;
712
713 ino &= 0x3f;
714
715 /* Now build the IRQ bucket. */
716 imap = schizo_ino_to_imap(pbm_regs, ino);
717 iclr = schizo_ino_to_iclr(pbm_regs, ino);
718
719 /* On Schizo, no inofixup occurs. This is because each
720 * INO has it's own IMAP register. On Psycho and Sabre
721 * there is only one IMAP register for each PCI slot even
722 * though four different INOs can be generated by each
723 * PCI slot.
724 *
725 * But, for JBUS variants (essentially, Tomatillo), we have
726 * to fixup the lowest bit of the interrupt group number.
727 */
728 ign_fixup = 0;
729
730 is_tomatillo = (irq_data->sync_reg != 0UL);
731
732 if (is_tomatillo) {
733 if (irq_data->portid & 1)
734 ign_fixup = (1 << 6);
735 }
736
737 virt_irq = build_irq(ign_fixup, iclr, imap);
738
739 if (is_tomatillo) {
740 irq_install_pre_handler(virt_irq,
741 tomatillo_wsync_handler,
742 ((irq_data->chip_version <= 4) ?
743 (void *) 1 : (void *) 0),
744 (void *) irq_data->sync_reg);
745 }
746
747 return virt_irq;
748}
749
750static void schizo_irq_trans_init(struct device_node *dp)
751{
752 struct linux_prom64_registers *regs;
753 struct schizo_irq_data *irq_data;
754
755 dp->irq_trans = prom_early_alloc(sizeof(struct of_irq_controller));
756 dp->irq_trans->irq_build = schizo_irq_build;
757
758 irq_data = prom_early_alloc(sizeof(struct schizo_irq_data));
759
760 regs = of_get_property(dp, "reg", NULL);
761 dp->irq_trans->data = irq_data;
762
763 irq_data->pbm_regs = regs[0].phys_addr;
764 irq_data->sync_reg = regs[3].phys_addr + 0x1a18UL;
765 irq_data->portid = of_getintprop_default(dp, "portid", 0);
766 irq_data->chip_version = of_getintprop_default(dp, "version#", 0);
767}
768
769static unsigned int pci_sun4v_irq_build(struct device_node *dp,
770 unsigned int devino,
771 void *_data)
772{
773 u32 devhandle = (u32) (unsigned long) _data;
774
775 return sun4v_build_irq(devhandle, devino);
776}
777
778static void pci_sun4v_irq_trans_init(struct device_node *dp)
779{
780 struct linux_prom64_registers *regs;
781
782 dp->irq_trans = prom_early_alloc(sizeof(struct of_irq_controller));
783 dp->irq_trans->irq_build = pci_sun4v_irq_build;
784
785 regs = of_get_property(dp, "reg", NULL);
786 dp->irq_trans->data = (void *) (unsigned long)
787 ((regs->phys_addr >> 32UL) & 0x0fffffff);
788}
789#endif /* CONFIG_PCI */
790
791#ifdef CONFIG_SBUS
792/* INO number to IMAP register offset for SYSIO external IRQ's.
793 * This should conform to both Sunfire/Wildfire server and Fusion
794 * desktop designs.
795 */
796#define SYSIO_IMAP_SLOT0 0x2c04UL
797#define SYSIO_IMAP_SLOT1 0x2c0cUL
798#define SYSIO_IMAP_SLOT2 0x2c14UL
799#define SYSIO_IMAP_SLOT3 0x2c1cUL
800#define SYSIO_IMAP_SCSI 0x3004UL
801#define SYSIO_IMAP_ETH 0x300cUL
802#define SYSIO_IMAP_BPP 0x3014UL
803#define SYSIO_IMAP_AUDIO 0x301cUL
804#define SYSIO_IMAP_PFAIL 0x3024UL
805#define SYSIO_IMAP_KMS 0x302cUL
806#define SYSIO_IMAP_FLPY 0x3034UL
807#define SYSIO_IMAP_SHW 0x303cUL
808#define SYSIO_IMAP_KBD 0x3044UL
809#define SYSIO_IMAP_MS 0x304cUL
810#define SYSIO_IMAP_SER 0x3054UL
811#define SYSIO_IMAP_TIM0 0x3064UL
812#define SYSIO_IMAP_TIM1 0x306cUL
813#define SYSIO_IMAP_UE 0x3074UL
814#define SYSIO_IMAP_CE 0x307cUL
815#define SYSIO_IMAP_SBERR 0x3084UL
816#define SYSIO_IMAP_PMGMT 0x308cUL
817#define SYSIO_IMAP_GFX 0x3094UL
818#define SYSIO_IMAP_EUPA 0x309cUL
819
820#define bogon ((unsigned long) -1)
821static unsigned long sysio_irq_offsets[] = {
822 /* SBUS Slot 0 --> 3, level 1 --> 7 */
823 SYSIO_IMAP_SLOT0, SYSIO_IMAP_SLOT0, SYSIO_IMAP_SLOT0, SYSIO_IMAP_SLOT0,
824 SYSIO_IMAP_SLOT0, SYSIO_IMAP_SLOT0, SYSIO_IMAP_SLOT0, SYSIO_IMAP_SLOT0,
825 SYSIO_IMAP_SLOT1, SYSIO_IMAP_SLOT1, SYSIO_IMAP_SLOT1, SYSIO_IMAP_SLOT1,
826 SYSIO_IMAP_SLOT1, SYSIO_IMAP_SLOT1, SYSIO_IMAP_SLOT1, SYSIO_IMAP_SLOT1,
827 SYSIO_IMAP_SLOT2, SYSIO_IMAP_SLOT2, SYSIO_IMAP_SLOT2, SYSIO_IMAP_SLOT2,
828 SYSIO_IMAP_SLOT2, SYSIO_IMAP_SLOT2, SYSIO_IMAP_SLOT2, SYSIO_IMAP_SLOT2,
829 SYSIO_IMAP_SLOT3, SYSIO_IMAP_SLOT3, SYSIO_IMAP_SLOT3, SYSIO_IMAP_SLOT3,
830 SYSIO_IMAP_SLOT3, SYSIO_IMAP_SLOT3, SYSIO_IMAP_SLOT3, SYSIO_IMAP_SLOT3,
831
832 /* Onboard devices (not relevant/used on SunFire). */
833 SYSIO_IMAP_SCSI,
834 SYSIO_IMAP_ETH,
835 SYSIO_IMAP_BPP,
836 bogon,
837 SYSIO_IMAP_AUDIO,
838 SYSIO_IMAP_PFAIL,
839 bogon,
840 bogon,
841 SYSIO_IMAP_KMS,
842 SYSIO_IMAP_FLPY,
843 SYSIO_IMAP_SHW,
844 SYSIO_IMAP_KBD,
845 SYSIO_IMAP_MS,
846 SYSIO_IMAP_SER,
847 bogon,
848 bogon,
849 SYSIO_IMAP_TIM0,
850 SYSIO_IMAP_TIM1,
851 bogon,
852 bogon,
853 SYSIO_IMAP_UE,
854 SYSIO_IMAP_CE,
855 SYSIO_IMAP_SBERR,
856 SYSIO_IMAP_PMGMT,
857};
858
859#undef bogon
860
861#define NUM_SYSIO_OFFSETS ARRAY_SIZE(sysio_irq_offsets)
862
863/* Convert Interrupt Mapping register pointer to associated
864 * Interrupt Clear register pointer, SYSIO specific version.
865 */
866#define SYSIO_ICLR_UNUSED0 0x3400UL
867#define SYSIO_ICLR_SLOT0 0x340cUL
868#define SYSIO_ICLR_SLOT1 0x344cUL
869#define SYSIO_ICLR_SLOT2 0x348cUL
870#define SYSIO_ICLR_SLOT3 0x34ccUL
871static unsigned long sysio_imap_to_iclr(unsigned long imap)
872{
873 unsigned long diff = SYSIO_ICLR_UNUSED0 - SYSIO_IMAP_SLOT0;
874 return imap + diff;
875}
876
877static unsigned int sbus_of_build_irq(struct device_node *dp,
878 unsigned int ino,
879 void *_data)
880{
881 unsigned long reg_base = (unsigned long) _data;
882 struct linux_prom_registers *regs;
883 unsigned long imap, iclr;
884 int sbus_slot = 0;
885 int sbus_level = 0;
886
887 ino &= 0x3f;
888
889 regs = of_get_property(dp, "reg", NULL);
890 if (regs)
891 sbus_slot = regs->which_io;
892
893 if (ino < 0x20)
894 ino += (sbus_slot * 8);
895
896 imap = sysio_irq_offsets[ino];
897 if (imap == ((unsigned long)-1)) {
898 prom_printf("get_irq_translations: Bad SYSIO INO[%x]\n",
899 ino);
900 prom_halt();
901 }
902 imap += reg_base;
903
904 /* SYSIO inconsistency. For external SLOTS, we have to select
905 * the right ICLR register based upon the lower SBUS irq level
906 * bits.
907 */
908 if (ino >= 0x20) {
909 iclr = sysio_imap_to_iclr(imap);
910 } else {
911 sbus_level = ino & 0x7;
912
913 switch(sbus_slot) {
914 case 0:
915 iclr = reg_base + SYSIO_ICLR_SLOT0;
916 break;
917 case 1:
918 iclr = reg_base + SYSIO_ICLR_SLOT1;
919 break;
920 case 2:
921 iclr = reg_base + SYSIO_ICLR_SLOT2;
922 break;
923 default:
924 case 3:
925 iclr = reg_base + SYSIO_ICLR_SLOT3;
926 break;
927 };
928
929 iclr += ((unsigned long)sbus_level - 1UL) * 8UL;
930 }
931 return build_irq(sbus_level, iclr, imap);
932}
933
934static void sbus_irq_trans_init(struct device_node *dp)
935{
936 struct linux_prom64_registers *regs;
937
938 dp->irq_trans = prom_early_alloc(sizeof(struct of_irq_controller));
939 dp->irq_trans->irq_build = sbus_of_build_irq;
940
941 regs = of_get_property(dp, "reg", NULL);
942 dp->irq_trans->data = (void *) (unsigned long) regs->phys_addr;
943}
944#endif /* CONFIG_SBUS */
945
946
947static unsigned int central_build_irq(struct device_node *dp,
948 unsigned int ino,
949 void *_data)
950{
951 struct device_node *central_dp = _data;
952 struct of_device *central_op = of_find_device_by_node(central_dp);
953 struct resource *res;
954 unsigned long imap, iclr;
955 u32 tmp;
956
957 if (!strcmp(dp->name, "eeprom")) {
958 res = &central_op->resource[5];
959 } else if (!strcmp(dp->name, "zs")) {
960 res = &central_op->resource[4];
961 } else if (!strcmp(dp->name, "clock-board")) {
962 res = &central_op->resource[3];
963 } else {
964 return ino;
965 }
966
967 imap = res->start + 0x00UL;
968 iclr = res->start + 0x10UL;
969
970 /* Set the INO state to idle, and disable. */
971 upa_writel(0, iclr);
972 upa_readl(iclr);
973
974 tmp = upa_readl(imap);
975 tmp &= ~0x80000000;
976 upa_writel(tmp, imap);
977
978 return build_irq(0, iclr, imap);
979}
980
981static void central_irq_trans_init(struct device_node *dp)
982{
983 dp->irq_trans = prom_early_alloc(sizeof(struct of_irq_controller));
984 dp->irq_trans->irq_build = central_build_irq;
985
986 dp->irq_trans->data = dp;
987}
988
989struct irq_trans {
990 const char *name;
991 void (*init)(struct device_node *);
992};
993
994#ifdef CONFIG_PCI
995static struct irq_trans pci_irq_trans_table[] = {
996 { "SUNW,sabre", sabre_irq_trans_init },
997 { "pci108e,a000", sabre_irq_trans_init },
998 { "pci108e,a001", sabre_irq_trans_init },
999 { "SUNW,psycho", psycho_irq_trans_init },
1000 { "pci108e,8000", psycho_irq_trans_init },
1001 { "SUNW,schizo", schizo_irq_trans_init },
1002 { "pci108e,8001", schizo_irq_trans_init },
1003 { "SUNW,schizo+", schizo_irq_trans_init },
1004 { "pci108e,8002", schizo_irq_trans_init },
1005 { "SUNW,tomatillo", schizo_irq_trans_init },
1006 { "pci108e,a801", schizo_irq_trans_init },
1007 { "SUNW,sun4v-pci", pci_sun4v_irq_trans_init },
1008};
1009#endif
1010
1011static void irq_trans_init(struct device_node *dp)
1012{
1013 const char *model;
1014 int i;
1015
1016 model = of_get_property(dp, "model", NULL);
1017 if (!model)
1018 model = of_get_property(dp, "compatible", NULL);
1019 if (!model)
1020 return;
1021
1022#ifdef CONFIG_PCI
1023 for (i = 0; i < ARRAY_SIZE(pci_irq_trans_table); i++) {
1024 struct irq_trans *t = &pci_irq_trans_table[i];
1025
1026 if (!strcmp(model, t->name))
1027 return t->init(dp);
1028 }
1029#endif
1030#ifdef CONFIG_SBUS
1031 if (!strcmp(dp->name, "sbus") ||
1032 !strcmp(dp->name, "sbi"))
1033 return sbus_irq_trans_init(dp);
1034#endif
1035 if (!strcmp(dp->name, "central"))
1036 return central_irq_trans_init(dp->child);
1037}
1038
256static int is_root_node(const struct device_node *dp) 1039static int is_root_node(const struct device_node *dp)
257{ 1040{
258 if (!dp) 1041 if (!dp)
@@ -676,10 +1459,10 @@ static struct device_node * __init create_node(phandle node)
676 dp->type = get_one_property(node, "device_type"); 1459 dp->type = get_one_property(node, "device_type");
677 dp->node = node; 1460 dp->node = node;
678 1461
679 /* Build interrupts later... */
680
681 dp->properties = build_prop_list(node); 1462 dp->properties = build_prop_list(node);
682 1463
1464 irq_trans_init(dp);
1465
683 return dp; 1466 return dp;
684} 1467}
685 1468
diff --git a/arch/sparc64/kernel/sbus.c b/arch/sparc64/kernel/sbus.c
index ac05e0f692ef..ef68aa4fec65 100644
--- a/arch/sparc64/kernel/sbus.c
+++ b/arch/sparc64/kernel/sbus.c
@@ -1221,9 +1221,7 @@ static void __init sbus_iommu_init(int __node, struct sbus_bus *sbus)
1221 1221
1222 /* Now some Xfire specific grot... */ 1222 /* Now some Xfire specific grot... */
1223 if (this_is_starfire) 1223 if (this_is_starfire)
1224 sbus->starfire_cookie = starfire_hookup(sbus->portid); 1224 starfire_hookup(sbus->portid);
1225 else
1226 sbus->starfire_cookie = NULL;
1227 1225
1228 sysio_register_error_handlers(sbus); 1226 sysio_register_error_handlers(sbus);
1229} 1227}
@@ -1269,8 +1267,6 @@ int __init sbus_arch_preinit(void)
1269void __init sbus_arch_postinit(void) 1267void __init sbus_arch_postinit(void)
1270{ 1268{
1271 extern void firetruck_init(void); 1269 extern void firetruck_init(void);
1272 extern void clock_probe(void);
1273 1270
1274 firetruck_init(); 1271 firetruck_init();
1275 clock_probe();
1276} 1272}
diff --git a/arch/sparc64/kernel/starfire.c b/arch/sparc64/kernel/starfire.c
index ae859d40771e..b930fee7708a 100644
--- a/arch/sparc64/kernel/starfire.c
+++ b/arch/sparc64/kernel/starfire.c
@@ -54,7 +54,7 @@ struct starfire_irqinfo {
54static struct starfire_irqinfo *sflist = NULL; 54static struct starfire_irqinfo *sflist = NULL;
55 55
56/* Beam me up Scott(McNeil)y... */ 56/* Beam me up Scott(McNeil)y... */
57void *starfire_hookup(int upaid) 57void starfire_hookup(int upaid)
58{ 58{
59 struct starfire_irqinfo *p; 59 struct starfire_irqinfo *p;
60 unsigned long treg_base, hwmid, i; 60 unsigned long treg_base, hwmid, i;
@@ -81,8 +81,6 @@ void *starfire_hookup(int upaid)
81 p->upaid = upaid; 81 p->upaid = upaid;
82 p->next = sflist; 82 p->next = sflist;
83 sflist = p; 83 sflist = p;
84
85 return (void *) p;
86} 84}
87 85
88unsigned int starfire_translate(unsigned long imap, 86unsigned int starfire_translate(unsigned long imap,
diff --git a/arch/sparc64/kernel/sys_sunos32.c b/arch/sparc64/kernel/sys_sunos32.c
index ae5b32f817f0..87ebdf858a3a 100644
--- a/arch/sparc64/kernel/sys_sunos32.c
+++ b/arch/sparc64/kernel/sys_sunos32.c
@@ -155,7 +155,7 @@ asmlinkage int sunos_brk(u32 baddr)
155 * simple, it hopefully works in most obvious cases.. Easy to 155 * simple, it hopefully works in most obvious cases.. Easy to
156 * fool it, but this should catch most mistakes. 156 * fool it, but this should catch most mistakes.
157 */ 157 */
158 freepages = get_page_cache_size(); 158 freepages = global_page_state(NR_FILE_PAGES);
159 freepages >>= 1; 159 freepages >>= 1;
160 freepages += nr_free_pages(); 160 freepages += nr_free_pages();
161 freepages += nr_swap_pages; 161 freepages += nr_swap_pages;
diff --git a/arch/sparc64/kernel/time.c b/arch/sparc64/kernel/time.c
index 348b82035561..5f3dd4d800cd 100644
--- a/arch/sparc64/kernel/time.c
+++ b/arch/sparc64/kernel/time.c
@@ -38,11 +38,8 @@
38#include <asm/timer.h> 38#include <asm/timer.h>
39#include <asm/irq.h> 39#include <asm/irq.h>
40#include <asm/io.h> 40#include <asm/io.h>
41#include <asm/sbus.h> 41#include <asm/prom.h>
42#include <asm/fhc.h> 42#include <asm/of_device.h>
43#include <asm/pbm.h>
44#include <asm/ebus.h>
45#include <asm/isa.h>
46#include <asm/starfire.h> 43#include <asm/starfire.h>
47#include <asm/smp.h> 44#include <asm/smp.h>
48#include <asm/sections.h> 45#include <asm/sections.h>
@@ -770,237 +767,106 @@ static int __init clock_model_matches(char *model)
770 return 1; 767 return 1;
771} 768}
772 769
773static void __init __clock_assign_common(void __iomem *addr, char *model) 770static int __devinit clock_probe(struct of_device *op, const struct of_device_id *match)
774{ 771{
775 if (model[5] == '0' && model[6] == '2') { 772 struct device_node *dp = op->node;
776 mstk48t02_regs = addr; 773 char *model = of_get_property(dp, "model", NULL);
777 } else if(model[5] == '0' && model[6] == '8') { 774 unsigned long size, flags;
778 mstk48t08_regs = addr; 775 void __iomem *regs;
779 mstk48t02_regs = mstk48t08_regs + MOSTEK_48T08_48T02;
780 } else {
781 mstk48t59_regs = addr;
782 mstk48t02_regs = mstk48t59_regs + MOSTEK_48T59_48T02;
783 }
784}
785
786static void __init clock_assign_clk_reg(struct linux_prom_registers *clk_reg,
787 char *model)
788{
789 unsigned long addr;
790
791 addr = ((unsigned long) clk_reg[0].phys_addr |
792 (((unsigned long) clk_reg[0].which_io) << 32UL));
793
794 __clock_assign_common((void __iomem *) addr, model);
795}
796
797static int __init clock_probe_central(void)
798{
799 struct linux_prom_registers clk_reg[2], *pr;
800 struct device_node *dp;
801 char *model;
802 776
803 if (!central_bus) 777 if (!model || !clock_model_matches(model))
804 return 0; 778 return -ENODEV;
805
806 /* Get Central FHC's prom node. */
807 dp = central_bus->child->prom_node;
808
809 /* Then get the first child device below it. */
810 dp = dp->child;
811
812 while (dp) {
813 model = of_get_property(dp, "model", NULL);
814 if (!model || !clock_model_matches(model))
815 goto next_sibling;
816
817 pr = of_get_property(dp, "reg", NULL);
818 memcpy(clk_reg, pr, sizeof(clk_reg));
819
820 apply_fhc_ranges(central_bus->child, clk_reg, 1);
821 apply_central_ranges(central_bus, clk_reg, 1);
822
823 clock_assign_clk_reg(clk_reg, model);
824 return 1;
825 779
826 next_sibling: 780 /* On an Enterprise system there can be multiple mostek clocks.
827 dp = dp->sibling; 781 * We should only match the one that is on the central FHC bus.
828 } 782 */
783 if (!strcmp(dp->parent->name, "fhc") &&
784 strcmp(dp->parent->parent->name, "central") != 0)
785 return -ENODEV;
829 786
830 return 0; 787 size = (op->resource[0].end - op->resource[0].start) + 1;
831} 788 regs = of_ioremap(&op->resource[0], 0, size, "clock");
789 if (!regs)
790 return -ENOMEM;
832 791
833#ifdef CONFIG_PCI
834static void __init clock_isa_ebus_assign_regs(struct resource *res, char *model)
835{
836 if (!strcmp(model, "ds1287") || 792 if (!strcmp(model, "ds1287") ||
837 !strcmp(model, "m5819") || 793 !strcmp(model, "m5819") ||
838 !strcmp(model, "m5819p") || 794 !strcmp(model, "m5819p") ||
839 !strcmp(model, "m5823")) { 795 !strcmp(model, "m5823")) {
840 ds1287_regs = res->start; 796 ds1287_regs = (unsigned long) regs;
797 } else if (model[5] == '0' && model[6] == '2') {
798 mstk48t02_regs = regs;
799 } else if(model[5] == '0' && model[6] == '8') {
800 mstk48t08_regs = regs;
801 mstk48t02_regs = mstk48t08_regs + MOSTEK_48T08_48T02;
841 } else { 802 } else {
842 mstk48t59_regs = (void __iomem *) res->start; 803 mstk48t59_regs = regs;
843 mstk48t02_regs = mstk48t59_regs + MOSTEK_48T59_48T02; 804 mstk48t02_regs = mstk48t59_regs + MOSTEK_48T59_48T02;
844 } 805 }
845}
846
847static int __init clock_probe_one_ebus_dev(struct linux_ebus_device *edev)
848{
849 struct device_node *dp = edev->prom_node;
850 char *model;
851
852 model = of_get_property(dp, "model", NULL);
853 if (!clock_model_matches(model))
854 return 0;
855 806
856 clock_isa_ebus_assign_regs(&edev->resource[0], model); 807 printk(KERN_INFO "%s: Clock regs at %p\n", dp->full_name, regs);
857 808
858 return 1; 809 local_irq_save(flags);
859}
860
861static int __init clock_probe_ebus(void)
862{
863 struct linux_ebus *ebus;
864 810
865 for_each_ebus(ebus) { 811 if (mstk48t02_regs != NULL) {
866 struct linux_ebus_device *edev; 812 /* Report a low battery voltage condition. */
813 if (has_low_battery())
814 prom_printf("NVRAM: Low battery voltage!\n");
867 815
868 for_each_ebusdev(edev, ebus) { 816 /* Kick start the clock if it is completely stopped. */
869 if (clock_probe_one_ebus_dev(edev)) 817 if (mostek_read(mstk48t02_regs + MOSTEK_SEC) & MSTK_STOP)
870 return 1; 818 kick_start_clock();
871 }
872 } 819 }
873 820
874 return 0; 821 set_system_time();
875} 822
876 823 local_irq_restore(flags);
877static int __init clock_probe_one_isa_dev(struct sparc_isa_device *idev)
878{
879 struct device_node *dp = idev->prom_node;
880 char *model;
881
882 model = of_get_property(dp, "model", NULL);
883 if (!clock_model_matches(model))
884 return 0;
885
886 clock_isa_ebus_assign_regs(&idev->resource, model);
887
888 return 1;
889}
890
891static int __init clock_probe_isa(void)
892{
893 struct sparc_isa_bridge *isa_br;
894
895 for_each_isa(isa_br) {
896 struct sparc_isa_device *isa_dev;
897
898 for_each_isadev(isa_dev, isa_br) {
899 if (clock_probe_one_isa_dev(isa_dev))
900 return 1;
901 }
902 }
903 824
904 return 0; 825 return 0;
905} 826}
906#endif /* CONFIG_PCI */
907
908#ifdef CONFIG_SBUS
909static int __init clock_probe_one_sbus_dev(struct sbus_bus *sbus, struct sbus_dev *sdev)
910{
911 struct resource *res;
912 char model[64];
913 void __iomem *addr;
914
915 prom_getstring(sdev->prom_node, "model", model, sizeof(model));
916 if (!clock_model_matches(model))
917 return 0;
918
919 res = &sdev->resource[0];
920 addr = sbus_ioremap(res, 0, 0x800UL, "eeprom");
921 827
922 __clock_assign_common(addr, model); 828static struct of_device_id clock_match[] = {
923 829 {
924 return 1; 830 .name = "eeprom",
925} 831 },
926 832 {
927static int __init clock_probe_sbus(void) 833 .name = "rtc",
928{ 834 },
929 struct sbus_bus *sbus; 835 {},
930 836};
931 for_each_sbus(sbus) {
932 struct sbus_dev *sdev;
933
934 for_each_sbusdev(sdev, sbus) {
935 if (clock_probe_one_sbus_dev(sbus, sdev))
936 return 1;
937 }
938 }
939 837
940 return 0; 838static struct of_platform_driver clock_driver = {
941} 839 .name = "clock",
942#endif 840 .match_table = clock_match,
841 .probe = clock_probe,
842};
943 843
944void __init clock_probe(void) 844static int __init clock_init(void)
945{ 845{
946 static int invoked;
947 unsigned long flags;
948
949 if (invoked)
950 return;
951 invoked = 1;
952
953 if (this_is_starfire) { 846 if (this_is_starfire) {
954 xtime.tv_sec = starfire_get_time(); 847 xtime.tv_sec = starfire_get_time();
955 xtime.tv_nsec = (INITIAL_JIFFIES % HZ) * (NSEC_PER_SEC / HZ); 848 xtime.tv_nsec = (INITIAL_JIFFIES % HZ) * (NSEC_PER_SEC / HZ);
956 set_normalized_timespec(&wall_to_monotonic, 849 set_normalized_timespec(&wall_to_monotonic,
957 -xtime.tv_sec, -xtime.tv_nsec); 850 -xtime.tv_sec, -xtime.tv_nsec);
958 return; 851 return 0;
959 } 852 }
960 if (tlb_type == hypervisor) { 853 if (tlb_type == hypervisor) {
961 xtime.tv_sec = hypervisor_get_time(); 854 xtime.tv_sec = hypervisor_get_time();
962 xtime.tv_nsec = (INITIAL_JIFFIES % HZ) * (NSEC_PER_SEC / HZ); 855 xtime.tv_nsec = (INITIAL_JIFFIES % HZ) * (NSEC_PER_SEC / HZ);
963 set_normalized_timespec(&wall_to_monotonic, 856 set_normalized_timespec(&wall_to_monotonic,
964 -xtime.tv_sec, -xtime.tv_nsec); 857 -xtime.tv_sec, -xtime.tv_nsec);
965 return; 858 return 0;
966 }
967
968 /* Check FHC Central then EBUSs then ISA bridges then SBUSs.
969 * That way we handle the presence of multiple properly.
970 *
971 * As a special case, machines with Central must provide the
972 * timer chip there.
973 */
974 if (!clock_probe_central() &&
975#ifdef CONFIG_PCI
976 !clock_probe_ebus() &&
977 !clock_probe_isa() &&
978#endif
979#ifdef CONFIG_SBUS
980 !clock_probe_sbus()
981#endif
982 ) {
983 printk(KERN_WARNING "No clock chip found.\n");
984 return;
985 }
986
987 local_irq_save(flags);
988
989 if (mstk48t02_regs != NULL) {
990 /* Report a low battery voltage condition. */
991 if (has_low_battery())
992 prom_printf("NVRAM: Low battery voltage!\n");
993
994 /* Kick start the clock if it is completely stopped. */
995 if (mostek_read(mstk48t02_regs + MOSTEK_SEC) & MSTK_STOP)
996 kick_start_clock();
997 } 859 }
998 860
999 set_system_time(); 861 return of_register_driver(&clock_driver, &of_bus_type);
1000
1001 local_irq_restore(flags);
1002} 862}
1003 863
864/* Must be after subsys_initcall() so that busses are probed. Must
865 * be before device_initcall() because things like the RTC driver
866 * need to see the clock registers.
867 */
868fs_initcall(clock_init);
869
1004/* This is gets the master TICK_INT timer going. */ 870/* This is gets the master TICK_INT timer going. */
1005static unsigned long sparc64_init_timers(void) 871static unsigned long sparc64_init_timers(void)
1006{ 872{
diff --git a/arch/sparc64/kernel/unaligned.c b/arch/sparc64/kernel/unaligned.c
index bb2d68577855..a9b765271b85 100644
--- a/arch/sparc64/kernel/unaligned.c
+++ b/arch/sparc64/kernel/unaligned.c
@@ -20,6 +20,7 @@
20#include <linux/smp.h> 20#include <linux/smp.h>
21#include <linux/smp_lock.h> 21#include <linux/smp_lock.h>
22#include <linux/bitops.h> 22#include <linux/bitops.h>
23#include <linux/kallsyms.h>
23#include <asm/fpumacro.h> 24#include <asm/fpumacro.h>
24 25
25/* #define DEBUG_MNA */ 26/* #define DEBUG_MNA */
@@ -291,7 +292,8 @@ asmlinkage void kernel_unaligned_trap(struct pt_regs *regs, unsigned int insn)
291 if (count < 5) { 292 if (count < 5) {
292 last_time = jiffies; 293 last_time = jiffies;
293 count++; 294 count++;
294 printk("Kernel unaligned access at TPC[%lx]\n", regs->tpc); 295 printk("Kernel unaligned access at TPC[%lx] ", regs->tpc);
296 print_symbol("%s\n", regs->tpc);
295 } 297 }
296 298
297 if (!ok_for_kernel(insn) || dir == both) { 299 if (!ok_for_kernel(insn) || dir == both) {
diff --git a/arch/um/Makefile-x86_64 b/arch/um/Makefile-x86_64
index dfd88b652fbe..dffd1184c956 100644
--- a/arch/um/Makefile-x86_64
+++ b/arch/um/Makefile-x86_64
@@ -6,9 +6,11 @@ START := 0x60000000
6 6
7#We #undef __x86_64__ for kernelspace, not for userspace where 7#We #undef __x86_64__ for kernelspace, not for userspace where
8#it's needed for headers to work! 8#it's needed for headers to work!
9CFLAGS += -U__$(SUBARCH)__ -fno-builtin 9CFLAGS += -U__$(SUBARCH)__ -fno-builtin -m64
10USER_CFLAGS += -fno-builtin 10USER_CFLAGS += -fno-builtin -m64
11CHECKFLAGS += -m64 11CHECKFLAGS += -m64
12AFLAGS += -m64
13LDFLAGS += -m elf_x86_64
12 14
13ELF_ARCH := i386:x86-64 15ELF_ARCH := i386:x86-64
14ELF_FORMAT := elf64-x86-64 16ELF_FORMAT := elf64-x86-64
@@ -16,3 +18,4 @@ ELF_FORMAT := elf64-x86-64
16# Not on all 64-bit distros /lib is a symlink to /lib64. PLD is an example. 18# Not on all 64-bit distros /lib is a symlink to /lib64. PLD is an example.
17 19
18LINK-$(CONFIG_LD_SCRIPT_DYN) += -Wl,-rpath,/lib64 20LINK-$(CONFIG_LD_SCRIPT_DYN) += -Wl,-rpath,/lib64
21LINK-y += -m64
diff --git a/arch/um/drivers/stderr_console.c b/arch/um/drivers/stderr_console.c
index 429ae8e6c7e5..6d2cf32a9e8f 100644
--- a/arch/um/drivers/stderr_console.c
+++ b/arch/um/drivers/stderr_console.c
@@ -8,10 +8,7 @@
8 8
9/* 9/*
10 * Don't register by default -- as this registeres very early in the 10 * Don't register by default -- as this registeres very early in the
11 * boot process it becomes the default console. And as this isn't a 11 * boot process it becomes the default console.
12 * real tty driver init isn't able to open /dev/console then.
13 *
14 * In most cases this isn't what you want ...
15 */ 12 */
16static int use_stderr_console = 0; 13static int use_stderr_console = 0;
17 14
@@ -43,3 +40,20 @@ static int stderr_setup(char *str)
43 return 1; 40 return 1;
44} 41}
45__setup("stderr=", stderr_setup); 42__setup("stderr=", stderr_setup);
43
44/* The previous behavior of not unregistering led to /dev/console being
45 * impossible to open. My FC5 filesystem started having init die, and the
46 * system panicing because of this. Unregistering causes the real
47 * console to become the default console, and /dev/console can then be
48 * opened. Making this an initcall makes this happen late enough that
49 * there is no added value in dumping everything to stderr, and the
50 * normal console is good enough to show you all available output.
51 */
52static int __init unregister_stderr(void)
53{
54 unregister_console(&stderr_console);
55
56 return 0;
57}
58
59__initcall(unregister_stderr);
diff --git a/arch/um/kernel/skas/mmu.c b/arch/um/kernel/skas/mmu.c
index c5c9885a8297..624ca238d1fd 100644
--- a/arch/um/kernel/skas/mmu.c
+++ b/arch/um/kernel/skas/mmu.c
@@ -152,7 +152,7 @@ void destroy_context_skas(struct mm_struct *mm)
152 free_page(mmu->id.stack); 152 free_page(mmu->id.stack);
153 pte_lock_deinit(virt_to_page(mmu->last_page_table)); 153 pte_lock_deinit(virt_to_page(mmu->last_page_table));
154 pte_free_kernel((pte_t *) mmu->last_page_table); 154 pte_free_kernel((pte_t *) mmu->last_page_table);
155 dec_page_state(nr_page_table_pages); 155 dec_zone_page_state(virt_to_page(mmu->last_page_table), NR_PAGETABLE);
156#ifdef CONFIG_3_LEVEL_PGTABLES 156#ifdef CONFIG_3_LEVEL_PGTABLES
157 pmd_free((pmd_t *) mmu->last_pmd); 157 pmd_free((pmd_t *) mmu->last_pmd);
158#endif 158#endif
diff --git a/arch/um/kernel/time.c b/arch/um/kernel/time.c
deleted file mode 100644
index 8fa2ae7f3026..000000000000
--- a/arch/um/kernel/time.c
+++ /dev/null
@@ -1,172 +0,0 @@
1/*
2 * Copyright (C) 2000, 2001, 2002 Jeff Dike (jdike@karaya.com)
3 * Licensed under the GPL
4 */
5
6#include <stdio.h>
7#include <stdlib.h>
8#include <unistd.h>
9#include <time.h>
10#include <sys/time.h>
11#include <signal.h>
12#include <errno.h>
13#include "user_util.h"
14#include "kern_util.h"
15#include "user.h"
16#include "process.h"
17#include "time_user.h"
18#include "kern_constants.h"
19#include "os.h"
20
21/* XXX This really needs to be declared and initialized in a kernel file since
22 * it's in <linux/time.h>
23 */
24extern struct timespec wall_to_monotonic;
25
26extern struct timeval xtime;
27
28struct timeval local_offset = { 0, 0 };
29
30void timer(void)
31{
32 gettimeofday(&xtime, NULL);
33 timeradd(&xtime, &local_offset, &xtime);
34}
35
36static void set_interval(int timer_type)
37{
38 int usec = 1000000/hz();
39 struct itimerval interval = ((struct itimerval) { { 0, usec },
40 { 0, usec } });
41
42 if(setitimer(timer_type, &interval, NULL) == -1)
43 panic("setitimer failed - errno = %d\n", errno);
44}
45
46void enable_timer(void)
47{
48 set_interval(ITIMER_VIRTUAL);
49}
50
51void prepare_timer(void * ptr)
52{
53 int usec = 1000000/hz();
54 *(struct itimerval *)ptr = ((struct itimerval) { { 0, usec },
55 { 0, usec }});
56}
57
58void disable_timer(void)
59{
60 struct itimerval disable = ((struct itimerval) { { 0, 0 }, { 0, 0 }});
61 if((setitimer(ITIMER_VIRTUAL, &disable, NULL) < 0) ||
62 (setitimer(ITIMER_REAL, &disable, NULL) < 0))
63 printk("disnable_timer - setitimer failed, errno = %d\n",
64 errno);
65 /* If there are signals already queued, after unblocking ignore them */
66 set_handler(SIGALRM, SIG_IGN, 0, -1);
67 set_handler(SIGVTALRM, SIG_IGN, 0, -1);
68}
69
70void switch_timers(int to_real)
71{
72 struct itimerval disable = ((struct itimerval) { { 0, 0 }, { 0, 0 }});
73 struct itimerval enable = ((struct itimerval) { { 0, 1000000/hz() },
74 { 0, 1000000/hz() }});
75 int old, new;
76
77 if(to_real){
78 old = ITIMER_VIRTUAL;
79 new = ITIMER_REAL;
80 }
81 else {
82 old = ITIMER_REAL;
83 new = ITIMER_VIRTUAL;
84 }
85
86 if((setitimer(old, &disable, NULL) < 0) ||
87 (setitimer(new, &enable, NULL)))
88 printk("switch_timers - setitimer failed, errno = %d\n",
89 errno);
90}
91
92void uml_idle_timer(void)
93{
94 if(signal(SIGVTALRM, SIG_IGN) == SIG_ERR)
95 panic("Couldn't unset SIGVTALRM handler");
96
97 set_handler(SIGALRM, (__sighandler_t) alarm_handler,
98 SA_RESTART, SIGUSR1, SIGIO, SIGWINCH, SIGVTALRM, -1);
99 set_interval(ITIMER_REAL);
100}
101
102extern void ktime_get_ts(struct timespec *ts);
103#define do_posix_clock_monotonic_gettime(ts) ktime_get_ts(ts)
104
105void time_init(void)
106{
107 struct timespec now;
108
109 if(signal(SIGVTALRM, boot_timer_handler) == SIG_ERR)
110 panic("Couldn't set SIGVTALRM handler");
111 set_interval(ITIMER_VIRTUAL);
112
113 do_posix_clock_monotonic_gettime(&now);
114 wall_to_monotonic.tv_sec = -now.tv_sec;
115 wall_to_monotonic.tv_nsec = -now.tv_nsec;
116}
117
118/* Defined in linux/ktimer.h, which can't be included here */
119#define clock_was_set() do { } while (0)
120
121void do_gettimeofday(struct timeval *tv)
122{
123 unsigned long flags;
124
125 flags = time_lock();
126 gettimeofday(tv, NULL);
127 timeradd(tv, &local_offset, tv);
128 time_unlock(flags);
129 clock_was_set();
130}
131
132int do_settimeofday(struct timespec *tv)
133{
134 struct timeval now;
135 unsigned long flags;
136 struct timeval tv_in;
137
138 if ((unsigned long) tv->tv_nsec >= UM_NSEC_PER_SEC)
139 return -EINVAL;
140
141 tv_in.tv_sec = tv->tv_sec;
142 tv_in.tv_usec = tv->tv_nsec / 1000;
143
144 flags = time_lock();
145 gettimeofday(&now, NULL);
146 timersub(&tv_in, &now, &local_offset);
147 time_unlock(flags);
148
149 return(0);
150}
151
152void idle_sleep(int secs)
153{
154 struct timespec ts;
155
156 ts.tv_sec = secs;
157 ts.tv_nsec = 0;
158 nanosleep(&ts, NULL);
159}
160
161/* XXX This partly duplicates init_irq_signals */
162
163void user_time_init(void)
164{
165 set_handler(SIGVTALRM, (__sighandler_t) alarm_handler,
166 SA_ONSTACK | SA_RESTART, SIGUSR1, SIGIO, SIGWINCH,
167 SIGALRM, SIGUSR2, -1);
168 set_handler(SIGALRM, (__sighandler_t) alarm_handler,
169 SA_ONSTACK | SA_RESTART, SIGUSR1, SIGIO, SIGWINCH,
170 SIGVTALRM, SIGUSR2, -1);
171 set_interval(ITIMER_VIRTUAL);
172}
diff --git a/arch/um/kernel/time_kern.c b/arch/um/kernel/time_kern.c
index 87cdbc560d36..820fa3615a3f 100644
--- a/arch/um/kernel/time_kern.c
+++ b/arch/um/kernel/time_kern.c
@@ -96,11 +96,15 @@ void time_init_kern(void)
96 96
97void do_boot_timer_handler(struct sigcontext * sc) 97void do_boot_timer_handler(struct sigcontext * sc)
98{ 98{
99 unsigned long flags;
99 struct pt_regs regs; 100 struct pt_regs regs;
100 101
101 CHOOSE_MODE((void) (UPT_SC(&regs.regs) = sc), 102 CHOOSE_MODE((void) (UPT_SC(&regs.regs) = sc),
102 (void) (regs.regs.skas.is_user = 0)); 103 (void) (regs.regs.skas.is_user = 0));
104
105 write_seqlock_irqsave(&xtime_lock, flags);
103 do_timer(&regs); 106 do_timer(&regs);
107 write_sequnlock_irqrestore(&xtime_lock, flags);
104} 108}
105 109
106static DEFINE_SPINLOCK(timer_spinlock); 110static DEFINE_SPINLOCK(timer_spinlock);
@@ -125,25 +129,17 @@ irqreturn_t um_timer(int irq, void *dev, struct pt_regs *regs)
125 unsigned long long nsecs; 129 unsigned long long nsecs;
126 unsigned long flags; 130 unsigned long flags;
127 131
132 write_seqlock_irqsave(&xtime_lock, flags);
133
128 do_timer(regs); 134 do_timer(regs);
129 135
130 write_seqlock_irqsave(&xtime_lock, flags);
131 nsecs = get_time() + local_offset; 136 nsecs = get_time() + local_offset;
132 xtime.tv_sec = nsecs / NSEC_PER_SEC; 137 xtime.tv_sec = nsecs / NSEC_PER_SEC;
133 xtime.tv_nsec = nsecs - xtime.tv_sec * NSEC_PER_SEC; 138 xtime.tv_nsec = nsecs - xtime.tv_sec * NSEC_PER_SEC;
134 write_sequnlock_irqrestore(&xtime_lock, flags);
135
136 return(IRQ_HANDLED);
137}
138 139
139long um_time(int __user *tloc) 140 write_sequnlock_irqrestore(&xtime_lock, flags);
140{
141 long ret = get_time() / NSEC_PER_SEC;
142
143 if((tloc != NULL) && put_user(ret, tloc))
144 return -EFAULT;
145 141
146 return ret; 142 return IRQ_HANDLED;
147} 143}
148 144
149void do_gettimeofday(struct timeval *tv) 145void do_gettimeofday(struct timeval *tv)
@@ -174,18 +170,6 @@ static inline void set_time(unsigned long long nsecs)
174 clock_was_set(); 170 clock_was_set();
175} 171}
176 172
177long um_stime(int __user *tptr)
178{
179 int value;
180
181 if (get_user(value, tptr))
182 return -EFAULT;
183
184 set_time((unsigned long long) value * NSEC_PER_SEC);
185
186 return 0;
187}
188
189int do_settimeofday(struct timespec *tv) 173int do_settimeofday(struct timespec *tv)
190{ 174{
191 set_time((unsigned long long) tv->tv_sec * NSEC_PER_SEC + tv->tv_nsec); 175 set_time((unsigned long long) tv->tv_sec * NSEC_PER_SEC + tv->tv_nsec);
diff --git a/arch/um/kernel/vmlinux.lds.S b/arch/um/kernel/vmlinux.lds.S
index 1660a769674b..0a7d50ff9a4c 100644
--- a/arch/um/kernel/vmlinux.lds.S
+++ b/arch/um/kernel/vmlinux.lds.S
@@ -1,4 +1,6 @@
1#include <linux/config.h> 1#include <linux/config.h>
2/* in case the preprocessor is a 32bit one */
3#undef i386
2#ifdef CONFIG_LD_SCRIPT_STATIC 4#ifdef CONFIG_LD_SCRIPT_STATIC
3#include "uml.lds.S" 5#include "uml.lds.S"
4#else 6#else
diff --git a/arch/um/os-Linux/mem.c b/arch/um/os-Linux/mem.c
index c6432e729241..560c8063c77c 100644
--- a/arch/um/os-Linux/mem.c
+++ b/arch/um/os-Linux/mem.c
@@ -55,7 +55,7 @@ static void __init find_tempdir(void)
55 */ 55 */
56static int next(int fd, char *buf, int size, char c) 56static int next(int fd, char *buf, int size, char c)
57{ 57{
58 int n; 58 int n, len;
59 char *ptr; 59 char *ptr;
60 60
61 while((ptr = strchr(buf, c)) == NULL){ 61 while((ptr = strchr(buf, c)) == NULL){
@@ -69,7 +69,17 @@ static int next(int fd, char *buf, int size, char c)
69 } 69 }
70 70
71 ptr++; 71 ptr++;
72 memmove(buf, ptr, strlen(ptr) + 1); 72 len = strlen(ptr);
73 memmove(buf, ptr, len + 1);
74
75 /* Refill the buffer so that if there's a partial string that we care
76 * about, it will be completed, and we can recognize it.
77 */
78 n = read(fd, &buf[len], size - len - 1);
79 if(n < 0)
80 return -errno;
81
82 buf[len + n] = '\0';
73 return 1; 83 return 1;
74} 84}
75 85
@@ -200,8 +210,11 @@ int create_tmp_file(unsigned long long len)
200 exit(1); 210 exit(1);
201 } 211 }
202 212
203 if (lseek64(fd, len, SEEK_SET) < 0) { 213 /* Seek to len - 1 because writing a character there will
204 perror("os_seek_file"); 214 * increase the file size by one byte, to the desired length.
215 */
216 if (lseek64(fd, len - 1, SEEK_SET) < 0) {
217 perror("os_seek_file");
205 exit(1); 218 exit(1);
206 } 219 }
207 220
diff --git a/arch/um/sys-i386/sys_call_table.S b/arch/um/sys-i386/sys_call_table.S
index 1ff61474b25c..2497554b7b95 100644
--- a/arch/um/sys-i386/sys_call_table.S
+++ b/arch/um/sys-i386/sys_call_table.S
@@ -7,8 +7,6 @@
7#define sys_vm86old sys_ni_syscall 7#define sys_vm86old sys_ni_syscall
8#define sys_vm86 sys_ni_syscall 8#define sys_vm86 sys_ni_syscall
9 9
10#define sys_stime um_stime
11#define sys_time um_time
12#define old_mmap old_mmap_i386 10#define old_mmap old_mmap_i386
13 11
14#include "../../i386/kernel/syscall_table.S" 12#include "../../i386/kernel/syscall_table.S"
diff --git a/arch/um/sys-x86_64/syscall_table.c b/arch/um/sys-x86_64/syscall_table.c
index 34b2e842864f..3c4318165de0 100644
--- a/arch/um/sys-x86_64/syscall_table.c
+++ b/arch/um/sys-x86_64/syscall_table.c
@@ -20,12 +20,6 @@
20/*#define sys_set_thread_area sys_ni_syscall 20/*#define sys_set_thread_area sys_ni_syscall
21#define sys_get_thread_area sys_ni_syscall*/ 21#define sys_get_thread_area sys_ni_syscall*/
22 22
23/* For __NR_time. The x86-64 name hopefully will change from sys_time64 to
24 * sys_time (since the current situation is bogus). I've sent a patch to cleanup
25 * this. Remove below the obsoleted line. */
26#define sys_time64 um_time
27#define sys_time um_time
28
29/* On UML we call it this way ("old" means it's not mmap2) */ 23/* On UML we call it this way ("old" means it's not mmap2) */
30#define sys_mmap old_mmap 24#define sys_mmap old_mmap
31/* On x86-64 sys_uname is actually sys_newuname plus a compatibility trick. 25/* On x86-64 sys_uname is actually sys_newuname plus a compatibility trick.
diff --git a/arch/x86_64/kernel/functionlist b/arch/x86_64/kernel/functionlist
index 2bcebdc3eedb..01fa23580c85 100644
--- a/arch/x86_64/kernel/functionlist
+++ b/arch/x86_64/kernel/functionlist
@@ -384,7 +384,6 @@
384*(.text.__end_that_request_first) 384*(.text.__end_that_request_first)
385*(.text.wake_up_bit) 385*(.text.wake_up_bit)
386*(.text.unuse_mm) 386*(.text.unuse_mm)
387*(.text.skb_release_data)
388*(.text.shrink_icache_memory) 387*(.text.shrink_icache_memory)
389*(.text.sched_balance_self) 388*(.text.sched_balance_self)
390*(.text.__pmd_alloc) 389*(.text.__pmd_alloc)
diff --git a/block/ll_rw_blk.c b/block/ll_rw_blk.c
index eee03a3876a3..fb83547f563e 100644
--- a/block/ll_rw_blk.c
+++ b/block/ll_rw_blk.c
@@ -3117,9 +3117,9 @@ void submit_bio(int rw, struct bio *bio)
3117 BIO_BUG_ON(!bio->bi_io_vec); 3117 BIO_BUG_ON(!bio->bi_io_vec);
3118 bio->bi_rw |= rw; 3118 bio->bi_rw |= rw;
3119 if (rw & WRITE) 3119 if (rw & WRITE)
3120 mod_page_state(pgpgout, count); 3120 count_vm_events(PGPGOUT, count);
3121 else 3121 else
3122 mod_page_state(pgpgin, count); 3122 count_vm_events(PGPGIN, count);
3123 3123
3124 if (unlikely(block_dump)) { 3124 if (unlikely(block_dump)) {
3125 char b[BDEVNAME_SIZE]; 3125 char b[BDEVNAME_SIZE];
diff --git a/drivers/acorn/block/Kconfig b/drivers/acorn/block/Kconfig
index 073add35e66f..a0ff25ea439f 100644
--- a/drivers/acorn/block/Kconfig
+++ b/drivers/acorn/block/Kconfig
@@ -3,7 +3,7 @@
3# 3#
4 4
5menu "Acorn-specific block devices" 5menu "Acorn-specific block devices"
6 depends on ARCH_ACORN 6 depends on ARCH_ARC || ARCH_A5K
7 7
8config BLK_DEV_FD1772 8config BLK_DEV_FD1772
9 tristate "Old Archimedes floppy (1772) support" 9 tristate "Old Archimedes floppy (1772) support"
diff --git a/drivers/atm/he.c b/drivers/atm/he.c
index fde9334059af..a486eb1f1640 100644
--- a/drivers/atm/he.c
+++ b/drivers/atm/he.c
@@ -1018,7 +1018,7 @@ he_init_irq(struct he_dev *he_dev)
1018 return 0; 1018 return 0;
1019} 1019}
1020 1020
1021static int __init 1021static int __devinit
1022he_start(struct atm_dev *dev) 1022he_start(struct atm_dev *dev)
1023{ 1023{
1024 struct he_dev *he_dev; 1024 struct he_dev *he_dev;
diff --git a/drivers/atm/idt77105.c b/drivers/atm/idt77105.c
index 0aabfc2a59d9..325325afabec 100644
--- a/drivers/atm/idt77105.c
+++ b/drivers/atm/idt77105.c
@@ -358,7 +358,7 @@ static const struct atmphy_ops idt77105_ops = {
358}; 358};
359 359
360 360
361int idt77105_init(struct atm_dev *dev) 361int __devinit idt77105_init(struct atm_dev *dev)
362{ 362{
363 dev->phy = &idt77105_ops; 363 dev->phy = &idt77105_ops;
364 return 0; 364 return 0;
diff --git a/drivers/atm/idt77105.h b/drivers/atm/idt77105.h
index 8ba8218aaefe..3fd2bc899761 100644
--- a/drivers/atm/idt77105.h
+++ b/drivers/atm/idt77105.h
@@ -76,7 +76,7 @@
76#define IDT77105_CTRSEL_RHEC 0x01 /* W, Rx HEC Error Counter */ 76#define IDT77105_CTRSEL_RHEC 0x01 /* W, Rx HEC Error Counter */
77 77
78#ifdef __KERNEL__ 78#ifdef __KERNEL__
79int idt77105_init(struct atm_dev *dev) __init; 79int idt77105_init(struct atm_dev *dev);
80#endif 80#endif
81 81
82/* 82/*
diff --git a/drivers/atm/iphase.c b/drivers/atm/iphase.c
index 2e2e50e1167a..333a7bc609d2 100644
--- a/drivers/atm/iphase.c
+++ b/drivers/atm/iphase.c
@@ -2284,7 +2284,7 @@ static int reset_sar(struct atm_dev *dev)
2284} 2284}
2285 2285
2286 2286
2287static int __init ia_init(struct atm_dev *dev) 2287static int __devinit ia_init(struct atm_dev *dev)
2288{ 2288{
2289 IADEV *iadev; 2289 IADEV *iadev;
2290 unsigned long real_base; 2290 unsigned long real_base;
@@ -2480,7 +2480,7 @@ static void ia_free_rx(IADEV *iadev)
2480 iadev->rx_dle_dma); 2480 iadev->rx_dle_dma);
2481} 2481}
2482 2482
2483static int __init ia_start(struct atm_dev *dev) 2483static int __devinit ia_start(struct atm_dev *dev)
2484{ 2484{
2485 IADEV *iadev; 2485 IADEV *iadev;
2486 int error; 2486 int error;
diff --git a/drivers/atm/suni.c b/drivers/atm/suni.c
index b1d063cc4fbe..f04f39c00833 100644
--- a/drivers/atm/suni.c
+++ b/drivers/atm/suni.c
@@ -289,7 +289,7 @@ static const struct atmphy_ops suni_ops = {
289}; 289};
290 290
291 291
292int suni_init(struct atm_dev *dev) 292int __devinit suni_init(struct atm_dev *dev)
293{ 293{
294 unsigned char mri; 294 unsigned char mri;
295 295
diff --git a/drivers/base/node.c b/drivers/base/node.c
index eae2bdc183bb..d7de1753e094 100644
--- a/drivers/base/node.c
+++ b/drivers/base/node.c
@@ -40,24 +40,13 @@ static ssize_t node_read_meminfo(struct sys_device * dev, char * buf)
40 int n; 40 int n;
41 int nid = dev->id; 41 int nid = dev->id;
42 struct sysinfo i; 42 struct sysinfo i;
43 struct page_state ps;
44 unsigned long inactive; 43 unsigned long inactive;
45 unsigned long active; 44 unsigned long active;
46 unsigned long free; 45 unsigned long free;
47 46
48 si_meminfo_node(&i, nid); 47 si_meminfo_node(&i, nid);
49 get_page_state_node(&ps, nid);
50 __get_zone_counts(&active, &inactive, &free, NODE_DATA(nid)); 48 __get_zone_counts(&active, &inactive, &free, NODE_DATA(nid));
51 49
52 /* Check for negative values in these approximate counters */
53 if ((long)ps.nr_dirty < 0)
54 ps.nr_dirty = 0;
55 if ((long)ps.nr_writeback < 0)
56 ps.nr_writeback = 0;
57 if ((long)ps.nr_mapped < 0)
58 ps.nr_mapped = 0;
59 if ((long)ps.nr_slab < 0)
60 ps.nr_slab = 0;
61 50
62 n = sprintf(buf, "\n" 51 n = sprintf(buf, "\n"
63 "Node %d MemTotal: %8lu kB\n" 52 "Node %d MemTotal: %8lu kB\n"
@@ -71,7 +60,12 @@ static ssize_t node_read_meminfo(struct sys_device * dev, char * buf)
71 "Node %d LowFree: %8lu kB\n" 60 "Node %d LowFree: %8lu kB\n"
72 "Node %d Dirty: %8lu kB\n" 61 "Node %d Dirty: %8lu kB\n"
73 "Node %d Writeback: %8lu kB\n" 62 "Node %d Writeback: %8lu kB\n"
63 "Node %d FilePages: %8lu kB\n"
74 "Node %d Mapped: %8lu kB\n" 64 "Node %d Mapped: %8lu kB\n"
65 "Node %d AnonPages: %8lu kB\n"
66 "Node %d PageTables: %8lu kB\n"
67 "Node %d NFS Unstable: %8lu kB\n"
68 "Node %d Bounce: %8lu kB\n"
75 "Node %d Slab: %8lu kB\n", 69 "Node %d Slab: %8lu kB\n",
76 nid, K(i.totalram), 70 nid, K(i.totalram),
77 nid, K(i.freeram), 71 nid, K(i.freeram),
@@ -82,10 +76,15 @@ static ssize_t node_read_meminfo(struct sys_device * dev, char * buf)
82 nid, K(i.freehigh), 76 nid, K(i.freehigh),
83 nid, K(i.totalram - i.totalhigh), 77 nid, K(i.totalram - i.totalhigh),
84 nid, K(i.freeram - i.freehigh), 78 nid, K(i.freeram - i.freehigh),
85 nid, K(ps.nr_dirty), 79 nid, K(node_page_state(nid, NR_FILE_DIRTY)),
86 nid, K(ps.nr_writeback), 80 nid, K(node_page_state(nid, NR_WRITEBACK)),
87 nid, K(ps.nr_mapped), 81 nid, K(node_page_state(nid, NR_FILE_PAGES)),
88 nid, K(ps.nr_slab)); 82 nid, K(node_page_state(nid, NR_FILE_MAPPED)),
83 nid, K(node_page_state(nid, NR_ANON_PAGES)),
84 nid, K(node_page_state(nid, NR_PAGETABLE)),
85 nid, K(node_page_state(nid, NR_UNSTABLE_NFS)),
86 nid, K(node_page_state(nid, NR_BOUNCE)),
87 nid, K(node_page_state(nid, NR_SLAB)));
89 n += hugetlb_report_node_meminfo(nid, buf + n); 88 n += hugetlb_report_node_meminfo(nid, buf + n);
90 return n; 89 return n;
91} 90}
@@ -95,28 +94,6 @@ static SYSDEV_ATTR(meminfo, S_IRUGO, node_read_meminfo, NULL);
95 94
96static ssize_t node_read_numastat(struct sys_device * dev, char * buf) 95static ssize_t node_read_numastat(struct sys_device * dev, char * buf)
97{ 96{
98 unsigned long numa_hit, numa_miss, interleave_hit, numa_foreign;
99 unsigned long local_node, other_node;
100 int i, cpu;
101 pg_data_t *pg = NODE_DATA(dev->id);
102 numa_hit = 0;
103 numa_miss = 0;
104 interleave_hit = 0;
105 numa_foreign = 0;
106 local_node = 0;
107 other_node = 0;
108 for (i = 0; i < MAX_NR_ZONES; i++) {
109 struct zone *z = &pg->node_zones[i];
110 for_each_online_cpu(cpu) {
111 struct per_cpu_pageset *ps = zone_pcp(z,cpu);
112 numa_hit += ps->numa_hit;
113 numa_miss += ps->numa_miss;
114 numa_foreign += ps->numa_foreign;
115 interleave_hit += ps->interleave_hit;
116 local_node += ps->local_node;
117 other_node += ps->other_node;
118 }
119 }
120 return sprintf(buf, 97 return sprintf(buf,
121 "numa_hit %lu\n" 98 "numa_hit %lu\n"
122 "numa_miss %lu\n" 99 "numa_miss %lu\n"
@@ -124,12 +101,12 @@ static ssize_t node_read_numastat(struct sys_device * dev, char * buf)
124 "interleave_hit %lu\n" 101 "interleave_hit %lu\n"
125 "local_node %lu\n" 102 "local_node %lu\n"
126 "other_node %lu\n", 103 "other_node %lu\n",
127 numa_hit, 104 node_page_state(dev->id, NUMA_HIT),
128 numa_miss, 105 node_page_state(dev->id, NUMA_MISS),
129 numa_foreign, 106 node_page_state(dev->id, NUMA_FOREIGN),
130 interleave_hit, 107 node_page_state(dev->id, NUMA_INTERLEAVE_HIT),
131 local_node, 108 node_page_state(dev->id, NUMA_LOCAL),
132 other_node); 109 node_page_state(dev->id, NUMA_OTHER));
133} 110}
134static SYSDEV_ATTR(numastat, S_IRUGO, node_read_numastat, NULL); 111static SYSDEV_ATTR(numastat, S_IRUGO, node_read_numastat, NULL);
135 112
diff --git a/drivers/cdrom/cm206.c b/drivers/cdrom/cm206.c
index 4ee288688fed..9b05ddd23141 100644
--- a/drivers/cdrom/cm206.c
+++ b/drivers/cdrom/cm206.c
@@ -914,7 +914,7 @@ static void seek(int lba)
914 cd->dsb = wait_dsb(); 914 cd->dsb = wait_dsb();
915} 915}
916 916
917uch bcdbin(unsigned char bcd) 917static uch bcdbin(unsigned char bcd)
918{ /* stolen from mcd.c! */ 918{ /* stolen from mcd.c! */
919 return (bcd >> 4) * 10 + (bcd & 0xf); 919 return (bcd >> 4) * 10 + (bcd & 0xf);
920} 920}
@@ -1532,7 +1532,7 @@ static void __init parse_options(void)
1532 } 1532 }
1533} 1533}
1534 1534
1535static int __cm206_init(void) 1535static int __init __cm206_init(void)
1536{ 1536{
1537 parse_options(); 1537 parse_options();
1538#if !defined(AUTO_PROBE_MODULE) 1538#if !defined(AUTO_PROBE_MODULE)
@@ -1593,8 +1593,3 @@ __setup("cm206=", cm206_setup);
1593#endif /* !MODULE */ 1593#endif /* !MODULE */
1594MODULE_ALIAS_BLOCKDEV_MAJOR(CM206_CDROM_MAJOR); 1594MODULE_ALIAS_BLOCKDEV_MAJOR(CM206_CDROM_MAJOR);
1595 1595
1596/*
1597 * Local variables:
1598 * compile-command: "gcc -D__KERNEL__ -I/usr/src/linux/include -Wall -Wstrict-prototypes -O2 -fomit-frame-pointer -pipe -fno-strength-reduce -m486 -DMODULE -DMODVERSIONS -include /usr/src/linux/include/linux/modversions.h -c -o cm206.o cm206.c"
1599 * End:
1600 */
diff --git a/drivers/char/istallion.c b/drivers/char/istallion.c
index c74e5660a9b7..18c0dcf894cd 100644
--- a/drivers/char/istallion.c
+++ b/drivers/char/istallion.c
@@ -282,7 +282,6 @@ static char *stli_brdnames[] = {
282 282
283/*****************************************************************************/ 283/*****************************************************************************/
284 284
285#ifdef MODULE
286/* 285/*
287 * Define some string labels for arguments passed from the module 286 * Define some string labels for arguments passed from the module
288 * load line. These allow for easy board definitions, and easy 287 * load line. These allow for easy board definitions, and easy
@@ -381,8 +380,6 @@ MODULE_PARM_DESC(board2, "Board 2 config -> name[,ioaddr[,memaddr]");
381module_param_array(board3, charp, NULL, 0); 380module_param_array(board3, charp, NULL, 0);
382MODULE_PARM_DESC(board3, "Board 3 config -> name[,ioaddr[,memaddr]"); 381MODULE_PARM_DESC(board3, "Board 3 config -> name[,ioaddr[,memaddr]");
383 382
384#endif
385
386/* 383/*
387 * Set up a default memory address table for EISA board probing. 384 * Set up a default memory address table for EISA board probing.
388 * The default addresses are all bellow 1Mbyte, which has to be the 385 * The default addresses are all bellow 1Mbyte, which has to be the
@@ -643,14 +640,8 @@ static unsigned int stli_baudrates[] = {
643 * Prototype all functions in this driver! 640 * Prototype all functions in this driver!
644 */ 641 */
645 642
646#ifdef MODULE
647static void stli_argbrds(void);
648static int stli_parsebrd(stlconf_t *confp, char **argp); 643static int stli_parsebrd(stlconf_t *confp, char **argp);
649 644static int stli_init(void);
650static unsigned long stli_atol(char *str);
651#endif
652
653int stli_init(void);
654static int stli_open(struct tty_struct *tty, struct file *filp); 645static int stli_open(struct tty_struct *tty, struct file *filp);
655static void stli_close(struct tty_struct *tty, struct file *filp); 646static void stli_close(struct tty_struct *tty, struct file *filp);
656static int stli_write(struct tty_struct *tty, const unsigned char *buf, int count); 647static int stli_write(struct tty_struct *tty, const unsigned char *buf, int count);
@@ -786,8 +777,6 @@ static int stli_timeron;
786 777
787static struct class *istallion_class; 778static struct class *istallion_class;
788 779
789#ifdef MODULE
790
791/* 780/*
792 * Loadable module initialization stuff. 781 * Loadable module initialization stuff.
793 */ 782 */
@@ -954,8 +943,6 @@ static int stli_parsebrd(stlconf_t *confp, char **argp)
954 return(1); 943 return(1);
955} 944}
956 945
957#endif
958
959/*****************************************************************************/ 946/*****************************************************************************/
960 947
961static int stli_open(struct tty_struct *tty, struct file *filp) 948static int stli_open(struct tty_struct *tty, struct file *filp)
@@ -4694,7 +4681,7 @@ static struct tty_operations stli_ops = {
4694 4681
4695/*****************************************************************************/ 4682/*****************************************************************************/
4696 4683
4697int __init stli_init(void) 4684static int __init stli_init(void)
4698{ 4685{
4699 int i; 4686 int i;
4700 printk(KERN_INFO "%s: version %s\n", stli_drvtitle, stli_drvversion); 4687 printk(KERN_INFO "%s: version %s\n", stli_drvtitle, stli_drvversion);
diff --git a/drivers/char/pc8736x_gpio.c b/drivers/char/pc8736x_gpio.c
index 1c706ccfdbb3..c860de6a6fde 100644
--- a/drivers/char/pc8736x_gpio.c
+++ b/drivers/char/pc8736x_gpio.c
@@ -319,9 +319,10 @@ static int __init pc8736x_gpio_init(void)
319 return 0; 319 return 0;
320 320
321undo_platform_dev_add: 321undo_platform_dev_add:
322 platform_device_put(pdev); 322 platform_device_del(pdev);
323undo_platform_dev_alloc: 323undo_platform_dev_alloc:
324 kfree(pdev); 324 platform_device_put(pdev);
325
325 return rc; 326 return rc;
326} 327}
327 328
diff --git a/drivers/char/scx200_gpio.c b/drivers/char/scx200_gpio.c
index 5a280a330401..45083e5dd23b 100644
--- a/drivers/char/scx200_gpio.c
+++ b/drivers/char/scx200_gpio.c
@@ -126,9 +126,10 @@ static int __init scx200_gpio_init(void)
126undo_chrdev_region: 126undo_chrdev_region:
127 unregister_chrdev_region(dev, num_pins); 127 unregister_chrdev_region(dev, num_pins);
128undo_platform_device_add: 128undo_platform_device_add:
129 platform_device_put(pdev); 129 platform_device_del(pdev);
130undo_malloc: 130undo_malloc:
131 kfree(pdev); 131 platform_device_put(pdev);
132
132 return rc; 133 return rc;
133} 134}
134 135
@@ -136,7 +137,6 @@ static void __exit scx200_gpio_cleanup(void)
136{ 137{
137 kfree(scx200_devices); 138 kfree(scx200_devices);
138 unregister_chrdev_region(MKDEV(major, 0), num_pins); 139 unregister_chrdev_region(MKDEV(major, 0), num_pins);
139 platform_device_put(pdev);
140 platform_device_unregister(pdev); 140 platform_device_unregister(pdev);
141 /* kfree(pdev); */ 141 /* kfree(pdev); */
142} 142}
diff --git a/drivers/edac/amd76x_edac.c b/drivers/edac/amd76x_edac.c
index 53423ad6d4a3..702141c5501b 100644
--- a/drivers/edac/amd76x_edac.c
+++ b/drivers/edac/amd76x_edac.c
@@ -20,6 +20,9 @@
20#include <linux/slab.h> 20#include <linux/slab.h>
21#include "edac_mc.h" 21#include "edac_mc.h"
22 22
23#define AMD76X_REVISION " Ver: 2.0.0 " __DATE__
24
25
23#define amd76x_printk(level, fmt, arg...) \ 26#define amd76x_printk(level, fmt, arg...) \
24 edac_printk(level, "amd76x", fmt, ##arg) 27 edac_printk(level, "amd76x", fmt, ##arg)
25 28
@@ -102,15 +105,18 @@ static const struct amd76x_dev_info amd76x_devs[] = {
102static void amd76x_get_error_info(struct mem_ctl_info *mci, 105static void amd76x_get_error_info(struct mem_ctl_info *mci,
103 struct amd76x_error_info *info) 106 struct amd76x_error_info *info)
104{ 107{
105 pci_read_config_dword(mci->pdev, AMD76X_ECC_MODE_STATUS, 108 struct pci_dev *pdev;
109
110 pdev = to_pci_dev(mci->dev);
111 pci_read_config_dword(pdev, AMD76X_ECC_MODE_STATUS,
106 &info->ecc_mode_status); 112 &info->ecc_mode_status);
107 113
108 if (info->ecc_mode_status & BIT(8)) 114 if (info->ecc_mode_status & BIT(8))
109 pci_write_bits32(mci->pdev, AMD76X_ECC_MODE_STATUS, 115 pci_write_bits32(pdev, AMD76X_ECC_MODE_STATUS,
110 (u32) BIT(8), (u32) BIT(8)); 116 (u32) BIT(8), (u32) BIT(8));
111 117
112 if (info->ecc_mode_status & BIT(9)) 118 if (info->ecc_mode_status & BIT(9))
113 pci_write_bits32(mci->pdev, AMD76X_ECC_MODE_STATUS, 119 pci_write_bits32(pdev, AMD76X_ECC_MODE_STATUS,
114 (u32) BIT(9), (u32) BIT(9)); 120 (u32) BIT(9), (u32) BIT(9));
115} 121}
116 122
@@ -176,6 +182,38 @@ static void amd76x_check(struct mem_ctl_info *mci)
176 amd76x_process_error_info(mci, &info, 1); 182 amd76x_process_error_info(mci, &info, 1);
177} 183}
178 184
185static void amd76x_init_csrows(struct mem_ctl_info *mci, struct pci_dev *pdev,
186 enum edac_type edac_mode)
187{
188 struct csrow_info *csrow;
189 u32 mba, mba_base, mba_mask, dms;
190 int index;
191
192 for (index = 0; index < mci->nr_csrows; index++) {
193 csrow = &mci->csrows[index];
194
195 /* find the DRAM Chip Select Base address and mask */
196 pci_read_config_dword(pdev,
197 AMD76X_MEM_BASE_ADDR + (index * 4),
198 &mba);
199
200 if (!(mba & BIT(0)))
201 continue;
202
203 mba_base = mba & 0xff800000UL;
204 mba_mask = ((mba & 0xff80) << 16) | 0x7fffffUL;
205 pci_read_config_dword(pdev, AMD76X_DRAM_MODE_STATUS, &dms);
206 csrow->first_page = mba_base >> PAGE_SHIFT;
207 csrow->nr_pages = (mba_mask + 1) >> PAGE_SHIFT;
208 csrow->last_page = csrow->first_page + csrow->nr_pages - 1;
209 csrow->page_mask = mba_mask >> PAGE_SHIFT;
210 csrow->grain = csrow->nr_pages << PAGE_SHIFT;
211 csrow->mtype = MEM_RDDR;
212 csrow->dtype = ((dms >> index) & 0x1) ? DEV_X4 : DEV_UNKNOWN;
213 csrow->edac_mode = edac_mode;
214 }
215}
216
179/** 217/**
180 * amd76x_probe1 - Perform set up for detected device 218 * amd76x_probe1 - Perform set up for detected device
181 * @pdev; PCI device detected 219 * @pdev; PCI device detected
@@ -187,15 +225,13 @@ static void amd76x_check(struct mem_ctl_info *mci)
187 */ 225 */
188static int amd76x_probe1(struct pci_dev *pdev, int dev_idx) 226static int amd76x_probe1(struct pci_dev *pdev, int dev_idx)
189{ 227{
190 int rc = -ENODEV; 228 static const enum edac_type ems_modes[] = {
191 int index;
192 struct mem_ctl_info *mci = NULL;
193 enum edac_type ems_modes[] = {
194 EDAC_NONE, 229 EDAC_NONE,
195 EDAC_EC, 230 EDAC_EC,
196 EDAC_SECDED, 231 EDAC_SECDED,
197 EDAC_SECDED 232 EDAC_SECDED
198 }; 233 };
234 struct mem_ctl_info *mci = NULL;
199 u32 ems; 235 u32 ems;
200 u32 ems_mode; 236 u32 ems_mode;
201 struct amd76x_error_info discard; 237 struct amd76x_error_info discard;
@@ -206,53 +242,28 @@ static int amd76x_probe1(struct pci_dev *pdev, int dev_idx)
206 mci = edac_mc_alloc(0, AMD76X_NR_CSROWS, AMD76X_NR_CHANS); 242 mci = edac_mc_alloc(0, AMD76X_NR_CSROWS, AMD76X_NR_CHANS);
207 243
208 if (mci == NULL) { 244 if (mci == NULL) {
209 rc = -ENOMEM; 245 return -ENOMEM;
210 goto fail;
211 } 246 }
212 247
213 debugf0("%s(): mci = %p\n", __func__, mci); 248 debugf0("%s(): mci = %p\n", __func__, mci);
214 mci->pdev = pdev; 249 mci->dev = &pdev->dev;
215 mci->mtype_cap = MEM_FLAG_RDDR; 250 mci->mtype_cap = MEM_FLAG_RDDR;
216 mci->edac_ctl_cap = EDAC_FLAG_NONE | EDAC_FLAG_EC | EDAC_FLAG_SECDED; 251 mci->edac_ctl_cap = EDAC_FLAG_NONE | EDAC_FLAG_EC | EDAC_FLAG_SECDED;
217 mci->edac_cap = ems_mode ? 252 mci->edac_cap = ems_mode ?
218 (EDAC_FLAG_EC | EDAC_FLAG_SECDED) : EDAC_FLAG_NONE; 253 (EDAC_FLAG_EC | EDAC_FLAG_SECDED) : EDAC_FLAG_NONE;
219 mci->mod_name = EDAC_MOD_STR; 254 mci->mod_name = EDAC_MOD_STR;
220 mci->mod_ver = "$Revision: 1.4.2.5 $"; 255 mci->mod_ver = AMD76X_REVISION;
221 mci->ctl_name = amd76x_devs[dev_idx].ctl_name; 256 mci->ctl_name = amd76x_devs[dev_idx].ctl_name;
222 mci->edac_check = amd76x_check; 257 mci->edac_check = amd76x_check;
223 mci->ctl_page_to_phys = NULL; 258 mci->ctl_page_to_phys = NULL;
224 259
225 for (index = 0; index < mci->nr_csrows; index++) { 260 amd76x_init_csrows(mci, pdev, ems_modes[ems_mode]);
226 struct csrow_info *csrow = &mci->csrows[index];
227 u32 mba;
228 u32 mba_base;
229 u32 mba_mask;
230 u32 dms;
231
232 /* find the DRAM Chip Select Base address and mask */
233 pci_read_config_dword(mci->pdev,
234 AMD76X_MEM_BASE_ADDR + (index * 4), &mba);
235
236 if (!(mba & BIT(0)))
237 continue;
238
239 mba_base = mba & 0xff800000UL;
240 mba_mask = ((mba & 0xff80) << 16) | 0x7fffffUL;
241 pci_read_config_dword(mci->pdev, AMD76X_DRAM_MODE_STATUS,
242 &dms);
243 csrow->first_page = mba_base >> PAGE_SHIFT;
244 csrow->nr_pages = (mba_mask + 1) >> PAGE_SHIFT;
245 csrow->last_page = csrow->first_page + csrow->nr_pages - 1;
246 csrow->page_mask = mba_mask >> PAGE_SHIFT;
247 csrow->grain = csrow->nr_pages << PAGE_SHIFT;
248 csrow->mtype = MEM_RDDR;
249 csrow->dtype = ((dms >> index) & 0x1) ? DEV_X4 : DEV_UNKNOWN;
250 csrow->edac_mode = ems_modes[ems_mode];
251 }
252
253 amd76x_get_error_info(mci, &discard); /* clear counters */ 261 amd76x_get_error_info(mci, &discard); /* clear counters */
254 262
255 if (edac_mc_add_mc(mci)) { 263 /* Here we assume that we will never see multiple instances of this
264 * type of memory controller. The ID is therefore hardcoded to 0.
265 */
266 if (edac_mc_add_mc(mci,0)) {
256 debugf3("%s(): failed edac_mc_add_mc()\n", __func__); 267 debugf3("%s(): failed edac_mc_add_mc()\n", __func__);
257 goto fail; 268 goto fail;
258 } 269 }
@@ -262,9 +273,8 @@ static int amd76x_probe1(struct pci_dev *pdev, int dev_idx)
262 return 0; 273 return 0;
263 274
264fail: 275fail:
265 if (mci != NULL) 276 edac_mc_free(mci);
266 edac_mc_free(mci); 277 return -ENODEV;
267 return rc;
268} 278}
269 279
270/* returns count (>= 0), or negative on error */ 280/* returns count (>= 0), or negative on error */
@@ -291,7 +301,7 @@ static void __devexit amd76x_remove_one(struct pci_dev *pdev)
291 301
292 debugf0("%s()\n", __func__); 302 debugf0("%s()\n", __func__);
293 303
294 if ((mci = edac_mc_del_mc(pdev)) == NULL) 304 if ((mci = edac_mc_del_mc(&pdev->dev)) == NULL)
295 return; 305 return;
296 306
297 edac_mc_free(mci); 307 edac_mc_free(mci);
diff --git a/drivers/edac/e752x_edac.c b/drivers/edac/e752x_edac.c
index fce31936e6d7..5351a76739e5 100644
--- a/drivers/edac/e752x_edac.c
+++ b/drivers/edac/e752x_edac.c
@@ -25,6 +25,8 @@
25#include <linux/slab.h> 25#include <linux/slab.h>
26#include "edac_mc.h" 26#include "edac_mc.h"
27 27
28#define E752X_REVISION " Ver: 2.0.0 " __DATE__
29
28static int force_function_unhide; 30static int force_function_unhide;
29 31
30#define e752x_printk(level, fmt, arg...) \ 32#define e752x_printk(level, fmt, arg...) \
@@ -763,22 +765,174 @@ static void e752x_check(struct mem_ctl_info *mci)
763 e752x_process_error_info(mci, &info, 1); 765 e752x_process_error_info(mci, &info, 1);
764} 766}
765 767
766static int e752x_probe1(struct pci_dev *pdev, int dev_idx) 768/* Return 1 if dual channel mode is active. Else return 0. */
769static inline int dual_channel_active(u16 ddrcsr)
770{
771 return (((ddrcsr >> 12) & 3) == 3);
772}
773
774static void e752x_init_csrows(struct mem_ctl_info *mci, struct pci_dev *pdev,
775 u16 ddrcsr)
776{
777 struct csrow_info *csrow;
778 unsigned long last_cumul_size;
779 int index, mem_dev, drc_chan;
780 int drc_drbg; /* DRB granularity 0=64mb, 1=128mb */
781 int drc_ddim; /* DRAM Data Integrity Mode 0=none, 2=edac */
782 u8 value;
783 u32 dra, drc, cumul_size;
784
785 pci_read_config_dword(pdev, E752X_DRA, &dra);
786 pci_read_config_dword(pdev, E752X_DRC, &drc);
787 drc_chan = dual_channel_active(ddrcsr);
788 drc_drbg = drc_chan + 1; /* 128 in dual mode, 64 in single */
789 drc_ddim = (drc >> 20) & 0x3;
790
791 /* The dram row boundary (DRB) reg values are boundary address for
792 * each DRAM row with a granularity of 64 or 128MB (single/dual
793 * channel operation). DRB regs are cumulative; therefore DRB7 will
794 * contain the total memory contained in all eight rows.
795 */
796 for (last_cumul_size = index = 0; index < mci->nr_csrows; index++) {
797 /* mem_dev 0=x8, 1=x4 */
798 mem_dev = (dra >> (index * 4 + 2)) & 0x3;
799 csrow = &mci->csrows[index];
800
801 mem_dev = (mem_dev == 2);
802 pci_read_config_byte(pdev, E752X_DRB + index, &value);
803 /* convert a 128 or 64 MiB DRB to a page size. */
804 cumul_size = value << (25 + drc_drbg - PAGE_SHIFT);
805 debugf3("%s(): (%d) cumul_size 0x%x\n", __func__, index,
806 cumul_size);
807 if (cumul_size == last_cumul_size)
808 continue; /* not populated */
809
810 csrow->first_page = last_cumul_size;
811 csrow->last_page = cumul_size - 1;
812 csrow->nr_pages = cumul_size - last_cumul_size;
813 last_cumul_size = cumul_size;
814 csrow->grain = 1 << 12; /* 4KiB - resolution of CELOG */
815 csrow->mtype = MEM_RDDR; /* only one type supported */
816 csrow->dtype = mem_dev ? DEV_X4 : DEV_X8;
817
818 /*
819 * if single channel or x8 devices then SECDED
820 * if dual channel and x4 then S4ECD4ED
821 */
822 if (drc_ddim) {
823 if (drc_chan && mem_dev) {
824 csrow->edac_mode = EDAC_S4ECD4ED;
825 mci->edac_cap |= EDAC_FLAG_S4ECD4ED;
826 } else {
827 csrow->edac_mode = EDAC_SECDED;
828 mci->edac_cap |= EDAC_FLAG_SECDED;
829 }
830 } else
831 csrow->edac_mode = EDAC_NONE;
832 }
833}
834
835static void e752x_init_mem_map_table(struct pci_dev *pdev,
836 struct e752x_pvt *pvt)
767{ 837{
768 int rc = -ENODEV;
769 int index; 838 int index;
839 u8 value, last, row, stat8;
840
841 last = 0;
842 row = 0;
843
844 for (index = 0; index < 8; index += 2) {
845 pci_read_config_byte(pdev, E752X_DRB + index, &value);
846 /* test if there is a dimm in this slot */
847 if (value == last) {
848 /* no dimm in the slot, so flag it as empty */
849 pvt->map[index] = 0xff;
850 pvt->map[index + 1] = 0xff;
851 } else { /* there is a dimm in the slot */
852 pvt->map[index] = row;
853 row++;
854 last = value;
855 /* test the next value to see if the dimm is double
856 * sided
857 */
858 pci_read_config_byte(pdev, E752X_DRB + index + 1,
859 &value);
860 pvt->map[index + 1] = (value == last) ?
861 0xff : /* the dimm is single sided,
862 so flag as empty */
863 row; /* this is a double sided dimm
864 to save the next row # */
865 row++;
866 last = value;
867 }
868 }
869
870 /* set the map type. 1 = normal, 0 = reversed */
871 pci_read_config_byte(pdev, E752X_DRM, &stat8);
872 pvt->map_type = ((stat8 & 0x0f) > ((stat8 >> 4) & 0x0f));
873}
874
875/* Return 0 on success or 1 on failure. */
876static int e752x_get_devs(struct pci_dev *pdev, int dev_idx,
877 struct e752x_pvt *pvt)
878{
879 struct pci_dev *dev;
880
881 pvt->bridge_ck = pci_get_device(PCI_VENDOR_ID_INTEL,
882 pvt->dev_info->err_dev,
883 pvt->bridge_ck);
884
885 if (pvt->bridge_ck == NULL)
886 pvt->bridge_ck = pci_scan_single_device(pdev->bus,
887 PCI_DEVFN(0, 1));
888
889 if (pvt->bridge_ck == NULL) {
890 e752x_printk(KERN_ERR, "error reporting device not found:"
891 "vendor %x device 0x%x (broken BIOS?)\n",
892 PCI_VENDOR_ID_INTEL, e752x_devs[dev_idx].err_dev);
893 return 1;
894 }
895
896 dev = pci_get_device(PCI_VENDOR_ID_INTEL, e752x_devs[dev_idx].ctl_dev,
897 NULL);
898
899 if (dev == NULL)
900 goto fail;
901
902 pvt->dev_d0f0 = dev;
903 pvt->dev_d0f1 = pci_dev_get(pvt->bridge_ck);
904
905 return 0;
906
907fail:
908 pci_dev_put(pvt->bridge_ck);
909 return 1;
910}
911
912static void e752x_init_error_reporting_regs(struct e752x_pvt *pvt)
913{
914 struct pci_dev *dev;
915
916 dev = pvt->dev_d0f1;
917 /* Turn off error disable & SMI in case the BIOS turned it on */
918 pci_write_config_byte(dev, E752X_HI_ERRMASK, 0x00);
919 pci_write_config_byte(dev, E752X_HI_SMICMD, 0x00);
920 pci_write_config_word(dev, E752X_SYSBUS_ERRMASK, 0x00);
921 pci_write_config_word(dev, E752X_SYSBUS_SMICMD, 0x00);
922 pci_write_config_byte(dev, E752X_BUF_ERRMASK, 0x00);
923 pci_write_config_byte(dev, E752X_BUF_SMICMD, 0x00);
924 pci_write_config_byte(dev, E752X_DRAM_ERRMASK, 0x00);
925 pci_write_config_byte(dev, E752X_DRAM_SMICMD, 0x00);
926}
927
928static int e752x_probe1(struct pci_dev *pdev, int dev_idx)
929{
770 u16 pci_data; 930 u16 pci_data;
771 u8 stat8; 931 u8 stat8;
772 struct mem_ctl_info *mci = NULL; 932 struct mem_ctl_info *mci;
773 struct e752x_pvt *pvt = NULL; 933 struct e752x_pvt *pvt;
774 u16 ddrcsr; 934 u16 ddrcsr;
775 u32 drc;
776 int drc_chan; /* Number of channels 0=1chan,1=2chan */ 935 int drc_chan; /* Number of channels 0=1chan,1=2chan */
777 int drc_drbg; /* DRB granularity 0=64mb, 1=128mb */
778 int drc_ddim; /* DRAM Data Integrity Mode 0=none,2=edac */
779 u32 dra;
780 unsigned long last_cumul_size;
781 struct pci_dev *dev = NULL;
782 struct e752x_error_info discard; 936 struct e752x_error_info discard;
783 937
784 debugf0("%s(): mci\n", __func__); 938 debugf0("%s(): mci\n", __func__);
@@ -792,25 +946,20 @@ static int e752x_probe1(struct pci_dev *pdev, int dev_idx)
792 if (!force_function_unhide && !(stat8 & (1 << 5))) { 946 if (!force_function_unhide && !(stat8 & (1 << 5))) {
793 printk(KERN_INFO "Contact your BIOS vendor to see if the " 947 printk(KERN_INFO "Contact your BIOS vendor to see if the "
794 "E752x error registers can be safely un-hidden\n"); 948 "E752x error registers can be safely un-hidden\n");
795 goto fail; 949 return -ENOMEM;
796 } 950 }
797 stat8 |= (1 << 5); 951 stat8 |= (1 << 5);
798 pci_write_config_byte(pdev, E752X_DEVPRES1, stat8); 952 pci_write_config_byte(pdev, E752X_DEVPRES1, stat8);
799 953
800 /* need to find out the number of channels */
801 pci_read_config_dword(pdev, E752X_DRC, &drc);
802 pci_read_config_word(pdev, E752X_DDRCSR, &ddrcsr); 954 pci_read_config_word(pdev, E752X_DDRCSR, &ddrcsr);
803 /* FIXME: should check >>12 or 0xf, true for all? */ 955 /* FIXME: should check >>12 or 0xf, true for all? */
804 /* Dual channel = 1, Single channel = 0 */ 956 /* Dual channel = 1, Single channel = 0 */
805 drc_chan = (((ddrcsr >> 12) & 3) == 3); 957 drc_chan = dual_channel_active(ddrcsr);
806 drc_drbg = drc_chan + 1; /* 128 in dual mode, 64 in single */
807 drc_ddim = (drc >> 20) & 0x3;
808 958
809 mci = edac_mc_alloc(sizeof(*pvt), E752X_NR_CSROWS, drc_chan + 1); 959 mci = edac_mc_alloc(sizeof(*pvt), E752X_NR_CSROWS, drc_chan + 1);
810 960
811 if (mci == NULL) { 961 if (mci == NULL) {
812 rc = -ENOMEM; 962 return -ENOMEM;
813 goto fail;
814 } 963 }
815 964
816 debugf3("%s(): init mci\n", __func__); 965 debugf3("%s(): init mci\n", __func__);
@@ -819,159 +968,54 @@ static int e752x_probe1(struct pci_dev *pdev, int dev_idx)
819 EDAC_FLAG_S4ECD4ED; 968 EDAC_FLAG_S4ECD4ED;
820 /* FIXME - what if different memory types are in different csrows? */ 969 /* FIXME - what if different memory types are in different csrows? */
821 mci->mod_name = EDAC_MOD_STR; 970 mci->mod_name = EDAC_MOD_STR;
822 mci->mod_ver = "$Revision: 1.5.2.11 $"; 971 mci->mod_ver = E752X_REVISION;
823 mci->pdev = pdev; 972 mci->dev = &pdev->dev;
824 973
825 debugf3("%s(): init pvt\n", __func__); 974 debugf3("%s(): init pvt\n", __func__);
826 pvt = (struct e752x_pvt *) mci->pvt_info; 975 pvt = (struct e752x_pvt *) mci->pvt_info;
827 pvt->dev_info = &e752x_devs[dev_idx]; 976 pvt->dev_info = &e752x_devs[dev_idx];
828 pvt->bridge_ck = pci_get_device(PCI_VENDOR_ID_INTEL, 977 pvt->mc_symmetric = ((ddrcsr & 0x10) != 0);
829 pvt->dev_info->err_dev,
830 pvt->bridge_ck);
831
832 if (pvt->bridge_ck == NULL)
833 pvt->bridge_ck = pci_scan_single_device(pdev->bus,
834 PCI_DEVFN(0, 1));
835 978
836 if (pvt->bridge_ck == NULL) { 979 if (e752x_get_devs(pdev, dev_idx, pvt)) {
837 e752x_printk(KERN_ERR, "error reporting device not found:" 980 edac_mc_free(mci);
838 "vendor %x device 0x%x (broken BIOS?)\n", 981 return -ENODEV;
839 PCI_VENDOR_ID_INTEL, e752x_devs[dev_idx].err_dev);
840 goto fail;
841 } 982 }
842 983
843 pvt->mc_symmetric = ((ddrcsr & 0x10) != 0);
844 debugf3("%s(): more mci init\n", __func__); 984 debugf3("%s(): more mci init\n", __func__);
845 mci->ctl_name = pvt->dev_info->ctl_name; 985 mci->ctl_name = pvt->dev_info->ctl_name;
846 mci->edac_check = e752x_check; 986 mci->edac_check = e752x_check;
847 mci->ctl_page_to_phys = ctl_page_to_phys; 987 mci->ctl_page_to_phys = ctl_page_to_phys;
848 988
849 /* find out the device types */ 989 e752x_init_csrows(mci, pdev, ddrcsr);
850 pci_read_config_dword(pdev, E752X_DRA, &dra); 990 e752x_init_mem_map_table(pdev, pvt);
851
852 /*
853 * The dram row boundary (DRB) reg values are boundary address for
854 * each DRAM row with a granularity of 64 or 128MB (single/dual
855 * channel operation). DRB regs are cumulative; therefore DRB7 will
856 * contain the total memory contained in all eight rows.
857 */
858 for (last_cumul_size = index = 0; index < mci->nr_csrows; index++) {
859 u8 value;
860 u32 cumul_size;
861
862 /* mem_dev 0=x8, 1=x4 */
863 int mem_dev = (dra >> (index * 4 + 2)) & 0x3;
864 struct csrow_info *csrow = &mci->csrows[index];
865
866 mem_dev = (mem_dev == 2);
867 pci_read_config_byte(mci->pdev, E752X_DRB + index, &value);
868 /* convert a 128 or 64 MiB DRB to a page size. */
869 cumul_size = value << (25 + drc_drbg - PAGE_SHIFT);
870 debugf3("%s(): (%d) cumul_size 0x%x\n", __func__, index,
871 cumul_size);
872
873 if (cumul_size == last_cumul_size)
874 continue; /* not populated */
875
876 csrow->first_page = last_cumul_size;
877 csrow->last_page = cumul_size - 1;
878 csrow->nr_pages = cumul_size - last_cumul_size;
879 last_cumul_size = cumul_size;
880 csrow->grain = 1 << 12; /* 4KiB - resolution of CELOG */
881 csrow->mtype = MEM_RDDR; /* only one type supported */
882 csrow->dtype = mem_dev ? DEV_X4 : DEV_X8;
883
884 /*
885 * if single channel or x8 devices then SECDED
886 * if dual channel and x4 then S4ECD4ED
887 */
888 if (drc_ddim) {
889 if (drc_chan && mem_dev) {
890 csrow->edac_mode = EDAC_S4ECD4ED;
891 mci->edac_cap |= EDAC_FLAG_S4ECD4ED;
892 } else {
893 csrow->edac_mode = EDAC_SECDED;
894 mci->edac_cap |= EDAC_FLAG_SECDED;
895 }
896 } else
897 csrow->edac_mode = EDAC_NONE;
898 }
899
900 /* Fill in the memory map table */
901 {
902 u8 value;
903 u8 last = 0;
904 u8 row = 0;
905
906 for (index = 0; index < 8; index += 2) {
907 pci_read_config_byte(mci->pdev, E752X_DRB + index,
908 &value);
909
910 /* test if there is a dimm in this slot */
911 if (value == last) {
912 /* no dimm in the slot, so flag it as empty */
913 pvt->map[index] = 0xff;
914 pvt->map[index + 1] = 0xff;
915 } else { /* there is a dimm in the slot */
916 pvt->map[index] = row;
917 row++;
918 last = value;
919 /* test the next value to see if the dimm is
920 double sided */
921 pci_read_config_byte(mci->pdev,
922 E752X_DRB + index + 1,
923 &value);
924 pvt->map[index + 1] = (value == last) ?
925 0xff : /* the dimm is single sided,
926 * so flag as empty
927 */
928 row; /* this is a double sided dimm
929 * to save the next row #
930 */
931 row++;
932 last = value;
933 }
934 }
935 }
936 991
937 /* set the map type. 1 = normal, 0 = reversed */ 992 /* set the map type. 1 = normal, 0 = reversed */
938 pci_read_config_byte(mci->pdev, E752X_DRM, &stat8); 993 pci_read_config_byte(pdev, E752X_DRM, &stat8);
939 pvt->map_type = ((stat8 & 0x0f) > ((stat8 >> 4) & 0x0f)); 994 pvt->map_type = ((stat8 & 0x0f) > ((stat8 >> 4) & 0x0f));
940 995
941 mci->edac_cap |= EDAC_FLAG_NONE; 996 mci->edac_cap |= EDAC_FLAG_NONE;
942 debugf3("%s(): tolm, remapbase, remaplimit\n", __func__); 997 debugf3("%s(): tolm, remapbase, remaplimit\n", __func__);
943 998
944 /* load the top of low memory, remap base, and remap limit vars */ 999 /* load the top of low memory, remap base, and remap limit vars */
945 pci_read_config_word(mci->pdev, E752X_TOLM, &pci_data); 1000 pci_read_config_word(pdev, E752X_TOLM, &pci_data);
946 pvt->tolm = ((u32) pci_data) << 4; 1001 pvt->tolm = ((u32) pci_data) << 4;
947 pci_read_config_word(mci->pdev, E752X_REMAPBASE, &pci_data); 1002 pci_read_config_word(pdev, E752X_REMAPBASE, &pci_data);
948 pvt->remapbase = ((u32) pci_data) << 14; 1003 pvt->remapbase = ((u32) pci_data) << 14;
949 pci_read_config_word(mci->pdev, E752X_REMAPLIMIT, &pci_data); 1004 pci_read_config_word(pdev, E752X_REMAPLIMIT, &pci_data);
950 pvt->remaplimit = ((u32) pci_data) << 14; 1005 pvt->remaplimit = ((u32) pci_data) << 14;
951 e752x_printk(KERN_INFO, 1006 e752x_printk(KERN_INFO,
952 "tolm = %x, remapbase = %x, remaplimit = %x\n", pvt->tolm, 1007 "tolm = %x, remapbase = %x, remaplimit = %x\n", pvt->tolm,
953 pvt->remapbase, pvt->remaplimit); 1008 pvt->remapbase, pvt->remaplimit);
954 1009
955 if (edac_mc_add_mc(mci)) { 1010 /* Here we assume that we will never see multiple instances of this
1011 * type of memory controller. The ID is therefore hardcoded to 0.
1012 */
1013 if (edac_mc_add_mc(mci,0)) {
956 debugf3("%s(): failed edac_mc_add_mc()\n", __func__); 1014 debugf3("%s(): failed edac_mc_add_mc()\n", __func__);
957 goto fail; 1015 goto fail;
958 } 1016 }
959 1017
960 dev = pci_get_device(PCI_VENDOR_ID_INTEL, e752x_devs[dev_idx].ctl_dev, 1018 e752x_init_error_reporting_regs(pvt);
961 NULL);
962 pvt->dev_d0f0 = dev;
963 /* find the error reporting device and clear errors */
964 dev = pvt->dev_d0f1 = pci_dev_get(pvt->bridge_ck);
965 /* Turn off error disable & SMI in case the BIOS turned it on */
966 pci_write_config_byte(dev, E752X_HI_ERRMASK, 0x00);
967 pci_write_config_byte(dev, E752X_HI_SMICMD, 0x00);
968 pci_write_config_word(dev, E752X_SYSBUS_ERRMASK, 0x00);
969 pci_write_config_word(dev, E752X_SYSBUS_SMICMD, 0x00);
970 pci_write_config_byte(dev, E752X_BUF_ERRMASK, 0x00);
971 pci_write_config_byte(dev, E752X_BUF_SMICMD, 0x00);
972 pci_write_config_byte(dev, E752X_DRAM_ERRMASK, 0x00);
973 pci_write_config_byte(dev, E752X_DRAM_SMICMD, 0x00);
974
975 e752x_get_error_info(mci, &discard); /* clear other MCH errors */ 1019 e752x_get_error_info(mci, &discard); /* clear other MCH errors */
976 1020
977 /* get this far and it's successful */ 1021 /* get this far and it's successful */
@@ -979,20 +1023,12 @@ static int e752x_probe1(struct pci_dev *pdev, int dev_idx)
979 return 0; 1023 return 0;
980 1024
981fail: 1025fail:
982 if (mci) { 1026 pci_dev_put(pvt->dev_d0f0);
983 if (pvt->dev_d0f0) 1027 pci_dev_put(pvt->dev_d0f1);
984 pci_dev_put(pvt->dev_d0f0); 1028 pci_dev_put(pvt->bridge_ck);
985 1029 edac_mc_free(mci);
986 if (pvt->dev_d0f1)
987 pci_dev_put(pvt->dev_d0f1);
988
989 if (pvt->bridge_ck)
990 pci_dev_put(pvt->bridge_ck);
991
992 edac_mc_free(mci);
993 }
994 1030
995 return rc; 1031 return -ENODEV;
996} 1032}
997 1033
998/* returns count (>= 0), or negative on error */ 1034/* returns count (>= 0), or negative on error */
@@ -1015,7 +1051,7 @@ static void __devexit e752x_remove_one(struct pci_dev *pdev)
1015 1051
1016 debugf0("%s()\n", __func__); 1052 debugf0("%s()\n", __func__);
1017 1053
1018 if ((mci = edac_mc_del_mc(pdev)) == NULL) 1054 if ((mci = edac_mc_del_mc(&pdev->dev)) == NULL)
1019 return; 1055 return;
1020 1056
1021 pvt = (struct e752x_pvt *) mci->pvt_info; 1057 pvt = (struct e752x_pvt *) mci->pvt_info;
diff --git a/drivers/edac/e7xxx_edac.c b/drivers/edac/e7xxx_edac.c
index a9518d3e4be4..9878379b4993 100644
--- a/drivers/edac/e7xxx_edac.c
+++ b/drivers/edac/e7xxx_edac.c
@@ -30,6 +30,8 @@
30#include <linux/slab.h> 30#include <linux/slab.h>
31#include "edac_mc.h" 31#include "edac_mc.h"
32 32
33#define E7XXX_REVISION " Ver: 2.0.0 " __DATE__
34
33#define e7xxx_printk(level, fmt, arg...) \ 35#define e7xxx_printk(level, fmt, arg...) \
34 edac_printk(level, "e7xxx", fmt, ##arg) 36 edac_printk(level, "e7xxx", fmt, ##arg)
35 37
@@ -333,99 +335,61 @@ static void e7xxx_check(struct mem_ctl_info *mci)
333 e7xxx_process_error_info(mci, &info, 1); 335 e7xxx_process_error_info(mci, &info, 1);
334} 336}
335 337
336static int e7xxx_probe1(struct pci_dev *pdev, int dev_idx) 338/* Return 1 if dual channel mode is active. Else return 0. */
339static inline int dual_channel_active(u32 drc, int dev_idx)
337{ 340{
338 int rc = -ENODEV; 341 return (dev_idx == E7501) ? ((drc >> 22) & 0x1) : 1;
339 int index; 342}
340 u16 pci_data;
341 struct mem_ctl_info *mci = NULL;
342 struct e7xxx_pvt *pvt = NULL;
343 u32 drc;
344 int drc_chan = 1; /* Number of channels 0=1chan,1=2chan */
345 int drc_drbg = 1; /* DRB granularity 0=32mb,1=64mb */
346 int drc_ddim; /* DRAM Data Integrity Mode 0=none,2=edac */
347 u32 dra;
348 unsigned long last_cumul_size;
349 struct e7xxx_error_info discard;
350
351 debugf0("%s(): mci\n", __func__);
352 343
353 /* need to find out the number of channels */
354 pci_read_config_dword(pdev, E7XXX_DRC, &drc);
355 344
345/* Return DRB granularity (0=32mb, 1=64mb). */
346static inline int drb_granularity(u32 drc, int dev_idx)
347{
356 /* only e7501 can be single channel */ 348 /* only e7501 can be single channel */
357 if (dev_idx == E7501) { 349 return (dev_idx == E7501) ? ((drc >> 18) & 0x3) : 1;
358 drc_chan = ((drc >> 22) & 0x1); 350}
359 drc_drbg = (drc >> 18) & 0x3;
360 }
361
362 drc_ddim = (drc >> 20) & 0x3;
363 mci = edac_mc_alloc(sizeof(*pvt), E7XXX_NR_CSROWS, drc_chan + 1);
364
365 if (mci == NULL) {
366 rc = -ENOMEM;
367 goto fail;
368 }
369
370 debugf3("%s(): init mci\n", __func__);
371 mci->mtype_cap = MEM_FLAG_RDDR;
372 mci->edac_ctl_cap = EDAC_FLAG_NONE | EDAC_FLAG_SECDED |
373 EDAC_FLAG_S4ECD4ED;
374 /* FIXME - what if different memory types are in different csrows? */
375 mci->mod_name = EDAC_MOD_STR;
376 mci->mod_ver = "$Revision: 1.5.2.9 $";
377 mci->pdev = pdev;
378 351
379 debugf3("%s(): init pvt\n", __func__);
380 pvt = (struct e7xxx_pvt *) mci->pvt_info;
381 pvt->dev_info = &e7xxx_devs[dev_idx];
382 pvt->bridge_ck = pci_get_device(PCI_VENDOR_ID_INTEL,
383 pvt->dev_info->err_dev,
384 pvt->bridge_ck);
385 352
386 if (!pvt->bridge_ck) { 353static void e7xxx_init_csrows(struct mem_ctl_info *mci, struct pci_dev *pdev,
387 e7xxx_printk(KERN_ERR, "error reporting device not found:" 354 int dev_idx, u32 drc)
388 "vendor %x device 0x%x (broken BIOS?)\n", 355{
389 PCI_VENDOR_ID_INTEL, e7xxx_devs[dev_idx].err_dev); 356 unsigned long last_cumul_size;
390 goto fail; 357 int index;
391 } 358 u8 value;
392 359 u32 dra, cumul_size;
393 debugf3("%s(): more mci init\n", __func__); 360 int drc_chan, drc_drbg, drc_ddim, mem_dev;
394 mci->ctl_name = pvt->dev_info->ctl_name; 361 struct csrow_info *csrow;
395 mci->edac_check = e7xxx_check;
396 mci->ctl_page_to_phys = ctl_page_to_phys;
397 362
398 /* find out the device types */
399 pci_read_config_dword(pdev, E7XXX_DRA, &dra); 363 pci_read_config_dword(pdev, E7XXX_DRA, &dra);
364 drc_chan = dual_channel_active(drc, dev_idx);
365 drc_drbg = drb_granularity(drc, dev_idx);
366 drc_ddim = (drc >> 20) & 0x3;
367 last_cumul_size = 0;
400 368
401 /* 369 /* The dram row boundary (DRB) reg values are boundary address
402 * The dram row boundary (DRB) reg values are boundary address
403 * for each DRAM row with a granularity of 32 or 64MB (single/dual 370 * for each DRAM row with a granularity of 32 or 64MB (single/dual
404 * channel operation). DRB regs are cumulative; therefore DRB7 will 371 * channel operation). DRB regs are cumulative; therefore DRB7 will
405 * contain the total memory contained in all eight rows. 372 * contain the total memory contained in all eight rows.
406 */ 373 */
407 for (last_cumul_size = index = 0; index < mci->nr_csrows; index++) { 374 for (index = 0; index < mci->nr_csrows; index++) {
408 u8 value;
409 u32 cumul_size;
410 /* mem_dev 0=x8, 1=x4 */ 375 /* mem_dev 0=x8, 1=x4 */
411 int mem_dev = (dra >> (index * 4 + 3)) & 0x1; 376 mem_dev = (dra >> (index * 4 + 3)) & 0x1;
412 struct csrow_info *csrow = &mci->csrows[index]; 377 csrow = &mci->csrows[index];
413 378
414 pci_read_config_byte(mci->pdev, E7XXX_DRB + index, &value); 379 pci_read_config_byte(pdev, E7XXX_DRB + index, &value);
415 /* convert a 64 or 32 MiB DRB to a page size. */ 380 /* convert a 64 or 32 MiB DRB to a page size. */
416 cumul_size = value << (25 + drc_drbg - PAGE_SHIFT); 381 cumul_size = value << (25 + drc_drbg - PAGE_SHIFT);
417 debugf3("%s(): (%d) cumul_size 0x%x\n", __func__, index, 382 debugf3("%s(): (%d) cumul_size 0x%x\n", __func__, index,
418 cumul_size); 383 cumul_size);
419
420 if (cumul_size == last_cumul_size) 384 if (cumul_size == last_cumul_size)
421 continue; /* not populated */ 385 continue; /* not populated */
422 386
423 csrow->first_page = last_cumul_size; 387 csrow->first_page = last_cumul_size;
424 csrow->last_page = cumul_size - 1; 388 csrow->last_page = cumul_size - 1;
425 csrow->nr_pages = cumul_size - last_cumul_size; 389 csrow->nr_pages = cumul_size - last_cumul_size;
426 last_cumul_size = cumul_size; 390 last_cumul_size = cumul_size;
427 csrow->grain = 1 << 12; /* 4KiB - resolution of CELOG */ 391 csrow->grain = 1 << 12; /* 4KiB - resolution of CELOG */
428 csrow->mtype = MEM_RDDR; /* only one type supported */ 392 csrow->mtype = MEM_RDDR; /* only one type supported */
429 csrow->dtype = mem_dev ? DEV_X4 : DEV_X8; 393 csrow->dtype = mem_dev ? DEV_X4 : DEV_X8;
430 394
431 /* 395 /*
@@ -443,16 +407,61 @@ static int e7xxx_probe1(struct pci_dev *pdev, int dev_idx)
443 } else 407 } else
444 csrow->edac_mode = EDAC_NONE; 408 csrow->edac_mode = EDAC_NONE;
445 } 409 }
410}
446 411
447 mci->edac_cap |= EDAC_FLAG_NONE; 412static int e7xxx_probe1(struct pci_dev *pdev, int dev_idx)
413{
414 u16 pci_data;
415 struct mem_ctl_info *mci = NULL;
416 struct e7xxx_pvt *pvt = NULL;
417 u32 drc;
418 int drc_chan;
419 struct e7xxx_error_info discard;
420
421 debugf0("%s(): mci\n", __func__);
422 pci_read_config_dword(pdev, E7XXX_DRC, &drc);
423
424 drc_chan = dual_channel_active(drc, dev_idx);
425 mci = edac_mc_alloc(sizeof(*pvt), E7XXX_NR_CSROWS, drc_chan + 1);
426
427 if (mci == NULL)
428 return -ENOMEM;
429
430 debugf3("%s(): init mci\n", __func__);
431 mci->mtype_cap = MEM_FLAG_RDDR;
432 mci->edac_ctl_cap = EDAC_FLAG_NONE | EDAC_FLAG_SECDED |
433 EDAC_FLAG_S4ECD4ED;
434 /* FIXME - what if different memory types are in different csrows? */
435 mci->mod_name = EDAC_MOD_STR;
436 mci->mod_ver = E7XXX_REVISION;
437 mci->dev = &pdev->dev;
438 debugf3("%s(): init pvt\n", __func__);
439 pvt = (struct e7xxx_pvt *) mci->pvt_info;
440 pvt->dev_info = &e7xxx_devs[dev_idx];
441 pvt->bridge_ck = pci_get_device(PCI_VENDOR_ID_INTEL,
442 pvt->dev_info->err_dev,
443 pvt->bridge_ck);
448 444
445 if (!pvt->bridge_ck) {
446 e7xxx_printk(KERN_ERR, "error reporting device not found:"
447 "vendor %x device 0x%x (broken BIOS?)\n",
448 PCI_VENDOR_ID_INTEL, e7xxx_devs[dev_idx].err_dev);
449 goto fail0;
450 }
451
452 debugf3("%s(): more mci init\n", __func__);
453 mci->ctl_name = pvt->dev_info->ctl_name;
454 mci->edac_check = e7xxx_check;
455 mci->ctl_page_to_phys = ctl_page_to_phys;
456 e7xxx_init_csrows(mci, pdev, dev_idx, drc);
457 mci->edac_cap |= EDAC_FLAG_NONE;
449 debugf3("%s(): tolm, remapbase, remaplimit\n", __func__); 458 debugf3("%s(): tolm, remapbase, remaplimit\n", __func__);
450 /* load the top of low memory, remap base, and remap limit vars */ 459 /* load the top of low memory, remap base, and remap limit vars */
451 pci_read_config_word(mci->pdev, E7XXX_TOLM, &pci_data); 460 pci_read_config_word(pdev, E7XXX_TOLM, &pci_data);
452 pvt->tolm = ((u32) pci_data) << 4; 461 pvt->tolm = ((u32) pci_data) << 4;
453 pci_read_config_word(mci->pdev, E7XXX_REMAPBASE, &pci_data); 462 pci_read_config_word(pdev, E7XXX_REMAPBASE, &pci_data);
454 pvt->remapbase = ((u32) pci_data) << 14; 463 pvt->remapbase = ((u32) pci_data) << 14;
455 pci_read_config_word(mci->pdev, E7XXX_REMAPLIMIT, &pci_data); 464 pci_read_config_word(pdev, E7XXX_REMAPLIMIT, &pci_data);
456 pvt->remaplimit = ((u32) pci_data) << 14; 465 pvt->remaplimit = ((u32) pci_data) << 14;
457 e7xxx_printk(KERN_INFO, 466 e7xxx_printk(KERN_INFO,
458 "tolm = %x, remapbase = %x, remaplimit = %x\n", pvt->tolm, 467 "tolm = %x, remapbase = %x, remaplimit = %x\n", pvt->tolm,
@@ -461,23 +470,25 @@ static int e7xxx_probe1(struct pci_dev *pdev, int dev_idx)
461 /* clear any pending errors, or initial state bits */ 470 /* clear any pending errors, or initial state bits */
462 e7xxx_get_error_info(mci, &discard); 471 e7xxx_get_error_info(mci, &discard);
463 472
464 if (edac_mc_add_mc(mci) != 0) { 473 /* Here we assume that we will never see multiple instances of this
474 * type of memory controller. The ID is therefore hardcoded to 0.
475 */
476 if (edac_mc_add_mc(mci,0)) {
465 debugf3("%s(): failed edac_mc_add_mc()\n", __func__); 477 debugf3("%s(): failed edac_mc_add_mc()\n", __func__);
466 goto fail; 478 goto fail1;
467 } 479 }
468 480
469 /* get this far and it's successful */ 481 /* get this far and it's successful */
470 debugf3("%s(): success\n", __func__); 482 debugf3("%s(): success\n", __func__);
471 return 0; 483 return 0;
472 484
473fail: 485fail1:
474 if (mci != NULL) { 486 pci_dev_put(pvt->bridge_ck);
475 if(pvt != NULL && pvt->bridge_ck) 487
476 pci_dev_put(pvt->bridge_ck); 488fail0:
477 edac_mc_free(mci); 489 edac_mc_free(mci);
478 }
479 490
480 return rc; 491 return -ENODEV;
481} 492}
482 493
483/* returns count (>= 0), or negative on error */ 494/* returns count (>= 0), or negative on error */
@@ -498,7 +509,7 @@ static void __devexit e7xxx_remove_one(struct pci_dev *pdev)
498 509
499 debugf0("%s()\n", __func__); 510 debugf0("%s()\n", __func__);
500 511
501 if ((mci = edac_mc_del_mc(pdev)) == NULL) 512 if ((mci = edac_mc_del_mc(&pdev->dev)) == NULL)
502 return; 513 return;
503 514
504 pvt = (struct e7xxx_pvt *) mci->pvt_info; 515 pvt = (struct e7xxx_pvt *) mci->pvt_info;
diff --git a/drivers/edac/edac_mc.c b/drivers/edac/edac_mc.c
index ea06e3a4dc35..357c95f30fc6 100644
--- a/drivers/edac/edac_mc.c
+++ b/drivers/edac/edac_mc.c
@@ -54,16 +54,17 @@ static int log_ce = 1;
54static int panic_on_ue; 54static int panic_on_ue;
55static int poll_msec = 1000; 55static int poll_msec = 1000;
56 56
57static int check_pci_parity = 0; /* default YES check PCI parity */
58static int panic_on_pci_parity; /* default no panic on PCI Parity */
59static atomic_t pci_parity_count = ATOMIC_INIT(0);
60
61/* lock to memory controller's control array */ 57/* lock to memory controller's control array */
62static DECLARE_MUTEX(mem_ctls_mutex); 58static DECLARE_MUTEX(mem_ctls_mutex);
63static struct list_head mc_devices = LIST_HEAD_INIT(mc_devices); 59static struct list_head mc_devices = LIST_HEAD_INIT(mc_devices);
64 60
65static struct task_struct *edac_thread; 61static struct task_struct *edac_thread;
66 62
63#ifdef CONFIG_PCI
64static int check_pci_parity = 0; /* default YES check PCI parity */
65static int panic_on_pci_parity; /* default no panic on PCI Parity */
66static atomic_t pci_parity_count = ATOMIC_INIT(0);
67
67/* Structure of the whitelist and blacklist arrays */ 68/* Structure of the whitelist and blacklist arrays */
68struct edac_pci_device_list { 69struct edac_pci_device_list {
69 unsigned int vendor; /* Vendor ID */ 70 unsigned int vendor; /* Vendor ID */
@@ -80,6 +81,12 @@ static int pci_blacklist_count;
80static struct edac_pci_device_list pci_whitelist[MAX_LISTED_PCI_DEVICES]; 81static struct edac_pci_device_list pci_whitelist[MAX_LISTED_PCI_DEVICES];
81static int pci_whitelist_count ; 82static int pci_whitelist_count ;
82 83
84#ifndef DISABLE_EDAC_SYSFS
85static struct kobject edac_pci_kobj; /* /sys/devices/system/edac/pci */
86static struct completion edac_pci_kobj_complete;
87#endif /* DISABLE_EDAC_SYSFS */
88#endif /* CONFIG_PCI */
89
83/* START sysfs data and methods */ 90/* START sysfs data and methods */
84 91
85#ifndef DISABLE_EDAC_SYSFS 92#ifndef DISABLE_EDAC_SYSFS
@@ -127,18 +134,15 @@ static struct sysdev_class edac_class = {
127 set_kset_name("edac"), 134 set_kset_name("edac"),
128}; 135};
129 136
130/* sysfs objects: 137/* sysfs object:
131 * /sys/devices/system/edac/mc 138 * /sys/devices/system/edac/mc
132 * /sys/devices/system/edac/pci
133 */ 139 */
134static struct kobject edac_memctrl_kobj; 140static struct kobject edac_memctrl_kobj;
135static struct kobject edac_pci_kobj;
136 141
137/* We use these to wait for the reference counts on edac_memctrl_kobj and 142/* We use these to wait for the reference counts on edac_memctrl_kobj and
138 * edac_pci_kobj to reach 0. 143 * edac_pci_kobj to reach 0.
139 */ 144 */
140static struct completion edac_memctrl_kobj_complete; 145static struct completion edac_memctrl_kobj_complete;
141static struct completion edac_pci_kobj_complete;
142 146
143/* 147/*
144 * /sys/devices/system/edac/mc; 148 * /sys/devices/system/edac/mc;
@@ -324,6 +328,8 @@ static void edac_sysfs_memctrl_teardown(void)
324#endif /* DISABLE_EDAC_SYSFS */ 328#endif /* DISABLE_EDAC_SYSFS */
325} 329}
326 330
331#ifdef CONFIG_PCI
332
327#ifndef DISABLE_EDAC_SYSFS 333#ifndef DISABLE_EDAC_SYSFS
328 334
329/* 335/*
@@ -624,6 +630,252 @@ static void edac_sysfs_pci_teardown(void)
624#endif 630#endif
625} 631}
626 632
633
634static u16 get_pci_parity_status(struct pci_dev *dev, int secondary)
635{
636 int where;
637 u16 status;
638
639 where = secondary ? PCI_SEC_STATUS : PCI_STATUS;
640 pci_read_config_word(dev, where, &status);
641
642 /* If we get back 0xFFFF then we must suspect that the card has been
643 * pulled but the Linux PCI layer has not yet finished cleaning up.
644 * We don't want to report on such devices
645 */
646
647 if (status == 0xFFFF) {
648 u32 sanity;
649
650 pci_read_config_dword(dev, 0, &sanity);
651
652 if (sanity == 0xFFFFFFFF)
653 return 0;
654 }
655
656 status &= PCI_STATUS_DETECTED_PARITY | PCI_STATUS_SIG_SYSTEM_ERROR |
657 PCI_STATUS_PARITY;
658
659 if (status)
660 /* reset only the bits we are interested in */
661 pci_write_config_word(dev, where, status);
662
663 return status;
664}
665
666typedef void (*pci_parity_check_fn_t) (struct pci_dev *dev);
667
668/* Clear any PCI parity errors logged by this device. */
669static void edac_pci_dev_parity_clear(struct pci_dev *dev)
670{
671 u8 header_type;
672
673 get_pci_parity_status(dev, 0);
674
675 /* read the device TYPE, looking for bridges */
676 pci_read_config_byte(dev, PCI_HEADER_TYPE, &header_type);
677
678 if ((header_type & 0x7F) == PCI_HEADER_TYPE_BRIDGE)
679 get_pci_parity_status(dev, 1);
680}
681
682/*
683 * PCI Parity polling
684 *
685 */
686static void edac_pci_dev_parity_test(struct pci_dev *dev)
687{
688 u16 status;
689 u8 header_type;
690
691 /* read the STATUS register on this device
692 */
693 status = get_pci_parity_status(dev, 0);
694
695 debugf2("PCI STATUS= 0x%04x %s\n", status, dev->dev.bus_id );
696
697 /* check the status reg for errors */
698 if (status) {
699 if (status & (PCI_STATUS_SIG_SYSTEM_ERROR))
700 edac_printk(KERN_CRIT, EDAC_PCI,
701 "Signaled System Error on %s\n",
702 pci_name(dev));
703
704 if (status & (PCI_STATUS_PARITY)) {
705 edac_printk(KERN_CRIT, EDAC_PCI,
706 "Master Data Parity Error on %s\n",
707 pci_name(dev));
708
709 atomic_inc(&pci_parity_count);
710 }
711
712 if (status & (PCI_STATUS_DETECTED_PARITY)) {
713 edac_printk(KERN_CRIT, EDAC_PCI,
714 "Detected Parity Error on %s\n",
715 pci_name(dev));
716
717 atomic_inc(&pci_parity_count);
718 }
719 }
720
721 /* read the device TYPE, looking for bridges */
722 pci_read_config_byte(dev, PCI_HEADER_TYPE, &header_type);
723
724 debugf2("PCI HEADER TYPE= 0x%02x %s\n", header_type, dev->dev.bus_id );
725
726 if ((header_type & 0x7F) == PCI_HEADER_TYPE_BRIDGE) {
727 /* On bridges, need to examine secondary status register */
728 status = get_pci_parity_status(dev, 1);
729
730 debugf2("PCI SEC_STATUS= 0x%04x %s\n",
731 status, dev->dev.bus_id );
732
733 /* check the secondary status reg for errors */
734 if (status) {
735 if (status & (PCI_STATUS_SIG_SYSTEM_ERROR))
736 edac_printk(KERN_CRIT, EDAC_PCI, "Bridge "
737 "Signaled System Error on %s\n",
738 pci_name(dev));
739
740 if (status & (PCI_STATUS_PARITY)) {
741 edac_printk(KERN_CRIT, EDAC_PCI, "Bridge "
742 "Master Data Parity Error on "
743 "%s\n", pci_name(dev));
744
745 atomic_inc(&pci_parity_count);
746 }
747
748 if (status & (PCI_STATUS_DETECTED_PARITY)) {
749 edac_printk(KERN_CRIT, EDAC_PCI, "Bridge "
750 "Detected Parity Error on %s\n",
751 pci_name(dev));
752
753 atomic_inc(&pci_parity_count);
754 }
755 }
756 }
757}
758
759/*
760 * check_dev_on_list: Scan for a PCI device on a white/black list
761 * @list: an EDAC &edac_pci_device_list white/black list pointer
762 * @free_index: index of next free entry on the list
763 * @pci_dev: PCI Device pointer
764 *
765 * see if list contains the device.
766 *
767 * Returns: 0 not found
768 * 1 found on list
769 */
770static int check_dev_on_list(struct edac_pci_device_list *list,
771 int free_index, struct pci_dev *dev)
772{
773 int i;
774 int rc = 0; /* Assume not found */
775 unsigned short vendor=dev->vendor;
776 unsigned short device=dev->device;
777
778 /* Scan the list, looking for a vendor/device match */
779 for (i = 0; i < free_index; i++, list++ ) {
780 if ((list->vendor == vendor ) && (list->device == device )) {
781 rc = 1;
782 break;
783 }
784 }
785
786 return rc;
787}
788
789/*
790 * pci_dev parity list iterator
791 * Scan the PCI device list for one iteration, looking for SERRORs
792 * Master Parity ERRORS or Parity ERRORs on primary or secondary devices
793 */
794static inline void edac_pci_dev_parity_iterator(pci_parity_check_fn_t fn)
795{
796 struct pci_dev *dev = NULL;
797
798 /* request for kernel access to the next PCI device, if any,
799 * and while we are looking at it have its reference count
800 * bumped until we are done with it
801 */
802 while((dev = pci_get_device(PCI_ANY_ID, PCI_ANY_ID, dev)) != NULL) {
803 /* if whitelist exists then it has priority, so only scan
804 * those devices on the whitelist
805 */
806 if (pci_whitelist_count > 0 ) {
807 if (check_dev_on_list(pci_whitelist,
808 pci_whitelist_count, dev))
809 fn(dev);
810 } else {
811 /*
812 * if no whitelist, then check if this devices is
813 * blacklisted
814 */
815 if (!check_dev_on_list(pci_blacklist,
816 pci_blacklist_count, dev))
817 fn(dev);
818 }
819 }
820}
821
822static void do_pci_parity_check(void)
823{
824 unsigned long flags;
825 int before_count;
826
827 debugf3("%s()\n", __func__);
828
829 if (!check_pci_parity)
830 return;
831
832 before_count = atomic_read(&pci_parity_count);
833
834 /* scan all PCI devices looking for a Parity Error on devices and
835 * bridges
836 */
837 local_irq_save(flags);
838 edac_pci_dev_parity_iterator(edac_pci_dev_parity_test);
839 local_irq_restore(flags);
840
841 /* Only if operator has selected panic on PCI Error */
842 if (panic_on_pci_parity) {
843 /* If the count is different 'after' from 'before' */
844 if (before_count != atomic_read(&pci_parity_count))
845 panic("EDAC: PCI Parity Error");
846 }
847}
848
849static inline void clear_pci_parity_errors(void)
850{
851 /* Clear any PCI bus parity errors that devices initially have logged
852 * in their registers.
853 */
854 edac_pci_dev_parity_iterator(edac_pci_dev_parity_clear);
855}
856
857#else /* CONFIG_PCI */
858
859static inline void do_pci_parity_check(void)
860{
861 /* no-op */
862}
863
864static inline void clear_pci_parity_errors(void)
865{
866 /* no-op */
867}
868
869static void edac_sysfs_pci_teardown(void)
870{
871}
872
873static int edac_sysfs_pci_setup(void)
874{
875 return 0;
876}
877#endif /* CONFIG_PCI */
878
627#ifndef DISABLE_EDAC_SYSFS 879#ifndef DISABLE_EDAC_SYSFS
628 880
629/* EDAC sysfs CSROW data structures and methods */ 881/* EDAC sysfs CSROW data structures and methods */
@@ -1132,7 +1384,7 @@ static int edac_create_sysfs_mci_device(struct mem_ctl_info *mci)
1132 return err; 1384 return err;
1133 1385
1134 /* create a symlink for the device */ 1386 /* create a symlink for the device */
1135 err = sysfs_create_link(edac_mci_kobj, &mci->pdev->dev.kobj, 1387 err = sysfs_create_link(edac_mci_kobj, &mci->dev->kobj,
1136 EDAC_DEVICE_SYMLINK); 1388 EDAC_DEVICE_SYMLINK);
1137 1389
1138 if (err) 1390 if (err)
@@ -1238,7 +1490,7 @@ void edac_mc_dump_mci(struct mem_ctl_info *mci)
1238 debugf4("\tmci->edac_check = %p\n", mci->edac_check); 1490 debugf4("\tmci->edac_check = %p\n", mci->edac_check);
1239 debugf3("\tmci->nr_csrows = %d, csrows = %p\n", 1491 debugf3("\tmci->nr_csrows = %d, csrows = %p\n",
1240 mci->nr_csrows, mci->csrows); 1492 mci->nr_csrows, mci->csrows);
1241 debugf3("\tpdev = %p\n", mci->pdev); 1493 debugf3("\tdev = %p\n", mci->dev);
1242 debugf3("\tmod_name:ctl_name = %s:%s\n", 1494 debugf3("\tmod_name:ctl_name = %s:%s\n",
1243 mci->mod_name, mci->ctl_name); 1495 mci->mod_name, mci->ctl_name);
1244 debugf3("\tpvt_info = %p\n\n", mci->pvt_info); 1496 debugf3("\tpvt_info = %p\n\n", mci->pvt_info);
@@ -1363,7 +1615,7 @@ void edac_mc_free(struct mem_ctl_info *mci)
1363} 1615}
1364EXPORT_SYMBOL_GPL(edac_mc_free); 1616EXPORT_SYMBOL_GPL(edac_mc_free);
1365 1617
1366static struct mem_ctl_info *find_mci_by_pdev(struct pci_dev *pdev) 1618static struct mem_ctl_info *find_mci_by_dev(struct device *dev)
1367{ 1619{
1368 struct mem_ctl_info *mci; 1620 struct mem_ctl_info *mci;
1369 struct list_head *item; 1621 struct list_head *item;
@@ -1373,54 +1625,53 @@ static struct mem_ctl_info *find_mci_by_pdev(struct pci_dev *pdev)
1373 list_for_each(item, &mc_devices) { 1625 list_for_each(item, &mc_devices) {
1374 mci = list_entry(item, struct mem_ctl_info, link); 1626 mci = list_entry(item, struct mem_ctl_info, link);
1375 1627
1376 if (mci->pdev == pdev) 1628 if (mci->dev == dev)
1377 return mci; 1629 return mci;
1378 } 1630 }
1379 1631
1380 return NULL; 1632 return NULL;
1381} 1633}
1382 1634
1383static int add_mc_to_global_list(struct mem_ctl_info *mci) 1635/* Return 0 on success, 1 on failure.
1636 * Before calling this function, caller must
1637 * assign a unique value to mci->mc_idx.
1638 */
1639static int add_mc_to_global_list (struct mem_ctl_info *mci)
1384{ 1640{
1385 struct list_head *item, *insert_before; 1641 struct list_head *item, *insert_before;
1386 struct mem_ctl_info *p; 1642 struct mem_ctl_info *p;
1387 int i;
1388 1643
1389 if (list_empty(&mc_devices)) { 1644 insert_before = &mc_devices;
1390 mci->mc_idx = 0;
1391 insert_before = &mc_devices;
1392 } else {
1393 if (find_mci_by_pdev(mci->pdev)) {
1394 edac_printk(KERN_WARNING, EDAC_MC,
1395 "%s (%s) %s %s already assigned %d\n",
1396 mci->pdev->dev.bus_id,
1397 pci_name(mci->pdev), mci->mod_name,
1398 mci->ctl_name, mci->mc_idx);
1399 return 1;
1400 }
1401 1645
1402 insert_before = NULL; 1646 if (unlikely((p = find_mci_by_dev(mci->dev)) != NULL))
1403 i = 0; 1647 goto fail0;
1404 1648
1405 list_for_each(item, &mc_devices) { 1649 list_for_each(item, &mc_devices) {
1406 p = list_entry(item, struct mem_ctl_info, link); 1650 p = list_entry(item, struct mem_ctl_info, link);
1407 1651
1408 if (p->mc_idx != i) { 1652 if (p->mc_idx >= mci->mc_idx) {
1409 insert_before = item; 1653 if (unlikely(p->mc_idx == mci->mc_idx))
1410 break; 1654 goto fail1;
1411 }
1412 1655
1413 i++; 1656 insert_before = item;
1657 break;
1414 } 1658 }
1415
1416 mci->mc_idx = i;
1417
1418 if (insert_before == NULL)
1419 insert_before = &mc_devices;
1420 } 1659 }
1421 1660
1422 list_add_tail_rcu(&mci->link, insert_before); 1661 list_add_tail_rcu(&mci->link, insert_before);
1423 return 0; 1662 return 0;
1663
1664fail0:
1665 edac_printk(KERN_WARNING, EDAC_MC,
1666 "%s (%s) %s %s already assigned %d\n", p->dev->bus_id,
1667 dev_name(p->dev), p->mod_name, p->ctl_name, p->mc_idx);
1668 return 1;
1669
1670fail1:
1671 edac_printk(KERN_WARNING, EDAC_MC,
1672 "bug in low-level driver: attempt to assign\n"
1673 " duplicate mc_idx %d in %s()\n", p->mc_idx, __func__);
1674 return 1;
1424} 1675}
1425 1676
1426static void complete_mc_list_del(struct rcu_head *head) 1677static void complete_mc_list_del(struct rcu_head *head)
@@ -1444,6 +1695,7 @@ static void del_mc_from_global_list(struct mem_ctl_info *mci)
1444 * edac_mc_add_mc: Insert the 'mci' structure into the mci global list and 1695 * edac_mc_add_mc: Insert the 'mci' structure into the mci global list and
1445 * create sysfs entries associated with mci structure 1696 * create sysfs entries associated with mci structure
1446 * @mci: pointer to the mci structure to be added to the list 1697 * @mci: pointer to the mci structure to be added to the list
1698 * @mc_idx: A unique numeric identifier to be assigned to the 'mci' structure.
1447 * 1699 *
1448 * Return: 1700 * Return:
1449 * 0 Success 1701 * 0 Success
@@ -1451,9 +1703,10 @@ static void del_mc_from_global_list(struct mem_ctl_info *mci)
1451 */ 1703 */
1452 1704
1453/* FIXME - should a warning be printed if no error detection? correction? */ 1705/* FIXME - should a warning be printed if no error detection? correction? */
1454int edac_mc_add_mc(struct mem_ctl_info *mci) 1706int edac_mc_add_mc(struct mem_ctl_info *mci, int mc_idx)
1455{ 1707{
1456 debugf0("%s()\n", __func__); 1708 debugf0("%s()\n", __func__);
1709 mci->mc_idx = mc_idx;
1457#ifdef CONFIG_EDAC_DEBUG 1710#ifdef CONFIG_EDAC_DEBUG
1458 if (edac_debug_level >= 3) 1711 if (edac_debug_level >= 3)
1459 edac_mc_dump_mci(mci); 1712 edac_mc_dump_mci(mci);
@@ -1486,8 +1739,8 @@ int edac_mc_add_mc(struct mem_ctl_info *mci)
1486 } 1739 }
1487 1740
1488 /* Report action taken */ 1741 /* Report action taken */
1489 edac_mc_printk(mci, KERN_INFO, "Giving out device to %s %s: PCI %s\n", 1742 edac_mc_printk(mci, KERN_INFO, "Giving out device to %s %s: DEV %s\n",
1490 mci->mod_name, mci->ctl_name, pci_name(mci->pdev)); 1743 mci->mod_name, mci->ctl_name, dev_name(mci->dev));
1491 1744
1492 up(&mem_ctls_mutex); 1745 up(&mem_ctls_mutex);
1493 return 0; 1746 return 0;
@@ -1504,18 +1757,18 @@ EXPORT_SYMBOL_GPL(edac_mc_add_mc);
1504/** 1757/**
1505 * edac_mc_del_mc: Remove sysfs entries for specified mci structure and 1758 * edac_mc_del_mc: Remove sysfs entries for specified mci structure and
1506 * remove mci structure from global list 1759 * remove mci structure from global list
1507 * @pdev: Pointer to 'struct pci_dev' representing mci structure to remove. 1760 * @pdev: Pointer to 'struct device' representing mci structure to remove.
1508 * 1761 *
1509 * Return pointer to removed mci structure, or NULL if device not found. 1762 * Return pointer to removed mci structure, or NULL if device not found.
1510 */ 1763 */
1511struct mem_ctl_info * edac_mc_del_mc(struct pci_dev *pdev) 1764struct mem_ctl_info * edac_mc_del_mc(struct device *dev)
1512{ 1765{
1513 struct mem_ctl_info *mci; 1766 struct mem_ctl_info *mci;
1514 1767
1515 debugf0("MC: %s()\n", __func__); 1768 debugf0("MC: %s()\n", __func__);
1516 down(&mem_ctls_mutex); 1769 down(&mem_ctls_mutex);
1517 1770
1518 if ((mci = find_mci_by_pdev(pdev)) == NULL) { 1771 if ((mci = find_mci_by_dev(dev)) == NULL) {
1519 up(&mem_ctls_mutex); 1772 up(&mem_ctls_mutex);
1520 return NULL; 1773 return NULL;
1521 } 1774 }
@@ -1524,8 +1777,8 @@ struct mem_ctl_info * edac_mc_del_mc(struct pci_dev *pdev)
1524 del_mc_from_global_list(mci); 1777 del_mc_from_global_list(mci);
1525 up(&mem_ctls_mutex); 1778 up(&mem_ctls_mutex);
1526 edac_printk(KERN_INFO, EDAC_MC, 1779 edac_printk(KERN_INFO, EDAC_MC,
1527 "Removed device %d for %s %s: PCI %s\n", mci->mc_idx, 1780 "Removed device %d for %s %s: DEV %s\n", mci->mc_idx,
1528 mci->mod_name, mci->ctl_name, pci_name(mci->pdev)); 1781 mci->mod_name, mci->ctl_name, dev_name(mci->dev));
1529 return mci; 1782 return mci;
1530} 1783}
1531EXPORT_SYMBOL_GPL(edac_mc_del_mc); 1784EXPORT_SYMBOL_GPL(edac_mc_del_mc);
@@ -1739,244 +1992,6 @@ void edac_mc_handle_ue_no_info(struct mem_ctl_info *mci, const char *msg)
1739} 1992}
1740EXPORT_SYMBOL_GPL(edac_mc_handle_ue_no_info); 1993EXPORT_SYMBOL_GPL(edac_mc_handle_ue_no_info);
1741 1994
1742#ifdef CONFIG_PCI
1743
1744static u16 get_pci_parity_status(struct pci_dev *dev, int secondary)
1745{
1746 int where;
1747 u16 status;
1748
1749 where = secondary ? PCI_SEC_STATUS : PCI_STATUS;
1750 pci_read_config_word(dev, where, &status);
1751
1752 /* If we get back 0xFFFF then we must suspect that the card has been
1753 * pulled but the Linux PCI layer has not yet finished cleaning up.
1754 * We don't want to report on such devices
1755 */
1756
1757 if (status == 0xFFFF) {
1758 u32 sanity;
1759
1760 pci_read_config_dword(dev, 0, &sanity);
1761
1762 if (sanity == 0xFFFFFFFF)
1763 return 0;
1764 }
1765
1766 status &= PCI_STATUS_DETECTED_PARITY | PCI_STATUS_SIG_SYSTEM_ERROR |
1767 PCI_STATUS_PARITY;
1768
1769 if (status)
1770 /* reset only the bits we are interested in */
1771 pci_write_config_word(dev, where, status);
1772
1773 return status;
1774}
1775
1776typedef void (*pci_parity_check_fn_t) (struct pci_dev *dev);
1777
1778/* Clear any PCI parity errors logged by this device. */
1779static void edac_pci_dev_parity_clear(struct pci_dev *dev)
1780{
1781 u8 header_type;
1782
1783 get_pci_parity_status(dev, 0);
1784
1785 /* read the device TYPE, looking for bridges */
1786 pci_read_config_byte(dev, PCI_HEADER_TYPE, &header_type);
1787
1788 if ((header_type & 0x7F) == PCI_HEADER_TYPE_BRIDGE)
1789 get_pci_parity_status(dev, 1);
1790}
1791
1792/*
1793 * PCI Parity polling
1794 *
1795 */
1796static void edac_pci_dev_parity_test(struct pci_dev *dev)
1797{
1798 u16 status;
1799 u8 header_type;
1800
1801 /* read the STATUS register on this device
1802 */
1803 status = get_pci_parity_status(dev, 0);
1804
1805 debugf2("PCI STATUS= 0x%04x %s\n", status, dev->dev.bus_id );
1806
1807 /* check the status reg for errors */
1808 if (status) {
1809 if (status & (PCI_STATUS_SIG_SYSTEM_ERROR))
1810 edac_printk(KERN_CRIT, EDAC_PCI,
1811 "Signaled System Error on %s\n",
1812 pci_name(dev));
1813
1814 if (status & (PCI_STATUS_PARITY)) {
1815 edac_printk(KERN_CRIT, EDAC_PCI,
1816 "Master Data Parity Error on %s\n",
1817 pci_name(dev));
1818
1819 atomic_inc(&pci_parity_count);
1820 }
1821
1822 if (status & (PCI_STATUS_DETECTED_PARITY)) {
1823 edac_printk(KERN_CRIT, EDAC_PCI,
1824 "Detected Parity Error on %s\n",
1825 pci_name(dev));
1826
1827 atomic_inc(&pci_parity_count);
1828 }
1829 }
1830
1831 /* read the device TYPE, looking for bridges */
1832 pci_read_config_byte(dev, PCI_HEADER_TYPE, &header_type);
1833
1834 debugf2("PCI HEADER TYPE= 0x%02x %s\n", header_type, dev->dev.bus_id );
1835
1836 if ((header_type & 0x7F) == PCI_HEADER_TYPE_BRIDGE) {
1837 /* On bridges, need to examine secondary status register */
1838 status = get_pci_parity_status(dev, 1);
1839
1840 debugf2("PCI SEC_STATUS= 0x%04x %s\n",
1841 status, dev->dev.bus_id );
1842
1843 /* check the secondary status reg for errors */
1844 if (status) {
1845 if (status & (PCI_STATUS_SIG_SYSTEM_ERROR))
1846 edac_printk(KERN_CRIT, EDAC_PCI, "Bridge "
1847 "Signaled System Error on %s\n",
1848 pci_name(dev));
1849
1850 if (status & (PCI_STATUS_PARITY)) {
1851 edac_printk(KERN_CRIT, EDAC_PCI, "Bridge "
1852 "Master Data Parity Error on "
1853 "%s\n", pci_name(dev));
1854
1855 atomic_inc(&pci_parity_count);
1856 }
1857
1858 if (status & (PCI_STATUS_DETECTED_PARITY)) {
1859 edac_printk(KERN_CRIT, EDAC_PCI, "Bridge "
1860 "Detected Parity Error on %s\n",
1861 pci_name(dev));
1862
1863 atomic_inc(&pci_parity_count);
1864 }
1865 }
1866 }
1867}
1868
1869/*
1870 * check_dev_on_list: Scan for a PCI device on a white/black list
1871 * @list: an EDAC &edac_pci_device_list white/black list pointer
1872 * @free_index: index of next free entry on the list
1873 * @pci_dev: PCI Device pointer
1874 *
1875 * see if list contains the device.
1876 *
1877 * Returns: 0 not found
1878 * 1 found on list
1879 */
1880static int check_dev_on_list(struct edac_pci_device_list *list,
1881 int free_index, struct pci_dev *dev)
1882{
1883 int i;
1884 int rc = 0; /* Assume not found */
1885 unsigned short vendor=dev->vendor;
1886 unsigned short device=dev->device;
1887
1888 /* Scan the list, looking for a vendor/device match */
1889 for (i = 0; i < free_index; i++, list++ ) {
1890 if ((list->vendor == vendor ) && (list->device == device )) {
1891 rc = 1;
1892 break;
1893 }
1894 }
1895
1896 return rc;
1897}
1898
1899/*
1900 * pci_dev parity list iterator
1901 * Scan the PCI device list for one iteration, looking for SERRORs
1902 * Master Parity ERRORS or Parity ERRORs on primary or secondary devices
1903 */
1904static inline void edac_pci_dev_parity_iterator(pci_parity_check_fn_t fn)
1905{
1906 struct pci_dev *dev = NULL;
1907
1908 /* request for kernel access to the next PCI device, if any,
1909 * and while we are looking at it have its reference count
1910 * bumped until we are done with it
1911 */
1912 while((dev = pci_get_device(PCI_ANY_ID, PCI_ANY_ID, dev)) != NULL) {
1913 /* if whitelist exists then it has priority, so only scan
1914 * those devices on the whitelist
1915 */
1916 if (pci_whitelist_count > 0 ) {
1917 if (check_dev_on_list(pci_whitelist,
1918 pci_whitelist_count, dev))
1919 fn(dev);
1920 } else {
1921 /*
1922 * if no whitelist, then check if this devices is
1923 * blacklisted
1924 */
1925 if (!check_dev_on_list(pci_blacklist,
1926 pci_blacklist_count, dev))
1927 fn(dev);
1928 }
1929 }
1930}
1931
1932static void do_pci_parity_check(void)
1933{
1934 unsigned long flags;
1935 int before_count;
1936
1937 debugf3("%s()\n", __func__);
1938
1939 if (!check_pci_parity)
1940 return;
1941
1942 before_count = atomic_read(&pci_parity_count);
1943
1944 /* scan all PCI devices looking for a Parity Error on devices and
1945 * bridges
1946 */
1947 local_irq_save(flags);
1948 edac_pci_dev_parity_iterator(edac_pci_dev_parity_test);
1949 local_irq_restore(flags);
1950
1951 /* Only if operator has selected panic on PCI Error */
1952 if (panic_on_pci_parity) {
1953 /* If the count is different 'after' from 'before' */
1954 if (before_count != atomic_read(&pci_parity_count))
1955 panic("EDAC: PCI Parity Error");
1956 }
1957}
1958
1959static inline void clear_pci_parity_errors(void)
1960{
1961 /* Clear any PCI bus parity errors that devices initially have logged
1962 * in their registers.
1963 */
1964 edac_pci_dev_parity_iterator(edac_pci_dev_parity_clear);
1965}
1966
1967#else /* CONFIG_PCI */
1968
1969static inline void do_pci_parity_check(void)
1970{
1971 /* no-op */
1972}
1973
1974static inline void clear_pci_parity_errors(void)
1975{
1976 /* no-op */
1977}
1978
1979#endif /* CONFIG_PCI */
1980 1995
1981/* 1996/*
1982 * Iterate over all MC instances and check for ECC, et al, errors 1997 * Iterate over all MC instances and check for ECC, et al, errors
@@ -2096,10 +2111,12 @@ MODULE_DESCRIPTION("Core library routines for MC reporting");
2096 2111
2097module_param(panic_on_ue, int, 0644); 2112module_param(panic_on_ue, int, 0644);
2098MODULE_PARM_DESC(panic_on_ue, "Panic on uncorrected error: 0=off 1=on"); 2113MODULE_PARM_DESC(panic_on_ue, "Panic on uncorrected error: 0=off 1=on");
2114#ifdef CONFIG_PCI
2099module_param(check_pci_parity, int, 0644); 2115module_param(check_pci_parity, int, 0644);
2100MODULE_PARM_DESC(check_pci_parity, "Check for PCI bus parity errors: 0=off 1=on"); 2116MODULE_PARM_DESC(check_pci_parity, "Check for PCI bus parity errors: 0=off 1=on");
2101module_param(panic_on_pci_parity, int, 0644); 2117module_param(panic_on_pci_parity, int, 0644);
2102MODULE_PARM_DESC(panic_on_pci_parity, "Panic on PCI Bus Parity error: 0=off 1=on"); 2118MODULE_PARM_DESC(panic_on_pci_parity, "Panic on PCI Bus Parity error: 0=off 1=on");
2119#endif
2103module_param(log_ue, int, 0644); 2120module_param(log_ue, int, 0644);
2104MODULE_PARM_DESC(log_ue, "Log uncorrectable error to console: 0=off 1=on"); 2121MODULE_PARM_DESC(log_ue, "Log uncorrectable error to console: 0=off 1=on");
2105module_param(log_ce, int, 0644); 2122module_param(log_ce, int, 0644);
diff --git a/drivers/edac/edac_mc.h b/drivers/edac/edac_mc.h
index 8d9e83909b9c..342979677d2f 100644
--- a/drivers/edac/edac_mc.h
+++ b/drivers/edac/edac_mc.h
@@ -88,6 +88,12 @@ extern int edac_debug_level;
88#define PCI_VEND_DEV(vend, dev) PCI_VENDOR_ID_ ## vend, \ 88#define PCI_VEND_DEV(vend, dev) PCI_VENDOR_ID_ ## vend, \
89 PCI_DEVICE_ID_ ## vend ## _ ## dev 89 PCI_DEVICE_ID_ ## vend ## _ ## dev
90 90
91#if defined(CONFIG_X86) && defined(CONFIG_PCI)
92#define dev_name(dev) pci_name(to_pci_dev(dev))
93#else
94#define dev_name(dev) to_platform_device(dev)->name
95#endif
96
91/* memory devices */ 97/* memory devices */
92enum dev_type { 98enum dev_type {
93 DEV_UNKNOWN = 0, 99 DEV_UNKNOWN = 0,
@@ -327,10 +333,10 @@ struct mem_ctl_info {
327 struct csrow_info *csrows; 333 struct csrow_info *csrows;
328 /* 334 /*
329 * FIXME - what about controllers on other busses? - IDs must be 335 * FIXME - what about controllers on other busses? - IDs must be
330 * unique. pdev pointer should be sufficiently unique, but 336 * unique. dev pointer should be sufficiently unique, but
331 * BUS:SLOT.FUNC numbers may not be unique. 337 * BUS:SLOT.FUNC numbers may not be unique.
332 */ 338 */
333 struct pci_dev *pdev; 339 struct device *dev;
334 const char *mod_name; 340 const char *mod_name;
335 const char *mod_ver; 341 const char *mod_ver;
336 const char *ctl_name; 342 const char *ctl_name;
@@ -353,6 +359,8 @@ struct mem_ctl_info {
353 struct completion kobj_complete; 359 struct completion kobj_complete;
354}; 360};
355 361
362#ifdef CONFIG_PCI
363
356/* write all or some bits in a byte-register*/ 364/* write all or some bits in a byte-register*/
357static inline void pci_write_bits8(struct pci_dev *pdev, int offset, u8 value, 365static inline void pci_write_bits8(struct pci_dev *pdev, int offset, u8 value,
358 u8 mask) 366 u8 mask)
@@ -401,14 +409,16 @@ static inline void pci_write_bits32(struct pci_dev *pdev, int offset,
401 pci_write_config_dword(pdev, offset, value); 409 pci_write_config_dword(pdev, offset, value);
402} 410}
403 411
412#endif /* CONFIG_PCI */
413
404#ifdef CONFIG_EDAC_DEBUG 414#ifdef CONFIG_EDAC_DEBUG
405void edac_mc_dump_channel(struct channel_info *chan); 415void edac_mc_dump_channel(struct channel_info *chan);
406void edac_mc_dump_mci(struct mem_ctl_info *mci); 416void edac_mc_dump_mci(struct mem_ctl_info *mci);
407void edac_mc_dump_csrow(struct csrow_info *csrow); 417void edac_mc_dump_csrow(struct csrow_info *csrow);
408#endif /* CONFIG_EDAC_DEBUG */ 418#endif /* CONFIG_EDAC_DEBUG */
409 419
410extern int edac_mc_add_mc(struct mem_ctl_info *mci); 420extern int edac_mc_add_mc(struct mem_ctl_info *mci,int mc_idx);
411extern struct mem_ctl_info * edac_mc_del_mc(struct pci_dev *pdev); 421extern struct mem_ctl_info * edac_mc_del_mc(struct device *dev);
412extern int edac_mc_find_csrow_by_page(struct mem_ctl_info *mci, 422extern int edac_mc_find_csrow_by_page(struct mem_ctl_info *mci,
413 unsigned long page); 423 unsigned long page);
414extern void edac_mc_scrub_block(unsigned long page, unsigned long offset, 424extern void edac_mc_scrub_block(unsigned long page, unsigned long offset,
diff --git a/drivers/edac/i82860_edac.c b/drivers/edac/i82860_edac.c
index fd342163cf97..d196dcc850a8 100644
--- a/drivers/edac/i82860_edac.c
+++ b/drivers/edac/i82860_edac.c
@@ -17,6 +17,8 @@
17#include <linux/slab.h> 17#include <linux/slab.h>
18#include "edac_mc.h" 18#include "edac_mc.h"
19 19
20#define I82860_REVISION " Ver: 2.0.0 " __DATE__
21
20#define i82860_printk(level, fmt, arg...) \ 22#define i82860_printk(level, fmt, arg...) \
21 edac_printk(level, "i82860", fmt, ##arg) 23 edac_printk(level, "i82860", fmt, ##arg)
22 24
@@ -63,17 +65,21 @@ static struct pci_dev *mci_pdev = NULL; /* init dev: in case that AGP code
63static void i82860_get_error_info(struct mem_ctl_info *mci, 65static void i82860_get_error_info(struct mem_ctl_info *mci,
64 struct i82860_error_info *info) 66 struct i82860_error_info *info)
65{ 67{
68 struct pci_dev *pdev;
69
70 pdev = to_pci_dev(mci->dev);
71
66 /* 72 /*
67 * This is a mess because there is no atomic way to read all the 73 * This is a mess because there is no atomic way to read all the
68 * registers at once and the registers can transition from CE being 74 * registers at once and the registers can transition from CE being
69 * overwritten by UE. 75 * overwritten by UE.
70 */ 76 */
71 pci_read_config_word(mci->pdev, I82860_ERRSTS, &info->errsts); 77 pci_read_config_word(pdev, I82860_ERRSTS, &info->errsts);
72 pci_read_config_dword(mci->pdev, I82860_EAP, &info->eap); 78 pci_read_config_dword(pdev, I82860_EAP, &info->eap);
73 pci_read_config_word(mci->pdev, I82860_DERRCTL_STS, &info->derrsyn); 79 pci_read_config_word(pdev, I82860_DERRCTL_STS, &info->derrsyn);
74 pci_read_config_word(mci->pdev, I82860_ERRSTS, &info->errsts2); 80 pci_read_config_word(pdev, I82860_ERRSTS, &info->errsts2);
75 81
76 pci_write_bits16(mci->pdev, I82860_ERRSTS, 0x0003, 0x0003); 82 pci_write_bits16(pdev, I82860_ERRSTS, 0x0003, 0x0003);
77 83
78 /* 84 /*
79 * If the error is the same for both reads then the first set of reads 85 * If the error is the same for both reads then the first set of reads
@@ -84,8 +90,8 @@ static void i82860_get_error_info(struct mem_ctl_info *mci,
84 return; 90 return;
85 91
86 if ((info->errsts ^ info->errsts2) & 0x0003) { 92 if ((info->errsts ^ info->errsts2) & 0x0003) {
87 pci_read_config_dword(mci->pdev, I82860_EAP, &info->eap); 93 pci_read_config_dword(pdev, I82860_EAP, &info->eap);
88 pci_read_config_word(mci->pdev, I82860_DERRCTL_STS, 94 pci_read_config_word(pdev, I82860_DERRCTL_STS,
89 &info->derrsyn); 95 &info->derrsyn);
90 } 96 }
91} 97}
@@ -127,15 +133,50 @@ static void i82860_check(struct mem_ctl_info *mci)
127 i82860_process_error_info(mci, &info, 1); 133 i82860_process_error_info(mci, &info, 1);
128} 134}
129 135
130static int i82860_probe1(struct pci_dev *pdev, int dev_idx) 136static void i82860_init_csrows(struct mem_ctl_info *mci, struct pci_dev *pdev)
131{ 137{
132 int rc = -ENODEV;
133 int index;
134 struct mem_ctl_info *mci = NULL;
135 unsigned long last_cumul_size; 138 unsigned long last_cumul_size;
136 struct i82860_error_info discard; 139 u16 mchcfg_ddim; /* DRAM Data Integrity Mode 0=none, 2=edac */
140 u16 value;
141 u32 cumul_size;
142 struct csrow_info *csrow;
143 int index;
144
145 pci_read_config_word(pdev, I82860_MCHCFG, &mchcfg_ddim);
146 mchcfg_ddim = mchcfg_ddim & 0x180;
147 last_cumul_size = 0;
148
149 /* The group row boundary (GRA) reg values are boundary address
150 * for each DRAM row with a granularity of 16MB. GRA regs are
151 * cumulative; therefore GRA15 will contain the total memory contained
152 * in all eight rows.
153 */
154 for (index = 0; index < mci->nr_csrows; index++) {
155 csrow = &mci->csrows[index];
156 pci_read_config_word(pdev, I82860_GBA + index * 2, &value);
157 cumul_size = (value & I82860_GBA_MASK) <<
158 (I82860_GBA_SHIFT - PAGE_SHIFT);
159 debugf3("%s(): (%d) cumul_size 0x%x\n", __func__, index,
160 cumul_size);
137 161
138 u16 mchcfg_ddim; /* DRAM Data Integrity Mode 0=none,2=edac */ 162 if (cumul_size == last_cumul_size)
163 continue; /* not populated */
164
165 csrow->first_page = last_cumul_size;
166 csrow->last_page = cumul_size - 1;
167 csrow->nr_pages = cumul_size - last_cumul_size;
168 last_cumul_size = cumul_size;
169 csrow->grain = 1 << 12; /* I82860_EAP has 4KiB reolution */
170 csrow->mtype = MEM_RMBS;
171 csrow->dtype = DEV_UNKNOWN;
172 csrow->edac_mode = mchcfg_ddim ? EDAC_SECDED : EDAC_NONE;
173 }
174}
175
176static int i82860_probe1(struct pci_dev *pdev, int dev_idx)
177{
178 struct mem_ctl_info *mci;
179 struct i82860_error_info discard;
139 180
140 /* RDRAM has channels but these don't map onto the abstractions that 181 /* RDRAM has channels but these don't map onto the abstractions that
141 edac uses. 182 edac uses.
@@ -151,67 +192,35 @@ static int i82860_probe1(struct pci_dev *pdev, int dev_idx)
151 return -ENOMEM; 192 return -ENOMEM;
152 193
153 debugf3("%s(): init mci\n", __func__); 194 debugf3("%s(): init mci\n", __func__);
154 mci->pdev = pdev; 195 mci->dev = &pdev->dev;
155 mci->mtype_cap = MEM_FLAG_DDR; 196 mci->mtype_cap = MEM_FLAG_DDR;
156
157 mci->edac_ctl_cap = EDAC_FLAG_NONE | EDAC_FLAG_SECDED; 197 mci->edac_ctl_cap = EDAC_FLAG_NONE | EDAC_FLAG_SECDED;
158 /* I"m not sure about this but I think that all RDRAM is SECDED */ 198 /* I"m not sure about this but I think that all RDRAM is SECDED */
159 mci->edac_cap = EDAC_FLAG_SECDED; 199 mci->edac_cap = EDAC_FLAG_SECDED;
160 /* adjust FLAGS */
161
162 mci->mod_name = EDAC_MOD_STR; 200 mci->mod_name = EDAC_MOD_STR;
163 mci->mod_ver = "$Revision: 1.1.2.6 $"; 201 mci->mod_ver = I82860_REVISION;
164 mci->ctl_name = i82860_devs[dev_idx].ctl_name; 202 mci->ctl_name = i82860_devs[dev_idx].ctl_name;
165 mci->edac_check = i82860_check; 203 mci->edac_check = i82860_check;
166 mci->ctl_page_to_phys = NULL; 204 mci->ctl_page_to_phys = NULL;
205 i82860_init_csrows(mci, pdev);
206 i82860_get_error_info(mci, &discard); /* clear counters */
167 207
168 pci_read_config_word(mci->pdev, I82860_MCHCFG, &mchcfg_ddim); 208 /* Here we assume that we will never see multiple instances of this
169 mchcfg_ddim = mchcfg_ddim & 0x180; 209 * type of memory controller. The ID is therefore hardcoded to 0.
170
171 /*
172 * The group row boundary (GRA) reg values are boundary address
173 * for each DRAM row with a granularity of 16MB. GRA regs are
174 * cumulative; therefore GRA15 will contain the total memory contained
175 * in all eight rows.
176 */ 210 */
177 for (last_cumul_size = index = 0; index < mci->nr_csrows; index++) { 211 if (edac_mc_add_mc(mci,0)) {
178 u16 value; 212 debugf3("%s(): failed edac_mc_add_mc()\n", __func__);
179 u32 cumul_size; 213 goto fail;
180 struct csrow_info *csrow = &mci->csrows[index];
181
182 pci_read_config_word(mci->pdev, I82860_GBA + index * 2,
183 &value);
184
185 cumul_size = (value & I82860_GBA_MASK) <<
186 (I82860_GBA_SHIFT - PAGE_SHIFT);
187 debugf3("%s(): (%d) cumul_size 0x%x\n", __func__, index,
188 cumul_size);
189
190 if (cumul_size == last_cumul_size)
191 continue; /* not populated */
192
193 csrow->first_page = last_cumul_size;
194 csrow->last_page = cumul_size - 1;
195 csrow->nr_pages = cumul_size - last_cumul_size;
196 last_cumul_size = cumul_size;
197 csrow->grain = 1 << 12; /* I82860_EAP has 4KiB reolution */
198 csrow->mtype = MEM_RMBS;
199 csrow->dtype = DEV_UNKNOWN;
200 csrow->edac_mode = mchcfg_ddim ? EDAC_SECDED : EDAC_NONE;
201 } 214 }
202 215
203 i82860_get_error_info(mci, &discard); /* clear counters */ 216 /* get this far and it's successful */
217 debugf3("%s(): success\n", __func__);
204 218
205 if (edac_mc_add_mc(mci)) { 219 return 0;
206 debugf3("%s(): failed edac_mc_add_mc()\n", __func__);
207 edac_mc_free(mci);
208 } else {
209 /* get this far and it's successful */
210 debugf3("%s(): success\n", __func__);
211 rc = 0;
212 }
213 220
214 return rc; 221fail:
222 edac_mc_free(mci);
223 return -ENODEV;
215} 224}
216 225
217/* returns count (>= 0), or negative on error */ 226/* returns count (>= 0), or negative on error */
@@ -240,7 +249,7 @@ static void __devexit i82860_remove_one(struct pci_dev *pdev)
240 249
241 debugf0("%s()\n", __func__); 250 debugf0("%s()\n", __func__);
242 251
243 if ((mci = edac_mc_del_mc(pdev)) == NULL) 252 if ((mci = edac_mc_del_mc(&pdev->dev)) == NULL)
244 return; 253 return;
245 254
246 edac_mc_free(mci); 255 edac_mc_free(mci);
diff --git a/drivers/edac/i82875p_edac.c b/drivers/edac/i82875p_edac.c
index 0aec92698f17..6787403463a1 100644
--- a/drivers/edac/i82875p_edac.c
+++ b/drivers/edac/i82875p_edac.c
@@ -21,6 +21,8 @@
21#include <linux/slab.h> 21#include <linux/slab.h>
22#include "edac_mc.h" 22#include "edac_mc.h"
23 23
24#define I82875P_REVISION " Ver: 2.0.0 " __DATE__
25
24#define i82875p_printk(level, fmt, arg...) \ 26#define i82875p_printk(level, fmt, arg...) \
25 edac_printk(level, "i82875p", fmt, ##arg) 27 edac_printk(level, "i82875p", fmt, ##arg)
26 28
@@ -185,18 +187,22 @@ static int i82875p_registered = 1;
185static void i82875p_get_error_info(struct mem_ctl_info *mci, 187static void i82875p_get_error_info(struct mem_ctl_info *mci,
186 struct i82875p_error_info *info) 188 struct i82875p_error_info *info)
187{ 189{
190 struct pci_dev *pdev;
191
192 pdev = to_pci_dev(mci->dev);
193
188 /* 194 /*
189 * This is a mess because there is no atomic way to read all the 195 * This is a mess because there is no atomic way to read all the
190 * registers at once and the registers can transition from CE being 196 * registers at once and the registers can transition from CE being
191 * overwritten by UE. 197 * overwritten by UE.
192 */ 198 */
193 pci_read_config_word(mci->pdev, I82875P_ERRSTS, &info->errsts); 199 pci_read_config_word(pdev, I82875P_ERRSTS, &info->errsts);
194 pci_read_config_dword(mci->pdev, I82875P_EAP, &info->eap); 200 pci_read_config_dword(pdev, I82875P_EAP, &info->eap);
195 pci_read_config_byte(mci->pdev, I82875P_DES, &info->des); 201 pci_read_config_byte(pdev, I82875P_DES, &info->des);
196 pci_read_config_byte(mci->pdev, I82875P_DERRSYN, &info->derrsyn); 202 pci_read_config_byte(pdev, I82875P_DERRSYN, &info->derrsyn);
197 pci_read_config_word(mci->pdev, I82875P_ERRSTS, &info->errsts2); 203 pci_read_config_word(pdev, I82875P_ERRSTS, &info->errsts2);
198 204
199 pci_write_bits16(mci->pdev, I82875P_ERRSTS, 0x0081, 0x0081); 205 pci_write_bits16(pdev, I82875P_ERRSTS, 0x0081, 0x0081);
200 206
201 /* 207 /*
202 * If the error is the same then we can for both reads then 208 * If the error is the same then we can for both reads then
@@ -208,9 +214,9 @@ static void i82875p_get_error_info(struct mem_ctl_info *mci,
208 return; 214 return;
209 215
210 if ((info->errsts ^ info->errsts2) & 0x0081) { 216 if ((info->errsts ^ info->errsts2) & 0x0081) {
211 pci_read_config_dword(mci->pdev, I82875P_EAP, &info->eap); 217 pci_read_config_dword(pdev, I82875P_EAP, &info->eap);
212 pci_read_config_byte(mci->pdev, I82875P_DES, &info->des); 218 pci_read_config_byte(pdev, I82875P_DES, &info->des);
213 pci_read_config_byte(mci->pdev, I82875P_DERRSYN, 219 pci_read_config_byte(pdev, I82875P_DERRSYN,
214 &info->derrsyn); 220 &info->derrsyn);
215 } 221 }
216} 222}
@@ -259,116 +265,109 @@ static void i82875p_check(struct mem_ctl_info *mci)
259extern int pci_proc_attach_device(struct pci_dev *); 265extern int pci_proc_attach_device(struct pci_dev *);
260#endif 266#endif
261 267
262static int i82875p_probe1(struct pci_dev *pdev, int dev_idx) 268/* Return 0 on success or 1 on failure. */
269static int i82875p_setup_overfl_dev(struct pci_dev *pdev,
270 struct pci_dev **ovrfl_pdev, void __iomem **ovrfl_window)
263{ 271{
264 int rc = -ENODEV; 272 struct pci_dev *dev;
265 int index; 273 void __iomem *window;
266 struct mem_ctl_info *mci = NULL;
267 struct i82875p_pvt *pvt = NULL;
268 unsigned long last_cumul_size;
269 struct pci_dev *ovrfl_pdev;
270 void __iomem *ovrfl_window = NULL;
271 u32 drc;
272 u32 drc_chan; /* Number of channels 0=1chan,1=2chan */
273 u32 nr_chans;
274 u32 drc_ddim; /* DRAM Data Integrity Mode 0=none,2=edac */
275 struct i82875p_error_info discard;
276 274
277 debugf0("%s()\n", __func__); 275 *ovrfl_pdev = NULL;
278 ovrfl_pdev = pci_get_device(PCI_VEND_DEV(INTEL, 82875_6), NULL); 276 *ovrfl_window = NULL;
277 dev = pci_get_device(PCI_VEND_DEV(INTEL, 82875_6), NULL);
279 278
280 if (!ovrfl_pdev) { 279 if (dev == NULL) {
281 /* 280 /* Intel tells BIOS developers to hide device 6 which
282 * Intel tells BIOS developers to hide device 6 which
283 * configures the overflow device access containing 281 * configures the overflow device access containing
284 * the DRBs - this is where we expose device 6. 282 * the DRBs - this is where we expose device 6.
285 * http://www.x86-secret.com/articles/tweak/pat/patsecrets-2.htm 283 * http://www.x86-secret.com/articles/tweak/pat/patsecrets-2.htm
286 */ 284 */
287 pci_write_bits8(pdev, 0xf4, 0x2, 0x2); 285 pci_write_bits8(pdev, 0xf4, 0x2, 0x2);
288 ovrfl_pdev = 286 dev = pci_scan_single_device(pdev->bus, PCI_DEVFN(6, 0));
289 pci_scan_single_device(pdev->bus, PCI_DEVFN(6, 0));
290 287
291 if (!ovrfl_pdev) 288 if (dev == NULL)
292 return -ENODEV; 289 return 1;
293 } 290 }
294 291
292 *ovrfl_pdev = dev;
293
295#ifdef CONFIG_PROC_FS 294#ifdef CONFIG_PROC_FS
296 if (!ovrfl_pdev->procent && pci_proc_attach_device(ovrfl_pdev)) { 295 if ((dev->procent == NULL) && pci_proc_attach_device(dev)) {
297 i82875p_printk(KERN_ERR, 296 i82875p_printk(KERN_ERR, "%s(): Failed to attach overflow "
298 "%s(): Failed to attach overflow device\n", __func__); 297 "device\n", __func__);
299 return -ENODEV; 298 return 1;
300 } 299 }
301#endif 300#endif /* CONFIG_PROC_FS */
302 /* CONFIG_PROC_FS */ 301 if (pci_enable_device(dev)) {
303 if (pci_enable_device(ovrfl_pdev)) { 302 i82875p_printk(KERN_ERR, "%s(): Failed to enable overflow "
304 i82875p_printk(KERN_ERR, 303 "device\n", __func__);
305 "%s(): Failed to enable overflow device\n", __func__); 304 return 1;
306 return -ENODEV;
307 } 305 }
308 306
309 if (pci_request_regions(ovrfl_pdev, pci_name(ovrfl_pdev))) { 307 if (pci_request_regions(dev, pci_name(dev))) {
310#ifdef CORRECT_BIOS 308#ifdef CORRECT_BIOS
311 goto fail0; 309 goto fail0;
312#endif 310#endif
313 } 311 }
314 312
315 /* cache is irrelevant for PCI bus reads/writes */ 313 /* cache is irrelevant for PCI bus reads/writes */
316 ovrfl_window = ioremap_nocache(pci_resource_start(ovrfl_pdev, 0), 314 window = ioremap_nocache(pci_resource_start(dev, 0),
317 pci_resource_len(ovrfl_pdev, 0)); 315 pci_resource_len(dev, 0));
318 316
319 if (!ovrfl_window) { 317 if (window == NULL) {
320 i82875p_printk(KERN_ERR, "%s(): Failed to ioremap bar6\n", 318 i82875p_printk(KERN_ERR, "%s(): Failed to ioremap bar6\n",
321 __func__); 319 __func__);
322 goto fail1; 320 goto fail1;
323 } 321 }
324 322
325 /* need to find out the number of channels */ 323 *ovrfl_window = window;
326 drc = readl(ovrfl_window + I82875P_DRC); 324 return 0;
327 drc_chan = ((drc >> 21) & 0x1);
328 nr_chans = drc_chan + 1;
329 325
330 drc_ddim = (drc >> 18) & 0x1; 326fail1:
331 mci = edac_mc_alloc(sizeof(*pvt), I82875P_NR_CSROWS(nr_chans), 327 pci_release_regions(dev);
332 nr_chans);
333 328
334 if (!mci) { 329#ifdef CORRECT_BIOS
335 rc = -ENOMEM; 330fail0:
336 goto fail2; 331 pci_disable_device(dev);
337 } 332#endif
333 /* NOTE: the ovrfl proc entry and pci_dev are intentionally left */
334 return 1;
335}
338 336
339 debugf3("%s(): init mci\n", __func__);
340 mci->pdev = pdev;
341 mci->mtype_cap = MEM_FLAG_DDR;
342 mci->edac_ctl_cap = EDAC_FLAG_NONE | EDAC_FLAG_SECDED;
343 mci->edac_cap = EDAC_FLAG_UNKNOWN;
344 /* adjust FLAGS */
345 337
346 mci->mod_name = EDAC_MOD_STR; 338/* Return 1 if dual channel mode is active. Else return 0. */
347 mci->mod_ver = "$Revision: 1.5.2.11 $"; 339static inline int dual_channel_active(u32 drc)
348 mci->ctl_name = i82875p_devs[dev_idx].ctl_name; 340{
349 mci->edac_check = i82875p_check; 341 return (drc >> 21) & 0x1;
350 mci->ctl_page_to_phys = NULL; 342}
351 debugf3("%s(): init pvt\n", __func__);
352 pvt = (struct i82875p_pvt *) mci->pvt_info;
353 pvt->ovrfl_pdev = ovrfl_pdev;
354 pvt->ovrfl_window = ovrfl_window;
355 343
356 /* 344
357 * The dram row boundary (DRB) reg values are boundary address 345static void i82875p_init_csrows(struct mem_ctl_info *mci,
346 struct pci_dev *pdev, void __iomem *ovrfl_window, u32 drc)
347{
348 struct csrow_info *csrow;
349 unsigned long last_cumul_size;
350 u8 value;
351 u32 drc_ddim; /* DRAM Data Integrity Mode 0=none,2=edac */
352 u32 cumul_size;
353 int index;
354
355 drc_ddim = (drc >> 18) & 0x1;
356 last_cumul_size = 0;
357
358 /* The dram row boundary (DRB) reg values are boundary address
358 * for each DRAM row with a granularity of 32 or 64MB (single/dual 359 * for each DRAM row with a granularity of 32 or 64MB (single/dual
359 * channel operation). DRB regs are cumulative; therefore DRB7 will 360 * channel operation). DRB regs are cumulative; therefore DRB7 will
360 * contain the total memory contained in all eight rows. 361 * contain the total memory contained in all eight rows.
361 */ 362 */
362 for (last_cumul_size = index = 0; index < mci->nr_csrows; index++) { 363
363 u8 value; 364 for (index = 0; index < mci->nr_csrows; index++) {
364 u32 cumul_size; 365 csrow = &mci->csrows[index];
365 struct csrow_info *csrow = &mci->csrows[index];
366 366
367 value = readb(ovrfl_window + I82875P_DRB + index); 367 value = readb(ovrfl_window + I82875P_DRB + index);
368 cumul_size = value << (I82875P_DRB_SHIFT - PAGE_SHIFT); 368 cumul_size = value << (I82875P_DRB_SHIFT - PAGE_SHIFT);
369 debugf3("%s(): (%d) cumul_size 0x%x\n", __func__, index, 369 debugf3("%s(): (%d) cumul_size 0x%x\n", __func__, index,
370 cumul_size); 370 cumul_size);
371
372 if (cumul_size == last_cumul_size) 371 if (cumul_size == last_cumul_size)
373 continue; /* not populated */ 372 continue; /* not populated */
374 373
@@ -376,35 +375,75 @@ static int i82875p_probe1(struct pci_dev *pdev, int dev_idx)
376 csrow->last_page = cumul_size - 1; 375 csrow->last_page = cumul_size - 1;
377 csrow->nr_pages = cumul_size - last_cumul_size; 376 csrow->nr_pages = cumul_size - last_cumul_size;
378 last_cumul_size = cumul_size; 377 last_cumul_size = cumul_size;
379 csrow->grain = 1 << 12; /* I82875P_EAP has 4KiB reolution */ 378 csrow->grain = 1 << 12; /* I82875P_EAP has 4KiB reolution */
380 csrow->mtype = MEM_DDR; 379 csrow->mtype = MEM_DDR;
381 csrow->dtype = DEV_UNKNOWN; 380 csrow->dtype = DEV_UNKNOWN;
382 csrow->edac_mode = drc_ddim ? EDAC_SECDED : EDAC_NONE; 381 csrow->edac_mode = drc_ddim ? EDAC_SECDED : EDAC_NONE;
383 } 382 }
383}
384
385static int i82875p_probe1(struct pci_dev *pdev, int dev_idx)
386{
387 int rc = -ENODEV;
388 struct mem_ctl_info *mci;
389 struct i82875p_pvt *pvt;
390 struct pci_dev *ovrfl_pdev;
391 void __iomem *ovrfl_window;
392 u32 drc;
393 u32 nr_chans;
394 struct i82875p_error_info discard;
395
396 debugf0("%s()\n", __func__);
397 ovrfl_pdev = pci_get_device(PCI_VEND_DEV(INTEL, 82875_6), NULL);
398
399 if (i82875p_setup_overfl_dev(pdev, &ovrfl_pdev, &ovrfl_window))
400 return -ENODEV;
401 drc = readl(ovrfl_window + I82875P_DRC);
402 nr_chans = dual_channel_active(drc) + 1;
403 mci = edac_mc_alloc(sizeof(*pvt), I82875P_NR_CSROWS(nr_chans),
404 nr_chans);
405
406 if (!mci) {
407 rc = -ENOMEM;
408 goto fail0;
409 }
384 410
411 debugf3("%s(): init mci\n", __func__);
412 mci->dev = &pdev->dev;
413 mci->mtype_cap = MEM_FLAG_DDR;
414 mci->edac_ctl_cap = EDAC_FLAG_NONE | EDAC_FLAG_SECDED;
415 mci->edac_cap = EDAC_FLAG_UNKNOWN;
416 mci->mod_name = EDAC_MOD_STR;
417 mci->mod_ver = I82875P_REVISION;
418 mci->ctl_name = i82875p_devs[dev_idx].ctl_name;
419 mci->edac_check = i82875p_check;
420 mci->ctl_page_to_phys = NULL;
421 debugf3("%s(): init pvt\n", __func__);
422 pvt = (struct i82875p_pvt *) mci->pvt_info;
423 pvt->ovrfl_pdev = ovrfl_pdev;
424 pvt->ovrfl_window = ovrfl_window;
425 i82875p_init_csrows(mci, pdev, ovrfl_window, drc);
385 i82875p_get_error_info(mci, &discard); /* clear counters */ 426 i82875p_get_error_info(mci, &discard); /* clear counters */
386 427
387 if (edac_mc_add_mc(mci)) { 428 /* Here we assume that we will never see multiple instances of this
429 * type of memory controller. The ID is therefore hardcoded to 0.
430 */
431 if (edac_mc_add_mc(mci,0)) {
388 debugf3("%s(): failed edac_mc_add_mc()\n", __func__); 432 debugf3("%s(): failed edac_mc_add_mc()\n", __func__);
389 goto fail3; 433 goto fail1;
390 } 434 }
391 435
392 /* get this far and it's successful */ 436 /* get this far and it's successful */
393 debugf3("%s(): success\n", __func__); 437 debugf3("%s(): success\n", __func__);
394 return 0; 438 return 0;
395 439
396fail3: 440fail1:
397 edac_mc_free(mci); 441 edac_mc_free(mci);
398 442
399fail2: 443fail0:
400 iounmap(ovrfl_window); 444 iounmap(ovrfl_window);
401
402fail1:
403 pci_release_regions(ovrfl_pdev); 445 pci_release_regions(ovrfl_pdev);
404 446
405#ifdef CORRECT_BIOS
406fail0:
407#endif
408 pci_disable_device(ovrfl_pdev); 447 pci_disable_device(ovrfl_pdev);
409 /* NOTE: the ovrfl proc entry and pci_dev are intentionally left */ 448 /* NOTE: the ovrfl proc entry and pci_dev are intentionally left */
410 return rc; 449 return rc;
@@ -437,7 +476,7 @@ static void __devexit i82875p_remove_one(struct pci_dev *pdev)
437 476
438 debugf0("%s()\n", __func__); 477 debugf0("%s()\n", __func__);
439 478
440 if ((mci = edac_mc_del_mc(pdev)) == NULL) 479 if ((mci = edac_mc_del_mc(&pdev->dev)) == NULL)
441 return; 480 return;
442 481
443 pvt = (struct i82875p_pvt *) mci->pvt_info; 482 pvt = (struct i82875p_pvt *) mci->pvt_info;
diff --git a/drivers/edac/r82600_edac.c b/drivers/edac/r82600_edac.c
index 2c29fafe67c7..fecdb2c9ee28 100644
--- a/drivers/edac/r82600_edac.c
+++ b/drivers/edac/r82600_edac.c
@@ -23,6 +23,8 @@
23#include <linux/slab.h> 23#include <linux/slab.h>
24#include "edac_mc.h" 24#include "edac_mc.h"
25 25
26#define R82600_REVISION " Ver: 2.0.0 " __DATE__
27
26#define r82600_printk(level, fmt, arg...) \ 28#define r82600_printk(level, fmt, arg...) \
27 edac_printk(level, "r82600", fmt, ##arg) 29 edac_printk(level, "r82600", fmt, ##arg)
28 30
@@ -134,17 +136,20 @@ static unsigned int disable_hardware_scrub = 0;
134static void r82600_get_error_info (struct mem_ctl_info *mci, 136static void r82600_get_error_info (struct mem_ctl_info *mci,
135 struct r82600_error_info *info) 137 struct r82600_error_info *info)
136{ 138{
137 pci_read_config_dword(mci->pdev, R82600_EAP, &info->eapr); 139 struct pci_dev *pdev;
140
141 pdev = to_pci_dev(mci->dev);
142 pci_read_config_dword(pdev, R82600_EAP, &info->eapr);
138 143
139 if (info->eapr & BIT(0)) 144 if (info->eapr & BIT(0))
140 /* Clear error to allow next error to be reported [p.62] */ 145 /* Clear error to allow next error to be reported [p.62] */
141 pci_write_bits32(mci->pdev, R82600_EAP, 146 pci_write_bits32(pdev, R82600_EAP,
142 ((u32) BIT(0) & (u32) BIT(1)), 147 ((u32) BIT(0) & (u32) BIT(1)),
143 ((u32) BIT(0) & (u32) BIT(1))); 148 ((u32) BIT(0) & (u32) BIT(1)));
144 149
145 if (info->eapr & BIT(1)) 150 if (info->eapr & BIT(1))
146 /* Clear error to allow next error to be reported [p.62] */ 151 /* Clear error to allow next error to be reported [p.62] */
147 pci_write_bits32(mci->pdev, R82600_EAP, 152 pci_write_bits32(pdev, R82600_EAP,
148 ((u32) BIT(0) & (u32) BIT(1)), 153 ((u32) BIT(0) & (u32) BIT(1)),
149 ((u32) BIT(0) & (u32) BIT(1))); 154 ((u32) BIT(0) & (u32) BIT(1)));
150} 155}
@@ -200,25 +205,72 @@ static void r82600_check(struct mem_ctl_info *mci)
200 r82600_process_error_info(mci, &info, 1); 205 r82600_process_error_info(mci, &info, 1);
201} 206}
202 207
203static int r82600_probe1(struct pci_dev *pdev, int dev_idx) 208static inline int ecc_enabled(u8 dramcr)
209{
210 return dramcr & BIT(5);
211}
212
213static void r82600_init_csrows(struct mem_ctl_info *mci, struct pci_dev *pdev,
214 u8 dramcr)
204{ 215{
205 int rc = -ENODEV; 216 struct csrow_info *csrow;
206 int index; 217 int index;
207 struct mem_ctl_info *mci = NULL; 218 u8 drbar; /* SDRAM Row Boundry Address Register */
219 u32 row_high_limit, row_high_limit_last;
220 u32 reg_sdram, ecc_on, row_base;
221
222 ecc_on = ecc_enabled(dramcr);
223 reg_sdram = dramcr & BIT(4);
224 row_high_limit_last = 0;
225
226 for (index = 0; index < mci->nr_csrows; index++) {
227 csrow = &mci->csrows[index];
228
229 /* find the DRAM Chip Select Base address and mask */
230 pci_read_config_byte(pdev, R82600_DRBA + index, &drbar);
231
232 debugf1("%s() Row=%d DRBA = %#0x\n", __func__, index, drbar);
233
234 row_high_limit = ((u32) drbar << 24);
235/* row_high_limit = ((u32)drbar << 24) | 0xffffffUL; */
236
237 debugf1("%s() Row=%d, Boundry Address=%#0x, Last = %#0x\n",
238 __func__, index, row_high_limit, row_high_limit_last);
239
240 /* Empty row [p.57] */
241 if (row_high_limit == row_high_limit_last)
242 continue;
243
244 row_base = row_high_limit_last;
245
246 csrow->first_page = row_base >> PAGE_SHIFT;
247 csrow->last_page = (row_high_limit >> PAGE_SHIFT) - 1;
248 csrow->nr_pages = csrow->last_page - csrow->first_page + 1;
249 /* Error address is top 19 bits - so granularity is *
250 * 14 bits */
251 csrow->grain = 1 << 14;
252 csrow->mtype = reg_sdram ? MEM_RDDR : MEM_DDR;
253 /* FIXME - check that this is unknowable with this chipset */
254 csrow->dtype = DEV_UNKNOWN;
255
256 /* Mode is global on 82600 */
257 csrow->edac_mode = ecc_on ? EDAC_SECDED : EDAC_NONE;
258 row_high_limit_last = row_high_limit;
259 }
260}
261
262static int r82600_probe1(struct pci_dev *pdev, int dev_idx)
263{
264 struct mem_ctl_info *mci;
208 u8 dramcr; 265 u8 dramcr;
209 u32 ecc_on;
210 u32 reg_sdram;
211 u32 eapr; 266 u32 eapr;
212 u32 scrub_disabled; 267 u32 scrub_disabled;
213 u32 sdram_refresh_rate; 268 u32 sdram_refresh_rate;
214 u32 row_high_limit_last = 0;
215 struct r82600_error_info discard; 269 struct r82600_error_info discard;
216 270
217 debugf0("%s()\n", __func__); 271 debugf0("%s()\n", __func__);
218 pci_read_config_byte(pdev, R82600_DRAMC, &dramcr); 272 pci_read_config_byte(pdev, R82600_DRAMC, &dramcr);
219 pci_read_config_dword(pdev, R82600_EAP, &eapr); 273 pci_read_config_dword(pdev, R82600_EAP, &eapr);
220 ecc_on = dramcr & BIT(5);
221 reg_sdram = dramcr & BIT(4);
222 scrub_disabled = eapr & BIT(31); 274 scrub_disabled = eapr & BIT(31);
223 sdram_refresh_rate = dramcr & (BIT(0) | BIT(1)); 275 sdram_refresh_rate = dramcr & (BIT(0) | BIT(1));
224 debugf2("%s(): sdram refresh rate = %#0x\n", __func__, 276 debugf2("%s(): sdram refresh rate = %#0x\n", __func__,
@@ -226,13 +278,11 @@ static int r82600_probe1(struct pci_dev *pdev, int dev_idx)
226 debugf2("%s(): DRAMC register = %#0x\n", __func__, dramcr); 278 debugf2("%s(): DRAMC register = %#0x\n", __func__, dramcr);
227 mci = edac_mc_alloc(0, R82600_NR_CSROWS, R82600_NR_CHANS); 279 mci = edac_mc_alloc(0, R82600_NR_CSROWS, R82600_NR_CHANS);
228 280
229 if (mci == NULL) { 281 if (mci == NULL)
230 rc = -ENOMEM; 282 return -ENOMEM;
231 goto fail;
232 }
233 283
234 debugf0("%s(): mci = %p\n", __func__, mci); 284 debugf0("%s(): mci = %p\n", __func__, mci);
235 mci->pdev = pdev; 285 mci->dev = &pdev->dev;
236 mci->mtype_cap = MEM_FLAG_RDDR | MEM_FLAG_DDR; 286 mci->mtype_cap = MEM_FLAG_RDDR | MEM_FLAG_DDR;
237 mci->edac_ctl_cap = EDAC_FLAG_NONE | EDAC_FLAG_EC | EDAC_FLAG_SECDED; 287 mci->edac_ctl_cap = EDAC_FLAG_NONE | EDAC_FLAG_EC | EDAC_FLAG_SECDED;
238 /* FIXME try to work out if the chip leads have been used for COM2 288 /* FIXME try to work out if the chip leads have been used for COM2
@@ -245,7 +295,7 @@ static int r82600_probe1(struct pci_dev *pdev, int dev_idx)
245 * is possible. */ 295 * is possible. */
246 mci->edac_cap = EDAC_FLAG_NONE | EDAC_FLAG_EC | EDAC_FLAG_SECDED; 296 mci->edac_cap = EDAC_FLAG_NONE | EDAC_FLAG_EC | EDAC_FLAG_SECDED;
247 297
248 if (ecc_on) { 298 if (ecc_enabled(dramcr)) {
249 if (scrub_disabled) 299 if (scrub_disabled)
250 debugf3("%s(): mci = %p - Scrubbing disabled! EAP: " 300 debugf3("%s(): mci = %p - Scrubbing disabled! EAP: "
251 "%#0x\n", __func__, mci, eapr); 301 "%#0x\n", __func__, mci, eapr);
@@ -253,53 +303,17 @@ static int r82600_probe1(struct pci_dev *pdev, int dev_idx)
253 mci->edac_cap = EDAC_FLAG_NONE; 303 mci->edac_cap = EDAC_FLAG_NONE;
254 304
255 mci->mod_name = EDAC_MOD_STR; 305 mci->mod_name = EDAC_MOD_STR;
256 mci->mod_ver = "$Revision: 1.1.2.6 $"; 306 mci->mod_ver = R82600_REVISION;
257 mci->ctl_name = "R82600"; 307 mci->ctl_name = "R82600";
258 mci->edac_check = r82600_check; 308 mci->edac_check = r82600_check;
259 mci->ctl_page_to_phys = NULL; 309 mci->ctl_page_to_phys = NULL;
260 310 r82600_init_csrows(mci, pdev, dramcr);
261 for (index = 0; index < mci->nr_csrows; index++) {
262 struct csrow_info *csrow = &mci->csrows[index];
263 u8 drbar; /* sDram Row Boundry Address Register */
264 u32 row_high_limit;
265 u32 row_base;
266
267 /* find the DRAM Chip Select Base address and mask */
268 pci_read_config_byte(mci->pdev, R82600_DRBA + index, &drbar);
269
270 debugf1("MC%d: %s() Row=%d DRBA = %#0x\n", mci->mc_idx,
271 __func__, index, drbar);
272
273 row_high_limit = ((u32) drbar << 24);
274/* row_high_limit = ((u32)drbar << 24) | 0xffffffUL; */
275
276 debugf1("MC%d: %s() Row=%d, Boundry Address=%#0x, Last = "
277 "%#0x \n", mci->mc_idx, __func__, index,
278 row_high_limit, row_high_limit_last);
279
280 /* Empty row [p.57] */
281 if (row_high_limit == row_high_limit_last)
282 continue;
283
284 row_base = row_high_limit_last;
285 csrow->first_page = row_base >> PAGE_SHIFT;
286 csrow->last_page = (row_high_limit >> PAGE_SHIFT) - 1;
287 csrow->nr_pages = csrow->last_page - csrow->first_page + 1;
288 /* Error address is top 19 bits - so granularity is *
289 * 14 bits */
290 csrow->grain = 1 << 14;
291 csrow->mtype = reg_sdram ? MEM_RDDR : MEM_DDR;
292 /* FIXME - check that this is unknowable with this chipset */
293 csrow->dtype = DEV_UNKNOWN;
294
295 /* Mode is global on 82600 */
296 csrow->edac_mode = ecc_on ? EDAC_SECDED : EDAC_NONE;
297 row_high_limit_last = row_high_limit;
298 }
299
300 r82600_get_error_info(mci, &discard); /* clear counters */ 311 r82600_get_error_info(mci, &discard); /* clear counters */
301 312
302 if (edac_mc_add_mc(mci)) { 313 /* Here we assume that we will never see multiple instances of this
314 * type of memory controller. The ID is therefore hardcoded to 0.
315 */
316 if (edac_mc_add_mc(mci,0)) {
303 debugf3("%s(): failed edac_mc_add_mc()\n", __func__); 317 debugf3("%s(): failed edac_mc_add_mc()\n", __func__);
304 goto fail; 318 goto fail;
305 } 319 }
@@ -309,17 +323,15 @@ static int r82600_probe1(struct pci_dev *pdev, int dev_idx)
309 if (disable_hardware_scrub) { 323 if (disable_hardware_scrub) {
310 debugf3("%s(): Disabling Hardware Scrub (scrub on error)\n", 324 debugf3("%s(): Disabling Hardware Scrub (scrub on error)\n",
311 __func__); 325 __func__);
312 pci_write_bits32(mci->pdev, R82600_EAP, BIT(31), BIT(31)); 326 pci_write_bits32(pdev, R82600_EAP, BIT(31), BIT(31));
313 } 327 }
314 328
315 debugf3("%s(): success\n", __func__); 329 debugf3("%s(): success\n", __func__);
316 return 0; 330 return 0;
317 331
318fail: 332fail:
319 if (mci) 333 edac_mc_free(mci);
320 edac_mc_free(mci); 334 return -ENODEV;
321
322 return rc;
323} 335}
324 336
325/* returns count (>= 0), or negative on error */ 337/* returns count (>= 0), or negative on error */
@@ -338,7 +350,7 @@ static void __devexit r82600_remove_one(struct pci_dev *pdev)
338 350
339 debugf0("%s()\n", __func__); 351 debugf0("%s()\n", __func__);
340 352
341 if ((mci = edac_mc_del_mc(pdev)) == NULL) 353 if ((mci = edac_mc_del_mc(&pdev->dev)) == NULL)
342 return; 354 return;
343 355
344 edac_mc_free(mci); 356 edac_mc_free(mci);
diff --git a/drivers/i2c/busses/i2c-i801.c b/drivers/i2c/busses/i2c-i801.c
index 8b46ef7d9ff8..7be1d0a3e8f8 100644
--- a/drivers/i2c/busses/i2c-i801.c
+++ b/drivers/i2c/busses/i2c-i801.c
@@ -494,8 +494,8 @@ static int __devinit i801_probe(struct pci_dev *dev, const struct pci_device_id
494 err = pci_request_region(dev, SMBBAR, i801_driver.name); 494 err = pci_request_region(dev, SMBBAR, i801_driver.name);
495 if (err) { 495 if (err) {
496 dev_err(&dev->dev, "Failed to request SMBus region " 496 dev_err(&dev->dev, "Failed to request SMBus region "
497 "0x%lx-0x%lx\n", i801_smba, 497 "0x%lx-0x%Lx\n", i801_smba,
498 pci_resource_end(dev, SMBBAR)); 498 (unsigned long long)pci_resource_end(dev, SMBBAR));
499 goto exit; 499 goto exit;
500 } 500 }
501 501
diff --git a/drivers/infiniband/ulp/iser/iscsi_iser.c b/drivers/infiniband/ulp/iser/iscsi_iser.c
index 4c3f2de2a06e..b2c033edb03c 100644
--- a/drivers/infiniband/ulp/iser/iscsi_iser.c
+++ b/drivers/infiniband/ulp/iser/iscsi_iser.c
@@ -54,7 +54,6 @@
54#include <linux/blkdev.h> 54#include <linux/blkdev.h>
55#include <linux/init.h> 55#include <linux/init.h>
56#include <linux/ioctl.h> 56#include <linux/ioctl.h>
57#include <linux/devfs_fs_kernel.h>
58#include <linux/cdev.h> 57#include <linux/cdev.h>
59#include <linux/in.h> 58#include <linux/in.h>
60#include <linux/net.h> 59#include <linux/net.h>
diff --git a/drivers/input/serio/i8042-sparcio.h b/drivers/input/serio/i8042-sparcio.h
index 6d66351805a2..9cad197a4e68 100644
--- a/drivers/input/serio/i8042-sparcio.h
+++ b/drivers/input/serio/i8042-sparcio.h
@@ -3,11 +3,9 @@
3 3
4#include <linux/config.h> 4#include <linux/config.h>
5#include <asm/io.h> 5#include <asm/io.h>
6
7#ifdef CONFIG_PCI
8#include <asm/oplib.h> 6#include <asm/oplib.h>
9#include <asm/ebus.h> 7#include <asm/prom.h>
10#endif 8#include <asm/of_device.h>
11 9
12static int i8042_kbd_irq = -1; 10static int i8042_kbd_irq = -1;
13static int i8042_aux_irq = -1; 11static int i8042_aux_irq = -1;
@@ -48,54 +46,83 @@ static inline void i8042_write_command(int val)
48#define OBP_PS2MS_NAME1 "kdmouse" 46#define OBP_PS2MS_NAME1 "kdmouse"
49#define OBP_PS2MS_NAME2 "mouse" 47#define OBP_PS2MS_NAME2 "mouse"
50 48
49static int __devinit sparc_i8042_probe(struct of_device *op, const struct of_device_id *match)
50{
51 struct device_node *dp = op->node;
52
53 dp = dp->child;
54 while (dp) {
55 if (!strcmp(dp->name, OBP_PS2KBD_NAME1) ||
56 !strcmp(dp->name, OBP_PS2KBD_NAME2)) {
57 struct of_device *kbd = of_find_device_by_node(dp);
58 unsigned int irq = kbd->irqs[0];
59 if (irq == 0xffffffff)
60 irq = op->irqs[0];
61 i8042_kbd_irq = irq;
62 kbd_iobase = of_ioremap(&kbd->resource[0],
63 0, 8, "kbd");
64 } else if (!strcmp(dp->name, OBP_PS2MS_NAME1) ||
65 !strcmp(dp->name, OBP_PS2MS_NAME2)) {
66 struct of_device *ms = of_find_device_by_node(dp);
67 unsigned int irq = ms->irqs[0];
68 if (irq == 0xffffffff)
69 irq = op->irqs[0];
70 i8042_aux_irq = irq;
71 }
72
73 dp = dp->sibling;
74 }
75
76 return 0;
77}
78
79static int __devexit sparc_i8042_remove(struct of_device *op)
80{
81 of_iounmap(kbd_iobase, 8);
82
83 return 0;
84}
85
86static struct of_device_id sparc_i8042_match[] = {
87 {
88 .name = "8042",
89 },
90 {},
91};
92MODULE_DEVICE_TABLE(of, i8042_match);
93
94static struct of_platform_driver sparc_i8042_driver = {
95 .name = "i8042",
96 .match_table = sparc_i8042_match,
97 .probe = sparc_i8042_probe,
98 .remove = __devexit_p(sparc_i8042_remove),
99};
100
51static int __init i8042_platform_init(void) 101static int __init i8042_platform_init(void)
52{ 102{
53#ifndef CONFIG_PCI 103#ifndef CONFIG_PCI
54 return -ENODEV; 104 return -ENODEV;
55#else 105#else
56 char prop[128]; 106 struct device_node *root = of_find_node_by_path("/");
57 int len;
58 107
59 len = prom_getproperty(prom_root_node, "name", prop, sizeof(prop)); 108 if (!strcmp(root->name, "SUNW,JavaStation-1")) {
60 if (len < 0) {
61 printk("i8042: Cannot get name property of root OBP node.\n");
62 return -ENODEV;
63 }
64 if (strncmp(prop, "SUNW,JavaStation-1", len) == 0) {
65 /* Hardcoded values for MrCoffee. */ 109 /* Hardcoded values for MrCoffee. */
66 i8042_kbd_irq = i8042_aux_irq = 13 | 0x20; 110 i8042_kbd_irq = i8042_aux_irq = 13 | 0x20;
67 kbd_iobase = ioremap(0x71300060, 8); 111 kbd_iobase = ioremap(0x71300060, 8);
68 if (!kbd_iobase) 112 if (!kbd_iobase)
69 return -ENODEV; 113 return -ENODEV;
70 } else { 114 } else {
71 struct linux_ebus *ebus; 115 int err = of_register_driver(&sparc_i8042_driver,
72 struct linux_ebus_device *edev; 116 &of_bus_type);
73 struct linux_ebus_child *child; 117 if (err)
74 118 return err;
75 for_each_ebus(ebus) { 119
76 for_each_ebusdev(edev, ebus) {
77 if (!strcmp(edev->prom_node->name, "8042"))
78 goto edev_found;
79 }
80 }
81 return -ENODEV;
82
83 edev_found:
84 for_each_edevchild(edev, child) {
85 if (!strcmp(child->prom_node->name, OBP_PS2KBD_NAME1) ||
86 !strcmp(child->prom_node->name, OBP_PS2KBD_NAME2)) {
87 i8042_kbd_irq = child->irqs[0];
88 kbd_iobase =
89 ioremap(child->resource[0].start, 8);
90 }
91 if (!strcmp(child->prom_node->name, OBP_PS2MS_NAME1) ||
92 !strcmp(child->prom_node->name, OBP_PS2MS_NAME2))
93 i8042_aux_irq = child->irqs[0];
94 }
95 if (i8042_kbd_irq == -1 || 120 if (i8042_kbd_irq == -1 ||
96 i8042_aux_irq == -1) { 121 i8042_aux_irq == -1) {
97 printk("i8042: Error, 8042 device lacks both kbd and " 122 if (kbd_iobase) {
98 "mouse nodes.\n"); 123 of_iounmap(kbd_iobase, 8);
124 kbd_iobase = (void __iomem *) NULL;
125 }
99 return -ENODEV; 126 return -ENODEV;
100 } 127 }
101 } 128 }
@@ -109,7 +136,10 @@ static int __init i8042_platform_init(void)
109static inline void i8042_platform_exit(void) 136static inline void i8042_platform_exit(void)
110{ 137{
111#ifdef CONFIG_PCI 138#ifdef CONFIG_PCI
112 iounmap(kbd_iobase); 139 struct device_node *root = of_find_node_by_path("/");
140
141 if (strcmp(root->name, "SUNW,JavaStation-1"))
142 of_unregister_driver(&sparc_i8042_driver);
113#endif 143#endif
114} 144}
115 145
diff --git a/drivers/net/bnx2.c b/drivers/net/bnx2.c
index 7635736cc791..d55b0f7939a6 100644
--- a/drivers/net/bnx2.c
+++ b/drivers/net/bnx2.c
@@ -57,8 +57,8 @@
57 57
58#define DRV_MODULE_NAME "bnx2" 58#define DRV_MODULE_NAME "bnx2"
59#define PFX DRV_MODULE_NAME ": " 59#define PFX DRV_MODULE_NAME ": "
60#define DRV_MODULE_VERSION "1.4.42" 60#define DRV_MODULE_VERSION "1.4.43"
61#define DRV_MODULE_RELDATE "June 12, 2006" 61#define DRV_MODULE_RELDATE "June 28, 2006"
62 62
63#define RUN_AT(x) (jiffies + (x)) 63#define RUN_AT(x) (jiffies + (x))
64 64
@@ -1676,7 +1676,7 @@ bnx2_tx_int(struct bnx2 *bp)
1676 1676
1677 tx_free_bd += last + 1; 1677 tx_free_bd += last + 1;
1678 1678
1679 dev_kfree_skb_irq(skb); 1679 dev_kfree_skb(skb);
1680 1680
1681 hw_cons = bp->hw_tx_cons = 1681 hw_cons = bp->hw_tx_cons =
1682 sblk->status_tx_quick_consumer_index0; 1682 sblk->status_tx_quick_consumer_index0;
@@ -1824,7 +1824,7 @@ reuse_rx:
1824 if ((len > (bp->dev->mtu + ETH_HLEN)) && 1824 if ((len > (bp->dev->mtu + ETH_HLEN)) &&
1825 (ntohs(skb->protocol) != 0x8100)) { 1825 (ntohs(skb->protocol) != 0x8100)) {
1826 1826
1827 dev_kfree_skb_irq(skb); 1827 dev_kfree_skb(skb);
1828 goto next_rx; 1828 goto next_rx;
1829 1829
1830 } 1830 }
@@ -3643,7 +3643,7 @@ bnx2_free_tx_skbs(struct bnx2 *bp)
3643 skb_shinfo(skb)->frags[j].size, 3643 skb_shinfo(skb)->frags[j].size,
3644 PCI_DMA_TODEVICE); 3644 PCI_DMA_TODEVICE);
3645 } 3645 }
3646 dev_kfree_skb_any(skb); 3646 dev_kfree_skb(skb);
3647 i += j + 1; 3647 i += j + 1;
3648 } 3648 }
3649 3649
@@ -3669,7 +3669,7 @@ bnx2_free_rx_skbs(struct bnx2 *bp)
3669 3669
3670 rx_buf->skb = NULL; 3670 rx_buf->skb = NULL;
3671 3671
3672 dev_kfree_skb_any(skb); 3672 dev_kfree_skb(skb);
3673 } 3673 }
3674} 3674}
3675 3675
@@ -3999,7 +3999,7 @@ bnx2_run_loopback(struct bnx2 *bp, int loopback_mode)
3999 udelay(5); 3999 udelay(5);
4000 4000
4001 pci_unmap_single(bp->pdev, map, pkt_size, PCI_DMA_TODEVICE); 4001 pci_unmap_single(bp->pdev, map, pkt_size, PCI_DMA_TODEVICE);
4002 dev_kfree_skb_irq(skb); 4002 dev_kfree_skb(skb);
4003 4003
4004 if (bp->status_blk->status_tx_quick_consumer_index0 != bp->tx_prod) { 4004 if (bp->status_blk->status_tx_quick_consumer_index0 != bp->tx_prod) {
4005 goto loopback_test_done; 4005 goto loopback_test_done;
@@ -4541,7 +4541,7 @@ bnx2_close(struct net_device *dev)
4541 bnx2_netif_stop(bp); 4541 bnx2_netif_stop(bp);
4542 del_timer_sync(&bp->timer); 4542 del_timer_sync(&bp->timer);
4543 if (bp->flags & NO_WOL_FLAG) 4543 if (bp->flags & NO_WOL_FLAG)
4544 reset_code = BNX2_DRV_MSG_CODE_UNLOAD; 4544 reset_code = BNX2_DRV_MSG_CODE_UNLOAD_LNK_DN;
4545 else if (bp->wol) 4545 else if (bp->wol)
4546 reset_code = BNX2_DRV_MSG_CODE_SUSPEND_WOL; 4546 reset_code = BNX2_DRV_MSG_CODE_SUSPEND_WOL;
4547 else 4547 else
@@ -5128,6 +5128,16 @@ bnx2_set_rx_csum(struct net_device *dev, u32 data)
5128 return 0; 5128 return 0;
5129} 5129}
5130 5130
5131static int
5132bnx2_set_tso(struct net_device *dev, u32 data)
5133{
5134 if (data)
5135 dev->features |= NETIF_F_TSO | NETIF_F_TSO_ECN;
5136 else
5137 dev->features &= ~(NETIF_F_TSO | NETIF_F_TSO_ECN);
5138 return 0;
5139}
5140
5131#define BNX2_NUM_STATS 46 5141#define BNX2_NUM_STATS 46
5132 5142
5133static struct { 5143static struct {
@@ -5445,7 +5455,7 @@ static struct ethtool_ops bnx2_ethtool_ops = {
5445 .set_sg = ethtool_op_set_sg, 5455 .set_sg = ethtool_op_set_sg,
5446#ifdef BCM_TSO 5456#ifdef BCM_TSO
5447 .get_tso = ethtool_op_get_tso, 5457 .get_tso = ethtool_op_get_tso,
5448 .set_tso = ethtool_op_set_tso, 5458 .set_tso = bnx2_set_tso,
5449#endif 5459#endif
5450 .self_test_count = bnx2_self_test_count, 5460 .self_test_count = bnx2_self_test_count,
5451 .self_test = bnx2_self_test, 5461 .self_test = bnx2_self_test,
@@ -5926,7 +5936,7 @@ bnx2_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
5926 dev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX; 5936 dev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
5927#endif 5937#endif
5928#ifdef BCM_TSO 5938#ifdef BCM_TSO
5929 dev->features |= NETIF_F_TSO; 5939 dev->features |= NETIF_F_TSO | NETIF_F_TSO_ECN;
5930#endif 5940#endif
5931 5941
5932 netif_carrier_off(bp->dev); 5942 netif_carrier_off(bp->dev);
@@ -5968,7 +5978,7 @@ bnx2_suspend(struct pci_dev *pdev, pm_message_t state)
5968 netif_device_detach(dev); 5978 netif_device_detach(dev);
5969 del_timer_sync(&bp->timer); 5979 del_timer_sync(&bp->timer);
5970 if (bp->flags & NO_WOL_FLAG) 5980 if (bp->flags & NO_WOL_FLAG)
5971 reset_code = BNX2_DRV_MSG_CODE_UNLOAD; 5981 reset_code = BNX2_DRV_MSG_CODE_UNLOAD_LNK_DN;
5972 else if (bp->wol) 5982 else if (bp->wol)
5973 reset_code = BNX2_DRV_MSG_CODE_SUSPEND_WOL; 5983 reset_code = BNX2_DRV_MSG_CODE_SUSPEND_WOL;
5974 else 5984 else
diff --git a/drivers/net/bnx2.h b/drivers/net/bnx2.h
index 5845e334941b..658c5ee95c73 100644
--- a/drivers/net/bnx2.h
+++ b/drivers/net/bnx2.h
@@ -4174,6 +4174,7 @@ struct fw_info {
4174#define BNX2_DRV_MSG_CODE_PULSE 0x06000000 4174#define BNX2_DRV_MSG_CODE_PULSE 0x06000000
4175#define BNX2_DRV_MSG_CODE_DIAG 0x07000000 4175#define BNX2_DRV_MSG_CODE_DIAG 0x07000000
4176#define BNX2_DRV_MSG_CODE_SUSPEND_NO_WOL 0x09000000 4176#define BNX2_DRV_MSG_CODE_SUSPEND_NO_WOL 0x09000000
4177#define BNX2_DRV_MSG_CODE_UNLOAD_LNK_DN 0x0b000000
4177 4178
4178#define BNX2_DRV_MSG_DATA 0x00ff0000 4179#define BNX2_DRV_MSG_DATA 0x00ff0000
4179#define BNX2_DRV_MSG_DATA_WAIT0 0x00010000 4180#define BNX2_DRV_MSG_DATA_WAIT0 0x00010000
diff --git a/drivers/net/irda/Kconfig b/drivers/net/irda/Kconfig
index d2ce4896abff..e9e6d99a9add 100644
--- a/drivers/net/irda/Kconfig
+++ b/drivers/net/irda/Kconfig
@@ -350,7 +350,7 @@ config TOSHIBA_FIR
350 350
351config AU1000_FIR 351config AU1000_FIR
352 tristate "Alchemy Au1000 SIR/FIR" 352 tristate "Alchemy Au1000 SIR/FIR"
353 depends on MIPS_AU1000 && IRDA 353 depends on SOC_AU1000 && IRDA
354 354
355config SMC_IRCC_FIR 355config SMC_IRCC_FIR
356 tristate "SMSC IrCC (EXPERIMENTAL)" 356 tristate "SMSC IrCC (EXPERIMENTAL)"
diff --git a/drivers/net/irda/mcs7780.c b/drivers/net/irda/mcs7780.c
index 754297fc8f22..47f6f64d604c 100644
--- a/drivers/net/irda/mcs7780.c
+++ b/drivers/net/irda/mcs7780.c
@@ -101,7 +101,7 @@ static int transceiver_type = MCS_TSC_VISHAY;
101module_param(transceiver_type, int, 0444); 101module_param(transceiver_type, int, 0444);
102MODULE_PARM_DESC(transceiver_type, "IR transceiver type, see mcs7780.h."); 102MODULE_PARM_DESC(transceiver_type, "IR transceiver type, see mcs7780.h.");
103 103
104struct usb_driver mcs_driver = { 104static struct usb_driver mcs_driver = {
105 .name = "mcs7780", 105 .name = "mcs7780",
106 .probe = mcs_probe, 106 .probe = mcs_probe,
107 .disconnect = mcs_disconnect, 107 .disconnect = mcs_disconnect,
diff --git a/drivers/net/sunhme.c b/drivers/net/sunhme.c
index c33ead3470db..9b246e44f756 100644
--- a/drivers/net/sunhme.c
+++ b/drivers/net/sunhme.c
@@ -2523,7 +2523,7 @@ static struct ethtool_ops hme_ethtool_ops = {
2523static int hme_version_printed; 2523static int hme_version_printed;
2524 2524
2525#ifdef CONFIG_SBUS 2525#ifdef CONFIG_SBUS
2526void __init quattro_get_ranges(struct quattro *qp) 2526void __devinit quattro_get_ranges(struct quattro *qp)
2527{ 2527{
2528 struct sbus_dev *sdev = qp->quattro_dev; 2528 struct sbus_dev *sdev = qp->quattro_dev;
2529 int err; 2529 int err;
@@ -2539,7 +2539,7 @@ void __init quattro_get_ranges(struct quattro *qp)
2539 qp->nranges = (err / sizeof(struct linux_prom_ranges)); 2539 qp->nranges = (err / sizeof(struct linux_prom_ranges));
2540} 2540}
2541 2541
2542static void __init quattro_apply_ranges(struct quattro *qp, struct happy_meal *hp) 2542static void __devinit quattro_apply_ranges(struct quattro *qp, struct happy_meal *hp)
2543{ 2543{
2544 struct sbus_dev *sdev = hp->happy_dev; 2544 struct sbus_dev *sdev = hp->happy_dev;
2545 int rng; 2545 int rng;
@@ -2566,7 +2566,7 @@ static void __init quattro_apply_ranges(struct quattro *qp, struct happy_meal *h
2566 * 2566 *
2567 * Return NULL on failure. 2567 * Return NULL on failure.
2568 */ 2568 */
2569static struct quattro * __init quattro_sbus_find(struct sbus_dev *goal_sdev) 2569static struct quattro * __devinit quattro_sbus_find(struct sbus_dev *goal_sdev)
2570{ 2570{
2571 struct sbus_dev *sdev; 2571 struct sbus_dev *sdev;
2572 struct quattro *qp; 2572 struct quattro *qp;
@@ -2618,7 +2618,7 @@ static void __init quattro_sbus_register_irqs(void)
2618 } 2618 }
2619} 2619}
2620 2620
2621static void __devexit quattro_sbus_free_irqs(void) 2621static void quattro_sbus_free_irqs(void)
2622{ 2622{
2623 struct quattro *qp; 2623 struct quattro *qp;
2624 2624
@@ -2662,7 +2662,7 @@ static struct quattro * __init quattro_pci_find(struct pci_dev *pdev)
2662#endif /* CONFIG_PCI */ 2662#endif /* CONFIG_PCI */
2663 2663
2664#ifdef CONFIG_SBUS 2664#ifdef CONFIG_SBUS
2665static int __init happy_meal_sbus_probe_one(struct sbus_dev *sdev, int is_qfe) 2665static int __devinit happy_meal_sbus_probe_one(struct sbus_dev *sdev, int is_qfe)
2666{ 2666{
2667 struct device_node *dp = sdev->ofdev.node; 2667 struct device_node *dp = sdev->ofdev.node;
2668 struct quattro *qp = NULL; 2668 struct quattro *qp = NULL;
diff --git a/drivers/parisc/led.c b/drivers/parisc/led.c
index 298f2ddb2c17..d7024c7483bd 100644
--- a/drivers/parisc/led.c
+++ b/drivers/parisc/led.c
@@ -411,16 +411,17 @@ static __inline__ int led_get_net_activity(void)
411static __inline__ int led_get_diskio_activity(void) 411static __inline__ int led_get_diskio_activity(void)
412{ 412{
413 static unsigned long last_pgpgin, last_pgpgout; 413 static unsigned long last_pgpgin, last_pgpgout;
414 struct page_state pgstat; 414 unsigned long events[NR_VM_EVENT_ITEMS];
415 int changed; 415 int changed;
416 416
417 get_full_page_state(&pgstat); /* get no of sectors in & out */ 417 all_vm_events(events);
418 418
419 /* Just use a very simple calculation here. Do not care about overflow, 419 /* Just use a very simple calculation here. Do not care about overflow,
420 since we only want to know if there was activity or not. */ 420 since we only want to know if there was activity or not. */
421 changed = (pgstat.pgpgin != last_pgpgin) || (pgstat.pgpgout != last_pgpgout); 421 changed = (events[PGPGIN] != last_pgpgin) ||
422 last_pgpgin = pgstat.pgpgin; 422 (events[PGPGOUT] != last_pgpgout);
423 last_pgpgout = pgstat.pgpgout; 423 last_pgpgin = events[PGPGIN];
424 last_pgpgout = events[PGPGOUT];
424 425
425 return (changed ? LED_DISK_IO : 0); 426 return (changed ? LED_DISK_IO : 0);
426} 427}
diff --git a/drivers/serial/sunsab.c b/drivers/serial/sunsab.c
index 7da02d11c364..141fedbefbc4 100644
--- a/drivers/serial/sunsab.c
+++ b/drivers/serial/sunsab.c
@@ -1,7 +1,7 @@
1/* sunsab.c: ASYNC Driver for the SIEMENS SAB82532 DUSCC. 1/* sunsab.c: ASYNC Driver for the SIEMENS SAB82532 DUSCC.
2 * 2 *
3 * Copyright (C) 1997 Eddie C. Dost (ecd@skynet.be) 3 * Copyright (C) 1997 Eddie C. Dost (ecd@skynet.be)
4 * Copyright (C) 2002 David S. Miller (davem@redhat.com) 4 * Copyright (C) 2002, 2006 David S. Miller (davem@davemloft.net)
5 * 5 *
6 * Rewrote buffer handling to use CIRC(Circular Buffer) macros. 6 * Rewrote buffer handling to use CIRC(Circular Buffer) macros.
7 * Maxim Krasnyanskiy <maxk@qualcomm.com> 7 * Maxim Krasnyanskiy <maxk@qualcomm.com>
@@ -12,7 +12,7 @@
12 * Theodore Ts'o <tytso@mit.edu>, 2001-Oct-12 12 * Theodore Ts'o <tytso@mit.edu>, 2001-Oct-12
13 * 13 *
14 * Ported to new 2.5.x UART layer. 14 * Ported to new 2.5.x UART layer.
15 * David S. Miller <davem@redhat.com> 15 * David S. Miller <davem@davemloft.net>
16 */ 16 */
17 17
18#include <linux/config.h> 18#include <linux/config.h>
@@ -37,8 +37,8 @@
37 37
38#include <asm/io.h> 38#include <asm/io.h>
39#include <asm/irq.h> 39#include <asm/irq.h>
40#include <asm/oplib.h> 40#include <asm/prom.h>
41#include <asm/ebus.h> 41#include <asm/of_device.h>
42 42
43#if defined(CONFIG_SERIAL_SUNZILOG_CONSOLE) && defined(CONFIG_MAGIC_SYSRQ) 43#if defined(CONFIG_SERIAL_SUNZILOG_CONSOLE) && defined(CONFIG_MAGIC_SYSRQ)
44#define SUPPORT_SYSRQ 44#define SUPPORT_SYSRQ
@@ -976,199 +976,188 @@ static inline struct console *SUNSAB_CONSOLE(void)
976#define sunsab_console_init() do { } while (0) 976#define sunsab_console_init() do { } while (0)
977#endif 977#endif
978 978
979static void __init for_each_sab_edev(void (*callback)(struct linux_ebus_device *, void *), void *arg) 979static int __devinit sunsab_init_one(struct uart_sunsab_port *up,
980 struct of_device *op,
981 unsigned long offset,
982 int line)
980{ 983{
981 struct linux_ebus *ebus; 984 up->port.line = line;
982 struct linux_ebus_device *edev = NULL; 985 up->port.dev = &op->dev;
983 986
984 for_each_ebus(ebus) { 987 up->port.mapbase = op->resource[0].start + offset;
985 for_each_ebusdev(edev, ebus) { 988 up->port.membase = of_ioremap(&op->resource[0], offset,
986 if (!strcmp(edev->prom_node->name, "se")) { 989 sizeof(union sab82532_async_regs),
987 callback(edev, arg); 990 "sab");
988 continue; 991 if (!up->port.membase)
989 } else if (!strcmp(edev->prom_node->name, "serial")) { 992 return -ENOMEM;
990 char *compat; 993 up->regs = (union sab82532_async_regs __iomem *) up->port.membase;
991 int clen;
992
993 /* On RIO this can be an SE, check it. We could
994 * just check ebus->is_rio, but this is more portable.
995 */
996 compat = of_get_property(edev->prom_node,
997 "compatible", &clen);
998 if (compat && clen > 0) {
999 if (strncmp(compat, "sab82532", 8) == 0) {
1000 callback(edev, arg);
1001 continue;
1002 }
1003 }
1004 }
1005 }
1006 }
1007}
1008 994
1009static void __init sab_count_callback(struct linux_ebus_device *edev, void *arg) 995 up->port.irq = op->irqs[0];
1010{
1011 int *count_p = arg;
1012 996
1013 (*count_p)++; 997 up->port.fifosize = SAB82532_XMIT_FIFO_SIZE;
1014} 998 up->port.iotype = UPIO_MEM;
1015 999
1016static void __init sab_attach_callback(struct linux_ebus_device *edev, void *arg) 1000 writeb(SAB82532_IPC_IC_ACT_LOW, &up->regs->w.ipc);
1017{
1018 int *instance_p = arg;
1019 struct uart_sunsab_port *up;
1020 unsigned long regs, offset;
1021 int i;
1022 1001
1023 /* Note: ports are located in reverse order */ 1002 up->port.ops = &sunsab_pops;
1024 regs = edev->resource[0].start; 1003 up->port.type = PORT_SUNSAB;
1025 offset = sizeof(union sab82532_async_regs); 1004 up->port.uartclk = SAB_BASE_BAUD;
1026 for (i = 0; i < 2; i++) {
1027 up = &sunsab_ports[(*instance_p * 2) + 1 - i];
1028 1005
1029 memset(up, 0, sizeof(*up)); 1006 up->type = readb(&up->regs->r.vstr) & 0x0f;
1030 up->regs = ioremap(regs + offset, sizeof(union sab82532_async_regs)); 1007 writeb(~((1 << 1) | (1 << 2) | (1 << 4)), &up->regs->w.pcr);
1031 up->port.irq = edev->irqs[0]; 1008 writeb(0xff, &up->regs->w.pim);
1032 up->port.fifosize = SAB82532_XMIT_FIFO_SIZE; 1009 if ((up->port.line & 0x1) == 0) {
1033 up->port.mapbase = (unsigned long)up->regs; 1010 up->pvr_dsr_bit = (1 << 0);
1034 up->port.iotype = UPIO_MEM; 1011 up->pvr_dtr_bit = (1 << 1);
1012 } else {
1013 up->pvr_dsr_bit = (1 << 3);
1014 up->pvr_dtr_bit = (1 << 2);
1015 }
1016 up->cached_pvr = (1 << 1) | (1 << 2) | (1 << 4);
1017 writeb(up->cached_pvr, &up->regs->w.pvr);
1018 up->cached_mode = readb(&up->regs->rw.mode);
1019 up->cached_mode |= SAB82532_MODE_FRTS;
1020 writeb(up->cached_mode, &up->regs->rw.mode);
1021 up->cached_mode |= SAB82532_MODE_RTS;
1022 writeb(up->cached_mode, &up->regs->rw.mode);
1035 1023
1036 writeb(SAB82532_IPC_IC_ACT_LOW, &up->regs->w.ipc); 1024 up->tec_timeout = SAB82532_MAX_TEC_TIMEOUT;
1025 up->cec_timeout = SAB82532_MAX_CEC_TIMEOUT;
1037 1026
1038 offset -= sizeof(union sab82532_async_regs); 1027 if (!(up->port.line & 0x01)) {
1028 int err;
1029
1030 err = request_irq(up->port.irq, sunsab_interrupt,
1031 SA_SHIRQ, "sab", up);
1032 if (err) {
1033 of_iounmap(up->port.membase,
1034 sizeof(union sab82532_async_regs));
1035 return err;
1036 }
1039 } 1037 }
1040 1038
1041 (*instance_p)++; 1039 return 0;
1042} 1040}
1043 1041
1044static int __init probe_for_sabs(void) 1042static int __devinit sab_probe(struct of_device *op, const struct of_device_id *match)
1045{ 1043{
1046 int this_sab = 0; 1044 static int inst;
1045 struct uart_sunsab_port *up;
1046 int err;
1047
1048 up = &sunsab_ports[inst * 2];
1049
1050 err = sunsab_init_one(&up[0], op,
1051 sizeof(union sab82532_async_regs),
1052 (inst * 2) + 0);
1053 if (err)
1054 return err;
1055
1056 err = sunsab_init_one(&up[0], op, 0,
1057 (inst * 2) + 1);
1058 if (err) {
1059 of_iounmap(up[0].port.membase,
1060 sizeof(union sab82532_async_regs));
1061 free_irq(up[0].port.irq, &up[0]);
1062 return err;
1063 }
1047 1064
1048 /* Find device instances. */ 1065 uart_add_one_port(&sunsab_reg, &up[0].port);
1049 for_each_sab_edev(&sab_count_callback, &this_sab); 1066 uart_add_one_port(&sunsab_reg, &up[1].port);
1050 if (!this_sab)
1051 return -ENODEV;
1052 1067
1053 /* Allocate tables. */ 1068 dev_set_drvdata(&op->dev, &up[0]);
1054 sunsab_ports = kmalloc(sizeof(struct uart_sunsab_port) * this_sab * 2,
1055 GFP_KERNEL);
1056 if (!sunsab_ports)
1057 return -ENOMEM;
1058 1069
1059 num_channels = this_sab * 2; 1070 inst++;
1060 1071
1061 this_sab = 0;
1062 for_each_sab_edev(&sab_attach_callback, &this_sab);
1063 return 0; 1072 return 0;
1064} 1073}
1065 1074
1066static void __init sunsab_init_hw(void) 1075static void __devexit sab_remove_one(struct uart_sunsab_port *up)
1067{ 1076{
1068 int i; 1077 uart_remove_one_port(&sunsab_reg, &up->port);
1069 1078 if (!(up->port.line & 1))
1070 for (i = 0; i < num_channels; i++) { 1079 free_irq(up->port.irq, up);
1071 struct uart_sunsab_port *up = &sunsab_ports[i]; 1080 of_iounmap(up->port.membase,
1072 1081 sizeof(union sab82532_async_regs));
1073 up->port.line = i;
1074 up->port.ops = &sunsab_pops;
1075 up->port.type = PORT_SUNSAB;
1076 up->port.uartclk = SAB_BASE_BAUD;
1077
1078 up->type = readb(&up->regs->r.vstr) & 0x0f;
1079 writeb(~((1 << 1) | (1 << 2) | (1 << 4)), &up->regs->w.pcr);
1080 writeb(0xff, &up->regs->w.pim);
1081 if (up->port.line == 0) {
1082 up->pvr_dsr_bit = (1 << 0);
1083 up->pvr_dtr_bit = (1 << 1);
1084 } else {
1085 up->pvr_dsr_bit = (1 << 3);
1086 up->pvr_dtr_bit = (1 << 2);
1087 }
1088 up->cached_pvr = (1 << 1) | (1 << 2) | (1 << 4);
1089 writeb(up->cached_pvr, &up->regs->w.pvr);
1090 up->cached_mode = readb(&up->regs->rw.mode);
1091 up->cached_mode |= SAB82532_MODE_FRTS;
1092 writeb(up->cached_mode, &up->regs->rw.mode);
1093 up->cached_mode |= SAB82532_MODE_RTS;
1094 writeb(up->cached_mode, &up->regs->rw.mode);
1095
1096 up->tec_timeout = SAB82532_MAX_TEC_TIMEOUT;
1097 up->cec_timeout = SAB82532_MAX_CEC_TIMEOUT;
1098
1099 if (!(up->port.line & 0x01)) {
1100 if (request_irq(up->port.irq, sunsab_interrupt,
1101 SA_SHIRQ, "serial(sab82532)", up)) {
1102 printk("sunsab%d: can't get IRQ %x\n",
1103 i, up->port.irq);
1104 continue;
1105 }
1106 }
1107 }
1108} 1082}
1109 1083
1110static int __init sunsab_init(void) 1084static int __devexit sab_remove(struct of_device *op)
1111{ 1085{
1112 int ret = probe_for_sabs(); 1086 struct uart_sunsab_port *up = dev_get_drvdata(&op->dev);
1113 int i;
1114
1115 if (ret < 0)
1116 return ret;
1117 1087
1118 sunsab_init_hw(); 1088 sab_remove_one(&up[0]);
1089 sab_remove_one(&up[1]);
1119 1090
1120 sunsab_reg.minor = sunserial_current_minor; 1091 dev_set_drvdata(&op->dev, NULL);
1121 sunsab_reg.nr = num_channels;
1122 1092
1123 ret = uart_register_driver(&sunsab_reg); 1093 return 0;
1124 if (ret < 0) { 1094}
1125 int i;
1126 1095
1127 for (i = 0; i < num_channels; i++) { 1096static struct of_device_id sab_match[] = {
1128 struct uart_sunsab_port *up = &sunsab_ports[i]; 1097 {
1098 .name = "se",
1099 },
1100 {
1101 .name = "serial",
1102 .compatible = "sab82532",
1103 },
1104 {},
1105};
1106MODULE_DEVICE_TABLE(of, sab_match);
1129 1107
1130 if (!(up->port.line & 0x01)) 1108static struct of_platform_driver sab_driver = {
1131 free_irq(up->port.irq, up); 1109 .name = "sab",
1132 iounmap(up->regs); 1110 .match_table = sab_match,
1133 } 1111 .probe = sab_probe,
1134 kfree(sunsab_ports); 1112 .remove = __devexit_p(sab_remove),
1135 sunsab_ports = NULL; 1113};
1136 1114
1137 return ret; 1115static int __init sunsab_init(void)
1116{
1117 struct device_node *dp;
1118 int err;
1119
1120 num_channels = 0;
1121 for_each_node_by_name(dp, "su")
1122 num_channels += 2;
1123 for_each_node_by_name(dp, "serial") {
1124 if (of_device_is_compatible(dp, "sab82532"))
1125 num_channels += 2;
1138 } 1126 }
1139 1127
1140 sunsab_reg.tty_driver->name_base = sunsab_reg.minor - 64; 1128 if (num_channels) {
1129 sunsab_ports = kzalloc(sizeof(struct uart_sunsab_port) *
1130 num_channels, GFP_KERNEL);
1131 if (!sunsab_ports)
1132 return -ENOMEM;
1141 1133
1142 sunsab_reg.cons = SUNSAB_CONSOLE(); 1134 sunsab_reg.minor = sunserial_current_minor;
1135 sunsab_reg.nr = num_channels;
1143 1136
1144 sunserial_current_minor += num_channels; 1137 err = uart_register_driver(&sunsab_reg);
1145 1138 if (err) {
1146 for (i = 0; i < num_channels; i++) { 1139 kfree(sunsab_ports);
1147 struct uart_sunsab_port *up = &sunsab_ports[i]; 1140 sunsab_ports = NULL;
1148 1141
1149 uart_add_one_port(&sunsab_reg, &up->port); 1142 return err;
1143 }
1144
1145 sunsab_reg.tty_driver->name_base = sunsab_reg.minor - 64;
1146 sunsab_reg.cons = SUNSAB_CONSOLE();
1147 sunserial_current_minor += num_channels;
1150 } 1148 }
1151 1149
1152 return 0; 1150 return of_register_driver(&sab_driver, &of_bus_type);
1153} 1151}
1154 1152
1155static void __exit sunsab_exit(void) 1153static void __exit sunsab_exit(void)
1156{ 1154{
1157 int i; 1155 of_unregister_driver(&sab_driver);
1158 1156 if (num_channels) {
1159 for (i = 0; i < num_channels; i++) { 1157 sunserial_current_minor -= num_channels;
1160 struct uart_sunsab_port *up = &sunsab_ports[i]; 1158 uart_unregister_driver(&sunsab_reg);
1161
1162 uart_remove_one_port(&sunsab_reg, &up->port);
1163
1164 if (!(up->port.line & 0x01))
1165 free_irq(up->port.irq, up);
1166 iounmap(up->regs);
1167 } 1159 }
1168 1160
1169 sunserial_current_minor -= num_channels;
1170 uart_unregister_driver(&sunsab_reg);
1171
1172 kfree(sunsab_ports); 1161 kfree(sunsab_ports);
1173 sunsab_ports = NULL; 1162 sunsab_ports = NULL;
1174} 1163}
diff --git a/drivers/serial/sunsu.c b/drivers/serial/sunsu.c
index 6e28c25138cf..73a043b914ef 100644
--- a/drivers/serial/sunsu.c
+++ b/drivers/serial/sunsu.c
@@ -12,7 +12,7 @@
12 * Theodore Ts'o <tytso@mit.edu>, 2001-Oct-12 12 * Theodore Ts'o <tytso@mit.edu>, 2001-Oct-12
13 * 13 *
14 * Converted to new 2.5.x UART layer. 14 * Converted to new 2.5.x UART layer.
15 * David S. Miller (davem@redhat.com), 2002-Jul-29 15 * David S. Miller (davem@davemloft.net), 2002-Jul-29
16 */ 16 */
17 17
18#include <linux/config.h> 18#include <linux/config.h>
@@ -40,11 +40,8 @@
40 40
41#include <asm/io.h> 41#include <asm/io.h>
42#include <asm/irq.h> 42#include <asm/irq.h>
43#include <asm/oplib.h> 43#include <asm/prom.h>
44#include <asm/ebus.h> 44#include <asm/of_device.h>
45#ifdef CONFIG_SPARC64
46#include <asm/isa.h>
47#endif
48 45
49#if defined(CONFIG_SERIAL_SUNSU_CONSOLE) && defined(CONFIG_MAGIC_SYSRQ) 46#if defined(CONFIG_SERIAL_SUNSU_CONSOLE) && defined(CONFIG_MAGIC_SYSRQ)
50#define SUPPORT_SYSRQ 47#define SUPPORT_SYSRQ
@@ -94,10 +91,10 @@ struct uart_sunsu_port {
94 /* Probing information. */ 91 /* Probing information. */
95 enum su_type su_type; 92 enum su_type su_type;
96 unsigned int type_probed; /* XXX Stupid */ 93 unsigned int type_probed; /* XXX Stupid */
97 int port_node; 94 unsigned long reg_size;
98 95
99#ifdef CONFIG_SERIO 96#ifdef CONFIG_SERIO
100 struct serio *serio; 97 struct serio serio;
101 int serio_open; 98 int serio_open;
102#endif 99#endif
103}; 100};
@@ -509,7 +506,7 @@ static void receive_kbd_ms_chars(struct uart_sunsu_port *up, struct pt_regs *reg
509 /* Stop-A is handled by drivers/char/keyboard.c now. */ 506 /* Stop-A is handled by drivers/char/keyboard.c now. */
510 if (up->su_type == SU_PORT_KBD) { 507 if (up->su_type == SU_PORT_KBD) {
511#ifdef CONFIG_SERIO 508#ifdef CONFIG_SERIO
512 serio_interrupt(up->serio, ch, 0, regs); 509 serio_interrupt(&up->serio, ch, 0, regs);
513#endif 510#endif
514 } else if (up->su_type == SU_PORT_MS) { 511 } else if (up->su_type == SU_PORT_MS) {
515 int ret = suncore_mouse_baud_detection(ch, is_break); 512 int ret = suncore_mouse_baud_detection(ch, is_break);
@@ -523,7 +520,7 @@ static void receive_kbd_ms_chars(struct uart_sunsu_port *up, struct pt_regs *reg
523 520
524 case 0: 521 case 0:
525#ifdef CONFIG_SERIO 522#ifdef CONFIG_SERIO
526 serio_interrupt(up->serio, ch, 0, regs); 523 serio_interrupt(&up->serio, ch, 0, regs);
527#endif 524#endif
528 break; 525 break;
529 }; 526 };
@@ -1031,99 +1028,14 @@ static void sunsu_autoconfig(struct uart_sunsu_port *up)
1031{ 1028{
1032 unsigned char status1, status2, scratch, scratch2, scratch3; 1029 unsigned char status1, status2, scratch, scratch2, scratch3;
1033 unsigned char save_lcr, save_mcr; 1030 unsigned char save_lcr, save_mcr;
1034 struct linux_ebus_device *dev = NULL;
1035 struct linux_ebus *ebus;
1036#ifdef CONFIG_SPARC64
1037 struct sparc_isa_bridge *isa_br;
1038 struct sparc_isa_device *isa_dev;
1039#endif
1040#ifndef CONFIG_SPARC64
1041 struct linux_prom_registers reg0;
1042#endif
1043 unsigned long flags; 1031 unsigned long flags;
1044 1032
1045 if (!up->port_node || !up->su_type) 1033 if (up->su_type == SU_PORT_NONE)
1046 return; 1034 return;
1047 1035
1048 up->type_probed = PORT_UNKNOWN; 1036 up->type_probed = PORT_UNKNOWN;
1049 up->port.iotype = UPIO_MEM; 1037 up->port.iotype = UPIO_MEM;
1050 1038
1051 /*
1052 * First we look for Ebus-bases su's
1053 */
1054 for_each_ebus(ebus) {
1055 for_each_ebusdev(dev, ebus) {
1056 if (dev->prom_node->node == up->port_node) {
1057 /*
1058 * The EBus is broken on sparc; it delivers
1059 * virtual addresses in resources. Oh well...
1060 * This is correct on sparc64, though.
1061 */
1062 up->port.membase = (char *) dev->resource[0].start;
1063 /*
1064 * This is correct on both architectures.
1065 */
1066 up->port.mapbase = dev->resource[0].start;
1067 up->port.irq = dev->irqs[0];
1068 goto ebus_done;
1069 }
1070 }
1071 }
1072
1073#ifdef CONFIG_SPARC64
1074 for_each_isa(isa_br) {
1075 for_each_isadev(isa_dev, isa_br) {
1076 if (isa_dev->prom_node->node == up->port_node) {
1077 /* Same on sparc64. Cool architecure... */
1078 up->port.membase = (char *) isa_dev->resource.start;
1079 up->port.mapbase = isa_dev->resource.start;
1080 up->port.irq = isa_dev->irq;
1081 goto ebus_done;
1082 }
1083 }
1084 }
1085#endif
1086
1087#ifdef CONFIG_SPARC64
1088 /*
1089 * Not on Ebus, bailing.
1090 */
1091 return;
1092#else
1093 /*
1094 * Not on Ebus, must be OBIO.
1095 */
1096 if (prom_getproperty(up->port_node, "reg",
1097 (char *)&reg0, sizeof(reg0)) == -1) {
1098 prom_printf("sunsu: no \"reg\" property\n");
1099 return;
1100 }
1101 prom_apply_obio_ranges(&reg0, 1);
1102 if (reg0.which_io != 0) { /* Just in case... */
1103 prom_printf("sunsu: bus number nonzero: 0x%x:%x\n",
1104 reg0.which_io, reg0.phys_addr);
1105 return;
1106 }
1107 up->port.mapbase = reg0.phys_addr;
1108 if ((up->port.membase = ioremap(reg0.phys_addr, reg0.reg_size)) == 0) {
1109 prom_printf("sunsu: Cannot map registers.\n");
1110 return;
1111 }
1112
1113 /*
1114 * 0x20 is sun4m thing, Dave Redman heritage.
1115 * See arch/sparc/kernel/irq.c.
1116 */
1117#define IRQ_4M(n) ((n)|0x20)
1118
1119 /*
1120 * There is no intr property on MrCoffee, so hardwire it.
1121 */
1122 up->port.irq = IRQ_4M(13);
1123#endif
1124
1125ebus_done:
1126
1127 spin_lock_irqsave(&up->port.lock, flags); 1039 spin_lock_irqsave(&up->port.lock, flags);
1128 1040
1129 if (!(up->port.flags & UPF_BUGGY_UART)) { 1041 if (!(up->port.flags & UPF_BUGGY_UART)) {
@@ -1269,18 +1181,13 @@ static struct uart_driver sunsu_reg = {
1269 .major = TTY_MAJOR, 1181 .major = TTY_MAJOR,
1270}; 1182};
1271 1183
1272static int __init sunsu_kbd_ms_init(struct uart_sunsu_port *up, int channel) 1184static int __init sunsu_kbd_ms_init(struct uart_sunsu_port *up)
1273{ 1185{
1274 int quot, baud; 1186 int quot, baud;
1275#ifdef CONFIG_SERIO 1187#ifdef CONFIG_SERIO
1276 struct serio *serio; 1188 struct serio *serio;
1277#endif 1189#endif
1278 1190
1279 spin_lock_init(&up->port.lock);
1280 up->port.line = channel;
1281 up->port.type = PORT_UNKNOWN;
1282 up->port.uartclk = (SU_BASE_BAUD * 16);
1283
1284 if (up->su_type == SU_PORT_KBD) { 1191 if (up->su_type == SU_PORT_KBD) {
1285 up->cflag = B1200 | CS8 | CLOCAL | CREAD; 1192 up->cflag = B1200 | CS8 | CLOCAL | CREAD;
1286 baud = 1200; 1193 baud = 1200;
@@ -1292,41 +1199,31 @@ static int __init sunsu_kbd_ms_init(struct uart_sunsu_port *up, int channel)
1292 1199
1293 sunsu_autoconfig(up); 1200 sunsu_autoconfig(up);
1294 if (up->port.type == PORT_UNKNOWN) 1201 if (up->port.type == PORT_UNKNOWN)
1295 return -1; 1202 return -ENODEV;
1296
1297 printk(KERN_INFO "su%d at 0x%p (irq = %d) is a %s\n",
1298 channel,
1299 up->port.membase, up->port.irq,
1300 sunsu_type(&up->port));
1301 1203
1302#ifdef CONFIG_SERIO 1204#ifdef CONFIG_SERIO
1303 up->serio = serio = kmalloc(sizeof(struct serio), GFP_KERNEL); 1205 serio = &up->serio;
1304 if (serio) { 1206 serio->port_data = up;
1305 memset(serio, 0, sizeof(*serio));
1306
1307 serio->port_data = up;
1308
1309 serio->id.type = SERIO_RS232;
1310 if (up->su_type == SU_PORT_KBD) {
1311 serio->id.proto = SERIO_SUNKBD;
1312 strlcpy(serio->name, "sukbd", sizeof(serio->name));
1313 } else {
1314 serio->id.proto = SERIO_SUN;
1315 serio->id.extra = 1;
1316 strlcpy(serio->name, "sums", sizeof(serio->name));
1317 }
1318 strlcpy(serio->phys, (channel == 0 ? "su/serio0" : "su/serio1"),
1319 sizeof(serio->phys));
1320
1321 serio->write = sunsu_serio_write;
1322 serio->open = sunsu_serio_open;
1323 serio->close = sunsu_serio_close;
1324 1207
1325 serio_register_port(serio); 1208 serio->id.type = SERIO_RS232;
1209 if (up->su_type == SU_PORT_KBD) {
1210 serio->id.proto = SERIO_SUNKBD;
1211 strlcpy(serio->name, "sukbd", sizeof(serio->name));
1326 } else { 1212 } else {
1327 printk(KERN_WARNING "su%d: not enough memory for serio port\n", 1213 serio->id.proto = SERIO_SUN;
1328 channel); 1214 serio->id.extra = 1;
1215 strlcpy(serio->name, "sums", sizeof(serio->name));
1329 } 1216 }
1217 strlcpy(serio->phys,
1218 (!(up->port.line & 1) ? "su/serio0" : "su/serio1"),
1219 sizeof(serio->phys));
1220
1221 serio->write = sunsu_serio_write;
1222 serio->open = sunsu_serio_open;
1223 serio->close = sunsu_serio_close;
1224 serio->dev.parent = up->port.dev;
1225
1226 serio_register_port(serio);
1330#endif 1227#endif
1331 1228
1332 sunsu_change_speed(&up->port, up->cflag, 0, quot); 1229 sunsu_change_speed(&up->port, up->cflag, 0, quot);
@@ -1458,22 +1355,20 @@ static struct console sunsu_cons = {
1458 * Register console. 1355 * Register console.
1459 */ 1356 */
1460 1357
1461static inline struct console *SUNSU_CONSOLE(void) 1358static inline struct console *SUNSU_CONSOLE(int num_uart)
1462{ 1359{
1463 int i; 1360 int i;
1464 1361
1465 if (con_is_present()) 1362 if (con_is_present())
1466 return NULL; 1363 return NULL;
1467 1364
1468 for (i = 0; i < UART_NR; i++) { 1365 for (i = 0; i < num_uart; i++) {
1469 int this_minor = sunsu_reg.minor + i; 1366 int this_minor = sunsu_reg.minor + i;
1470 1367
1471 if ((this_minor - 64) == (serial_console - 1)) 1368 if ((this_minor - 64) == (serial_console - 1))
1472 break; 1369 break;
1473 } 1370 }
1474 if (i == UART_NR) 1371 if (i == num_uart)
1475 return NULL;
1476 if (sunsu_ports[i].port_node == 0)
1477 return NULL; 1372 return NULL;
1478 1373
1479 sunsu_cons.index = i; 1374 sunsu_cons.index = i;
@@ -1481,252 +1376,184 @@ static inline struct console *SUNSU_CONSOLE(void)
1481 return &sunsu_cons; 1376 return &sunsu_cons;
1482} 1377}
1483#else 1378#else
1484#define SUNSU_CONSOLE() (NULL) 1379#define SUNSU_CONSOLE(num_uart) (NULL)
1485#define sunsu_serial_console_init() do { } while (0) 1380#define sunsu_serial_console_init() do { } while (0)
1486#endif 1381#endif
1487 1382
1488static int __init sunsu_serial_init(void) 1383static enum su_type __devinit su_get_type(struct device_node *dp)
1489{ 1384{
1490 int instance, ret, i; 1385 struct device_node *ap = of_find_node_by_path("/aliases");
1491 1386
1492 /* How many instances do we need? */ 1387 if (ap) {
1493 instance = 0; 1388 char *keyb = of_get_property(ap, "keyboard", NULL);
1494 for (i = 0; i < UART_NR; i++) { 1389 char *ms = of_get_property(ap, "mouse", NULL);
1495 struct uart_sunsu_port *up = &sunsu_ports[i];
1496 1390
1497 if (up->su_type == SU_PORT_MS || 1391 if (keyb) {
1498 up->su_type == SU_PORT_KBD) 1392 if (dp == of_find_node_by_path(keyb))
1499 continue; 1393 return SU_PORT_KBD;
1500 1394 }
1501 spin_lock_init(&up->port.lock); 1395 if (ms) {
1502 up->port.flags |= UPF_BOOT_AUTOCONF; 1396 if (dp == of_find_node_by_path(ms))
1503 up->port.type = PORT_UNKNOWN; 1397 return SU_PORT_MS;
1504 up->port.uartclk = (SU_BASE_BAUD * 16); 1398 }
1399 }
1505 1400
1506 sunsu_autoconfig(up); 1401 return SU_PORT_PORT;
1507 if (up->port.type == PORT_UNKNOWN) 1402}
1508 continue;
1509 1403
1510 up->port.line = instance++; 1404static int __devinit su_probe(struct of_device *op, const struct of_device_id *match)
1511 up->port.ops = &sunsu_pops; 1405{
1512 } 1406 static int inst;
1407 struct device_node *dp = op->node;
1408 struct uart_sunsu_port *up;
1409 struct resource *rp;
1410 int err;
1513 1411
1514 sunsu_reg.minor = sunserial_current_minor; 1412 if (inst >= UART_NR)
1413 return -EINVAL;
1515 1414
1516 sunsu_reg.nr = instance; 1415 up = &sunsu_ports[inst];
1416 up->port.line = inst;
1517 1417
1518 ret = uart_register_driver(&sunsu_reg); 1418 spin_lock_init(&up->port.lock);
1519 if (ret < 0)
1520 return ret;
1521 1419
1522 sunsu_reg.tty_driver->name_base = sunsu_reg.minor - 64; 1420 up->su_type = su_get_type(dp);
1523 1421
1524 sunserial_current_minor += instance; 1422 rp = &op->resource[0];
1423 up->port.mapbase = op->resource[0].start;
1525 1424
1526 sunsu_reg.cons = SUNSU_CONSOLE(); 1425 up->reg_size = (rp->end - rp->start) + 1;
1426 up->port.membase = of_ioremap(rp, 0, up->reg_size, "su");
1427 if (!up->port.membase)
1428 return -ENOMEM;
1527 1429
1528 for (i = 0; i < UART_NR; i++) { 1430 up->port.irq = op->irqs[0];
1529 struct uart_sunsu_port *up = &sunsu_ports[i];
1530 1431
1531 /* Do not register Keyboard/Mouse lines with UART 1432 up->port.dev = &op->dev;
1532 * layer.
1533 */
1534 if (up->su_type == SU_PORT_MS ||
1535 up->su_type == SU_PORT_KBD)
1536 continue;
1537 1433
1538 if (up->port.type == PORT_UNKNOWN) 1434 up->port.type = PORT_UNKNOWN;
1539 continue; 1435 up->port.uartclk = (SU_BASE_BAUD * 16);
1540 1436
1541 uart_add_one_port(&sunsu_reg, &up->port); 1437 err = 0;
1438 if (up->su_type == SU_PORT_KBD || up->su_type == SU_PORT_MS) {
1439 err = sunsu_kbd_ms_init(up);
1440 if (err)
1441 goto out_unmap;
1542 } 1442 }
1543 1443
1544 return 0; 1444 up->port.flags |= UPF_BOOT_AUTOCONF;
1545}
1546 1445
1547static int su_node_ok(int node, char *name, int namelen) 1446 sunsu_autoconfig(up);
1548{
1549 if (strncmp(name, "su", namelen) == 0 ||
1550 strncmp(name, "su_pnp", namelen) == 0)
1551 return 1;
1552
1553 if (strncmp(name, "serial", namelen) == 0) {
1554 char compat[32];
1555 int clen;
1556
1557 /* Is it _really_ a 'su' device? */
1558 clen = prom_getproperty(node, "compatible", compat, sizeof(compat));
1559 if (clen > 0) {
1560 if (strncmp(compat, "sab82532", 8) == 0) {
1561 /* Nope, Siemens serial, not for us. */
1562 return 0;
1563 }
1564 }
1565 return 1;
1566 }
1567 1447
1568 return 0; 1448 err = -ENODEV;
1569} 1449 if (up->port.type == PORT_UNKNOWN)
1450 goto out_unmap;
1570 1451
1571#define SU_PROPSIZE 128 1452 up->port.ops = &sunsu_pops;
1572 1453
1573/* 1454 err = uart_add_one_port(&sunsu_reg, &up->port);
1574 * Scan status structure. 1455 if (err)
1575 * "prop" is a local variable but it eats stack to keep it in each 1456 goto out_unmap;
1576 * stack frame of a recursive procedure.
1577 */
1578struct su_probe_scan {
1579 int msnode, kbnode; /* PROM nodes for mouse and keyboard */
1580 int msx, kbx; /* minors for mouse and keyboard */
1581 int devices; /* scan index */
1582 char prop[SU_PROPSIZE];
1583};
1584 1457
1585/* 1458 dev_set_drvdata(&op->dev, up);
1586 * We have several platforms which present 'su' in different parts
1587 * of the device tree. 'su' may be found under obio, ebus, isa and pci.
1588 * We walk over the tree and find them wherever PROM hides them.
1589 */
1590static void __init su_probe_any(struct su_probe_scan *t, int sunode)
1591{
1592 struct uart_sunsu_port *up;
1593 int len;
1594 1459
1595 if (t->devices >= UART_NR) 1460 inst++;
1596 return;
1597 1461
1598 for (; sunode != 0; sunode = prom_getsibling(sunode)) { 1462 return 0;
1599 len = prom_getproperty(sunode, "name", t->prop, SU_PROPSIZE); 1463
1600 if (len <= 1) 1464out_unmap:
1601 continue; /* Broken PROM node */ 1465 of_iounmap(up->port.membase, up->reg_size);
1602 1466 return err;
1603 if (su_node_ok(sunode, t->prop, len)) {
1604 up = &sunsu_ports[t->devices];
1605 if (t->kbnode != 0 && sunode == t->kbnode) {
1606 t->kbx = t->devices;
1607 up->su_type = SU_PORT_KBD;
1608 } else if (t->msnode != 0 && sunode == t->msnode) {
1609 t->msx = t->devices;
1610 up->su_type = SU_PORT_MS;
1611 } else {
1612#ifdef CONFIG_SPARC64
1613 /*
1614 * Do not attempt to use the truncated
1615 * keyboard/mouse ports as serial ports
1616 * on Ultras with PC keyboard attached.
1617 */
1618 if (prom_getbool(sunode, "mouse"))
1619 continue;
1620 if (prom_getbool(sunode, "keyboard"))
1621 continue;
1622#endif
1623 up->su_type = SU_PORT_PORT;
1624 }
1625 up->port_node = sunode;
1626 ++t->devices;
1627 } else {
1628 su_probe_any(t, prom_getchild(sunode));
1629 }
1630 }
1631} 1467}
1632 1468
1633static int __init sunsu_probe(void) 1469static int __devexit su_remove(struct of_device *dev)
1634{ 1470{
1635 int node; 1471 struct uart_sunsu_port *up = dev_get_drvdata(&dev->dev);;
1636 int len;
1637 struct su_probe_scan scan;
1638 1472
1639 /* 1473 if (up->su_type == SU_PORT_MS ||
1640 * First, we scan the tree. 1474 up->su_type == SU_PORT_KBD) {
1641 */ 1475#ifdef CONFIG_SERIO
1642 scan.devices = 0; 1476 serio_unregister_port(&up->serio);
1643 scan.msx = -1; 1477#endif
1644 scan.kbx = -1; 1478 } else if (up->port.type != PORT_UNKNOWN)
1645 scan.kbnode = 0; 1479 uart_remove_one_port(&sunsu_reg, &up->port);
1646 scan.msnode = 0;
1647 1480
1648 /* 1481 return 0;
1649 * Get the nodes for keyboard and mouse from 'aliases'... 1482}
1650 */
1651 node = prom_getchild(prom_root_node);
1652 node = prom_searchsiblings(node, "aliases");
1653 if (node != 0) {
1654 len = prom_getproperty(node, "keyboard", scan.prop, SU_PROPSIZE);
1655 if (len > 0) {
1656 scan.prop[len] = 0;
1657 scan.kbnode = prom_finddevice(scan.prop);
1658 }
1659 1483
1660 len = prom_getproperty(node, "mouse", scan.prop, SU_PROPSIZE); 1484static struct of_device_id su_match[] = {
1661 if (len > 0) { 1485 {
1662 scan.prop[len] = 0; 1486 .name = "su",
1663 scan.msnode = prom_finddevice(scan.prop); 1487 },
1664 } 1488 {
1665 } 1489 .name = "su_pnp",
1490 },
1491 {
1492 .name = "serial",
1493 .compatible = "su",
1494 },
1495 {},
1496};
1497MODULE_DEVICE_TABLE(of, su_match);
1666 1498
1667 su_probe_any(&scan, prom_getchild(prom_root_node)); 1499static struct of_platform_driver su_driver = {
1500 .name = "su",
1501 .match_table = su_match,
1502 .probe = su_probe,
1503 .remove = __devexit_p(su_remove),
1504};
1668 1505
1669 /* 1506static int num_uart;
1670 * Second, we process the special case of keyboard and mouse.
1671 *
1672 * Currently if we got keyboard and mouse hooked to "su" ports
1673 * we do not use any possible remaining "su" as a serial port.
1674 * Thus, we ignore values of .msx and .kbx, then compact ports.
1675 */
1676 if (scan.msx != -1 && scan.kbx != -1) {
1677 sunsu_ports[0].su_type = SU_PORT_MS;
1678 sunsu_ports[0].port_node = scan.msnode;
1679 sunsu_kbd_ms_init(&sunsu_ports[0], 0);
1680 1507
1681 sunsu_ports[1].su_type = SU_PORT_KBD; 1508static int __init sunsu_init(void)
1682 sunsu_ports[1].port_node = scan.kbnode; 1509{
1683 sunsu_kbd_ms_init(&sunsu_ports[1], 1); 1510 struct device_node *dp;
1511 int err;
1684 1512
1685 return 0; 1513 num_uart = 0;
1514 for_each_node_by_name(dp, "su") {
1515 if (su_get_type(dp) == SU_PORT_PORT)
1516 num_uart++;
1686 } 1517 }
1687 1518 for_each_node_by_name(dp, "su_pnp") {
1688 if (scan.msx != -1 || scan.kbx != -1) { 1519 if (su_get_type(dp) == SU_PORT_PORT)
1689 printk("sunsu_probe: cannot match keyboard and mouse, confused\n"); 1520 num_uart++;
1690 return -ENODEV; 1521 }
1522 for_each_node_by_name(dp, "serial") {
1523 if (of_device_is_compatible(dp, "su")) {
1524 if (su_get_type(dp) == SU_PORT_PORT)
1525 num_uart++;
1526 }
1691 } 1527 }
1692 1528
1693 if (scan.devices == 0) 1529 if (num_uart) {
1694 return -ENODEV; 1530 sunsu_reg.minor = sunserial_current_minor;
1531 sunsu_reg.nr = num_uart;
1532 err = uart_register_driver(&sunsu_reg);
1533 if (err)
1534 return err;
1535 sunsu_reg.tty_driver->name_base = sunsu_reg.minor - 64;
1536 sunserial_current_minor += num_uart;
1537 sunsu_reg.cons = SUNSU_CONSOLE(num_uart);
1538 }
1695 1539
1696 /* 1540 err = of_register_driver(&su_driver, &of_bus_type);
1697 * Console must be initiated after the generic initialization. 1541 if (err && num_uart)
1698 */ 1542 uart_unregister_driver(&sunsu_reg);
1699 sunsu_serial_init();
1700 1543
1701 return 0; 1544 return err;
1702} 1545}
1703 1546
1704static void __exit sunsu_exit(void) 1547static void __exit sunsu_exit(void)
1705{ 1548{
1706 int i, saw_uart; 1549 if (num_uart)
1707
1708 saw_uart = 0;
1709 for (i = 0; i < UART_NR; i++) {
1710 struct uart_sunsu_port *up = &sunsu_ports[i];
1711
1712 if (up->su_type == SU_PORT_MS ||
1713 up->su_type == SU_PORT_KBD) {
1714#ifdef CONFIG_SERIO
1715 if (up->serio) {
1716 serio_unregister_port(up->serio);
1717 up->serio = NULL;
1718 }
1719#endif
1720 } else if (up->port.type != PORT_UNKNOWN) {
1721 uart_remove_one_port(&sunsu_reg, &up->port);
1722 saw_uart++;
1723 }
1724 }
1725
1726 if (saw_uart)
1727 uart_unregister_driver(&sunsu_reg); 1550 uart_unregister_driver(&sunsu_reg);
1728} 1551}
1729 1552
1730module_init(sunsu_probe); 1553module_init(sunsu_init);
1731module_exit(sunsu_exit); 1554module_exit(sunsu_exit);
1555
1556MODULE_AUTHOR("Eddie C. Dost, Peter Zaitcev, and David S. Miller");
1557MODULE_DESCRIPTION("Sun SU serial port driver");
1558MODULE_VERSION("2.0");
1732MODULE_LICENSE("GPL"); 1559MODULE_LICENSE("GPL");
diff --git a/drivers/serial/sunzilog.c b/drivers/serial/sunzilog.c
index 9f42677287ad..1caa286a6be6 100644
--- a/drivers/serial/sunzilog.c
+++ b/drivers/serial/sunzilog.c
@@ -1,5 +1,4 @@
1/* 1/* sunzilog.c: Zilog serial driver for Sparc systems.
2 * sunzilog.c
3 * 2 *
4 * Driver for Zilog serial chips found on Sun workstations and 3 * Driver for Zilog serial chips found on Sun workstations and
5 * servers. This driver could actually be made more generic. 4 * servers. This driver could actually be made more generic.
@@ -10,7 +9,7 @@
10 * C. Dost, Pete Zaitcev, Ted Ts'o and Alex Buell for their 9 * C. Dost, Pete Zaitcev, Ted Ts'o and Alex Buell for their
11 * work there. 10 * work there.
12 * 11 *
13 * Copyright (C) 2002 David S. Miller (davem@redhat.com) 12 * Copyright (C) 2002, 2006 David S. Miller (davem@davemloft.net)
14 */ 13 */
15 14
16#include <linux/config.h> 15#include <linux/config.h>
@@ -38,10 +37,8 @@
38 37
39#include <asm/io.h> 38#include <asm/io.h>
40#include <asm/irq.h> 39#include <asm/irq.h>
41#ifdef CONFIG_SPARC64 40#include <asm/prom.h>
42#include <asm/fhc.h> 41#include <asm/of_device.h>
43#endif
44#include <asm/sbus.h>
45 42
46#if defined(CONFIG_SERIAL_SUNZILOG_CONSOLE) && defined(CONFIG_MAGIC_SYSRQ) 43#if defined(CONFIG_SERIAL_SUNZILOG_CONSOLE) && defined(CONFIG_MAGIC_SYSRQ)
47#define SUPPORT_SYSRQ 44#define SUPPORT_SYSRQ
@@ -65,7 +62,7 @@
65#define ZSDELAY() 62#define ZSDELAY()
66#define ZSDELAY_LONG() 63#define ZSDELAY_LONG()
67#define ZS_WSYNC(__channel) \ 64#define ZS_WSYNC(__channel) \
68 sbus_readb(&((__channel)->control)) 65 readb(&((__channel)->control))
69#endif 66#endif
70 67
71static int num_sunzilog; 68static int num_sunzilog;
@@ -107,7 +104,7 @@ struct uart_sunzilog_port {
107 unsigned char prev_status; 104 unsigned char prev_status;
108 105
109#ifdef CONFIG_SERIO 106#ifdef CONFIG_SERIO
110 struct serio *serio; 107 struct serio serio;
111 int serio_open; 108 int serio_open;
112#endif 109#endif
113}; 110};
@@ -138,9 +135,9 @@ static unsigned char read_zsreg(struct zilog_channel __iomem *channel,
138{ 135{
139 unsigned char retval; 136 unsigned char retval;
140 137
141 sbus_writeb(reg, &channel->control); 138 writeb(reg, &channel->control);
142 ZSDELAY(); 139 ZSDELAY();
143 retval = sbus_readb(&channel->control); 140 retval = readb(&channel->control);
144 ZSDELAY(); 141 ZSDELAY();
145 142
146 return retval; 143 return retval;
@@ -149,9 +146,9 @@ static unsigned char read_zsreg(struct zilog_channel __iomem *channel,
149static void write_zsreg(struct zilog_channel __iomem *channel, 146static void write_zsreg(struct zilog_channel __iomem *channel,
150 unsigned char reg, unsigned char value) 147 unsigned char reg, unsigned char value)
151{ 148{
152 sbus_writeb(reg, &channel->control); 149 writeb(reg, &channel->control);
153 ZSDELAY(); 150 ZSDELAY();
154 sbus_writeb(value, &channel->control); 151 writeb(value, &channel->control);
155 ZSDELAY(); 152 ZSDELAY();
156} 153}
157 154
@@ -162,17 +159,17 @@ static void sunzilog_clear_fifo(struct zilog_channel __iomem *channel)
162 for (i = 0; i < 32; i++) { 159 for (i = 0; i < 32; i++) {
163 unsigned char regval; 160 unsigned char regval;
164 161
165 regval = sbus_readb(&channel->control); 162 regval = readb(&channel->control);
166 ZSDELAY(); 163 ZSDELAY();
167 if (regval & Rx_CH_AV) 164 if (regval & Rx_CH_AV)
168 break; 165 break;
169 166
170 regval = read_zsreg(channel, R1); 167 regval = read_zsreg(channel, R1);
171 sbus_readb(&channel->data); 168 readb(&channel->data);
172 ZSDELAY(); 169 ZSDELAY();
173 170
174 if (regval & (PAR_ERR | Rx_OVR | CRC_ERR)) { 171 if (regval & (PAR_ERR | Rx_OVR | CRC_ERR)) {
175 sbus_writeb(ERR_RES, &channel->control); 172 writeb(ERR_RES, &channel->control);
176 ZSDELAY(); 173 ZSDELAY();
177 ZS_WSYNC(channel); 174 ZS_WSYNC(channel);
178 } 175 }
@@ -194,7 +191,7 @@ static void __load_zsregs(struct zilog_channel __iomem *channel, unsigned char *
194 udelay(100); 191 udelay(100);
195 } 192 }
196 193
197 sbus_writeb(ERR_RES, &channel->control); 194 writeb(ERR_RES, &channel->control);
198 ZSDELAY(); 195 ZSDELAY();
199 ZS_WSYNC(channel); 196 ZS_WSYNC(channel);
200 197
@@ -291,7 +288,7 @@ static void sunzilog_kbdms_receive_chars(struct uart_sunzilog_port *up,
291 /* Stop-A is handled by drivers/char/keyboard.c now. */ 288 /* Stop-A is handled by drivers/char/keyboard.c now. */
292#ifdef CONFIG_SERIO 289#ifdef CONFIG_SERIO
293 if (up->serio_open) 290 if (up->serio_open)
294 serio_interrupt(up->serio, ch, 0, regs); 291 serio_interrupt(&up->serio, ch, 0, regs);
295#endif 292#endif
296 } else if (ZS_IS_MOUSE(up)) { 293 } else if (ZS_IS_MOUSE(up)) {
297 int ret = suncore_mouse_baud_detection(ch, is_break); 294 int ret = suncore_mouse_baud_detection(ch, is_break);
@@ -306,7 +303,7 @@ static void sunzilog_kbdms_receive_chars(struct uart_sunzilog_port *up,
306 case 0: 303 case 0:
307#ifdef CONFIG_SERIO 304#ifdef CONFIG_SERIO
308 if (up->serio_open) 305 if (up->serio_open)
309 serio_interrupt(up->serio, ch, 0, regs); 306 serio_interrupt(&up->serio, ch, 0, regs);
310#endif 307#endif
311 break; 308 break;
312 }; 309 };
@@ -330,12 +327,12 @@ sunzilog_receive_chars(struct uart_sunzilog_port *up,
330 327
331 r1 = read_zsreg(channel, R1); 328 r1 = read_zsreg(channel, R1);
332 if (r1 & (PAR_ERR | Rx_OVR | CRC_ERR)) { 329 if (r1 & (PAR_ERR | Rx_OVR | CRC_ERR)) {
333 sbus_writeb(ERR_RES, &channel->control); 330 writeb(ERR_RES, &channel->control);
334 ZSDELAY(); 331 ZSDELAY();
335 ZS_WSYNC(channel); 332 ZS_WSYNC(channel);
336 } 333 }
337 334
338 ch = sbus_readb(&channel->control); 335 ch = readb(&channel->control);
339 ZSDELAY(); 336 ZSDELAY();
340 337
341 /* This funny hack depends upon BRK_ABRT not interfering 338 /* This funny hack depends upon BRK_ABRT not interfering
@@ -347,7 +344,7 @@ sunzilog_receive_chars(struct uart_sunzilog_port *up,
347 if (!(ch & Rx_CH_AV)) 344 if (!(ch & Rx_CH_AV))
348 break; 345 break;
349 346
350 ch = sbus_readb(&channel->data); 347 ch = readb(&channel->data);
351 ZSDELAY(); 348 ZSDELAY();
352 349
353 ch &= up->parity_mask; 350 ch &= up->parity_mask;
@@ -406,10 +403,10 @@ static void sunzilog_status_handle(struct uart_sunzilog_port *up,
406{ 403{
407 unsigned char status; 404 unsigned char status;
408 405
409 status = sbus_readb(&channel->control); 406 status = readb(&channel->control);
410 ZSDELAY(); 407 ZSDELAY();
411 408
412 sbus_writeb(RES_EXT_INT, &channel->control); 409 writeb(RES_EXT_INT, &channel->control);
413 ZSDELAY(); 410 ZSDELAY();
414 ZS_WSYNC(channel); 411 ZS_WSYNC(channel);
415 412
@@ -421,7 +418,7 @@ static void sunzilog_status_handle(struct uart_sunzilog_port *up,
421 * confusing the PROM. 418 * confusing the PROM.
422 */ 419 */
423 while (1) { 420 while (1) {
424 status = sbus_readb(&channel->control); 421 status = readb(&channel->control);
425 ZSDELAY(); 422 ZSDELAY();
426 if (!(status & BRK_ABRT)) 423 if (!(status & BRK_ABRT))
427 break; 424 break;
@@ -458,7 +455,7 @@ static void sunzilog_transmit_chars(struct uart_sunzilog_port *up,
458 struct circ_buf *xmit; 455 struct circ_buf *xmit;
459 456
460 if (ZS_IS_CONS(up)) { 457 if (ZS_IS_CONS(up)) {
461 unsigned char status = sbus_readb(&channel->control); 458 unsigned char status = readb(&channel->control);
462 ZSDELAY(); 459 ZSDELAY();
463 460
464 /* TX still busy? Just wait for the next TX done interrupt. 461 /* TX still busy? Just wait for the next TX done interrupt.
@@ -487,7 +484,7 @@ static void sunzilog_transmit_chars(struct uart_sunzilog_port *up,
487 484
488 if (up->port.x_char) { 485 if (up->port.x_char) {
489 up->flags |= SUNZILOG_FLAG_TX_ACTIVE; 486 up->flags |= SUNZILOG_FLAG_TX_ACTIVE;
490 sbus_writeb(up->port.x_char, &channel->data); 487 writeb(up->port.x_char, &channel->data);
491 ZSDELAY(); 488 ZSDELAY();
492 ZS_WSYNC(channel); 489 ZS_WSYNC(channel);
493 490
@@ -506,7 +503,7 @@ static void sunzilog_transmit_chars(struct uart_sunzilog_port *up,
506 goto ack_tx_int; 503 goto ack_tx_int;
507 504
508 up->flags |= SUNZILOG_FLAG_TX_ACTIVE; 505 up->flags |= SUNZILOG_FLAG_TX_ACTIVE;
509 sbus_writeb(xmit->buf[xmit->tail], &channel->data); 506 writeb(xmit->buf[xmit->tail], &channel->data);
510 ZSDELAY(); 507 ZSDELAY();
511 ZS_WSYNC(channel); 508 ZS_WSYNC(channel);
512 509
@@ -519,7 +516,7 @@ static void sunzilog_transmit_chars(struct uart_sunzilog_port *up,
519 return; 516 return;
520 517
521ack_tx_int: 518ack_tx_int:
522 sbus_writeb(RES_Tx_P, &channel->control); 519 writeb(RES_Tx_P, &channel->control);
523 ZSDELAY(); 520 ZSDELAY();
524 ZS_WSYNC(channel); 521 ZS_WSYNC(channel);
525} 522}
@@ -540,7 +537,7 @@ static irqreturn_t sunzilog_interrupt(int irq, void *dev_id, struct pt_regs *reg
540 /* Channel A */ 537 /* Channel A */
541 tty = NULL; 538 tty = NULL;
542 if (r3 & (CHAEXT | CHATxIP | CHARxIP)) { 539 if (r3 & (CHAEXT | CHATxIP | CHARxIP)) {
543 sbus_writeb(RES_H_IUS, &channel->control); 540 writeb(RES_H_IUS, &channel->control);
544 ZSDELAY(); 541 ZSDELAY();
545 ZS_WSYNC(channel); 542 ZS_WSYNC(channel);
546 543
@@ -563,7 +560,7 @@ static irqreturn_t sunzilog_interrupt(int irq, void *dev_id, struct pt_regs *reg
563 spin_lock(&up->port.lock); 560 spin_lock(&up->port.lock);
564 tty = NULL; 561 tty = NULL;
565 if (r3 & (CHBEXT | CHBTxIP | CHBRxIP)) { 562 if (r3 & (CHBEXT | CHBTxIP | CHBRxIP)) {
566 sbus_writeb(RES_H_IUS, &channel->control); 563 writeb(RES_H_IUS, &channel->control);
567 ZSDELAY(); 564 ZSDELAY();
568 ZS_WSYNC(channel); 565 ZS_WSYNC(channel);
569 566
@@ -594,7 +591,7 @@ static __inline__ unsigned char sunzilog_read_channel_status(struct uart_port *p
594 unsigned char status; 591 unsigned char status;
595 592
596 channel = ZILOG_CHANNEL_FROM_PORT(port); 593 channel = ZILOG_CHANNEL_FROM_PORT(port);
597 status = sbus_readb(&channel->control); 594 status = readb(&channel->control);
598 ZSDELAY(); 595 ZSDELAY();
599 596
600 return status; 597 return status;
@@ -682,7 +679,7 @@ static void sunzilog_start_tx(struct uart_port *port)
682 up->flags |= SUNZILOG_FLAG_TX_ACTIVE; 679 up->flags |= SUNZILOG_FLAG_TX_ACTIVE;
683 up->flags &= ~SUNZILOG_FLAG_TX_STOPPED; 680 up->flags &= ~SUNZILOG_FLAG_TX_STOPPED;
684 681
685 status = sbus_readb(&channel->control); 682 status = readb(&channel->control);
686 ZSDELAY(); 683 ZSDELAY();
687 684
688 /* TX busy? Just wait for the TX done interrupt. */ 685 /* TX busy? Just wait for the TX done interrupt. */
@@ -693,7 +690,7 @@ static void sunzilog_start_tx(struct uart_port *port)
693 * IRQ sending engine. 690 * IRQ sending engine.
694 */ 691 */
695 if (port->x_char) { 692 if (port->x_char) {
696 sbus_writeb(port->x_char, &channel->data); 693 writeb(port->x_char, &channel->data);
697 ZSDELAY(); 694 ZSDELAY();
698 ZS_WSYNC(channel); 695 ZS_WSYNC(channel);
699 696
@@ -702,7 +699,7 @@ static void sunzilog_start_tx(struct uart_port *port)
702 } else { 699 } else {
703 struct circ_buf *xmit = &port->info->xmit; 700 struct circ_buf *xmit = &port->info->xmit;
704 701
705 sbus_writeb(xmit->buf[xmit->tail], &channel->data); 702 writeb(xmit->buf[xmit->tail], &channel->data);
706 ZSDELAY(); 703 ZSDELAY();
707 ZS_WSYNC(channel); 704 ZS_WSYNC(channel);
708 705
@@ -779,7 +776,7 @@ static void __sunzilog_startup(struct uart_sunzilog_port *up)
779 struct zilog_channel __iomem *channel; 776 struct zilog_channel __iomem *channel;
780 777
781 channel = ZILOG_CHANNEL_FROM_PORT(&up->port); 778 channel = ZILOG_CHANNEL_FROM_PORT(&up->port);
782 up->prev_status = sbus_readb(&channel->control); 779 up->prev_status = readb(&channel->control);
783 780
784 /* Enable receiver and transmitter. */ 781 /* Enable receiver and transmitter. */
785 up->curregs[R3] |= RxENAB; 782 up->curregs[R3] |= RxENAB;
@@ -963,7 +960,7 @@ sunzilog_set_termios(struct uart_port *port, struct termios *termios,
963 960
964static const char *sunzilog_type(struct uart_port *port) 961static const char *sunzilog_type(struct uart_port *port)
965{ 962{
966 return "SunZilog"; 963 return "zs";
967} 964}
968 965
969/* We do not request/release mappings of the registers here, this 966/* We do not request/release mappings of the registers here, this
@@ -1012,7 +1009,6 @@ static struct uart_sunzilog_port *sunzilog_port_table;
1012static struct zilog_layout __iomem **sunzilog_chip_regs; 1009static struct zilog_layout __iomem **sunzilog_chip_regs;
1013 1010
1014static struct uart_sunzilog_port *sunzilog_irq_chain; 1011static struct uart_sunzilog_port *sunzilog_irq_chain;
1015static int zilog_irq = -1;
1016 1012
1017static struct uart_driver sunzilog_reg = { 1013static struct uart_driver sunzilog_reg = {
1018 .owner = THIS_MODULE, 1014 .owner = THIS_MODULE,
@@ -1021,232 +1017,47 @@ static struct uart_driver sunzilog_reg = {
1021 .major = TTY_MAJOR, 1017 .major = TTY_MAJOR,
1022}; 1018};
1023 1019
1024static void * __init alloc_one_table(unsigned long size) 1020static int __init sunzilog_alloc_tables(void)
1025{
1026 void *ret;
1027
1028 ret = kmalloc(size, GFP_KERNEL);
1029 if (ret != NULL)
1030 memset(ret, 0, size);
1031
1032 return ret;
1033}
1034
1035static void __init sunzilog_alloc_tables(void)
1036{
1037 sunzilog_port_table =
1038 alloc_one_table(NUM_CHANNELS * sizeof(struct uart_sunzilog_port));
1039 sunzilog_chip_regs =
1040 alloc_one_table(NUM_SUNZILOG * sizeof(struct zilog_layout __iomem *));
1041
1042 if (sunzilog_port_table == NULL || sunzilog_chip_regs == NULL) {
1043 prom_printf("SunZilog: Cannot allocate tables.\n");
1044 prom_halt();
1045 }
1046}
1047
1048#ifdef CONFIG_SPARC64
1049
1050/* We used to attempt to use the address property of the Zilog device node
1051 * but that totally is not necessary on sparc64.
1052 */
1053static struct zilog_layout __iomem * __init get_zs_sun4u(int chip, int zsnode)
1054{ 1021{
1055 void __iomem *mapped_addr; 1022 struct uart_sunzilog_port *up;
1056 unsigned int sun4u_ino; 1023 unsigned long size;
1057 struct sbus_bus *sbus = NULL; 1024 int i;
1058 struct sbus_dev *sdev = NULL;
1059 int err;
1060
1061 if (central_bus == NULL) {
1062 for_each_sbus(sbus) {
1063 for_each_sbusdev(sdev, sbus) {
1064 if (sdev->prom_node == zsnode)
1065 goto found;
1066 }
1067 }
1068 }
1069 found:
1070 if (sdev == NULL && central_bus == NULL) {
1071 prom_printf("SunZilog: sdev&&central == NULL for "
1072 "Zilog %d in get_zs_sun4u.\n", chip);
1073 prom_halt();
1074 }
1075 if (central_bus == NULL) {
1076 mapped_addr =
1077 sbus_ioremap(&sdev->resource[0], 0,
1078 PAGE_SIZE,
1079 "Zilog Registers");
1080 } else {
1081 struct linux_prom_registers zsregs[1];
1082
1083 err = prom_getproperty(zsnode, "reg",
1084 (char *) &zsregs[0],
1085 sizeof(zsregs));
1086 if (err == -1) {
1087 prom_printf("SunZilog: Cannot map "
1088 "Zilog %d regs on "
1089 "central bus.\n", chip);
1090 prom_halt();
1091 }
1092 apply_fhc_ranges(central_bus->child,
1093 &zsregs[0], 1);
1094 apply_central_ranges(central_bus, &zsregs[0], 1);
1095 mapped_addr = (void __iomem *)
1096 ((((u64)zsregs[0].which_io)<<32UL) |
1097 ((u64)zsregs[0].phys_addr));
1098 }
1099
1100 if (zilog_irq == -1) {
1101 if (central_bus) {
1102 unsigned long iclr, imap;
1103
1104 iclr = central_bus->child->fhc_regs.uregs
1105 + FHC_UREGS_ICLR;
1106 imap = central_bus->child->fhc_regs.uregs
1107 + FHC_UREGS_IMAP;
1108 zilog_irq = build_irq(0, iclr, imap);
1109 } else {
1110 err = prom_getproperty(zsnode, "interrupts",
1111 (char *) &sun4u_ino,
1112 sizeof(sun4u_ino));
1113 zilog_irq = sbus_build_irq(sbus_root, sun4u_ino);
1114 }
1115 }
1116
1117 return (struct zilog_layout __iomem *) mapped_addr;
1118}
1119#else /* CONFIG_SPARC64 */
1120
1121/*
1122 * XXX The sun4d case is utterly screwed: it tries to re-walk the tree
1123 * (for the 3rd time) in order to find bootbus and cpu. Streamline it.
1124 */
1125static struct zilog_layout __iomem * __init get_zs_sun4cmd(int chip, int node)
1126{
1127 struct linux_prom_irqs irq_info[2];
1128 void __iomem *mapped_addr = NULL;
1129 int zsnode, cpunode, bbnode;
1130 struct linux_prom_registers zsreg[4];
1131 struct resource res;
1132
1133 if (sparc_cpu_model == sun4d) {
1134 int walk;
1135
1136 zsnode = 0;
1137 bbnode = 0;
1138 cpunode = 0;
1139 for (walk = prom_getchild(prom_root_node);
1140 (walk = prom_searchsiblings(walk, "cpu-unit")) != 0;
1141 walk = prom_getsibling(walk)) {
1142 bbnode = prom_getchild(walk);
1143 if (bbnode &&
1144 (bbnode = prom_searchsiblings(bbnode, "bootbus"))) {
1145 if ((zsnode = prom_getchild(bbnode)) == node) {
1146 cpunode = walk;
1147 break;
1148 }
1149 }
1150 }
1151 if (!walk) {
1152 prom_printf("SunZilog: Cannot find the %d'th bootbus on sun4d.\n",
1153 (chip / 2));
1154 prom_halt();
1155 }
1156 1025
1157 if (prom_getproperty(zsnode, "reg", 1026 size = NUM_CHANNELS * sizeof(struct uart_sunzilog_port);
1158 (char *) zsreg, sizeof(zsreg)) == -1) { 1027 sunzilog_port_table = kzalloc(size, GFP_KERNEL);
1159 prom_printf("SunZilog: Cannot map Zilog %d\n", chip); 1028 if (!sunzilog_port_table)
1160 prom_halt(); 1029 return -ENOMEM;
1161 }
1162 /* XXX Looks like an off by one? */
1163 prom_apply_generic_ranges(bbnode, cpunode, zsreg, 1);
1164 res.start = zsreg[0].phys_addr;
1165 res.end = res.start + (8 - 1);
1166 res.flags = zsreg[0].which_io | IORESOURCE_IO;
1167 mapped_addr = sbus_ioremap(&res, 0, 8, "Zilog Serial");
1168 1030
1169 } else { 1031 for (i = 0; i < NUM_CHANNELS; i++) {
1170 zsnode = node; 1032 up = &sunzilog_port_table[i];
1171 1033
1172#if 0 /* XXX When was this used? */ 1034 spin_lock_init(&up->port.lock);
1173 if (prom_getintdefault(zsnode, "slave", -1) != chipid) {
1174 zsnode = prom_getsibling(zsnode);
1175 continue;
1176 }
1177#endif
1178 1035
1179 /* 1036 if (i == 0)
1180 * "address" is only present on ports that OBP opened 1037 sunzilog_irq_chain = up;
1181 * (from Mitch Bradley's "Hitchhiker's Guide to OBP").
1182 * We do not use it.
1183 */
1184 1038
1185 if (prom_getproperty(zsnode, "reg", 1039 if (i < NUM_CHANNELS - 1)
1186 (char *) zsreg, sizeof(zsreg)) == -1) { 1040 up->next = up + 1;
1187 prom_printf("SunZilog: Cannot map Zilog %d\n", chip); 1041 else
1188 prom_halt(); 1042 up->next = NULL;
1189 }
1190 if (sparc_cpu_model == sun4m) /* Crude. Pass parent. XXX */
1191 prom_apply_obio_ranges(zsreg, 1);
1192 res.start = zsreg[0].phys_addr;
1193 res.end = res.start + (8 - 1);
1194 res.flags = zsreg[0].which_io | IORESOURCE_IO;
1195 mapped_addr = sbus_ioremap(&res, 0, 8, "Zilog Serial");
1196 } 1043 }
1197 1044
1198 if (prom_getproperty(zsnode, "intr", 1045 size = NUM_SUNZILOG * sizeof(struct zilog_layout __iomem *);
1199 (char *) irq_info, sizeof(irq_info)) 1046 sunzilog_chip_regs = kzalloc(size, GFP_KERNEL);
1200 % sizeof(struct linux_prom_irqs)) { 1047 if (!sunzilog_chip_regs) {
1201 prom_printf("SunZilog: Cannot get IRQ property for Zilog %d.\n", 1048 kfree(sunzilog_port_table);
1202 chip); 1049 sunzilog_irq_chain = NULL;
1203 prom_halt(); 1050 return -ENOMEM;
1204 }
1205 if (zilog_irq == -1) {
1206 zilog_irq = irq_info[0].pri;
1207 } else if (zilog_irq != irq_info[0].pri) {
1208 /* XXX. Dumb. Should handle per-chip IRQ, for add-ons. */
1209 prom_printf("SunZilog: Inconsistent IRQ layout for Zilog %d.\n",
1210 chip);
1211 prom_halt();
1212 } 1051 }
1213 1052
1214 return (struct zilog_layout __iomem *) mapped_addr; 1053 return 0;
1215} 1054}
1216#endif /* !(CONFIG_SPARC64) */
1217 1055
1218/* Get the address of the registers for SunZilog instance CHIP. */ 1056static void sunzilog_free_tables(void)
1219static struct zilog_layout __iomem * __init get_zs(int chip, int node)
1220{ 1057{
1221 if (chip < 0 || chip >= NUM_SUNZILOG) { 1058 kfree(sunzilog_port_table);
1222 prom_printf("SunZilog: Illegal chip number %d in get_zs.\n", chip); 1059 sunzilog_irq_chain = NULL;
1223 prom_halt(); 1060 kfree(sunzilog_chip_regs);
1224 }
1225
1226#ifdef CONFIG_SPARC64
1227 return get_zs_sun4u(chip, node);
1228#else
1229
1230 if (sparc_cpu_model == sun4) {
1231 struct resource res;
1232
1233 /* Not probe-able, hard code it. */
1234 switch (chip) {
1235 case 0:
1236 res.start = 0xf1000000;
1237 break;
1238 case 1:
1239 res.start = 0xf0000000;
1240 break;
1241 };
1242 zilog_irq = 12;
1243 res.end = (res.start + (8 - 1));
1244 res.flags = IORESOURCE_IO;
1245 return sbus_ioremap(&res, 0, 8, "SunZilog");
1246 }
1247
1248 return get_zs_sun4cmd(chip, node);
1249#endif
1250} 1061}
1251 1062
1252#define ZS_PUT_CHAR_MAX_DELAY 2000 /* 10 ms */ 1063#define ZS_PUT_CHAR_MAX_DELAY 2000 /* 10 ms */
@@ -1260,7 +1071,7 @@ static void sunzilog_putchar(struct uart_port *port, int ch)
1260 * udelay with ZSDELAY as that is a NOP on some platforms. -DaveM 1071 * udelay with ZSDELAY as that is a NOP on some platforms. -DaveM
1261 */ 1072 */
1262 do { 1073 do {
1263 unsigned char val = sbus_readb(&channel->control); 1074 unsigned char val = readb(&channel->control);
1264 if (val & Tx_BUF_EMP) { 1075 if (val & Tx_BUF_EMP) {
1265 ZSDELAY(); 1076 ZSDELAY();
1266 break; 1077 break;
@@ -1268,7 +1079,7 @@ static void sunzilog_putchar(struct uart_port *port, int ch)
1268 udelay(5); 1079 udelay(5);
1269 } while (--loops); 1080 } while (--loops);
1270 1081
1271 sbus_writeb(ch, &channel->data); 1082 writeb(ch, &channel->data);
1272 ZSDELAY(); 1083 ZSDELAY();
1273 ZS_WSYNC(channel); 1084 ZS_WSYNC(channel);
1274} 1085}
@@ -1385,28 +1196,6 @@ static struct console sunzilog_console = {
1385 .data = &sunzilog_reg, 1196 .data = &sunzilog_reg,
1386}; 1197};
1387 1198
1388static int __init sunzilog_console_init(void)
1389{
1390 int i;
1391
1392 if (con_is_present())
1393 return 0;
1394
1395 for (i = 0; i < NUM_CHANNELS; i++) {
1396 int this_minor = sunzilog_reg.minor + i;
1397
1398 if ((this_minor - 64) == (serial_console - 1))
1399 break;
1400 }
1401 if (i == NUM_CHANNELS)
1402 return 0;
1403
1404 sunzilog_console.index = i;
1405 sunzilog_port_table[i].flags |= SUNZILOG_FLAG_IS_CONS;
1406 register_console(&sunzilog_console);
1407 return 0;
1408}
1409
1410static inline struct console *SUNZILOG_CONSOLE(void) 1199static inline struct console *SUNZILOG_CONSOLE(void)
1411{ 1200{
1412 int i; 1201 int i;
@@ -1431,101 +1220,8 @@ static inline struct console *SUNZILOG_CONSOLE(void)
1431 1220
1432#else 1221#else
1433#define SUNZILOG_CONSOLE() (NULL) 1222#define SUNZILOG_CONSOLE() (NULL)
1434#define sunzilog_console_init() do { } while (0)
1435#endif 1223#endif
1436 1224
1437/*
1438 * We scan the PROM tree recursively. This is the most reliable way
1439 * to find Zilog nodes on various platforms. However, we face an extreme
1440 * shortage of kernel stack, so we must be very careful. To that end,
1441 * we scan only to a certain depth, and we use a common property buffer
1442 * in the scan structure.
1443 */
1444#define ZS_PROPSIZE 128
1445#define ZS_SCAN_DEPTH 5
1446
1447struct zs_probe_scan {
1448 int depth;
1449 void (*scanner)(struct zs_probe_scan *t, int node);
1450
1451 int devices;
1452 char prop[ZS_PROPSIZE];
1453};
1454
1455static int __inline__ sunzilog_node_ok(int node, const char *name, int len)
1456{
1457 if (strncmp(name, "zs", len) == 0)
1458 return 1;
1459 /* Don't fold this procedure just yet. Compare to su_node_ok(). */
1460 return 0;
1461}
1462
1463static void __init sunzilog_scan(struct zs_probe_scan *t, int node)
1464{
1465 int len;
1466
1467 for (; node != 0; node = prom_getsibling(node)) {
1468 len = prom_getproperty(node, "name", t->prop, ZS_PROPSIZE);
1469 if (len <= 1)
1470 continue; /* Broken PROM node */
1471 if (sunzilog_node_ok(node, t->prop, len)) {
1472 (*t->scanner)(t, node);
1473 } else {
1474 if (t->depth < ZS_SCAN_DEPTH) {
1475 t->depth++;
1476 sunzilog_scan(t, prom_getchild(node));
1477 --t->depth;
1478 }
1479 }
1480 }
1481}
1482
1483static void __init sunzilog_prepare(void)
1484{
1485 struct uart_sunzilog_port *up;
1486 struct zilog_layout __iomem *rp;
1487 int channel, chip;
1488
1489 /*
1490 * Temporary fix.
1491 */
1492 for (channel = 0; channel < NUM_CHANNELS; channel++)
1493 spin_lock_init(&sunzilog_port_table[channel].port.lock);
1494
1495 sunzilog_irq_chain = up = &sunzilog_port_table[0];
1496 for (channel = 0; channel < NUM_CHANNELS - 1; channel++)
1497 up[channel].next = &up[channel + 1];
1498 up[channel].next = NULL;
1499
1500 for (chip = 0; chip < NUM_SUNZILOG; chip++) {
1501 rp = sunzilog_chip_regs[chip];
1502 up[(chip * 2) + 0].port.membase = (void __iomem *)&rp->channelA;
1503 up[(chip * 2) + 1].port.membase = (void __iomem *)&rp->channelB;
1504
1505 /* Channel A */
1506 up[(chip * 2) + 0].port.iotype = UPIO_MEM;
1507 up[(chip * 2) + 0].port.irq = zilog_irq;
1508 up[(chip * 2) + 0].port.uartclk = ZS_CLOCK;
1509 up[(chip * 2) + 0].port.fifosize = 1;
1510 up[(chip * 2) + 0].port.ops = &sunzilog_pops;
1511 up[(chip * 2) + 0].port.type = PORT_SUNZILOG;
1512 up[(chip * 2) + 0].port.flags = 0;
1513 up[(chip * 2) + 0].port.line = (chip * 2) + 0;
1514 up[(chip * 2) + 0].flags |= SUNZILOG_FLAG_IS_CHANNEL_A;
1515
1516 /* Channel B */
1517 up[(chip * 2) + 1].port.iotype = UPIO_MEM;
1518 up[(chip * 2) + 1].port.irq = zilog_irq;
1519 up[(chip * 2) + 1].port.uartclk = ZS_CLOCK;
1520 up[(chip * 2) + 1].port.fifosize = 1;
1521 up[(chip * 2) + 1].port.ops = &sunzilog_pops;
1522 up[(chip * 2) + 1].port.type = PORT_SUNZILOG;
1523 up[(chip * 2) + 1].port.flags = 0;
1524 up[(chip * 2) + 1].port.line = (chip * 2) + 1;
1525 up[(chip * 2) + 1].flags |= 0;
1526 }
1527}
1528
1529static void __init sunzilog_init_kbdms(struct uart_sunzilog_port *up, int channel) 1225static void __init sunzilog_init_kbdms(struct uart_sunzilog_port *up, int channel)
1530{ 1226{
1531 int baud, brg; 1227 int baud, brg;
@@ -1539,8 +1235,6 @@ static void __init sunzilog_init_kbdms(struct uart_sunzilog_port *up, int channe
1539 up->cflag = B4800 | CS8 | CLOCAL | CREAD; 1235 up->cflag = B4800 | CS8 | CLOCAL | CREAD;
1540 baud = 4800; 1236 baud = 4800;
1541 } 1237 }
1542 printk(KERN_INFO "zs%d at 0x%p (irq = %d) is a SunZilog\n",
1543 channel, up->port.membase, zilog_irq);
1544 1238
1545 up->curregs[R15] = BRKIE; 1239 up->curregs[R15] = BRKIE;
1546 brg = BPS_TO_BRG(baud, ZS_CLOCK / ZS_CLOCK_DIVISOR); 1240 brg = BPS_TO_BRG(baud, ZS_CLOCK / ZS_CLOCK_DIVISOR);
@@ -1552,216 +1246,268 @@ static void __init sunzilog_init_kbdms(struct uart_sunzilog_port *up, int channe
1552#ifdef CONFIG_SERIO 1246#ifdef CONFIG_SERIO
1553static void __init sunzilog_register_serio(struct uart_sunzilog_port *up, int channel) 1247static void __init sunzilog_register_serio(struct uart_sunzilog_port *up, int channel)
1554{ 1248{
1555 struct serio *serio; 1249 struct serio *serio = &up->serio;
1556
1557 up->serio = serio = kmalloc(sizeof(struct serio), GFP_KERNEL);
1558 if (serio) {
1559 memset(serio, 0, sizeof(*serio));
1560
1561 serio->port_data = up;
1562
1563 serio->id.type = SERIO_RS232;
1564 if (channel == KEYBOARD_LINE) {
1565 serio->id.proto = SERIO_SUNKBD;
1566 strlcpy(serio->name, "zskbd", sizeof(serio->name));
1567 } else {
1568 serio->id.proto = SERIO_SUN;
1569 serio->id.extra = 1;
1570 strlcpy(serio->name, "zsms", sizeof(serio->name));
1571 }
1572 strlcpy(serio->phys,
1573 (channel == KEYBOARD_LINE ? "zs/serio0" : "zs/serio1"),
1574 sizeof(serio->phys));
1575 1250
1576 serio->write = sunzilog_serio_write; 1251 serio->port_data = up;
1577 serio->open = sunzilog_serio_open;
1578 serio->close = sunzilog_serio_close;
1579 1252
1580 serio_register_port(serio); 1253 serio->id.type = SERIO_RS232;
1254 if (channel == KEYBOARD_LINE) {
1255 serio->id.proto = SERIO_SUNKBD;
1256 strlcpy(serio->name, "zskbd", sizeof(serio->name));
1581 } else { 1257 } else {
1582 printk(KERN_WARNING "zs%d: not enough memory for serio port\n", 1258 serio->id.proto = SERIO_SUN;
1583 channel); 1259 serio->id.extra = 1;
1260 strlcpy(serio->name, "zsms", sizeof(serio->name));
1584 } 1261 }
1262 strlcpy(serio->phys,
1263 (channel == KEYBOARD_LINE ? "zs/serio0" : "zs/serio1"),
1264 sizeof(serio->phys));
1265
1266 serio->write = sunzilog_serio_write;
1267 serio->open = sunzilog_serio_open;
1268 serio->close = sunzilog_serio_close;
1269 serio->dev.parent = up->port.dev;
1270
1271 serio_register_port(serio);
1585} 1272}
1586#endif 1273#endif
1587 1274
1588static void __init sunzilog_init_hw(void) 1275static void __init sunzilog_init_hw(struct uart_sunzilog_port *up)
1589{ 1276{
1590 int i; 1277 struct zilog_channel __iomem *channel;
1591 1278 unsigned long flags;
1592 for (i = 0; i < NUM_CHANNELS; i++) { 1279 int baud, brg;
1593 struct uart_sunzilog_port *up = &sunzilog_port_table[i];
1594 struct zilog_channel __iomem *channel = ZILOG_CHANNEL_FROM_PORT(&up->port);
1595 unsigned long flags;
1596 int baud, brg;
1597 1280
1598 spin_lock_irqsave(&up->port.lock, flags); 1281 channel = ZILOG_CHANNEL_FROM_PORT(&up->port);
1599 1282
1600 if (ZS_IS_CHANNEL_A(up)) { 1283 spin_lock_irqsave(&up->port.lock, flags);
1601 write_zsreg(channel, R9, FHWRES); 1284 if (ZS_IS_CHANNEL_A(up)) {
1602 ZSDELAY_LONG(); 1285 write_zsreg(channel, R9, FHWRES);
1603 (void) read_zsreg(channel, R0); 1286 ZSDELAY_LONG();
1604 } 1287 (void) read_zsreg(channel, R0);
1288 }
1605 1289
1606 if (i == KEYBOARD_LINE || i == MOUSE_LINE) { 1290 if (up->port.line == KEYBOARD_LINE ||
1607 sunzilog_init_kbdms(up, i); 1291 up->port.line == MOUSE_LINE) {
1608 up->curregs[R9] |= (NV | MIE); 1292 sunzilog_init_kbdms(up, up->port.line);
1609 write_zsreg(channel, R9, up->curregs[R9]); 1293 up->curregs[R9] |= (NV | MIE);
1610 } else { 1294 write_zsreg(channel, R9, up->curregs[R9]);
1611 /* Normal serial TTY. */ 1295 } else {
1612 up->parity_mask = 0xff; 1296 /* Normal serial TTY. */
1613 up->curregs[R1] = EXT_INT_ENAB | INT_ALL_Rx | TxINT_ENAB; 1297 up->parity_mask = 0xff;
1614 up->curregs[R4] = PAR_EVEN | X16CLK | SB1; 1298 up->curregs[R1] = EXT_INT_ENAB | INT_ALL_Rx | TxINT_ENAB;
1615 up->curregs[R3] = RxENAB | Rx8; 1299 up->curregs[R4] = PAR_EVEN | X16CLK | SB1;
1616 up->curregs[R5] = TxENAB | Tx8; 1300 up->curregs[R3] = RxENAB | Rx8;
1617 up->curregs[R9] = NV | MIE; 1301 up->curregs[R5] = TxENAB | Tx8;
1618 up->curregs[R10] = NRZ; 1302 up->curregs[R9] = NV | MIE;
1619 up->curregs[R11] = TCBR | RCBR; 1303 up->curregs[R10] = NRZ;
1620 baud = 9600; 1304 up->curregs[R11] = TCBR | RCBR;
1621 brg = BPS_TO_BRG(baud, ZS_CLOCK / ZS_CLOCK_DIVISOR); 1305 baud = 9600;
1622 up->curregs[R12] = (brg & 0xff); 1306 brg = BPS_TO_BRG(baud, ZS_CLOCK / ZS_CLOCK_DIVISOR);
1623 up->curregs[R13] = (brg >> 8) & 0xff; 1307 up->curregs[R12] = (brg & 0xff);
1624 up->curregs[R14] = BRSRC | BRENAB; 1308 up->curregs[R13] = (brg >> 8) & 0xff;
1625 __load_zsregs(channel, up->curregs); 1309 up->curregs[R14] = BRSRC | BRENAB;
1626 write_zsreg(channel, R9, up->curregs[R9]); 1310 __load_zsregs(channel, up->curregs);
1627 } 1311 write_zsreg(channel, R9, up->curregs[R9]);
1312 }
1628 1313
1629 spin_unlock_irqrestore(&up->port.lock, flags); 1314 spin_unlock_irqrestore(&up->port.lock, flags);
1630 1315
1631#ifdef CONFIG_SERIO 1316#ifdef CONFIG_SERIO
1632 if (i == KEYBOARD_LINE || i == MOUSE_LINE) 1317 if (up->port.line == KEYBOARD_LINE || up->port.line == MOUSE_LINE)
1633 sunzilog_register_serio(up, i); 1318 sunzilog_register_serio(up, up->port.line);
1634#endif 1319#endif
1635 }
1636}
1637
1638static struct zilog_layout __iomem * __init get_zs(int chip, int node);
1639
1640static void __init sunzilog_scan_probe(struct zs_probe_scan *t, int node)
1641{
1642 sunzilog_chip_regs[t->devices] = get_zs(t->devices, node);
1643 t->devices++;
1644} 1320}
1645 1321
1646static int __init sunzilog_ports_init(void) 1322static int __devinit zs_get_instance(struct device_node *dp)
1647{ 1323{
1648 struct zs_probe_scan scan;
1649 int ret; 1324 int ret;
1650 int uart_count;
1651 int i;
1652
1653 printk(KERN_DEBUG "SunZilog: %d chips.\n", NUM_SUNZILOG);
1654
1655 scan.scanner = sunzilog_scan_probe;
1656 scan.depth = 0;
1657 scan.devices = 0;
1658 sunzilog_scan(&scan, prom_getchild(prom_root_node));
1659
1660 sunzilog_prepare();
1661 1325
1662 if (request_irq(zilog_irq, sunzilog_interrupt, SA_SHIRQ, 1326 ret = of_getintprop_default(dp, "slave", -1);
1663 "SunZilog", sunzilog_irq_chain)) { 1327 if (ret != -1)
1664 prom_printf("SunZilog: Unable to register zs interrupt handler.\n"); 1328 return ret;
1665 prom_halt();
1666 }
1667 1329
1668 sunzilog_init_hw(); 1330 if (of_find_property(dp, "keyboard", NULL))
1331 ret = 1;
1332 else
1333 ret = 0;
1669 1334
1670 /* We can only init this once we have probed the Zilogs 1335 return ret;
1671 * in the system. Do not count channels assigned to keyboards 1336}
1672 * or mice when we are deciding how many ports to register.
1673 */
1674 uart_count = 0;
1675 for (i = 0; i < NUM_CHANNELS; i++) {
1676 struct uart_sunzilog_port *up = &sunzilog_port_table[i];
1677 1337
1678 if (ZS_IS_KEYB(up) || ZS_IS_MOUSE(up)) 1338static int zilog_irq = -1;
1679 continue;
1680 1339
1681 uart_count++; 1340static int __devinit zs_probe(struct of_device *dev, const struct of_device_id *match)
1682 } 1341{
1683 1342 struct of_device *op = to_of_device(&dev->dev);
1684 sunzilog_reg.nr = uart_count; 1343 struct uart_sunzilog_port *up;
1685 sunzilog_reg.minor = sunserial_current_minor; 1344 struct zilog_layout __iomem *rp;
1345 int inst = zs_get_instance(dev->node);
1346 int err;
1686 1347
1687 ret = uart_register_driver(&sunzilog_reg); 1348 sunzilog_chip_regs[inst] = of_ioremap(&op->resource[0], 0,
1688 if (ret == 0) { 1349 sizeof(struct zilog_layout),
1689 sunzilog_reg.tty_driver->name_base = sunzilog_reg.minor - 64; 1350 "zs");
1690 sunzilog_reg.cons = SUNZILOG_CONSOLE(); 1351 if (!sunzilog_chip_regs[inst])
1352 return -ENOMEM;
1691 1353
1692 sunserial_current_minor += uart_count; 1354 rp = sunzilog_chip_regs[inst];
1693 1355
1694 for (i = 0; i < NUM_CHANNELS; i++) { 1356 if (zilog_irq == -1) {
1695 struct uart_sunzilog_port *up = &sunzilog_port_table[i]; 1357 zilog_irq = op->irqs[0];
1358 err = request_irq(zilog_irq, sunzilog_interrupt, SA_SHIRQ,
1359 "zs", sunzilog_irq_chain);
1360 if (err) {
1361 of_iounmap(rp, sizeof(struct zilog_layout));
1696 1362
1697 if (ZS_IS_KEYB(up) || ZS_IS_MOUSE(up)) 1363 return err;
1698 continue; 1364 }
1365 }
1699 1366
1700 if (uart_add_one_port(&sunzilog_reg, &up->port)) { 1367 up = &sunzilog_port_table[inst * 2];
1701 printk(KERN_ERR 1368
1702 "SunZilog: failed to add port zs%d\n", i); 1369 /* Channel A */
1703 } 1370 up[0].port.mapbase = op->resource[0].start + 0x00;
1371 up[0].port.membase = (void __iomem *) &rp->channelA;
1372 up[0].port.iotype = UPIO_MEM;
1373 up[0].port.irq = op->irqs[0];
1374 up[0].port.uartclk = ZS_CLOCK;
1375 up[0].port.fifosize = 1;
1376 up[0].port.ops = &sunzilog_pops;
1377 up[0].port.type = PORT_SUNZILOG;
1378 up[0].port.flags = 0;
1379 up[0].port.line = (inst * 2) + 0;
1380 up[0].port.dev = &op->dev;
1381 up[0].flags |= SUNZILOG_FLAG_IS_CHANNEL_A;
1382 if (inst == 1)
1383 up[0].flags |= SUNZILOG_FLAG_CONS_KEYB;
1384 sunzilog_init_hw(&up[0]);
1385
1386 /* Channel B */
1387 up[1].port.mapbase = op->resource[0].start + 0x04;
1388 up[1].port.membase = (void __iomem *) &rp->channelB;
1389 up[1].port.iotype = UPIO_MEM;
1390 up[1].port.irq = op->irqs[0];
1391 up[1].port.uartclk = ZS_CLOCK;
1392 up[1].port.fifosize = 1;
1393 up[1].port.ops = &sunzilog_pops;
1394 up[1].port.type = PORT_SUNZILOG;
1395 up[1].port.flags = 0;
1396 up[1].port.line = (inst * 2) + 1;
1397 up[1].port.dev = &op->dev;
1398 up[1].flags |= 0;
1399 if (inst == 1)
1400 up[1].flags |= SUNZILOG_FLAG_CONS_MOUSE;
1401 sunzilog_init_hw(&up[1]);
1402
1403 if (inst != 1) {
1404 err = uart_add_one_port(&sunzilog_reg, &up[0].port);
1405 if (err) {
1406 of_iounmap(rp, sizeof(struct zilog_layout));
1407 return err;
1408 }
1409 err = uart_add_one_port(&sunzilog_reg, &up[1].port);
1410 if (err) {
1411 uart_remove_one_port(&sunzilog_reg, &up[0].port);
1412 of_iounmap(rp, sizeof(struct zilog_layout));
1413 return err;
1704 } 1414 }
1705 } 1415 }
1706 1416
1707 return ret; 1417 dev_set_drvdata(&dev->dev, &up[0]);
1418
1419 return 0;
1708} 1420}
1709 1421
1710static void __init sunzilog_scan_count(struct zs_probe_scan *t, int node) 1422static void __devexit zs_remove_one(struct uart_sunzilog_port *up)
1711{ 1423{
1712 t->devices++; 1424 if (ZS_IS_KEYB(up) || ZS_IS_MOUSE(up)) {
1425#ifdef CONFIG_SERIO
1426 serio_unregister_port(&up->serio);
1427#endif
1428 } else
1429 uart_remove_one_port(&sunzilog_reg, &up->port);
1713} 1430}
1714 1431
1715static int __init sunzilog_ports_count(void) 1432static int __devexit zs_remove(struct of_device *dev)
1716{ 1433{
1717 struct zs_probe_scan scan; 1434 struct uart_sunzilog_port *up = dev_get_drvdata(&dev->dev);
1435 struct zilog_layout __iomem *regs;
1718 1436
1719 /* Sun4 Zilog setup is hard coded, no probing to do. */ 1437 zs_remove_one(&up[0]);
1720 if (sparc_cpu_model == sun4) 1438 zs_remove_one(&up[1]);
1721 return 2;
1722 1439
1723 scan.scanner = sunzilog_scan_count; 1440 regs = sunzilog_chip_regs[up[0].port.line / 2];
1724 scan.depth = 0; 1441 of_iounmap(regs, sizeof(struct zilog_layout));
1725 scan.devices = 0;
1726 1442
1727 sunzilog_scan(&scan, prom_getchild(prom_root_node)); 1443 dev_set_drvdata(&dev->dev, NULL);
1728 1444
1729 return scan.devices; 1445 return 0;
1730} 1446}
1731 1447
1448static struct of_device_id zs_match[] = {
1449 {
1450 .name = "zs",
1451 },
1452 {},
1453};
1454MODULE_DEVICE_TABLE(of, zs_match);
1455
1456static struct of_platform_driver zs_driver = {
1457 .name = "zs",
1458 .match_table = zs_match,
1459 .probe = zs_probe,
1460 .remove = __devexit_p(zs_remove),
1461};
1462
1732static int __init sunzilog_init(void) 1463static int __init sunzilog_init(void)
1733{ 1464{
1465 struct device_node *dp;
1466 int err;
1734 1467
1735 NUM_SUNZILOG = sunzilog_ports_count(); 1468 NUM_SUNZILOG = 0;
1736 if (NUM_SUNZILOG == 0) 1469 for_each_node_by_name(dp, "zs")
1737 return -ENODEV; 1470 NUM_SUNZILOG++;
1738 1471
1739 sunzilog_alloc_tables(); 1472 if (NUM_SUNZILOG) {
1473 int uart_count;
1740 1474
1741 sunzilog_ports_init(); 1475 err = sunzilog_alloc_tables();
1476 if (err)
1477 return err;
1742 1478
1743 return 0; 1479 /* Subtract 1 for keyboard, 1 for mouse. */
1480 uart_count = (NUM_SUNZILOG * 2) - 2;
1481
1482 sunzilog_reg.nr = uart_count;
1483 sunzilog_reg.minor = sunserial_current_minor;
1484 err = uart_register_driver(&sunzilog_reg);
1485 if (err) {
1486 sunzilog_free_tables();
1487 return err;
1488 }
1489 sunzilog_reg.tty_driver->name_base = sunzilog_reg.minor - 64;
1490 sunzilog_reg.cons = SUNZILOG_CONSOLE();
1491
1492 sunserial_current_minor += uart_count;
1493 }
1494
1495 return of_register_driver(&zs_driver, &of_bus_type);
1744} 1496}
1745 1497
1746static void __exit sunzilog_exit(void) 1498static void __exit sunzilog_exit(void)
1747{ 1499{
1748 int i; 1500 of_unregister_driver(&zs_driver);
1749 1501
1750 for (i = 0; i < NUM_CHANNELS; i++) { 1502 if (zilog_irq != -1) {
1751 struct uart_sunzilog_port *up = &sunzilog_port_table[i]; 1503 free_irq(zilog_irq, sunzilog_irq_chain);
1752 1504 zilog_irq = -1;
1753 if (ZS_IS_KEYB(up) || ZS_IS_MOUSE(up)) {
1754#ifdef CONFIG_SERIO
1755 if (up->serio) {
1756 serio_unregister_port(up->serio);
1757 up->serio = NULL;
1758 }
1759#endif
1760 } else
1761 uart_remove_one_port(&sunzilog_reg, &up->port);
1762 } 1505 }
1763 1506
1764 uart_unregister_driver(&sunzilog_reg); 1507 if (NUM_SUNZILOG) {
1508 uart_unregister_driver(&sunzilog_reg);
1509 sunzilog_free_tables();
1510 }
1765} 1511}
1766 1512
1767module_init(sunzilog_init); 1513module_init(sunzilog_init);
@@ -1769,4 +1515,5 @@ module_exit(sunzilog_exit);
1769 1515
1770MODULE_AUTHOR("David S. Miller"); 1516MODULE_AUTHOR("David S. Miller");
1771MODULE_DESCRIPTION("Sun Zilog serial port driver"); 1517MODULE_DESCRIPTION("Sun Zilog serial port driver");
1518MODULE_VERSION("2.0");
1772MODULE_LICENSE("GPL"); 1519MODULE_LICENSE("GPL");
diff --git a/drivers/usb/core/devio.c b/drivers/usb/core/devio.c
index bcbeaf7101d1..f7bdd94b3aa8 100644
--- a/drivers/usb/core/devio.c
+++ b/drivers/usb/core/devio.c
@@ -47,6 +47,7 @@
47#include <linux/usbdevice_fs.h> 47#include <linux/usbdevice_fs.h>
48#include <linux/cdev.h> 48#include <linux/cdev.h>
49#include <linux/notifier.h> 49#include <linux/notifier.h>
50#include <linux/security.h>
50#include <asm/uaccess.h> 51#include <asm/uaccess.h>
51#include <asm/byteorder.h> 52#include <asm/byteorder.h>
52#include <linux/moduleparam.h> 53#include <linux/moduleparam.h>
@@ -68,6 +69,7 @@ struct async {
68 void __user *userbuffer; 69 void __user *userbuffer;
69 void __user *userurb; 70 void __user *userurb;
70 struct urb *urb; 71 struct urb *urb;
72 u32 secid;
71}; 73};
72 74
73static int usbfs_snoop = 0; 75static int usbfs_snoop = 0;
@@ -312,7 +314,7 @@ static void async_completed(struct urb *urb, struct pt_regs *regs)
312 sinfo.si_code = SI_ASYNCIO; 314 sinfo.si_code = SI_ASYNCIO;
313 sinfo.si_addr = as->userurb; 315 sinfo.si_addr = as->userurb;
314 kill_proc_info_as_uid(as->signr, &sinfo, as->pid, as->uid, 316 kill_proc_info_as_uid(as->signr, &sinfo, as->pid, as->uid,
315 as->euid); 317 as->euid, as->secid);
316 } 318 }
317 snoop(&urb->dev->dev, "urb complete\n"); 319 snoop(&urb->dev->dev, "urb complete\n");
318 snoop_urb(urb, as->userurb); 320 snoop_urb(urb, as->userurb);
@@ -572,6 +574,7 @@ static int usbdev_open(struct inode *inode, struct file *file)
572 ps->disc_euid = current->euid; 574 ps->disc_euid = current->euid;
573 ps->disccontext = NULL; 575 ps->disccontext = NULL;
574 ps->ifclaimed = 0; 576 ps->ifclaimed = 0;
577 security_task_getsecid(current, &ps->secid);
575 wmb(); 578 wmb();
576 list_add_tail(&ps->list, &dev->filelist); 579 list_add_tail(&ps->list, &dev->filelist);
577 file->private_data = ps; 580 file->private_data = ps;
@@ -1053,6 +1056,7 @@ static int proc_do_submiturb(struct dev_state *ps, struct usbdevfs_urb *uurb,
1053 as->pid = current->pid; 1056 as->pid = current->pid;
1054 as->uid = current->uid; 1057 as->uid = current->uid;
1055 as->euid = current->euid; 1058 as->euid = current->euid;
1059 security_task_getsecid(current, &as->secid);
1056 if (!(uurb->endpoint & USB_DIR_IN)) { 1060 if (!(uurb->endpoint & USB_DIR_IN)) {
1057 if (copy_from_user(as->urb->transfer_buffer, uurb->buffer, as->urb->transfer_buffer_length)) { 1061 if (copy_from_user(as->urb->transfer_buffer, uurb->buffer, as->urb->transfer_buffer_length)) {
1058 free_async(as); 1062 free_async(as);
diff --git a/drivers/usb/core/inode.c b/drivers/usb/core/inode.c
index bfc9b28a7242..d0a208de32cf 100644
--- a/drivers/usb/core/inode.c
+++ b/drivers/usb/core/inode.c
@@ -700,7 +700,7 @@ static void usbfs_remove_device(struct usb_device *dev)
700 sinfo.si_errno = EPIPE; 700 sinfo.si_errno = EPIPE;
701 sinfo.si_code = SI_ASYNCIO; 701 sinfo.si_code = SI_ASYNCIO;
702 sinfo.si_addr = ds->disccontext; 702 sinfo.si_addr = ds->disccontext;
703 kill_proc_info_as_uid(ds->discsignr, &sinfo, ds->disc_pid, ds->disc_uid, ds->disc_euid); 703 kill_proc_info_as_uid(ds->discsignr, &sinfo, ds->disc_pid, ds->disc_uid, ds->disc_euid, ds->secid);
704 } 704 }
705 } 705 }
706} 706}
diff --git a/drivers/usb/core/usb.h b/drivers/usb/core/usb.h
index 7a650c763a62..49f69236b420 100644
--- a/drivers/usb/core/usb.h
+++ b/drivers/usb/core/usb.h
@@ -80,6 +80,7 @@ struct dev_state {
80 uid_t disc_uid, disc_euid; 80 uid_t disc_uid, disc_euid;
81 void __user *disccontext; 81 void __user *disccontext;
82 unsigned long ifclaimed; 82 unsigned long ifclaimed;
83 u32 secid;
83}; 84};
84 85
85/* internal notify stuff */ 86/* internal notify stuff */
diff --git a/drivers/video/bw2.c b/drivers/video/bw2.c
index 6577fdfdfc16..c66e3d52cbf3 100644
--- a/drivers/video/bw2.c
+++ b/drivers/video/bw2.c
@@ -1,6 +1,6 @@
1/* bw2.c: BWTWO frame buffer driver 1/* bw2.c: BWTWO frame buffer driver
2 * 2 *
3 * Copyright (C) 2003 David S. Miller (davem@redhat.com) 3 * Copyright (C) 2003, 2006 David S. Miller (davem@davemloft.net)
4 * Copyright (C) 1996,1998 Jakub Jelinek (jj@ultra.linux.cz) 4 * Copyright (C) 1996,1998 Jakub Jelinek (jj@ultra.linux.cz)
5 * Copyright (C) 1996 Miguel de Icaza (miguel@nuclecu.unam.mx) 5 * Copyright (C) 1996 Miguel de Icaza (miguel@nuclecu.unam.mx)
6 * Copyright (C) 1997 Eddie C. Dost (ecd@skynet.be) 6 * Copyright (C) 1997 Eddie C. Dost (ecd@skynet.be)
@@ -19,14 +19,11 @@
19#include <linux/mm.h> 19#include <linux/mm.h>
20 20
21#include <asm/io.h> 21#include <asm/io.h>
22#include <asm/sbus.h>
23#include <asm/oplib.h> 22#include <asm/oplib.h>
23#include <asm/prom.h>
24#include <asm/of_device.h>
24#include <asm/fbio.h> 25#include <asm/fbio.h>
25 26
26#ifdef CONFIG_SPARC32
27#include <asm/sun4paddr.h>
28#endif
29
30#include "sbuslib.h" 27#include "sbuslib.h"
31 28
32/* 29/*
@@ -59,30 +56,30 @@ static struct fb_ops bw2_ops = {
59#define BWTWO_REGISTER_OFFSET 0x400000 56#define BWTWO_REGISTER_OFFSET 0x400000
60 57
61struct bt_regs { 58struct bt_regs {
62 volatile u32 addr; 59 u32 addr;
63 volatile u32 color_map; 60 u32 color_map;
64 volatile u32 control; 61 u32 control;
65 volatile u32 cursor; 62 u32 cursor;
66}; 63};
67 64
68struct bw2_regs { 65struct bw2_regs {
69 struct bt_regs cmap; 66 struct bt_regs cmap;
70 volatile u8 control; 67 u8 control;
71 volatile u8 status; 68 u8 status;
72 volatile u8 cursor_start; 69 u8 cursor_start;
73 volatile u8 cursor_end; 70 u8 cursor_end;
74 volatile u8 h_blank_start; 71 u8 h_blank_start;
75 volatile u8 h_blank_end; 72 u8 h_blank_end;
76 volatile u8 h_sync_start; 73 u8 h_sync_start;
77 volatile u8 h_sync_end; 74 u8 h_sync_end;
78 volatile u8 comp_sync_end; 75 u8 comp_sync_end;
79 volatile u8 v_blank_start_high; 76 u8 v_blank_start_high;
80 volatile u8 v_blank_start_low; 77 u8 v_blank_start_low;
81 volatile u8 v_blank_end; 78 u8 v_blank_end;
82 volatile u8 v_sync_start; 79 u8 v_sync_start;
83 volatile u8 v_sync_end; 80 u8 v_sync_end;
84 volatile u8 xfer_holdoff_start; 81 u8 xfer_holdoff_start;
85 volatile u8 xfer_holdoff_end; 82 u8 xfer_holdoff_end;
86}; 83};
87 84
88/* Status Register Constants */ 85/* Status Register Constants */
@@ -117,9 +114,8 @@ struct bw2_par {
117#define BW2_FLAG_BLANKED 0x00000001 114#define BW2_FLAG_BLANKED 0x00000001
118 115
119 unsigned long physbase; 116 unsigned long physbase;
117 unsigned long which_io;
120 unsigned long fbsize; 118 unsigned long fbsize;
121
122 struct sbus_dev *sdev;
123}; 119};
124 120
125/** 121/**
@@ -174,9 +170,7 @@ static int bw2_mmap(struct fb_info *info, struct vm_area_struct *vma)
174 170
175 return sbusfb_mmap_helper(bw2_mmap_map, 171 return sbusfb_mmap_helper(bw2_mmap_map,
176 par->physbase, par->fbsize, 172 par->physbase, par->fbsize,
177 (par->sdev ? 173 par->which_io,
178 par->sdev->reg_addrs[0].which_io :
179 0),
180 vma); 174 vma);
181} 175}
182 176
@@ -288,139 +282,124 @@ static void bw2_do_default_mode(struct bw2_par *par, struct fb_info *info,
288struct all_info { 282struct all_info {
289 struct fb_info info; 283 struct fb_info info;
290 struct bw2_par par; 284 struct bw2_par par;
291 struct list_head list;
292}; 285};
293static LIST_HEAD(bw2_list);
294 286
295static void bw2_init_one(struct sbus_dev *sdev) 287static int __devinit bw2_init_one(struct of_device *op)
296{ 288{
289 struct device_node *dp = op->node;
297 struct all_info *all; 290 struct all_info *all;
298 struct resource *resp; 291 int linebytes, err;
299#ifdef CONFIG_SUN4
300 struct resource res;
301#endif
302 int linebytes;
303 292
304 all = kmalloc(sizeof(*all), GFP_KERNEL); 293 all = kzalloc(sizeof(*all), GFP_KERNEL);
305 if (!all) { 294 if (!all)
306 printk(KERN_ERR "bw2: Cannot allocate memory.\n"); 295 return -ENOMEM;
307 return;
308 }
309 memset(all, 0, sizeof(*all));
310
311 INIT_LIST_HEAD(&all->list);
312 296
313 spin_lock_init(&all->par.lock); 297 spin_lock_init(&all->par.lock);
314 all->par.sdev = sdev; 298
315 299 all->par.physbase = op->resource[0].start;
316#ifdef CONFIG_SUN4 300 all->par.which_io = op->resource[0].flags & IORESOURCE_BITS;
317 if (!sdev) { 301
318 all->par.physbase = sun4_bwtwo_physaddr; 302 sbusfb_fill_var(&all->info.var, dp->node, 1);
319 res.start = sun4_bwtwo_physaddr; 303 linebytes = of_getintprop_default(dp, "linebytes",
320 res.end = res.start + BWTWO_REGISTER_OFFSET + sizeof(struct bw2_regs) - 1; 304 all->info.var.xres);
321 res.flags = IORESOURCE_IO; 305
322 resp = &res;
323 all->info.var.xres = all->info.var.xres_virtual = 1152;
324 all->info.var.yres = all->info.var.yres_virtual = 900;
325 all->info.var.bits_per_pixel = 1;
326 linebytes = 1152 / 8;
327 } else
328#else
329 {
330 BUG_ON(!sdev);
331 all->par.physbase = sdev->reg_addrs[0].phys_addr;
332 resp = &sdev->resource[0];
333 sbusfb_fill_var(&all->info.var, (sdev ? sdev->prom_node : 0), 1);
334 linebytes = prom_getintdefault(sdev->prom_node, "linebytes",
335 all->info.var.xres);
336 }
337#endif
338 all->info.var.red.length = all->info.var.green.length = 306 all->info.var.red.length = all->info.var.green.length =
339 all->info.var.blue.length = all->info.var.bits_per_pixel; 307 all->info.var.blue.length = all->info.var.bits_per_pixel;
340 all->info.var.red.offset = all->info.var.green.offset = 308 all->info.var.red.offset = all->info.var.green.offset =
341 all->info.var.blue.offset = 0; 309 all->info.var.blue.offset = 0;
342 310
343 all->par.regs = sbus_ioremap(resp, BWTWO_REGISTER_OFFSET, 311 all->par.regs = of_ioremap(&op->resource[0], BWTWO_REGISTER_OFFSET,
344 sizeof(struct bw2_regs), "bw2 regs"); 312 sizeof(struct bw2_regs), "bw2 regs");
345 313
346 if (sdev && !prom_getbool(sdev->prom_node, "width")) 314 if (!of_find_property(dp, "width", NULL))
347 bw2_do_default_mode(&all->par, &all->info, &linebytes); 315 bw2_do_default_mode(&all->par, &all->info, &linebytes);
348 316
349 all->par.fbsize = PAGE_ALIGN(linebytes * all->info.var.yres); 317 all->par.fbsize = PAGE_ALIGN(linebytes * all->info.var.yres);
350 318
351 all->info.flags = FBINFO_DEFAULT; 319 all->info.flags = FBINFO_DEFAULT;
352 all->info.fbops = &bw2_ops; 320 all->info.fbops = &bw2_ops;
353#if defined(CONFIG_SPARC32) 321
354 if (sdev) 322 all->info.screen_base =
355 all->info.screen_base = (char __iomem *) 323 sbus_ioremap(&op->resource[0], 0, all->par.fbsize, "bw2 ram");
356 prom_getintdefault(sdev->prom_node, "address", 0);
357#endif
358 if (!all->info.screen_base)
359 all->info.screen_base =
360 sbus_ioremap(resp, 0, all->par.fbsize, "bw2 ram");
361 all->info.par = &all->par; 324 all->info.par = &all->par;
362 325
363 bw2_blank(0, &all->info); 326 bw2_blank(0, &all->info);
364 327
365 bw2_init_fix(&all->info, linebytes); 328 bw2_init_fix(&all->info, linebytes);
366 329
367 if (register_framebuffer(&all->info) < 0) { 330 err= register_framebuffer(&all->info);
368 printk(KERN_ERR "bw2: Could not register framebuffer.\n"); 331 if (err < 0) {
332 of_iounmap(all->par.regs, sizeof(struct bw2_regs));
333 of_iounmap(all->info.screen_base, all->par.fbsize);
369 kfree(all); 334 kfree(all);
370 return; 335 return err;
371 } 336 }
372 337
373 list_add(&all->list, &bw2_list); 338 dev_set_drvdata(&op->dev, all);
339
340 printk("%s: bwtwo at %lx:%lx\n",
341 dp->full_name,
342 all->par.which_io, all->par.physbase);
374 343
375 printk("bw2: bwtwo at %lx:%lx\n", 344 return 0;
376 (long) (sdev ? sdev->reg_addrs[0].which_io : 0),
377 (long) all->par.physbase);
378} 345}
379 346
380int __init bw2_init(void) 347static int __devinit bw2_probe(struct of_device *dev, const struct of_device_id *match)
381{ 348{
382 struct sbus_bus *sbus; 349 struct of_device *op = to_of_device(&dev->dev);
383 struct sbus_dev *sdev;
384 350
385 if (fb_get_options("bw2fb", NULL)) 351 return bw2_init_one(op);
386 return -ENODEV; 352}
387 353
388#ifdef CONFIG_SUN4 354static int __devexit bw2_remove(struct of_device *dev)
389 bw2_init_one(NULL); 355{
390#endif 356 struct all_info *all = dev_get_drvdata(&dev->dev);
391 for_all_sbusdev(sdev, sbus) { 357
392 if (!strcmp(sdev->prom_name, "bwtwo")) 358 unregister_framebuffer(&all->info);
393 bw2_init_one(sdev); 359
394 } 360 of_iounmap(all->par.regs, sizeof(struct bw2_regs));
361 of_iounmap(all->info.screen_base, all->par.fbsize);
362
363 kfree(all);
364
365 dev_set_drvdata(&dev->dev, NULL);
395 366
396 return 0; 367 return 0;
397} 368}
398 369
399void __exit bw2_exit(void) 370static struct of_device_id bw2_match[] = {
400{ 371 {
401 struct list_head *pos, *tmp; 372 .name = "bwtwo",
373 },
374 {},
375};
376MODULE_DEVICE_TABLE(of, bw2_match);
402 377
403 list_for_each_safe(pos, tmp, &bw2_list) { 378static struct of_platform_driver bw2_driver = {
404 struct all_info *all = list_entry(pos, typeof(*all), list); 379 .name = "bw2",
380 .match_table = bw2_match,
381 .probe = bw2_probe,
382 .remove = __devexit_p(bw2_remove),
383};
405 384
406 unregister_framebuffer(&all->info); 385static int __init bw2_init(void)
407 kfree(all); 386{
408 } 387 if (fb_get_options("bw2fb", NULL))
388 return -ENODEV;
389
390 return of_register_driver(&bw2_driver, &of_bus_type);
409} 391}
410 392
411int __init 393static void __exit bw2_exit(void)
412bw2_setup(char *arg)
413{ 394{
414 /* No cmdline options yet... */ 395 return of_unregister_driver(&bw2_driver);
415 return 0;
416} 396}
417 397
418module_init(bw2_init);
419 398
420#ifdef MODULE 399module_init(bw2_init);
421module_exit(bw2_exit); 400module_exit(bw2_exit);
422#endif
423 401
424MODULE_DESCRIPTION("framebuffer driver for BWTWO chipsets"); 402MODULE_DESCRIPTION("framebuffer driver for BWTWO chipsets");
425MODULE_AUTHOR("David S. Miller <davem@redhat.com>"); 403MODULE_AUTHOR("David S. Miller <davem@davemloft.net>");
404MODULE_VERSION("2.0");
426MODULE_LICENSE("GPL"); 405MODULE_LICENSE("GPL");
diff --git a/drivers/video/cg14.c b/drivers/video/cg14.c
index 63b6c79c8a0a..7f926c619b61 100644
--- a/drivers/video/cg14.c
+++ b/drivers/video/cg14.c
@@ -1,6 +1,6 @@
1/* cg14.c: CGFOURTEEN frame buffer driver 1/* cg14.c: CGFOURTEEN frame buffer driver
2 * 2 *
3 * Copyright (C) 2003 David S. Miller (davem@redhat.com) 3 * Copyright (C) 2003, 2006 David S. Miller (davem@davemloft.net)
4 * Copyright (C) 1996,1998 Jakub Jelinek (jj@ultra.linux.cz) 4 * Copyright (C) 1996,1998 Jakub Jelinek (jj@ultra.linux.cz)
5 * Copyright (C) 1995 Miguel de Icaza (miguel@nuclecu.unam.mx) 5 * Copyright (C) 1995 Miguel de Icaza (miguel@nuclecu.unam.mx)
6 * 6 *
@@ -18,8 +18,8 @@
18#include <linux/mm.h> 18#include <linux/mm.h>
19 19
20#include <asm/io.h> 20#include <asm/io.h>
21#include <asm/sbus.h> 21#include <asm/prom.h>
22#include <asm/oplib.h> 22#include <asm/of_device.h>
23#include <asm/fbio.h> 23#include <asm/fbio.h>
24 24
25#include "sbuslib.h" 25#include "sbuslib.h"
@@ -99,73 +99,73 @@ static struct fb_ops cg14_ops = {
99#define CG14_MCR_PIXMODE_32 3 99#define CG14_MCR_PIXMODE_32 3
100 100
101struct cg14_regs{ 101struct cg14_regs{
102 volatile u8 mcr; /* Master Control Reg */ 102 u8 mcr; /* Master Control Reg */
103 volatile u8 ppr; /* Packed Pixel Reg */ 103 u8 ppr; /* Packed Pixel Reg */
104 volatile u8 tms[2]; /* Test Mode Status Regs */ 104 u8 tms[2]; /* Test Mode Status Regs */
105 volatile u8 msr; /* Master Status Reg */ 105 u8 msr; /* Master Status Reg */
106 volatile u8 fsr; /* Fault Status Reg */ 106 u8 fsr; /* Fault Status Reg */
107 volatile u8 rev; /* Revision & Impl */ 107 u8 rev; /* Revision & Impl */
108 volatile u8 ccr; /* Clock Control Reg */ 108 u8 ccr; /* Clock Control Reg */
109 volatile u32 tmr; /* Test Mode Read Back */ 109 u32 tmr; /* Test Mode Read Back */
110 volatile u8 mod; /* Monitor Operation Data Reg */ 110 u8 mod; /* Monitor Operation Data Reg */
111 volatile u8 acr; /* Aux Control */ 111 u8 acr; /* Aux Control */
112 u8 xxx0[6]; 112 u8 xxx0[6];
113 volatile u16 hct; /* Hor Counter */ 113 u16 hct; /* Hor Counter */
114 volatile u16 vct; /* Vert Counter */ 114 u16 vct; /* Vert Counter */
115 volatile u16 hbs; /* Hor Blank Start */ 115 u16 hbs; /* Hor Blank Start */
116 volatile u16 hbc; /* Hor Blank Clear */ 116 u16 hbc; /* Hor Blank Clear */
117 volatile u16 hss; /* Hor Sync Start */ 117 u16 hss; /* Hor Sync Start */
118 volatile u16 hsc; /* Hor Sync Clear */ 118 u16 hsc; /* Hor Sync Clear */
119 volatile u16 csc; /* Composite Sync Clear */ 119 u16 csc; /* Composite Sync Clear */
120 volatile u16 vbs; /* Vert Blank Start */ 120 u16 vbs; /* Vert Blank Start */
121 volatile u16 vbc; /* Vert Blank Clear */ 121 u16 vbc; /* Vert Blank Clear */
122 volatile u16 vss; /* Vert Sync Start */ 122 u16 vss; /* Vert Sync Start */
123 volatile u16 vsc; /* Vert Sync Clear */ 123 u16 vsc; /* Vert Sync Clear */
124 volatile u16 xcs; 124 u16 xcs;
125 volatile u16 xcc; 125 u16 xcc;
126 volatile u16 fsa; /* Fault Status Address */ 126 u16 fsa; /* Fault Status Address */
127 volatile u16 adr; /* Address Registers */ 127 u16 adr; /* Address Registers */
128 u8 xxx1[0xce]; 128 u8 xxx1[0xce];
129 volatile u8 pcg[0x100]; /* Pixel Clock Generator */ 129 u8 pcg[0x100]; /* Pixel Clock Generator */
130 volatile u32 vbr; /* Frame Base Row */ 130 u32 vbr; /* Frame Base Row */
131 volatile u32 vmcr; /* VBC Master Control */ 131 u32 vmcr; /* VBC Master Control */
132 volatile u32 vcr; /* VBC refresh */ 132 u32 vcr; /* VBC refresh */
133 volatile u32 vca; /* VBC Config */ 133 u32 vca; /* VBC Config */
134}; 134};
135 135
136#define CG14_CCR_ENABLE 0x04 136#define CG14_CCR_ENABLE 0x04
137#define CG14_CCR_SELECT 0x02 /* HW/Full screen */ 137#define CG14_CCR_SELECT 0x02 /* HW/Full screen */
138 138
139struct cg14_cursor { 139struct cg14_cursor {
140 volatile u32 cpl0[32]; /* Enable plane 0 */ 140 u32 cpl0[32]; /* Enable plane 0 */
141 volatile u32 cpl1[32]; /* Color selection plane */ 141 u32 cpl1[32]; /* Color selection plane */
142 volatile u8 ccr; /* Cursor Control Reg */ 142 u8 ccr; /* Cursor Control Reg */
143 u8 xxx0[3]; 143 u8 xxx0[3];
144 volatile u16 cursx; /* Cursor x,y position */ 144 u16 cursx; /* Cursor x,y position */
145 volatile u16 cursy; /* Cursor x,y position */ 145 u16 cursy; /* Cursor x,y position */
146 volatile u32 color0; 146 u32 color0;
147 volatile u32 color1; 147 u32 color1;
148 u32 xxx1[0x1bc]; 148 u32 xxx1[0x1bc];
149 volatile u32 cpl0i[32]; /* Enable plane 0 autoinc */ 149 u32 cpl0i[32]; /* Enable plane 0 autoinc */
150 volatile u32 cpl1i[32]; /* Color selection autoinc */ 150 u32 cpl1i[32]; /* Color selection autoinc */
151}; 151};
152 152
153struct cg14_dac { 153struct cg14_dac {
154 volatile u8 addr; /* Address Register */ 154 u8 addr; /* Address Register */
155 u8 xxx0[255]; 155 u8 xxx0[255];
156 volatile u8 glut; /* Gamma table */ 156 u8 glut; /* Gamma table */
157 u8 xxx1[255]; 157 u8 xxx1[255];
158 volatile u8 select; /* Register Select */ 158 u8 select; /* Register Select */
159 u8 xxx2[255]; 159 u8 xxx2[255];
160 volatile u8 mode; /* Mode Register */ 160 u8 mode; /* Mode Register */
161}; 161};
162 162
163struct cg14_xlut{ 163struct cg14_xlut{
164 volatile u8 x_xlut [256]; 164 u8 x_xlut [256];
165 volatile u8 x_xlutd [256]; 165 u8 x_xlutd [256];
166 u8 xxx0[0x600]; 166 u8 xxx0[0x600];
167 volatile u8 x_xlut_inc [256]; 167 u8 x_xlut_inc [256];
168 volatile u8 x_xlutd_inc [256]; 168 u8 x_xlutd_inc [256];
169}; 169};
170 170
171/* Color look up table (clut) */ 171/* Color look up table (clut) */
@@ -204,7 +204,6 @@ struct cg14_par {
204 204
205 int mode; 205 int mode;
206 int ramsize; 206 int ramsize;
207 struct sbus_dev *sdev;
208}; 207};
209 208
210static void __cg14_reset(struct cg14_par *par) 209static void __cg14_reset(struct cg14_par *par)
@@ -355,14 +354,9 @@ static int cg14_ioctl(struct fb_info *info, unsigned int cmd, unsigned long arg)
355 * Initialisation 354 * Initialisation
356 */ 355 */
357 356
358static void cg14_init_fix(struct fb_info *info, int linebytes) 357static void cg14_init_fix(struct fb_info *info, int linebytes, struct device_node *dp)
359{ 358{
360 struct cg14_par *par = (struct cg14_par *)info->par; 359 const char *name = dp->name;
361 const char *name;
362
363 name = "cgfourteen";
364 if (par->sdev)
365 name = par->sdev->prom_name;
366 360
367 strlcpy(info->fix.id, name, sizeof(info->fix.id)); 361 strlcpy(info->fix.id, name, sizeof(info->fix.id));
368 362
@@ -456,98 +450,81 @@ static struct sbus_mmap_map __cg14_mmap_map[CG14_MMAP_ENTRIES] __initdata = {
456struct all_info { 450struct all_info {
457 struct fb_info info; 451 struct fb_info info;
458 struct cg14_par par; 452 struct cg14_par par;
459 struct list_head list;
460}; 453};
461static LIST_HEAD(cg14_list);
462 454
463static void cg14_init_one(struct sbus_dev *sdev, int node, int parent_node) 455static void cg14_unmap_regs(struct all_info *all)
464{ 456{
465 struct all_info *all; 457 if (all->par.regs)
466 unsigned long phys, rphys; 458 of_iounmap(all->par.regs, sizeof(struct cg14_regs));
467 u32 bases[6]; 459 if (all->par.clut)
468 int is_8mb, linebytes, i; 460 of_iounmap(all->par.clut, sizeof(struct cg14_clut));
469 461 if (all->par.cursor)
470 if (!sdev) { 462 of_iounmap(all->par.cursor, sizeof(struct cg14_cursor));
471 if (prom_getproperty(node, "address", 463 if (all->info.screen_base)
472 (char *) &bases[0], sizeof(bases)) <= 0 464 of_iounmap(all->info.screen_base, all->par.fbsize);
473 || !bases[0]) { 465}
474 printk(KERN_ERR "cg14: Device is not mapped.\n");
475 return;
476 }
477 if (__get_iospace(bases[0]) != __get_iospace(bases[1])) {
478 printk(KERN_ERR "cg14: I/O spaces don't match.\n");
479 return;
480 }
481 }
482 466
483 all = kmalloc(sizeof(*all), GFP_KERNEL); 467static int __devinit cg14_init_one(struct of_device *op)
484 if (!all) { 468{
485 printk(KERN_ERR "cg14: Cannot allocate memory.\n"); 469 struct device_node *dp = op->node;
486 return; 470 struct all_info *all;
487 } 471 int is_8mb, linebytes, i, err;
488 memset(all, 0, sizeof(*all));
489 472
490 INIT_LIST_HEAD(&all->list); 473 all = kzalloc(sizeof(*all), GFP_KERNEL);
474 if (!all)
475 return -ENOMEM;
491 476
492 spin_lock_init(&all->par.lock); 477 spin_lock_init(&all->par.lock);
493 478
494 sbusfb_fill_var(&all->info.var, node, 8); 479 sbusfb_fill_var(&all->info.var, dp->node, 8);
495 all->info.var.red.length = 8; 480 all->info.var.red.length = 8;
496 all->info.var.green.length = 8; 481 all->info.var.green.length = 8;
497 all->info.var.blue.length = 8; 482 all->info.var.blue.length = 8;
498 483
499 linebytes = prom_getintdefault(node, "linebytes", 484 linebytes = of_getintprop_default(dp, "linebytes",
500 all->info.var.xres); 485 all->info.var.xres);
501 all->par.fbsize = PAGE_ALIGN(linebytes * all->info.var.yres); 486 all->par.fbsize = PAGE_ALIGN(linebytes * all->info.var.yres);
502 487
503 all->par.sdev = sdev; 488 if (!strcmp(dp->parent->name, "sbus") ||
504 if (sdev) { 489 !strcmp(dp->parent->name, "sbi")) {
505 rphys = sdev->reg_addrs[0].phys_addr; 490 all->par.physbase = op->resource[0].start;
506 all->par.physbase = phys = sdev->reg_addrs[1].phys_addr; 491 all->par.iospace = op->resource[0].flags & IORESOURCE_BITS;
507 all->par.iospace = sdev->reg_addrs[0].which_io;
508
509 all->par.regs = sbus_ioremap(&sdev->resource[0], 0,
510 sizeof(struct cg14_regs),
511 "cg14 regs");
512 all->par.clut = sbus_ioremap(&sdev->resource[0], CG14_CLUT1,
513 sizeof(struct cg14_clut),
514 "cg14 clut");
515 all->par.cursor = sbus_ioremap(&sdev->resource[0], CG14_CURSORREGS,
516 sizeof(struct cg14_cursor),
517 "cg14 cursor");
518 all->info.screen_base = sbus_ioremap(&sdev->resource[1], 0,
519 all->par.fbsize, "cg14 ram");
520 } else { 492 } else {
521 rphys = __get_phys(bases[0]); 493 all->par.physbase = op->resource[1].start;
522 all->par.physbase = phys = __get_phys(bases[1]); 494 all->par.iospace = op->resource[0].flags & IORESOURCE_BITS;
523 all->par.iospace = __get_iospace(bases[0]);
524 all->par.regs = (struct cg14_regs __iomem *)(unsigned long)bases[0];
525 all->par.clut = (struct cg14_clut __iomem *)((unsigned long)bases[0] +
526 CG14_CLUT1);
527 all->par.cursor =
528 (struct cg14_cursor __iomem *)((unsigned long)bases[0] +
529 CG14_CURSORREGS);
530
531 all->info.screen_base = (char __iomem *)(unsigned long)bases[1];
532 } 495 }
533 496
534 prom_getproperty(node, "reg", (char *) &bases[0], sizeof(bases)); 497 all->par.regs = of_ioremap(&op->resource[0], 0,
535 is_8mb = (bases[5] == 0x800000); 498 sizeof(struct cg14_regs), "cg14 regs");
499 all->par.clut = of_ioremap(&op->resource[0], CG14_CLUT1,
500 sizeof(struct cg14_clut), "cg14 clut");
501 all->par.cursor = of_ioremap(&op->resource[0], CG14_CURSORREGS,
502 sizeof(struct cg14_cursor), "cg14 cursor");
536 503
537 if (sizeof(all->par.mmap_map) != sizeof(__cg14_mmap_map)) { 504 all->info.screen_base = of_ioremap(&op->resource[1], 0,
538 extern void __cg14_mmap_sized_wrongly(void); 505 all->par.fbsize, "cg14 ram");
539 506
540 __cg14_mmap_sized_wrongly(); 507 if (!all->par.regs || !all->par.clut || !all->par.cursor ||
541 } 508 !all->info.screen_base)
509 cg14_unmap_regs(all);
510
511 is_8mb = (((op->resource[1].end - op->resource[1].start) + 1) ==
512 (8 * 1024 * 1024));
513
514 BUILD_BUG_ON(sizeof(all->par.mmap_map) != sizeof(__cg14_mmap_map));
542 515
543 memcpy(&all->par.mmap_map, &__cg14_mmap_map, sizeof(all->par.mmap_map)); 516 memcpy(&all->par.mmap_map, &__cg14_mmap_map,
517 sizeof(all->par.mmap_map));
518
544 for (i = 0; i < CG14_MMAP_ENTRIES; i++) { 519 for (i = 0; i < CG14_MMAP_ENTRIES; i++) {
545 struct sbus_mmap_map *map = &all->par.mmap_map[i]; 520 struct sbus_mmap_map *map = &all->par.mmap_map[i];
546 521
547 if (!map->size) 522 if (!map->size)
548 break; 523 break;
549 if (map->poff & 0x80000000) 524 if (map->poff & 0x80000000)
550 map->poff = (map->poff & 0x7fffffff) + rphys - phys; 525 map->poff = (map->poff & 0x7fffffff) +
526 (op->resource[0].start -
527 op->resource[1].start);
551 if (is_8mb && 528 if (is_8mb &&
552 map->size >= 0x100000 && 529 map->size >= 0x100000 &&
553 map->size <= 0x400000) 530 map->size <= 0x400000)
@@ -564,84 +541,87 @@ static void cg14_init_one(struct sbus_dev *sdev, int node, int parent_node)
564 __cg14_reset(&all->par); 541 __cg14_reset(&all->par);
565 542
566 if (fb_alloc_cmap(&all->info.cmap, 256, 0)) { 543 if (fb_alloc_cmap(&all->info.cmap, 256, 0)) {
567 printk(KERN_ERR "cg14: Could not allocate color map.\n"); 544 cg14_unmap_regs(all);
568 kfree(all); 545 kfree(all);
569 return; 546 return -ENOMEM;
570 } 547 }
571 fb_set_cmap(&all->info.cmap, &all->info); 548 fb_set_cmap(&all->info.cmap, &all->info);
572 549
573 cg14_init_fix(&all->info, linebytes); 550 cg14_init_fix(&all->info, linebytes, dp);
574 551
575 if (register_framebuffer(&all->info) < 0) { 552 err = register_framebuffer(&all->info);
576 printk(KERN_ERR "cg14: Could not register framebuffer.\n"); 553 if (err < 0) {
577 fb_dealloc_cmap(&all->info.cmap); 554 fb_dealloc_cmap(&all->info.cmap);
555 cg14_unmap_regs(all);
578 kfree(all); 556 kfree(all);
579 return; 557 return err;
580 } 558 }
581 559
582 list_add(&all->list, &cg14_list); 560 dev_set_drvdata(&op->dev, all);
583 561
584 printk("cg14: cgfourteen at %lx:%lx, %dMB\n", 562 printk("%s: cgfourteen at %lx:%lx, %dMB\n",
585 all->par.iospace, all->par.physbase, all->par.ramsize >> 20); 563 dp->full_name,
564 all->par.iospace, all->par.physbase,
565 all->par.ramsize >> 20);
586 566
567 return 0;
587} 568}
588 569
589int __init cg14_init(void) 570static int __devinit cg14_probe(struct of_device *dev, const struct of_device_id *match)
590{ 571{
591 struct sbus_bus *sbus; 572 struct of_device *op = to_of_device(&dev->dev);
592 struct sbus_dev *sdev;
593 573
594 if (fb_get_options("cg14fb", NULL)) 574 return cg14_init_one(op);
595 return -ENODEV; 575}
596 576
597#ifdef CONFIG_SPARC32 577static int __devexit cg14_remove(struct of_device *dev)
598 { 578{
599 int root, node; 579 struct all_info *all = dev_get_drvdata(&dev->dev);
600 580
601 root = prom_getchild(prom_root_node); 581 unregister_framebuffer(&all->info);
602 root = prom_searchsiblings(root, "obio"); 582 fb_dealloc_cmap(&all->info.cmap);
603 if (root) { 583
604 node = prom_searchsiblings(prom_getchild(root), 584 cg14_unmap_regs(all);
605 "cgfourteen"); 585
606 if (node) 586 kfree(all);
607 cg14_init_one(NULL, node, root); 587
608 } 588 dev_set_drvdata(&dev->dev, NULL);
609 }
610#endif
611 for_all_sbusdev(sdev, sbus) {
612 if (!strcmp(sdev->prom_name, "cgfourteen"))
613 cg14_init_one(sdev, sdev->prom_node, sbus->prom_node);
614 }
615 589
616 return 0; 590 return 0;
617} 591}
618 592
619void __exit cg14_exit(void) 593static struct of_device_id cg14_match[] = {
620{ 594 {
621 struct list_head *pos, *tmp; 595 .name = "cgfourteen",
596 },
597 {},
598};
599MODULE_DEVICE_TABLE(of, cg14_match);
622 600
623 list_for_each_safe(pos, tmp, &cg14_list) { 601static struct of_platform_driver cg14_driver = {
624 struct all_info *all = list_entry(pos, typeof(*all), list); 602 .name = "cg14",
603 .match_table = cg14_match,
604 .probe = cg14_probe,
605 .remove = __devexit_p(cg14_remove),
606};
625 607
626 unregister_framebuffer(&all->info); 608int __init cg14_init(void)
627 fb_dealloc_cmap(&all->info.cmap); 609{
628 kfree(all); 610 if (fb_get_options("cg14fb", NULL))
629 } 611 return -ENODEV;
612
613 return of_register_driver(&cg14_driver, &of_bus_type);
630} 614}
631 615
632int __init 616void __exit cg14_exit(void)
633cg14_setup(char *arg)
634{ 617{
635 /* No cmdline options yet... */ 618 of_unregister_driver(&cg14_driver);
636 return 0;
637} 619}
638 620
639module_init(cg14_init); 621module_init(cg14_init);
640
641#ifdef MODULE
642module_exit(cg14_exit); 622module_exit(cg14_exit);
643#endif
644 623
645MODULE_DESCRIPTION("framebuffer driver for CGfourteen chipsets"); 624MODULE_DESCRIPTION("framebuffer driver for CGfourteen chipsets");
646MODULE_AUTHOR("David S. Miller <davem@redhat.com>"); 625MODULE_AUTHOR("David S. Miller <davem@davemloft.net>");
626MODULE_VERSION("2.0");
647MODULE_LICENSE("GPL"); 627MODULE_LICENSE("GPL");
diff --git a/drivers/video/cg3.c b/drivers/video/cg3.c
index 3de6e1b5ab2f..9c8c753ef454 100644
--- a/drivers/video/cg3.c
+++ b/drivers/video/cg3.c
@@ -1,6 +1,6 @@
1/* cg3.c: CGTHREE frame buffer driver 1/* cg3.c: CGTHREE frame buffer driver
2 * 2 *
3 * Copyright (C) 2003 David S. Miller (davem@redhat.com) 3 * Copyright (C) 2003, 2006 David S. Miller (davem@davemloft.net)
4 * Copyright (C) 1996,1998 Jakub Jelinek (jj@ultra.linux.cz) 4 * Copyright (C) 1996,1998 Jakub Jelinek (jj@ultra.linux.cz)
5 * Copyright (C) 1996 Miguel de Icaza (miguel@nuclecu.unam.mx) 5 * Copyright (C) 1996 Miguel de Icaza (miguel@nuclecu.unam.mx)
6 * Copyright (C) 1997 Eddie C. Dost (ecd@skynet.be) 6 * Copyright (C) 1997 Eddie C. Dost (ecd@skynet.be)
@@ -19,8 +19,9 @@
19#include <linux/mm.h> 19#include <linux/mm.h>
20 20
21#include <asm/io.h> 21#include <asm/io.h>
22#include <asm/sbus.h>
23#include <asm/oplib.h> 22#include <asm/oplib.h>
23#include <asm/prom.h>
24#include <asm/of_device.h>
24#include <asm/fbio.h> 25#include <asm/fbio.h>
25 26
26#include "sbuslib.h" 27#include "sbuslib.h"
@@ -80,30 +81,30 @@ enum cg3_type {
80}; 81};
81 82
82struct bt_regs { 83struct bt_regs {
83 volatile u32 addr; 84 u32 addr;
84 volatile u32 color_map; 85 u32 color_map;
85 volatile u32 control; 86 u32 control;
86 volatile u32 cursor; 87 u32 cursor;
87}; 88};
88 89
89struct cg3_regs { 90struct cg3_regs {
90 struct bt_regs cmap; 91 struct bt_regs cmap;
91 volatile u8 control; 92 u8 control;
92 volatile u8 status; 93 u8 status;
93 volatile u8 cursor_start; 94 u8 cursor_start;
94 volatile u8 cursor_end; 95 u8 cursor_end;
95 volatile u8 h_blank_start; 96 u8 h_blank_start;
96 volatile u8 h_blank_end; 97 u8 h_blank_end;
97 volatile u8 h_sync_start; 98 u8 h_sync_start;
98 volatile u8 h_sync_end; 99 u8 h_sync_end;
99 volatile u8 comp_sync_end; 100 u8 comp_sync_end;
100 volatile u8 v_blank_start_high; 101 u8 v_blank_start_high;
101 volatile u8 v_blank_start_low; 102 u8 v_blank_start_low;
102 volatile u8 v_blank_end; 103 u8 v_blank_end;
103 volatile u8 v_sync_start; 104 u8 v_sync_start;
104 volatile u8 v_sync_end; 105 u8 v_sync_end;
105 volatile u8 xfer_holdoff_start; 106 u8 xfer_holdoff_start;
106 volatile u8 xfer_holdoff_end; 107 u8 xfer_holdoff_end;
107}; 108};
108 109
109/* Offset of interesting structures in the OBIO space */ 110/* Offset of interesting structures in the OBIO space */
@@ -120,9 +121,8 @@ struct cg3_par {
120#define CG3_FLAG_RDI 0x00000002 121#define CG3_FLAG_RDI 0x00000002
121 122
122 unsigned long physbase; 123 unsigned long physbase;
124 unsigned long which_io;
123 unsigned long fbsize; 125 unsigned long fbsize;
124
125 struct sbus_dev *sdev;
126}; 126};
127 127
128/** 128/**
@@ -235,7 +235,7 @@ static int cg3_mmap(struct fb_info *info, struct vm_area_struct *vma)
235 235
236 return sbusfb_mmap_helper(cg3_mmap_map, 236 return sbusfb_mmap_helper(cg3_mmap_map,
237 par->physbase, par->fbsize, 237 par->physbase, par->fbsize,
238 par->sdev->reg_addrs[0].which_io, 238 par->which_io,
239 vma); 239 vma);
240} 240}
241 241
@@ -252,11 +252,9 @@ static int cg3_ioctl(struct fb_info *info, unsigned int cmd, unsigned long arg)
252 */ 252 */
253 253
254static void 254static void
255cg3_init_fix(struct fb_info *info, int linebytes) 255cg3_init_fix(struct fb_info *info, int linebytes, struct device_node *dp)
256{ 256{
257 struct cg3_par *par = (struct cg3_par *)info->par; 257 strlcpy(info->fix.id, dp->name, sizeof(info->fix.id));
258
259 strlcpy(info->fix.id, par->sdev->prom_name, sizeof(info->fix.id));
260 258
261 info->fix.type = FB_TYPE_PACKED_PIXELS; 259 info->fix.type = FB_TYPE_PACKED_PIXELS;
262 info->fix.visual = FB_VISUAL_PSEUDOCOLOR; 260 info->fix.visual = FB_VISUAL_PSEUDOCOLOR;
@@ -267,16 +265,15 @@ cg3_init_fix(struct fb_info *info, int linebytes)
267} 265}
268 266
269static void cg3_rdi_maybe_fixup_var(struct fb_var_screeninfo *var, 267static void cg3_rdi_maybe_fixup_var(struct fb_var_screeninfo *var,
270 struct sbus_dev *sdev) 268 struct device_node *dp)
271{ 269{
272 char buffer[40]; 270 char *params;
273 char *p; 271 char *p;
274 int ww, hh; 272 int ww, hh;
275 273
276 *buffer = 0; 274 params = of_get_property(dp, "params", NULL);
277 prom_getstring(sdev->prom_node, "params", buffer, sizeof(buffer)); 275 if (params) {
278 if (*buffer) { 276 ww = simple_strtoul(params, &p, 10);
279 ww = simple_strtoul(buffer, &p, 10);
280 if (ww && *p == 'x') { 277 if (ww && *p == 'x') {
281 hh = simple_strtoul(p + 1, &p, 10); 278 hh = simple_strtoul(p + 1, &p, 10);
282 if (hh && *p == '-') { 279 if (hh && *p == '-') {
@@ -348,11 +345,11 @@ static void cg3_do_default_mode(struct cg3_par *par)
348 sbus_writeb(p[1], regp); 345 sbus_writeb(p[1], regp);
349 } 346 }
350 for (p = cg3_dacvals; *p; p += 2) { 347 for (p = cg3_dacvals; *p; p += 2) {
351 volatile u8 __iomem *regp; 348 u8 __iomem *regp;
352 349
353 regp = (volatile u8 __iomem *)&par->regs->cmap.addr; 350 regp = (u8 __iomem *)&par->regs->cmap.addr;
354 sbus_writeb(p[0], regp); 351 sbus_writeb(p[0], regp);
355 regp = (volatile u8 __iomem *)&par->regs->cmap.control; 352 regp = (u8 __iomem *)&par->regs->cmap.control;
356 sbus_writeb(p[1], regp); 353 sbus_writeb(p[1], regp);
357 } 354 }
358} 355}
@@ -360,129 +357,137 @@ static void cg3_do_default_mode(struct cg3_par *par)
360struct all_info { 357struct all_info {
361 struct fb_info info; 358 struct fb_info info;
362 struct cg3_par par; 359 struct cg3_par par;
363 struct list_head list;
364}; 360};
365static LIST_HEAD(cg3_list);
366 361
367static void cg3_init_one(struct sbus_dev *sdev) 362static int __devinit cg3_init_one(struct of_device *op)
368{ 363{
364 struct device_node *dp = op->node;
369 struct all_info *all; 365 struct all_info *all;
370 int linebytes; 366 int linebytes, err;
371
372 all = kmalloc(sizeof(*all), GFP_KERNEL);
373 if (!all) {
374 printk(KERN_ERR "cg3: Cannot allocate memory.\n");
375 return;
376 }
377 memset(all, 0, sizeof(*all));
378 367
379 INIT_LIST_HEAD(&all->list); 368 all = kzalloc(sizeof(*all), GFP_KERNEL);
369 if (!all)
370 return -ENOMEM;
380 371
381 spin_lock_init(&all->par.lock); 372 spin_lock_init(&all->par.lock);
382 all->par.sdev = sdev;
383 373
384 all->par.physbase = sdev->reg_addrs[0].phys_addr; 374 all->par.physbase = op->resource[0].start;
375 all->par.which_io = op->resource[0].flags & IORESOURCE_BITS;
385 376
386 sbusfb_fill_var(&all->info.var, sdev->prom_node, 8); 377 sbusfb_fill_var(&all->info.var, dp->node, 8);
387 all->info.var.red.length = 8; 378 all->info.var.red.length = 8;
388 all->info.var.green.length = 8; 379 all->info.var.green.length = 8;
389 all->info.var.blue.length = 8; 380 all->info.var.blue.length = 8;
390 if (!strcmp(sdev->prom_name, "cgRDI")) 381 if (!strcmp(dp->name, "cgRDI"))
391 all->par.flags |= CG3_FLAG_RDI; 382 all->par.flags |= CG3_FLAG_RDI;
392 if (all->par.flags & CG3_FLAG_RDI) 383 if (all->par.flags & CG3_FLAG_RDI)
393 cg3_rdi_maybe_fixup_var(&all->info.var, sdev); 384 cg3_rdi_maybe_fixup_var(&all->info.var, dp);
394 385
395 linebytes = prom_getintdefault(sdev->prom_node, "linebytes", 386 linebytes = of_getintprop_default(dp, "linebytes",
396 all->info.var.xres); 387 all->info.var.xres);
397 all->par.fbsize = PAGE_ALIGN(linebytes * all->info.var.yres); 388 all->par.fbsize = PAGE_ALIGN(linebytes * all->info.var.yres);
398 389
399 all->par.regs = sbus_ioremap(&sdev->resource[0], CG3_REGS_OFFSET, 390 all->par.regs = of_ioremap(&op->resource[0], CG3_REGS_OFFSET,
400 sizeof(struct cg3_regs), "cg3 regs"); 391 sizeof(struct cg3_regs), "cg3 regs");
401 392
402 all->info.flags = FBINFO_DEFAULT; 393 all->info.flags = FBINFO_DEFAULT;
403 all->info.fbops = &cg3_ops; 394 all->info.fbops = &cg3_ops;
404#ifdef CONFIG_SPARC32 395 all->info.screen_base =
405 all->info.screen_base = (char __iomem *) 396 of_ioremap(&op->resource[0], CG3_RAM_OFFSET,
406 prom_getintdefault(sdev->prom_node, "address", 0); 397 all->par.fbsize, "cg3 ram");
407#endif
408 if (!all->info.screen_base)
409 all->info.screen_base =
410 sbus_ioremap(&sdev->resource[0], CG3_RAM_OFFSET,
411 all->par.fbsize, "cg3 ram");
412 all->info.par = &all->par; 398 all->info.par = &all->par;
413 399
414 cg3_blank(0, &all->info); 400 cg3_blank(0, &all->info);
415 401
416 if (!prom_getbool(sdev->prom_node, "width")) 402 if (!of_find_property(dp, "width", NULL))
417 cg3_do_default_mode(&all->par); 403 cg3_do_default_mode(&all->par);
418 404
419 if (fb_alloc_cmap(&all->info.cmap, 256, 0)) { 405 if (fb_alloc_cmap(&all->info.cmap, 256, 0)) {
420 printk(KERN_ERR "cg3: Could not allocate color map.\n"); 406 of_iounmap(all->par.regs, sizeof(struct cg3_regs));
407 of_iounmap(all->info.screen_base, all->par.fbsize);
421 kfree(all); 408 kfree(all);
422 return; 409 return -ENOMEM;
423 } 410 }
424 fb_set_cmap(&all->info.cmap, &all->info); 411 fb_set_cmap(&all->info.cmap, &all->info);
425 412
426 cg3_init_fix(&all->info, linebytes); 413 cg3_init_fix(&all->info, linebytes, dp);
427 414
428 if (register_framebuffer(&all->info) < 0) { 415 err = register_framebuffer(&all->info);
429 printk(KERN_ERR "cg3: Could not register framebuffer.\n"); 416 if (err < 0) {
430 fb_dealloc_cmap(&all->info.cmap); 417 fb_dealloc_cmap(&all->info.cmap);
418 of_iounmap(all->par.regs, sizeof(struct cg3_regs));
419 of_iounmap(all->info.screen_base, all->par.fbsize);
431 kfree(all); 420 kfree(all);
432 return; 421 return err;
433 } 422 }
434 423
435 list_add(&all->list, &cg3_list); 424 dev_set_drvdata(&op->dev, all);
425
426 printk("%s: cg3 at %lx:%lx\n",
427 dp->full_name, all->par.which_io, all->par.physbase);
436 428
437 printk("cg3: %s at %lx:%lx\n", 429 return 0;
438 sdev->prom_name,
439 (long) sdev->reg_addrs[0].which_io,
440 (long) sdev->reg_addrs[0].phys_addr);
441} 430}
442 431
443int __init cg3_init(void) 432static int __devinit cg3_probe(struct of_device *dev, const struct of_device_id *match)
444{ 433{
445 struct sbus_bus *sbus; 434 struct of_device *op = to_of_device(&dev->dev);
446 struct sbus_dev *sdev;
447 435
448 if (fb_get_options("cg3fb", NULL)) 436 return cg3_init_one(op);
449 return -ENODEV; 437}
450 438
451 for_all_sbusdev(sdev, sbus) { 439static int __devexit cg3_remove(struct of_device *dev)
452 if (!strcmp(sdev->prom_name, "cgthree") || 440{
453 !strcmp(sdev->prom_name, "cgRDI")) 441 struct all_info *all = dev_get_drvdata(&dev->dev);
454 cg3_init_one(sdev); 442
455 } 443 unregister_framebuffer(&all->info);
444 fb_dealloc_cmap(&all->info.cmap);
445
446 of_iounmap(all->par.regs, sizeof(struct cg3_regs));
447 of_iounmap(all->info.screen_base, all->par.fbsize);
448
449 kfree(all);
450
451 dev_set_drvdata(&dev->dev, NULL);
456 452
457 return 0; 453 return 0;
458} 454}
459 455
460void __exit cg3_exit(void) 456static struct of_device_id cg3_match[] = {
461{ 457 {
462 struct list_head *pos, *tmp; 458 .name = "cgthree",
459 },
460 {
461 .name = "cgRDI",
462 },
463 {},
464};
465MODULE_DEVICE_TABLE(of, cg3_match);
463 466
464 list_for_each_safe(pos, tmp, &cg3_list) { 467static struct of_platform_driver cg3_driver = {
465 struct all_info *all = list_entry(pos, typeof(*all), list); 468 .name = "cg3",
469 .match_table = cg3_match,
470 .probe = cg3_probe,
471 .remove = __devexit_p(cg3_remove),
472};
466 473
467 unregister_framebuffer(&all->info); 474static int __init cg3_init(void)
468 fb_dealloc_cmap(&all->info.cmap); 475{
469 kfree(all); 476 if (fb_get_options("cg3fb", NULL))
470 } 477 return -ENODEV;
478
479 return of_register_driver(&cg3_driver, &of_bus_type);
471} 480}
472 481
473int __init 482static void __exit cg3_exit(void)
474cg3_setup(char *arg)
475{ 483{
476 /* No cmdline options yet... */ 484 of_unregister_driver(&cg3_driver);
477 return 0;
478} 485}
479 486
480module_init(cg3_init); 487module_init(cg3_init);
481
482#ifdef MODULE
483module_exit(cg3_exit); 488module_exit(cg3_exit);
484#endif
485 489
486MODULE_DESCRIPTION("framebuffer driver for CGthree chipsets"); 490MODULE_DESCRIPTION("framebuffer driver for CGthree chipsets");
487MODULE_AUTHOR("David S. Miller <davem@redhat.com>"); 491MODULE_AUTHOR("David S. Miller <davem@davemloft.net>");
492MODULE_VERSION("2.0");
488MODULE_LICENSE("GPL"); 493MODULE_LICENSE("GPL");
diff --git a/drivers/video/cg6.c b/drivers/video/cg6.c
index 7aab91ead681..64146be2eeb0 100644
--- a/drivers/video/cg6.c
+++ b/drivers/video/cg6.c
@@ -1,6 +1,6 @@
1/* cg6.c: CGSIX (GX, GXplus, TGX) frame buffer driver 1/* cg6.c: CGSIX (GX, GXplus, TGX) frame buffer driver
2 * 2 *
3 * Copyright (C) 2003 David S. Miller (davem@redhat.com) 3 * Copyright (C) 2003, 2006 David S. Miller (davem@davemloft.net)
4 * Copyright (C) 1996,1998 Jakub Jelinek (jj@ultra.linux.cz) 4 * Copyright (C) 1996,1998 Jakub Jelinek (jj@ultra.linux.cz)
5 * Copyright (C) 1996 Miguel de Icaza (miguel@nuclecu.unam.mx) 5 * Copyright (C) 1996 Miguel de Icaza (miguel@nuclecu.unam.mx)
6 * Copyright (C) 1996 Eddie C. Dost (ecd@skynet.be) 6 * Copyright (C) 1996 Eddie C. Dost (ecd@skynet.be)
@@ -19,8 +19,8 @@
19#include <linux/mm.h> 19#include <linux/mm.h>
20 20
21#include <asm/io.h> 21#include <asm/io.h>
22#include <asm/sbus.h> 22#include <asm/prom.h>
23#include <asm/oplib.h> 23#include <asm/of_device.h>
24#include <asm/fbio.h> 24#include <asm/fbio.h>
25 25
26#include "sbuslib.h" 26#include "sbuslib.h"
@@ -164,89 +164,89 @@ static struct fb_ops cg6_ops = {
164 164
165/* The contents are unknown */ 165/* The contents are unknown */
166struct cg6_tec { 166struct cg6_tec {
167 volatile int tec_matrix; 167 int tec_matrix;
168 volatile int tec_clip; 168 int tec_clip;
169 volatile int tec_vdc; 169 int tec_vdc;
170}; 170};
171 171
172struct cg6_thc { 172struct cg6_thc {
173 uint thc_pad0[512]; 173 u32 thc_pad0[512];
174 volatile uint thc_hs; /* hsync timing */ 174 u32 thc_hs; /* hsync timing */
175 volatile uint thc_hsdvs; 175 u32 thc_hsdvs;
176 volatile uint thc_hd; 176 u32 thc_hd;
177 volatile uint thc_vs; /* vsync timing */ 177 u32 thc_vs; /* vsync timing */
178 volatile uint thc_vd; 178 u32 thc_vd;
179 volatile uint thc_refresh; 179 u32 thc_refresh;
180 volatile uint thc_misc; 180 u32 thc_misc;
181 uint thc_pad1[56]; 181 u32 thc_pad1[56];
182 volatile uint thc_cursxy; /* cursor x,y position (16 bits each) */ 182 u32 thc_cursxy; /* cursor x,y position (16 bits each) */
183 volatile uint thc_cursmask[32]; /* cursor mask bits */ 183 u32 thc_cursmask[32]; /* cursor mask bits */
184 volatile uint thc_cursbits[32]; /* what to show where mask enabled */ 184 u32 thc_cursbits[32]; /* what to show where mask enabled */
185}; 185};
186 186
187struct cg6_fbc { 187struct cg6_fbc {
188 u32 xxx0[1]; 188 u32 xxx0[1];
189 volatile u32 mode; 189 u32 mode;
190 volatile u32 clip; 190 u32 clip;
191 u32 xxx1[1]; 191 u32 xxx1[1];
192 volatile u32 s; 192 u32 s;
193 volatile u32 draw; 193 u32 draw;
194 volatile u32 blit; 194 u32 blit;
195 volatile u32 font; 195 u32 font;
196 u32 xxx2[24]; 196 u32 xxx2[24];
197 volatile u32 x0, y0, z0, color0; 197 u32 x0, y0, z0, color0;
198 volatile u32 x1, y1, z1, color1; 198 u32 x1, y1, z1, color1;
199 volatile u32 x2, y2, z2, color2; 199 u32 x2, y2, z2, color2;
200 volatile u32 x3, y3, z3, color3; 200 u32 x3, y3, z3, color3;
201 volatile u32 offx, offy; 201 u32 offx, offy;
202 u32 xxx3[2]; 202 u32 xxx3[2];
203 volatile u32 incx, incy; 203 u32 incx, incy;
204 u32 xxx4[2]; 204 u32 xxx4[2];
205 volatile u32 clipminx, clipminy; 205 u32 clipminx, clipminy;
206 u32 xxx5[2]; 206 u32 xxx5[2];
207 volatile u32 clipmaxx, clipmaxy; 207 u32 clipmaxx, clipmaxy;
208 u32 xxx6[2]; 208 u32 xxx6[2];
209 volatile u32 fg; 209 u32 fg;
210 volatile u32 bg; 210 u32 bg;
211 volatile u32 alu; 211 u32 alu;
212 volatile u32 pm; 212 u32 pm;
213 volatile u32 pixelm; 213 u32 pixelm;
214 u32 xxx7[2]; 214 u32 xxx7[2];
215 volatile u32 patalign; 215 u32 patalign;
216 volatile u32 pattern[8]; 216 u32 pattern[8];
217 u32 xxx8[432]; 217 u32 xxx8[432];
218 volatile u32 apointx, apointy, apointz; 218 u32 apointx, apointy, apointz;
219 u32 xxx9[1]; 219 u32 xxx9[1];
220 volatile u32 rpointx, rpointy, rpointz; 220 u32 rpointx, rpointy, rpointz;
221 u32 xxx10[5]; 221 u32 xxx10[5];
222 volatile u32 pointr, pointg, pointb, pointa; 222 u32 pointr, pointg, pointb, pointa;
223 volatile u32 alinex, aliney, alinez; 223 u32 alinex, aliney, alinez;
224 u32 xxx11[1]; 224 u32 xxx11[1];
225 volatile u32 rlinex, rliney, rlinez; 225 u32 rlinex, rliney, rlinez;
226 u32 xxx12[5]; 226 u32 xxx12[5];
227 volatile u32 liner, lineg, lineb, linea; 227 u32 liner, lineg, lineb, linea;
228 volatile u32 atrix, atriy, atriz; 228 u32 atrix, atriy, atriz;
229 u32 xxx13[1]; 229 u32 xxx13[1];
230 volatile u32 rtrix, rtriy, rtriz; 230 u32 rtrix, rtriy, rtriz;
231 u32 xxx14[5]; 231 u32 xxx14[5];
232 volatile u32 trir, trig, trib, tria; 232 u32 trir, trig, trib, tria;
233 volatile u32 aquadx, aquady, aquadz; 233 u32 aquadx, aquady, aquadz;
234 u32 xxx15[1]; 234 u32 xxx15[1];
235 volatile u32 rquadx, rquady, rquadz; 235 u32 rquadx, rquady, rquadz;
236 u32 xxx16[5]; 236 u32 xxx16[5];
237 volatile u32 quadr, quadg, quadb, quada; 237 u32 quadr, quadg, quadb, quada;
238 volatile u32 arectx, arecty, arectz; 238 u32 arectx, arecty, arectz;
239 u32 xxx17[1]; 239 u32 xxx17[1];
240 volatile u32 rrectx, rrecty, rrectz; 240 u32 rrectx, rrecty, rrectz;
241 u32 xxx18[5]; 241 u32 xxx18[5];
242 volatile u32 rectr, rectg, rectb, recta; 242 u32 rectr, rectg, rectb, recta;
243}; 243};
244 244
245struct bt_regs { 245struct bt_regs {
246 volatile u32 addr; 246 u32 addr;
247 volatile u32 color_map; 247 u32 color_map;
248 volatile u32 control; 248 u32 control;
249 volatile u32 cursor; 249 u32 cursor;
250}; 250};
251 251
252struct cg6_par { 252struct cg6_par {
@@ -255,15 +255,14 @@ struct cg6_par {
255 struct cg6_fbc __iomem *fbc; 255 struct cg6_fbc __iomem *fbc;
256 struct cg6_thc __iomem *thc; 256 struct cg6_thc __iomem *thc;
257 struct cg6_tec __iomem *tec; 257 struct cg6_tec __iomem *tec;
258 volatile u32 __iomem *fhc; 258 u32 __iomem *fhc;
259 259
260 u32 flags; 260 u32 flags;
261#define CG6_FLAG_BLANKED 0x00000001 261#define CG6_FLAG_BLANKED 0x00000001
262 262
263 unsigned long physbase; 263 unsigned long physbase;
264 unsigned long which_io;
264 unsigned long fbsize; 265 unsigned long fbsize;
265
266 struct sbus_dev *sdev;
267}; 266};
268 267
269static int cg6_sync(struct fb_info *info) 268static int cg6_sync(struct fb_info *info)
@@ -529,8 +528,7 @@ static int cg6_mmap(struct fb_info *info, struct vm_area_struct *vma)
529 528
530 return sbusfb_mmap_helper(cg6_mmap_map, 529 return sbusfb_mmap_helper(cg6_mmap_map,
531 par->physbase, par->fbsize, 530 par->physbase, par->fbsize,
532 par->sdev->reg_addrs[0].which_io, 531 par->which_io, vma);
533 vma);
534} 532}
535 533
536static int cg6_ioctl(struct fb_info *info, unsigned int cmd, unsigned long arg) 534static int cg6_ioctl(struct fb_info *info, unsigned int cmd, unsigned long arg)
@@ -658,62 +656,75 @@ static void cg6_chip_init(struct fb_info *info)
658struct all_info { 656struct all_info {
659 struct fb_info info; 657 struct fb_info info;
660 struct cg6_par par; 658 struct cg6_par par;
661 struct list_head list;
662}; 659};
663static LIST_HEAD(cg6_list);
664 660
665static void cg6_init_one(struct sbus_dev *sdev) 661static void cg6_unmap_regs(struct all_info *all)
666{ 662{
667 struct all_info *all; 663 if (all->par.fbc)
668 int linebytes; 664 of_iounmap(all->par.fbc, 4096);
665 if (all->par.tec)
666 of_iounmap(all->par.tec, sizeof(struct cg6_tec));
667 if (all->par.thc)
668 of_iounmap(all->par.thc, sizeof(struct cg6_thc));
669 if (all->par.bt)
670 of_iounmap(all->par.bt, sizeof(struct bt_regs));
671 if (all->par.fhc)
672 of_iounmap(all->par.fhc, sizeof(u32));
673
674 if (all->info.screen_base)
675 of_iounmap(all->info.screen_base, all->par.fbsize);
676}
669 677
670 all = kmalloc(sizeof(*all), GFP_KERNEL); 678static int __devinit cg6_init_one(struct of_device *op)
671 if (!all) { 679{
672 printk(KERN_ERR "cg6: Cannot allocate memory.\n"); 680 struct device_node *dp = op->node;
673 return; 681 struct all_info *all;
674 } 682 int linebytes, err;
675 memset(all, 0, sizeof(*all));
676 683
677 INIT_LIST_HEAD(&all->list); 684 all = kzalloc(sizeof(*all), GFP_KERNEL);
685 if (!all)
686 return -ENOMEM;
678 687
679 spin_lock_init(&all->par.lock); 688 spin_lock_init(&all->par.lock);
680 all->par.sdev = sdev;
681 689
682 all->par.physbase = sdev->reg_addrs[0].phys_addr; 690 all->par.physbase = op->resource[0].start;
691 all->par.which_io = op->resource[0].flags & IORESOURCE_BITS;
683 692
684 sbusfb_fill_var(&all->info.var, sdev->prom_node, 8); 693 sbusfb_fill_var(&all->info.var, dp->node, 8);
685 all->info.var.red.length = 8; 694 all->info.var.red.length = 8;
686 all->info.var.green.length = 8; 695 all->info.var.green.length = 8;
687 all->info.var.blue.length = 8; 696 all->info.var.blue.length = 8;
688 697
689 linebytes = prom_getintdefault(sdev->prom_node, "linebytes", 698 linebytes = of_getintprop_default(dp, "linebytes",
690 all->info.var.xres); 699 all->info.var.xres);
691 all->par.fbsize = PAGE_ALIGN(linebytes * all->info.var.yres); 700 all->par.fbsize = PAGE_ALIGN(linebytes * all->info.var.yres);
692 if (prom_getbool(sdev->prom_node, "dblbuf")) 701 if (of_find_property(dp, "dblbuf", NULL))
693 all->par.fbsize *= 4; 702 all->par.fbsize *= 4;
694 703
695 all->par.fbc = sbus_ioremap(&sdev->resource[0], CG6_FBC_OFFSET, 704 all->par.fbc = of_ioremap(&op->resource[0], CG6_FBC_OFFSET,
696 4096, "cgsix fbc"); 705 4096, "cgsix fbc");
697 all->par.tec = sbus_ioremap(&sdev->resource[0], CG6_TEC_OFFSET, 706 all->par.tec = of_ioremap(&op->resource[0], CG6_TEC_OFFSET,
698 sizeof(struct cg6_tec), "cgsix tec"); 707 sizeof(struct cg6_tec), "cgsix tec");
699 all->par.thc = sbus_ioremap(&sdev->resource[0], CG6_THC_OFFSET, 708 all->par.thc = of_ioremap(&op->resource[0], CG6_THC_OFFSET,
700 sizeof(struct cg6_thc), "cgsix thc"); 709 sizeof(struct cg6_thc), "cgsix thc");
701 all->par.bt = sbus_ioremap(&sdev->resource[0], CG6_BROOKTREE_OFFSET, 710 all->par.bt = of_ioremap(&op->resource[0], CG6_BROOKTREE_OFFSET,
702 sizeof(struct bt_regs), "cgsix dac"); 711 sizeof(struct bt_regs), "cgsix dac");
703 all->par.fhc = sbus_ioremap(&sdev->resource[0], CG6_FHC_OFFSET, 712 all->par.fhc = of_ioremap(&op->resource[0], CG6_FHC_OFFSET,
704 sizeof(u32), "cgsix fhc"); 713 sizeof(u32), "cgsix fhc");
705 714
706 all->info.flags = FBINFO_DEFAULT | FBINFO_HWACCEL_IMAGEBLIT | 715 all->info.flags = FBINFO_DEFAULT | FBINFO_HWACCEL_IMAGEBLIT |
707 FBINFO_HWACCEL_COPYAREA | FBINFO_HWACCEL_FILLRECT; 716 FBINFO_HWACCEL_COPYAREA | FBINFO_HWACCEL_FILLRECT;
708 all->info.fbops = &cg6_ops; 717 all->info.fbops = &cg6_ops;
709#ifdef CONFIG_SPARC32 718
710 all->info.screen_base = (char __iomem *) 719 all->info.screen_base = of_ioremap(&op->resource[0], CG6_RAM_OFFSET,
711 prom_getintdefault(sdev->prom_node, "address", 0); 720 all->par.fbsize, "cgsix ram");
712#endif 721 if (!all->par.fbc || !all->par.tec || !all->par.thc ||
713 if (!all->info.screen_base) 722 !all->par.bt || !all->par.fhc || !all->info.screen_base) {
714 all->info.screen_base = 723 cg6_unmap_regs(all);
715 sbus_ioremap(&sdev->resource[0], CG6_RAM_OFFSET, 724 kfree(all);
716 all->par.fbsize, "cgsix ram"); 725 return -ENOMEM;
726 }
727
717 all->info.par = &all->par; 728 all->info.par = &all->par;
718 729
719 all->info.var.accel_flags = FB_ACCELF_TEXT; 730 all->info.var.accel_flags = FB_ACCELF_TEXT;
@@ -723,72 +734,90 @@ static void cg6_init_one(struct sbus_dev *sdev)
723 cg6_blank(0, &all->info); 734 cg6_blank(0, &all->info);
724 735
725 if (fb_alloc_cmap(&all->info.cmap, 256, 0)) { 736 if (fb_alloc_cmap(&all->info.cmap, 256, 0)) {
726 printk(KERN_ERR "cg6: Could not allocate color map.\n"); 737 cg6_unmap_regs(all);
727 kfree(all); 738 kfree(all);
728 return; 739 return -ENOMEM;
729 } 740 }
730 741
731 fb_set_cmap(&all->info.cmap, &all->info); 742 fb_set_cmap(&all->info.cmap, &all->info);
732 cg6_init_fix(&all->info, linebytes); 743 cg6_init_fix(&all->info, linebytes);
733 744
734 if (register_framebuffer(&all->info) < 0) { 745 err = register_framebuffer(&all->info);
735 printk(KERN_ERR "cg6: Could not register framebuffer.\n"); 746 if (err < 0) {
747 cg6_unmap_regs(all);
736 fb_dealloc_cmap(&all->info.cmap); 748 fb_dealloc_cmap(&all->info.cmap);
737 kfree(all); 749 kfree(all);
738 return; 750 return err;
739 } 751 }
740 752
741 list_add(&all->list, &cg6_list); 753 dev_set_drvdata(&op->dev, all);
742 754
743 printk("cg6: CGsix [%s] at %lx:%lx\n", 755 printk("%s: CGsix [%s] at %lx:%lx\n",
756 dp->full_name,
744 all->info.fix.id, 757 all->info.fix.id,
745 (long) sdev->reg_addrs[0].which_io, 758 all->par.which_io, all->par.physbase);
746 (long) sdev->reg_addrs[0].phys_addr); 759
760 return 0;
747} 761}
748 762
749int __init cg6_init(void) 763static int __devinit cg6_probe(struct of_device *dev, const struct of_device_id *match)
750{ 764{
751 struct sbus_bus *sbus; 765 struct of_device *op = to_of_device(&dev->dev);
752 struct sbus_dev *sdev;
753 766
754 if (fb_get_options("cg6fb", NULL)) 767 return cg6_init_one(op);
755 return -ENODEV; 768}
756 769
757 for_all_sbusdev(sdev, sbus) { 770static int __devexit cg6_remove(struct of_device *dev)
758 if (!strcmp(sdev->prom_name, "cgsix") || 771{
759 !strcmp(sdev->prom_name, "cgthree+")) 772 struct all_info *all = dev_get_drvdata(&dev->dev);
760 cg6_init_one(sdev); 773
761 } 774 unregister_framebuffer(&all->info);
775 fb_dealloc_cmap(&all->info.cmap);
776
777 cg6_unmap_regs(all);
778
779 kfree(all);
780
781 dev_set_drvdata(&dev->dev, NULL);
762 782
763 return 0; 783 return 0;
764} 784}
765 785
766void __exit cg6_exit(void) 786static struct of_device_id cg6_match[] = {
767{ 787 {
768 struct list_head *pos, *tmp; 788 .name = "cgsix",
789 },
790 {
791 .name = "cgthree+",
792 },
793 {},
794};
795MODULE_DEVICE_TABLE(of, cg6_match);
769 796
770 list_for_each_safe(pos, tmp, &cg6_list) { 797static struct of_platform_driver cg6_driver = {
771 struct all_info *all = list_entry(pos, typeof(*all), list); 798 .name = "cg6",
799 .match_table = cg6_match,
800 .probe = cg6_probe,
801 .remove = __devexit_p(cg6_remove),
802};
772 803
773 unregister_framebuffer(&all->info); 804static int __init cg6_init(void)
774 fb_dealloc_cmap(&all->info.cmap); 805{
775 kfree(all); 806 if (fb_get_options("cg6fb", NULL))
776 } 807 return -ENODEV;
808
809 return of_register_driver(&cg6_driver, &of_bus_type);
777} 810}
778 811
779int __init 812static void __exit cg6_exit(void)
780cg6_setup(char *arg)
781{ 813{
782 /* No cmdline options yet... */ 814 of_unregister_driver(&cg6_driver);
783 return 0;
784} 815}
785 816
786module_init(cg6_init); 817module_init(cg6_init);
787
788#ifdef MODULE
789module_exit(cg6_exit); 818module_exit(cg6_exit);
790#endif
791 819
792MODULE_DESCRIPTION("framebuffer driver for CGsix chipsets"); 820MODULE_DESCRIPTION("framebuffer driver for CGsix chipsets");
793MODULE_AUTHOR("David S. Miller <davem@redhat.com>"); 821MODULE_AUTHOR("David S. Miller <davem@davemloft.net>");
822MODULE_VERSION("2.0");
794MODULE_LICENSE("GPL"); 823MODULE_LICENSE("GPL");
diff --git a/drivers/video/ffb.c b/drivers/video/ffb.c
index 7633e41adda1..2a0e8210d398 100644
--- a/drivers/video/ffb.c
+++ b/drivers/video/ffb.c
@@ -1,6 +1,6 @@
1/* ffb.c: Creator/Elite3D frame buffer driver 1/* ffb.c: Creator/Elite3D frame buffer driver
2 * 2 *
3 * Copyright (C) 2003 David S. Miller (davem@redhat.com) 3 * Copyright (C) 2003, 2006 David S. Miller (davem@davemloft.net)
4 * Copyright (C) 1997,1998,1999 Jakub Jelinek (jj@ultra.linux.cz) 4 * Copyright (C) 1997,1998,1999 Jakub Jelinek (jj@ultra.linux.cz)
5 * 5 *
6 * Driver layout based loosely on tgafb.c, see that file for credits. 6 * Driver layout based loosely on tgafb.c, see that file for credits.
@@ -19,7 +19,8 @@
19 19
20#include <asm/io.h> 20#include <asm/io.h>
21#include <asm/upa.h> 21#include <asm/upa.h>
22#include <asm/oplib.h> 22#include <asm/prom.h>
23#include <asm/of_device.h>
23#include <asm/fbio.h> 24#include <asm/fbio.h>
24 25
25#include "sbuslib.h" 26#include "sbuslib.h"
@@ -184,161 +185,161 @@ static struct fb_ops ffb_ops = {
184 185
185struct ffb_fbc { 186struct ffb_fbc {
186 /* Next vertex registers */ 187 /* Next vertex registers */
187 u32 xxx1[3]; 188 u32 xxx1[3];
188 volatile u32 alpha; 189 u32 alpha;
189 volatile u32 red; 190 u32 red;
190 volatile u32 green; 191 u32 green;
191 volatile u32 blue; 192 u32 blue;
192 volatile u32 depth; 193 u32 depth;
193 volatile u32 y; 194 u32 y;
194 volatile u32 x; 195 u32 x;
195 u32 xxx2[2]; 196 u32 xxx2[2];
196 volatile u32 ryf; 197 u32 ryf;
197 volatile u32 rxf; 198 u32 rxf;
198 u32 xxx3[2]; 199 u32 xxx3[2];
199 200
200 volatile u32 dmyf; 201 u32 dmyf;
201 volatile u32 dmxf; 202 u32 dmxf;
202 u32 xxx4[2]; 203 u32 xxx4[2];
203 volatile u32 ebyi; 204 u32 ebyi;
204 volatile u32 ebxi; 205 u32 ebxi;
205 u32 xxx5[2]; 206 u32 xxx5[2];
206 volatile u32 by; 207 u32 by;
207 volatile u32 bx; 208 u32 bx;
208 u32 dy; 209 u32 dy;
209 u32 dx; 210 u32 dx;
210 volatile u32 bh; 211 u32 bh;
211 volatile u32 bw; 212 u32 bw;
212 u32 xxx6[2]; 213 u32 xxx6[2];
213 214
214 u32 xxx7[32]; 215 u32 xxx7[32];
215 216
216 /* Setup unit vertex state register */ 217 /* Setup unit vertex state register */
217 volatile u32 suvtx; 218 u32 suvtx;
218 u32 xxx8[63]; 219 u32 xxx8[63];
219 220
220 /* Control registers */ 221 /* Control registers */
221 volatile u32 ppc; 222 u32 ppc;
222 volatile u32 wid; 223 u32 wid;
223 volatile u32 fg; 224 u32 fg;
224 volatile u32 bg; 225 u32 bg;
225 volatile u32 consty; 226 u32 consty;
226 volatile u32 constz; 227 u32 constz;
227 volatile u32 xclip; 228 u32 xclip;
228 volatile u32 dcss; 229 u32 dcss;
229 volatile u32 vclipmin; 230 u32 vclipmin;
230 volatile u32 vclipmax; 231 u32 vclipmax;
231 volatile u32 vclipzmin; 232 u32 vclipzmin;
232 volatile u32 vclipzmax; 233 u32 vclipzmax;
233 volatile u32 dcsf; 234 u32 dcsf;
234 volatile u32 dcsb; 235 u32 dcsb;
235 volatile u32 dczf; 236 u32 dczf;
236 volatile u32 dczb; 237 u32 dczb;
237 238
238 u32 xxx9; 239 u32 xxx9;
239 volatile u32 blendc; 240 u32 blendc;
240 volatile u32 blendc1; 241 u32 blendc1;
241 volatile u32 blendc2; 242 u32 blendc2;
242 volatile u32 fbramitc; 243 u32 fbramitc;
243 volatile u32 fbc; 244 u32 fbc;
244 volatile u32 rop; 245 u32 rop;
245 volatile u32 cmp; 246 u32 cmp;
246 volatile u32 matchab; 247 u32 matchab;
247 volatile u32 matchc; 248 u32 matchc;
248 volatile u32 magnab; 249 u32 magnab;
249 volatile u32 magnc; 250 u32 magnc;
250 volatile u32 fbcfg0; 251 u32 fbcfg0;
251 volatile u32 fbcfg1; 252 u32 fbcfg1;
252 volatile u32 fbcfg2; 253 u32 fbcfg2;
253 volatile u32 fbcfg3; 254 u32 fbcfg3;
254 255
255 u32 ppcfg; 256 u32 ppcfg;
256 volatile u32 pick; 257 u32 pick;
257 volatile u32 fillmode; 258 u32 fillmode;
258 volatile u32 fbramwac; 259 u32 fbramwac;
259 volatile u32 pmask; 260 u32 pmask;
260 volatile u32 xpmask; 261 u32 xpmask;
261 volatile u32 ypmask; 262 u32 ypmask;
262 volatile u32 zpmask; 263 u32 zpmask;
263 volatile u32 clip0min; 264 u32 clip0min;
264 volatile u32 clip0max; 265 u32 clip0max;
265 volatile u32 clip1min; 266 u32 clip1min;
266 volatile u32 clip1max; 267 u32 clip1max;
267 volatile u32 clip2min; 268 u32 clip2min;
268 volatile u32 clip2max; 269 u32 clip2max;
269 volatile u32 clip3min; 270 u32 clip3min;
270 volatile u32 clip3max; 271 u32 clip3max;
271 272
272 /* New 3dRAM III support regs */ 273 /* New 3dRAM III support regs */
273 volatile u32 rawblend2; 274 u32 rawblend2;
274 volatile u32 rawpreblend; 275 u32 rawpreblend;
275 volatile u32 rawstencil; 276 u32 rawstencil;
276 volatile u32 rawstencilctl; 277 u32 rawstencilctl;
277 volatile u32 threedram1; 278 u32 threedram1;
278 volatile u32 threedram2; 279 u32 threedram2;
279 volatile u32 passin; 280 u32 passin;
280 volatile u32 rawclrdepth; 281 u32 rawclrdepth;
281 volatile u32 rawpmask; 282 u32 rawpmask;
282 volatile u32 rawcsrc; 283 u32 rawcsrc;
283 volatile u32 rawmatch; 284 u32 rawmatch;
284 volatile u32 rawmagn; 285 u32 rawmagn;
285 volatile u32 rawropblend; 286 u32 rawropblend;
286 volatile u32 rawcmp; 287 u32 rawcmp;
287 volatile u32 rawwac; 288 u32 rawwac;
288 volatile u32 fbramid; 289 u32 fbramid;
289 290
290 volatile u32 drawop; 291 u32 drawop;
291 u32 xxx10[2]; 292 u32 xxx10[2];
292 volatile u32 fontlpat; 293 u32 fontlpat;
293 u32 xxx11; 294 u32 xxx11;
294 volatile u32 fontxy; 295 u32 fontxy;
295 volatile u32 fontw; 296 u32 fontw;
296 volatile u32 fontinc; 297 u32 fontinc;
297 volatile u32 font; 298 u32 font;
298 u32 xxx12[3]; 299 u32 xxx12[3];
299 volatile u32 blend2; 300 u32 blend2;
300 volatile u32 preblend; 301 u32 preblend;
301 volatile u32 stencil; 302 u32 stencil;
302 volatile u32 stencilctl; 303 u32 stencilctl;
303 304
304 u32 xxx13[4]; 305 u32 xxx13[4];
305 volatile u32 dcss1; 306 u32 dcss1;
306 volatile u32 dcss2; 307 u32 dcss2;
307 volatile u32 dcss3; 308 u32 dcss3;
308 volatile u32 widpmask; 309 u32 widpmask;
309 volatile u32 dcs2; 310 u32 dcs2;
310 volatile u32 dcs3; 311 u32 dcs3;
311 volatile u32 dcs4; 312 u32 dcs4;
312 u32 xxx14; 313 u32 xxx14;
313 volatile u32 dcd2; 314 u32 dcd2;
314 volatile u32 dcd3; 315 u32 dcd3;
315 volatile u32 dcd4; 316 u32 dcd4;
316 u32 xxx15; 317 u32 xxx15;
317 318
318 volatile u32 pattern[32]; 319 u32 pattern[32];
319 320
320 u32 xxx16[256]; 321 u32 xxx16[256];
321 322
322 volatile u32 devid; 323 u32 devid;
323 u32 xxx17[63]; 324 u32 xxx17[63];
324 325
325 volatile u32 ucsr; 326 u32 ucsr;
326 u32 xxx18[31]; 327 u32 xxx18[31];
327 328
328 volatile u32 mer; 329 u32 mer;
329}; 330};
330 331
331struct ffb_dac { 332struct ffb_dac {
332 volatile u32 type; 333 u32 type;
333 volatile u32 value; 334 u32 value;
334 volatile u32 type2; 335 u32 type2;
335 volatile u32 value2; 336 u32 value2;
336}; 337};
337 338
338struct ffb_par { 339struct ffb_par {
339 spinlock_t lock; 340 spinlock_t lock;
340 struct ffb_fbc *fbc; 341 struct ffb_fbc __iomem *fbc;
341 struct ffb_dac *dac; 342 struct ffb_dac __iomem *dac;
342 343
343 u32 flags; 344 u32 flags;
344#define FFB_FLAG_AFB 0x00000001 345#define FFB_FLAG_AFB 0x00000001
@@ -353,16 +354,13 @@ struct ffb_par {
353 unsigned long physbase; 354 unsigned long physbase;
354 unsigned long fbsize; 355 unsigned long fbsize;
355 356
356 char name[64];
357 int prom_node;
358 int prom_parent_node;
359 int dac_rev; 357 int dac_rev;
360 int board_type; 358 int board_type;
361}; 359};
362 360
363static void FFBFifo(struct ffb_par *par, int n) 361static void FFBFifo(struct ffb_par *par, int n)
364{ 362{
365 struct ffb_fbc *fbc; 363 struct ffb_fbc __iomem *fbc;
366 int cache = par->fifo_cache; 364 int cache = par->fifo_cache;
367 365
368 if (cache - n < 0) { 366 if (cache - n < 0) {
@@ -375,7 +373,7 @@ static void FFBFifo(struct ffb_par *par, int n)
375 373
376static void FFBWait(struct ffb_par *par) 374static void FFBWait(struct ffb_par *par)
377{ 375{
378 struct ffb_fbc *fbc; 376 struct ffb_fbc __iomem *fbc;
379 int limit = 10000; 377 int limit = 10000;
380 378
381 fbc = par->fbc; 379 fbc = par->fbc;
@@ -408,8 +406,8 @@ static __inline__ void ffb_rop(struct ffb_par *par, u32 rop)
408 406
409static void ffb_switch_from_graph(struct ffb_par *par) 407static void ffb_switch_from_graph(struct ffb_par *par)
410{ 408{
411 struct ffb_fbc *fbc = par->fbc; 409 struct ffb_fbc __iomem *fbc = par->fbc;
412 struct ffb_dac *dac = par->dac; 410 struct ffb_dac __iomem *dac = par->dac;
413 unsigned long flags; 411 unsigned long flags;
414 412
415 spin_lock_irqsave(&par->lock, flags); 413 spin_lock_irqsave(&par->lock, flags);
@@ -462,7 +460,7 @@ static int ffb_pan_display(struct fb_var_screeninfo *var, struct fb_info *info)
462static void ffb_fillrect(struct fb_info *info, const struct fb_fillrect *rect) 460static void ffb_fillrect(struct fb_info *info, const struct fb_fillrect *rect)
463{ 461{
464 struct ffb_par *par = (struct ffb_par *) info->par; 462 struct ffb_par *par = (struct ffb_par *) info->par;
465 struct ffb_fbc *fbc = par->fbc; 463 struct ffb_fbc __iomem *fbc = par->fbc;
466 unsigned long flags; 464 unsigned long flags;
467 u32 fg; 465 u32 fg;
468 466
@@ -505,7 +503,7 @@ static void
505ffb_copyarea(struct fb_info *info, const struct fb_copyarea *area) 503ffb_copyarea(struct fb_info *info, const struct fb_copyarea *area)
506{ 504{
507 struct ffb_par *par = (struct ffb_par *) info->par; 505 struct ffb_par *par = (struct ffb_par *) info->par;
508 struct ffb_fbc *fbc = par->fbc; 506 struct ffb_fbc __iomem *fbc = par->fbc;
509 unsigned long flags; 507 unsigned long flags;
510 508
511 if (area->dx != area->sx || 509 if (area->dx != area->sx ||
@@ -541,7 +539,7 @@ ffb_copyarea(struct fb_info *info, const struct fb_copyarea *area)
541static void ffb_imageblit(struct fb_info *info, const struct fb_image *image) 539static void ffb_imageblit(struct fb_info *info, const struct fb_image *image)
542{ 540{
543 struct ffb_par *par = (struct ffb_par *) info->par; 541 struct ffb_par *par = (struct ffb_par *) info->par;
544 struct ffb_fbc *fbc = par->fbc; 542 struct ffb_fbc __iomem *fbc = par->fbc;
545 const u8 *data = image->data; 543 const u8 *data = image->data;
546 unsigned long flags; 544 unsigned long flags;
547 u32 fg, bg, xy; 545 u32 fg, bg, xy;
@@ -664,7 +662,7 @@ static int
664ffb_blank(int blank, struct fb_info *info) 662ffb_blank(int blank, struct fb_info *info)
665{ 663{
666 struct ffb_par *par = (struct ffb_par *) info->par; 664 struct ffb_par *par = (struct ffb_par *) info->par;
667 struct ffb_dac *dac = par->dac; 665 struct ffb_dac __iomem *dac = par->dac;
668 unsigned long flags; 666 unsigned long flags;
669 u32 tmp; 667 u32 tmp;
670 668
@@ -883,78 +881,42 @@ ffb_init_fix(struct fb_info *info)
883 info->fix.accel = FB_ACCEL_SUN_CREATOR; 881 info->fix.accel = FB_ACCEL_SUN_CREATOR;
884} 882}
885 883
886static int ffb_apply_upa_parent_ranges(int parent,
887 struct linux_prom64_registers *regs)
888{
889 struct linux_prom64_ranges ranges[PROMREG_MAX];
890 char name[128];
891 int len, i;
892
893 prom_getproperty(parent, "name", name, sizeof(name));
894 if (strcmp(name, "upa") != 0)
895 return 0;
896
897 len = prom_getproperty(parent, "ranges", (void *) ranges, sizeof(ranges));
898 if (len <= 0)
899 return 1;
900
901 len /= sizeof(struct linux_prom64_ranges);
902 for (i = 0; i < len; i++) {
903 struct linux_prom64_ranges *rng = &ranges[i];
904 u64 phys_addr = regs->phys_addr;
905
906 if (phys_addr >= rng->ot_child_base &&
907 phys_addr < (rng->ot_child_base + rng->or_size)) {
908 regs->phys_addr -= rng->ot_child_base;
909 regs->phys_addr += rng->ot_parent_base;
910 return 0;
911 }
912 }
913
914 return 1;
915}
916
917struct all_info { 884struct all_info {
918 struct fb_info info; 885 struct fb_info info;
919 struct ffb_par par; 886 struct ffb_par par;
920 u32 pseudo_palette[256]; 887 u32 pseudo_palette[256];
921 struct list_head list;
922}; 888};
923static LIST_HEAD(ffb_list);
924 889
925static void ffb_init_one(int node, int parent) 890static int ffb_init_one(struct of_device *op)
926{ 891{
927 struct linux_prom64_registers regs[2*PROMREG_MAX]; 892 struct device_node *dp = op->node;
928 struct ffb_fbc *fbc; 893 struct ffb_fbc __iomem *fbc;
929 struct ffb_dac *dac; 894 struct ffb_dac __iomem *dac;
930 struct all_info *all; 895 struct all_info *all;
896 int err;
931 897
932 if (prom_getproperty(node, "reg", (void *) regs, sizeof(regs)) <= 0) { 898 all = kzalloc(sizeof(*all), GFP_KERNEL);
933 printk("ffb: Cannot get reg device node property.\n"); 899 if (!all)
934 return; 900 return -ENOMEM;
935 }
936 901
937 if (ffb_apply_upa_parent_ranges(parent, &regs[0])) { 902 spin_lock_init(&all->par.lock);
938 printk("ffb: Cannot apply parent ranges to regs.\n"); 903 all->par.fbc = of_ioremap(&op->resource[2], 0,
939 return; 904 sizeof(struct ffb_fbc), "ffb fbc");
905 if (!all->par.fbc) {
906 kfree(all);
907 return -ENOMEM;
940 } 908 }
941 909
942 all = kmalloc(sizeof(*all), GFP_KERNEL); 910 all->par.dac = of_ioremap(&op->resource[1], 0,
943 if (!all) { 911 sizeof(struct ffb_dac), "ffb dac");
944 printk(KERN_ERR "ffb: Cannot allocate memory.\n"); 912 if (!all->par.dac) {
945 return; 913 of_iounmap(all->par.fbc, sizeof(struct ffb_fbc));
914 kfree(all);
915 return -ENOMEM;
946 } 916 }
947 memset(all, 0, sizeof(*all));
948
949 INIT_LIST_HEAD(&all->list);
950 917
951 spin_lock_init(&all->par.lock);
952 all->par.fbc = (struct ffb_fbc *)(regs[0].phys_addr + FFB_FBC_REGS_POFF);
953 all->par.dac = (struct ffb_dac *)(regs[0].phys_addr + FFB_DAC_POFF);
954 all->par.rop_cache = FFB_ROP_NEW; 918 all->par.rop_cache = FFB_ROP_NEW;
955 all->par.physbase = regs[0].phys_addr; 919 all->par.physbase = op->resource[0].start;
956 all->par.prom_node = node;
957 all->par.prom_parent_node = parent;
958 920
959 /* Don't mention copyarea, so SCROLL_REDRAW is always 921 /* Don't mention copyarea, so SCROLL_REDRAW is always
960 * used. It is the fastest on this chip. 922 * used. It is the fastest on this chip.
@@ -968,7 +930,7 @@ static void ffb_init_one(int node, int parent)
968 all->info.par = &all->par; 930 all->info.par = &all->par;
969 all->info.pseudo_palette = all->pseudo_palette; 931 all->info.pseudo_palette = all->pseudo_palette;
970 932
971 sbusfb_fill_var(&all->info.var, all->par.prom_node, 32); 933 sbusfb_fill_var(&all->info.var, dp->node, 32);
972 all->par.fbsize = PAGE_ALIGN(all->info.var.xres * 934 all->par.fbsize = PAGE_ALIGN(all->info.var.xres *
973 all->info.var.yres * 935 all->info.var.yres *
974 4); 936 4);
@@ -976,14 +938,13 @@ static void ffb_init_one(int node, int parent)
976 938
977 all->info.var.accel_flags = FB_ACCELF_TEXT; 939 all->info.var.accel_flags = FB_ACCELF_TEXT;
978 940
979 prom_getstring(node, "name", all->par.name, sizeof(all->par.name)); 941 if (!strcmp(dp->name, "SUNW,afb"))
980 if (!strcmp(all->par.name, "SUNW,afb"))
981 all->par.flags |= FFB_FLAG_AFB; 942 all->par.flags |= FFB_FLAG_AFB;
982 943
983 all->par.board_type = prom_getintdefault(node, "board_type", 0); 944 all->par.board_type = of_getintprop_default(dp, "board_type", 0);
984 945
985 fbc = all->par.fbc; 946 fbc = all->par.fbc;
986 if((upa_readl(&fbc->ucsr) & FFB_UCSR_ALL_ERRORS) != 0) 947 if ((upa_readl(&fbc->ucsr) & FFB_UCSR_ALL_ERRORS) != 0)
987 upa_writel(FFB_UCSR_ALL_ERRORS, &fbc->ucsr); 948 upa_writel(FFB_UCSR_ALL_ERRORS, &fbc->ucsr);
988 949
989 ffb_switch_from_graph(&all->par); 950 ffb_switch_from_graph(&all->par);
@@ -1008,81 +969,88 @@ static void ffb_init_one(int node, int parent)
1008 if (fb_alloc_cmap(&all->info.cmap, 256, 0)) { 969 if (fb_alloc_cmap(&all->info.cmap, 256, 0)) {
1009 printk(KERN_ERR "ffb: Could not allocate color map.\n"); 970 printk(KERN_ERR "ffb: Could not allocate color map.\n");
1010 kfree(all); 971 kfree(all);
1011 return; 972 return -ENOMEM;
1012 } 973 }
1013 974
1014 ffb_init_fix(&all->info); 975 ffb_init_fix(&all->info);
1015 976
1016 if (register_framebuffer(&all->info) < 0) { 977 err = register_framebuffer(&all->info);
978 if (err < 0) {
1017 printk(KERN_ERR "ffb: Could not register framebuffer.\n"); 979 printk(KERN_ERR "ffb: Could not register framebuffer.\n");
1018 fb_dealloc_cmap(&all->info.cmap); 980 fb_dealloc_cmap(&all->info.cmap);
1019 kfree(all); 981 kfree(all);
1020 return; 982 return err;
1021 } 983 }
1022 984
1023 list_add(&all->list, &ffb_list); 985 dev_set_drvdata(&op->dev, all);
1024 986
1025 printk("ffb: %s at %016lx type %d DAC %d\n", 987 printk("%s: %s at %016lx, type %d, DAC revision %d\n",
988 dp->full_name,
1026 ((all->par.flags & FFB_FLAG_AFB) ? "AFB" : "FFB"), 989 ((all->par.flags & FFB_FLAG_AFB) ? "AFB" : "FFB"),
1027 regs[0].phys_addr, all->par.board_type, all->par.dac_rev); 990 all->par.physbase, all->par.board_type, all->par.dac_rev);
991
992 return 0;
1028} 993}
1029 994
1030static void ffb_scan_siblings(int root) 995static int __devinit ffb_probe(struct of_device *dev, const struct of_device_id *match)
1031{ 996{
1032 int node, child; 997 struct of_device *op = to_of_device(&dev->dev);
1033 998
1034 child = prom_getchild(root); 999 return ffb_init_one(op);
1035 for (node = prom_searchsiblings(child, "SUNW,ffb"); node;
1036 node = prom_searchsiblings(prom_getsibling(node), "SUNW,ffb"))
1037 ffb_init_one(node, root);
1038 for (node = prom_searchsiblings(child, "SUNW,afb"); node;
1039 node = prom_searchsiblings(prom_getsibling(node), "SUNW,afb"))
1040 ffb_init_one(node, root);
1041} 1000}
1042 1001
1043int __init ffb_init(void) 1002static int __devexit ffb_remove(struct of_device *dev)
1044{ 1003{
1045 int root; 1004 struct all_info *all = dev_get_drvdata(&dev->dev);
1046 1005
1047 if (fb_get_options("ffb", NULL)) 1006 unregister_framebuffer(&all->info);
1048 return -ENODEV; 1007 fb_dealloc_cmap(&all->info.cmap);
1049 1008
1050 ffb_scan_siblings(prom_root_node); 1009 of_iounmap(all->par.fbc, sizeof(struct ffb_fbc));
1010 of_iounmap(all->par.dac, sizeof(struct ffb_dac));
1051 1011
1052 root = prom_getchild(prom_root_node); 1012 kfree(all);
1053 for (root = prom_searchsiblings(root, "upa"); root; 1013
1054 root = prom_searchsiblings(prom_getsibling(root), "upa")) 1014 dev_set_drvdata(&dev->dev, NULL);
1055 ffb_scan_siblings(root);
1056 1015
1057 return 0; 1016 return 0;
1058} 1017}
1059 1018
1060void __exit ffb_exit(void) 1019static struct of_device_id ffb_match[] = {
1061{ 1020 {
1062 struct list_head *pos, *tmp; 1021 .name = "SUNW,ffb",
1022 },
1023 {
1024 .name = "SUNW,afb",
1025 },
1026 {},
1027};
1028MODULE_DEVICE_TABLE(of, ffb_match);
1029
1030static struct of_platform_driver ffb_driver = {
1031 .name = "ffb",
1032 .match_table = ffb_match,
1033 .probe = ffb_probe,
1034 .remove = __devexit_p(ffb_remove),
1035};
1063 1036
1064 list_for_each_safe(pos, tmp, &ffb_list) { 1037int __init ffb_init(void)
1065 struct all_info *all = list_entry(pos, typeof(*all), list); 1038{
1039 if (fb_get_options("ffb", NULL))
1040 return -ENODEV;
1066 1041
1067 unregister_framebuffer(&all->info); 1042 return of_register_driver(&ffb_driver, &of_bus_type);
1068 fb_dealloc_cmap(&all->info.cmap);
1069 kfree(all);
1070 }
1071} 1043}
1072 1044
1073int __init 1045void __exit ffb_exit(void)
1074ffb_setup(char *arg)
1075{ 1046{
1076 /* No cmdline options yet... */ 1047 of_unregister_driver(&ffb_driver);
1077 return 0;
1078} 1048}
1079 1049
1080module_init(ffb_init); 1050module_init(ffb_init);
1081
1082#ifdef MODULE
1083module_exit(ffb_exit); 1051module_exit(ffb_exit);
1084#endif
1085 1052
1086MODULE_DESCRIPTION("framebuffer driver for Creator/Elite3D chipsets"); 1053MODULE_DESCRIPTION("framebuffer driver for Creator/Elite3D chipsets");
1087MODULE_AUTHOR("David S. Miller <davem@redhat.com>"); 1054MODULE_AUTHOR("David S. Miller <davem@davemloft.net>");
1055MODULE_VERSION("2.0");
1088MODULE_LICENSE("GPL"); 1056MODULE_LICENSE("GPL");
diff --git a/drivers/video/imacfb.c b/drivers/video/imacfb.c
index 7b1c168c834d..cdbae173d69a 100644
--- a/drivers/video/imacfb.c
+++ b/drivers/video/imacfb.c
@@ -207,10 +207,6 @@ static int __init imacfb_probe(struct platform_device *dev)
207 size_remap = size_total; 207 size_remap = size_total;
208 imacfb_fix.smem_len = size_remap; 208 imacfb_fix.smem_len = size_remap;
209 209
210#ifndef __i386__
211 screen_info.imacpm_seg = 0;
212#endif
213
214 if (!request_mem_region(imacfb_fix.smem_start, size_total, "imacfb")) { 210 if (!request_mem_region(imacfb_fix.smem_start, size_total, "imacfb")) {
215 printk(KERN_WARNING 211 printk(KERN_WARNING
216 "imacfb: cannot reserve video memory at 0x%lx\n", 212 "imacfb: cannot reserve video memory at 0x%lx\n",
diff --git a/drivers/video/leo.c b/drivers/video/leo.c
index a23cfdb9d826..f3a24338d9ac 100644
--- a/drivers/video/leo.c
+++ b/drivers/video/leo.c
@@ -1,6 +1,6 @@
1/* leo.c: LEO frame buffer driver 1/* leo.c: LEO frame buffer driver
2 * 2 *
3 * Copyright (C) 2003 David S. Miller (davem@redhat.com) 3 * Copyright (C) 2003, 2006 David S. Miller (davem@davemloft.net)
4 * Copyright (C) 1996-1999 Jakub Jelinek (jj@ultra.linux.cz) 4 * Copyright (C) 1996-1999 Jakub Jelinek (jj@ultra.linux.cz)
5 * Copyright (C) 1997 Michal Rehacek (Michal.Rehacek@st.mff.cuni.cz) 5 * Copyright (C) 1997 Michal Rehacek (Michal.Rehacek@st.mff.cuni.cz)
6 * 6 *
@@ -18,8 +18,8 @@
18#include <linux/mm.h> 18#include <linux/mm.h>
19 19
20#include <asm/io.h> 20#include <asm/io.h>
21#include <asm/sbus.h> 21#include <asm/prom.h>
22#include <asm/oplib.h> 22#include <asm/of_device.h>
23#include <asm/fbio.h> 23#include <asm/fbio.h>
24 24
25#include "sbuslib.h" 25#include "sbuslib.h"
@@ -80,10 +80,10 @@ static struct fb_ops leo_ops = {
80 80
81struct leo_cursor { 81struct leo_cursor {
82 u8 xxx0[16]; 82 u8 xxx0[16];
83 volatile u32 cur_type; 83 u32 cur_type;
84 volatile u32 cur_misc; 84 u32 cur_misc;
85 volatile u32 cur_cursxy; 85 u32 cur_cursxy;
86 volatile u32 cur_data; 86 u32 cur_data;
87}; 87};
88 88
89#define LEO_KRN_TYPE_CLUT0 0x00001000 89#define LEO_KRN_TYPE_CLUT0 0x00001000
@@ -99,27 +99,27 @@ struct leo_cursor {
99#define LEO_KRN_CSR_UNK2 0x00000001 99#define LEO_KRN_CSR_UNK2 0x00000001
100 100
101struct leo_lx_krn { 101struct leo_lx_krn {
102 volatile u32 krn_type; 102 u32 krn_type;
103 volatile u32 krn_csr; 103 u32 krn_csr;
104 volatile u32 krn_value; 104 u32 krn_value;
105}; 105};
106 106
107struct leo_lc_ss0_krn { 107struct leo_lc_ss0_krn {
108 volatile u32 misc; 108 u32 misc;
109 u8 xxx0[0x800-4]; 109 u8 xxx0[0x800-4];
110 volatile u32 rev; 110 u32 rev;
111}; 111};
112 112
113struct leo_lc_ss0_usr { 113struct leo_lc_ss0_usr {
114 volatile u32 csr; 114 u32 csr;
115 volatile u32 addrspace; 115 u32 addrspace;
116 volatile u32 fontmsk; 116 u32 fontmsk;
117 volatile u32 fontt; 117 u32 fontt;
118 volatile u32 extent; 118 u32 extent;
119 volatile u32 src; 119 u32 src;
120 u32 dst; 120 u32 dst;
121 volatile u32 copy; 121 u32 copy;
122 volatile u32 fill; 122 u32 fill;
123}; 123};
124 124
125struct leo_lc_ss1_krn { 125struct leo_lc_ss1_krn {
@@ -132,47 +132,47 @@ struct leo_lc_ss1_usr {
132 132
133struct leo_ld { 133struct leo_ld {
134 u8 xxx0[0xe00]; 134 u8 xxx0[0xe00];
135 volatile u32 csr; 135 u32 csr;
136 volatile u32 wid; 136 u32 wid;
137 volatile u32 wmask; 137 u32 wmask;
138 volatile u32 widclip; 138 u32 widclip;
139 volatile u32 vclipmin; 139 u32 vclipmin;
140 volatile u32 vclipmax; 140 u32 vclipmax;
141 volatile u32 pickmin; /* SS1 only */ 141 u32 pickmin; /* SS1 only */
142 volatile u32 pickmax; /* SS1 only */ 142 u32 pickmax; /* SS1 only */
143 volatile u32 fg; 143 u32 fg;
144 volatile u32 bg; 144 u32 bg;
145 volatile u32 src; /* Copy/Scroll (SS0 only) */ 145 u32 src; /* Copy/Scroll (SS0 only) */
146 volatile u32 dst; /* Copy/Scroll/Fill (SS0 only) */ 146 u32 dst; /* Copy/Scroll/Fill (SS0 only) */
147 volatile u32 extent; /* Copy/Scroll/Fill size (SS0 only) */ 147 u32 extent; /* Copy/Scroll/Fill size (SS0 only) */
148 u32 xxx1[3]; 148 u32 xxx1[3];
149 volatile u32 setsem; /* SS1 only */ 149 u32 setsem; /* SS1 only */
150 volatile u32 clrsem; /* SS1 only */ 150 u32 clrsem; /* SS1 only */
151 volatile u32 clrpick; /* SS1 only */ 151 u32 clrpick; /* SS1 only */
152 volatile u32 clrdat; /* SS1 only */ 152 u32 clrdat; /* SS1 only */
153 volatile u32 alpha; /* SS1 only */ 153 u32 alpha; /* SS1 only */
154 u8 xxx2[0x2c]; 154 u8 xxx2[0x2c];
155 volatile u32 winbg; 155 u32 winbg;
156 volatile u32 planemask; 156 u32 planemask;
157 volatile u32 rop; 157 u32 rop;
158 volatile u32 z; 158 u32 z;
159 volatile u32 dczf; /* SS1 only */ 159 u32 dczf; /* SS1 only */
160 volatile u32 dczb; /* SS1 only */ 160 u32 dczb; /* SS1 only */
161 volatile u32 dcs; /* SS1 only */ 161 u32 dcs; /* SS1 only */
162 volatile u32 dczs; /* SS1 only */ 162 u32 dczs; /* SS1 only */
163 volatile u32 pickfb; /* SS1 only */ 163 u32 pickfb; /* SS1 only */
164 volatile u32 pickbb; /* SS1 only */ 164 u32 pickbb; /* SS1 only */
165 volatile u32 dcfc; /* SS1 only */ 165 u32 dcfc; /* SS1 only */
166 volatile u32 forcecol; /* SS1 only */ 166 u32 forcecol; /* SS1 only */
167 volatile u32 door[8]; /* SS1 only */ 167 u32 door[8]; /* SS1 only */
168 volatile u32 pick[5]; /* SS1 only */ 168 u32 pick[5]; /* SS1 only */
169}; 169};
170 170
171#define LEO_SS1_MISC_ENABLE 0x00000001 171#define LEO_SS1_MISC_ENABLE 0x00000001
172#define LEO_SS1_MISC_STEREO 0x00000002 172#define LEO_SS1_MISC_STEREO 0x00000002
173struct leo_ld_ss1 { 173struct leo_ld_ss1 {
174 u8 xxx0[0xef4]; 174 u8 xxx0[0xef4];
175 volatile u32 ss1_misc; 175 u32 ss1_misc;
176}; 176};
177 177
178struct leo_ld_gbl { 178struct leo_ld_gbl {
@@ -193,9 +193,8 @@ struct leo_par {
193#define LEO_FLAG_BLANKED 0x00000001 193#define LEO_FLAG_BLANKED 0x00000001
194 194
195 unsigned long physbase; 195 unsigned long physbase;
196 unsigned long which_io;
196 unsigned long fbsize; 197 unsigned long fbsize;
197
198 struct sbus_dev *sdev;
199}; 198};
200 199
201static void leo_wait(struct leo_lx_krn __iomem *lx_krn) 200static void leo_wait(struct leo_lx_krn __iomem *lx_krn)
@@ -368,8 +367,7 @@ static int leo_mmap(struct fb_info *info, struct vm_area_struct *vma)
368 367
369 return sbusfb_mmap_helper(leo_mmap_map, 368 return sbusfb_mmap_helper(leo_mmap_map,
370 par->physbase, par->fbsize, 369 par->physbase, par->fbsize,
371 par->sdev->reg_addrs[0].which_io, 370 par->which_io, vma);
372 vma);
373} 371}
374 372
375static int leo_ioctl(struct fb_info *info, unsigned int cmd, unsigned long arg) 373static int leo_ioctl(struct fb_info *info, unsigned int cmd, unsigned long arg)
@@ -385,11 +383,9 @@ static int leo_ioctl(struct fb_info *info, unsigned int cmd, unsigned long arg)
385 */ 383 */
386 384
387static void 385static void
388leo_init_fix(struct fb_info *info) 386leo_init_fix(struct fb_info *info, struct device_node *dp)
389{ 387{
390 struct leo_par *par = (struct leo_par *)info->par; 388 strlcpy(info->fix.id, dp->name, sizeof(info->fix.id));
391
392 strlcpy(info->fix.id, par->sdev->prom_name, sizeof(info->fix.id));
393 389
394 info->fix.type = FB_TYPE_PACKED_PIXELS; 390 info->fix.type = FB_TYPE_PACKED_PIXELS;
395 info->fix.visual = FB_VISUAL_TRUECOLOR; 391 info->fix.visual = FB_VISUAL_TRUECOLOR;
@@ -532,60 +528,74 @@ static void leo_fixup_var_rgb(struct fb_var_screeninfo *var)
532struct all_info { 528struct all_info {
533 struct fb_info info; 529 struct fb_info info;
534 struct leo_par par; 530 struct leo_par par;
535 struct list_head list;
536}; 531};
537static LIST_HEAD(leo_list);
538 532
539static void leo_init_one(struct sbus_dev *sdev) 533static void leo_unmap_regs(struct all_info *all)
540{ 534{
541 struct all_info *all; 535 if (all->par.lc_ss0_usr)
542 int linebytes; 536 of_iounmap(all->par.lc_ss0_usr, 0x1000);
537 if (all->par.ld_ss0)
538 of_iounmap(all->par.ld_ss0, 0x1000);
539 if (all->par.ld_ss1)
540 of_iounmap(all->par.ld_ss1, 0x1000);
541 if (all->par.lx_krn)
542 of_iounmap(all->par.lx_krn, 0x1000);
543 if (all->par.cursor)
544 of_iounmap(all->par.cursor, sizeof(struct leo_cursor));
545 if (all->info.screen_base)
546 of_iounmap(all->info.screen_base, 0x800000);
547}
543 548
544 all = kmalloc(sizeof(*all), GFP_KERNEL); 549static int __devinit leo_init_one(struct of_device *op)
545 if (!all) { 550{
546 printk(KERN_ERR "leo: Cannot allocate memory.\n"); 551 struct device_node *dp = op->node;
547 return; 552 struct all_info *all;
548 } 553 int linebytes, err;
549 memset(all, 0, sizeof(*all));
550 554
551 INIT_LIST_HEAD(&all->list); 555 all = kzalloc(sizeof(*all), GFP_KERNEL);
556 if (!all)
557 return -ENOMEM;
552 558
553 spin_lock_init(&all->par.lock); 559 spin_lock_init(&all->par.lock);
554 all->par.sdev = sdev;
555 560
556 all->par.physbase = sdev->reg_addrs[0].phys_addr; 561 all->par.physbase = op->resource[0].start;
562 all->par.which_io = op->resource[0].flags & IORESOURCE_BITS;
557 563
558 sbusfb_fill_var(&all->info.var, sdev->prom_node, 32); 564 sbusfb_fill_var(&all->info.var, dp->node, 32);
559 leo_fixup_var_rgb(&all->info.var); 565 leo_fixup_var_rgb(&all->info.var);
560 566
561 linebytes = prom_getintdefault(sdev->prom_node, "linebytes", 567 linebytes = of_getintprop_default(dp, "linebytes",
562 all->info.var.xres); 568 all->info.var.xres);
563 all->par.fbsize = PAGE_ALIGN(linebytes * all->info.var.yres); 569 all->par.fbsize = PAGE_ALIGN(linebytes * all->info.var.yres);
564 570
565#ifdef CONFIG_SPARC32
566 all->info.screen_base = (char __iomem *)
567 prom_getintdefault(sdev->prom_node, "address", 0);
568#endif
569 if (!all->info.screen_base)
570 all->info.screen_base =
571 sbus_ioremap(&sdev->resource[0], LEO_OFF_SS0,
572 0x800000, "leo ram");
573
574 all->par.lc_ss0_usr = 571 all->par.lc_ss0_usr =
575 sbus_ioremap(&sdev->resource[0], LEO_OFF_LC_SS0_USR, 572 of_ioremap(&op->resource[0], LEO_OFF_LC_SS0_USR,
576 0x1000, "leolc ss0usr"); 573 0x1000, "leolc ss0usr");
577 all->par.ld_ss0 = 574 all->par.ld_ss0 =
578 sbus_ioremap(&sdev->resource[0], LEO_OFF_LD_SS0, 575 of_ioremap(&op->resource[0], LEO_OFF_LD_SS0,
579 0x1000, "leold ss0"); 576 0x1000, "leold ss0");
580 all->par.ld_ss1 = 577 all->par.ld_ss1 =
581 sbus_ioremap(&sdev->resource[0], LEO_OFF_LD_SS1, 578 of_ioremap(&op->resource[0], LEO_OFF_LD_SS1,
582 0x1000, "leold ss1"); 579 0x1000, "leold ss1");
583 all->par.lx_krn = 580 all->par.lx_krn =
584 sbus_ioremap(&sdev->resource[0], LEO_OFF_LX_KRN, 581 of_ioremap(&op->resource[0], LEO_OFF_LX_KRN,
585 0x1000, "leolx krn"); 582 0x1000, "leolx krn");
586 all->par.cursor = 583 all->par.cursor =
587 sbus_ioremap(&sdev->resource[0], LEO_OFF_LX_CURSOR, 584 of_ioremap(&op->resource[0], LEO_OFF_LX_CURSOR,
588 sizeof(struct leo_cursor), "leolx cursor"); 585 sizeof(struct leo_cursor), "leolx cursor");
586 all->info.screen_base =
587 of_ioremap(&op->resource[0], LEO_OFF_SS0,
588 0x800000, "leo ram");
589 if (!all->par.lc_ss0_usr ||
590 !all->par.ld_ss0 ||
591 !all->par.ld_ss1 ||
592 !all->par.lx_krn ||
593 !all->par.cursor ||
594 !all->info.screen_base) {
595 leo_unmap_regs(all);
596 kfree(all);
597 return -ENOMEM;
598 }
589 599
590 all->info.flags = FBINFO_DEFAULT | FBINFO_HWACCEL_YPAN; 600 all->info.flags = FBINFO_DEFAULT | FBINFO_HWACCEL_YPAN;
591 all->info.fbops = &leo_ops; 601 all->info.fbops = &leo_ops;
@@ -597,69 +607,85 @@ static void leo_init_one(struct sbus_dev *sdev)
597 leo_blank(0, &all->info); 607 leo_blank(0, &all->info);
598 608
599 if (fb_alloc_cmap(&all->info.cmap, 256, 0)) { 609 if (fb_alloc_cmap(&all->info.cmap, 256, 0)) {
600 printk(KERN_ERR "leo: Could not allocate color map.\n"); 610 leo_unmap_regs(all);
601 kfree(all); 611 kfree(all);
602 return; 612 return -ENOMEM;;
603 } 613 }
604 614
605 leo_init_fix(&all->info); 615 leo_init_fix(&all->info, dp);
606 616
607 if (register_framebuffer(&all->info) < 0) { 617 err = register_framebuffer(&all->info);
608 printk(KERN_ERR "leo: Could not register framebuffer.\n"); 618 if (err < 0) {
609 fb_dealloc_cmap(&all->info.cmap); 619 fb_dealloc_cmap(&all->info.cmap);
620 leo_unmap_regs(all);
610 kfree(all); 621 kfree(all);
611 return; 622 return err;
612 } 623 }
613 624
614 list_add(&all->list, &leo_list); 625 dev_set_drvdata(&op->dev, all);
626
627 printk("%s: leo at %lx:%lx\n",
628 dp->full_name,
629 all->par.which_io, all->par.physbase);
615 630
616 printk("leo: %s at %lx:%lx\n", 631 return 0;
617 sdev->prom_name,
618 (long) sdev->reg_addrs[0].which_io,
619 (long) sdev->reg_addrs[0].phys_addr);
620} 632}
621 633
622int __init leo_init(void) 634static int __devinit leo_probe(struct of_device *dev, const struct of_device_id *match)
623{ 635{
624 struct sbus_bus *sbus; 636 struct of_device *op = to_of_device(&dev->dev);
625 struct sbus_dev *sdev;
626 637
627 if (fb_get_options("leofb", NULL)) 638 return leo_init_one(op);
628 return -ENODEV; 639}
629 640
630 for_all_sbusdev(sdev, sbus) { 641static int __devexit leo_remove(struct of_device *dev)
631 if (!strcmp(sdev->prom_name, "leo")) 642{
632 leo_init_one(sdev); 643 struct all_info *all = dev_get_drvdata(&dev->dev);
633 } 644
645 unregister_framebuffer(&all->info);
646 fb_dealloc_cmap(&all->info.cmap);
647
648 leo_unmap_regs(all);
649
650 kfree(all);
651
652 dev_set_drvdata(&dev->dev, NULL);
634 653
635 return 0; 654 return 0;
636} 655}
637 656
638void __exit leo_exit(void) 657static struct of_device_id leo_match[] = {
639{ 658 {
640 struct list_head *pos, *tmp; 659 .name = "leo",
660 },
661 {},
662};
663MODULE_DEVICE_TABLE(of, leo_match);
664
665static struct of_platform_driver leo_driver = {
666 .name = "leo",
667 .match_table = leo_match,
668 .probe = leo_probe,
669 .remove = __devexit_p(leo_remove),
670};
641 671
642 list_for_each_safe(pos, tmp, &leo_list) { 672static int __init leo_init(void)
643 struct all_info *all = list_entry(pos, typeof(*all), list); 673{
674 if (fb_get_options("leofb", NULL))
675 return -ENODEV;
644 676
645 unregister_framebuffer(&all->info); 677 return of_register_driver(&leo_driver, &of_bus_type);
646 fb_dealloc_cmap(&all->info.cmap);
647 kfree(all);
648 }
649} 678}
650 679
651int __init 680static void __exit leo_exit(void)
652leo_setup(char *arg)
653{ 681{
654 /* No cmdline options yet... */ 682 of_unregister_driver(&leo_driver);
655 return 0;
656} 683}
657 684
658module_init(leo_init); 685module_init(leo_init);
659#ifdef MODULE
660module_exit(leo_exit); 686module_exit(leo_exit);
661#endif
662 687
663MODULE_DESCRIPTION("framebuffer driver for LEO chipsets"); 688MODULE_DESCRIPTION("framebuffer driver for LEO chipsets");
664MODULE_AUTHOR("David S. Miller <davem@redhat.com>"); 689MODULE_AUTHOR("David S. Miller <davem@davemloft.net>");
690MODULE_VERSION("2.0");
665MODULE_LICENSE("GPL"); 691MODULE_LICENSE("GPL");
diff --git a/drivers/video/p9100.c b/drivers/video/p9100.c
index 0d1957505359..56ac51d6a7f3 100644
--- a/drivers/video/p9100.c
+++ b/drivers/video/p9100.c
@@ -1,6 +1,6 @@
1/* p9100.c: P9100 frame buffer driver 1/* p9100.c: P9100 frame buffer driver
2 * 2 *
3 * Copyright (C) 2003 David S. Miller (davem@redhat.com) 3 * Copyright (C) 2003, 2006 David S. Miller (davem@davemloft.net)
4 * Copyright 1999 Derrick J Brashear (shadow@dementia.org) 4 * Copyright 1999 Derrick J Brashear (shadow@dementia.org)
5 * 5 *
6 * Driver layout based loosely on tgafb.c, see that file for credits. 6 * Driver layout based loosely on tgafb.c, see that file for credits.
@@ -17,8 +17,8 @@
17#include <linux/mm.h> 17#include <linux/mm.h>
18 18
19#include <asm/io.h> 19#include <asm/io.h>
20#include <asm/sbus.h> 20#include <asm/prom.h>
21#include <asm/oplib.h> 21#include <asm/of_device.h>
22#include <asm/fbio.h> 22#include <asm/fbio.h>
23 23
24#include "sbuslib.h" 24#include "sbuslib.h"
@@ -72,60 +72,60 @@ static struct fb_ops p9100_ops = {
72 72
73struct p9100_regs { 73struct p9100_regs {
74 /* Registers for the system control */ 74 /* Registers for the system control */
75 volatile u32 sys_base; 75 u32 sys_base;
76 volatile u32 sys_config; 76 u32 sys_config;
77 volatile u32 sys_intr; 77 u32 sys_intr;
78 volatile u32 sys_int_ena; 78 u32 sys_int_ena;
79 volatile u32 sys_alt_rd; 79 u32 sys_alt_rd;
80 volatile u32 sys_alt_wr; 80 u32 sys_alt_wr;
81 volatile u32 sys_xxx[58]; 81 u32 sys_xxx[58];
82 82
83 /* Registers for the video control */ 83 /* Registers for the video control */
84 volatile u32 vid_base; 84 u32 vid_base;
85 volatile u32 vid_hcnt; 85 u32 vid_hcnt;
86 volatile u32 vid_htotal; 86 u32 vid_htotal;
87 volatile u32 vid_hsync_rise; 87 u32 vid_hsync_rise;
88 volatile u32 vid_hblank_rise; 88 u32 vid_hblank_rise;
89 volatile u32 vid_hblank_fall; 89 u32 vid_hblank_fall;
90 volatile u32 vid_hcnt_preload; 90 u32 vid_hcnt_preload;
91 volatile u32 vid_vcnt; 91 u32 vid_vcnt;
92 volatile u32 vid_vlen; 92 u32 vid_vlen;
93 volatile u32 vid_vsync_rise; 93 u32 vid_vsync_rise;
94 volatile u32 vid_vblank_rise; 94 u32 vid_vblank_rise;
95 volatile u32 vid_vblank_fall; 95 u32 vid_vblank_fall;
96 volatile u32 vid_vcnt_preload; 96 u32 vid_vcnt_preload;
97 volatile u32 vid_screenpaint_addr; 97 u32 vid_screenpaint_addr;
98 volatile u32 vid_screenpaint_timectl1; 98 u32 vid_screenpaint_timectl1;
99 volatile u32 vid_screenpaint_qsfcnt; 99 u32 vid_screenpaint_qsfcnt;
100 volatile u32 vid_screenpaint_timectl2; 100 u32 vid_screenpaint_timectl2;
101 volatile u32 vid_xxx[15]; 101 u32 vid_xxx[15];
102 102
103 /* Registers for the video control */ 103 /* Registers for the video control */
104 volatile u32 vram_base; 104 u32 vram_base;
105 volatile u32 vram_memcfg; 105 u32 vram_memcfg;
106 volatile u32 vram_refresh_pd; 106 u32 vram_refresh_pd;
107 volatile u32 vram_refresh_cnt; 107 u32 vram_refresh_cnt;
108 volatile u32 vram_raslo_max; 108 u32 vram_raslo_max;
109 volatile u32 vram_raslo_cur; 109 u32 vram_raslo_cur;
110 volatile u32 pwrup_cfg; 110 u32 pwrup_cfg;
111 volatile u32 vram_xxx[25]; 111 u32 vram_xxx[25];
112 112
113 /* Registers for IBM RGB528 Palette */ 113 /* Registers for IBM RGB528 Palette */
114 volatile u32 ramdac_cmap_wridx; 114 u32 ramdac_cmap_wridx;
115 volatile u32 ramdac_palette_data; 115 u32 ramdac_palette_data;
116 volatile u32 ramdac_pixel_mask; 116 u32 ramdac_pixel_mask;
117 volatile u32 ramdac_palette_rdaddr; 117 u32 ramdac_palette_rdaddr;
118 volatile u32 ramdac_idx_lo; 118 u32 ramdac_idx_lo;
119 volatile u32 ramdac_idx_hi; 119 u32 ramdac_idx_hi;
120 volatile u32 ramdac_idx_data; 120 u32 ramdac_idx_data;
121 volatile u32 ramdac_idx_ctl; 121 u32 ramdac_idx_ctl;
122 volatile u32 ramdac_xxx[1784]; 122 u32 ramdac_xxx[1784];
123}; 123};
124 124
125struct p9100_cmd_parameng { 125struct p9100_cmd_parameng {
126 volatile u32 parameng_status; 126 u32 parameng_status;
127 volatile u32 parameng_bltcmd; 127 u32 parameng_bltcmd;
128 volatile u32 parameng_quadcmd; 128 u32 parameng_quadcmd;
129}; 129};
130 130
131struct p9100_par { 131struct p9100_par {
@@ -136,9 +136,8 @@ struct p9100_par {
136#define P9100_FLAG_BLANKED 0x00000001 136#define P9100_FLAG_BLANKED 0x00000001
137 137
138 unsigned long physbase; 138 unsigned long physbase;
139 unsigned long which_io;
139 unsigned long fbsize; 140 unsigned long fbsize;
140
141 struct sbus_dev *sdev;
142}; 141};
143 142
144/** 143/**
@@ -227,8 +226,7 @@ static int p9100_mmap(struct fb_info *info, struct vm_area_struct *vma)
227 226
228 return sbusfb_mmap_helper(p9100_mmap_map, 227 return sbusfb_mmap_helper(p9100_mmap_map,
229 par->physbase, par->fbsize, 228 par->physbase, par->fbsize,
230 par->sdev->reg_addrs[0].which_io, 229 par->which_io, vma);
231 vma);
232} 230}
233 231
234static int p9100_ioctl(struct fb_info *info, unsigned int cmd, 232static int p9100_ioctl(struct fb_info *info, unsigned int cmd,
@@ -245,12 +243,9 @@ static int p9100_ioctl(struct fb_info *info, unsigned int cmd,
245 * Initialisation 243 * Initialisation
246 */ 244 */
247 245
248static void 246static void p9100_init_fix(struct fb_info *info, int linebytes, struct device_node *dp)
249p9100_init_fix(struct fb_info *info, int linebytes)
250{ 247{
251 struct p9100_par *par = (struct p9100_par *)info->par; 248 strlcpy(info->fix.id, dp->name, sizeof(info->fix.id));
252
253 strlcpy(info->fix.id, par->sdev->prom_name, sizeof(info->fix.id));
254 249
255 info->fix.type = FB_TYPE_PACKED_PIXELS; 250 info->fix.type = FB_TYPE_PACKED_PIXELS;
256 info->fix.visual = FB_VISUAL_PSEUDOCOLOR; 251 info->fix.visual = FB_VISUAL_PSEUDOCOLOR;
@@ -263,121 +258,137 @@ p9100_init_fix(struct fb_info *info, int linebytes)
263struct all_info { 258struct all_info {
264 struct fb_info info; 259 struct fb_info info;
265 struct p9100_par par; 260 struct p9100_par par;
266 struct list_head list;
267}; 261};
268static LIST_HEAD(p9100_list);
269 262
270static void p9100_init_one(struct sbus_dev *sdev) 263static int __devinit p9100_init_one(struct of_device *op)
271{ 264{
265 struct device_node *dp = op->node;
272 struct all_info *all; 266 struct all_info *all;
273 int linebytes; 267 int linebytes, err;
274
275 all = kmalloc(sizeof(*all), GFP_KERNEL);
276 if (!all) {
277 printk(KERN_ERR "p9100: Cannot allocate memory.\n");
278 return;
279 }
280 memset(all, 0, sizeof(*all));
281 268
282 INIT_LIST_HEAD(&all->list); 269 all = kzalloc(sizeof(*all), GFP_KERNEL);
270 if (!all)
271 return -ENOMEM;
283 272
284 spin_lock_init(&all->par.lock); 273 spin_lock_init(&all->par.lock);
285 all->par.sdev = sdev;
286 274
287 /* This is the framebuffer and the only resource apps can mmap. */ 275 /* This is the framebuffer and the only resource apps can mmap. */
288 all->par.physbase = sdev->reg_addrs[2].phys_addr; 276 all->par.physbase = op->resource[2].start;
277 all->par.which_io = op->resource[2].flags & IORESOURCE_BITS;
289 278
290 sbusfb_fill_var(&all->info.var, sdev->prom_node, 8); 279 sbusfb_fill_var(&all->info.var, dp->node, 8);
291 all->info.var.red.length = 8; 280 all->info.var.red.length = 8;
292 all->info.var.green.length = 8; 281 all->info.var.green.length = 8;
293 all->info.var.blue.length = 8; 282 all->info.var.blue.length = 8;
294 283
295 linebytes = prom_getintdefault(sdev->prom_node, "linebytes", 284 linebytes = of_getintprop_default(dp, "linebytes",
296 all->info.var.xres); 285 all->info.var.xres);
297 all->par.fbsize = PAGE_ALIGN(linebytes * all->info.var.yres); 286 all->par.fbsize = PAGE_ALIGN(linebytes * all->info.var.yres);
298 287
299 all->par.regs = sbus_ioremap(&sdev->resource[0], 0, 288 all->par.regs = of_ioremap(&op->resource[0], 0,
300 sizeof(struct p9100_regs), "p9100 regs"); 289 sizeof(struct p9100_regs), "p9100 regs");
290 if (!all->par.regs) {
291 kfree(all);
292 return -ENOMEM;
293 }
301 294
302 all->info.flags = FBINFO_DEFAULT; 295 all->info.flags = FBINFO_DEFAULT;
303 all->info.fbops = &p9100_ops; 296 all->info.fbops = &p9100_ops;
304#ifdef CONFIG_SPARC32 297 all->info.screen_base = of_ioremap(&op->resource[2], 0,
305 all->info.screen_base = (char __iomem *) 298 all->par.fbsize, "p9100 ram");
306 prom_getintdefault(sdev->prom_node, "address", 0); 299 if (!all->info.screen_base) {
307#endif 300 of_iounmap(all->par.regs, sizeof(struct p9100_regs));
308 if (!all->info.screen_base) 301 kfree(all);
309 all->info.screen_base = sbus_ioremap(&sdev->resource[2], 0, 302 return -ENOMEM;
310 all->par.fbsize, "p9100 ram"); 303 }
311 all->info.par = &all->par; 304 all->info.par = &all->par;
312 305
313 p9100_blank(0, &all->info); 306 p9100_blank(0, &all->info);
314 307
315 if (fb_alloc_cmap(&all->info.cmap, 256, 0)) { 308 if (fb_alloc_cmap(&all->info.cmap, 256, 0)) {
316 printk(KERN_ERR "p9100: Could not allocate color map.\n"); 309 of_iounmap(all->par.regs, sizeof(struct p9100_regs));
310 of_iounmap(all->info.screen_base, all->par.fbsize);
317 kfree(all); 311 kfree(all);
318 return; 312 return -ENOMEM;
319 } 313 }
320 314
321 p9100_init_fix(&all->info, linebytes); 315 p9100_init_fix(&all->info, linebytes, dp);
322 316
323 if (register_framebuffer(&all->info) < 0) { 317 err = register_framebuffer(&all->info);
324 printk(KERN_ERR "p9100: Could not register framebuffer.\n"); 318 if (err < 0) {
325 fb_dealloc_cmap(&all->info.cmap); 319 fb_dealloc_cmap(&all->info.cmap);
320 of_iounmap(all->par.regs, sizeof(struct p9100_regs));
321 of_iounmap(all->info.screen_base, all->par.fbsize);
326 kfree(all); 322 kfree(all);
327 return; 323 return err;
328 } 324 }
329 fb_set_cmap(&all->info.cmap, &all->info); 325 fb_set_cmap(&all->info.cmap, &all->info);
330 326
331 list_add(&all->list, &p9100_list); 327 dev_set_drvdata(&op->dev, all);
328
329 printk("%s: p9100 at %lx:%lx\n",
330 dp->full_name,
331 all->par.which_io, all->par.physbase);
332 332
333 printk("p9100: %s at %lx:%lx\n", 333 return 0;
334 sdev->prom_name,
335 (long) sdev->reg_addrs[0].which_io,
336 (long) sdev->reg_addrs[0].phys_addr);
337} 334}
338 335
339int __init p9100_init(void) 336static int __devinit p9100_probe(struct of_device *dev, const struct of_device_id *match)
340{ 337{
341 struct sbus_bus *sbus; 338 struct of_device *op = to_of_device(&dev->dev);
342 struct sbus_dev *sdev;
343 339
344 if (fb_get_options("p9100fb", NULL)) 340 return p9100_init_one(op);
345 return -ENODEV; 341}
346 342
347 for_all_sbusdev(sdev, sbus) { 343static int __devexit p9100_remove(struct of_device *dev)
348 if (!strcmp(sdev->prom_name, "p9100")) 344{
349 p9100_init_one(sdev); 345 struct all_info *all = dev_get_drvdata(&dev->dev);
350 } 346
347 unregister_framebuffer(&all->info);
348 fb_dealloc_cmap(&all->info.cmap);
349
350 of_iounmap(all->par.regs, sizeof(struct p9100_regs));
351 of_iounmap(all->info.screen_base, all->par.fbsize);
352
353 kfree(all);
354
355 dev_set_drvdata(&dev->dev, NULL);
351 356
352 return 0; 357 return 0;
353} 358}
354 359
355void __exit p9100_exit(void) 360static struct of_device_id p9100_match[] = {
356{ 361 {
357 struct list_head *pos, *tmp; 362 .name = "p9100",
363 },
364 {},
365};
366MODULE_DEVICE_TABLE(of, p9100_match);
358 367
359 list_for_each_safe(pos, tmp, &p9100_list) { 368static struct of_platform_driver p9100_driver = {
360 struct all_info *all = list_entry(pos, typeof(*all), list); 369 .name = "p9100",
370 .match_table = p9100_match,
371 .probe = p9100_probe,
372 .remove = __devexit_p(p9100_remove),
373};
361 374
362 unregister_framebuffer(&all->info); 375static int __init p9100_init(void)
363 fb_dealloc_cmap(&all->info.cmap); 376{
364 kfree(all); 377 if (fb_get_options("p9100fb", NULL))
365 } 378 return -ENODEV;
379
380 return of_register_driver(&p9100_driver, &of_bus_type);
366} 381}
367 382
368int __init 383static void __exit p9100_exit(void)
369p9100_setup(char *arg)
370{ 384{
371 /* No cmdline options yet... */ 385 of_unregister_driver(&p9100_driver);
372 return 0;
373} 386}
374 387
375module_init(p9100_init); 388module_init(p9100_init);
376
377#ifdef MODULE
378module_exit(p9100_exit); 389module_exit(p9100_exit);
379#endif
380 390
381MODULE_DESCRIPTION("framebuffer driver for P9100 chipsets"); 391MODULE_DESCRIPTION("framebuffer driver for P9100 chipsets");
382MODULE_AUTHOR("David S. Miller <davem@redhat.com>"); 392MODULE_AUTHOR("David S. Miller <davem@davemloft.net>");
393MODULE_VERSION("2.0");
383MODULE_LICENSE("GPL"); 394MODULE_LICENSE("GPL");
diff --git a/drivers/video/tcx.c b/drivers/video/tcx.c
index 95b918229d9b..6990ab11cd06 100644
--- a/drivers/video/tcx.c
+++ b/drivers/video/tcx.c
@@ -1,6 +1,6 @@
1/* tcx.c: TCX frame buffer driver 1/* tcx.c: TCX frame buffer driver
2 * 2 *
3 * Copyright (C) 2003 David S. Miller (davem@redhat.com) 3 * Copyright (C) 2003, 2006 David S. Miller (davem@davemloft.net)
4 * Copyright (C) 1996,1998 Jakub Jelinek (jj@ultra.linux.cz) 4 * Copyright (C) 1996,1998 Jakub Jelinek (jj@ultra.linux.cz)
5 * Copyright (C) 1996 Miguel de Icaza (miguel@nuclecu.unam.mx) 5 * Copyright (C) 1996 Miguel de Icaza (miguel@nuclecu.unam.mx)
6 * Copyright (C) 1996 Eddie C. Dost (ecd@skynet.be) 6 * Copyright (C) 1996 Eddie C. Dost (ecd@skynet.be)
@@ -19,8 +19,8 @@
19#include <linux/mm.h> 19#include <linux/mm.h>
20 20
21#include <asm/io.h> 21#include <asm/io.h>
22#include <asm/sbus.h> 22#include <asm/prom.h>
23#include <asm/oplib.h> 23#include <asm/of_device.h>
24#include <asm/fbio.h> 24#include <asm/fbio.h>
25 25
26#include "sbuslib.h" 26#include "sbuslib.h"
@@ -77,32 +77,32 @@ static struct fb_ops tcx_ops = {
77 77
78/* The contents are unknown */ 78/* The contents are unknown */
79struct tcx_tec { 79struct tcx_tec {
80 volatile u32 tec_matrix; 80 u32 tec_matrix;
81 volatile u32 tec_clip; 81 u32 tec_clip;
82 volatile u32 tec_vdc; 82 u32 tec_vdc;
83}; 83};
84 84
85struct tcx_thc { 85struct tcx_thc {
86 volatile u32 thc_rev; 86 u32 thc_rev;
87 u32 thc_pad0[511]; 87 u32 thc_pad0[511];
88 volatile u32 thc_hs; /* hsync timing */ 88 u32 thc_hs; /* hsync timing */
89 volatile u32 thc_hsdvs; 89 u32 thc_hsdvs;
90 volatile u32 thc_hd; 90 u32 thc_hd;
91 volatile u32 thc_vs; /* vsync timing */ 91 u32 thc_vs; /* vsync timing */
92 volatile u32 thc_vd; 92 u32 thc_vd;
93 volatile u32 thc_refresh; 93 u32 thc_refresh;
94 volatile u32 thc_misc; 94 u32 thc_misc;
95 u32 thc_pad1[56]; 95 u32 thc_pad1[56];
96 volatile u32 thc_cursxy; /* cursor x,y position (16 bits each) */ 96 u32 thc_cursxy; /* cursor x,y position (16 bits each) */
97 volatile u32 thc_cursmask[32]; /* cursor mask bits */ 97 u32 thc_cursmask[32]; /* cursor mask bits */
98 volatile u32 thc_cursbits[32]; /* what to show where mask enabled */ 98 u32 thc_cursbits[32]; /* what to show where mask enabled */
99}; 99};
100 100
101struct bt_regs { 101struct bt_regs {
102 volatile u32 addr; 102 u32 addr;
103 volatile u32 color_map; 103 u32 color_map;
104 volatile u32 control; 104 u32 control;
105 volatile u32 cursor; 105 u32 cursor;
106}; 106};
107 107
108#define TCX_MMAP_ENTRIES 14 108#define TCX_MMAP_ENTRIES 14
@@ -112,24 +112,23 @@ struct tcx_par {
112 struct bt_regs __iomem *bt; 112 struct bt_regs __iomem *bt;
113 struct tcx_thc __iomem *thc; 113 struct tcx_thc __iomem *thc;
114 struct tcx_tec __iomem *tec; 114 struct tcx_tec __iomem *tec;
115 volatile u32 __iomem *cplane; 115 u32 __iomem *cplane;
116 116
117 u32 flags; 117 u32 flags;
118#define TCX_FLAG_BLANKED 0x00000001 118#define TCX_FLAG_BLANKED 0x00000001
119 119
120 unsigned long physbase; 120 unsigned long physbase;
121 unsigned long which_io;
121 unsigned long fbsize; 122 unsigned long fbsize;
122 123
123 struct sbus_mmap_map mmap_map[TCX_MMAP_ENTRIES]; 124 struct sbus_mmap_map mmap_map[TCX_MMAP_ENTRIES];
124 int lowdepth; 125 int lowdepth;
125
126 struct sbus_dev *sdev;
127}; 126};
128 127
129/* Reset control plane so that WID is 8-bit plane. */ 128/* Reset control plane so that WID is 8-bit plane. */
130static void __tcx_set_control_plane (struct tcx_par *par) 129static void __tcx_set_control_plane (struct tcx_par *par)
131{ 130{
132 volatile u32 __iomem *p, *pend; 131 u32 __iomem *p, *pend;
133 132
134 if (par->lowdepth) 133 if (par->lowdepth)
135 return; 134 return;
@@ -307,8 +306,7 @@ static int tcx_mmap(struct fb_info *info, struct vm_area_struct *vma)
307 306
308 return sbusfb_mmap_helper(par->mmap_map, 307 return sbusfb_mmap_helper(par->mmap_map,
309 par->physbase, par->fbsize, 308 par->physbase, par->fbsize,
310 par->sdev->reg_addrs[0].which_io, 309 par->which_io, vma);
311 vma);
312} 310}
313 311
314static int tcx_ioctl(struct fb_info *info, unsigned int cmd, 312static int tcx_ioctl(struct fb_info *info, unsigned int cmd,
@@ -350,48 +348,71 @@ tcx_init_fix(struct fb_info *info, int linebytes)
350struct all_info { 348struct all_info {
351 struct fb_info info; 349 struct fb_info info;
352 struct tcx_par par; 350 struct tcx_par par;
353 struct list_head list;
354}; 351};
355static LIST_HEAD(tcx_list);
356 352
357static void tcx_init_one(struct sbus_dev *sdev) 353static void tcx_unmap_regs(struct all_info *all)
358{ 354{
359 struct all_info *all; 355 if (all->par.tec)
360 int linebytes, i; 356 of_iounmap(all->par.tec, sizeof(struct tcx_tec));
357 if (all->par.thc)
358 of_iounmap(all->par.thc, sizeof(struct tcx_thc));
359 if (all->par.bt)
360 of_iounmap(all->par.bt, sizeof(struct bt_regs));
361 if (all->par.cplane)
362 of_iounmap(all->par.cplane, all->par.fbsize * sizeof(u32));
363 if (all->info.screen_base)
364 of_iounmap(all->info.screen_base, all->par.fbsize);
365}
361 366
362 all = kmalloc(sizeof(*all), GFP_KERNEL); 367static int __devinit tcx_init_one(struct of_device *op)
363 if (!all) { 368{
364 printk(KERN_ERR "tcx: Cannot allocate memory.\n"); 369 struct device_node *dp = op->node;
365 return; 370 struct all_info *all;
366 } 371 int linebytes, i, err;
367 memset(all, 0, sizeof(*all));
368 372
369 INIT_LIST_HEAD(&all->list); 373 all = kzalloc(sizeof(*all), GFP_KERNEL);
374 if (!all)
375 return -ENOMEM;
370 376
371 spin_lock_init(&all->par.lock); 377 spin_lock_init(&all->par.lock);
372 all->par.sdev = sdev;
373 378
374 all->par.lowdepth = prom_getbool(sdev->prom_node, "tcx-8-bit"); 379 all->par.lowdepth =
380 (of_find_property(dp, "tcx-8-bit", NULL) != NULL);
375 381
376 sbusfb_fill_var(&all->info.var, sdev->prom_node, 8); 382 sbusfb_fill_var(&all->info.var, dp->node, 8);
377 all->info.var.red.length = 8; 383 all->info.var.red.length = 8;
378 all->info.var.green.length = 8; 384 all->info.var.green.length = 8;
379 all->info.var.blue.length = 8; 385 all->info.var.blue.length = 8;
380 386
381 linebytes = prom_getintdefault(sdev->prom_node, "linebytes", 387 linebytes = of_getintprop_default(dp, "linebytes",
382 all->info.var.xres); 388 all->info.var.xres);
383 all->par.fbsize = PAGE_ALIGN(linebytes * all->info.var.yres); 389 all->par.fbsize = PAGE_ALIGN(linebytes * all->info.var.yres);
384 390
385 all->par.tec = sbus_ioremap(&sdev->resource[7], 0, 391 all->par.tec = of_ioremap(&op->resource[7], 0,
386 sizeof(struct tcx_tec), "tcx tec"); 392 sizeof(struct tcx_tec), "tcx tec");
387 all->par.thc = sbus_ioremap(&sdev->resource[9], 0, 393 all->par.thc = of_ioremap(&op->resource[9], 0,
388 sizeof(struct tcx_thc), "tcx thc"); 394 sizeof(struct tcx_thc), "tcx thc");
389 all->par.bt = sbus_ioremap(&sdev->resource[8], 0, 395 all->par.bt = of_ioremap(&op->resource[8], 0,
390 sizeof(struct bt_regs), "tcx dac"); 396 sizeof(struct bt_regs), "tcx dac");
397 all->info.screen_base = of_ioremap(&op->resource[0], 0,
398 all->par.fbsize, "tcx ram");
399 if (!all->par.tec || !all->par.thc ||
400 !all->par.bt || !all->info.screen_base) {
401 tcx_unmap_regs(all);
402 kfree(all);
403 return -ENOMEM;
404 }
405
391 memcpy(&all->par.mmap_map, &__tcx_mmap_map, sizeof(all->par.mmap_map)); 406 memcpy(&all->par.mmap_map, &__tcx_mmap_map, sizeof(all->par.mmap_map));
392 if (!all->par.lowdepth) { 407 if (!all->par.lowdepth) {
393 all->par.cplane = sbus_ioremap(&sdev->resource[4], 0, 408 all->par.cplane = of_ioremap(&op->resource[4], 0,
394 all->par.fbsize * sizeof(u32), "tcx cplane"); 409 all->par.fbsize * sizeof(u32),
410 "tcx cplane");
411 if (!all->par.cplane) {
412 tcx_unmap_regs(all);
413 kfree(all);
414 return -ENOMEM;
415 }
395 } else { 416 } else {
396 all->par.mmap_map[1].size = SBUS_MMAP_EMPTY; 417 all->par.mmap_map[1].size = SBUS_MMAP_EMPTY;
397 all->par.mmap_map[4].size = SBUS_MMAP_EMPTY; 418 all->par.mmap_map[4].size = SBUS_MMAP_EMPTY;
@@ -400,6 +421,8 @@ static void tcx_init_one(struct sbus_dev *sdev)
400 } 421 }
401 422
402 all->par.physbase = 0; 423 all->par.physbase = 0;
424 all->par.which_io = op->resource[0].flags & IORESOURCE_BITS;
425
403 for (i = 0; i < TCX_MMAP_ENTRIES; i++) { 426 for (i = 0; i < TCX_MMAP_ENTRIES; i++) {
404 int j; 427 int j;
405 428
@@ -416,18 +439,11 @@ static void tcx_init_one(struct sbus_dev *sdev)
416 j = i; 439 j = i;
417 break; 440 break;
418 }; 441 };
419 all->par.mmap_map[i].poff = sdev->reg_addrs[j].phys_addr; 442 all->par.mmap_map[i].poff = op->resource[j].start;
420 } 443 }
421 444
422 all->info.flags = FBINFO_DEFAULT; 445 all->info.flags = FBINFO_DEFAULT;
423 all->info.fbops = &tcx_ops; 446 all->info.fbops = &tcx_ops;
424#ifdef CONFIG_SPARC32
425 all->info.screen_base = (char __iomem *)
426 prom_getintdefault(sdev->prom_node, "address", 0);
427#endif
428 if (!all->info.screen_base)
429 all->info.screen_base = sbus_ioremap(&sdev->resource[0], 0,
430 all->par.fbsize, "tcx ram");
431 all->info.par = &all->par; 447 all->info.par = &all->par;
432 448
433 /* Initialize brooktree DAC. */ 449 /* Initialize brooktree DAC. */
@@ -445,72 +461,88 @@ static void tcx_init_one(struct sbus_dev *sdev)
445 tcx_blank(FB_BLANK_UNBLANK, &all->info); 461 tcx_blank(FB_BLANK_UNBLANK, &all->info);
446 462
447 if (fb_alloc_cmap(&all->info.cmap, 256, 0)) { 463 if (fb_alloc_cmap(&all->info.cmap, 256, 0)) {
448 printk(KERN_ERR "tcx: Could not allocate color map.\n"); 464 tcx_unmap_regs(all);
449 kfree(all); 465 kfree(all);
450 return; 466 return -ENOMEM;
451 } 467 }
452 468
453 fb_set_cmap(&all->info.cmap, &all->info); 469 fb_set_cmap(&all->info.cmap, &all->info);
454 tcx_init_fix(&all->info, linebytes); 470 tcx_init_fix(&all->info, linebytes);
455 471
456 if (register_framebuffer(&all->info) < 0) { 472 err = register_framebuffer(&all->info);
457 printk(KERN_ERR "tcx: Could not register framebuffer.\n"); 473 if (err < 0) {
458 fb_dealloc_cmap(&all->info.cmap); 474 fb_dealloc_cmap(&all->info.cmap);
475 tcx_unmap_regs(all);
459 kfree(all); 476 kfree(all);
460 return; 477 return err;
461 } 478 }
462 479
463 list_add(&all->list, &tcx_list); 480 dev_set_drvdata(&op->dev, all);
464 481
465 printk("tcx: %s at %lx:%lx, %s\n", 482 printk("%s: TCX at %lx:%lx, %s\n",
466 sdev->prom_name, 483 dp->full_name,
467 (long) sdev->reg_addrs[0].which_io, 484 all->par.which_io,
468 (long) sdev->reg_addrs[0].phys_addr, 485 op->resource[0].start,
469 all->par.lowdepth ? "8-bit only" : "24-bit depth"); 486 all->par.lowdepth ? "8-bit only" : "24-bit depth");
487
488 return 0;
470} 489}
471 490
472int __init tcx_init(void) 491static int __devinit tcx_probe(struct of_device *dev, const struct of_device_id *match)
473{ 492{
474 struct sbus_bus *sbus; 493 struct of_device *op = to_of_device(&dev->dev);
475 struct sbus_dev *sdev;
476 494
477 if (fb_get_options("tcxfb", NULL)) 495 return tcx_init_one(op);
478 return -ENODEV; 496}
479 497
480 for_all_sbusdev(sdev, sbus) { 498static int __devexit tcx_remove(struct of_device *dev)
481 if (!strcmp(sdev->prom_name, "SUNW,tcx")) 499{
482 tcx_init_one(sdev); 500 struct all_info *all = dev_get_drvdata(&dev->dev);
483 } 501
502 unregister_framebuffer(&all->info);
503 fb_dealloc_cmap(&all->info.cmap);
504
505 tcx_unmap_regs(all);
506
507 kfree(all);
508
509 dev_set_drvdata(&dev->dev, NULL);
484 510
485 return 0; 511 return 0;
486} 512}
487 513
488void __exit tcx_exit(void) 514static struct of_device_id tcx_match[] = {
489{ 515 {
490 struct list_head *pos, *tmp; 516 .name = "SUNW,tcx",
517 },
518 {},
519};
520MODULE_DEVICE_TABLE(of, tcx_match);
491 521
492 list_for_each_safe(pos, tmp, &tcx_list) { 522static struct of_platform_driver tcx_driver = {
493 struct all_info *all = list_entry(pos, typeof(*all), list); 523 .name = "tcx",
524 .match_table = tcx_match,
525 .probe = tcx_probe,
526 .remove = __devexit_p(tcx_remove),
527};
494 528
495 unregister_framebuffer(&all->info); 529int __init tcx_init(void)
496 fb_dealloc_cmap(&all->info.cmap); 530{
497 kfree(all); 531 if (fb_get_options("tcxfb", NULL))
498 } 532 return -ENODEV;
533
534 return of_register_driver(&tcx_driver, &of_bus_type);
499} 535}
500 536
501int __init 537void __exit tcx_exit(void)
502tcx_setup(char *arg)
503{ 538{
504 /* No cmdline options yet... */ 539 of_unregister_driver(&tcx_driver);
505 return 0;
506} 540}
507 541
508module_init(tcx_init); 542module_init(tcx_init);
509
510#ifdef MODULE
511module_exit(tcx_exit); 543module_exit(tcx_exit);
512#endif
513 544
514MODULE_DESCRIPTION("framebuffer driver for TCX chipsets"); 545MODULE_DESCRIPTION("framebuffer driver for TCX chipsets");
515MODULE_AUTHOR("David S. Miller <davem@redhat.com>"); 546MODULE_AUTHOR("David S. Miller <davem@davemloft.net>");
547MODULE_VERSION("2.0");
516MODULE_LICENSE("GPL"); 548MODULE_LICENSE("GPL");
diff --git a/fs/Kconfig b/fs/Kconfig
index 6dc8cfd6d80c..53f5c6d61121 100644
--- a/fs/Kconfig
+++ b/fs/Kconfig
@@ -326,7 +326,7 @@ source "fs/xfs/Kconfig"
326 326
327config OCFS2_FS 327config OCFS2_FS
328 tristate "OCFS2 file system support (EXPERIMENTAL)" 328 tristate "OCFS2 file system support (EXPERIMENTAL)"
329 depends on NET && EXPERIMENTAL 329 depends on NET && SYSFS && EXPERIMENTAL
330 select CONFIGFS_FS 330 select CONFIGFS_FS
331 select JBD 331 select JBD
332 select CRC32 332 select CRC32
@@ -356,6 +356,16 @@ config OCFS2_FS
356 - POSIX ACLs 356 - POSIX ACLs
357 - readpages / writepages (not user visible) 357 - readpages / writepages (not user visible)
358 358
359config OCFS2_DEBUG_MASKLOG
360 bool "OCFS2 logging support"
361 depends on OCFS2_FS
362 default y
363 help
364 The ocfs2 filesystem has an extensive logging system. The system
365 allows selection of events to log via files in /sys/o2cb/logmask/.
366 This option will enlarge your kernel, but it allows debugging of
367 ocfs2 filesystem issues.
368
359config MINIX_FS 369config MINIX_FS
360 tristate "Minix fs support" 370 tristate "Minix fs support"
361 help 371 help
diff --git a/fs/buffer.c b/fs/buffer.c
index e9994722f4a3..90e52e677209 100644
--- a/fs/buffer.c
+++ b/fs/buffer.c
@@ -852,7 +852,7 @@ int __set_page_dirty_buffers(struct page *page)
852 write_lock_irq(&mapping->tree_lock); 852 write_lock_irq(&mapping->tree_lock);
853 if (page->mapping) { /* Race with truncate? */ 853 if (page->mapping) { /* Race with truncate? */
854 if (mapping_cap_account_dirty(mapping)) 854 if (mapping_cap_account_dirty(mapping))
855 inc_page_state(nr_dirty); 855 __inc_zone_page_state(page, NR_FILE_DIRTY);
856 radix_tree_tag_set(&mapping->page_tree, 856 radix_tree_tag_set(&mapping->page_tree,
857 page_index(page), 857 page_index(page),
858 PAGECACHE_TAG_DIRTY); 858 PAGECACHE_TAG_DIRTY);
diff --git a/fs/configfs/dir.c b/fs/configfs/dir.c
index 207f8006fd6c..df025453dd97 100644
--- a/fs/configfs/dir.c
+++ b/fs/configfs/dir.c
@@ -211,7 +211,7 @@ static void remove_dir(struct dentry * d)
211 struct configfs_dirent * sd; 211 struct configfs_dirent * sd;
212 212
213 sd = d->d_fsdata; 213 sd = d->d_fsdata;
214 list_del_init(&sd->s_sibling); 214 list_del_init(&sd->s_sibling);
215 configfs_put(sd); 215 configfs_put(sd);
216 if (d->d_inode) 216 if (d->d_inode)
217 simple_rmdir(parent->d_inode,d); 217 simple_rmdir(parent->d_inode,d);
@@ -330,7 +330,7 @@ static int configfs_detach_prep(struct dentry *dentry)
330 330
331 ret = configfs_detach_prep(sd->s_dentry); 331 ret = configfs_detach_prep(sd->s_dentry);
332 if (!ret) 332 if (!ret)
333 continue; 333 continue;
334 } else 334 } else
335 ret = -ENOTEMPTY; 335 ret = -ENOTEMPTY;
336 336
@@ -931,7 +931,7 @@ int configfs_rename_dir(struct config_item * item, const char *new_name)
931 931
932 new_dentry = lookup_one_len(new_name, parent, strlen(new_name)); 932 new_dentry = lookup_one_len(new_name, parent, strlen(new_name));
933 if (!IS_ERR(new_dentry)) { 933 if (!IS_ERR(new_dentry)) {
934 if (!new_dentry->d_inode) { 934 if (!new_dentry->d_inode) {
935 error = config_item_set_name(item, "%s", new_name); 935 error = config_item_set_name(item, "%s", new_name);
936 if (!error) { 936 if (!error) {
937 d_add(new_dentry, NULL); 937 d_add(new_dentry, NULL);
diff --git a/fs/configfs/symlink.c b/fs/configfs/symlink.c
index e5512e295cf2..fb65e0800a86 100644
--- a/fs/configfs/symlink.c
+++ b/fs/configfs/symlink.c
@@ -66,7 +66,7 @@ static void fill_item_path(struct config_item * item, char * buffer, int length)
66} 66}
67 67
68static int create_link(struct config_item *parent_item, 68static int create_link(struct config_item *parent_item,
69 struct config_item *item, 69 struct config_item *item,
70 struct dentry *dentry) 70 struct dentry *dentry)
71{ 71{
72 struct configfs_dirent *target_sd = item->ci_dentry->d_fsdata; 72 struct configfs_dirent *target_sd = item->ci_dentry->d_fsdata;
diff --git a/fs/fs-writeback.c b/fs/fs-writeback.c
index 031b27a4bc9a..892643dc9af1 100644
--- a/fs/fs-writeback.c
+++ b/fs/fs-writeback.c
@@ -464,8 +464,8 @@ void sync_inodes_sb(struct super_block *sb, int wait)
464 .range_start = 0, 464 .range_start = 0,
465 .range_end = LLONG_MAX, 465 .range_end = LLONG_MAX,
466 }; 466 };
467 unsigned long nr_dirty = read_page_state(nr_dirty); 467 unsigned long nr_dirty = global_page_state(NR_FILE_DIRTY);
468 unsigned long nr_unstable = read_page_state(nr_unstable); 468 unsigned long nr_unstable = global_page_state(NR_UNSTABLE_NFS);
469 469
470 wbc.nr_to_write = nr_dirty + nr_unstable + 470 wbc.nr_to_write = nr_dirty + nr_unstable +
471 (inodes_stat.nr_inodes - inodes_stat.nr_unused) + 471 (inodes_stat.nr_inodes - inodes_stat.nr_unused) +
diff --git a/fs/inode.c b/fs/inode.c
index f42961eb983b..14a6c4147e4e 100644
--- a/fs/inode.c
+++ b/fs/inode.c
@@ -452,15 +452,14 @@ static void prune_icache(int nr_to_scan)
452 nr_pruned++; 452 nr_pruned++;
453 } 453 }
454 inodes_stat.nr_unused -= nr_pruned; 454 inodes_stat.nr_unused -= nr_pruned;
455 if (current_is_kswapd())
456 __count_vm_events(KSWAPD_INODESTEAL, reap);
457 else
458 __count_vm_events(PGINODESTEAL, reap);
455 spin_unlock(&inode_lock); 459 spin_unlock(&inode_lock);
456 460
457 dispose_list(&freeable); 461 dispose_list(&freeable);
458 mutex_unlock(&iprune_mutex); 462 mutex_unlock(&iprune_mutex);
459
460 if (current_is_kswapd())
461 mod_page_state(kswapd_inodesteal, reap);
462 else
463 mod_page_state(pginodesteal, reap);
464} 463}
465 464
466/* 465/*
diff --git a/fs/ioprio.c b/fs/ioprio.c
index 7fa76ed53c10..93aa5715f224 100644
--- a/fs/ioprio.c
+++ b/fs/ioprio.c
@@ -125,11 +125,24 @@ asmlinkage long sys_ioprio_set(int which, int who, int ioprio)
125 return ret; 125 return ret;
126} 126}
127 127
128static int get_task_ioprio(struct task_struct *p)
129{
130 int ret;
131
132 ret = security_task_getioprio(p);
133 if (ret)
134 goto out;
135 ret = p->ioprio;
136out:
137 return ret;
138}
139
128asmlinkage long sys_ioprio_get(int which, int who) 140asmlinkage long sys_ioprio_get(int which, int who)
129{ 141{
130 struct task_struct *g, *p; 142 struct task_struct *g, *p;
131 struct user_struct *user; 143 struct user_struct *user;
132 int ret = -ESRCH; 144 int ret = -ESRCH;
145 int tmpio;
133 146
134 read_lock_irq(&tasklist_lock); 147 read_lock_irq(&tasklist_lock);
135 switch (which) { 148 switch (which) {
@@ -139,16 +152,19 @@ asmlinkage long sys_ioprio_get(int which, int who)
139 else 152 else
140 p = find_task_by_pid(who); 153 p = find_task_by_pid(who);
141 if (p) 154 if (p)
142 ret = p->ioprio; 155 ret = get_task_ioprio(p);
143 break; 156 break;
144 case IOPRIO_WHO_PGRP: 157 case IOPRIO_WHO_PGRP:
145 if (!who) 158 if (!who)
146 who = process_group(current); 159 who = process_group(current);
147 do_each_task_pid(who, PIDTYPE_PGID, p) { 160 do_each_task_pid(who, PIDTYPE_PGID, p) {
161 tmpio = get_task_ioprio(p);
162 if (tmpio < 0)
163 continue;
148 if (ret == -ESRCH) 164 if (ret == -ESRCH)
149 ret = p->ioprio; 165 ret = tmpio;
150 else 166 else
151 ret = ioprio_best(ret, p->ioprio); 167 ret = ioprio_best(ret, tmpio);
152 } while_each_task_pid(who, PIDTYPE_PGID, p); 168 } while_each_task_pid(who, PIDTYPE_PGID, p);
153 break; 169 break;
154 case IOPRIO_WHO_USER: 170 case IOPRIO_WHO_USER:
@@ -163,10 +179,13 @@ asmlinkage long sys_ioprio_get(int which, int who)
163 do_each_thread(g, p) { 179 do_each_thread(g, p) {
164 if (p->uid != user->uid) 180 if (p->uid != user->uid)
165 continue; 181 continue;
182 tmpio = get_task_ioprio(p);
183 if (tmpio < 0)
184 continue;
166 if (ret == -ESRCH) 185 if (ret == -ESRCH)
167 ret = p->ioprio; 186 ret = tmpio;
168 else 187 else
169 ret = ioprio_best(ret, p->ioprio); 188 ret = ioprio_best(ret, tmpio);
170 } while_each_thread(g, p); 189 } while_each_thread(g, p);
171 190
172 if (who) 191 if (who)
diff --git a/fs/ncpfs/mmap.c b/fs/ncpfs/mmap.c
index 52d60c3d8996..e7d5a3097fe6 100644
--- a/fs/ncpfs/mmap.c
+++ b/fs/ncpfs/mmap.c
@@ -93,7 +93,7 @@ static struct page* ncp_file_mmap_nopage(struct vm_area_struct *area,
93 */ 93 */
94 if (type) 94 if (type)
95 *type = VM_FAULT_MAJOR; 95 *type = VM_FAULT_MAJOR;
96 inc_page_state(pgmajfault); 96 count_vm_event(PGMAJFAULT);
97 return page; 97 return page;
98} 98}
99 99
diff --git a/fs/nfs/pagelist.c b/fs/nfs/pagelist.c
index d89f6fb3b3a3..26b1fe909377 100644
--- a/fs/nfs/pagelist.c
+++ b/fs/nfs/pagelist.c
@@ -315,6 +315,7 @@ nfs_scan_lock_dirty(struct nfs_inode *nfsi, struct list_head *dst,
315 req->wb_index, NFS_PAGE_TAG_DIRTY); 315 req->wb_index, NFS_PAGE_TAG_DIRTY);
316 nfs_list_remove_request(req); 316 nfs_list_remove_request(req);
317 nfs_list_add_request(req, dst); 317 nfs_list_add_request(req, dst);
318 dec_zone_page_state(req->wb_page, NR_FILE_DIRTY);
318 res++; 319 res++;
319 } 320 }
320 } 321 }
diff --git a/fs/nfs/write.c b/fs/nfs/write.c
index 8fccb9cb173b..f21e268705c0 100644
--- a/fs/nfs/write.c
+++ b/fs/nfs/write.c
@@ -497,7 +497,7 @@ nfs_mark_request_dirty(struct nfs_page *req)
497 nfs_list_add_request(req, &nfsi->dirty); 497 nfs_list_add_request(req, &nfsi->dirty);
498 nfsi->ndirty++; 498 nfsi->ndirty++;
499 spin_unlock(&nfsi->req_lock); 499 spin_unlock(&nfsi->req_lock);
500 inc_page_state(nr_dirty); 500 inc_zone_page_state(req->wb_page, NR_FILE_DIRTY);
501 mark_inode_dirty(inode); 501 mark_inode_dirty(inode);
502} 502}
503 503
@@ -525,7 +525,7 @@ nfs_mark_request_commit(struct nfs_page *req)
525 nfs_list_add_request(req, &nfsi->commit); 525 nfs_list_add_request(req, &nfsi->commit);
526 nfsi->ncommit++; 526 nfsi->ncommit++;
527 spin_unlock(&nfsi->req_lock); 527 spin_unlock(&nfsi->req_lock);
528 inc_page_state(nr_unstable); 528 inc_zone_page_state(req->wb_page, NR_UNSTABLE_NFS);
529 mark_inode_dirty(inode); 529 mark_inode_dirty(inode);
530} 530}
531#endif 531#endif
@@ -609,7 +609,6 @@ nfs_scan_dirty(struct inode *inode, struct list_head *dst, unsigned long idx_sta
609 if (nfsi->ndirty != 0) { 609 if (nfsi->ndirty != 0) {
610 res = nfs_scan_lock_dirty(nfsi, dst, idx_start, npages); 610 res = nfs_scan_lock_dirty(nfsi, dst, idx_start, npages);
611 nfsi->ndirty -= res; 611 nfsi->ndirty -= res;
612 sub_page_state(nr_dirty,res);
613 if ((nfsi->ndirty == 0) != list_empty(&nfsi->dirty)) 612 if ((nfsi->ndirty == 0) != list_empty(&nfsi->dirty))
614 printk(KERN_ERR "NFS: desynchronized value of nfs_i.ndirty.\n"); 613 printk(KERN_ERR "NFS: desynchronized value of nfs_i.ndirty.\n");
615 } 614 }
@@ -1394,7 +1393,6 @@ static void nfs_commit_done(struct rpc_task *task, void *calldata)
1394{ 1393{
1395 struct nfs_write_data *data = calldata; 1394 struct nfs_write_data *data = calldata;
1396 struct nfs_page *req; 1395 struct nfs_page *req;
1397 int res = 0;
1398 1396
1399 dprintk("NFS: %4d nfs_commit_done (status %d)\n", 1397 dprintk("NFS: %4d nfs_commit_done (status %d)\n",
1400 task->tk_pid, task->tk_status); 1398 task->tk_pid, task->tk_status);
@@ -1406,6 +1404,7 @@ static void nfs_commit_done(struct rpc_task *task, void *calldata)
1406 while (!list_empty(&data->pages)) { 1404 while (!list_empty(&data->pages)) {
1407 req = nfs_list_entry(data->pages.next); 1405 req = nfs_list_entry(data->pages.next);
1408 nfs_list_remove_request(req); 1406 nfs_list_remove_request(req);
1407 dec_zone_page_state(req->wb_page, NR_UNSTABLE_NFS);
1409 1408
1410 dprintk("NFS: commit (%s/%Ld %d@%Ld)", 1409 dprintk("NFS: commit (%s/%Ld %d@%Ld)",
1411 req->wb_context->dentry->d_inode->i_sb->s_id, 1410 req->wb_context->dentry->d_inode->i_sb->s_id,
@@ -1432,9 +1431,7 @@ static void nfs_commit_done(struct rpc_task *task, void *calldata)
1432 nfs_mark_request_dirty(req); 1431 nfs_mark_request_dirty(req);
1433 next: 1432 next:
1434 nfs_clear_page_writeback(req); 1433 nfs_clear_page_writeback(req);
1435 res++;
1436 } 1434 }
1437 sub_page_state(nr_unstable,res);
1438} 1435}
1439 1436
1440static const struct rpc_call_ops nfs_commit_ops = { 1437static const struct rpc_call_ops nfs_commit_ops = {
diff --git a/fs/nfsd/export.c b/fs/nfsd/export.c
index 3eec30000f3f..01bc68c628ad 100644
--- a/fs/nfsd/export.c
+++ b/fs/nfsd/export.c
@@ -126,7 +126,7 @@ static int expkey_parse(struct cache_detail *cd, char *mesg, int mlen)
126 if (*ep) 126 if (*ep)
127 goto out; 127 goto out;
128 dprintk("found fsidtype %d\n", fsidtype); 128 dprintk("found fsidtype %d\n", fsidtype);
129 if (fsidtype > 2) 129 if (key_len(fsidtype)==0) /* invalid type */
130 goto out; 130 goto out;
131 if ((len=qword_get(&mesg, buf, PAGE_SIZE)) <= 0) 131 if ((len=qword_get(&mesg, buf, PAGE_SIZE)) <= 0)
132 goto out; 132 goto out;
diff --git a/fs/nfsd/nfs4state.c b/fs/nfsd/nfs4state.c
index 7c7d01672d35..9daa0b9feb8d 100644
--- a/fs/nfsd/nfs4state.c
+++ b/fs/nfsd/nfs4state.c
@@ -1237,8 +1237,15 @@ find_file(struct inode *ino)
1237 return NULL; 1237 return NULL;
1238} 1238}
1239 1239
1240#define TEST_ACCESS(x) ((x > 0 || x < 4)?1:0) 1240static int access_valid(u32 x)
1241#define TEST_DENY(x) ((x >= 0 || x < 5)?1:0) 1241{
1242 return (x > 0 && x < 4);
1243}
1244
1245static int deny_valid(u32 x)
1246{
1247 return (x >= 0 && x < 5);
1248}
1242 1249
1243static void 1250static void
1244set_access(unsigned int *access, unsigned long bmap) { 1251set_access(unsigned int *access, unsigned long bmap) {
@@ -1745,7 +1752,8 @@ nfsd4_process_open2(struct svc_rqst *rqstp, struct svc_fh *current_fh, struct nf
1745 int status; 1752 int status;
1746 1753
1747 status = nfserr_inval; 1754 status = nfserr_inval;
1748 if (!TEST_ACCESS(open->op_share_access) || !TEST_DENY(open->op_share_deny)) 1755 if (!access_valid(open->op_share_access)
1756 || !deny_valid(open->op_share_deny))
1749 goto out; 1757 goto out;
1750 /* 1758 /*
1751 * Lookup file; if found, lookup stateid and check open request, 1759 * Lookup file; if found, lookup stateid and check open request,
@@ -1782,10 +1790,10 @@ nfsd4_process_open2(struct svc_rqst *rqstp, struct svc_fh *current_fh, struct nf
1782 } else { 1790 } else {
1783 /* Stateid was not found, this is a new OPEN */ 1791 /* Stateid was not found, this is a new OPEN */
1784 int flags = 0; 1792 int flags = 0;
1793 if (open->op_share_access & NFS4_SHARE_ACCESS_READ)
1794 flags |= MAY_READ;
1785 if (open->op_share_access & NFS4_SHARE_ACCESS_WRITE) 1795 if (open->op_share_access & NFS4_SHARE_ACCESS_WRITE)
1786 flags = MAY_WRITE; 1796 flags |= MAY_WRITE;
1787 else
1788 flags = MAY_READ;
1789 status = nfs4_new_open(rqstp, &stp, dp, current_fh, flags); 1797 status = nfs4_new_open(rqstp, &stp, dp, current_fh, flags);
1790 if (status) 1798 if (status)
1791 goto out; 1799 goto out;
@@ -2070,16 +2078,12 @@ nfs4_preprocess_stateid_op(struct svc_fh *current_fh, stateid_t *stateid, int fl
2070 if (!stateid->si_fileid) { /* delegation stateid */ 2078 if (!stateid->si_fileid) { /* delegation stateid */
2071 if(!(dp = find_delegation_stateid(ino, stateid))) { 2079 if(!(dp = find_delegation_stateid(ino, stateid))) {
2072 dprintk("NFSD: delegation stateid not found\n"); 2080 dprintk("NFSD: delegation stateid not found\n");
2073 if (nfs4_in_grace())
2074 status = nfserr_grace;
2075 goto out; 2081 goto out;
2076 } 2082 }
2077 stidp = &dp->dl_stateid; 2083 stidp = &dp->dl_stateid;
2078 } else { /* open or lock stateid */ 2084 } else { /* open or lock stateid */
2079 if (!(stp = find_stateid(stateid, flags))) { 2085 if (!(stp = find_stateid(stateid, flags))) {
2080 dprintk("NFSD: open or lock stateid not found\n"); 2086 dprintk("NFSD: open or lock stateid not found\n");
2081 if (nfs4_in_grace())
2082 status = nfserr_grace;
2083 goto out; 2087 goto out;
2084 } 2088 }
2085 if ((flags & CHECK_FH) && nfs4_check_fh(current_fh, stp)) 2089 if ((flags & CHECK_FH) && nfs4_check_fh(current_fh, stp))
@@ -2252,8 +2256,9 @@ nfsd4_open_confirm(struct svc_rqst *rqstp, struct svc_fh *current_fh, struct nfs
2252 (int)current_fh->fh_dentry->d_name.len, 2256 (int)current_fh->fh_dentry->d_name.len,
2253 current_fh->fh_dentry->d_name.name); 2257 current_fh->fh_dentry->d_name.name);
2254 2258
2255 if ((status = fh_verify(rqstp, current_fh, S_IFREG, 0))) 2259 status = fh_verify(rqstp, current_fh, S_IFREG, 0);
2256 goto out; 2260 if (status)
2261 return status;
2257 2262
2258 nfs4_lock_state(); 2263 nfs4_lock_state();
2259 2264
@@ -2320,7 +2325,8 @@ nfsd4_open_downgrade(struct svc_rqst *rqstp, struct svc_fh *current_fh, struct n
2320 (int)current_fh->fh_dentry->d_name.len, 2325 (int)current_fh->fh_dentry->d_name.len,
2321 current_fh->fh_dentry->d_name.name); 2326 current_fh->fh_dentry->d_name.name);
2322 2327
2323 if (!TEST_ACCESS(od->od_share_access) || !TEST_DENY(od->od_share_deny)) 2328 if (!access_valid(od->od_share_access)
2329 || !deny_valid(od->od_share_deny))
2324 return nfserr_inval; 2330 return nfserr_inval;
2325 2331
2326 nfs4_lock_state(); 2332 nfs4_lock_state();
diff --git a/fs/nfsd/nfsfh.c b/fs/nfsd/nfsfh.c
index 3f2ec2e6d06c..ecc439d2565f 100644
--- a/fs/nfsd/nfsfh.c
+++ b/fs/nfsd/nfsfh.c
@@ -187,13 +187,6 @@ fh_verify(struct svc_rqst *rqstp, struct svc_fh *fhp, int type, int access)
187 goto out; 187 goto out;
188 } 188 }
189 189
190 /* Set user creds for this exportpoint */
191 error = nfsd_setuser(rqstp, exp);
192 if (error) {
193 error = nfserrno(error);
194 goto out;
195 }
196
197 /* 190 /*
198 * Look up the dentry using the NFS file handle. 191 * Look up the dentry using the NFS file handle.
199 */ 192 */
@@ -251,6 +244,14 @@ fh_verify(struct svc_rqst *rqstp, struct svc_fh *fhp, int type, int access)
251 } 244 }
252 cache_get(&exp->h); 245 cache_get(&exp->h);
253 246
247 /* Set user creds for this exportpoint; necessary even in the "just
248 * checking" case because this may be a filehandle that was created by
249 * fh_compose, and that is about to be used in another nfsv4 compound
250 * operation */
251 error = nfserrno(nfsd_setuser(rqstp, exp));
252 if (error)
253 goto out;
254
254 error = nfsd_mode_check(rqstp, dentry->d_inode->i_mode, type); 255 error = nfsd_mode_check(rqstp, dentry->d_inode->i_mode, type);
255 if (error) 256 if (error)
256 goto out; 257 goto out;
@@ -312,8 +313,8 @@ int
312fh_compose(struct svc_fh *fhp, struct svc_export *exp, struct dentry *dentry, struct svc_fh *ref_fh) 313fh_compose(struct svc_fh *fhp, struct svc_export *exp, struct dentry *dentry, struct svc_fh *ref_fh)
313{ 314{
314 /* ref_fh is a reference file handle. 315 /* ref_fh is a reference file handle.
315 * if it is non-null, then we should compose a filehandle which is 316 * if it is non-null and for the same filesystem, then we should compose
316 * of the same version, where possible. 317 * a filehandle which is of the same version, where possible.
317 * Currently, that means that if ref_fh->fh_handle.fh_version == 0xca 318 * Currently, that means that if ref_fh->fh_handle.fh_version == 0xca
318 * Then create a 32byte filehandle using nfs_fhbase_old 319 * Then create a 32byte filehandle using nfs_fhbase_old
319 * 320 *
@@ -332,7 +333,7 @@ fh_compose(struct svc_fh *fhp, struct svc_export *exp, struct dentry *dentry, st
332 parent->d_name.name, dentry->d_name.name, 333 parent->d_name.name, dentry->d_name.name,
333 (inode ? inode->i_ino : 0)); 334 (inode ? inode->i_ino : 0));
334 335
335 if (ref_fh) { 336 if (ref_fh && ref_fh->fh_export == exp) {
336 ref_fh_version = ref_fh->fh_handle.fh_version; 337 ref_fh_version = ref_fh->fh_handle.fh_version;
337 if (ref_fh_version == 0xca) 338 if (ref_fh_version == 0xca)
338 ref_fh_fsid_type = 0; 339 ref_fh_fsid_type = 0;
@@ -461,7 +462,7 @@ fh_update(struct svc_fh *fhp)
461 } else { 462 } else {
462 int size; 463 int size;
463 if (fhp->fh_handle.fh_fileid_type != 0) 464 if (fhp->fh_handle.fh_fileid_type != 0)
464 goto out_uptodate; 465 goto out;
465 datap = fhp->fh_handle.fh_auth+ 466 datap = fhp->fh_handle.fh_auth+
466 fhp->fh_handle.fh_size/4 -1; 467 fhp->fh_handle.fh_size/4 -1;
467 size = (fhp->fh_maxsize - fhp->fh_handle.fh_size)/4; 468 size = (fhp->fh_maxsize - fhp->fh_handle.fh_size)/4;
@@ -481,10 +482,6 @@ out_negative:
481 printk(KERN_ERR "fh_update: %s/%s still negative!\n", 482 printk(KERN_ERR "fh_update: %s/%s still negative!\n",
482 dentry->d_parent->d_name.name, dentry->d_name.name); 483 dentry->d_parent->d_name.name, dentry->d_name.name);
483 goto out; 484 goto out;
484out_uptodate:
485 printk(KERN_ERR "fh_update: %s/%s already up-to-date!\n",
486 dentry->d_parent->d_name.name, dentry->d_name.name);
487 goto out;
488} 485}
489 486
490/* 487/*
diff --git a/fs/nfsd/vfs.c b/fs/nfsd/vfs.c
index 245eaa1fb59b..e170030d45da 100644
--- a/fs/nfsd/vfs.c
+++ b/fs/nfsd/vfs.c
@@ -673,7 +673,10 @@ nfsd_open(struct svc_rqst *rqstp, struct svc_fh *fhp, int type,
673 goto out_nfserr; 673 goto out_nfserr;
674 674
675 if (access & MAY_WRITE) { 675 if (access & MAY_WRITE) {
676 flags = O_WRONLY|O_LARGEFILE; 676 if (access & MAY_READ)
677 flags = O_RDWR|O_LARGEFILE;
678 else
679 flags = O_WRONLY|O_LARGEFILE;
677 680
678 DQUOT_INIT(inode); 681 DQUOT_INIT(inode);
679 } 682 }
@@ -834,7 +837,7 @@ nfsd_vfs_read(struct svc_rqst *rqstp, struct svc_fh *fhp, struct file *file,
834 if (ra && ra->p_set) 837 if (ra && ra->p_set)
835 file->f_ra = ra->p_ra; 838 file->f_ra = ra->p_ra;
836 839
837 if (file->f_op->sendfile) { 840 if (file->f_op->sendfile && rqstp->rq_sendfile_ok) {
838 svc_pushback_unused_pages(rqstp); 841 svc_pushback_unused_pages(rqstp);
839 err = file->f_op->sendfile(file, &offset, *count, 842 err = file->f_op->sendfile(file, &offset, *count,
840 nfsd_read_actor, rqstp); 843 nfsd_read_actor, rqstp);
@@ -1517,14 +1520,15 @@ nfsd_link(struct svc_rqst *rqstp, struct svc_fh *ffhp,
1517 err = nfserrno(err); 1520 err = nfserrno(err);
1518 } 1521 }
1519 1522
1520 fh_unlock(ffhp);
1521 dput(dnew); 1523 dput(dnew);
1524out_unlock:
1525 fh_unlock(ffhp);
1522out: 1526out:
1523 return err; 1527 return err;
1524 1528
1525out_nfserr: 1529out_nfserr:
1526 err = nfserrno(err); 1530 err = nfserrno(err);
1527 goto out; 1531 goto out_unlock;
1528} 1532}
1529 1533
1530/* 1534/*
@@ -1553,7 +1557,7 @@ nfsd_rename(struct svc_rqst *rqstp, struct svc_fh *ffhp, char *fname, int flen,
1553 tdir = tdentry->d_inode; 1557 tdir = tdentry->d_inode;
1554 1558
1555 err = (rqstp->rq_vers == 2) ? nfserr_acces : nfserr_xdev; 1559 err = (rqstp->rq_vers == 2) ? nfserr_acces : nfserr_xdev;
1556 if (fdir->i_sb != tdir->i_sb) 1560 if (ffhp->fh_export != tfhp->fh_export)
1557 goto out; 1561 goto out;
1558 1562
1559 err = nfserr_perm; 1563 err = nfserr_perm;
diff --git a/fs/ocfs2/aops.c b/fs/ocfs2/aops.c
index cca71317b6d6..f1d1c342ce01 100644
--- a/fs/ocfs2/aops.c
+++ b/fs/ocfs2/aops.c
@@ -558,16 +558,9 @@ static int ocfs2_direct_IO_get_blocks(struct inode *inode, sector_t iblock,
558 u64 vbo_max; /* file offset, max_blocks from iblock */ 558 u64 vbo_max; /* file offset, max_blocks from iblock */
559 u64 p_blkno; 559 u64 p_blkno;
560 int contig_blocks; 560 int contig_blocks;
561 unsigned char blocksize_bits; 561 unsigned char blocksize_bits = inode->i_sb->s_blocksize_bits;
562 unsigned long max_blocks = bh_result->b_size >> inode->i_blkbits; 562 unsigned long max_blocks = bh_result->b_size >> inode->i_blkbits;
563 563
564 if (!inode || !bh_result) {
565 mlog(ML_ERROR, "inode or bh_result is null\n");
566 return -EIO;
567 }
568
569 blocksize_bits = inode->i_sb->s_blocksize_bits;
570
571 /* This function won't even be called if the request isn't all 564 /* This function won't even be called if the request isn't all
572 * nicely aligned and of the right size, so there's no need 565 * nicely aligned and of the right size, so there's no need
573 * for us to check any of that. */ 566 * for us to check any of that. */
diff --git a/fs/ocfs2/cluster/heartbeat.c b/fs/ocfs2/cluster/heartbeat.c
index 1d26cfcd9f84..504595d6cf65 100644
--- a/fs/ocfs2/cluster/heartbeat.c
+++ b/fs/ocfs2/cluster/heartbeat.c
@@ -517,6 +517,7 @@ static inline void o2hb_prepare_block(struct o2hb_region *reg,
517 hb_block->hb_seq = cpu_to_le64(cputime); 517 hb_block->hb_seq = cpu_to_le64(cputime);
518 hb_block->hb_node = node_num; 518 hb_block->hb_node = node_num;
519 hb_block->hb_generation = cpu_to_le64(generation); 519 hb_block->hb_generation = cpu_to_le64(generation);
520 hb_block->hb_dead_ms = cpu_to_le32(o2hb_dead_threshold * O2HB_REGION_TIMEOUT_MS);
520 521
521 /* This step must always happen last! */ 522 /* This step must always happen last! */
522 hb_block->hb_cksum = cpu_to_le32(o2hb_compute_block_crc_le(reg, 523 hb_block->hb_cksum = cpu_to_le32(o2hb_compute_block_crc_le(reg,
@@ -645,6 +646,8 @@ static int o2hb_check_slot(struct o2hb_region *reg,
645 struct o2nm_node *node; 646 struct o2nm_node *node;
646 struct o2hb_disk_heartbeat_block *hb_block = reg->hr_tmp_block; 647 struct o2hb_disk_heartbeat_block *hb_block = reg->hr_tmp_block;
647 u64 cputime; 648 u64 cputime;
649 unsigned int dead_ms = o2hb_dead_threshold * O2HB_REGION_TIMEOUT_MS;
650 unsigned int slot_dead_ms;
648 651
649 memcpy(hb_block, slot->ds_raw_block, reg->hr_block_bytes); 652 memcpy(hb_block, slot->ds_raw_block, reg->hr_block_bytes);
650 653
@@ -733,6 +736,23 @@ fire_callbacks:
733 &o2hb_live_slots[slot->ds_node_num]); 736 &o2hb_live_slots[slot->ds_node_num]);
734 737
735 slot->ds_equal_samples = 0; 738 slot->ds_equal_samples = 0;
739
740 /* We want to be sure that all nodes agree on the
741 * number of milliseconds before a node will be
742 * considered dead. The self-fencing timeout is
743 * computed from this value, and a discrepancy might
744 * result in heartbeat calling a node dead when it
745 * hasn't self-fenced yet. */
746 slot_dead_ms = le32_to_cpu(hb_block->hb_dead_ms);
747 if (slot_dead_ms && slot_dead_ms != dead_ms) {
748 /* TODO: Perhaps we can fail the region here. */
749 mlog(ML_ERROR, "Node %d on device %s has a dead count "
750 "of %u ms, but our count is %u ms.\n"
751 "Please double check your configuration values "
752 "for 'O2CB_HEARTBEAT_THRESHOLD'\n",
753 slot->ds_node_num, reg->hr_dev_name, slot_dead_ms,
754 dead_ms);
755 }
736 goto out; 756 goto out;
737 } 757 }
738 758
diff --git a/fs/ocfs2/cluster/masklog.h b/fs/ocfs2/cluster/masklog.h
index 73edad782537..a42628ba9ddf 100644
--- a/fs/ocfs2/cluster/masklog.h
+++ b/fs/ocfs2/cluster/masklog.h
@@ -123,6 +123,17 @@
123#define MLOG_MASK_PREFIX 0 123#define MLOG_MASK_PREFIX 0
124#endif 124#endif
125 125
126/*
127 * When logging is disabled, force the bit test to 0 for anything other
128 * than errors and notices, allowing gcc to remove the code completely.
129 * When enabled, allow all masks.
130 */
131#if defined(CONFIG_OCFS2_DEBUG_MASKLOG)
132#define ML_ALLOWED_BITS ~0
133#else
134#define ML_ALLOWED_BITS (ML_ERROR|ML_NOTICE)
135#endif
136
126#define MLOG_MAX_BITS 64 137#define MLOG_MAX_BITS 64
127 138
128struct mlog_bits { 139struct mlog_bits {
@@ -187,7 +198,8 @@ extern struct mlog_bits mlog_and_bits, mlog_not_bits;
187 198
188#define mlog(mask, fmt, args...) do { \ 199#define mlog(mask, fmt, args...) do { \
189 u64 __m = MLOG_MASK_PREFIX | (mask); \ 200 u64 __m = MLOG_MASK_PREFIX | (mask); \
190 if (__mlog_test_u64(__m, mlog_and_bits) && \ 201 if ((__m & ML_ALLOWED_BITS) && \
202 __mlog_test_u64(__m, mlog_and_bits) && \
191 !__mlog_test_u64(__m, mlog_not_bits)) { \ 203 !__mlog_test_u64(__m, mlog_not_bits)) { \
192 if (__m & ML_ERROR) \ 204 if (__m & ML_ERROR) \
193 __mlog_printk(KERN_ERR, "ERROR: "fmt , ##args); \ 205 __mlog_printk(KERN_ERR, "ERROR: "fmt , ##args); \
@@ -204,6 +216,7 @@ extern struct mlog_bits mlog_and_bits, mlog_not_bits;
204 mlog(ML_ERROR, "status = %lld\n", (long long)_st); \ 216 mlog(ML_ERROR, "status = %lld\n", (long long)_st); \
205} while (0) 217} while (0)
206 218
219#if defined(CONFIG_OCFS2_DEBUG_MASKLOG)
207#define mlog_entry(fmt, args...) do { \ 220#define mlog_entry(fmt, args...) do { \
208 mlog(ML_ENTRY, "ENTRY:" fmt , ##args); \ 221 mlog(ML_ENTRY, "ENTRY:" fmt , ##args); \
209} while (0) 222} while (0)
@@ -247,6 +260,13 @@ extern struct mlog_bits mlog_and_bits, mlog_not_bits;
247#define mlog_exit_void() do { \ 260#define mlog_exit_void() do { \
248 mlog(ML_EXIT, "EXIT\n"); \ 261 mlog(ML_EXIT, "EXIT\n"); \
249} while (0) 262} while (0)
263#else
264#define mlog_entry(...) do { } while (0)
265#define mlog_entry_void(...) do { } while (0)
266#define mlog_exit(...) do { } while (0)
267#define mlog_exit_ptr(...) do { } while (0)
268#define mlog_exit_void(...) do { } while (0)
269#endif /* defined(CONFIG_OCFS2_DEBUG_MASKLOG) */
250 270
251#define mlog_bug_on_msg(cond, fmt, args...) do { \ 271#define mlog_bug_on_msg(cond, fmt, args...) do { \
252 if (cond) { \ 272 if (cond) { \
diff --git a/fs/ocfs2/cluster/ocfs2_heartbeat.h b/fs/ocfs2/cluster/ocfs2_heartbeat.h
index 94096069cb43..3f4151da9709 100644
--- a/fs/ocfs2/cluster/ocfs2_heartbeat.h
+++ b/fs/ocfs2/cluster/ocfs2_heartbeat.h
@@ -32,6 +32,7 @@ struct o2hb_disk_heartbeat_block {
32 __u8 hb_pad1[3]; 32 __u8 hb_pad1[3];
33 __le32 hb_cksum; 33 __le32 hb_cksum;
34 __le64 hb_generation; 34 __le64 hb_generation;
35 __le32 hb_dead_ms;
35}; 36};
36 37
37#endif /* _OCFS2_HEARTBEAT_H */ 38#endif /* _OCFS2_HEARTBEAT_H */
diff --git a/fs/ocfs2/cluster/tcp.c b/fs/ocfs2/cluster/tcp.c
index 1591eb37a723..b650efa8c8be 100644
--- a/fs/ocfs2/cluster/tcp.c
+++ b/fs/ocfs2/cluster/tcp.c
@@ -396,8 +396,8 @@ static void o2net_set_nn_state(struct o2net_node *nn,
396 } 396 }
397 397
398 if (was_valid && !valid) { 398 if (was_valid && !valid) {
399 mlog(ML_NOTICE, "no longer connected to " SC_NODEF_FMT "\n", 399 printk(KERN_INFO "o2net: no longer connected to "
400 SC_NODEF_ARGS(old_sc)); 400 SC_NODEF_FMT "\n", SC_NODEF_ARGS(old_sc));
401 o2net_complete_nodes_nsw(nn); 401 o2net_complete_nodes_nsw(nn);
402 } 402 }
403 403
@@ -409,10 +409,10 @@ static void o2net_set_nn_state(struct o2net_node *nn,
409 * the only way to start connecting again is to down 409 * the only way to start connecting again is to down
410 * heartbeat and bring it back up. */ 410 * heartbeat and bring it back up. */
411 cancel_delayed_work(&nn->nn_connect_expired); 411 cancel_delayed_work(&nn->nn_connect_expired);
412 mlog(ML_NOTICE, "%s " SC_NODEF_FMT "\n", 412 printk(KERN_INFO "o2net: %s " SC_NODEF_FMT "\n",
413 o2nm_this_node() > sc->sc_node->nd_num ? 413 o2nm_this_node() > sc->sc_node->nd_num ?
414 "connected to" : "accepted connection from", 414 "connected to" : "accepted connection from",
415 SC_NODEF_ARGS(sc)); 415 SC_NODEF_ARGS(sc));
416 } 416 }
417 417
418 /* trigger the connecting worker func as long as we're not valid, 418 /* trigger the connecting worker func as long as we're not valid,
@@ -1280,7 +1280,7 @@ static void o2net_idle_timer(unsigned long data)
1280 1280
1281 do_gettimeofday(&now); 1281 do_gettimeofday(&now);
1282 1282
1283 mlog(ML_NOTICE, "connection to " SC_NODEF_FMT " has been idle for 10 " 1283 printk(KERN_INFO "o2net: connection to " SC_NODEF_FMT " has been idle for 10 "
1284 "seconds, shutting it down.\n", SC_NODEF_ARGS(sc)); 1284 "seconds, shutting it down.\n", SC_NODEF_ARGS(sc));
1285 mlog(ML_NOTICE, "here are some times that might help debug the " 1285 mlog(ML_NOTICE, "here are some times that might help debug the "
1286 "situation: (tmr %ld.%ld now %ld.%ld dr %ld.%ld adv " 1286 "situation: (tmr %ld.%ld now %ld.%ld dr %ld.%ld adv "
diff --git a/fs/ocfs2/dir.c b/fs/ocfs2/dir.c
index ae47f450792f..3d494d1a5f36 100644
--- a/fs/ocfs2/dir.c
+++ b/fs/ocfs2/dir.c
@@ -213,11 +213,9 @@ int ocfs2_find_files_on_disk(const char *name,
213 struct ocfs2_dir_entry **dirent) 213 struct ocfs2_dir_entry **dirent)
214{ 214{
215 int status = -ENOENT; 215 int status = -ENOENT;
216 struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
217 216
218 mlog_entry("(osb=%p, parent=%llu, name='%.*s', blkno=%p, inode=%p)\n", 217 mlog_entry("(name=%.*s, blkno=%p, inode=%p, dirent_bh=%p, dirent=%p)\n",
219 osb, (unsigned long long)OCFS2_I(inode)->ip_blkno, 218 namelen, name, blkno, inode, dirent_bh, dirent);
220 namelen, name, blkno, inode);
221 219
222 *dirent_bh = ocfs2_find_entry(name, namelen, inode, dirent); 220 *dirent_bh = ocfs2_find_entry(name, namelen, inode, dirent);
223 if (!*dirent_bh || !*dirent) { 221 if (!*dirent_bh || !*dirent) {
diff --git a/fs/ocfs2/dlm/dlmcommon.h b/fs/ocfs2/dlm/dlmcommon.h
index 9bdc9cf65991..14530ee7e11d 100644
--- a/fs/ocfs2/dlm/dlmcommon.h
+++ b/fs/ocfs2/dlm/dlmcommon.h
@@ -822,8 +822,6 @@ int dlm_begin_reco_handler(struct o2net_msg *msg, u32 len, void *data);
822int dlm_finalize_reco_handler(struct o2net_msg *msg, u32 len, void *data); 822int dlm_finalize_reco_handler(struct o2net_msg *msg, u32 len, void *data);
823int dlm_do_master_requery(struct dlm_ctxt *dlm, struct dlm_lock_resource *res, 823int dlm_do_master_requery(struct dlm_ctxt *dlm, struct dlm_lock_resource *res,
824 u8 nodenum, u8 *real_master); 824 u8 nodenum, u8 *real_master);
825int dlm_lockres_master_requery(struct dlm_ctxt *dlm,
826 struct dlm_lock_resource *res, u8 *real_master);
827 825
828 826
829int dlm_dispatch_assert_master(struct dlm_ctxt *dlm, 827int dlm_dispatch_assert_master(struct dlm_ctxt *dlm,
diff --git a/fs/ocfs2/dlm/dlmdomain.c b/fs/ocfs2/dlm/dlmdomain.c
index b8c23f7ba67e..8d1065f8b3bd 100644
--- a/fs/ocfs2/dlm/dlmdomain.c
+++ b/fs/ocfs2/dlm/dlmdomain.c
@@ -408,12 +408,13 @@ static void __dlm_print_nodes(struct dlm_ctxt *dlm)
408 408
409 assert_spin_locked(&dlm->spinlock); 409 assert_spin_locked(&dlm->spinlock);
410 410
411 mlog(ML_NOTICE, "Nodes in my domain (\"%s\"):\n", dlm->name); 411 printk(KERN_INFO "ocfs2_dlm: Nodes in domain (\"%s\"): ", dlm->name);
412 412
413 while ((node = find_next_bit(dlm->domain_map, O2NM_MAX_NODES, 413 while ((node = find_next_bit(dlm->domain_map, O2NM_MAX_NODES,
414 node + 1)) < O2NM_MAX_NODES) { 414 node + 1)) < O2NM_MAX_NODES) {
415 mlog(ML_NOTICE, " node %d\n", node); 415 printk("%d ", node);
416 } 416 }
417 printk("\n");
417} 418}
418 419
419static int dlm_exit_domain_handler(struct o2net_msg *msg, u32 len, void *data) 420static int dlm_exit_domain_handler(struct o2net_msg *msg, u32 len, void *data)
@@ -429,7 +430,7 @@ static int dlm_exit_domain_handler(struct o2net_msg *msg, u32 len, void *data)
429 430
430 node = exit_msg->node_idx; 431 node = exit_msg->node_idx;
431 432
432 mlog(0, "Node %u leaves domain %s\n", node, dlm->name); 433 printk(KERN_INFO "ocfs2_dlm: Node %u leaves domain %s\n", node, dlm->name);
433 434
434 spin_lock(&dlm->spinlock); 435 spin_lock(&dlm->spinlock);
435 clear_bit(node, dlm->domain_map); 436 clear_bit(node, dlm->domain_map);
@@ -678,6 +679,8 @@ static int dlm_assert_joined_handler(struct o2net_msg *msg, u32 len, void *data)
678 set_bit(assert->node_idx, dlm->domain_map); 679 set_bit(assert->node_idx, dlm->domain_map);
679 __dlm_set_joining_node(dlm, DLM_LOCK_RES_OWNER_UNKNOWN); 680 __dlm_set_joining_node(dlm, DLM_LOCK_RES_OWNER_UNKNOWN);
680 681
682 printk(KERN_INFO "ocfs2_dlm: Node %u joins domain %s\n",
683 assert->node_idx, dlm->name);
681 __dlm_print_nodes(dlm); 684 __dlm_print_nodes(dlm);
682 685
683 /* notify anything attached to the heartbeat events */ 686 /* notify anything attached to the heartbeat events */
diff --git a/fs/ocfs2/dlm/dlmrecovery.c b/fs/ocfs2/dlm/dlmrecovery.c
index 29b2845f370d..594745fab0b5 100644
--- a/fs/ocfs2/dlm/dlmrecovery.c
+++ b/fs/ocfs2/dlm/dlmrecovery.c
@@ -95,6 +95,9 @@ static void dlm_reco_unlock_ast(void *astdata, enum dlm_status st);
95static void dlm_request_all_locks_worker(struct dlm_work_item *item, 95static void dlm_request_all_locks_worker(struct dlm_work_item *item,
96 void *data); 96 void *data);
97static void dlm_mig_lockres_worker(struct dlm_work_item *item, void *data); 97static void dlm_mig_lockres_worker(struct dlm_work_item *item, void *data);
98static int dlm_lockres_master_requery(struct dlm_ctxt *dlm,
99 struct dlm_lock_resource *res,
100 u8 *real_master);
98 101
99static u64 dlm_get_next_mig_cookie(void); 102static u64 dlm_get_next_mig_cookie(void);
100 103
@@ -1484,8 +1487,9 @@ leave:
1484 1487
1485 1488
1486 1489
1487int dlm_lockres_master_requery(struct dlm_ctxt *dlm, 1490static int dlm_lockres_master_requery(struct dlm_ctxt *dlm,
1488 struct dlm_lock_resource *res, u8 *real_master) 1491 struct dlm_lock_resource *res,
1492 u8 *real_master)
1489{ 1493{
1490 struct dlm_node_iter iter; 1494 struct dlm_node_iter iter;
1491 int nodenum; 1495 int nodenum;
diff --git a/fs/ocfs2/dlmglue.c b/fs/ocfs2/dlmglue.c
index 4acd37286bdd..762eb1fbb34d 100644
--- a/fs/ocfs2/dlmglue.c
+++ b/fs/ocfs2/dlmglue.c
@@ -2071,8 +2071,7 @@ int ocfs2_dlm_init(struct ocfs2_super *osb)
2071 } 2071 }
2072 2072
2073 /* launch vote thread */ 2073 /* launch vote thread */
2074 osb->vote_task = kthread_run(ocfs2_vote_thread, osb, "ocfs2vote-%d", 2074 osb->vote_task = kthread_run(ocfs2_vote_thread, osb, "ocfs2vote");
2075 osb->osb_id);
2076 if (IS_ERR(osb->vote_task)) { 2075 if (IS_ERR(osb->vote_task)) {
2077 status = PTR_ERR(osb->vote_task); 2076 status = PTR_ERR(osb->vote_task);
2078 osb->vote_task = NULL; 2077 osb->vote_task = NULL;
diff --git a/fs/ocfs2/extent_map.c b/fs/ocfs2/extent_map.c
index 1a5c69071df6..fcd4475d1f89 100644
--- a/fs/ocfs2/extent_map.c
+++ b/fs/ocfs2/extent_map.c
@@ -298,7 +298,7 @@ static int ocfs2_extent_map_find_leaf(struct inode *inode,
298 298
299 ret = ocfs2_extent_map_insert(inode, rec, 299 ret = ocfs2_extent_map_insert(inode, rec,
300 le16_to_cpu(el->l_tree_depth)); 300 le16_to_cpu(el->l_tree_depth));
301 if (ret) { 301 if (ret && (ret != -EEXIST)) {
302 mlog_errno(ret); 302 mlog_errno(ret);
303 goto out_free; 303 goto out_free;
304 } 304 }
@@ -427,6 +427,11 @@ static int ocfs2_extent_map_insert_entry(struct ocfs2_extent_map *em,
427/* 427/*
428 * Simple rule: on any return code other than -EAGAIN, anything left 428 * Simple rule: on any return code other than -EAGAIN, anything left
429 * in the insert_context will be freed. 429 * in the insert_context will be freed.
430 *
431 * Simple rule #2: A return code of -EEXIST from this function or
432 * its calls to ocfs2_extent_map_insert_entry() signifies that another
433 * thread beat us to the insert. It is not an actual error, but it
434 * tells the caller we have no more work to do.
430 */ 435 */
431static int ocfs2_extent_map_try_insert(struct inode *inode, 436static int ocfs2_extent_map_try_insert(struct inode *inode,
432 struct ocfs2_extent_rec *rec, 437 struct ocfs2_extent_rec *rec,
@@ -448,22 +453,32 @@ static int ocfs2_extent_map_try_insert(struct inode *inode,
448 goto out_unlock; 453 goto out_unlock;
449 } 454 }
450 455
456 /* Since insert_entry failed, the map MUST have old_ent */
451 old_ent = ocfs2_extent_map_lookup(em, le32_to_cpu(rec->e_cpos), 457 old_ent = ocfs2_extent_map_lookup(em, le32_to_cpu(rec->e_cpos),
452 le32_to_cpu(rec->e_clusters), NULL, 458 le32_to_cpu(rec->e_clusters),
453 NULL); 459 NULL, NULL);
454 460
455 BUG_ON(!old_ent); 461 BUG_ON(!old_ent);
456 462
457 ret = -EEXIST; 463 if (old_ent->e_tree_depth < tree_depth) {
458 if (old_ent->e_tree_depth < tree_depth) 464 /* Another thread beat us to the lower tree_depth */
465 ret = -EEXIST;
459 goto out_unlock; 466 goto out_unlock;
467 }
460 468
461 if (old_ent->e_tree_depth == tree_depth) { 469 if (old_ent->e_tree_depth == tree_depth) {
470 /*
471 * Another thread beat us to this tree_depth.
472 * Let's make sure we agree with that thread (the
473 * extent_rec should be identical).
474 */
462 if (!memcmp(rec, &old_ent->e_rec, 475 if (!memcmp(rec, &old_ent->e_rec,
463 sizeof(struct ocfs2_extent_rec))) 476 sizeof(struct ocfs2_extent_rec)))
464 ret = 0; 477 ret = 0;
478 else
479 /* FIXME: Should this be ESRCH/EBADR??? */
480 ret = -EEXIST;
465 481
466 /* FIXME: Should this be ESRCH/EBADR??? */
467 goto out_unlock; 482 goto out_unlock;
468 } 483 }
469 484
@@ -599,7 +614,7 @@ static int ocfs2_extent_map_insert(struct inode *inode,
599 tree_depth, &ctxt); 614 tree_depth, &ctxt);
600 } while (ret == -EAGAIN); 615 } while (ret == -EAGAIN);
601 616
602 if (ret < 0) 617 if ((ret < 0) && (ret != -EEXIST))
603 mlog_errno(ret); 618 mlog_errno(ret);
604 619
605 if (ctxt.left_ent) 620 if (ctxt.left_ent)
diff --git a/fs/ocfs2/journal.c b/fs/ocfs2/journal.c
index 910a601b2e98..f92bf1dd379a 100644
--- a/fs/ocfs2/journal.c
+++ b/fs/ocfs2/journal.c
@@ -784,8 +784,7 @@ int ocfs2_journal_load(struct ocfs2_journal *journal)
784 } 784 }
785 785
786 /* Launch the commit thread */ 786 /* Launch the commit thread */
787 osb->commit_task = kthread_run(ocfs2_commit_thread, osb, "ocfs2cmt-%d", 787 osb->commit_task = kthread_run(ocfs2_commit_thread, osb, "ocfs2cmt");
788 osb->osb_id);
789 if (IS_ERR(osb->commit_task)) { 788 if (IS_ERR(osb->commit_task)) {
790 status = PTR_ERR(osb->commit_task); 789 status = PTR_ERR(osb->commit_task);
791 osb->commit_task = NULL; 790 osb->commit_task = NULL;
@@ -1118,7 +1117,7 @@ void ocfs2_recovery_thread(struct ocfs2_super *osb, int node_num)
1118 goto out; 1117 goto out;
1119 1118
1120 osb->recovery_thread_task = kthread_run(__ocfs2_recovery_thread, osb, 1119 osb->recovery_thread_task = kthread_run(__ocfs2_recovery_thread, osb,
1121 "ocfs2rec-%d", osb->osb_id); 1120 "ocfs2rec");
1122 if (IS_ERR(osb->recovery_thread_task)) { 1121 if (IS_ERR(osb->recovery_thread_task)) {
1123 mlog_errno((int)PTR_ERR(osb->recovery_thread_task)); 1122 mlog_errno((int)PTR_ERR(osb->recovery_thread_task));
1124 osb->recovery_thread_task = NULL; 1123 osb->recovery_thread_task = NULL;
diff --git a/fs/ocfs2/mmap.c b/fs/ocfs2/mmap.c
index 843cf9ddefe8..83934e33e5b0 100644
--- a/fs/ocfs2/mmap.c
+++ b/fs/ocfs2/mmap.c
@@ -46,12 +46,12 @@ static struct page *ocfs2_nopage(struct vm_area_struct * area,
46 unsigned long address, 46 unsigned long address,
47 int *type) 47 int *type)
48{ 48{
49 struct inode *inode = area->vm_file->f_dentry->d_inode;
50 struct page *page = NOPAGE_SIGBUS; 49 struct page *page = NOPAGE_SIGBUS;
51 sigset_t blocked, oldset; 50 sigset_t blocked, oldset;
52 int ret; 51 int ret;
53 52
54 mlog_entry("(inode %lu, address %lu)\n", inode->i_ino, address); 53 mlog_entry("(area=%p, address=%lu, type=%p)\n", area, address,
54 type);
55 55
56 /* The best way to deal with signals in this path is 56 /* The best way to deal with signals in this path is
57 * to block them upfront, rather than allowing the 57 * to block them upfront, rather than allowing the
diff --git a/fs/ocfs2/ocfs2.h b/fs/ocfs2/ocfs2.h
index da1093039c01..cd4a6f253d13 100644
--- a/fs/ocfs2/ocfs2.h
+++ b/fs/ocfs2/ocfs2.h
@@ -184,7 +184,6 @@ struct ocfs2_journal;
184struct ocfs2_journal_handle; 184struct ocfs2_journal_handle;
185struct ocfs2_super 185struct ocfs2_super
186{ 186{
187 u32 osb_id; /* id used by the proc interface */
188 struct task_struct *commit_task; 187 struct task_struct *commit_task;
189 struct super_block *sb; 188 struct super_block *sb;
190 struct inode *root_inode; 189 struct inode *root_inode;
@@ -222,13 +221,11 @@ struct ocfs2_super
222 unsigned long s_mount_opt; 221 unsigned long s_mount_opt;
223 222
224 u16 max_slots; 223 u16 max_slots;
225 u16 num_nodes;
226 s16 node_num; 224 s16 node_num;
227 s16 slot_num; 225 s16 slot_num;
228 int s_sectsize_bits; 226 int s_sectsize_bits;
229 int s_clustersize; 227 int s_clustersize;
230 int s_clustersize_bits; 228 int s_clustersize_bits;
231 struct proc_dir_entry *proc_sub_dir; /* points to /proc/fs/ocfs2/<maj_min> */
232 229
233 atomic_t vol_state; 230 atomic_t vol_state;
234 struct mutex recovery_lock; 231 struct mutex recovery_lock;
@@ -294,7 +291,6 @@ struct ocfs2_super
294}; 291};
295 292
296#define OCFS2_SB(sb) ((struct ocfs2_super *)(sb)->s_fs_info) 293#define OCFS2_SB(sb) ((struct ocfs2_super *)(sb)->s_fs_info)
297#define OCFS2_MAX_OSB_ID 65536
298 294
299static inline int ocfs2_should_order_data(struct inode *inode) 295static inline int ocfs2_should_order_data(struct inode *inode)
300{ 296{
diff --git a/fs/ocfs2/slot_map.c b/fs/ocfs2/slot_map.c
index 871627961d6d..aa6f5aadedc4 100644
--- a/fs/ocfs2/slot_map.c
+++ b/fs/ocfs2/slot_map.c
@@ -264,7 +264,7 @@ int ocfs2_find_slot(struct ocfs2_super *osb)
264 osb->slot_num = slot; 264 osb->slot_num = slot;
265 spin_unlock(&si->si_lock); 265 spin_unlock(&si->si_lock);
266 266
267 mlog(ML_NOTICE, "taking node slot %d\n", osb->slot_num); 267 mlog(0, "taking node slot %d\n", osb->slot_num);
268 268
269 status = ocfs2_update_disk_slots(osb, si); 269 status = ocfs2_update_disk_slots(osb, si);
270 if (status < 0) 270 if (status < 0)
diff --git a/fs/ocfs2/super.c b/fs/ocfs2/super.c
index cdf73393f094..382706a67ffd 100644
--- a/fs/ocfs2/super.c
+++ b/fs/ocfs2/super.c
@@ -68,13 +68,6 @@
68 68
69#include "buffer_head_io.h" 69#include "buffer_head_io.h"
70 70
71/*
72 * Globals
73 */
74static spinlock_t ocfs2_globals_lock = SPIN_LOCK_UNLOCKED;
75
76static u32 osb_id; /* Keeps track of next available OSB Id */
77
78static kmem_cache_t *ocfs2_inode_cachep = NULL; 71static kmem_cache_t *ocfs2_inode_cachep = NULL;
79 72
80kmem_cache_t *ocfs2_lock_cache = NULL; 73kmem_cache_t *ocfs2_lock_cache = NULL;
@@ -642,10 +635,9 @@ static int ocfs2_fill_super(struct super_block *sb, void *data, int silent)
642 635
643 ocfs2_complete_mount_recovery(osb); 636 ocfs2_complete_mount_recovery(osb);
644 637
645 printk("ocfs2: Mounting device (%u,%u) on (node %d, slot %d) with %s " 638 printk(KERN_INFO "ocfs2: Mounting device (%s) on (node %d, slot %d) "
646 "data mode.\n", 639 "with %s data mode.\n",
647 MAJOR(sb->s_dev), MINOR(sb->s_dev), osb->node_num, 640 osb->dev_str, osb->node_num, osb->slot_num,
648 osb->slot_num,
649 osb->s_mount_opt & OCFS2_MOUNT_DATA_WRITEBACK ? "writeback" : 641 osb->s_mount_opt & OCFS2_MOUNT_DATA_WRITEBACK ? "writeback" :
650 "ordered"); 642 "ordered");
651 643
@@ -800,10 +792,6 @@ static int __init ocfs2_init(void)
800 goto leave; 792 goto leave;
801 } 793 }
802 794
803 spin_lock(&ocfs2_globals_lock);
804 osb_id = 0;
805 spin_unlock(&ocfs2_globals_lock);
806
807 ocfs2_debugfs_root = debugfs_create_dir("ocfs2", NULL); 795 ocfs2_debugfs_root = debugfs_create_dir("ocfs2", NULL);
808 if (!ocfs2_debugfs_root) { 796 if (!ocfs2_debugfs_root) {
809 status = -EFAULT; 797 status = -EFAULT;
@@ -1020,7 +1008,7 @@ static int ocfs2_fill_local_node_info(struct ocfs2_super *osb)
1020 goto bail; 1008 goto bail;
1021 } 1009 }
1022 1010
1023 mlog(ML_NOTICE, "I am node %d\n", osb->node_num); 1011 mlog(0, "I am node %d\n", osb->node_num);
1024 1012
1025 status = 0; 1013 status = 0;
1026bail: 1014bail:
@@ -1191,8 +1179,8 @@ static void ocfs2_dismount_volume(struct super_block *sb, int mnt_err)
1191 1179
1192 atomic_set(&osb->vol_state, VOLUME_DISMOUNTED); 1180 atomic_set(&osb->vol_state, VOLUME_DISMOUNTED);
1193 1181
1194 printk("ocfs2: Unmounting device (%u,%u) on (node %d)\n", 1182 printk(KERN_INFO "ocfs2: Unmounting device (%s) on (node %d)\n",
1195 MAJOR(osb->sb->s_dev), MINOR(osb->sb->s_dev), osb->node_num); 1183 osb->dev_str, osb->node_num);
1196 1184
1197 ocfs2_delete_osb(osb); 1185 ocfs2_delete_osb(osb);
1198 kfree(osb); 1186 kfree(osb);
@@ -1212,8 +1200,6 @@ static int ocfs2_setup_osb_uuid(struct ocfs2_super *osb, const unsigned char *uu
1212 if (osb->uuid_str == NULL) 1200 if (osb->uuid_str == NULL)
1213 return -ENOMEM; 1201 return -ENOMEM;
1214 1202
1215 memcpy(osb->uuid, uuid, OCFS2_VOL_UUID_LEN);
1216
1217 for (i = 0, ptr = osb->uuid_str; i < OCFS2_VOL_UUID_LEN; i++) { 1203 for (i = 0, ptr = osb->uuid_str; i < OCFS2_VOL_UUID_LEN; i++) {
1218 /* print with null */ 1204 /* print with null */
1219 ret = snprintf(ptr, 3, "%02X", uuid[i]); 1205 ret = snprintf(ptr, 3, "%02X", uuid[i]);
@@ -1311,13 +1297,6 @@ static int ocfs2_initialize_super(struct super_block *sb,
1311 goto bail; 1297 goto bail;
1312 } 1298 }
1313 1299
1314 osb->uuid = kmalloc(OCFS2_VOL_UUID_LEN, GFP_KERNEL);
1315 if (!osb->uuid) {
1316 mlog(ML_ERROR, "unable to alloc uuid\n");
1317 status = -ENOMEM;
1318 goto bail;
1319 }
1320
1321 di = (struct ocfs2_dinode *)bh->b_data; 1300 di = (struct ocfs2_dinode *)bh->b_data;
1322 1301
1323 osb->max_slots = le16_to_cpu(di->id2.i_super.s_max_slots); 1302 osb->max_slots = le16_to_cpu(di->id2.i_super.s_max_slots);
@@ -1327,7 +1306,7 @@ static int ocfs2_initialize_super(struct super_block *sb,
1327 status = -EINVAL; 1306 status = -EINVAL;
1328 goto bail; 1307 goto bail;
1329 } 1308 }
1330 mlog(ML_NOTICE, "max_slots for this device: %u\n", osb->max_slots); 1309 mlog(0, "max_slots for this device: %u\n", osb->max_slots);
1331 1310
1332 init_waitqueue_head(&osb->osb_wipe_event); 1311 init_waitqueue_head(&osb->osb_wipe_event);
1333 osb->osb_orphan_wipes = kcalloc(osb->max_slots, 1312 osb->osb_orphan_wipes = kcalloc(osb->max_slots,
@@ -1418,7 +1397,7 @@ static int ocfs2_initialize_super(struct super_block *sb,
1418 goto bail; 1397 goto bail;
1419 } 1398 }
1420 1399
1421 memcpy(&uuid_net_key, &osb->uuid[i], sizeof(osb->net_key)); 1400 memcpy(&uuid_net_key, di->id2.i_super.s_uuid, sizeof(uuid_net_key));
1422 osb->net_key = le32_to_cpu(uuid_net_key); 1401 osb->net_key = le32_to_cpu(uuid_net_key);
1423 1402
1424 strncpy(osb->vol_label, di->id2.i_super.s_label, 63); 1403 strncpy(osb->vol_label, di->id2.i_super.s_label, 63);
@@ -1484,18 +1463,6 @@ static int ocfs2_initialize_super(struct super_block *sb,
1484 goto bail; 1463 goto bail;
1485 } 1464 }
1486 1465
1487 /* Link this osb onto the global linked list of all osb structures. */
1488 /* The Global Link List is mainted for the whole driver . */
1489 spin_lock(&ocfs2_globals_lock);
1490 osb->osb_id = osb_id;
1491 if (osb_id < OCFS2_MAX_OSB_ID)
1492 osb_id++;
1493 else {
1494 mlog(ML_ERROR, "Too many volumes mounted\n");
1495 status = -ENOMEM;
1496 }
1497 spin_unlock(&ocfs2_globals_lock);
1498
1499bail: 1466bail:
1500 mlog_exit(status); 1467 mlog_exit(status);
1501 return status; 1468 return status;
diff --git a/fs/ocfs2/symlink.c b/fs/ocfs2/symlink.c
index 0c8a1294ec96..c0f68aa6c175 100644
--- a/fs/ocfs2/symlink.c
+++ b/fs/ocfs2/symlink.c
@@ -154,7 +154,7 @@ static void *ocfs2_follow_link(struct dentry *dentry,
154 } 154 }
155 155
156 status = vfs_follow_link(nd, link); 156 status = vfs_follow_link(nd, link);
157 if (status) 157 if (status && status != -ENOENT)
158 mlog_errno(status); 158 mlog_errno(status);
159bail: 159bail:
160 if (page) { 160 if (page) {
diff --git a/fs/proc/proc_misc.c b/fs/proc/proc_misc.c
index 5c10ea157425..6aa2aa779a09 100644
--- a/fs/proc/proc_misc.c
+++ b/fs/proc/proc_misc.c
@@ -120,7 +120,6 @@ static int meminfo_read_proc(char *page, char **start, off_t off,
120{ 120{
121 struct sysinfo i; 121 struct sysinfo i;
122 int len; 122 int len;
123 struct page_state ps;
124 unsigned long inactive; 123 unsigned long inactive;
125 unsigned long active; 124 unsigned long active;
126 unsigned long free; 125 unsigned long free;
@@ -129,7 +128,6 @@ static int meminfo_read_proc(char *page, char **start, off_t off,
129 struct vmalloc_info vmi; 128 struct vmalloc_info vmi;
130 long cached; 129 long cached;
131 130
132 get_page_state(&ps);
133 get_zone_counts(&active, &inactive, &free); 131 get_zone_counts(&active, &inactive, &free);
134 132
135/* 133/*
@@ -142,7 +140,8 @@ static int meminfo_read_proc(char *page, char **start, off_t off,
142 allowed = ((totalram_pages - hugetlb_total_pages()) 140 allowed = ((totalram_pages - hugetlb_total_pages())
143 * sysctl_overcommit_ratio / 100) + total_swap_pages; 141 * sysctl_overcommit_ratio / 100) + total_swap_pages;
144 142
145 cached = get_page_cache_size() - total_swapcache_pages - i.bufferram; 143 cached = global_page_state(NR_FILE_PAGES) -
144 total_swapcache_pages - i.bufferram;
146 if (cached < 0) 145 if (cached < 0)
147 cached = 0; 146 cached = 0;
148 147
@@ -167,11 +166,14 @@ static int meminfo_read_proc(char *page, char **start, off_t off,
167 "SwapFree: %8lu kB\n" 166 "SwapFree: %8lu kB\n"
168 "Dirty: %8lu kB\n" 167 "Dirty: %8lu kB\n"
169 "Writeback: %8lu kB\n" 168 "Writeback: %8lu kB\n"
169 "AnonPages: %8lu kB\n"
170 "Mapped: %8lu kB\n" 170 "Mapped: %8lu kB\n"
171 "Slab: %8lu kB\n" 171 "Slab: %8lu kB\n"
172 "PageTables: %8lu kB\n"
173 "NFS Unstable: %8lu kB\n"
174 "Bounce: %8lu kB\n"
172 "CommitLimit: %8lu kB\n" 175 "CommitLimit: %8lu kB\n"
173 "Committed_AS: %8lu kB\n" 176 "Committed_AS: %8lu kB\n"
174 "PageTables: %8lu kB\n"
175 "VmallocTotal: %8lu kB\n" 177 "VmallocTotal: %8lu kB\n"
176 "VmallocUsed: %8lu kB\n" 178 "VmallocUsed: %8lu kB\n"
177 "VmallocChunk: %8lu kB\n", 179 "VmallocChunk: %8lu kB\n",
@@ -188,13 +190,16 @@ static int meminfo_read_proc(char *page, char **start, off_t off,
188 K(i.freeram-i.freehigh), 190 K(i.freeram-i.freehigh),
189 K(i.totalswap), 191 K(i.totalswap),
190 K(i.freeswap), 192 K(i.freeswap),
191 K(ps.nr_dirty), 193 K(global_page_state(NR_FILE_DIRTY)),
192 K(ps.nr_writeback), 194 K(global_page_state(NR_WRITEBACK)),
193 K(ps.nr_mapped), 195 K(global_page_state(NR_ANON_PAGES)),
194 K(ps.nr_slab), 196 K(global_page_state(NR_FILE_MAPPED)),
197 K(global_page_state(NR_SLAB)),
198 K(global_page_state(NR_PAGETABLE)),
199 K(global_page_state(NR_UNSTABLE_NFS)),
200 K(global_page_state(NR_BOUNCE)),
195 K(allowed), 201 K(allowed),
196 K(committed), 202 K(committed),
197 K(ps.nr_page_table_pages),
198 (unsigned long)VMALLOC_TOTAL >> 10, 203 (unsigned long)VMALLOC_TOTAL >> 10,
199 vmi.used >> 10, 204 vmi.used >> 10,
200 vmi.largest_chunk >> 10 205 vmi.largest_chunk >> 10
diff --git a/include/asm-alpha/socket.h b/include/asm-alpha/socket.h
index b5193229132a..d22ab97ea72e 100644
--- a/include/asm-alpha/socket.h
+++ b/include/asm-alpha/socket.h
@@ -51,6 +51,7 @@
51#define SCM_TIMESTAMP SO_TIMESTAMP 51#define SCM_TIMESTAMP SO_TIMESTAMP
52 52
53#define SO_PEERSEC 30 53#define SO_PEERSEC 30
54#define SO_PASSSEC 34
54 55
55/* Security levels - as per NRL IPv6 - don't actually do anything */ 56/* Security levels - as per NRL IPv6 - don't actually do anything */
56#define SO_SECURITY_AUTHENTICATION 19 57#define SO_SECURITY_AUTHENTICATION 19
diff --git a/include/asm-arm/socket.h b/include/asm-arm/socket.h
index 3c51da6438c9..19f7df702b06 100644
--- a/include/asm-arm/socket.h
+++ b/include/asm-arm/socket.h
@@ -48,5 +48,6 @@
48#define SO_ACCEPTCONN 30 48#define SO_ACCEPTCONN 30
49 49
50#define SO_PEERSEC 31 50#define SO_PEERSEC 31
51#define SO_PASSSEC 34
51 52
52#endif /* _ASM_SOCKET_H */ 53#endif /* _ASM_SOCKET_H */
diff --git a/include/asm-arm26/socket.h b/include/asm-arm26/socket.h
index 3c51da6438c9..19f7df702b06 100644
--- a/include/asm-arm26/socket.h
+++ b/include/asm-arm26/socket.h
@@ -48,5 +48,6 @@
48#define SO_ACCEPTCONN 30 48#define SO_ACCEPTCONN 30
49 49
50#define SO_PEERSEC 31 50#define SO_PEERSEC 31
51#define SO_PASSSEC 34
51 52
52#endif /* _ASM_SOCKET_H */ 53#endif /* _ASM_SOCKET_H */
diff --git a/include/asm-cris/socket.h b/include/asm-cris/socket.h
index 8b1da3e58c55..01cfdf1d6d33 100644
--- a/include/asm-cris/socket.h
+++ b/include/asm-cris/socket.h
@@ -50,6 +50,7 @@
50#define SO_ACCEPTCONN 30 50#define SO_ACCEPTCONN 30
51 51
52#define SO_PEERSEC 31 52#define SO_PEERSEC 31
53#define SO_PASSSEC 34
53 54
54#endif /* _ASM_SOCKET_H */ 55#endif /* _ASM_SOCKET_H */
55 56
diff --git a/include/asm-frv/socket.h b/include/asm-frv/socket.h
index 7177f8b9817c..31db18fc871f 100644
--- a/include/asm-frv/socket.h
+++ b/include/asm-frv/socket.h
@@ -48,6 +48,7 @@
48#define SO_ACCEPTCONN 30 48#define SO_ACCEPTCONN 30
49 49
50#define SO_PEERSEC 31 50#define SO_PEERSEC 31
51#define SO_PASSSEC 34
51 52
52#endif /* _ASM_SOCKET_H */ 53#endif /* _ASM_SOCKET_H */
53 54
diff --git a/include/asm-h8300/socket.h b/include/asm-h8300/socket.h
index d98cf85bafc1..ebc830fee0d0 100644
--- a/include/asm-h8300/socket.h
+++ b/include/asm-h8300/socket.h
@@ -48,5 +48,6 @@
48#define SO_ACCEPTCONN 30 48#define SO_ACCEPTCONN 30
49 49
50#define SO_PEERSEC 31 50#define SO_PEERSEC 31
51#define SO_PASSSEC 34
51 52
52#endif /* _ASM_SOCKET_H */ 53#endif /* _ASM_SOCKET_H */
diff --git a/include/asm-i386/socket.h b/include/asm-i386/socket.h
index 802ae76195b7..5755d57c4e95 100644
--- a/include/asm-i386/socket.h
+++ b/include/asm-i386/socket.h
@@ -48,5 +48,6 @@
48#define SO_ACCEPTCONN 30 48#define SO_ACCEPTCONN 30
49 49
50#define SO_PEERSEC 31 50#define SO_PEERSEC 31
51#define SO_PASSSEC 34
51 52
52#endif /* _ASM_SOCKET_H */ 53#endif /* _ASM_SOCKET_H */
diff --git a/include/asm-ia64/socket.h b/include/asm-ia64/socket.h
index a255006fb7b5..d638ef3d50c3 100644
--- a/include/asm-ia64/socket.h
+++ b/include/asm-ia64/socket.h
@@ -57,5 +57,6 @@
57#define SO_ACCEPTCONN 30 57#define SO_ACCEPTCONN 30
58 58
59#define SO_PEERSEC 31 59#define SO_PEERSEC 31
60#define SO_PASSSEC 34
60 61
61#endif /* _ASM_IA64_SOCKET_H */ 62#endif /* _ASM_IA64_SOCKET_H */
diff --git a/include/asm-m32r/socket.h b/include/asm-m32r/socket.h
index 8b6680f223c0..acdf748fcdc8 100644
--- a/include/asm-m32r/socket.h
+++ b/include/asm-m32r/socket.h
@@ -48,5 +48,6 @@
48#define SO_ACCEPTCONN 30 48#define SO_ACCEPTCONN 30
49 49
50#define SO_PEERSEC 31 50#define SO_PEERSEC 31
51#define SO_PASSSEC 34
51 52
52#endif /* _ASM_M32R_SOCKET_H */ 53#endif /* _ASM_M32R_SOCKET_H */
diff --git a/include/asm-m68k/socket.h b/include/asm-m68k/socket.h
index f578ca4b776a..a5966ec005ae 100644
--- a/include/asm-m68k/socket.h
+++ b/include/asm-m68k/socket.h
@@ -48,5 +48,6 @@
48#define SO_ACCEPTCONN 30 48#define SO_ACCEPTCONN 30
49 49
50#define SO_PEERSEC 31 50#define SO_PEERSEC 31
51#define SO_PASSSEC 34
51 52
52#endif /* _ASM_SOCKET_H */ 53#endif /* _ASM_SOCKET_H */
diff --git a/include/asm-mips/socket.h b/include/asm-mips/socket.h
index 0bb31e5aaca6..36ebe4e186a7 100644
--- a/include/asm-mips/socket.h
+++ b/include/asm-mips/socket.h
@@ -69,6 +69,7 @@ To add: #define SO_REUSEPORT 0x0200 /* Allow local address and port reuse. */
69#define SO_PEERSEC 30 69#define SO_PEERSEC 30
70#define SO_SNDBUFFORCE 31 70#define SO_SNDBUFFORCE 31
71#define SO_RCVBUFFORCE 33 71#define SO_RCVBUFFORCE 33
72#define SO_PASSSEC 34
72 73
73#ifdef __KERNEL__ 74#ifdef __KERNEL__
74 75
diff --git a/include/asm-parisc/socket.h b/include/asm-parisc/socket.h
index 1bf54dc53c10..ce2eae1708b5 100644
--- a/include/asm-parisc/socket.h
+++ b/include/asm-parisc/socket.h
@@ -48,5 +48,6 @@
48#define SO_ACCEPTCONN 0x401c 48#define SO_ACCEPTCONN 0x401c
49 49
50#define SO_PEERSEC 0x401d 50#define SO_PEERSEC 0x401d
51#define SO_PASSSEC 0x401e
51 52
52#endif /* _ASM_SOCKET_H */ 53#endif /* _ASM_SOCKET_H */
diff --git a/include/asm-powerpc/socket.h b/include/asm-powerpc/socket.h
index e4b8177d4acc..c8b1da50e72d 100644
--- a/include/asm-powerpc/socket.h
+++ b/include/asm-powerpc/socket.h
@@ -55,5 +55,6 @@
55#define SO_ACCEPTCONN 30 55#define SO_ACCEPTCONN 30
56 56
57#define SO_PEERSEC 31 57#define SO_PEERSEC 31
58#define SO_PASSSEC 34
58 59
59#endif /* _ASM_POWERPC_SOCKET_H */ 60#endif /* _ASM_POWERPC_SOCKET_H */
diff --git a/include/asm-s390/pgtable.h b/include/asm-s390/pgtable.h
index 859b5e969826..24312387fa24 100644
--- a/include/asm-s390/pgtable.h
+++ b/include/asm-s390/pgtable.h
@@ -657,13 +657,6 @@ static inline pte_t mk_pte_phys(unsigned long physpage, pgprot_t pgprot)
657 __pte; \ 657 __pte; \
658}) 658})
659 659
660#define SetPageUptodate(_page) \
661 do { \
662 struct page *__page = (_page); \
663 if (!test_and_set_bit(PG_uptodate, &__page->flags)) \
664 page_test_and_clear_dirty(_page); \
665 } while (0)
666
667#ifdef __s390x__ 660#ifdef __s390x__
668 661
669#define pfn_pmd(pfn, pgprot) \ 662#define pfn_pmd(pfn, pgprot) \
diff --git a/include/asm-s390/socket.h b/include/asm-s390/socket.h
index 15a5298c8744..1778a49a74c5 100644
--- a/include/asm-s390/socket.h
+++ b/include/asm-s390/socket.h
@@ -56,5 +56,6 @@
56#define SO_ACCEPTCONN 30 56#define SO_ACCEPTCONN 30
57 57
58#define SO_PEERSEC 31 58#define SO_PEERSEC 31
59#define SO_PASSSEC 34
59 60
60#endif /* _ASM_SOCKET_H */ 61#endif /* _ASM_SOCKET_H */
diff --git a/include/asm-sh/socket.h b/include/asm-sh/socket.h
index 553904ff9336..ca70362eb563 100644
--- a/include/asm-sh/socket.h
+++ b/include/asm-sh/socket.h
@@ -48,5 +48,6 @@
48#define SO_ACCEPTCONN 30 48#define SO_ACCEPTCONN 30
49 49
50#define SO_PEERSEC 31 50#define SO_PEERSEC 31
51#define SO_PASSSEC 34
51 52
52#endif /* __ASM_SH_SOCKET_H */ 53#endif /* __ASM_SH_SOCKET_H */
diff --git a/include/asm-sparc/of_device.h b/include/asm-sparc/of_device.h
index 4816d102f918..80ea31f6e17f 100644
--- a/include/asm-sparc/of_device.h
+++ b/include/asm-sparc/of_device.h
@@ -4,10 +4,12 @@
4 4
5#include <linux/device.h> 5#include <linux/device.h>
6#include <linux/mod_devicetable.h> 6#include <linux/mod_devicetable.h>
7#include <asm/openprom.h>
7#include <asm/prom.h> 8#include <asm/prom.h>
8 9
9extern struct bus_type ebus_bus_type; 10extern struct bus_type ebus_bus_type;
10extern struct bus_type sbus_bus_type; 11extern struct bus_type sbus_bus_type;
12extern struct bus_type of_bus_type;
11 13
12/* 14/*
13 * The of_device is a kind of "base class" that is a superset of 15 * The of_device is a kind of "base class" that is a superset of
@@ -16,11 +18,25 @@ extern struct bus_type sbus_bus_type;
16 */ 18 */
17struct of_device 19struct of_device
18{ 20{
19 struct device_node *node; /* OF device node */ 21 struct device_node *node;
20 struct device dev; /* Generic device interface */ 22 struct device dev;
23 struct resource resource[PROMREG_MAX];
24 unsigned int irqs[PROMINTR_MAX];
25 int num_irqs;
26
27 void *sysdata;
28
29 int slot;
30 int portid;
31 int clock_freq;
21}; 32};
22#define to_of_device(d) container_of(d, struct of_device, dev) 33#define to_of_device(d) container_of(d, struct of_device, dev)
23 34
35extern void __iomem *of_ioremap(struct resource *res, unsigned long offset, unsigned long size, char *name);
36extern void of_iounmap(void __iomem *base, unsigned long size);
37
38extern struct of_device *of_find_device_by_node(struct device_node *);
39
24extern const struct of_device_id *of_match_device( 40extern const struct of_device_id *of_match_device(
25 const struct of_device_id *matches, const struct of_device *dev); 41 const struct of_device_id *matches, const struct of_device *dev);
26 42
diff --git a/include/asm-sparc/prom.h b/include/asm-sparc/prom.h
index f9cf44c07164..86c13dccea3d 100644
--- a/include/asm-sparc/prom.h
+++ b/include/asm-sparc/prom.h
@@ -25,11 +25,6 @@
25typedef u32 phandle; 25typedef u32 phandle;
26typedef u32 ihandle; 26typedef u32 ihandle;
27 27
28struct interrupt_info {
29 int line;
30 int sense; /* +ve/-ve logic, edge or level, etc. */
31};
32
33struct property { 28struct property {
34 char *name; 29 char *name;
35 int length; 30 int length;
@@ -43,9 +38,6 @@ struct device_node {
43 char *name; 38 char *name;
44 char *type; 39 char *type;
45 phandle node; 40 phandle node;
46 phandle linux_phandle;
47 int n_intrs;
48 struct interrupt_info *intrs;
49 char *path_component_name; 41 char *path_component_name;
50 char *full_name; 42 char *full_name;
51 43
@@ -69,6 +61,8 @@ struct device_node {
69#define OF_IS_DYNAMIC(x) test_bit(OF_DYNAMIC, &x->_flags) 61#define OF_IS_DYNAMIC(x) test_bit(OF_DYNAMIC, &x->_flags)
70#define OF_MARK_DYNAMIC(x) set_bit(OF_DYNAMIC, &x->_flags) 62#define OF_MARK_DYNAMIC(x) set_bit(OF_DYNAMIC, &x->_flags)
71 63
64#define OF_BAD_ADDR ((u64)-1)
65
72static inline void set_node_proc_entry(struct device_node *dn, struct proc_dir_entry *de) 66static inline void set_node_proc_entry(struct device_node *dn, struct proc_dir_entry *de)
73{ 67{
74 dn->pde = de; 68 dn->pde = de;
@@ -101,6 +95,8 @@ extern int of_set_property(struct device_node *node, const char *name, void *val
101extern int of_getintprop_default(struct device_node *np, 95extern int of_getintprop_default(struct device_node *np,
102 const char *name, 96 const char *name,
103 int def); 97 int def);
98extern int of_n_addr_cells(struct device_node *np);
99extern int of_n_size_cells(struct device_node *np);
104 100
105extern void prom_build_devicetree(void); 101extern void prom_build_devicetree(void);
106 102
diff --git a/include/asm-sparc/socket.h b/include/asm-sparc/socket.h
index 4e0ce3a35ea9..f6c4e5baf3f7 100644
--- a/include/asm-sparc/socket.h
+++ b/include/asm-sparc/socket.h
@@ -48,6 +48,7 @@
48#define SCM_TIMESTAMP SO_TIMESTAMP 48#define SCM_TIMESTAMP SO_TIMESTAMP
49 49
50#define SO_PEERSEC 0x001e 50#define SO_PEERSEC 0x001e
51#define SO_PASSSEC 0x001f
51 52
52/* Security levels - as per NRL IPv6 - don't actually do anything */ 53/* Security levels - as per NRL IPv6 - don't actually do anything */
53#define SO_SECURITY_AUTHENTICATION 0x5001 54#define SO_SECURITY_AUTHENTICATION 0x5001
diff --git a/include/asm-sparc64/of_device.h b/include/asm-sparc64/of_device.h
index 024088ef9d27..a62c7b997d66 100644
--- a/include/asm-sparc64/of_device.h
+++ b/include/asm-sparc64/of_device.h
@@ -4,11 +4,13 @@
4 4
5#include <linux/device.h> 5#include <linux/device.h>
6#include <linux/mod_devicetable.h> 6#include <linux/mod_devicetable.h>
7#include <asm/openprom.h>
7#include <asm/prom.h> 8#include <asm/prom.h>
8 9
9extern struct bus_type isa_bus_type; 10extern struct bus_type isa_bus_type;
10extern struct bus_type ebus_bus_type; 11extern struct bus_type ebus_bus_type;
11extern struct bus_type sbus_bus_type; 12extern struct bus_type sbus_bus_type;
13extern struct bus_type of_bus_type;
12 14
13/* 15/*
14 * The of_device is a kind of "base class" that is a superset of 16 * The of_device is a kind of "base class" that is a superset of
@@ -17,11 +19,25 @@ extern struct bus_type sbus_bus_type;
17 */ 19 */
18struct of_device 20struct of_device
19{ 21{
20 struct device_node *node; /* OF device node */ 22 struct device_node *node;
21 struct device dev; /* Generic device interface */ 23 struct device dev;
24 struct resource resource[PROMREG_MAX];
25 unsigned int irqs[PROMINTR_MAX];
26 int num_irqs;
27
28 void *sysdata;
29
30 int slot;
31 int portid;
32 int clock_freq;
22}; 33};
23#define to_of_device(d) container_of(d, struct of_device, dev) 34#define to_of_device(d) container_of(d, struct of_device, dev)
24 35
36extern void __iomem *of_ioremap(struct resource *res, unsigned long offset, unsigned long size, char *name);
37extern void of_iounmap(void __iomem *base, unsigned long size);
38
39extern struct of_device *of_find_device_by_node(struct device_node *);
40
25extern const struct of_device_id *of_match_device( 41extern const struct of_device_id *of_match_device(
26 const struct of_device_id *matches, const struct of_device *dev); 42 const struct of_device_id *matches, const struct of_device *dev);
27 43
diff --git a/include/asm-sparc64/pbm.h b/include/asm-sparc64/pbm.h
index cebe80b1da6c..dcfa7629358c 100644
--- a/include/asm-sparc64/pbm.h
+++ b/include/asm-sparc64/pbm.h
@@ -16,6 +16,7 @@
16#include <asm/page.h> 16#include <asm/page.h>
17#include <asm/oplib.h> 17#include <asm/oplib.h>
18#include <asm/prom.h> 18#include <asm/prom.h>
19#include <asm/of_device.h>
19#include <asm/iommu.h> 20#include <asm/iommu.h>
20 21
21/* The abstraction used here is that there are PCI controllers, 22/* The abstraction used here is that there are PCI controllers,
@@ -209,7 +210,6 @@ struct pci_controller_info {
209 210
210 /* Operations which are controller specific. */ 211 /* Operations which are controller specific. */
211 void (*scan_bus)(struct pci_controller_info *); 212 void (*scan_bus)(struct pci_controller_info *);
212 unsigned int (*irq_build)(struct pci_pbm_info *, struct pci_dev *, unsigned int);
213 void (*base_address_update)(struct pci_dev *, int); 213 void (*base_address_update)(struct pci_dev *, int);
214 void (*resource_adjust)(struct pci_dev *, struct resource *, struct resource *); 214 void (*resource_adjust)(struct pci_dev *, struct resource *, struct resource *);
215 215
@@ -217,8 +217,6 @@ struct pci_controller_info {
217 struct pci_ops *pci_ops; 217 struct pci_ops *pci_ops;
218 unsigned int pci_first_busno; 218 unsigned int pci_first_busno;
219 unsigned int pci_last_busno; 219 unsigned int pci_last_busno;
220
221 void *starfire_cookie;
222}; 220};
223 221
224/* PCI devices which are not bridges have this placed in their pci_dev 222/* PCI devices which are not bridges have this placed in their pci_dev
@@ -228,6 +226,7 @@ struct pci_controller_info {
228struct pcidev_cookie { 226struct pcidev_cookie {
229 struct pci_pbm_info *pbm; 227 struct pci_pbm_info *pbm;
230 struct device_node *prom_node; 228 struct device_node *prom_node;
229 struct of_device *op;
231 struct linux_prom_pci_registers prom_regs[PROMREG_MAX]; 230 struct linux_prom_pci_registers prom_regs[PROMREG_MAX];
232 int num_prom_regs; 231 int num_prom_regs;
233 struct linux_prom_pci_registers prom_assignments[PROMREG_MAX]; 232 struct linux_prom_pci_registers prom_assignments[PROMREG_MAX];
diff --git a/include/asm-sparc64/prom.h b/include/asm-sparc64/prom.h
index 265614d497c4..99671ed6625d 100644
--- a/include/asm-sparc64/prom.h
+++ b/include/asm-sparc64/prom.h
@@ -25,11 +25,6 @@
25typedef u32 phandle; 25typedef u32 phandle;
26typedef u32 ihandle; 26typedef u32 ihandle;
27 27
28struct interrupt_info {
29 int line;
30 int sense; /* +ve/-ve logic, edge or level, etc. */
31};
32
33struct property { 28struct property {
34 char *name; 29 char *name;
35 int length; 30 int length;
@@ -39,13 +34,11 @@ struct property {
39 unsigned int unique_id; 34 unsigned int unique_id;
40}; 35};
41 36
37struct of_irq_controller;
42struct device_node { 38struct device_node {
43 char *name; 39 char *name;
44 char *type; 40 char *type;
45 phandle node; 41 phandle node;
46 phandle linux_phandle;
47 int n_intrs;
48 struct interrupt_info *intrs;
49 char *path_component_name; 42 char *path_component_name;
50 char *full_name; 43 char *full_name;
51 44
@@ -61,6 +54,13 @@ struct device_node {
61 unsigned long _flags; 54 unsigned long _flags;
62 void *data; 55 void *data;
63 unsigned int unique_id; 56 unsigned int unique_id;
57
58 struct of_irq_controller *irq_trans;
59};
60
61struct of_irq_controller {
62 unsigned int (*irq_build)(struct device_node *, unsigned int, void *);
63 void *data;
64}; 64};
65 65
66/* flag descriptions */ 66/* flag descriptions */
@@ -69,6 +69,8 @@ struct device_node {
69#define OF_IS_DYNAMIC(x) test_bit(OF_DYNAMIC, &x->_flags) 69#define OF_IS_DYNAMIC(x) test_bit(OF_DYNAMIC, &x->_flags)
70#define OF_MARK_DYNAMIC(x) set_bit(OF_DYNAMIC, &x->_flags) 70#define OF_MARK_DYNAMIC(x) set_bit(OF_DYNAMIC, &x->_flags)
71 71
72#define OF_BAD_ADDR ((u64)-1)
73
72static inline void set_node_proc_entry(struct device_node *dn, struct proc_dir_entry *de) 74static inline void set_node_proc_entry(struct device_node *dn, struct proc_dir_entry *de)
73{ 75{
74 dn->pde = de; 76 dn->pde = de;
@@ -101,6 +103,8 @@ extern int of_set_property(struct device_node *node, const char *name, void *val
101extern int of_getintprop_default(struct device_node *np, 103extern int of_getintprop_default(struct device_node *np,
102 const char *name, 104 const char *name,
103 int def); 105 int def);
106extern int of_n_addr_cells(struct device_node *np);
107extern int of_n_size_cells(struct device_node *np);
104 108
105extern void prom_build_devicetree(void); 109extern void prom_build_devicetree(void);
106 110
diff --git a/include/asm-sparc64/sbus.h b/include/asm-sparc64/sbus.h
index 56ee985e4605..7efd49d31bb8 100644
--- a/include/asm-sparc64/sbus.h
+++ b/include/asm-sparc64/sbus.h
@@ -80,7 +80,6 @@ struct sbus_bus {
80 int num_sbus_ranges; 80 int num_sbus_ranges;
81 81
82 int portid; 82 int portid;
83 void *starfire_cookie;
84}; 83};
85#define to_sbus(d) container_of(d, struct sbus_bus, ofdev.dev) 84#define to_sbus(d) container_of(d, struct sbus_bus, ofdev.dev)
86 85
diff --git a/include/asm-sparc64/socket.h b/include/asm-sparc64/socket.h
index 59987dad3359..754d46a50af3 100644
--- a/include/asm-sparc64/socket.h
+++ b/include/asm-sparc64/socket.h
@@ -48,6 +48,7 @@
48#define SCM_TIMESTAMP SO_TIMESTAMP 48#define SCM_TIMESTAMP SO_TIMESTAMP
49 49
50#define SO_PEERSEC 0x001e 50#define SO_PEERSEC 0x001e
51#define SO_PASSSEC 0x001f
51 52
52/* Security levels - as per NRL IPv6 - don't actually do anything */ 53/* Security levels - as per NRL IPv6 - don't actually do anything */
53#define SO_SECURITY_AUTHENTICATION 0x5001 54#define SO_SECURITY_AUTHENTICATION 0x5001
diff --git a/include/asm-sparc64/starfire.h b/include/asm-sparc64/starfire.h
index b606cb2b32a8..48b50b5e35b0 100644
--- a/include/asm-sparc64/starfire.h
+++ b/include/asm-sparc64/starfire.h
@@ -14,7 +14,7 @@ extern int this_is_starfire;
14extern void check_if_starfire(void); 14extern void check_if_starfire(void);
15extern void starfire_cpu_setup(void); 15extern void starfire_cpu_setup(void);
16extern int starfire_hard_smp_processor_id(void); 16extern int starfire_hard_smp_processor_id(void);
17extern void *starfire_hookup(int); 17extern void starfire_hookup(int);
18extern unsigned int starfire_translate(unsigned long imap, unsigned int upaid); 18extern unsigned int starfire_translate(unsigned long imap, unsigned int upaid);
19 19
20#endif 20#endif
diff --git a/include/asm-um/io.h b/include/asm-um/io.h
index 1934d9340e2c..44e8b8c772ae 100644
--- a/include/asm-um/io.h
+++ b/include/asm-um/io.h
@@ -45,8 +45,13 @@ static inline void writel(unsigned int b, volatile void __iomem *addr)
45{ 45{
46 *(volatile unsigned int __force *) addr = b; 46 *(volatile unsigned int __force *) addr = b;
47} 47}
48static inline void writeq(unsigned int b, volatile void __iomem *addr)
49{
50 *(volatile unsigned long long __force *) addr = b;
51}
48#define __raw_writeb writeb 52#define __raw_writeb writeb
49#define __raw_writew writew 53#define __raw_writew writew
50#define __raw_writel writel 54#define __raw_writel writel
55#define __raw_writeq writeq
51 56
52#endif 57#endif
diff --git a/include/asm-v850/socket.h b/include/asm-v850/socket.h
index 0240d366a0a4..0dfe55ac2ef2 100644
--- a/include/asm-v850/socket.h
+++ b/include/asm-v850/socket.h
@@ -48,5 +48,6 @@
48#define SO_ACCEPTCONN 30 48#define SO_ACCEPTCONN 30
49 49
50#define SO_PEERSEC 31 50#define SO_PEERSEC 31
51#define SO_PASSSEC 34
51 52
52#endif /* __V850_SOCKET_H__ */ 53#endif /* __V850_SOCKET_H__ */
diff --git a/include/asm-x86_64/socket.h b/include/asm-x86_64/socket.h
index f2cdbeae5d5b..b46702607933 100644
--- a/include/asm-x86_64/socket.h
+++ b/include/asm-x86_64/socket.h
@@ -48,5 +48,6 @@
48#define SO_ACCEPTCONN 30 48#define SO_ACCEPTCONN 30
49 49
50#define SO_PEERSEC 31 50#define SO_PEERSEC 31
51#define SO_PASSSEC 34
51 52
52#endif /* _ASM_SOCKET_H */ 53#endif /* _ASM_SOCKET_H */
diff --git a/include/asm-xtensa/socket.h b/include/asm-xtensa/socket.h
index 00f83f3a6d72..971d231be60e 100644
--- a/include/asm-xtensa/socket.h
+++ b/include/asm-xtensa/socket.h
@@ -59,5 +59,6 @@
59 59
60#define SO_ACCEPTCONN 30 60#define SO_ACCEPTCONN 30
61#define SO_PEERSEC 31 61#define SO_PEERSEC 31
62#define SO_PASSSEC 34
62 63
63#endif /* _XTENSA_SOCKET_H */ 64#endif /* _XTENSA_SOCKET_H */
diff --git a/include/linux/atmdev.h b/include/linux/atmdev.h
index 1eb238affb12..41788a31c438 100644
--- a/include/linux/atmdev.h
+++ b/include/linux/atmdev.h
@@ -7,6 +7,7 @@
7#define LINUX_ATMDEV_H 7#define LINUX_ATMDEV_H
8 8
9 9
10#include <linux/device.h>
10#include <linux/atmapi.h> 11#include <linux/atmapi.h>
11#include <linux/atm.h> 12#include <linux/atm.h>
12#include <linux/atmioc.h> 13#include <linux/atmioc.h>
@@ -358,6 +359,7 @@ struct atm_dev {
358 struct proc_dir_entry *proc_entry; /* proc entry */ 359 struct proc_dir_entry *proc_entry; /* proc entry */
359 char *proc_name; /* proc entry name */ 360 char *proc_name; /* proc entry name */
360#endif 361#endif
362 struct class_device class_dev; /* sysfs class device */
361 struct list_head dev_list; /* linkage */ 363 struct list_head dev_list; /* linkage */
362}; 364};
363 365
@@ -459,7 +461,7 @@ static inline void atm_dev_put(struct atm_dev *dev)
459 BUG_ON(!test_bit(ATM_DF_REMOVED, &dev->flags)); 461 BUG_ON(!test_bit(ATM_DF_REMOVED, &dev->flags));
460 if (dev->ops->dev_close) 462 if (dev->ops->dev_close)
461 dev->ops->dev_close(dev); 463 dev->ops->dev_close(dev);
462 kfree(dev); 464 class_device_put(&dev->class_dev);
463 } 465 }
464} 466}
465 467
diff --git a/include/linux/mm.h b/include/linux/mm.h
index c41a1299b8cf..75179529e399 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -36,7 +36,6 @@ extern int sysctl_legacy_va_layout;
36#include <asm/page.h> 36#include <asm/page.h>
37#include <asm/pgtable.h> 37#include <asm/pgtable.h>
38#include <asm/processor.h> 38#include <asm/processor.h>
39#include <asm/atomic.h>
40 39
41#define nth_page(page,n) pfn_to_page(page_to_pfn((page)) + (n)) 40#define nth_page(page,n) pfn_to_page(page_to_pfn((page)) + (n))
42 41
@@ -515,6 +514,11 @@ static inline void set_page_links(struct page *page, unsigned long zone,
515 set_page_section(page, pfn_to_section_nr(pfn)); 514 set_page_section(page, pfn_to_section_nr(pfn));
516} 515}
517 516
517/*
518 * Some inline functions in vmstat.h depend on page_zone()
519 */
520#include <linux/vmstat.h>
521
518#ifndef CONFIG_DISCONTIGMEM 522#ifndef CONFIG_DISCONTIGMEM
519/* The array of struct pages - for discontigmem use pgdat->lmem_map */ 523/* The array of struct pages - for discontigmem use pgdat->lmem_map */
520extern struct page *mem_map; 524extern struct page *mem_map;
diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h
index d6120fa69116..27e748eb72b0 100644
--- a/include/linux/mmzone.h
+++ b/include/linux/mmzone.h
@@ -46,6 +46,27 @@ struct zone_padding {
46#define ZONE_PADDING(name) 46#define ZONE_PADDING(name)
47#endif 47#endif
48 48
49enum zone_stat_item {
50 NR_ANON_PAGES, /* Mapped anonymous pages */
51 NR_FILE_MAPPED, /* pagecache pages mapped into pagetables.
52 only modified from process context */
53 NR_FILE_PAGES,
54 NR_SLAB, /* Pages used by slab allocator */
55 NR_PAGETABLE, /* used for pagetables */
56 NR_FILE_DIRTY,
57 NR_WRITEBACK,
58 NR_UNSTABLE_NFS, /* NFS unstable pages */
59 NR_BOUNCE,
60#ifdef CONFIG_NUMA
61 NUMA_HIT, /* allocated in intended node */
62 NUMA_MISS, /* allocated in non intended node */
63 NUMA_FOREIGN, /* was intended here, hit elsewhere */
64 NUMA_INTERLEAVE_HIT, /* interleaver preferred this zone */
65 NUMA_LOCAL, /* allocation from local node */
66 NUMA_OTHER, /* allocation from other node */
67#endif
68 NR_VM_ZONE_STAT_ITEMS };
69
49struct per_cpu_pages { 70struct per_cpu_pages {
50 int count; /* number of pages in the list */ 71 int count; /* number of pages in the list */
51 int high; /* high watermark, emptying needed */ 72 int high; /* high watermark, emptying needed */
@@ -55,13 +76,8 @@ struct per_cpu_pages {
55 76
56struct per_cpu_pageset { 77struct per_cpu_pageset {
57 struct per_cpu_pages pcp[2]; /* 0: hot. 1: cold */ 78 struct per_cpu_pages pcp[2]; /* 0: hot. 1: cold */
58#ifdef CONFIG_NUMA 79#ifdef CONFIG_SMP
59 unsigned long numa_hit; /* allocated in intended node */ 80 s8 vm_stat_diff[NR_VM_ZONE_STAT_ITEMS];
60 unsigned long numa_miss; /* allocated in non intended node */
61 unsigned long numa_foreign; /* was intended here, hit elsewhere */
62 unsigned long interleave_hit; /* interleaver prefered this zone */
63 unsigned long local_node; /* allocation from local node */
64 unsigned long other_node; /* allocation from other node */
65#endif 81#endif
66} ____cacheline_aligned_in_smp; 82} ____cacheline_aligned_in_smp;
67 83
@@ -165,12 +181,8 @@ struct zone {
165 /* A count of how many reclaimers are scanning this zone */ 181 /* A count of how many reclaimers are scanning this zone */
166 atomic_t reclaim_in_progress; 182 atomic_t reclaim_in_progress;
167 183
168 /* 184 /* Zone statistics */
169 * timestamp (in jiffies) of the last zone reclaim that did not 185 atomic_long_t vm_stat[NR_VM_ZONE_STAT_ITEMS];
170 * result in freeing of pages. This is used to avoid repeated scans
171 * if all memory in the zone is in use.
172 */
173 unsigned long last_unsuccessful_zone_reclaim;
174 186
175 /* 187 /*
176 * prev_priority holds the scanning priority for this zone. It is 188 * prev_priority holds the scanning priority for this zone. It is
diff --git a/include/linux/net.h b/include/linux/net.h
index 385e68f5bd93..b20c53c74413 100644
--- a/include/linux/net.h
+++ b/include/linux/net.h
@@ -61,6 +61,7 @@ typedef enum {
61#define SOCK_ASYNC_WAITDATA 1 61#define SOCK_ASYNC_WAITDATA 1
62#define SOCK_NOSPACE 2 62#define SOCK_NOSPACE 2
63#define SOCK_PASSCRED 3 63#define SOCK_PASSCRED 3
64#define SOCK_PASSSEC 4
64 65
65#ifndef ARCH_HAS_SOCKET_TYPES 66#ifndef ARCH_HAS_SOCKET_TYPES
66/** 67/**
diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h
index 03cd7551a7a1..aa2d3c12c4d8 100644
--- a/include/linux/netdevice.h
+++ b/include/linux/netdevice.h
@@ -315,6 +315,8 @@ struct net_device
315#define NETIF_F_GSO_SHIFT 16 315#define NETIF_F_GSO_SHIFT 16
316#define NETIF_F_TSO (SKB_GSO_TCPV4 << NETIF_F_GSO_SHIFT) 316#define NETIF_F_TSO (SKB_GSO_TCPV4 << NETIF_F_GSO_SHIFT)
317#define NETIF_F_UFO (SKB_GSO_UDPV4 << NETIF_F_GSO_SHIFT) 317#define NETIF_F_UFO (SKB_GSO_UDPV4 << NETIF_F_GSO_SHIFT)
318#define NETIF_F_GSO_ROBUST (SKB_GSO_DODGY << NETIF_F_GSO_SHIFT)
319#define NETIF_F_TSO_ECN (SKB_GSO_TCPV4_ECN << NETIF_F_GSO_SHIFT)
318 320
319#define NETIF_F_GEN_CSUM (NETIF_F_NO_CSUM | NETIF_F_HW_CSUM) 321#define NETIF_F_GEN_CSUM (NETIF_F_NO_CSUM | NETIF_F_HW_CSUM)
320#define NETIF_F_ALL_CSUM (NETIF_F_IP_CSUM | NETIF_F_GEN_CSUM) 322#define NETIF_F_ALL_CSUM (NETIF_F_IP_CSUM | NETIF_F_GEN_CSUM)
@@ -543,7 +545,8 @@ struct packet_type {
543 struct net_device *, 545 struct net_device *,
544 struct packet_type *, 546 struct packet_type *,
545 struct net_device *); 547 struct net_device *);
546 struct sk_buff *(*gso_segment)(struct sk_buff *skb, int sg); 548 struct sk_buff *(*gso_segment)(struct sk_buff *skb,
549 int features);
547 void *af_packet_priv; 550 void *af_packet_priv;
548 struct list_head list; 551 struct list_head list;
549}; 552};
@@ -968,7 +971,7 @@ extern int netdev_max_backlog;
968extern int weight_p; 971extern int weight_p;
969extern int netdev_set_master(struct net_device *dev, struct net_device *master); 972extern int netdev_set_master(struct net_device *dev, struct net_device *master);
970extern int skb_checksum_help(struct sk_buff *skb, int inward); 973extern int skb_checksum_help(struct sk_buff *skb, int inward);
971extern struct sk_buff *skb_gso_segment(struct sk_buff *skb, int sg); 974extern struct sk_buff *skb_gso_segment(struct sk_buff *skb, int features);
972#ifdef CONFIG_BUG 975#ifdef CONFIG_BUG
973extern void netdev_rx_csum_fault(struct net_device *dev); 976extern void netdev_rx_csum_fault(struct net_device *dev);
974#else 977#else
@@ -988,11 +991,16 @@ extern void dev_seq_stop(struct seq_file *seq, void *v);
988 991
989extern void linkwatch_run_queue(void); 992extern void linkwatch_run_queue(void);
990 993
994static inline int skb_gso_ok(struct sk_buff *skb, int features)
995{
996 int feature = skb_shinfo(skb)->gso_size ?
997 skb_shinfo(skb)->gso_type << NETIF_F_GSO_SHIFT : 0;
998 return (features & feature) == feature;
999}
1000
991static inline int netif_needs_gso(struct net_device *dev, struct sk_buff *skb) 1001static inline int netif_needs_gso(struct net_device *dev, struct sk_buff *skb)
992{ 1002{
993 int feature = skb_shinfo(skb)->gso_type << NETIF_F_GSO_SHIFT; 1003 return !skb_gso_ok(skb, dev->features);
994 return skb_shinfo(skb)->gso_size &&
995 (dev->features & feature) != feature;
996} 1004}
997 1005
998#endif /* __KERNEL__ */ 1006#endif /* __KERNEL__ */
diff --git a/include/linux/page-flags.h b/include/linux/page-flags.h
index 0c076d58c676..5748642e9f36 100644
--- a/include/linux/page-flags.h
+++ b/include/linux/page-flags.h
@@ -5,12 +5,8 @@
5#ifndef PAGE_FLAGS_H 5#ifndef PAGE_FLAGS_H
6#define PAGE_FLAGS_H 6#define PAGE_FLAGS_H
7 7
8#include <linux/percpu.h>
9#include <linux/cache.h>
10#include <linux/types.h> 8#include <linux/types.h>
11 9
12#include <asm/pgtable.h>
13
14/* 10/*
15 * Various page->flags bits: 11 * Various page->flags bits:
16 * 12 *
@@ -103,134 +99,6 @@
103#endif 99#endif
104 100
105/* 101/*
106 * Global page accounting. One instance per CPU. Only unsigned longs are
107 * allowed.
108 *
109 * - Fields can be modified with xxx_page_state and xxx_page_state_zone at
110 * any time safely (which protects the instance from modification by
111 * interrupt.
112 * - The __xxx_page_state variants can be used safely when interrupts are
113 * disabled.
114 * - The __xxx_page_state variants can be used if the field is only
115 * modified from process context and protected from preemption, or only
116 * modified from interrupt context. In this case, the field should be
117 * commented here.
118 */
119struct page_state {
120 unsigned long nr_dirty; /* Dirty writeable pages */
121 unsigned long nr_writeback; /* Pages under writeback */
122 unsigned long nr_unstable; /* NFS unstable pages */
123 unsigned long nr_page_table_pages;/* Pages used for pagetables */
124 unsigned long nr_mapped; /* mapped into pagetables.
125 * only modified from process context */
126 unsigned long nr_slab; /* In slab */
127#define GET_PAGE_STATE_LAST nr_slab
128
129 /*
130 * The below are zeroed by get_page_state(). Use get_full_page_state()
131 * to add up all these.
132 */
133 unsigned long pgpgin; /* Disk reads */
134 unsigned long pgpgout; /* Disk writes */
135 unsigned long pswpin; /* swap reads */
136 unsigned long pswpout; /* swap writes */
137
138 unsigned long pgalloc_high; /* page allocations */
139 unsigned long pgalloc_normal;
140 unsigned long pgalloc_dma32;
141 unsigned long pgalloc_dma;
142
143 unsigned long pgfree; /* page freeings */
144 unsigned long pgactivate; /* pages moved inactive->active */
145 unsigned long pgdeactivate; /* pages moved active->inactive */
146
147 unsigned long pgfault; /* faults (major+minor) */
148 unsigned long pgmajfault; /* faults (major only) */
149
150 unsigned long pgrefill_high; /* inspected in refill_inactive_zone */
151 unsigned long pgrefill_normal;
152 unsigned long pgrefill_dma32;
153 unsigned long pgrefill_dma;
154
155 unsigned long pgsteal_high; /* total highmem pages reclaimed */
156 unsigned long pgsteal_normal;
157 unsigned long pgsteal_dma32;
158 unsigned long pgsteal_dma;
159
160 unsigned long pgscan_kswapd_high;/* total highmem pages scanned */
161 unsigned long pgscan_kswapd_normal;
162 unsigned long pgscan_kswapd_dma32;
163 unsigned long pgscan_kswapd_dma;
164
165 unsigned long pgscan_direct_high;/* total highmem pages scanned */
166 unsigned long pgscan_direct_normal;
167 unsigned long pgscan_direct_dma32;
168 unsigned long pgscan_direct_dma;
169
170 unsigned long pginodesteal; /* pages reclaimed via inode freeing */
171 unsigned long slabs_scanned; /* slab objects scanned */
172 unsigned long kswapd_steal; /* pages reclaimed by kswapd */
173 unsigned long kswapd_inodesteal;/* reclaimed via kswapd inode freeing */
174 unsigned long pageoutrun; /* kswapd's calls to page reclaim */
175 unsigned long allocstall; /* direct reclaim calls */
176
177 unsigned long pgrotated; /* pages rotated to tail of the LRU */
178 unsigned long nr_bounce; /* pages for bounce buffers */
179};
180
181extern void get_page_state(struct page_state *ret);
182extern void get_page_state_node(struct page_state *ret, int node);
183extern void get_full_page_state(struct page_state *ret);
184extern unsigned long read_page_state_offset(unsigned long offset);
185extern void mod_page_state_offset(unsigned long offset, unsigned long delta);
186extern void __mod_page_state_offset(unsigned long offset, unsigned long delta);
187
188#define read_page_state(member) \
189 read_page_state_offset(offsetof(struct page_state, member))
190
191#define mod_page_state(member, delta) \
192 mod_page_state_offset(offsetof(struct page_state, member), (delta))
193
194#define __mod_page_state(member, delta) \
195 __mod_page_state_offset(offsetof(struct page_state, member), (delta))
196
197#define inc_page_state(member) mod_page_state(member, 1UL)
198#define dec_page_state(member) mod_page_state(member, 0UL - 1)
199#define add_page_state(member,delta) mod_page_state(member, (delta))
200#define sub_page_state(member,delta) mod_page_state(member, 0UL - (delta))
201
202#define __inc_page_state(member) __mod_page_state(member, 1UL)
203#define __dec_page_state(member) __mod_page_state(member, 0UL - 1)
204#define __add_page_state(member,delta) __mod_page_state(member, (delta))
205#define __sub_page_state(member,delta) __mod_page_state(member, 0UL - (delta))
206
207#define page_state(member) (*__page_state(offsetof(struct page_state, member)))
208
209#define state_zone_offset(zone, member) \
210({ \
211 unsigned offset; \
212 if (is_highmem(zone)) \
213 offset = offsetof(struct page_state, member##_high); \
214 else if (is_normal(zone)) \
215 offset = offsetof(struct page_state, member##_normal); \
216 else if (is_dma32(zone)) \
217 offset = offsetof(struct page_state, member##_dma32); \
218 else \
219 offset = offsetof(struct page_state, member##_dma); \
220 offset; \
221})
222
223#define __mod_page_state_zone(zone, member, delta) \
224 do { \
225 __mod_page_state_offset(state_zone_offset(zone, member), (delta)); \
226 } while (0)
227
228#define mod_page_state_zone(zone, member, delta) \
229 do { \
230 mod_page_state_offset(state_zone_offset(zone, member), (delta)); \
231 } while (0)
232
233/*
234 * Manipulation of page state flags 102 * Manipulation of page state flags
235 */ 103 */
236#define PageLocked(page) \ 104#define PageLocked(page) \
@@ -254,7 +122,14 @@ extern void __mod_page_state_offset(unsigned long offset, unsigned long delta);
254#define TestClearPageReferenced(page) test_and_clear_bit(PG_referenced, &(page)->flags) 122#define TestClearPageReferenced(page) test_and_clear_bit(PG_referenced, &(page)->flags)
255 123
256#define PageUptodate(page) test_bit(PG_uptodate, &(page)->flags) 124#define PageUptodate(page) test_bit(PG_uptodate, &(page)->flags)
257#ifndef SetPageUptodate 125#ifdef CONFIG_S390
126#define SetPageUptodate(_page) \
127 do { \
128 struct page *__page = (_page); \
129 if (!test_and_set_bit(PG_uptodate, &__page->flags)) \
130 page_test_and_clear_dirty(_page); \
131 } while (0)
132#else
258#define SetPageUptodate(page) set_bit(PG_uptodate, &(page)->flags) 133#define SetPageUptodate(page) set_bit(PG_uptodate, &(page)->flags)
259#endif 134#endif
260#define ClearPageUptodate(page) clear_bit(PG_uptodate, &(page)->flags) 135#define ClearPageUptodate(page) clear_bit(PG_uptodate, &(page)->flags)
@@ -306,7 +181,7 @@ extern void __mod_page_state_offset(unsigned long offset, unsigned long delta);
306 do { \ 181 do { \
307 if (!test_and_set_bit(PG_writeback, \ 182 if (!test_and_set_bit(PG_writeback, \
308 &(page)->flags)) \ 183 &(page)->flags)) \
309 inc_page_state(nr_writeback); \ 184 inc_zone_page_state(page, NR_WRITEBACK); \
310 } while (0) 185 } while (0)
311#define TestSetPageWriteback(page) \ 186#define TestSetPageWriteback(page) \
312 ({ \ 187 ({ \
@@ -314,14 +189,14 @@ extern void __mod_page_state_offset(unsigned long offset, unsigned long delta);
314 ret = test_and_set_bit(PG_writeback, \ 189 ret = test_and_set_bit(PG_writeback, \
315 &(page)->flags); \ 190 &(page)->flags); \
316 if (!ret) \ 191 if (!ret) \
317 inc_page_state(nr_writeback); \ 192 inc_zone_page_state(page, NR_WRITEBACK); \
318 ret; \ 193 ret; \
319 }) 194 })
320#define ClearPageWriteback(page) \ 195#define ClearPageWriteback(page) \
321 do { \ 196 do { \
322 if (test_and_clear_bit(PG_writeback, \ 197 if (test_and_clear_bit(PG_writeback, \
323 &(page)->flags)) \ 198 &(page)->flags)) \
324 dec_page_state(nr_writeback); \ 199 dec_zone_page_state(page, NR_WRITEBACK); \
325 } while (0) 200 } while (0)
326#define TestClearPageWriteback(page) \ 201#define TestClearPageWriteback(page) \
327 ({ \ 202 ({ \
@@ -329,7 +204,7 @@ extern void __mod_page_state_offset(unsigned long offset, unsigned long delta);
329 ret = test_and_clear_bit(PG_writeback, \ 204 ret = test_and_clear_bit(PG_writeback, \
330 &(page)->flags); \ 205 &(page)->flags); \
331 if (ret) \ 206 if (ret) \
332 dec_page_state(nr_writeback); \ 207 dec_zone_page_state(page, NR_WRITEBACK); \
333 ret; \ 208 ret; \
334 }) 209 })
335 210
diff --git a/include/linux/pagemap.h b/include/linux/pagemap.h
index 1245df7141aa..0a2f5d27f60e 100644
--- a/include/linux/pagemap.h
+++ b/include/linux/pagemap.h
@@ -113,51 +113,6 @@ int add_to_page_cache_lru(struct page *page, struct address_space *mapping,
113extern void remove_from_page_cache(struct page *page); 113extern void remove_from_page_cache(struct page *page);
114extern void __remove_from_page_cache(struct page *page); 114extern void __remove_from_page_cache(struct page *page);
115 115
116extern atomic_t nr_pagecache;
117
118#ifdef CONFIG_SMP
119
120#define PAGECACHE_ACCT_THRESHOLD max(16, NR_CPUS * 2)
121DECLARE_PER_CPU(long, nr_pagecache_local);
122
123/*
124 * pagecache_acct implements approximate accounting for pagecache.
125 * vm_enough_memory() do not need high accuracy. Writers will keep
126 * an offset in their per-cpu arena and will spill that into the
127 * global count whenever the absolute value of the local count
128 * exceeds the counter's threshold.
129 *
130 * MUST be protected from preemption.
131 * current protection is mapping->page_lock.
132 */
133static inline void pagecache_acct(int count)
134{
135 long *local;
136
137 local = &__get_cpu_var(nr_pagecache_local);
138 *local += count;
139 if (*local > PAGECACHE_ACCT_THRESHOLD || *local < -PAGECACHE_ACCT_THRESHOLD) {
140 atomic_add(*local, &nr_pagecache);
141 *local = 0;
142 }
143}
144
145#else
146
147static inline void pagecache_acct(int count)
148{
149 atomic_add(count, &nr_pagecache);
150}
151#endif
152
153static inline unsigned long get_page_cache_size(void)
154{
155 int ret = atomic_read(&nr_pagecache);
156 if (unlikely(ret < 0))
157 ret = 0;
158 return ret;
159}
160
161/* 116/*
162 * Return byte-offset into filesystem object for page. 117 * Return byte-offset into filesystem object for page.
163 */ 118 */
diff --git a/include/linux/rcupdate.h b/include/linux/rcupdate.h
index 48dfe00070c7..b4ca73d65891 100644
--- a/include/linux/rcupdate.h
+++ b/include/linux/rcupdate.h
@@ -163,14 +163,22 @@ extern int rcu_needs_cpu(int cpu);
163 * 163 *
164 * It is illegal to block while in an RCU read-side critical section. 164 * It is illegal to block while in an RCU read-side critical section.
165 */ 165 */
166#define rcu_read_lock() preempt_disable() 166#define rcu_read_lock() \
167 do { \
168 preempt_disable(); \
169 __acquire(RCU); \
170 } while(0)
167 171
168/** 172/**
169 * rcu_read_unlock - marks the end of an RCU read-side critical section. 173 * rcu_read_unlock - marks the end of an RCU read-side critical section.
170 * 174 *
171 * See rcu_read_lock() for more information. 175 * See rcu_read_lock() for more information.
172 */ 176 */
173#define rcu_read_unlock() preempt_enable() 177#define rcu_read_unlock() \
178 do { \
179 __release(RCU); \
180 preempt_enable(); \
181 } while(0)
174 182
175/* 183/*
176 * So where is rcu_write_lock()? It does not exist, as there is no 184 * So where is rcu_write_lock()? It does not exist, as there is no
@@ -193,14 +201,22 @@ extern int rcu_needs_cpu(int cpu);
193 * can use just rcu_read_lock(). 201 * can use just rcu_read_lock().
194 * 202 *
195 */ 203 */
196#define rcu_read_lock_bh() local_bh_disable() 204#define rcu_read_lock_bh() \
205 do { \
206 local_bh_disable(); \
207 __acquire(RCU_BH); \
208 } while(0)
197 209
198/* 210/*
199 * rcu_read_unlock_bh - marks the end of a softirq-only RCU critical section 211 * rcu_read_unlock_bh - marks the end of a softirq-only RCU critical section
200 * 212 *
201 * See rcu_read_lock_bh() for more information. 213 * See rcu_read_lock_bh() for more information.
202 */ 214 */
203#define rcu_read_unlock_bh() local_bh_enable() 215#define rcu_read_unlock_bh() \
216 do { \
217 __release(RCU_BH); \
218 local_bh_enable(); \
219 } while(0)
204 220
205/** 221/**
206 * rcu_dereference - fetch an RCU-protected pointer in an 222 * rcu_dereference - fetch an RCU-protected pointer in an
diff --git a/include/linux/rtc.h b/include/linux/rtc.h
index 36e2bf4b4315..5371e4e74595 100644
--- a/include/linux/rtc.h
+++ b/include/linux/rtc.h
@@ -34,8 +34,8 @@ struct rtc_time {
34 * alarm API. 34 * alarm API.
35 */ 35 */
36struct rtc_wkalrm { 36struct rtc_wkalrm {
37 unsigned char enabled; /* 0 = alarm disable, 1 = alarm disabled */ 37 unsigned char enabled; /* 0 = alarm disabled, 1 = alarm enabled */
38 unsigned char pending; /* 0 = alarm pending, 1 = alarm not pending */ 38 unsigned char pending; /* 0 = alarm not pending, 1 = alarm pending */
39 struct rtc_time time; /* time the alarm is set to */ 39 struct rtc_time time; /* time the alarm is set to */
40}; 40};
41 41
diff --git a/include/linux/sched.h b/include/linux/sched.h
index 821f0481ebe1..aaf723308ed4 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -1153,7 +1153,7 @@ extern int force_sig_info(int, struct siginfo *, struct task_struct *);
1153extern int __kill_pg_info(int sig, struct siginfo *info, pid_t pgrp); 1153extern int __kill_pg_info(int sig, struct siginfo *info, pid_t pgrp);
1154extern int kill_pg_info(int, struct siginfo *, pid_t); 1154extern int kill_pg_info(int, struct siginfo *, pid_t);
1155extern int kill_proc_info(int, struct siginfo *, pid_t); 1155extern int kill_proc_info(int, struct siginfo *, pid_t);
1156extern int kill_proc_info_as_uid(int, struct siginfo *, pid_t, uid_t, uid_t); 1156extern int kill_proc_info_as_uid(int, struct siginfo *, pid_t, uid_t, uid_t, u32);
1157extern void do_notify_parent(struct task_struct *, int); 1157extern void do_notify_parent(struct task_struct *, int);
1158extern void force_sig(int, struct task_struct *); 1158extern void force_sig(int, struct task_struct *);
1159extern void force_sig_specific(int, struct task_struct *); 1159extern void force_sig_specific(int, struct task_struct *);
diff --git a/include/linux/security.h b/include/linux/security.h
index 51805806f974..f75303831d09 100644
--- a/include/linux/security.h
+++ b/include/linux/security.h
@@ -67,7 +67,7 @@ struct xfrm_state;
67struct xfrm_user_sec_ctx; 67struct xfrm_user_sec_ctx;
68 68
69extern int cap_netlink_send(struct sock *sk, struct sk_buff *skb); 69extern int cap_netlink_send(struct sock *sk, struct sk_buff *skb);
70extern int cap_netlink_recv(struct sk_buff *skb); 70extern int cap_netlink_recv(struct sk_buff *skb, int cap);
71 71
72/* 72/*
73 * Values used in the task_security_ops calls 73 * Values used in the task_security_ops calls
@@ -567,6 +567,9 @@ struct swap_info_struct;
567 * @p. 567 * @p.
568 * @p contains the task_struct for the process. 568 * @p contains the task_struct for the process.
569 * Return 0 if permission is granted. 569 * Return 0 if permission is granted.
570 * @task_getsecid:
571 * Retrieve the security identifier of the process @p.
572 * @p contains the task_struct for the process and place is into @secid.
570 * @task_setgroups: 573 * @task_setgroups:
571 * Check permission before setting the supplementary group set of the 574 * Check permission before setting the supplementary group set of the
572 * current process. 575 * current process.
@@ -582,6 +585,10 @@ struct swap_info_struct;
582 * @p contains the task_struct of process. 585 * @p contains the task_struct of process.
583 * @ioprio contains the new ioprio value 586 * @ioprio contains the new ioprio value
584 * Return 0 if permission is granted. 587 * Return 0 if permission is granted.
588 * @task_getioprio
589 * Check permission before getting the ioprio value of @p.
590 * @p contains the task_struct of process.
591 * Return 0 if permission is granted.
585 * @task_setrlimit: 592 * @task_setrlimit:
586 * Check permission before setting the resource limits of the current 593 * Check permission before setting the resource limits of the current
587 * process for @resource to @new_rlim. The old resource limit values can 594 * process for @resource to @new_rlim. The old resource limit values can
@@ -615,6 +622,7 @@ struct swap_info_struct;
615 * @p contains the task_struct for process. 622 * @p contains the task_struct for process.
616 * @info contains the signal information. 623 * @info contains the signal information.
617 * @sig contains the signal value. 624 * @sig contains the signal value.
625 * @secid contains the sid of the process where the signal originated
618 * Return 0 if permission is granted. 626 * Return 0 if permission is granted.
619 * @task_wait: 627 * @task_wait:
620 * Check permission before allowing a process to reap a child process @p 628 * Check permission before allowing a process to reap a child process @p
@@ -656,6 +664,7 @@ struct swap_info_struct;
656 * Check permission before processing the received netlink message in 664 * Check permission before processing the received netlink message in
657 * @skb. 665 * @skb.
658 * @skb contains the sk_buff structure for the netlink message. 666 * @skb contains the sk_buff structure for the netlink message.
667 * @cap indicates the capability required
659 * Return 0 if permission is granted. 668 * Return 0 if permission is granted.
660 * 669 *
661 * Security hooks for Unix domain networking. 670 * Security hooks for Unix domain networking.
@@ -1218,16 +1227,18 @@ struct security_operations {
1218 int (*task_setpgid) (struct task_struct * p, pid_t pgid); 1227 int (*task_setpgid) (struct task_struct * p, pid_t pgid);
1219 int (*task_getpgid) (struct task_struct * p); 1228 int (*task_getpgid) (struct task_struct * p);
1220 int (*task_getsid) (struct task_struct * p); 1229 int (*task_getsid) (struct task_struct * p);
1230 void (*task_getsecid) (struct task_struct * p, u32 * secid);
1221 int (*task_setgroups) (struct group_info *group_info); 1231 int (*task_setgroups) (struct group_info *group_info);
1222 int (*task_setnice) (struct task_struct * p, int nice); 1232 int (*task_setnice) (struct task_struct * p, int nice);
1223 int (*task_setioprio) (struct task_struct * p, int ioprio); 1233 int (*task_setioprio) (struct task_struct * p, int ioprio);
1234 int (*task_getioprio) (struct task_struct * p);
1224 int (*task_setrlimit) (unsigned int resource, struct rlimit * new_rlim); 1235 int (*task_setrlimit) (unsigned int resource, struct rlimit * new_rlim);
1225 int (*task_setscheduler) (struct task_struct * p, int policy, 1236 int (*task_setscheduler) (struct task_struct * p, int policy,
1226 struct sched_param * lp); 1237 struct sched_param * lp);
1227 int (*task_getscheduler) (struct task_struct * p); 1238 int (*task_getscheduler) (struct task_struct * p);
1228 int (*task_movememory) (struct task_struct * p); 1239 int (*task_movememory) (struct task_struct * p);
1229 int (*task_kill) (struct task_struct * p, 1240 int (*task_kill) (struct task_struct * p,
1230 struct siginfo * info, int sig); 1241 struct siginfo * info, int sig, u32 secid);
1231 int (*task_wait) (struct task_struct * p); 1242 int (*task_wait) (struct task_struct * p);
1232 int (*task_prctl) (int option, unsigned long arg2, 1243 int (*task_prctl) (int option, unsigned long arg2,
1233 unsigned long arg3, unsigned long arg4, 1244 unsigned long arg3, unsigned long arg4,
@@ -1266,7 +1277,7 @@ struct security_operations {
1266 struct sembuf * sops, unsigned nsops, int alter); 1277 struct sembuf * sops, unsigned nsops, int alter);
1267 1278
1268 int (*netlink_send) (struct sock * sk, struct sk_buff * skb); 1279 int (*netlink_send) (struct sock * sk, struct sk_buff * skb);
1269 int (*netlink_recv) (struct sk_buff * skb); 1280 int (*netlink_recv) (struct sk_buff * skb, int cap);
1270 1281
1271 /* allow module stacking */ 1282 /* allow module stacking */
1272 int (*register_security) (const char *name, 1283 int (*register_security) (const char *name,
@@ -1838,6 +1849,11 @@ static inline int security_task_getsid (struct task_struct *p)
1838 return security_ops->task_getsid (p); 1849 return security_ops->task_getsid (p);
1839} 1850}
1840 1851
1852static inline void security_task_getsecid (struct task_struct *p, u32 *secid)
1853{
1854 security_ops->task_getsecid (p, secid);
1855}
1856
1841static inline int security_task_setgroups (struct group_info *group_info) 1857static inline int security_task_setgroups (struct group_info *group_info)
1842{ 1858{
1843 return security_ops->task_setgroups (group_info); 1859 return security_ops->task_setgroups (group_info);
@@ -1853,6 +1869,11 @@ static inline int security_task_setioprio (struct task_struct *p, int ioprio)
1853 return security_ops->task_setioprio (p, ioprio); 1869 return security_ops->task_setioprio (p, ioprio);
1854} 1870}
1855 1871
1872static inline int security_task_getioprio (struct task_struct *p)
1873{
1874 return security_ops->task_getioprio (p);
1875}
1876
1856static inline int security_task_setrlimit (unsigned int resource, 1877static inline int security_task_setrlimit (unsigned int resource,
1857 struct rlimit *new_rlim) 1878 struct rlimit *new_rlim)
1858{ 1879{
@@ -1877,9 +1898,10 @@ static inline int security_task_movememory (struct task_struct *p)
1877} 1898}
1878 1899
1879static inline int security_task_kill (struct task_struct *p, 1900static inline int security_task_kill (struct task_struct *p,
1880 struct siginfo *info, int sig) 1901 struct siginfo *info, int sig,
1902 u32 secid)
1881{ 1903{
1882 return security_ops->task_kill (p, info, sig); 1904 return security_ops->task_kill (p, info, sig, secid);
1883} 1905}
1884 1906
1885static inline int security_task_wait (struct task_struct *p) 1907static inline int security_task_wait (struct task_struct *p)
@@ -2032,9 +2054,9 @@ static inline int security_netlink_send(struct sock *sk, struct sk_buff * skb)
2032 return security_ops->netlink_send(sk, skb); 2054 return security_ops->netlink_send(sk, skb);
2033} 2055}
2034 2056
2035static inline int security_netlink_recv(struct sk_buff * skb) 2057static inline int security_netlink_recv(struct sk_buff * skb, int cap)
2036{ 2058{
2037 return security_ops->netlink_recv(skb); 2059 return security_ops->netlink_recv(skb, cap);
2038} 2060}
2039 2061
2040/* prototypes */ 2062/* prototypes */
@@ -2490,6 +2512,9 @@ static inline int security_task_getsid (struct task_struct *p)
2490 return 0; 2512 return 0;
2491} 2513}
2492 2514
2515static inline void security_task_getsecid (struct task_struct *p, u32 *secid)
2516{ }
2517
2493static inline int security_task_setgroups (struct group_info *group_info) 2518static inline int security_task_setgroups (struct group_info *group_info)
2494{ 2519{
2495 return 0; 2520 return 0;
@@ -2505,6 +2530,11 @@ static inline int security_task_setioprio (struct task_struct *p, int ioprio)
2505 return 0; 2530 return 0;
2506} 2531}
2507 2532
2533static inline int security_task_getioprio (struct task_struct *p)
2534{
2535 return 0;
2536}
2537
2508static inline int security_task_setrlimit (unsigned int resource, 2538static inline int security_task_setrlimit (unsigned int resource,
2509 struct rlimit *new_rlim) 2539 struct rlimit *new_rlim)
2510{ 2540{
@@ -2529,7 +2559,8 @@ static inline int security_task_movememory (struct task_struct *p)
2529} 2559}
2530 2560
2531static inline int security_task_kill (struct task_struct *p, 2561static inline int security_task_kill (struct task_struct *p,
2532 struct siginfo *info, int sig) 2562 struct siginfo *info, int sig,
2563 u32 secid)
2533{ 2564{
2534 return 0; 2565 return 0;
2535} 2566}
@@ -2670,9 +2701,9 @@ static inline int security_netlink_send (struct sock *sk, struct sk_buff *skb)
2670 return cap_netlink_send (sk, skb); 2701 return cap_netlink_send (sk, skb);
2671} 2702}
2672 2703
2673static inline int security_netlink_recv (struct sk_buff *skb) 2704static inline int security_netlink_recv (struct sk_buff *skb, int cap)
2674{ 2705{
2675 return cap_netlink_recv (skb); 2706 return cap_netlink_recv (skb, cap);
2676} 2707}
2677 2708
2678static inline struct dentry *securityfs_create_dir(const char *name, 2709static inline struct dentry *securityfs_create_dir(const char *name,
diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h
index 16eef03ce0eb..59918be91d0a 100644
--- a/include/linux/skbuff.h
+++ b/include/linux/skbuff.h
@@ -172,6 +172,12 @@ enum {
172enum { 172enum {
173 SKB_GSO_TCPV4 = 1 << 0, 173 SKB_GSO_TCPV4 = 1 << 0,
174 SKB_GSO_UDPV4 = 1 << 1, 174 SKB_GSO_UDPV4 = 1 << 1,
175
176 /* This indicates the skb is from an untrusted source. */
177 SKB_GSO_DODGY = 1 << 2,
178
179 /* This indicates the tcp segment has CWR set. */
180 SKB_GSO_TCPV4_ECN = 1 << 3,
175}; 181};
176 182
177/** 183/**
@@ -1298,8 +1304,7 @@ extern void skb_copy_and_csum_dev(const struct sk_buff *skb, u8 *to);
1298extern void skb_split(struct sk_buff *skb, 1304extern void skb_split(struct sk_buff *skb,
1299 struct sk_buff *skb1, const u32 len); 1305 struct sk_buff *skb1, const u32 len);
1300 1306
1301extern void skb_release_data(struct sk_buff *skb); 1307extern struct sk_buff *skb_segment(struct sk_buff *skb, int features);
1302extern struct sk_buff *skb_segment(struct sk_buff *skb, int sg);
1303 1308
1304static inline void *skb_header_pointer(const struct sk_buff *skb, int offset, 1309static inline void *skb_header_pointer(const struct sk_buff *skb, int offset,
1305 int len, void *buffer) 1310 int len, void *buffer)
diff --git a/include/linux/smp.h b/include/linux/smp.h
index c93c3fe4308c..837e8bce1349 100644
--- a/include/linux/smp.h
+++ b/include/linux/smp.h
@@ -125,4 +125,6 @@ static inline void smp_send_reschedule(int cpu) { }
125#define put_cpu() preempt_enable() 125#define put_cpu() preempt_enable()
126#define put_cpu_no_resched() preempt_enable_no_resched() 126#define put_cpu_no_resched() preempt_enable_no_resched()
127 127
128void smp_setup_processor_id(void);
129
128#endif /* __LINUX_SMP_H */ 130#endif /* __LINUX_SMP_H */
diff --git a/include/linux/sunrpc/svc.h b/include/linux/sunrpc/svc.h
index 503564384545..7b27c09b5604 100644
--- a/include/linux/sunrpc/svc.h
+++ b/include/linux/sunrpc/svc.h
@@ -159,7 +159,9 @@ struct svc_rqst {
159 * determine what device number 159 * determine what device number
160 * to report (real or virtual) 160 * to report (real or virtual)
161 */ 161 */
162 162 int rq_sendfile_ok; /* turned off in gss privacy
163 * to prevent encrypting page
164 * cache pages */
163 wait_queue_head_t rq_wait; /* synchronization */ 165 wait_queue_head_t rq_wait; /* synchronization */
164}; 166};
165 167
diff --git a/include/linux/swap.h b/include/linux/swap.h
index c41e2d6d1acc..cf6ca6e377bd 100644
--- a/include/linux/swap.h
+++ b/include/linux/swap.h
@@ -189,7 +189,6 @@ extern long vm_total_pages;
189 189
190#ifdef CONFIG_NUMA 190#ifdef CONFIG_NUMA
191extern int zone_reclaim_mode; 191extern int zone_reclaim_mode;
192extern int zone_reclaim_interval;
193extern int zone_reclaim(struct zone *, gfp_t, unsigned int); 192extern int zone_reclaim(struct zone *, gfp_t, unsigned int);
194#else 193#else
195#define zone_reclaim_mode 0 194#define zone_reclaim_mode 0
diff --git a/include/linux/vmstat.h b/include/linux/vmstat.h
new file mode 100644
index 000000000000..3e0daf54133e
--- /dev/null
+++ b/include/linux/vmstat.h
@@ -0,0 +1,215 @@
1#ifndef _LINUX_VMSTAT_H
2#define _LINUX_VMSTAT_H
3
4#include <linux/types.h>
5#include <linux/percpu.h>
6#include <linux/config.h>
7#include <linux/mmzone.h>
8#include <asm/atomic.h>
9
10#ifdef CONFIG_VM_EVENT_COUNTERS
11/*
12 * Light weight per cpu counter implementation.
13 *
14 * Counters should only be incremented and no critical kernel component
15 * should rely on the counter values.
16 *
17 * Counters are handled completely inline. On many platforms the code
18 * generated will simply be the increment of a global address.
19 */
20
21#define FOR_ALL_ZONES(x) x##_DMA, x##_DMA32, x##_NORMAL, x##_HIGH
22
23enum vm_event_item { PGPGIN, PGPGOUT, PSWPIN, PSWPOUT,
24 FOR_ALL_ZONES(PGALLOC),
25 PGFREE, PGACTIVATE, PGDEACTIVATE,
26 PGFAULT, PGMAJFAULT,
27 FOR_ALL_ZONES(PGREFILL),
28 FOR_ALL_ZONES(PGSTEAL),
29 FOR_ALL_ZONES(PGSCAN_KSWAPD),
30 FOR_ALL_ZONES(PGSCAN_DIRECT),
31 PGINODESTEAL, SLABS_SCANNED, KSWAPD_STEAL, KSWAPD_INODESTEAL,
32 PAGEOUTRUN, ALLOCSTALL, PGROTATED,
33 NR_VM_EVENT_ITEMS
34};
35
36struct vm_event_state {
37 unsigned long event[NR_VM_EVENT_ITEMS];
38};
39
40DECLARE_PER_CPU(struct vm_event_state, vm_event_states);
41
42static inline void __count_vm_event(enum vm_event_item item)
43{
44 __get_cpu_var(vm_event_states.event[item])++;
45}
46
47static inline void count_vm_event(enum vm_event_item item)
48{
49 get_cpu_var(vm_event_states.event[item])++;
50 put_cpu();
51}
52
53static inline void __count_vm_events(enum vm_event_item item, long delta)
54{
55 __get_cpu_var(vm_event_states.event[item]) += delta;
56}
57
58static inline void count_vm_events(enum vm_event_item item, long delta)
59{
60 get_cpu_var(vm_event_states.event[item])++;
61 put_cpu();
62}
63
64extern void all_vm_events(unsigned long *);
65extern void vm_events_fold_cpu(int cpu);
66
67#else
68
69/* Disable counters */
70#define get_cpu_vm_events(e) 0L
71#define count_vm_event(e) do { } while (0)
72#define count_vm_events(e,d) do { } while (0)
73#define __count_vm_event(e) do { } while (0)
74#define __count_vm_events(e,d) do { } while (0)
75#define vm_events_fold_cpu(x) do { } while (0)
76
77#endif /* CONFIG_VM_EVENT_COUNTERS */
78
79#define __count_zone_vm_events(item, zone, delta) \
80 __count_vm_events(item##_DMA + zone_idx(zone), delta)
81
82/*
83 * Zone based page accounting with per cpu differentials.
84 */
85extern atomic_long_t vm_stat[NR_VM_ZONE_STAT_ITEMS];
86
87static inline void zone_page_state_add(long x, struct zone *zone,
88 enum zone_stat_item item)
89{
90 atomic_long_add(x, &zone->vm_stat[item]);
91 atomic_long_add(x, &vm_stat[item]);
92}
93
94static inline unsigned long global_page_state(enum zone_stat_item item)
95{
96 long x = atomic_long_read(&vm_stat[item]);
97#ifdef CONFIG_SMP
98 if (x < 0)
99 x = 0;
100#endif
101 return x;
102}
103
104static inline unsigned long zone_page_state(struct zone *zone,
105 enum zone_stat_item item)
106{
107 long x = atomic_long_read(&zone->vm_stat[item]);
108#ifdef CONFIG_SMP
109 if (x < 0)
110 x = 0;
111#endif
112 return x;
113}
114
115#ifdef CONFIG_NUMA
116/*
117 * Determine the per node value of a stat item. This function
118 * is called frequently in a NUMA machine, so try to be as
119 * frugal as possible.
120 */
121static inline unsigned long node_page_state(int node,
122 enum zone_stat_item item)
123{
124 struct zone *zones = NODE_DATA(node)->node_zones;
125
126 return
127#ifndef CONFIG_DMA_IS_NORMAL
128#if !defined(CONFIG_DMA_IS_DMA32) && BITS_PER_LONG >= 64
129 zone_page_state(&zones[ZONE_DMA32], item) +
130#endif
131 zone_page_state(&zones[ZONE_NORMAL], item) +
132#endif
133#ifdef CONFIG_HIGHMEM
134 zone_page_state(&zones[ZONE_HIGHMEM], item) +
135#endif
136 zone_page_state(&zones[ZONE_DMA], item);
137}
138
139extern void zone_statistics(struct zonelist *, struct zone *);
140
141#else
142
143#define node_page_state(node, item) global_page_state(item)
144#define zone_statistics(_zl,_z) do { } while (0)
145
146#endif /* CONFIG_NUMA */
147
148#define __add_zone_page_state(__z, __i, __d) \
149 __mod_zone_page_state(__z, __i, __d)
150#define __sub_zone_page_state(__z, __i, __d) \
151 __mod_zone_page_state(__z, __i,-(__d))
152
153#define add_zone_page_state(__z, __i, __d) mod_zone_page_state(__z, __i, __d)
154#define sub_zone_page_state(__z, __i, __d) mod_zone_page_state(__z, __i, -(__d))
155
156static inline void zap_zone_vm_stats(struct zone *zone)
157{
158 memset(zone->vm_stat, 0, sizeof(zone->vm_stat));
159}
160
161extern void inc_zone_state(struct zone *, enum zone_stat_item);
162
163#ifdef CONFIG_SMP
164void __mod_zone_page_state(struct zone *, enum zone_stat_item item, int);
165void __inc_zone_page_state(struct page *, enum zone_stat_item);
166void __dec_zone_page_state(struct page *, enum zone_stat_item);
167
168void mod_zone_page_state(struct zone *, enum zone_stat_item, int);
169void inc_zone_page_state(struct page *, enum zone_stat_item);
170void dec_zone_page_state(struct page *, enum zone_stat_item);
171
172extern void inc_zone_state(struct zone *, enum zone_stat_item);
173
174void refresh_cpu_vm_stats(int);
175void refresh_vm_stats(void);
176
177#else /* CONFIG_SMP */
178
179/*
180 * We do not maintain differentials in a single processor configuration.
181 * The functions directly modify the zone and global counters.
182 */
183static inline void __mod_zone_page_state(struct zone *zone,
184 enum zone_stat_item item, int delta)
185{
186 zone_page_state_add(delta, zone, item);
187}
188
189static inline void __inc_zone_page_state(struct page *page,
190 enum zone_stat_item item)
191{
192 atomic_long_inc(&page_zone(page)->vm_stat[item]);
193 atomic_long_inc(&vm_stat[item]);
194}
195
196static inline void __dec_zone_page_state(struct page *page,
197 enum zone_stat_item item)
198{
199 atomic_long_dec(&page_zone(page)->vm_stat[item]);
200 atomic_long_dec(&vm_stat[item]);
201}
202
203/*
204 * We only use atomic operations to update counters. So there is no need to
205 * disable interrupts.
206 */
207#define inc_zone_page_state __inc_zone_page_state
208#define dec_zone_page_state __dec_zone_page_state
209#define mod_zone_page_state __mod_zone_page_state
210
211static inline void refresh_cpu_vm_stats(int cpu) { }
212static inline void refresh_vm_stats(void) { }
213#endif
214
215#endif /* _LINUX_VMSTAT_H */
diff --git a/include/net/af_unix.h b/include/net/af_unix.h
index 795f81f9ec7f..5ba72d95280c 100644
--- a/include/net/af_unix.h
+++ b/include/net/af_unix.h
@@ -53,10 +53,16 @@ struct unix_address {
53struct unix_skb_parms { 53struct unix_skb_parms {
54 struct ucred creds; /* Skb credentials */ 54 struct ucred creds; /* Skb credentials */
55 struct scm_fp_list *fp; /* Passed files */ 55 struct scm_fp_list *fp; /* Passed files */
56#ifdef CONFIG_SECURITY_NETWORK
57 char *secdata; /* Security context */
58 u32 seclen; /* Security length */
59#endif
56}; 60};
57 61
58#define UNIXCB(skb) (*(struct unix_skb_parms*)&((skb)->cb)) 62#define UNIXCB(skb) (*(struct unix_skb_parms*)&((skb)->cb))
59#define UNIXCREDS(skb) (&UNIXCB((skb)).creds) 63#define UNIXCREDS(skb) (&UNIXCB((skb)).creds)
64#define UNIXSECDATA(skb) (&UNIXCB((skb)).secdata)
65#define UNIXSECLEN(skb) (&UNIXCB((skb)).seclen)
60 66
61#define unix_state_rlock(s) spin_lock(&unix_sk(s)->lock) 67#define unix_state_rlock(s) spin_lock(&unix_sk(s)->lock)
62#define unix_state_runlock(s) spin_unlock(&unix_sk(s)->lock) 68#define unix_state_runlock(s) spin_unlock(&unix_sk(s)->lock)
diff --git a/include/net/pkt_sched.h b/include/net/pkt_sched.h
index 75b5b9333fc7..1925c65e617b 100644
--- a/include/net/pkt_sched.h
+++ b/include/net/pkt_sched.h
@@ -169,17 +169,23 @@ psched_tod_diff(int delta_sec, int bound)
169 169
170#define PSCHED_TADD2(tv, delta, tv_res) \ 170#define PSCHED_TADD2(tv, delta, tv_res) \
171({ \ 171({ \
172 int __delta = (tv).tv_usec + (delta); \ 172 int __delta = (delta); \
173 (tv_res).tv_sec = (tv).tv_sec; \ 173 (tv_res) = (tv); \
174 if (__delta > USEC_PER_SEC) { (tv_res).tv_sec++; __delta -= USEC_PER_SEC; } \ 174 while(__delta >= USEC_PER_SEC){ \
175 (tv_res).tv_sec++; \
176 __delta -= USEC_PER_SEC; \
177 } \
175 (tv_res).tv_usec = __delta; \ 178 (tv_res).tv_usec = __delta; \
176}) 179})
177 180
178#define PSCHED_TADD(tv, delta) \ 181#define PSCHED_TADD(tv, delta) \
179({ \ 182({ \
180 (tv).tv_usec += (delta); \ 183 int __delta = (delta); \
181 if ((tv).tv_usec > USEC_PER_SEC) { (tv).tv_sec++; \ 184 while(__delta >= USEC_PER_SEC){ \
182 (tv).tv_usec -= USEC_PER_SEC; } \ 185 (tv).tv_sec++; \
186 __delta -= USEC_PER_SEC; \
187 } \
188 (tv).tv_usec = __delta; \
183}) 189})
184 190
185/* Set/check that time is in the "past perfect"; 191/* Set/check that time is in the "past perfect";
diff --git a/include/net/protocol.h b/include/net/protocol.h
index 3b6dc15c68a5..40b6b9c9973f 100644
--- a/include/net/protocol.h
+++ b/include/net/protocol.h
@@ -36,7 +36,8 @@
36struct net_protocol { 36struct net_protocol {
37 int (*handler)(struct sk_buff *skb); 37 int (*handler)(struct sk_buff *skb);
38 void (*err_handler)(struct sk_buff *skb, u32 info); 38 void (*err_handler)(struct sk_buff *skb, u32 info);
39 struct sk_buff *(*gso_segment)(struct sk_buff *skb, int sg); 39 struct sk_buff *(*gso_segment)(struct sk_buff *skb,
40 int features);
40 int no_policy; 41 int no_policy;
41}; 42};
42 43
diff --git a/include/net/scm.h b/include/net/scm.h
index 540619cb7160..02daa097cdcd 100644
--- a/include/net/scm.h
+++ b/include/net/scm.h
@@ -19,6 +19,10 @@ struct scm_cookie
19{ 19{
20 struct ucred creds; /* Skb credentials */ 20 struct ucred creds; /* Skb credentials */
21 struct scm_fp_list *fp; /* Passed files */ 21 struct scm_fp_list *fp; /* Passed files */
22#ifdef CONFIG_SECURITY_NETWORK
23 char *secdata; /* Security context */
24 u32 seclen; /* Security length */
25#endif
22 unsigned long seq; /* Connection seqno */ 26 unsigned long seq; /* Connection seqno */
23}; 27};
24 28
@@ -48,6 +52,17 @@ static __inline__ int scm_send(struct socket *sock, struct msghdr *msg,
48 return __scm_send(sock, msg, scm); 52 return __scm_send(sock, msg, scm);
49} 53}
50 54
55#ifdef CONFIG_SECURITY_NETWORK
56static inline void scm_passec(struct socket *sock, struct msghdr *msg, struct scm_cookie *scm)
57{
58 if (test_bit(SOCK_PASSSEC, &sock->flags) && scm->secdata != NULL)
59 put_cmsg(msg, SOL_SOCKET, SCM_SECURITY, scm->seclen, scm->secdata);
60}
61#else
62static inline void scm_passec(struct socket *sock, struct msghdr *msg, struct scm_cookie *scm)
63{ }
64#endif /* CONFIG_SECURITY_NETWORK */
65
51static __inline__ void scm_recv(struct socket *sock, struct msghdr *msg, 66static __inline__ void scm_recv(struct socket *sock, struct msghdr *msg,
52 struct scm_cookie *scm, int flags) 67 struct scm_cookie *scm, int flags)
53{ 68{
@@ -62,6 +77,8 @@ static __inline__ void scm_recv(struct socket *sock, struct msghdr *msg,
62 if (test_bit(SOCK_PASSCRED, &sock->flags)) 77 if (test_bit(SOCK_PASSCRED, &sock->flags))
63 put_cmsg(msg, SOL_SOCKET, SCM_CREDENTIALS, sizeof(scm->creds), &scm->creds); 78 put_cmsg(msg, SOL_SOCKET, SCM_CREDENTIALS, sizeof(scm->creds), &scm->creds);
64 79
80 scm_passec(sock, msg, scm);
81
65 if (!scm->fp) 82 if (!scm->fp)
66 return; 83 return;
67 84
diff --git a/include/net/sock.h b/include/net/sock.h
index 2d8d6adf1616..7136bae48c2f 100644
--- a/include/net/sock.h
+++ b/include/net/sock.h
@@ -383,7 +383,6 @@ enum sock_flags {
383 SOCK_USE_WRITE_QUEUE, /* whether to call sk->sk_write_space in sock_wfree */ 383 SOCK_USE_WRITE_QUEUE, /* whether to call sk->sk_write_space in sock_wfree */
384 SOCK_DBG, /* %SO_DEBUG setting */ 384 SOCK_DBG, /* %SO_DEBUG setting */
385 SOCK_RCVTSTAMP, /* %SO_TIMESTAMP setting */ 385 SOCK_RCVTSTAMP, /* %SO_TIMESTAMP setting */
386 SOCK_NO_LARGESEND, /* whether to sent large segments or not */
387 SOCK_LOCALROUTE, /* route locally only, %SO_DONTROUTE setting */ 386 SOCK_LOCALROUTE, /* route locally only, %SO_DONTROUTE setting */
388 SOCK_QUEUE_SHRUNK, /* write queue has been shrunk recently */ 387 SOCK_QUEUE_SHRUNK, /* write queue has been shrunk recently */
389}; 388};
@@ -1033,7 +1032,7 @@ static inline void sk_setup_caps(struct sock *sk, struct dst_entry *dst)
1033 if (sk->sk_route_caps & NETIF_F_GSO) 1032 if (sk->sk_route_caps & NETIF_F_GSO)
1034 sk->sk_route_caps |= NETIF_F_TSO; 1033 sk->sk_route_caps |= NETIF_F_TSO;
1035 if (sk->sk_route_caps & NETIF_F_TSO) { 1034 if (sk->sk_route_caps & NETIF_F_TSO) {
1036 if (sock_flag(sk, SOCK_NO_LARGESEND) || dst->header_len) 1035 if (dst->header_len)
1037 sk->sk_route_caps &= ~NETIF_F_TSO; 1036 sk->sk_route_caps &= ~NETIF_F_TSO;
1038 else 1037 else
1039 sk->sk_route_caps |= NETIF_F_SG | NETIF_F_HW_CSUM; 1038 sk->sk_route_caps |= NETIF_F_SG | NETIF_F_HW_CSUM;
diff --git a/include/net/tcp.h b/include/net/tcp.h
index ca3d38dfc00b..624921e76332 100644
--- a/include/net/tcp.h
+++ b/include/net/tcp.h
@@ -1086,7 +1086,7 @@ extern struct request_sock_ops tcp_request_sock_ops;
1086 1086
1087extern int tcp_v4_destroy_sock(struct sock *sk); 1087extern int tcp_v4_destroy_sock(struct sock *sk);
1088 1088
1089extern struct sk_buff *tcp_tso_segment(struct sk_buff *skb, int sg); 1089extern struct sk_buff *tcp_tso_segment(struct sk_buff *skb, int features);
1090 1090
1091#ifdef CONFIG_PROC_FS 1091#ifdef CONFIG_PROC_FS
1092extern int tcp4_proc_init(void); 1092extern int tcp4_proc_init(void);
diff --git a/include/net/tcp_ecn.h b/include/net/tcp_ecn.h
index c6b84397448d..7bb366f70934 100644
--- a/include/net/tcp_ecn.h
+++ b/include/net/tcp_ecn.h
@@ -31,10 +31,9 @@ static inline void TCP_ECN_send_syn(struct sock *sk, struct tcp_sock *tp,
31 struct sk_buff *skb) 31 struct sk_buff *skb)
32{ 32{
33 tp->ecn_flags = 0; 33 tp->ecn_flags = 0;
34 if (sysctl_tcp_ecn && !(sk->sk_route_caps & NETIF_F_TSO)) { 34 if (sysctl_tcp_ecn) {
35 TCP_SKB_CB(skb)->flags |= TCPCB_FLAG_ECE|TCPCB_FLAG_CWR; 35 TCP_SKB_CB(skb)->flags |= TCPCB_FLAG_ECE|TCPCB_FLAG_CWR;
36 tp->ecn_flags = TCP_ECN_OK; 36 tp->ecn_flags = TCP_ECN_OK;
37 sock_set_flag(sk, SOCK_NO_LARGESEND);
38 } 37 }
39} 38}
40 39
@@ -56,6 +55,9 @@ static inline void TCP_ECN_send(struct sock *sk, struct tcp_sock *tp,
56 if (tp->ecn_flags&TCP_ECN_QUEUE_CWR) { 55 if (tp->ecn_flags&TCP_ECN_QUEUE_CWR) {
57 tp->ecn_flags &= ~TCP_ECN_QUEUE_CWR; 56 tp->ecn_flags &= ~TCP_ECN_QUEUE_CWR;
58 skb->h.th->cwr = 1; 57 skb->h.th->cwr = 1;
58 if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV4)
59 skb_shinfo(skb)->gso_type |=
60 SKB_GSO_TCPV4_ECN;
59 } 61 }
60 } else { 62 } else {
61 /* ACK or retransmitted segment: clear ECT|CE */ 63 /* ACK or retransmitted segment: clear ECT|CE */
diff --git a/init/Kconfig b/init/Kconfig
index f70f2fd273c2..a5b073a103e7 100644
--- a/init/Kconfig
+++ b/init/Kconfig
@@ -54,8 +54,8 @@ config LOCK_KERNEL
54 54
55config INIT_ENV_ARG_LIMIT 55config INIT_ENV_ARG_LIMIT
56 int 56 int
57 default 32 if !USERMODE 57 default 32 if !UML
58 default 128 if USERMODE 58 default 128 if UML
59 help 59 help
60 Maximum of each of the number of arguments and environment 60 Maximum of each of the number of arguments and environment
61 variables passed to init from the kernel command line. 61 variables passed to init from the kernel command line.
@@ -379,6 +379,15 @@ config SLAB
379 SLOB is more space efficient but does not scale well and is 379 SLOB is more space efficient but does not scale well and is
380 more susceptible to fragmentation. 380 more susceptible to fragmentation.
381 381
382config VM_EVENT_COUNTERS
383 default y
384 bool "Enable VM event counters for /proc/vmstat" if EMBEDDED
385 help
386 VM event counters are only needed to for event counts to be
387 shown. They have no function for the kernel itself. This
388 option allows the disabling of the VM event counters.
389 /proc/vmstat will only show page counts.
390
382endmenu # General setup 391endmenu # General setup
383 392
384config TINY_SHMEM 393config TINY_SHMEM
diff --git a/init/main.c b/init/main.c
index bce0eb7f4f8f..ae04eb78a93a 100644
--- a/init/main.c
+++ b/init/main.c
@@ -446,10 +446,17 @@ static void __init boot_cpu_init(void)
446 cpu_set(cpu, cpu_possible_map); 446 cpu_set(cpu, cpu_possible_map);
447} 447}
448 448
449void __init __attribute__((weak)) smp_setup_processor_id(void)
450{
451}
452
449asmlinkage void __init start_kernel(void) 453asmlinkage void __init start_kernel(void)
450{ 454{
451 char * command_line; 455 char * command_line;
452 extern struct kernel_param __start___param[], __stop___param[]; 456 extern struct kernel_param __start___param[], __stop___param[];
457
458 smp_setup_processor_id();
459
453/* 460/*
454 * Interrupts are still disabled. Do necessary setups, then 461 * Interrupts are still disabled. Do necessary setups, then
455 * enable them 462 * enable them
diff --git a/kernel/audit.c b/kernel/audit.c
index 82443fb433ef..d417ca1db79b 100644
--- a/kernel/audit.c
+++ b/kernel/audit.c
@@ -445,7 +445,7 @@ void audit_send_reply(int pid, int seq, int type, int done, int multi,
445 * Check for appropriate CAP_AUDIT_ capabilities on incoming audit 445 * Check for appropriate CAP_AUDIT_ capabilities on incoming audit
446 * control messages. 446 * control messages.
447 */ 447 */
448static int audit_netlink_ok(kernel_cap_t eff_cap, u16 msg_type) 448static int audit_netlink_ok(struct sk_buff *skb, u16 msg_type)
449{ 449{
450 int err = 0; 450 int err = 0;
451 451
@@ -459,13 +459,13 @@ static int audit_netlink_ok(kernel_cap_t eff_cap, u16 msg_type)
459 case AUDIT_DEL: 459 case AUDIT_DEL:
460 case AUDIT_DEL_RULE: 460 case AUDIT_DEL_RULE:
461 case AUDIT_SIGNAL_INFO: 461 case AUDIT_SIGNAL_INFO:
462 if (!cap_raised(eff_cap, CAP_AUDIT_CONTROL)) 462 if (security_netlink_recv(skb, CAP_AUDIT_CONTROL))
463 err = -EPERM; 463 err = -EPERM;
464 break; 464 break;
465 case AUDIT_USER: 465 case AUDIT_USER:
466 case AUDIT_FIRST_USER_MSG...AUDIT_LAST_USER_MSG: 466 case AUDIT_FIRST_USER_MSG...AUDIT_LAST_USER_MSG:
467 case AUDIT_FIRST_USER_MSG2...AUDIT_LAST_USER_MSG2: 467 case AUDIT_FIRST_USER_MSG2...AUDIT_LAST_USER_MSG2:
468 if (!cap_raised(eff_cap, CAP_AUDIT_WRITE)) 468 if (security_netlink_recv(skb, CAP_AUDIT_WRITE))
469 err = -EPERM; 469 err = -EPERM;
470 break; 470 break;
471 default: /* bad msg */ 471 default: /* bad msg */
@@ -488,7 +488,7 @@ static int audit_receive_msg(struct sk_buff *skb, struct nlmsghdr *nlh)
488 char *ctx; 488 char *ctx;
489 u32 len; 489 u32 len;
490 490
491 err = audit_netlink_ok(NETLINK_CB(skb).eff_cap, msg_type); 491 err = audit_netlink_ok(skb, msg_type);
492 if (err) 492 if (err)
493 return err; 493 return err;
494 494
diff --git a/kernel/sched.c b/kernel/sched.c
index 2629c1711fd6..d5e37072ea54 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -4386,7 +4386,16 @@ asmlinkage long sys_sched_yield(void)
4386 return 0; 4386 return 0;
4387} 4387}
4388 4388
4389static inline void __cond_resched(void) 4389static inline int __resched_legal(void)
4390{
4391 if (unlikely(preempt_count()))
4392 return 0;
4393 if (unlikely(system_state != SYSTEM_RUNNING))
4394 return 0;
4395 return 1;
4396}
4397
4398static void __cond_resched(void)
4390{ 4399{
4391#ifdef CONFIG_DEBUG_SPINLOCK_SLEEP 4400#ifdef CONFIG_DEBUG_SPINLOCK_SLEEP
4392 __might_sleep(__FILE__, __LINE__); 4401 __might_sleep(__FILE__, __LINE__);
@@ -4396,10 +4405,6 @@ static inline void __cond_resched(void)
4396 * PREEMPT_ACTIVE, which could trigger a second 4405 * PREEMPT_ACTIVE, which could trigger a second
4397 * cond_resched() call. 4406 * cond_resched() call.
4398 */ 4407 */
4399 if (unlikely(preempt_count()))
4400 return;
4401 if (unlikely(system_state != SYSTEM_RUNNING))
4402 return;
4403 do { 4408 do {
4404 add_preempt_count(PREEMPT_ACTIVE); 4409 add_preempt_count(PREEMPT_ACTIVE);
4405 schedule(); 4410 schedule();
@@ -4409,13 +4414,12 @@ static inline void __cond_resched(void)
4409 4414
4410int __sched cond_resched(void) 4415int __sched cond_resched(void)
4411{ 4416{
4412 if (need_resched()) { 4417 if (need_resched() && __resched_legal()) {
4413 __cond_resched(); 4418 __cond_resched();
4414 return 1; 4419 return 1;
4415 } 4420 }
4416 return 0; 4421 return 0;
4417} 4422}
4418
4419EXPORT_SYMBOL(cond_resched); 4423EXPORT_SYMBOL(cond_resched);
4420 4424
4421/* 4425/*
@@ -4436,7 +4440,7 @@ int cond_resched_lock(spinlock_t *lock)
4436 ret = 1; 4440 ret = 1;
4437 spin_lock(lock); 4441 spin_lock(lock);
4438 } 4442 }
4439 if (need_resched()) { 4443 if (need_resched() && __resched_legal()) {
4440 _raw_spin_unlock(lock); 4444 _raw_spin_unlock(lock);
4441 preempt_enable_no_resched(); 4445 preempt_enable_no_resched();
4442 __cond_resched(); 4446 __cond_resched();
@@ -4445,14 +4449,13 @@ int cond_resched_lock(spinlock_t *lock)
4445 } 4449 }
4446 return ret; 4450 return ret;
4447} 4451}
4448
4449EXPORT_SYMBOL(cond_resched_lock); 4452EXPORT_SYMBOL(cond_resched_lock);
4450 4453
4451int __sched cond_resched_softirq(void) 4454int __sched cond_resched_softirq(void)
4452{ 4455{
4453 BUG_ON(!in_softirq()); 4456 BUG_ON(!in_softirq());
4454 4457
4455 if (need_resched()) { 4458 if (need_resched() && __resched_legal()) {
4456 __local_bh_enable(); 4459 __local_bh_enable();
4457 __cond_resched(); 4460 __cond_resched();
4458 local_bh_disable(); 4461 local_bh_disable();
@@ -4460,10 +4463,8 @@ int __sched cond_resched_softirq(void)
4460 } 4463 }
4461 return 0; 4464 return 0;
4462} 4465}
4463
4464EXPORT_SYMBOL(cond_resched_softirq); 4466EXPORT_SYMBOL(cond_resched_softirq);
4465 4467
4466
4467/** 4468/**
4468 * yield - yield the current processor to other threads. 4469 * yield - yield the current processor to other threads.
4469 * 4470 *
diff --git a/kernel/signal.c b/kernel/signal.c
index 52adf53929f6..477d11adf3d1 100644
--- a/kernel/signal.c
+++ b/kernel/signal.c
@@ -584,7 +584,7 @@ static int check_kill_permission(int sig, struct siginfo *info,
584 && !capable(CAP_KILL)) 584 && !capable(CAP_KILL))
585 return error; 585 return error;
586 586
587 error = security_task_kill(t, info, sig); 587 error = security_task_kill(t, info, sig, 0);
588 if (!error) 588 if (!error)
589 audit_signal_info(sig, t); /* Let audit system see the signal */ 589 audit_signal_info(sig, t); /* Let audit system see the signal */
590 return error; 590 return error;
@@ -1107,7 +1107,7 @@ kill_proc_info(int sig, struct siginfo *info, pid_t pid)
1107 1107
1108/* like kill_proc_info(), but doesn't use uid/euid of "current" */ 1108/* like kill_proc_info(), but doesn't use uid/euid of "current" */
1109int kill_proc_info_as_uid(int sig, struct siginfo *info, pid_t pid, 1109int kill_proc_info_as_uid(int sig, struct siginfo *info, pid_t pid,
1110 uid_t uid, uid_t euid) 1110 uid_t uid, uid_t euid, u32 secid)
1111{ 1111{
1112 int ret = -EINVAL; 1112 int ret = -EINVAL;
1113 struct task_struct *p; 1113 struct task_struct *p;
@@ -1127,6 +1127,9 @@ int kill_proc_info_as_uid(int sig, struct siginfo *info, pid_t pid,
1127 ret = -EPERM; 1127 ret = -EPERM;
1128 goto out_unlock; 1128 goto out_unlock;
1129 } 1129 }
1130 ret = security_task_kill(p, info, sig, secid);
1131 if (ret)
1132 goto out_unlock;
1130 if (sig && p->sighand) { 1133 if (sig && p->sighand) {
1131 unsigned long flags; 1134 unsigned long flags;
1132 spin_lock_irqsave(&p->sighand->siglock, flags); 1135 spin_lock_irqsave(&p->sighand->siglock, flags);
diff --git a/kernel/sysctl.c b/kernel/sysctl.c
index 93a2c5398648..ee0db45e2438 100644
--- a/kernel/sysctl.c
+++ b/kernel/sysctl.c
@@ -933,15 +933,6 @@ static ctl_table vm_table[] = {
933 .strategy = &sysctl_intvec, 933 .strategy = &sysctl_intvec,
934 .extra1 = &zero, 934 .extra1 = &zero,
935 }, 935 },
936 {
937 .ctl_name = VM_ZONE_RECLAIM_INTERVAL,
938 .procname = "zone_reclaim_interval",
939 .data = &zone_reclaim_interval,
940 .maxlen = sizeof(zone_reclaim_interval),
941 .mode = 0644,
942 .proc_handler = &proc_dointvec_jiffies,
943 .strategy = &sysctl_jiffies,
944 },
945#endif 936#endif
946#ifdef CONFIG_X86_32 937#ifdef CONFIG_X86_32
947 { 938 {
diff --git a/mm/Makefile b/mm/Makefile
index 0b8f73f2ed16..9dd824c11eeb 100644
--- a/mm/Makefile
+++ b/mm/Makefile
@@ -10,7 +10,7 @@ mmu-$(CONFIG_MMU) := fremap.o highmem.o madvise.o memory.o mincore.o \
10obj-y := bootmem.o filemap.o mempool.o oom_kill.o fadvise.o \ 10obj-y := bootmem.o filemap.o mempool.o oom_kill.o fadvise.o \
11 page_alloc.o page-writeback.o pdflush.o \ 11 page_alloc.o page-writeback.o pdflush.o \
12 readahead.o swap.o truncate.o vmscan.o \ 12 readahead.o swap.o truncate.o vmscan.o \
13 prio_tree.o util.o mmzone.o $(mmu-y) 13 prio_tree.o util.o mmzone.o vmstat.o $(mmu-y)
14 14
15obj-$(CONFIG_SWAP) += page_io.o swap_state.o swapfile.o thrash.o 15obj-$(CONFIG_SWAP) += page_io.o swap_state.o swapfile.o thrash.o
16obj-$(CONFIG_HUGETLBFS) += hugetlb.o 16obj-$(CONFIG_HUGETLBFS) += hugetlb.o
diff --git a/mm/filemap.c b/mm/filemap.c
index 648f2c0c8e18..796a5471b495 100644
--- a/mm/filemap.c
+++ b/mm/filemap.c
@@ -120,7 +120,7 @@ void __remove_from_page_cache(struct page *page)
120 radix_tree_delete(&mapping->page_tree, page->index); 120 radix_tree_delete(&mapping->page_tree, page->index);
121 page->mapping = NULL; 121 page->mapping = NULL;
122 mapping->nrpages--; 122 mapping->nrpages--;
123 pagecache_acct(-1); 123 __dec_zone_page_state(page, NR_FILE_PAGES);
124} 124}
125 125
126void remove_from_page_cache(struct page *page) 126void remove_from_page_cache(struct page *page)
@@ -449,7 +449,7 @@ int add_to_page_cache(struct page *page, struct address_space *mapping,
449 page->mapping = mapping; 449 page->mapping = mapping;
450 page->index = offset; 450 page->index = offset;
451 mapping->nrpages++; 451 mapping->nrpages++;
452 pagecache_acct(1); 452 __inc_zone_page_state(page, NR_FILE_PAGES);
453 } 453 }
454 write_unlock_irq(&mapping->tree_lock); 454 write_unlock_irq(&mapping->tree_lock);
455 radix_tree_preload_end(); 455 radix_tree_preload_end();
@@ -1416,7 +1416,7 @@ retry_find:
1416 */ 1416 */
1417 if (!did_readaround) { 1417 if (!did_readaround) {
1418 majmin = VM_FAULT_MAJOR; 1418 majmin = VM_FAULT_MAJOR;
1419 inc_page_state(pgmajfault); 1419 count_vm_event(PGMAJFAULT);
1420 } 1420 }
1421 did_readaround = 1; 1421 did_readaround = 1;
1422 ra_pages = max_sane_readahead(file->f_ra.ra_pages); 1422 ra_pages = max_sane_readahead(file->f_ra.ra_pages);
@@ -1487,7 +1487,7 @@ no_cached_page:
1487page_not_uptodate: 1487page_not_uptodate:
1488 if (!did_readaround) { 1488 if (!did_readaround) {
1489 majmin = VM_FAULT_MAJOR; 1489 majmin = VM_FAULT_MAJOR;
1490 inc_page_state(pgmajfault); 1490 count_vm_event(PGMAJFAULT);
1491 } 1491 }
1492 lock_page(page); 1492 lock_page(page);
1493 1493
diff --git a/mm/highmem.c b/mm/highmem.c
index 9b274fdf9d08..9b2a5403c447 100644
--- a/mm/highmem.c
+++ b/mm/highmem.c
@@ -315,8 +315,8 @@ static void bounce_end_io(struct bio *bio, mempool_t *pool, int err)
315 if (bvec->bv_page == org_vec->bv_page) 315 if (bvec->bv_page == org_vec->bv_page)
316 continue; 316 continue;
317 317
318 mempool_free(bvec->bv_page, pool); 318 dec_zone_page_state(bvec->bv_page, NR_BOUNCE);
319 dec_page_state(nr_bounce); 319 mempool_free(bvec->bv_page, pool);
320 } 320 }
321 321
322 bio_endio(bio_orig, bio_orig->bi_size, err); 322 bio_endio(bio_orig, bio_orig->bi_size, err);
@@ -397,7 +397,7 @@ static void __blk_queue_bounce(request_queue_t *q, struct bio **bio_orig,
397 to->bv_page = mempool_alloc(pool, q->bounce_gfp); 397 to->bv_page = mempool_alloc(pool, q->bounce_gfp);
398 to->bv_len = from->bv_len; 398 to->bv_len = from->bv_len;
399 to->bv_offset = from->bv_offset; 399 to->bv_offset = from->bv_offset;
400 inc_page_state(nr_bounce); 400 inc_zone_page_state(to->bv_page, NR_BOUNCE);
401 401
402 if (rw == WRITE) { 402 if (rw == WRITE) {
403 char *vto, *vfrom; 403 char *vto, *vfrom;
diff --git a/mm/memory.c b/mm/memory.c
index 247b5c312b9b..7e2a4b1580e3 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -126,7 +126,7 @@ static void free_pte_range(struct mmu_gather *tlb, pmd_t *pmd)
126 pmd_clear(pmd); 126 pmd_clear(pmd);
127 pte_lock_deinit(page); 127 pte_lock_deinit(page);
128 pte_free_tlb(tlb, page); 128 pte_free_tlb(tlb, page);
129 dec_page_state(nr_page_table_pages); 129 dec_zone_page_state(page, NR_PAGETABLE);
130 tlb->mm->nr_ptes--; 130 tlb->mm->nr_ptes--;
131} 131}
132 132
@@ -311,7 +311,7 @@ int __pte_alloc(struct mm_struct *mm, pmd_t *pmd, unsigned long address)
311 pte_free(new); 311 pte_free(new);
312 } else { 312 } else {
313 mm->nr_ptes++; 313 mm->nr_ptes++;
314 inc_page_state(nr_page_table_pages); 314 inc_zone_page_state(new, NR_PAGETABLE);
315 pmd_populate(mm, pmd, new); 315 pmd_populate(mm, pmd, new);
316 } 316 }
317 spin_unlock(&mm->page_table_lock); 317 spin_unlock(&mm->page_table_lock);
@@ -1951,7 +1951,7 @@ static int do_swap_page(struct mm_struct *mm, struct vm_area_struct *vma,
1951 1951
1952 /* Had to read the page from swap area: Major fault */ 1952 /* Had to read the page from swap area: Major fault */
1953 ret = VM_FAULT_MAJOR; 1953 ret = VM_FAULT_MAJOR;
1954 inc_page_state(pgmajfault); 1954 count_vm_event(PGMAJFAULT);
1955 grab_swap_token(); 1955 grab_swap_token();
1956 } 1956 }
1957 1957
@@ -2324,7 +2324,7 @@ int __handle_mm_fault(struct mm_struct *mm, struct vm_area_struct *vma,
2324 2324
2325 __set_current_state(TASK_RUNNING); 2325 __set_current_state(TASK_RUNNING);
2326 2326
2327 inc_page_state(pgfault); 2327 count_vm_event(PGFAULT);
2328 2328
2329 if (unlikely(is_vm_hugetlb_page(vma))) 2329 if (unlikely(is_vm_hugetlb_page(vma)))
2330 return hugetlb_fault(mm, vma, address, write_access); 2330 return hugetlb_fault(mm, vma, address, write_access);
diff --git a/mm/mempolicy.c b/mm/mempolicy.c
index 6b9740bbf4c0..e07e27e846a2 100644
--- a/mm/mempolicy.c
+++ b/mm/mempolicy.c
@@ -1209,10 +1209,8 @@ static struct page *alloc_page_interleave(gfp_t gfp, unsigned order,
1209 1209
1210 zl = NODE_DATA(nid)->node_zonelists + gfp_zone(gfp); 1210 zl = NODE_DATA(nid)->node_zonelists + gfp_zone(gfp);
1211 page = __alloc_pages(gfp, order, zl); 1211 page = __alloc_pages(gfp, order, zl);
1212 if (page && page_zone(page) == zl->zones[0]) { 1212 if (page && page_zone(page) == zl->zones[0])
1213 zone_pcp(zl->zones[0],get_cpu())->interleave_hit++; 1213 inc_zone_page_state(page, NUMA_INTERLEAVE_HIT);
1214 put_cpu();
1215 }
1216 return page; 1214 return page;
1217} 1215}
1218 1216
diff --git a/mm/mmap.c b/mm/mmap.c
index 6446c6134b04..c1868ecdbc5f 100644
--- a/mm/mmap.c
+++ b/mm/mmap.c
@@ -96,7 +96,7 @@ int __vm_enough_memory(long pages, int cap_sys_admin)
96 if (sysctl_overcommit_memory == OVERCOMMIT_GUESS) { 96 if (sysctl_overcommit_memory == OVERCOMMIT_GUESS) {
97 unsigned long n; 97 unsigned long n;
98 98
99 free = get_page_cache_size(); 99 free = global_page_state(NR_FILE_PAGES);
100 free += nr_swap_pages; 100 free += nr_swap_pages;
101 101
102 /* 102 /*
diff --git a/mm/nommu.c b/mm/nommu.c
index 029fadac0fb5..5151c44a8257 100644
--- a/mm/nommu.c
+++ b/mm/nommu.c
@@ -1122,7 +1122,7 @@ int __vm_enough_memory(long pages, int cap_sys_admin)
1122 if (sysctl_overcommit_memory == OVERCOMMIT_GUESS) { 1122 if (sysctl_overcommit_memory == OVERCOMMIT_GUESS) {
1123 unsigned long n; 1123 unsigned long n;
1124 1124
1125 free = get_page_cache_size(); 1125 free = global_page_state(NR_FILE_PAGES);
1126 free += nr_swap_pages; 1126 free += nr_swap_pages;
1127 1127
1128 /* 1128 /*
diff --git a/mm/page-writeback.c b/mm/page-writeback.c
index 4ec7026c7bab..e630188ccc40 100644
--- a/mm/page-writeback.c
+++ b/mm/page-writeback.c
@@ -99,22 +99,6 @@ EXPORT_SYMBOL(laptop_mode);
99 99
100static void background_writeout(unsigned long _min_pages); 100static void background_writeout(unsigned long _min_pages);
101 101
102struct writeback_state
103{
104 unsigned long nr_dirty;
105 unsigned long nr_unstable;
106 unsigned long nr_mapped;
107 unsigned long nr_writeback;
108};
109
110static void get_writeback_state(struct writeback_state *wbs)
111{
112 wbs->nr_dirty = read_page_state(nr_dirty);
113 wbs->nr_unstable = read_page_state(nr_unstable);
114 wbs->nr_mapped = read_page_state(nr_mapped);
115 wbs->nr_writeback = read_page_state(nr_writeback);
116}
117
118/* 102/*
119 * Work out the current dirty-memory clamping and background writeout 103 * Work out the current dirty-memory clamping and background writeout
120 * thresholds. 104 * thresholds.
@@ -133,8 +117,8 @@ static void get_writeback_state(struct writeback_state *wbs)
133 * clamping level. 117 * clamping level.
134 */ 118 */
135static void 119static void
136get_dirty_limits(struct writeback_state *wbs, long *pbackground, long *pdirty, 120get_dirty_limits(long *pbackground, long *pdirty,
137 struct address_space *mapping) 121 struct address_space *mapping)
138{ 122{
139 int background_ratio; /* Percentages */ 123 int background_ratio; /* Percentages */
140 int dirty_ratio; 124 int dirty_ratio;
@@ -144,8 +128,6 @@ get_dirty_limits(struct writeback_state *wbs, long *pbackground, long *pdirty,
144 unsigned long available_memory = total_pages; 128 unsigned long available_memory = total_pages;
145 struct task_struct *tsk; 129 struct task_struct *tsk;
146 130
147 get_writeback_state(wbs);
148
149#ifdef CONFIG_HIGHMEM 131#ifdef CONFIG_HIGHMEM
150 /* 132 /*
151 * If this mapping can only allocate from low memory, 133 * If this mapping can only allocate from low memory,
@@ -156,7 +138,9 @@ get_dirty_limits(struct writeback_state *wbs, long *pbackground, long *pdirty,
156#endif 138#endif
157 139
158 140
159 unmapped_ratio = 100 - (wbs->nr_mapped * 100) / total_pages; 141 unmapped_ratio = 100 - ((global_page_state(NR_FILE_MAPPED) +
142 global_page_state(NR_ANON_PAGES)) * 100) /
143 total_pages;
160 144
161 dirty_ratio = vm_dirty_ratio; 145 dirty_ratio = vm_dirty_ratio;
162 if (dirty_ratio > unmapped_ratio / 2) 146 if (dirty_ratio > unmapped_ratio / 2)
@@ -189,7 +173,6 @@ get_dirty_limits(struct writeback_state *wbs, long *pbackground, long *pdirty,
189 */ 173 */
190static void balance_dirty_pages(struct address_space *mapping) 174static void balance_dirty_pages(struct address_space *mapping)
191{ 175{
192 struct writeback_state wbs;
193 long nr_reclaimable; 176 long nr_reclaimable;
194 long background_thresh; 177 long background_thresh;
195 long dirty_thresh; 178 long dirty_thresh;
@@ -207,11 +190,12 @@ static void balance_dirty_pages(struct address_space *mapping)
207 .range_cyclic = 1, 190 .range_cyclic = 1,
208 }; 191 };
209 192
210 get_dirty_limits(&wbs, &background_thresh, 193 get_dirty_limits(&background_thresh, &dirty_thresh, mapping);
211 &dirty_thresh, mapping); 194 nr_reclaimable = global_page_state(NR_FILE_DIRTY) +
212 nr_reclaimable = wbs.nr_dirty + wbs.nr_unstable; 195 global_page_state(NR_UNSTABLE_NFS);
213 if (nr_reclaimable + wbs.nr_writeback <= dirty_thresh) 196 if (nr_reclaimable + global_page_state(NR_WRITEBACK) <=
214 break; 197 dirty_thresh)
198 break;
215 199
216 if (!dirty_exceeded) 200 if (!dirty_exceeded)
217 dirty_exceeded = 1; 201 dirty_exceeded = 1;
@@ -224,11 +208,14 @@ static void balance_dirty_pages(struct address_space *mapping)
224 */ 208 */
225 if (nr_reclaimable) { 209 if (nr_reclaimable) {
226 writeback_inodes(&wbc); 210 writeback_inodes(&wbc);
227 get_dirty_limits(&wbs, &background_thresh, 211 get_dirty_limits(&background_thresh,
228 &dirty_thresh, mapping); 212 &dirty_thresh, mapping);
229 nr_reclaimable = wbs.nr_dirty + wbs.nr_unstable; 213 nr_reclaimable = global_page_state(NR_FILE_DIRTY) +
230 if (nr_reclaimable + wbs.nr_writeback <= dirty_thresh) 214 global_page_state(NR_UNSTABLE_NFS);
231 break; 215 if (nr_reclaimable +
216 global_page_state(NR_WRITEBACK)
217 <= dirty_thresh)
218 break;
232 pages_written += write_chunk - wbc.nr_to_write; 219 pages_written += write_chunk - wbc.nr_to_write;
233 if (pages_written >= write_chunk) 220 if (pages_written >= write_chunk)
234 break; /* We've done our duty */ 221 break; /* We've done our duty */
@@ -236,8 +223,9 @@ static void balance_dirty_pages(struct address_space *mapping)
236 blk_congestion_wait(WRITE, HZ/10); 223 blk_congestion_wait(WRITE, HZ/10);
237 } 224 }
238 225
239 if (nr_reclaimable + wbs.nr_writeback <= dirty_thresh && dirty_exceeded) 226 if (nr_reclaimable + global_page_state(NR_WRITEBACK)
240 dirty_exceeded = 0; 227 <= dirty_thresh && dirty_exceeded)
228 dirty_exceeded = 0;
241 229
242 if (writeback_in_progress(bdi)) 230 if (writeback_in_progress(bdi))
243 return; /* pdflush is already working this queue */ 231 return; /* pdflush is already working this queue */
@@ -299,12 +287,11 @@ EXPORT_SYMBOL(balance_dirty_pages_ratelimited_nr);
299 287
300void throttle_vm_writeout(void) 288void throttle_vm_writeout(void)
301{ 289{
302 struct writeback_state wbs;
303 long background_thresh; 290 long background_thresh;
304 long dirty_thresh; 291 long dirty_thresh;
305 292
306 for ( ; ; ) { 293 for ( ; ; ) {
307 get_dirty_limits(&wbs, &background_thresh, &dirty_thresh, NULL); 294 get_dirty_limits(&background_thresh, &dirty_thresh, NULL);
308 295
309 /* 296 /*
310 * Boost the allowable dirty threshold a bit for page 297 * Boost the allowable dirty threshold a bit for page
@@ -312,8 +299,9 @@ void throttle_vm_writeout(void)
312 */ 299 */
313 dirty_thresh += dirty_thresh / 10; /* wheeee... */ 300 dirty_thresh += dirty_thresh / 10; /* wheeee... */
314 301
315 if (wbs.nr_unstable + wbs.nr_writeback <= dirty_thresh) 302 if (global_page_state(NR_UNSTABLE_NFS) +
316 break; 303 global_page_state(NR_WRITEBACK) <= dirty_thresh)
304 break;
317 blk_congestion_wait(WRITE, HZ/10); 305 blk_congestion_wait(WRITE, HZ/10);
318 } 306 }
319} 307}
@@ -336,12 +324,12 @@ static void background_writeout(unsigned long _min_pages)
336 }; 324 };
337 325
338 for ( ; ; ) { 326 for ( ; ; ) {
339 struct writeback_state wbs;
340 long background_thresh; 327 long background_thresh;
341 long dirty_thresh; 328 long dirty_thresh;
342 329
343 get_dirty_limits(&wbs, &background_thresh, &dirty_thresh, NULL); 330 get_dirty_limits(&background_thresh, &dirty_thresh, NULL);
344 if (wbs.nr_dirty + wbs.nr_unstable < background_thresh 331 if (global_page_state(NR_FILE_DIRTY) +
332 global_page_state(NR_UNSTABLE_NFS) < background_thresh
345 && min_pages <= 0) 333 && min_pages <= 0)
346 break; 334 break;
347 wbc.encountered_congestion = 0; 335 wbc.encountered_congestion = 0;
@@ -365,12 +353,9 @@ static void background_writeout(unsigned long _min_pages)
365 */ 353 */
366int wakeup_pdflush(long nr_pages) 354int wakeup_pdflush(long nr_pages)
367{ 355{
368 if (nr_pages == 0) { 356 if (nr_pages == 0)
369 struct writeback_state wbs; 357 nr_pages = global_page_state(NR_FILE_DIRTY) +
370 358 global_page_state(NR_UNSTABLE_NFS);
371 get_writeback_state(&wbs);
372 nr_pages = wbs.nr_dirty + wbs.nr_unstable;
373 }
374 return pdflush_operation(background_writeout, nr_pages); 359 return pdflush_operation(background_writeout, nr_pages);
375} 360}
376 361
@@ -401,7 +386,6 @@ static void wb_kupdate(unsigned long arg)
401 unsigned long start_jif; 386 unsigned long start_jif;
402 unsigned long next_jif; 387 unsigned long next_jif;
403 long nr_to_write; 388 long nr_to_write;
404 struct writeback_state wbs;
405 struct writeback_control wbc = { 389 struct writeback_control wbc = {
406 .bdi = NULL, 390 .bdi = NULL,
407 .sync_mode = WB_SYNC_NONE, 391 .sync_mode = WB_SYNC_NONE,
@@ -414,11 +398,11 @@ static void wb_kupdate(unsigned long arg)
414 398
415 sync_supers(); 399 sync_supers();
416 400
417 get_writeback_state(&wbs);
418 oldest_jif = jiffies - dirty_expire_interval; 401 oldest_jif = jiffies - dirty_expire_interval;
419 start_jif = jiffies; 402 start_jif = jiffies;
420 next_jif = start_jif + dirty_writeback_interval; 403 next_jif = start_jif + dirty_writeback_interval;
421 nr_to_write = wbs.nr_dirty + wbs.nr_unstable + 404 nr_to_write = global_page_state(NR_FILE_DIRTY) +
405 global_page_state(NR_UNSTABLE_NFS) +
422 (inodes_stat.nr_inodes - inodes_stat.nr_unused); 406 (inodes_stat.nr_inodes - inodes_stat.nr_unused);
423 while (nr_to_write > 0) { 407 while (nr_to_write > 0) {
424 wbc.encountered_congestion = 0; 408 wbc.encountered_congestion = 0;
@@ -640,7 +624,8 @@ int __set_page_dirty_nobuffers(struct page *page)
640 if (mapping2) { /* Race with truncate? */ 624 if (mapping2) { /* Race with truncate? */
641 BUG_ON(mapping2 != mapping); 625 BUG_ON(mapping2 != mapping);
642 if (mapping_cap_account_dirty(mapping)) 626 if (mapping_cap_account_dirty(mapping))
643 inc_page_state(nr_dirty); 627 __inc_zone_page_state(page,
628 NR_FILE_DIRTY);
644 radix_tree_tag_set(&mapping->page_tree, 629 radix_tree_tag_set(&mapping->page_tree,
645 page_index(page), PAGECACHE_TAG_DIRTY); 630 page_index(page), PAGECACHE_TAG_DIRTY);
646 } 631 }
@@ -727,9 +712,9 @@ int test_clear_page_dirty(struct page *page)
727 radix_tree_tag_clear(&mapping->page_tree, 712 radix_tree_tag_clear(&mapping->page_tree,
728 page_index(page), 713 page_index(page),
729 PAGECACHE_TAG_DIRTY); 714 PAGECACHE_TAG_DIRTY);
730 write_unlock_irqrestore(&mapping->tree_lock, flags);
731 if (mapping_cap_account_dirty(mapping)) 715 if (mapping_cap_account_dirty(mapping))
732 dec_page_state(nr_dirty); 716 __dec_zone_page_state(page, NR_FILE_DIRTY);
717 write_unlock_irqrestore(&mapping->tree_lock, flags);
733 return 1; 718 return 1;
734 } 719 }
735 write_unlock_irqrestore(&mapping->tree_lock, flags); 720 write_unlock_irqrestore(&mapping->tree_lock, flags);
@@ -760,7 +745,7 @@ int clear_page_dirty_for_io(struct page *page)
760 if (mapping) { 745 if (mapping) {
761 if (TestClearPageDirty(page)) { 746 if (TestClearPageDirty(page)) {
762 if (mapping_cap_account_dirty(mapping)) 747 if (mapping_cap_account_dirty(mapping))
763 dec_page_state(nr_dirty); 748 dec_zone_page_state(page, NR_FILE_DIRTY);
764 return 1; 749 return 1;
765 } 750 }
766 return 0; 751 return 0;
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 084a2de7e52a..30b0b97ad023 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -456,7 +456,7 @@ static void __free_pages_ok(struct page *page, unsigned int order)
456 456
457 kernel_map_pages(page, 1 << order, 0); 457 kernel_map_pages(page, 1 << order, 0);
458 local_irq_save(flags); 458 local_irq_save(flags);
459 __mod_page_state(pgfree, 1 << order); 459 __count_vm_events(PGFREE, 1 << order);
460 free_one_page(page_zone(page), page, order); 460 free_one_page(page_zone(page), page, order);
461 local_irq_restore(flags); 461 local_irq_restore(flags);
462} 462}
@@ -709,27 +709,6 @@ void drain_local_pages(void)
709} 709}
710#endif /* CONFIG_PM */ 710#endif /* CONFIG_PM */
711 711
712static void zone_statistics(struct zonelist *zonelist, struct zone *z, int cpu)
713{
714#ifdef CONFIG_NUMA
715 pg_data_t *pg = z->zone_pgdat;
716 pg_data_t *orig = zonelist->zones[0]->zone_pgdat;
717 struct per_cpu_pageset *p;
718
719 p = zone_pcp(z, cpu);
720 if (pg == orig) {
721 p->numa_hit++;
722 } else {
723 p->numa_miss++;
724 zone_pcp(zonelist->zones[0], cpu)->numa_foreign++;
725 }
726 if (pg == NODE_DATA(numa_node_id()))
727 p->local_node++;
728 else
729 p->other_node++;
730#endif
731}
732
733/* 712/*
734 * Free a 0-order page 713 * Free a 0-order page
735 */ 714 */
@@ -750,7 +729,7 @@ static void fastcall free_hot_cold_page(struct page *page, int cold)
750 729
751 pcp = &zone_pcp(zone, get_cpu())->pcp[cold]; 730 pcp = &zone_pcp(zone, get_cpu())->pcp[cold];
752 local_irq_save(flags); 731 local_irq_save(flags);
753 __inc_page_state(pgfree); 732 __count_vm_event(PGFREE);
754 list_add(&page->lru, &pcp->list); 733 list_add(&page->lru, &pcp->list);
755 pcp->count++; 734 pcp->count++;
756 if (pcp->count >= pcp->high) { 735 if (pcp->count >= pcp->high) {
@@ -826,8 +805,8 @@ again:
826 goto failed; 805 goto failed;
827 } 806 }
828 807
829 __mod_page_state_zone(zone, pgalloc, 1 << order); 808 __count_zone_vm_events(PGALLOC, zone, 1 << order);
830 zone_statistics(zonelist, zone, cpu); 809 zone_statistics(zonelist, zone);
831 local_irq_restore(flags); 810 local_irq_restore(flags);
832 put_cpu(); 811 put_cpu();
833 812
@@ -1231,141 +1210,6 @@ static void show_node(struct zone *zone)
1231#define show_node(zone) do { } while (0) 1210#define show_node(zone) do { } while (0)
1232#endif 1211#endif
1233 1212
1234/*
1235 * Accumulate the page_state information across all CPUs.
1236 * The result is unavoidably approximate - it can change
1237 * during and after execution of this function.
1238 */
1239static DEFINE_PER_CPU(struct page_state, page_states) = {0};
1240
1241atomic_t nr_pagecache = ATOMIC_INIT(0);
1242EXPORT_SYMBOL(nr_pagecache);
1243#ifdef CONFIG_SMP
1244DEFINE_PER_CPU(long, nr_pagecache_local) = 0;
1245#endif
1246
1247static void __get_page_state(struct page_state *ret, int nr, cpumask_t *cpumask)
1248{
1249 unsigned cpu;
1250
1251 memset(ret, 0, nr * sizeof(unsigned long));
1252 cpus_and(*cpumask, *cpumask, cpu_online_map);
1253
1254 for_each_cpu_mask(cpu, *cpumask) {
1255 unsigned long *in;
1256 unsigned long *out;
1257 unsigned off;
1258 unsigned next_cpu;
1259
1260 in = (unsigned long *)&per_cpu(page_states, cpu);
1261
1262 next_cpu = next_cpu(cpu, *cpumask);
1263 if (likely(next_cpu < NR_CPUS))
1264 prefetch(&per_cpu(page_states, next_cpu));
1265
1266 out = (unsigned long *)ret;
1267 for (off = 0; off < nr; off++)
1268 *out++ += *in++;
1269 }
1270}
1271
1272void get_page_state_node(struct page_state *ret, int node)
1273{
1274 int nr;
1275 cpumask_t mask = node_to_cpumask(node);
1276
1277 nr = offsetof(struct page_state, GET_PAGE_STATE_LAST);
1278 nr /= sizeof(unsigned long);
1279
1280 __get_page_state(ret, nr+1, &mask);
1281}
1282
1283void get_page_state(struct page_state *ret)
1284{
1285 int nr;
1286 cpumask_t mask = CPU_MASK_ALL;
1287
1288 nr = offsetof(struct page_state, GET_PAGE_STATE_LAST);
1289 nr /= sizeof(unsigned long);
1290
1291 __get_page_state(ret, nr + 1, &mask);
1292}
1293
1294void get_full_page_state(struct page_state *ret)
1295{
1296 cpumask_t mask = CPU_MASK_ALL;
1297
1298 __get_page_state(ret, sizeof(*ret) / sizeof(unsigned long), &mask);
1299}
1300
1301unsigned long read_page_state_offset(unsigned long offset)
1302{
1303 unsigned long ret = 0;
1304 int cpu;
1305
1306 for_each_online_cpu(cpu) {
1307 unsigned long in;
1308
1309 in = (unsigned long)&per_cpu(page_states, cpu) + offset;
1310 ret += *((unsigned long *)in);
1311 }
1312 return ret;
1313}
1314
1315void __mod_page_state_offset(unsigned long offset, unsigned long delta)
1316{
1317 void *ptr;
1318
1319 ptr = &__get_cpu_var(page_states);
1320 *(unsigned long *)(ptr + offset) += delta;
1321}
1322EXPORT_SYMBOL(__mod_page_state_offset);
1323
1324void mod_page_state_offset(unsigned long offset, unsigned long delta)
1325{
1326 unsigned long flags;
1327 void *ptr;
1328
1329 local_irq_save(flags);
1330 ptr = &__get_cpu_var(page_states);
1331 *(unsigned long *)(ptr + offset) += delta;
1332 local_irq_restore(flags);
1333}
1334EXPORT_SYMBOL(mod_page_state_offset);
1335
1336void __get_zone_counts(unsigned long *active, unsigned long *inactive,
1337 unsigned long *free, struct pglist_data *pgdat)
1338{
1339 struct zone *zones = pgdat->node_zones;
1340 int i;
1341
1342 *active = 0;
1343 *inactive = 0;
1344 *free = 0;
1345 for (i = 0; i < MAX_NR_ZONES; i++) {
1346 *active += zones[i].nr_active;
1347 *inactive += zones[i].nr_inactive;
1348 *free += zones[i].free_pages;
1349 }
1350}
1351
1352void get_zone_counts(unsigned long *active,
1353 unsigned long *inactive, unsigned long *free)
1354{
1355 struct pglist_data *pgdat;
1356
1357 *active = 0;
1358 *inactive = 0;
1359 *free = 0;
1360 for_each_online_pgdat(pgdat) {
1361 unsigned long l, m, n;
1362 __get_zone_counts(&l, &m, &n, pgdat);
1363 *active += l;
1364 *inactive += m;
1365 *free += n;
1366 }
1367}
1368
1369void si_meminfo(struct sysinfo *val) 1213void si_meminfo(struct sysinfo *val)
1370{ 1214{
1371 val->totalram = totalram_pages; 1215 val->totalram = totalram_pages;
@@ -1406,7 +1250,6 @@ void si_meminfo_node(struct sysinfo *val, int nid)
1406 */ 1250 */
1407void show_free_areas(void) 1251void show_free_areas(void)
1408{ 1252{
1409 struct page_state ps;
1410 int cpu, temperature; 1253 int cpu, temperature;
1411 unsigned long active; 1254 unsigned long active;
1412 unsigned long inactive; 1255 unsigned long inactive;
@@ -1438,7 +1281,6 @@ void show_free_areas(void)
1438 } 1281 }
1439 } 1282 }
1440 1283
1441 get_page_state(&ps);
1442 get_zone_counts(&active, &inactive, &free); 1284 get_zone_counts(&active, &inactive, &free);
1443 1285
1444 printk("Free pages: %11ukB (%ukB HighMem)\n", 1286 printk("Free pages: %11ukB (%ukB HighMem)\n",
@@ -1449,13 +1291,13 @@ void show_free_areas(void)
1449 "unstable:%lu free:%u slab:%lu mapped:%lu pagetables:%lu\n", 1291 "unstable:%lu free:%u slab:%lu mapped:%lu pagetables:%lu\n",
1450 active, 1292 active,
1451 inactive, 1293 inactive,
1452 ps.nr_dirty, 1294 global_page_state(NR_FILE_DIRTY),
1453 ps.nr_writeback, 1295 global_page_state(NR_WRITEBACK),
1454 ps.nr_unstable, 1296 global_page_state(NR_UNSTABLE_NFS),
1455 nr_free_pages(), 1297 nr_free_pages(),
1456 ps.nr_slab, 1298 global_page_state(NR_SLAB),
1457 ps.nr_mapped, 1299 global_page_state(NR_FILE_MAPPED),
1458 ps.nr_page_table_pages); 1300 global_page_state(NR_PAGETABLE));
1459 1301
1460 for_each_zone(zone) { 1302 for_each_zone(zone) {
1461 int i; 1303 int i;
@@ -2180,6 +2022,7 @@ static void __meminit free_area_init_core(struct pglist_data *pgdat,
2180 zone->nr_scan_inactive = 0; 2022 zone->nr_scan_inactive = 0;
2181 zone->nr_active = 0; 2023 zone->nr_active = 0;
2182 zone->nr_inactive = 0; 2024 zone->nr_inactive = 0;
2025 zap_zone_vm_stats(zone);
2183 atomic_set(&zone->reclaim_in_progress, 0); 2026 atomic_set(&zone->reclaim_in_progress, 0);
2184 if (!size) 2027 if (!size)
2185 continue; 2028 continue;
@@ -2253,307 +2096,18 @@ void __init free_area_init(unsigned long *zones_size)
2253 __pa(PAGE_OFFSET) >> PAGE_SHIFT, NULL); 2096 __pa(PAGE_OFFSET) >> PAGE_SHIFT, NULL);
2254} 2097}
2255 2098
2256#ifdef CONFIG_PROC_FS
2257
2258#include <linux/seq_file.h>
2259
2260static void *frag_start(struct seq_file *m, loff_t *pos)
2261{
2262 pg_data_t *pgdat;
2263 loff_t node = *pos;
2264 for (pgdat = first_online_pgdat();
2265 pgdat && node;
2266 pgdat = next_online_pgdat(pgdat))
2267 --node;
2268
2269 return pgdat;
2270}
2271
2272static void *frag_next(struct seq_file *m, void *arg, loff_t *pos)
2273{
2274 pg_data_t *pgdat = (pg_data_t *)arg;
2275
2276 (*pos)++;
2277 return next_online_pgdat(pgdat);
2278}
2279
2280static void frag_stop(struct seq_file *m, void *arg)
2281{
2282}
2283
2284/*
2285 * This walks the free areas for each zone.
2286 */
2287static int frag_show(struct seq_file *m, void *arg)
2288{
2289 pg_data_t *pgdat = (pg_data_t *)arg;
2290 struct zone *zone;
2291 struct zone *node_zones = pgdat->node_zones;
2292 unsigned long flags;
2293 int order;
2294
2295 for (zone = node_zones; zone - node_zones < MAX_NR_ZONES; ++zone) {
2296 if (!populated_zone(zone))
2297 continue;
2298
2299 spin_lock_irqsave(&zone->lock, flags);
2300 seq_printf(m, "Node %d, zone %8s ", pgdat->node_id, zone->name);
2301 for (order = 0; order < MAX_ORDER; ++order)
2302 seq_printf(m, "%6lu ", zone->free_area[order].nr_free);
2303 spin_unlock_irqrestore(&zone->lock, flags);
2304 seq_putc(m, '\n');
2305 }
2306 return 0;
2307}
2308
2309struct seq_operations fragmentation_op = {
2310 .start = frag_start,
2311 .next = frag_next,
2312 .stop = frag_stop,
2313 .show = frag_show,
2314};
2315
2316/*
2317 * Output information about zones in @pgdat.
2318 */
2319static int zoneinfo_show(struct seq_file *m, void *arg)
2320{
2321 pg_data_t *pgdat = arg;
2322 struct zone *zone;
2323 struct zone *node_zones = pgdat->node_zones;
2324 unsigned long flags;
2325
2326 for (zone = node_zones; zone - node_zones < MAX_NR_ZONES; zone++) {
2327 int i;
2328
2329 if (!populated_zone(zone))
2330 continue;
2331
2332 spin_lock_irqsave(&zone->lock, flags);
2333 seq_printf(m, "Node %d, zone %8s", pgdat->node_id, zone->name);
2334 seq_printf(m,
2335 "\n pages free %lu"
2336 "\n min %lu"
2337 "\n low %lu"
2338 "\n high %lu"
2339 "\n active %lu"
2340 "\n inactive %lu"
2341 "\n scanned %lu (a: %lu i: %lu)"
2342 "\n spanned %lu"
2343 "\n present %lu",
2344 zone->free_pages,
2345 zone->pages_min,
2346 zone->pages_low,
2347 zone->pages_high,
2348 zone->nr_active,
2349 zone->nr_inactive,
2350 zone->pages_scanned,
2351 zone->nr_scan_active, zone->nr_scan_inactive,
2352 zone->spanned_pages,
2353 zone->present_pages);
2354 seq_printf(m,
2355 "\n protection: (%lu",
2356 zone->lowmem_reserve[0]);
2357 for (i = 1; i < ARRAY_SIZE(zone->lowmem_reserve); i++)
2358 seq_printf(m, ", %lu", zone->lowmem_reserve[i]);
2359 seq_printf(m,
2360 ")"
2361 "\n pagesets");
2362 for_each_online_cpu(i) {
2363 struct per_cpu_pageset *pageset;
2364 int j;
2365
2366 pageset = zone_pcp(zone, i);
2367 for (j = 0; j < ARRAY_SIZE(pageset->pcp); j++) {
2368 if (pageset->pcp[j].count)
2369 break;
2370 }
2371 if (j == ARRAY_SIZE(pageset->pcp))
2372 continue;
2373 for (j = 0; j < ARRAY_SIZE(pageset->pcp); j++) {
2374 seq_printf(m,
2375 "\n cpu: %i pcp: %i"
2376 "\n count: %i"
2377 "\n high: %i"
2378 "\n batch: %i",
2379 i, j,
2380 pageset->pcp[j].count,
2381 pageset->pcp[j].high,
2382 pageset->pcp[j].batch);
2383 }
2384#ifdef CONFIG_NUMA
2385 seq_printf(m,
2386 "\n numa_hit: %lu"
2387 "\n numa_miss: %lu"
2388 "\n numa_foreign: %lu"
2389 "\n interleave_hit: %lu"
2390 "\n local_node: %lu"
2391 "\n other_node: %lu",
2392 pageset->numa_hit,
2393 pageset->numa_miss,
2394 pageset->numa_foreign,
2395 pageset->interleave_hit,
2396 pageset->local_node,
2397 pageset->other_node);
2398#endif
2399 }
2400 seq_printf(m,
2401 "\n all_unreclaimable: %u"
2402 "\n prev_priority: %i"
2403 "\n temp_priority: %i"
2404 "\n start_pfn: %lu",
2405 zone->all_unreclaimable,
2406 zone->prev_priority,
2407 zone->temp_priority,
2408 zone->zone_start_pfn);
2409 spin_unlock_irqrestore(&zone->lock, flags);
2410 seq_putc(m, '\n');
2411 }
2412 return 0;
2413}
2414
2415struct seq_operations zoneinfo_op = {
2416 .start = frag_start, /* iterate over all zones. The same as in
2417 * fragmentation. */
2418 .next = frag_next,
2419 .stop = frag_stop,
2420 .show = zoneinfo_show,
2421};
2422
2423static char *vmstat_text[] = {
2424 "nr_dirty",
2425 "nr_writeback",
2426 "nr_unstable",
2427 "nr_page_table_pages",
2428 "nr_mapped",
2429 "nr_slab",
2430
2431 "pgpgin",
2432 "pgpgout",
2433 "pswpin",
2434 "pswpout",
2435
2436 "pgalloc_high",
2437 "pgalloc_normal",
2438 "pgalloc_dma32",
2439 "pgalloc_dma",
2440
2441 "pgfree",
2442 "pgactivate",
2443 "pgdeactivate",
2444
2445 "pgfault",
2446 "pgmajfault",
2447
2448 "pgrefill_high",
2449 "pgrefill_normal",
2450 "pgrefill_dma32",
2451 "pgrefill_dma",
2452
2453 "pgsteal_high",
2454 "pgsteal_normal",
2455 "pgsteal_dma32",
2456 "pgsteal_dma",
2457
2458 "pgscan_kswapd_high",
2459 "pgscan_kswapd_normal",
2460 "pgscan_kswapd_dma32",
2461 "pgscan_kswapd_dma",
2462
2463 "pgscan_direct_high",
2464 "pgscan_direct_normal",
2465 "pgscan_direct_dma32",
2466 "pgscan_direct_dma",
2467
2468 "pginodesteal",
2469 "slabs_scanned",
2470 "kswapd_steal",
2471 "kswapd_inodesteal",
2472 "pageoutrun",
2473 "allocstall",
2474
2475 "pgrotated",
2476 "nr_bounce",
2477};
2478
2479static void *vmstat_start(struct seq_file *m, loff_t *pos)
2480{
2481 struct page_state *ps;
2482
2483 if (*pos >= ARRAY_SIZE(vmstat_text))
2484 return NULL;
2485
2486 ps = kmalloc(sizeof(*ps), GFP_KERNEL);
2487 m->private = ps;
2488 if (!ps)
2489 return ERR_PTR(-ENOMEM);
2490 get_full_page_state(ps);
2491 ps->pgpgin /= 2; /* sectors -> kbytes */
2492 ps->pgpgout /= 2;
2493 return (unsigned long *)ps + *pos;
2494}
2495
2496static void *vmstat_next(struct seq_file *m, void *arg, loff_t *pos)
2497{
2498 (*pos)++;
2499 if (*pos >= ARRAY_SIZE(vmstat_text))
2500 return NULL;
2501 return (unsigned long *)m->private + *pos;
2502}
2503
2504static int vmstat_show(struct seq_file *m, void *arg)
2505{
2506 unsigned long *l = arg;
2507 unsigned long off = l - (unsigned long *)m->private;
2508
2509 seq_printf(m, "%s %lu\n", vmstat_text[off], *l);
2510 return 0;
2511}
2512
2513static void vmstat_stop(struct seq_file *m, void *arg)
2514{
2515 kfree(m->private);
2516 m->private = NULL;
2517}
2518
2519struct seq_operations vmstat_op = {
2520 .start = vmstat_start,
2521 .next = vmstat_next,
2522 .stop = vmstat_stop,
2523 .show = vmstat_show,
2524};
2525
2526#endif /* CONFIG_PROC_FS */
2527
2528#ifdef CONFIG_HOTPLUG_CPU 2099#ifdef CONFIG_HOTPLUG_CPU
2529static int page_alloc_cpu_notify(struct notifier_block *self, 2100static int page_alloc_cpu_notify(struct notifier_block *self,
2530 unsigned long action, void *hcpu) 2101 unsigned long action, void *hcpu)
2531{ 2102{
2532 int cpu = (unsigned long)hcpu; 2103 int cpu = (unsigned long)hcpu;
2533 long *count;
2534 unsigned long *src, *dest;
2535 2104
2536 if (action == CPU_DEAD) { 2105 if (action == CPU_DEAD) {
2537 int i;
2538
2539 /* Drain local pagecache count. */
2540 count = &per_cpu(nr_pagecache_local, cpu);
2541 atomic_add(*count, &nr_pagecache);
2542 *count = 0;
2543 local_irq_disable(); 2106 local_irq_disable();
2544 __drain_pages(cpu); 2107 __drain_pages(cpu);
2545 2108 vm_events_fold_cpu(cpu);
2546 /* Add dead cpu's page_states to our own. */
2547 dest = (unsigned long *)&__get_cpu_var(page_states);
2548 src = (unsigned long *)&per_cpu(page_states, cpu);
2549
2550 for (i = 0; i < sizeof(struct page_state)/sizeof(unsigned long);
2551 i++) {
2552 dest[i] += src[i];
2553 src[i] = 0;
2554 }
2555
2556 local_irq_enable(); 2109 local_irq_enable();
2110 refresh_cpu_vm_stats(cpu);
2557 } 2111 }
2558 return NOTIFY_OK; 2112 return NOTIFY_OK;
2559} 2113}
diff --git a/mm/page_io.c b/mm/page_io.c
index bb2b0d53889c..88029948d00a 100644
--- a/mm/page_io.c
+++ b/mm/page_io.c
@@ -101,7 +101,7 @@ int swap_writepage(struct page *page, struct writeback_control *wbc)
101 } 101 }
102 if (wbc->sync_mode == WB_SYNC_ALL) 102 if (wbc->sync_mode == WB_SYNC_ALL)
103 rw |= (1 << BIO_RW_SYNC); 103 rw |= (1 << BIO_RW_SYNC);
104 inc_page_state(pswpout); 104 count_vm_event(PSWPOUT);
105 set_page_writeback(page); 105 set_page_writeback(page);
106 unlock_page(page); 106 unlock_page(page);
107 submit_bio(rw, bio); 107 submit_bio(rw, bio);
@@ -123,7 +123,7 @@ int swap_readpage(struct file *file, struct page *page)
123 ret = -ENOMEM; 123 ret = -ENOMEM;
124 goto out; 124 goto out;
125 } 125 }
126 inc_page_state(pswpin); 126 count_vm_event(PSWPIN);
127 submit_bio(READ, bio); 127 submit_bio(READ, bio);
128out: 128out:
129 return ret; 129 return ret;
diff --git a/mm/rmap.c b/mm/rmap.c
index e76909e880ca..40158b59729e 100644
--- a/mm/rmap.c
+++ b/mm/rmap.c
@@ -455,7 +455,7 @@ static void __page_set_anon_rmap(struct page *page,
455 * nr_mapped state can be updated without turning off 455 * nr_mapped state can be updated without turning off
456 * interrupts because it is not modified via interrupt. 456 * interrupts because it is not modified via interrupt.
457 */ 457 */
458 __inc_page_state(nr_mapped); 458 __inc_zone_page_state(page, NR_ANON_PAGES);
459} 459}
460 460
461/** 461/**
@@ -499,7 +499,7 @@ void page_add_new_anon_rmap(struct page *page,
499void page_add_file_rmap(struct page *page) 499void page_add_file_rmap(struct page *page)
500{ 500{
501 if (atomic_inc_and_test(&page->_mapcount)) 501 if (atomic_inc_and_test(&page->_mapcount))
502 __inc_page_state(nr_mapped); 502 __inc_zone_page_state(page, NR_FILE_MAPPED);
503} 503}
504 504
505/** 505/**
@@ -531,7 +531,8 @@ void page_remove_rmap(struct page *page)
531 */ 531 */
532 if (page_test_and_clear_dirty(page)) 532 if (page_test_and_clear_dirty(page))
533 set_page_dirty(page); 533 set_page_dirty(page);
534 __dec_page_state(nr_mapped); 534 __dec_zone_page_state(page,
535 PageAnon(page) ? NR_ANON_PAGES : NR_FILE_MAPPED);
535 } 536 }
536} 537}
537 538
diff --git a/mm/shmem.c b/mm/shmem.c
index b14ff817d162..a9c09e0ba709 100644
--- a/mm/shmem.c
+++ b/mm/shmem.c
@@ -1045,12 +1045,12 @@ repeat:
1045 swappage = lookup_swap_cache(swap); 1045 swappage = lookup_swap_cache(swap);
1046 if (!swappage) { 1046 if (!swappage) {
1047 shmem_swp_unmap(entry); 1047 shmem_swp_unmap(entry);
1048 spin_unlock(&info->lock);
1049 /* here we actually do the io */ 1048 /* here we actually do the io */
1050 if (type && *type == VM_FAULT_MINOR) { 1049 if (type && *type == VM_FAULT_MINOR) {
1051 inc_page_state(pgmajfault); 1050 __count_vm_event(PGMAJFAULT);
1052 *type = VM_FAULT_MAJOR; 1051 *type = VM_FAULT_MAJOR;
1053 } 1052 }
1053 spin_unlock(&info->lock);
1054 swappage = shmem_swapin(info, swap, idx); 1054 swappage = shmem_swapin(info, swap, idx);
1055 if (!swappage) { 1055 if (!swappage) {
1056 spin_lock(&info->lock); 1056 spin_lock(&info->lock);
diff --git a/mm/slab.c b/mm/slab.c
index 233e39d14caf..3936af344542 100644
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -309,6 +309,13 @@ struct kmem_list3 __initdata initkmem_list3[NUM_INIT_LISTS];
309#define SIZE_AC 1 309#define SIZE_AC 1
310#define SIZE_L3 (1 + MAX_NUMNODES) 310#define SIZE_L3 (1 + MAX_NUMNODES)
311 311
312static int drain_freelist(struct kmem_cache *cache,
313 struct kmem_list3 *l3, int tofree);
314static void free_block(struct kmem_cache *cachep, void **objpp, int len,
315 int node);
316static void enable_cpucache(struct kmem_cache *cachep);
317static void cache_reap(void *unused);
318
312/* 319/*
313 * This function must be completely optimized away if a constant is passed to 320 * This function must be completely optimized away if a constant is passed to
314 * it. Mostly the same as what is in linux/slab.h except it returns an index. 321 * it. Mostly the same as what is in linux/slab.h except it returns an index.
@@ -456,7 +463,7 @@ struct kmem_cache {
456#define STATS_DEC_ACTIVE(x) ((x)->num_active--) 463#define STATS_DEC_ACTIVE(x) ((x)->num_active--)
457#define STATS_INC_ALLOCED(x) ((x)->num_allocations++) 464#define STATS_INC_ALLOCED(x) ((x)->num_allocations++)
458#define STATS_INC_GROWN(x) ((x)->grown++) 465#define STATS_INC_GROWN(x) ((x)->grown++)
459#define STATS_INC_REAPED(x) ((x)->reaped++) 466#define STATS_ADD_REAPED(x,y) ((x)->reaped += (y))
460#define STATS_SET_HIGH(x) \ 467#define STATS_SET_HIGH(x) \
461 do { \ 468 do { \
462 if ((x)->num_active > (x)->high_mark) \ 469 if ((x)->num_active > (x)->high_mark) \
@@ -480,7 +487,7 @@ struct kmem_cache {
480#define STATS_DEC_ACTIVE(x) do { } while (0) 487#define STATS_DEC_ACTIVE(x) do { } while (0)
481#define STATS_INC_ALLOCED(x) do { } while (0) 488#define STATS_INC_ALLOCED(x) do { } while (0)
482#define STATS_INC_GROWN(x) do { } while (0) 489#define STATS_INC_GROWN(x) do { } while (0)
483#define STATS_INC_REAPED(x) do { } while (0) 490#define STATS_ADD_REAPED(x,y) do { } while (0)
484#define STATS_SET_HIGH(x) do { } while (0) 491#define STATS_SET_HIGH(x) do { } while (0)
485#define STATS_INC_ERR(x) do { } while (0) 492#define STATS_INC_ERR(x) do { } while (0)
486#define STATS_INC_NODEALLOCS(x) do { } while (0) 493#define STATS_INC_NODEALLOCS(x) do { } while (0)
@@ -700,12 +707,6 @@ int slab_is_available(void)
700 707
701static DEFINE_PER_CPU(struct work_struct, reap_work); 708static DEFINE_PER_CPU(struct work_struct, reap_work);
702 709
703static void free_block(struct kmem_cache *cachep, void **objpp, int len,
704 int node);
705static void enable_cpucache(struct kmem_cache *cachep);
706static void cache_reap(void *unused);
707static int __node_shrink(struct kmem_cache *cachep, int node);
708
709static inline struct array_cache *cpu_cache_get(struct kmem_cache *cachep) 710static inline struct array_cache *cpu_cache_get(struct kmem_cache *cachep)
710{ 711{
711 return cachep->array[smp_processor_id()]; 712 return cachep->array[smp_processor_id()];
@@ -1241,10 +1242,7 @@ free_array_cache:
1241 l3 = cachep->nodelists[node]; 1242 l3 = cachep->nodelists[node];
1242 if (!l3) 1243 if (!l3)
1243 continue; 1244 continue;
1244 spin_lock_irq(&l3->list_lock); 1245 drain_freelist(cachep, l3, l3->free_objects);
1245 /* free slabs belonging to this node */
1246 __node_shrink(cachep, node);
1247 spin_unlock_irq(&l3->list_lock);
1248 } 1246 }
1249 mutex_unlock(&cache_chain_mutex); 1247 mutex_unlock(&cache_chain_mutex);
1250 break; 1248 break;
@@ -1507,7 +1505,7 @@ static void *kmem_getpages(struct kmem_cache *cachep, gfp_t flags, int nodeid)
1507 nr_pages = (1 << cachep->gfporder); 1505 nr_pages = (1 << cachep->gfporder);
1508 if (cachep->flags & SLAB_RECLAIM_ACCOUNT) 1506 if (cachep->flags & SLAB_RECLAIM_ACCOUNT)
1509 atomic_add(nr_pages, &slab_reclaim_pages); 1507 atomic_add(nr_pages, &slab_reclaim_pages);
1510 add_page_state(nr_slab, nr_pages); 1508 add_zone_page_state(page_zone(page), NR_SLAB, nr_pages);
1511 for (i = 0; i < nr_pages; i++) 1509 for (i = 0; i < nr_pages; i++)
1512 __SetPageSlab(page + i); 1510 __SetPageSlab(page + i);
1513 return page_address(page); 1511 return page_address(page);
@@ -1522,12 +1520,12 @@ static void kmem_freepages(struct kmem_cache *cachep, void *addr)
1522 struct page *page = virt_to_page(addr); 1520 struct page *page = virt_to_page(addr);
1523 const unsigned long nr_freed = i; 1521 const unsigned long nr_freed = i;
1524 1522
1523 sub_zone_page_state(page_zone(page), NR_SLAB, nr_freed);
1525 while (i--) { 1524 while (i--) {
1526 BUG_ON(!PageSlab(page)); 1525 BUG_ON(!PageSlab(page));
1527 __ClearPageSlab(page); 1526 __ClearPageSlab(page);
1528 page++; 1527 page++;
1529 } 1528 }
1530 sub_page_state(nr_slab, nr_freed);
1531 if (current->reclaim_state) 1529 if (current->reclaim_state)
1532 current->reclaim_state->reclaimed_slab += nr_freed; 1530 current->reclaim_state->reclaimed_slab += nr_freed;
1533 free_pages((unsigned long)addr, cachep->gfporder); 1531 free_pages((unsigned long)addr, cachep->gfporder);
@@ -2248,32 +2246,45 @@ static void drain_cpu_caches(struct kmem_cache *cachep)
2248 } 2246 }
2249} 2247}
2250 2248
2251static int __node_shrink(struct kmem_cache *cachep, int node) 2249/*
2250 * Remove slabs from the list of free slabs.
2251 * Specify the number of slabs to drain in tofree.
2252 *
2253 * Returns the actual number of slabs released.
2254 */
2255static int drain_freelist(struct kmem_cache *cache,
2256 struct kmem_list3 *l3, int tofree)
2252{ 2257{
2258 struct list_head *p;
2259 int nr_freed;
2253 struct slab *slabp; 2260 struct slab *slabp;
2254 struct kmem_list3 *l3 = cachep->nodelists[node];
2255 int ret;
2256 2261
2257 for (;;) { 2262 nr_freed = 0;
2258 struct list_head *p; 2263 while (nr_freed < tofree && !list_empty(&l3->slabs_free)) {
2259 2264
2265 spin_lock_irq(&l3->list_lock);
2260 p = l3->slabs_free.prev; 2266 p = l3->slabs_free.prev;
2261 if (p == &l3->slabs_free) 2267 if (p == &l3->slabs_free) {
2262 break; 2268 spin_unlock_irq(&l3->list_lock);
2269 goto out;
2270 }
2263 2271
2264 slabp = list_entry(l3->slabs_free.prev, struct slab, list); 2272 slabp = list_entry(p, struct slab, list);
2265#if DEBUG 2273#if DEBUG
2266 BUG_ON(slabp->inuse); 2274 BUG_ON(slabp->inuse);
2267#endif 2275#endif
2268 list_del(&slabp->list); 2276 list_del(&slabp->list);
2269 2277 /*
2270 l3->free_objects -= cachep->num; 2278 * Safe to drop the lock. The slab is no longer linked
2279 * to the cache.
2280 */
2281 l3->free_objects -= cache->num;
2271 spin_unlock_irq(&l3->list_lock); 2282 spin_unlock_irq(&l3->list_lock);
2272 slab_destroy(cachep, slabp); 2283 slab_destroy(cache, slabp);
2273 spin_lock_irq(&l3->list_lock); 2284 nr_freed++;
2274 } 2285 }
2275 ret = !list_empty(&l3->slabs_full) || !list_empty(&l3->slabs_partial); 2286out:
2276 return ret; 2287 return nr_freed;
2277} 2288}
2278 2289
2279static int __cache_shrink(struct kmem_cache *cachep) 2290static int __cache_shrink(struct kmem_cache *cachep)
@@ -2286,11 +2297,13 @@ static int __cache_shrink(struct kmem_cache *cachep)
2286 check_irq_on(); 2297 check_irq_on();
2287 for_each_online_node(i) { 2298 for_each_online_node(i) {
2288 l3 = cachep->nodelists[i]; 2299 l3 = cachep->nodelists[i];
2289 if (l3) { 2300 if (!l3)
2290 spin_lock_irq(&l3->list_lock); 2301 continue;
2291 ret += __node_shrink(cachep, i); 2302
2292 spin_unlock_irq(&l3->list_lock); 2303 drain_freelist(cachep, l3, l3->free_objects);
2293 } 2304
2305 ret += !list_empty(&l3->slabs_full) ||
2306 !list_empty(&l3->slabs_partial);
2294 } 2307 }
2295 return (ret ? 1 : 0); 2308 return (ret ? 1 : 0);
2296} 2309}
@@ -3694,10 +3707,6 @@ static void cache_reap(void *unused)
3694 } 3707 }
3695 3708
3696 list_for_each_entry(searchp, &cache_chain, next) { 3709 list_for_each_entry(searchp, &cache_chain, next) {
3697 struct list_head *p;
3698 int tofree;
3699 struct slab *slabp;
3700
3701 check_irq_on(); 3710 check_irq_on();
3702 3711
3703 /* 3712 /*
@@ -3722,47 +3731,22 @@ static void cache_reap(void *unused)
3722 3731
3723 drain_array(searchp, l3, l3->shared, 0, node); 3732 drain_array(searchp, l3, l3->shared, 0, node);
3724 3733
3725 if (l3->free_touched) { 3734 if (l3->free_touched)
3726 l3->free_touched = 0; 3735 l3->free_touched = 0;
3727 goto next; 3736 else {
3728 } 3737 int freed;
3729
3730 tofree = (l3->free_limit + 5 * searchp->num - 1) /
3731 (5 * searchp->num);
3732 do {
3733 /*
3734 * Do not lock if there are no free blocks.
3735 */
3736 if (list_empty(&l3->slabs_free))
3737 break;
3738
3739 spin_lock_irq(&l3->list_lock);
3740 p = l3->slabs_free.next;
3741 if (p == &(l3->slabs_free)) {
3742 spin_unlock_irq(&l3->list_lock);
3743 break;
3744 }
3745 3738
3746 slabp = list_entry(p, struct slab, list); 3739 freed = drain_freelist(searchp, l3, (l3->free_limit +
3747 BUG_ON(slabp->inuse); 3740 5 * searchp->num - 1) / (5 * searchp->num));
3748 list_del(&slabp->list); 3741 STATS_ADD_REAPED(searchp, freed);
3749 STATS_INC_REAPED(searchp); 3742 }
3750
3751 /*
3752 * Safe to drop the lock. The slab is no longer linked
3753 * to the cache. searchp cannot disappear, we hold
3754 * cache_chain_lock
3755 */
3756 l3->free_objects -= searchp->num;
3757 spin_unlock_irq(&l3->list_lock);
3758 slab_destroy(searchp, slabp);
3759 } while (--tofree > 0);
3760next: 3743next:
3761 cond_resched(); 3744 cond_resched();
3762 } 3745 }
3763 check_irq_on(); 3746 check_irq_on();
3764 mutex_unlock(&cache_chain_mutex); 3747 mutex_unlock(&cache_chain_mutex);
3765 next_reap_node(); 3748 next_reap_node();
3749 refresh_cpu_vm_stats(smp_processor_id());
3766 /* Set up the next iteration */ 3750 /* Set up the next iteration */
3767 schedule_delayed_work(&__get_cpu_var(reap_work), REAPTIMEOUT_CPUC); 3751 schedule_delayed_work(&__get_cpu_var(reap_work), REAPTIMEOUT_CPUC);
3768} 3752}
diff --git a/mm/swap.c b/mm/swap.c
index 990868afc1c6..8fd095c4ae51 100644
--- a/mm/swap.c
+++ b/mm/swap.c
@@ -87,7 +87,7 @@ int rotate_reclaimable_page(struct page *page)
87 spin_lock_irqsave(&zone->lru_lock, flags); 87 spin_lock_irqsave(&zone->lru_lock, flags);
88 if (PageLRU(page) && !PageActive(page)) { 88 if (PageLRU(page) && !PageActive(page)) {
89 list_move_tail(&page->lru, &zone->inactive_list); 89 list_move_tail(&page->lru, &zone->inactive_list);
90 inc_page_state(pgrotated); 90 __count_vm_event(PGROTATED);
91 } 91 }
92 if (!test_clear_page_writeback(page)) 92 if (!test_clear_page_writeback(page))
93 BUG(); 93 BUG();
@@ -107,7 +107,7 @@ void fastcall activate_page(struct page *page)
107 del_page_from_inactive_list(zone, page); 107 del_page_from_inactive_list(zone, page);
108 SetPageActive(page); 108 SetPageActive(page);
109 add_page_to_active_list(zone, page); 109 add_page_to_active_list(zone, page);
110 inc_page_state(pgactivate); 110 __count_vm_event(PGACTIVATE);
111 } 111 }
112 spin_unlock_irq(&zone->lru_lock); 112 spin_unlock_irq(&zone->lru_lock);
113} 113}
diff --git a/mm/swap_state.c b/mm/swap_state.c
index 7535211bb495..fccbd9bba77b 100644
--- a/mm/swap_state.c
+++ b/mm/swap_state.c
@@ -87,7 +87,7 @@ static int __add_to_swap_cache(struct page *page, swp_entry_t entry,
87 SetPageSwapCache(page); 87 SetPageSwapCache(page);
88 set_page_private(page, entry.val); 88 set_page_private(page, entry.val);
89 total_swapcache_pages++; 89 total_swapcache_pages++;
90 pagecache_acct(1); 90 __inc_zone_page_state(page, NR_FILE_PAGES);
91 } 91 }
92 write_unlock_irq(&swapper_space.tree_lock); 92 write_unlock_irq(&swapper_space.tree_lock);
93 radix_tree_preload_end(); 93 radix_tree_preload_end();
@@ -132,7 +132,7 @@ void __delete_from_swap_cache(struct page *page)
132 set_page_private(page, 0); 132 set_page_private(page, 0);
133 ClearPageSwapCache(page); 133 ClearPageSwapCache(page);
134 total_swapcache_pages--; 134 total_swapcache_pages--;
135 pagecache_acct(-1); 135 __dec_zone_page_state(page, NR_FILE_PAGES);
136 INC_CACHE_INFO(del_total); 136 INC_CACHE_INFO(del_total);
137} 137}
138 138
diff --git a/mm/vmscan.c b/mm/vmscan.c
index eeacb0d695c3..ff2ebe9458a3 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -47,8 +47,6 @@ struct scan_control {
47 /* Incremented by the number of inactive pages that were scanned */ 47 /* Incremented by the number of inactive pages that were scanned */
48 unsigned long nr_scanned; 48 unsigned long nr_scanned;
49 49
50 unsigned long nr_mapped; /* From page_state */
51
52 /* This context's GFP mask */ 50 /* This context's GFP mask */
53 gfp_t gfp_mask; 51 gfp_t gfp_mask;
54 52
@@ -217,7 +215,7 @@ unsigned long shrink_slab(unsigned long scanned, gfp_t gfp_mask,
217 break; 215 break;
218 if (shrink_ret < nr_before) 216 if (shrink_ret < nr_before)
219 ret += nr_before - shrink_ret; 217 ret += nr_before - shrink_ret;
220 mod_page_state(slabs_scanned, this_scan); 218 count_vm_events(SLABS_SCANNED, this_scan);
221 total_scan -= this_scan; 219 total_scan -= this_scan;
222 220
223 cond_resched(); 221 cond_resched();
@@ -571,7 +569,7 @@ keep:
571 list_splice(&ret_pages, page_list); 569 list_splice(&ret_pages, page_list);
572 if (pagevec_count(&freed_pvec)) 570 if (pagevec_count(&freed_pvec))
573 __pagevec_release_nonlru(&freed_pvec); 571 __pagevec_release_nonlru(&freed_pvec);
574 mod_page_state(pgactivate, pgactivate); 572 count_vm_events(PGACTIVATE, pgactivate);
575 return nr_reclaimed; 573 return nr_reclaimed;
576} 574}
577 575
@@ -661,11 +659,11 @@ static unsigned long shrink_inactive_list(unsigned long max_scan,
661 nr_reclaimed += nr_freed; 659 nr_reclaimed += nr_freed;
662 local_irq_disable(); 660 local_irq_disable();
663 if (current_is_kswapd()) { 661 if (current_is_kswapd()) {
664 __mod_page_state_zone(zone, pgscan_kswapd, nr_scan); 662 __count_zone_vm_events(PGSCAN_KSWAPD, zone, nr_scan);
665 __mod_page_state(kswapd_steal, nr_freed); 663 __count_vm_events(KSWAPD_STEAL, nr_freed);
666 } else 664 } else
667 __mod_page_state_zone(zone, pgscan_direct, nr_scan); 665 __count_zone_vm_events(PGSCAN_DIRECT, zone, nr_scan);
668 __mod_page_state_zone(zone, pgsteal, nr_freed); 666 __count_vm_events(PGACTIVATE, nr_freed);
669 667
670 if (nr_taken == 0) 668 if (nr_taken == 0)
671 goto done; 669 goto done;
@@ -744,7 +742,9 @@ static void shrink_active_list(unsigned long nr_pages, struct zone *zone,
744 * how much memory 742 * how much memory
745 * is mapped. 743 * is mapped.
746 */ 744 */
747 mapped_ratio = (sc->nr_mapped * 100) / vm_total_pages; 745 mapped_ratio = ((global_page_state(NR_FILE_MAPPED) +
746 global_page_state(NR_ANON_PAGES)) * 100) /
747 vm_total_pages;
748 748
749 /* 749 /*
750 * Now decide how much we really want to unmap some pages. The 750 * Now decide how much we really want to unmap some pages. The
@@ -841,11 +841,10 @@ static void shrink_active_list(unsigned long nr_pages, struct zone *zone,
841 } 841 }
842 } 842 }
843 zone->nr_active += pgmoved; 843 zone->nr_active += pgmoved;
844 spin_unlock(&zone->lru_lock);
845 844
846 __mod_page_state_zone(zone, pgrefill, pgscanned); 845 __count_zone_vm_events(PGREFILL, zone, pgscanned);
847 __mod_page_state(pgdeactivate, pgdeactivate); 846 __count_vm_events(PGDEACTIVATE, pgdeactivate);
848 local_irq_enable(); 847 spin_unlock_irq(&zone->lru_lock);
849 848
850 pagevec_release(&pvec); 849 pagevec_release(&pvec);
851} 850}
@@ -977,7 +976,7 @@ unsigned long try_to_free_pages(struct zone **zones, gfp_t gfp_mask)
977 .swappiness = vm_swappiness, 976 .swappiness = vm_swappiness,
978 }; 977 };
979 978
980 inc_page_state(allocstall); 979 count_vm_event(ALLOCSTALL);
981 980
982 for (i = 0; zones[i] != NULL; i++) { 981 for (i = 0; zones[i] != NULL; i++) {
983 struct zone *zone = zones[i]; 982 struct zone *zone = zones[i];
@@ -990,7 +989,6 @@ unsigned long try_to_free_pages(struct zone **zones, gfp_t gfp_mask)
990 } 989 }
991 990
992 for (priority = DEF_PRIORITY; priority >= 0; priority--) { 991 for (priority = DEF_PRIORITY; priority >= 0; priority--) {
993 sc.nr_mapped = read_page_state(nr_mapped);
994 sc.nr_scanned = 0; 992 sc.nr_scanned = 0;
995 if (!priority) 993 if (!priority)
996 disable_swap_token(); 994 disable_swap_token();
@@ -1075,9 +1073,7 @@ loop_again:
1075 total_scanned = 0; 1073 total_scanned = 0;
1076 nr_reclaimed = 0; 1074 nr_reclaimed = 0;
1077 sc.may_writepage = !laptop_mode; 1075 sc.may_writepage = !laptop_mode;
1078 sc.nr_mapped = read_page_state(nr_mapped); 1076 count_vm_event(PAGEOUTRUN);
1079
1080 inc_page_state(pageoutrun);
1081 1077
1082 for (i = 0; i < pgdat->nr_zones; i++) { 1078 for (i = 0; i < pgdat->nr_zones; i++) {
1083 struct zone *zone = pgdat->node_zones + i; 1079 struct zone *zone = pgdat->node_zones + i;
@@ -1365,7 +1361,7 @@ unsigned long shrink_all_memory(unsigned long nr_pages)
1365 for_each_zone(zone) 1361 for_each_zone(zone)
1366 lru_pages += zone->nr_active + zone->nr_inactive; 1362 lru_pages += zone->nr_active + zone->nr_inactive;
1367 1363
1368 nr_slab = read_page_state(nr_slab); 1364 nr_slab = global_page_state(NR_SLAB);
1369 /* If slab caches are huge, it's better to hit them first */ 1365 /* If slab caches are huge, it's better to hit them first */
1370 while (nr_slab >= lru_pages) { 1366 while (nr_slab >= lru_pages) {
1371 reclaim_state.reclaimed_slab = 0; 1367 reclaim_state.reclaimed_slab = 0;
@@ -1407,9 +1403,7 @@ unsigned long shrink_all_memory(unsigned long nr_pages)
1407 for (prio = DEF_PRIORITY; prio >= 0; prio--) { 1403 for (prio = DEF_PRIORITY; prio >= 0; prio--) {
1408 unsigned long nr_to_scan = nr_pages - ret; 1404 unsigned long nr_to_scan = nr_pages - ret;
1409 1405
1410 sc.nr_mapped = read_page_state(nr_mapped);
1411 sc.nr_scanned = 0; 1406 sc.nr_scanned = 0;
1412
1413 ret += shrink_all_zones(nr_to_scan, prio, pass, &sc); 1407 ret += shrink_all_zones(nr_to_scan, prio, pass, &sc);
1414 if (ret >= nr_pages) 1408 if (ret >= nr_pages)
1415 goto out; 1409 goto out;
@@ -1523,11 +1517,6 @@ int zone_reclaim_mode __read_mostly;
1523#define RECLAIM_SLAB (1<<3) /* Do a global slab shrink if the zone is out of memory */ 1517#define RECLAIM_SLAB (1<<3) /* Do a global slab shrink if the zone is out of memory */
1524 1518
1525/* 1519/*
1526 * Mininum time between zone reclaim scans
1527 */
1528int zone_reclaim_interval __read_mostly = 30*HZ;
1529
1530/*
1531 * Priority for ZONE_RECLAIM. This determines the fraction of pages 1520 * Priority for ZONE_RECLAIM. This determines the fraction of pages
1532 * of a node considered for each zone_reclaim. 4 scans 1/16th of 1521 * of a node considered for each zone_reclaim. 4 scans 1/16th of
1533 * a zone. 1522 * a zone.
@@ -1548,7 +1537,6 @@ static int __zone_reclaim(struct zone *zone, gfp_t gfp_mask, unsigned int order)
1548 struct scan_control sc = { 1537 struct scan_control sc = {
1549 .may_writepage = !!(zone_reclaim_mode & RECLAIM_WRITE), 1538 .may_writepage = !!(zone_reclaim_mode & RECLAIM_WRITE),
1550 .may_swap = !!(zone_reclaim_mode & RECLAIM_SWAP), 1539 .may_swap = !!(zone_reclaim_mode & RECLAIM_SWAP),
1551 .nr_mapped = read_page_state(nr_mapped),
1552 .swap_cluster_max = max_t(unsigned long, nr_pages, 1540 .swap_cluster_max = max_t(unsigned long, nr_pages,
1553 SWAP_CLUSTER_MAX), 1541 SWAP_CLUSTER_MAX),
1554 .gfp_mask = gfp_mask, 1542 .gfp_mask = gfp_mask,
@@ -1593,16 +1581,6 @@ static int __zone_reclaim(struct zone *zone, gfp_t gfp_mask, unsigned int order)
1593 1581
1594 p->reclaim_state = NULL; 1582 p->reclaim_state = NULL;
1595 current->flags &= ~(PF_MEMALLOC | PF_SWAPWRITE); 1583 current->flags &= ~(PF_MEMALLOC | PF_SWAPWRITE);
1596
1597 if (nr_reclaimed == 0) {
1598 /*
1599 * We were unable to reclaim enough pages to stay on node. We
1600 * now allow off node accesses for a certain time period before
1601 * trying again to reclaim pages from the local zone.
1602 */
1603 zone->last_unsuccessful_zone_reclaim = jiffies;
1604 }
1605
1606 return nr_reclaimed >= nr_pages; 1584 return nr_reclaimed >= nr_pages;
1607} 1585}
1608 1586
@@ -1612,13 +1590,17 @@ int zone_reclaim(struct zone *zone, gfp_t gfp_mask, unsigned int order)
1612 int node_id; 1590 int node_id;
1613 1591
1614 /* 1592 /*
1615 * Do not reclaim if there was a recent unsuccessful attempt at zone 1593 * Do not reclaim if there are not enough reclaimable pages in this
1616 * reclaim. In that case we let allocations go off node for the 1594 * zone that would satify this allocations.
1617 * zone_reclaim_interval. Otherwise we would scan for each off-node 1595 *
1618 * page allocation. 1596 * All unmapped pagecache pages are reclaimable.
1597 *
1598 * Both counters may be temporarily off a bit so we use
1599 * SWAP_CLUSTER_MAX as the boundary. It may also be good to
1600 * leave a few frequently used unmapped pagecache pages around.
1619 */ 1601 */
1620 if (time_before(jiffies, 1602 if (zone_page_state(zone, NR_FILE_PAGES) -
1621 zone->last_unsuccessful_zone_reclaim + zone_reclaim_interval)) 1603 zone_page_state(zone, NR_FILE_MAPPED) < SWAP_CLUSTER_MAX)
1622 return 0; 1604 return 0;
1623 1605
1624 /* 1606 /*
diff --git a/mm/vmstat.c b/mm/vmstat.c
new file mode 100644
index 000000000000..73b83d67bab6
--- /dev/null
+++ b/mm/vmstat.c
@@ -0,0 +1,614 @@
1/*
2 * linux/mm/vmstat.c
3 *
4 * Manages VM statistics
5 * Copyright (C) 1991, 1992, 1993, 1994 Linus Torvalds
6 *
7 * zoned VM statistics
8 * Copyright (C) 2006 Silicon Graphics, Inc.,
9 * Christoph Lameter <christoph@lameter.com>
10 */
11
12#include <linux/config.h>
13#include <linux/mm.h>
14#include <linux/module.h>
15
16void __get_zone_counts(unsigned long *active, unsigned long *inactive,
17 unsigned long *free, struct pglist_data *pgdat)
18{
19 struct zone *zones = pgdat->node_zones;
20 int i;
21
22 *active = 0;
23 *inactive = 0;
24 *free = 0;
25 for (i = 0; i < MAX_NR_ZONES; i++) {
26 *active += zones[i].nr_active;
27 *inactive += zones[i].nr_inactive;
28 *free += zones[i].free_pages;
29 }
30}
31
32void get_zone_counts(unsigned long *active,
33 unsigned long *inactive, unsigned long *free)
34{
35 struct pglist_data *pgdat;
36
37 *active = 0;
38 *inactive = 0;
39 *free = 0;
40 for_each_online_pgdat(pgdat) {
41 unsigned long l, m, n;
42 __get_zone_counts(&l, &m, &n, pgdat);
43 *active += l;
44 *inactive += m;
45 *free += n;
46 }
47}
48
49#ifdef CONFIG_VM_EVENT_COUNTERS
50DEFINE_PER_CPU(struct vm_event_state, vm_event_states) = {{0}};
51EXPORT_PER_CPU_SYMBOL(vm_event_states);
52
53static void sum_vm_events(unsigned long *ret, cpumask_t *cpumask)
54{
55 int cpu = 0;
56 int i;
57
58 memset(ret, 0, NR_VM_EVENT_ITEMS * sizeof(unsigned long));
59
60 cpu = first_cpu(*cpumask);
61 while (cpu < NR_CPUS) {
62 struct vm_event_state *this = &per_cpu(vm_event_states, cpu);
63
64 cpu = next_cpu(cpu, *cpumask);
65
66 if (cpu < NR_CPUS)
67 prefetch(&per_cpu(vm_event_states, cpu));
68
69
70 for (i = 0; i < NR_VM_EVENT_ITEMS; i++)
71 ret[i] += this->event[i];
72 }
73}
74
75/*
76 * Accumulate the vm event counters across all CPUs.
77 * The result is unavoidably approximate - it can change
78 * during and after execution of this function.
79*/
80void all_vm_events(unsigned long *ret)
81{
82 sum_vm_events(ret, &cpu_online_map);
83}
84
85#ifdef CONFIG_HOTPLUG
86/*
87 * Fold the foreign cpu events into our own.
88 *
89 * This is adding to the events on one processor
90 * but keeps the global counts constant.
91 */
92void vm_events_fold_cpu(int cpu)
93{
94 struct vm_event_state *fold_state = &per_cpu(vm_event_states, cpu);
95 int i;
96
97 for (i = 0; i < NR_VM_EVENT_ITEMS; i++) {
98 count_vm_events(i, fold_state->event[i]);
99 fold_state->event[i] = 0;
100 }
101}
102#endif /* CONFIG_HOTPLUG */
103
104#endif /* CONFIG_VM_EVENT_COUNTERS */
105
106/*
107 * Manage combined zone based / global counters
108 *
109 * vm_stat contains the global counters
110 */
111atomic_long_t vm_stat[NR_VM_ZONE_STAT_ITEMS];
112EXPORT_SYMBOL(vm_stat);
113
114#ifdef CONFIG_SMP
115
116#define STAT_THRESHOLD 32
117
118/*
119 * Determine pointer to currently valid differential byte given a zone and
120 * the item number.
121 *
122 * Preemption must be off
123 */
124static inline s8 *diff_pointer(struct zone *zone, enum zone_stat_item item)
125{
126 return &zone_pcp(zone, smp_processor_id())->vm_stat_diff[item];
127}
128
129/*
130 * For use when we know that interrupts are disabled.
131 */
132void __mod_zone_page_state(struct zone *zone, enum zone_stat_item item,
133 int delta)
134{
135 s8 *p;
136 long x;
137
138 p = diff_pointer(zone, item);
139 x = delta + *p;
140
141 if (unlikely(x > STAT_THRESHOLD || x < -STAT_THRESHOLD)) {
142 zone_page_state_add(x, zone, item);
143 x = 0;
144 }
145
146 *p = x;
147}
148EXPORT_SYMBOL(__mod_zone_page_state);
149
150/*
151 * For an unknown interrupt state
152 */
153void mod_zone_page_state(struct zone *zone, enum zone_stat_item item,
154 int delta)
155{
156 unsigned long flags;
157
158 local_irq_save(flags);
159 __mod_zone_page_state(zone, item, delta);
160 local_irq_restore(flags);
161}
162EXPORT_SYMBOL(mod_zone_page_state);
163
164/*
165 * Optimized increment and decrement functions.
166 *
167 * These are only for a single page and therefore can take a struct page *
168 * argument instead of struct zone *. This allows the inclusion of the code
169 * generated for page_zone(page) into the optimized functions.
170 *
171 * No overflow check is necessary and therefore the differential can be
172 * incremented or decremented in place which may allow the compilers to
173 * generate better code.
174 *
175 * The increment or decrement is known and therefore one boundary check can
176 * be omitted.
177 *
178 * Some processors have inc/dec instructions that are atomic vs an interrupt.
179 * However, the code must first determine the differential location in a zone
180 * based on the processor number and then inc/dec the counter. There is no
181 * guarantee without disabling preemption that the processor will not change
182 * in between and therefore the atomicity vs. interrupt cannot be exploited
183 * in a useful way here.
184 */
185static void __inc_zone_state(struct zone *zone, enum zone_stat_item item)
186{
187 s8 *p = diff_pointer(zone, item);
188
189 (*p)++;
190
191 if (unlikely(*p > STAT_THRESHOLD)) {
192 zone_page_state_add(*p, zone, item);
193 *p = 0;
194 }
195}
196
197void __inc_zone_page_state(struct page *page, enum zone_stat_item item)
198{
199 __inc_zone_state(page_zone(page), item);
200}
201EXPORT_SYMBOL(__inc_zone_page_state);
202
203void __dec_zone_page_state(struct page *page, enum zone_stat_item item)
204{
205 struct zone *zone = page_zone(page);
206 s8 *p = diff_pointer(zone, item);
207
208 (*p)--;
209
210 if (unlikely(*p < -STAT_THRESHOLD)) {
211 zone_page_state_add(*p, zone, item);
212 *p = 0;
213 }
214}
215EXPORT_SYMBOL(__dec_zone_page_state);
216
217void inc_zone_state(struct zone *zone, enum zone_stat_item item)
218{
219 unsigned long flags;
220
221 local_irq_save(flags);
222 __inc_zone_state(zone, item);
223 local_irq_restore(flags);
224}
225
226void inc_zone_page_state(struct page *page, enum zone_stat_item item)
227{
228 unsigned long flags;
229 struct zone *zone;
230
231 zone = page_zone(page);
232 local_irq_save(flags);
233 __inc_zone_state(zone, item);
234 local_irq_restore(flags);
235}
236EXPORT_SYMBOL(inc_zone_page_state);
237
238void dec_zone_page_state(struct page *page, enum zone_stat_item item)
239{
240 unsigned long flags;
241 struct zone *zone;
242 s8 *p;
243
244 zone = page_zone(page);
245 local_irq_save(flags);
246 p = diff_pointer(zone, item);
247
248 (*p)--;
249
250 if (unlikely(*p < -STAT_THRESHOLD)) {
251 zone_page_state_add(*p, zone, item);
252 *p = 0;
253 }
254 local_irq_restore(flags);
255}
256EXPORT_SYMBOL(dec_zone_page_state);
257
258/*
259 * Update the zone counters for one cpu.
260 */
261void refresh_cpu_vm_stats(int cpu)
262{
263 struct zone *zone;
264 int i;
265 unsigned long flags;
266
267 for_each_zone(zone) {
268 struct per_cpu_pageset *pcp;
269
270 pcp = zone_pcp(zone, cpu);
271
272 for (i = 0; i < NR_VM_ZONE_STAT_ITEMS; i++)
273 if (pcp->vm_stat_diff[i]) {
274 local_irq_save(flags);
275 zone_page_state_add(pcp->vm_stat_diff[i],
276 zone, i);
277 pcp->vm_stat_diff[i] = 0;
278 local_irq_restore(flags);
279 }
280 }
281}
282
283static void __refresh_cpu_vm_stats(void *dummy)
284{
285 refresh_cpu_vm_stats(smp_processor_id());
286}
287
288/*
289 * Consolidate all counters.
290 *
291 * Note that the result is less inaccurate but still inaccurate
292 * if concurrent processes are allowed to run.
293 */
294void refresh_vm_stats(void)
295{
296 on_each_cpu(__refresh_cpu_vm_stats, NULL, 0, 1);
297}
298EXPORT_SYMBOL(refresh_vm_stats);
299
300#endif
301
302#ifdef CONFIG_NUMA
303/*
304 * zonelist = the list of zones passed to the allocator
305 * z = the zone from which the allocation occurred.
306 *
307 * Must be called with interrupts disabled.
308 */
309void zone_statistics(struct zonelist *zonelist, struct zone *z)
310{
311 if (z->zone_pgdat == zonelist->zones[0]->zone_pgdat) {
312 __inc_zone_state(z, NUMA_HIT);
313 } else {
314 __inc_zone_state(z, NUMA_MISS);
315 __inc_zone_state(zonelist->zones[0], NUMA_FOREIGN);
316 }
317 if (z->zone_pgdat == NODE_DATA(numa_node_id()))
318 __inc_zone_state(z, NUMA_LOCAL);
319 else
320 __inc_zone_state(z, NUMA_OTHER);
321}
322#endif
323
324#ifdef CONFIG_PROC_FS
325
326#include <linux/seq_file.h>
327
328static void *frag_start(struct seq_file *m, loff_t *pos)
329{
330 pg_data_t *pgdat;
331 loff_t node = *pos;
332 for (pgdat = first_online_pgdat();
333 pgdat && node;
334 pgdat = next_online_pgdat(pgdat))
335 --node;
336
337 return pgdat;
338}
339
340static void *frag_next(struct seq_file *m, void *arg, loff_t *pos)
341{
342 pg_data_t *pgdat = (pg_data_t *)arg;
343
344 (*pos)++;
345 return next_online_pgdat(pgdat);
346}
347
348static void frag_stop(struct seq_file *m, void *arg)
349{
350}
351
352/*
353 * This walks the free areas for each zone.
354 */
355static int frag_show(struct seq_file *m, void *arg)
356{
357 pg_data_t *pgdat = (pg_data_t *)arg;
358 struct zone *zone;
359 struct zone *node_zones = pgdat->node_zones;
360 unsigned long flags;
361 int order;
362
363 for (zone = node_zones; zone - node_zones < MAX_NR_ZONES; ++zone) {
364 if (!populated_zone(zone))
365 continue;
366
367 spin_lock_irqsave(&zone->lock, flags);
368 seq_printf(m, "Node %d, zone %8s ", pgdat->node_id, zone->name);
369 for (order = 0; order < MAX_ORDER; ++order)
370 seq_printf(m, "%6lu ", zone->free_area[order].nr_free);
371 spin_unlock_irqrestore(&zone->lock, flags);
372 seq_putc(m, '\n');
373 }
374 return 0;
375}
376
377struct seq_operations fragmentation_op = {
378 .start = frag_start,
379 .next = frag_next,
380 .stop = frag_stop,
381 .show = frag_show,
382};
383
384static char *vmstat_text[] = {
385 /* Zoned VM counters */
386 "nr_anon_pages",
387 "nr_mapped",
388 "nr_file_pages",
389 "nr_slab",
390 "nr_page_table_pages",
391 "nr_dirty",
392 "nr_writeback",
393 "nr_unstable",
394 "nr_bounce",
395
396#ifdef CONFIG_NUMA
397 "numa_hit",
398 "numa_miss",
399 "numa_foreign",
400 "numa_interleave",
401 "numa_local",
402 "numa_other",
403#endif
404
405#ifdef CONFIG_VM_EVENT_COUNTERS
406 "pgpgin",
407 "pgpgout",
408 "pswpin",
409 "pswpout",
410
411 "pgalloc_dma",
412 "pgalloc_dma32",
413 "pgalloc_normal",
414 "pgalloc_high",
415
416 "pgfree",
417 "pgactivate",
418 "pgdeactivate",
419
420 "pgfault",
421 "pgmajfault",
422
423 "pgrefill_dma",
424 "pgrefill_dma32",
425 "pgrefill_normal",
426 "pgrefill_high",
427
428 "pgsteal_dma",
429 "pgsteal_dma32",
430 "pgsteal_normal",
431 "pgsteal_high",
432
433 "pgscan_kswapd_dma",
434 "pgscan_kswapd_dma32",
435 "pgscan_kswapd_normal",
436 "pgscan_kswapd_high",
437
438 "pgscan_direct_dma",
439 "pgscan_direct_dma32",
440 "pgscan_direct_normal",
441 "pgscan_direct_high",
442
443 "pginodesteal",
444 "slabs_scanned",
445 "kswapd_steal",
446 "kswapd_inodesteal",
447 "pageoutrun",
448 "allocstall",
449
450 "pgrotated",
451#endif
452};
453
454/*
455 * Output information about zones in @pgdat.
456 */
457static int zoneinfo_show(struct seq_file *m, void *arg)
458{
459 pg_data_t *pgdat = arg;
460 struct zone *zone;
461 struct zone *node_zones = pgdat->node_zones;
462 unsigned long flags;
463
464 for (zone = node_zones; zone - node_zones < MAX_NR_ZONES; zone++) {
465 int i;
466
467 if (!populated_zone(zone))
468 continue;
469
470 spin_lock_irqsave(&zone->lock, flags);
471 seq_printf(m, "Node %d, zone %8s", pgdat->node_id, zone->name);
472 seq_printf(m,
473 "\n pages free %lu"
474 "\n min %lu"
475 "\n low %lu"
476 "\n high %lu"
477 "\n active %lu"
478 "\n inactive %lu"
479 "\n scanned %lu (a: %lu i: %lu)"
480 "\n spanned %lu"
481 "\n present %lu",
482 zone->free_pages,
483 zone->pages_min,
484 zone->pages_low,
485 zone->pages_high,
486 zone->nr_active,
487 zone->nr_inactive,
488 zone->pages_scanned,
489 zone->nr_scan_active, zone->nr_scan_inactive,
490 zone->spanned_pages,
491 zone->present_pages);
492
493 for (i = 0; i < NR_VM_ZONE_STAT_ITEMS; i++)
494 seq_printf(m, "\n %-12s %lu", vmstat_text[i],
495 zone_page_state(zone, i));
496
497 seq_printf(m,
498 "\n protection: (%lu",
499 zone->lowmem_reserve[0]);
500 for (i = 1; i < ARRAY_SIZE(zone->lowmem_reserve); i++)
501 seq_printf(m, ", %lu", zone->lowmem_reserve[i]);
502 seq_printf(m,
503 ")"
504 "\n pagesets");
505 for_each_online_cpu(i) {
506 struct per_cpu_pageset *pageset;
507 int j;
508
509 pageset = zone_pcp(zone, i);
510 for (j = 0; j < ARRAY_SIZE(pageset->pcp); j++) {
511 if (pageset->pcp[j].count)
512 break;
513 }
514 if (j == ARRAY_SIZE(pageset->pcp))
515 continue;
516 for (j = 0; j < ARRAY_SIZE(pageset->pcp); j++) {
517 seq_printf(m,
518 "\n cpu: %i pcp: %i"
519 "\n count: %i"
520 "\n high: %i"
521 "\n batch: %i",
522 i, j,
523 pageset->pcp[j].count,
524 pageset->pcp[j].high,
525 pageset->pcp[j].batch);
526 }
527 }
528 seq_printf(m,
529 "\n all_unreclaimable: %u"
530 "\n prev_priority: %i"
531 "\n temp_priority: %i"
532 "\n start_pfn: %lu",
533 zone->all_unreclaimable,
534 zone->prev_priority,
535 zone->temp_priority,
536 zone->zone_start_pfn);
537 spin_unlock_irqrestore(&zone->lock, flags);
538 seq_putc(m, '\n');
539 }
540 return 0;
541}
542
543struct seq_operations zoneinfo_op = {
544 .start = frag_start, /* iterate over all zones. The same as in
545 * fragmentation. */
546 .next = frag_next,
547 .stop = frag_stop,
548 .show = zoneinfo_show,
549};
550
551static void *vmstat_start(struct seq_file *m, loff_t *pos)
552{
553 unsigned long *v;
554#ifdef CONFIG_VM_EVENT_COUNTERS
555 unsigned long *e;
556#endif
557 int i;
558
559 if (*pos >= ARRAY_SIZE(vmstat_text))
560 return NULL;
561
562#ifdef CONFIG_VM_EVENT_COUNTERS
563 v = kmalloc(NR_VM_ZONE_STAT_ITEMS * sizeof(unsigned long)
564 + sizeof(struct vm_event_state), GFP_KERNEL);
565#else
566 v = kmalloc(NR_VM_ZONE_STAT_ITEMS * sizeof(unsigned long),
567 GFP_KERNEL);
568#endif
569 m->private = v;
570 if (!v)
571 return ERR_PTR(-ENOMEM);
572 for (i = 0; i < NR_VM_ZONE_STAT_ITEMS; i++)
573 v[i] = global_page_state(i);
574#ifdef CONFIG_VM_EVENT_COUNTERS
575 e = v + NR_VM_ZONE_STAT_ITEMS;
576 all_vm_events(e);
577 e[PGPGIN] /= 2; /* sectors -> kbytes */
578 e[PGPGOUT] /= 2;
579#endif
580 return v + *pos;
581}
582
583static void *vmstat_next(struct seq_file *m, void *arg, loff_t *pos)
584{
585 (*pos)++;
586 if (*pos >= ARRAY_SIZE(vmstat_text))
587 return NULL;
588 return (unsigned long *)m->private + *pos;
589}
590
591static int vmstat_show(struct seq_file *m, void *arg)
592{
593 unsigned long *l = arg;
594 unsigned long off = l - (unsigned long *)m->private;
595
596 seq_printf(m, "%s %lu\n", vmstat_text[off], *l);
597 return 0;
598}
599
600static void vmstat_stop(struct seq_file *m, void *arg)
601{
602 kfree(m->private);
603 m->private = NULL;
604}
605
606struct seq_operations vmstat_op = {
607 .start = vmstat_start,
608 .next = vmstat_next,
609 .stop = vmstat_stop,
610 .show = vmstat_show,
611};
612
613#endif /* CONFIG_PROC_FS */
614
diff --git a/net/atm/Makefile b/net/atm/Makefile
index d5818751f6ba..89656d6c0b90 100644
--- a/net/atm/Makefile
+++ b/net/atm/Makefile
@@ -2,7 +2,7 @@
2# Makefile for the ATM Protocol Families. 2# Makefile for the ATM Protocol Families.
3# 3#
4 4
5atm-y := addr.o pvc.o signaling.o svc.o ioctl.o common.o atm_misc.o raw.o resources.o 5atm-y := addr.o pvc.o signaling.o svc.o ioctl.o common.o atm_misc.o raw.o resources.o atm_sysfs.o
6mpoa-objs := mpc.o mpoa_caches.o mpoa_proc.o 6mpoa-objs := mpc.o mpoa_caches.o mpoa_proc.o
7 7
8obj-$(CONFIG_ATM) += atm.o 8obj-$(CONFIG_ATM) += atm.o
diff --git a/net/atm/atm_sysfs.c b/net/atm/atm_sysfs.c
new file mode 100644
index 000000000000..5df4b9a068bb
--- /dev/null
+++ b/net/atm/atm_sysfs.c
@@ -0,0 +1,176 @@
1/* ATM driver model support. */
2
3#include <linux/config.h>
4#include <linux/kernel.h>
5#include <linux/init.h>
6#include <linux/kobject.h>
7#include <linux/atmdev.h>
8#include "common.h"
9#include "resources.h"
10
11#define to_atm_dev(cldev) container_of(cldev, struct atm_dev, class_dev)
12
13static ssize_t show_type(struct class_device *cdev, char *buf)
14{
15 struct atm_dev *adev = to_atm_dev(cdev);
16 return sprintf(buf, "%s\n", adev->type);
17}
18
19static ssize_t show_address(struct class_device *cdev, char *buf)
20{
21 char *pos = buf;
22 struct atm_dev *adev = to_atm_dev(cdev);
23 int i;
24
25 for (i = 0; i < (ESI_LEN - 1); i++)
26 pos += sprintf(pos, "%02x:", adev->esi[i]);
27 pos += sprintf(pos, "%02x\n", adev->esi[i]);
28
29 return pos - buf;
30}
31
32static ssize_t show_atmaddress(struct class_device *cdev, char *buf)
33{
34 unsigned long flags;
35 char *pos = buf;
36 struct atm_dev *adev = to_atm_dev(cdev);
37 struct atm_dev_addr *aaddr;
38 int bin[] = { 1, 2, 10, 6, 1 }, *fmt = bin;
39 int i, j;
40
41 spin_lock_irqsave(&adev->lock, flags);
42 list_for_each_entry(aaddr, &adev->local, entry) {
43 for(i = 0, j = 0; i < ATM_ESA_LEN; ++i, ++j) {
44 if (j == *fmt) {
45 pos += sprintf(pos, ".");
46 ++fmt;
47 j = 0;
48 }
49 pos += sprintf(pos, "%02x", aaddr->addr.sas_addr.prv[i]);
50 }
51 pos += sprintf(pos, "\n");
52 }
53 spin_unlock_irqrestore(&adev->lock, flags);
54
55 return pos - buf;
56}
57
58static ssize_t show_carrier(struct class_device *cdev, char *buf)
59{
60 char *pos = buf;
61 struct atm_dev *adev = to_atm_dev(cdev);
62
63 pos += sprintf(pos, "%d\n",
64 adev->signal == ATM_PHY_SIG_LOST ? 0 : 1);
65
66 return pos - buf;
67}
68
69static ssize_t show_link_rate(struct class_device *cdev, char *buf)
70{
71 char *pos = buf;
72 struct atm_dev *adev = to_atm_dev(cdev);
73 int link_rate;
74
75 /* show the link rate, not the data rate */
76 switch (adev->link_rate) {
77 case ATM_OC3_PCR:
78 link_rate = 155520000;
79 break;
80 case ATM_OC12_PCR:
81 link_rate = 622080000;
82 break;
83 case ATM_25_PCR:
84 link_rate = 25600000;
85 break;
86 default:
87 link_rate = adev->link_rate * 8 * 53;
88 }
89 pos += sprintf(pos, "%d\n", link_rate);
90
91 return pos - buf;
92}
93
94static CLASS_DEVICE_ATTR(address, S_IRUGO, show_address, NULL);
95static CLASS_DEVICE_ATTR(atmaddress, S_IRUGO, show_atmaddress, NULL);
96static CLASS_DEVICE_ATTR(carrier, S_IRUGO, show_carrier, NULL);
97static CLASS_DEVICE_ATTR(type, S_IRUGO, show_type, NULL);
98static CLASS_DEVICE_ATTR(link_rate, S_IRUGO, show_link_rate, NULL);
99
100static struct class_device_attribute *atm_attrs[] = {
101 &class_device_attr_atmaddress,
102 &class_device_attr_address,
103 &class_device_attr_carrier,
104 &class_device_attr_type,
105 &class_device_attr_link_rate,
106 NULL
107};
108
109static int atm_uevent(struct class_device *cdev, char **envp, int num_envp, char *buf, int size)
110{
111 struct atm_dev *adev;
112 int i = 0, len = 0;
113
114 if (!cdev)
115 return -ENODEV;
116
117 adev = to_atm_dev(cdev);
118 if (!adev)
119 return -ENODEV;
120
121 if (add_uevent_var(envp, num_envp, &i, buf, size, &len,
122 "NAME=%s%d", adev->type, adev->number))
123 return -ENOMEM;
124
125 envp[i] = NULL;
126 return 0;
127}
128
129static void atm_release(struct class_device *cdev)
130{
131 struct atm_dev *adev = to_atm_dev(cdev);
132
133 kfree(adev);
134}
135
136static struct class atm_class = {
137 .name = "atm",
138 .release = atm_release,
139 .uevent = atm_uevent,
140};
141
142int atm_register_sysfs(struct atm_dev *adev)
143{
144 struct class_device *cdev = &adev->class_dev;
145 int i, err;
146
147 cdev->class = &atm_class;
148 class_set_devdata(cdev, adev);
149
150 snprintf(cdev->class_id, BUS_ID_SIZE, "%s%d", adev->type, adev->number);
151 err = class_device_register(cdev);
152 if (err < 0)
153 return err;
154
155 for (i = 0; atm_attrs[i]; i++)
156 class_device_create_file(cdev, atm_attrs[i]);
157
158 return 0;
159}
160
161void atm_unregister_sysfs(struct atm_dev *adev)
162{
163 struct class_device *cdev = &adev->class_dev;
164
165 class_device_del(cdev);
166}
167
168int __init atm_sysfs_init(void)
169{
170 return class_register(&atm_class);
171}
172
173void __exit atm_sysfs_exit(void)
174{
175 class_unregister(&atm_class);
176}
diff --git a/net/atm/common.c b/net/atm/common.c
index ae002220fa99..35ab1a61e831 100644
--- a/net/atm/common.c
+++ b/net/atm/common.c
@@ -791,8 +791,14 @@ static int __init atm_init(void)
791 printk(KERN_ERR "atm_proc_init() failed with %d\n",error); 791 printk(KERN_ERR "atm_proc_init() failed with %d\n",error);
792 goto out_atmsvc_exit; 792 goto out_atmsvc_exit;
793 } 793 }
794 if ((error = atm_sysfs_init()) < 0) {
795 printk(KERN_ERR "atm_sysfs_init() failed with %d\n",error);
796 goto out_atmproc_exit;
797 }
794out: 798out:
795 return error; 799 return error;
800out_atmproc_exit:
801 atm_proc_exit();
796out_atmsvc_exit: 802out_atmsvc_exit:
797 atmsvc_exit(); 803 atmsvc_exit();
798out_atmpvc_exit: 804out_atmpvc_exit:
@@ -805,6 +811,7 @@ out_unregister_vcc_proto:
805static void __exit atm_exit(void) 811static void __exit atm_exit(void)
806{ 812{
807 atm_proc_exit(); 813 atm_proc_exit();
814 atm_sysfs_exit();
808 atmsvc_exit(); 815 atmsvc_exit();
809 atmpvc_exit(); 816 atmpvc_exit();
810 proto_unregister(&vcc_proto); 817 proto_unregister(&vcc_proto);
diff --git a/net/atm/common.h b/net/atm/common.h
index 4887c317cefe..a422da7788fb 100644
--- a/net/atm/common.h
+++ b/net/atm/common.h
@@ -28,6 +28,8 @@ int atmpvc_init(void);
28void atmpvc_exit(void); 28void atmpvc_exit(void);
29int atmsvc_init(void); 29int atmsvc_init(void);
30void atmsvc_exit(void); 30void atmsvc_exit(void);
31int atm_sysfs_init(void);
32void atm_sysfs_exit(void);
31 33
32#ifdef CONFIG_PROC_FS 34#ifdef CONFIG_PROC_FS
33int atm_proc_init(void); 35int atm_proc_init(void);
diff --git a/net/atm/resources.c b/net/atm/resources.c
index 18ac80698f83..534baf704056 100644
--- a/net/atm/resources.c
+++ b/net/atm/resources.c
@@ -114,14 +114,27 @@ struct atm_dev *atm_dev_register(const char *type, const struct atmdev_ops *ops,
114 printk(KERN_ERR "atm_dev_register: " 114 printk(KERN_ERR "atm_dev_register: "
115 "atm_proc_dev_register failed for dev %s\n", 115 "atm_proc_dev_register failed for dev %s\n",
116 type); 116 type);
117 mutex_unlock(&atm_dev_mutex); 117 goto out_fail;
118 kfree(dev); 118 }
119 return NULL; 119
120 if (atm_register_sysfs(dev) < 0) {
121 printk(KERN_ERR "atm_dev_register: "
122 "atm_register_sysfs failed for dev %s\n",
123 type);
124 atm_proc_dev_deregister(dev);
125 goto out_fail;
120 } 126 }
127
121 list_add_tail(&dev->dev_list, &atm_devs); 128 list_add_tail(&dev->dev_list, &atm_devs);
122 mutex_unlock(&atm_dev_mutex);
123 129
130out:
131 mutex_unlock(&atm_dev_mutex);
124 return dev; 132 return dev;
133
134out_fail:
135 kfree(dev);
136 dev = NULL;
137 goto out;
125} 138}
126 139
127 140
@@ -140,6 +153,7 @@ void atm_dev_deregister(struct atm_dev *dev)
140 mutex_unlock(&atm_dev_mutex); 153 mutex_unlock(&atm_dev_mutex);
141 154
142 atm_dev_release_vccs(dev); 155 atm_dev_release_vccs(dev);
156 atm_unregister_sysfs(dev);
143 atm_proc_dev_deregister(dev); 157 atm_proc_dev_deregister(dev);
144 158
145 atm_dev_put(dev); 159 atm_dev_put(dev);
diff --git a/net/atm/resources.h b/net/atm/resources.h
index ac7222fee7a8..644989980c37 100644
--- a/net/atm/resources.h
+++ b/net/atm/resources.h
@@ -43,4 +43,6 @@ static inline void atm_proc_dev_deregister(struct atm_dev *dev)
43 43
44#endif /* CONFIG_PROC_FS */ 44#endif /* CONFIG_PROC_FS */
45 45
46int atm_register_sysfs(struct atm_dev *adev);
47void atm_unregister_sysfs(struct atm_dev *adev);
46#endif 48#endif
diff --git a/net/bridge/br_device.c b/net/bridge/br_device.c
index 2afdc7c0736c..f8dbcee80eba 100644
--- a/net/bridge/br_device.c
+++ b/net/bridge/br_device.c
@@ -184,6 +184,6 @@ void br_dev_setup(struct net_device *dev)
184 dev->set_mac_address = br_set_mac_address; 184 dev->set_mac_address = br_set_mac_address;
185 dev->priv_flags = IFF_EBRIDGE; 185 dev->priv_flags = IFF_EBRIDGE;
186 186
187 dev->features = NETIF_F_SG | NETIF_F_FRAGLIST 187 dev->features = NETIF_F_SG | NETIF_F_FRAGLIST | NETIF_F_HIGHDMA |
188 | NETIF_F_HIGHDMA | NETIF_F_TSO | NETIF_F_NO_CSUM; 188 NETIF_F_TSO | NETIF_F_NO_CSUM | NETIF_F_GSO_ROBUST;
189} 189}
diff --git a/net/bridge/br_if.c b/net/bridge/br_if.c
index 07956ecf545e..f55ef682ef84 100644
--- a/net/bridge/br_if.c
+++ b/net/bridge/br_if.c
@@ -392,7 +392,8 @@ void br_features_recompute(struct net_bridge *br)
392 features &= feature; 392 features &= feature;
393 } 393 }
394 394
395 br->dev->features = features | checksum | NETIF_F_LLTX; 395 br->dev->features = features | checksum | NETIF_F_LLTX |
396 NETIF_F_GSO_ROBUST;
396} 397}
397 398
398/* called with RTNL */ 399/* called with RTNL */
diff --git a/net/core/dev.c b/net/core/dev.c
index f1c52cbd6ef7..08976b08df5b 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -1190,11 +1190,14 @@ out:
1190/** 1190/**
1191 * skb_gso_segment - Perform segmentation on skb. 1191 * skb_gso_segment - Perform segmentation on skb.
1192 * @skb: buffer to segment 1192 * @skb: buffer to segment
1193 * @sg: whether scatter-gather is supported on the target. 1193 * @features: features for the output path (see dev->features)
1194 * 1194 *
1195 * This function segments the given skb and returns a list of segments. 1195 * This function segments the given skb and returns a list of segments.
1196 *
1197 * It may return NULL if the skb requires no segmentation. This is
1198 * only possible when GSO is used for verifying header integrity.
1196 */ 1199 */
1197struct sk_buff *skb_gso_segment(struct sk_buff *skb, int sg) 1200struct sk_buff *skb_gso_segment(struct sk_buff *skb, int features)
1198{ 1201{
1199 struct sk_buff *segs = ERR_PTR(-EPROTONOSUPPORT); 1202 struct sk_buff *segs = ERR_PTR(-EPROTONOSUPPORT);
1200 struct packet_type *ptype; 1203 struct packet_type *ptype;
@@ -1210,12 +1213,14 @@ struct sk_buff *skb_gso_segment(struct sk_buff *skb, int sg)
1210 rcu_read_lock(); 1213 rcu_read_lock();
1211 list_for_each_entry_rcu(ptype, &ptype_base[ntohs(type) & 15], list) { 1214 list_for_each_entry_rcu(ptype, &ptype_base[ntohs(type) & 15], list) {
1212 if (ptype->type == type && !ptype->dev && ptype->gso_segment) { 1215 if (ptype->type == type && !ptype->dev && ptype->gso_segment) {
1213 segs = ptype->gso_segment(skb, sg); 1216 segs = ptype->gso_segment(skb, features);
1214 break; 1217 break;
1215 } 1218 }
1216 } 1219 }
1217 rcu_read_unlock(); 1220 rcu_read_unlock();
1218 1221
1222 __skb_push(skb, skb->data - skb->mac.raw);
1223
1219 return segs; 1224 return segs;
1220} 1225}
1221 1226
@@ -1234,7 +1239,6 @@ void netdev_rx_csum_fault(struct net_device *dev)
1234EXPORT_SYMBOL(netdev_rx_csum_fault); 1239EXPORT_SYMBOL(netdev_rx_csum_fault);
1235#endif 1240#endif
1236 1241
1237#ifdef CONFIG_HIGHMEM
1238/* Actually, we should eliminate this check as soon as we know, that: 1242/* Actually, we should eliminate this check as soon as we know, that:
1239 * 1. IOMMU is present and allows to map all the memory. 1243 * 1. IOMMU is present and allows to map all the memory.
1240 * 2. No high memory really exists on this machine. 1244 * 2. No high memory really exists on this machine.
@@ -1242,6 +1246,7 @@ EXPORT_SYMBOL(netdev_rx_csum_fault);
1242 1246
1243static inline int illegal_highdma(struct net_device *dev, struct sk_buff *skb) 1247static inline int illegal_highdma(struct net_device *dev, struct sk_buff *skb)
1244{ 1248{
1249#ifdef CONFIG_HIGHMEM
1245 int i; 1250 int i;
1246 1251
1247 if (dev->features & NETIF_F_HIGHDMA) 1252 if (dev->features & NETIF_F_HIGHDMA)
@@ -1251,11 +1256,9 @@ static inline int illegal_highdma(struct net_device *dev, struct sk_buff *skb)
1251 if (PageHighMem(skb_shinfo(skb)->frags[i].page)) 1256 if (PageHighMem(skb_shinfo(skb)->frags[i].page))
1252 return 1; 1257 return 1;
1253 1258
1259#endif
1254 return 0; 1260 return 0;
1255} 1261}
1256#else
1257#define illegal_highdma(dev, skb) (0)
1258#endif
1259 1262
1260struct dev_gso_cb { 1263struct dev_gso_cb {
1261 void (*destructor)(struct sk_buff *skb); 1264 void (*destructor)(struct sk_buff *skb);
@@ -1291,9 +1294,15 @@ static int dev_gso_segment(struct sk_buff *skb)
1291{ 1294{
1292 struct net_device *dev = skb->dev; 1295 struct net_device *dev = skb->dev;
1293 struct sk_buff *segs; 1296 struct sk_buff *segs;
1297 int features = dev->features & ~(illegal_highdma(dev, skb) ?
1298 NETIF_F_SG : 0);
1299
1300 segs = skb_gso_segment(skb, features);
1301
1302 /* Verifying header integrity only. */
1303 if (!segs)
1304 return 0;
1294 1305
1295 segs = skb_gso_segment(skb, dev->features & NETIF_F_SG &&
1296 !illegal_highdma(dev, skb));
1297 if (unlikely(IS_ERR(segs))) 1306 if (unlikely(IS_ERR(segs)))
1298 return PTR_ERR(segs); 1307 return PTR_ERR(segs);
1299 1308
@@ -1310,13 +1319,17 @@ int dev_hard_start_xmit(struct sk_buff *skb, struct net_device *dev)
1310 if (netdev_nit) 1319 if (netdev_nit)
1311 dev_queue_xmit_nit(skb, dev); 1320 dev_queue_xmit_nit(skb, dev);
1312 1321
1313 if (!netif_needs_gso(dev, skb)) 1322 if (netif_needs_gso(dev, skb)) {
1314 return dev->hard_start_xmit(skb, dev); 1323 if (unlikely(dev_gso_segment(skb)))
1324 goto out_kfree_skb;
1325 if (skb->next)
1326 goto gso;
1327 }
1315 1328
1316 if (unlikely(dev_gso_segment(skb))) 1329 return dev->hard_start_xmit(skb, dev);
1317 goto out_kfree_skb;
1318 } 1330 }
1319 1331
1332gso:
1320 do { 1333 do {
1321 struct sk_buff *nskb = skb->next; 1334 struct sk_buff *nskb = skb->next;
1322 int rc; 1335 int rc;
diff --git a/net/core/rtnetlink.c b/net/core/rtnetlink.c
index 3fcfa9c59e1f..f25aac17497a 100644
--- a/net/core/rtnetlink.c
+++ b/net/core/rtnetlink.c
@@ -663,7 +663,7 @@ rtnetlink_rcv_msg(struct sk_buff *skb, struct nlmsghdr *nlh, int *errp)
663 sz_idx = type>>2; 663 sz_idx = type>>2;
664 kind = type&3; 664 kind = type&3;
665 665
666 if (kind != 2 && security_netlink_recv(skb)) { 666 if (kind != 2 && security_netlink_recv(skb, CAP_NET_ADMIN)) {
667 *errp = -EPERM; 667 *errp = -EPERM;
668 return -1; 668 return -1;
669 } 669 }
diff --git a/net/core/skbuff.c b/net/core/skbuff.c
index 6edbb90cbcec..a1c9ecf4f1e0 100644
--- a/net/core/skbuff.c
+++ b/net/core/skbuff.c
@@ -272,7 +272,7 @@ static void skb_clone_fraglist(struct sk_buff *skb)
272 skb_get(list); 272 skb_get(list);
273} 273}
274 274
275void skb_release_data(struct sk_buff *skb) 275static void skb_release_data(struct sk_buff *skb)
276{ 276{
277 if (!skb->cloned || 277 if (!skb->cloned ||
278 !atomic_sub_return(skb->nohdr ? (1 << SKB_DATAREF_SHIFT) + 1 : 1, 278 !atomic_sub_return(skb->nohdr ? (1 << SKB_DATAREF_SHIFT) + 1 : 1,
@@ -1848,13 +1848,13 @@ EXPORT_SYMBOL_GPL(skb_pull_rcsum);
1848/** 1848/**
1849 * skb_segment - Perform protocol segmentation on skb. 1849 * skb_segment - Perform protocol segmentation on skb.
1850 * @skb: buffer to segment 1850 * @skb: buffer to segment
1851 * @sg: whether scatter-gather can be used for generated segments 1851 * @features: features for the output path (see dev->features)
1852 * 1852 *
1853 * This function performs segmentation on the given skb. It returns 1853 * This function performs segmentation on the given skb. It returns
1854 * the segment at the given position. It returns NULL if there are 1854 * the segment at the given position. It returns NULL if there are
1855 * no more segments to generate, or when an error is encountered. 1855 * no more segments to generate, or when an error is encountered.
1856 */ 1856 */
1857struct sk_buff *skb_segment(struct sk_buff *skb, int sg) 1857struct sk_buff *skb_segment(struct sk_buff *skb, int features)
1858{ 1858{
1859 struct sk_buff *segs = NULL; 1859 struct sk_buff *segs = NULL;
1860 struct sk_buff *tail = NULL; 1860 struct sk_buff *tail = NULL;
@@ -1863,6 +1863,7 @@ struct sk_buff *skb_segment(struct sk_buff *skb, int sg)
1863 unsigned int offset = doffset; 1863 unsigned int offset = doffset;
1864 unsigned int headroom; 1864 unsigned int headroom;
1865 unsigned int len; 1865 unsigned int len;
1866 int sg = features & NETIF_F_SG;
1866 int nfrags = skb_shinfo(skb)->nr_frags; 1867 int nfrags = skb_shinfo(skb)->nr_frags;
1867 int err = -ENOMEM; 1868 int err = -ENOMEM;
1868 int i = 0; 1869 int i = 0;
diff --git a/net/core/sock.c b/net/core/sock.c
index 5d820c376653..204a8dec65cc 100644
--- a/net/core/sock.c
+++ b/net/core/sock.c
@@ -565,6 +565,13 @@ set_rcvbuf:
565 ret = -ENONET; 565 ret = -ENONET;
566 break; 566 break;
567 567
568 case SO_PASSSEC:
569 if (valbool)
570 set_bit(SOCK_PASSSEC, &sock->flags);
571 else
572 clear_bit(SOCK_PASSSEC, &sock->flags);
573 break;
574
568 /* We implement the SO_SNDLOWAT etc to 575 /* We implement the SO_SNDLOWAT etc to
569 not be settable (1003.1g 5.3) */ 576 not be settable (1003.1g 5.3) */
570 default: 577 default:
@@ -723,6 +730,10 @@ int sock_getsockopt(struct socket *sock, int level, int optname,
723 v.val = sk->sk_state == TCP_LISTEN; 730 v.val = sk->sk_state == TCP_LISTEN;
724 break; 731 break;
725 732
733 case SO_PASSSEC:
734 v.val = test_bit(SOCK_PASSSEC, &sock->flags) ? 1 : 0;
735 break;
736
726 case SO_PEERSEC: 737 case SO_PEERSEC:
727 return security_socket_getpeersec_stream(sock, optval, optlen, len); 738 return security_socket_getpeersec_stream(sock, optval, optlen, len);
728 739
diff --git a/net/decnet/netfilter/dn_rtmsg.c b/net/decnet/netfilter/dn_rtmsg.c
index 74133ecd7700..8b99bd33540d 100644
--- a/net/decnet/netfilter/dn_rtmsg.c
+++ b/net/decnet/netfilter/dn_rtmsg.c
@@ -107,7 +107,7 @@ static inline void dnrmg_receive_user_skb(struct sk_buff *skb)
107 if (nlh->nlmsg_len < sizeof(*nlh) || skb->len < nlh->nlmsg_len) 107 if (nlh->nlmsg_len < sizeof(*nlh) || skb->len < nlh->nlmsg_len)
108 return; 108 return;
109 109
110 if (!cap_raised(NETLINK_CB(skb).eff_cap, CAP_NET_ADMIN)) 110 if (security_netlink_recv(skb, CAP_NET_ADMIN))
111 RCV_SKB_FAIL(-EPERM); 111 RCV_SKB_FAIL(-EPERM);
112 112
113 /* Eventually we might send routing messages too */ 113 /* Eventually we might send routing messages too */
diff --git a/net/ipv4/af_inet.c b/net/ipv4/af_inet.c
index 461216b47948..8d157157bf8e 100644
--- a/net/ipv4/af_inet.c
+++ b/net/ipv4/af_inet.c
@@ -1097,7 +1097,7 @@ int inet_sk_rebuild_header(struct sock *sk)
1097 1097
1098EXPORT_SYMBOL(inet_sk_rebuild_header); 1098EXPORT_SYMBOL(inet_sk_rebuild_header);
1099 1099
1100static struct sk_buff *inet_gso_segment(struct sk_buff *skb, int sg) 1100static struct sk_buff *inet_gso_segment(struct sk_buff *skb, int features)
1101{ 1101{
1102 struct sk_buff *segs = ERR_PTR(-EINVAL); 1102 struct sk_buff *segs = ERR_PTR(-EINVAL);
1103 struct iphdr *iph; 1103 struct iphdr *iph;
@@ -1126,10 +1126,10 @@ static struct sk_buff *inet_gso_segment(struct sk_buff *skb, int sg)
1126 rcu_read_lock(); 1126 rcu_read_lock();
1127 ops = rcu_dereference(inet_protos[proto]); 1127 ops = rcu_dereference(inet_protos[proto]);
1128 if (ops && ops->gso_segment) 1128 if (ops && ops->gso_segment)
1129 segs = ops->gso_segment(skb, sg); 1129 segs = ops->gso_segment(skb, features);
1130 rcu_read_unlock(); 1130 rcu_read_unlock();
1131 1131
1132 if (IS_ERR(segs)) 1132 if (!segs || unlikely(IS_ERR(segs)))
1133 goto out; 1133 goto out;
1134 1134
1135 skb = segs; 1135 skb = segs;
diff --git a/net/ipv4/netfilter/Kconfig b/net/ipv4/netfilter/Kconfig
index e1d7f5fbc526..ef0b5aac5838 100644
--- a/net/ipv4/netfilter/Kconfig
+++ b/net/ipv4/netfilter/Kconfig
@@ -332,7 +332,7 @@ config IP_NF_MATCH_HASHLIMIT
332 help 332 help
333 This option adds a new iptables `hashlimit' match. 333 This option adds a new iptables `hashlimit' match.
334 334
335 As opposed to `limit', this match dynamically crates a hash table 335 As opposed to `limit', this match dynamically creates a hash table
336 of limit buckets, based on your selection of source/destination 336 of limit buckets, based on your selection of source/destination
337 ip addresses and/or ports. 337 ip addresses and/or ports.
338 338
diff --git a/net/ipv4/netfilter/arp_tables.c b/net/ipv4/netfilter/arp_tables.c
index d0d19192026d..ad39bf640567 100644
--- a/net/ipv4/netfilter/arp_tables.c
+++ b/net/ipv4/netfilter/arp_tables.c
@@ -1120,7 +1120,8 @@ int arpt_register_table(struct arpt_table *table,
1120 return ret; 1120 return ret;
1121 } 1121 }
1122 1122
1123 if (xt_register_table(table, &bootstrap, newinfo) != 0) { 1123 ret = xt_register_table(table, &bootstrap, newinfo);
1124 if (ret != 0) {
1124 xt_free_table_info(newinfo); 1125 xt_free_table_info(newinfo);
1125 return ret; 1126 return ret;
1126 } 1127 }
diff --git a/net/ipv4/netfilter/ip_queue.c b/net/ipv4/netfilter/ip_queue.c
index b93f0494362f..198ac36db861 100644
--- a/net/ipv4/netfilter/ip_queue.c
+++ b/net/ipv4/netfilter/ip_queue.c
@@ -457,11 +457,19 @@ dev_cmp(struct ipq_queue_entry *entry, unsigned long ifindex)
457 if (entry->info->indev) 457 if (entry->info->indev)
458 if (entry->info->indev->ifindex == ifindex) 458 if (entry->info->indev->ifindex == ifindex)
459 return 1; 459 return 1;
460
461 if (entry->info->outdev) 460 if (entry->info->outdev)
462 if (entry->info->outdev->ifindex == ifindex) 461 if (entry->info->outdev->ifindex == ifindex)
463 return 1; 462 return 1;
464 463#ifdef CONFIG_BRIDGE_NETFILTER
464 if (entry->skb->nf_bridge) {
465 if (entry->skb->nf_bridge->physindev &&
466 entry->skb->nf_bridge->physindev->ifindex == ifindex)
467 return 1;
468 if (entry->skb->nf_bridge->physoutdev &&
469 entry->skb->nf_bridge->physoutdev->ifindex == ifindex)
470 return 1;
471 }
472#endif
465 return 0; 473 return 0;
466} 474}
467 475
@@ -507,7 +515,7 @@ ipq_rcv_skb(struct sk_buff *skb)
507 if (type <= IPQM_BASE) 515 if (type <= IPQM_BASE)
508 return; 516 return;
509 517
510 if (security_netlink_recv(skb)) 518 if (security_netlink_recv(skb, CAP_NET_ADMIN))
511 RCV_SKB_FAIL(-EPERM); 519 RCV_SKB_FAIL(-EPERM);
512 520
513 write_lock_bh(&queue_lock); 521 write_lock_bh(&queue_lock);
diff --git a/net/ipv4/netfilter/ip_tables.c b/net/ipv4/netfilter/ip_tables.c
index 706c0025ec5e..7aaaf92efb59 100644
--- a/net/ipv4/netfilter/ip_tables.c
+++ b/net/ipv4/netfilter/ip_tables.c
@@ -2113,7 +2113,8 @@ int ipt_register_table(struct xt_table *table, const struct ipt_replace *repl)
2113 return ret; 2113 return ret;
2114 } 2114 }
2115 2115
2116 if (xt_register_table(table, &bootstrap, newinfo) != 0) { 2116 ret = xt_register_table(table, &bootstrap, newinfo);
2117 if (ret != 0) {
2117 xt_free_table_info(newinfo); 2118 xt_free_table_info(newinfo);
2118 return ret; 2119 return ret;
2119 } 2120 }
diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c
index c04176be7ed1..0336422c88a0 100644
--- a/net/ipv4/tcp.c
+++ b/net/ipv4/tcp.c
@@ -2145,7 +2145,7 @@ int compat_tcp_getsockopt(struct sock *sk, int level, int optname,
2145EXPORT_SYMBOL(compat_tcp_getsockopt); 2145EXPORT_SYMBOL(compat_tcp_getsockopt);
2146#endif 2146#endif
2147 2147
2148struct sk_buff *tcp_tso_segment(struct sk_buff *skb, int sg) 2148struct sk_buff *tcp_tso_segment(struct sk_buff *skb, int features)
2149{ 2149{
2150 struct sk_buff *segs = ERR_PTR(-EINVAL); 2150 struct sk_buff *segs = ERR_PTR(-EINVAL);
2151 struct tcphdr *th; 2151 struct tcphdr *th;
@@ -2166,10 +2166,14 @@ struct sk_buff *tcp_tso_segment(struct sk_buff *skb, int sg)
2166 if (!pskb_may_pull(skb, thlen)) 2166 if (!pskb_may_pull(skb, thlen))
2167 goto out; 2167 goto out;
2168 2168
2169 segs = NULL;
2170 if (skb_gso_ok(skb, features | NETIF_F_GSO_ROBUST))
2171 goto out;
2172
2169 oldlen = (u16)~skb->len; 2173 oldlen = (u16)~skb->len;
2170 __skb_pull(skb, thlen); 2174 __skb_pull(skb, thlen);
2171 2175
2172 segs = skb_segment(skb, sg); 2176 segs = skb_segment(skb, features);
2173 if (IS_ERR(segs)) 2177 if (IS_ERR(segs))
2174 goto out; 2178 goto out;
2175 2179
diff --git a/net/ipv4/tcp_diag.c b/net/ipv4/tcp_diag.c
index c148c1081880..b56399c7cc12 100644
--- a/net/ipv4/tcp_diag.c
+++ b/net/ipv4/tcp_diag.c
@@ -26,7 +26,10 @@ static void tcp_diag_get_info(struct sock *sk, struct inet_diag_msg *r,
26 const struct tcp_sock *tp = tcp_sk(sk); 26 const struct tcp_sock *tp = tcp_sk(sk);
27 struct tcp_info *info = _info; 27 struct tcp_info *info = _info;
28 28
29 r->idiag_rqueue = tp->rcv_nxt - tp->copied_seq; 29 if (sk->sk_state == TCP_LISTEN)
30 r->idiag_rqueue = sk->sk_ack_backlog;
31 else
32 r->idiag_rqueue = tp->rcv_nxt - tp->copied_seq;
30 r->idiag_wqueue = tp->write_seq - tp->snd_una; 33 r->idiag_wqueue = tp->write_seq - tp->snd_una;
31 if (info != NULL) 34 if (info != NULL)
32 tcp_get_info(sk, info); 35 tcp_get_info(sk, info);
diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
index 94fe5b1f9dcb..7fa0b4a8a389 100644
--- a/net/ipv4/tcp_input.c
+++ b/net/ipv4/tcp_input.c
@@ -4178,8 +4178,6 @@ static int tcp_rcv_synsent_state_process(struct sock *sk, struct sk_buff *skb,
4178 */ 4178 */
4179 4179
4180 TCP_ECN_rcv_synack(tp, th); 4180 TCP_ECN_rcv_synack(tp, th);
4181 if (tp->ecn_flags&TCP_ECN_OK)
4182 sock_set_flag(sk, SOCK_NO_LARGESEND);
4183 4181
4184 tp->snd_wl1 = TCP_SKB_CB(skb)->seq; 4182 tp->snd_wl1 = TCP_SKB_CB(skb)->seq;
4185 tcp_ack(sk, skb, FLAG_SLOWPATH); 4183 tcp_ack(sk, skb, FLAG_SLOWPATH);
@@ -4322,8 +4320,6 @@ discard:
4322 tp->max_window = tp->snd_wnd; 4320 tp->max_window = tp->snd_wnd;
4323 4321
4324 TCP_ECN_rcv_syn(tp, th); 4322 TCP_ECN_rcv_syn(tp, th);
4325 if (tp->ecn_flags&TCP_ECN_OK)
4326 sock_set_flag(sk, SOCK_NO_LARGESEND);
4327 4323
4328 tcp_mtup_init(sk); 4324 tcp_mtup_init(sk);
4329 tcp_sync_mss(sk, icsk->icsk_pmtu_cookie); 4325 tcp_sync_mss(sk, icsk->icsk_pmtu_cookie);
diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c
index 25ecc6e2478b..4c6ef47eb1c3 100644
--- a/net/ipv4/tcp_ipv4.c
+++ b/net/ipv4/tcp_ipv4.c
@@ -1726,7 +1726,8 @@ static void get_tcp4_sock(struct sock *sp, char *tmpbuf, int i)
1726 sprintf(tmpbuf, "%4d: %08X:%04X %08X:%04X %02X %08X:%08X %02X:%08lX " 1726 sprintf(tmpbuf, "%4d: %08X:%04X %08X:%04X %02X %08X:%08X %02X:%08lX "
1727 "%08X %5d %8d %lu %d %p %u %u %u %u %d", 1727 "%08X %5d %8d %lu %d %p %u %u %u %u %d",
1728 i, src, srcp, dest, destp, sp->sk_state, 1728 i, src, srcp, dest, destp, sp->sk_state,
1729 tp->write_seq - tp->snd_una, tp->rcv_nxt - tp->copied_seq, 1729 tp->write_seq - tp->snd_una,
1730 (sp->sk_state == TCP_LISTEN) ? sp->sk_ack_backlog : (tp->rcv_nxt - tp->copied_seq),
1730 timer_active, 1731 timer_active,
1731 jiffies_to_clock_t(timer_expires - jiffies), 1732 jiffies_to_clock_t(timer_expires - jiffies),
1732 icsk->icsk_retransmits, 1733 icsk->icsk_retransmits,
diff --git a/net/ipv4/tcp_minisocks.c b/net/ipv4/tcp_minisocks.c
index 2b9b7f6c7f7c..54b2ef7d3efe 100644
--- a/net/ipv4/tcp_minisocks.c
+++ b/net/ipv4/tcp_minisocks.c
@@ -440,8 +440,6 @@ struct sock *tcp_create_openreq_child(struct sock *sk, struct request_sock *req,
440 newicsk->icsk_ack.last_seg_size = skb->len - newtp->tcp_header_len; 440 newicsk->icsk_ack.last_seg_size = skb->len - newtp->tcp_header_len;
441 newtp->rx_opt.mss_clamp = req->mss; 441 newtp->rx_opt.mss_clamp = req->mss;
442 TCP_ECN_openreq_child(newtp, req); 442 TCP_ECN_openreq_child(newtp, req);
443 if (newtp->ecn_flags&TCP_ECN_OK)
444 sock_set_flag(newsk, SOCK_NO_LARGESEND);
445 443
446 TCP_INC_STATS_BH(TCP_MIB_PASSIVEOPENS); 444 TCP_INC_STATS_BH(TCP_MIB_PASSIVEOPENS);
447 } 445 }
diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c
index bdd71db8bf90..5a7cb4a9c867 100644
--- a/net/ipv4/tcp_output.c
+++ b/net/ipv4/tcp_output.c
@@ -2044,8 +2044,6 @@ struct sk_buff * tcp_make_synack(struct sock *sk, struct dst_entry *dst,
2044 memset(th, 0, sizeof(struct tcphdr)); 2044 memset(th, 0, sizeof(struct tcphdr));
2045 th->syn = 1; 2045 th->syn = 1;
2046 th->ack = 1; 2046 th->ack = 1;
2047 if (dst->dev->features&NETIF_F_TSO)
2048 ireq->ecn_ok = 0;
2049 TCP_ECN_make_synack(req, th); 2047 TCP_ECN_make_synack(req, th);
2050 th->source = inet_sk(sk)->sport; 2048 th->source = inet_sk(sk)->sport;
2051 th->dest = ireq->rmt_port; 2049 th->dest = ireq->rmt_port;
diff --git a/net/ipv6/netfilter/ip6_queue.c b/net/ipv6/netfilter/ip6_queue.c
index b4b7d441af25..968a14be0d05 100644
--- a/net/ipv6/netfilter/ip6_queue.c
+++ b/net/ipv6/netfilter/ip6_queue.c
@@ -505,7 +505,7 @@ ipq_rcv_skb(struct sk_buff *skb)
505 if (type <= IPQM_BASE) 505 if (type <= IPQM_BASE)
506 return; 506 return;
507 507
508 if (security_netlink_recv(skb)) 508 if (security_netlink_recv(skb, CAP_NET_ADMIN))
509 RCV_SKB_FAIL(-EPERM); 509 RCV_SKB_FAIL(-EPERM);
510 510
511 write_lock_bh(&queue_lock); 511 write_lock_bh(&queue_lock);
diff --git a/net/ipv6/netfilter/ip6_tables.c b/net/ipv6/netfilter/ip6_tables.c
index 2e72f89a7019..0b5bd5587a3e 100644
--- a/net/ipv6/netfilter/ip6_tables.c
+++ b/net/ipv6/netfilter/ip6_tables.c
@@ -1281,7 +1281,8 @@ int ip6t_register_table(struct xt_table *table,
1281 return ret; 1281 return ret;
1282 } 1282 }
1283 1283
1284 if (xt_register_table(table, &bootstrap, newinfo) != 0) { 1284 ret = xt_register_table(table, &bootstrap, newinfo);
1285 if (ret != 0) {
1285 xt_free_table_info(newinfo); 1286 xt_free_table_info(newinfo);
1286 return ret; 1287 return ret;
1287 } 1288 }
diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c
index a50eb306e9e2..b36d5b2e7c30 100644
--- a/net/ipv6/tcp_ipv6.c
+++ b/net/ipv6/tcp_ipv6.c
@@ -1469,7 +1469,8 @@ static void get_tcp6_sock(struct seq_file *seq, struct sock *sp, int i)
1469 dest->s6_addr32[0], dest->s6_addr32[1], 1469 dest->s6_addr32[0], dest->s6_addr32[1],
1470 dest->s6_addr32[2], dest->s6_addr32[3], destp, 1470 dest->s6_addr32[2], dest->s6_addr32[3], destp,
1471 sp->sk_state, 1471 sp->sk_state,
1472 tp->write_seq-tp->snd_una, tp->rcv_nxt-tp->copied_seq, 1472 tp->write_seq-tp->snd_una,
1473 (sp->sk_state == TCP_LISTEN) ? sp->sk_ack_backlog : (tp->rcv_nxt - tp->copied_seq),
1473 timer_active, 1474 timer_active,
1474 jiffies_to_clock_t(timer_expires - jiffies), 1475 jiffies_to_clock_t(timer_expires - jiffies),
1475 icsk->icsk_retransmits, 1476 icsk->icsk_retransmits,
diff --git a/net/irda/irlan/irlan_client.c b/net/irda/irlan/irlan_client.c
index f8e6cb0db04b..95cf1234ea17 100644
--- a/net/irda/irlan/irlan_client.c
+++ b/net/irda/irlan/irlan_client.c
@@ -173,13 +173,14 @@ void irlan_client_discovery_indication(discinfo_t *discovery,
173 rcu_read_lock(); 173 rcu_read_lock();
174 self = irlan_get_any(); 174 self = irlan_get_any();
175 if (self) { 175 if (self) {
176 IRDA_ASSERT(self->magic == IRLAN_MAGIC, return;); 176 IRDA_ASSERT(self->magic == IRLAN_MAGIC, goto out;);
177 177
178 IRDA_DEBUG(1, "%s(), Found instance (%08x)!\n", __FUNCTION__ , 178 IRDA_DEBUG(1, "%s(), Found instance (%08x)!\n", __FUNCTION__ ,
179 daddr); 179 daddr);
180 180
181 irlan_client_wakeup(self, saddr, daddr); 181 irlan_client_wakeup(self, saddr, daddr);
182 } 182 }
183IRDA_ASSERT_LABEL(out:)
183 rcu_read_unlock(); 184 rcu_read_unlock();
184} 185}
185 186
diff --git a/net/netfilter/Kconfig b/net/netfilter/Kconfig
index b1622b7de1cf..42a178aa30f9 100644
--- a/net/netfilter/Kconfig
+++ b/net/netfilter/Kconfig
@@ -411,7 +411,10 @@ config NETFILTER_XT_MATCH_STATISTIC
411 tristate '"statistic" match support' 411 tristate '"statistic" match support'
412 depends on NETFILTER_XTABLES 412 depends on NETFILTER_XTABLES
413 help 413 help
414 statistic module 414 This option adds a `statistic' match, which allows you to match
415 on packets periodically or randomly with a given percentage.
416
417 To compile it as a module, choose M here. If unsure, say N.
415 418
416config NETFILTER_XT_MATCH_STRING 419config NETFILTER_XT_MATCH_STRING
417 tristate '"string" match support' 420 tristate '"string" match support'
diff --git a/net/netfilter/nf_conntrack_netlink.c b/net/netfilter/nf_conntrack_netlink.c
index b8c7c567c9df..af4845971f70 100644
--- a/net/netfilter/nf_conntrack_netlink.c
+++ b/net/netfilter/nf_conntrack_netlink.c
@@ -29,6 +29,7 @@
29#include <linux/errno.h> 29#include <linux/errno.h>
30#include <linux/netlink.h> 30#include <linux/netlink.h>
31#include <linux/spinlock.h> 31#include <linux/spinlock.h>
32#include <linux/interrupt.h>
32#include <linux/notifier.h> 33#include <linux/notifier.h>
33 34
34#include <linux/netfilter.h> 35#include <linux/netfilter.h>
diff --git a/net/netfilter/nf_conntrack_proto_sctp.c b/net/netfilter/nf_conntrack_proto_sctp.c
index 0c6da496cfa9..0839b701b930 100644
--- a/net/netfilter/nf_conntrack_proto_sctp.c
+++ b/net/netfilter/nf_conntrack_proto_sctp.c
@@ -28,6 +28,8 @@
28#include <linux/sctp.h> 28#include <linux/sctp.h>
29#include <linux/string.h> 29#include <linux/string.h>
30#include <linux/seq_file.h> 30#include <linux/seq_file.h>
31#include <linux/spinlock.h>
32#include <linux/interrupt.h>
31 33
32#include <net/netfilter/nf_conntrack.h> 34#include <net/netfilter/nf_conntrack.h>
33#include <net/netfilter/nf_conntrack_protocol.h> 35#include <net/netfilter/nf_conntrack_protocol.h>
diff --git a/net/netfilter/nfnetlink.c b/net/netfilter/nfnetlink.c
index b88e82a1a987..ec9f0efea6bb 100644
--- a/net/netfilter/nfnetlink.c
+++ b/net/netfilter/nfnetlink.c
@@ -229,7 +229,7 @@ static int nfnetlink_rcv_msg(struct sk_buff *skb,
229 NFNL_SUBSYS_ID(nlh->nlmsg_type), 229 NFNL_SUBSYS_ID(nlh->nlmsg_type),
230 NFNL_MSG_TYPE(nlh->nlmsg_type)); 230 NFNL_MSG_TYPE(nlh->nlmsg_type));
231 231
232 if (!cap_raised(NETLINK_CB(skb).eff_cap, CAP_NET_ADMIN)) { 232 if (security_netlink_recv(skb, CAP_NET_ADMIN)) {
233 DEBUGP("missing CAP_NET_ADMIN\n"); 233 DEBUGP("missing CAP_NET_ADMIN\n");
234 *errp = -EPERM; 234 *errp = -EPERM;
235 return -1; 235 return -1;
diff --git a/net/netfilter/nfnetlink_queue.c b/net/netfilter/nfnetlink_queue.c
index 86a4ac33de34..49ef41e34c48 100644
--- a/net/netfilter/nfnetlink_queue.c
+++ b/net/netfilter/nfnetlink_queue.c
@@ -680,11 +680,19 @@ dev_cmp(struct nfqnl_queue_entry *entry, unsigned long ifindex)
680 if (entinf->indev) 680 if (entinf->indev)
681 if (entinf->indev->ifindex == ifindex) 681 if (entinf->indev->ifindex == ifindex)
682 return 1; 682 return 1;
683
684 if (entinf->outdev) 683 if (entinf->outdev)
685 if (entinf->outdev->ifindex == ifindex) 684 if (entinf->outdev->ifindex == ifindex)
686 return 1; 685 return 1;
687 686#ifdef CONFIG_BRIDGE_NETFILTER
687 if (entry->skb->nf_bridge) {
688 if (entry->skb->nf_bridge->physindev &&
689 entry->skb->nf_bridge->physindev->ifindex == ifindex)
690 return 1;
691 if (entry->skb->nf_bridge->physoutdev &&
692 entry->skb->nf_bridge->physoutdev->ifindex == ifindex)
693 return 1;
694 }
695#endif
688 return 0; 696 return 0;
689} 697}
690 698
diff --git a/net/netfilter/xt_sctp.c b/net/netfilter/xt_sctp.c
index 9316c753692f..843383e01d41 100644
--- a/net/netfilter/xt_sctp.c
+++ b/net/netfilter/xt_sctp.c
@@ -151,7 +151,7 @@ match(const struct sk_buff *skb,
151 && SCCHECK(((ntohs(sh->dest) >= info->dpts[0]) 151 && SCCHECK(((ntohs(sh->dest) >= info->dpts[0])
152 && (ntohs(sh->dest) <= info->dpts[1])), 152 && (ntohs(sh->dest) <= info->dpts[1])),
153 XT_SCTP_DEST_PORTS, info->flags, info->invflags) 153 XT_SCTP_DEST_PORTS, info->flags, info->invflags)
154 && SCCHECK(match_packet(skb, protoff, 154 && SCCHECK(match_packet(skb, protoff + sizeof (sctp_sctphdr_t),
155 info->chunkmap, info->chunk_match_type, 155 info->chunkmap, info->chunk_match_type,
156 info->flag_info, info->flag_count, 156 info->flag_info, info->flag_count,
157 hotdrop), 157 hotdrop),
diff --git a/net/netfilter/xt_tcpudp.c b/net/netfilter/xt_tcpudp.c
index 1b61dac9c873..a9a63aa68936 100644
--- a/net/netfilter/xt_tcpudp.c
+++ b/net/netfilter/xt_tcpudp.c
@@ -260,7 +260,7 @@ static int __init xt_tcpudp_init(void)
260 return ret; 260 return ret;
261 261
262out_unreg_udp: 262out_unreg_udp:
263 xt_unregister_match(&tcp_matchstruct); 263 xt_unregister_match(&udp_matchstruct);
264out_unreg_tcp6: 264out_unreg_tcp6:
265 xt_unregister_match(&tcp6_matchstruct); 265 xt_unregister_match(&tcp6_matchstruct);
266out_unreg_tcp: 266out_unreg_tcp:
diff --git a/net/netlink/genetlink.c b/net/netlink/genetlink.c
index f329b72578f5..edf084becd5e 100644
--- a/net/netlink/genetlink.c
+++ b/net/netlink/genetlink.c
@@ -320,7 +320,7 @@ static int genl_rcv_msg(struct sk_buff *skb, struct nlmsghdr *nlh,
320 goto errout; 320 goto errout;
321 } 321 }
322 322
323 if ((ops->flags & GENL_ADMIN_PERM) && security_netlink_recv(skb)) { 323 if ((ops->flags & GENL_ADMIN_PERM) && security_netlink_recv(skb, CAP_NET_ADMIN)) {
324 err = -EPERM; 324 err = -EPERM;
325 goto errout; 325 goto errout;
326 } 326 }
diff --git a/net/sunrpc/auth_gss/gss_mech_switch.c b/net/sunrpc/auth_gss/gss_mech_switch.c
index f8bac6ccd524..d88468d21c37 100644
--- a/net/sunrpc/auth_gss/gss_mech_switch.c
+++ b/net/sunrpc/auth_gss/gss_mech_switch.c
@@ -224,7 +224,8 @@ EXPORT_SYMBOL(gss_service_to_auth_domain_name);
224void 224void
225gss_mech_put(struct gss_api_mech * gm) 225gss_mech_put(struct gss_api_mech * gm)
226{ 226{
227 module_put(gm->gm_owner); 227 if (gm)
228 module_put(gm->gm_owner);
228} 229}
229 230
230EXPORT_SYMBOL(gss_mech_put); 231EXPORT_SYMBOL(gss_mech_put);
@@ -307,8 +308,7 @@ gss_delete_sec_context(struct gss_ctx **context_handle)
307 (*context_handle)->mech_type->gm_ops 308 (*context_handle)->mech_type->gm_ops
308 ->gss_delete_sec_context((*context_handle) 309 ->gss_delete_sec_context((*context_handle)
309 ->internal_ctx_id); 310 ->internal_ctx_id);
310 if ((*context_handle)->mech_type) 311 gss_mech_put((*context_handle)->mech_type);
311 gss_mech_put((*context_handle)->mech_type);
312 kfree(*context_handle); 312 kfree(*context_handle);
313 *context_handle=NULL; 313 *context_handle=NULL;
314 return GSS_S_COMPLETE; 314 return GSS_S_COMPLETE;
diff --git a/net/sunrpc/auth_gss/svcauth_gss.c b/net/sunrpc/auth_gss/svcauth_gss.c
index d51e316c5821..94217ec9e2dd 100644
--- a/net/sunrpc/auth_gss/svcauth_gss.c
+++ b/net/sunrpc/auth_gss/svcauth_gss.c
@@ -425,6 +425,7 @@ static int rsc_parse(struct cache_detail *cd,
425 struct rsc rsci, *rscp = NULL; 425 struct rsc rsci, *rscp = NULL;
426 time_t expiry; 426 time_t expiry;
427 int status = -EINVAL; 427 int status = -EINVAL;
428 struct gss_api_mech *gm = NULL;
428 429
429 memset(&rsci, 0, sizeof(rsci)); 430 memset(&rsci, 0, sizeof(rsci));
430 /* context handle */ 431 /* context handle */
@@ -453,7 +454,6 @@ static int rsc_parse(struct cache_detail *cd,
453 set_bit(CACHE_NEGATIVE, &rsci.h.flags); 454 set_bit(CACHE_NEGATIVE, &rsci.h.flags);
454 else { 455 else {
455 int N, i; 456 int N, i;
456 struct gss_api_mech *gm;
457 457
458 /* gid */ 458 /* gid */
459 if (get_int(&mesg, &rsci.cred.cr_gid)) 459 if (get_int(&mesg, &rsci.cred.cr_gid))
@@ -488,21 +488,17 @@ static int rsc_parse(struct cache_detail *cd,
488 status = -EINVAL; 488 status = -EINVAL;
489 /* mech-specific data: */ 489 /* mech-specific data: */
490 len = qword_get(&mesg, buf, mlen); 490 len = qword_get(&mesg, buf, mlen);
491 if (len < 0) { 491 if (len < 0)
492 gss_mech_put(gm);
493 goto out; 492 goto out;
494 }
495 status = gss_import_sec_context(buf, len, gm, &rsci.mechctx); 493 status = gss_import_sec_context(buf, len, gm, &rsci.mechctx);
496 if (status) { 494 if (status)
497 gss_mech_put(gm);
498 goto out; 495 goto out;
499 }
500 gss_mech_put(gm);
501 } 496 }
502 rsci.h.expiry_time = expiry; 497 rsci.h.expiry_time = expiry;
503 rscp = rsc_update(&rsci, rscp); 498 rscp = rsc_update(&rsci, rscp);
504 status = 0; 499 status = 0;
505out: 500out:
501 gss_mech_put(gm);
506 rsc_free(&rsci); 502 rsc_free(&rsci);
507 if (rscp) 503 if (rscp)
508 cache_put(&rscp->h, &rsc_cache); 504 cache_put(&rscp->h, &rsc_cache);
@@ -836,6 +832,74 @@ out:
836 return stat; 832 return stat;
837} 833}
838 834
835static inline int
836total_buf_len(struct xdr_buf *buf)
837{
838 return buf->head[0].iov_len + buf->page_len + buf->tail[0].iov_len;
839}
840
841static void
842fix_priv_head(struct xdr_buf *buf, int pad)
843{
844 if (buf->page_len == 0) {
845 /* We need to adjust head and buf->len in tandem in this
846 * case to make svc_defer() work--it finds the original
847 * buffer start using buf->len - buf->head[0].iov_len. */
848 buf->head[0].iov_len -= pad;
849 }
850}
851
852static int
853unwrap_priv_data(struct svc_rqst *rqstp, struct xdr_buf *buf, u32 seq, struct gss_ctx *ctx)
854{
855 u32 priv_len, maj_stat;
856 int pad, saved_len, remaining_len, offset;
857
858 rqstp->rq_sendfile_ok = 0;
859
860 priv_len = ntohl(svc_getu32(&buf->head[0]));
861 if (rqstp->rq_deferred) {
862 /* Already decrypted last time through! The sequence number
863 * check at out_seq is unnecessary but harmless: */
864 goto out_seq;
865 }
866 /* buf->len is the number of bytes from the original start of the
867 * request to the end, where head[0].iov_len is just the bytes
868 * not yet read from the head, so these two values are different: */
869 remaining_len = total_buf_len(buf);
870 if (priv_len > remaining_len)
871 return -EINVAL;
872 pad = remaining_len - priv_len;
873 buf->len -= pad;
874 fix_priv_head(buf, pad);
875
876 /* Maybe it would be better to give gss_unwrap a length parameter: */
877 saved_len = buf->len;
878 buf->len = priv_len;
879 maj_stat = gss_unwrap(ctx, 0, buf);
880 pad = priv_len - buf->len;
881 buf->len = saved_len;
882 buf->len -= pad;
883 /* The upper layers assume the buffer is aligned on 4-byte boundaries.
884 * In the krb5p case, at least, the data ends up offset, so we need to
885 * move it around. */
886 /* XXX: This is very inefficient. It would be better to either do
887 * this while we encrypt, or maybe in the receive code, if we can peak
888 * ahead and work out the service and mechanism there. */
889 offset = buf->head[0].iov_len % 4;
890 if (offset) {
891 buf->buflen = RPCSVC_MAXPAYLOAD;
892 xdr_shift_buf(buf, offset);
893 fix_priv_head(buf, pad);
894 }
895 if (maj_stat != GSS_S_COMPLETE)
896 return -EINVAL;
897out_seq:
898 if (ntohl(svc_getu32(&buf->head[0])) != seq)
899 return -EINVAL;
900 return 0;
901}
902
839struct gss_svc_data { 903struct gss_svc_data {
840 /* decoded gss client cred: */ 904 /* decoded gss client cred: */
841 struct rpc_gss_wire_cred clcred; 905 struct rpc_gss_wire_cred clcred;
@@ -1051,7 +1115,14 @@ svcauth_gss_accept(struct svc_rqst *rqstp, u32 *authp)
1051 svc_putu32(resv, 0); 1115 svc_putu32(resv, 0);
1052 break; 1116 break;
1053 case RPC_GSS_SVC_PRIVACY: 1117 case RPC_GSS_SVC_PRIVACY:
1054 /* currently unsupported */ 1118 if (unwrap_priv_data(rqstp, &rqstp->rq_arg,
1119 gc->gc_seq, rsci->mechctx))
1120 goto auth_err;
1121 /* placeholders for length and seq. number: */
1122 svcdata->body_start = resv->iov_base + resv->iov_len;
1123 svc_putu32(resv, 0);
1124 svc_putu32(resv, 0);
1125 break;
1055 default: 1126 default:
1056 goto auth_err; 1127 goto auth_err;
1057 } 1128 }
@@ -1076,8 +1147,8 @@ out:
1076 return ret; 1147 return ret;
1077} 1148}
1078 1149
1079static int 1150static inline int
1080svcauth_gss_release(struct svc_rqst *rqstp) 1151svcauth_gss_wrap_resp_integ(struct svc_rqst *rqstp)
1081{ 1152{
1082 struct gss_svc_data *gsd = (struct gss_svc_data *)rqstp->rq_auth_data; 1153 struct gss_svc_data *gsd = (struct gss_svc_data *)rqstp->rq_auth_data;
1083 struct rpc_gss_wire_cred *gc = &gsd->clcred; 1154 struct rpc_gss_wire_cred *gc = &gsd->clcred;
@@ -1089,69 +1160,147 @@ svcauth_gss_release(struct svc_rqst *rqstp)
1089 int integ_offset, integ_len; 1160 int integ_offset, integ_len;
1090 int stat = -EINVAL; 1161 int stat = -EINVAL;
1091 1162
1163 p = gsd->body_start;
1164 gsd->body_start = NULL;
1165 /* move accept_stat to right place: */
1166 memcpy(p, p + 2, 4);
1167 /* Don't wrap in failure case: */
1168 /* Counting on not getting here if call was not even accepted! */
1169 if (*p != rpc_success) {
1170 resbuf->head[0].iov_len -= 2 * 4;
1171 goto out;
1172 }
1173 p++;
1174 integ_offset = (u8 *)(p + 1) - (u8 *)resbuf->head[0].iov_base;
1175 integ_len = resbuf->len - integ_offset;
1176 BUG_ON(integ_len % 4);
1177 *p++ = htonl(integ_len);
1178 *p++ = htonl(gc->gc_seq);
1179 if (xdr_buf_subsegment(resbuf, &integ_buf, integ_offset,
1180 integ_len))
1181 BUG();
1182 if (resbuf->page_len == 0
1183 && resbuf->head[0].iov_len + RPC_MAX_AUTH_SIZE
1184 < PAGE_SIZE) {
1185 BUG_ON(resbuf->tail[0].iov_len);
1186 /* Use head for everything */
1187 resv = &resbuf->head[0];
1188 } else if (resbuf->tail[0].iov_base == NULL) {
1189 if (resbuf->head[0].iov_len + RPC_MAX_AUTH_SIZE > PAGE_SIZE)
1190 goto out_err;
1191 resbuf->tail[0].iov_base = resbuf->head[0].iov_base
1192 + resbuf->head[0].iov_len;
1193 resbuf->tail[0].iov_len = 0;
1194 rqstp->rq_restailpage = 0;
1195 resv = &resbuf->tail[0];
1196 } else {
1197 resv = &resbuf->tail[0];
1198 }
1199 mic.data = (u8 *)resv->iov_base + resv->iov_len + 4;
1200 if (gss_get_mic(gsd->rsci->mechctx, &integ_buf, &mic))
1201 goto out_err;
1202 svc_putu32(resv, htonl(mic.len));
1203 memset(mic.data + mic.len, 0,
1204 round_up_to_quad(mic.len) - mic.len);
1205 resv->iov_len += XDR_QUADLEN(mic.len) << 2;
1206 /* not strictly required: */
1207 resbuf->len += XDR_QUADLEN(mic.len) << 2;
1208 BUG_ON(resv->iov_len > PAGE_SIZE);
1209out:
1210 stat = 0;
1211out_err:
1212 return stat;
1213}
1214
1215static inline int
1216svcauth_gss_wrap_resp_priv(struct svc_rqst *rqstp)
1217{
1218 struct gss_svc_data *gsd = (struct gss_svc_data *)rqstp->rq_auth_data;
1219 struct rpc_gss_wire_cred *gc = &gsd->clcred;
1220 struct xdr_buf *resbuf = &rqstp->rq_res;
1221 struct page **inpages = NULL;
1222 u32 *p;
1223 int offset, *len;
1224 int pad;
1225
1226 p = gsd->body_start;
1227 gsd->body_start = NULL;
1228 /* move accept_stat to right place: */
1229 memcpy(p, p + 2, 4);
1230 /* Don't wrap in failure case: */
1231 /* Counting on not getting here if call was not even accepted! */
1232 if (*p != rpc_success) {
1233 resbuf->head[0].iov_len -= 2 * 4;
1234 return 0;
1235 }
1236 p++;
1237 len = p++;
1238 offset = (u8 *)p - (u8 *)resbuf->head[0].iov_base;
1239 *p++ = htonl(gc->gc_seq);
1240 inpages = resbuf->pages;
1241 /* XXX: Would be better to write some xdr helper functions for
1242 * nfs{2,3,4}xdr.c that place the data right, instead of copying: */
1243 if (resbuf->tail[0].iov_base && rqstp->rq_restailpage == 0) {
1244 BUG_ON(resbuf->tail[0].iov_base >= resbuf->head[0].iov_base
1245 + PAGE_SIZE);
1246 BUG_ON(resbuf->tail[0].iov_base < resbuf->head[0].iov_base);
1247 if (resbuf->tail[0].iov_len + resbuf->head[0].iov_len
1248 + 2 * RPC_MAX_AUTH_SIZE > PAGE_SIZE)
1249 return -ENOMEM;
1250 memmove(resbuf->tail[0].iov_base + RPC_MAX_AUTH_SIZE,
1251 resbuf->tail[0].iov_base,
1252 resbuf->tail[0].iov_len);
1253 resbuf->tail[0].iov_base += RPC_MAX_AUTH_SIZE;
1254 }
1255 if (resbuf->tail[0].iov_base == NULL) {
1256 if (resbuf->head[0].iov_len + 2*RPC_MAX_AUTH_SIZE > PAGE_SIZE)
1257 return -ENOMEM;
1258 resbuf->tail[0].iov_base = resbuf->head[0].iov_base
1259 + resbuf->head[0].iov_len + RPC_MAX_AUTH_SIZE;
1260 resbuf->tail[0].iov_len = 0;
1261 rqstp->rq_restailpage = 0;
1262 }
1263 if (gss_wrap(gsd->rsci->mechctx, offset, resbuf, inpages))
1264 return -ENOMEM;
1265 *len = htonl(resbuf->len - offset);
1266 pad = 3 - ((resbuf->len - offset - 1)&3);
1267 p = (u32 *)(resbuf->tail[0].iov_base + resbuf->tail[0].iov_len);
1268 memset(p, 0, pad);
1269 resbuf->tail[0].iov_len += pad;
1270 resbuf->len += pad;
1271 return 0;
1272}
1273
1274static int
1275svcauth_gss_release(struct svc_rqst *rqstp)
1276{
1277 struct gss_svc_data *gsd = (struct gss_svc_data *)rqstp->rq_auth_data;
1278 struct rpc_gss_wire_cred *gc = &gsd->clcred;
1279 struct xdr_buf *resbuf = &rqstp->rq_res;
1280 int stat = -EINVAL;
1281
1092 if (gc->gc_proc != RPC_GSS_PROC_DATA) 1282 if (gc->gc_proc != RPC_GSS_PROC_DATA)
1093 goto out; 1283 goto out;
1094 /* Release can be called twice, but we only wrap once. */ 1284 /* Release can be called twice, but we only wrap once. */
1095 if (gsd->body_start == NULL) 1285 if (gsd->body_start == NULL)
1096 goto out; 1286 goto out;
1097 /* normally not set till svc_send, but we need it here: */ 1287 /* normally not set till svc_send, but we need it here: */
1098 resbuf->len = resbuf->head[0].iov_len 1288 /* XXX: what for? Do we mess it up the moment we call svc_putu32
1099 + resbuf->page_len + resbuf->tail[0].iov_len; 1289 * or whatever? */
1290 resbuf->len = total_buf_len(resbuf);
1100 switch (gc->gc_svc) { 1291 switch (gc->gc_svc) {
1101 case RPC_GSS_SVC_NONE: 1292 case RPC_GSS_SVC_NONE:
1102 break; 1293 break;
1103 case RPC_GSS_SVC_INTEGRITY: 1294 case RPC_GSS_SVC_INTEGRITY:
1104 p = gsd->body_start; 1295 stat = svcauth_gss_wrap_resp_integ(rqstp);
1105 gsd->body_start = NULL; 1296 if (stat)
1106 /* move accept_stat to right place: */
1107 memcpy(p, p + 2, 4);
1108 /* don't wrap in failure case: */
1109 /* Note: counting on not getting here if call was not even
1110 * accepted! */
1111 if (*p != rpc_success) {
1112 resbuf->head[0].iov_len -= 2 * 4;
1113 goto out;
1114 }
1115 p++;
1116 integ_offset = (u8 *)(p + 1) - (u8 *)resbuf->head[0].iov_base;
1117 integ_len = resbuf->len - integ_offset;
1118 BUG_ON(integ_len % 4);
1119 *p++ = htonl(integ_len);
1120 *p++ = htonl(gc->gc_seq);
1121 if (xdr_buf_subsegment(resbuf, &integ_buf, integ_offset,
1122 integ_len))
1123 BUG();
1124 if (resbuf->page_len == 0
1125 && resbuf->head[0].iov_len + RPC_MAX_AUTH_SIZE
1126 < PAGE_SIZE) {
1127 BUG_ON(resbuf->tail[0].iov_len);
1128 /* Use head for everything */
1129 resv = &resbuf->head[0];
1130 } else if (resbuf->tail[0].iov_base == NULL) {
1131 if (resbuf->head[0].iov_len + RPC_MAX_AUTH_SIZE
1132 > PAGE_SIZE)
1133 goto out_err;
1134 resbuf->tail[0].iov_base =
1135 resbuf->head[0].iov_base
1136 + resbuf->head[0].iov_len;
1137 resbuf->tail[0].iov_len = 0;
1138 rqstp->rq_restailpage = 0;
1139 resv = &resbuf->tail[0];
1140 } else {
1141 resv = &resbuf->tail[0];
1142 }
1143 mic.data = (u8 *)resv->iov_base + resv->iov_len + 4;
1144 if (gss_get_mic(gsd->rsci->mechctx, &integ_buf, &mic))
1145 goto out_err; 1297 goto out_err;
1146 svc_putu32(resv, htonl(mic.len));
1147 memset(mic.data + mic.len, 0,
1148 round_up_to_quad(mic.len) - mic.len);
1149 resv->iov_len += XDR_QUADLEN(mic.len) << 2;
1150 /* not strictly required: */
1151 resbuf->len += XDR_QUADLEN(mic.len) << 2;
1152 BUG_ON(resv->iov_len > PAGE_SIZE);
1153 break; 1298 break;
1154 case RPC_GSS_SVC_PRIVACY: 1299 case RPC_GSS_SVC_PRIVACY:
1300 stat = svcauth_gss_wrap_resp_priv(rqstp);
1301 if (stat)
1302 goto out_err;
1303 break;
1155 default: 1304 default:
1156 goto out_err; 1305 goto out_err;
1157 } 1306 }
diff --git a/net/sunrpc/svc.c b/net/sunrpc/svc.c
index b08419e1fc68..01ba60a49572 100644
--- a/net/sunrpc/svc.c
+++ b/net/sunrpc/svc.c
@@ -280,7 +280,10 @@ svc_process(struct svc_serv *serv, struct svc_rqst *rqstp)
280 rqstp->rq_res.page_base = 0; 280 rqstp->rq_res.page_base = 0;
281 rqstp->rq_res.page_len = 0; 281 rqstp->rq_res.page_len = 0;
282 rqstp->rq_res.buflen = PAGE_SIZE; 282 rqstp->rq_res.buflen = PAGE_SIZE;
283 rqstp->rq_res.tail[0].iov_base = NULL;
283 rqstp->rq_res.tail[0].iov_len = 0; 284 rqstp->rq_res.tail[0].iov_len = 0;
285 /* Will be turned off only in gss privacy case: */
286 rqstp->rq_sendfile_ok = 1;
284 /* tcp needs a space for the record length... */ 287 /* tcp needs a space for the record length... */
285 if (rqstp->rq_prot == IPPROTO_TCP) 288 if (rqstp->rq_prot == IPPROTO_TCP)
286 svc_putu32(resv, 0); 289 svc_putu32(resv, 0);
diff --git a/net/tipc/core.c b/net/tipc/core.c
index 5003acb15919..0539a8362858 100644
--- a/net/tipc/core.c
+++ b/net/tipc/core.c
@@ -191,7 +191,8 @@ static int __init tipc_init(void)
191 int res; 191 int res;
192 192
193 tipc_log_reinit(CONFIG_TIPC_LOG); 193 tipc_log_reinit(CONFIG_TIPC_LOG);
194 info("Activated (compiled " __DATE__ " " __TIME__ ")\n"); 194 info("Activated (version " TIPC_MOD_VER
195 " compiled " __DATE__ " " __TIME__ ")\n");
195 196
196 tipc_own_addr = 0; 197 tipc_own_addr = 0;
197 tipc_remote_management = 1; 198 tipc_remote_management = 1;
diff --git a/net/tipc/link.c b/net/tipc/link.c
index d64658053746..c6831c75cfa4 100644
--- a/net/tipc/link.c
+++ b/net/tipc/link.c
@@ -2,7 +2,7 @@
2 * net/tipc/link.c: TIPC link code 2 * net/tipc/link.c: TIPC link code
3 * 3 *
4 * Copyright (c) 1996-2006, Ericsson AB 4 * Copyright (c) 1996-2006, Ericsson AB
5 * Copyright (c) 2004-2005, Wind River Systems 5 * Copyright (c) 2004-2006, Wind River Systems
6 * All rights reserved. 6 * All rights reserved.
7 * 7 *
8 * Redistribution and use in source and binary forms, with or without 8 * Redistribution and use in source and binary forms, with or without
@@ -988,17 +988,18 @@ static int link_bundle_buf(struct link *l_ptr,
988 struct tipc_msg *bundler_msg = buf_msg(bundler); 988 struct tipc_msg *bundler_msg = buf_msg(bundler);
989 struct tipc_msg *msg = buf_msg(buf); 989 struct tipc_msg *msg = buf_msg(buf);
990 u32 size = msg_size(msg); 990 u32 size = msg_size(msg);
991 u32 to_pos = align(msg_size(bundler_msg)); 991 u32 bundle_size = msg_size(bundler_msg);
992 u32 rest = link_max_pkt(l_ptr) - to_pos; 992 u32 to_pos = align(bundle_size);
993 u32 pad = to_pos - bundle_size;
993 994
994 if (msg_user(bundler_msg) != MSG_BUNDLER) 995 if (msg_user(bundler_msg) != MSG_BUNDLER)
995 return 0; 996 return 0;
996 if (msg_type(bundler_msg) != OPEN_MSG) 997 if (msg_type(bundler_msg) != OPEN_MSG)
997 return 0; 998 return 0;
998 if (rest < align(size)) 999 if (skb_tailroom(bundler) < (pad + size))
999 return 0; 1000 return 0;
1000 1001
1001 skb_put(bundler, (to_pos - msg_size(bundler_msg)) + size); 1002 skb_put(bundler, pad + size);
1002 memcpy(bundler->data + to_pos, buf->data, size); 1003 memcpy(bundler->data + to_pos, buf->data, size);
1003 msg_set_size(bundler_msg, to_pos + size); 1004 msg_set_size(bundler_msg, to_pos + size);
1004 msg_set_msgcnt(bundler_msg, msg_msgcnt(bundler_msg) + 1); 1005 msg_set_msgcnt(bundler_msg, msg_msgcnt(bundler_msg) + 1);
diff --git a/net/tipc/node.c b/net/tipc/node.c
index 861322b935da..fc6d09630ccd 100644
--- a/net/tipc/node.c
+++ b/net/tipc/node.c
@@ -2,7 +2,7 @@
2 * net/tipc/node.c: TIPC node management routines 2 * net/tipc/node.c: TIPC node management routines
3 * 3 *
4 * Copyright (c) 2000-2006, Ericsson AB 4 * Copyright (c) 2000-2006, Ericsson AB
5 * Copyright (c) 2005, Wind River Systems 5 * Copyright (c) 2005-2006, Wind River Systems
6 * All rights reserved. 6 * All rights reserved.
7 * 7 *
8 * Redistribution and use in source and binary forms, with or without 8 * Redistribution and use in source and binary forms, with or without
@@ -592,6 +592,7 @@ struct sk_buff *tipc_node_get_nodes(const void *req_tlv_area, int req_tlv_space)
592 struct sk_buff *buf; 592 struct sk_buff *buf;
593 struct node *n_ptr; 593 struct node *n_ptr;
594 struct tipc_node_info node_info; 594 struct tipc_node_info node_info;
595 u32 payload_size;
595 596
596 if (!TLV_CHECK(req_tlv_area, req_tlv_space, TIPC_TLV_NET_ADDR)) 597 if (!TLV_CHECK(req_tlv_area, req_tlv_space, TIPC_TLV_NET_ADDR))
597 return tipc_cfg_reply_error_string(TIPC_CFG_TLV_ERROR); 598 return tipc_cfg_reply_error_string(TIPC_CFG_TLV_ERROR);
@@ -608,8 +609,11 @@ struct sk_buff *tipc_node_get_nodes(const void *req_tlv_area, int req_tlv_space)
608 /* For now, get space for all other nodes 609 /* For now, get space for all other nodes
609 (will need to modify this when slave nodes are supported */ 610 (will need to modify this when slave nodes are supported */
610 611
611 buf = tipc_cfg_reply_alloc(TLV_SPACE(sizeof(node_info)) * 612 payload_size = TLV_SPACE(sizeof(node_info)) * (tipc_max_nodes - 1);
612 (tipc_max_nodes - 1)); 613 if (payload_size > 32768u)
614 return tipc_cfg_reply_error_string(TIPC_CFG_NOT_SUPPORTED
615 " (too many nodes)");
616 buf = tipc_cfg_reply_alloc(payload_size);
613 if (!buf) 617 if (!buf)
614 return NULL; 618 return NULL;
615 619
@@ -633,6 +637,7 @@ struct sk_buff *tipc_node_get_links(const void *req_tlv_area, int req_tlv_space)
633 struct sk_buff *buf; 637 struct sk_buff *buf;
634 struct node *n_ptr; 638 struct node *n_ptr;
635 struct tipc_link_info link_info; 639 struct tipc_link_info link_info;
640 u32 payload_size;
636 641
637 if (!TLV_CHECK(req_tlv_area, req_tlv_space, TIPC_TLV_NET_ADDR)) 642 if (!TLV_CHECK(req_tlv_area, req_tlv_space, TIPC_TLV_NET_ADDR))
638 return tipc_cfg_reply_error_string(TIPC_CFG_TLV_ERROR); 643 return tipc_cfg_reply_error_string(TIPC_CFG_TLV_ERROR);
@@ -645,12 +650,15 @@ struct sk_buff *tipc_node_get_links(const void *req_tlv_area, int req_tlv_space)
645 650
646 if (!tipc_nodes) 651 if (!tipc_nodes)
647 return tipc_cfg_reply_none(); 652 return tipc_cfg_reply_none();
648 653
649 /* For now, get space for 2 links to all other nodes + bcast link 654 /* Get space for all unicast links + multicast link */
650 (will need to modify this when slave nodes are supported */ 655
651 656 payload_size = TLV_SPACE(sizeof(link_info)) *
652 buf = tipc_cfg_reply_alloc(TLV_SPACE(sizeof(link_info)) * 657 (tipc_net.zones[tipc_zone(tipc_own_addr)]->links + 1);
653 (2 * (tipc_max_nodes - 1) + 1)); 658 if (payload_size > 32768u)
659 return tipc_cfg_reply_error_string(TIPC_CFG_NOT_SUPPORTED
660 " (too many links)");
661 buf = tipc_cfg_reply_alloc(payload_size);
654 if (!buf) 662 if (!buf)
655 return NULL; 663 return NULL;
656 664
diff --git a/net/tipc/zone.h b/net/tipc/zone.h
index 267999c5a240..5ab3d08602e2 100644
--- a/net/tipc/zone.h
+++ b/net/tipc/zone.h
@@ -2,7 +2,7 @@
2 * net/tipc/zone.h: Include file for TIPC zone management routines 2 * net/tipc/zone.h: Include file for TIPC zone management routines
3 * 3 *
4 * Copyright (c) 2000-2006, Ericsson AB 4 * Copyright (c) 2000-2006, Ericsson AB
5 * Copyright (c) 2005, Wind River Systems 5 * Copyright (c) 2005-2006, Wind River Systems
6 * All rights reserved. 6 * All rights reserved.
7 * 7 *
8 * Redistribution and use in source and binary forms, with or without 8 * Redistribution and use in source and binary forms, with or without
@@ -45,7 +45,7 @@
45 * struct _zone - TIPC zone structure 45 * struct _zone - TIPC zone structure
46 * @addr: network address of zone 46 * @addr: network address of zone
47 * @clusters: array of pointers to all clusters within zone 47 * @clusters: array of pointers to all clusters within zone
48 * @links: (used for inter-zone communication) 48 * @links: number of (unicast) links to zone
49 */ 49 */
50 50
51struct _zone { 51struct _zone {
diff --git a/net/unix/af_unix.c b/net/unix/af_unix.c
index d901465ce013..fd11d4048b52 100644
--- a/net/unix/af_unix.c
+++ b/net/unix/af_unix.c
@@ -128,6 +128,30 @@ static atomic_t unix_nr_socks = ATOMIC_INIT(0);
128 128
129#define UNIX_ABSTRACT(sk) (unix_sk(sk)->addr->hash != UNIX_HASH_SIZE) 129#define UNIX_ABSTRACT(sk) (unix_sk(sk)->addr->hash != UNIX_HASH_SIZE)
130 130
131#ifdef CONFIG_SECURITY_NETWORK
132static void unix_get_peersec_dgram(struct sk_buff *skb)
133{
134 int err;
135
136 err = security_socket_getpeersec_dgram(skb, UNIXSECDATA(skb),
137 UNIXSECLEN(skb));
138 if (err)
139 *(UNIXSECDATA(skb)) = NULL;
140}
141
142static inline void unix_set_secdata(struct scm_cookie *scm, struct sk_buff *skb)
143{
144 scm->secdata = *UNIXSECDATA(skb);
145 scm->seclen = *UNIXSECLEN(skb);
146}
147#else
148static void unix_get_peersec_dgram(struct sk_buff *skb)
149{ }
150
151static inline void unix_set_secdata(struct scm_cookie *scm, struct sk_buff *skb)
152{ }
153#endif /* CONFIG_SECURITY_NETWORK */
154
131/* 155/*
132 * SMP locking strategy: 156 * SMP locking strategy:
133 * hash table is protected with spinlock unix_table_lock 157 * hash table is protected with spinlock unix_table_lock
@@ -1291,6 +1315,8 @@ static int unix_dgram_sendmsg(struct kiocb *kiocb, struct socket *sock,
1291 if (siocb->scm->fp) 1315 if (siocb->scm->fp)
1292 unix_attach_fds(siocb->scm, skb); 1316 unix_attach_fds(siocb->scm, skb);
1293 1317
1318 unix_get_peersec_dgram(skb);
1319
1294 skb->h.raw = skb->data; 1320 skb->h.raw = skb->data;
1295 err = memcpy_fromiovec(skb_put(skb,len), msg->msg_iov, len); 1321 err = memcpy_fromiovec(skb_put(skb,len), msg->msg_iov, len);
1296 if (err) 1322 if (err)
@@ -1570,6 +1596,7 @@ static int unix_dgram_recvmsg(struct kiocb *iocb, struct socket *sock,
1570 memset(&tmp_scm, 0, sizeof(tmp_scm)); 1596 memset(&tmp_scm, 0, sizeof(tmp_scm));
1571 } 1597 }
1572 siocb->scm->creds = *UNIXCREDS(skb); 1598 siocb->scm->creds = *UNIXCREDS(skb);
1599 unix_set_secdata(siocb->scm, skb);
1573 1600
1574 if (!(flags & MSG_PEEK)) 1601 if (!(flags & MSG_PEEK))
1575 { 1602 {
diff --git a/net/xfrm/xfrm_state.c b/net/xfrm/xfrm_state.c
index 17b29ec3c417..43f00fc28a3d 100644
--- a/net/xfrm/xfrm_state.c
+++ b/net/xfrm/xfrm_state.c
@@ -1164,8 +1164,6 @@ int xfrm_state_mtu(struct xfrm_state *x, int mtu)
1164 return res; 1164 return res;
1165} 1165}
1166 1166
1167EXPORT_SYMBOL(xfrm_state_mtu);
1168
1169int xfrm_init_state(struct xfrm_state *x) 1167int xfrm_init_state(struct xfrm_state *x)
1170{ 1168{
1171 struct xfrm_state_afinfo *afinfo; 1169 struct xfrm_state_afinfo *afinfo;
diff --git a/net/xfrm/xfrm_user.c b/net/xfrm/xfrm_user.c
index c21dc26141ea..3e6a722d072e 100644
--- a/net/xfrm/xfrm_user.c
+++ b/net/xfrm/xfrm_user.c
@@ -1435,7 +1435,7 @@ static int xfrm_user_rcv_msg(struct sk_buff *skb, struct nlmsghdr *nlh, int *err
1435 link = &xfrm_dispatch[type]; 1435 link = &xfrm_dispatch[type];
1436 1436
1437 /* All operations require privileges, even GET */ 1437 /* All operations require privileges, even GET */
1438 if (security_netlink_recv(skb)) { 1438 if (security_netlink_recv(skb, CAP_NET_ADMIN)) {
1439 *errp = -EPERM; 1439 *errp = -EPERM;
1440 return -1; 1440 return -1;
1441 } 1441 }
diff --git a/security/commoncap.c b/security/commoncap.c
index 841eb4e5c62b..57673ee07ceb 100644
--- a/security/commoncap.c
+++ b/security/commoncap.c
@@ -33,9 +33,9 @@ int cap_netlink_send(struct sock *sk, struct sk_buff *skb)
33 33
34EXPORT_SYMBOL(cap_netlink_send); 34EXPORT_SYMBOL(cap_netlink_send);
35 35
36int cap_netlink_recv(struct sk_buff *skb) 36int cap_netlink_recv(struct sk_buff *skb, int cap)
37{ 37{
38 if (!cap_raised(NETLINK_CB(skb).eff_cap, CAP_NET_ADMIN)) 38 if (!cap_raised(NETLINK_CB(skb).eff_cap, cap))
39 return -EPERM; 39 return -EPERM;
40 return 0; 40 return 0;
41} 41}
diff --git a/security/dummy.c b/security/dummy.c
index 310fcdf7b749..d417936562db 100644
--- a/security/dummy.c
+++ b/security/dummy.c
@@ -506,6 +506,9 @@ static int dummy_task_getsid (struct task_struct *p)
506 return 0; 506 return 0;
507} 507}
508 508
509static void dummy_task_getsecid (struct task_struct *p, u32 *secid)
510{ }
511
509static int dummy_task_setgroups (struct group_info *group_info) 512static int dummy_task_setgroups (struct group_info *group_info)
510{ 513{
511 return 0; 514 return 0;
@@ -521,6 +524,11 @@ static int dummy_task_setioprio (struct task_struct *p, int ioprio)
521 return 0; 524 return 0;
522} 525}
523 526
527static int dummy_task_getioprio (struct task_struct *p)
528{
529 return 0;
530}
531
524static int dummy_task_setrlimit (unsigned int resource, struct rlimit *new_rlim) 532static int dummy_task_setrlimit (unsigned int resource, struct rlimit *new_rlim)
525{ 533{
526 return 0; 534 return 0;
@@ -548,7 +556,7 @@ static int dummy_task_wait (struct task_struct *p)
548} 556}
549 557
550static int dummy_task_kill (struct task_struct *p, struct siginfo *info, 558static int dummy_task_kill (struct task_struct *p, struct siginfo *info,
551 int sig) 559 int sig, u32 secid)
552{ 560{
553 return 0; 561 return 0;
554} 562}
@@ -675,9 +683,9 @@ static int dummy_netlink_send (struct sock *sk, struct sk_buff *skb)
675 return 0; 683 return 0;
676} 684}
677 685
678static int dummy_netlink_recv (struct sk_buff *skb) 686static int dummy_netlink_recv (struct sk_buff *skb, int cap)
679{ 687{
680 if (!cap_raised (NETLINK_CB (skb).eff_cap, CAP_NET_ADMIN)) 688 if (!cap_raised (NETLINK_CB (skb).eff_cap, cap))
681 return -EPERM; 689 return -EPERM;
682 return 0; 690 return 0;
683} 691}
@@ -981,9 +989,11 @@ void security_fixup_ops (struct security_operations *ops)
981 set_to_dummy_if_null(ops, task_setpgid); 989 set_to_dummy_if_null(ops, task_setpgid);
982 set_to_dummy_if_null(ops, task_getpgid); 990 set_to_dummy_if_null(ops, task_getpgid);
983 set_to_dummy_if_null(ops, task_getsid); 991 set_to_dummy_if_null(ops, task_getsid);
992 set_to_dummy_if_null(ops, task_getsecid);
984 set_to_dummy_if_null(ops, task_setgroups); 993 set_to_dummy_if_null(ops, task_setgroups);
985 set_to_dummy_if_null(ops, task_setnice); 994 set_to_dummy_if_null(ops, task_setnice);
986 set_to_dummy_if_null(ops, task_setioprio); 995 set_to_dummy_if_null(ops, task_setioprio);
996 set_to_dummy_if_null(ops, task_getioprio);
987 set_to_dummy_if_null(ops, task_setrlimit); 997 set_to_dummy_if_null(ops, task_setrlimit);
988 set_to_dummy_if_null(ops, task_setscheduler); 998 set_to_dummy_if_null(ops, task_setscheduler);
989 set_to_dummy_if_null(ops, task_getscheduler); 999 set_to_dummy_if_null(ops, task_getscheduler);
diff --git a/security/selinux/hooks.c b/security/selinux/hooks.c
index 28832e689800..51bec4c88f19 100644
--- a/security/selinux/hooks.c
+++ b/security/selinux/hooks.c
@@ -69,6 +69,7 @@
69#include <linux/sysctl.h> 69#include <linux/sysctl.h>
70#include <linux/audit.h> 70#include <linux/audit.h>
71#include <linux/string.h> 71#include <linux/string.h>
72#include <linux/selinux.h>
72 73
73#include "avc.h" 74#include "avc.h"
74#include "objsec.h" 75#include "objsec.h"
@@ -2643,6 +2644,11 @@ static int selinux_task_getsid(struct task_struct *p)
2643 return task_has_perm(current, p, PROCESS__GETSESSION); 2644 return task_has_perm(current, p, PROCESS__GETSESSION);
2644} 2645}
2645 2646
2647static void selinux_task_getsecid(struct task_struct *p, u32 *secid)
2648{
2649 selinux_get_task_sid(p, secid);
2650}
2651
2646static int selinux_task_setgroups(struct group_info *group_info) 2652static int selinux_task_setgroups(struct group_info *group_info)
2647{ 2653{
2648 /* See the comment for setuid above. */ 2654 /* See the comment for setuid above. */
@@ -2665,6 +2671,11 @@ static int selinux_task_setioprio(struct task_struct *p, int ioprio)
2665 return task_has_perm(current, p, PROCESS__SETSCHED); 2671 return task_has_perm(current, p, PROCESS__SETSCHED);
2666} 2672}
2667 2673
2674static int selinux_task_getioprio(struct task_struct *p)
2675{
2676 return task_has_perm(current, p, PROCESS__GETSCHED);
2677}
2678
2668static int selinux_task_setrlimit(unsigned int resource, struct rlimit *new_rlim) 2679static int selinux_task_setrlimit(unsigned int resource, struct rlimit *new_rlim)
2669{ 2680{
2670 struct rlimit *old_rlim = current->signal->rlim + resource; 2681 struct rlimit *old_rlim = current->signal->rlim + resource;
@@ -2699,12 +2710,14 @@ static int selinux_task_movememory(struct task_struct *p)
2699 return task_has_perm(current, p, PROCESS__SETSCHED); 2710 return task_has_perm(current, p, PROCESS__SETSCHED);
2700} 2711}
2701 2712
2702static int selinux_task_kill(struct task_struct *p, struct siginfo *info, int sig) 2713static int selinux_task_kill(struct task_struct *p, struct siginfo *info,
2714 int sig, u32 secid)
2703{ 2715{
2704 u32 perm; 2716 u32 perm;
2705 int rc; 2717 int rc;
2718 struct task_security_struct *tsec;
2706 2719
2707 rc = secondary_ops->task_kill(p, info, sig); 2720 rc = secondary_ops->task_kill(p, info, sig, secid);
2708 if (rc) 2721 if (rc)
2709 return rc; 2722 return rc;
2710 2723
@@ -2715,8 +2728,12 @@ static int selinux_task_kill(struct task_struct *p, struct siginfo *info, int si
2715 perm = PROCESS__SIGNULL; /* null signal; existence test */ 2728 perm = PROCESS__SIGNULL; /* null signal; existence test */
2716 else 2729 else
2717 perm = signal_to_av(sig); 2730 perm = signal_to_av(sig);
2718 2731 tsec = p->security;
2719 return task_has_perm(current, p, perm); 2732 if (secid)
2733 rc = avc_has_perm(secid, tsec->sid, SECCLASS_PROCESS, perm, NULL);
2734 else
2735 rc = task_has_perm(current, p, perm);
2736 return rc;
2720} 2737}
2721 2738
2722static int selinux_task_prctl(int option, 2739static int selinux_task_prctl(int option,
@@ -3420,7 +3437,13 @@ out:
3420static int selinux_socket_getpeersec_dgram(struct sk_buff *skb, char **secdata, u32 *seclen) 3437static int selinux_socket_getpeersec_dgram(struct sk_buff *skb, char **secdata, u32 *seclen)
3421{ 3438{
3422 int err = 0; 3439 int err = 0;
3423 u32 peer_sid = selinux_socket_getpeer_dgram(skb); 3440 u32 peer_sid;
3441
3442 if (skb->sk->sk_family == PF_UNIX)
3443 selinux_get_inode_sid(SOCK_INODE(skb->sk->sk_socket),
3444 &peer_sid);
3445 else
3446 peer_sid = selinux_socket_getpeer_dgram(skb);
3424 3447
3425 if (peer_sid == SECSID_NULL) 3448 if (peer_sid == SECSID_NULL)
3426 return -EINVAL; 3449 return -EINVAL;
@@ -3432,8 +3455,6 @@ static int selinux_socket_getpeersec_dgram(struct sk_buff *skb, char **secdata,
3432 return 0; 3455 return 0;
3433} 3456}
3434 3457
3435
3436
3437static int selinux_sk_alloc_security(struct sock *sk, int family, gfp_t priority) 3458static int selinux_sk_alloc_security(struct sock *sk, int family, gfp_t priority)
3438{ 3459{
3439 return sk_alloc_security(sk, family, priority); 3460 return sk_alloc_security(sk, family, priority);
@@ -3641,32 +3662,32 @@ static unsigned int selinux_ipv6_postroute_last(unsigned int hooknum,
3641 3662
3642static int selinux_netlink_send(struct sock *sk, struct sk_buff *skb) 3663static int selinux_netlink_send(struct sock *sk, struct sk_buff *skb)
3643{ 3664{
3644 struct task_security_struct *tsec;
3645 struct av_decision avd;
3646 int err; 3665 int err;
3647 3666
3648 err = secondary_ops->netlink_send(sk, skb); 3667 err = secondary_ops->netlink_send(sk, skb);
3649 if (err) 3668 if (err)
3650 return err; 3669 return err;
3651 3670
3652 tsec = current->security;
3653
3654 avd.allowed = 0;
3655 avc_has_perm_noaudit(tsec->sid, tsec->sid,
3656 SECCLASS_CAPABILITY, ~0, &avd);
3657 cap_mask(NETLINK_CB(skb).eff_cap, avd.allowed);
3658
3659 if (policydb_loaded_version >= POLICYDB_VERSION_NLCLASS) 3671 if (policydb_loaded_version >= POLICYDB_VERSION_NLCLASS)
3660 err = selinux_nlmsg_perm(sk, skb); 3672 err = selinux_nlmsg_perm(sk, skb);
3661 3673
3662 return err; 3674 return err;
3663} 3675}
3664 3676
3665static int selinux_netlink_recv(struct sk_buff *skb) 3677static int selinux_netlink_recv(struct sk_buff *skb, int capability)
3666{ 3678{
3667 if (!cap_raised(NETLINK_CB(skb).eff_cap, CAP_NET_ADMIN)) 3679 int err;
3668 return -EPERM; 3680 struct avc_audit_data ad;
3669 return 0; 3681
3682 err = secondary_ops->netlink_recv(skb, capability);
3683 if (err)
3684 return err;
3685
3686 AVC_AUDIT_DATA_INIT(&ad, CAP);
3687 ad.u.cap = capability;
3688
3689 return avc_has_perm(NETLINK_CB(skb).sid, NETLINK_CB(skb).sid,
3690 SECCLASS_CAPABILITY, CAP_TO_MASK(capability), &ad);
3670} 3691}
3671 3692
3672static int ipc_alloc_security(struct task_struct *task, 3693static int ipc_alloc_security(struct task_struct *task,
@@ -4429,9 +4450,11 @@ static struct security_operations selinux_ops = {
4429 .task_setpgid = selinux_task_setpgid, 4450 .task_setpgid = selinux_task_setpgid,
4430 .task_getpgid = selinux_task_getpgid, 4451 .task_getpgid = selinux_task_getpgid,
4431 .task_getsid = selinux_task_getsid, 4452 .task_getsid = selinux_task_getsid,
4453 .task_getsecid = selinux_task_getsecid,
4432 .task_setgroups = selinux_task_setgroups, 4454 .task_setgroups = selinux_task_setgroups,
4433 .task_setnice = selinux_task_setnice, 4455 .task_setnice = selinux_task_setnice,
4434 .task_setioprio = selinux_task_setioprio, 4456 .task_setioprio = selinux_task_setioprio,
4457 .task_getioprio = selinux_task_getioprio,
4435 .task_setrlimit = selinux_task_setrlimit, 4458 .task_setrlimit = selinux_task_setrlimit,
4436 .task_setscheduler = selinux_task_setscheduler, 4459 .task_setscheduler = selinux_task_setscheduler,
4437 .task_getscheduler = selinux_task_getscheduler, 4460 .task_getscheduler = selinux_task_getscheduler,