-rw-r--r--  Documentation/SubmittingPatches | 24
-rw-r--r--  Documentation/networking/ip-sysctl.txt | 10
-rw-r--r--  arch/arm/kernel/sys_arm.c | 2
-rw-r--r--  arch/arm/kernel/traps.c | 2
-rw-r--r--  arch/arm/mach-imx/generic.c | 23
-rw-r--r--  arch/arm/mach-imx/mx1ads.c | 2
-rw-r--r--  arch/arm/mm/Kconfig | 8
-rw-r--r--  arch/ia64/kernel/mca.c | 5
-rw-r--r--  arch/ppc/platforms/pmac_time.c | 2
-rw-r--r--  arch/sparc/Kconfig | 56
-rw-r--r--  arch/sparc/kernel/time.c | 2
-rw-r--r--  arch/sparc/mm/srmmu.c | 2
-rw-r--r--  arch/sparc64/kernel/entry.S | 43
-rw-r--r--  arch/sparc64/kernel/etrap.S | 51
-rw-r--r--  arch/sparc64/kernel/head.S | 33
-rw-r--r--  arch/sparc64/kernel/rtrap.S | 23
-rw-r--r--  arch/sparc64/kernel/setup.c | 8
-rw-r--r--  arch/sparc64/kernel/trampoline.S | 15
-rw-r--r--  arch/sparc64/kernel/winfixup.S | 33
-rw-r--r--  arch/sparc64/mm/init.c | 182
-rw-r--r--  arch/um/include/registers.h | 12
-rw-r--r--  arch/um/include/sysdep-x86_64/ptrace.h | 4
-rw-r--r--  arch/um/kernel/sysrq.c | 8
-rw-r--r--  arch/um/os-Linux/sys-i386/registers.c | 19
-rw-r--r--  arch/um/os-Linux/sys-x86_64/registers.c | 19
-rw-r--r--  arch/um/sys-i386/sysrq.c | 13
-rw-r--r--  arch/um/sys-i386/user-offsets.c | 2
-rw-r--r--  arch/x86_64/kernel/head.S | 40
-rw-r--r--  arch/x86_64/kernel/setup.c | 5
-rw-r--r--  drivers/atm/fore200e.c | 10
-rw-r--r--  drivers/char/drm/drm_stub.c | 2
-rw-r--r--  drivers/connector/connector.c | 3
-rw-r--r--  drivers/infiniband/hw/mthca/mthca_main.c | 45
-rw-r--r--  drivers/infiniband/ulp/ipoib/ipoib_main.c | 4
-rw-r--r--  drivers/mfd/ucb1x00-core.c | 2
-rw-r--r--  drivers/mfd/ucb1x00.h | 2
-rw-r--r--  drivers/net/Kconfig | 8
-rw-r--r--  drivers/net/bonding/bond_main.c | 287
-rw-r--r--  drivers/net/bonding/bonding.h | 4
-rw-r--r--  drivers/net/cassini.c | 502
-rw-r--r--  drivers/net/ibm_emac/ibm_emac_core.c | 31
-rw-r--r--  drivers/net/ns83820.c | 2
-rw-r--r--  drivers/net/pcmcia/smc91c92_cs.c | 2
-rw-r--r--  drivers/net/skge.c | 24
-rw-r--r--  drivers/net/starfire.c | 46
-rw-r--r--  drivers/net/sungem.h | 3
-rw-r--r--  drivers/net/tg3.c | 27
-rw-r--r--  drivers/net/tokenring/ibmtr.c | 5
-rw-r--r--  drivers/net/tulip/21142.c | 2
-rw-r--r--  drivers/net/wan/sdlamain.c | 23
-rw-r--r--  drivers/net/wan/syncppp.c | 2
-rw-r--r--  drivers/net/wireless/orinoco.c | 14
-rw-r--r--  drivers/net/wireless/strip.c | 4
-rw-r--r--  drivers/parisc/led.c | 5
-rw-r--r--  drivers/s390/net/qeth.h | 2
-rw-r--r--  drivers/s390/net/qeth_main.c | 41
-rw-r--r--  drivers/scsi/3w-9xxx.c | 55
-rw-r--r--  drivers/scsi/3w-9xxx.h | 17
-rw-r--r--  drivers/scsi/Makefile | 1
-rw-r--r--  drivers/scsi/aacraid/aachba.c | 283
-rw-r--r--  drivers/scsi/aacraid/aacraid.h | 17
-rw-r--r--  drivers/scsi/aacraid/comminit.c | 17
-rw-r--r--  drivers/scsi/aacraid/commsup.c | 581
-rw-r--r--  drivers/scsi/aacraid/linit.c | 12
-rw-r--r--  drivers/scsi/aic7xxx/aic7770_osm.c | 3
-rw-r--r--  drivers/scsi/aic7xxx/aic79xx_osm.c | 8
-rw-r--r--  drivers/scsi/aic7xxx/aic79xx_osm_pci.c | 3
-rw-r--r--  drivers/scsi/aic7xxx/aic7xxx_osm.c | 8
-rw-r--r--  drivers/scsi/aic7xxx/aic7xxx_osm_pci.c | 3
-rw-r--r--  drivers/scsi/hosts.c | 2
-rw-r--r--  drivers/scsi/lpfc/lpfc_attr.c | 8
-rw-r--r--  drivers/scsi/lpfc/lpfc_hbadisc.c | 4
-rw-r--r--  drivers/scsi/lpfc/lpfc_hw.h | 4
-rw-r--r--  drivers/scsi/lpfc/lpfc_init.c | 6
-rw-r--r--  drivers/scsi/megaraid.c | 70
-rw-r--r--  drivers/scsi/megaraid/Kconfig.megaraid | 9
-rw-r--r--  drivers/scsi/megaraid/Makefile | 1
-rw-r--r--  drivers/scsi/megaraid/megaraid_sas.c | 2806
-rw-r--r--  drivers/scsi/megaraid/megaraid_sas.h | 1142
-rw-r--r--  drivers/scsi/qla2xxx/qla_rscn.c | 2
-rw-r--r--  drivers/scsi/scsi_scan.c | 96
-rw-r--r--  drivers/scsi/scsi_transport_sas.c | 9
-rw-r--r--  drivers/serial/sunsu.c | 4
-rw-r--r--  fs/bfs/dir.c | 2
-rw-r--r--  fs/bfs/inode.c | 44
-rw-r--r--  fs/namei.c | 6
-rw-r--r--  fs/ntfs/ChangeLog | 3
-rw-r--r--  fs/ntfs/bitmap.c | 5
-rw-r--r--  fs/ntfs/layout.h | 2
-rw-r--r--  fs/ntfs/mft.c | 3
-rw-r--r--  fs/ntfs/unistr.c | 2
-rw-r--r--  include/asm-arm/arch-h720x/system.h | 8
-rw-r--r--  include/asm-arm/arch-imx/imx-regs.h | 46
-rw-r--r--  include/asm-arm/arch-ixp4xx/platform.h | 2
-rw-r--r--  include/asm-sparc/btfixup.h | 24
-rw-r--r--  include/asm-sparc/cache.h | 18
-rw-r--r--  include/asm-sparc/cypress.h | 8
-rw-r--r--  include/asm-sparc/delay.h | 2
-rw-r--r--  include/asm-sparc/dma.h | 2
-rw-r--r--  include/asm-sparc/iommu.h | 4
-rw-r--r--  include/asm-sparc/kdebug.h | 2
-rw-r--r--  include/asm-sparc/mbus.h | 4
-rw-r--r--  include/asm-sparc/msi.h | 2
-rw-r--r--  include/asm-sparc/mxcc.h | 8
-rw-r--r--  include/asm-sparc/obio.h | 30
-rw-r--r--  include/asm-sparc/pci.h | 6
-rw-r--r--  include/asm-sparc/pgtable.h | 44
-rw-r--r--  include/asm-sparc/pgtsrmmu.h | 30
-rw-r--r--  include/asm-sparc/processor.h | 2
-rw-r--r--  include/asm-sparc/psr.h | 6
-rw-r--r--  include/asm-sparc/sbi.h | 10
-rw-r--r--  include/asm-sparc/sbus.h | 6
-rw-r--r--  include/asm-sparc/smp.h | 26
-rw-r--r--  include/asm-sparc/smpprim.h | 8
-rw-r--r--  include/asm-sparc/spinlock.h | 10
-rw-r--r--  include/asm-sparc/system.h | 2
-rw-r--r--  include/asm-sparc/traps.h | 2
-rw-r--r--  include/asm-um/processor-generic.h | 23
-rw-r--r--  include/asm-um/processor-i386.h | 15
-rw-r--r--  include/asm-um/processor-x86_64.h | 14
-rw-r--r--  include/linux/atmdev.h | 2
-rw-r--r--  include/linux/bfs_fs.h | 42
-rw-r--r--  include/linux/connector.h | 2
-rw-r--r--  include/linux/inetdevice.h | 12
-rw-r--r--  include/linux/ipv6.h | 5
-rw-r--r--  include/linux/pci_ids.h | 2
-rw-r--r--  include/linux/skbuff.h | 12
-rw-r--r--  include/linux/tc_ematch/tc_em_meta.h | 2
-rw-r--r--  include/linux/textsearch.h | 3
-rw-r--r--  include/net/dn_nsp.h | 8
-rw-r--r--  include/net/dn_route.h | 2
-rw-r--r--  include/net/inet6_hashtables.h | 21
-rw-r--r--  include/net/inet_hashtables.h | 66
-rw-r--r--  include/net/inet_timewait_sock.h | 2
-rw-r--r--  include/net/ip_vs.h | 2
-rw-r--r--  include/net/sock.h | 5
-rw-r--r--  include/net/xfrm.h | 7
-rw-r--r--  include/rxrpc/call.h | 2
-rw-r--r--  include/rxrpc/message.h | 2
-rw-r--r--  include/scsi/scsi_device.h | 1
-rw-r--r--  lib/ts_bm.c | 2
-rw-r--r--  lib/ts_fsm.c | 2
-rw-r--r--  lib/ts_kmp.c | 2
-rw-r--r--  net/atm/atm_misc.c | 2
-rw-r--r--  net/atm/clip.c | 2
-rw-r--r--  net/atm/common.c | 2
-rw-r--r--  net/ax25/ax25_in.c | 2
-rw-r--r--  net/core/netpoll.c | 2
-rw-r--r--  net/core/pktgen.c | 3
-rw-r--r--  net/core/skbuff.c | 5
-rw-r--r--  net/dccp/ipv4.c | 12
-rw-r--r--  net/decnet/af_decnet.c | 6
-rw-r--r--  net/decnet/dn_nsp_out.c | 23
-rw-r--r--  net/econet/af_econet.c | 2
-rw-r--r--  net/ieee80211/ieee80211_tx.c | 2
-rw-r--r--  net/ipv4/arp.c | 21
-rw-r--r--  net/ipv4/devinet.c | 22
-rw-r--r--  net/ipv4/fib_frontend.c | 4
-rw-r--r--  net/ipv4/fib_semantics.c | 4
-rw-r--r--  net/ipv4/fib_trie.c | 23
-rw-r--r--  net/ipv4/icmp.c | 2
-rw-r--r--  net/ipv4/igmp.c | 2
-rw-r--r--  net/ipv4/inet_timewait_sock.c | 6
-rw-r--r--  net/ipv4/ip_gre.c | 4
-rw-r--r--  net/ipv4/ipmr.c | 6
-rw-r--r--  net/ipv4/ipvs/ip_vs_app.c | 2
-rw-r--r--  net/ipv4/netfilter/Kconfig | 2
-rw-r--r--  net/ipv4/netfilter/ip_conntrack_netbios_ns.c | 2
-rw-r--r--  net/ipv4/netfilter/ip_queue.c | 4
-rw-r--r--  net/ipv4/netfilter/ipt_REDIRECT.c | 2
-rw-r--r--  net/ipv4/netfilter/ipt_ULOG.c | 4
-rw-r--r--  net/ipv4/route.c | 6
-rw-r--r--  net/ipv4/tcp_bic.c | 2
-rw-r--r--  net/ipv4/tcp_ipv4.c | 11
-rw-r--r--  net/ipv6/addrconf.c | 2
-rw-r--r--  net/ipv6/ip6_output.c | 2
-rw-r--r--  net/ipv6/mcast.c | 2
-rw-r--r--  net/ipv6/ndisc.c | 2
-rw-r--r--  net/ipv6/netfilter/ip6_queue.c | 4
-rw-r--r--  net/ipv6/tcp_ipv6.c | 18
-rw-r--r--  net/ipv6/udp.c | 18
-rw-r--r--  net/irda/irlan/irlan_eth.c | 2
-rw-r--r--  net/key/af_key.c | 18
-rw-r--r--  net/netfilter/nfnetlink.c | 3
-rw-r--r--  net/netfilter/nfnetlink_log.c | 4
-rw-r--r--  net/netfilter/nfnetlink_queue.c | 4
-rw-r--r--  net/netrom/nr_dev.c | 2
-rw-r--r--  net/packet/af_packet.c | 4
-rw-r--r--  net/rxrpc/call.c | 2
-rw-r--r--  net/rxrpc/connection.c | 2
-rw-r--r--  net/sched/em_meta.c | 6
-rw-r--r--  net/sctp/protocol.c | 2
-rw-r--r--  net/sunrpc/sched.c | 2
-rw-r--r--  net/sysctl_net.c | 2
-rw-r--r--  net/xfrm/xfrm_policy.c | 6
195 files changed, 6037 insertions, 1820 deletions
diff --git a/Documentation/SubmittingPatches b/Documentation/SubmittingPatches
index 1d96efec5e8f..237d54c44bc5 100644
--- a/Documentation/SubmittingPatches
+++ b/Documentation/SubmittingPatches
@@ -305,7 +305,7 @@ point out some special detail about the sign-off.
305 305
306The canonical patch subject line is: 306The canonical patch subject line is:
307 307
308 Subject: [PATCH 001/123] [<area>:] <explanation> 308 Subject: [PATCH 001/123] subsystem: summary phrase
309 309
310The canonical patch message body contains the following: 310The canonical patch message body contains the following:
311 311
@@ -330,9 +330,25 @@ alphabetically by subject line - pretty much any email reader will
330support that - since because the sequence number is zero-padded, 330support that - since because the sequence number is zero-padded,
331the numerical and alphabetic sort is the same. 331the numerical and alphabetic sort is the same.
332 332
333See further details on how to phrase the "<explanation>" in the 333The "subsystem" in the email's Subject should identify which
334"Subject:" line in Andrew Morton's "The perfect patch", referenced 334area or subsystem of the kernel is being patched.
335below. 335
336The "summary phrase" in the email's Subject should concisely
337describe the patch which that email contains. The "summary
338phrase" should not be a filename. Do not use the same "summary
339phrase" for every patch in a whole patch series.
340
341Bear in mind that the "summary phrase" of your email becomes
342a globally-unique identifier for that patch. It propagates
343all the way into the git changelog. The "summary phrase" may
344later be used in developer discussions which refer to the patch.
345People will want to google for the "summary phrase" to read
346discussion regarding that patch.
347
348A couple of example Subjects:
349
350 Subject: [patch 2/5] ext2: improve scalability of bitmap searching
351 Subject: [PATCHv2 001/207] x86: fix eflags tracking
336 352
337The "from" line must be the very first line in the message body, 353The "from" line must be the very first line in the message body,
338and has the form: 354and has the form:
diff --git a/Documentation/networking/ip-sysctl.txt b/Documentation/networking/ip-sysctl.txt
index ab65714d95fc..b433c8a27e2d 100644
--- a/Documentation/networking/ip-sysctl.txt
+++ b/Documentation/networking/ip-sysctl.txt
@@ -355,10 +355,14 @@ ip_dynaddr - BOOLEAN
355 Default: 0 355 Default: 0
356 356
357icmp_echo_ignore_all - BOOLEAN 357icmp_echo_ignore_all - BOOLEAN
358 If set non-zero, then the kernel will ignore all ICMP ECHO
359 requests sent to it.
360 Default: 0
361
358icmp_echo_ignore_broadcasts - BOOLEAN 362icmp_echo_ignore_broadcasts - BOOLEAN
359 If either is set to true, then the kernel will ignore either all 363 If set non-zero, then the kernel will ignore all ICMP ECHO and
360 ICMP ECHO requests sent to it or just those to broadcast/multicast 364 TIMESTAMP requests sent to it via broadcast/multicast.
361 addresses, respectively. 365 Default: 1
362 366
363icmp_ratelimit - INTEGER 367icmp_ratelimit - INTEGER
364 Limit the maximal rates for sending ICMP packets whose type matches 368 Limit the maximal rates for sending ICMP packets whose type matches
diff --git a/arch/arm/kernel/sys_arm.c b/arch/arm/kernel/sys_arm.c
index 42629ff84f5a..ea569ba482b1 100644
--- a/arch/arm/kernel/sys_arm.c
+++ b/arch/arm/kernel/sys_arm.c
@@ -305,7 +305,7 @@ long execve(const char *filename, char **argv, char **envp)
305 "Ir" (THREAD_START_SP - sizeof(regs)), 305 "Ir" (THREAD_START_SP - sizeof(regs)),
306 "r" (&regs), 306 "r" (&regs),
307 "Ir" (sizeof(regs)) 307 "Ir" (sizeof(regs))
308 : "r0", "r1", "r2", "r3", "ip", "memory"); 308 : "r0", "r1", "r2", "r3", "ip", "lr", "memory");
309 309
310 out: 310 out:
311 return ret; 311 return ret;
diff --git a/arch/arm/kernel/traps.c b/arch/arm/kernel/traps.c
index e7d22dbcb691..f6de76e0a45d 100644
--- a/arch/arm/kernel/traps.c
+++ b/arch/arm/kernel/traps.c
@@ -504,7 +504,7 @@ asmlinkage int arm_syscall(int no, struct pt_regs *regs)
504 504
505 bad_access: 505 bad_access:
506 spin_unlock(&mm->page_table_lock); 506 spin_unlock(&mm->page_table_lock);
507 /* simulate a read access fault */ 507 /* simulate a write access fault */
508 do_DataAbort(addr, 15 + (1 << 11), regs); 508 do_DataAbort(addr, 15 + (1 << 11), regs);
509 return -1; 509 return -1;
510 } 510 }
diff --git a/arch/arm/mach-imx/generic.c b/arch/arm/mach-imx/generic.c
index 41e5849ae8da..f8a742bb2d5b 100644
--- a/arch/arm/mach-imx/generic.c
+++ b/arch/arm/mach-imx/generic.c
@@ -28,14 +28,15 @@
28#include <linux/module.h> 28#include <linux/module.h>
29#include <asm/arch/imxfb.h> 29#include <asm/arch/imxfb.h>
30#include <asm/hardware.h> 30#include <asm/hardware.h>
31#include <asm/arch/imx-regs.h>
31 32
32#include <asm/mach/map.h> 33#include <asm/mach/map.h>
33 34
34void imx_gpio_mode(int gpio_mode) 35void imx_gpio_mode(int gpio_mode)
35{ 36{
36 unsigned int pin = gpio_mode & GPIO_PIN_MASK; 37 unsigned int pin = gpio_mode & GPIO_PIN_MASK;
37 unsigned int port = (gpio_mode & GPIO_PORT_MASK) >> 5; 38 unsigned int port = (gpio_mode & GPIO_PORT_MASK) >> GPIO_PORT_SHIFT;
38 unsigned int ocr = (gpio_mode & GPIO_OCR_MASK) >> 10; 39 unsigned int ocr = (gpio_mode & GPIO_OCR_MASK) >> GPIO_OCR_SHIFT;
39 unsigned int tmp; 40 unsigned int tmp;
40 41
41 /* Pullup enable */ 42 /* Pullup enable */
@@ -57,7 +58,7 @@ void imx_gpio_mode(int gpio_mode)
57 GPR(port) &= ~(1<<pin); 58 GPR(port) &= ~(1<<pin);
58 59
59 /* use as gpio? */ 60 /* use as gpio? */
60 if( ocr == 3 ) 61 if(gpio_mode & GPIO_GIUS)
61 GIUS(port) |= (1<<pin); 62 GIUS(port) |= (1<<pin);
62 else 63 else
63 GIUS(port) &= ~(1<<pin); 64 GIUS(port) &= ~(1<<pin);
@@ -72,20 +73,20 @@ void imx_gpio_mode(int gpio_mode)
72 tmp |= (ocr << (pin*2)); 73 tmp |= (ocr << (pin*2));
73 OCR1(port) = tmp; 74 OCR1(port) = tmp;
74 75
75 if( gpio_mode & GPIO_AOUT ) 76 ICONFA1(port) &= ~( 3<<(pin*2));
76 ICONFA1(port) &= ~( 3<<(pin*2)); 77 ICONFA1(port) |= ((gpio_mode >> GPIO_AOUT_SHIFT) & 3) << (pin * 2);
77 if( gpio_mode & GPIO_BOUT ) 78 ICONFB1(port) &= ~( 3<<(pin*2));
78 ICONFB1(port) &= ~( 3<<(pin*2)); 79 ICONFB1(port) |= ((gpio_mode >> GPIO_BOUT_SHIFT) & 3) << (pin * 2);
79 } else { 80 } else {
80 tmp = OCR2(port); 81 tmp = OCR2(port);
81 tmp &= ~( 3<<((pin-16)*2)); 82 tmp &= ~( 3<<((pin-16)*2));
82 tmp |= (ocr << ((pin-16)*2)); 83 tmp |= (ocr << ((pin-16)*2));
83 OCR2(port) = tmp; 84 OCR2(port) = tmp;
84 85
85 if( gpio_mode & GPIO_AOUT ) 86 ICONFA2(port) &= ~( 3<<((pin-16)*2));
86 ICONFA2(port) &= ~( 3<<((pin-16)*2)); 87 ICONFA2(port) |= ((gpio_mode >> GPIO_AOUT_SHIFT) & 3) << ((pin-16) * 2);
87 if( gpio_mode & GPIO_BOUT ) 88 ICONFB2(port) &= ~( 3<<((pin-16)*2));
88 ICONFB2(port) &= ~( 3<<((pin-16)*2)); 89 ICONFB2(port) |= ((gpio_mode >> GPIO_BOUT_SHIFT) & 3) << ((pin-16) * 2);
89 } 90 }
90} 91}
91 92
diff --git a/arch/arm/mach-imx/mx1ads.c b/arch/arm/mach-imx/mx1ads.c
index 5d25434d332c..a7511ddfe364 100644
--- a/arch/arm/mach-imx/mx1ads.c
+++ b/arch/arm/mach-imx/mx1ads.c
@@ -55,7 +55,7 @@ static void __init
55mx1ads_init(void) 55mx1ads_init(void)
56{ 56{
57#ifdef CONFIG_LEDS 57#ifdef CONFIG_LEDS
58 imx_gpio_mode(GPIO_PORTA | GPIO_OUT | GPIO_GPIO | 2); 58 imx_gpio_mode(GPIO_PORTA | GPIO_OUT | 2);
59#endif 59#endif
60 platform_add_devices(devices, ARRAY_SIZE(devices)); 60 platform_add_devices(devices, ARRAY_SIZE(devices));
61} 61}
diff --git a/arch/arm/mm/Kconfig b/arch/arm/mm/Kconfig
index db5e47dfc303..c54e04c995ee 100644
--- a/arch/arm/mm/Kconfig
+++ b/arch/arm/mm/Kconfig
@@ -370,21 +370,21 @@ config CPU_BIG_ENDIAN
370 370
371config CPU_ICACHE_DISABLE 371config CPU_ICACHE_DISABLE
372 bool "Disable I-Cache" 372 bool "Disable I-Cache"
373 depends on CPU_ARM920T || CPU_ARM922T || CPU_ARM925T || CPU_ARM926T || CPU_ARM1020 373 depends on CPU_ARM920T || CPU_ARM922T || CPU_ARM925T || CPU_ARM926T || CPU_ARM1020 || CPU_V6
374 help 374 help
375 Say Y here to disable the processor instruction cache. Unless 375 Say Y here to disable the processor instruction cache. Unless
376 you have a reason not to or are unsure, say N. 376 you have a reason not to or are unsure, say N.
377 377
378config CPU_DCACHE_DISABLE 378config CPU_DCACHE_DISABLE
379 bool "Disable D-Cache" 379 bool "Disable D-Cache"
380 depends on CPU_ARM920T || CPU_ARM922T || CPU_ARM925T || CPU_ARM926T || CPU_ARM1020 380 depends on CPU_ARM920T || CPU_ARM922T || CPU_ARM925T || CPU_ARM926T || CPU_ARM1020 || CPU_V6
381 help 381 help
382 Say Y here to disable the processor data cache. Unless 382 Say Y here to disable the processor data cache. Unless
383 you have a reason not to or are unsure, say N. 383 you have a reason not to or are unsure, say N.
384 384
385config CPU_DCACHE_WRITETHROUGH 385config CPU_DCACHE_WRITETHROUGH
386 bool "Force write through D-cache" 386 bool "Force write through D-cache"
387 depends on (CPU_ARM920T || CPU_ARM922T || CPU_ARM925T || CPU_ARM926T || CPU_ARM1020) && !CPU_DCACHE_DISABLE 387 depends on (CPU_ARM920T || CPU_ARM922T || CPU_ARM925T || CPU_ARM926T || CPU_ARM1020 || CPU_V6) && !CPU_DCACHE_DISABLE
388 default y if CPU_ARM925T 388 default y if CPU_ARM925T
389 help 389 help
390 Say Y here to use the data cache in writethrough mode. Unless you 390 Say Y here to use the data cache in writethrough mode. Unless you
@@ -399,7 +399,7 @@ config CPU_CACHE_ROUND_ROBIN
399 399
400config CPU_BPREDICT_DISABLE 400config CPU_BPREDICT_DISABLE
401 bool "Disable branch prediction" 401 bool "Disable branch prediction"
402 depends on CPU_ARM1020 402 depends on CPU_ARM1020 || CPU_V6
403 help 403 help
404 Say Y here to disable branch prediction. If unsure, say N. 404 Say Y here to disable branch prediction. If unsure, say N.
405 405
diff --git a/arch/ia64/kernel/mca.c b/arch/ia64/kernel/mca.c
index 6dc726ad7137..d0a5106fba24 100644
--- a/arch/ia64/kernel/mca.c
+++ b/arch/ia64/kernel/mca.c
@@ -1016,6 +1016,11 @@ ia64_mca_cmc_int_handler(int cmc_irq, void *arg, struct pt_regs *ptregs)
1016 1016
1017 cmc_polling_enabled = 1; 1017 cmc_polling_enabled = 1;
1018 spin_unlock(&cmc_history_lock); 1018 spin_unlock(&cmc_history_lock);
1019 /* If we're being hit with CMC interrupts, we won't
1020 * ever execute the schedule_work() below. Need to
1021 * disable CMC interrupts on this processor now.
1022 */
1023 ia64_mca_cmc_vector_disable(NULL);
1019 schedule_work(&cmc_disable_work); 1024 schedule_work(&cmc_disable_work);
1020 1025
1021 /* 1026 /*
diff --git a/arch/ppc/platforms/pmac_time.c b/arch/ppc/platforms/pmac_time.c
index 778ce4fec368..efb819f9490d 100644
--- a/arch/ppc/platforms/pmac_time.c
+++ b/arch/ppc/platforms/pmac_time.c
@@ -195,7 +195,7 @@ via_calibrate_decr(void)
195 ; 195 ;
196 dend = get_dec(); 196 dend = get_dec();
197 197
198 tb_ticks_per_jiffy = (dstart - dend) / (6 * (HZ/100)); 198 tb_ticks_per_jiffy = (dstart - dend) / ((6 * HZ)/100);
199 tb_to_us = mulhwu_scale_factor(dstart - dend, 60000); 199 tb_to_us = mulhwu_scale_factor(dstart - dend, 60000);
200 200
201 printk(KERN_INFO "via_calibrate_decr: ticks per jiffy = %u (%u ticks)\n", 201 printk(KERN_INFO "via_calibrate_decr: ticks per jiffy = %u (%u ticks)\n",
diff --git a/arch/sparc/Kconfig b/arch/sparc/Kconfig
index aba05394d30a..6537445dac0e 100644
--- a/arch/sparc/Kconfig
+++ b/arch/sparc/Kconfig
@@ -25,62 +25,6 @@ source "init/Kconfig"
25 25
26menu "General machine setup" 26menu "General machine setup"
27 27
28config VT
29 bool
30 select INPUT
31 default y
32 ---help---
33 If you say Y here, you will get support for terminal devices with
34 display and keyboard devices. These are called "virtual" because you
35 can run several virtual terminals (also called virtual consoles) on
36 one physical terminal. This is rather useful, for example one
37 virtual terminal can collect system messages and warnings, another
38 one can be used for a text-mode user session, and a third could run
39 an X session, all in parallel. Switching between virtual terminals
40 is done with certain key combinations, usually Alt-<function key>.
41
42 The setterm command ("man setterm") can be used to change the
43 properties (such as colors or beeping) of a virtual terminal. The
44 man page console_codes(4) ("man console_codes") contains the special
45 character sequences that can be used to change those properties
46 directly. The fonts used on virtual terminals can be changed with
47 the setfont ("man setfont") command and the key bindings are defined
48 with the loadkeys ("man loadkeys") command.
49
50 You need at least one virtual terminal device in order to make use
51 of your keyboard and monitor. Therefore, only people configuring an
52 embedded system would want to say N here in order to save some
53 memory; the only way to log into such a system is then via a serial
54 or network connection.
55
56 If unsure, say Y, or else you won't be able to do much with your new
57 shiny Linux system :-)
58
59config VT_CONSOLE
60 bool
61 default y
62 ---help---
63 The system console is the device which receives all kernel messages
64 and warnings and which allows logins in single user mode. If you
65 answer Y here, a virtual terminal (the device used to interact with
66 a physical terminal) can be used as system console. This is the most
67 common mode of operations, so you should say Y here unless you want
68 the kernel messages be output only to a serial port (in which case
69 you should say Y to "Console on serial port", below).
70
71 If you do say Y here, by default the currently visible virtual
72 terminal (/dev/tty0) will be used as system console. You can change
73 that with a kernel command line option such as "console=tty3" which
74 would use the third virtual terminal as system console. (Try "man
75 bootparam" or see the documentation of your boot loader (lilo or
76 loadlin) about how to pass options to the kernel at boot time.)
77
78 If unsure, say Y.
79
80config HW_CONSOLE
81 bool
82 default y
83
84config SMP 28config SMP
85 bool "Symmetric multi-processing support (does not work on sun4/sun4c)" 29 bool "Symmetric multi-processing support (does not work on sun4/sun4c)"
86 depends on BROKEN 30 depends on BROKEN
diff --git a/arch/sparc/kernel/time.c b/arch/sparc/kernel/time.c
index bc015e980341..279a62627c10 100644
--- a/arch/sparc/kernel/time.c
+++ b/arch/sparc/kernel/time.c
@@ -457,7 +457,7 @@ void __init time_init(void)
457 sbus_time_init(); 457 sbus_time_init();
458} 458}
459 459
460extern __inline__ unsigned long do_gettimeoffset(void) 460static inline unsigned long do_gettimeoffset(void)
461{ 461{
462 return (*master_l10_counter >> 10) & 0x1fffff; 462 return (*master_l10_counter >> 10) & 0x1fffff;
463} 463}
diff --git a/arch/sparc/mm/srmmu.c b/arch/sparc/mm/srmmu.c
index c89a803cbc20..c664b962987c 100644
--- a/arch/sparc/mm/srmmu.c
+++ b/arch/sparc/mm/srmmu.c
@@ -260,7 +260,7 @@ static inline pte_t srmmu_pte_modify(pte_t pte, pgprot_t newprot)
260{ return __pte((pte_val(pte) & SRMMU_CHG_MASK) | pgprot_val(newprot)); } 260{ return __pte((pte_val(pte) & SRMMU_CHG_MASK) | pgprot_val(newprot)); }
261 261
262/* to find an entry in a top-level page table... */ 262/* to find an entry in a top-level page table... */
263extern inline pgd_t *srmmu_pgd_offset(struct mm_struct * mm, unsigned long address) 263static inline pgd_t *srmmu_pgd_offset(struct mm_struct * mm, unsigned long address)
264{ return mm->pgd + (address >> SRMMU_PGDIR_SHIFT); } 264{ return mm->pgd + (address >> SRMMU_PGDIR_SHIFT); }
265 265
266/* Find an entry in the second-level page table.. */ 266/* Find an entry in the second-level page table.. */
diff --git a/arch/sparc64/kernel/entry.S b/arch/sparc64/kernel/entry.S
index 2879b1072921..f685035dbdb8 100644
--- a/arch/sparc64/kernel/entry.S
+++ b/arch/sparc64/kernel/entry.S
@@ -97,8 +97,8 @@ do_fpdis:
97 faddd %f0, %f2, %f4 97 faddd %f0, %f2, %f4
98 fmuld %f0, %f2, %f6 98 fmuld %f0, %f2, %f6
99 ldxa [%g3] ASI_DMMU, %g5 99 ldxa [%g3] ASI_DMMU, %g5
100cplus_fptrap_insn_1: 100 sethi %hi(sparc64_kern_sec_context), %g2
101 sethi %hi(0), %g2 101 ldx [%g2 + %lo(sparc64_kern_sec_context)], %g2
102 stxa %g2, [%g3] ASI_DMMU 102 stxa %g2, [%g3] ASI_DMMU
103 membar #Sync 103 membar #Sync
104 add %g6, TI_FPREGS + 0xc0, %g2 104 add %g6, TI_FPREGS + 0xc0, %g2
@@ -126,8 +126,8 @@ cplus_fptrap_insn_1:
126 fzero %f34 126 fzero %f34
127 ldxa [%g3] ASI_DMMU, %g5 127 ldxa [%g3] ASI_DMMU, %g5
128 add %g6, TI_FPREGS, %g1 128 add %g6, TI_FPREGS, %g1
129cplus_fptrap_insn_2: 129 sethi %hi(sparc64_kern_sec_context), %g2
130 sethi %hi(0), %g2 130 ldx [%g2 + %lo(sparc64_kern_sec_context)], %g2
131 stxa %g2, [%g3] ASI_DMMU 131 stxa %g2, [%g3] ASI_DMMU
132 membar #Sync 132 membar #Sync
133 add %g6, TI_FPREGS + 0x40, %g2 133 add %g6, TI_FPREGS + 0x40, %g2
@@ -153,8 +153,8 @@ cplus_fptrap_insn_2:
1533: mov SECONDARY_CONTEXT, %g3 1533: mov SECONDARY_CONTEXT, %g3
154 add %g6, TI_FPREGS, %g1 154 add %g6, TI_FPREGS, %g1
155 ldxa [%g3] ASI_DMMU, %g5 155 ldxa [%g3] ASI_DMMU, %g5
156cplus_fptrap_insn_3: 156 sethi %hi(sparc64_kern_sec_context), %g2
157 sethi %hi(0), %g2 157 ldx [%g2 + %lo(sparc64_kern_sec_context)], %g2
158 stxa %g2, [%g3] ASI_DMMU 158 stxa %g2, [%g3] ASI_DMMU
159 membar #Sync 159 membar #Sync
160 mov 0x40, %g2 160 mov 0x40, %g2
@@ -319,8 +319,8 @@ do_fptrap_after_fsr:
319 stx %g3, [%g6 + TI_GSR] 319 stx %g3, [%g6 + TI_GSR]
320 mov SECONDARY_CONTEXT, %g3 320 mov SECONDARY_CONTEXT, %g3
321 ldxa [%g3] ASI_DMMU, %g5 321 ldxa [%g3] ASI_DMMU, %g5
322cplus_fptrap_insn_4: 322 sethi %hi(sparc64_kern_sec_context), %g2
323 sethi %hi(0), %g2 323 ldx [%g2 + %lo(sparc64_kern_sec_context)], %g2
324 stxa %g2, [%g3] ASI_DMMU 324 stxa %g2, [%g3] ASI_DMMU
325 membar #Sync 325 membar #Sync
326 add %g6, TI_FPREGS, %g2 326 add %g6, TI_FPREGS, %g2
@@ -341,33 +341,6 @@ cplus_fptrap_insn_4:
341 ba,pt %xcc, etrap 341 ba,pt %xcc, etrap
342 wr %g0, 0, %fprs 342 wr %g0, 0, %fprs
343 343
344cplus_fptrap_1:
345 sethi %hi(CTX_CHEETAH_PLUS_CTX0), %g2
346
347 .globl cheetah_plus_patch_fpdis
348cheetah_plus_patch_fpdis:
349 /* We configure the dTLB512_0 for 4MB pages and the
350 * dTLB512_1 for 8K pages when in context zero.
351 */
352 sethi %hi(cplus_fptrap_1), %o0
353 lduw [%o0 + %lo(cplus_fptrap_1)], %o1
354
355 set cplus_fptrap_insn_1, %o2
356 stw %o1, [%o2]
357 flush %o2
358 set cplus_fptrap_insn_2, %o2
359 stw %o1, [%o2]
360 flush %o2
361 set cplus_fptrap_insn_3, %o2
362 stw %o1, [%o2]
363 flush %o2
364 set cplus_fptrap_insn_4, %o2
365 stw %o1, [%o2]
366 flush %o2
367
368 retl
369 nop
370
371 /* The registers for cross calls will be: 344 /* The registers for cross calls will be:
372 * 345 *
373 * DATA 0: [low 32-bits] Address of function to call, jmp to this 346 * DATA 0: [low 32-bits] Address of function to call, jmp to this
diff --git a/arch/sparc64/kernel/etrap.S b/arch/sparc64/kernel/etrap.S
index 50d2af1d98ae..0d8eba21111b 100644
--- a/arch/sparc64/kernel/etrap.S
+++ b/arch/sparc64/kernel/etrap.S
@@ -68,12 +68,8 @@ etrap_irq:
68 68
69 wrpr %g3, 0, %otherwin 69 wrpr %g3, 0, %otherwin
70 wrpr %g2, 0, %wstate 70 wrpr %g2, 0, %wstate
71cplus_etrap_insn_1: 71 sethi %hi(sparc64_kern_pri_context), %g2
72 sethi %hi(0), %g3 72 ldx [%g2 + %lo(sparc64_kern_pri_context)], %g3
73 sllx %g3, 32, %g3
74cplus_etrap_insn_2:
75 sethi %hi(0), %g2
76 or %g3, %g2, %g3
77 stxa %g3, [%l4] ASI_DMMU 73 stxa %g3, [%l4] ASI_DMMU
78 flush %l6 74 flush %l6
79 wr %g0, ASI_AIUS, %asi 75 wr %g0, ASI_AIUS, %asi
@@ -215,12 +211,8 @@ scetrap: rdpr %pil, %g2
215 mov PRIMARY_CONTEXT, %l4 211 mov PRIMARY_CONTEXT, %l4
216 wrpr %g3, 0, %otherwin 212 wrpr %g3, 0, %otherwin
217 wrpr %g2, 0, %wstate 213 wrpr %g2, 0, %wstate
218cplus_etrap_insn_3: 214 sethi %hi(sparc64_kern_pri_context), %g2
219 sethi %hi(0), %g3 215 ldx [%g2 + %lo(sparc64_kern_pri_context)], %g3
220 sllx %g3, 32, %g3
221cplus_etrap_insn_4:
222 sethi %hi(0), %g2
223 or %g3, %g2, %g3
224 stxa %g3, [%l4] ASI_DMMU 216 stxa %g3, [%l4] ASI_DMMU
225 flush %l6 217 flush %l6
226 218
@@ -264,38 +256,3 @@ cplus_etrap_insn_4:
264 256
265#undef TASK_REGOFF 257#undef TASK_REGOFF
266#undef ETRAP_PSTATE1 258#undef ETRAP_PSTATE1
267
268cplus_einsn_1:
269 sethi %uhi(CTX_CHEETAH_PLUS_NUC), %g3
270cplus_einsn_2:
271 sethi %hi(CTX_CHEETAH_PLUS_CTX0), %g2
272
273 .globl cheetah_plus_patch_etrap
274cheetah_plus_patch_etrap:
275 /* We configure the dTLB512_0 for 4MB pages and the
276 * dTLB512_1 for 8K pages when in context zero.
277 */
278 sethi %hi(cplus_einsn_1), %o0
279 sethi %hi(cplus_etrap_insn_1), %o2
280 lduw [%o0 + %lo(cplus_einsn_1)], %o1
281 or %o2, %lo(cplus_etrap_insn_1), %o2
282 stw %o1, [%o2]
283 flush %o2
284 sethi %hi(cplus_etrap_insn_3), %o2
285 or %o2, %lo(cplus_etrap_insn_3), %o2
286 stw %o1, [%o2]
287 flush %o2
288
289 sethi %hi(cplus_einsn_2), %o0
290 sethi %hi(cplus_etrap_insn_2), %o2
291 lduw [%o0 + %lo(cplus_einsn_2)], %o1
292 or %o2, %lo(cplus_etrap_insn_2), %o2
293 stw %o1, [%o2]
294 flush %o2
295 sethi %hi(cplus_etrap_insn_4), %o2
296 or %o2, %lo(cplus_etrap_insn_4), %o2
297 stw %o1, [%o2]
298 flush %o2
299
300 retl
301 nop
diff --git a/arch/sparc64/kernel/head.S b/arch/sparc64/kernel/head.S
index 89406f9649a9..24340496cdd3 100644
--- a/arch/sparc64/kernel/head.S
+++ b/arch/sparc64/kernel/head.S
@@ -325,23 +325,7 @@ cheetah_tlb_fixup:
3251: sethi %hi(tlb_type), %g1 3251: sethi %hi(tlb_type), %g1
326 stw %g2, [%g1 + %lo(tlb_type)] 326 stw %g2, [%g1 + %lo(tlb_type)]
327 327
328 BRANCH_IF_CHEETAH_PLUS_OR_FOLLOWON(g1,g7,1f) 328 /* Patch copy/page operations to cheetah optimized versions. */
329 ba,pt %xcc, 2f
330 nop
331
3321: /* Patch context register writes to support nucleus page
333 * size correctly.
334 */
335 call cheetah_plus_patch_etrap
336 nop
337 call cheetah_plus_patch_rtrap
338 nop
339 call cheetah_plus_patch_fpdis
340 nop
341 call cheetah_plus_patch_winfixup
342 nop
343
3442: /* Patch copy/page operations to cheetah optimized versions. */
345 call cheetah_patch_copyops 329 call cheetah_patch_copyops
346 nop 330 nop
347 call cheetah_patch_copy_page 331 call cheetah_patch_copy_page
@@ -484,20 +468,13 @@ spitfire_vpte_base:
484 call prom_set_trap_table 468 call prom_set_trap_table
485 sethi %hi(sparc64_ttable_tl0), %o0 469 sethi %hi(sparc64_ttable_tl0), %o0
486 470
487 BRANCH_IF_CHEETAH_PLUS_OR_FOLLOWON(g2,g3,1f) 471 /* Start using proper page size encodings in ctx register. */
488 ba,pt %xcc, 2f 472 sethi %hi(sparc64_kern_pri_context), %g3
489 nop 473 ldx [%g3 + %lo(sparc64_kern_pri_context)], %g2
490
4911: /* Start using proper page size encodings in ctx register. */
492 sethi %uhi(CTX_CHEETAH_PLUS_NUC), %g3
493 mov PRIMARY_CONTEXT, %g1 474 mov PRIMARY_CONTEXT, %g1
494 sllx %g3, 32, %g3 475 stxa %g2, [%g1] ASI_DMMU
495 sethi %hi(CTX_CHEETAH_PLUS_CTX0), %g2
496 or %g3, %g2, %g3
497 stxa %g3, [%g1] ASI_DMMU
498 membar #Sync 476 membar #Sync
499 477
5002:
501 rdpr %pstate, %o1 478 rdpr %pstate, %o1
502 or %o1, PSTATE_IE, %o1 479 or %o1, PSTATE_IE, %o1
503 wrpr %o1, 0, %pstate 480 wrpr %o1, 0, %pstate
diff --git a/arch/sparc64/kernel/rtrap.S b/arch/sparc64/kernel/rtrap.S
index fafd227735fa..ecfb42a69a44 100644
--- a/arch/sparc64/kernel/rtrap.S
+++ b/arch/sparc64/kernel/rtrap.S
@@ -256,9 +256,8 @@ rt_continue: ldx [%sp + PTREGS_OFF + PT_V9_G1], %g1
256 brnz,pn %l3, kern_rtt 256 brnz,pn %l3, kern_rtt
257 mov PRIMARY_CONTEXT, %l7 257 mov PRIMARY_CONTEXT, %l7
258 ldxa [%l7 + %l7] ASI_DMMU, %l0 258 ldxa [%l7 + %l7] ASI_DMMU, %l0
259cplus_rtrap_insn_1: 259 sethi %hi(sparc64_kern_pri_nuc_bits), %l1
260 sethi %hi(0), %l1 260 ldx [%l1 + %lo(sparc64_kern_pri_nuc_bits)], %l1
261 sllx %l1, 32, %l1
262 or %l0, %l1, %l0 261 or %l0, %l1, %l0
263 stxa %l0, [%l7] ASI_DMMU 262 stxa %l0, [%l7] ASI_DMMU
264 flush %g6 263 flush %g6
@@ -345,21 +344,3 @@ kern_fpucheck: ldub [%g6 + TI_FPDEPTH], %l5
345 wr %g0, FPRS_DU, %fprs 344 wr %g0, FPRS_DU, %fprs
346 ba,pt %xcc, rt_continue 345 ba,pt %xcc, rt_continue
347 stb %l5, [%g6 + TI_FPDEPTH] 346 stb %l5, [%g6 + TI_FPDEPTH]
348
349cplus_rinsn_1:
350 sethi %uhi(CTX_CHEETAH_PLUS_NUC), %l1
351
352 .globl cheetah_plus_patch_rtrap
353cheetah_plus_patch_rtrap:
354 /* We configure the dTLB512_0 for 4MB pages and the
355 * dTLB512_1 for 8K pages when in context zero.
356 */
357 sethi %hi(cplus_rinsn_1), %o0
358 sethi %hi(cplus_rtrap_insn_1), %o2
359 lduw [%o0 + %lo(cplus_rinsn_1)], %o1
360 or %o2, %lo(cplus_rtrap_insn_1), %o2
361 stw %o1, [%o2]
362 flush %o2
363
364 retl
365 nop
diff --git a/arch/sparc64/kernel/setup.c b/arch/sparc64/kernel/setup.c
index 4c9c8f241748..c1f34237cdf2 100644
--- a/arch/sparc64/kernel/setup.c
+++ b/arch/sparc64/kernel/setup.c
@@ -187,17 +187,13 @@ int prom_callback(long *args)
187 } 187 }
188 188
189 if ((va >= KERNBASE) && (va < (KERNBASE + (4 * 1024 * 1024)))) { 189 if ((va >= KERNBASE) && (va < (KERNBASE + (4 * 1024 * 1024)))) {
190 unsigned long kernel_pctx = 0; 190 extern unsigned long sparc64_kern_pri_context;
191
192 if (tlb_type == cheetah_plus)
193 kernel_pctx |= (CTX_CHEETAH_PLUS_NUC |
194 CTX_CHEETAH_PLUS_CTX0);
195 191
196 /* Spitfire Errata #32 workaround */ 192 /* Spitfire Errata #32 workaround */
197 __asm__ __volatile__("stxa %0, [%1] %2\n\t" 193 __asm__ __volatile__("stxa %0, [%1] %2\n\t"
198 "flush %%g6" 194 "flush %%g6"
199 : /* No outputs */ 195 : /* No outputs */
200 : "r" (kernel_pctx), 196 : "r" (sparc64_kern_pri_context),
201 "r" (PRIMARY_CONTEXT), 197 "r" (PRIMARY_CONTEXT),
202 "i" (ASI_DMMU)); 198 "i" (ASI_DMMU));
203 199
diff --git a/arch/sparc64/kernel/trampoline.S b/arch/sparc64/kernel/trampoline.S
index 89f2fcfcd662..9478551cb020 100644
--- a/arch/sparc64/kernel/trampoline.S
+++ b/arch/sparc64/kernel/trampoline.S
@@ -336,20 +336,13 @@ do_unlock:
336 call init_irqwork_curcpu 336 call init_irqwork_curcpu
337 nop 337 nop
338 338
339 BRANCH_IF_CHEETAH_PLUS_OR_FOLLOWON(g2,g3,1f) 339 /* Start using proper page size encodings in ctx register. */
340 ba,pt %xcc, 2f 340 sethi %hi(sparc64_kern_pri_context), %g3
341 nop 341 ldx [%g3 + %lo(sparc64_kern_pri_context)], %g2
342
3431: /* Start using proper page size encodings in ctx register. */
344 sethi %uhi(CTX_CHEETAH_PLUS_NUC), %g3
345 mov PRIMARY_CONTEXT, %g1 342 mov PRIMARY_CONTEXT, %g1
346 sllx %g3, 32, %g3 343 stxa %g2, [%g1] ASI_DMMU
347 sethi %hi(CTX_CHEETAH_PLUS_CTX0), %g2
348 or %g3, %g2, %g3
349 stxa %g3, [%g1] ASI_DMMU
350 membar #Sync 344 membar #Sync
351 345
3522:
353 rdpr %pstate, %o1 346 rdpr %pstate, %o1
354 or %o1, PSTATE_IE, %o1 347 or %o1, PSTATE_IE, %o1
355 wrpr %o1, 0, %pstate 348 wrpr %o1, 0, %pstate
diff --git a/arch/sparc64/kernel/winfixup.S b/arch/sparc64/kernel/winfixup.S
index 99c809a1e5ac..39160926267b 100644
--- a/arch/sparc64/kernel/winfixup.S
+++ b/arch/sparc64/kernel/winfixup.S
@@ -16,23 +16,14 @@
16 .text 16 .text
17 17
18set_pcontext: 18set_pcontext:
19cplus_winfixup_insn_1: 19 sethi %hi(sparc64_kern_pri_context), %l1
20 sethi %hi(0), %l1 20 ldx [%l1 + %lo(sparc64_kern_pri_context)], %l1
21 mov PRIMARY_CONTEXT, %g1 21 mov PRIMARY_CONTEXT, %g1
22 sllx %l1, 32, %l1
23cplus_winfixup_insn_2:
24 sethi %hi(0), %g2
25 or %l1, %g2, %l1
26 stxa %l1, [%g1] ASI_DMMU 22 stxa %l1, [%g1] ASI_DMMU
27 flush %g6 23 flush %g6
28 retl 24 retl
29 nop 25 nop
30 26
31cplus_wfinsn_1:
32 sethi %uhi(CTX_CHEETAH_PLUS_NUC), %l1
33cplus_wfinsn_2:
34 sethi %hi(CTX_CHEETAH_PLUS_CTX0), %g2
35
36 .align 32 27 .align 32
37 28
38 /* Here are the rules, pay attention. 29 /* Here are the rules, pay attention.
@@ -395,23 +386,3 @@ window_dax_from_user_common:
395 add %sp, PTREGS_OFF, %o0 386 add %sp, PTREGS_OFF, %o0
396 ba,pt %xcc, rtrap 387 ba,pt %xcc, rtrap
397 clr %l6 388 clr %l6
398
399
400 .globl cheetah_plus_patch_winfixup
401cheetah_plus_patch_winfixup:
402 sethi %hi(cplus_wfinsn_1), %o0
403 sethi %hi(cplus_winfixup_insn_1), %o2
404 lduw [%o0 + %lo(cplus_wfinsn_1)], %o1
405 or %o2, %lo(cplus_winfixup_insn_1), %o2
406 stw %o1, [%o2]
407 flush %o2
408
409 sethi %hi(cplus_wfinsn_2), %o0
410 sethi %hi(cplus_winfixup_insn_2), %o2
411 lduw [%o0 + %lo(cplus_wfinsn_2)], %o1
412 or %o2, %lo(cplus_winfixup_insn_2), %o2
413 stw %o1, [%o2]
414 flush %o2
415
416 retl
417 nop
diff --git a/arch/sparc64/mm/init.c b/arch/sparc64/mm/init.c
index 5db50524f20d..0d2e967c7200 100644
--- a/arch/sparc64/mm/init.c
+++ b/arch/sparc64/mm/init.c
@@ -133,6 +133,12 @@ extern unsigned int sparc_ramdisk_size;
133 133
134struct page *mem_map_zero __read_mostly; 134struct page *mem_map_zero __read_mostly;
135 135
136unsigned int sparc64_highest_unlocked_tlb_ent __read_mostly;
137
138unsigned long sparc64_kern_pri_context __read_mostly;
139unsigned long sparc64_kern_pri_nuc_bits __read_mostly;
140unsigned long sparc64_kern_sec_context __read_mostly;
141
136int bigkernel = 0; 142int bigkernel = 0;
137 143
138/* XXX Tune this... */ 144/* XXX Tune this... */
@@ -362,6 +368,7 @@ struct linux_prom_translation {
362 unsigned long data; 368 unsigned long data;
363}; 369};
364static struct linux_prom_translation prom_trans[512] __initdata; 370static struct linux_prom_translation prom_trans[512] __initdata;
371static unsigned int prom_trans_ents __initdata;
365 372
366extern unsigned long prom_boot_page; 373extern unsigned long prom_boot_page;
367extern void prom_remap(unsigned long physpage, unsigned long virtpage, int mmu_ihandle); 374extern void prom_remap(unsigned long physpage, unsigned long virtpage, int mmu_ihandle);
@@ -375,57 +382,7 @@ unsigned long kern_locked_tte_data;
375unsigned long prom_pmd_phys __read_mostly; 382unsigned long prom_pmd_phys __read_mostly;
376unsigned int swapper_pgd_zero __read_mostly; 383unsigned int swapper_pgd_zero __read_mostly;
377 384
378/* Allocate power-of-2 aligned chunks from the end of the 385static pmd_t *prompmd __read_mostly;
379 * kernel image. Return physical address.
380 */
381static inline unsigned long early_alloc_phys(unsigned long size)
382{
383 unsigned long base;
384
385 BUILD_BUG_ON(size & (size - 1));
386
387 kern_size = (kern_size + (size - 1)) & ~(size - 1);
388 base = kern_base + kern_size;
389 kern_size += size;
390
391 return base;
392}
393
394static inline unsigned long load_phys32(unsigned long pa)
395{
396 unsigned long val;
397
398 __asm__ __volatile__("lduwa [%1] %2, %0"
399 : "=&r" (val)
400 : "r" (pa), "i" (ASI_PHYS_USE_EC));
401
402 return val;
403}
404
405static inline unsigned long load_phys64(unsigned long pa)
406{
407 unsigned long val;
408
409 __asm__ __volatile__("ldxa [%1] %2, %0"
410 : "=&r" (val)
411 : "r" (pa), "i" (ASI_PHYS_USE_EC));
412
413 return val;
414}
415
416static inline void store_phys32(unsigned long pa, unsigned long val)
417{
418 __asm__ __volatile__("stwa %0, [%1] %2"
419 : /* no outputs */
420 : "r" (val), "r" (pa), "i" (ASI_PHYS_USE_EC));
421}
422
423static inline void store_phys64(unsigned long pa, unsigned long val)
424{
425 __asm__ __volatile__("stxa %0, [%1] %2"
426 : /* no outputs */
427 : "r" (val), "r" (pa), "i" (ASI_PHYS_USE_EC));
428}
429 386
430#define BASE_PAGE_SIZE 8192 387#define BASE_PAGE_SIZE 8192
431 388
@@ -435,34 +392,28 @@ static inline void store_phys64(unsigned long pa, unsigned long val)
435 */ 392 */
436unsigned long prom_virt_to_phys(unsigned long promva, int *error) 393unsigned long prom_virt_to_phys(unsigned long promva, int *error)
437{ 394{
438 unsigned long pmd_phys = (prom_pmd_phys + 395 pmd_t *pmdp = prompmd + ((promva >> 23) & 0x7ff);
439 ((promva >> 23) & 0x7ff) * sizeof(pmd_t)); 396 pte_t *ptep;
440 unsigned long pte_phys;
441 pmd_t pmd_ent;
442 pte_t pte_ent;
443 unsigned long base; 397 unsigned long base;
444 398
445 pmd_val(pmd_ent) = load_phys32(pmd_phys); 399 if (pmd_none(*pmdp)) {
446 if (pmd_none(pmd_ent)) {
447 if (error) 400 if (error)
448 *error = 1; 401 *error = 1;
449 return 0; 402 return 0;
450 } 403 }
451 404 ptep = (pte_t *)__pmd_page(*pmdp) + ((promva >> 13) & 0x3ff);
452 pte_phys = (unsigned long)pmd_val(pmd_ent) << 11UL; 405 if (!pte_present(*ptep)) {
453 pte_phys += ((promva >> 13) & 0x3ff) * sizeof(pte_t);
454 pte_val(pte_ent) = load_phys64(pte_phys);
455 if (!pte_present(pte_ent)) {
456 if (error) 406 if (error)
457 *error = 1; 407 *error = 1;
458 return 0; 408 return 0;
459 } 409 }
460 if (error) { 410 if (error) {
461 *error = 0; 411 *error = 0;
462 return pte_val(pte_ent); 412 return pte_val(*ptep);
463 } 413 }
464 base = pte_val(pte_ent) & _PAGE_PADDR; 414 base = pte_val(*ptep) & _PAGE_PADDR;
465 return (base + (promva & (BASE_PAGE_SIZE - 1))); 415
416 return base + (promva & (BASE_PAGE_SIZE - 1));
466} 417}
467 418
468/* The obp translations are saved based on 8k pagesize, since obp can 419/* The obp translations are saved based on 8k pagesize, since obp can
@@ -475,25 +426,20 @@ static void __init build_obp_range(unsigned long start, unsigned long end, unsig
475 unsigned long vaddr; 426 unsigned long vaddr;
476 427
477 for (vaddr = start; vaddr < end; vaddr += BASE_PAGE_SIZE) { 428 for (vaddr = start; vaddr < end; vaddr += BASE_PAGE_SIZE) {
478 unsigned long val, pte_phys, pmd_phys; 429 unsigned long val;
479 pmd_t pmd_ent; 430 pmd_t *pmd;
480 int i; 431 pte_t *pte;
481
482 pmd_phys = (prom_pmd_phys +
483 (((vaddr >> 23) & 0x7ff) * sizeof(pmd_t)));
484 pmd_val(pmd_ent) = load_phys32(pmd_phys);
485 if (pmd_none(pmd_ent)) {
486 pte_phys = early_alloc_phys(BASE_PAGE_SIZE);
487
488 for (i = 0; i < BASE_PAGE_SIZE / sizeof(pte_t); i++)
489 store_phys64(pte_phys+i*sizeof(pte_t),0);
490 432
491 pmd_val(pmd_ent) = pte_phys >> 11UL; 433 pmd = prompmd + ((vaddr >> 23) & 0x7ff);
492 store_phys32(pmd_phys, pmd_val(pmd_ent)); 434 if (pmd_none(*pmd)) {
435 pte = __alloc_bootmem(BASE_PAGE_SIZE, BASE_PAGE_SIZE,
436 PAGE_SIZE);
437 if (!pte)
438 prom_halt();
439 memset(pte, 0, BASE_PAGE_SIZE);
440 pmd_set(pmd, pte);
493 } 441 }
494 442 pte = (pte_t *) __pmd_page(*pmd) + ((vaddr >> 13) & 0x3ff);
495 pte_phys = (unsigned long)pmd_val(pmd_ent) << 11UL;
496 pte_phys += (((vaddr >> 13) & 0x3ff) * sizeof(pte_t));
497 443
498 val = data; 444 val = data;
499 445
@@ -501,7 +447,8 @@ static void __init build_obp_range(unsigned long start, unsigned long end, unsig
501 if (tlb_type == spitfire) 447 if (tlb_type == spitfire)
502 val &= ~0x0003fe0000000000UL; 448 val &= ~0x0003fe0000000000UL;
503 449
504 store_phys64(pte_phys, val | _PAGE_MODIFIED); 450 set_pte_at(&init_mm, vaddr, pte,
451 __pte(val | _PAGE_MODIFIED));
505 452
506 data += BASE_PAGE_SIZE; 453 data += BASE_PAGE_SIZE;
507 } 454 }
@@ -514,13 +461,17 @@ static inline int in_obp_range(unsigned long vaddr)
514} 461}
515 462
516#define OBP_PMD_SIZE 2048 463#define OBP_PMD_SIZE 2048
517static void __init build_obp_pgtable(int prom_trans_ents) 464static void __init build_obp_pgtable(void)
518{ 465{
519 unsigned long i; 466 unsigned long i;
520 467
521 prom_pmd_phys = early_alloc_phys(OBP_PMD_SIZE); 468 prompmd = __alloc_bootmem(OBP_PMD_SIZE, OBP_PMD_SIZE, PAGE_SIZE);
522 for (i = 0; i < OBP_PMD_SIZE; i += 4) 469 if (!prompmd)
523 store_phys32(prom_pmd_phys + i, 0); 470 prom_halt();
471
472 memset(prompmd, 0, OBP_PMD_SIZE);
473
474 prom_pmd_phys = __pa(prompmd);
524 475
525 for (i = 0; i < prom_trans_ents; i++) { 476 for (i = 0; i < prom_trans_ents; i++) {
526 unsigned long start, end; 477 unsigned long start, end;
@@ -540,7 +491,7 @@ static void __init build_obp_pgtable(int prom_trans_ents)
540/* Read OBP translations property into 'prom_trans[]'. 491/* Read OBP translations property into 'prom_trans[]'.
541 * Return the number of entries. 492 * Return the number of entries.
542 */ 493 */
543static int __init read_obp_translations(void) 494static void __init read_obp_translations(void)
544{ 495{
545 int n, node; 496 int n, node;
546 497
@@ -561,8 +512,10 @@ static int __init read_obp_translations(void)
561 prom_printf("prom_mappings: Couldn't get property.\n"); 512 prom_printf("prom_mappings: Couldn't get property.\n");
562 prom_halt(); 513 prom_halt();
563 } 514 }
515
564 n = n / sizeof(struct linux_prom_translation); 516 n = n / sizeof(struct linux_prom_translation);
565 return n; 517
518 prom_trans_ents = n;
566} 519}
567 520
568static void __init remap_kernel(void) 521static void __init remap_kernel(void)
@@ -582,28 +535,38 @@ static void __init remap_kernel(void)
582 prom_dtlb_load(tlb_ent, tte_data, tte_vaddr); 535 prom_dtlb_load(tlb_ent, tte_data, tte_vaddr);
583 prom_itlb_load(tlb_ent, tte_data, tte_vaddr); 536 prom_itlb_load(tlb_ent, tte_data, tte_vaddr);
584 if (bigkernel) { 537 if (bigkernel) {
585 prom_dtlb_load(tlb_ent - 1, 538 tlb_ent -= 1;
539 prom_dtlb_load(tlb_ent,
586 tte_data + 0x400000, 540 tte_data + 0x400000,
587 tte_vaddr + 0x400000); 541 tte_vaddr + 0x400000);
588 prom_itlb_load(tlb_ent - 1, 542 prom_itlb_load(tlb_ent,
589 tte_data + 0x400000, 543 tte_data + 0x400000,
590 tte_vaddr + 0x400000); 544 tte_vaddr + 0x400000);
591 } 545 }
546 sparc64_highest_unlocked_tlb_ent = tlb_ent - 1;
547 if (tlb_type == cheetah_plus) {
548 sparc64_kern_pri_context = (CTX_CHEETAH_PLUS_CTX0 |
549 CTX_CHEETAH_PLUS_NUC);
550 sparc64_kern_pri_nuc_bits = CTX_CHEETAH_PLUS_NUC;
551 sparc64_kern_sec_context = CTX_CHEETAH_PLUS_CTX0;
552 }
592} 553}
593 554
594static void __init inherit_prom_mappings(void)
595{
596 int n;
597 555
598 n = read_obp_translations(); 556static void __init inherit_prom_mappings_pre(void)
599 build_obp_pgtable(n); 557{
558 read_obp_translations();
600 559
601 /* Now fixup OBP's idea about where we really are mapped. */ 560 /* Now fixup OBP's idea about where we really are mapped. */
602 prom_printf("Remapping the kernel... "); 561 prom_printf("Remapping the kernel... ");
603 remap_kernel(); 562 remap_kernel();
604 563
605 prom_printf("done.\n"); 564 prom_printf("done.\n");
565}
606 566
567static void __init inherit_prom_mappings_post(void)
568{
569 build_obp_pgtable();
607 register_prom_callbacks(); 570 register_prom_callbacks();
608} 571}
609 572
@@ -788,8 +751,8 @@ void inherit_locked_prom_mappings(int save_p)
788 } 751 }
789 } 752 }
790 if (tlb_type == spitfire) { 753 if (tlb_type == spitfire) {
791 int high = SPITFIRE_HIGHEST_LOCKED_TLBENT - bigkernel; 754 int high = sparc64_highest_unlocked_tlb_ent;
792 for (i = 0; i < high; i++) { 755 for (i = 0; i <= high; i++) {
793 unsigned long data; 756 unsigned long data;
794 757
795 /* Spitfire Errata #32 workaround */ 758 /* Spitfire Errata #32 workaround */
@@ -877,9 +840,9 @@ void inherit_locked_prom_mappings(int save_p)
877 } 840 }
878 } 841 }
879 } else if (tlb_type == cheetah || tlb_type == cheetah_plus) { 842 } else if (tlb_type == cheetah || tlb_type == cheetah_plus) {
880 int high = CHEETAH_HIGHEST_LOCKED_TLBENT - bigkernel; 843 int high = sparc64_highest_unlocked_tlb_ent;
881 844
882 for (i = 0; i < high; i++) { 845 for (i = 0; i <= high; i++) {
883 unsigned long data; 846 unsigned long data;
884 847
885 data = cheetah_get_ldtlb_data(i); 848 data = cheetah_get_ldtlb_data(i);
@@ -1556,8 +1519,7 @@ void __init paging_init(void)
1556 1519
1557 swapper_pgd_zero = pgd_val(swapper_pg_dir[0]); 1520 swapper_pgd_zero = pgd_val(swapper_pg_dir[0]);
1558 1521
1559 /* Inherit non-locked OBP mappings. */ 1522 inherit_prom_mappings_pre();
1560 inherit_prom_mappings();
1561 1523
1562 /* Ok, we can use our TLB miss and window trap handlers safely. 1524 /* Ok, we can use our TLB miss and window trap handlers safely.
1563 * We need to do a quick peek here to see if we are on StarFire 1525 * We need to do a quick peek here to see if we are on StarFire
@@ -1568,15 +1530,23 @@ void __init paging_init(void)
1568 extern void setup_tba(int); 1530 extern void setup_tba(int);
1569 setup_tba(this_is_starfire); 1531 setup_tba(this_is_starfire);
1570 } 1532 }
1571
1572 inherit_locked_prom_mappings(1);
1573
1574 __flush_tlb_all(); 1533 __flush_tlb_all();
1575 1534
1535 /* Everything from this point forward, until we are done with
1536 * inherit_prom_mappings_post(), must complete successfully
 1537	 * without calling into the firmware. The firmware page tables
1538 * have not been built, but we are running on the Linux kernel's
1539 * trap table.
1540 */
1541
1576 /* Setup bootmem... */ 1542 /* Setup bootmem... */
1577 pages_avail = 0; 1543 pages_avail = 0;
1578 last_valid_pfn = end_pfn = bootmem_init(&pages_avail); 1544 last_valid_pfn = end_pfn = bootmem_init(&pages_avail);
1579 1545
1546 inherit_prom_mappings_post();
1547
1548 inherit_locked_prom_mappings(1);
1549
1580#ifdef CONFIG_DEBUG_PAGEALLOC 1550#ifdef CONFIG_DEBUG_PAGEALLOC
1581 kernel_physical_mapping_init(); 1551 kernel_physical_mapping_init();
1582#endif 1552#endif
diff --git a/arch/um/include/registers.h b/arch/um/include/registers.h
index 0a35e6d0baa0..4892e5fcef07 100644
--- a/arch/um/include/registers.h
+++ b/arch/um/include/registers.h
@@ -15,16 +15,6 @@ extern void save_registers(int pid, union uml_pt_regs *regs);
15extern void restore_registers(int pid, union uml_pt_regs *regs); 15extern void restore_registers(int pid, union uml_pt_regs *regs);
16extern void init_registers(int pid); 16extern void init_registers(int pid);
17extern void get_safe_registers(unsigned long * regs); 17extern void get_safe_registers(unsigned long * regs);
18extern void get_thread_regs(union uml_pt_regs *uml_regs, void *buffer);
18 19
19#endif 20#endif
20
21/*
22 * Overrides for Emacs so that we follow Linus's tabbing style.
23 * Emacs will notice this stuff at the end of the file and automatically
24 * adjust the settings for this buffer only. This must remain at the end
25 * of the file.
26 * ---------------------------------------------------------------------------
27 * Local variables:
28 * c-file-style: "linux"
29 * End:
30 */
diff --git a/arch/um/include/sysdep-x86_64/ptrace.h b/arch/um/include/sysdep-x86_64/ptrace.h
index 331aa2d1f3f5..8f0656766c21 100644
--- a/arch/um/include/sysdep-x86_64/ptrace.h
+++ b/arch/um/include/sysdep-x86_64/ptrace.h
@@ -218,10 +218,6 @@ struct syscall_args {
218 case RBP: UPT_RBP(regs) = __upt_val; break; \ 218 case RBP: UPT_RBP(regs) = __upt_val; break; \
219 case ORIG_RAX: UPT_ORIG_RAX(regs) = __upt_val; break; \ 219 case ORIG_RAX: UPT_ORIG_RAX(regs) = __upt_val; break; \
220 case CS: UPT_CS(regs) = __upt_val; break; \ 220 case CS: UPT_CS(regs) = __upt_val; break; \
221 case DS: UPT_DS(regs) = __upt_val; break; \
222 case ES: UPT_ES(regs) = __upt_val; break; \
223 case FS: UPT_FS(regs) = __upt_val; break; \
224 case GS: UPT_GS(regs) = __upt_val; break; \
225 case EFLAGS: UPT_EFLAGS(regs) = __upt_val; break; \ 221 case EFLAGS: UPT_EFLAGS(regs) = __upt_val; break; \
226 default : \ 222 default : \
227 panic("Bad register in UPT_SET : %d\n", reg); \ 223 panic("Bad register in UPT_SET : %d\n", reg); \
diff --git a/arch/um/kernel/sysrq.c b/arch/um/kernel/sysrq.c
index f80850091e79..b331e970002f 100644
--- a/arch/um/kernel/sysrq.c
+++ b/arch/um/kernel/sysrq.c
@@ -62,13 +62,7 @@ void show_stack(struct task_struct *task, unsigned long *esp)
62 62
63 if (esp == NULL) { 63 if (esp == NULL) {
64 if (task != current && task != NULL) { 64 if (task != current && task != NULL) {
65 /* XXX: Isn't this bogus? I.e. isn't this the
66 * *userspace* stack of this task? If not so, use this
67 * even when task == current (as in i386).
68 */
69 esp = (unsigned long *) KSTK_ESP(task); 65 esp = (unsigned long *) KSTK_ESP(task);
70 /* Which one? No actual difference - just coding style.*/
71 //esp = (unsigned long *) PT_REGS_IP(&task->thread.regs);
72 } else { 66 } else {
73 esp = (unsigned long *) &esp; 67 esp = (unsigned long *) &esp;
74 } 68 }
@@ -84,5 +78,5 @@ void show_stack(struct task_struct *task, unsigned long *esp)
84 } 78 }
85 79
86 printk("Call Trace: \n"); 80 printk("Call Trace: \n");
87 show_trace(current, esp); 81 show_trace(task, esp);
88} 82}
diff --git a/arch/um/os-Linux/sys-i386/registers.c b/arch/um/os-Linux/sys-i386/registers.c
index 3125d320722c..aee4812333c6 100644
--- a/arch/um/os-Linux/sys-i386/registers.c
+++ b/arch/um/os-Linux/sys-i386/registers.c
@@ -5,6 +5,7 @@
5 5
6#include <errno.h> 6#include <errno.h>
7#include <string.h> 7#include <string.h>
8#include <setjmp.h>
8#include "sysdep/ptrace_user.h" 9#include "sysdep/ptrace_user.h"
9#include "sysdep/ptrace.h" 10#include "sysdep/ptrace.h"
10#include "uml-config.h" 11#include "uml-config.h"
@@ -126,13 +127,11 @@ void get_safe_registers(unsigned long *regs)
126 memcpy(regs, exec_regs, HOST_FRAME_SIZE * sizeof(unsigned long)); 127 memcpy(regs, exec_regs, HOST_FRAME_SIZE * sizeof(unsigned long));
127} 128}
128 129
129/* 130void get_thread_regs(union uml_pt_regs *uml_regs, void *buffer)
130 * Overrides for Emacs so that we follow Linus's tabbing style. 131{
131 * Emacs will notice this stuff at the end of the file and automatically 132 struct __jmp_buf_tag *jmpbuf = buffer;
132 * adjust the settings for this buffer only. This must remain at the end 133
133 * of the file. 134 UPT_SET(uml_regs, EIP, jmpbuf->__jmpbuf[JB_PC]);
134 * --------------------------------------------------------------------------- 135 UPT_SET(uml_regs, UESP, jmpbuf->__jmpbuf[JB_SP]);
135 * Local variables: 136 UPT_SET(uml_regs, EBP, jmpbuf->__jmpbuf[JB_BP]);
136 * c-file-style: "linux" 137}
137 * End:
138 */
diff --git a/arch/um/os-Linux/sys-x86_64/registers.c b/arch/um/os-Linux/sys-x86_64/registers.c
index 44438d15c3d6..4b638dfb52b0 100644
--- a/arch/um/os-Linux/sys-x86_64/registers.c
+++ b/arch/um/os-Linux/sys-x86_64/registers.c
@@ -5,6 +5,7 @@
5 5
6#include <errno.h> 6#include <errno.h>
7#include <string.h> 7#include <string.h>
8#include <setjmp.h>
8#include "ptrace_user.h" 9#include "ptrace_user.h"
9#include "uml-config.h" 10#include "uml-config.h"
10#include "skas_ptregs.h" 11#include "skas_ptregs.h"
@@ -74,13 +75,11 @@ void get_safe_registers(unsigned long *regs)
74 memcpy(regs, exec_regs, HOST_FRAME_SIZE * sizeof(unsigned long)); 75 memcpy(regs, exec_regs, HOST_FRAME_SIZE * sizeof(unsigned long));
75} 76}
76 77
77/* 78void get_thread_regs(union uml_pt_regs *uml_regs, void *buffer)
78 * Overrides for Emacs so that we follow Linus's tabbing style. 79{
79 * Emacs will notice this stuff at the end of the file and automatically 80 struct __jmp_buf_tag *jmpbuf = buffer;
80 * adjust the settings for this buffer only. This must remain at the end 81
81 * of the file. 82 UPT_SET(uml_regs, RIP, jmpbuf->__jmpbuf[JB_PC]);
82 * --------------------------------------------------------------------------- 83 UPT_SET(uml_regs, RSP, jmpbuf->__jmpbuf[JB_RSP]);
83 * Local variables: 84 UPT_SET(uml_regs, RBP, jmpbuf->__jmpbuf[JB_RBP]);
84 * c-file-style: "linux" 85}
85 * End:
86 */
diff --git a/arch/um/sys-i386/sysrq.c b/arch/um/sys-i386/sysrq.c
index e3706d15c4f5..d5244f070539 100644
--- a/arch/um/sys-i386/sysrq.c
+++ b/arch/um/sys-i386/sysrq.c
@@ -88,9 +88,7 @@ void show_trace(struct task_struct* task, unsigned long * stack)
88 task = current; 88 task = current;
89 89
90 if (task != current) { 90 if (task != current) {
91 //ebp = (unsigned long) KSTK_EBP(task); 91 ebp = (unsigned long) KSTK_EBP(task);
92 /* Which one? No actual difference - just coding style.*/
93 ebp = (unsigned long) PT_REGS_EBP(&task->thread.regs);
94 } else { 92 } else {
95 asm ("movl %%ebp, %0" : "=r" (ebp) : ); 93 asm ("movl %%ebp, %0" : "=r" (ebp) : );
96 } 94 }
@@ -99,15 +97,6 @@ void show_trace(struct task_struct* task, unsigned long * stack)
99 ((unsigned long)stack & (~(THREAD_SIZE - 1))); 97 ((unsigned long)stack & (~(THREAD_SIZE - 1)));
100 print_context_stack(context, stack, ebp); 98 print_context_stack(context, stack, ebp);
101 99
102 /*while (((long) stack & (THREAD_SIZE-1)) != 0) {
103 addr = *stack;
104 if (__kernel_text_address(addr)) {
105 printk("%08lx: [<%08lx>]", (unsigned long) stack, addr);
106 print_symbol(" %s", addr);
107 printk("\n");
108 }
109 stack++;
110 }*/
111 printk("\n"); 100 printk("\n");
112} 101}
113 102
diff --git a/arch/um/sys-i386/user-offsets.c b/arch/um/sys-i386/user-offsets.c
index 677fc26a9bbe..26b68675053d 100644
--- a/arch/um/sys-i386/user-offsets.c
+++ b/arch/um/sys-i386/user-offsets.c
@@ -46,7 +46,7 @@ void foo(void)
46 OFFSET(HOST_SC_FP_ST, _fpstate, _st); 46 OFFSET(HOST_SC_FP_ST, _fpstate, _st);
47 OFFSET(HOST_SC_FXSR_ENV, _fpstate, _fxsr_env); 47 OFFSET(HOST_SC_FXSR_ENV, _fpstate, _fxsr_env);
48 48
49 DEFINE_LONGS(HOST_FRAME_SIZE, FRAME_SIZE); 49 DEFINE(HOST_FRAME_SIZE, FRAME_SIZE);
50 DEFINE_LONGS(HOST_FP_SIZE, sizeof(struct user_i387_struct)); 50 DEFINE_LONGS(HOST_FP_SIZE, sizeof(struct user_i387_struct));
51 DEFINE_LONGS(HOST_XFP_SIZE, sizeof(struct user_fxsr_struct)); 51 DEFINE_LONGS(HOST_XFP_SIZE, sizeof(struct user_fxsr_struct));
52 52
diff --git a/arch/x86_64/kernel/head.S b/arch/x86_64/kernel/head.S
index 4592bf21fcaf..b92e5f45ed46 100644
--- a/arch/x86_64/kernel/head.S
+++ b/arch/x86_64/kernel/head.S
@@ -270,26 +270,26 @@ ENTRY(level3_kernel_pgt)
270.org 0x4000 270.org 0x4000
271ENTRY(level2_ident_pgt) 271ENTRY(level2_ident_pgt)
272 /* 40MB for bootup. */ 272 /* 40MB for bootup. */
273 .quad 0x0000000000000183 273 .quad 0x0000000000000083
274 .quad 0x0000000000200183 274 .quad 0x0000000000200083
275 .quad 0x0000000000400183 275 .quad 0x0000000000400083
276 .quad 0x0000000000600183 276 .quad 0x0000000000600083
277 .quad 0x0000000000800183 277 .quad 0x0000000000800083
278 .quad 0x0000000000A00183 278 .quad 0x0000000000A00083
279 .quad 0x0000000000C00183 279 .quad 0x0000000000C00083
280 .quad 0x0000000000E00183 280 .quad 0x0000000000E00083
281 .quad 0x0000000001000183 281 .quad 0x0000000001000083
282 .quad 0x0000000001200183 282 .quad 0x0000000001200083
283 .quad 0x0000000001400183 283 .quad 0x0000000001400083
284 .quad 0x0000000001600183 284 .quad 0x0000000001600083
285 .quad 0x0000000001800183 285 .quad 0x0000000001800083
286 .quad 0x0000000001A00183 286 .quad 0x0000000001A00083
287 .quad 0x0000000001C00183 287 .quad 0x0000000001C00083
288 .quad 0x0000000001E00183 288 .quad 0x0000000001E00083
289 .quad 0x0000000002000183 289 .quad 0x0000000002000083
290 .quad 0x0000000002200183 290 .quad 0x0000000002200083
291 .quad 0x0000000002400183 291 .quad 0x0000000002400083
292 .quad 0x0000000002600183 292 .quad 0x0000000002600083
293 /* Temporary mappings for the super early allocator in arch/x86_64/mm/init.c */ 293 /* Temporary mappings for the super early allocator in arch/x86_64/mm/init.c */
294 .globl temp_boot_pmds 294 .globl temp_boot_pmds
295temp_boot_pmds: 295temp_boot_pmds:
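
Each .quad above is a 2MB identity-mapping page-directory entry whose low bits are x86-64 paging flags, and the only change is the value of those flags: 0x183 becomes 0x083, i.e. the Global bit (bit 8) is cleared while Present, Writable and Page-Size stay set, presumably so these boot-time mappings are not kept as global TLB entries across later address-space switches. A small decoder makes the difference visible (the PTE_* macro names are mine; the bit positions are architectural):

#include <stdio.h>
#include <stdint.h>

/* x86-64 page-table entry flag bits (architectural positions). */
#define PTE_PRESENT  (1ULL << 0)
#define PTE_RW       (1ULL << 1)
#define PTE_PS       (1ULL << 7)   /* 2MB page when set in a PDE */
#define PTE_GLOBAL   (1ULL << 8)

static void decode(uint64_t pde)
{
	printf("0x%03llx: present=%d rw=%d ps=%d global=%d\n",
	       (unsigned long long)pde,
	       !!(pde & PTE_PRESENT), !!(pde & PTE_RW),
	       !!(pde & PTE_PS), !!(pde & PTE_GLOBAL));
}

int main(void)
{
	decode(0x183);	/* old entry: P | RW | PS | G */
	decode(0x083);	/* new entry: P | RW | PS     */
	return 0;
}
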
diff --git a/arch/x86_64/kernel/setup.c b/arch/x86_64/kernel/setup.c
index 257f5ba17902..cb28df14ff6f 100644
--- a/arch/x86_64/kernel/setup.c
+++ b/arch/x86_64/kernel/setup.c
@@ -967,13 +967,12 @@ static int __cpuinit intel_num_cpu_cores(struct cpuinfo_x86 *c)
967static void srat_detect_node(void) 967static void srat_detect_node(void)
968{ 968{
969#ifdef CONFIG_NUMA 969#ifdef CONFIG_NUMA
970 unsigned apicid, node; 970 unsigned node;
971 int cpu = smp_processor_id(); 971 int cpu = smp_processor_id();
972 972
973 /* Don't do the funky fallback heuristics the AMD version employs 973 /* Don't do the funky fallback heuristics the AMD version employs
974 for now. */ 974 for now. */
975 apicid = phys_proc_id[cpu]; 975 node = apicid_to_node[hard_smp_processor_id()];
976 node = apicid_to_node[apicid];
977 if (node == NUMA_NO_NODE) 976 if (node == NUMA_NO_NODE)
978 node = 0; 977 node = 0;
979 cpu_to_node[cpu] = node; 978 cpu_to_node[cpu] = node;
diff --git a/drivers/atm/fore200e.c b/drivers/atm/fore200e.c
index 2bf723a7b6e6..6f1a83c9d9e0 100644
--- a/drivers/atm/fore200e.c
+++ b/drivers/atm/fore200e.c
@@ -178,14 +178,12 @@ fore200e_irq_itoa(int irq)
178 178
179 179
180static void* 180static void*
181fore200e_kmalloc(int size, int flags) 181fore200e_kmalloc(int size, unsigned int __nocast flags)
182{ 182{
183 void* chunk = kmalloc(size, flags); 183 void *chunk = kzalloc(size, flags);
184 184
185 if (chunk) 185 if (!chunk)
186 memset(chunk, 0x00, size); 186 printk(FORE200E "kmalloc() failed, requested size = %d, flags = 0x%x\n", size, flags);
187 else
188 printk(FORE200E "kmalloc() failed, requested size = %d, flags = 0x%x\n", size, flags);
189 187
190 return chunk; 188 return chunk;
191} 189}
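
The allocation helper now gets zeroed memory straight from kzalloc(), so the separate memset() disappears and only the failure message remains. The same collapse works in plain C with calloc(); a minimal sketch (zalloc_by_hand() and zalloc() are illustrative names, not driver code):

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Before: allocate, then clear by hand. */
static void *zalloc_by_hand(size_t size)
{
	void *chunk = malloc(size);

	if (chunk)
		memset(chunk, 0, size);
	else
		fprintf(stderr, "allocation of %zu bytes failed\n", size);
	return chunk;
}

/* After: one call that allocates and zeroes, the role kzalloc() plays in the kernel. */
static void *zalloc(size_t size)
{
	void *chunk = calloc(1, size);

	if (!chunk)
		fprintf(stderr, "allocation of %zu bytes failed\n", size);
	return chunk;
}

int main(void)
{
	char *a = zalloc_by_hand(16), *b = zalloc(16);

	printf("%d %d\n", a ? a[0] : -1, b ? b[0] : -1);	/* both print 0 */
	free(a);
	free(b);
	return 0;
}
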
diff --git a/drivers/char/drm/drm_stub.c b/drivers/char/drm/drm_stub.c
index 95a976c96eb8..70458cb061c6 100644
--- a/drivers/char/drm/drm_stub.c
+++ b/drivers/char/drm/drm_stub.c
@@ -47,7 +47,7 @@ MODULE_PARM_DESC(cards_limit, "Maximum number of graphics cards");
47MODULE_PARM_DESC(debug, "Enable debug output"); 47MODULE_PARM_DESC(debug, "Enable debug output");
48 48
49module_param_named(cards_limit, drm_cards_limit, int, 0444); 49module_param_named(cards_limit, drm_cards_limit, int, 0444);
50module_param_named(debug, drm_debug, int, 0666); 50module_param_named(debug, drm_debug, int, 0600);
51 51
52drm_head_t **drm_heads; 52drm_head_t **drm_heads;
53struct drm_sysfs_class *drm_class; 53struct drm_sysfs_class *drm_class;
diff --git a/drivers/connector/connector.c b/drivers/connector/connector.c
index bb0b3a8de14b..1422285d537c 100644
--- a/drivers/connector/connector.c
+++ b/drivers/connector/connector.c
@@ -69,7 +69,8 @@ int cn_already_initialized = 0;
69 * a new message. 69 * a new message.
70 * 70 *
71 */ 71 */
72int cn_netlink_send(struct cn_msg *msg, u32 __group, int gfp_mask) 72int cn_netlink_send(struct cn_msg *msg, u32 __group,
73 unsigned int __nocast gfp_mask)
73{ 74{
74 struct cn_callback_entry *__cbq; 75 struct cn_callback_entry *__cbq;
75 unsigned int size; 76 unsigned int size;
diff --git a/drivers/infiniband/hw/mthca/mthca_main.c b/drivers/infiniband/hw/mthca/mthca_main.c
index ffbcd40418d5..23a3f56c7899 100644
--- a/drivers/infiniband/hw/mthca/mthca_main.c
+++ b/drivers/infiniband/hw/mthca/mthca_main.c
@@ -503,6 +503,25 @@ err_free_aux:
503 return err; 503 return err;
504} 504}
505 505
506static void mthca_free_icms(struct mthca_dev *mdev)
507{
508 u8 status;
509
510 mthca_free_icm_table(mdev, mdev->mcg_table.table);
511 if (mdev->mthca_flags & MTHCA_FLAG_SRQ)
512 mthca_free_icm_table(mdev, mdev->srq_table.table);
513 mthca_free_icm_table(mdev, mdev->cq_table.table);
514 mthca_free_icm_table(mdev, mdev->qp_table.rdb_table);
515 mthca_free_icm_table(mdev, mdev->qp_table.eqp_table);
516 mthca_free_icm_table(mdev, mdev->qp_table.qp_table);
517 mthca_free_icm_table(mdev, mdev->mr_table.mpt_table);
518 mthca_free_icm_table(mdev, mdev->mr_table.mtt_table);
519 mthca_unmap_eq_icm(mdev);
520
521 mthca_UNMAP_ICM_AUX(mdev, &status);
522 mthca_free_icm(mdev, mdev->fw.arbel.aux_icm);
523}
524
506static int __devinit mthca_init_arbel(struct mthca_dev *mdev) 525static int __devinit mthca_init_arbel(struct mthca_dev *mdev)
507{ 526{
508 struct mthca_dev_lim dev_lim; 527 struct mthca_dev_lim dev_lim;
@@ -580,18 +599,7 @@ static int __devinit mthca_init_arbel(struct mthca_dev *mdev)
580 return 0; 599 return 0;
581 600
582err_free_icm: 601err_free_icm:
583 if (mdev->mthca_flags & MTHCA_FLAG_SRQ) 602 mthca_free_icms(mdev);
584 mthca_free_icm_table(mdev, mdev->srq_table.table);
585 mthca_free_icm_table(mdev, mdev->cq_table.table);
586 mthca_free_icm_table(mdev, mdev->qp_table.rdb_table);
587 mthca_free_icm_table(mdev, mdev->qp_table.eqp_table);
588 mthca_free_icm_table(mdev, mdev->qp_table.qp_table);
589 mthca_free_icm_table(mdev, mdev->mr_table.mpt_table);
590 mthca_free_icm_table(mdev, mdev->mr_table.mtt_table);
591 mthca_unmap_eq_icm(mdev);
592
593 mthca_UNMAP_ICM_AUX(mdev, &status);
594 mthca_free_icm(mdev, mdev->fw.arbel.aux_icm);
595 603
596err_stop_fw: 604err_stop_fw:
597 mthca_UNMAP_FA(mdev, &status); 605 mthca_UNMAP_FA(mdev, &status);
@@ -611,18 +619,7 @@ static void mthca_close_hca(struct mthca_dev *mdev)
611 mthca_CLOSE_HCA(mdev, 0, &status); 619 mthca_CLOSE_HCA(mdev, 0, &status);
612 620
613 if (mthca_is_memfree(mdev)) { 621 if (mthca_is_memfree(mdev)) {
614 if (mdev->mthca_flags & MTHCA_FLAG_SRQ) 622 mthca_free_icms(mdev);
615 mthca_free_icm_table(mdev, mdev->srq_table.table);
616 mthca_free_icm_table(mdev, mdev->cq_table.table);
617 mthca_free_icm_table(mdev, mdev->qp_table.rdb_table);
618 mthca_free_icm_table(mdev, mdev->qp_table.eqp_table);
619 mthca_free_icm_table(mdev, mdev->qp_table.qp_table);
620 mthca_free_icm_table(mdev, mdev->mr_table.mpt_table);
621 mthca_free_icm_table(mdev, mdev->mr_table.mtt_table);
622 mthca_unmap_eq_icm(mdev);
623
624 mthca_UNMAP_ICM_AUX(mdev, &status);
625 mthca_free_icm(mdev, mdev->fw.arbel.aux_icm);
626 623
627 mthca_UNMAP_FA(mdev, &status); 624 mthca_UNMAP_FA(mdev, &status);
628 mthca_free_icm(mdev, mdev->fw.arbel.fw_icm); 625 mthca_free_icm(mdev, mdev->fw.arbel.fw_icm);
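
Both hunks above replace the same nine-call ICM teardown with a single call to the new mthca_free_icms() helper, so the error path of mthca_init_arbel() and the normal shutdown in mthca_close_hca() can no longer drift apart. Reduced to a stand-alone sketch (struct dev, dev_init() and friends are invented, not mthca code), the shape of the refactor is:

#include <stdio.h>

struct dev { int table_a, table_b, aux; };

static void free_table(struct dev *d, const char *name)
{
	(void)d;
	printf("free %s\n", name);
}

/* One helper owns the teardown sequence and its ordering. */
static void dev_free_resources(struct dev *d)
{
	free_table(d, "table_b");	/* reverse order of setup */
	free_table(d, "table_a");
	free_table(d, "aux");
}

static int dev_init(struct dev *d, int fail)
{
	/* ... set up aux, table_a, table_b ... */
	if (fail) {
		dev_free_resources(d);	/* error path */
		return -1;
	}
	return 0;
}

static void dev_close(struct dev *d)
{
	dev_free_resources(d);		/* normal shutdown, same sequence */
}

int main(void)
{
	struct dev d = {0};

	if (dev_init(&d, 0) == 0)
		dev_close(&d);
	return 0;
}
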
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_main.c b/drivers/infiniband/ulp/ipoib/ipoib_main.c
index 704f48e0b6a7..6c5bf07489f4 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_main.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_main.c
@@ -474,7 +474,7 @@ err:
474 spin_unlock(&priv->lock); 474 spin_unlock(&priv->lock);
475} 475}
476 476
477static void path_lookup(struct sk_buff *skb, struct net_device *dev) 477static void ipoib_path_lookup(struct sk_buff *skb, struct net_device *dev)
478{ 478{
479 struct ipoib_dev_priv *priv = netdev_priv(skb->dev); 479 struct ipoib_dev_priv *priv = netdev_priv(skb->dev);
480 480
@@ -569,7 +569,7 @@ static int ipoib_start_xmit(struct sk_buff *skb, struct net_device *dev)
569 569
570 if (skb->dst && skb->dst->neighbour) { 570 if (skb->dst && skb->dst->neighbour) {
571 if (unlikely(!*to_ipoib_neigh(skb->dst->neighbour))) { 571 if (unlikely(!*to_ipoib_neigh(skb->dst->neighbour))) {
572 path_lookup(skb, dev); 572 ipoib_path_lookup(skb, dev);
573 goto out; 573 goto out;
574 } 574 }
575 575
diff --git a/drivers/mfd/ucb1x00-core.c b/drivers/mfd/ucb1x00-core.c
index 10f6ce1bc0ab..612564ac6f7b 100644
--- a/drivers/mfd/ucb1x00-core.c
+++ b/drivers/mfd/ucb1x00-core.c
@@ -642,8 +642,6 @@ static void __exit ucb1x00_exit(void)
642module_init(ucb1x00_init); 642module_init(ucb1x00_init);
643module_exit(ucb1x00_exit); 643module_exit(ucb1x00_exit);
644 644
645EXPORT_SYMBOL(ucb1x00_class);
646
647EXPORT_SYMBOL(ucb1x00_io_set_dir); 645EXPORT_SYMBOL(ucb1x00_io_set_dir);
648EXPORT_SYMBOL(ucb1x00_io_write); 646EXPORT_SYMBOL(ucb1x00_io_write);
649EXPORT_SYMBOL(ucb1x00_io_read); 647EXPORT_SYMBOL(ucb1x00_io_read);
diff --git a/drivers/mfd/ucb1x00.h b/drivers/mfd/ucb1x00.h
index 6b632644f59a..9c9a647d8b7b 100644
--- a/drivers/mfd/ucb1x00.h
+++ b/drivers/mfd/ucb1x00.h
@@ -106,8 +106,6 @@ struct ucb1x00_irq {
106 void (*fn)(int, void *); 106 void (*fn)(int, void *);
107}; 107};
108 108
109extern struct class ucb1x00_class;
110
111struct ucb1x00 { 109struct ucb1x00 {
112 spinlock_t lock; 110 spinlock_t lock;
113 struct mcp *mcp; 111 struct mcp *mcp;
diff --git a/drivers/net/Kconfig b/drivers/net/Kconfig
index 2a908c4690a7..c748b0e16419 100644
--- a/drivers/net/Kconfig
+++ b/drivers/net/Kconfig
@@ -1655,7 +1655,7 @@ config LAN_SAA9730
1655 1655
1656config NET_POCKET 1656config NET_POCKET
1657 bool "Pocket and portable adapters" 1657 bool "Pocket and portable adapters"
1658 depends on NET_ETHERNET && ISA 1658 depends on NET_ETHERNET && PARPORT
1659 ---help--- 1659 ---help---
1660 Cute little network (Ethernet) devices which attach to the parallel 1660 Cute little network (Ethernet) devices which attach to the parallel
1661 port ("pocket adapters"), commonly used with laptops. If you have 1661 port ("pocket adapters"), commonly used with laptops. If you have
@@ -1679,7 +1679,7 @@ config NET_POCKET
1679 1679
1680config ATP 1680config ATP
1681 tristate "AT-LAN-TEC/RealTek pocket adapter support" 1681 tristate "AT-LAN-TEC/RealTek pocket adapter support"
1682 depends on NET_POCKET && ISA && X86 1682 depends on NET_POCKET && PARPORT && X86
1683 select CRC32 1683 select CRC32
1684 ---help--- 1684 ---help---
1685 This is a network (Ethernet) device which attaches to your parallel 1685 This is a network (Ethernet) device which attaches to your parallel
@@ -1694,7 +1694,7 @@ config ATP
1694 1694
1695config DE600 1695config DE600
1696 tristate "D-Link DE600 pocket adapter support" 1696 tristate "D-Link DE600 pocket adapter support"
1697 depends on NET_POCKET && ISA 1697 depends on NET_POCKET && PARPORT
1698 ---help--- 1698 ---help---
1699 This is a network (Ethernet) device which attaches to your parallel 1699 This is a network (Ethernet) device which attaches to your parallel
1700 port. Read <file:Documentation/networking/DLINK.txt> as well as the 1700 port. Read <file:Documentation/networking/DLINK.txt> as well as the
@@ -1709,7 +1709,7 @@ config DE600
1709 1709
1710config DE620 1710config DE620
1711 tristate "D-Link DE620 pocket adapter support" 1711 tristate "D-Link DE620 pocket adapter support"
1712 depends on NET_POCKET && ISA 1712 depends on NET_POCKET && PARPORT
1713 ---help--- 1713 ---help---
1714 This is a network (Ethernet) device which attaches to your parallel 1714 This is a network (Ethernet) device which attaches to your parallel
1715 port. Read <file:Documentation/networking/DLINK.txt> as well as the 1715 port. Read <file:Documentation/networking/DLINK.txt> as well as the
diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c
index 6d00c3de1a83..f0a5b772a386 100644
--- a/drivers/net/bonding/bond_main.c
+++ b/drivers/net/bonding/bond_main.c
@@ -487,6 +487,8 @@
487 * * Added xmit_hash_policy_layer34() 487 * * Added xmit_hash_policy_layer34()
488 * - Modified by Jay Vosburgh <fubar@us.ibm.com> to also support mode 4. 488 * - Modified by Jay Vosburgh <fubar@us.ibm.com> to also support mode 4.
489 * Set version to 2.6.3. 489 * Set version to 2.6.3.
490 * 2005/09/26 - Jay Vosburgh <fubar@us.ibm.com>
491 * - Removed backwards compatibility for old ifenslaves. Version 2.6.4.
490 */ 492 */
491 493
492//#define BONDING_DEBUG 1 494//#define BONDING_DEBUG 1
@@ -595,14 +597,7 @@ static int arp_ip_count = 0;
595static int bond_mode = BOND_MODE_ROUNDROBIN; 597static int bond_mode = BOND_MODE_ROUNDROBIN;
596static int xmit_hashtype= BOND_XMIT_POLICY_LAYER2; 598static int xmit_hashtype= BOND_XMIT_POLICY_LAYER2;
597static int lacp_fast = 0; 599static int lacp_fast = 0;
598static int app_abi_ver = 0; 600
599static int orig_app_abi_ver = -1; /* This is used to save the first ABI version
600 * we receive from the application. Once set,
601 * it won't be changed, and the module will
602 * refuse to enslave/release interfaces if the
603 * command comes from an application using
604 * another ABI version.
605 */
606struct bond_parm_tbl { 601struct bond_parm_tbl {
607 char *modename; 602 char *modename;
608 int mode; 603 int mode;
@@ -1294,12 +1289,13 @@ static void bond_mc_list_destroy(struct bonding *bond)
1294/* 1289/*
1295 * Copy all the Multicast addresses from src to the bonding device dst 1290 * Copy all the Multicast addresses from src to the bonding device dst
1296 */ 1291 */
1297static int bond_mc_list_copy(struct dev_mc_list *mc_list, struct bonding *bond, int gpf_flag) 1292static int bond_mc_list_copy(struct dev_mc_list *mc_list, struct bonding *bond,
1293 unsigned int __nocast gfp_flag)
1298{ 1294{
1299 struct dev_mc_list *dmi, *new_dmi; 1295 struct dev_mc_list *dmi, *new_dmi;
1300 1296
1301 for (dmi = mc_list; dmi; dmi = dmi->next) { 1297 for (dmi = mc_list; dmi; dmi = dmi->next) {
1302 new_dmi = kmalloc(sizeof(struct dev_mc_list), gpf_flag); 1298 new_dmi = kmalloc(sizeof(struct dev_mc_list), gfp_flag);
1303 1299
1304 if (!new_dmi) { 1300 if (!new_dmi) {
1305 /* FIXME: Potential memory leak !!! */ 1301 /* FIXME: Potential memory leak !!! */
@@ -1702,51 +1698,29 @@ static int bond_enslave(struct net_device *bond_dev, struct net_device *slave_de
1702 } 1698 }
1703 } 1699 }
1704 1700
1705 if (app_abi_ver >= 1) { 1701 /*
1706 /* The application is using an ABI, which requires the 1702 * Old ifenslave binaries are no longer supported. These can
1707 * slave interface to be closed. 1703 * be identified with moderate accurary by the state of the slave:
1708 */ 1704 * the current ifenslave will set the interface down prior to
1709 if ((slave_dev->flags & IFF_UP)) { 1705 * enslaving it; the old ifenslave will not.
1710 printk(KERN_ERR DRV_NAME 1706 */
1711 ": Error: %s is up\n", 1707 if ((slave_dev->flags & IFF_UP)) {
1712 slave_dev->name); 1708 printk(KERN_ERR DRV_NAME ": %s is up. "
1713 res = -EPERM; 1709 "This may be due to an out of date ifenslave.\n",
1714 goto err_undo_flags; 1710 slave_dev->name);
1715 } 1711 res = -EPERM;
1716 1712 goto err_undo_flags;
1717 if (slave_dev->set_mac_address == NULL) { 1713 }
1718 printk(KERN_ERR DRV_NAME
1719 ": Error: The slave device you specified does "
1720 "not support setting the MAC address.\n");
1721 printk(KERN_ERR
1722 "Your kernel likely does not support slave "
1723 "devices.\n");
1724 1714
1725 res = -EOPNOTSUPP; 1715 if (slave_dev->set_mac_address == NULL) {
1726 goto err_undo_flags; 1716 printk(KERN_ERR DRV_NAME
1727 } 1717 ": Error: The slave device you specified does "
1728 } else { 1718 "not support setting the MAC address.\n");
1729 /* The application is not using an ABI, which requires the 1719 printk(KERN_ERR
1730 * slave interface to be open. 1720 "Your kernel likely does not support slave devices.\n");
1731 */
1732 if (!(slave_dev->flags & IFF_UP)) {
1733 printk(KERN_ERR DRV_NAME
1734 ": Error: %s is not running\n",
1735 slave_dev->name);
1736 res = -EINVAL;
1737 goto err_undo_flags;
1738 }
1739 1721
1740 if ((bond->params.mode == BOND_MODE_8023AD) || 1722 res = -EOPNOTSUPP;
1741 (bond->params.mode == BOND_MODE_TLB) || 1723 goto err_undo_flags;
1742 (bond->params.mode == BOND_MODE_ALB)) {
1743 printk(KERN_ERR DRV_NAME
1744 ": Error: to use %s mode, you must upgrade "
1745 "ifenslave.\n",
1746 bond_mode_name(bond->params.mode));
1747 res = -EOPNOTSUPP;
1748 goto err_undo_flags;
1749 }
1750 } 1724 }
1751 1725
1752 new_slave = kmalloc(sizeof(struct slave), GFP_KERNEL); 1726 new_slave = kmalloc(sizeof(struct slave), GFP_KERNEL);
@@ -1762,41 +1736,36 @@ static int bond_enslave(struct net_device *bond_dev, struct net_device *slave_de
1762 */ 1736 */
1763 new_slave->original_flags = slave_dev->flags; 1737 new_slave->original_flags = slave_dev->flags;
1764 1738
1765 if (app_abi_ver >= 1) { 1739 /*
1766 /* save slave's original ("permanent") mac address for 1740 * Save slave's original ("permanent") mac address for modes
1767 * modes that needs it, and for restoring it upon release, 1741 * that need it, and for restoring it upon release, and then
1768 * and then set it to the master's address 1742 * set it to the master's address
1769 */ 1743 */
1770 memcpy(new_slave->perm_hwaddr, slave_dev->dev_addr, ETH_ALEN); 1744 memcpy(new_slave->perm_hwaddr, slave_dev->dev_addr, ETH_ALEN);
1771 1745
1772 /* set slave to master's mac address 1746 /*
1773 * The application already set the master's 1747 * Set slave to master's mac address. The application already
1774 * mac address to that of the first slave 1748 * set the master's mac address to that of the first slave
1775 */ 1749 */
1776 memcpy(addr.sa_data, bond_dev->dev_addr, bond_dev->addr_len); 1750 memcpy(addr.sa_data, bond_dev->dev_addr, bond_dev->addr_len);
1777 addr.sa_family = slave_dev->type; 1751 addr.sa_family = slave_dev->type;
1778 res = dev_set_mac_address(slave_dev, &addr); 1752 res = dev_set_mac_address(slave_dev, &addr);
1779 if (res) { 1753 if (res) {
1780 dprintk("Error %d calling set_mac_address\n", res); 1754 dprintk("Error %d calling set_mac_address\n", res);
1781 goto err_free; 1755 goto err_free;
1782 } 1756 }
1783 1757
1784 /* open the slave since the application closed it */ 1758 /* open the slave since the application closed it */
1785 res = dev_open(slave_dev); 1759 res = dev_open(slave_dev);
1786 if (res) { 1760 if (res) {
1787 dprintk("Openning slave %s failed\n", slave_dev->name); 1761 dprintk("Openning slave %s failed\n", slave_dev->name);
1788 goto err_restore_mac; 1762 goto err_restore_mac;
1789 }
1790 } 1763 }
1791 1764
1792 res = netdev_set_master(slave_dev, bond_dev); 1765 res = netdev_set_master(slave_dev, bond_dev);
1793 if (res) { 1766 if (res) {
1794 dprintk("Error %d calling netdev_set_master\n", res); 1767 dprintk("Error %d calling netdev_set_master\n", res);
1795 if (app_abi_ver < 1) { 1768 goto err_close;
1796 goto err_free;
1797 } else {
1798 goto err_close;
1799 }
1800 } 1769 }
1801 1770
1802 new_slave->dev = slave_dev; 1771 new_slave->dev = slave_dev;
@@ -1997,39 +1966,6 @@ static int bond_enslave(struct net_device *bond_dev, struct net_device *slave_de
1997 1966
1998 write_unlock_bh(&bond->lock); 1967 write_unlock_bh(&bond->lock);
1999 1968
2000 if (app_abi_ver < 1) {
2001 /*
2002 * !!! This is to support old versions of ifenslave.
2003 * We can remove this in 2.5 because our ifenslave takes
2004 * care of this for us.
2005 * We check to see if the master has a mac address yet.
2006 * If not, we'll give it the mac address of our slave device.
2007 */
2008 int ndx = 0;
2009
2010 for (ndx = 0; ndx < bond_dev->addr_len; ndx++) {
2011 dprintk("Checking ndx=%d of bond_dev->dev_addr\n",
2012 ndx);
2013 if (bond_dev->dev_addr[ndx] != 0) {
2014 dprintk("Found non-zero byte at ndx=%d\n",
2015 ndx);
2016 break;
2017 }
2018 }
2019
2020 if (ndx == bond_dev->addr_len) {
2021 /*
2022 * We got all the way through the address and it was
2023 * all 0's.
2024 */
2025 dprintk("%s doesn't have a MAC address yet. \n",
2026 bond_dev->name);
2027 dprintk("Going to give assign it from %s.\n",
2028 slave_dev->name);
2029 bond_sethwaddr(bond_dev, slave_dev);
2030 }
2031 }
2032
2033 printk(KERN_INFO DRV_NAME 1969 printk(KERN_INFO DRV_NAME
2034 ": %s: enslaving %s as a%s interface with a%s link.\n", 1970 ": %s: enslaving %s as a%s interface with a%s link.\n",
2035 bond_dev->name, slave_dev->name, 1971 bond_dev->name, slave_dev->name,
@@ -2227,12 +2163,10 @@ static int bond_release(struct net_device *bond_dev, struct net_device *slave_de
2227 /* close slave before restoring its mac address */ 2163 /* close slave before restoring its mac address */
2228 dev_close(slave_dev); 2164 dev_close(slave_dev);
2229 2165
2230 if (app_abi_ver >= 1) { 2166 /* restore original ("permanent") mac address */
2231 /* restore original ("permanent") mac address */ 2167 memcpy(addr.sa_data, slave->perm_hwaddr, ETH_ALEN);
2232 memcpy(addr.sa_data, slave->perm_hwaddr, ETH_ALEN); 2168 addr.sa_family = slave_dev->type;
2233 addr.sa_family = slave_dev->type; 2169 dev_set_mac_address(slave_dev, &addr);
2234 dev_set_mac_address(slave_dev, &addr);
2235 }
2236 2170
2237 /* restore the original state of the 2171 /* restore the original state of the
2238 * IFF_NOARP flag that might have been 2172 * IFF_NOARP flag that might have been
@@ -2320,12 +2254,10 @@ static int bond_release_all(struct net_device *bond_dev)
2320 /* close slave before restoring its mac address */ 2254 /* close slave before restoring its mac address */
2321 dev_close(slave_dev); 2255 dev_close(slave_dev);
2322 2256
2323 if (app_abi_ver >= 1) { 2257 /* restore original ("permanent") mac address*/
2324 /* restore original ("permanent") mac address*/ 2258 memcpy(addr.sa_data, slave->perm_hwaddr, ETH_ALEN);
2325 memcpy(addr.sa_data, slave->perm_hwaddr, ETH_ALEN); 2259 addr.sa_family = slave_dev->type;
2326 addr.sa_family = slave_dev->type; 2260 dev_set_mac_address(slave_dev, &addr);
2327 dev_set_mac_address(slave_dev, &addr);
2328 }
2329 2261
2330 /* restore the original state of the IFF_NOARP flag that might have 2262 /* restore the original state of the IFF_NOARP flag that might have
2331 * been set by bond_set_slave_inactive_flags() 2263 * been set by bond_set_slave_inactive_flags()
@@ -2423,57 +2355,6 @@ static int bond_ioctl_change_active(struct net_device *bond_dev, struct net_devi
2423 return res; 2355 return res;
2424} 2356}
2425 2357
2426static int bond_ethtool_ioctl(struct net_device *bond_dev, struct ifreq *ifr)
2427{
2428 struct ethtool_drvinfo info;
2429 void __user *addr = ifr->ifr_data;
2430 uint32_t cmd;
2431
2432 if (get_user(cmd, (uint32_t __user *)addr)) {
2433 return -EFAULT;
2434 }
2435
2436 switch (cmd) {
2437 case ETHTOOL_GDRVINFO:
2438 if (copy_from_user(&info, addr, sizeof(info))) {
2439 return -EFAULT;
2440 }
2441
2442 if (strcmp(info.driver, "ifenslave") == 0) {
2443 int new_abi_ver;
2444 char *endptr;
2445
2446 new_abi_ver = simple_strtoul(info.fw_version,
2447 &endptr, 0);
2448 if (*endptr) {
2449 printk(KERN_ERR DRV_NAME
2450 ": Error: got invalid ABI "
2451 "version from application\n");
2452
2453 return -EINVAL;
2454 }
2455
2456 if (orig_app_abi_ver == -1) {
2457 orig_app_abi_ver = new_abi_ver;
2458 }
2459
2460 app_abi_ver = new_abi_ver;
2461 }
2462
2463 strncpy(info.driver, DRV_NAME, 32);
2464 strncpy(info.version, DRV_VERSION, 32);
2465 snprintf(info.fw_version, 32, "%d", BOND_ABI_VERSION);
2466
2467 if (copy_to_user(addr, &info, sizeof(info))) {
2468 return -EFAULT;
2469 }
2470
2471 return 0;
2472 default:
2473 return -EOPNOTSUPP;
2474 }
2475}
2476
2477static int bond_info_query(struct net_device *bond_dev, struct ifbond *info) 2358static int bond_info_query(struct net_device *bond_dev, struct ifbond *info)
2478{ 2359{
2479 struct bonding *bond = bond_dev->priv; 2360 struct bonding *bond = bond_dev->priv;
@@ -2776,7 +2657,7 @@ static u32 bond_glean_dev_ip(struct net_device *dev)
2776 return 0; 2657 return 0;
2777 2658
2778 rcu_read_lock(); 2659 rcu_read_lock();
2779 idev = __in_dev_get(dev); 2660 idev = __in_dev_get_rcu(dev);
2780 if (!idev) 2661 if (!idev)
2781 goto out; 2662 goto out;
2782 2663
@@ -3442,16 +3323,11 @@ static void bond_info_show_slave(struct seq_file *seq, const struct slave *slave
3442 seq_printf(seq, "Link Failure Count: %d\n", 3323 seq_printf(seq, "Link Failure Count: %d\n",
3443 slave->link_failure_count); 3324 slave->link_failure_count);
3444 3325
3445 if (app_abi_ver >= 1) { 3326 seq_printf(seq,
3446 seq_printf(seq, 3327 "Permanent HW addr: %02x:%02x:%02x:%02x:%02x:%02x\n",
3447 "Permanent HW addr: %02x:%02x:%02x:%02x:%02x:%02x\n", 3328 slave->perm_hwaddr[0], slave->perm_hwaddr[1],
3448 slave->perm_hwaddr[0], 3329 slave->perm_hwaddr[2], slave->perm_hwaddr[3],
3449 slave->perm_hwaddr[1], 3330 slave->perm_hwaddr[4], slave->perm_hwaddr[5]);
3450 slave->perm_hwaddr[2],
3451 slave->perm_hwaddr[3],
3452 slave->perm_hwaddr[4],
3453 slave->perm_hwaddr[5]);
3454 }
3455 3331
3456 if (bond->params.mode == BOND_MODE_8023AD) { 3332 if (bond->params.mode == BOND_MODE_8023AD) {
3457 const struct aggregator *agg 3333 const struct aggregator *agg
@@ -4010,15 +3886,12 @@ static int bond_do_ioctl(struct net_device *bond_dev, struct ifreq *ifr, int cmd
4010 struct ifslave k_sinfo; 3886 struct ifslave k_sinfo;
4011 struct ifslave __user *u_sinfo = NULL; 3887 struct ifslave __user *u_sinfo = NULL;
4012 struct mii_ioctl_data *mii = NULL; 3888 struct mii_ioctl_data *mii = NULL;
4013 int prev_abi_ver = orig_app_abi_ver;
4014 int res = 0; 3889 int res = 0;
4015 3890
4016 dprintk("bond_ioctl: master=%s, cmd=%d\n", 3891 dprintk("bond_ioctl: master=%s, cmd=%d\n",
4017 bond_dev->name, cmd); 3892 bond_dev->name, cmd);
4018 3893
4019 switch (cmd) { 3894 switch (cmd) {
4020 case SIOCETHTOOL:
4021 return bond_ethtool_ioctl(bond_dev, ifr);
4022 case SIOCGMIIPHY: 3895 case SIOCGMIIPHY:
4023 mii = if_mii(ifr); 3896 mii = if_mii(ifr);
4024 if (!mii) { 3897 if (!mii) {
@@ -4090,21 +3963,6 @@ static int bond_do_ioctl(struct net_device *bond_dev, struct ifreq *ifr, int cmd
4090 return -EPERM; 3963 return -EPERM;
4091 } 3964 }
4092 3965
4093 if (orig_app_abi_ver == -1) {
4094 /* no orig_app_abi_ver was provided yet, so we'll use the
4095 * current one from now on, even if it's 0
4096 */
4097 orig_app_abi_ver = app_abi_ver;
4098
4099 } else if (orig_app_abi_ver != app_abi_ver) {
4100 printk(KERN_ERR DRV_NAME
4101 ": Error: already using ifenslave ABI version %d; to "
4102 "upgrade ifenslave to version %d, you must first "
4103 "reload bonding.\n",
4104 orig_app_abi_ver, app_abi_ver);
4105 return -EINVAL;
4106 }
4107
4108 slave_dev = dev_get_by_name(ifr->ifr_slave); 3966 slave_dev = dev_get_by_name(ifr->ifr_slave);
4109 3967
4110 dprintk("slave_dev=%p: \n", slave_dev); 3968 dprintk("slave_dev=%p: \n", slave_dev);
@@ -4137,14 +3995,6 @@ static int bond_do_ioctl(struct net_device *bond_dev, struct ifreq *ifr, int cmd
4137 dev_put(slave_dev); 3995 dev_put(slave_dev);
4138 } 3996 }
4139 3997
4140 if (res < 0) {
4141 /* The ioctl failed, so there's no point in changing the
4142 * orig_app_abi_ver. We'll restore it's value just in case
4143 * we've changed it earlier in this function.
4144 */
4145 orig_app_abi_ver = prev_abi_ver;
4146 }
4147
4148 return res; 3998 return res;
4149} 3999}
4150 4000
@@ -4578,9 +4428,18 @@ static inline void bond_set_mode_ops(struct bonding *bond, int mode)
4578 } 4428 }
4579} 4429}
4580 4430
4431static void bond_ethtool_get_drvinfo(struct net_device *bond_dev,
4432 struct ethtool_drvinfo *drvinfo)
4433{
4434 strncpy(drvinfo->driver, DRV_NAME, 32);
4435 strncpy(drvinfo->version, DRV_VERSION, 32);
4436 snprintf(drvinfo->fw_version, 32, "%d", BOND_ABI_VERSION);
4437}
4438
4581static struct ethtool_ops bond_ethtool_ops = { 4439static struct ethtool_ops bond_ethtool_ops = {
4582 .get_tx_csum = ethtool_op_get_tx_csum, 4440 .get_tx_csum = ethtool_op_get_tx_csum,
4583 .get_sg = ethtool_op_get_sg, 4441 .get_sg = ethtool_op_get_sg,
4442 .get_drvinfo = bond_ethtool_get_drvinfo,
4584}; 4443};
4585 4444
4586/* 4445/*
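
With bond_ethtool_ioctl() removed, an ETHTOOL_GDRVINFO request now reaches bond_ethtool_get_drvinfo() through the kernel's common ethtool code rather than a driver-private SIOCETHTOOL handler, and that common code does the copy_from_user()/copy_to_user() on the driver's behalf. For reference, a userspace query looks roughly like this (a hedged sketch: error handling is trimmed and the bond0 interface name is assumed):

#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <net/if.h>
#include <linux/ethtool.h>
#include <linux/sockios.h>
#include <unistd.h>

int main(void)
{
	struct ethtool_drvinfo info = { .cmd = ETHTOOL_GDRVINFO };
	struct ifreq ifr;
	int fd = socket(AF_INET, SOCK_DGRAM, 0);

	memset(&ifr, 0, sizeof(ifr));
	strncpy(ifr.ifr_name, "bond0", IFNAMSIZ - 1);	/* interface name assumed */
	ifr.ifr_data = (void *)&info;

	if (fd >= 0 && ioctl(fd, SIOCETHTOOL, &ifr) == 0)
		printf("driver %s, version %s, fw %s\n",
		       info.driver, info.version, info.fw_version);
	else
		perror("SIOCETHTOOL");

	if (fd >= 0)
		close(fd);
	return 0;
}
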
diff --git a/drivers/net/bonding/bonding.h b/drivers/net/bonding/bonding.h
index 388196980862..bbf9da8af624 100644
--- a/drivers/net/bonding/bonding.h
+++ b/drivers/net/bonding/bonding.h
@@ -40,8 +40,8 @@
40#include "bond_3ad.h" 40#include "bond_3ad.h"
41#include "bond_alb.h" 41#include "bond_alb.h"
42 42
43#define DRV_VERSION "2.6.3" 43#define DRV_VERSION "2.6.4"
44#define DRV_RELDATE "June 8, 2005" 44#define DRV_RELDATE "September 26, 2005"
45#define DRV_NAME "bonding" 45#define DRV_NAME "bonding"
46#define DRV_DESCRIPTION "Ethernet Channel Bonding Driver" 46#define DRV_DESCRIPTION "Ethernet Channel Bonding Driver"
47 47
diff --git a/drivers/net/cassini.c b/drivers/net/cassini.c
index 45831fb377a0..2e617424d3fb 100644
--- a/drivers/net/cassini.c
+++ b/drivers/net/cassini.c
@@ -4423,18 +4423,14 @@ static struct {
4423#define CAS_REG_LEN (sizeof(ethtool_register_table)/sizeof(int)) 4423#define CAS_REG_LEN (sizeof(ethtool_register_table)/sizeof(int))
4424#define CAS_MAX_REGS (sizeof (u32)*CAS_REG_LEN) 4424#define CAS_MAX_REGS (sizeof (u32)*CAS_REG_LEN)
4425 4425
4426static u8 *cas_get_regs(struct cas *cp) 4426static void cas_read_regs(struct cas *cp, u8 *ptr, int len)
4427{ 4427{
4428 u8 *ptr = kmalloc(CAS_MAX_REGS, GFP_KERNEL);
4429 u8 *p; 4428 u8 *p;
4430 int i; 4429 int i;
4431 unsigned long flags; 4430 unsigned long flags;
4432 4431
4433 if (!ptr)
4434 return NULL;
4435
4436 spin_lock_irqsave(&cp->lock, flags); 4432 spin_lock_irqsave(&cp->lock, flags);
4437 for (i = 0, p = ptr; i < CAS_REG_LEN ; i ++, p += sizeof(u32)) { 4433 for (i = 0, p = ptr; i < len ; i ++, p += sizeof(u32)) {
4438 u16 hval; 4434 u16 hval;
4439 u32 val; 4435 u32 val;
4440 if (ethtool_register_table[i].offsets < 0) { 4436 if (ethtool_register_table[i].offsets < 0) {
@@ -4447,8 +4443,6 @@ static u8 *cas_get_regs(struct cas *cp)
4447 memcpy(p, (u8 *)&val, sizeof(u32)); 4443 memcpy(p, (u8 *)&val, sizeof(u32));
4448 } 4444 }
4449 spin_unlock_irqrestore(&cp->lock, flags); 4445 spin_unlock_irqrestore(&cp->lock, flags);
4450
4451 return ptr;
4452} 4446}
4453 4447
4454static struct net_device_stats *cas_get_stats(struct net_device *dev) 4448static struct net_device_stats *cas_get_stats(struct net_device *dev)
@@ -4561,316 +4555,251 @@ static void cas_set_multicast(struct net_device *dev)
4561 spin_unlock_irqrestore(&cp->lock, flags); 4555 spin_unlock_irqrestore(&cp->lock, flags);
4562} 4556}
4563 4557
4564/* Eventually add support for changing the advertisement 4558static void cas_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
4565 * on autoneg. 4559{
4566 */ 4560 struct cas *cp = netdev_priv(dev);
4567static int cas_ethtool_ioctl(struct net_device *dev, void __user *ep_user) 4561 strncpy(info->driver, DRV_MODULE_NAME, ETHTOOL_BUSINFO_LEN);
4562 strncpy(info->version, DRV_MODULE_VERSION, ETHTOOL_BUSINFO_LEN);
4563 info->fw_version[0] = '\0';
4564 strncpy(info->bus_info, pci_name(cp->pdev), ETHTOOL_BUSINFO_LEN);
4565 info->regdump_len = cp->casreg_len < CAS_MAX_REGS ?
4566 cp->casreg_len : CAS_MAX_REGS;
4567 info->n_stats = CAS_NUM_STAT_KEYS;
4568}
4569
4570static int cas_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
4568{ 4571{
4569 struct cas *cp = netdev_priv(dev); 4572 struct cas *cp = netdev_priv(dev);
4570 u16 bmcr; 4573 u16 bmcr;
4571 int full_duplex, speed, pause; 4574 int full_duplex, speed, pause;
4572 struct ethtool_cmd ecmd;
4573 unsigned long flags; 4575 unsigned long flags;
4574 enum link_state linkstate = link_up; 4576 enum link_state linkstate = link_up;
4575 4577
4576 if (copy_from_user(&ecmd, ep_user, sizeof(ecmd))) 4578 cmd->advertising = 0;
4577 return -EFAULT; 4579 cmd->supported = SUPPORTED_Autoneg;
4578 4580 if (cp->cas_flags & CAS_FLAG_1000MB_CAP) {
4579 switch(ecmd.cmd) { 4581 cmd->supported |= SUPPORTED_1000baseT_Full;
4580 case ETHTOOL_GDRVINFO: { 4582 cmd->advertising |= ADVERTISED_1000baseT_Full;
4581 struct ethtool_drvinfo info = { .cmd = ETHTOOL_GDRVINFO };
4582
4583 strncpy(info.driver, DRV_MODULE_NAME,
4584 ETHTOOL_BUSINFO_LEN);
4585 strncpy(info.version, DRV_MODULE_VERSION,
4586 ETHTOOL_BUSINFO_LEN);
4587 info.fw_version[0] = '\0';
4588 strncpy(info.bus_info, pci_name(cp->pdev),
4589 ETHTOOL_BUSINFO_LEN);
4590 info.regdump_len = cp->casreg_len < CAS_MAX_REGS ?
4591 cp->casreg_len : CAS_MAX_REGS;
4592 info.n_stats = CAS_NUM_STAT_KEYS;
4593 if (copy_to_user(ep_user, &info, sizeof(info)))
4594 return -EFAULT;
4595
4596 return 0;
4597 } 4583 }
4598 4584
4599 case ETHTOOL_GSET: 4585 /* Record PHY settings if HW is on. */
4600 ecmd.advertising = 0; 4586 spin_lock_irqsave(&cp->lock, flags);
4601 ecmd.supported = SUPPORTED_Autoneg; 4587 bmcr = 0;
4602 if (cp->cas_flags & CAS_FLAG_1000MB_CAP) { 4588 linkstate = cp->lstate;
4603 ecmd.supported |= SUPPORTED_1000baseT_Full; 4589 if (CAS_PHY_MII(cp->phy_type)) {
4604 ecmd.advertising |= ADVERTISED_1000baseT_Full; 4590 cmd->port = PORT_MII;
4591 cmd->transceiver = (cp->cas_flags & CAS_FLAG_SATURN) ?
4592 XCVR_INTERNAL : XCVR_EXTERNAL;
4593 cmd->phy_address = cp->phy_addr;
4594 cmd->advertising |= ADVERTISED_TP | ADVERTISED_MII |
4595 ADVERTISED_10baseT_Half |
4596 ADVERTISED_10baseT_Full |
4597 ADVERTISED_100baseT_Half |
4598 ADVERTISED_100baseT_Full;
4599
4600 cmd->supported |=
4601 (SUPPORTED_10baseT_Half |
4602 SUPPORTED_10baseT_Full |
4603 SUPPORTED_100baseT_Half |
4604 SUPPORTED_100baseT_Full |
4605 SUPPORTED_TP | SUPPORTED_MII);
4606
4607 if (cp->hw_running) {
4608 cas_mif_poll(cp, 0);
4609 bmcr = cas_phy_read(cp, MII_BMCR);
4610 cas_read_mii_link_mode(cp, &full_duplex,
4611 &speed, &pause);
4612 cas_mif_poll(cp, 1);
4605 } 4613 }
4606 4614
4607 /* Record PHY settings if HW is on. */ 4615 } else {
4608 spin_lock_irqsave(&cp->lock, flags); 4616 cmd->port = PORT_FIBRE;
4609 bmcr = 0; 4617 cmd->transceiver = XCVR_INTERNAL;
4610 linkstate = cp->lstate; 4618 cmd->phy_address = 0;
4611 if (CAS_PHY_MII(cp->phy_type)) { 4619 cmd->supported |= SUPPORTED_FIBRE;
4612 ecmd.port = PORT_MII; 4620 cmd->advertising |= ADVERTISED_FIBRE;
4613 ecmd.transceiver = (cp->cas_flags & CAS_FLAG_SATURN) ? 4621
4614 XCVR_INTERNAL : XCVR_EXTERNAL; 4622 if (cp->hw_running) {
4615 ecmd.phy_address = cp->phy_addr; 4623 /* pcs uses the same bits as mii */
4616 ecmd.advertising |= ADVERTISED_TP | ADVERTISED_MII | 4624 bmcr = readl(cp->regs + REG_PCS_MII_CTRL);
4617 ADVERTISED_10baseT_Half | 4625 cas_read_pcs_link_mode(cp, &full_duplex,
4618 ADVERTISED_10baseT_Full | 4626 &speed, &pause);
4619 ADVERTISED_100baseT_Half |
4620 ADVERTISED_100baseT_Full;
4621
4622 ecmd.supported |=
4623 (SUPPORTED_10baseT_Half |
4624 SUPPORTED_10baseT_Full |
4625 SUPPORTED_100baseT_Half |
4626 SUPPORTED_100baseT_Full |
4627 SUPPORTED_TP | SUPPORTED_MII);
4628
4629 if (cp->hw_running) {
4630 cas_mif_poll(cp, 0);
4631 bmcr = cas_phy_read(cp, MII_BMCR);
4632 cas_read_mii_link_mode(cp, &full_duplex,
4633 &speed, &pause);
4634 cas_mif_poll(cp, 1);
4635 }
4636
4637 } else {
4638 ecmd.port = PORT_FIBRE;
4639 ecmd.transceiver = XCVR_INTERNAL;
4640 ecmd.phy_address = 0;
4641 ecmd.supported |= SUPPORTED_FIBRE;
4642 ecmd.advertising |= ADVERTISED_FIBRE;
4643
4644 if (cp->hw_running) {
4645 /* pcs uses the same bits as mii */
4646 bmcr = readl(cp->regs + REG_PCS_MII_CTRL);
4647 cas_read_pcs_link_mode(cp, &full_duplex,
4648 &speed, &pause);
4649 }
4650 } 4627 }
4651 spin_unlock_irqrestore(&cp->lock, flags); 4628 }
4629 spin_unlock_irqrestore(&cp->lock, flags);
4652 4630
4653 if (bmcr & BMCR_ANENABLE) { 4631 if (bmcr & BMCR_ANENABLE) {
4654 ecmd.advertising |= ADVERTISED_Autoneg; 4632 cmd->advertising |= ADVERTISED_Autoneg;
4655 ecmd.autoneg = AUTONEG_ENABLE; 4633 cmd->autoneg = AUTONEG_ENABLE;
4656 ecmd.speed = ((speed == 10) ? 4634 cmd->speed = ((speed == 10) ?
4657 SPEED_10 : 4635 SPEED_10 :
4658 ((speed == 1000) ? 4636 ((speed == 1000) ?
4659 SPEED_1000 : SPEED_100)); 4637 SPEED_1000 : SPEED_100));
4660 ecmd.duplex = full_duplex ? DUPLEX_FULL : DUPLEX_HALF; 4638 cmd->duplex = full_duplex ? DUPLEX_FULL : DUPLEX_HALF;
4639 } else {
4640 cmd->autoneg = AUTONEG_DISABLE;
4641 cmd->speed =
4642 (bmcr & CAS_BMCR_SPEED1000) ?
4643 SPEED_1000 :
4644 ((bmcr & BMCR_SPEED100) ? SPEED_100:
4645 SPEED_10);
4646 cmd->duplex =
4647 (bmcr & BMCR_FULLDPLX) ?
4648 DUPLEX_FULL : DUPLEX_HALF;
4649 }
4650 if (linkstate != link_up) {
4651 /* Force these to "unknown" if the link is not up and
4652 * autonogotiation in enabled. We can set the link
4653 * speed to 0, but not cmd->duplex,
4654 * because its legal values are 0 and 1. Ethtool will
4655 * print the value reported in parentheses after the
4656 * word "Unknown" for unrecognized values.
4657 *
4658 * If in forced mode, we report the speed and duplex
4659 * settings that we configured.
4660 */
4661 if (cp->link_cntl & BMCR_ANENABLE) {
4662 cmd->speed = 0;
4663 cmd->duplex = 0xff;
4661 } else { 4664 } else {
4662 ecmd.autoneg = AUTONEG_DISABLE; 4665 cmd->speed = SPEED_10;
4663 ecmd.speed = 4666 if (cp->link_cntl & BMCR_SPEED100) {
4664 (bmcr & CAS_BMCR_SPEED1000) ? 4667 cmd->speed = SPEED_100;
4665 SPEED_1000 : 4668 } else if (cp->link_cntl & CAS_BMCR_SPEED1000) {
4666 ((bmcr & BMCR_SPEED100) ? SPEED_100: 4669 cmd->speed = SPEED_1000;
4667 SPEED_10);
4668 ecmd.duplex =
4669 (bmcr & BMCR_FULLDPLX) ?
4670 DUPLEX_FULL : DUPLEX_HALF;
4671 }
4672 if (linkstate != link_up) {
4673 /* Force these to "unknown" if the link is not up and
4674 * autonogotiation in enabled. We can set the link
4675 * speed to 0, but not ecmd.duplex,
4676 * because its legal values are 0 and 1. Ethtool will
4677 * print the value reported in parentheses after the
4678 * word "Unknown" for unrecognized values.
4679 *
4680 * If in forced mode, we report the speed and duplex
4681 * settings that we configured.
4682 */
4683 if (cp->link_cntl & BMCR_ANENABLE) {
4684 ecmd.speed = 0;
4685 ecmd.duplex = 0xff;
4686 } else {
4687 ecmd.speed = SPEED_10;
4688 if (cp->link_cntl & BMCR_SPEED100) {
4689 ecmd.speed = SPEED_100;
4690 } else if (cp->link_cntl & CAS_BMCR_SPEED1000) {
4691 ecmd.speed = SPEED_1000;
4692 }
4693 ecmd.duplex = (cp->link_cntl & BMCR_FULLDPLX)?
4694 DUPLEX_FULL : DUPLEX_HALF;
4695 } 4670 }
4671 cmd->duplex = (cp->link_cntl & BMCR_FULLDPLX)?
4672 DUPLEX_FULL : DUPLEX_HALF;
4696 } 4673 }
4697 if (copy_to_user(ep_user, &ecmd, sizeof(ecmd))) 4674 }
4698 return -EFAULT; 4675 return 0;
4699 return 0; 4676}
4700 4677
4701 case ETHTOOL_SSET: 4678static int cas_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
4702 if (!capable(CAP_NET_ADMIN)) 4679{
4703 return -EPERM; 4680 struct cas *cp = netdev_priv(dev);
4681 unsigned long flags;
4704 4682
4705 /* Verify the settings we care about. */ 4683 /* Verify the settings we care about. */
4706 if (ecmd.autoneg != AUTONEG_ENABLE && 4684 if (cmd->autoneg != AUTONEG_ENABLE &&
4707 ecmd.autoneg != AUTONEG_DISABLE) 4685 cmd->autoneg != AUTONEG_DISABLE)
4708 return -EINVAL; 4686 return -EINVAL;
4709 4687
4710 if (ecmd.autoneg == AUTONEG_DISABLE && 4688 if (cmd->autoneg == AUTONEG_DISABLE &&
4711 ((ecmd.speed != SPEED_1000 && 4689 ((cmd->speed != SPEED_1000 &&
4712 ecmd.speed != SPEED_100 && 4690 cmd->speed != SPEED_100 &&
4713 ecmd.speed != SPEED_10) || 4691 cmd->speed != SPEED_10) ||
4714 (ecmd.duplex != DUPLEX_HALF && 4692 (cmd->duplex != DUPLEX_HALF &&
4715 ecmd.duplex != DUPLEX_FULL))) 4693 cmd->duplex != DUPLEX_FULL)))
4716 return -EINVAL; 4694 return -EINVAL;
4717 4695
4718 /* Apply settings and restart link process. */ 4696 /* Apply settings and restart link process. */
4719 spin_lock_irqsave(&cp->lock, flags); 4697 spin_lock_irqsave(&cp->lock, flags);
4720 cas_begin_auto_negotiation(cp, &ecmd); 4698 cas_begin_auto_negotiation(cp, cmd);
4721 spin_unlock_irqrestore(&cp->lock, flags); 4699 spin_unlock_irqrestore(&cp->lock, flags);
4722 return 0; 4700 return 0;
4701}
4723 4702
4724 case ETHTOOL_NWAY_RST: 4703static int cas_nway_reset(struct net_device *dev)
4725 if ((cp->link_cntl & BMCR_ANENABLE) == 0) 4704{
4726 return -EINVAL; 4705 struct cas *cp = netdev_priv(dev);
4706 unsigned long flags;
4727 4707
4728 /* Restart link process. */ 4708 if ((cp->link_cntl & BMCR_ANENABLE) == 0)
4729 spin_lock_irqsave(&cp->lock, flags); 4709 return -EINVAL;
4730 cas_begin_auto_negotiation(cp, NULL);
4731 spin_unlock_irqrestore(&cp->lock, flags);
4732 4710
4733 return 0; 4711 /* Restart link process. */
4712 spin_lock_irqsave(&cp->lock, flags);
4713 cas_begin_auto_negotiation(cp, NULL);
4714 spin_unlock_irqrestore(&cp->lock, flags);
4734 4715
4735 case ETHTOOL_GWOL: 4716 return 0;
4736 case ETHTOOL_SWOL: 4717}
4737 break; /* doesn't exist */
4738 4718
4739 /* get link status */ 4719static u32 cas_get_link(struct net_device *dev)
4740 case ETHTOOL_GLINK: { 4720{
4741 struct ethtool_value edata = { .cmd = ETHTOOL_GLINK }; 4721 struct cas *cp = netdev_priv(dev);
4722 return cp->lstate == link_up;
4723}
4742 4724
4743 edata.data = (cp->lstate == link_up); 4725static u32 cas_get_msglevel(struct net_device *dev)
4744 if (copy_to_user(ep_user, &edata, sizeof(edata))) 4726{
4745 return -EFAULT; 4727 struct cas *cp = netdev_priv(dev);
4746 return 0; 4728 return cp->msg_enable;
4747 } 4729}
4748 4730
4749 /* get message-level */ 4731static void cas_set_msglevel(struct net_device *dev, u32 value)
4750 case ETHTOOL_GMSGLVL: { 4732{
4751 struct ethtool_value edata = { .cmd = ETHTOOL_GMSGLVL }; 4733 struct cas *cp = netdev_priv(dev);
4734 cp->msg_enable = value;
4735}
4752 4736
4753 edata.data = cp->msg_enable; 4737static int cas_get_regs_len(struct net_device *dev)
4754 if (copy_to_user(ep_user, &edata, sizeof(edata))) 4738{
4755 return -EFAULT; 4739 struct cas *cp = netdev_priv(dev);
4756 return 0; 4740 return cp->casreg_len < CAS_MAX_REGS ? cp->casreg_len: CAS_MAX_REGS;
4757 } 4741}
4758 4742
4759 /* set message-level */ 4743static void cas_get_regs(struct net_device *dev, struct ethtool_regs *regs,
4760 case ETHTOOL_SMSGLVL: { 4744 void *p)
4761 struct ethtool_value edata; 4745{
4746 struct cas *cp = netdev_priv(dev);
4747 regs->version = 0;
4748 /* cas_read_regs handles locks (cp->lock). */
4749 cas_read_regs(cp, p, regs->len / sizeof(u32));
4750}
4762 4751
4763 if (!capable(CAP_NET_ADMIN)) { 4752static int cas_get_stats_count(struct net_device *dev)
4764 return (-EPERM); 4753{
4765 } 4754 return CAS_NUM_STAT_KEYS;
4766 if (copy_from_user(&edata, ep_user, sizeof(edata))) 4755}
4767 return -EFAULT;
4768 cp->msg_enable = edata.data;
4769 return 0;
4770 }
4771 4756
4772 case ETHTOOL_GREGS: { 4757static void cas_get_strings(struct net_device *dev, u32 stringset, u8 *data)
4773 struct ethtool_regs edata; 4758{
4774 u8 *ptr; 4759 memcpy(data, &ethtool_cassini_statnames,
4775 int len = cp->casreg_len < CAS_MAX_REGS ? 4760 CAS_NUM_STAT_KEYS * ETH_GSTRING_LEN);
4776 cp->casreg_len: CAS_MAX_REGS; 4761}
4777
4778 if (copy_from_user(&edata, ep_user, sizeof (edata)))
4779 return -EFAULT;
4780
4781 if (edata.len > len)
4782 edata.len = len;
4783 edata.version = 0;
4784 if (copy_to_user (ep_user, &edata, sizeof(edata)))
4785 return -EFAULT;
4786
4787 /* cas_get_regs handles locks (cp->lock). */
4788 ptr = cas_get_regs(cp);
4789 if (ptr == NULL)
4790 return -ENOMEM;
4791 if (copy_to_user(ep_user + sizeof (edata), ptr, edata.len))
4792 return -EFAULT;
4793
4794 kfree(ptr);
4795 return (0);
4796 }
4797 case ETHTOOL_GSTRINGS: {
4798 struct ethtool_gstrings edata;
4799 int len;
4800
4801 if (copy_from_user(&edata, ep_user, sizeof(edata)))
4802 return -EFAULT;
4803
4804 len = edata.len;
4805 switch(edata.string_set) {
4806 case ETH_SS_STATS:
4807 edata.len = (len < CAS_NUM_STAT_KEYS) ?
4808 len : CAS_NUM_STAT_KEYS;
4809 if (copy_to_user(ep_user, &edata, sizeof(edata)))
4810 return -EFAULT;
4811
4812 if (copy_to_user(ep_user + sizeof(edata),
4813 &ethtool_cassini_statnames,
4814 (edata.len * ETH_GSTRING_LEN)))
4815 return -EFAULT;
4816 return 0;
4817 default:
4818 return -EINVAL;
4819 }
4820 }
4821 case ETHTOOL_GSTATS: {
4822 int i = 0;
4823 u64 *tmp;
4824 struct ethtool_stats edata;
4825 struct net_device_stats *stats;
4826 int len;
4827
4828 if (copy_from_user(&edata, ep_user, sizeof(edata)))
4829 return -EFAULT;
4830
4831 len = edata.n_stats;
4832 stats = cas_get_stats(cp->dev);
4833 edata.cmd = ETHTOOL_GSTATS;
4834 edata.n_stats = (len < CAS_NUM_STAT_KEYS) ?
4835 len : CAS_NUM_STAT_KEYS;
4836 if (copy_to_user(ep_user, &edata, sizeof (edata)))
4837 return -EFAULT;
4838
4839 tmp = kmalloc(sizeof(u64)*CAS_NUM_STAT_KEYS, GFP_KERNEL);
4840 if (tmp) {
4841 tmp[i++] = stats->collisions;
4842 tmp[i++] = stats->rx_bytes;
4843 tmp[i++] = stats->rx_crc_errors;
4844 tmp[i++] = stats->rx_dropped;
4845 tmp[i++] = stats->rx_errors;
4846 tmp[i++] = stats->rx_fifo_errors;
4847 tmp[i++] = stats->rx_frame_errors;
4848 tmp[i++] = stats->rx_length_errors;
4849 tmp[i++] = stats->rx_over_errors;
4850 tmp[i++] = stats->rx_packets;
4851 tmp[i++] = stats->tx_aborted_errors;
4852 tmp[i++] = stats->tx_bytes;
4853 tmp[i++] = stats->tx_dropped;
4854 tmp[i++] = stats->tx_errors;
4855 tmp[i++] = stats->tx_fifo_errors;
4856 tmp[i++] = stats->tx_packets;
4857 BUG_ON(i != CAS_NUM_STAT_KEYS);
4858
4859 i = copy_to_user(ep_user + sizeof(edata),
4860 tmp, sizeof(u64)*edata.n_stats);
4861 kfree(tmp);
4862 } else {
4863 return -ENOMEM;
4864 }
4865 if (i)
4866 return -EFAULT;
4867 return 0;
4868 }
4869 }
4870 4762
4871 return -EOPNOTSUPP; 4763static void cas_get_ethtool_stats(struct net_device *dev,
4764 struct ethtool_stats *estats, u64 *data)
4765{
4766 struct cas *cp = netdev_priv(dev);
4767 struct net_device_stats *stats = cas_get_stats(cp->dev);
4768 int i = 0;
4769 data[i++] = stats->collisions;
4770 data[i++] = stats->rx_bytes;
4771 data[i++] = stats->rx_crc_errors;
4772 data[i++] = stats->rx_dropped;
4773 data[i++] = stats->rx_errors;
4774 data[i++] = stats->rx_fifo_errors;
4775 data[i++] = stats->rx_frame_errors;
4776 data[i++] = stats->rx_length_errors;
4777 data[i++] = stats->rx_over_errors;
4778 data[i++] = stats->rx_packets;
4779 data[i++] = stats->tx_aborted_errors;
4780 data[i++] = stats->tx_bytes;
4781 data[i++] = stats->tx_dropped;
4782 data[i++] = stats->tx_errors;
4783 data[i++] = stats->tx_fifo_errors;
4784 data[i++] = stats->tx_packets;
4785 BUG_ON(i != CAS_NUM_STAT_KEYS);
4872} 4786}
4873 4787
4788static struct ethtool_ops cas_ethtool_ops = {
4789 .get_drvinfo = cas_get_drvinfo,
4790 .get_settings = cas_get_settings,
4791 .set_settings = cas_set_settings,
4792 .nway_reset = cas_nway_reset,
4793 .get_link = cas_get_link,
4794 .get_msglevel = cas_get_msglevel,
4795 .set_msglevel = cas_set_msglevel,
4796 .get_regs_len = cas_get_regs_len,
4797 .get_regs = cas_get_regs,
4798 .get_stats_count = cas_get_stats_count,
4799 .get_strings = cas_get_strings,
4800 .get_ethtool_stats = cas_get_ethtool_stats,
4801};
4802
4874static int cas_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd) 4803static int cas_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
4875{ 4804{
4876 struct cas *cp = netdev_priv(dev); 4805 struct cas *cp = netdev_priv(dev);
@@ -4883,10 +4812,6 @@ static int cas_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
4883 */ 4812 */
4884 down(&cp->pm_sem); 4813 down(&cp->pm_sem);
4885 switch (cmd) { 4814 switch (cmd) {
4886 case SIOCETHTOOL:
4887 rc = cas_ethtool_ioctl(dev, ifr->ifr_data);
4888 break;
4889
4890 case SIOCGMIIPHY: /* Get address of MII PHY in use. */ 4815 case SIOCGMIIPHY: /* Get address of MII PHY in use. */
4891 data->phy_id = cp->phy_addr; 4816 data->phy_id = cp->phy_addr;
4892 /* Fallthrough... */ 4817 /* Fallthrough... */
@@ -5112,6 +5037,7 @@ static int __devinit cas_init_one(struct pci_dev *pdev,
5112 dev->get_stats = cas_get_stats; 5037 dev->get_stats = cas_get_stats;
5113 dev->set_multicast_list = cas_set_multicast; 5038 dev->set_multicast_list = cas_set_multicast;
5114 dev->do_ioctl = cas_ioctl; 5039 dev->do_ioctl = cas_ioctl;
5040 dev->ethtool_ops = &cas_ethtool_ops;
5115 dev->tx_timeout = cas_tx_timeout; 5041 dev->tx_timeout = cas_tx_timeout;
5116 dev->watchdog_timeo = CAS_TX_TIMEOUT; 5042 dev->watchdog_timeo = CAS_TX_TIMEOUT;
5117 dev->change_mtu = cas_change_mtu; 5043 dev->change_mtu = cas_change_mtu;
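
cas_get_regs() used to kmalloc() a buffer and hand it back; as the ethtool_ops .get_regs hook, cas_read_regs() now just fills a buffer that the ethtool core sizes via .get_regs_len and allocates itself. The caller-allocates pattern in isolation, as a hedged sketch with invented names (regs_len(), read_regs()):

#include <stdio.h>
#include <stdlib.h>
#include <stdint.h>

#define NREGS 4

/* Report how big a dump will be (the .get_regs_len role). */
static size_t regs_len(void)
{
	return NREGS * sizeof(uint32_t);
}

/* Fill a buffer the caller owns (the .get_regs role). */
static void read_regs(uint32_t *dst, size_t n)
{
	for (size_t i = 0; i < n; i++)
		dst[i] = 0x1000 + i;	/* stand-in for readl() of real registers */
}

int main(void)
{
	size_t len = regs_len();
	uint32_t *buf = malloc(len);		/* the "core" allocates once... */

	if (!buf)
		return 1;
	read_regs(buf, len / sizeof(uint32_t));	/* ...the "driver" only fills it */
	for (size_t i = 0; i < len / sizeof(uint32_t); i++)
		printf("reg[%zu] = 0x%x\n", i, (unsigned)buf[i]);
	free(buf);
	return 0;
}
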
diff --git a/drivers/net/ibm_emac/ibm_emac_core.c b/drivers/net/ibm_emac/ibm_emac_core.c
index 0de3bb906174..14e9b6315f20 100644
--- a/drivers/net/ibm_emac/ibm_emac_core.c
+++ b/drivers/net/ibm_emac/ibm_emac_core.c
@@ -1875,6 +1875,9 @@ static int emac_init_device(struct ocp_device *ocpdev, struct ibm_ocp_mal *mal)
1875 rc = -ENODEV; 1875 rc = -ENODEV;
1876 goto bail; 1876 goto bail;
1877 } 1877 }
1878
1879 /* Disable any PHY features not supported by the platform */
1880 ep->phy_mii.def->features &= ~emacdata->phy_feat_exc;
1878 1881
1879 /* Setup initial PHY config & startup aneg */ 1882 /* Setup initial PHY config & startup aneg */
1880 if (ep->phy_mii.def->ops->init) 1883 if (ep->phy_mii.def->ops->init)
@@ -1882,6 +1885,34 @@ static int emac_init_device(struct ocp_device *ocpdev, struct ibm_ocp_mal *mal)
1882 netif_carrier_off(ndev); 1885 netif_carrier_off(ndev);
1883 if (ep->phy_mii.def->features & SUPPORTED_Autoneg) 1886 if (ep->phy_mii.def->features & SUPPORTED_Autoneg)
1884 ep->want_autoneg = 1; 1887 ep->want_autoneg = 1;
1888 else {
1889 ep->want_autoneg = 0;
1890
1891 /* Select highest supported speed/duplex */
1892 if (ep->phy_mii.def->features & SUPPORTED_1000baseT_Full) {
1893 ep->phy_mii.speed = SPEED_1000;
1894 ep->phy_mii.duplex = DUPLEX_FULL;
1895 } else if (ep->phy_mii.def->features &
1896 SUPPORTED_1000baseT_Half) {
1897 ep->phy_mii.speed = SPEED_1000;
1898 ep->phy_mii.duplex = DUPLEX_HALF;
1899 } else if (ep->phy_mii.def->features &
1900 SUPPORTED_100baseT_Full) {
1901 ep->phy_mii.speed = SPEED_100;
1902 ep->phy_mii.duplex = DUPLEX_FULL;
1903 } else if (ep->phy_mii.def->features &
1904 SUPPORTED_100baseT_Half) {
1905 ep->phy_mii.speed = SPEED_100;
1906 ep->phy_mii.duplex = DUPLEX_HALF;
1907 } else if (ep->phy_mii.def->features &
1908 SUPPORTED_10baseT_Full) {
1909 ep->phy_mii.speed = SPEED_10;
1910 ep->phy_mii.duplex = DUPLEX_FULL;
1911 } else {
1912 ep->phy_mii.speed = SPEED_10;
1913 ep->phy_mii.duplex = DUPLEX_HALF;
1914 }
1915 }
1885 emac_start_link(ep, NULL); 1916 emac_start_link(ep, NULL);
1886 1917
1887 /* read the MAC Address */ 1918 /* read the MAC Address */
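
When the PHY offers no autonegotiation, the new code walks the SUPPORTED_* feature bits from fastest to slowest and forces the best speed/duplex pair, falling back to 10/half. The same cascade can be written as a table scan; a hedged sketch against the userspace <linux/ethtool.h> constants (pick_forced_mode() is an invented name):

#include <stdio.h>
#include <linux/ethtool.h>

struct mode { unsigned int feature; int speed; int duplex; };

/* Highest preference first, mirroring the if/else cascade in the driver. */
static const struct mode modes[] = {
	{ SUPPORTED_1000baseT_Full, SPEED_1000, DUPLEX_FULL },
	{ SUPPORTED_1000baseT_Half, SPEED_1000, DUPLEX_HALF },
	{ SUPPORTED_100baseT_Full,  SPEED_100,  DUPLEX_FULL },
	{ SUPPORTED_100baseT_Half,  SPEED_100,  DUPLEX_HALF },
	{ SUPPORTED_10baseT_Full,   SPEED_10,   DUPLEX_FULL },
};

static void pick_forced_mode(unsigned int features, int *speed, int *duplex)
{
	*speed = SPEED_10;		/* 10/half is the fallback, as in the patch */
	*duplex = DUPLEX_HALF;
	for (unsigned int i = 0; i < sizeof(modes) / sizeof(modes[0]); i++)
		if (features & modes[i].feature) {
			*speed = modes[i].speed;
			*duplex = modes[i].duplex;
			return;
		}
}

int main(void)
{
	int speed, duplex;

	pick_forced_mode(SUPPORTED_100baseT_Full | SUPPORTED_10baseT_Half,
			 &speed, &duplex);
	printf("forced to %d Mb/s, %s duplex\n", speed,
	       duplex == DUPLEX_FULL ? "full" : "half");
	return 0;
}
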
diff --git a/drivers/net/ns83820.c b/drivers/net/ns83820.c
index e64df4d0800b..83334db2921c 100644
--- a/drivers/net/ns83820.c
+++ b/drivers/net/ns83820.c
@@ -584,7 +584,7 @@ static inline int ns83820_add_rx_skb(struct ns83820 *dev, struct sk_buff *skb)
584 return 0; 584 return 0;
585} 585}
586 586
587static inline int rx_refill(struct net_device *ndev, int gfp) 587static inline int rx_refill(struct net_device *ndev, unsigned int __nocast gfp)
588{ 588{
589 struct ns83820 *dev = PRIV(ndev); 589 struct ns83820 *dev = PRIV(ndev);
590 unsigned i; 590 unsigned i;
diff --git a/drivers/net/pcmcia/smc91c92_cs.c b/drivers/net/pcmcia/smc91c92_cs.c
index d652e1eddb45..c7cca842e5ee 100644
--- a/drivers/net/pcmcia/smc91c92_cs.c
+++ b/drivers/net/pcmcia/smc91c92_cs.c
@@ -1832,7 +1832,7 @@ static void fill_multicast_tbl(int count, struct dev_mc_list *addrs,
1832{ 1832{
1833 struct dev_mc_list *mc_addr; 1833 struct dev_mc_list *mc_addr;
1834 1834
1835 for (mc_addr = addrs; mc_addr && --count > 0; mc_addr = mc_addr->next) { 1835 for (mc_addr = addrs; mc_addr && count-- > 0; mc_addr = mc_addr->next) {
1836 u_int position = ether_crc(6, mc_addr->dmi_addr); 1836 u_int position = ether_crc(6, mc_addr->dmi_addr);
1837#ifndef final_version /* Verify multicast address. */ 1837#ifndef final_version /* Verify multicast address. */
1838 if ((mc_addr->dmi_addr[0] & 1) == 0) 1838 if ((mc_addr->dmi_addr[0] & 1) == 0)
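
The one-character change above fixes an off-by-one: with the pre-decrement test the loop body runs count-1 times, so the last multicast address never reached the hash table, while the post-decrement form runs it count times. A small demonstration:

#include <stdio.h>

static int iterations_pre(int count)
{
	int n = 0;
	while (--count > 0)	/* old test: decrement first, then compare */
		n++;
	return n;
}

static int iterations_post(int count)
{
	int n = 0;
	while (count-- > 0)	/* new test: compare first, then decrement */
		n++;
	return n;
}

int main(void)
{
	printf("pre:  %d\n", iterations_pre(3));	/* 2 -- one address skipped */
	printf("post: %d\n", iterations_post(3));	/* 3 -- all addresses walked */
	return 0;
}
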
diff --git a/drivers/net/skge.c b/drivers/net/skge.c
index fd398da4993b..c2e6484ef138 100644
--- a/drivers/net/skge.c
+++ b/drivers/net/skge.c
@@ -2837,21 +2837,29 @@ static void skge_netpoll(struct net_device *dev)
2837static int skge_set_mac_address(struct net_device *dev, void *p) 2837static int skge_set_mac_address(struct net_device *dev, void *p)
2838{ 2838{
2839 struct skge_port *skge = netdev_priv(dev); 2839 struct skge_port *skge = netdev_priv(dev);
2840 struct sockaddr *addr = p; 2840 struct skge_hw *hw = skge->hw;
2841 int err = 0; 2841 unsigned port = skge->port;
2842 const struct sockaddr *addr = p;
2842 2843
2843 if (!is_valid_ether_addr(addr->sa_data)) 2844 if (!is_valid_ether_addr(addr->sa_data))
2844 return -EADDRNOTAVAIL; 2845 return -EADDRNOTAVAIL;
2845 2846
2846 skge_down(dev); 2847 spin_lock_bh(&hw->phy_lock);
2847 memcpy(dev->dev_addr, addr->sa_data, ETH_ALEN); 2848 memcpy(dev->dev_addr, addr->sa_data, ETH_ALEN);
2848 memcpy_toio(skge->hw->regs + B2_MAC_1 + skge->port*8, 2849 memcpy_toio(hw->regs + B2_MAC_1 + port*8,
2849 dev->dev_addr, ETH_ALEN); 2850 dev->dev_addr, ETH_ALEN);
2850 memcpy_toio(skge->hw->regs + B2_MAC_2 + skge->port*8, 2851 memcpy_toio(hw->regs + B2_MAC_2 + port*8,
2851 dev->dev_addr, ETH_ALEN); 2852 dev->dev_addr, ETH_ALEN);
2852 if (dev->flags & IFF_UP) 2853
2853 err = skge_up(dev); 2854 if (hw->chip_id == CHIP_ID_GENESIS)
2854 return err; 2855 xm_outaddr(hw, port, XM_SA, dev->dev_addr);
2856 else {
2857 gma_set_addr(hw, port, GM_SRC_ADDR_1L, dev->dev_addr);
2858 gma_set_addr(hw, port, GM_SRC_ADDR_2L, dev->dev_addr);
2859 }
2860 spin_unlock_bh(&hw->phy_lock);
2861
2862 return 0;
2855} 2863}
2856 2864
2857static const struct { 2865static const struct {
diff --git a/drivers/net/starfire.c b/drivers/net/starfire.c
index 88b89dc95c77..efdb179ecc8c 100644
--- a/drivers/net/starfire.c
+++ b/drivers/net/starfire.c
@@ -133,14 +133,18 @@
133 - finally added firmware (GPL'ed by Adaptec) 133 - finally added firmware (GPL'ed by Adaptec)
134 - removed compatibility code for 2.2.x 134 - removed compatibility code for 2.2.x
135 135
136 LK1.4.2.1 (Ion Badulescu)
137 - fixed 32/64 bit issues on i386 + CONFIG_HIGHMEM
138 - added 32-bit padding to outgoing skb's, removed previous workaround
139
136TODO: - fix forced speed/duplexing code (broken a long time ago, when 140TODO: - fix forced speed/duplexing code (broken a long time ago, when
137 somebody converted the driver to use the generic MII code) 141 somebody converted the driver to use the generic MII code)
138 - fix VLAN support 142 - fix VLAN support
139*/ 143*/
140 144
141#define DRV_NAME "starfire" 145#define DRV_NAME "starfire"
142#define DRV_VERSION "1.03+LK1.4.2" 146#define DRV_VERSION "1.03+LK1.4.2.1"
143#define DRV_RELDATE "January 19, 2005" 147#define DRV_RELDATE "October 3, 2005"
144 148
145#include <linux/config.h> 149#include <linux/config.h>
146#include <linux/version.h> 150#include <linux/version.h>
@@ -165,6 +169,14 @@ TODO: - fix forced speed/duplexing code (broken a long time ago, when
165 * of length 1. If and when this is fixed, the #define below can be removed. 169 * of length 1. If and when this is fixed, the #define below can be removed.
166 */ 170 */
167#define HAS_BROKEN_FIRMWARE 171#define HAS_BROKEN_FIRMWARE
172
173/*
174 * If using the broken firmware, data must be padded to the next 32-bit boundary.
175 */
176#ifdef HAS_BROKEN_FIRMWARE
177#define PADDING_MASK 3
178#endif
179
168/* 180/*
169 * Define this if using the driver with the zero-copy patch 181 * Define this if using the driver with the zero-copy patch
170 */ 182 */
@@ -257,9 +269,10 @@ static int full_duplex[MAX_UNITS] = {0, };
257 * This SUCKS. 269 * This SUCKS.
258 * We need a much better method to determine if dma_addr_t is 64-bit. 270 * We need a much better method to determine if dma_addr_t is 64-bit.
259 */ 271 */
260#if (defined(__i386__) && defined(CONFIG_HIGHMEM) && (LINUX_VERSION_CODE > 0x20500 || defined(CONFIG_HIGHMEM64G))) || defined(__x86_64__) || defined (__ia64__) || defined(__mips64__) || (defined(__mips__) && defined(CONFIG_HIGHMEM) && defined(CONFIG_64BIT_PHYS_ADDR)) 272#if (defined(__i386__) && defined(CONFIG_HIGHMEM64G)) || defined(__x86_64__) || defined (__ia64__) || defined(__mips64__) || (defined(__mips__) && defined(CONFIG_HIGHMEM) && defined(CONFIG_64BIT_PHYS_ADDR))
261/* 64-bit dma_addr_t */ 273/* 64-bit dma_addr_t */
262#define ADDR_64BITS /* This chip uses 64 bit addresses. */ 274#define ADDR_64BITS /* This chip uses 64 bit addresses. */
275#define netdrv_addr_t u64
263#define cpu_to_dma(x) cpu_to_le64(x) 276#define cpu_to_dma(x) cpu_to_le64(x)
264#define dma_to_cpu(x) le64_to_cpu(x) 277#define dma_to_cpu(x) le64_to_cpu(x)
265#define RX_DESC_Q_ADDR_SIZE RxDescQAddr64bit 278#define RX_DESC_Q_ADDR_SIZE RxDescQAddr64bit
@@ -268,6 +281,7 @@ static int full_duplex[MAX_UNITS] = {0, };
268#define TX_COMPL_Q_ADDR_SIZE TxComplQAddr64bit 281#define TX_COMPL_Q_ADDR_SIZE TxComplQAddr64bit
269#define RX_DESC_ADDR_SIZE RxDescAddr64bit 282#define RX_DESC_ADDR_SIZE RxDescAddr64bit
270#else /* 32-bit dma_addr_t */ 283#else /* 32-bit dma_addr_t */
284#define netdrv_addr_t u32
271#define cpu_to_dma(x) cpu_to_le32(x) 285#define cpu_to_dma(x) cpu_to_le32(x)
272#define dma_to_cpu(x) le32_to_cpu(x) 286#define dma_to_cpu(x) le32_to_cpu(x)
273#define RX_DESC_Q_ADDR_SIZE RxDescQAddr32bit 287#define RX_DESC_Q_ADDR_SIZE RxDescQAddr32bit
@@ -1333,21 +1347,10 @@ static int start_tx(struct sk_buff *skb, struct net_device *dev)
1333 } 1347 }
1334 1348
1335#if defined(ZEROCOPY) && defined(HAS_BROKEN_FIRMWARE) 1349#if defined(ZEROCOPY) && defined(HAS_BROKEN_FIRMWARE)
1336 { 1350 if (skb->ip_summed == CHECKSUM_HW) {
1337 int has_bad_length = 0; 1351 skb = skb_padto(skb, (skb->len + PADDING_MASK) & ~PADDING_MASK);
1338 1352 if (skb == NULL)
1339 if (skb_first_frag_len(skb) == 1) 1353 return NETDEV_TX_OK;
1340 has_bad_length = 1;
1341 else {
1342 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++)
1343 if (skb_shinfo(skb)->frags[i].size == 1) {
1344 has_bad_length = 1;
1345 break;
1346 }
1347 }
1348
1349 if (has_bad_length)
1350 skb_checksum_help(skb, 0);
1351 } 1354 }
1352#endif /* ZEROCOPY && HAS_BROKEN_FIRMWARE */ 1355#endif /* ZEROCOPY && HAS_BROKEN_FIRMWARE */
1353 1356
@@ -2127,13 +2130,12 @@ static int __init starfire_init (void)
2127#endif 2130#endif
2128#endif 2131#endif
2129 2132
2130#ifndef ADDR_64BITS
2131 /* we can do this test only at run-time... sigh */ 2133 /* we can do this test only at run-time... sigh */
2132 if (sizeof(dma_addr_t) == sizeof(u64)) { 2134 if (sizeof(dma_addr_t) != sizeof(netdrv_addr_t)) {
2133 printk("This driver has not been ported to this 64-bit architecture yet\n"); 2135 printk("This driver has dma_addr_t issues, please send email to maintainer\n");
2134 return -ENODEV; 2136 return -ENODEV;
2135 } 2137 }
2136#endif /* not ADDR_64BITS */ 2138
2137 return pci_module_init (&starfire_driver); 2139 return pci_module_init (&starfire_driver);
2138} 2140}
2139 2141
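
The starfire hunk above replaces the per-fragment length scan (which fell back to a software checksum) with skb_padto(): a hardware-checksummed frame is padded to the next 32-bit boundary, and if padding fails the skb has already been freed, so the driver simply reports NETDEV_TX_OK. The new netdrv_addr_t typedef also lets the run-time dma_addr_t sanity check work on both 32- and 64-bit builds. Below is a minimal sketch of the padding idiom, with a hypothetical function name, assuming <linux/netdevice.h> and <linux/skbuff.h> and the 2.6-era skb_padto() that returns the (possibly reallocated) skb or NULL, exactly as used in the hunk.

static int example_start_tx(struct sk_buff *skb, struct net_device *dev)
{
	/* Broken firmware mishandles short buffers when the hardware
	 * computes the checksum, so pad the frame to a 4-byte boundary. */
	if (skb->ip_summed == CHECKSUM_HW) {
		skb = skb_padto(skb, (skb->len + 3) & ~3u);
		if (skb == NULL)
			return NETDEV_TX_OK;	/* skb was freed for us */
	}
	/* ... fill and post the TX descriptor as usual ... */
	return NETDEV_TX_OK;
}
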
diff --git a/drivers/net/sungem.h b/drivers/net/sungem.h
index ff8ae5f79970..16edbb1a4a7a 100644
--- a/drivers/net/sungem.h
+++ b/drivers/net/sungem.h
@@ -1035,7 +1035,8 @@ struct gem {
1035 1035
1036#define ALIGNED_RX_SKB_ADDR(addr) \ 1036#define ALIGNED_RX_SKB_ADDR(addr) \
1037 ((((unsigned long)(addr) + (64UL - 1UL)) & ~(64UL - 1UL)) - (unsigned long)(addr)) 1037 ((((unsigned long)(addr) + (64UL - 1UL)) & ~(64UL - 1UL)) - (unsigned long)(addr))
1038static __inline__ struct sk_buff *gem_alloc_skb(int size, int gfp_flags) 1038static __inline__ struct sk_buff *gem_alloc_skb(int size,
1039 unsigned int __nocast gfp_flags)
1039{ 1040{
1040 struct sk_buff *skb = alloc_skb(size + 64, gfp_flags); 1041 struct sk_buff *skb = alloc_skb(size + 64, gfp_flags);
1041 1042
diff --git a/drivers/net/tg3.c b/drivers/net/tg3.c
index 25f85fb9df46..1802c3b48799 100644
--- a/drivers/net/tg3.c
+++ b/drivers/net/tg3.c
@@ -67,8 +67,8 @@
67 67
68#define DRV_MODULE_NAME "tg3" 68#define DRV_MODULE_NAME "tg3"
69#define PFX DRV_MODULE_NAME ": " 69#define PFX DRV_MODULE_NAME ": "
70#define DRV_MODULE_VERSION "3.41" 70#define DRV_MODULE_VERSION "3.42"
71#define DRV_MODULE_RELDATE "September 27, 2005" 71#define DRV_MODULE_RELDATE "Oct 3, 2005"
72 72
73#define TG3_DEF_MAC_MODE 0 73#define TG3_DEF_MAC_MODE 0
74#define TG3_DEF_RX_MODE 0 74#define TG3_DEF_RX_MODE 0
@@ -9284,8 +9284,8 @@ static int __devinit tg3_get_invariants(struct tg3 *tp)
9284 static struct pci_device_id write_reorder_chipsets[] = { 9284 static struct pci_device_id write_reorder_chipsets[] = {
9285 { PCI_DEVICE(PCI_VENDOR_ID_AMD, 9285 { PCI_DEVICE(PCI_VENDOR_ID_AMD,
9286 PCI_DEVICE_ID_AMD_FE_GATE_700C) }, 9286 PCI_DEVICE_ID_AMD_FE_GATE_700C) },
9287 { PCI_DEVICE(PCI_VENDOR_ID_AMD, 9287 { PCI_DEVICE(PCI_VENDOR_ID_VIA,
9288 PCI_DEVICE_ID_AMD_K8_NB) }, 9288 PCI_DEVICE_ID_VIA_8385_0) },
9289 { }, 9289 { },
9290 }; 9290 };
9291 u32 misc_ctrl_reg; 9291 u32 misc_ctrl_reg;
@@ -9300,15 +9300,6 @@ static int __devinit tg3_get_invariants(struct tg3 *tp)
9300 tp->tg3_flags2 |= TG3_FLG2_SUN_570X; 9300 tp->tg3_flags2 |= TG3_FLG2_SUN_570X;
9301#endif 9301#endif
9302 9302
9303 /* If we have an AMD 762 or K8 chipset, write
9304 * reordering to the mailbox registers done by the host
9305 * controller can cause major troubles. We read back from
9306 * every mailbox register write to force the writes to be
9307 * posted to the chip in order.
9308 */
9309 if (pci_dev_present(write_reorder_chipsets))
9310 tp->tg3_flags |= TG3_FLAG_MBOX_WRITE_REORDER;
9311
9312 /* Force memory write invalidate off. If we leave it on, 9303 /* Force memory write invalidate off. If we leave it on,
9313 * then on 5700_BX chips we have to enable a workaround. 9304 * then on 5700_BX chips we have to enable a workaround.
9314 * The workaround is to set the TG3PCI_DMA_RW_CTRL boundary 9305 * The workaround is to set the TG3PCI_DMA_RW_CTRL boundary
@@ -9439,6 +9430,16 @@ static int __devinit tg3_get_invariants(struct tg3 *tp)
9439 if (pci_find_capability(tp->pdev, PCI_CAP_ID_EXP) != 0) 9430 if (pci_find_capability(tp->pdev, PCI_CAP_ID_EXP) != 0)
9440 tp->tg3_flags2 |= TG3_FLG2_PCI_EXPRESS; 9431 tp->tg3_flags2 |= TG3_FLG2_PCI_EXPRESS;
9441 9432
9433 /* If we have an AMD 762 or VIA K8T800 chipset, write
9434 * reordering to the mailbox registers done by the host
9435 * controller can cause major troubles. We read back from
9436 * every mailbox register write to force the writes to be
9437 * posted to the chip in order.
9438 */
9439 if (pci_dev_present(write_reorder_chipsets) &&
9440 !(tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS))
9441 tp->tg3_flags |= TG3_FLAG_MBOX_WRITE_REORDER;
9442
9442 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 && 9443 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 &&
9443 tp->pci_lat_timer < 64) { 9444 tp->pci_lat_timer < 64) {
9444 tp->pci_lat_timer = 64; 9445 tp->pci_lat_timer = 64;
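
The tg3 hunks above swap the AMD K8 northbridge for the VIA K8T800 (8385) in write_reorder_chipsets[] and move the pci_dev_present() test after PCI Express detection, so the mailbox read-back workaround is only armed on non-Express boards sitting behind a known write-reordering host bridge. A sketch of that detection idiom follows; the struct, flag, and function names are hypothetical, and only the PCI IDs are taken from the hunk.

#include <linux/pci.h>

struct example_nic {
	struct pci_dev *pdev;
	int is_pci_express;
	u32 flags;
};
#define EXAMPLE_FLAG_MBOX_WRITE_REORDER	0x1

static struct pci_device_id example_reorder_bridges[] = {
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_FE_GATE_700C) },
	{ PCI_DEVICE(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_8385_0) },
	{ },
};

static void example_check_write_reorder(struct example_nic *np)
{
	/* Read back every mailbox write only when a reordering host
	 * bridge is present and the NIC itself is not PCI Express. */
	if (pci_dev_present(example_reorder_bridges) && !np->is_pci_express)
		np->flags |= EXAMPLE_FLAG_MBOX_WRITE_REORDER;
}
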
diff --git a/drivers/net/tokenring/ibmtr.c b/drivers/net/tokenring/ibmtr.c
index e7b001017b9a..32057e65808b 100644
--- a/drivers/net/tokenring/ibmtr.c
+++ b/drivers/net/tokenring/ibmtr.c
@@ -531,7 +531,6 @@ static int __devinit ibmtr_probe1(struct net_device *dev, int PIOaddr)
531 if (!time_after(jiffies, timeout)) continue; 531 if (!time_after(jiffies, timeout)) continue;
532 DPRINTK( "Hardware timeout during initialization.\n"); 532 DPRINTK( "Hardware timeout during initialization.\n");
533 iounmap(t_mmio); 533 iounmap(t_mmio);
534 kfree(ti);
535 return -ENODEV; 534 return -ENODEV;
536 } 535 }
537 ti->sram_phys = 536 ti->sram_phys =
@@ -645,7 +644,6 @@ static int __devinit ibmtr_probe1(struct net_device *dev, int PIOaddr)
645 DPRINTK("Unknown shared ram paging info %01X\n", 644 DPRINTK("Unknown shared ram paging info %01X\n",
646 ti->shared_ram_paging); 645 ti->shared_ram_paging);
647 iounmap(t_mmio); 646 iounmap(t_mmio);
648 kfree(ti);
649 return -ENODEV; 647 return -ENODEV;
650 break; 648 break;
651 } /*end switch shared_ram_paging */ 649 } /*end switch shared_ram_paging */
@@ -675,7 +673,6 @@ static int __devinit ibmtr_probe1(struct net_device *dev, int PIOaddr)
675 "driver limit (%05x), adapter not started.\n", 673 "driver limit (%05x), adapter not started.\n",
676 chk_base, ibmtr_mem_base + IBMTR_SHARED_RAM_SIZE); 674 chk_base, ibmtr_mem_base + IBMTR_SHARED_RAM_SIZE);
677 iounmap(t_mmio); 675 iounmap(t_mmio);
678 kfree(ti);
679 return -ENODEV; 676 return -ENODEV;
680 } else { /* seems cool, record what we have figured out */ 677 } else { /* seems cool, record what we have figured out */
681 ti->sram_base = new_base >> 12; 678 ti->sram_base = new_base >> 12;
@@ -690,7 +687,6 @@ static int __devinit ibmtr_probe1(struct net_device *dev, int PIOaddr)
690 DPRINTK("Could not grab irq %d. Halting Token Ring driver.\n", 687 DPRINTK("Could not grab irq %d. Halting Token Ring driver.\n",
691 irq); 688 irq);
692 iounmap(t_mmio); 689 iounmap(t_mmio);
693 kfree(ti);
694 return -ENODEV; 690 return -ENODEV;
695 } 691 }
696 /*?? Now, allocate some of the PIO PORTs for this driver.. */ 692 /*?? Now, allocate some of the PIO PORTs for this driver.. */
@@ -699,7 +695,6 @@ static int __devinit ibmtr_probe1(struct net_device *dev, int PIOaddr)
699 DPRINTK("Could not grab PIO range. Halting driver.\n"); 695 DPRINTK("Could not grab PIO range. Halting driver.\n");
700 free_irq(dev->irq, dev); 696 free_irq(dev->irq, dev);
701 iounmap(t_mmio); 697 iounmap(t_mmio);
702 kfree(ti);
703 return -EBUSY; 698 return -EBUSY;
704 } 699 }
705 700
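
The ibmtr hunks above drop kfree(ti) from the probe error paths; ti is the device's private area, released together with the net_device by the caller, so freeing it here invited a double free. The error paths now unwind only the mappings and resources the probe itself acquired. A small sketch of that ownership rule, with hypothetical structure and helper names:

struct example_priv {
	unsigned long mmio_phys;
	void __iomem *mmio;
};

/* Hardware bring-up helper; assumed to exist for this sketch. */
int example_hw_init(struct example_priv *priv, void __iomem *mmio);

static int example_probe(struct net_device *dev)
{
	struct example_priv *priv = dev->priv;	/* owned by the net_device */
	void __iomem *mmio;

	mmio = ioremap(priv->mmio_phys, PAGE_SIZE);
	if (!mmio)
		return -ENOMEM;

	if (example_hw_init(priv, mmio) < 0) {
		iounmap(mmio);		/* undo only what we set up */
		return -ENODEV;		/* but never kfree(priv) here */
	}
	priv->mmio = mmio;
	return 0;
}
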
diff --git a/drivers/net/tulip/21142.c b/drivers/net/tulip/21142.c
index 5db694c4eb02..683f14b01c06 100644
--- a/drivers/net/tulip/21142.c
+++ b/drivers/net/tulip/21142.c
@@ -172,7 +172,7 @@ void t21142_lnk_change(struct net_device *dev, int csr5)
172 int i; 172 int i;
173 for (i = 0; i < tp->mtable->leafcount; i++) 173 for (i = 0; i < tp->mtable->leafcount; i++)
174 if (tp->mtable->mleaf[i].media == dev->if_port) { 174 if (tp->mtable->mleaf[i].media == dev->if_port) {
175 int startup = ! ((tp->chip_id == DC21143 && tp->revision == 65)); 175 int startup = ! ((tp->chip_id == DC21143 && (tp->revision == 48 || tp->revision == 65)));
176 tp->cur_index = i; 176 tp->cur_index = i;
177 tulip_select_media(dev, startup); 177 tulip_select_media(dev, startup);
178 setup_done = 1; 178 setup_done = 1;
diff --git a/drivers/net/wan/sdlamain.c b/drivers/net/wan/sdlamain.c
index 74e151acef3e..7a8b22a7ea31 100644
--- a/drivers/net/wan/sdlamain.c
+++ b/drivers/net/wan/sdlamain.c
@@ -57,6 +57,7 @@
57#include <linux/ioport.h> /* request_region(), release_region() */ 57#include <linux/ioport.h> /* request_region(), release_region() */
58#include <linux/wanrouter.h> /* WAN router definitions */ 58#include <linux/wanrouter.h> /* WAN router definitions */
59#include <linux/wanpipe.h> /* WANPIPE common user API definitions */ 59#include <linux/wanpipe.h> /* WANPIPE common user API definitions */
60#include <linux/rcupdate.h>
60 61
61#include <linux/in.h> 62#include <linux/in.h>
62#include <asm/io.h> /* phys_to_virt() */ 63#include <asm/io.h> /* phys_to_virt() */
@@ -1268,37 +1269,41 @@ unsigned long get_ip_address(struct net_device *dev, int option)
1268 1269
1269 struct in_ifaddr *ifaddr; 1270 struct in_ifaddr *ifaddr;
1270 struct in_device *in_dev; 1271 struct in_device *in_dev;
1272 unsigned long addr = 0;
1271 1273
1272 if ((in_dev = __in_dev_get(dev)) == NULL){ 1274 rcu_read_lock();
1273 return 0; 1275 if ((in_dev = __in_dev_get_rcu(dev)) == NULL){
1276 goto out;
1274 } 1277 }
1275 1278
1276 if ((ifaddr = in_dev->ifa_list)== NULL ){ 1279 if ((ifaddr = in_dev->ifa_list)== NULL ){
1277 return 0; 1280 goto out;
1278 } 1281 }
1279 1282
1280 switch (option){ 1283 switch (option){
1281 1284
1282 case WAN_LOCAL_IP: 1285 case WAN_LOCAL_IP:
1283 return ifaddr->ifa_local; 1286 addr = ifaddr->ifa_local;
1284 break; 1287 break;
1285 1288
1286 case WAN_POINTOPOINT_IP: 1289 case WAN_POINTOPOINT_IP:
1287 return ifaddr->ifa_address; 1290 addr = ifaddr->ifa_address;
1288 break; 1291 break;
1289 1292
1290 case WAN_NETMASK_IP: 1293 case WAN_NETMASK_IP:
1291 return ifaddr->ifa_mask; 1294 addr = ifaddr->ifa_mask;
1292 break; 1295 break;
1293 1296
1294 case WAN_BROADCAST_IP: 1297 case WAN_BROADCAST_IP:
1295 return ifaddr->ifa_broadcast; 1298 addr = ifaddr->ifa_broadcast;
1296 break; 1299 break;
1297 default: 1300 default:
1298 return 0; 1301 break;
1299 } 1302 }
1300 1303
1301 return 0; 1304out:
1305 rcu_read_unlock();
1306 return addr;
1302} 1307}
1303 1308
1304void add_gateway(sdla_t *card, struct net_device *dev) 1309void add_gateway(sdla_t *card, struct net_device *dev)
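
This sdlamain.c conversion, like the syncppp, strip, led, and qeth hunks further down, switches from __in_dev_get() to __in_dev_get_rcu() and brackets the lookup with rcu_read_lock()/rcu_read_unlock(), funnelling every return through one exit so the read lock is always dropped. A condensed sketch of that pattern (the helper name is hypothetical):

static u32 example_primary_addr(struct net_device *dev)
{
	struct in_device *in_dev;
	struct in_ifaddr *ifa;
	u32 addr = 0;

	rcu_read_lock();
	in_dev = __in_dev_get_rcu(dev);	/* valid only inside the RCU section */
	if (in_dev) {
		ifa = in_dev->ifa_list;
		if (ifa)
			addr = ifa->ifa_local;
	}
	rcu_read_unlock();
	return addr;
}
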
diff --git a/drivers/net/wan/syncppp.c b/drivers/net/wan/syncppp.c
index b56a7b516d24..a6d3b55013a5 100644
--- a/drivers/net/wan/syncppp.c
+++ b/drivers/net/wan/syncppp.c
@@ -769,7 +769,7 @@ static void sppp_cisco_input (struct sppp *sp, struct sk_buff *skb)
769 u32 addr = 0, mask = ~0; /* FIXME: is the mask correct? */ 769 u32 addr = 0, mask = ~0; /* FIXME: is the mask correct? */
770#ifdef CONFIG_INET 770#ifdef CONFIG_INET
771 rcu_read_lock(); 771 rcu_read_lock();
772 if ((in_dev = __in_dev_get(dev)) != NULL) 772 if ((in_dev = __in_dev_get_rcu(dev)) != NULL)
773 { 773 {
774 for (ifa=in_dev->ifa_list; ifa != NULL; 774 for (ifa=in_dev->ifa_list; ifa != NULL;
775 ifa=ifa->ifa_next) { 775 ifa=ifa->ifa_next) {
diff --git a/drivers/net/wireless/orinoco.c b/drivers/net/wireless/orinoco.c
index 6deb7cc810cc..cf3daaa1b369 100644
--- a/drivers/net/wireless/orinoco.c
+++ b/drivers/net/wireless/orinoco.c
@@ -503,9 +503,14 @@ static int orinoco_xmit(struct sk_buff *skb, struct net_device *dev)
503 return 0; 503 return 0;
504 } 504 }
505 505
506 /* Length of the packet body */ 506 /* Check packet length, pad short packets, round up odd length */
507 /* FIXME: what if the skb is smaller than this? */ 507 len = max_t(int, ALIGN(skb->len, 2), ETH_ZLEN);
508 len = max_t(int,skb->len - ETH_HLEN, ETH_ZLEN - ETH_HLEN); 508 if (skb->len < len) {
509 skb = skb_padto(skb, len);
510 if (skb == NULL)
511 goto fail;
512 }
513 len -= ETH_HLEN;
509 514
510 eh = (struct ethhdr *)skb->data; 515 eh = (struct ethhdr *)skb->data;
511 516
@@ -557,8 +562,7 @@ static int orinoco_xmit(struct sk_buff *skb, struct net_device *dev)
557 p = skb->data; 562 p = skb->data;
558 } 563 }
559 564
560 /* Round up for odd length packets */ 565 err = hermes_bap_pwrite(hw, USER_BAP, p, data_len,
561 err = hermes_bap_pwrite(hw, USER_BAP, p, ALIGN(data_len, 2),
562 txfid, data_off); 566 txfid, data_off);
563 if (err) { 567 if (err) {
564 printk(KERN_ERR "%s: Error %d writing packet to BAP\n", 568 printk(KERN_ERR "%s: Error %d writing packet to BAP\n",
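
The orinoco hunks above replace the old "subtract ETH_HLEN and hope" length computation with an explicit fix-up: enforce the Ethernet minimum, round odd lengths up to a 16-bit boundary, and pad the skb once up front rather than rounding at BAP-write time. A sketch of that computation, with a hypothetical helper name and the same kernel macros the hunk relies on (max_t, ALIGN, ETH_ZLEN, ETH_HLEN, skb_padto):

static int example_fixup_tx_len(struct sk_buff **pskb)
{
	struct sk_buff *skb = *pskb;
	int len = max_t(int, ALIGN(skb->len, 2), ETH_ZLEN);

	if (skb->len < len) {
		skb = skb_padto(skb, len);
		if (skb == NULL)
			return -ENOMEM;	/* original skb already freed */
		*pskb = skb;
	}
	return len - ETH_HLEN;	/* length of the frame body */
}
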
diff --git a/drivers/net/wireless/strip.c b/drivers/net/wireless/strip.c
index 4b0acae22b0d..7bc7fc823128 100644
--- a/drivers/net/wireless/strip.c
+++ b/drivers/net/wireless/strip.c
@@ -1352,7 +1352,7 @@ static unsigned char *strip_make_packet(unsigned char *buffer,
1352 struct in_device *in_dev; 1352 struct in_device *in_dev;
1353 1353
1354 rcu_read_lock(); 1354 rcu_read_lock();
1355 in_dev = __in_dev_get(strip_info->dev); 1355 in_dev = __in_dev_get_rcu(strip_info->dev);
1356 if (in_dev == NULL) { 1356 if (in_dev == NULL) {
1357 rcu_read_unlock(); 1357 rcu_read_unlock();
1358 return NULL; 1358 return NULL;
@@ -1508,7 +1508,7 @@ static void strip_send(struct strip *strip_info, struct sk_buff *skb)
1508 1508
1509 brd = addr = 0; 1509 brd = addr = 0;
1510 rcu_read_lock(); 1510 rcu_read_lock();
1511 in_dev = __in_dev_get(strip_info->dev); 1511 in_dev = __in_dev_get_rcu(strip_info->dev);
1512 if (in_dev) { 1512 if (in_dev) {
1513 if (in_dev->ifa_list) { 1513 if (in_dev->ifa_list) {
1514 brd = in_dev->ifa_list->ifa_broadcast; 1514 brd = in_dev->ifa_list->ifa_broadcast;
diff --git a/drivers/parisc/led.c b/drivers/parisc/led.c
index e90fb72a6962..286902298e33 100644
--- a/drivers/parisc/led.c
+++ b/drivers/parisc/led.c
@@ -37,6 +37,7 @@
37#include <linux/proc_fs.h> 37#include <linux/proc_fs.h>
38#include <linux/ctype.h> 38#include <linux/ctype.h>
39#include <linux/blkdev.h> 39#include <linux/blkdev.h>
40#include <linux/rcupdate.h>
40#include <asm/io.h> 41#include <asm/io.h>
41#include <asm/processor.h> 42#include <asm/processor.h>
42#include <asm/hardware.h> 43#include <asm/hardware.h>
@@ -358,9 +359,10 @@ static __inline__ int led_get_net_activity(void)
358 /* we are running as tasklet, so locking dev_base 359 /* we are running as tasklet, so locking dev_base
359 * for reading should be OK */ 360 * for reading should be OK */
360 read_lock(&dev_base_lock); 361 read_lock(&dev_base_lock);
362 rcu_read_lock();
361 for (dev = dev_base; dev; dev = dev->next) { 363 for (dev = dev_base; dev; dev = dev->next) {
362 struct net_device_stats *stats; 364 struct net_device_stats *stats;
363 struct in_device *in_dev = __in_dev_get(dev); 365 struct in_device *in_dev = __in_dev_get_rcu(dev);
364 if (!in_dev || !in_dev->ifa_list) 366 if (!in_dev || !in_dev->ifa_list)
365 continue; 367 continue;
366 if (LOOPBACK(in_dev->ifa_list->ifa_local)) 368 if (LOOPBACK(in_dev->ifa_list->ifa_local))
@@ -371,6 +373,7 @@ static __inline__ int led_get_net_activity(void)
371 rx_total += stats->rx_packets; 373 rx_total += stats->rx_packets;
372 tx_total += stats->tx_packets; 374 tx_total += stats->tx_packets;
373 } 375 }
376 rcu_read_unlock();
374 read_unlock(&dev_base_lock); 377 read_unlock(&dev_base_lock);
375 378
376 retval = 0; 379 retval = 0;
diff --git a/drivers/s390/net/qeth.h b/drivers/s390/net/qeth.h
index 2ad4797ce024..9963479ba89f 100644
--- a/drivers/s390/net/qeth.h
+++ b/drivers/s390/net/qeth.h
@@ -686,6 +686,7 @@ struct qeth_seqno {
686 __u32 pdu_hdr; 686 __u32 pdu_hdr;
687 __u32 pdu_hdr_ack; 687 __u32 pdu_hdr_ack;
688 __u16 ipa; 688 __u16 ipa;
689 __u32 pkt_seqno;
689}; 690};
690 691
691struct qeth_reply { 692struct qeth_reply {
@@ -848,6 +849,7 @@ qeth_realloc_headroom(struct qeth_card *card, struct sk_buff **skb, int size)
848 "on interface %s", QETH_CARD_IFNAME(card)); 849 "on interface %s", QETH_CARD_IFNAME(card));
849 return -ENOMEM; 850 return -ENOMEM;
850 } 851 }
852 kfree_skb(*skb);
851 *skb = new_skb; 853 *skb = new_skb;
852 } 854 }
853 return 0; 855 return 0;
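
The qeth.h hunk adds a kfree_skb() before the caller's pointer is redirected to the reallocated buffer: skb_realloc_headroom() returns a copy, so without the free the original skb leaked on every headroom expansion. A sketch of the corrected shape, with a hypothetical helper name:

static int example_realloc_headroom(struct sk_buff **skb, int size)
{
	struct sk_buff *new_skb;

	if (skb_headroom(*skb) >= size)
		return 0;

	new_skb = skb_realloc_headroom(*skb, size);
	if (!new_skb)
		return -ENOMEM;

	kfree_skb(*skb);	/* old buffer is no longer referenced */
	*skb = new_skb;
	return 0;
}
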
diff --git a/drivers/s390/net/qeth_main.c b/drivers/s390/net/qeth_main.c
index 86582cf1e19e..bd28e2438d7f 100644
--- a/drivers/s390/net/qeth_main.c
+++ b/drivers/s390/net/qeth_main.c
@@ -511,7 +511,7 @@ static int
511__qeth_set_offline(struct ccwgroup_device *cgdev, int recovery_mode) 511__qeth_set_offline(struct ccwgroup_device *cgdev, int recovery_mode)
512{ 512{
513 struct qeth_card *card = (struct qeth_card *) cgdev->dev.driver_data; 513 struct qeth_card *card = (struct qeth_card *) cgdev->dev.driver_data;
514 int rc = 0; 514 int rc = 0, rc2 = 0, rc3 = 0;
515 enum qeth_card_states recover_flag; 515 enum qeth_card_states recover_flag;
516 516
517 QETH_DBF_TEXT(setup, 3, "setoffl"); 517 QETH_DBF_TEXT(setup, 3, "setoffl");
@@ -523,11 +523,13 @@ __qeth_set_offline(struct ccwgroup_device *cgdev, int recovery_mode)
523 CARD_BUS_ID(card)); 523 CARD_BUS_ID(card));
524 return -ERESTARTSYS; 524 return -ERESTARTSYS;
525 } 525 }
526 if ((rc = ccw_device_set_offline(CARD_DDEV(card))) || 526 rc = ccw_device_set_offline(CARD_DDEV(card));
527 (rc = ccw_device_set_offline(CARD_WDEV(card))) || 527 rc2 = ccw_device_set_offline(CARD_WDEV(card));
528 (rc = ccw_device_set_offline(CARD_RDEV(card)))) { 528 rc3 = ccw_device_set_offline(CARD_RDEV(card));
529 if (!rc)
530 rc = (rc2) ? rc2 : rc3;
531 if (rc)
529 QETH_DBF_TEXT_(setup, 2, "1err%d", rc); 532 QETH_DBF_TEXT_(setup, 2, "1err%d", rc);
530 }
531 if (recover_flag == CARD_STATE_UP) 533 if (recover_flag == CARD_STATE_UP)
532 card->state = CARD_STATE_RECOVER; 534 card->state = CARD_STATE_RECOVER;
533 qeth_notify_processes(); 535 qeth_notify_processes();
@@ -1046,6 +1048,7 @@ qeth_setup_card(struct qeth_card *card)
1046 spin_lock_init(&card->vlanlock); 1048 spin_lock_init(&card->vlanlock);
1047 card->vlangrp = NULL; 1049 card->vlangrp = NULL;
1048#endif 1050#endif
1051 spin_lock_init(&card->lock);
1049 spin_lock_init(&card->ip_lock); 1052 spin_lock_init(&card->ip_lock);
1050 spin_lock_init(&card->thread_mask_lock); 1053 spin_lock_init(&card->thread_mask_lock);
1051 card->thread_start_mask = 0; 1054 card->thread_start_mask = 0;
@@ -1626,16 +1629,6 @@ qeth_cmd_timeout(unsigned long data)
1626 spin_unlock_irqrestore(&reply->card->lock, flags); 1629 spin_unlock_irqrestore(&reply->card->lock, flags);
1627} 1630}
1628 1631
1629static void
1630qeth_reset_ip_addresses(struct qeth_card *card)
1631{
1632 QETH_DBF_TEXT(trace, 2, "rstipadd");
1633
1634 qeth_clear_ip_list(card, 0, 1);
1635 /* this function will also schedule the SET_IP_THREAD */
1636 qeth_set_multicast_list(card->dev);
1637}
1638
1639static struct qeth_ipa_cmd * 1632static struct qeth_ipa_cmd *
1640qeth_check_ipa_data(struct qeth_card *card, struct qeth_cmd_buffer *iob) 1633qeth_check_ipa_data(struct qeth_card *card, struct qeth_cmd_buffer *iob)
1641{ 1634{
@@ -1664,9 +1657,8 @@ qeth_check_ipa_data(struct qeth_card *card, struct qeth_cmd_buffer *iob)
1664 "IP address reset.\n", 1657 "IP address reset.\n",
1665 QETH_CARD_IFNAME(card), 1658 QETH_CARD_IFNAME(card),
1666 card->info.chpid); 1659 card->info.chpid);
1667 card->lan_online = 1;
1668 netif_carrier_on(card->dev); 1660 netif_carrier_on(card->dev);
1669 qeth_reset_ip_addresses(card); 1661 qeth_schedule_recovery(card);
1670 return NULL; 1662 return NULL;
1671 case IPA_CMD_REGISTER_LOCAL_ADDR: 1663 case IPA_CMD_REGISTER_LOCAL_ADDR:
1672 QETH_DBF_TEXT(trace,3, "irla"); 1664 QETH_DBF_TEXT(trace,3, "irla");
@@ -2387,6 +2379,7 @@ qeth_layer2_rebuild_skb(struct qeth_card *card, struct sk_buff *skb,
2387 skb_pull(skb, VLAN_HLEN); 2379 skb_pull(skb, VLAN_HLEN);
2388 } 2380 }
2389#endif 2381#endif
2382 *((__u32 *)skb->cb) = ++card->seqno.pkt_seqno;
2390 return vlan_id; 2383 return vlan_id;
2391} 2384}
2392 2385
@@ -3014,7 +3007,7 @@ qeth_alloc_buffer_pool(struct qeth_card *card)
3014 return -ENOMEM; 3007 return -ENOMEM;
3015 } 3008 }
3016 for(j = 0; j < QETH_MAX_BUFFER_ELEMENTS(card); ++j){ 3009 for(j = 0; j < QETH_MAX_BUFFER_ELEMENTS(card); ++j){
3017 ptr = (void *) __get_free_page(GFP_KERNEL); 3010 ptr = (void *) __get_free_page(GFP_KERNEL|GFP_DMA);
3018 if (!ptr) { 3011 if (!ptr) {
3019 while (j > 0) 3012 while (j > 0)
3020 free_page((unsigned long) 3013 free_page((unsigned long)
@@ -3058,7 +3051,8 @@ qeth_alloc_qdio_buffers(struct qeth_card *card)
3058 if (card->qdio.state == QETH_QDIO_ALLOCATED) 3051 if (card->qdio.state == QETH_QDIO_ALLOCATED)
3059 return 0; 3052 return 0;
3060 3053
3061 card->qdio.in_q = kmalloc(sizeof(struct qeth_qdio_q), GFP_KERNEL); 3054 card->qdio.in_q = kmalloc(sizeof(struct qeth_qdio_q),
3055 GFP_KERNEL|GFP_DMA);
3062 if (!card->qdio.in_q) 3056 if (!card->qdio.in_q)
3063 return - ENOMEM; 3057 return - ENOMEM;
3064 QETH_DBF_TEXT(setup, 2, "inq"); 3058 QETH_DBF_TEXT(setup, 2, "inq");
@@ -3083,7 +3077,7 @@ qeth_alloc_qdio_buffers(struct qeth_card *card)
3083 } 3077 }
3084 for (i = 0; i < card->qdio.no_out_queues; ++i){ 3078 for (i = 0; i < card->qdio.no_out_queues; ++i){
3085 card->qdio.out_qs[i] = kmalloc(sizeof(struct qeth_qdio_out_q), 3079 card->qdio.out_qs[i] = kmalloc(sizeof(struct qeth_qdio_out_q),
3086 GFP_KERNEL); 3080 GFP_KERNEL|GFP_DMA);
3087 if (!card->qdio.out_qs[i]){ 3081 if (!card->qdio.out_qs[i]){
3088 while (i > 0) 3082 while (i > 0)
3089 kfree(card->qdio.out_qs[--i]); 3083 kfree(card->qdio.out_qs[--i]);
@@ -5200,7 +5194,7 @@ qeth_free_vlan_addresses4(struct qeth_card *card, unsigned short vid)
5200 if (!card->vlangrp) 5194 if (!card->vlangrp)
5201 return; 5195 return;
5202 rcu_read_lock(); 5196 rcu_read_lock();
5203 in_dev = __in_dev_get(card->vlangrp->vlan_devices[vid]); 5197 in_dev = __in_dev_get_rcu(card->vlangrp->vlan_devices[vid]);
5204 if (!in_dev) 5198 if (!in_dev)
5205 goto out; 5199 goto out;
5206 for (ifa = in_dev->ifa_list; ifa; ifa = ifa->ifa_next) { 5200 for (ifa = in_dev->ifa_list; ifa; ifa = ifa->ifa_next) {
@@ -6470,6 +6464,9 @@ qeth_query_ipassists_cb(struct qeth_card *card, struct qeth_reply *reply,
6470 if (cmd->hdr.prot_version == QETH_PROT_IPV4) { 6464 if (cmd->hdr.prot_version == QETH_PROT_IPV4) {
6471 card->options.ipa4.supported_funcs = cmd->hdr.ipa_supported; 6465 card->options.ipa4.supported_funcs = cmd->hdr.ipa_supported;
6472 card->options.ipa4.enabled_funcs = cmd->hdr.ipa_enabled; 6466 card->options.ipa4.enabled_funcs = cmd->hdr.ipa_enabled;
6467 /* Disable IPV6 support hard coded for Hipersockets */
6468 if(card->info.type == QETH_CARD_TYPE_IQD)
6469 card->options.ipa4.supported_funcs &= ~IPA_IPV6;
6473 } else { 6470 } else {
6474#ifdef CONFIG_QETH_IPV6 6471#ifdef CONFIG_QETH_IPV6
6475 card->options.ipa6.supported_funcs = cmd->hdr.ipa_supported; 6472 card->options.ipa6.supported_funcs = cmd->hdr.ipa_supported;
@@ -7725,7 +7722,7 @@ qeth_arp_constructor(struct neighbour *neigh)
7725 goto out; 7722 goto out;
7726 7723
7727 rcu_read_lock(); 7724 rcu_read_lock();
7728 in_dev = rcu_dereference(__in_dev_get(dev)); 7725 in_dev = __in_dev_get_rcu(dev);
7729 if (in_dev == NULL) { 7726 if (in_dev == NULL) {
7730 rcu_read_unlock(); 7727 rcu_read_unlock();
7731 return -EINVAL; 7728 return -EINVAL;
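
Among the qeth_main.c changes above: the three ccw_device_set_offline() calls now always run and their results are merged afterwards, a full recovery is scheduled instead of the removed inline IP-address reset, and the buffer-pool pages and qdio queue structures are allocated with GFP_KERNEL | GFP_DMA so the adapter can address them on s390. A sketch of that allocation pattern (structure and function names hypothetical):

struct example_qdio_q {
	void *buffer;
};

static struct example_qdio_q *example_alloc_queue(void)
{
	struct example_qdio_q *q;
	unsigned long page;

	q = kmalloc(sizeof(*q), GFP_KERNEL | GFP_DMA);
	if (!q)
		return NULL;

	page = __get_free_page(GFP_KERNEL | GFP_DMA);
	if (!page) {
		kfree(q);
		return NULL;
	}
	q->buffer = (void *)page;
	return q;
}
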
diff --git a/drivers/scsi/3w-9xxx.c b/drivers/scsi/3w-9xxx.c
index a6ac61611f35..a748fbfb6692 100644
--- a/drivers/scsi/3w-9xxx.c
+++ b/drivers/scsi/3w-9xxx.c
@@ -60,6 +60,7 @@
60 Remove un-needed eh_abort handler. 60 Remove un-needed eh_abort handler.
61 Add support for embedded firmware error strings. 61 Add support for embedded firmware error strings.
62 2.26.02.003 - Correctly handle single sgl's with use_sg=1. 62 2.26.02.003 - Correctly handle single sgl's with use_sg=1.
63 2.26.02.004 - Add support for 9550SX controllers.
63*/ 64*/
64 65
65#include <linux/module.h> 66#include <linux/module.h>
@@ -82,7 +83,7 @@
82#include "3w-9xxx.h" 83#include "3w-9xxx.h"
83 84
84/* Globals */ 85/* Globals */
85#define TW_DRIVER_VERSION "2.26.02.003" 86#define TW_DRIVER_VERSION "2.26.02.004"
86static TW_Device_Extension *twa_device_extension_list[TW_MAX_SLOT]; 87static TW_Device_Extension *twa_device_extension_list[TW_MAX_SLOT];
87static unsigned int twa_device_extension_count; 88static unsigned int twa_device_extension_count;
88static int twa_major = -1; 89static int twa_major = -1;
@@ -892,11 +893,6 @@ static int twa_decode_bits(TW_Device_Extension *tw_dev, u32 status_reg_value)
892 writel(TW_CONTROL_CLEAR_QUEUE_ERROR, TW_CONTROL_REG_ADDR(tw_dev)); 893 writel(TW_CONTROL_CLEAR_QUEUE_ERROR, TW_CONTROL_REG_ADDR(tw_dev));
893 } 894 }
894 895
895 if (status_reg_value & TW_STATUS_SBUF_WRITE_ERROR) {
896 TW_PRINTK(tw_dev->host, TW_DRIVER, 0xf, "SBUF Write Error: clearing");
897 writel(TW_CONTROL_CLEAR_SBUF_WRITE_ERROR, TW_CONTROL_REG_ADDR(tw_dev));
898 }
899
900 if (status_reg_value & TW_STATUS_MICROCONTROLLER_ERROR) { 896 if (status_reg_value & TW_STATUS_MICROCONTROLLER_ERROR) {
901 if (tw_dev->reset_print == 0) { 897 if (tw_dev->reset_print == 0) {
902 TW_PRINTK(tw_dev->host, TW_DRIVER, 0x10, "Microcontroller Error: clearing"); 898 TW_PRINTK(tw_dev->host, TW_DRIVER, 0x10, "Microcontroller Error: clearing");
@@ -930,6 +926,36 @@ out:
930 return retval; 926 return retval;
931} /* End twa_empty_response_queue() */ 927} /* End twa_empty_response_queue() */
932 928
929/* This function will clear the pchip/response queue on 9550SX */
930static int twa_empty_response_queue_large(TW_Device_Extension *tw_dev)
931{
932 u32 status_reg_value, response_que_value;
933 int count = 0, retval = 1;
934
935 if (tw_dev->tw_pci_dev->device == PCI_DEVICE_ID_3WARE_9550SX) {
936 status_reg_value = readl(TW_STATUS_REG_ADDR(tw_dev));
937
938 while (((status_reg_value & TW_STATUS_RESPONSE_QUEUE_EMPTY) == 0) && (count < TW_MAX_RESPONSE_DRAIN)) {
939 response_que_value = readl(TW_RESPONSE_QUEUE_REG_ADDR_LARGE(tw_dev));
940 if ((response_que_value & TW_9550SX_DRAIN_COMPLETED) == TW_9550SX_DRAIN_COMPLETED) {
941 /* P-chip settle time */
942 msleep(500);
943 retval = 0;
944 goto out;
945 }
946 status_reg_value = readl(TW_STATUS_REG_ADDR(tw_dev));
947 count++;
948 }
949 if (count == TW_MAX_RESPONSE_DRAIN)
950 goto out;
951
952 retval = 0;
953 } else
954 retval = 0;
955out:
956 return retval;
957} /* End twa_empty_response_queue_large() */
958
933/* This function passes sense keys from firmware to scsi layer */ 959/* This function passes sense keys from firmware to scsi layer */
934static int twa_fill_sense(TW_Device_Extension *tw_dev, int request_id, int copy_sense, int print_host) 960static int twa_fill_sense(TW_Device_Extension *tw_dev, int request_id, int copy_sense, int print_host)
935{ 961{
@@ -1613,8 +1639,16 @@ static int twa_reset_sequence(TW_Device_Extension *tw_dev, int soft_reset)
1613 int tries = 0, retval = 1, flashed = 0, do_soft_reset = soft_reset; 1639 int tries = 0, retval = 1, flashed = 0, do_soft_reset = soft_reset;
1614 1640
1615 while (tries < TW_MAX_RESET_TRIES) { 1641 while (tries < TW_MAX_RESET_TRIES) {
1616 if (do_soft_reset) 1642 if (do_soft_reset) {
1617 TW_SOFT_RESET(tw_dev); 1643 TW_SOFT_RESET(tw_dev);
1644 /* Clear pchip/response queue on 9550SX */
1645 if (twa_empty_response_queue_large(tw_dev)) {
1646 TW_PRINTK(tw_dev->host, TW_DRIVER, 0x36, "Response queue (large) empty failed during reset sequence");
1647 do_soft_reset = 1;
1648 tries++;
1649 continue;
1650 }
1651 }
1618 1652
1619 /* Make sure controller is in a good state */ 1653 /* Make sure controller is in a good state */
1620 if (twa_poll_status(tw_dev, TW_STATUS_MICROCONTROLLER_READY | (do_soft_reset == 1 ? TW_STATUS_ATTENTION_INTERRUPT : 0), 60)) { 1654 if (twa_poll_status(tw_dev, TW_STATUS_MICROCONTROLLER_READY | (do_soft_reset == 1 ? TW_STATUS_ATTENTION_INTERRUPT : 0), 60)) {
@@ -2034,7 +2068,10 @@ static int __devinit twa_probe(struct pci_dev *pdev, const struct pci_device_id
2034 goto out_free_device_extension; 2068 goto out_free_device_extension;
2035 } 2069 }
2036 2070
2037 mem_addr = pci_resource_start(pdev, 1); 2071 if (pdev->device == PCI_DEVICE_ID_3WARE_9000)
2072 mem_addr = pci_resource_start(pdev, 1);
2073 else
2074 mem_addr = pci_resource_start(pdev, 2);
2038 2075
2039 /* Save base address */ 2076 /* Save base address */
2040 tw_dev->base_addr = ioremap(mem_addr, PAGE_SIZE); 2077 tw_dev->base_addr = ioremap(mem_addr, PAGE_SIZE);
@@ -2148,6 +2185,8 @@ static void twa_remove(struct pci_dev *pdev)
2148static struct pci_device_id twa_pci_tbl[] __devinitdata = { 2185static struct pci_device_id twa_pci_tbl[] __devinitdata = {
2149 { PCI_VENDOR_ID_3WARE, PCI_DEVICE_ID_3WARE_9000, 2186 { PCI_VENDOR_ID_3WARE, PCI_DEVICE_ID_3WARE_9000,
2150 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, 2187 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
2188 { PCI_VENDOR_ID_3WARE, PCI_DEVICE_ID_3WARE_9550SX,
2189 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
2151 { } 2190 { }
2152}; 2191};
2153MODULE_DEVICE_TABLE(pci, twa_pci_tbl); 2192MODULE_DEVICE_TABLE(pci, twa_pci_tbl);
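
The 3w-9xxx.c additions wire up the 9550SX: a second entry in twa_pci_tbl, a different BAR for the register window, and a bounded drain of the large response queue during reset with a 500 ms settle delay once the controller signals completion. The loop below sketches that polling shape; the register handles and the EXAMPLE_* constants are hypothetical stand-ins, not the driver's own definitions.

#define EXAMPLE_STATUS_QUEUE_EMPTY	0x00004000
#define EXAMPLE_DRAIN_COMPLETED		0xFFFF
#define EXAMPLE_MAX_DRAIN		256

static int example_drain_response_queue(void __iomem *status_reg,
					void __iomem *response_reg)
{
	u32 status = readl(status_reg);
	int count = 0;

	while (!(status & EXAMPLE_STATUS_QUEUE_EMPTY) &&
	       count < EXAMPLE_MAX_DRAIN) {
		u32 response = readl(response_reg);

		if ((response & 0xFFFF) == EXAMPLE_DRAIN_COMPLETED) {
			msleep(500);	/* give the P-chip time to settle */
			return 0;
		}
		status = readl(status_reg);
		count++;
	}
	return (count == EXAMPLE_MAX_DRAIN) ? -EIO : 0;
}
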
diff --git a/drivers/scsi/3w-9xxx.h b/drivers/scsi/3w-9xxx.h
index 8c8ecbed3b58..46f22cdc8298 100644
--- a/drivers/scsi/3w-9xxx.h
+++ b/drivers/scsi/3w-9xxx.h
@@ -267,7 +267,6 @@ static twa_message_type twa_error_table[] = {
267#define TW_CONTROL_CLEAR_PARITY_ERROR 0x00800000 267#define TW_CONTROL_CLEAR_PARITY_ERROR 0x00800000
268#define TW_CONTROL_CLEAR_QUEUE_ERROR 0x00400000 268#define TW_CONTROL_CLEAR_QUEUE_ERROR 0x00400000
269#define TW_CONTROL_CLEAR_PCI_ABORT 0x00100000 269#define TW_CONTROL_CLEAR_PCI_ABORT 0x00100000
270#define TW_CONTROL_CLEAR_SBUF_WRITE_ERROR 0x00000008
271 270
272/* Status register bit definitions */ 271/* Status register bit definitions */
273#define TW_STATUS_MAJOR_VERSION_MASK 0xF0000000 272#define TW_STATUS_MAJOR_VERSION_MASK 0xF0000000
@@ -285,9 +284,8 @@ static twa_message_type twa_error_table[] = {
285#define TW_STATUS_MICROCONTROLLER_READY 0x00002000 284#define TW_STATUS_MICROCONTROLLER_READY 0x00002000
286#define TW_STATUS_COMMAND_QUEUE_EMPTY 0x00001000 285#define TW_STATUS_COMMAND_QUEUE_EMPTY 0x00001000
287#define TW_STATUS_EXPECTED_BITS 0x00002000 286#define TW_STATUS_EXPECTED_BITS 0x00002000
288#define TW_STATUS_UNEXPECTED_BITS 0x00F00008 287#define TW_STATUS_UNEXPECTED_BITS 0x00F00000
289#define TW_STATUS_SBUF_WRITE_ERROR 0x00000008 288#define TW_STATUS_VALID_INTERRUPT 0x00DF0000
290#define TW_STATUS_VALID_INTERRUPT 0x00DF0008
291 289
292/* RESPONSE QUEUE BIT DEFINITIONS */ 290/* RESPONSE QUEUE BIT DEFINITIONS */
293#define TW_RESPONSE_ID_MASK 0x00000FF0 291#define TW_RESPONSE_ID_MASK 0x00000FF0
@@ -324,9 +322,9 @@ static twa_message_type twa_error_table[] = {
324 322
325/* Compatibility defines */ 323/* Compatibility defines */
326#define TW_9000_ARCH_ID 0x5 324#define TW_9000_ARCH_ID 0x5
327#define TW_CURRENT_DRIVER_SRL 28 325#define TW_CURRENT_DRIVER_SRL 30
328#define TW_CURRENT_DRIVER_BUILD 9 326#define TW_CURRENT_DRIVER_BUILD 80
329#define TW_CURRENT_DRIVER_BRANCH 4 327#define TW_CURRENT_DRIVER_BRANCH 0
330 328
331/* Phase defines */ 329/* Phase defines */
332#define TW_PHASE_INITIAL 0 330#define TW_PHASE_INITIAL 0
@@ -334,6 +332,7 @@ static twa_message_type twa_error_table[] = {
334#define TW_PHASE_SGLIST 2 332#define TW_PHASE_SGLIST 2
335 333
336/* Misc defines */ 334/* Misc defines */
335#define TW_9550SX_DRAIN_COMPLETED 0xFFFF
337#define TW_SECTOR_SIZE 512 336#define TW_SECTOR_SIZE 512
338#define TW_ALIGNMENT_9000 4 /* 4 bytes */ 337#define TW_ALIGNMENT_9000 4 /* 4 bytes */
339#define TW_ALIGNMENT_9000_SGL 0x3 338#define TW_ALIGNMENT_9000_SGL 0x3
@@ -417,6 +416,9 @@ static twa_message_type twa_error_table[] = {
417#ifndef PCI_DEVICE_ID_3WARE_9000 416#ifndef PCI_DEVICE_ID_3WARE_9000
418#define PCI_DEVICE_ID_3WARE_9000 0x1002 417#define PCI_DEVICE_ID_3WARE_9000 0x1002
419#endif 418#endif
419#ifndef PCI_DEVICE_ID_3WARE_9550SX
420#define PCI_DEVICE_ID_3WARE_9550SX 0x1003
421#endif
420 422
421/* Bitmask macros to eliminate bitfields */ 423/* Bitmask macros to eliminate bitfields */
422 424
@@ -443,6 +445,7 @@ static twa_message_type twa_error_table[] = {
443#define TW_STATUS_REG_ADDR(x) ((unsigned char __iomem *)x->base_addr + 0x4) 445#define TW_STATUS_REG_ADDR(x) ((unsigned char __iomem *)x->base_addr + 0x4)
444#define TW_COMMAND_QUEUE_REG_ADDR(x) (sizeof(dma_addr_t) > 4 ? ((unsigned char __iomem *)x->base_addr + 0x20) : ((unsigned char __iomem *)x->base_addr + 0x8)) 446#define TW_COMMAND_QUEUE_REG_ADDR(x) (sizeof(dma_addr_t) > 4 ? ((unsigned char __iomem *)x->base_addr + 0x20) : ((unsigned char __iomem *)x->base_addr + 0x8))
445#define TW_RESPONSE_QUEUE_REG_ADDR(x) ((unsigned char __iomem *)x->base_addr + 0xC) 447#define TW_RESPONSE_QUEUE_REG_ADDR(x) ((unsigned char __iomem *)x->base_addr + 0xC)
448#define TW_RESPONSE_QUEUE_REG_ADDR_LARGE(x) ((unsigned char __iomem *)x->base_addr + 0x30)
446#define TW_CLEAR_ALL_INTERRUPTS(x) (writel(TW_STATUS_VALID_INTERRUPT, TW_CONTROL_REG_ADDR(x))) 449#define TW_CLEAR_ALL_INTERRUPTS(x) (writel(TW_STATUS_VALID_INTERRUPT, TW_CONTROL_REG_ADDR(x)))
447#define TW_CLEAR_ATTENTION_INTERRUPT(x) (writel(TW_CONTROL_CLEAR_ATTENTION_INTERRUPT, TW_CONTROL_REG_ADDR(x))) 450#define TW_CLEAR_ATTENTION_INTERRUPT(x) (writel(TW_CONTROL_CLEAR_ATTENTION_INTERRUPT, TW_CONTROL_REG_ADDR(x)))
448#define TW_CLEAR_HOST_INTERRUPT(x) (writel(TW_CONTROL_CLEAR_HOST_INTERRUPT, TW_CONTROL_REG_ADDR(x))) 451#define TW_CLEAR_HOST_INTERRUPT(x) (writel(TW_CONTROL_CLEAR_HOST_INTERRUPT, TW_CONTROL_REG_ADDR(x)))
diff --git a/drivers/scsi/Makefile b/drivers/scsi/Makefile
index 1e4edbdf2730..48529d180ca8 100644
--- a/drivers/scsi/Makefile
+++ b/drivers/scsi/Makefile
@@ -99,6 +99,7 @@ obj-$(CONFIG_SCSI_DC395x) += dc395x.o
99obj-$(CONFIG_SCSI_DC390T) += tmscsim.o 99obj-$(CONFIG_SCSI_DC390T) += tmscsim.o
100obj-$(CONFIG_MEGARAID_LEGACY) += megaraid.o 100obj-$(CONFIG_MEGARAID_LEGACY) += megaraid.o
101obj-$(CONFIG_MEGARAID_NEWGEN) += megaraid/ 101obj-$(CONFIG_MEGARAID_NEWGEN) += megaraid/
102obj-$(CONFIG_MEGARAID_SAS) += megaraid/
102obj-$(CONFIG_SCSI_ACARD) += atp870u.o 103obj-$(CONFIG_SCSI_ACARD) += atp870u.o
103obj-$(CONFIG_SCSI_SUNESP) += esp.o 104obj-$(CONFIG_SCSI_SUNESP) += esp.o
104obj-$(CONFIG_SCSI_GDTH) += gdth.o 105obj-$(CONFIG_SCSI_GDTH) += gdth.o
diff --git a/drivers/scsi/aacraid/aachba.c b/drivers/scsi/aacraid/aachba.c
index a8e3dfcd0dc7..93416f760e5a 100644
--- a/drivers/scsi/aacraid/aachba.c
+++ b/drivers/scsi/aacraid/aachba.c
@@ -313,18 +313,37 @@ int aac_get_containers(struct aac_dev *dev)
313 } 313 }
314 dresp = (struct aac_mount *)fib_data(fibptr); 314 dresp = (struct aac_mount *)fib_data(fibptr);
315 315
316 if ((le32_to_cpu(dresp->status) == ST_OK) &&
317 (le32_to_cpu(dresp->mnt[0].vol) == CT_NONE)) {
318 dinfo->command = cpu_to_le32(VM_NameServe64);
319 dinfo->count = cpu_to_le32(index);
320 dinfo->type = cpu_to_le32(FT_FILESYS);
321
322 if (fib_send(ContainerCommand,
323 fibptr,
324 sizeof(struct aac_query_mount),
325 FsaNormal,
326 1, 1,
327 NULL, NULL) < 0)
328 continue;
329 } else
330 dresp->mnt[0].capacityhigh = 0;
331
316 dprintk ((KERN_DEBUG 332 dprintk ((KERN_DEBUG
317 "VM_NameServe cid=%d status=%d vol=%d state=%d cap=%u\n", 333 "VM_NameServe cid=%d status=%d vol=%d state=%d cap=%llu\n",
318 (int)index, (int)le32_to_cpu(dresp->status), 334 (int)index, (int)le32_to_cpu(dresp->status),
319 (int)le32_to_cpu(dresp->mnt[0].vol), 335 (int)le32_to_cpu(dresp->mnt[0].vol),
320 (int)le32_to_cpu(dresp->mnt[0].state), 336 (int)le32_to_cpu(dresp->mnt[0].state),
321 (unsigned)le32_to_cpu(dresp->mnt[0].capacity))); 337 ((u64)le32_to_cpu(dresp->mnt[0].capacity)) +
338 (((u64)le32_to_cpu(dresp->mnt[0].capacityhigh)) << 32)));
322 if ((le32_to_cpu(dresp->status) == ST_OK) && 339 if ((le32_to_cpu(dresp->status) == ST_OK) &&
323 (le32_to_cpu(dresp->mnt[0].vol) != CT_NONE) && 340 (le32_to_cpu(dresp->mnt[0].vol) != CT_NONE) &&
324 (le32_to_cpu(dresp->mnt[0].state) != FSCS_HIDDEN)) { 341 (le32_to_cpu(dresp->mnt[0].state) != FSCS_HIDDEN)) {
325 fsa_dev_ptr[index].valid = 1; 342 fsa_dev_ptr[index].valid = 1;
326 fsa_dev_ptr[index].type = le32_to_cpu(dresp->mnt[0].vol); 343 fsa_dev_ptr[index].type = le32_to_cpu(dresp->mnt[0].vol);
327 fsa_dev_ptr[index].size = le32_to_cpu(dresp->mnt[0].capacity); 344 fsa_dev_ptr[index].size
345 = ((u64)le32_to_cpu(dresp->mnt[0].capacity)) +
346 (((u64)le32_to_cpu(dresp->mnt[0].capacityhigh)) << 32);
328 if (le32_to_cpu(dresp->mnt[0].state) & FSCS_READONLY) 347 if (le32_to_cpu(dresp->mnt[0].state) & FSCS_READONLY)
329 fsa_dev_ptr[index].ro = 1; 348 fsa_dev_ptr[index].ro = 1;
330 } 349 }
@@ -460,7 +479,7 @@ static int aac_get_container_name(struct scsi_cmnd * scsicmd, int cid)
460 * is updated in the struct fsa_dev_info structure rather than returned. 479 * is updated in the struct fsa_dev_info structure rather than returned.
461 */ 480 */
462 481
463static int probe_container(struct aac_dev *dev, int cid) 482int probe_container(struct aac_dev *dev, int cid)
464{ 483{
465 struct fsa_dev_info *fsa_dev_ptr; 484 struct fsa_dev_info *fsa_dev_ptr;
466 int status; 485 int status;
@@ -497,11 +516,29 @@ static int probe_container(struct aac_dev *dev, int cid)
497 dresp = (struct aac_mount *) fib_data(fibptr); 516 dresp = (struct aac_mount *) fib_data(fibptr);
498 517
499 if ((le32_to_cpu(dresp->status) == ST_OK) && 518 if ((le32_to_cpu(dresp->status) == ST_OK) &&
519 (le32_to_cpu(dresp->mnt[0].vol) == CT_NONE)) {
520 dinfo->command = cpu_to_le32(VM_NameServe64);
521 dinfo->count = cpu_to_le32(cid);
522 dinfo->type = cpu_to_le32(FT_FILESYS);
523
524 if (fib_send(ContainerCommand,
525 fibptr,
526 sizeof(struct aac_query_mount),
527 FsaNormal,
528 1, 1,
529 NULL, NULL) < 0)
530 goto error;
531 } else
532 dresp->mnt[0].capacityhigh = 0;
533
534 if ((le32_to_cpu(dresp->status) == ST_OK) &&
500 (le32_to_cpu(dresp->mnt[0].vol) != CT_NONE) && 535 (le32_to_cpu(dresp->mnt[0].vol) != CT_NONE) &&
501 (le32_to_cpu(dresp->mnt[0].state) != FSCS_HIDDEN)) { 536 (le32_to_cpu(dresp->mnt[0].state) != FSCS_HIDDEN)) {
502 fsa_dev_ptr[cid].valid = 1; 537 fsa_dev_ptr[cid].valid = 1;
503 fsa_dev_ptr[cid].type = le32_to_cpu(dresp->mnt[0].vol); 538 fsa_dev_ptr[cid].type = le32_to_cpu(dresp->mnt[0].vol);
504 fsa_dev_ptr[cid].size = le32_to_cpu(dresp->mnt[0].capacity); 539 fsa_dev_ptr[cid].size
540 = ((u64)le32_to_cpu(dresp->mnt[0].capacity)) +
541 (((u64)le32_to_cpu(dresp->mnt[0].capacityhigh)) << 32);
505 if (le32_to_cpu(dresp->mnt[0].state) & FSCS_READONLY) 542 if (le32_to_cpu(dresp->mnt[0].state) & FSCS_READONLY)
506 fsa_dev_ptr[cid].ro = 1; 543 fsa_dev_ptr[cid].ro = 1;
507 } 544 }
@@ -655,7 +692,7 @@ int aac_get_adapter_info(struct aac_dev* dev)
655 fibptr, 692 fibptr,
656 sizeof(*info), 693 sizeof(*info),
657 FsaNormal, 694 FsaNormal,
658 1, 1, 695 -1, 1, /* First `interrupt' command uses special wait */
659 NULL, 696 NULL,
660 NULL); 697 NULL);
661 698
@@ -806,8 +843,8 @@ int aac_get_adapter_info(struct aac_dev* dev)
806 if (!(dev->raw_io_interface)) { 843 if (!(dev->raw_io_interface)) {
807 dev->scsi_host_ptr->sg_tablesize = (dev->max_fib_size - 844 dev->scsi_host_ptr->sg_tablesize = (dev->max_fib_size -
808 sizeof(struct aac_fibhdr) - 845 sizeof(struct aac_fibhdr) -
809 sizeof(struct aac_write) + sizeof(struct sgmap)) / 846 sizeof(struct aac_write) + sizeof(struct sgentry)) /
810 sizeof(struct sgmap); 847 sizeof(struct sgentry);
811 if (dev->dac_support) { 848 if (dev->dac_support) {
812 /* 849 /*
813 * 38 scatter gather elements 850 * 38 scatter gather elements
@@ -816,8 +853,8 @@ int aac_get_adapter_info(struct aac_dev* dev)
816 (dev->max_fib_size - 853 (dev->max_fib_size -
817 sizeof(struct aac_fibhdr) - 854 sizeof(struct aac_fibhdr) -
818 sizeof(struct aac_write64) + 855 sizeof(struct aac_write64) +
819 sizeof(struct sgmap64)) / 856 sizeof(struct sgentry64)) /
820 sizeof(struct sgmap64); 857 sizeof(struct sgentry64);
821 } 858 }
822 dev->scsi_host_ptr->max_sectors = AAC_MAX_32BIT_SGBCOUNT; 859 dev->scsi_host_ptr->max_sectors = AAC_MAX_32BIT_SGBCOUNT;
823 if(!(dev->adapter_info.options & AAC_OPT_NEW_COMM)) { 860 if(!(dev->adapter_info.options & AAC_OPT_NEW_COMM)) {
@@ -854,7 +891,40 @@ static void io_callback(void *context, struct fib * fibptr)
854 dev = (struct aac_dev *)scsicmd->device->host->hostdata; 891 dev = (struct aac_dev *)scsicmd->device->host->hostdata;
855 cid = ID_LUN_TO_CONTAINER(scsicmd->device->id, scsicmd->device->lun); 892 cid = ID_LUN_TO_CONTAINER(scsicmd->device->id, scsicmd->device->lun);
856 893
857 dprintk((KERN_DEBUG "io_callback[cpu %d]: lba = %u, t = %ld.\n", smp_processor_id(), ((scsicmd->cmnd[1] & 0x1F) << 16) | (scsicmd->cmnd[2] << 8) | scsicmd->cmnd[3], jiffies)); 894 if (nblank(dprintk(x))) {
895 u64 lba;
896 switch (scsicmd->cmnd[0]) {
897 case WRITE_6:
898 case READ_6:
899 lba = ((scsicmd->cmnd[1] & 0x1F) << 16) |
900 (scsicmd->cmnd[2] << 8) | scsicmd->cmnd[3];
901 break;
902 case WRITE_16:
903 case READ_16:
904 lba = ((u64)scsicmd->cmnd[2] << 56) |
905 ((u64)scsicmd->cmnd[3] << 48) |
906 ((u64)scsicmd->cmnd[4] << 40) |
907 ((u64)scsicmd->cmnd[5] << 32) |
908 ((u64)scsicmd->cmnd[6] << 24) |
909 (scsicmd->cmnd[7] << 16) |
910 (scsicmd->cmnd[8] << 8) | scsicmd->cmnd[9];
911 break;
912 case WRITE_12:
913 case READ_12:
914 lba = ((u64)scsicmd->cmnd[2] << 24) |
915 (scsicmd->cmnd[3] << 16) |
916 (scsicmd->cmnd[4] << 8) | scsicmd->cmnd[5];
917 break;
918 default:
919 lba = ((u64)scsicmd->cmnd[2] << 24) |
920 (scsicmd->cmnd[3] << 16) |
921 (scsicmd->cmnd[4] << 8) | scsicmd->cmnd[5];
922 break;
923 }
924 printk(KERN_DEBUG
925 "io_callback[cpu %d]: lba = %llu, t = %ld.\n",
926 smp_processor_id(), (unsigned long long)lba, jiffies);
927 }
858 928
859 if (fibptr == NULL) 929 if (fibptr == NULL)
860 BUG(); 930 BUG();
@@ -895,7 +965,7 @@ static void io_callback(void *context, struct fib * fibptr)
895 965
896static int aac_read(struct scsi_cmnd * scsicmd, int cid) 966static int aac_read(struct scsi_cmnd * scsicmd, int cid)
897{ 967{
898 u32 lba; 968 u64 lba;
899 u32 count; 969 u32 count;
900 int status; 970 int status;
901 971
@@ -907,23 +977,69 @@ static int aac_read(struct scsi_cmnd * scsicmd, int cid)
907 /* 977 /*
908 * Get block address and transfer length 978 * Get block address and transfer length
909 */ 979 */
910 if (scsicmd->cmnd[0] == READ_6) /* 6 byte command */ 980 switch (scsicmd->cmnd[0]) {
911 { 981 case READ_6:
912 dprintk((KERN_DEBUG "aachba: received a read(6) command on id %d.\n", cid)); 982 dprintk((KERN_DEBUG "aachba: received a read(6) command on id %d.\n", cid));
913 983
914 lba = ((scsicmd->cmnd[1] & 0x1F) << 16) | (scsicmd->cmnd[2] << 8) | scsicmd->cmnd[3]; 984 lba = ((scsicmd->cmnd[1] & 0x1F) << 16) |
985 (scsicmd->cmnd[2] << 8) | scsicmd->cmnd[3];
915 count = scsicmd->cmnd[4]; 986 count = scsicmd->cmnd[4];
916 987
917 if (count == 0) 988 if (count == 0)
918 count = 256; 989 count = 256;
919 } else { 990 break;
991 case READ_16:
992 dprintk((KERN_DEBUG "aachba: received a read(16) command on id %d.\n", cid));
993
994 lba = ((u64)scsicmd->cmnd[2] << 56) |
995 ((u64)scsicmd->cmnd[3] << 48) |
996 ((u64)scsicmd->cmnd[4] << 40) |
997 ((u64)scsicmd->cmnd[5] << 32) |
998 ((u64)scsicmd->cmnd[6] << 24) |
999 (scsicmd->cmnd[7] << 16) |
1000 (scsicmd->cmnd[8] << 8) | scsicmd->cmnd[9];
1001 count = (scsicmd->cmnd[10] << 24) |
1002 (scsicmd->cmnd[11] << 16) |
1003 (scsicmd->cmnd[12] << 8) | scsicmd->cmnd[13];
1004 break;
1005 case READ_12:
1006 dprintk((KERN_DEBUG "aachba: received a read(12) command on id %d.\n", cid));
1007
1008 lba = ((u64)scsicmd->cmnd[2] << 24) |
1009 (scsicmd->cmnd[3] << 16) |
1010 (scsicmd->cmnd[4] << 8) | scsicmd->cmnd[5];
1011 count = (scsicmd->cmnd[6] << 24) |
1012 (scsicmd->cmnd[7] << 16) |
1013 (scsicmd->cmnd[8] << 8) | scsicmd->cmnd[9];
1014 break;
1015 default:
920 dprintk((KERN_DEBUG "aachba: received a read(10) command on id %d.\n", cid)); 1016 dprintk((KERN_DEBUG "aachba: received a read(10) command on id %d.\n", cid));
921 1017
922 lba = (scsicmd->cmnd[2] << 24) | (scsicmd->cmnd[3] << 16) | (scsicmd->cmnd[4] << 8) | scsicmd->cmnd[5]; 1018 lba = ((u64)scsicmd->cmnd[2] << 24) |
1019 (scsicmd->cmnd[3] << 16) |
1020 (scsicmd->cmnd[4] << 8) | scsicmd->cmnd[5];
923 count = (scsicmd->cmnd[7] << 8) | scsicmd->cmnd[8]; 1021 count = (scsicmd->cmnd[7] << 8) | scsicmd->cmnd[8];
1022 break;
924 } 1023 }
925 dprintk((KERN_DEBUG "aac_read[cpu %d]: lba = %u, t = %ld.\n", 1024 dprintk((KERN_DEBUG "aac_read[cpu %d]: lba = %llu, t = %ld.\n",
926 smp_processor_id(), (unsigned long long)lba, jiffies)); 1025 smp_processor_id(), (unsigned long long)lba, jiffies));
1026 if ((!(dev->raw_io_interface) || !(dev->raw_io_64)) &&
1027 (lba & 0xffffffff00000000LL)) {
1028 dprintk((KERN_DEBUG "aac_read: Illegal lba\n"));
1029 scsicmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8 |
1030 SAM_STAT_CHECK_CONDITION;
1031 set_sense((u8 *) &dev->fsa_dev[cid].sense_data,
1032 HARDWARE_ERROR,
1033 SENCODE_INTERNAL_TARGET_FAILURE,
1034 ASENCODE_INTERNAL_TARGET_FAILURE, 0, 0,
1035 0, 0);
1036 memcpy(scsicmd->sense_buffer, &dev->fsa_dev[cid].sense_data,
1037 (sizeof(dev->fsa_dev[cid].sense_data) > sizeof(scsicmd->sense_buffer))
1038 ? sizeof(scsicmd->sense_buffer)
1039 : sizeof(dev->fsa_dev[cid].sense_data));
1040 scsicmd->scsi_done(scsicmd);
1041 return 0;
1042 }
927 /* 1043 /*
928 * Alocate and initialize a Fib 1044 * Alocate and initialize a Fib
929 */ 1045 */
@@ -936,8 +1052,8 @@ static int aac_read(struct scsi_cmnd * scsicmd, int cid)
936 if (dev->raw_io_interface) { 1052 if (dev->raw_io_interface) {
937 struct aac_raw_io *readcmd; 1053 struct aac_raw_io *readcmd;
938 readcmd = (struct aac_raw_io *) fib_data(cmd_fibcontext); 1054 readcmd = (struct aac_raw_io *) fib_data(cmd_fibcontext);
939 readcmd->block[0] = cpu_to_le32(lba); 1055 readcmd->block[0] = cpu_to_le32((u32)(lba&0xffffffff));
940 readcmd->block[1] = 0; 1056 readcmd->block[1] = cpu_to_le32((u32)((lba&0xffffffff00000000LL)>>32));
941 readcmd->count = cpu_to_le32(count<<9); 1057 readcmd->count = cpu_to_le32(count<<9);
942 readcmd->cid = cpu_to_le16(cid); 1058 readcmd->cid = cpu_to_le16(cid);
943 readcmd->flags = cpu_to_le16(1); 1059 readcmd->flags = cpu_to_le16(1);
@@ -964,7 +1080,7 @@ static int aac_read(struct scsi_cmnd * scsicmd, int cid)
964 readcmd->command = cpu_to_le32(VM_CtHostRead64); 1080 readcmd->command = cpu_to_le32(VM_CtHostRead64);
965 readcmd->cid = cpu_to_le16(cid); 1081 readcmd->cid = cpu_to_le16(cid);
966 readcmd->sector_count = cpu_to_le16(count); 1082 readcmd->sector_count = cpu_to_le16(count);
967 readcmd->block = cpu_to_le32(lba); 1083 readcmd->block = cpu_to_le32((u32)(lba&0xffffffff));
968 readcmd->pad = 0; 1084 readcmd->pad = 0;
969 readcmd->flags = 0; 1085 readcmd->flags = 0;
970 1086
@@ -989,7 +1105,7 @@ static int aac_read(struct scsi_cmnd * scsicmd, int cid)
989 readcmd = (struct aac_read *) fib_data(cmd_fibcontext); 1105 readcmd = (struct aac_read *) fib_data(cmd_fibcontext);
990 readcmd->command = cpu_to_le32(VM_CtBlockRead); 1106 readcmd->command = cpu_to_le32(VM_CtBlockRead);
991 readcmd->cid = cpu_to_le32(cid); 1107 readcmd->cid = cpu_to_le32(cid);
992 readcmd->block = cpu_to_le32(lba); 1108 readcmd->block = cpu_to_le32((u32)(lba&0xffffffff));
993 readcmd->count = cpu_to_le32(count * 512); 1109 readcmd->count = cpu_to_le32(count * 512);
994 1110
995 aac_build_sg(scsicmd, &readcmd->sg); 1111 aac_build_sg(scsicmd, &readcmd->sg);
@@ -1031,7 +1147,7 @@ static int aac_read(struct scsi_cmnd * scsicmd, int cid)
1031 1147
1032static int aac_write(struct scsi_cmnd * scsicmd, int cid) 1148static int aac_write(struct scsi_cmnd * scsicmd, int cid)
1033{ 1149{
1034 u32 lba; 1150 u64 lba;
1035 u32 count; 1151 u32 count;
1036 int status; 1152 int status;
1037 u16 fibsize; 1153 u16 fibsize;
@@ -1048,13 +1164,48 @@ static int aac_write(struct scsi_cmnd * scsicmd, int cid)
1048 count = scsicmd->cmnd[4]; 1164 count = scsicmd->cmnd[4];
1049 if (count == 0) 1165 if (count == 0)
1050 count = 256; 1166 count = 256;
1167 } else if (scsicmd->cmnd[0] == WRITE_16) { /* 16 byte command */
1168 dprintk((KERN_DEBUG "aachba: received a write(16) command on id %d.\n", cid));
1169
1170 lba = ((u64)scsicmd->cmnd[2] << 56) |
1171 ((u64)scsicmd->cmnd[3] << 48) |
1172 ((u64)scsicmd->cmnd[4] << 40) |
1173 ((u64)scsicmd->cmnd[5] << 32) |
1174 ((u64)scsicmd->cmnd[6] << 24) |
1175 (scsicmd->cmnd[7] << 16) |
1176 (scsicmd->cmnd[8] << 8) | scsicmd->cmnd[9];
1177 count = (scsicmd->cmnd[10] << 24) | (scsicmd->cmnd[11] << 16) |
1178 (scsicmd->cmnd[12] << 8) | scsicmd->cmnd[13];
1179 } else if (scsicmd->cmnd[0] == WRITE_12) { /* 12 byte command */
1180 dprintk((KERN_DEBUG "aachba: received a write(12) command on id %d.\n", cid));
1181
1182 lba = ((u64)scsicmd->cmnd[2] << 24) | (scsicmd->cmnd[3] << 16)
1183 | (scsicmd->cmnd[4] << 8) | scsicmd->cmnd[5];
1184 count = (scsicmd->cmnd[6] << 24) | (scsicmd->cmnd[7] << 16)
1185 | (scsicmd->cmnd[8] << 8) | scsicmd->cmnd[9];
1051 } else { 1186 } else {
1052 dprintk((KERN_DEBUG "aachba: received a write(10) command on id %d.\n", cid)); 1187 dprintk((KERN_DEBUG "aachba: received a write(10) command on id %d.\n", cid));
1053 lba = (scsicmd->cmnd[2] << 24) | (scsicmd->cmnd[3] << 16) | (scsicmd->cmnd[4] << 8) | scsicmd->cmnd[5]; 1188 lba = ((u64)scsicmd->cmnd[2] << 24) | (scsicmd->cmnd[3] << 16) | (scsicmd->cmnd[4] << 8) | scsicmd->cmnd[5];
1054 count = (scsicmd->cmnd[7] << 8) | scsicmd->cmnd[8]; 1189 count = (scsicmd->cmnd[7] << 8) | scsicmd->cmnd[8];
1055 } 1190 }
1056 dprintk((KERN_DEBUG "aac_write[cpu %d]: lba = %u, t = %ld.\n", 1191 dprintk((KERN_DEBUG "aac_write[cpu %d]: lba = %llu, t = %ld.\n",
1057 smp_processor_id(), (unsigned long long)lba, jiffies)); 1192 smp_processor_id(), (unsigned long long)lba, jiffies));
1193 if ((!(dev->raw_io_interface) || !(dev->raw_io_64))
1194 && (lba & 0xffffffff00000000LL)) {
1195 dprintk((KERN_DEBUG "aac_write: Illegal lba\n"));
1196 scsicmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8 | SAM_STAT_CHECK_CONDITION;
1197 set_sense((u8 *) &dev->fsa_dev[cid].sense_data,
1198 HARDWARE_ERROR,
1199 SENCODE_INTERNAL_TARGET_FAILURE,
1200 ASENCODE_INTERNAL_TARGET_FAILURE, 0, 0,
1201 0, 0);
1202 memcpy(scsicmd->sense_buffer, &dev->fsa_dev[cid].sense_data,
1203 (sizeof(dev->fsa_dev[cid].sense_data) > sizeof(scsicmd->sense_buffer))
1204 ? sizeof(scsicmd->sense_buffer)
1205 : sizeof(dev->fsa_dev[cid].sense_data));
1206 scsicmd->scsi_done(scsicmd);
1207 return 0;
1208 }
1058 /* 1209 /*
1059 * Allocate and initialize a Fib then setup a BlockWrite command 1210 * Allocate and initialize a Fib then setup a BlockWrite command
1060 */ 1211 */
@@ -1068,8 +1219,8 @@ static int aac_write(struct scsi_cmnd * scsicmd, int cid)
1068 if (dev->raw_io_interface) { 1219 if (dev->raw_io_interface) {
1069 struct aac_raw_io *writecmd; 1220 struct aac_raw_io *writecmd;
1070 writecmd = (struct aac_raw_io *) fib_data(cmd_fibcontext); 1221 writecmd = (struct aac_raw_io *) fib_data(cmd_fibcontext);
1071 writecmd->block[0] = cpu_to_le32(lba); 1222 writecmd->block[0] = cpu_to_le32((u32)(lba&0xffffffff));
1072 writecmd->block[1] = 0; 1223 writecmd->block[1] = cpu_to_le32((u32)((lba&0xffffffff00000000LL)>>32));
1073 writecmd->count = cpu_to_le32(count<<9); 1224 writecmd->count = cpu_to_le32(count<<9);
1074 writecmd->cid = cpu_to_le16(cid); 1225 writecmd->cid = cpu_to_le16(cid);
1075 writecmd->flags = 0; 1226 writecmd->flags = 0;
@@ -1096,7 +1247,7 @@ static int aac_write(struct scsi_cmnd * scsicmd, int cid)
1096 writecmd->command = cpu_to_le32(VM_CtHostWrite64); 1247 writecmd->command = cpu_to_le32(VM_CtHostWrite64);
1097 writecmd->cid = cpu_to_le16(cid); 1248 writecmd->cid = cpu_to_le16(cid);
1098 writecmd->sector_count = cpu_to_le16(count); 1249 writecmd->sector_count = cpu_to_le16(count);
1099 writecmd->block = cpu_to_le32(lba); 1250 writecmd->block = cpu_to_le32((u32)(lba&0xffffffff));
1100 writecmd->pad = 0; 1251 writecmd->pad = 0;
1101 writecmd->flags = 0; 1252 writecmd->flags = 0;
1102 1253
@@ -1121,7 +1272,7 @@ static int aac_write(struct scsi_cmnd * scsicmd, int cid)
1121 writecmd = (struct aac_write *) fib_data(cmd_fibcontext); 1272 writecmd = (struct aac_write *) fib_data(cmd_fibcontext);
1122 writecmd->command = cpu_to_le32(VM_CtBlockWrite); 1273 writecmd->command = cpu_to_le32(VM_CtBlockWrite);
1123 writecmd->cid = cpu_to_le32(cid); 1274 writecmd->cid = cpu_to_le32(cid);
1124 writecmd->block = cpu_to_le32(lba); 1275 writecmd->block = cpu_to_le32((u32)(lba&0xffffffff));
1125 writecmd->count = cpu_to_le32(count * 512); 1276 writecmd->count = cpu_to_le32(count * 512);
1126 writecmd->sg.count = cpu_to_le32(1); 1277 writecmd->sg.count = cpu_to_le32(1);
1127 /* ->stable is not used - it did mean which type of write */ 1278 /* ->stable is not used - it did mean which type of write */
@@ -1310,11 +1461,18 @@ int aac_scsi_cmd(struct scsi_cmnd * scsicmd)
1310 */ 1461 */
1311 if ((fsa_dev_ptr[cid].valid & 1) == 0) { 1462 if ((fsa_dev_ptr[cid].valid & 1) == 0) {
1312 switch (scsicmd->cmnd[0]) { 1463 switch (scsicmd->cmnd[0]) {
1464 case SERVICE_ACTION_IN:
1465 if (!(dev->raw_io_interface) ||
1466 !(dev->raw_io_64) ||
1467 ((scsicmd->cmnd[1] & 0x1f) != SAI_READ_CAPACITY_16))
1468 break;
1313 case INQUIRY: 1469 case INQUIRY:
1314 case READ_CAPACITY: 1470 case READ_CAPACITY:
1315 case TEST_UNIT_READY: 1471 case TEST_UNIT_READY:
1316 spin_unlock_irq(host->host_lock); 1472 spin_unlock_irq(host->host_lock);
1317 probe_container(dev, cid); 1473 probe_container(dev, cid);
1474 if ((fsa_dev_ptr[cid].valid & 1) == 0)
1475 fsa_dev_ptr[cid].valid = 0;
1318 spin_lock_irq(host->host_lock); 1476 spin_lock_irq(host->host_lock);
1319 if (fsa_dev_ptr[cid].valid == 0) { 1477 if (fsa_dev_ptr[cid].valid == 0) {
1320 scsicmd->result = DID_NO_CONNECT << 16; 1478 scsicmd->result = DID_NO_CONNECT << 16;
@@ -1375,7 +1533,6 @@ int aac_scsi_cmd(struct scsi_cmnd * scsicmd)
1375 memset(&inq_data, 0, sizeof (struct inquiry_data)); 1533 memset(&inq_data, 0, sizeof (struct inquiry_data));
1376 1534
1377 inq_data.inqd_ver = 2; /* claim compliance to SCSI-2 */ 1535 inq_data.inqd_ver = 2; /* claim compliance to SCSI-2 */
1378 inq_data.inqd_dtq = 0x80; /* set RMB bit to one indicating that the medium is removable */
1379 inq_data.inqd_rdf = 2; /* A response data format value of two indicates that the data shall be in the format specified in SCSI-2 */ 1536 inq_data.inqd_rdf = 2; /* A response data format value of two indicates that the data shall be in the format specified in SCSI-2 */
1380 inq_data.inqd_len = 31; 1537 inq_data.inqd_len = 31;
1381 /*Format for "pad2" is RelAdr | WBus32 | WBus16 | Sync | Linked |Reserved| CmdQue | SftRe */ 1538 /*Format for "pad2" is RelAdr | WBus32 | WBus16 | Sync | Linked |Reserved| CmdQue | SftRe */
@@ -1397,13 +1554,55 @@ int aac_scsi_cmd(struct scsi_cmnd * scsicmd)
1397 aac_internal_transfer(scsicmd, &inq_data, 0, sizeof(inq_data)); 1554 aac_internal_transfer(scsicmd, &inq_data, 0, sizeof(inq_data));
1398 return aac_get_container_name(scsicmd, cid); 1555 return aac_get_container_name(scsicmd, cid);
1399 } 1556 }
1557 case SERVICE_ACTION_IN:
1558 if (!(dev->raw_io_interface) ||
1559 !(dev->raw_io_64) ||
1560 ((scsicmd->cmnd[1] & 0x1f) != SAI_READ_CAPACITY_16))
1561 break;
1562 {
1563 u64 capacity;
1564 char cp[12];
1565 unsigned int offset = 0;
1566
1567 dprintk((KERN_DEBUG "READ CAPACITY_16 command.\n"));
1568 capacity = fsa_dev_ptr[cid].size - 1;
1569 if (scsicmd->cmnd[13] > 12) {
1570 offset = scsicmd->cmnd[13] - 12;
1571 if (offset > sizeof(cp))
1572 break;
1573 memset(cp, 0, offset);
1574 aac_internal_transfer(scsicmd, cp, 0, offset);
1575 }
1576 cp[0] = (capacity >> 56) & 0xff;
1577 cp[1] = (capacity >> 48) & 0xff;
1578 cp[2] = (capacity >> 40) & 0xff;
1579 cp[3] = (capacity >> 32) & 0xff;
1580 cp[4] = (capacity >> 24) & 0xff;
1581 cp[5] = (capacity >> 16) & 0xff;
1582 cp[6] = (capacity >> 8) & 0xff;
1583 cp[7] = (capacity >> 0) & 0xff;
1584 cp[8] = 0;
1585 cp[9] = 0;
1586 cp[10] = 2;
1587 cp[11] = 0;
1588 aac_internal_transfer(scsicmd, cp, offset, sizeof(cp));
1589
1590 /* Do not cache partition table for arrays */
1591 scsicmd->device->removable = 1;
1592
1593 scsicmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8 | SAM_STAT_GOOD;
1594 scsicmd->scsi_done(scsicmd);
1595
1596 return 0;
1597 }
1598
1400 case READ_CAPACITY: 1599 case READ_CAPACITY:
1401 { 1600 {
1402 u32 capacity; 1601 u32 capacity;
1403 char cp[8]; 1602 char cp[8];
1404 1603
1405 dprintk((KERN_DEBUG "READ CAPACITY command.\n")); 1604 dprintk((KERN_DEBUG "READ CAPACITY command.\n"));
1406 if (fsa_dev_ptr[cid].size <= 0x100000000LL) 1605 if (fsa_dev_ptr[cid].size <= 0x100000000ULL)
1407 capacity = fsa_dev_ptr[cid].size - 1; 1606 capacity = fsa_dev_ptr[cid].size - 1;
1408 else 1607 else
1409 capacity = (u32)-1; 1608 capacity = (u32)-1;
@@ -1417,6 +1616,8 @@ int aac_scsi_cmd(struct scsi_cmnd * scsicmd)
1417 cp[6] = 2; 1616 cp[6] = 2;
1418 cp[7] = 0; 1617 cp[7] = 0;
1419 aac_internal_transfer(scsicmd, cp, 0, sizeof(cp)); 1618 aac_internal_transfer(scsicmd, cp, 0, sizeof(cp));
1619 /* Do not cache partition table for arrays */
1620 scsicmd->device->removable = 1;
1420 1621
1421 scsicmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8 | SAM_STAT_GOOD; 1622 scsicmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8 | SAM_STAT_GOOD;
1422 scsicmd->scsi_done(scsicmd); 1623 scsicmd->scsi_done(scsicmd);
@@ -1497,6 +1698,8 @@ int aac_scsi_cmd(struct scsi_cmnd * scsicmd)
1497 { 1698 {
1498 case READ_6: 1699 case READ_6:
1499 case READ_10: 1700 case READ_10:
1701 case READ_12:
1702 case READ_16:
1500 /* 1703 /*
1501 * Hack to keep track of ordinal number of the device that 1704 * Hack to keep track of ordinal number of the device that
1502 * corresponds to a container. Needed to convert 1705 * corresponds to a container. Needed to convert
@@ -1504,17 +1707,19 @@ int aac_scsi_cmd(struct scsi_cmnd * scsicmd)
1504 */ 1707 */
1505 1708
1506 spin_unlock_irq(host->host_lock); 1709 spin_unlock_irq(host->host_lock);
1507 if (scsicmd->request->rq_disk) 1710 if (scsicmd->request->rq_disk)
1508 memcpy(fsa_dev_ptr[cid].devname, 1711 strlcpy(fsa_dev_ptr[cid].devname,
1509 scsicmd->request->rq_disk->disk_name, 1712 scsicmd->request->rq_disk->disk_name,
1510 8); 1713 min(sizeof(fsa_dev_ptr[cid].devname),
1511 1714 sizeof(scsicmd->request->rq_disk->disk_name) + 1));
1512 ret = aac_read(scsicmd, cid); 1715 ret = aac_read(scsicmd, cid);
1513 spin_lock_irq(host->host_lock); 1716 spin_lock_irq(host->host_lock);
1514 return ret; 1717 return ret;
1515 1718
1516 case WRITE_6: 1719 case WRITE_6:
1517 case WRITE_10: 1720 case WRITE_10:
1721 case WRITE_12:
1722 case WRITE_16:
1518 spin_unlock_irq(host->host_lock); 1723 spin_unlock_irq(host->host_lock);
1519 ret = aac_write(scsicmd, cid); 1724 ret = aac_write(scsicmd, cid);
1520 spin_lock_irq(host->host_lock); 1725 spin_lock_irq(host->host_lock);
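
Note on the read/write hunk above: the fixed 8-byte memcpy of the gendisk name is replaced by strlcpy() bounded by the smaller of the destination size and the source size plus one, so devname is always NUL-terminated even when disk_name is longer. A standalone sketch of the same bounded-copy idea, using snprintf() in place of the kernel's strlcpy() (buffer sizes here are illustrative):

#include <stdio.h>

#define DEVNAME_LEN   8     /* assumed size of fsa_dev devname[]  */
#define DISK_NAME_LEN 32    /* assumed size of gendisk disk_name[] */

int main(void)
{
	char disk_name[DISK_NAME_LEN] = "sda";
	char devname[DEVNAME_LEN];

	/* Copy at most sizeof(devname) - 1 characters and always terminate,
	 * instead of blindly copying 8 bytes from a possibly longer source. */
	snprintf(devname, sizeof(devname), "%s", disk_name);
	printf("devname = \"%s\"\n", devname);
	return 0;
}
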
@@ -1745,6 +1950,8 @@ static void aac_srb_callback(void *context, struct fib * fibptr)
1745 case WRITE_10: 1950 case WRITE_10:
1746 case READ_12: 1951 case READ_12:
1747 case WRITE_12: 1952 case WRITE_12:
1953 case READ_16:
1954 case WRITE_16:
1748 if(le32_to_cpu(srbreply->data_xfer_length) < scsicmd->underflow ) { 1955 if(le32_to_cpu(srbreply->data_xfer_length) < scsicmd->underflow ) {
1749 printk(KERN_WARNING"aacraid: SCSI CMD underflow\n"); 1956 printk(KERN_WARNING"aacraid: SCSI CMD underflow\n");
1750 } else { 1957 } else {
@@ -1850,8 +2057,8 @@ static void aac_srb_callback(void *context, struct fib * fibptr)
1850 sizeof(scsicmd->sense_buffer) : 2057 sizeof(scsicmd->sense_buffer) :
1851 le32_to_cpu(srbreply->sense_data_size); 2058 le32_to_cpu(srbreply->sense_data_size);
1852#ifdef AAC_DETAILED_STATUS_INFO 2059#ifdef AAC_DETAILED_STATUS_INFO
1853 dprintk((KERN_WARNING "aac_srb_callback: check condition, status = %d len=%d\n", 2060 printk(KERN_WARNING "aac_srb_callback: check condition, status = %d len=%d\n",
1854 le32_to_cpu(srbreply->status), len)); 2061 le32_to_cpu(srbreply->status), len);
1855#endif 2062#endif
1856 memcpy(scsicmd->sense_buffer, srbreply->sense_data, len); 2063 memcpy(scsicmd->sense_buffer, srbreply->sense_data, len);
1857 2064
diff --git a/drivers/scsi/aacraid/aacraid.h b/drivers/scsi/aacraid/aacraid.h
index e40528185d48..4a99d2f000f4 100644
--- a/drivers/scsi/aacraid/aacraid.h
+++ b/drivers/scsi/aacraid/aacraid.h
@@ -1,6 +1,10 @@
1#if (!defined(dprintk)) 1#if (!defined(dprintk))
2# define dprintk(x) 2# define dprintk(x)
3#endif 3#endif
4/* eg: if (nblank(dprintk(x))) */
5#define _nblank(x) #x
6#define nblank(x) _nblank(x)[0]
7
4 8
5/*------------------------------------------------------------------------------ 9/*------------------------------------------------------------------------------
6 * D E F I N E S 10 * D E F I N E S
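
Note on the new _nblank()/nblank() pair above: nblank(expr) stringifies the macro expansion of expr and tests its first character, so nblank(dprintk(x)) is zero exactly when dprintk() is defined away to nothing and non-zero otherwise, letting code skip debug-only blocks without extra #ifdefs. A small standalone demonstration (the two macro names match the header; the rest is illustrative):

#include <stdio.h>

/* When debugging is off, dprintk(x) expands to nothing. */
#if (!defined(dprintk))
# define dprintk(x)
#endif

/* Argument prescan expands dprintk(x) before it reaches the # operator,
 * so an empty expansion stringifies to "" and ""[0] is '\0'. */
#define _nblank(x) #x
#define nblank(x) _nblank(x)[0]

int main(void)
{
	if (nblank(dprintk(x)))
		printf("dprintk is enabled\n");
	else
		printf("dprintk compiles away\n");
	return 0;
}
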
@@ -302,7 +306,6 @@ enum aac_queue_types {
302 */ 306 */
303 307
304#define FsaNormal 1 308#define FsaNormal 1
305#define FsaHigh 2
306 309
307/* 310/*
308 * Define the FIB. The FIB is the where all the requested data and 311 * Define the FIB. The FIB is the where all the requested data and
@@ -546,8 +549,6 @@ struct aac_queue {
546 /* This is only valid for adapter to host command queues. */ 549 /* This is only valid for adapter to host command queues. */
547 spinlock_t *lock; /* Spinlock for this queue must take this lock before accessing the lock */ 550 spinlock_t *lock; /* Spinlock for this queue must take this lock before accessing the lock */
548 spinlock_t lockdata; /* Actual lock (used only on one side of the lock) */ 551 spinlock_t lockdata; /* Actual lock (used only on one side of the lock) */
549 unsigned long SavedIrql; /* Previous IRQL when the spin lock is taken */
550 u32 padding; /* Padding - FIXME - can remove I believe */
551 struct list_head cmdq; /* A queue of FIBs which need to be prcessed by the FS thread. This is */ 552 struct list_head cmdq; /* A queue of FIBs which need to be prcessed by the FS thread. This is */
552 /* only valid for command queues which receive entries from the adapter. */ 553 /* only valid for command queues which receive entries from the adapter. */
553 struct list_head pendingq; /* A queue of outstanding fib's to the adapter. */ 554 struct list_head pendingq; /* A queue of outstanding fib's to the adapter. */
@@ -776,7 +777,9 @@ struct fsa_dev_info {
776 u64 last; 777 u64 last;
777 u64 size; 778 u64 size;
778 u32 type; 779 u32 type;
780 u32 config_waiting_on;
779 u16 queue_depth; 781 u16 queue_depth;
782 u8 config_needed;
780 u8 valid; 783 u8 valid;
781 u8 ro; 784 u8 ro;
782 u8 locked; 785 u8 locked;
@@ -1012,6 +1015,7 @@ struct aac_dev
1012 /* macro side-effects BEWARE */ 1015 /* macro side-effects BEWARE */
1013# define raw_io_interface \ 1016# define raw_io_interface \
1014 init->InitStructRevision==cpu_to_le32(ADAPTER_INIT_STRUCT_REVISION_4) 1017 init->InitStructRevision==cpu_to_le32(ADAPTER_INIT_STRUCT_REVISION_4)
1018 u8 raw_io_64;
1015 u8 printf_enabled; 1019 u8 printf_enabled;
1016}; 1020};
1017 1021
@@ -1362,8 +1366,10 @@ struct aac_srb_reply
1362#define VM_CtBlockVerify64 18 1366#define VM_CtBlockVerify64 18
1363#define VM_CtHostRead64 19 1367#define VM_CtHostRead64 19
1364#define VM_CtHostWrite64 20 1368#define VM_CtHostWrite64 20
1369#define VM_DrvErrTblLog 21
1370#define VM_NameServe64 22
1365 1371
1366#define MAX_VMCOMMAND_NUM 21 /* used for sizing stats array - leave last */ 1372#define MAX_VMCOMMAND_NUM 23 /* used for sizing stats array - leave last */
1367 1373
1368/* 1374/*
1369 * Descriptive information (eg, vital stats) 1375 * Descriptive information (eg, vital stats)
@@ -1472,6 +1478,7 @@ struct aac_mntent {
1472 manager (eg, filesystem) */ 1478 manager (eg, filesystem) */
1473 __le32 altoid; /* != oid <==> snapshot or 1479 __le32 altoid; /* != oid <==> snapshot or
1474 broken mirror exists */ 1480 broken mirror exists */
1481 __le32 capacityhigh;
1475}; 1482};
1476 1483
1477#define FSCS_NOTCLEAN 0x0001 /* fsck is neccessary before mounting */ 1484#define FSCS_NOTCLEAN 0x0001 /* fsck is neccessary before mounting */
@@ -1707,6 +1714,7 @@ extern struct aac_common aac_config;
1707#define AifCmdJobProgress 2 /* Progress report */ 1714#define AifCmdJobProgress 2 /* Progress report */
1708#define AifJobCtrZero 101 /* Array Zero progress */ 1715#define AifJobCtrZero 101 /* Array Zero progress */
1709#define AifJobStsSuccess 1 /* Job completes */ 1716#define AifJobStsSuccess 1 /* Job completes */
1717#define AifJobStsRunning 102 /* Job running */
1710#define AifCmdAPIReport 3 /* Report from other user of API */ 1718#define AifCmdAPIReport 3 /* Report from other user of API */
1711#define AifCmdDriverNotify 4 /* Notify host driver of event */ 1719#define AifCmdDriverNotify 4 /* Notify host driver of event */
1712#define AifDenMorphComplete 200 /* A morph operation completed */ 1720#define AifDenMorphComplete 200 /* A morph operation completed */
@@ -1777,6 +1785,7 @@ int fib_adapter_complete(struct fib * fibptr, unsigned short size);
1777struct aac_driver_ident* aac_get_driver_ident(int devtype); 1785struct aac_driver_ident* aac_get_driver_ident(int devtype);
1778int aac_get_adapter_info(struct aac_dev* dev); 1786int aac_get_adapter_info(struct aac_dev* dev);
1779int aac_send_shutdown(struct aac_dev *dev); 1787int aac_send_shutdown(struct aac_dev *dev);
1788int probe_container(struct aac_dev *dev, int cid);
1780extern int numacb; 1789extern int numacb;
1781extern int acbsize; 1790extern int acbsize;
1782extern char aac_driver_version[]; 1791extern char aac_driver_version[];
diff --git a/drivers/scsi/aacraid/comminit.c b/drivers/scsi/aacraid/comminit.c
index 75abd0453289..59a341b2aedc 100644
--- a/drivers/scsi/aacraid/comminit.c
+++ b/drivers/scsi/aacraid/comminit.c
@@ -195,7 +195,7 @@ int aac_send_shutdown(struct aac_dev * dev)
195 fibctx, 195 fibctx,
196 sizeof(struct aac_close), 196 sizeof(struct aac_close),
197 FsaNormal, 197 FsaNormal,
198 1, 1, 198 -2 /* Timeout silently */, 1,
199 NULL, NULL); 199 NULL, NULL);
200 200
201 if (status == 0) 201 if (status == 0)
@@ -313,8 +313,15 @@ struct aac_dev *aac_init_adapter(struct aac_dev *dev)
313 dev->max_fib_size = sizeof(struct hw_fib); 313 dev->max_fib_size = sizeof(struct hw_fib);
314 dev->sg_tablesize = host->sg_tablesize = (dev->max_fib_size 314 dev->sg_tablesize = host->sg_tablesize = (dev->max_fib_size
315 - sizeof(struct aac_fibhdr) 315 - sizeof(struct aac_fibhdr)
316 - sizeof(struct aac_write) + sizeof(struct sgmap)) 316 - sizeof(struct aac_write) + sizeof(struct sgentry))
317 / sizeof(struct sgmap); 317 / sizeof(struct sgentry);
318 dev->raw_io_64 = 0;
319 if ((!aac_adapter_sync_cmd(dev, GET_ADAPTER_PROPERTIES,
320 0, 0, 0, 0, 0, 0, status+0, status+1, status+2, NULL, NULL)) &&
321 (status[0] == 0x00000001)) {
322 if (status[1] & AAC_OPT_NEW_COMM_64)
323 dev->raw_io_64 = 1;
324 }
318 if ((!aac_adapter_sync_cmd(dev, GET_COMM_PREFERRED_SETTINGS, 325 if ((!aac_adapter_sync_cmd(dev, GET_COMM_PREFERRED_SETTINGS,
319 0, 0, 0, 0, 0, 0, 326 0, 0, 0, 0, 0, 0,
320 status+0, status+1, status+2, status+3, status+4)) 327 status+0, status+1, status+2, status+3, status+4))
@@ -342,8 +349,8 @@ struct aac_dev *aac_init_adapter(struct aac_dev *dev)
342 dev->max_fib_size = 512; 349 dev->max_fib_size = 512;
343 dev->sg_tablesize = host->sg_tablesize 350 dev->sg_tablesize = host->sg_tablesize
344 = (512 - sizeof(struct aac_fibhdr) 351 = (512 - sizeof(struct aac_fibhdr)
345 - sizeof(struct aac_write) + sizeof(struct sgmap)) 352 - sizeof(struct aac_write) + sizeof(struct sgentry))
346 / sizeof(struct sgmap); 353 / sizeof(struct sgentry);
347 host->can_queue = AAC_NUM_IO_FIB; 354 host->can_queue = AAC_NUM_IO_FIB;
348 } else if (acbsize == 2048) { 355 } else if (acbsize == 2048) {
349 host->max_sectors = 512; 356 host->max_sectors = 512;
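
Note on the comminit.c hunks above: the scatter-gather sizing divides by one struct sgentry instead of the whole struct sgmap, reading as "payload left after the FIB header and the write command (which already embeds one element), divided by the size of one element". A standalone sketch of that arithmetic; the struct sizes below are assumptions for illustration, not the real aacraid layouts:

#include <stdio.h>

int main(void)
{
	/* Illustrative sizes only; the real values come from aacraid.h. */
	unsigned int max_fib_size = 512;   /* whole hardware FIB              */
	unsigned int fibhdr_size  = 32;    /* struct aac_fibhdr               */
	unsigned int write_size   = 40;    /* struct aac_write, incl. one SGE */
	unsigned int sgentry_size = 8;     /* one struct sgentry              */

	/* Space for SG elements = FIB minus header minus the write command,
	 * plus the one element already counted inside the write command.    */
	unsigned int sg_tablesize =
		(max_fib_size - fibhdr_size - write_size + sgentry_size)
		/ sgentry_size;

	printf("sg_tablesize = %u\n", sg_tablesize);
	return 0;
}
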
diff --git a/drivers/scsi/aacraid/commsup.c b/drivers/scsi/aacraid/commsup.c
index a1d303f03480..e4d543a474ae 100644
--- a/drivers/scsi/aacraid/commsup.c
+++ b/drivers/scsi/aacraid/commsup.c
@@ -39,7 +39,9 @@
39#include <linux/completion.h> 39#include <linux/completion.h>
40#include <linux/blkdev.h> 40#include <linux/blkdev.h>
41#include <scsi/scsi_host.h> 41#include <scsi/scsi_host.h>
42#include <scsi/scsi_device.h>
42#include <asm/semaphore.h> 43#include <asm/semaphore.h>
44#include <asm/delay.h>
43 45
44#include "aacraid.h" 46#include "aacraid.h"
45 47
@@ -269,40 +271,22 @@ static int aac_get_entry (struct aac_dev * dev, u32 qid, struct aac_entry **entr
269 /* Interrupt Moderation, only interrupt for first two entries */ 271 /* Interrupt Moderation, only interrupt for first two entries */
270 if (idx != le32_to_cpu(*(q->headers.consumer))) { 272 if (idx != le32_to_cpu(*(q->headers.consumer))) {
271 if (--idx == 0) { 273 if (--idx == 0) {
272 if (qid == AdapHighCmdQueue) 274 if (qid == AdapNormCmdQueue)
273 idx = ADAP_HIGH_CMD_ENTRIES;
274 else if (qid == AdapNormCmdQueue)
275 idx = ADAP_NORM_CMD_ENTRIES; 275 idx = ADAP_NORM_CMD_ENTRIES;
276 else if (qid == AdapHighRespQueue) 276 else
277 idx = ADAP_HIGH_RESP_ENTRIES;
278 else if (qid == AdapNormRespQueue)
279 idx = ADAP_NORM_RESP_ENTRIES; 277 idx = ADAP_NORM_RESP_ENTRIES;
280 } 278 }
281 if (idx != le32_to_cpu(*(q->headers.consumer))) 279 if (idx != le32_to_cpu(*(q->headers.consumer)))
282 *nonotify = 1; 280 *nonotify = 1;
283 } 281 }
284 282
285 if (qid == AdapHighCmdQueue) { 283 if (qid == AdapNormCmdQueue) {
286 if (*index >= ADAP_HIGH_CMD_ENTRIES)
287 *index = 0;
288 } else if (qid == AdapNormCmdQueue) {
289 if (*index >= ADAP_NORM_CMD_ENTRIES) 284 if (*index >= ADAP_NORM_CMD_ENTRIES)
290 *index = 0; /* Wrap to front of the Producer Queue. */ 285 *index = 0; /* Wrap to front of the Producer Queue. */
291 } 286 } else {
292 else if (qid == AdapHighRespQueue)
293 {
294 if (*index >= ADAP_HIGH_RESP_ENTRIES)
295 *index = 0;
296 }
297 else if (qid == AdapNormRespQueue)
298 {
299 if (*index >= ADAP_NORM_RESP_ENTRIES) 287 if (*index >= ADAP_NORM_RESP_ENTRIES)
300 *index = 0; /* Wrap to front of the Producer Queue. */ 288 *index = 0; /* Wrap to front of the Producer Queue. */
301 } 289 }
302 else {
303 printk("aacraid: invalid qid\n");
304 BUG();
305 }
306 290
307 if ((*index + 1) == le32_to_cpu(*(q->headers.consumer))) { /* Queue is full */ 291 if ((*index + 1) == le32_to_cpu(*(q->headers.consumer))) { /* Queue is full */
308 printk(KERN_WARNING "Queue %d full, %u outstanding.\n", 292 printk(KERN_WARNING "Queue %d full, %u outstanding.\n",
@@ -334,12 +318,8 @@ static int aac_queue_get(struct aac_dev * dev, u32 * index, u32 qid, struct hw_f
334{ 318{
335 struct aac_entry * entry = NULL; 319 struct aac_entry * entry = NULL;
336 int map = 0; 320 int map = 0;
337 struct aac_queue * q = &dev->queues->queue[qid];
338
339 spin_lock_irqsave(q->lock, q->SavedIrql);
340 321
341 if (qid == AdapHighCmdQueue || qid == AdapNormCmdQueue) 322 if (qid == AdapNormCmdQueue) {
342 {
343 /* if no entries wait for some if caller wants to */ 323 /* if no entries wait for some if caller wants to */
344 while (!aac_get_entry(dev, qid, &entry, index, nonotify)) 324 while (!aac_get_entry(dev, qid, &entry, index, nonotify))
345 { 325 {
@@ -350,9 +330,7 @@ static int aac_queue_get(struct aac_dev * dev, u32 * index, u32 qid, struct hw_f
350 */ 330 */
351 entry->size = cpu_to_le32(le16_to_cpu(hw_fib->header.Size)); 331 entry->size = cpu_to_le32(le16_to_cpu(hw_fib->header.Size));
352 map = 1; 332 map = 1;
353 } 333 } else {
354 else if (qid == AdapHighRespQueue || qid == AdapNormRespQueue)
355 {
356 while(!aac_get_entry(dev, qid, &entry, index, nonotify)) 334 while(!aac_get_entry(dev, qid, &entry, index, nonotify))
357 { 335 {
358 /* if no entries wait for some if caller wants to */ 336 /* if no entries wait for some if caller wants to */
@@ -375,42 +353,6 @@ static int aac_queue_get(struct aac_dev * dev, u32 * index, u32 qid, struct hw_f
375 return 0; 353 return 0;
376} 354}
377 355
378
379/**
380 * aac_insert_entry - insert a queue entry
381 * @dev: Adapter
382 * @index: Index of entry to insert
383 * @qid: Queue number
384 * @nonotify: Suppress adapter notification
385 *
386 * Gets the next free QE off the requested priorty adapter command
387 * queue and associates the Fib with the QE. The QE represented by
388 * index is ready to insert on the queue when this routine returns
389 * success.
390 */
391
392static int aac_insert_entry(struct aac_dev * dev, u32 index, u32 qid, unsigned long nonotify)
393{
394 struct aac_queue * q = &dev->queues->queue[qid];
395
396 if(q == NULL)
397 BUG();
398 *(q->headers.producer) = cpu_to_le32(index + 1);
399 spin_unlock_irqrestore(q->lock, q->SavedIrql);
400
401 if (qid == AdapHighCmdQueue ||
402 qid == AdapNormCmdQueue ||
403 qid == AdapHighRespQueue ||
404 qid == AdapNormRespQueue)
405 {
406 if (!nonotify)
407 aac_adapter_notify(dev, qid);
408 }
409 else
410 printk("Suprise insert!\n");
411 return 0;
412}
413
414/* 356/*
415 * Define the highest level of host to adapter communication routines. 357 * Define the highest level of host to adapter communication routines.
416 * These routines will support host to adapter FS commuication. These 358 * These routines will support host to adapter FS commuication. These
@@ -439,12 +381,13 @@ static int aac_insert_entry(struct aac_dev * dev, u32 index, u32 qid, unsigned l
439int fib_send(u16 command, struct fib * fibptr, unsigned long size, int priority, int wait, int reply, fib_callback callback, void * callback_data) 381int fib_send(u16 command, struct fib * fibptr, unsigned long size, int priority, int wait, int reply, fib_callback callback, void * callback_data)
440{ 382{
441 u32 index; 383 u32 index;
442 u32 qid;
443 struct aac_dev * dev = fibptr->dev; 384 struct aac_dev * dev = fibptr->dev;
444 unsigned long nointr = 0; 385 unsigned long nointr = 0;
445 struct hw_fib * hw_fib = fibptr->hw_fib; 386 struct hw_fib * hw_fib = fibptr->hw_fib;
446 struct aac_queue * q; 387 struct aac_queue * q;
447 unsigned long flags = 0; 388 unsigned long flags = 0;
389 unsigned long qflags;
390
448 if (!(hw_fib->header.XferState & cpu_to_le32(HostOwned))) 391 if (!(hw_fib->header.XferState & cpu_to_le32(HostOwned)))
449 return -EBUSY; 392 return -EBUSY;
450 /* 393 /*
@@ -497,26 +440,8 @@ int fib_send(u16 command, struct fib * fibptr, unsigned long size, int priority
497 * Get a queue entry connect the FIB to it and send an notify 440 * Get a queue entry connect the FIB to it and send an notify
498 * the adapter a command is ready. 441 * the adapter a command is ready.
499 */ 442 */
500 if (priority == FsaHigh) { 443 hw_fib->header.XferState |= cpu_to_le32(NormalPriority);
501 hw_fib->header.XferState |= cpu_to_le32(HighPriority);
502 qid = AdapHighCmdQueue;
503 } else {
504 hw_fib->header.XferState |= cpu_to_le32(NormalPriority);
505 qid = AdapNormCmdQueue;
506 }
507 q = &dev->queues->queue[qid];
508 444
509 if(wait)
510 spin_lock_irqsave(&fibptr->event_lock, flags);
511 if(aac_queue_get( dev, &index, qid, hw_fib, 1, fibptr, &nointr)<0)
512 return -EWOULDBLOCK;
513 dprintk((KERN_DEBUG "fib_send: inserting a queue entry at index %d.\n",index));
514 dprintk((KERN_DEBUG "Fib contents:.\n"));
515 dprintk((KERN_DEBUG " Command = %d.\n", hw_fib->header.Command));
516 dprintk((KERN_DEBUG " XferState = %x.\n", hw_fib->header.XferState));
517 dprintk((KERN_DEBUG " hw_fib va being sent=%p\n",fibptr->hw_fib));
518 dprintk((KERN_DEBUG " hw_fib pa being sent=%lx\n",(ulong)fibptr->hw_fib_pa));
519 dprintk((KERN_DEBUG " fib being sent=%p\n",fibptr));
520 /* 445 /*
521 * Fill in the Callback and CallbackContext if we are not 446 * Fill in the Callback and CallbackContext if we are not
522 * going to wait. 447 * going to wait.
@@ -525,22 +450,67 @@ int fib_send(u16 command, struct fib * fibptr, unsigned long size, int priority
525 fibptr->callback = callback; 450 fibptr->callback = callback;
526 fibptr->callback_data = callback_data; 451 fibptr->callback_data = callback_data;
527 } 452 }
528 FIB_COUNTER_INCREMENT(aac_config.FibsSent);
529 list_add_tail(&fibptr->queue, &q->pendingq);
530 q->numpending++;
531 453
532 fibptr->done = 0; 454 fibptr->done = 0;
533 fibptr->flags = 0; 455 fibptr->flags = 0;
534 456
535 if(aac_insert_entry(dev, index, qid, (nointr & aac_config.irq_mod)) < 0) 457 FIB_COUNTER_INCREMENT(aac_config.FibsSent);
536 return -EWOULDBLOCK; 458
459 dprintk((KERN_DEBUG "fib_send: inserting a queue entry at index %d.\n",index));
460 dprintk((KERN_DEBUG "Fib contents:.\n"));
461 dprintk((KERN_DEBUG " Command = %d.\n", hw_fib->header.Command));
462 dprintk((KERN_DEBUG " XferState = %x.\n", hw_fib->header.XferState));
463 dprintk((KERN_DEBUG " hw_fib va being sent=%p\n",fibptr->hw_fib));
464 dprintk((KERN_DEBUG " hw_fib pa being sent=%lx\n",(ulong)fibptr->hw_fib_pa));
465 dprintk((KERN_DEBUG " fib being sent=%p\n",fibptr));
466
467 q = &dev->queues->queue[AdapNormCmdQueue];
468
469 if(wait)
470 spin_lock_irqsave(&fibptr->event_lock, flags);
471 spin_lock_irqsave(q->lock, qflags);
472 aac_queue_get( dev, &index, AdapNormCmdQueue, hw_fib, 1, fibptr, &nointr);
473
474 list_add_tail(&fibptr->queue, &q->pendingq);
475 q->numpending++;
476 *(q->headers.producer) = cpu_to_le32(index + 1);
477 spin_unlock_irqrestore(q->lock, qflags);
478 if (!(nointr & aac_config.irq_mod))
479 aac_adapter_notify(dev, AdapNormCmdQueue);
537 /* 480 /*
538 * If the caller wanted us to wait for response wait now. 481 * If the caller wanted us to wait for response wait now.
539 */ 482 */
540 483
541 if (wait) { 484 if (wait) {
542 spin_unlock_irqrestore(&fibptr->event_lock, flags); 485 spin_unlock_irqrestore(&fibptr->event_lock, flags);
543 down(&fibptr->event_wait); 486 /* Only set for first known interruptable command */
487 if (wait < 0) {
488 /*
489 * *VERY* Dangerous to time out a command, the
490 * assumption is made that we have no hope of
491 * functioning because an interrupt routing or other
492 * hardware failure has occurred.
493 */
494 unsigned long count = 36000000L; /* 3 minutes */
495 unsigned long qflags;
496 while (down_trylock(&fibptr->event_wait)) {
497 if (--count == 0) {
498 spin_lock_irqsave(q->lock, qflags);
499 q->numpending--;
500 list_del(&fibptr->queue);
501 spin_unlock_irqrestore(q->lock, qflags);
502 if (wait == -1) {
503 printk(KERN_ERR "aacraid: fib_send: first asynchronous command timed out.\n"
504 "Usually a result of a PCI interrupt routing problem;\n"
505 "update mother board BIOS or consider utilizing one of\n"
506 "the SAFE mode kernel options (acpi, apic etc)\n");
507 }
508 return -ETIMEDOUT;
509 }
510 udelay(5);
511 }
512 } else
513 down(&fibptr->event_wait);
544 if(fibptr->done == 0) 514 if(fibptr->done == 0)
545 BUG(); 515 BUG();
546 516
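
Note on the wait path added above: for wait < 0 the driver polls the completion semaphore with down_trylock() and a 5 microsecond delay rather than blocking in down(); 36,000,000 iterations at 5 us each is the three-minute ceiling the comment mentions, after which the FIB is unhooked from the pending queue and -ETIMEDOUT is returned. A userspace sketch of the same bounded-poll pattern, with POSIX semaphores standing in for the kernel primitives (names and timings are illustrative):

#include <errno.h>
#include <semaphore.h>
#include <stdio.h>
#include <unistd.h>

/* Poll a semaphore instead of blocking, giving up after 'count' tries
 * separated by 'usec' microseconds (the kernel uses down_trylock/udelay). */
static int wait_bounded(sem_t *done, unsigned long count, unsigned int usec)
{
	while (sem_trywait(done) != 0) {
		if (errno != EAGAIN)
			return -1;             /* real error               */
		if (--count == 0)
			return -ETIMEDOUT;     /* bound reached, bail out  */
		usleep(usec);
	}
	return 0;                              /* completion arrived       */
}

int main(void)
{
	sem_t done;

	sem_init(&done, 0, 0);                 /* never posted in this demo */
	/* 200 tries x 5000 us = about 1 second, versus the driver's 3 minutes. */
	if (wait_bounded(&done, 200, 5000) == -ETIMEDOUT)
		printf("timed out waiting for completion\n");
	sem_destroy(&done);
	return 0;
}
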
@@ -622,15 +592,9 @@ void aac_consumer_free(struct aac_dev * dev, struct aac_queue *q, u32 qid)
622 case HostNormCmdQueue: 592 case HostNormCmdQueue:
623 notify = HostNormCmdNotFull; 593 notify = HostNormCmdNotFull;
624 break; 594 break;
625 case HostHighCmdQueue:
626 notify = HostHighCmdNotFull;
627 break;
628 case HostNormRespQueue: 595 case HostNormRespQueue:
629 notify = HostNormRespNotFull; 596 notify = HostNormRespNotFull;
630 break; 597 break;
631 case HostHighRespQueue:
632 notify = HostHighRespNotFull;
633 break;
634 default: 598 default:
635 BUG(); 599 BUG();
636 return; 600 return;
@@ -652,9 +616,13 @@ int fib_adapter_complete(struct fib * fibptr, unsigned short size)
652{ 616{
653 struct hw_fib * hw_fib = fibptr->hw_fib; 617 struct hw_fib * hw_fib = fibptr->hw_fib;
654 struct aac_dev * dev = fibptr->dev; 618 struct aac_dev * dev = fibptr->dev;
619 struct aac_queue * q;
655 unsigned long nointr = 0; 620 unsigned long nointr = 0;
656 if (hw_fib->header.XferState == 0) 621 unsigned long qflags;
622
623 if (hw_fib->header.XferState == 0) {
657 return 0; 624 return 0;
625 }
658 /* 626 /*
659 * If we plan to do anything check the structure type first. 627 * If we plan to do anything check the structure type first.
660 */ 628 */
@@ -669,37 +637,21 @@ int fib_adapter_complete(struct fib * fibptr, unsigned short size)
669 * send the completed cdb to the adapter. 637 * send the completed cdb to the adapter.
670 */ 638 */
671 if (hw_fib->header.XferState & cpu_to_le32(SentFromAdapter)) { 639 if (hw_fib->header.XferState & cpu_to_le32(SentFromAdapter)) {
640 u32 index;
672 hw_fib->header.XferState |= cpu_to_le32(HostProcessed); 641 hw_fib->header.XferState |= cpu_to_le32(HostProcessed);
673 if (hw_fib->header.XferState & cpu_to_le32(HighPriority)) { 642 if (size) {
674 u32 index; 643 size += sizeof(struct aac_fibhdr);
675 if (size) 644 if (size > le16_to_cpu(hw_fib->header.SenderSize))
676 { 645 return -EMSGSIZE;
677 size += sizeof(struct aac_fibhdr); 646 hw_fib->header.Size = cpu_to_le16(size);
678 if (size > le16_to_cpu(hw_fib->header.SenderSize))
679 return -EMSGSIZE;
680 hw_fib->header.Size = cpu_to_le16(size);
681 }
682 if(aac_queue_get(dev, &index, AdapHighRespQueue, hw_fib, 1, NULL, &nointr) < 0) {
683 return -EWOULDBLOCK;
684 }
685 if (aac_insert_entry(dev, index, AdapHighRespQueue, (nointr & (int)aac_config.irq_mod)) != 0) {
686 }
687 } else if (hw_fib->header.XferState &
688 cpu_to_le32(NormalPriority)) {
689 u32 index;
690
691 if (size) {
692 size += sizeof(struct aac_fibhdr);
693 if (size > le16_to_cpu(hw_fib->header.SenderSize))
694 return -EMSGSIZE;
695 hw_fib->header.Size = cpu_to_le16(size);
696 }
697 if (aac_queue_get(dev, &index, AdapNormRespQueue, hw_fib, 1, NULL, &nointr) < 0)
698 return -EWOULDBLOCK;
699 if (aac_insert_entry(dev, index, AdapNormRespQueue, (nointr & (int)aac_config.irq_mod)) != 0)
700 {
701 }
702 } 647 }
648 q = &dev->queues->queue[AdapNormRespQueue];
649 spin_lock_irqsave(q->lock, qflags);
650 aac_queue_get(dev, &index, AdapNormRespQueue, hw_fib, 1, NULL, &nointr);
651 *(q->headers.producer) = cpu_to_le32(index + 1);
652 spin_unlock_irqrestore(q->lock, qflags);
653 if (!(nointr & (int)aac_config.irq_mod))
654 aac_adapter_notify(dev, AdapNormRespQueue);
703 } 655 }
704 else 656 else
705 { 657 {
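
Note on the simplification above: with the High-priority queues removed, both fib_send() and fib_adapter_complete() follow one sequence, take the queue lock, grab the next entry, publish the new producer index, drop the lock, and ring the adapter only when interrupt moderation did not already suppress the notify. A standalone sketch of that producer-side discipline (the ring structure and the "empty ring means kick" rule here are illustrative, not the driver's exact moderation test):

#include <stdio.h>

#define RING_ENTRIES 8

struct ring {
	unsigned int producer;     /* next slot the host will fill       */
	unsigned int consumer;     /* next slot the adapter will consume */
	int payload[RING_ENTRIES];
};

/* Queue one entry; returns 1 if the consumer must be notified,
 * 0 if it is already busy with older work and will see this one anyway. */
static int ring_post(struct ring *q, int value)
{
	unsigned int idx = q->producer;
	int notify = (idx == q->consumer);         /* ring was empty */

	q->payload[idx] = value;
	q->producer = (idx + 1) % RING_ENTRIES;    /* publish after payload */
	return notify;
}

int main(void)
{
	struct ring q = { 0, 0, { 0 } };

	for (int i = 0; i < 3; i++)
		printf("post %d -> notify=%d\n", i, ring_post(&q, i));
	return 0;
}
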
@@ -791,6 +743,268 @@ void aac_printf(struct aac_dev *dev, u32 val)
791 memset(cp, 0, 256); 743 memset(cp, 0, 256);
792} 744}
793 745
746
747/**
748 * aac_handle_aif - Handle a message from the firmware
749 * @dev: Which adapter this fib is from
750 * @fibptr: Pointer to fibptr from adapter
751 *
752 * This routine handles a driver notify fib from the adapter and
753 * dispatches it to the appropriate routine for handling.
754 */
755
756static void aac_handle_aif(struct aac_dev * dev, struct fib * fibptr)
757{
758 struct hw_fib * hw_fib = fibptr->hw_fib;
759 struct aac_aifcmd * aifcmd = (struct aac_aifcmd *)hw_fib->data;
760 int busy;
761 u32 container;
762 struct scsi_device *device;
763 enum {
764 NOTHING,
765 DELETE,
766 ADD,
767 CHANGE
768 } device_config_needed;
769
770 /* Sniff for container changes */
771
772 if (!dev)
773 return;
774 container = (u32)-1;
775
776 /*
777 * We have set this up to try and minimize the number of
778 * re-configures that take place. As a result of this when
779 * certain AIF's come in we will set a flag waiting for another
780 * type of AIF before setting the re-config flag.
781 */
782 switch (le32_to_cpu(aifcmd->command)) {
783 case AifCmdDriverNotify:
784 switch (le32_to_cpu(((u32 *)aifcmd->data)[0])) {
785 /*
786 * Morph or Expand complete
787 */
788 case AifDenMorphComplete:
789 case AifDenVolumeExtendComplete:
790 container = le32_to_cpu(((u32 *)aifcmd->data)[1]);
791 if (container >= dev->maximum_num_containers)
792 break;
793
794 /*
795 * Find the Scsi_Device associated with the SCSI
796 * address. Make sure we have the right array, and if
797 * so set the flag to initiate a new re-config once we
798 * see an AifEnConfigChange AIF come through.
799 */
800
801 if ((dev != NULL) && (dev->scsi_host_ptr != NULL)) {
802 device = scsi_device_lookup(dev->scsi_host_ptr,
803 CONTAINER_TO_CHANNEL(container),
804 CONTAINER_TO_ID(container),
805 CONTAINER_TO_LUN(container));
806 if (device) {
807 dev->fsa_dev[container].config_needed = CHANGE;
808 dev->fsa_dev[container].config_waiting_on = AifEnConfigChange;
809 scsi_device_put(device);
810 }
811 }
812 }
813
814 /*
815 * If we are waiting on something and this happens to be
816 * that thing then set the re-configure flag.
817 */
818 if (container != (u32)-1) {
819 if (container >= dev->maximum_num_containers)
820 break;
821 if (dev->fsa_dev[container].config_waiting_on ==
822 le32_to_cpu(*(u32 *)aifcmd->data))
823 dev->fsa_dev[container].config_waiting_on = 0;
824 } else for (container = 0;
825 container < dev->maximum_num_containers; ++container) {
826 if (dev->fsa_dev[container].config_waiting_on ==
827 le32_to_cpu(*(u32 *)aifcmd->data))
828 dev->fsa_dev[container].config_waiting_on = 0;
829 }
830 break;
831
832 case AifCmdEventNotify:
833 switch (le32_to_cpu(((u32 *)aifcmd->data)[0])) {
834 /*
835 * Add an Array.
836 */
837 case AifEnAddContainer:
838 container = le32_to_cpu(((u32 *)aifcmd->data)[1]);
839 if (container >= dev->maximum_num_containers)
840 break;
841 dev->fsa_dev[container].config_needed = ADD;
842 dev->fsa_dev[container].config_waiting_on =
843 AifEnConfigChange;
844 break;
845
846 /*
847 * Delete an Array.
848 */
849 case AifEnDeleteContainer:
850 container = le32_to_cpu(((u32 *)aifcmd->data)[1]);
851 if (container >= dev->maximum_num_containers)
852 break;
853 dev->fsa_dev[container].config_needed = DELETE;
854 dev->fsa_dev[container].config_waiting_on =
855 AifEnConfigChange;
856 break;
857
858 /*
859 * Container change detected. If we currently are not
860 * waiting on something else, setup to wait on a Config Change.
861 */
862 case AifEnContainerChange:
863 container = le32_to_cpu(((u32 *)aifcmd->data)[1]);
864 if (container >= dev->maximum_num_containers)
865 break;
866 if (dev->fsa_dev[container].config_waiting_on)
867 break;
868 dev->fsa_dev[container].config_needed = CHANGE;
869 dev->fsa_dev[container].config_waiting_on =
870 AifEnConfigChange;
871 break;
872
873 case AifEnConfigChange:
874 break;
875
876 }
877
878 /*
879 * If we are waiting on something and this happens to be
880 * that thing then set the re-configure flag.
881 */
882 if (container != (u32)-1) {
883 if (container >= dev->maximum_num_containers)
884 break;
885 if (dev->fsa_dev[container].config_waiting_on ==
886 le32_to_cpu(*(u32 *)aifcmd->data))
887 dev->fsa_dev[container].config_waiting_on = 0;
888 } else for (container = 0;
889 container < dev->maximum_num_containers; ++container) {
890 if (dev->fsa_dev[container].config_waiting_on ==
891 le32_to_cpu(*(u32 *)aifcmd->data))
892 dev->fsa_dev[container].config_waiting_on = 0;
893 }
894 break;
895
896 case AifCmdJobProgress:
897 /*
898 * These are job progress AIF's. When a Clear is being
899 * done on a container it is initially created then hidden from
900 * the OS. When the clear completes we don't get a config
901 * change so we monitor the job status complete on a clear then
902 * wait for a container change.
903 */
904
905 if ((((u32 *)aifcmd->data)[1] == cpu_to_le32(AifJobCtrZero))
906 && ((((u32 *)aifcmd->data)[6] == ((u32 *)aifcmd->data)[5])
907 || (((u32 *)aifcmd->data)[4] == cpu_to_le32(AifJobStsSuccess)))) {
908 for (container = 0;
909 container < dev->maximum_num_containers;
910 ++container) {
911 /*
912 * Stomp on all config sequencing for all
913 * containers?
914 */
915 dev->fsa_dev[container].config_waiting_on =
916 AifEnContainerChange;
917 dev->fsa_dev[container].config_needed = ADD;
918 }
919 }
920 if ((((u32 *)aifcmd->data)[1] == cpu_to_le32(AifJobCtrZero))
921 && (((u32 *)aifcmd->data)[6] == 0)
922 && (((u32 *)aifcmd->data)[4] == cpu_to_le32(AifJobStsRunning))) {
923 for (container = 0;
924 container < dev->maximum_num_containers;
925 ++container) {
926 /*
927 * Stomp on all config sequencing for all
928 * containers?
929 */
930 dev->fsa_dev[container].config_waiting_on =
931 AifEnContainerChange;
932 dev->fsa_dev[container].config_needed = DELETE;
933 }
934 }
935 break;
936 }
937
938 device_config_needed = NOTHING;
939 for (container = 0; container < dev->maximum_num_containers;
940 ++container) {
941 if ((dev->fsa_dev[container].config_waiting_on == 0)
942 && (dev->fsa_dev[container].config_needed != NOTHING)) {
943 device_config_needed =
944 dev->fsa_dev[container].config_needed;
945 dev->fsa_dev[container].config_needed = NOTHING;
946 break;
947 }
948 }
949 if (device_config_needed == NOTHING)
950 return;
951
952 /*
953 * If we decided that a re-configuration needs to be done,
954 * schedule it here on the way out the door, please close the door
955 * behind you.
956 */
957
958 busy = 0;
959
960
961 /*
962 * Find the Scsi_Device associated with the SCSI address,
963 * and mark it as changed, invalidating the cache. This deals
964 * with changes to existing device IDs.
965 */
966
967 if (!dev || !dev->scsi_host_ptr)
968 return;
969 /*
970 * force reload of disk info via probe_container
971 */
972 if ((device_config_needed == CHANGE)
973 && (dev->fsa_dev[container].valid == 1))
974 dev->fsa_dev[container].valid = 2;
975 if ((device_config_needed == CHANGE) ||
976 (device_config_needed == ADD))
977 probe_container(dev, container);
978 device = scsi_device_lookup(dev->scsi_host_ptr,
979 CONTAINER_TO_CHANNEL(container),
980 CONTAINER_TO_ID(container),
981 CONTAINER_TO_LUN(container));
982 if (device) {
983 switch (device_config_needed) {
984 case DELETE:
985 scsi_remove_device(device);
986 break;
987 case CHANGE:
988 if (!dev->fsa_dev[container].valid) {
989 scsi_remove_device(device);
990 break;
991 }
992 scsi_rescan_device(&device->sdev_gendev);
993
994 default:
995 break;
996 }
997 scsi_device_put(device);
998 }
999 if (device_config_needed == ADD) {
1000 scsi_add_device(dev->scsi_host_ptr,
1001 CONTAINER_TO_CHANNEL(container),
1002 CONTAINER_TO_ID(container),
1003 CONTAINER_TO_LUN(container));
1004 }
1005
1006}
1007
794/** 1008/**
795 * aac_command_thread - command processing thread 1009 * aac_command_thread - command processing thread
796 * @dev: Adapter to monitor 1010 * @dev: Adapter to monitor
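
Note on aac_handle_aif() above: it coalesces firmware events per container through the two fields added to struct fsa_dev_info, config_needed (what reconfiguration is pending: ADD, DELETE, CHANGE) and config_waiting_on (which follow-up AIF must arrive before acting), so a burst of related events yields a single rescan. A standalone sketch of that two-field coalescing (event names and the container record are illustrative):

#include <stdio.h>

enum action { NOTHING, DELETE, ADD, CHANGE };
enum aif    { AIF_NONE, AIF_CONTAINER_CHANGE, AIF_CONFIG_CHANGE };

struct container {
	enum aif    waiting_on;    /* AIF that must arrive before acting */
	enum action needed;        /* what to do once it arrives         */
};

/* The first event arms the container; the awaited event releases it. */
static enum action handle_event(struct container *c, enum aif event)
{
	if (c->waiting_on == AIF_NONE && event == AIF_CONTAINER_CHANGE) {
		c->needed = CHANGE;
		c->waiting_on = AIF_CONFIG_CHANGE;
		return NOTHING;                /* keep waiting              */
	}
	if (event == c->waiting_on) {
		enum action act = c->needed;

		c->waiting_on = AIF_NONE;      /* disarm and act exactly once */
		c->needed = NOTHING;
		return act;
	}
	return NOTHING;                        /* unrelated or duplicate    */
}

int main(void)
{
	struct container c = { AIF_NONE, NOTHING };

	printf("%d\n", handle_event(&c, AIF_CONTAINER_CHANGE)); /* 0: armed   */
	printf("%d\n", handle_event(&c, AIF_CONTAINER_CHANGE)); /* 0: merged  */
	printf("%d\n", handle_event(&c, AIF_CONFIG_CHANGE));    /* 3: CHANGE  */
	return 0;
}
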
@@ -805,7 +1019,6 @@ int aac_command_thread(struct aac_dev * dev)
805{ 1019{
806 struct hw_fib *hw_fib, *hw_newfib; 1020 struct hw_fib *hw_fib, *hw_newfib;
807 struct fib *fib, *newfib; 1021 struct fib *fib, *newfib;
808 struct aac_queue_block *queues = dev->queues;
809 struct aac_fib_context *fibctx; 1022 struct aac_fib_context *fibctx;
810 unsigned long flags; 1023 unsigned long flags;
811 DECLARE_WAITQUEUE(wait, current); 1024 DECLARE_WAITQUEUE(wait, current);
@@ -825,21 +1038,22 @@ int aac_command_thread(struct aac_dev * dev)
825 * Let the DPC know it has a place to send the AIF's to. 1038 * Let the DPC know it has a place to send the AIF's to.
826 */ 1039 */
827 dev->aif_thread = 1; 1040 dev->aif_thread = 1;
828 add_wait_queue(&queues->queue[HostNormCmdQueue].cmdready, &wait); 1041 add_wait_queue(&dev->queues->queue[HostNormCmdQueue].cmdready, &wait);
829 set_current_state(TASK_INTERRUPTIBLE); 1042 set_current_state(TASK_INTERRUPTIBLE);
1043 dprintk ((KERN_INFO "aac_command_thread start\n"));
830 while(1) 1044 while(1)
831 { 1045 {
832 spin_lock_irqsave(queues->queue[HostNormCmdQueue].lock, flags); 1046 spin_lock_irqsave(dev->queues->queue[HostNormCmdQueue].lock, flags);
833 while(!list_empty(&(queues->queue[HostNormCmdQueue].cmdq))) { 1047 while(!list_empty(&(dev->queues->queue[HostNormCmdQueue].cmdq))) {
834 struct list_head *entry; 1048 struct list_head *entry;
835 struct aac_aifcmd * aifcmd; 1049 struct aac_aifcmd * aifcmd;
836 1050
837 set_current_state(TASK_RUNNING); 1051 set_current_state(TASK_RUNNING);
838 1052
839 entry = queues->queue[HostNormCmdQueue].cmdq.next; 1053 entry = dev->queues->queue[HostNormCmdQueue].cmdq.next;
840 list_del(entry); 1054 list_del(entry);
841 1055
842 spin_unlock_irqrestore(queues->queue[HostNormCmdQueue].lock, flags); 1056 spin_unlock_irqrestore(dev->queues->queue[HostNormCmdQueue].lock, flags);
843 fib = list_entry(entry, struct fib, fiblink); 1057 fib = list_entry(entry, struct fib, fiblink);
844 /* 1058 /*
845 * We will process the FIB here or pass it to a 1059 * We will process the FIB here or pass it to a
@@ -860,6 +1074,7 @@ int aac_command_thread(struct aac_dev * dev)
860 aifcmd = (struct aac_aifcmd *) hw_fib->data; 1074 aifcmd = (struct aac_aifcmd *) hw_fib->data;
861 if (aifcmd->command == cpu_to_le32(AifCmdDriverNotify)) { 1075 if (aifcmd->command == cpu_to_le32(AifCmdDriverNotify)) {
862 /* Handle Driver Notify Events */ 1076 /* Handle Driver Notify Events */
1077 aac_handle_aif(dev, fib);
863 *(__le32 *)hw_fib->data = cpu_to_le32(ST_OK); 1078 *(__le32 *)hw_fib->data = cpu_to_le32(ST_OK);
864 fib_adapter_complete(fib, (u16)sizeof(u32)); 1079 fib_adapter_complete(fib, (u16)sizeof(u32));
865 } else { 1080 } else {
@@ -869,9 +1084,62 @@ int aac_command_thread(struct aac_dev * dev)
869 1084
870 u32 time_now, time_last; 1085 u32 time_now, time_last;
871 unsigned long flagv; 1086 unsigned long flagv;
872 1087 unsigned num;
1088 struct hw_fib ** hw_fib_pool, ** hw_fib_p;
1089 struct fib ** fib_pool, ** fib_p;
1090
1091 /* Sniff events */
1092 if ((aifcmd->command ==
1093 cpu_to_le32(AifCmdEventNotify)) ||
1094 (aifcmd->command ==
1095 cpu_to_le32(AifCmdJobProgress))) {
1096 aac_handle_aif(dev, fib);
1097 }
1098
873 time_now = jiffies/HZ; 1099 time_now = jiffies/HZ;
874 1100
1101 /*
1102 * Warning: no sleep allowed while
1103 * holding spinlock. We take the estimate
1104 * and pre-allocate a set of fibs outside the
1105 * lock.
1106 */
1107 num = le32_to_cpu(dev->init->AdapterFibsSize)
1108 / sizeof(struct hw_fib); /* some extra */
1109 spin_lock_irqsave(&dev->fib_lock, flagv);
1110 entry = dev->fib_list.next;
1111 while (entry != &dev->fib_list) {
1112 entry = entry->next;
1113 ++num;
1114 }
1115 spin_unlock_irqrestore(&dev->fib_lock, flagv);
1116 hw_fib_pool = NULL;
1117 fib_pool = NULL;
1118 if (num
1119 && ((hw_fib_pool = kmalloc(sizeof(struct hw_fib *) * num, GFP_KERNEL)))
1120 && ((fib_pool = kmalloc(sizeof(struct fib *) * num, GFP_KERNEL)))) {
1121 hw_fib_p = hw_fib_pool;
1122 fib_p = fib_pool;
1123 while (hw_fib_p < &hw_fib_pool[num]) {
1124 if (!(*(hw_fib_p++) = kmalloc(sizeof(struct hw_fib), GFP_KERNEL))) {
1125 --hw_fib_p;
1126 break;
1127 }
1128 if (!(*(fib_p++) = kmalloc(sizeof(struct fib), GFP_KERNEL))) {
1129 kfree(*(--hw_fib_p));
1130 break;
1131 }
1132 }
1133 if ((num = hw_fib_p - hw_fib_pool) == 0) {
1134 kfree(fib_pool);
1135 fib_pool = NULL;
1136 kfree(hw_fib_pool);
1137 hw_fib_pool = NULL;
1138 }
1139 } else if (hw_fib_pool) {
1140 kfree(hw_fib_pool);
1141 hw_fib_pool = NULL;
1142 }
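
Note on the block above: per-recipient GFP_ATOMIC allocations taken under dev->fib_lock are replaced by a pool sized from an estimate (AdapterFibsSize divided by the FIB size, plus the number of registered fib contexts), filled with GFP_KERNEL before the lock is taken; entries are handed out under the lock and leftovers freed afterwards. A standalone sketch of the preallocate-then-consume pattern, with a pthread mutex standing in for the spinlock (sizes are illustrative):

#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;

int main(void)
{
	size_t num = 4, used = 0;          /* estimated number of consumers */
	void **pool;

	/* 1. Allocate everything we might need while sleeping is allowed. */
	pool = calloc(num, sizeof(*pool));
	if (!pool)
		return 1;
	for (size_t i = 0; i < num; i++)
		pool[i] = malloc(128);     /* may sleep; outside the lock   */

	/* 2. Hand entries out under the lock; no allocator calls here.    */
	pthread_mutex_lock(&lock);
	for (size_t i = 0; i < 2 && used < num; i++) {  /* 2 real consumers */
		void *buf = pool[used];

		pool[used++] = NULL;       /* mark as consumed              */
		(void)buf;                 /* ...would be queued to a waiter */
	}
	pthread_mutex_unlock(&lock);

	/* 3. Free whatever was not consumed.                              */
	for (size_t i = 0; i < num; i++)
		free(pool[i]);             /* free(NULL) is a no-op         */
	free(pool);
	printf("consumed %zu of %zu preallocated buffers\n", used, num);
	return 0;
}
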
875 spin_lock_irqsave(&dev->fib_lock, flagv); 1143 spin_lock_irqsave(&dev->fib_lock, flagv);
876 entry = dev->fib_list.next; 1144 entry = dev->fib_list.next;
877 /* 1145 /*
@@ -880,6 +1148,8 @@ int aac_command_thread(struct aac_dev * dev)
880 * fib, and then set the event to wake up the 1148 * fib, and then set the event to wake up the
881 * thread that is waiting for it. 1149 * thread that is waiting for it.
882 */ 1150 */
1151 hw_fib_p = hw_fib_pool;
1152 fib_p = fib_pool;
883 while (entry != &dev->fib_list) { 1153 while (entry != &dev->fib_list) {
884 /* 1154 /*
885 * Extract the fibctx 1155 * Extract the fibctx
@@ -912,9 +1182,11 @@ int aac_command_thread(struct aac_dev * dev)
912 * Warning: no sleep allowed while 1182 * Warning: no sleep allowed while
913 * holding spinlock 1183 * holding spinlock
914 */ 1184 */
915 hw_newfib = kmalloc(sizeof(struct hw_fib), GFP_ATOMIC); 1185 if (hw_fib_p < &hw_fib_pool[num]) {
916 newfib = kmalloc(sizeof(struct fib), GFP_ATOMIC); 1186 hw_newfib = *hw_fib_p;
917 if (newfib && hw_newfib) { 1187 *(hw_fib_p++) = NULL;
1188 newfib = *fib_p;
1189 *(fib_p++) = NULL;
918 /* 1190 /*
919 * Make the copy of the FIB 1191 * Make the copy of the FIB
920 */ 1192 */
@@ -929,15 +1201,11 @@ int aac_command_thread(struct aac_dev * dev)
929 fibctx->count++; 1201 fibctx->count++;
930 /* 1202 /*
931 * Set the event to wake up the 1203 * Set the event to wake up the
932 * thread that will waiting. 1204 * thread that is waiting.
933 */ 1205 */
934 up(&fibctx->wait_sem); 1206 up(&fibctx->wait_sem);
935 } else { 1207 } else {
936 printk(KERN_WARNING "aifd: didn't allocate NewFib.\n"); 1208 printk(KERN_WARNING "aifd: didn't allocate NewFib.\n");
937 if(newfib)
938 kfree(newfib);
939 if(hw_newfib)
940 kfree(hw_newfib);
941 } 1209 }
942 entry = entry->next; 1210 entry = entry->next;
943 } 1211 }
@@ -947,21 +1215,38 @@ int aac_command_thread(struct aac_dev * dev)
947 *(__le32 *)hw_fib->data = cpu_to_le32(ST_OK); 1215 *(__le32 *)hw_fib->data = cpu_to_le32(ST_OK);
948 fib_adapter_complete(fib, sizeof(u32)); 1216 fib_adapter_complete(fib, sizeof(u32));
949 spin_unlock_irqrestore(&dev->fib_lock, flagv); 1217 spin_unlock_irqrestore(&dev->fib_lock, flagv);
1218 /* Free up the remaining resources */
1219 hw_fib_p = hw_fib_pool;
1220 fib_p = fib_pool;
1221 while (hw_fib_p < &hw_fib_pool[num]) {
1222 if (*hw_fib_p)
1223 kfree(*hw_fib_p);
1224 if (*fib_p)
1225 kfree(*fib_p);
1226 ++fib_p;
1227 ++hw_fib_p;
1228 }
1229 if (hw_fib_pool)
1230 kfree(hw_fib_pool);
1231 if (fib_pool)
1232 kfree(fib_pool);
950 } 1233 }
951 spin_lock_irqsave(queues->queue[HostNormCmdQueue].lock, flags);
952 kfree(fib); 1234 kfree(fib);
1235 spin_lock_irqsave(dev->queues->queue[HostNormCmdQueue].lock, flags);
953 } 1236 }
954 /* 1237 /*
955 * There are no more AIF's 1238 * There are no more AIF's
956 */ 1239 */
957 spin_unlock_irqrestore(queues->queue[HostNormCmdQueue].lock, flags); 1240 spin_unlock_irqrestore(dev->queues->queue[HostNormCmdQueue].lock, flags);
958 schedule(); 1241 schedule();
959 1242
960 if(signal_pending(current)) 1243 if(signal_pending(current))
961 break; 1244 break;
962 set_current_state(TASK_INTERRUPTIBLE); 1245 set_current_state(TASK_INTERRUPTIBLE);
963 } 1246 }
964 remove_wait_queue(&queues->queue[HostNormCmdQueue].cmdready, &wait); 1247 if (dev->queues)
1248 remove_wait_queue(&dev->queues->queue[HostNormCmdQueue].cmdready, &wait);
965 dev->aif_thread = 0; 1249 dev->aif_thread = 0;
966 complete_and_exit(&dev->aif_completion, 0); 1250 complete_and_exit(&dev->aif_completion, 0);
1251 return 0;
967} 1252}
diff --git a/drivers/scsi/aacraid/linit.c b/drivers/scsi/aacraid/linit.c
index 4ff29d7f5825..de8490a92831 100644
--- a/drivers/scsi/aacraid/linit.c
+++ b/drivers/scsi/aacraid/linit.c
@@ -748,7 +748,8 @@ static int __devinit aac_probe_one(struct pci_dev *pdev,
748 unique_id++; 748 unique_id++;
749 } 749 }
750 750
751 if (pci_enable_device(pdev)) 751 error = pci_enable_device(pdev);
752 if (error)
752 goto out; 753 goto out;
753 754
754 if (pci_set_dma_mask(pdev, 0xFFFFFFFFULL) || 755 if (pci_set_dma_mask(pdev, 0xFFFFFFFFULL) ||
@@ -772,6 +773,7 @@ static int __devinit aac_probe_one(struct pci_dev *pdev,
772 shost->irq = pdev->irq; 773 shost->irq = pdev->irq;
773 shost->base = pci_resource_start(pdev, 0); 774 shost->base = pci_resource_start(pdev, 0);
774 shost->unique_id = unique_id; 775 shost->unique_id = unique_id;
776 shost->max_cmd_len = 16;
775 777
776 aac = (struct aac_dev *)shost->hostdata; 778 aac = (struct aac_dev *)shost->hostdata;
777 aac->scsi_host_ptr = shost; 779 aac->scsi_host_ptr = shost;
@@ -799,7 +801,9 @@ static int __devinit aac_probe_one(struct pci_dev *pdev,
799 goto out_free_fibs; 801 goto out_free_fibs;
800 802
801 aac->maximum_num_channels = aac_drivers[index].channels; 803 aac->maximum_num_channels = aac_drivers[index].channels;
802 aac_get_adapter_info(aac); 804 error = aac_get_adapter_info(aac);
805 if (error < 0)
806 goto out_deinit;
803 807
804 /* 808 /*
805 * Lets override negotiations and drop the maximum SG limit to 34 809 * Lets override negotiations and drop the maximum SG limit to 34
@@ -927,8 +931,8 @@ static int __init aac_init(void)
927 printk(KERN_INFO "Adaptec %s driver (%s)\n", 931 printk(KERN_INFO "Adaptec %s driver (%s)\n",
928 AAC_DRIVERNAME, aac_driver_version); 932 AAC_DRIVERNAME, aac_driver_version);
929 933
930 error = pci_module_init(&aac_pci_driver); 934 error = pci_register_driver(&aac_pci_driver);
931 if (error) 935 if (error < 0)
932 return error; 936 return error;
933 937
934 aac_cfg_major = register_chrdev( 0, "aac", &aac_cfg_fops); 938 aac_cfg_major = register_chrdev( 0, "aac", &aac_cfg_fops);
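
Note on the linit.c hunks above: the probe path now propagates the return values of pci_enable_device() and aac_get_adapter_info(), jumping to the existing out / out_deinit labels, and module init checks pci_register_driver() instead of the older pci_module_init(). A standalone sketch of the goto-unwind idiom those checks rely on (the step functions are hypothetical stand-ins):

#include <stdio.h>

/* Hypothetical setup steps: 0 on success, negative on error. */
static int enable_device(void)    { return 0; }
static int get_adapter_info(void) { return -1; }   /* simulate a failure */
static void deinit_device(void)   { printf("deinit\n"); }

static int probe_one(void)
{
	int error;

	error = enable_device();
	if (error)
		goto out;                  /* nothing to undo yet          */

	error = get_adapter_info();
	if (error < 0)
		goto out_deinit;           /* undo only what succeeded     */

	return 0;

out_deinit:
	deinit_device();
out:
	return error;
}

int main(void)
{
	printf("probe_one() = %d\n", probe_one());
	return 0;
}
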
diff --git a/drivers/scsi/aic7xxx/aic7770_osm.c b/drivers/scsi/aic7xxx/aic7770_osm.c
index 70c5fb59c9ea..d754b3267863 100644
--- a/drivers/scsi/aic7xxx/aic7770_osm.c
+++ b/drivers/scsi/aic7xxx/aic7770_osm.c
@@ -112,6 +112,9 @@ aic7770_remove(struct device *dev)
112 struct ahc_softc *ahc = dev_get_drvdata(dev); 112 struct ahc_softc *ahc = dev_get_drvdata(dev);
113 u_long s; 113 u_long s;
114 114
115 if (ahc->platform_data && ahc->platform_data->host)
116 scsi_remove_host(ahc->platform_data->host);
117
115 ahc_lock(ahc, &s); 118 ahc_lock(ahc, &s);
116 ahc_intr_enable(ahc, FALSE); 119 ahc_intr_enable(ahc, FALSE);
117 ahc_unlock(ahc, &s); 120 ahc_unlock(ahc, &s);
diff --git a/drivers/scsi/aic7xxx/aic79xx_osm.c b/drivers/scsi/aic7xxx/aic79xx_osm.c
index 6b6d4e287793..95c285cc83e4 100644
--- a/drivers/scsi/aic7xxx/aic79xx_osm.c
+++ b/drivers/scsi/aic7xxx/aic79xx_osm.c
@@ -1192,11 +1192,6 @@ ahd_platform_free(struct ahd_softc *ahd)
1192 int i, j; 1192 int i, j;
1193 1193
1194 if (ahd->platform_data != NULL) { 1194 if (ahd->platform_data != NULL) {
1195 if (ahd->platform_data->host != NULL) {
1196 scsi_remove_host(ahd->platform_data->host);
1197 scsi_host_put(ahd->platform_data->host);
1198 }
1199
1200 /* destroy all of the device and target objects */ 1195 /* destroy all of the device and target objects */
1201 for (i = 0; i < AHD_NUM_TARGETS; i++) { 1196 for (i = 0; i < AHD_NUM_TARGETS; i++) {
1202 starget = ahd->platform_data->starget[i]; 1197 starget = ahd->platform_data->starget[i];
@@ -1226,6 +1221,9 @@ ahd_platform_free(struct ahd_softc *ahd)
1226 release_mem_region(ahd->platform_data->mem_busaddr, 1221 release_mem_region(ahd->platform_data->mem_busaddr,
1227 0x1000); 1222 0x1000);
1228 } 1223 }
1224 if (ahd->platform_data->host)
1225 scsi_host_put(ahd->platform_data->host);
1226
1229 free(ahd->platform_data, M_DEVBUF); 1227 free(ahd->platform_data, M_DEVBUF);
1230 } 1228 }
1231} 1229}
diff --git a/drivers/scsi/aic7xxx/aic79xx_osm_pci.c b/drivers/scsi/aic7xxx/aic79xx_osm_pci.c
index 390b53852d4b..bf360ae021ab 100644
--- a/drivers/scsi/aic7xxx/aic79xx_osm_pci.c
+++ b/drivers/scsi/aic7xxx/aic79xx_osm_pci.c
@@ -95,6 +95,9 @@ ahd_linux_pci_dev_remove(struct pci_dev *pdev)
95 struct ahd_softc *ahd = pci_get_drvdata(pdev); 95 struct ahd_softc *ahd = pci_get_drvdata(pdev);
96 u_long s; 96 u_long s;
97 97
98 if (ahd->platform_data && ahd->platform_data->host)
99 scsi_remove_host(ahd->platform_data->host);
100
98 ahd_lock(ahd, &s); 101 ahd_lock(ahd, &s);
99 ahd_intr_enable(ahd, FALSE); 102 ahd_intr_enable(ahd, FALSE);
100 ahd_unlock(ahd, &s); 103 ahd_unlock(ahd, &s);
diff --git a/drivers/scsi/aic7xxx/aic7xxx_osm.c b/drivers/scsi/aic7xxx/aic7xxx_osm.c
index 876d1de8480d..6ee1435d37fa 100644
--- a/drivers/scsi/aic7xxx/aic7xxx_osm.c
+++ b/drivers/scsi/aic7xxx/aic7xxx_osm.c
@@ -1209,11 +1209,6 @@ ahc_platform_free(struct ahc_softc *ahc)
1209 int i, j; 1209 int i, j;
1210 1210
1211 if (ahc->platform_data != NULL) { 1211 if (ahc->platform_data != NULL) {
1212 if (ahc->platform_data->host != NULL) {
1213 scsi_remove_host(ahc->platform_data->host);
1214 scsi_host_put(ahc->platform_data->host);
1215 }
1216
1217 /* destroy all of the device and target objects */ 1212 /* destroy all of the device and target objects */
1218 for (i = 0; i < AHC_NUM_TARGETS; i++) { 1213 for (i = 0; i < AHC_NUM_TARGETS; i++) {
1219 starget = ahc->platform_data->starget[i]; 1214 starget = ahc->platform_data->starget[i];
@@ -1242,6 +1237,9 @@ ahc_platform_free(struct ahc_softc *ahc)
1242 0x1000); 1237 0x1000);
1243 } 1238 }
1244 1239
1240 if (ahc->platform_data->host)
1241 scsi_host_put(ahc->platform_data->host);
1242
1245 free(ahc->platform_data, M_DEVBUF); 1243 free(ahc->platform_data, M_DEVBUF);
1246 } 1244 }
1247} 1245}
diff --git a/drivers/scsi/aic7xxx/aic7xxx_osm_pci.c b/drivers/scsi/aic7xxx/aic7xxx_osm_pci.c
index 3ce77ddc889e..cb30d9c1153d 100644
--- a/drivers/scsi/aic7xxx/aic7xxx_osm_pci.c
+++ b/drivers/scsi/aic7xxx/aic7xxx_osm_pci.c
@@ -143,6 +143,9 @@ ahc_linux_pci_dev_remove(struct pci_dev *pdev)
143 struct ahc_softc *ahc = pci_get_drvdata(pdev); 143 struct ahc_softc *ahc = pci_get_drvdata(pdev);
144 u_long s; 144 u_long s;
145 145
146 if (ahc->platform_data && ahc->platform_data->host)
147 scsi_remove_host(ahc->platform_data->host);
148
146 ahc_lock(ahc, &s); 149 ahc_lock(ahc, &s);
147 ahc_intr_enable(ahc, FALSE); 150 ahc_intr_enable(ahc, FALSE);
148 ahc_unlock(ahc, &s); 151 ahc_unlock(ahc, &s);
diff --git a/drivers/scsi/hosts.c b/drivers/scsi/hosts.c
index f2a72d33132c..02fe371b0ab8 100644
--- a/drivers/scsi/hosts.c
+++ b/drivers/scsi/hosts.c
@@ -176,6 +176,7 @@ void scsi_remove_host(struct Scsi_Host *shost)
176 transport_unregister_device(&shost->shost_gendev); 176 transport_unregister_device(&shost->shost_gendev);
177 class_device_unregister(&shost->shost_classdev); 177 class_device_unregister(&shost->shost_classdev);
178 device_del(&shost->shost_gendev); 178 device_del(&shost->shost_gendev);
179 scsi_proc_hostdir_rm(shost->hostt);
179} 180}
180EXPORT_SYMBOL(scsi_remove_host); 181EXPORT_SYMBOL(scsi_remove_host);
181 182
@@ -262,7 +263,6 @@ static void scsi_host_dev_release(struct device *dev)
262 if (shost->work_q) 263 if (shost->work_q)
263 destroy_workqueue(shost->work_q); 264 destroy_workqueue(shost->work_q);
264 265
265 scsi_proc_hostdir_rm(shost->hostt);
266 scsi_destroy_command_freelist(shost); 266 scsi_destroy_command_freelist(shost);
267 kfree(shost->shost_data); 267 kfree(shost->shost_data);
268 268
diff --git a/drivers/scsi/lpfc/lpfc_attr.c b/drivers/scsi/lpfc/lpfc_attr.c
index 86eaf6d408d5..acae7c48ef7d 100644
--- a/drivers/scsi/lpfc/lpfc_attr.c
+++ b/drivers/scsi/lpfc/lpfc_attr.c
@@ -973,10 +973,10 @@ lpfc_get_host_fabric_name (struct Scsi_Host *shost)
973 if ((phba->fc_flag & FC_FABRIC) || 973 if ((phba->fc_flag & FC_FABRIC) ||
974 ((phba->fc_topology == TOPOLOGY_LOOP) && 974 ((phba->fc_topology == TOPOLOGY_LOOP) &&
975 (phba->fc_flag & FC_PUBLIC_LOOP))) 975 (phba->fc_flag & FC_PUBLIC_LOOP)))
976 node_name = wwn_to_u64(phba->fc_fabparam.nodeName.wwn); 976 node_name = wwn_to_u64(phba->fc_fabparam.nodeName.u.wwn);
977 else 977 else
978 /* fabric is local port if there is no F/FL_Port */ 978 /* fabric is local port if there is no F/FL_Port */
979 node_name = wwn_to_u64(phba->fc_nodename.wwn); 979 node_name = wwn_to_u64(phba->fc_nodename.u.wwn);
980 980
981 spin_unlock_irq(shost->host_lock); 981 spin_unlock_irq(shost->host_lock);
982 982
@@ -1110,7 +1110,7 @@ lpfc_get_starget_node_name(struct scsi_target *starget)
1110 /* Search the mapped list for this target ID */ 1110 /* Search the mapped list for this target ID */
1111 list_for_each_entry(ndlp, &phba->fc_nlpmap_list, nlp_listp) { 1111 list_for_each_entry(ndlp, &phba->fc_nlpmap_list, nlp_listp) {
1112 if (starget->id == ndlp->nlp_sid) { 1112 if (starget->id == ndlp->nlp_sid) {
1113 node_name = wwn_to_u64(ndlp->nlp_nodename.wwn); 1113 node_name = wwn_to_u64(ndlp->nlp_nodename.u.wwn);
1114 break; 1114 break;
1115 } 1115 }
1116 } 1116 }
@@ -1131,7 +1131,7 @@ lpfc_get_starget_port_name(struct scsi_target *starget)
1131 /* Search the mapped list for this target ID */ 1131 /* Search the mapped list for this target ID */
1132 list_for_each_entry(ndlp, &phba->fc_nlpmap_list, nlp_listp) { 1132 list_for_each_entry(ndlp, &phba->fc_nlpmap_list, nlp_listp) {
1133 if (starget->id == ndlp->nlp_sid) { 1133 if (starget->id == ndlp->nlp_sid) {
1134 port_name = wwn_to_u64(ndlp->nlp_portname.wwn); 1134 port_name = wwn_to_u64(ndlp->nlp_portname.u.wwn);
1135 break; 1135 break;
1136 } 1136 }
1137 } 1137 }
diff --git a/drivers/scsi/lpfc/lpfc_hbadisc.c b/drivers/scsi/lpfc/lpfc_hbadisc.c
index 4fb8eb0c84cf..56052f4510c3 100644
--- a/drivers/scsi/lpfc/lpfc_hbadisc.c
+++ b/drivers/scsi/lpfc/lpfc_hbadisc.c
@@ -1019,8 +1019,8 @@ lpfc_register_remote_port(struct lpfc_hba * phba,
1019 struct fc_rport_identifiers rport_ids; 1019 struct fc_rport_identifiers rport_ids;
1020 1020
1021 /* Remote port has reappeared. Re-register w/ FC transport */ 1021 /* Remote port has reappeared. Re-register w/ FC transport */
1022 rport_ids.node_name = wwn_to_u64(ndlp->nlp_nodename.wwn); 1022 rport_ids.node_name = wwn_to_u64(ndlp->nlp_nodename.u.wwn);
1023 rport_ids.port_name = wwn_to_u64(ndlp->nlp_portname.wwn); 1023 rport_ids.port_name = wwn_to_u64(ndlp->nlp_portname.u.wwn);
1024 rport_ids.port_id = ndlp->nlp_DID; 1024 rport_ids.port_id = ndlp->nlp_DID;
1025 rport_ids.roles = FC_RPORT_ROLE_UNKNOWN; 1025 rport_ids.roles = FC_RPORT_ROLE_UNKNOWN;
1026 if (ndlp->nlp_type & NLP_FCP_TARGET) 1026 if (ndlp->nlp_type & NLP_FCP_TARGET)
diff --git a/drivers/scsi/lpfc/lpfc_hw.h b/drivers/scsi/lpfc/lpfc_hw.h
index 047a87c26cc0..86c41981188b 100644
--- a/drivers/scsi/lpfc/lpfc_hw.h
+++ b/drivers/scsi/lpfc/lpfc_hw.h
@@ -280,9 +280,9 @@ struct lpfc_name {
280#define NAME_CCITT_GR_TYPE 0xE 280#define NAME_CCITT_GR_TYPE 0xE
281 uint8_t IEEEextLsb; /* FC Word 0, bit 16:23, IEEE extended Lsb */ 281 uint8_t IEEEextLsb; /* FC Word 0, bit 16:23, IEEE extended Lsb */
282 uint8_t IEEE[6]; /* FC IEEE address */ 282 uint8_t IEEE[6]; /* FC IEEE address */
283 }; 283 } s;
284 uint8_t wwn[8]; 284 uint8_t wwn[8];
285 }; 285 } u;
286}; 286};
287 287
288struct csp { 288struct csp {
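
Note on the lpfc_hw.h hunk above: the previously anonymous struct and union inside struct lpfc_name are given the member names s and u, so callers now spell the raw bytes ...u.wwn and the structured fields ...u.s.IEEE, which is exactly what the lpfc_attr.c, lpfc_hbadisc.c and lpfc_init.c hunks switch to; wwn_to_u64() folds those eight bytes, most significant first, into a 64-bit value. A standalone sketch of the named-union layout and the conversion (the field names are simplified, not the full lpfc definition):

#include <stdint.h>
#include <stdio.h>

struct wwname {
	union {
		struct {                 /* structured view of the WWN */
			uint8_t nameType;
			uint8_t IEEEext;
			uint8_t IEEE[6];
		} s;
		uint8_t wwn[8];          /* raw on-the-wire bytes      */
	} u;
};

/* Fold the 8 WWN bytes big-endian into a 64-bit integer, as the
 * kernel's wwn_to_u64() helper does. */
static uint64_t wwn_to_u64(const uint8_t wwn[8])
{
	uint64_t v = 0;

	for (int i = 0; i < 8; i++)
		v = (v << 8) | wwn[i];
	return v;
}

int main(void)
{
	struct wwname n = {
		.u.wwn = { 0x20, 0x00, 0x00, 0xe0, 0x8b, 0x01, 0x02, 0x03 }
	};

	printf("node name = 0x%016llx (IEEE[0]=%02x)\n",
	       (unsigned long long)wwn_to_u64(n.u.wwn), n.u.s.IEEE[0]);
	return 0;
}
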
diff --git a/drivers/scsi/lpfc/lpfc_init.c b/drivers/scsi/lpfc/lpfc_init.c
index 454058f655db..0856ff7d3b33 100644
--- a/drivers/scsi/lpfc/lpfc_init.c
+++ b/drivers/scsi/lpfc/lpfc_init.c
@@ -285,7 +285,7 @@ lpfc_config_port_post(struct lpfc_hba * phba)
285 if (phba->SerialNumber[0] == 0) { 285 if (phba->SerialNumber[0] == 0) {
286 uint8_t *outptr; 286 uint8_t *outptr;
287 287
288 outptr = (uint8_t *) & phba->fc_nodename.IEEE[0]; 288 outptr = &phba->fc_nodename.u.s.IEEE[0];
289 for (i = 0; i < 12; i++) { 289 for (i = 0; i < 12; i++) {
290 status = *outptr++; 290 status = *outptr++;
291 j = ((status & 0xf0) >> 4); 291 j = ((status & 0xf0) >> 4);
@@ -1523,8 +1523,8 @@ lpfc_pci_probe_one(struct pci_dev *pdev, const struct pci_device_id *pid)
1523 * Must done after lpfc_sli_hba_setup() 1523 * Must done after lpfc_sli_hba_setup()
1524 */ 1524 */
1525 1525
1526 fc_host_node_name(host) = wwn_to_u64(phba->fc_nodename.wwn); 1526 fc_host_node_name(host) = wwn_to_u64(phba->fc_nodename.u.wwn);
1527 fc_host_port_name(host) = wwn_to_u64(phba->fc_portname.wwn); 1527 fc_host_port_name(host) = wwn_to_u64(phba->fc_portname.u.wwn);
1528 fc_host_supported_classes(host) = FC_COS_CLASS3; 1528 fc_host_supported_classes(host) = FC_COS_CLASS3;
1529 1529
1530 memset(fc_host_supported_fc4s(host), 0, 1530 memset(fc_host_supported_fc4s(host), 0,
diff --git a/drivers/scsi/megaraid.c b/drivers/scsi/megaraid.c
index 6f308ebe3e79..61a6fd810bb4 100644
--- a/drivers/scsi/megaraid.c
+++ b/drivers/scsi/megaraid.c
@@ -621,8 +621,6 @@ mega_build_cmd(adapter_t *adapter, Scsi_Cmnd *cmd, int *busy)
 	if(islogical) {
 		switch (cmd->cmnd[0]) {
 		case TEST_UNIT_READY:
-			memset(cmd->request_buffer, 0, cmd->request_bufflen);
-
 #if MEGA_HAVE_CLUSTERING
 			/*
 			 * Do we support clustering and is the support enabled
@@ -652,11 +650,28 @@ mega_build_cmd(adapter_t *adapter, Scsi_Cmnd *cmd, int *busy)
 			return NULL;
 #endif
 
-		case MODE_SENSE:
+		case MODE_SENSE: {
+			char *buf;
+
+			if (cmd->use_sg) {
+				struct scatterlist *sg;
+
+				sg = (struct scatterlist *)cmd->request_buffer;
+				buf = kmap_atomic(sg->page, KM_IRQ0) +
+					sg->offset;
+			} else
+				buf = cmd->request_buffer;
 			memset(cmd->request_buffer, 0, cmd->cmnd[4]);
+			if (cmd->use_sg) {
+				struct scatterlist *sg;
+
+				sg = (struct scatterlist *)cmd->request_buffer;
+				kunmap_atomic(buf - sg->offset, KM_IRQ0);
+			}
 			cmd->result = (DID_OK << 16);
 			cmd->scsi_done(cmd);
 			return NULL;
+		}
 
 		case READ_CAPACITY:
 		case INQUIRY:
@@ -1685,14 +1700,23 @@ mega_rundoneq (adapter_t *adapter)
 static void
 mega_free_scb(adapter_t *adapter, scb_t *scb)
 {
+	unsigned long length;
+
 	switch( scb->dma_type ) {
 
 	case MEGA_DMA_TYPE_NONE:
 		break;
 
 	case MEGA_BULK_DATA:
+		if (scb->cmd->use_sg == 0)
+			length = scb->cmd->request_bufflen;
+		else {
+			struct scatterlist *sgl =
+				(struct scatterlist *)scb->cmd->request_buffer;
+			length = sgl->length;
+		}
 		pci_unmap_page(adapter->dev, scb->dma_h_bulkdata,
-			scb->cmd->request_bufflen, scb->dma_direction);
+			length, scb->dma_direction);
 		break;
 
 	case MEGA_SGLIST:
@@ -1741,6 +1765,7 @@ mega_build_sglist(adapter_t *adapter, scb_t *scb, u32 *buf, u32 *len)
 	struct scatterlist *sgl;
 	struct page *page;
 	unsigned long offset;
+	unsigned int length;
 	Scsi_Cmnd *cmd;
 	int sgcnt;
 	int idx;
@@ -1748,14 +1773,23 @@ mega_build_sglist(adapter_t *adapter, scb_t *scb, u32 *buf, u32 *len)
 	cmd = scb->cmd;
 
 	/* Scatter-gather not used */
-	if( !cmd->use_sg ) {
-
-		page = virt_to_page(cmd->request_buffer);
-		offset = offset_in_page(cmd->request_buffer);
+	if( cmd->use_sg == 0 || (cmd->use_sg == 1 &&
+				 !adapter->has_64bit_addr)) {
+
+		if (cmd->use_sg == 0) {
+			page = virt_to_page(cmd->request_buffer);
+			offset = offset_in_page(cmd->request_buffer);
+			length = cmd->request_bufflen;
+		} else {
+			sgl = (struct scatterlist *)cmd->request_buffer;
+			page = sgl->page;
+			offset = sgl->offset;
+			length = sgl->length;
+		}
 
 		scb->dma_h_bulkdata = pci_map_page(adapter->dev,
 						  page, offset,
-						  cmd->request_bufflen,
+						  length,
 						  scb->dma_direction);
 		scb->dma_type = MEGA_BULK_DATA;
 
@@ -1765,14 +1799,14 @@ mega_build_sglist(adapter_t *adapter, scb_t *scb, u32 *buf, u32 *len)
 	 */
 	if( adapter->has_64bit_addr ) {
 		scb->sgl64[0].address = scb->dma_h_bulkdata;
-		scb->sgl64[0].length = cmd->request_bufflen;
+		scb->sgl64[0].length = length;
 		*buf = (u32)scb->sgl_dma_addr;
-		*len = (u32)cmd->request_bufflen;
+		*len = (u32)length;
 		return 1;
 	}
 	else {
 		*buf = (u32)scb->dma_h_bulkdata;
-		*len = (u32)cmd->request_bufflen;
+		*len = (u32)length;
 	}
 	return 0;
 }
@@ -1791,27 +1825,23 @@ mega_build_sglist(adapter_t *adapter, scb_t *scb, u32 *buf, u32 *len)
 
 	if( sgcnt > adapter->sglen ) BUG();
 
+	*len = 0;
+
 	for( idx = 0; idx < sgcnt; idx++, sgl++ ) {
 
 		if( adapter->has_64bit_addr ) {
 			scb->sgl64[idx].address = sg_dma_address(sgl);
-			scb->sgl64[idx].length = sg_dma_len(sgl);
+			*len += scb->sgl64[idx].length = sg_dma_len(sgl);
 		}
 		else {
 			scb->sgl[idx].address = sg_dma_address(sgl);
-			scb->sgl[idx].length = sg_dma_len(sgl);
+			*len += scb->sgl[idx].length = sg_dma_len(sgl);
 		}
 	}
 
 	/* Reset pointer and length fields */
 	*buf = scb->sgl_dma_addr;
 
-	/*
-	 * For passthru command, dataxferlen must be set, even for commands
-	 * with a sg list
-	 */
-	*len = (u32)cmd->request_bufflen;
-
 	/* Return count of SG requests */
 	return sgcnt;
 }
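A note on the MODE_SENSE hunk above, for readers unfamiliar with the pre-2.6.24 SCSI data-buffer convention: when cmd->use_sg is non-zero, cmd->request_buffer points at a struct scatterlist array rather than at the data itself, so the first segment has to be mapped with kmap_atomic() before it can be touched. The sketch below is illustrative only and not part of the patch; the helper name zero_first_sg_segment is made up, and it zeroes through the mapped pointer.

static void zero_first_sg_segment(Scsi_Cmnd *cmd, unsigned int len)
{
	if (cmd->use_sg) {
		struct scatterlist *sg =
			(struct scatterlist *)cmd->request_buffer;
		char *buf = kmap_atomic(sg->page, KM_IRQ0) + sg->offset;

		memset(buf, 0, len);			/* zero the mapped segment */
		kunmap_atomic(buf - sg->offset, KM_IRQ0);
	} else
		memset(cmd->request_buffer, 0, len);	/* flat kernel buffer */
}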
diff --git a/drivers/scsi/megaraid/Kconfig.megaraid b/drivers/scsi/megaraid/Kconfig.megaraid
index 917d591d90b2..7363e12663ac 100644
--- a/drivers/scsi/megaraid/Kconfig.megaraid
+++ b/drivers/scsi/megaraid/Kconfig.megaraid
@@ -76,3 +76,12 @@ config MEGARAID_LEGACY
76 To compile this driver as a module, choose M here: the 76 To compile this driver as a module, choose M here: the
77 module will be called megaraid 77 module will be called megaraid
78endif 78endif
79
80config MEGARAID_SAS
81 tristate "LSI Logic MegaRAID SAS RAID Module"
82 depends on PCI && SCSI
83 help
 84	  Module for LSI Logic's SAS based RAID controllers.
 85	  To compile this driver as a module, choose M here: the
 86	  module will be called megaraid_sas
87
diff --git a/drivers/scsi/megaraid/Makefile b/drivers/scsi/megaraid/Makefile
index 6dd99f275722..f469915b97c3 100644
--- a/drivers/scsi/megaraid/Makefile
+++ b/drivers/scsi/megaraid/Makefile
@@ -1,2 +1,3 @@
1obj-$(CONFIG_MEGARAID_MM) += megaraid_mm.o 1obj-$(CONFIG_MEGARAID_MM) += megaraid_mm.o
2obj-$(CONFIG_MEGARAID_MAILBOX) += megaraid_mbox.o 2obj-$(CONFIG_MEGARAID_MAILBOX) += megaraid_mbox.o
3obj-$(CONFIG_MEGARAID_SAS) += megaraid_sas.o
diff --git a/drivers/scsi/megaraid/megaraid_sas.c b/drivers/scsi/megaraid/megaraid_sas.c
new file mode 100644
index 000000000000..c3f637395734
--- /dev/null
+++ b/drivers/scsi/megaraid/megaraid_sas.c
@@ -0,0 +1,2806 @@
1/*
2 *
3 * Linux MegaRAID driver for SAS based RAID controllers
4 *
5 * Copyright (c) 2003-2005 LSI Logic Corporation.
6 *
7 * This program is free software; you can redistribute it and/or
8 * modify it under the terms of the GNU General Public License
9 * as published by the Free Software Foundation; either version
10 * 2 of the License, or (at your option) any later version.
11 *
12 * FILE : megaraid_sas.c
13 * Version : v00.00.02.00-rc4
14 *
15 * Authors:
16 * Sreenivas Bagalkote <Sreenivas.Bagalkote@lsil.com>
17 * Sumant Patro <Sumant.Patro@lsil.com>
18 *
19 * List of supported controllers
20 *
21 * OEM Product Name VID DID SSVID SSID
22 * --- ------------ --- --- ---- ----
23 */
24
25#include <linux/kernel.h>
26#include <linux/types.h>
27#include <linux/pci.h>
28#include <linux/list.h>
29#include <linux/version.h>
30#include <linux/moduleparam.h>
31#include <linux/module.h>
32#include <linux/spinlock.h>
33#include <linux/interrupt.h>
34#include <linux/delay.h>
35#include <linux/uio.h>
36#include <asm/uaccess.h>
37#include <linux/fs.h>
38#include <linux/compat.h>
39
40#include <scsi/scsi.h>
41#include <scsi/scsi_cmnd.h>
42#include <scsi/scsi_device.h>
43#include <scsi/scsi_host.h>
44#include "megaraid_sas.h"
45
46MODULE_LICENSE("GPL");
47MODULE_VERSION(MEGASAS_VERSION);
48MODULE_AUTHOR("sreenivas.bagalkote@lsil.com");
49MODULE_DESCRIPTION("LSI Logic MegaRAID SAS Driver");
50
51/*
52 * PCI ID table for all supported controllers
53 */
54static struct pci_device_id megasas_pci_table[] = {
55
56 {
57 PCI_VENDOR_ID_LSI_LOGIC,
58 PCI_DEVICE_ID_LSI_SAS1064R,
59 PCI_ANY_ID,
60 PCI_ANY_ID,
61 },
62 {
63 PCI_VENDOR_ID_DELL,
64 PCI_DEVICE_ID_DELL_PERC5,
65 PCI_ANY_ID,
66 PCI_ANY_ID,
67 },
68 {0} /* Terminating entry */
69};
70
71MODULE_DEVICE_TABLE(pci, megasas_pci_table);
72
73static int megasas_mgmt_majorno;
74static struct megasas_mgmt_info megasas_mgmt_info;
75static struct fasync_struct *megasas_async_queue;
76static DECLARE_MUTEX(megasas_async_queue_mutex);
77
78/**
79 * megasas_get_cmd - Get a command from the free pool
80 * @instance: Adapter soft state
81 *
82 * Returns a free command from the pool
83 */
84static inline struct megasas_cmd *megasas_get_cmd(struct megasas_instance
85 *instance)
86{
87 unsigned long flags;
88 struct megasas_cmd *cmd = NULL;
89
90 spin_lock_irqsave(&instance->cmd_pool_lock, flags);
91
92 if (!list_empty(&instance->cmd_pool)) {
93 cmd = list_entry((&instance->cmd_pool)->next,
94 struct megasas_cmd, list);
95 list_del_init(&cmd->list);
96 } else {
97 printk(KERN_ERR "megasas: Command pool empty!\n");
98 }
99
100 spin_unlock_irqrestore(&instance->cmd_pool_lock, flags);
101 return cmd;
102}
103
104/**
105 * megasas_return_cmd - Return a cmd to free command pool
106 * @instance: Adapter soft state
107 * @cmd: Command packet to be returned to free command pool
108 */
109static inline void
110megasas_return_cmd(struct megasas_instance *instance, struct megasas_cmd *cmd)
111{
112 unsigned long flags;
113
114 spin_lock_irqsave(&instance->cmd_pool_lock, flags);
115
116 cmd->scmd = NULL;
117 list_add_tail(&cmd->list, &instance->cmd_pool);
118
119 spin_unlock_irqrestore(&instance->cmd_pool_lock, flags);
120}
121
122/**
123 * megasas_enable_intr - Enables interrupts
124 * @regs: MFI register set
125 */
126static inline void
127megasas_enable_intr(struct megasas_register_set __iomem * regs)
128{
129 writel(1, &(regs)->outbound_intr_mask);
130
131 /* Dummy readl to force pci flush */
132 readl(&regs->outbound_intr_mask);
133}
134
135/**
136 * megasas_disable_intr - Disables interrupts
137 * @regs: MFI register set
138 */
139static inline void
140megasas_disable_intr(struct megasas_register_set __iomem * regs)
141{
142 u32 mask = readl(&regs->outbound_intr_mask) & (~0x00000001);
143 writel(mask, &regs->outbound_intr_mask);
144
145 /* Dummy readl to force pci flush */
146 readl(&regs->outbound_intr_mask);
147}
148
149/**
150 * megasas_issue_polled - Issues a polling command
151 * @instance: Adapter soft state
152 * @cmd: Command packet to be issued
153 *
154 * For polling, MFI requires the cmd_status to be set to 0xFF before posting.
155 */
156static int
157megasas_issue_polled(struct megasas_instance *instance, struct megasas_cmd *cmd)
158{
159 int i;
160 u32 msecs = MFI_POLL_TIMEOUT_SECS * 1000;
161
162 struct megasas_header *frame_hdr = &cmd->frame->hdr;
163
164 frame_hdr->cmd_status = 0xFF;
165 frame_hdr->flags |= MFI_FRAME_DONT_POST_IN_REPLY_QUEUE;
166
167 /*
168 * Issue the frame using inbound queue port
169 */
170 writel(cmd->frame_phys_addr >> 3,
171 &instance->reg_set->inbound_queue_port);
172
173 /*
174 * Wait for cmd_status to change
175 */
176 for (i = 0; (i < msecs) && (frame_hdr->cmd_status == 0xff); i++) {
177 rmb();
178 msleep(1);
179 }
180
181 if (frame_hdr->cmd_status == 0xff)
182 return -ETIME;
183
184 return 0;
185}
186
187/**
188 * megasas_issue_blocked_cmd - Synchronous wrapper around regular FW cmds
189 * @instance: Adapter soft state
190 * @cmd: Command to be issued
191 *
192 * This function waits on an event for the command to be returned from ISR.
193 * Used to issue ioctl commands.
194 */
195static int
196megasas_issue_blocked_cmd(struct megasas_instance *instance,
197 struct megasas_cmd *cmd)
198{
199 cmd->cmd_status = ENODATA;
200
201 writel(cmd->frame_phys_addr >> 3,
202 &instance->reg_set->inbound_queue_port);
203
204 wait_event(instance->int_cmd_wait_q, (cmd->cmd_status != ENODATA));
205
206 return 0;
207}
208
209/**
210 * megasas_issue_blocked_abort_cmd - Aborts previously issued cmd
211 * @instance: Adapter soft state
212 * @cmd_to_abort: Previously issued cmd to be aborted
213 *
 214 * MFI firmware can abort a previously issued AEN command (automatic event
 215 * notification). megasas_issue_blocked_abort_cmd() issues such an abort
 216 * cmd and blocks till it is completed.
217 */
218static int
219megasas_issue_blocked_abort_cmd(struct megasas_instance *instance,
220 struct megasas_cmd *cmd_to_abort)
221{
222 struct megasas_cmd *cmd;
223 struct megasas_abort_frame *abort_fr;
224
225 cmd = megasas_get_cmd(instance);
226
227 if (!cmd)
228 return -1;
229
230 abort_fr = &cmd->frame->abort;
231
232 /*
233 * Prepare and issue the abort frame
234 */
235 abort_fr->cmd = MFI_CMD_ABORT;
236 abort_fr->cmd_status = 0xFF;
237 abort_fr->flags = 0;
238 abort_fr->abort_context = cmd_to_abort->index;
239 abort_fr->abort_mfi_phys_addr_lo = cmd_to_abort->frame_phys_addr;
240 abort_fr->abort_mfi_phys_addr_hi = 0;
241
242 cmd->sync_cmd = 1;
243 cmd->cmd_status = 0xFF;
244
245 writel(cmd->frame_phys_addr >> 3,
246 &instance->reg_set->inbound_queue_port);
247
248 /*
249 * Wait for this cmd to complete
250 */
251 wait_event(instance->abort_cmd_wait_q, (cmd->cmd_status != 0xFF));
252
253 megasas_return_cmd(instance, cmd);
254 return 0;
255}
256
257/**
258 * megasas_make_sgl32 - Prepares 32-bit SGL
259 * @instance: Adapter soft state
260 * @scp: SCSI command from the mid-layer
261 * @mfi_sgl: SGL to be filled in
262 *
263 * If successful, this function returns the number of SG elements. Otherwise,
 264 * it returns -1.
265 */
266static inline int
267megasas_make_sgl32(struct megasas_instance *instance, struct scsi_cmnd *scp,
268 union megasas_sgl *mfi_sgl)
269{
270 int i;
271 int sge_count;
272 struct scatterlist *os_sgl;
273
274 /*
275 * Return 0 if there is no data transfer
276 */
277 if (!scp->request_buffer || !scp->request_bufflen)
278 return 0;
279
280 if (!scp->use_sg) {
281 mfi_sgl->sge32[0].phys_addr = pci_map_single(instance->pdev,
282 scp->
283 request_buffer,
284 scp->
285 request_bufflen,
286 scp->
287 sc_data_direction);
288 mfi_sgl->sge32[0].length = scp->request_bufflen;
289
290 return 1;
291 }
292
293 os_sgl = (struct scatterlist *)scp->request_buffer;
294 sge_count = pci_map_sg(instance->pdev, os_sgl, scp->use_sg,
295 scp->sc_data_direction);
296
297 for (i = 0; i < sge_count; i++, os_sgl++) {
298 mfi_sgl->sge32[i].length = sg_dma_len(os_sgl);
299 mfi_sgl->sge32[i].phys_addr = sg_dma_address(os_sgl);
300 }
301
302 return sge_count;
303}
304
305/**
306 * megasas_make_sgl64 - Prepares 64-bit SGL
307 * @instance: Adapter soft state
308 * @scp: SCSI command from the mid-layer
309 * @mfi_sgl: SGL to be filled in
310 *
311 * If successful, this function returns the number of SG elements. Otherwise,
 312 * it returns -1.
313 */
314static inline int
315megasas_make_sgl64(struct megasas_instance *instance, struct scsi_cmnd *scp,
316 union megasas_sgl *mfi_sgl)
317{
318 int i;
319 int sge_count;
320 struct scatterlist *os_sgl;
321
322 /*
323 * Return 0 if there is no data transfer
324 */
325 if (!scp->request_buffer || !scp->request_bufflen)
326 return 0;
327
328 if (!scp->use_sg) {
329 mfi_sgl->sge64[0].phys_addr = pci_map_single(instance->pdev,
330 scp->
331 request_buffer,
332 scp->
333 request_bufflen,
334 scp->
335 sc_data_direction);
336
337 mfi_sgl->sge64[0].length = scp->request_bufflen;
338
339 return 1;
340 }
341
342 os_sgl = (struct scatterlist *)scp->request_buffer;
343 sge_count = pci_map_sg(instance->pdev, os_sgl, scp->use_sg,
344 scp->sc_data_direction);
345
346 for (i = 0; i < sge_count; i++, os_sgl++) {
347 mfi_sgl->sge64[i].length = sg_dma_len(os_sgl);
348 mfi_sgl->sge64[i].phys_addr = sg_dma_address(os_sgl);
349 }
350
351 return sge_count;
352}
353
354/**
355 * megasas_build_dcdb - Prepares a direct cdb (DCDB) command
356 * @instance: Adapter soft state
357 * @scp: SCSI command
358 * @cmd: Command to be prepared in
359 *
 360 * This function prepares CDB commands. These are typically pass-through
361 * commands to the devices.
362 */
363static inline int
364megasas_build_dcdb(struct megasas_instance *instance, struct scsi_cmnd *scp,
365 struct megasas_cmd *cmd)
366{
367 u32 sge_sz;
368 int sge_bytes;
369 u32 is_logical;
370 u32 device_id;
371 u16 flags = 0;
372 struct megasas_pthru_frame *pthru;
373
374 is_logical = MEGASAS_IS_LOGICAL(scp);
375 device_id = MEGASAS_DEV_INDEX(instance, scp);
376 pthru = (struct megasas_pthru_frame *)cmd->frame;
377
378 if (scp->sc_data_direction == PCI_DMA_TODEVICE)
379 flags = MFI_FRAME_DIR_WRITE;
380 else if (scp->sc_data_direction == PCI_DMA_FROMDEVICE)
381 flags = MFI_FRAME_DIR_READ;
382 else if (scp->sc_data_direction == PCI_DMA_NONE)
383 flags = MFI_FRAME_DIR_NONE;
384
385 /*
386 * Prepare the DCDB frame
387 */
388 pthru->cmd = (is_logical) ? MFI_CMD_LD_SCSI_IO : MFI_CMD_PD_SCSI_IO;
389 pthru->cmd_status = 0x0;
390 pthru->scsi_status = 0x0;
391 pthru->target_id = device_id;
392 pthru->lun = scp->device->lun;
393 pthru->cdb_len = scp->cmd_len;
394 pthru->timeout = 0;
395 pthru->flags = flags;
396 pthru->data_xfer_len = scp->request_bufflen;
397
398 memcpy(pthru->cdb, scp->cmnd, scp->cmd_len);
399
400 /*
401 * Construct SGL
402 */
403 sge_sz = (IS_DMA64) ? sizeof(struct megasas_sge64) :
404 sizeof(struct megasas_sge32);
405
406 if (IS_DMA64) {
407 pthru->flags |= MFI_FRAME_SGL64;
408 pthru->sge_count = megasas_make_sgl64(instance, scp,
409 &pthru->sgl);
410 } else
411 pthru->sge_count = megasas_make_sgl32(instance, scp,
412 &pthru->sgl);
413
414 /*
415 * Sense info specific
416 */
417 pthru->sense_len = SCSI_SENSE_BUFFERSIZE;
418 pthru->sense_buf_phys_addr_hi = 0;
419 pthru->sense_buf_phys_addr_lo = cmd->sense_phys_addr;
420
421 sge_bytes = sge_sz * pthru->sge_count;
422
423 /*
424 * Compute the total number of frames this command consumes. FW uses
425 * this number to pull sufficient number of frames from host memory.
426 */
427 cmd->frame_count = (sge_bytes / MEGAMFI_FRAME_SIZE) +
428 ((sge_bytes % MEGAMFI_FRAME_SIZE) ? 1 : 0) + 1;
429
430 if (cmd->frame_count > 7)
431 cmd->frame_count = 8;
432
433 return cmd->frame_count;
434}
435
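The frame accounting at the end of megasas_build_dcdb() above can be read as a small helper. The sketch below is illustrative only (example_frame_count is not part of the driver); it mirrors the same arithmetic, assuming MEGAMFI_FRAME_SIZE is the 64-byte MFI frame size from megaraid_sas.h: the SGL that follows the frame consumes whole 64-byte frames, one more frame is counted for the MFI command itself, and the result is capped at 8.

static unsigned int example_frame_count(unsigned int sge_count,
					unsigned int sge_sz)
{
	unsigned int sge_bytes = sge_sz * sge_count;
	unsigned int frames = sge_bytes / MEGAMFI_FRAME_SIZE;

	if (sge_bytes % MEGAMFI_FRAME_SIZE)
		frames++;			/* partially filled SGL frame */
	frames++;				/* the MFI frame itself */

	return (frames > 7) ? 8 : frames;	/* FW pulls at most 8 frames */
}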
436/**
437 * megasas_build_ldio - Prepares IOs to logical devices
438 * @instance: Adapter soft state
439 * @scp: SCSI command
 440 * @cmd: Command to be prepared
441 *
442 * Frames (and accompanying SGLs) for regular SCSI IOs use this function.
443 */
444static inline int
445megasas_build_ldio(struct megasas_instance *instance, struct scsi_cmnd *scp,
446 struct megasas_cmd *cmd)
447{
448 u32 sge_sz;
449 int sge_bytes;
450 u32 device_id;
451 u8 sc = scp->cmnd[0];
452 u16 flags = 0;
453 struct megasas_io_frame *ldio;
454
455 device_id = MEGASAS_DEV_INDEX(instance, scp);
456 ldio = (struct megasas_io_frame *)cmd->frame;
457
458 if (scp->sc_data_direction == PCI_DMA_TODEVICE)
459 flags = MFI_FRAME_DIR_WRITE;
460 else if (scp->sc_data_direction == PCI_DMA_FROMDEVICE)
461 flags = MFI_FRAME_DIR_READ;
462
463 /*
 464 * Prepare the Logical IO frame: 2nd bit is zero for all read cmds
465 */
466 ldio->cmd = (sc & 0x02) ? MFI_CMD_LD_WRITE : MFI_CMD_LD_READ;
467 ldio->cmd_status = 0x0;
468 ldio->scsi_status = 0x0;
469 ldio->target_id = device_id;
470 ldio->timeout = 0;
471 ldio->reserved_0 = 0;
472 ldio->pad_0 = 0;
473 ldio->flags = flags;
474 ldio->start_lba_hi = 0;
475 ldio->access_byte = (scp->cmd_len != 6) ? scp->cmnd[1] : 0;
476
477 /*
478 * 6-byte READ(0x08) or WRITE(0x0A) cdb
479 */
480 if (scp->cmd_len == 6) {
481 ldio->lba_count = (u32) scp->cmnd[4];
482 ldio->start_lba_lo = ((u32) scp->cmnd[1] << 16) |
483 ((u32) scp->cmnd[2] << 8) | (u32) scp->cmnd[3];
484
485 ldio->start_lba_lo &= 0x1FFFFF;
486 }
487
488 /*
489 * 10-byte READ(0x28) or WRITE(0x2A) cdb
490 */
491 else if (scp->cmd_len == 10) {
492 ldio->lba_count = (u32) scp->cmnd[8] |
493 ((u32) scp->cmnd[7] << 8);
494 ldio->start_lba_lo = ((u32) scp->cmnd[2] << 24) |
495 ((u32) scp->cmnd[3] << 16) |
496 ((u32) scp->cmnd[4] << 8) | (u32) scp->cmnd[5];
497 }
498
499 /*
500 * 12-byte READ(0xA8) or WRITE(0xAA) cdb
501 */
502 else if (scp->cmd_len == 12) {
503 ldio->lba_count = ((u32) scp->cmnd[6] << 24) |
504 ((u32) scp->cmnd[7] << 16) |
505 ((u32) scp->cmnd[8] << 8) | (u32) scp->cmnd[9];
506
507 ldio->start_lba_lo = ((u32) scp->cmnd[2] << 24) |
508 ((u32) scp->cmnd[3] << 16) |
509 ((u32) scp->cmnd[4] << 8) | (u32) scp->cmnd[5];
510 }
511
512 /*
513 * 16-byte READ(0x88) or WRITE(0x8A) cdb
514 */
515 else if (scp->cmd_len == 16) {
516 ldio->lba_count = ((u32) scp->cmnd[10] << 24) |
517 ((u32) scp->cmnd[11] << 16) |
518 ((u32) scp->cmnd[12] << 8) | (u32) scp->cmnd[13];
519
520 ldio->start_lba_lo = ((u32) scp->cmnd[6] << 24) |
521 ((u32) scp->cmnd[7] << 16) |
522 ((u32) scp->cmnd[8] << 8) | (u32) scp->cmnd[9];
523
524 ldio->start_lba_hi = ((u32) scp->cmnd[2] << 24) |
525 ((u32) scp->cmnd[3] << 16) |
526 ((u32) scp->cmnd[4] << 8) | (u32) scp->cmnd[5];
527
528 }
529
530 /*
531 * Construct SGL
532 */
533 sge_sz = (IS_DMA64) ? sizeof(struct megasas_sge64) :
534 sizeof(struct megasas_sge32);
535
536 if (IS_DMA64) {
537 ldio->flags |= MFI_FRAME_SGL64;
538 ldio->sge_count = megasas_make_sgl64(instance, scp, &ldio->sgl);
539 } else
540 ldio->sge_count = megasas_make_sgl32(instance, scp, &ldio->sgl);
541
542 /*
543 * Sense info specific
544 */
545 ldio->sense_len = SCSI_SENSE_BUFFERSIZE;
546 ldio->sense_buf_phys_addr_hi = 0;
547 ldio->sense_buf_phys_addr_lo = cmd->sense_phys_addr;
548
549 sge_bytes = sge_sz * ldio->sge_count;
550
551 cmd->frame_count = (sge_bytes / MEGAMFI_FRAME_SIZE) +
552 ((sge_bytes % MEGAMFI_FRAME_SIZE) ? 1 : 0) + 1;
553
554 if (cmd->frame_count > 7)
555 cmd->frame_count = 8;
556
557 return cmd->frame_count;
558}
559
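The CDB decoding in megasas_build_ldio() above is plain big-endian byte assembly. As a worked illustration (the helper name example_decode_rw16 is made up and not driver code), the 16-byte READ/WRITE variant splits the 64-bit LBA across start_lba_hi and start_lba_lo as follows:

static void example_decode_rw16(const u8 cdb[16], u32 *lba_hi, u32 *lba_lo,
				u32 *num_blocks)
{
	/* bytes 2-5: upper 32 bits of the LBA */
	*lba_hi = ((u32)cdb[2] << 24) | ((u32)cdb[3] << 16) |
		  ((u32)cdb[4] << 8) | (u32)cdb[5];
	/* bytes 6-9: lower 32 bits of the LBA */
	*lba_lo = ((u32)cdb[6] << 24) | ((u32)cdb[7] << 16) |
		  ((u32)cdb[8] << 8) | (u32)cdb[9];
	/* bytes 10-13: transfer length in blocks */
	*num_blocks = ((u32)cdb[10] << 24) | ((u32)cdb[11] << 16) |
		      ((u32)cdb[12] << 8) | (u32)cdb[13];
}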
560/**
561 * megasas_build_cmd - Prepares a command packet
562 * @instance: Adapter soft state
563 * @scp: SCSI command
564 * @frame_count: [OUT] Number of frames used to prepare this command
565 */
566static inline struct megasas_cmd *megasas_build_cmd(struct megasas_instance
567 *instance,
568 struct scsi_cmnd *scp,
569 int *frame_count)
570{
571 u32 logical_cmd;
572 struct megasas_cmd *cmd;
573
574 /*
575 * Find out if this is logical or physical drive command.
576 */
577 logical_cmd = MEGASAS_IS_LOGICAL(scp);
578
579 /*
580 * Logical drive command
581 */
582 if (logical_cmd) {
583
584 if (scp->device->id >= MEGASAS_MAX_LD) {
585 scp->result = DID_BAD_TARGET << 16;
586 return NULL;
587 }
588
589 switch (scp->cmnd[0]) {
590
591 case READ_10:
592 case WRITE_10:
593 case READ_12:
594 case WRITE_12:
595 case READ_6:
596 case WRITE_6:
597 case READ_16:
598 case WRITE_16:
599 /*
600 * Fail for LUN > 0
601 */
602 if (scp->device->lun) {
603 scp->result = DID_BAD_TARGET << 16;
604 return NULL;
605 }
606
607 cmd = megasas_get_cmd(instance);
608
609 if (!cmd) {
610 scp->result = DID_IMM_RETRY << 16;
611 return NULL;
612 }
613
614 *frame_count = megasas_build_ldio(instance, scp, cmd);
615
616 if (!(*frame_count)) {
617 megasas_return_cmd(instance, cmd);
618 return NULL;
619 }
620
621 return cmd;
622
623 default:
624 /*
625 * Fail for LUN > 0
626 */
627 if (scp->device->lun) {
628 scp->result = DID_BAD_TARGET << 16;
629 return NULL;
630 }
631
632 cmd = megasas_get_cmd(instance);
633
634 if (!cmd) {
635 scp->result = DID_IMM_RETRY << 16;
636 return NULL;
637 }
638
639 *frame_count = megasas_build_dcdb(instance, scp, cmd);
640
641 if (!(*frame_count)) {
642 megasas_return_cmd(instance, cmd);
643 return NULL;
644 }
645
646 return cmd;
647 }
648 } else {
649 cmd = megasas_get_cmd(instance);
650
651 if (!cmd) {
652 scp->result = DID_IMM_RETRY << 16;
653 return NULL;
654 }
655
656 *frame_count = megasas_build_dcdb(instance, scp, cmd);
657
658 if (!(*frame_count)) {
659 megasas_return_cmd(instance, cmd);
660 return NULL;
661 }
662
663 return cmd;
664 }
665
666 return NULL;
667}
668
669/**
670 * megasas_queue_command - Queue entry point
671 * @scmd: SCSI command to be queued
672 * @done: Callback entry point
673 */
674static int
675megasas_queue_command(struct scsi_cmnd *scmd, void (*done) (struct scsi_cmnd *))
676{
677 u32 frame_count;
678 unsigned long flags;
679 struct megasas_cmd *cmd;
680 struct megasas_instance *instance;
681
682 instance = (struct megasas_instance *)
683 scmd->device->host->hostdata;
684 scmd->scsi_done = done;
685 scmd->result = 0;
686
687 cmd = megasas_build_cmd(instance, scmd, &frame_count);
688
689 if (!cmd) {
690 done(scmd);
691 return 0;
692 }
693
694 cmd->scmd = scmd;
695 scmd->SCp.ptr = (char *)cmd;
696 scmd->SCp.sent_command = jiffies;
697
698 /*
699 * Issue the command to the FW
700 */
701 spin_lock_irqsave(&instance->instance_lock, flags);
702 instance->fw_outstanding++;
703 spin_unlock_irqrestore(&instance->instance_lock, flags);
704
705 writel(((cmd->frame_phys_addr >> 3) | (cmd->frame_count - 1)),
706 &instance->reg_set->inbound_queue_port);
707
708 return 0;
709}
710
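The writel() in megasas_queue_command() above packs two things into one word: because frames come from a pci_pool created with 64-byte alignment (see megasas_create_frame_pool() further down), frame_phys_addr >> 3 still has its low three bits clear, and (frame_count - 1), which never exceeds 7, is OR-ed into them. A minimal sketch of that packing, with a made-up helper name:

static u32 example_pack_queue_port(dma_addr_t frame_phys_addr, u32 frame_count)
{
	/* 64-byte aligned address >> 3 leaves bits 0-2 free for the count */
	return (u32)((frame_phys_addr >> 3) | (frame_count - 1));
}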
711/**
712 * megasas_wait_for_outstanding - Wait for all outstanding cmds
713 * @instance: Adapter soft state
714 *
 715 * This function waits for up to MEGASAS_RESET_WAIT_TIME seconds for the FW to
716 * complete all its outstanding commands. Returns error if one or more IOs
717 * are pending after this time period. It also marks the controller dead.
718 */
719static int megasas_wait_for_outstanding(struct megasas_instance *instance)
720{
721 int i;
722 u32 wait_time = MEGASAS_RESET_WAIT_TIME;
723
724 for (i = 0; i < wait_time; i++) {
725
726 if (!instance->fw_outstanding)
727 break;
728
729 if (!(i % MEGASAS_RESET_NOTICE_INTERVAL)) {
730 printk(KERN_NOTICE "megasas: [%2d]waiting for %d "
731 "commands to complete\n", i,
732 instance->fw_outstanding);
733 }
734
735 msleep(1000);
736 }
737
738 if (instance->fw_outstanding) {
739 instance->hw_crit_error = 1;
740 return FAILED;
741 }
742
743 return SUCCESS;
744}
745
746/**
747 * megasas_generic_reset - Generic reset routine
748 * @scmd: Mid-layer SCSI command
749 *
750 * This routine implements a generic reset handler for device, bus and host
751 * reset requests. Device, bus and host specific reset handlers can use this
752 * function after they do their specific tasks.
753 */
754static int megasas_generic_reset(struct scsi_cmnd *scmd)
755{
756 int ret_val;
757 struct megasas_instance *instance;
758
759 instance = (struct megasas_instance *)scmd->device->host->hostdata;
760
761 printk(KERN_NOTICE "megasas: RESET -%ld cmd=%x <c=%d t=%d l=%d>\n",
762 scmd->serial_number, scmd->cmnd[0], scmd->device->channel,
763 scmd->device->id, scmd->device->lun);
764
765 if (instance->hw_crit_error) {
766 printk(KERN_ERR "megasas: cannot recover from previous reset "
767 "failures\n");
768 return FAILED;
769 }
770
771 spin_unlock(scmd->device->host->host_lock);
772
773 ret_val = megasas_wait_for_outstanding(instance);
774
775 if (ret_val == SUCCESS)
776 printk(KERN_NOTICE "megasas: reset successful \n");
777 else
778 printk(KERN_ERR "megasas: failed to do reset\n");
779
780 spin_lock(scmd->device->host->host_lock);
781
782 return ret_val;
783}
784
785static enum scsi_eh_timer_return megasas_reset_timer(struct scsi_cmnd *scmd)
786{
787 unsigned long seconds;
788
789 if (scmd->SCp.ptr) {
790 seconds = (jiffies - scmd->SCp.sent_command) / HZ;
791
792 if (seconds < 90) {
793 return EH_RESET_TIMER;
794 } else {
795 return EH_NOT_HANDLED;
796 }
797 }
798
799 return EH_HANDLED;
800}
801
802/**
803 * megasas_reset_device - Device reset handler entry point
804 */
805static int megasas_reset_device(struct scsi_cmnd *scmd)
806{
807 int ret;
808
809 /*
810 * First wait for all commands to complete
811 */
812 ret = megasas_generic_reset(scmd);
813
814 return ret;
815}
816
817/**
818 * megasas_reset_bus_host - Bus & host reset handler entry point
819 */
820static int megasas_reset_bus_host(struct scsi_cmnd *scmd)
821{
822 int ret;
823
824 /*
 825 * First wait for all commands to complete
826 */
827 ret = megasas_generic_reset(scmd);
828
829 return ret;
830}
831
832/**
833 * megasas_service_aen - Processes an event notification
834 * @instance: Adapter soft state
835 * @cmd: AEN command completed by the ISR
836 *
837 * For AEN, driver sends a command down to FW that is held by the FW till an
838 * event occurs. When an event of interest occurs, FW completes the command
839 * that it was previously holding.
840 *
 841 * This routine sends a SIGIO signal to processes that have registered with the
842 * driver for AEN.
843 */
844static void
845megasas_service_aen(struct megasas_instance *instance, struct megasas_cmd *cmd)
846{
847 /*
848 * Don't signal app if it is just an aborted previously registered aen
849 */
850 if (!cmd->abort_aen)
851 kill_fasync(&megasas_async_queue, SIGIO, POLL_IN);
852 else
853 cmd->abort_aen = 0;
854
855 instance->aen_cmd = NULL;
856 megasas_return_cmd(instance, cmd);
857}
858
859/*
860 * Scsi host template for megaraid_sas driver
861 */
862static struct scsi_host_template megasas_template = {
863
864 .module = THIS_MODULE,
865 .name = "LSI Logic SAS based MegaRAID driver",
866 .proc_name = "megaraid_sas",
867 .queuecommand = megasas_queue_command,
868 .eh_device_reset_handler = megasas_reset_device,
869 .eh_bus_reset_handler = megasas_reset_bus_host,
870 .eh_host_reset_handler = megasas_reset_bus_host,
871 .eh_timed_out = megasas_reset_timer,
872 .use_clustering = ENABLE_CLUSTERING,
873};
874
875/**
876 * megasas_complete_int_cmd - Completes an internal command
877 * @instance: Adapter soft state
878 * @cmd: Command to be completed
879 *
880 * The megasas_issue_blocked_cmd() function waits for a command to complete
881 * after it issues a command. This function wakes up that waiting routine by
882 * calling wake_up() on the wait queue.
883 */
884static void
885megasas_complete_int_cmd(struct megasas_instance *instance,
886 struct megasas_cmd *cmd)
887{
888 cmd->cmd_status = cmd->frame->io.cmd_status;
889
890 if (cmd->cmd_status == ENODATA) {
891 cmd->cmd_status = 0;
892 }
893 wake_up(&instance->int_cmd_wait_q);
894}
895
896/**
897 * megasas_complete_abort - Completes aborting a command
898 * @instance: Adapter soft state
899 * @cmd: Cmd that was issued to abort another cmd
900 *
901 * The megasas_issue_blocked_abort_cmd() function waits on abort_cmd_wait_q
902 * after it issues an abort on a previously issued command. This function
903 * wakes up all functions waiting on the same wait queue.
904 */
905static void
906megasas_complete_abort(struct megasas_instance *instance,
907 struct megasas_cmd *cmd)
908{
909 if (cmd->sync_cmd) {
910 cmd->sync_cmd = 0;
911 cmd->cmd_status = 0;
912 wake_up(&instance->abort_cmd_wait_q);
913 }
914
915 return;
916}
917
918/**
919 * megasas_unmap_sgbuf - Unmap SG buffers
920 * @instance: Adapter soft state
921 * @cmd: Completed command
922 */
923static inline void
924megasas_unmap_sgbuf(struct megasas_instance *instance, struct megasas_cmd *cmd)
925{
926 dma_addr_t buf_h;
927 u8 opcode;
928
929 if (cmd->scmd->use_sg) {
930 pci_unmap_sg(instance->pdev, cmd->scmd->request_buffer,
931 cmd->scmd->use_sg, cmd->scmd->sc_data_direction);
932 return;
933 }
934
935 if (!cmd->scmd->request_bufflen)
936 return;
937
938 opcode = cmd->frame->hdr.cmd;
939
940 if ((opcode == MFI_CMD_LD_READ) || (opcode == MFI_CMD_LD_WRITE)) {
941 if (IS_DMA64)
942 buf_h = cmd->frame->io.sgl.sge64[0].phys_addr;
943 else
944 buf_h = cmd->frame->io.sgl.sge32[0].phys_addr;
945 } else {
946 if (IS_DMA64)
947 buf_h = cmd->frame->pthru.sgl.sge64[0].phys_addr;
948 else
949 buf_h = cmd->frame->pthru.sgl.sge32[0].phys_addr;
950 }
951
952 pci_unmap_single(instance->pdev, buf_h, cmd->scmd->request_bufflen,
953 cmd->scmd->sc_data_direction);
954 return;
955}
956
957/**
958 * megasas_complete_cmd - Completes a command
959 * @instance: Adapter soft state
960 * @cmd: Command to be completed
961 * @alt_status: If non-zero, use this value as status to
962 * SCSI mid-layer instead of the value returned
963 * by the FW. This should be used if caller wants
964 * an alternate status (as in the case of aborted
965 * commands)
966 */
967static inline void
968megasas_complete_cmd(struct megasas_instance *instance, struct megasas_cmd *cmd,
969 u8 alt_status)
970{
971 int exception = 0;
972 struct megasas_header *hdr = &cmd->frame->hdr;
973 unsigned long flags;
974
975 if (cmd->scmd) {
976 cmd->scmd->SCp.ptr = (char *)0;
977 }
978
979 switch (hdr->cmd) {
980
981 case MFI_CMD_PD_SCSI_IO:
982 case MFI_CMD_LD_SCSI_IO:
983
984 /*
985 * MFI_CMD_PD_SCSI_IO and MFI_CMD_LD_SCSI_IO could have been
986 * issued either through an IO path or an IOCTL path. If it
987 * was via IOCTL, we will send it to internal completion.
988 */
989 if (cmd->sync_cmd) {
990 cmd->sync_cmd = 0;
991 megasas_complete_int_cmd(instance, cmd);
992 break;
993 }
994
995 /*
996 * Don't export physical disk devices to mid-layer.
997 */
998 if (!MEGASAS_IS_LOGICAL(cmd->scmd) &&
999 (hdr->cmd_status == MFI_STAT_OK) &&
1000 (cmd->scmd->cmnd[0] == INQUIRY)) {
1001
1002 if (((*(u8 *) cmd->scmd->request_buffer) & 0x1F) ==
1003 TYPE_DISK) {
1004 cmd->scmd->result = DID_BAD_TARGET << 16;
1005 exception = 1;
1006 }
1007 }
1008
1009 case MFI_CMD_LD_READ:
1010 case MFI_CMD_LD_WRITE:
1011
1012 if (alt_status) {
1013 cmd->scmd->result = alt_status << 16;
1014 exception = 1;
1015 }
1016
1017 if (exception) {
1018
1019 spin_lock_irqsave(&instance->instance_lock, flags);
1020 instance->fw_outstanding--;
1021 spin_unlock_irqrestore(&instance->instance_lock, flags);
1022
1023 megasas_unmap_sgbuf(instance, cmd);
1024 cmd->scmd->scsi_done(cmd->scmd);
1025 megasas_return_cmd(instance, cmd);
1026
1027 break;
1028 }
1029
1030 switch (hdr->cmd_status) {
1031
1032 case MFI_STAT_OK:
1033 cmd->scmd->result = DID_OK << 16;
1034 break;
1035
1036 case MFI_STAT_SCSI_IO_FAILED:
1037 case MFI_STAT_LD_INIT_IN_PROGRESS:
1038 cmd->scmd->result =
1039 (DID_ERROR << 16) | hdr->scsi_status;
1040 break;
1041
1042 case MFI_STAT_SCSI_DONE_WITH_ERROR:
1043
1044 cmd->scmd->result = (DID_OK << 16) | hdr->scsi_status;
1045
1046 if (hdr->scsi_status == SAM_STAT_CHECK_CONDITION) {
1047 memset(cmd->scmd->sense_buffer, 0,
1048 SCSI_SENSE_BUFFERSIZE);
1049 memcpy(cmd->scmd->sense_buffer, cmd->sense,
1050 hdr->sense_len);
1051
1052 cmd->scmd->result |= DRIVER_SENSE << 24;
1053 }
1054
1055 break;
1056
1057 case MFI_STAT_LD_OFFLINE:
1058 case MFI_STAT_DEVICE_NOT_FOUND:
1059 cmd->scmd->result = DID_BAD_TARGET << 16;
1060 break;
1061
1062 default:
1063 printk(KERN_DEBUG "megasas: MFI FW status %#x\n",
1064 hdr->cmd_status);
1065 cmd->scmd->result = DID_ERROR << 16;
1066 break;
1067 }
1068
1069 spin_lock_irqsave(&instance->instance_lock, flags);
1070 instance->fw_outstanding--;
1071 spin_unlock_irqrestore(&instance->instance_lock, flags);
1072
1073 megasas_unmap_sgbuf(instance, cmd);
1074 cmd->scmd->scsi_done(cmd->scmd);
1075 megasas_return_cmd(instance, cmd);
1076
1077 break;
1078
1079 case MFI_CMD_SMP:
1080 case MFI_CMD_STP:
1081 case MFI_CMD_DCMD:
1082
1083 /*
1084 * See if got an event notification
1085 */
1086 if (cmd->frame->dcmd.opcode == MR_DCMD_CTRL_EVENT_WAIT)
1087 megasas_service_aen(instance, cmd);
1088 else
1089 megasas_complete_int_cmd(instance, cmd);
1090
1091 break;
1092
1093 case MFI_CMD_ABORT:
1094 /*
1095 * Cmd issued to abort another cmd returned
1096 */
1097 megasas_complete_abort(instance, cmd);
1098 break;
1099
1100 default:
1101 printk("megasas: Unknown command completed! [0x%X]\n",
1102 hdr->cmd);
1103 break;
1104 }
1105}
1106
1107/**
1108 * megasas_deplete_reply_queue - Processes all completed commands
1109 * @instance: Adapter soft state
1110 * @alt_status: Alternate status to be returned to
1111 * SCSI mid-layer instead of the status
1112 * returned by the FW
1113 */
1114static inline int
1115megasas_deplete_reply_queue(struct megasas_instance *instance, u8 alt_status)
1116{
1117 u32 status;
1118 u32 producer;
1119 u32 consumer;
1120 u32 context;
1121 struct megasas_cmd *cmd;
1122
1123 /*
1124 * Check if it is our interrupt
1125 */
1126 status = readl(&instance->reg_set->outbound_intr_status);
1127
1128 if (!(status & MFI_OB_INTR_STATUS_MASK)) {
1129 return IRQ_NONE;
1130 }
1131
1132 /*
1133 * Clear the interrupt by writing back the same value
1134 */
1135 writel(status, &instance->reg_set->outbound_intr_status);
1136
1137 producer = *instance->producer;
1138 consumer = *instance->consumer;
1139
1140 while (consumer != producer) {
1141 context = instance->reply_queue[consumer];
1142
1143 cmd = instance->cmd_list[context];
1144
1145 megasas_complete_cmd(instance, cmd, alt_status);
1146
1147 consumer++;
1148 if (consumer == (instance->max_fw_cmds + 1)) {
1149 consumer = 0;
1150 }
1151 }
1152
1153 *instance->consumer = producer;
1154
1155 return IRQ_HANDLED;
1156}
1157
1158/**
1159 * megasas_isr - isr entry point
1160 */
1161static irqreturn_t megasas_isr(int irq, void *devp, struct pt_regs *regs)
1162{
1163 return megasas_deplete_reply_queue((struct megasas_instance *)devp,
1164 DID_OK);
1165}
1166
1167/**
1168 * megasas_transition_to_ready - Move the FW to READY state
1169 * @reg_set: MFI register set
1170 *
 1171 * During initialization, the FW can potentially be in any one of several
 1172 * possible states. If the FW is in the operational or waiting-for-handshake
 1173 * states, the driver must take steps to bring it to the ready state.
 1174 * Otherwise, it has to wait for the ready state.
1175 */
1176static int
1177megasas_transition_to_ready(struct megasas_register_set __iomem * reg_set)
1178{
1179 int i;
1180 u8 max_wait;
1181 u32 fw_state;
1182 u32 cur_state;
1183
1184 fw_state = readl(&reg_set->outbound_msg_0) & MFI_STATE_MASK;
1185
1186 while (fw_state != MFI_STATE_READY) {
1187
1188 printk(KERN_INFO "megasas: Waiting for FW to come to ready"
1189 " state\n");
1190 switch (fw_state) {
1191
1192 case MFI_STATE_FAULT:
1193
1194 printk(KERN_DEBUG "megasas: FW in FAULT state!!\n");
1195 return -ENODEV;
1196
1197 case MFI_STATE_WAIT_HANDSHAKE:
1198 /*
1199 * Set the CLR bit in inbound doorbell
1200 */
1201 writel(MFI_INIT_CLEAR_HANDSHAKE,
1202 &reg_set->inbound_doorbell);
1203
1204 max_wait = 2;
1205 cur_state = MFI_STATE_WAIT_HANDSHAKE;
1206 break;
1207
1208 case MFI_STATE_OPERATIONAL:
1209 /*
1210 * Bring it to READY state; assuming max wait 2 secs
1211 */
1212 megasas_disable_intr(reg_set);
1213 writel(MFI_INIT_READY, &reg_set->inbound_doorbell);
1214
1215 max_wait = 10;
1216 cur_state = MFI_STATE_OPERATIONAL;
1217 break;
1218
1219 case MFI_STATE_UNDEFINED:
1220 /*
1221 * This state should not last for more than 2 seconds
1222 */
1223 max_wait = 2;
1224 cur_state = MFI_STATE_UNDEFINED;
1225 break;
1226
1227 case MFI_STATE_BB_INIT:
1228 max_wait = 2;
1229 cur_state = MFI_STATE_BB_INIT;
1230 break;
1231
1232 case MFI_STATE_FW_INIT:
1233 max_wait = 20;
1234 cur_state = MFI_STATE_FW_INIT;
1235 break;
1236
1237 case MFI_STATE_FW_INIT_2:
1238 max_wait = 20;
1239 cur_state = MFI_STATE_FW_INIT_2;
1240 break;
1241
1242 case MFI_STATE_DEVICE_SCAN:
1243 max_wait = 20;
1244 cur_state = MFI_STATE_DEVICE_SCAN;
1245 break;
1246
1247 case MFI_STATE_FLUSH_CACHE:
1248 max_wait = 20;
1249 cur_state = MFI_STATE_FLUSH_CACHE;
1250 break;
1251
1252 default:
1253 printk(KERN_DEBUG "megasas: Unknown state 0x%x\n",
1254 fw_state);
1255 return -ENODEV;
1256 }
1257
1258 /*
1259 * The cur_state should not last for more than max_wait secs
1260 */
1261 for (i = 0; i < (max_wait * 1000); i++) {
1262 fw_state = MFI_STATE_MASK &
1263 readl(&reg_set->outbound_msg_0);
1264
1265 if (fw_state == cur_state) {
1266 msleep(1);
1267 } else
1268 break;
1269 }
1270
1271 /*
1272 * Return error if fw_state hasn't changed after max_wait
1273 */
1274 if (fw_state == cur_state) {
1275 printk(KERN_DEBUG "FW state [%d] hasn't changed "
1276 "in %d secs\n", fw_state, max_wait);
1277 return -ENODEV;
1278 }
1279 };
1280
1281 return 0;
1282}
1283
1284/**
1285 * megasas_teardown_frame_pool - Destroy the cmd frame DMA pool
1286 * @instance: Adapter soft state
1287 */
1288static void megasas_teardown_frame_pool(struct megasas_instance *instance)
1289{
1290 int i;
1291 u32 max_cmd = instance->max_fw_cmds;
1292 struct megasas_cmd *cmd;
1293
1294 if (!instance->frame_dma_pool)
1295 return;
1296
1297 /*
1298 * Return all frames to pool
1299 */
1300 for (i = 0; i < max_cmd; i++) {
1301
1302 cmd = instance->cmd_list[i];
1303
1304 if (cmd->frame)
1305 pci_pool_free(instance->frame_dma_pool, cmd->frame,
1306 cmd->frame_phys_addr);
1307
1308 if (cmd->sense)
1309 pci_pool_free(instance->sense_dma_pool, cmd->frame,
1310 cmd->sense_phys_addr);
1311 }
1312
1313 /*
1314 * Now destroy the pool itself
1315 */
1316 pci_pool_destroy(instance->frame_dma_pool);
1317 pci_pool_destroy(instance->sense_dma_pool);
1318
1319 instance->frame_dma_pool = NULL;
1320 instance->sense_dma_pool = NULL;
1321}
1322
1323/**
1324 * megasas_create_frame_pool - Creates DMA pool for cmd frames
1325 * @instance: Adapter soft state
1326 *
1327 * Each command packet has an embedded DMA memory buffer that is used for
1328 * filling MFI frame and the SG list that immediately follows the frame. This
1329 * function creates those DMA memory buffers for each command packet by using
1330 * PCI pool facility.
1331 */
1332static int megasas_create_frame_pool(struct megasas_instance *instance)
1333{
1334 int i;
1335 u32 max_cmd;
1336 u32 sge_sz;
1337 u32 sgl_sz;
1338 u32 total_sz;
1339 u32 frame_count;
1340 struct megasas_cmd *cmd;
1341
1342 max_cmd = instance->max_fw_cmds;
1343
1344 /*
1345 * Size of our frame is 64 bytes for MFI frame, followed by max SG
1346 * elements and finally SCSI_SENSE_BUFFERSIZE bytes for sense buffer
1347 */
1348 sge_sz = (IS_DMA64) ? sizeof(struct megasas_sge64) :
1349 sizeof(struct megasas_sge32);
1350
1351 /*
 1352 * Calculate the number of 64-byte frames required for the SGL
1353 */
1354 sgl_sz = sge_sz * instance->max_num_sge;
1355 frame_count = (sgl_sz + MEGAMFI_FRAME_SIZE - 1) / MEGAMFI_FRAME_SIZE;
1356
1357 /*
1358 * We need one extra frame for the MFI command
1359 */
1360 frame_count++;
1361
1362 total_sz = MEGAMFI_FRAME_SIZE * frame_count;
1363 /*
1364 * Use DMA pool facility provided by PCI layer
1365 */
1366 instance->frame_dma_pool = pci_pool_create("megasas frame pool",
1367 instance->pdev, total_sz, 64,
1368 0);
1369
1370 if (!instance->frame_dma_pool) {
1371 printk(KERN_DEBUG "megasas: failed to setup frame pool\n");
1372 return -ENOMEM;
1373 }
1374
1375 instance->sense_dma_pool = pci_pool_create("megasas sense pool",
1376 instance->pdev, 128, 4, 0);
1377
1378 if (!instance->sense_dma_pool) {
1379 printk(KERN_DEBUG "megasas: failed to setup sense pool\n");
1380
1381 pci_pool_destroy(instance->frame_dma_pool);
1382 instance->frame_dma_pool = NULL;
1383
1384 return -ENOMEM;
1385 }
1386
1387 /*
1388 * Allocate and attach a frame to each of the commands in cmd_list.
 1389 * By using cmd->index as the context instead of &cmd, we can
1390 * always use 32bit context regardless of the architecture
1391 */
1392 for (i = 0; i < max_cmd; i++) {
1393
1394 cmd = instance->cmd_list[i];
1395
1396 cmd->frame = pci_pool_alloc(instance->frame_dma_pool,
1397 GFP_KERNEL, &cmd->frame_phys_addr);
1398
1399 cmd->sense = pci_pool_alloc(instance->sense_dma_pool,
1400 GFP_KERNEL, &cmd->sense_phys_addr);
1401
1402 /*
1403 * megasas_teardown_frame_pool() takes care of freeing
1404 * whatever has been allocated
1405 */
1406 if (!cmd->frame || !cmd->sense) {
1407 printk(KERN_DEBUG "megasas: pci_pool_alloc failed \n");
1408 megasas_teardown_frame_pool(instance);
1409 return -ENOMEM;
1410 }
1411
1412 cmd->frame->io.context = cmd->index;
1413 }
1414
1415 return 0;
1416}
1417
1418/**
1419 * megasas_free_cmds - Free all the cmds in the free cmd pool
1420 * @instance: Adapter soft state
1421 */
1422static void megasas_free_cmds(struct megasas_instance *instance)
1423{
1424 int i;
1425 /* First free the MFI frame pool */
1426 megasas_teardown_frame_pool(instance);
1427
1428 /* Free all the commands in the cmd_list */
1429 for (i = 0; i < instance->max_fw_cmds; i++)
1430 kfree(instance->cmd_list[i]);
1431
1432 /* Free the cmd_list buffer itself */
1433 kfree(instance->cmd_list);
1434 instance->cmd_list = NULL;
1435
1436 INIT_LIST_HEAD(&instance->cmd_pool);
1437}
1438
1439/**
1440 * megasas_alloc_cmds - Allocates the command packets
1441 * @instance: Adapter soft state
1442 *
1443 * Each command that is issued to the FW, whether IO commands from the OS or
1444 * internal commands like IOCTLs, are wrapped in local data structure called
1445 * megasas_cmd. The frame embedded in this megasas_cmd is actually issued to
1446 * the FW.
1447 *
1448 * Each frame has a 32-bit field called context (tag). This context is used
1449 * to get back the megasas_cmd from the frame when a frame gets completed in
1450 * the ISR. Typically the address of the megasas_cmd itself would be used as
1451 * the context. But we wanted to keep the differences between 32 and 64 bit
 1452 * systems to the minimum. We always use 32 bit integers for the context. In
1453 * this driver, the 32 bit values are the indices into an array cmd_list.
1454 * This array is used only to look up the megasas_cmd given the context. The
1455 * free commands themselves are maintained in a linked list called cmd_pool.
1456 */
1457static int megasas_alloc_cmds(struct megasas_instance *instance)
1458{
1459 int i;
1460 int j;
1461 u32 max_cmd;
1462 struct megasas_cmd *cmd;
1463
1464 max_cmd = instance->max_fw_cmds;
1465
1466 /*
1467 * instance->cmd_list is an array of struct megasas_cmd pointers.
1468 * Allocate the dynamic array first and then allocate individual
1469 * commands.
1470 */
1471 instance->cmd_list = kmalloc(sizeof(struct megasas_cmd *) * max_cmd,
1472 GFP_KERNEL);
1473
1474 if (!instance->cmd_list) {
1475 printk(KERN_DEBUG "megasas: out of memory\n");
1476 return -ENOMEM;
1477 }
1478
1479 memset(instance->cmd_list, 0, sizeof(struct megasas_cmd *) * max_cmd);
1480
1481 for (i = 0; i < max_cmd; i++) {
1482 instance->cmd_list[i] = kmalloc(sizeof(struct megasas_cmd),
1483 GFP_KERNEL);
1484
1485 if (!instance->cmd_list[i]) {
1486
1487 for (j = 0; j < i; j++)
1488 kfree(instance->cmd_list[j]);
1489
1490 kfree(instance->cmd_list);
1491 instance->cmd_list = NULL;
1492
1493 return -ENOMEM;
1494 }
1495 }
1496
1497 /*
1498 * Add all the commands to command pool (instance->cmd_pool)
1499 */
1500 for (i = 0; i < max_cmd; i++) {
1501 cmd = instance->cmd_list[i];
1502 memset(cmd, 0, sizeof(struct megasas_cmd));
1503 cmd->index = i;
1504 cmd->instance = instance;
1505
1506 list_add_tail(&cmd->list, &instance->cmd_pool);
1507 }
1508
1509 /*
1510 * Create a frame pool and assign one frame to each cmd
1511 */
1512 if (megasas_create_frame_pool(instance)) {
1513 printk(KERN_DEBUG "megasas: Error creating frame DMA pool\n");
1514 megasas_free_cmds(instance);
1515 }
1516
1517 return 0;
1518}
1519
1520/**
 1521 * megasas_get_ctrl_info - Returns FW's controller structure
1522 * @instance: Adapter soft state
1523 * @ctrl_info: Controller information structure
1524 *
1525 * Issues an internal command (DCMD) to get the FW's controller structure.
1526 * This information is mainly used to find out the maximum IO transfer per
1527 * command supported by the FW.
1528 */
1529static int
1530megasas_get_ctrl_info(struct megasas_instance *instance,
1531 struct megasas_ctrl_info *ctrl_info)
1532{
1533 int ret = 0;
1534 struct megasas_cmd *cmd;
1535 struct megasas_dcmd_frame *dcmd;
1536 struct megasas_ctrl_info *ci;
1537 dma_addr_t ci_h = 0;
1538
1539 cmd = megasas_get_cmd(instance);
1540
1541 if (!cmd) {
1542 printk(KERN_DEBUG "megasas: Failed to get a free cmd\n");
1543 return -ENOMEM;
1544 }
1545
1546 dcmd = &cmd->frame->dcmd;
1547
1548 ci = pci_alloc_consistent(instance->pdev,
1549 sizeof(struct megasas_ctrl_info), &ci_h);
1550
1551 if (!ci) {
1552 printk(KERN_DEBUG "Failed to alloc mem for ctrl info\n");
1553 megasas_return_cmd(instance, cmd);
1554 return -ENOMEM;
1555 }
1556
1557 memset(ci, 0, sizeof(*ci));
1558 memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE);
1559
1560 dcmd->cmd = MFI_CMD_DCMD;
1561 dcmd->cmd_status = 0xFF;
1562 dcmd->sge_count = 1;
1563 dcmd->flags = MFI_FRAME_DIR_READ;
1564 dcmd->timeout = 0;
1565 dcmd->data_xfer_len = sizeof(struct megasas_ctrl_info);
1566 dcmd->opcode = MR_DCMD_CTRL_GET_INFO;
1567 dcmd->sgl.sge32[0].phys_addr = ci_h;
1568 dcmd->sgl.sge32[0].length = sizeof(struct megasas_ctrl_info);
1569
1570 if (!megasas_issue_polled(instance, cmd)) {
1571 ret = 0;
1572 memcpy(ctrl_info, ci, sizeof(struct megasas_ctrl_info));
1573 } else {
1574 ret = -1;
1575 }
1576
1577 pci_free_consistent(instance->pdev, sizeof(struct megasas_ctrl_info),
1578 ci, ci_h);
1579
1580 megasas_return_cmd(instance, cmd);
1581 return ret;
1582}
1583
1584/**
1585 * megasas_init_mfi - Initializes the FW
1586 * @instance: Adapter soft state
1587 *
1588 * This is the main function for initializing MFI firmware.
1589 */
1590static int megasas_init_mfi(struct megasas_instance *instance)
1591{
1592 u32 context_sz;
1593 u32 reply_q_sz;
1594 u32 max_sectors_1;
1595 u32 max_sectors_2;
1596 struct megasas_register_set __iomem *reg_set;
1597
1598 struct megasas_cmd *cmd;
1599 struct megasas_ctrl_info *ctrl_info;
1600
1601 struct megasas_init_frame *init_frame;
1602 struct megasas_init_queue_info *initq_info;
1603 dma_addr_t init_frame_h;
1604 dma_addr_t initq_info_h;
1605
1606 /*
1607 * Map the message registers
1608 */
1609 instance->base_addr = pci_resource_start(instance->pdev, 0);
1610
1611 if (pci_request_regions(instance->pdev, "megasas: LSI Logic")) {
1612 printk(KERN_DEBUG "megasas: IO memory region busy!\n");
1613 return -EBUSY;
1614 }
1615
1616 instance->reg_set = ioremap_nocache(instance->base_addr, 8192);
1617
1618 if (!instance->reg_set) {
1619 printk(KERN_DEBUG "megasas: Failed to map IO mem\n");
1620 goto fail_ioremap;
1621 }
1622
1623 reg_set = instance->reg_set;
1624
1625 /*
1626 * We expect the FW state to be READY
1627 */
1628 if (megasas_transition_to_ready(instance->reg_set))
1629 goto fail_ready_state;
1630
1631 /*
1632 * Get various operational parameters from status register
1633 */
1634 instance->max_fw_cmds = readl(&reg_set->outbound_msg_0) & 0x00FFFF;
1635 instance->max_num_sge = (readl(&reg_set->outbound_msg_0) & 0xFF0000) >>
1636 0x10;
1637 /*
1638 * Create a pool of commands
1639 */
1640 if (megasas_alloc_cmds(instance))
1641 goto fail_alloc_cmds;
1642
1643 /*
1644 * Allocate memory for reply queue. Length of reply queue should
1645 * be _one_ more than the maximum commands handled by the firmware.
1646 *
 1647 * Note: When FW completes commands, it places corresponding context
1648 * values in this circular reply queue. This circular queue is a fairly
1649 * typical producer-consumer queue. FW is the producer (of completed
1650 * commands) and the driver is the consumer.
1651 */
1652 context_sz = sizeof(u32);
1653 reply_q_sz = context_sz * (instance->max_fw_cmds + 1);
1654
1655 instance->reply_queue = pci_alloc_consistent(instance->pdev,
1656 reply_q_sz,
1657 &instance->reply_queue_h);
1658
1659 if (!instance->reply_queue) {
1660 printk(KERN_DEBUG "megasas: Out of DMA mem for reply queue\n");
1661 goto fail_reply_queue;
1662 }
1663
1664 /*
 1665 * Prepare an init frame. Note the init frame points to queue info
1666 * structure. Each frame has SGL allocated after first 64 bytes. For
1667 * this frame - since we don't need any SGL - we use SGL's space as
1668 * queue info structure
1669 *
1670 * We will not get a NULL command below. We just created the pool.
1671 */
1672 cmd = megasas_get_cmd(instance);
1673
1674 init_frame = (struct megasas_init_frame *)cmd->frame;
1675 initq_info = (struct megasas_init_queue_info *)
1676 ((unsigned long)init_frame + 64);
1677
1678 init_frame_h = cmd->frame_phys_addr;
1679 initq_info_h = init_frame_h + 64;
1680
1681 memset(init_frame, 0, MEGAMFI_FRAME_SIZE);
1682 memset(initq_info, 0, sizeof(struct megasas_init_queue_info));
1683
1684 initq_info->reply_queue_entries = instance->max_fw_cmds + 1;
1685 initq_info->reply_queue_start_phys_addr_lo = instance->reply_queue_h;
1686
1687 initq_info->producer_index_phys_addr_lo = instance->producer_h;
1688 initq_info->consumer_index_phys_addr_lo = instance->consumer_h;
1689
1690 init_frame->cmd = MFI_CMD_INIT;
1691 init_frame->cmd_status = 0xFF;
1692 init_frame->queue_info_new_phys_addr_lo = initq_info_h;
1693
1694 init_frame->data_xfer_len = sizeof(struct megasas_init_queue_info);
1695
1696 /*
1697 * Issue the init frame in polled mode
1698 */
1699 if (megasas_issue_polled(instance, cmd)) {
1700 printk(KERN_DEBUG "megasas: Failed to init firmware\n");
1701 goto fail_fw_init;
1702 }
1703
1704 megasas_return_cmd(instance, cmd);
1705
1706 ctrl_info = kmalloc(sizeof(struct megasas_ctrl_info), GFP_KERNEL);
1707
1708 /*
1709 * Compute the max allowed sectors per IO: The controller info has two
1710 * limits on max sectors. Driver should use the minimum of these two.
1711 *
1712 * 1 << stripe_sz_ops.min = max sectors per strip
1713 *
1714 * Note that older firmwares ( < FW ver 30) didn't report information
1715 * to calculate max_sectors_1. So the number ended up as zero always.
1716 */
1717 if (ctrl_info && !megasas_get_ctrl_info(instance, ctrl_info)) {
1718
1719 max_sectors_1 = (1 << ctrl_info->stripe_sz_ops.min) *
1720 ctrl_info->max_strips_per_io;
1721 max_sectors_2 = ctrl_info->max_request_size;
1722
1723 instance->max_sectors_per_req = (max_sectors_1 < max_sectors_2)
1724 ? max_sectors_1 : max_sectors_2;
1725 } else
1726 instance->max_sectors_per_req = instance->max_num_sge *
1727 PAGE_SIZE / 512;
1728
1729 kfree(ctrl_info);
1730
1731 return 0;
1732
1733 fail_fw_init:
1734 megasas_return_cmd(instance, cmd);
1735
1736 pci_free_consistent(instance->pdev, reply_q_sz,
1737 instance->reply_queue, instance->reply_queue_h);
1738 fail_reply_queue:
1739 megasas_free_cmds(instance);
1740
1741 fail_alloc_cmds:
1742 fail_ready_state:
1743 iounmap(instance->reg_set);
1744
1745 fail_ioremap:
1746 pci_release_regions(instance->pdev);
1747
1748 return -EINVAL;
1749}
1750
1751/**
1752 * megasas_release_mfi - Reverses the FW initialization
 1753 * @instance: Adapter soft state
1754 */
1755static void megasas_release_mfi(struct megasas_instance *instance)
1756{
1757 u32 reply_q_sz = sizeof(u32) * (instance->max_fw_cmds + 1);
1758
1759 pci_free_consistent(instance->pdev, reply_q_sz,
1760 instance->reply_queue, instance->reply_queue_h);
1761
1762 megasas_free_cmds(instance);
1763
1764 iounmap(instance->reg_set);
1765
1766 pci_release_regions(instance->pdev);
1767}
1768
1769/**
1770 * megasas_get_seq_num - Gets latest event sequence numbers
1771 * @instance: Adapter soft state
1772 * @eli: FW event log sequence numbers information
1773 *
1774 * FW maintains a log of all events in a non-volatile area. Upper layers would
1775 * usually find out the latest sequence number of the events, the seq number at
1776 * the boot etc. They would "read" all the events below the latest seq number
1777 * by issuing a direct fw cmd (DCMD). For the future events (beyond latest seq
 1778 * number), they would subscribe to AEN (asynchronous event notification) and
1779 * wait for the events to happen.
1780 */
1781static int
1782megasas_get_seq_num(struct megasas_instance *instance,
1783 struct megasas_evt_log_info *eli)
1784{
1785 struct megasas_cmd *cmd;
1786 struct megasas_dcmd_frame *dcmd;
1787 struct megasas_evt_log_info *el_info;
1788 dma_addr_t el_info_h = 0;
1789
1790 cmd = megasas_get_cmd(instance);
1791
1792 if (!cmd) {
1793 return -ENOMEM;
1794 }
1795
1796 dcmd = &cmd->frame->dcmd;
1797 el_info = pci_alloc_consistent(instance->pdev,
1798 sizeof(struct megasas_evt_log_info),
1799 &el_info_h);
1800
1801 if (!el_info) {
1802 megasas_return_cmd(instance, cmd);
1803 return -ENOMEM;
1804 }
1805
1806 memset(el_info, 0, sizeof(*el_info));
1807 memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE);
1808
1809 dcmd->cmd = MFI_CMD_DCMD;
1810 dcmd->cmd_status = 0x0;
1811 dcmd->sge_count = 1;
1812 dcmd->flags = MFI_FRAME_DIR_READ;
1813 dcmd->timeout = 0;
1814 dcmd->data_xfer_len = sizeof(struct megasas_evt_log_info);
1815 dcmd->opcode = MR_DCMD_CTRL_EVENT_GET_INFO;
1816 dcmd->sgl.sge32[0].phys_addr = el_info_h;
1817 dcmd->sgl.sge32[0].length = sizeof(struct megasas_evt_log_info);
1818
1819 megasas_issue_blocked_cmd(instance, cmd);
1820
1821 /*
1822 * Copy the data back into callers buffer
1823 */
1824 memcpy(eli, el_info, sizeof(struct megasas_evt_log_info));
1825
1826 pci_free_consistent(instance->pdev, sizeof(struct megasas_evt_log_info),
1827 el_info, el_info_h);
1828
1829 megasas_return_cmd(instance, cmd);
1830
1831 return 0;
1832}
1833
1834/**
1835 * megasas_register_aen - Registers for asynchronous event notification
1836 * @instance: Adapter soft state
1837 * @seq_num: The starting sequence number
1838 * @class_locale: Class of the event
1839 *
1840 * This function subscribes for AEN for events beyond the @seq_num. It requests
1841 * to be notified if and only if the event is of type @class_locale
1842 */
1843static int
1844megasas_register_aen(struct megasas_instance *instance, u32 seq_num,
1845 u32 class_locale_word)
1846{
1847 int ret_val;
1848 struct megasas_cmd *cmd;
1849 struct megasas_dcmd_frame *dcmd;
1850 union megasas_evt_class_locale curr_aen;
1851 union megasas_evt_class_locale prev_aen;
1852
1853 /*
 1854 * If there is an AEN pending already (aen_cmd), check if the
1855 * class_locale of that pending AEN is inclusive of the new
1856 * AEN request we currently have. If it is, then we don't have
1857 * to do anything. In other words, whichever events the current
1858 * AEN request is subscribing to, have already been subscribed
1859 * to.
1860 *
1861 * If the old_cmd is _not_ inclusive, then we have to abort
 1863 * that command, form a class_locale that is a superset of both
1863 * old and current and re-issue to the FW
1864 */
1865
1866 curr_aen.word = class_locale_word;
1867
1868 if (instance->aen_cmd) {
1869
1870 prev_aen.word = instance->aen_cmd->frame->dcmd.mbox.w[1];
1871
1872 /*
1873 * A class whose enum value is smaller is inclusive of all
1874 * higher values. If a PROGRESS (= -1) was previously
 1875 * registered, then new registration requests for higher
1876 * classes need not be sent to FW. They are automatically
1877 * included.
1878 *
1879 * Locale numbers don't have such hierarchy. They are bitmap
1880 * values
1881 */
1882 if ((prev_aen.members.class <= curr_aen.members.class) &&
1883 !((prev_aen.members.locale & curr_aen.members.locale) ^
1884 curr_aen.members.locale)) {
1885 /*
1886 * Previously issued event registration includes
1887 * current request. Nothing to do.
1888 */
1889 return 0;
1890 } else {
1891 curr_aen.members.locale |= prev_aen.members.locale;
1892
1893 if (prev_aen.members.class < curr_aen.members.class)
1894 curr_aen.members.class = prev_aen.members.class;
1895
1896 instance->aen_cmd->abort_aen = 1;
1897 ret_val = megasas_issue_blocked_abort_cmd(instance,
1898 instance->
1899 aen_cmd);
1900
1901 if (ret_val) {
1902 printk(KERN_DEBUG "megasas: Failed to abort "
1903 "previous AEN command\n");
1904 return ret_val;
1905 }
1906 }
1907 }
1908
1909 cmd = megasas_get_cmd(instance);
1910
1911 if (!cmd)
1912 return -ENOMEM;
1913
1914 dcmd = &cmd->frame->dcmd;
1915
1916 memset(instance->evt_detail, 0, sizeof(struct megasas_evt_detail));
1917
1918 /*
1919 * Prepare DCMD for aen registration
1920 */
1921 memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE);
1922
1923 dcmd->cmd = MFI_CMD_DCMD;
1924 dcmd->cmd_status = 0x0;
1925 dcmd->sge_count = 1;
1926 dcmd->flags = MFI_FRAME_DIR_READ;
1927 dcmd->timeout = 0;
1928 dcmd->data_xfer_len = sizeof(struct megasas_evt_detail);
1929 dcmd->opcode = MR_DCMD_CTRL_EVENT_WAIT;
1930 dcmd->mbox.w[0] = seq_num;
1931 dcmd->mbox.w[1] = curr_aen.word;
1932 dcmd->sgl.sge32[0].phys_addr = (u32) instance->evt_detail_h;
1933 dcmd->sgl.sge32[0].length = sizeof(struct megasas_evt_detail);
1934
1935 /*
1936 * Store reference to the cmd used to register for AEN. When an
1937 * application wants us to register for AEN, we have to abort this
1938 * cmd and re-register with a new EVENT LOCALE supplied by that app
1939 */
1940 instance->aen_cmd = cmd;
1941
1942 /*
1943 * Issue the aen registration frame
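	 * (the inbound queue port takes the 64-byte-aligned frame physical
	 * address shifted right by 3, per the MFI frame-posting convention)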
1944 */
1945 writel(cmd->frame_phys_addr >> 3,
1946 &instance->reg_set->inbound_queue_port);
1947
1948 return 0;
1949}
1950
1951/**
1952 * megasas_start_aen - Subscribes to AEN during driver load time
1953 * @instance: Adapter soft state
1954 */
1955static int megasas_start_aen(struct megasas_instance *instance)
1956{
1957 struct megasas_evt_log_info eli;
1958 union megasas_evt_class_locale class_locale;
1959
1960 /*
1961 * Get the latest sequence number from FW
1962 */
1963 memset(&eli, 0, sizeof(eli));
1964
1965 if (megasas_get_seq_num(instance, &eli))
1966 return -1;
1967
1968 /*
1969 * Register AEN with FW for latest sequence number plus 1
1970 */
1971 class_locale.members.reserved = 0;
1972 class_locale.members.locale = MR_EVT_LOCALE_ALL;
1973 class_locale.members.class = MR_EVT_CLASS_DEBUG;
1974
1975 return megasas_register_aen(instance, eli.newest_seq_num + 1,
1976 class_locale.word);
1977}
1978
1979/**
1980 * megasas_io_attach - Attaches this driver to SCSI mid-layer
1981 * @instance: Adapter soft state
1982 */
1983static int megasas_io_attach(struct megasas_instance *instance)
1984{
1985 struct Scsi_Host *host = instance->host;
1986
1987 /*
1988 * Export parameters required by SCSI mid-layer
1989 */
1990 host->irq = instance->pdev->irq;
1991 host->unique_id = instance->unique_id;
1992 host->can_queue = instance->max_fw_cmds - MEGASAS_INT_CMDS;
1993 host->this_id = instance->init_id;
1994 host->sg_tablesize = instance->max_num_sge;
1995 host->max_sectors = instance->max_sectors_per_req;
1996 host->cmd_per_lun = 128;
1997 host->max_channel = MEGASAS_MAX_CHANNELS - 1;
1998 host->max_id = MEGASAS_MAX_DEV_PER_CHANNEL;
1999 host->max_lun = MEGASAS_MAX_LUN;
2000
2001 /*
2002 * Notify the mid-layer about the new controller
2003 */
2004 if (scsi_add_host(host, &instance->pdev->dev)) {
2005 printk(KERN_DEBUG "megasas: scsi_add_host failed\n");
2006 return -ENODEV;
2007 }
2008
2009 /*
2010 * Trigger SCSI to scan our drives
2011 */
2012 scsi_scan_host(host);
2013 return 0;
2014}
2015
2016/**
2017 * megasas_probe_one - PCI hotplug entry point
2018 * @pdev: PCI device structure
2019 * @id: PCI ids of supported hotplugged adapter
2020 */
2021static int __devinit
2022megasas_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
2023{
2024 int rval;
2025 struct Scsi_Host *host;
2026 struct megasas_instance *instance;
2027
2028 /*
2029 * Announce PCI information
2030 */
2031 printk(KERN_INFO "megasas: %#4.04x:%#4.04x:%#4.04x:%#4.04x: ",
2032 pdev->vendor, pdev->device, pdev->subsystem_vendor,
2033 pdev->subsystem_device);
2034
2035 printk("bus %d:slot %d:func %d\n",
2036 pdev->bus->number, PCI_SLOT(pdev->devfn), PCI_FUNC(pdev->devfn));
2037
2038 /*
2039 * PCI prepping: enable device, set bus mastering and DMA mask
2040 */
2041 rval = pci_enable_device(pdev);
2042
2043 if (rval) {
2044 return rval;
2045 }
2046
2047 pci_set_master(pdev);
2048
2049 /*
2050 * All our controllers are capable of performing 64-bit DMA
2051 */
2052 if (IS_DMA64) {
2053 if (pci_set_dma_mask(pdev, DMA_64BIT_MASK) != 0) {
2054
2055 if (pci_set_dma_mask(pdev, DMA_32BIT_MASK) != 0)
2056 goto fail_set_dma_mask;
2057 }
2058 } else {
2059 if (pci_set_dma_mask(pdev, DMA_32BIT_MASK) != 0)
2060 goto fail_set_dma_mask;
2061 }
2062
2063 host = scsi_host_alloc(&megasas_template,
2064 sizeof(struct megasas_instance));
2065
2066 if (!host) {
2067 printk(KERN_DEBUG "megasas: scsi_host_alloc failed\n");
2068 goto fail_alloc_instance;
2069 }
2070
2071 instance = (struct megasas_instance *)host->hostdata;
2072 memset(instance, 0, sizeof(*instance));
2073
2074 instance->producer = pci_alloc_consistent(pdev, sizeof(u32),
2075 &instance->producer_h);
2076 instance->consumer = pci_alloc_consistent(pdev, sizeof(u32),
2077 &instance->consumer_h);
2078
2079 if (!instance->producer || !instance->consumer) {
2080 printk(KERN_DEBUG "megasas: Failed to allocate memory for "
2081 "producer, consumer\n");
2082 goto fail_alloc_dma_buf;
2083 }
2084
2085 *instance->producer = 0;
2086 *instance->consumer = 0;
2087
2088 instance->evt_detail = pci_alloc_consistent(pdev,
2089 sizeof(struct
2090 megasas_evt_detail),
2091 &instance->evt_detail_h);
2092
2093 if (!instance->evt_detail) {
2094 printk(KERN_DEBUG "megasas: Failed to allocate memory for "
2095 "event detail structure\n");
2096 goto fail_alloc_dma_buf;
2097 }
2098
2099 /*
2100 * Initialize locks and queues
2101 */
2102 INIT_LIST_HEAD(&instance->cmd_pool);
2103
2104 init_waitqueue_head(&instance->int_cmd_wait_q);
2105 init_waitqueue_head(&instance->abort_cmd_wait_q);
2106
2107 spin_lock_init(&instance->cmd_pool_lock);
2108 spin_lock_init(&instance->instance_lock);
2109
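	/*
	 * ioctl_sem is initialized to MEGASAS_INT_CMDS so that management
	 * ioctls never consume more than the internal command slots reserved
	 * out of can_queue (see megasas_io_attach)
	 */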
2110 sema_init(&instance->aen_mutex, 1);
2111 sema_init(&instance->ioctl_sem, MEGASAS_INT_CMDS);
2112
2113 /*
2114 * Initialize PCI related and misc parameters
2115 */
2116 instance->pdev = pdev;
2117 instance->host = host;
2118 instance->unique_id = pdev->bus->number << 8 | pdev->devfn;
2119 instance->init_id = MEGASAS_DEFAULT_INIT_ID;
2120
2121 /*
2122 * Initialize MFI Firmware
2123 */
2124 if (megasas_init_mfi(instance))
2125 goto fail_init_mfi;
2126
2127 /*
2128 * Register IRQ
2129 */
2130 if (request_irq(pdev->irq, megasas_isr, SA_SHIRQ, "megasas", instance)) {
2131 printk(KERN_DEBUG "megasas: Failed to register IRQ\n");
2132 goto fail_irq;
2133 }
2134
2135 megasas_enable_intr(instance->reg_set);
2136
2137 /*
2138 * Store instance in PCI softstate
2139 */
2140 pci_set_drvdata(pdev, instance);
2141
2142 /*
2143 * Add this controller to megasas_mgmt_info structure so that it
2144 * can be exported to management applications
2145 */
2146 megasas_mgmt_info.count++;
2147 megasas_mgmt_info.instance[megasas_mgmt_info.max_index] = instance;
2148 megasas_mgmt_info.max_index++;
2149
2150 /*
2151 * Initiate AEN (Asynchronous Event Notification)
2152 */
2153 if (megasas_start_aen(instance)) {
2154 printk(KERN_DEBUG "megasas: start aen failed\n");
2155 goto fail_start_aen;
2156 }
2157
2158 /*
2159 * Register with SCSI mid-layer
2160 */
2161 if (megasas_io_attach(instance))
2162 goto fail_io_attach;
2163
2164 return 0;
2165
2166 fail_start_aen:
2167 fail_io_attach:
2168 megasas_mgmt_info.count--;
2169 megasas_mgmt_info.instance[megasas_mgmt_info.max_index] = NULL;
2170 megasas_mgmt_info.max_index--;
2171
2172 pci_set_drvdata(pdev, NULL);
2173 megasas_disable_intr(instance->reg_set);
2174 free_irq(instance->pdev->irq, instance);
2175
2176 megasas_release_mfi(instance);
2177
2178 fail_irq:
2179 fail_init_mfi:
2180 fail_alloc_dma_buf:
2181 if (instance->evt_detail)
2182 pci_free_consistent(pdev, sizeof(struct megasas_evt_detail),
2183 instance->evt_detail,
2184 instance->evt_detail_h);
2185
2186 if (instance->producer)
2187 pci_free_consistent(pdev, sizeof(u32), instance->producer,
2188 instance->producer_h);
2189 if (instance->consumer)
2190 pci_free_consistent(pdev, sizeof(u32), instance->consumer,
2191 instance->consumer_h);
2192 scsi_host_put(host);
2193
2194 fail_alloc_instance:
2195 fail_set_dma_mask:
2196 pci_disable_device(pdev);
2197
2198 return -ENODEV;
2199}
2200
2201/**
2202 * megasas_flush_cache - Requests FW to flush all its caches
2203 * @instance: Adapter soft state
2204 */
2205static void megasas_flush_cache(struct megasas_instance *instance)
2206{
2207 struct megasas_cmd *cmd;
2208 struct megasas_dcmd_frame *dcmd;
2209
2210 cmd = megasas_get_cmd(instance);
2211
2212 if (!cmd)
2213 return;
2214
2215 dcmd = &cmd->frame->dcmd;
2216
2217 memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE);
2218
2219 dcmd->cmd = MFI_CMD_DCMD;
2220 dcmd->cmd_status = 0x0;
2221 dcmd->sge_count = 0;
2222 dcmd->flags = MFI_FRAME_DIR_NONE;
2223 dcmd->timeout = 0;
2224 dcmd->data_xfer_len = 0;
2225 dcmd->opcode = MR_DCMD_CTRL_CACHE_FLUSH;
2226 dcmd->mbox.b[0] = MR_FLUSH_CTRL_CACHE | MR_FLUSH_DISK_CACHE;
2227
2228 megasas_issue_blocked_cmd(instance, cmd);
2229
2230 megasas_return_cmd(instance, cmd);
2231
2232 return;
2233}
2234
2235/**
2236 * megasas_shutdown_controller - Instructs FW to shutdown the controller
2237 * @instance: Adapter soft state
2238 */
2239static void megasas_shutdown_controller(struct megasas_instance *instance)
2240{
2241 struct megasas_cmd *cmd;
2242 struct megasas_dcmd_frame *dcmd;
2243
2244 cmd = megasas_get_cmd(instance);
2245
2246 if (!cmd)
2247 return;
2248
2249 if (instance->aen_cmd)
2250 megasas_issue_blocked_abort_cmd(instance, instance->aen_cmd);
2251
2252 dcmd = &cmd->frame->dcmd;
2253
2254 memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE);
2255
2256 dcmd->cmd = MFI_CMD_DCMD;
2257 dcmd->cmd_status = 0x0;
2258 dcmd->sge_count = 0;
2259 dcmd->flags = MFI_FRAME_DIR_NONE;
2260 dcmd->timeout = 0;
2261 dcmd->data_xfer_len = 0;
2262 dcmd->opcode = MR_DCMD_CTRL_SHUTDOWN;
2263
2264 megasas_issue_blocked_cmd(instance, cmd);
2265
2266 megasas_return_cmd(instance, cmd);
2267
2268 return;
2269}
2270
2271/**
2272 * megasas_detach_one - PCI hot"un"plug entry point
2273 * @pdev: PCI device structure
2274 */
2275static void megasas_detach_one(struct pci_dev *pdev)
2276{
2277 int i;
2278 struct Scsi_Host *host;
2279 struct megasas_instance *instance;
2280
2281 instance = pci_get_drvdata(pdev);
2282 host = instance->host;
2283
2284 scsi_remove_host(instance->host);
2285 megasas_flush_cache(instance);
2286 megasas_shutdown_controller(instance);
2287
2288 /*
2289 * Take the instance off the instance array. Note that we will not
2290 * decrement the max_index. We let this array be a sparse array
2291 */
2292 for (i = 0; i < megasas_mgmt_info.max_index; i++) {
2293 if (megasas_mgmt_info.instance[i] == instance) {
2294 megasas_mgmt_info.count--;
2295 megasas_mgmt_info.instance[i] = NULL;
2296
2297 break;
2298 }
2299 }
2300
2301 pci_set_drvdata(instance->pdev, NULL);
2302
2303 megasas_disable_intr(instance->reg_set);
2304
2305 free_irq(instance->pdev->irq, instance);
2306
2307 megasas_release_mfi(instance);
2308
2309 pci_free_consistent(pdev, sizeof(struct megasas_evt_detail),
2310 instance->evt_detail, instance->evt_detail_h);
2311
2312 pci_free_consistent(pdev, sizeof(u32), instance->producer,
2313 instance->producer_h);
2314
2315 pci_free_consistent(pdev, sizeof(u32), instance->consumer,
2316 instance->consumer_h);
2317
2318 scsi_host_put(host);
2319
2320 pci_set_drvdata(pdev, NULL);
2321
2322 pci_disable_device(pdev);
2323
2324 return;
2325}
2326
2327/**
2328 * megasas_shutdown - Shutdown entry point
2329 * @pdev: PCI device structure
2330 */
2331static void megasas_shutdown(struct pci_dev *pdev)
2332{
2333 struct megasas_instance *instance = pci_get_drvdata(pdev);
2334 megasas_flush_cache(instance);
2335}
2336
2337/**
2338 * megasas_mgmt_open - char node "open" entry point
2339 */
2340static int megasas_mgmt_open(struct inode *inode, struct file *filep)
2341{
2342 /*
2343 * Allow only those users with admin rights
2344 */
2345 if (!capable(CAP_SYS_ADMIN))
2346 return -EACCES;
2347
2348 return 0;
2349}
2350
2351/**
2352 * megasas_mgmt_release - char node "release" entry point
2353 */
2354static int megasas_mgmt_release(struct inode *inode, struct file *filep)
2355{
2356 filep->private_data = NULL;
2357 fasync_helper(-1, filep, 0, &megasas_async_queue);
2358
2359 return 0;
2360}
2361
2362/**
2363 * megasas_mgmt_fasync - Async notifier registration from applications
2364 *
2365 * This function adds the calling process to a driver global queue. When an
2366 * event occurs, SIGIO will be sent to all processes in this queue.
2367 */
2368static int megasas_mgmt_fasync(int fd, struct file *filep, int mode)
2369{
2370 int rc;
2371
2372 down(&megasas_async_queue_mutex);
2373
2374 rc = fasync_helper(fd, filep, mode, &megasas_async_queue);
2375
2376 up(&megasas_async_queue_mutex);
2377
2378 if (rc >= 0) {
2379 /* For sanity check when we get ioctl */
2380 filep->private_data = filep;
2381 return 0;
2382 }
2383
2384 printk(KERN_DEBUG "megasas: fasync_helper failed [%d]\n", rc);
2385
2386 return rc;
2387}
2388
2389/**
2390 * megasas_mgmt_fw_ioctl - Issues management ioctls to FW
2391 * @instance: Adapter soft state
2392 * @user_ioc: User's ioctl packet (a kernel copy is passed in @ioc)
2393 */
2394static int
2395megasas_mgmt_fw_ioctl(struct megasas_instance *instance,
2396 struct megasas_iocpacket __user * user_ioc,
2397 struct megasas_iocpacket *ioc)
2398{
2399 struct megasas_sge32 *kern_sge32;
2400 struct megasas_cmd *cmd;
2401 void *kbuff_arr[MAX_IOCTL_SGE];
2402 dma_addr_t buf_handle = 0;
2403 int error = 0, i;
2404 void *sense = NULL;
2405 dma_addr_t sense_handle;
2406 u32 *sense_ptr;
2407
2408 memset(kbuff_arr, 0, sizeof(kbuff_arr));
2409
2410 if (ioc->sge_count > MAX_IOCTL_SGE) {
2411 printk(KERN_DEBUG "megasas: SGE count [%d] > max limit [%d]\n",
2412 ioc->sge_count, MAX_IOCTL_SGE);
2413 return -EINVAL;
2414 }
2415
2416 cmd = megasas_get_cmd(instance);
2417 if (!cmd) {
2418 printk(KERN_DEBUG "megasas: Failed to get a cmd packet\n");
2419 return -ENOMEM;
2420 }
2421
2422 /*
2423 * User's IOCTL packet has 2 frames (maximum). Copy those two
2424 * frames into our cmd's frames. cmd->frame's context will get
2425 * overwritten when we copy from user's frames. So set that value
2426 * alone separately
2427 */
2428 memcpy(cmd->frame, ioc->frame.raw, 2 * MEGAMFI_FRAME_SIZE);
2429 cmd->frame->hdr.context = cmd->index;
2430
2431 /*
2432 * The management interface between applications and the fw uses
2433 * MFI frames. E.g, RAID configuration changes, LD property changes
2434 * etc. are accomplished through different kinds of MFI frames. The
2435 * driver needs to care only about substituting user buffers with
2436 * kernel buffers in SGLs. The location of the SGL is embedded in the
2437 * struct iocpacket itself.
2438 */
2439 kern_sge32 = (struct megasas_sge32 *)
2440 ((unsigned long)cmd->frame + ioc->sgl_off);
2441
2442 /*
2443 * For each user buffer, create a mirror buffer and copy in
2444 */
2445 for (i = 0; i < ioc->sge_count; i++) {
2446 kbuff_arr[i] = pci_alloc_consistent(instance->pdev,
2447 ioc->sgl[i].iov_len,
2448 &buf_handle);
2449 if (!kbuff_arr[i]) {
2450 printk(KERN_DEBUG "megasas: Failed to alloc "
2451 "kernel SGL buffer for IOCTL \n");
2452 error = -ENOMEM;
2453 goto out;
2454 }
2455
2456 /*
2457 * We don't change the dma_coherent_mask, so
2458 * pci_alloc_consistent only returns 32bit addresses
2459 */
2460 kern_sge32[i].phys_addr = (u32) buf_handle;
2461 kern_sge32[i].length = ioc->sgl[i].iov_len;
2462
2463 /*
2464 * We created a kernel buffer corresponding to the
2465 * user buffer. Now copy in from the user buffer
2466 */
2467 if (copy_from_user(kbuff_arr[i], ioc->sgl[i].iov_base,
2468 (u32) (ioc->sgl[i].iov_len))) {
2469 error = -EFAULT;
2470 goto out;
2471 }
2472 }
2473
2474 if (ioc->sense_len) {
2475 sense = pci_alloc_consistent(instance->pdev, ioc->sense_len,
2476 &sense_handle);
2477 if (!sense) {
2478 error = -ENOMEM;
2479 goto out;
2480 }
2481
2482 sense_ptr =
2483 (u32 *) ((unsigned long)cmd->frame + ioc->sense_off);
2484 *sense_ptr = sense_handle;
2485 }
2486
2487 /*
2488 * Set the sync_cmd flag so that the ISR knows not to complete this
2489 * cmd to the SCSI mid-layer
2490 */
2491 cmd->sync_cmd = 1;
2492 megasas_issue_blocked_cmd(instance, cmd);
2493 cmd->sync_cmd = 0;
2494
2495 /*
2496 * copy out the kernel buffers to user buffers
2497 */
2498 for (i = 0; i < ioc->sge_count; i++) {
2499 if (copy_to_user(ioc->sgl[i].iov_base, kbuff_arr[i],
2500 ioc->sgl[i].iov_len)) {
2501 error = -EFAULT;
2502 goto out;
2503 }
2504 }
2505
2506 /*
2507 * copy out the sense
2508 */
2509 if (ioc->sense_len) {
2510 /*
2511 * sense_ptr points to the location that has the user
2512 * sense buffer address
2513 */
2514 sense_ptr = (u32 *) ((unsigned long)ioc->frame.raw +
2515 ioc->sense_off);
2516
2517 if (copy_to_user((void __user *)((unsigned long)(*sense_ptr)),
2518 sense, ioc->sense_len)) {
2519 error = -EFAULT;
2520 goto out;
2521 }
2522 }
2523
2524 /*
2525 * copy the status codes returned by the fw
2526 */
2527 if (copy_to_user(&user_ioc->frame.hdr.cmd_status,
2528 &cmd->frame->hdr.cmd_status, sizeof(u8))) {
2529 printk(KERN_DEBUG "megasas: Error copying out cmd_status\n");
2530 error = -EFAULT;
2531 }
2532
2533 out:
2534 if (sense) {
2535 pci_free_consistent(instance->pdev, ioc->sense_len,
2536 sense, sense_handle);
2537 }
2538
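	/* free only the mirror buffers that were actually allocated */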
2539 for (i = 0; i < ioc->sge_count && kbuff_arr[i]; i++) {
2540 pci_free_consistent(instance->pdev,
2541 kern_sge32[i].length,
2542 kbuff_arr[i], kern_sge32[i].phys_addr);
2543 }
2544
2545 megasas_return_cmd(instance, cmd);
2546 return error;
2547}
2548
2549static struct megasas_instance *megasas_lookup_instance(u16 host_no)
2550{
2551 int i;
2552
2553 for (i = 0; i < megasas_mgmt_info.max_index; i++) {
2554
2555 if ((megasas_mgmt_info.instance[i]) &&
2556 (megasas_mgmt_info.instance[i]->host->host_no == host_no))
2557 return megasas_mgmt_info.instance[i];
2558 }
2559
2560 return NULL;
2561}
2562
2563static int megasas_mgmt_ioctl_fw(struct file *file, unsigned long arg)
2564{
2565 struct megasas_iocpacket __user *user_ioc =
2566 (struct megasas_iocpacket __user *)arg;
2567 struct megasas_iocpacket *ioc;
2568 struct megasas_instance *instance;
2569 int error;
2570
2571 ioc = kmalloc(sizeof(*ioc), GFP_KERNEL);
2572 if (!ioc)
2573 return -ENOMEM;
2574
2575 if (copy_from_user(ioc, user_ioc, sizeof(*ioc))) {
2576 error = -EFAULT;
2577 goto out_kfree_ioc;
2578 }
2579
2580 instance = megasas_lookup_instance(ioc->host_no);
2581 if (!instance) {
2582 error = -ENODEV;
2583 goto out_kfree_ioc;
2584 }
2585
2586 /*
2587 * We will allow only MEGASAS_INT_CMDS number of parallel ioctl cmds
2588 */
2589 if (down_interruptible(&instance->ioctl_sem)) {
2590 error = -ERESTARTSYS;
2591 goto out_kfree_ioc;
2592 }
2593 error = megasas_mgmt_fw_ioctl(instance, user_ioc, ioc);
2594 up(&instance->ioctl_sem);
2595
2596 out_kfree_ioc:
2597 kfree(ioc);
2598 return error;
2599}
2600
2601static int megasas_mgmt_ioctl_aen(struct file *file, unsigned long arg)
2602{
2603 struct megasas_instance *instance;
2604 struct megasas_aen aen;
2605 int error;
2606
2607 if (file->private_data != file) {
2608 printk(KERN_DEBUG "megasas: fasync_helper was not "
2609 "called first\n");
2610 return -EINVAL;
2611 }
2612
2613 if (copy_from_user(&aen, (void __user *)arg, sizeof(aen)))
2614 return -EFAULT;
2615
2616 instance = megasas_lookup_instance(aen.host_no);
2617
2618 if (!instance)
2619 return -ENODEV;
2620
2621 down(&instance->aen_mutex);
2622 error = megasas_register_aen(instance, aen.seq_num,
2623 aen.class_locale_word);
2624 up(&instance->aen_mutex);
2625 return error;
2626}
2627
2628/**
2629 * megasas_mgmt_ioctl - char node ioctl entry point
2630 */
2631static long
2632megasas_mgmt_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
2633{
2634 switch (cmd) {
2635 case MEGASAS_IOC_FIRMWARE:
2636 return megasas_mgmt_ioctl_fw(file, arg);
2637
2638 case MEGASAS_IOC_GET_AEN:
2639 return megasas_mgmt_ioctl_aen(file, arg);
2640 }
2641
2642 return -ENOTTY;
2643}
2644
2645#ifdef CONFIG_COMPAT
2646static int megasas_mgmt_compat_ioctl_fw(struct file *file, unsigned long arg)
2647{
2648 struct compat_megasas_iocpacket __user *cioc =
2649 (struct compat_megasas_iocpacket __user *)arg;
2650 struct megasas_iocpacket __user *ioc =
2651 compat_alloc_user_space(sizeof(struct megasas_iocpacket));
2652 int i;
2653 int error = 0;
2654
2655 clear_user(ioc, sizeof(*ioc));
2656
2657 if (copy_in_user(&ioc->host_no, &cioc->host_no, sizeof(u16)) ||
2658 copy_in_user(&ioc->sgl_off, &cioc->sgl_off, sizeof(u32)) ||
2659 copy_in_user(&ioc->sense_off, &cioc->sense_off, sizeof(u32)) ||
2660 copy_in_user(&ioc->sense_len, &cioc->sense_len, sizeof(u32)) ||
2661 copy_in_user(ioc->frame.raw, cioc->frame.raw, 128) ||
2662 copy_in_user(&ioc->sge_count, &cioc->sge_count, sizeof(u32)))
2663 return -EFAULT;
2664
2665 for (i = 0; i < MAX_IOCTL_SGE; i++) {
2666 compat_uptr_t ptr;
2667
2668 if (get_user(ptr, &cioc->sgl[i].iov_base) ||
2669 put_user(compat_ptr(ptr), &ioc->sgl[i].iov_base) ||
2670 copy_in_user(&ioc->sgl[i].iov_len,
2671 &cioc->sgl[i].iov_len, sizeof(compat_size_t)))
2672 return -EFAULT;
2673 }
2674
2675 error = megasas_mgmt_ioctl_fw(file, (unsigned long)ioc);
2676
2677 if (copy_in_user(&cioc->frame.hdr.cmd_status,
2678 &ioc->frame.hdr.cmd_status, sizeof(u8))) {
2679 printk(KERN_DEBUG "megasas: error copy_in_user cmd_status\n");
2680 return -EFAULT;
2681 }
2682 return error;
2683}
2684
2685static long
2686megasas_mgmt_compat_ioctl(struct file *file, unsigned int cmd,
2687 unsigned long arg)
2688{
2689 switch (cmd) {
2690 case MEGASAS_IOC_FIRMWARE:{
2691 return megasas_mgmt_compat_ioctl_fw(file, arg);
2692 }
2693 case MEGASAS_IOC_GET_AEN:
2694 return megasas_mgmt_ioctl_aen(file, arg);
2695 }
2696
2697 return -ENOTTY;
2698}
2699#endif
2700
2701/*
2702 * File operations structure for management interface
2703 */
2704static struct file_operations megasas_mgmt_fops = {
2705 .owner = THIS_MODULE,
2706 .open = megasas_mgmt_open,
2707 .release = megasas_mgmt_release,
2708 .fasync = megasas_mgmt_fasync,
2709 .unlocked_ioctl = megasas_mgmt_ioctl,
2710#ifdef CONFIG_COMPAT
2711 .compat_ioctl = megasas_mgmt_compat_ioctl,
2712#endif
2713};
2714
2715/*
2716 * PCI hotplug support registration structure
2717 */
2718static struct pci_driver megasas_pci_driver = {
2719
2720 .name = "megaraid_sas",
2721 .id_table = megasas_pci_table,
2722 .probe = megasas_probe_one,
2723 .remove = __devexit_p(megasas_detach_one),
2724 .shutdown = megasas_shutdown,
2725};
2726
2727/*
2728 * Sysfs driver attributes
2729 */
2730static ssize_t megasas_sysfs_show_version(struct device_driver *dd, char *buf)
2731{
2732 return snprintf(buf, strlen(MEGASAS_VERSION) + 2, "%s\n",
2733 MEGASAS_VERSION);
2734}
2735
2736static DRIVER_ATTR(version, S_IRUGO, megasas_sysfs_show_version, NULL);
2737
2738static ssize_t
2739megasas_sysfs_show_release_date(struct device_driver *dd, char *buf)
2740{
2741 return snprintf(buf, strlen(MEGASAS_RELDATE) + 2, "%s\n",
2742 MEGASAS_RELDATE);
2743}
2744
2745static DRIVER_ATTR(release_date, S_IRUGO, megasas_sysfs_show_release_date,
2746 NULL);
2747
2748/**
2749 * megasas_init - Driver load entry point
2750 */
2751static int __init megasas_init(void)
2752{
2753 int rval;
2754
2755 /*
2756 * Announce driver version and other information
2757 */
2758 printk(KERN_INFO "megasas: %s %s\n", MEGASAS_VERSION,
2759 MEGASAS_EXT_VERSION);
2760
2761 memset(&megasas_mgmt_info, 0, sizeof(megasas_mgmt_info));
2762
2763 /*
2764 * Register character device node
2765 */
2766 rval = register_chrdev(0, "megaraid_sas_ioctl", &megasas_mgmt_fops);
2767
2768 if (rval < 0) {
2769 printk(KERN_DEBUG "megasas: failed to open device node\n");
2770 return rval;
2771 }
2772
2773 megasas_mgmt_majorno = rval;
2774
2775 /*
2776 * Register ourselves as PCI hotplug module
2777 */
2778 rval = pci_module_init(&megasas_pci_driver);
2779
2780 if (rval) {
2781 printk(KERN_DEBUG "megasas: PCI hotplug registration failed\n");
2782 unregister_chrdev(megasas_mgmt_majorno, "megaraid_sas_ioctl");
2783 }
2784
2785 driver_create_file(&megasas_pci_driver.driver, &driver_attr_version);
2786 driver_create_file(&megasas_pci_driver.driver,
2787 &driver_attr_release_date);
2788
2789 return rval;
2790}
2791
2792/**
2793 * megasas_exit - Driver unload entry point
2794 */
2795static void __exit megasas_exit(void)
2796{
2797 driver_remove_file(&megasas_pci_driver.driver, &driver_attr_version);
2798 driver_remove_file(&megasas_pci_driver.driver,
2799 &driver_attr_release_date);
2800
2801 pci_unregister_driver(&megasas_pci_driver);
2802 unregister_chrdev(megasas_mgmt_majorno, "megaraid_sas_ioctl");
2803}
2804
2805module_init(megasas_init);
2806module_exit(megasas_exit);
diff --git a/drivers/scsi/megaraid/megaraid_sas.h b/drivers/scsi/megaraid/megaraid_sas.h
new file mode 100644
index 000000000000..eaec9d531424
--- /dev/null
+++ b/drivers/scsi/megaraid/megaraid_sas.h
@@ -0,0 +1,1142 @@
1/*
2 *
3 * Linux MegaRAID driver for SAS based RAID controllers
4 *
5 * Copyright (c) 2003-2005 LSI Logic Corporation.
6 *
7 * This program is free software; you can redistribute it and/or
8 * modify it under the terms of the GNU General Public License
9 * as published by the Free Software Foundation; either version
10 * 2 of the License, or (at your option) any later version.
11 *
12 * FILE : megaraid_sas.h
13 */
14
15#ifndef LSI_MEGARAID_SAS_H
16#define LSI_MEGARAID_SAS_H
17
18/**
19 * MegaRAID SAS Driver meta data
20 */
21#define MEGASAS_VERSION "00.00.02.00-rc4"
22#define MEGASAS_RELDATE "Sep 16, 2005"
23#define MEGASAS_EXT_VERSION "Fri Sep 16 12:37:08 EDT 2005"
24
25/*
26 * =====================================
27 * MegaRAID SAS MFI firmware definitions
28 * =====================================
29 */
30
31/*
32 * MFI stands for MegaRAID SAS FW Interface. This is just a moniker for the
33 * protocol between the software and firmware. Commands are issued using
34 * "message frames"
35 */
36
37/**
38 * FW posts its state in upper 4 bits of outbound_msg_0 register
39 */
40#define MFI_STATE_MASK 0xF0000000
41#define MFI_STATE_UNDEFINED 0x00000000
42#define MFI_STATE_BB_INIT 0x10000000
43#define MFI_STATE_FW_INIT 0x40000000
44#define MFI_STATE_WAIT_HANDSHAKE 0x60000000
45#define MFI_STATE_FW_INIT_2 0x70000000
46#define MFI_STATE_DEVICE_SCAN 0x80000000
47#define MFI_STATE_FLUSH_CACHE 0xA0000000
48#define MFI_STATE_READY 0xB0000000
49#define MFI_STATE_OPERATIONAL 0xC0000000
50#define MFI_STATE_FAULT 0xF0000000
51
52#define MEGAMFI_FRAME_SIZE 64
53
54/**
55 * During FW init, clear pending cmds & reset state using inbound_msg_0
56 *
57 * ABORT : Abort all pending cmds
58 * READY : Move from OPERATIONAL to READY state; discard queue info
59 * MFIMODE : Discard (possible) low MFA posted in 64-bit mode (??)
60 * CLR_HANDSHAKE: FW is waiting for HANDSHAKE from BIOS or Driver
61 */
62#define MFI_INIT_ABORT 0x00000000
63#define MFI_INIT_READY 0x00000002
64#define MFI_INIT_MFIMODE 0x00000004
65#define MFI_INIT_CLEAR_HANDSHAKE 0x00000008
66#define MFI_RESET_FLAGS (MFI_INIT_READY|MFI_INIT_MFIMODE)
67
68/**
69 * MFI frame flags
70 */
71#define MFI_FRAME_POST_IN_REPLY_QUEUE 0x0000
72#define MFI_FRAME_DONT_POST_IN_REPLY_QUEUE 0x0001
73#define MFI_FRAME_SGL32 0x0000
74#define MFI_FRAME_SGL64 0x0002
75#define MFI_FRAME_SENSE32 0x0000
76#define MFI_FRAME_SENSE64 0x0004
77#define MFI_FRAME_DIR_NONE 0x0000
78#define MFI_FRAME_DIR_WRITE 0x0008
79#define MFI_FRAME_DIR_READ 0x0010
80#define MFI_FRAME_DIR_BOTH 0x0018
81
82/**
83 * Definition for cmd_status
84 */
85#define MFI_CMD_STATUS_POLL_MODE 0xFF
86
87/**
88 * MFI command opcodes
89 */
90#define MFI_CMD_INIT 0x00
91#define MFI_CMD_LD_READ 0x01
92#define MFI_CMD_LD_WRITE 0x02
93#define MFI_CMD_LD_SCSI_IO 0x03
94#define MFI_CMD_PD_SCSI_IO 0x04
95#define MFI_CMD_DCMD 0x05
96#define MFI_CMD_ABORT 0x06
97#define MFI_CMD_SMP 0x07
98#define MFI_CMD_STP 0x08
99
100#define MR_DCMD_CTRL_GET_INFO 0x01010000
101
102#define MR_DCMD_CTRL_CACHE_FLUSH 0x01101000
103#define MR_FLUSH_CTRL_CACHE 0x01
104#define MR_FLUSH_DISK_CACHE 0x02
105
106#define MR_DCMD_CTRL_SHUTDOWN 0x01050000
107#define MR_ENABLE_DRIVE_SPINDOWN 0x01
108
109#define MR_DCMD_CTRL_EVENT_GET_INFO 0x01040100
110#define MR_DCMD_CTRL_EVENT_GET 0x01040300
111#define MR_DCMD_CTRL_EVENT_WAIT 0x01040500
112#define MR_DCMD_LD_GET_PROPERTIES 0x03030000
113
114#define MR_DCMD_CLUSTER 0x08000000
115#define MR_DCMD_CLUSTER_RESET_ALL 0x08010100
116#define MR_DCMD_CLUSTER_RESET_LD 0x08010200
117
118/**
119 * MFI command completion codes
120 */
121enum MFI_STAT {
122 MFI_STAT_OK = 0x00,
123 MFI_STAT_INVALID_CMD = 0x01,
124 MFI_STAT_INVALID_DCMD = 0x02,
125 MFI_STAT_INVALID_PARAMETER = 0x03,
126 MFI_STAT_INVALID_SEQUENCE_NUMBER = 0x04,
127 MFI_STAT_ABORT_NOT_POSSIBLE = 0x05,
128 MFI_STAT_APP_HOST_CODE_NOT_FOUND = 0x06,
129 MFI_STAT_APP_IN_USE = 0x07,
130 MFI_STAT_APP_NOT_INITIALIZED = 0x08,
131 MFI_STAT_ARRAY_INDEX_INVALID = 0x09,
132 MFI_STAT_ARRAY_ROW_NOT_EMPTY = 0x0a,
133 MFI_STAT_CONFIG_RESOURCE_CONFLICT = 0x0b,
134 MFI_STAT_DEVICE_NOT_FOUND = 0x0c,
135 MFI_STAT_DRIVE_TOO_SMALL = 0x0d,
136 MFI_STAT_FLASH_ALLOC_FAIL = 0x0e,
137 MFI_STAT_FLASH_BUSY = 0x0f,
138 MFI_STAT_FLASH_ERROR = 0x10,
139 MFI_STAT_FLASH_IMAGE_BAD = 0x11,
140 MFI_STAT_FLASH_IMAGE_INCOMPLETE = 0x12,
141 MFI_STAT_FLASH_NOT_OPEN = 0x13,
142 MFI_STAT_FLASH_NOT_STARTED = 0x14,
143 MFI_STAT_FLUSH_FAILED = 0x15,
144 MFI_STAT_HOST_CODE_NOT_FOUNT = 0x16,
145 MFI_STAT_LD_CC_IN_PROGRESS = 0x17,
146 MFI_STAT_LD_INIT_IN_PROGRESS = 0x18,
147 MFI_STAT_LD_LBA_OUT_OF_RANGE = 0x19,
148 MFI_STAT_LD_MAX_CONFIGURED = 0x1a,
149 MFI_STAT_LD_NOT_OPTIMAL = 0x1b,
150 MFI_STAT_LD_RBLD_IN_PROGRESS = 0x1c,
151 MFI_STAT_LD_RECON_IN_PROGRESS = 0x1d,
152 MFI_STAT_LD_WRONG_RAID_LEVEL = 0x1e,
153 MFI_STAT_MAX_SPARES_EXCEEDED = 0x1f,
154 MFI_STAT_MEMORY_NOT_AVAILABLE = 0x20,
155 MFI_STAT_MFC_HW_ERROR = 0x21,
156 MFI_STAT_NO_HW_PRESENT = 0x22,
157 MFI_STAT_NOT_FOUND = 0x23,
158 MFI_STAT_NOT_IN_ENCL = 0x24,
159 MFI_STAT_PD_CLEAR_IN_PROGRESS = 0x25,
160 MFI_STAT_PD_TYPE_WRONG = 0x26,
161 MFI_STAT_PR_DISABLED = 0x27,
162 MFI_STAT_ROW_INDEX_INVALID = 0x28,
163 MFI_STAT_SAS_CONFIG_INVALID_ACTION = 0x29,
164 MFI_STAT_SAS_CONFIG_INVALID_DATA = 0x2a,
165 MFI_STAT_SAS_CONFIG_INVALID_PAGE = 0x2b,
166 MFI_STAT_SAS_CONFIG_INVALID_TYPE = 0x2c,
167 MFI_STAT_SCSI_DONE_WITH_ERROR = 0x2d,
168 MFI_STAT_SCSI_IO_FAILED = 0x2e,
169 MFI_STAT_SCSI_RESERVATION_CONFLICT = 0x2f,
170 MFI_STAT_SHUTDOWN_FAILED = 0x30,
171 MFI_STAT_TIME_NOT_SET = 0x31,
172 MFI_STAT_WRONG_STATE = 0x32,
173 MFI_STAT_LD_OFFLINE = 0x33,
174 MFI_STAT_PEER_NOTIFICATION_REJECTED = 0x34,
175 MFI_STAT_PEER_NOTIFICATION_FAILED = 0x35,
176 MFI_STAT_RESERVATION_IN_PROGRESS = 0x36,
177 MFI_STAT_I2C_ERRORS_DETECTED = 0x37,
178 MFI_STAT_PCI_ERRORS_DETECTED = 0x38,
179
180 MFI_STAT_INVALID_STATUS = 0xFF
181};
182
183/*
184 * Number of mailbox bytes in DCMD message frame
185 */
186#define MFI_MBOX_SIZE 12
187
188enum MR_EVT_CLASS {
189
190 MR_EVT_CLASS_DEBUG = -2,
191 MR_EVT_CLASS_PROGRESS = -1,
192 MR_EVT_CLASS_INFO = 0,
193 MR_EVT_CLASS_WARNING = 1,
194 MR_EVT_CLASS_CRITICAL = 2,
195 MR_EVT_CLASS_FATAL = 3,
196 MR_EVT_CLASS_DEAD = 4,
197
198};
199
200enum MR_EVT_LOCALE {
201
202 MR_EVT_LOCALE_LD = 0x0001,
203 MR_EVT_LOCALE_PD = 0x0002,
204 MR_EVT_LOCALE_ENCL = 0x0004,
205 MR_EVT_LOCALE_BBU = 0x0008,
206 MR_EVT_LOCALE_SAS = 0x0010,
207 MR_EVT_LOCALE_CTRL = 0x0020,
208 MR_EVT_LOCALE_CONFIG = 0x0040,
209 MR_EVT_LOCALE_CLUSTER = 0x0080,
210 MR_EVT_LOCALE_ALL = 0xffff,
211
212};
213
214enum MR_EVT_ARGS {
215
216 MR_EVT_ARGS_NONE,
217 MR_EVT_ARGS_CDB_SENSE,
218 MR_EVT_ARGS_LD,
219 MR_EVT_ARGS_LD_COUNT,
220 MR_EVT_ARGS_LD_LBA,
221 MR_EVT_ARGS_LD_OWNER,
222 MR_EVT_ARGS_LD_LBA_PD_LBA,
223 MR_EVT_ARGS_LD_PROG,
224 MR_EVT_ARGS_LD_STATE,
225 MR_EVT_ARGS_LD_STRIP,
226 MR_EVT_ARGS_PD,
227 MR_EVT_ARGS_PD_ERR,
228 MR_EVT_ARGS_PD_LBA,
229 MR_EVT_ARGS_PD_LBA_LD,
230 MR_EVT_ARGS_PD_PROG,
231 MR_EVT_ARGS_PD_STATE,
232 MR_EVT_ARGS_PCI,
233 MR_EVT_ARGS_RATE,
234 MR_EVT_ARGS_STR,
235 MR_EVT_ARGS_TIME,
236 MR_EVT_ARGS_ECC,
237
238};
239
240/*
241 * SAS controller properties
242 */
243struct megasas_ctrl_prop {
244
245 u16 seq_num;
246 u16 pred_fail_poll_interval;
247 u16 intr_throttle_count;
248 u16 intr_throttle_timeouts;
249 u8 rebuild_rate;
250 u8 patrol_read_rate;
251 u8 bgi_rate;
252 u8 cc_rate;
253 u8 recon_rate;
254 u8 cache_flush_interval;
255 u8 spinup_drv_count;
256 u8 spinup_delay;
257 u8 cluster_enable;
258 u8 coercion_mode;
259 u8 alarm_enable;
260 u8 disable_auto_rebuild;
261 u8 disable_battery_warn;
262 u8 ecc_bucket_size;
263 u16 ecc_bucket_leak_rate;
264 u8 restore_hotspare_on_insertion;
265 u8 expose_encl_devices;
266 u8 reserved[38];
267
268} __attribute__ ((packed));
269
270/*
271 * SAS controller information
272 */
273struct megasas_ctrl_info {
274
275 /*
276 * PCI device information
277 */
278 struct {
279
280 u16 vendor_id;
281 u16 device_id;
282 u16 sub_vendor_id;
283 u16 sub_device_id;
284 u8 reserved[24];
285
286 } __attribute__ ((packed)) pci;
287
288 /*
289 * Host interface information
290 */
291 struct {
292
293 u8 PCIX:1;
294 u8 PCIE:1;
295 u8 iSCSI:1;
296 u8 SAS_3G:1;
297 u8 reserved_0:4;
298 u8 reserved_1[6];
299 u8 port_count;
300 u64 port_addr[8];
301
302 } __attribute__ ((packed)) host_interface;
303
304 /*
305 * Device (backend) interface information
306 */
307 struct {
308
309 u8 SPI:1;
310 u8 SAS_3G:1;
311 u8 SATA_1_5G:1;
312 u8 SATA_3G:1;
313 u8 reserved_0:4;
314 u8 reserved_1[6];
315 u8 port_count;
316 u64 port_addr[8];
317
318 } __attribute__ ((packed)) device_interface;
319
320 /*
321 * List of components residing in flash. All strings are null terminated
322 */
323 u32 image_check_word;
324 u32 image_component_count;
325
326 struct {
327
328 char name[8];
329 char version[32];
330 char build_date[16];
331 char built_time[16];
332
333 } __attribute__ ((packed)) image_component[8];
334
335 /*
336 * List of flash components that have been flashed on the card, but
337 * are not in use, pending reset of the adapter. This list will be
338 * empty if a flash operation has not occurred. All strings are null
339 * terminated
340 */
341 u32 pending_image_component_count;
342
343 struct {
344
345 char name[8];
346 char version[32];
347 char build_date[16];
348 char build_time[16];
349
350 } __attribute__ ((packed)) pending_image_component[8];
351
352 u8 max_arms;
353 u8 max_spans;
354 u8 max_arrays;
355 u8 max_lds;
356
357 char product_name[80];
358 char serial_no[32];
359
360 /*
361 * Other physical/controller/operation information. Indicates the
362 * presence of the hardware
363 */
364 struct {
365
366 u32 bbu:1;
367 u32 alarm:1;
368 u32 nvram:1;
369 u32 uart:1;
370 u32 reserved:28;
371
372 } __attribute__ ((packed)) hw_present;
373
374 u32 current_fw_time;
375
376 /*
377 * Maximum data transfer sizes
378 */
379 u16 max_concurrent_cmds;
380 u16 max_sge_count;
381 u32 max_request_size;
382
383 /*
384 * Logical and physical device counts
385 */
386 u16 ld_present_count;
387 u16 ld_degraded_count;
388 u16 ld_offline_count;
389
390 u16 pd_present_count;
391 u16 pd_disk_present_count;
392 u16 pd_disk_pred_failure_count;
393 u16 pd_disk_failed_count;
394
395 /*
396 * Memory size information
397 */
398 u16 nvram_size;
399 u16 memory_size;
400 u16 flash_size;
401
402 /*
403 * Error counters
404 */
405 u16 mem_correctable_error_count;
406 u16 mem_uncorrectable_error_count;
407
408 /*
409 * Cluster information
410 */
411 u8 cluster_permitted;
412 u8 cluster_active;
413
414 /*
415 * Additional max data transfer sizes
416 */
417 u16 max_strips_per_io;
418
419 /*
420 * Controller capabilities structures
421 */
422 struct {
423
424 u32 raid_level_0:1;
425 u32 raid_level_1:1;
426 u32 raid_level_5:1;
427 u32 raid_level_1E:1;
428 u32 raid_level_6:1;
429 u32 reserved:27;
430
431 } __attribute__ ((packed)) raid_levels;
432
433 struct {
434
435 u32 rbld_rate:1;
436 u32 cc_rate:1;
437 u32 bgi_rate:1;
438 u32 recon_rate:1;
439 u32 patrol_rate:1;
440 u32 alarm_control:1;
441 u32 cluster_supported:1;
442 u32 bbu:1;
443 u32 spanning_allowed:1;
444 u32 dedicated_hotspares:1;
445 u32 revertible_hotspares:1;
446 u32 foreign_config_import:1;
447 u32 self_diagnostic:1;
448 u32 mixed_redundancy_arr:1;
449 u32 global_hot_spares:1;
450 u32 reserved:17;
451
452 } __attribute__ ((packed)) adapter_operations;
453
454 struct {
455
456 u32 read_policy:1;
457 u32 write_policy:1;
458 u32 io_policy:1;
459 u32 access_policy:1;
460 u32 disk_cache_policy:1;
461 u32 reserved:27;
462
463 } __attribute__ ((packed)) ld_operations;
464
465 struct {
466
467 u8 min;
468 u8 max;
469 u8 reserved[2];
470
471 } __attribute__ ((packed)) stripe_sz_ops;
472
473 struct {
474
475 u32 force_online:1;
476 u32 force_offline:1;
477 u32 force_rebuild:1;
478 u32 reserved:29;
479
480 } __attribute__ ((packed)) pd_operations;
481
482 struct {
483
484 u32 ctrl_supports_sas:1;
485 u32 ctrl_supports_sata:1;
486 u32 allow_mix_in_encl:1;
487 u32 allow_mix_in_ld:1;
488 u32 allow_sata_in_cluster:1;
489 u32 reserved:27;
490
491 } __attribute__ ((packed)) pd_mix_support;
492
493 /*
494 * Define ECC single-bit-error bucket information
495 */
496 u8 ecc_bucket_count;
497 u8 reserved_2[11];
498
499 /*
500 * Include the controller properties (changeable items)
501 */
502 struct megasas_ctrl_prop properties;
503
504 /*
505 * Define FW pkg version (set in environment variables on an OEM basis)
506 */
507 char package_version[0x60];
508
509 u8 pad[0x800 - 0x6a0];
510
511} __attribute__ ((packed));
512
513/*
514 * ===============================
515 * MegaRAID SAS driver definitions
516 * ===============================
517 */
518#define MEGASAS_MAX_PD_CHANNELS 2
519#define MEGASAS_MAX_LD_CHANNELS 2
520#define MEGASAS_MAX_CHANNELS (MEGASAS_MAX_PD_CHANNELS + \
521 MEGASAS_MAX_LD_CHANNELS)
522#define MEGASAS_MAX_DEV_PER_CHANNEL 128
523#define MEGASAS_DEFAULT_INIT_ID -1
524#define MEGASAS_MAX_LUN 8
525#define MEGASAS_MAX_LD 64
526
527/*
528 * When SCSI mid-layer calls driver's reset routine, driver waits for
529 * MEGASAS_RESET_WAIT_TIME seconds for all outstanding IO to complete. Note
530 * that the driver cannot _actually_ abort or reset pending commands. While
531 * it is waiting for the commands to complete, it prints a diagnostic message
532 * every MEGASAS_RESET_NOTICE_INTERVAL seconds
533 */
534#define MEGASAS_RESET_WAIT_TIME 180
535#define MEGASAS_RESET_NOTICE_INTERVAL 5
536
537#define MEGASAS_IOCTL_CMD 0
538
539/*
540 * FW reports the maximum number of commands that it can accept (maximum
541 * commands that can be outstanding) at any time. The driver must report a
542 * lower number to the mid layer because it can issue a few internal commands
543 * itself (e.g., AEN, abort cmd, IOCTLs, etc.). The number of commands it needs
544 * is shown below
545 */
546#define MEGASAS_INT_CMDS 32
547
548/*
549 * FW can accept both 32 and 64 bit SGLs. We want to allocate 32/64 bit
550 * SGLs based on the size of dma_addr_t
551 */
552#define IS_DMA64 (sizeof(dma_addr_t) == 8)
553
554#define MFI_OB_INTR_STATUS_MASK 0x00000002
555#define MFI_POLL_TIMEOUT_SECS 10
556
557struct megasas_register_set {
558
559 u32 reserved_0[4]; /*0000h */
560
561 u32 inbound_msg_0; /*0010h */
562 u32 inbound_msg_1; /*0014h */
563 u32 outbound_msg_0; /*0018h */
564 u32 outbound_msg_1; /*001Ch */
565
566 u32 inbound_doorbell; /*0020h */
567 u32 inbound_intr_status; /*0024h */
568 u32 inbound_intr_mask; /*0028h */
569
570 u32 outbound_doorbell; /*002Ch */
571 u32 outbound_intr_status; /*0030h */
572 u32 outbound_intr_mask; /*0034h */
573
574 u32 reserved_1[2]; /*0038h */
575
576 u32 inbound_queue_port; /*0040h */
577 u32 outbound_queue_port; /*0044h */
578
579 u32 reserved_2; /*004Ch */
580
581 u32 index_registers[1004]; /*0050h */
582
583} __attribute__ ((packed));
584
585struct megasas_sge32 {
586
587 u32 phys_addr;
588 u32 length;
589
590} __attribute__ ((packed));
591
592struct megasas_sge64 {
593
594 u64 phys_addr;
595 u32 length;
596
597} __attribute__ ((packed));
598
599union megasas_sgl {
600
601 struct megasas_sge32 sge32[1];
602 struct megasas_sge64 sge64[1];
603
604} __attribute__ ((packed));
605
606struct megasas_header {
607
608 u8 cmd; /*00h */
609 u8 sense_len; /*01h */
610 u8 cmd_status; /*02h */
611 u8 scsi_status; /*03h */
612
613 u8 target_id; /*04h */
614 u8 lun; /*05h */
615 u8 cdb_len; /*06h */
616 u8 sge_count; /*07h */
617
618 u32 context; /*08h */
619 u32 pad_0; /*0Ch */
620
621 u16 flags; /*10h */
622 u16 timeout; /*12h */
623 u32 data_xferlen; /*14h */
624
625} __attribute__ ((packed));
626
627union megasas_sgl_frame {
628
629 struct megasas_sge32 sge32[8];
630 struct megasas_sge64 sge64[5];
631
632} __attribute__ ((packed));
633
634struct megasas_init_frame {
635
636 u8 cmd; /*00h */
637 u8 reserved_0; /*01h */
638 u8 cmd_status; /*02h */
639
640 u8 reserved_1; /*03h */
641 u32 reserved_2; /*04h */
642
643 u32 context; /*08h */
644 u32 pad_0; /*0Ch */
645
646 u16 flags; /*10h */
647 u16 reserved_3; /*12h */
648 u32 data_xfer_len; /*14h */
649
650 u32 queue_info_new_phys_addr_lo; /*18h */
651 u32 queue_info_new_phys_addr_hi; /*1Ch */
652 u32 queue_info_old_phys_addr_lo; /*20h */
653 u32 queue_info_old_phys_addr_hi; /*24h */
654
655 u32 reserved_4[6]; /*28h */
656
657} __attribute__ ((packed));
658
659struct megasas_init_queue_info {
660
661 u32 init_flags; /*00h */
662 u32 reply_queue_entries; /*04h */
663
664 u32 reply_queue_start_phys_addr_lo; /*08h */
665 u32 reply_queue_start_phys_addr_hi; /*0Ch */
666 u32 producer_index_phys_addr_lo; /*10h */
667 u32 producer_index_phys_addr_hi; /*14h */
668 u32 consumer_index_phys_addr_lo; /*18h */
669 u32 consumer_index_phys_addr_hi; /*1Ch */
670
671} __attribute__ ((packed));
672
673struct megasas_io_frame {
674
675 u8 cmd; /*00h */
676 u8 sense_len; /*01h */
677 u8 cmd_status; /*02h */
678 u8 scsi_status; /*03h */
679
680 u8 target_id; /*04h */
681 u8 access_byte; /*05h */
682 u8 reserved_0; /*06h */
683 u8 sge_count; /*07h */
684
685 u32 context; /*08h */
686 u32 pad_0; /*0Ch */
687
688 u16 flags; /*10h */
689 u16 timeout; /*12h */
690 u32 lba_count; /*14h */
691
692 u32 sense_buf_phys_addr_lo; /*18h */
693 u32 sense_buf_phys_addr_hi; /*1Ch */
694
695 u32 start_lba_lo; /*20h */
696 u32 start_lba_hi; /*24h */
697
698 union megasas_sgl sgl; /*28h */
699
700} __attribute__ ((packed));
701
702struct megasas_pthru_frame {
703
704 u8 cmd; /*00h */
705 u8 sense_len; /*01h */
706 u8 cmd_status; /*02h */
707 u8 scsi_status; /*03h */
708
709 u8 target_id; /*04h */
710 u8 lun; /*05h */
711 u8 cdb_len; /*06h */
712 u8 sge_count; /*07h */
713
714 u32 context; /*08h */
715 u32 pad_0; /*0Ch */
716
717 u16 flags; /*10h */
718 u16 timeout; /*12h */
719 u32 data_xfer_len; /*14h */
720
721 u32 sense_buf_phys_addr_lo; /*18h */
722 u32 sense_buf_phys_addr_hi; /*1Ch */
723
724 u8 cdb[16]; /*20h */
725 union megasas_sgl sgl; /*30h */
726
727} __attribute__ ((packed));
728
729struct megasas_dcmd_frame {
730
731 u8 cmd; /*00h */
732 u8 reserved_0; /*01h */
733 u8 cmd_status; /*02h */
734 u8 reserved_1[4]; /*03h */
735 u8 sge_count; /*07h */
736
737 u32 context; /*08h */
738 u32 pad_0; /*0Ch */
739
740 u16 flags; /*10h */
741 u16 timeout; /*12h */
742
743 u32 data_xfer_len; /*14h */
744 u32 opcode; /*18h */
745
746 union { /*1Ch */
747 u8 b[12];
748 u16 s[6];
749 u32 w[3];
750 } mbox;
751
752 union megasas_sgl sgl; /*28h */
753
754} __attribute__ ((packed));
755
756struct megasas_abort_frame {
757
758 u8 cmd; /*00h */
759 u8 reserved_0; /*01h */
760 u8 cmd_status; /*02h */
761
762 u8 reserved_1; /*03h */
763 u32 reserved_2; /*04h */
764
765 u32 context; /*08h */
766 u32 pad_0; /*0Ch */
767
768 u16 flags; /*10h */
769 u16 reserved_3; /*12h */
770 u32 reserved_4; /*14h */
771
772 u32 abort_context; /*18h */
773 u32 pad_1; /*1Ch */
774
775 u32 abort_mfi_phys_addr_lo; /*20h */
776 u32 abort_mfi_phys_addr_hi; /*24h */
777
778 u32 reserved_5[6]; /*28h */
779
780} __attribute__ ((packed));
781
782struct megasas_smp_frame {
783
784 u8 cmd; /*00h */
785 u8 reserved_1; /*01h */
786 u8 cmd_status; /*02h */
787 u8 connection_status; /*03h */
788
789 u8 reserved_2[3]; /*04h */
790 u8 sge_count; /*07h */
791
792 u32 context; /*08h */
793 u32 pad_0; /*0Ch */
794
795 u16 flags; /*10h */
796 u16 timeout; /*12h */
797
798 u32 data_xfer_len; /*14h */
799 u64 sas_addr; /*18h */
800
801 union {
802 struct megasas_sge32 sge32[2]; /* [0]: resp [1]: req */
803 struct megasas_sge64 sge64[2]; /* [0]: resp [1]: req */
804 } sgl;
805
806} __attribute__ ((packed));
807
808struct megasas_stp_frame {
809
810 u8 cmd; /*00h */
811 u8 reserved_1; /*01h */
812 u8 cmd_status; /*02h */
813 u8 reserved_2; /*03h */
814
815 u8 target_id; /*04h */
816 u8 reserved_3[2]; /*05h */
817 u8 sge_count; /*07h */
818
819 u32 context; /*08h */
820 u32 pad_0; /*0Ch */
821
822 u16 flags; /*10h */
823 u16 timeout; /*12h */
824
825 u32 data_xfer_len; /*14h */
826
827 u16 fis[10]; /*18h */
828 u32 stp_flags;
829
830 union {
831 struct megasas_sge32 sge32[2]; /* [0]: resp [1]: data */
832 struct megasas_sge64 sge64[2]; /* [0]: resp [1]: data */
833 } sgl;
834
835} __attribute__ ((packed));
836
837union megasas_frame {
838
839 struct megasas_header hdr;
840 struct megasas_init_frame init;
841 struct megasas_io_frame io;
842 struct megasas_pthru_frame pthru;
843 struct megasas_dcmd_frame dcmd;
844 struct megasas_abort_frame abort;
845 struct megasas_smp_frame smp;
846 struct megasas_stp_frame stp;
847
848 u8 raw_bytes[64];
849};
850
851struct megasas_cmd;
852
853union megasas_evt_class_locale {
854
855 struct {
856 u16 locale;
857 u8 reserved;
858 s8 class;
859 } __attribute__ ((packed)) members;
860
861 u32 word;
862
863} __attribute__ ((packed));
864
865struct megasas_evt_log_info {
866 u32 newest_seq_num;
867 u32 oldest_seq_num;
868 u32 clear_seq_num;
869 u32 shutdown_seq_num;
870 u32 boot_seq_num;
871
872} __attribute__ ((packed));
873
874struct megasas_progress {
875
876 u16 progress;
877 u16 elapsed_seconds;
878
879} __attribute__ ((packed));
880
881struct megasas_evtarg_ld {
882
883 u16 target_id;
884 u8 ld_index;
885 u8 reserved;
886
887} __attribute__ ((packed));
888
889struct megasas_evtarg_pd {
890 u16 device_id;
891 u8 encl_index;
892 u8 slot_number;
893
894} __attribute__ ((packed));
895
896struct megasas_evt_detail {
897
898 u32 seq_num;
899 u32 time_stamp;
900 u32 code;
901 union megasas_evt_class_locale cl;
902 u8 arg_type;
903 u8 reserved1[15];
904
905 union {
906 struct {
907 struct megasas_evtarg_pd pd;
908 u8 cdb_length;
909 u8 sense_length;
910 u8 reserved[2];
911 u8 cdb[16];
912 u8 sense[64];
913 } __attribute__ ((packed)) cdbSense;
914
915 struct megasas_evtarg_ld ld;
916
917 struct {
918 struct megasas_evtarg_ld ld;
919 u64 count;
920 } __attribute__ ((packed)) ld_count;
921
922 struct {
923 u64 lba;
924 struct megasas_evtarg_ld ld;
925 } __attribute__ ((packed)) ld_lba;
926
927 struct {
928 struct megasas_evtarg_ld ld;
929 u32 prevOwner;
930 u32 newOwner;
931 } __attribute__ ((packed)) ld_owner;
932
933 struct {
934 u64 ld_lba;
935 u64 pd_lba;
936 struct megasas_evtarg_ld ld;
937 struct megasas_evtarg_pd pd;
938 } __attribute__ ((packed)) ld_lba_pd_lba;
939
940 struct {
941 struct megasas_evtarg_ld ld;
942 struct megasas_progress prog;
943 } __attribute__ ((packed)) ld_prog;
944
945 struct {
946 struct megasas_evtarg_ld ld;
947 u32 prev_state;
948 u32 new_state;
949 } __attribute__ ((packed)) ld_state;
950
951 struct {
952 u64 strip;
953 struct megasas_evtarg_ld ld;
954 } __attribute__ ((packed)) ld_strip;
955
956 struct megasas_evtarg_pd pd;
957
958 struct {
959 struct megasas_evtarg_pd pd;
960 u32 err;
961 } __attribute__ ((packed)) pd_err;
962
963 struct {
964 u64 lba;
965 struct megasas_evtarg_pd pd;
966 } __attribute__ ((packed)) pd_lba;
967
968 struct {
969 u64 lba;
970 struct megasas_evtarg_pd pd;
971 struct megasas_evtarg_ld ld;
972 } __attribute__ ((packed)) pd_lba_ld;
973
974 struct {
975 struct megasas_evtarg_pd pd;
976 struct megasas_progress prog;
977 } __attribute__ ((packed)) pd_prog;
978
979 struct {
980 struct megasas_evtarg_pd pd;
981 u32 prevState;
982 u32 newState;
983 } __attribute__ ((packed)) pd_state;
984
985 struct {
986 u16 vendorId;
987 u16 deviceId;
988 u16 subVendorId;
989 u16 subDeviceId;
990 } __attribute__ ((packed)) pci;
991
992 u32 rate;
993 char str[96];
994
995 struct {
996 u32 rtc;
997 u32 elapsedSeconds;
998 } __attribute__ ((packed)) time;
999
1000 struct {
1001 u32 ecar;
1002 u32 elog;
1003 char str[64];
1004 } __attribute__ ((packed)) ecc;
1005
1006 u8 b[96];
1007 u16 s[48];
1008 u32 w[24];
1009 u64 d[12];
1010 } args;
1011
1012 char description[128];
1013
1014} __attribute__ ((packed));
1015
1016struct megasas_instance {
1017
1018 u32 *producer;
1019 dma_addr_t producer_h;
1020 u32 *consumer;
1021 dma_addr_t consumer_h;
1022
1023 u32 *reply_queue;
1024 dma_addr_t reply_queue_h;
1025
1026 unsigned long base_addr;
1027 struct megasas_register_set __iomem *reg_set;
1028
1029 s8 init_id;
1030 u8 reserved[3];
1031
1032 u16 max_num_sge;
1033 u16 max_fw_cmds;
1034 u32 max_sectors_per_req;
1035
1036 struct megasas_cmd **cmd_list;
1037 struct list_head cmd_pool;
1038 spinlock_t cmd_pool_lock;
1039 struct dma_pool *frame_dma_pool;
1040 struct dma_pool *sense_dma_pool;
1041
1042 struct megasas_evt_detail *evt_detail;
1043 dma_addr_t evt_detail_h;
1044 struct megasas_cmd *aen_cmd;
1045 struct semaphore aen_mutex;
1046 struct semaphore ioctl_sem;
1047
1048 struct Scsi_Host *host;
1049
1050 wait_queue_head_t int_cmd_wait_q;
1051 wait_queue_head_t abort_cmd_wait_q;
1052
1053 struct pci_dev *pdev;
1054 u32 unique_id;
1055
1056 u32 fw_outstanding;
1057 u32 hw_crit_error;
1058 spinlock_t instance_lock;
1059};
1060
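/*
 * Channels 0..MEGASAS_MAX_PD_CHANNELS-1 address physical devices; the
 * remaining channels address logical drives. MEGASAS_DEV_INDEX folds the
 * channel parity and target id into a per-class device index.
 */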
1061#define MEGASAS_IS_LOGICAL(scp) \
1062 ((scp)->device->channel < MEGASAS_MAX_PD_CHANNELS ? 0 : 1)
1063
1064#define MEGASAS_DEV_INDEX(inst, scp) \
1065 (((scp)->device->channel % 2) * MEGASAS_MAX_DEV_PER_CHANNEL + \
1066 (scp)->device->id)
1067
1068struct megasas_cmd {
1069
1070 union megasas_frame *frame;
1071 dma_addr_t frame_phys_addr;
1072 u8 *sense;
1073 dma_addr_t sense_phys_addr;
1074
1075 u32 index;
1076 u8 sync_cmd;
1077 u8 cmd_status;
1078 u16 abort_aen;
1079
1080 struct list_head list;
1081 struct scsi_cmnd *scmd;
1082 struct megasas_instance *instance;
1083 u32 frame_count;
1084};
1085
1086#define MAX_MGMT_ADAPTERS 1024
1087#define MAX_IOCTL_SGE 16
1088
1089struct megasas_iocpacket {
1090
1091 u16 host_no;
1092 u16 __pad1;
1093 u32 sgl_off;
1094 u32 sge_count;
1095 u32 sense_off;
1096 u32 sense_len;
1097 union {
1098 u8 raw[128];
1099 struct megasas_header hdr;
1100 } frame;
1101
1102 struct iovec sgl[MAX_IOCTL_SGE];
1103
1104} __attribute__ ((packed));
1105
1106struct megasas_aen {
1107 u16 host_no;
1108 u16 __pad1;
1109 u32 seq_num;
1110 u32 class_locale_word;
1111} __attribute__ ((packed));
1112
1113#ifdef CONFIG_COMPAT
1114struct compat_megasas_iocpacket {
1115 u16 host_no;
1116 u16 __pad1;
1117 u32 sgl_off;
1118 u32 sge_count;
1119 u32 sense_off;
1120 u32 sense_len;
1121 union {
1122 u8 raw[128];
1123 struct megasas_header hdr;
1124 } frame;
1125 struct compat_iovec sgl[MAX_IOCTL_SGE];
1126} __attribute__ ((packed));
1127
1128#define MEGASAS_IOC_FIRMWARE _IOWR('M', 1, struct compat_megasas_iocpacket)
1129#else
1130#define MEGASAS_IOC_FIRMWARE _IOWR('M', 1, struct megasas_iocpacket)
1131#endif
1132
1133#define MEGASAS_IOC_GET_AEN _IOW('M', 3, struct megasas_aen)
1134
1135struct megasas_mgmt_info {
1136
1137 u16 count;
1138 struct megasas_instance *instance[MAX_MGMT_ADAPTERS];
1139 int max_index;
1140};
1141
1142#endif /*LSI_MEGARAID_SAS_H */
diff --git a/drivers/scsi/qla2xxx/qla_rscn.c b/drivers/scsi/qla2xxx/qla_rscn.c
index bdc3bc74bbe1..1eba98828636 100644
--- a/drivers/scsi/qla2xxx/qla_rscn.c
+++ b/drivers/scsi/qla2xxx/qla_rscn.c
@@ -330,6 +330,8 @@ qla2x00_update_login_fcport(scsi_qla_host_t *ha, struct mbx_entry *mbxstat,
 	fcport->flags &= ~FCF_FAILOVER_NEEDED;
 	fcport->iodesc_idx_sent = IODESC_INVALID_INDEX;
 	atomic_set(&fcport->state, FCS_ONLINE);
+	if (fcport->rport)
+		fc_remote_port_unblock(fcport->rport);
 }
 
 
diff --git a/drivers/scsi/scsi_scan.c b/drivers/scsi/scsi_scan.c
index fcf9f6cbb142..327c5d7e5bd2 100644
--- a/drivers/scsi/scsi_scan.c
+++ b/drivers/scsi/scsi_scan.c
@@ -587,6 +587,7 @@ static int scsi_probe_lun(struct scsi_device *sdev, char *inq_result,
 	if (sdev->scsi_level >= 2 ||
 	    (sdev->scsi_level == 1 && (inq_result[3] & 0x0f) == 1))
 		sdev->scsi_level++;
+	sdev->sdev_target->scsi_level = sdev->scsi_level;
 
 	return 0;
 }
@@ -771,6 +772,15 @@ static int scsi_add_lun(struct scsi_device *sdev, char *inq_result, int *bflags)
 	return SCSI_SCAN_LUN_PRESENT;
 }
 
+static inline void scsi_destroy_sdev(struct scsi_device *sdev)
+{
+	if (sdev->host->hostt->slave_destroy)
+		sdev->host->hostt->slave_destroy(sdev);
+	transport_destroy_device(&sdev->sdev_gendev);
+	put_device(&sdev->sdev_gendev);
+}
+
+
 /**
  * scsi_probe_and_add_lun - probe a LUN, if a LUN is found add it
  * @starget:	pointer to target device structure
@@ -803,9 +813,9 @@ static int scsi_probe_and_add_lun(struct scsi_target *starget,
 	 * The rescan flag is used as an optimization, the first scan of a
 	 * host adapter calls into here with rescan == 0.
 	 */
-	if (rescan) {
-		sdev = scsi_device_lookup_by_target(starget, lun);
-		if (sdev) {
+	sdev = scsi_device_lookup_by_target(starget, lun);
+	if (sdev) {
+		if (rescan || sdev->sdev_state != SDEV_CREATED) {
 			SCSI_LOG_SCAN_BUS(3, printk(KERN_INFO
 				"scsi scan: device exists on %s\n",
 				sdev->sdev_gendev.bus_id));
@@ -820,9 +830,9 @@ static int scsi_probe_and_add_lun(struct scsi_target *starget,
 						 sdev->model);
 			return SCSI_SCAN_LUN_PRESENT;
 		}
-	}
-
-	sdev = scsi_alloc_sdev(starget, lun, hostdata);
+		scsi_device_put(sdev);
+	} else
+		sdev = scsi_alloc_sdev(starget, lun, hostdata);
 	if (!sdev)
 		goto out;
 
@@ -877,12 +887,8 @@ static int scsi_probe_and_add_lun(struct scsi_target *starget,
 				res = SCSI_SCAN_NO_RESPONSE;
 			}
 		}
-	} else {
-		if (sdev->host->hostt->slave_destroy)
-			sdev->host->hostt->slave_destroy(sdev);
-		transport_destroy_device(&sdev->sdev_gendev);
-		put_device(&sdev->sdev_gendev);
-	}
+	} else
+		scsi_destroy_sdev(sdev);
  out:
 	return res;
 }
@@ -1054,7 +1060,7 @@ EXPORT_SYMBOL(int_to_scsilun);
  * 0: scan completed (or no memory, so further scanning is futile)
  * 1: no report lun scan, or not configured
  **/
-static int scsi_report_lun_scan(struct scsi_device *sdev, int bflags,
+static int scsi_report_lun_scan(struct scsi_target *starget, int bflags,
 				int rescan)
 {
 	char devname[64];
@@ -1067,7 +1073,8 @@ static int scsi_report_lun_scan(struct scsi_device *sdev, int bflags,
 	struct scsi_lun *lunp, *lun_data;
 	u8 *data;
 	struct scsi_sense_hdr sshdr;
-	struct scsi_target *starget = scsi_target(sdev);
+	struct scsi_device *sdev;
+	struct Scsi_Host *shost = dev_to_shost(&starget->dev);
 
 	/*
 	 * Only support SCSI-3 and up devices if BLIST_NOREPORTLUN is not set.
@@ -1075,15 +1082,23 @@ static int scsi_report_lun_scan(struct scsi_device *sdev, int bflags,
 	 * support more than 8 LUNs.
 	 */
 	if ((bflags & BLIST_NOREPORTLUN) ||
-	    sdev->scsi_level < SCSI_2 ||
-	    (sdev->scsi_level < SCSI_3 &&
-	     (!(bflags & BLIST_REPORTLUN2) || sdev->host->max_lun <= 8)) )
+	    starget->scsi_level < SCSI_2 ||
+	    (starget->scsi_level < SCSI_3 &&
+	     (!(bflags & BLIST_REPORTLUN2) || shost->max_lun <= 8)) )
 		return 1;
 	if (bflags & BLIST_NOLUN)
 		return 0;
 
+	if (!(sdev = scsi_device_lookup_by_target(starget, 0))) {
+		sdev = scsi_alloc_sdev(starget, 0, NULL);
+		if (!sdev)
+			return 0;
+		if (scsi_device_get(sdev))
+			return 0;
+	}
+
 	sprintf(devname, "host %d channel %d id %d",
-		sdev->host->host_no, sdev->channel, sdev->id);
+		shost->host_no, sdev->channel, sdev->id);
 
 	/*
 	 * Allocate enough to hold the header (the same size as one scsi_lun)
@@ -1098,8 +1113,10 @@ static int scsi_report_lun_scan(struct scsi_device *sdev, int bflags,
 	length = (max_scsi_report_luns + 1) * sizeof(struct scsi_lun);
 	lun_data = kmalloc(length, GFP_ATOMIC |
 			   (sdev->host->unchecked_isa_dma ? __GFP_DMA : 0));
-	if (!lun_data)
+	if (!lun_data) {
+		printk(ALLOC_FAILURE_MSG, __FUNCTION__);
 		goto out;
+	}
 
 	scsi_cmd[0] = REPORT_LUNS;
 
@@ -1201,10 +1218,6 @@ static int scsi_report_lun_scan(struct scsi_device *sdev, int bflags,
1201 for (i = 0; i < sizeof(struct scsi_lun); i++) 1218 for (i = 0; i < sizeof(struct scsi_lun); i++)
1202 printk("%02x", data[i]); 1219 printk("%02x", data[i]);
1203 printk(" has a LUN larger than currently supported.\n"); 1220 printk(" has a LUN larger than currently supported.\n");
1204 } else if (lun == 0) {
1205 /*
1206 * LUN 0 has already been scanned.
1207 */
1208 } else if (lun > sdev->host->max_lun) { 1221 } else if (lun > sdev->host->max_lun) {
1209 printk(KERN_WARNING "scsi: %s lun%d has a LUN larger" 1222 printk(KERN_WARNING "scsi: %s lun%d has a LUN larger"
1210 " than allowed by the host adapter\n", 1223 " than allowed by the host adapter\n",
@@ -1227,13 +1240,13 @@ static int scsi_report_lun_scan(struct scsi_device *sdev, int bflags,
1227 } 1240 }
1228 1241
1229 kfree(lun_data); 1242 kfree(lun_data);
1230 return 0;
1231
1232 out: 1243 out:
1233 /* 1244 scsi_device_put(sdev);
1234 * We are out of memory, don't try scanning any further. 1245 if (sdev->sdev_state == SDEV_CREATED)
1235 */ 1246 /*
1236 printk(ALLOC_FAILURE_MSG, __FUNCTION__); 1247 * the sdev we used didn't appear in the report luns scan
1248 */
1249 scsi_destroy_sdev(sdev);
1237 return 0; 1250 return 0;
1238} 1251}
1239 1252
@@ -1299,7 +1312,6 @@ static void __scsi_scan_target(struct device *parent, unsigned int channel,
1299 struct Scsi_Host *shost = dev_to_shost(parent); 1312 struct Scsi_Host *shost = dev_to_shost(parent);
1300 int bflags = 0; 1313 int bflags = 0;
1301 int res; 1314 int res;
1302 struct scsi_device *sdev = NULL;
1303 struct scsi_target *starget; 1315 struct scsi_target *starget;
1304 1316
1305 if (shost->this_id == id) 1317 if (shost->this_id == id)
@@ -1325,27 +1337,16 @@ static void __scsi_scan_target(struct device *parent, unsigned int channel,
1325 * Scan LUN 0, if there is some response, scan further. Ideally, we 1337 * Scan LUN 0, if there is some response, scan further. Ideally, we
1326 * would not configure LUN 0 until all LUNs are scanned. 1338 * would not configure LUN 0 until all LUNs are scanned.
1327 */ 1339 */
1328 res = scsi_probe_and_add_lun(starget, 0, &bflags, &sdev, rescan, NULL); 1340 res = scsi_probe_and_add_lun(starget, 0, &bflags, NULL, rescan, NULL);
1329 if (res == SCSI_SCAN_LUN_PRESENT) { 1341 if (res == SCSI_SCAN_LUN_PRESENT || res == SCSI_SCAN_TARGET_PRESENT) {
1330 if (scsi_report_lun_scan(sdev, bflags, rescan) != 0) 1342 if (scsi_report_lun_scan(starget, bflags, rescan) != 0)
1331 /* 1343 /*
1332 * The REPORT LUN did not scan the target, 1344 * The REPORT LUN did not scan the target,
1333 * do a sequential scan. 1345 * do a sequential scan.
1334 */ 1346 */
1335 scsi_sequential_lun_scan(starget, bflags, 1347 scsi_sequential_lun_scan(starget, bflags,
1336 res, sdev->scsi_level, rescan); 1348 res, starget->scsi_level, rescan);
1337 } else if (res == SCSI_SCAN_TARGET_PRESENT) {
1338 /*
1339 * There's a target here, but lun 0 is offline so we
1340 * can't use the report_lun scan. Fall back to a
1341 * sequential lun scan with a bflags of SPARSELUN and
1342 * a default scsi level of SCSI_2
1343 */
1344 scsi_sequential_lun_scan(starget, BLIST_SPARSELUN,
1345 SCSI_SCAN_TARGET_PRESENT, SCSI_2, rescan);
1346 } 1349 }
1347 if (sdev)
1348 scsi_device_put(sdev);
1349 1350
1350 out_reap: 1351 out_reap:
1351 /* now determine if the target has any children at all 1352 /* now determine if the target has any children at all
@@ -1542,10 +1543,7 @@ void scsi_free_host_dev(struct scsi_device *sdev)
1542{ 1543{
1543 BUG_ON(sdev->id != sdev->host->this_id); 1544 BUG_ON(sdev->id != sdev->host->this_id);
1544 1545
1545 if (sdev->host->hostt->slave_destroy) 1546 scsi_destroy_sdev(sdev);
1546 sdev->host->hostt->slave_destroy(sdev);
1547 transport_destroy_device(&sdev->sdev_gendev);
1548 put_device(&sdev->sdev_gendev);
1549} 1547}
1550EXPORT_SYMBOL(scsi_free_host_dev); 1548EXPORT_SYMBOL(scsi_free_host_dev);
1551 1549
diff --git a/drivers/scsi/scsi_transport_sas.c b/drivers/scsi/scsi_transport_sas.c
index ff724bbe6611..1d145d2f9a38 100644
--- a/drivers/scsi/scsi_transport_sas.c
+++ b/drivers/scsi/scsi_transport_sas.c
@@ -628,17 +628,16 @@ sas_rphy_delete(struct sas_rphy *rphy)
628 struct Scsi_Host *shost = dev_to_shost(parent->dev.parent); 628 struct Scsi_Host *shost = dev_to_shost(parent->dev.parent);
629 struct sas_host_attrs *sas_host = to_sas_host_attrs(shost); 629 struct sas_host_attrs *sas_host = to_sas_host_attrs(shost);
630 630
631 transport_destroy_device(&rphy->dev); 631 scsi_remove_target(dev);
632 632
633 scsi_remove_target(&rphy->dev); 633 transport_remove_device(dev);
634 device_del(dev);
635 transport_destroy_device(dev);
634 636
635 spin_lock(&sas_host->lock); 637 spin_lock(&sas_host->lock);
636 list_del(&rphy->list); 638 list_del(&rphy->list);
637 spin_unlock(&sas_host->lock); 639 spin_unlock(&sas_host->lock);
638 640
639 transport_remove_device(dev);
640 device_del(dev);
641 transport_destroy_device(dev);
642 put_device(&parent->dev); 641 put_device(&parent->dev);
643} 642}
644EXPORT_SYMBOL(sas_rphy_delete); 643EXPORT_SYMBOL(sas_rphy_delete);
diff --git a/drivers/serial/sunsu.c b/drivers/serial/sunsu.c
index 5959e6755a81..656c0e8d160e 100644
--- a/drivers/serial/sunsu.c
+++ b/drivers/serial/sunsu.c
@@ -518,11 +518,7 @@ static void sunsu_change_mouse_baud(struct uart_sunsu_port *up)
518 518
519 quot = up->port.uartclk / (16 * new_baud); 519 quot = up->port.uartclk / (16 * new_baud);
520 520
521 spin_unlock(&up->port.lock);
522
523 sunsu_change_speed(&up->port, up->cflag, 0, quot); 521 sunsu_change_speed(&up->port, up->cflag, 0, quot);
524
525 spin_lock(&up->port.lock);
526} 522}
527 523
528static void receive_kbd_ms_chars(struct uart_sunsu_port *up, struct pt_regs *regs, int is_break) 524static void receive_kbd_ms_chars(struct uart_sunsu_port *up, struct pt_regs *regs, int is_break)
diff --git a/fs/bfs/dir.c b/fs/bfs/dir.c
index e240c335eb23..5af928fa0449 100644
--- a/fs/bfs/dir.c
+++ b/fs/bfs/dir.c
@@ -108,7 +108,7 @@ static int bfs_create(struct inode * dir, struct dentry * dentry, int mode,
108 inode->i_mapping->a_ops = &bfs_aops; 108 inode->i_mapping->a_ops = &bfs_aops;
109 inode->i_mode = mode; 109 inode->i_mode = mode;
110 inode->i_ino = ino; 110 inode->i_ino = ino;
111 BFS_I(inode)->i_dsk_ino = cpu_to_le16(ino); 111 BFS_I(inode)->i_dsk_ino = ino;
112 BFS_I(inode)->i_sblock = 0; 112 BFS_I(inode)->i_sblock = 0;
113 BFS_I(inode)->i_eblock = 0; 113 BFS_I(inode)->i_eblock = 0;
114 insert_inode_hash(inode); 114 insert_inode_hash(inode);
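The bfs change above stores the inode number into the in-core i_dsk_ino field without a cpu_to_le16() conversion, and the inode.c hunk below likewise switches a cpu_to_le32() to le32_to_cpu() when computing the free-block count; both look like keep-in-core-fields-in-CPU-byte-order fixes, converting only when data actually crosses the disk boundary. A small userspace sketch of that convention, assuming glibc's <endian.h> for the byte-swap helpers (the structure and field names are invented):

    #define _DEFAULT_SOURCE
    #include <endian.h>             /* glibc: htole16(), le16toh() */
    #include <stdint.h>
    #include <stdio.h>

    struct inode_mem {              /* in-core copy: CPU byte order */
            uint16_t ino;
    };

    struct inode_disk {             /* on-disk layout: little-endian */
            uint16_t ino_le;
    };

    static void write_to_disk(const struct inode_mem *m, struct inode_disk *d)
    {
            d->ino_le = htole16(m->ino);    /* convert exactly once, at the boundary */
    }

    static void read_from_disk(const struct inode_disk *d, struct inode_mem *m)
    {
            m->ino = le16toh(d->ino_le);
    }

    int main(void)
    {
            struct inode_mem m = { .ino = 0x1234 }, back;
            struct inode_disk d;

            write_to_disk(&m, &d);
            read_from_disk(&d, &back);
            printf("in-core 0x%04x, round trip 0x%04x\n",
                   (unsigned)m.ino, (unsigned)back.ino);
            return 0;
    }

Converting exactly once at the read/write boundary means arithmetic and comparisons on the in-core copy never need to care about byte order.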
diff --git a/fs/bfs/inode.c b/fs/bfs/inode.c
index c7b39aa279d7..3af6c73c5b5a 100644
--- a/fs/bfs/inode.c
+++ b/fs/bfs/inode.c
@@ -357,28 +357,46 @@ static int bfs_fill_super(struct super_block *s, void *data, int silent)
357 } 357 }
358 358
359 info->si_blocks = (le32_to_cpu(bfs_sb->s_end) + 1)>>BFS_BSIZE_BITS; /* for statfs(2) */ 359 info->si_blocks = (le32_to_cpu(bfs_sb->s_end) + 1)>>BFS_BSIZE_BITS; /* for statfs(2) */
360 info->si_freeb = (le32_to_cpu(bfs_sb->s_end) + 1 - cpu_to_le32(bfs_sb->s_start))>>BFS_BSIZE_BITS; 360 info->si_freeb = (le32_to_cpu(bfs_sb->s_end) + 1 - le32_to_cpu(bfs_sb->s_start))>>BFS_BSIZE_BITS;
361 info->si_freei = 0; 361 info->si_freei = 0;
362 info->si_lf_eblk = 0; 362 info->si_lf_eblk = 0;
363 info->si_lf_sblk = 0; 363 info->si_lf_sblk = 0;
364 info->si_lf_ioff = 0; 364 info->si_lf_ioff = 0;
365 bh = NULL;
365 for (i=BFS_ROOT_INO; i<=info->si_lasti; i++) { 366 for (i=BFS_ROOT_INO; i<=info->si_lasti; i++) {
366 inode = iget(s,i); 367 struct bfs_inode *di;
367 if (BFS_I(inode)->i_dsk_ino == 0) 368 int block = (i - BFS_ROOT_INO)/BFS_INODES_PER_BLOCK + 1;
369 int off = (i - BFS_ROOT_INO) % BFS_INODES_PER_BLOCK;
370 unsigned long sblock, eblock;
371
372 if (!off) {
373 brelse(bh);
374 bh = sb_bread(s, block);
375 }
376
377 if (!bh)
378 continue;
379
380 di = (struct bfs_inode *)bh->b_data + off;
381
382 if (!di->i_ino) {
368 info->si_freei++; 383 info->si_freei++;
369 else { 384 continue;
370 set_bit(i, info->si_imap); 385 }
371 info->si_freeb -= inode->i_blocks; 386 set_bit(i, info->si_imap);
372 if (BFS_I(inode)->i_eblock > info->si_lf_eblk) { 387 info->si_freeb -= BFS_FILEBLOCKS(di);
373 info->si_lf_eblk = BFS_I(inode)->i_eblock; 388
374 info->si_lf_sblk = BFS_I(inode)->i_sblock; 389 sblock = le32_to_cpu(di->i_sblock);
375 info->si_lf_ioff = BFS_INO2OFF(i); 390 eblock = le32_to_cpu(di->i_eblock);
376 } 391 if (eblock > info->si_lf_eblk) {
392 info->si_lf_eblk = eblock;
393 info->si_lf_sblk = sblock;
394 info->si_lf_ioff = BFS_INO2OFF(i);
377 } 395 }
378 iput(inode);
379 } 396 }
397 brelse(bh);
380 if (!(s->s_flags & MS_RDONLY)) { 398 if (!(s->s_flags & MS_RDONLY)) {
381 mark_buffer_dirty(bh); 399 mark_buffer_dirty(info->si_sbh);
382 s->s_dirt = 1; 400 s->s_dirt = 1;
383 } 401 }
384 dump_imap("read_super", s); 402 dump_imap("read_super", s);
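The rewritten bfs_fill_super() loop above stops instantiating a full in-core inode per object and instead walks the raw on-disk inode table, computing which block and slot each inode number lands in and re-reading the buffer only when the slot index wraps to zero. A small userspace model of just that block/offset arithmetic, with invented sizes:

    #include <stdio.h>

    #define ROOT_INO          2     /* invented: first inode number */
    #define INODES_PER_BLOCK  8     /* invented packing, for illustration */

    int main(void)
    {
            int i, last = 20;
            int cached_block = -1;

            for (i = ROOT_INO; i <= last; i++) {
                    /* same arithmetic as the loop in the hunk above */
                    int block = (i - ROOT_INO) / INODES_PER_BLOCK + 1;
                    int off   = (i - ROOT_INO) % INODES_PER_BLOCK;

                    if (block != cached_block) {    /* "!off" in the kernel loop */
                            printf("read block %d\n", block);
                            cached_block = block;
                    }
                    printf("  inode %2d -> block %d, slot %d\n", i, block, off);
            }
            return 0;
    }

In the kernel loop the buffer is refreshed when off is zero; since i increases by one each pass, that is equivalent to the block number changing, which is what the model checks.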
diff --git a/fs/namei.c b/fs/namei.c
index 043d587216b5..aa62dbda93ac 100644
--- a/fs/namei.c
+++ b/fs/namei.c
@@ -1551,19 +1551,19 @@ do_link:
1551 if (nd->last_type != LAST_NORM) 1551 if (nd->last_type != LAST_NORM)
1552 goto exit; 1552 goto exit;
1553 if (nd->last.name[nd->last.len]) { 1553 if (nd->last.name[nd->last.len]) {
1554 putname(nd->last.name); 1554 __putname(nd->last.name);
1555 goto exit; 1555 goto exit;
1556 } 1556 }
1557 error = -ELOOP; 1557 error = -ELOOP;
1558 if (count++==32) { 1558 if (count++==32) {
1559 putname(nd->last.name); 1559 __putname(nd->last.name);
1560 goto exit; 1560 goto exit;
1561 } 1561 }
1562 dir = nd->dentry; 1562 dir = nd->dentry;
1563 down(&dir->d_inode->i_sem); 1563 down(&dir->d_inode->i_sem);
1564 path.dentry = __lookup_hash(&nd->last, nd->dentry, nd); 1564 path.dentry = __lookup_hash(&nd->last, nd->dentry, nd);
1565 path.mnt = nd->mnt; 1565 path.mnt = nd->mnt;
1566 putname(nd->last.name); 1566 __putname(nd->last.name);
1567 goto do_last; 1567 goto do_last;
1568} 1568}
1569 1569
diff --git a/fs/ntfs/ChangeLog b/fs/ntfs/ChangeLog
index 83f3322765cd..de58579a1d0e 100644
--- a/fs/ntfs/ChangeLog
+++ b/fs/ntfs/ChangeLog
@@ -102,6 +102,9 @@ ToDo/Notes:
102 inode instead of a vfs inode as parameter. 102 inode instead of a vfs inode as parameter.
103 - Fix the definition of the CHKD ntfs record magic. It had an off by 103 - Fix the definition of the CHKD ntfs record magic. It had an off by
104 two error causing it to be CHKB instead of CHKD. 104 two error causing it to be CHKB instead of CHKD.
105 - Fix a stupid bug in __ntfs_bitmap_set_bits_in_run() which caused the
106 count to become negative and hence we had a wild memset() scribbling
107 all over the system's ram.
105 108
1062.1.23 - Implement extension of resident files and make writing safe as well as 1092.1.23 - Implement extension of resident files and make writing safe as well as
107 many bug fixes, cleanups, and enhancements... 110 many bug fixes, cleanups, and enhancements...
diff --git a/fs/ntfs/bitmap.c b/fs/ntfs/bitmap.c
index 12cf2e30c7dd..7a190cdc60e2 100644
--- a/fs/ntfs/bitmap.c
+++ b/fs/ntfs/bitmap.c
@@ -1,7 +1,7 @@
1/* 1/*
2 * bitmap.c - NTFS kernel bitmap handling. Part of the Linux-NTFS project. 2 * bitmap.c - NTFS kernel bitmap handling. Part of the Linux-NTFS project.
3 * 3 *
4 * Copyright (c) 2004 Anton Altaparmakov 4 * Copyright (c) 2004-2005 Anton Altaparmakov
5 * 5 *
6 * This program/include file is free software; you can redistribute it and/or 6 * This program/include file is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU General Public License as published 7 * modify it under the terms of the GNU General Public License as published
@@ -90,7 +90,8 @@ int __ntfs_bitmap_set_bits_in_run(struct inode *vi, const s64 start_bit,
90 /* If the first byte is partial, modify the appropriate bits in it. */ 90 /* If the first byte is partial, modify the appropriate bits in it. */
91 if (bit) { 91 if (bit) {
92 u8 *byte = kaddr + pos; 92 u8 *byte = kaddr + pos;
93 while ((bit & 7) && cnt--) { 93 while ((bit & 7) && cnt) {
94 cnt--;
94 if (value) 95 if (value)
95 *byte |= 1 << bit++; 96 *byte |= 1 << bit++;
96 else 97 else
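The bitmap.c fix above moves the cnt decrement out of the loop condition. With while ((bit & 7) && cnt--), the very test that terminates the loop still decrements cnt, so cnt can end up one lower than intended (here, negative), which is the runaway memset() described in the ChangeLog entry earlier in this patch. A self-contained demonstration of the difference:

    #include <stdio.h>

    int main(void)
    {
            long cnt;
            int bit;

            /* Old form: the test that ends the loop still decrements cnt. */
            cnt = 0;
            bit = 1;
            while ((bit & 7) && cnt--)
                    bit++;
            printf("old form leaves cnt = %ld\n", cnt);     /* prints -1 */

            /* New form: decrement only once an iteration is actually taken. */
            cnt = 0;
            bit = 1;
            while ((bit & 7) && cnt) {
                    cnt--;
                    bit++;
            }
            printf("new form leaves cnt = %ld\n", cnt);     /* prints 0 */
            return 0;
    }

The short-circuit && matters too: in both forms cnt is untouched when (bit & 7) is already zero, so only the case where the loop stops because cnt reached zero diverges.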
diff --git a/fs/ntfs/layout.h b/fs/ntfs/layout.h
index 01f2dfa39cec..5c248d404f05 100644
--- a/fs/ntfs/layout.h
+++ b/fs/ntfs/layout.h
@@ -309,7 +309,7 @@ typedef le16 MFT_RECORD_FLAGS;
309 * Note: The _LE versions will return a CPU endian formatted value! 309 * Note: The _LE versions will return a CPU endian formatted value!
310 */ 310 */
311#define MFT_REF_MASK_CPU 0x0000ffffffffffffULL 311#define MFT_REF_MASK_CPU 0x0000ffffffffffffULL
312#define MFT_REF_MASK_LE const_cpu_to_le64(0x0000ffffffffffffULL) 312#define MFT_REF_MASK_LE const_cpu_to_le64(MFT_REF_MASK_CPU)
313 313
314typedef u64 MFT_REF; 314typedef u64 MFT_REF;
315typedef le64 leMFT_REF; 315typedef le64 leMFT_REF;
diff --git a/fs/ntfs/mft.c b/fs/ntfs/mft.c
index 247586d1d5dc..b011369b5956 100644
--- a/fs/ntfs/mft.c
+++ b/fs/ntfs/mft.c
@@ -58,7 +58,8 @@ static inline MFT_RECORD *map_mft_record_page(ntfs_inode *ni)
58 * overflowing the unsigned long, but I don't think we would ever get 58 * overflowing the unsigned long, but I don't think we would ever get
59 * here if the volume was that big... 59 * here if the volume was that big...
60 */ 60 */
61 index = ni->mft_no << vol->mft_record_size_bits >> PAGE_CACHE_SHIFT; 61 index = (u64)ni->mft_no << vol->mft_record_size_bits >>
62 PAGE_CACHE_SHIFT;
62 ofs = (ni->mft_no << vol->mft_record_size_bits) & ~PAGE_CACHE_MASK; 63 ofs = (ni->mft_no << vol->mft_record_size_bits) & ~PAGE_CACHE_MASK;
63 64
64 i_size = i_size_read(mft_vi); 65 i_size = i_size_read(mft_vi);
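The mft.c hunk above widens ni->mft_no to u64 before the left shift: on a 32-bit machine the intermediate value mft_no << mft_record_size_bits can exceed 32 bits, and the wrapped result then yields a bogus page index after the right shift. A short demonstration with made-up shift counts:

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
            uint32_t mft_no = 0x00400000;           /* a large record number */
            unsigned int record_size_bits = 10;     /* invented: 1 KiB records */
            unsigned int page_shift = 12;           /* invented: 4 KiB pages */

            /* 32-bit intermediate: the left shift wraps before the right shift. */
            uint32_t bad = mft_no << record_size_bits >> page_shift;

            /* Widen the operand first, as the patch does. */
            uint64_t good = (uint64_t)mft_no << record_size_bits >> page_shift;

            printf("32-bit intermediate: 0x%x\n", bad);
            printf("64-bit intermediate: 0x%llx\n", (unsigned long long)good);
            return 0;
    }

The cast has to be applied to the operand, before the shift; casting the already-wrapped result would not recover the lost bits.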
diff --git a/fs/ntfs/unistr.c b/fs/ntfs/unistr.c
index a389a5a16c84..0ea887fc859c 100644
--- a/fs/ntfs/unistr.c
+++ b/fs/ntfs/unistr.c
@@ -1,7 +1,7 @@
1/* 1/*
2 * unistr.c - NTFS Unicode string handling. Part of the Linux-NTFS project. 2 * unistr.c - NTFS Unicode string handling. Part of the Linux-NTFS project.
3 * 3 *
4 * Copyright (c) 2001-2004 Anton Altaparmakov 4 * Copyright (c) 2001-2005 Anton Altaparmakov
5 * 5 *
6 * This program/include file is free software; you can redistribute it and/or 6 * This program/include file is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU General Public License as published 7 * modify it under the terms of the GNU General Public License as published
diff --git a/include/asm-arm/arch-h720x/system.h b/include/asm-arm/arch-h720x/system.h
index 0b025e227ec2..09eda84592ff 100644
--- a/include/asm-arm/arch-h720x/system.h
+++ b/include/asm-arm/arch-h720x/system.h
@@ -17,9 +17,11 @@
17static void arch_idle(void) 17static void arch_idle(void)
18{ 18{
19 CPU_REG (PMU_BASE, PMU_MODE) = PMU_MODE_IDLE; 19 CPU_REG (PMU_BASE, PMU_MODE) = PMU_MODE_IDLE;
20 __asm__ __volatile__( 20 nop();
21 "mov r0, r0\n\t" 21 nop();
22 "mov r0, r0"); 22 CPU_REG (PMU_BASE, PMU_MODE) = PMU_MODE_RUN;
23 nop();
24 nop();
23} 25}
24 26
25 27
diff --git a/include/asm-arm/arch-imx/imx-regs.h b/include/asm-arm/arch-imx/imx-regs.h
index 93b840e8fa60..229f7008d74f 100644
--- a/include/asm-arm/arch-imx/imx-regs.h
+++ b/include/asm-arm/arch-imx/imx-regs.h
@@ -76,6 +76,7 @@
76#define GPIO_PIN_MASK 0x1f 76#define GPIO_PIN_MASK 0x1f
77#define GPIO_PORT_MASK (0x3 << 5) 77#define GPIO_PORT_MASK (0x3 << 5)
78 78
79#define GPIO_PORT_SHIFT 5
79#define GPIO_PORTA (0<<5) 80#define GPIO_PORTA (0<<5)
80#define GPIO_PORTB (1<<5) 81#define GPIO_PORTB (1<<5)
81#define GPIO_PORTC (2<<5) 82#define GPIO_PORTC (2<<5)
@@ -88,24 +89,37 @@
88#define GPIO_PF (0<<9) 89#define GPIO_PF (0<<9)
89#define GPIO_AF (1<<9) 90#define GPIO_AF (1<<9)
90 91
92#define GPIO_OCR_SHIFT 10
91#define GPIO_OCR_MASK (3<<10) 93#define GPIO_OCR_MASK (3<<10)
92#define GPIO_AIN (0<<10) 94#define GPIO_AIN (0<<10)
93#define GPIO_BIN (1<<10) 95#define GPIO_BIN (1<<10)
94#define GPIO_CIN (2<<10) 96#define GPIO_CIN (2<<10)
95#define GPIO_GPIO (3<<10) 97#define GPIO_DR (3<<10)
96 98
97#define GPIO_AOUT (1<<12) 99#define GPIO_AOUT_SHIFT 12
98#define GPIO_BOUT (1<<13) 100#define GPIO_AOUT_MASK (3<<12)
101#define GPIO_AOUT (0<<12)
102#define GPIO_AOUT_ISR (1<<12)
103#define GPIO_AOUT_0 (2<<12)
104#define GPIO_AOUT_1 (3<<12)
105
106#define GPIO_BOUT_SHIFT 14
107#define GPIO_BOUT_MASK (3<<14)
108#define GPIO_BOUT (0<<14)
109#define GPIO_BOUT_ISR (1<<14)
110#define GPIO_BOUT_0 (2<<14)
111#define GPIO_BOUT_1 (3<<14)
112
113#define GPIO_GIUS (1<<16)
99 114
100/* assignements for GPIO alternate/primary functions */ 115/* assignements for GPIO alternate/primary functions */
101 116
102/* FIXME: This list is not completed. The correct directions are 117/* FIXME: This list is not completed. The correct directions are
103 * missing on some (many) pins 118 * missing on some (many) pins
104 */ 119 */
105#define PA0_PF_A24 ( GPIO_PORTA | GPIO_PF | 0 ) 120#define PA0_AIN_SPI2_CLK ( GPIO_GIUS | GPIO_PORTA | GPIO_OUT | 0 )
106#define PA0_AIN_SPI2_CLK ( GPIO_PORTA | GPIO_OUT | GPIO_AIN | 0 )
107#define PA0_AF_ETMTRACESYNC ( GPIO_PORTA | GPIO_AF | 0 ) 121#define PA0_AF_ETMTRACESYNC ( GPIO_PORTA | GPIO_AF | 0 )
108#define PA1_AOUT_SPI2_RXD ( GPIO_PORTA | GPIO_IN | GPIO_AOUT | 1 ) 122#define PA1_AOUT_SPI2_RXD ( GPIO_GIUS | GPIO_PORTA | GPIO_IN | 1 )
109#define PA1_PF_TIN ( GPIO_PORTA | GPIO_PF | 1 ) 123#define PA1_PF_TIN ( GPIO_PORTA | GPIO_PF | 1 )
110#define PA2_PF_PWM0 ( GPIO_PORTA | GPIO_OUT | GPIO_PF | 2 ) 124#define PA2_PF_PWM0 ( GPIO_PORTA | GPIO_OUT | GPIO_PF | 2 )
111#define PA3_PF_CSI_MCLK ( GPIO_PORTA | GPIO_PF | 3 ) 125#define PA3_PF_CSI_MCLK ( GPIO_PORTA | GPIO_PF | 3 )
@@ -123,7 +137,7 @@
123#define PA15_PF_I2C_SDA ( GPIO_PORTA | GPIO_OUT | GPIO_PF | 15 ) 137#define PA15_PF_I2C_SDA ( GPIO_PORTA | GPIO_OUT | GPIO_PF | 15 )
124#define PA16_PF_I2C_SCL ( GPIO_PORTA | GPIO_OUT | GPIO_PF | 16 ) 138#define PA16_PF_I2C_SCL ( GPIO_PORTA | GPIO_OUT | GPIO_PF | 16 )
125#define PA17_AF_ETMTRACEPKT4 ( GPIO_PORTA | GPIO_AF | 17 ) 139#define PA17_AF_ETMTRACEPKT4 ( GPIO_PORTA | GPIO_AF | 17 )
126#define PA17_AIN_SPI2_SS ( GPIO_PORTA | GPIO_AIN | 17 ) 140#define PA17_AIN_SPI2_SS ( GPIO_GIUS | GPIO_PORTA | GPIO_OUT | 17 )
127#define PA18_AF_ETMTRACEPKT5 ( GPIO_PORTA | GPIO_AF | 18 ) 141#define PA18_AF_ETMTRACEPKT5 ( GPIO_PORTA | GPIO_AF | 18 )
128#define PA19_AF_ETMTRACEPKT6 ( GPIO_PORTA | GPIO_AF | 19 ) 142#define PA19_AF_ETMTRACEPKT6 ( GPIO_PORTA | GPIO_AF | 19 )
129#define PA20_AF_ETMTRACEPKT7 ( GPIO_PORTA | GPIO_AF | 20 ) 143#define PA20_AF_ETMTRACEPKT7 ( GPIO_PORTA | GPIO_AF | 20 )
@@ -191,19 +205,27 @@
191#define PC15_PF_SPI1_SS ( GPIO_PORTC | GPIO_PF | 15 ) 205#define PC15_PF_SPI1_SS ( GPIO_PORTC | GPIO_PF | 15 )
192#define PC16_PF_SPI1_MISO ( GPIO_PORTC | GPIO_PF | 16 ) 206#define PC16_PF_SPI1_MISO ( GPIO_PORTC | GPIO_PF | 16 )
193#define PC17_PF_SPI1_MOSI ( GPIO_PORTC | GPIO_PF | 17 ) 207#define PC17_PF_SPI1_MOSI ( GPIO_PORTC | GPIO_PF | 17 )
208#define PC24_BIN_UART3_RI ( GPIO_GIUS | GPIO_PORTC | GPIO_OUT | GPIO_BIN | 24 )
209#define PC25_BIN_UART3_DSR ( GPIO_GIUS | GPIO_PORTC | GPIO_OUT | GPIO_BIN | 25 )
210#define PC26_AOUT_UART3_DTR ( GPIO_GIUS | GPIO_PORTC | GPIO_IN | 26 )
211#define PC27_BIN_UART3_DCD ( GPIO_GIUS | GPIO_PORTC | GPIO_OUT | GPIO_BIN | 27 )
212#define PC28_BIN_UART3_CTS ( GPIO_GIUS | GPIO_PORTC | GPIO_OUT | GPIO_BIN | 28 )
213#define PC29_AOUT_UART3_RTS ( GPIO_GIUS | GPIO_PORTC | GPIO_IN | 29 )
214#define PC30_BIN_UART3_TX ( GPIO_GIUS | GPIO_PORTC | GPIO_BIN | 30 )
215#define PC31_AOUT_UART3_RX ( GPIO_GIUS | GPIO_PORTC | GPIO_IN | 31)
194#define PD6_PF_LSCLK ( GPIO_PORTD | GPIO_OUT | GPIO_PF | 6 ) 216#define PD6_PF_LSCLK ( GPIO_PORTD | GPIO_OUT | GPIO_PF | 6 )
195#define PD7_PF_REV ( GPIO_PORTD | GPIO_PF | 7 ) 217#define PD7_PF_REV ( GPIO_PORTD | GPIO_PF | 7 )
196#define PD7_AF_UART2_DTR ( GPIO_PORTD | GPIO_IN | GPIO_AF | 7 ) 218#define PD7_AF_UART2_DTR ( GPIO_PORTD | GPIO_IN | GPIO_AF | 7 )
197#define PD7_AIN_SPI2_SCLK ( GPIO_PORTD | GPIO_AIN | 7 ) 219#define PD7_AIN_SPI2_SCLK ( GPIO_GIUS | GPIO_PORTD | GPIO_AIN | 7 )
198#define PD8_PF_CLS ( GPIO_PORTD | GPIO_PF | 8 ) 220#define PD8_PF_CLS ( GPIO_PORTD | GPIO_PF | 8 )
199#define PD8_AF_UART2_DCD ( GPIO_PORTD | GPIO_OUT | GPIO_AF | 8 ) 221#define PD8_AF_UART2_DCD ( GPIO_PORTD | GPIO_OUT | GPIO_AF | 8 )
200#define PD8_AIN_SPI2_SS ( GPIO_PORTD | GPIO_AIN | 8 ) 222#define PD8_AIN_SPI2_SS ( GPIO_GIUS | GPIO_PORTD | GPIO_AIN | 8 )
201#define PD9_PF_PS ( GPIO_PORTD | GPIO_PF | 9 ) 223#define PD9_PF_PS ( GPIO_PORTD | GPIO_PF | 9 )
202#define PD9_AF_UART2_RI ( GPIO_PORTD | GPIO_OUT | GPIO_AF | 9 ) 224#define PD9_AF_UART2_RI ( GPIO_PORTD | GPIO_OUT | GPIO_AF | 9 )
203#define PD9_AOUT_SPI2_RXD ( GPIO_PORTD | GPIO_IN | GPIO_AOUT | 9 ) 225#define PD9_AOUT_SPI2_RXD ( GPIO_GIUS | GPIO_PORTD | GPIO_IN | 9 )
204#define PD10_PF_SPL_SPR ( GPIO_PORTD | GPIO_OUT | GPIO_PF | 10 ) 226#define PD10_PF_SPL_SPR ( GPIO_PORTD | GPIO_OUT | GPIO_PF | 10 )
205#define PD10_AF_UART2_DSR ( GPIO_PORTD | GPIO_OUT | GPIO_AF | 10 ) 227#define PD10_AF_UART2_DSR ( GPIO_PORTD | GPIO_OUT | GPIO_AF | 10 )
206#define PD10_AIN_SPI2_TXD ( GPIO_PORTD | GPIO_OUT | GPIO_AIN | 10 ) 228#define PD10_AIN_SPI2_TXD ( GPIO_GIUS | GPIO_PORTD | GPIO_OUT | 10 )
207#define PD11_PF_CONTRAST ( GPIO_PORTD | GPIO_OUT | GPIO_PF | 11 ) 229#define PD11_PF_CONTRAST ( GPIO_PORTD | GPIO_OUT | GPIO_PF | 11 )
208#define PD12_PF_ACD_OE ( GPIO_PORTD | GPIO_OUT | GPIO_PF | 12 ) 230#define PD12_PF_ACD_OE ( GPIO_PORTD | GPIO_OUT | GPIO_PF | 12 )
209#define PD13_PF_LP_HSYNC ( GPIO_PORTD | GPIO_OUT | GPIO_PF | 13 ) 231#define PD13_PF_LP_HSYNC ( GPIO_PORTD | GPIO_OUT | GPIO_PF | 13 )
@@ -225,7 +247,7 @@
225#define PD29_PF_LD14 ( GPIO_PORTD | GPIO_OUT | GPIO_PF | 29 ) 247#define PD29_PF_LD14 ( GPIO_PORTD | GPIO_OUT | GPIO_PF | 29 )
226#define PD30_PF_LD15 ( GPIO_PORTD | GPIO_OUT | GPIO_PF | 30 ) 248#define PD30_PF_LD15 ( GPIO_PORTD | GPIO_OUT | GPIO_PF | 30 )
227#define PD31_PF_TMR2OUT ( GPIO_PORTD | GPIO_PF | 31 ) 249#define PD31_PF_TMR2OUT ( GPIO_PORTD | GPIO_PF | 31 )
228#define PD31_BIN_SPI2_TXD ( GPIO_PORTD | GPIO_BIN | 31 ) 250#define PD31_BIN_SPI2_TXD ( GPIO_GIUS | GPIO_PORTD | GPIO_BIN | 31 )
229 251
230/* 252/*
231 * PWM controller 253 * PWM controller
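The imx-regs.h additions above extend the pin-descriptor encoding: a 5-bit pin number, a 2-bit port field at GPIO_PORT_SHIFT, the output-configuration field at GPIO_OCR_SHIFT, and a new GPIO_GIUS flag at bit 16, all ORed into one integer per pin. A small sketch that packs one of the new UART3 descriptors and pulls the fields back out (the real macros also OR in a direction bit, GPIO_IN/GPIO_OUT, defined elsewhere in the header and omitted here):

    #include <stdio.h>

    /* Field layout copied from the header hunk above. */
    #define GPIO_PIN_MASK   0x1f
    #define GPIO_PORT_SHIFT 5
    #define GPIO_PORT_MASK  (0x3 << 5)
    #define GPIO_PORTC      (2 << 5)
    #define GPIO_OCR_SHIFT  10
    #define GPIO_OCR_MASK   (3 << 10)
    #define GPIO_BIN        (1 << 10)
    #define GPIO_GIUS       (1 << 16)

    /* One of the new UART3 descriptors, minus the direction bit. */
    #define PC28_BIN_UART3_CTS      (GPIO_GIUS | GPIO_PORTC | GPIO_BIN | 28)

    int main(void)
    {
            unsigned int desc = PC28_BIN_UART3_CTS;

            printf("pin  %u\n", desc & GPIO_PIN_MASK);
            printf("port %u\n", (desc & GPIO_PORT_MASK) >> GPIO_PORT_SHIFT);
            printf("ocr  %u\n", (desc & GPIO_OCR_MASK) >> GPIO_OCR_SHIFT);
            printf("gius %s\n", (desc & GPIO_GIUS) ? "yes" : "no");
            return 0;
    }

Packing every field behind a mask-and-shift pair like this lets board code hand a single word per pin to the setup helper and lets the helper decode it without any table lookups.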
diff --git a/include/asm-arm/arch-ixp4xx/platform.h b/include/asm-arm/arch-ixp4xx/platform.h
index d13ee7f78c70..f14ed63590c3 100644
--- a/include/asm-arm/arch-ixp4xx/platform.h
+++ b/include/asm-arm/arch-ixp4xx/platform.h
@@ -93,7 +93,7 @@ extern struct pci_bus *ixp4xx_scan_bus(int nr, struct pci_sys_data *sys);
93 93
94static inline void gpio_line_config(u8 line, u32 direction) 94static inline void gpio_line_config(u8 line, u32 direction)
95{ 95{
96 if (direction == IXP4XX_GPIO_OUT) 96 if (direction == IXP4XX_GPIO_IN)
97 *IXP4XX_GPIO_GPOER |= (1 << line); 97 *IXP4XX_GPIO_GPOER |= (1 << line);
98 else 98 else
99 *IXP4XX_GPIO_GPOER &= ~(1 << line); 99 *IXP4XX_GPIO_GPOER &= ~(1 << line);
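The ixp4xx fix above inverts the direction test: the output-enable register bit is now set for IXP4XX_GPIO_IN and cleared for IXP4XX_GPIO_OUT, which implies that a set GPOER bit disables the output driver. A userspace model of that register logic (the names and the 1-means-input convention are assumptions taken from the fixed code, not from the IXP4xx manual):

    #include <stdint.h>
    #include <stdio.h>

    #define GPIO_IN         1
    #define GPIO_OUT        2

    /* Model of the output-enable register: a set bit makes the line an input. */
    static uint32_t gpoer;

    static void gpio_line_config(uint8_t line, uint32_t direction)
    {
            if (direction == GPIO_IN)
                    gpoer |= (1u << line);          /* disable the output driver */
            else
                    gpoer &= ~(1u << line);         /* drive the line */
    }

    int main(void)
    {
            gpio_line_config(3, GPIO_IN);
            gpio_line_config(5, GPIO_OUT);
            printf("GPOER = 0x%08x (bit 3 set, bit 5 clear)\n", gpoer);
            return 0;
    }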
diff --git a/include/asm-sparc/btfixup.h b/include/asm-sparc/btfixup.h
index b84c96c89581..c2868d0f60b6 100644
--- a/include/asm-sparc/btfixup.h
+++ b/include/asm-sparc/btfixup.h
@@ -49,17 +49,17 @@ extern unsigned int ___illegal_use_of_BTFIXUP_INT_in_module(void);
49/* Put bottom 13bits into some register variable */ 49/* Put bottom 13bits into some register variable */
50 50
51#define BTFIXUPDEF_SIMM13(__name) \ 51#define BTFIXUPDEF_SIMM13(__name) \
52 extern unsigned int ___sf_##__name(void) __attribute_const__; \ 52 static inline unsigned int ___sf_##__name(void) __attribute_const__; \
53 extern unsigned ___ss_##__name[2]; \ 53 extern unsigned ___ss_##__name[2]; \
54 extern __inline__ unsigned int ___sf_##__name(void) { \ 54 static inline unsigned int ___sf_##__name(void) { \
55 unsigned int ret; \ 55 unsigned int ret; \
56 __asm__ ("or %%g0, ___s_" #__name ", %0" : "=r"(ret)); \ 56 __asm__ ("or %%g0, ___s_" #__name ", %0" : "=r"(ret)); \
57 return ret; \ 57 return ret; \
58 } 58 }
59#define BTFIXUPDEF_SIMM13_INIT(__name,__val) \ 59#define BTFIXUPDEF_SIMM13_INIT(__name,__val) \
60 extern unsigned int ___sf_##__name(void) __attribute_const__; \ 60 static inline unsigned int ___sf_##__name(void) __attribute_const__; \
61 extern unsigned ___ss_##__name[2]; \ 61 extern unsigned ___ss_##__name[2]; \
62 extern __inline__ unsigned int ___sf_##__name(void) { \ 62 static inline unsigned int ___sf_##__name(void) { \
63 unsigned int ret; \ 63 unsigned int ret; \
64 __asm__ ("or %%g0, ___s_" #__name "__btset_" #__val ", %0" : "=r"(ret));\ 64 __asm__ ("or %%g0, ___s_" #__name "__btset_" #__val ", %0" : "=r"(ret));\
65 return ret; \ 65 return ret; \
@@ -71,17 +71,17 @@ extern unsigned int ___illegal_use_of_BTFIXUP_INT_in_module(void);
71 */ 71 */
72 72
73#define BTFIXUPDEF_HALF(__name) \ 73#define BTFIXUPDEF_HALF(__name) \
74 extern unsigned int ___af_##__name(void) __attribute_const__; \ 74 static inline unsigned int ___af_##__name(void) __attribute_const__; \
75 extern unsigned ___as_##__name[2]; \ 75 extern unsigned ___as_##__name[2]; \
76 extern __inline__ unsigned int ___af_##__name(void) { \ 76 static inline unsigned int ___af_##__name(void) { \
77 unsigned int ret; \ 77 unsigned int ret; \
78 __asm__ ("or %%g0, ___a_" #__name ", %0" : "=r"(ret)); \ 78 __asm__ ("or %%g0, ___a_" #__name ", %0" : "=r"(ret)); \
79 return ret; \ 79 return ret; \
80 } 80 }
81#define BTFIXUPDEF_HALF_INIT(__name,__val) \ 81#define BTFIXUPDEF_HALF_INIT(__name,__val) \
82 extern unsigned int ___af_##__name(void) __attribute_const__; \ 82 static inline unsigned int ___af_##__name(void) __attribute_const__; \
83 extern unsigned ___as_##__name[2]; \ 83 extern unsigned ___as_##__name[2]; \
84 extern __inline__ unsigned int ___af_##__name(void) { \ 84 static inline unsigned int ___af_##__name(void) { \
85 unsigned int ret; \ 85 unsigned int ret; \
86 __asm__ ("or %%g0, ___a_" #__name "__btset_" #__val ", %0" : "=r"(ret));\ 86 __asm__ ("or %%g0, ___a_" #__name "__btset_" #__val ", %0" : "=r"(ret));\
87 return ret; \ 87 return ret; \
@@ -90,17 +90,17 @@ extern unsigned int ___illegal_use_of_BTFIXUP_INT_in_module(void);
90/* Put upper 22 bits into some register variable */ 90/* Put upper 22 bits into some register variable */
91 91
92#define BTFIXUPDEF_SETHI(__name) \ 92#define BTFIXUPDEF_SETHI(__name) \
93 extern unsigned int ___hf_##__name(void) __attribute_const__; \ 93 static inline unsigned int ___hf_##__name(void) __attribute_const__; \
94 extern unsigned ___hs_##__name[2]; \ 94 extern unsigned ___hs_##__name[2]; \
95 extern __inline__ unsigned int ___hf_##__name(void) { \ 95 static inline unsigned int ___hf_##__name(void) { \
96 unsigned int ret; \ 96 unsigned int ret; \
97 __asm__ ("sethi %%hi(___h_" #__name "), %0" : "=r"(ret)); \ 97 __asm__ ("sethi %%hi(___h_" #__name "), %0" : "=r"(ret)); \
98 return ret; \ 98 return ret; \
99 } 99 }
100#define BTFIXUPDEF_SETHI_INIT(__name,__val) \ 100#define BTFIXUPDEF_SETHI_INIT(__name,__val) \
101 extern unsigned int ___hf_##__name(void) __attribute_const__; \ 101 static inline unsigned int ___hf_##__name(void) __attribute_const__; \
102 extern unsigned ___hs_##__name[2]; \ 102 extern unsigned ___hs_##__name[2]; \
103 extern __inline__ unsigned int ___hf_##__name(void) { \ 103 static inline unsigned int ___hf_##__name(void) { \
104 unsigned int ret; \ 104 unsigned int ret; \
105 __asm__ ("sethi %%hi(___h_" #__name "__btset_" #__val "), %0" : \ 105 __asm__ ("sethi %%hi(___h_" #__name "__btset_" #__val "), %0" : \
106 "=r"(ret)); \ 106 "=r"(ret)); \
diff --git a/include/asm-sparc/cache.h b/include/asm-sparc/cache.h
index e6316fd7e1a4..a10522cb21b7 100644
--- a/include/asm-sparc/cache.h
+++ b/include/asm-sparc/cache.h
@@ -27,7 +27,7 @@
27 */ 27 */
28 28
29/* First, cache-tag access. */ 29/* First, cache-tag access. */
30extern __inline__ unsigned int get_icache_tag(int setnum, int tagnum) 30static inline unsigned int get_icache_tag(int setnum, int tagnum)
31{ 31{
32 unsigned int vaddr, retval; 32 unsigned int vaddr, retval;
33 33
@@ -38,7 +38,7 @@ extern __inline__ unsigned int get_icache_tag(int setnum, int tagnum)
38 return retval; 38 return retval;
39} 39}
40 40
41extern __inline__ void put_icache_tag(int setnum, int tagnum, unsigned int entry) 41static inline void put_icache_tag(int setnum, int tagnum, unsigned int entry)
42{ 42{
43 unsigned int vaddr; 43 unsigned int vaddr;
44 44
@@ -51,7 +51,7 @@ extern __inline__ void put_icache_tag(int setnum, int tagnum, unsigned int entry
51/* Second cache-data access. The data is returned two-32bit quantities 51/* Second cache-data access. The data is returned two-32bit quantities
52 * at a time. 52 * at a time.
53 */ 53 */
54extern __inline__ void get_icache_data(int setnum, int tagnum, int subblock, 54static inline void get_icache_data(int setnum, int tagnum, int subblock,
55 unsigned int *data) 55 unsigned int *data)
56{ 56{
57 unsigned int value1, value2, vaddr; 57 unsigned int value1, value2, vaddr;
@@ -67,7 +67,7 @@ extern __inline__ void get_icache_data(int setnum, int tagnum, int subblock,
67 data[0] = value1; data[1] = value2; 67 data[0] = value1; data[1] = value2;
68} 68}
69 69
70extern __inline__ void put_icache_data(int setnum, int tagnum, int subblock, 70static inline void put_icache_data(int setnum, int tagnum, int subblock,
71 unsigned int *data) 71 unsigned int *data)
72{ 72{
73 unsigned int value1, value2, vaddr; 73 unsigned int value1, value2, vaddr;
@@ -92,35 +92,35 @@ extern __inline__ void put_icache_data(int setnum, int tagnum, int subblock,
92 */ 92 */
93 93
94/* Flushes which clear out both the on-chip and external caches */ 94/* Flushes which clear out both the on-chip and external caches */
95extern __inline__ void flush_ei_page(unsigned int addr) 95static inline void flush_ei_page(unsigned int addr)
96{ 96{
97 __asm__ __volatile__("sta %%g0, [%0] %1\n\t" : : 97 __asm__ __volatile__("sta %%g0, [%0] %1\n\t" : :
98 "r" (addr), "i" (ASI_M_FLUSH_PAGE) : 98 "r" (addr), "i" (ASI_M_FLUSH_PAGE) :
99 "memory"); 99 "memory");
100} 100}
101 101
102extern __inline__ void flush_ei_seg(unsigned int addr) 102static inline void flush_ei_seg(unsigned int addr)
103{ 103{
104 __asm__ __volatile__("sta %%g0, [%0] %1\n\t" : : 104 __asm__ __volatile__("sta %%g0, [%0] %1\n\t" : :
105 "r" (addr), "i" (ASI_M_FLUSH_SEG) : 105 "r" (addr), "i" (ASI_M_FLUSH_SEG) :
106 "memory"); 106 "memory");
107} 107}
108 108
109extern __inline__ void flush_ei_region(unsigned int addr) 109static inline void flush_ei_region(unsigned int addr)
110{ 110{
111 __asm__ __volatile__("sta %%g0, [%0] %1\n\t" : : 111 __asm__ __volatile__("sta %%g0, [%0] %1\n\t" : :
112 "r" (addr), "i" (ASI_M_FLUSH_REGION) : 112 "r" (addr), "i" (ASI_M_FLUSH_REGION) :
113 "memory"); 113 "memory");
114} 114}
115 115
116extern __inline__ void flush_ei_ctx(unsigned int addr) 116static inline void flush_ei_ctx(unsigned int addr)
117{ 117{
118 __asm__ __volatile__("sta %%g0, [%0] %1\n\t" : : 118 __asm__ __volatile__("sta %%g0, [%0] %1\n\t" : :
119 "r" (addr), "i" (ASI_M_FLUSH_CTX) : 119 "r" (addr), "i" (ASI_M_FLUSH_CTX) :
120 "memory"); 120 "memory");
121} 121}
122 122
123extern __inline__ void flush_ei_user(unsigned int addr) 123static inline void flush_ei_user(unsigned int addr)
124{ 124{
125 __asm__ __volatile__("sta %%g0, [%0] %1\n\t" : : 125 __asm__ __volatile__("sta %%g0, [%0] %1\n\t" : :
126 "r" (addr), "i" (ASI_M_FLUSH_USER) : 126 "r" (addr), "i" (ASI_M_FLUSH_USER) :
diff --git a/include/asm-sparc/cypress.h b/include/asm-sparc/cypress.h
index fc92fc839c3f..99599533efbc 100644
--- a/include/asm-sparc/cypress.h
+++ b/include/asm-sparc/cypress.h
@@ -48,25 +48,25 @@
48#define CYPRESS_NFAULT 0x00000002 48#define CYPRESS_NFAULT 0x00000002
49#define CYPRESS_MENABLE 0x00000001 49#define CYPRESS_MENABLE 0x00000001
50 50
51extern __inline__ void cypress_flush_page(unsigned long page) 51static inline void cypress_flush_page(unsigned long page)
52{ 52{
53 __asm__ __volatile__("sta %%g0, [%0] %1\n\t" : : 53 __asm__ __volatile__("sta %%g0, [%0] %1\n\t" : :
54 "r" (page), "i" (ASI_M_FLUSH_PAGE)); 54 "r" (page), "i" (ASI_M_FLUSH_PAGE));
55} 55}
56 56
57extern __inline__ void cypress_flush_segment(unsigned long addr) 57static inline void cypress_flush_segment(unsigned long addr)
58{ 58{
59 __asm__ __volatile__("sta %%g0, [%0] %1\n\t" : : 59 __asm__ __volatile__("sta %%g0, [%0] %1\n\t" : :
60 "r" (addr), "i" (ASI_M_FLUSH_SEG)); 60 "r" (addr), "i" (ASI_M_FLUSH_SEG));
61} 61}
62 62
63extern __inline__ void cypress_flush_region(unsigned long addr) 63static inline void cypress_flush_region(unsigned long addr)
64{ 64{
65 __asm__ __volatile__("sta %%g0, [%0] %1\n\t" : : 65 __asm__ __volatile__("sta %%g0, [%0] %1\n\t" : :
66 "r" (addr), "i" (ASI_M_FLUSH_REGION)); 66 "r" (addr), "i" (ASI_M_FLUSH_REGION));
67} 67}
68 68
69extern __inline__ void cypress_flush_context(void) 69static inline void cypress_flush_context(void)
70{ 70{
71 __asm__ __volatile__("sta %%g0, [%%g0] %0\n\t" : : 71 __asm__ __volatile__("sta %%g0, [%%g0] %0\n\t" : :
72 "i" (ASI_M_FLUSH_CTX)); 72 "i" (ASI_M_FLUSH_CTX));
diff --git a/include/asm-sparc/delay.h b/include/asm-sparc/delay.h
index 6edf2cbb246b..7ec8e9f7ad4f 100644
--- a/include/asm-sparc/delay.h
+++ b/include/asm-sparc/delay.h
@@ -10,7 +10,7 @@
10#include <linux/config.h> 10#include <linux/config.h>
11#include <asm/cpudata.h> 11#include <asm/cpudata.h>
12 12
13extern __inline__ void __delay(unsigned long loops) 13static inline void __delay(unsigned long loops)
14{ 14{
15 __asm__ __volatile__("cmp %0, 0\n\t" 15 __asm__ __volatile__("cmp %0, 0\n\t"
16 "1: bne 1b\n\t" 16 "1: bne 1b\n\t"
diff --git a/include/asm-sparc/dma.h b/include/asm-sparc/dma.h
index 07e6368a2521..8ec206aa5f2e 100644
--- a/include/asm-sparc/dma.h
+++ b/include/asm-sparc/dma.h
@@ -198,7 +198,7 @@ extern void dvma_init(struct sbus_bus *);
198/* Pause until counter runs out or BIT isn't set in the DMA condition 198/* Pause until counter runs out or BIT isn't set in the DMA condition
199 * register. 199 * register.
200 */ 200 */
201extern __inline__ void sparc_dma_pause(struct sparc_dma_registers *regs, 201static inline void sparc_dma_pause(struct sparc_dma_registers *regs,
202 unsigned long bit) 202 unsigned long bit)
203{ 203{
204 int ctr = 50000; /* Let's find some bugs ;) */ 204 int ctr = 50000; /* Let's find some bugs ;) */
diff --git a/include/asm-sparc/iommu.h b/include/asm-sparc/iommu.h
index 8171362d56b9..70c589c05a10 100644
--- a/include/asm-sparc/iommu.h
+++ b/include/asm-sparc/iommu.h
@@ -108,12 +108,12 @@ struct iommu_struct {
108 struct bit_map usemap; 108 struct bit_map usemap;
109}; 109};
110 110
111extern __inline__ void iommu_invalidate(struct iommu_regs *regs) 111static inline void iommu_invalidate(struct iommu_regs *regs)
112{ 112{
113 regs->tlbflush = 0; 113 regs->tlbflush = 0;
114} 114}
115 115
116extern __inline__ void iommu_invalidate_page(struct iommu_regs *regs, unsigned long ba) 116static inline void iommu_invalidate_page(struct iommu_regs *regs, unsigned long ba)
117{ 117{
118 regs->pageflush = (ba & PAGE_MASK); 118 regs->pageflush = (ba & PAGE_MASK);
119} 119}
diff --git a/include/asm-sparc/kdebug.h b/include/asm-sparc/kdebug.h
index 3ea4916635ee..fba92485fdba 100644
--- a/include/asm-sparc/kdebug.h
+++ b/include/asm-sparc/kdebug.h
@@ -46,7 +46,7 @@ struct kernel_debug {
46extern struct kernel_debug *linux_dbvec; 46extern struct kernel_debug *linux_dbvec;
47 47
48/* Use this macro in C-code to enter the debugger. */ 48/* Use this macro in C-code to enter the debugger. */
49extern __inline__ void sp_enter_debugger(void) 49static inline void sp_enter_debugger(void)
50{ 50{
51 __asm__ __volatile__("jmpl %0, %%o7\n\t" 51 __asm__ __volatile__("jmpl %0, %%o7\n\t"
52 "nop\n\t" : : 52 "nop\n\t" : :
diff --git a/include/asm-sparc/mbus.h b/include/asm-sparc/mbus.h
index 5f2749015342..ecacdf4075d7 100644
--- a/include/asm-sparc/mbus.h
+++ b/include/asm-sparc/mbus.h
@@ -83,7 +83,7 @@ extern unsigned int hwbug_bitmask;
83 */ 83 */
84#define TBR_ID_SHIFT 20 84#define TBR_ID_SHIFT 20
85 85
86extern __inline__ int get_cpuid(void) 86static inline int get_cpuid(void)
87{ 87{
88 register int retval; 88 register int retval;
89 __asm__ __volatile__("rd %%tbr, %0\n\t" 89 __asm__ __volatile__("rd %%tbr, %0\n\t"
@@ -93,7 +93,7 @@ extern __inline__ int get_cpuid(void)
93 return (retval & 3); 93 return (retval & 3);
94} 94}
95 95
96extern __inline__ int get_modid(void) 96static inline int get_modid(void)
97{ 97{
98 return (get_cpuid() | 0x8); 98 return (get_cpuid() | 0x8);
99} 99}
diff --git a/include/asm-sparc/msi.h b/include/asm-sparc/msi.h
index b69543dd3b46..ff72cbd946a4 100644
--- a/include/asm-sparc/msi.h
+++ b/include/asm-sparc/msi.h
@@ -19,7 +19,7 @@
19#define MSI_ASYNC_MODE 0x80000000 /* Operate the MSI asynchronously */ 19#define MSI_ASYNC_MODE 0x80000000 /* Operate the MSI asynchronously */
20 20
21 21
22extern __inline__ void msi_set_sync(void) 22static inline void msi_set_sync(void)
23{ 23{
24 __asm__ __volatile__ ("lda [%0] %1, %%g3\n\t" 24 __asm__ __volatile__ ("lda [%0] %1, %%g3\n\t"
25 "andn %%g3, %2, %%g3\n\t" 25 "andn %%g3, %2, %%g3\n\t"
diff --git a/include/asm-sparc/mxcc.h b/include/asm-sparc/mxcc.h
index 60ef9d6fe7bc..128fe9708135 100644
--- a/include/asm-sparc/mxcc.h
+++ b/include/asm-sparc/mxcc.h
@@ -85,7 +85,7 @@
85 85
86#ifndef __ASSEMBLY__ 86#ifndef __ASSEMBLY__
87 87
88extern __inline__ void mxcc_set_stream_src(unsigned long *paddr) 88static inline void mxcc_set_stream_src(unsigned long *paddr)
89{ 89{
90 unsigned long data0 = paddr[0]; 90 unsigned long data0 = paddr[0];
91 unsigned long data1 = paddr[1]; 91 unsigned long data1 = paddr[1];
@@ -98,7 +98,7 @@ extern __inline__ void mxcc_set_stream_src(unsigned long *paddr)
98 "i" (ASI_M_MXCC) : "g2", "g3"); 98 "i" (ASI_M_MXCC) : "g2", "g3");
99} 99}
100 100
101extern __inline__ void mxcc_set_stream_dst(unsigned long *paddr) 101static inline void mxcc_set_stream_dst(unsigned long *paddr)
102{ 102{
103 unsigned long data0 = paddr[0]; 103 unsigned long data0 = paddr[0];
104 unsigned long data1 = paddr[1]; 104 unsigned long data1 = paddr[1];
@@ -111,7 +111,7 @@ extern __inline__ void mxcc_set_stream_dst(unsigned long *paddr)
111 "i" (ASI_M_MXCC) : "g2", "g3"); 111 "i" (ASI_M_MXCC) : "g2", "g3");
112} 112}
113 113
114extern __inline__ unsigned long mxcc_get_creg(void) 114static inline unsigned long mxcc_get_creg(void)
115{ 115{
116 unsigned long mxcc_control; 116 unsigned long mxcc_control;
117 117
@@ -125,7 +125,7 @@ extern __inline__ unsigned long mxcc_get_creg(void)
125 return mxcc_control; 125 return mxcc_control;
126} 126}
127 127
128extern __inline__ void mxcc_set_creg(unsigned long mxcc_control) 128static inline void mxcc_set_creg(unsigned long mxcc_control)
129{ 129{
130 __asm__ __volatile__("sta %0, [%1] %2\n\t" : : 130 __asm__ __volatile__("sta %0, [%1] %2\n\t" : :
131 "r" (mxcc_control), "r" (MXCC_CREG), 131 "r" (mxcc_control), "r" (MXCC_CREG),
diff --git a/include/asm-sparc/obio.h b/include/asm-sparc/obio.h
index 62e1d77965f3..47854a2a12cf 100644
--- a/include/asm-sparc/obio.h
+++ b/include/asm-sparc/obio.h
@@ -98,7 +98,7 @@
98 98
99#ifndef __ASSEMBLY__ 99#ifndef __ASSEMBLY__
100 100
101extern __inline__ int bw_get_intr_mask(int sbus_level) 101static inline int bw_get_intr_mask(int sbus_level)
102{ 102{
103 int mask; 103 int mask;
104 104
@@ -109,7 +109,7 @@ extern __inline__ int bw_get_intr_mask(int sbus_level)
109 return mask; 109 return mask;
110} 110}
111 111
112extern __inline__ void bw_clear_intr_mask(int sbus_level, int mask) 112static inline void bw_clear_intr_mask(int sbus_level, int mask)
113{ 113{
114 __asm__ __volatile__ ("stha %0, [%1] %2" : : 114 __asm__ __volatile__ ("stha %0, [%1] %2" : :
115 "r" (mask), 115 "r" (mask),
@@ -117,7 +117,7 @@ extern __inline__ void bw_clear_intr_mask(int sbus_level, int mask)
117 "i" (ASI_M_CTL)); 117 "i" (ASI_M_CTL));
118} 118}
119 119
120extern __inline__ unsigned bw_get_prof_limit(int cpu) 120static inline unsigned bw_get_prof_limit(int cpu)
121{ 121{
122 unsigned limit; 122 unsigned limit;
123 123
@@ -128,7 +128,7 @@ extern __inline__ unsigned bw_get_prof_limit(int cpu)
128 return limit; 128 return limit;
129} 129}
130 130
131extern __inline__ void bw_set_prof_limit(int cpu, unsigned limit) 131static inline void bw_set_prof_limit(int cpu, unsigned limit)
132{ 132{
133 __asm__ __volatile__ ("sta %0, [%1] %2" : : 133 __asm__ __volatile__ ("sta %0, [%1] %2" : :
134 "r" (limit), 134 "r" (limit),
@@ -136,7 +136,7 @@ extern __inline__ void bw_set_prof_limit(int cpu, unsigned limit)
136 "i" (ASI_M_CTL)); 136 "i" (ASI_M_CTL));
137} 137}
138 138
139extern __inline__ unsigned bw_get_ctrl(int cpu) 139static inline unsigned bw_get_ctrl(int cpu)
140{ 140{
141 unsigned ctrl; 141 unsigned ctrl;
142 142
@@ -147,7 +147,7 @@ extern __inline__ unsigned bw_get_ctrl(int cpu)
147 return ctrl; 147 return ctrl;
148} 148}
149 149
150extern __inline__ void bw_set_ctrl(int cpu, unsigned ctrl) 150static inline void bw_set_ctrl(int cpu, unsigned ctrl)
151{ 151{
152 __asm__ __volatile__ ("sta %0, [%1] %2" : : 152 __asm__ __volatile__ ("sta %0, [%1] %2" : :
153 "r" (ctrl), 153 "r" (ctrl),
@@ -157,7 +157,7 @@ extern __inline__ void bw_set_ctrl(int cpu, unsigned ctrl)
157 157
158extern unsigned char cpu_leds[32]; 158extern unsigned char cpu_leds[32];
159 159
160extern __inline__ void show_leds(int cpuid) 160static inline void show_leds(int cpuid)
161{ 161{
162 cpuid &= 0x1e; 162 cpuid &= 0x1e;
163 __asm__ __volatile__ ("stba %0, [%1] %2" : : 163 __asm__ __volatile__ ("stba %0, [%1] %2" : :
@@ -166,7 +166,7 @@ extern __inline__ void show_leds(int cpuid)
166 "i" (ASI_M_CTL)); 166 "i" (ASI_M_CTL));
167} 167}
168 168
169extern __inline__ unsigned cc_get_ipen(void) 169static inline unsigned cc_get_ipen(void)
170{ 170{
171 unsigned pending; 171 unsigned pending;
172 172
@@ -177,7 +177,7 @@ extern __inline__ unsigned cc_get_ipen(void)
177 return pending; 177 return pending;
178} 178}
179 179
180extern __inline__ void cc_set_iclr(unsigned clear) 180static inline void cc_set_iclr(unsigned clear)
181{ 181{
182 __asm__ __volatile__ ("stha %0, [%1] %2" : : 182 __asm__ __volatile__ ("stha %0, [%1] %2" : :
183 "r" (clear), 183 "r" (clear),
@@ -185,7 +185,7 @@ extern __inline__ void cc_set_iclr(unsigned clear)
185 "i" (ASI_M_MXCC)); 185 "i" (ASI_M_MXCC));
186} 186}
187 187
188extern __inline__ unsigned cc_get_imsk(void) 188static inline unsigned cc_get_imsk(void)
189{ 189{
190 unsigned mask; 190 unsigned mask;
191 191
@@ -196,7 +196,7 @@ extern __inline__ unsigned cc_get_imsk(void)
196 return mask; 196 return mask;
197} 197}
198 198
199extern __inline__ void cc_set_imsk(unsigned mask) 199static inline void cc_set_imsk(unsigned mask)
200{ 200{
201 __asm__ __volatile__ ("stha %0, [%1] %2" : : 201 __asm__ __volatile__ ("stha %0, [%1] %2" : :
202 "r" (mask), 202 "r" (mask),
@@ -204,7 +204,7 @@ extern __inline__ void cc_set_imsk(unsigned mask)
204 "i" (ASI_M_MXCC)); 204 "i" (ASI_M_MXCC));
205} 205}
206 206
207extern __inline__ unsigned cc_get_imsk_other(int cpuid) 207static inline unsigned cc_get_imsk_other(int cpuid)
208{ 208{
209 unsigned mask; 209 unsigned mask;
210 210
@@ -215,7 +215,7 @@ extern __inline__ unsigned cc_get_imsk_other(int cpuid)
215 return mask; 215 return mask;
216} 216}
217 217
218extern __inline__ void cc_set_imsk_other(int cpuid, unsigned mask) 218static inline void cc_set_imsk_other(int cpuid, unsigned mask)
219{ 219{
220 __asm__ __volatile__ ("stha %0, [%1] %2" : : 220 __asm__ __volatile__ ("stha %0, [%1] %2" : :
221 "r" (mask), 221 "r" (mask),
@@ -223,7 +223,7 @@ extern __inline__ void cc_set_imsk_other(int cpuid, unsigned mask)
223 "i" (ASI_M_CTL)); 223 "i" (ASI_M_CTL));
224} 224}
225 225
226extern __inline__ void cc_set_igen(unsigned gen) 226static inline void cc_set_igen(unsigned gen)
227{ 227{
228 __asm__ __volatile__ ("sta %0, [%1] %2" : : 228 __asm__ __volatile__ ("sta %0, [%1] %2" : :
229 "r" (gen), 229 "r" (gen),
@@ -239,7 +239,7 @@ extern __inline__ void cc_set_igen(unsigned gen)
239#define IGEN_MESSAGE(bcast, devid, sid, levels) \ 239#define IGEN_MESSAGE(bcast, devid, sid, levels) \
240 (((bcast) << 31) | ((devid) << 23) | ((sid) << 15) | (levels)) 240 (((bcast) << 31) | ((devid) << 23) | ((sid) << 15) | (levels))
241 241
242extern __inline__ void sun4d_send_ipi(int cpu, int level) 242static inline void sun4d_send_ipi(int cpu, int level)
243{ 243{
244 cc_set_igen(IGEN_MESSAGE(0, cpu << 3, 6 + ((level >> 1) & 7), 1 << (level - 1))); 244 cc_set_igen(IGEN_MESSAGE(0, cpu << 3, 6 + ((level >> 1) & 7), 1 << (level - 1)));
245} 245}
diff --git a/include/asm-sparc/pci.h b/include/asm-sparc/pci.h
index 97052baf90c1..38644742f011 100644
--- a/include/asm-sparc/pci.h
+++ b/include/asm-sparc/pci.h
@@ -15,12 +15,12 @@
15 15
16#define PCI_IRQ_NONE 0xffffffff 16#define PCI_IRQ_NONE 0xffffffff
17 17
18extern inline void pcibios_set_master(struct pci_dev *dev) 18static inline void pcibios_set_master(struct pci_dev *dev)
19{ 19{
20 /* No special bus mastering setup handling */ 20 /* No special bus mastering setup handling */
21} 21}
22 22
23extern inline void pcibios_penalize_isa_irq(int irq, int active) 23static inline void pcibios_penalize_isa_irq(int irq, int active)
24{ 24{
25 /* We don't do dynamic PCI IRQ allocation */ 25 /* We don't do dynamic PCI IRQ allocation */
26} 26}
@@ -137,7 +137,7 @@ extern void pci_dma_sync_sg_for_device(struct pci_dev *hwdev, struct scatterlist
137 * only drive the low 24-bits during PCI bus mastering, then 137 * only drive the low 24-bits during PCI bus mastering, then
138 * you would pass 0x00ffffff as the mask to this function. 138 * you would pass 0x00ffffff as the mask to this function.
139 */ 139 */
140extern inline int pci_dma_supported(struct pci_dev *hwdev, u64 mask) 140static inline int pci_dma_supported(struct pci_dev *hwdev, u64 mask)
141{ 141{
142 return 1; 142 return 1;
143} 143}
diff --git a/include/asm-sparc/pgtable.h b/include/asm-sparc/pgtable.h
index 8395ad2f1c09..a14e98677500 100644
--- a/include/asm-sparc/pgtable.h
+++ b/include/asm-sparc/pgtable.h
@@ -154,7 +154,7 @@ BTFIXUPDEF_CALL_CONST(int, pte_present, pte_t)
154BTFIXUPDEF_CALL(void, pte_clear, pte_t *) 154BTFIXUPDEF_CALL(void, pte_clear, pte_t *)
155BTFIXUPDEF_CALL(int, pte_read, pte_t) 155BTFIXUPDEF_CALL(int, pte_read, pte_t)
156 156
157extern __inline__ int pte_none(pte_t pte) 157static inline int pte_none(pte_t pte)
158{ 158{
159 return !(pte_val(pte) & ~BTFIXUP_SETHI(none_mask)); 159 return !(pte_val(pte) & ~BTFIXUP_SETHI(none_mask));
160} 160}
@@ -167,7 +167,7 @@ BTFIXUPDEF_CALL_CONST(int, pmd_bad, pmd_t)
167BTFIXUPDEF_CALL_CONST(int, pmd_present, pmd_t) 167BTFIXUPDEF_CALL_CONST(int, pmd_present, pmd_t)
168BTFIXUPDEF_CALL(void, pmd_clear, pmd_t *) 168BTFIXUPDEF_CALL(void, pmd_clear, pmd_t *)
169 169
170extern __inline__ int pmd_none(pmd_t pmd) 170static inline int pmd_none(pmd_t pmd)
171{ 171{
172 return !(pmd_val(pmd) & ~BTFIXUP_SETHI(none_mask)); 172 return !(pmd_val(pmd) & ~BTFIXUP_SETHI(none_mask));
173} 173}
@@ -194,20 +194,20 @@ BTFIXUPDEF_HALF(pte_writei)
194BTFIXUPDEF_HALF(pte_dirtyi) 194BTFIXUPDEF_HALF(pte_dirtyi)
195BTFIXUPDEF_HALF(pte_youngi) 195BTFIXUPDEF_HALF(pte_youngi)
196 196
197extern int pte_write(pte_t pte) __attribute_const__; 197static int pte_write(pte_t pte) __attribute_const__;
198extern __inline__ int pte_write(pte_t pte) 198static inline int pte_write(pte_t pte)
199{ 199{
200 return pte_val(pte) & BTFIXUP_HALF(pte_writei); 200 return pte_val(pte) & BTFIXUP_HALF(pte_writei);
201} 201}
202 202
203extern int pte_dirty(pte_t pte) __attribute_const__; 203static int pte_dirty(pte_t pte) __attribute_const__;
204extern __inline__ int pte_dirty(pte_t pte) 204static inline int pte_dirty(pte_t pte)
205{ 205{
206 return pte_val(pte) & BTFIXUP_HALF(pte_dirtyi); 206 return pte_val(pte) & BTFIXUP_HALF(pte_dirtyi);
207} 207}
208 208
209extern int pte_young(pte_t pte) __attribute_const__; 209static int pte_young(pte_t pte) __attribute_const__;
210extern __inline__ int pte_young(pte_t pte) 210static inline int pte_young(pte_t pte)
211{ 211{
212 return pte_val(pte) & BTFIXUP_HALF(pte_youngi); 212 return pte_val(pte) & BTFIXUP_HALF(pte_youngi);
213} 213}
@@ -217,8 +217,8 @@ extern __inline__ int pte_young(pte_t pte)
217 */ 217 */
218BTFIXUPDEF_HALF(pte_filei) 218BTFIXUPDEF_HALF(pte_filei)
219 219
220extern int pte_file(pte_t pte) __attribute_const__; 220static int pte_file(pte_t pte) __attribute_const__;
221extern __inline__ int pte_file(pte_t pte) 221static inline int pte_file(pte_t pte)
222{ 222{
223 return pte_val(pte) & BTFIXUP_HALF(pte_filei); 223 return pte_val(pte) & BTFIXUP_HALF(pte_filei);
224} 224}
@@ -229,20 +229,20 @@ BTFIXUPDEF_HALF(pte_wrprotecti)
229BTFIXUPDEF_HALF(pte_mkcleani) 229BTFIXUPDEF_HALF(pte_mkcleani)
230BTFIXUPDEF_HALF(pte_mkoldi) 230BTFIXUPDEF_HALF(pte_mkoldi)
231 231
232extern pte_t pte_wrprotect(pte_t pte) __attribute_const__; 232static pte_t pte_wrprotect(pte_t pte) __attribute_const__;
233extern __inline__ pte_t pte_wrprotect(pte_t pte) 233static inline pte_t pte_wrprotect(pte_t pte)
234{ 234{
235 return __pte(pte_val(pte) & ~BTFIXUP_HALF(pte_wrprotecti)); 235 return __pte(pte_val(pte) & ~BTFIXUP_HALF(pte_wrprotecti));
236} 236}
237 237
238extern pte_t pte_mkclean(pte_t pte) __attribute_const__; 238static pte_t pte_mkclean(pte_t pte) __attribute_const__;
239extern __inline__ pte_t pte_mkclean(pte_t pte) 239static inline pte_t pte_mkclean(pte_t pte)
240{ 240{
241 return __pte(pte_val(pte) & ~BTFIXUP_HALF(pte_mkcleani)); 241 return __pte(pte_val(pte) & ~BTFIXUP_HALF(pte_mkcleani));
242} 242}
243 243
244extern pte_t pte_mkold(pte_t pte) __attribute_const__; 244static pte_t pte_mkold(pte_t pte) __attribute_const__;
245extern __inline__ pte_t pte_mkold(pte_t pte) 245static inline pte_t pte_mkold(pte_t pte)
246{ 246{
247 return __pte(pte_val(pte) & ~BTFIXUP_HALF(pte_mkoldi)); 247 return __pte(pte_val(pte) & ~BTFIXUP_HALF(pte_mkoldi));
248} 248}
@@ -278,8 +278,8 @@ BTFIXUPDEF_CALL_CONST(pte_t, mk_pte_io, unsigned long, pgprot_t, int)
278 278
279BTFIXUPDEF_INT(pte_modify_mask) 279BTFIXUPDEF_INT(pte_modify_mask)
280 280
281extern pte_t pte_modify(pte_t pte, pgprot_t newprot) __attribute_const__; 281static pte_t pte_modify(pte_t pte, pgprot_t newprot) __attribute_const__;
282extern __inline__ pte_t pte_modify(pte_t pte, pgprot_t newprot) 282static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
283{ 283{
284 return __pte((pte_val(pte) & BTFIXUP_INT(pte_modify_mask)) | 284 return __pte((pte_val(pte) & BTFIXUP_INT(pte_modify_mask)) |
285 pgprot_val(newprot)); 285 pgprot_val(newprot));
@@ -386,13 +386,13 @@ extern struct ctx_list ctx_used; /* Head of used contexts list */
386 386
387#define NO_CONTEXT -1 387#define NO_CONTEXT -1
388 388
389extern __inline__ void remove_from_ctx_list(struct ctx_list *entry) 389static inline void remove_from_ctx_list(struct ctx_list *entry)
390{ 390{
391 entry->next->prev = entry->prev; 391 entry->next->prev = entry->prev;
392 entry->prev->next = entry->next; 392 entry->prev->next = entry->next;
393} 393}
394 394
395extern __inline__ void add_to_ctx_list(struct ctx_list *head, struct ctx_list *entry) 395static inline void add_to_ctx_list(struct ctx_list *head, struct ctx_list *entry)
396{ 396{
397 entry->next = head; 397 entry->next = head;
398 (entry->prev = head->prev)->next = entry; 398 (entry->prev = head->prev)->next = entry;
@@ -401,7 +401,7 @@ extern __inline__ void add_to_ctx_list(struct ctx_list *head, struct ctx_list *e
401#define add_to_free_ctxlist(entry) add_to_ctx_list(&ctx_free, entry) 401#define add_to_free_ctxlist(entry) add_to_ctx_list(&ctx_free, entry)
402#define add_to_used_ctxlist(entry) add_to_ctx_list(&ctx_used, entry) 402#define add_to_used_ctxlist(entry) add_to_ctx_list(&ctx_used, entry)
403 403
404extern __inline__ unsigned long 404static inline unsigned long
405__get_phys (unsigned long addr) 405__get_phys (unsigned long addr)
406{ 406{
407 switch (sparc_cpu_model){ 407 switch (sparc_cpu_model){
@@ -416,7 +416,7 @@ __get_phys (unsigned long addr)
416 } 416 }
417} 417}
418 418
419extern __inline__ int 419static inline int
420__get_iospace (unsigned long addr) 420__get_iospace (unsigned long addr)
421{ 421{
422 switch (sparc_cpu_model){ 422 switch (sparc_cpu_model){
diff --git a/include/asm-sparc/pgtsrmmu.h b/include/asm-sparc/pgtsrmmu.h
index ee3b9d93187c..edeb9811e728 100644
--- a/include/asm-sparc/pgtsrmmu.h
+++ b/include/asm-sparc/pgtsrmmu.h
@@ -148,7 +148,7 @@ extern void *srmmu_nocache_pool;
148#define __nocache_fix(VADDR) __va(__nocache_pa(VADDR)) 148#define __nocache_fix(VADDR) __va(__nocache_pa(VADDR))
149 149
150/* Accessing the MMU control register. */ 150/* Accessing the MMU control register. */
151extern __inline__ unsigned int srmmu_get_mmureg(void) 151static inline unsigned int srmmu_get_mmureg(void)
152{ 152{
153 unsigned int retval; 153 unsigned int retval;
154 __asm__ __volatile__("lda [%%g0] %1, %0\n\t" : 154 __asm__ __volatile__("lda [%%g0] %1, %0\n\t" :
@@ -157,14 +157,14 @@ extern __inline__ unsigned int srmmu_get_mmureg(void)
157 return retval; 157 return retval;
158} 158}
159 159
160extern __inline__ void srmmu_set_mmureg(unsigned long regval) 160static inline void srmmu_set_mmureg(unsigned long regval)
161{ 161{
162 __asm__ __volatile__("sta %0, [%%g0] %1\n\t" : : 162 __asm__ __volatile__("sta %0, [%%g0] %1\n\t" : :
163 "r" (regval), "i" (ASI_M_MMUREGS) : "memory"); 163 "r" (regval), "i" (ASI_M_MMUREGS) : "memory");
164 164
165} 165}
166 166
167extern __inline__ void srmmu_set_ctable_ptr(unsigned long paddr) 167static inline void srmmu_set_ctable_ptr(unsigned long paddr)
168{ 168{
169 paddr = ((paddr >> 4) & SRMMU_CTX_PMASK); 169 paddr = ((paddr >> 4) & SRMMU_CTX_PMASK);
170 __asm__ __volatile__("sta %0, [%1] %2\n\t" : : 170 __asm__ __volatile__("sta %0, [%1] %2\n\t" : :
@@ -173,7 +173,7 @@ extern __inline__ void srmmu_set_ctable_ptr(unsigned long paddr)
173 "memory"); 173 "memory");
174} 174}
175 175
176extern __inline__ unsigned long srmmu_get_ctable_ptr(void) 176static inline unsigned long srmmu_get_ctable_ptr(void)
177{ 177{
178 unsigned int retval; 178 unsigned int retval;
179 179
@@ -184,14 +184,14 @@ extern __inline__ unsigned long srmmu_get_ctable_ptr(void)
184 return (retval & SRMMU_CTX_PMASK) << 4; 184 return (retval & SRMMU_CTX_PMASK) << 4;
185} 185}
186 186
187extern __inline__ void srmmu_set_context(int context) 187static inline void srmmu_set_context(int context)
188{ 188{
189 __asm__ __volatile__("sta %0, [%1] %2\n\t" : : 189 __asm__ __volatile__("sta %0, [%1] %2\n\t" : :
190 "r" (context), "r" (SRMMU_CTX_REG), 190 "r" (context), "r" (SRMMU_CTX_REG),
191 "i" (ASI_M_MMUREGS) : "memory"); 191 "i" (ASI_M_MMUREGS) : "memory");
192} 192}
193 193
194extern __inline__ int srmmu_get_context(void) 194static inline int srmmu_get_context(void)
195{ 195{
196 register int retval; 196 register int retval;
197 __asm__ __volatile__("lda [%1] %2, %0\n\t" : 197 __asm__ __volatile__("lda [%1] %2, %0\n\t" :
@@ -201,7 +201,7 @@ extern __inline__ int srmmu_get_context(void)
201 return retval; 201 return retval;
202} 202}
203 203
204extern __inline__ unsigned int srmmu_get_fstatus(void) 204static inline unsigned int srmmu_get_fstatus(void)
205{ 205{
206 unsigned int retval; 206 unsigned int retval;
207 207
@@ -211,7 +211,7 @@ extern __inline__ unsigned int srmmu_get_fstatus(void)
211 return retval; 211 return retval;
212} 212}
213 213
214extern __inline__ unsigned int srmmu_get_faddr(void) 214static inline unsigned int srmmu_get_faddr(void)
215{ 215{
216 unsigned int retval; 216 unsigned int retval;
217 217
@@ -222,7 +222,7 @@ extern __inline__ unsigned int srmmu_get_faddr(void)
222} 222}
223 223
224/* This is guaranteed on all SRMMU's. */ 224/* This is guaranteed on all SRMMU's. */
225extern __inline__ void srmmu_flush_whole_tlb(void) 225static inline void srmmu_flush_whole_tlb(void)
226{ 226{
227 __asm__ __volatile__("sta %%g0, [%0] %1\n\t": : 227 __asm__ __volatile__("sta %%g0, [%0] %1\n\t": :
228 "r" (0x400), /* Flush entire TLB!! */ 228 "r" (0x400), /* Flush entire TLB!! */
@@ -231,7 +231,7 @@ extern __inline__ void srmmu_flush_whole_tlb(void)
231} 231}
232 232
233/* These flush types are not available on all chips... */ 233/* These flush types are not available on all chips... */
234extern __inline__ void srmmu_flush_tlb_ctx(void) 234static inline void srmmu_flush_tlb_ctx(void)
235{ 235{
236 __asm__ __volatile__("sta %%g0, [%0] %1\n\t": : 236 __asm__ __volatile__("sta %%g0, [%0] %1\n\t": :
237 "r" (0x300), /* Flush TLB ctx.. */ 237 "r" (0x300), /* Flush TLB ctx.. */
@@ -239,7 +239,7 @@ extern __inline__ void srmmu_flush_tlb_ctx(void)
239 239
240} 240}
241 241
242extern __inline__ void srmmu_flush_tlb_region(unsigned long addr) 242static inline void srmmu_flush_tlb_region(unsigned long addr)
243{ 243{
244 addr &= SRMMU_PGDIR_MASK; 244 addr &= SRMMU_PGDIR_MASK;
245 __asm__ __volatile__("sta %%g0, [%0] %1\n\t": : 245 __asm__ __volatile__("sta %%g0, [%0] %1\n\t": :
@@ -249,7 +249,7 @@ extern __inline__ void srmmu_flush_tlb_region(unsigned long addr)
249} 249}
250 250
251 251
252extern __inline__ void srmmu_flush_tlb_segment(unsigned long addr) 252static inline void srmmu_flush_tlb_segment(unsigned long addr)
253{ 253{
254 addr &= SRMMU_REAL_PMD_MASK; 254 addr &= SRMMU_REAL_PMD_MASK;
255 __asm__ __volatile__("sta %%g0, [%0] %1\n\t": : 255 __asm__ __volatile__("sta %%g0, [%0] %1\n\t": :
@@ -258,7 +258,7 @@ extern __inline__ void srmmu_flush_tlb_segment(unsigned long addr)
258 258
259} 259}
260 260
261extern __inline__ void srmmu_flush_tlb_page(unsigned long page) 261static inline void srmmu_flush_tlb_page(unsigned long page)
262{ 262{
263 page &= PAGE_MASK; 263 page &= PAGE_MASK;
264 __asm__ __volatile__("sta %%g0, [%0] %1\n\t": : 264 __asm__ __volatile__("sta %%g0, [%0] %1\n\t": :
@@ -267,7 +267,7 @@ extern __inline__ void srmmu_flush_tlb_page(unsigned long page)
267 267
268} 268}
269 269
270extern __inline__ unsigned long srmmu_hwprobe(unsigned long vaddr) 270static inline unsigned long srmmu_hwprobe(unsigned long vaddr)
271{ 271{
272 unsigned long retval; 272 unsigned long retval;
273 273
@@ -279,7 +279,7 @@ extern __inline__ unsigned long srmmu_hwprobe(unsigned long vaddr)
279 return retval; 279 return retval;
280} 280}
281 281
282extern __inline__ int 282static inline int
283srmmu_get_pte (unsigned long addr) 283srmmu_get_pte (unsigned long addr)
284{ 284{
285 register unsigned long entry; 285 register unsigned long entry;
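
Note on the conversions above (and in the sparc headers that follow): under the GNU C89 rules these files were written for, "extern inline" supplies an inline body but never an out-of-line definition, so any call the compiler declines to inline ends up referencing a symbol nothing emits, and the meaning changes again under C99 inline semantics. "static inline" avoids both problems by giving every including translation unit its own fallback copy. A minimal userspace sketch of the resulting pattern (hypothetical helper, not from the patch):

/* static-inline-demo.c: the "static inline" form these headers convert to.
 * Illustrative only; not kernel code. */
#include <stdio.h>

/* Each .c file that includes this gets its own inlinable copy, so the
 * symbol always resolves even when the compiler chooses not to inline. */
static inline unsigned int mask_low_bits(unsigned int v, unsigned int bits)
{
        return v & ((1u << bits) - 1);
}

int main(void)
{
        printf("%u\n", mask_low_bits(0x1234, 8));   /* prints 52 (0x34) */
        return 0;
}
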
diff --git a/include/asm-sparc/processor.h b/include/asm-sparc/processor.h
index 5a7a1a8d29ac..6fbb3f0af8d8 100644
--- a/include/asm-sparc/processor.h
+++ b/include/asm-sparc/processor.h
@@ -79,7 +79,7 @@ struct thread_struct {
79extern unsigned long thread_saved_pc(struct task_struct *t); 79extern unsigned long thread_saved_pc(struct task_struct *t);
80 80
81/* Do necessary setup to start up a newly executed thread. */ 81/* Do necessary setup to start up a newly executed thread. */
82extern __inline__ void start_thread(struct pt_regs * regs, unsigned long pc, 82static inline void start_thread(struct pt_regs * regs, unsigned long pc,
83 unsigned long sp) 83 unsigned long sp)
84{ 84{
85 register unsigned long zero asm("g1"); 85 register unsigned long zero asm("g1");
diff --git a/include/asm-sparc/psr.h b/include/asm-sparc/psr.h
index 9778b8c8b15b..19c978051118 100644
--- a/include/asm-sparc/psr.h
+++ b/include/asm-sparc/psr.h
@@ -38,7 +38,7 @@
38 38
39#ifndef __ASSEMBLY__ 39#ifndef __ASSEMBLY__
40/* Get the %psr register. */ 40/* Get the %psr register. */
41extern __inline__ unsigned int get_psr(void) 41static inline unsigned int get_psr(void)
42{ 42{
43 unsigned int psr; 43 unsigned int psr;
44 __asm__ __volatile__( 44 __asm__ __volatile__(
@@ -53,7 +53,7 @@ extern __inline__ unsigned int get_psr(void)
53 return psr; 53 return psr;
54} 54}
55 55
56extern __inline__ void put_psr(unsigned int new_psr) 56static inline void put_psr(unsigned int new_psr)
57{ 57{
58 __asm__ __volatile__( 58 __asm__ __volatile__(
59 "wr %0, 0x0, %%psr\n\t" 59 "wr %0, 0x0, %%psr\n\t"
@@ -72,7 +72,7 @@ extern __inline__ void put_psr(unsigned int new_psr)
72 72
73extern unsigned int fsr_storage; 73extern unsigned int fsr_storage;
74 74
75extern __inline__ unsigned int get_fsr(void) 75static inline unsigned int get_fsr(void)
76{ 76{
77 unsigned int fsr = 0; 77 unsigned int fsr = 0;
78 78
diff --git a/include/asm-sparc/sbi.h b/include/asm-sparc/sbi.h
index 739ccac5dcf2..86a603ac7b20 100644
--- a/include/asm-sparc/sbi.h
+++ b/include/asm-sparc/sbi.h
@@ -65,7 +65,7 @@ struct sbi_regs {
65 65
66#ifndef __ASSEMBLY__ 66#ifndef __ASSEMBLY__
67 67
68extern __inline__ int acquire_sbi(int devid, int mask) 68static inline int acquire_sbi(int devid, int mask)
69{ 69{
70 __asm__ __volatile__ ("swapa [%2] %3, %0" : 70 __asm__ __volatile__ ("swapa [%2] %3, %0" :
71 "=r" (mask) : 71 "=r" (mask) :
@@ -75,7 +75,7 @@ extern __inline__ int acquire_sbi(int devid, int mask)
75 return mask; 75 return mask;
76} 76}
77 77
78extern __inline__ void release_sbi(int devid, int mask) 78static inline void release_sbi(int devid, int mask)
79{ 79{
80 __asm__ __volatile__ ("sta %0, [%1] %2" : : 80 __asm__ __volatile__ ("sta %0, [%1] %2" : :
81 "r" (mask), 81 "r" (mask),
@@ -83,7 +83,7 @@ extern __inline__ void release_sbi(int devid, int mask)
83 "i" (ASI_M_CTL)); 83 "i" (ASI_M_CTL));
84} 84}
85 85
86extern __inline__ void set_sbi_tid(int devid, int targetid) 86static inline void set_sbi_tid(int devid, int targetid)
87{ 87{
88 __asm__ __volatile__ ("sta %0, [%1] %2" : : 88 __asm__ __volatile__ ("sta %0, [%1] %2" : :
89 "r" (targetid), 89 "r" (targetid),
@@ -91,7 +91,7 @@ extern __inline__ void set_sbi_tid(int devid, int targetid)
91 "i" (ASI_M_CTL)); 91 "i" (ASI_M_CTL));
92} 92}
93 93
94extern __inline__ int get_sbi_ctl(int devid, int cfgno) 94static inline int get_sbi_ctl(int devid, int cfgno)
95{ 95{
96 int cfg; 96 int cfg;
97 97
@@ -102,7 +102,7 @@ extern __inline__ int get_sbi_ctl(int devid, int cfgno)
102 return cfg; 102 return cfg;
103} 103}
104 104
105extern __inline__ void set_sbi_ctl(int devid, int cfgno, int cfg) 105static inline void set_sbi_ctl(int devid, int cfgno, int cfg)
106{ 106{
107 __asm__ __volatile__ ("sta %0, [%1] %2" : : 107 __asm__ __volatile__ ("sta %0, [%1] %2" : :
108 "r" (cfg), 108 "r" (cfg),
diff --git a/include/asm-sparc/sbus.h b/include/asm-sparc/sbus.h
index 3a8b3908728a..a13cddcecec5 100644
--- a/include/asm-sparc/sbus.h
+++ b/include/asm-sparc/sbus.h
@@ -28,12 +28,12 @@
28 * numbers + offsets, and vice versa. 28 * numbers + offsets, and vice versa.
29 */ 29 */
30 30
31extern __inline__ unsigned long sbus_devaddr(int slotnum, unsigned long offset) 31static inline unsigned long sbus_devaddr(int slotnum, unsigned long offset)
32{ 32{
33 return (unsigned long) (SUN_SBUS_BVADDR+((slotnum)<<25)+(offset)); 33 return (unsigned long) (SUN_SBUS_BVADDR+((slotnum)<<25)+(offset));
34} 34}
35 35
36extern __inline__ int sbus_dev_slot(unsigned long dev_addr) 36static inline int sbus_dev_slot(unsigned long dev_addr)
37{ 37{
38 return (int) (((dev_addr)-SUN_SBUS_BVADDR)>>25); 38 return (int) (((dev_addr)-SUN_SBUS_BVADDR)>>25);
39} 39}
@@ -80,7 +80,7 @@ struct sbus_bus {
80 80
81extern struct sbus_bus *sbus_root; 81extern struct sbus_bus *sbus_root;
82 82
83extern __inline__ int 83static inline int
84sbus_is_slave(struct sbus_dev *dev) 84sbus_is_slave(struct sbus_dev *dev)
85{ 85{
86 /* XXX Have to write this for sun4c's */ 86 /* XXX Have to write this for sun4c's */
diff --git a/include/asm-sparc/smp.h b/include/asm-sparc/smp.h
index 4f96d8333a12..580c51d011df 100644
--- a/include/asm-sparc/smp.h
+++ b/include/asm-sparc/smp.h
@@ -60,22 +60,22 @@ BTFIXUPDEF_BLACKBOX(load_current)
60#define smp_cross_call(func,arg1,arg2,arg3,arg4,arg5) BTFIXUP_CALL(smp_cross_call)(func,arg1,arg2,arg3,arg4,arg5) 60#define smp_cross_call(func,arg1,arg2,arg3,arg4,arg5) BTFIXUP_CALL(smp_cross_call)(func,arg1,arg2,arg3,arg4,arg5)
61#define smp_message_pass(target,msg,data,wait) BTFIXUP_CALL(smp_message_pass)(target,msg,data,wait) 61#define smp_message_pass(target,msg,data,wait) BTFIXUP_CALL(smp_message_pass)(target,msg,data,wait)
62 62
63extern __inline__ void xc0(smpfunc_t func) { smp_cross_call(func, 0, 0, 0, 0, 0); } 63static inline void xc0(smpfunc_t func) { smp_cross_call(func, 0, 0, 0, 0, 0); }
64extern __inline__ void xc1(smpfunc_t func, unsigned long arg1) 64static inline void xc1(smpfunc_t func, unsigned long arg1)
65{ smp_cross_call(func, arg1, 0, 0, 0, 0); } 65{ smp_cross_call(func, arg1, 0, 0, 0, 0); }
66extern __inline__ void xc2(smpfunc_t func, unsigned long arg1, unsigned long arg2) 66static inline void xc2(smpfunc_t func, unsigned long arg1, unsigned long arg2)
67{ smp_cross_call(func, arg1, arg2, 0, 0, 0); } 67{ smp_cross_call(func, arg1, arg2, 0, 0, 0); }
68extern __inline__ void xc3(smpfunc_t func, unsigned long arg1, unsigned long arg2, 68static inline void xc3(smpfunc_t func, unsigned long arg1, unsigned long arg2,
69 unsigned long arg3) 69 unsigned long arg3)
70{ smp_cross_call(func, arg1, arg2, arg3, 0, 0); } 70{ smp_cross_call(func, arg1, arg2, arg3, 0, 0); }
71extern __inline__ void xc4(smpfunc_t func, unsigned long arg1, unsigned long arg2, 71static inline void xc4(smpfunc_t func, unsigned long arg1, unsigned long arg2,
72 unsigned long arg3, unsigned long arg4) 72 unsigned long arg3, unsigned long arg4)
73{ smp_cross_call(func, arg1, arg2, arg3, arg4, 0); } 73{ smp_cross_call(func, arg1, arg2, arg3, arg4, 0); }
74extern __inline__ void xc5(smpfunc_t func, unsigned long arg1, unsigned long arg2, 74static inline void xc5(smpfunc_t func, unsigned long arg1, unsigned long arg2,
75 unsigned long arg3, unsigned long arg4, unsigned long arg5) 75 unsigned long arg3, unsigned long arg4, unsigned long arg5)
76{ smp_cross_call(func, arg1, arg2, arg3, arg4, arg5); } 76{ smp_cross_call(func, arg1, arg2, arg3, arg4, arg5); }
77 77
78extern __inline__ int smp_call_function(void (*func)(void *info), void *info, int nonatomic, int wait) 78static inline int smp_call_function(void (*func)(void *info), void *info, int nonatomic, int wait)
79{ 79{
80 xc1((smpfunc_t)func, (unsigned long)info); 80 xc1((smpfunc_t)func, (unsigned long)info);
81 return 0; 81 return 0;
@@ -84,16 +84,16 @@ extern __inline__ int smp_call_function(void (*func)(void *info), void *info, in
84extern __volatile__ int __cpu_number_map[NR_CPUS]; 84extern __volatile__ int __cpu_number_map[NR_CPUS];
85extern __volatile__ int __cpu_logical_map[NR_CPUS]; 85extern __volatile__ int __cpu_logical_map[NR_CPUS];
86 86
87extern __inline__ int cpu_logical_map(int cpu) 87static inline int cpu_logical_map(int cpu)
88{ 88{
89 return __cpu_logical_map[cpu]; 89 return __cpu_logical_map[cpu];
90} 90}
91extern __inline__ int cpu_number_map(int cpu) 91static inline int cpu_number_map(int cpu)
92{ 92{
93 return __cpu_number_map[cpu]; 93 return __cpu_number_map[cpu];
94} 94}
95 95
96extern __inline__ int hard_smp4m_processor_id(void) 96static inline int hard_smp4m_processor_id(void)
97{ 97{
98 int cpuid; 98 int cpuid;
99 99
@@ -104,7 +104,7 @@ extern __inline__ int hard_smp4m_processor_id(void)
104 return cpuid; 104 return cpuid;
105} 105}
106 106
107extern __inline__ int hard_smp4d_processor_id(void) 107static inline int hard_smp4d_processor_id(void)
108{ 108{
109 int cpuid; 109 int cpuid;
110 110
@@ -114,7 +114,7 @@ extern __inline__ int hard_smp4d_processor_id(void)
114} 114}
115 115
116#ifndef MODULE 116#ifndef MODULE
117extern __inline__ int hard_smp_processor_id(void) 117static inline int hard_smp_processor_id(void)
118{ 118{
119 int cpuid; 119 int cpuid;
120 120
@@ -136,7 +136,7 @@ extern __inline__ int hard_smp_processor_id(void)
136 return cpuid; 136 return cpuid;
137} 137}
138#else 138#else
139extern __inline__ int hard_smp_processor_id(void) 139static inline int hard_smp_processor_id(void)
140{ 140{
141 int cpuid; 141 int cpuid;
142 142
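
The smp.h hunk above converts the xc0()..xc5() cross-call wrappers, which all funnel into the single five-argument smp_cross_call() hook with unused arguments padded to zero; smp_call_function() is just xc1() with the callback cast to smpfunc_t. A rough userspace model of that funnel (stand-in types and names, not the kernel implementation):

/* xcall-demo.c: model of the xc0..xc5 wrapper funnel shown in the smp.h
 * hunk.  Stand-in types; the kernel version dispatches to other CPUs. */
#include <stdio.h>

typedef void (*smpfunc_t)(unsigned long, unsigned long, unsigned long,
                          unsigned long, unsigned long);

static void smp_cross_call_demo(smpfunc_t func, unsigned long a1,
                                unsigned long a2, unsigned long a3,
                                unsigned long a4, unsigned long a5)
{
        func(a1, a2, a3, a4, a5);       /* in the kernel: run on other CPUs */
}

static void xc1_demo(smpfunc_t func, unsigned long arg1)
{
        smp_cross_call_demo(func, arg1, 0, 0, 0, 0);   /* pad unused args */
}

static void flush_one(unsigned long addr, unsigned long a2, unsigned long a3,
                      unsigned long a4, unsigned long a5)
{
        (void)a2; (void)a3; (void)a4; (void)a5;        /* unused in the demo */
        printf("pretend-flush at 0x%lx\n", addr);
}

int main(void)
{
        xc1_demo(flush_one, 0xf0001000UL);
        return 0;
}
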
diff --git a/include/asm-sparc/smpprim.h b/include/asm-sparc/smpprim.h
index 9b9c28ed748e..e7b6d346ae10 100644
--- a/include/asm-sparc/smpprim.h
+++ b/include/asm-sparc/smpprim.h
@@ -15,7 +15,7 @@
15 * atomic. 15 * atomic.
16 */ 16 */
17 17
18extern __inline__ __volatile__ char test_and_set(void *addr) 18static inline __volatile__ char test_and_set(void *addr)
19{ 19{
20 char state = 0; 20 char state = 0;
21 21
@@ -27,7 +27,7 @@ extern __inline__ __volatile__ char test_and_set(void *addr)
27} 27}
28 28
29/* Initialize a spin-lock. */ 29/* Initialize a spin-lock. */
30extern __inline__ __volatile__ smp_initlock(void *spinlock) 30static inline __volatile__ smp_initlock(void *spinlock)
31{ 31{
32 /* Unset the lock. */ 32 /* Unset the lock. */
33 *((unsigned char *) spinlock) = 0; 33 *((unsigned char *) spinlock) = 0;
@@ -36,7 +36,7 @@ extern __inline__ __volatile__ smp_initlock(void *spinlock)
36} 36}
37 37
38/* This routine spins until it acquires the lock at ADDR. */ 38/* This routine spins until it acquires the lock at ADDR. */
39extern __inline__ __volatile__ smp_lock(void *addr) 39static inline __volatile__ smp_lock(void *addr)
40{ 40{
41 while(test_and_set(addr) == 0xff) 41 while(test_and_set(addr) == 0xff)
42 ; 42 ;
@@ -46,7 +46,7 @@ extern __inline__ __volatile__ smp_lock(void *addr)
46} 46}
47 47
48/* This routine releases the lock at ADDR. */ 48/* This routine releases the lock at ADDR. */
49extern __inline__ __volatile__ smp_unlock(void *addr) 49static inline __volatile__ smp_unlock(void *addr)
50{ 50{
51 *((unsigned char *) addr) = 0; 51 *((unsigned char *) addr) = 0;
52} 52}
diff --git a/include/asm-sparc/spinlock.h b/include/asm-sparc/spinlock.h
index 111727a2bb4e..e344c98a6f5f 100644
--- a/include/asm-sparc/spinlock.h
+++ b/include/asm-sparc/spinlock.h
@@ -17,7 +17,7 @@
17#define __raw_spin_unlock_wait(lock) \ 17#define __raw_spin_unlock_wait(lock) \
18 do { while (__raw_spin_is_locked(lock)) cpu_relax(); } while (0) 18 do { while (__raw_spin_is_locked(lock)) cpu_relax(); } while (0)
19 19
20extern __inline__ void __raw_spin_lock(raw_spinlock_t *lock) 20static inline void __raw_spin_lock(raw_spinlock_t *lock)
21{ 21{
22 __asm__ __volatile__( 22 __asm__ __volatile__(
23 "\n1:\n\t" 23 "\n1:\n\t"
@@ -37,7 +37,7 @@ extern __inline__ void __raw_spin_lock(raw_spinlock_t *lock)
37 : "g2", "memory", "cc"); 37 : "g2", "memory", "cc");
38} 38}
39 39
40extern __inline__ int __raw_spin_trylock(raw_spinlock_t *lock) 40static inline int __raw_spin_trylock(raw_spinlock_t *lock)
41{ 41{
42 unsigned int result; 42 unsigned int result;
43 __asm__ __volatile__("ldstub [%1], %0" 43 __asm__ __volatile__("ldstub [%1], %0"
@@ -47,7 +47,7 @@ extern __inline__ int __raw_spin_trylock(raw_spinlock_t *lock)
47 return (result == 0); 47 return (result == 0);
48} 48}
49 49
50extern __inline__ void __raw_spin_unlock(raw_spinlock_t *lock) 50static inline void __raw_spin_unlock(raw_spinlock_t *lock)
51{ 51{
52 __asm__ __volatile__("stb %%g0, [%0]" : : "r" (lock) : "memory"); 52 __asm__ __volatile__("stb %%g0, [%0]" : : "r" (lock) : "memory");
53} 53}
@@ -78,7 +78,7 @@ extern __inline__ void __raw_spin_unlock(raw_spinlock_t *lock)
78 * 78 *
79 * Unfortunately this scheme limits us to ~16,000,000 cpus. 79 * Unfortunately this scheme limits us to ~16,000,000 cpus.
80 */ 80 */
81extern __inline__ void __read_lock(raw_rwlock_t *rw) 81static inline void __read_lock(raw_rwlock_t *rw)
82{ 82{
83 register raw_rwlock_t *lp asm("g1"); 83 register raw_rwlock_t *lp asm("g1");
84 lp = rw; 84 lp = rw;
@@ -98,7 +98,7 @@ do { unsigned long flags; \
98 local_irq_restore(flags); \ 98 local_irq_restore(flags); \
99} while(0) 99} while(0)
100 100
101extern __inline__ void __read_unlock(raw_rwlock_t *rw) 101static inline void __read_unlock(raw_rwlock_t *rw)
102{ 102{
103 register raw_rwlock_t *lp asm("g1"); 103 register raw_rwlock_t *lp asm("g1");
104 lp = rw; 104 lp = rw;
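
Aside from the inline conversion, the __raw_spin_trylock() body visible above is a compact illustration of the sparc byte lock: ldstub atomically writes 0xff and hands back the old value, so the lock is taken exactly when the old value was zero, and unlock is a plain store of zero. A userspace approximation, with a GCC builtin standing in for ldstub:

/* trylock-demo.c: ldstub-style byte trylock, modeled with GCC builtins.
 * Not the kernel code; the real thing is the inline asm in the hunk above. */
#include <stdio.h>

static unsigned char lock_byte;            /* 0 = unlocked, 0xff = locked */

static int raw_spin_trylock_demo(unsigned char *lock)
{
        unsigned char old = __sync_lock_test_and_set(lock, 0xff);
        return old == 0;                   /* mirrors "return (result == 0);" */
}

static void raw_spin_unlock_demo(unsigned char *lock)
{
        __sync_lock_release(lock);         /* stores 0, like "stb %%g0, [lock]" */
}

int main(void)
{
        printf("first try:    %d\n", raw_spin_trylock_demo(&lock_byte));   /* 1 */
        printf("second try:   %d\n", raw_spin_trylock_demo(&lock_byte));   /* 0 */
        raw_spin_unlock_demo(&lock_byte);
        printf("after unlock: %d\n", raw_spin_trylock_demo(&lock_byte));   /* 1 */
        return 0;
}
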
diff --git a/include/asm-sparc/system.h b/include/asm-sparc/system.h
index 3557781a4bfd..1f6b71f9e1b6 100644
--- a/include/asm-sparc/system.h
+++ b/include/asm-sparc/system.h
@@ -204,7 +204,7 @@ static inline unsigned long getipl(void)
204BTFIXUPDEF_CALL(void, ___xchg32, void) 204BTFIXUPDEF_CALL(void, ___xchg32, void)
205#endif 205#endif
206 206
207extern __inline__ unsigned long xchg_u32(__volatile__ unsigned long *m, unsigned long val) 207static inline unsigned long xchg_u32(__volatile__ unsigned long *m, unsigned long val)
208{ 208{
209#ifdef CONFIG_SMP 209#ifdef CONFIG_SMP
210 __asm__ __volatile__("swap [%2], %0" 210 __asm__ __volatile__("swap [%2], %0"
diff --git a/include/asm-sparc/traps.h b/include/asm-sparc/traps.h
index 6690ab956ea6..f62c7f878ee1 100644
--- a/include/asm-sparc/traps.h
+++ b/include/asm-sparc/traps.h
@@ -22,7 +22,7 @@ struct tt_entry {
22/* We set this to _start in system setup. */ 22/* We set this to _start in system setup. */
23extern struct tt_entry *sparc_ttable; 23extern struct tt_entry *sparc_ttable;
24 24
25extern __inline__ unsigned long get_tbr(void) 25static inline unsigned long get_tbr(void)
26{ 26{
27 unsigned long tbr; 27 unsigned long tbr;
28 28
diff --git a/include/asm-um/processor-generic.h b/include/asm-um/processor-generic.h
index 2d242360c3d6..075771c371f6 100644
--- a/include/asm-um/processor-generic.h
+++ b/include/asm-um/processor-generic.h
@@ -13,6 +13,7 @@ struct task_struct;
13#include "linux/config.h" 13#include "linux/config.h"
14#include "asm/ptrace.h" 14#include "asm/ptrace.h"
15#include "choose-mode.h" 15#include "choose-mode.h"
16#include "registers.h"
16 17
17struct mm_struct; 18struct mm_struct;
18 19
@@ -136,19 +137,15 @@ extern struct cpuinfo_um cpu_data[];
136#define current_cpu_data boot_cpu_data 137#define current_cpu_data boot_cpu_data
137#endif 138#endif
138 139
139#define KSTK_EIP(tsk) (PT_REGS_IP(&tsk->thread.regs))
140#define KSTK_ESP(tsk) (PT_REGS_SP(&tsk->thread.regs))
141#define get_wchan(p) (0)
142 140
141#ifdef CONFIG_MODE_SKAS
142#define KSTK_REG(tsk, reg) \
143 ({ union uml_pt_regs regs; \
144 get_thread_regs(&regs, tsk->thread.mode.skas.switch_buf); \
145 UPT_REG(&regs, reg); })
146#else
147#define KSTK_REG(tsk, reg) (0xbadbabe)
143#endif 148#endif
149#define get_wchan(p) (0)
144 150
145/* 151#endif
146 * Overrides for Emacs so that we follow Linus's tabbing style.
147 * Emacs will notice this stuff at the end of the file and automatically
148 * adjust the settings for this buffer only. This must remain at the end
149 * of the file.
150 * ---------------------------------------------------------------------------
151 * Local variables:
152 * c-file-style: "linux"
153 * End:
154 */
diff --git a/include/asm-um/processor-i386.h b/include/asm-um/processor-i386.h
index 431bad3ae9d7..4108a579eb92 100644
--- a/include/asm-um/processor-i386.h
+++ b/include/asm-um/processor-i386.h
@@ -43,17 +43,10 @@ static inline void rep_nop(void)
43#define ARCH_IS_STACKGROW(address) \ 43#define ARCH_IS_STACKGROW(address) \
44 (address + 32 >= UPT_SP(&current->thread.regs.regs)) 44 (address + 32 >= UPT_SP(&current->thread.regs.regs))
45 45
46#define KSTK_EIP(tsk) KSTK_REG(tsk, EIP)
47#define KSTK_ESP(tsk) KSTK_REG(tsk, UESP)
48#define KSTK_EBP(tsk) KSTK_REG(tsk, EBP)
49
46#include "asm/processor-generic.h" 50#include "asm/processor-generic.h"
47 51
48#endif 52#endif
49
50/*
51 * Overrides for Emacs so that we follow Linus's tabbing style.
52 * Emacs will notice this stuff at the end of the file and automatically
53 * adjust the settings for this buffer only. This must remain at the end
54 * of the file.
55 * ---------------------------------------------------------------------------
56 * Local variables:
57 * c-file-style: "linux"
58 * End:
59 */
diff --git a/include/asm-um/processor-x86_64.h b/include/asm-um/processor-x86_64.h
index 0beb9a42ae05..e1e1255a1d36 100644
--- a/include/asm-um/processor-x86_64.h
+++ b/include/asm-um/processor-x86_64.h
@@ -36,17 +36,9 @@ extern inline void rep_nop(void)
36#define ARCH_IS_STACKGROW(address) \ 36#define ARCH_IS_STACKGROW(address) \
37 (address + 128 >= UPT_SP(&current->thread.regs.regs)) 37 (address + 128 >= UPT_SP(&current->thread.regs.regs))
38 38
39#define KSTK_EIP(tsk) KSTK_REG(tsk, RIP)
40#define KSTK_ESP(tsk) KSTK_REG(tsk, RSP)
41
39#include "asm/processor-generic.h" 42#include "asm/processor-generic.h"
40 43
41#endif 44#endif
42
43/*
44 * Overrides for Emacs so that we follow Linus's tabbing style.
45 * Emacs will notice this stuff at the end of the file and automatically
46 * adjust the settings for this buffer only. This must remain at the end
47 * of the file.
48 * ---------------------------------------------------------------------------
49 * Local variables:
50 * c-file-style: "linux"
51 * End:
52 */
diff --git a/include/linux/atmdev.h b/include/linux/atmdev.h
index 9f374cfa1b05..f1fd849e5535 100644
--- a/include/linux/atmdev.h
+++ b/include/linux/atmdev.h
@@ -457,7 +457,7 @@ static inline void atm_dev_put(struct atm_dev *dev)
457 457
458int atm_charge(struct atm_vcc *vcc,int truesize); 458int atm_charge(struct atm_vcc *vcc,int truesize);
459struct sk_buff *atm_alloc_charge(struct atm_vcc *vcc,int pdu_size, 459struct sk_buff *atm_alloc_charge(struct atm_vcc *vcc,int pdu_size,
460 int gfp_flags); 460 unsigned int __nocast gfp_flags);
461int atm_pcr_goal(struct atm_trafprm *tp); 461int atm_pcr_goal(struct atm_trafprm *tp);
462 462
463void vcc_release_async(struct atm_vcc *vcc, int reply); 463void vcc_release_async(struct atm_vcc *vcc, int reply);
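
This is the first of several hunks (connector.h, dn_nsp.h, dn_route.h, ip_vs.h, textsearch.h, the rxrpc headers, xfrm.h, lib/ts_*) that change plain int allocation-flag parameters to "unsigned int __nocast". __nocast is a sparse annotation: it asks the checker to flag implicit conversions on that argument, so GFP-style flags and ordinary integers don't get mixed up silently (a dedicated gfp_t type arrived in later kernels). The sketch below models the intent in plain C with a wrapper struct; the kernel relies on sparse, not on a struct:

/* nocast-demo.c: userspace model of why allocation-flag parameters get a
 * type the checker will not let plain integers slip into.  Only a way to
 * show the class of mistake being targeted. */
#include <stddef.h>
#include <stdio.h>

struct alloc_flags { unsigned int bits; };              /* opaque flag carrier */

#define DEMO_GFP_KERNEL ((struct alloc_flags){ .bits = 0x10u })
#define DEMO_GFP_ATOMIC ((struct alloc_flags){ .bits = 0x20u })

static void *demo_alloc(size_t size, struct alloc_flags gfp)
{
        printf("alloc %zu bytes, flags 0x%x\n", size, gfp.bits);
        return NULL;                                    /* demo only */
}

int main(void)
{
        demo_alloc(128, DEMO_GFP_ATOMIC);               /* fine */
        /* demo_alloc(128, 0);  -- would not compile here; passing a bare
         * integer where flags are expected is exactly what the __nocast
         * annotation lets sparse warn about in the real prototypes. */
        return 0;
}
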
diff --git a/include/linux/bfs_fs.h b/include/linux/bfs_fs.h
index c1237aa92e38..8ed6dfdcd783 100644
--- a/include/linux/bfs_fs.h
+++ b/include/linux/bfs_fs.h
@@ -20,19 +20,19 @@
20 20
21/* BFS inode layout on disk */ 21/* BFS inode layout on disk */
22struct bfs_inode { 22struct bfs_inode {
23 __u16 i_ino; 23 __le16 i_ino;
24 __u16 i_unused; 24 __u16 i_unused;
25 __u32 i_sblock; 25 __le32 i_sblock;
26 __u32 i_eblock; 26 __le32 i_eblock;
27 __u32 i_eoffset; 27 __le32 i_eoffset;
28 __u32 i_vtype; 28 __le32 i_vtype;
29 __u32 i_mode; 29 __le32 i_mode;
30 __s32 i_uid; 30 __le32 i_uid;
31 __s32 i_gid; 31 __le32 i_gid;
32 __u32 i_nlink; 32 __le32 i_nlink;
33 __u32 i_atime; 33 __le32 i_atime;
34 __u32 i_mtime; 34 __le32 i_mtime;
35 __u32 i_ctime; 35 __le32 i_ctime;
36 __u32 i_padding[4]; 36 __u32 i_padding[4];
37}; 37};
38 38
@@ -41,17 +41,17 @@ struct bfs_inode {
41#define BFS_DIRS_PER_BLOCK 32 41#define BFS_DIRS_PER_BLOCK 32
42 42
43struct bfs_dirent { 43struct bfs_dirent {
44 __u16 ino; 44 __le16 ino;
45 char name[BFS_NAMELEN]; 45 char name[BFS_NAMELEN];
46}; 46};
47 47
48/* BFS superblock layout on disk */ 48/* BFS superblock layout on disk */
49struct bfs_super_block { 49struct bfs_super_block {
50 __u32 s_magic; 50 __le32 s_magic;
51 __u32 s_start; 51 __le32 s_start;
52 __u32 s_end; 52 __le32 s_end;
53 __s32 s_from; 53 __le32 s_from;
54 __s32 s_to; 54 __le32 s_to;
55 __s32 s_bfrom; 55 __s32 s_bfrom;
56 __s32 s_bto; 56 __s32 s_bto;
57 char s_fsname[6]; 57 char s_fsname[6];
@@ -66,15 +66,15 @@ struct bfs_super_block {
66#define BFS_INO2OFF(ino) \ 66#define BFS_INO2OFF(ino) \
67 ((__u32)(((ino) - BFS_ROOT_INO) * sizeof(struct bfs_inode)) + BFS_BSIZE) 67 ((__u32)(((ino) - BFS_ROOT_INO) * sizeof(struct bfs_inode)) + BFS_BSIZE)
68#define BFS_NZFILESIZE(ip) \ 68#define BFS_NZFILESIZE(ip) \
69 ((cpu_to_le32((ip)->i_eoffset) + 1) - cpu_to_le32((ip)->i_sblock) * BFS_BSIZE) 69 ((le32_to_cpu((ip)->i_eoffset) + 1) - le32_to_cpu((ip)->i_sblock) * BFS_BSIZE)
70 70
71#define BFS_FILESIZE(ip) \ 71#define BFS_FILESIZE(ip) \
72 ((ip)->i_sblock == 0 ? 0 : BFS_NZFILESIZE(ip)) 72 ((ip)->i_sblock == 0 ? 0 : BFS_NZFILESIZE(ip))
73 73
74#define BFS_FILEBLOCKS(ip) \ 74#define BFS_FILEBLOCKS(ip) \
75 ((ip)->i_sblock == 0 ? 0 : (cpu_to_le32((ip)->i_eblock) + 1) - cpu_to_le32((ip)->i_sblock)) 75 ((ip)->i_sblock == 0 ? 0 : (le32_to_cpu((ip)->i_eblock) + 1) - le32_to_cpu((ip)->i_sblock))
76#define BFS_UNCLEAN(bfs_sb, sb) \ 76#define BFS_UNCLEAN(bfs_sb, sb) \
77 ((cpu_to_le32(bfs_sb->s_from) != -1) && (cpu_to_le32(bfs_sb->s_to) != -1) && !(sb->s_flags & MS_RDONLY)) 77 ((le32_to_cpu(bfs_sb->s_from) != -1) && (le32_to_cpu(bfs_sb->s_to) != -1) && !(sb->s_flags & MS_RDONLY))
78 78
79 79
80#endif /* _LINUX_BFS_FS_H */ 80#endif /* _LINUX_BFS_FS_H */
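
The bfs_fs.h hunk annotates the on-disk structures with the little-endian types (__le16/__le32) and switches the size/block macros from cpu_to_le32() to le32_to_cpu(). Numerically the two helpers perform the same byte swap on a given host, so this is about declaring the conversion in the right direction: the fields hold little-endian disk data and the macros want CPU-order values, which is what le32_to_cpu() expresses and what sparse's endian checking can now verify against the __le32 types. A standalone sketch of that conversion direction (hand-rolled helper, not the kernel one):

/* endian-demo.c: reading an on-disk little-endian field into CPU order.
 * Hand-rolled helper for illustration; the kernel uses le32_to_cpu(). */
#include <stdio.h>
#include <stdint.h>

static uint32_t le32_to_cpu_demo(const unsigned char b[4])
{
        /* Assemble from explicit byte positions: independent of host order. */
        return (uint32_t)b[0] | ((uint32_t)b[1] << 8) |
               ((uint32_t)b[2] << 16) | ((uint32_t)b[3] << 24);
}

int main(void)
{
        /* 512 (0x00000200) as it would appear in a little-endian disk field */
        unsigned char disk_field[4] = { 0x00, 0x02, 0x00, 0x00 };

        printf("i_eoffset = %u\n", le32_to_cpu_demo(disk_field));   /* 512 */
        return 0;
}
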
diff --git a/include/linux/connector.h b/include/linux/connector.h
index 86d4b0a81713..96582c9911ac 100644
--- a/include/linux/connector.h
+++ b/include/linux/connector.h
@@ -149,7 +149,7 @@ struct cn_dev {
149 149
150int cn_add_callback(struct cb_id *, char *, void (*callback) (void *)); 150int cn_add_callback(struct cb_id *, char *, void (*callback) (void *));
151void cn_del_callback(struct cb_id *); 151void cn_del_callback(struct cb_id *);
152int cn_netlink_send(struct cn_msg *, u32, int); 152int cn_netlink_send(struct cn_msg *, u32, unsigned int __nocast);
153 153
154int cn_queue_add_callback(struct cn_queue_dev *dev, char *name, struct cb_id *id, void (*callback)(void *)); 154int cn_queue_add_callback(struct cn_queue_dev *dev, char *name, struct cb_id *id, void (*callback)(void *));
155void cn_queue_del_callback(struct cn_queue_dev *dev, struct cb_id *id); 155void cn_queue_del_callback(struct cn_queue_dev *dev, struct cb_id *id);
diff --git a/include/linux/inetdevice.h b/include/linux/inetdevice.h
index 7e1e15f934f3..fd7af86151b1 100644
--- a/include/linux/inetdevice.h
+++ b/include/linux/inetdevice.h
@@ -142,13 +142,21 @@ static __inline__ int bad_mask(u32 mask, u32 addr)
142 142
143#define endfor_ifa(in_dev) } 143#define endfor_ifa(in_dev) }
144 144
145static inline struct in_device *__in_dev_get_rcu(const struct net_device *dev)
146{
147 struct in_device *in_dev = dev->ip_ptr;
148 if (in_dev)
149 in_dev = rcu_dereference(in_dev);
150 return in_dev;
151}
152
145static __inline__ struct in_device * 153static __inline__ struct in_device *
146in_dev_get(const struct net_device *dev) 154in_dev_get(const struct net_device *dev)
147{ 155{
148 struct in_device *in_dev; 156 struct in_device *in_dev;
149 157
150 rcu_read_lock(); 158 rcu_read_lock();
151 in_dev = dev->ip_ptr; 159 in_dev = __in_dev_get_rcu(dev);
152 if (in_dev) 160 if (in_dev)
153 atomic_inc(&in_dev->refcnt); 161 atomic_inc(&in_dev->refcnt);
154 rcu_read_unlock(); 162 rcu_read_unlock();
@@ -156,7 +164,7 @@ in_dev_get(const struct net_device *dev)
156} 164}
157 165
158static __inline__ struct in_device * 166static __inline__ struct in_device *
159__in_dev_get(const struct net_device *dev) 167__in_dev_get_rtnl(const struct net_device *dev)
160{ 168{
161 return (struct in_device*)dev->ip_ptr; 169 return (struct in_device*)dev->ip_ptr;
162} 170}
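
The inetdevice.h hunk splits the old __in_dev_get() into two explicitly named accessors: __in_dev_get_rcu(), which must be called inside an RCU read-side critical section and goes through rcu_dereference(), and __in_dev_get_rtnl() for callers that hold the RTNL lock instead. The later hunks in clip.c, netpoll.c and pktgen.c are callers being converted; pktgen also drops its __in_dev_put(), since an RCU reader takes no reference. A hedged userspace model of the calling pattern (the rcu_* names below are local stand-ins, not the kernel primitives, and the structs are trimmed to one field):

/* rcu-accessor-demo.c: the __in_dev_get_rcu() calling discipline, modeled
 * in userspace.  Stand-in primitives and simplified structs only. */
#include <stdio.h>

struct in_device  { unsigned int primary_addr; };   /* simplified */
struct net_device { struct in_device *ip_ptr; };

/* Stand-ins: in the kernel these delimit an RCU read-side critical section. */
static void rcu_read_lock(void)   { }
static void rcu_read_unlock(void) { }
#define rcu_dereference(p) (p)

static struct in_device *__in_dev_get_rcu(const struct net_device *dev)
{
        /* caller must be between rcu_read_lock() and rcu_read_unlock() */
        return rcu_dereference(dev->ip_ptr);
}

int main(void)
{
        struct in_device cfg = { .primary_addr = 0x7f000001 };   /* 127.0.0.1 */
        struct net_device dev = { .ip_ptr = &cfg };
        unsigned int addr = 0;

        rcu_read_lock();
        {
                struct in_device *in_dev = __in_dev_get_rcu(&dev);
                if (in_dev)
                        addr = in_dev->primary_addr;
                /* no refcount is taken, so there is no __in_dev_put() on this
                 * path, which is why pktgen can drop that call below */
        }
        rcu_read_unlock();

        printf("addr = 0x%08x\n", addr);
        return 0;
}
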
diff --git a/include/linux/ipv6.h b/include/linux/ipv6.h
index bb6f88e14061..e0b922785d98 100644
--- a/include/linux/ipv6.h
+++ b/include/linux/ipv6.h
@@ -372,8 +372,9 @@ static inline struct raw6_sock *raw6_sk(const struct sock *sk)
372#define inet_v6_ipv6only(__sk) 0 372#define inet_v6_ipv6only(__sk) 0
373#endif /* defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE) */ 373#endif /* defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE) */
374 374
375#define INET6_MATCH(__sk, __saddr, __daddr, __ports, __dif) \ 375#define INET6_MATCH(__sk, __hash, __saddr, __daddr, __ports, __dif)\
376 (((*((__u32 *)&(inet_sk(__sk)->dport))) == (__ports)) && \ 376 (((__sk)->sk_hash == (__hash)) && \
377 ((*((__u32 *)&(inet_sk(__sk)->dport))) == (__ports)) && \
377 ((__sk)->sk_family == AF_INET6) && \ 378 ((__sk)->sk_family == AF_INET6) && \
378 ipv6_addr_equal(&inet6_sk(__sk)->daddr, (__saddr)) && \ 379 ipv6_addr_equal(&inet6_sk(__sk)->daddr, (__saddr)) && \
379 ipv6_addr_equal(&inet6_sk(__sk)->rcv_saddr, (__daddr)) && \ 380 ipv6_addr_equal(&inet6_sk(__sk)->rcv_saddr, (__daddr)) && \
diff --git a/include/linux/pci_ids.h b/include/linux/pci_ids.h
index eb36fd293b41..f74ed9462475 100644
--- a/include/linux/pci_ids.h
+++ b/include/linux/pci_ids.h
@@ -185,6 +185,7 @@
185#define PCI_DEVICE_ID_LSI_61C102 0x0901 185#define PCI_DEVICE_ID_LSI_61C102 0x0901
186#define PCI_DEVICE_ID_LSI_63C815 0x1000 186#define PCI_DEVICE_ID_LSI_63C815 0x1000
187#define PCI_DEVICE_ID_LSI_SAS1064 0x0050 187#define PCI_DEVICE_ID_LSI_SAS1064 0x0050
188#define PCI_DEVICE_ID_LSI_SAS1064R 0x0411
188#define PCI_DEVICE_ID_LSI_SAS1066 0x005E 189#define PCI_DEVICE_ID_LSI_SAS1066 0x005E
189#define PCI_DEVICE_ID_LSI_SAS1068 0x0054 190#define PCI_DEVICE_ID_LSI_SAS1068 0x0054
190#define PCI_DEVICE_ID_LSI_SAS1064A 0x005C 191#define PCI_DEVICE_ID_LSI_SAS1064A 0x005C
@@ -560,6 +561,7 @@
560#define PCI_VENDOR_ID_DELL 0x1028 561#define PCI_VENDOR_ID_DELL 0x1028
561#define PCI_DEVICE_ID_DELL_RACIII 0x0008 562#define PCI_DEVICE_ID_DELL_RACIII 0x0008
562#define PCI_DEVICE_ID_DELL_RAC4 0x0012 563#define PCI_DEVICE_ID_DELL_RAC4 0x0012
564#define PCI_DEVICE_ID_DELL_PERC5 0x0015
563 565
564#define PCI_VENDOR_ID_MATROX 0x102B 566#define PCI_VENDOR_ID_MATROX 0x102B
565#define PCI_DEVICE_ID_MATROX_MGA_2 0x0518 567#define PCI_DEVICE_ID_MATROX_MGA_2 0x0518
diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h
index 2741c0c55e83..466c879f82b8 100644
--- a/include/linux/skbuff.h
+++ b/include/linux/skbuff.h
@@ -155,8 +155,6 @@ struct skb_shared_info {
155#define SKB_DATAREF_SHIFT 16 155#define SKB_DATAREF_SHIFT 16
156#define SKB_DATAREF_MASK ((1 << SKB_DATAREF_SHIFT) - 1) 156#define SKB_DATAREF_MASK ((1 << SKB_DATAREF_SHIFT) - 1)
157 157
158extern struct timeval skb_tv_base;
159
160struct skb_timeval { 158struct skb_timeval {
161 u32 off_sec; 159 u32 off_sec;
162 u32 off_usec; 160 u32 off_usec;
@@ -175,7 +173,7 @@ enum {
175 * @prev: Previous buffer in list 173 * @prev: Previous buffer in list
176 * @list: List we are on 174 * @list: List we are on
177 * @sk: Socket we are owned by 175 * @sk: Socket we are owned by
178 * @tstamp: Time we arrived stored as offset to skb_tv_base 176 * @tstamp: Time we arrived
179 * @dev: Device we arrived on/are leaving by 177 * @dev: Device we arrived on/are leaving by
180 * @input_dev: Device we arrived on 178 * @input_dev: Device we arrived on
181 * @h: Transport layer header 179 * @h: Transport layer header
@@ -1255,10 +1253,6 @@ static inline void skb_get_timestamp(const struct sk_buff *skb, struct timeval *
1255{ 1253{
1256 stamp->tv_sec = skb->tstamp.off_sec; 1254 stamp->tv_sec = skb->tstamp.off_sec;
1257 stamp->tv_usec = skb->tstamp.off_usec; 1255 stamp->tv_usec = skb->tstamp.off_usec;
1258 if (skb->tstamp.off_sec) {
1259 stamp->tv_sec += skb_tv_base.tv_sec;
1260 stamp->tv_usec += skb_tv_base.tv_usec;
1261 }
1262} 1256}
1263 1257
1264/** 1258/**
@@ -1272,8 +1266,8 @@ static inline void skb_get_timestamp(const struct sk_buff *skb, struct timeval *
1272 */ 1266 */
1273static inline void skb_set_timestamp(struct sk_buff *skb, const struct timeval *stamp) 1267static inline void skb_set_timestamp(struct sk_buff *skb, const struct timeval *stamp)
1274{ 1268{
1275 skb->tstamp.off_sec = stamp->tv_sec - skb_tv_base.tv_sec; 1269 skb->tstamp.off_sec = stamp->tv_sec;
1276 skb->tstamp.off_usec = stamp->tv_usec - skb_tv_base.tv_usec; 1270 skb->tstamp.off_usec = stamp->tv_usec;
1277} 1271}
1278 1272
1279extern void __net_timestamp(struct sk_buff *skb); 1273extern void __net_timestamp(struct sk_buff *skb);
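
The skbuff.h hunk removes skb_tv_base: skb->tstamp now holds plain absolute seconds/microseconds, so skb_get_timestamp() and skb_set_timestamp() become straight copies instead of adding and subtracting a global base. A standalone sketch of the resulting accessor shape (struct and helper names mirror the hunk, but this is an illustration, not the kernel header):

/* skb-tstamp-demo.c: the simplified timestamp accessors after the patch,
 * modeled in userspace.  Types are local copies for illustration only. */
#include <stdio.h>
#include <sys/time.h>

struct skb_timeval { unsigned int off_sec; unsigned int off_usec; };
struct fake_skb    { struct skb_timeval tstamp; };

static void skb_set_timestamp(struct fake_skb *skb, const struct timeval *stamp)
{
        skb->tstamp.off_sec  = stamp->tv_sec;    /* plain copy, no skb_tv_base */
        skb->tstamp.off_usec = stamp->tv_usec;
}

static void skb_get_timestamp(const struct fake_skb *skb, struct timeval *stamp)
{
        stamp->tv_sec  = skb->tstamp.off_sec;
        stamp->tv_usec = skb->tstamp.off_usec;
}

int main(void)
{
        struct fake_skb skb;
        struct timeval now, out;

        gettimeofday(&now, NULL);
        skb_set_timestamp(&skb, &now);
        skb_get_timestamp(&skb, &out);
        printf("%ld.%06ld\n", (long)out.tv_sec, (long)out.tv_usec);
        return 0;
}
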
diff --git a/include/linux/tc_ematch/tc_em_meta.h b/include/linux/tc_ematch/tc_em_meta.h
index 081b1ee8516e..e21937cf91d0 100644
--- a/include/linux/tc_ematch/tc_em_meta.h
+++ b/include/linux/tc_ematch/tc_em_meta.h
@@ -71,7 +71,7 @@ enum
71 TCF_META_ID_SK_SNDBUF, 71 TCF_META_ID_SK_SNDBUF,
72 TCF_META_ID_SK_ALLOCS, 72 TCF_META_ID_SK_ALLOCS,
73 TCF_META_ID_SK_ROUTE_CAPS, 73 TCF_META_ID_SK_ROUTE_CAPS,
74 TCF_META_ID_SK_HASHENT, 74 TCF_META_ID_SK_HASH,
75 TCF_META_ID_SK_LINGERTIME, 75 TCF_META_ID_SK_LINGERTIME,
76 TCF_META_ID_SK_ACK_BACKLOG, 76 TCF_META_ID_SK_ACK_BACKLOG,
77 TCF_META_ID_SK_MAX_ACK_BACKLOG, 77 TCF_META_ID_SK_MAX_ACK_BACKLOG,
diff --git a/include/linux/textsearch.h b/include/linux/textsearch.h
index 941f45ac117a..1a4990e448e9 100644
--- a/include/linux/textsearch.h
+++ b/include/linux/textsearch.h
@@ -158,7 +158,8 @@ extern unsigned int textsearch_find_continuous(struct ts_config *,
158#define TS_PRIV_ALIGNTO 8 158#define TS_PRIV_ALIGNTO 8
159#define TS_PRIV_ALIGN(len) (((len) + TS_PRIV_ALIGNTO-1) & ~(TS_PRIV_ALIGNTO-1)) 159#define TS_PRIV_ALIGN(len) (((len) + TS_PRIV_ALIGNTO-1) & ~(TS_PRIV_ALIGNTO-1))
160 160
161static inline struct ts_config *alloc_ts_config(size_t payload, int gfp_mask) 161static inline struct ts_config *alloc_ts_config(size_t payload,
162 unsigned int __nocast gfp_mask)
162{ 163{
163 struct ts_config *conf; 164 struct ts_config *conf;
164 165
diff --git a/include/net/dn_nsp.h b/include/net/dn_nsp.h
index 6bbeafa73e8b..8a0891e2e888 100644
--- a/include/net/dn_nsp.h
+++ b/include/net/dn_nsp.h
@@ -19,9 +19,9 @@ extern void dn_nsp_send_data_ack(struct sock *sk);
19extern void dn_nsp_send_oth_ack(struct sock *sk); 19extern void dn_nsp_send_oth_ack(struct sock *sk);
20extern void dn_nsp_delayed_ack(struct sock *sk); 20extern void dn_nsp_delayed_ack(struct sock *sk);
21extern void dn_send_conn_ack(struct sock *sk); 21extern void dn_send_conn_ack(struct sock *sk);
22extern void dn_send_conn_conf(struct sock *sk, int gfp); 22extern void dn_send_conn_conf(struct sock *sk, unsigned int __nocast gfp);
23extern void dn_nsp_send_disc(struct sock *sk, unsigned char type, 23extern void dn_nsp_send_disc(struct sock *sk, unsigned char type,
24 unsigned short reason, int gfp); 24 unsigned short reason, unsigned int __nocast gfp);
25extern void dn_nsp_return_disc(struct sk_buff *skb, unsigned char type, 25extern void dn_nsp_return_disc(struct sk_buff *skb, unsigned char type,
26 unsigned short reason); 26 unsigned short reason);
27extern void dn_nsp_send_link(struct sock *sk, unsigned char lsflags, char fcval); 27extern void dn_nsp_send_link(struct sock *sk, unsigned char lsflags, char fcval);
@@ -29,14 +29,14 @@ extern void dn_nsp_send_conninit(struct sock *sk, unsigned char flags);
29 29
30extern void dn_nsp_output(struct sock *sk); 30extern void dn_nsp_output(struct sock *sk);
31extern int dn_nsp_check_xmit_queue(struct sock *sk, struct sk_buff *skb, struct sk_buff_head *q, unsigned short acknum); 31extern int dn_nsp_check_xmit_queue(struct sock *sk, struct sk_buff *skb, struct sk_buff_head *q, unsigned short acknum);
32extern void dn_nsp_queue_xmit(struct sock *sk, struct sk_buff *skb, int gfp, int oob); 32extern void dn_nsp_queue_xmit(struct sock *sk, struct sk_buff *skb, unsigned int __nocast gfp, int oob);
33extern unsigned long dn_nsp_persist(struct sock *sk); 33extern unsigned long dn_nsp_persist(struct sock *sk);
34extern int dn_nsp_xmit_timeout(struct sock *sk); 34extern int dn_nsp_xmit_timeout(struct sock *sk);
35 35
36extern int dn_nsp_rx(struct sk_buff *); 36extern int dn_nsp_rx(struct sk_buff *);
37extern int dn_nsp_backlog_rcv(struct sock *sk, struct sk_buff *skb); 37extern int dn_nsp_backlog_rcv(struct sock *sk, struct sk_buff *skb);
38 38
39extern struct sk_buff *dn_alloc_skb(struct sock *sk, int size, int pri); 39extern struct sk_buff *dn_alloc_skb(struct sock *sk, int size, unsigned int __nocast pri);
40extern struct sk_buff *dn_alloc_send_skb(struct sock *sk, size_t *size, int noblock, long timeo, int *err); 40extern struct sk_buff *dn_alloc_send_skb(struct sock *sk, size_t *size, int noblock, long timeo, int *err);
41 41
42#define NSP_REASON_OK 0 /* No error */ 42#define NSP_REASON_OK 0 /* No error */
diff --git a/include/net/dn_route.h b/include/net/dn_route.h
index d084721db198..11fe973cf383 100644
--- a/include/net/dn_route.h
+++ b/include/net/dn_route.h
@@ -15,7 +15,7 @@
15 GNU General Public License for more details. 15 GNU General Public License for more details.
16*******************************************************************************/ 16*******************************************************************************/
17 17
18extern struct sk_buff *dn_alloc_skb(struct sock *sk, int size, int pri); 18extern struct sk_buff *dn_alloc_skb(struct sock *sk, int size, unsigned int __nocast pri);
19extern int dn_route_output_sock(struct dst_entry **pprt, struct flowi *, struct sock *sk, int flags); 19extern int dn_route_output_sock(struct dst_entry **pprt, struct flowi *, struct sock *sk, int flags);
20extern int dn_cache_dump(struct sk_buff *skb, struct netlink_callback *cb); 20extern int dn_cache_dump(struct sk_buff *skb, struct netlink_callback *cb);
21extern int dn_cache_getroute(struct sk_buff *skb, struct nlmsghdr *nlh, void *arg); 21extern int dn_cache_getroute(struct sk_buff *skb, struct nlmsghdr *nlh, void *arg);
diff --git a/include/net/inet6_hashtables.h b/include/net/inet6_hashtables.h
index 03df3b157960..5a2beed5a770 100644
--- a/include/net/inet6_hashtables.h
+++ b/include/net/inet6_hashtables.h
@@ -26,19 +26,18 @@
26struct inet_hashinfo; 26struct inet_hashinfo;
27 27
28/* I have no idea if this is a good hash for v6 or not. -DaveM */ 28/* I have no idea if this is a good hash for v6 or not. -DaveM */
29static inline int inet6_ehashfn(const struct in6_addr *laddr, const u16 lport, 29static inline unsigned int inet6_ehashfn(const struct in6_addr *laddr, const u16 lport,
30 const struct in6_addr *faddr, const u16 fport, 30 const struct in6_addr *faddr, const u16 fport)
31 const int ehash_size)
32{ 31{
33 int hashent = (lport ^ fport); 32 unsigned int hashent = (lport ^ fport);
34 33
35 hashent ^= (laddr->s6_addr32[3] ^ faddr->s6_addr32[3]); 34 hashent ^= (laddr->s6_addr32[3] ^ faddr->s6_addr32[3]);
36 hashent ^= hashent >> 16; 35 hashent ^= hashent >> 16;
37 hashent ^= hashent >> 8; 36 hashent ^= hashent >> 8;
38 return (hashent & (ehash_size - 1)); 37 return hashent;
39} 38}
40 39
41static inline int inet6_sk_ehashfn(const struct sock *sk, const int ehash_size) 40static inline int inet6_sk_ehashfn(const struct sock *sk)
42{ 41{
43 const struct inet_sock *inet = inet_sk(sk); 42 const struct inet_sock *inet = inet_sk(sk);
44 const struct ipv6_pinfo *np = inet6_sk(sk); 43 const struct ipv6_pinfo *np = inet6_sk(sk);
@@ -46,7 +45,7 @@ static inline int inet6_sk_ehashfn(const struct sock *sk, const int ehash_size)
46 const struct in6_addr *faddr = &np->daddr; 45 const struct in6_addr *faddr = &np->daddr;
47 const __u16 lport = inet->num; 46 const __u16 lport = inet->num;
48 const __u16 fport = inet->dport; 47 const __u16 fport = inet->dport;
49 return inet6_ehashfn(laddr, lport, faddr, fport, ehash_size); 48 return inet6_ehashfn(laddr, lport, faddr, fport);
50} 49}
51 50
52/* 51/*
@@ -69,14 +68,14 @@ static inline struct sock *
69 /* Optimize here for direct hit, only listening connections can 68 /* Optimize here for direct hit, only listening connections can
70 * have wildcards anyways. 69 * have wildcards anyways.
71 */ 70 */
72 const int hash = inet6_ehashfn(daddr, hnum, saddr, sport, 71 unsigned int hash = inet6_ehashfn(daddr, hnum, saddr, sport);
73 hashinfo->ehash_size); 72 struct inet_ehash_bucket *head = inet_ehash_bucket(hashinfo, hash);
74 struct inet_ehash_bucket *head = &hashinfo->ehash[hash];
75 73
74 prefetch(head->chain.first);
76 read_lock(&head->lock); 75 read_lock(&head->lock);
77 sk_for_each(sk, node, &head->chain) { 76 sk_for_each(sk, node, &head->chain) {
78 /* For IPV6 do the cheaper port and family tests first. */ 77 /* For IPV6 do the cheaper port and family tests first. */
79 if (INET6_MATCH(sk, saddr, daddr, ports, dif)) 78 if (INET6_MATCH(sk, hash, saddr, daddr, ports, dif))
80 goto hit; /* You sunk my battleship! */ 79 goto hit; /* You sunk my battleship! */
81 } 80 }
82 /* Must check for a TIME_WAIT'er before going to listener hash. */ 81 /* Must check for a TIME_WAIT'er before going to listener hash. */
diff --git a/include/net/inet_hashtables.h b/include/net/inet_hashtables.h
index 646b6ea7fe26..f50f95968340 100644
--- a/include/net/inet_hashtables.h
+++ b/include/net/inet_hashtables.h
@@ -40,7 +40,7 @@
40struct inet_ehash_bucket { 40struct inet_ehash_bucket {
41 rwlock_t lock; 41 rwlock_t lock;
42 struct hlist_head chain; 42 struct hlist_head chain;
43} __attribute__((__aligned__(8))); 43};
44 44
45/* There are a few simple rules, which allow for local port reuse by 45/* There are a few simple rules, which allow for local port reuse by
46 * an application. In essence: 46 * an application. In essence:
@@ -108,7 +108,7 @@ struct inet_hashinfo {
108 struct inet_bind_hashbucket *bhash; 108 struct inet_bind_hashbucket *bhash;
109 109
110 int bhash_size; 110 int bhash_size;
111 int ehash_size; 111 unsigned int ehash_size;
112 112
113 /* All sockets in TCP_LISTEN state will be in here. This is the only 113 /* All sockets in TCP_LISTEN state will be in here. This is the only
114 * table where wildcard'd TCP sockets can exist. Hash function here 114 * table where wildcard'd TCP sockets can exist. Hash function here
@@ -130,17 +130,16 @@ struct inet_hashinfo {
130 int port_rover; 130 int port_rover;
131}; 131};
132 132
133static inline int inet_ehashfn(const __u32 laddr, const __u16 lport, 133static inline unsigned int inet_ehashfn(const __u32 laddr, const __u16 lport,
134 const __u32 faddr, const __u16 fport, 134 const __u32 faddr, const __u16 fport)
135 const int ehash_size)
136{ 135{
137 int h = (laddr ^ lport) ^ (faddr ^ fport); 136 unsigned int h = (laddr ^ lport) ^ (faddr ^ fport);
138 h ^= h >> 16; 137 h ^= h >> 16;
139 h ^= h >> 8; 138 h ^= h >> 8;
140 return h & (ehash_size - 1); 139 return h;
141} 140}
142 141
143static inline int inet_sk_ehashfn(const struct sock *sk, const int ehash_size) 142static inline int inet_sk_ehashfn(const struct sock *sk)
144{ 143{
145 const struct inet_sock *inet = inet_sk(sk); 144 const struct inet_sock *inet = inet_sk(sk);
146 const __u32 laddr = inet->rcv_saddr; 145 const __u32 laddr = inet->rcv_saddr;
@@ -148,7 +147,14 @@ static inline int inet_sk_ehashfn(const struct sock *sk, const int ehash_size)
148 const __u32 faddr = inet->daddr; 147 const __u32 faddr = inet->daddr;
149 const __u16 fport = inet->dport; 148 const __u16 fport = inet->dport;
150 149
151 return inet_ehashfn(laddr, lport, faddr, fport, ehash_size); 150 return inet_ehashfn(laddr, lport, faddr, fport);
151}
152
153static inline struct inet_ehash_bucket *inet_ehash_bucket(
154 struct inet_hashinfo *hashinfo,
155 unsigned int hash)
156{
157 return &hashinfo->ehash[hash & (hashinfo->ehash_size - 1)];
152} 158}
153 159
154extern struct inet_bind_bucket * 160extern struct inet_bind_bucket *
@@ -235,9 +241,11 @@ static inline void __inet_hash(struct inet_hashinfo *hashinfo,
235 lock = &hashinfo->lhash_lock; 241 lock = &hashinfo->lhash_lock;
236 inet_listen_wlock(hashinfo); 242 inet_listen_wlock(hashinfo);
237 } else { 243 } else {
238 sk->sk_hashent = inet_sk_ehashfn(sk, hashinfo->ehash_size); 244 struct inet_ehash_bucket *head;
239 list = &hashinfo->ehash[sk->sk_hashent].chain; 245 sk->sk_hash = inet_sk_ehashfn(sk);
240 lock = &hashinfo->ehash[sk->sk_hashent].lock; 246 head = inet_ehash_bucket(hashinfo, sk->sk_hash);
247 list = &head->chain;
248 lock = &head->lock;
241 write_lock(lock); 249 write_lock(lock);
242 } 250 }
243 __sk_add_node(sk, list); 251 __sk_add_node(sk, list);
@@ -268,9 +276,8 @@ static inline void inet_unhash(struct inet_hashinfo *hashinfo, struct sock *sk)
268 inet_listen_wlock(hashinfo); 276 inet_listen_wlock(hashinfo);
269 lock = &hashinfo->lhash_lock; 277 lock = &hashinfo->lhash_lock;
270 } else { 278 } else {
271 struct inet_ehash_bucket *head = &hashinfo->ehash[sk->sk_hashent]; 279 lock = &inet_ehash_bucket(hashinfo, sk->sk_hash)->lock;
272 lock = &head->lock; 280 write_lock_bh(lock);
273 write_lock_bh(&head->lock);
274 } 281 }
275 282
276 if (__sk_del_node_init(sk)) 283 if (__sk_del_node_init(sk))
@@ -337,23 +344,27 @@ sherry_cache:
337#define INET_ADDR_COOKIE(__name, __saddr, __daddr) \ 344#define INET_ADDR_COOKIE(__name, __saddr, __daddr) \
338 const __u64 __name = (((__u64)(__daddr)) << 32) | ((__u64)(__saddr)); 345 const __u64 __name = (((__u64)(__daddr)) << 32) | ((__u64)(__saddr));
339#endif /* __BIG_ENDIAN */ 346#endif /* __BIG_ENDIAN */
340#define INET_MATCH(__sk, __cookie, __saddr, __daddr, __ports, __dif)\ 347#define INET_MATCH(__sk, __hash, __cookie, __saddr, __daddr, __ports, __dif)\
341 (((*((__u64 *)&(inet_sk(__sk)->daddr))) == (__cookie)) && \ 348 (((__sk)->sk_hash == (__hash)) && \
349 ((*((__u64 *)&(inet_sk(__sk)->daddr))) == (__cookie)) && \
342 ((*((__u32 *)&(inet_sk(__sk)->dport))) == (__ports)) && \ 350 ((*((__u32 *)&(inet_sk(__sk)->dport))) == (__ports)) && \
343 (!((__sk)->sk_bound_dev_if) || ((__sk)->sk_bound_dev_if == (__dif)))) 351 (!((__sk)->sk_bound_dev_if) || ((__sk)->sk_bound_dev_if == (__dif))))
344#define INET_TW_MATCH(__sk, __cookie, __saddr, __daddr, __ports, __dif)\ 352#define INET_TW_MATCH(__sk, __hash, __cookie, __saddr, __daddr, __ports, __dif)\
345 (((*((__u64 *)&(inet_twsk(__sk)->tw_daddr))) == (__cookie)) && \ 353 (((__sk)->sk_hash == (__hash)) && \
354 ((*((__u64 *)&(inet_twsk(__sk)->tw_daddr))) == (__cookie)) && \
346 ((*((__u32 *)&(inet_twsk(__sk)->tw_dport))) == (__ports)) && \ 355 ((*((__u32 *)&(inet_twsk(__sk)->tw_dport))) == (__ports)) && \
347 (!((__sk)->sk_bound_dev_if) || ((__sk)->sk_bound_dev_if == (__dif)))) 356 (!((__sk)->sk_bound_dev_if) || ((__sk)->sk_bound_dev_if == (__dif))))
348#else /* 32-bit arch */ 357#else /* 32-bit arch */
349#define INET_ADDR_COOKIE(__name, __saddr, __daddr) 358#define INET_ADDR_COOKIE(__name, __saddr, __daddr)
350#define INET_MATCH(__sk, __cookie, __saddr, __daddr, __ports, __dif) \ 359#define INET_MATCH(__sk, __hash, __cookie, __saddr, __daddr, __ports, __dif) \
351 ((inet_sk(__sk)->daddr == (__saddr)) && \ 360 (((__sk)->sk_hash == (__hash)) && \
361 (inet_sk(__sk)->daddr == (__saddr)) && \
352 (inet_sk(__sk)->rcv_saddr == (__daddr)) && \ 362 (inet_sk(__sk)->rcv_saddr == (__daddr)) && \
353 ((*((__u32 *)&(inet_sk(__sk)->dport))) == (__ports)) && \ 363 ((*((__u32 *)&(inet_sk(__sk)->dport))) == (__ports)) && \
354 (!((__sk)->sk_bound_dev_if) || ((__sk)->sk_bound_dev_if == (__dif)))) 364 (!((__sk)->sk_bound_dev_if) || ((__sk)->sk_bound_dev_if == (__dif))))
355#define INET_TW_MATCH(__sk, __cookie, __saddr, __daddr, __ports, __dif) \ 365#define INET_TW_MATCH(__sk, __hash,__cookie, __saddr, __daddr, __ports, __dif) \
356 ((inet_twsk(__sk)->tw_daddr == (__saddr)) && \ 366 (((__sk)->sk_hash == (__hash)) && \
367 (inet_twsk(__sk)->tw_daddr == (__saddr)) && \
357 (inet_twsk(__sk)->tw_rcv_saddr == (__daddr)) && \ 368 (inet_twsk(__sk)->tw_rcv_saddr == (__daddr)) && \
358 ((*((__u32 *)&(inet_twsk(__sk)->tw_dport))) == (__ports)) && \ 369 ((*((__u32 *)&(inet_twsk(__sk)->tw_dport))) == (__ports)) && \
359 (!((__sk)->sk_bound_dev_if) || ((__sk)->sk_bound_dev_if == (__dif)))) 370 (!((__sk)->sk_bound_dev_if) || ((__sk)->sk_bound_dev_if == (__dif))))
@@ -378,18 +389,19 @@ static inline struct sock *
378 /* Optimize here for direct hit, only listening connections can 389 /* Optimize here for direct hit, only listening connections can
379 * have wildcards anyways. 390 * have wildcards anyways.
380 */ 391 */
381 const int hash = inet_ehashfn(daddr, hnum, saddr, sport, hashinfo->ehash_size); 392 unsigned int hash = inet_ehashfn(daddr, hnum, saddr, sport);
382 struct inet_ehash_bucket *head = &hashinfo->ehash[hash]; 393 struct inet_ehash_bucket *head = inet_ehash_bucket(hashinfo, hash);
383 394
395 prefetch(head->chain.first);
384 read_lock(&head->lock); 396 read_lock(&head->lock);
385 sk_for_each(sk, node, &head->chain) { 397 sk_for_each(sk, node, &head->chain) {
386 if (INET_MATCH(sk, acookie, saddr, daddr, ports, dif)) 398 if (INET_MATCH(sk, hash, acookie, saddr, daddr, ports, dif))
387 goto hit; /* You sunk my battleship! */ 399 goto hit; /* You sunk my battleship! */
388 } 400 }
389 401
390 /* Must check for a TIME_WAIT'er before going to listener hash. */ 402 /* Must check for a TIME_WAIT'er before going to listener hash. */
391 sk_for_each(sk, node, &(head + hashinfo->ehash_size)->chain) { 403 sk_for_each(sk, node, &(head + hashinfo->ehash_size)->chain) {
392 if (INET_TW_MATCH(sk, acookie, saddr, daddr, ports, dif)) 404 if (INET_TW_MATCH(sk, hash, acookie, saddr, daddr, ports, dif))
393 goto hit; 405 goto hit;
394 } 406 }
395 sk = NULL; 407 sk = NULL;
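
The common theme of the inet_hashtables.h and inet6_hashtables.h hunks: inet_ehashfn()/inet6_ehashfn() now return the full unmasked hash, the new inet_ehash_bucket() helper applies the table-size mask once at lookup time, and the unmasked value is cached in sk->sk_hash so INET_MATCH()/INET6_MATCH() can reject most non-matching sockets with a single integer compare before touching addresses and ports. A compact userspace model of that split (demo types only; the real table also carries per-bucket locks and the TIME_WAIT chains right after the established ones):

/* ehash-demo.c: "full hash + mask at lookup" split, reduced to a toy. */
#include <stdio.h>

struct demo_sock {
        unsigned int  sk_hash;             /* full, unmasked hash (new skc_hash) */
        unsigned int  daddr, dport;
        struct demo_sock *next;
};

struct demo_bucket { struct demo_sock *chain; };

struct demo_hashinfo {
        struct demo_bucket *ehash;
        unsigned int        ehash_size;    /* power of two */
};

static unsigned int demo_ehashfn(unsigned int laddr, unsigned int lport,
                                 unsigned int faddr, unsigned int fport)
{
        unsigned int h = (laddr ^ lport) ^ (faddr ^ fport);
        h ^= h >> 16;
        h ^= h >> 8;
        return h;                          /* no masking here any more */
}

static struct demo_bucket *demo_ehash_bucket(struct demo_hashinfo *hi,
                                             unsigned int hash)
{
        return &hi->ehash[hash & (hi->ehash_size - 1)];   /* mask applied once */
}

static struct demo_sock *demo_lookup(struct demo_hashinfo *hi, unsigned int hash,
                                     unsigned int daddr, unsigned int dport)
{
        struct demo_sock *sk;

        for (sk = demo_ehash_bucket(hi, hash)->chain; sk; sk = sk->next) {
                /* cheap cached-hash check first, full compare only on a hit */
                if (sk->sk_hash == hash && sk->daddr == daddr && sk->dport == dport)
                        return sk;
        }
        return NULL;
}

int main(void)
{
        struct demo_bucket table[16] = { { NULL } };
        struct demo_hashinfo hi = { .ehash = table, .ehash_size = 16 };
        unsigned int hash = demo_ehashfn(0x7f000001, 80, 0x7f000001, 40000);
        struct demo_sock sk = { .sk_hash = hash, .daddr = 0x7f000001,
                                .dport = 40000, .next = NULL };

        demo_ehash_bucket(&hi, hash)->chain = &sk;        /* "hash" the socket */
        printf("found: %s\n",
               demo_lookup(&hi, hash, 0x7f000001, 40000) ? "yes" : "no");
        return 0;
}
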
diff --git a/include/net/inet_timewait_sock.h b/include/net/inet_timewait_sock.h
index 3b070352e869..4ade56ef3a4d 100644
--- a/include/net/inet_timewait_sock.h
+++ b/include/net/inet_timewait_sock.h
@@ -112,6 +112,7 @@ struct inet_timewait_sock {
112#define tw_node __tw_common.skc_node 112#define tw_node __tw_common.skc_node
113#define tw_bind_node __tw_common.skc_bind_node 113#define tw_bind_node __tw_common.skc_bind_node
114#define tw_refcnt __tw_common.skc_refcnt 114#define tw_refcnt __tw_common.skc_refcnt
115#define tw_hash __tw_common.skc_hash
115#define tw_prot __tw_common.skc_prot 116#define tw_prot __tw_common.skc_prot
116 volatile unsigned char tw_substate; 117 volatile unsigned char tw_substate;
117 /* 3 bits hole, try to pack */ 118 /* 3 bits hole, try to pack */
@@ -126,7 +127,6 @@ struct inet_timewait_sock {
126 /* And these are ours. */ 127 /* And these are ours. */
127 __u8 tw_ipv6only:1; 128 __u8 tw_ipv6only:1;
128 /* 31 bits hole, try to pack */ 129 /* 31 bits hole, try to pack */
129 int tw_hashent;
130 int tw_timeout; 130 int tw_timeout;
131 unsigned long tw_ttd; 131 unsigned long tw_ttd;
132 struct inet_bind_bucket *tw_tb; 132 struct inet_bind_bucket *tw_tb;
diff --git a/include/net/ip_vs.h b/include/net/ip_vs.h
index 06b4235aa016..ecb2b061f597 100644
--- a/include/net/ip_vs.h
+++ b/include/net/ip_vs.h
@@ -832,7 +832,7 @@ extern void ip_vs_app_inc_put(struct ip_vs_app *inc);
832 832
833extern int ip_vs_app_pkt_out(struct ip_vs_conn *, struct sk_buff **pskb); 833extern int ip_vs_app_pkt_out(struct ip_vs_conn *, struct sk_buff **pskb);
834extern int ip_vs_app_pkt_in(struct ip_vs_conn *, struct sk_buff **pskb); 834extern int ip_vs_app_pkt_in(struct ip_vs_conn *, struct sk_buff **pskb);
835extern int ip_vs_skb_replace(struct sk_buff *skb, int pri, 835extern int ip_vs_skb_replace(struct sk_buff *skb, unsigned int __nocast pri,
836 char *o_buf, int o_len, char *n_buf, int n_len); 836 char *o_buf, int o_len, char *n_buf, int n_len);
837extern int ip_vs_app_init(void); 837extern int ip_vs_app_init(void);
838extern void ip_vs_app_cleanup(void); 838extern void ip_vs_app_cleanup(void);
diff --git a/include/net/sock.h b/include/net/sock.h
index 8c48fbecb7cf..b6440805c420 100644
--- a/include/net/sock.h
+++ b/include/net/sock.h
@@ -99,6 +99,7 @@ struct proto;
99 * @skc_node: main hash linkage for various protocol lookup tables 99 * @skc_node: main hash linkage for various protocol lookup tables
100 * @skc_bind_node: bind hash linkage for various protocol lookup tables 100 * @skc_bind_node: bind hash linkage for various protocol lookup tables
101 * @skc_refcnt: reference count 101 * @skc_refcnt: reference count
102 * @skc_hash: hash value used with various protocol lookup tables
102 * @skc_prot: protocol handlers inside a network family 103 * @skc_prot: protocol handlers inside a network family
103 * 104 *
104 * This is the minimal network layer representation of sockets, the header 105 * This is the minimal network layer representation of sockets, the header
@@ -112,6 +113,7 @@ struct sock_common {
112 struct hlist_node skc_node; 113 struct hlist_node skc_node;
113 struct hlist_node skc_bind_node; 114 struct hlist_node skc_bind_node;
114 atomic_t skc_refcnt; 115 atomic_t skc_refcnt;
116 unsigned int skc_hash;
115 struct proto *skc_prot; 117 struct proto *skc_prot;
116}; 118};
117 119
@@ -139,7 +141,6 @@ struct sock_common {
139 * @sk_no_check: %SO_NO_CHECK setting, wether or not checkup packets 141 * @sk_no_check: %SO_NO_CHECK setting, wether or not checkup packets
140 * @sk_route_caps: route capabilities (e.g. %NETIF_F_TSO) 142 * @sk_route_caps: route capabilities (e.g. %NETIF_F_TSO)
141 * @sk_lingertime: %SO_LINGER l_linger setting 143 * @sk_lingertime: %SO_LINGER l_linger setting
142 * @sk_hashent: hash entry in several tables (e.g. inet_hashinfo.ehash)
143 * @sk_backlog: always used with the per-socket spinlock held 144 * @sk_backlog: always used with the per-socket spinlock held
144 * @sk_callback_lock: used with the callbacks in the end of this struct 145 * @sk_callback_lock: used with the callbacks in the end of this struct
145 * @sk_error_queue: rarely used 146 * @sk_error_queue: rarely used
@@ -186,6 +187,7 @@ struct sock {
186#define sk_node __sk_common.skc_node 187#define sk_node __sk_common.skc_node
187#define sk_bind_node __sk_common.skc_bind_node 188#define sk_bind_node __sk_common.skc_bind_node
188#define sk_refcnt __sk_common.skc_refcnt 189#define sk_refcnt __sk_common.skc_refcnt
190#define sk_hash __sk_common.skc_hash
189#define sk_prot __sk_common.skc_prot 191#define sk_prot __sk_common.skc_prot
190 unsigned char sk_shutdown : 2, 192 unsigned char sk_shutdown : 2,
191 sk_no_check : 2, 193 sk_no_check : 2,
@@ -208,7 +210,6 @@ struct sock {
208 unsigned int sk_allocation; 210 unsigned int sk_allocation;
209 int sk_sndbuf; 211 int sk_sndbuf;
210 int sk_route_caps; 212 int sk_route_caps;
211 int sk_hashent;
212 unsigned long sk_flags; 213 unsigned long sk_flags;
213 unsigned long sk_lingertime; 214 unsigned long sk_lingertime;
214 /* 215 /*
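
The sock.h hunk is the struct side of the same change: the per-socket int sk_hashent (a pre-masked bucket index) is removed and an unsigned int skc_hash is added to sock_common, exposed through the usual #define aliases as sk_hash and, in the inet_timewait_sock.h hunk above, as tw_hash, so established and TIME_WAIT sockets cache the same value the lookup macros compare against. The alias trick itself looks like this (trimmed, compilable illustration, not the full structs):

/* sock-alias-demo.c: the skc_hash aliasing pattern used by sock.h and
 * inet_timewait_sock.h, reduced to a toy. */
#include <stdio.h>

struct sock_common_demo {
        unsigned int skc_hash;             /* shared, cached full hash */
};

struct sock_demo {
        struct sock_common_demo __sk_common;
#define sk_hash  __sk_common.skc_hash      /* alias into the common header */
        int sk_sndbuf;
};

struct tw_sock_demo {
        struct sock_common_demo __tw_common;
#define tw_hash  __tw_common.skc_hash      /* timewait side sees the same field */
        int tw_timeout;
};

int main(void)
{
        struct sock_demo sk = { .__sk_common = { .skc_hash = 0xdeadbeef },
                                .sk_sndbuf = 0 };
        struct tw_sock_demo tw = { .__tw_common = { .skc_hash = 0xdeadbeef },
                                   .tw_timeout = 0 };

        /* both accessors read the embedded sock_common field */
        printf("sk_hash=0x%x tw_hash=0x%x\n", sk.sk_hash, tw.tw_hash);
        return 0;
}
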
diff --git a/include/net/xfrm.h b/include/net/xfrm.h
index a9d0d8c5dfbf..b6e72f890c6c 100644
--- a/include/net/xfrm.h
+++ b/include/net/xfrm.h
@@ -875,7 +875,7 @@ static inline int xfrm_dst_lookup(struct xfrm_dst **dst, struct flowi *fl, unsig
875} 875}
876#endif 876#endif
877 877
878struct xfrm_policy *xfrm_policy_alloc(int gfp); 878struct xfrm_policy *xfrm_policy_alloc(unsigned int __nocast gfp);
879extern int xfrm_policy_walk(int (*func)(struct xfrm_policy *, int, int, void*), void *); 879extern int xfrm_policy_walk(int (*func)(struct xfrm_policy *, int, int, void*), void *);
880int xfrm_policy_insert(int dir, struct xfrm_policy *policy, int excl); 880int xfrm_policy_insert(int dir, struct xfrm_policy *policy, int excl);
881struct xfrm_policy *xfrm_policy_bysel(int dir, struct xfrm_selector *sel, 881struct xfrm_policy *xfrm_policy_bysel(int dir, struct xfrm_selector *sel,
@@ -931,4 +931,9 @@ static inline int xfrm_addr_cmp(xfrm_address_t *a, xfrm_address_t *b,
931 } 931 }
932} 932}
933 933
934static inline int xfrm_policy_id2dir(u32 index)
935{
936 return index & 7;
937}
938
934#endif /* _NET_XFRM_H */ 939#endif /* _NET_XFRM_H */
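
The new xfrm_policy_id2dir() helper makes explicit that a policy's direction sits in the low three bits of its index (the index allocator presumably reserves those bits for it), so callers can recover the direction from an index alone. Minimal usage sketch; the direction constants below are assumed for the demo, check net/xfrm.h for the authoritative values:

/* xfrm-id2dir-demo.c: the helper is just a bit mask; this shows the
 * intended use with assumed direction constants. */
#include <stdio.h>

#define DEMO_POLICY_IN  0
#define DEMO_POLICY_OUT 1
#define DEMO_POLICY_FWD 2

static int xfrm_policy_id2dir_demo(unsigned int index)
{
        return index & 7;                  /* direction lives in the low three bits */
}

int main(void)
{
        unsigned int index = (5u << 3) | DEMO_POLICY_OUT;   /* 5th policy, outbound */

        printf("index %u -> dir %d\n", index, xfrm_policy_id2dir_demo(index));
        return 0;
}
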
diff --git a/include/rxrpc/call.h b/include/rxrpc/call.h
index f48f27e9e0ab..8118731e7d96 100644
--- a/include/rxrpc/call.h
+++ b/include/rxrpc/call.h
@@ -203,7 +203,7 @@ extern int rxrpc_call_write_data(struct rxrpc_call *call,
203 size_t sioc, 203 size_t sioc,
204 struct kvec *siov, 204 struct kvec *siov,
205 uint8_t rxhdr_flags, 205 uint8_t rxhdr_flags,
206 int alloc_flags, 206 unsigned int __nocast alloc_flags,
207 int dup_data, 207 int dup_data,
208 size_t *size_sent); 208 size_t *size_sent);
209 209
diff --git a/include/rxrpc/message.h b/include/rxrpc/message.h
index 3a59df6870b2..983d9f9eee1a 100644
--- a/include/rxrpc/message.h
+++ b/include/rxrpc/message.h
@@ -63,7 +63,7 @@ extern int rxrpc_conn_newmsg(struct rxrpc_connection *conn,
63 uint8_t type, 63 uint8_t type,
64 int count, 64 int count,
65 struct kvec *diov, 65 struct kvec *diov,
66 int alloc_flags, 66 unsigned int __nocast alloc_flags,
67 struct rxrpc_message **_msg); 67 struct rxrpc_message **_msg);
68 68
69extern int rxrpc_conn_sendmsg(struct rxrpc_connection *conn, struct rxrpc_message *msg); 69extern int rxrpc_conn_sendmsg(struct rxrpc_connection *conn, struct rxrpc_message *msg);
diff --git a/include/scsi/scsi_device.h b/include/scsi/scsi_device.h
index c0e4c67d836f..7ece05666feb 100644
--- a/include/scsi/scsi_device.h
+++ b/include/scsi/scsi_device.h
@@ -163,6 +163,7 @@ struct scsi_target {
163 unsigned int id; /* target id ... replace 163 unsigned int id; /* target id ... replace
164 * scsi_device.id eventually */ 164 * scsi_device.id eventually */
165 unsigned long create:1; /* signal that it needs to be added */ 165 unsigned long create:1; /* signal that it needs to be added */
166 char scsi_level;
166 void *hostdata; /* available to low-level driver */ 167 void *hostdata; /* available to low-level driver */
167 unsigned long starget_data[0]; /* for the transport */ 168 unsigned long starget_data[0]; /* for the transport */
168 /* starget_data must be the last element!!!! */ 169 /* starget_data must be the last element!!!! */
diff --git a/lib/ts_bm.c b/lib/ts_bm.c
index 2cc79112ecc3..1b61fceef777 100644
--- a/lib/ts_bm.c
+++ b/lib/ts_bm.c
@@ -127,7 +127,7 @@ static void compute_prefix_tbl(struct ts_bm *bm, const u8 *pattern,
127} 127}
128 128
129static struct ts_config *bm_init(const void *pattern, unsigned int len, 129static struct ts_config *bm_init(const void *pattern, unsigned int len,
130 int gfp_mask) 130 unsigned int __nocast gfp_mask)
131{ 131{
132 struct ts_config *conf; 132 struct ts_config *conf;
133 struct ts_bm *bm; 133 struct ts_bm *bm;
diff --git a/lib/ts_fsm.c b/lib/ts_fsm.c
index d27c0a072940..ef9779e00506 100644
--- a/lib/ts_fsm.c
+++ b/lib/ts_fsm.c
@@ -258,7 +258,7 @@ found_match:
258} 258}
259 259
260static struct ts_config *fsm_init(const void *pattern, unsigned int len, 260static struct ts_config *fsm_init(const void *pattern, unsigned int len,
261 int gfp_mask) 261 unsigned int __nocast gfp_mask)
262{ 262{
263 int i, err = -EINVAL; 263 int i, err = -EINVAL;
264 struct ts_config *conf; 264 struct ts_config *conf;
diff --git a/lib/ts_kmp.c b/lib/ts_kmp.c
index 73266b975585..e45f0f0c2379 100644
--- a/lib/ts_kmp.c
+++ b/lib/ts_kmp.c
@@ -87,7 +87,7 @@ static inline void compute_prefix_tbl(const u8 *pattern, unsigned int len,
87} 87}
88 88
89static struct ts_config *kmp_init(const void *pattern, unsigned int len, 89static struct ts_config *kmp_init(const void *pattern, unsigned int len,
90 int gfp_mask) 90 unsigned int __nocast gfp_mask)
91{ 91{
92 struct ts_config *conf; 92 struct ts_config *conf;
93 struct ts_kmp *kmp; 93 struct ts_kmp *kmp;
diff --git a/net/atm/atm_misc.c b/net/atm/atm_misc.c
index b2113c3454ae..71abc99ec815 100644
--- a/net/atm/atm_misc.c
+++ b/net/atm/atm_misc.c
@@ -25,7 +25,7 @@ int atm_charge(struct atm_vcc *vcc,int truesize)
25 25
26 26
27struct sk_buff *atm_alloc_charge(struct atm_vcc *vcc,int pdu_size, 27struct sk_buff *atm_alloc_charge(struct atm_vcc *vcc,int pdu_size,
28 int gfp_flags) 28 unsigned int __nocast gfp_flags)
29{ 29{
30 struct sock *sk = sk_atm(vcc); 30 struct sock *sk = sk_atm(vcc);
31 int guess = atm_guess_pdu2truesize(pdu_size); 31 int guess = atm_guess_pdu2truesize(pdu_size);
diff --git a/net/atm/clip.c b/net/atm/clip.c
index 28dab55a4387..4f54c9a5e84a 100644
--- a/net/atm/clip.c
+++ b/net/atm/clip.c
@@ -310,7 +310,7 @@ static int clip_constructor(struct neighbour *neigh)
310 if (neigh->type != RTN_UNICAST) return -EINVAL; 310 if (neigh->type != RTN_UNICAST) return -EINVAL;
311 311
312 rcu_read_lock(); 312 rcu_read_lock();
313 in_dev = rcu_dereference(__in_dev_get(dev)); 313 in_dev = __in_dev_get_rcu(dev);
314 if (!in_dev) { 314 if (!in_dev) {
315 rcu_read_unlock(); 315 rcu_read_unlock();
316 return -EINVAL; 316 return -EINVAL;
diff --git a/net/atm/common.c b/net/atm/common.c
index 801a5813ec60..63feea49fb13 100644
--- a/net/atm/common.c
+++ b/net/atm/common.c
@@ -46,7 +46,7 @@ static void __vcc_insert_socket(struct sock *sk)
46 struct atm_vcc *vcc = atm_sk(sk); 46 struct atm_vcc *vcc = atm_sk(sk);
47 struct hlist_head *head = &vcc_hash[vcc->vci & 47 struct hlist_head *head = &vcc_hash[vcc->vci &
48 (VCC_HTABLE_SIZE - 1)]; 48 (VCC_HTABLE_SIZE - 1)];
49 sk->sk_hashent = vcc->vci & (VCC_HTABLE_SIZE - 1); 49 sk->sk_hash = vcc->vci & (VCC_HTABLE_SIZE - 1);
50 sk_add_node(sk, head); 50 sk_add_node(sk, head);
51} 51}
52 52
diff --git a/net/ax25/ax25_in.c b/net/ax25/ax25_in.c
index 810c9c76c2e0..73cfc3411c46 100644
--- a/net/ax25/ax25_in.c
+++ b/net/ax25/ax25_in.c
@@ -123,7 +123,7 @@ int ax25_rx_iframe(ax25_cb *ax25, struct sk_buff *skb)
123 } 123 }
124 124
125 skb_pull(skb, 1); /* Remove PID */ 125 skb_pull(skb, 1); /* Remove PID */
126 skb->h.raw = skb->data; 126 skb->mac.raw = skb->nh.raw;
127 skb->nh.raw = skb->data; 127 skb->nh.raw = skb->data;
128 skb->dev = ax25->ax25_dev->dev; 128 skb->dev = ax25->ax25_dev->dev;
129 skb->pkt_type = PACKET_HOST; 129 skb->pkt_type = PACKET_HOST;
diff --git a/net/core/netpoll.c b/net/core/netpoll.c
index 5265dfd69928..802fe11efad0 100644
--- a/net/core/netpoll.c
+++ b/net/core/netpoll.c
@@ -703,7 +703,7 @@ int netpoll_setup(struct netpoll *np)
703 703
704 if (!np->local_ip) { 704 if (!np->local_ip) {
705 rcu_read_lock(); 705 rcu_read_lock();
706 in_dev = __in_dev_get(ndev); 706 in_dev = __in_dev_get_rcu(ndev);
707 707
708 if (!in_dev || !in_dev->ifa_list) { 708 if (!in_dev || !in_dev->ifa_list) {
709 rcu_read_unlock(); 709 rcu_read_unlock();
diff --git a/net/core/pktgen.c b/net/core/pktgen.c
index b7f2d65a614f..5f043d346694 100644
--- a/net/core/pktgen.c
+++ b/net/core/pktgen.c
@@ -1667,13 +1667,12 @@ static void pktgen_setup_inject(struct pktgen_dev *pkt_dev)
1667 struct in_device *in_dev; 1667 struct in_device *in_dev;
1668 1668
1669 rcu_read_lock(); 1669 rcu_read_lock();
1670 in_dev = __in_dev_get(pkt_dev->odev); 1670 in_dev = __in_dev_get_rcu(pkt_dev->odev);
1671 if (in_dev) { 1671 if (in_dev) {
1672 if (in_dev->ifa_list) { 1672 if (in_dev->ifa_list) {
1673 pkt_dev->saddr_min = in_dev->ifa_list->ifa_address; 1673 pkt_dev->saddr_min = in_dev->ifa_list->ifa_address;
1674 pkt_dev->saddr_max = pkt_dev->saddr_min; 1674 pkt_dev->saddr_max = pkt_dev->saddr_min;
1675 } 1675 }
1676 __in_dev_put(in_dev);
1677 } 1676 }
1678 rcu_read_unlock(); 1677 rcu_read_unlock();
1679 } 1678 }
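Note on the in_dev accessor changes in this and the following hunks: the old __in_dev_get() is split into __in_dev_get_rcu(), for callers inside rcu_read_lock()/rcu_read_unlock(), and __in_dev_get_rtnl(), for callers holding the RTNL, so the explicit __in_dev_put() reference drop above becomes unnecessary. A rough userspace sketch of the read-side idea follows; it uses C11 acquire/release atomics to stand in for RCU publication, and publish_config()/dev_config_rcu() are invented names for the illustration, not kernel interfaces.

/* sketch.c - simplified model of an RCU-style read-side accessor.
 * Not kernel code; it only mimics the publish-then-read pattern. */
#include <stdatomic.h>
#include <stdio.h>
#include <stdlib.h>

struct in_cfg { int forwarding; };

static _Atomic(struct in_cfg *) current_cfg;

/* writer side: publish a new configuration with release ordering */
static void publish_config(struct in_cfg *cfg)
{
        atomic_store_explicit(&current_cfg, cfg, memory_order_release);
}

/* reader side: analogous to __in_dev_get_rcu(); the caller must stay
 * inside its read-side critical section while using the pointer. */
static struct in_cfg *dev_config_rcu(void)
{
        return atomic_load_explicit(&current_cfg, memory_order_acquire);
}

int main(void)
{
        struct in_cfg *cfg = malloc(sizeof(*cfg));
        cfg->forwarding = 1;
        publish_config(cfg);

        /* rcu_read_lock() would go here in the kernel */
        struct in_cfg *c = dev_config_rcu();
        if (c)
                printf("forwarding=%d\n", c->forwarding);
        /* rcu_read_unlock(); there is no reference count to drop */

        free(cfg);
        return 0;
}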
diff --git a/net/core/skbuff.c b/net/core/skbuff.c
index f80a28785610..0e9431b59fb2 100644
--- a/net/core/skbuff.c
+++ b/net/core/skbuff.c
@@ -71,8 +71,6 @@
71static kmem_cache_t *skbuff_head_cache __read_mostly; 71static kmem_cache_t *skbuff_head_cache __read_mostly;
72static kmem_cache_t *skbuff_fclone_cache __read_mostly; 72static kmem_cache_t *skbuff_fclone_cache __read_mostly;
73 73
74struct timeval __read_mostly skb_tv_base;
75
76/* 74/*
77 * Keep out-of-line to prevent kernel bloat. 75 * Keep out-of-line to prevent kernel bloat.
78 * __builtin_return_address is not used because it is not always 76 * __builtin_return_address is not used because it is not always
@@ -1708,8 +1706,6 @@ void __init skb_init(void)
1708 NULL, NULL); 1706 NULL, NULL);
1709 if (!skbuff_fclone_cache) 1707 if (!skbuff_fclone_cache)
1710 panic("cannot create skbuff cache"); 1708 panic("cannot create skbuff cache");
1711
1712 do_gettimeofday(&skb_tv_base);
1713} 1709}
1714 1710
1715EXPORT_SYMBOL(___pskb_trim); 1711EXPORT_SYMBOL(___pskb_trim);
@@ -1743,4 +1739,3 @@ EXPORT_SYMBOL(skb_prepare_seq_read);
1743EXPORT_SYMBOL(skb_seq_read); 1739EXPORT_SYMBOL(skb_seq_read);
1744EXPORT_SYMBOL(skb_abort_seq_read); 1740EXPORT_SYMBOL(skb_abort_seq_read);
1745EXPORT_SYMBOL(skb_find_text); 1741EXPORT_SYMBOL(skb_find_text);
1746EXPORT_SYMBOL(skb_tv_base);
diff --git a/net/dccp/ipv4.c b/net/dccp/ipv4.c
index 40fe6afacde6..ae088d1347af 100644
--- a/net/dccp/ipv4.c
+++ b/net/dccp/ipv4.c
@@ -62,27 +62,27 @@ static int __dccp_v4_check_established(struct sock *sk, const __u16 lport,
62 const int dif = sk->sk_bound_dev_if; 62 const int dif = sk->sk_bound_dev_if;
63 INET_ADDR_COOKIE(acookie, saddr, daddr) 63 INET_ADDR_COOKIE(acookie, saddr, daddr)
64 const __u32 ports = INET_COMBINED_PORTS(inet->dport, lport); 64 const __u32 ports = INET_COMBINED_PORTS(inet->dport, lport);
65 const int hash = inet_ehashfn(daddr, lport, saddr, inet->dport, 65 unsigned int hash = inet_ehashfn(daddr, lport, saddr, inet->dport);
66 dccp_hashinfo.ehash_size); 66 struct inet_ehash_bucket *head = inet_ehash_bucket(&dccp_hashinfo, hash);
67 struct inet_ehash_bucket *head = &dccp_hashinfo.ehash[hash];
68 const struct sock *sk2; 67 const struct sock *sk2;
69 const struct hlist_node *node; 68 const struct hlist_node *node;
70 struct inet_timewait_sock *tw; 69 struct inet_timewait_sock *tw;
71 70
71 prefetch(head->chain.first);
72 write_lock(&head->lock); 72 write_lock(&head->lock);
73 73
74 /* Check TIME-WAIT sockets first. */ 74 /* Check TIME-WAIT sockets first. */
75 sk_for_each(sk2, node, &(head + dccp_hashinfo.ehash_size)->chain) { 75 sk_for_each(sk2, node, &(head + dccp_hashinfo.ehash_size)->chain) {
76 tw = inet_twsk(sk2); 76 tw = inet_twsk(sk2);
77 77
78 if (INET_TW_MATCH(sk2, acookie, saddr, daddr, ports, dif)) 78 if (INET_TW_MATCH(sk2, hash, acookie, saddr, daddr, ports, dif))
79 goto not_unique; 79 goto not_unique;
80 } 80 }
81 tw = NULL; 81 tw = NULL;
82 82
83 /* And established part... */ 83 /* And established part... */
84 sk_for_each(sk2, node, &head->chain) { 84 sk_for_each(sk2, node, &head->chain) {
85 if (INET_MATCH(sk2, acookie, saddr, daddr, ports, dif)) 85 if (INET_MATCH(sk2, hash, acookie, saddr, daddr, ports, dif))
86 goto not_unique; 86 goto not_unique;
87 } 87 }
88 88
@@ -90,7 +90,7 @@ static int __dccp_v4_check_established(struct sock *sk, const __u16 lport,
90 * in hash table socket with a funny identity. */ 90 * in hash table socket with a funny identity. */
91 inet->num = lport; 91 inet->num = lport;
92 inet->sport = htons(lport); 92 inet->sport = htons(lport);
93 sk->sk_hashent = hash; 93 sk->sk_hash = hash;
94 BUG_TRAP(sk_unhashed(sk)); 94 BUG_TRAP(sk_unhashed(sk));
95 __sk_add_node(sk, &head->chain); 95 __sk_add_node(sk, &head->chain);
96 sock_prot_inc_use(sk->sk_prot); 96 sock_prot_inc_use(sk->sk_prot);
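The DCCP hunk above, like the TCP and timewait hunks later in this diff, stops truncating the connection hash to a table index: inet_ehashfn() now returns the full 32-bit value, the bucket is picked with inet_ehash_bucket(), the value is cached in sk->sk_hash, and INET_MATCH()/INET_TW_MATCH() compare that cached hash before touching addresses and ports. A self-contained toy version of the idea, with a made-up conn_hash() and struct conn rather than the real socket layout:

/* toy model of "store the full hash, compare hashes before addresses" */
#include <stdint.h>
#include <stdio.h>

#define EHASH_SIZE 256          /* power of two, like ehash_size */

struct conn {
        uint32_t hash;          /* full 32-bit hash, like sk->sk_hash */
        uint32_t saddr, daddr;
        uint16_t sport, dport;
};

static uint32_t conn_hash(uint32_t s, uint16_t sp, uint32_t d, uint16_t dp)
{
        uint32_t h = s ^ d ^ ((uint32_t)sp << 16 | dp);
        h ^= h >> 16;
        return h * 0x9e3779b1u;
}

/* cheap reject on the full hash first, detailed compare only on a hit */
static int conn_match(const struct conn *c, uint32_t hash,
                      uint32_t s, uint16_t sp, uint32_t d, uint16_t dp)
{
        return c->hash == hash &&
               c->saddr == s && c->sport == sp &&
               c->daddr == d && c->dport == dp;
}

int main(void)
{
        struct conn c = { 0 };
        c.saddr = 0x0a000001; c.daddr = 0x0a000002;
        c.sport = 12345;      c.dport = 80;
        c.hash  = conn_hash(c.saddr, c.sport, c.daddr, c.dport);

        uint32_t h = conn_hash(0x0a000001, 12345, 0x0a000002, 80);
        unsigned bucket = h & (EHASH_SIZE - 1); /* masking only picks the chain */

        printf("bucket=%u match=%d\n", bucket,
               conn_match(&c, h, 0x0a000001, 12345, 0x0a000002, 80));
        return 0;
}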
diff --git a/net/decnet/af_decnet.c b/net/decnet/af_decnet.c
index 348f36b529f7..34d4128d56d5 100644
--- a/net/decnet/af_decnet.c
+++ b/net/decnet/af_decnet.c
@@ -452,7 +452,8 @@ static struct proto dn_proto = {
452 .obj_size = sizeof(struct dn_sock), 452 .obj_size = sizeof(struct dn_sock),
453}; 453};
454 454
455static struct sock *dn_alloc_sock(struct socket *sock, int gfp) 455static struct sock *dn_alloc_sock(struct socket *sock,
456 unsigned int __nocast gfp)
456{ 457{
457 struct dn_scp *scp; 458 struct dn_scp *scp;
458 struct sock *sk = sk_alloc(PF_DECnet, gfp, &dn_proto, 1); 459 struct sock *sk = sk_alloc(PF_DECnet, gfp, &dn_proto, 1);
@@ -804,7 +805,8 @@ static int dn_auto_bind(struct socket *sock)
804 return rv; 805 return rv;
805} 806}
806 807
807static int dn_confirm_accept(struct sock *sk, long *timeo, int allocation) 808static int dn_confirm_accept(struct sock *sk, long *timeo,
809 unsigned int __nocast allocation)
808{ 810{
809 struct dn_scp *scp = DN_SK(sk); 811 struct dn_scp *scp = DN_SK(sk);
810 DEFINE_WAIT(wait); 812 DEFINE_WAIT(wait);
diff --git a/net/decnet/dn_nsp_out.c b/net/decnet/dn_nsp_out.c
index 53633d352868..cd08244aa10c 100644
--- a/net/decnet/dn_nsp_out.c
+++ b/net/decnet/dn_nsp_out.c
@@ -117,7 +117,8 @@ try_again:
117 * The eventual aim is for each socket to have a cached header size 117 * The eventual aim is for each socket to have a cached header size
118 * for its outgoing packets, and to set hdr from this when sk != NULL. 118 * for its outgoing packets, and to set hdr from this when sk != NULL.
119 */ 119 */
120struct sk_buff *dn_alloc_skb(struct sock *sk, int size, int pri) 120struct sk_buff *dn_alloc_skb(struct sock *sk, int size,
121 unsigned int __nocast pri)
121{ 122{
122 struct sk_buff *skb; 123 struct sk_buff *skb;
123 int hdr = 64; 124 int hdr = 64;
@@ -210,7 +211,8 @@ static void dn_nsp_rtt(struct sock *sk, long rtt)
210 * 211 *
211 * Returns: The number of times the packet has been sent previously 212 * Returns: The number of times the packet has been sent previously
212 */ 213 */
213static inline unsigned dn_nsp_clone_and_send(struct sk_buff *skb, int gfp) 214static inline unsigned dn_nsp_clone_and_send(struct sk_buff *skb,
215 unsigned int __nocast gfp)
214{ 216{
215 struct dn_skb_cb *cb = DN_SKB_CB(skb); 217 struct dn_skb_cb *cb = DN_SKB_CB(skb);
216 struct sk_buff *skb2; 218 struct sk_buff *skb2;
@@ -350,7 +352,8 @@ static unsigned short *dn_nsp_mk_data_header(struct sock *sk, struct sk_buff *sk
350 return ptr; 352 return ptr;
351} 353}
352 354
353void dn_nsp_queue_xmit(struct sock *sk, struct sk_buff *skb, int gfp, int oth) 355void dn_nsp_queue_xmit(struct sock *sk, struct sk_buff *skb,
356 unsigned int __nocast gfp, int oth)
354{ 357{
355 struct dn_scp *scp = DN_SK(sk); 358 struct dn_scp *scp = DN_SK(sk);
356 struct dn_skb_cb *cb = DN_SKB_CB(skb); 359 struct dn_skb_cb *cb = DN_SKB_CB(skb);
@@ -517,7 +520,7 @@ static int dn_nsp_retrans_conn_conf(struct sock *sk)
517 return 0; 520 return 0;
518} 521}
519 522
520void dn_send_conn_conf(struct sock *sk, int gfp) 523void dn_send_conn_conf(struct sock *sk, unsigned int __nocast gfp)
521{ 524{
522 struct dn_scp *scp = DN_SK(sk); 525 struct dn_scp *scp = DN_SK(sk);
523 struct sk_buff *skb = NULL; 526 struct sk_buff *skb = NULL;
@@ -549,7 +552,8 @@ void dn_send_conn_conf(struct sock *sk, int gfp)
549 552
550 553
551static __inline__ void dn_nsp_do_disc(struct sock *sk, unsigned char msgflg, 554static __inline__ void dn_nsp_do_disc(struct sock *sk, unsigned char msgflg,
552 unsigned short reason, int gfp, struct dst_entry *dst, 555 unsigned short reason, unsigned int __nocast gfp,
556 struct dst_entry *dst,
553 int ddl, unsigned char *dd, __u16 rem, __u16 loc) 557 int ddl, unsigned char *dd, __u16 rem, __u16 loc)
554{ 558{
555 struct sk_buff *skb = NULL; 559 struct sk_buff *skb = NULL;
@@ -591,7 +595,7 @@ static __inline__ void dn_nsp_do_disc(struct sock *sk, unsigned char msgflg,
591 595
592 596
593void dn_nsp_send_disc(struct sock *sk, unsigned char msgflg, 597void dn_nsp_send_disc(struct sock *sk, unsigned char msgflg,
594 unsigned short reason, int gfp) 598 unsigned short reason, unsigned int __nocast gfp)
595{ 599{
596 struct dn_scp *scp = DN_SK(sk); 600 struct dn_scp *scp = DN_SK(sk);
597 int ddl = 0; 601 int ddl = 0;
@@ -612,7 +616,7 @@ void dn_nsp_return_disc(struct sk_buff *skb, unsigned char msgflg,
612{ 616{
613 struct dn_skb_cb *cb = DN_SKB_CB(skb); 617 struct dn_skb_cb *cb = DN_SKB_CB(skb);
614 int ddl = 0; 618 int ddl = 0;
615 int gfp = GFP_ATOMIC; 619 unsigned int __nocast gfp = GFP_ATOMIC;
616 620
617 dn_nsp_do_disc(NULL, msgflg, reason, gfp, skb->dst, ddl, 621 dn_nsp_do_disc(NULL, msgflg, reason, gfp, skb->dst, ddl,
618 NULL, cb->src_port, cb->dst_port); 622 NULL, cb->src_port, cb->dst_port);
@@ -624,7 +628,7 @@ void dn_nsp_send_link(struct sock *sk, unsigned char lsflags, char fcval)
624 struct dn_scp *scp = DN_SK(sk); 628 struct dn_scp *scp = DN_SK(sk);
625 struct sk_buff *skb; 629 struct sk_buff *skb;
626 unsigned char *ptr; 630 unsigned char *ptr;
627 int gfp = GFP_ATOMIC; 631 unsigned int __nocast gfp = GFP_ATOMIC;
628 632
629 if ((skb = dn_alloc_skb(sk, DN_MAX_NSP_DATA_HEADER + 2, gfp)) == NULL) 633 if ((skb = dn_alloc_skb(sk, DN_MAX_NSP_DATA_HEADER + 2, gfp)) == NULL)
630 return; 634 return;
@@ -659,7 +663,8 @@ void dn_nsp_send_conninit(struct sock *sk, unsigned char msgflg)
659 unsigned char menuver; 663 unsigned char menuver;
660 struct dn_skb_cb *cb; 664 struct dn_skb_cb *cb;
661 unsigned char type = 1; 665 unsigned char type = 1;
662 int allocation = (msgflg == NSP_CI) ? sk->sk_allocation : GFP_ATOMIC; 666 unsigned int __nocast allocation =
667 (msgflg == NSP_CI) ? sk->sk_allocation : GFP_ATOMIC;
663 struct sk_buff *skb = dn_alloc_skb(sk, 200, allocation); 668 struct sk_buff *skb = dn_alloc_skb(sk, 200, allocation);
664 669
665 if (!skb) 670 if (!skb)
diff --git a/net/econet/af_econet.c b/net/econet/af_econet.c
index 4a62093eb343..34fdac51df96 100644
--- a/net/econet/af_econet.c
+++ b/net/econet/af_econet.c
@@ -406,7 +406,7 @@ static int econet_sendmsg(struct kiocb *iocb, struct socket *sock,
406 unsigned long network = 0; 406 unsigned long network = 0;
407 407
408 rcu_read_lock(); 408 rcu_read_lock();
409 idev = __in_dev_get(dev); 409 idev = __in_dev_get_rcu(dev);
410 if (idev) { 410 if (idev) {
411 if (idev->ifa_list) 411 if (idev->ifa_list)
412 network = ntohl(idev->ifa_list->ifa_address) & 412 network = ntohl(idev->ifa_list->ifa_address) &
diff --git a/net/ieee80211/ieee80211_tx.c b/net/ieee80211/ieee80211_tx.c
index c9aaff3fea1e..ecdf9f7a538f 100644
--- a/net/ieee80211/ieee80211_tx.c
+++ b/net/ieee80211/ieee80211_tx.c
@@ -207,7 +207,7 @@ void ieee80211_txb_free(struct ieee80211_txb *txb)
207} 207}
208 208
209static struct ieee80211_txb *ieee80211_alloc_txb(int nr_frags, int txb_size, 209static struct ieee80211_txb *ieee80211_alloc_txb(int nr_frags, int txb_size,
210 int gfp_mask) 210 unsigned int __nocast gfp_mask)
211{ 211{
212 struct ieee80211_txb *txb; 212 struct ieee80211_txb *txb;
213 int i; 213 int i;
diff --git a/net/ipv4/arp.c b/net/ipv4/arp.c
index 8bf312bdea13..b425748f02d7 100644
--- a/net/ipv4/arp.c
+++ b/net/ipv4/arp.c
@@ -241,7 +241,7 @@ static int arp_constructor(struct neighbour *neigh)
241 neigh->type = inet_addr_type(addr); 241 neigh->type = inet_addr_type(addr);
242 242
243 rcu_read_lock(); 243 rcu_read_lock();
244 in_dev = rcu_dereference(__in_dev_get(dev)); 244 in_dev = __in_dev_get_rcu(dev);
245 if (in_dev == NULL) { 245 if (in_dev == NULL) {
246 rcu_read_unlock(); 246 rcu_read_unlock();
247 return -EINVAL; 247 return -EINVAL;
@@ -697,12 +697,6 @@ void arp_send(int type, int ptype, u32 dest_ip,
697 arp_xmit(skb); 697 arp_xmit(skb);
698} 698}
699 699
700static void parp_redo(struct sk_buff *skb)
701{
702 nf_reset(skb);
703 arp_rcv(skb, skb->dev, NULL, skb->dev);
704}
705
706/* 700/*
707 * Process an arp request. 701 * Process an arp request.
708 */ 702 */
@@ -922,6 +916,11 @@ out:
922 return 0; 916 return 0;
923} 917}
924 918
919static void parp_redo(struct sk_buff *skb)
920{
921 arp_process(skb);
922}
923
925 924
926/* 925/*
927 * Receive an arp request from the device layer. 926 * Receive an arp request from the device layer.
@@ -990,8 +989,8 @@ static int arp_req_set(struct arpreq *r, struct net_device * dev)
990 ipv4_devconf.proxy_arp = 1; 989 ipv4_devconf.proxy_arp = 1;
991 return 0; 990 return 0;
992 } 991 }
993 if (__in_dev_get(dev)) { 992 if (__in_dev_get_rtnl(dev)) {
994 __in_dev_get(dev)->cnf.proxy_arp = 1; 993 __in_dev_get_rtnl(dev)->cnf.proxy_arp = 1;
995 return 0; 994 return 0;
996 } 995 }
997 return -ENXIO; 996 return -ENXIO;
@@ -1096,8 +1095,8 @@ static int arp_req_delete(struct arpreq *r, struct net_device * dev)
1096 ipv4_devconf.proxy_arp = 0; 1095 ipv4_devconf.proxy_arp = 0;
1097 return 0; 1096 return 0;
1098 } 1097 }
1099 if (__in_dev_get(dev)) { 1098 if (__in_dev_get_rtnl(dev)) {
1100 __in_dev_get(dev)->cnf.proxy_arp = 0; 1099 __in_dev_get_rtnl(dev)->cnf.proxy_arp = 0;
1101 return 0; 1100 return 0;
1102 } 1101 }
1103 return -ENXIO; 1102 return -ENXIO;
diff --git a/net/ipv4/devinet.c b/net/ipv4/devinet.c
index ba2895ae8151..74f2207e131a 100644
--- a/net/ipv4/devinet.c
+++ b/net/ipv4/devinet.c
@@ -351,7 +351,7 @@ static int inet_insert_ifa(struct in_ifaddr *ifa)
351 351
352static int inet_set_ifa(struct net_device *dev, struct in_ifaddr *ifa) 352static int inet_set_ifa(struct net_device *dev, struct in_ifaddr *ifa)
353{ 353{
354 struct in_device *in_dev = __in_dev_get(dev); 354 struct in_device *in_dev = __in_dev_get_rtnl(dev);
355 355
356 ASSERT_RTNL(); 356 ASSERT_RTNL();
357 357
@@ -449,7 +449,7 @@ static int inet_rtm_newaddr(struct sk_buff *skb, struct nlmsghdr *nlh, void *arg
449 goto out; 449 goto out;
450 450
451 rc = -ENOBUFS; 451 rc = -ENOBUFS;
452 if ((in_dev = __in_dev_get(dev)) == NULL) { 452 if ((in_dev = __in_dev_get_rtnl(dev)) == NULL) {
453 in_dev = inetdev_init(dev); 453 in_dev = inetdev_init(dev);
454 if (!in_dev) 454 if (!in_dev)
455 goto out; 455 goto out;
@@ -584,7 +584,7 @@ int devinet_ioctl(unsigned int cmd, void __user *arg)
584 if (colon) 584 if (colon)
585 *colon = ':'; 585 *colon = ':';
586 586
587 if ((in_dev = __in_dev_get(dev)) != NULL) { 587 if ((in_dev = __in_dev_get_rtnl(dev)) != NULL) {
588 if (tryaddrmatch) { 588 if (tryaddrmatch) {
589 /* Matthias Andree */ 589 /* Matthias Andree */
590 /* compare label and address (4.4BSD style) */ 590 /* compare label and address (4.4BSD style) */
@@ -748,7 +748,7 @@ rarok:
748 748
749static int inet_gifconf(struct net_device *dev, char __user *buf, int len) 749static int inet_gifconf(struct net_device *dev, char __user *buf, int len)
750{ 750{
751 struct in_device *in_dev = __in_dev_get(dev); 751 struct in_device *in_dev = __in_dev_get_rtnl(dev);
752 struct in_ifaddr *ifa; 752 struct in_ifaddr *ifa;
753 struct ifreq ifr; 753 struct ifreq ifr;
754 int done = 0; 754 int done = 0;
@@ -791,7 +791,7 @@ u32 inet_select_addr(const struct net_device *dev, u32 dst, int scope)
791 struct in_device *in_dev; 791 struct in_device *in_dev;
792 792
793 rcu_read_lock(); 793 rcu_read_lock();
794 in_dev = __in_dev_get(dev); 794 in_dev = __in_dev_get_rcu(dev);
795 if (!in_dev) 795 if (!in_dev)
796 goto no_in_dev; 796 goto no_in_dev;
797 797
@@ -818,7 +818,7 @@ no_in_dev:
818 read_lock(&dev_base_lock); 818 read_lock(&dev_base_lock);
819 rcu_read_lock(); 819 rcu_read_lock();
820 for (dev = dev_base; dev; dev = dev->next) { 820 for (dev = dev_base; dev; dev = dev->next) {
821 if ((in_dev = __in_dev_get(dev)) == NULL) 821 if ((in_dev = __in_dev_get_rcu(dev)) == NULL)
822 continue; 822 continue;
823 823
824 for_primary_ifa(in_dev) { 824 for_primary_ifa(in_dev) {
@@ -887,7 +887,7 @@ u32 inet_confirm_addr(const struct net_device *dev, u32 dst, u32 local, int scop
887 887
888 if (dev) { 888 if (dev) {
889 rcu_read_lock(); 889 rcu_read_lock();
890 if ((in_dev = __in_dev_get(dev))) 890 if ((in_dev = __in_dev_get_rcu(dev)))
891 addr = confirm_addr_indev(in_dev, dst, local, scope); 891 addr = confirm_addr_indev(in_dev, dst, local, scope);
892 rcu_read_unlock(); 892 rcu_read_unlock();
893 893
@@ -897,7 +897,7 @@ u32 inet_confirm_addr(const struct net_device *dev, u32 dst, u32 local, int scop
897 read_lock(&dev_base_lock); 897 read_lock(&dev_base_lock);
898 rcu_read_lock(); 898 rcu_read_lock();
899 for (dev = dev_base; dev; dev = dev->next) { 899 for (dev = dev_base; dev; dev = dev->next) {
900 if ((in_dev = __in_dev_get(dev))) { 900 if ((in_dev = __in_dev_get_rcu(dev))) {
901 addr = confirm_addr_indev(in_dev, dst, local, scope); 901 addr = confirm_addr_indev(in_dev, dst, local, scope);
902 if (addr) 902 if (addr)
903 break; 903 break;
@@ -957,7 +957,7 @@ static int inetdev_event(struct notifier_block *this, unsigned long event,
957 void *ptr) 957 void *ptr)
958{ 958{
959 struct net_device *dev = ptr; 959 struct net_device *dev = ptr;
960 struct in_device *in_dev = __in_dev_get(dev); 960 struct in_device *in_dev = __in_dev_get_rtnl(dev);
961 961
962 ASSERT_RTNL(); 962 ASSERT_RTNL();
963 963
@@ -1078,7 +1078,7 @@ static int inet_dump_ifaddr(struct sk_buff *skb, struct netlink_callback *cb)
1078 if (idx > s_idx) 1078 if (idx > s_idx)
1079 s_ip_idx = 0; 1079 s_ip_idx = 0;
1080 rcu_read_lock(); 1080 rcu_read_lock();
1081 if ((in_dev = __in_dev_get(dev)) == NULL) { 1081 if ((in_dev = __in_dev_get_rcu(dev)) == NULL) {
1082 rcu_read_unlock(); 1082 rcu_read_unlock();
1083 continue; 1083 continue;
1084 } 1084 }
@@ -1149,7 +1149,7 @@ void inet_forward_change(void)
1149 for (dev = dev_base; dev; dev = dev->next) { 1149 for (dev = dev_base; dev; dev = dev->next) {
1150 struct in_device *in_dev; 1150 struct in_device *in_dev;
1151 rcu_read_lock(); 1151 rcu_read_lock();
1152 in_dev = __in_dev_get(dev); 1152 in_dev = __in_dev_get_rcu(dev);
1153 if (in_dev) 1153 if (in_dev)
1154 in_dev->cnf.forwarding = on; 1154 in_dev->cnf.forwarding = on;
1155 rcu_read_unlock(); 1155 rcu_read_unlock();
diff --git a/net/ipv4/fib_frontend.c b/net/ipv4/fib_frontend.c
index 4e1379f71269..e61bc7177eb1 100644
--- a/net/ipv4/fib_frontend.c
+++ b/net/ipv4/fib_frontend.c
@@ -173,7 +173,7 @@ int fib_validate_source(u32 src, u32 dst, u8 tos, int oif,
173 173
174 no_addr = rpf = 0; 174 no_addr = rpf = 0;
175 rcu_read_lock(); 175 rcu_read_lock();
176 in_dev = __in_dev_get(dev); 176 in_dev = __in_dev_get_rcu(dev);
177 if (in_dev) { 177 if (in_dev) {
178 no_addr = in_dev->ifa_list == NULL; 178 no_addr = in_dev->ifa_list == NULL;
179 rpf = IN_DEV_RPFILTER(in_dev); 179 rpf = IN_DEV_RPFILTER(in_dev);
@@ -607,7 +607,7 @@ static int fib_inetaddr_event(struct notifier_block *this, unsigned long event,
607static int fib_netdev_event(struct notifier_block *this, unsigned long event, void *ptr) 607static int fib_netdev_event(struct notifier_block *this, unsigned long event, void *ptr)
608{ 608{
609 struct net_device *dev = ptr; 609 struct net_device *dev = ptr;
610 struct in_device *in_dev = __in_dev_get(dev); 610 struct in_device *in_dev = __in_dev_get_rtnl(dev);
611 611
612 if (event == NETDEV_UNREGISTER) { 612 if (event == NETDEV_UNREGISTER) {
613 fib_disable_ip(dev, 2); 613 fib_disable_ip(dev, 2);
diff --git a/net/ipv4/fib_semantics.c b/net/ipv4/fib_semantics.c
index d41219e8037c..186f20c4a45e 100644
--- a/net/ipv4/fib_semantics.c
+++ b/net/ipv4/fib_semantics.c
@@ -1087,7 +1087,7 @@ fib_convert_rtentry(int cmd, struct nlmsghdr *nl, struct rtmsg *rtm,
1087 rta->rta_oif = &dev->ifindex; 1087 rta->rta_oif = &dev->ifindex;
1088 if (colon) { 1088 if (colon) {
1089 struct in_ifaddr *ifa; 1089 struct in_ifaddr *ifa;
1090 struct in_device *in_dev = __in_dev_get(dev); 1090 struct in_device *in_dev = __in_dev_get_rtnl(dev);
1091 if (!in_dev) 1091 if (!in_dev)
1092 return -ENODEV; 1092 return -ENODEV;
1093 *colon = ':'; 1093 *colon = ':';
@@ -1268,7 +1268,7 @@ int fib_sync_up(struct net_device *dev)
1268 } 1268 }
1269 if (nh->nh_dev == NULL || !(nh->nh_dev->flags&IFF_UP)) 1269 if (nh->nh_dev == NULL || !(nh->nh_dev->flags&IFF_UP))
1270 continue; 1270 continue;
1271 if (nh->nh_dev != dev || __in_dev_get(dev) == NULL) 1271 if (nh->nh_dev != dev || !__in_dev_get_rtnl(dev))
1272 continue; 1272 continue;
1273 alive++; 1273 alive++;
1274 spin_lock_bh(&fib_multipath_lock); 1274 spin_lock_bh(&fib_multipath_lock);
diff --git a/net/ipv4/fib_trie.c b/net/ipv4/fib_trie.c
index 50c0519cd70d..0093ea08c7f5 100644
--- a/net/ipv4/fib_trie.c
+++ b/net/ipv4/fib_trie.c
@@ -286,6 +286,8 @@ static inline void check_tnode(const struct tnode *tn)
286 286
287static int halve_threshold = 25; 287static int halve_threshold = 25;
288static int inflate_threshold = 50; 288static int inflate_threshold = 50;
289static int halve_threshold_root = 15;
290static int inflate_threshold_root = 25;
289 291
290 292
291static void __alias_free_mem(struct rcu_head *head) 293static void __alias_free_mem(struct rcu_head *head)
@@ -449,6 +451,8 @@ static struct node *resize(struct trie *t, struct tnode *tn)
449 int i; 451 int i;
450 int err = 0; 452 int err = 0;
451 struct tnode *old_tn; 453 struct tnode *old_tn;
454 int inflate_threshold_use;
455 int halve_threshold_use;
452 456
453 if (!tn) 457 if (!tn)
454 return NULL; 458 return NULL;
@@ -541,10 +545,17 @@ static struct node *resize(struct trie *t, struct tnode *tn)
541 545
542 check_tnode(tn); 546 check_tnode(tn);
543 547
548 /* Keep root node larger */
549
550 if(!tn->parent)
551 inflate_threshold_use = inflate_threshold_root;
552 else
553 inflate_threshold_use = inflate_threshold;
554
544 err = 0; 555 err = 0;
545 while ((tn->full_children > 0 && 556 while ((tn->full_children > 0 &&
546 50 * (tn->full_children + tnode_child_length(tn) - tn->empty_children) >= 557 50 * (tn->full_children + tnode_child_length(tn) - tn->empty_children) >=
547 inflate_threshold * tnode_child_length(tn))) { 558 inflate_threshold_use * tnode_child_length(tn))) {
548 559
549 old_tn = tn; 560 old_tn = tn;
550 tn = inflate(t, tn); 561 tn = inflate(t, tn);
@@ -564,10 +575,18 @@ static struct node *resize(struct trie *t, struct tnode *tn)
564 * node is above threshold. 575 * node is above threshold.
565 */ 576 */
566 577
578
579 /* Keep root node larger */
580
581 if(!tn->parent)
582 halve_threshold_use = halve_threshold_root;
583 else
584 halve_threshold_use = halve_threshold;
585
567 err = 0; 586 err = 0;
568 while (tn->bits > 1 && 587 while (tn->bits > 1 &&
569 100 * (tnode_child_length(tn) - tn->empty_children) < 588 100 * (tnode_child_length(tn) - tn->empty_children) <
570 halve_threshold * tnode_child_length(tn)) { 589 halve_threshold_use * tnode_child_length(tn)) {
571 590
572 old_tn = tn; 591 old_tn = tn;
573 tn = halve(t, tn); 592 tn = halve(t, tn);
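The fib_trie change keeps the root tnode larger by giving it its own, easier-to-meet inflate threshold and harder-to-meet halve threshold. The check below reuses the inflate test from resize() with invented child counts to show a node that stays put under the normal threshold of 50 but inflates under the root threshold of 25; only the formula is taken from the patch.

/* inflate decision from resize(), evaluated for both thresholds */
#include <stdio.h>

static int should_inflate(int full, int children, int empty, int threshold)
{
        return 50 * (full + children - empty) >= threshold * children;
}

int main(void)
{
        int full = 2, children = 16, empty = 6;     /* made-up node shape */

        printf("non-root (threshold 50) inflates: %d\n",
               should_inflate(full, children, empty, 50));
        printf("root     (threshold 25) inflates: %d\n",
               should_inflate(full, children, empty, 25));
        return 0;
}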
diff --git a/net/ipv4/icmp.c b/net/ipv4/icmp.c
index 24eb56ae1b5a..90dca711ac9f 100644
--- a/net/ipv4/icmp.c
+++ b/net/ipv4/icmp.c
@@ -188,7 +188,7 @@ struct icmp_err icmp_err_convert[] = {
188 188
189/* Control parameters for ECHO replies. */ 189/* Control parameters for ECHO replies. */
190int sysctl_icmp_echo_ignore_all; 190int sysctl_icmp_echo_ignore_all;
191int sysctl_icmp_echo_ignore_broadcasts; 191int sysctl_icmp_echo_ignore_broadcasts = 1;
192 192
193/* Control parameter - ignore bogus broadcast responses? */ 193/* Control parameter - ignore bogus broadcast responses? */
194int sysctl_icmp_ignore_bogus_error_responses; 194int sysctl_icmp_ignore_bogus_error_responses;
diff --git a/net/ipv4/igmp.c b/net/ipv4/igmp.c
index 70c44e4c3ceb..8b6d3939e1e6 100644
--- a/net/ipv4/igmp.c
+++ b/net/ipv4/igmp.c
@@ -1323,7 +1323,7 @@ static struct in_device * ip_mc_find_dev(struct ip_mreqn *imr)
1323 } 1323 }
1324 if (dev) { 1324 if (dev) {
1325 imr->imr_ifindex = dev->ifindex; 1325 imr->imr_ifindex = dev->ifindex;
1326 idev = __in_dev_get(dev); 1326 idev = __in_dev_get_rtnl(dev);
1327 } 1327 }
1328 return idev; 1328 return idev;
1329} 1329}
diff --git a/net/ipv4/inet_timewait_sock.c b/net/ipv4/inet_timewait_sock.c
index 4d1502a49852..f9076ef3a1a8 100644
--- a/net/ipv4/inet_timewait_sock.c
+++ b/net/ipv4/inet_timewait_sock.c
@@ -20,7 +20,7 @@ void __inet_twsk_kill(struct inet_timewait_sock *tw, struct inet_hashinfo *hashi
20 struct inet_bind_hashbucket *bhead; 20 struct inet_bind_hashbucket *bhead;
21 struct inet_bind_bucket *tb; 21 struct inet_bind_bucket *tb;
22 /* Unlink from established hashes. */ 22 /* Unlink from established hashes. */
23 struct inet_ehash_bucket *ehead = &hashinfo->ehash[tw->tw_hashent]; 23 struct inet_ehash_bucket *ehead = inet_ehash_bucket(hashinfo, tw->tw_hash);
24 24
25 write_lock(&ehead->lock); 25 write_lock(&ehead->lock);
26 if (hlist_unhashed(&tw->tw_node)) { 26 if (hlist_unhashed(&tw->tw_node)) {
@@ -60,7 +60,7 @@ void __inet_twsk_hashdance(struct inet_timewait_sock *tw, struct sock *sk,
60{ 60{
61 const struct inet_sock *inet = inet_sk(sk); 61 const struct inet_sock *inet = inet_sk(sk);
62 const struct inet_connection_sock *icsk = inet_csk(sk); 62 const struct inet_connection_sock *icsk = inet_csk(sk);
63 struct inet_ehash_bucket *ehead = &hashinfo->ehash[sk->sk_hashent]; 63 struct inet_ehash_bucket *ehead = inet_ehash_bucket(hashinfo, sk->sk_hash);
64 struct inet_bind_hashbucket *bhead; 64 struct inet_bind_hashbucket *bhead;
65 /* Step 1: Put TW into bind hash. Original socket stays there too. 65 /* Step 1: Put TW into bind hash. Original socket stays there too.
66 Note, that any socket with inet->num != 0 MUST be bound in 66 Note, that any socket with inet->num != 0 MUST be bound in
@@ -106,7 +106,7 @@ struct inet_timewait_sock *inet_twsk_alloc(const struct sock *sk, const int stat
106 tw->tw_dport = inet->dport; 106 tw->tw_dport = inet->dport;
107 tw->tw_family = sk->sk_family; 107 tw->tw_family = sk->sk_family;
108 tw->tw_reuse = sk->sk_reuse; 108 tw->tw_reuse = sk->sk_reuse;
109 tw->tw_hashent = sk->sk_hashent; 109 tw->tw_hash = sk->sk_hash;
110 tw->tw_ipv6only = 0; 110 tw->tw_ipv6only = 0;
111 tw->tw_prot = sk->sk_prot_creator; 111 tw->tw_prot = sk->sk_prot_creator;
112 atomic_set(&tw->tw_refcnt, 1); 112 atomic_set(&tw->tw_refcnt, 1);
diff --git a/net/ipv4/ip_gre.c b/net/ipv4/ip_gre.c
index f0d5740d7e22..896ce3f8f53a 100644
--- a/net/ipv4/ip_gre.c
+++ b/net/ipv4/ip_gre.c
@@ -1104,10 +1104,10 @@ static int ipgre_open(struct net_device *dev)
1104 return -EADDRNOTAVAIL; 1104 return -EADDRNOTAVAIL;
1105 dev = rt->u.dst.dev; 1105 dev = rt->u.dst.dev;
1106 ip_rt_put(rt); 1106 ip_rt_put(rt);
1107 if (__in_dev_get(dev) == NULL) 1107 if (__in_dev_get_rtnl(dev) == NULL)
1108 return -EADDRNOTAVAIL; 1108 return -EADDRNOTAVAIL;
1109 t->mlink = dev->ifindex; 1109 t->mlink = dev->ifindex;
1110 ip_mc_inc_group(__in_dev_get(dev), t->parms.iph.daddr); 1110 ip_mc_inc_group(__in_dev_get_rtnl(dev), t->parms.iph.daddr);
1111 } 1111 }
1112 return 0; 1112 return 0;
1113} 1113}
diff --git a/net/ipv4/ipmr.c b/net/ipv4/ipmr.c
index 9dbf5909f3a6..302b7eb507c9 100644
--- a/net/ipv4/ipmr.c
+++ b/net/ipv4/ipmr.c
@@ -149,7 +149,7 @@ struct net_device *ipmr_new_tunnel(struct vifctl *v)
149 if (err == 0 && (dev = __dev_get_by_name(p.name)) != NULL) { 149 if (err == 0 && (dev = __dev_get_by_name(p.name)) != NULL) {
150 dev->flags |= IFF_MULTICAST; 150 dev->flags |= IFF_MULTICAST;
151 151
152 in_dev = __in_dev_get(dev); 152 in_dev = __in_dev_get_rtnl(dev);
153 if (in_dev == NULL && (in_dev = inetdev_init(dev)) == NULL) 153 if (in_dev == NULL && (in_dev = inetdev_init(dev)) == NULL)
154 goto failure; 154 goto failure;
155 in_dev->cnf.rp_filter = 0; 155 in_dev->cnf.rp_filter = 0;
@@ -278,7 +278,7 @@ static int vif_delete(int vifi)
278 278
279 dev_set_allmulti(dev, -1); 279 dev_set_allmulti(dev, -1);
280 280
281 if ((in_dev = __in_dev_get(dev)) != NULL) { 281 if ((in_dev = __in_dev_get_rtnl(dev)) != NULL) {
282 in_dev->cnf.mc_forwarding--; 282 in_dev->cnf.mc_forwarding--;
283 ip_rt_multicast_event(in_dev); 283 ip_rt_multicast_event(in_dev);
284 } 284 }
@@ -421,7 +421,7 @@ static int vif_add(struct vifctl *vifc, int mrtsock)
421 return -EINVAL; 421 return -EINVAL;
422 } 422 }
423 423
424 if ((in_dev = __in_dev_get(dev)) == NULL) 424 if ((in_dev = __in_dev_get_rtnl(dev)) == NULL)
425 return -EADDRNOTAVAIL; 425 return -EADDRNOTAVAIL;
426 in_dev->cnf.mc_forwarding++; 426 in_dev->cnf.mc_forwarding++;
427 dev_set_allmulti(dev, +1); 427 dev_set_allmulti(dev, +1);
diff --git a/net/ipv4/ipvs/ip_vs_app.c b/net/ipv4/ipvs/ip_vs_app.c
index 6e092dadb388..b942ff3c8860 100644
--- a/net/ipv4/ipvs/ip_vs_app.c
+++ b/net/ipv4/ipvs/ip_vs_app.c
@@ -604,7 +604,7 @@ static struct file_operations ip_vs_app_fops = {
604/* 604/*
605 * Replace a segment of data with a new segment 605 * Replace a segment of data with a new segment
606 */ 606 */
607int ip_vs_skb_replace(struct sk_buff *skb, int pri, 607int ip_vs_skb_replace(struct sk_buff *skb, unsigned int __nocast pri,
608 char *o_buf, int o_len, char *n_buf, int n_len) 608 char *o_buf, int o_len, char *n_buf, int n_len)
609{ 609{
610 struct iphdr *iph; 610 struct iphdr *iph;
diff --git a/net/ipv4/netfilter/Kconfig b/net/ipv4/netfilter/Kconfig
index 2cd7e7d1ac90..a7659728e7a0 100644
--- a/net/ipv4/netfilter/Kconfig
+++ b/net/ipv4/netfilter/Kconfig
@@ -141,7 +141,7 @@ config IP_NF_PPTP
141 tristate 'PPTP protocol support' 141 tristate 'PPTP protocol support'
142 help 142 help
143 This module adds support for PPTP (Point to Point Tunnelling 143 This module adds support for PPTP (Point to Point Tunnelling
144 Protocol, RFC2637) conncection tracking and NAT. 144 Protocol, RFC2637) connection tracking and NAT.
145 145
146 If you are running PPTP sessions over a stateful firewall or NAT 146 If you are running PPTP sessions over a stateful firewall or NAT
147 box, you may want to enable this feature. 147 box, you may want to enable this feature.
diff --git a/net/ipv4/netfilter/ip_conntrack_netbios_ns.c b/net/ipv4/netfilter/ip_conntrack_netbios_ns.c
index 577bac22dcc6..186646eb249f 100644
--- a/net/ipv4/netfilter/ip_conntrack_netbios_ns.c
+++ b/net/ipv4/netfilter/ip_conntrack_netbios_ns.c
@@ -58,7 +58,7 @@ static int help(struct sk_buff **pskb,
58 goto out; 58 goto out;
59 59
60 rcu_read_lock(); 60 rcu_read_lock();
61 in_dev = __in_dev_get(rt->u.dst.dev); 61 in_dev = __in_dev_get_rcu(rt->u.dst.dev);
62 if (in_dev != NULL) { 62 if (in_dev != NULL) {
63 for_primary_ifa(in_dev) { 63 for_primary_ifa(in_dev) {
64 if (ifa->ifa_broadcast == iph->daddr) { 64 if (ifa->ifa_broadcast == iph->daddr) {
diff --git a/net/ipv4/netfilter/ip_queue.c b/net/ipv4/netfilter/ip_queue.c
index d54f14d926f6..36339eb39e17 100644
--- a/net/ipv4/netfilter/ip_queue.c
+++ b/net/ipv4/netfilter/ip_queue.c
@@ -240,8 +240,8 @@ ipq_build_packet_message(struct ipq_queue_entry *entry, int *errp)
240 240
241 pmsg->packet_id = (unsigned long )entry; 241 pmsg->packet_id = (unsigned long )entry;
242 pmsg->data_len = data_len; 242 pmsg->data_len = data_len;
243 pmsg->timestamp_sec = skb_tv_base.tv_sec + entry->skb->tstamp.off_sec; 243 pmsg->timestamp_sec = entry->skb->tstamp.off_sec;
244 pmsg->timestamp_usec = skb_tv_base.tv_usec + entry->skb->tstamp.off_usec; 244 pmsg->timestamp_usec = entry->skb->tstamp.off_usec;
245 pmsg->mark = entry->skb->nfmark; 245 pmsg->mark = entry->skb->nfmark;
246 pmsg->hook = entry->info->hook; 246 pmsg->hook = entry->info->hook;
247 pmsg->hw_protocol = entry->skb->protocol; 247 pmsg->hw_protocol = entry->skb->protocol;
diff --git a/net/ipv4/netfilter/ipt_REDIRECT.c b/net/ipv4/netfilter/ipt_REDIRECT.c
index 715cb613405c..5245bfd33d52 100644
--- a/net/ipv4/netfilter/ipt_REDIRECT.c
+++ b/net/ipv4/netfilter/ipt_REDIRECT.c
@@ -93,7 +93,7 @@ redirect_target(struct sk_buff **pskb,
93 newdst = 0; 93 newdst = 0;
94 94
95 rcu_read_lock(); 95 rcu_read_lock();
96 indev = __in_dev_get((*pskb)->dev); 96 indev = __in_dev_get_rcu((*pskb)->dev);
97 if (indev && (ifa = indev->ifa_list)) 97 if (indev && (ifa = indev->ifa_list))
98 newdst = ifa->ifa_local; 98 newdst = ifa->ifa_local;
99 rcu_read_unlock(); 99 rcu_read_unlock();
diff --git a/net/ipv4/netfilter/ipt_ULOG.c b/net/ipv4/netfilter/ipt_ULOG.c
index e2c14f3cb2fc..2883ccd8a91d 100644
--- a/net/ipv4/netfilter/ipt_ULOG.c
+++ b/net/ipv4/netfilter/ipt_ULOG.c
@@ -225,8 +225,8 @@ static void ipt_ulog_packet(unsigned int hooknum,
225 225
226 /* copy hook, prefix, timestamp, payload, etc. */ 226 /* copy hook, prefix, timestamp, payload, etc. */
227 pm->data_len = copy_len; 227 pm->data_len = copy_len;
228 pm->timestamp_sec = skb_tv_base.tv_sec + skb->tstamp.off_sec; 228 pm->timestamp_sec = skb->tstamp.off_sec;
229 pm->timestamp_usec = skb_tv_base.tv_usec + skb->tstamp.off_usec; 229 pm->timestamp_usec = skb->tstamp.off_usec;
230 pm->mark = skb->nfmark; 230 pm->mark = skb->nfmark;
231 pm->hook = hooknum; 231 pm->hook = hooknum;
232 if (prefix != NULL) 232 if (prefix != NULL)
diff --git a/net/ipv4/route.c b/net/ipv4/route.c
index 8549f26e2495..381dd6a6aebb 100644
--- a/net/ipv4/route.c
+++ b/net/ipv4/route.c
@@ -2128,7 +2128,7 @@ int ip_route_input(struct sk_buff *skb, u32 daddr, u32 saddr,
2128 struct in_device *in_dev; 2128 struct in_device *in_dev;
2129 2129
2130 rcu_read_lock(); 2130 rcu_read_lock();
2131 if ((in_dev = __in_dev_get(dev)) != NULL) { 2131 if ((in_dev = __in_dev_get_rcu(dev)) != NULL) {
2132 int our = ip_check_mc(in_dev, daddr, saddr, 2132 int our = ip_check_mc(in_dev, daddr, saddr,
2133 skb->nh.iph->protocol); 2133 skb->nh.iph->protocol);
2134 if (our 2134 if (our
@@ -2443,7 +2443,9 @@ static int ip_route_output_slow(struct rtable **rp, const struct flowi *oldflp)
2443 err = -ENODEV; 2443 err = -ENODEV;
2444 if (dev_out == NULL) 2444 if (dev_out == NULL)
2445 goto out; 2445 goto out;
2446 if (__in_dev_get(dev_out) == NULL) { 2446
2447 /* RACE: Check return value of inet_select_addr instead. */
2448 if (__in_dev_get_rtnl(dev_out) == NULL) {
2447 dev_put(dev_out); 2449 dev_put(dev_out);
2448 goto out; /* Wrong error code */ 2450 goto out; /* Wrong error code */
2449 } 2451 }
diff --git a/net/ipv4/tcp_bic.c b/net/ipv4/tcp_bic.c
index b940346de4e7..6d80e063c187 100644
--- a/net/ipv4/tcp_bic.c
+++ b/net/ipv4/tcp_bic.c
@@ -136,7 +136,7 @@ static inline void bictcp_update(struct bictcp *ca, u32 cwnd)
136 else if (cwnd < ca->last_max_cwnd + max_increment*(BICTCP_B-1)) 136 else if (cwnd < ca->last_max_cwnd + max_increment*(BICTCP_B-1))
137 /* slow start */ 137 /* slow start */
138 ca->cnt = (cwnd * (BICTCP_B-1)) 138 ca->cnt = (cwnd * (BICTCP_B-1))
139 / cwnd-ca->last_max_cwnd; 139 / (cwnd - ca->last_max_cwnd);
140 else 140 else
141 /* linear increase */ 141 /* linear increase */
142 ca->cnt = cwnd / max_increment; 142 ca->cnt = cwnd / max_increment;
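The one-line tcp_bic.c fix is an operator-precedence bug: without parentheses, C evaluates a / cwnd - ca->last_max_cwnd as (a / cwnd) - ca->last_max_cwnd, which underflows whenever last_max_cwnd exceeds the quotient. A standalone demonstration with arbitrary numbers, assuming BICTCP_B is 4 as in this version of the source:

/* demonstrates the precedence bug fixed in bictcp_update() */
#include <stdio.h>

#define BICTCP_B 4

int main(void)
{
        unsigned int cwnd = 100, last_max_cwnd = 80;

        unsigned int buggy = cwnd * (BICTCP_B - 1) / cwnd - last_max_cwnd;
        unsigned int fixed = cwnd * (BICTCP_B - 1) / (cwnd - last_max_cwnd);

        /* buggy: (300 / 100) - 80 wraps to a huge unsigned value,
         * fixed: 300 / 20 = 15, the intended slow-start divisor */
        printf("buggy=%u fixed=%u\n", buggy, fixed);
        return 0;
}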
diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c
index 13dfb391cdf1..c85819d8474b 100644
--- a/net/ipv4/tcp_ipv4.c
+++ b/net/ipv4/tcp_ipv4.c
@@ -130,19 +130,20 @@ static int __tcp_v4_check_established(struct sock *sk, __u16 lport,
130 int dif = sk->sk_bound_dev_if; 130 int dif = sk->sk_bound_dev_if;
131 INET_ADDR_COOKIE(acookie, saddr, daddr) 131 INET_ADDR_COOKIE(acookie, saddr, daddr)
132 const __u32 ports = INET_COMBINED_PORTS(inet->dport, lport); 132 const __u32 ports = INET_COMBINED_PORTS(inet->dport, lport);
133 const int hash = inet_ehashfn(daddr, lport, saddr, inet->dport, tcp_hashinfo.ehash_size); 133 unsigned int hash = inet_ehashfn(daddr, lport, saddr, inet->dport);
134 struct inet_ehash_bucket *head = &tcp_hashinfo.ehash[hash]; 134 struct inet_ehash_bucket *head = inet_ehash_bucket(&tcp_hashinfo, hash);
135 struct sock *sk2; 135 struct sock *sk2;
136 const struct hlist_node *node; 136 const struct hlist_node *node;
137 struct inet_timewait_sock *tw; 137 struct inet_timewait_sock *tw;
138 138
139 prefetch(head->chain.first);
139 write_lock(&head->lock); 140 write_lock(&head->lock);
140 141
141 /* Check TIME-WAIT sockets first. */ 142 /* Check TIME-WAIT sockets first. */
142 sk_for_each(sk2, node, &(head + tcp_hashinfo.ehash_size)->chain) { 143 sk_for_each(sk2, node, &(head + tcp_hashinfo.ehash_size)->chain) {
143 tw = inet_twsk(sk2); 144 tw = inet_twsk(sk2);
144 145
145 if (INET_TW_MATCH(sk2, acookie, saddr, daddr, ports, dif)) { 146 if (INET_TW_MATCH(sk2, hash, acookie, saddr, daddr, ports, dif)) {
146 const struct tcp_timewait_sock *tcptw = tcp_twsk(sk2); 147 const struct tcp_timewait_sock *tcptw = tcp_twsk(sk2);
147 struct tcp_sock *tp = tcp_sk(sk); 148 struct tcp_sock *tp = tcp_sk(sk);
148 149
@@ -179,7 +180,7 @@ static int __tcp_v4_check_established(struct sock *sk, __u16 lport,
179 180
180 /* And established part... */ 181 /* And established part... */
181 sk_for_each(sk2, node, &head->chain) { 182 sk_for_each(sk2, node, &head->chain) {
182 if (INET_MATCH(sk2, acookie, saddr, daddr, ports, dif)) 183 if (INET_MATCH(sk2, hash, acookie, saddr, daddr, ports, dif))
183 goto not_unique; 184 goto not_unique;
184 } 185 }
185 186
@@ -188,7 +189,7 @@ unique:
188 * in hash table socket with a funny identity. */ 189 * in hash table socket with a funny identity. */
189 inet->num = lport; 190 inet->num = lport;
190 inet->sport = htons(lport); 191 inet->sport = htons(lport);
191 sk->sk_hashent = hash; 192 sk->sk_hash = hash;
192 BUG_TRAP(sk_unhashed(sk)); 193 BUG_TRAP(sk_unhashed(sk));
193 __sk_add_node(sk, &head->chain); 194 __sk_add_node(sk, &head->chain);
194 sock_prot_inc_use(sk->sk_prot); 195 sock_prot_inc_use(sk->sk_prot);
diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c
index 4e509e52fbc1..a970b4727ce8 100644
--- a/net/ipv6/addrconf.c
+++ b/net/ipv6/addrconf.c
@@ -1806,7 +1806,7 @@ static void sit_add_v4_addrs(struct inet6_dev *idev)
1806 } 1806 }
1807 1807
1808 for (dev = dev_base; dev != NULL; dev = dev->next) { 1808 for (dev = dev_base; dev != NULL; dev = dev->next) {
1809 struct in_device * in_dev = __in_dev_get(dev); 1809 struct in_device * in_dev = __in_dev_get_rtnl(dev);
1810 if (in_dev && (dev->flags & IFF_UP)) { 1810 if (in_dev && (dev->flags & IFF_UP)) {
1811 struct in_ifaddr * ifa; 1811 struct in_ifaddr * ifa;
1812 1812
diff --git a/net/ipv6/ip6_output.c b/net/ipv6/ip6_output.c
index 2f589f24c093..563b442ffab8 100644
--- a/net/ipv6/ip6_output.c
+++ b/net/ipv6/ip6_output.c
@@ -666,7 +666,7 @@ slow_path:
666 */ 666 */
667 fh->nexthdr = nexthdr; 667 fh->nexthdr = nexthdr;
668 fh->reserved = 0; 668 fh->reserved = 0;
669 if (frag_id) { 669 if (!frag_id) {
670 ipv6_select_ident(skb, fh); 670 ipv6_select_ident(skb, fh);
671 frag_id = fh->identification; 671 frag_id = fh->identification;
672 } else 672 } else
diff --git a/net/ipv6/mcast.c b/net/ipv6/mcast.c
index 519899fb11d5..39a96c768102 100644
--- a/net/ipv6/mcast.c
+++ b/net/ipv6/mcast.c
@@ -1393,7 +1393,7 @@ static void mld_sendpack(struct sk_buff *skb)
1393 1393
1394static int grec_size(struct ifmcaddr6 *pmc, int type, int gdel, int sdel) 1394static int grec_size(struct ifmcaddr6 *pmc, int type, int gdel, int sdel)
1395{ 1395{
1396 return sizeof(struct mld2_grec) + 4*mld_scount(pmc,type,gdel,sdel); 1396 return sizeof(struct mld2_grec) + 16 * mld_scount(pmc,type,gdel,sdel);
1397} 1397}
1398 1398
1399static struct sk_buff *add_grhead(struct sk_buff *skb, struct ifmcaddr6 *pmc, 1399static struct sk_buff *add_grhead(struct sk_buff *skb, struct ifmcaddr6 *pmc,
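The mcast.c change corrects grec_size(): an MLDv2 group record lists whole IPv6 source addresses, so every counted source adds 16 bytes (sizeof(struct in6_addr)), not the 4 bytes an IPv4 address would. A trivial userspace check of that arithmetic; struct mld2_grec itself is kernel-only, so only the per-source size is shown.

/* per-source size behind the grec_size() fix */
#include <netinet/in.h>
#include <stdio.h>

int main(void)
{
        size_t per_source = sizeof(struct in6_addr);    /* 16 bytes */

        printf("bytes per source address: %zu\n", per_source);
        printf("address payload for 3 sources: %zu\n", 3 * per_source);
        return 0;
}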
diff --git a/net/ipv6/ndisc.c b/net/ipv6/ndisc.c
index 555a31347eda..305d9ee6d7db 100644
--- a/net/ipv6/ndisc.c
+++ b/net/ipv6/ndisc.c
@@ -1450,7 +1450,7 @@ void ndisc_send_redirect(struct sk_buff *skb, struct neighbour *neigh,
1450 1450
1451static void pndisc_redo(struct sk_buff *skb) 1451static void pndisc_redo(struct sk_buff *skb)
1452{ 1452{
1453 ndisc_rcv(skb); 1453 ndisc_recv_ns(skb);
1454 kfree_skb(skb); 1454 kfree_skb(skb);
1455} 1455}
1456 1456
diff --git a/net/ipv6/netfilter/ip6_queue.c b/net/ipv6/netfilter/ip6_queue.c
index aa11cf366efa..5027bbe6415e 100644
--- a/net/ipv6/netfilter/ip6_queue.c
+++ b/net/ipv6/netfilter/ip6_queue.c
@@ -238,8 +238,8 @@ ipq_build_packet_message(struct ipq_queue_entry *entry, int *errp)
238 238
239 pmsg->packet_id = (unsigned long )entry; 239 pmsg->packet_id = (unsigned long )entry;
240 pmsg->data_len = data_len; 240 pmsg->data_len = data_len;
241 pmsg->timestamp_sec = skb_tv_base.tv_sec + entry->skb->tstamp.off_sec; 241 pmsg->timestamp_sec = entry->skb->tstamp.off_sec;
242 pmsg->timestamp_usec = skb_tv_base.tv_usec + entry->skb->tstamp.off_usec; 242 pmsg->timestamp_usec = entry->skb->tstamp.off_usec;
243 pmsg->mark = entry->skb->nfmark; 243 pmsg->mark = entry->skb->nfmark;
244 pmsg->hook = entry->info->hook; 244 pmsg->hook = entry->info->hook;
245 pmsg->hw_protocol = entry->skb->protocol; 245 pmsg->hw_protocol = entry->skb->protocol;
diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c
index 80643e6b346b..d693cb988b78 100644
--- a/net/ipv6/tcp_ipv6.c
+++ b/net/ipv6/tcp_ipv6.c
@@ -209,9 +209,11 @@ static __inline__ void __tcp_v6_hash(struct sock *sk)
209 lock = &tcp_hashinfo.lhash_lock; 209 lock = &tcp_hashinfo.lhash_lock;
210 inet_listen_wlock(&tcp_hashinfo); 210 inet_listen_wlock(&tcp_hashinfo);
211 } else { 211 } else {
212 sk->sk_hashent = inet6_sk_ehashfn(sk, tcp_hashinfo.ehash_size); 212 unsigned int hash;
213 list = &tcp_hashinfo.ehash[sk->sk_hashent].chain; 213 sk->sk_hash = hash = inet6_sk_ehashfn(sk);
214 lock = &tcp_hashinfo.ehash[sk->sk_hashent].lock; 214 hash &= (tcp_hashinfo.ehash_size - 1);
215 list = &tcp_hashinfo.ehash[hash].chain;
216 lock = &tcp_hashinfo.ehash[hash].lock;
215 write_lock(lock); 217 write_lock(lock);
216 } 218 }
217 219
@@ -322,13 +324,13 @@ static int __tcp_v6_check_established(struct sock *sk, const __u16 lport,
322 const struct in6_addr *saddr = &np->daddr; 324 const struct in6_addr *saddr = &np->daddr;
323 const int dif = sk->sk_bound_dev_if; 325 const int dif = sk->sk_bound_dev_if;
324 const u32 ports = INET_COMBINED_PORTS(inet->dport, lport); 326 const u32 ports = INET_COMBINED_PORTS(inet->dport, lport);
325 const int hash = inet6_ehashfn(daddr, inet->num, saddr, inet->dport, 327 unsigned int hash = inet6_ehashfn(daddr, inet->num, saddr, inet->dport);
326 tcp_hashinfo.ehash_size); 328 struct inet_ehash_bucket *head = inet_ehash_bucket(&tcp_hashinfo, hash);
327 struct inet_ehash_bucket *head = &tcp_hashinfo.ehash[hash];
328 struct sock *sk2; 329 struct sock *sk2;
329 const struct hlist_node *node; 330 const struct hlist_node *node;
330 struct inet_timewait_sock *tw; 331 struct inet_timewait_sock *tw;
331 332
333 prefetch(head->chain.first);
332 write_lock(&head->lock); 334 write_lock(&head->lock);
333 335
334 /* Check TIME-WAIT sockets first. */ 336 /* Check TIME-WAIT sockets first. */
@@ -365,14 +367,14 @@ static int __tcp_v6_check_established(struct sock *sk, const __u16 lport,
365 367
366 /* And established part... */ 368 /* And established part... */
367 sk_for_each(sk2, node, &head->chain) { 369 sk_for_each(sk2, node, &head->chain) {
368 if (INET6_MATCH(sk2, saddr, daddr, ports, dif)) 370 if (INET6_MATCH(sk2, hash, saddr, daddr, ports, dif))
369 goto not_unique; 371 goto not_unique;
370 } 372 }
371 373
372unique: 374unique:
373 BUG_TRAP(sk_unhashed(sk)); 375 BUG_TRAP(sk_unhashed(sk));
374 __sk_add_node(sk, &head->chain); 376 __sk_add_node(sk, &head->chain);
375 sk->sk_hashent = hash; 377 sk->sk_hash = hash;
376 sock_prot_inc_use(sk->sk_prot); 378 sock_prot_inc_use(sk->sk_prot);
377 write_unlock(&head->lock); 379 write_unlock(&head->lock);
378 380
diff --git a/net/ipv6/udp.c b/net/ipv6/udp.c
index 6001948600f3..bf9519341fd3 100644
--- a/net/ipv6/udp.c
+++ b/net/ipv6/udp.c
@@ -99,7 +99,7 @@ static int udp_v6_get_port(struct sock *sk, unsigned short snum)
99 next:; 99 next:;
100 } 100 }
101 result = best; 101 result = best;
102 for(;; result += UDP_HTABLE_SIZE) { 102 for(i = 0; i < (1 << 16) / UDP_HTABLE_SIZE; i++, result += UDP_HTABLE_SIZE) {
103 if (result > sysctl_local_port_range[1]) 103 if (result > sysctl_local_port_range[1])
104 result = sysctl_local_port_range[0] 104 result = sysctl_local_port_range[0]
105 + ((result - sysctl_local_port_range[0]) & 105 + ((result - sysctl_local_port_range[0]) &
@@ -107,6 +107,8 @@ static int udp_v6_get_port(struct sock *sk, unsigned short snum)
107 if (!udp_lport_inuse(result)) 107 if (!udp_lport_inuse(result))
108 break; 108 break;
109 } 109 }
110 if (i >= (1 << 16) / UDP_HTABLE_SIZE)
111 goto fail;
110gotit: 112gotit:
111 udp_port_rover = snum = result; 113 udp_port_rover = snum = result;
112 } else { 114 } else {
@@ -852,10 +854,16 @@ do_append_data:
852 else if (!corkreq) 854 else if (!corkreq)
853 err = udp_v6_push_pending_frames(sk, up); 855 err = udp_v6_push_pending_frames(sk, up);
854 856
855 if (dst && connected) 857 if (dst) {
856 ip6_dst_store(sk, dst, 858 if (connected) {
857 ipv6_addr_equal(&fl->fl6_dst, &np->daddr) ? 859 ip6_dst_store(sk, dst,
858 &np->daddr : NULL); 860 ipv6_addr_equal(&fl->fl6_dst, &np->daddr) ?
861 &np->daddr : NULL);
862 } else {
863 dst_release(dst);
864 }
865 }
866
859 if (err > 0) 867 if (err > 0)
860 err = np->recverr ? net_xmit_errno(err) : 0; 868 err = np->recverr ? net_xmit_errno(err) : 0;
861 release_sock(sk); 869 release_sock(sk);
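Two things change in the udp.c hunks above: the ephemeral-port rover loop in udp_v6_get_port() is bounded, so when every candidate port in the hash stride is taken the bind fails instead of spinning forever, and an unconnected socket now releases the route with dst_release() instead of leaking it. A simplified model of the bounded search; UDP_HTABLE_SIZE and the port range mirror the usual kernel defaults, and in_use[] stands in for udp_lport_inuse().

/* bounded rover search, modeled after the patched udp_v6_get_port() */
#include <stdio.h>
#include <string.h>

#define UDP_HTABLE_SIZE 128
#define PORT_LO 32768
#define PORT_HI 61000

static unsigned char in_use[1 << 16];

static int pick_port(int start)
{
        int result = start, i;

        /* at most one probe per hash-stride slot; the old for (;;)
         * loop could spin forever once every candidate was taken */
        for (i = 0; i < (1 << 16) / UDP_HTABLE_SIZE;
             i++, result += UDP_HTABLE_SIZE) {
                if (result > PORT_HI)
                        result = PORT_LO +
                                 ((result - PORT_LO) & (UDP_HTABLE_SIZE - 1));
                if (!in_use[result])
                        return result;
        }
        return -1;                      /* all candidates busy: fail */
}

int main(void)
{
        memset(in_use, 1, sizeof(in_use));      /* everything taken */
        printf("fully loaded table: %d\n", pick_port(40000));

        in_use[40000 + 3 * UDP_HTABLE_SIZE] = 0;
        printf("one free slot:      %d\n", pick_port(40000));
        return 0;
}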
diff --git a/net/irda/irlan/irlan_eth.c b/net/irda/irlan/irlan_eth.c
index 071cd2cefd8a..953e255d2bc8 100644
--- a/net/irda/irlan/irlan_eth.c
+++ b/net/irda/irlan/irlan_eth.c
@@ -310,7 +310,7 @@ void irlan_eth_send_gratuitous_arp(struct net_device *dev)
310#ifdef CONFIG_INET 310#ifdef CONFIG_INET
311 IRDA_DEBUG(4, "IrLAN: Sending gratuitous ARP\n"); 311 IRDA_DEBUG(4, "IrLAN: Sending gratuitous ARP\n");
312 rcu_read_lock(); 312 rcu_read_lock();
313 in_dev = __in_dev_get(dev); 313 in_dev = __in_dev_get_rcu(dev);
314 if (in_dev == NULL) 314 if (in_dev == NULL)
315 goto out; 315 goto out;
316 if (in_dev->ifa_list) 316 if (in_dev->ifa_list)
diff --git a/net/key/af_key.c b/net/key/af_key.c
index 4879743b945a..bbf0f69181ba 100644
--- a/net/key/af_key.c
+++ b/net/key/af_key.c
@@ -185,7 +185,7 @@ static int pfkey_release(struct socket *sock)
185} 185}
186 186
187static int pfkey_broadcast_one(struct sk_buff *skb, struct sk_buff **skb2, 187static int pfkey_broadcast_one(struct sk_buff *skb, struct sk_buff **skb2,
188 int allocation, struct sock *sk) 188 unsigned int __nocast allocation, struct sock *sk)
189{ 189{
190 int err = -ENOBUFS; 190 int err = -ENOBUFS;
191 191
@@ -217,7 +217,7 @@ static int pfkey_broadcast_one(struct sk_buff *skb, struct sk_buff **skb2,
217#define BROADCAST_ONE 1 217#define BROADCAST_ONE 1
218#define BROADCAST_REGISTERED 2 218#define BROADCAST_REGISTERED 2
219#define BROADCAST_PROMISC_ONLY 4 219#define BROADCAST_PROMISC_ONLY 4
220static int pfkey_broadcast(struct sk_buff *skb, int allocation, 220static int pfkey_broadcast(struct sk_buff *skb, unsigned int __nocast allocation,
221 int broadcast_flags, struct sock *one_sk) 221 int broadcast_flags, struct sock *one_sk)
222{ 222{
223 struct sock *sk; 223 struct sock *sk;
@@ -1416,7 +1416,8 @@ static int pfkey_get(struct sock *sk, struct sk_buff *skb, struct sadb_msg *hdr,
1416 return 0; 1416 return 0;
1417} 1417}
1418 1418
1419static struct sk_buff *compose_sadb_supported(struct sadb_msg *orig, int allocation) 1419static struct sk_buff *compose_sadb_supported(struct sadb_msg *orig,
1420 unsigned int __nocast allocation)
1420{ 1421{
1421 struct sk_buff *skb; 1422 struct sk_buff *skb;
1422 struct sadb_msg *hdr; 1423 struct sadb_msg *hdr;
@@ -2153,6 +2154,7 @@ out:
2153 2154
2154static int pfkey_spdget(struct sock *sk, struct sk_buff *skb, struct sadb_msg *hdr, void **ext_hdrs) 2155static int pfkey_spdget(struct sock *sk, struct sk_buff *skb, struct sadb_msg *hdr, void **ext_hdrs)
2155{ 2156{
2157 unsigned int dir;
2156 int err; 2158 int err;
2157 struct sadb_x_policy *pol; 2159 struct sadb_x_policy *pol;
2158 struct xfrm_policy *xp; 2160 struct xfrm_policy *xp;
@@ -2161,7 +2163,11 @@ static int pfkey_spdget(struct sock *sk, struct sk_buff *skb, struct sadb_msg *h
2161 if ((pol = ext_hdrs[SADB_X_EXT_POLICY-1]) == NULL) 2163 if ((pol = ext_hdrs[SADB_X_EXT_POLICY-1]) == NULL)
2162 return -EINVAL; 2164 return -EINVAL;
2163 2165
2164 xp = xfrm_policy_byid(0, pol->sadb_x_policy_id, 2166 dir = xfrm_policy_id2dir(pol->sadb_x_policy_id);
2167 if (dir >= XFRM_POLICY_MAX)
2168 return -EINVAL;
2169
2170 xp = xfrm_policy_byid(dir, pol->sadb_x_policy_id,
2165 hdr->sadb_msg_type == SADB_X_SPDDELETE2); 2171 hdr->sadb_msg_type == SADB_X_SPDDELETE2);
2166 if (xp == NULL) 2172 if (xp == NULL)
2167 return -ENOENT; 2173 return -ENOENT;
@@ -2173,9 +2179,9 @@ static int pfkey_spdget(struct sock *sk, struct sk_buff *skb, struct sadb_msg *h
2173 if (hdr->sadb_msg_type == SADB_X_SPDDELETE2) { 2179 if (hdr->sadb_msg_type == SADB_X_SPDDELETE2) {
2174 c.data.byid = 1; 2180 c.data.byid = 1;
2175 c.event = XFRM_MSG_DELPOLICY; 2181 c.event = XFRM_MSG_DELPOLICY;
2176 km_policy_notify(xp, pol->sadb_x_policy_dir-1, &c); 2182 km_policy_notify(xp, dir, &c);
2177 } else { 2183 } else {
2178 err = key_pol_get_resp(sk, xp, hdr, pol->sadb_x_policy_dir-1); 2184 err = key_pol_get_resp(sk, xp, hdr, dir);
2179 } 2185 }
2180 2186
2181 xfrm_pol_put(xp); 2187 xfrm_pol_put(xp);
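In pfkey_spdget() the policy direction is now derived from the policy id with xfrm_policy_id2dir() and rejected when it is not below XFRM_POLICY_MAX, instead of trusting the userspace-supplied sadb_x_policy_dir field. A minimal sketch of that validate-before-use shape, with policy_id2dir() and POLICY_MAX as stand-ins for the xfrm helpers:

/* derive the direction from the id, refuse it before any lookup */
#include <stdint.h>
#include <stdio.h>

#define POLICY_MAX 3                    /* in, out, fwd */

static unsigned int policy_id2dir(uint32_t id)
{
        return id & 7;                  /* direction lives in the low bits */
}

static int spd_get(uint32_t policy_id)
{
        unsigned int dir = policy_id2dir(policy_id);

        if (dir >= POLICY_MAX)          /* reject before indexing anything */
                return -22;             /* -EINVAL */

        printf("lookup policy %u in direction %u\n", policy_id, dir);
        return 0;
}

int main(void)
{
        printf("id 16 -> %d\n", spd_get(16));   /* dir 0: accepted */
        printf("id 21 -> %d\n", spd_get(21));   /* dir 5: rejected */
        return 0;
}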
diff --git a/net/netfilter/nfnetlink.c b/net/netfilter/nfnetlink.c
index 49a3900e3d32..34d671974a4d 100644
--- a/net/netfilter/nfnetlink.c
+++ b/net/netfilter/nfnetlink.c
@@ -195,7 +195,8 @@ nfnetlink_check_attributes(struct nfnetlink_subsystem *subsys,
195 195
196int nfnetlink_send(struct sk_buff *skb, u32 pid, unsigned group, int echo) 196int nfnetlink_send(struct sk_buff *skb, u32 pid, unsigned group, int echo)
197{ 197{
198 int allocation = in_interrupt() ? GFP_ATOMIC : GFP_KERNEL; 198 unsigned int __nocast allocation =
199 in_interrupt() ? GFP_ATOMIC : GFP_KERNEL;
199 int err = 0; 200 int err = 0;
200 201
201 NETLINK_CB(skb).dst_group = group; 202 NETLINK_CB(skb).dst_group = group;
diff --git a/net/netfilter/nfnetlink_log.c b/net/netfilter/nfnetlink_log.c
index ff5601ceedcb..efcd10f996ba 100644
--- a/net/netfilter/nfnetlink_log.c
+++ b/net/netfilter/nfnetlink_log.c
@@ -494,8 +494,8 @@ __build_packet_message(struct nfulnl_instance *inst,
494 if (skb->tstamp.off_sec) { 494 if (skb->tstamp.off_sec) {
495 struct nfulnl_msg_packet_timestamp ts; 495 struct nfulnl_msg_packet_timestamp ts;
496 496
497 ts.sec = cpu_to_be64(skb_tv_base.tv_sec + skb->tstamp.off_sec); 497 ts.sec = cpu_to_be64(skb->tstamp.off_sec);
498 ts.usec = cpu_to_be64(skb_tv_base.tv_usec + skb->tstamp.off_usec); 498 ts.usec = cpu_to_be64(skb->tstamp.off_usec);
499 499
500 NFA_PUT(inst->skb, NFULA_TIMESTAMP, sizeof(ts), &ts); 500 NFA_PUT(inst->skb, NFULA_TIMESTAMP, sizeof(ts), &ts);
501 } 501 }
diff --git a/net/netfilter/nfnetlink_queue.c b/net/netfilter/nfnetlink_queue.c
index f81fe8c52e99..eaa44c49567b 100644
--- a/net/netfilter/nfnetlink_queue.c
+++ b/net/netfilter/nfnetlink_queue.c
@@ -492,8 +492,8 @@ nfqnl_build_packet_message(struct nfqnl_instance *queue,
492 if (entry->skb->tstamp.off_sec) { 492 if (entry->skb->tstamp.off_sec) {
493 struct nfqnl_msg_packet_timestamp ts; 493 struct nfqnl_msg_packet_timestamp ts;
494 494
495 ts.sec = cpu_to_be64(skb_tv_base.tv_sec + entry->skb->tstamp.off_sec); 495 ts.sec = cpu_to_be64(entry->skb->tstamp.off_sec);
496 ts.usec = cpu_to_be64(skb_tv_base.tv_usec + entry->skb->tstamp.off_usec); 496 ts.usec = cpu_to_be64(entry->skb->tstamp.off_usec);
497 497
498 NFA_PUT(skb, NFQA_TIMESTAMP, sizeof(ts), &ts); 498 NFA_PUT(skb, NFQA_TIMESTAMP, sizeof(ts), &ts);
499 } 499 }
diff --git a/net/netrom/nr_dev.c b/net/netrom/nr_dev.c
index 4e66eef9a034..509afddae569 100644
--- a/net/netrom/nr_dev.c
+++ b/net/netrom/nr_dev.c
@@ -58,7 +58,7 @@ int nr_rx_ip(struct sk_buff *skb, struct net_device *dev)
58 58
59 /* Spoof incoming device */ 59 /* Spoof incoming device */
60 skb->dev = dev; 60 skb->dev = dev;
61 skb->h.raw = skb->data; 61 skb->mac.raw = skb->nh.raw;
62 skb->nh.raw = skb->data; 62 skb->nh.raw = skb->data;
63 skb->pkt_type = PACKET_HOST; 63 skb->pkt_type = PACKET_HOST;
64 64
diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c
index 6a67a87384cc..499ae3df4a44 100644
--- a/net/packet/af_packet.c
+++ b/net/packet/af_packet.c
@@ -654,8 +654,8 @@ static int tpacket_rcv(struct sk_buff *skb, struct net_device *dev, struct packe
654 __net_timestamp(skb); 654 __net_timestamp(skb);
655 sock_enable_timestamp(sk); 655 sock_enable_timestamp(sk);
656 } 656 }
657 h->tp_sec = skb_tv_base.tv_sec + skb->tstamp.off_sec; 657 h->tp_sec = skb->tstamp.off_sec;
658 h->tp_usec = skb_tv_base.tv_usec + skb->tstamp.off_usec; 658 h->tp_usec = skb->tstamp.off_usec;
659 659
660 sll = (struct sockaddr_ll*)((u8*)h + TPACKET_ALIGN(sizeof(*h))); 660 sll = (struct sockaddr_ll*)((u8*)h + TPACKET_ALIGN(sizeof(*h)));
661 sll->sll_halen = 0; 661 sll->sll_halen = 0;
diff --git a/net/rxrpc/call.c b/net/rxrpc/call.c
index 5cfd4cadee42..86f777052633 100644
--- a/net/rxrpc/call.c
+++ b/net/rxrpc/call.c
@@ -1923,7 +1923,7 @@ int rxrpc_call_write_data(struct rxrpc_call *call,
1923 size_t sioc, 1923 size_t sioc,
1924 struct kvec *siov, 1924 struct kvec *siov,
1925 u8 rxhdr_flags, 1925 u8 rxhdr_flags,
1926 int alloc_flags, 1926 unsigned int __nocast alloc_flags,
1927 int dup_data, 1927 int dup_data,
1928 size_t *size_sent) 1928 size_t *size_sent)
1929{ 1929{
diff --git a/net/rxrpc/connection.c b/net/rxrpc/connection.c
index 61463c74f8cc..be4b2be58956 100644
--- a/net/rxrpc/connection.c
+++ b/net/rxrpc/connection.c
@@ -522,7 +522,7 @@ int rxrpc_conn_newmsg(struct rxrpc_connection *conn,
522 uint8_t type, 522 uint8_t type,
523 int dcount, 523 int dcount,
524 struct kvec diov[], 524 struct kvec diov[],
525 int alloc_flags, 525 unsigned int __nocast alloc_flags,
526 struct rxrpc_message **_msg) 526 struct rxrpc_message **_msg)
527{ 527{
528 struct rxrpc_message *msg; 528 struct rxrpc_message *msg;
diff --git a/net/sched/em_meta.c b/net/sched/em_meta.c
index 00eae5f9a01a..cf68a59fdc5a 100644
--- a/net/sched/em_meta.c
+++ b/net/sched/em_meta.c
@@ -393,10 +393,10 @@ META_COLLECTOR(int_sk_route_caps)
 	dst->value = skb->sk->sk_route_caps;
 }
 
-META_COLLECTOR(int_sk_hashent)
+META_COLLECTOR(int_sk_hash)
 {
 	SKIP_NONLOCAL(skb);
-	dst->value = skb->sk->sk_hashent;
+	dst->value = skb->sk->sk_hash;
 }
 
 META_COLLECTOR(int_sk_lingertime)
@@ -515,7 +515,7 @@ static struct meta_ops __meta_ops[TCF_META_TYPE_MAX+1][TCF_META_ID_MAX+1] = {
 		[META_ID(SK_FORWARD_ALLOCS)] = META_FUNC(int_sk_fwd_alloc),
 		[META_ID(SK_ALLOCS)] = META_FUNC(int_sk_alloc),
 		[META_ID(SK_ROUTE_CAPS)] = META_FUNC(int_sk_route_caps),
-		[META_ID(SK_HASHENT)] = META_FUNC(int_sk_hashent),
+		[META_ID(SK_HASH)] = META_FUNC(int_sk_hash),
 		[META_ID(SK_LINGERTIME)] = META_FUNC(int_sk_lingertime),
 		[META_ID(SK_ACK_BACKLOG)] = META_FUNC(int_sk_ack_bl),
 		[META_ID(SK_MAX_ACK_BACKLOG)] = META_FUNC(int_sk_max_ack_bl),
diff --git a/net/sctp/protocol.c b/net/sctp/protocol.c
index e7025be77691..f01d1c9002a1 100644
--- a/net/sctp/protocol.c
+++ b/net/sctp/protocol.c
@@ -147,7 +147,7 @@ static void sctp_v4_copy_addrlist(struct list_head *addrlist,
 	struct sctp_sockaddr_entry *addr;
 
 	rcu_read_lock();
-	if ((in_dev = __in_dev_get(dev)) == NULL) {
+	if ((in_dev = __in_dev_get_rcu(dev)) == NULL) {
 		rcu_read_unlock();
 		return;
 	}
diff --git a/net/sunrpc/sched.c b/net/sunrpc/sched.c
index f3104035e35d..ade730eaf401 100644
--- a/net/sunrpc/sched.c
+++ b/net/sunrpc/sched.c
@@ -719,7 +719,7 @@ static void rpc_async_schedule(void *arg)
 void *
 rpc_malloc(struct rpc_task *task, size_t size)
 {
-	int gfp;
+	unsigned int __nocast gfp;
 
 	if (task->tk_flags & RPC_TASK_SWAPPER)
 		gfp = GFP_ATOMIC;
diff --git a/net/sysctl_net.c b/net/sysctl_net.c
index c5241fcbb966..55538f6b60ff 100644
--- a/net/sysctl_net.c
+++ b/net/sysctl_net.c
@@ -16,6 +16,8 @@
 #include <linux/mm.h>
 #include <linux/sysctl.h>
 
+#include <net/sock.h>
+
 #ifdef CONFIG_INET
 #include <net/ip.h>
 #endif
diff --git a/net/xfrm/xfrm_policy.c b/net/xfrm/xfrm_policy.c
index fda737d77edc..061b44cc2451 100644
--- a/net/xfrm/xfrm_policy.c
+++ b/net/xfrm/xfrm_policy.c
@@ -163,7 +163,7 @@ static void xfrm_policy_timer(unsigned long data)
 	if (xp->dead)
 		goto out;
 
-	dir = xp->index & 7;
+	dir = xfrm_policy_id2dir(xp->index);
 
 	if (xp->lft.hard_add_expires_seconds) {
 		long tmo = xp->lft.hard_add_expires_seconds +
@@ -225,7 +225,7 @@ expired:
  * SPD calls.
  */
 
-struct xfrm_policy *xfrm_policy_alloc(int gfp)
+struct xfrm_policy *xfrm_policy_alloc(unsigned int __nocast gfp)
 {
 	struct xfrm_policy *policy;
 
@@ -417,7 +417,7 @@ struct xfrm_policy *xfrm_policy_byid(int dir, u32 id, int delete)
 	struct xfrm_policy *pol, **p;
 
 	write_lock_bh(&xfrm_policy_lock);
-	for (p = &xfrm_policy_list[id & 7]; (pol=*p)!=NULL; p = &pol->next) {
+	for (p = &xfrm_policy_list[dir]; (pol=*p)!=NULL; p = &pol->next) {
 		if (pol->index == id) {
 			xfrm_pol_hold(pol);
 			if (delete)