author    Thomas Gleixner <tglx@tglx.tec.linutronix.de>  2005-05-23 09:11:45 -0400
committer Thomas Gleixner <tglx@mtd.linutronix.de>  2005-05-23 09:11:45 -0400
commit    f08276136bdc8607c1da493279569beb9859b133
tree      5a4e7ea9300eece5ff5187fa7f64f0f48f37cf12
parent    7d27c8143c8234e1cae8285fd2d43c19dad69bde
parent    1263cc67c09bc7f913a6877f3ba0427f0b76617e

Merge with rsync://rsync.kernel.org/pub/scm/linux/kernel/git/torvalds/linux-2.6.git
---
 Documentation/cpusets.txt                |    3
 Documentation/x86_64/boot-options.txt    |    3
 arch/i386/kernel/cpu/amd.c               |    9
 arch/i386/kernel/cpu/common.c            |    5
 arch/i386/kernel/smpboot.c               |    1
 arch/i386/mm/ioremap.c                   |   10
 arch/ppc/kernel/head_44x.S               |   15
 arch/ppc/kernel/setup.c                  |    2
 arch/ppc/lib/string.S                    |    7
 arch/ppc/mm/init.c                       |    1
 arch/ppc/syslib/mpc83xx_devices.c        |    1
 arch/ppc/syslib/mpc85xx_devices.c        |    1
 arch/ppc/syslib/open_pic.c               |    2
 arch/ppc64/kernel/prom_init.c            |   44
 arch/sparc64/kernel/pci_iommu.c          |  167
 arch/sparc64/kernel/sbus.c               |   31
 arch/um/Kconfig_x86_64                   |    4
 arch/um/drivers/chan_kern.c              |   22
 arch/um/drivers/mcast_kern.c             |    4
 arch/um/drivers/mcast_user.c             |   47
 arch/um/drivers/ubd_kern.c               |  296
 arch/um/include/sysdep-i386/ptrace.h     |    2
 arch/um/include/sysdep-x86_64/checksum.h |   26
 arch/um/include/sysdep-x86_64/ptrace.h   |   62
 arch/um/kernel/Makefile                  |    2
 arch/um/kernel/checksum.c                |    0
 arch/um/kernel/initrd.c                  |   78
 arch/um/kernel/ksyms.c                   |    1
 arch/um/kernel/mem.c                     |   40
 arch/um/kernel/ptrace.c                  |    6
 arch/um/kernel/trap_kern.c               |   36
 arch/um/kernel/tt/ksyms.c                |    1
 arch/um/kernel/uml.lds.S                 |    2
 arch/um/sys-i386/Makefile                |    4
 arch/um/sys-i386/delay.c                 |   16
 arch/um/sys-x86_64/Makefile              |    6
 arch/um/sys-x86_64/delay.c               |   33
 arch/um/sys-x86_64/ksyms.c               |    3
 arch/um/sys-x86_64/ptrace.c              |    9
 arch/um/sys-x86_64/syscalls.c            |    1
 arch/um/sys-x86_64/user-offsets.c        |    8
 arch/x86_64/kernel/io_apic.c             |   11
 arch/x86_64/kernel/ptrace.c              |    4
 arch/x86_64/kernel/setup.c               |   25
 arch/x86_64/kernel/signal.c              |    4
 arch/x86_64/kernel/smpboot.c             |    1
 arch/x86_64/mm/ioremap.c                 |   29
 drivers/block/pktcdvd.c                  |    8
 drivers/char/ipmi/ipmi_devintf.c         |   20
 drivers/i2c/busses/i2c-keywest.c         |    5
 drivers/mmc/mmc_block.c                  |    5
 drivers/net/tg3.c                        |  480
 drivers/net/tg3.h                        |    8
 drivers/sbus/char/aurora.c               |    8
 drivers/scsi/aic7xxx/aic7770_osm.c       |   52
 drivers/scsi/aic7xxx/aic7xxx_osm.c       | 1400
 drivers/scsi/aic7xxx/aic7xxx_osm.h       |  169
 drivers/scsi/aic7xxx/aic7xxx_osm_pci.c   |   11
 drivers/scsi/aic7xxx/aic7xxx_proc.c      |   13
 drivers/scsi/aic7xxx/aiclib.c            |    1
 drivers/scsi/scsi_transport_spi.c        |  188
 drivers/serial/8250.c                    |   17
 drivers/serial/sunsab.c                  |  109
 drivers/serial/sunsab.h                  |    1
 fs/namei.c                               |    1
 fs/reiserfs/stree.c                      |    1
 fs/reiserfs/super.c                      |    4
 include/asm-ia64/ioctl32.h               |    0
 include/asm-um/arch-signal-i386.h        |    0
 include/asm-um/elf-i386.h                |    2
 include/asm-um/elf-x86_64.h              |   24
 include/asm-x86_64/ioctl32.h             |    0
 include/linux/err.h                      |    4
 include/linux/mmc/protocol.h             |   27
 include/linux/spinlock.h                 |    8
 include/linux/vmalloc.h                  |    1
 include/net/act_generic.h                |    4
 include/scsi/scsi_transport_spi.h        |    6
 kernel/sched.c                           |    2
 kernel/spinlock.c                        |    8
 mm/filemap.c                             |    2
 mm/mmap.c                                |   59
 mm/vmalloc.c                             |   33
 net/ipv4/ip_output.c                     |    8
 net/ipv4/ipvs/ip_vs_xmit.c               |    1
 net/ipv4/netfilter/ip_conntrack_core.c   |   28
 net/ipv6/ip6_output.c                    |   14
 net/netlink/af_netlink.c                 |   13
 net/unix/af_unix.c                       |   28
 net/xfrm/xfrm_algo.c                     |    2
 net/xfrm/xfrm_user.c                     |   15
 91 files changed, 1639 insertions(+), 2236 deletions(-)
diff --git a/Documentation/cpusets.txt b/Documentation/cpusets.txt
index 1ad26d2c20ae..2f8f24eaefd9 100644
--- a/Documentation/cpusets.txt
+++ b/Documentation/cpusets.txt
@@ -252,8 +252,7 @@ in a tasks processor placement.
 There is an exception to the above.  If hotplug funtionality is used
 to remove all the CPUs that are currently assigned to a cpuset,
 then the kernel will automatically update the cpus_allowed of all
-tasks attached to CPUs in that cpuset with the online CPUs of the
-nearest parent cpuset that still has some CPUs online.  When memory
+tasks attached to CPUs in that cpuset to allow all CPUs.  When memory
 hotplug functionality for removing Memory Nodes is available, a
 similar exception is expected to apply there as well.  In general,
 the kernel prefers to violate cpuset placement, over starving a task
diff --git a/Documentation/x86_64/boot-options.txt b/Documentation/x86_64/boot-options.txt
index 44b6eea60ece..b9e6be00cadf 100644
--- a/Documentation/x86_64/boot-options.txt
+++ b/Documentation/x86_64/boot-options.txt
@@ -25,6 +25,9 @@ APICs
 
   noapictimer	Don't set up the APIC timer
 
+  no_timer_check Don't check the IO-APIC timer. This can work around
+		problems with incorrect timer initialization on some boards.
+
 Early Console
 
   syntax: earlyprintk=vga
diff --git a/arch/i386/kernel/cpu/amd.c b/arch/i386/kernel/cpu/amd.c
index fa34a06c0d79..73aeaf5a9d4e 100644
--- a/arch/i386/kernel/cpu/amd.c
+++ b/arch/i386/kernel/cpu/amd.c
@@ -195,7 +195,7 @@ static void __init init_amd(struct cpuinfo_x86 *c)
 		c->x86_num_cores = 1;
 	}
 
-#ifdef CONFIG_X86_SMP
+#ifdef CONFIG_X86_HT
 	/*
 	 * On a AMD dual core setup the lower bits of the APIC id
 	 * distingush the cores.  Assumes number of cores is a power
@@ -203,8 +203,11 @@ static void __init init_amd(struct cpuinfo_x86 *c)
 	 */
 	if (c->x86_num_cores > 1) {
 		int cpu = smp_processor_id();
-		/* Fix up the APIC ID following AMD specifications. */
-		cpu_core_id[cpu] >>= hweight32(c->x86_num_cores - 1);
+		unsigned bits = 0;
+		while ((1 << bits) < c->x86_num_cores)
+			bits++;
+		cpu_core_id[cpu] = phys_proc_id[cpu] & ((1<<bits)-1);
+		phys_proc_id[cpu] >>= bits;
 		printk(KERN_INFO "CPU %d(%d) -> Core %d\n",
 		       cpu, c->x86_num_cores, cpu_core_id[cpu]);
 	}
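
Note: the init_amd() rework above derives the width of the core field with a shift loop and then splits the APIC id into core and package parts. A standalone sketch of the same computation (a sketch only; apic_id and num_cores are illustrative names, not kernel variables):

#include <stdio.h>

int main(void)
{
	unsigned apic_id = 0x5;		/* assumed initial APIC id */
	unsigned num_cores = 2;		/* assumed cores per package */
	unsigned bits = 0;

	/* bits = ceil(log2(num_cores)), exactly as in the hunk above */
	while ((1u << bits) < num_cores)
		bits++;

	printf("core %u, package %u\n",
	       apic_id & ((1u << bits) - 1),	/* low bits select the core */
	       apic_id >> bits);		/* remaining bits: package id */
	return 0;
}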
diff --git a/arch/i386/kernel/cpu/common.c b/arch/i386/kernel/cpu/common.c
index 11e6e6f23fa0..d199e525680a 100644
--- a/arch/i386/kernel/cpu/common.c
+++ b/arch/i386/kernel/cpu/common.c
@@ -244,11 +244,8 @@ static void __init early_cpu_detect(void)
 
 	early_intel_workaround(c);
 
-#ifdef CONFIG_SMP
 #ifdef CONFIG_X86_HT
-	phys_proc_id[smp_processor_id()] =
-#endif
-	cpu_core_id[smp_processor_id()] = (cpuid_ebx(1) >> 24) & 0xff;
+	phys_proc_id[smp_processor_id()] = (cpuid_ebx(1) >> 24) & 0xff;
 #endif
 }
 
diff --git a/arch/i386/kernel/smpboot.c b/arch/i386/kernel/smpboot.c
index cbea7ac582e5..35bfe138cb1a 100644
--- a/arch/i386/kernel/smpboot.c
+++ b/arch/i386/kernel/smpboot.c
@@ -888,6 +888,7 @@ void *xquad_portio;
 
 cpumask_t cpu_sibling_map[NR_CPUS] __cacheline_aligned;
 cpumask_t cpu_core_map[NR_CPUS] __cacheline_aligned;
+EXPORT_SYMBOL(cpu_core_map);
 
 static void __init smp_boot_cpus(unsigned int max_cpus)
 {
diff --git a/arch/i386/mm/ioremap.c b/arch/i386/mm/ioremap.c
index db06f7399913..ab542792b27b 100644
--- a/arch/i386/mm/ioremap.c
+++ b/arch/i386/mm/ioremap.c
@@ -238,19 +238,21 @@ void iounmap(volatile void __iomem *addr)
 	    addr < phys_to_virt(ISA_END_ADDRESS))
 		return;
 
-	p = remove_vm_area((void *) (PAGE_MASK & (unsigned long __force) addr));
+	write_lock(&vmlist_lock);
+	p = __remove_vm_area((void *) (PAGE_MASK & (unsigned long __force) addr));
 	if (!p) {
-		printk("__iounmap: bad address %p\n", addr);
-		return;
+		printk("iounmap: bad address %p\n", addr);
+		goto out_unlock;
 	}
 
 	if ((p->flags >> 20) && p->phys_addr < virt_to_phys(high_memory) - 1) {
-		/* p->size includes the guard page, but cpa doesn't like that */
 		change_page_attr(virt_to_page(__va(p->phys_addr)),
 				 p->size >> PAGE_SHIFT,
 				 PAGE_KERNEL);
 		global_flush_tlb();
 	}
+out_unlock:
+	write_unlock(&vmlist_lock);
 	kfree(p);
 }
 
diff --git a/arch/ppc/kernel/head_44x.S b/arch/ppc/kernel/head_44x.S
index 9b6a8e513657..6c7ae6052464 100644
--- a/arch/ppc/kernel/head_44x.S
+++ b/arch/ppc/kernel/head_44x.S
@@ -330,8 +330,9 @@ interrupt_base:
 	/* If we are faulting a kernel address, we have to use the
 	 * kernel page tables.
 	 */
-	andis.	r11, r10, 0x8000
-	beq	3f
+	lis	r11, TASK_SIZE@h
+	cmplw	r10, r11
+	blt+	3f
 	lis	r11, swapper_pg_dir@h
 	ori	r11, r11, swapper_pg_dir@l
 
@@ -464,8 +465,9 @@ interrupt_base:
 	/* If we are faulting a kernel address, we have to use the
 	 * kernel page tables.
 	 */
-	andis.	r11, r10, 0x8000
-	beq	3f
+	lis	r11, TASK_SIZE@h
+	cmplw	r10, r11
+	blt+	3f
 	lis	r11, swapper_pg_dir@h
 	ori	r11, r11, swapper_pg_dir@l
 
@@ -533,8 +535,9 @@ interrupt_base:
 	/* If we are faulting a kernel address, we have to use the
 	 * kernel page tables.
 	 */
-	andis.	r11, r10, 0x8000
-	beq	3f
+	lis	r11, TASK_SIZE@h
+	cmplw	r10, r11
+	blt+	3f
 	lis	r11, swapper_pg_dir@h
 	ori	r11, r11, swapper_pg_dir@l
 
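
Note: all three hunks above replace a hard-wired sign-bit test (andis. r11, r10, 0x8000, i.e. "address >= 2GB means kernel") with an explicit comparison against TASK_SIZE, so the user/kernel split no longer has to sit at 0x80000000. A minimal C sketch of the same classification, under an assumed boundary value:

#include <stdio.h>

#define TASK_SIZE 0xc0000000UL	/* assumed user/kernel boundary */

static int is_kernel_address(unsigned long addr)
{
	/* compare against the boundary instead of testing the top bit */
	return addr >= TASK_SIZE;
}

int main(void)
{
	printf("%d %d\n",
	       is_kernel_address(0x10000000UL),		/* user: 0 */
	       is_kernel_address(0xd0000000UL));	/* kernel: 1 */
	return 0;
}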
diff --git a/arch/ppc/kernel/setup.c b/arch/ppc/kernel/setup.c
index 309797d7f96d..5c20266e3b1f 100644
--- a/arch/ppc/kernel/setup.c
+++ b/arch/ppc/kernel/setup.c
@@ -499,7 +499,7 @@ static int __init set_preferred_console(void)
 {
 	struct device_node *prom_stdout;
 	char *name;
-	int offset;
+	int offset = 0;
 
 	if (of_stdout_device == NULL)
 		return -ENODEV;
diff --git a/arch/ppc/lib/string.S b/arch/ppc/lib/string.S
index 8d08a2eb225e..36c9b97fd92a 100644
--- a/arch/ppc/lib/string.S
+++ b/arch/ppc/lib/string.S
@@ -446,6 +446,7 @@ _GLOBAL(__copy_tofrom_user)
 #ifdef CONFIG_8xx
 	/* Don't use prefetch on 8xx */
 	mtctr	r0
+	li	r0,0
 53:	COPY_16_BYTES_WITHEX(0)
 	bdnz	53b
 
@@ -564,7 +565,9 @@ _GLOBAL(__copy_tofrom_user)
 /* or write fault in cacheline loop */
 105:	li	r9,1
 92:	li	r3,LG_CACHELINE_BYTES
-	b	99f
+	mfctr	r8
+	add	r0,r0,r8
+	b	106f
 /* read fault in final word loop */
 108:	li	r9,0
 	b	93f
@@ -585,7 +588,7 @@ _GLOBAL(__copy_tofrom_user)
  * r5 + (ctr << r3), and r9 is 0 for read or 1 for write.
  */
 99:	mfctr	r0
-	slw	r3,r0,r3
+106:	slw	r3,r0,r3
 	add.	r3,r3,r5
 	beq	120f		/* shouldn't happen */
 	cmpwi	0,r9,0
diff --git a/arch/ppc/mm/init.c b/arch/ppc/mm/init.c
index be02a7fec2b7..363c157e3617 100644
--- a/arch/ppc/mm/init.c
+++ b/arch/ppc/mm/init.c
@@ -179,6 +179,7 @@ void free_initmem(void)
 	if (!have_of)
 		FREESEC(openfirmware);
 	printk("\n");
+	ppc_md.progress = NULL;
 #undef FREESEC
 }
 
diff --git a/arch/ppc/syslib/mpc83xx_devices.c b/arch/ppc/syslib/mpc83xx_devices.c
index 5c1a919eaabf..75c8e9834ae7 100644
--- a/arch/ppc/syslib/mpc83xx_devices.c
+++ b/arch/ppc/syslib/mpc83xx_devices.c
@@ -61,6 +61,7 @@ static struct plat_serial8250_port serial_platform_data[] = {
 		.iotype		= UPIO_MEM,
 		.flags		= UPF_BOOT_AUTOCONF | UPF_SKIP_TEST,
 	},
+	{ },
 };
 
 struct platform_device ppc_sys_platform_devices[] = {
diff --git a/arch/ppc/syslib/mpc85xx_devices.c b/arch/ppc/syslib/mpc85xx_devices.c
index a231795ee26f..1e658ef57e75 100644
--- a/arch/ppc/syslib/mpc85xx_devices.c
+++ b/arch/ppc/syslib/mpc85xx_devices.c
@@ -61,6 +61,7 @@ static struct plat_serial8250_port serial_platform_data[] = {
 		.iotype		= UPIO_MEM,
 		.flags		= UPF_BOOT_AUTOCONF | UPF_SKIP_TEST | UPF_SHARE_IRQ,
 	},
+	{ },
 };
 
 struct platform_device ppc_sys_platform_devices[] = {
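
Note: the `{ },` entries added in the two hunks above terminate the plat_serial8250_port arrays with an all-zero element, so the 8250 driver can walk the array without a separate length field. A standalone sketch of the sentinel-terminated-array idiom (the struct and walker here are illustrative, not the serial core's own types):

#include <stdio.h>

struct port {
	unsigned long base;	/* base == 0 marks the sentinel */
	int irq;
};

static const struct port ports[] = {
	{ .base = 0x3f8, .irq = 4 },
	{ .base = 0x2f8, .irq = 3 },
	{ },			/* all-zero terminator, as in the patch */
};

int main(void)
{
	const struct port *p;

	for (p = ports; p->base != 0; p++)	/* stop at the sentinel */
		printf("port @%#lx irq %d\n", p->base, p->irq);
	return 0;
}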
diff --git a/arch/ppc/syslib/open_pic.c b/arch/ppc/syslib/open_pic.c
index 7619e16fccae..9d4ed68b5804 100644
--- a/arch/ppc/syslib/open_pic.c
+++ b/arch/ppc/syslib/open_pic.c
@@ -557,12 +557,10 @@ static void __init openpic_initipi(u_int ipi, u_int pri, u_int vec)
  */
 void openpic_cause_IPI(u_int ipi, cpumask_t cpumask)
 {
-	cpumask_t phys;
 	DECL_THIS_CPU;
 
 	CHECK_THIS_CPU;
 	check_arg_ipi(ipi);
-	phys = physmask(cpumask);
 	openpic_write(&OpenPIC->THIS_CPU.IPI_Dispatch(ipi),
 		      cpus_addr(physmask(cpumask))[0]);
 }
diff --git a/arch/ppc64/kernel/prom_init.c b/arch/ppc64/kernel/prom_init.c
index 35ec42de962e..6f79b7b9b445 100644
--- a/arch/ppc64/kernel/prom_init.c
+++ b/arch/ppc64/kernel/prom_init.c
@@ -1750,7 +1750,44 @@ static void __init flatten_device_tree(void)
 		prom_printf("Device tree struct 0x%x -> 0x%x\n",
 			    RELOC(dt_struct_start), RELOC(dt_struct_end));
 
- }
+}
+
+
+static void __init fixup_device_tree(void)
+{
+	unsigned long offset = reloc_offset();
+	phandle u3, i2c, mpic;
+	u32 u3_rev;
+	u32 interrupts[2];
+	u32 parent;
+
+	/* Some G5s have a missing interrupt definition, fix it up here */
+	u3 = call_prom("finddevice", 1, 1, ADDR("/u3@0,f8000000"));
+	if ((long)u3 <= 0)
+		return;
+	i2c = call_prom("finddevice", 1, 1, ADDR("/u3@0,f8000000/i2c@f8001000"));
+	if ((long)i2c <= 0)
+		return;
+	mpic = call_prom("finddevice", 1, 1, ADDR("/u3@0,f8000000/mpic@f8040000"));
+	if ((long)mpic <= 0)
+		return;
+
+	/* check if proper rev of u3 */
+	if (prom_getprop(u3, "device-rev", &u3_rev, sizeof(u3_rev)) <= 0)
+		return;
+	if (u3_rev != 0x35)
+		return;
+	/* does it need fixup ? */
+	if (prom_getproplen(i2c, "interrupts") > 0)
+		return;
+	/* interrupt on this revision of u3 is number 0 and level */
+	interrupts[0] = 0;
+	interrupts[1] = 1;
+	prom_setprop(i2c, "interrupts", &interrupts, sizeof(interrupts));
+	parent = (u32)mpic;
+	prom_setprop(i2c, "interrupt-parent", &parent, sizeof(parent));
+}
+
 
 static void __init prom_find_boot_cpu(void)
 {
@@ -1920,6 +1957,11 @@ unsigned long __init prom_init(unsigned long r3, unsigned long r4, unsigned long
 	}
 
 	/*
+	 * Fixup any known bugs in the device-tree
+	 */
+	fixup_device_tree();
+
+	/*
 	 * Now finally create the flattened device-tree
 	 */
 	prom_printf("copying OF device tree ...\n");
diff --git a/arch/sparc64/kernel/pci_iommu.c b/arch/sparc64/kernel/pci_iommu.c
index 292983413ae2..33ca56c90da2 100644
--- a/arch/sparc64/kernel/pci_iommu.c
+++ b/arch/sparc64/kernel/pci_iommu.c
@@ -8,6 +8,7 @@
 #include <linux/kernel.h>
 #include <linux/sched.h>
 #include <linux/mm.h>
+#include <linux/delay.h>
 
 #include <asm/pbm.h>
 
@@ -379,6 +380,56 @@ bad:
 	return PCI_DMA_ERROR_CODE;
 }
 
+static void pci_strbuf_flush(struct pci_strbuf *strbuf, struct pci_iommu *iommu, u32 vaddr, unsigned long ctx, unsigned long npages)
+{
+	int limit;
+
+	PCI_STC_FLUSHFLAG_INIT(strbuf);
+	if (strbuf->strbuf_ctxflush &&
+	    iommu->iommu_ctxflush) {
+		unsigned long matchreg, flushreg;
+
+		flushreg = strbuf->strbuf_ctxflush;
+		matchreg = PCI_STC_CTXMATCH_ADDR(strbuf, ctx);
+
+		limit = 100000;
+		pci_iommu_write(flushreg, ctx);
+		for(;;) {
+			if (((long)pci_iommu_read(matchreg)) >= 0L)
+				break;
+			limit--;
+			if (!limit)
+				break;
+			udelay(1);
+		}
+		if (!limit)
+			printk(KERN_WARNING "pci_strbuf_flush: ctx flush "
+			       "timeout vaddr[%08x] ctx[%lx]\n",
+			       vaddr, ctx);
+	} else {
+		unsigned long i;
+
+		for (i = 0; i < npages; i++, vaddr += IO_PAGE_SIZE)
+			pci_iommu_write(strbuf->strbuf_pflush, vaddr);
+	}
+
+	pci_iommu_write(strbuf->strbuf_fsync, strbuf->strbuf_flushflag_pa);
+	(void) pci_iommu_read(iommu->write_complete_reg);
+
+	limit = 100000;
+	while (!PCI_STC_FLUSHFLAG_SET(strbuf)) {
+		limit--;
+		if (!limit)
+			break;
+		udelay(1);
+		membar("#LoadLoad");
+	}
+	if (!limit)
+		printk(KERN_WARNING "pci_strbuf_flush: flushflag timeout "
+		       "vaddr[%08x] ctx[%lx] npages[%ld]\n",
+		       vaddr, ctx, npages);
+}
+
 /* Unmap a single streaming mode DMA translation. */
 void pci_unmap_single(struct pci_dev *pdev, dma_addr_t bus_addr, size_t sz, int direction)
 {
@@ -386,7 +437,7 @@ void pci_unmap_single(struct pci_dev *pdev, dma_addr_t bus_addr, size_t sz, int
 	struct pci_iommu *iommu;
 	struct pci_strbuf *strbuf;
 	iopte_t *base;
-	unsigned long flags, npages, i, ctx;
+	unsigned long flags, npages, ctx;
 
 	if (direction == PCI_DMA_NONE)
 		BUG();
@@ -414,29 +465,8 @@ void pci_unmap_single(struct pci_dev *pdev, dma_addr_t bus_addr, size_t sz, int
 	ctx = (iopte_val(*base) & IOPTE_CONTEXT) >> 47UL;
 
 	/* Step 1: Kick data out of streaming buffers if necessary. */
-	if (strbuf->strbuf_enabled) {
-		u32 vaddr = bus_addr;
-
-		PCI_STC_FLUSHFLAG_INIT(strbuf);
-		if (strbuf->strbuf_ctxflush &&
-		    iommu->iommu_ctxflush) {
-			unsigned long matchreg, flushreg;
-
-			flushreg = strbuf->strbuf_ctxflush;
-			matchreg = PCI_STC_CTXMATCH_ADDR(strbuf, ctx);
-			do {
-				pci_iommu_write(flushreg, ctx);
-			} while(((long)pci_iommu_read(matchreg)) < 0L);
-		} else {
-			for (i = 0; i < npages; i++, vaddr += IO_PAGE_SIZE)
-				pci_iommu_write(strbuf->strbuf_pflush, vaddr);
-		}
-
-		pci_iommu_write(strbuf->strbuf_fsync, strbuf->strbuf_flushflag_pa);
-		(void) pci_iommu_read(iommu->write_complete_reg);
-		while (!PCI_STC_FLUSHFLAG_SET(strbuf))
-			membar("#LoadLoad");
-	}
+	if (strbuf->strbuf_enabled)
+		pci_strbuf_flush(strbuf, iommu, bus_addr, ctx, npages);
 
 	/* Step 2: Clear out first TSB entry. */
 	iopte_make_dummy(iommu, base);
@@ -647,29 +677,8 @@ void pci_unmap_sg(struct pci_dev *pdev, struct scatterlist *sglist, int nelems,
 	ctx = (iopte_val(*base) & IOPTE_CONTEXT) >> 47UL;
 
 	/* Step 1: Kick data out of streaming buffers if necessary. */
-	if (strbuf->strbuf_enabled) {
-		u32 vaddr = (u32) bus_addr;
-
-		PCI_STC_FLUSHFLAG_INIT(strbuf);
-		if (strbuf->strbuf_ctxflush &&
-		    iommu->iommu_ctxflush) {
-			unsigned long matchreg, flushreg;
-
-			flushreg = strbuf->strbuf_ctxflush;
-			matchreg = PCI_STC_CTXMATCH_ADDR(strbuf, ctx);
-			do {
-				pci_iommu_write(flushreg, ctx);
-			} while(((long)pci_iommu_read(matchreg)) < 0L);
-		} else {
-			for (i = 0; i < npages; i++, vaddr += IO_PAGE_SIZE)
-				pci_iommu_write(strbuf->strbuf_pflush, vaddr);
-		}
-
-		pci_iommu_write(strbuf->strbuf_fsync, strbuf->strbuf_flushflag_pa);
-		(void) pci_iommu_read(iommu->write_complete_reg);
-		while (!PCI_STC_FLUSHFLAG_SET(strbuf))
-			membar("#LoadLoad");
-	}
+	if (strbuf->strbuf_enabled)
+		pci_strbuf_flush(strbuf, iommu, bus_addr, ctx, npages);
 
 	/* Step 2: Clear out first TSB entry. */
 	iopte_make_dummy(iommu, base);
@@ -715,28 +724,7 @@ void pci_dma_sync_single_for_cpu(struct pci_dev *pdev, dma_addr_t bus_addr, size
 	}
 
 	/* Step 2: Kick data out of streaming buffers. */
-	PCI_STC_FLUSHFLAG_INIT(strbuf);
-	if (iommu->iommu_ctxflush &&
-	    strbuf->strbuf_ctxflush) {
-		unsigned long matchreg, flushreg;
-
-		flushreg = strbuf->strbuf_ctxflush;
-		matchreg = PCI_STC_CTXMATCH_ADDR(strbuf, ctx);
-		do {
-			pci_iommu_write(flushreg, ctx);
-		} while(((long)pci_iommu_read(matchreg)) < 0L);
-	} else {
-		unsigned long i;
-
-		for (i = 0; i < npages; i++, bus_addr += IO_PAGE_SIZE)
-			pci_iommu_write(strbuf->strbuf_pflush, bus_addr);
-	}
-
-	/* Step 3: Perform flush synchronization sequence. */
-	pci_iommu_write(strbuf->strbuf_fsync, strbuf->strbuf_flushflag_pa);
-	(void) pci_iommu_read(iommu->write_complete_reg);
-	while (!PCI_STC_FLUSHFLAG_SET(strbuf))
-		membar("#LoadLoad");
+	pci_strbuf_flush(strbuf, iommu, bus_addr, ctx, npages);
 
 	spin_unlock_irqrestore(&iommu->lock, flags);
 }
@@ -749,7 +737,8 @@ void pci_dma_sync_sg_for_cpu(struct pci_dev *pdev, struct scatterlist *sglist, i
 	struct pcidev_cookie *pcp;
 	struct pci_iommu *iommu;
 	struct pci_strbuf *strbuf;
-	unsigned long flags, ctx;
+	unsigned long flags, ctx, npages, i;
+	u32 bus_addr;
 
 	pcp = pdev->sysdata;
 	iommu = pcp->pbm->iommu;
@@ -772,36 +761,14 @@ void pci_dma_sync_sg_for_cpu(struct pci_dev *pdev, struct scatterlist *sglist, i
 	}
 
 	/* Step 2: Kick data out of streaming buffers. */
-	PCI_STC_FLUSHFLAG_INIT(strbuf);
-	if (iommu->iommu_ctxflush &&
-	    strbuf->strbuf_ctxflush) {
-		unsigned long matchreg, flushreg;
-
-		flushreg = strbuf->strbuf_ctxflush;
-		matchreg = PCI_STC_CTXMATCH_ADDR(strbuf, ctx);
-		do {
-			pci_iommu_write(flushreg, ctx);
-		} while (((long)pci_iommu_read(matchreg)) < 0L);
-	} else {
-		unsigned long i, npages;
-		u32 bus_addr;
-
-		bus_addr = sglist[0].dma_address & IO_PAGE_MASK;
-
-		for(i = 1; i < nelems; i++)
-			if (!sglist[i].dma_length)
-				break;
-		i--;
-		npages = (IO_PAGE_ALIGN(sglist[i].dma_address + sglist[i].dma_length) - bus_addr) >> IO_PAGE_SHIFT;
-		for (i = 0; i < npages; i++, bus_addr += IO_PAGE_SIZE)
-			pci_iommu_write(strbuf->strbuf_pflush, bus_addr);
-	}
-
-	/* Step 3: Perform flush synchronization sequence. */
-	pci_iommu_write(strbuf->strbuf_fsync, strbuf->strbuf_flushflag_pa);
-	(void) pci_iommu_read(iommu->write_complete_reg);
-	while (!PCI_STC_FLUSHFLAG_SET(strbuf))
-		membar("#LoadLoad");
+	bus_addr = sglist[0].dma_address & IO_PAGE_MASK;
+	for(i = 1; i < nelems; i++)
+		if (!sglist[i].dma_length)
+			break;
+	i--;
+	npages = (IO_PAGE_ALIGN(sglist[i].dma_address + sglist[i].dma_length)
+		  - bus_addr) >> IO_PAGE_SHIFT;
+	pci_strbuf_flush(strbuf, iommu, bus_addr, ctx, npages);
 
 	spin_unlock_irqrestore(&iommu->lock, flags);
 }
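
Note: the new pci_strbuf_flush() above replaces unbounded busy-wait loops with polls that give up after 100000 one-microsecond steps and print a warning, so a wedged streaming buffer can no longer hang the CPU silently. A userspace analog of that bounded-poll pattern (a sketch; poll_flag() and hw_flag are illustrative stand-ins, and usleep() stands in for the kernel's udelay()):

#include <stdio.h>
#include <unistd.h>

static volatile int hw_flag;	/* stands in for a device status flag */

static int poll_flag(void)
{
	int limit = 100000;	/* bound the wait instead of spinning forever */

	while (!hw_flag) {
		limit--;
		if (!limit)
			break;
		usleep(1);	/* kernel code uses udelay(1) here */
	}
	if (!limit) {
		fprintf(stderr, "poll_flag: timeout\n");
		return -1;	/* report the timeout instead of wedging */
	}
	return 0;
}

int main(void)
{
	hw_flag = 1;		/* pretend the device already completed */
	return poll_flag() ? 1 : 0;
}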
diff --git a/arch/sparc64/kernel/sbus.c b/arch/sparc64/kernel/sbus.c
index 14d9c3a21b9a..76ea6455433f 100644
--- a/arch/sparc64/kernel/sbus.c
+++ b/arch/sparc64/kernel/sbus.c
@@ -117,19 +117,34 @@ static void iommu_flush(struct sbus_iommu *iommu, u32 base, unsigned long npages
 
 #define STRBUF_TAG_VALID	0x02UL
 
-static void strbuf_flush(struct sbus_iommu *iommu, u32 base, unsigned long npages)
+static void sbus_strbuf_flush(struct sbus_iommu *iommu, u32 base, unsigned long npages)
 {
+	unsigned long n;
+	int limit;
+
 	iommu->strbuf_flushflag = 0UL;
-	while (npages--)
-		upa_writeq(base + (npages << IO_PAGE_SHIFT),
+	n = npages;
+	while (n--)
+		upa_writeq(base + (n << IO_PAGE_SHIFT),
 			   iommu->strbuf_regs + STRBUF_PFLUSH);
 
 	/* Whoopee cushion! */
 	upa_writeq(__pa(&iommu->strbuf_flushflag),
 		   iommu->strbuf_regs + STRBUF_FSYNC);
 	upa_readq(iommu->sbus_control_reg);
-	while (iommu->strbuf_flushflag == 0UL)
+
+	limit = 100000;
+	while (iommu->strbuf_flushflag == 0UL) {
+		limit--;
+		if (!limit)
+			break;
+		udelay(1);
 		membar("#LoadLoad");
+	}
+	if (!limit)
+		printk(KERN_WARNING "sbus_strbuf_flush: flushflag timeout "
+		       "vaddr[%08x] npages[%ld]\n",
+		       base, npages);
 }
 
 static iopte_t *alloc_streaming_cluster(struct sbus_iommu *iommu, unsigned long npages)
@@ -406,7 +421,7 @@ void sbus_unmap_single(struct sbus_dev *sdev, dma_addr_t dma_addr, size_t size,
 
 	spin_lock_irqsave(&iommu->lock, flags);
 	free_streaming_cluster(iommu, dma_base, size >> IO_PAGE_SHIFT);
-	strbuf_flush(iommu, dma_base, size >> IO_PAGE_SHIFT);
+	sbus_strbuf_flush(iommu, dma_base, size >> IO_PAGE_SHIFT);
 	spin_unlock_irqrestore(&iommu->lock, flags);
 }
 
@@ -569,7 +584,7 @@ void sbus_unmap_sg(struct sbus_dev *sdev, struct scatterlist *sg, int nents, int
 	iommu = sdev->bus->iommu;
 	spin_lock_irqsave(&iommu->lock, flags);
 	free_streaming_cluster(iommu, dvma_base, size >> IO_PAGE_SHIFT);
-	strbuf_flush(iommu, dvma_base, size >> IO_PAGE_SHIFT);
+	sbus_strbuf_flush(iommu, dvma_base, size >> IO_PAGE_SHIFT);
 	spin_unlock_irqrestore(&iommu->lock, flags);
 }
 
@@ -581,7 +596,7 @@ void sbus_dma_sync_single_for_cpu(struct sbus_dev *sdev, dma_addr_t base, size_t
 	size = (IO_PAGE_ALIGN(base + size) - (base & IO_PAGE_MASK));
 
 	spin_lock_irqsave(&iommu->lock, flags);
-	strbuf_flush(iommu, base & IO_PAGE_MASK, size >> IO_PAGE_SHIFT);
+	sbus_strbuf_flush(iommu, base & IO_PAGE_MASK, size >> IO_PAGE_SHIFT);
 	spin_unlock_irqrestore(&iommu->lock, flags);
 }
 
@@ -605,7 +620,7 @@ void sbus_dma_sync_sg_for_cpu(struct sbus_dev *sdev, struct scatterlist *sg, int
 	size = IO_PAGE_ALIGN(sg[i].dma_address + sg[i].dma_length) - base;
 
 	spin_lock_irqsave(&iommu->lock, flags);
-	strbuf_flush(iommu, base, size >> IO_PAGE_SHIFT);
+	sbus_strbuf_flush(iommu, base, size >> IO_PAGE_SHIFT);
 	spin_unlock_irqrestore(&iommu->lock, flags);
 }
 
diff --git a/arch/um/Kconfig_x86_64 b/arch/um/Kconfig_x86_64
index fd8d7e8982b1..f162f50f0b17 100644
--- a/arch/um/Kconfig_x86_64
+++ b/arch/um/Kconfig_x86_64
@@ -6,6 +6,10 @@ config 64BIT
 	bool
 	default y
 
+config TOP_ADDR
+	hex
+	default 0x80000000
+
 config 3_LEVEL_PGTABLES
 	bool
 	default y
diff --git a/arch/um/drivers/chan_kern.c b/arch/um/drivers/chan_kern.c
index 0150038af795..14a12d6b3df6 100644
--- a/arch/um/drivers/chan_kern.c
+++ b/arch/um/drivers/chan_kern.c
@@ -20,9 +20,17 @@
20#include "os.h" 20#include "os.h"
21 21
22#ifdef CONFIG_NOCONFIG_CHAN 22#ifdef CONFIG_NOCONFIG_CHAN
23
24/* The printk's here are wrong because we are complaining that there is no
25 * output device, but printk is printing to that output device. The user will
26 * never see the error. printf would be better, except it can't run on a
27 * kernel stack because it will overflow it.
28 * Use printk for now since that will avoid crashing.
29 */
30
23static void *not_configged_init(char *str, int device, struct chan_opts *opts) 31static void *not_configged_init(char *str, int device, struct chan_opts *opts)
24{ 32{
25 printf(KERN_ERR "Using a channel type which is configured out of " 33 printk(KERN_ERR "Using a channel type which is configured out of "
26 "UML\n"); 34 "UML\n");
27 return(NULL); 35 return(NULL);
28} 36}
@@ -30,27 +38,27 @@ static void *not_configged_init(char *str, int device, struct chan_opts *opts)
 static int not_configged_open(int input, int output, int primary, void *data,
 			      char **dev_out)
 {
-	printf(KERN_ERR "Using a channel type which is configured out of "
+	printk(KERN_ERR "Using a channel type which is configured out of "
 	       "UML\n");
 	return(-ENODEV);
 }
 
 static void not_configged_close(int fd, void *data)
 {
-	printf(KERN_ERR "Using a channel type which is configured out of "
+	printk(KERN_ERR "Using a channel type which is configured out of "
 	       "UML\n");
 }
 
 static int not_configged_read(int fd, char *c_out, void *data)
 {
-	printf(KERN_ERR "Using a channel type which is configured out of "
+	printk(KERN_ERR "Using a channel type which is configured out of "
 	       "UML\n");
 	return(-EIO);
 }
 
 static int not_configged_write(int fd, const char *buf, int len, void *data)
 {
-	printf(KERN_ERR "Using a channel type which is configured out of "
+	printk(KERN_ERR "Using a channel type which is configured out of "
 	       "UML\n");
 	return(-EIO);
 }
@@ -58,7 +66,7 @@ static int not_configged_write(int fd, const char *buf, int len, void *data)
 static int not_configged_console_write(int fd, const char *buf, int len,
 				       void *data)
 {
-	printf(KERN_ERR "Using a channel type which is configured out of "
+	printk(KERN_ERR "Using a channel type which is configured out of "
 	       "UML\n");
 	return(-EIO);
 }
@@ -66,7 +74,7 @@ static int not_configged_console_write(int fd, const char *buf, int len,
 static int not_configged_window_size(int fd, void *data, unsigned short *rows,
 				     unsigned short *cols)
 {
-	printf(KERN_ERR "Using a channel type which is configured out of "
+	printk(KERN_ERR "Using a channel type which is configured out of "
 	       "UML\n");
 	return(-ENODEV);
 }
diff --git a/arch/um/drivers/mcast_kern.c b/arch/um/drivers/mcast_kern.c
index faf714e87b5b..217438cdef33 100644
--- a/arch/um/drivers/mcast_kern.c
+++ b/arch/um/drivers/mcast_kern.c
@@ -73,7 +73,6 @@ int mcast_setup(char *str, char **mac_out, void *data)
 	struct mcast_init *init = data;
 	char *port_str = NULL, *ttl_str = NULL, *remain;
 	char *last;
-	int n;
 
 	*init = ((struct mcast_init)
 		{ .addr = "239.192.168.1",
@@ -89,13 +88,12 @@ int mcast_setup(char *str, char **mac_out, void *data)
 	}
 
 	if(port_str != NULL){
-		n = simple_strtoul(port_str, &last, 10);
+		init->port = simple_strtoul(port_str, &last, 10);
 		if((*last != '\0') || (last == port_str)){
 			printk(KERN_ERR "mcast_setup - Bad port : '%s'\n",
 			       port_str);
 			return(0);
 		}
-		init->port = htons(n);
 	}
 
 	if(ttl_str != NULL){
diff --git a/arch/um/drivers/mcast_user.c b/arch/um/drivers/mcast_user.c
index 0fe1d9fa9139..7a0d115b29d0 100644
--- a/arch/um/drivers/mcast_user.c
+++ b/arch/um/drivers/mcast_user.c
@@ -38,7 +38,7 @@ static struct sockaddr_in *new_addr(char *addr, unsigned short port)
 	}
 	sin->sin_family = AF_INET;
 	sin->sin_addr.s_addr = in_aton(addr);
-	sin->sin_port = port;
+	sin->sin_port = htons(port);
 	return(sin);
 }
 
@@ -55,28 +55,25 @@ static int mcast_open(void *data)
 	struct mcast_data *pri = data;
 	struct sockaddr_in *sin = pri->mcast_addr;
 	struct ip_mreq mreq;
-	int fd, yes = 1;
+	int fd = -EINVAL, yes = 1, err = -EINVAL;;
 
 
-	if ((sin->sin_addr.s_addr == 0) || (sin->sin_port == 0)) {
-		fd = -EINVAL;
+	if ((sin->sin_addr.s_addr == 0) || (sin->sin_port == 0))
 		goto out;
-	}
 
 	fd = socket(AF_INET, SOCK_DGRAM, 0);
+
 	if (fd < 0){
 		printk("mcast_open : data socket failed, errno = %d\n",
 		       errno);
-		fd = -ENOMEM;
+		fd = -errno;
 		goto out;
 	}
 
 	if (setsockopt(fd, SOL_SOCKET, SO_REUSEADDR, &yes, sizeof(yes)) < 0) {
 		printk("mcast_open: SO_REUSEADDR failed, errno = %d\n",
 		       errno);
-		os_close_file(fd);
-		fd = -EINVAL;
-		goto out;
+		goto out_close;
 	}
 
 	/* set ttl according to config */
@@ -84,26 +81,20 @@ static int mcast_open(void *data)
 		       sizeof(pri->ttl)) < 0) {
 		printk("mcast_open: IP_MULTICAST_TTL failed, error = %d\n",
 		       errno);
-		os_close_file(fd);
-		fd = -EINVAL;
-		goto out;
+		goto out_close;
 	}
 
 	/* set LOOP, so data does get fed back to local sockets */
 	if (setsockopt(fd, SOL_IP, IP_MULTICAST_LOOP, &yes, sizeof(yes)) < 0) {
 		printk("mcast_open: IP_MULTICAST_LOOP failed, error = %d\n",
 		       errno);
-		os_close_file(fd);
-		fd = -EINVAL;
-		goto out;
+		goto out_close;
 	}
 
 	/* bind socket to mcast address */
 	if (bind(fd, (struct sockaddr *) sin, sizeof(*sin)) < 0) {
 		printk("mcast_open : data bind failed, errno = %d\n", errno);
-		os_close_file(fd);
-		fd = -EINVAL;
-		goto out;
+		goto out_close;
 	}
 
 	/* subscribe to the multicast group */
@@ -117,12 +108,15 @@ static int mcast_open(void *data)
117 "interface on the host.\n"); 108 "interface on the host.\n");
118 printk("eth0 should be configured in order to use the " 109 printk("eth0 should be configured in order to use the "
119 "multicast transport.\n"); 110 "multicast transport.\n");
120 os_close_file(fd); 111 goto out_close;
121 fd = -EINVAL;
122 } 112 }
123 113
124 out: 114 out:
125 return(fd); 115 return fd;
116
117 out_close:
118 os_close_file(fd);
119 return err;
126} 120}
127 121
128static void mcast_close(int fd, void *data) 122static void mcast_close(int fd, void *data)
@@ -164,14 +158,3 @@ struct net_user_info mcast_user_info = {
164 .delete_address = NULL, 158 .delete_address = NULL,
165 .max_packet = MAX_PACKET - ETH_HEADER_OTHER 159 .max_packet = MAX_PACKET - ETH_HEADER_OTHER
166}; 160};
167
168/*
169 * Overrides for Emacs so that we follow Linus's tabbing style.
170 * Emacs will notice this stuff at the end of the file and automatically
171 * adjust the settings for this buffer only. This must remain at the end
172 * of the file.
173 * ---------------------------------------------------------------------------
174 * Local variables:
175 * c-file-style: "linux"
176 * End:
177 */
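
Note: the mcast_open() rework above collapses five copies of "os_close_file(fd); fd = -EINVAL; goto out;" into a single out_close label, the usual kernel single-exit cleanup idiom. A self-contained userspace sketch of the same shape (open_and_check() and the path are illustrative):

#include <errno.h>
#include <fcntl.h>
#include <unistd.h>

static int open_and_check(const char *path)
{
	int err = -EINVAL;
	int fd = open(path, O_RDONLY);

	if (fd < 0)
		return -errno;		/* nothing acquired yet, return directly */

	if (lseek(fd, 0, SEEK_END) < 0) {
		err = -errno;
		goto out_close;		/* every later failure funnels here */
	}
	return fd;

 out_close:
	close(fd);			/* release the fd exactly once */
	return err;
}

int main(void)
{
	int fd = open_and_check("/etc/hostname");

	if (fd >= 0)
		close(fd);
	return fd < 0;
}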
diff --git a/arch/um/drivers/ubd_kern.c b/arch/um/drivers/ubd_kern.c
index 9a56ff94308d..88f956c34fed 100644
--- a/arch/um/drivers/ubd_kern.c
+++ b/arch/um/drivers/ubd_kern.c
@@ -55,7 +55,7 @@
55#include "mem_kern.h" 55#include "mem_kern.h"
56#include "cow.h" 56#include "cow.h"
57 57
58enum ubd_req { UBD_READ, UBD_WRITE, UBD_MMAP }; 58enum ubd_req { UBD_READ, UBD_WRITE };
59 59
60struct io_thread_req { 60struct io_thread_req {
61 enum ubd_req op; 61 enum ubd_req op;
@@ -68,8 +68,6 @@ struct io_thread_req {
 	unsigned long sector_mask;
 	unsigned long long cow_offset;
 	unsigned long bitmap_words[2];
-	int map_fd;
-	unsigned long long map_offset;
 	int error;
 };
 
@@ -122,10 +120,6 @@ static int ubd_ioctl(struct inode * inode, struct file * file,
 
 #define MAX_DEV (8)
 
-/* Changed in early boot */
-static int ubd_do_mmap = 0;
-#define UBD_MMAP_BLOCK_SIZE PAGE_SIZE
-
 static struct block_device_operations ubd_blops = {
 	.owner		= THIS_MODULE,
 	.open		= ubd_open,
@@ -175,12 +169,6 @@ struct ubd {
 	int no_cow;
 	struct cow cow;
 	struct platform_device pdev;
-
-	int map_writes;
-	int map_reads;
-	int nomap_writes;
-	int nomap_reads;
-	int write_maps;
 };
 
 #define DEFAULT_COW { \
@@ -200,11 +188,6 @@ struct ubd {
 	.openflags = OPEN_FLAGS, \
 	.no_cow = 0, \
 	.cow = DEFAULT_COW, \
-	.map_writes = 0, \
-	.map_reads = 0, \
-	.nomap_writes = 0, \
-	.nomap_reads = 0, \
-	.write_maps = 0, \
 }
 
 struct ubd ubd_dev[MAX_DEV] = { [ 0 ... MAX_DEV - 1 ] = DEFAULT_UBD };
@@ -314,13 +297,6 @@ static int ubd_setup_common(char *str, int *index_out)
 	int major;
 
 	str++;
-	if(!strcmp(str, "mmap")){
-		CHOOSE_MODE(printk("mmap not supported by the ubd "
-				   "driver in tt mode\n"),
-			    ubd_do_mmap = 1);
-		return(0);
-	}
-
 	if(!strcmp(str, "sync")){
 		global_openflags = of_sync(global_openflags);
 		return(0);
@@ -524,7 +500,7 @@ static void ubd_handler(void)
 {
 	struct io_thread_req req;
 	struct request *rq = elv_next_request(ubd_queue);
-	int n, err;
+	int n;
 
 	do_ubd = NULL;
 	intr_count++;
@@ -538,19 +514,6 @@ static void ubd_handler(void)
 		return;
 	}
 
-	if((req.op != UBD_MMAP) &&
-	   ((req.offset != ((__u64) (rq->sector)) << 9) ||
-	    (req.length != (rq->current_nr_sectors) << 9)))
-		panic("I/O op mismatch");
-
-	if(req.map_fd != -1){
-		err = physmem_subst_mapping(req.buffer, req.map_fd,
-					    req.map_offset, 1);
-		if(err)
-			printk("ubd_handler - physmem_subst_mapping failed, "
-			       "err = %d\n", -err);
-	}
-
 	ubd_finish(rq, req.error);
 	reactivate_fd(thread_fd, UBD_IRQ);
 	do_ubd_request(ubd_queue);
@@ -583,14 +546,10 @@ static int ubd_file_size(struct ubd *dev, __u64 *size_out)
 
 static void ubd_close(struct ubd *dev)
 {
-	if(ubd_do_mmap)
-		physmem_forget_descriptor(dev->fd);
 	os_close_file(dev->fd);
 	if(dev->cow.file == NULL)
 		return;
 
-	if(ubd_do_mmap)
-		physmem_forget_descriptor(dev->cow.fd);
 	os_close_file(dev->cow.fd);
 	vfree(dev->cow.bitmap);
 	dev->cow.bitmap = NULL;
@@ -1010,94 +969,13 @@ static void cowify_req(struct io_thread_req *req, unsigned long *bitmap,
 		      req->bitmap_words, bitmap_len);
 }
 
-static int mmap_fd(struct request *req, struct ubd *dev, __u64 offset)
-{
-	__u64 sector;
-	unsigned char *bitmap;
-	int bit, i;
-
-	/* mmap must have been requested on the command line */
-	if(!ubd_do_mmap)
-		return(-1);
-
-	/* The buffer must be page aligned */
-	if(((unsigned long) req->buffer % UBD_MMAP_BLOCK_SIZE) != 0)
-		return(-1);
-
-	/* The request must be a page long */
-	if((req->current_nr_sectors << 9) != PAGE_SIZE)
-		return(-1);
-
-	if(dev->cow.file == NULL)
-		return(dev->fd);
-
-	sector = offset >> 9;
-	bitmap = (unsigned char *) dev->cow.bitmap;
-	bit = ubd_test_bit(sector, bitmap);
-
-	for(i = 1; i < req->current_nr_sectors; i++){
-		if(ubd_test_bit(sector + i, bitmap) != bit)
-			return(-1);
-	}
-
-	if(bit || (rq_data_dir(req) == WRITE))
-		offset += dev->cow.data_offset;
-
-	/* The data on disk must be page aligned */
-	if((offset % UBD_MMAP_BLOCK_SIZE) != 0)
-		return(-1);
-
-	return(bit ? dev->fd : dev->cow.fd);
-}
-
-static int prepare_mmap_request(struct ubd *dev, int fd, __u64 offset,
-				struct request *req,
-				struct io_thread_req *io_req)
-{
-	int err;
-
-	if(rq_data_dir(req) == WRITE){
-		/* Writes are almost no-ops since the new data is already in the
-		 * host page cache
-		 */
-		dev->map_writes++;
-		if(dev->cow.file != NULL)
-			cowify_bitmap(io_req->offset, io_req->length,
-				      &io_req->sector_mask, &io_req->cow_offset,
-				      dev->cow.bitmap, dev->cow.bitmap_offset,
-				      io_req->bitmap_words,
-				      dev->cow.bitmap_len);
-	}
-	else {
-		int w;
-
-		if((dev->cow.file != NULL) && (fd == dev->cow.fd))
-			w = 0;
-		else w = dev->openflags.w;
-
-		if((dev->cow.file != NULL) && (fd == dev->fd))
-			offset += dev->cow.data_offset;
-
-		err = physmem_subst_mapping(req->buffer, fd, offset, w);
-		if(err){
-			printk("physmem_subst_mapping failed, err = %d\n",
-			       -err);
-			return(1);
-		}
-		dev->map_reads++;
-	}
-	io_req->op = UBD_MMAP;
-	io_req->buffer = req->buffer;
-	return(0);
-}
-
 /* Called with ubd_io_lock held */
 static int prepare_request(struct request *req, struct io_thread_req *io_req)
 {
 	struct gendisk *disk = req->rq_disk;
 	struct ubd *dev = disk->private_data;
 	__u64 offset;
-	int len, fd;
+	int len;
 
 	if(req->rq_status == RQ_INACTIVE) return(1);
 
@@ -1114,34 +992,12 @@ static int prepare_request(struct request *req, struct io_thread_req *io_req)
 
 	io_req->fds[0] = (dev->cow.file != NULL) ? dev->cow.fd : dev->fd;
 	io_req->fds[1] = dev->fd;
-	io_req->map_fd = -1;
 	io_req->cow_offset = -1;
 	io_req->offset = offset;
 	io_req->length = len;
 	io_req->error = 0;
 	io_req->sector_mask = 0;
 
-	fd = mmap_fd(req, dev, io_req->offset);
-	if(fd > 0){
-		/* If mmapping is otherwise OK, but the first access to the
-		 * page is a write, then it's not mapped in yet.  So we have
-		 * to write the data to disk first, then we can map the disk
-		 * page in and continue normally from there.
-		 */
-		if((rq_data_dir(req) == WRITE) && !is_remapped(req->buffer)){
-			io_req->map_fd = dev->fd;
-			io_req->map_offset = io_req->offset +
-				dev->cow.data_offset;
-			dev->write_maps++;
-		}
-		else return(prepare_mmap_request(dev, fd, io_req->offset, req,
-						 io_req));
-	}
-
-	if(rq_data_dir(req) == READ)
-		dev->nomap_reads++;
-	else dev->nomap_writes++;
-
 	io_req->op = (rq_data_dir(req) == READ) ? UBD_READ : UBD_WRITE;
 	io_req->offsets[0] = 0;
 	io_req->offsets[1] = dev->cow.data_offset;
@@ -1229,143 +1085,6 @@ static int ubd_ioctl(struct inode * inode, struct file * file,
 	return(-EINVAL);
 }
 
-static int ubd_check_remapped(int fd, unsigned long address, int is_write,
-			      __u64 offset)
-{
-	__u64 bitmap_offset;
-	unsigned long new_bitmap[2];
-	int i, err, n;
-
-	/* If it's not a write access, we can't do anything about it */
-	if(!is_write)
-		return(0);
-
-	/* We have a write */
-	for(i = 0; i < sizeof(ubd_dev) / sizeof(ubd_dev[0]); i++){
-		struct ubd *dev = &ubd_dev[i];
-
-		if((dev->fd != fd) && (dev->cow.fd != fd))
-			continue;
-
-		/* It's a write to a ubd device */
-
-		/* This should be impossible now */
-		if(!dev->openflags.w){
-			/* It's a write access on a read-only device - probably
-			 * shouldn't happen.  If the kernel is trying to change
-			 * something with no intention of writing it back out,
-			 * then this message will clue us in that this needs
-			 * fixing
-			 */
-			printk("Write access to mapped page from readonly ubd "
-			       "device %d\n", i);
-			return(0);
-		}
-
-		/* It's a write to a writeable ubd device - it must be COWed
-		 * because, otherwise, the page would have been mapped in
-		 * writeable
-		 */
-
-		if(!dev->cow.file)
-			panic("Write fault on writeable non-COW ubd device %d",
-			      i);
-
-		/* It should also be an access to the backing file since the
-		 * COW pages should be mapped in read-write
-		 */
-
-		if(fd == dev->fd)
-			panic("Write fault on a backing page of ubd "
-			      "device %d\n", i);
-
-		/* So, we do the write, copying the backing data to the COW
-		 * file...
-		 */
-
-		err = os_seek_file(dev->fd, offset + dev->cow.data_offset);
-		if(err < 0)
-			panic("Couldn't seek to %lld in COW file of ubd "
-			      "device %d, err = %d",
-			      offset + dev->cow.data_offset, i, -err);
-
-		n = os_write_file(dev->fd, (void *) address, PAGE_SIZE);
-		if(n != PAGE_SIZE)
-			panic("Couldn't copy data to COW file of ubd "
-			      "device %d, err = %d", i, -n);
-
-		/* ... updating the COW bitmap... */
-
-		cowify_bitmap(offset, PAGE_SIZE, NULL, &bitmap_offset,
-			      dev->cow.bitmap, dev->cow.bitmap_offset,
-			      new_bitmap, dev->cow.bitmap_len);
-
-		err = os_seek_file(dev->fd, bitmap_offset);
-		if(err < 0)
-			panic("Couldn't seek to %lld in COW file of ubd "
-			      "device %d, err = %d", bitmap_offset, i, -err);
-
-		n = os_write_file(dev->fd, new_bitmap, sizeof(new_bitmap));
-		if(n != sizeof(new_bitmap))
-			panic("Couldn't update bitmap of ubd device %d, "
-			      "err = %d", i, -n);
-
-		/* Maybe we can map the COW page in, and maybe we can't.  If
-		 * it is a pre-V3 COW file, we can't, since the alignment will
-		 * be wrong.  If it is a V3 or later COW file which has been
-		 * moved to a system with a larger page size, then maybe we
-		 * can't, depending on the exact location of the page.
-		 */
-
-		offset += dev->cow.data_offset;
-
-		/* Remove the remapping, putting the original anonymous page
-		 * back.  If the COW file can be mapped in, that is done.
-		 * Otherwise, the COW page is read in.
-		 */
-
-		if(!physmem_remove_mapping((void *) address))
-			panic("Address 0x%lx not remapped by ubd device %d",
-			      address, i);
-		if((offset % UBD_MMAP_BLOCK_SIZE) == 0)
-			physmem_subst_mapping((void *) address, dev->fd,
-					      offset, 1);
-		else {
-			err = os_seek_file(dev->fd, offset);
-			if(err < 0)
-				panic("Couldn't seek to %lld in COW file of "
-				      "ubd device %d, err = %d", offset, i,
-				      -err);
-
-			n = os_read_file(dev->fd, (void *) address, PAGE_SIZE);
-			if(n != PAGE_SIZE)
-				panic("Failed to read page from offset %llx of "
-				      "COW file of ubd device %d, err = %d",
-				      offset, i, -n);
-		}
-
-		return(1);
-	}
-
-	/* It's not a write on a ubd device */
-	return(0);
-}
-
-static struct remapper ubd_remapper = {
-	.list = LIST_HEAD_INIT(ubd_remapper.list),
-	.proc = ubd_check_remapped,
-};
-
-static int ubd_remapper_setup(void)
-{
-	if(ubd_do_mmap)
-		register_remapper(&ubd_remapper);
-
-	return(0);
-}
-
-__initcall(ubd_remapper_setup);
-
 static int same_backing_files(char *from_cmdline, char *from_cow, char *cow)
 {
 	struct uml_stat buf1, buf2;
@@ -1568,15 +1287,6 @@ void do_io(struct io_thread_req *req)
 	int err;
 	__u64 off;
 
-	if(req->op == UBD_MMAP){
-		/* Touch the page to force the host to do any necessary IO to
-		 * get it into memory
-		 */
-		n = *((volatile int *) req->buffer);
-		req->error = update_bitmap(req);
-		return;
-	}
-
 	nsectors = req->length / req->sectorsize;
 	start = 0;
 	do {
diff --git a/arch/um/include/sysdep-i386/ptrace.h b/arch/um/include/sysdep-i386/ptrace.h
index 84ec7ff5cf8c..6eaeb9919983 100644
--- a/arch/um/include/sysdep-i386/ptrace.h
+++ b/arch/um/include/sysdep-i386/ptrace.h
@@ -31,7 +31,6 @@ extern int sysemu_supported;
 #ifdef UML_CONFIG_MODE_SKAS
 
 #include "skas_ptregs.h"
-#include "sysdep/faultinfo.h"
 
 #define REGS_IP(r) ((r)[HOST_IP])
 #define REGS_SP(r) ((r)[HOST_SP])
@@ -59,6 +58,7 @@ extern int sysemu_supported;
 #define PTRACE_SYSEMU_SINGLESTEP 32
 #endif
 
+#include "sysdep/faultinfo.h"
 #include "choose-mode.h"
 
 union uml_pt_regs {
diff --git a/arch/um/include/sysdep-x86_64/checksum.h b/arch/um/include/sysdep-x86_64/checksum.h
index 572c6c19be33..ea97005af694 100644
--- a/arch/um/include/sysdep-x86_64/checksum.h
+++ b/arch/um/include/sysdep-x86_64/checksum.h
@@ -9,8 +9,6 @@
9#include "linux/in6.h" 9#include "linux/in6.h"
10#include "asm/uaccess.h" 10#include "asm/uaccess.h"
11 11
12extern unsigned int csum_partial_copy_from(const unsigned char *src, unsigned char *dst, int len,
13 int sum, int *err_ptr);
14extern unsigned csum_partial(const unsigned char *buff, unsigned len, 12extern unsigned csum_partial(const unsigned char *buff, unsigned len,
15 unsigned sum); 13 unsigned sum);
16 14
@@ -31,10 +29,15 @@ unsigned int csum_partial_copy_nocheck(const unsigned char *src, unsigned char *
 }
 
 static __inline__
-unsigned int csum_partial_copy_from_user(const unsigned char *src, unsigned char *dst,
-					 int len, int sum, int *err_ptr)
+unsigned int csum_partial_copy_from_user(const unsigned char *src,
+					 unsigned char *dst, int len, int sum,
+					 int *err_ptr)
 {
-	return csum_partial_copy_from(src, dst, len, sum, err_ptr);
+	if(copy_from_user(dst, src, len)){
+		*err_ptr = -EFAULT;
+		return(-1);
+	}
+	return csum_partial(dst, len, sum);
 }
 
 /**
@@ -137,15 +140,6 @@ static inline unsigned add32_with_carry(unsigned a, unsigned b)
 	return a;
 }
 
-#endif
+extern unsigned short ip_compute_csum(unsigned char * buff, int len);
 
-/*
- * Overrides for Emacs so that we follow Linus's tabbing style.
- * Emacs will notice this stuff at the end of the file and automatically
- * adjust the settings for this buffer only.  This must remain at the end
- * of the file.
- * ---------------------------------------------------------------------------
- * Local variables:
- * c-file-style: "linux"
- * End:
- */
+#endif
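
Note: the checksum.h change above drops the separate csum_partial_copy_from() primitive and implements csum_partial_copy_from_user() as copy_from_user() followed by csum_partial(), i.e. copy first, then checksum the kernel-side buffer. csum_partial() accumulates the standard ones'-complement Internet checksum; a userspace sketch of that fold (csum_add() and the sample bytes are illustrative):

#include <stdint.h>
#include <stdio.h>
#include <string.h>

static uint32_t csum_add(uint32_t sum, const uint8_t *buf, size_t len)
{
	size_t i;

	for (i = 0; i + 1 < len; i += 2)
		sum += (uint32_t)((buf[i] << 8) | buf[i + 1]);
	if (len & 1)
		sum += (uint32_t)(buf[len - 1] << 8);	/* pad odd byte */
	while (sum >> 16)
		sum = (sum & 0xffff) + (sum >> 16);	/* fold carries */
	return sum;
}

int main(void)
{
	const uint8_t src[5] = { 0x45, 0x00, 0x00, 0x1c, 0x01 };
	uint8_t dst[5];

	memcpy(dst, src, sizeof(dst));		/* the "copy" half */
	printf("csum = 0x%04x\n",
	       (unsigned)(~csum_add(0, dst, sizeof(dst)) & 0xffff));
	return 0;
}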
diff --git a/arch/um/include/sysdep-x86_64/ptrace.h b/arch/um/include/sysdep-x86_64/ptrace.h
index 348e8fcd513f..be8acd5efd97 100644
--- a/arch/um/include/sysdep-x86_64/ptrace.h
+++ b/arch/um/include/sysdep-x86_64/ptrace.h
@@ -135,6 +135,7 @@ extern int mode_tt;
 	__CHOOSE_MODE(SC_EFLAGS(UPT_SC(r)), REGS_EFLAGS((r)->skas.regs))
 #define UPT_SC(r) ((r)->tt.sc)
 #define UPT_SYSCALL_NR(r) __CHOOSE_MODE((r)->tt.syscall, (r)->skas.syscall)
+#define UPT_SYSCALL_RET(r) UPT_RAX(r)
 
 extern int user_context(unsigned long sp);
 
@@ -196,32 +197,32 @@ struct syscall_args {
 
 
 #define UPT_SET(regs, reg, val) \
-    ({ unsigned long val; \
+    ({ unsigned long __upt_val = val; \
     switch(reg){ \
-    case R8: UPT_R8(regs) = val; break; \
-    case R9: UPT_R9(regs) = val; break; \
-    case R10: UPT_R10(regs) = val; break; \
-    case R11: UPT_R11(regs) = val; break; \
-    case R12: UPT_R12(regs) = val; break; \
-    case R13: UPT_R13(regs) = val; break; \
-    case R14: UPT_R14(regs) = val; break; \
-    case R15: UPT_R15(regs) = val; break; \
-    case RIP: UPT_IP(regs) = val; break; \
-    case RSP: UPT_SP(regs) = val; break; \
-    case RAX: UPT_RAX(regs) = val; break; \
-    case RBX: UPT_RBX(regs) = val; break; \
-    case RCX: UPT_RCX(regs) = val; break; \
-    case RDX: UPT_RDX(regs) = val; break; \
-    case RSI: UPT_RSI(regs) = val; break; \
-    case RDI: UPT_RDI(regs) = val; break; \
-    case RBP: UPT_RBP(regs) = val; break; \
-    case ORIG_RAX: UPT_ORIG_RAX(regs) = val; break; \
-    case CS: UPT_CS(regs) = val; break; \
-    case DS: UPT_DS(regs) = val; break; \
-    case ES: UPT_ES(regs) = val; break; \
-    case FS: UPT_FS(regs) = val; break; \
-    case GS: UPT_GS(regs) = val; break; \
-    case EFLAGS: UPT_EFLAGS(regs) = val; break; \
+    case R8: UPT_R8(regs) = __upt_val; break; \
+    case R9: UPT_R9(regs) = __upt_val; break; \
+    case R10: UPT_R10(regs) = __upt_val; break; \
+    case R11: UPT_R11(regs) = __upt_val; break; \
+    case R12: UPT_R12(regs) = __upt_val; break; \
+    case R13: UPT_R13(regs) = __upt_val; break; \
+    case R14: UPT_R14(regs) = __upt_val; break; \
+    case R15: UPT_R15(regs) = __upt_val; break; \
+    case RIP: UPT_IP(regs) = __upt_val; break; \
+    case RSP: UPT_SP(regs) = __upt_val; break; \
+    case RAX: UPT_RAX(regs) = __upt_val; break; \
+    case RBX: UPT_RBX(regs) = __upt_val; break; \
+    case RCX: UPT_RCX(regs) = __upt_val; break; \
+    case RDX: UPT_RDX(regs) = __upt_val; break; \
+    case RSI: UPT_RSI(regs) = __upt_val; break; \
+    case RDI: UPT_RDI(regs) = __upt_val; break; \
+    case RBP: UPT_RBP(regs) = __upt_val; break; \
+    case ORIG_RAX: UPT_ORIG_RAX(regs) = __upt_val; break; \
+    case CS: UPT_CS(regs) = __upt_val; break; \
+    case DS: UPT_DS(regs) = __upt_val; break; \
+    case ES: UPT_ES(regs) = __upt_val; break; \
+    case FS: UPT_FS(regs) = __upt_val; break; \
+    case GS: UPT_GS(regs) = __upt_val; break; \
+    case EFLAGS: UPT_EFLAGS(regs) = __upt_val; break; \
     default : \
         panic("Bad register in UPT_SET : %d\n", reg); \
         break; \
@@ -245,14 +246,3 @@ struct syscall_args {
     CHOOSE_MODE((&(r)->tt.faultinfo), (&(r)->skas.faultinfo))
 
 #endif
-
-/*
- * Overrides for Emacs so that we follow Linus's tabbing style.
- * Emacs will notice this stuff at the end of the file and automatically
- * adjust the settings for this buffer only. This must remain at the end
- * of the file.
- * ---------------------------------------------------------------------------
- * Local variables:
- * c-file-style: "linux"
- * End:
- */
diff --git a/arch/um/kernel/Makefile b/arch/um/kernel/Makefile
index 9736ca27c5f0..a8918e80df96 100644
--- a/arch/um/kernel/Makefile
+++ b/arch/um/kernel/Makefile
@@ -14,7 +14,7 @@ obj-y = config.o exec_kern.o exitcode.o \
     tlb.o trap_kern.o trap_user.o uaccess_user.o um_arch.o umid.o \
     user_util.o
 
-obj-$(CONFIG_BLK_DEV_INITRD) += initrd_kern.o initrd_user.o
+obj-$(CONFIG_BLK_DEV_INITRD) += initrd.o
 obj-$(CONFIG_GPROF) += gprof_syms.o
 obj-$(CONFIG_GCOV) += gmon_syms.o
 obj-$(CONFIG_TTY_LOG) += tty_log.o
diff --git a/arch/um/kernel/checksum.c b/arch/um/kernel/checksum.c
deleted file mode 100644
index e69de29bb2d1..000000000000
--- a/arch/um/kernel/checksum.c
+++ /dev/null
diff --git a/arch/um/kernel/initrd.c b/arch/um/kernel/initrd.c
new file mode 100644
index 000000000000..82ecf904b09c
--- /dev/null
+++ b/arch/um/kernel/initrd.c
@@ -0,0 +1,78 @@
+/*
+ * Copyright (C) 2000, 2001, 2002 Jeff Dike (jdike@karaya.com)
+ * Licensed under the GPL
+ */
+
+#include "linux/init.h"
+#include "linux/bootmem.h"
+#include "linux/initrd.h"
+#include "asm/types.h"
+#include "user_util.h"
+#include "kern_util.h"
+#include "initrd.h"
+#include "init.h"
+#include "os.h"
+
+/* Changed by uml_initrd_setup, which is a setup */
+static char *initrd __initdata = NULL;
+
+static int __init read_initrd(void)
+{
+    void *area;
+    long long size;
+    int err;
+
+    if(initrd == NULL) return 0;
+    err = os_file_size(initrd, &size);
+    if(err) return 0;
+    area = alloc_bootmem(size);
+    if(area == NULL) return 0;
+    if(load_initrd(initrd, area, size) == -1) return 0;
+    initrd_start = (unsigned long) area;
+    initrd_end = initrd_start + size;
+    return 0;
+}
+
+__uml_postsetup(read_initrd);
+
+static int __init uml_initrd_setup(char *line, int *add)
+{
+    initrd = line;
+    return 0;
+}
+
+__uml_setup("initrd=", uml_initrd_setup,
+"initrd=<initrd image>\n"
+"    This is used to boot UML from an initrd image. The argument is the\n"
+"    name of the file containing the image.\n\n"
+);
+
+int load_initrd(char *filename, void *buf, int size)
+{
+    int fd, n;
+
+    fd = os_open_file(filename, of_read(OPENFLAGS()), 0);
+    if(fd < 0){
+        printk("Opening '%s' failed - err = %d\n", filename, -fd);
+        return(-1);
+    }
+    n = os_read_file(fd, buf, size);
+    if(n != size){
+        printk("Read of %d bytes from '%s' failed, err = %d\n", size,
+               filename, -n);
+        return(-1);
+    }
+
+    os_close_file(fd);
+    return(0);
+}
+/*
+ * Overrides for Emacs so that we follow Linus's tabbing style.
+ * Emacs will notice this stuff at the end of the file and automatically
+ * adjust the settings for this buffer only. This must remain at the end
+ * of the file.
+ * ---------------------------------------------------------------------------
+ * Local variables:
+ * c-file-style: "linux"
+ * End:
+ */
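
With the consolidated initrd.c above, `initrd=` is an ordinary __uml_setup() switch: read_initrd() runs as a __uml_postsetup() hook, sizes the file, grabs bootmem, and loads the image before the generic initrd code looks at initrd_start/initrd_end. A typical invocation (file names are only examples) would be:

    ./linux ubd0=root_fs initrd=initrd.img

Note that read_initrd() returns 0 on every path, including the failure cases, which simply leave initrd_start unset instead of aborting the boot.
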
diff --git a/arch/um/kernel/ksyms.c b/arch/um/kernel/ksyms.c
index 78d69dc74b26..99439fa15ef4 100644
--- a/arch/um/kernel/ksyms.c
+++ b/arch/um/kernel/ksyms.c
@@ -57,6 +57,7 @@ EXPORT_SYMBOL(copy_to_user_tt);
 EXPORT_SYMBOL(strncpy_from_user_skas);
 EXPORT_SYMBOL(copy_to_user_skas);
 EXPORT_SYMBOL(copy_from_user_skas);
+EXPORT_SYMBOL(clear_user_skas);
 #endif
 EXPORT_SYMBOL(uml_strdup);
 
diff --git a/arch/um/kernel/mem.c b/arch/um/kernel/mem.c
index f156661781cb..c22825f13e40 100644
--- a/arch/um/kernel/mem.c
+++ b/arch/um/kernel/mem.c
@@ -100,12 +100,37 @@ void mem_init(void)
 #endif
 }
 
+/*
+ * Create a page table and place a pointer to it in a middle page
+ * directory entry.
+ */
+static void __init one_page_table_init(pmd_t *pmd)
+{
+    if (pmd_none(*pmd)) {
+        pte_t *pte = (pte_t *) alloc_bootmem_low_pages(PAGE_SIZE);
+        set_pmd(pmd, __pmd(_KERNPG_TABLE +
+                           (unsigned long) __pa(pte)));
+        if (pte != pte_offset_kernel(pmd, 0))
+            BUG();
+    }
+}
+
+static void __init one_md_table_init(pud_t *pud)
+{
+#ifdef CONFIG_3_LEVEL_PGTABLES
+    pmd_t *pmd_table = (pmd_t *) alloc_bootmem_low_pages(PAGE_SIZE);
+    set_pud(pud, __pud(_KERNPG_TABLE + (unsigned long) __pa(pmd_table)));
+    if (pmd_table != pmd_offset(pud, 0))
+        BUG();
+#endif
+}
+
 static void __init fixrange_init(unsigned long start, unsigned long end,
                                  pgd_t *pgd_base)
 {
     pgd_t *pgd;
+    pud_t *pud;
     pmd_t *pmd;
-    pte_t *pte;
     int i, j;
     unsigned long vaddr;
 
@@ -115,15 +140,12 @@ static void __init fixrange_init(unsigned long start, unsigned long end,
         pgd = pgd_base + i;
 
         for ( ; (i < PTRS_PER_PGD) && (vaddr < end); pgd++, i++) {
-            pmd = (pmd_t *)pgd;
+            pud = pud_offset(pgd, vaddr);
+            if (pud_none(*pud))
+                one_md_table_init(pud);
+            pmd = pmd_offset(pud, vaddr);
             for (; (j < PTRS_PER_PMD) && (vaddr != end); pmd++, j++) {
-                if (pmd_none(*pmd)) {
-                    pte = (pte_t *) alloc_bootmem_low_pages(PAGE_SIZE);
-                    set_pmd(pmd, __pmd(_KERNPG_TABLE +
-                                       (unsigned long) __pa(pte)));
-                    if (pte != pte_offset_kernel(pmd, 0))
-                        BUG();
-                }
+                one_page_table_init(pmd);
                 vaddr += PMD_SIZE;
             }
             j = 0;
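
The rework above pulls table allocation out of fixrange_init() so the walk can honor a third level when CONFIG_3_LEVEL_PGTABLES is set: pud_offset() resolves through the pgd (folding back onto it on two-level configurations, where one_md_table_init() compiles away), and only then is the pmd located and populated. A compressed sketch of the resulting walk, assuming the usual folded-pud accessors:

    /* Sketch only: populate tables for [start, end) under pgd_base. */
    for (vaddr = start; vaddr < end; vaddr += PMD_SIZE) {
        pgd_t *pgd = pgd_base + pgd_index(vaddr);
        pud_t *pud = pud_offset(pgd, vaddr);

        if (pud_none(*pud))
            one_md_table_init(pud);                  /* allocate a pmd page */
        one_page_table_init(pmd_offset(pud, vaddr)); /* allocate a pte page */
    }
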
diff --git a/arch/um/kernel/ptrace.c b/arch/um/kernel/ptrace.c
index 2b75d8d9ba73..2925e15324de 100644
--- a/arch/um/kernel/ptrace.c
+++ b/arch/um/kernel/ptrace.c
@@ -28,9 +28,9 @@ static inline void set_singlestepping(struct task_struct *child, int on)
     child->thread.singlestep_syscall = 0;
 
 #ifdef SUBARCH_SET_SINGLESTEPPING
-    SUBARCH_SET_SINGLESTEPPING(child, on)
+    SUBARCH_SET_SINGLESTEPPING(child, on);
 #endif
- }
+}
 
 /*
  * Called by kernel/ptrace.c when detaching..
@@ -83,7 +83,7 @@ long sys_ptrace(long request, long pid, long addr, long data)
     }
 
 #ifdef SUBACH_PTRACE_SPECIAL
-    SUBARCH_PTRACE_SPECIAL(child,request,addr,data)
+    SUBARCH_PTRACE_SPECIAL(child,request,addr,data);
 #endif
 
     ret = ptrace_check_attach(child, request == PTRACE_KILL);
diff --git a/arch/um/kernel/trap_kern.c b/arch/um/kernel/trap_kern.c
index 5fca2c61eb98..1de22d8a313a 100644
--- a/arch/um/kernel/trap_kern.c
+++ b/arch/um/kernel/trap_kern.c
@@ -57,10 +57,11 @@ int handle_page_fault(unsigned long address, unsigned long ip,
     *code_out = SEGV_ACCERR;
     if(is_write && !(vma->vm_flags & VM_WRITE))
         goto out;
+
+    if(!(vma->vm_flags & (VM_READ | VM_EXEC)))
+        goto out;
+
     page = address & PAGE_MASK;
-    pgd = pgd_offset(mm, page);
-    pud = pud_offset(pgd, page);
-    pmd = pmd_offset(pud, page);
     do {
  survive:
         switch (handle_mm_fault(mm, vma, address, is_write)){
@@ -106,33 +107,6 @@ out_of_memory:
     goto out;
 }
 
-LIST_HEAD(physmem_remappers);
-
-void register_remapper(struct remapper *info)
-{
-    list_add(&info->list, &physmem_remappers);
-}
-
-static int check_remapped_addr(unsigned long address, int is_write)
-{
-    struct remapper *remapper;
-    struct list_head *ele;
-    __u64 offset;
-    int fd;
-
-    fd = phys_mapping(__pa(address), &offset);
-    if(fd == -1)
-        return(0);
-
-    list_for_each(ele, &physmem_remappers){
-        remapper = list_entry(ele, struct remapper, list);
-        if((*remapper->proc)(fd, address, is_write, offset))
-            return(1);
-    }
-
-    return(0);
-}
-
 /*
  * We give a *copy* of the faultinfo in the regs to segv.
  * This must be done, since nesting SEGVs could overwrite
@@ -151,8 +125,6 @@ unsigned long segv(struct faultinfo fi, unsigned long ip, int is_user, void *sc)
         flush_tlb_kernel_vm();
         return(0);
     }
-    else if(check_remapped_addr(address & PAGE_MASK, is_write))
-        return(0);
     else if(current->mm == NULL)
         panic("Segfault with no mm");
     err = handle_page_fault(address, ip, is_write, is_user, &si.si_code);
diff --git a/arch/um/kernel/tt/ksyms.c b/arch/um/kernel/tt/ksyms.c
index 92ec85d67c7c..84a9385a8fef 100644
--- a/arch/um/kernel/tt/ksyms.c
+++ b/arch/um/kernel/tt/ksyms.c
@@ -12,6 +12,7 @@ EXPORT_SYMBOL(__do_copy_to_user);
 EXPORT_SYMBOL(__do_strncpy_from_user);
 EXPORT_SYMBOL(__do_strnlen_user);
 EXPORT_SYMBOL(__do_clear_user);
+EXPORT_SYMBOL(clear_user_tt);
 
 EXPORT_SYMBOL(tracing_pid);
 EXPORT_SYMBOL(honeypot);
diff --git a/arch/um/kernel/uml.lds.S b/arch/um/kernel/uml.lds.S
index 76eadb309189..dd5355500bdc 100644
--- a/arch/um/kernel/uml.lds.S
+++ b/arch/um/kernel/uml.lds.S
@@ -73,6 +73,8 @@ SECTIONS
 
   .got          : { *(.got.plt) *(.got) }
   .dynamic      : { *(.dynamic) }
+  .tdata        : { *(.tdata .tdata.* .gnu.linkonce.td.*) }
+  .tbss         : { *(.tbss .tbss.* .gnu.linkonce.tb.*) *(.tcommon) }
   /* We want the small data sections together, so single-instruction offsets
      can access them all, and initialized data all before uninitialized, so
      we can shorten the on-disk segment size. */
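
The two new output sections collect thread-local storage: initialized TLS objects land in .tdata and zero-initialized ones in .tbss, so a UML binary linking code that uses __thread no longer leaves those sections unplaced. A small illustration of what ends up where (standard ELF TLS behavior, nothing UML-specific):

    /* Initialized TLS data is emitted into .tdata, zeroed TLS into .tbss. */
    __thread int tls_counter = 42;  /* .tdata */
    __thread int tls_scratch;       /* .tbss  */

    int main(void)
    {
        return tls_counter - 42 + tls_scratch;  /* always 0 */
    }
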
diff --git a/arch/um/sys-i386/Makefile b/arch/um/sys-i386/Makefile
index fcd67c3414e4..4351e5605506 100644
--- a/arch/um/sys-i386/Makefile
+++ b/arch/um/sys-i386/Makefile
@@ -9,11 +9,11 @@ USER_OBJS := bugs.o ptrace_user.o sigcontext.o fault.o
 
 SYMLINKS = bitops.c semaphore.c highmem.c module.c
 
+include arch/um/scripts/Makefile.rules
+
 bitops.c-dir = lib
 semaphore.c-dir = kernel
 highmem.c-dir = mm
 module.c-dir = kernel
 
 subdir- := util
-
-include arch/um/scripts/Makefile.rules
diff --git a/arch/um/sys-i386/delay.c b/arch/um/sys-i386/delay.c
index e9892eef51ce..2c11b9770e8b 100644
--- a/arch/um/sys-i386/delay.c
+++ b/arch/um/sys-i386/delay.c
@@ -1,5 +1,7 @@
1#include "linux/delay.h" 1#include <linux/module.h>
2#include "asm/param.h" 2#include <linux/kernel.h>
3#include <linux/delay.h>
4#include <asm/param.h>
3 5
4void __delay(unsigned long time) 6void __delay(unsigned long time)
5{ 7{
@@ -20,13 +22,19 @@ void __udelay(unsigned long usecs)
     int i, n;
 
     n = (loops_per_jiffy * HZ * usecs) / MILLION;
-    for(i=0;i<n;i++) ;
+    for(i=0;i<n;i++)
+        cpu_relax();
 }
 
+EXPORT_SYMBOL(__udelay);
+
 void __const_udelay(unsigned long usecs)
 {
     int i, n;
 
     n = (loops_per_jiffy * HZ * usecs) / MILLION;
-    for(i=0;i<n;i++) ;
+    for(i=0;i<n;i++)
+        cpu_relax();
 }
+
+EXPORT_SYMBOL(__const_udelay);
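
Two things change in delay.c: the spin loops now execute cpu_relax() per iteration instead of an empty statement, and the symbols are exported so modules can use udelay(). The iteration count is loops_per_jiffy * HZ * usecs / MILLION (MILLION being 10^6), i.e. calibrated loops-per-second scaled to the requested interval. Worked through with made-up calibration numbers, say loops_per_jiffy = 50,000 and HZ = 100: a 10 us delay becomes 50,000 * 100 * 10 / 1,000,000 = 50 iterations.
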
diff --git a/arch/um/sys-x86_64/Makefile b/arch/um/sys-x86_64/Makefile
index 3d7da911cc8c..608466ad6b22 100644
--- a/arch/um/sys-x86_64/Makefile
+++ b/arch/um/sys-x86_64/Makefile
@@ -14,11 +14,11 @@ obj-$(CONFIG_MODULES) += module.o um_module.o
 
 USER_OBJS := ptrace_user.o sigcontext.o
 
-include arch/um/scripts/Makefile.rules
-
 SYMLINKS = bitops.c csum-copy.S csum-partial.c csum-wrappers.c memcpy.S \
     semaphore.c thunk.S module.c
 
+include arch/um/scripts/Makefile.rules
+
 bitops.c-dir = lib
 csum-copy.S-dir = lib
 csum-partial.c-dir = lib
@@ -28,6 +28,4 @@ semaphore.c-dir = kernel
 thunk.S-dir = lib
 module.c-dir = kernel
 
-CFLAGS_csum-partial.o := -Dcsum_partial=arch_csum_partial
-
 subdir- := util
diff --git a/arch/um/sys-x86_64/delay.c b/arch/um/sys-x86_64/delay.c
index 651332aeec22..137f4446b439 100644
--- a/arch/um/sys-x86_64/delay.c
+++ b/arch/um/sys-x86_64/delay.c
@@ -5,40 +5,37 @@
  * Licensed under the GPL
  */
 
-#include "linux/delay.h"
-#include "asm/processor.h"
-#include "asm/param.h"
+#include <linux/module.h>
+#include <linux/delay.h>
+#include <asm/processor.h>
+#include <asm/param.h>
 
 void __delay(unsigned long loops)
 {
     unsigned long i;
 
-    for(i = 0; i < loops; i++) ;
+    for(i = 0; i < loops; i++)
+        cpu_relax();
 }
 
 void __udelay(unsigned long usecs)
 {
-    int i, n;
+    unsigned long i, n;
 
     n = (loops_per_jiffy * HZ * usecs) / MILLION;
-    for(i=0;i<n;i++) ;
+    for(i=0;i<n;i++)
+        cpu_relax();
 }
 
+EXPORT_SYMBOL(__udelay);
+
 void __const_udelay(unsigned long usecs)
 {
-    int i, n;
+    unsigned long i, n;
 
     n = (loops_per_jiffy * HZ * usecs) / MILLION;
-    for(i=0;i<n;i++) ;
+    for(i=0;i<n;i++)
+        cpu_relax();
 }
 
-/*
- * Overrides for Emacs so that we follow Linus's tabbing style.
- * Emacs will notice this stuff at the end of the file and automatically
- * adjust the settings for this buffer only. This must remain at the end
- * of the file.
- * ---------------------------------------------------------------------------
- * Local variables:
- * c-file-style: "linux"
- * End:
- */
+EXPORT_SYMBOL(__const_udelay);
diff --git a/arch/um/sys-x86_64/ksyms.c b/arch/um/sys-x86_64/ksyms.c
index a27f0ee6a4f6..859273808203 100644
--- a/arch/um/sys-x86_64/ksyms.c
+++ b/arch/um/sys-x86_64/ksyms.c
@@ -16,5 +16,4 @@ EXPORT_SYMBOL(__up_wakeup);
 EXPORT_SYMBOL(__memcpy);
 
 /* Networking helper routines. */
-/*EXPORT_SYMBOL(csum_partial_copy_from);
-EXPORT_SYMBOL(csum_partial_copy_to);*/
+EXPORT_SYMBOL(ip_compute_csum);
diff --git a/arch/um/sys-x86_64/ptrace.c b/arch/um/sys-x86_64/ptrace.c
index b593bb256f2c..74eee5c7c6dd 100644
--- a/arch/um/sys-x86_64/ptrace.c
+++ b/arch/um/sys-x86_64/ptrace.c
@@ -5,10 +5,11 @@
  */
 
 #define __FRAME_OFFSETS
-#include "asm/ptrace.h"
-#include "linux/sched.h"
-#include "linux/errno.h"
-#include "asm/elf.h"
+#include <asm/ptrace.h>
+#include <linux/sched.h>
+#include <linux/errno.h>
+#include <asm/uaccess.h>
+#include <asm/elf.h>
 
 /* XXX x86_64 */
 unsigned long not_ss;
diff --git a/arch/um/sys-x86_64/syscalls.c b/arch/um/sys-x86_64/syscalls.c
index dd9914642b8e..d4a59657fb99 100644
--- a/arch/um/sys-x86_64/syscalls.c
+++ b/arch/um/sys-x86_64/syscalls.c
@@ -15,6 +15,7 @@
15#include "asm/unistd.h" 15#include "asm/unistd.h"
16#include "asm/prctl.h" /* XXX This should get the constants from libc */ 16#include "asm/prctl.h" /* XXX This should get the constants from libc */
17#include "choose-mode.h" 17#include "choose-mode.h"
18#include "kern.h"
18 19
19asmlinkage long sys_uname64(struct new_utsname __user * name) 20asmlinkage long sys_uname64(struct new_utsname __user * name)
20{ 21{
diff --git a/arch/um/sys-x86_64/user-offsets.c b/arch/um/sys-x86_64/user-offsets.c
index 5e14792e4838..513d17ceafd4 100644
--- a/arch/um/sys-x86_64/user-offsets.c
+++ b/arch/um/sys-x86_64/user-offsets.c
@@ -3,6 +3,14 @@
 #include <signal.h>
 #define __FRAME_OFFSETS
 #include <asm/ptrace.h>
+#include <asm/types.h>
+/* For some reason, x86_64 defines u64 and u32 only in <pci/types.h>, which I
+ * refuse to include here, even though they're used throughout the headers.
+ * These are used in asm/user.h, and that include can't be avoided because of
+ * the sizeof(struct user_regs_struct) below.
+ */
+typedef __u64 u64;
+typedef __u32 u32;
 #include <asm/user.h>
 
 #define DEFINE(sym, val) \
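
The u64/u32 typedefs exist only to keep <asm/user.h> compilable from this userspace-built file; sizeof(struct user_regs_struct) is then fed through DEFINE() to produce assembler-visible constants. DEFINE()'s body is outside this hunk, but the usual kbuild idiom it follows (shown here as an assumption, not quoted from the tree) emits each value into the compiler's assembly output for a script to harvest:

    #define DEFINE(sym, val) \
        asm volatile("\n->" #sym " %0 " #val : : "i" (val))
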
diff --git a/arch/x86_64/kernel/io_apic.c b/arch/x86_64/kernel/io_apic.c
index ac7684324954..80e9b498c443 100644
--- a/arch/x86_64/kernel/io_apic.c
+++ b/arch/x86_64/kernel/io_apic.c
@@ -42,6 +42,8 @@
 
 int sis_apic_bug; /* not actually supported, dummy for compile */
 
+static int no_timer_check;
+
 static DEFINE_SPINLOCK(ioapic_lock);
 
 /*
@@ -1601,7 +1603,7 @@ static inline void check_timer(void)
      * Ok, does IRQ0 through the IOAPIC work?
      */
     unmask_IO_APIC_irq(0);
-    if (timer_irq_works()) {
+    if (!no_timer_check && timer_irq_works()) {
         nmi_watchdog_default();
         if (nmi_watchdog == NMI_IO_APIC) {
             disable_8259A_irq(0);
@@ -1671,6 +1673,13 @@ static inline void check_timer(void)
1671 panic("IO-APIC + timer doesn't work! Try using the 'noapic' kernel parameter\n"); 1673 panic("IO-APIC + timer doesn't work! Try using the 'noapic' kernel parameter\n");
1672} 1674}
1673 1675
1676static int __init notimercheck(char *s)
1677{
1678 no_timer_check = 1;
1679 return 1;
1680}
1681__setup("no_timer_check", notimercheck);
1682
1674/* 1683/*
1675 * 1684 *
1676 * IRQ's that are handled by the PIC in the MPS IOAPIC case. 1685 * IRQ's that are handled by the PIC in the MPS IOAPIC case.
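
notimercheck() gives operators an escape hatch: booting with no_timer_check on the kernel command line makes check_timer() skip the timer_irq_works() probe, which matters on machines where the probe itself hangs or gives a false negative; this is presumably what the small Documentation/x86_64/boot-options.txt hunk elsewhere in this merge documents. An illustrative boot line:

    kernel /vmlinuz root=/dev/sda1 no_timer_check
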
diff --git a/arch/x86_64/kernel/ptrace.c b/arch/x86_64/kernel/ptrace.c
index 60dc9b98951d..525f6a128a27 100644
--- a/arch/x86_64/kernel/ptrace.c
+++ b/arch/x86_64/kernel/ptrace.c
@@ -380,7 +380,7 @@ asmlinkage long sys_ptrace(long request, long pid, unsigned long addr, long data
         break;
 
     switch (addr) {
-    case 0 ... sizeof(struct user_regs_struct):
+    case 0 ... sizeof(struct user_regs_struct) - sizeof(long):
         tmp = getreg(child, addr);
         break;
     case offsetof(struct user, u_debugreg[0]):
@@ -425,7 +425,7 @@ asmlinkage long sys_ptrace(long request, long pid, unsigned long addr, long data
         break;
 
     switch (addr) {
-    case 0 ... sizeof(struct user_regs_struct):
+    case 0 ... sizeof(struct user_regs_struct) - sizeof(long):
         ret = putreg(child, addr, data);
         break;
         /* Disallows to set a breakpoint into the vsyscall */
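
The two hunks above fix an off-by-one in the PEEKUSR/POKEUSR bounds check: addr is a byte offset into struct user_regs_struct, and GCC case ranges are inclusive, so `0 ... sizeof(struct user_regs_struct)` also accepted an offset one whole register past the end. Subtracting sizeof(long) makes the last accepted offset the start of the final eight-byte slot. A self-contained illustration (the struct is a stand-in, not the real layout):

    #include <assert.h>
    #include <stddef.h>

    struct regs { unsigned long r[27]; }; /* stand-in for user_regs_struct */

    static int offset_ok(size_t addr)
    {
        /* old check was addr <= sizeof(struct regs): one slot too far */
        return addr <= sizeof(struct regs) - sizeof(long);
    }

    int main(void)
    {
        assert(offset_ok(sizeof(struct regs) - sizeof(long))); /* last slot */
        assert(!offset_ok(sizeof(struct regs)));               /* past end  */
        return 0;
    }
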
diff --git a/arch/x86_64/kernel/setup.c b/arch/x86_64/kernel/setup.c
index b9fd0252c279..99f038ede23c 100644
--- a/arch/x86_64/kernel/setup.c
+++ b/arch/x86_64/kernel/setup.c
@@ -719,7 +719,6 @@ static void __init display_cacheinfo(struct cpuinfo_x86 *c)
     }
 }
 
-#ifdef CONFIG_SMP
 /*
  * On a AMD dual core setup the lower bits of the APIC id distingush the cores.
  * Assumes number of cores is a power of two.
@@ -729,16 +728,24 @@ static void __init amd_detect_cmp(struct cpuinfo_x86 *c)
 #ifdef CONFIG_SMP
     int cpu = smp_processor_id();
     int node = 0;
+    unsigned bits;
     if (c->x86_num_cores == 1)
         return;
-    /* Fix up the APIC ID following the AMD specification. */
-    cpu_core_id[cpu] >>= hweight32(c->x86_num_cores - 1);
+
+    bits = 0;
+    while ((1 << bits) < c->x86_num_cores)
+        bits++;
+
+    /* Low order bits define the core id (index of core in socket) */
+    cpu_core_id[cpu] = phys_proc_id[cpu] & ((1 << bits)-1);
+    /* Convert the APIC ID into the socket ID */
+    phys_proc_id[cpu] >>= bits;
 
 #ifdef CONFIG_NUMA
     /* When an ACPI SRAT table is available use the mappings from SRAT
        instead. */
     if (acpi_numa <= 0) {
-        node = cpu_core_id[cpu];
+        node = phys_proc_id[cpu];
         if (!node_online(node))
             node = first_node(node_online_map);
         cpu_to_node[cpu] = node;
@@ -746,18 +753,11 @@ static void __init amd_detect_cmp(struct cpuinfo_x86 *c)
         node = cpu_to_node[cpu];
     }
 #endif
-    /* For now: - better than BAD_APIC_ID at least*/
-    phys_proc_id[cpu] = cpu_core_id[cpu];
 
     printk(KERN_INFO "CPU %d(%d) -> Node %d -> Core %d\n",
            cpu, c->x86_num_cores, node, cpu_core_id[cpu]);
 #endif
 }
-#else
-static void __init amd_detect_cmp(struct cpuinfo_x86 *c)
-{
-}
-#endif
 
 static int __init init_amd(struct cpuinfo_x86 *c)
 {
@@ -963,8 +963,7 @@ void __init early_identify_cpu(struct cpuinfo_x86 *c)
     }
 
 #ifdef CONFIG_SMP
-    phys_proc_id[smp_processor_id()] =
-        cpu_core_id[smp_processor_id()] = (cpuid_ebx(1) >> 24) & 0xff;
+    phys_proc_id[smp_processor_id()] = (cpuid_ebx(1) >> 24) & 0xff;
 #endif
 }
 
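
amd_detect_cmp() now decodes the initial APIC ID the way dual-core AMD parts lay it out: with 2^bits cores per package, the low bits select the core within the socket and the high bits select the socket, and the NUMA lookup keys off phys_proc_id rather than the old fixed-up cpu_core_id. A runnable sketch of the decomposition with made-up numbers:

    #include <assert.h>

    int main(void)
    {
        unsigned num_cores = 2, apic_id = 5; /* expect socket 2, core 1 */
        unsigned bits = 0;

        while ((1u << bits) < num_cores)     /* bits = ceil(log2(cores)) */
            bits++;

        assert((apic_id & ((1u << bits) - 1)) == 1); /* core id   */
        assert((apic_id >> bits) == 2);              /* socket id */
        return 0;
    }
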
diff --git a/arch/x86_64/kernel/signal.c b/arch/x86_64/kernel/signal.c
index d439ced150c6..3fdcdba0fec5 100644
--- a/arch/x86_64/kernel/signal.c
+++ b/arch/x86_64/kernel/signal.c
@@ -452,7 +452,9 @@ int do_signal(struct pt_regs *regs, sigset_t *oldset)
             regs->rip -= 2;
         }
         if (regs->rax == (unsigned long)-ERESTART_RESTARTBLOCK) {
-            regs->rax = __NR_restart_syscall;
+            regs->rax = test_thread_flag(TIF_IA32) ?
+                    __NR_ia32_restart_syscall :
+                    __NR_restart_syscall;
             regs->rip -= 2;
         }
     }
diff --git a/arch/x86_64/kernel/smpboot.c b/arch/x86_64/kernel/smpboot.c
index 5abdee1e16a5..f1ec0f345941 100644
--- a/arch/x86_64/kernel/smpboot.c
+++ b/arch/x86_64/kernel/smpboot.c
@@ -94,6 +94,7 @@ int smp_threads_ready;
 
 cpumask_t cpu_sibling_map[NR_CPUS] __cacheline_aligned;
 cpumask_t cpu_core_map[NR_CPUS] __cacheline_aligned;
+EXPORT_SYMBOL(cpu_core_map);
 
 /*
  * Trampoline 80x86 program as an array.
diff --git a/arch/x86_64/mm/ioremap.c b/arch/x86_64/mm/ioremap.c
index c6fb0cb69992..58aac23760ef 100644
--- a/arch/x86_64/mm/ioremap.c
+++ b/arch/x86_64/mm/ioremap.c
@@ -133,7 +133,7 @@ ioremap_change_attr(unsigned long phys_addr, unsigned long size,
                unsigned long flags)
 {
     int err = 0;
-    if (flags && phys_addr + size - 1 < (end_pfn_map << PAGE_SHIFT)) {
+    if (phys_addr + size - 1 < (end_pfn_map << PAGE_SHIFT)) {
         unsigned long npages = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;
         unsigned long vaddr = (unsigned long) __va(phys_addr);
 
@@ -214,7 +214,7 @@ void __iomem * __ioremap(unsigned long phys_addr, unsigned long size, unsigned l
         remove_vm_area((void *)(PAGE_MASK & (unsigned long) addr));
         return NULL;
     }
-    if (ioremap_change_attr(phys_addr, size, flags) < 0) {
+    if (flags && ioremap_change_attr(phys_addr, size, flags) < 0) {
         area->flags &= 0xffffff;
         vunmap(addr);
         return NULL;
@@ -251,7 +251,7 @@ void __iomem *ioremap_nocache (unsigned long phys_addr, unsigned long size)
 
 void iounmap(volatile void __iomem *addr)
 {
-    struct vm_struct *p, **pprev;
+    struct vm_struct *p;
 
     if (addr <= high_memory)
         return;
@@ -260,24 +260,11 @@ void iounmap(volatile void __iomem *addr)
         return;
 
     write_lock(&vmlist_lock);
-    for (p = vmlist, pprev = &vmlist; p != NULL; pprev = &p->next, p = *pprev)
-        if (p->addr == (void *)(PAGE_MASK & (unsigned long)addr))
-            break;
-    if (!p) {
-        printk("__iounmap: bad address %p\n", addr);
-        goto out_unlock;
-    }
-    *pprev = p->next;
-    unmap_vm_area(p);
-    if ((p->flags >> 20) &&
-        p->phys_addr + p->size - 1 < virt_to_phys(high_memory)) {
-        /* p->size includes the guard page, but cpa doesn't like that */
-        change_page_attr_addr((unsigned long)__va(p->phys_addr),
-                              p->size >> PAGE_SHIFT,
-                              PAGE_KERNEL);
-        global_flush_tlb();
-    }
-out_unlock:
+    p = __remove_vm_area((void *)((unsigned long)addr & PAGE_MASK));
+    if (!p)
+        printk("iounmap: bad address %p\n", addr);
+    else if (p->flags >> 20)
+        ioremap_change_attr(p->phys_addr, p->size, 0);
     write_unlock(&vmlist_lock);
     kfree(p);
 }
diff --git a/drivers/block/pktcdvd.c b/drivers/block/pktcdvd.c
index b9a6b7ad64f3..bc56770bcc90 100644
--- a/drivers/block/pktcdvd.c
+++ b/drivers/block/pktcdvd.c
@@ -2021,7 +2021,13 @@ static int pkt_open(struct inode *inode, struct file *file)
     BUG_ON(pd->refcnt < 0);
 
     pd->refcnt++;
-    if (pd->refcnt == 1) {
+    if (pd->refcnt > 1) {
+        if ((file->f_mode & FMODE_WRITE) &&
+            !test_bit(PACKET_WRITABLE, &pd->flags)) {
+            ret = -EBUSY;
+            goto out_dec;
+        }
+    } else {
         if (pkt_open_dev(pd, file->f_mode & FMODE_WRITE)) {
             ret = -EIO;
             goto out_dec;
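
The reworked pkt_open() separates the first opener, who actually configures the device through pkt_open_dev(), from later openers, who are refused only if they ask for write access on a device whose first open did not leave it writable. A toy model of just that policy (names and the error value are illustrative):

    #include <assert.h>

    static int may_open(int refcnt, int wants_write, int writable)
    {
        if (refcnt > 1)
            return (wants_write && !writable) ? -1 /* EBUSY */ : 0;
        return 0; /* first opener: device setup decides instead */
    }

    int main(void)
    {
        assert(may_open(2, 1, 0) == -1); /* second writer, read-only setup */
        assert(may_open(2, 0, 0) == 0);  /* second reader is fine */
        assert(may_open(1, 1, 0) == 0);  /* first opener handled elsewhere */
        return 0;
    }
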
diff --git a/drivers/char/ipmi/ipmi_devintf.c b/drivers/char/ipmi/ipmi_devintf.c
index 49d67f5384a2..4bb9af736fba 100644
--- a/drivers/char/ipmi/ipmi_devintf.c
+++ b/drivers/char/ipmi/ipmi_devintf.c
@@ -44,6 +44,7 @@
 #include <linux/ipmi.h>
 #include <asm/semaphore.h>
 #include <linux/init.h>
+#include <linux/device.h>
 
 #define IPMI_DEVINTF_VERSION "v33"
 
@@ -519,15 +520,21 @@ MODULE_PARM_DESC(ipmi_major, "Sets the major number of the IPMI device. By"
519 " interface. Other values will set the major device number" 520 " interface. Other values will set the major device number"
520 " to that value."); 521 " to that value.");
521 522
523static struct class *ipmi_class;
524
522static void ipmi_new_smi(int if_num) 525static void ipmi_new_smi(int if_num)
523{ 526{
524 devfs_mk_cdev(MKDEV(ipmi_major, if_num), 527 dev_t dev = MKDEV(ipmi_major, if_num);
525 S_IFCHR | S_IRUSR | S_IWUSR, 528
529 devfs_mk_cdev(dev, S_IFCHR | S_IRUSR | S_IWUSR,
526 "ipmidev/%d", if_num); 530 "ipmidev/%d", if_num);
531
532 class_simple_device_add(ipmi_class, dev, NULL, "ipmi%d", if_num);
527} 533}
528 534
529static void ipmi_smi_gone(int if_num) 535static void ipmi_smi_gone(int if_num)
530{ 536{
537 class_simple_device_remove(ipmi_class, MKDEV(ipmi_major, if_num));
531 devfs_remove("ipmidev/%d", if_num); 538 devfs_remove("ipmidev/%d", if_num);
532} 539}
533 540
@@ -548,8 +555,15 @@ static __init int init_ipmi_devintf(void)
     printk(KERN_INFO "ipmi device interface version "
            IPMI_DEVINTF_VERSION "\n");
 
+    ipmi_class = class_simple_create(THIS_MODULE, "ipmi");
+    if (IS_ERR(ipmi_class)) {
+        printk(KERN_ERR "ipmi: can't register device class\n");
+        return PTR_ERR(ipmi_class);
+    }
+
     rv = register_chrdev(ipmi_major, DEVICE_NAME, &ipmi_fops);
     if (rv < 0) {
+        class_simple_destroy(ipmi_class);
         printk(KERN_ERR "ipmi: can't get major %d\n", ipmi_major);
         return rv;
     }
@@ -563,6 +577,7 @@ static __init int init_ipmi_devintf(void)
     rv = ipmi_smi_watcher_register(&smi_watcher);
     if (rv) {
         unregister_chrdev(ipmi_major, DEVICE_NAME);
+        class_simple_destroy(ipmi_class);
         printk(KERN_WARNING "ipmi: can't register smi watcher\n");
         return rv;
     }
@@ -573,6 +588,7 @@ module_init(init_ipmi_devintf);
 
 static __exit void cleanup_ipmi(void)
 {
+    class_simple_destroy(ipmi_class);
     ipmi_smi_watcher_unregister(&smi_watcher);
     devfs_remove(DEVICE_NAME);
     unregister_chrdev(ipmi_major, DEVICE_NAME);
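
The additions follow the 2.6-era class_simple lifecycle: create the class once at init, add a class device per interface as it appears, remove it when the interface goes, and destroy the class on every failure path as well as at exit, so sysfs entries (and the /dev/ipmiN nodes udev derives from them) never outlive the driver. Condensed to a sketch with the details elided:

    static struct class *example_class;

    static int __init example_init(void)
    {
        example_class = class_simple_create(THIS_MODULE, "example");
        if (IS_ERR(example_class))
            return PTR_ERR(example_class);
        /* any later failure must class_simple_destroy(example_class) */
        return 0;
    }

    static void __exit example_exit(void)
    {
        class_simple_destroy(example_class);
    }
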
diff --git a/drivers/i2c/busses/i2c-keywest.c b/drivers/i2c/busses/i2c-keywest.c
index dd0d4c463146..867d443e7133 100644
--- a/drivers/i2c/busses/i2c-keywest.c
+++ b/drivers/i2c/busses/i2c-keywest.c
@@ -516,6 +516,11 @@ create_iface(struct device_node *np, struct device *dev)
     u32 *psteps, *prate;
     int rc;
 
+    if (np->n_intrs < 1 || np->n_addrs < 1) {
+        printk(KERN_ERR "%s: Missing interrupt or address !\n",
+               np->full_name);
+        return -ENODEV;
+    }
     if (pmac_low_i2c_lock(np))
         return -ENODEV;
 
diff --git a/drivers/mmc/mmc_block.c b/drivers/mmc/mmc_block.c
index b5b4a7b11903..d4eee99c2bf6 100644
--- a/drivers/mmc/mmc_block.c
+++ b/drivers/mmc/mmc_block.c
@@ -383,7 +383,10 @@ static int mmc_blk_probe(struct mmc_card *card)
     struct mmc_blk_data *md;
     int err;
 
-    if (card->csd.cmdclass & ~0x1ff)
+    /*
+     * Check that the card supports the command class(es) we need.
+     */
+    if (!(card->csd.cmdclass & CCC_BLOCK_READ))
         return -ENODEV;
 
     if (card->csd.read_blkbits < 9) {
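
The old test (cmdclass & ~0x1ff) rejected any card advertising a command class above class 8, including perfectly usable ones; the new test asks the actual question, whether the block-read class is present. CCC_BLOCK_READ comes from the expanded <linux/mmc/protocol.h> in this merge; assuming it is the class-2 bit, the check is a plain bit test:

    #include <assert.h>

    #define CCC_BLOCK_READ (1u << 2) /* assumed value: class 2, block read */

    int main(void)
    {
        unsigned int cmdclass = 0x5f5;     /* example CSD command-class field */
        assert(cmdclass & CCC_BLOCK_READ); /* card is usable for reads */
        return 0;
    }
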
diff --git a/drivers/net/tg3.c b/drivers/net/tg3.c
index f79b02e80e75..4d2bdbdd34e8 100644
--- a/drivers/net/tg3.c
+++ b/drivers/net/tg3.c
@@ -420,7 +420,8 @@ static void tg3_enable_ints(struct tg3 *tp)
 {
     tw32(TG3PCI_MISC_HOST_CTRL,
          (tp->misc_host_ctrl & ~MISC_HOST_CTRL_MASK_PCI_INT));
-    tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000000);
+    tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
+                 (tp->last_tag << 24));
     tr32(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW);
 
     tg3_cond_int(tp);
@@ -455,10 +456,16 @@ static void tg3_restart_ints(struct tg3 *tp)
 {
     tw32(TG3PCI_MISC_HOST_CTRL,
          (tp->misc_host_ctrl & ~MISC_HOST_CTRL_MASK_PCI_INT));
-    tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000000);
+    tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
+                 tp->last_tag << 24);
     mmiowb();
 
-    if (tg3_has_work(tp))
+    /* When doing tagged status, this work check is unnecessary.
+     * The last_tag we write above tells the chip which piece of
+     * work we've completed.
+     */
+    if (!(tp->tg3_flags & TG3_FLAG_TAGGED_STATUS) &&
+        tg3_has_work(tp))
         tw32(HOSTCC_MODE, tp->coalesce_mode |
              (HOSTCC_MODE_ENABLE | HOSTCC_MODE_NOW));
 }
@@ -2500,7 +2507,7 @@ static int tg3_setup_phy(struct tg3 *tp, int force_reset)
     if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
         if (netif_carrier_ok(tp->dev)) {
             tw32(HOSTCC_STAT_COAL_TICKS,
-                 DEFAULT_STAT_COAL_TICKS);
+                 tp->coal.stats_block_coalesce_usecs);
         } else {
             tw32(HOSTCC_STAT_COAL_TICKS, 0);
         }
@@ -2886,7 +2893,6 @@ static int tg3_poll(struct net_device *netdev, int *budget)
      * All RX "locking" is done by ensuring outside
      * code synchronizes with dev->poll()
      */
-    done = 1;
     if (sblk->idx[0].rx_producer != tp->rx_rcb_ptr) {
         int orig_budget = *budget;
         int work_done;
@@ -2898,12 +2904,14 @@ static int tg3_poll(struct net_device *netdev, int *budget)
 
         *budget -= work_done;
         netdev->quota -= work_done;
-
-        if (work_done >= orig_budget)
-            done = 0;
     }
 
+    if (tp->tg3_flags & TG3_FLAG_TAGGED_STATUS)
+        tp->last_tag = sblk->status_tag;
+    rmb();
+
     /* if no more work, tell net stack and NIC we're done */
+    done = !tg3_has_work(tp);
     if (done) {
         spin_lock_irqsave(&tp->lock, flags);
         __netif_rx_complete(netdev);
@@ -2928,22 +2936,21 @@ static irqreturn_t tg3_msi(int irq, void *dev_id, struct pt_regs *regs)
     spin_lock_irqsave(&tp->lock, flags);
 
     /*
-     * writing any value to intr-mbox-0 clears PCI INTA# and
+     * Writing any value to intr-mbox-0 clears PCI INTA# and
      * chip-internal interrupt pending events.
-     * writing non-zero to intr-mbox-0 additional tells the
+     * Writing non-zero to intr-mbox-0 additional tells the
      * NIC to stop sending us irqs, engaging "in-intr-handler"
      * event coalescing.
      */
     tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
+    tp->last_tag = sblk->status_tag;
     sblk->status &= ~SD_STATUS_UPDATED;
-
     if (likely(tg3_has_work(tp)))
         netif_rx_schedule(dev); /* schedule NAPI poll */
     else {
-        /* no work, re-enable interrupts
-         */
+        /* No work, re-enable interrupts. */
         tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
-                     0x00000000);
+                     tp->last_tag << 24);
     }
 
     spin_unlock_irqrestore(&tp->lock, flags);
@@ -2969,21 +2976,62 @@ static irqreturn_t tg3_interrupt(int irq, void *dev_id, struct pt_regs *regs)
     if ((sblk->status & SD_STATUS_UPDATED) ||
         !(tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
         /*
-         * writing any value to intr-mbox-0 clears PCI INTA# and
+         * Writing any value to intr-mbox-0 clears PCI INTA# and
          * chip-internal interrupt pending events.
-         * writing non-zero to intr-mbox-0 additional tells the
+         * Writing non-zero to intr-mbox-0 additional tells the
          * NIC to stop sending us irqs, engaging "in-intr-handler"
          * event coalescing.
          */
         tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
                      0x00000001);
+        sblk->status &= ~SD_STATUS_UPDATED;
+        if (likely(tg3_has_work(tp)))
+            netif_rx_schedule(dev); /* schedule NAPI poll */
+        else {
+            /* No work, shared interrupt perhaps? re-enable
+             * interrupts, and flush that PCI write
+             */
+            tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
+                         0x00000000);
+            tr32(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW);
+        }
+    } else { /* shared interrupt */
+        handled = 0;
+    }
+
+    spin_unlock_irqrestore(&tp->lock, flags);
+
+    return IRQ_RETVAL(handled);
+}
+
+static irqreturn_t tg3_interrupt_tagged(int irq, void *dev_id, struct pt_regs *regs)
+{
+    struct net_device *dev = dev_id;
+    struct tg3 *tp = netdev_priv(dev);
+    struct tg3_hw_status *sblk = tp->hw_status;
+    unsigned long flags;
+    unsigned int handled = 1;
+
+    spin_lock_irqsave(&tp->lock, flags);
+
+    /* In INTx mode, it is possible for the interrupt to arrive at
+     * the CPU before the status block posted prior to the interrupt.
+     * Reading the PCI State register will confirm whether the
+     * interrupt is ours and will flush the status block.
+     */
+    if ((sblk->status & SD_STATUS_UPDATED) ||
+        !(tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
         /*
-         * Flush PCI write. This also guarantees that our
-         * status block has been flushed to host memory.
+         * writing any value to intr-mbox-0 clears PCI INTA# and
+         * chip-internal interrupt pending events.
+         * writing non-zero to intr-mbox-0 additional tells the
+         * NIC to stop sending us irqs, engaging "in-intr-handler"
+         * event coalescing.
          */
-        tr32(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW);
+        tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
+                     0x00000001);
+        tp->last_tag = sblk->status_tag;
         sblk->status &= ~SD_STATUS_UPDATED;
-
         if (likely(tg3_has_work(tp)))
             netif_rx_schedule(dev); /* schedule NAPI poll */
         else {
@@ -2991,7 +3039,7 @@ static irqreturn_t tg3_interrupt(int irq, void *dev_id, struct pt_regs *regs)
              * interrupts, and flush that PCI write
              */
             tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
-                         0x00000000);
+                         tp->last_tag << 24);
             tr32(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW);
         }
     } else { /* shared interrupt */
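
Everything above implements tagged status: the chip stamps each status block with status_tag, and instead of acking the interrupt mailbox with zero the driver writes back the tag it last consumed, shifted into bits 31:24, so the hardware knows how far the driver has caught up and will not re-interrupt for work already seen. That is why tg3_interrupt_tagged() snapshots sblk->status_tag before clearing SD_STATUS_UPDATED. Compressed to its essentials (a sketch of the discipline, not the driver's literal control flow):

    /* Sketch: consume events, then acknowledge up to the tag we saw. */
    tp->last_tag = sblk->status_tag;    /* snapshot before clearing */
    sblk->status &= ~SD_STATUS_UPDATED;
    /* ... NAPI poll drains the rings ... */
    tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
                 tp->last_tag << 24);   /* re-enable, reporting progress */
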
@@ -5044,6 +5092,27 @@ static void tg3_set_bdinfo(struct tg3 *tp, u32 bdinfo_addr,
 }
 
 static void __tg3_set_rx_mode(struct net_device *);
+static void tg3_set_coalesce(struct tg3 *tp, struct ethtool_coalesce *ec)
+{
+    tw32(HOSTCC_RXCOL_TICKS, ec->rx_coalesce_usecs);
+    tw32(HOSTCC_TXCOL_TICKS, ec->tx_coalesce_usecs);
+    tw32(HOSTCC_RXMAX_FRAMES, ec->rx_max_coalesced_frames);
+    tw32(HOSTCC_TXMAX_FRAMES, ec->tx_max_coalesced_frames);
+    if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
+        tw32(HOSTCC_RXCOAL_TICK_INT, ec->rx_coalesce_usecs_irq);
+        tw32(HOSTCC_TXCOAL_TICK_INT, ec->tx_coalesce_usecs_irq);
+    }
+    tw32(HOSTCC_RXCOAL_MAXF_INT, ec->rx_max_coalesced_frames_irq);
+    tw32(HOSTCC_TXCOAL_MAXF_INT, ec->tx_max_coalesced_frames_irq);
+    if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
+        u32 val = ec->stats_block_coalesce_usecs;
+
+        if (!netif_carrier_ok(tp->dev))
+            val = 0;
+
+        tw32(HOSTCC_STAT_COAL_TICKS, val);
+    }
+}
 
 /* tp->lock is held. */
 static int tg3_reset_hw(struct tg3 *tp)
@@ -5366,16 +5435,7 @@ static int tg3_reset_hw(struct tg3 *tp)
         udelay(10);
     }
 
-    tw32(HOSTCC_RXCOL_TICKS, 0);
-    tw32(HOSTCC_TXCOL_TICKS, LOW_TXCOL_TICKS);
-    tw32(HOSTCC_RXMAX_FRAMES, 1);
-    tw32(HOSTCC_TXMAX_FRAMES, LOW_RXMAX_FRAMES);
-    if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
-        tw32(HOSTCC_RXCOAL_TICK_INT, 0);
-        tw32(HOSTCC_TXCOAL_TICK_INT, 0);
-    }
-    tw32(HOSTCC_RXCOAL_MAXF_INT, 1);
-    tw32(HOSTCC_TXCOAL_MAXF_INT, 0);
+    tg3_set_coalesce(tp, &tp->coal);
 
     /* set status block DMA address */
     tw32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH,
@@ -5388,8 +5448,6 @@ static int tg3_reset_hw(struct tg3 *tp)
      * the tg3_periodic_fetch_stats call there, and
      * tg3_get_stats to see how this works for 5705/5750 chips.
      */
-    tw32(HOSTCC_STAT_COAL_TICKS,
-         DEFAULT_STAT_COAL_TICKS);
     tw32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH,
          ((u64) tp->stats_mapping >> 32));
     tw32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW,
@@ -5445,7 +5503,8 @@ static int tg3_reset_hw(struct tg3 *tp)
     udelay(100);
 
     tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0);
-    tr32(MAILBOX_INTERRUPT_0);
+    tr32(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW);
+    tp->last_tag = 0;
 
     if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
         tw32_f(DMAC_MODE, DMAC_MODE_ENABLE);
@@ -5723,31 +5782,33 @@ static void tg3_timer(unsigned long __opaque)
     spin_lock_irqsave(&tp->lock, flags);
     spin_lock(&tp->tx_lock);
 
-    /* All of this garbage is because when using non-tagged
-     * IRQ status the mailbox/status_block protocol the chip
-     * uses with the cpu is race prone.
-     */
-    if (tp->hw_status->status & SD_STATUS_UPDATED) {
-        tw32(GRC_LOCAL_CTRL,
-             tp->grc_local_ctrl | GRC_LCLCTRL_SETINT);
-    } else {
-        tw32(HOSTCC_MODE, tp->coalesce_mode |
-             (HOSTCC_MODE_ENABLE | HOSTCC_MODE_NOW));
-    }
+    if (!(tp->tg3_flags & TG3_FLAG_TAGGED_STATUS)) {
+        /* All of this garbage is because when using non-tagged
+         * IRQ status the mailbox/status_block protocol the chip
+         * uses with the cpu is race prone.
+         */
+        if (tp->hw_status->status & SD_STATUS_UPDATED) {
+            tw32(GRC_LOCAL_CTRL,
+                 tp->grc_local_ctrl | GRC_LCLCTRL_SETINT);
+        } else {
+            tw32(HOSTCC_MODE, tp->coalesce_mode |
+                 (HOSTCC_MODE_ENABLE | HOSTCC_MODE_NOW));
+        }
 
-    if (!(tr32(WDMAC_MODE) & WDMAC_MODE_ENABLE)) {
-        tp->tg3_flags2 |= TG3_FLG2_RESTART_TIMER;
-        spin_unlock(&tp->tx_lock);
-        spin_unlock_irqrestore(&tp->lock, flags);
-        schedule_work(&tp->reset_task);
-        return;
+        if (!(tr32(WDMAC_MODE) & WDMAC_MODE_ENABLE)) {
+            tp->tg3_flags2 |= TG3_FLG2_RESTART_TIMER;
+            spin_unlock(&tp->tx_lock);
+            spin_unlock_irqrestore(&tp->lock, flags);
+            schedule_work(&tp->reset_task);
+            return;
+        }
     }
 
-    if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS)
-        tg3_periodic_fetch_stats(tp);
-
     /* This part only runs once per second. */
     if (!--tp->timer_counter) {
+        if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS)
+            tg3_periodic_fetch_stats(tp);
+
         if (tp->tg3_flags & TG3_FLAG_USE_LINKCHG_REG) {
             u32 mac_stat;
             int phy_event;
@@ -5846,9 +5907,13 @@ static int tg3_test_interrupt(struct tg3 *tp)
     if (tp->tg3_flags2 & TG3_FLG2_USING_MSI)
         err = request_irq(tp->pdev->irq, tg3_msi,
                           SA_SAMPLE_RANDOM, dev->name, dev);
-    else
-        err = request_irq(tp->pdev->irq, tg3_interrupt,
+    else {
+        irqreturn_t (*fn)(int, void *, struct pt_regs *)=tg3_interrupt;
+        if (tp->tg3_flags & TG3_FLAG_TAGGED_STATUS)
+            fn = tg3_interrupt_tagged;
+        err = request_irq(tp->pdev->irq, fn,
                           SA_SHIRQ | SA_SAMPLE_RANDOM, dev->name, dev);
+    }
 
     if (err)
         return err;
@@ -5900,9 +5965,14 @@ static int tg3_test_msi(struct tg3 *tp)
 
     tp->tg3_flags2 &= ~TG3_FLG2_USING_MSI;
 
-    err = request_irq(tp->pdev->irq, tg3_interrupt,
-                      SA_SHIRQ | SA_SAMPLE_RANDOM, dev->name, dev);
+    {
+        irqreturn_t (*fn)(int, void *, struct pt_regs *)=tg3_interrupt;
+        if (tp->tg3_flags & TG3_FLAG_TAGGED_STATUS)
+            fn = tg3_interrupt_tagged;
 
+        err = request_irq(tp->pdev->irq, fn,
+                          SA_SHIRQ | SA_SAMPLE_RANDOM, dev->name, dev);
+    }
     if (err)
         return err;
 
@@ -5948,7 +6018,13 @@ static int tg3_open(struct net_device *dev)
     if ((tp->tg3_flags2 & TG3_FLG2_5750_PLUS) &&
         (GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5750_AX) &&
         (GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5750_BX)) {
-        if (pci_enable_msi(tp->pdev) == 0) {
+        /* All MSI supporting chips should support tagged
+         * status. Assert that this is the case.
+         */
+        if (!(tp->tg3_flags & TG3_FLAG_TAGGED_STATUS)) {
+            printk(KERN_WARNING PFX "%s: MSI without TAGGED? "
+                   "Not using MSI.\n", tp->dev->name);
+        } else if (pci_enable_msi(tp->pdev) == 0) {
             u32 msi_mode;
 
             msi_mode = tr32(MSGINT_MODE);
@@ -5959,9 +6035,14 @@ static int tg3_open(struct net_device *dev)
     if (tp->tg3_flags2 & TG3_FLG2_USING_MSI)
         err = request_irq(tp->pdev->irq, tg3_msi,
                           SA_SAMPLE_RANDOM, dev->name, dev);
-    else
-        err = request_irq(tp->pdev->irq, tg3_interrupt,
+    else {
+        irqreturn_t (*fn)(int, void *, struct pt_regs *)=tg3_interrupt;
+        if (tp->tg3_flags & TG3_FLAG_TAGGED_STATUS)
+            fn = tg3_interrupt_tagged;
+
+        err = request_irq(tp->pdev->irq, fn,
                           SA_SHIRQ | SA_SAMPLE_RANDOM, dev->name, dev);
+    }
 
     if (err) {
         if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
@@ -5980,9 +6061,16 @@ static int tg3_open(struct net_device *dev)
         tg3_halt(tp, 1);
         tg3_free_rings(tp);
     } else {
-        tp->timer_offset = HZ / 10;
-        tp->timer_counter = tp->timer_multiplier = 10;
-        tp->asf_counter = tp->asf_multiplier = (10 * 120);
+        if (tp->tg3_flags & TG3_FLAG_TAGGED_STATUS)
+            tp->timer_offset = HZ;
+        else
+            tp->timer_offset = HZ / 10;
+
+        BUG_ON(tp->timer_offset > HZ);
+        tp->timer_counter = tp->timer_multiplier =
+            (HZ / tp->timer_offset);
+        tp->asf_counter = tp->asf_multiplier =
+            ((HZ / tp->timer_offset) * 120);
 
         init_timer(&tp->timer);
         tp->timer.expires = jiffies + tp->timer_offset;
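
The timer bookkeeping is now derived from timer_offset instead of hard-coded tens: tagged status only needs the watchdog once per second (offset HZ), the non-tagged race-avoidance path keeps firing ten times per second (HZ / 10), and the counters scale as HZ / timer_offset. Worked through for HZ = 1000: untagged gives timer_offset = 100, timer_counter = 10 and asf_counter = 1200, exactly the old constants; tagged gives timer_offset = 1000, timer_counter = 1 and asf_counter = 120, preserving the two-minute ASF heartbeat either way.
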
@@ -6005,6 +6093,7 @@ static int tg3_open(struct net_device *dev)
 
     if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
         err = tg3_test_msi(tp);
+
         if (err) {
             spin_lock_irq(&tp->lock);
             spin_lock(&tp->tx_lock);
@@ -7203,6 +7292,14 @@ static void tg3_vlan_rx_kill_vid(struct net_device *dev, unsigned short vid)
 }
 #endif
 
+static int tg3_get_coalesce(struct net_device *dev, struct ethtool_coalesce *ec)
+{
+    struct tg3 *tp = netdev_priv(dev);
+
+    memcpy(ec, &tp->coal, sizeof(*ec));
+    return 0;
+}
+
 static struct ethtool_ops tg3_ethtool_ops = {
     .get_settings       = tg3_get_settings,
     .set_settings       = tg3_set_settings,
@@ -7235,6 +7332,7 @@ static struct ethtool_ops tg3_ethtool_ops = {
     .get_strings        = tg3_get_strings,
     .get_stats_count    = tg3_get_stats_count,
     .get_ethtool_stats  = tg3_get_ethtool_stats,
+    .get_coalesce       = tg3_get_coalesce,
 };
 
 static void __devinit tg3_get_eeprom_size(struct tg3 *tp)
@@ -8422,15 +8520,7 @@ static int __devinit tg3_get_invariants(struct tg3 *tp)
     if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS)
         tp->tg3_flags2 |= TG3_FLG2_PHY_BER_BUG;
 
-    /* Only 5701 and later support tagged irq status mode.
-     * Also, 5788 chips cannot use tagged irq status.
-     *
-     * However, since we are using NAPI avoid tagged irq status
-     * because the interrupt condition is more difficult to
-     * fully clear in that mode.
-     */
     tp->coalesce_mode = 0;
-
     if (GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5700_AX &&
         GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5700_BX)
         tp->coalesce_mode |= HOSTCC_MODE_32BYTE;
@@ -8494,6 +8584,18 @@ static int __devinit tg3_get_invariants(struct tg3 *tp)
          grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5788M))
         tp->tg3_flags2 |= TG3_FLG2_IS_5788;
 
+    if (!(tp->tg3_flags2 & TG3_FLG2_IS_5788) &&
+        (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700))
+        tp->tg3_flags |= TG3_FLAG_TAGGED_STATUS;
+    if (tp->tg3_flags & TG3_FLAG_TAGGED_STATUS) {
+        tp->coalesce_mode |= (HOSTCC_MODE_CLRTICK_RXBD |
+                              HOSTCC_MODE_CLRTICK_TXBD);
+
+        tp->misc_host_ctrl |= MISC_HOST_CTRL_TAGGED_STATUS;
+        pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
+                               tp->misc_host_ctrl);
+    }
+
     /* these are limited to 10/100 only */
     if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 &&
          (grc_misc_cfg == 0x8000 || grc_misc_cfg == 0x4000)) ||
@@ -8671,6 +8773,146 @@ static int __devinit tg3_get_device_address(struct tg3 *tp)
8671 return 0; 8773 return 0;
8672} 8774}
8673 8775
8776#define BOUNDARY_SINGLE_CACHELINE 1
8777#define BOUNDARY_MULTI_CACHELINE 2
8778
8779static u32 __devinit tg3_calc_dma_bndry(struct tg3 *tp, u32 val)
8780{
8781 int cacheline_size;
8782 u8 byte;
8783 int goal;
8784
8785 pci_read_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE, &byte);
8786 if (byte == 0)
8787 cacheline_size = 1024;
8788 else
8789 cacheline_size = (int) byte * 4;
8790
8791 /* On 5703 and later chips, the boundary bits have no
8792 * effect.
8793 */
8794 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
8795 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701 &&
8796 !(tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS))
8797 goto out;
8798
8799#if defined(CONFIG_PPC64) || defined(CONFIG_IA64) || defined(CONFIG_PARISC)
8800 goal = BOUNDARY_MULTI_CACHELINE;
8801#else
8802#if defined(CONFIG_SPARC64) || defined(CONFIG_ALPHA)
8803 goal = BOUNDARY_SINGLE_CACHELINE;
8804#else
8805 goal = 0;
8806#endif
8807#endif
8808
8809 if (!goal)
8810 goto out;
8811
8812 /* PCI controllers on most RISC systems tend to disconnect
8813 * when a device tries to burst across a cache-line boundary.
8814 * Therefore, letting tg3 do so just wastes PCI bandwidth.
8815 *
8816 * Unfortunately, for PCI-E there are only limited
8817 * write-side controls for this, and thus for reads
8818 * we will still get the disconnects. We'll also waste
8819 * these PCI cycles for both read and write for chips
8820 * other than 5700 and 5701 which do not implement the
8821 * boundary bits.
8822 */
8823 if ((tp->tg3_flags & TG3_FLAG_PCIX_MODE) &&
8824 !(tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS)) {
8825 switch (cacheline_size) {
8826 case 16:
8827 case 32:
8828 case 64:
8829 case 128:
8830 if (goal == BOUNDARY_SINGLE_CACHELINE) {
8831 val |= (DMA_RWCTRL_READ_BNDRY_128_PCIX |
8832 DMA_RWCTRL_WRITE_BNDRY_128_PCIX);
8833 } else {
8834 val |= (DMA_RWCTRL_READ_BNDRY_384_PCIX |
8835 DMA_RWCTRL_WRITE_BNDRY_384_PCIX);
8836 }
8837 break;
8838
8839 case 256:
8840 val |= (DMA_RWCTRL_READ_BNDRY_256_PCIX |
8841 DMA_RWCTRL_WRITE_BNDRY_256_PCIX);
8842 break;
8843
8844 default:
8845 val |= (DMA_RWCTRL_READ_BNDRY_384_PCIX |
8846 DMA_RWCTRL_WRITE_BNDRY_384_PCIX);
8847 break;
8848 };
8849 } else if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) {
8850 switch (cacheline_size) {
8851 case 16:
8852 case 32:
8853 case 64:
8854 if (goal == BOUNDARY_SINGLE_CACHELINE) {
8855 val &= ~DMA_RWCTRL_WRITE_BNDRY_DISAB_PCIE;
8856 val |= DMA_RWCTRL_WRITE_BNDRY_64_PCIE;
8857 break;
8858 }
8859 /* fallthrough */
8860 case 128:
8861 default:
8862 val &= ~DMA_RWCTRL_WRITE_BNDRY_DISAB_PCIE;
8863 val |= DMA_RWCTRL_WRITE_BNDRY_128_PCIE;
8864 break;
8865 };
8866 } else {
8867 switch (cacheline_size) {
8868 case 16:
8869 if (goal == BOUNDARY_SINGLE_CACHELINE) {
8870 val |= (DMA_RWCTRL_READ_BNDRY_16 |
8871 DMA_RWCTRL_WRITE_BNDRY_16);
8872 break;
8873 }
8874 /* fallthrough */
8875 case 32:
8876 if (goal == BOUNDARY_SINGLE_CACHELINE) {
8877 val |= (DMA_RWCTRL_READ_BNDRY_32 |
8878 DMA_RWCTRL_WRITE_BNDRY_32);
8879 break;
8880 }
8881 /* fallthrough */
8882 case 64:
8883 if (goal == BOUNDARY_SINGLE_CACHELINE) {
8884 val |= (DMA_RWCTRL_READ_BNDRY_64 |
8885 DMA_RWCTRL_WRITE_BNDRY_64);
8886 break;
8887 }
8888 /* fallthrough */
8889 case 128:
8890 if (goal == BOUNDARY_SINGLE_CACHELINE) {
8891 val |= (DMA_RWCTRL_READ_BNDRY_128 |
8892 DMA_RWCTRL_WRITE_BNDRY_128);
8893 break;
8894 }
8895 /* fallthrough */
8896 case 256:
8897 val |= (DMA_RWCTRL_READ_BNDRY_256 |
8898 DMA_RWCTRL_WRITE_BNDRY_256);
8899 break;
8900 case 512:
8901 val |= (DMA_RWCTRL_READ_BNDRY_512 |
8902 DMA_RWCTRL_WRITE_BNDRY_512);
8903 break;
8904 case 1024:
8905 default:
8906 val |= (DMA_RWCTRL_READ_BNDRY_1024 |
8907 DMA_RWCTRL_WRITE_BNDRY_1024);
8908 break;
8909 };
8910 }
8911
8912out:
8913 return val;
8914}
8915
8674static int __devinit tg3_do_test_dma(struct tg3 *tp, u32 *buf, dma_addr_t buf_dma, int size, int to_device) 8916static int __devinit tg3_do_test_dma(struct tg3 *tp, u32 *buf, dma_addr_t buf_dma, int size, int to_device)
8675{ 8917{
8676 struct tg3_internal_buffer_desc test_desc; 8918 struct tg3_internal_buffer_desc test_desc;
@@ -8757,7 +8999,7 @@ static int __devinit tg3_do_test_dma(struct tg3 *tp, u32 *buf, dma_addr_t buf_dm
8757static int __devinit tg3_test_dma(struct tg3 *tp) 8999static int __devinit tg3_test_dma(struct tg3 *tp)
8758{ 9000{
8759 dma_addr_t buf_dma; 9001 dma_addr_t buf_dma;
8760 u32 *buf; 9002 u32 *buf, saved_dma_rwctrl;
8761 int ret; 9003 int ret;
8762 9004
8763 buf = pci_alloc_consistent(tp->pdev, TEST_BUFFER_SIZE, &buf_dma); 9005 buf = pci_alloc_consistent(tp->pdev, TEST_BUFFER_SIZE, &buf_dma);
@@ -8769,46 +9011,7 @@ static int __devinit tg3_test_dma(struct tg3 *tp)
8769 tp->dma_rwctrl = ((0x7 << DMA_RWCTRL_PCI_WRITE_CMD_SHIFT) | 9011 tp->dma_rwctrl = ((0x7 << DMA_RWCTRL_PCI_WRITE_CMD_SHIFT) |
8770 (0x6 << DMA_RWCTRL_PCI_READ_CMD_SHIFT)); 9012 (0x6 << DMA_RWCTRL_PCI_READ_CMD_SHIFT));
8771 9013
8772#ifndef CONFIG_X86 9014 tp->dma_rwctrl = tg3_calc_dma_bndry(tp, tp->dma_rwctrl);
8773 {
8774 u8 byte;
8775 int cacheline_size;
8776 pci_read_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE, &byte);
8777
8778 if (byte == 0)
8779 cacheline_size = 1024;
8780 else
8781 cacheline_size = (int) byte * 4;
8782
8783 switch (cacheline_size) {
8784 case 16:
8785 case 32:
8786 case 64:
8787 case 128:
8788 if ((tp->tg3_flags & TG3_FLAG_PCIX_MODE) &&
8789 !(tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS)) {
8790 tp->dma_rwctrl |=
8791 DMA_RWCTRL_WRITE_BNDRY_384_PCIX;
8792 break;
8793 } else if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) {
8794 tp->dma_rwctrl &=
8795 ~(DMA_RWCTRL_PCI_WRITE_CMD);
8796 tp->dma_rwctrl |=
8797 DMA_RWCTRL_WRITE_BNDRY_128_PCIE;
8798 break;
8799 }
8800 /* fallthrough */
8801 case 256:
8802 if (!(tp->tg3_flags & TG3_FLAG_PCIX_MODE) &&
8803 !(tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS))
8804 tp->dma_rwctrl |=
8805 DMA_RWCTRL_WRITE_BNDRY_256;
8806 else if (!(tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS))
8807 tp->dma_rwctrl |=
8808 DMA_RWCTRL_WRITE_BNDRY_256_PCIX;
8809 };
8810 }
8811#endif
8812 9015
8813 if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) { 9016 if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) {
8814 /* DMA read watermark not used on PCIE */ 9017 /* DMA read watermark not used on PCIE */
@@ -8827,7 +9030,7 @@ static int __devinit tg3_test_dma(struct tg3 *tp)
8827 if (ccval == 0x6 || ccval == 0x7) 9030 if (ccval == 0x6 || ccval == 0x7)
8828 tp->dma_rwctrl |= DMA_RWCTRL_ONE_DMA; 9031 tp->dma_rwctrl |= DMA_RWCTRL_ONE_DMA;
8829 9032
8830 /* Set bit 23 to renable PCIX hw bug fix */ 9033 /* Set bit 23 to enable PCIX hw bug fix */
8831 tp->dma_rwctrl |= 0x009f0000; 9034 tp->dma_rwctrl |= 0x009f0000;
8832 } else { 9035 } else {
8833 tp->dma_rwctrl |= 0x001b000f; 9036 tp->dma_rwctrl |= 0x001b000f;
@@ -8868,6 +9071,13 @@ static int __devinit tg3_test_dma(struct tg3 *tp)
8868 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701) 9071 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701)
8869 goto out; 9072 goto out;
8870 9073
9074 /* It is best to perform DMA test with maximum write burst size
9075 * to expose the 5700/5701 write DMA bug.
9076 */
9077 saved_dma_rwctrl = tp->dma_rwctrl;
9078 tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
9079 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
9080
8871 while (1) { 9081 while (1) {
8872 u32 *p = buf, i; 9082 u32 *p = buf, i;
8873 9083
@@ -8906,8 +9116,9 @@ static int __devinit tg3_test_dma(struct tg3 *tp)
8906 if (p[i] == i) 9116 if (p[i] == i)
8907 continue; 9117 continue;
8908 9118
8909 if ((tp->dma_rwctrl & DMA_RWCTRL_WRITE_BNDRY_MASK) == 9119 if ((tp->dma_rwctrl & DMA_RWCTRL_WRITE_BNDRY_MASK) !=
8910 DMA_RWCTRL_WRITE_BNDRY_DISAB) { 9120 DMA_RWCTRL_WRITE_BNDRY_16) {
9121 tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
8911 tp->dma_rwctrl |= DMA_RWCTRL_WRITE_BNDRY_16; 9122 tp->dma_rwctrl |= DMA_RWCTRL_WRITE_BNDRY_16;
8912 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl); 9123 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
8913 break; 9124 break;
@@ -8924,6 +9135,14 @@ static int __devinit tg3_test_dma(struct tg3 *tp)
8924 break; 9135 break;
8925 } 9136 }
8926 } 9137 }
9138 if ((tp->dma_rwctrl & DMA_RWCTRL_WRITE_BNDRY_MASK) !=
9139 DMA_RWCTRL_WRITE_BNDRY_16) {
9140 /* DMA test passed without adjusting DMA boundary,
9141 * just restore the calculated DMA boundary
9142 */
9143 tp->dma_rwctrl = saved_dma_rwctrl;
9144 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
9145 }
8927 9146
8928out: 9147out:
8929 pci_free_consistent(tp->pdev, TEST_BUFFER_SIZE, buf, buf_dma); 9148 pci_free_consistent(tp->pdev, TEST_BUFFER_SIZE, buf, buf_dma);
@@ -9011,6 +9230,31 @@ static struct pci_dev * __devinit tg3_find_5704_peer(struct tg3 *tp)
9011 return peer; 9230 return peer;
9012} 9231}
9013 9232
9233static void __devinit tg3_init_coal(struct tg3 *tp)
9234{
9235 struct ethtool_coalesce *ec = &tp->coal;
9236
9237 memset(ec, 0, sizeof(*ec));
9238 ec->cmd = ETHTOOL_GCOALESCE;
9239 ec->rx_coalesce_usecs = LOW_RXCOL_TICKS;
9240 ec->tx_coalesce_usecs = LOW_TXCOL_TICKS;
9241 ec->rx_max_coalesced_frames = LOW_RXMAX_FRAMES;
9242 ec->tx_max_coalesced_frames = LOW_TXMAX_FRAMES;
9243 ec->rx_coalesce_usecs_irq = DEFAULT_RXCOAL_TICK_INT;
9244 ec->tx_coalesce_usecs_irq = DEFAULT_TXCOAL_TICK_INT;
9245 ec->rx_max_coalesced_frames_irq = DEFAULT_RXCOAL_MAXF_INT;
9246 ec->tx_max_coalesced_frames_irq = DEFAULT_TXCOAL_MAXF_INT;
9247 ec->stats_block_coalesce_usecs = DEFAULT_STAT_COAL_TICKS;
9248
9249 if (tp->coalesce_mode & (HOSTCC_MODE_CLRTICK_RXBD |
9250 HOSTCC_MODE_CLRTICK_TXBD)) {
9251 ec->rx_coalesce_usecs = LOW_RXCOL_TICKS_CLRTCKS;
9252 ec->rx_coalesce_usecs_irq = DEFAULT_RXCOAL_TICK_INT_CLRTCKS;
9253 ec->tx_coalesce_usecs = LOW_TXCOL_TICKS_CLRTCKS;
9254 ec->tx_coalesce_usecs_irq = DEFAULT_TXCOAL_TICK_INT_CLRTCKS;
9255 }
9256}
9257
9014static int __devinit tg3_init_one(struct pci_dev *pdev, 9258static int __devinit tg3_init_one(struct pci_dev *pdev,
9015 const struct pci_device_id *ent) 9259 const struct pci_device_id *ent)
9016{ 9260{
@@ -9256,6 +9500,8 @@ static int __devinit tg3_init_one(struct pci_dev *pdev,
9256 /* flow control autonegotiation is default behavior */ 9500 /* flow control autonegotiation is default behavior */
9257 tp->tg3_flags |= TG3_FLAG_PAUSE_AUTONEG; 9501 tp->tg3_flags |= TG3_FLAG_PAUSE_AUTONEG;
9258 9502
9503 tg3_init_coal(tp);
9504
9259 err = register_netdev(dev); 9505 err = register_netdev(dev);
9260 if (err) { 9506 if (err) {
9261 printk(KERN_ERR PFX "Cannot register net device, " 9507 printk(KERN_ERR PFX "Cannot register net device, "
@@ -9298,6 +9544,8 @@ static int __devinit tg3_init_one(struct pci_dev *pdev,
9298 (tp->tg3_flags & TG3_FLAG_SPLIT_MODE) != 0, 9544 (tp->tg3_flags & TG3_FLAG_SPLIT_MODE) != 0,
9299 (tp->tg3_flags2 & TG3_FLG2_NO_ETH_WIRE_SPEED) == 0, 9545 (tp->tg3_flags2 & TG3_FLG2_NO_ETH_WIRE_SPEED) == 0,
9300 (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE) != 0); 9546 (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE) != 0);
9547 printk(KERN_INFO "%s: dma_rwctrl[%08x]\n",
9548 dev->name, tp->dma_rwctrl);
9301 9549
9302 return 0; 9550 return 0;
9303 9551
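Aside: the .get_coalesce hook wired up above is what the ETHTOOL_GCOALESCE
ioctl (and thus "ethtool -c") ends up calling. A minimal user-space sketch,
not part of this patch; the interface name "eth0" is an assumption:

/* read_coal.c: query the coalescing parameters that tg3_init_coal() fills
 * in and tg3_get_coalesce() copies out.  Hypothetical example program. */
#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <net/if.h>
#include <linux/ethtool.h>
#include <linux/sockios.h>

int main(void)
{
	struct ethtool_coalesce ec;
	struct ifreq ifr;
	int fd = socket(AF_INET, SOCK_DGRAM, 0);

	if (fd < 0)
		return 1;

	memset(&ifr, 0, sizeof(ifr));
	strncpy(ifr.ifr_name, "eth0", IFNAMSIZ - 1);	/* assumed name */

	memset(&ec, 0, sizeof(ec));
	ec.cmd = ETHTOOL_GCOALESCE;	/* same cmd value stored in tp->coal */
	ifr.ifr_data = (void *)&ec;

	if (ioctl(fd, SIOCETHTOOL, &ifr) < 0) {
		perror("SIOCETHTOOL");
		close(fd);
		return 1;
	}

	/* These fields come straight from tp->coal via tg3_get_coalesce(). */
	printf("rx_coalesce_usecs:       %u\n", ec.rx_coalesce_usecs);
	printf("tx_coalesce_usecs:       %u\n", ec.tx_coalesce_usecs);
	printf("rx_max_coalesced_frames: %u\n", ec.rx_max_coalesced_frames);
	close(fd);
	return 0;
}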
diff --git a/drivers/net/tg3.h b/drivers/net/tg3.h
index 8de6f21037ba..993f84c93dc4 100644
--- a/drivers/net/tg3.h
+++ b/drivers/net/tg3.h
@@ -876,10 +876,12 @@
876#define HOSTCC_STATUS_ERROR_ATTN 0x00000004 876#define HOSTCC_STATUS_ERROR_ATTN 0x00000004
877#define HOSTCC_RXCOL_TICKS 0x00003c08 877#define HOSTCC_RXCOL_TICKS 0x00003c08
878#define LOW_RXCOL_TICKS 0x00000032 878#define LOW_RXCOL_TICKS 0x00000032
879#define LOW_RXCOL_TICKS_CLRTCKS 0x00000014
879#define DEFAULT_RXCOL_TICKS 0x00000048 880#define DEFAULT_RXCOL_TICKS 0x00000048
880#define HIGH_RXCOL_TICKS 0x00000096 881#define HIGH_RXCOL_TICKS 0x00000096
881#define HOSTCC_TXCOL_TICKS 0x00003c0c 882#define HOSTCC_TXCOL_TICKS 0x00003c0c
882#define LOW_TXCOL_TICKS 0x00000096 883#define LOW_TXCOL_TICKS 0x00000096
884#define LOW_TXCOL_TICKS_CLRTCKS 0x00000048
883#define DEFAULT_TXCOL_TICKS 0x0000012c 885#define DEFAULT_TXCOL_TICKS 0x0000012c
884#define HIGH_TXCOL_TICKS 0x00000145 886#define HIGH_TXCOL_TICKS 0x00000145
885#define HOSTCC_RXMAX_FRAMES 0x00003c10 887#define HOSTCC_RXMAX_FRAMES 0x00003c10
@@ -892,8 +894,10 @@
892#define HIGH_TXMAX_FRAMES 0x00000052 894#define HIGH_TXMAX_FRAMES 0x00000052
893#define HOSTCC_RXCOAL_TICK_INT 0x00003c18 895#define HOSTCC_RXCOAL_TICK_INT 0x00003c18
894#define DEFAULT_RXCOAL_TICK_INT 0x00000019 896#define DEFAULT_RXCOAL_TICK_INT 0x00000019
897#define DEFAULT_RXCOAL_TICK_INT_CLRTCKS 0x00000014
895#define HOSTCC_TXCOAL_TICK_INT 0x00003c1c 898#define HOSTCC_TXCOAL_TICK_INT 0x00003c1c
896#define DEFAULT_TXCOAL_TICK_INT 0x00000019 899#define DEFAULT_TXCOAL_TICK_INT 0x00000019
900#define DEFAULT_TXCOAL_TICK_INT_CLRTCKS 0x00000014
897#define HOSTCC_RXCOAL_MAXF_INT 0x00003c20 901#define HOSTCC_RXCOAL_MAXF_INT 0x00003c20
898#define DEFAULT_RXCOAL_MAXF_INT 0x00000005 902#define DEFAULT_RXCOAL_MAXF_INT 0x00000005
899#define HOSTCC_TXCOAL_MAXF_INT 0x00003c24 903#define HOSTCC_TXCOAL_MAXF_INT 0x00003c24
@@ -2023,6 +2027,7 @@ struct tg3 {
2023 2027
2024 struct tg3_hw_status *hw_status; 2028 struct tg3_hw_status *hw_status;
2025 dma_addr_t status_mapping; 2029 dma_addr_t status_mapping;
2030 u32 last_tag;
2026 2031
2027 u32 msg_enable; 2032 u32 msg_enable;
2028 2033
@@ -2068,6 +2073,7 @@ struct tg3 {
2068 2073
2069 u32 rx_offset; 2074 u32 rx_offset;
2070 u32 tg3_flags; 2075 u32 tg3_flags;
2076#define TG3_FLAG_TAGGED_STATUS 0x00000001
2071#define TG3_FLAG_TXD_MBOX_HWBUG 0x00000002 2077#define TG3_FLAG_TXD_MBOX_HWBUG 0x00000002
2072#define TG3_FLAG_RX_CHECKSUMS 0x00000004 2078#define TG3_FLAG_RX_CHECKSUMS 0x00000004
2073#define TG3_FLAG_USE_LINKCHG_REG 0x00000008 2079#define TG3_FLAG_USE_LINKCHG_REG 0x00000008
@@ -2225,7 +2231,7 @@ struct tg3 {
2225 2231
2226#define SST_25VF0X0_PAGE_SIZE 4098 2232#define SST_25VF0X0_PAGE_SIZE 4098
2227 2233
2228 2234 struct ethtool_coalesce coal;
2229}; 2235};
2230 2236
2231#endif /* !(_T3_H) */ 2237#endif /* !(_T3_H) */
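Aside: a standalone sketch (not from the patch) of the cache-line arithmetic
behind tg3_calc_dma_bndry() in the tg3.c hunk above. PCI_CACHE_LINE_SIZE
holds the line size in 32-bit words, so the driver multiplies the config
byte by 4 and treats 0 as unknown, falling back to a worst-case 1024 bytes:

#include <stdio.h>

/* Mirror of the size decoding at the top of tg3_calc_dma_bndry(). */
static int cacheline_bytes(unsigned char pci_cfg_byte)
{
	if (pci_cfg_byte == 0)
		return 1024;		/* unset register: assume worst case */
	return (int)pci_cfg_byte * 4;	/* the register counts dwords */
}

int main(void)
{
	/* 0x08 -> 32 bytes (typical i386), 0x10 -> 64, 0x20 -> 128 */
	unsigned char samples[] = { 0x00, 0x08, 0x10, 0x20 };
	unsigned int i;

	for (i = 0; i < sizeof(samples); i++)
		printf("cfg byte 0x%02x -> %d byte cache line\n",
		       samples[i], cacheline_bytes(samples[i]));
	return 0;
}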
diff --git a/drivers/sbus/char/aurora.c b/drivers/sbus/char/aurora.c
index e5fa1703856b..650d5e924f47 100644
--- a/drivers/sbus/char/aurora.c
+++ b/drivers/sbus/char/aurora.c
@@ -81,10 +81,6 @@ unsigned char irqs[4] = {
81int irqhit=0; 81int irqhit=0;
82#endif 82#endif
83 83
84#ifndef MIN
85#define MIN(a,b) ((a) < (b) ? (a) : (b))
86#endif
87
88static struct tty_driver *aurora_driver; 84static struct tty_driver *aurora_driver;
89static struct Aurora_board aurora_board[AURORA_NBOARD] = { 85static struct Aurora_board aurora_board[AURORA_NBOARD] = {
90 {0,}, 86 {0,},
@@ -594,7 +590,7 @@ static void aurora_transmit(struct Aurora_board const * bp, int chip)
594 &bp->r[chip]->r[CD180_TDR]); 590 &bp->r[chip]->r[CD180_TDR]);
595 port->COR2 &= ~COR2_ETC; 591 port->COR2 &= ~COR2_ETC;
596 } 592 }
597 count = MIN(port->break_length, 0xff); 593 count = min(port->break_length, 0xff);
598 sbus_writeb(CD180_C_ESC, 594 sbus_writeb(CD180_C_ESC,
599 &bp->r[chip]->r[CD180_TDR]); 595 &bp->r[chip]->r[CD180_TDR]);
600 sbus_writeb(CD180_C_DELAY, 596 sbus_writeb(CD180_C_DELAY,
@@ -1575,7 +1571,7 @@ static int aurora_write(struct tty_struct * tty,
1575 save_flags(flags); 1571 save_flags(flags);
1576 while (1) { 1572 while (1) {
1577 cli(); 1573 cli();
1578 c = MIN(count, MIN(SERIAL_XMIT_SIZE - port->xmit_cnt - 1, 1574 c = min(count, min(SERIAL_XMIT_SIZE - port->xmit_cnt - 1,
1579 SERIAL_XMIT_SIZE - port->xmit_head)); 1575 SERIAL_XMIT_SIZE - port->xmit_head));
1580 if (c <= 0) { 1576 if (c <= 0) {
1581 restore_flags(flags); 1577 restore_flags(flags);
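Aside: the aurora.c hunk above swaps the driver-local MIN() macro for the
kernel's min(), which evaluates each argument exactly once (via typeof
temporaries) and type-checks its operands. A small user-space sketch, not
from the patch, of the double-evaluation hazard the macro form carries:

#include <stdio.h>

#define MIN(a,b) ((a) < (b) ? (a) : (b))	/* the removed macro */

int main(void)
{
	int i = 0;
	int a[] = { 5, 9, 2 };

	/* a[i++] is evaluated twice: once in the comparison and once in
	 * the selected branch, so v ends up 9 (a[1]) and i ends up 2. */
	int v = MIN(a[i++], 7);

	printf("v=%d i=%d (naively expected v=5 i=1)\n", v, i);
	return 0;
}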
diff --git a/drivers/scsi/aic7xxx/aic7770_osm.c b/drivers/scsi/aic7xxx/aic7770_osm.c
index c2b47f2bdffd..682ca0b32b44 100644
--- a/drivers/scsi/aic7xxx/aic7770_osm.c
+++ b/drivers/scsi/aic7xxx/aic7770_osm.c
@@ -41,7 +41,6 @@
41 41
42#include "aic7xxx_osm.h" 42#include "aic7xxx_osm.h"
43 43
44#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,5,0)
45#include <linux/device.h> 44#include <linux/device.h>
46#include <linux/eisa.h> 45#include <linux/eisa.h>
47 46
@@ -62,13 +61,6 @@ static struct eisa_driver aic7770_driver = {
62}; 61};
63 62
64typedef struct device *aic7770_dev_t; 63typedef struct device *aic7770_dev_t;
65#else
66#define MINSLOT 1
67#define NUMSLOTS 16
68#define IDOFFSET 0x80
69
70typedef void *aic7770_dev_t;
71#endif
72 64
73static int aic7770_linux_config(struct aic7770_identity *entry, 65static int aic7770_linux_config(struct aic7770_identity *entry,
74 aic7770_dev_t dev, u_int eisaBase); 66 aic7770_dev_t dev, u_int eisaBase);
@@ -76,7 +68,6 @@ static int aic7770_linux_config(struct aic7770_identity *entry,
76int 68int
77ahc_linux_eisa_init(void) 69ahc_linux_eisa_init(void)
78{ 70{
79#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,5,0)
80 struct eisa_device_id *eid; 71 struct eisa_device_id *eid;
81 struct aic7770_identity *id; 72 struct aic7770_identity *id;
82 int i; 73 int i;
@@ -110,44 +101,6 @@ ahc_linux_eisa_init(void)
110 eid->sig[0] = 0; 101 eid->sig[0] = 0;
111 102
112 return eisa_driver_register(&aic7770_driver); 103 return eisa_driver_register(&aic7770_driver);
113#else /* LINUX_VERSION_CODE < KERNEL_VERSION(2,5,0) */
114 struct aic7770_identity *entry;
115 u_int slot;
116 u_int eisaBase;
117 u_int i;
118 int ret = -ENODEV;
119
120 if (aic7xxx_probe_eisa_vl == 0)
121 return ret;
122
123 eisaBase = 0x1000 + AHC_EISA_SLOT_OFFSET;
124 for (slot = 1; slot < NUMSLOTS; eisaBase+=0x1000, slot++) {
125 uint32_t eisa_id;
126 size_t id_size;
127
128 if (request_region(eisaBase, AHC_EISA_IOSIZE, "aic7xxx") == 0)
129 continue;
130
131 eisa_id = 0;
132 id_size = sizeof(eisa_id);
133 for (i = 0; i < 4; i++) {
134 /* VL cards require priming */
135 outb(0x80 + i, eisaBase + IDOFFSET);
136 eisa_id |= inb(eisaBase + IDOFFSET + i)
137 << ((id_size-i-1) * 8);
138 }
139 release_region(eisaBase, AHC_EISA_IOSIZE);
140 if (eisa_id & 0x80000000)
141 continue; /* no EISA card in slot */
142
143 entry = aic7770_find_device(eisa_id);
144 if (entry != NULL) {
145 aic7770_linux_config(entry, NULL, eisaBase);
146 ret = 0;
147 }
148 }
149 return ret;
150#endif
151} 104}
152 105
153void 106void
@@ -187,11 +140,10 @@ aic7770_linux_config(struct aic7770_identity *entry, aic7770_dev_t dev,
187 ahc_free(ahc); 140 ahc_free(ahc);
188 return (error); 141 return (error);
189 } 142 }
190#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,5,0) 143
191 dev->driver_data = (void *)ahc; 144 dev->driver_data = (void *)ahc;
192 if (aic7xxx_detect_complete) 145 if (aic7xxx_detect_complete)
193 error = ahc_linux_register_host(ahc, &aic7xxx_driver_template); 146 error = ahc_linux_register_host(ahc, &aic7xxx_driver_template);
194#endif
195 return (error); 147 return (error);
196} 148}
197 149
@@ -225,7 +177,6 @@ aic7770_map_int(struct ahc_softc *ahc, u_int irq)
225 return (-error); 177 return (-error);
226} 178}
227 179
228#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,5,0)
229static int 180static int
230aic7770_eisa_dev_probe(struct device *dev) 181aic7770_eisa_dev_probe(struct device *dev)
231{ 182{
@@ -261,4 +212,3 @@ aic7770_eisa_dev_remove(struct device *dev)
261 212
262 return (0); 213 return (0);
263} 214}
264#endif
diff --git a/drivers/scsi/aic7xxx/aic7xxx_osm.c b/drivers/scsi/aic7xxx/aic7xxx_osm.c
index d978e4a3e973..f90efa265ba2 100644
--- a/drivers/scsi/aic7xxx/aic7xxx_osm.c
+++ b/drivers/scsi/aic7xxx/aic7xxx_osm.c
@@ -134,11 +134,6 @@ static struct scsi_transport_template *ahc_linux_transport_template = NULL;
134#include "aiclib.c" 134#include "aiclib.c"
135 135
136#include <linux/init.h> /* __setup */ 136#include <linux/init.h> /* __setup */
137
138#if LINUX_VERSION_CODE < KERNEL_VERSION(2,5,0)
139#include "sd.h" /* For geometry detection */
140#endif
141
142#include <linux/mm.h> /* For fetching system memory size */ 137#include <linux/mm.h> /* For fetching system memory size */
143#include <linux/blkdev.h> /* For block_size() */ 138#include <linux/blkdev.h> /* For block_size() */
144#include <linux/delay.h> /* For ssleep/msleep */ 139#include <linux/delay.h> /* For ssleep/msleep */
@@ -148,11 +143,6 @@ static struct scsi_transport_template *ahc_linux_transport_template = NULL;
148 */ 143 */
149spinlock_t ahc_list_spinlock; 144spinlock_t ahc_list_spinlock;
150 145
151#if LINUX_VERSION_CODE < KERNEL_VERSION(2,5,0)
152/* For dynamic sglist size calculation. */
153u_int ahc_linux_nseg;
154#endif
155
156/* 146/*
157 * Set this to the delay in seconds after SCSI bus reset. 147 * Set this to the delay in seconds after SCSI bus reset.
158 * Note, we honor this only for the initial bus reset. 148 * Note, we honor this only for the initial bus reset.
@@ -436,15 +426,12 @@ static void ahc_linux_handle_scsi_status(struct ahc_softc *,
436 struct ahc_linux_device *, 426 struct ahc_linux_device *,
437 struct scb *); 427 struct scb *);
438static void ahc_linux_queue_cmd_complete(struct ahc_softc *ahc, 428static void ahc_linux_queue_cmd_complete(struct ahc_softc *ahc,
439 Scsi_Cmnd *cmd); 429 struct scsi_cmnd *cmd);
440static void ahc_linux_sem_timeout(u_long arg); 430static void ahc_linux_sem_timeout(u_long arg);
441static void ahc_linux_freeze_simq(struct ahc_softc *ahc); 431static void ahc_linux_freeze_simq(struct ahc_softc *ahc);
442static void ahc_linux_release_simq(u_long arg); 432static void ahc_linux_release_simq(u_long arg);
443static void ahc_linux_dev_timed_unfreeze(u_long arg); 433static int ahc_linux_queue_recovery_cmd(struct scsi_cmnd *cmd, scb_flag flag);
444static int ahc_linux_queue_recovery_cmd(Scsi_Cmnd *cmd, scb_flag flag);
445static void ahc_linux_initialize_scsi_bus(struct ahc_softc *ahc); 434static void ahc_linux_initialize_scsi_bus(struct ahc_softc *ahc);
446static void ahc_linux_size_nseg(void);
447static void ahc_linux_thread_run_complete_queue(struct ahc_softc *ahc);
448static u_int ahc_linux_user_tagdepth(struct ahc_softc *ahc, 435static u_int ahc_linux_user_tagdepth(struct ahc_softc *ahc,
449 struct ahc_devinfo *devinfo); 436 struct ahc_devinfo *devinfo);
450static void ahc_linux_device_queue_depth(struct ahc_softc *ahc, 437static void ahc_linux_device_queue_depth(struct ahc_softc *ahc,
@@ -458,54 +445,27 @@ static struct ahc_linux_device* ahc_linux_alloc_device(struct ahc_softc*,
458 u_int); 445 u_int);
459static void ahc_linux_free_device(struct ahc_softc*, 446static void ahc_linux_free_device(struct ahc_softc*,
460 struct ahc_linux_device*); 447 struct ahc_linux_device*);
461static void ahc_linux_run_device_queue(struct ahc_softc*, 448static int ahc_linux_run_command(struct ahc_softc*,
462 struct ahc_linux_device*); 449 struct ahc_linux_device *,
450 struct scsi_cmnd *);
463static void ahc_linux_setup_tag_info_global(char *p); 451static void ahc_linux_setup_tag_info_global(char *p);
464static aic_option_callback_t ahc_linux_setup_tag_info; 452static aic_option_callback_t ahc_linux_setup_tag_info;
465static int aic7xxx_setup(char *s); 453static int aic7xxx_setup(char *s);
466static int ahc_linux_next_unit(void); 454static int ahc_linux_next_unit(void);
467static void ahc_runq_tasklet(unsigned long data);
468static struct ahc_cmd *ahc_linux_run_complete_queue(struct ahc_softc *ahc);
469 455
470/********************************* Inlines ************************************/ 456/********************************* Inlines ************************************/
471static __inline void ahc_schedule_runq(struct ahc_softc *ahc);
472static __inline struct ahc_linux_device* 457static __inline struct ahc_linux_device*
473 ahc_linux_get_device(struct ahc_softc *ahc, u_int channel, 458 ahc_linux_get_device(struct ahc_softc *ahc, u_int channel,
474 u_int target, u_int lun, int alloc); 459 u_int target, u_int lun);
475static __inline void ahc_schedule_completeq(struct ahc_softc *ahc);
476static __inline void ahc_linux_check_device_queue(struct ahc_softc *ahc,
477 struct ahc_linux_device *dev);
478static __inline struct ahc_linux_device *
479 ahc_linux_next_device_to_run(struct ahc_softc *ahc);
480static __inline void ahc_linux_run_device_queues(struct ahc_softc *ahc);
481static __inline void ahc_linux_unmap_scb(struct ahc_softc*, struct scb*); 460static __inline void ahc_linux_unmap_scb(struct ahc_softc*, struct scb*);
482 461
483static __inline int ahc_linux_map_seg(struct ahc_softc *ahc, struct scb *scb, 462static __inline int ahc_linux_map_seg(struct ahc_softc *ahc, struct scb *scb,
484 struct ahc_dma_seg *sg, 463 struct ahc_dma_seg *sg,
485 dma_addr_t addr, bus_size_t len); 464 dma_addr_t addr, bus_size_t len);
486 465
487static __inline void
488ahc_schedule_completeq(struct ahc_softc *ahc)
489{
490 if ((ahc->platform_data->flags & AHC_RUN_CMPLT_Q_TIMER) == 0) {
491 ahc->platform_data->flags |= AHC_RUN_CMPLT_Q_TIMER;
492 ahc->platform_data->completeq_timer.expires = jiffies;
493 add_timer(&ahc->platform_data->completeq_timer);
494 }
495}
496
497/*
498 * Must be called with our lock held.
499 */
500static __inline void
501ahc_schedule_runq(struct ahc_softc *ahc)
502{
503 tasklet_schedule(&ahc->platform_data->runq_tasklet);
504}
505
506static __inline struct ahc_linux_device* 466static __inline struct ahc_linux_device*
507ahc_linux_get_device(struct ahc_softc *ahc, u_int channel, u_int target, 467ahc_linux_get_device(struct ahc_softc *ahc, u_int channel, u_int target,
508 u_int lun, int alloc) 468 u_int lun)
509{ 469{
510 struct ahc_linux_target *targ; 470 struct ahc_linux_target *targ;
511 struct ahc_linux_device *dev; 471 struct ahc_linux_device *dev;
@@ -515,102 +475,15 @@ ahc_linux_get_device(struct ahc_softc *ahc, u_int channel, u_int target,
515 if (channel != 0) 475 if (channel != 0)
516 target_offset += 8; 476 target_offset += 8;
517 targ = ahc->platform_data->targets[target_offset]; 477 targ = ahc->platform_data->targets[target_offset];
518 if (targ == NULL) { 478 BUG_ON(targ == NULL);
519 if (alloc != 0) {
520 targ = ahc_linux_alloc_target(ahc, channel, target);
521 if (targ == NULL)
522 return (NULL);
523 } else
524 return (NULL);
525 }
526 dev = targ->devices[lun]; 479 dev = targ->devices[lun];
527 if (dev == NULL && alloc != 0) 480 return dev;
528 dev = ahc_linux_alloc_device(ahc, targ, lun);
529 return (dev);
530}
531
532#define AHC_LINUX_MAX_RETURNED_ERRORS 4
533static struct ahc_cmd *
534ahc_linux_run_complete_queue(struct ahc_softc *ahc)
535{
536 struct ahc_cmd *acmd;
537 u_long done_flags;
538 int with_errors;
539
540 with_errors = 0;
541 ahc_done_lock(ahc, &done_flags);
542 while ((acmd = TAILQ_FIRST(&ahc->platform_data->completeq)) != NULL) {
543 Scsi_Cmnd *cmd;
544
545 if (with_errors > AHC_LINUX_MAX_RETURNED_ERRORS) {
546 /*
547 * Linux uses stack recursion to requeue
548 * commands that need to be retried. Avoid
549 * blowing out the stack by "spoon feeding"
550 * commands that completed with error back
551 * to the operating system in case they are going
552 * to be retried. "ick"
553 */
554 ahc_schedule_completeq(ahc);
555 break;
556 }
557 TAILQ_REMOVE(&ahc->platform_data->completeq,
558 acmd, acmd_links.tqe);
559 cmd = &acmd_scsi_cmd(acmd);
560 cmd->host_scribble = NULL;
561 if (ahc_cmd_get_transaction_status(cmd) != DID_OK
562 || (cmd->result & 0xFF) != SCSI_STATUS_OK)
563 with_errors++;
564
565 cmd->scsi_done(cmd);
566 }
567 ahc_done_unlock(ahc, &done_flags);
568 return (acmd);
569}
570
571static __inline void
572ahc_linux_check_device_queue(struct ahc_softc *ahc,
573 struct ahc_linux_device *dev)
574{
575 if ((dev->flags & AHC_DEV_FREEZE_TIL_EMPTY) != 0
576 && dev->active == 0) {
577 dev->flags &= ~AHC_DEV_FREEZE_TIL_EMPTY;
578 dev->qfrozen--;
579 }
580
581 if (TAILQ_FIRST(&dev->busyq) == NULL
582 || dev->openings == 0 || dev->qfrozen != 0)
583 return;
584
585 ahc_linux_run_device_queue(ahc, dev);
586}
587
588static __inline struct ahc_linux_device *
589ahc_linux_next_device_to_run(struct ahc_softc *ahc)
590{
591
592 if ((ahc->flags & AHC_RESOURCE_SHORTAGE) != 0
593 || (ahc->platform_data->qfrozen != 0))
594 return (NULL);
595 return (TAILQ_FIRST(&ahc->platform_data->device_runq));
596}
597
598static __inline void
599ahc_linux_run_device_queues(struct ahc_softc *ahc)
600{
601 struct ahc_linux_device *dev;
602
603 while ((dev = ahc_linux_next_device_to_run(ahc)) != NULL) {
604 TAILQ_REMOVE(&ahc->platform_data->device_runq, dev, links);
605 dev->flags &= ~AHC_DEV_ON_RUN_LIST;
606 ahc_linux_check_device_queue(ahc, dev);
607 }
608} 481}
609 482
610static __inline void 483static __inline void
611ahc_linux_unmap_scb(struct ahc_softc *ahc, struct scb *scb) 484ahc_linux_unmap_scb(struct ahc_softc *ahc, struct scb *scb)
612{ 485{
613 Scsi_Cmnd *cmd; 486 struct scsi_cmnd *cmd;
614 487
615 cmd = scb->io_ctx; 488 cmd = scb->io_ctx;
616 ahc_sync_sglist(ahc, scb, BUS_DMASYNC_POSTWRITE); 489 ahc_sync_sglist(ahc, scb, BUS_DMASYNC_POSTWRITE);
@@ -650,109 +523,15 @@ ahc_linux_map_seg(struct ahc_softc *ahc, struct scb *scb,
650 return (consumed); 523 return (consumed);
651} 524}
652 525
653/************************ Host template entry points *************************/
654static int ahc_linux_detect(Scsi_Host_Template *);
655static int ahc_linux_queue(Scsi_Cmnd *, void (*)(Scsi_Cmnd *));
656static const char *ahc_linux_info(struct Scsi_Host *);
657#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,5,0)
658static int ahc_linux_slave_alloc(Scsi_Device *);
659static int ahc_linux_slave_configure(Scsi_Device *);
660static void ahc_linux_slave_destroy(Scsi_Device *);
661#if defined(__i386__)
662static int ahc_linux_biosparam(struct scsi_device*,
663 struct block_device*,
664 sector_t, int[]);
665#endif
666#else
667static int ahc_linux_release(struct Scsi_Host *);
668static void ahc_linux_select_queue_depth(struct Scsi_Host *host,
669 Scsi_Device *scsi_devs);
670#if defined(__i386__)
671static int ahc_linux_biosparam(Disk *, kdev_t, int[]);
672#endif
673#endif
674static int ahc_linux_bus_reset(Scsi_Cmnd *);
675static int ahc_linux_dev_reset(Scsi_Cmnd *);
676static int ahc_linux_abort(Scsi_Cmnd *);
677
678/*
679 * Calculate a safe value for AHC_NSEG (as expressed through ahc_linux_nseg).
680 *
681 * In pre-2.5.X...
682 * The midlayer allocates an S/G array dynamically when a command is issued
683 * using SCSI malloc. This array, which is in an OS dependent format that
684 * must later be copied to our private S/G list, is sized to house just the
685 * number of segments needed for the current transfer. Since the code that
686 * sizes the SCSI malloc pool does not take into consideration fragmentation
687 * of the pool, executing transactions numbering just a fraction of our
688 * concurrent transaction limit with list lengths approaching AHC_NSEG will
689 * quickly deplete the SCSI malloc pool of usable space. Unfortunately, the
690 * mid-layer does not properly handle these scsi malloc failures for the S/G
691 * array and the result can be a lockup of the I/O subsystem. We try to size
692 * our S/G list so that it satisfies our driver's allocation requirements in
693 * addition to avoiding fragmentation of the SCSI malloc pool.
694 */
695static void
696ahc_linux_size_nseg(void)
697{
698#if LINUX_VERSION_CODE < KERNEL_VERSION(2,5,0)
699 u_int cur_size;
700 u_int best_size;
701
702 /*
703 * The SCSI allocator rounds to the nearest 512 bytes
704 * and cannot allocate across a page boundary. Our algorithm
705 * is to start at 1K of scsi malloc space per-command and
706 * loop through all factors of the PAGE_SIZE and pick the best.
707 */
708 best_size = 0;
709 for (cur_size = 1024; cur_size <= PAGE_SIZE; cur_size *= 2) {
710 u_int nseg;
711
712 nseg = cur_size / sizeof(struct scatterlist);
713 if (nseg < AHC_LINUX_MIN_NSEG)
714 continue;
715
716 if (best_size == 0) {
717 best_size = cur_size;
718 ahc_linux_nseg = nseg;
719 } else {
720 u_int best_rem;
721 u_int cur_rem;
722
723 /*
724 * Compare the traits of the current "best_size"
725 * with the current size to determine if the
726 * current size is a better size.
727 */
728 best_rem = best_size % sizeof(struct scatterlist);
729 cur_rem = cur_size % sizeof(struct scatterlist);
730 if (cur_rem < best_rem) {
731 best_size = cur_size;
732 ahc_linux_nseg = nseg;
733 }
734 }
735 }
736#endif
737}
738
739/* 526/*
740 * Try to detect an Adaptec 7XXX controller. 527 * Try to detect an Adaptec 7XXX controller.
741 */ 528 */
742static int 529static int
743ahc_linux_detect(Scsi_Host_Template *template) 530ahc_linux_detect(struct scsi_host_template *template)
744{ 531{
745 struct ahc_softc *ahc; 532 struct ahc_softc *ahc;
746 int found = 0; 533 int found = 0;
747 534
748#if LINUX_VERSION_CODE < KERNEL_VERSION(2,5,0)
749 /*
750 * It is a bug that the upper layer takes
751 * this lock just prior to calling us.
752 */
753 spin_unlock_irq(&io_request_lock);
754#endif
755
756 /* 535 /*
757 * Sanity checking of Linux SCSI data structures so 536 * Sanity checking of Linux SCSI data structures so
758 * that some of our hacks^H^H^H^H^Hassumptions aren't 537 * that some of our hacks^H^H^H^H^Hassumptions aren't
@@ -764,7 +543,6 @@ ahc_linux_detect(Scsi_Host_Template *template)
764 printf("ahc_linux_detect: Unable to attach\n"); 543 printf("ahc_linux_detect: Unable to attach\n");
765 return (0); 544 return (0);
766 } 545 }
767 ahc_linux_size_nseg();
768 /* 546 /*
769 * If we've been passed any parameters, process them now. 547 * If we've been passed any parameters, process them now.
770 */ 548 */
@@ -793,48 +571,11 @@ ahc_linux_detect(Scsi_Host_Template *template)
793 found++; 571 found++;
794 } 572 }
795 573
796#if LINUX_VERSION_CODE < KERNEL_VERSION(2,5,0)
797 spin_lock_irq(&io_request_lock);
798#endif
799 aic7xxx_detect_complete++; 574 aic7xxx_detect_complete++;
800 575
801 return (found); 576 return (found);
802} 577}
803 578
804#if LINUX_VERSION_CODE < KERNEL_VERSION(2,5,0)
805/*
806 * Free the passed in Scsi_Host memory structures prior to unloading the
807 * module.
808 */
809int
810ahc_linux_release(struct Scsi_Host * host)
811{
812 struct ahc_softc *ahc;
813 u_long l;
814
815 ahc_list_lock(&l);
816 if (host != NULL) {
817
818 /*
819 * We should be able to just perform
820 * the free directly, but check our
821 * list for extra sanity.
822 */
823 ahc = ahc_find_softc(*(struct ahc_softc **)host->hostdata);
824 if (ahc != NULL) {
825 u_long s;
826
827 ahc_lock(ahc, &s);
828 ahc_intr_enable(ahc, FALSE);
829 ahc_unlock(ahc, &s);
830 ahc_free(ahc);
831 }
832 }
833 ahc_list_unlock(&l);
834 return (0);
835}
836#endif
837
838/* 579/*
839 * Return a string describing the driver. 580 * Return a string describing the driver.
840 */ 581 */
@@ -867,11 +608,10 @@ ahc_linux_info(struct Scsi_Host *host)
867 * Queue an SCB to the controller. 608 * Queue an SCB to the controller.
868 */ 609 */
869static int 610static int
870ahc_linux_queue(Scsi_Cmnd * cmd, void (*scsi_done) (Scsi_Cmnd *)) 611ahc_linux_queue(struct scsi_cmnd * cmd, void (*scsi_done) (struct scsi_cmnd *))
871{ 612{
872 struct ahc_softc *ahc; 613 struct ahc_softc *ahc;
873 struct ahc_linux_device *dev; 614 struct ahc_linux_device *dev;
874 u_long flags;
875 615
876 ahc = *(struct ahc_softc **)cmd->device->host->hostdata; 616 ahc = *(struct ahc_softc **)cmd->device->host->hostdata;
877 617
@@ -880,205 +620,149 @@ ahc_linux_queue(Scsi_Cmnd * cmd, void (*scsi_done) (Scsi_Cmnd *))
880 */ 620 */
881 cmd->scsi_done = scsi_done; 621 cmd->scsi_done = scsi_done;
882 622
883 ahc_midlayer_entrypoint_lock(ahc, &flags);
884
885 /* 623 /*
886 * Close the race of a command that was in the process of 624 * Close the race of a command that was in the process of
887 * being queued to us just as our simq was frozen. Let 625 * being queued to us just as our simq was frozen. Let
888 * DV commands through so long as we are only frozen to 626 * DV commands through so long as we are only frozen to
889 * perform DV. 627 * perform DV.
890 */ 628 */
891 if (ahc->platform_data->qfrozen != 0) { 629 if (ahc->platform_data->qfrozen != 0)
630 return SCSI_MLQUEUE_HOST_BUSY;
892 631
893 ahc_cmd_set_transaction_status(cmd, CAM_REQUEUE_REQ);
894 ahc_linux_queue_cmd_complete(ahc, cmd);
895 ahc_schedule_completeq(ahc);
896 ahc_midlayer_entrypoint_unlock(ahc, &flags);
897 return (0);
898 }
899 dev = ahc_linux_get_device(ahc, cmd->device->channel, cmd->device->id, 632 dev = ahc_linux_get_device(ahc, cmd->device->channel, cmd->device->id,
900 cmd->device->lun, /*alloc*/TRUE); 633 cmd->device->lun);
901 if (dev == NULL) { 634 BUG_ON(dev == NULL);
902 ahc_cmd_set_transaction_status(cmd, CAM_RESRC_UNAVAIL); 635
903 ahc_linux_queue_cmd_complete(ahc, cmd);
904 ahc_schedule_completeq(ahc);
905 ahc_midlayer_entrypoint_unlock(ahc, &flags);
906 printf("%s: aic7xxx_linux_queue - Unable to allocate device!\n",
907 ahc_name(ahc));
908 return (0);
909 }
910 cmd->result = CAM_REQ_INPROG << 16; 636 cmd->result = CAM_REQ_INPROG << 16;
911 TAILQ_INSERT_TAIL(&dev->busyq, (struct ahc_cmd *)cmd, acmd_links.tqe); 637
912 if ((dev->flags & AHC_DEV_ON_RUN_LIST) == 0) { 638 return ahc_linux_run_command(ahc, dev, cmd);
913 TAILQ_INSERT_TAIL(&ahc->platform_data->device_runq, dev, links);
914 dev->flags |= AHC_DEV_ON_RUN_LIST;
915 ahc_linux_run_device_queues(ahc);
916 }
917 ahc_midlayer_entrypoint_unlock(ahc, &flags);
918 return (0);
919} 639}
920 640
921#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,5,0)
922static int 641static int
923ahc_linux_slave_alloc(Scsi_Device *device) 642ahc_linux_slave_alloc(struct scsi_device *device)
924{ 643{
925 struct ahc_softc *ahc; 644 struct ahc_softc *ahc;
645 struct ahc_linux_target *targ;
646 struct scsi_target *starget = device->sdev_target;
647 struct ahc_linux_device *dev;
648 unsigned int target_offset;
649 unsigned long flags;
650 int retval = -ENOMEM;
651
652 target_offset = starget->id;
653 if (starget->channel != 0)
654 target_offset += 8;
926 655
927 ahc = *((struct ahc_softc **)device->host->hostdata); 656 ahc = *((struct ahc_softc **)device->host->hostdata);
928 if (bootverbose) 657 if (bootverbose)
929 printf("%s: Slave Alloc %d\n", ahc_name(ahc), device->id); 658 printf("%s: Slave Alloc %d\n", ahc_name(ahc), device->id);
930 return (0); 659 ahc_lock(ahc, &flags);
660 targ = ahc->platform_data->targets[target_offset];
661 if (targ == NULL) {
662 targ = ahc_linux_alloc_target(ahc, starget->channel, starget->id);
663 struct seeprom_config *sc = ahc->seep_config;
664 if (targ == NULL)
665 goto out;
666
667 if (sc) {
668 unsigned short scsirate;
669 struct ahc_devinfo devinfo;
670 struct ahc_initiator_tinfo *tinfo;
671 struct ahc_tmode_tstate *tstate;
672 char channel = starget->channel + 'A';
673 unsigned int our_id = ahc->our_id;
674
675 if (starget->channel)
676 our_id = ahc->our_id_b;
677
678 if ((ahc->features & AHC_ULTRA2) != 0) {
679 scsirate = sc->device_flags[target_offset] & CFXFER;
680 } else {
681 scsirate = (sc->device_flags[target_offset] & CFXFER) << 4;
682 if (sc->device_flags[target_offset] & CFSYNCH)
683 scsirate |= SOFS;
684 }
685 if (sc->device_flags[target_offset] & CFWIDEB) {
686 scsirate |= WIDEXFER;
687 spi_max_width(starget) = 1;
688 } else
689 spi_max_width(starget) = 0;
690 spi_min_period(starget) =
691 ahc_find_period(ahc, scsirate, AHC_SYNCRATE_DT);
692 tinfo = ahc_fetch_transinfo(ahc, channel, ahc->our_id,
693 targ->target, &tstate);
694 ahc_compile_devinfo(&devinfo, our_id, targ->target,
695 CAM_LUN_WILDCARD, channel,
696 ROLE_INITIATOR);
697 ahc_set_syncrate(ahc, &devinfo, NULL, 0, 0, 0,
698 AHC_TRANS_GOAL, /*paused*/FALSE);
699 ahc_set_width(ahc, &devinfo, MSG_EXT_WDTR_BUS_8_BIT,
700 AHC_TRANS_GOAL, /*paused*/FALSE);
701 }
702
703 }
704 dev = targ->devices[device->lun];
705 if (dev == NULL) {
706 dev = ahc_linux_alloc_device(ahc, targ, device->lun);
707 if (dev == NULL)
708 goto out;
709 }
710 retval = 0;
711
712 out:
713 ahc_unlock(ahc, &flags);
714 return retval;
931} 715}
932 716
933static int 717static int
934ahc_linux_slave_configure(Scsi_Device *device) 718ahc_linux_slave_configure(struct scsi_device *device)
935{ 719{
936 struct ahc_softc *ahc; 720 struct ahc_softc *ahc;
937 struct ahc_linux_device *dev; 721 struct ahc_linux_device *dev;
938 u_long flags;
939 722
940 ahc = *((struct ahc_softc **)device->host->hostdata); 723 ahc = *((struct ahc_softc **)device->host->hostdata);
724
941 if (bootverbose) 725 if (bootverbose)
942 printf("%s: Slave Configure %d\n", ahc_name(ahc), device->id); 726 printf("%s: Slave Configure %d\n", ahc_name(ahc), device->id);
943 ahc_midlayer_entrypoint_lock(ahc, &flags); 727
944 /* 728 dev = ahc_linux_get_device(ahc, device->channel, device->id,
945 * Since Linux has attached to the device, configure 729 device->lun);
946 * it so we don't free and allocate the device 730 dev->scsi_device = device;
947 * structure on every command. 731 ahc_linux_device_queue_depth(ahc, dev);
948 */
949 dev = ahc_linux_get_device(ahc, device->channel,
950 device->id, device->lun,
951 /*alloc*/TRUE);
952 if (dev != NULL) {
953 dev->flags &= ~AHC_DEV_UNCONFIGURED;
954 dev->scsi_device = device;
955 ahc_linux_device_queue_depth(ahc, dev);
956 }
957 ahc_midlayer_entrypoint_unlock(ahc, &flags);
958 732
959 /* Initial Domain Validation */ 733 /* Initial Domain Validation */
960 if (!spi_initial_dv(device->sdev_target)) 734 if (!spi_initial_dv(device->sdev_target))
961 spi_dv_device(device); 735 spi_dv_device(device);
962 736
963 return (0); 737 return 0;
964} 738}
965 739
966static void 740static void
967ahc_linux_slave_destroy(Scsi_Device *device) 741ahc_linux_slave_destroy(struct scsi_device *device)
968{ 742{
969 struct ahc_softc *ahc; 743 struct ahc_softc *ahc;
970 struct ahc_linux_device *dev; 744 struct ahc_linux_device *dev;
971 u_long flags;
972 745
973 ahc = *((struct ahc_softc **)device->host->hostdata); 746 ahc = *((struct ahc_softc **)device->host->hostdata);
974 if (bootverbose) 747 if (bootverbose)
975 printf("%s: Slave Destroy %d\n", ahc_name(ahc), device->id); 748 printf("%s: Slave Destroy %d\n", ahc_name(ahc), device->id);
976 ahc_midlayer_entrypoint_lock(ahc, &flags);
977 dev = ahc_linux_get_device(ahc, device->channel, 749 dev = ahc_linux_get_device(ahc, device->channel,
978 device->id, device->lun, 750 device->id, device->lun);
979 /*alloc*/FALSE);
980 /*
981 * Filter out "silly" deletions of real devices by only
982 * deleting devices that have had slave_configure()
983 * called on them. All other devices that have not
984 * been configured will automatically be deleted by
985 * the refcounting process.
986 */
987 if (dev != NULL
988 && (dev->flags & AHC_DEV_SLAVE_CONFIGURED) != 0) {
989 dev->flags |= AHC_DEV_UNCONFIGURED;
990 if (TAILQ_EMPTY(&dev->busyq)
991 && dev->active == 0
992 && (dev->flags & AHC_DEV_TIMER_ACTIVE) == 0)
993 ahc_linux_free_device(ahc, dev);
994 }
995 ahc_midlayer_entrypoint_unlock(ahc, &flags);
996}
997#else
998/*
999 * Sets the queue depth for each SCSI device hanging
1000 * off the input host adapter.
1001 */
1002static void
1003ahc_linux_select_queue_depth(struct Scsi_Host *host, Scsi_Device *scsi_devs)
1004{
1005 Scsi_Device *device;
1006 Scsi_Device *ldev;
1007 struct ahc_softc *ahc;
1008 u_long flags;
1009 751
1010 ahc = *((struct ahc_softc **)host->hostdata); 752 BUG_ON(dev->active);
1011 ahc_lock(ahc, &flags);
1012 for (device = scsi_devs; device != NULL; device = device->next) {
1013 753
1014 /* 754 ahc_linux_free_device(ahc, dev);
1015 * Watch out for duplicate devices. This works around
1016 * some quirks in how the SCSI scanning code does its
1017 * device management.
1018 */
1019 for (ldev = scsi_devs; ldev != device; ldev = ldev->next) {
1020 if (ldev->host == device->host
1021 && ldev->channel == device->channel
1022 && ldev->id == device->id
1023 && ldev->lun == device->lun)
1024 break;
1025 }
1026 /* Skip duplicate. */
1027 if (ldev != device)
1028 continue;
1029
1030 if (device->host == host) {
1031 struct ahc_linux_device *dev;
1032
1033 /*
1034 * Since Linux has attached to the device, configure
1035 * it so we don't free and allocate the device
1036 * structure on every command.
1037 */
1038 dev = ahc_linux_get_device(ahc, device->channel,
1039 device->id, device->lun,
1040 /*alloc*/TRUE);
1041 if (dev != NULL) {
1042 dev->flags &= ~AHC_DEV_UNCONFIGURED;
1043 dev->scsi_device = device;
1044 ahc_linux_device_queue_depth(ahc, dev);
1045 device->queue_depth = dev->openings
1046 + dev->active;
1047 if ((dev->flags & (AHC_DEV_Q_BASIC
1048 | AHC_DEV_Q_TAGGED)) == 0) {
1049 /*
1050 * We allow the OS to queue 2 untagged
1051 * transactions to us at any time even
1052 * though we can only execute them
1053 * serially on the controller/device.
1054 * This should remove some latency.
1055 */
1056 device->queue_depth = 2;
1057 }
1058 }
1059 }
1060 }
1061 ahc_unlock(ahc, &flags);
1062} 755}
1063#endif
1064 756
1065#if defined(__i386__) 757#if defined(__i386__)
1066/* 758/*
1067 * Return the disk geometry for the given SCSI device. 759 * Return the disk geometry for the given SCSI device.
1068 */ 760 */
1069static int 761static int
1070#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,5,0)
1071ahc_linux_biosparam(struct scsi_device *sdev, struct block_device *bdev, 762ahc_linux_biosparam(struct scsi_device *sdev, struct block_device *bdev,
1072 sector_t capacity, int geom[]) 763 sector_t capacity, int geom[])
1073{ 764{
1074 uint8_t *bh; 765 uint8_t *bh;
1075#else
1076ahc_linux_biosparam(Disk *disk, kdev_t dev, int geom[])
1077{
1078 struct scsi_device *sdev = disk->device;
1079 u_long capacity = disk->capacity;
1080 struct buffer_head *bh;
1081#endif
1082 int heads; 766 int heads;
1083 int sectors; 767 int sectors;
1084 int cylinders; 768 int cylinders;
@@ -1090,22 +774,11 @@ ahc_linux_biosparam(Disk *disk, kdev_t dev, int geom[])
1090 ahc = *((struct ahc_softc **)sdev->host->hostdata); 774 ahc = *((struct ahc_softc **)sdev->host->hostdata);
1091 channel = sdev->channel; 775 channel = sdev->channel;
1092 776
1093#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,5,0)
1094 bh = scsi_bios_ptable(bdev); 777 bh = scsi_bios_ptable(bdev);
1095#elif LINUX_VERSION_CODE >= KERNEL_VERSION(2,4,17)
1096 bh = bread(MKDEV(MAJOR(dev), MINOR(dev) & ~0xf), 0, block_size(dev));
1097#else
1098 bh = bread(MKDEV(MAJOR(dev), MINOR(dev) & ~0xf), 0, 1024);
1099#endif
1100
1101 if (bh) { 778 if (bh) {
1102 ret = scsi_partsize(bh, capacity, 779 ret = scsi_partsize(bh, capacity,
1103 &geom[2], &geom[0], &geom[1]); 780 &geom[2], &geom[0], &geom[1]);
1104#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,5,0)
1105 kfree(bh); 781 kfree(bh);
1106#else
1107 brelse(bh);
1108#endif
1109 if (ret != -1) 782 if (ret != -1)
1110 return (ret); 783 return (ret);
1111 } 784 }
@@ -1135,7 +808,7 @@ ahc_linux_biosparam(Disk *disk, kdev_t dev, int geom[])
1135 * Abort the current SCSI command(s). 808 * Abort the current SCSI command(s).
1136 */ 809 */
1137static int 810static int
1138ahc_linux_abort(Scsi_Cmnd *cmd) 811ahc_linux_abort(struct scsi_cmnd *cmd)
1139{ 812{
1140 int error; 813 int error;
1141 814
@@ -1149,7 +822,7 @@ ahc_linux_abort(Scsi_Cmnd *cmd)
1149 * Attempt to send a target reset message to the device that timed out. 822 * Attempt to send a target reset message to the device that timed out.
1150 */ 823 */
1151static int 824static int
1152ahc_linux_dev_reset(Scsi_Cmnd *cmd) 825ahc_linux_dev_reset(struct scsi_cmnd *cmd)
1153{ 826{
1154 int error; 827 int error;
1155 828
@@ -1163,18 +836,14 @@ ahc_linux_dev_reset(Scsi_Cmnd *cmd)
1163 * Reset the SCSI bus. 836 * Reset the SCSI bus.
1164 */ 837 */
1165static int 838static int
1166ahc_linux_bus_reset(Scsi_Cmnd *cmd) 839ahc_linux_bus_reset(struct scsi_cmnd *cmd)
1167{ 840{
1168 struct ahc_softc *ahc; 841 struct ahc_softc *ahc;
1169 u_long s;
1170 int found; 842 int found;
1171 843
1172 ahc = *(struct ahc_softc **)cmd->device->host->hostdata; 844 ahc = *(struct ahc_softc **)cmd->device->host->hostdata;
1173 ahc_midlayer_entrypoint_lock(ahc, &s);
1174 found = ahc_reset_channel(ahc, cmd->device->channel + 'A', 845 found = ahc_reset_channel(ahc, cmd->device->channel + 'A',
1175 /*initiate reset*/TRUE); 846 /*initiate reset*/TRUE);
1176 ahc_linux_run_complete_queue(ahc);
1177 ahc_midlayer_entrypoint_unlock(ahc, &s);
1178 847
1179 if (bootverbose) 848 if (bootverbose)
1180 printf("%s: SCSI bus reset delivered. " 849 printf("%s: SCSI bus reset delivered. "
@@ -1183,7 +852,7 @@ ahc_linux_bus_reset(Scsi_Cmnd *cmd)
1183 return SUCCESS; 852 return SUCCESS;
1184} 853}
1185 854
1186Scsi_Host_Template aic7xxx_driver_template = { 855struct scsi_host_template aic7xxx_driver_template = {
1187 .module = THIS_MODULE, 856 .module = THIS_MODULE,
1188 .name = "aic7xxx", 857 .name = "aic7xxx",
1189 .proc_info = ahc_linux_proc_info, 858 .proc_info = ahc_linux_proc_info,
@@ -1206,33 +875,6 @@ Scsi_Host_Template aic7xxx_driver_template = {
1206 875
1207/**************************** Tasklet Handler *********************************/ 876/**************************** Tasklet Handler *********************************/
1208 877
1209/*
1210 * In 2.4.X and above, this routine is called from a tasklet,
1211 * so we must re-acquire our lock prior to executing this code.
1212 * In all prior kernels, ahc_schedule_runq() calls this routine
1213 * directly and ahc_schedule_runq() is called with our lock held.
1214 */
1215static void
1216ahc_runq_tasklet(unsigned long data)
1217{
1218 struct ahc_softc* ahc;
1219 struct ahc_linux_device *dev;
1220 u_long flags;
1221
1222 ahc = (struct ahc_softc *)data;
1223 ahc_lock(ahc, &flags);
1224 while ((dev = ahc_linux_next_device_to_run(ahc)) != NULL) {
1225
1226 TAILQ_REMOVE(&ahc->platform_data->device_runq, dev, links);
1227 dev->flags &= ~AHC_DEV_ON_RUN_LIST;
1228 ahc_linux_check_device_queue(ahc, dev);
1229 /* Yield to our interrupt handler */
1230 ahc_unlock(ahc, &flags);
1231 ahc_lock(ahc, &flags);
1232 }
1233 ahc_unlock(ahc, &flags);
1234}
1235
1236/******************************** Macros **************************************/ 878/******************************** Macros **************************************/
1237#define BUILD_SCSIID(ahc, cmd) \ 879#define BUILD_SCSIID(ahc, cmd) \
1238 ((((cmd)->device->id << TID_SHIFT) & TID) \ 880 ((((cmd)->device->id << TID_SHIFT) & TID) \
@@ -1278,37 +920,11 @@ int
1278ahc_dmamem_alloc(struct ahc_softc *ahc, bus_dma_tag_t dmat, void** vaddr, 920ahc_dmamem_alloc(struct ahc_softc *ahc, bus_dma_tag_t dmat, void** vaddr,
1279 int flags, bus_dmamap_t *mapp) 921 int flags, bus_dmamap_t *mapp)
1280{ 922{
1281 bus_dmamap_t map;
1282
1283 map = malloc(sizeof(*map), M_DEVBUF, M_NOWAIT);
1284 if (map == NULL)
1285 return (ENOMEM);
1286 /*
1287 * Although we can dma data above 4GB, our
1288 * "consistent" memory is below 4GB for
1289 * space efficiency reasons (only need a 4byte
1290 * address). For this reason, we have to reset
1291 * our dma mask when doing allocations.
1292 */
1293 if (ahc->dev_softc != NULL)
1294 if (pci_set_dma_mask(ahc->dev_softc, 0xFFFFFFFF)) {
1295 printk(KERN_WARNING "aic7xxx: No suitable DMA available.\n");
1296 kfree(map);
1297 return (ENODEV);
1298 }
1299 *vaddr = pci_alloc_consistent(ahc->dev_softc, 923 *vaddr = pci_alloc_consistent(ahc->dev_softc,
1300 dmat->maxsize, &map->bus_addr); 924 dmat->maxsize, mapp);
1301 if (ahc->dev_softc != NULL)
1302 if (pci_set_dma_mask(ahc->dev_softc,
1303 ahc->platform_data->hw_dma_mask)) {
1304 printk(KERN_WARNING "aic7xxx: No suitable DMA available.\n");
1305 kfree(map);
1306 return (ENODEV);
1307 }
1308 if (*vaddr == NULL) 925 if (*vaddr == NULL)
1309 return (ENOMEM); 926 return ENOMEM;
1310 *mapp = map; 927 return 0;
1311 return(0);
1312} 928}
1313 929
1314void 930void
@@ -1316,7 +932,7 @@ ahc_dmamem_free(struct ahc_softc *ahc, bus_dma_tag_t dmat,
1316 void* vaddr, bus_dmamap_t map) 932 void* vaddr, bus_dmamap_t map)
1317{ 933{
1318 pci_free_consistent(ahc->dev_softc, dmat->maxsize, 934 pci_free_consistent(ahc->dev_softc, dmat->maxsize,
1319 vaddr, map->bus_addr); 935 vaddr, map);
1320} 936}
1321 937
1322int 938int
@@ -1330,7 +946,7 @@ ahc_dmamap_load(struct ahc_softc *ahc, bus_dma_tag_t dmat, bus_dmamap_t map,
1330 */ 946 */
1331 bus_dma_segment_t stack_sg; 947 bus_dma_segment_t stack_sg;
1332 948
1333 stack_sg.ds_addr = map->bus_addr; 949 stack_sg.ds_addr = map;
1334 stack_sg.ds_len = dmat->maxsize; 950 stack_sg.ds_len = dmat->maxsize;
1335 cb(cb_arg, &stack_sg, /*nseg*/1, /*error*/0); 951 cb(cb_arg, &stack_sg, /*nseg*/1, /*error*/0);
1336 return (0); 952 return (0);
@@ -1339,12 +955,6 @@ ahc_dmamap_load(struct ahc_softc *ahc, bus_dma_tag_t dmat, bus_dmamap_t map,
1339void 955void
1340ahc_dmamap_destroy(struct ahc_softc *ahc, bus_dma_tag_t dmat, bus_dmamap_t map) 956ahc_dmamap_destroy(struct ahc_softc *ahc, bus_dma_tag_t dmat, bus_dmamap_t map)
1341{ 957{
1342 /*
1343 * The map may be NULL in our < 2.3.X implementation.
1344 * Now it's 2.6.5, but just in case...
1345 */
1346 BUG_ON(map == NULL);
1347 free(map, M_DEVBUF);
1348} 958}
1349 959
1350int 960int
@@ -1550,7 +1160,7 @@ __setup("aic7xxx=", aic7xxx_setup);
1550uint32_t aic7xxx_verbose; 1160uint32_t aic7xxx_verbose;
1551 1161
1552int 1162int
1553ahc_linux_register_host(struct ahc_softc *ahc, Scsi_Host_Template *template) 1163ahc_linux_register_host(struct ahc_softc *ahc, struct scsi_host_template *template)
1554{ 1164{
1555 char buf[80]; 1165 char buf[80];
1556 struct Scsi_Host *host; 1166 struct Scsi_Host *host;
@@ -1564,11 +1174,7 @@ ahc_linux_register_host(struct ahc_softc *ahc, Scsi_Host_Template *template)
1564 1174
1565 *((struct ahc_softc **)host->hostdata) = ahc; 1175 *((struct ahc_softc **)host->hostdata) = ahc;
1566 ahc_lock(ahc, &s); 1176 ahc_lock(ahc, &s);
-#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,5,0)
 	scsi_assign_lock(host, &ahc->platform_data->spin_lock);
-#elif AHC_SCSI_HAS_HOST_LOCK != 0
-	host->lock = &ahc->platform_data->spin_lock;
-#endif
 	ahc->platform_data->host = host;
 	host->can_queue = AHC_MAX_QUEUE;
 	host->cmd_per_lun = 2;
@@ -1587,19 +1193,14 @@ ahc_linux_register_host(struct ahc_softc *ahc, Scsi_Host_Template *template)
 		ahc_set_name(ahc, new_name);
 	}
 	host->unique_id = ahc->unit;
-#if LINUX_VERSION_CODE < KERNEL_VERSION(2,5,0)
-	scsi_set_pci_device(host, ahc->dev_softc);
-#endif
 	ahc_linux_initialize_scsi_bus(ahc);
 	ahc_intr_enable(ahc, TRUE);
 	ahc_unlock(ahc, &s);
 
 	host->transportt = ahc_linux_transport_template;
 
-#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,5,0)
 	scsi_add_host(host, (ahc->dev_softc ? &ahc->dev_softc->dev : NULL)); /* XXX handle failure */
 	scsi_scan_host(host);
-#endif
 	return (0);
 }
 
@@ -1717,19 +1318,9 @@ ahc_platform_alloc(struct ahc_softc *ahc, void *platform_arg)
 	if (ahc->platform_data == NULL)
 		return (ENOMEM);
 	memset(ahc->platform_data, 0, sizeof(struct ahc_platform_data));
-	TAILQ_INIT(&ahc->platform_data->completeq);
-	TAILQ_INIT(&ahc->platform_data->device_runq);
 	ahc->platform_data->irq = AHC_LINUX_NOIRQ;
-	ahc->platform_data->hw_dma_mask = 0xFFFFFFFF;
 	ahc_lockinit(ahc);
-	ahc_done_lockinit(ahc);
-	init_timer(&ahc->platform_data->completeq_timer);
-	ahc->platform_data->completeq_timer.data = (u_long)ahc;
-	ahc->platform_data->completeq_timer.function =
-	    (ahc_linux_callback_t *)ahc_linux_thread_run_complete_queue;
 	init_MUTEX_LOCKED(&ahc->platform_data->eh_sem);
-	tasklet_init(&ahc->platform_data->runq_tasklet, ahc_runq_tasklet,
-		     (unsigned long)ahc);
 	ahc->seltime = (aic7xxx_seltime & 0x3) << 4;
 	ahc->seltime_b = (aic7xxx_seltime & 0x3) << 4;
 	if (aic7xxx_pci_parity == 0)
@@ -1746,12 +1337,8 @@ ahc_platform_free(struct ahc_softc *ahc)
 	int i, j;
 
 	if (ahc->platform_data != NULL) {
-		del_timer_sync(&ahc->platform_data->completeq_timer);
-		tasklet_kill(&ahc->platform_data->runq_tasklet);
 		if (ahc->platform_data->host != NULL) {
-#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,5,0)
 			scsi_remove_host(ahc->platform_data->host);
-#endif
 			scsi_host_put(ahc->platform_data->host);
 		}
 
@@ -1787,16 +1374,7 @@ ahc_platform_free(struct ahc_softc *ahc)
 			release_mem_region(ahc->platform_data->mem_busaddr,
 					   0x1000);
 		}
-#if LINUX_VERSION_CODE < KERNEL_VERSION(2,5,0)
-		/*
-		 * In 2.4 we detach from the scsi midlayer before the PCI
-		 * layer invokes our remove callback.  No per-instance
-		 * detach is provided, so we must reach inside the PCI
-		 * subsystem's internals and detach our driver manually.
-		 */
-		if (ahc->dev_softc != NULL)
-			ahc->dev_softc->driver = NULL;
-#endif
+
 		free(ahc->platform_data, M_DEVBUF);
 	}
 }
@@ -1820,7 +1398,7 @@ ahc_platform_set_tags(struct ahc_softc *ahc, struct ahc_devinfo *devinfo,
 
 	dev = ahc_linux_get_device(ahc, devinfo->channel - 'A',
 				   devinfo->target,
-				   devinfo->lun, /*alloc*/FALSE);
+				   devinfo->lun);
 	if (dev == NULL)
 		return;
 	was_queuing = dev->flags & (AHC_DEV_Q_BASIC|AHC_DEV_Q_TAGGED);
@@ -1873,7 +1451,6 @@ ahc_platform_set_tags(struct ahc_softc *ahc, struct ahc_devinfo *devinfo,
 		dev->maxtags = 0;
 		dev->openings = 1 - dev->active;
 	}
-#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,5,0)
 	if (dev->scsi_device != NULL) {
 		switch ((dev->flags & (AHC_DEV_Q_BASIC|AHC_DEV_Q_TAGGED))) {
 		case AHC_DEV_Q_BASIC:
@@ -1899,90 +1476,13 @@ ahc_platform_set_tags(struct ahc_softc *ahc, struct ahc_devinfo *devinfo,
 			break;
 		}
 	}
-#endif
 }
 
 int
 ahc_platform_abort_scbs(struct ahc_softc *ahc, int target, char channel,
 			int lun, u_int tag, role_t role, uint32_t status)
 {
-	int chan;
-	int maxchan;
-	int targ;
-	int maxtarg;
-	int clun;
-	int maxlun;
-	int count;
-
-	if (tag != SCB_LIST_NULL)
-		return (0);
-
-	chan = 0;
-	if (channel != ALL_CHANNELS) {
-		chan = channel - 'A';
-		maxchan = chan + 1;
-	} else {
-		maxchan = (ahc->features & AHC_TWIN) ? 2 : 1;
-	}
-	targ = 0;
-	if (target != CAM_TARGET_WILDCARD) {
-		targ = target;
-		maxtarg = targ + 1;
-	} else {
-		maxtarg = (ahc->features & AHC_WIDE) ? 16 : 8;
-	}
-	clun = 0;
-	if (lun != CAM_LUN_WILDCARD) {
-		clun = lun;
-		maxlun = clun + 1;
-	} else {
-		maxlun = AHC_NUM_LUNS;
-	}
-
-	count = 0;
-	for (; chan < maxchan; chan++) {
-
-		for (; targ < maxtarg; targ++) {
-
-			for (; clun < maxlun; clun++) {
-				struct ahc_linux_device *dev;
-				struct ahc_busyq *busyq;
-				struct ahc_cmd *acmd;
-
-				dev = ahc_linux_get_device(ahc, chan,
-							   targ, clun,
-							   /*alloc*/FALSE);
-				if (dev == NULL)
-					continue;
-
-				busyq = &dev->busyq;
-				while ((acmd = TAILQ_FIRST(busyq)) != NULL) {
-					Scsi_Cmnd *cmd;
-
-					cmd = &acmd_scsi_cmd(acmd);
-					TAILQ_REMOVE(busyq, acmd,
-						     acmd_links.tqe);
-					count++;
-					cmd->result = status << 16;
-					ahc_linux_queue_cmd_complete(ahc, cmd);
-				}
-			}
-		}
-	}
-
-	return (count);
-}
-
-static void
-ahc_linux_thread_run_complete_queue(struct ahc_softc *ahc)
-{
-	u_long flags;
-
-	ahc_lock(ahc, &flags);
-	del_timer(&ahc->platform_data->completeq_timer);
-	ahc->platform_data->flags &= ~AHC_RUN_CMPLT_Q_TIMER;
-	ahc_linux_run_complete_queue(ahc);
-	ahc_unlock(ahc, &flags);
+	return 0;
 }
 
 static u_int
@@ -2045,213 +1545,200 @@ ahc_linux_device_queue_depth(struct ahc_softc *ahc,
 	}
 }
 
-static void
-ahc_linux_run_device_queue(struct ahc_softc *ahc, struct ahc_linux_device *dev)
-{
-	struct ahc_cmd *acmd;
-	struct scsi_cmnd *cmd;
-	struct scb *scb;
-	struct hardware_scb *hscb;
-	struct ahc_initiator_tinfo *tinfo;
-	struct ahc_tmode_tstate *tstate;
-	uint16_t mask;
-
-	if ((dev->flags & AHC_DEV_ON_RUN_LIST) != 0)
-		panic("running device on run list");
-
-	while ((acmd = TAILQ_FIRST(&dev->busyq)) != NULL
-	    && dev->openings > 0 && dev->qfrozen == 0) {
-
-		/*
-		 * Schedule us to run later.  The only reason we are not
-		 * running is because the whole controller Q is frozen.
-		 */
-		if (ahc->platform_data->qfrozen != 0) {
-			TAILQ_INSERT_TAIL(&ahc->platform_data->device_runq,
-					  dev, links);
-			dev->flags |= AHC_DEV_ON_RUN_LIST;
-			return;
-		}
-		/*
-		 * Get an scb to use.
-		 */
-		if ((scb = ahc_get_scb(ahc)) == NULL) {
-			TAILQ_INSERT_TAIL(&ahc->platform_data->device_runq,
-					  dev, links);
-			dev->flags |= AHC_DEV_ON_RUN_LIST;
-			ahc->flags |= AHC_RESOURCE_SHORTAGE;
-			return;
-		}
-		TAILQ_REMOVE(&dev->busyq, acmd, acmd_links.tqe);
-		cmd = &acmd_scsi_cmd(acmd);
-		scb->io_ctx = cmd;
-		scb->platform_data->dev = dev;
-		hscb = scb->hscb;
-		cmd->host_scribble = (char *)scb;
-
-		/*
-		 * Fill out basics of the HSCB.
-		 */
-		hscb->control = 0;
-		hscb->scsiid = BUILD_SCSIID(ahc, cmd);
-		hscb->lun = cmd->device->lun;
-		mask = SCB_GET_TARGET_MASK(ahc, scb);
-		tinfo = ahc_fetch_transinfo(ahc, SCB_GET_CHANNEL(ahc, scb),
-					    SCB_GET_OUR_ID(scb),
-					    SCB_GET_TARGET(ahc, scb), &tstate);
-		hscb->scsirate = tinfo->scsirate;
-		hscb->scsioffset = tinfo->curr.offset;
-		if ((tstate->ultraenb & mask) != 0)
-			hscb->control |= ULTRAENB;
-
-		if ((ahc->user_discenable & mask) != 0)
-			hscb->control |= DISCENB;
-
-		if ((tstate->auto_negotiate & mask) != 0) {
-			scb->flags |= SCB_AUTO_NEGOTIATE;
-			scb->hscb->control |= MK_MESSAGE;
-		}
-
-		if ((dev->flags & (AHC_DEV_Q_TAGGED|AHC_DEV_Q_BASIC)) != 0) {
-#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,5,0)
-			int	msg_bytes;
-			uint8_t tag_msgs[2];
-
-			msg_bytes = scsi_populate_tag_msg(cmd, tag_msgs);
-			if (msg_bytes && tag_msgs[0] != MSG_SIMPLE_TASK) {
-				hscb->control |= tag_msgs[0];
-				if (tag_msgs[0] == MSG_ORDERED_TASK)
-					dev->commands_since_idle_or_otag = 0;
-			} else
-#endif
-			if (dev->commands_since_idle_or_otag == AHC_OTAG_THRESH
-			 && (dev->flags & AHC_DEV_Q_TAGGED) != 0) {
-				hscb->control |= MSG_ORDERED_TASK;
-				dev->commands_since_idle_or_otag = 0;
-			} else {
-				hscb->control |= MSG_SIMPLE_TASK;
-			}
-		}
-
-		hscb->cdb_len = cmd->cmd_len;
-		if (hscb->cdb_len <= 12) {
-			memcpy(hscb->shared_data.cdb, cmd->cmnd, hscb->cdb_len);
-		} else {
-			memcpy(hscb->cdb32, cmd->cmnd, hscb->cdb_len);
-			scb->flags |= SCB_CDB32_PTR;
-		}
-
-		scb->platform_data->xfer_len = 0;
-		ahc_set_residual(scb, 0);
-		ahc_set_sense_residual(scb, 0);
-		scb->sg_count = 0;
-		if (cmd->use_sg != 0) {
-			struct	ahc_dma_seg *sg;
-			struct	scatterlist *cur_seg;
-			struct	scatterlist *end_seg;
-			int	nseg;
-
-			cur_seg = (struct scatterlist *)cmd->request_buffer;
-			nseg = pci_map_sg(ahc->dev_softc, cur_seg, cmd->use_sg,
-					  cmd->sc_data_direction);
-			end_seg = cur_seg + nseg;
-			/* Copy the segments into the SG list. */
-			sg = scb->sg_list;
-			/*
-			 * The sg_count may be larger than nseg if
-			 * a transfer crosses a 32bit page.
-			 */
-			while (cur_seg < end_seg) {
-				dma_addr_t addr;
-				bus_size_t len;
-				int consumed;
-
-				addr = sg_dma_address(cur_seg);
-				len = sg_dma_len(cur_seg);
-				consumed = ahc_linux_map_seg(ahc, scb,
-							     sg, addr, len);
-				sg += consumed;
-				scb->sg_count += consumed;
-				cur_seg++;
-			}
-			sg--;
-			sg->len |= ahc_htole32(AHC_DMA_LAST_SEG);
-
-			/*
-			 * Reset the sg list pointer.
-			 */
-			scb->hscb->sgptr =
-			    ahc_htole32(scb->sg_list_phys | SG_FULL_RESID);
-
-			/*
-			 * Copy the first SG into the "current"
-			 * data pointer area.
-			 */
-			scb->hscb->dataptr = scb->sg_list->addr;
-			scb->hscb->datacnt = scb->sg_list->len;
-		} else if (cmd->request_bufflen != 0) {
-			struct	 ahc_dma_seg *sg;
-			dma_addr_t addr;
-
-			sg = scb->sg_list;
-			addr = pci_map_single(ahc->dev_softc,
-					      cmd->request_buffer,
-					      cmd->request_bufflen,
-					      cmd->sc_data_direction);
-			scb->platform_data->buf_busaddr = addr;
-			scb->sg_count = ahc_linux_map_seg(ahc, scb,
-							  sg, addr,
-							  cmd->request_bufflen);
-			sg->len |= ahc_htole32(AHC_DMA_LAST_SEG);
-
-			/*
-			 * Reset the sg list pointer.
-			 */
-			scb->hscb->sgptr =
-			    ahc_htole32(scb->sg_list_phys | SG_FULL_RESID);
-
-			/*
-			 * Copy the first SG into the "current"
-			 * data pointer area.
-			 */
-			scb->hscb->dataptr = sg->addr;
-			scb->hscb->datacnt = sg->len;
-		} else {
-			scb->hscb->sgptr = ahc_htole32(SG_LIST_NULL);
-			scb->hscb->dataptr = 0;
-			scb->hscb->datacnt = 0;
-			scb->sg_count = 0;
-		}
-
-		ahc_sync_sglist(ahc, scb, BUS_DMASYNC_PREWRITE);
-		LIST_INSERT_HEAD(&ahc->pending_scbs, scb, pending_links);
-		dev->openings--;
-		dev->active++;
-		dev->commands_issued++;
-		if ((dev->flags & AHC_DEV_PERIODIC_OTAG) != 0)
-			dev->commands_since_idle_or_otag++;
-
-		/*
-		 * We only allow one untagged transaction
-		 * per target in the initiator role unless
-		 * we are storing a full busy target *lun*
-		 * table in SCB space.
-		 */
-		if ((scb->hscb->control & (TARGET_SCB|TAG_ENB)) == 0
-		 && (ahc->features & AHC_SCB_BTT) == 0) {
-			struct scb_tailq *untagged_q;
-			int target_offset;
-
-			target_offset = SCB_GET_TARGET_OFFSET(ahc, scb);
-			untagged_q = &(ahc->untagged_queues[target_offset]);
-			TAILQ_INSERT_TAIL(untagged_q, scb, links.tqe);
-			scb->flags |= SCB_UNTAGGEDQ;
-			if (TAILQ_FIRST(untagged_q) != scb)
-				continue;
-		}
-		scb->flags |= SCB_ACTIVE;
-		ahc_queue_scb(ahc, scb);
-	}
+static int
+ahc_linux_run_command(struct ahc_softc *ahc, struct ahc_linux_device *dev,
+		      struct scsi_cmnd *cmd)
+{
+	struct scb *scb;
+	struct hardware_scb *hscb;
+	struct ahc_initiator_tinfo *tinfo;
+	struct ahc_tmode_tstate *tstate;
+	uint16_t mask;
+	struct scb_tailq *untagged_q = NULL;
+
+	/*
+	 * Schedule us to run later.  The only reason we are not
+	 * running is because the whole controller Q is frozen.
+	 */
+	if (ahc->platform_data->qfrozen != 0)
+		return SCSI_MLQUEUE_HOST_BUSY;
+
+	/*
+	 * We only allow one untagged transaction
+	 * per target in the initiator role unless
+	 * we are storing a full busy target *lun*
+	 * table in SCB space.
+	 */
+	if (!blk_rq_tagged(cmd->request)
+	    && (ahc->features & AHC_SCB_BTT) == 0) {
+		int target_offset;
+
+		target_offset = cmd->device->id + cmd->device->channel * 8;
+		untagged_q = &(ahc->untagged_queues[target_offset]);
+		if (!TAILQ_EMPTY(untagged_q))
+			/* if we're already executing an untagged command
+			 * we're busy to another */
+			return SCSI_MLQUEUE_DEVICE_BUSY;
+	}
+
+	/*
+	 * Get an scb to use.
+	 */
+	if ((scb = ahc_get_scb(ahc)) == NULL) {
+		ahc->flags |= AHC_RESOURCE_SHORTAGE;
+		return SCSI_MLQUEUE_HOST_BUSY;
+	}
+
+	scb->io_ctx = cmd;
+	scb->platform_data->dev = dev;
+	hscb = scb->hscb;
+	cmd->host_scribble = (char *)scb;
+
+	/*
+	 * Fill out basics of the HSCB.
+	 */
+	hscb->control = 0;
+	hscb->scsiid = BUILD_SCSIID(ahc, cmd);
+	hscb->lun = cmd->device->lun;
+	mask = SCB_GET_TARGET_MASK(ahc, scb);
+	tinfo = ahc_fetch_transinfo(ahc, SCB_GET_CHANNEL(ahc, scb),
+				    SCB_GET_OUR_ID(scb),
+				    SCB_GET_TARGET(ahc, scb), &tstate);
+	hscb->scsirate = tinfo->scsirate;
+	hscb->scsioffset = tinfo->curr.offset;
+	if ((tstate->ultraenb & mask) != 0)
+		hscb->control |= ULTRAENB;
+
+	if ((ahc->user_discenable & mask) != 0)
+		hscb->control |= DISCENB;
+
+	if ((tstate->auto_negotiate & mask) != 0) {
+		scb->flags |= SCB_AUTO_NEGOTIATE;
+		scb->hscb->control |= MK_MESSAGE;
+	}
+
+	if ((dev->flags & (AHC_DEV_Q_TAGGED|AHC_DEV_Q_BASIC)) != 0) {
+		int	msg_bytes;
+		uint8_t tag_msgs[2];
+
+		msg_bytes = scsi_populate_tag_msg(cmd, tag_msgs);
+		if (msg_bytes && tag_msgs[0] != MSG_SIMPLE_TASK) {
+			hscb->control |= tag_msgs[0];
+			if (tag_msgs[0] == MSG_ORDERED_TASK)
+				dev->commands_since_idle_or_otag = 0;
+		} else if (dev->commands_since_idle_or_otag == AHC_OTAG_THRESH
+		    && (dev->flags & AHC_DEV_Q_TAGGED) != 0) {
+			hscb->control |= MSG_ORDERED_TASK;
+			dev->commands_since_idle_or_otag = 0;
+		} else {
+			hscb->control |= MSG_SIMPLE_TASK;
+		}
+	}
+
+	hscb->cdb_len = cmd->cmd_len;
+	if (hscb->cdb_len <= 12) {
+		memcpy(hscb->shared_data.cdb, cmd->cmnd, hscb->cdb_len);
+	} else {
+		memcpy(hscb->cdb32, cmd->cmnd, hscb->cdb_len);
+		scb->flags |= SCB_CDB32_PTR;
+	}
+
+	scb->platform_data->xfer_len = 0;
+	ahc_set_residual(scb, 0);
+	ahc_set_sense_residual(scb, 0);
+	scb->sg_count = 0;
+	if (cmd->use_sg != 0) {
+		struct	ahc_dma_seg *sg;
+		struct	scatterlist *cur_seg;
+		struct	scatterlist *end_seg;
+		int	nseg;
+
+		cur_seg = (struct scatterlist *)cmd->request_buffer;
+		nseg = pci_map_sg(ahc->dev_softc, cur_seg, cmd->use_sg,
+				  cmd->sc_data_direction);
+		end_seg = cur_seg + nseg;
+		/* Copy the segments into the SG list. */
+		sg = scb->sg_list;
+		/*
+		 * The sg_count may be larger than nseg if
+		 * a transfer crosses a 32bit page.
+		 */
+		while (cur_seg < end_seg) {
+			dma_addr_t addr;
+			bus_size_t len;
+			int consumed;
+
+			addr = sg_dma_address(cur_seg);
+			len = sg_dma_len(cur_seg);
+			consumed = ahc_linux_map_seg(ahc, scb,
+						     sg, addr, len);
+			sg += consumed;
+			scb->sg_count += consumed;
+			cur_seg++;
+		}
+		sg--;
+		sg->len |= ahc_htole32(AHC_DMA_LAST_SEG);
+
+		/*
+		 * Reset the sg list pointer.
+		 */
+		scb->hscb->sgptr =
+		    ahc_htole32(scb->sg_list_phys | SG_FULL_RESID);
+
+		/*
+		 * Copy the first SG into the "current"
+		 * data pointer area.
+		 */
+		scb->hscb->dataptr = scb->sg_list->addr;
+		scb->hscb->datacnt = scb->sg_list->len;
+	} else if (cmd->request_bufflen != 0) {
+		struct	 ahc_dma_seg *sg;
+		dma_addr_t addr;
+
+		sg = scb->sg_list;
+		addr = pci_map_single(ahc->dev_softc,
+				      cmd->request_buffer,
+				      cmd->request_bufflen,
+				      cmd->sc_data_direction);
+		scb->platform_data->buf_busaddr = addr;
+		scb->sg_count = ahc_linux_map_seg(ahc, scb,
+						  sg, addr,
+						  cmd->request_bufflen);
+		sg->len |= ahc_htole32(AHC_DMA_LAST_SEG);
+
+		/*
+		 * Reset the sg list pointer.
+		 */
+		scb->hscb->sgptr =
+		    ahc_htole32(scb->sg_list_phys | SG_FULL_RESID);
+
+		/*
+		 * Copy the first SG into the "current"
+		 * data pointer area.
+		 */
+		scb->hscb->dataptr = sg->addr;
+		scb->hscb->datacnt = sg->len;
+	} else {
+		scb->hscb->sgptr = ahc_htole32(SG_LIST_NULL);
+		scb->hscb->dataptr = 0;
+		scb->hscb->datacnt = 0;
+		scb->sg_count = 0;
+	}
+
+	LIST_INSERT_HEAD(&ahc->pending_scbs, scb, pending_links);
+	dev->openings--;
+	dev->active++;
+	dev->commands_issued++;
+	if ((dev->flags & AHC_DEV_PERIODIC_OTAG) != 0)
+		dev->commands_since_idle_or_otag++;
+
+	scb->flags |= SCB_ACTIVE;
+	if (untagged_q) {
+		TAILQ_INSERT_TAIL(untagged_q, scb, links.tqe);
+		scb->flags |= SCB_UNTAGGEDQ;
+	}
+	ahc_queue_scb(ahc, scb);
+	return 0;
+}
 
 /*
@@ -2267,9 +1754,6 @@ ahc_linux_isr(int irq, void *dev_id, struct pt_regs * regs)
 	ahc = (struct ahc_softc *) dev_id;
 	ahc_lock(ahc, &flags);
 	ours = ahc_intr(ahc);
-	if (ahc_linux_next_device_to_run(ahc) != NULL)
-		ahc_schedule_runq(ahc);
-	ahc_linux_run_complete_queue(ahc);
 	ahc_unlock(ahc, &flags);
 	return IRQ_RETVAL(ours);
 }
@@ -2278,8 +1762,6 @@ void
 ahc_platform_flushwork(struct ahc_softc *ahc)
 {
 
-	while (ahc_linux_run_complete_queue(ahc) != NULL)
-		;
 }
 
 static struct ahc_linux_target*
@@ -2348,9 +1830,6 @@ ahc_linux_alloc_device(struct ahc_softc *ahc,
 	if (dev == NULL)
 		return (NULL);
 	memset(dev, 0, sizeof(*dev));
-	init_timer(&dev->timer);
-	TAILQ_INIT(&dev->busyq);
-	dev->flags = AHC_DEV_UNCONFIGURED;
 	dev->lun = lun;
 	dev->target = targ;
 
@@ -2373,7 +1852,7 @@ ahc_linux_alloc_device(struct ahc_softc *ahc,
 }
 
 static void
-__ahc_linux_free_device(struct ahc_softc *ahc, struct ahc_linux_device *dev)
+ahc_linux_free_device(struct ahc_softc *ahc, struct ahc_linux_device *dev)
 {
 	struct ahc_linux_target *targ;
 
@@ -2385,13 +1864,6 @@ __ahc_linux_free_device(struct ahc_softc *ahc, struct ahc_linux_device *dev)
 		ahc_linux_free_target(ahc, targ);
 }
 
-static void
-ahc_linux_free_device(struct ahc_softc *ahc, struct ahc_linux_device *dev)
-{
-	del_timer_sync(&dev->timer);
-	__ahc_linux_free_device(ahc, dev);
-}
-
 void
 ahc_send_async(struct ahc_softc *ahc, char channel,
 	       u_int target, u_int lun, ac_code code, void *arg)
@@ -2463,28 +1935,9 @@ ahc_send_async(struct ahc_softc *ahc, char channel,
 	}
 	case AC_SENT_BDR:
 	{
-#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,5,0)
 		WARN_ON(lun != CAM_LUN_WILDCARD);
 		scsi_report_device_reset(ahc->platform_data->host,
 					 channel - 'A', target);
-#else
-		Scsi_Device *scsi_dev;
-
-		/*
-		 * Find the SCSI device associated with this
-		 * request and indicate that a UA is expected.
-		 */
-		for (scsi_dev = ahc->platform_data->host->host_queue;
-		     scsi_dev != NULL; scsi_dev = scsi_dev->next) {
-			if (channel - 'A' == scsi_dev->channel
-			 && target == scsi_dev->id
-			 && (lun == CAM_LUN_WILDCARD
-			  || lun == scsi_dev->lun)) {
-				scsi_dev->was_reset = 1;
-				scsi_dev->expecting_cc_ua = 1;
-			}
-		}
-#endif
 		break;
 	}
 	case AC_BUS_RESET:
@@ -2504,7 +1957,7 @@ ahc_send_async(struct ahc_softc *ahc, char channel,
 void
 ahc_done(struct ahc_softc *ahc, struct scb *scb)
 {
-	Scsi_Cmnd *cmd;
+	struct scsi_cmnd *cmd;
 	struct ahc_linux_device *dev;
 
 	LIST_REMOVE(scb, pending_links);
@@ -2515,7 +1968,7 @@ ahc_done(struct ahc_softc *ahc, struct scb *scb)
 		target_offset = SCB_GET_TARGET_OFFSET(ahc, scb);
 		untagged_q = &(ahc->untagged_queues[target_offset]);
 		TAILQ_REMOVE(untagged_q, scb, links.tqe);
-		ahc_run_untagged_queue(ahc, untagged_q);
+		BUG_ON(!TAILQ_EMPTY(untagged_q));
 	}
 
 	if ((scb->flags & SCB_ACTIVE) == 0) {
@@ -2583,8 +2036,6 @@ ahc_done(struct ahc_softc *ahc, struct scb *scb)
 		}
 	} else if (ahc_get_transaction_status(scb) == CAM_SCSI_STATUS_ERROR) {
 		ahc_linux_handle_scsi_status(ahc, dev, scb);
-	} else if (ahc_get_transaction_status(scb) == CAM_SEL_TIMEOUT) {
-		dev->flags |= AHC_DEV_UNCONFIGURED;
 	}
 
 	if (dev->openings == 1
@@ -2606,16 +2057,6 @@ ahc_done(struct ahc_softc *ahc, struct scb *scb)
 	if (dev->active == 0)
 		dev->commands_since_idle_or_otag = 0;
 
-	if (TAILQ_EMPTY(&dev->busyq)) {
-		if ((dev->flags & AHC_DEV_UNCONFIGURED) != 0
-		 && dev->active == 0
-		 && (dev->flags & AHC_DEV_TIMER_ACTIVE) == 0)
-			ahc_linux_free_device(ahc, dev);
-	} else if ((dev->flags & AHC_DEV_ON_RUN_LIST) == 0) {
-		TAILQ_INSERT_TAIL(&ahc->platform_data->device_runq, dev, links);
-		dev->flags |= AHC_DEV_ON_RUN_LIST;
-	}
-
 	if ((scb->flags & SCB_RECOVERY_SCB) != 0) {
 		printf("Recovery SCB completes\n");
 		if (ahc_get_transaction_status(scb) == CAM_BDR_SENT
@@ -2659,7 +2100,7 @@ ahc_linux_handle_scsi_status(struct ahc_softc *ahc,
 	case SCSI_STATUS_CHECK_COND:
 	case SCSI_STATUS_CMD_TERMINATED:
 	{
-		Scsi_Cmnd *cmd;
+		struct scsi_cmnd *cmd;
 
 		/*
 		 * Copy sense information to the OS's cmd
@@ -2754,52 +2195,15 @@ ahc_linux_handle_scsi_status(struct ahc_softc *ahc,
 			ahc_platform_set_tags(ahc, &devinfo,
 				      (dev->flags & AHC_DEV_Q_BASIC)
 				    ? AHC_QUEUE_BASIC : AHC_QUEUE_TAGGED);
-		/* FALLTHROUGH */
-	}
-	case SCSI_STATUS_BUSY:
-	{
-		/*
-		 * Set a short timer to defer sending commands for
-		 * a bit since Linux will not delay in this case.
-		 */
-		if ((dev->flags & AHC_DEV_TIMER_ACTIVE) != 0) {
-			printf("%s:%c:%d: Device Timer still active during "
-			       "busy processing\n", ahc_name(ahc),
-			       dev->target->channel, dev->target->target);
-			break;
-		}
-		dev->flags |= AHC_DEV_TIMER_ACTIVE;
-		dev->qfrozen++;
-		init_timer(&dev->timer);
-		dev->timer.data = (u_long)dev;
-		dev->timer.expires = jiffies + (HZ/2);
-		dev->timer.function = ahc_linux_dev_timed_unfreeze;
-		add_timer(&dev->timer);
 		break;
 	}
 	}
 }
 
 static void
-ahc_linux_queue_cmd_complete(struct ahc_softc *ahc, Scsi_Cmnd *cmd)
+ahc_linux_queue_cmd_complete(struct ahc_softc *ahc, struct scsi_cmnd *cmd)
 {
 	/*
-	 * Typically, the complete queue has very few entries
-	 * queued to it before the queue is emptied by
-	 * ahc_linux_run_complete_queue, so sorting the entries
-	 * by generation number should be inexpensive.
-	 * We perform the sort so that commands that complete
-	 * with an error are retuned in the order origionally
-	 * queued to the controller so that any subsequent retries
-	 * are performed in order.  The underlying ahc routines do
-	 * not guarantee the order that aborted commands will be
-	 * returned to us.
-	 */
-	struct ahc_completeq *completeq;
-	struct ahc_cmd *list_cmd;
-	struct ahc_cmd *acmd;
-
-	/*
 	 * Map CAM error codes into Linux Error codes. We
 	 * avoid the conversion so that the DV code has the
 	 * full error information available when making
@@ -2852,26 +2256,7 @@ ahc_linux_queue_cmd_complete(struct ahc_softc *ahc, Scsi_Cmnd *cmd)
 			new_status = DID_ERROR;
 			break;
 		case CAM_REQUEUE_REQ:
-			/*
-			 * If we want the request requeued, make sure there
-			 * are sufficent retries.  In the old scsi error code,
-			 * we used to be able to specify a result code that
-			 * bypassed the retry count.  Now we must use this
-			 * hack.  We also "fake" a check condition with
-			 * a sense code of ABORTED COMMAND.  This seems to
-			 * evoke a retry even if this command is being sent
-			 * via the eh thread.  Ick!  Ick!  Ick!
-			 */
-			if (cmd->retries > 0)
-				cmd->retries--;
-			new_status = DID_OK;
-			ahc_cmd_set_scsi_status(cmd, SCSI_STATUS_CHECK_COND);
-			cmd->result |= (DRIVER_SENSE << 24);
-			memset(cmd->sense_buffer, 0,
-			       sizeof(cmd->sense_buffer));
-			cmd->sense_buffer[0] = SSD_ERRCODE_VALID
-					     | SSD_CURRENT_ERROR;
-			cmd->sense_buffer[2] = SSD_KEY_ABORTED_COMMAND;
+			new_status = DID_REQUEUE;
 			break;
 		default:
 			/* We should never get here */
@@ -2882,17 +2267,7 @@ ahc_linux_queue_cmd_complete(struct ahc_softc *ahc, Scsi_Cmnd *cmd)
 		ahc_cmd_set_transaction_status(cmd, new_status);
 	}
 
-	completeq = &ahc->platform_data->completeq;
-	list_cmd = TAILQ_FIRST(completeq);
-	acmd = (struct ahc_cmd *)cmd;
-	while (list_cmd != NULL
-	    && acmd_scsi_cmd(list_cmd).serial_number
-	     < acmd_scsi_cmd(acmd).serial_number)
-		list_cmd = TAILQ_NEXT(list_cmd, acmd_links.tqe);
-	if (list_cmd != NULL)
-		TAILQ_INSERT_BEFORE(list_cmd, acmd, acmd_links.tqe);
-	else
-		TAILQ_INSERT_TAIL(completeq, acmd, acmd_links.tqe);
+	cmd->scsi_done(cmd);
 }
 
 static void
@@ -2940,7 +2315,6 @@ ahc_linux_release_simq(u_long arg)
 	ahc->platform_data->qfrozen--;
 	if (ahc->platform_data->qfrozen == 0)
 		unblock_reqs = 1;
-	ahc_schedule_runq(ahc);
 	ahc_unlock(ahc, &s);
 	/*
 	 * There is still a race here.  The mid-layer
@@ -2952,37 +2326,12 @@ ahc_linux_release_simq(u_long arg)
 	scsi_unblock_requests(ahc->platform_data->host);
 }
 
-static void
-ahc_linux_dev_timed_unfreeze(u_long arg)
-{
-	struct ahc_linux_device *dev;
-	struct ahc_softc *ahc;
-	u_long s;
-
-	dev = (struct ahc_linux_device *)arg;
-	ahc = dev->target->ahc;
-	ahc_lock(ahc, &s);
-	dev->flags &= ~AHC_DEV_TIMER_ACTIVE;
-	if (dev->qfrozen > 0)
-		dev->qfrozen--;
-	if (dev->qfrozen == 0
-	 && (dev->flags & AHC_DEV_ON_RUN_LIST) == 0)
-		ahc_linux_run_device_queue(ahc, dev);
-	if (TAILQ_EMPTY(&dev->busyq)
-	 && dev->active == 0)
-		__ahc_linux_free_device(ahc, dev);
-	ahc_unlock(ahc, &s);
-}
-
 static int
-ahc_linux_queue_recovery_cmd(Scsi_Cmnd *cmd, scb_flag flag)
+ahc_linux_queue_recovery_cmd(struct scsi_cmnd *cmd, scb_flag flag)
 {
 	struct ahc_softc *ahc;
-	struct ahc_cmd *acmd;
-	struct ahc_cmd *list_acmd;
 	struct ahc_linux_device *dev;
 	struct scb *pending_scb;
-	u_long s;
 	u_int  saved_scbptr;
 	u_int  active_scb_index;
 	u_int  last_phase;
@@ -2998,7 +2347,6 @@ ahc_linux_queue_recovery_cmd(Scsi_Cmnd *cmd, scb_flag flag)
 	paused = FALSE;
 	wait = FALSE;
 	ahc = *(struct ahc_softc **)cmd->device->host->hostdata;
-	acmd = (struct ahc_cmd *)cmd;
 
 	printf("%s:%d:%d:%d: Attempting to queue a%s message\n",
 	       ahc_name(ahc), cmd->device->channel,
@@ -3011,22 +2359,6 @@ ahc_linux_queue_recovery_cmd(Scsi_Cmnd *cmd, scb_flag flag)
 	printf("\n");
 
 	/*
-	 * In all versions of Linux, we have to work around
-	 * a major flaw in how the mid-layer is locked down
-	 * if we are to sleep successfully in our error handler
-	 * while allowing our interrupt handler to run.  Since
-	 * the midlayer acquires either the io_request_lock or
-	 * our lock prior to calling us, we must use the
-	 * spin_unlock_irq() method for unlocking our lock.
-	 * This will force interrupts to be enabled on the
-	 * current CPU.  Since the EH thread should not have
-	 * been running with CPU interrupts disabled other than
-	 * by acquiring either the io_request_lock or our own
-	 * lock, this *should* be safe.
-	 */
-	ahc_midlayer_entrypoint_lock(ahc, &s);
-
-	/*
 	 * First determine if we currently own this command.
 	 * Start by searching the device queue.  If not found
 	 * there, check the pending_scb list.  If not found
@@ -3034,7 +2366,7 @@ ahc_linux_queue_recovery_cmd(Scsi_Cmnd *cmd, scb_flag flag)
 	 * command, return success.
 	 */
 	dev = ahc_linux_get_device(ahc, cmd->device->channel, cmd->device->id,
-				   cmd->device->lun, /*alloc*/FALSE);
+				   cmd->device->lun);
 
 	if (dev == NULL) {
 		/*
@@ -3048,24 +2380,6 @@ ahc_linux_queue_recovery_cmd(Scsi_Cmnd *cmd, scb_flag flag)
 		goto no_cmd;
 	}
 
-	TAILQ_FOREACH(list_acmd, &dev->busyq, acmd_links.tqe) {
-		if (list_acmd == acmd)
-			break;
-	}
-
-	if (list_acmd != NULL) {
-		printf("%s:%d:%d:%d: Command found on device queue\n",
-		       ahc_name(ahc), cmd->device->channel, cmd->device->id,
-		       cmd->device->lun);
-		if (flag == SCB_ABORT) {
-			TAILQ_REMOVE(&dev->busyq, list_acmd, acmd_links.tqe);
-			cmd->result = DID_ABORT << 16;
-			ahc_linux_queue_cmd_complete(ahc, cmd);
-			retval = SUCCESS;
-			goto done;
-		}
-	}
-
 	if ((dev->flags & (AHC_DEV_Q_BASIC|AHC_DEV_Q_TAGGED)) == 0
 	 && ahc_search_untagged_queues(ahc, cmd, cmd->device->id,
 				       cmd->device->channel + 'A',
@@ -3299,53 +2613,42 @@ done:
 		}
 		spin_lock_irq(&ahc->platform_data->spin_lock);
 	}
-	ahc_schedule_runq(ahc);
-	ahc_linux_run_complete_queue(ahc);
-	ahc_midlayer_entrypoint_unlock(ahc, &s);
 	return (retval);
 }
 
 void
 ahc_platform_dump_card_state(struct ahc_softc *ahc)
 {
-	struct ahc_linux_device *dev;
-	int channel;
-	int maxchannel;
-	int target;
-	int maxtarget;
-	int lun;
-	int i;
-
-	maxchannel = (ahc->features & AHC_TWIN) ? 1 : 0;
-	maxtarget = (ahc->features & AHC_WIDE) ? 15 : 7;
-	for (channel = 0; channel <= maxchannel; channel++) {
-
-		for (target = 0; target <=maxtarget; target++) {
-
-			for (lun = 0; lun < AHC_NUM_LUNS; lun++) {
-				struct ahc_cmd *acmd;
-
-				dev = ahc_linux_get_device(ahc, channel, target,
-							   lun, /*alloc*/FALSE);
-				if (dev == NULL)
-					continue;
-
-				printf("DevQ(%d:%d:%d): ",
-				       channel, target, lun);
-				i = 0;
-				TAILQ_FOREACH(acmd, &dev->busyq,
-					      acmd_links.tqe) {
-					if (i++ > AHC_SCB_MAX)
-						break;
-				}
-				printf("%d waiting\n", i);
-			}
-		}
-	}
+}
+
+static void ahc_linux_exit(void);
+
+static void ahc_linux_get_width(struct scsi_target *starget)
+{
+	struct Scsi_Host *shost = dev_to_shost(starget->dev.parent);
+	struct ahc_softc *ahc = *((struct ahc_softc **)shost->hostdata);
+	struct ahc_tmode_tstate *tstate;
+	struct ahc_initiator_tinfo *tinfo
+		= ahc_fetch_transinfo(ahc,
+				      starget->channel + 'A',
+				      shost->this_id, starget->id, &tstate);
+	spi_width(starget) = tinfo->curr.width;
+}
+
+static void ahc_linux_set_width(struct scsi_target *starget, int width)
+{
+	struct Scsi_Host *shost = dev_to_shost(starget->dev.parent);
+	struct ahc_softc *ahc = *((struct ahc_softc **)shost->hostdata);
+	struct ahc_devinfo devinfo;
+	unsigned long flags;
+
+	ahc_compile_devinfo(&devinfo, shost->this_id, starget->id, 0,
+			    starget->channel + 'A', ROLE_INITIATOR);
+	ahc_lock(ahc, &flags);
+	ahc_set_width(ahc, &devinfo, width, AHC_TRANS_GOAL, FALSE);
+	ahc_unlock(ahc, &flags);
 }
 
-static void ahc_linux_exit(void);
-
 static void ahc_linux_get_period(struct scsi_target *starget)
 {
 	struct Scsi_Host *shost = dev_to_shost(starget->dev.parent);
@@ -3376,8 +2679,21 @@ static void ahc_linux_set_period(struct scsi_target *starget, int period)
 	if (offset == 0)
 		offset = MAX_OFFSET;
 
+	if (period < 9)
+		period = 9;	/* 12.5ns is our minimum */
+	if (period == 9)
+		ppr_options |= MSG_EXT_PPR_DT_REQ;
+
 	ahc_compile_devinfo(&devinfo, shost->this_id, starget->id, 0,
 			    starget->channel + 'A', ROLE_INITIATOR);
+
+	/* all PPR requests apart from QAS require wide transfers */
+	if (ppr_options & ~MSG_EXT_PPR_QAS_REQ) {
+		ahc_linux_get_width(starget);
+		if (spi_width(starget) == 0)
+			ppr_options &= MSG_EXT_PPR_QAS_REQ;
+	}
+
 	syncrate = ahc_find_syncrate(ahc, &period, &ppr_options, AHC_SYNCRATE_DT);
 	ahc_lock(ahc, &flags);
 	ahc_set_syncrate(ahc, &devinfo, syncrate, period, offset,
@@ -3425,32 +2741,6 @@ static void ahc_linux_set_offset(struct scsi_target *starget, int offset)
 	ahc_unlock(ahc, &flags);
 }
 
-static void ahc_linux_get_width(struct scsi_target *starget)
-{
-	struct Scsi_Host *shost = dev_to_shost(starget->dev.parent);
-	struct ahc_softc *ahc = *((struct ahc_softc **)shost->hostdata);
-	struct ahc_tmode_tstate *tstate;
-	struct ahc_initiator_tinfo *tinfo
-		= ahc_fetch_transinfo(ahc,
-				      starget->channel + 'A',
-				      shost->this_id, starget->id, &tstate);
-	spi_width(starget) = tinfo->curr.width;
-}
-
-static void ahc_linux_set_width(struct scsi_target *starget, int width)
-{
-	struct Scsi_Host *shost = dev_to_shost(starget->dev.parent);
-	struct ahc_softc *ahc = *((struct ahc_softc **)shost->hostdata);
-	struct ahc_devinfo devinfo;
-	unsigned long flags;
-
-	ahc_compile_devinfo(&devinfo, shost->this_id, starget->id, 0,
-			    starget->channel + 'A', ROLE_INITIATOR);
-	ahc_lock(ahc, &flags);
-	ahc_set_width(ahc, &devinfo, width, AHC_TRANS_GOAL, FALSE);
-	ahc_unlock(ahc, &flags);
-}
-
 static void ahc_linux_get_dt(struct scsi_target *starget)
 {
 	struct Scsi_Host *shost = dev_to_shost(starget->dev.parent);
@@ -3479,10 +2769,15 @@ static void ahc_linux_set_dt(struct scsi_target *starget, int dt)
 	unsigned long flags;
 	struct ahc_syncrate *syncrate;
 
+	if (dt) {
+		period = 9;	/* 12.5ns is the only period valid for DT */
+		ppr_options |= MSG_EXT_PPR_DT_REQ;
+	} else if (period == 9)
+		period = 10;	/* if resetting DT, period must be >= 25ns */
+
 	ahc_compile_devinfo(&devinfo, shost->this_id, starget->id, 0,
 			    starget->channel + 'A', ROLE_INITIATOR);
-	syncrate = ahc_find_syncrate(ahc, &period, &ppr_options,
-				     dt ? AHC_SYNCRATE_DT : AHC_SYNCRATE_ULTRA2);
+	syncrate = ahc_find_syncrate(ahc, &period, &ppr_options,AHC_SYNCRATE_DT);
 	ahc_lock(ahc, &flags);
 	ahc_set_syncrate(ahc, &devinfo, syncrate, period, tinfo->curr.offset,
 			 ppr_options, AHC_TRANS_GOAL, FALSE);
@@ -3514,7 +2809,6 @@ static void ahc_linux_set_qas(struct scsi_target *starget, int qas)
 	unsigned int ppr_options = tinfo->curr.ppr_options
 		& ~MSG_EXT_PPR_QAS_REQ;
 	unsigned int period = tinfo->curr.period;
-	unsigned int dt = ppr_options & MSG_EXT_PPR_DT_REQ;
 	unsigned long flags;
 	struct ahc_syncrate *syncrate;
 
@@ -3523,8 +2817,7 @@ static void ahc_linux_set_qas(struct scsi_target *starget, int qas)
 
 	ahc_compile_devinfo(&devinfo, shost->this_id, starget->id, 0,
 			    starget->channel + 'A', ROLE_INITIATOR);
-	syncrate = ahc_find_syncrate(ahc, &period, &ppr_options,
-				     dt ? AHC_SYNCRATE_DT : AHC_SYNCRATE_ULTRA2);
+	syncrate = ahc_find_syncrate(ahc, &period, &ppr_options, AHC_SYNCRATE_DT);
 	ahc_lock(ahc, &flags);
 	ahc_set_syncrate(ahc, &devinfo, syncrate, period, tinfo->curr.offset,
 			 ppr_options, AHC_TRANS_GOAL, FALSE);
@@ -3556,7 +2849,6 @@ static void ahc_linux_set_iu(struct scsi_target *starget, int iu)
 	unsigned int ppr_options = tinfo->curr.ppr_options
 		& ~MSG_EXT_PPR_IU_REQ;
 	unsigned int period = tinfo->curr.period;
-	unsigned int dt = ppr_options & MSG_EXT_PPR_DT_REQ;
 	unsigned long flags;
 	struct ahc_syncrate *syncrate;
 
@@ -3565,8 +2857,7 @@ static void ahc_linux_set_iu(struct scsi_target *starget, int iu)
 
 	ahc_compile_devinfo(&devinfo, shost->this_id, starget->id, 0,
 			    starget->channel + 'A', ROLE_INITIATOR);
-	syncrate = ahc_find_syncrate(ahc, &period, &ppr_options,
-				     dt ? AHC_SYNCRATE_DT : AHC_SYNCRATE_ULTRA2);
+	syncrate = ahc_find_syncrate(ahc, &period, &ppr_options, AHC_SYNCRATE_DT);
 	ahc_lock(ahc, &flags);
 	ahc_set_syncrate(ahc, &devinfo, syncrate, period, tinfo->curr.offset,
 			 ppr_options, AHC_TRANS_GOAL, FALSE);
@@ -3599,7 +2890,6 @@ static struct spi_function_template ahc_linux_transport_functions = {
 static int __init
 ahc_linux_init(void)
 {
-#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,5,0)
 	ahc_linux_transport_template = spi_attach_transport(&ahc_linux_transport_functions);
 	if (!ahc_linux_transport_template)
 		return -ENODEV;
@@ -3608,29 +2898,11 @@ ahc_linux_init(void)
 	spi_release_transport(ahc_linux_transport_template);
 	ahc_linux_exit();
 	return -ENODEV;
-#else
-	scsi_register_module(MODULE_SCSI_HA, &aic7xxx_driver_template);
-	if (aic7xxx_driver_template.present == 0) {
-		scsi_unregister_module(MODULE_SCSI_HA,
-				       &aic7xxx_driver_template);
-		return (-ENODEV);
-	}
-
-	return (0);
-#endif
 }
 
 static void
 ahc_linux_exit(void)
 {
-#if LINUX_VERSION_CODE < KERNEL_VERSION(2,5,0)
-	/*
-	 * In 2.4 we have to unregister from the PCI core _after_
-	 * unregistering from the scsi midlayer to avoid dangling
-	 * references.
-	 */
-	scsi_unregister_module(MODULE_SCSI_HA, &aic7xxx_driver_template);
-#endif
	ahc_linux_pci_exit();
 	ahc_linux_eisa_exit();
 	spi_release_transport(ahc_linux_transport_template);
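
[Editor's note: taken together, the aic7xxx_osm.c hunks above delete the driver-private busyq/device_runq/completeq machinery and lean on the 2.6 midlayer instead: the queue path now refuses work with SCSI_MLQUEUE_* codes and completes commands directly through cmd->scsi_done(). Below is a minimal sketch of that flow-control pattern; my_host, my_dev, my_dev_lookup() and my_start_io() are hypothetical stand-ins for a driver's real bookkeeping, while the SCSI_MLQUEUE_* return codes and the queuecommand() signature are the real 2.6-era midlayer interface.]

static int my_queuecommand(struct scsi_cmnd *cmd,
			   void (*done)(struct scsi_cmnd *))
{
	struct my_host *h = (struct my_host *)cmd->device->host->hostdata;
	struct my_dev *d = my_dev_lookup(h, cmd->device);

	/* Controller-wide congestion: the midlayer requeues and retries
	 * the command later, so no private run queue is needed. */
	if (h->frozen)
		return SCSI_MLQUEUE_HOST_BUSY;

	/* Per-device congestion: only this device's queue is plugged. */
	if (d == NULL || d->openings == 0)
		return SCSI_MLQUEUE_DEVICE_BUSY;

	cmd->scsi_done = done;		/* completion path, no sorting queue */
	return my_start_io(h, d, cmd);	/* 0 on success */
}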
diff --git a/drivers/scsi/aic7xxx/aic7xxx_osm.h b/drivers/scsi/aic7xxx/aic7xxx_osm.h
index ed9027bd8a40..30c200d5bcd5 100644
--- a/drivers/scsi/aic7xxx/aic7xxx_osm.h
+++ b/drivers/scsi/aic7xxx/aic7xxx_osm.h
@@ -59,6 +59,7 @@
59#ifndef _AIC7XXX_LINUX_H_ 59#ifndef _AIC7XXX_LINUX_H_
60#define _AIC7XXX_LINUX_H_ 60#define _AIC7XXX_LINUX_H_
61 61
62#include <linux/config.h>
62#include <linux/types.h> 63#include <linux/types.h>
63#include <linux/blkdev.h> 64#include <linux/blkdev.h>
64#include <linux/delay.h> 65#include <linux/delay.h>
@@ -66,18 +67,21 @@
66#include <linux/pci.h> 67#include <linux/pci.h>
67#include <linux/smp_lock.h> 68#include <linux/smp_lock.h>
68#include <linux/version.h> 69#include <linux/version.h>
70#include <linux/interrupt.h>
69#include <linux/module.h> 71#include <linux/module.h>
72#include <linux/slab.h>
70#include <asm/byteorder.h> 73#include <asm/byteorder.h>
71#include <asm/io.h> 74#include <asm/io.h>
72 75
73#include <linux/interrupt.h> /* For tasklet support. */ 76#include <scsi/scsi.h>
74#include <linux/config.h> 77#include <scsi/scsi_cmnd.h>
75#include <linux/slab.h> 78#include <scsi/scsi_eh.h>
79#include <scsi/scsi_device.h>
80#include <scsi/scsi_host.h>
81#include <scsi/scsi_tcq.h>
76 82
77/* Core SCSI definitions */ 83/* Core SCSI definitions */
78#define AIC_LIB_PREFIX ahc 84#define AIC_LIB_PREFIX ahc
79#include "scsi.h"
80#include <scsi/scsi_host.h>
81 85
82/* Name space conflict with BSD queue macros */ 86/* Name space conflict with BSD queue macros */
83#ifdef LIST_HEAD 87#ifdef LIST_HEAD
@@ -106,7 +110,7 @@
106/************************* Forward Declarations *******************************/ 110/************************* Forward Declarations *******************************/
107struct ahc_softc; 111struct ahc_softc;
108typedef struct pci_dev *ahc_dev_softc_t; 112typedef struct pci_dev *ahc_dev_softc_t;
109typedef Scsi_Cmnd *ahc_io_ctx_t; 113typedef struct scsi_cmnd *ahc_io_ctx_t;
110 114
111/******************************* Byte Order ***********************************/ 115/******************************* Byte Order ***********************************/
112#define ahc_htobe16(x) cpu_to_be16(x) 116#define ahc_htobe16(x) cpu_to_be16(x)
@@ -144,7 +148,7 @@ typedef Scsi_Cmnd *ahc_io_ctx_t;
144extern u_int aic7xxx_no_probe; 148extern u_int aic7xxx_no_probe;
145extern u_int aic7xxx_allow_memio; 149extern u_int aic7xxx_allow_memio;
146extern int aic7xxx_detect_complete; 150extern int aic7xxx_detect_complete;
147extern Scsi_Host_Template aic7xxx_driver_template; 151extern struct scsi_host_template aic7xxx_driver_template;
148 152
149/***************************** Bus Space/DMA **********************************/ 153/***************************** Bus Space/DMA **********************************/
150 154
@@ -174,11 +178,7 @@ struct ahc_linux_dma_tag
174}; 178};
175typedef struct ahc_linux_dma_tag* bus_dma_tag_t; 179typedef struct ahc_linux_dma_tag* bus_dma_tag_t;
176 180
177struct ahc_linux_dmamap 181typedef dma_addr_t bus_dmamap_t;
178{
179 dma_addr_t bus_addr;
180};
181typedef struct ahc_linux_dmamap* bus_dmamap_t;
182 182
183typedef int bus_dma_filter_t(void*, dma_addr_t); 183typedef int bus_dma_filter_t(void*, dma_addr_t);
184typedef void bus_dmamap_callback_t(void *, bus_dma_segment_t *, int, int); 184typedef void bus_dmamap_callback_t(void *, bus_dma_segment_t *, int, int);
@@ -281,12 +281,6 @@ ahc_scb_timer_reset(struct scb *scb, u_int usec)
281/***************************** SMP support ************************************/ 281/***************************** SMP support ************************************/
282#include <linux/spinlock.h> 282#include <linux/spinlock.h>
283 283
284#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,5,0) || defined(SCSI_HAS_HOST_LOCK))
285#define AHC_SCSI_HAS_HOST_LOCK 1
286#else
287#define AHC_SCSI_HAS_HOST_LOCK 0
288#endif
289
290#define AIC7XXX_DRIVER_VERSION "6.2.36" 284#define AIC7XXX_DRIVER_VERSION "6.2.36"
291 285
292/**************************** Front End Queues ********************************/ 286/**************************** Front End Queues ********************************/
@@ -328,20 +322,15 @@ struct ahc_cmd {
328 */ 322 */
329TAILQ_HEAD(ahc_busyq, ahc_cmd); 323TAILQ_HEAD(ahc_busyq, ahc_cmd);
330typedef enum { 324typedef enum {
331 AHC_DEV_UNCONFIGURED = 0x01,
332 AHC_DEV_FREEZE_TIL_EMPTY = 0x02, /* Freeze queue until active == 0 */ 325 AHC_DEV_FREEZE_TIL_EMPTY = 0x02, /* Freeze queue until active == 0 */
333 AHC_DEV_TIMER_ACTIVE = 0x04, /* Our timer is active */
334 AHC_DEV_ON_RUN_LIST = 0x08, /* Queued to be run later */
335 AHC_DEV_Q_BASIC = 0x10, /* Allow basic device queuing */ 326 AHC_DEV_Q_BASIC = 0x10, /* Allow basic device queuing */
336 AHC_DEV_Q_TAGGED = 0x20, /* Allow full SCSI2 command queueing */ 327 AHC_DEV_Q_TAGGED = 0x20, /* Allow full SCSI2 command queueing */
337 AHC_DEV_PERIODIC_OTAG = 0x40, /* Send OTAG to prevent starvation */ 328 AHC_DEV_PERIODIC_OTAG = 0x40, /* Send OTAG to prevent starvation */
338 AHC_DEV_SLAVE_CONFIGURED = 0x80 /* slave_configure() has been called */
339} ahc_linux_dev_flags; 329} ahc_linux_dev_flags;
340 330
341struct ahc_linux_target; 331struct ahc_linux_target;
342struct ahc_linux_device { 332struct ahc_linux_device {
343 TAILQ_ENTRY(ahc_linux_device) links; 333 TAILQ_ENTRY(ahc_linux_device) links;
344 struct ahc_busyq busyq;
345 334
346 /* 335 /*
347 * The number of transactions currently 336 * The number of transactions currently
@@ -382,11 +371,6 @@ struct ahc_linux_device {
382 ahc_linux_dev_flags flags; 371 ahc_linux_dev_flags flags;
383 372
384 /* 373 /*
385 * Per device timer.
386 */
387 struct timer_list timer;
388
389 /*
390 * The high limit for the tags variable. 374 * The high limit for the tags variable.
391 */ 375 */
392 u_int maxtags; 376 u_int maxtags;
@@ -419,7 +403,7 @@ struct ahc_linux_device {
419#define AHC_OTAG_THRESH 500 403#define AHC_OTAG_THRESH 500
420 404
421 int lun; 405 int lun;
422 Scsi_Device *scsi_device; 406 struct scsi_device *scsi_device;
423 struct ahc_linux_target *target; 407 struct ahc_linux_target *target;
424}; 408};
425 409
@@ -439,32 +423,16 @@ struct ahc_linux_target {
439 * manner and are allocated below 4GB, the number of S/G segments is 423 * manner and are allocated below 4GB, the number of S/G segments is
440 * unrestricted. 424 * unrestricted.
441 */ 425 */
442#if LINUX_VERSION_CODE < KERNEL_VERSION(2,5,0)
443/*
444 * We dynamically adjust the number of segments in pre-2.5 kernels to
445 * avoid fragmentation issues in the SCSI mid-layer's private memory
446 * allocator. See aic7xxx_osm.c ahc_linux_size_nseg() for details.
447 */
448extern u_int ahc_linux_nseg;
449#define AHC_NSEG ahc_linux_nseg
450#define AHC_LINUX_MIN_NSEG 64
451#else
452#define AHC_NSEG 128 426#define AHC_NSEG 128
453#endif
454 427
455/* 428/*
456 * Per-SCB OSM storage. 429 * Per-SCB OSM storage.
457 */ 430 */
458typedef enum {
459 AHC_UP_EH_SEMAPHORE = 0x1
460} ahc_linux_scb_flags;
461
462struct scb_platform_data { 431struct scb_platform_data {
463 struct ahc_linux_device *dev; 432 struct ahc_linux_device *dev;
464 dma_addr_t buf_busaddr; 433 dma_addr_t buf_busaddr;
465 uint32_t xfer_len; 434 uint32_t xfer_len;
466 uint32_t sense_resid; /* Auto-Sense residual */ 435 uint32_t sense_resid; /* Auto-Sense residual */
467 ahc_linux_scb_flags flags;
468}; 436};
469 437
470/* 438/*
@@ -473,39 +441,24 @@ struct scb_platform_data {
473 * alignment restrictions of the various platforms supported by 441 * alignment restrictions of the various platforms supported by
474 * this driver. 442 * this driver.
475 */ 443 */
476typedef enum {
477 AHC_RUN_CMPLT_Q_TIMER = 0x10
478} ahc_linux_softc_flags;
479
480TAILQ_HEAD(ahc_completeq, ahc_cmd);
481
482struct ahc_platform_data { 444struct ahc_platform_data {
483 /* 445 /*
484 * Fields accessed from interrupt context. 446 * Fields accessed from interrupt context.
485 */ 447 */
486 struct ahc_linux_target *targets[AHC_NUM_TARGETS]; 448 struct ahc_linux_target *targets[AHC_NUM_TARGETS];
487 TAILQ_HEAD(, ahc_linux_device) device_runq;
488 struct ahc_completeq completeq;
489 449
490 spinlock_t spin_lock; 450 spinlock_t spin_lock;
491 struct tasklet_struct runq_tasklet;
492 u_int qfrozen; 451 u_int qfrozen;
493 pid_t dv_pid;
494 struct timer_list completeq_timer;
495 struct timer_list reset_timer; 452 struct timer_list reset_timer;
496 struct semaphore eh_sem; 453 struct semaphore eh_sem;
497 struct semaphore dv_sem;
498 struct semaphore dv_cmd_sem; /* XXX This needs to be in
499 * the target struct
500 */
501 struct scsi_device *dv_scsi_dev;
502 struct Scsi_Host *host; /* pointer to scsi host */ 454 struct Scsi_Host *host; /* pointer to scsi host */
503#define AHC_LINUX_NOIRQ ((uint32_t)~0) 455#define AHC_LINUX_NOIRQ ((uint32_t)~0)
504 uint32_t irq; /* IRQ for this adapter */ 456 uint32_t irq; /* IRQ for this adapter */
505 uint32_t bios_address; 457 uint32_t bios_address;
506 uint32_t mem_busaddr; /* Mem Base Addr */ 458 uint32_t mem_busaddr; /* Mem Base Addr */
507 uint64_t hw_dma_mask; 459
508 ahc_linux_softc_flags flags; 460#define AHC_UP_EH_SEMAPHORE 0x1
461 uint32_t flags;
509}; 462};
510 463
511/************************** OS Utility Wrappers *******************************/ 464/************************** OS Utility Wrappers *******************************/
@@ -594,7 +547,7 @@ ahc_insb(struct ahc_softc * ahc, long port, uint8_t *array, int count)
594 547
595/**************************** Initialization **********************************/ 548/**************************** Initialization **********************************/
596int ahc_linux_register_host(struct ahc_softc *, 549int ahc_linux_register_host(struct ahc_softc *,
597 Scsi_Host_Template *); 550 struct scsi_host_template *);
598 551
599uint64_t ahc_linux_get_memsize(void); 552uint64_t ahc_linux_get_memsize(void);
600 553
@@ -615,17 +568,6 @@ static __inline void ahc_lockinit(struct ahc_softc *);
615static __inline void ahc_lock(struct ahc_softc *, unsigned long *flags); 568static __inline void ahc_lock(struct ahc_softc *, unsigned long *flags);
616static __inline void ahc_unlock(struct ahc_softc *, unsigned long *flags); 569static __inline void ahc_unlock(struct ahc_softc *, unsigned long *flags);
617 570
618/* Lock acquisition and release of the above lock in midlayer entry points. */
619static __inline void ahc_midlayer_entrypoint_lock(struct ahc_softc *,
620 unsigned long *flags);
621static __inline void ahc_midlayer_entrypoint_unlock(struct ahc_softc *,
622 unsigned long *flags);
623
624/* Lock held during command compeletion to the upper layer */
625static __inline void ahc_done_lockinit(struct ahc_softc *);
626static __inline void ahc_done_lock(struct ahc_softc *, unsigned long *flags);
627static __inline void ahc_done_unlock(struct ahc_softc *, unsigned long *flags);
628
629/* Lock held during ahc_list manipulation and ahc softc frees */ 571/* Lock held during ahc_list manipulation and ahc softc frees */
630extern spinlock_t ahc_list_spinlock; 572extern spinlock_t ahc_list_spinlock;
631static __inline void ahc_list_lockinit(void); 573static __inline void ahc_list_lockinit(void);
@@ -651,57 +593,6 @@ ahc_unlock(struct ahc_softc *ahc, unsigned long *flags)
651} 593}
652 594
653static __inline void 595static __inline void
-ahc_midlayer_entrypoint_lock(struct ahc_softc *ahc, unsigned long *flags)
-{
-	/*
-	 * In 2.5.X and some 2.4.X versions, the midlayer takes our
-	 * lock just before calling us, so we avoid locking again.
-	 * For other kernel versions, the io_request_lock is taken
-	 * just before our entry point is called.  In this case, we
-	 * trade the io_request_lock for our per-softc lock.
-	 */
-#if AHC_SCSI_HAS_HOST_LOCK == 0
-	spin_unlock(&io_request_lock);
-	spin_lock(&ahc->platform_data->spin_lock);
-#endif
-}
-
-static __inline void
-ahc_midlayer_entrypoint_unlock(struct ahc_softc *ahc, unsigned long *flags)
-{
-#if AHC_SCSI_HAS_HOST_LOCK == 0
-	spin_unlock(&ahc->platform_data->spin_lock);
-	spin_lock(&io_request_lock);
-#endif
-}
-
-static __inline void
-ahc_done_lockinit(struct ahc_softc *ahc)
-{
-	/*
-	 * In 2.5.X, our own lock is held during completions.
-	 * In previous versions, the io_request_lock is used.
-	 * In either case, we can't initialize this lock again.
-	 */
-}
-
-static __inline void
-ahc_done_lock(struct ahc_softc *ahc, unsigned long *flags)
-{
-#if AHC_SCSI_HAS_HOST_LOCK == 0
-	spin_lock_irqsave(&io_request_lock, *flags);
-#endif
-}
-
-static __inline void
-ahc_done_unlock(struct ahc_softc *ahc, unsigned long *flags)
-{
-#if AHC_SCSI_HAS_HOST_LOCK == 0
-	spin_unlock_irqrestore(&io_request_lock, *flags);
-#endif
-}
-
 static __inline void
 ahc_list_lockinit(void)
 {
 	spin_lock_init(&ahc_list_spinlock);
@@ -767,12 +658,6 @@ typedef enum
 } ahc_power_state;
 
 /**************************** VL/EISA Routines ********************************/
-#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,5,0) \
-  && (defined(__i386__) || defined(__alpha__)) \
-  && (!defined(CONFIG_EISA)))
-#define CONFIG_EISA
-#endif
-
 #ifdef CONFIG_EISA
 extern uint32_t aic7xxx_probe_eisa_vl;
 int		 ahc_linux_eisa_init(void);
@@ -888,22 +773,18 @@ ahc_flush_device_writes(struct ahc_softc *ahc)
 }
 
 /**************************** Proc FS Support *********************************/
-#if LINUX_VERSION_CODE < KERNEL_VERSION(2,5,0)
-int	ahc_linux_proc_info(char *, char **, off_t, int, int, int);
-#else
 int	ahc_linux_proc_info(struct Scsi_Host *, char *, char **,
 			    off_t, int, int);
-#endif
 
 /*************************** Domain Validation ********************************/
 /*********************** Transaction Access Wrappers *************************/
-static __inline void ahc_cmd_set_transaction_status(Scsi_Cmnd *, uint32_t);
+static __inline void ahc_cmd_set_transaction_status(struct scsi_cmnd *, uint32_t);
 static __inline void ahc_set_transaction_status(struct scb *, uint32_t);
-static __inline void ahc_cmd_set_scsi_status(Scsi_Cmnd *, uint32_t);
+static __inline void ahc_cmd_set_scsi_status(struct scsi_cmnd *, uint32_t);
 static __inline void ahc_set_scsi_status(struct scb *, uint32_t);
-static __inline uint32_t ahc_cmd_get_transaction_status(Scsi_Cmnd *cmd);
+static __inline uint32_t ahc_cmd_get_transaction_status(struct scsi_cmnd *cmd);
 static __inline uint32_t ahc_get_transaction_status(struct scb *);
-static __inline uint32_t ahc_cmd_get_scsi_status(Scsi_Cmnd *cmd);
+static __inline uint32_t ahc_cmd_get_scsi_status(struct scsi_cmnd *cmd);
 static __inline uint32_t ahc_get_scsi_status(struct scb *);
 static __inline void ahc_set_transaction_tag(struct scb *, int, u_int);
 static __inline u_long ahc_get_transfer_length(struct scb *);
@@ -922,7 +803,7 @@ static __inline void ahc_platform_scb_free(struct ahc_softc *ahc,
 static __inline void ahc_freeze_scb(struct scb *scb);
 
 static __inline
-void ahc_cmd_set_transaction_status(Scsi_Cmnd *cmd, uint32_t status)
+void ahc_cmd_set_transaction_status(struct scsi_cmnd *cmd, uint32_t status)
 {
 	cmd->result &= ~(CAM_STATUS_MASK << 16);
 	cmd->result |= status << 16;
@@ -935,7 +816,7 @@ void ahc_set_transaction_status(struct scb *scb, uint32_t status)
 }
 
 static __inline
-void ahc_cmd_set_scsi_status(Scsi_Cmnd *cmd, uint32_t status)
+void ahc_cmd_set_scsi_status(struct scsi_cmnd *cmd, uint32_t status)
 {
 	cmd->result &= ~0xFFFF;
 	cmd->result |= status;
@@ -948,7 +829,7 @@ void ahc_set_scsi_status(struct scb *scb, uint32_t status)
 }
 
 static __inline
-uint32_t ahc_cmd_get_transaction_status(Scsi_Cmnd *cmd)
+uint32_t ahc_cmd_get_transaction_status(struct scsi_cmnd *cmd)
 {
 	return ((cmd->result >> 16) & CAM_STATUS_MASK);
 }
@@ -960,7 +841,7 @@ uint32_t ahc_get_transaction_status(struct scb *scb)
 }
 
 static __inline
-uint32_t ahc_cmd_get_scsi_status(Scsi_Cmnd *cmd)
+uint32_t ahc_cmd_get_scsi_status(struct scsi_cmnd *cmd)
 {
 	return (cmd->result & 0xFFFF);
 }
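
Note on the wrappers above: they pack the CAM transaction status into bits 16..23 of cmd->result and the SCSI status into the low 16 bits. A minimal standalone sketch of that encoding (CAM_STATUS_MASK and the bit split follow the patch; the test values are made up for illustration):

	#include <stdint.h>
	#include <assert.h>

	#define CAM_STATUS_MASK 0xFF	/* assumed width of the CAM status field */

	int main(void)
	{
		uint32_t result = 0;
		uint32_t cam_status = 0x2a;	/* hypothetical value */
		uint32_t scsi_status = 0x08;	/* hypothetical value */

		/* ahc_cmd_set_transaction_status(): replace bits 16..23 */
		result &= ~(CAM_STATUS_MASK << 16);
		result |= cam_status << 16;

		/* ahc_cmd_set_scsi_status(): replace bits 0..15 */
		result &= ~0xFFFF;
		result |= scsi_status;

		assert(((result >> 16) & CAM_STATUS_MASK) == cam_status);
		assert((result & 0xFFFF) == scsi_status);
		return 0;
	}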
diff --git a/drivers/scsi/aic7xxx/aic7xxx_osm_pci.c b/drivers/scsi/aic7xxx/aic7xxx_osm_pci.c
index 6f6674aa31ef..2a0ebce83e7a 100644
--- a/drivers/scsi/aic7xxx/aic7xxx_osm_pci.c
+++ b/drivers/scsi/aic7xxx/aic7xxx_osm_pci.c
@@ -221,13 +221,11 @@ ahc_linux_pci_dev_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
 	    && ahc_linux_get_memsize() > 0x80000000
 	    && pci_set_dma_mask(pdev, mask_39bit) == 0) {
 		ahc->flags |= AHC_39BIT_ADDRESSING;
-		ahc->platform_data->hw_dma_mask = mask_39bit;
 	} else {
 		if (pci_set_dma_mask(pdev, DMA_32BIT_MASK)) {
 			printk(KERN_WARNING "aic7xxx: No suitable DMA available.\n");
 			return (-ENODEV);
 		}
-		ahc->platform_data->hw_dma_mask = DMA_32BIT_MASK;
 	}
 	ahc->dev_softc = pci;
 	error = ahc_pci_config(ahc, entry);
@@ -236,15 +234,8 @@ ahc_linux_pci_dev_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
 		return (-error);
 	}
 	pci_set_drvdata(pdev, ahc);
-	if (aic7xxx_detect_complete) {
-#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,5,0)
+	if (aic7xxx_detect_complete)
 		ahc_linux_register_host(ahc, &aic7xxx_driver_template);
-#else
-		printf("aic7xxx: ignoring PCI device found after "
-		       "initialization\n");
-		return (-ENODEV);
-#endif
-	}
 	return (0);
 }
 
diff --git a/drivers/scsi/aic7xxx/aic7xxx_proc.c b/drivers/scsi/aic7xxx/aic7xxx_proc.c
index 85e80eecc9d0..5fece859fbd9 100644
--- a/drivers/scsi/aic7xxx/aic7xxx_proc.c
+++ b/drivers/scsi/aic7xxx/aic7xxx_proc.c
@@ -289,13 +289,8 @@ done:
  * Return information to handle /proc support for the driver.
  */
 int
-#if LINUX_VERSION_CODE < KERNEL_VERSION(2,5,0)
-ahc_linux_proc_info(char *buffer, char **start, off_t offset,
-		    int length, int hostno, int inout)
-#else
 ahc_linux_proc_info(struct Scsi_Host *shost, char *buffer, char **start,
 		    off_t offset, int length, int inout)
-#endif
 {
 	struct ahc_softc *ahc;
 	struct info_str info;
@@ -307,15 +302,7 @@ ahc_linux_proc_info(struct Scsi_Host *shost, char *buffer, char **start,
 
 	retval = -EINVAL;
 	ahc_list_lock(&s);
-#if LINUX_VERSION_CODE < KERNEL_VERSION(2,5,0)
-	TAILQ_FOREACH(ahc, &ahc_tailq, links) {
-		if (ahc->platform_data->host->host_no == hostno)
-			break;
-	}
-#else
 	ahc = ahc_find_softc(*(struct ahc_softc **)shost->hostdata);
-#endif
-
 	if (ahc == NULL)
 		goto done;
 
diff --git a/drivers/scsi/aic7xxx/aiclib.c b/drivers/scsi/aic7xxx/aiclib.c
index 79bfd9efd8ed..7c5a6db0e672 100644
--- a/drivers/scsi/aic7xxx/aiclib.c
+++ b/drivers/scsi/aic7xxx/aiclib.c
@@ -35,7 +35,6 @@
 #include <linux/version.h>
 
 /* Core SCSI definitions */
-#include "scsi.h"
 #include <scsi/scsi_host.h>
 #include "aiclib.h"
 #include "cam.h"
diff --git a/drivers/scsi/scsi_transport_spi.c b/drivers/scsi/scsi_transport_spi.c
index 28966d05435c..67c6cc40ce16 100644
--- a/drivers/scsi/scsi_transport_spi.c
+++ b/drivers/scsi/scsi_transport_spi.c
@@ -35,7 +35,7 @@
 
 #define SPI_PRINTK(x, l, f, a...)	dev_printk(l, &(x)->dev, f , ##a)
 
-#define SPI_NUM_ATTRS 10	/* increase this if you add attributes */
+#define SPI_NUM_ATTRS 13	/* increase this if you add attributes */
 #define SPI_OTHER_ATTRS 1	/* Increase this if you add "always
 				 * on" attributes */
 #define SPI_HOST_ATTRS	1
@@ -219,8 +219,11 @@ static int spi_setup_transport_attrs(struct device *dev)
 	struct scsi_target *starget = to_scsi_target(dev);
 
 	spi_period(starget) = -1;	/* illegal value */
+	spi_min_period(starget) = 0;
 	spi_offset(starget) = 0;	/* async */
+	spi_max_offset(starget) = 255;
 	spi_width(starget) = 0;	/* narrow */
+	spi_max_width(starget) = 1;
 	spi_iu(starget) = 0;	/* no IU */
 	spi_dt(starget) = 0;	/* ST */
 	spi_qas(starget) = 0;
@@ -235,6 +238,34 @@ static int spi_setup_transport_attrs(struct device *dev)
 	return 0;
 }
 
+#define spi_transport_show_simple(field, format_string)		\
+									\
+static ssize_t								\
+show_spi_transport_##field(struct class_device *cdev, char *buf)	\
+{									\
+	struct scsi_target *starget = transport_class_to_starget(cdev); \
+	struct spi_transport_attrs *tp;					\
+									\
+	tp = (struct spi_transport_attrs *)&starget->starget_data;	\
+	return snprintf(buf, 20, format_string, tp->field);		\
+}
+
+#define spi_transport_store_simple(field, format_string)		\
+									\
+static ssize_t								\
+store_spi_transport_##field(struct class_device *cdev, const char *buf, \
+			    size_t count)				\
+{									\
+	int val;							\
+	struct scsi_target *starget = transport_class_to_starget(cdev); \
+	struct spi_transport_attrs *tp;					\
+									\
+	tp = (struct spi_transport_attrs *)&starget->starget_data;	\
+	val = simple_strtoul(buf, NULL, 0);				\
+	tp->field = val;						\
+	return count;							\
+}
+
 #define spi_transport_show_function(field, format_string)		\
 									\
 static ssize_t								\
@@ -261,6 +292,25 @@ store_spi_transport_##field(struct class_device *cdev, const char *buf, \
 	struct spi_internal *i = to_spi_internal(shost->transportt);	\
 									\
 	val = simple_strtoul(buf, NULL, 0);				\
+	i->f->set_##field(starget, val);				\
+	return count;							\
+}
+
+#define spi_transport_store_max(field, format_string)			\
+static ssize_t								\
+store_spi_transport_##field(struct class_device *cdev, const char *buf, \
+			    size_t count)				\
+{									\
+	int val;							\
+	struct scsi_target *starget = transport_class_to_starget(cdev); \
+	struct Scsi_Host *shost = dev_to_shost(starget->dev.parent);	\
+	struct spi_internal *i = to_spi_internal(shost->transportt);	\
+	struct spi_transport_attrs *tp					\
+		= (struct spi_transport_attrs *)&starget->starget_data; \
+									\
+	val = simple_strtoul(buf, NULL, 0);				\
+	if (val > tp->max_##field)					\
+		val = tp->max_##field;					\
 	i->f->set_##field(starget, val);				\
 	return count;							\
 }
@@ -272,9 +322,24 @@ static CLASS_DEVICE_ATTR(field, S_IRUGO | S_IWUSR, \
 			 show_spi_transport_##field,			\
 			 store_spi_transport_##field);
 
+#define spi_transport_simple_attr(field, format_string)		\
+	spi_transport_show_simple(field, format_string)			\
+	spi_transport_store_simple(field, format_string)		\
+static CLASS_DEVICE_ATTR(field, S_IRUGO | S_IWUSR,			\
+			 show_spi_transport_##field,			\
+			 store_spi_transport_##field);
+
+#define spi_transport_max_attr(field, format_string)			\
+	spi_transport_show_function(field, format_string)		\
+	spi_transport_store_max(field, format_string)			\
+	spi_transport_simple_attr(max_##field, format_string)		\
+static CLASS_DEVICE_ATTR(field, S_IRUGO | S_IWUSR,			\
+			 show_spi_transport_##field,			\
+			 store_spi_transport_##field);
+
 /* The Parallel SCSI Tranport Attributes: */
-spi_transport_rd_attr(offset, "%d\n");
-spi_transport_rd_attr(width, "%d\n");
+spi_transport_max_attr(offset, "%d\n");
+spi_transport_max_attr(width, "%d\n");
 spi_transport_rd_attr(iu, "%d\n");
 spi_transport_rd_attr(dt, "%d\n");
 spi_transport_rd_attr(qas, "%d\n");
@@ -300,26 +365,18 @@ static CLASS_DEVICE_ATTR(revalidate, S_IWUSR, NULL, store_spi_revalidate);
 
 /* Translate the period into ns according to the current spec
  * for SDTR/PPR messages */
-static ssize_t show_spi_transport_period(struct class_device *cdev, char *buf)
-
+static ssize_t
+show_spi_transport_period_helper(struct class_device *cdev, char *buf,
+				 int period)
 {
-	struct scsi_target *starget = transport_class_to_starget(cdev);
-	struct Scsi_Host *shost = dev_to_shost(starget->dev.parent);
-	struct spi_transport_attrs *tp;
 	int len, picosec;
-	struct spi_internal *i = to_spi_internal(shost->transportt);
-
-	tp = (struct spi_transport_attrs *)&starget->starget_data;
-
-	if (i->f->get_period)
-		i->f->get_period(starget);
 
-	if (tp->period < 0 || tp->period > 0xff) {
+	if (period < 0 || period > 0xff) {
 		picosec = -1;
-	} else if (tp->period <= SPI_STATIC_PPR) {
-		picosec = ppr_to_ps[tp->period];
+	} else if (period <= SPI_STATIC_PPR) {
+		picosec = ppr_to_ps[period];
 	} else {
-		picosec = tp->period * 4000;
+		picosec = period * 4000;
 	}
 
 	if (picosec == -1) {
@@ -334,12 +391,9 @@ static ssize_t show_spi_transport_period(struct class_device *cdev, char *buf)
 }
 
 static ssize_t
-store_spi_transport_period(struct class_device *cdev, const char *buf,
-			   size_t count)
+store_spi_transport_period_helper(struct class_device *cdev, const char *buf,
+				  size_t count, int *periodp)
 {
-	struct scsi_target *starget = transport_class_to_starget(cdev);
-	struct Scsi_Host *shost = dev_to_shost(starget->dev.parent);
-	struct spi_internal *i = to_spi_internal(shost->transportt);
 	int j, picosec, period = -1;
 	char *endp;
 
@@ -368,15 +422,79 @@ store_spi_transport_period(struct class_device *cdev, const char *buf,
 	if (period > 0xff)
 		period = 0xff;
 
-	i->f->set_period(starget, period);
+	*periodp = period;
 
 	return count;
 }
 
+static ssize_t
+show_spi_transport_period(struct class_device *cdev, char *buf)
+{
+	struct scsi_target *starget = transport_class_to_starget(cdev);
+	struct Scsi_Host *shost = dev_to_shost(starget->dev.parent);
+	struct spi_internal *i = to_spi_internal(shost->transportt);
+	struct spi_transport_attrs *tp =
+		(struct spi_transport_attrs *)&starget->starget_data;
+
+	if (i->f->get_period)
+		i->f->get_period(starget);
+
+	return show_spi_transport_period_helper(cdev, buf, tp->period);
+}
+
+static ssize_t
+store_spi_transport_period(struct class_device *cdev, const char *buf,
+			   size_t count)
+{
+	struct scsi_target *starget = transport_class_to_starget(cdev);
+	struct Scsi_Host *shost = dev_to_shost(starget->dev.parent);
+	struct spi_internal *i = to_spi_internal(shost->transportt);
+	struct spi_transport_attrs *tp =
+		(struct spi_transport_attrs *)&starget->starget_data;
+	int period, retval;
+
+	retval = store_spi_transport_period_helper(cdev, buf, count, &period);
+
+	if (period < tp->min_period)
+		period = tp->min_period;
+
+	i->f->set_period(starget, period);
+
+	return retval;
+}
+
 static CLASS_DEVICE_ATTR(period, S_IRUGO | S_IWUSR,
 			 show_spi_transport_period,
 			 store_spi_transport_period);
 
+static ssize_t
+show_spi_transport_min_period(struct class_device *cdev, char *buf)
+{
+	struct scsi_target *starget = transport_class_to_starget(cdev);
+	struct spi_transport_attrs *tp =
+		(struct spi_transport_attrs *)&starget->starget_data;
+
+	return show_spi_transport_period_helper(cdev, buf, tp->min_period);
+}
+
+static ssize_t
+store_spi_transport_min_period(struct class_device *cdev, const char *buf,
+			    size_t count)
+{
+	struct scsi_target *starget = transport_class_to_starget(cdev);
+	struct spi_transport_attrs *tp =
+		(struct spi_transport_attrs *)&starget->starget_data;
+
+	return store_spi_transport_period_helper(cdev, buf, count,
+						 &tp->min_period);
+}
+
+
+static CLASS_DEVICE_ATTR(min_period, S_IRUGO | S_IWUSR,
+			 show_spi_transport_min_period,
+			 store_spi_transport_min_period);
+
+
 static ssize_t show_spi_host_signalling(struct class_device *cdev, char *buf)
 {
 	struct Scsi_Host *shost = transport_class_to_shost(cdev);
@@ -642,6 +760,7 @@ spi_dv_device_internal(struct scsi_request *sreq, u8 *buffer)
 {
 	struct spi_internal *i = to_spi_internal(sreq->sr_host->transportt);
 	struct scsi_device *sdev = sreq->sr_device;
+	struct scsi_target *starget = sdev->sdev_target;
 	int len = sdev->inquiry_len;
 	/* first set us up for narrow async */
 	DV_SET(offset, 0);
@@ -655,9 +774,11 @@ spi_dv_device_internal(struct scsi_request *sreq, u8 *buffer)
 	}
 
 	/* test width */
-	if (i->f->set_width && sdev->wdtr) {
+	if (i->f->set_width && spi_max_width(starget) && sdev->wdtr) {
 		i->f->set_width(sdev->sdev_target, 1);
 
+		printk("WIDTH IS %d\n", spi_max_width(starget));
+
 		if (spi_dv_device_compare_inquiry(sreq, buffer,
 						  buffer + len,
 						  DV_LOOPS)
@@ -684,8 +805,8 @@ spi_dv_device_internal(struct scsi_request *sreq, u8 *buffer)
  retry:
 
 	/* now set up to the maximum */
-	DV_SET(offset, 255);
-	DV_SET(period, 1);
+	DV_SET(offset, spi_max_offset(starget));
+	DV_SET(period, spi_min_period(starget));
 
 	if (len == 0) {
 		SPI_PRINTK(sdev->sdev_target, KERN_INFO, "Domain Validation skipping write tests\n");
@@ -892,6 +1013,16 @@ EXPORT_SYMBOL(spi_display_xfer_agreement);
 	if (i->f->show_##field)						\
 		count++
 
+#define SETUP_RELATED_ATTRIBUTE(field, rel_field)			\
+	i->private_attrs[count] = class_device_attr_##field;		\
+	if (!i->f->set_##rel_field) {					\
+		i->private_attrs[count].attr.mode = S_IRUGO;		\
+		i->private_attrs[count].store = NULL;			\
+	}								\
+	i->attrs[count] = &i->private_attrs[count];			\
+	if (i->f->show_##rel_field)					\
+		count++
+
 #define SETUP_HOST_ATTRIBUTE(field)					\
 	i->private_host_attrs[count] = class_device_attr_##field;	\
 	if (!i->f->set_##field) {					\
@@ -975,8 +1106,11 @@ spi_attach_transport(struct spi_function_template *ft)
 	i->f = ft;
 
 	SETUP_ATTRIBUTE(period);
+	SETUP_RELATED_ATTRIBUTE(min_period, period);
 	SETUP_ATTRIBUTE(offset);
+	SETUP_RELATED_ATTRIBUTE(max_offset, offset);
 	SETUP_ATTRIBUTE(width);
+	SETUP_RELATED_ATTRIBUTE(max_width, width);
 	SETUP_ATTRIBUTE(iu);
 	SETUP_ATTRIBUTE(dt);
 	SETUP_ATTRIBUTE(qas);
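
Note on spi_transport_store_max above: any value written through sysfs is clamped to the target's recorded maximum before the driver's setter runs. A minimal sketch of the resulting store path for the offset attribute (names follow the patch; the struct is reduced to the two fields involved and the sysfs plumbing is elided):

	#include <stdio.h>

	struct spi_attrs { int offset; int max_offset; };

	/* stands in for i->f->set_offset(starget, val) */
	static void set_offset(struct spi_attrs *tp, int val) { tp->offset = val; }

	static void store_offset(struct spi_attrs *tp, int val)
	{
		if (val > tp->max_offset)	/* the clamp added by spi_transport_store_max() */
			val = tp->max_offset;
		set_offset(tp, val);
	}

	int main(void)
	{
		struct spi_attrs tp = { .offset = 0, .max_offset = 127 };
		store_offset(&tp, 255);		/* user asks for 255... */
		printf("%d\n", tp.offset);	/* ...but gets 127 */
		return 0;
	}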
diff --git a/drivers/serial/8250.c b/drivers/serial/8250.c
index 3bbf0cc6e53f..30e8beb71430 100644
--- a/drivers/serial/8250.c
+++ b/drivers/serial/8250.c
@@ -682,8 +682,6 @@ static void autoconfig_16550a(struct uart_8250_port *up)
 	 * from EXCR1. Switch back to bank 0, change it in MCR. Then
 	 * switch back to bank 2, read it from EXCR1 again and check
 	 * it's changed. If so, set baud_base in EXCR2 to 921600. -- dwmw2
-	 * On PowerPC we don't want to change baud_base, as we have
-	 * a number of different divisors.  -- Tom Rini
 	 */
 	serial_outp(up, UART_LCR, 0);
 	status1 = serial_in(up, UART_MCR);
@@ -699,16 +697,25 @@ static void autoconfig_16550a(struct uart_8250_port *up)
 	serial_outp(up, UART_MCR, status1);
 
 	if ((status2 ^ status1) & UART_MCR_LOOP) {
-#ifndef CONFIG_PPC
+		unsigned short quot;
+
 		serial_outp(up, UART_LCR, 0xE0);
+
+		quot = serial_inp(up, UART_DLM) << 8;
+		quot += serial_inp(up, UART_DLL);
+		quot <<= 3;
+
 		status1 = serial_in(up, 0x04); /* EXCR1 */
 		status1 &= ~0xB0; /* Disable LOCK, mask out PRESL[01] */
 		status1 |= 0x10;  /* 1.625 divisor for baud_base --> 921600 */
 		serial_outp(up, 0x04, status1);
+
+		serial_outp(up, UART_DLL, quot & 0xff);
+		serial_outp(up, UART_DLM, quot >> 8);
+
 		serial_outp(up, UART_LCR, 0);
-		up->port.uartclk = 921600*16;
-#endif
 
+		up->port.uartclk = 921600*16;
 		up->port.type = PORT_NS16550A;
 		up->capabilities |= UART_NATSEMI;
 		return;
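
Note on the quot <<= 3 above: the EXCR1 write raises baud_base from 115200 to 921600, an 8x increase, so the divisor latched in DLL/DLM must grow by the same factor to keep the configured baud rate unchanged. A back-of-the-envelope check (the 115200 starting point is the conventional base, not spelled out in the patch, and the divisor value is hypothetical):

	#include <assert.h>

	int main(void)
	{
		unsigned int old_base = 115200, new_base = 921600;
		unsigned short quot = 12;	/* hypothetical divisor: 115200/12 = 9600 baud */

		unsigned int baud_before = old_base / quot;
		quot <<= 3;			/* new_base / old_base == 8 == 1 << 3 */
		unsigned int baud_after = new_base / quot;

		assert(baud_before == baud_after);	/* rate is preserved across the switch */
		return 0;
	}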
diff --git a/drivers/serial/sunsab.c b/drivers/serial/sunsab.c
index 39b788d95e39..10e2990a40d4 100644
--- a/drivers/serial/sunsab.c
+++ b/drivers/serial/sunsab.c
@@ -61,6 +61,16 @@ struct uart_sunsab_port {
 	unsigned char			pvr_dtr_bit;	/* Which PVR bit is DTR */
 	unsigned char			pvr_dsr_bit;	/* Which PVR bit is DSR */
 	int				type;		/* SAB82532 version */
+
+	/* Setting configuration bits while the transmitter is active
+	 * can cause garbage characters to get emitted by the chip.
+	 * Therefore, we cache such writes here and do the real register
+	 * write the next time the transmitter becomes idle.
+	 */
+	unsigned int			cached_ebrg;
+	unsigned char			cached_mode;
+	unsigned char			cached_pvr;
+	unsigned char			cached_dafo;
 };
 
 /*
@@ -236,6 +246,7 @@ receive_chars(struct uart_sunsab_port *up,
 }
 
 static void sunsab_stop_tx(struct uart_port *, unsigned int);
+static void sunsab_tx_idle(struct uart_sunsab_port *);
 
 static void transmit_chars(struct uart_sunsab_port *up,
 			   union sab82532_irq_status *stat)
@@ -258,6 +269,7 @@ static void transmit_chars(struct uart_sunsab_port *up,
 		return;
 
 	set_bit(SAB82532_XPR, &up->irqflags);
+	sunsab_tx_idle(up);
 
 	if (uart_circ_empty(xmit) || uart_tx_stopped(&up->port)) {
 		up->interrupt_mask1 |= SAB82532_IMR1_XPR;
@@ -397,21 +409,21 @@ static void sunsab_set_mctrl(struct uart_port *port, unsigned int mctrl)
 	struct uart_sunsab_port *up = (struct uart_sunsab_port *) port;
 
 	if (mctrl & TIOCM_RTS) {
-		writeb(readb(&up->regs->rw.mode) & ~SAB82532_MODE_FRTS,
-		       &up->regs->rw.mode);
-		writeb(readb(&up->regs->rw.mode) | SAB82532_MODE_RTS,
-		       &up->regs->rw.mode);
+		up->cached_mode &= ~SAB82532_MODE_FRTS;
+		up->cached_mode |= SAB82532_MODE_RTS;
 	} else {
-		writeb(readb(&up->regs->rw.mode) | SAB82532_MODE_FRTS,
-		       &up->regs->rw.mode);
-		writeb(readb(&up->regs->rw.mode) | SAB82532_MODE_RTS,
-		       &up->regs->rw.mode);
+		up->cached_mode |= (SAB82532_MODE_FRTS |
+				    SAB82532_MODE_RTS);
 	}
 	if (mctrl & TIOCM_DTR) {
-		writeb(readb(&up->regs->rw.pvr) & ~(up->pvr_dtr_bit), &up->regs->rw.pvr);
+		up->cached_pvr &= ~(up->pvr_dtr_bit);
 	} else {
-		writeb(readb(&up->regs->rw.pvr) | up->pvr_dtr_bit, &up->regs->rw.pvr);
+		up->cached_pvr |= up->pvr_dtr_bit;
 	}
+
+	set_bit(SAB82532_REGS_PENDING, &up->irqflags);
+	if (test_bit(SAB82532_XPR, &up->irqflags))
+		sunsab_tx_idle(up);
 }
 
 /* port->lock is not held.  */
@@ -450,6 +462,25 @@ static void sunsab_stop_tx(struct uart_port *port, unsigned int tty_stop)
 }
 
 /* port->lock held by caller.  */
+static void sunsab_tx_idle(struct uart_sunsab_port *up)
+{
+	if (test_bit(SAB82532_REGS_PENDING, &up->irqflags)) {
+		u8 tmp;
+
+		clear_bit(SAB82532_REGS_PENDING, &up->irqflags);
+		writeb(up->cached_mode, &up->regs->rw.mode);
+		writeb(up->cached_pvr, &up->regs->rw.pvr);
+		writeb(up->cached_dafo, &up->regs->w.dafo);
+
+		writeb(up->cached_ebrg & 0xff, &up->regs->w.bgr);
+		tmp = readb(&up->regs->rw.ccr2);
+		tmp &= ~0xc0;
+		tmp |= (up->cached_ebrg >> 2) & 0xc0;
+		writeb(tmp, &up->regs->rw.ccr2);
+	}
+}
+
+/* port->lock held by caller.  */
 static void sunsab_start_tx(struct uart_port *port, unsigned int tty_start)
 {
 	struct uart_sunsab_port *up = (struct uart_sunsab_port *) port;
@@ -517,12 +548,16 @@ static void sunsab_break_ctl(struct uart_port *port, int break_state)
 
 	spin_lock_irqsave(&up->port.lock, flags);
 
-	val = readb(&up->regs->rw.dafo);
+	val = up->cached_dafo;
 	if (break_state)
 		val |= SAB82532_DAFO_XBRK;
 	else
 		val &= ~SAB82532_DAFO_XBRK;
-	writeb(val, &up->regs->rw.dafo);
+	up->cached_dafo = val;
+
+	set_bit(SAB82532_REGS_PENDING, &up->irqflags);
+	if (test_bit(SAB82532_XPR, &up->irqflags))
+		sunsab_tx_idle(up);
 
 	spin_unlock_irqrestore(&up->port.lock, flags);
 }
@@ -566,8 +601,9 @@ static int sunsab_startup(struct uart_port *port)
 	       SAB82532_CCR2_TOE, &up->regs->w.ccr2);
 	writeb(0, &up->regs->w.ccr3);
 	writeb(SAB82532_CCR4_MCK4 | SAB82532_CCR4_EBRG, &up->regs->w.ccr4);
-	writeb(SAB82532_MODE_RTS | SAB82532_MODE_FCTS |
-	       SAB82532_MODE_RAC, &up->regs->w.mode);
+	up->cached_mode = (SAB82532_MODE_RTS | SAB82532_MODE_FCTS |
+			   SAB82532_MODE_RAC);
+	writeb(up->cached_mode, &up->regs->w.mode);
 	writeb(SAB82532_RFC_DPS|SAB82532_RFC_RFTH_32, &up->regs->w.rfc);
 
 	tmp = readb(&up->regs->rw.ccr0);
@@ -598,7 +634,6 @@ static void sunsab_shutdown(struct uart_port *port)
 {
 	struct uart_sunsab_port *up = (struct uart_sunsab_port *) port;
 	unsigned long flags;
-	unsigned char tmp;
 
 	spin_lock_irqsave(&up->port.lock, flags);
 
@@ -609,14 +644,13 @@ static void sunsab_shutdown(struct uart_port *port)
 	writeb(up->interrupt_mask1, &up->regs->w.imr1);
 
 	/* Disable break condition */
-	tmp = readb(&up->regs->rw.dafo);
-	tmp &= ~SAB82532_DAFO_XBRK;
-	writeb(tmp, &up->regs->rw.dafo);
+	up->cached_dafo = readb(&up->regs->rw.dafo);
+	up->cached_dafo &= ~SAB82532_DAFO_XBRK;
+	writeb(up->cached_dafo, &up->regs->rw.dafo);
 
 	/* Disable Receiver */
-	tmp = readb(&up->regs->rw.mode);
-	tmp &= ~SAB82532_MODE_RAC;
-	writeb(tmp, &up->regs->rw.mode);
+	up->cached_mode &= ~SAB82532_MODE_RAC;
+	writeb(up->cached_mode, &up->regs->rw.mode);
 
 	/*
 	 * XXX FIXME
@@ -685,7 +719,6 @@ static void sunsab_convert_to_sab(struct uart_sunsab_port *up, unsigned int cfla
 			       unsigned int iflag, unsigned int baud,
 			       unsigned int quot)
 {
-	unsigned int ebrg;
 	unsigned char dafo;
 	int bits, n, m;
 
@@ -714,10 +747,11 @@ static void sunsab_convert_to_sab(struct uart_sunsab_port *up, unsigned int cfla
 	} else {
 		dafo |= SAB82532_DAFO_PAR_EVEN;
 	}
+	up->cached_dafo = dafo;
 
 	calc_ebrg(baud, &n, &m);
 
-	ebrg = n | (m << 6);
+	up->cached_ebrg = n | (m << 6);
 
 	up->tec_timeout = (10 * 1000000) / baud;
 	up->cec_timeout = up->tec_timeout >> 2;
@@ -770,16 +804,13 @@ static void sunsab_convert_to_sab(struct uart_sunsab_port *up, unsigned int cfla
 	uart_update_timeout(&up->port, cflag,
 			    (up->port.uartclk / (16 * quot)));
 
-	/* Now bang the new settings into the chip. */
-	sunsab_cec_wait(up);
-	sunsab_tec_wait(up);
-	writeb(dafo, &up->regs->w.dafo);
-	writeb(ebrg & 0xff, &up->regs->w.bgr);
-	writeb((readb(&up->regs->rw.ccr2) & ~0xc0) | ((ebrg >> 2) & 0xc0),
-	       &up->regs->rw.ccr2);
-
-	writeb(readb(&up->regs->rw.mode) | SAB82532_MODE_RAC, &up->regs->rw.mode);
-
+	/* Now schedule a register update when the chip's
+	 * transmitter is idle.
+	 */
+	up->cached_mode |= SAB82532_MODE_RAC;
+	set_bit(SAB82532_REGS_PENDING, &up->irqflags);
+	if (test_bit(SAB82532_XPR, &up->irqflags))
+		sunsab_tx_idle(up);
 }
 
 /* port->lock is not held.  */
@@ -1084,11 +1115,13 @@ static void __init sunsab_init_hw(void)
 		up->pvr_dsr_bit = (1 << 3);
 		up->pvr_dtr_bit = (1 << 2);
 	}
-	writeb((1 << 1) | (1 << 2) | (1 << 4), &up->regs->w.pvr);
-	writeb(readb(&up->regs->rw.mode) | SAB82532_MODE_FRTS,
-	       &up->regs->rw.mode);
-	writeb(readb(&up->regs->rw.mode) | SAB82532_MODE_RTS,
-	       &up->regs->rw.mode);
+	up->cached_pvr = (1 << 1) | (1 << 2) | (1 << 4);
+	writeb(up->cached_pvr, &up->regs->w.pvr);
+	up->cached_mode = readb(&up->regs->rw.mode);
+	up->cached_mode |= SAB82532_MODE_FRTS;
+	writeb(up->cached_mode, &up->regs->rw.mode);
+	up->cached_mode |= SAB82532_MODE_RTS;
+	writeb(up->cached_mode, &up->regs->rw.mode);
 
 	up->tec_timeout = SAB82532_MAX_TEC_TIMEOUT;
 	up->cec_timeout = SAB82532_MAX_CEC_TIMEOUT;
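
Note on the sunsab change as a whole: it is a classic deferred-write scheme. Configuration changes land in the cached_* fields, REGS_PENDING is raised, and the flush happens from sunsab_tx_idle() once XPR says the transmitter has drained. A reduced model of the control flow (the flag word and register struct are stand-ins for the driver's real ones):

	/* reduced model of the sunsab deferred register write */
	struct model_port {
		unsigned long	irqflags;	/* bit 0: XPR (tx idle), bit 1: REGS_PENDING */
		unsigned char	cached_mode;
		unsigned char	hw_mode;	/* stands in for the chip register */
	};

	#define XPR		(1UL << 0)
	#define REGS_PENDING	(1UL << 1)

	static void tx_idle(struct model_port *p)
	{
		if (p->irqflags & REGS_PENDING) {	/* flush only when something is queued */
			p->irqflags &= ~REGS_PENDING;
			p->hw_mode = p->cached_mode;	/* the real driver writeb()s several regs */
		}
	}

	static void set_mode_bit(struct model_port *p, unsigned char bit)
	{
		p->cached_mode |= bit;		/* never touch the chip directly */
		p->irqflags |= REGS_PENDING;
		if (p->irqflags & XPR)		/* transmitter already idle? flush now */
			tx_idle(p);
	}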
diff --git a/drivers/serial/sunsab.h b/drivers/serial/sunsab.h
index 686086fcbbf5..b78e1f7b8050 100644
--- a/drivers/serial/sunsab.h
+++ b/drivers/serial/sunsab.h
@@ -126,6 +126,7 @@ union sab82532_irq_status {
 /* irqflags bits */
 #define SAB82532_ALLS		0x00000001
 #define SAB82532_XPR		0x00000002
+#define SAB82532_REGS_PENDING	0x00000004
 
 /* RFIFO Status Byte */
 #define SAB82532_RSTAT_PE	0x80
diff --git a/fs/namei.c b/fs/namei.c
index defe6781e003..dd78f01b6de8 100644
--- a/fs/namei.c
+++ b/fs/namei.c
@@ -1580,6 +1580,7 @@ enoent:
 fail:
 	return dentry;
 }
+EXPORT_SYMBOL_GPL(lookup_create);
 
 int vfs_mknod(struct inode *dir, struct dentry *dentry, int mode, dev_t dev)
 {
diff --git a/fs/reiserfs/stree.c b/fs/reiserfs/stree.c
index da23ba75f3d5..c47f8fd31a2d 100644
--- a/fs/reiserfs/stree.c
+++ b/fs/reiserfs/stree.c
@@ -230,7 +230,6 @@ const struct reiserfs_key MAX_KEY = {
 	 __constant_cpu_to_le32(0xffffffff)},}
 };
 
-const struct in_core_key  MAX_IN_CORE_KEY = {~0U, ~0U, ~0ULL>>4, 15};
 
 /* Get delimiting key of the buffer by looking for it in the buffers in the path, starting from the bottom
    of the path, and going upwards.  We must check the path's validity at each step.  If the key is not in
diff --git a/fs/reiserfs/super.c b/fs/reiserfs/super.c
index 31e75125f48b..b35b87744983 100644
--- a/fs/reiserfs/super.c
+++ b/fs/reiserfs/super.c
@@ -164,7 +164,9 @@ static int finish_unfinished (struct super_block * s)
 
 	/* compose key to look for "save" links */
 	max_cpu_key.version = KEY_FORMAT_3_5;
-	max_cpu_key.on_disk_key = MAX_IN_CORE_KEY;
+	max_cpu_key.on_disk_key.k_dir_id = ~0U;
+	max_cpu_key.on_disk_key.k_objectid = ~0U;
+	set_cpu_key_k_offset (&max_cpu_key, ~0U);
 	max_cpu_key.key_length = 3;
 
 #ifdef CONFIG_QUOTA
diff --git a/include/asm-ia64/ioctl32.h b/include/asm-ia64/ioctl32.h
deleted file mode 100644
index e69de29bb2d1..000000000000
--- a/include/asm-ia64/ioctl32.h
+++ /dev/null
diff --git a/include/asm-um/arch-signal-i386.h b/include/asm-um/arch-signal-i386.h
deleted file mode 100644
index e69de29bb2d1..000000000000
--- a/include/asm-um/arch-signal-i386.h
+++ /dev/null
diff --git a/include/asm-um/elf-i386.h b/include/asm-um/elf-i386.h
index b72e23519e00..9bab712dc5c0 100644
--- a/include/asm-um/elf-i386.h
+++ b/include/asm-um/elf-i386.h
@@ -5,7 +5,7 @@
 #ifndef __UM_ELF_I386_H
 #define __UM_ELF_I386_H
 
-#include "user.h"
+#include <asm/user.h>
 
 #define R_386_NONE	0
 #define R_386_32	1
diff --git a/include/asm-um/elf-x86_64.h b/include/asm-um/elf-x86_64.h
index 19309d001aa0..8a8246d03936 100644
--- a/include/asm-um/elf-x86_64.h
+++ b/include/asm-um/elf-x86_64.h
@@ -8,6 +8,27 @@
 
 #include <asm/user.h>
 
+/* x86-64 relocation types, taken from asm-x86_64/elf.h */
+#define R_X86_64_NONE		0	/* No reloc */
+#define R_X86_64_64		1	/* Direct 64 bit  */
+#define R_X86_64_PC32		2	/* PC relative 32 bit signed */
+#define R_X86_64_GOT32		3	/* 32 bit GOT entry */
+#define R_X86_64_PLT32		4	/* 32 bit PLT address */
+#define R_X86_64_COPY		5	/* Copy symbol at runtime */
+#define R_X86_64_GLOB_DAT	6	/* Create GOT entry */
+#define R_X86_64_JUMP_SLOT	7	/* Create PLT entry */
+#define R_X86_64_RELATIVE	8	/* Adjust by program base */
+#define R_X86_64_GOTPCREL	9	/* 32 bit signed pc relative
+					   offset to GOT */
+#define R_X86_64_32		10	/* Direct 32 bit zero extended */
+#define R_X86_64_32S		11	/* Direct 32 bit sign extended */
+#define R_X86_64_16		12	/* Direct 16 bit zero extended */
+#define R_X86_64_PC16		13	/* 16 bit sign extended pc relative */
+#define R_X86_64_8		14	/* Direct 8 bit sign extended  */
+#define R_X86_64_PC8		15	/* 8 bit sign extended pc relative */
+
+#define R_X86_64_NUM		16
+
 typedef unsigned long elf_greg_t;
 
 #define ELF_NGREG (sizeof (struct user_regs_struct) / sizeof(elf_greg_t))
@@ -44,7 +65,8 @@ typedef struct { } elf_fpregset_t;
 } while (0)
 
 #ifdef TIF_IA32 /* XXX */
-        clear_thread_flag(TIF_IA32); \
+#error XXX, indeed
+        clear_thread_flag(TIF_IA32);
 #endif
 
 #define USE_ELF_CORE_DUMP
diff --git a/include/asm-x86_64/ioctl32.h b/include/asm-x86_64/ioctl32.h
deleted file mode 100644
index e69de29bb2d1..000000000000
--- a/include/asm-x86_64/ioctl32.h
+++ /dev/null
diff --git a/include/linux/err.h b/include/linux/err.h
index 17c55df13615..ff71d2af5da3 100644
--- a/include/linux/err.h
+++ b/include/linux/err.h
@@ -13,6 +13,8 @@
  * This should be a per-architecture thing, to allow different
  * error and pointer decisions.
  */
+#define IS_ERR_VALUE(x) unlikely((x) > (unsigned long)-1000L)
+
 static inline void *ERR_PTR(long error)
 {
 	return (void *) error;
@@ -25,7 +27,7 @@ static inline long PTR_ERR(const void *ptr)
 
 static inline long IS_ERR(const void *ptr)
 {
-	return unlikely((unsigned long)ptr > (unsigned long)-1000L);
+	return IS_ERR_VALUE((unsigned long)ptr);
 }
 
 #endif /* _LINUX_ERR_H */
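
Note on the err.h change: factoring the comparison out as IS_ERR_VALUE() lets callers test a raw unsigned long (such as an address returned by get_unmapped_area(), reworked later in this merge) the same way IS_ERR() tests a pointer. A userspace-compilable sketch of the idea (the error value is made up; unlikely() is dropped):

	#include <stdio.h>

	/* same test as the patch, minus the unlikely() hint */
	#define IS_ERR_VALUE(x) ((x) > (unsigned long)-1000L)

	static unsigned long get_area(int fail)
	{
		return fail ? (unsigned long)-12 /* -ENOMEM */ : 0x400000UL;
	}

	int main(void)
	{
		unsigned long addr = get_area(1);
		if (IS_ERR_VALUE(addr))
			printf("error %ld\n", (long)addr);	/* prints: error -12 */
		return 0;
	}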
diff --git a/include/linux/mmc/protocol.h b/include/linux/mmc/protocol.h
index 7b904c5102f6..896342817b97 100644
--- a/include/linux/mmc/protocol.h
+++ b/include/linux/mmc/protocol.h
@@ -195,6 +195,33 @@ struct _mmc_csd {
 #define MMC_VDD_35_36	0x00800000	/* VDD voltage 3.5 ~ 3.6 */
 #define MMC_CARD_BUSY	0x80000000	/* Card Power up status bit */
 
+/*
+ * Card Command Classes (CCC)
+ */
+#define CCC_BASIC		(1<<0)	/* (0) Basic protocol functions */
+					/* (CMD0,1,2,3,4,7,9,10,12,13,15) */
+#define CCC_STREAM_READ		(1<<1)	/* (1) Stream read commands */
+					/* (CMD11) */
+#define CCC_BLOCK_READ		(1<<2)	/* (2) Block read commands */
+					/* (CMD16,17,18) */
+#define CCC_STREAM_WRITE	(1<<3)	/* (3) Stream write commands */
+					/* (CMD20) */
+#define CCC_BLOCK_WRITE		(1<<4)	/* (4) Block write commands */
+					/* (CMD16,24,25,26,27) */
+#define CCC_ERASE		(1<<5)	/* (5) Ability to erase blocks */
+					/* (CMD32,33,34,35,36,37,38,39) */
+#define CCC_WRITE_PROT		(1<<6)	/* (6) Able to write protect blocks */
+					/* (CMD28,29,30) */
+#define CCC_LOCK_CARD		(1<<7)	/* (7) Able to lock down card */
+					/* (CMD16,CMD42) */
+#define CCC_APP_SPEC		(1<<8)	/* (8) Application specific */
+					/* (CMD55,56,57,ACMD*) */
+#define CCC_IO_MODE		(1<<9)	/* (9) I/O mode */
+					/* (CMD5,39,40,52,53) */
+#define CCC_SWITCH		(1<<10)	/* (10) High speed switch */
+					/* (CMD6,34,35,36,37,50) */
+					/* (11) Reserved */
+					/* (CMD?) */
 
 /*
  * CSD field definitions
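
Note on the CCC bits: they mirror the card command class field of the CSD, so a driver can gate features on simple mask tests. A hedged sketch of such a check (the helper and its arguments are illustrative, not part of this patch):

	#include <stdbool.h>

	#define CCC_BLOCK_READ	(1 << 2)
	#define CCC_BLOCK_WRITE	(1 << 4)

	/* hypothetical helper: decide whether a block driver can bind to this card */
	static bool mmc_card_usable(unsigned int cmdclass, bool want_write)
	{
		if (!(cmdclass & CCC_BLOCK_READ))	/* needs CMD16,17,18 */
			return false;
		if (want_write && !(cmdclass & CCC_BLOCK_WRITE))	/* needs CMD24,25,... */
			return false;
		return true;
	}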
diff --git a/include/linux/spinlock.h b/include/linux/spinlock.h
index e895f3eaf53a..d6ba068719b6 100644
--- a/include/linux/spinlock.h
+++ b/include/linux/spinlock.h
@@ -248,7 +248,7 @@ typedef struct {
 
 #define _spin_trylock_bh(lock)	({preempt_disable(); local_bh_disable(); \
 				_raw_spin_trylock(lock) ? \
-				1 : ({preempt_enable(); local_bh_enable(); 0;});})
+				1 : ({preempt_enable_no_resched(); local_bh_enable(); 0;});})
 
 #define _spin_lock(lock)	\
 do { \
@@ -383,7 +383,7 @@ do { \
 #define _spin_unlock_bh(lock) \
 do { \
 	_raw_spin_unlock(lock); \
-	preempt_enable(); \
+	preempt_enable_no_resched(); \
 	local_bh_enable(); \
 	__release(lock); \
 } while (0)
@@ -391,7 +391,7 @@ do { \
 #define _write_unlock_bh(lock) \
 do { \
 	_raw_write_unlock(lock); \
-	preempt_enable(); \
+	preempt_enable_no_resched(); \
 	local_bh_enable(); \
 	__release(lock); \
 } while (0)
@@ -423,8 +423,8 @@ do { \
 #define _read_unlock_bh(lock) \
 do { \
 	_raw_read_unlock(lock); \
+	preempt_enable_no_resched(); \
 	local_bh_enable(); \
-	preempt_enable(); \
 	__release(lock); \
 } while (0)
 
diff --git a/include/linux/vmalloc.h b/include/linux/vmalloc.h
index 3a358c895188..6409d9cf5965 100644
--- a/include/linux/vmalloc.h
+++ b/include/linux/vmalloc.h
@@ -41,6 +41,7 @@ extern struct vm_struct *get_vm_area(unsigned long size, unsigned long flags);
 extern struct vm_struct *__get_vm_area(unsigned long size, unsigned long flags,
 					unsigned long start, unsigned long end);
 extern struct vm_struct *remove_vm_area(void *addr);
+extern struct vm_struct *__remove_vm_area(void *addr);
 extern int map_vm_area(struct vm_struct *area, pgprot_t prot,
 			struct page ***pages);
 extern void unmap_vm_area(struct vm_struct *area);
diff --git a/include/net/act_generic.h b/include/net/act_generic.h
index 95b120781c14..c9daa7e52300 100644
--- a/include/net/act_generic.h
+++ b/include/net/act_generic.h
@@ -2,8 +2,8 @@
2 * include/net/act_generic.h 2 * include/net/act_generic.h
3 * 3 *
4*/ 4*/
5#ifndef ACT_GENERIC_H 5#ifndef _NET_ACT_GENERIC_H
6#define ACT_GENERIC_H 6#define _NET_ACT_GENERIC_H
7static inline int tcf_defact_release(struct tcf_defact *p, int bind) 7static inline int tcf_defact_release(struct tcf_defact *p, int bind)
8{ 8{
9 int ret = 0; 9 int ret = 0;
diff --git a/include/scsi/scsi_transport_spi.h b/include/scsi/scsi_transport_spi.h
index 6dcf497bf46d..a30d6cd4c0e8 100644
--- a/include/scsi/scsi_transport_spi.h
+++ b/include/scsi/scsi_transport_spi.h
@@ -27,8 +27,11 @@ struct scsi_transport_template;
 
 struct spi_transport_attrs {
 	int period;		/* value in the PPR/SDTR command */
+	int min_period;
 	int offset;
+	int max_offset;
 	unsigned int width:1;	/* 0 - narrow, 1 - wide */
+	unsigned int max_width:1;
 	unsigned int iu:1;	/* Information Units enabled */
 	unsigned int dt:1;	/* DT clocking enabled */
 	unsigned int qas:1;	/* Quick Arbitration and Selection enabled */
@@ -63,8 +66,11 @@ struct spi_host_attrs {
 
 /* accessor functions */
 #define spi_period(x)	(((struct spi_transport_attrs *)&(x)->starget_data)->period)
+#define spi_min_period(x) (((struct spi_transport_attrs *)&(x)->starget_data)->min_period)
 #define spi_offset(x)	(((struct spi_transport_attrs *)&(x)->starget_data)->offset)
+#define spi_max_offset(x) (((struct spi_transport_attrs *)&(x)->starget_data)->max_offset)
 #define spi_width(x)	(((struct spi_transport_attrs *)&(x)->starget_data)->width)
+#define spi_max_width(x) (((struct spi_transport_attrs *)&(x)->starget_data)->max_width)
 #define spi_iu(x)	(((struct spi_transport_attrs *)&(x)->starget_data)->iu)
 #define spi_dt(x)	(((struct spi_transport_attrs *)&(x)->starget_data)->dt)
 #define spi_qas(x)	(((struct spi_transport_attrs *)&(x)->starget_data)->qas)
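
Note on the accessors: each one casts the target's opaque starget_data blob to the attrs struct, so the new limits are readable and writable through the same one-liner pattern (the domain-validation hunk earlier does DV_SET(offset, spi_max_offset(starget))). A reduced model of how the cast works (struct layouts are simplified for illustration):

	#include <assert.h>

	struct spi_transport_attrs { int period; int min_period; };
	struct scsi_target { unsigned long starget_data[8]; };	/* opaque per-target blob */

	#define spi_min_period(x) \
		(((struct spi_transport_attrs *)&(x)->starget_data)->min_period)

	int main(void)
	{
		struct scsi_target t = { { 0 } };
		spi_min_period(&t) = 10;		/* usable as an lvalue, as in the patch */
		assert(spi_min_period(&t) == 10);
		return 0;
	}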
diff --git a/kernel/sched.c b/kernel/sched.c
index 0dc3158667a2..66b2ed784822 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -4243,7 +4243,7 @@ static void move_task_off_dead_cpu(int dead_cpu, struct task_struct *tsk)
 
 	/* No more Mr. Nice Guy. */
 	if (dest_cpu == NR_CPUS) {
-		tsk->cpus_allowed = cpuset_cpus_allowed(tsk);
+		cpus_setall(tsk->cpus_allowed);
 		dest_cpu = any_online_cpu(tsk->cpus_allowed);
 
 		/*
diff --git a/kernel/spinlock.c b/kernel/spinlock.c
index e15ed17863f1..0c3f9d8bbe17 100644
--- a/kernel/spinlock.c
+++ b/kernel/spinlock.c
@@ -294,7 +294,7 @@ EXPORT_SYMBOL(_spin_unlock_irq);
 void __lockfunc _spin_unlock_bh(spinlock_t *lock)
 {
 	_raw_spin_unlock(lock);
-	preempt_enable();
+	preempt_enable_no_resched();
 	local_bh_enable();
 }
 EXPORT_SYMBOL(_spin_unlock_bh);
@@ -318,7 +318,7 @@ EXPORT_SYMBOL(_read_unlock_irq);
 void __lockfunc _read_unlock_bh(rwlock_t *lock)
 {
 	_raw_read_unlock(lock);
-	preempt_enable();
+	preempt_enable_no_resched();
 	local_bh_enable();
 }
 EXPORT_SYMBOL(_read_unlock_bh);
@@ -342,7 +342,7 @@ EXPORT_SYMBOL(_write_unlock_irq);
 void __lockfunc _write_unlock_bh(rwlock_t *lock)
 {
 	_raw_write_unlock(lock);
-	preempt_enable();
+	preempt_enable_no_resched();
 	local_bh_enable();
 }
 EXPORT_SYMBOL(_write_unlock_bh);
@@ -354,7 +354,7 @@ int __lockfunc _spin_trylock_bh(spinlock_t *lock)
 	if (_raw_spin_trylock(lock))
 		return 1;
 
-	preempt_enable();
+	preempt_enable_no_resched();
 	local_bh_enable();
 	return 0;
 }
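
Note on the spinlock.h and kernel/spinlock.c hunks: the apparent intent is that local_bh_enable() already contains a reschedule point, so a full preempt_enable() immediately before it would add a redundant resched check; the _no_resched variant just drops the count and leaves the check to local_bh_enable(). A userspace counting model of the new unlock order (a sketch under that reading, not kernel source):

	#include <assert.h>

	/* model of the preempt/bh counting in the *_bh unlock paths */
	static int preempt_count, bh_disable_count, resched_checks;

	static void preempt_enable_no_resched(void) { preempt_count--; }
	static void local_bh_enable(void)
	{
		bh_disable_count--;
		resched_checks++;	/* local_bh_enable() already checks for resched */
	}

	int main(void)
	{
		preempt_count++; bh_disable_count++;	/* taken in _spin_lock_bh() */
		/* old order: a full preempt_enable() would add a second check here */
		preempt_enable_no_resched();
		local_bh_enable();
		assert(preempt_count == 0 && bh_disable_count == 0 && resched_checks == 1);
		return 0;
	}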
diff --git a/mm/filemap.c b/mm/filemap.c
index 47263ac3e4ea..1d33fec7bac6 100644
--- a/mm/filemap.c
+++ b/mm/filemap.c
@@ -1004,7 +1004,7 @@ __generic_file_aio_read(struct kiocb *iocb, const struct iovec *iov,
 	if (pos < size) {
 		retval = generic_file_direct_IO(READ, iocb,
 					iov, pos, nr_segs);
-		if (retval >= 0 && !is_sync_kiocb(iocb))
+		if (retval > 0 && !is_sync_kiocb(iocb))
 			retval = -EIOCBQUEUED;
 		if (retval > 0)
 			*ppos = pos + retval;
diff --git a/mm/mmap.c b/mm/mmap.c
index 63df2d698414..de54acd9942f 100644
--- a/mm/mmap.c
+++ b/mm/mmap.c
@@ -1302,37 +1302,40 @@ unsigned long
 get_unmapped_area(struct file *file, unsigned long addr, unsigned long len,
 		unsigned long pgoff, unsigned long flags)
 {
-	if (flags & MAP_FIXED) {
-		unsigned long ret;
+	unsigned long ret;
 
-		if (addr > TASK_SIZE - len)
-			return -ENOMEM;
-		if (addr & ~PAGE_MASK)
-			return -EINVAL;
-		if (file && is_file_hugepages(file))  {
-			/*
-			 * Check if the given range is hugepage aligned, and
-			 * can be made suitable for hugepages.
-			 */
-			ret = prepare_hugepage_range(addr, len);
-		} else {
-			/*
-			 * Ensure that a normal request is not falling in a
-			 * reserved hugepage range.  For some archs like IA-64,
-			 * there is a separate region for hugepages.
-			 */
-			ret = is_hugepage_only_range(current->mm, addr, len);
-		}
-		if (ret)
-			return -EINVAL;
-		return addr;
-	}
+	if (!(flags & MAP_FIXED)) {
+		unsigned long (*get_area)(struct file *, unsigned long, unsigned long, unsigned long, unsigned long);
 
-	if (file && file->f_op && file->f_op->get_unmapped_area)
-		return file->f_op->get_unmapped_area(file, addr, len,
-						pgoff, flags);
+		get_area = current->mm->get_unmapped_area;
+		if (file && file->f_op && file->f_op->get_unmapped_area)
+			get_area = file->f_op->get_unmapped_area;
+		addr = get_area(file, addr, len, pgoff, flags);
+		if (IS_ERR_VALUE(addr))
+			return addr;
+	}
 
-	return current->mm->get_unmapped_area(file, addr, len, pgoff, flags);
+	if (addr > TASK_SIZE - len)
+		return -ENOMEM;
+	if (addr & ~PAGE_MASK)
+		return -EINVAL;
+	if (file && is_file_hugepages(file))  {
+		/*
+		 * Check if the given range is hugepage aligned, and
+		 * can be made suitable for hugepages.
+		 */
+		ret = prepare_hugepage_range(addr, len);
+	} else {
+		/*
+		 * Ensure that a normal request is not falling in a
+		 * reserved hugepage range.  For some archs like IA-64,
+		 * there is a separate region for hugepages.
+		 */
+		ret = is_hugepage_only_range(current->mm, addr, len);
+	}
+	if (ret)
+		return -EINVAL;
+	return addr;
 }
 
 EXPORT_SYMBOL(get_unmapped_area);
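
Note on the rewrite: MAP_FIXED and non-fixed requests now share one validation tail, and the only remaining branch is which get_unmapped_area implementation to call. A compressed model of the dispatch (error encoding as in the err.h sketch earlier; addresses and stubs are hypothetical):

	#include <stdio.h>

	#define IS_ERR_VALUE(x) ((x) > (unsigned long)-1000L)

	typedef unsigned long (*get_area_t)(unsigned long addr);

	static unsigned long mm_default(unsigned long addr)    { return 0x10000000UL; }
	static unsigned long file_specific(unsigned long addr) { return 0x20000000UL; }

	static unsigned long pick_area(get_area_t file_hook, unsigned long addr)
	{
		get_area_t get_area = mm_default;	/* current->mm->get_unmapped_area */

		if (file_hook)				/* file->f_op->get_unmapped_area */
			get_area = file_hook;
		addr = get_area(addr);
		if (IS_ERR_VALUE(addr))
			return addr;			/* propagate -errno before the common checks */
		/* ... common TASK_SIZE/alignment/hugepage checks would follow ... */
		return addr;
	}

	int main(void)
	{
		printf("%lx\n", pick_area(NULL, 0));		/* 10000000 */
		printf("%lx\n", pick_area(file_specific, 0));	/* 20000000 */
		return 0;
	}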
diff --git a/mm/vmalloc.c b/mm/vmalloc.c
index 2bd83e5c2bbf..8ff16a1eee6a 100644
--- a/mm/vmalloc.c
+++ b/mm/vmalloc.c
@@ -248,31 +248,20 @@ struct vm_struct *get_vm_area(unsigned long size, unsigned long flags)
 	return __get_vm_area(size, flags, VMALLOC_START, VMALLOC_END);
 }
 
-/**
- *	remove_vm_area  -  find and remove a contingous kernel virtual area
- *
- *	@addr:		base address
- *
- *	Search for the kernel VM area starting at @addr, and remove it.
- *	This function returns the found VM area, but using it is NOT safe
- *	on SMP machines.
- */
-struct vm_struct *remove_vm_area(void *addr)
+/* Caller must hold vmlist_lock */
+struct vm_struct *__remove_vm_area(void *addr)
 {
 	struct vm_struct **p, *tmp;
 
-	write_lock(&vmlist_lock);
 	for (p = &vmlist ; (tmp = *p) != NULL ;p = &tmp->next) {
 		if (tmp->addr == addr)
 			goto found;
 	}
-	write_unlock(&vmlist_lock);
 	return NULL;
 
 found:
 	unmap_vm_area(tmp);
 	*p = tmp->next;
-	write_unlock(&vmlist_lock);
 
 	/*
 	 * Remove the guard page.
@@ -281,6 +270,24 @@ found:
 	return tmp;
 }
 
+/**
+ *	remove_vm_area  -  find and remove a contingous kernel virtual area
+ *
+ *	@addr:		base address
+ *
+ *	Search for the kernel VM area starting at @addr, and remove it.
+ *	This function returns the found VM area, but using it is NOT safe
+ *	on SMP machines, except for its size or flags.
+ */
+struct vm_struct *remove_vm_area(void *addr)
+{
+	struct vm_struct *v;
+	write_lock(&vmlist_lock);
+	v = __remove_vm_area(addr);
+	write_unlock(&vmlist_lock);
+	return v;
+}
+
 void __vunmap(void *addr, int deallocate_pages)
 {
 	struct vm_struct *area;
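
Note on the split: moving the body into a __-prefixed worker follows the usual kernel convention for lock-free variants. Callers that already hold vmlist_lock call __remove_vm_area() directly; everyone else keeps the old interface. Schematically (a sketch with a pthread rwlock standing in for vmlist_lock):

	#include <pthread.h>

	static pthread_rwlock_t vmlist_lock = PTHREAD_RWLOCK_INITIALIZER;

	/* caller must hold vmlist_lock */
	static void *__remove_vm_area(void *addr)
	{
		/* ... unlink the area from the list, as in the patch ... */
		return addr;
	}

	static void *remove_vm_area(void *addr)
	{
		void *v;
		pthread_rwlock_wrlock(&vmlist_lock);	/* write_lock(&vmlist_lock) */
		v = __remove_vm_area(addr);
		pthread_rwlock_unlock(&vmlist_lock);	/* write_unlock(&vmlist_lock) */
		return v;
	}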
diff --git a/net/ipv4/ip_output.c b/net/ipv4/ip_output.c
index daebd93fd8a0..760dc8238d65 100644
--- a/net/ipv4/ip_output.c
+++ b/net/ipv4/ip_output.c
@@ -490,6 +490,14 @@ int ip_fragment(struct sk_buff *skb, int (*output)(struct sk_buff*))
 		/* Partially cloned skb? */
 		if (skb_shared(frag))
 			goto slow_path;
+
+		BUG_ON(frag->sk);
+		if (skb->sk) {
+			sock_hold(skb->sk);
+			frag->sk = skb->sk;
+			frag->destructor = sock_wfree;
+			skb->truesize -= frag->truesize;
+		}
 	}
 
 	/* Everything is OK. Generate! */
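
Note on the added block (mirrored in the ip6_fragment() hunk further down): each fragment is handed back to the owning socket, so the skb->truesize accounting must balance; what the head skb gives up, the fragments now carry. In miniature:

	#include <assert.h>

	struct model_skb { int truesize; int owned; };

	int main(void)
	{
		struct model_skb head = { .truesize = 3000, .owned = 1 };
		struct model_skb frag = { .truesize = 1200, .owned = 0 };

		/* as in the patch: frag becomes socket-owned, head sheds its share */
		frag.owned = 1;				/* frag->sk = skb->sk; sock_hold() */
		head.truesize -= frag.truesize;		/* skb->truesize -= frag->truesize */

		assert(head.truesize + frag.truesize == 3000);	/* total charge unchanged */
		return 0;
	}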
diff --git a/net/ipv4/ipvs/ip_vs_xmit.c b/net/ipv4/ipvs/ip_vs_xmit.c
index faa6176bbeb1..de21da00057f 100644
--- a/net/ipv4/ipvs/ip_vs_xmit.c
+++ b/net/ipv4/ipvs/ip_vs_xmit.c
@@ -508,7 +508,6 @@ ip_vs_icmp_xmit(struct sk_buff *skb, struct ip_vs_conn *cp,
 		rc = NF_ACCEPT;
 		/* do not touch skb anymore */
 		atomic_inc(&cp->in_pkts);
-		__ip_vs_conn_put(cp);
 		goto out;
 	}
 
diff --git a/net/ipv4/netfilter/ip_conntrack_core.c b/net/ipv4/netfilter/ip_conntrack_core.c
index 28d9425d5c39..09e824622977 100644
--- a/net/ipv4/netfilter/ip_conntrack_core.c
+++ b/net/ipv4/netfilter/ip_conntrack_core.c
@@ -940,37 +940,25 @@ void ip_ct_refresh_acct(struct ip_conntrack *ct,
 struct sk_buff *
 ip_ct_gather_frags(struct sk_buff *skb, u_int32_t user)
 {
-	struct sock *sk = skb->sk;
 #ifdef CONFIG_NETFILTER_DEBUG
 	unsigned int olddebug = skb->nf_debug;
 #endif
 
-	if (sk) {
-		sock_hold(sk);
-		skb_orphan(skb);
-	}
+	skb_orphan(skb);
 
 	local_bh_disable();
 	skb = ip_defrag(skb, user);
 	local_bh_enable();
 
-	if (!skb) {
-		if (sk)
-			sock_put(sk);
-		return skb;
-	}
-
-	if (sk) {
-		skb_set_owner_w(skb, sk);
-		sock_put(sk);
-	}
-
-	ip_send_check(skb->nh.iph);
-	skb->nfcache |= NFC_ALTERED;
+	if (skb) {
+		ip_send_check(skb->nh.iph);
+		skb->nfcache |= NFC_ALTERED;
 #ifdef CONFIG_NETFILTER_DEBUG
-	/* Packet path as if nothing had happened. */
-	skb->nf_debug = olddebug;
+		/* Packet path as if nothing had happened. */
+		skb->nf_debug = olddebug;
 #endif
+	}
+
 	return skb;
 }
 
diff --git a/net/ipv6/ip6_output.c b/net/ipv6/ip6_output.c
index 0f0711417c9d..b78a53586804 100644
--- a/net/ipv6/ip6_output.c
+++ b/net/ipv6/ip6_output.c
@@ -552,13 +552,17 @@ static int ip6_fragment(struct sk_buff *skb, int (*output)(struct sk_buff *))
552 skb_headroom(frag) < hlen) 552 skb_headroom(frag) < hlen)
553 goto slow_path; 553 goto slow_path;
554 554
555 /* Correct socket ownership. */
556 if (frag->sk == NULL)
557 goto slow_path;
558
559 /* Partially cloned skb? */ 555 /* Partially cloned skb? */
560 if (skb_shared(frag)) 556 if (skb_shared(frag))
561 goto slow_path; 557 goto slow_path;
558
559 BUG_ON(frag->sk);
560 if (skb->sk) {
561 sock_hold(skb->sk);
562 frag->sk = skb->sk;
563 frag->destructor = sock_wfree;
564 skb->truesize -= frag->truesize;
565 }
562 } 566 }
563 567
564 err = 0; 568 err = 0;
@@ -1116,12 +1120,10 @@ int ip6_push_pending_frames(struct sock *sk)
 		tail_skb = &(tmp_skb->next);
 		skb->len += tmp_skb->len;
 		skb->data_len += tmp_skb->len;
-#if 0 /* Logically correct, but useless work, ip_fragment() will have to undo */
 		skb->truesize += tmp_skb->truesize;
 		__sock_put(tmp_skb->sk);
 		tmp_skb->destructor = NULL;
 		tmp_skb->sk = NULL;
-#endif
 	}
 
 	ipv6_addr_copy(final_dst, &fl->fl6_dst);
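
Note: the two ip6_output.c hunks are halves of one accounting scheme.
Re-enabling the previously #if 0'd block makes ip6_push_pending_frames()
fold each queued skb's truesize into the head skb and drop its socket
reference, so frag_list members reach ip6_fragment() unowned -- exactly
what the new BUG_ON(frag->sk) asserts.  The fast path then re-creates
per-fragment ownership with a sock_hold() that the sock_wfree destructor
later releases.  A toy ledger for that pairing (illustrative names):

	#include <assert.h>

	struct sketch_sock { int refcnt; };
	struct sketch_skb  { struct sketch_sock *sk; };

	static void own_frag(struct sketch_skb *frag, struct sketch_sock *sk)
	{
		sk->refcnt++;                   /* sock_hold(skb->sk) */
		frag->sk = sk;                  /* destructor will be sock_wfree */
	}

	static void free_frag(struct sketch_skb *frag)
	{
		if (frag->sk)
			frag->sk->refcnt--;     /* sock_wfree drops the hold */
	}

	int main(void)
	{
		struct sketch_sock sk = { .refcnt = 1 };
		struct sketch_skb f1 = { 0 }, f2 = { 0 };

		own_frag(&f1, &sk);
		own_frag(&f2, &sk);
		free_frag(&f1);
		free_frag(&f2);
		assert(sk.refcnt == 1);         /* balanced: no leak, no underflow */
		return 0;
	}
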
diff --git a/net/netlink/af_netlink.c b/net/netlink/af_netlink.c
index 733bf52cef3e..e41ce458c2a9 100644
--- a/net/netlink/af_netlink.c
+++ b/net/netlink/af_netlink.c
@@ -735,11 +735,15 @@ static inline int do_one_broadcast(struct sock *sk,
 
 	sock_hold(sk);
 	if (p->skb2 == NULL) {
-		if (atomic_read(&p->skb->users) != 1) {
+		if (skb_shared(p->skb)) {
 			p->skb2 = skb_clone(p->skb, p->allocation);
 		} else {
-			p->skb2 = p->skb;
-			atomic_inc(&p->skb->users);
+			p->skb2 = skb_get(p->skb);
+			/*
+			 * skb ownership may have been set when
+			 * delivered to a previous socket.
+			 */
+			skb_orphan(p->skb2);
 		}
 	}
 	if (p->skb2 == NULL) {
@@ -785,11 +789,12 @@ int netlink_broadcast(struct sock *ssk, struct sk_buff *skb, u32 pid,
 	sk_for_each_bound(sk, node, &nl_table[ssk->sk_protocol].mc_list)
 		do_one_broadcast(sk, &info);
 
+	kfree_skb(skb);
+
 	netlink_unlock_table();
 
 	if (info.skb2)
 		kfree_skb(info.skb2);
-	kfree_skb(skb);
 
 	if (info.delivered) {
 		if (info.congested && (allocation & __GFP_WAIT))
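
Note: atomic_read(&p->skb->users) != 1 was an open-coded skb_shared(),
and the manual atomic_inc() an open-coded skb_get(); the rewrite also
orphans the reused buffer, since an earlier do_one_broadcast() pass may
have attached it to another receiving socket.  The second hunk drops
the caller's own reference while the table is still locked.  A
userspace sketch of the clone-or-reuse decision (toy types and helpers,
not the kernel API):

	#include <stdlib.h>
	#include <string.h>

	struct sketch_skb { int users; void *sk; };

	static int  sketch_shared(const struct sketch_skb *skb) { return skb->users > 1; }
	static void sketch_orphan(struct sketch_skb *skb)       { skb->sk = NULL; }

	static struct sketch_skb *sketch_get(struct sketch_skb *skb)
	{
		skb->users++;                   /* like skb_get() */
		return skb;
	}

	/* Clone when shared (another holder may still read the buffer);
	 * otherwise reuse it after taking a reference and forgetting any
	 * owner a previous delivery may have set. */
	static struct sketch_skb *clone_or_reuse(struct sketch_skb *skb)
	{
		struct sketch_skb *skb2;

		if (sketch_shared(skb)) {
			skb2 = malloc(sizeof(*skb2));   /* stands in for skb_clone() */
			if (skb2) {
				memcpy(skb2, skb, sizeof(*skb2));
				skb2->users = 1;
			}
		} else {
			skb2 = sketch_get(skb);
			sketch_orphan(skb2);
		}
		return skb2;
	}

	int main(void)
	{
		struct sketch_skb skb = { .users = 1, .sk = &skb };

		return clone_or_reuse(&skb) == &skb ? 0 : 1;   /* unshared: reused */
	}
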
diff --git a/net/unix/af_unix.c b/net/unix/af_unix.c
index c478fc8db776..c420eba4876b 100644
--- a/net/unix/af_unix.c
+++ b/net/unix/af_unix.c
@@ -770,33 +770,12 @@ static int unix_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len)
 		err = path_lookup(sunaddr->sun_path, LOOKUP_PARENT, &nd);
 		if (err)
 			goto out_mknod_parent;
-		/*
-		 * Yucky last component or no last component at all?
-		 * (foo/., foo/.., /////)
-		 */
-		err = -EEXIST;
-		if (nd.last_type != LAST_NORM)
-			goto out_mknod;
-		/*
-		 * Lock the directory.
-		 */
-		down(&nd.dentry->d_inode->i_sem);
-		/*
-		 * Do the final lookup.
-		 */
-		dentry = lookup_hash(&nd.last, nd.dentry);
+
+		dentry = lookup_create(&nd, 0);
 		err = PTR_ERR(dentry);
 		if (IS_ERR(dentry))
 			goto out_mknod_unlock;
-		err = -ENOENT;
-		/*
-		 * Special case - lookup gave negative, but... we had foo/bar/
-		 * From the vfs_mknod() POV we just have a negative dentry -
-		 * all is fine. Let's be bastards - you had / on the end, you've
-		 * been asking for (non-existent) directory. -ENOENT for you.
-		 */
-		if (nd.last.name[nd.last.len] && !dentry->d_inode)
-			goto out_mknod_dput;
+
 		/*
 		 * All right, let's create it.
 		 */
@@ -845,7 +824,6 @@ out_mknod_dput:
 	dput(dentry);
 out_mknod_unlock:
 	up(&nd.dentry->d_inode->i_sem);
-out_mknod:
 	path_release(&nd);
 out_mknod_parent:
 	if (err==-EEXIST)
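
Note: everything deleted from unix_bind() is what the lookup_create()
helper already does -- reject a non-normal last component, lock the
parent directory, do the final lookup, and turn a trailing slash on a
not-yet-existing name into -ENOENT -- which is also why the out_mknod
label loses its only user in the second hunk.  A userspace model of
just those last-component checks (a hypothetical helper mirroring the
deleted logic, not fs/namei.c itself):

	#include <assert.h>
	#include <string.h>

	#define SK_EEXIST 17
	#define SK_ENOENT  2

	/* Reject ".", "..", and empty components outright; reject a
	 * trailing slash on a name that does not exist yet. */
	static int check_last_component(const char *last, int trailing_slash,
					int target_exists)
	{
		if (!*last || !strcmp(last, ".") || !strcmp(last, ".."))
			return -SK_EEXIST;      /* foo/., foo/.., ///// */
		if (trailing_slash && !target_exists)
			return -SK_ENOENT;      /* asked for a directory */
		return 0;
	}

	int main(void)
	{
		assert(check_last_component("..", 0, 0)   == -SK_EEXIST);
		assert(check_last_component("sock", 1, 0) == -SK_ENOENT);
		assert(check_last_component("sock", 0, 0) == 0);
		return 0;
	}
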
diff --git a/net/xfrm/xfrm_algo.c b/net/xfrm/xfrm_algo.c
index 080aae243ce0..2f4531fcaca2 100644
--- a/net/xfrm/xfrm_algo.c
+++ b/net/xfrm/xfrm_algo.c
@@ -698,7 +698,7 @@ int skb_cow_data(struct sk_buff *skb, int tailbits, struct sk_buff **trailer)
 			return -ENOMEM;
 
 		if (skb1->sk)
-			skb_set_owner_w(skb, skb1->sk);
+			skb_set_owner_w(skb2, skb1->sk);
 
 		/* Looking around. Are we still alive?
 		 * OK, link new skb, drop old one */
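
Note: a one-character fix with real consequences.  In skb_cow_data(),
skb1 is the buffer being replaced and skb2 its freshly allocated
substitute; setting the owner on the old buffer left the replacement
unowned.  A toy illustration of why ownership must follow the
replacement (illustrative types only):

	#include <assert.h>

	struct sketch_skb { void *sk; };

	static void set_owner(struct sketch_skb *skb, void *sk) { skb->sk = sk; }

	int main(void)
	{
		int sock;
		struct sketch_skb skb1 = { .sk = &sock };   /* old buffer, owned */
		struct sketch_skb skb2 = { .sk = NULL };    /* its replacement */

		if (skb1.sk)
			set_owner(&skb2, skb1.sk);          /* the fixed line */
		assert(skb2.sk == &sock);
		return 0;
	}
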
diff --git a/net/xfrm/xfrm_user.c b/net/xfrm/xfrm_user.c
index 5ddda2c98af9..97509011c274 100644
--- a/net/xfrm/xfrm_user.c
+++ b/net/xfrm/xfrm_user.c
@@ -34,14 +34,21 @@ static int verify_one_alg(struct rtattr **xfrma, enum xfrm_attr_type_t type)
 {
 	struct rtattr *rt = xfrma[type - 1];
 	struct xfrm_algo *algp;
+	int len;
 
 	if (!rt)
 		return 0;
 
-	if ((rt->rta_len - sizeof(*rt)) < sizeof(*algp))
+	len = (rt->rta_len - sizeof(*rt)) - sizeof(*algp);
+	if (len < 0)
 		return -EINVAL;
 
 	algp = RTA_DATA(rt);
+
+	len -= (algp->alg_key_len + 7U) / 8;
+	if (len < 0)
+		return -EINVAL;
+
 	switch (type) {
 	case XFRMA_ALG_AUTH:
 		if (!algp->alg_key_len &&
@@ -162,6 +169,7 @@ static int attach_one_algo(struct xfrm_algo **algpp, u8 *props,
 	struct rtattr *rta = u_arg;
 	struct xfrm_algo *p, *ualg;
 	struct xfrm_algo_desc *algo;
+	int len;
 
 	if (!rta)
 		return 0;
@@ -173,11 +181,12 @@ static int attach_one_algo(struct xfrm_algo **algpp, u8 *props,
 		return -ENOSYS;
 	*props = algo->desc.sadb_alg_id;
 
-	p = kmalloc(sizeof(*ualg) + ualg->alg_key_len, GFP_KERNEL);
+	len = sizeof(*ualg) + (ualg->alg_key_len + 7U) / 8;
+	p = kmalloc(len, GFP_KERNEL);
 	if (!p)
 		return -ENOMEM;
 
-	memcpy(p, ualg, sizeof(*ualg) + ualg->alg_key_len);
+	memcpy(p, ualg, len);
 	*algpp = p;
 	return 0;
 }
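
Note: both xfrm_user.c hunks fix the same unit error: alg_key_len is a
bit count, so the key occupies (alg_key_len + 7U) / 8 bytes, rounding
up.  verify_one_alg() now also checks that the attribute really
contains that many key bytes, closing a read past the end of the
rtattr, and attach_one_algo() sizes its kmalloc()/memcpy() the same way
instead of treating bits as bytes.  The rounding in isolation, as a
standalone check:

	#include <assert.h>

	static unsigned int key_bytes(unsigned int alg_key_len_bits)
	{
		return (alg_key_len_bits + 7U) / 8;
	}

	int main(void)
	{
		assert(key_bytes(0)   == 0);
		assert(key_bytes(1)   == 1);    /* a partial byte rounds up */
		assert(key_bytes(128) == 16);   /* e.g. a 128-bit key */
		assert(key_bytes(129) == 17);
		return 0;
	}
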