472 files changed, 14434 insertions, 11225 deletions
diff --git a/Documentation/ABI/testing/sysfs-class-uwb_rc b/Documentation/ABI/testing/sysfs-class-uwb_rc
index a0d18dbeb7a9..6a5fd072849d 100644
--- a/Documentation/ABI/testing/sysfs-class-uwb_rc
+++ b/Documentation/ABI/testing/sysfs-class-uwb_rc
@@ -32,14 +32,16 @@ Contact: linux-usb@vger.kernel.org
 Description:
         Write:
 
-        <channel> [<bpst offset>]
+        <channel>
 
-        to start beaconing on a specific channel, or stop
-        beaconing if <channel> is -1. Valid channels depends
-        on the radio controller's supported band groups.
+        to force a specific channel to be used when beaconing,
+        or, if <channel> is -1, to prohibit beaconing. If
+        <channel> is 0, then the default channel selection
+        algorithm will be used. Valid channels depends on the
+        radio controller's supported band groups.
 
-        <bpst offset> may be used to try and join a specific
-        beacon group if more than one was found during a scan.
+        Reading returns the currently active channel, or -1 if
+        the radio controller is not beaconing.
 
 What:           /sys/class/uwb_rc/uwbN/scan
 Date:           July 2008
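As context for the interface change above, here is a minimal user-space sketch that exercises the new write/read semantics of the beacon attribute. The instance name uwb0 and the choice of channel 13 (the channel the old wusb-cbaf script used) are assumptions for illustration.

    /* Hedged sketch: force channel 13, then read back the active channel.
     * "uwb0" is an assumed instance name; substitute the uwbN node
     * actually present on the system. */
    #include <stdio.h>

    int main(void)
    {
            const char *path = "/sys/class/uwb_rc/uwb0/beacon";
            char buf[16];
            FILE *f;

            f = fopen(path, "w");
            if (!f) { perror(path); return 1; }
            fprintf(f, "13\n");     /* write <channel>; -1 stops, 0 = default selection */
            fclose(f);

            f = fopen(path, "r");
            if (!f) { perror(path); return 1; }
            if (fgets(buf, sizeof(buf), f))     /* active channel, or -1 */
                    printf("active channel: %s", buf);
            fclose(f);
            return 0;
    }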
diff --git a/Documentation/cpu-hotplug.txt b/Documentation/cpu-hotplug.txt
index 94bbc27ddd4f..9d620c153b04 100644
--- a/Documentation/cpu-hotplug.txt
+++ b/Documentation/cpu-hotplug.txt
@@ -50,16 +50,17 @@ additional_cpus=n (*) Use this to limit hotpluggable cpus. This option sets
                         cpu_possible_map = cpu_present_map + additional_cpus
 
 (*) Option valid only for following architectures
-- x86_64, ia64
+- ia64
 
-ia64 and x86_64 use the number of disabled local apics in ACPI tables MADT
-to determine the number of potentially hot-pluggable cpus. The implementation
-should only rely on this to count the # of cpus, but *MUST* not rely on the
-apicid values in those tables for disabled apics. In the event BIOS doesn't
-mark such hot-pluggable cpus as disabled entries, one could use this
-parameter "additional_cpus=x" to represent those cpus in the cpu_possible_map.
+ia64 uses the number of disabled local apics in ACPI tables MADT to
+determine the number of potentially hot-pluggable cpus. The implementation
+should only rely on this to count the # of cpus, but *MUST* not rely
+on the apicid values in those tables for disabled apics. In the event
+BIOS doesn't mark such hot-pluggable cpus as disabled entries, one could
+use this parameter "additional_cpus=x" to represent those cpus in the
+cpu_possible_map.
 
-possible_cpus=n [s390 only] use this to set hotpluggable cpus.
+possible_cpus=n [s390,x86_64] use this to set hotpluggable cpus.
                         This option sets possible_cpus bits in
                         cpu_possible_map. Thus keeping the numbers of bits set
                         constant even if the machine gets rebooted.
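The practical effect of possible_cpus=n is visible from user space: the possible mask stays constant across hotplug events while the online mask changes. A small sketch, assuming the kernel exposes the usual cpu mask files under /sys/devices/system/cpu:

    /* Hedged sketch: print the possible/present/online cpu masks.
     * The sysfs paths are assumptions about what the running kernel
     * exposes. */
    #include <stdio.h>

    static void show(const char *path)
    {
            char buf[64];
            FILE *f = fopen(path, "r");

            if (f && fgets(buf, sizeof(buf), f))
                    printf("%-36s %s", path, buf);
            if (f)
                    fclose(f);
    }

    int main(void)
    {
            show("/sys/devices/system/cpu/possible");
            show("/sys/devices/system/cpu/present");
            show("/sys/devices/system/cpu/online");
            return 0;
    }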
diff --git a/Documentation/feature-removal-schedule.txt b/Documentation/feature-removal-schedule.txt
index dc7c681e532c..df18d87c4837 100644
--- a/Documentation/feature-removal-schedule.txt
+++ b/Documentation/feature-removal-schedule.txt
@@ -310,15 +310,6 @@ Who: Krzysztof Piotr Oledzki <ole@ans.pl>
 
 ---------------------------
 
-What:   ide-scsi (BLK_DEV_IDESCSI)
-When:   2.6.29
-Why:    The 2.6 kernel supports direct writing to ide CD drives, which
-        eliminates the need for ide-scsi. The new method is more
-        efficient in every way.
-Who:    FUJITA Tomonori <fujita.tomonori@lab.ntt.co.jp>
-
----------------------------
-
 What:   i2c_attach_client(), i2c_detach_client(), i2c_driver->detach_client()
 When:   2.6.29 (ideally) or 2.6.30 (more likely)
 Why:    Deprecated by the new (standard) device driver binding model. Use
diff --git a/Documentation/filesystems/devpts.txt b/Documentation/filesystems/devpts.txt
new file mode 100644
index 000000000000..68dffd87f9b7
--- /dev/null
+++ b/Documentation/filesystems/devpts.txt
@@ -0,0 +1,132 @@
+
+To support containers, we now allow multiple instances of devpts filesystem,
+such that indices of ptys allocated in one instance are independent of indices
+allocated in other instances of devpts.
+
+To preserve backward compatibility, this support for multiple instances is
+enabled only if:
+
+       - CONFIG_DEVPTS_MULTIPLE_INSTANCES=y, and
+       - '-o newinstance' mount option is specified while mounting devpts
+
+IOW, devpts now supports both single-instance and multi-instance semantics.
+
+If CONFIG_DEVPTS_MULTIPLE_INSTANCES=n, there is no change in behavior and
+this referred to as the "legacy" mode. In this mode, the new mount options
+(-o newinstance and -o ptmxmode) will be ignored with a 'bogus option' message
+on console.
+
+If CONFIG_DEVPTS_MULTIPLE_INSTANCES=y and devpts is mounted without the
+'newinstance' option (as in current start-up scripts) the new mount binds
+to the initial kernel mount of devpts. This mode is referred to as the
+'single-instance' mode and the current, single-instance semantics are
+preserved, i.e PTYs are common across the system.
+
+The only difference between this single-instance mode and the legacy mode
+is the presence of new, '/dev/pts/ptmx' node with permissions 0000, which
+can safely be ignored.
+
+If CONFIG_DEVPTS_MULTIPLE_INSTANCES=y and 'newinstance' option is specified,
+the mount is considered to be in the multi-instance mode and a new instance
+of the devpts fs is created. Any ptys created in this instance are independent
+of ptys in other instances of devpts. Like in the single-instance mode, the
+/dev/pts/ptmx node is present. To effectively use the multi-instance mode,
+open of /dev/ptmx must be a redirected to '/dev/pts/ptmx' using a symlink or
+bind-mount.
+
+Eg: A container startup script could do the following:
+
+       $ chmod 0666 /dev/pts/ptmx
+       $ rm /dev/ptmx
+       $ ln -s pts/ptmx /dev/ptmx
+       $ ns_exec -cm /bin/bash
+
+       # We are now in new container
+
+       $ umount /dev/pts
+       $ mount -t devpts -o newinstance lxcpts /dev/pts
+       $ sshd -p 1234
+
+where 'ns_exec -cm /bin/bash' calls clone() with CLONE_NEWNS flag and execs
+/bin/bash in the child process. A pty created by the sshd is not visible in
+the original mount of /dev/pts.
+
+User-space changes
+------------------
+
+In multi-instance mode (i.e '-o newinstance' mount option is specified at least
+once), following user-space issues should be noted.
+
+1. If -o newinstance mount option is never used, /dev/pts/ptmx can be ignored
+   and no change is needed to system-startup scripts.
+
+2. To effectively use multi-instance mode (i.e -o newinstance is specified)
+   administrators or startup scripts should "redirect" open of /dev/ptmx to
+   /dev/pts/ptmx using either a bind mount or symlink.
+
+       $ mount -t devpts -o newinstance devpts /dev/pts
+
+   followed by either
+
+       $ rm /dev/ptmx
+       $ ln -s pts/ptmx /dev/ptmx
+       $ chmod 666 /dev/pts/ptmx
+   or
+       $ mount -o bind /dev/pts/ptmx /dev/ptmx
+
+3. The '/dev/ptmx -> pts/ptmx' symlink is the preferred method since it
+   enables better error-reporting and treats both single-instance and
+   multi-instance mounts similarly.
+
+   But this method requires that system-startup scripts set the mode of
+   /dev/pts/ptmx correctly (default mode is 0000). The scripts can set the
+   mode by, either
+
+       - adding ptmxmode mount option to devpts entry in /etc/fstab, or
+       - using 'chmod 0666 /dev/pts/ptmx'
+
+4. If multi-instance mode mount is needed for containers, but the system
+   startup scripts have not yet been updated, container-startup scripts
+   should bind mount /dev/ptmx to /dev/pts/ptmx to avoid breaking single-
+   instance mounts.
+
+   Or, in general, container-startup scripts should use:
+
+       mount -t devpts -o newinstance -o ptmxmode=0666 devpts /dev/pts
+       if [ ! -L /dev/ptmx ]; then
+               mount -o bind /dev/pts/ptmx /dev/ptmx
+       fi
+
+   When all devpts mounts are multi-instance, /dev/ptmx can permanently be
+   a symlink to pts/ptmx and the bind mount can be ignored.
+
+5. A multi-instance mount that is not accompanied by the /dev/ptmx to
+   /dev/pts/ptmx redirection would result in an unusable/unreachable pty.
+
+       mount -t devpts -o newinstance lxcpts /dev/pts
+
+   immediately followed by:
+
+       open("/dev/ptmx")
+
+   would create a pty, say /dev/pts/7, in the initial kernel mount.
+   But /dev/pts/7 would be invisible in the new mount.
+
+6. The permissions for /dev/pts/ptmx node should be specified when mounting
+   /dev/pts, using the '-o ptmxmode=%o' mount option (default is 0000).
+
+       mount -t devpts -o newinstance -o ptmxmode=0644 devpts /dev/pts
+
+   The permissions can be later be changed as usual with 'chmod'.
+
+       chmod 666 /dev/pts/ptmx
+
+7. A mount of devpts without the 'newinstance' option results in binding to
+   initial kernel mount. This behavior while preserving legacy semantics,
+   does not provide strict isolation in a container environment. i.e by
+   mounting devpts without the 'newinstance' option, a container could
+   get visibility into the 'host' or root container's devpts.
+
+   To workaround this and have strict isolation, all mounts of devpts,
+   including the mount in the root container, should use the newinstance
+   option.
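A container-startup binary can perform the same multi-instance mount directly with mount(2) instead of the shell commands above; the following is a minimal sketch under the same assumptions about the container's filesystem layout (/dev/pts exists and the caller is privileged in its namespace):

    /* Hedged sketch: equivalent of
     *   mount -t devpts -o newinstance,ptmxmode=0666 devpts /dev/pts */
    #include <stdio.h>
    #include <sys/mount.h>

    int main(void)
    {
            if (mount("devpts", "/dev/pts", "devpts", 0,
                      "newinstance,ptmxmode=0666") < 0) {
                    perror("mount devpts");
                    return 1;
            }
            return 0;
    }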
diff --git a/Documentation/usb/wusb-cbaf b/Documentation/usb/wusb-cbaf
index 2e78b70f3adc..426ddaaef96f 100644
--- a/Documentation/usb/wusb-cbaf
+++ b/Documentation/usb/wusb-cbaf
@@ -80,12 +80,6 @@ case $1 in
     start)
         for dev in ${2:-$hdevs}
         do
-            uwb_rc=$(readlink -f $dev/uwb_rc)
-            if cat $uwb_rc/beacon | grep -q -- "-1"
-            then
-                echo 13 0 > $uwb_rc/beacon
-                echo I: started beaconing on ch 13 on $(basename $uwb_rc) >&2
-            fi
             echo $host_CHID > $dev/wusb_chid
             echo I: started host $(basename $dev) >&2
         done
@@ -95,9 +89,6 @@ case $1 in
         do
             echo 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 > $dev/wusb_chid
             echo I: stopped host $(basename $dev) >&2
-            uwb_rc=$(readlink -f $dev/uwb_rc)
-            echo -1 | cat > $uwb_rc/beacon
-            echo I: stopped beaconing on $(basename $uwb_rc) >&2
         done
         ;;
     set-chid)
diff --git a/MAINTAINERS b/MAINTAINERS
index ceb32ee51f9d..befacf07729f 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -2049,6 +2049,12 @@ M: mikulas@artax.karlin.mff.cuni.cz
 W:      http://artax.karlin.mff.cuni.cz/~mikulas/vyplody/hpfs/index-e.cgi
 S:      Maintained
 
+HSO 3G Modem Driver (hso.c)
+P:      Denis Joseph Barrow
+M:      d.barow@option.com
+W:      http://www.pharscape.org
+S:      Maintained
+
 HTCPEN TOUCHSCREEN DRIVER
 P:      Pau Oliva Fora
 M:      pof@eslack.org
@@ -2146,11 +2152,6 @@ M: Gadi Oxman <gadio@netvision.net.il>
 L:      linux-kernel@vger.kernel.org
 S:      Maintained
 
-IDE-SCSI DRIVER
-L:      linux-ide@vger.kernel.org
-L:      linux-scsi@vger.kernel.org
-S:      Orphan
-
 IDLE-I7300
 P:      Andy Henroid
 M:      andrew.d.henroid@intel.com
@@ -2541,8 +2542,6 @@ W: http://kvm.qumranet.com
 S:      Supported
 
 KERNEL VIRTUAL MACHINE For Itanium (KVM/IA64)
-P:      Anthony Xu
-M:      anthony.xu@intel.com
 P:      Xiantao Zhang
 M:      xiantao.zhang@intel.com
 L:      kvm-ia64@vger.kernel.org
@@ -2635,13 +2634,13 @@ W: http://www.hansenpartnership.com/voyager
 S:      Maintained
 
 LINUX FOR POWERPC (32-BIT AND 64-BIT)
-P:      Paul Mackerras
-M:      paulus@samba.org
 P:      Benjamin Herrenschmidt
 M:      benh@kernel.crashing.org
+P:      Paul Mackerras
+M:      paulus@samba.org
 W:      http://www.penguinppc.org/
 L:      linuxppc-dev@ozlabs.org
-T:      git kernel.org:/pub/scm/linux/kernel/git/paulus/powerpc.git
+T:      git kernel.org:/pub/scm/linux/kernel/git/benh/powerpc.git
 S:      Supported
 
 LINUX FOR POWER MACINTOSH
diff --git a/arch/alpha/include/asm/smp.h b/arch/alpha/include/asm/smp.h
index 544c69af8168..547e90951cec 100644
--- a/arch/alpha/include/asm/smp.h
+++ b/arch/alpha/include/asm/smp.h
@@ -45,7 +45,6 @@ extern struct cpuinfo_alpha cpu_data[NR_CPUS];
 #define raw_smp_processor_id() (current_thread_info()->cpu)
 
 extern int smp_num_cpus;
-#define cpu_possible_map        cpu_present_map
 
 extern void arch_send_call_function_single_ipi(int cpu);
 extern void arch_send_call_function_ipi(cpumask_t mask);
diff --git a/arch/alpha/kernel/irq.c b/arch/alpha/kernel/irq.c
index c626a821cdcb..d0f1620007f7 100644
--- a/arch/alpha/kernel/irq.c
+++ b/arch/alpha/kernel/irq.c
@@ -55,7 +55,7 @@ int irq_select_affinity(unsigned int irq)
         last_cpu = cpu;
 
         irq_desc[irq].affinity = cpumask_of_cpu(cpu);
-        irq_desc[irq].chip->set_affinity(irq, cpumask_of_cpu(cpu));
+        irq_desc[irq].chip->set_affinity(irq, cpumask_of(cpu));
         return 0;
 }
 #endif /* CONFIG_SMP */
diff --git a/arch/alpha/kernel/process.c b/arch/alpha/kernel/process.c
index 351407e07e71..f238370c907d 100644
--- a/arch/alpha/kernel/process.c
+++ b/arch/alpha/kernel/process.c
@@ -94,6 +94,7 @@ common_shutdown_1(void *generic_ptr)
                 flags |= 0x00040000UL; /* "remain halted" */
                 *pflags = flags;
                 cpu_clear(cpuid, cpu_present_map);
+                cpu_clear(cpuid, cpu_possible_map);
                 halt();
         }
 #endif
@@ -120,6 +121,7 @@ common_shutdown_1(void *generic_ptr)
 #ifdef CONFIG_SMP
         /* Wait for the secondaries to halt. */
         cpu_clear(boot_cpuid, cpu_present_map);
+        cpu_clear(boot_cpuid, cpu_possible_map);
         while (cpus_weight(cpu_present_map))
                 barrier();
 #endif
diff --git a/arch/alpha/kernel/smp.c b/arch/alpha/kernel/smp.c
index cf7da10097bb..d953e510f68d 100644
--- a/arch/alpha/kernel/smp.c
+++ b/arch/alpha/kernel/smp.c
@@ -70,11 +70,6 @@ enum ipi_message_type {
 /* Set to a secondary's cpuid when it comes online.  */
 static int smp_secondary_alive __devinitdata = 0;
 
-/* Which cpus ids came online.  */
-cpumask_t cpu_online_map;
-
-EXPORT_SYMBOL(cpu_online_map);
-
 int smp_num_probed;             /* Internal processor count */
 int smp_num_cpus = 1;           /* Number that came online.  */
 EXPORT_SYMBOL(smp_num_cpus);
@@ -440,6 +435,7 @@ setup_smp(void)
                         ((char *)cpubase + i*hwrpb->processor_size);
                 if ((cpu->flags & 0x1cc) == 0x1cc) {
                         smp_num_probed++;
+                        cpu_set(i, cpu_possible_map);
                         cpu_set(i, cpu_present_map);
                         cpu->pal_revision = boot_cpu_palrev;
                 }
@@ -473,6 +469,7 @@ smp_prepare_cpus(unsigned int max_cpus)
 
         /* Nothing to do on a UP box, or when told not to.  */
         if (smp_num_probed == 1 || max_cpus == 0) {
+                cpu_possible_map = cpumask_of_cpu(boot_cpuid);
                 cpu_present_map = cpumask_of_cpu(boot_cpuid);
                 printk(KERN_INFO "SMP mode deactivated.\n");
                 return;
diff --git a/arch/alpha/kernel/sys_dp264.c b/arch/alpha/kernel/sys_dp264.c
index c71b0fd7a61f..ab44c164d9d4 100644
--- a/arch/alpha/kernel/sys_dp264.c
+++ b/arch/alpha/kernel/sys_dp264.c
@@ -177,19 +177,19 @@ cpu_set_irq_affinity(unsigned int irq, cpumask_t affinity)
 }
 
 static void
-dp264_set_affinity(unsigned int irq, cpumask_t affinity)
+dp264_set_affinity(unsigned int irq, const struct cpumask *affinity)
 {
         spin_lock(&dp264_irq_lock);
-        cpu_set_irq_affinity(irq, affinity);
+        cpu_set_irq_affinity(irq, *affinity);
         tsunami_update_irq_hw(cached_irq_mask);
         spin_unlock(&dp264_irq_lock);
 }
 
 static void
-clipper_set_affinity(unsigned int irq, cpumask_t affinity)
+clipper_set_affinity(unsigned int irq, const struct cpumask *affinity)
 {
         spin_lock(&dp264_irq_lock);
-        cpu_set_irq_affinity(irq - 16, affinity);
+        cpu_set_irq_affinity(irq - 16, *affinity);
         tsunami_update_irq_hw(cached_irq_mask);
         spin_unlock(&dp264_irq_lock);
 }
diff --git a/arch/alpha/kernel/sys_titan.c b/arch/alpha/kernel/sys_titan.c
index 52c91ccc1648..27f840a4ad3d 100644
--- a/arch/alpha/kernel/sys_titan.c
+++ b/arch/alpha/kernel/sys_titan.c
@@ -158,10 +158,10 @@ titan_cpu_set_irq_affinity(unsigned int irq, cpumask_t affinity)
 }
 
 static void
-titan_set_irq_affinity(unsigned int irq, cpumask_t affinity)
+titan_set_irq_affinity(unsigned int irq, const struct cpumask *affinity)
 {
         spin_lock(&titan_irq_lock);
-        titan_cpu_set_irq_affinity(irq - 16, affinity);
+        titan_cpu_set_irq_affinity(irq - 16, *affinity);
         titan_update_irq_hw(titan_cached_irq_mask);
         spin_unlock(&titan_irq_lock);
 }
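The alpha changes above are instances of a tree-wide migration that recurs throughout this merge: set_affinity handlers stop taking a cpumask_t by value and take a const struct cpumask * instead, with cpumask_of(cpu) replacing cpumask_of_cpu(cpu) at call sites. The motivation is stack usage: with large NR_CPUS the mask is a sizable bitmap, and pass-by-value copies it on every call. A hedged user-space model of the design point (names are illustrative, not the kernel's):

    #include <stdio.h>
    #include <string.h>

    #define NR_CPUS 4096

    struct cpumask {
            unsigned long bits[NR_CPUS / (8 * sizeof(unsigned long))];
    };

    /* old style: the whole bitmap (512 bytes here) is copied per call */
    static void set_affinity_by_value(unsigned int irq, struct cpumask mask)
    {
            (void)irq; (void)mask;
    }

    /* new style: a single pointer is passed */
    static void set_affinity_by_ptr(unsigned int irq, const struct cpumask *mask)
    {
            (void)irq; (void)mask;
    }

    int main(void)
    {
            struct cpumask m;

            memset(&m, 0, sizeof(m));
            m.bits[0] = 1UL;        /* model of cpumask_of(0) */
            printf("by-value copies %zu bytes per call\n", sizeof(m));
            set_affinity_by_value(10, m);
            set_affinity_by_ptr(10, &m);
            return 0;
    }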
diff --git a/arch/arm/common/gic.c b/arch/arm/common/gic.c
index 7fc9860a97d7..c6884ba1d5ed 100644
--- a/arch/arm/common/gic.c
+++ b/arch/arm/common/gic.c
@@ -109,11 +109,11 @@ static void gic_unmask_irq(unsigned int irq)
 }
 
 #ifdef CONFIG_SMP
-static void gic_set_cpu(unsigned int irq, cpumask_t mask_val)
+static void gic_set_cpu(unsigned int irq, const struct cpumask *mask_val)
 {
         void __iomem *reg = gic_dist_base(irq) + GIC_DIST_TARGET + (gic_irq(irq) & ~3);
         unsigned int shift = (irq % 4) * 8;
-        unsigned int cpu = first_cpu(mask_val);
+        unsigned int cpu = cpumask_first(mask_val);
         u32 val;
 
         spin_lock(&irq_controller_lock);
diff --git a/arch/arm/kernel/irq.c b/arch/arm/kernel/irq.c
index 2f3eb795fa6e..7141cee1fab7 100644
--- a/arch/arm/kernel/irq.c
+++ b/arch/arm/kernel/irq.c
@@ -174,7 +174,7 @@ static void route_irq(struct irq_desc *desc, unsigned int irq, unsigned int cpu)
         pr_debug("IRQ%u: moving from cpu%u to cpu%u\n", irq, desc->cpu, cpu);
 
         spin_lock_irq(&desc->lock);
-        desc->chip->set_affinity(irq, cpumask_of_cpu(cpu));
+        desc->chip->set_affinity(irq, cpumask_of(cpu));
         spin_unlock_irq(&desc->lock);
 }
 
diff --git a/arch/arm/kernel/smp.c b/arch/arm/kernel/smp.c
index 019237d21622..55fa7ff96a3e 100644
--- a/arch/arm/kernel/smp.c
+++ b/arch/arm/kernel/smp.c
@@ -34,16 +34,6 @@
 #include <asm/ptrace.h>
 
 /*
- * bitmask of present and online CPUs.
- * The present bitmask indicates that the CPU is physically present.
- * The online bitmask indicates that the CPU is up and running.
- */
-cpumask_t cpu_possible_map;
-EXPORT_SYMBOL(cpu_possible_map);
-cpumask_t cpu_online_map;
-EXPORT_SYMBOL(cpu_online_map);
-
-/*
  * as from 2.5, kernels no longer have an init_tasks structure
  * so we need some other way of telling a new secondary core
  * where to place its SVC stack
diff --git a/arch/arm/mach-at91/at91rm9200_time.c b/arch/arm/mach-at91/at91rm9200_time.c
index d140eae53ded..1ff1bda0a894 100644
--- a/arch/arm/mach-at91/at91rm9200_time.c
+++ b/arch/arm/mach-at91/at91rm9200_time.c
@@ -178,7 +178,6 @@ static struct clock_event_device clkevt = {
         .features       = CLOCK_EVT_FEAT_PERIODIC | CLOCK_EVT_FEAT_ONESHOT,
         .shift          = 32,
         .rating         = 150,
-        .cpumask        = CPU_MASK_CPU0,
         .set_next_event = clkevt32k_next_event,
         .set_mode       = clkevt32k_mode,
 };
@@ -206,7 +205,7 @@ void __init at91rm9200_timer_init(void)
         clkevt.mult = div_sc(AT91_SLOW_CLOCK, NSEC_PER_SEC, clkevt.shift);
         clkevt.max_delta_ns = clockevent_delta2ns(AT91_ST_ALMV, &clkevt);
         clkevt.min_delta_ns = clockevent_delta2ns(2, &clkevt) + 1;
-        clkevt.cpumask = cpumask_of_cpu(0);
+        clkevt.cpumask = cpumask_of(0);
         clockevents_register_device(&clkevt);
 
         /* register clocksource */
diff --git a/arch/arm/mach-at91/at91sam926x_time.c b/arch/arm/mach-at91/at91sam926x_time.c
index 122fd77ed580..b63e1d5f1bad 100644
--- a/arch/arm/mach-at91/at91sam926x_time.c
+++ b/arch/arm/mach-at91/at91sam926x_time.c
@@ -91,7 +91,6 @@ static struct clock_event_device pit_clkevt = {
         .features       = CLOCK_EVT_FEAT_PERIODIC,
         .shift          = 32,
         .rating         = 100,
-        .cpumask        = CPU_MASK_CPU0,
         .set_mode       = pit_clkevt_mode,
 };
 
@@ -173,6 +172,7 @@ static void __init at91sam926x_pit_init(void)
 
         /* Set up and register clockevents */
         pit_clkevt.mult = div_sc(pit_rate, NSEC_PER_SEC, pit_clkevt.shift);
+        pit_clkevt.cpumask = cpumask_of(0);
         clockevents_register_device(&pit_clkevt);
 }
 
diff --git a/arch/arm/mach-davinci/time.c b/arch/arm/mach-davinci/time.c
index 3b9a296b5c4b..f8bcd29d17a6 100644
--- a/arch/arm/mach-davinci/time.c
+++ b/arch/arm/mach-davinci/time.c
@@ -322,7 +322,7 @@ static void __init davinci_timer_init(void)
         clockevent_davinci.min_delta_ns =
                 clockevent_delta2ns(1, &clockevent_davinci);
 
-        clockevent_davinci.cpumask = cpumask_of_cpu(0);
+        clockevent_davinci.cpumask = cpumask_of(0);
         clockevents_register_device(&clockevent_davinci);
 }
 
diff --git a/arch/arm/mach-imx/time.c b/arch/arm/mach-imx/time.c
index a11765f5f23b..aff0ebcfa847 100644
--- a/arch/arm/mach-imx/time.c
+++ b/arch/arm/mach-imx/time.c
@@ -184,7 +184,7 @@ static int __init imx_clockevent_init(unsigned long rate)
         clockevent_imx.min_delta_ns =
                 clockevent_delta2ns(0xf, &clockevent_imx);
 
-        clockevent_imx.cpumask = cpumask_of_cpu(0);
+        clockevent_imx.cpumask = cpumask_of(0);
 
         clockevents_register_device(&clockevent_imx);
 
diff --git a/arch/arm/mach-ixp4xx/common.c b/arch/arm/mach-ixp4xx/common.c
index 7766f469456b..f4656d2ac8a8 100644
--- a/arch/arm/mach-ixp4xx/common.c
+++ b/arch/arm/mach-ixp4xx/common.c
@@ -487,7 +487,7 @@ static int __init ixp4xx_clockevent_init(void)
                 clockevent_delta2ns(0xfffffffe, &clockevent_ixp4xx);
         clockevent_ixp4xx.min_delta_ns =
                 clockevent_delta2ns(0xf, &clockevent_ixp4xx);
-        clockevent_ixp4xx.cpumask = cpumask_of_cpu(0);
+        clockevent_ixp4xx.cpumask = cpumask_of(0);
 
         clockevents_register_device(&clockevent_ixp4xx);
         return 0;
diff --git a/arch/arm/mach-msm/timer.c b/arch/arm/mach-msm/timer.c
index 345a14cb73c3..444d9c0f5ca6 100644
--- a/arch/arm/mach-msm/timer.c
+++ b/arch/arm/mach-msm/timer.c
@@ -182,7 +182,7 @@ static void __init msm_timer_init(void)
                         clockevent_delta2ns(0xf0000000 >> clock->shift, ce);
                 /* 4 gets rounded down to 3 */
                 ce->min_delta_ns = clockevent_delta2ns(4, ce);
-                ce->cpumask = cpumask_of_cpu(0);
+                ce->cpumask = cpumask_of(0);
 
                 cs->mult = clocksource_hz2mult(clock->freq, cs->shift);
                 res = clocksource_register(cs);
diff --git a/arch/arm/mach-ns9xxx/time-ns9360.c b/arch/arm/mach-ns9xxx/time-ns9360.c
index a63424d083d9..41df69721769 100644
--- a/arch/arm/mach-ns9xxx/time-ns9360.c
+++ b/arch/arm/mach-ns9xxx/time-ns9360.c
@@ -173,7 +173,7 @@ static void __init ns9360_timer_init(void)
         ns9360_clockevent_device.min_delta_ns =
                 clockevent_delta2ns(1, &ns9360_clockevent_device);
 
-        ns9360_clockevent_device.cpumask = cpumask_of_cpu(0);
+        ns9360_clockevent_device.cpumask = cpumask_of(0);
         clockevents_register_device(&ns9360_clockevent_device);
 
         setup_irq(IRQ_NS9360_TIMER0 + TIMER_CLOCKEVENT,
diff --git a/arch/arm/mach-omap1/time.c b/arch/arm/mach-omap1/time.c
index 2cf7e32bd293..495a32c287b4 100644
--- a/arch/arm/mach-omap1/time.c
+++ b/arch/arm/mach-omap1/time.c
@@ -173,7 +173,7 @@ static __init void omap_init_mpu_timer(unsigned long rate)
         clockevent_mpu_timer1.min_delta_ns =
                 clockevent_delta2ns(1, &clockevent_mpu_timer1);
 
-        clockevent_mpu_timer1.cpumask = cpumask_of_cpu(0);
+        clockevent_mpu_timer1.cpumask = cpumask_of(0);
         clockevents_register_device(&clockevent_mpu_timer1);
 }
 
diff --git a/arch/arm/mach-omap1/timer32k.c b/arch/arm/mach-omap1/timer32k.c
index 705367ece174..fd3f7396e162 100644
--- a/arch/arm/mach-omap1/timer32k.c
+++ b/arch/arm/mach-omap1/timer32k.c
@@ -187,7 +187,7 @@ static __init void omap_init_32k_timer(void)
         clockevent_32k_timer.min_delta_ns =
                 clockevent_delta2ns(1, &clockevent_32k_timer);
 
-        clockevent_32k_timer.cpumask = cpumask_of_cpu(0);
+        clockevent_32k_timer.cpumask = cpumask_of(0);
         clockevents_register_device(&clockevent_32k_timer);
 }
 
diff --git a/arch/arm/mach-omap2/timer-gp.c b/arch/arm/mach-omap2/timer-gp.c
index 589393bedade..ae6036300f60 100644
--- a/arch/arm/mach-omap2/timer-gp.c
+++ b/arch/arm/mach-omap2/timer-gp.c
@@ -120,7 +120,7 @@ static void __init omap2_gp_clockevent_init(void)
         clockevent_gpt.min_delta_ns =
                 clockevent_delta2ns(1, &clockevent_gpt);
 
-        clockevent_gpt.cpumask = cpumask_of_cpu(0);
+        clockevent_gpt.cpumask = cpumask_of(0);
         clockevents_register_device(&clockevent_gpt);
 }
 
diff --git a/arch/arm/mach-pxa/time.c b/arch/arm/mach-pxa/time.c
index 001624158519..95656a72268d 100644
--- a/arch/arm/mach-pxa/time.c
+++ b/arch/arm/mach-pxa/time.c
@@ -122,7 +122,6 @@ static struct clock_event_device ckevt_pxa_osmr0 = {
         .features       = CLOCK_EVT_FEAT_ONESHOT,
         .shift          = 32,
         .rating         = 200,
-        .cpumask        = CPU_MASK_CPU0,
         .set_next_event = pxa_osmr0_set_next_event,
         .set_mode       = pxa_osmr0_set_mode,
 };
@@ -163,6 +162,7 @@ static void __init pxa_timer_init(void)
                 clockevent_delta2ns(0x7fffffff, &ckevt_pxa_osmr0);
         ckevt_pxa_osmr0.min_delta_ns =
                 clockevent_delta2ns(MIN_OSCR_DELTA * 2, &ckevt_pxa_osmr0) + 1;
+        ckevt_pxa_osmr0.cpumask = cpumask_of(0);
 
         cksrc_pxa_oscr0.mult =
                 clocksource_hz2mult(clock_tick_rate, cksrc_pxa_oscr0.shift);
diff --git a/arch/arm/mach-realview/core.c b/arch/arm/mach-realview/core.c
index 5f1d55963ced..bd2aa4f16141 100644
--- a/arch/arm/mach-realview/core.c
+++ b/arch/arm/mach-realview/core.c
@@ -624,7 +624,7 @@ static struct clock_event_device timer0_clockevent = {
         .set_mode       = timer_set_mode,
         .set_next_event = timer_set_next_event,
         .rating         = 300,
-        .cpumask        = CPU_MASK_ALL,
+        .cpumask        = cpu_all_mask,
 };
 
 static void __init realview_clockevents_init(unsigned int timer_irq)
diff --git a/arch/arm/mach-realview/localtimer.c b/arch/arm/mach-realview/localtimer.c
index 9019ef2e5611..67d6d9cc68b2 100644
--- a/arch/arm/mach-realview/localtimer.c
+++ b/arch/arm/mach-realview/localtimer.c
@@ -154,7 +154,7 @@ void __cpuinit local_timer_setup(void)
         clk->set_mode           = local_timer_set_mode;
         clk->set_next_event     = local_timer_set_next_event;
         clk->irq                = IRQ_LOCALTIMER;
-        clk->cpumask            = cpumask_of_cpu(cpu);
+        clk->cpumask            = cpumask_of(cpu);
         clk->shift              = 20;
         clk->mult               = div_sc(mpcore_timer_rate, NSEC_PER_SEC, clk->shift);
         clk->max_delta_ns       = clockevent_delta2ns(0xffffffff, clk);
@@ -193,7 +193,7 @@ void __cpuinit local_timer_setup(void)
         clk->rating             = 200;
         clk->set_mode           = dummy_timer_set_mode;
         clk->broadcast          = smp_timer_broadcast;
-        clk->cpumask            = cpumask_of_cpu(cpu);
+        clk->cpumask            = cpumask_of(cpu);
 
         clockevents_register_device(clk);
 }
diff --git a/arch/arm/mach-sa1100/time.c b/arch/arm/mach-sa1100/time.c
index 8c5e727f3b75..711c0295c66f 100644
--- a/arch/arm/mach-sa1100/time.c
+++ b/arch/arm/mach-sa1100/time.c
@@ -73,7 +73,6 @@ static struct clock_event_device ckevt_sa1100_osmr0 = {
         .features       = CLOCK_EVT_FEAT_ONESHOT,
         .shift          = 32,
         .rating         = 200,
-        .cpumask        = CPU_MASK_CPU0,
         .set_next_event = sa1100_osmr0_set_next_event,
         .set_mode       = sa1100_osmr0_set_mode,
 };
@@ -110,6 +109,7 @@ static void __init sa1100_timer_init(void)
                 clockevent_delta2ns(0x7fffffff, &ckevt_sa1100_osmr0);
         ckevt_sa1100_osmr0.min_delta_ns =
                 clockevent_delta2ns(MIN_OSCR_DELTA * 2, &ckevt_sa1100_osmr0) + 1;
+        ckevt_sa1100_osmr0.cpumask = cpumask_of(0);
 
         cksrc_sa1100_oscr.mult =
                 clocksource_hz2mult(CLOCK_TICK_RATE, cksrc_sa1100_oscr.shift);
diff --git a/arch/arm/mach-versatile/core.c b/arch/arm/mach-versatile/core.c
index df25aa138509..1c43494f5c42 100644
--- a/arch/arm/mach-versatile/core.c
+++ b/arch/arm/mach-versatile/core.c
@@ -1005,7 +1005,7 @@ static void __init versatile_timer_init(void)
         timer0_clockevent.min_delta_ns =
                 clockevent_delta2ns(0xf, &timer0_clockevent);
 
-        timer0_clockevent.cpumask = cpumask_of_cpu(0);
+        timer0_clockevent.cpumask = cpumask_of(0);
         clockevents_register_device(&timer0_clockevent);
 }
 
diff --git a/arch/arm/oprofile/op_model_mpcore.c b/arch/arm/oprofile/op_model_mpcore.c
index 4de366e8b4c5..6d6bd5899240 100644
--- a/arch/arm/oprofile/op_model_mpcore.c
+++ b/arch/arm/oprofile/op_model_mpcore.c
@@ -260,10 +260,10 @@ static void em_stop(void)
 static void em_route_irq(int irq, unsigned int cpu)
 {
         struct irq_desc *desc = irq_desc + irq;
-        cpumask_t mask = cpumask_of_cpu(cpu);
+        const struct cpumask *mask = cpumask_of(cpu);
 
         spin_lock_irq(&desc->lock);
-        desc->affinity = mask;
+        desc->affinity = *mask;
         desc->chip->set_affinity(irq, mask);
         spin_unlock_irq(&desc->lock);
 }
diff --git a/arch/arm/plat-mxc/time.c b/arch/arm/plat-mxc/time.c
index fd28f5194f71..758a1293bcfa 100644
--- a/arch/arm/plat-mxc/time.c
+++ b/arch/arm/plat-mxc/time.c
@@ -190,7 +190,7 @@ static int __init mxc_clockevent_init(void)
         clockevent_mxc.min_delta_ns =
                 clockevent_delta2ns(0xff, &clockevent_mxc);
 
-        clockevent_mxc.cpumask = cpumask_of_cpu(0);
+        clockevent_mxc.cpumask = cpumask_of(0);
 
         clockevents_register_device(&clockevent_mxc);
 
diff --git a/arch/arm/plat-orion/time.c b/arch/arm/plat-orion/time.c
index 544d6b327f3a..6fa2923e6dca 100644
--- a/arch/arm/plat-orion/time.c
+++ b/arch/arm/plat-orion/time.c
@@ -149,7 +149,6 @@ static struct clock_event_device orion_clkevt = {
         .features       = CLOCK_EVT_FEAT_ONESHOT | CLOCK_EVT_FEAT_PERIODIC,
         .shift          = 32,
         .rating         = 300,
-        .cpumask        = CPU_MASK_CPU0,
         .set_next_event = orion_clkevt_next_event,
         .set_mode       = orion_clkevt_mode,
 };
@@ -199,5 +198,6 @@ void __init orion_time_init(unsigned int irq, unsigned int tclk)
         orion_clkevt.mult = div_sc(tclk, NSEC_PER_SEC, orion_clkevt.shift);
         orion_clkevt.max_delta_ns = clockevent_delta2ns(0xfffffffe, &orion_clkevt);
         orion_clkevt.min_delta_ns = clockevent_delta2ns(1, &orion_clkevt);
+        orion_clkevt.cpumask = cpumask_of(0);
         clockevents_register_device(&orion_clkevt);
 }
diff --git a/arch/avr32/kernel/time.c b/arch/avr32/kernel/time.c
index 283481d74a5b..0ff46bf873b0 100644
--- a/arch/avr32/kernel/time.c
+++ b/arch/avr32/kernel/time.c
@@ -106,7 +106,6 @@ static struct clock_event_device comparator = {
         .features       = CLOCK_EVT_FEAT_ONESHOT,
         .shift          = 16,
         .rating         = 50,
-        .cpumask        = CPU_MASK_CPU0,
         .set_next_event = comparator_next_event,
         .set_mode       = comparator_mode,
 };
@@ -134,6 +133,7 @@ void __init time_init(void)
         comparator.mult = div_sc(counter_hz, NSEC_PER_SEC, comparator.shift);
         comparator.max_delta_ns = clockevent_delta2ns((u32)~0, &comparator);
         comparator.min_delta_ns = clockevent_delta2ns(50, &comparator) + 1;
+        comparator.cpumask = cpumask_of(0);
 
         sysreg_write(COMPARE, 0);
         timer_irqaction.dev_id = &comparator;
diff --git a/arch/blackfin/kernel/time-ts.c b/arch/blackfin/kernel/time-ts.c
index e887efc86c29..0ed2badfd746 100644
--- a/arch/blackfin/kernel/time-ts.c
+++ b/arch/blackfin/kernel/time-ts.c
@@ -162,7 +162,6 @@ static struct clock_event_device clockevent_bfin = {
         .name           = "bfin_core_timer",
         .features       = CLOCK_EVT_FEAT_PERIODIC | CLOCK_EVT_FEAT_ONESHOT,
         .shift          = 32,
-        .cpumask        = CPU_MASK_CPU0,
         .set_next_event = bfin_timer_set_next_event,
         .set_mode       = bfin_timer_set_mode,
 };
@@ -193,6 +192,7 @@ static int __init bfin_clockevent_init(void)
         clockevent_bfin.mult = div_sc(timer_clk, NSEC_PER_SEC, clockevent_bfin.shift);
         clockevent_bfin.max_delta_ns = clockevent_delta2ns(-1, &clockevent_bfin);
         clockevent_bfin.min_delta_ns = clockevent_delta2ns(100, &clockevent_bfin);
+        clockevent_bfin.cpumask = cpumask_of(0);
         clockevents_register_device(&clockevent_bfin);
 
         return 0;
diff --git a/arch/cris/arch-v32/kernel/irq.c b/arch/cris/arch-v32/kernel/irq.c
index 173c141ac9ba..295131fee710 100644
--- a/arch/cris/arch-v32/kernel/irq.c
+++ b/arch/cris/arch-v32/kernel/irq.c
@@ -325,11 +325,11 @@ static void end_crisv32_irq(unsigned int irq)
 {
 }
 
-void set_affinity_crisv32_irq(unsigned int irq, cpumask_t dest)
+void set_affinity_crisv32_irq(unsigned int irq, const struct cpumask *dest)
 {
         unsigned long flags;
         spin_lock_irqsave(&irq_lock, flags);
-        irq_allocations[irq - FIRST_IRQ].mask = dest;
+        irq_allocations[irq - FIRST_IRQ].mask = *dest;
         spin_unlock_irqrestore(&irq_lock, flags);
 }
 
diff --git a/arch/cris/arch-v32/kernel/smp.c b/arch/cris/arch-v32/kernel/smp.c
index 52e16c6436f9..9dac17334640 100644
--- a/arch/cris/arch-v32/kernel/smp.c
+++ b/arch/cris/arch-v32/kernel/smp.c
@@ -29,11 +29,7 @@
 spinlock_t cris_atomic_locks[] = { [0 ... LOCK_COUNT - 1] = SPIN_LOCK_UNLOCKED};
 
 /* CPU masks */
-cpumask_t cpu_online_map = CPU_MASK_NONE;
-EXPORT_SYMBOL(cpu_online_map);
 cpumask_t phys_cpu_present_map = CPU_MASK_NONE;
-cpumask_t cpu_possible_map;
-EXPORT_SYMBOL(cpu_possible_map);
 EXPORT_SYMBOL(phys_cpu_present_map);
 
 /* Variables used during SMP boot */
diff --git a/arch/cris/include/asm/smp.h b/arch/cris/include/asm/smp.h
index dba33aba3e95..c615a06dd757 100644
--- a/arch/cris/include/asm/smp.h
+++ b/arch/cris/include/asm/smp.h
@@ -4,7 +4,6 @@
 #include <linux/cpumask.h>
 
 extern cpumask_t phys_cpu_present_map;
-extern cpumask_t cpu_possible_map;
 
 #define raw_smp_processor_id() (current_thread_info()->cpu)
 
diff --git a/arch/ia64/hp/sim/hpsim_irq.c b/arch/ia64/hp/sim/hpsim_irq.c
index c2f58ff364e7..cc0a3182db3c 100644
--- a/arch/ia64/hp/sim/hpsim_irq.c
+++ b/arch/ia64/hp/sim/hpsim_irq.c
@@ -22,7 +22,7 @@ hpsim_irq_noop (unsigned int irq)
 }
 
 static void
-hpsim_set_affinity_noop (unsigned int a, cpumask_t b)
+hpsim_set_affinity_noop(unsigned int a, const struct cpumask *b)
 {
 }
 
diff --git a/arch/ia64/include/asm/kvm.h b/arch/ia64/include/asm/kvm.h
index f38472ac2267..68aa6da807c1 100644
--- a/arch/ia64/include/asm/kvm.h
+++ b/arch/ia64/include/asm/kvm.h
@@ -166,8 +166,6 @@ struct saved_vpd {
 };
 
 struct kvm_regs {
-        char *saved_guest;
-        char *saved_stack;
         struct saved_vpd vpd;
         /*Arch-regs*/
         int mp_state;
@@ -200,6 +198,10 @@ struct kvm_regs {
         unsigned long fp_psr;       /*used for lazy float register */
         unsigned long saved_gp;
         /*for phycial  emulation */
+
+        union context saved_guest;
+
+        unsigned long reserved[64];     /* for future use */
 };
 
 struct kvm_sregs {
diff --git a/arch/ia64/include/asm/kvm_host.h b/arch/ia64/include/asm/kvm_host.h index c60d324da540..0560f3fae538 100644 --- a/arch/ia64/include/asm/kvm_host.h +++ b/arch/ia64/include/asm/kvm_host.h | |||
@@ -23,17 +23,6 @@ | |||
23 | #ifndef __ASM_KVM_HOST_H | 23 | #ifndef __ASM_KVM_HOST_H |
24 | #define __ASM_KVM_HOST_H | 24 | #define __ASM_KVM_HOST_H |
25 | 25 | ||
26 | |||
27 | #include <linux/types.h> | ||
28 | #include <linux/mm.h> | ||
29 | #include <linux/kvm.h> | ||
30 | #include <linux/kvm_para.h> | ||
31 | #include <linux/kvm_types.h> | ||
32 | |||
33 | #include <asm/pal.h> | ||
34 | #include <asm/sal.h> | ||
35 | |||
36 | #define KVM_MAX_VCPUS 4 | ||
37 | #define KVM_MEMORY_SLOTS 32 | 26 | #define KVM_MEMORY_SLOTS 32 |
38 | /* memory slots that does not exposed to userspace */ | 27 | /* memory slots that does not exposed to userspace */ |
39 | #define KVM_PRIVATE_MEM_SLOTS 4 | 28 | #define KVM_PRIVATE_MEM_SLOTS 4 |
@@ -50,70 +39,132 @@ | |||
50 | #define EXIT_REASON_EXTERNAL_INTERRUPT 6 | 39 | #define EXIT_REASON_EXTERNAL_INTERRUPT 6 |
51 | #define EXIT_REASON_IPI 7 | 40 | #define EXIT_REASON_IPI 7 |
52 | #define EXIT_REASON_PTC_G 8 | 41 | #define EXIT_REASON_PTC_G 8 |
42 | #define EXIT_REASON_DEBUG 20 | ||
53 | 43 | ||
54 | /*Define vmm address space and vm data space.*/ | 44 | /*Define vmm address space and vm data space.*/ |
55 | #define KVM_VMM_SIZE (16UL<<20) | 45 | #define KVM_VMM_SIZE (__IA64_UL_CONST(16)<<20) |
56 | #define KVM_VMM_SHIFT 24 | 46 | #define KVM_VMM_SHIFT 24 |
57 | #define KVM_VMM_BASE 0xD000000000000000UL | 47 | #define KVM_VMM_BASE 0xD000000000000000 |
58 | #define VMM_SIZE (8UL<<20) | 48 | #define VMM_SIZE (__IA64_UL_CONST(8)<<20) |
59 | 49 | ||
60 | /* | 50 | /* |
61 | * Define vm_buffer, used by PAL Services, base address. | 51 | * Define vm_buffer, used by PAL Services, base address. |
62 | * Note: vmbuffer is in the VMM-BLOCK, the size must be < 8M | 52 | * Note: vm_buffer is in the VMM-BLOCK, the size must be < 8M |
63 | */ | 53 | */ |
64 | #define KVM_VM_BUFFER_BASE (KVM_VMM_BASE + VMM_SIZE) | 54 | #define KVM_VM_BUFFER_BASE (KVM_VMM_BASE + VMM_SIZE) |
65 | #define KVM_VM_BUFFER_SIZE (8UL<<20) | 55 | #define KVM_VM_BUFFER_SIZE (__IA64_UL_CONST(8)<<20) |
66 | 56 | ||
67 | /*Define Virtual machine data layout.*/ | 57 | /* |
68 | #define KVM_VM_DATA_SHIFT 24 | 58 | * kvm guest's data area looks as follow: |
69 | #define KVM_VM_DATA_SIZE (1UL << KVM_VM_DATA_SHIFT) | 59 | * |
70 | #define KVM_VM_DATA_BASE (KVM_VMM_BASE + KVM_VMM_SIZE) | 60 | * +----------------------+ ------- KVM_VM_DATA_SIZE |
71 | 61 | * | vcpu[n]'s data | | ___________________KVM_STK_OFFSET | |
72 | 62 | * | | | / | | |
73 | #define KVM_P2M_BASE KVM_VM_DATA_BASE | 63 | * | .......... | | /vcpu's struct&stack | |
74 | #define KVM_P2M_OFS 0 | 64 | * | .......... | | /---------------------|---- 0 |
75 | #define KVM_P2M_SIZE (8UL << 20) | 65 | * | vcpu[5]'s data | | / vpd | |
76 | 66 | * | vcpu[4]'s data | |/-----------------------| | |
77 | #define KVM_VHPT_BASE (KVM_P2M_BASE + KVM_P2M_SIZE) | 67 | * | vcpu[3]'s data | / vtlb | |
78 | #define KVM_VHPT_OFS KVM_P2M_SIZE | 68 | * | vcpu[2]'s data | /|------------------------| |
79 | #define KVM_VHPT_BLOCK_SIZE (2UL << 20) | 69 | * | vcpu[1]'s data |/ | vhpt | |
80 | #define VHPT_SHIFT 18 | 70 | * | vcpu[0]'s data |____________________________| |
81 | #define VHPT_SIZE (1UL << VHPT_SHIFT) | 71 | * +----------------------+ | |
82 | #define VHPT_NUM_ENTRIES (1<<(VHPT_SHIFT-5)) | 72 | * | memory dirty log | | |
83 | 73 | * +----------------------+ | | |
84 | #define KVM_VTLB_BASE (KVM_VHPT_BASE+KVM_VHPT_BLOCK_SIZE) | 74 | * | vm's data struct | | |
85 | #define KVM_VTLB_OFS (KVM_VHPT_OFS+KVM_VHPT_BLOCK_SIZE) | 75 | * +----------------------+ | |
86 | #define KVM_VTLB_BLOCK_SIZE (1UL<<20) | 76 | * | | | |
87 | #define VTLB_SHIFT 17 | 77 | * | | | |
88 | #define VTLB_SIZE (1UL<<VTLB_SHIFT) | 78 | * | | | |
89 | #define VTLB_NUM_ENTRIES (1<<(VTLB_SHIFT-5)) | 79 | * | | | |
90 | 80 | * | | | | |
91 | #define KVM_VPD_BASE (KVM_VTLB_BASE+KVM_VTLB_BLOCK_SIZE) | 81 | * | | | |
92 | #define KVM_VPD_OFS (KVM_VTLB_OFS+KVM_VTLB_BLOCK_SIZE) | 82 | * | | | |
93 | #define KVM_VPD_BLOCK_SIZE (2UL<<20) | 83 | * | vm's p2m table | | |
94 | #define VPD_SHIFT 16 | 84 | * | | | |
95 | #define VPD_SIZE (1UL<<VPD_SHIFT) | 85 | * | | | |
96 | 86 | * | | | | | |
97 | #define KVM_VCPU_BASE (KVM_VPD_BASE+KVM_VPD_BLOCK_SIZE) | 87 | * vm's data->| | | | |
98 | #define KVM_VCPU_OFS (KVM_VPD_OFS+KVM_VPD_BLOCK_SIZE) | 88 | * +----------------------+ ------- 0 |
99 | #define KVM_VCPU_BLOCK_SIZE (2UL<<20) | 89 | * To support large memory, needs to increase the size of p2m. |
100 | #define VCPU_SHIFT 18 | 90 | * To support more vcpus, needs to ensure it has enough space to |
101 | #define VCPU_SIZE (1UL<<VCPU_SHIFT) | 91 | * hold vcpus' data. |
102 | #define MAX_VCPU_NUM KVM_VCPU_BLOCK_SIZE/VCPU_SIZE | 92 | */ |
103 | 93 | ||
104 | #define KVM_VM_BASE (KVM_VCPU_BASE+KVM_VCPU_BLOCK_SIZE) | 94 | #define KVM_VM_DATA_SHIFT 26 |
105 | #define KVM_VM_OFS (KVM_VCPU_OFS+KVM_VCPU_BLOCK_SIZE) | 95 | #define KVM_VM_DATA_SIZE (__IA64_UL_CONST(1) << KVM_VM_DATA_SHIFT) |
106 | #define KVM_VM_BLOCK_SIZE (1UL<<19) | 96 | #define KVM_VM_DATA_BASE (KVM_VMM_BASE + KVM_VM_DATA_SIZE) |
107 | 97 | ||
108 | #define KVM_MEM_DIRTY_LOG_BASE (KVM_VM_BASE+KVM_VM_BLOCK_SIZE) | 98 | #define KVM_P2M_BASE KVM_VM_DATA_BASE |
109 | #define KVM_MEM_DIRTY_LOG_OFS (KVM_VM_OFS+KVM_VM_BLOCK_SIZE) | 99 | #define KVM_P2M_SIZE (__IA64_UL_CONST(24) << 20) |
110 | #define KVM_MEM_DIRTY_LOG_SIZE (1UL<<19) | 100 | |
111 | 101 | #define VHPT_SHIFT 16 | |
112 | /* Get vpd, vhpt, tlb, vcpu, base*/ | 102 | #define VHPT_SIZE (__IA64_UL_CONST(1) << VHPT_SHIFT) |
113 | #define VPD_ADDR(n) (KVM_VPD_BASE+n*VPD_SIZE) | 103 | #define VHPT_NUM_ENTRIES (__IA64_UL_CONST(1) << (VHPT_SHIFT-5)) |
114 | #define VHPT_ADDR(n) (KVM_VHPT_BASE+n*VHPT_SIZE) | 104 | |
115 | #define VTLB_ADDR(n) (KVM_VTLB_BASE+n*VTLB_SIZE) | 105 | #define VTLB_SHIFT 16 |
116 | #define VCPU_ADDR(n) (KVM_VCPU_BASE+n*VCPU_SIZE) | 106 | #define VTLB_SIZE (__IA64_UL_CONST(1) << VTLB_SHIFT) |
107 | #define VTLB_NUM_ENTRIES (1UL << (VHPT_SHIFT-5)) | ||
108 | |||
109 | #define VPD_SHIFT 16 | ||
110 | #define VPD_SIZE (__IA64_UL_CONST(1) << VPD_SHIFT) | ||
111 | |||
112 | #define VCPU_STRUCT_SHIFT 16 | ||
113 | #define VCPU_STRUCT_SIZE (__IA64_UL_CONST(1) << VCPU_STRUCT_SHIFT) | ||
114 | |||
115 | #define KVM_STK_OFFSET VCPU_STRUCT_SIZE | ||
116 | |||
117 | #define KVM_VM_STRUCT_SHIFT 19 | ||
118 | #define KVM_VM_STRUCT_SIZE (__IA64_UL_CONST(1) << KVM_VM_STRUCT_SHIFT) | ||
119 | |||
120 | #define KVM_MEM_DIRY_LOG_SHIFT 19 | ||
121 | #define KVM_MEM_DIRTY_LOG_SIZE (__IA64_UL_CONST(1) << KVM_MEM_DIRY_LOG_SHIFT) | ||
122 | |||
123 | #ifndef __ASSEMBLY__ | ||
124 | |||
125 | /*Define the max vcpus and memory for Guests.*/ | ||
126 | #define KVM_MAX_VCPUS (KVM_VM_DATA_SIZE - KVM_P2M_SIZE - KVM_VM_STRUCT_SIZE -\ | ||
127 | KVM_MEM_DIRTY_LOG_SIZE) / sizeof(struct kvm_vcpu_data) | ||
128 | #define KVM_MAX_MEM_SIZE (KVM_P2M_SIZE >> 3 << PAGE_SHIFT) | ||
129 | |||
130 | #define VMM_LOG_LEN 256 | ||
131 | |||
132 | #include <linux/types.h> | ||
133 | #include <linux/mm.h> | ||
134 | #include <linux/kvm.h> | ||
135 | #include <linux/kvm_para.h> | ||
136 | #include <linux/kvm_types.h> | ||
137 | |||
138 | #include <asm/pal.h> | ||
139 | #include <asm/sal.h> | ||
140 | #include <asm/page.h> | ||
141 | |||
142 | struct kvm_vcpu_data { | ||
143 | char vcpu_vhpt[VHPT_SIZE]; | ||
144 | char vcpu_vtlb[VTLB_SIZE]; | ||
145 | char vcpu_vpd[VPD_SIZE]; | ||
146 | char vcpu_struct[VCPU_STRUCT_SIZE]; | ||
147 | }; | ||
148 | |||
149 | struct kvm_vm_data { | ||
150 | char kvm_p2m[KVM_P2M_SIZE]; | ||
151 | char kvm_vm_struct[KVM_VM_STRUCT_SIZE]; | ||
152 | char kvm_mem_dirty_log[KVM_MEM_DIRTY_LOG_SIZE]; | ||
153 | struct kvm_vcpu_data vcpu_data[KVM_MAX_VCPUS]; | ||
154 | }; | ||
155 | |||
156 | #define VCPU_BASE(n) KVM_VM_DATA_BASE + \ | ||
157 | offsetof(struct kvm_vm_data, vcpu_data[n]) | ||
158 | #define VM_BASE KVM_VM_DATA_BASE + \ | ||
159 | offsetof(struct kvm_vm_data, kvm_vm_struct) | ||
160 | #define KVM_MEM_DIRTY_LOG_BASE KVM_VM_DATA_BASE + \ | ||
161 | offsetof(struct kvm_vm_data, kvm_mem_dirty_log) | ||
162 | |||
163 | #define VHPT_BASE(n) (VCPU_BASE(n) + offsetof(struct kvm_vcpu_data, vcpu_vhpt)) | ||
164 | #define VTLB_BASE(n) (VCPU_BASE(n) + offsetof(struct kvm_vcpu_data, vcpu_vtlb)) | ||
165 | #define VPD_BASE(n) (VCPU_BASE(n) + offsetof(struct kvm_vcpu_data, vcpu_vpd)) | ||
166 | #define VCPU_STRUCT_BASE(n) (VCPU_BASE(n) + \ | ||
167 | offsetof(struct kvm_vcpu_data, vcpu_struct)) | ||
117 | 168 | ||
118 | /*IO section definitions*/ | 169 | /*IO section definitions*/ |
119 | #define IOREQ_READ 1 | 170 | #define IOREQ_READ 1 |
@@ -389,6 +440,7 @@ struct kvm_vcpu_arch { | |||
389 | 440 | ||
390 | unsigned long opcode; | 441 | unsigned long opcode; |
391 | unsigned long cause; | 442 | unsigned long cause; |
443 | char log_buf[VMM_LOG_LEN]; | ||
392 | union context host; | 444 | union context host; |
393 | union context guest; | 445 | union context guest; |
394 | }; | 446 | }; |
@@ -403,14 +455,13 @@ struct kvm_sal_data { | |||
403 | }; | 455 | }; |
404 | 456 | ||
405 | struct kvm_arch { | 457 | struct kvm_arch { |
458 | spinlock_t dirty_log_lock; | ||
459 | |||
406 | unsigned long vm_base; | 460 | unsigned long vm_base; |
407 | unsigned long metaphysical_rr0; | 461 | unsigned long metaphysical_rr0; |
408 | unsigned long metaphysical_rr4; | 462 | unsigned long metaphysical_rr4; |
409 | unsigned long vmm_init_rr; | 463 | unsigned long vmm_init_rr; |
410 | unsigned long vhpt_base; | 464 | |
411 | unsigned long vtlb_base; | ||
412 | unsigned long vpd_base; | ||
413 | spinlock_t dirty_log_lock; | ||
414 | struct kvm_ioapic *vioapic; | 465 | struct kvm_ioapic *vioapic; |
415 | struct kvm_vm_stat stat; | 466 | struct kvm_vm_stat stat; |
416 | struct kvm_sal_data rdv_sal_data; | 467 | struct kvm_sal_data rdv_sal_data; |
@@ -512,7 +563,7 @@ struct kvm_pt_regs { | |||
512 | 563 | ||
513 | static inline struct kvm_pt_regs *vcpu_regs(struct kvm_vcpu *v) | 564 | static inline struct kvm_pt_regs *vcpu_regs(struct kvm_vcpu *v) |
514 | { | 565 | { |
515 | return (struct kvm_pt_regs *) ((unsigned long) v + IA64_STK_OFFSET) - 1; | 566 | return (struct kvm_pt_regs *) ((unsigned long) v + KVM_STK_OFFSET) - 1; |
516 | } | 567 | } |
517 | 568 | ||
518 | typedef int kvm_vmm_entry(void); | 569 | typedef int kvm_vmm_entry(void); |
@@ -531,5 +582,6 @@ int kvm_pal_emul(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run); | |||
531 | void kvm_sal_emul(struct kvm_vcpu *vcpu); | 582 | void kvm_sal_emul(struct kvm_vcpu *vcpu); |
532 | 583 | ||
533 | static inline void kvm_inject_nmi(struct kvm_vcpu *vcpu) {} | 584 | static inline void kvm_inject_nmi(struct kvm_vcpu *vcpu) {} |
585 | #endif /* __ASSEMBLY__ */ | ||
534 | 586 | ||
535 | #endif | 587 | #endif |
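The hunks above replace the hand-maintained chain of *_OFS/*_BASE constants with a single struct kvm_vm_data laid out at KVM_VM_DATA_BASE, so every per-VM and per-vcpu address is now derived with offsetof() and stays correct if a field is resized or reordered. A minimal standalone sketch of that arithmetic, with the sizes copied from the macros above (the zero base stands in for KVM_VM_DATA_BASE; with these sizes KVM_MAX_VCPUS works out to 156):

    #include <stddef.h>
    #include <stdio.h>

    /* Sizes mirror VHPT_SIZE, VTLB_SIZE, VPD_SIZE, VCPU_STRUCT_SIZE,
     * KVM_P2M_SIZE, KVM_VM_STRUCT_SIZE and KVM_MEM_DIRTY_LOG_SIZE above. */
    #define VHPT_SIZE              (1UL << 16)
    #define VTLB_SIZE              (1UL << 16)
    #define VPD_SIZE               (1UL << 16)
    #define VCPU_STRUCT_SIZE       (1UL << 16)
    #define KVM_P2M_SIZE           (24UL << 20)
    #define KVM_VM_STRUCT_SIZE     (1UL << 19)
    #define KVM_MEM_DIRTY_LOG_SIZE (1UL << 19)

    struct kvm_vcpu_data {
            char vcpu_vhpt[VHPT_SIZE];
            char vcpu_vtlb[VTLB_SIZE];
            char vcpu_vpd[VPD_SIZE];
            char vcpu_struct[VCPU_STRUCT_SIZE];
    };

    struct kvm_vm_data {
            char kvm_p2m[KVM_P2M_SIZE];
            char kvm_vm_struct[KVM_VM_STRUCT_SIZE];
            char kvm_mem_dirty_log[KVM_MEM_DIRTY_LOG_SIZE];
            struct kvm_vcpu_data vcpu_data[4];  /* truncated for the sketch */
    };

    int main(void)
    {
            /* Same arithmetic as the VHPT_BASE(n) macro, base taken as 0 */
            unsigned long vhpt1 = offsetof(struct kvm_vm_data, vcpu_data[1]) +
                                  offsetof(struct kvm_vcpu_data, vcpu_vhpt);
            printf("vcpu1 vhpt at base+0x%lx\n", vhpt1);
            printf("one vcpu costs 0x%zx bytes\n", sizeof(struct kvm_vcpu_data));
            return 0;
    }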
diff --git a/arch/ia64/include/asm/smp.h b/arch/ia64/include/asm/smp.h index 12d96e0cd513..21c402365d0e 100644 --- a/arch/ia64/include/asm/smp.h +++ b/arch/ia64/include/asm/smp.h | |||
@@ -57,7 +57,6 @@ extern struct smp_boot_data { | |||
57 | 57 | ||
58 | extern char no_int_routing __devinitdata; | 58 | extern char no_int_routing __devinitdata; |
59 | 59 | ||
60 | extern cpumask_t cpu_online_map; | ||
61 | extern cpumask_t cpu_core_map[NR_CPUS]; | 60 | extern cpumask_t cpu_core_map[NR_CPUS]; |
62 | DECLARE_PER_CPU(cpumask_t, cpu_sibling_map); | 61 | DECLARE_PER_CPU(cpumask_t, cpu_sibling_map); |
63 | extern int smp_num_siblings; | 62 | extern int smp_num_siblings; |
diff --git a/arch/ia64/include/asm/topology.h b/arch/ia64/include/asm/topology.h index 35bcb641c9e5..a3cc9f65f954 100644 --- a/arch/ia64/include/asm/topology.h +++ b/arch/ia64/include/asm/topology.h | |||
@@ -55,7 +55,6 @@ | |||
55 | void build_cpu_to_node_map(void); | 55 | void build_cpu_to_node_map(void); |
56 | 56 | ||
57 | #define SD_CPU_INIT (struct sched_domain) { \ | 57 | #define SD_CPU_INIT (struct sched_domain) { \ |
58 | .span = CPU_MASK_NONE, \ | ||
59 | .parent = NULL, \ | 58 | .parent = NULL, \ |
60 | .child = NULL, \ | 59 | .child = NULL, \ |
61 | .groups = NULL, \ | 60 | .groups = NULL, \ |
@@ -80,7 +79,6 @@ void build_cpu_to_node_map(void); | |||
80 | 79 | ||
81 | /* sched_domains SD_NODE_INIT for IA64 NUMA machines */ | 80 | /* sched_domains SD_NODE_INIT for IA64 NUMA machines */ |
82 | #define SD_NODE_INIT (struct sched_domain) { \ | 81 | #define SD_NODE_INIT (struct sched_domain) { \ |
83 | .span = CPU_MASK_NONE, \ | ||
84 | .parent = NULL, \ | 82 | .parent = NULL, \ |
85 | .child = NULL, \ | 83 | .child = NULL, \ |
86 | .groups = NULL, \ | 84 | .groups = NULL, \ |
diff --git a/arch/ia64/kernel/iosapic.c b/arch/ia64/kernel/iosapic.c index 5c4674ae8aea..c8adecd5b416 100644 --- a/arch/ia64/kernel/iosapic.c +++ b/arch/ia64/kernel/iosapic.c | |||
@@ -330,25 +330,25 @@ unmask_irq (unsigned int irq) | |||
330 | 330 | ||
331 | 331 | ||
332 | static void | 332 | static void |
333 | iosapic_set_affinity (unsigned int irq, cpumask_t mask) | 333 | iosapic_set_affinity(unsigned int irq, const struct cpumask *mask) |
334 | { | 334 | { |
335 | #ifdef CONFIG_SMP | 335 | #ifdef CONFIG_SMP |
336 | u32 high32, low32; | 336 | u32 high32, low32; |
337 | int dest, rte_index; | 337 | int cpu, dest, rte_index; |
338 | int redir = (irq & IA64_IRQ_REDIRECTED) ? 1 : 0; | 338 | int redir = (irq & IA64_IRQ_REDIRECTED) ? 1 : 0; |
339 | struct iosapic_rte_info *rte; | 339 | struct iosapic_rte_info *rte; |
340 | struct iosapic *iosapic; | 340 | struct iosapic *iosapic; |
341 | 341 | ||
342 | irq &= (~IA64_IRQ_REDIRECTED); | 342 | irq &= (~IA64_IRQ_REDIRECTED); |
343 | 343 | ||
344 | cpus_and(mask, mask, cpu_online_map); | 344 | cpu = cpumask_first_and(cpu_online_mask, mask); |
345 | if (cpus_empty(mask)) | 345 | if (cpu >= nr_cpu_ids) |
346 | return; | 346 | return; |
347 | 347 | ||
348 | if (irq_prepare_move(irq, first_cpu(mask))) | 348 | if (irq_prepare_move(irq, cpu)) |
349 | return; | 349 | return; |
350 | 350 | ||
351 | dest = cpu_physical_id(first_cpu(mask)); | 351 | dest = cpu_physical_id(cpu); |
352 | 352 | ||
353 | if (!iosapic_intr_info[irq].count) | 353 | if (!iosapic_intr_info[irq].count) |
354 | return; /* not an IOSAPIC interrupt */ | 354 | return; /* not an IOSAPIC interrupt */ |
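This hunk, and the matching ones in irq.c, msi_ia64.c and smpboot.c below, all follow one pattern: the affinity hook now receives a const struct cpumask * and picks a target with cpumask_first_and() (or cpumask_any_and()) against cpu_online_mask, instead of AND-ing into a temporary cpumask_t on the stack. A userspace analogue of the new check, with a 64-bit word standing in for cpumask_t and nr_cpu_ids assumed equal to NR_CPUS (uses the GCC/Clang __builtin_ctzll):

    #include <stdint.h>
    #include <stdio.h>

    #define NR_CPUS 64
    static const int nr_cpu_ids = NR_CPUS; /* assumed equal for the sketch */

    /* Analogue of cpumask_first_and(): lowest bit set in both masks,
     * or nr_cpu_ids when the intersection is empty. */
    static int first_and(uint64_t a, uint64_t b)
    {
            uint64_t both = a & b;
            return both ? __builtin_ctzll(both) : nr_cpu_ids;
    }

    int main(void)
    {
            uint64_t online    = 0x5;   /* CPUs 0 and 2 are online */
            uint64_t requested = 0x6;   /* affinity asks for CPU 1 or 2 */
            int cpu = first_and(online, requested);

            if (cpu >= nr_cpu_ids)      /* the new bail-out condition */
                    puts("no online CPU in the requested mask");
            else
                    printf("route the interrupt to CPU %d\n", cpu); /* CPU 2 */
            return 0;
    }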
diff --git a/arch/ia64/kernel/irq.c b/arch/ia64/kernel/irq.c index 7fd18f54c056..0b6db53fedcf 100644 --- a/arch/ia64/kernel/irq.c +++ b/arch/ia64/kernel/irq.c | |||
@@ -133,7 +133,6 @@ unsigned int vectors_in_migration[NR_IRQS]; | |||
133 | */ | 133 | */ |
134 | static void migrate_irqs(void) | 134 | static void migrate_irqs(void) |
135 | { | 135 | { |
136 | cpumask_t mask; | ||
137 | irq_desc_t *desc; | 136 | irq_desc_t *desc; |
138 | int irq, new_cpu; | 137 | int irq, new_cpu; |
139 | 138 | ||
@@ -152,15 +151,14 @@ static void migrate_irqs(void) | |||
152 | if (desc->status == IRQ_PER_CPU) | 151 | if (desc->status == IRQ_PER_CPU) |
153 | continue; | 152 | continue; |
154 | 153 | ||
155 | cpus_and(mask, irq_desc[irq].affinity, cpu_online_map); | 154 | if (cpumask_any_and(&irq_desc[irq].affinity, cpu_online_mask) |
156 | if (any_online_cpu(mask) == NR_CPUS) { | 155 | >= nr_cpu_ids) { |
157 | /* | 156 | /* |
158 | * Save it for phase 2 processing | 157 | * Save it for phase 2 processing |
159 | */ | 158 | */ |
160 | vectors_in_migration[irq] = irq; | 159 | vectors_in_migration[irq] = irq; |
161 | 160 | ||
162 | new_cpu = any_online_cpu(cpu_online_map); | 161 | new_cpu = any_online_cpu(cpu_online_map); |
163 | mask = cpumask_of_cpu(new_cpu); | ||
164 | 162 | ||
165 | /* | 163 | /* |
166 | * All three are essential, currently WARN_ON.. maybe panic? | 164 | * All three are essential, currently WARN_ON.. maybe panic? |
@@ -168,7 +166,8 @@ static void migrate_irqs(void) | |||
168 | if (desc->chip && desc->chip->disable && | 166 | if (desc->chip && desc->chip->disable && |
169 | desc->chip->enable && desc->chip->set_affinity) { | 167 | desc->chip->enable && desc->chip->set_affinity) { |
170 | desc->chip->disable(irq); | 168 | desc->chip->disable(irq); |
171 | desc->chip->set_affinity(irq, mask); | 169 | desc->chip->set_affinity(irq, |
170 | cpumask_of(new_cpu)); | ||
172 | desc->chip->enable(irq); | 171 | desc->chip->enable(irq); |
173 | } else { | 172 | } else { |
174 | WARN_ON((!(desc->chip) || !(desc->chip->disable) || | 173 | WARN_ON((!(desc->chip) || !(desc->chip->disable) || |
diff --git a/arch/ia64/kernel/msi_ia64.c b/arch/ia64/kernel/msi_ia64.c index 702a09c13238..890339339035 100644 --- a/arch/ia64/kernel/msi_ia64.c +++ b/arch/ia64/kernel/msi_ia64.c | |||
@@ -49,11 +49,12 @@ | |||
49 | static struct irq_chip ia64_msi_chip; | 49 | static struct irq_chip ia64_msi_chip; |
50 | 50 | ||
51 | #ifdef CONFIG_SMP | 51 | #ifdef CONFIG_SMP |
52 | static void ia64_set_msi_irq_affinity(unsigned int irq, cpumask_t cpu_mask) | 52 | static void ia64_set_msi_irq_affinity(unsigned int irq, |
53 | const cpumask_t *cpu_mask) | ||
53 | { | 54 | { |
54 | struct msi_msg msg; | 55 | struct msi_msg msg; |
55 | u32 addr, data; | 56 | u32 addr, data; |
56 | int cpu = first_cpu(cpu_mask); | 57 | int cpu = first_cpu(*cpu_mask); |
57 | 58 | ||
58 | if (!cpu_online(cpu)) | 59 | if (!cpu_online(cpu)) |
59 | return; | 60 | return; |
@@ -166,12 +167,11 @@ void arch_teardown_msi_irq(unsigned int irq) | |||
166 | 167 | ||
167 | #ifdef CONFIG_DMAR | 168 | #ifdef CONFIG_DMAR |
168 | #ifdef CONFIG_SMP | 169 | #ifdef CONFIG_SMP |
169 | static void dmar_msi_set_affinity(unsigned int irq, cpumask_t mask) | 170 | static void dmar_msi_set_affinity(unsigned int irq, const struct cpumask *mask) |
170 | { | 171 | { |
171 | struct irq_cfg *cfg = irq_cfg + irq; | 172 | struct irq_cfg *cfg = irq_cfg + irq; |
172 | struct msi_msg msg; | 173 | struct msi_msg msg; |
173 | int cpu = first_cpu(mask); | 174 | int cpu = cpumask_first(mask); |
174 | |||
175 | 175 | ||
176 | if (!cpu_online(cpu)) | 176 | if (!cpu_online(cpu)) |
177 | return; | 177 | return; |
@@ -187,7 +187,7 @@ static void dmar_msi_set_affinity(unsigned int irq, cpumask_t mask) | |||
187 | msg.address_lo |= MSI_ADDR_DESTID_CPU(cpu_physical_id(cpu)); | 187 | msg.address_lo |= MSI_ADDR_DESTID_CPU(cpu_physical_id(cpu)); |
188 | 188 | ||
189 | dmar_msi_write(irq, &msg); | 189 | dmar_msi_write(irq, &msg); |
190 | irq_desc[irq].affinity = mask; | 190 | irq_desc[irq].affinity = *mask; |
191 | } | 191 | } |
192 | #endif /* CONFIG_SMP */ | 192 | #endif /* CONFIG_SMP */ |
193 | 193 | ||
diff --git a/arch/ia64/kernel/smpboot.c b/arch/ia64/kernel/smpboot.c index 1dcbb85fc4ee..11463994a7d5 100644 --- a/arch/ia64/kernel/smpboot.c +++ b/arch/ia64/kernel/smpboot.c | |||
@@ -131,12 +131,6 @@ struct task_struct *task_for_booting_cpu; | |||
131 | */ | 131 | */ |
132 | DEFINE_PER_CPU(int, cpu_state); | 132 | DEFINE_PER_CPU(int, cpu_state); |
133 | 133 | ||
134 | /* Bitmasks of currently online, and possible CPUs */ | ||
135 | cpumask_t cpu_online_map; | ||
136 | EXPORT_SYMBOL(cpu_online_map); | ||
137 | cpumask_t cpu_possible_map = CPU_MASK_NONE; | ||
138 | EXPORT_SYMBOL(cpu_possible_map); | ||
139 | |||
140 | cpumask_t cpu_core_map[NR_CPUS] __cacheline_aligned; | 134 | cpumask_t cpu_core_map[NR_CPUS] __cacheline_aligned; |
141 | EXPORT_SYMBOL(cpu_core_map); | 135 | EXPORT_SYMBOL(cpu_core_map); |
142 | DEFINE_PER_CPU_SHARED_ALIGNED(cpumask_t, cpu_sibling_map); | 136 | DEFINE_PER_CPU_SHARED_ALIGNED(cpumask_t, cpu_sibling_map); |
@@ -688,7 +682,7 @@ int migrate_platform_irqs(unsigned int cpu) | |||
688 | { | 682 | { |
689 | int new_cpei_cpu; | 683 | int new_cpei_cpu; |
690 | irq_desc_t *desc = NULL; | 684 | irq_desc_t *desc = NULL; |
691 | cpumask_t mask; | 685 | const struct cpumask *mask; |
692 | int retval = 0; | 686 | int retval = 0; |
693 | 687 | ||
694 | /* | 688 | /* |
@@ -701,7 +695,7 @@ int migrate_platform_irqs(unsigned int cpu) | |||
701 | * Now re-target the CPEI to a different processor | 695 | * Now re-target the CPEI to a different processor |
702 | */ | 696 | */ |
703 | new_cpei_cpu = any_online_cpu(cpu_online_map); | 697 | new_cpei_cpu = any_online_cpu(cpu_online_map); |
704 | mask = cpumask_of_cpu(new_cpei_cpu); | 698 | mask = cpumask_of(new_cpei_cpu); |
705 | set_cpei_target_cpu(new_cpei_cpu); | 699 | set_cpei_target_cpu(new_cpei_cpu); |
706 | desc = irq_desc + ia64_cpe_irq; | 700 | desc = irq_desc + ia64_cpe_irq; |
707 | /* | 701 | /* |
diff --git a/arch/ia64/kernel/topology.c b/arch/ia64/kernel/topology.c index c75b914f2d6b..a8d61a3e9a94 100644 --- a/arch/ia64/kernel/topology.c +++ b/arch/ia64/kernel/topology.c | |||
@@ -219,7 +219,7 @@ static ssize_t show_shared_cpu_map(struct cache_info *this_leaf, char *buf) | |||
219 | cpumask_t shared_cpu_map; | 219 | cpumask_t shared_cpu_map; |
220 | 220 | ||
221 | cpus_and(shared_cpu_map, this_leaf->shared_cpu_map, cpu_online_map); | 221 | cpus_and(shared_cpu_map, this_leaf->shared_cpu_map, cpu_online_map); |
222 | len = cpumask_scnprintf(buf, NR_CPUS+1, shared_cpu_map); | 222 | len = cpumask_scnprintf(buf, NR_CPUS+1, &shared_cpu_map); |
223 | len += sprintf(buf+len, "\n"); | 223 | len += sprintf(buf+len, "\n"); |
224 | return len; | 224 | return len; |
225 | } | 225 | } |
diff --git a/arch/ia64/kvm/Makefile b/arch/ia64/kvm/Makefile index 92cef66ca268..76464dc312e6 100644 --- a/arch/ia64/kvm/Makefile +++ b/arch/ia64/kvm/Makefile | |||
@@ -60,7 +60,7 @@ obj-$(CONFIG_KVM) += kvm.o | |||
60 | 60 | ||
61 | CFLAGS_vcpu.o += -mfixed-range=f2-f5,f12-f127 | 61 | CFLAGS_vcpu.o += -mfixed-range=f2-f5,f12-f127 |
62 | kvm-intel-objs = vmm.o vmm_ivt.o trampoline.o vcpu.o optvfault.o mmio.o \ | 62 | kvm-intel-objs = vmm.o vmm_ivt.o trampoline.o vcpu.o optvfault.o mmio.o \ |
63 | vtlb.o process.o | 63 | vtlb.o process.o kvm_lib.o |
64 | #Add link memcpy and memset to avoid possible structure assignment error | 64 | #Add link memcpy and memset to avoid possible structure assignment error |
65 | kvm-intel-objs += memcpy.o memset.o | 65 | kvm-intel-objs += memcpy.o memset.o |
66 | obj-$(CONFIG_KVM_INTEL) += kvm-intel.o | 66 | obj-$(CONFIG_KVM_INTEL) += kvm-intel.o |
diff --git a/arch/ia64/kvm/asm-offsets.c b/arch/ia64/kvm/asm-offsets.c index 4e3dc13a619c..0c3564a7a033 100644 --- a/arch/ia64/kvm/asm-offsets.c +++ b/arch/ia64/kvm/asm-offsets.c | |||
@@ -24,19 +24,10 @@ | |||
24 | 24 | ||
25 | #include <linux/autoconf.h> | 25 | #include <linux/autoconf.h> |
26 | #include <linux/kvm_host.h> | 26 | #include <linux/kvm_host.h> |
27 | #include <linux/kbuild.h> | ||
27 | 28 | ||
28 | #include "vcpu.h" | 29 | #include "vcpu.h" |
29 | 30 | ||
30 | #define task_struct kvm_vcpu | ||
31 | |||
32 | #define DEFINE(sym, val) \ | ||
33 | asm volatile("\n->" #sym " (%0) " #val : : "i" (val)) | ||
34 | |||
35 | #define BLANK() asm volatile("\n->" : :) | ||
36 | |||
37 | #define OFFSET(_sym, _str, _mem) \ | ||
38 | DEFINE(_sym, offsetof(_str, _mem)); | ||
39 | |||
40 | void foo(void) | 31 | void foo(void) |
41 | { | 32 | { |
42 | DEFINE(VMM_TASK_SIZE, sizeof(struct kvm_vcpu)); | 33 | DEFINE(VMM_TASK_SIZE, sizeof(struct kvm_vcpu)); |
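The deleted macros were verbatim copies of what <linux/kbuild.h> provides, so the file now includes the shared header instead. For reference, the trick behind DEFINE() is to pass the constant through an "i" (immediate) asm operand so its value is embedded after a "->" tag in the generated assembly, which the kernel build then turns into asm-offsets.h. A standalone sketch (compile with cc -S sketch.c and look for "->" in sketch.s):

    #include <stddef.h>

    struct demo {
            char pad[24];
            long field;
    };

    /* Same mechanism as kbuild.h: "%0" is an immediate operand, so the
     * compile-time constant lands verbatim in the .s output. */
    #define DEFINE(sym, val) \
            asm volatile("\n->" #sym " %0 " #val : : "i" (val))

    void foo(void)
    {
            DEFINE(DEMO_FIELD_OFFSET, offsetof(struct demo, field));
            DEFINE(DEMO_SIZE, sizeof(struct demo));
    }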
diff --git a/arch/ia64/kvm/kvm-ia64.c b/arch/ia64/kvm/kvm-ia64.c index af1464f7a6ad..0f5ebd948437 100644 --- a/arch/ia64/kvm/kvm-ia64.c +++ b/arch/ia64/kvm/kvm-ia64.c | |||
@@ -180,7 +180,6 @@ int kvm_dev_ioctl_check_extension(long ext) | |||
180 | 180 | ||
181 | switch (ext) { | 181 | switch (ext) { |
182 | case KVM_CAP_IRQCHIP: | 182 | case KVM_CAP_IRQCHIP: |
183 | case KVM_CAP_USER_MEMORY: | ||
184 | case KVM_CAP_MP_STATE: | 183 | case KVM_CAP_MP_STATE: |
185 | 184 | ||
186 | r = 1; | 185 | r = 1; |
@@ -439,7 +438,6 @@ int kvm_emulate_halt(struct kvm_vcpu *vcpu) | |||
439 | expires = div64_u64(itc_diff, cyc_per_usec); | 438 | expires = div64_u64(itc_diff, cyc_per_usec); |
440 | kt = ktime_set(0, 1000 * expires); | 439 | kt = ktime_set(0, 1000 * expires); |
441 | 440 | ||
442 | down_read(&vcpu->kvm->slots_lock); | ||
443 | vcpu->arch.ht_active = 1; | 441 | vcpu->arch.ht_active = 1; |
444 | hrtimer_start(p_ht, kt, HRTIMER_MODE_ABS); | 442 | hrtimer_start(p_ht, kt, HRTIMER_MODE_ABS); |
445 | 443 | ||
@@ -452,7 +450,6 @@ int kvm_emulate_halt(struct kvm_vcpu *vcpu) | |||
452 | if (vcpu->arch.mp_state == KVM_MP_STATE_HALTED) | 450 | if (vcpu->arch.mp_state == KVM_MP_STATE_HALTED) |
453 | vcpu->arch.mp_state = | 451 | vcpu->arch.mp_state = |
454 | KVM_MP_STATE_RUNNABLE; | 452 | KVM_MP_STATE_RUNNABLE; |
455 | up_read(&vcpu->kvm->slots_lock); | ||
456 | 453 | ||
457 | if (vcpu->arch.mp_state != KVM_MP_STATE_RUNNABLE) | 454 | if (vcpu->arch.mp_state != KVM_MP_STATE_RUNNABLE) |
458 | return -EINTR; | 455 | return -EINTR; |
@@ -476,6 +473,13 @@ static int handle_external_interrupt(struct kvm_vcpu *vcpu, | |||
476 | return 1; | 473 | return 1; |
477 | } | 474 | } |
478 | 475 | ||
476 | static int handle_vcpu_debug(struct kvm_vcpu *vcpu, | ||
477 | struct kvm_run *kvm_run) | ||
478 | { | ||
479 | printk("VMM: %s", vcpu->arch.log_buf); | ||
480 | return 1; | ||
481 | } | ||
482 | |||
479 | static int (*kvm_vti_exit_handlers[])(struct kvm_vcpu *vcpu, | 483 | static int (*kvm_vti_exit_handlers[])(struct kvm_vcpu *vcpu, |
480 | struct kvm_run *kvm_run) = { | 484 | struct kvm_run *kvm_run) = { |
481 | [EXIT_REASON_VM_PANIC] = handle_vm_error, | 485 | [EXIT_REASON_VM_PANIC] = handle_vm_error, |
@@ -487,6 +491,7 @@ static int (*kvm_vti_exit_handlers[])(struct kvm_vcpu *vcpu, | |||
487 | [EXIT_REASON_EXTERNAL_INTERRUPT] = handle_external_interrupt, | 491 | [EXIT_REASON_EXTERNAL_INTERRUPT] = handle_external_interrupt, |
488 | [EXIT_REASON_IPI] = handle_ipi, | 492 | [EXIT_REASON_IPI] = handle_ipi, |
489 | [EXIT_REASON_PTC_G] = handle_global_purge, | 493 | [EXIT_REASON_PTC_G] = handle_global_purge, |
494 | [EXIT_REASON_DEBUG] = handle_vcpu_debug, | ||
490 | 495 | ||
491 | }; | 496 | }; |
492 | 497 | ||
@@ -698,27 +703,24 @@ out: | |||
698 | return r; | 703 | return r; |
699 | } | 704 | } |
700 | 705 | ||
701 | /* | ||
702 | * Allocate 16M memory for every vm to hold its specific data. | ||
703 | * Its memory map is defined in kvm_host.h. | ||
704 | */ | ||
705 | static struct kvm *kvm_alloc_kvm(void) | 706 | static struct kvm *kvm_alloc_kvm(void) |
706 | { | 707 | { |
707 | 708 | ||
708 | struct kvm *kvm; | 709 | struct kvm *kvm; |
709 | uint64_t vm_base; | 710 | uint64_t vm_base; |
710 | 711 | ||
712 | BUG_ON(sizeof(struct kvm) > KVM_VM_STRUCT_SIZE); | ||
713 | |||
711 | vm_base = __get_free_pages(GFP_KERNEL, get_order(KVM_VM_DATA_SIZE)); | 714 | vm_base = __get_free_pages(GFP_KERNEL, get_order(KVM_VM_DATA_SIZE)); |
712 | 715 | ||
713 | if (!vm_base) | 716 | if (!vm_base) |
714 | return ERR_PTR(-ENOMEM); | 717 | return ERR_PTR(-ENOMEM); |
715 | printk(KERN_DEBUG"kvm: VM data's base Address:0x%lx\n", vm_base); | ||
716 | 718 | ||
717 | /* Zero all pages before use! */ | ||
718 | memset((void *)vm_base, 0, KVM_VM_DATA_SIZE); | 719 | memset((void *)vm_base, 0, KVM_VM_DATA_SIZE); |
719 | 720 | kvm = (struct kvm *)(vm_base + | |
720 | kvm = (struct kvm *)(vm_base + KVM_VM_OFS); | 721 | offsetof(struct kvm_vm_data, kvm_vm_struct)); |
721 | kvm->arch.vm_base = vm_base; | 722 | kvm->arch.vm_base = vm_base; |
723 | printk(KERN_DEBUG"kvm: vm's data area:0x%lx\n", vm_base); | ||
722 | 724 | ||
723 | return kvm; | 725 | return kvm; |
724 | } | 726 | } |
@@ -760,21 +762,12 @@ static void kvm_build_io_pmt(struct kvm *kvm) | |||
760 | 762 | ||
761 | static void kvm_init_vm(struct kvm *kvm) | 763 | static void kvm_init_vm(struct kvm *kvm) |
762 | { | 764 | { |
763 | long vm_base; | ||
764 | |||
765 | BUG_ON(!kvm); | 765 | BUG_ON(!kvm); |
766 | 766 | ||
767 | kvm->arch.metaphysical_rr0 = GUEST_PHYSICAL_RR0; | 767 | kvm->arch.metaphysical_rr0 = GUEST_PHYSICAL_RR0; |
768 | kvm->arch.metaphysical_rr4 = GUEST_PHYSICAL_RR4; | 768 | kvm->arch.metaphysical_rr4 = GUEST_PHYSICAL_RR4; |
769 | kvm->arch.vmm_init_rr = VMM_INIT_RR; | 769 | kvm->arch.vmm_init_rr = VMM_INIT_RR; |
770 | 770 | ||
771 | vm_base = kvm->arch.vm_base; | ||
772 | if (vm_base) { | ||
773 | kvm->arch.vhpt_base = vm_base + KVM_VHPT_OFS; | ||
774 | kvm->arch.vtlb_base = vm_base + KVM_VTLB_OFS; | ||
775 | kvm->arch.vpd_base = vm_base + KVM_VPD_OFS; | ||
776 | } | ||
777 | |||
778 | /* | 771 | /* |
779 | *Fill P2M entries for MMIO/IO ranges | 772 | *Fill P2M entries for MMIO/IO ranges |
780 | */ | 773 | */ |
@@ -838,9 +831,8 @@ static int kvm_vm_ioctl_set_irqchip(struct kvm *kvm, struct kvm_irqchip *chip) | |||
838 | 831 | ||
839 | int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs) | 832 | int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs) |
840 | { | 833 | { |
841 | int i; | ||
842 | struct vpd *vpd = to_host(vcpu->kvm, vcpu->arch.vpd); | 834 | struct vpd *vpd = to_host(vcpu->kvm, vcpu->arch.vpd); |
843 | int r; | 835 | int i; |
844 | 836 | ||
845 | vcpu_load(vcpu); | 837 | vcpu_load(vcpu); |
846 | 838 | ||
@@ -857,18 +849,7 @@ int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs) | |||
857 | 849 | ||
858 | vpd->vpr = regs->vpd.vpr; | 850 | vpd->vpr = regs->vpd.vpr; |
859 | 851 | ||
860 | r = -EFAULT; | 852 | memcpy(&vcpu->arch.guest, ®s->saved_guest, sizeof(union context)); |
861 | r = copy_from_user(&vcpu->arch.guest, regs->saved_guest, | ||
862 | sizeof(union context)); | ||
863 | if (r) | ||
864 | goto out; | ||
865 | r = copy_from_user(vcpu + 1, regs->saved_stack + | ||
866 | sizeof(struct kvm_vcpu), | ||
867 | IA64_STK_OFFSET - sizeof(struct kvm_vcpu)); | ||
868 | if (r) | ||
869 | goto out; | ||
870 | vcpu->arch.exit_data = | ||
871 | ((struct kvm_vcpu *)(regs->saved_stack))->arch.exit_data; | ||
872 | 853 | ||
873 | RESTORE_REGS(mp_state); | 854 | RESTORE_REGS(mp_state); |
874 | RESTORE_REGS(vmm_rr); | 855 | RESTORE_REGS(vmm_rr); |
@@ -902,9 +883,8 @@ int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs) | |||
902 | set_bit(KVM_REQ_RESUME, &vcpu->requests); | 883 | set_bit(KVM_REQ_RESUME, &vcpu->requests); |
903 | 884 | ||
904 | vcpu_put(vcpu); | 885 | vcpu_put(vcpu); |
905 | r = 0; | 886 | |
906 | out: | 887 | return 0; |
907 | return r; | ||
908 | } | 888 | } |
909 | 889 | ||
910 | long kvm_arch_vm_ioctl(struct file *filp, | 890 | long kvm_arch_vm_ioctl(struct file *filp, |
@@ -1166,10 +1146,11 @@ int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu) | |||
1166 | /*Set entry address for first run.*/ | 1146 | /*Set entry address for first run.*/ |
1167 | regs->cr_iip = PALE_RESET_ENTRY; | 1147 | regs->cr_iip = PALE_RESET_ENTRY; |
1168 | 1148 | ||
1169 | /*Initilize itc offset for vcpus*/ | 1149 | /*Initialize itc offset for vcpus*/ |
1170 | itc_offset = 0UL - ia64_getreg(_IA64_REG_AR_ITC); | 1150 | itc_offset = 0UL - ia64_getreg(_IA64_REG_AR_ITC); |
1171 | for (i = 0; i < MAX_VCPU_NUM; i++) { | 1151 | for (i = 0; i < KVM_MAX_VCPUS; i++) { |
1172 | v = (struct kvm_vcpu *)((char *)vcpu + VCPU_SIZE * i); | 1152 | v = (struct kvm_vcpu *)((char *)vcpu + |
1153 | sizeof(struct kvm_vcpu_data) * i); | ||
1173 | v->arch.itc_offset = itc_offset; | 1154 | v->arch.itc_offset = itc_offset; |
1174 | v->arch.last_itc = 0; | 1155 | v->arch.last_itc = 0; |
1175 | } | 1156 | } |
@@ -1183,7 +1164,7 @@ int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu) | |||
1183 | vcpu->arch.apic->vcpu = vcpu; | 1164 | vcpu->arch.apic->vcpu = vcpu; |
1184 | 1165 | ||
1185 | p_ctx->gr[1] = 0; | 1166 | p_ctx->gr[1] = 0; |
1186 | p_ctx->gr[12] = (unsigned long)((char *)vmm_vcpu + IA64_STK_OFFSET); | 1167 | p_ctx->gr[12] = (unsigned long)((char *)vmm_vcpu + KVM_STK_OFFSET); |
1187 | p_ctx->gr[13] = (unsigned long)vmm_vcpu; | 1168 | p_ctx->gr[13] = (unsigned long)vmm_vcpu; |
1188 | p_ctx->psr = 0x1008522000UL; | 1169 | p_ctx->psr = 0x1008522000UL; |
1189 | p_ctx->ar[40] = FPSR_DEFAULT; /*fpsr*/ | 1170 | p_ctx->ar[40] = FPSR_DEFAULT; /*fpsr*/ |
@@ -1218,12 +1199,12 @@ int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu) | |||
1218 | vcpu->arch.hlt_timer.function = hlt_timer_fn; | 1199 | vcpu->arch.hlt_timer.function = hlt_timer_fn; |
1219 | 1200 | ||
1220 | vcpu->arch.last_run_cpu = -1; | 1201 | vcpu->arch.last_run_cpu = -1; |
1221 | vcpu->arch.vpd = (struct vpd *)VPD_ADDR(vcpu->vcpu_id); | 1202 | vcpu->arch.vpd = (struct vpd *)VPD_BASE(vcpu->vcpu_id); |
1222 | vcpu->arch.vsa_base = kvm_vsa_base; | 1203 | vcpu->arch.vsa_base = kvm_vsa_base; |
1223 | vcpu->arch.__gp = kvm_vmm_gp; | 1204 | vcpu->arch.__gp = kvm_vmm_gp; |
1224 | vcpu->arch.dirty_log_lock_pa = __pa(&kvm->arch.dirty_log_lock); | 1205 | vcpu->arch.dirty_log_lock_pa = __pa(&kvm->arch.dirty_log_lock); |
1225 | vcpu->arch.vhpt.hash = (struct thash_data *)VHPT_ADDR(vcpu->vcpu_id); | 1206 | vcpu->arch.vhpt.hash = (struct thash_data *)VHPT_BASE(vcpu->vcpu_id); |
1226 | vcpu->arch.vtlb.hash = (struct thash_data *)VTLB_ADDR(vcpu->vcpu_id); | 1207 | vcpu->arch.vtlb.hash = (struct thash_data *)VTLB_BASE(vcpu->vcpu_id); |
1227 | init_ptce_info(vcpu); | 1208 | init_ptce_info(vcpu); |
1228 | 1209 | ||
1229 | r = 0; | 1210 | r = 0; |
@@ -1273,12 +1254,22 @@ struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm, | |||
1273 | int r; | 1254 | int r; |
1274 | int cpu; | 1255 | int cpu; |
1275 | 1256 | ||
1257 | BUG_ON(sizeof(struct kvm_vcpu) > VCPU_STRUCT_SIZE/2); | ||
1258 | |||
1259 | r = -EINVAL; | ||
1260 | if (id >= KVM_MAX_VCPUS) { | ||
1261 | printk(KERN_ERR"kvm: Can't configure vcpus > %ld", | ||
1262 | KVM_MAX_VCPUS); | ||
1263 | goto fail; | ||
1264 | } | ||
1265 | |||
1276 | r = -ENOMEM; | 1266 | r = -ENOMEM; |
1277 | if (!vm_base) { | 1267 | if (!vm_base) { |
1278 | printk(KERN_ERR"kvm: Create vcpu[%d] error!\n", id); | 1268 | printk(KERN_ERR"kvm: Create vcpu[%d] error!\n", id); |
1279 | goto fail; | 1269 | goto fail; |
1280 | } | 1270 | } |
1281 | vcpu = (struct kvm_vcpu *)(vm_base + KVM_VCPU_OFS + VCPU_SIZE * id); | 1271 | vcpu = (struct kvm_vcpu *)(vm_base + offsetof(struct kvm_vm_data, |
1272 | vcpu_data[id].vcpu_struct)); | ||
1282 | vcpu->kvm = kvm; | 1273 | vcpu->kvm = kvm; |
1283 | 1274 | ||
1284 | cpu = get_cpu(); | 1275 | cpu = get_cpu(); |
@@ -1374,9 +1365,9 @@ void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu) | |||
1374 | 1365 | ||
1375 | int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs) | 1366 | int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs) |
1376 | { | 1367 | { |
1377 | int i; | ||
1378 | int r; | ||
1379 | struct vpd *vpd = to_host(vcpu->kvm, vcpu->arch.vpd); | 1368 | struct vpd *vpd = to_host(vcpu->kvm, vcpu->arch.vpd); |
1369 | int i; | ||
1370 | |||
1380 | vcpu_load(vcpu); | 1371 | vcpu_load(vcpu); |
1381 | 1372 | ||
1382 | for (i = 0; i < 16; i++) { | 1373 | for (i = 0; i < 16; i++) { |
@@ -1391,14 +1382,8 @@ int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs) | |||
1391 | regs->vpd.vpsr = vpd->vpsr; | 1382 | regs->vpd.vpsr = vpd->vpsr; |
1392 | regs->vpd.vpr = vpd->vpr; | 1383 | regs->vpd.vpr = vpd->vpr; |
1393 | 1384 | ||
1394 | r = -EFAULT; | 1385 | memcpy(®s->saved_guest, &vcpu->arch.guest, sizeof(union context)); |
1395 | r = copy_to_user(regs->saved_guest, &vcpu->arch.guest, | 1386 | |
1396 | sizeof(union context)); | ||
1397 | if (r) | ||
1398 | goto out; | ||
1399 | r = copy_to_user(regs->saved_stack, (void *)vcpu, IA64_STK_OFFSET); | ||
1400 | if (r) | ||
1401 | goto out; | ||
1402 | SAVE_REGS(mp_state); | 1387 | SAVE_REGS(mp_state); |
1403 | SAVE_REGS(vmm_rr); | 1388 | SAVE_REGS(vmm_rr); |
1404 | memcpy(regs->itrs, vcpu->arch.itrs, sizeof(struct thash_data) * NITRS); | 1389 | memcpy(regs->itrs, vcpu->arch.itrs, sizeof(struct thash_data) * NITRS); |
@@ -1426,10 +1411,9 @@ int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs) | |||
1426 | SAVE_REGS(metaphysical_saved_rr4); | 1411 | SAVE_REGS(metaphysical_saved_rr4); |
1427 | SAVE_REGS(fp_psr); | 1412 | SAVE_REGS(fp_psr); |
1428 | SAVE_REGS(saved_gp); | 1413 | SAVE_REGS(saved_gp); |
1414 | |||
1429 | vcpu_put(vcpu); | 1415 | vcpu_put(vcpu); |
1430 | r = 0; | 1416 | return 0; |
1431 | out: | ||
1432 | return r; | ||
1433 | } | 1417 | } |
1434 | 1418 | ||
1435 | void kvm_arch_vcpu_uninit(struct kvm_vcpu *vcpu) | 1419 | void kvm_arch_vcpu_uninit(struct kvm_vcpu *vcpu) |
@@ -1457,6 +1441,9 @@ int kvm_arch_set_memory_region(struct kvm *kvm, | |||
1457 | struct kvm_memory_slot *memslot = &kvm->memslots[mem->slot]; | 1441 | struct kvm_memory_slot *memslot = &kvm->memslots[mem->slot]; |
1458 | unsigned long base_gfn = memslot->base_gfn; | 1442 | unsigned long base_gfn = memslot->base_gfn; |
1459 | 1443 | ||
1444 | if (base_gfn + npages > (KVM_MAX_MEM_SIZE >> PAGE_SHIFT)) | ||
1445 | return -ENOMEM; | ||
1446 | |||
1460 | for (i = 0; i < npages; i++) { | 1447 | for (i = 0; i < npages; i++) { |
1461 | pfn = gfn_to_pfn(kvm, base_gfn + i); | 1448 | pfn = gfn_to_pfn(kvm, base_gfn + i); |
1462 | if (!kvm_is_mmio_pfn(pfn)) { | 1449 | if (!kvm_is_mmio_pfn(pfn)) { |
@@ -1631,8 +1618,8 @@ static int kvm_ia64_sync_dirty_log(struct kvm *kvm, | |||
1631 | struct kvm_memory_slot *memslot; | 1618 | struct kvm_memory_slot *memslot; |
1632 | int r, i; | 1619 | int r, i; |
1633 | long n, base; | 1620 | long n, base; |
1634 | unsigned long *dirty_bitmap = (unsigned long *)((void *)kvm - KVM_VM_OFS | 1621 | unsigned long *dirty_bitmap = (unsigned long *)(kvm->arch.vm_base + |
1635 | + KVM_MEM_DIRTY_LOG_OFS); | 1622 | offsetof(struct kvm_vm_data, kvm_mem_dirty_log)); |
1636 | 1623 | ||
1637 | r = -EINVAL; | 1624 | r = -EINVAL; |
1638 | if (log->slot >= KVM_MEMORY_SLOTS) | 1625 | if (log->slot >= KVM_MEMORY_SLOTS) |
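The new bound in kvm_arch_set_memory_region() above follows directly from the p2m sizing: every guest page needs one 8-byte p2m entry, so KVM_MAX_MEM_SIZE is (KVM_P2M_SIZE >> 3) << PAGE_SHIFT. A standalone check of the numbers, assuming PAGE_SHIFT is 14 (the 16 KB ia64 default):

    #include <stdio.h>

    int main(void)
    {
            unsigned long p2m_size   = 24UL << 20;  /* KVM_P2M_SIZE: 24 MB */
            unsigned long page_shift = 14;          /* assumed: 16 KB pages */

            unsigned long max_pages = p2m_size >> 3;        /* 8 bytes/entry */
            unsigned long max_bytes = max_pages << page_shift;

            /* 3,145,728 entries * 16 KB per page = 48 GB of guest memory */
            printf("max guest pages : %lu\n", max_pages);
            printf("max guest memory: %lu GB\n", max_bytes >> 30);
            return 0;
    }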
diff --git a/arch/ia64/kvm/kvm_lib.c b/arch/ia64/kvm/kvm_lib.c new file mode 100644 index 000000000000..a85cb611ecd7 --- /dev/null +++ b/arch/ia64/kvm/kvm_lib.c | |||
@@ -0,0 +1,15 @@ | |||
1 | /* | ||
2 | * kvm_lib.c: Compile some library functions for the kvm-intel module. | ||
3 | * | ||
4 | * Just include the kernel's library sources, with symbol exports disabled. | ||
5 | * Copyright (C) 2008, Intel Corporation. | ||
6 | * Xiantao Zhang (xiantao.zhang@intel.com) | ||
7 | * | ||
8 | * This program is free software; you can redistribute it and/or modify | ||
9 | * it under the terms of the GNU General Public License version 2 as | ||
10 | * published by the Free Software Foundation. | ||
11 | * | ||
12 | */ | ||
13 | #undef CONFIG_MODULES | ||
14 | #include "../../../lib/vsprintf.c" | ||
15 | #include "../../../lib/ctype.c" | ||
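The VMM module runs in its own isolated mapping and cannot resolve symbols exported by the host kernel, so it takes private copies of vsnprintf() and the ctype table by including the library sources textually; with CONFIG_MODULES undefined, the EXPORT_SYMBOL() annotations in those files expand to nothing. A toy illustration of the include-the-source technique (both file names hypothetical):

    /* util.c -- hypothetical library source, normally built on its own */
    int twice(int x)
    {
            return 2 * x;
    }

    /* main.c -- textually includes the source, as kvm_lib.c does, and so
     * gets its own private, non-exported copy of twice(). */
    #include <stdio.h>
    #include "util.c"

    int main(void)
    {
            printf("%d\n", twice(21));  /* prints 42 */
            return 0;
    }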
diff --git a/arch/ia64/kvm/kvm_minstate.h b/arch/ia64/kvm/kvm_minstate.h index 2cc41d17cf99..b2bcaa2787aa 100644 --- a/arch/ia64/kvm/kvm_minstate.h +++ b/arch/ia64/kvm/kvm_minstate.h | |||
@@ -24,6 +24,8 @@ | |||
24 | #include <asm/asmmacro.h> | 24 | #include <asm/asmmacro.h> |
25 | #include <asm/types.h> | 25 | #include <asm/types.h> |
26 | #include <asm/kregs.h> | 26 | #include <asm/kregs.h> |
27 | #include <asm/kvm_host.h> | ||
28 | |||
27 | #include "asm-offsets.h" | 29 | #include "asm-offsets.h" |
28 | 30 | ||
29 | #define KVM_MINSTATE_START_SAVE_MIN \ | 31 | #define KVM_MINSTATE_START_SAVE_MIN \ |
@@ -33,7 +35,7 @@ | |||
33 | addl r22 = VMM_RBS_OFFSET,r1; /* compute base of RBS */ \ | 35 | addl r22 = VMM_RBS_OFFSET,r1; /* compute base of RBS */ \ |
34 | ;; \ | 36 | ;; \ |
35 | lfetch.fault.excl.nt1 [r22]; \ | 37 | lfetch.fault.excl.nt1 [r22]; \ |
36 | addl r1 = IA64_STK_OFFSET-VMM_PT_REGS_SIZE,r1; /* compute base of memory stack */ \ | 38 | addl r1 = KVM_STK_OFFSET-VMM_PT_REGS_SIZE, r1; \ |
37 | mov r23 = ar.bspstore; /* save ar.bspstore */ \ | 39 | mov r23 = ar.bspstore; /* save ar.bspstore */ \ |
38 | ;; \ | 40 | ;; \ |
39 | mov ar.bspstore = r22; /* switch to kernel RBS */\ | 41 | mov ar.bspstore = r22; /* switch to kernel RBS */\ |
diff --git a/arch/ia64/kvm/misc.h b/arch/ia64/kvm/misc.h index e585c4607344..dd979e00b574 100644 --- a/arch/ia64/kvm/misc.h +++ b/arch/ia64/kvm/misc.h | |||
@@ -27,7 +27,8 @@ | |||
27 | */ | 27 | */ |
28 | static inline uint64_t *kvm_host_get_pmt(struct kvm *kvm) | 28 | static inline uint64_t *kvm_host_get_pmt(struct kvm *kvm) |
29 | { | 29 | { |
30 | return (uint64_t *)(kvm->arch.vm_base + KVM_P2M_OFS); | 30 | return (uint64_t *)(kvm->arch.vm_base + |
31 | offsetof(struct kvm_vm_data, kvm_p2m)); | ||
31 | } | 32 | } |
32 | 33 | ||
33 | static inline void kvm_set_pmt_entry(struct kvm *kvm, gfn_t gfn, | 34 | static inline void kvm_set_pmt_entry(struct kvm *kvm, gfn_t gfn, |
diff --git a/arch/ia64/kvm/mmio.c b/arch/ia64/kvm/mmio.c index 7f1a858bc69f..21f63fffc379 100644 --- a/arch/ia64/kvm/mmio.c +++ b/arch/ia64/kvm/mmio.c | |||
@@ -66,31 +66,25 @@ void lsapic_write(struct kvm_vcpu *v, unsigned long addr, | |||
66 | 66 | ||
67 | switch (addr) { | 67 | switch (addr) { |
68 | case PIB_OFST_INTA: | 68 | case PIB_OFST_INTA: |
69 | /*panic_domain(NULL, "Undefined write on PIB INTA\n");*/ | 69 | panic_vm(v, "Undefined write on PIB INTA\n"); |
70 | panic_vm(v); | ||
71 | break; | 70 | break; |
72 | case PIB_OFST_XTP: | 71 | case PIB_OFST_XTP: |
73 | if (length == 1) { | 72 | if (length == 1) { |
74 | vlsapic_write_xtp(v, val); | 73 | vlsapic_write_xtp(v, val); |
75 | } else { | 74 | } else { |
76 | /*panic_domain(NULL, | 75 | panic_vm(v, "Undefined write on PIB XTP\n"); |
77 | "Undefined write on PIB XTP\n");*/ | ||
78 | panic_vm(v); | ||
79 | } | 76 | } |
80 | break; | 77 | break; |
81 | default: | 78 | default: |
82 | if (PIB_LOW_HALF(addr)) { | 79 | if (PIB_LOW_HALF(addr)) { |
83 | /*lower half */ | 80 | /*Lower half */ |
84 | if (length != 8) | 81 | if (length != 8) |
85 | /*panic_domain(NULL, | 82 | panic_vm(v, "Can't LHF write with size %ld!\n", |
86 | "Can't LHF write with size %ld!\n", | 83 | length); |
87 | length);*/ | ||
88 | panic_vm(v); | ||
89 | else | 84 | else |
90 | vlsapic_write_ipi(v, addr, val); | 85 | vlsapic_write_ipi(v, addr, val); |
91 | } else { /* upper half | 86 | } else { /*Upper half */ |
92 | printk("IPI-UHF write %lx\n",addr);*/ | 87 | panic_vm(v, "IPI-UHF write %lx\n", addr); |
93 | panic_vm(v); | ||
94 | } | 88 | } |
95 | break; | 89 | break; |
96 | } | 90 | } |
@@ -108,22 +102,18 @@ unsigned long lsapic_read(struct kvm_vcpu *v, unsigned long addr, | |||
108 | if (length == 1) /* 1 byte load */ | 102 | if (length == 1) /* 1 byte load */ |
109 | ; /* There is no i8259, there is no INTA access*/ | 103 | ; /* There is no i8259, there is no INTA access*/ |
110 | else | 104 | else |
111 | /*panic_domain(NULL,"Undefined read on PIB INTA\n"); */ | 105 | panic_vm(v, "Undefined read on PIB INTA\n"); |
112 | panic_vm(v); | ||
113 | 106 | ||
114 | break; | 107 | break; |
115 | case PIB_OFST_XTP: | 108 | case PIB_OFST_XTP: |
116 | if (length == 1) { | 109 | if (length == 1) { |
117 | result = VLSAPIC_XTP(v); | 110 | result = VLSAPIC_XTP(v); |
118 | /* printk("read xtp %lx\n", result); */ | ||
119 | } else { | 111 | } else { |
120 | /*panic_domain(NULL, | 112 | panic_vm(v, "Undefined read on PIB XTP\n"); |
121 | "Undefined read on PIB XTP\n");*/ | ||
122 | panic_vm(v); | ||
123 | } | 113 | } |
124 | break; | 114 | break; |
125 | default: | 115 | default: |
126 | panic_vm(v); | 116 | panic_vm(v, "Undefined addr access for lsapic!\n"); |
127 | break; | 117 | break; |
128 | } | 118 | } |
129 | return result; | 119 | return result; |
@@ -162,7 +152,7 @@ static void mmio_access(struct kvm_vcpu *vcpu, u64 src_pa, u64 *dest, | |||
162 | /* it's necessary to ensure zero extending */ | 152 | /* it's necessary to ensure zero extending */ |
163 | *dest = p->u.ioreq.data & (~0UL >> (64-(s*8))); | 153 | *dest = p->u.ioreq.data & (~0UL >> (64-(s*8))); |
164 | } else | 154 | } else |
165 | panic_vm(vcpu); | 155 | panic_vm(vcpu, "Unhandled mmio access returned!\n"); |
166 | out: | 156 | out: |
167 | local_irq_restore(psr); | 157 | local_irq_restore(psr); |
168 | return ; | 158 | return ; |
@@ -324,7 +314,9 @@ void emulate_io_inst(struct kvm_vcpu *vcpu, u64 padr, u64 ma) | |||
324 | return; | 314 | return; |
325 | } else { | 315 | } else { |
326 | inst_type = -1; | 316 | inst_type = -1; |
327 | panic_vm(vcpu); | 317 | panic_vm(vcpu, "Unsupported MMIO access instruction! \ |
318 | Bundle[0]=0x%lx, Bundle[1]=0x%lx\n", | ||
319 | bundle.i64[0], bundle.i64[1]); | ||
328 | } | 320 | } |
329 | 321 | ||
330 | size = 1 << size; | 322 | size = 1 << size; |
@@ -335,7 +327,7 @@ void emulate_io_inst(struct kvm_vcpu *vcpu, u64 padr, u64 ma) | |||
335 | if (inst_type == SL_INTEGER) | 327 | if (inst_type == SL_INTEGER) |
336 | vcpu_set_gr(vcpu, inst.M1.r1, data, 0); | 328 | vcpu_set_gr(vcpu, inst.M1.r1, data, 0); |
337 | else | 329 | else |
338 | panic_vm(vcpu); | 330 | panic_vm(vcpu, "Unsupported instruction type!\n"); |
339 | 331 | ||
340 | } | 332 | } |
341 | vcpu_increment_iip(vcpu); | 333 | vcpu_increment_iip(vcpu); |
diff --git a/arch/ia64/kvm/process.c b/arch/ia64/kvm/process.c index 800817307b7b..552d07724207 100644 --- a/arch/ia64/kvm/process.c +++ b/arch/ia64/kvm/process.c | |||
@@ -527,7 +527,8 @@ void reflect_interruption(u64 ifa, u64 isr, u64 iim, | |||
527 | vector = vec2off[vec]; | 527 | vector = vec2off[vec]; |
528 | 528 | ||
529 | if (!(vpsr & IA64_PSR_IC) && (vector != IA64_DATA_NESTED_TLB_VECTOR)) { | 529 | if (!(vpsr & IA64_PSR_IC) && (vector != IA64_DATA_NESTED_TLB_VECTOR)) { |
530 | panic_vm(vcpu); | 530 | panic_vm(vcpu, "Interruption with vector: 0x%lx occurs " |
531 | "with psr.ic = 0\n", vector); | ||
531 | return; | 532 | return; |
532 | } | 533 | } |
533 | 534 | ||
@@ -586,7 +587,7 @@ static void set_pal_call_result(struct kvm_vcpu *vcpu) | |||
586 | vcpu_set_gr(vcpu, 10, p->u.pal_data.ret.v1, 0); | 587 | vcpu_set_gr(vcpu, 10, p->u.pal_data.ret.v1, 0); |
587 | vcpu_set_gr(vcpu, 11, p->u.pal_data.ret.v2, 0); | 588 | vcpu_set_gr(vcpu, 11, p->u.pal_data.ret.v2, 0); |
588 | } else | 589 | } else |
589 | panic_vm(vcpu); | 590 | panic_vm(vcpu, "Mis-set for exit reason!\n"); |
590 | } | 591 | } |
591 | 592 | ||
592 | static void set_sal_call_data(struct kvm_vcpu *vcpu) | 593 | static void set_sal_call_data(struct kvm_vcpu *vcpu) |
@@ -614,7 +615,7 @@ static void set_sal_call_result(struct kvm_vcpu *vcpu) | |||
614 | vcpu_set_gr(vcpu, 10, p->u.sal_data.ret.r10, 0); | 615 | vcpu_set_gr(vcpu, 10, p->u.sal_data.ret.r10, 0); |
615 | vcpu_set_gr(vcpu, 11, p->u.sal_data.ret.r11, 0); | 616 | vcpu_set_gr(vcpu, 11, p->u.sal_data.ret.r11, 0); |
616 | } else | 617 | } else |
617 | panic_vm(vcpu); | 618 | panic_vm(vcpu, "Mis-set for exit reason!\n"); |
618 | } | 619 | } |
619 | 620 | ||
620 | void kvm_ia64_handle_break(unsigned long ifa, struct kvm_pt_regs *regs, | 621 | void kvm_ia64_handle_break(unsigned long ifa, struct kvm_pt_regs *regs, |
@@ -680,7 +681,7 @@ static void generate_exirq(struct kvm_vcpu *vcpu) | |||
680 | vpsr = VCPU(vcpu, vpsr); | 681 | vpsr = VCPU(vcpu, vpsr); |
681 | isr = vpsr & IA64_PSR_RI; | 682 | isr = vpsr & IA64_PSR_RI; |
682 | if (!(vpsr & IA64_PSR_IC)) | 683 | if (!(vpsr & IA64_PSR_IC)) |
683 | panic_vm(vcpu); | 684 | panic_vm(vcpu, "Trying to inject one IRQ with psr.ic=0\n"); |
684 | reflect_interruption(0, isr, 0, 12, regs); /* EXT IRQ */ | 685 | reflect_interruption(0, isr, 0, 12, regs); /* EXT IRQ */ |
685 | } | 686 | } |
686 | 687 | ||
@@ -941,8 +942,20 @@ static void vcpu_do_resume(struct kvm_vcpu *vcpu) | |||
941 | ia64_set_pta(vcpu->arch.vhpt.pta.val); | 942 | ia64_set_pta(vcpu->arch.vhpt.pta.val); |
942 | } | 943 | } |
943 | 944 | ||
945 | static void vmm_sanity_check(struct kvm_vcpu *vcpu) | ||
946 | { | ||
947 | struct exit_ctl_data *p = &vcpu->arch.exit_data; | ||
948 | |||
949 | if (!vmm_sanity && p->exit_reason != EXIT_REASON_DEBUG) { | ||
950 | panic_vm(vcpu, "Failed to do vmm sanity check," | ||
951 | "it maybe caused by crashed vmm!!\n\n"); | ||
952 | } | ||
953 | } | ||
954 | |||
944 | static void kvm_do_resume_op(struct kvm_vcpu *vcpu) | 955 | static void kvm_do_resume_op(struct kvm_vcpu *vcpu) |
945 | { | 956 | { |
957 | vmm_sanity_check(vcpu); /* Guarantee the vcpu is running on a healthy vmm! */ | ||
958 | |||
946 | if (test_and_clear_bit(KVM_REQ_RESUME, &vcpu->requests)) { | 959 | if (test_and_clear_bit(KVM_REQ_RESUME, &vcpu->requests)) { |
947 | vcpu_do_resume(vcpu); | 960 | vcpu_do_resume(vcpu); |
948 | return; | 961 | return; |
@@ -968,3 +981,11 @@ void vmm_transition(struct kvm_vcpu *vcpu) | |||
968 | 1, 0, 0, 0, 0, 0); | 981 | 1, 0, 0, 0, 0, 0); |
969 | kvm_do_resume_op(vcpu); | 982 | kvm_do_resume_op(vcpu); |
970 | } | 983 | } |
984 | |||
985 | void vmm_panic_handler(u64 vec) | ||
986 | { | ||
987 | struct kvm_vcpu *vcpu = current_vcpu; | ||
988 | vmm_sanity = 0; | ||
989 | panic_vm(vcpu, "Unexpected interruption occurs in VMM, vector:0x%lx\n", | ||
990 | vec2off[vec]); | ||
991 | } | ||
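The additions above form a one-way gate: once an unexpected interruption lands in the VMM, vmm_panic_handler() clears vmm_sanity, and from then on kvm_do_resume_op() refuses every resume except the EXIT_REASON_DEBUG hand-off that carries the panic message out to the host. A minimal standalone sketch of that gate:

    #include <stdio.h>

    static long vmm_sanity = 1;

    enum exit_reason { EXIT_REASON_DEBUG, EXIT_REASON_OTHER };

    static void do_resume(enum exit_reason r)
    {
            /* Mirrors vmm_sanity_check(): only the debug exit gets through */
            if (!vmm_sanity && r != EXIT_REASON_DEBUG) {
                    puts("panic_vm: crashed vmm"); /* never returns in the real code */
                    return;
            }
            puts("resume guest");
    }

    int main(void)
    {
            do_resume(EXIT_REASON_OTHER);   /* vmm still sane: resumes */
            vmm_sanity = 0;                 /* vmm_panic_handler() has run */
            do_resume(EXIT_REASON_DEBUG);   /* log hand-off still allowed */
            do_resume(EXIT_REASON_OTHER);   /* refused from now on */
            return 0;
    }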
diff --git a/arch/ia64/kvm/vcpu.c b/arch/ia64/kvm/vcpu.c index e44027ce5667..ecd526b55323 100644 --- a/arch/ia64/kvm/vcpu.c +++ b/arch/ia64/kvm/vcpu.c | |||
@@ -816,8 +816,9 @@ static void vcpu_set_itc(struct kvm_vcpu *vcpu, u64 val) | |||
816 | unsigned long vitv = VCPU(vcpu, itv); | 816 | unsigned long vitv = VCPU(vcpu, itv); |
817 | 817 | ||
818 | if (vcpu->vcpu_id == 0) { | 818 | if (vcpu->vcpu_id == 0) { |
819 | for (i = 0; i < MAX_VCPU_NUM; i++) { | 819 | for (i = 0; i < KVM_MAX_VCPUS; i++) { |
820 | v = (struct kvm_vcpu *)((char *)vcpu + VCPU_SIZE * i); | 820 | v = (struct kvm_vcpu *)((char *)vcpu + |
821 | sizeof(struct kvm_vcpu_data) * i); | ||
821 | VMX(v, itc_offset) = itc_offset; | 822 | VMX(v, itc_offset) = itc_offset; |
822 | VMX(v, last_itc) = 0; | 823 | VMX(v, last_itc) = 0; |
823 | } | 824 | } |
@@ -1650,7 +1651,8 @@ void vcpu_set_psr(struct kvm_vcpu *vcpu, unsigned long val) | |||
1650 | * Otherwise panic | 1651 | * Otherwise panic |
1651 | */ | 1652 | */ |
1652 | if (val & (IA64_PSR_PK | IA64_PSR_IS | IA64_PSR_VM)) | 1653 | if (val & (IA64_PSR_PK | IA64_PSR_IS | IA64_PSR_VM)) |
1653 | panic_vm(vcpu); | 1654 | panic_vm(vcpu, "Only support guests with vpsr.pk=0 \ |
1655 | & vpsr.is=0\n"); | ||
1654 | 1656 | ||
1655 | /* | 1657 | /* |
1656 | * For those IA64_PSR bits: id/da/dd/ss/ed/ia | 1658 | * For those IA64_PSR bits: id/da/dd/ss/ed/ia |
@@ -2103,7 +2105,7 @@ void kvm_init_all_rr(struct kvm_vcpu *vcpu) | |||
2103 | 2105 | ||
2104 | if (is_physical_mode(vcpu)) { | 2106 | if (is_physical_mode(vcpu)) { |
2105 | if (vcpu->arch.mode_flags & GUEST_PHY_EMUL) | 2107 | if (vcpu->arch.mode_flags & GUEST_PHY_EMUL) |
2106 | panic_vm(vcpu); | 2108 | panic_vm(vcpu, "Machine Status conflicts!\n"); |
2107 | 2109 | ||
2108 | ia64_set_rr((VRN0 << VRN_SHIFT), vcpu->arch.metaphysical_rr0); | 2110 | ia64_set_rr((VRN0 << VRN_SHIFT), vcpu->arch.metaphysical_rr0); |
2109 | ia64_dv_serialize_data(); | 2111 | ia64_dv_serialize_data(); |
@@ -2152,10 +2154,70 @@ int vmm_entry(void) | |||
2152 | return 0; | 2154 | return 0; |
2153 | } | 2155 | } |
2154 | 2156 | ||
2155 | void panic_vm(struct kvm_vcpu *v) | 2157 | static void kvm_show_registers(struct kvm_pt_regs *regs) |
2156 | { | 2158 | { |
2159 | unsigned long ip = regs->cr_iip + ia64_psr(regs)->ri; | ||
2160 | |||
2161 | struct kvm_vcpu *vcpu = current_vcpu; | ||
2162 | if (vcpu != NULL) | ||
2163 | printk("vcpu 0x%p vcpu %d\n", | ||
2164 | vcpu, vcpu->vcpu_id); | ||
2165 | |||
2166 | printk("psr : %016lx ifs : %016lx ip : [<%016lx>]\n", | ||
2167 | regs->cr_ipsr, regs->cr_ifs, ip); | ||
2168 | |||
2169 | printk("unat: %016lx pfs : %016lx rsc : %016lx\n", | ||
2170 | regs->ar_unat, regs->ar_pfs, regs->ar_rsc); | ||
2171 | printk("rnat: %016lx bspstore: %016lx pr : %016lx\n", | ||
2172 | regs->ar_rnat, regs->ar_bspstore, regs->pr); | ||
2173 | printk("ldrs: %016lx ccv : %016lx fpsr: %016lx\n", | ||
2174 | regs->loadrs, regs->ar_ccv, regs->ar_fpsr); | ||
2175 | printk("csd : %016lx ssd : %016lx\n", regs->ar_csd, regs->ar_ssd); | ||
2176 | printk("b0 : %016lx b6 : %016lx b7 : %016lx\n", regs->b0, | ||
2177 | regs->b6, regs->b7); | ||
2178 | printk("f6 : %05lx%016lx f7 : %05lx%016lx\n", | ||
2179 | regs->f6.u.bits[1], regs->f6.u.bits[0], | ||
2180 | regs->f7.u.bits[1], regs->f7.u.bits[0]); | ||
2181 | printk("f8 : %05lx%016lx f9 : %05lx%016lx\n", | ||
2182 | regs->f8.u.bits[1], regs->f8.u.bits[0], | ||
2183 | regs->f9.u.bits[1], regs->f9.u.bits[0]); | ||
2184 | printk("f10 : %05lx%016lx f11 : %05lx%016lx\n", | ||
2185 | regs->f10.u.bits[1], regs->f10.u.bits[0], | ||
2186 | regs->f11.u.bits[1], regs->f11.u.bits[0]); | ||
2187 | |||
2188 | printk("r1 : %016lx r2 : %016lx r3 : %016lx\n", regs->r1, | ||
2189 | regs->r2, regs->r3); | ||
2190 | printk("r8 : %016lx r9 : %016lx r10 : %016lx\n", regs->r8, | ||
2191 | regs->r9, regs->r10); | ||
2192 | printk("r11 : %016lx r12 : %016lx r13 : %016lx\n", regs->r11, | ||
2193 | regs->r12, regs->r13); | ||
2194 | printk("r14 : %016lx r15 : %016lx r16 : %016lx\n", regs->r14, | ||
2195 | regs->r15, regs->r16); | ||
2196 | printk("r17 : %016lx r18 : %016lx r19 : %016lx\n", regs->r17, | ||
2197 | regs->r18, regs->r19); | ||
2198 | printk("r20 : %016lx r21 : %016lx r22 : %016lx\n", regs->r20, | ||
2199 | regs->r21, regs->r22); | ||
2200 | printk("r23 : %016lx r24 : %016lx r25 : %016lx\n", regs->r23, | ||
2201 | regs->r24, regs->r25); | ||
2202 | printk("r26 : %016lx r27 : %016lx r28 : %016lx\n", regs->r26, | ||
2203 | regs->r27, regs->r28); | ||
2204 | printk("r29 : %016lx r30 : %016lx r31 : %016lx\n", regs->r29, | ||
2205 | regs->r30, regs->r31); | ||
2206 | |||
2207 | } | ||
2208 | |||
2209 | void panic_vm(struct kvm_vcpu *v, const char *fmt, ...) | ||
2210 | { | ||
2211 | va_list args; | ||
2212 | char buf[256]; | ||
2213 | |||
2214 | struct kvm_pt_regs *regs = vcpu_regs(v); | ||
2157 | struct exit_ctl_data *p = &v->arch.exit_data; | 2215 | struct exit_ctl_data *p = &v->arch.exit_data; |
2158 | 2216 | va_start(args, fmt); | |
2217 | vsnprintf(buf, sizeof(buf), fmt, args); | ||
2218 | va_end(args); | ||
2219 | printk("%s", buf); | ||
2220 | kvm_show_registers(regs); | ||
2159 | p->exit_reason = EXIT_REASON_VM_PANIC; | 2221 | p->exit_reason = EXIT_REASON_VM_PANIC; |
2160 | vmm_transition(v); | 2222 | vmm_transition(v); |
2161 | /* Never returns */ | 2223 |
diff --git a/arch/ia64/kvm/vcpu.h b/arch/ia64/kvm/vcpu.h index e9b2a4e121c0..b2f12a562bdf 100644 --- a/arch/ia64/kvm/vcpu.h +++ b/arch/ia64/kvm/vcpu.h | |||
@@ -737,9 +737,12 @@ void kvm_init_vtlb(struct kvm_vcpu *v); | |||
737 | void kvm_init_vhpt(struct kvm_vcpu *v); | 737 | void kvm_init_vhpt(struct kvm_vcpu *v); |
738 | void thash_init(struct thash_cb *hcb, u64 sz); | 738 | void thash_init(struct thash_cb *hcb, u64 sz); |
739 | 739 | ||
740 | void panic_vm(struct kvm_vcpu *v); | 740 | void panic_vm(struct kvm_vcpu *v, const char *fmt, ...); |
741 | 741 | ||
742 | extern u64 ia64_call_vsa(u64 proc, u64 arg1, u64 arg2, u64 arg3, | 742 | extern u64 ia64_call_vsa(u64 proc, u64 arg1, u64 arg2, u64 arg3, |
743 | u64 arg4, u64 arg5, u64 arg6, u64 arg7); | 743 | u64 arg4, u64 arg5, u64 arg6, u64 arg7); |
744 | |||
745 | extern long vmm_sanity; | ||
746 | |||
744 | #endif | 747 | #endif |
745 | #endif /* __VCPU_H__ */ | 748 | #endif /* __VCPU_H__ */ |
diff --git a/arch/ia64/kvm/vmm.c b/arch/ia64/kvm/vmm.c index 2275bf4e681a..9eee5c04bacc 100644 --- a/arch/ia64/kvm/vmm.c +++ b/arch/ia64/kvm/vmm.c | |||
@@ -20,6 +20,7 @@ | |||
20 | */ | 20 | */ |
21 | 21 | ||
22 | 22 | ||
23 | #include<linux/kernel.h> | ||
23 | #include<linux/module.h> | 24 | #include<linux/module.h> |
24 | #include<asm/fpswa.h> | 25 | #include<asm/fpswa.h> |
25 | 26 | ||
@@ -31,6 +32,8 @@ MODULE_LICENSE("GPL"); | |||
31 | extern char kvm_ia64_ivt; | 32 | extern char kvm_ia64_ivt; |
32 | extern fpswa_interface_t *vmm_fpswa_interface; | 33 | extern fpswa_interface_t *vmm_fpswa_interface; |
33 | 34 | ||
35 | long vmm_sanity = 1; | ||
36 | |||
34 | struct kvm_vmm_info vmm_info = { | 37 | struct kvm_vmm_info vmm_info = { |
35 | .module = THIS_MODULE, | 38 | .module = THIS_MODULE, |
36 | .vmm_entry = vmm_entry, | 39 | .vmm_entry = vmm_entry, |
@@ -62,5 +65,31 @@ void vmm_spin_unlock(spinlock_t *lock) | |||
62 | { | 65 | { |
63 | _vmm_raw_spin_unlock(lock); | 66 | _vmm_raw_spin_unlock(lock); |
64 | } | 67 | } |
68 | |||
69 | static void vcpu_debug_exit(struct kvm_vcpu *vcpu) | ||
70 | { | ||
71 | struct exit_ctl_data *p = &vcpu->arch.exit_data; | ||
72 | long psr; | ||
73 | |||
74 | local_irq_save(psr); | ||
75 | p->exit_reason = EXIT_REASON_DEBUG; | ||
76 | vmm_transition(vcpu); | ||
77 | local_irq_restore(psr); | ||
78 | } | ||
79 | |||
80 | asmlinkage int printk(const char *fmt, ...) | ||
81 | { | ||
82 | struct kvm_vcpu *vcpu = current_vcpu; | ||
83 | va_list args; | ||
84 | int r; | ||
85 | |||
86 | memset(vcpu->arch.log_buf, 0, VMM_LOG_LEN); | ||
87 | va_start(args, fmt); | ||
88 | r = vsnprintf(vcpu->arch.log_buf, VMM_LOG_LEN, fmt, args); | ||
89 | va_end(args); | ||
90 | vcpu_debug_exit(vcpu); | ||
91 | return r; | ||
92 | } | ||
93 | |||
65 | module_init(kvm_vmm_init) | 94 | module_init(kvm_vmm_init) |
66 | module_exit(kvm_vmm_exit) | 95 | module_exit(kvm_vmm_exit) |
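Together with handle_vcpu_debug() in kvm-ia64.c above, this override gives the isolated VMM a working printk: it can only vsnprintf() into the per-vcpu log_buf and force an EXIT_REASON_DEBUG transition, after which the host side prints the buffer. A single-threaded userspace analogue of the hand-off (the plain function call stands in for the world switch):

    #include <stdarg.h>
    #include <stdio.h>
    #include <string.h>

    #define VMM_LOG_LEN 256
    static char log_buf[VMM_LOG_LEN];   /* stands in for vcpu->arch.log_buf */

    /* "VMM" side: format into the shared buffer, then exit to the host
     * (the real code raises EXIT_REASON_DEBUG via vmm_transition()). */
    static int vmm_printk(const char *fmt, ...)
    {
            va_list args;
            int r;

            memset(log_buf, 0, VMM_LOG_LEN);
            va_start(args, fmt);
            r = vsnprintf(log_buf, VMM_LOG_LEN, fmt, args);
            va_end(args);
            return r;
    }

    /* Host side of the exit, as in handle_vcpu_debug() */
    static void handle_vcpu_debug(void)
    {
            printf("VMM: %s", log_buf);
    }

    int main(void)
    {
            vmm_printk("unexpected vector 0x%x\n", 0x5100);
            handle_vcpu_debug();
            return 0;
    }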
diff --git a/arch/ia64/kvm/vmm_ivt.S b/arch/ia64/kvm/vmm_ivt.S index c1d7251a1480..3ef1a017a318 100644 --- a/arch/ia64/kvm/vmm_ivt.S +++ b/arch/ia64/kvm/vmm_ivt.S | |||
@@ -1,5 +1,5 @@ | |||
1 | /* | 1 | /* |
2 | * /ia64/kvm_ivt.S | 2 | * arch/ia64/kvm/vmm_ivt.S |
3 | * | 3 | * |
4 | * Copyright (C) 1998-2001, 2003 Hewlett-Packard Co | 4 | * Copyright (C) 1998-2001, 2003 Hewlett-Packard Co |
5 | * Stephane Eranian <eranian@hpl.hp.com> | 5 | * Stephane Eranian <eranian@hpl.hp.com> |
@@ -70,32 +70,39 @@ | |||
70 | # define PSR_DEFAULT_BITS 0 | 70 | # define PSR_DEFAULT_BITS 0 |
71 | #endif | 71 | #endif |
72 | 72 | ||
73 | |||
74 | #define KVM_FAULT(n) \ | 73 | #define KVM_FAULT(n) \ |
75 | kvm_fault_##n:; \ | 74 | kvm_fault_##n:; \ |
76 | mov r19=n;; \ | 75 | mov r19=n;; \ |
77 | br.sptk.many kvm_fault_##n; \ | 76 | br.sptk.many kvm_vmm_panic; \ |
78 | ;; \ | 77 | ;; \ |
79 | |||
80 | 78 | ||
81 | #define KVM_REFLECT(n) \ | 79 | #define KVM_REFLECT(n) \ |
82 | mov r31=pr; \ | 80 | mov r31=pr; \ |
83 | mov r19=n; /* prepare to save predicates */ \ | 81 | mov r19=n; /* prepare to save predicates */ \ |
84 | mov r29=cr.ipsr; \ | 82 | mov r29=cr.ipsr; \ |
85 | ;; \ | 83 | ;; \ |
86 | tbit.z p6,p7=r29,IA64_PSR_VM_BIT; \ | 84 | tbit.z p6,p7=r29,IA64_PSR_VM_BIT; \ |
87 | (p7)br.sptk.many kvm_dispatch_reflection; \ | 85 | (p7) br.sptk.many kvm_dispatch_reflection; \ |
88 | br.sptk.many kvm_panic; \ | 86 | br.sptk.many kvm_vmm_panic; \ |
89 | 87 | ||
90 | 88 | GLOBAL_ENTRY(kvm_vmm_panic) | |
91 | GLOBAL_ENTRY(kvm_panic) | 89 | KVM_SAVE_MIN_WITH_COVER_R19 |
92 | br.sptk.many kvm_panic | 90 | alloc r14=ar.pfs,0,0,1,0 |
93 | ;; | 91 | mov out0=r15 |
94 | END(kvm_panic) | 92 | adds r3=8,r2 // set up second base pointer |
95 | 93 | ;; | |
96 | 94 | ssm psr.ic | |
97 | 95 | ;; | |
98 | 96 | srlz.i // guarantee that interruption collection is on | |
97 | ;; | ||
98 | //(p15) ssm psr.i // restore psr.i | ||
99 | addl r14=@gprel(ia64_leave_hypervisor),gp | ||
100 | ;; | ||
101 | KVM_SAVE_REST | ||
102 | mov rp=r14 | ||
103 | ;; | ||
104 | br.call.sptk.many b6=vmm_panic_handler; | ||
105 | END(kvm_vmm_panic) | ||
99 | 106 | ||
100 | .section .text.ivt,"ax" | 107 | .section .text.ivt,"ax" |
101 | 108 | ||
@@ -105,308 +112,307 @@ kvm_ia64_ivt: | |||
105 | /////////////////////////////////////////////////////////////// | 112 | /////////////////////////////////////////////////////////////// |
106 | // 0x0000 Entry 0 (size 64 bundles) VHPT Translation (8,20,47) | 113 | // 0x0000 Entry 0 (size 64 bundles) VHPT Translation (8,20,47) |
107 | ENTRY(kvm_vhpt_miss) | 114 | ENTRY(kvm_vhpt_miss) |
108 | KVM_FAULT(0) | 115 | KVM_FAULT(0) |
109 | END(kvm_vhpt_miss) | 116 | END(kvm_vhpt_miss) |
110 | 117 | ||
111 | |||
112 | .org kvm_ia64_ivt+0x400 | 118 | .org kvm_ia64_ivt+0x400 |
113 | //////////////////////////////////////////////////////////////// | 119 | //////////////////////////////////////////////////////////////// |
114 | // 0x0400 Entry 1 (size 64 bundles) ITLB (21) | 120 | // 0x0400 Entry 1 (size 64 bundles) ITLB (21) |
115 | ENTRY(kvm_itlb_miss) | 121 | ENTRY(kvm_itlb_miss) |
116 | mov r31 = pr | 122 | mov r31 = pr |
117 | mov r29=cr.ipsr; | 123 | mov r29=cr.ipsr; |
118 | ;; | 124 | ;; |
119 | tbit.z p6,p7=r29,IA64_PSR_VM_BIT; | 125 | tbit.z p6,p7=r29,IA64_PSR_VM_BIT; |
120 | (p6) br.sptk kvm_alt_itlb_miss | 126 | (p6) br.sptk kvm_alt_itlb_miss |
121 | mov r19 = 1 | 127 | mov r19 = 1 |
122 | br.sptk kvm_itlb_miss_dispatch | 128 | br.sptk kvm_itlb_miss_dispatch |
123 | KVM_FAULT(1); | 129 | KVM_FAULT(1); |
124 | END(kvm_itlb_miss) | 130 | END(kvm_itlb_miss) |
125 | 131 | ||
126 | .org kvm_ia64_ivt+0x0800 | 132 | .org kvm_ia64_ivt+0x0800 |
127 | ////////////////////////////////////////////////////////////////// | 133 | ////////////////////////////////////////////////////////////////// |
128 | // 0x0800 Entry 2 (size 64 bundles) DTLB (9,48) | 134 | // 0x0800 Entry 2 (size 64 bundles) DTLB (9,48) |
129 | ENTRY(kvm_dtlb_miss) | 135 | ENTRY(kvm_dtlb_miss) |
130 | mov r31 = pr | 136 | mov r31 = pr |
131 | mov r29=cr.ipsr; | 137 | mov r29=cr.ipsr; |
132 | ;; | 138 | ;; |
133 | tbit.z p6,p7=r29,IA64_PSR_VM_BIT; | 139 | tbit.z p6,p7=r29,IA64_PSR_VM_BIT; |
134 | (p6)br.sptk kvm_alt_dtlb_miss | 140 | (p6) br.sptk kvm_alt_dtlb_miss |
135 | br.sptk kvm_dtlb_miss_dispatch | 141 | br.sptk kvm_dtlb_miss_dispatch |
136 | END(kvm_dtlb_miss) | 142 | END(kvm_dtlb_miss) |
137 | 143 | ||
138 | .org kvm_ia64_ivt+0x0c00 | 144 | .org kvm_ia64_ivt+0x0c00 |
139 | //////////////////////////////////////////////////////////////////// | 145 | //////////////////////////////////////////////////////////////////// |
140 | // 0x0c00 Entry 3 (size 64 bundles) Alt ITLB (19) | 146 | // 0x0c00 Entry 3 (size 64 bundles) Alt ITLB (19) |
141 | ENTRY(kvm_alt_itlb_miss) | 147 | ENTRY(kvm_alt_itlb_miss) |
142 | mov r16=cr.ifa // get address that caused the TLB miss | 148 | mov r16=cr.ifa // get address that caused the TLB miss |
143 | ;; | 149 | ;; |
144 | movl r17=PAGE_KERNEL | 150 | movl r17=PAGE_KERNEL |
145 | mov r24=cr.ipsr | 151 | mov r24=cr.ipsr |
146 | movl r19=(((1 << IA64_MAX_PHYS_BITS) - 1) & ~0xfff) | 152 | movl r19=(((1 << IA64_MAX_PHYS_BITS) - 1) & ~0xfff) |
147 | ;; | 153 | ;; |
148 | and r19=r19,r16 // clear ed, reserved bits, and PTE control bits | 154 | and r19=r19,r16 // clear ed, reserved bits, and PTE control bits |
149 | ;; | 155 | ;; |
150 | or r19=r17,r19 // insert PTE control bits into r19 | 156 | or r19=r17,r19 // insert PTE control bits into r19 |
151 | ;; | 157 | ;; |
152 | movl r20=IA64_GRANULE_SHIFT<<2 | 158 | movl r20=IA64_GRANULE_SHIFT<<2 |
153 | ;; | 159 | ;; |
154 | mov cr.itir=r20 | 160 | mov cr.itir=r20 |
155 | ;; | 161 | ;; |
156 | itc.i r19 // insert the TLB entry | 162 | itc.i r19 // insert the TLB entry |
157 | mov pr=r31,-1 | 163 | mov pr=r31,-1 |
158 | rfi | 164 | rfi |
159 | END(kvm_alt_itlb_miss) | 165 | END(kvm_alt_itlb_miss) |
160 | 166 | ||
161 | .org kvm_ia64_ivt+0x1000 | 167 | .org kvm_ia64_ivt+0x1000 |
162 | ///////////////////////////////////////////////////////////////////// | 168 | ///////////////////////////////////////////////////////////////////// |
163 | // 0x1000 Entry 4 (size 64 bundles) Alt DTLB (7,46) | 169 | // 0x1000 Entry 4 (size 64 bundles) Alt DTLB (7,46) |
164 | ENTRY(kvm_alt_dtlb_miss) | 170 | ENTRY(kvm_alt_dtlb_miss) |
165 | mov r16=cr.ifa // get address that caused the TLB miss | 171 | mov r16=cr.ifa // get address that caused the TLB miss |
166 | ;; | 172 | ;; |
167 | movl r17=PAGE_KERNEL | 173 | movl r17=PAGE_KERNEL |
168 | movl r19=(((1 << IA64_MAX_PHYS_BITS) - 1) & ~0xfff) | 174 | movl r19=(((1 << IA64_MAX_PHYS_BITS) - 1) & ~0xfff) |
169 | mov r24=cr.ipsr | 175 | mov r24=cr.ipsr |
170 | ;; | 176 | ;; |
171 | and r19=r19,r16 // clear ed, reserved bits, and PTE control bits | 177 | and r19=r19,r16 // clear ed, reserved bits, and PTE control bits |
172 | ;; | 178 | ;; |
173 | or r19=r19,r17 // insert PTE control bits into r19 | 179 | or r19=r19,r17 // insert PTE control bits into r19 |
174 | ;; | 180 | ;; |
175 | movl r20=IA64_GRANULE_SHIFT<<2 | 181 | movl r20=IA64_GRANULE_SHIFT<<2 |
176 | ;; | 182 | ;; |
177 | mov cr.itir=r20 | 183 | mov cr.itir=r20 |
178 | ;; | 184 | ;; |
179 | itc.d r19 // insert the TLB entry | 185 | itc.d r19 // insert the TLB entry |
180 | mov pr=r31,-1 | 186 | mov pr=r31,-1 |
181 | rfi | 187 | rfi |
182 | END(kvm_alt_dtlb_miss) | 188 | END(kvm_alt_dtlb_miss) |
183 | 189 | ||
184 | .org kvm_ia64_ivt+0x1400 | 190 | .org kvm_ia64_ivt+0x1400 |
185 | ////////////////////////////////////////////////////////////////////// | 191 | ////////////////////////////////////////////////////////////////////// |
186 | // 0x1400 Entry 5 (size 64 bundles) Data nested TLB (6,45) | 192 | // 0x1400 Entry 5 (size 64 bundles) Data nested TLB (6,45) |
187 | ENTRY(kvm_nested_dtlb_miss) | 193 | ENTRY(kvm_nested_dtlb_miss) |
188 | KVM_FAULT(5) | 194 | KVM_FAULT(5) |
189 | END(kvm_nested_dtlb_miss) | 195 | END(kvm_nested_dtlb_miss) |
190 | 196 | ||
191 | .org kvm_ia64_ivt+0x1800 | 197 | .org kvm_ia64_ivt+0x1800 |
192 | ///////////////////////////////////////////////////////////////////// | 198 | ///////////////////////////////////////////////////////////////////// |
193 | // 0x1800 Entry 6 (size 64 bundles) Instruction Key Miss (24) | 199 | // 0x1800 Entry 6 (size 64 bundles) Instruction Key Miss (24) |
194 | ENTRY(kvm_ikey_miss) | 200 | ENTRY(kvm_ikey_miss) |
195 | KVM_REFLECT(6) | 201 | KVM_REFLECT(6) |
196 | END(kvm_ikey_miss) | 202 | END(kvm_ikey_miss) |
197 | 203 | ||
198 | .org kvm_ia64_ivt+0x1c00 | 204 | .org kvm_ia64_ivt+0x1c00 |
199 | ///////////////////////////////////////////////////////////////////// | 205 | ///////////////////////////////////////////////////////////////////// |
200 | // 0x1c00 Entry 7 (size 64 bundles) Data Key Miss (12,51) | 206 | // 0x1c00 Entry 7 (size 64 bundles) Data Key Miss (12,51) |
201 | ENTRY(kvm_dkey_miss) | 207 | ENTRY(kvm_dkey_miss) |
202 | KVM_REFLECT(7) | 208 | KVM_REFLECT(7) |
203 | END(kvm_dkey_miss) | 209 | END(kvm_dkey_miss) |
204 | 210 | ||
205 | .org kvm_ia64_ivt+0x2000 | 211 | .org kvm_ia64_ivt+0x2000 |
206 | //////////////////////////////////////////////////////////////////// | 212 | //////////////////////////////////////////////////////////////////// |
207 | // 0x2000 Entry 8 (size 64 bundles) Dirty-bit (54) | 213 | // 0x2000 Entry 8 (size 64 bundles) Dirty-bit (54) |
208 | ENTRY(kvm_dirty_bit) | 214 | ENTRY(kvm_dirty_bit) |
209 | KVM_REFLECT(8) | 215 | KVM_REFLECT(8) |
210 | END(kvm_dirty_bit) | 216 | END(kvm_dirty_bit) |
211 | 217 | ||
212 | .org kvm_ia64_ivt+0x2400 | 218 | .org kvm_ia64_ivt+0x2400 |
213 | //////////////////////////////////////////////////////////////////// | 219 | //////////////////////////////////////////////////////////////////// |
214 | // 0x2400 Entry 9 (size 64 bundles) Instruction Access-bit (27) | 220 | // 0x2400 Entry 9 (size 64 bundles) Instruction Access-bit (27) |
215 | ENTRY(kvm_iaccess_bit) | 221 | ENTRY(kvm_iaccess_bit) |
216 | KVM_REFLECT(9) | 222 | KVM_REFLECT(9) |
217 | END(kvm_iaccess_bit) | 223 | END(kvm_iaccess_bit) |
218 | 224 | ||
219 | .org kvm_ia64_ivt+0x2800 | 225 | .org kvm_ia64_ivt+0x2800 |
220 | /////////////////////////////////////////////////////////////////// | 226 | /////////////////////////////////////////////////////////////////// |
221 | // 0x2800 Entry 10 (size 64 bundles) Data Access-bit (15,55) | 227 | // 0x2800 Entry 10 (size 64 bundles) Data Access-bit (15,55) |
222 | ENTRY(kvm_daccess_bit) | 228 | ENTRY(kvm_daccess_bit) |
223 | KVM_REFLECT(10) | 229 | KVM_REFLECT(10) |
224 | END(kvm_daccess_bit) | 230 | END(kvm_daccess_bit) |
225 | 231 | ||
226 | .org kvm_ia64_ivt+0x2c00 | 232 | .org kvm_ia64_ivt+0x2c00 |
227 | ///////////////////////////////////////////////////////////////// | 233 | ///////////////////////////////////////////////////////////////// |
228 | // 0x2c00 Entry 11 (size 64 bundles) Break instruction (33) | 234 | // 0x2c00 Entry 11 (size 64 bundles) Break instruction (33) |
229 | ENTRY(kvm_break_fault) | 235 | ENTRY(kvm_break_fault) |
230 | mov r31=pr | 236 | mov r31=pr |
231 | mov r19=11 | 237 | mov r19=11 |
232 | mov r29=cr.ipsr | 238 | mov r29=cr.ipsr |
233 | ;; | 239 | ;; |
234 | KVM_SAVE_MIN_WITH_COVER_R19 | 240 | KVM_SAVE_MIN_WITH_COVER_R19 |
235 | ;; | 241 | ;; |
236 | alloc r14=ar.pfs,0,0,4,0 // now it's safe (must be first in insn group!) | 242 | alloc r14=ar.pfs,0,0,4,0 //(must be first in insn group!) |
237 | mov out0=cr.ifa | 243 | mov out0=cr.ifa |
238 | mov out2=cr.isr // FIXME: pity to make this slow access twice | 244 | mov out2=cr.isr // FIXME: pity to make this slow access twice |
239 | mov out3=cr.iim // FIXME: pity to make this slow access twice | 245 | mov out3=cr.iim // FIXME: pity to make this slow access twice |
240 | adds r3=8,r2 // set up second base pointer | 246 | adds r3=8,r2 // set up second base pointer |
241 | ;; | 247 | ;; |
242 | ssm psr.ic | 248 | ssm psr.ic |
243 | ;; | 249 | ;; |
244 | srlz.i // guarantee that interruption collection is on | 250 | srlz.i // guarantee that interruption collection is on |
245 | ;; | 251 | ;; |
246 | //(p15)ssm psr.i // restore psr.i | 252 | //(p15)ssm psr.i // restore psr.i |
247 | addl r14=@gprel(ia64_leave_hypervisor),gp | 253 | addl r14=@gprel(ia64_leave_hypervisor),gp |
248 | ;; | 254 | ;; |
249 | KVM_SAVE_REST | 255 | KVM_SAVE_REST |
250 | mov rp=r14 | 256 | mov rp=r14 |
251 | ;; | 257 | ;; |
252 | adds out1=16,sp | 258 | adds out1=16,sp |
253 | br.call.sptk.many b6=kvm_ia64_handle_break | 259 | br.call.sptk.many b6=kvm_ia64_handle_break |
254 | ;; | 260 | ;; |
255 | END(kvm_break_fault) | 261 | END(kvm_break_fault) |
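The break vector ends in a C call; judging from the out0..out3 setup above, the call shape is roughly the following. The prototype is reconstructed from the asm rather than quoted from the C side, so treat the parameter names as assumptions.

    /* Hypothetical prototype: out0 = cr.ifa, out1 = sp + 16 (the saved
     * pt_regs past the 16-byte scratch area), out2 = cr.isr, out3 = cr.iim. */
    struct pt_regs;

    void kvm_ia64_handle_break(unsigned long ifa, struct pt_regs *regs,
                               unsigned long isr, unsigned long iim);

Since rp is preloaded with ia64_leave_hypervisor, the handler returns through the common exit path rather than to the break site.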
256 | 262 | ||
257 | .org kvm_ia64_ivt+0x3000 | 263 | .org kvm_ia64_ivt+0x3000 |
258 | ///////////////////////////////////////////////////////////////// | 264 | ///////////////////////////////////////////////////////////////// |
259 | // 0x3000 Entry 12 (size 64 bundles) External Interrupt (4) | 265 | // 0x3000 Entry 12 (size 64 bundles) External Interrupt (4) |
260 | ENTRY(kvm_interrupt) | 266 | ENTRY(kvm_interrupt) |
261 | mov r31=pr // prepare to save predicates | 267 | mov r31=pr // prepare to save predicates |
262 | mov r19=12 | 268 | mov r19=12 |
263 | mov r29=cr.ipsr | 269 | mov r29=cr.ipsr |
264 | ;; | 270 | ;; |
265 | tbit.z p6,p7=r29,IA64_PSR_VM_BIT | 271 | tbit.z p6,p7=r29,IA64_PSR_VM_BIT |
266 | tbit.z p0,p15=r29,IA64_PSR_I_BIT | 272 | tbit.z p0,p15=r29,IA64_PSR_I_BIT |
267 | ;; | 273 | ;; |
268 | (p7) br.sptk kvm_dispatch_interrupt | 274 | (p7) br.sptk kvm_dispatch_interrupt |
269 | ;; | 275 | ;; |
270 | mov r27=ar.rsc /* M */ | 276 | mov r27=ar.rsc /* M */ |
271 | mov r20=r1 /* A */ | 277 | mov r20=r1 /* A */ |
272 | mov r25=ar.unat /* M */ | 278 | mov r25=ar.unat /* M */ |
273 | mov r26=ar.pfs /* I */ | 279 | mov r26=ar.pfs /* I */ |
274 | mov r28=cr.iip /* M */ | 280 | mov r28=cr.iip /* M */ |
275 | cover /* B (or nothing) */ | 281 | cover /* B (or nothing) */ |
276 | ;; | 282 | ;; |
277 | mov r1=sp | 283 | mov r1=sp |
278 | ;; | 284 | ;; |
279 | invala /* M */ | 285 | invala /* M */ |
280 | mov r30=cr.ifs | 286 | mov r30=cr.ifs |
281 | ;; | 287 | ;; |
282 | addl r1=-VMM_PT_REGS_SIZE,r1 | 288 | addl r1=-VMM_PT_REGS_SIZE,r1 |
283 | ;; | 289 | ;; |
284 | adds r17=2*L1_CACHE_BYTES,r1 /* really: biggest cache-line size */ | 290 | adds r17=2*L1_CACHE_BYTES,r1 /* really: biggest cache-line size */ |
285 | adds r16=PT(CR_IPSR),r1 | 291 | adds r16=PT(CR_IPSR),r1 |
286 | ;; | 292 | ;; |
287 | lfetch.fault.excl.nt1 [r17],L1_CACHE_BYTES | 293 | lfetch.fault.excl.nt1 [r17],L1_CACHE_BYTES |
288 | st8 [r16]=r29 /* save cr.ipsr */ | 294 | st8 [r16]=r29 /* save cr.ipsr */ |
289 | ;; | 295 | ;; |
290 | lfetch.fault.excl.nt1 [r17] | 296 | lfetch.fault.excl.nt1 [r17] |
291 | mov r29=b0 | 297 | mov r29=b0 |
292 | ;; | 298 | ;; |
293 | adds r16=PT(R8),r1 /* initialize first base pointer */ | 299 | adds r16=PT(R8),r1 /* initialize first base pointer */ |
294 | adds r17=PT(R9),r1 /* initialize second base pointer */ | 300 | adds r17=PT(R9),r1 /* initialize second base pointer */ |
295 | mov r18=r0 /* make sure r18 isn't NaT */ | 301 | mov r18=r0 /* make sure r18 isn't NaT */ |
296 | ;; | 302 | ;; |
297 | .mem.offset 0,0; st8.spill [r16]=r8,16 | 303 | .mem.offset 0,0; st8.spill [r16]=r8,16 |
298 | .mem.offset 8,0; st8.spill [r17]=r9,16 | 304 | .mem.offset 8,0; st8.spill [r17]=r9,16 |
299 | ;; | 305 | ;; |
300 | .mem.offset 0,0; st8.spill [r16]=r10,24 | 306 | .mem.offset 0,0; st8.spill [r16]=r10,24 |
301 | .mem.offset 8,0; st8.spill [r17]=r11,24 | 307 | .mem.offset 8,0; st8.spill [r17]=r11,24 |
302 | ;; | 308 | ;; |
303 | st8 [r16]=r28,16 /* save cr.iip */ | 309 | st8 [r16]=r28,16 /* save cr.iip */ |
304 | st8 [r17]=r30,16 /* save cr.ifs */ | 310 | st8 [r17]=r30,16 /* save cr.ifs */ |
305 | mov r8=ar.fpsr /* M */ | 311 | mov r8=ar.fpsr /* M */ |
306 | mov r9=ar.csd | 312 | mov r9=ar.csd |
307 | mov r10=ar.ssd | 313 | mov r10=ar.ssd |
308 | movl r11=FPSR_DEFAULT /* L-unit */ | 314 | movl r11=FPSR_DEFAULT /* L-unit */ |
309 | ;; | 315 | ;; |
310 | st8 [r16]=r25,16 /* save ar.unat */ | 316 | st8 [r16]=r25,16 /* save ar.unat */ |
311 | st8 [r17]=r26,16 /* save ar.pfs */ | 317 | st8 [r17]=r26,16 /* save ar.pfs */ |
312 | shl r18=r18,16 /* compute ar.rsc to be used for "loadrs" */ | 318 | shl r18=r18,16 /* compute ar.rsc to be used for "loadrs" */ |
313 | ;; | 319 | ;; |
314 | st8 [r16]=r27,16 /* save ar.rsc */ | 320 | st8 [r16]=r27,16 /* save ar.rsc */ |
315 | adds r17=16,r17 /* skip over ar_rnat field */ | 321 | adds r17=16,r17 /* skip over ar_rnat field */ |
316 | ;; | 322 | ;; |
317 | st8 [r17]=r31,16 /* save predicates */ | 323 | st8 [r17]=r31,16 /* save predicates */ |
318 | adds r16=16,r16 /* skip over ar_bspstore field */ | 324 | adds r16=16,r16 /* skip over ar_bspstore field */ |
319 | ;; | 325 | ;; |
320 | st8 [r16]=r29,16 /* save b0 */ | 326 | st8 [r16]=r29,16 /* save b0 */ |
321 | st8 [r17]=r18,16 /* save ar.rsc value for "loadrs" */ | 327 | st8 [r17]=r18,16 /* save ar.rsc value for "loadrs" */ |
322 | ;; | 328 | ;; |
323 | .mem.offset 0,0; st8.spill [r16]=r20,16 /* save original r1 */ | 329 | .mem.offset 0,0; st8.spill [r16]=r20,16 /* save original r1 */ |
324 | .mem.offset 8,0; st8.spill [r17]=r12,16 | 330 | .mem.offset 8,0; st8.spill [r17]=r12,16 |
325 | adds r12=-16,r1 | 331 | adds r12=-16,r1 |
326 | /* switch to kernel memory stack (with 16 bytes of scratch) */ | 332 | /* switch to kernel memory stack (with 16 bytes of scratch) */ |
327 | ;; | 333 | ;; |
328 | .mem.offset 0,0; st8.spill [r16]=r13,16 | 334 | .mem.offset 0,0; st8.spill [r16]=r13,16 |
329 | .mem.offset 8,0; st8.spill [r17]=r8,16 /* save ar.fpsr */ | 335 | .mem.offset 8,0; st8.spill [r17]=r8,16 /* save ar.fpsr */ |
330 | ;; | 336 | ;; |
331 | .mem.offset 0,0; st8.spill [r16]=r15,16 | 337 | .mem.offset 0,0; st8.spill [r16]=r15,16 |
332 | .mem.offset 8,0; st8.spill [r17]=r14,16 | 338 | .mem.offset 8,0; st8.spill [r17]=r14,16 |
333 | dep r14=-1,r0,60,4 | 339 | dep r14=-1,r0,60,4 |
334 | ;; | 340 | ;; |
335 | .mem.offset 0,0; st8.spill [r16]=r2,16 | 341 | .mem.offset 0,0; st8.spill [r16]=r2,16 |
336 | .mem.offset 8,0; st8.spill [r17]=r3,16 | 342 | .mem.offset 8,0; st8.spill [r17]=r3,16 |
337 | adds r2=VMM_PT_REGS_R16_OFFSET,r1 | 343 | adds r2=VMM_PT_REGS_R16_OFFSET,r1 |
338 | adds r14 = VMM_VCPU_GP_OFFSET,r13 | 344 | adds r14 = VMM_VCPU_GP_OFFSET,r13 |
339 | ;; | 345 | ;; |
340 | mov r8=ar.ccv | 346 | mov r8=ar.ccv |
341 | ld8 r14 = [r14] | 347 | ld8 r14 = [r14] |
342 | ;; | 348 | ;; |
343 | mov r1=r14 /* establish kernel global pointer */ | 349 | mov r1=r14 /* establish kernel global pointer */ |
344 | ;; | 350 | ;; |
345 | bsw.1 | 351 | bsw.1 |
346 | ;; | 352 | ;; |
347 | alloc r14=ar.pfs,0,0,1,0 // must be first in an insn group | 353 | alloc r14=ar.pfs,0,0,1,0 // must be first in an insn group |
348 | mov out0=r13 | 354 | mov out0=r13 |
349 | ;; | 355 | ;; |
350 | ssm psr.ic | 356 | ssm psr.ic |
351 | ;; | 357 | ;; |
352 | srlz.i | 358 | srlz.i |
353 | ;; | 359 | ;; |
354 | //(p15) ssm psr.i | 360 | //(p15) ssm psr.i |
355 | adds r3=8,r2 // set up second base pointer for SAVE_REST | 361 | adds r3=8,r2 // set up second base pointer for SAVE_REST |
356 | srlz.i // ensure everybody knows psr.ic is back on | 362 | srlz.i // ensure everybody knows psr.ic is back on |
357 | ;; | 363 | ;; |
358 | .mem.offset 0,0; st8.spill [r2]=r16,16 | 364 | .mem.offset 0,0; st8.spill [r2]=r16,16 |
359 | .mem.offset 8,0; st8.spill [r3]=r17,16 | 365 | .mem.offset 8,0; st8.spill [r3]=r17,16 |
360 | ;; | 366 | ;; |
361 | .mem.offset 0,0; st8.spill [r2]=r18,16 | 367 | .mem.offset 0,0; st8.spill [r2]=r18,16 |
362 | .mem.offset 8,0; st8.spill [r3]=r19,16 | 368 | .mem.offset 8,0; st8.spill [r3]=r19,16 |
363 | ;; | 369 | ;; |
364 | .mem.offset 0,0; st8.spill [r2]=r20,16 | 370 | .mem.offset 0,0; st8.spill [r2]=r20,16 |
365 | .mem.offset 8,0; st8.spill [r3]=r21,16 | 371 | .mem.offset 8,0; st8.spill [r3]=r21,16 |
366 | mov r18=b6 | 372 | mov r18=b6 |
367 | ;; | 373 | ;; |
368 | .mem.offset 0,0; st8.spill [r2]=r22,16 | 374 | .mem.offset 0,0; st8.spill [r2]=r22,16 |
369 | .mem.offset 8,0; st8.spill [r3]=r23,16 | 375 | .mem.offset 8,0; st8.spill [r3]=r23,16 |
370 | mov r19=b7 | 376 | mov r19=b7 |
371 | ;; | 377 | ;; |
372 | .mem.offset 0,0; st8.spill [r2]=r24,16 | 378 | .mem.offset 0,0; st8.spill [r2]=r24,16 |
373 | .mem.offset 8,0; st8.spill [r3]=r25,16 | 379 | .mem.offset 8,0; st8.spill [r3]=r25,16 |
374 | ;; | 380 | ;; |
375 | .mem.offset 0,0; st8.spill [r2]=r26,16 | 381 | .mem.offset 0,0; st8.spill [r2]=r26,16 |
376 | .mem.offset 8,0; st8.spill [r3]=r27,16 | 382 | .mem.offset 8,0; st8.spill [r3]=r27,16 |
377 | ;; | 383 | ;; |
378 | .mem.offset 0,0; st8.spill [r2]=r28,16 | 384 | .mem.offset 0,0; st8.spill [r2]=r28,16 |
379 | .mem.offset 8,0; st8.spill [r3]=r29,16 | 385 | .mem.offset 8,0; st8.spill [r3]=r29,16 |
380 | ;; | 386 | ;; |
381 | .mem.offset 0,0; st8.spill [r2]=r30,16 | 387 | .mem.offset 0,0; st8.spill [r2]=r30,16 |
382 | .mem.offset 8,0; st8.spill [r3]=r31,32 | 388 | .mem.offset 8,0; st8.spill [r3]=r31,32 |
383 | ;; | 389 | ;; |
384 | mov ar.fpsr=r11 /* M-unit */ | 390 | mov ar.fpsr=r11 /* M-unit */ |
385 | st8 [r2]=r8,8 /* ar.ccv */ | 391 | st8 [r2]=r8,8 /* ar.ccv */ |
386 | adds r24=PT(B6)-PT(F7),r3 | 392 | adds r24=PT(B6)-PT(F7),r3 |
387 | ;; | 393 | ;; |
388 | stf.spill [r2]=f6,32 | 394 | stf.spill [r2]=f6,32 |
389 | stf.spill [r3]=f7,32 | 395 | stf.spill [r3]=f7,32 |
390 | ;; | 396 | ;; |
391 | stf.spill [r2]=f8,32 | 397 | stf.spill [r2]=f8,32 |
392 | stf.spill [r3]=f9,32 | 398 | stf.spill [r3]=f9,32 |
393 | ;; | 399 | ;; |
394 | stf.spill [r2]=f10 | 400 | stf.spill [r2]=f10 |
395 | stf.spill [r3]=f11 | 401 | stf.spill [r3]=f11 |
396 | adds r25=PT(B7)-PT(F11),r3 | 402 | adds r25=PT(B7)-PT(F11),r3 |
397 | ;; | 403 | ;; |
398 | st8 [r24]=r18,16 /* b6 */ | 404 | st8 [r24]=r18,16 /* b6 */ |
399 | st8 [r25]=r19,16 /* b7 */ | 405 | st8 [r25]=r19,16 /* b7 */ |
400 | ;; | 406 | ;; |
401 | st8 [r24]=r9 /* ar.csd */ | 407 | st8 [r24]=r9 /* ar.csd */ |
402 | st8 [r25]=r10 /* ar.ssd */ | 408 | st8 [r25]=r10 /* ar.ssd */ |
403 | ;; | 409 | ;; |
404 | srlz.d // make sure we see the effect of cr.ivr | 410 | srlz.d // make sure we see the effect of cr.ivr |
405 | addl r14=@gprel(ia64_leave_nested),gp | 411 | addl r14=@gprel(ia64_leave_nested),gp |
406 | ;; | 412 | ;; |
407 | mov rp=r14 | 413 | mov rp=r14 |
408 | br.call.sptk.many b6=kvm_ia64_handle_irq | 414 | br.call.sptk.many b6=kvm_ia64_handle_irq |
409 | ;; | 415 | ;; |
410 | END(kvm_interrupt) | 416 | END(kvm_interrupt) |
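Sketch of the triage at the top of kvm_interrupt: tbit.z p6,p7 sets p7 when the tested bit is 1, so an interrupt taken with psr.vm set (guest running) branches to kvm_dispatch_interrupt, while a VMM-context interrupt is saved inline by the long spill sequence and retired through ia64_leave_nested. Per the alloc/mov pair after bsw.1, the C handler receives a single argument, r13; the vcpu type below is an assumption about what r13 points at.

    #include <stdbool.h>

    struct kvm_vcpu;                                 /* opaque in this sketch */
    void kvm_dispatch_interrupt_path(void);          /* hypothetical stand-in */
    void kvm_ia64_handle_irq(struct kvm_vcpu *vcpu); /* assumed single-arg shape */

    static void kvm_interrupt_sketch(struct kvm_vcpu *vcpu, bool psr_vm)
    {
        if (psr_vm) {
            /* (p7) br.sptk kvm_dispatch_interrupt: guest was running. */
            kvm_dispatch_interrupt_path();
            return;
        }
        /* The VMM itself was interrupted: pt_regs is saved inline above,
         * then the handler runs and control leaves via ia64_leave_nested. */
        kvm_ia64_handle_irq(vcpu);
    }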
411 | 417 | ||
412 | .global kvm_dispatch_vexirq | 418 | .global kvm_dispatch_vexirq |
@@ -414,387 +420,385 @@ END(kvm_interrupt) | |||
414 | ////////////////////////////////////////////////////////////////////// | 420 | ////////////////////////////////////////////////////////////////////// |
415 | // 0x3400 Entry 13 (size 64 bundles) Reserved | 421 | // 0x3400 Entry 13 (size 64 bundles) Reserved |
416 | ENTRY(kvm_virtual_exirq) | 422 | ENTRY(kvm_virtual_exirq) |
417 | mov r31=pr | 423 | mov r31=pr |
418 | mov r19=13 | 424 | mov r19=13 |
419 | mov r30=r0 | 425 | mov r30=r0 |
420 | ;; | 426 | ;; |
421 | kvm_dispatch_vexirq: | 427 | kvm_dispatch_vexirq: |
422 | cmp.eq p6,p0 = 1,r30 | 428 | cmp.eq p6,p0 = 1,r30 |
423 | ;; | 429 | ;; |
424 | (p6)add r29 = VMM_VCPU_SAVED_GP_OFFSET,r21 | 430 | (p6) add r29 = VMM_VCPU_SAVED_GP_OFFSET,r21 |
425 | ;; | 431 | ;; |
426 | (p6)ld8 r1 = [r29] | 432 | (p6) ld8 r1 = [r29] |
427 | ;; | 433 | ;; |
428 | KVM_SAVE_MIN_WITH_COVER_R19 | 434 | KVM_SAVE_MIN_WITH_COVER_R19 |
429 | alloc r14=ar.pfs,0,0,1,0 | 435 | alloc r14=ar.pfs,0,0,1,0 |
430 | mov out0=r13 | 436 | mov out0=r13 |
431 | 437 | ||
432 | ssm psr.ic | 438 | ssm psr.ic |
433 | ;; | 439 | ;; |
434 | srlz.i // guarantee that interruption collection is on | 440 | srlz.i // guarantee that interruption collection is on |
435 | ;; | 441 | ;; |
436 | //(p15) ssm psr.i // restore psr.i | 442 | //(p15) ssm psr.i // restore psr.i |
437 | adds r3=8,r2 // set up second base pointer | 443 | adds r3=8,r2 // set up second base pointer |
438 | ;; | 444 | ;; |
439 | KVM_SAVE_REST | 445 | KVM_SAVE_REST |
440 | addl r14=@gprel(ia64_leave_hypervisor),gp | 446 | addl r14=@gprel(ia64_leave_hypervisor),gp |
441 | ;; | 447 | ;; |
442 | mov rp=r14 | 448 | mov rp=r14 |
443 | br.call.sptk.many b6=kvm_vexirq | 449 | br.call.sptk.many b6=kvm_vexirq |
444 | END(kvm_virtual_exirq) | 450 | END(kvm_virtual_exirq) |
445 | 451 | ||
446 | .org kvm_ia64_ivt+0x3800 | 452 | .org kvm_ia64_ivt+0x3800 |
447 | ///////////////////////////////////////////////////////////////////// | 453 | ///////////////////////////////////////////////////////////////////// |
448 | // 0x3800 Entry 14 (size 64 bundles) Reserved | 454 | // 0x3800 Entry 14 (size 64 bundles) Reserved |
449 | KVM_FAULT(14) | 455 | KVM_FAULT(14) |
450 | // this code segment is from 2.6.16.13 | 456 | // this code segment is from 2.6.16.13 |
451 | |||
452 | 457 | ||
453 | .org kvm_ia64_ivt+0x3c00 | 458 | .org kvm_ia64_ivt+0x3c00 |
454 | /////////////////////////////////////////////////////////////////////// | 459 | /////////////////////////////////////////////////////////////////////// |
455 | // 0x3c00 Entry 15 (size 64 bundles) Reserved | 460 | // 0x3c00 Entry 15 (size 64 bundles) Reserved |
456 | KVM_FAULT(15) | 461 | KVM_FAULT(15) |
457 | |||
458 | 462 | ||
459 | .org kvm_ia64_ivt+0x4000 | 463 | .org kvm_ia64_ivt+0x4000 |
460 | /////////////////////////////////////////////////////////////////////// | 464 | /////////////////////////////////////////////////////////////////////// |
461 | // 0x4000 Entry 16 (size 64 bundles) Reserved | 465 | // 0x4000 Entry 16 (size 64 bundles) Reserved |
462 | KVM_FAULT(16) | 466 | KVM_FAULT(16) |
463 | 467 | ||
464 | .org kvm_ia64_ivt+0x4400 | 468 | .org kvm_ia64_ivt+0x4400 |
465 | ////////////////////////////////////////////////////////////////////// | 469 | ////////////////////////////////////////////////////////////////////// |
466 | // 0x4400 Entry 17 (size 64 bundles) Reserved | 470 | // 0x4400 Entry 17 (size 64 bundles) Reserved |
467 | KVM_FAULT(17) | 471 | KVM_FAULT(17) |
468 | 472 | ||
469 | .org kvm_ia64_ivt+0x4800 | 473 | .org kvm_ia64_ivt+0x4800 |
470 | ////////////////////////////////////////////////////////////////////// | 474 | ////////////////////////////////////////////////////////////////////// |
471 | // 0x4800 Entry 18 (size 64 bundles) Reserved | 475 | // 0x4800 Entry 18 (size 64 bundles) Reserved |
472 | KVM_FAULT(18) | 476 | KVM_FAULT(18) |
473 | 477 | ||
474 | .org kvm_ia64_ivt+0x4c00 | 478 | .org kvm_ia64_ivt+0x4c00 |
475 | ////////////////////////////////////////////////////////////////////// | 479 | ////////////////////////////////////////////////////////////////////// |
476 | // 0x4c00 Entry 19 (size 64 bundles) Reserved | 480 | // 0x4c00 Entry 19 (size 64 bundles) Reserved |
477 | KVM_FAULT(19) | 481 | KVM_FAULT(19) |
478 | 482 | ||
479 | .org kvm_ia64_ivt+0x5000 | 483 | .org kvm_ia64_ivt+0x5000 |
480 | ////////////////////////////////////////////////////////////////////// | 484 | ////////////////////////////////////////////////////////////////////// |
481 | // 0x5000 Entry 20 (size 16 bundles) Page Not Present | 485 | // 0x5000 Entry 20 (size 16 bundles) Page Not Present |
482 | ENTRY(kvm_page_not_present) | 486 | ENTRY(kvm_page_not_present) |
483 | KVM_REFLECT(20) | 487 | KVM_REFLECT(20) |
484 | END(kvm_page_not_present) | 488 | END(kvm_page_not_present) |
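Most of the short entries in this stretch are one-liners built on two macros whose bodies sit earlier in the file, outside this hunk; the model below is therefore an assumption about their intent, with hypothetical helper names, not a transcription.

    /* Assumed intent only; both helpers below are hypothetical names. */
    void reflect_to_guest(int vector);   /* re-inject through the guest IVT */
    void vmm_fault_panic(int vector);    /* unexpected fault in VMM context */

    static void kvm_reflect_model(int vec) { reflect_to_guest(vec); } /* KVM_REFLECT(n) */
    static void kvm_fault_model(int vec)   { vmm_fault_panic(vec); }  /* KVM_FAULT(n)   */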
485 | 489 | ||
486 | .org kvm_ia64_ivt+0x5100 | 490 | .org kvm_ia64_ivt+0x5100 |
487 | /////////////////////////////////////////////////////////////////////// | 491 | /////////////////////////////////////////////////////////////////////// |
488 | // 0x5100 Entry 21 (size 16 bundles) Key Permission vector | 492 | // 0x5100 Entry 21 (size 16 bundles) Key Permission vector |
489 | ENTRY(kvm_key_permission) | 493 | ENTRY(kvm_key_permission) |
490 | KVM_REFLECT(21) | 494 | KVM_REFLECT(21) |
491 | END(kvm_key_permission) | 495 | END(kvm_key_permission) |
492 | 496 | ||
493 | .org kvm_ia64_ivt+0x5200 | 497 | .org kvm_ia64_ivt+0x5200 |
494 | ////////////////////////////////////////////////////////////////////// | 498 | ////////////////////////////////////////////////////////////////////// |
495 | // 0x5200 Entry 22 (size 16 bundles) Instruction Access Rights (26) | 499 | // 0x5200 Entry 22 (size 16 bundles) Instruction Access Rights (26) |
496 | ENTRY(kvm_iaccess_rights) | 500 | ENTRY(kvm_iaccess_rights) |
497 | KVM_REFLECT(22) | 501 | KVM_REFLECT(22) |
498 | END(kvm_iaccess_rights) | 502 | END(kvm_iaccess_rights) |
499 | 503 | ||
500 | .org kvm_ia64_ivt+0x5300 | 504 | .org kvm_ia64_ivt+0x5300 |
501 | ////////////////////////////////////////////////////////////////////// | 505 | ////////////////////////////////////////////////////////////////////// |
502 | // 0x5300 Entry 23 (size 16 bundles) Data Access Rights (14,53) | 506 | // 0x5300 Entry 23 (size 16 bundles) Data Access Rights (14,53) |
503 | ENTRY(kvm_daccess_rights) | 507 | ENTRY(kvm_daccess_rights) |
504 | KVM_REFLECT(23) | 508 | KVM_REFLECT(23) |
505 | END(kvm_daccess_rights) | 509 | END(kvm_daccess_rights) |
506 | 510 | ||
507 | .org kvm_ia64_ivt+0x5400 | 511 | .org kvm_ia64_ivt+0x5400 |
508 | ///////////////////////////////////////////////////////////////////// | 512 | ///////////////////////////////////////////////////////////////////// |
509 | // 0x5400 Entry 24 (size 16 bundles) General Exception (5,32,34,36,38,39) | 513 | // 0x5400 Entry 24 (size 16 bundles) General Exception (5,32,34,36,38,39) |
510 | ENTRY(kvm_general_exception) | 514 | ENTRY(kvm_general_exception) |
511 | KVM_REFLECT(24) | 515 | KVM_REFLECT(24) |
512 | KVM_FAULT(24) | 516 | KVM_FAULT(24) |
513 | END(kvm_general_exception) | 517 | END(kvm_general_exception) |
514 | 518 | ||
515 | .org kvm_ia64_ivt+0x5500 | 519 | .org kvm_ia64_ivt+0x5500 |
516 | ////////////////////////////////////////////////////////////////////// | 520 | ////////////////////////////////////////////////////////////////////// |
517 | // 0x5500 Entry 25 (size 16 bundles) Disabled FP-Register (35) | 521 | // 0x5500 Entry 25 (size 16 bundles) Disabled FP-Register (35) |
518 | ENTRY(kvm_disabled_fp_reg) | 522 | ENTRY(kvm_disabled_fp_reg) |
519 | KVM_REFLECT(25) | 523 | KVM_REFLECT(25) |
520 | END(kvm_disabled_fp_reg) | 524 | END(kvm_disabled_fp_reg) |
521 | 525 | ||
522 | .org kvm_ia64_ivt+0x5600 | 526 | .org kvm_ia64_ivt+0x5600 |
523 | //////////////////////////////////////////////////////////////////// | 527 | //////////////////////////////////////////////////////////////////// |
524 | // 0x5600 Entry 26 (size 16 bundles) Nat Consumption (11,23,37,50) | 528 | // 0x5600 Entry 26 (size 16 bundles) Nat Consumption (11,23,37,50) |
525 | ENTRY(kvm_nat_consumption) | 529 | ENTRY(kvm_nat_consumption) |
526 | KVM_REFLECT(26) | 530 | KVM_REFLECT(26) |
527 | END(kvm_nat_consumption) | 531 | END(kvm_nat_consumption) |
528 | 532 | ||
529 | .org kvm_ia64_ivt+0x5700 | 533 | .org kvm_ia64_ivt+0x5700 |
530 | ///////////////////////////////////////////////////////////////////// | 534 | ///////////////////////////////////////////////////////////////////// |
531 | // 0x5700 Entry 27 (size 16 bundles) Speculation (40) | 535 | // 0x5700 Entry 27 (size 16 bundles) Speculation (40) |
532 | ENTRY(kvm_speculation_vector) | 536 | ENTRY(kvm_speculation_vector) |
533 | KVM_REFLECT(27) | 537 | KVM_REFLECT(27) |
534 | END(kvm_speculation_vector) | 538 | END(kvm_speculation_vector) |
535 | 539 | ||
536 | .org kvm_ia64_ivt+0x5800 | 540 | .org kvm_ia64_ivt+0x5800 |
537 | ///////////////////////////////////////////////////////////////////// | 541 | ///////////////////////////////////////////////////////////////////// |
538 | // 0x5800 Entry 28 (size 16 bundles) Reserved | 542 | // 0x5800 Entry 28 (size 16 bundles) Reserved |
539 | KVM_FAULT(28) | 543 | KVM_FAULT(28) |
540 | 544 | ||
541 | .org kvm_ia64_ivt+0x5900 | 545 | .org kvm_ia64_ivt+0x5900 |
542 | /////////////////////////////////////////////////////////////////// | 546 | /////////////////////////////////////////////////////////////////// |
543 | // 0x5900 Entry 29 (size 16 bundles) Debug (16,28,56) | 547 | // 0x5900 Entry 29 (size 16 bundles) Debug (16,28,56) |
544 | ENTRY(kvm_debug_vector) | 548 | ENTRY(kvm_debug_vector) |
545 | KVM_FAULT(29) | 549 | KVM_FAULT(29) |
546 | END(kvm_debug_vector) | 550 | END(kvm_debug_vector) |
547 | 551 | ||
548 | .org kvm_ia64_ivt+0x5a00 | 552 | .org kvm_ia64_ivt+0x5a00 |
549 | /////////////////////////////////////////////////////////////// | 553 | /////////////////////////////////////////////////////////////// |
550 | // 0x5a00 Entry 30 (size 16 bundles) Unaligned Reference (57) | 554 | // 0x5a00 Entry 30 (size 16 bundles) Unaligned Reference (57) |
551 | ENTRY(kvm_unaligned_access) | 555 | ENTRY(kvm_unaligned_access) |
552 | KVM_REFLECT(30) | 556 | KVM_REFLECT(30) |
553 | END(kvm_unaligned_access) | 557 | END(kvm_unaligned_access) |
554 | 558 | ||
555 | .org kvm_ia64_ivt+0x5b00 | 559 | .org kvm_ia64_ivt+0x5b00 |
556 | ////////////////////////////////////////////////////////////////////// | 560 | ////////////////////////////////////////////////////////////////////// |
557 | // 0x5b00 Entry 31 (size 16 bundles) Unsupported Data Reference (57) | 561 | // 0x5b00 Entry 31 (size 16 bundles) Unsupported Data Reference (57) |
558 | ENTRY(kvm_unsupported_data_reference) | 562 | ENTRY(kvm_unsupported_data_reference) |
559 | KVM_REFLECT(31) | 563 | KVM_REFLECT(31) |
560 | END(kvm_unsupported_data_reference) | 564 | END(kvm_unsupported_data_reference) |
561 | 565 | ||
562 | .org kvm_ia64_ivt+0x5c00 | 566 | .org kvm_ia64_ivt+0x5c00 |
563 | //////////////////////////////////////////////////////////////////// | 567 | //////////////////////////////////////////////////////////////////// |
564 | // 0x5c00 Entry 32 (size 16 bundles) Floating Point FAULT (65) | 568 | // 0x5c00 Entry 32 (size 16 bundles) Floating Point FAULT (65) |
565 | ENTRY(kvm_floating_point_fault) | 569 | ENTRY(kvm_floating_point_fault) |
566 | KVM_REFLECT(32) | 570 | KVM_REFLECT(32) |
567 | END(kvm_floating_point_fault) | 571 | END(kvm_floating_point_fault) |
568 | 572 | ||
569 | .org kvm_ia64_ivt+0x5d00 | 573 | .org kvm_ia64_ivt+0x5d00 |
570 | ///////////////////////////////////////////////////////////////////// | 574 | ///////////////////////////////////////////////////////////////////// |
571 | // 0x5d00 Entry 33 (size 16 bundles) Floating Point Trap (66) | 575 | // 0x5d00 Entry 33 (size 16 bundles) Floating Point Trap (66) |
572 | ENTRY(kvm_floating_point_trap) | 576 | ENTRY(kvm_floating_point_trap) |
573 | KVM_REFLECT(33) | 577 | KVM_REFLECT(33) |
574 | END(kvm_floating_point_trap) | 578 | END(kvm_floating_point_trap) |
575 | 579 | ||
576 | .org kvm_ia64_ivt+0x5e00 | 580 | .org kvm_ia64_ivt+0x5e00 |
577 | ////////////////////////////////////////////////////////////////////// | 581 | ////////////////////////////////////////////////////////////////////// |
578 | // 0x5e00 Entry 34 (size 16 bundles) Lower Privilege Transfer Trap (66) | 582 | // 0x5e00 Entry 34 (size 16 bundles) Lower Privilege Transfer Trap (66) |
579 | ENTRY(kvm_lower_privilege_trap) | 583 | ENTRY(kvm_lower_privilege_trap) |
580 | KVM_REFLECT(34) | 584 | KVM_REFLECT(34) |
581 | END(kvm_lower_privilege_trap) | 585 | END(kvm_lower_privilege_trap) |
582 | 586 | ||
583 | .org kvm_ia64_ivt+0x5f00 | 587 | .org kvm_ia64_ivt+0x5f00 |
584 | ////////////////////////////////////////////////////////////////////// | 588 | ////////////////////////////////////////////////////////////////////// |
585 | // 0x5f00 Entry 35 (size 16 bundles) Taken Branch Trap (68) | 589 | // 0x5f00 Entry 35 (size 16 bundles) Taken Branch Trap (68) |
586 | ENTRY(kvm_taken_branch_trap) | 590 | ENTRY(kvm_taken_branch_trap) |
587 | KVM_REFLECT(35) | 591 | KVM_REFLECT(35) |
588 | END(kvm_taken_branch_trap) | 592 | END(kvm_taken_branch_trap) |
589 | 593 | ||
590 | .org kvm_ia64_ivt+0x6000 | 594 | .org kvm_ia64_ivt+0x6000 |
591 | //////////////////////////////////////////////////////////////////// | 595 | //////////////////////////////////////////////////////////////////// |
592 | // 0x6000 Entry 36 (size 16 bundles) Single Step Trap (69) | 596 | // 0x6000 Entry 36 (size 16 bundles) Single Step Trap (69) |
593 | ENTRY(kvm_single_step_trap) | 597 | ENTRY(kvm_single_step_trap) |
594 | KVM_REFLECT(36) | 598 | KVM_REFLECT(36) |
595 | END(kvm_single_step_trap) | 599 | END(kvm_single_step_trap) |
596 | .global kvm_virtualization_fault_back | 600 | .global kvm_virtualization_fault_back |
597 | .org kvm_ia64_ivt+0x6100 | 601 | .org kvm_ia64_ivt+0x6100 |
598 | ///////////////////////////////////////////////////////////////////// | 602 | ///////////////////////////////////////////////////////////////////// |
599 | // 0x6100 Entry 37 (size 16 bundles) Virtualization Fault | 603 | // 0x6100 Entry 37 (size 16 bundles) Virtualization Fault |
600 | ENTRY(kvm_virtualization_fault) | 604 | ENTRY(kvm_virtualization_fault) |
601 | mov r31=pr | 605 | mov r31=pr |
602 | adds r16 = VMM_VCPU_SAVED_GP_OFFSET,r21 | 606 | adds r16 = VMM_VCPU_SAVED_GP_OFFSET,r21 |
603 | ;; | 607 | ;; |
604 | st8 [r16] = r1 | 608 | st8 [r16] = r1 |
605 | adds r17 = VMM_VCPU_GP_OFFSET, r21 | 609 | adds r17 = VMM_VCPU_GP_OFFSET, r21 |
606 | ;; | 610 | ;; |
607 | ld8 r1 = [r17] | 611 | ld8 r1 = [r17] |
608 | cmp.eq p6,p0=EVENT_MOV_FROM_AR,r24 | 612 | cmp.eq p6,p0=EVENT_MOV_FROM_AR,r24 |
609 | cmp.eq p7,p0=EVENT_MOV_FROM_RR,r24 | 613 | cmp.eq p7,p0=EVENT_MOV_FROM_RR,r24 |
610 | cmp.eq p8,p0=EVENT_MOV_TO_RR,r24 | 614 | cmp.eq p8,p0=EVENT_MOV_TO_RR,r24 |
611 | cmp.eq p9,p0=EVENT_RSM,r24 | 615 | cmp.eq p9,p0=EVENT_RSM,r24 |
612 | cmp.eq p10,p0=EVENT_SSM,r24 | 616 | cmp.eq p10,p0=EVENT_SSM,r24 |
613 | cmp.eq p11,p0=EVENT_MOV_TO_PSR,r24 | 617 | cmp.eq p11,p0=EVENT_MOV_TO_PSR,r24 |
614 | cmp.eq p12,p0=EVENT_THASH,r24 | 618 | cmp.eq p12,p0=EVENT_THASH,r24 |
615 | (p6) br.dptk.many kvm_asm_mov_from_ar | 619 | (p6) br.dptk.many kvm_asm_mov_from_ar |
616 | (p7) br.dptk.many kvm_asm_mov_from_rr | 620 | (p7) br.dptk.many kvm_asm_mov_from_rr |
617 | (p8) br.dptk.many kvm_asm_mov_to_rr | 621 | (p8) br.dptk.many kvm_asm_mov_to_rr |
618 | (p9) br.dptk.many kvm_asm_rsm | 622 | (p9) br.dptk.many kvm_asm_rsm |
619 | (p10) br.dptk.many kvm_asm_ssm | 623 | (p10) br.dptk.many kvm_asm_ssm |
620 | (p11) br.dptk.many kvm_asm_mov_to_psr | 624 | (p11) br.dptk.many kvm_asm_mov_to_psr |
621 | (p12) br.dptk.many kvm_asm_thash | 625 | (p12) br.dptk.many kvm_asm_thash |
622 | ;; | 626 | ;; |
623 | kvm_virtualization_fault_back: | 627 | kvm_virtualization_fault_back: |
624 | adds r16 = VMM_VCPU_SAVED_GP_OFFSET,r21 | 628 | adds r16 = VMM_VCPU_SAVED_GP_OFFSET,r21 |
625 | ;; | 629 | ;; |
626 | ld8 r1 = [r16] | 630 | ld8 r1 = [r16] |
627 | ;; | 631 | ;; |
628 | mov r19=37 | 632 | mov r19=37 |
629 | adds r16 = VMM_VCPU_CAUSE_OFFSET,r21 | 633 | adds r16 = VMM_VCPU_CAUSE_OFFSET,r21 |
630 | adds r17 = VMM_VCPU_OPCODE_OFFSET,r21 | 634 | adds r17 = VMM_VCPU_OPCODE_OFFSET,r21 |
631 | ;; | 635 | ;; |
632 | st8 [r16] = r24 | 636 | st8 [r16] = r24 |
633 | st8 [r17] = r25 | 637 | st8 [r17] = r25 |
634 | ;; | 638 | ;; |
635 | cmp.ne p6,p0=EVENT_RFI, r24 | 639 | cmp.ne p6,p0=EVENT_RFI, r24 |
636 | (p6) br.sptk kvm_dispatch_virtualization_fault | 640 | (p6) br.sptk kvm_dispatch_virtualization_fault |
637 | ;; | 641 | ;; |
638 | adds r18=VMM_VPD_BASE_OFFSET,r21 | 642 | adds r18=VMM_VPD_BASE_OFFSET,r21 |
639 | ;; | 643 | ;; |
640 | ld8 r18=[r18] | 644 | ld8 r18=[r18] |
641 | ;; | 645 | ;; |
642 | adds r18=VMM_VPD_VIFS_OFFSET,r18 | 646 | adds r18=VMM_VPD_VIFS_OFFSET,r18 |
643 | ;; | 647 | ;; |
644 | ld8 r18=[r18] | 648 | ld8 r18=[r18] |
645 | ;; | 649 | ;; |
646 | tbit.z p6,p0=r18,63 | 650 | tbit.z p6,p0=r18,63 |
647 | (p6) br.sptk kvm_dispatch_virtualization_fault | 651 | (p6) br.sptk kvm_dispatch_virtualization_fault |
648 | ;; | 652 | ;; |
649 | // if vifs.v=1, discard the current register frame | 653 | // if vifs.v=1, discard the current register frame |
650 | alloc r18=ar.pfs,0,0,0,0 | 654 | alloc r18=ar.pfs,0,0,0,0 |
651 | br.sptk kvm_dispatch_virtualization_fault | 655 | br.sptk kvm_dispatch_virtualization_fault |
652 | END(kvm_virtualization_fault) | 656 | END(kvm_virtualization_fault) |
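The cmp.eq/br.dptk cascade above acts as a switch on the virtualization-fault cause in r24, with hand-written fast paths for the commonly emulated operations; any cause without a fast path (or a fast path that bails out to kvm_virtualization_fault_back) records cause and opcode in the vcpu and then dispatches, with EVENT_RFI additionally discarding the current register frame first when vifs.v is set. A C rendering of the decode, assuming enum values that are not visible in this hunk:

    /* EVENT_* names are from the asm above; the values are assumed. */
    enum kvm_event {
        EVENT_MOV_FROM_AR, EVENT_MOV_FROM_RR, EVENT_MOV_TO_RR,
        EVENT_RSM, EVENT_SSM, EVENT_MOV_TO_PSR, EVENT_THASH, EVENT_RFI,
    };

    void kvm_asm_fast_path(enum kvm_event cause);   /* hypothetical */
    void kvm_dispatch_virt_fault(void);             /* hypothetical */

    static void virtualization_fault_sketch(enum kvm_event cause)
    {
        switch (cause) {
        case EVENT_MOV_FROM_AR: case EVENT_MOV_FROM_RR: case EVENT_MOV_TO_RR:
        case EVENT_RSM: case EVENT_SSM: case EVENT_MOV_TO_PSR: case EVENT_THASH:
            /* (p6)..(p12) br.dptk.many kvm_asm_*: emulate in asm. */
            kvm_asm_fast_path(cause);
            return;
        case EVENT_RFI:
            /* If vifs.v=1, the trailing alloc discards the current
             * register frame before dispatching; fall through. */
        default:
            kvm_dispatch_virt_fault();
            return;
        }
    }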
653 | 657 | ||
654 | .org kvm_ia64_ivt+0x6200 | 658 | .org kvm_ia64_ivt+0x6200 |
655 | ////////////////////////////////////////////////////////////// | 659 | ////////////////////////////////////////////////////////////// |
656 | // 0x6200 Entry 38 (size 16 bundles) Reserved | 660 | // 0x6200 Entry 38 (size 16 bundles) Reserved |
657 | KVM_FAULT(38) | 661 | KVM_FAULT(38) |
658 | 662 | ||
659 | .org kvm_ia64_ivt+0x6300 | 663 | .org kvm_ia64_ivt+0x6300 |
660 | ///////////////////////////////////////////////////////////////// | 664 | ///////////////////////////////////////////////////////////////// |
661 | // 0x6300 Entry 39 (size 16 bundles) Reserved | 665 | // 0x6300 Entry 39 (size 16 bundles) Reserved |
662 | KVM_FAULT(39) | 666 | KVM_FAULT(39) |
663 | 667 | ||
664 | .org kvm_ia64_ivt+0x6400 | 668 | .org kvm_ia64_ivt+0x6400 |
665 | ///////////////////////////////////////////////////////////////// | 669 | ///////////////////////////////////////////////////////////////// |
666 | // 0x6400 Entry 40 (size 16 bundles) Reserved | 670 | // 0x6400 Entry 40 (size 16 bundles) Reserved |
667 | KVM_FAULT(40) | 671 | KVM_FAULT(40) |
668 | 672 | ||
669 | .org kvm_ia64_ivt+0x6500 | 673 | .org kvm_ia64_ivt+0x6500 |
670 | ////////////////////////////////////////////////////////////////// | 674 | ////////////////////////////////////////////////////////////////// |
671 | // 0x6500 Entry 41 (size 16 bundles) Reserved | 675 | // 0x6500 Entry 41 (size 16 bundles) Reserved |
672 | KVM_FAULT(41) | 676 | KVM_FAULT(41) |
673 | 677 | ||
674 | .org kvm_ia64_ivt+0x6600 | 678 | .org kvm_ia64_ivt+0x6600 |
675 | ////////////////////////////////////////////////////////////////// | 679 | ////////////////////////////////////////////////////////////////// |
676 | // 0x6600 Entry 42 (size 16 bundles) Reserved | 680 | // 0x6600 Entry 42 (size 16 bundles) Reserved |
677 | KVM_FAULT(42) | 681 | KVM_FAULT(42) |
678 | 682 | ||
679 | .org kvm_ia64_ivt+0x6700 | 683 | .org kvm_ia64_ivt+0x6700 |
680 | ////////////////////////////////////////////////////////////////// | 684 | ////////////////////////////////////////////////////////////////// |
681 | // 0x6700 Entry 43 (size 16 bundles) Reserved | 685 | // 0x6700 Entry 43 (size 16 bundles) Reserved |
682 | KVM_FAULT(43) | 686 | KVM_FAULT(43) |
683 | 687 | ||
684 | .org kvm_ia64_ivt+0x6800 | 688 | .org kvm_ia64_ivt+0x6800 |
685 | ////////////////////////////////////////////////////////////////// | 689 | ////////////////////////////////////////////////////////////////// |
686 | // 0x6800 Entry 44 (size 16 bundles) Reserved | 690 | // 0x6800 Entry 44 (size 16 bundles) Reserved |
687 | KVM_FAULT(44) | 691 | KVM_FAULT(44) |
688 | 692 | ||
689 | .org kvm_ia64_ivt+0x6900 | 693 | .org kvm_ia64_ivt+0x6900 |
690 | /////////////////////////////////////////////////////////////////// | 694 | /////////////////////////////////////////////////////////////////// |
691 | // 0x6900 Entry 45 (size 16 bundles) IA-32 Exception | 695 | // 0x6900 Entry 45 (size 16 bundles) IA-32 Exception |
692 | //(17,18,29,41,42,43,44,58,60,61,62,72,73,75,76,77) | 696 | //(17,18,29,41,42,43,44,58,60,61,62,72,73,75,76,77) |
693 | ENTRY(kvm_ia32_exception) | 697 | ENTRY(kvm_ia32_exception) |
694 | KVM_FAULT(45) | 698 | KVM_FAULT(45) |
695 | END(kvm_ia32_exception) | 699 | END(kvm_ia32_exception) |
696 | 700 | ||
697 | .org kvm_ia64_ivt+0x6a00 | 701 | .org kvm_ia64_ivt+0x6a00 |
698 | //////////////////////////////////////////////////////////////////// | 702 | //////////////////////////////////////////////////////////////////// |
699 | // 0x6a00 Entry 46 (size 16 bundles) IA-32 Intercept (30,31,59,70,71) | 703 | // 0x6a00 Entry 46 (size 16 bundles) IA-32 Intercept (30,31,59,70,71) |
700 | ENTRY(kvm_ia32_intercept) | 704 | ENTRY(kvm_ia32_intercept) |
701 | KVM_FAULT(47) | 705 | KVM_FAULT(47) |
702 | END(kvm_ia32_intercept) | 706 | END(kvm_ia32_intercept) |
703 | 707 | ||
704 | .org kvm_ia64_ivt+0x6c00 | 708 | .org kvm_ia64_ivt+0x6c00 |
705 | ///////////////////////////////////////////////////////////////////// | 709 | ///////////////////////////////////////////////////////////////////// |
706 | // 0x6c00 Entry 48 (size 16 bundles) Reserved | 710 | // 0x6c00 Entry 48 (size 16 bundles) Reserved |
707 | KVM_FAULT(48) | 711 | KVM_FAULT(48) |
708 | 712 | ||
709 | .org kvm_ia64_ivt+0x6d00 | 713 | .org kvm_ia64_ivt+0x6d00 |
710 | ////////////////////////////////////////////////////////////////////// | 714 | ////////////////////////////////////////////////////////////////////// |
711 | // 0x6d00 Entry 49 (size 16 bundles) Reserved | 715 | // 0x6d00 Entry 49 (size 16 bundles) Reserved |
712 | KVM_FAULT(49) | 716 | KVM_FAULT(49) |
713 | 717 | ||
714 | .org kvm_ia64_ivt+0x6e00 | 718 | .org kvm_ia64_ivt+0x6e00 |
715 | ////////////////////////////////////////////////////////////////////// | 719 | ////////////////////////////////////////////////////////////////////// |
716 | // 0x6e00 Entry 50 (size 16 bundles) Reserved | 720 | // 0x6e00 Entry 50 (size 16 bundles) Reserved |
717 | KVM_FAULT(50) | 721 | KVM_FAULT(50) |
718 | 722 | ||
719 | .org kvm_ia64_ivt+0x6f00 | 723 | .org kvm_ia64_ivt+0x6f00 |
720 | ///////////////////////////////////////////////////////////////////// | 724 | ///////////////////////////////////////////////////////////////////// |
721 | // 0x6f00 Entry 51 (size 16 bundles) Reserved | 725 | // 0x6f00 Entry 51 (size 16 bundles) Reserved |
722 | KVM_FAULT(52) | 726 | KVM_FAULT(52) |
723 | 727 | ||
724 | .org kvm_ia64_ivt+0x7100 | 728 | .org kvm_ia64_ivt+0x7100 |
725 | //////////////////////////////////////////////////////////////////// | 729 | //////////////////////////////////////////////////////////////////// |
726 | // 0x7100 Entry 53 (size 16 bundles) Reserved | 730 | // 0x7100 Entry 53 (size 16 bundles) Reserved |
727 | KVM_FAULT(53) | 731 | KVM_FAULT(53) |
728 | 732 | ||
729 | .org kvm_ia64_ivt+0x7200 | 733 | .org kvm_ia64_ivt+0x7200 |
730 | ///////////////////////////////////////////////////////////////////// | 734 | ///////////////////////////////////////////////////////////////////// |
731 | // 0x7200 Entry 54 (size 16 bundles) Reserved | 735 | // 0x7200 Entry 54 (size 16 bundles) Reserved |
732 | KVM_FAULT(54) | 736 | KVM_FAULT(54) |
733 | 737 | ||
734 | .org kvm_ia64_ivt+0x7300 | 738 | .org kvm_ia64_ivt+0x7300 |
735 | //////////////////////////////////////////////////////////////////// | 739 | //////////////////////////////////////////////////////////////////// |
736 | // 0x7300 Entry 55 (size 16 bundles) Reserved | 740 | // 0x7300 Entry 55 (size 16 bundles) Reserved |
737 | KVM_FAULT(55) | 741 | KVM_FAULT(55) |
738 | 742 | ||
739 | .org kvm_ia64_ivt+0x7400 | 743 | .org kvm_ia64_ivt+0x7400 |
740 | //////////////////////////////////////////////////////////////////// | 744 | //////////////////////////////////////////////////////////////////// |
741 | // 0x7400 Entry 56 (size 16 bundles) Reserved | 745 | // 0x7400 Entry 56 (size 16 bundles) Reserved |
742 | KVM_FAULT(56) | 746 | KVM_FAULT(56) |
743 | 747 | ||
744 | .org kvm_ia64_ivt+0x7500 | 748 | .org kvm_ia64_ivt+0x7500 |
745 | ///////////////////////////////////////////////////////////////////// | 749 | ///////////////////////////////////////////////////////////////////// |
746 | // 0x7500 Entry 57 (size 16 bundles) Reserved | 750 | // 0x7500 Entry 57 (size 16 bundles) Reserved |
747 | KVM_FAULT(57) | 751 | KVM_FAULT(57) |
748 | 752 | ||
749 | .org kvm_ia64_ivt+0x7600 | 753 | .org kvm_ia64_ivt+0x7600 |
750 | ///////////////////////////////////////////////////////////////////// | 754 | ///////////////////////////////////////////////////////////////////// |
751 | // 0x7600 Entry 58 (size 16 bundles) Reserved | 755 | // 0x7600 Entry 58 (size 16 bundles) Reserved |
752 | KVM_FAULT(58) | 756 | KVM_FAULT(58) |
753 | 757 | ||
754 | .org kvm_ia64_ivt+0x7700 | 758 | .org kvm_ia64_ivt+0x7700 |
755 | //////////////////////////////////////////////////////////////////// | 759 | //////////////////////////////////////////////////////////////////// |
756 | // 0x7700 Entry 59 (size 16 bundles) Reserved | 760 | // 0x7700 Entry 59 (size 16 bundles) Reserved |
757 | KVM_FAULT(59) | 761 | KVM_FAULT(59) |
758 | 762 | ||
759 | .org kvm_ia64_ivt+0x7800 | 763 | .org kvm_ia64_ivt+0x7800 |
760 | //////////////////////////////////////////////////////////////////// | 764 | //////////////////////////////////////////////////////////////////// |
761 | // 0x7800 Entry 60 (size 16 bundles) Reserved | 765 | // 0x7800 Entry 60 (size 16 bundles) Reserved |
762 | KVM_FAULT(60) | 766 | KVM_FAULT(60) |
763 | 767 | ||
764 | .org kvm_ia64_ivt+0x7900 | 768 | .org kvm_ia64_ivt+0x7900 |
765 | ///////////////////////////////////////////////////////////////////// | 769 | ///////////////////////////////////////////////////////////////////// |
766 | // 0x7900 Entry 61 (size 16 bundles) Reserved | 770 | // 0x7900 Entry 61 (size 16 bundles) Reserved |
767 | KVM_FAULT(61) | 771 | KVM_FAULT(61) |
768 | 772 | ||
769 | .org kvm_ia64_ivt+0x7a00 | 773 | .org kvm_ia64_ivt+0x7a00 |
770 | ///////////////////////////////////////////////////////////////////// | 774 | ///////////////////////////////////////////////////////////////////// |
771 | // 0x7a00 Entry 62 (size 16 bundles) Reserved | 775 | // 0x7a00 Entry 62 (size 16 bundles) Reserved |
772 | KVM_FAULT(62) | 776 | KVM_FAULT(62) |
773 | 777 | ||
774 | .org kvm_ia64_ivt+0x7b00 | 778 | .org kvm_ia64_ivt+0x7b00 |
775 | ///////////////////////////////////////////////////////////////////// | 779 | ///////////////////////////////////////////////////////////////////// |
776 | // 0x7b00 Entry 63 (size 16 bundles) Reserved | 780 | // 0x7b00 Entry 63 (size 16 bundles) Reserved |
777 | KVM_FAULT(63) | 781 | KVM_FAULT(63) |
778 | 782 | ||
779 | .org kvm_ia64_ivt+0x7c00 | 783 | .org kvm_ia64_ivt+0x7c00 |
780 | //////////////////////////////////////////////////////////////////// | 784 | //////////////////////////////////////////////////////////////////// |
781 | // 0x7c00 Entry 64 (size 16 bundles) Reserved | 785 | // 0x7c00 Entry 64 (size 16 bundles) Reserved |
782 | KVM_FAULT(64) | 786 | KVM_FAULT(64) |
783 | 787 | ||
784 | .org kvm_ia64_ivt+0x7d00 | 788 | .org kvm_ia64_ivt+0x7d00 |
785 | ///////////////////////////////////////////////////////////////////// | 789 | ///////////////////////////////////////////////////////////////////// |
786 | // 0x7d00 Entry 65 (size 16 bundles) Reserved | 790 | // 0x7d00 Entry 65 (size 16 bundles) Reserved |
787 | KVM_FAULT(65) | 791 | KVM_FAULT(65) |
788 | 792 | ||
789 | .org kvm_ia64_ivt+0x7e00 | 793 | .org kvm_ia64_ivt+0x7e00 |
790 | ///////////////////////////////////////////////////////////////////// | 794 | ///////////////////////////////////////////////////////////////////// |
791 | // 0x7e00 Entry 66 (size 16 bundles) Reserved | 795 | // 0x7e00 Entry 66 (size 16 bundles) Reserved |
792 | KVM_FAULT(66) | 796 | KVM_FAULT(66) |
793 | 797 | ||
794 | .org kvm_ia64_ivt+0x7f00 | 798 | .org kvm_ia64_ivt+0x7f00 |
795 | //////////////////////////////////////////////////////////////////// | 799 | //////////////////////////////////////////////////////////////////// |
796 | // 0x7f00 Entry 67 (size 16 bundles) Reserved | 800 | // 0x7f00 Entry 67 (size 16 bundles) Reserved |
797 | KVM_FAULT(67) | 801 | KVM_FAULT(67) |
798 | 802 | ||
799 | .org kvm_ia64_ivt+0x8000 | 803 | .org kvm_ia64_ivt+0x8000 |
800 | // There is no particular reason for this code to be here, other than that | 804 | // There is no particular reason for this code to be here, other than that |
@@ -804,132 +808,128 @@ END(kvm_ia32_intercept) | |||
804 | 808 | ||
805 | 809 | ||
806 | ENTRY(kvm_dtlb_miss_dispatch) | 810 | ENTRY(kvm_dtlb_miss_dispatch) |
807 | mov r19 = 2 | 811 | mov r19 = 2 |
808 | KVM_SAVE_MIN_WITH_COVER_R19 | 812 | KVM_SAVE_MIN_WITH_COVER_R19 |
809 | alloc r14=ar.pfs,0,0,3,0 | 813 | alloc r14=ar.pfs,0,0,3,0 |
810 | mov out0=cr.ifa | 814 | mov out0=cr.ifa |
811 | mov out1=r15 | 815 | mov out1=r15 |
812 | adds r3=8,r2 // set up second base pointer | 816 | adds r3=8,r2 // set up second base pointer |
813 | ;; | 817 | ;; |
814 | ssm psr.ic | 818 | ssm psr.ic |
815 | ;; | 819 | ;; |
816 | srlz.i // guarantee that interruption collection is on | 820 | srlz.i // guarantee that interruption collection is on |
817 | ;; | 821 | ;; |
818 | //(p15) ssm psr.i // restore psr.i | 822 | //(p15) ssm psr.i // restore psr.i |
819 | addl r14=@gprel(ia64_leave_hypervisor_prepare),gp | 823 | addl r14=@gprel(ia64_leave_hypervisor_prepare),gp |
820 | ;; | 824 | ;; |
821 | KVM_SAVE_REST | 825 | KVM_SAVE_REST |
822 | KVM_SAVE_EXTRA | 826 | KVM_SAVE_EXTRA |
823 | mov rp=r14 | 827 | mov rp=r14 |
824 | ;; | 828 | ;; |
825 | adds out2=16,r12 | 829 | adds out2=16,r12 |
826 | br.call.sptk.many b6=kvm_page_fault | 830 | br.call.sptk.many b6=kvm_page_fault |
827 | END(kvm_dtlb_miss_dispatch) | 831 | END(kvm_dtlb_miss_dispatch) |
828 | 832 | ||
829 | ENTRY(kvm_itlb_miss_dispatch) | 833 | ENTRY(kvm_itlb_miss_dispatch) |
830 | 834 | ||
831 | KVM_SAVE_MIN_WITH_COVER_R19 | 835 | KVM_SAVE_MIN_WITH_COVER_R19 |
832 | alloc r14=ar.pfs,0,0,3,0 | 836 | alloc r14=ar.pfs,0,0,3,0 |
833 | mov out0=cr.ifa | 837 | mov out0=cr.ifa |
834 | mov out1=r15 | 838 | mov out1=r15 |
835 | adds r3=8,r2 // set up second base pointer | 839 | adds r3=8,r2 // set up second base pointer |
836 | ;; | 840 | ;; |
837 | ssm psr.ic | 841 | ssm psr.ic |
838 | ;; | 842 | ;; |
839 | srlz.i // guarantee that interruption collection is on | 843 | srlz.i // guarantee that interruption collection is on |
840 | ;; | 844 | ;; |
841 | //(p15) ssm psr.i // restore psr.i | 845 | //(p15) ssm psr.i // restore psr.i |
842 | addl r14=@gprel(ia64_leave_hypervisor),gp | 846 | addl r14=@gprel(ia64_leave_hypervisor),gp |
843 | ;; | 847 | ;; |
844 | KVM_SAVE_REST | 848 | KVM_SAVE_REST |
845 | mov rp=r14 | 849 | mov rp=r14 |
846 | ;; | 850 | ;; |
847 | adds out2=16,r12 | 851 | adds out2=16,r12 |
848 | br.call.sptk.many b6=kvm_page_fault | 852 | br.call.sptk.many b6=kvm_page_fault |
849 | END(kvm_itlb_miss_dispatch) | 853 | END(kvm_itlb_miss_dispatch) |
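Both dispatchers above funnel into the same C page-fault handler; the assumed call shape, reconstructed from the out-register setup, is below. The only structural difference between them is the exit route: the dtlb variant also runs KVM_SAVE_EXTRA and returns through ia64_leave_hypervisor_prepare, while the itlb variant uses plain ia64_leave_hypervisor.

    /* Hypothetical prototype reconstructed from out0..out2: */
    struct pt_regs;

    void kvm_page_fault(unsigned long ifa,     /* out0 = cr.ifa           */
                        unsigned long arg,     /* out1 = r15 (per caller) */
                        struct pt_regs *regs); /* out2 = sp + 16          */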
850 | 854 | ||
851 | ENTRY(kvm_dispatch_reflection) | 855 | ENTRY(kvm_dispatch_reflection) |
852 | /* | 856 | /* |
853 | * Input: | 857 | * Input: |
854 | * psr.ic: off | 858 | * psr.ic: off |
855 | * r19: intr type (offset into ivt, see ia64_int.h) | 859 | * r19: intr type (offset into ivt, see ia64_int.h) |
856 | * r31: contains saved predicates (pr) | 860 | * r31: contains saved predicates (pr) |
857 | */ | 861 | */ |
858 | KVM_SAVE_MIN_WITH_COVER_R19 | 862 | KVM_SAVE_MIN_WITH_COVER_R19 |
859 | alloc r14=ar.pfs,0,0,5,0 | 863 | alloc r14=ar.pfs,0,0,5,0 |
860 | mov out0=cr.ifa | 864 | mov out0=cr.ifa |
861 | mov out1=cr.isr | 865 | mov out1=cr.isr |
862 | mov out2=cr.iim | 866 | mov out2=cr.iim |
863 | mov out3=r15 | 867 | mov out3=r15 |
864 | adds r3=8,r2 // set up second base pointer | 868 | adds r3=8,r2 // set up second base pointer |
865 | ;; | 869 | ;; |
866 | ssm psr.ic | 870 | ssm psr.ic |
867 | ;; | 871 | ;; |
868 | srlz.i // guarantee that interruption collection is on | 872 | srlz.i // guarantee that interruption collection is on |
869 | ;; | 873 | ;; |
870 | //(p15) ssm psr.i // restore psr.i | 874 | //(p15) ssm psr.i // restore psr.i |
871 | addl r14=@gprel(ia64_leave_hypervisor),gp | 875 | addl r14=@gprel(ia64_leave_hypervisor),gp |
872 | ;; | 876 | ;; |
873 | KVM_SAVE_REST | 877 | KVM_SAVE_REST |
874 | mov rp=r14 | 878 | mov rp=r14 |
875 | ;; | 879 | ;; |
876 | adds out4=16,r12 | 880 | adds out4=16,r12 |
877 | br.call.sptk.many b6=reflect_interruption | 881 | br.call.sptk.many b6=reflect_interruption |
878 | END(kvm_dispatch_reflection) | 882 | END(kvm_dispatch_reflection) |
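kvm_dispatch_reflection documents its inputs in its own comment block; matching that against the out0..out4 setup gives the assumed shape of the reflection call:

    /* Hypothetical prototype; argument order follows the asm setup. */
    struct pt_regs;

    void reflect_interruption(unsigned long ifa,     /* out0 = cr.ifa  */
                              unsigned long isr,     /* out1 = cr.isr  */
                              unsigned long iim,     /* out2 = cr.iim  */
                              unsigned long vec,     /* out3 = r15     */
                              struct pt_regs *regs); /* out4 = sp + 16 */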
879 | 883 | ||
880 | ENTRY(kvm_dispatch_virtualization_fault) | 884 | ENTRY(kvm_dispatch_virtualization_fault) |
881 | adds r16 = VMM_VCPU_CAUSE_OFFSET,r21 | 885 | adds r16 = VMM_VCPU_CAUSE_OFFSET,r21 |
882 | adds r17 = VMM_VCPU_OPCODE_OFFSET,r21 | 886 | adds r17 = VMM_VCPU_OPCODE_OFFSET,r21 |
883 | ;; | 887 | ;; |
884 | st8 [r16] = r24 | 888 | st8 [r16] = r24 |
885 | st8 [r17] = r25 | 889 | st8 [r17] = r25 |
886 | ;; | 890 | ;; |
887 | KVM_SAVE_MIN_WITH_COVER_R19 | 891 | KVM_SAVE_MIN_WITH_COVER_R19 |
888 | ;; | 892 | ;; |
889 | alloc r14=ar.pfs,0,0,2,0 // now it's safe (must be first in insn group!) | 893 | alloc r14=ar.pfs,0,0,2,0 // (must be first in insn group!) |
890 | mov out0=r13 //vcpu | 894 | mov out0=r13 //vcpu |
891 | adds r3=8,r2 // set up second base pointer | 895 | adds r3=8,r2 // set up second base pointer |
892 | ;; | 896 | ;; |
893 | ssm psr.ic | 897 | ssm psr.ic |
894 | ;; | 898 | ;; |
895 | srlz.i // guarantee that interruption collection is on | 899 | srlz.i // guarantee that interruption collection is on |
896 | ;; | 900 | ;; |
897 | //(p15) ssm psr.i // restore psr.i | 901 | //(p15) ssm psr.i // restore psr.i |
898 | addl r14=@gprel(ia64_leave_hypervisor_prepare),gp | 902 | addl r14=@gprel(ia64_leave_hypervisor_prepare),gp |
899 | ;; | 903 | ;; |
900 | KVM_SAVE_REST | 904 | KVM_SAVE_REST |
901 | KVM_SAVE_EXTRA | 905 | KVM_SAVE_EXTRA |
902 | mov rp=r14 | 906 | mov rp=r14 |
903 | ;; | 907 | ;; |
904 | adds out1=16,sp //regs | 908 | adds out1=16,sp //regs |
905 | br.call.sptk.many b6=kvm_emulate | 909 | br.call.sptk.many b6=kvm_emulate |
906 | END(kvm_dispatch_virtualization_fault) | 910 | END(kvm_dispatch_virtualization_fault) |
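The generic dispatcher first parks the fault cause (r24) and opcode (r25) in the vcpu, then hands control to the emulator; per the out0/out1 setup, the assumed call shape is:

    /* Hypothetical prototype: out0 = r13 (vcpu), out1 = sp + 16 (regs). */
    struct kvm_vcpu;
    struct pt_regs;

    void kvm_emulate(struct kvm_vcpu *vcpu, struct pt_regs *regs);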
907 | 911 | ||
908 | 912 | ||
909 | ENTRY(kvm_dispatch_interrupt) | 913 | ENTRY(kvm_dispatch_interrupt) |
910 | KVM_SAVE_MIN_WITH_COVER_R19 // uses r31; defines r2 and r3 | 914 | KVM_SAVE_MIN_WITH_COVER_R19 // uses r31; defines r2 and r3 |
911 | ;; | 915 | ;; |
912 | alloc r14=ar.pfs,0,0,1,0 // must be first in an insn group | 916 | alloc r14=ar.pfs,0,0,1,0 // must be first in an insn group |
913 | //mov out0=cr.ivr // pass cr.ivr as first arg | 917 | adds r3=8,r2 // set up second base pointer for SAVE_REST |
914 | adds r3=8,r2 // set up second base pointer for SAVE_REST | 918 | ;; |
915 | ;; | 919 | ssm psr.ic |
916 | ssm psr.ic | 920 | ;; |
917 | ;; | 921 | srlz.i |
918 | srlz.i | 922 | ;; |
919 | ;; | 923 | //(p15) ssm psr.i |
920 | //(p15) ssm psr.i | 924 | addl r14=@gprel(ia64_leave_hypervisor),gp |
921 | addl r14=@gprel(ia64_leave_hypervisor),gp | 925 | ;; |
922 | ;; | 926 | KVM_SAVE_REST |
923 | KVM_SAVE_REST | 927 | mov rp=r14 |
924 | mov rp=r14 | 928 | ;; |
925 | ;; | 929 | mov out0=r13 // pass pointer to pt_regs as second arg |
926 | mov out0=r13 // pass vcpu pointer (r13) as the only argument | 929 | mov out0=r13 // pass vcpu pointer (r13) as the only argument |
927 | br.call.sptk.many b6=kvm_ia64_handle_irq | ||
928 | END(kvm_dispatch_interrupt) | 931 | END(kvm_dispatch_interrupt) |
929 | 932 | ||
930 | |||
931 | |||
932 | |||
933 | GLOBAL_ENTRY(ia64_leave_nested) | 933 | GLOBAL_ENTRY(ia64_leave_nested) |
934 | rsm psr.i | 934 | rsm psr.i |
935 | ;; | 935 | ;; |
@@ -1008,7 +1008,7 @@ GLOBAL_ENTRY(ia64_leave_nested) | |||
1008 | ;; | 1008 | ;; |
1009 | ldf.fill f11=[r2] | 1009 | ldf.fill f11=[r2] |
1010 | // mov r18=r13 | 1010 | // mov r18=r13 |
1011 | // mov r21=r13 | 1011 | // mov r21=r13 |
1012 | adds r16=PT(CR_IPSR)+16,r12 | 1012 | adds r16=PT(CR_IPSR)+16,r12 |
1013 | adds r17=PT(CR_IIP)+16,r12 | 1013 | adds r17=PT(CR_IIP)+16,r12 |
1014 | ;; | 1014 | ;; |
@@ -1058,138 +1058,135 @@ GLOBAL_ENTRY(ia64_leave_nested) | |||
1058 | rfi | 1058 | rfi |
1059 | END(ia64_leave_nested) | 1059 | END(ia64_leave_nested) |
1060 | 1060 | ||
1061 | |||
1062 | |||
1063 | GLOBAL_ENTRY(ia64_leave_hypervisor_prepare) | 1061 | GLOBAL_ENTRY(ia64_leave_hypervisor_prepare) |
1064 | /* | 1062 | /* |
1065 | * work.need_resched etc. mustn't get changed | 1063 | * work.need_resched etc. mustn't get changed |
1066 | *by this CPU before it returns to | 1064 | *by this CPU before it returns to |
1067 | ;; | 1065 | * user- or fsys-mode, hence we disable interrupts early on: |
1068 | * user- or fsys-mode, hence we disable interrupts early on: | 1066 | */ |
1069 | */ | 1067 | adds r2 = PT(R4)+16,r12 |
1070 | adds r2 = PT(R4)+16,r12 | 1068 | adds r3 = PT(R5)+16,r12 |
1071 | adds r3 = PT(R5)+16,r12 | 1069 | adds r8 = PT(EML_UNAT)+16,r12 |
1072 | adds r8 = PT(EML_UNAT)+16,r12 | 1070 | ;; |
1073 | ;; | 1071 | ld8 r8 = [r8] |
1074 | ld8 r8 = [r8] | 1072 | ;; |
1075 | ;; | 1073 | mov ar.unat=r8 |
1076 | mov ar.unat=r8 | 1074 | ;; |
1077 | ;; | 1075 | ld8.fill r4=[r2],16 //load r4 |
1078 | ld8.fill r4=[r2],16 //load r4 | 1076 | ld8.fill r5=[r3],16 //load r5 |
1079 | ld8.fill r5=[r3],16 //load r5 | 1077 | ;; |
1080 | ;; | 1078 | ld8.fill r6=[r2] //load r6 |
1081 | ld8.fill r6=[r2] //load r6 | 1079 | ld8.fill r7=[r3] //load r7 |
1082 | ld8.fill r7=[r3] //load r7 | 1080 | ;; |
1083 | ;; | ||
1084 | END(ia64_leave_hypervisor_prepare) | 1081 | END(ia64_leave_hypervisor_prepare) |
1085 | //fall through | 1082 | //fall through |
1086 | GLOBAL_ENTRY(ia64_leave_hypervisor) | 1083 | GLOBAL_ENTRY(ia64_leave_hypervisor) |
1087 | rsm psr.i | 1084 | rsm psr.i |
1088 | ;; | 1085 | ;; |
1089 | br.call.sptk.many b0=leave_hypervisor_tail | 1086 | br.call.sptk.many b0=leave_hypervisor_tail |
1090 | ;; | 1087 | ;; |
1091 | adds r20=PT(PR)+16,r12 | 1088 | adds r20=PT(PR)+16,r12 |
1092 | adds r8=PT(EML_UNAT)+16,r12 | 1089 | adds r8=PT(EML_UNAT)+16,r12 |
1093 | ;; | 1090 | ;; |
1094 | ld8 r8=[r8] | 1091 | ld8 r8=[r8] |
1095 | ;; | 1092 | ;; |
1096 | mov ar.unat=r8 | 1093 | mov ar.unat=r8 |
1097 | ;; | 1094 | ;; |
1098 | lfetch [r20],PT(CR_IPSR)-PT(PR) | 1095 | lfetch [r20],PT(CR_IPSR)-PT(PR) |
1099 | adds r2 = PT(B6)+16,r12 | 1096 | adds r2 = PT(B6)+16,r12 |
1100 | adds r3 = PT(B7)+16,r12 | 1097 | adds r3 = PT(B7)+16,r12 |
1101 | ;; | 1098 | ;; |
1102 | lfetch [r20] | 1099 | lfetch [r20] |
1103 | ;; | 1100 | ;; |
1104 | ld8 r24=[r2],16 /* B6 */ | 1101 | ld8 r24=[r2],16 /* B6 */ |
1105 | ld8 r25=[r3],16 /* B7 */ | 1102 | ld8 r25=[r3],16 /* B7 */ |
1106 | ;; | 1103 | ;; |
1107 | ld8 r26=[r2],16 /* ar_csd */ | 1104 | ld8 r26=[r2],16 /* ar_csd */ |
1108 | ld8 r27=[r3],16 /* ar_ssd */ | 1105 | ld8 r27=[r3],16 /* ar_ssd */ |
1109 | mov b6 = r24 | 1106 | mov b6 = r24 |
1110 | ;; | 1107 | ;; |
1111 | ld8.fill r8=[r2],16 | 1108 | ld8.fill r8=[r2],16 |
1112 | ld8.fill r9=[r3],16 | 1109 | ld8.fill r9=[r3],16 |
1113 | mov b7 = r25 | 1110 | mov b7 = r25 |
1114 | ;; | 1111 | ;; |
1115 | mov ar.csd = r26 | 1112 | mov ar.csd = r26 |
1116 | mov ar.ssd = r27 | 1113 | mov ar.ssd = r27 |
1117 | ;; | 1114 | ;; |
1118 | ld8.fill r10=[r2],PT(R15)-PT(R10) | 1115 | ld8.fill r10=[r2],PT(R15)-PT(R10) |
1119 | ld8.fill r11=[r3],PT(R14)-PT(R11) | 1116 | ld8.fill r11=[r3],PT(R14)-PT(R11) |
1120 | ;; | 1117 | ;; |
1121 | ld8.fill r15=[r2],PT(R16)-PT(R15) | 1118 | ld8.fill r15=[r2],PT(R16)-PT(R15) |
1122 | ld8.fill r14=[r3],PT(R17)-PT(R14) | 1119 | ld8.fill r14=[r3],PT(R17)-PT(R14) |
1123 | ;; | 1120 | ;; |
1124 | ld8.fill r16=[r2],16 | 1121 | ld8.fill r16=[r2],16 |
1125 | ld8.fill r17=[r3],16 | 1122 | ld8.fill r17=[r3],16 |
1126 | ;; | 1123 | ;; |
1127 | ld8.fill r18=[r2],16 | 1124 | ld8.fill r18=[r2],16 |
1128 | ld8.fill r19=[r3],16 | 1125 | ld8.fill r19=[r3],16 |
1129 | ;; | 1126 | ;; |
1130 | ld8.fill r20=[r2],16 | 1127 | ld8.fill r20=[r2],16 |
1131 | ld8.fill r21=[r3],16 | 1128 | ld8.fill r21=[r3],16 |
1132 | ;; | 1129 | ;; |
1133 | ld8.fill r22=[r2],16 | 1130 | ld8.fill r22=[r2],16 |
1134 | ld8.fill r23=[r3],16 | 1131 | ld8.fill r23=[r3],16 |
1135 | ;; | 1132 | ;; |
1136 | ld8.fill r24=[r2],16 | 1133 | ld8.fill r24=[r2],16 |
1137 | ld8.fill r25=[r3],16 | 1134 | ld8.fill r25=[r3],16 |
1138 | ;; | 1135 | ;; |
1139 | ld8.fill r26=[r2],16 | 1136 | ld8.fill r26=[r2],16 |
1140 | ld8.fill r27=[r3],16 | 1137 | ld8.fill r27=[r3],16 |
1141 | ;; | 1138 | ;; |
1142 | ld8.fill r28=[r2],16 | 1139 | ld8.fill r28=[r2],16 |
1143 | ld8.fill r29=[r3],16 | 1140 | ld8.fill r29=[r3],16 |
1144 | ;; | 1141 | ;; |
1145 | ld8.fill r30=[r2],PT(F6)-PT(R30) | 1142 | ld8.fill r30=[r2],PT(F6)-PT(R30) |
1146 | ld8.fill r31=[r3],PT(F7)-PT(R31) | 1143 | ld8.fill r31=[r3],PT(F7)-PT(R31) |
1147 | ;; | 1144 | ;; |
1148 | rsm psr.i | psr.ic | 1145 | rsm psr.i | psr.ic |
1149 | // initiate turning off of interrupt and interruption collection | 1146 | // initiate turning off of interrupt and interruption collection |
1150 | invala // invalidate ALAT | 1147 | invala // invalidate ALAT |
1151 | ;; | 1148 | ;; |
1152 | srlz.i // ensure interruption collection is off | 1149 | srlz.i // ensure interruption collection is off |
1153 | ;; | 1150 | ;; |
1154 | bsw.0 | 1151 | bsw.0 |
1155 | ;; | 1152 | ;; |
1156 | adds r16 = PT(CR_IPSR)+16,r12 | 1153 | adds r16 = PT(CR_IPSR)+16,r12 |
1157 | adds r17 = PT(CR_IIP)+16,r12 | 1154 | adds r17 = PT(CR_IIP)+16,r12 |
1158 | mov r21=r13 // get current | 1155 | mov r21=r13 // get current |
1159 | ;; | 1156 | ;; |
1160 | ld8 r31=[r16],16 // load cr.ipsr | 1157 | ld8 r31=[r16],16 // load cr.ipsr |
1161 | ld8 r30=[r17],16 // load cr.iip | 1158 | ld8 r30=[r17],16 // load cr.iip |
1162 | ;; | 1159 | ;; |
1163 | ld8 r29=[r16],16 // load cr.ifs | 1160 | ld8 r29=[r16],16 // load cr.ifs |
1164 | ld8 r28=[r17],16 // load ar.unat | 1161 | ld8 r28=[r17],16 // load ar.unat |
1165 | ;; | 1162 | ;; |
1166 | ld8 r27=[r16],16 // load ar.pfs | 1163 | ld8 r27=[r16],16 // load ar.pfs |
1167 | ld8 r26=[r17],16 // load ar.rsc | 1164 | ld8 r26=[r17],16 // load ar.rsc |
1168 | ;; | 1165 | ;; |
1169 | ld8 r25=[r16],16 // load ar.rnat | 1166 | ld8 r25=[r16],16 // load ar.rnat |
1170 | ld8 r24=[r17],16 // load ar.bspstore | 1167 | ld8 r24=[r17],16 // load ar.bspstore |
1171 | ;; | 1168 | ;; |
1172 | ld8 r23=[r16],16 // load predicates | 1169 | ld8 r23=[r16],16 // load predicates |
1173 | ld8 r22=[r17],16 // load b0 | 1170 | ld8 r22=[r17],16 // load b0 |
1174 | ;; | 1171 | ;; |
1175 | ld8 r20=[r16],16 // load ar.rsc value for "loadrs" | 1172 | ld8 r20=[r16],16 // load ar.rsc value for "loadrs" |
1176 | ld8.fill r1=[r17],16 //load r1 | 1173 | ld8.fill r1=[r17],16 //load r1 |
1177 | ;; | 1174 | ;; |
1178 | ld8.fill r12=[r16],16 //load r12 | 1175 | ld8.fill r12=[r16],16 //load r12 |
1179 | ld8.fill r13=[r17],PT(R2)-PT(R13) //load r13 | 1176 | ld8.fill r13=[r17],PT(R2)-PT(R13) //load r13 |
1180 | ;; | 1177 | ;; |
1181 | ld8 r19=[r16],PT(R3)-PT(AR_FPSR) //load ar_fpsr | 1178 | ld8 r19=[r16],PT(R3)-PT(AR_FPSR) //load ar_fpsr |
1182 | ld8.fill r2=[r17],PT(AR_CCV)-PT(R2) //load r2 | 1179 | ld8.fill r2=[r17],PT(AR_CCV)-PT(R2) //load r2 |
1183 | ;; | 1180 | ;; |
1184 | ld8.fill r3=[r16] //load r3 | 1181 | ld8.fill r3=[r16] //load r3 |
1185 | ld8 r18=[r17] //load ar_ccv | 1182 | ld8 r18=[r17] //load ar_ccv |
1186 | ;; | 1183 | ;; |
1187 | mov ar.fpsr=r19 | 1184 | mov ar.fpsr=r19 |
1188 | mov ar.ccv=r18 | 1185 | mov ar.ccv=r18 |
1189 | shr.u r18=r20,16 | 1186 | shr.u r18=r20,16 |
1190 | ;; | 1187 | ;; |
1191 | kvm_rbs_switch: | 1188 | kvm_rbs_switch: |
1192 | mov r19=96 | 1189 | mov r19=96 |
1193 | 1190 | ||
1194 | kvm_dont_preserve_current_frame: | 1191 | kvm_dont_preserve_current_frame: |
1195 | /* | 1192 | /* |
@@ -1201,76 +1198,76 @@ kvm_dont_preserve_current_frame: | |||
1201 | # define pReturn p7 | 1198 | # define pReturn p7 |
1202 | # define Nregs 14 | 1199 | # define Nregs 14 |
1203 | 1200 | ||
1204 | alloc loc0=ar.pfs,2,Nregs-2,2,0 | 1201 | alloc loc0=ar.pfs,2,Nregs-2,2,0 |
1205 | shr.u loc1=r18,9 // RNaTslots <= floor(dirtySize / (64*8)) | 1202 | shr.u loc1=r18,9 // RNaTslots <= floor(dirtySize / (64*8)) |
1206 | sub r19=r19,r18 // r19 = (physStackedSize + 8) - dirtySize | 1203 | sub r19=r19,r18 // r19 = (physStackedSize + 8) - dirtySize |
1207 | ;; | 1204 | ;; |
1208 | mov ar.rsc=r20 // load ar.rsc to be used for "loadrs" | 1205 | mov ar.rsc=r20 // load ar.rsc to be used for "loadrs" |
1209 | shladd in0=loc1,3,r19 | 1206 | shladd in0=loc1,3,r19 |
1210 | mov in1=0 | 1207 | mov in1=0 |
1211 | ;; | 1208 | ;; |
1212 | TEXT_ALIGN(32) | 1209 | TEXT_ALIGN(32) |
1213 | kvm_rse_clear_invalid: | 1210 | kvm_rse_clear_invalid: |
1214 | alloc loc0=ar.pfs,2,Nregs-2,2,0 | 1211 | alloc loc0=ar.pfs,2,Nregs-2,2,0 |
1215 | cmp.lt pRecurse,p0=Nregs*8,in0 | 1212 | cmp.lt pRecurse,p0=Nregs*8,in0 |
1216 | // if more than Nregs regs left to clear, (re)curse | 1213 | // if more than Nregs regs left to clear, (re)curse |
1217 | add out0=-Nregs*8,in0 | 1214 | add out0=-Nregs*8,in0 |
1218 | add out1=1,in1 // increment recursion count | 1215 | add out1=1,in1 // increment recursion count |
1219 | mov loc1=0 | 1216 | mov loc1=0 |
1220 | mov loc2=0 | 1217 | mov loc2=0 |
1221 | ;; | 1218 | ;; |
1222 | mov loc3=0 | 1219 | mov loc3=0 |
1223 | mov loc4=0 | 1220 | mov loc4=0 |
1224 | mov loc5=0 | 1221 | mov loc5=0 |
1225 | mov loc6=0 | 1222 | mov loc6=0 |
1226 | mov loc7=0 | 1223 | mov loc7=0 |
1227 | (pRecurse) br.call.dptk.few b0=kvm_rse_clear_invalid | 1224 | (pRecurse) br.call.dptk.few b0=kvm_rse_clear_invalid |
1228 | ;; | 1225 | ;; |
1229 | mov loc8=0 | 1226 | mov loc8=0 |
1230 | mov loc9=0 | 1227 | mov loc9=0 |
1231 | cmp.ne pReturn,p0=r0,in1 | 1228 | cmp.ne pReturn,p0=r0,in1 |
1232 | // if recursion count != 0, we need to do a br.ret | 1229 | // if recursion count != 0, we need to do a br.ret |
1233 | mov loc10=0 | 1230 | mov loc10=0 |
1234 | mov loc11=0 | 1231 | mov loc11=0 |
1235 | (pReturn) br.ret.dptk.many b0 | 1232 | (pReturn) br.ret.dptk.many b0 |
1236 | 1233 | ||
1237 | # undef pRecurse | 1234 | # undef pRecurse |
1238 | # undef pReturn | 1235 | # undef pReturn |
1239 | 1236 | ||
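[Editor's note] The kvm_rse_clear_invalid loop above scrubs stale stacked registers by allocating Nregs-sized frames and zeroing their locals, recursing while more than Nregs*8 bytes remain, then unwinding with br.ret. A rough C model of the control flow (illustrative only; on the real RSE the alloc/mov instructions themselves do the clearing):

    #define NREGS 14        /* registers scrubbed per "frame" */

    static void rse_clear_invalid(long bytes, int depth)
    {
            /* each level accounts for NREGS stacked registers, 8 bytes
               each; zeroing them is implicit in the asm's mov locN=0 */
            if (bytes > NREGS * 8)
                    rse_clear_invalid(bytes - NREGS * 8, depth + 1);
            /* depth != 0: br.ret pops back to the caller (pReturn) */
    }

    /* called with in0 = rnat_slots*8 + (physStackedSize + 8 - dirtySize) */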
1240 | // loadrs has already been shifted | 1237 | // loadrs has already been shifted |
1241 | alloc r16=ar.pfs,0,0,0,0 // drop current register frame | 1238 | alloc r16=ar.pfs,0,0,0,0 // drop current register frame |
1242 | ;; | 1239 | ;; |
1243 | loadrs | 1240 | loadrs |
1244 | ;; | 1241 | ;; |
1245 | mov ar.bspstore=r24 | 1242 | mov ar.bspstore=r24 |
1246 | ;; | 1243 | ;; |
1247 | mov ar.unat=r28 | 1244 | mov ar.unat=r28 |
1248 | mov ar.rnat=r25 | 1245 | mov ar.rnat=r25 |
1249 | mov ar.rsc=r26 | 1246 | mov ar.rsc=r26 |
1250 | ;; | 1247 | ;; |
1251 | mov cr.ipsr=r31 | 1248 | mov cr.ipsr=r31 |
1252 | mov cr.iip=r30 | 1249 | mov cr.iip=r30 |
1253 | mov cr.ifs=r29 | 1250 | mov cr.ifs=r29 |
1254 | mov ar.pfs=r27 | 1251 | mov ar.pfs=r27 |
1255 | adds r18=VMM_VPD_BASE_OFFSET,r21 | 1252 | adds r18=VMM_VPD_BASE_OFFSET,r21 |
1256 | ;; | 1253 | ;; |
1257 | ld8 r18=[r18] //vpd | 1254 | ld8 r18=[r18] //vpd |
1258 | adds r17=VMM_VCPU_ISR_OFFSET,r21 | 1255 | adds r17=VMM_VCPU_ISR_OFFSET,r21 |
1259 | ;; | 1256 | ;; |
1260 | ld8 r17=[r17] | 1257 | ld8 r17=[r17] |
1261 | adds r19=VMM_VPD_VPSR_OFFSET,r18 | 1258 | adds r19=VMM_VPD_VPSR_OFFSET,r18 |
1262 | ;; | 1259 | ;; |
1263 | ld8 r19=[r19] //vpsr | 1260 | ld8 r19=[r19] //vpsr |
1264 | mov r25=r18 | 1261 | mov r25=r18 |
1265 | adds r16= VMM_VCPU_GP_OFFSET,r21 | 1262 | adds r16= VMM_VCPU_GP_OFFSET,r21 |
1266 | ;; | 1263 | ;; |
1267 | ld8 r16= [r16] // load gp; used below to form r24 | 1264 | ld8 r16= [r16] // load gp; used below to form r24 |
1268 | movl r24=@gprel(ia64_vmm_entry) // calculate return address | 1265 | movl r24=@gprel(ia64_vmm_entry) // calculate return address |
1269 | ;; | 1266 | ;; |
1270 | add r24=r24,r16 | 1267 | add r24=r24,r16 |
1271 | ;; | 1268 | ;; |
1272 | br.sptk.many kvm_vps_sync_write // call the service | 1269 | br.sptk.many kvm_vps_sync_write // call the service |
1273 | ;; | 1270 | ;; |
1274 | END(ia64_leave_hypervisor) | 1271 | END(ia64_leave_hypervisor) |
1275 | // fall through | 1272 | // fall through |
1276 | GLOBAL_ENTRY(ia64_vmm_entry) | 1273 | GLOBAL_ENTRY(ia64_vmm_entry) |
@@ -1283,16 +1280,14 @@ GLOBAL_ENTRY(ia64_vmm_entry) | |||
1283 | * r22:b0 | 1280 | * r22:b0 |
1284 | * r23:predicate | 1281 | * r23:predicate |
1285 | */ | 1282 | */ |
1286 | mov r24=r22 | 1283 | mov r24=r22 |
1287 | mov r25=r18 | 1284 | mov r25=r18 |
1288 | tbit.nz p1,p2 = r19,IA64_PSR_IC_BIT // p1=vpsr.ic | 1285 | tbit.nz p1,p2 = r19,IA64_PSR_IC_BIT // p1=vpsr.ic |
1289 | (p1) br.cond.sptk.few kvm_vps_resume_normal | 1286 | (p1) br.cond.sptk.few kvm_vps_resume_normal |
1290 | (p2) br.cond.sptk.many kvm_vps_resume_handler | 1287 | (p2) br.cond.sptk.many kvm_vps_resume_handler |
1291 | ;; | 1288 | ;; |
1292 | END(ia64_vmm_entry) | 1289 | END(ia64_vmm_entry) |
1293 | 1290 | ||
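[Editor's note] The tbit.nz above splits on vpsr.ic: p1 fires when the bit is set, p2 when it is clear, so the fall-through entry dispatches as in this C rendering of the predicate pair (the targets are asm labels, not C functions):

    /* r19 holds the guest's vpsr at this point */
    if (vpsr & (1UL << IA64_PSR_IC_BIT))
            kvm_vps_resume_normal();   /* (p1) interruption collection on */
    else
            kvm_vps_resume_handler();  /* (p2) low-level handler context */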
1294 | |||
1295 | |||
1296 | /* | 1291 | /* |
1297 | * extern u64 ia64_call_vsa(u64 proc, u64 arg1, u64 arg2, | 1292 | * extern u64 ia64_call_vsa(u64 proc, u64 arg1, u64 arg2, |
1298 | * u64 arg3, u64 arg4, u64 arg5, | 1293 | * u64 arg3, u64 arg4, u64 arg5, |
@@ -1310,88 +1305,88 @@ psrsave = loc2 | |||
1310 | entry = loc3 | 1305 | entry = loc3 |
1311 | hostret = r24 | 1306 | hostret = r24 |
1312 | 1307 | ||
1313 | alloc pfssave=ar.pfs,4,4,0,0 | 1308 | alloc pfssave=ar.pfs,4,4,0,0 |
1314 | mov rpsave=rp | 1309 | mov rpsave=rp |
1315 | adds entry=VMM_VCPU_VSA_BASE_OFFSET, r13 | 1310 | adds entry=VMM_VCPU_VSA_BASE_OFFSET, r13 |
1316 | ;; | 1311 | ;; |
1317 | ld8 entry=[entry] | 1312 | ld8 entry=[entry] |
1318 | 1: mov hostret=ip | 1313 | 1: mov hostret=ip |
1319 | mov r25=in1 // copy arguments | 1314 | mov r25=in1 // copy arguments |
1320 | mov r26=in2 | 1315 | mov r26=in2 |
1321 | mov r27=in3 | 1316 | mov r27=in3 |
1322 | mov psrsave=psr | 1317 | mov psrsave=psr |
1323 | ;; | 1318 | ;; |
1324 | tbit.nz p6,p0=psrsave,14 // IA64_PSR_I | 1319 | tbit.nz p6,p0=psrsave,14 // IA64_PSR_I |
1325 | tbit.nz p7,p0=psrsave,13 // IA64_PSR_IC | 1320 | tbit.nz p7,p0=psrsave,13 // IA64_PSR_IC |
1326 | ;; | 1321 | ;; |
1327 | add hostret=2f-1b,hostret // calculate return address | 1322 | add hostret=2f-1b,hostret // calculate return address |
1328 | add entry=entry,in0 | 1323 | add entry=entry,in0 |
1329 | ;; | 1324 | ;; |
1330 | rsm psr.i | psr.ic | 1325 | rsm psr.i | psr.ic |
1331 | ;; | 1326 | ;; |
1332 | srlz.i | 1327 | srlz.i |
1333 | mov b6=entry | 1328 | mov b6=entry |
1334 | br.cond.sptk b6 // call the service | 1329 | br.cond.sptk b6 // call the service |
1335 | 2: | 1330 | 2: |
1336 | // Architectural sequence for enabling interrupts if necessary | 1331 | // Architectural sequence for enabling interrupts if necessary |
1337 | (p7) ssm psr.ic | 1332 | (p7) ssm psr.ic |
1338 | ;; | 1333 | ;; |
1339 | (p7) srlz.i | 1334 | (p7) srlz.i |
1340 | ;; | 1335 | ;; |
1341 | //(p6) ssm psr.i | 1336 | //(p6) ssm psr.i |
1342 | ;; | 1337 | ;; |
1343 | mov rp=rpsave | 1338 | mov rp=rpsave |
1344 | mov ar.pfs=pfssave | 1339 | mov ar.pfs=pfssave |
1345 | mov r8=r31 | 1340 | mov r8=r31 |
1346 | ;; | 1341 | ;; |
1347 | srlz.d | 1342 | srlz.d |
1348 | br.ret.sptk rp | 1343 | br.ret.sptk rp |
1349 | 1344 | ||
1350 | END(ia64_call_vsa) | 1345 | END(ia64_call_vsa) |
1351 | 1346 | ||
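[Editor's note] ia64_call_vsa() branches to vsa_base + proc with psr.i and psr.ic masked, so callers hand it the byte offset of the wanted VPS service; only the first three arguments are materialized into scratch registers (r25-r27) by the stub above. A hedged usage sketch -- VSA_PROC_OFFSET is a placeholder, not a real constant, and the prototype's elided trailing arguments stay elided:

    u64 ret = ia64_call_vsa(VSA_PROC_OFFSET, arg1, arg2, arg3,
                            0, 0 /* remaining args per the prototype */);
    /* the service's result comes back in r8, i.e. as the C return value */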
1352 | #define INIT_BSPSTORE ((4<<30)-(12<<20)-0x100) | 1347 | #define INIT_BSPSTORE ((4<<30)-(12<<20)-0x100) |
1353 | 1348 | ||
1354 | GLOBAL_ENTRY(vmm_reset_entry) | 1349 | GLOBAL_ENTRY(vmm_reset_entry) |
1355 | //set up ipsr, iip, vpd.vpsr, dcr | 1350 | //set up ipsr, iip, vpd.vpsr, dcr |
1356 | // For IPSR: it/dt/rt=1, i/ic=1, si=1, vm/bn=1 | 1351 | // For IPSR: it/dt/rt=1, i/ic=1, si=1, vm/bn=1 |
1357 | // For DCR: all bits 0 | 1352 | // For DCR: all bits 0 |
1358 | bsw.0 | 1353 | bsw.0 |
1359 | ;; | 1354 | ;; |
1360 | mov r21 =r13 | 1355 | mov r21 =r13 |
1361 | adds r14=-VMM_PT_REGS_SIZE, r12 | 1356 | adds r14=-VMM_PT_REGS_SIZE, r12 |
1362 | ;; | 1357 | ;; |
1363 | movl r6=0x501008826000 // IPSR dt/rt/it:1;i/ic:1, si:1, vm/bn:1 | 1358 | movl r6=0x501008826000 // IPSR dt/rt/it:1;i/ic:1, si:1, vm/bn:1 |
1364 | movl r10=0x8000000000000000 | 1359 | movl r10=0x8000000000000000 |
1365 | adds r16=PT(CR_IIP), r14 | 1360 | adds r16=PT(CR_IIP), r14 |
1366 | adds r20=PT(R1), r14 | 1361 | adds r20=PT(R1), r14 |
1367 | ;; | 1362 | ;; |
1368 | rsm psr.ic | psr.i | 1363 | rsm psr.ic | psr.i |
1369 | ;; | 1364 | ;; |
1370 | srlz.i | 1365 | srlz.i |
1371 | ;; | 1366 | ;; |
1372 | mov ar.rsc = 0 | 1367 | mov ar.rsc = 0 |
1373 | ;; | 1368 | ;; |
1374 | flushrs | 1369 | flushrs |
1375 | ;; | 1370 | ;; |
1376 | mov ar.bspstore = 0 | 1371 | mov ar.bspstore = 0 |
1377 | // clear BSPSTORE | 1372 | // clear BSPSTORE |
1378 | ;; | 1373 | ;; |
1379 | mov cr.ipsr=r6 | 1374 | mov cr.ipsr=r6 |
1380 | mov cr.ifs=r10 | 1375 | mov cr.ifs=r10 |
1381 | ld8 r4 = [r16] // Set init iip for first run. | 1376 | ld8 r4 = [r16] // Set init iip for first run. |
1382 | ld8 r1 = [r20] | 1377 | ld8 r1 = [r20] |
1383 | ;; | 1378 | ;; |
1384 | mov cr.iip=r4 | 1379 | mov cr.iip=r4 |
1385 | adds r16=VMM_VPD_BASE_OFFSET,r13 | 1380 | adds r16=VMM_VPD_BASE_OFFSET,r13 |
1386 | ;; | 1381 | ;; |
1387 | ld8 r18=[r16] | 1382 | ld8 r18=[r16] |
1388 | ;; | 1383 | ;; |
1389 | adds r19=VMM_VPD_VPSR_OFFSET,r18 | 1384 | adds r19=VMM_VPD_VPSR_OFFSET,r18 |
1390 | ;; | 1385 | ;; |
1391 | ld8 r19=[r19] | 1386 | ld8 r19=[r19] |
1392 | mov r17=r0 | 1387 | mov r17=r0 |
1393 | mov r22=r0 | 1388 | mov r22=r0 |
1394 | mov r23=r0 | 1389 | mov r23=r0 |
1395 | br.cond.sptk ia64_vmm_entry | 1390 | br.cond.sptk ia64_vmm_entry |
1396 | br.ret.sptk b0 | 1391 | br.ret.sptk b0 |
1397 | END(vmm_reset_entry) | 1392 | END(vmm_reset_entry) |
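[Editor's note] The movl r6=0x501008826000 above encodes exactly the PSR bits the comment names. A quick self-contained check, with bit positions assumed from the Itanium PSR layout (psr.vm is the VT-i virtualization bit):

    #include <assert.h>

    int main(void)
    {
            unsigned long long ipsr =
                    (1ULL << 13) | (1ULL << 14) |  /* ic, i  */
                    (1ULL << 17) |                 /* dt     */
                    (1ULL << 23) |                 /* si     */
                    (1ULL << 27) |                 /* rt     */
                    (1ULL << 36) |                 /* it     */
                    (1ULL << 44) | (1ULL << 46);   /* bn, vm */

            assert(ipsr == 0x501008826000ULL);
            return 0;
    }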
diff --git a/arch/ia64/kvm/vtlb.c b/arch/ia64/kvm/vtlb.c index e22b93361e08..6b6307a3bd55 100644 --- a/arch/ia64/kvm/vtlb.c +++ b/arch/ia64/kvm/vtlb.c | |||
@@ -183,8 +183,8 @@ void mark_pages_dirty(struct kvm_vcpu *v, u64 pte, u64 ps) | |||
183 | u64 i, dirty_pages = 1; | 183 | u64 i, dirty_pages = 1; |
184 | u64 base_gfn = (pte&_PAGE_PPN_MASK) >> PAGE_SHIFT; | 184 | u64 base_gfn = (pte&_PAGE_PPN_MASK) >> PAGE_SHIFT; |
185 | spinlock_t *lock = __kvm_va(v->arch.dirty_log_lock_pa); | 185 | spinlock_t *lock = __kvm_va(v->arch.dirty_log_lock_pa); |
186 | void *dirty_bitmap = (void *)v - (KVM_VCPU_OFS + v->vcpu_id * VCPU_SIZE) | 186 | void *dirty_bitmap = (void *)KVM_MEM_DIRTY_LOG_BASE; |
187 | + KVM_MEM_DIRTY_LOG_OFS; | 187 | |
188 | dirty_pages <<= ps <= PAGE_SHIFT ? 0 : ps - PAGE_SHIFT; | 188 | dirty_pages <<= ps <= PAGE_SHIFT ? 0 : ps - PAGE_SHIFT; |
189 | 189 | ||
190 | vmm_spin_lock(lock); | 190 | vmm_spin_lock(lock); |
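[Editor's note] The shift in mark_pages_dirty() converts one translation of page-size order ps into a count of base pages for the dirty log: no scaling if the mapping is no larger than a base page, otherwise 2^(ps - PAGE_SHIFT) entries. A worked example (PAGE_SHIFT = 14, i.e. 16 KB base pages, is one common ia64 configuration; the values are illustrative):

    /* ps = 24 means a 16 MB translation; with 16 KB base pages that
       spans 1 << (24 - 14) = 1024 dirty-log entries */
    u64 dirty_pages = 1;
    u64 ps = 24, page_shift = 14;

    dirty_pages <<= ps <= page_shift ? 0 : ps - page_shift;
    /* dirty_pages == 1024 */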
diff --git a/arch/ia64/sn/kernel/irq.c b/arch/ia64/sn/kernel/irq.c index 0c66dbdd1d72..66fd705e82c0 100644 --- a/arch/ia64/sn/kernel/irq.c +++ b/arch/ia64/sn/kernel/irq.c | |||
@@ -227,14 +227,14 @@ finish_up: | |||
227 | return new_irq_info; | 227 | return new_irq_info; |
228 | } | 228 | } |
229 | 229 | ||
230 | static void sn_set_affinity_irq(unsigned int irq, cpumask_t mask) | 230 | static void sn_set_affinity_irq(unsigned int irq, const struct cpumask *mask) |
231 | { | 231 | { |
232 | struct sn_irq_info *sn_irq_info, *sn_irq_info_safe; | 232 | struct sn_irq_info *sn_irq_info, *sn_irq_info_safe; |
233 | nasid_t nasid; | 233 | nasid_t nasid; |
234 | int slice; | 234 | int slice; |
235 | 235 | ||
236 | nasid = cpuid_to_nasid(first_cpu(mask)); | 236 | nasid = cpuid_to_nasid(cpumask_first(mask)); |
237 | slice = cpuid_to_slice(first_cpu(mask)); | 237 | slice = cpuid_to_slice(cpumask_first(mask)); |
238 | 238 | ||
239 | list_for_each_entry_safe(sn_irq_info, sn_irq_info_safe, | 239 | list_for_each_entry_safe(sn_irq_info, sn_irq_info_safe, |
240 | sn_irq_lh[irq], list) | 240 | sn_irq_lh[irq], list) |
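[Editor's note] This hunk is one instance of a tree-wide conversion: affinity callbacks that took a cpumask_t by value (copying NR_CPUS bits onto the stack on every call) now take const struct cpumask *. A minimal kernel-context sketch of the two calling conventions (the function names are illustrative, not from the tree):

    #include <linux/cpumask.h>

    /* old style: the whole mask is copied at the call site */
    static int pick_cpu_old(cpumask_t mask)
    {
            return first_cpu(mask);
    }

    /* new style: only a pointer travels; accessors take the pointer */
    static int pick_cpu_new(const struct cpumask *mask)
    {
            return cpumask_first(mask);
    }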
diff --git a/arch/ia64/sn/kernel/msi_sn.c b/arch/ia64/sn/kernel/msi_sn.c index 83f190ffe350..ca553b0429ce 100644 --- a/arch/ia64/sn/kernel/msi_sn.c +++ b/arch/ia64/sn/kernel/msi_sn.c | |||
@@ -151,7 +151,8 @@ int sn_setup_msi_irq(struct pci_dev *pdev, struct msi_desc *entry) | |||
151 | } | 151 | } |
152 | 152 | ||
153 | #ifdef CONFIG_SMP | 153 | #ifdef CONFIG_SMP |
154 | static void sn_set_msi_irq_affinity(unsigned int irq, cpumask_t cpu_mask) | 154 | static void sn_set_msi_irq_affinity(unsigned int irq, |
155 | const struct cpumask *cpu_mask) | ||
155 | { | 156 | { |
156 | struct msi_msg msg; | 157 | struct msi_msg msg; |
157 | int slice; | 158 | int slice; |
@@ -164,7 +165,7 @@ static void sn_set_msi_irq_affinity(unsigned int irq, cpumask_t cpu_mask) | |||
164 | struct sn_pcibus_provider *provider; | 165 | struct sn_pcibus_provider *provider; |
165 | unsigned int cpu; | 166 | unsigned int cpu; |
166 | 167 | ||
167 | cpu = first_cpu(cpu_mask); | 168 | cpu = cpumask_first(cpu_mask); |
168 | sn_irq_info = sn_msi_info[irq].sn_irq_info; | 169 | sn_irq_info = sn_msi_info[irq].sn_irq_info; |
169 | if (sn_irq_info == NULL || sn_irq_info->irq_int_bit >= 0) | 170 | if (sn_irq_info == NULL || sn_irq_info->irq_int_bit >= 0) |
170 | return; | 171 | return; |
@@ -204,7 +205,7 @@ static void sn_set_msi_irq_affinity(unsigned int irq, cpumask_t cpu_mask) | |||
204 | msg.address_lo = (u32)(bus_addr & 0x00000000ffffffff); | 205 | msg.address_lo = (u32)(bus_addr & 0x00000000ffffffff); |
205 | 206 | ||
206 | write_msi_msg(irq, &msg); | 207 | write_msi_msg(irq, &msg); |
207 | irq_desc[irq].affinity = cpu_mask; | 208 | irq_desc[irq].affinity = *cpu_mask; |
208 | } | 209 | } |
209 | #endif /* CONFIG_SMP */ | 210 | #endif /* CONFIG_SMP */ |
210 | 211 | ||
diff --git a/arch/m32r/Kconfig b/arch/m32r/Kconfig index 29047d5c259a..cabba332cc48 100644 --- a/arch/m32r/Kconfig +++ b/arch/m32r/Kconfig | |||
@@ -10,6 +10,7 @@ config M32R | |||
10 | default y | 10 | default y |
11 | select HAVE_IDE | 11 | select HAVE_IDE |
12 | select HAVE_OPROFILE | 12 | select HAVE_OPROFILE |
13 | select INIT_ALL_POSSIBLE | ||
13 | 14 | ||
14 | config SBUS | 15 | config SBUS |
15 | bool | 16 | bool |
diff --git a/arch/m32r/kernel/smpboot.c b/arch/m32r/kernel/smpboot.c index 39cb6da72dcb..0f06b3722e96 100644 --- a/arch/m32r/kernel/smpboot.c +++ b/arch/m32r/kernel/smpboot.c | |||
@@ -73,17 +73,11 @@ static unsigned int bsp_phys_id = -1; | |||
73 | /* Bitmask of physically existing CPUs */ | 73 | /* Bitmask of physically existing CPUs */ |
74 | physid_mask_t phys_cpu_present_map; | 74 | physid_mask_t phys_cpu_present_map; |
75 | 75 | ||
76 | /* Bitmask of currently online CPUs */ | ||
77 | cpumask_t cpu_online_map; | ||
78 | EXPORT_SYMBOL(cpu_online_map); | ||
79 | |||
80 | cpumask_t cpu_bootout_map; | 76 | cpumask_t cpu_bootout_map; |
81 | cpumask_t cpu_bootin_map; | 77 | cpumask_t cpu_bootin_map; |
82 | static cpumask_t cpu_callin_map; | 78 | static cpumask_t cpu_callin_map; |
83 | cpumask_t cpu_callout_map; | 79 | cpumask_t cpu_callout_map; |
84 | EXPORT_SYMBOL(cpu_callout_map); | 80 | EXPORT_SYMBOL(cpu_callout_map); |
85 | cpumask_t cpu_possible_map = CPU_MASK_ALL; | ||
86 | EXPORT_SYMBOL(cpu_possible_map); | ||
87 | 81 | ||
88 | /* Per CPU bogomips and other parameters */ | 82 | /* Per CPU bogomips and other parameters */ |
89 | struct cpuinfo_m32r cpu_data[NR_CPUS] __cacheline_aligned; | 83 | struct cpuinfo_m32r cpu_data[NR_CPUS] __cacheline_aligned; |
diff --git a/arch/m68knommu/platform/coldfire/pit.c b/arch/m68knommu/platform/coldfire/pit.c index c5b916700b22..2a12e7fa9748 100644 --- a/arch/m68knommu/platform/coldfire/pit.c +++ b/arch/m68knommu/platform/coldfire/pit.c | |||
@@ -156,7 +156,7 @@ void hw_timer_init(void) | |||
156 | { | 156 | { |
157 | u32 imr; | 157 | u32 imr; |
158 | 158 | ||
159 | cf_pit_clockevent.cpumask = cpumask_of_cpu(smp_processor_id()); | 159 | cf_pit_clockevent.cpumask = cpumask_of(smp_processor_id()); |
160 | cf_pit_clockevent.mult = div_sc(FREQ, NSEC_PER_SEC, 32); | 160 | cf_pit_clockevent.mult = div_sc(FREQ, NSEC_PER_SEC, 32); |
161 | cf_pit_clockevent.max_delta_ns = | 161 | cf_pit_clockevent.max_delta_ns = |
162 | clockevent_delta2ns(0xFFFF, &cf_pit_clockevent); | 162 | clockevent_delta2ns(0xFFFF, &cf_pit_clockevent); |
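[Editor's note] The clock_event_device conversion follows the same pattern: the cpumask member becomes a pointer, and cpumask_of(cpu) yields a pointer to a constant single-CPU mask rather than the by-value cpumask_of_cpu(cpu). A hedged sketch of the registration idiom:

    #include <linux/clockchips.h>
    #include <linux/cpumask.h>
    #include <linux/smp.h>

    static void register_on_this_cpu(struct clock_event_device *cd)
    {
            /* no NR_CPUS-bit copy; cd->cpumask just points at a
               static single-CPU mask */
            cd->cpumask = cpumask_of(smp_processor_id());
            clockevents_register_device(cd);
    }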
diff --git a/arch/mips/include/asm/irq.h b/arch/mips/include/asm/irq.h index a58f0eecc68f..abc62aa744ac 100644 --- a/arch/mips/include/asm/irq.h +++ b/arch/mips/include/asm/irq.h | |||
@@ -49,7 +49,8 @@ static inline void smtc_im_ack_irq(unsigned int irq) | |||
49 | #ifdef CONFIG_MIPS_MT_SMTC_IRQAFF | 49 | #ifdef CONFIG_MIPS_MT_SMTC_IRQAFF |
50 | #include <linux/cpumask.h> | 50 | #include <linux/cpumask.h> |
51 | 51 | ||
52 | extern void plat_set_irq_affinity(unsigned int irq, cpumask_t affinity); | 52 | extern void plat_set_irq_affinity(unsigned int irq, |
53 | const struct cpumask *affinity); | ||
53 | extern void smtc_forward_irq(unsigned int irq); | 54 | extern void smtc_forward_irq(unsigned int irq); |
54 | 55 | ||
55 | /* | 56 | /* |
diff --git a/arch/mips/include/asm/mach-ip27/topology.h b/arch/mips/include/asm/mach-ip27/topology.h index 7785bec732f2..1fb959f98982 100644 --- a/arch/mips/include/asm/mach-ip27/topology.h +++ b/arch/mips/include/asm/mach-ip27/topology.h | |||
@@ -37,7 +37,6 @@ extern unsigned char __node_distances[MAX_COMPACT_NODES][MAX_COMPACT_NODES]; | |||
37 | 37 | ||
38 | /* sched_domains SD_NODE_INIT for SGI IP27 machines */ | 38 | /* sched_domains SD_NODE_INIT for SGI IP27 machines */ |
39 | #define SD_NODE_INIT (struct sched_domain) { \ | 39 | #define SD_NODE_INIT (struct sched_domain) { \ |
40 | .span = CPU_MASK_NONE, \ | ||
41 | .parent = NULL, \ | 40 | .parent = NULL, \ |
42 | .child = NULL, \ | 41 | .child = NULL, \ |
43 | .groups = NULL, \ | 42 | .groups = NULL, \ |
diff --git a/arch/mips/include/asm/smp.h b/arch/mips/include/asm/smp.h index 0ff5b523ea77..86557b5d1b3f 100644 --- a/arch/mips/include/asm/smp.h +++ b/arch/mips/include/asm/smp.h | |||
@@ -38,9 +38,6 @@ extern int __cpu_logical_map[NR_CPUS]; | |||
38 | #define SMP_RESCHEDULE_YOURSELF 0x1 /* XXX braindead */ | 38 | #define SMP_RESCHEDULE_YOURSELF 0x1 /* XXX braindead */ |
39 | #define SMP_CALL_FUNCTION 0x2 | 39 | #define SMP_CALL_FUNCTION 0x2 |
40 | 40 | ||
41 | extern cpumask_t phys_cpu_present_map; | ||
42 | #define cpu_possible_map phys_cpu_present_map | ||
43 | |||
44 | extern void asmlinkage smp_bootstrap(void); | 41 | extern void asmlinkage smp_bootstrap(void); |
45 | 42 | ||
46 | /* | 43 | /* |
diff --git a/arch/mips/jazz/irq.c b/arch/mips/jazz/irq.c index d7f8a782aae4..03965cb1b252 100644 --- a/arch/mips/jazz/irq.c +++ b/arch/mips/jazz/irq.c | |||
@@ -146,7 +146,7 @@ void __init plat_time_init(void) | |||
146 | 146 | ||
147 | BUG_ON(HZ != 100); | 147 | BUG_ON(HZ != 100); |
148 | 148 | ||
149 | cd->cpumask = cpumask_of_cpu(cpu); | 149 | cd->cpumask = cpumask_of(cpu); |
150 | clockevents_register_device(cd); | 150 | clockevents_register_device(cd); |
151 | action->dev_id = cd; | 151 | action->dev_id = cd; |
152 | setup_irq(JAZZ_TIMER_IRQ, action); | 152 | setup_irq(JAZZ_TIMER_IRQ, action); |
diff --git a/arch/mips/kernel/cevt-bcm1480.c b/arch/mips/kernel/cevt-bcm1480.c index 0a57f86945f1..b820661678b0 100644 --- a/arch/mips/kernel/cevt-bcm1480.c +++ b/arch/mips/kernel/cevt-bcm1480.c | |||
@@ -126,7 +126,7 @@ void __cpuinit sb1480_clockevent_init(void) | |||
126 | cd->min_delta_ns = clockevent_delta2ns(2, cd); | 126 | cd->min_delta_ns = clockevent_delta2ns(2, cd); |
127 | cd->rating = 200; | 127 | cd->rating = 200; |
128 | cd->irq = irq; | 128 | cd->irq = irq; |
129 | cd->cpumask = cpumask_of_cpu(cpu); | 129 | cd->cpumask = cpumask_of(cpu); |
130 | cd->set_next_event = sibyte_next_event; | 130 | cd->set_next_event = sibyte_next_event; |
131 | cd->set_mode = sibyte_set_mode; | 131 | cd->set_mode = sibyte_set_mode; |
132 | clockevents_register_device(cd); | 132 | clockevents_register_device(cd); |
@@ -148,6 +148,6 @@ void __cpuinit sb1480_clockevent_init(void) | |||
148 | action->name = name; | 148 | action->name = name; |
149 | action->dev_id = cd; | 149 | action->dev_id = cd; |
150 | 150 | ||
151 | irq_set_affinity(irq, cpumask_of_cpu(cpu)); | 151 | irq_set_affinity(irq, cpumask_of(cpu)); |
152 | setup_irq(irq, action); | 152 | setup_irq(irq, action); |
153 | } | 153 | } |
diff --git a/arch/mips/kernel/cevt-ds1287.c b/arch/mips/kernel/cevt-ds1287.c index df4acb68bfb5..1ada45ea0700 100644 --- a/arch/mips/kernel/cevt-ds1287.c +++ b/arch/mips/kernel/cevt-ds1287.c | |||
@@ -88,7 +88,6 @@ static void ds1287_event_handler(struct clock_event_device *dev) | |||
88 | static struct clock_event_device ds1287_clockevent = { | 88 | static struct clock_event_device ds1287_clockevent = { |
89 | .name = "ds1287", | 89 | .name = "ds1287", |
90 | .features = CLOCK_EVT_FEAT_PERIODIC, | 90 | .features = CLOCK_EVT_FEAT_PERIODIC, |
91 | .cpumask = CPU_MASK_CPU0, | ||
92 | .set_next_event = ds1287_set_next_event, | 91 | .set_next_event = ds1287_set_next_event, |
93 | .set_mode = ds1287_set_mode, | 92 | .set_mode = ds1287_set_mode, |
94 | .event_handler = ds1287_event_handler, | 93 | .event_handler = ds1287_event_handler, |
@@ -122,6 +121,7 @@ int __init ds1287_clockevent_init(int irq) | |||
122 | clockevent_set_clock(cd, 32768); | 121 | clockevent_set_clock(cd, 32768); |
123 | cd->max_delta_ns = clockevent_delta2ns(0x7fffffff, cd); | 122 | cd->max_delta_ns = clockevent_delta2ns(0x7fffffff, cd); |
124 | cd->min_delta_ns = clockevent_delta2ns(0x300, cd); | 123 | cd->min_delta_ns = clockevent_delta2ns(0x300, cd); |
124 | cd->cpumask = cpumask_of(0); | ||
125 | 125 | ||
126 | clockevents_register_device(&ds1287_clockevent); | 126 | clockevents_register_device(&ds1287_clockevent); |
127 | 127 | ||
diff --git a/arch/mips/kernel/cevt-gt641xx.c b/arch/mips/kernel/cevt-gt641xx.c index 6e2f58520afb..e9b787feedcb 100644 --- a/arch/mips/kernel/cevt-gt641xx.c +++ b/arch/mips/kernel/cevt-gt641xx.c | |||
@@ -96,7 +96,6 @@ static void gt641xx_timer0_event_handler(struct clock_event_device *dev) | |||
96 | static struct clock_event_device gt641xx_timer0_clockevent = { | 96 | static struct clock_event_device gt641xx_timer0_clockevent = { |
97 | .name = "gt641xx-timer0", | 97 | .name = "gt641xx-timer0", |
98 | .features = CLOCK_EVT_FEAT_PERIODIC | CLOCK_EVT_FEAT_ONESHOT, | 98 | .features = CLOCK_EVT_FEAT_PERIODIC | CLOCK_EVT_FEAT_ONESHOT, |
99 | .cpumask = CPU_MASK_CPU0, | ||
100 | .irq = GT641XX_TIMER0_IRQ, | 99 | .irq = GT641XX_TIMER0_IRQ, |
101 | .set_next_event = gt641xx_timer0_set_next_event, | 100 | .set_next_event = gt641xx_timer0_set_next_event, |
102 | .set_mode = gt641xx_timer0_set_mode, | 101 | .set_mode = gt641xx_timer0_set_mode, |
@@ -132,6 +131,7 @@ static int __init gt641xx_timer0_clockevent_init(void) | |||
132 | clockevent_set_clock(cd, gt641xx_base_clock); | 131 | clockevent_set_clock(cd, gt641xx_base_clock); |
133 | cd->max_delta_ns = clockevent_delta2ns(0x7fffffff, cd); | 132 | cd->max_delta_ns = clockevent_delta2ns(0x7fffffff, cd); |
134 | cd->min_delta_ns = clockevent_delta2ns(0x300, cd); | 133 | cd->min_delta_ns = clockevent_delta2ns(0x300, cd); |
134 | cd->cpumask = cpumask_of(0); | ||
135 | 135 | ||
136 | clockevents_register_device(&gt641xx_timer0_clockevent); | 136 | clockevents_register_device(&gt641xx_timer0_clockevent); |
137 | 137 | ||
diff --git a/arch/mips/kernel/cevt-r4k.c b/arch/mips/kernel/cevt-r4k.c index 4a4c59f2737a..e1ec83b68031 100644 --- a/arch/mips/kernel/cevt-r4k.c +++ b/arch/mips/kernel/cevt-r4k.c | |||
@@ -195,7 +195,7 @@ int __cpuinit mips_clockevent_init(void) | |||
195 | 195 | ||
196 | cd->rating = 300; | 196 | cd->rating = 300; |
197 | cd->irq = irq; | 197 | cd->irq = irq; |
198 | cd->cpumask = cpumask_of_cpu(cpu); | 198 | cd->cpumask = cpumask_of(cpu); |
199 | cd->set_next_event = mips_next_event; | 199 | cd->set_next_event = mips_next_event; |
200 | cd->set_mode = mips_set_clock_mode; | 200 | cd->set_mode = mips_set_clock_mode; |
201 | cd->event_handler = mips_event_handler; | 201 | cd->event_handler = mips_event_handler; |
diff --git a/arch/mips/kernel/cevt-sb1250.c b/arch/mips/kernel/cevt-sb1250.c index 63ac3ad462bc..a2eebaafda52 100644 --- a/arch/mips/kernel/cevt-sb1250.c +++ b/arch/mips/kernel/cevt-sb1250.c | |||
@@ -125,7 +125,7 @@ void __cpuinit sb1250_clockevent_init(void) | |||
125 | cd->min_delta_ns = clockevent_delta2ns(2, cd); | 125 | cd->min_delta_ns = clockevent_delta2ns(2, cd); |
126 | cd->rating = 200; | 126 | cd->rating = 200; |
127 | cd->irq = irq; | 127 | cd->irq = irq; |
128 | cd->cpumask = cpumask_of_cpu(cpu); | 128 | cd->cpumask = cpumask_of(cpu); |
129 | cd->set_next_event = sibyte_next_event; | 129 | cd->set_next_event = sibyte_next_event; |
130 | cd->set_mode = sibyte_set_mode; | 130 | cd->set_mode = sibyte_set_mode; |
131 | clockevents_register_device(cd); | 131 | clockevents_register_device(cd); |
@@ -147,6 +147,6 @@ void __cpuinit sb1250_clockevent_init(void) | |||
147 | action->name = name; | 147 | action->name = name; |
148 | action->dev_id = cd; | 148 | action->dev_id = cd; |
149 | 149 | ||
150 | irq_set_affinity(irq, cpumask_of_cpu(cpu)); | 150 | irq_set_affinity(irq, cpumask_of(cpu)); |
151 | setup_irq(irq, action); | 151 | setup_irq(irq, action); |
152 | } | 152 | } |
diff --git a/arch/mips/kernel/cevt-smtc.c b/arch/mips/kernel/cevt-smtc.c index 5162fe4b5952..6d45e24db5bf 100644 --- a/arch/mips/kernel/cevt-smtc.c +++ b/arch/mips/kernel/cevt-smtc.c | |||
@@ -292,7 +292,7 @@ int __cpuinit mips_clockevent_init(void) | |||
292 | 292 | ||
293 | cd->rating = 300; | 293 | cd->rating = 300; |
294 | cd->irq = irq; | 294 | cd->irq = irq; |
295 | cd->cpumask = cpumask_of_cpu(cpu); | 295 | cd->cpumask = cpumask_of(cpu); |
296 | cd->set_next_event = mips_next_event; | 296 | cd->set_next_event = mips_next_event; |
297 | cd->set_mode = mips_set_clock_mode; | 297 | cd->set_mode = mips_set_clock_mode; |
298 | cd->event_handler = mips_event_handler; | 298 | cd->event_handler = mips_event_handler; |
diff --git a/arch/mips/kernel/cevt-txx9.c b/arch/mips/kernel/cevt-txx9.c index b5fc4eb412d2..eccf7d6096bd 100644 --- a/arch/mips/kernel/cevt-txx9.c +++ b/arch/mips/kernel/cevt-txx9.c | |||
@@ -112,7 +112,6 @@ static struct clock_event_device txx9tmr_clock_event_device = { | |||
112 | .name = "TXx9", | 112 | .name = "TXx9", |
113 | .features = CLOCK_EVT_FEAT_PERIODIC | CLOCK_EVT_FEAT_ONESHOT, | 113 | .features = CLOCK_EVT_FEAT_PERIODIC | CLOCK_EVT_FEAT_ONESHOT, |
114 | .rating = 200, | 114 | .rating = 200, |
115 | .cpumask = CPU_MASK_CPU0, | ||
116 | .set_mode = txx9tmr_set_mode, | 115 | .set_mode = txx9tmr_set_mode, |
117 | .set_next_event = txx9tmr_set_next_event, | 116 | .set_next_event = txx9tmr_set_next_event, |
118 | }; | 117 | }; |
@@ -150,6 +149,7 @@ void __init txx9_clockevent_init(unsigned long baseaddr, int irq, | |||
150 | clockevent_delta2ns(0xffffffff >> (32 - TXX9_TIMER_BITS), cd); | 149 | clockevent_delta2ns(0xffffffff >> (32 - TXX9_TIMER_BITS), cd); |
151 | cd->min_delta_ns = clockevent_delta2ns(0xf, cd); | 150 | cd->min_delta_ns = clockevent_delta2ns(0xf, cd); |
152 | cd->irq = irq; | 151 | cd->irq = irq; |
152 | cd->cpumask = cpumask_of(0); | 
153 | clockevents_register_device(cd); | 153 | clockevents_register_device(cd); |
154 | setup_irq(irq, &txx9tmr_irq); | 154 | setup_irq(irq, &txx9tmr_irq); |
155 | printk(KERN_INFO "TXx9: clockevent device at 0x%lx, irq %d\n", | 155 | printk(KERN_INFO "TXx9: clockevent device at 0x%lx, irq %d\n", |
diff --git a/arch/mips/kernel/i8253.c b/arch/mips/kernel/i8253.c index b6ac55162b9a..f4d187825f96 100644 --- a/arch/mips/kernel/i8253.c +++ b/arch/mips/kernel/i8253.c | |||
@@ -115,7 +115,7 @@ void __init setup_pit_timer(void) | |||
115 | * Start pit with the boot cpu mask and make it global after the | 115 | * Start pit with the boot cpu mask and make it global after the |
116 | * IO_APIC has been initialized. | 116 | * IO_APIC has been initialized. |
117 | */ | 117 | */ |
118 | cd->cpumask = cpumask_of_cpu(cpu); | 118 | cd->cpumask = cpumask_of(cpu); |
119 | clockevent_set_clock(cd, CLOCK_TICK_RATE); | 119 | clockevent_set_clock(cd, CLOCK_TICK_RATE); |
120 | cd->max_delta_ns = clockevent_delta2ns(0x7FFF, cd); | 120 | cd->max_delta_ns = clockevent_delta2ns(0x7FFF, cd); |
121 | cd->min_delta_ns = clockevent_delta2ns(0xF, cd); | 121 | cd->min_delta_ns = clockevent_delta2ns(0xF, cd); |
diff --git a/arch/mips/kernel/irq-gic.c b/arch/mips/kernel/irq-gic.c index f0a4bb19e096..494a49a317e9 100644 --- a/arch/mips/kernel/irq-gic.c +++ b/arch/mips/kernel/irq-gic.c | |||
@@ -155,7 +155,7 @@ static void gic_unmask_irq(unsigned int irq) | |||
155 | 155 | ||
156 | static DEFINE_SPINLOCK(gic_lock); | 156 | static DEFINE_SPINLOCK(gic_lock); |
157 | 157 | ||
158 | static void gic_set_affinity(unsigned int irq, cpumask_t cpumask) | 158 | static void gic_set_affinity(unsigned int irq, const struct cpumask *cpumask) |
159 | { | 159 | { |
160 | cpumask_t tmp = CPU_MASK_NONE; | 160 | cpumask_t tmp = CPU_MASK_NONE; |
161 | unsigned long flags; | 161 | unsigned long flags; |
@@ -164,7 +164,7 @@ static void gic_set_affinity(unsigned int irq, cpumask_t cpumask) | |||
164 | pr_debug("%s called\n", __func__); | 164 | pr_debug("%s called\n", __func__); |
165 | irq -= _irqbase; | 165 | irq -= _irqbase; |
166 | 166 | ||
167 | cpus_and(tmp, cpumask, cpu_online_map); | 167 | cpumask_and(&tmp, cpumask, cpu_online_mask); |
168 | if (cpus_empty(tmp)) | 168 | if (cpus_empty(tmp)) |
169 | return; | 169 | return; |
170 | 170 | ||
@@ -187,7 +187,7 @@ static void gic_set_affinity(unsigned int irq, cpumask_t cpumask) | |||
187 | set_bit(irq, pcpu_masks[first_cpu(tmp)].pcpu_mask); | 187 | set_bit(irq, pcpu_masks[first_cpu(tmp)].pcpu_mask); |
188 | 188 | ||
189 | } | 189 | } |
190 | irq_desc[irq].affinity = cpumask; | 190 | irq_desc[irq].affinity = *cpumask; |
191 | spin_unlock_irqrestore(&gic_lock, flags); | 191 | spin_unlock_irqrestore(&gic_lock, flags); |
192 | 192 | ||
193 | } | 193 | } |
diff --git a/arch/mips/kernel/smp-cmp.c b/arch/mips/kernel/smp-cmp.c index ca476c4f62a5..f27beca4b26d 100644 --- a/arch/mips/kernel/smp-cmp.c +++ b/arch/mips/kernel/smp-cmp.c | |||
@@ -51,10 +51,10 @@ static int __init allowcpus(char *str) | |||
51 | int len; | 51 | int len; |
52 | 52 | ||
53 | cpus_clear(cpu_allow_map); | 53 | cpus_clear(cpu_allow_map); |
54 | if (cpulist_parse(str, cpu_allow_map) == 0) { | 54 | if (cpulist_parse(str, &cpu_allow_map) == 0) { |
55 | cpu_set(0, cpu_allow_map); | 55 | cpu_set(0, cpu_allow_map); |
56 | cpus_and(cpu_possible_map, cpu_possible_map, cpu_allow_map); | 56 | cpus_and(cpu_possible_map, cpu_possible_map, cpu_allow_map); |
57 | len = cpulist_scnprintf(buf, sizeof(buf)-1, cpu_possible_map); | 57 | len = cpulist_scnprintf(buf, sizeof(buf)-1, &cpu_possible_map); |
58 | buf[len] = '\0'; | 58 | buf[len] = '\0'; |
59 | pr_debug("Allowable CPUs: %s\n", buf); | 59 | pr_debug("Allowable CPUs: %s\n", buf); |
60 | return 1; | 60 | return 1; |
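[Editor's note] cpulist_parse() turns a human-readable CPU list into a mask and returns 0 on success; after this change it takes a pointer to the destination mask. For example (the list string is illustrative):

    cpumask_t allow;

    /* "0-2,4" sets bits 0, 1, 2 and 4; an unparsable string makes
       cpulist_parse() return non-zero */
    if (cpulist_parse("0-2,4", &allow) == 0)
            cpu_set(0, allow);      /* keep the boot CPU, as above */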
@@ -226,7 +226,7 @@ void __init cmp_smp_setup(void) | |||
226 | 226 | ||
227 | for (i = 1; i < NR_CPUS; i++) { | 227 | for (i = 1; i < NR_CPUS; i++) { |
228 | if (amon_cpu_avail(i)) { | 228 | if (amon_cpu_avail(i)) { |
229 | cpu_set(i, phys_cpu_present_map); | 229 | cpu_set(i, cpu_possible_map); |
230 | __cpu_number_map[i] = ++ncpu; | 230 | __cpu_number_map[i] = ++ncpu; |
231 | __cpu_logical_map[ncpu] = i; | 231 | __cpu_logical_map[ncpu] = i; |
232 | } | 232 | } |
diff --git a/arch/mips/kernel/smp-mt.c b/arch/mips/kernel/smp-mt.c index 87a1816c1f45..6f7ee5ac46ee 100644 --- a/arch/mips/kernel/smp-mt.c +++ b/arch/mips/kernel/smp-mt.c | |||
@@ -70,7 +70,7 @@ static unsigned int __init smvp_vpe_init(unsigned int tc, unsigned int mvpconf0, | |||
70 | write_vpe_c0_vpeconf0(tmp); | 70 | write_vpe_c0_vpeconf0(tmp); |
71 | 71 | ||
72 | /* Record this as available CPU */ | 72 | /* Record this as available CPU */ |
73 | cpu_set(tc, phys_cpu_present_map); | 73 | cpu_set(tc, cpu_possible_map); |
74 | __cpu_number_map[tc] = ++ncpu; | 74 | __cpu_number_map[tc] = ++ncpu; |
75 | __cpu_logical_map[ncpu] = tc; | 75 | __cpu_logical_map[ncpu] = tc; |
76 | } | 76 | } |
diff --git a/arch/mips/kernel/smp.c b/arch/mips/kernel/smp.c index 8bf88faf5afd..3da94704f816 100644 --- a/arch/mips/kernel/smp.c +++ b/arch/mips/kernel/smp.c | |||
@@ -44,15 +44,10 @@ | |||
44 | #include <asm/mipsmtregs.h> | 44 | #include <asm/mipsmtregs.h> |
45 | #endif /* CONFIG_MIPS_MT_SMTC */ | 45 | #endif /* CONFIG_MIPS_MT_SMTC */ |
46 | 46 | ||
47 | cpumask_t phys_cpu_present_map; /* Bitmask of available CPUs */ | ||
48 | volatile cpumask_t cpu_callin_map; /* Bitmask of started secondaries */ | 47 | volatile cpumask_t cpu_callin_map; /* Bitmask of started secondaries */ |
49 | cpumask_t cpu_online_map; /* Bitmask of currently online CPUs */ | ||
50 | int __cpu_number_map[NR_CPUS]; /* Map physical to logical */ | 48 | int __cpu_number_map[NR_CPUS]; /* Map physical to logical */ |
51 | int __cpu_logical_map[NR_CPUS]; /* Map logical to physical */ | 49 | int __cpu_logical_map[NR_CPUS]; /* Map logical to physical */ |
52 | 50 | ||
53 | EXPORT_SYMBOL(phys_cpu_present_map); | ||
54 | EXPORT_SYMBOL(cpu_online_map); | ||
55 | |||
56 | extern void cpu_idle(void); | 51 | extern void cpu_idle(void); |
57 | 52 | ||
58 | /* Number of TCs (or siblings in Intel speak) per CPU core */ | 53 | /* Number of TCs (or siblings in Intel speak) per CPU core */ |
@@ -195,7 +190,7 @@ void __init smp_prepare_cpus(unsigned int max_cpus) | |||
195 | /* preload SMP state for boot cpu */ | 190 | /* preload SMP state for boot cpu */ |
196 | void __devinit smp_prepare_boot_cpu(void) | 191 | void __devinit smp_prepare_boot_cpu(void) |
197 | { | 192 | { |
198 | cpu_set(0, phys_cpu_present_map); | 193 | cpu_set(0, cpu_possible_map); |
199 | cpu_set(0, cpu_online_map); | 194 | cpu_set(0, cpu_online_map); |
200 | cpu_set(0, cpu_callin_map); | 195 | cpu_set(0, cpu_callin_map); |
201 | } | 196 | } |
diff --git a/arch/mips/kernel/smtc.c b/arch/mips/kernel/smtc.c index 897fb2b4751c..b6cca01ff82b 100644 --- a/arch/mips/kernel/smtc.c +++ b/arch/mips/kernel/smtc.c | |||
@@ -290,7 +290,7 @@ static void smtc_configure_tlb(void) | |||
290 | * possibly leave some TCs/VPEs as "slave" processors. | 290 | * possibly leave some TCs/VPEs as "slave" processors. |
291 | * | 291 | * |
292 | * Use c0_MVPConf0 to find out how many TCs are available, setting up | 292 | * Use c0_MVPConf0 to find out how many TCs are available, setting up |
293 | * phys_cpu_present_map and the logical/physical mappings. | 293 | * cpu_possible_map and the logical/physical mappings. |
294 | */ | 294 | */ |
295 | 295 | ||
296 | int __init smtc_build_cpu_map(int start_cpu_slot) | 296 | int __init smtc_build_cpu_map(int start_cpu_slot) |
@@ -304,7 +304,7 @@ int __init smtc_build_cpu_map(int start_cpu_slot) | |||
304 | */ | 304 | */ |
305 | ntcs = ((read_c0_mvpconf0() & MVPCONF0_PTC) >> MVPCONF0_PTC_SHIFT) + 1; | 305 | ntcs = ((read_c0_mvpconf0() & MVPCONF0_PTC) >> MVPCONF0_PTC_SHIFT) + 1; |
306 | for (i=start_cpu_slot; i<NR_CPUS && i<ntcs; i++) { | 306 | for (i=start_cpu_slot; i<NR_CPUS && i<ntcs; i++) { |
307 | cpu_set(i, phys_cpu_present_map); | 307 | cpu_set(i, cpu_possible_map); |
308 | __cpu_number_map[i] = i; | 308 | __cpu_number_map[i] = i; |
309 | __cpu_logical_map[i] = i; | 309 | __cpu_logical_map[i] = i; |
310 | } | 310 | } |
@@ -521,7 +521,7 @@ void smtc_prepare_cpus(int cpus) | |||
521 | * Pull any physically present but unused TCs out of circulation. | 521 | * Pull any physically present but unused TCs out of circulation. |
522 | */ | 522 | */ |
523 | while (tc < (((val & MVPCONF0_PTC) >> MVPCONF0_PTC_SHIFT) + 1)) { | 523 | while (tc < (((val & MVPCONF0_PTC) >> MVPCONF0_PTC_SHIFT) + 1)) { |
524 | cpu_clear(tc, phys_cpu_present_map); | 524 | cpu_clear(tc, cpu_possible_map); |
525 | cpu_clear(tc, cpu_present_map); | 525 | cpu_clear(tc, cpu_present_map); |
526 | tc++; | 526 | tc++; |
527 | } | 527 | } |
diff --git a/arch/mips/mti-malta/malta-smtc.c b/arch/mips/mti-malta/malta-smtc.c index f84a46a8ae6e..aabd7274507b 100644 --- a/arch/mips/mti-malta/malta-smtc.c +++ b/arch/mips/mti-malta/malta-smtc.c | |||
@@ -114,9 +114,9 @@ struct plat_smp_ops msmtc_smp_ops = { | |||
114 | */ | 114 | */ |
115 | 115 | ||
116 | 116 | ||
117 | void plat_set_irq_affinity(unsigned int irq, cpumask_t affinity) | 117 | void plat_set_irq_affinity(unsigned int irq, const struct cpumask *affinity) |
118 | { | 118 | { |
119 | cpumask_t tmask = affinity; | 119 | cpumask_t tmask = *affinity; |
120 | int cpu = 0; | 120 | int cpu = 0; |
121 | void smtc_set_irq_affinity(unsigned int irq, cpumask_t aff); | 121 | void smtc_set_irq_affinity(unsigned int irq, cpumask_t aff); |
122 | 122 | ||
@@ -139,7 +139,7 @@ void plat_set_irq_affinity(unsigned int irq, cpumask_t affinity) | |||
139 | * be made to forward to an offline "CPU". | 139 | * be made to forward to an offline "CPU". |
140 | */ | 140 | */ |
141 | 141 | ||
142 | for_each_cpu_mask(cpu, affinity) { | 142 | for_each_cpu(cpu, affinity) { |
143 | if ((cpu_data[cpu].vpe_id != 0) || !cpu_online(cpu)) | 143 | if ((cpu_data[cpu].vpe_id != 0) || !cpu_online(cpu)) |
144 | cpu_clear(cpu, tmask); | 144 | cpu_clear(cpu, tmask); |
145 | } | 145 | } |
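[Editor's note] for_each_cpu() is the pointer-based replacement for for_each_cpu_mask(): it walks the set bits of the mask behind the pointer, which is exactly how the loop above drops offline or non-VPE0 CPUs from tmask. A small illustrative loop:

    int cpu;

    /* visits each CPU whose bit is set in *affinity */
    for_each_cpu(cpu, affinity)
            printk(KERN_DEBUG "CPU %d requested in affinity mask\n", cpu);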
diff --git a/arch/mips/nxp/pnx8550/common/time.c b/arch/mips/nxp/pnx8550/common/time.c index 62f495b57f93..cf293b279098 100644 --- a/arch/mips/nxp/pnx8550/common/time.c +++ b/arch/mips/nxp/pnx8550/common/time.c | |||
@@ -102,6 +102,7 @@ __init void plat_time_init(void) | |||
102 | unsigned int p; | 102 | unsigned int p; |
103 | unsigned int pow2p; | 103 | unsigned int pow2p; |
104 | 104 | ||
105 | pnx8xxx_clockevent.cpumask = cpu_none_mask; | ||
105 | clockevents_register_device(&pnx8xxx_clockevent); | 106 | clockevents_register_device(&pnx8xxx_clockevent); |
106 | clocksource_register(&pnx_clocksource); | 107 | clocksource_register(&pnx_clocksource); |
107 | 108 | ||
diff --git a/arch/mips/pmc-sierra/yosemite/smp.c b/arch/mips/pmc-sierra/yosemite/smp.c index 3a7df647ca77..f78c29b68d77 100644 --- a/arch/mips/pmc-sierra/yosemite/smp.c +++ b/arch/mips/pmc-sierra/yosemite/smp.c | |||
@@ -141,7 +141,7 @@ static void __cpuinit yos_boot_secondary(int cpu, struct task_struct *idle) | |||
141 | } | 141 | } |
142 | 142 | ||
143 | /* | 143 | /* |
144 | * Detect available CPUs, populate phys_cpu_present_map before smp_init | 144 | * Detect available CPUs, populate cpu_possible_map before smp_init |
145 | * | 145 | * |
146 | * We don't want to start the secondary CPU yet nor do we have a nice probing | 146 | * We don't want to start the secondary CPU yet nor do we have a nice probing |
147 | * feature in PMON so we just assume presence of the secondary core. | 147 | * feature in PMON so we just assume presence of the secondary core. |
@@ -150,10 +150,10 @@ static void __init yos_smp_setup(void) | |||
150 | { | 150 | { |
151 | int i; | 151 | int i; |
152 | 152 | ||
153 | cpus_clear(phys_cpu_present_map); | 153 | cpus_clear(cpu_possible_map); |
154 | 154 | ||
155 | for (i = 0; i < 2; i++) { | 155 | for (i = 0; i < 2; i++) { |
156 | cpu_set(i, phys_cpu_present_map); | 156 | cpu_set(i, cpu_possible_map); |
157 | __cpu_number_map[i] = i; | 157 | __cpu_number_map[i] = i; |
158 | __cpu_logical_map[i] = i; | 158 | __cpu_logical_map[i] = i; |
159 | } | 159 | } |
diff --git a/arch/mips/sgi-ip27/ip27-smp.c b/arch/mips/sgi-ip27/ip27-smp.c index ba5cdebeaf0d..5b47d6b65275 100644 --- a/arch/mips/sgi-ip27/ip27-smp.c +++ b/arch/mips/sgi-ip27/ip27-smp.c | |||
@@ -76,7 +76,7 @@ static int do_cpumask(cnodeid_t cnode, nasid_t nasid, int highest) | |||
76 | /* Only let it join in if it's marked enabled */ | 76 | /* Only let it join in if it's marked enabled */ |
77 | if ((acpu->cpu_info.flags & KLINFO_ENABLE) && | 77 | if ((acpu->cpu_info.flags & KLINFO_ENABLE) && |
78 | (tot_cpus_found != NR_CPUS)) { | 78 | (tot_cpus_found != NR_CPUS)) { |
79 | cpu_set(cpuid, phys_cpu_present_map); | 79 | cpu_set(cpuid, cpu_possible_map); |
80 | alloc_cpupda(cpuid, tot_cpus_found); | 80 | alloc_cpupda(cpuid, tot_cpus_found); |
81 | cpus_found++; | 81 | cpus_found++; |
82 | tot_cpus_found++; | 82 | tot_cpus_found++; |
diff --git a/arch/mips/sgi-ip27/ip27-timer.c b/arch/mips/sgi-ip27/ip27-timer.c index 1327c2746fb7..f024057a35f8 100644 --- a/arch/mips/sgi-ip27/ip27-timer.c +++ b/arch/mips/sgi-ip27/ip27-timer.c | |||
@@ -134,7 +134,7 @@ void __cpuinit hub_rt_clock_event_init(void) | |||
134 | cd->min_delta_ns = clockevent_delta2ns(0x300, cd); | 134 | cd->min_delta_ns = clockevent_delta2ns(0x300, cd); |
135 | cd->rating = 200; | 135 | cd->rating = 200; |
136 | cd->irq = irq; | 136 | cd->irq = irq; |
137 | cd->cpumask = cpumask_of_cpu(cpu); | 137 | cd->cpumask = cpumask_of(cpu); |
138 | cd->set_next_event = rt_next_event; | 138 | cd->set_next_event = rt_next_event; |
139 | cd->set_mode = rt_set_mode; | 139 | cd->set_mode = rt_set_mode; |
140 | clockevents_register_device(cd); | 140 | clockevents_register_device(cd); |
diff --git a/arch/mips/sibyte/bcm1480/irq.c b/arch/mips/sibyte/bcm1480/irq.c index a35818ed4263..12b465d404df 100644 --- a/arch/mips/sibyte/bcm1480/irq.c +++ b/arch/mips/sibyte/bcm1480/irq.c | |||
@@ -50,7 +50,7 @@ static void enable_bcm1480_irq(unsigned int irq); | |||
50 | static void disable_bcm1480_irq(unsigned int irq); | 50 | static void disable_bcm1480_irq(unsigned int irq); |
51 | static void ack_bcm1480_irq(unsigned int irq); | 51 | static void ack_bcm1480_irq(unsigned int irq); |
52 | #ifdef CONFIG_SMP | 52 | #ifdef CONFIG_SMP |
53 | static void bcm1480_set_affinity(unsigned int irq, cpumask_t mask); | 53 | static void bcm1480_set_affinity(unsigned int irq, const struct cpumask *mask); |
54 | #endif | 54 | #endif |
55 | 55 | ||
56 | #ifdef CONFIG_PCI | 56 | #ifdef CONFIG_PCI |
@@ -109,7 +109,7 @@ void bcm1480_unmask_irq(int cpu, int irq) | |||
109 | } | 109 | } |
110 | 110 | ||
111 | #ifdef CONFIG_SMP | 111 | #ifdef CONFIG_SMP |
112 | static void bcm1480_set_affinity(unsigned int irq, cpumask_t mask) | 112 | static void bcm1480_set_affinity(unsigned int irq, const struct cpumask *mask) |
113 | { | 113 | { |
114 | int i = 0, old_cpu, cpu, int_on, k; | 114 | int i = 0, old_cpu, cpu, int_on, k; |
115 | u64 cur_ints; | 115 | u64 cur_ints; |
@@ -117,11 +117,11 @@ static void bcm1480_set_affinity(unsigned int irq, cpumask_t mask) | |||
117 | unsigned long flags; | 117 | unsigned long flags; |
118 | unsigned int irq_dirty; | 118 | unsigned int irq_dirty; |
119 | 119 | ||
120 | if (cpus_weight(mask) != 1) { | 120 | if (cpumask_weight(mask) != 1) { |
121 | printk("attempted to set irq affinity for irq %d to multiple CPUs\n", irq); | 121 | printk("attempted to set irq affinity for irq %d to multiple CPUs\n", irq); |
122 | return; | 122 | return; |
123 | } | 123 | } |
124 | i = first_cpu(mask); | 124 | i = cpumask_first(mask); |
125 | 125 | ||
126 | /* Convert logical CPU to physical CPU */ | 126 | /* Convert logical CPU to physical CPU */ |
127 | cpu = cpu_logical_map(i); | 127 | cpu = cpu_logical_map(i); |
diff --git a/arch/mips/sibyte/bcm1480/smp.c b/arch/mips/sibyte/bcm1480/smp.c index bd9eeb43ed0e..dddfda8e8294 100644 --- a/arch/mips/sibyte/bcm1480/smp.c +++ b/arch/mips/sibyte/bcm1480/smp.c | |||
@@ -136,7 +136,7 @@ static void __cpuinit bcm1480_boot_secondary(int cpu, struct task_struct *idle) | |||
136 | 136 | ||
137 | /* | 137 | /* |
138 | * Use CFE to find out how many CPUs are available, setting up | 138 | * Use CFE to find out how many CPUs are available, setting up |
139 | * phys_cpu_present_map and the logical/physical mappings. | 139 | * cpu_possible_map and the logical/physical mappings. |
140 | * XXXKW will the boot CPU ever not be physical 0? | 140 | * XXXKW will the boot CPU ever not be physical 0? |
141 | * | 141 | * |
142 | * Common setup before any secondaries are started | 142 | * Common setup before any secondaries are started |
@@ -145,14 +145,14 @@ static void __init bcm1480_smp_setup(void) | |||
145 | { | 145 | { |
146 | int i, num; | 146 | int i, num; |
147 | 147 | ||
148 | cpus_clear(phys_cpu_present_map); | 148 | cpus_clear(cpu_possible_map); |
149 | cpu_set(0, phys_cpu_present_map); | 149 | cpu_set(0, cpu_possible_map); |
150 | __cpu_number_map[0] = 0; | 150 | __cpu_number_map[0] = 0; |
151 | __cpu_logical_map[0] = 0; | 151 | __cpu_logical_map[0] = 0; |
152 | 152 | ||
153 | for (i = 1, num = 0; i < NR_CPUS; i++) { | 153 | for (i = 1, num = 0; i < NR_CPUS; i++) { |
154 | if (cfe_cpu_stop(i) == 0) { | 154 | if (cfe_cpu_stop(i) == 0) { |
155 | cpu_set(i, phys_cpu_present_map); | 155 | cpu_set(i, cpu_possible_map); |
156 | __cpu_number_map[i] = ++num; | 156 | __cpu_number_map[i] = ++num; |
157 | __cpu_logical_map[num] = i; | 157 | __cpu_logical_map[num] = i; |
158 | } | 158 | } |
diff --git a/arch/mips/sibyte/sb1250/irq.c b/arch/mips/sibyte/sb1250/irq.c index a5158483986e..808ac2959b8c 100644 --- a/arch/mips/sibyte/sb1250/irq.c +++ b/arch/mips/sibyte/sb1250/irq.c | |||
@@ -50,7 +50,7 @@ static void enable_sb1250_irq(unsigned int irq); | |||
50 | static void disable_sb1250_irq(unsigned int irq); | 50 | static void disable_sb1250_irq(unsigned int irq); |
51 | static void ack_sb1250_irq(unsigned int irq); | 51 | static void ack_sb1250_irq(unsigned int irq); |
52 | #ifdef CONFIG_SMP | 52 | #ifdef CONFIG_SMP |
53 | static void sb1250_set_affinity(unsigned int irq, cpumask_t mask); | 53 | static void sb1250_set_affinity(unsigned int irq, const struct cpumask *mask); |
54 | #endif | 54 | #endif |
55 | 55 | ||
56 | #ifdef CONFIG_SIBYTE_HAS_LDT | 56 | #ifdef CONFIG_SIBYTE_HAS_LDT |
@@ -103,16 +103,16 @@ void sb1250_unmask_irq(int cpu, int irq) | |||
103 | } | 103 | } |
104 | 104 | ||
105 | #ifdef CONFIG_SMP | 105 | #ifdef CONFIG_SMP |
106 | static void sb1250_set_affinity(unsigned int irq, cpumask_t mask) | 106 | static void sb1250_set_affinity(unsigned int irq, const struct cpumask *mask) |
107 | { | 107 | { |
108 | int i = 0, old_cpu, cpu, int_on; | 108 | int i = 0, old_cpu, cpu, int_on; |
109 | u64 cur_ints; | 109 | u64 cur_ints; |
110 | struct irq_desc *desc = irq_desc + irq; | 110 | struct irq_desc *desc = irq_desc + irq; |
111 | unsigned long flags; | 111 | unsigned long flags; |
112 | 112 | ||
113 | i = first_cpu(mask); | 113 | i = cpumask_first(mask); |
114 | 114 | ||
115 | if (cpus_weight(mask) > 1) { | 115 | if (cpumask_weight(mask) > 1) { |
116 | printk("attempted to set irq affinity for irq %d to multiple CPUs\n", irq); | 116 | printk("attempted to set irq affinity for irq %d to multiple CPUs\n", irq); |
117 | return; | 117 | return; |
118 | } | 118 | } |
diff --git a/arch/mips/sibyte/sb1250/smp.c b/arch/mips/sibyte/sb1250/smp.c index 0734b933e969..5950a288a7da 100644 --- a/arch/mips/sibyte/sb1250/smp.c +++ b/arch/mips/sibyte/sb1250/smp.c | |||
@@ -124,7 +124,7 @@ static void __cpuinit sb1250_boot_secondary(int cpu, struct task_struct *idle) | |||
124 | 124 | ||
125 | /* | 125 | /* |
126 | * Use CFE to find out how many CPUs are available, setting up | 126 | * Use CFE to find out how many CPUs are available, setting up |
127 | * phys_cpu_present_map and the logical/physical mappings. | 127 | * cpu_possible_map and the logical/physical mappings. |
128 | * XXXKW will the boot CPU ever not be physical 0? | 128 | * XXXKW will the boot CPU ever not be physical 0? |
129 | * | 129 | * |
130 | * Common setup before any secondaries are started | 130 | * Common setup before any secondaries are started |
@@ -133,14 +133,14 @@ static void __init sb1250_smp_setup(void) | |||
133 | { | 133 | { |
134 | int i, num; | 134 | int i, num; |
135 | 135 | ||
136 | cpus_clear(phys_cpu_present_map); | 136 | cpus_clear(cpu_possible_map); |
137 | cpu_set(0, phys_cpu_present_map); | 137 | cpu_set(0, cpu_possible_map); |
138 | __cpu_number_map[0] = 0; | 138 | __cpu_number_map[0] = 0; |
139 | __cpu_logical_map[0] = 0; | 139 | __cpu_logical_map[0] = 0; |
140 | 140 | ||
141 | for (i = 1, num = 0; i < NR_CPUS; i++) { | 141 | for (i = 1, num = 0; i < NR_CPUS; i++) { |
142 | if (cfe_cpu_stop(i) == 0) { | 142 | if (cfe_cpu_stop(i) == 0) { |
143 | cpu_set(i, phys_cpu_present_map); | 143 | cpu_set(i, cpu_possible_map); |
144 | __cpu_number_map[i] = ++num; | 144 | __cpu_number_map[i] = ++num; |
145 | __cpu_logical_map[num] = i; | 145 | __cpu_logical_map[num] = i; |
146 | } | 146 | } |
diff --git a/arch/mips/sni/time.c b/arch/mips/sni/time.c index 796e3ce28720..69f5f88711cc 100644 --- a/arch/mips/sni/time.c +++ b/arch/mips/sni/time.c | |||
@@ -80,7 +80,7 @@ static void __init sni_a20r_timer_setup(void) | |||
80 | struct irqaction *action = &a20r_irqaction; | 80 | struct irqaction *action = &a20r_irqaction; |
81 | unsigned int cpu = smp_processor_id(); | 81 | unsigned int cpu = smp_processor_id(); |
82 | 82 | ||
83 | cd->cpumask = cpumask_of_cpu(cpu); | 83 | cd->cpumask = cpumask_of(cpu); |
84 | clockevents_register_device(cd); | 84 | clockevents_register_device(cd); |
85 | action->dev_id = cd; | 85 | action->dev_id = cd; |
86 | setup_irq(SNI_A20R_IRQ_TIMER, &a20r_irqaction); | 86 | setup_irq(SNI_A20R_IRQ_TIMER, &a20r_irqaction); |
diff --git a/arch/parisc/Kconfig b/arch/parisc/Kconfig index 644a70b1b04e..aacf11d33723 100644 --- a/arch/parisc/Kconfig +++ b/arch/parisc/Kconfig | |||
@@ -11,6 +11,7 @@ config PARISC | |||
11 | select HAVE_OPROFILE | 11 | select HAVE_OPROFILE |
12 | select RTC_CLASS | 12 | select RTC_CLASS |
13 | select RTC_DRV_PARISC | 13 | select RTC_DRV_PARISC |
14 | select INIT_ALL_POSSIBLE | ||
14 | help | 15 | help |
15 | The PA-RISC microprocessor is designed by Hewlett-Packard and used | 16 | The PA-RISC microprocessor is designed by Hewlett-Packard and used |
16 | in many of their workstations & servers (HP9000 700 and 800 series, | 17 | in many of their workstations & servers (HP9000 700 and 800 series, |
diff --git a/arch/parisc/kernel/irq.c b/arch/parisc/kernel/irq.c index 23ef950df008..4cea935e2f99 100644 --- a/arch/parisc/kernel/irq.c +++ b/arch/parisc/kernel/irq.c | |||
@@ -131,12 +131,12 @@ int cpu_check_affinity(unsigned int irq, cpumask_t *dest) | |||
131 | return 0; | 131 | return 0; |
132 | } | 132 | } |
133 | 133 | ||
134 | static void cpu_set_affinity_irq(unsigned int irq, cpumask_t dest) | 134 | static void cpu_set_affinity_irq(unsigned int irq, const struct cpumask *dest) |
135 | { | 135 | { |
136 | if (cpu_check_affinity(irq, &dest)) | 136 | if (cpu_check_affinity(irq, dest)) |
137 | return; | 137 | return; |
138 | 138 | ||
139 | irq_desc[irq].affinity = dest; | 139 | irq_desc[irq].affinity = *dest; |
140 | } | 140 | } |
141 | #endif | 141 | #endif |
142 | 142 | ||
diff --git a/arch/parisc/kernel/smp.c b/arch/parisc/kernel/smp.c index d47f3975c9c6..80bc000523fa 100644 --- a/arch/parisc/kernel/smp.c +++ b/arch/parisc/kernel/smp.c | |||
@@ -67,21 +67,6 @@ static volatile int cpu_now_booting __read_mostly = 0; /* track which CPU is boo | |||
67 | 67 | ||
68 | static int parisc_max_cpus __read_mostly = 1; | 68 | static int parisc_max_cpus __read_mostly = 1; |
69 | 69 | ||
70 | /* online cpus are ones that we've managed to bring up completely | ||
71 | * possible cpus are all valid cpu | ||
72 | * present cpus are all detected cpu | ||
73 | * | ||
74 | * On startup we bring up the "possible" cpus. Since we discover | ||
75 | * CPUs later, we add them as hotplug, so the possible cpu mask is | ||
76 | * empty in the beginning. | ||
77 | */ | ||
78 | |||
79 | cpumask_t cpu_online_map __read_mostly = CPU_MASK_NONE; /* Bitmap of online CPUs */ | ||
80 | cpumask_t cpu_possible_map __read_mostly = CPU_MASK_ALL; /* Bitmap of Present CPUs */ | ||
81 | |||
82 | EXPORT_SYMBOL(cpu_online_map); | ||
83 | EXPORT_SYMBOL(cpu_possible_map); | ||
84 | |||
85 | DEFINE_PER_CPU(spinlock_t, ipi_lock) = SPIN_LOCK_UNLOCKED; | 70 | DEFINE_PER_CPU(spinlock_t, ipi_lock) = SPIN_LOCK_UNLOCKED; |
86 | 71 | ||
87 | enum ipi_message_type { | 72 | enum ipi_message_type { |
diff --git a/arch/powerpc/include/asm/disassemble.h b/arch/powerpc/include/asm/disassemble.h new file mode 100644 index 000000000000..9b198d1b3b2b --- /dev/null +++ b/arch/powerpc/include/asm/disassemble.h | |||
@@ -0,0 +1,80 @@ | |||
1 | /* | ||
2 | * This program is free software; you can redistribute it and/or modify | ||
3 | * it under the terms of the GNU General Public License, version 2, as | ||
4 | * published by the Free Software Foundation. | ||
5 | * | ||
6 | * This program is distributed in the hope that it will be useful, | ||
7 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
8 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
9 | * GNU General Public License for more details. | ||
10 | * | ||
11 | * You should have received a copy of the GNU General Public License | ||
12 | * along with this program; if not, write to the Free Software | ||
13 | * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. | ||
14 | * | ||
15 | * Copyright IBM Corp. 2008 | ||
16 | * | ||
17 | * Authors: Hollis Blanchard <hollisb@us.ibm.com> | ||
18 | */ | ||
19 | |||
20 | #ifndef __ASM_PPC_DISASSEMBLE_H__ | ||
21 | #define __ASM_PPC_DISASSEMBLE_H__ | ||
22 | |||
23 | #include <linux/types.h> | ||
24 | |||
25 | static inline unsigned int get_op(u32 inst) | ||
26 | { | ||
27 | return inst >> 26; | ||
28 | } | ||
29 | |||
30 | static inline unsigned int get_xop(u32 inst) | ||
31 | { | ||
32 | return (inst >> 1) & 0x3ff; | ||
33 | } | ||
34 | |||
35 | static inline unsigned int get_sprn(u32 inst) | ||
36 | { | ||
37 | return ((inst >> 16) & 0x1f) | ((inst >> 6) & 0x3e0); | ||
38 | } | ||
39 | |||
40 | static inline unsigned int get_dcrn(u32 inst) | ||
41 | { | ||
42 | return ((inst >> 16) & 0x1f) | ((inst >> 6) & 0x3e0); | ||
43 | } | ||
44 | |||
45 | static inline unsigned int get_rt(u32 inst) | ||
46 | { | ||
47 | return (inst >> 21) & 0x1f; | ||
48 | } | ||
49 | |||
50 | static inline unsigned int get_rs(u32 inst) | ||
51 | { | ||
52 | return (inst >> 21) & 0x1f; | ||
53 | } | ||
54 | |||
55 | static inline unsigned int get_ra(u32 inst) | ||
56 | { | ||
57 | return (inst >> 16) & 0x1f; | ||
58 | } | ||
59 | |||
60 | static inline unsigned int get_rb(u32 inst) | ||
61 | { | ||
62 | return (inst >> 11) & 0x1f; | ||
63 | } | ||
64 | |||
65 | static inline unsigned int get_rc(u32 inst) | ||
66 | { | ||
67 | return inst & 0x1; | ||
68 | } | ||
69 | |||
70 | static inline unsigned int get_ws(u32 inst) | ||
71 | { | ||
72 | return (inst >> 11) & 0x1f; | ||
73 | } | ||
74 | |||
75 | static inline unsigned int get_d(u32 inst) | ||
76 | { | ||
77 | return inst & 0xffff; | ||
78 | } | ||
79 | |||
80 | #endif /* __ASM_PPC_DISASSEMBLE_H__ */ | ||
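[Editor's note] These accessors slice the big-endian PowerPC instruction word into its standard fields. A self-contained userspace sketch decoding one known encoding -- mfspr r3, SPRG0, whose word is 0x7c7042a6 (primary opcode 31, extended opcode 339, split 5+5 SPR field):

    #include <stdio.h>
    #include <stdint.h>

    typedef uint32_t u32;

    /* same shifts as the header above, repeated so this compiles alone */
    static unsigned int get_op(u32 inst)  { return inst >> 26; }
    static unsigned int get_xop(u32 inst) { return (inst >> 1) & 0x3ff; }
    static unsigned int get_rt(u32 inst)  { return (inst >> 21) & 0x1f; }
    static unsigned int get_sprn(u32 inst)
    {
            /* the SPR number is stored as two swapped 5-bit halves */
            return ((inst >> 16) & 0x1f) | ((inst >> 6) & 0x3e0);
    }

    int main(void)
    {
            u32 inst = 0x7c7042a6; /* mfspr r3, SPRG0 (SPR 272) */
            printf("op=%u xop=%u rt=%u sprn=%u\n",
                   get_op(inst), get_xop(inst), get_rt(inst), get_sprn(inst));
            /* prints: op=31 xop=339 rt=3 sprn=272 */
            return 0;
    }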
diff --git a/arch/powerpc/include/asm/kvm_44x.h b/arch/powerpc/include/asm/kvm_44x.h new file mode 100644 index 000000000000..f49031b632ca --- /dev/null +++ b/arch/powerpc/include/asm/kvm_44x.h | |||
@@ -0,0 +1,61 @@ | |||
1 | /* | ||
2 | * This program is free software; you can redistribute it and/or modify | ||
3 | * it under the terms of the GNU General Public License, version 2, as | ||
4 | * published by the Free Software Foundation. | ||
5 | * | ||
6 | * This program is distributed in the hope that it will be useful, | ||
7 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
8 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
9 | * GNU General Public License for more details. | ||
10 | * | ||
11 | * You should have received a copy of the GNU General Public License | ||
12 | * along with this program; if not, write to the Free Software | ||
13 | * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. | ||
14 | * | ||
15 | * Copyright IBM Corp. 2008 | ||
16 | * | ||
17 | * Authors: Hollis Blanchard <hollisb@us.ibm.com> | ||
18 | */ | ||
19 | |||
20 | #ifndef __ASM_44X_H__ | ||
21 | #define __ASM_44X_H__ | ||
22 | |||
23 | #include <linux/kvm_host.h> | ||
24 | |||
25 | #define PPC44x_TLB_SIZE 64 | ||
26 | |||
27 | /* If the guest is expecting it, this can be as large as we like; we'd just | ||
28 | * need to find some way of advertising it. */ | ||
29 | #define KVM44x_GUEST_TLB_SIZE 64 | ||
30 | |||
31 | struct kvmppc_44x_shadow_ref { | ||
32 | struct page *page; | ||
33 | u16 gtlb_index; | ||
34 | u8 writeable; | ||
35 | u8 tid; | ||
36 | }; | ||
37 | |||
38 | struct kvmppc_vcpu_44x { | ||
39 | /* Unmodified copy of the guest's TLB. */ | ||
40 | struct kvmppc_44x_tlbe guest_tlb[KVM44x_GUEST_TLB_SIZE]; | ||
41 | |||
42 | /* References to guest pages in the hardware TLB. */ | ||
43 | struct kvmppc_44x_shadow_ref shadow_refs[PPC44x_TLB_SIZE]; | ||
44 | |||
45 | /* State of the shadow TLB at guest context switch time. */ | ||
46 | struct kvmppc_44x_tlbe shadow_tlb[PPC44x_TLB_SIZE]; | ||
47 | u8 shadow_tlb_mod[PPC44x_TLB_SIZE]; | ||
48 | |||
49 | struct kvm_vcpu vcpu; | ||
50 | }; | ||
51 | |||
52 | static inline struct kvmppc_vcpu_44x *to_44x(struct kvm_vcpu *vcpu) | ||
53 | { | ||
54 | return container_of(vcpu, struct kvmppc_vcpu_44x, vcpu); | ||
55 | } | ||
56 | |||
57 | void kvmppc_set_pid(struct kvm_vcpu *vcpu, u32 new_pid); | ||
58 | void kvmppc_44x_tlb_put(struct kvm_vcpu *vcpu); | ||
59 | void kvmppc_44x_tlb_load(struct kvm_vcpu *vcpu); | ||
60 | |||
61 | #endif /* __ASM_44X_H__ */ | ||
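
to_44x() is the standard container_of() idiom: struct kvm_vcpu is embedded inside the core-specific struct kvmppc_vcpu_44x, so a pointer to the generic member converts back to its container by subtracting the member's offset. A reduced sketch of the pattern (the kernel macro adds type checking on top of this; the demo struct names are illustrative):

#include <assert.h>
#include <stddef.h>

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct kvm_vcpu { int id; };
struct vcpu_44x_demo {
	int guest_tlb[4];		/* core-private state */
	struct kvm_vcpu vcpu;		/* embedded generic part */
};

int main(void)
{
	struct vcpu_44x_demo v = { .vcpu = { .id = 7 } };
	struct kvm_vcpu *generic = &v.vcpu;

	/* Recover the container from a pointer to the embedded member. */
	assert(container_of(generic, struct vcpu_44x_demo, vcpu) == &v);
	return 0;
}

kvmppc_core_vcpu_create() in 44x.c below relies on the same layout: it allocates the container and hands the generic &vcpu_44x->vcpu back to common KVM code.
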
diff --git a/arch/powerpc/include/asm/kvm_host.h b/arch/powerpc/include/asm/kvm_host.h index 34b52b7180cd..c1e436fe7738 100644 --- a/arch/powerpc/include/asm/kvm_host.h +++ b/arch/powerpc/include/asm/kvm_host.h | |||
@@ -64,27 +64,58 @@ struct kvm_vcpu_stat { | |||
64 | u32 halt_wakeup; | 64 | u32 halt_wakeup; |
65 | }; | 65 | }; |
66 | 66 | ||
67 | struct tlbe { | 67 | struct kvmppc_44x_tlbe { |
68 | u32 tid; /* Only the low 8 bits are used. */ | 68 | u32 tid; /* Only the low 8 bits are used. */ |
69 | u32 word0; | 69 | u32 word0; |
70 | u32 word1; | 70 | u32 word1; |
71 | u32 word2; | 71 | u32 word2; |
72 | }; | 72 | }; |
73 | 73 | ||
74 | struct kvm_arch { | 74 | enum kvm_exit_types { |
75 | MMIO_EXITS, | ||
76 | DCR_EXITS, | ||
77 | SIGNAL_EXITS, | ||
78 | ITLB_REAL_MISS_EXITS, | ||
79 | ITLB_VIRT_MISS_EXITS, | ||
80 | DTLB_REAL_MISS_EXITS, | ||
81 | DTLB_VIRT_MISS_EXITS, | ||
82 | SYSCALL_EXITS, | ||
83 | ISI_EXITS, | ||
84 | DSI_EXITS, | ||
85 | EMULATED_INST_EXITS, | ||
86 | EMULATED_MTMSRWE_EXITS, | ||
87 | EMULATED_WRTEE_EXITS, | ||
88 | EMULATED_MTSPR_EXITS, | ||
89 | EMULATED_MFSPR_EXITS, | ||
90 | EMULATED_MTMSR_EXITS, | ||
91 | EMULATED_MFMSR_EXITS, | ||
92 | EMULATED_TLBSX_EXITS, | ||
93 | EMULATED_TLBWE_EXITS, | ||
94 | EMULATED_RFI_EXITS, | ||
95 | DEC_EXITS, | ||
96 | EXT_INTR_EXITS, | ||
97 | HALT_WAKEUP, | ||
98 | USR_PR_INST, | ||
99 | FP_UNAVAIL, | ||
100 | DEBUG_EXITS, | ||
101 | TIMEINGUEST, | ||
102 | __NUMBER_OF_KVM_EXIT_TYPES | ||
75 | }; | 103 | }; |
76 | 104 | ||
77 | struct kvm_vcpu_arch { | 105 | /* allow access to big endian 32bit upper/lower parts and 64bit var */ |
78 | /* Unmodified copy of the guest's TLB. */ | 106 | struct kvmppc_exit_timing { |
79 | struct tlbe guest_tlb[PPC44x_TLB_SIZE]; | 107 | union { |
80 | /* TLB that's actually used when the guest is running. */ | 108 | u64 tv64; |
81 | struct tlbe shadow_tlb[PPC44x_TLB_SIZE]; | 109 | struct { |
82 | /* Pages which are referenced in the shadow TLB. */ | 110 | u32 tbu, tbl; |
83 | struct page *shadow_pages[PPC44x_TLB_SIZE]; | 111 | } tv32; |
112 | }; | ||
113 | }; | ||
84 | 114 | ||
85 | /* Track which TLB entries we've modified in the current exit. */ | 115 | struct kvm_arch { |
86 | u8 shadow_tlb_mod[PPC44x_TLB_SIZE]; | 116 | }; |
87 | 117 | ||
118 | struct kvm_vcpu_arch { | ||
88 | u32 host_stack; | 119 | u32 host_stack; |
89 | u32 host_pid; | 120 | u32 host_pid; |
90 | u32 host_dbcr0; | 121 | u32 host_dbcr0; |
@@ -94,32 +125,32 @@ struct kvm_vcpu_arch { | |||
94 | u32 host_msr; | 125 | u32 host_msr; |
95 | 126 | ||
96 | u64 fpr[32]; | 127 | u64 fpr[32]; |
97 | u32 gpr[32]; | 128 | ulong gpr[32]; |
98 | 129 | ||
99 | u32 pc; | 130 | ulong pc; |
100 | u32 cr; | 131 | u32 cr; |
101 | u32 ctr; | 132 | ulong ctr; |
102 | u32 lr; | 133 | ulong lr; |
103 | u32 xer; | 134 | ulong xer; |
104 | 135 | ||
105 | u32 msr; | 136 | ulong msr; |
106 | u32 mmucr; | 137 | u32 mmucr; |
107 | u32 sprg0; | 138 | ulong sprg0; |
108 | u32 sprg1; | 139 | ulong sprg1; |
109 | u32 sprg2; | 140 | ulong sprg2; |
110 | u32 sprg3; | 141 | ulong sprg3; |
111 | u32 sprg4; | 142 | ulong sprg4; |
112 | u32 sprg5; | 143 | ulong sprg5; |
113 | u32 sprg6; | 144 | ulong sprg6; |
114 | u32 sprg7; | 145 | ulong sprg7; |
115 | u32 srr0; | 146 | ulong srr0; |
116 | u32 srr1; | 147 | ulong srr1; |
117 | u32 csrr0; | 148 | ulong csrr0; |
118 | u32 csrr1; | 149 | ulong csrr1; |
119 | u32 dsrr0; | 150 | ulong dsrr0; |
120 | u32 dsrr1; | 151 | ulong dsrr1; |
121 | u32 dear; | 152 | ulong dear; |
122 | u32 esr; | 153 | ulong esr; |
123 | u32 dec; | 154 | u32 dec; |
124 | u32 decar; | 155 | u32 decar; |
125 | u32 tbl; | 156 | u32 tbl; |
@@ -127,7 +158,7 @@ struct kvm_vcpu_arch { | |||
127 | u32 tcr; | 158 | u32 tcr; |
128 | u32 tsr; | 159 | u32 tsr; |
129 | u32 ivor[16]; | 160 | u32 ivor[16]; |
130 | u32 ivpr; | 161 | ulong ivpr; |
131 | u32 pir; | 162 | u32 pir; |
132 | 163 | ||
133 | u32 shadow_pid; | 164 | u32 shadow_pid; |
@@ -140,9 +171,22 @@ struct kvm_vcpu_arch { | |||
140 | u32 dbcr0; | 171 | u32 dbcr0; |
141 | u32 dbcr1; | 172 | u32 dbcr1; |
142 | 173 | ||
174 | #ifdef CONFIG_KVM_EXIT_TIMING | ||
175 | struct kvmppc_exit_timing timing_exit; | ||
176 | struct kvmppc_exit_timing timing_last_enter; | ||
177 | u32 last_exit_type; | ||
178 | u32 timing_count_type[__NUMBER_OF_KVM_EXIT_TYPES]; | ||
179 | u64 timing_sum_duration[__NUMBER_OF_KVM_EXIT_TYPES]; | ||
180 | u64 timing_sum_quad_duration[__NUMBER_OF_KVM_EXIT_TYPES]; | ||
181 | u64 timing_min_duration[__NUMBER_OF_KVM_EXIT_TYPES]; | ||
182 | u64 timing_max_duration[__NUMBER_OF_KVM_EXIT_TYPES]; | ||
183 | u64 timing_last_exit; | ||
184 | struct dentry *debugfs_exit_timing; | ||
185 | #endif | ||
186 | |||
143 | u32 last_inst; | 187 | u32 last_inst; |
144 | u32 fault_dear; | 188 | ulong fault_dear; |
145 | u32 fault_esr; | 189 | ulong fault_esr; |
146 | gpa_t paddr_accessed; | 190 | gpa_t paddr_accessed; |
147 | 191 | ||
148 | u8 io_gpr; /* GPR used as IO source/target */ | 192 | u8 io_gpr; /* GPR used as IO source/target */ |
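
The timing fields keep constant-space running statistics: per exit type, a count, a sum of durations, and a sum of squared durations are enough to recover the mean and variance later, and the tv32 view works because on a big-endian 32-bit system the timebase-upper word is the first half of the u64. A hedged sketch of the arithmetic (illustrative names, not the kernel's reporting code):

#include <math.h>
#include <stdint.h>
#include <stdio.h>

struct exit_stats {			/* one slot of the timing arrays */
	uint32_t count;			/* timing_count_type[] */
	uint64_t sum;			/* timing_sum_duration[] */
	uint64_t sum_quad;		/* timing_sum_quad_duration[] */
};

static void account(struct exit_stats *s, uint64_t duration)
{
	s->count++;
	s->sum += duration;
	s->sum_quad += duration * duration;
}

int main(void)
{
	struct exit_stats s = { 0, 0, 0 };
	uint64_t samples[] = { 100, 120, 80 };	/* fake exit durations */

	for (int i = 0; i < 3; i++)
		account(&s, samples[i]);

	double mean = (double)s.sum / s.count;
	double var = (double)s.sum_quad / s.count - mean * mean;	/* E[x^2]-E[x]^2 */
	printf("mean %.1f, stddev %.1f\n", mean, sqrt(var));
	return 0;
}
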
diff --git a/arch/powerpc/include/asm/kvm_ppc.h b/arch/powerpc/include/asm/kvm_ppc.h index bb62ad876de3..36d2a50a8487 100644 --- a/arch/powerpc/include/asm/kvm_ppc.h +++ b/arch/powerpc/include/asm/kvm_ppc.h | |||
@@ -29,11 +29,6 @@ | |||
29 | #include <linux/kvm_types.h> | 29 | #include <linux/kvm_types.h> |
30 | #include <linux/kvm_host.h> | 30 | #include <linux/kvm_host.h> |
31 | 31 | ||
32 | struct kvm_tlb { | ||
33 | struct tlbe guest_tlb[PPC44x_TLB_SIZE]; | ||
34 | struct tlbe shadow_tlb[PPC44x_TLB_SIZE]; | ||
35 | }; | ||
36 | |||
37 | enum emulation_result { | 32 | enum emulation_result { |
38 | EMULATE_DONE, /* no further processing */ | 33 | EMULATE_DONE, /* no further processing */ |
39 | EMULATE_DO_MMIO, /* kvm_run filled with MMIO request */ | 34 | EMULATE_DO_MMIO, /* kvm_run filled with MMIO request */ |
@@ -41,9 +36,6 @@ enum emulation_result { | |||
41 | EMULATE_FAIL, /* can't emulate this instruction */ | 36 | EMULATE_FAIL, /* can't emulate this instruction */ |
42 | }; | 37 | }; |
43 | 38 | ||
44 | extern const unsigned char exception_priority[]; | ||
45 | extern const unsigned char priority_exception[]; | ||
46 | |||
47 | extern int __kvmppc_vcpu_run(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu); | 39 | extern int __kvmppc_vcpu_run(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu); |
48 | extern char kvmppc_handlers_start[]; | 40 | extern char kvmppc_handlers_start[]; |
49 | extern unsigned long kvmppc_handler_len; | 41 | extern unsigned long kvmppc_handler_len; |
@@ -58,51 +50,44 @@ extern int kvmppc_handle_store(struct kvm_run *run, struct kvm_vcpu *vcpu, | |||
58 | extern int kvmppc_emulate_instruction(struct kvm_run *run, | 50 | extern int kvmppc_emulate_instruction(struct kvm_run *run, |
59 | struct kvm_vcpu *vcpu); | 51 | struct kvm_vcpu *vcpu); |
60 | extern int kvmppc_emulate_mmio(struct kvm_run *run, struct kvm_vcpu *vcpu); | 52 | extern int kvmppc_emulate_mmio(struct kvm_run *run, struct kvm_vcpu *vcpu); |
53 | extern void kvmppc_emulate_dec(struct kvm_vcpu *vcpu); | ||
61 | 54 | ||
62 | extern void kvmppc_mmu_map(struct kvm_vcpu *vcpu, u64 gvaddr, gfn_t gfn, | 55 | extern void kvmppc_mmu_map(struct kvm_vcpu *vcpu, u64 gvaddr, gpa_t gpaddr, |
63 | u64 asid, u32 flags); | 56 | u64 asid, u32 flags, u32 max_bytes, |
64 | extern void kvmppc_mmu_invalidate(struct kvm_vcpu *vcpu, gva_t eaddr, | 57 | unsigned int gtlb_idx); |
65 | gva_t eend, u32 asid); | ||
66 | extern void kvmppc_mmu_priv_switch(struct kvm_vcpu *vcpu, int usermode); | 58 | extern void kvmppc_mmu_priv_switch(struct kvm_vcpu *vcpu, int usermode); |
67 | extern void kvmppc_mmu_switch_pid(struct kvm_vcpu *vcpu, u32 pid); | 59 | extern void kvmppc_mmu_switch_pid(struct kvm_vcpu *vcpu, u32 pid); |
68 | 60 | ||
69 | /* XXX Book E specific */ | 61 | /* Core-specific hooks */ |
70 | extern void kvmppc_tlbe_set_modified(struct kvm_vcpu *vcpu, unsigned int i); | 62 | |
71 | 63 | extern struct kvm_vcpu *kvmppc_core_vcpu_create(struct kvm *kvm, | |
72 | extern void kvmppc_check_and_deliver_interrupts(struct kvm_vcpu *vcpu); | 64 | unsigned int id); |
73 | 65 | extern void kvmppc_core_vcpu_free(struct kvm_vcpu *vcpu); | |
74 | static inline void kvmppc_queue_exception(struct kvm_vcpu *vcpu, int exception) | 66 | extern int kvmppc_core_vcpu_setup(struct kvm_vcpu *vcpu); |
75 | { | 67 | extern int kvmppc_core_check_processor_compat(void); |
76 | unsigned int priority = exception_priority[exception]; | 68 | extern int kvmppc_core_vcpu_translate(struct kvm_vcpu *vcpu, |
77 | set_bit(priority, &vcpu->arch.pending_exceptions); | 69 | struct kvm_translation *tr); |
78 | } | 70 | |
79 | 71 | extern void kvmppc_core_vcpu_load(struct kvm_vcpu *vcpu, int cpu); | |
80 | static inline void kvmppc_clear_exception(struct kvm_vcpu *vcpu, int exception) | 72 | extern void kvmppc_core_vcpu_put(struct kvm_vcpu *vcpu); |
81 | { | 73 | |
82 | unsigned int priority = exception_priority[exception]; | 74 | extern void kvmppc_core_load_guest_debugstate(struct kvm_vcpu *vcpu); |
83 | clear_bit(priority, &vcpu->arch.pending_exceptions); | 75 | extern void kvmppc_core_load_host_debugstate(struct kvm_vcpu *vcpu); |
84 | } | 76 | |
85 | 77 | extern void kvmppc_core_deliver_interrupts(struct kvm_vcpu *vcpu); | |
86 | /* Helper function for "full" MSR writes. No need to call this if only EE is | 78 | extern int kvmppc_core_pending_dec(struct kvm_vcpu *vcpu); |
87 | * changing. */ | 79 | extern void kvmppc_core_queue_program(struct kvm_vcpu *vcpu); |
88 | static inline void kvmppc_set_msr(struct kvm_vcpu *vcpu, u32 new_msr) | 80 | extern void kvmppc_core_queue_dec(struct kvm_vcpu *vcpu); |
89 | { | 81 | extern void kvmppc_core_queue_external(struct kvm_vcpu *vcpu, |
90 | if ((new_msr & MSR_PR) != (vcpu->arch.msr & MSR_PR)) | 82 | struct kvm_interrupt *irq); |
91 | kvmppc_mmu_priv_switch(vcpu, new_msr & MSR_PR); | 83 | |
92 | 84 | extern int kvmppc_core_emulate_op(struct kvm_run *run, struct kvm_vcpu *vcpu, | |
93 | vcpu->arch.msr = new_msr; | 85 | unsigned int op, int *advance); |
94 | 86 | extern int kvmppc_core_emulate_mtspr(struct kvm_vcpu *vcpu, int sprn, int rs); | |
95 | if (vcpu->arch.msr & MSR_WE) | 87 | extern int kvmppc_core_emulate_mfspr(struct kvm_vcpu *vcpu, int sprn, int rt); |
96 | kvm_vcpu_block(vcpu); | 88 | |
97 | } | 89 | extern int kvmppc_booke_init(void); |
98 | 90 | extern void kvmppc_booke_exit(void); | |
99 | static inline void kvmppc_set_pid(struct kvm_vcpu *vcpu, u32 new_pid) | ||
100 | { | ||
101 | if (vcpu->arch.pid != new_pid) { | ||
102 | vcpu->arch.pid = new_pid; | ||
103 | vcpu->arch.swap_pid = 1; | ||
104 | } | ||
105 | } | ||
106 | 91 | ||
107 | extern void kvmppc_core_destroy_mmu(struct kvm_vcpu *vcpu); | 92 | extern void kvmppc_core_destroy_mmu(struct kvm_vcpu *vcpu); |
108 | 93 | ||
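
This header now draws a line between core-agnostic KVM code and per-core hooks: the exception-priority tables and the inline kvmppc_set_msr()/kvmppc_set_pid() helpers move behind kvmppc_core_*() entry points that each core (here, 440) implements. A simplified sketch of the dispatch shape this implies for emulation, with stubs standing in for the real decoders: generic decode first, core-specific decode as the fallback.

#include <stdio.h>

enum emulation_result { EMULATE_DONE, EMULATE_FAIL };

static enum emulation_result emulate_common(unsigned int inst)
{
	return EMULATE_FAIL;		/* pretend: not a common instruction */
}

static enum emulation_result core_emulate_op(unsigned int inst)
{
	return EMULATE_DONE;		/* pretend: the 440 decoder took it */
}

static enum emulation_result emulate(unsigned int inst)
{
	enum emulation_result r = emulate_common(inst);

	if (r == EMULATE_FAIL)		/* defer unknown opcodes to the core */
		r = core_emulate_op(inst);
	return r;
}

int main(void)
{
	/* 0x7c0007a4: opcode 31, xop 978, i.e. the 440's tlbwe. */
	printf("%s\n", emulate(0x7c0007a4) == EMULATE_DONE
	       ? "core hook handled it" : "failed");
	return 0;
}
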
diff --git a/arch/powerpc/include/asm/mmu-44x.h b/arch/powerpc/include/asm/mmu-44x.h index 8a97cfb08b7e..27cc6fdcd3b7 100644 --- a/arch/powerpc/include/asm/mmu-44x.h +++ b/arch/powerpc/include/asm/mmu-44x.h | |||
@@ -56,6 +56,7 @@ | |||
56 | #ifndef __ASSEMBLY__ | 56 | #ifndef __ASSEMBLY__ |
57 | 57 | ||
58 | extern unsigned int tlb_44x_hwater; | 58 | extern unsigned int tlb_44x_hwater; |
59 | extern unsigned int tlb_44x_index; | ||
59 | 60 | ||
60 | typedef struct { | 61 | typedef struct { |
61 | unsigned int id; | 62 | unsigned int id; |
diff --git a/arch/powerpc/include/asm/topology.h b/arch/powerpc/include/asm/topology.h index c32da6f97999..373fca394a54 100644 --- a/arch/powerpc/include/asm/topology.h +++ b/arch/powerpc/include/asm/topology.h | |||
@@ -48,7 +48,6 @@ static inline int pcibus_to_node(struct pci_bus *bus) | |||
48 | 48 | ||
49 | /* sched_domains SD_NODE_INIT for PPC64 machines */ | 49 | /* sched_domains SD_NODE_INIT for PPC64 machines */ |
50 | #define SD_NODE_INIT (struct sched_domain) { \ | 50 | #define SD_NODE_INIT (struct sched_domain) { \ |
51 | .span = CPU_MASK_NONE, \ | ||
52 | .parent = NULL, \ | 51 | .parent = NULL, \ |
53 | .child = NULL, \ | 52 | .child = NULL, \ |
54 | .groups = NULL, \ | 53 | .groups = NULL, \ |
diff --git a/arch/powerpc/kernel/asm-offsets.c b/arch/powerpc/kernel/asm-offsets.c index 661d07d2146b..9937fe44555f 100644 --- a/arch/powerpc/kernel/asm-offsets.c +++ b/arch/powerpc/kernel/asm-offsets.c | |||
@@ -23,9 +23,6 @@ | |||
23 | #include <linux/mm.h> | 23 | #include <linux/mm.h> |
24 | #include <linux/suspend.h> | 24 | #include <linux/suspend.h> |
25 | #include <linux/hrtimer.h> | 25 | #include <linux/hrtimer.h> |
26 | #ifdef CONFIG_KVM | ||
27 | #include <linux/kvm_host.h> | ||
28 | #endif | ||
29 | #ifdef CONFIG_PPC64 | 26 | #ifdef CONFIG_PPC64 |
30 | #include <linux/time.h> | 27 | #include <linux/time.h> |
31 | #include <linux/hardirq.h> | 28 | #include <linux/hardirq.h> |
@@ -51,6 +48,9 @@ | |||
51 | #ifdef CONFIG_PPC_ISERIES | 48 | #ifdef CONFIG_PPC_ISERIES |
52 | #include <asm/iseries/alpaca.h> | 49 | #include <asm/iseries/alpaca.h> |
53 | #endif | 50 | #endif |
51 | #ifdef CONFIG_KVM | ||
52 | #include <asm/kvm_44x.h> | ||
53 | #endif | ||
54 | 54 | ||
55 | #if defined(CONFIG_BOOKE) || defined(CONFIG_40x) | 55 | #if defined(CONFIG_BOOKE) || defined(CONFIG_40x) |
56 | #include "head_booke.h" | 56 | #include "head_booke.h" |
@@ -357,12 +357,10 @@ int main(void) | |||
357 | DEFINE(PTE_SIZE, sizeof(pte_t)); | 357 | DEFINE(PTE_SIZE, sizeof(pte_t)); |
358 | 358 | ||
359 | #ifdef CONFIG_KVM | 359 | #ifdef CONFIG_KVM |
360 | DEFINE(TLBE_BYTES, sizeof(struct tlbe)); | 360 | DEFINE(TLBE_BYTES, sizeof(struct kvmppc_44x_tlbe)); |
361 | 361 | ||
362 | DEFINE(VCPU_HOST_STACK, offsetof(struct kvm_vcpu, arch.host_stack)); | 362 | DEFINE(VCPU_HOST_STACK, offsetof(struct kvm_vcpu, arch.host_stack)); |
363 | DEFINE(VCPU_HOST_PID, offsetof(struct kvm_vcpu, arch.host_pid)); | 363 | DEFINE(VCPU_HOST_PID, offsetof(struct kvm_vcpu, arch.host_pid)); |
364 | DEFINE(VCPU_SHADOW_TLB, offsetof(struct kvm_vcpu, arch.shadow_tlb)); | ||
365 | DEFINE(VCPU_SHADOW_MOD, offsetof(struct kvm_vcpu, arch.shadow_tlb_mod)); | ||
366 | DEFINE(VCPU_GPRS, offsetof(struct kvm_vcpu, arch.gpr)); | 364 | DEFINE(VCPU_GPRS, offsetof(struct kvm_vcpu, arch.gpr)); |
367 | DEFINE(VCPU_LR, offsetof(struct kvm_vcpu, arch.lr)); | 365 | DEFINE(VCPU_LR, offsetof(struct kvm_vcpu, arch.lr)); |
368 | DEFINE(VCPU_CR, offsetof(struct kvm_vcpu, arch.cr)); | 366 | DEFINE(VCPU_CR, offsetof(struct kvm_vcpu, arch.cr)); |
@@ -385,5 +383,16 @@ int main(void) | |||
385 | DEFINE(PTE_T_LOG2, PTE_T_LOG2); | 383 | DEFINE(PTE_T_LOG2, PTE_T_LOG2); |
386 | #endif | 384 | #endif |
387 | 385 | ||
386 | #ifdef CONFIG_KVM_EXIT_TIMING | ||
387 | DEFINE(VCPU_TIMING_EXIT_TBU, offsetof(struct kvm_vcpu, | ||
388 | arch.timing_exit.tv32.tbu)); | ||
389 | DEFINE(VCPU_TIMING_EXIT_TBL, offsetof(struct kvm_vcpu, | ||
390 | arch.timing_exit.tv32.tbl)); | ||
391 | DEFINE(VCPU_TIMING_LAST_ENTER_TBU, offsetof(struct kvm_vcpu, | ||
392 | arch.timing_last_enter.tv32.tbu)); | ||
393 | DEFINE(VCPU_TIMING_LAST_ENTER_TBL, offsetof(struct kvm_vcpu, | ||
394 | arch.timing_last_enter.tv32.tbl)); | ||
395 | #endif | ||
396 | |||
388 | return 0; | 397 | return 0; |
389 | } | 398 | } |
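
asm-offsets.c is compiled but never linked: each DEFINE() emits its computed offsetof() value as a marker line in the generated assembly, and the build turns those markers into #define constants (such as VCPU_TIMING_EXIT_TBU) that handwritten assembly can use. A reduced sketch of the mechanism, using the DEFINE macro from include/linux/kbuild.h and illustrative demo structs:

#include <stddef.h>

/* Emits "->SYM value" into the compiler's .s output; the kbuild
 * scripts later rewrite each marker into "#define SYM value". */
#define DEFINE(sym, val) \
	asm volatile("\n->" #sym " %0 " #val : : "i" (val))

struct vcpu_arch_demo { unsigned long host_stack, host_pid; };
struct vcpu_demo { int dummy; struct vcpu_arch_demo arch; };

int main(void)
{
	DEFINE(VCPU_HOST_STACK, offsetof(struct vcpu_demo, arch.host_stack));
	DEFINE(VCPU_HOST_PID, offsetof(struct vcpu_demo, arch.host_pid));
	return 0;
}

Compile with `gcc -S` and grep the output for "->" to see the extracted offsets; no object code is ever produced from the real asm-offsets.c either.
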
diff --git a/arch/powerpc/kernel/irq.c b/arch/powerpc/kernel/irq.c index ac222d0ab12e..23b8b5e36f98 100644 --- a/arch/powerpc/kernel/irq.c +++ b/arch/powerpc/kernel/irq.c | |||
@@ -237,7 +237,7 @@ void fixup_irqs(cpumask_t map) | |||
237 | mask = map; | 237 | mask = map; |
238 | } | 238 | } |
239 | if (irq_desc[irq].chip->set_affinity) | 239 | if (irq_desc[irq].chip->set_affinity) |
240 | irq_desc[irq].chip->set_affinity(irq, mask); | 240 | irq_desc[irq].chip->set_affinity(irq, &mask); |
241 | else if (irq_desc[irq].action && !(warned++)) | 241 | else if (irq_desc[irq].action && !(warned++)) |
242 | printk("Cannot set affinity for irq %i\n", irq); | 242 | printk("Cannot set affinity for irq %i\n", irq); |
243 | } | 243 | } |
diff --git a/arch/powerpc/kernel/smp.c b/arch/powerpc/kernel/smp.c index 8ac3f721d235..65484b2200b3 100644 --- a/arch/powerpc/kernel/smp.c +++ b/arch/powerpc/kernel/smp.c | |||
@@ -59,13 +59,9 @@ | |||
59 | 59 | ||
60 | struct thread_info *secondary_ti; | 60 | struct thread_info *secondary_ti; |
61 | 61 | ||
62 | cpumask_t cpu_possible_map = CPU_MASK_NONE; | ||
63 | cpumask_t cpu_online_map = CPU_MASK_NONE; | ||
64 | DEFINE_PER_CPU(cpumask_t, cpu_sibling_map) = CPU_MASK_NONE; | 62 | DEFINE_PER_CPU(cpumask_t, cpu_sibling_map) = CPU_MASK_NONE; |
65 | DEFINE_PER_CPU(cpumask_t, cpu_core_map) = CPU_MASK_NONE; | 63 | DEFINE_PER_CPU(cpumask_t, cpu_core_map) = CPU_MASK_NONE; |
66 | 64 | ||
67 | EXPORT_SYMBOL(cpu_online_map); | ||
68 | EXPORT_SYMBOL(cpu_possible_map); | ||
69 | EXPORT_PER_CPU_SYMBOL(cpu_sibling_map); | 65 | EXPORT_PER_CPU_SYMBOL(cpu_sibling_map); |
70 | EXPORT_PER_CPU_SYMBOL(cpu_core_map); | 66 | EXPORT_PER_CPU_SYMBOL(cpu_core_map); |
71 | 67 | ||
diff --git a/arch/powerpc/kernel/time.c b/arch/powerpc/kernel/time.c index e1f3a5140429..99f1ddd68582 100644 --- a/arch/powerpc/kernel/time.c +++ b/arch/powerpc/kernel/time.c | |||
@@ -844,7 +844,7 @@ static void register_decrementer_clockevent(int cpu) | |||
844 | struct clock_event_device *dec = &per_cpu(decrementers, cpu).event; | 844 | struct clock_event_device *dec = &per_cpu(decrementers, cpu).event; |
845 | 845 | ||
846 | *dec = decrementer_clockevent; | 846 | *dec = decrementer_clockevent; |
847 | dec->cpumask = cpumask_of_cpu(cpu); | 847 | dec->cpumask = cpumask_of(cpu); |
848 | 848 | ||
849 | printk(KERN_DEBUG "clockevent: %s mult[%lx] shift[%d] cpu[%d]\n", | 849 | printk(KERN_DEBUG "clockevent: %s mult[%lx] shift[%d] cpu[%d]\n", |
850 | dec->name, dec->mult, dec->shift, cpu); | 850 | dec->name, dec->mult, dec->shift, cpu); |
diff --git a/arch/powerpc/kvm/44x.c b/arch/powerpc/kvm/44x.c new file mode 100644 index 000000000000..a66bec57265a --- /dev/null +++ b/arch/powerpc/kvm/44x.c | |||
@@ -0,0 +1,228 @@ | |||
1 | /* | ||
2 | * This program is free software; you can redistribute it and/or modify | ||
3 | * it under the terms of the GNU General Public License, version 2, as | ||
4 | * published by the Free Software Foundation. | ||
5 | * | ||
6 | * This program is distributed in the hope that it will be useful, | ||
7 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
8 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
9 | * GNU General Public License for more details. | ||
10 | * | ||
11 | * You should have received a copy of the GNU General Public License | ||
12 | * along with this program; if not, write to the Free Software | ||
13 | * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. | ||
14 | * | ||
15 | * Copyright IBM Corp. 2008 | ||
16 | * | ||
17 | * Authors: Hollis Blanchard <hollisb@us.ibm.com> | ||
18 | */ | ||
19 | |||
20 | #include <linux/kvm_host.h> | ||
21 | #include <linux/err.h> | ||
22 | |||
23 | #include <asm/reg.h> | ||
24 | #include <asm/cputable.h> | ||
25 | #include <asm/tlbflush.h> | ||
26 | #include <asm/kvm_44x.h> | ||
27 | #include <asm/kvm_ppc.h> | ||
28 | |||
29 | #include "44x_tlb.h" | ||
30 | |||
31 | /* Note: clearing MSR[DE] just means that the debug interrupt will not be | ||
32 | * delivered *immediately*. Instead, it simply sets the appropriate DBSR bits. | ||
33 | * If those DBSR bits are still set when MSR[DE] is re-enabled, the interrupt | ||
34 | * will be delivered as an "imprecise debug event" (which is indicated by | ||
35 | * DBSR[IDE]). | ||
36 | */ | ||
37 | static void kvm44x_disable_debug_interrupts(void) | ||
38 | { | ||
39 | mtmsr(mfmsr() & ~MSR_DE); | ||
40 | } | ||
41 | |||
42 | void kvmppc_core_load_host_debugstate(struct kvm_vcpu *vcpu) | ||
43 | { | ||
44 | kvm44x_disable_debug_interrupts(); | ||
45 | |||
46 | mtspr(SPRN_IAC1, vcpu->arch.host_iac[0]); | ||
47 | mtspr(SPRN_IAC2, vcpu->arch.host_iac[1]); | ||
48 | mtspr(SPRN_IAC3, vcpu->arch.host_iac[2]); | ||
49 | mtspr(SPRN_IAC4, vcpu->arch.host_iac[3]); | ||
50 | mtspr(SPRN_DBCR1, vcpu->arch.host_dbcr1); | ||
51 | mtspr(SPRN_DBCR2, vcpu->arch.host_dbcr2); | ||
52 | mtspr(SPRN_DBCR0, vcpu->arch.host_dbcr0); | ||
53 | mtmsr(vcpu->arch.host_msr); | ||
54 | } | ||
55 | |||
56 | void kvmppc_core_load_guest_debugstate(struct kvm_vcpu *vcpu) | ||
57 | { | ||
58 | struct kvm_guest_debug *dbg = &vcpu->guest_debug; | ||
59 | u32 dbcr0 = 0; | ||
60 | |||
61 | vcpu->arch.host_msr = mfmsr(); | ||
62 | kvm44x_disable_debug_interrupts(); | ||
63 | |||
64 | /* Save host debug register state. */ | ||
65 | vcpu->arch.host_iac[0] = mfspr(SPRN_IAC1); | ||
66 | vcpu->arch.host_iac[1] = mfspr(SPRN_IAC2); | ||
67 | vcpu->arch.host_iac[2] = mfspr(SPRN_IAC3); | ||
68 | vcpu->arch.host_iac[3] = mfspr(SPRN_IAC4); | ||
69 | vcpu->arch.host_dbcr0 = mfspr(SPRN_DBCR0); | ||
70 | vcpu->arch.host_dbcr1 = mfspr(SPRN_DBCR1); | ||
71 | vcpu->arch.host_dbcr2 = mfspr(SPRN_DBCR2); | ||
72 | |||
73 | /* set registers up for guest */ | ||
74 | |||
75 | if (dbg->bp[0]) { | ||
76 | mtspr(SPRN_IAC1, dbg->bp[0]); | ||
77 | dbcr0 |= DBCR0_IAC1 | DBCR0_IDM; | ||
78 | } | ||
79 | if (dbg->bp[1]) { | ||
80 | mtspr(SPRN_IAC2, dbg->bp[1]); | ||
81 | dbcr0 |= DBCR0_IAC2 | DBCR0_IDM; | ||
82 | } | ||
83 | if (dbg->bp[2]) { | ||
84 | mtspr(SPRN_IAC3, dbg->bp[2]); | ||
85 | dbcr0 |= DBCR0_IAC3 | DBCR0_IDM; | ||
86 | } | ||
87 | if (dbg->bp[3]) { | ||
88 | mtspr(SPRN_IAC4, dbg->bp[3]); | ||
89 | dbcr0 |= DBCR0_IAC4 | DBCR0_IDM; | ||
90 | } | ||
91 | |||
92 | mtspr(SPRN_DBCR0, dbcr0); | ||
93 | mtspr(SPRN_DBCR1, 0); | ||
94 | mtspr(SPRN_DBCR2, 0); | ||
95 | } | ||
96 | |||
97 | void kvmppc_core_vcpu_load(struct kvm_vcpu *vcpu, int cpu) | ||
98 | { | ||
99 | kvmppc_44x_tlb_load(vcpu); | ||
100 | } | ||
101 | |||
102 | void kvmppc_core_vcpu_put(struct kvm_vcpu *vcpu) | ||
103 | { | ||
104 | kvmppc_44x_tlb_put(vcpu); | ||
105 | } | ||
106 | |||
107 | int kvmppc_core_check_processor_compat(void) | ||
108 | { | ||
109 | int r; | ||
110 | |||
111 | if (strcmp(cur_cpu_spec->platform, "ppc440") == 0) | ||
112 | r = 0; | ||
113 | else | ||
114 | r = -ENOTSUPP; | ||
115 | |||
116 | return r; | ||
117 | } | ||
118 | |||
119 | int kvmppc_core_vcpu_setup(struct kvm_vcpu *vcpu) | ||
120 | { | ||
121 | struct kvmppc_vcpu_44x *vcpu_44x = to_44x(vcpu); | ||
122 | struct kvmppc_44x_tlbe *tlbe = &vcpu_44x->guest_tlb[0]; | ||
123 | int i; | ||
124 | |||
125 | tlbe->tid = 0; | ||
126 | tlbe->word0 = PPC44x_TLB_16M | PPC44x_TLB_VALID; | ||
127 | tlbe->word1 = 0; | ||
128 | tlbe->word2 = PPC44x_TLB_SX | PPC44x_TLB_SW | PPC44x_TLB_SR; | ||
129 | |||
130 | tlbe++; | ||
131 | tlbe->tid = 0; | ||
132 | tlbe->word0 = 0xef600000 | PPC44x_TLB_4K | PPC44x_TLB_VALID; | ||
133 | tlbe->word1 = 0xef600000; | ||
134 | tlbe->word2 = PPC44x_TLB_SX | PPC44x_TLB_SW | PPC44x_TLB_SR | ||
135 | | PPC44x_TLB_I | PPC44x_TLB_G; | ||
136 | |||
137 | /* Since the guest can directly access the timebase, it must know the | ||
138 | * real timebase frequency. Accordingly, it must see the state of | ||
139 | * CCR1[TCS]. */ | ||
140 | vcpu->arch.ccr1 = mfspr(SPRN_CCR1); | ||
141 | |||
142 | for (i = 0; i < ARRAY_SIZE(vcpu_44x->shadow_refs); i++) | ||
143 | vcpu_44x->shadow_refs[i].gtlb_index = -1; | ||
144 | |||
145 | return 0; | ||
146 | } | ||
147 | |||
148 | /* 'linear_address' is actually an encoding of AS|PID|EADDR. */ | ||
149 | int kvmppc_core_vcpu_translate(struct kvm_vcpu *vcpu, | ||
150 | struct kvm_translation *tr) | ||
151 | { | ||
152 | struct kvmppc_vcpu_44x *vcpu_44x = to_44x(vcpu); | ||
153 | struct kvmppc_44x_tlbe *gtlbe; | ||
154 | int index; | ||
155 | gva_t eaddr; | ||
156 | u8 pid; | ||
157 | u8 as; | ||
158 | |||
159 | eaddr = tr->linear_address; | ||
160 | pid = (tr->linear_address >> 32) & 0xff; | ||
161 | as = (tr->linear_address >> 40) & 0x1; | ||
162 | |||
163 | index = kvmppc_44x_tlb_index(vcpu, eaddr, pid, as); | ||
164 | if (index == -1) { | ||
165 | tr->valid = 0; | ||
166 | return 0; | ||
167 | } | ||
168 | |||
169 | gtlbe = &vcpu_44x->guest_tlb[index]; | ||
170 | |||
171 | tr->physical_address = tlb_xlate(gtlbe, eaddr); | ||
172 | /* XXX what do "writeable" and "usermode" even mean? */ | ||
173 | tr->valid = 1; | ||
174 | |||
175 | return 0; | ||
176 | } | ||
177 | |||
178 | struct kvm_vcpu *kvmppc_core_vcpu_create(struct kvm *kvm, unsigned int id) | ||
179 | { | ||
180 | struct kvmppc_vcpu_44x *vcpu_44x; | ||
181 | struct kvm_vcpu *vcpu; | ||
182 | int err; | ||
183 | |||
184 | vcpu_44x = kmem_cache_zalloc(kvm_vcpu_cache, GFP_KERNEL); | ||
185 | if (!vcpu_44x) { | ||
186 | err = -ENOMEM; | ||
187 | goto out; | ||
188 | } | ||
189 | |||
190 | vcpu = &vcpu_44x->vcpu; | ||
191 | err = kvm_vcpu_init(vcpu, kvm, id); | ||
192 | if (err) | ||
193 | goto free_vcpu; | ||
194 | |||
195 | return vcpu; | ||
196 | |||
197 | free_vcpu: | ||
198 | kmem_cache_free(kvm_vcpu_cache, vcpu_44x); | ||
199 | out: | ||
200 | return ERR_PTR(err); | ||
201 | } | ||
202 | |||
203 | void kvmppc_core_vcpu_free(struct kvm_vcpu *vcpu) | ||
204 | { | ||
205 | struct kvmppc_vcpu_44x *vcpu_44x = to_44x(vcpu); | ||
206 | |||
207 | kvm_vcpu_uninit(vcpu); | ||
208 | kmem_cache_free(kvm_vcpu_cache, vcpu_44x); | ||
209 | } | ||
210 | |||
211 | static int kvmppc_44x_init(void) | ||
212 | { | ||
213 | int r; | ||
214 | |||
215 | r = kvmppc_booke_init(); | ||
216 | if (r) | ||
217 | return r; | ||
218 | |||
219 | return kvm_init(NULL, sizeof(struct kvmppc_vcpu_44x), THIS_MODULE); | ||
220 | } | ||
221 | |||
222 | static void kvmppc_44x_exit(void) | ||
223 | { | ||
224 | kvmppc_booke_exit(); | ||
225 | } | ||
226 | |||
227 | module_init(kvmppc_44x_init); | ||
228 | module_exit(kvmppc_44x_exit); | ||
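
kvmppc_core_vcpu_translate() above treats kvm_translation's 64-bit linear_address as a packed AS|PID|EADDR triple: effective address in the low 32 bits, PID in bits 32-39, address-space bit at bit 40. A small round-trip sketch of that encoding:

#include <assert.h>
#include <stdint.h>

int main(void)
{
	uint32_t eaddr = 0xc0001234;	/* guest effective address */
	uint8_t pid = 0x2a;		/* guest PID */
	uint8_t as = 1;			/* address space */

	/* Pack, as a hypothetical caller of KVM_TRANSLATE would... */
	uint64_t linear = ((uint64_t)as << 40) | ((uint64_t)pid << 32) | eaddr;

	/* ...and unpack exactly as the 440 hook does. */
	assert((uint32_t)linear == eaddr);
	assert(((linear >> 32) & 0xff) == pid);
	assert(((linear >> 40) & 0x1) == as);
	return 0;
}
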
diff --git a/arch/powerpc/kvm/44x_emulate.c b/arch/powerpc/kvm/44x_emulate.c new file mode 100644 index 000000000000..82489a743a6f --- /dev/null +++ b/arch/powerpc/kvm/44x_emulate.c | |||
@@ -0,0 +1,371 @@ | |||
1 | /* | ||
2 | * This program is free software; you can redistribute it and/or modify | ||
3 | * it under the terms of the GNU General Public License, version 2, as | ||
4 | * published by the Free Software Foundation. | ||
5 | * | ||
6 | * This program is distributed in the hope that it will be useful, | ||
7 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
8 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
9 | * GNU General Public License for more details. | ||
10 | * | ||
11 | * You should have received a copy of the GNU General Public License | ||
12 | * along with this program; if not, write to the Free Software | ||
13 | * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. | ||
14 | * | ||
15 | * Copyright IBM Corp. 2008 | ||
16 | * | ||
17 | * Authors: Hollis Blanchard <hollisb@us.ibm.com> | ||
18 | */ | ||
19 | |||
20 | #include <asm/kvm_ppc.h> | ||
21 | #include <asm/dcr.h> | ||
22 | #include <asm/dcr-regs.h> | ||
23 | #include <asm/disassemble.h> | ||
24 | #include <asm/kvm_44x.h> | ||
25 | #include "timing.h" | ||
26 | |||
27 | #include "booke.h" | ||
28 | #include "44x_tlb.h" | ||
29 | |||
30 | #define OP_RFI 19 | ||
31 | |||
32 | #define XOP_RFI 50 | ||
33 | #define XOP_MFMSR 83 | ||
34 | #define XOP_WRTEE 131 | ||
35 | #define XOP_MTMSR 146 | ||
36 | #define XOP_WRTEEI 163 | ||
37 | #define XOP_MFDCR 323 | ||
38 | #define XOP_MTDCR 451 | ||
39 | #define XOP_TLBSX 914 | ||
40 | #define XOP_ICCCI 966 | ||
41 | #define XOP_TLBWE 978 | ||
42 | |||
43 | static void kvmppc_emul_rfi(struct kvm_vcpu *vcpu) | ||
44 | { | ||
45 | vcpu->arch.pc = vcpu->arch.srr0; | ||
46 | kvmppc_set_msr(vcpu, vcpu->arch.srr1); | ||
47 | } | ||
48 | |||
49 | int kvmppc_core_emulate_op(struct kvm_run *run, struct kvm_vcpu *vcpu, | ||
50 | unsigned int inst, int *advance) | ||
51 | { | ||
52 | int emulated = EMULATE_DONE; | ||
53 | int dcrn; | ||
54 | int ra; | ||
55 | int rb; | ||
56 | int rc; | ||
57 | int rs; | ||
58 | int rt; | ||
59 | int ws; | ||
60 | |||
61 | switch (get_op(inst)) { | ||
62 | case OP_RFI: | ||
63 | switch (get_xop(inst)) { | ||
64 | case XOP_RFI: | ||
65 | kvmppc_emul_rfi(vcpu); | ||
66 | kvmppc_set_exit_type(vcpu, EMULATED_RFI_EXITS); | ||
67 | *advance = 0; | ||
68 | break; | ||
69 | |||
70 | default: | ||
71 | emulated = EMULATE_FAIL; | ||
72 | break; | ||
73 | } | ||
74 | break; | ||
75 | |||
76 | case 31: | ||
77 | switch (get_xop(inst)) { | ||
78 | |||
79 | case XOP_MFMSR: | ||
80 | rt = get_rt(inst); | ||
81 | vcpu->arch.gpr[rt] = vcpu->arch.msr; | ||
82 | kvmppc_set_exit_type(vcpu, EMULATED_MFMSR_EXITS); | ||
83 | break; | ||
84 | |||
85 | case XOP_MTMSR: | ||
86 | rs = get_rs(inst); | ||
87 | kvmppc_set_exit_type(vcpu, EMULATED_MTMSR_EXITS); | ||
88 | kvmppc_set_msr(vcpu, vcpu->arch.gpr[rs]); | ||
89 | break; | ||
90 | |||
91 | case XOP_WRTEE: | ||
92 | rs = get_rs(inst); | ||
93 | vcpu->arch.msr = (vcpu->arch.msr & ~MSR_EE) | ||
94 | | (vcpu->arch.gpr[rs] & MSR_EE); | ||
95 | kvmppc_set_exit_type(vcpu, EMULATED_WRTEE_EXITS); | ||
96 | break; | ||
97 | |||
98 | case XOP_WRTEEI: | ||
99 | vcpu->arch.msr = (vcpu->arch.msr & ~MSR_EE) | ||
100 | | (inst & MSR_EE); | ||
101 | kvmppc_set_exit_type(vcpu, EMULATED_WRTEE_EXITS); | ||
102 | break; | ||
103 | |||
104 | case XOP_MFDCR: | ||
105 | dcrn = get_dcrn(inst); | ||
106 | rt = get_rt(inst); | ||
107 | |||
108 | /* The guest may access CPR0 registers to determine the timebase | ||
109 | * frequency, and it must know the real host frequency because it | ||
110 | * can directly access the timebase registers. | ||
111 | * | ||
112 | * It would be possible to emulate those accesses in userspace, | ||
113 | * but userspace can really only figure out the end frequency. | ||
114 | * We could decompose that into the factors that compute it, but | ||
115 | * that's tricky math, and it's easier to just report the real | ||
116 | * CPR0 values. | ||
117 | */ | ||
118 | switch (dcrn) { | ||
119 | case DCRN_CPR0_CONFIG_ADDR: | ||
120 | vcpu->arch.gpr[rt] = vcpu->arch.cpr0_cfgaddr; | ||
121 | break; | ||
122 | case DCRN_CPR0_CONFIG_DATA: | ||
123 | local_irq_disable(); | ||
124 | mtdcr(DCRN_CPR0_CONFIG_ADDR, | ||
125 | vcpu->arch.cpr0_cfgaddr); | ||
126 | vcpu->arch.gpr[rt] = mfdcr(DCRN_CPR0_CONFIG_DATA); | ||
127 | local_irq_enable(); | ||
128 | break; | ||
129 | default: | ||
130 | run->dcr.dcrn = dcrn; | ||
131 | run->dcr.data = 0; | ||
132 | run->dcr.is_write = 0; | ||
133 | vcpu->arch.io_gpr = rt; | ||
134 | vcpu->arch.dcr_needed = 1; | ||
135 | kvmppc_account_exit(vcpu, DCR_EXITS); | ||
136 | emulated = EMULATE_DO_DCR; | ||
137 | } | ||
138 | |||
139 | break; | ||
140 | |||
141 | case XOP_MTDCR: | ||
142 | dcrn = get_dcrn(inst); | ||
143 | rs = get_rs(inst); | ||
144 | |||
145 | /* emulate some access in kernel */ | ||
146 | switch (dcrn) { | ||
147 | case DCRN_CPR0_CONFIG_ADDR: | ||
148 | vcpu->arch.cpr0_cfgaddr = vcpu->arch.gpr[rs]; | ||
149 | break; | ||
150 | default: | ||
151 | run->dcr.dcrn = dcrn; | ||
152 | run->dcr.data = vcpu->arch.gpr[rs]; | ||
153 | run->dcr.is_write = 1; | ||
154 | vcpu->arch.dcr_needed = 1; | ||
155 | kvmppc_account_exit(vcpu, DCR_EXITS); | ||
156 | emulated = EMULATE_DO_DCR; | ||
157 | } | ||
158 | |||
159 | break; | ||
160 | |||
161 | case XOP_TLBWE: | ||
162 | ra = get_ra(inst); | ||
163 | rs = get_rs(inst); | ||
164 | ws = get_ws(inst); | ||
165 | emulated = kvmppc_44x_emul_tlbwe(vcpu, ra, rs, ws); | ||
166 | break; | ||
167 | |||
168 | case XOP_TLBSX: | ||
169 | rt = get_rt(inst); | ||
170 | ra = get_ra(inst); | ||
171 | rb = get_rb(inst); | ||
172 | rc = get_rc(inst); | ||
173 | emulated = kvmppc_44x_emul_tlbsx(vcpu, rt, ra, rb, rc); | ||
174 | break; | ||
175 | |||
176 | case XOP_ICCCI: | ||
177 | break; | ||
178 | |||
179 | default: | ||
180 | emulated = EMULATE_FAIL; | ||
181 | } | ||
182 | |||
183 | break; | ||
184 | |||
185 | default: | ||
186 | emulated = EMULATE_FAIL; | ||
187 | } | ||
188 | |||
189 | return emulated; | ||
190 | } | ||
191 | |||
192 | int kvmppc_core_emulate_mtspr(struct kvm_vcpu *vcpu, int sprn, int rs) | ||
193 | { | ||
194 | switch (sprn) { | ||
195 | case SPRN_MMUCR: | ||
196 | vcpu->arch.mmucr = vcpu->arch.gpr[rs]; break; | ||
197 | case SPRN_PID: | ||
198 | kvmppc_set_pid(vcpu, vcpu->arch.gpr[rs]); break; | ||
199 | case SPRN_CCR0: | ||
200 | vcpu->arch.ccr0 = vcpu->arch.gpr[rs]; break; | ||
201 | case SPRN_CCR1: | ||
202 | vcpu->arch.ccr1 = vcpu->arch.gpr[rs]; break; | ||
203 | case SPRN_DEAR: | ||
204 | vcpu->arch.dear = vcpu->arch.gpr[rs]; break; | ||
205 | case SPRN_ESR: | ||
206 | vcpu->arch.esr = vcpu->arch.gpr[rs]; break; | ||
207 | case SPRN_DBCR0: | ||
208 | vcpu->arch.dbcr0 = vcpu->arch.gpr[rs]; break; | ||
209 | case SPRN_DBCR1: | ||
210 | vcpu->arch.dbcr1 = vcpu->arch.gpr[rs]; break; | ||
211 | case SPRN_TSR: | ||
212 | vcpu->arch.tsr &= ~vcpu->arch.gpr[rs]; break; | ||
213 | case SPRN_TCR: | ||
214 | vcpu->arch.tcr = vcpu->arch.gpr[rs]; | ||
215 | kvmppc_emulate_dec(vcpu); | ||
216 | break; | ||
217 | |||
218 | /* Note: SPRG4-7 are user-readable. These values are | ||
219 | * loaded into the real SPRGs when resuming the | ||
220 | * guest. */ | ||
221 | case SPRN_SPRG4: | ||
222 | vcpu->arch.sprg4 = vcpu->arch.gpr[rs]; break; | ||
223 | case SPRN_SPRG5: | ||
224 | vcpu->arch.sprg5 = vcpu->arch.gpr[rs]; break; | ||
225 | case SPRN_SPRG6: | ||
226 | vcpu->arch.sprg6 = vcpu->arch.gpr[rs]; break; | ||
227 | case SPRN_SPRG7: | ||
228 | vcpu->arch.sprg7 = vcpu->arch.gpr[rs]; break; | ||
229 | |||
230 | case SPRN_IVPR: | ||
231 | vcpu->arch.ivpr = vcpu->arch.gpr[rs]; | ||
232 | break; | ||
233 | case SPRN_IVOR0: | ||
234 | vcpu->arch.ivor[BOOKE_IRQPRIO_CRITICAL] = vcpu->arch.gpr[rs]; | ||
235 | break; | ||
236 | case SPRN_IVOR1: | ||
237 | vcpu->arch.ivor[BOOKE_IRQPRIO_MACHINE_CHECK] = vcpu->arch.gpr[rs]; | ||
238 | break; | ||
239 | case SPRN_IVOR2: | ||
240 | vcpu->arch.ivor[BOOKE_IRQPRIO_DATA_STORAGE] = vcpu->arch.gpr[rs]; | ||
241 | break; | ||
242 | case SPRN_IVOR3: | ||
243 | vcpu->arch.ivor[BOOKE_IRQPRIO_INST_STORAGE] = vcpu->arch.gpr[rs]; | ||
244 | break; | ||
245 | case SPRN_IVOR4: | ||
246 | vcpu->arch.ivor[BOOKE_IRQPRIO_EXTERNAL] = vcpu->arch.gpr[rs]; | ||
247 | break; | ||
248 | case SPRN_IVOR5: | ||
249 | vcpu->arch.ivor[BOOKE_IRQPRIO_ALIGNMENT] = vcpu->arch.gpr[rs]; | ||
250 | break; | ||
251 | case SPRN_IVOR6: | ||
252 | vcpu->arch.ivor[BOOKE_IRQPRIO_PROGRAM] = vcpu->arch.gpr[rs]; | ||
253 | break; | ||
254 | case SPRN_IVOR7: | ||
255 | vcpu->arch.ivor[BOOKE_IRQPRIO_FP_UNAVAIL] = vcpu->arch.gpr[rs]; | ||
256 | break; | ||
257 | case SPRN_IVOR8: | ||
258 | vcpu->arch.ivor[BOOKE_IRQPRIO_SYSCALL] = vcpu->arch.gpr[rs]; | ||
259 | break; | ||
260 | case SPRN_IVOR9: | ||
261 | vcpu->arch.ivor[BOOKE_IRQPRIO_AP_UNAVAIL] = vcpu->arch.gpr[rs]; | ||
262 | break; | ||
263 | case SPRN_IVOR10: | ||
264 | vcpu->arch.ivor[BOOKE_IRQPRIO_DECREMENTER] = vcpu->arch.gpr[rs]; | ||
265 | break; | ||
266 | case SPRN_IVOR11: | ||
267 | vcpu->arch.ivor[BOOKE_IRQPRIO_FIT] = vcpu->arch.gpr[rs]; | ||
268 | break; | ||
269 | case SPRN_IVOR12: | ||
270 | vcpu->arch.ivor[BOOKE_IRQPRIO_WATCHDOG] = vcpu->arch.gpr[rs]; | ||
271 | break; | ||
272 | case SPRN_IVOR13: | ||
273 | vcpu->arch.ivor[BOOKE_IRQPRIO_DTLB_MISS] = vcpu->arch.gpr[rs]; | ||
274 | break; | ||
275 | case SPRN_IVOR14: | ||
276 | vcpu->arch.ivor[BOOKE_IRQPRIO_ITLB_MISS] = vcpu->arch.gpr[rs]; | ||
277 | break; | ||
278 | case SPRN_IVOR15: | ||
279 | vcpu->arch.ivor[BOOKE_IRQPRIO_DEBUG] = vcpu->arch.gpr[rs]; | ||
280 | break; | ||
281 | |||
282 | default: | ||
283 | return EMULATE_FAIL; | ||
284 | } | ||
285 | |||
286 | kvmppc_set_exit_type(vcpu, EMULATED_MTSPR_EXITS); | ||
287 | return EMULATE_DONE; | ||
288 | } | ||
289 | |||
290 | int kvmppc_core_emulate_mfspr(struct kvm_vcpu *vcpu, int sprn, int rt) | ||
291 | { | ||
292 | switch (sprn) { | ||
293 | /* 440 */ | ||
294 | case SPRN_MMUCR: | ||
295 | vcpu->arch.gpr[rt] = vcpu->arch.mmucr; break; | ||
296 | case SPRN_CCR0: | ||
297 | vcpu->arch.gpr[rt] = vcpu->arch.ccr0; break; | ||
298 | case SPRN_CCR1: | ||
299 | vcpu->arch.gpr[rt] = vcpu->arch.ccr1; break; | ||
300 | |||
301 | /* Book E */ | ||
302 | case SPRN_PID: | ||
303 | vcpu->arch.gpr[rt] = vcpu->arch.pid; break; | ||
304 | case SPRN_IVPR: | ||
305 | vcpu->arch.gpr[rt] = vcpu->arch.ivpr; break; | ||
306 | case SPRN_DEAR: | ||
307 | vcpu->arch.gpr[rt] = vcpu->arch.dear; break; | ||
308 | case SPRN_ESR: | ||
309 | vcpu->arch.gpr[rt] = vcpu->arch.esr; break; | ||
310 | case SPRN_DBCR0: | ||
311 | vcpu->arch.gpr[rt] = vcpu->arch.dbcr0; break; | ||
312 | case SPRN_DBCR1: | ||
313 | vcpu->arch.gpr[rt] = vcpu->arch.dbcr1; break; | ||
314 | |||
315 | case SPRN_IVOR0: | ||
316 | vcpu->arch.gpr[rt] = vcpu->arch.ivor[BOOKE_IRQPRIO_CRITICAL]; | ||
317 | break; | ||
318 | case SPRN_IVOR1: | ||
319 | vcpu->arch.gpr[rt] = vcpu->arch.ivor[BOOKE_IRQPRIO_MACHINE_CHECK]; | ||
320 | break; | ||
321 | case SPRN_IVOR2: | ||
322 | vcpu->arch.gpr[rt] = vcpu->arch.ivor[BOOKE_IRQPRIO_DATA_STORAGE]; | ||
323 | break; | ||
324 | case SPRN_IVOR3: | ||
325 | vcpu->arch.gpr[rt] = vcpu->arch.ivor[BOOKE_IRQPRIO_INST_STORAGE]; | ||
326 | break; | ||
327 | case SPRN_IVOR4: | ||
328 | vcpu->arch.gpr[rt] = vcpu->arch.ivor[BOOKE_IRQPRIO_EXTERNAL]; | ||
329 | break; | ||
330 | case SPRN_IVOR5: | ||
331 | vcpu->arch.gpr[rt] = vcpu->arch.ivor[BOOKE_IRQPRIO_ALIGNMENT]; | ||
332 | break; | ||
333 | case SPRN_IVOR6: | ||
334 | vcpu->arch.gpr[rt] = vcpu->arch.ivor[BOOKE_IRQPRIO_PROGRAM]; | ||
335 | break; | ||
336 | case SPRN_IVOR7: | ||
337 | vcpu->arch.gpr[rt] = vcpu->arch.ivor[BOOKE_IRQPRIO_FP_UNAVAIL]; | ||
338 | break; | ||
339 | case SPRN_IVOR8: | ||
340 | vcpu->arch.gpr[rt] = vcpu->arch.ivor[BOOKE_IRQPRIO_SYSCALL]; | ||
341 | break; | ||
342 | case SPRN_IVOR9: | ||
343 | vcpu->arch.gpr[rt] = vcpu->arch.ivor[BOOKE_IRQPRIO_AP_UNAVAIL]; | ||
344 | break; | ||
345 | case SPRN_IVOR10: | ||
346 | vcpu->arch.gpr[rt] = vcpu->arch.ivor[BOOKE_IRQPRIO_DECREMENTER]; | ||
347 | break; | ||
348 | case SPRN_IVOR11: | ||
349 | vcpu->arch.gpr[rt] = vcpu->arch.ivor[BOOKE_IRQPRIO_FIT]; | ||
350 | break; | ||
351 | case SPRN_IVOR12: | ||
352 | vcpu->arch.gpr[rt] = vcpu->arch.ivor[BOOKE_IRQPRIO_WATCHDOG]; | ||
353 | break; | ||
354 | case SPRN_IVOR13: | ||
355 | vcpu->arch.gpr[rt] = vcpu->arch.ivor[BOOKE_IRQPRIO_DTLB_MISS]; | ||
356 | break; | ||
357 | case SPRN_IVOR14: | ||
358 | vcpu->arch.gpr[rt] = vcpu->arch.ivor[BOOKE_IRQPRIO_ITLB_MISS]; | ||
359 | break; | ||
360 | case SPRN_IVOR15: | ||
361 | vcpu->arch.gpr[rt] = vcpu->arch.ivor[BOOKE_IRQPRIO_DEBUG]; | ||
362 | break; | ||
363 | |||
364 | default: | ||
365 | return EMULATE_FAIL; | ||
366 | } | ||
367 | |||
368 | kvmppc_set_exit_type(vcpu, EMULATED_MFSPR_EXITS); | ||
369 | return EMULATE_DONE; | ||
370 | } | ||
371 | |||
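
Two bit-level tricks in the emulation above are easy to miss. First, wrteei encodes its E operand at instruction bit 16, the same bit position as MSR[EE], so `inst & MSR_EE` lifts the bit across without a shift. Second, TSR is a write-one-to-clear status register, which is why mtspr to it is emulated as `tsr &= ~gpr[rs]`. A quick check of both, assuming the Book E value MSR_EE == 0x8000:

#include <assert.h>
#include <stdint.h>

#define MSR_EE 0x8000u

int main(void)
{
	/* wrteei 1: opcode 31, xop 163, E at bit 16 (== MSR_EE). */
	uint32_t inst = (31u << 26) | (163u << 1) | MSR_EE;
	uint32_t msr = 0;

	msr = (msr & ~MSR_EE) | (inst & MSR_EE);
	assert(msr == MSR_EE);			/* interrupts enabled */

	/* Write-one-to-clear: the guest acknowledges a status bit
	 * by writing a 1 to it. */
	uint32_t tsr = 0xa0000000;		/* two bits pending */
	tsr &= ~0x80000000u;			/* guest writes the top bit */
	assert(tsr == 0x20000000);
	return 0;
}
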
diff --git a/arch/powerpc/kvm/44x_tlb.c b/arch/powerpc/kvm/44x_tlb.c index ad72c6f9811f..9a34b8edb9e2 100644 --- a/arch/powerpc/kvm/44x_tlb.c +++ b/arch/powerpc/kvm/44x_tlb.c | |||
@@ -22,20 +22,103 @@ | |||
22 | #include <linux/kvm.h> | 22 | #include <linux/kvm.h> |
23 | #include <linux/kvm_host.h> | 23 | #include <linux/kvm_host.h> |
24 | #include <linux/highmem.h> | 24 | #include <linux/highmem.h> |
25 | |||
26 | #include <asm/tlbflush.h> | ||
25 | #include <asm/mmu-44x.h> | 27 | #include <asm/mmu-44x.h> |
26 | #include <asm/kvm_ppc.h> | 28 | #include <asm/kvm_ppc.h> |
29 | #include <asm/kvm_44x.h> | ||
30 | #include "timing.h" | ||
27 | 31 | ||
28 | #include "44x_tlb.h" | 32 | #include "44x_tlb.h" |
29 | 33 | ||
34 | #ifndef PPC44x_TLBE_SIZE | ||
35 | #define PPC44x_TLBE_SIZE PPC44x_TLB_4K | ||
36 | #endif | ||
37 | |||
38 | #define PAGE_SIZE_4K (1<<12) | ||
39 | #define PAGE_MASK_4K (~(PAGE_SIZE_4K - 1)) | ||
40 | |||
41 | #define PPC44x_TLB_UATTR_MASK \ | ||
42 | (PPC44x_TLB_U0|PPC44x_TLB_U1|PPC44x_TLB_U2|PPC44x_TLB_U3) | ||
30 | #define PPC44x_TLB_USER_PERM_MASK (PPC44x_TLB_UX|PPC44x_TLB_UR|PPC44x_TLB_UW) | 43 | #define PPC44x_TLB_USER_PERM_MASK (PPC44x_TLB_UX|PPC44x_TLB_UR|PPC44x_TLB_UW) |
31 | #define PPC44x_TLB_SUPER_PERM_MASK (PPC44x_TLB_SX|PPC44x_TLB_SR|PPC44x_TLB_SW) | 44 | #define PPC44x_TLB_SUPER_PERM_MASK (PPC44x_TLB_SX|PPC44x_TLB_SR|PPC44x_TLB_SW) |
32 | 45 | ||
33 | static unsigned int kvmppc_tlb_44x_pos; | 46 | #ifdef DEBUG |
47 | void kvmppc_dump_tlbs(struct kvm_vcpu *vcpu) | ||
48 | { | ||
49 | struct kvmppc_vcpu_44x *vcpu_44x = to_44x(vcpu); | ||
50 | struct kvmppc_44x_tlbe *tlbe; | ||
51 | int i; | ||
51 | |||
52 | printk("vcpu %d TLB dump:\n", vcpu->vcpu_id); | ||
53 | printk("| %2s | %3s | %8s | %8s | %8s |\n", | ||
54 | "nr", "tid", "word0", "word1", "word2"); | ||
55 | |||
56 | for (i = 0; i < ARRAY_SIZE(vcpu_44x->guest_tlb); i++) { | ||
57 | tlbe = &vcpu_44x->guest_tlb[i]; | ||
58 | if (tlbe->word0 & PPC44x_TLB_VALID) | ||
59 | printk(" G%2d | %02X | %08X | %08X | %08X |\n", | ||
60 | i, tlbe->tid, tlbe->word0, tlbe->word1, | ||
61 | tlbe->word2); | ||
62 | } | ||
63 | } | ||
64 | #endif | ||
65 | |||
66 | static inline void kvmppc_44x_tlbie(unsigned int index) | ||
67 | { | ||
68 | /* 0 <= index < 64, so the V bit is clear and we can use the index as | ||
69 | * word0. */ | ||
70 | asm volatile( | ||
71 | "tlbwe %[index], %[index], 0\n" | ||
72 | : | ||
73 | : [index] "r"(index) | ||
74 | ); | ||
75 | } | ||
76 | |||
77 | static inline void kvmppc_44x_tlbre(unsigned int index, | ||
78 | struct kvmppc_44x_tlbe *tlbe) | ||
79 | { | ||
80 | asm volatile( | ||
81 | "tlbre %[word0], %[index], 0\n" | ||
82 | "mfspr %[tid], %[sprn_mmucr]\n" | ||
83 | "andi. %[tid], %[tid], 0xff\n" | ||
84 | "tlbre %[word1], %[index], 1\n" | ||
85 | "tlbre %[word2], %[index], 2\n" | ||
86 | : [word0] "=r"(tlbe->word0), | ||
87 | [word1] "=r"(tlbe->word1), | ||
88 | [word2] "=r"(tlbe->word2), | ||
89 | [tid] "=r"(tlbe->tid) | ||
90 | : [index] "r"(index), | ||
91 | [sprn_mmucr] "i"(SPRN_MMUCR) | ||
92 | : "cc" | ||
93 | ); | ||
94 | } | ||
95 | |||
96 | static inline void kvmppc_44x_tlbwe(unsigned int index, | ||
97 | struct kvmppc_44x_tlbe *stlbe) | ||
98 | { | ||
99 | unsigned long tmp; | ||
100 | |||
101 | asm volatile( | ||
102 | "mfspr %[tmp], %[sprn_mmucr]\n" | ||
103 | "rlwimi %[tmp], %[tid], 0, 0xff\n" | ||
104 | "mtspr %[sprn_mmucr], %[tmp]\n" | ||
105 | "tlbwe %[word0], %[index], 0\n" | ||
106 | "tlbwe %[word1], %[index], 1\n" | ||
107 | "tlbwe %[word2], %[index], 2\n" | ||
108 | : [tmp] "=&r"(tmp) | ||
109 | : [word0] "r"(stlbe->word0), | ||
110 | [word1] "r"(stlbe->word1), | ||
111 | [word2] "r"(stlbe->word2), | ||
112 | [tid] "r"(stlbe->tid), | ||
113 | [index] "r"(index), | ||
114 | [sprn_mmucr] "i"(SPRN_MMUCR) | ||
115 | ); | ||
116 | } | ||
34 | 117 | ||
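
The `rlwimi` in kvmppc_44x_tlbwe() splices the shadow TID into the low byte of MMUCR while leaving the rest of the register intact; rlwimi (rotate-left then mask-insert) is PowerPC's general bitfield insert. Its C equivalent for this operand pattern, as a sanity check:

#include <assert.h>
#include <stdint.h>

/* rlwimi tmp, tid, 0, 0xff: insert tid's low byte into tmp. */
static uint32_t mmucr_set_stid(uint32_t mmucr, uint32_t tid)
{
	return (mmucr & ~0xffu) | (tid & 0xffu);
}

int main(void)
{
	assert(mmucr_set_stid(0x00010042, 0x01) == 0x00010001);
	return 0;
}
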
35 | static u32 kvmppc_44x_tlb_shadow_attrib(u32 attrib, int usermode) | 118 | static u32 kvmppc_44x_tlb_shadow_attrib(u32 attrib, int usermode) |
36 | { | 119 | { |
37 | /* Mask off reserved bits. */ | 120 | /* We only care about the guest's permission and user bits. */ |
38 | attrib &= PPC44x_TLB_PERM_MASK|PPC44x_TLB_ATTR_MASK; | 121 | attrib &= PPC44x_TLB_PERM_MASK|PPC44x_TLB_UATTR_MASK; |
39 | 122 | ||
40 | if (!usermode) { | 123 | if (!usermode) { |
41 | /* Guest is in supervisor mode, so we need to translate guest | 124 | /* Guest is in supervisor mode, so we need to translate guest |
@@ -47,18 +130,60 @@ static u32 kvmppc_44x_tlb_shadow_attrib(u32 attrib, int usermode) | |||
47 | /* Make sure host can always access this memory. */ | 130 | /* Make sure host can always access this memory. */ |
48 | attrib |= PPC44x_TLB_SX|PPC44x_TLB_SR|PPC44x_TLB_SW; | 131 | attrib |= PPC44x_TLB_SX|PPC44x_TLB_SR|PPC44x_TLB_SW; |
49 | 132 | ||
133 | /* WIMGE = 0b00100 */ | ||
134 | attrib |= PPC44x_TLB_M; | ||
135 | |||
50 | return attrib; | 136 | return attrib; |
51 | } | 137 | } |
52 | 138 | ||
139 | /* Load shadow TLB back into hardware. */ | ||
140 | void kvmppc_44x_tlb_load(struct kvm_vcpu *vcpu) | ||
141 | { | ||
142 | struct kvmppc_vcpu_44x *vcpu_44x = to_44x(vcpu); | ||
143 | int i; | ||
144 | |||
145 | for (i = 0; i <= tlb_44x_hwater; i++) { | ||
146 | struct kvmppc_44x_tlbe *stlbe = &vcpu_44x->shadow_tlb[i]; | ||
147 | |||
148 | if (get_tlb_v(stlbe) && get_tlb_ts(stlbe)) | ||
149 | kvmppc_44x_tlbwe(i, stlbe); | ||
150 | } | ||
151 | } | ||
152 | |||
153 | static void kvmppc_44x_tlbe_set_modified(struct kvmppc_vcpu_44x *vcpu_44x, | ||
154 | unsigned int i) | ||
155 | { | ||
156 | vcpu_44x->shadow_tlb_mod[i] = 1; | ||
157 | } | ||
158 | |||
159 | /* Save hardware TLB to the vcpu, and invalidate all guest mappings. */ | ||
160 | void kvmppc_44x_tlb_put(struct kvm_vcpu *vcpu) | ||
161 | { | ||
162 | struct kvmppc_vcpu_44x *vcpu_44x = to_44x(vcpu); | ||
163 | int i; | ||
164 | |||
165 | for (i = 0; i <= tlb_44x_hwater; i++) { | ||
166 | struct kvmppc_44x_tlbe *stlbe = &vcpu_44x->shadow_tlb[i]; | ||
167 | |||
168 | if (vcpu_44x->shadow_tlb_mod[i]) | ||
169 | kvmppc_44x_tlbre(i, stlbe); | ||
170 | |||
171 | if (get_tlb_v(stlbe) && get_tlb_ts(stlbe)) | ||
172 | kvmppc_44x_tlbie(i); | ||
173 | } | ||
174 | } | ||
175 | |||
176 | |||
53 | /* Search the guest TLB for a matching entry. */ | 177 | /* Search the guest TLB for a matching entry. */ |
54 | int kvmppc_44x_tlb_index(struct kvm_vcpu *vcpu, gva_t eaddr, unsigned int pid, | 178 | int kvmppc_44x_tlb_index(struct kvm_vcpu *vcpu, gva_t eaddr, unsigned int pid, |
55 | unsigned int as) | 179 | unsigned int as) |
56 | { | 180 | { |
181 | struct kvmppc_vcpu_44x *vcpu_44x = to_44x(vcpu); | ||
57 | int i; | 182 | int i; |
58 | 183 | ||
59 | /* XXX Replace loop with fancy data structures. */ | 184 | /* XXX Replace loop with fancy data structures. */ |
60 | for (i = 0; i < PPC44x_TLB_SIZE; i++) { | 185 | for (i = 0; i < ARRAY_SIZE(vcpu_44x->guest_tlb); i++) { |
61 | struct tlbe *tlbe = &vcpu->arch.guest_tlb[i]; | 186 | struct kvmppc_44x_tlbe *tlbe = &vcpu_44x->guest_tlb[i]; |
62 | unsigned int tid; | 187 | unsigned int tid; |
63 | 188 | ||
64 | if (eaddr < get_tlb_eaddr(tlbe)) | 189 | if (eaddr < get_tlb_eaddr(tlbe)) |
@@ -83,78 +208,89 @@ int kvmppc_44x_tlb_index(struct kvm_vcpu *vcpu, gva_t eaddr, unsigned int pid, | |||
83 | return -1; | 208 | return -1; |
84 | } | 209 | } |
85 | 210 | ||
86 | struct tlbe *kvmppc_44x_itlb_search(struct kvm_vcpu *vcpu, gva_t eaddr) | 211 | int kvmppc_44x_itlb_index(struct kvm_vcpu *vcpu, gva_t eaddr) |
87 | { | 212 | { |
88 | unsigned int as = !!(vcpu->arch.msr & MSR_IS); | 213 | unsigned int as = !!(vcpu->arch.msr & MSR_IS); |
89 | unsigned int index; | ||
90 | 214 | ||
91 | index = kvmppc_44x_tlb_index(vcpu, eaddr, vcpu->arch.pid, as); | 215 | return kvmppc_44x_tlb_index(vcpu, eaddr, vcpu->arch.pid, as); |
92 | if (index == -1) | ||
93 | return NULL; | ||
94 | return &vcpu->arch.guest_tlb[index]; | ||
95 | } | 216 | } |
96 | 217 | ||
97 | struct tlbe *kvmppc_44x_dtlb_search(struct kvm_vcpu *vcpu, gva_t eaddr) | 218 | int kvmppc_44x_dtlb_index(struct kvm_vcpu *vcpu, gva_t eaddr) |
98 | { | 219 | { |
99 | unsigned int as = !!(vcpu->arch.msr & MSR_DS); | 220 | unsigned int as = !!(vcpu->arch.msr & MSR_DS); |
100 | unsigned int index; | ||
101 | 221 | ||
102 | index = kvmppc_44x_tlb_index(vcpu, eaddr, vcpu->arch.pid, as); | 222 | return kvmppc_44x_tlb_index(vcpu, eaddr, vcpu->arch.pid, as); |
103 | if (index == -1) | ||
104 | return NULL; | ||
105 | return &vcpu->arch.guest_tlb[index]; | ||
106 | } | 223 | } |
107 | 224 | ||
108 | static int kvmppc_44x_tlbe_is_writable(struct tlbe *tlbe) | 225 | static void kvmppc_44x_shadow_release(struct kvmppc_vcpu_44x *vcpu_44x, |
226 | unsigned int stlb_index) | ||
109 | { | 227 | { |
110 | return tlbe->word2 & (PPC44x_TLB_SW|PPC44x_TLB_UW); | 228 | struct kvmppc_44x_shadow_ref *ref = &vcpu_44x->shadow_refs[stlb_index]; |
111 | } | ||
112 | 229 | ||
113 | static void kvmppc_44x_shadow_release(struct kvm_vcpu *vcpu, | 230 | if (!ref->page) |
114 | unsigned int index) | 231 | return; |
115 | { | ||
116 | struct tlbe *stlbe = &vcpu->arch.shadow_tlb[index]; | ||
117 | struct page *page = vcpu->arch.shadow_pages[index]; | ||
118 | 232 | ||
119 | if (get_tlb_v(stlbe)) { | 233 | /* Discard from the TLB. */ |
120 | if (kvmppc_44x_tlbe_is_writable(stlbe)) | 234 | /* Note: we could actually invalidate a host mapping, if the host overwrote |
121 | kvm_release_page_dirty(page); | 235 | * this TLB entry since we inserted a guest mapping. */ |
122 | else | 236 | kvmppc_44x_tlbie(stlb_index); |
123 | kvm_release_page_clean(page); | 237 | |
124 | } | 238 | /* Now release the page. */ |
239 | if (ref->writeable) | ||
240 | kvm_release_page_dirty(ref->page); | ||
241 | else | ||
242 | kvm_release_page_clean(ref->page); | ||
243 | |||
244 | ref->page = NULL; | ||
245 | |||
246 | /* XXX set tlb_44x_index to stlb_index? */ | ||
247 | |||
248 | KVMTRACE_1D(STLB_INVAL, &vcpu_44x->vcpu, stlb_index, handler); | ||
125 | } | 249 | } |
126 | 250 | ||
127 | void kvmppc_core_destroy_mmu(struct kvm_vcpu *vcpu) | 251 | void kvmppc_core_destroy_mmu(struct kvm_vcpu *vcpu) |
128 | { | 252 | { |
253 | struct kvmppc_vcpu_44x *vcpu_44x = to_44x(vcpu); | ||
129 | int i; | 254 | int i; |
130 | 255 | ||
131 | for (i = 0; i <= tlb_44x_hwater; i++) | 256 | for (i = 0; i <= tlb_44x_hwater; i++) |
132 | kvmppc_44x_shadow_release(vcpu, i); | 257 | kvmppc_44x_shadow_release(vcpu_44x, i); |
133 | } | ||
134 | |||
135 | void kvmppc_tlbe_set_modified(struct kvm_vcpu *vcpu, unsigned int i) | ||
136 | { | ||
137 | vcpu->arch.shadow_tlb_mod[i] = 1; | ||
138 | } | 258 | } |
139 | 259 | ||
140 | /* Caller must ensure that the specified guest TLB entry is safe to insert into | 260 | /** |
141 | * the shadow TLB. */ | 261 | * kvmppc_mmu_map -- create a host mapping for guest memory |
142 | void kvmppc_mmu_map(struct kvm_vcpu *vcpu, u64 gvaddr, gfn_t gfn, u64 asid, | 262 | * |
143 | u32 flags) | 263 | * If the guest wanted a larger page than the host supports, only the first |
264 | * host page is mapped here and the rest are demand faulted. | ||
265 | * | ||
266 | * If the guest wanted a smaller page than the host page size, we map only the | ||
267 | * guest-size page (i.e. not a full host page mapping). | ||
268 | * | ||
269 | * Caller must ensure that the specified guest TLB entry is safe to insert into | ||
270 | * the shadow TLB. | ||
271 | */ | ||
272 | void kvmppc_mmu_map(struct kvm_vcpu *vcpu, u64 gvaddr, gpa_t gpaddr, u64 asid, | ||
273 | u32 flags, u32 max_bytes, unsigned int gtlb_index) | ||
144 | { | 274 | { |
275 | struct kvmppc_44x_tlbe stlbe; | ||
276 | struct kvmppc_vcpu_44x *vcpu_44x = to_44x(vcpu); | ||
277 | struct kvmppc_44x_shadow_ref *ref; | ||
145 | struct page *new_page; | 278 | struct page *new_page; |
146 | struct tlbe *stlbe; | ||
147 | hpa_t hpaddr; | 279 | hpa_t hpaddr; |
280 | gfn_t gfn; | ||
148 | unsigned int victim; | 281 | unsigned int victim; |
149 | 282 | ||
150 | /* Future optimization: don't overwrite the TLB entry containing the | 283 | /* Select TLB entry to clobber. Indirectly guard against races with the TLB |
151 | * current PC (or stack?). */ | 284 | * miss handler by disabling interrupts. */ |
152 | victim = kvmppc_tlb_44x_pos++; | 285 | local_irq_disable(); |
153 | if (kvmppc_tlb_44x_pos > tlb_44x_hwater) | 286 | victim = ++tlb_44x_index; |
154 | kvmppc_tlb_44x_pos = 0; | 287 | if (victim > tlb_44x_hwater) |
155 | stlbe = &vcpu->arch.shadow_tlb[victim]; | 288 | victim = 0; |
289 | tlb_44x_index = victim; | ||
290 | local_irq_enable(); | ||
156 | 291 | ||
157 | /* Get reference to new page. */ | 292 | /* Get reference to new page. */ |
293 | gfn = gpaddr >> PAGE_SHIFT; | ||
158 | new_page = gfn_to_page(vcpu->kvm, gfn); | 294 | new_page = gfn_to_page(vcpu->kvm, gfn); |
159 | if (is_error_page(new_page)) { | 295 | if (is_error_page(new_page)) { |
160 | printk(KERN_ERR "Couldn't get guest page for gfn %lx!\n", gfn); | 296 | printk(KERN_ERR "Couldn't get guest page for gfn %lx!\n", gfn); |
@@ -163,10 +299,8 @@ void kvmppc_mmu_map(struct kvm_vcpu *vcpu, u64 gvaddr, gfn_t gfn, u64 asid, | |||
163 | } | 299 | } |
164 | hpaddr = page_to_phys(new_page); | 300 | hpaddr = page_to_phys(new_page); |
165 | 301 | ||
166 | /* Drop reference to old page. */ | 302 | /* Invalidate any previous shadow mappings. */ |
167 | kvmppc_44x_shadow_release(vcpu, victim); | 303 | kvmppc_44x_shadow_release(vcpu_44x, victim); |
168 | |||
169 | vcpu->arch.shadow_pages[victim] = new_page; | ||
170 | 304 | ||
171 | /* XXX Make sure (va, size) doesn't overlap any other | 305 | /* XXX Make sure (va, size) doesn't overlap any other |
172 | * entries. 440x6 user manual says the result would be | 306 | * entries. 440x6 user manual says the result would be |
@@ -174,78 +308,193 @@ void kvmppc_mmu_map(struct kvm_vcpu *vcpu, u64 gvaddr, gfn_t gfn, u64 asid, | |||
174 | 308 | ||
175 | /* XXX what about AS? */ | 309 | /* XXX what about AS? */ |
176 | 310 | ||
177 | stlbe->tid = !(asid & 0xff); | ||
178 | |||
179 | /* Force TS=1 for all guest mappings. */ | 311 | /* Force TS=1 for all guest mappings. */ |
180 | /* For now we hardcode 4KB mappings, but it will be important to | 312 | stlbe.word0 = PPC44x_TLB_VALID | PPC44x_TLB_TS; |
181 | * use host large pages in the future. */ | 313 | |
182 | stlbe->word0 = (gvaddr & PAGE_MASK) | PPC44x_TLB_VALID | PPC44x_TLB_TS | 314 | if (max_bytes >= PAGE_SIZE) { |
183 | | PPC44x_TLB_4K; | 315 | /* Guest mapping is larger than or equal to host page size. We can use |
184 | stlbe->word1 = (hpaddr & 0xfffffc00) | ((hpaddr >> 32) & 0xf); | 316 | * a "native" host mapping. */ |
185 | stlbe->word2 = kvmppc_44x_tlb_shadow_attrib(flags, | 317 | stlbe.word0 |= (gvaddr & PAGE_MASK) | PPC44x_TLBE_SIZE; |
186 | vcpu->arch.msr & MSR_PR); | 318 | } else { |
187 | kvmppc_tlbe_set_modified(vcpu, victim); | 319 | /* Guest mapping is smaller than host page size. We must restrict the |
320 | * size of the mapping to be at most the smaller of the two, but for | ||
321 | * simplicity we fall back to a 4K mapping (this is probably what the | ||
322 | * guest is using anyways). */ | ||
323 | stlbe.word0 |= (gvaddr & PAGE_MASK_4K) | PPC44x_TLB_4K; | ||
324 | |||
325 | /* 'hpaddr' is a host page, which is larger than the mapping we're | ||
326 | * inserting here. To compensate, we must add the in-page offset to the | ||
327 | * sub-page. */ | ||
328 | hpaddr |= gpaddr & (PAGE_MASK ^ PAGE_MASK_4K); | ||
329 | } | ||
188 | 330 | ||
189 | KVMTRACE_5D(STLB_WRITE, vcpu, victim, | 331 | stlbe.word1 = (hpaddr & 0xfffffc00) | ((hpaddr >> 32) & 0xf); |
190 | stlbe->tid, stlbe->word0, stlbe->word1, stlbe->word2, | 332 | stlbe.word2 = kvmppc_44x_tlb_shadow_attrib(flags, |
191 | handler); | 333 | vcpu->arch.msr & MSR_PR); |
334 | stlbe.tid = !(asid & 0xff); | ||
335 | |||
336 | /* Keep track of the reference so we can properly release it later. */ | ||
337 | ref = &vcpu_44x->shadow_refs[victim]; | ||
338 | ref->page = new_page; | ||
339 | ref->gtlb_index = gtlb_index; | ||
340 | ref->writeable = !!(stlbe.word2 & PPC44x_TLB_UW); | ||
341 | ref->tid = stlbe.tid; | ||
342 | |||
343 | /* Insert shadow mapping into hardware TLB. */ | ||
344 | kvmppc_44x_tlbe_set_modified(vcpu_44x, victim); | ||
345 | kvmppc_44x_tlbwe(victim, &stlbe); | ||
346 | KVMTRACE_5D(STLB_WRITE, vcpu, victim, stlbe.tid, stlbe.word0, stlbe.word1, | ||
347 | stlbe.word2, handler); | ||
192 | } | 348 | } |
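The else-branch above is the interesting case: the guest asked for a mapping smaller than a host page, so the shadow entry is forced to 4K and the host real address must be advanced to the 4K sub-page that actually backs the guest address. A standalone sketch of the mask arithmetic, assuming 64K host pages; the constants stand in for the kernel's PAGE_MASK and PAGE_MASK_4K:

    #include <stdint.h>
    #include <stdio.h>

    #define HOST_PAGE_MASK (~0xffffULL)  /* assumed 64K host pages */
    #define PAGE_MASK_4K   (~0xfffULL)   /* 4K shadow mapping */

    int main(void)
    {
        uint64_t hpaddr = 0x12340000ULL; /* base of the backing host page */
        uint64_t gpaddr = 0x00005000ULL; /* guest physical address */

        /* HOST_PAGE_MASK ^ PAGE_MASK_4K keeps exactly the bits that
         * select a 4K sub-page within the host page (bits 12-15 here). */
        hpaddr |= gpaddr & (HOST_PAGE_MASK ^ PAGE_MASK_4K);

        printf("shadow real address: 0x%llx\n",
               (unsigned long long)hpaddr); /* prints 0x12345000 */
        return 0;
    }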
193 | 349 | ||
194 | void kvmppc_mmu_invalidate(struct kvm_vcpu *vcpu, gva_t eaddr, | 350 | /* For a particular guest TLB entry, invalidate the corresponding host TLB |
195 | gva_t eend, u32 asid) | 351 | * mappings and release the host pages. */ |
352 | static void kvmppc_44x_invalidate(struct kvm_vcpu *vcpu, | ||
353 | unsigned int gtlb_index) | ||
196 | { | 354 | { |
197 | unsigned int pid = !(asid & 0xff); | 355 | struct kvmppc_vcpu_44x *vcpu_44x = to_44x(vcpu); |
198 | int i; | 356 | int i; |
199 | 357 | ||
200 | /* XXX Replace loop with fancy data structures. */ | 358 | for (i = 0; i < ARRAY_SIZE(vcpu_44x->shadow_refs); i++) { |
201 | for (i = 0; i <= tlb_44x_hwater; i++) { | 359 | struct kvmppc_44x_shadow_ref *ref = &vcpu_44x->shadow_refs[i]; |
202 | struct tlbe *stlbe = &vcpu->arch.shadow_tlb[i]; | 360 | if (ref->gtlb_index == gtlb_index) |
203 | unsigned int tid; | 361 | kvmppc_44x_shadow_release(vcpu_44x, i); |
362 | } | ||
363 | } | ||
204 | 364 | ||
205 | if (!get_tlb_v(stlbe)) | 365 | void kvmppc_mmu_priv_switch(struct kvm_vcpu *vcpu, int usermode) |
206 | continue; | 366 | { |
367 | vcpu->arch.shadow_pid = !usermode; | ||
368 | } | ||
207 | 369 | ||
208 | if (eend < get_tlb_eaddr(stlbe)) | 370 | void kvmppc_set_pid(struct kvm_vcpu *vcpu, u32 new_pid) |
209 | continue; | 371 | { |
372 | struct kvmppc_vcpu_44x *vcpu_44x = to_44x(vcpu); | ||
373 | int i; | ||
210 | 374 | ||
211 | if (eaddr > get_tlb_end(stlbe)) | 375 | if (unlikely(vcpu->arch.pid == new_pid)) |
212 | continue; | 376 | return; |
213 | 377 | ||
214 | tid = get_tlb_tid(stlbe); | 378 | vcpu->arch.pid = new_pid; |
215 | if (tid && (tid != pid)) | ||
216 | continue; | ||
217 | 379 | ||
218 | kvmppc_44x_shadow_release(vcpu, i); | 380 | /* Guest userspace runs with TID=0 mappings and PID=0, to make sure it |
219 | stlbe->word0 = 0; | 381 | * can't access guest kernel mappings (TID=1). When we switch to a new |
220 | kvmppc_tlbe_set_modified(vcpu, i); | 382 | * guest PID, which will also use host PID=0, we must discard the old guest |
221 | KVMTRACE_5D(STLB_INVAL, vcpu, i, | 383 | * userspace mappings. */ |
222 | stlbe->tid, stlbe->word0, stlbe->word1, | 384 | for (i = 0; i < ARRAY_SIZE(vcpu_44x->shadow_refs); i++) { |
223 | stlbe->word2, handler); | 385 | struct kvmppc_44x_shadow_ref *ref = &vcpu_44x->shadow_refs[i]; |
386 | |||
387 | if (ref->tid == 0) | ||
388 | kvmppc_44x_shadow_release(vcpu_44x, i); | ||
224 | } | 389 | } |
225 | } | 390 | } |
226 | 391 | ||
227 | /* Invalidate all mappings on the privilege switch after PID has been changed. | 392 | static int tlbe_is_host_safe(const struct kvm_vcpu *vcpu, |
228 | * The guest always runs with PID=1, so we must clear the entire TLB when | 393 | const struct kvmppc_44x_tlbe *tlbe) |
229 | * switching address spaces. */ | ||
230 | void kvmppc_mmu_priv_switch(struct kvm_vcpu *vcpu, int usermode) | ||
231 | { | 394 | { |
232 | int i; | 395 | gpa_t gpa; |
233 | 396 | ||
234 | if (vcpu->arch.swap_pid) { | 397 | if (!get_tlb_v(tlbe)) |
235 | /* XXX Replace loop with fancy data structures. */ | 398 | return 0; |
236 | for (i = 0; i <= tlb_44x_hwater; i++) { | 399 | |
237 | struct tlbe *stlbe = &vcpu->arch.shadow_tlb[i]; | 400 | /* Does it match current guest AS? */ |
238 | 401 | /* XXX what about IS != DS? */ | |
239 | /* Future optimization: clear only userspace mappings. */ | 402 | if (get_tlb_ts(tlbe) != !!(vcpu->arch.msr & MSR_IS)) |
240 | kvmppc_44x_shadow_release(vcpu, i); | 403 | return 0; |
241 | stlbe->word0 = 0; | 404 | |
242 | kvmppc_tlbe_set_modified(vcpu, i); | 405 | gpa = get_tlb_raddr(tlbe); |
243 | KVMTRACE_5D(STLB_INVAL, vcpu, i, | 406 | if (!gfn_to_memslot(vcpu->kvm, gpa >> PAGE_SHIFT)) |
244 | stlbe->tid, stlbe->word0, stlbe->word1, | 407 | /* Mapping is not for RAM. */ |
245 | stlbe->word2, handler); | 408 | return 0; |
246 | } | 409 | |
247 | vcpu->arch.swap_pid = 0; | 410 | return 1; |
411 | } | ||
412 | |||
413 | int kvmppc_44x_emul_tlbwe(struct kvm_vcpu *vcpu, u8 ra, u8 rs, u8 ws) | ||
414 | { | ||
415 | struct kvmppc_vcpu_44x *vcpu_44x = to_44x(vcpu); | ||
416 | struct kvmppc_44x_tlbe *tlbe; | ||
417 | unsigned int gtlb_index; | ||
418 | |||
419 | gtlb_index = vcpu->arch.gpr[ra]; | ||
420 | if (gtlb_index >= KVM44x_GUEST_TLB_SIZE) { | ||
421 | printk("%s: index %d\n", __func__, gtlb_index); | ||
422 | kvmppc_dump_vcpu(vcpu); | ||
423 | return EMULATE_FAIL; | ||
248 | } | 424 | } |
249 | 425 | ||
250 | vcpu->arch.shadow_pid = !usermode; | 426 | tlbe = &vcpu_44x->guest_tlb[gtlb_index]; |
427 | |||
428 | /* Invalidate shadow mappings for the about-to-be-clobbered TLB entry. */ | ||
429 | if (tlbe->word0 & PPC44x_TLB_VALID) | ||
430 | kvmppc_44x_invalidate(vcpu, gtlb_index); | ||
431 | |||
432 | switch (ws) { | ||
433 | case PPC44x_TLB_PAGEID: | ||
434 | tlbe->tid = get_mmucr_stid(vcpu); | ||
435 | tlbe->word0 = vcpu->arch.gpr[rs]; | ||
436 | break; | ||
437 | |||
438 | case PPC44x_TLB_XLAT: | ||
439 | tlbe->word1 = vcpu->arch.gpr[rs]; | ||
440 | break; | ||
441 | |||
442 | case PPC44x_TLB_ATTRIB: | ||
443 | tlbe->word2 = vcpu->arch.gpr[rs]; | ||
444 | break; | ||
445 | |||
446 | default: | ||
447 | return EMULATE_FAIL; | ||
448 | } | ||
449 | |||
450 | if (tlbe_is_host_safe(vcpu, tlbe)) { | ||
451 | u64 asid; | ||
452 | gva_t eaddr; | ||
453 | gpa_t gpaddr; | ||
454 | u32 flags; | ||
455 | u32 bytes; | ||
456 | |||
457 | eaddr = get_tlb_eaddr(tlbe); | ||
458 | gpaddr = get_tlb_raddr(tlbe); | ||
459 | |||
460 | /* Use the advertised page size to mask effective and real addrs. */ | ||
461 | bytes = get_tlb_bytes(tlbe); | ||
462 | eaddr &= ~(bytes - 1); | ||
463 | gpaddr &= ~(bytes - 1); | ||
464 | |||
465 | asid = (tlbe->word0 & PPC44x_TLB_TS) | tlbe->tid; | ||
466 | flags = tlbe->word2 & 0xffff; | ||
467 | |||
468 | kvmppc_mmu_map(vcpu, eaddr, gpaddr, asid, flags, bytes, gtlb_index); | ||
469 | } | ||
470 | |||
471 | KVMTRACE_5D(GTLB_WRITE, vcpu, gtlb_index, tlbe->tid, tlbe->word0, | ||
472 | tlbe->word1, tlbe->word2, handler); | ||
473 | |||
474 | kvmppc_set_exit_type(vcpu, EMULATED_TLBWE_EXITS); | ||
475 | return EMULATE_DONE; | ||
476 | } | ||
477 | |||
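Note the masking of eaddr and gpaddr in the tlbwe emulation above: because every 440 TLB page size is a power of two, addr & ~(bytes - 1) rounds an address down to its mapping boundary. The identity isolated as a sketch with illustrative values:

    #include <assert.h>
    #include <stdint.h>

    /* Round addr down to a power-of-two mapping size. */
    static inline uint64_t align_down(uint64_t addr, uint64_t bytes)
    {
        return addr & ~(bytes - 1);
    }

    int main(void)
    {
        /* A 64K mapping: 0x12345678 falls in the page at 0x12340000. */
        assert(align_down(0x12345678ULL, 0x10000ULL) == 0x12340000ULL);
        return 0;
    }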
478 | int kvmppc_44x_emul_tlbsx(struct kvm_vcpu *vcpu, u8 rt, u8 ra, u8 rb, u8 rc) | ||
479 | { | ||
480 | u32 ea; | ||
481 | int gtlb_index; | ||
482 | unsigned int as = get_mmucr_sts(vcpu); | ||
483 | unsigned int pid = get_mmucr_stid(vcpu); | ||
484 | |||
485 | ea = vcpu->arch.gpr[rb]; | ||
486 | if (ra) | ||
487 | ea += vcpu->arch.gpr[ra]; | ||
488 | |||
489 | gtlb_index = kvmppc_44x_tlb_index(vcpu, ea, pid, as); | ||
490 | if (rc) { | ||
491 | if (gtlb_index < 0) | ||
492 | vcpu->arch.cr &= ~0x20000000; | ||
493 | else | ||
494 | vcpu->arch.cr |= 0x20000000; | ||
495 | } | ||
496 | vcpu->arch.gpr[rt] = gtlb_index; | ||
497 | |||
498 | kvmppc_set_exit_type(vcpu, EMULATED_TLBSX_EXITS); | ||
499 | return EMULATE_DONE; | ||
251 | } | 500 | } |
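In kvmppc_44x_emul_tlbsx above, the magic constant 0x20000000 is the EQ bit of CR field 0: the record form of tlbsx sets it when a matching TLB entry is found and clears it on a miss. The update isolated for clarity:

    #include <stdint.h>

    #define CR0_EQ 0x20000000u /* EQ bit of CR0 */

    /* Mirror of the rc-form handling above: reflect a TLB-search
     * hit or miss into CR0[EQ]. */
    static void set_cr0_eq(uint32_t *cr, int hit)
    {
        if (hit)
            *cr |= CR0_EQ;
        else
            *cr &= ~CR0_EQ;
    }

    int main(void)
    {
        uint32_t cr = 0;
        set_cr0_eq(&cr, 1); /* cr == 0x20000000 */
        set_cr0_eq(&cr, 0); /* cr == 0 */
        return (int)cr;
    }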
diff --git a/arch/powerpc/kvm/44x_tlb.h b/arch/powerpc/kvm/44x_tlb.h index 2ccd46b6f6b7..772191f29e62 100644 --- a/arch/powerpc/kvm/44x_tlb.h +++ b/arch/powerpc/kvm/44x_tlb.h | |||
@@ -25,48 +25,52 @@ | |||
25 | 25 | ||
26 | extern int kvmppc_44x_tlb_index(struct kvm_vcpu *vcpu, gva_t eaddr, | 26 | extern int kvmppc_44x_tlb_index(struct kvm_vcpu *vcpu, gva_t eaddr, |
27 | unsigned int pid, unsigned int as); | 27 | unsigned int pid, unsigned int as); |
28 | extern struct tlbe *kvmppc_44x_dtlb_search(struct kvm_vcpu *vcpu, gva_t eaddr); | 28 | extern int kvmppc_44x_dtlb_index(struct kvm_vcpu *vcpu, gva_t eaddr); |
29 | extern struct tlbe *kvmppc_44x_itlb_search(struct kvm_vcpu *vcpu, gva_t eaddr); | 29 | extern int kvmppc_44x_itlb_index(struct kvm_vcpu *vcpu, gva_t eaddr); |
30 | |||
31 | extern int kvmppc_44x_emul_tlbsx(struct kvm_vcpu *vcpu, u8 rt, u8 ra, u8 rb, | ||
32 | u8 rc); | ||
33 | extern int kvmppc_44x_emul_tlbwe(struct kvm_vcpu *vcpu, u8 ra, u8 rs, u8 ws); | ||
30 | 34 | ||
31 | /* TLB helper functions */ | 35 | /* TLB helper functions */ |
32 | static inline unsigned int get_tlb_size(const struct tlbe *tlbe) | 36 | static inline unsigned int get_tlb_size(const struct kvmppc_44x_tlbe *tlbe) |
33 | { | 37 | { |
34 | return (tlbe->word0 >> 4) & 0xf; | 38 | return (tlbe->word0 >> 4) & 0xf; |
35 | } | 39 | } |
36 | 40 | ||
37 | static inline gva_t get_tlb_eaddr(const struct tlbe *tlbe) | 41 | static inline gva_t get_tlb_eaddr(const struct kvmppc_44x_tlbe *tlbe) |
38 | { | 42 | { |
39 | return tlbe->word0 & 0xfffffc00; | 43 | return tlbe->word0 & 0xfffffc00; |
40 | } | 44 | } |
41 | 45 | ||
42 | static inline gva_t get_tlb_bytes(const struct tlbe *tlbe) | 46 | static inline gva_t get_tlb_bytes(const struct kvmppc_44x_tlbe *tlbe) |
43 | { | 47 | { |
44 | unsigned int pgsize = get_tlb_size(tlbe); | 48 | unsigned int pgsize = get_tlb_size(tlbe); |
45 | return 1 << 10 << (pgsize << 1); | 49 | return 1 << 10 << (pgsize << 1); |
46 | } | 50 | } |
47 | 51 | ||
48 | static inline gva_t get_tlb_end(const struct tlbe *tlbe) | 52 | static inline gva_t get_tlb_end(const struct kvmppc_44x_tlbe *tlbe) |
49 | { | 53 | { |
50 | return get_tlb_eaddr(tlbe) + get_tlb_bytes(tlbe) - 1; | 54 | return get_tlb_eaddr(tlbe) + get_tlb_bytes(tlbe) - 1; |
51 | } | 55 | } |
52 | 56 | ||
53 | static inline u64 get_tlb_raddr(const struct tlbe *tlbe) | 57 | static inline u64 get_tlb_raddr(const struct kvmppc_44x_tlbe *tlbe) |
54 | { | 58 | { |
55 | u64 word1 = tlbe->word1; | 59 | u64 word1 = tlbe->word1; |
56 | return ((word1 & 0xf) << 32) | (word1 & 0xfffffc00); | 60 | return ((word1 & 0xf) << 32) | (word1 & 0xfffffc00); |
57 | } | 61 | } |
58 | 62 | ||
59 | static inline unsigned int get_tlb_tid(const struct tlbe *tlbe) | 63 | static inline unsigned int get_tlb_tid(const struct kvmppc_44x_tlbe *tlbe) |
60 | { | 64 | { |
61 | return tlbe->tid & 0xff; | 65 | return tlbe->tid & 0xff; |
62 | } | 66 | } |
63 | 67 | ||
64 | static inline unsigned int get_tlb_ts(const struct tlbe *tlbe) | 68 | static inline unsigned int get_tlb_ts(const struct kvmppc_44x_tlbe *tlbe) |
65 | { | 69 | { |
66 | return (tlbe->word0 >> 8) & 0x1; | 70 | return (tlbe->word0 >> 8) & 0x1; |
67 | } | 71 | } |
68 | 72 | ||
69 | static inline unsigned int get_tlb_v(const struct tlbe *tlbe) | 73 | static inline unsigned int get_tlb_v(const struct kvmppc_44x_tlbe *tlbe) |
70 | { | 74 | { |
71 | return (tlbe->word0 >> 9) & 0x1; | 75 | return (tlbe->word0 >> 9) & 0x1; |
72 | } | 76 | } |
@@ -81,7 +85,7 @@ static inline unsigned int get_mmucr_sts(const struct kvm_vcpu *vcpu) | |||
81 | return (vcpu->arch.mmucr >> 16) & 0x1; | 85 | return (vcpu->arch.mmucr >> 16) & 0x1; |
82 | } | 86 | } |
83 | 87 | ||
84 | static inline gpa_t tlb_xlate(struct tlbe *tlbe, gva_t eaddr) | 88 | static inline gpa_t tlb_xlate(struct kvmppc_44x_tlbe *tlbe, gva_t eaddr) |
85 | { | 89 | { |
86 | unsigned int pgmask = get_tlb_bytes(tlbe) - 1; | 90 | unsigned int pgmask = get_tlb_bytes(tlbe) - 1; |
87 | 91 | ||
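The get_tlb_bytes() helper above decodes the 440 SIZE field as 1KB * 4^SIZE, so each increment of the field quadruples the mapping. A quick standalone check of the same shift expression:

    #include <stdio.h>

    /* Same expression as get_tlb_bytes(): 1 << 10 << (size << 1). */
    static unsigned long long tlb_bytes(unsigned int size)
    {
        return 1ULL << 10 << (size << 1);
    }

    int main(void)
    {
        unsigned int size;

        /* Prints 1K, 4K, 16K, 64K, 256K, 1M, 4M, 16M. */
        for (size = 0; size <= 7; size++)
            printf("SIZE=%u -> %llu bytes\n", size, tlb_bytes(size));
        return 0;
    }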
diff --git a/arch/powerpc/kvm/Kconfig b/arch/powerpc/kvm/Kconfig index 53aaa66b25e5..6dbdc4817d80 100644 --- a/arch/powerpc/kvm/Kconfig +++ b/arch/powerpc/kvm/Kconfig | |||
@@ -15,27 +15,33 @@ menuconfig VIRTUALIZATION | |||
15 | if VIRTUALIZATION | 15 | if VIRTUALIZATION |
16 | 16 | ||
17 | config KVM | 17 | config KVM |
18 | bool "Kernel-based Virtual Machine (KVM) support" | 18 | bool |
19 | depends on 44x && EXPERIMENTAL | ||
20 | select PREEMPT_NOTIFIERS | 19 | select PREEMPT_NOTIFIERS |
21 | select ANON_INODES | 20 | select ANON_INODES |
22 | # We can only run on Book E hosts so far | 21 | |
23 | select KVM_BOOKE_HOST | 22 | config KVM_440 |
23 | bool "KVM support for PowerPC 440 processors" | ||
24 | depends on EXPERIMENTAL && 44x | ||
25 | select KVM | ||
24 | ---help--- | 26 | ---help--- |
25 | Support hosting virtualized guest machines. You will also | 27 | Support running unmodified 440 guest kernels in virtual machines on |
26 | need to select one or more of the processor modules below. | 28 | 440 host processors. |
27 | 29 | ||
28 | This module provides access to the hardware capabilities through | 30 | This module provides access to the hardware capabilities through |
29 | a character device node named /dev/kvm. | 31 | a character device node named /dev/kvm. |
30 | 32 | ||
31 | If unsure, say N. | 33 | If unsure, say N. |
32 | 34 | ||
33 | config KVM_BOOKE_HOST | 35 | config KVM_EXIT_TIMING |
34 | bool "KVM host support for Book E PowerPC processors" | 36 | bool "Detailed exit timing" |
35 | depends on KVM && 44x | 37 | depends on KVM |
36 | ---help--- | 38 | ---help--- |
37 | Provides host support for KVM on Book E PowerPC processors. Currently | 39 | Calculate elapsed time for every exit/enter cycle. A per-vcpu |
38 | this works on 440 processors only. | 40 | report is available in debugfs kvm/vm#_vcpu#_timing. |
41 | The overhead is relatively small, however it is not recommended for | ||
42 | production environments. | ||
43 | |||
44 | If unsure, say N. | ||
39 | 45 | ||
40 | config KVM_TRACE | 46 | config KVM_TRACE |
41 | bool "KVM trace support" | 47 | bool "KVM trace support" |
diff --git a/arch/powerpc/kvm/Makefile b/arch/powerpc/kvm/Makefile index 2a5d4397ac4b..df7ba59e6d53 100644 --- a/arch/powerpc/kvm/Makefile +++ b/arch/powerpc/kvm/Makefile | |||
@@ -8,10 +8,16 @@ common-objs-y = $(addprefix ../../../virt/kvm/, kvm_main.o coalesced_mmio.o) | |||
8 | 8 | ||
9 | common-objs-$(CONFIG_KVM_TRACE) += $(addprefix ../../../virt/kvm/, kvm_trace.o) | 9 | common-objs-$(CONFIG_KVM_TRACE) += $(addprefix ../../../virt/kvm/, kvm_trace.o) |
10 | 10 | ||
11 | kvm-objs := $(common-objs-y) powerpc.o emulate.o booke_guest.o | 11 | kvm-objs := $(common-objs-y) powerpc.o emulate.o |
12 | obj-$(CONFIG_KVM_EXIT_TIMING) += timing.o | ||
12 | obj-$(CONFIG_KVM) += kvm.o | 13 | obj-$(CONFIG_KVM) += kvm.o |
13 | 14 | ||
14 | AFLAGS_booke_interrupts.o := -I$(obj) | 15 | AFLAGS_booke_interrupts.o := -I$(obj) |
15 | 16 | ||
16 | kvm-booke-host-objs := booke_host.o booke_interrupts.o 44x_tlb.o | 17 | kvm-440-objs := \ |
17 | obj-$(CONFIG_KVM_BOOKE_HOST) += kvm-booke-host.o | 18 | booke.o \ |
19 | booke_interrupts.o \ | ||
20 | 44x.o \ | ||
21 | 44x_tlb.o \ | ||
22 | 44x_emulate.o | ||
23 | obj-$(CONFIG_KVM_440) += kvm-440.o | ||
diff --git a/arch/powerpc/kvm/booke_guest.c b/arch/powerpc/kvm/booke.c index 7b2591e26bae..35485dd6927e 100644 --- a/arch/powerpc/kvm/booke_guest.c +++ b/arch/powerpc/kvm/booke.c | |||
@@ -24,21 +24,26 @@ | |||
24 | #include <linux/module.h> | 24 | #include <linux/module.h> |
25 | #include <linux/vmalloc.h> | 25 | #include <linux/vmalloc.h> |
26 | #include <linux/fs.h> | 26 | #include <linux/fs.h> |
27 | |||
27 | #include <asm/cputable.h> | 28 | #include <asm/cputable.h> |
28 | #include <asm/uaccess.h> | 29 | #include <asm/uaccess.h> |
29 | #include <asm/kvm_ppc.h> | 30 | #include <asm/kvm_ppc.h> |
31 | #include "timing.h" | ||
32 | #include <asm/cacheflush.h> | ||
33 | #include <asm/kvm_44x.h> | ||
30 | 34 | ||
35 | #include "booke.h" | ||
31 | #include "44x_tlb.h" | 36 | #include "44x_tlb.h" |
32 | 37 | ||
38 | unsigned long kvmppc_booke_handlers; | ||
39 | |||
33 | #define VM_STAT(x) offsetof(struct kvm, stat.x), KVM_STAT_VM | 40 | #define VM_STAT(x) offsetof(struct kvm, stat.x), KVM_STAT_VM |
34 | #define VCPU_STAT(x) offsetof(struct kvm_vcpu, stat.x), KVM_STAT_VCPU | 41 | #define VCPU_STAT(x) offsetof(struct kvm_vcpu, stat.x), KVM_STAT_VCPU |
35 | 42 | ||
36 | struct kvm_stats_debugfs_item debugfs_entries[] = { | 43 | struct kvm_stats_debugfs_item debugfs_entries[] = { |
37 | { "exits", VCPU_STAT(sum_exits) }, | ||
38 | { "mmio", VCPU_STAT(mmio_exits) }, | 44 | { "mmio", VCPU_STAT(mmio_exits) }, |
39 | { "dcr", VCPU_STAT(dcr_exits) }, | 45 | { "dcr", VCPU_STAT(dcr_exits) }, |
40 | { "sig", VCPU_STAT(signal_exits) }, | 46 | { "sig", VCPU_STAT(signal_exits) }, |
41 | { "light", VCPU_STAT(light_exits) }, | ||
42 | { "itlb_r", VCPU_STAT(itlb_real_miss_exits) }, | 47 | { "itlb_r", VCPU_STAT(itlb_real_miss_exits) }, |
43 | { "itlb_v", VCPU_STAT(itlb_virt_miss_exits) }, | 48 | { "itlb_v", VCPU_STAT(itlb_virt_miss_exits) }, |
44 | { "dtlb_r", VCPU_STAT(dtlb_real_miss_exits) }, | 49 | { "dtlb_r", VCPU_STAT(dtlb_real_miss_exits) }, |
@@ -53,103 +58,19 @@ struct kvm_stats_debugfs_item debugfs_entries[] = { | |||
53 | { NULL } | 58 | { NULL } |
54 | }; | 59 | }; |
55 | 60 | ||
56 | static const u32 interrupt_msr_mask[16] = { | ||
57 | [BOOKE_INTERRUPT_CRITICAL] = MSR_ME, | ||
58 | [BOOKE_INTERRUPT_MACHINE_CHECK] = 0, | ||
59 | [BOOKE_INTERRUPT_DATA_STORAGE] = MSR_CE|MSR_ME|MSR_DE, | ||
60 | [BOOKE_INTERRUPT_INST_STORAGE] = MSR_CE|MSR_ME|MSR_DE, | ||
61 | [BOOKE_INTERRUPT_EXTERNAL] = MSR_CE|MSR_ME|MSR_DE, | ||
62 | [BOOKE_INTERRUPT_ALIGNMENT] = MSR_CE|MSR_ME|MSR_DE, | ||
63 | [BOOKE_INTERRUPT_PROGRAM] = MSR_CE|MSR_ME|MSR_DE, | ||
64 | [BOOKE_INTERRUPT_FP_UNAVAIL] = MSR_CE|MSR_ME|MSR_DE, | ||
65 | [BOOKE_INTERRUPT_SYSCALL] = MSR_CE|MSR_ME|MSR_DE, | ||
66 | [BOOKE_INTERRUPT_AP_UNAVAIL] = MSR_CE|MSR_ME|MSR_DE, | ||
67 | [BOOKE_INTERRUPT_DECREMENTER] = MSR_CE|MSR_ME|MSR_DE, | ||
68 | [BOOKE_INTERRUPT_FIT] = MSR_CE|MSR_ME|MSR_DE, | ||
69 | [BOOKE_INTERRUPT_WATCHDOG] = MSR_ME, | ||
70 | [BOOKE_INTERRUPT_DTLB_MISS] = MSR_CE|MSR_ME|MSR_DE, | ||
71 | [BOOKE_INTERRUPT_ITLB_MISS] = MSR_CE|MSR_ME|MSR_DE, | ||
72 | [BOOKE_INTERRUPT_DEBUG] = MSR_ME, | ||
73 | }; | ||
74 | |||
75 | const unsigned char exception_priority[] = { | ||
76 | [BOOKE_INTERRUPT_DATA_STORAGE] = 0, | ||
77 | [BOOKE_INTERRUPT_INST_STORAGE] = 1, | ||
78 | [BOOKE_INTERRUPT_ALIGNMENT] = 2, | ||
79 | [BOOKE_INTERRUPT_PROGRAM] = 3, | ||
80 | [BOOKE_INTERRUPT_FP_UNAVAIL] = 4, | ||
81 | [BOOKE_INTERRUPT_SYSCALL] = 5, | ||
82 | [BOOKE_INTERRUPT_AP_UNAVAIL] = 6, | ||
83 | [BOOKE_INTERRUPT_DTLB_MISS] = 7, | ||
84 | [BOOKE_INTERRUPT_ITLB_MISS] = 8, | ||
85 | [BOOKE_INTERRUPT_MACHINE_CHECK] = 9, | ||
86 | [BOOKE_INTERRUPT_DEBUG] = 10, | ||
87 | [BOOKE_INTERRUPT_CRITICAL] = 11, | ||
88 | [BOOKE_INTERRUPT_WATCHDOG] = 12, | ||
89 | [BOOKE_INTERRUPT_EXTERNAL] = 13, | ||
90 | [BOOKE_INTERRUPT_FIT] = 14, | ||
91 | [BOOKE_INTERRUPT_DECREMENTER] = 15, | ||
92 | }; | ||
93 | |||
94 | const unsigned char priority_exception[] = { | ||
95 | BOOKE_INTERRUPT_DATA_STORAGE, | ||
96 | BOOKE_INTERRUPT_INST_STORAGE, | ||
97 | BOOKE_INTERRUPT_ALIGNMENT, | ||
98 | BOOKE_INTERRUPT_PROGRAM, | ||
99 | BOOKE_INTERRUPT_FP_UNAVAIL, | ||
100 | BOOKE_INTERRUPT_SYSCALL, | ||
101 | BOOKE_INTERRUPT_AP_UNAVAIL, | ||
102 | BOOKE_INTERRUPT_DTLB_MISS, | ||
103 | BOOKE_INTERRUPT_ITLB_MISS, | ||
104 | BOOKE_INTERRUPT_MACHINE_CHECK, | ||
105 | BOOKE_INTERRUPT_DEBUG, | ||
106 | BOOKE_INTERRUPT_CRITICAL, | ||
107 | BOOKE_INTERRUPT_WATCHDOG, | ||
108 | BOOKE_INTERRUPT_EXTERNAL, | ||
109 | BOOKE_INTERRUPT_FIT, | ||
110 | BOOKE_INTERRUPT_DECREMENTER, | ||
111 | }; | ||
112 | |||
113 | |||
114 | void kvmppc_dump_tlbs(struct kvm_vcpu *vcpu) | ||
115 | { | ||
116 | struct tlbe *tlbe; | ||
117 | int i; | ||
118 | |||
119 | printk("vcpu %d TLB dump:\n", vcpu->vcpu_id); | ||
120 | printk("| %2s | %3s | %8s | %8s | %8s |\n", | ||
121 | "nr", "tid", "word0", "word1", "word2"); | ||
122 | |||
123 | for (i = 0; i < PPC44x_TLB_SIZE; i++) { | ||
124 | tlbe = &vcpu->arch.guest_tlb[i]; | ||
125 | if (tlbe->word0 & PPC44x_TLB_VALID) | ||
126 | printk(" G%2d | %02X | %08X | %08X | %08X |\n", | ||
127 | i, tlbe->tid, tlbe->word0, tlbe->word1, | ||
128 | tlbe->word2); | ||
129 | } | ||
130 | |||
131 | for (i = 0; i < PPC44x_TLB_SIZE; i++) { | ||
132 | tlbe = &vcpu->arch.shadow_tlb[i]; | ||
133 | if (tlbe->word0 & PPC44x_TLB_VALID) | ||
134 | printk(" S%2d | %02X | %08X | %08X | %08X |\n", | ||
135 | i, tlbe->tid, tlbe->word0, tlbe->word1, | ||
136 | tlbe->word2); | ||
137 | } | ||
138 | } | ||
139 | |||
140 | /* TODO: use vcpu_printf() */ | 61 | /* TODO: use vcpu_printf() */ |
141 | void kvmppc_dump_vcpu(struct kvm_vcpu *vcpu) | 62 | void kvmppc_dump_vcpu(struct kvm_vcpu *vcpu) |
142 | { | 63 | { |
143 | int i; | 64 | int i; |
144 | 65 | ||
145 | printk("pc: %08x msr: %08x\n", vcpu->arch.pc, vcpu->arch.msr); | 66 | printk("pc: %08lx msr: %08lx\n", vcpu->arch.pc, vcpu->arch.msr); |
146 | printk("lr: %08x ctr: %08x\n", vcpu->arch.lr, vcpu->arch.ctr); | 67 | printk("lr: %08lx ctr: %08lx\n", vcpu->arch.lr, vcpu->arch.ctr); |
147 | printk("srr0: %08x srr1: %08x\n", vcpu->arch.srr0, vcpu->arch.srr1); | 68 | printk("srr0: %08lx srr1: %08lx\n", vcpu->arch.srr0, vcpu->arch.srr1); |
148 | 69 | ||
149 | printk("exceptions: %08lx\n", vcpu->arch.pending_exceptions); | 70 | printk("exceptions: %08lx\n", vcpu->arch.pending_exceptions); |
150 | 71 | ||
151 | for (i = 0; i < 32; i += 4) { | 72 | for (i = 0; i < 32; i += 4) { |
152 | printk("gpr%02d: %08x %08x %08x %08x\n", i, | 73 | printk("gpr%02d: %08lx %08lx %08lx %08lx\n", i, |
153 | vcpu->arch.gpr[i], | 74 | vcpu->arch.gpr[i], |
154 | vcpu->arch.gpr[i+1], | 75 | vcpu->arch.gpr[i+1], |
155 | vcpu->arch.gpr[i+2], | 76 | vcpu->arch.gpr[i+2], |
@@ -157,69 +78,96 @@ void kvmppc_dump_vcpu(struct kvm_vcpu *vcpu) | |||
157 | } | 78 | } |
158 | } | 79 | } |
159 | 80 | ||
160 | /* Check if we are ready to deliver the interrupt */ | 81 | static void kvmppc_booke_queue_irqprio(struct kvm_vcpu *vcpu, |
161 | static int kvmppc_can_deliver_interrupt(struct kvm_vcpu *vcpu, int interrupt) | 82 | unsigned int priority) |
162 | { | 83 | { |
163 | int r; | 84 | set_bit(priority, &vcpu->arch.pending_exceptions); |
85 | } | ||
164 | 86 | ||
165 | switch (interrupt) { | 87 | void kvmppc_core_queue_program(struct kvm_vcpu *vcpu) |
166 | case BOOKE_INTERRUPT_CRITICAL: | 88 | { |
167 | r = vcpu->arch.msr & MSR_CE; | 89 | kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_PROGRAM); |
168 | break; | 90 | } |
169 | case BOOKE_INTERRUPT_MACHINE_CHECK: | 91 | |
170 | r = vcpu->arch.msr & MSR_ME; | 92 | void kvmppc_core_queue_dec(struct kvm_vcpu *vcpu) |
171 | break; | 93 | { |
172 | case BOOKE_INTERRUPT_EXTERNAL: | 94 | kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_DECREMENTER); |
173 | r = vcpu->arch.msr & MSR_EE; | 95 | } |
96 | |||
97 | int kvmppc_core_pending_dec(struct kvm_vcpu *vcpu) | ||
98 | { | ||
99 | return test_bit(BOOKE_IRQPRIO_DECREMENTER, &vcpu->arch.pending_exceptions); | ||
100 | } | ||
101 | |||
102 | void kvmppc_core_queue_external(struct kvm_vcpu *vcpu, | ||
103 | struct kvm_interrupt *irq) | ||
104 | { | ||
105 | kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_EXTERNAL); | ||
106 | } | ||
107 | |||
108 | /* Deliver the interrupt of the corresponding priority, if possible. */ | ||
109 | static int kvmppc_booke_irqprio_deliver(struct kvm_vcpu *vcpu, | ||
110 | unsigned int priority) | ||
111 | { | ||
112 | int allowed = 0; | ||
113 | ulong msr_mask; | ||
114 | |||
115 | switch (priority) { | ||
116 | case BOOKE_IRQPRIO_PROGRAM: | ||
117 | case BOOKE_IRQPRIO_DTLB_MISS: | ||
118 | case BOOKE_IRQPRIO_ITLB_MISS: | ||
119 | case BOOKE_IRQPRIO_SYSCALL: | ||
120 | case BOOKE_IRQPRIO_DATA_STORAGE: | ||
121 | case BOOKE_IRQPRIO_INST_STORAGE: | ||
122 | case BOOKE_IRQPRIO_FP_UNAVAIL: | ||
123 | case BOOKE_IRQPRIO_AP_UNAVAIL: | ||
124 | case BOOKE_IRQPRIO_ALIGNMENT: | ||
125 | allowed = 1; | ||
126 | msr_mask = MSR_CE|MSR_ME|MSR_DE; | ||
174 | break; | 127 | break; |
175 | case BOOKE_INTERRUPT_DECREMENTER: | 128 | case BOOKE_IRQPRIO_CRITICAL: |
176 | r = vcpu->arch.msr & MSR_EE; | 129 | case BOOKE_IRQPRIO_WATCHDOG: |
130 | allowed = vcpu->arch.msr & MSR_CE; | ||
131 | msr_mask = MSR_ME; | ||
177 | break; | 132 | break; |
178 | case BOOKE_INTERRUPT_FIT: | 133 | case BOOKE_IRQPRIO_MACHINE_CHECK: |
179 | r = vcpu->arch.msr & MSR_EE; | 134 | allowed = vcpu->arch.msr & MSR_ME; |
135 | msr_mask = 0; | ||
180 | break; | 136 | break; |
181 | case BOOKE_INTERRUPT_WATCHDOG: | 137 | case BOOKE_IRQPRIO_EXTERNAL: |
182 | r = vcpu->arch.msr & MSR_CE; | 138 | case BOOKE_IRQPRIO_DECREMENTER: |
139 | case BOOKE_IRQPRIO_FIT: | ||
140 | allowed = vcpu->arch.msr & MSR_EE; | ||
141 | msr_mask = MSR_CE|MSR_ME|MSR_DE; | ||
183 | break; | 142 | break; |
184 | case BOOKE_INTERRUPT_DEBUG: | 143 | case BOOKE_IRQPRIO_DEBUG: |
185 | r = vcpu->arch.msr & MSR_DE; | 144 | allowed = vcpu->arch.msr & MSR_DE; |
145 | msr_mask = MSR_ME; | ||
186 | break; | 146 | break; |
187 | default: | ||
188 | r = 1; | ||
189 | } | 147 | } |
190 | 148 | ||
191 | return r; | 149 | if (allowed) { |
192 | } | 150 | vcpu->arch.srr0 = vcpu->arch.pc; |
151 | vcpu->arch.srr1 = vcpu->arch.msr; | ||
152 | vcpu->arch.pc = vcpu->arch.ivpr | vcpu->arch.ivor[priority]; | ||
153 | kvmppc_set_msr(vcpu, vcpu->arch.msr & msr_mask); | ||
193 | 154 | ||
194 | static void kvmppc_deliver_interrupt(struct kvm_vcpu *vcpu, int interrupt) | 155 | clear_bit(priority, &vcpu->arch.pending_exceptions); |
195 | { | ||
196 | switch (interrupt) { | ||
197 | case BOOKE_INTERRUPT_DECREMENTER: | ||
198 | vcpu->arch.tsr |= TSR_DIS; | ||
199 | break; | ||
200 | } | 156 | } |
201 | 157 | ||
202 | vcpu->arch.srr0 = vcpu->arch.pc; | 158 | return allowed; |
203 | vcpu->arch.srr1 = vcpu->arch.msr; | ||
204 | vcpu->arch.pc = vcpu->arch.ivpr | vcpu->arch.ivor[interrupt]; | ||
205 | kvmppc_set_msr(vcpu, vcpu->arch.msr & interrupt_msr_mask[interrupt]); | ||
206 | } | 159 | } |
207 | 160 | ||
208 | /* Check pending exceptions and deliver one, if possible. */ | 161 | /* Check pending exceptions and deliver one, if possible. */ |
209 | void kvmppc_check_and_deliver_interrupts(struct kvm_vcpu *vcpu) | 162 | void kvmppc_core_deliver_interrupts(struct kvm_vcpu *vcpu) |
210 | { | 163 | { |
211 | unsigned long *pending = &vcpu->arch.pending_exceptions; | 164 | unsigned long *pending = &vcpu->arch.pending_exceptions; |
212 | unsigned int exception; | ||
213 | unsigned int priority; | 165 | unsigned int priority; |
214 | 166 | ||
215 | priority = find_first_bit(pending, BITS_PER_BYTE * sizeof(*pending)); | 167 | priority = __ffs(*pending); |
216 | while (priority <= BOOKE_MAX_INTERRUPT) { | 168 | while (priority <= BOOKE_MAX_INTERRUPT) { |
217 | exception = priority_exception[priority]; | 169 | if (kvmppc_booke_irqprio_deliver(vcpu, priority)) |
218 | if (kvmppc_can_deliver_interrupt(vcpu, exception)) { | ||
219 | kvmppc_clear_exception(vcpu, exception); | ||
220 | kvmppc_deliver_interrupt(vcpu, exception); | ||
221 | break; | 170 | break; |
222 | } | ||
223 | 171 | ||
224 | priority = find_next_bit(pending, | 172 | priority = find_next_bit(pending, |
225 | BITS_PER_BYTE * sizeof(*pending), | 173 | BITS_PER_BYTE * sizeof(*pending), |
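The rewritten delivery loop above scans the pending bitmap from bit 0 upward; since lower BOOKE_IRQPRIO_* numbers mean higher priority, the first bit whose delivery the current MSR permits wins. A self-contained model of the scan (toy mask function, not kernel code):

    #include <stdio.h>

    /* Deliver the highest-priority (lowest-numbered) pending interrupt
     * the mask function allows; return its priority, or -1. */
    static int deliver_highest(unsigned long pending,
                               int (*allowed)(unsigned int prio))
    {
        unsigned int prio;

        for (prio = 0; prio < 8 * sizeof(pending); prio++) {
            if (!(pending & (1UL << prio)))
                continue;
            if (allowed(prio))
                return prio;
        }
        return -1; /* everything pending is currently masked */
    }

    static int toy_allowed(unsigned int prio)
    {
        return prio != 3; /* pretend priority 3 is masked off */
    }

    int main(void)
    {
        unsigned long pending = (1UL << 3) | (1UL << 13);
        printf("delivered prio %d\n",
               deliver_highest(pending, toy_allowed)); /* -> 13 */
        return 0;
    }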
@@ -238,6 +186,9 @@ int kvmppc_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu, | |||
238 | enum emulation_result er; | 186 | enum emulation_result er; |
239 | int r = RESUME_HOST; | 187 | int r = RESUME_HOST; |
240 | 188 | ||
189 | /* Update timing stats before a new last_exit_type is recorded. */ | ||
190 | kvmppc_update_timing_stats(vcpu); | ||
191 | |||
241 | local_irq_enable(); | 192 | local_irq_enable(); |
242 | 193 | ||
243 | run->exit_reason = KVM_EXIT_UNKNOWN; | 194 | run->exit_reason = KVM_EXIT_UNKNOWN; |
@@ -251,21 +202,19 @@ int kvmppc_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu, | |||
251 | break; | 202 | break; |
252 | 203 | ||
253 | case BOOKE_INTERRUPT_EXTERNAL: | 204 | case BOOKE_INTERRUPT_EXTERNAL: |
205 | kvmppc_account_exit(vcpu, EXT_INTR_EXITS); | ||
206 | if (need_resched()) | ||
207 | cond_resched(); | ||
208 | r = RESUME_GUEST; | ||
209 | break; | ||
210 | |||
254 | case BOOKE_INTERRUPT_DECREMENTER: | 211 | case BOOKE_INTERRUPT_DECREMENTER: |
255 | /* Since we switched IVPR back to the host's value, the host | 212 | /* Since we switched IVPR back to the host's value, the host |
256 | * handled this interrupt the moment we enabled interrupts. | 213 | * handled this interrupt the moment we enabled interrupts. |
257 | * Now we just offer it a chance to reschedule the guest. */ | 214 | * Now we just offer it a chance to reschedule the guest. */ |
258 | 215 | kvmppc_account_exit(vcpu, DEC_EXITS); | |
259 | /* XXX At this point the TLB still holds our shadow TLB, so if | ||
260 | * we do reschedule the host will fault over it. Perhaps we | ||
261 | * should politely restore the host's entries to minimize | ||
262 | * misses before ceding control. */ | ||
263 | if (need_resched()) | 216 | if (need_resched()) |
264 | cond_resched(); | 217 | cond_resched(); |
265 | if (exit_nr == BOOKE_INTERRUPT_DECREMENTER) | ||
266 | vcpu->stat.dec_exits++; | ||
267 | else | ||
268 | vcpu->stat.ext_intr_exits++; | ||
269 | r = RESUME_GUEST; | 218 | r = RESUME_GUEST; |
270 | break; | 219 | break; |
271 | 220 | ||
@@ -274,17 +223,19 @@ int kvmppc_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu, | |||
274 | /* Program traps generated by user-level software must be handled | 223 | /* Program traps generated by user-level software must be handled |
275 | * by the guest kernel. */ | 224 | * by the guest kernel. */ |
276 | vcpu->arch.esr = vcpu->arch.fault_esr; | 225 | vcpu->arch.esr = vcpu->arch.fault_esr; |
277 | kvmppc_queue_exception(vcpu, BOOKE_INTERRUPT_PROGRAM); | 226 | kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_PROGRAM); |
278 | r = RESUME_GUEST; | 227 | r = RESUME_GUEST; |
228 | kvmppc_account_exit(vcpu, USR_PR_INST); | ||
279 | break; | 229 | break; |
280 | } | 230 | } |
281 | 231 | ||
282 | er = kvmppc_emulate_instruction(run, vcpu); | 232 | er = kvmppc_emulate_instruction(run, vcpu); |
283 | switch (er) { | 233 | switch (er) { |
284 | case EMULATE_DONE: | 234 | case EMULATE_DONE: |
235 | /* don't overwrite subtypes, just account kvm_stats */ | ||
236 | kvmppc_account_exit_stat(vcpu, EMULATED_INST_EXITS); | ||
285 | /* Future optimization: only reload non-volatiles if | 237 | /* Future optimization: only reload non-volatiles if |
286 | * they were actually modified by emulation. */ | 238 | * they were actually modified by emulation. */ |
287 | vcpu->stat.emulated_inst_exits++; | ||
288 | r = RESUME_GUEST_NV; | 239 | r = RESUME_GUEST_NV; |
289 | break; | 240 | break; |
290 | case EMULATE_DO_DCR: | 241 | case EMULATE_DO_DCR: |
@@ -293,7 +244,7 @@ int kvmppc_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu, | |||
293 | break; | 244 | break; |
294 | case EMULATE_FAIL: | 245 | case EMULATE_FAIL: |
295 | /* XXX Deliver Program interrupt to guest. */ | 246 | /* XXX Deliver Program interrupt to guest. */ |
296 | printk(KERN_CRIT "%s: emulation at %x failed (%08x)\n", | 247 | printk(KERN_CRIT "%s: emulation at %lx failed (%08x)\n", |
297 | __func__, vcpu->arch.pc, vcpu->arch.last_inst); | 248 | __func__, vcpu->arch.pc, vcpu->arch.last_inst); |
298 | /* For debugging, encode the failing instruction and | 249 | /* For debugging, encode the failing instruction and |
299 | * report it to userspace. */ | 250 | * report it to userspace. */ |
@@ -307,48 +258,53 @@ int kvmppc_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu, | |||
307 | break; | 258 | break; |
308 | 259 | ||
309 | case BOOKE_INTERRUPT_FP_UNAVAIL: | 260 | case BOOKE_INTERRUPT_FP_UNAVAIL: |
310 | kvmppc_queue_exception(vcpu, exit_nr); | 261 | kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_FP_UNAVAIL); |
262 | kvmppc_account_exit(vcpu, FP_UNAVAIL); | ||
311 | r = RESUME_GUEST; | 263 | r = RESUME_GUEST; |
312 | break; | 264 | break; |
313 | 265 | ||
314 | case BOOKE_INTERRUPT_DATA_STORAGE: | 266 | case BOOKE_INTERRUPT_DATA_STORAGE: |
315 | vcpu->arch.dear = vcpu->arch.fault_dear; | 267 | vcpu->arch.dear = vcpu->arch.fault_dear; |
316 | vcpu->arch.esr = vcpu->arch.fault_esr; | 268 | vcpu->arch.esr = vcpu->arch.fault_esr; |
317 | kvmppc_queue_exception(vcpu, exit_nr); | 269 | kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_DATA_STORAGE); |
318 | vcpu->stat.dsi_exits++; | 270 | kvmppc_account_exit(vcpu, DSI_EXITS); |
319 | r = RESUME_GUEST; | 271 | r = RESUME_GUEST; |
320 | break; | 272 | break; |
321 | 273 | ||
322 | case BOOKE_INTERRUPT_INST_STORAGE: | 274 | case BOOKE_INTERRUPT_INST_STORAGE: |
323 | vcpu->arch.esr = vcpu->arch.fault_esr; | 275 | vcpu->arch.esr = vcpu->arch.fault_esr; |
324 | kvmppc_queue_exception(vcpu, exit_nr); | 276 | kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_INST_STORAGE); |
325 | vcpu->stat.isi_exits++; | 277 | kvmppc_account_exit(vcpu, ISI_EXITS); |
326 | r = RESUME_GUEST; | 278 | r = RESUME_GUEST; |
327 | break; | 279 | break; |
328 | 280 | ||
329 | case BOOKE_INTERRUPT_SYSCALL: | 281 | case BOOKE_INTERRUPT_SYSCALL: |
330 | kvmppc_queue_exception(vcpu, exit_nr); | 282 | kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_SYSCALL); |
331 | vcpu->stat.syscall_exits++; | 283 | kvmppc_account_exit(vcpu, SYSCALL_EXITS); |
332 | r = RESUME_GUEST; | 284 | r = RESUME_GUEST; |
333 | break; | 285 | break; |
334 | 286 | ||
287 | /* XXX move to a 440-specific file. */ | ||
335 | case BOOKE_INTERRUPT_DTLB_MISS: { | 288 | case BOOKE_INTERRUPT_DTLB_MISS: { |
336 | struct tlbe *gtlbe; | 289 | struct kvmppc_vcpu_44x *vcpu_44x = to_44x(vcpu); |
290 | struct kvmppc_44x_tlbe *gtlbe; | ||
337 | unsigned long eaddr = vcpu->arch.fault_dear; | 291 | unsigned long eaddr = vcpu->arch.fault_dear; |
292 | int gtlb_index; | ||
338 | gfn_t gfn; | 293 | gfn_t gfn; |
339 | 294 | ||
340 | /* Check the guest TLB. */ | 295 | /* Check the guest TLB. */ |
341 | gtlbe = kvmppc_44x_dtlb_search(vcpu, eaddr); | 296 | gtlb_index = kvmppc_44x_dtlb_index(vcpu, eaddr); |
342 | if (!gtlbe) { | 297 | if (gtlb_index < 0) { |
343 | /* The guest didn't have a mapping for it. */ | 298 | /* The guest didn't have a mapping for it. */ |
344 | kvmppc_queue_exception(vcpu, exit_nr); | 299 | kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_DTLB_MISS); |
345 | vcpu->arch.dear = vcpu->arch.fault_dear; | 300 | vcpu->arch.dear = vcpu->arch.fault_dear; |
346 | vcpu->arch.esr = vcpu->arch.fault_esr; | 301 | vcpu->arch.esr = vcpu->arch.fault_esr; |
347 | vcpu->stat.dtlb_real_miss_exits++; | 302 | kvmppc_account_exit(vcpu, DTLB_REAL_MISS_EXITS); |
348 | r = RESUME_GUEST; | 303 | r = RESUME_GUEST; |
349 | break; | 304 | break; |
350 | } | 305 | } |
351 | 306 | ||
307 | gtlbe = &vcpu_44x->guest_tlb[gtlb_index]; | ||
352 | vcpu->arch.paddr_accessed = tlb_xlate(gtlbe, eaddr); | 308 | vcpu->arch.paddr_accessed = tlb_xlate(gtlbe, eaddr); |
353 | gfn = vcpu->arch.paddr_accessed >> PAGE_SHIFT; | 309 | gfn = vcpu->arch.paddr_accessed >> PAGE_SHIFT; |
354 | 310 | ||
@@ -359,38 +315,45 @@ int kvmppc_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu, | |||
359 | * b) the guest used a large mapping which we're faking | 315 | * b) the guest used a large mapping which we're faking |
360 | * Either way, we need to satisfy the fault without | 316 | * Either way, we need to satisfy the fault without |
361 | * invoking the guest. */ | 317 | * invoking the guest. */ |
362 | kvmppc_mmu_map(vcpu, eaddr, gfn, gtlbe->tid, | 318 | kvmppc_mmu_map(vcpu, eaddr, vcpu->arch.paddr_accessed, gtlbe->tid, |
363 | gtlbe->word2); | 319 | gtlbe->word2, get_tlb_bytes(gtlbe), gtlb_index); |
364 | vcpu->stat.dtlb_virt_miss_exits++; | 320 | kvmppc_account_exit(vcpu, DTLB_VIRT_MISS_EXITS); |
365 | r = RESUME_GUEST; | 321 | r = RESUME_GUEST; |
366 | } else { | 322 | } else { |
367 | /* Guest has mapped and accessed a page which is not | 323 | /* Guest has mapped and accessed a page which is not |
368 | * actually RAM. */ | 324 | * actually RAM. */ |
369 | r = kvmppc_emulate_mmio(run, vcpu); | 325 | r = kvmppc_emulate_mmio(run, vcpu); |
326 | kvmppc_account_exit(vcpu, MMIO_EXITS); | ||
370 | } | 327 | } |
371 | 328 | ||
372 | break; | 329 | break; |
373 | } | 330 | } |
374 | 331 | ||
332 | /* XXX move to a 440-specific file. */ | ||
375 | case BOOKE_INTERRUPT_ITLB_MISS: { | 333 | case BOOKE_INTERRUPT_ITLB_MISS: { |
376 | struct tlbe *gtlbe; | 334 | struct kvmppc_vcpu_44x *vcpu_44x = to_44x(vcpu); |
335 | struct kvmppc_44x_tlbe *gtlbe; | ||
377 | unsigned long eaddr = vcpu->arch.pc; | 336 | unsigned long eaddr = vcpu->arch.pc; |
337 | gpa_t gpaddr; | ||
378 | gfn_t gfn; | 338 | gfn_t gfn; |
339 | int gtlb_index; | ||
379 | 340 | ||
380 | r = RESUME_GUEST; | 341 | r = RESUME_GUEST; |
381 | 342 | ||
382 | /* Check the guest TLB. */ | 343 | /* Check the guest TLB. */ |
383 | gtlbe = kvmppc_44x_itlb_search(vcpu, eaddr); | 344 | gtlb_index = kvmppc_44x_itlb_index(vcpu, eaddr); |
384 | if (!gtlbe) { | 345 | if (gtlb_index < 0) { |
385 | /* The guest didn't have a mapping for it. */ | 346 | /* The guest didn't have a mapping for it. */ |
386 | kvmppc_queue_exception(vcpu, exit_nr); | 347 | kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_ITLB_MISS); |
387 | vcpu->stat.itlb_real_miss_exits++; | 348 | kvmppc_account_exit(vcpu, ITLB_REAL_MISS_EXITS); |
388 | break; | 349 | break; |
389 | } | 350 | } |
390 | 351 | ||
391 | vcpu->stat.itlb_virt_miss_exits++; | 352 | kvmppc_account_exit(vcpu, ITLB_VIRT_MISS_EXITS); |
392 | 353 | ||
393 | gfn = tlb_xlate(gtlbe, eaddr) >> PAGE_SHIFT; | 354 | gtlbe = &vcpu_44x->guest_tlb[gtlb_index]; |
355 | gpaddr = tlb_xlate(gtlbe, eaddr); | ||
356 | gfn = gpaddr >> PAGE_SHIFT; | ||
394 | 357 | ||
395 | if (kvm_is_visible_gfn(vcpu->kvm, gfn)) { | 358 | if (kvm_is_visible_gfn(vcpu->kvm, gfn)) { |
396 | /* The guest TLB had a mapping, but the shadow TLB | 359 | /* The guest TLB had a mapping, but the shadow TLB |
@@ -399,12 +362,11 @@ int kvmppc_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu, | |||
399 | * b) the guest used a large mapping which we're faking | 362 | * b) the guest used a large mapping which we're faking |
400 | * Either way, we need to satisfy the fault without | 363 | * Either way, we need to satisfy the fault without |
401 | * invoking the guest. */ | 364 | * invoking the guest. */ |
402 | kvmppc_mmu_map(vcpu, eaddr, gfn, gtlbe->tid, | 365 | kvmppc_mmu_map(vcpu, eaddr, gpaddr, gtlbe->tid, |
403 | gtlbe->word2); | 366 | gtlbe->word2, get_tlb_bytes(gtlbe), gtlb_index); |
404 | } else { | 367 | } else { |
405 | /* Guest mapped and leaped at non-RAM! */ | 368 | /* Guest mapped and leaped at non-RAM! */ |
406 | kvmppc_queue_exception(vcpu, | 369 | kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_MACHINE_CHECK); |
407 | BOOKE_INTERRUPT_MACHINE_CHECK); | ||
408 | } | 370 | } |
409 | 371 | ||
410 | break; | 372 | break; |
@@ -421,6 +383,7 @@ int kvmppc_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu, | |||
421 | mtspr(SPRN_DBSR, dbsr); | 383 | mtspr(SPRN_DBSR, dbsr); |
422 | 384 | ||
423 | run->exit_reason = KVM_EXIT_DEBUG; | 385 | run->exit_reason = KVM_EXIT_DEBUG; |
386 | kvmppc_account_exit(vcpu, DEBUG_EXITS); | ||
424 | r = RESUME_HOST; | 387 | r = RESUME_HOST; |
425 | break; | 388 | break; |
426 | } | 389 | } |
@@ -432,10 +395,8 @@ int kvmppc_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu, | |||
432 | 395 | ||
433 | local_irq_disable(); | 396 | local_irq_disable(); |
434 | 397 | ||
435 | kvmppc_check_and_deliver_interrupts(vcpu); | 398 | kvmppc_core_deliver_interrupts(vcpu); |
436 | 399 | ||
437 | /* Do some exit accounting. */ | ||
438 | vcpu->stat.sum_exits++; | ||
439 | if (!(r & RESUME_HOST)) { | 400 | if (!(r & RESUME_HOST)) { |
440 | /* To avoid clobbering exit_reason, only check for signals if | 401 | /* To avoid clobbering exit_reason, only check for signals if |
441 | * we aren't already exiting to userspace for some other | 402 | * we aren't already exiting to userspace for some other |
@@ -443,22 +404,7 @@ int kvmppc_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu, | |||
443 | if (signal_pending(current)) { | 404 | if (signal_pending(current)) { |
444 | run->exit_reason = KVM_EXIT_INTR; | 405 | run->exit_reason = KVM_EXIT_INTR; |
445 | r = (-EINTR << 2) | RESUME_HOST | (r & RESUME_FLAG_NV); | 406 | r = (-EINTR << 2) | RESUME_HOST | (r & RESUME_FLAG_NV); |
446 | 407 | kvmppc_account_exit(vcpu, SIGNAL_EXITS); | |
447 | vcpu->stat.signal_exits++; | ||
448 | } else { | ||
449 | vcpu->stat.light_exits++; | ||
450 | } | ||
451 | } else { | ||
452 | switch (run->exit_reason) { | ||
453 | case KVM_EXIT_MMIO: | ||
454 | vcpu->stat.mmio_exits++; | ||
455 | break; | ||
456 | case KVM_EXIT_DCR: | ||
457 | vcpu->stat.dcr_exits++; | ||
458 | break; | ||
459 | case KVM_EXIT_INTR: | ||
460 | vcpu->stat.signal_exits++; | ||
461 | break; | ||
462 | } | 408 | } |
463 | } | 409 | } |
464 | 410 | ||
@@ -468,20 +414,6 @@ int kvmppc_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu, | |||
468 | /* Initial guest state: 16MB mapping 0 -> 0, PC = 0, MSR = 0, R1 = 16MB */ | 414 | /* Initial guest state: 16MB mapping 0 -> 0, PC = 0, MSR = 0, R1 = 16MB */ |
469 | int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu) | 415 | int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu) |
470 | { | 416 | { |
471 | struct tlbe *tlbe = &vcpu->arch.guest_tlb[0]; | ||
472 | |||
473 | tlbe->tid = 0; | ||
474 | tlbe->word0 = PPC44x_TLB_16M | PPC44x_TLB_VALID; | ||
475 | tlbe->word1 = 0; | ||
476 | tlbe->word2 = PPC44x_TLB_SX | PPC44x_TLB_SW | PPC44x_TLB_SR; | ||
477 | |||
478 | tlbe++; | ||
479 | tlbe->tid = 0; | ||
480 | tlbe->word0 = 0xef600000 | PPC44x_TLB_4K | PPC44x_TLB_VALID; | ||
481 | tlbe->word1 = 0xef600000; | ||
482 | tlbe->word2 = PPC44x_TLB_SX | PPC44x_TLB_SW | PPC44x_TLB_SR | ||
483 | | PPC44x_TLB_I | PPC44x_TLB_G; | ||
484 | |||
485 | vcpu->arch.pc = 0; | 417 | vcpu->arch.pc = 0; |
486 | vcpu->arch.msr = 0; | 418 | vcpu->arch.msr = 0; |
487 | vcpu->arch.gpr[1] = (16<<20) - 8; /* -8 for the callee-save LR slot */ | 419 | vcpu->arch.gpr[1] = (16<<20) - 8; /* -8 for the callee-save LR slot */ |
@@ -492,12 +424,9 @@ int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu) | |||
492 | * before it's programmed its own IVPR. */ | 424 | * before it's programmed its own IVPR. */ |
493 | vcpu->arch.ivpr = 0x55550000; | 425 | vcpu->arch.ivpr = 0x55550000; |
494 | 426 | ||
495 | /* Since the guest can directly access the timebase, it must know the | 427 | kvmppc_init_timing_stats(vcpu); |
496 | * real timebase frequency. Accordingly, it must see the state of | ||
497 | * CCR1[TCS]. */ | ||
498 | vcpu->arch.ccr1 = mfspr(SPRN_CCR1); | ||
499 | 428 | ||
500 | return 0; | 429 | return kvmppc_core_vcpu_setup(vcpu); |
501 | } | 430 | } |
502 | 431 | ||
503 | int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs) | 432 | int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs) |
@@ -536,7 +465,7 @@ int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs) | |||
536 | vcpu->arch.ctr = regs->ctr; | 465 | vcpu->arch.ctr = regs->ctr; |
537 | vcpu->arch.lr = regs->lr; | 466 | vcpu->arch.lr = regs->lr; |
538 | vcpu->arch.xer = regs->xer; | 467 | vcpu->arch.xer = regs->xer; |
539 | vcpu->arch.msr = regs->msr; | 468 | kvmppc_set_msr(vcpu, regs->msr); |
540 | vcpu->arch.srr0 = regs->srr0; | 469 | vcpu->arch.srr0 = regs->srr0; |
541 | vcpu->arch.srr1 = regs->srr1; | 470 | vcpu->arch.srr1 = regs->srr1; |
542 | vcpu->arch.sprg0 = regs->sprg0; | 471 | vcpu->arch.sprg0 = regs->sprg0; |
@@ -575,31 +504,62 @@ int kvm_arch_vcpu_ioctl_set_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu) | |||
575 | return -ENOTSUPP; | 504 | return -ENOTSUPP; |
576 | } | 505 | } |
577 | 506 | ||
578 | /* 'linear_address' is actually an encoding of AS|PID|EADDR . */ | ||
579 | int kvm_arch_vcpu_ioctl_translate(struct kvm_vcpu *vcpu, | 507 | int kvm_arch_vcpu_ioctl_translate(struct kvm_vcpu *vcpu, |
580 | struct kvm_translation *tr) | 508 | struct kvm_translation *tr) |
581 | { | 509 | { |
582 | struct tlbe *gtlbe; | 510 | return kvmppc_core_vcpu_translate(vcpu, tr); |
583 | int index; | 511 | } |
584 | gva_t eaddr; | ||
585 | u8 pid; | ||
586 | u8 as; | ||
587 | |||
588 | eaddr = tr->linear_address; | ||
589 | pid = (tr->linear_address >> 32) & 0xff; | ||
590 | as = (tr->linear_address >> 40) & 0x1; | ||
591 | |||
592 | index = kvmppc_44x_tlb_index(vcpu, eaddr, pid, as); | ||
593 | if (index == -1) { | ||
594 | tr->valid = 0; | ||
595 | return 0; | ||
596 | } | ||
597 | 512 | ||
598 | gtlbe = &vcpu->arch.guest_tlb[index]; | 513 | int kvmppc_booke_init(void) |
514 | { | ||
515 | unsigned long ivor[16]; | ||
516 | unsigned long max_ivor = 0; | ||
517 | int i; | ||
599 | 518 | ||
600 | tr->physical_address = tlb_xlate(gtlbe, eaddr); | 519 | /* We install our own exception handlers by hijacking IVPR. Since IVPR |
601 | /* XXX what does "writeable" and "usermode" even mean? */ | 520 | * holds only the top 16 address bits, we need a 64KB-aligned block. */
602 | tr->valid = 1; | 521 | kvmppc_booke_handlers = __get_free_pages(GFP_KERNEL | __GFP_ZERO, |
522 | VCPU_SIZE_ORDER); | ||
523 | if (!kvmppc_booke_handlers) | ||
524 | return -ENOMEM; | ||
525 | |||
526 | /* XXX make sure our handlers are smaller than Linux's */ | ||
527 | |||
528 | /* Copy our interrupt handlers to match host IVORs. That way we don't | ||
529 | * have to swap the IVORs on every guest/host transition. */ | ||
530 | ivor[0] = mfspr(SPRN_IVOR0); | ||
531 | ivor[1] = mfspr(SPRN_IVOR1); | ||
532 | ivor[2] = mfspr(SPRN_IVOR2); | ||
533 | ivor[3] = mfspr(SPRN_IVOR3); | ||
534 | ivor[4] = mfspr(SPRN_IVOR4); | ||
535 | ivor[5] = mfspr(SPRN_IVOR5); | ||
536 | ivor[6] = mfspr(SPRN_IVOR6); | ||
537 | ivor[7] = mfspr(SPRN_IVOR7); | ||
538 | ivor[8] = mfspr(SPRN_IVOR8); | ||
539 | ivor[9] = mfspr(SPRN_IVOR9); | ||
540 | ivor[10] = mfspr(SPRN_IVOR10); | ||
541 | ivor[11] = mfspr(SPRN_IVOR11); | ||
542 | ivor[12] = mfspr(SPRN_IVOR12); | ||
543 | ivor[13] = mfspr(SPRN_IVOR13); | ||
544 | ivor[14] = mfspr(SPRN_IVOR14); | ||
545 | ivor[15] = mfspr(SPRN_IVOR15); | ||
546 | |||
547 | for (i = 0; i < 16; i++) { | ||
548 | if (ivor[i] > max_ivor) | ||
549 | max_ivor = ivor[i]; | ||
550 | |||
551 | memcpy((void *)kvmppc_booke_handlers + ivor[i], | ||
552 | kvmppc_handlers_start + i * kvmppc_handler_len, | ||
553 | kvmppc_handler_len); | ||
554 | } | ||
555 | flush_icache_range(kvmppc_booke_handlers, | ||
556 | kvmppc_booke_handlers + max_ivor + kvmppc_handler_len); | ||
603 | 557 | ||
604 | return 0; | 558 | return 0; |
605 | } | 559 | } |
560 | |||
561 | void __exit kvmppc_booke_exit(void) | ||
562 | { | ||
563 | free_pages(kvmppc_booke_handlers, VCPU_SIZE_ORDER); | ||
564 | kvm_exit(); | ||
565 | } | ||
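The translate ioctl now delegates to kvmppc_core_vcpu_translate(), but its packed 'linear_address' layout can still be read off the removed decoding code above: effective address in bits 0-31, PID in bits 32-39, AS in bit 40. A round-trip sketch of that packing:

    #include <assert.h>
    #include <stdint.h>

    static uint64_t pack_linear(uint32_t eaddr, uint8_t pid, uint8_t as)
    {
        return (uint64_t)eaddr |
               ((uint64_t)pid << 32) |
               ((uint64_t)(as & 1) << 40);
    }

    int main(void)
    {
        uint64_t la = pack_linear(0xc0000000u, 0x2a, 1);

        assert((uint32_t)la == 0xc0000000u); /* EADDR */
        assert(((la >> 32) & 0xff) == 0x2a); /* PID */
        assert(((la >> 40) & 0x1) == 1);     /* AS */
        return 0;
    }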
diff --git a/arch/powerpc/kvm/booke.h b/arch/powerpc/kvm/booke.h new file mode 100644 index 000000000000..cf7c94ca24bf --- /dev/null +++ b/arch/powerpc/kvm/booke.h | |||
@@ -0,0 +1,60 @@ | |||
1 | /* | ||
2 | * This program is free software; you can redistribute it and/or modify | ||
3 | * it under the terms of the GNU General Public License, version 2, as | ||
4 | * published by the Free Software Foundation. | ||
5 | * | ||
6 | * This program is distributed in the hope that it will be useful, | ||
7 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
8 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
9 | * GNU General Public License for more details. | ||
10 | * | ||
11 | * You should have received a copy of the GNU General Public License | ||
12 | * along with this program; if not, write to the Free Software | ||
13 | * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. | ||
14 | * | ||
15 | * Copyright IBM Corp. 2008 | ||
16 | * | ||
17 | * Authors: Hollis Blanchard <hollisb@us.ibm.com> | ||
18 | */ | ||
19 | |||
20 | #ifndef __KVM_BOOKE_H__ | ||
21 | #define __KVM_BOOKE_H__ | ||
22 | |||
23 | #include <linux/types.h> | ||
24 | #include <linux/kvm_host.h> | ||
25 | #include "timing.h" | ||
26 | |||
27 | /* interrupt priority ordering */ | ||
28 | #define BOOKE_IRQPRIO_DATA_STORAGE 0 | ||
29 | #define BOOKE_IRQPRIO_INST_STORAGE 1 | ||
30 | #define BOOKE_IRQPRIO_ALIGNMENT 2 | ||
31 | #define BOOKE_IRQPRIO_PROGRAM 3 | ||
32 | #define BOOKE_IRQPRIO_FP_UNAVAIL 4 | ||
33 | #define BOOKE_IRQPRIO_SYSCALL 5 | ||
34 | #define BOOKE_IRQPRIO_AP_UNAVAIL 6 | ||
35 | #define BOOKE_IRQPRIO_DTLB_MISS 7 | ||
36 | #define BOOKE_IRQPRIO_ITLB_MISS 8 | ||
37 | #define BOOKE_IRQPRIO_MACHINE_CHECK 9 | ||
38 | #define BOOKE_IRQPRIO_DEBUG 10 | ||
39 | #define BOOKE_IRQPRIO_CRITICAL 11 | ||
40 | #define BOOKE_IRQPRIO_WATCHDOG 12 | ||
41 | #define BOOKE_IRQPRIO_EXTERNAL 13 | ||
42 | #define BOOKE_IRQPRIO_FIT 14 | ||
43 | #define BOOKE_IRQPRIO_DECREMENTER 15 | ||
44 | |||
45 | /* Helper function for "full" MSR writes. No need to call this if only EE is | ||
46 | * changing. */ | ||
47 | static inline void kvmppc_set_msr(struct kvm_vcpu *vcpu, u32 new_msr) | ||
48 | { | ||
49 | if ((new_msr & MSR_PR) != (vcpu->arch.msr & MSR_PR)) | ||
50 | kvmppc_mmu_priv_switch(vcpu, new_msr & MSR_PR); | ||
51 | |||
52 | vcpu->arch.msr = new_msr; | ||
53 | |||
54 | if (vcpu->arch.msr & MSR_WE) { | ||
55 | kvm_vcpu_block(vcpu); | ||
56 | kvmppc_set_exit_type(vcpu, EMULATED_MTMSRWE_EXITS); | ||
57 | } | ||
58 | } | ||
59 | |||
60 | #endif /* __KVM_BOOKE_H__ */ | ||
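kvmppc_set_msr() above bundles two side effects with the register write: flipping MSR_PR re-keys the shadow MMU for the new privilege level, and setting MSR_WE (wait enable) blocks the vcpu until an interrupt arrives. A stripped-down model of the decision logic, with stubbed actions and assumed Book E bit positions (MSR_PR = 1<<14, MSR_WE = 1<<18):

    #include <stdio.h>

    #define MSR_PR (1u << 14) /* problem (user) state -- assumed value */
    #define MSR_WE (1u << 18) /* wait enable -- assumed value */

    struct toy_vcpu { unsigned int msr; };

    static void set_msr(struct toy_vcpu *v, unsigned int new_msr)
    {
        /* Only a change of the PR bit requires an MMU switch. */
        if ((new_msr ^ v->msr) & MSR_PR)
            printf("re-key shadow MMU for %s mode\n",
                   (new_msr & MSR_PR) ? "user" : "kernel");

        v->msr = new_msr;

        if (v->msr & MSR_WE)
            printf("vcpu idles until the next interrupt\n");
    }

    int main(void)
    {
        struct toy_vcpu v = { 0 };
        set_msr(&v, MSR_PR);          /* enter user mode */
        set_msr(&v, MSR_PR | MSR_WE); /* user mode, waiting */
        return 0;
    }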
diff --git a/arch/powerpc/kvm/booke_host.c b/arch/powerpc/kvm/booke_host.c deleted file mode 100644 index b480341bc31e..000000000000 --- a/arch/powerpc/kvm/booke_host.c +++ /dev/null | |||
@@ -1,83 +0,0 @@ | |||
1 | /* | ||
2 | * This program is free software; you can redistribute it and/or modify | ||
3 | * it under the terms of the GNU General Public License, version 2, as | ||
4 | * published by the Free Software Foundation. | ||
5 | * | ||
6 | * This program is distributed in the hope that it will be useful, | ||
7 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
8 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
9 | * GNU General Public License for more details. | ||
10 | * | ||
11 | * You should have received a copy of the GNU General Public License | ||
12 | * along with this program; if not, write to the Free Software | ||
13 | * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. | ||
14 | * | ||
15 | * Copyright IBM Corp. 2008 | ||
16 | * | ||
17 | * Authors: Hollis Blanchard <hollisb@us.ibm.com> | ||
18 | */ | ||
19 | |||
20 | #include <linux/errno.h> | ||
21 | #include <linux/kvm_host.h> | ||
22 | #include <linux/module.h> | ||
23 | #include <asm/cacheflush.h> | ||
24 | #include <asm/kvm_ppc.h> | ||
25 | |||
26 | unsigned long kvmppc_booke_handlers; | ||
27 | |||
28 | static int kvmppc_booke_init(void) | ||
29 | { | ||
30 | unsigned long ivor[16]; | ||
31 | unsigned long max_ivor = 0; | ||
32 | int i; | ||
33 | |||
34 | /* We install our own exception handlers by hijacking IVPR. IVPR must | ||
35 | * be 16-bit aligned, so we need a 64KB allocation. */ | ||
36 | kvmppc_booke_handlers = __get_free_pages(GFP_KERNEL | __GFP_ZERO, | ||
37 | VCPU_SIZE_ORDER); | ||
38 | if (!kvmppc_booke_handlers) | ||
39 | return -ENOMEM; | ||
40 | |||
41 | /* XXX make sure our handlers are smaller than Linux's */ | ||
42 | |||
43 | /* Copy our interrupt handlers to match host IVORs. That way we don't | ||
44 | * have to swap the IVORs on every guest/host transition. */ | ||
45 | ivor[0] = mfspr(SPRN_IVOR0); | ||
46 | ivor[1] = mfspr(SPRN_IVOR1); | ||
47 | ivor[2] = mfspr(SPRN_IVOR2); | ||
48 | ivor[3] = mfspr(SPRN_IVOR3); | ||
49 | ivor[4] = mfspr(SPRN_IVOR4); | ||
50 | ivor[5] = mfspr(SPRN_IVOR5); | ||
51 | ivor[6] = mfspr(SPRN_IVOR6); | ||
52 | ivor[7] = mfspr(SPRN_IVOR7); | ||
53 | ivor[8] = mfspr(SPRN_IVOR8); | ||
54 | ivor[9] = mfspr(SPRN_IVOR9); | ||
55 | ivor[10] = mfspr(SPRN_IVOR10); | ||
56 | ivor[11] = mfspr(SPRN_IVOR11); | ||
57 | ivor[12] = mfspr(SPRN_IVOR12); | ||
58 | ivor[13] = mfspr(SPRN_IVOR13); | ||
59 | ivor[14] = mfspr(SPRN_IVOR14); | ||
60 | ivor[15] = mfspr(SPRN_IVOR15); | ||
61 | |||
62 | for (i = 0; i < 16; i++) { | ||
63 | if (ivor[i] > max_ivor) | ||
64 | max_ivor = ivor[i]; | ||
65 | |||
66 | memcpy((void *)kvmppc_booke_handlers + ivor[i], | ||
67 | kvmppc_handlers_start + i * kvmppc_handler_len, | ||
68 | kvmppc_handler_len); | ||
69 | } | ||
70 | flush_icache_range(kvmppc_booke_handlers, | ||
71 | kvmppc_booke_handlers + max_ivor + kvmppc_handler_len); | ||
72 | |||
73 | return kvm_init(NULL, sizeof(struct kvm_vcpu), THIS_MODULE); | ||
74 | } | ||
75 | |||
76 | static void __exit kvmppc_booke_exit(void) | ||
77 | { | ||
78 | free_pages(kvmppc_booke_handlers, VCPU_SIZE_ORDER); | ||
79 | kvm_exit(); | ||
80 | } | ||
81 | |||
82 | module_init(kvmppc_booke_init) | ||
83 | module_exit(kvmppc_booke_exit) | ||
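The deleted file owned module_init()/module_exit() and registered with the KVM core directly; in the new layout, kvmppc_booke_init() (in booke.c above) only installs the shared handlers. The module entry points presumably move into a core-specific file such as 44x.c, which is outside this excerpt; a hedged sketch of what that glue would look like (function names and the vcpu size argument are assumptions, not quoted from the commit):

    #include <linux/module.h>
    #include <linux/kvm_host.h>
    #include <asm/kvm_44x.h> /* struct kvmppc_vcpu_44x (assumed path) */
    /* kvmppc_booke_init()/kvmppc_booke_exit() declarations assumed. */

    static int __init kvmppc_44x_init(void)
    {
        int r;

        r = kvmppc_booke_init(); /* install shared booke handlers */
        if (r)
            return r;

        return kvm_init(NULL, sizeof(struct kvmppc_vcpu_44x), THIS_MODULE);
    }

    static void __exit kvmppc_44x_exit(void)
    {
        kvmppc_booke_exit();
    }

    module_init(kvmppc_44x_init);
    module_exit(kvmppc_44x_exit);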
diff --git a/arch/powerpc/kvm/booke_interrupts.S b/arch/powerpc/kvm/booke_interrupts.S index 95e165baf85f..084ebcd7dd83 100644 --- a/arch/powerpc/kvm/booke_interrupts.S +++ b/arch/powerpc/kvm/booke_interrupts.S | |||
@@ -107,6 +107,18 @@ _GLOBAL(kvmppc_resume_host) | |||
107 | li r6, 1 | 107 | li r6, 1 |
108 | slw r6, r6, r5 | 108 | slw r6, r6, r5 |
109 | 109 | ||
110 | #ifdef CONFIG_KVM_EXIT_TIMING | ||
111 | /* save exit time */ | ||
112 | 1: | ||
113 | mfspr r7, SPRN_TBRU | ||
114 | mfspr r8, SPRN_TBRL | ||
115 | mfspr r9, SPRN_TBRU | ||
116 | cmpw r9, r7 | ||
117 | bne 1b | ||
118 | stw r8, VCPU_TIMING_EXIT_TBL(r4) | ||
119 | stw r9, VCPU_TIMING_EXIT_TBU(r4) | ||
120 | #endif | ||
121 | |||
110 | /* Save the faulting instruction and all GPRs for emulation. */ | 122 | /* Save the faulting instruction and all GPRs for emulation. */ |
111 | andi. r7, r6, NEED_INST_MASK | 123 | andi. r7, r6, NEED_INST_MASK |
112 | beq ..skip_inst_copy | 124 | beq ..skip_inst_copy |
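The TBU/TBL/TBU sequence added above (the enter-time stamp further down uses the same pattern) is the standard torn-read guard for sampling a 64-bit timebase on a 32-bit core: if the upper half changed between the two reads, TBL wrapped and the sample is retried. The same loop in C, with hypothetical mftbu()/mftbl() wrappers standing in for the mfspr instructions:

    #include <stdint.h>

    /* Assumed inline-asm wrappers for mfspr SPRN_TBRU / SPRN_TBRL. */
    extern uint32_t mftbu(void);
    extern uint32_t mftbl(void);

    static uint64_t read_timebase64(void)
    {
        uint32_t hi, lo, hi2;

        do {
            hi  = mftbu(); /* upper 32 bits */
            lo  = mftbl(); /* lower 32 bits */
            hi2 = mftbu(); /* re-read the upper half */
        } while (hi != hi2); /* retry if TBL wrapped between reads */

        return ((uint64_t)hi << 32) | lo;
    }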
@@ -335,54 +347,6 @@ lightweight_exit: | |||
335 | lwz r3, VCPU_SHADOW_PID(r4) | 347 | lwz r3, VCPU_SHADOW_PID(r4) |
336 | mtspr SPRN_PID, r3 | 348 | mtspr SPRN_PID, r3 |
337 | 349 | ||
338 | /* Prevent all asynchronous TLB updates. */ | ||
339 | mfmsr r5 | ||
340 | lis r6, (MSR_EE|MSR_CE|MSR_ME|MSR_DE)@h | ||
341 | ori r6, r6, (MSR_EE|MSR_CE|MSR_ME|MSR_DE)@l | ||
342 | andc r6, r5, r6 | ||
343 | mtmsr r6 | ||
344 | |||
345 | /* Load the guest mappings, leaving the host's "pinned" kernel mappings | ||
346 | * in place. */ | ||
347 | mfspr r10, SPRN_MMUCR /* Save host MMUCR. */ | ||
348 | li r5, PPC44x_TLB_SIZE | ||
349 | lis r5, tlb_44x_hwater@ha | ||
350 | lwz r5, tlb_44x_hwater@l(r5) | ||
351 | mtctr r5 | ||
352 | addi r9, r4, VCPU_SHADOW_TLB | ||
353 | addi r5, r4, VCPU_SHADOW_MOD | ||
354 | li r3, 0 | ||
355 | 1: | ||
356 | lbzx r7, r3, r5 | ||
357 | cmpwi r7, 0 | ||
358 | beq 3f | ||
359 | |||
360 | /* Load guest entry. */ | ||
361 | mulli r11, r3, TLBE_BYTES | ||
362 | add r11, r11, r9 | ||
363 | lwz r7, 0(r11) | ||
364 | mtspr SPRN_MMUCR, r7 | ||
365 | lwz r7, 4(r11) | ||
366 | tlbwe r7, r3, PPC44x_TLB_PAGEID | ||
367 | lwz r7, 8(r11) | ||
368 | tlbwe r7, r3, PPC44x_TLB_XLAT | ||
369 | lwz r7, 12(r11) | ||
370 | tlbwe r7, r3, PPC44x_TLB_ATTRIB | ||
371 | 3: | ||
372 | addi r3, r3, 1 /* Increment index. */ | ||
373 | bdnz 1b | ||
374 | |||
375 | mtspr SPRN_MMUCR, r10 /* Restore host MMUCR. */ | ||
376 | |||
377 | /* Clear bitmap of modified TLB entries */ | ||
378 | li r5, PPC44x_TLB_SIZE>>2 | ||
379 | mtctr r5 | ||
380 | addi r5, r4, VCPU_SHADOW_MOD - 4 | ||
381 | li r6, 0 | ||
382 | 1: | ||
383 | stwu r6, 4(r5) | ||
384 | bdnz 1b | ||
385 | |||
386 | iccci 0, 0 /* XXX hack */ | 350 | iccci 0, 0 /* XXX hack */ |
387 | 351 | ||
388 | /* Load some guest volatiles. */ | 352 | /* Load some guest volatiles. */ |
@@ -423,6 +387,18 @@ lightweight_exit: | |||
423 | lwz r3, VCPU_SPRG7(r4) | 387 | lwz r3, VCPU_SPRG7(r4) |
424 | mtspr SPRN_SPRG7, r3 | 388 | mtspr SPRN_SPRG7, r3 |
425 | 389 | ||
390 | #ifdef CONFIG_KVM_EXIT_TIMING | ||
391 | /* save enter time */ | ||
392 | 1: | ||
393 | mfspr r6, SPRN_TBRU | ||
394 | mfspr r7, SPRN_TBRL | ||
395 | mfspr r8, SPRN_TBRU | ||
396 | cmpw r8, r6 | ||
397 | bne 1b | ||
398 | stw r7, VCPU_TIMING_LAST_ENTER_TBL(r4) | ||
399 | stw r8, VCPU_TIMING_LAST_ENTER_TBU(r4) | ||
400 | #endif | ||
401 | |||
426 | /* Finish loading guest volatiles and jump to guest. */ | 402 | /* Finish loading guest volatiles and jump to guest. */ |
427 | lwz r3, VCPU_CTR(r4) | 403 | lwz r3, VCPU_CTR(r4) |
428 | mtctr r3 | 404 | mtctr r3 |
diff --git a/arch/powerpc/kvm/emulate.c b/arch/powerpc/kvm/emulate.c index 0fce4fbdc20d..d1d38daa93fb 100644 --- a/arch/powerpc/kvm/emulate.c +++ b/arch/powerpc/kvm/emulate.c | |||
@@ -23,161 +23,14 @@ | |||
23 | #include <linux/string.h> | 23 | #include <linux/string.h> |
24 | #include <linux/kvm_host.h> | 24 | #include <linux/kvm_host.h> |
25 | 25 | ||
26 | #include <asm/dcr.h> | 26 | #include <asm/reg.h> |
27 | #include <asm/dcr-regs.h> | ||
28 | #include <asm/time.h> | 27 | #include <asm/time.h> |
29 | #include <asm/byteorder.h> | 28 | #include <asm/byteorder.h> |
30 | #include <asm/kvm_ppc.h> | 29 | #include <asm/kvm_ppc.h> |
30 | #include <asm/disassemble.h> | ||
31 | #include "timing.h" | ||
31 | 32 | ||
32 | #include "44x_tlb.h" | 33 | void kvmppc_emulate_dec(struct kvm_vcpu *vcpu) |
33 | |||
34 | /* Instruction decoding */ | ||
35 | static inline unsigned int get_op(u32 inst) | ||
36 | { | ||
37 | return inst >> 26; | ||
38 | } | ||
39 | |||
40 | static inline unsigned int get_xop(u32 inst) | ||
41 | { | ||
42 | return (inst >> 1) & 0x3ff; | ||
43 | } | ||
44 | |||
45 | static inline unsigned int get_sprn(u32 inst) | ||
46 | { | ||
47 | return ((inst >> 16) & 0x1f) | ((inst >> 6) & 0x3e0); | ||
48 | } | ||
49 | |||
50 | static inline unsigned int get_dcrn(u32 inst) | ||
51 | { | ||
52 | return ((inst >> 16) & 0x1f) | ((inst >> 6) & 0x3e0); | ||
53 | } | ||
54 | |||
55 | static inline unsigned int get_rt(u32 inst) | ||
56 | { | ||
57 | return (inst >> 21) & 0x1f; | ||
58 | } | ||
59 | |||
60 | static inline unsigned int get_rs(u32 inst) | ||
61 | { | ||
62 | return (inst >> 21) & 0x1f; | ||
63 | } | ||
64 | |||
65 | static inline unsigned int get_ra(u32 inst) | ||
66 | { | ||
67 | return (inst >> 16) & 0x1f; | ||
68 | } | ||
69 | |||
70 | static inline unsigned int get_rb(u32 inst) | ||
71 | { | ||
72 | return (inst >> 11) & 0x1f; | ||
73 | } | ||
74 | |||
75 | static inline unsigned int get_rc(u32 inst) | ||
76 | { | ||
77 | return inst & 0x1; | ||
78 | } | ||
79 | |||
80 | static inline unsigned int get_ws(u32 inst) | ||
81 | { | ||
82 | return (inst >> 11) & 0x1f; | ||
83 | } | ||
84 | |||
85 | static inline unsigned int get_d(u32 inst) | ||
86 | { | ||
87 | return inst & 0xffff; | ||
88 | } | ||
89 | |||
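The field extractors deleted here (this patch moves them behind asm/disassemble.h) simply slice fixed bit ranges out of the 32-bit instruction word. As a quick sanity check, this sketch runs the same shifts and masks over the well-known encoding of mfmsr r5, 0x7ca000a6; the expected output is op=31 xop=83 rt=5.

#include <stdint.h>
#include <stdio.h>

static unsigned int get_op(uint32_t inst)  { return inst >> 26; }
static unsigned int get_xop(uint32_t inst) { return (inst >> 1) & 0x3ff; }
static unsigned int get_rt(uint32_t inst)  { return (inst >> 21) & 0x1f; }

int main(void)
{
	uint32_t inst = 0x7ca000a6;	/* mfmsr r5 */

	printf("op=%u xop=%u rt=%u\n",
	       get_op(inst), get_xop(inst), get_rt(inst));
	return 0;
}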
90 | static int tlbe_is_host_safe(const struct kvm_vcpu *vcpu, | ||
91 | const struct tlbe *tlbe) | ||
92 | { | ||
93 | gpa_t gpa; | ||
94 | |||
95 | if (!get_tlb_v(tlbe)) | ||
96 | return 0; | ||
97 | |||
98 | /* Does it match current guest AS? */ | ||
99 | /* XXX what about IS != DS? */ | ||
100 | if (get_tlb_ts(tlbe) != !!(vcpu->arch.msr & MSR_IS)) | ||
101 | return 0; | ||
102 | |||
103 | gpa = get_tlb_raddr(tlbe); | ||
104 | if (!gfn_to_memslot(vcpu->kvm, gpa >> PAGE_SHIFT)) | ||
105 | /* Mapping is not for RAM. */ | ||
106 | return 0; | ||
107 | |||
108 | return 1; | ||
109 | } | ||
110 | |||
111 | static int kvmppc_emul_tlbwe(struct kvm_vcpu *vcpu, u32 inst) | ||
112 | { | ||
113 | u64 eaddr; | ||
114 | u64 raddr; | ||
115 | u64 asid; | ||
116 | u32 flags; | ||
117 | struct tlbe *tlbe; | ||
118 | unsigned int ra; | ||
119 | unsigned int rs; | ||
120 | unsigned int ws; | ||
121 | unsigned int index; | ||
122 | |||
123 | ra = get_ra(inst); | ||
124 | rs = get_rs(inst); | ||
125 | ws = get_ws(inst); | ||
126 | |||
127 | index = vcpu->arch.gpr[ra]; | ||
128 | if (index >= PPC44x_TLB_SIZE) { | ||
129 | printk("%s: index %d\n", __func__, index); | ||
130 | kvmppc_dump_vcpu(vcpu); | ||
131 | return EMULATE_FAIL; | ||
132 | } | ||
133 | |||
134 | tlbe = &vcpu->arch.guest_tlb[index]; | ||
135 | |||
136 | /* Invalidate shadow mappings for the about-to-be-clobbered TLBE. */ | ||
137 | if (tlbe->word0 & PPC44x_TLB_VALID) { | ||
138 | eaddr = get_tlb_eaddr(tlbe); | ||
139 | asid = (tlbe->word0 & PPC44x_TLB_TS) | tlbe->tid; | ||
140 | kvmppc_mmu_invalidate(vcpu, eaddr, get_tlb_end(tlbe), asid); | ||
141 | } | ||
142 | |||
143 | switch (ws) { | ||
144 | case PPC44x_TLB_PAGEID: | ||
145 | tlbe->tid = vcpu->arch.mmucr & 0xff; | ||
146 | tlbe->word0 = vcpu->arch.gpr[rs]; | ||
147 | break; | ||
148 | |||
149 | case PPC44x_TLB_XLAT: | ||
150 | tlbe->word1 = vcpu->arch.gpr[rs]; | ||
151 | break; | ||
152 | |||
153 | case PPC44x_TLB_ATTRIB: | ||
154 | tlbe->word2 = vcpu->arch.gpr[rs]; | ||
155 | break; | ||
156 | |||
157 | default: | ||
158 | return EMULATE_FAIL; | ||
159 | } | ||
160 | |||
161 | if (tlbe_is_host_safe(vcpu, tlbe)) { | ||
162 | eaddr = get_tlb_eaddr(tlbe); | ||
163 | raddr = get_tlb_raddr(tlbe); | ||
164 | asid = (tlbe->word0 & PPC44x_TLB_TS) | tlbe->tid; | ||
165 | flags = tlbe->word2 & 0xffff; | ||
166 | |||
167 | /* Create a 4KB mapping on the host. If the guest wanted a | ||
168 | * large page, only the first 4KB is mapped here and the rest | ||
169 | * are mapped on the fly. */ | ||
170 | kvmppc_mmu_map(vcpu, eaddr, raddr >> PAGE_SHIFT, asid, flags); | ||
171 | } | ||
172 | |||
173 | KVMTRACE_5D(GTLB_WRITE, vcpu, index, | ||
174 | tlbe->tid, tlbe->word0, tlbe->word1, tlbe->word2, | ||
175 | handler); | ||
176 | |||
177 | return EMULATE_DONE; | ||
178 | } | ||
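kvmppc_emul_tlbwe() above (relocated by this series into the 44x-specific code) does three things: it invalidates any shadow mapping for the entry about to be clobbered, updates the one 440 TLB word selected by ws, and pre-maps the first 4KB on the host when the new entry passes tlbe_is_host_safe(). A standalone sketch of the ws dispatch, with illustrative constants in place of the real PPC44x_TLB_* values:

#include <stdint.h>
#include <stdio.h>

/* ws selects which word of a 440 TLB entry tlbwe writes; this models
 * the switch in kvmppc_emul_tlbwe(). */
enum { TLB_PAGEID, TLB_XLAT, TLB_ATTRIB };

struct tlbe {
	uint8_t tid;
	uint32_t word0, word1, word2;
};

static int guest_tlbwe(struct tlbe *e, int ws, uint32_t val, uint32_t mmucr)
{
	switch (ws) {
	case TLB_PAGEID:
		e->tid = mmucr & 0xff;	/* the TID comes from MMUCR, not rS */
		e->word0 = val;
		break;
	case TLB_XLAT:
		e->word1 = val;
		break;
	case TLB_ATTRIB:
		e->word2 = val;
		break;
	default:
		return -1;		/* EMULATE_FAIL in the real code */
	}
	return 0;
}

int main(void)
{
	struct tlbe e = { 0 };

	guest_tlbwe(&e, TLB_PAGEID, 0xc0000870, 0x15);
	printf("tid=%u word0=0x%x\n", e.tid, e.word0);
	return 0;
}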
179 | |||
180 | static void kvmppc_emulate_dec(struct kvm_vcpu *vcpu) | ||
181 | { | 34 | { |
182 | if (vcpu->arch.tcr & TCR_DIE) { | 35 | if (vcpu->arch.tcr & TCR_DIE) { |
183 | /* The decrementer ticks at the same rate as the timebase, so | 36 | /* The decrementer ticks at the same rate as the timebase, so |
@@ -193,12 +46,6 @@ static void kvmppc_emulate_dec(struct kvm_vcpu *vcpu) | |||
193 | } | 46 | } |
194 | } | 47 | } |
195 | 48 | ||
196 | static void kvmppc_emul_rfi(struct kvm_vcpu *vcpu) | ||
197 | { | ||
198 | vcpu->arch.pc = vcpu->arch.srr0; | ||
199 | kvmppc_set_msr(vcpu, vcpu->arch.srr1); | ||
200 | } | ||
201 | |||
202 | /* XXX to do: | 49 | /* XXX to do: |
203 | * lhax | 50 | * lhax |
204 | * lhaux | 51 | * lhaux |
@@ -213,40 +60,30 @@ static void kvmppc_emul_rfi(struct kvm_vcpu *vcpu) | |||
213 | * | 60 | * |
214 | * XXX is_bigendian should depend on MMU mapping or MSR[LE] | 61 | * XXX is_bigendian should depend on MMU mapping or MSR[LE] |
215 | */ | 62 | */ |
63 | /* XXX Should probably auto-generate instruction decoding for a particular core | ||
64 | * from opcode tables in the future. */ | ||
216 | int kvmppc_emulate_instruction(struct kvm_run *run, struct kvm_vcpu *vcpu) | 65 | int kvmppc_emulate_instruction(struct kvm_run *run, struct kvm_vcpu *vcpu) |
217 | { | 66 | { |
218 | u32 inst = vcpu->arch.last_inst; | 67 | u32 inst = vcpu->arch.last_inst; |
219 | u32 ea; | 68 | u32 ea; |
220 | int ra; | 69 | int ra; |
221 | int rb; | 70 | int rb; |
222 | int rc; | ||
223 | int rs; | 71 | int rs; |
224 | int rt; | 72 | int rt; |
225 | int sprn; | 73 | int sprn; |
226 | int dcrn; | ||
227 | enum emulation_result emulated = EMULATE_DONE; | 74 | enum emulation_result emulated = EMULATE_DONE; |
228 | int advance = 1; | 75 | int advance = 1; |
229 | 76 | ||
77 | /* this default type might be overwritten by subcategories */ | ||
78 | kvmppc_set_exit_type(vcpu, EMULATED_INST_EXITS); | ||
79 | |||
230 | switch (get_op(inst)) { | 80 | switch (get_op(inst)) { |
231 | case 3: /* trap */ | 81 | case 3: /* trap */ |
232 | printk("trap!\n"); | 82 | vcpu->arch.esr |= ESR_PTR; |
233 | kvmppc_queue_exception(vcpu, BOOKE_INTERRUPT_PROGRAM); | 83 | kvmppc_core_queue_program(vcpu); |
234 | advance = 0; | 84 | advance = 0; |
235 | break; | 85 | break; |
236 | 86 | ||
237 | case 19: | ||
238 | switch (get_xop(inst)) { | ||
239 | case 50: /* rfi */ | ||
240 | kvmppc_emul_rfi(vcpu); | ||
241 | advance = 0; | ||
242 | break; | ||
243 | |||
244 | default: | ||
245 | emulated = EMULATE_FAIL; | ||
246 | break; | ||
247 | } | ||
248 | break; | ||
249 | |||
250 | case 31: | 87 | case 31: |
251 | switch (get_xop(inst)) { | 88 | switch (get_xop(inst)) { |
252 | 89 | ||
@@ -255,27 +92,11 @@ int kvmppc_emulate_instruction(struct kvm_run *run, struct kvm_vcpu *vcpu) | |||
255 | emulated = kvmppc_handle_load(run, vcpu, rt, 4, 1); | 92 | emulated = kvmppc_handle_load(run, vcpu, rt, 4, 1); |
256 | break; | 93 | break; |
257 | 94 | ||
258 | case 83: /* mfmsr */ | ||
259 | rt = get_rt(inst); | ||
260 | vcpu->arch.gpr[rt] = vcpu->arch.msr; | ||
261 | break; | ||
262 | |||
263 | case 87: /* lbzx */ | 95 | case 87: /* lbzx */ |
264 | rt = get_rt(inst); | 96 | rt = get_rt(inst); |
265 | emulated = kvmppc_handle_load(run, vcpu, rt, 1, 1); | 97 | emulated = kvmppc_handle_load(run, vcpu, rt, 1, 1); |
266 | break; | 98 | break; |
267 | 99 | ||
268 | case 131: /* wrtee */ | ||
269 | rs = get_rs(inst); | ||
270 | vcpu->arch.msr = (vcpu->arch.msr & ~MSR_EE) | ||
271 | | (vcpu->arch.gpr[rs] & MSR_EE); | ||
272 | break; | ||
273 | |||
274 | case 146: /* mtmsr */ | ||
275 | rs = get_rs(inst); | ||
276 | kvmppc_set_msr(vcpu, vcpu->arch.gpr[rs]); | ||
277 | break; | ||
278 | |||
279 | case 151: /* stwx */ | 100 | case 151: /* stwx */ |
280 | rs = get_rs(inst); | 101 | rs = get_rs(inst); |
281 | emulated = kvmppc_handle_store(run, vcpu, | 102 | emulated = kvmppc_handle_store(run, vcpu, |
@@ -283,11 +104,6 @@ int kvmppc_emulate_instruction(struct kvm_run *run, struct kvm_vcpu *vcpu) | |||
283 | 4, 1); | 104 | 4, 1); |
284 | break; | 105 | break; |
285 | 106 | ||
286 | case 163: /* wrteei */ | ||
287 | vcpu->arch.msr = (vcpu->arch.msr & ~MSR_EE) | ||
288 | | (inst & MSR_EE); | ||
289 | break; | ||
290 | |||
291 | case 215: /* stbx */ | 107 | case 215: /* stbx */ |
292 | rs = get_rs(inst); | 108 | rs = get_rs(inst); |
293 | emulated = kvmppc_handle_store(run, vcpu, | 109 | emulated = kvmppc_handle_store(run, vcpu, |
@@ -328,42 +144,6 @@ int kvmppc_emulate_instruction(struct kvm_run *run, struct kvm_vcpu *vcpu) | |||
328 | vcpu->arch.gpr[ra] = ea; | 144 | vcpu->arch.gpr[ra] = ea; |
329 | break; | 145 | break; |
330 | 146 | ||
331 | case 323: /* mfdcr */ | ||
332 | dcrn = get_dcrn(inst); | ||
333 | rt = get_rt(inst); | ||
334 | |||
335 | /* The guest may access CPR0 registers to determine the timebase | ||
336 | * frequency, and it must know the real host frequency because it | ||
337 | * can directly access the timebase registers. | ||
338 | * | ||
339 | * It would be possible to emulate those accesses in userspace, | ||
340 | * but userspace can really only figure out the end frequency. | ||
341 | * We could decompose that into the factors that compute it, but | ||
342 | * that's tricky math, and it's easier to just report the real | ||
343 | * CPR0 values. | ||
344 | */ | ||
345 | switch (dcrn) { | ||
346 | case DCRN_CPR0_CONFIG_ADDR: | ||
347 | vcpu->arch.gpr[rt] = vcpu->arch.cpr0_cfgaddr; | ||
348 | break; | ||
349 | case DCRN_CPR0_CONFIG_DATA: | ||
350 | local_irq_disable(); | ||
351 | mtdcr(DCRN_CPR0_CONFIG_ADDR, | ||
352 | vcpu->arch.cpr0_cfgaddr); | ||
353 | vcpu->arch.gpr[rt] = mfdcr(DCRN_CPR0_CONFIG_DATA); | ||
354 | local_irq_enable(); | ||
355 | break; | ||
356 | default: | ||
357 | run->dcr.dcrn = dcrn; | ||
358 | run->dcr.data = 0; | ||
359 | run->dcr.is_write = 0; | ||
360 | vcpu->arch.io_gpr = rt; | ||
361 | vcpu->arch.dcr_needed = 1; | ||
362 | emulated = EMULATE_DO_DCR; | ||
363 | } | ||
364 | |||
365 | break; | ||
366 | |||
367 | case 339: /* mfspr */ | 147 | case 339: /* mfspr */ |
368 | sprn = get_sprn(inst); | 148 | sprn = get_sprn(inst); |
369 | rt = get_rt(inst); | 149 | rt = get_rt(inst); |
@@ -373,26 +153,8 @@ int kvmppc_emulate_instruction(struct kvm_run *run, struct kvm_vcpu *vcpu) | |||
373 | vcpu->arch.gpr[rt] = vcpu->arch.srr0; break; | 153 | vcpu->arch.gpr[rt] = vcpu->arch.srr0; break; |
374 | case SPRN_SRR1: | 154 | case SPRN_SRR1: |
375 | vcpu->arch.gpr[rt] = vcpu->arch.srr1; break; | 155 | vcpu->arch.gpr[rt] = vcpu->arch.srr1; break; |
376 | case SPRN_MMUCR: | ||
377 | vcpu->arch.gpr[rt] = vcpu->arch.mmucr; break; | ||
378 | case SPRN_PID: | ||
379 | vcpu->arch.gpr[rt] = vcpu->arch.pid; break; | ||
380 | case SPRN_IVPR: | ||
381 | vcpu->arch.gpr[rt] = vcpu->arch.ivpr; break; | ||
382 | case SPRN_CCR0: | ||
383 | vcpu->arch.gpr[rt] = vcpu->arch.ccr0; break; | ||
384 | case SPRN_CCR1: | ||
385 | vcpu->arch.gpr[rt] = vcpu->arch.ccr1; break; | ||
386 | case SPRN_PVR: | 156 | case SPRN_PVR: |
387 | vcpu->arch.gpr[rt] = vcpu->arch.pvr; break; | 157 | vcpu->arch.gpr[rt] = vcpu->arch.pvr; break; |
388 | case SPRN_DEAR: | ||
389 | vcpu->arch.gpr[rt] = vcpu->arch.dear; break; | ||
390 | case SPRN_ESR: | ||
391 | vcpu->arch.gpr[rt] = vcpu->arch.esr; break; | ||
392 | case SPRN_DBCR0: | ||
393 | vcpu->arch.gpr[rt] = vcpu->arch.dbcr0; break; | ||
394 | case SPRN_DBCR1: | ||
395 | vcpu->arch.gpr[rt] = vcpu->arch.dbcr1; break; | ||
396 | 158 | ||
397 | /* Note: mftb and TBRL/TBWL are user-accessible, so | 159 | /* Note: mftb and TBRL/TBWL are user-accessible, so |
398 | * the guest can always access the real TB anyways. | 160 | * the guest can always access the real TB anyways. |
@@ -413,42 +175,12 @@ int kvmppc_emulate_instruction(struct kvm_run *run, struct kvm_vcpu *vcpu) | |||
413 | /* Note: SPRG4-7 are user-readable, so we don't get | 175 | /* Note: SPRG4-7 are user-readable, so we don't get |
414 | * a trap. */ | 176 | * a trap. */ |
415 | 177 | ||
416 | case SPRN_IVOR0: | ||
417 | vcpu->arch.gpr[rt] = vcpu->arch.ivor[0]; break; | ||
418 | case SPRN_IVOR1: | ||
419 | vcpu->arch.gpr[rt] = vcpu->arch.ivor[1]; break; | ||
420 | case SPRN_IVOR2: | ||
421 | vcpu->arch.gpr[rt] = vcpu->arch.ivor[2]; break; | ||
422 | case SPRN_IVOR3: | ||
423 | vcpu->arch.gpr[rt] = vcpu->arch.ivor[3]; break; | ||
424 | case SPRN_IVOR4: | ||
425 | vcpu->arch.gpr[rt] = vcpu->arch.ivor[4]; break; | ||
426 | case SPRN_IVOR5: | ||
427 | vcpu->arch.gpr[rt] = vcpu->arch.ivor[5]; break; | ||
428 | case SPRN_IVOR6: | ||
429 | vcpu->arch.gpr[rt] = vcpu->arch.ivor[6]; break; | ||
430 | case SPRN_IVOR7: | ||
431 | vcpu->arch.gpr[rt] = vcpu->arch.ivor[7]; break; | ||
432 | case SPRN_IVOR8: | ||
433 | vcpu->arch.gpr[rt] = vcpu->arch.ivor[8]; break; | ||
434 | case SPRN_IVOR9: | ||
435 | vcpu->arch.gpr[rt] = vcpu->arch.ivor[9]; break; | ||
436 | case SPRN_IVOR10: | ||
437 | vcpu->arch.gpr[rt] = vcpu->arch.ivor[10]; break; | ||
438 | case SPRN_IVOR11: | ||
439 | vcpu->arch.gpr[rt] = vcpu->arch.ivor[11]; break; | ||
440 | case SPRN_IVOR12: | ||
441 | vcpu->arch.gpr[rt] = vcpu->arch.ivor[12]; break; | ||
442 | case SPRN_IVOR13: | ||
443 | vcpu->arch.gpr[rt] = vcpu->arch.ivor[13]; break; | ||
444 | case SPRN_IVOR14: | ||
445 | vcpu->arch.gpr[rt] = vcpu->arch.ivor[14]; break; | ||
446 | case SPRN_IVOR15: | ||
447 | vcpu->arch.gpr[rt] = vcpu->arch.ivor[15]; break; | ||
448 | |||
449 | default: | 178 | default: |
450 | printk("mfspr: unknown spr %x\n", sprn); | 179 | emulated = kvmppc_core_emulate_mfspr(vcpu, sprn, rt); |
451 | vcpu->arch.gpr[rt] = 0; | 180 | if (emulated == EMULATE_FAIL) { |
181 | printk("mfspr: unknown spr %x\n", sprn); | ||
182 | vcpu->arch.gpr[rt] = 0; | ||
183 | } | ||
452 | break; | 184 | break; |
453 | } | 185 | } |
454 | break; | 186 | break; |
@@ -478,25 +210,6 @@ int kvmppc_emulate_instruction(struct kvm_run *run, struct kvm_vcpu *vcpu) | |||
478 | vcpu->arch.gpr[ra] = ea; | 210 | vcpu->arch.gpr[ra] = ea; |
479 | break; | 211 | break; |
480 | 212 | ||
481 | case 451: /* mtdcr */ | ||
482 | dcrn = get_dcrn(inst); | ||
483 | rs = get_rs(inst); | ||
484 | |||
485 | /* emulate some access in kernel */ | ||
486 | switch (dcrn) { | ||
487 | case DCRN_CPR0_CONFIG_ADDR: | ||
488 | vcpu->arch.cpr0_cfgaddr = vcpu->arch.gpr[rs]; | ||
489 | break; | ||
490 | default: | ||
491 | run->dcr.dcrn = dcrn; | ||
492 | run->dcr.data = vcpu->arch.gpr[rs]; | ||
493 | run->dcr.is_write = 1; | ||
494 | vcpu->arch.dcr_needed = 1; | ||
495 | emulated = EMULATE_DO_DCR; | ||
496 | } | ||
497 | |||
498 | break; | ||
499 | |||
500 | case 467: /* mtspr */ | 213 | case 467: /* mtspr */ |
501 | sprn = get_sprn(inst); | 214 | sprn = get_sprn(inst); |
502 | rs = get_rs(inst); | 215 | rs = get_rs(inst); |
@@ -505,22 +218,6 @@ int kvmppc_emulate_instruction(struct kvm_run *run, struct kvm_vcpu *vcpu) | |||
505 | vcpu->arch.srr0 = vcpu->arch.gpr[rs]; break; | 218 | vcpu->arch.srr0 = vcpu->arch.gpr[rs]; break; |
506 | case SPRN_SRR1: | 219 | case SPRN_SRR1: |
507 | vcpu->arch.srr1 = vcpu->arch.gpr[rs]; break; | 220 | vcpu->arch.srr1 = vcpu->arch.gpr[rs]; break; |
508 | case SPRN_MMUCR: | ||
509 | vcpu->arch.mmucr = vcpu->arch.gpr[rs]; break; | ||
510 | case SPRN_PID: | ||
511 | kvmppc_set_pid(vcpu, vcpu->arch.gpr[rs]); break; | ||
512 | case SPRN_CCR0: | ||
513 | vcpu->arch.ccr0 = vcpu->arch.gpr[rs]; break; | ||
514 | case SPRN_CCR1: | ||
515 | vcpu->arch.ccr1 = vcpu->arch.gpr[rs]; break; | ||
516 | case SPRN_DEAR: | ||
517 | vcpu->arch.dear = vcpu->arch.gpr[rs]; break; | ||
518 | case SPRN_ESR: | ||
519 | vcpu->arch.esr = vcpu->arch.gpr[rs]; break; | ||
520 | case SPRN_DBCR0: | ||
521 | vcpu->arch.dbcr0 = vcpu->arch.gpr[rs]; break; | ||
522 | case SPRN_DBCR1: | ||
523 | vcpu->arch.dbcr1 = vcpu->arch.gpr[rs]; break; | ||
524 | 221 | ||
525 | /* XXX We need to context-switch the timebase for | 222 | /* XXX We need to context-switch the timebase for |
526 | * watchdog and FIT. */ | 223 | * watchdog and FIT. */ |
@@ -532,14 +229,6 @@ int kvmppc_emulate_instruction(struct kvm_run *run, struct kvm_vcpu *vcpu) | |||
532 | kvmppc_emulate_dec(vcpu); | 229 | kvmppc_emulate_dec(vcpu); |
533 | break; | 230 | break; |
534 | 231 | ||
535 | case SPRN_TSR: | ||
536 | vcpu->arch.tsr &= ~vcpu->arch.gpr[rs]; break; | ||
537 | |||
538 | case SPRN_TCR: | ||
539 | vcpu->arch.tcr = vcpu->arch.gpr[rs]; | ||
540 | kvmppc_emulate_dec(vcpu); | ||
541 | break; | ||
542 | |||
543 | case SPRN_SPRG0: | 232 | case SPRN_SPRG0: |
544 | vcpu->arch.sprg0 = vcpu->arch.gpr[rs]; break; | 233 | vcpu->arch.sprg0 = vcpu->arch.gpr[rs]; break; |
545 | case SPRN_SPRG1: | 234 | case SPRN_SPRG1: |
@@ -549,56 +238,10 @@ int kvmppc_emulate_instruction(struct kvm_run *run, struct kvm_vcpu *vcpu) | |||
549 | case SPRN_SPRG3: | 238 | case SPRN_SPRG3: |
550 | vcpu->arch.sprg3 = vcpu->arch.gpr[rs]; break; | 239 | vcpu->arch.sprg3 = vcpu->arch.gpr[rs]; break; |
551 | 240 | ||
552 | /* Note: SPRG4-7 are user-readable. These values are | ||
553 | * loaded into the real SPRGs when resuming the | ||
554 | * guest. */ | ||
555 | case SPRN_SPRG4: | ||
556 | vcpu->arch.sprg4 = vcpu->arch.gpr[rs]; break; | ||
557 | case SPRN_SPRG5: | ||
558 | vcpu->arch.sprg5 = vcpu->arch.gpr[rs]; break; | ||
559 | case SPRN_SPRG6: | ||
560 | vcpu->arch.sprg6 = vcpu->arch.gpr[rs]; break; | ||
561 | case SPRN_SPRG7: | ||
562 | vcpu->arch.sprg7 = vcpu->arch.gpr[rs]; break; | ||
563 | |||
564 | case SPRN_IVPR: | ||
565 | vcpu->arch.ivpr = vcpu->arch.gpr[rs]; break; | ||
566 | case SPRN_IVOR0: | ||
567 | vcpu->arch.ivor[0] = vcpu->arch.gpr[rs]; break; | ||
568 | case SPRN_IVOR1: | ||
569 | vcpu->arch.ivor[1] = vcpu->arch.gpr[rs]; break; | ||
570 | case SPRN_IVOR2: | ||
571 | vcpu->arch.ivor[2] = vcpu->arch.gpr[rs]; break; | ||
572 | case SPRN_IVOR3: | ||
573 | vcpu->arch.ivor[3] = vcpu->arch.gpr[rs]; break; | ||
574 | case SPRN_IVOR4: | ||
575 | vcpu->arch.ivor[4] = vcpu->arch.gpr[rs]; break; | ||
576 | case SPRN_IVOR5: | ||
577 | vcpu->arch.ivor[5] = vcpu->arch.gpr[rs]; break; | ||
578 | case SPRN_IVOR6: | ||
579 | vcpu->arch.ivor[6] = vcpu->arch.gpr[rs]; break; | ||
580 | case SPRN_IVOR7: | ||
581 | vcpu->arch.ivor[7] = vcpu->arch.gpr[rs]; break; | ||
582 | case SPRN_IVOR8: | ||
583 | vcpu->arch.ivor[8] = vcpu->arch.gpr[rs]; break; | ||
584 | case SPRN_IVOR9: | ||
585 | vcpu->arch.ivor[9] = vcpu->arch.gpr[rs]; break; | ||
586 | case SPRN_IVOR10: | ||
587 | vcpu->arch.ivor[10] = vcpu->arch.gpr[rs]; break; | ||
588 | case SPRN_IVOR11: | ||
589 | vcpu->arch.ivor[11] = vcpu->arch.gpr[rs]; break; | ||
590 | case SPRN_IVOR12: | ||
591 | vcpu->arch.ivor[12] = vcpu->arch.gpr[rs]; break; | ||
592 | case SPRN_IVOR13: | ||
593 | vcpu->arch.ivor[13] = vcpu->arch.gpr[rs]; break; | ||
594 | case SPRN_IVOR14: | ||
595 | vcpu->arch.ivor[14] = vcpu->arch.gpr[rs]; break; | ||
596 | case SPRN_IVOR15: | ||
597 | vcpu->arch.ivor[15] = vcpu->arch.gpr[rs]; break; | ||
598 | |||
599 | default: | 241 | default: |
600 | printk("mtspr: unknown spr %x\n", sprn); | 242 | emulated = kvmppc_core_emulate_mtspr(vcpu, sprn, rs); |
601 | emulated = EMULATE_FAIL; | 243 | if (emulated == EMULATE_FAIL) |
244 | printk("mtspr: unknown spr %x\n", sprn); | ||
602 | break; | 245 | break; |
603 | } | 246 | } |
604 | break; | 247 | break; |
@@ -629,36 +272,6 @@ int kvmppc_emulate_instruction(struct kvm_run *run, struct kvm_vcpu *vcpu) | |||
629 | 4, 0); | 272 | 4, 0); |
630 | break; | 273 | break; |
631 | 274 | ||
632 | case 978: /* tlbwe */ | ||
633 | emulated = kvmppc_emul_tlbwe(vcpu, inst); | ||
634 | break; | ||
635 | |||
636 | case 914: { /* tlbsx */ | ||
637 | int index; | ||
638 | unsigned int as = get_mmucr_sts(vcpu); | ||
639 | unsigned int pid = get_mmucr_stid(vcpu); | ||
640 | |||
641 | rt = get_rt(inst); | ||
642 | ra = get_ra(inst); | ||
643 | rb = get_rb(inst); | ||
644 | rc = get_rc(inst); | ||
645 | |||
646 | ea = vcpu->arch.gpr[rb]; | ||
647 | if (ra) | ||
648 | ea += vcpu->arch.gpr[ra]; | ||
649 | |||
650 | index = kvmppc_44x_tlb_index(vcpu, ea, pid, as); | ||
651 | if (rc) { | ||
652 | if (index < 0) | ||
653 | vcpu->arch.cr &= ~0x20000000; | ||
654 | else | ||
655 | vcpu->arch.cr |= 0x20000000; | ||
656 | } | ||
657 | vcpu->arch.gpr[rt] = index; | ||
658 | |||
659 | } | ||
660 | break; | ||
661 | |||
662 | case 790: /* lhbrx */ | 275 | case 790: /* lhbrx */ |
663 | rt = get_rt(inst); | 276 | rt = get_rt(inst); |
664 | emulated = kvmppc_handle_load(run, vcpu, rt, 2, 0); | 277 | emulated = kvmppc_handle_load(run, vcpu, rt, 2, 0); |
@@ -674,14 +287,9 @@ int kvmppc_emulate_instruction(struct kvm_run *run, struct kvm_vcpu *vcpu) | |||
674 | 2, 0); | 287 | 2, 0); |
675 | break; | 288 | break; |
676 | 289 | ||
677 | case 966: /* iccci */ | ||
678 | break; | ||
679 | |||
680 | default: | 290 | default: |
681 | printk("unknown: op %d xop %d\n", get_op(inst), | 291 | /* Attempt core-specific emulation below. */ |
682 | get_xop(inst)); | ||
683 | emulated = EMULATE_FAIL; | 292 | emulated = EMULATE_FAIL; |
684 | break; | ||
685 | } | 293 | } |
686 | break; | 294 | break; |
687 | 295 | ||
@@ -764,12 +372,19 @@ int kvmppc_emulate_instruction(struct kvm_run *run, struct kvm_vcpu *vcpu) | |||
764 | break; | 372 | break; |
765 | 373 | ||
766 | default: | 374 | default: |
767 | printk("unknown op %d\n", get_op(inst)); | ||
768 | emulated = EMULATE_FAIL; | 375 | emulated = EMULATE_FAIL; |
769 | break; | ||
770 | } | 376 | } |
771 | 377 | ||
772 | KVMTRACE_3D(PPC_INSTR, vcpu, inst, vcpu->arch.pc, emulated, entryexit); | 378 | if (emulated == EMULATE_FAIL) { |
379 | emulated = kvmppc_core_emulate_op(run, vcpu, inst, &advance); | ||
380 | if (emulated == EMULATE_FAIL) { | ||
381 | advance = 0; | ||
382 | printk(KERN_ERR "Couldn't emulate instruction 0x%08x " | ||
383 | "(op %d xop %d)\n", inst, get_op(inst), get_xop(inst)); | ||
384 | } | ||
385 | } | ||
386 | |||
387 | KVMTRACE_3D(PPC_INSTR, vcpu, inst, (int)vcpu->arch.pc, emulated, entryexit); | ||
773 | 388 | ||
774 | if (advance) | 389 | if (advance) |
775 | vcpu->arch.pc += 4; /* Advance past emulated instruction. */ | 390 | vcpu->arch.pc += 4; /* Advance past emulated instruction. */ |
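The net control flow after this rework is a two-level dispatch: the generic Book E decoder runs first, and anything it leaves as EMULATE_FAIL is offered to the core-specific kvmppc_core_emulate_op() hook before being reported as unhandled. A compact model of that flow, with stub hooks rather than the real kernel signatures:

#include <stdint.h>
#include <stdio.h>

enum emulation_result { EMULATE_DONE, EMULATE_FAIL };

/* Per-core hook (44x in this series); the stub recognizes nothing. */
static enum emulation_result core_emulate_op(uint32_t inst, int *advance)
{
	(void)inst;
	(void)advance;
	return EMULATE_FAIL;
}

static enum emulation_result generic_emulate_op(uint32_t inst, int *advance)
{
	switch (inst >> 26) {
	case 3:	/* trap: queue a program interrupt, do not advance the PC */
		*advance = 0;
		return EMULATE_DONE;
	default:
		return EMULATE_FAIL;	/* let the core-specific layer try */
	}
}

static enum emulation_result emulate(uint32_t inst, uint32_t *pc)
{
	int advance = 1;
	enum emulation_result r = generic_emulate_op(inst, &advance);

	if (r == EMULATE_FAIL)
		r = core_emulate_op(inst, &advance);
	if (r == EMULATE_FAIL)
		advance = 0;	/* leave the PC on the offending instruction */
	if (advance)
		*pc += 4;
	return r;
}

int main(void)
{
	uint32_t pc = 0x1000;

	emulate(0x7c0000a6, &pc);	/* mfmsr: now core-specific, fails here */
	printf("pc = 0x%x\n", pc);	/* unchanged: 0x1000 */
	return 0;
}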
diff --git a/arch/powerpc/kvm/powerpc.c b/arch/powerpc/kvm/powerpc.c index 8bef0efcdfe1..2822c8ccfaaf 100644 --- a/arch/powerpc/kvm/powerpc.c +++ b/arch/powerpc/kvm/powerpc.c | |||
@@ -28,9 +28,9 @@ | |||
28 | #include <asm/uaccess.h> | 28 | #include <asm/uaccess.h> |
29 | #include <asm/kvm_ppc.h> | 29 | #include <asm/kvm_ppc.h> |
30 | #include <asm/tlbflush.h> | 30 | #include <asm/tlbflush.h> |
31 | #include "timing.h" | ||
31 | #include "../mm/mmu_decl.h" | 32 | #include "../mm/mmu_decl.h" |
32 | 33 | ||
33 | |||
34 | gfn_t unalias_gfn(struct kvm *kvm, gfn_t gfn) | 34 | gfn_t unalias_gfn(struct kvm *kvm, gfn_t gfn) |
35 | { | 35 | { |
36 | return gfn; | 36 | return gfn; |
@@ -99,14 +99,7 @@ void kvm_arch_hardware_unsetup(void) | |||
99 | 99 | ||
100 | void kvm_arch_check_processor_compat(void *rtn) | 100 | void kvm_arch_check_processor_compat(void *rtn) |
101 | { | 101 | { |
102 | int r; | 102 | *(int *)rtn = kvmppc_core_check_processor_compat(); |
103 | |||
104 | if (strcmp(cur_cpu_spec->platform, "ppc440") == 0) | ||
105 | r = 0; | ||
106 | else | ||
107 | r = -ENOTSUPP; | ||
108 | |||
109 | *(int *)rtn = r; | ||
110 | } | 103 | } |
111 | 104 | ||
112 | struct kvm *kvm_arch_create_vm(void) | 105 | struct kvm *kvm_arch_create_vm(void) |
@@ -144,9 +137,6 @@ int kvm_dev_ioctl_check_extension(long ext) | |||
144 | int r; | 137 | int r; |
145 | 138 | ||
146 | switch (ext) { | 139 | switch (ext) { |
147 | case KVM_CAP_USER_MEMORY: | ||
148 | r = 1; | ||
149 | break; | ||
150 | case KVM_CAP_COALESCED_MMIO: | 140 | case KVM_CAP_COALESCED_MMIO: |
151 | r = KVM_COALESCED_MMIO_PAGE_OFFSET; | 141 | r = KVM_COALESCED_MMIO_PAGE_OFFSET; |
152 | break; | 142 | break; |
@@ -179,30 +169,15 @@ void kvm_arch_flush_shadow(struct kvm *kvm) | |||
179 | struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm, unsigned int id) | 169 | struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm, unsigned int id) |
180 | { | 170 | { |
181 | struct kvm_vcpu *vcpu; | 171 | struct kvm_vcpu *vcpu; |
182 | int err; | 172 | vcpu = kvmppc_core_vcpu_create(kvm, id); |
183 | 173 | kvmppc_create_vcpu_debugfs(vcpu, id); | |
184 | vcpu = kmem_cache_zalloc(kvm_vcpu_cache, GFP_KERNEL); | ||
185 | if (!vcpu) { | ||
186 | err = -ENOMEM; | ||
187 | goto out; | ||
188 | } | ||
189 | |||
190 | err = kvm_vcpu_init(vcpu, kvm, id); | ||
191 | if (err) | ||
192 | goto free_vcpu; | ||
193 | |||
194 | return vcpu; | 174 | return vcpu; |
195 | |||
196 | free_vcpu: | ||
197 | kmem_cache_free(kvm_vcpu_cache, vcpu); | ||
198 | out: | ||
199 | return ERR_PTR(err); | ||
200 | } | 175 | } |
201 | 176 | ||
202 | void kvm_arch_vcpu_free(struct kvm_vcpu *vcpu) | 177 | void kvm_arch_vcpu_free(struct kvm_vcpu *vcpu) |
203 | { | 178 | { |
204 | kvm_vcpu_uninit(vcpu); | 179 | kvmppc_remove_vcpu_debugfs(vcpu); |
205 | kmem_cache_free(kvm_vcpu_cache, vcpu); | 180 | kvmppc_core_vcpu_free(vcpu); |
206 | } | 181 | } |
207 | 182 | ||
208 | void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu) | 183 | void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu) |
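The rewritten kvm_arch_vcpu_create() hands the hook's return value straight to kvmppc_create_vcpu_debugfs(); by kernel convention kvmppc_core_vcpu_create() can return an ERR_PTR (the deleted code did), so a guard along these lines keeps the failure path clean. This is a userspace model only: is_err() imitates IS_ERR(), and the error value is illustrative.

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

#define MAX_ERRNO 4095

static int is_err(const void *p)	/* models the kernel's IS_ERR() */
{
	return (uintptr_t)p >= (uintptr_t)-MAX_ERRNO;
}

struct vcpu { int id; };

static struct vcpu *core_vcpu_create(int id)	/* per-core hook */
{
	struct vcpu *v = malloc(sizeof(*v));

	if (!v)
		return (struct vcpu *)(uintptr_t)-12;	/* ERR_PTR(-ENOMEM) */
	v->id = id;
	return v;
}

static void create_vcpu_debugfs(struct vcpu *v, int id)
{
	(void)v;
	printf("registered timing file for vcpu %d\n", id);
}

static struct vcpu *arch_vcpu_create(int id)
{
	struct vcpu *vcpu = core_vcpu_create(id);

	if (!is_err(vcpu))	/* skip debugfs on allocation failure */
		create_vcpu_debugfs(vcpu, id);
	return vcpu;
}

int main(void)
{
	struct vcpu *v = arch_vcpu_create(0);

	if (!is_err(v))
		free(v);
	return 0;
}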
@@ -212,16 +187,14 @@ void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu) | |||
212 | 187 | ||
213 | int kvm_cpu_has_pending_timer(struct kvm_vcpu *vcpu) | 188 | int kvm_cpu_has_pending_timer(struct kvm_vcpu *vcpu) |
214 | { | 189 | { |
215 | unsigned int priority = exception_priority[BOOKE_INTERRUPT_DECREMENTER]; | 190 | return kvmppc_core_pending_dec(vcpu); |
216 | |||
217 | return test_bit(priority, &vcpu->arch.pending_exceptions); | ||
218 | } | 191 | } |
219 | 192 | ||
220 | static void kvmppc_decrementer_func(unsigned long data) | 193 | static void kvmppc_decrementer_func(unsigned long data) |
221 | { | 194 | { |
222 | struct kvm_vcpu *vcpu = (struct kvm_vcpu *)data; | 195 | struct kvm_vcpu *vcpu = (struct kvm_vcpu *)data; |
223 | 196 | ||
224 | kvmppc_queue_exception(vcpu, BOOKE_INTERRUPT_DECREMENTER); | 197 | kvmppc_core_queue_dec(vcpu); |
225 | 198 | ||
226 | if (waitqueue_active(&vcpu->wq)) { | 199 | if (waitqueue_active(&vcpu->wq)) { |
227 | wake_up_interruptible(&vcpu->wq); | 200 | wake_up_interruptible(&vcpu->wq); |
@@ -242,96 +215,25 @@ void kvm_arch_vcpu_uninit(struct kvm_vcpu *vcpu) | |||
242 | kvmppc_core_destroy_mmu(vcpu); | 215 | kvmppc_core_destroy_mmu(vcpu); |
243 | } | 216 | } |
244 | 217 | ||
245 | /* Note: clearing MSR[DE] just means that the debug interrupt will not be | ||
246 | * delivered *immediately*. Instead, it simply sets the appropriate DBSR bits. | ||
247 | * If those DBSR bits are still set when MSR[DE] is re-enabled, the interrupt | ||
248 | * will be delivered as an "imprecise debug event" (which is indicated by | ||
250 | * DBSR[IDE]). | ||
250 | */ | ||
251 | static void kvmppc_disable_debug_interrupts(void) | ||
252 | { | ||
253 | mtmsr(mfmsr() & ~MSR_DE); | ||
254 | } | ||
255 | |||
256 | static void kvmppc_restore_host_debug_state(struct kvm_vcpu *vcpu) | ||
257 | { | ||
258 | kvmppc_disable_debug_interrupts(); | ||
259 | |||
260 | mtspr(SPRN_IAC1, vcpu->arch.host_iac[0]); | ||
261 | mtspr(SPRN_IAC2, vcpu->arch.host_iac[1]); | ||
262 | mtspr(SPRN_IAC3, vcpu->arch.host_iac[2]); | ||
263 | mtspr(SPRN_IAC4, vcpu->arch.host_iac[3]); | ||
264 | mtspr(SPRN_DBCR1, vcpu->arch.host_dbcr1); | ||
265 | mtspr(SPRN_DBCR2, vcpu->arch.host_dbcr2); | ||
266 | mtspr(SPRN_DBCR0, vcpu->arch.host_dbcr0); | ||
267 | mtmsr(vcpu->arch.host_msr); | ||
268 | } | ||
269 | |||
270 | static void kvmppc_load_guest_debug_registers(struct kvm_vcpu *vcpu) | ||
271 | { | ||
272 | struct kvm_guest_debug *dbg = &vcpu->guest_debug; | ||
273 | u32 dbcr0 = 0; | ||
274 | |||
275 | vcpu->arch.host_msr = mfmsr(); | ||
276 | kvmppc_disable_debug_interrupts(); | ||
277 | |||
278 | /* Save host debug register state. */ | ||
279 | vcpu->arch.host_iac[0] = mfspr(SPRN_IAC1); | ||
280 | vcpu->arch.host_iac[1] = mfspr(SPRN_IAC2); | ||
281 | vcpu->arch.host_iac[2] = mfspr(SPRN_IAC3); | ||
282 | vcpu->arch.host_iac[3] = mfspr(SPRN_IAC4); | ||
283 | vcpu->arch.host_dbcr0 = mfspr(SPRN_DBCR0); | ||
284 | vcpu->arch.host_dbcr1 = mfspr(SPRN_DBCR1); | ||
285 | vcpu->arch.host_dbcr2 = mfspr(SPRN_DBCR2); | ||
286 | |||
287 | /* set registers up for guest */ | ||
288 | |||
289 | if (dbg->bp[0]) { | ||
290 | mtspr(SPRN_IAC1, dbg->bp[0]); | ||
291 | dbcr0 |= DBCR0_IAC1 | DBCR0_IDM; | ||
292 | } | ||
293 | if (dbg->bp[1]) { | ||
294 | mtspr(SPRN_IAC2, dbg->bp[1]); | ||
295 | dbcr0 |= DBCR0_IAC2 | DBCR0_IDM; | ||
296 | } | ||
297 | if (dbg->bp[2]) { | ||
298 | mtspr(SPRN_IAC3, dbg->bp[2]); | ||
299 | dbcr0 |= DBCR0_IAC3 | DBCR0_IDM; | ||
300 | } | ||
301 | if (dbg->bp[3]) { | ||
302 | mtspr(SPRN_IAC4, dbg->bp[3]); | ||
303 | dbcr0 |= DBCR0_IAC4 | DBCR0_IDM; | ||
304 | } | ||
305 | |||
306 | mtspr(SPRN_DBCR0, dbcr0); | ||
307 | mtspr(SPRN_DBCR1, 0); | ||
308 | mtspr(SPRN_DBCR2, 0); | ||
309 | } | ||
310 | |||
311 | void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu) | 218 | void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu) |
312 | { | 219 | { |
313 | int i; | ||
314 | |||
315 | if (vcpu->guest_debug.enabled) | 220 | if (vcpu->guest_debug.enabled) |
316 | kvmppc_load_guest_debug_registers(vcpu); | 221 | kvmppc_core_load_guest_debugstate(vcpu); |
317 | 222 | ||
318 | /* Mark every guest entry in the shadow TLB entry modified, so that they | 223 | kvmppc_core_vcpu_load(vcpu, cpu); |
319 | * will all be reloaded on the next vcpu run (instead of being | ||
320 | * demand-faulted). */ | ||
321 | for (i = 0; i <= tlb_44x_hwater; i++) | ||
322 | kvmppc_tlbe_set_modified(vcpu, i); | ||
323 | } | 224 | } |
324 | 225 | ||
325 | void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu) | 226 | void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu) |
326 | { | 227 | { |
327 | if (vcpu->guest_debug.enabled) | 228 | if (vcpu->guest_debug.enabled) |
328 | kvmppc_restore_host_debug_state(vcpu); | 229 | kvmppc_core_load_host_debugstate(vcpu); |
329 | 230 | ||
330 | /* Don't leave guest TLB entries resident when being de-scheduled. */ | 231 | /* Don't leave guest TLB entries resident when being de-scheduled. */ |
331 | /* XXX It would be nice to differentiate between heavyweight exit and | 232 | /* XXX It would be nice to differentiate between heavyweight exit and |
332 | * sched_out here, since we could avoid the TLB flush for heavyweight | 233 | * sched_out here, since we could avoid the TLB flush for heavyweight |
333 | * exits. */ | 234 | * exits. */ |
334 | _tlbil_all(); | 235 | _tlbil_all(); |
236 | kvmppc_core_vcpu_put(vcpu); | ||
335 | } | 237 | } |
336 | 238 | ||
337 | int kvm_arch_vcpu_ioctl_debug_guest(struct kvm_vcpu *vcpu, | 239 | int kvm_arch_vcpu_ioctl_debug_guest(struct kvm_vcpu *vcpu, |
@@ -355,14 +257,14 @@ int kvm_arch_vcpu_ioctl_debug_guest(struct kvm_vcpu *vcpu, | |||
355 | static void kvmppc_complete_dcr_load(struct kvm_vcpu *vcpu, | 257 | static void kvmppc_complete_dcr_load(struct kvm_vcpu *vcpu, |
356 | struct kvm_run *run) | 258 | struct kvm_run *run) |
357 | { | 259 | { |
358 | u32 *gpr = &vcpu->arch.gpr[vcpu->arch.io_gpr]; | 260 | ulong *gpr = &vcpu->arch.gpr[vcpu->arch.io_gpr]; |
359 | *gpr = run->dcr.data; | 261 | *gpr = run->dcr.data; |
360 | } | 262 | } |
361 | 263 | ||
362 | static void kvmppc_complete_mmio_load(struct kvm_vcpu *vcpu, | 264 | static void kvmppc_complete_mmio_load(struct kvm_vcpu *vcpu, |
363 | struct kvm_run *run) | 265 | struct kvm_run *run) |
364 | { | 266 | { |
365 | u32 *gpr = &vcpu->arch.gpr[vcpu->arch.io_gpr]; | 267 | ulong *gpr = &vcpu->arch.gpr[vcpu->arch.io_gpr]; |
366 | 268 | ||
367 | if (run->mmio.len > sizeof(*gpr)) { | 269 | if (run->mmio.len > sizeof(*gpr)) { |
368 | printk(KERN_ERR "bad MMIO length: %d\n", run->mmio.len); | 270 | printk(KERN_ERR "bad MMIO length: %d\n", run->mmio.len); |
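The io_gpr store switching from u32 * to ulong * matters on 64-bit builds, where the register image is 64 bits wide and a 32-bit store would update only half of it; it also lets the run->mmio.len > sizeof(*gpr) check track the build's register width. A standalone model of the completion path (widths only; the real code also handles byte order):

#include <stdint.h>
#include <stdio.h>

static void complete_mmio_load(unsigned long *gpr, const void *data,
			       unsigned int len)
{
	if (len > sizeof(*gpr)) {
		fprintf(stderr, "bad MMIO length: %u\n", len);
		return;
	}

	switch (len) {
	case 1: *gpr = *(const uint8_t *)data; break;
	case 2: *gpr = *(const uint16_t *)data; break;
	case 4: *gpr = *(const uint32_t *)data; break;
	case 8: *gpr = *(const uint64_t *)data; break;
	}
}

int main(void)
{
	unsigned long gpr = ~0ul;
	uint32_t word = 0x12345678;

	complete_mmio_load(&gpr, &word, sizeof(word));
	printf("gpr = 0x%lx\n", gpr);	/* 0x12345678, upper bits cleared */
	return 0;
}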
@@ -460,7 +362,7 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *run) | |||
460 | vcpu->arch.dcr_needed = 0; | 362 | vcpu->arch.dcr_needed = 0; |
461 | } | 363 | } |
462 | 364 | ||
463 | kvmppc_check_and_deliver_interrupts(vcpu); | 365 | kvmppc_core_deliver_interrupts(vcpu); |
464 | 366 | ||
465 | local_irq_disable(); | 367 | local_irq_disable(); |
466 | kvm_guest_enter(); | 368 | kvm_guest_enter(); |
@@ -478,7 +380,7 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *run) | |||
478 | 380 | ||
479 | int kvm_vcpu_ioctl_interrupt(struct kvm_vcpu *vcpu, struct kvm_interrupt *irq) | 381 | int kvm_vcpu_ioctl_interrupt(struct kvm_vcpu *vcpu, struct kvm_interrupt *irq) |
480 | { | 382 | { |
481 | kvmppc_queue_exception(vcpu, BOOKE_INTERRUPT_EXTERNAL); | 383 | kvmppc_core_queue_external(vcpu, irq); |
482 | 384 | ||
483 | if (waitqueue_active(&vcpu->wq)) { | 385 | if (waitqueue_active(&vcpu->wq)) { |
484 | wake_up_interruptible(&vcpu->wq); | 386 | wake_up_interruptible(&vcpu->wq); |
diff --git a/arch/powerpc/kvm/timing.c b/arch/powerpc/kvm/timing.c new file mode 100644 index 000000000000..47ee603f558e --- /dev/null +++ b/arch/powerpc/kvm/timing.c | |||
@@ -0,0 +1,239 @@ | |||
1 | /* | ||
2 | * This program is free software; you can redistribute it and/or modify | ||
3 | * it under the terms of the GNU General Public License, version 2, as | ||
4 | * published by the Free Software Foundation. | ||
5 | * | ||
6 | * This program is distributed in the hope that it will be useful, | ||
7 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
8 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
9 | * GNU General Public License for more details. | ||
10 | * | ||
11 | * You should have received a copy of the GNU General Public License | ||
12 | * along with this program; if not, write to the Free Software | ||
13 | * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. | ||
14 | * | ||
15 | * Copyright IBM Corp. 2008 | ||
16 | * | ||
17 | * Authors: Hollis Blanchard <hollisb@us.ibm.com> | ||
18 | * Christian Ehrhardt <ehrhardt@linux.vnet.ibm.com> | ||
19 | */ | ||
20 | |||
21 | #include <linux/kvm_host.h> | ||
22 | #include <linux/fs.h> | ||
23 | #include <linux/seq_file.h> | ||
24 | #include <linux/debugfs.h> | ||
25 | #include <linux/uaccess.h> | ||
26 | |||
27 | #include <asm/time.h> | ||
28 | #include <asm-generic/div64.h> | ||
29 | |||
30 | #include "timing.h" | ||
31 | |||
32 | void kvmppc_init_timing_stats(struct kvm_vcpu *vcpu) | ||
33 | { | ||
34 | int i; | ||
35 | |||
36 | /* pause guest execution to avoid concurrent updates */ | ||
37 | mutex_lock(&vcpu->mutex); /* can sleep: take it before irqs off */ | ||
38 | local_irq_disable(); | ||
39 | |||
40 | vcpu->arch.last_exit_type = 0xDEAD; | ||
41 | for (i = 0; i < __NUMBER_OF_KVM_EXIT_TYPES; i++) { | ||
42 | vcpu->arch.timing_count_type[i] = 0; | ||
43 | vcpu->arch.timing_max_duration[i] = 0; | ||
44 | vcpu->arch.timing_min_duration[i] = 0xFFFFFFFF; | ||
45 | vcpu->arch.timing_sum_duration[i] = 0; | ||
46 | vcpu->arch.timing_sum_quad_duration[i] = 0; | ||
47 | } | ||
48 | vcpu->arch.timing_last_exit = 0; | ||
49 | vcpu->arch.timing_exit.tv64 = 0; | ||
50 | vcpu->arch.timing_last_enter.tv64 = 0; | ||
51 | |||
52 | local_irq_enable(); | ||
53 | mutex_unlock(&vcpu->mutex); | ||
54 | } | ||
55 | |||
56 | static void add_exit_timing(struct kvm_vcpu *vcpu, u64 duration, int type) | ||
57 | { | ||
58 | u64 old; | ||
59 | |||
60 | do_div(duration, tb_ticks_per_usec); | ||
61 | if (unlikely(duration > 0xFFFFFFFF)) { | ||
62 | printk(KERN_ERR"%s - duration too big -> overflow" | ||
63 | " duration %lld type %d exit #%d\n", | ||
64 | __func__, duration, type, | ||
65 | vcpu->arch.timing_count_type[type]); | ||
66 | return; | ||
67 | } | ||
68 | |||
69 | vcpu->arch.timing_count_type[type]++; | ||
70 | |||
71 | /* sum */ | ||
72 | old = vcpu->arch.timing_sum_duration[type]; | ||
73 | vcpu->arch.timing_sum_duration[type] += duration; | ||
74 | if (unlikely(old > vcpu->arch.timing_sum_duration[type])) { | ||
75 | printk(KERN_ERR"%s - wrap adding sum of durations" | ||
76 | " old %lld new %lld type %d exit # of type %d\n", | ||
77 | __func__, old, vcpu->arch.timing_sum_duration[type], | ||
78 | type, vcpu->arch.timing_count_type[type]); | ||
79 | } | ||
80 | |||
81 | /* square sum */ | ||
82 | old = vcpu->arch.timing_sum_quad_duration[type]; | ||
83 | vcpu->arch.timing_sum_quad_duration[type] += (duration*duration); | ||
84 | if (unlikely(old > vcpu->arch.timing_sum_quad_duration[type])) { | ||
85 | printk(KERN_ERR"%s - wrap adding sum of squared durations" | ||
86 | " old %lld new %lld type %d exit # of type %d\n", | ||
87 | __func__, old, | ||
88 | vcpu->arch.timing_sum_quad_duration[type], | ||
89 | type, vcpu->arch.timing_count_type[type]); | ||
90 | } | ||
91 | |||
92 | /* set min/max */ | ||
93 | if (unlikely(duration < vcpu->arch.timing_min_duration[type])) | ||
94 | vcpu->arch.timing_min_duration[type] = duration; | ||
95 | if (unlikely(duration > vcpu->arch.timing_max_duration[type])) | ||
96 | vcpu->arch.timing_max_duration[type] = duration; | ||
97 | } | ||
98 | |||
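add_exit_timing() keeps a count, a sum, and a sum of squares per exit type, which is exactly enough to recover the mean and standard deviation offline via Var(X) = E[X^2] - E[X]^2. A small example of post-processing those three accumulators (build with -lm):

#include <math.h>
#include <stdint.h>
#include <stdio.h>

static void exit_stats(uint32_t count, uint64_t sum, uint64_t sum_sq)
{
	double mean = (double)sum / count;
	double var = (double)sum_sq / count - mean * mean;

	printf("mean=%.2f us stddev=%.2f us\n",
	       mean, var > 0 ? sqrt(var) : 0.0);
}

int main(void)
{
	/* durations 7, 9, 11, 13 us -> mean 10.00, stddev ~2.24 */
	exit_stats(4, 40, 420);
	return 0;
}

The wrap checks above are not paranoia: durations are capped at 32 bits of microseconds, so a single squared duration can already consume most of the 64-bit sum-of-squares accumulator.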
99 | void kvmppc_update_timing_stats(struct kvm_vcpu *vcpu) | ||
100 | { | ||
101 | u64 exit = vcpu->arch.timing_last_exit; | ||
102 | u64 enter = vcpu->arch.timing_last_enter.tv64; | ||
103 | |||
104 | /* save this exit time; it is used on the next exit, once the re-enter time is known */ | ||
105 | vcpu->arch.timing_last_exit = vcpu->arch.timing_exit.tv64; | ||
106 | |||
107 | if (unlikely(vcpu->arch.last_exit_type == 0xDEAD || exit == 0)) | ||
108 | return; /* skip incomplete cycle (e.g. after reset) */ | ||
109 | |||
110 | /* update statistics for average and standard deviation */ | ||
111 | add_exit_timing(vcpu, (enter - exit), vcpu->arch.last_exit_type); | ||
112 | /* enter -> timing_last_exit is time spent in guest - log this too */ | ||
113 | add_exit_timing(vcpu, (vcpu->arch.timing_last_exit - enter), | ||
114 | TIMEINGUEST); | ||
115 | } | ||
116 | |||
117 | static const char *kvm_exit_names[__NUMBER_OF_KVM_EXIT_TYPES] = { | ||
118 | [MMIO_EXITS] = "MMIO", | ||
119 | [DCR_EXITS] = "DCR", | ||
120 | [SIGNAL_EXITS] = "SIGNAL", | ||
121 | [ITLB_REAL_MISS_EXITS] = "ITLBREAL", | ||
122 | [ITLB_VIRT_MISS_EXITS] = "ITLBVIRT", | ||
123 | [DTLB_REAL_MISS_EXITS] = "DTLBREAL", | ||
124 | [DTLB_VIRT_MISS_EXITS] = "DTLBVIRT", | ||
125 | [SYSCALL_EXITS] = "SYSCALL", | ||
126 | [ISI_EXITS] = "ISI", | ||
127 | [DSI_EXITS] = "DSI", | ||
128 | [EMULATED_INST_EXITS] = "EMULINST", | ||
129 | [EMULATED_MTMSRWE_EXITS] = "EMUL_WAIT", | ||
130 | [EMULATED_WRTEE_EXITS] = "EMUL_WRTEE", | ||
131 | [EMULATED_MTSPR_EXITS] = "EMUL_MTSPR", | ||
132 | [EMULATED_MFSPR_EXITS] = "EMUL_MFSPR", | ||
133 | [EMULATED_MTMSR_EXITS] = "EMUL_MTMSR", | ||
134 | [EMULATED_MFMSR_EXITS] = "EMUL_MFMSR", | ||
135 | [EMULATED_TLBSX_EXITS] = "EMUL_TLBSX", | ||
136 | [EMULATED_TLBWE_EXITS] = "EMUL_TLBWE", | ||
137 | [EMULATED_RFI_EXITS] = "EMUL_RFI", | ||
138 | [DEC_EXITS] = "DEC", | ||
139 | [EXT_INTR_EXITS] = "EXTINT", | ||
140 | [HALT_WAKEUP] = "HALT", | ||
141 | [USR_PR_INST] = "USR_PR_INST", | ||
142 | [FP_UNAVAIL] = "FP_UNAVAIL", | ||
143 | [DEBUG_EXITS] = "DEBUG", | ||
144 | [TIMEINGUEST] = "TIMEINGUEST" | ||
145 | }; | ||
146 | |||
147 | static int kvmppc_exit_timing_show(struct seq_file *m, void *private) | ||
148 | { | ||
149 | struct kvm_vcpu *vcpu = m->private; | ||
150 | int i; | ||
151 | |||
152 | seq_printf(m, "%s", "type count min max sum sum_squared\n"); | ||
153 | |||
154 | for (i = 0; i < __NUMBER_OF_KVM_EXIT_TYPES; i++) { | ||
155 | seq_printf(m, "%12s %10d %10lld %10lld %20lld %20lld\n", | ||
156 | kvm_exit_names[i], | ||
157 | vcpu->arch.timing_count_type[i], | ||
158 | vcpu->arch.timing_min_duration[i], | ||
159 | vcpu->arch.timing_max_duration[i], | ||
160 | vcpu->arch.timing_sum_duration[i], | ||
161 | vcpu->arch.timing_sum_quad_duration[i]); | ||
162 | } | ||
163 | return 0; | ||
164 | } | ||
165 | |||
166 | /* Write 'c' to clear the timing statistics. */ | ||
167 | static ssize_t kvmppc_exit_timing_write(struct file *file, | ||
168 | const char __user *user_buf, | ||
169 | size_t count, loff_t *ppos) | ||
170 | { | ||
171 | int err = -EINVAL; | ||
172 | char c; | ||
173 | |||
174 | if (count > 1) { | ||
175 | goto done; | ||
176 | } | ||
177 | |||
178 | if (get_user(c, user_buf)) { | ||
179 | err = -EFAULT; | ||
180 | goto done; | ||
181 | } | ||
182 | |||
183 | if (c == 'c') { | ||
184 | struct seq_file *seqf = (struct seq_file *)file->private_data; | ||
185 | struct kvm_vcpu *vcpu = seqf->private; | ||
186 | /* The write does not affect buffers already generated by a | ||
187 | * show call. The seq_file lock prevents the stats reset from | ||
188 | * racing with a concurrent show. */ | ||
189 | mutex_lock(&seqf->lock); | ||
190 | kvmppc_init_timing_stats(vcpu); | ||
191 | mutex_unlock(&seqf->lock); | ||
192 | err = count; | ||
193 | } | ||
194 | |||
195 | done: | ||
196 | return err; | ||
197 | } | ||
198 | |||
199 | static int kvmppc_exit_timing_open(struct inode *inode, struct file *file) | ||
200 | { | ||
201 | return single_open(file, kvmppc_exit_timing_show, inode->i_private); | ||
202 | } | ||
203 | |||
204 | static struct file_operations kvmppc_exit_timing_fops = { | ||
205 | .owner = THIS_MODULE, | ||
206 | .open = kvmppc_exit_timing_open, | ||
207 | .read = seq_read, | ||
208 | .write = kvmppc_exit_timing_write, | ||
209 | .llseek = seq_lseek, | ||
210 | .release = single_release, | ||
211 | }; | ||
212 | |||
213 | void kvmppc_create_vcpu_debugfs(struct kvm_vcpu *vcpu, unsigned int id) | ||
214 | { | ||
215 | char dbg_fname[50]; /* stack buffer; a static here would race */ | ||
216 | struct dentry *debugfs_file; | ||
217 | |||
218 | snprintf(dbg_fname, sizeof(dbg_fname), "vm%u_vcpu%u_timing", | ||
219 | current->pid, id); | ||
220 | debugfs_file = debugfs_create_file(dbg_fname, 0666, | ||
221 | kvm_debugfs_dir, vcpu, | ||
222 | &kvmppc_exit_timing_fops); | ||
223 | |||
224 | if (!debugfs_file) { | ||
225 | printk(KERN_ERR"%s: error creating debugfs file %s\n", | ||
226 | __func__, dbg_fname); | ||
227 | return; | ||
228 | } | ||
229 | |||
230 | vcpu->arch.debugfs_exit_timing = debugfs_file; | ||
231 | } | ||
232 | |||
233 | void kvmppc_remove_vcpu_debugfs(struct kvm_vcpu *vcpu) | ||
234 | { | ||
235 | if (vcpu->arch.debugfs_exit_timing) { | ||
236 | debugfs_remove(vcpu->arch.debugfs_exit_timing); | ||
237 | vcpu->arch.debugfs_exit_timing = NULL; | ||
238 | } | ||
239 | } | ||
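Once created, the per-vcpu file reads like any other debugfs attribute, and writing the single character 'c' to it resets the counters via kvmppc_exit_timing_write() above. A trivial reader, assuming debugfs is mounted at /sys/kernel/debug and using a hypothetical vm1234_vcpu0_timing name (the vm component is the creating process's pid):

#include <stdio.h>

int main(void)
{
	char line[256];
	FILE *f = fopen("/sys/kernel/debug/kvm/vm1234_vcpu0_timing", "r");

	if (!f) {
		perror("fopen");
		return 1;
	}
	while (fgets(line, sizeof(line), f))
		fputs(line, stdout);
	fclose(f);
	return 0;
}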
diff --git a/arch/powerpc/kvm/timing.h b/arch/powerpc/kvm/timing.h new file mode 100644 index 000000000000..bb13b1f3cd5a --- /dev/null +++ b/arch/powerpc/kvm/timing.h | |||
@@ -0,0 +1,102 @@ | |||
1 | /* | ||
2 | * This program is free software; you can redistribute it and/or modify | ||
3 | * it under the terms of the GNU General Public License, version 2, as | ||
4 | * published by the Free Software Foundation. | ||
5 | * | ||
6 | * This program is distributed in the hope that it will be useful, | ||
7 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
8 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
9 | * GNU General Public License for more details. | ||
10 | * | ||
11 | * You should have received a copy of the GNU General Public License | ||
12 | * along with this program; if not, write to the Free Software | ||
13 | * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. | ||
14 | * | ||
15 | * Copyright IBM Corp. 2008 | ||
16 | * | ||
17 | * Authors: Christian Ehrhardt <ehrhardt@linux.vnet.ibm.com> | ||
18 | */ | ||
19 | |||
20 | #ifndef __POWERPC_KVM_EXITTIMING_H__ | ||
21 | #define __POWERPC_KVM_EXITTIMING_H__ | ||
22 | |||
23 | #include <linux/kvm_host.h> | ||
24 | #include <asm/kvm_host.h> | ||
25 | |||
26 | #ifdef CONFIG_KVM_EXIT_TIMING | ||
27 | void kvmppc_init_timing_stats(struct kvm_vcpu *vcpu); | ||
28 | void kvmppc_update_timing_stats(struct kvm_vcpu *vcpu); | ||
29 | void kvmppc_create_vcpu_debugfs(struct kvm_vcpu *vcpu, unsigned int id); | ||
30 | void kvmppc_remove_vcpu_debugfs(struct kvm_vcpu *vcpu); | ||
31 | |||
32 | static inline void kvmppc_set_exit_type(struct kvm_vcpu *vcpu, int type) | ||
33 | { | ||
34 | vcpu->arch.last_exit_type = type; | ||
35 | } | ||
36 | |||
37 | #else | ||
38 | /* if exit timing is not configured there is no need to build timing.c */ | ||
39 | static inline void kvmppc_init_timing_stats(struct kvm_vcpu *vcpu) {} | ||
40 | static inline void kvmppc_update_timing_stats(struct kvm_vcpu *vcpu) {} | ||
41 | static inline void kvmppc_create_vcpu_debugfs(struct kvm_vcpu *vcpu, | ||
42 | unsigned int id) {} | ||
43 | static inline void kvmppc_remove_vcpu_debugfs(struct kvm_vcpu *vcpu) {} | ||
44 | static inline void kvmppc_set_exit_type(struct kvm_vcpu *vcpu, int type) {} | ||
45 | #endif /* CONFIG_KVM_EXIT_TIMING */ | ||
46 | |||
47 | /* account the exit in kvm_stats */ | ||
48 | static inline void kvmppc_account_exit_stat(struct kvm_vcpu *vcpu, int type) | ||
49 | { | ||
50 | /* type has to be known at build time for optimization */ | ||
51 | BUILD_BUG_ON(!__builtin_constant_p(type)); | ||
52 | switch (type) { | ||
53 | case EXT_INTR_EXITS: | ||
54 | vcpu->stat.ext_intr_exits++; | ||
55 | break; | ||
56 | case DEC_EXITS: | ||
57 | vcpu->stat.dec_exits++; | ||
58 | break; | ||
59 | case EMULATED_INST_EXITS: | ||
60 | vcpu->stat.emulated_inst_exits++; | ||
61 | break; | ||
62 | case DCR_EXITS: | ||
63 | vcpu->stat.dcr_exits++; | ||
64 | break; | ||
65 | case DSI_EXITS: | ||
66 | vcpu->stat.dsi_exits++; | ||
67 | break; | ||
68 | case ISI_EXITS: | ||
69 | vcpu->stat.isi_exits++; | ||
70 | break; | ||
71 | case SYSCALL_EXITS: | ||
72 | vcpu->stat.syscall_exits++; | ||
73 | break; | ||
74 | case DTLB_REAL_MISS_EXITS: | ||
75 | vcpu->stat.dtlb_real_miss_exits++; | ||
76 | break; | ||
77 | case DTLB_VIRT_MISS_EXITS: | ||
78 | vcpu->stat.dtlb_virt_miss_exits++; | ||
79 | break; | ||
80 | case MMIO_EXITS: | ||
81 | vcpu->stat.mmio_exits++; | ||
82 | break; | ||
83 | case ITLB_REAL_MISS_EXITS: | ||
84 | vcpu->stat.itlb_real_miss_exits++; | ||
85 | break; | ||
86 | case ITLB_VIRT_MISS_EXITS: | ||
87 | vcpu->stat.itlb_virt_miss_exits++; | ||
88 | break; | ||
89 | case SIGNAL_EXITS: | ||
90 | vcpu->stat.signal_exits++; | ||
91 | break; | ||
92 | } | ||
93 | } | ||
94 | |||
95 | /* wrapper to set the exit type and account for it in kvm_stats */ | ||
96 | static inline void kvmppc_account_exit(struct kvm_vcpu *vcpu, int type) | ||
97 | { | ||
98 | kvmppc_set_exit_type(vcpu, type); | ||
99 | kvmppc_account_exit_stat(vcpu, type); | ||
100 | } | ||
101 | |||
102 | #endif /* __POWERPC_KVM_EXITTIMING_H__ */ | ||
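kvmppc_account_exit() relies on every caller passing a compile-time-constant type: after inlining, the switch in kvmppc_account_exit_stat() folds down to a single counter increment, which is what the comment ahead of the BUILD_BUG_ON is guarding. A userspace model of that shape, reduced to two illustrative exit types:

#include <stdio.h>

enum { DEC_EXITS, MMIO_EXITS, NR_EXIT_TYPES };

static unsigned long stat[NR_EXIT_TYPES];

/* With a constant 'type', an optimizing compiler reduces this switch
 * to one increment of the selected counter after inlining. */
static inline void account_exit(int type)
{
	switch (type) {
	case DEC_EXITS:
		stat[DEC_EXITS]++;
		break;
	case MMIO_EXITS:
		stat[MMIO_EXITS]++;
		break;
	}
}

int main(void)
{
	account_exit(DEC_EXITS);	/* constant argument, as in the kernel */
	printf("dec exits: %lu\n", stat[DEC_EXITS]);
	return 0;
}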
diff --git a/arch/powerpc/platforms/pseries/xics.c b/arch/powerpc/platforms/pseries/xics.c index f7a69021b7bf..84e058f1e1cc 100644 --- a/arch/powerpc/platforms/pseries/xics.c +++ b/arch/powerpc/platforms/pseries/xics.c | |||
@@ -332,7 +332,7 @@ static void xics_eoi_lpar(unsigned int virq) | |||
332 | lpar_xirr_info_set((0xff << 24) | irq); | 332 | lpar_xirr_info_set((0xff << 24) | irq); |
333 | } | 333 | } |
334 | 334 | ||
335 | static void xics_set_affinity(unsigned int virq, cpumask_t cpumask) | 335 | static void xics_set_affinity(unsigned int virq, const struct cpumask *cpumask) |
336 | { | 336 | { |
337 | unsigned int irq; | 337 | unsigned int irq; |
338 | int status; | 338 | int status; |
@@ -870,7 +870,7 @@ void xics_migrate_irqs_away(void) | |||
870 | 870 | ||
871 | /* Reset affinity to all cpus */ | 871 | /* Reset affinity to all cpus */ |
872 | irq_desc[virq].affinity = CPU_MASK_ALL; | 872 | irq_desc[virq].affinity = CPU_MASK_ALL; |
873 | desc->chip->set_affinity(virq, CPU_MASK_ALL); | 873 | desc->chip->set_affinity(virq, cpu_all_mask); |
874 | unlock: | 874 | unlock: |
875 | spin_unlock_irqrestore(&desc->lock, flags); | 875 | spin_unlock_irqrestore(&desc->lock, flags); |
876 | } | 876 | } |
diff --git a/arch/powerpc/sysdev/mpic.c b/arch/powerpc/sysdev/mpic.c index c82babb70074..3e0d89dcdba2 100644 --- a/arch/powerpc/sysdev/mpic.c +++ b/arch/powerpc/sysdev/mpic.c | |||
@@ -806,7 +806,7 @@ static void mpic_end_ipi(unsigned int irq) | |||
806 | 806 | ||
807 | #endif /* CONFIG_SMP */ | 807 | #endif /* CONFIG_SMP */ |
808 | 808 | ||
809 | void mpic_set_affinity(unsigned int irq, cpumask_t cpumask) | 809 | void mpic_set_affinity(unsigned int irq, const struct cpumask *cpumask) |
810 | { | 810 | { |
811 | struct mpic *mpic = mpic_from_irq(irq); | 811 | struct mpic *mpic = mpic_from_irq(irq); |
812 | unsigned int src = mpic_irq_to_hw(irq); | 812 | unsigned int src = mpic_irq_to_hw(irq); |
@@ -818,7 +818,7 @@ void mpic_set_affinity(unsigned int irq, cpumask_t cpumask) | |||
818 | } else { | 818 | } else { |
819 | cpumask_t tmp; | 819 | cpumask_t tmp; |
820 | 820 | ||
821 | cpus_and(tmp, cpumask, cpu_online_map); | 821 | cpumask_and(&tmp, cpumask, cpu_online_mask); |
822 | 822 | ||
823 | mpic_irq_write(src, MPIC_INFO(IRQ_DESTINATION), | 823 | mpic_irq_write(src, MPIC_INFO(IRQ_DESTINATION), |
824 | mpic_physmask(cpus_addr(tmp)[0])); | 824 | mpic_physmask(cpus_addr(tmp)[0])); |
diff --git a/arch/powerpc/sysdev/mpic.h b/arch/powerpc/sysdev/mpic.h index 6209c62a426d..3cef2af10f42 100644 --- a/arch/powerpc/sysdev/mpic.h +++ b/arch/powerpc/sysdev/mpic.h | |||
@@ -36,6 +36,6 @@ static inline int mpic_pasemi_msi_init(struct mpic *mpic) | |||
36 | 36 | ||
37 | extern int mpic_set_irq_type(unsigned int virq, unsigned int flow_type); | 37 | extern int mpic_set_irq_type(unsigned int virq, unsigned int flow_type); |
38 | extern void mpic_set_vector(unsigned int virq, unsigned int vector); | 38 | extern void mpic_set_vector(unsigned int virq, unsigned int vector); |
39 | extern void mpic_set_affinity(unsigned int irq, cpumask_t cpumask); | 39 | extern void mpic_set_affinity(unsigned int irq, const struct cpumask *cpumask); |
40 | 40 | ||
41 | #endif /* _POWERPC_SYSDEV_MPIC_H */ | 41 | #endif /* _POWERPC_SYSDEV_MPIC_H */ |
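The xics, mpic, and sh hunks in this section are all the same migration: interfaces stop taking cpumask_t by value, which copies NR_CPUS bits on every call, and take const struct cpumask * instead, with cpumask_and()/for_each_cpu() replacing cpus_and()/for_each_cpu_mask(). A userspace model, with a 64-bit word standing in for struct cpumask:

#include <stdint.h>
#include <stdio.h>

typedef uint64_t cpumask_model;	/* stand-in for struct cpumask */

/* Pointer-based interface, as after the conversion; compare
 * smp_timer_broadcast(const struct cpumask *mask) above. */
static void timer_broadcast(const cpumask_model *mask, cpumask_model online)
{
	cpumask_model tmp = *mask & online;	/* cpumask_and() */
	int cpu;

	for (cpu = 0; cpu < 64; cpu++)		/* for_each_cpu() */
		if (tmp & ((cpumask_model)1 << cpu))
			printf("send IPI to cpu %d\n", cpu);
}

int main(void)
{
	cpumask_model mask = 0x0b;	/* cpus 0, 1, 3 */

	timer_broadcast(&mask, 0x03);	/* only cpus 0 and 1 online */
	return 0;
}

The same reasoning is behind cpumask_of(cpu) replacing cpumask_of_cpu(cpu): the former returns a pointer to a static mask rather than materializing a full mask on the stack.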
diff --git a/arch/s390/Kconfig b/arch/s390/Kconfig index 8152fefc97b9..19577aeffd7b 100644 --- a/arch/s390/Kconfig +++ b/arch/s390/Kconfig | |||
@@ -83,6 +83,7 @@ config S390 | |||
83 | select HAVE_KRETPROBES | 83 | select HAVE_KRETPROBES |
84 | select HAVE_KVM if 64BIT | 84 | select HAVE_KVM if 64BIT |
85 | select HAVE_ARCH_TRACEHOOK | 85 | select HAVE_ARCH_TRACEHOOK |
86 | select INIT_ALL_POSSIBLE | ||
86 | 87 | ||
87 | source "init/Kconfig" | 88 | source "init/Kconfig" |
88 | 89 | ||
diff --git a/arch/s390/kernel/smp.c b/arch/s390/kernel/smp.c index 6fc78541dc57..3ed5c7a83c6c 100644 --- a/arch/s390/kernel/smp.c +++ b/arch/s390/kernel/smp.c | |||
@@ -55,12 +55,6 @@ | |||
55 | struct _lowcore *lowcore_ptr[NR_CPUS]; | 55 | struct _lowcore *lowcore_ptr[NR_CPUS]; |
56 | EXPORT_SYMBOL(lowcore_ptr); | 56 | EXPORT_SYMBOL(lowcore_ptr); |
57 | 57 | ||
58 | cpumask_t cpu_online_map = CPU_MASK_NONE; | ||
59 | EXPORT_SYMBOL(cpu_online_map); | ||
60 | |||
61 | cpumask_t cpu_possible_map = CPU_MASK_ALL; | ||
62 | EXPORT_SYMBOL(cpu_possible_map); | ||
63 | |||
64 | static struct task_struct *current_set[NR_CPUS]; | 58 | static struct task_struct *current_set[NR_CPUS]; |
65 | 59 | ||
66 | static u8 smp_cpu_type; | 60 | static u8 smp_cpu_type; |
diff --git a/arch/s390/kernel/time.c b/arch/s390/kernel/time.c index 5be981a36c3e..d649600df5b9 100644 --- a/arch/s390/kernel/time.c +++ b/arch/s390/kernel/time.c | |||
@@ -160,7 +160,7 @@ void init_cpu_timer(void) | |||
160 | cd->min_delta_ns = 1; | 160 | cd->min_delta_ns = 1; |
161 | cd->max_delta_ns = LONG_MAX; | 161 | cd->max_delta_ns = LONG_MAX; |
162 | cd->rating = 400; | 162 | cd->rating = 400; |
163 | cd->cpumask = cpumask_of_cpu(cpu); | 163 | cd->cpumask = cpumask_of(cpu); |
164 | cd->set_next_event = s390_next_event; | 164 | cd->set_next_event = s390_next_event; |
165 | cd->set_mode = s390_set_mode; | 165 | cd->set_mode = s390_set_mode; |
166 | 166 | ||
diff --git a/arch/s390/kvm/kvm-s390.c b/arch/s390/kvm/kvm-s390.c index 8b00eb2ddf57..be8497186b96 100644 --- a/arch/s390/kvm/kvm-s390.c +++ b/arch/s390/kvm/kvm-s390.c | |||
@@ -113,8 +113,6 @@ long kvm_arch_dev_ioctl(struct file *filp, | |||
113 | int kvm_dev_ioctl_check_extension(long ext) | 113 | int kvm_dev_ioctl_check_extension(long ext) |
114 | { | 114 | { |
115 | switch (ext) { | 115 | switch (ext) { |
116 | case KVM_CAP_USER_MEMORY: | ||
117 | return 1; | ||
118 | default: | 116 | default: |
119 | return 0; | 117 | return 0; |
120 | } | 118 | } |
@@ -185,8 +183,6 @@ struct kvm *kvm_arch_create_vm(void) | |||
185 | debug_register_view(kvm->arch.dbf, &debug_sprintf_view); | 183 | debug_register_view(kvm->arch.dbf, &debug_sprintf_view); |
186 | VM_EVENT(kvm, 3, "%s", "vm created"); | 184 | VM_EVENT(kvm, 3, "%s", "vm created"); |
187 | 185 | ||
188 | try_module_get(THIS_MODULE); | ||
189 | |||
190 | return kvm; | 186 | return kvm; |
191 | out_nodbf: | 187 | out_nodbf: |
192 | free_page((unsigned long)(kvm->arch.sca)); | 188 | free_page((unsigned long)(kvm->arch.sca)); |
@@ -196,13 +192,33 @@ out_nokvm: | |||
196 | return ERR_PTR(rc); | 192 | return ERR_PTR(rc); |
197 | } | 193 | } |
198 | 194 | ||
195 | void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu) | ||
196 | { | ||
197 | VCPU_EVENT(vcpu, 3, "%s", "free cpu"); | ||
198 | free_page((unsigned long)(vcpu->arch.sie_block)); | ||
199 | kvm_vcpu_uninit(vcpu); | ||
200 | kfree(vcpu); | ||
201 | } | ||
202 | |||
203 | static void kvm_free_vcpus(struct kvm *kvm) | ||
204 | { | ||
205 | unsigned int i; | ||
206 | |||
207 | for (i = 0; i < KVM_MAX_VCPUS; ++i) { | ||
208 | if (kvm->vcpus[i]) { | ||
209 | kvm_arch_vcpu_destroy(kvm->vcpus[i]); | ||
210 | kvm->vcpus[i] = NULL; | ||
211 | } | ||
212 | } | ||
213 | } | ||
214 | |||
199 | void kvm_arch_destroy_vm(struct kvm *kvm) | 215 | void kvm_arch_destroy_vm(struct kvm *kvm) |
200 | { | 216 | { |
201 | debug_unregister(kvm->arch.dbf); | 217 | kvm_free_vcpus(kvm); |
202 | kvm_free_physmem(kvm); | 218 | kvm_free_physmem(kvm); |
203 | free_page((unsigned long)(kvm->arch.sca)); | 219 | free_page((unsigned long)(kvm->arch.sca)); |
220 | debug_unregister(kvm->arch.dbf); | ||
204 | kfree(kvm); | 221 | kfree(kvm); |
205 | module_put(THIS_MODULE); | ||
206 | } | 222 | } |
207 | 223 | ||
208 | /* Section: vcpu related */ | 224 | /* Section: vcpu related */ |
@@ -213,8 +229,7 @@ int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu) | |||
213 | 229 | ||
214 | void kvm_arch_vcpu_uninit(struct kvm_vcpu *vcpu) | 230 | void kvm_arch_vcpu_uninit(struct kvm_vcpu *vcpu) |
215 | { | 231 | { |
216 | /* kvm common code refers to this, but doesn't call it */ | 232 | /* Nothing to do */
217 | BUG(); | ||
218 | } | 233 | } |
219 | 234 | ||
220 | void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu) | 235 | void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu) |
@@ -308,8 +323,6 @@ struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm, | |||
308 | VM_EVENT(kvm, 3, "create cpu %d at %p, sie block at %p", id, vcpu, | 323 | VM_EVENT(kvm, 3, "create cpu %d at %p, sie block at %p", id, vcpu, |
309 | vcpu->arch.sie_block); | 324 | vcpu->arch.sie_block); |
310 | 325 | ||
311 | try_module_get(THIS_MODULE); | ||
312 | |||
313 | return vcpu; | 326 | return vcpu; |
314 | out_free_cpu: | 327 | out_free_cpu: |
315 | kfree(vcpu); | 328 | kfree(vcpu); |
@@ -317,14 +330,6 @@ out_nomem: | |||
317 | return ERR_PTR(rc); | 330 | return ERR_PTR(rc); |
318 | } | 331 | } |
319 | 332 | ||
320 | void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu) | ||
321 | { | ||
322 | VCPU_EVENT(vcpu, 3, "%s", "destroy cpu"); | ||
323 | free_page((unsigned long)(vcpu->arch.sie_block)); | ||
324 | kfree(vcpu); | ||
325 | module_put(THIS_MODULE); | ||
326 | } | ||
327 | |||
328 | int kvm_arch_vcpu_runnable(struct kvm_vcpu *vcpu) | 333 | int kvm_arch_vcpu_runnable(struct kvm_vcpu *vcpu) |
329 | { | 334 | { |
330 | /* kvm common code refers to this, but never calls it */ | 335 | /* kvm common code refers to this, but never calls it */ |
diff --git a/arch/sh/include/asm/smp.h b/arch/sh/include/asm/smp.h index 85b660c17eb0..c24e9c6a1736 100644 --- a/arch/sh/include/asm/smp.h +++ b/arch/sh/include/asm/smp.h | |||
@@ -31,7 +31,7 @@ enum { | |||
31 | }; | 31 | }; |
32 | 32 | ||
33 | void smp_message_recv(unsigned int msg); | 33 | void smp_message_recv(unsigned int msg); |
34 | void smp_timer_broadcast(cpumask_t mask); | 34 | void smp_timer_broadcast(const struct cpumask *mask); |
35 | 35 | ||
36 | void local_timer_interrupt(void); | 36 | void local_timer_interrupt(void); |
37 | void local_timer_setup(unsigned int cpu); | 37 | void local_timer_setup(unsigned int cpu); |
diff --git a/arch/sh/include/asm/topology.h b/arch/sh/include/asm/topology.h index 95f0085e098a..279d9cc4a007 100644 --- a/arch/sh/include/asm/topology.h +++ b/arch/sh/include/asm/topology.h | |||
@@ -5,7 +5,6 @@ | |||
5 | 5 | ||
6 | /* sched_domains SD_NODE_INIT for sh machines */ | 6 | /* sched_domains SD_NODE_INIT for sh machines */ |
7 | #define SD_NODE_INIT (struct sched_domain) { \ | 7 | #define SD_NODE_INIT (struct sched_domain) { \ |
8 | .span = CPU_MASK_NONE, \ | ||
9 | .parent = NULL, \ | 8 | .parent = NULL, \ |
10 | .child = NULL, \ | 9 | .child = NULL, \ |
11 | .groups = NULL, \ | 10 | .groups = NULL, \ |
diff --git a/arch/sh/kernel/smp.c b/arch/sh/kernel/smp.c index 3c5ad1660bbc..8f4027412614 100644 --- a/arch/sh/kernel/smp.c +++ b/arch/sh/kernel/smp.c | |||
@@ -31,12 +31,6 @@ | |||
31 | int __cpu_number_map[NR_CPUS]; /* Map physical to logical */ | 31 | int __cpu_number_map[NR_CPUS]; /* Map physical to logical */ |
32 | int __cpu_logical_map[NR_CPUS]; /* Map logical to physical */ | 32 | int __cpu_logical_map[NR_CPUS]; /* Map logical to physical */ |
33 | 33 | ||
34 | cpumask_t cpu_possible_map; | ||
35 | EXPORT_SYMBOL(cpu_possible_map); | ||
36 | |||
37 | cpumask_t cpu_online_map; | ||
38 | EXPORT_SYMBOL(cpu_online_map); | ||
39 | |||
40 | static inline void __init smp_store_cpu_info(unsigned int cpu) | 34 | static inline void __init smp_store_cpu_info(unsigned int cpu) |
41 | { | 35 | { |
42 | struct sh_cpuinfo *c = cpu_data + cpu; | 36 | struct sh_cpuinfo *c = cpu_data + cpu; |
@@ -190,11 +184,11 @@ void arch_send_call_function_single_ipi(int cpu) | |||
190 | plat_send_ipi(cpu, SMP_MSG_FUNCTION_SINGLE); | 184 | plat_send_ipi(cpu, SMP_MSG_FUNCTION_SINGLE); |
191 | } | 185 | } |
192 | 186 | ||
193 | void smp_timer_broadcast(cpumask_t mask) | 187 | void smp_timer_broadcast(const struct cpumask *mask) |
194 | { | 188 | { |
195 | int cpu; | 189 | int cpu; |
196 | 190 | ||
197 | for_each_cpu_mask(cpu, mask) | 191 | for_each_cpu(cpu, mask) |
198 | plat_send_ipi(cpu, SMP_MSG_TIMER); | 192 | plat_send_ipi(cpu, SMP_MSG_TIMER); |
199 | } | 193 | } |
200 | 194 | ||
diff --git a/arch/sh/kernel/timers/timer-broadcast.c b/arch/sh/kernel/timers/timer-broadcast.c index c2317635230f..96e8eaea1e62 100644 --- a/arch/sh/kernel/timers/timer-broadcast.c +++ b/arch/sh/kernel/timers/timer-broadcast.c | |||
@@ -51,7 +51,7 @@ void __cpuinit local_timer_setup(unsigned int cpu) | |||
51 | clk->mult = 1; | 51 | clk->mult = 1; |
52 | clk->set_mode = dummy_timer_set_mode; | 52 | clk->set_mode = dummy_timer_set_mode; |
53 | clk->broadcast = smp_timer_broadcast; | 53 | clk->broadcast = smp_timer_broadcast; |
54 | clk->cpumask = cpumask_of_cpu(cpu); | 54 | clk->cpumask = cpumask_of(cpu); |
55 | 55 | ||
56 | clockevents_register_device(clk); | 56 | clockevents_register_device(clk); |
57 | } | 57 | } |
diff --git a/arch/sh/kernel/timers/timer-tmu.c b/arch/sh/kernel/timers/timer-tmu.c index 3c61ddd4d43e..0db3f9510336 100644 --- a/arch/sh/kernel/timers/timer-tmu.c +++ b/arch/sh/kernel/timers/timer-tmu.c | |||
@@ -263,7 +263,7 @@ static int tmu_timer_init(void) | |||
263 | tmu0_clockevent.min_delta_ns = | 263 | tmu0_clockevent.min_delta_ns = |
264 | clockevent_delta2ns(1, &tmu0_clockevent); | 264 | clockevent_delta2ns(1, &tmu0_clockevent); |
265 | 265 | ||
266 | tmu0_clockevent.cpumask = cpumask_of_cpu(0); | 266 | tmu0_clockevent.cpumask = cpumask_of(0); |
267 | 267 | ||
268 | clockevents_register_device(&tmu0_clockevent); | 268 | clockevents_register_device(&tmu0_clockevent); |
269 | 269 | ||
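The two clockevent hunks above rely on cpumask_of(cpu) returning a const struct cpumask * into a constant table, so assigning clk->cpumask no longer copies NR_CPUS bits by value the way cpumask_of_cpu() did. Sketch, assuming a clockevent whose ->cpumask field is already pointer-typed:

	static void example_setup_clockevent(struct clock_event_device *clk,
					     unsigned int cpu)
	{
		clk->cpumask = cpumask_of(cpu);	/* pointer, not a copy */
		clockevents_register_device(clk);
	}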
diff --git a/arch/sparc/include/asm/smp_32.h b/arch/sparc/include/asm/smp_32.h index a8180e546a48..8408d9d2a662 100644 --- a/arch/sparc/include/asm/smp_32.h +++ b/arch/sparc/include/asm/smp_32.h | |||
@@ -29,8 +29,6 @@ | |||
29 | */ | 29 | */ |
30 | 30 | ||
31 | extern unsigned char boot_cpu_id; | 31 | extern unsigned char boot_cpu_id; |
32 | extern cpumask_t phys_cpu_present_map; | ||
33 | #define cpu_possible_map phys_cpu_present_map | ||
34 | 32 | ||
35 | typedef void (*smpfunc_t)(unsigned long, unsigned long, unsigned long, | 33 | typedef void (*smpfunc_t)(unsigned long, unsigned long, unsigned long, |
36 | unsigned long, unsigned long); | 34 | unsigned long, unsigned long); |
diff --git a/arch/sparc/kernel/irq_64.c b/arch/sparc/kernel/irq_64.c index a3ea2bcb95de..cab8e0286871 100644 --- a/arch/sparc/kernel/irq_64.c +++ b/arch/sparc/kernel/irq_64.c | |||
@@ -312,7 +312,8 @@ static void sun4u_irq_enable(unsigned int virt_irq) | |||
312 | } | 312 | } |
313 | } | 313 | } |
314 | 314 | ||
315 | static void sun4u_set_affinity(unsigned int virt_irq, cpumask_t mask) | 315 | static void sun4u_set_affinity(unsigned int virt_irq, |
316 | const struct cpumask *mask) | ||
316 | { | 317 | { |
317 | sun4u_irq_enable(virt_irq); | 318 | sun4u_irq_enable(virt_irq); |
318 | } | 319 | } |
@@ -362,7 +363,8 @@ static void sun4v_irq_enable(unsigned int virt_irq) | |||
362 | ino, err); | 363 | ino, err); |
363 | } | 364 | } |
364 | 365 | ||
365 | static void sun4v_set_affinity(unsigned int virt_irq, cpumask_t mask) | 366 | static void sun4v_set_affinity(unsigned int virt_irq, |
367 | const struct cpumask *mask) | ||
366 | { | 368 | { |
367 | unsigned int ino = virt_irq_table[virt_irq].dev_ino; | 369 | unsigned int ino = virt_irq_table[virt_irq].dev_ino; |
368 | unsigned long cpuid = irq_choose_cpu(virt_irq); | 370 | unsigned long cpuid = irq_choose_cpu(virt_irq); |
@@ -429,7 +431,8 @@ static void sun4v_virq_enable(unsigned int virt_irq) | |||
429 | dev_handle, dev_ino, err); | 431 | dev_handle, dev_ino, err); |
430 | } | 432 | } |
431 | 433 | ||
432 | static void sun4v_virt_set_affinity(unsigned int virt_irq, cpumask_t mask) | 434 | static void sun4v_virt_set_affinity(unsigned int virt_irq, |
435 | const struct cpumask *mask) | ||
433 | { | 436 | { |
434 | unsigned long cpuid, dev_handle, dev_ino; | 437 | unsigned long cpuid, dev_handle, dev_ino; |
435 | int err; | 438 | int err; |
@@ -851,7 +854,7 @@ void fixup_irqs(void) | |||
851 | !(irq_desc[irq].status & IRQ_PER_CPU)) { | 854 | !(irq_desc[irq].status & IRQ_PER_CPU)) { |
852 | if (irq_desc[irq].chip->set_affinity) | 855 | if (irq_desc[irq].chip->set_affinity) |
853 | irq_desc[irq].chip->set_affinity(irq, | 856 | irq_desc[irq].chip->set_affinity(irq, |
854 | irq_desc[irq].affinity); | 857 | &irq_desc[irq].affinity); |
855 | } | 858 | } |
856 | spin_unlock_irqrestore(&irq_desc[irq].lock, flags); | 859 | spin_unlock_irqrestore(&irq_desc[irq].lock, flags); |
857 | } | 860 | } |
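The sparc ->set_affinity conversion follows the same rule: implementations take a const pointer, and callers such as fixup_irqs() pass the address of the affinity mask stored in the descriptor instead of a by-value copy. Roughly:

	/* Sketch of the caller side after the pointer conversion. */
	static void example_fixup_irq(unsigned int irq)
	{
		struct irq_desc *desc = &irq_desc[irq];

		if (desc->chip->set_affinity)
			desc->chip->set_affinity(irq, &desc->affinity);
	}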
diff --git a/arch/sparc/kernel/of_device_64.c b/arch/sparc/kernel/of_device_64.c index 46e231f7c5ce..322046cdf85f 100644 --- a/arch/sparc/kernel/of_device_64.c +++ b/arch/sparc/kernel/of_device_64.c | |||
@@ -780,7 +780,7 @@ out: | |||
780 | if (nid != -1) { | 780 | if (nid != -1) { |
781 | cpumask_t numa_mask = node_to_cpumask(nid); | 781 | cpumask_t numa_mask = node_to_cpumask(nid); |
782 | 782 | ||
783 | irq_set_affinity(irq, numa_mask); | 783 | irq_set_affinity(irq, &numa_mask); |
784 | } | 784 | } |
785 | 785 | ||
786 | return irq; | 786 | return irq; |
diff --git a/arch/sparc/kernel/pci_msi.c b/arch/sparc/kernel/pci_msi.c index 2e680f34f727..0d0cd815e83e 100644 --- a/arch/sparc/kernel/pci_msi.c +++ b/arch/sparc/kernel/pci_msi.c | |||
@@ -288,7 +288,7 @@ static int bringup_one_msi_queue(struct pci_pbm_info *pbm, | |||
288 | if (nid != -1) { | 288 | if (nid != -1) { |
289 | cpumask_t numa_mask = node_to_cpumask(nid); | 289 | cpumask_t numa_mask = node_to_cpumask(nid); |
290 | 290 | ||
291 | irq_set_affinity(irq, numa_mask); | 291 | irq_set_affinity(irq, &numa_mask); |
292 | } | 292 | } |
293 | err = request_irq(irq, sparc64_msiq_interrupt, 0, | 293 | err = request_irq(irq, sparc64_msiq_interrupt, 0, |
294 | "MSIQ", | 294 | "MSIQ", |
diff --git a/arch/sparc/kernel/smp_32.c b/arch/sparc/kernel/smp_32.c index e396c1f17a92..1e5ac4e282e1 100644 --- a/arch/sparc/kernel/smp_32.c +++ b/arch/sparc/kernel/smp_32.c | |||
@@ -39,8 +39,6 @@ volatile unsigned long cpu_callin_map[NR_CPUS] __cpuinitdata = {0,}; | |||
39 | unsigned char boot_cpu_id = 0; | 39 | unsigned char boot_cpu_id = 0; |
40 | unsigned char boot_cpu_id4 = 0; /* boot_cpu_id << 2 */ | 40 | unsigned char boot_cpu_id4 = 0; /* boot_cpu_id << 2 */ |
41 | 41 | ||
42 | cpumask_t cpu_online_map = CPU_MASK_NONE; | ||
43 | cpumask_t phys_cpu_present_map = CPU_MASK_NONE; | ||
44 | cpumask_t smp_commenced_mask = CPU_MASK_NONE; | 42 | cpumask_t smp_commenced_mask = CPU_MASK_NONE; |
45 | 43 | ||
46 | /* The only guaranteed locking primitive available on all Sparc | 44 | /* The only guaranteed locking primitive available on all Sparc |
@@ -334,7 +332,7 @@ void __init smp_setup_cpu_possible_map(void) | |||
334 | instance = 0; | 332 | instance = 0; |
335 | while (!cpu_find_by_instance(instance, NULL, &mid)) { | 333 | while (!cpu_find_by_instance(instance, NULL, &mid)) { |
336 | if (mid < NR_CPUS) { | 334 | if (mid < NR_CPUS) { |
337 | cpu_set(mid, phys_cpu_present_map); | 335 | cpu_set(mid, cpu_possible_map); |
338 | cpu_set(mid, cpu_present_map); | 336 | cpu_set(mid, cpu_present_map); |
339 | } | 337 | } |
340 | instance++; | 338 | instance++; |
@@ -354,7 +352,7 @@ void __init smp_prepare_boot_cpu(void) | |||
354 | 352 | ||
355 | current_thread_info()->cpu = cpuid; | 353 | current_thread_info()->cpu = cpuid; |
356 | cpu_set(cpuid, cpu_online_map); | 354 | cpu_set(cpuid, cpu_online_map); |
357 | cpu_set(cpuid, phys_cpu_present_map); | 355 | cpu_set(cpuid, cpu_possible_map); |
358 | } | 356 | } |
359 | 357 | ||
360 | int __cpuinit __cpu_up(unsigned int cpu) | 358 | int __cpuinit __cpu_up(unsigned int cpu) |
diff --git a/arch/sparc/kernel/smp_64.c b/arch/sparc/kernel/smp_64.c index bfe99d82d458..46329799f346 100644 --- a/arch/sparc/kernel/smp_64.c +++ b/arch/sparc/kernel/smp_64.c | |||
@@ -49,14 +49,10 @@ | |||
49 | 49 | ||
50 | int sparc64_multi_core __read_mostly; | 50 | int sparc64_multi_core __read_mostly; |
51 | 51 | ||
52 | cpumask_t cpu_possible_map __read_mostly = CPU_MASK_NONE; | ||
53 | cpumask_t cpu_online_map __read_mostly = CPU_MASK_NONE; | ||
54 | DEFINE_PER_CPU(cpumask_t, cpu_sibling_map) = CPU_MASK_NONE; | 52 | DEFINE_PER_CPU(cpumask_t, cpu_sibling_map) = CPU_MASK_NONE; |
55 | cpumask_t cpu_core_map[NR_CPUS] __read_mostly = | 53 | cpumask_t cpu_core_map[NR_CPUS] __read_mostly = |
56 | { [0 ... NR_CPUS-1] = CPU_MASK_NONE }; | 54 | { [0 ... NR_CPUS-1] = CPU_MASK_NONE }; |
57 | 55 | ||
58 | EXPORT_SYMBOL(cpu_possible_map); | ||
59 | EXPORT_SYMBOL(cpu_online_map); | ||
60 | EXPORT_PER_CPU_SYMBOL(cpu_sibling_map); | 56 | EXPORT_PER_CPU_SYMBOL(cpu_sibling_map); |
61 | EXPORT_SYMBOL(cpu_core_map); | 57 | EXPORT_SYMBOL(cpu_core_map); |
62 | 58 | ||
diff --git a/arch/sparc/kernel/sparc_ksyms_32.c b/arch/sparc/kernel/sparc_ksyms_32.c index a4d45fc29b21..e1e97639231b 100644 --- a/arch/sparc/kernel/sparc_ksyms_32.c +++ b/arch/sparc/kernel/sparc_ksyms_32.c | |||
@@ -112,10 +112,6 @@ EXPORT_PER_CPU_SYMBOL(__cpu_data); | |||
112 | #ifdef CONFIG_SMP | 112 | #ifdef CONFIG_SMP |
113 | /* IRQ implementation. */ | 113 | /* IRQ implementation. */ |
114 | EXPORT_SYMBOL(synchronize_irq); | 114 | EXPORT_SYMBOL(synchronize_irq); |
115 | |||
116 | /* CPU online map and active count. */ | ||
117 | EXPORT_SYMBOL(cpu_online_map); | ||
118 | EXPORT_SYMBOL(phys_cpu_present_map); | ||
119 | #endif | 115 | #endif |
120 | 116 | ||
121 | EXPORT_SYMBOL(__udelay); | 117 | EXPORT_SYMBOL(__udelay); |
diff --git a/arch/sparc/kernel/time_64.c b/arch/sparc/kernel/time_64.c index 141da3759091..9df8f095a8b1 100644 --- a/arch/sparc/kernel/time_64.c +++ b/arch/sparc/kernel/time_64.c | |||
@@ -763,7 +763,7 @@ void __devinit setup_sparc64_timer(void) | |||
763 | sevt = &__get_cpu_var(sparc64_events); | 763 | sevt = &__get_cpu_var(sparc64_events); |
764 | 764 | ||
765 | memcpy(sevt, &sparc64_clockevent, sizeof(*sevt)); | 765 | memcpy(sevt, &sparc64_clockevent, sizeof(*sevt)); |
766 | sevt->cpumask = cpumask_of_cpu(smp_processor_id()); | 766 | sevt->cpumask = cpumask_of(smp_processor_id()); |
767 | 767 | ||
768 | clockevents_register_device(sevt); | 768 | clockevents_register_device(sevt); |
769 | } | 769 | } |
diff --git a/arch/um/kernel/smp.c b/arch/um/kernel/smp.c index 045772142844..98351c78bc81 100644 --- a/arch/um/kernel/smp.c +++ b/arch/um/kernel/smp.c | |||
@@ -25,13 +25,6 @@ DEFINE_PER_CPU(struct mmu_gather, mmu_gathers); | |||
25 | #include "irq_user.h" | 25 | #include "irq_user.h" |
26 | #include "os.h" | 26 | #include "os.h" |
27 | 27 | ||
28 | /* CPU online map, set by smp_boot_cpus */ | ||
29 | cpumask_t cpu_online_map = CPU_MASK_NONE; | ||
30 | cpumask_t cpu_possible_map = CPU_MASK_NONE; | ||
31 | |||
32 | EXPORT_SYMBOL(cpu_online_map); | ||
33 | EXPORT_SYMBOL(cpu_possible_map); | ||
34 | |||
35 | /* Per CPU bogomips and other parameters | 28 | /* Per CPU bogomips and other parameters |
36 | * The only piece used here is the ipi pipe, which is set before SMP is | 29 | * The only piece used here is the ipi pipe, which is set before SMP is |
37 | * started and never changed. | 30 | * started and never changed. |
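As on sh and sparc above, the per-architecture definitions and exports of cpu_online_map/cpu_possible_map are deleted here for uml too: the generic cpumask code now owns these maps. Assuming the set_cpu_*() accessors introduced by the generic cpumask rework, an architecture only flips bits rather than defining the storage:

	/* Sketch: architectures populate, but no longer define, the maps. */
	static void example_register_cpu(unsigned int cpu)
	{
		set_cpu_possible(cpu, true);
		set_cpu_present(cpu, true);
	}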
diff --git a/arch/um/kernel/time.c b/arch/um/kernel/time.c index 47f04f4a3464..b13a87a3ec95 100644 --- a/arch/um/kernel/time.c +++ b/arch/um/kernel/time.c | |||
@@ -50,7 +50,7 @@ static int itimer_next_event(unsigned long delta, | |||
50 | static struct clock_event_device itimer_clockevent = { | 50 | static struct clock_event_device itimer_clockevent = { |
51 | .name = "itimer", | 51 | .name = "itimer", |
52 | .rating = 250, | 52 | .rating = 250, |
53 | .cpumask = CPU_MASK_ALL, | 53 | .cpumask = cpu_all_mask, |
54 | .features = CLOCK_EVT_FEAT_PERIODIC | CLOCK_EVT_FEAT_ONESHOT, | 54 | .features = CLOCK_EVT_FEAT_PERIODIC | CLOCK_EVT_FEAT_ONESHOT, |
55 | .set_mode = itimer_set_mode, | 55 | .set_mode = itimer_set_mode, |
56 | .set_next_event = itimer_next_event, | 56 | .set_next_event = itimer_next_event, |
diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig index 0f44add3e0b7..249d1e0824b5 100644 --- a/arch/x86/Kconfig +++ b/arch/x86/Kconfig | |||
@@ -601,19 +601,20 @@ config IOMMU_HELPER | |||
601 | 601 | ||
602 | config MAXSMP | 602 | config MAXSMP |
603 | bool "Configure Maximum number of SMP Processors and NUMA Nodes" | 603 | bool "Configure Maximum number of SMP Processors and NUMA Nodes" |
604 | depends on X86_64 && SMP && BROKEN | 604 | depends on X86_64 && SMP && DEBUG_KERNEL && EXPERIMENTAL |
605 | select CPUMASK_OFFSTACK | ||
605 | default n | 606 | default n |
606 | help | 607 | help |
607 | Configure maximum number of CPUS and NUMA Nodes for this architecture. | 608 | Configure maximum number of CPUS and NUMA Nodes for this architecture. |
608 | If unsure, say N. | 609 | If unsure, say N. |
609 | 610 | ||
610 | config NR_CPUS | 611 | config NR_CPUS |
611 | int "Maximum number of CPUs (2-512)" if !MAXSMP | 612 | int "Maximum number of CPUs" if SMP && !MAXSMP |
612 | range 2 512 | 613 | range 2 512 if SMP && !MAXSMP |
613 | depends on SMP | 614 | default "1" if !SMP |
614 | default "4096" if MAXSMP | 615 | default "4096" if MAXSMP |
615 | default "32" if X86_NUMAQ || X86_SUMMIT || X86_BIGSMP || X86_ES7000 | 616 | default "32" if SMP && (X86_NUMAQ || X86_SUMMIT || X86_BIGSMP || X86_ES7000) |
616 | default "8" | 617 | default "8" if SMP |
617 | help | 618 | help |
618 | This allows you to specify the maximum number of CPUs which this | 619 | This allows you to specify the maximum number of CPUs which this |
619 | kernel will support. The maximum supported value is 512 and the | 620 | kernel will support. The maximum supported value is 512 and the |
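MAXSMP now selects CPUMASK_OFFSTACK, under which cpumask_var_t becomes a pointer that must be allocated rather than a plain stack array (at NR_CPUS=4096 a single mask is 512 bytes, too big for the kernel stack). A sketch of the idiom; with CPUMASK_OFFSTACK=n the alloc/free calls compile away to nothing:

	#include <linux/cpumask.h>
	#include <linux/errno.h>
	#include <linux/gfp.h>

	static int example_with_tmp_mask(void)
	{
		cpumask_var_t tmp;

		if (!alloc_cpumask_var(&tmp, GFP_KERNEL))
			return -ENOMEM;

		cpumask_and(tmp, cpu_online_mask, cpu_possible_mask);
		/* ... use tmp ... */
		free_cpumask_var(tmp);
		return 0;
	}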
diff --git a/arch/x86/ia32/ia32_signal.c b/arch/x86/ia32/ia32_signal.c index b195f85526e3..9dabd00e9805 100644 --- a/arch/x86/ia32/ia32_signal.c +++ b/arch/x86/ia32/ia32_signal.c | |||
@@ -24,15 +24,14 @@ | |||
24 | #include <asm/ucontext.h> | 24 | #include <asm/ucontext.h> |
25 | #include <asm/uaccess.h> | 25 | #include <asm/uaccess.h> |
26 | #include <asm/i387.h> | 26 | #include <asm/i387.h> |
27 | #include <asm/ia32.h> | ||
28 | #include <asm/ptrace.h> | 27 | #include <asm/ptrace.h> |
29 | #include <asm/ia32_unistd.h> | 28 | #include <asm/ia32_unistd.h> |
30 | #include <asm/user32.h> | 29 | #include <asm/user32.h> |
31 | #include <asm/sigcontext32.h> | 30 | #include <asm/sigcontext32.h> |
32 | #include <asm/proto.h> | 31 | #include <asm/proto.h> |
33 | #include <asm/vdso.h> | 32 | #include <asm/vdso.h> |
34 | |||
35 | #include <asm/sigframe.h> | 33 | #include <asm/sigframe.h> |
34 | #include <asm/sys_ia32.h> | ||
36 | 35 | ||
37 | #define DEBUG_SIG 0 | 36 | #define DEBUG_SIG 0 |
38 | 37 | ||
diff --git a/arch/x86/ia32/ipc32.c b/arch/x86/ia32/ipc32.c index d21991ce606c..29cdcd02ead3 100644 --- a/arch/x86/ia32/ipc32.c +++ b/arch/x86/ia32/ipc32.c | |||
@@ -8,6 +8,7 @@ | |||
8 | #include <linux/shm.h> | 8 | #include <linux/shm.h> |
9 | #include <linux/ipc.h> | 9 | #include <linux/ipc.h> |
10 | #include <linux/compat.h> | 10 | #include <linux/compat.h> |
11 | #include <asm/sys_ia32.h> | ||
11 | 12 | ||
12 | asmlinkage long sys32_ipc(u32 call, int first, int second, int third, | 13 | asmlinkage long sys32_ipc(u32 call, int first, int second, int third, |
13 | compat_uptr_t ptr, u32 fifth) | 14 | compat_uptr_t ptr, u32 fifth) |
diff --git a/arch/x86/ia32/sys_ia32.c b/arch/x86/ia32/sys_ia32.c index 2e09dcd3c0a6..6c0d7f6231af 100644 --- a/arch/x86/ia32/sys_ia32.c +++ b/arch/x86/ia32/sys_ia32.c | |||
@@ -44,8 +44,8 @@ | |||
44 | #include <asm/types.h> | 44 | #include <asm/types.h> |
45 | #include <asm/uaccess.h> | 45 | #include <asm/uaccess.h> |
46 | #include <asm/atomic.h> | 46 | #include <asm/atomic.h> |
47 | #include <asm/ia32.h> | ||
48 | #include <asm/vgtod.h> | 47 | #include <asm/vgtod.h> |
48 | #include <asm/sys_ia32.h> | ||
49 | 49 | ||
50 | #define AA(__x) ((unsigned long)(__x)) | 50 | #define AA(__x) ((unsigned long)(__x)) |
51 | 51 | ||
diff --git a/arch/x86/include/asm/apic.h b/arch/x86/include/asm/apic.h index 25caa0738af5..ab1d51a8855e 100644 --- a/arch/x86/include/asm/apic.h +++ b/arch/x86/include/asm/apic.h | |||
@@ -54,7 +54,6 @@ extern int disable_apic; | |||
54 | extern int is_vsmp_box(void); | 54 | extern int is_vsmp_box(void); |
55 | extern void xapic_wait_icr_idle(void); | 55 | extern void xapic_wait_icr_idle(void); |
56 | extern u32 safe_xapic_wait_icr_idle(void); | 56 | extern u32 safe_xapic_wait_icr_idle(void); |
57 | extern u64 xapic_icr_read(void); | ||
58 | extern void xapic_icr_write(u32, u32); | 57 | extern void xapic_icr_write(u32, u32); |
59 | extern int setup_profiling_timer(unsigned int); | 58 | extern int setup_profiling_timer(unsigned int); |
60 | 59 | ||
@@ -93,7 +92,7 @@ static inline u32 native_apic_msr_read(u32 reg) | |||
93 | } | 92 | } |
94 | 93 | ||
95 | #ifndef CONFIG_X86_32 | 94 | #ifndef CONFIG_X86_32 |
96 | extern int x2apic, x2apic_preenabled; | 95 | extern int x2apic; |
97 | extern void check_x2apic(void); | 96 | extern void check_x2apic(void); |
98 | extern void enable_x2apic(void); | 97 | extern void enable_x2apic(void); |
99 | extern void enable_IR_x2apic(void); | 98 | extern void enable_IR_x2apic(void); |
diff --git a/arch/x86/include/asm/bigsmp/apic.h b/arch/x86/include/asm/bigsmp/apic.h index ce547f24a1cd..d8dd9f537911 100644 --- a/arch/x86/include/asm/bigsmp/apic.h +++ b/arch/x86/include/asm/bigsmp/apic.h | |||
@@ -9,12 +9,12 @@ static inline int apic_id_registered(void) | |||
9 | return (1); | 9 | return (1); |
10 | } | 10 | } |
11 | 11 | ||
12 | static inline cpumask_t target_cpus(void) | 12 | static inline const cpumask_t *target_cpus(void) |
13 | { | 13 | { |
14 | #ifdef CONFIG_SMP | 14 | #ifdef CONFIG_SMP |
15 | return cpu_online_map; | 15 | return &cpu_online_map; |
16 | #else | 16 | #else |
17 | return cpumask_of_cpu(0); | 17 | return &cpumask_of_cpu(0); |
18 | #endif | 18 | #endif |
19 | } | 19 | } |
20 | 20 | ||
@@ -79,7 +79,7 @@ static inline int apicid_to_node(int logical_apicid) | |||
79 | 79 | ||
80 | static inline int cpu_present_to_apicid(int mps_cpu) | 80 | static inline int cpu_present_to_apicid(int mps_cpu) |
81 | { | 81 | { |
82 | if (mps_cpu < NR_CPUS) | 82 | if (mps_cpu < nr_cpu_ids) |
83 | return (int) per_cpu(x86_bios_cpu_apicid, mps_cpu); | 83 | return (int) per_cpu(x86_bios_cpu_apicid, mps_cpu); |
84 | 84 | ||
85 | return BAD_APICID; | 85 | return BAD_APICID; |
@@ -94,7 +94,7 @@ extern u8 cpu_2_logical_apicid[]; | |||
94 | /* Mapping from cpu number to logical apicid */ | 94 | /* Mapping from cpu number to logical apicid */ |
95 | static inline int cpu_to_logical_apicid(int cpu) | 95 | static inline int cpu_to_logical_apicid(int cpu) |
96 | { | 96 | { |
97 | if (cpu >= NR_CPUS) | 97 | if (cpu >= nr_cpu_ids) |
98 | return BAD_APICID; | 98 | return BAD_APICID; |
99 | return cpu_physical_id(cpu); | 99 | return cpu_physical_id(cpu); |
100 | } | 100 | } |
@@ -119,16 +119,34 @@ static inline int check_phys_apicid_present(int boot_cpu_physical_apicid) | |||
119 | } | 119 | } |
120 | 120 | ||
121 | /* As we are using single CPU as destination, pick only one CPU here */ | 121 | /* As we are using single CPU as destination, pick only one CPU here */ |
122 | static inline unsigned int cpu_mask_to_apicid(cpumask_t cpumask) | 122 | static inline unsigned int cpu_mask_to_apicid(const cpumask_t *cpumask) |
123 | { | 123 | { |
124 | int cpu; | 124 | int cpu; |
125 | int apicid; | 125 | int apicid; |
126 | 126 | ||
127 | cpu = first_cpu(cpumask); | 127 | cpu = first_cpu(*cpumask); |
128 | apicid = cpu_to_logical_apicid(cpu); | 128 | apicid = cpu_to_logical_apicid(cpu); |
129 | return apicid; | 129 | return apicid; |
130 | } | 130 | } |
131 | 131 | ||
132 | static inline unsigned int cpu_mask_to_apicid_and(const struct cpumask *cpumask, | ||
133 | const struct cpumask *andmask) | ||
134 | { | ||
135 | int cpu; | ||
136 | |||
137 | /* | ||
138 | * We're using fixed IRQ delivery, can only return one phys APIC ID. | ||
139 | * May as well be the first. | ||
140 | */ | ||
141 | for_each_cpu_and(cpu, cpumask, andmask) | ||
142 | if (cpumask_test_cpu(cpu, cpu_online_mask)) | ||
143 | break; | ||
144 | if (cpu < nr_cpu_ids) | ||
145 | return cpu_to_logical_apicid(cpu); | ||
146 | |||
147 | return BAD_APICID; | ||
148 | } | ||
149 | |||
132 | static inline u32 phys_pkg_id(u32 cpuid_apic, int index_msb) | 150 | static inline u32 phys_pkg_id(u32 cpuid_apic, int index_msb) |
133 | { | 151 | { |
134 | return cpuid_apic >> index_msb; | 152 | return cpuid_apic >> index_msb; |
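The new cpu_mask_to_apicid_and() above is built on for_each_cpu_and(), which walks the intersection of two masks without ever materializing it in a temporary. The core pattern, stripped of the APIC specifics:

	/* Sketch: first CPU present in both masks and also online. */
	static int example_first_online_in_both(const struct cpumask *a,
						const struct cpumask *b)
	{
		int cpu;

		for_each_cpu_and(cpu, a, b)
			if (cpumask_test_cpu(cpu, cpu_online_mask))
				return cpu;	/* first match */

		return nr_cpu_ids;		/* no match */
	}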
diff --git a/arch/x86/include/asm/bigsmp/ipi.h b/arch/x86/include/asm/bigsmp/ipi.h index 9404c535b7ec..27fcd01b3ae6 100644 --- a/arch/x86/include/asm/bigsmp/ipi.h +++ b/arch/x86/include/asm/bigsmp/ipi.h | |||
@@ -1,25 +1,22 @@ | |||
1 | #ifndef __ASM_MACH_IPI_H | 1 | #ifndef __ASM_MACH_IPI_H |
2 | #define __ASM_MACH_IPI_H | 2 | #define __ASM_MACH_IPI_H |
3 | 3 | ||
4 | void send_IPI_mask_sequence(cpumask_t mask, int vector); | 4 | void send_IPI_mask_sequence(const struct cpumask *mask, int vector); |
5 | void send_IPI_mask_allbutself(const struct cpumask *mask, int vector); | ||
5 | 6 | ||
6 | static inline void send_IPI_mask(cpumask_t mask, int vector) | 7 | static inline void send_IPI_mask(const struct cpumask *mask, int vector) |
7 | { | 8 | { |
8 | send_IPI_mask_sequence(mask, vector); | 9 | send_IPI_mask_sequence(mask, vector); |
9 | } | 10 | } |
10 | 11 | ||
11 | static inline void send_IPI_allbutself(int vector) | 12 | static inline void send_IPI_allbutself(int vector) |
12 | { | 13 | { |
13 | cpumask_t mask = cpu_online_map; | 14 | send_IPI_mask_allbutself(cpu_online_mask, vector); |
14 | cpu_clear(smp_processor_id(), mask); | ||
15 | |||
16 | if (!cpus_empty(mask)) | ||
17 | send_IPI_mask(mask, vector); | ||
18 | } | 15 | } |
19 | 16 | ||
20 | static inline void send_IPI_all(int vector) | 17 | static inline void send_IPI_all(int vector) |
21 | { | 18 | { |
22 | send_IPI_mask(cpu_online_map, vector); | 19 | send_IPI_mask(cpu_online_mask, vector); |
23 | } | 20 | } |
24 | 21 | ||
25 | #endif /* __ASM_MACH_IPI_H */ | 22 | #endif /* __ASM_MACH_IPI_H */ |
diff --git a/arch/x86/include/asm/desc.h b/arch/x86/include/asm/desc.h index e6b82b17b072..dc27705f5443 100644 --- a/arch/x86/include/asm/desc.h +++ b/arch/x86/include/asm/desc.h | |||
@@ -320,16 +320,14 @@ static inline void set_intr_gate(unsigned int n, void *addr) | |||
320 | _set_gate(n, GATE_INTERRUPT, addr, 0, 0, __KERNEL_CS); | 320 | _set_gate(n, GATE_INTERRUPT, addr, 0, 0, __KERNEL_CS); |
321 | } | 321 | } |
322 | 322 | ||
323 | #define SYS_VECTOR_FREE 0 | ||
324 | #define SYS_VECTOR_ALLOCED 1 | ||
325 | |||
326 | extern int first_system_vector; | 323 | extern int first_system_vector; |
327 | extern char system_vectors[]; | 324 | /* used_vectors is BITMAP for irq is not managed by percpu vector_irq */ |
325 | extern unsigned long used_vectors[]; | ||
328 | 326 | ||
329 | static inline void alloc_system_vector(int vector) | 327 | static inline void alloc_system_vector(int vector) |
330 | { | 328 | { |
331 | if (system_vectors[vector] == SYS_VECTOR_FREE) { | 329 | if (!test_bit(vector, used_vectors)) { |
332 | system_vectors[vector] = SYS_VECTOR_ALLOCED; | 330 | set_bit(vector, used_vectors); |
333 | if (first_system_vector > vector) | 331 | if (first_system_vector > vector) |
334 | first_system_vector = vector; | 332 | first_system_vector = vector; |
335 | } else | 333 | } else |
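Here the byte-per-vector system_vectors[] array becomes the used_vectors bitmap (one bit per vector), shared with the IRQ code via DECLARE_BITMAP(used_vectors, NR_VECTORS), and the allocation check reduces to bit operations. A sketch of the bitmap-based check:

	#include <linux/bitops.h>

	static void example_alloc_vector(unsigned long *used, int vector,
					 int *first_system_vector)
	{
		if (!test_bit(vector, used)) {
			set_bit(vector, used);	/* mark vector taken */
			if (*first_system_vector > vector)
				*first_system_vector = vector;
		}
	}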
diff --git a/arch/x86/include/asm/efi.h b/arch/x86/include/asm/efi.h index a2e545c91c35..ca5ffb2856b6 100644 --- a/arch/x86/include/asm/efi.h +++ b/arch/x86/include/asm/efi.h | |||
@@ -90,6 +90,7 @@ extern void __iomem *efi_ioremap(unsigned long addr, unsigned long size); | |||
90 | 90 | ||
91 | #endif /* CONFIG_X86_32 */ | 91 | #endif /* CONFIG_X86_32 */ |
92 | 92 | ||
93 | extern int add_efi_memmap; | ||
93 | extern void efi_reserve_early(void); | 94 | extern void efi_reserve_early(void); |
94 | extern void efi_call_phys_prelog(void); | 95 | extern void efi_call_phys_prelog(void); |
95 | extern void efi_call_phys_epilog(void); | 96 | extern void efi_call_phys_epilog(void); |
diff --git a/arch/x86/include/asm/es7000/apic.h b/arch/x86/include/asm/es7000/apic.h index e24ef876915f..51ac1230294e 100644 --- a/arch/x86/include/asm/es7000/apic.h +++ b/arch/x86/include/asm/es7000/apic.h | |||
@@ -9,14 +9,14 @@ static inline int apic_id_registered(void) | |||
9 | return (1); | 9 | return (1); |
10 | } | 10 | } |
11 | 11 | ||
12 | static inline cpumask_t target_cpus_cluster(void) | 12 | static inline const cpumask_t *target_cpus_cluster(void) |
13 | { | 13 | { |
14 | return CPU_MASK_ALL; | 14 | return &CPU_MASK_ALL; |
15 | } | 15 | } |
16 | 16 | ||
17 | static inline cpumask_t target_cpus(void) | 17 | static inline const cpumask_t *target_cpus(void) |
18 | { | 18 | { |
19 | return cpumask_of_cpu(smp_processor_id()); | 19 | return &cpumask_of_cpu(smp_processor_id()); |
20 | } | 20 | } |
21 | 21 | ||
22 | #define APIC_DFR_VALUE_CLUSTER (APIC_DFR_CLUSTER) | 22 | #define APIC_DFR_VALUE_CLUSTER (APIC_DFR_CLUSTER) |
@@ -80,9 +80,10 @@ extern int apic_version [MAX_APICS]; | |||
80 | static inline void setup_apic_routing(void) | 80 | static inline void setup_apic_routing(void) |
81 | { | 81 | { |
82 | int apic = per_cpu(x86_bios_cpu_apicid, smp_processor_id()); | 82 | int apic = per_cpu(x86_bios_cpu_apicid, smp_processor_id()); |
83 | printk("Enabling APIC mode: %s. Using %d I/O APICs, target cpus %lx\n", | 83 | printk("Enabling APIC mode: %s. Using %d I/O APICs, target cpus %lx\n", |
84 | (apic_version[apic] == 0x14) ? | 84 | (apic_version[apic] == 0x14) ? |
85 | "Physical Cluster" : "Logical Cluster", nr_ioapics, cpus_addr(target_cpus())[0]); | 85 | "Physical Cluster" : "Logical Cluster", |
86 | nr_ioapics, cpus_addr(*target_cpus())[0]); | ||
86 | } | 87 | } |
87 | 88 | ||
88 | static inline int multi_timer_check(int apic, int irq) | 89 | static inline int multi_timer_check(int apic, int irq) |
@@ -100,7 +101,7 @@ static inline int cpu_present_to_apicid(int mps_cpu) | |||
100 | { | 101 | { |
101 | if (!mps_cpu) | 102 | if (!mps_cpu) |
102 | return boot_cpu_physical_apicid; | 103 | return boot_cpu_physical_apicid; |
103 | else if (mps_cpu < NR_CPUS) | 104 | else if (mps_cpu < nr_cpu_ids) |
104 | return (int) per_cpu(x86_bios_cpu_apicid, mps_cpu); | 105 | return (int) per_cpu(x86_bios_cpu_apicid, mps_cpu); |
105 | else | 106 | else |
106 | return BAD_APICID; | 107 | return BAD_APICID; |
@@ -120,9 +121,9 @@ extern u8 cpu_2_logical_apicid[]; | |||
120 | static inline int cpu_to_logical_apicid(int cpu) | 121 | static inline int cpu_to_logical_apicid(int cpu) |
121 | { | 122 | { |
122 | #ifdef CONFIG_SMP | 123 | #ifdef CONFIG_SMP |
123 | if (cpu >= NR_CPUS) | 124 | if (cpu >= nr_cpu_ids) |
124 | return BAD_APICID; | 125 | return BAD_APICID; |
125 | return (int)cpu_2_logical_apicid[cpu]; | 126 | return (int)cpu_2_logical_apicid[cpu]; |
126 | #else | 127 | #else |
127 | return logical_smp_processor_id(); | 128 | return logical_smp_processor_id(); |
128 | #endif | 129 | #endif |
@@ -146,14 +147,15 @@ static inline int check_phys_apicid_present(int cpu_physical_apicid) | |||
146 | return (1); | 147 | return (1); |
147 | } | 148 | } |
148 | 149 | ||
149 | static inline unsigned int cpu_mask_to_apicid_cluster(cpumask_t cpumask) | 150 | static inline unsigned int |
151 | cpu_mask_to_apicid_cluster(const struct cpumask *cpumask) | ||
150 | { | 152 | { |
151 | int num_bits_set; | 153 | int num_bits_set; |
152 | int cpus_found = 0; | 154 | int cpus_found = 0; |
153 | int cpu; | 155 | int cpu; |
154 | int apicid; | 156 | int apicid; |
155 | 157 | ||
156 | num_bits_set = cpus_weight(cpumask); | 158 | num_bits_set = cpumask_weight(cpumask); |
157 | /* Return id to all */ | 159 | /* Return id to all */ |
158 | if (num_bits_set == NR_CPUS) | 160 | if (num_bits_set == NR_CPUS) |
159 | return 0xFF; | 161 | return 0xFF; |
@@ -161,10 +163,10 @@ static inline unsigned int cpu_mask_to_apicid_cluster(cpumask_t cpumask) | |||
161 | * The cpus in the mask must all be on the apic cluster. If are not | 163 | * The cpus in the mask must all be on the apic cluster. If are not |
162 | * on the same apicid cluster return default value of TARGET_CPUS. | 164 | * on the same apicid cluster return default value of TARGET_CPUS. |
163 | */ | 165 | */ |
164 | cpu = first_cpu(cpumask); | 166 | cpu = cpumask_first(cpumask); |
165 | apicid = cpu_to_logical_apicid(cpu); | 167 | apicid = cpu_to_logical_apicid(cpu); |
166 | while (cpus_found < num_bits_set) { | 168 | while (cpus_found < num_bits_set) { |
167 | if (cpu_isset(cpu, cpumask)) { | 169 | if (cpumask_test_cpu(cpu, cpumask)) { |
168 | int new_apicid = cpu_to_logical_apicid(cpu); | 170 | int new_apicid = cpu_to_logical_apicid(cpu); |
169 | if (apicid_cluster(apicid) != | 171 | if (apicid_cluster(apicid) != |
170 | apicid_cluster(new_apicid)){ | 172 | apicid_cluster(new_apicid)){ |
@@ -179,14 +181,14 @@ static inline unsigned int cpu_mask_to_apicid_cluster(cpumask_t cpumask) | |||
179 | return apicid; | 181 | return apicid; |
180 | } | 182 | } |
181 | 183 | ||
182 | static inline unsigned int cpu_mask_to_apicid(cpumask_t cpumask) | 184 | static inline unsigned int cpu_mask_to_apicid(const cpumask_t *cpumask) |
183 | { | 185 | { |
184 | int num_bits_set; | 186 | int num_bits_set; |
185 | int cpus_found = 0; | 187 | int cpus_found = 0; |
186 | int cpu; | 188 | int cpu; |
187 | int apicid; | 189 | int apicid; |
188 | 190 | ||
189 | num_bits_set = cpus_weight(cpumask); | 191 | num_bits_set = cpus_weight(*cpumask); |
190 | /* Return id to all */ | 192 | /* Return id to all */ |
191 | if (num_bits_set == NR_CPUS) | 193 | if (num_bits_set == NR_CPUS) |
192 | return cpu_to_logical_apicid(0); | 194 | return cpu_to_logical_apicid(0); |
@@ -194,10 +196,52 @@ static inline unsigned int cpu_mask_to_apicid(cpumask_t cpumask) | |||
194 | * The cpus in the mask must all be on the apic cluster. If are not | 196 | * The cpus in the mask must all be on the apic cluster. If are not |
195 | * on the same apicid cluster return default value of TARGET_CPUS. | 197 | * on the same apicid cluster return default value of TARGET_CPUS. |
196 | */ | 198 | */ |
197 | cpu = first_cpu(cpumask); | 199 | cpu = first_cpu(*cpumask); |
200 | apicid = cpu_to_logical_apicid(cpu); | ||
201 | while (cpus_found < num_bits_set) { | ||
202 | if (cpu_isset(cpu, *cpumask)) { | ||
203 | int new_apicid = cpu_to_logical_apicid(cpu); | ||
204 | if (apicid_cluster(apicid) != | ||
205 | apicid_cluster(new_apicid)){ | ||
206 | printk ("%s: Not a valid mask!\n", __func__); | ||
207 | return cpu_to_logical_apicid(0); | ||
208 | } | ||
209 | apicid = new_apicid; | ||
210 | cpus_found++; | ||
211 | } | ||
212 | cpu++; | ||
213 | } | ||
214 | return apicid; | ||
215 | } | ||
216 | |||
217 | |||
218 | static inline unsigned int cpu_mask_to_apicid_and(const struct cpumask *inmask, | ||
219 | const struct cpumask *andmask) | ||
220 | { | ||
221 | int num_bits_set; | ||
222 | int cpus_found = 0; | ||
223 | int cpu; | ||
224 | int apicid = cpu_to_logical_apicid(0); | ||
225 | cpumask_var_t cpumask; | ||
226 | |||
227 | if (!alloc_cpumask_var(&cpumask, GFP_ATOMIC)) | ||
228 | return apicid; | ||
229 | |||
230 | cpumask_and(cpumask, inmask, andmask); | ||
231 | cpumask_and(cpumask, cpumask, cpu_online_mask); | ||
232 | |||
233 | num_bits_set = cpumask_weight(cpumask); | ||
234 | /* Return id to all */ | ||
235 | if (num_bits_set == NR_CPUS) | ||
236 | goto exit; | ||
237 | /* | ||
238 | * The cpus in the mask must all be on the apic cluster. If are not | ||
239 | * on the same apicid cluster return default value of TARGET_CPUS. | ||
240 | */ | ||
241 | cpu = cpumask_first(cpumask); | ||
198 | apicid = cpu_to_logical_apicid(cpu); | 242 | apicid = cpu_to_logical_apicid(cpu); |
199 | while (cpus_found < num_bits_set) { | 243 | while (cpus_found < num_bits_set) { |
200 | if (cpu_isset(cpu, cpumask)) { | 244 | if (cpumask_test_cpu(cpu, cpumask)) { |
201 | int new_apicid = cpu_to_logical_apicid(cpu); | 245 | int new_apicid = cpu_to_logical_apicid(cpu); |
202 | if (apicid_cluster(apicid) != | 246 | if (apicid_cluster(apicid) != |
203 | apicid_cluster(new_apicid)){ | 247 | apicid_cluster(new_apicid)){ |
@@ -209,6 +253,8 @@ static inline unsigned int cpu_mask_to_apicid(cpumask_t cpumask) | |||
209 | } | 253 | } |
210 | cpu++; | 254 | cpu++; |
211 | } | 255 | } |
256 | exit: | ||
257 | free_cpumask_var(cpumask); | ||
212 | return apicid; | 258 | return apicid; |
213 | } | 259 | } |
214 | 260 | ||
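Unlike the flat-mode variant, the es7000 cpu_mask_to_apicid_and() above needs a scratch mask for the double cpumask_and(); with CPUMASK_OFFSTACK that scratch must be heap-allocated, and GFP_ATOMIC is used since the caller may run with interrupts disabled. On allocation failure it degrades to cpu_to_logical_apicid(0) rather than failing IRQ setup. Just the skeleton of that, values elided:

	/* Sketch: atomic scratch mask with a safe fallback. */
	static unsigned int example_and_apicid(const struct cpumask *in,
					       const struct cpumask *and)
	{
		unsigned int apicid = 0;	/* hypothetical fallback id */
		cpumask_var_t tmp;

		if (!alloc_cpumask_var(&tmp, GFP_ATOMIC))
			return apicid;		/* degrade, don't fail */

		cpumask_and(tmp, in, and);
		cpumask_and(tmp, tmp, cpu_online_mask);
		/* ... derive apicid from tmp ... */
		free_cpumask_var(tmp);
		return apicid;
	}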
diff --git a/arch/x86/include/asm/es7000/ipi.h b/arch/x86/include/asm/es7000/ipi.h index 632a955fcc0a..7e8ed24d4b8a 100644 --- a/arch/x86/include/asm/es7000/ipi.h +++ b/arch/x86/include/asm/es7000/ipi.h | |||
@@ -1,24 +1,22 @@ | |||
1 | #ifndef __ASM_ES7000_IPI_H | 1 | #ifndef __ASM_ES7000_IPI_H |
2 | #define __ASM_ES7000_IPI_H | 2 | #define __ASM_ES7000_IPI_H |
3 | 3 | ||
4 | void send_IPI_mask_sequence(cpumask_t mask, int vector); | 4 | void send_IPI_mask_sequence(const struct cpumask *mask, int vector); |
5 | void send_IPI_mask_allbutself(const struct cpumask *mask, int vector); | ||
5 | 6 | ||
6 | static inline void send_IPI_mask(cpumask_t mask, int vector) | 7 | static inline void send_IPI_mask(const struct cpumask *mask, int vector) |
7 | { | 8 | { |
8 | send_IPI_mask_sequence(mask, vector); | 9 | send_IPI_mask_sequence(mask, vector); |
9 | } | 10 | } |
10 | 11 | ||
11 | static inline void send_IPI_allbutself(int vector) | 12 | static inline void send_IPI_allbutself(int vector) |
12 | { | 13 | { |
13 | cpumask_t mask = cpu_online_map; | 14 | send_IPI_mask_allbutself(cpu_online_mask, vector); |
14 | cpu_clear(smp_processor_id(), mask); | ||
15 | if (!cpus_empty(mask)) | ||
16 | send_IPI_mask(mask, vector); | ||
17 | } | 15 | } |
18 | 16 | ||
19 | static inline void send_IPI_all(int vector) | 17 | static inline void send_IPI_all(int vector) |
20 | { | 18 | { |
21 | send_IPI_mask(cpu_online_map, vector); | 19 | send_IPI_mask(cpu_online_mask, vector); |
22 | } | 20 | } |
23 | 21 | ||
24 | #endif /* __ASM_ES7000_IPI_H */ | 22 | #endif /* __ASM_ES7000_IPI_H */ |
diff --git a/arch/x86/include/asm/genapic_32.h b/arch/x86/include/asm/genapic_32.h index 0ac17d33a8c7..746f37a7963a 100644 --- a/arch/x86/include/asm/genapic_32.h +++ b/arch/x86/include/asm/genapic_32.h | |||
@@ -24,7 +24,7 @@ struct genapic { | |||
24 | int (*probe)(void); | 24 | int (*probe)(void); |
25 | 25 | ||
26 | int (*apic_id_registered)(void); | 26 | int (*apic_id_registered)(void); |
27 | cpumask_t (*target_cpus)(void); | 27 | const struct cpumask *(*target_cpus)(void); |
28 | int int_delivery_mode; | 28 | int int_delivery_mode; |
29 | int int_dest_mode; | 29 | int int_dest_mode; |
30 | int ESR_DISABLE; | 30 | int ESR_DISABLE; |
@@ -57,12 +57,16 @@ struct genapic { | |||
57 | 57 | ||
58 | unsigned (*get_apic_id)(unsigned long x); | 58 | unsigned (*get_apic_id)(unsigned long x); |
59 | unsigned long apic_id_mask; | 59 | unsigned long apic_id_mask; |
60 | unsigned int (*cpu_mask_to_apicid)(cpumask_t cpumask); | 60 | unsigned int (*cpu_mask_to_apicid)(const struct cpumask *cpumask); |
61 | cpumask_t (*vector_allocation_domain)(int cpu); | 61 | unsigned int (*cpu_mask_to_apicid_and)(const struct cpumask *cpumask, |
62 | const struct cpumask *andmask); | ||
63 | void (*vector_allocation_domain)(int cpu, struct cpumask *retmask); | ||
62 | 64 | ||
63 | #ifdef CONFIG_SMP | 65 | #ifdef CONFIG_SMP |
64 | /* ipi */ | 66 | /* ipi */ |
65 | void (*send_IPI_mask)(cpumask_t mask, int vector); | 67 | void (*send_IPI_mask)(const struct cpumask *mask, int vector); |
68 | void (*send_IPI_mask_allbutself)(const struct cpumask *mask, | ||
69 | int vector); | ||
66 | void (*send_IPI_allbutself)(int vector); | 70 | void (*send_IPI_allbutself)(int vector); |
67 | void (*send_IPI_all)(int vector); | 71 | void (*send_IPI_all)(int vector); |
68 | #endif | 72 | #endif |
@@ -114,6 +118,7 @@ struct genapic { | |||
114 | APICFUNC(get_apic_id) \ | 118 | APICFUNC(get_apic_id) \ |
115 | .apic_id_mask = APIC_ID_MASK, \ | 119 | .apic_id_mask = APIC_ID_MASK, \ |
116 | APICFUNC(cpu_mask_to_apicid) \ | 120 | APICFUNC(cpu_mask_to_apicid) \ |
121 | APICFUNC(cpu_mask_to_apicid_and) \ | ||
117 | APICFUNC(vector_allocation_domain) \ | 122 | APICFUNC(vector_allocation_domain) \ |
118 | APICFUNC(acpi_madt_oem_check) \ | 123 | APICFUNC(acpi_madt_oem_check) \ |
119 | IPIFUNC(send_IPI_mask) \ | 124 | IPIFUNC(send_IPI_mask) \ |
diff --git a/arch/x86/include/asm/genapic_64.h b/arch/x86/include/asm/genapic_64.h index 2cae011668b7..adf32fb56aa6 100644 --- a/arch/x86/include/asm/genapic_64.h +++ b/arch/x86/include/asm/genapic_64.h | |||
@@ -1,6 +1,8 @@ | |||
1 | #ifndef _ASM_X86_GENAPIC_64_H | 1 | #ifndef _ASM_X86_GENAPIC_64_H |
2 | #define _ASM_X86_GENAPIC_64_H | 2 | #define _ASM_X86_GENAPIC_64_H |
3 | 3 | ||
4 | #include <linux/cpumask.h> | ||
5 | |||
4 | /* | 6 | /* |
5 | * Copyright 2004 James Cleverdon, IBM. | 7 | * Copyright 2004 James Cleverdon, IBM. |
6 | * Subject to the GNU Public License, v.2 | 8 | * Subject to the GNU Public License, v.2 |
@@ -18,16 +20,20 @@ struct genapic { | |||
18 | u32 int_delivery_mode; | 20 | u32 int_delivery_mode; |
19 | u32 int_dest_mode; | 21 | u32 int_dest_mode; |
20 | int (*apic_id_registered)(void); | 22 | int (*apic_id_registered)(void); |
21 | cpumask_t (*target_cpus)(void); | 23 | const struct cpumask *(*target_cpus)(void); |
22 | cpumask_t (*vector_allocation_domain)(int cpu); | 24 | void (*vector_allocation_domain)(int cpu, struct cpumask *retmask); |
23 | void (*init_apic_ldr)(void); | 25 | void (*init_apic_ldr)(void); |
24 | /* ipi */ | 26 | /* ipi */ |
25 | void (*send_IPI_mask)(cpumask_t mask, int vector); | 27 | void (*send_IPI_mask)(const struct cpumask *mask, int vector); |
28 | void (*send_IPI_mask_allbutself)(const struct cpumask *mask, | ||
29 | int vector); | ||
26 | void (*send_IPI_allbutself)(int vector); | 30 | void (*send_IPI_allbutself)(int vector); |
27 | void (*send_IPI_all)(int vector); | 31 | void (*send_IPI_all)(int vector); |
28 | void (*send_IPI_self)(int vector); | 32 | void (*send_IPI_self)(int vector); |
29 | /* */ | 33 | /* */ |
30 | unsigned int (*cpu_mask_to_apicid)(cpumask_t cpumask); | 34 | unsigned int (*cpu_mask_to_apicid)(const struct cpumask *cpumask); |
35 | unsigned int (*cpu_mask_to_apicid_and)(const struct cpumask *cpumask, | ||
36 | const struct cpumask *andmask); | ||
31 | unsigned int (*phys_pkg_id)(int index_msb); | 37 | unsigned int (*phys_pkg_id)(int index_msb); |
32 | unsigned int (*get_apic_id)(unsigned long x); | 38 | unsigned int (*get_apic_id)(unsigned long x); |
33 | unsigned long (*set_apic_id)(unsigned int id); | 39 | unsigned long (*set_apic_id)(unsigned int id); |
diff --git a/arch/x86/include/asm/ipi.h b/arch/x86/include/asm/ipi.h index f89dffb28aa9..c745a306f7d3 100644 --- a/arch/x86/include/asm/ipi.h +++ b/arch/x86/include/asm/ipi.h | |||
@@ -117,7 +117,8 @@ static inline void __send_IPI_dest_field(unsigned int mask, int vector, | |||
117 | native_apic_mem_write(APIC_ICR, cfg); | 117 | native_apic_mem_write(APIC_ICR, cfg); |
118 | } | 118 | } |
119 | 119 | ||
120 | static inline void send_IPI_mask_sequence(cpumask_t mask, int vector) | 120 | static inline void send_IPI_mask_sequence(const struct cpumask *mask, |
121 | int vector) | ||
121 | { | 122 | { |
122 | unsigned long flags; | 123 | unsigned long flags; |
123 | unsigned long query_cpu; | 124 | unsigned long query_cpu; |
@@ -128,11 +129,29 @@ static inline void send_IPI_mask_sequence(cpumask_t mask, int vector) | |||
128 | * - mbligh | 129 | * - mbligh |
129 | */ | 130 | */ |
130 | local_irq_save(flags); | 131 | local_irq_save(flags); |
131 | for_each_cpu_mask_nr(query_cpu, mask) { | 132 | for_each_cpu(query_cpu, mask) { |
132 | __send_IPI_dest_field(per_cpu(x86_cpu_to_apicid, query_cpu), | 133 | __send_IPI_dest_field(per_cpu(x86_cpu_to_apicid, query_cpu), |
133 | vector, APIC_DEST_PHYSICAL); | 134 | vector, APIC_DEST_PHYSICAL); |
134 | } | 135 | } |
135 | local_irq_restore(flags); | 136 | local_irq_restore(flags); |
136 | } | 137 | } |
137 | 138 | ||
139 | static inline void send_IPI_mask_allbutself(const struct cpumask *mask, | ||
140 | int vector) | ||
141 | { | ||
142 | unsigned long flags; | ||
143 | unsigned int query_cpu; | ||
144 | unsigned int this_cpu = smp_processor_id(); | ||
145 | |||
146 | /* See Hack comment above */ | ||
147 | |||
148 | local_irq_save(flags); | ||
149 | for_each_cpu(query_cpu, mask) | ||
150 | if (query_cpu != this_cpu) | ||
151 | __send_IPI_dest_field( | ||
152 | per_cpu(x86_cpu_to_apicid, query_cpu), | ||
153 | vector, APIC_DEST_PHYSICAL); | ||
154 | local_irq_restore(flags); | ||
155 | } | ||
156 | |||
138 | #endif /* _ASM_X86_IPI_H */ | 157 | #endif /* _ASM_X86_IPI_H */ |
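send_IPI_mask_allbutself() above replaces the old copy-the-mask-and-clear-own-bit idiom seen in the bigsmp and es7000 headers earlier: instead of duplicating a possibly large cpumask on the stack, it walks the mask once and skips the sending CPU. The shape of it, with a hypothetical send_one() standing in for __send_IPI_dest_field():

	static void example_ipi_allbutself(const struct cpumask *mask,
					   int vector)
	{
		unsigned int cpu;
		unsigned int this_cpu = smp_processor_id();

		for_each_cpu(cpu, mask)
			if (cpu != this_cpu)
				send_one(cpu, vector);	/* hypothetical */
	}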
diff --git a/arch/x86/include/asm/irq.h b/arch/x86/include/asm/irq.h index 28e409fc73f3..592688ed04d3 100644 --- a/arch/x86/include/asm/irq.h +++ b/arch/x86/include/asm/irq.h | |||
@@ -33,7 +33,7 @@ static inline int irq_canonicalize(int irq) | |||
33 | 33 | ||
34 | #ifdef CONFIG_HOTPLUG_CPU | 34 | #ifdef CONFIG_HOTPLUG_CPU |
35 | #include <linux/cpumask.h> | 35 | #include <linux/cpumask.h> |
36 | extern void fixup_irqs(cpumask_t map); | 36 | extern void fixup_irqs(void); |
37 | #endif | 37 | #endif |
38 | 38 | ||
39 | extern unsigned int do_IRQ(struct pt_regs *regs); | 39 | extern unsigned int do_IRQ(struct pt_regs *regs); |
@@ -42,5 +42,6 @@ extern void native_init_IRQ(void); | |||
42 | 42 | ||
43 | /* Interrupt vector management */ | 43 | /* Interrupt vector management */ |
44 | extern DECLARE_BITMAP(used_vectors, NR_VECTORS); | 44 | extern DECLARE_BITMAP(used_vectors, NR_VECTORS); |
45 | extern int vector_used_by_percpu_irq(unsigned int vector); | ||
45 | 46 | ||
46 | #endif /* _ASM_X86_IRQ_H */ | 47 | #endif /* _ASM_X86_IRQ_H */ |
diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h index 8346be87cfa1..97215a458e5f 100644 --- a/arch/x86/include/asm/kvm_host.h +++ b/arch/x86/include/asm/kvm_host.h | |||
@@ -21,6 +21,7 @@ | |||
21 | 21 | ||
22 | #include <asm/pvclock-abi.h> | 22 | #include <asm/pvclock-abi.h> |
23 | #include <asm/desc.h> | 23 | #include <asm/desc.h> |
24 | #include <asm/mtrr.h> | ||
24 | 25 | ||
25 | #define KVM_MAX_VCPUS 16 | 26 | #define KVM_MAX_VCPUS 16 |
26 | #define KVM_MEMORY_SLOTS 32 | 27 | #define KVM_MEMORY_SLOTS 32 |
@@ -86,6 +87,7 @@ | |||
86 | #define KVM_MIN_FREE_MMU_PAGES 5 | 87 | #define KVM_MIN_FREE_MMU_PAGES 5 |
87 | #define KVM_REFILL_PAGES 25 | 88 | #define KVM_REFILL_PAGES 25 |
88 | #define KVM_MAX_CPUID_ENTRIES 40 | 89 | #define KVM_MAX_CPUID_ENTRIES 40 |
90 | #define KVM_NR_FIXED_MTRR_REGION 88 | ||
89 | #define KVM_NR_VAR_MTRR 8 | 91 | #define KVM_NR_VAR_MTRR 8 |
90 | 92 | ||
91 | extern spinlock_t kvm_lock; | 93 | extern spinlock_t kvm_lock; |
@@ -180,6 +182,8 @@ struct kvm_mmu_page { | |||
180 | struct list_head link; | 182 | struct list_head link; |
181 | struct hlist_node hash_link; | 183 | struct hlist_node hash_link; |
182 | 184 | ||
185 | struct list_head oos_link; | ||
186 | |||
183 | /* | 187 | /* |
184 | * The following two entries are used to key the shadow page in the | 188 | * The following two entries are used to key the shadow page in the |
185 | * hash table. | 189 | * hash table. |
@@ -190,13 +194,16 @@ struct kvm_mmu_page { | |||
190 | u64 *spt; | 194 | u64 *spt; |
191 | /* hold the gfn of each spte inside spt */ | 195 | /* hold the gfn of each spte inside spt */ |
192 | gfn_t *gfns; | 196 | gfn_t *gfns; |
193 | unsigned long slot_bitmap; /* One bit set per slot which has memory | 197 | /* |
194 | * in this shadow page. | 198 | * One bit set per slot which has memory |
195 | */ | 199 | * in this shadow page. |
200 | */ | ||
201 | DECLARE_BITMAP(slot_bitmap, KVM_MEMORY_SLOTS + KVM_PRIVATE_MEM_SLOTS); | ||
196 | int multimapped; /* More than one parent_pte? */ | 202 | int multimapped; /* More than one parent_pte? */ |
197 | int root_count; /* Currently serving as active root */ | 203 | int root_count; /* Currently serving as active root */ |
198 | bool unsync; | 204 | bool unsync; |
199 | bool unsync_children; | 205 | bool global; |
206 | unsigned int unsync_children; | ||
200 | union { | 207 | union { |
201 | u64 *parent_pte; /* !multimapped */ | 208 | u64 *parent_pte; /* !multimapped */ |
202 | struct hlist_head parent_ptes; /* multimapped, kvm_pte_chain */ | 209 | struct hlist_head parent_ptes; /* multimapped, kvm_pte_chain */ |
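The slot_bitmap change in this hunk is a fix by construction: a bare unsigned long holds at most BITS_PER_LONG slots (only 32 on 32-bit hosts), while DECLARE_BITMAP() sizes the array from the real slot count, KVM_MEMORY_SLOTS + KVM_PRIVATE_MEM_SLOTS. Minimal illustration, with a hypothetical private-slot count:

	#include <linux/bitmap.h>

	#define EXAMPLE_MEM_SLOTS	32	/* as KVM_MEMORY_SLOTS above */
	#define EXAMPLE_PRIVATE_SLOTS	4	/* hypothetical count */

	struct example_shadow_page {
		DECLARE_BITMAP(slot_bitmap,
			       EXAMPLE_MEM_SLOTS + EXAMPLE_PRIVATE_SLOTS);
	};

	static void example_mark_slot(struct example_shadow_page *sp, int slot)
	{
		__set_bit(slot, sp->slot_bitmap);
	}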
@@ -327,8 +334,10 @@ struct kvm_vcpu_arch { | |||
327 | 334 | ||
328 | bool nmi_pending; | 335 | bool nmi_pending; |
329 | bool nmi_injected; | 336 | bool nmi_injected; |
337 | bool nmi_window_open; | ||
330 | 338 | ||
331 | u64 mtrr[0x100]; | 339 | struct mtrr_state_type mtrr_state; |
340 | u32 pat; | ||
332 | }; | 341 | }; |
333 | 342 | ||
334 | struct kvm_mem_alias { | 343 | struct kvm_mem_alias { |
@@ -350,11 +359,13 @@ struct kvm_arch{ | |||
350 | */ | 359 | */ |
351 | struct list_head active_mmu_pages; | 360 | struct list_head active_mmu_pages; |
352 | struct list_head assigned_dev_head; | 361 | struct list_head assigned_dev_head; |
362 | struct list_head oos_global_pages; | ||
353 | struct dmar_domain *intel_iommu_domain; | 363 | struct dmar_domain *intel_iommu_domain; |
354 | struct kvm_pic *vpic; | 364 | struct kvm_pic *vpic; |
355 | struct kvm_ioapic *vioapic; | 365 | struct kvm_ioapic *vioapic; |
356 | struct kvm_pit *vpit; | 366 | struct kvm_pit *vpit; |
357 | struct hlist_head irq_ack_notifier_list; | 367 | struct hlist_head irq_ack_notifier_list; |
368 | int vapics_in_nmi_mode; | ||
358 | 369 | ||
359 | int round_robin_prev_vcpu; | 370 | int round_robin_prev_vcpu; |
360 | unsigned int tss_addr; | 371 | unsigned int tss_addr; |
@@ -378,6 +389,7 @@ struct kvm_vm_stat { | |||
378 | u32 mmu_recycled; | 389 | u32 mmu_recycled; |
379 | u32 mmu_cache_miss; | 390 | u32 mmu_cache_miss; |
380 | u32 mmu_unsync; | 391 | u32 mmu_unsync; |
392 | u32 mmu_unsync_global; | ||
381 | u32 remote_tlb_flush; | 393 | u32 remote_tlb_flush; |
382 | u32 lpages; | 394 | u32 lpages; |
383 | }; | 395 | }; |
@@ -397,6 +409,7 @@ struct kvm_vcpu_stat { | |||
397 | u32 halt_exits; | 409 | u32 halt_exits; |
398 | u32 halt_wakeup; | 410 | u32 halt_wakeup; |
399 | u32 request_irq_exits; | 411 | u32 request_irq_exits; |
412 | u32 request_nmi_exits; | ||
400 | u32 irq_exits; | 413 | u32 irq_exits; |
401 | u32 host_state_reload; | 414 | u32 host_state_reload; |
402 | u32 efer_reload; | 415 | u32 efer_reload; |
@@ -405,6 +418,7 @@ struct kvm_vcpu_stat { | |||
405 | u32 insn_emulation_fail; | 418 | u32 insn_emulation_fail; |
406 | u32 hypercalls; | 419 | u32 hypercalls; |
407 | u32 irq_injections; | 420 | u32 irq_injections; |
421 | u32 nmi_injections; | ||
408 | }; | 422 | }; |
409 | 423 | ||
410 | struct descriptor_table { | 424 | struct descriptor_table { |
@@ -477,6 +491,7 @@ struct kvm_x86_ops { | |||
477 | 491 | ||
478 | int (*set_tss_addr)(struct kvm *kvm, unsigned int addr); | 492 | int (*set_tss_addr)(struct kvm *kvm, unsigned int addr); |
479 | int (*get_tdp_level)(void); | 493 | int (*get_tdp_level)(void); |
494 | int (*get_mt_mask_shift)(void); | ||
480 | }; | 495 | }; |
481 | 496 | ||
482 | extern struct kvm_x86_ops *kvm_x86_ops; | 497 | extern struct kvm_x86_ops *kvm_x86_ops; |
@@ -490,7 +505,7 @@ int kvm_mmu_setup(struct kvm_vcpu *vcpu); | |||
490 | void kvm_mmu_set_nonpresent_ptes(u64 trap_pte, u64 notrap_pte); | 505 | void kvm_mmu_set_nonpresent_ptes(u64 trap_pte, u64 notrap_pte); |
491 | void kvm_mmu_set_base_ptes(u64 base_pte); | 506 | void kvm_mmu_set_base_ptes(u64 base_pte); |
492 | void kvm_mmu_set_mask_ptes(u64 user_mask, u64 accessed_mask, | 507 | void kvm_mmu_set_mask_ptes(u64 user_mask, u64 accessed_mask, |
493 | u64 dirty_mask, u64 nx_mask, u64 x_mask); | 508 | u64 dirty_mask, u64 nx_mask, u64 x_mask, u64 mt_mask); |
494 | 509 | ||
495 | int kvm_mmu_reset_context(struct kvm_vcpu *vcpu); | 510 | int kvm_mmu_reset_context(struct kvm_vcpu *vcpu); |
496 | void kvm_mmu_slot_remove_write_access(struct kvm *kvm, int slot); | 511 | void kvm_mmu_slot_remove_write_access(struct kvm *kvm, int slot); |
@@ -587,12 +602,14 @@ unsigned long segment_base(u16 selector); | |||
587 | 602 | ||
588 | void kvm_mmu_flush_tlb(struct kvm_vcpu *vcpu); | 603 | void kvm_mmu_flush_tlb(struct kvm_vcpu *vcpu); |
589 | void kvm_mmu_pte_write(struct kvm_vcpu *vcpu, gpa_t gpa, | 604 | void kvm_mmu_pte_write(struct kvm_vcpu *vcpu, gpa_t gpa, |
590 | const u8 *new, int bytes); | 605 | const u8 *new, int bytes, |
606 | bool guest_initiated); | ||
591 | int kvm_mmu_unprotect_page_virt(struct kvm_vcpu *vcpu, gva_t gva); | 607 | int kvm_mmu_unprotect_page_virt(struct kvm_vcpu *vcpu, gva_t gva); |
592 | void __kvm_mmu_free_some_pages(struct kvm_vcpu *vcpu); | 608 | void __kvm_mmu_free_some_pages(struct kvm_vcpu *vcpu); |
593 | int kvm_mmu_load(struct kvm_vcpu *vcpu); | 609 | int kvm_mmu_load(struct kvm_vcpu *vcpu); |
594 | void kvm_mmu_unload(struct kvm_vcpu *vcpu); | 610 | void kvm_mmu_unload(struct kvm_vcpu *vcpu); |
595 | void kvm_mmu_sync_roots(struct kvm_vcpu *vcpu); | 611 | void kvm_mmu_sync_roots(struct kvm_vcpu *vcpu); |
612 | void kvm_mmu_sync_global(struct kvm_vcpu *vcpu); | ||
596 | 613 | ||
597 | int kvm_emulate_hypercall(struct kvm_vcpu *vcpu); | 614 | int kvm_emulate_hypercall(struct kvm_vcpu *vcpu); |
598 | 615 | ||
@@ -607,6 +624,8 @@ void kvm_disable_tdp(void); | |||
607 | int load_pdptrs(struct kvm_vcpu *vcpu, unsigned long cr3); | 624 | int load_pdptrs(struct kvm_vcpu *vcpu, unsigned long cr3); |
608 | int complete_pio(struct kvm_vcpu *vcpu); | 625 | int complete_pio(struct kvm_vcpu *vcpu); |
609 | 626 | ||
627 | struct kvm_memory_slot *gfn_to_memslot_unaliased(struct kvm *kvm, gfn_t gfn); | ||
628 | |||
610 | static inline struct kvm_mmu_page *page_header(hpa_t shadow_page) | 629 | static inline struct kvm_mmu_page *page_header(hpa_t shadow_page) |
611 | { | 630 | { |
612 | struct page *page = pfn_to_page(shadow_page >> PAGE_SHIFT); | 631 | struct page *page = pfn_to_page(shadow_page >> PAGE_SHIFT); |
@@ -702,18 +721,6 @@ static inline void kvm_inject_gp(struct kvm_vcpu *vcpu, u32 error_code) | |||
702 | kvm_queue_exception_e(vcpu, GP_VECTOR, error_code); | 721 | kvm_queue_exception_e(vcpu, GP_VECTOR, error_code); |
703 | } | 722 | } |
704 | 723 | ||
705 | #define ASM_VMX_VMCLEAR_RAX ".byte 0x66, 0x0f, 0xc7, 0x30" | ||
706 | #define ASM_VMX_VMLAUNCH ".byte 0x0f, 0x01, 0xc2" | ||
707 | #define ASM_VMX_VMRESUME ".byte 0x0f, 0x01, 0xc3" | ||
708 | #define ASM_VMX_VMPTRLD_RAX ".byte 0x0f, 0xc7, 0x30" | ||
709 | #define ASM_VMX_VMREAD_RDX_RAX ".byte 0x0f, 0x78, 0xd0" | ||
710 | #define ASM_VMX_VMWRITE_RAX_RDX ".byte 0x0f, 0x79, 0xd0" | ||
711 | #define ASM_VMX_VMWRITE_RSP_RDX ".byte 0x0f, 0x79, 0xd4" | ||
712 | #define ASM_VMX_VMXOFF ".byte 0x0f, 0x01, 0xc4" | ||
713 | #define ASM_VMX_VMXON_RAX ".byte 0xf3, 0x0f, 0xc7, 0x30" | ||
714 | #define ASM_VMX_INVEPT ".byte 0x66, 0x0f, 0x38, 0x80, 0x08" | ||
715 | #define ASM_VMX_INVVPID ".byte 0x66, 0x0f, 0x38, 0x81, 0x08" | ||
716 | |||
717 | #define MSR_IA32_TIME_STAMP_COUNTER 0x010 | 724 | #define MSR_IA32_TIME_STAMP_COUNTER 0x010 |
718 | 725 | ||
719 | #define TSS_IOPB_BASE_OFFSET 0x66 | 726 | #define TSS_IOPB_BASE_OFFSET 0x66 |
diff --git a/arch/x86/include/asm/kvm_x86_emulate.h b/arch/x86/include/asm/kvm_x86_emulate.h index 25179a29f208..6a159732881a 100644 --- a/arch/x86/include/asm/kvm_x86_emulate.h +++ b/arch/x86/include/asm/kvm_x86_emulate.h | |||
@@ -123,6 +123,7 @@ struct decode_cache { | |||
123 | u8 ad_bytes; | 123 | u8 ad_bytes; |
124 | u8 rex_prefix; | 124 | u8 rex_prefix; |
125 | struct operand src; | 125 | struct operand src; |
126 | struct operand src2; | ||
126 | struct operand dst; | 127 | struct operand dst; |
127 | bool has_seg_override; | 128 | bool has_seg_override; |
128 | u8 seg_override; | 129 | u8 seg_override; |
@@ -146,22 +147,18 @@ struct x86_emulate_ctxt { | |||
146 | /* Register state before/after emulation. */ | 147 | /* Register state before/after emulation. */ |
147 | struct kvm_vcpu *vcpu; | 148 | struct kvm_vcpu *vcpu; |
148 | 149 | ||
149 | /* Linear faulting address (if emulating a page-faulting instruction) */ | ||
150 | unsigned long eflags; | 150 | unsigned long eflags; |
151 | |||
152 | /* Emulated execution mode, represented by an X86EMUL_MODE value. */ | 151 | /* Emulated execution mode, represented by an X86EMUL_MODE value. */ |
153 | int mode; | 152 | int mode; |
154 | |||
155 | u32 cs_base; | 153 | u32 cs_base; |
156 | 154 | ||
157 | /* decode cache */ | 155 | /* decode cache */ |
158 | |||
159 | struct decode_cache decode; | 156 | struct decode_cache decode; |
160 | }; | 157 | }; |
161 | 158 | ||
162 | /* Repeat String Operation Prefix */ | 159 | /* Repeat String Operation Prefix */ |
163 | #define REPE_PREFIX 1 | 160 | #define REPE_PREFIX 1 |
164 | #define REPNE_PREFIX 2 | 161 | #define REPNE_PREFIX 2 |
165 | 162 | ||
166 | /* Execution mode, passed to the emulator. */ | 163 | /* Execution mode, passed to the emulator. */ |
167 | #define X86EMUL_MODE_REAL 0 /* Real mode. */ | 164 | #define X86EMUL_MODE_REAL 0 /* Real mode. */ |
@@ -170,7 +167,7 @@ struct x86_emulate_ctxt { | |||
170 | #define X86EMUL_MODE_PROT64 8 /* 64-bit (long) mode. */ | 167 | #define X86EMUL_MODE_PROT64 8 /* 64-bit (long) mode. */ |
171 | 168 | ||
172 | /* Host execution mode. */ | 169 | /* Host execution mode. */ |
173 | #if defined(__i386__) | 170 | #if defined(CONFIG_X86_32) |
174 | #define X86EMUL_MODE_HOST X86EMUL_MODE_PROT32 | 171 | #define X86EMUL_MODE_HOST X86EMUL_MODE_PROT32 |
175 | #elif defined(CONFIG_X86_64) | 172 | #elif defined(CONFIG_X86_64) |
176 | #define X86EMUL_MODE_HOST X86EMUL_MODE_PROT64 | 173 | #define X86EMUL_MODE_HOST X86EMUL_MODE_PROT64 |
diff --git a/arch/x86/include/asm/mach-default/mach_apic.h b/arch/x86/include/asm/mach-default/mach_apic.h index 6cb3a467e067..cc09cbbee27e 100644 --- a/arch/x86/include/asm/mach-default/mach_apic.h +++ b/arch/x86/include/asm/mach-default/mach_apic.h | |||
@@ -8,12 +8,12 @@ | |||
8 | 8 | ||
9 | #define APIC_DFR_VALUE (APIC_DFR_FLAT) | 9 | #define APIC_DFR_VALUE (APIC_DFR_FLAT) |
10 | 10 | ||
11 | static inline cpumask_t target_cpus(void) | 11 | static inline const struct cpumask *target_cpus(void) |
12 | { | 12 | { |
13 | #ifdef CONFIG_SMP | 13 | #ifdef CONFIG_SMP |
14 | return cpu_online_map; | 14 | return cpu_online_mask; |
15 | #else | 15 | #else |
16 | return cpumask_of_cpu(0); | 16 | return cpumask_of(0); |
17 | #endif | 17 | #endif |
18 | } | 18 | } |
19 | 19 | ||
@@ -28,6 +28,7 @@ static inline cpumask_t target_cpus(void) | |||
28 | #define apic_id_registered (genapic->apic_id_registered) | 28 | #define apic_id_registered (genapic->apic_id_registered) |
29 | #define init_apic_ldr (genapic->init_apic_ldr) | 29 | #define init_apic_ldr (genapic->init_apic_ldr) |
30 | #define cpu_mask_to_apicid (genapic->cpu_mask_to_apicid) | 30 | #define cpu_mask_to_apicid (genapic->cpu_mask_to_apicid) |
31 | #define cpu_mask_to_apicid_and (genapic->cpu_mask_to_apicid_and) | ||
31 | #define phys_pkg_id (genapic->phys_pkg_id) | 32 | #define phys_pkg_id (genapic->phys_pkg_id) |
32 | #define vector_allocation_domain (genapic->vector_allocation_domain) | 33 | #define vector_allocation_domain (genapic->vector_allocation_domain) |
33 | #define read_apic_id() (GET_APIC_ID(apic_read(APIC_ID))) | 34 | #define read_apic_id() (GET_APIC_ID(apic_read(APIC_ID))) |
@@ -61,9 +62,19 @@ static inline int apic_id_registered(void) | |||
61 | return physid_isset(read_apic_id(), phys_cpu_present_map); | 62 | return physid_isset(read_apic_id(), phys_cpu_present_map); |
62 | } | 63 | } |
63 | 64 | ||
64 | static inline unsigned int cpu_mask_to_apicid(cpumask_t cpumask) | 65 | static inline unsigned int cpu_mask_to_apicid(const struct cpumask *cpumask) |
65 | { | 66 | { |
66 | return cpus_addr(cpumask)[0]; | 67 | return cpumask_bits(cpumask)[0]; |
68 | } | ||
69 | |||
70 | static inline unsigned int cpu_mask_to_apicid_and(const struct cpumask *cpumask, | ||
71 | const struct cpumask *andmask) | ||
72 | { | ||
73 | unsigned long mask1 = cpumask_bits(cpumask)[0]; | ||
74 | unsigned long mask2 = cpumask_bits(andmask)[0]; | ||
75 | unsigned long mask3 = cpumask_bits(cpu_online_mask)[0]; | ||
76 | |||
77 | return (unsigned int)(mask1 & mask2 & mask3); | ||
67 | } | 78 | } |
68 | 79 | ||
69 | static inline u32 phys_pkg_id(u32 cpuid_apic, int index_msb) | 80 | static inline u32 phys_pkg_id(u32 cpuid_apic, int index_msb) |
@@ -88,7 +99,7 @@ static inline int apicid_to_node(int logical_apicid) | |||
88 | #endif | 99 | #endif |
89 | } | 100 | } |
90 | 101 | ||
91 | static inline cpumask_t vector_allocation_domain(int cpu) | 102 | static inline void vector_allocation_domain(int cpu, struct cpumask *retmask) |
92 | { | 103 | { |
93 | /* Careful. Some cpus do not strictly honor the set of cpus | 104 | /* Careful. Some cpus do not strictly honor the set of cpus |
94 | * specified in the interrupt destination when using lowest | 105 | * specified in the interrupt destination when using lowest |
@@ -98,8 +109,7 @@ static inline cpumask_t vector_allocation_domain(int cpu) | |||
98 | * deliver interrupts to the wrong hyperthread when only one | 109 | * deliver interrupts to the wrong hyperthread when only one |
99 | * hyperthread was specified in the interrupt destination. | 110 | * hyperthread was specified in the interrupt destination. |
100 | */ | 111 | */ |
101 | cpumask_t domain = { { [0] = APIC_ALL_CPUS, } }; | 112 | *retmask = (cpumask_t) { { [0] = APIC_ALL_CPUS } }; |
102 | return domain; | ||
103 | } | 113 | } |
104 | #endif | 114 | #endif |
105 | 115 | ||
@@ -131,7 +141,7 @@ static inline int cpu_to_logical_apicid(int cpu) | |||
131 | 141 | ||
132 | static inline int cpu_present_to_apicid(int mps_cpu) | 142 | static inline int cpu_present_to_apicid(int mps_cpu) |
133 | { | 143 | { |
134 | if (mps_cpu < NR_CPUS && cpu_present(mps_cpu)) | 144 | if (mps_cpu < nr_cpu_ids && cpu_present(mps_cpu)) |
135 | return (int)per_cpu(x86_bios_cpu_apicid, mps_cpu); | 145 | return (int)per_cpu(x86_bios_cpu_apicid, mps_cpu); |
136 | else | 146 | else |
137 | return BAD_APICID; | 147 | return BAD_APICID; |
diff --git a/arch/x86/include/asm/mach-default/mach_ipi.h b/arch/x86/include/asm/mach-default/mach_ipi.h index fabca01ebacf..191312d155da 100644 --- a/arch/x86/include/asm/mach-default/mach_ipi.h +++ b/arch/x86/include/asm/mach-default/mach_ipi.h | |||
@@ -4,7 +4,8 @@ | |||
4 | /* Avoid include hell */ | 4 | /* Avoid include hell */ |
5 | #define NMI_VECTOR 0x02 | 5 | #define NMI_VECTOR 0x02 |
6 | 6 | ||
7 | void send_IPI_mask_bitmask(cpumask_t mask, int vector); | 7 | void send_IPI_mask_bitmask(const struct cpumask *mask, int vector); |
8 | void send_IPI_mask_allbutself(const struct cpumask *mask, int vector); | ||
8 | void __send_IPI_shortcut(unsigned int shortcut, int vector); | 9 | void __send_IPI_shortcut(unsigned int shortcut, int vector); |
9 | 10 | ||
10 | extern int no_broadcast; | 11 | extern int no_broadcast; |
@@ -12,28 +13,27 @@ extern int no_broadcast; | |||
12 | #ifdef CONFIG_X86_64 | 13 | #ifdef CONFIG_X86_64 |
13 | #include <asm/genapic.h> | 14 | #include <asm/genapic.h> |
14 | #define send_IPI_mask (genapic->send_IPI_mask) | 15 | #define send_IPI_mask (genapic->send_IPI_mask) |
16 | #define send_IPI_mask_allbutself (genapic->send_IPI_mask_allbutself) | ||
15 | #else | 17 | #else |
16 | static inline void send_IPI_mask(cpumask_t mask, int vector) | 18 | static inline void send_IPI_mask(const struct cpumask *mask, int vector) |
17 | { | 19 | { |
18 | send_IPI_mask_bitmask(mask, vector); | 20 | send_IPI_mask_bitmask(mask, vector); |
19 | } | 21 | } |
22 | void send_IPI_mask_allbutself(const struct cpumask *mask, int vector); | ||
20 | #endif | 23 | #endif |
21 | 24 | ||
22 | static inline void __local_send_IPI_allbutself(int vector) | 25 | static inline void __local_send_IPI_allbutself(int vector) |
23 | { | 26 | { |
24 | if (no_broadcast || vector == NMI_VECTOR) { | 27 | if (no_broadcast || vector == NMI_VECTOR) |
25 | cpumask_t mask = cpu_online_map; | 28 | send_IPI_mask_allbutself(cpu_online_mask, vector); |
26 | 29 | else | |
27 | cpu_clear(smp_processor_id(), mask); | ||
28 | send_IPI_mask(mask, vector); | ||
29 | } else | ||
30 | __send_IPI_shortcut(APIC_DEST_ALLBUT, vector); | 30 | __send_IPI_shortcut(APIC_DEST_ALLBUT, vector); |
31 | } | 31 | } |
32 | 32 | ||
33 | static inline void __local_send_IPI_all(int vector) | 33 | static inline void __local_send_IPI_all(int vector) |
34 | { | 34 | { |
35 | if (no_broadcast || vector == NMI_VECTOR) | 35 | if (no_broadcast || vector == NMI_VECTOR) |
36 | send_IPI_mask(cpu_online_map, vector); | 36 | send_IPI_mask(cpu_online_mask, vector); |
37 | else | 37 | else |
38 | __send_IPI_shortcut(APIC_DEST_ALLINC, vector); | 38 | __send_IPI_shortcut(APIC_DEST_ALLINC, vector); |
39 | } | 39 | } |
diff --git a/arch/x86/include/asm/mach-generic/mach_apic.h b/arch/x86/include/asm/mach-generic/mach_apic.h index e430f47df667..48553e958ad5 100644 --- a/arch/x86/include/asm/mach-generic/mach_apic.h +++ b/arch/x86/include/asm/mach-generic/mach_apic.h | |||
@@ -24,6 +24,7 @@ | |||
24 | #define check_phys_apicid_present (genapic->check_phys_apicid_present) | 24 | #define check_phys_apicid_present (genapic->check_phys_apicid_present) |
25 | #define check_apicid_used (genapic->check_apicid_used) | 25 | #define check_apicid_used (genapic->check_apicid_used) |
26 | #define cpu_mask_to_apicid (genapic->cpu_mask_to_apicid) | 26 | #define cpu_mask_to_apicid (genapic->cpu_mask_to_apicid) |
27 | #define cpu_mask_to_apicid_and (genapic->cpu_mask_to_apicid_and) | ||
27 | #define vector_allocation_domain (genapic->vector_allocation_domain) | 28 | #define vector_allocation_domain (genapic->vector_allocation_domain) |
28 | #define enable_apic_mode (genapic->enable_apic_mode) | 29 | #define enable_apic_mode (genapic->enable_apic_mode) |
29 | #define phys_pkg_id (genapic->phys_pkg_id) | 30 | #define phys_pkg_id (genapic->phys_pkg_id) |
diff --git a/arch/x86/include/asm/mpspec.h b/arch/x86/include/asm/mpspec.h index 91885c28f66b..62d14ce3cd00 100644 --- a/arch/x86/include/asm/mpspec.h +++ b/arch/x86/include/asm/mpspec.h | |||
@@ -6,13 +6,13 @@ | |||
6 | #include <asm/mpspec_def.h> | 6 | #include <asm/mpspec_def.h> |
7 | 7 | ||
8 | extern int apic_version[MAX_APICS]; | 8 | extern int apic_version[MAX_APICS]; |
9 | extern int pic_mode; | ||
9 | 10 | ||
10 | #ifdef CONFIG_X86_32 | 11 | #ifdef CONFIG_X86_32 |
11 | #include <mach_mpspec.h> | 12 | #include <mach_mpspec.h> |
12 | 13 | ||
13 | extern unsigned int def_to_bigsmp; | 14 | extern unsigned int def_to_bigsmp; |
14 | extern u8 apicid_2_node[]; | 15 | extern u8 apicid_2_node[]; |
15 | extern int pic_mode; | ||
16 | 16 | ||
17 | #ifdef CONFIG_X86_NUMAQ | 17 | #ifdef CONFIG_X86_NUMAQ |
18 | extern int mp_bus_id_to_node[MAX_MP_BUSSES]; | 18 | extern int mp_bus_id_to_node[MAX_MP_BUSSES]; |
diff --git a/arch/x86/include/asm/mtrr.h b/arch/x86/include/asm/mtrr.h index 7c1e4258b31e..cb988aab716d 100644 --- a/arch/x86/include/asm/mtrr.h +++ b/arch/x86/include/asm/mtrr.h | |||
@@ -57,6 +57,31 @@ struct mtrr_gentry { | |||
57 | }; | 57 | }; |
58 | #endif /* !__i386__ */ | 58 | #endif /* !__i386__ */ |
59 | 59 | ||
60 | struct mtrr_var_range { | ||
61 | u32 base_lo; | ||
62 | u32 base_hi; | ||
63 | u32 mask_lo; | ||
64 | u32 mask_hi; | ||
65 | }; | ||
66 | |||
67 | /* In the Intel processor's MTRR interface, the MTRR type is always held in | ||
68 | an 8-bit field: */ | ||
69 | typedef u8 mtrr_type; | ||
70 | |||
71 | #define MTRR_NUM_FIXED_RANGES 88 | ||
72 | #define MTRR_MAX_VAR_RANGES 256 | ||
73 | |||
74 | struct mtrr_state_type { | ||
75 | struct mtrr_var_range var_ranges[MTRR_MAX_VAR_RANGES]; | ||
76 | mtrr_type fixed_ranges[MTRR_NUM_FIXED_RANGES]; | ||
77 | unsigned char enabled; | ||
78 | unsigned char have_fixed; | ||
79 | mtrr_type def_type; | ||
80 | }; | ||
81 | |||
82 | #define MTRRphysBase_MSR(reg) (0x200 + 2 * (reg)) | ||
83 | #define MTRRphysMask_MSR(reg) (0x200 + 2 * (reg) + 1) | ||
84 | |||
60 | /* These are the various ioctls */ | 85 | /* These are the various ioctls */ |
61 | #define MTRRIOC_ADD_ENTRY _IOW(MTRR_IOCTL_BASE, 0, struct mtrr_sentry) | 86 | #define MTRRIOC_ADD_ENTRY _IOW(MTRR_IOCTL_BASE, 0, struct mtrr_sentry) |
62 | #define MTRRIOC_SET_ENTRY _IOW(MTRR_IOCTL_BASE, 1, struct mtrr_sentry) | 87 | #define MTRRIOC_SET_ENTRY _IOW(MTRR_IOCTL_BASE, 1, struct mtrr_sentry) |
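The new MSR helpers encode the pairing of each variable range with two consecutive MSRs starting at 0x200, so MTRRphysBase_MSR(3) is 0x206 and MTRRphysMask_MSR(3) is 0x207. A hedged sketch of filling one mtrr_var_range through them (rdmsr() is the kernel's two-output MSR accessor macro; error handling omitted):

	/* Sketch only: snapshot variable MTRR range 'reg' into the new
	 * structure using the address helpers defined above. */
	static void read_var_mtrr(unsigned int reg, struct mtrr_var_range *vr)
	{
		rdmsr(MTRRphysBase_MSR(reg), vr->base_lo, vr->base_hi);
		rdmsr(MTRRphysMask_MSR(reg), vr->mask_lo, vr->mask_hi);
	}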
diff --git a/arch/x86/include/asm/numaq/apic.h b/arch/x86/include/asm/numaq/apic.h index 0bf2a06b7a4e..c80f00d29965 100644 --- a/arch/x86/include/asm/numaq/apic.h +++ b/arch/x86/include/asm/numaq/apic.h | |||
@@ -7,9 +7,9 @@ | |||
7 | 7 | ||
8 | #define APIC_DFR_VALUE (APIC_DFR_CLUSTER) | 8 | #define APIC_DFR_VALUE (APIC_DFR_CLUSTER) |
9 | 9 | ||
10 | static inline cpumask_t target_cpus(void) | 10 | static inline const cpumask_t *target_cpus(void) |
11 | { | 11 | { |
12 | return CPU_MASK_ALL; | 12 | return &CPU_MASK_ALL; |
13 | } | 13 | } |
14 | 14 | ||
15 | #define NO_BALANCE_IRQ (1) | 15 | #define NO_BALANCE_IRQ (1) |
@@ -122,7 +122,13 @@ static inline void enable_apic_mode(void) | |||
122 | * We use physical apicids here, not logical, so just return the default | 122 | * We use physical apicids here, not logical, so just return the default |
123 | * physical broadcast to stop people from breaking us | 123 | * physical broadcast to stop people from breaking us |
124 | */ | 124 | */ |
125 | static inline unsigned int cpu_mask_to_apicid(cpumask_t cpumask) | 125 | static inline unsigned int cpu_mask_to_apicid(const cpumask_t *cpumask) |
126 | { | ||
127 | return (int) 0xF; | ||
128 | } | ||
129 | |||
130 | static inline unsigned int cpu_mask_to_apicid_and(const struct cpumask *cpumask, | ||
131 | const struct cpumask *andmask) | ||
126 | { | 132 | { |
127 | return (int) 0xF; | 133 | return (int) 0xF; |
128 | } | 134 | } |
diff --git a/arch/x86/include/asm/numaq/ipi.h b/arch/x86/include/asm/numaq/ipi.h index 935588d286cf..a8374c652778 100644 --- a/arch/x86/include/asm/numaq/ipi.h +++ b/arch/x86/include/asm/numaq/ipi.h | |||
@@ -1,25 +1,22 @@ | |||
1 | #ifndef __ASM_NUMAQ_IPI_H | 1 | #ifndef __ASM_NUMAQ_IPI_H |
2 | #define __ASM_NUMAQ_IPI_H | 2 | #define __ASM_NUMAQ_IPI_H |
3 | 3 | ||
4 | void send_IPI_mask_sequence(cpumask_t, int vector); | 4 | void send_IPI_mask_sequence(const struct cpumask *mask, int vector); |
5 | void send_IPI_mask_allbutself(const struct cpumask *mask, int vector); | ||
5 | 6 | ||
6 | static inline void send_IPI_mask(cpumask_t mask, int vector) | 7 | static inline void send_IPI_mask(const struct cpumask *mask, int vector) |
7 | { | 8 | { |
8 | send_IPI_mask_sequence(mask, vector); | 9 | send_IPI_mask_sequence(mask, vector); |
9 | } | 10 | } |
10 | 11 | ||
11 | static inline void send_IPI_allbutself(int vector) | 12 | static inline void send_IPI_allbutself(int vector) |
12 | { | 13 | { |
13 | cpumask_t mask = cpu_online_map; | 14 | send_IPI_mask_allbutself(cpu_online_mask, vector); |
14 | cpu_clear(smp_processor_id(), mask); | ||
15 | |||
16 | if (!cpus_empty(mask)) | ||
17 | send_IPI_mask(mask, vector); | ||
18 | } | 15 | } |
19 | 16 | ||
20 | static inline void send_IPI_all(int vector) | 17 | static inline void send_IPI_all(int vector) |
21 | { | 18 | { |
22 | send_IPI_mask(cpu_online_map, vector); | 19 | send_IPI_mask(cpu_online_mask, vector); |
23 | } | 20 | } |
24 | 21 | ||
25 | #endif /* __ASM_NUMAQ_IPI_H */ | 22 | #endif /* __ASM_NUMAQ_IPI_H */ |
diff --git a/arch/x86/pci/pci.h b/arch/x86/include/asm/pci_x86.h index 1959018aac02..e60fd3e14bdf 100644 --- a/arch/x86/pci/pci.h +++ b/arch/x86/include/asm/pci_x86.h | |||
@@ -57,7 +57,8 @@ extern struct pci_ops pci_root_ops; | |||
57 | struct irq_info { | 57 | struct irq_info { |
58 | u8 bus, devfn; /* Bus, device and function */ | 58 | u8 bus, devfn; /* Bus, device and function */ |
59 | struct { | 59 | struct { |
60 | u8 link; /* IRQ line ID, chipset dependent, 0=not routed */ | 60 | u8 link; /* IRQ line ID, chipset dependent, |
61 | 0 = not routed */ | ||
61 | u16 bitmap; /* Available IRQs */ | 62 | u16 bitmap; /* Available IRQs */ |
62 | } __attribute__((packed)) irq[4]; | 63 | } __attribute__((packed)) irq[4]; |
63 | u8 slot; /* Slot number, 0=onboard */ | 64 | u8 slot; /* Slot number, 0=onboard */ |
@@ -69,11 +70,13 @@ struct irq_routing_table { | |||
69 | u16 version; /* PIRQ_VERSION */ | 70 | u16 version; /* PIRQ_VERSION */ |
70 | u16 size; /* Table size in bytes */ | 71 | u16 size; /* Table size in bytes */ |
71 | u8 rtr_bus, rtr_devfn; /* Where the interrupt router lies */ | 72 | u8 rtr_bus, rtr_devfn; /* Where the interrupt router lies */ |
72 | u16 exclusive_irqs; /* IRQs devoted exclusively to PCI usage */ | 73 | u16 exclusive_irqs; /* IRQs devoted exclusively to |
73 | u16 rtr_vendor, rtr_device; /* Vendor and device ID of interrupt router */ | 74 | PCI usage */ |
75 | u16 rtr_vendor, rtr_device; /* Vendor and device ID of | ||
76 | interrupt router */ | ||
74 | u32 miniport_data; /* Crap */ | 77 | u32 miniport_data; /* Crap */ |
75 | u8 rfu[11]; | 78 | u8 rfu[11]; |
76 | u8 checksum; /* Modulo 256 checksum must give zero */ | 79 | u8 checksum; /* Modulo 256 checksum must give 0 */ |
77 | struct irq_info slots[0]; | 80 | struct irq_info slots[0]; |
78 | } __attribute__((packed)); | 81 | } __attribute__((packed)); |
79 | 82 | ||
@@ -148,15 +151,15 @@ static inline unsigned int mmio_config_readl(void __iomem *pos) | |||
148 | 151 | ||
149 | static inline void mmio_config_writeb(void __iomem *pos, u8 val) | 152 | static inline void mmio_config_writeb(void __iomem *pos, u8 val) |
150 | { | 153 | { |
151 | asm volatile("movb %%al,(%1)" :: "a" (val), "r" (pos) : "memory"); | 154 | asm volatile("movb %%al,(%1)" : : "a" (val), "r" (pos) : "memory"); |
152 | } | 155 | } |
153 | 156 | ||
154 | static inline void mmio_config_writew(void __iomem *pos, u16 val) | 157 | static inline void mmio_config_writew(void __iomem *pos, u16 val) |
155 | { | 158 | { |
156 | asm volatile("movw %%ax,(%1)" :: "a" (val), "r" (pos) : "memory"); | 159 | asm volatile("movw %%ax,(%1)" : : "a" (val), "r" (pos) : "memory"); |
157 | } | 160 | } |
158 | 161 | ||
159 | static inline void mmio_config_writel(void __iomem *pos, u32 val) | 162 | static inline void mmio_config_writel(void __iomem *pos, u32 val) |
160 | { | 163 | { |
161 | asm volatile("movl %%eax,(%1)" :: "a" (val), "r" (pos) : "memory"); | 164 | asm volatile("movl %%eax,(%1)" : : "a" (val), "r" (pos) : "memory"); |
162 | } | 165 | } |
diff --git a/arch/x86/include/asm/smp.h b/arch/x86/include/asm/smp.h index d12811ce51d9..830b9fcb6427 100644 --- a/arch/x86/include/asm/smp.h +++ b/arch/x86/include/asm/smp.h | |||
@@ -60,7 +60,7 @@ struct smp_ops { | |||
60 | void (*cpu_die)(unsigned int cpu); | 60 | void (*cpu_die)(unsigned int cpu); |
61 | void (*play_dead)(void); | 61 | void (*play_dead)(void); |
62 | 62 | ||
63 | void (*send_call_func_ipi)(cpumask_t mask); | 63 | void (*send_call_func_ipi)(const struct cpumask *mask); |
64 | void (*send_call_func_single_ipi)(int cpu); | 64 | void (*send_call_func_single_ipi)(int cpu); |
65 | }; | 65 | }; |
66 | 66 | ||
@@ -125,7 +125,7 @@ static inline void arch_send_call_function_single_ipi(int cpu) | |||
125 | 125 | ||
126 | static inline void arch_send_call_function_ipi(cpumask_t mask) | 126 | static inline void arch_send_call_function_ipi(cpumask_t mask) |
127 | { | 127 | { |
128 | smp_ops.send_call_func_ipi(mask); | 128 | smp_ops.send_call_func_ipi(&mask); |
129 | } | 129 | } |
130 | 130 | ||
131 | void cpu_disable_common(void); | 131 | void cpu_disable_common(void); |
@@ -138,7 +138,7 @@ void native_cpu_die(unsigned int cpu); | |||
138 | void native_play_dead(void); | 138 | void native_play_dead(void); |
139 | void play_dead_common(void); | 139 | void play_dead_common(void); |
140 | 140 | ||
141 | void native_send_call_func_ipi(cpumask_t mask); | 141 | void native_send_call_func_ipi(const struct cpumask *mask); |
142 | void native_send_call_func_single_ipi(int cpu); | 142 | void native_send_call_func_single_ipi(int cpu); |
143 | 143 | ||
144 | extern void prefill_possible_map(void); | 144 | extern void prefill_possible_map(void); |
diff --git a/arch/x86/include/asm/summit/apic.h b/arch/x86/include/asm/summit/apic.h index 9b3070f1c2ac..99327d1be49f 100644 --- a/arch/x86/include/asm/summit/apic.h +++ b/arch/x86/include/asm/summit/apic.h | |||
@@ -14,13 +14,13 @@ | |||
14 | 14 | ||
15 | #define APIC_DFR_VALUE (APIC_DFR_CLUSTER) | 15 | #define APIC_DFR_VALUE (APIC_DFR_CLUSTER) |
16 | 16 | ||
17 | static inline cpumask_t target_cpus(void) | 17 | static inline const cpumask_t *target_cpus(void) |
18 | { | 18 | { |
19 | /* CPU_MASK_ALL (0xff) has undefined behaviour with | 19 | /* CPU_MASK_ALL (0xff) has undefined behaviour with |
20 | * dest_LowestPrio mode logical clustered apic interrupt routing. | 20 | * dest_LowestPrio mode logical clustered apic interrupt routing. |
21 | * Just start on cpu 0. IRQ balancing will spread load. | 21 | * Just start on cpu 0. IRQ balancing will spread load. |
22 | */ | 22 | */ |
23 | return cpumask_of_cpu(0); | 23 | return &cpumask_of_cpu(0); |
24 | } | 24 | } |
25 | 25 | ||
26 | #define INT_DELIVERY_MODE (dest_LowestPrio) | 26 | #define INT_DELIVERY_MODE (dest_LowestPrio) |
@@ -137,14 +137,14 @@ static inline void enable_apic_mode(void) | |||
137 | { | 137 | { |
138 | } | 138 | } |
139 | 139 | ||
140 | static inline unsigned int cpu_mask_to_apicid(cpumask_t cpumask) | 140 | static inline unsigned int cpu_mask_to_apicid(const cpumask_t *cpumask) |
141 | { | 141 | { |
142 | int num_bits_set; | 142 | int num_bits_set; |
143 | int cpus_found = 0; | 143 | int cpus_found = 0; |
144 | int cpu; | 144 | int cpu; |
145 | int apicid; | 145 | int apicid; |
146 | 146 | ||
147 | num_bits_set = cpus_weight(cpumask); | 147 | num_bits_set = cpus_weight(*cpumask); |
148 | /* Return id to all */ | 148 | /* Return id to all */ |
149 | if (num_bits_set == NR_CPUS) | 149 | if (num_bits_set == NR_CPUS) |
150 | return (int) 0xFF; | 150 | return (int) 0xFF; |
@@ -152,10 +152,10 @@ static inline unsigned int cpu_mask_to_apicid(cpumask_t cpumask) | |||
152 | * The cpus in the mask must all be on the apic cluster. If they are | 152 | * The cpus in the mask must all be on the apic cluster. If they are |
153 | * not on the same apicid cluster, return the default value of TARGET_CPUS. | 153 | * not on the same apicid cluster, return the default value of TARGET_CPUS. |
154 | */ | 154 | */ |
155 | cpu = first_cpu(cpumask); | 155 | cpu = first_cpu(*cpumask); |
156 | apicid = cpu_to_logical_apicid(cpu); | 156 | apicid = cpu_to_logical_apicid(cpu); |
157 | while (cpus_found < num_bits_set) { | 157 | while (cpus_found < num_bits_set) { |
158 | if (cpu_isset(cpu, cpumask)) { | 158 | if (cpu_isset(cpu, *cpumask)) { |
159 | int new_apicid = cpu_to_logical_apicid(cpu); | 159 | int new_apicid = cpu_to_logical_apicid(cpu); |
160 | if (apicid_cluster(apicid) != | 160 | if (apicid_cluster(apicid) != |
161 | apicid_cluster(new_apicid)){ | 161 | apicid_cluster(new_apicid)){ |
@@ -170,6 +170,51 @@ static inline unsigned int cpu_mask_to_apicid(cpumask_t cpumask) | |||
170 | return apicid; | 170 | return apicid; |
171 | } | 171 | } |
172 | 172 | ||
173 | static inline unsigned int cpu_mask_to_apicid_and(const struct cpumask *inmask, | ||
174 | const struct cpumask *andmask) | ||
175 | { | ||
176 | int num_bits_set; | ||
177 | int cpus_found = 0; | ||
178 | int cpu; | ||
179 | int apicid = 0xFF; | ||
180 | cpumask_var_t cpumask; | ||
181 | |||
182 | if (!alloc_cpumask_var(&cpumask, GFP_ATOMIC)) | ||
183 | return (int) 0xFF; | ||
184 | |||
185 | cpumask_and(cpumask, inmask, andmask); | ||
186 | cpumask_and(cpumask, cpumask, cpu_online_mask); | ||
187 | |||
188 | num_bits_set = cpumask_weight(cpumask); | ||
189 | /* Return id to all */ | ||
190 | if (num_bits_set == nr_cpu_ids) | ||
191 | goto exit; | ||
192 | /* | ||
193 | * The cpus in the mask must all be on the apic cluster. If they are | ||
194 | * not on the same apicid cluster, return the default value of TARGET_CPUS. | ||
195 | */ | ||
196 | cpu = cpumask_first(cpumask); | ||
197 | apicid = cpu_to_logical_apicid(cpu); | ||
198 | while (cpus_found < num_bits_set) { | ||
199 | if (cpumask_test_cpu(cpu, cpumask)) { | ||
200 | int new_apicid = cpu_to_logical_apicid(cpu); | ||
201 | if (apicid_cluster(apicid) != | ||
202 | apicid_cluster(new_apicid)){ | ||
203 | printk("%s: Not a valid mask!\n", __func__); | ||
204 | /* don't leak the scratch mask on the error path */ | ||
205 | free_cpumask_var(cpumask); | ||
206 | return 0xFF; | ||
207 | } | ||
208 | apicid = apicid | new_apicid; | ||
209 | cpus_found++; | ||
210 | } | ||
211 | cpu++; | ||
212 | } | ||
213 | exit: | ||
214 | free_cpumask_var(cpumask); | ||
215 | return apicid; | ||
216 | } | ||
217 | |||
173 | /* cpuid returns the value latched in the HW at reset, not the APIC ID | 218 | /* cpuid returns the value latched in the HW at reset, not the APIC ID |
174 | * register's value. For any box whose BIOS changes APIC IDs, like | 219 | * register's value. For any box whose BIOS changes APIC IDs, like |
175 | * clustered APIC systems, we must use hard_smp_processor_id. | 220 | * clustered APIC systems, we must use hard_smp_processor_id. |
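The summit variant needs a scratch mask, and cpumask_var_t keeps it off the stack when CONFIG_CPUMASK_OFFSTACK=y; the cost is that every exit path must go through free_cpumask_var(), which is why the invalid-mask bail-out above frees before returning. The canonical bracket, sketched ('a' and 'b' stand in for whatever the caller intersects):

	static int with_scratch_mask(const struct cpumask *a,
				     const struct cpumask *b)
	{
		cpumask_var_t tmp;

		if (!alloc_cpumask_var(&tmp, GFP_ATOMIC))
			return -ENOMEM;	/* or a caller-appropriate fallback */

		cpumask_and(tmp, a, b);
		/* ... work with tmp ... */

		free_cpumask_var(tmp);	/* every path out must do this */
		return 0;
	}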
diff --git a/arch/x86/include/asm/summit/ipi.h b/arch/x86/include/asm/summit/ipi.h index 53bd1e7bd7b4..a8a2c24f50cc 100644 --- a/arch/x86/include/asm/summit/ipi.h +++ b/arch/x86/include/asm/summit/ipi.h | |||
@@ -1,9 +1,10 @@ | |||
1 | #ifndef __ASM_SUMMIT_IPI_H | 1 | #ifndef __ASM_SUMMIT_IPI_H |
2 | #define __ASM_SUMMIT_IPI_H | 2 | #define __ASM_SUMMIT_IPI_H |
3 | 3 | ||
4 | void send_IPI_mask_sequence(cpumask_t mask, int vector); | 4 | void send_IPI_mask_sequence(const cpumask_t *mask, int vector); |
5 | void send_IPI_mask_allbutself(const cpumask_t *mask, int vector); | ||
5 | 6 | ||
6 | static inline void send_IPI_mask(cpumask_t mask, int vector) | 7 | static inline void send_IPI_mask(const cpumask_t *mask, int vector) |
7 | { | 8 | { |
8 | send_IPI_mask_sequence(mask, vector); | 9 | send_IPI_mask_sequence(mask, vector); |
9 | } | 10 | } |
@@ -14,12 +15,12 @@ static inline void send_IPI_allbutself(int vector) | |||
14 | cpu_clear(smp_processor_id(), mask); | 15 | cpu_clear(smp_processor_id(), mask); |
15 | 16 | ||
16 | if (!cpus_empty(mask)) | 17 | if (!cpus_empty(mask)) |
17 | send_IPI_mask(mask, vector); | 18 | send_IPI_mask(&mask, vector); |
18 | } | 19 | } |
19 | 20 | ||
20 | static inline void send_IPI_all(int vector) | 21 | static inline void send_IPI_all(int vector) |
21 | { | 22 | { |
22 | send_IPI_mask(cpu_online_map, vector); | 23 | send_IPI_mask(&cpu_online_map, vector); |
23 | } | 24 | } |
24 | 25 | ||
25 | #endif /* __ASM_SUMMIT_IPI_H */ | 26 | #endif /* __ASM_SUMMIT_IPI_H */ |
diff --git a/arch/x86/kvm/svm.h b/arch/x86/include/asm/svm.h index 1b8afa78e869..1b8afa78e869 100644 --- a/arch/x86/kvm/svm.h +++ b/arch/x86/include/asm/svm.h | |||
diff --git a/arch/x86/include/asm/sys_ia32.h b/arch/x86/include/asm/sys_ia32.h new file mode 100644 index 000000000000..ffb08be2a530 --- /dev/null +++ b/arch/x86/include/asm/sys_ia32.h | |||
@@ -0,0 +1,101 @@ | |||
1 | /* | ||
2 | * sys_ia32.h - Linux ia32 syscall interfaces | ||
3 | * | ||
4 | * Copyright (c) 2008 Jaswinder Singh Rajput | ||
5 | * | ||
6 | * This file is released under the GPLv2. | ||
7 | * See the file COPYING for more details. | ||
8 | */ | ||
9 | |||
10 | #ifndef _ASM_X86_SYS_IA32_H | ||
11 | #define _ASM_X86_SYS_IA32_H | ||
12 | |||
13 | #include <linux/compiler.h> | ||
14 | #include <linux/linkage.h> | ||
15 | #include <linux/types.h> | ||
16 | #include <linux/signal.h> | ||
17 | #include <asm/compat.h> | ||
18 | #include <asm/ia32.h> | ||
19 | |||
20 | /* ia32/sys_ia32.c */ | ||
21 | asmlinkage long sys32_truncate64(char __user *, unsigned long, unsigned long); | ||
22 | asmlinkage long sys32_ftruncate64(unsigned int, unsigned long, unsigned long); | ||
23 | |||
24 | asmlinkage long sys32_stat64(char __user *, struct stat64 __user *); | ||
25 | asmlinkage long sys32_lstat64(char __user *, struct stat64 __user *); | ||
26 | asmlinkage long sys32_fstat64(unsigned int, struct stat64 __user *); | ||
27 | asmlinkage long sys32_fstatat(unsigned int, char __user *, | ||
28 | struct stat64 __user *, int); | ||
29 | struct mmap_arg_struct; | ||
30 | asmlinkage long sys32_mmap(struct mmap_arg_struct __user *); | ||
31 | asmlinkage long sys32_mprotect(unsigned long, size_t, unsigned long); | ||
32 | |||
33 | asmlinkage long sys32_pipe(int __user *); | ||
34 | struct sigaction32; | ||
35 | struct old_sigaction32; | ||
36 | asmlinkage long sys32_rt_sigaction(int, struct sigaction32 __user *, | ||
37 | struct sigaction32 __user *, unsigned int); | ||
38 | asmlinkage long sys32_sigaction(int, struct old_sigaction32 __user *, | ||
39 | struct old_sigaction32 __user *); | ||
40 | asmlinkage long sys32_rt_sigprocmask(int, compat_sigset_t __user *, | ||
41 | compat_sigset_t __user *, unsigned int); | ||
42 | asmlinkage long sys32_alarm(unsigned int); | ||
43 | |||
44 | struct sel_arg_struct; | ||
45 | asmlinkage long sys32_old_select(struct sel_arg_struct __user *); | ||
46 | asmlinkage long sys32_waitpid(compat_pid_t, unsigned int *, int); | ||
47 | asmlinkage long sys32_sysfs(int, u32, u32); | ||
48 | |||
49 | asmlinkage long sys32_sched_rr_get_interval(compat_pid_t, | ||
50 | struct compat_timespec __user *); | ||
51 | asmlinkage long sys32_rt_sigpending(compat_sigset_t __user *, compat_size_t); | ||
52 | asmlinkage long sys32_rt_sigqueueinfo(int, int, compat_siginfo_t __user *); | ||
53 | |||
54 | #ifdef CONFIG_SYSCTL_SYSCALL | ||
55 | struct sysctl_ia32; | ||
56 | asmlinkage long sys32_sysctl(struct sysctl_ia32 __user *); | ||
57 | #endif | ||
58 | |||
59 | asmlinkage long sys32_pread(unsigned int, char __user *, u32, u32, u32); | ||
60 | asmlinkage long sys32_pwrite(unsigned int, char __user *, u32, u32, u32); | ||
61 | |||
62 | asmlinkage long sys32_personality(unsigned long); | ||
63 | asmlinkage long sys32_sendfile(int, int, compat_off_t __user *, s32); | ||
64 | |||
65 | asmlinkage long sys32_mmap2(unsigned long, unsigned long, unsigned long, | ||
66 | unsigned long, unsigned long, unsigned long); | ||
67 | |||
68 | struct oldold_utsname; | ||
69 | struct old_utsname; | ||
70 | asmlinkage long sys32_olduname(struct oldold_utsname __user *); | ||
71 | long sys32_uname(struct old_utsname __user *); | ||
72 | |||
73 | long sys32_ustat(unsigned, struct ustat32 __user *); | ||
74 | |||
75 | asmlinkage long sys32_execve(char __user *, compat_uptr_t __user *, | ||
76 | compat_uptr_t __user *, struct pt_regs *); | ||
77 | asmlinkage long sys32_clone(unsigned int, unsigned int, struct pt_regs *); | ||
78 | |||
79 | long sys32_lseek(unsigned int, int, unsigned int); | ||
80 | long sys32_kill(int, int); | ||
81 | long sys32_fadvise64_64(int, __u32, __u32, __u32, __u32, int); | ||
82 | long sys32_vm86_warning(void); | ||
83 | long sys32_lookup_dcookie(u32, u32, char __user *, size_t); | ||
84 | |||
85 | asmlinkage ssize_t sys32_readahead(int, unsigned, unsigned, size_t); | ||
86 | asmlinkage long sys32_sync_file_range(int, unsigned, unsigned, | ||
87 | unsigned, unsigned, int); | ||
88 | asmlinkage long sys32_fadvise64(int, unsigned, unsigned, size_t, int); | ||
89 | asmlinkage long sys32_fallocate(int, int, unsigned, | ||
90 | unsigned, unsigned, unsigned); | ||
91 | |||
92 | /* ia32/ia32_signal.c */ | ||
93 | asmlinkage long sys32_sigsuspend(int, int, old_sigset_t); | ||
94 | asmlinkage long sys32_sigaltstack(const stack_ia32_t __user *, | ||
95 | stack_ia32_t __user *, struct pt_regs *); | ||
96 | asmlinkage long sys32_sigreturn(struct pt_regs *); | ||
97 | asmlinkage long sys32_rt_sigreturn(struct pt_regs *); | ||
98 | |||
99 | /* ia32/ipc32.c */ | ||
100 | asmlinkage long sys32_ipc(u32, int, int, int, compat_uptr_t, u32); | ||
101 | #endif /* _ASM_X86_SYS_IA32_H */ | ||
diff --git a/arch/x86/include/asm/topology.h b/arch/x86/include/asm/topology.h index ff386ff50ed7..79e31e9dcdda 100644 --- a/arch/x86/include/asm/topology.h +++ b/arch/x86/include/asm/topology.h | |||
@@ -226,6 +226,8 @@ extern cpumask_t cpu_coregroup_map(int cpu); | |||
226 | #define topology_core_id(cpu) (cpu_data(cpu).cpu_core_id) | 226 | #define topology_core_id(cpu) (cpu_data(cpu).cpu_core_id) |
227 | #define topology_core_siblings(cpu) (per_cpu(cpu_core_map, cpu)) | 227 | #define topology_core_siblings(cpu) (per_cpu(cpu_core_map, cpu)) |
228 | #define topology_thread_siblings(cpu) (per_cpu(cpu_sibling_map, cpu)) | 228 | #define topology_thread_siblings(cpu) (per_cpu(cpu_sibling_map, cpu)) |
229 | #define topology_core_cpumask(cpu) (&per_cpu(cpu_core_map, cpu)) | ||
230 | #define topology_thread_cpumask(cpu) (&per_cpu(cpu_sibling_map, cpu)) | ||
229 | 231 | ||
230 | /* indicates that pointers to the topology cpumask_t maps are valid */ | 232 | /* indicates that pointers to the topology cpumask_t maps are valid */ |
231 | #define arch_provides_topology_pointers yes | 233 | #define arch_provides_topology_pointers yes |
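The two new accessors hand back pointers into the per-cpu maps, matching the const struct cpumask * convention this series moves toward. An illustrative consumer (the iteration itself is not part of this hunk):

	/* Sketch: walk the core siblings of 'cpu' via the new accessor. */
	static void print_core_siblings(int cpu)
	{
		int sibling;

		for_each_cpu(sibling, topology_core_cpumask(cpu))
			printk(KERN_DEBUG "cpu %d shares a core with cpu %d\n",
			       cpu, sibling);
	}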
diff --git a/arch/x86/include/asm/uv/uv_bau.h b/arch/x86/include/asm/uv/uv_bau.h index e2363253bbbf..50423c7b56b2 100644 --- a/arch/x86/include/asm/uv/uv_bau.h +++ b/arch/x86/include/asm/uv/uv_bau.h | |||
@@ -133,61 +133,61 @@ struct bau_msg_payload { | |||
133 | * see table 4.2.3.0.1 in broadcast_assist spec. | 133 | * see table 4.2.3.0.1 in broadcast_assist spec. |
134 | */ | 134 | */ |
135 | struct bau_msg_header { | 135 | struct bau_msg_header { |
136 | int dest_subnodeid:6; /* must be zero */ | 136 | unsigned int dest_subnodeid:6; /* must be zero */ |
137 | /* bits 5:0 */ | 137 | /* bits 5:0 */ |
138 | int base_dest_nodeid:15; /* nasid>>1 (pnode) of first bit in node_map */ | 138 | unsigned int base_dest_nodeid:15; /* nasid>>1 (pnode) of */ |
139 | /* bits 20:6 */ | 139 | /* bits 20:6 */ /* first bit in node_map */ |
140 | int command:8; /* message type */ | 140 | unsigned int command:8; /* message type */ |
141 | /* bits 28:21 */ | 141 | /* bits 28:21 */ |
142 | /* 0x38: SN3net EndPoint Message */ | 142 | /* 0x38: SN3net EndPoint Message */ |
143 | int rsvd_1:3; /* must be zero */ | 143 | unsigned int rsvd_1:3; /* must be zero */ |
144 | /* bits 31:29 */ | 144 | /* bits 31:29 */ |
145 | /* int will align on 32 bits */ | 145 | /* int will align on 32 bits */ |
146 | int rsvd_2:9; /* must be zero */ | 146 | unsigned int rsvd_2:9; /* must be zero */ |
147 | /* bits 40:32 */ | 147 | /* bits 40:32 */ |
148 | /* Suppl_A is 56-41 */ | 148 | /* Suppl_A is 56-41 */ |
149 | int payload_2a:8; /* becomes byte 16 of msg */ | 149 | unsigned int payload_2a:8;/* becomes byte 16 of msg */ |
150 | /* bits 48:41 */ /* not currently using */ | 150 | /* bits 48:41 */ /* not currently using */ |
151 | int payload_2b:8; /* becomes byte 17 of msg */ | 151 | unsigned int payload_2b:8;/* becomes byte 17 of msg */ |
152 | /* bits 56:49 */ /* not currently using */ | 152 | /* bits 56:49 */ /* not currently using */ |
153 | /* Address field (96:57) is never used as an | 153 | /* Address field (96:57) is never used as an |
154 | address (these are address bits 42:3) */ | 154 | address (these are address bits 42:3) */ |
155 | int rsvd_3:1; /* must be zero */ | 155 | unsigned int rsvd_3:1; /* must be zero */ |
156 | /* bit 57 */ | 156 | /* bit 57 */ |
157 | /* address bits 27:4 are payload */ | 157 | /* address bits 27:4 are payload */ |
158 | /* these 24 bits become bytes 12-14 of msg */ | 158 | /* these 24 bits become bytes 12-14 of msg */ |
159 | int replied_to:1; /* sent as 0 by the source to byte 12 */ | 159 | unsigned int replied_to:1;/* sent as 0 by the source to byte 12 */ |
160 | /* bit 58 */ | 160 | /* bit 58 */ |
161 | 161 | ||
162 | int payload_1a:5; /* not currently used */ | 162 | unsigned int payload_1a:5;/* not currently used */ |
163 | /* bits 63:59 */ | 163 | /* bits 63:59 */ |
164 | int payload_1b:8; /* not currently used */ | 164 | unsigned int payload_1b:8;/* not currently used */ |
165 | /* bits 71:64 */ | 165 | /* bits 71:64 */ |
166 | int payload_1c:8; /* not currently used */ | 166 | unsigned int payload_1c:8;/* not currently used */ |
167 | /* bits 79:72 */ | 167 | /* bits 79:72 */ |
168 | int payload_1d:2; /* not currently used */ | 168 | unsigned int payload_1d:2;/* not currently used */ |
169 | /* bits 81:80 */ | 169 | /* bits 81:80 */ |
170 | 170 | ||
171 | int rsvd_4:7; /* must be zero */ | 171 | unsigned int rsvd_4:7; /* must be zero */ |
172 | /* bits 88:82 */ | 172 | /* bits 88:82 */ |
173 | int sw_ack_flag:1; /* software acknowledge flag */ | 173 | unsigned int sw_ack_flag:1;/* software acknowledge flag */ |
174 | /* bit 89 */ | 174 | /* bit 89 */ |
175 | /* INTD transactions at destination are to | 175 | /* INTD transactions at destination are to |
176 | wait for software acknowledge */ | 176 | wait for software acknowledge */ |
177 | int rsvd_5:6; /* must be zero */ | 177 | unsigned int rsvd_5:6; /* must be zero */ |
178 | /* bits 95:90 */ | 178 | /* bits 95:90 */ |
179 | int rsvd_6:5; /* must be zero */ | 179 | unsigned int rsvd_6:5; /* must be zero */ |
180 | /* bits 100:96 */ | 180 | /* bits 100:96 */ |
181 | int int_both:1; /* if 1, interrupt both sockets on the blade */ | 181 | unsigned int int_both:1;/* if 1, interrupt both sockets on the blade */ |
182 | /* bit 101*/ | 182 | /* bit 101*/ |
183 | int fairness:3; /* usually zero */ | 183 | unsigned int fairness:3;/* usually zero */ |
184 | /* bits 104:102 */ | 184 | /* bits 104:102 */ |
185 | int multilevel:1; /* multi-level multicast format */ | 185 | unsigned int multilevel:1; /* multi-level multicast format */ |
186 | /* bit 105 */ | 186 | /* bit 105 */ |
187 | /* 0 for TLB: endpoint multi-unicast messages */ | 187 | /* 0 for TLB: endpoint multi-unicast messages */ |
188 | int chaining:1; /* next descriptor is part of this activation*/ | 188 | unsigned int chaining:1;/* next descriptor is part of this activation*/ |
189 | /* bit 106 */ | 189 | /* bit 106 */ |
190 | int rsvd_7:21; /* must be zero */ | 190 | unsigned int rsvd_7:21; /* must be zero */ |
191 | /* bits 127:107 */ | 191 | /* bits 127:107 */ |
192 | }; | 192 | }; |
193 | 193 | ||
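The int to unsigned int conversion here is not cosmetic. Whether a plain int bitfield is signed is implementation-defined, and gcc makes it signed, so a 1-bit field can only hold 0 and -1 and a test such as replied_to == 1 can never be true. A standalone illustration (not from the patch):

	struct bitdemo {
		int s:1;		/* gcc: value range -1..0 */
		unsigned int u:1;	/* value range 0..1 */
	};

	static int bitdemo_check(void)
	{
		struct bitdemo d = { .s = 1, .u = 1 };

		/* d.s reads back as -1, so (d.s == 1) is false, while
		 * d.u reads back as 1 -- the behaviour the BAU header
		 * fields rely on. */
		return (d.u == 1) && (d.s != 1);
	}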
diff --git a/arch/x86/include/asm/virtext.h b/arch/x86/include/asm/virtext.h new file mode 100644 index 000000000000..593636275238 --- /dev/null +++ b/arch/x86/include/asm/virtext.h | |||
@@ -0,0 +1,132 @@ | |||
1 | /* CPU virtualization extensions handling | ||
2 | * | ||
3 | * This should carry the code for handling CPU virtualization extensions | ||
4 | * that needs to live in the kernel core. | ||
5 | * | ||
6 | * Author: Eduardo Habkost <ehabkost@redhat.com> | ||
7 | * | ||
8 | * Copyright (C) 2008, Red Hat Inc. | ||
9 | * | ||
10 | * Contains code from KVM, Copyright (C) 2006 Qumranet, Inc. | ||
11 | * | ||
12 | * This work is licensed under the terms of the GNU GPL, version 2. See | ||
13 | * the COPYING file in the top-level directory. | ||
14 | */ | ||
15 | #ifndef _ASM_X86_VIRTEX_H | ||
16 | #define _ASM_X86_VIRTEX_H | ||
17 | |||
18 | #include <asm/processor.h> | ||
19 | #include <asm/system.h> | ||
20 | |||
21 | #include <asm/vmx.h> | ||
22 | #include <asm/svm.h> | ||
23 | |||
24 | /* | ||
25 | * VMX functions: | ||
26 | */ | ||
27 | |||
28 | static inline int cpu_has_vmx(void) | ||
29 | { | ||
30 | unsigned long ecx = cpuid_ecx(1); | ||
31 | return test_bit(5, &ecx); /* CPUID.1:ECX.VMX[bit 5] -> VT */ | ||
32 | } | ||
33 | |||
34 | |||
35 | /** Disable VMX on the current CPU | ||
36 | * | ||
37 | * vmxoff causes an undefined-opcode exception if vmxon was not run | ||
38 | * on the CPU previously. Only call this function if you know VMX | ||
39 | * is enabled. | ||
40 | */ | ||
41 | static inline void cpu_vmxoff(void) | ||
42 | { | ||
43 | asm volatile (ASM_VMX_VMXOFF : : : "cc"); | ||
44 | write_cr4(read_cr4() & ~X86_CR4_VMXE); | ||
45 | } | ||
46 | |||
47 | static inline int cpu_vmx_enabled(void) | ||
48 | { | ||
49 | return read_cr4() & X86_CR4_VMXE; | ||
50 | } | ||
51 | |||
52 | /** Disable VMX if it is enabled on the current CPU | ||
53 | * | ||
54 | * You shouldn't call this if cpu_has_vmx() returns 0. | ||
55 | */ | ||
56 | static inline void __cpu_emergency_vmxoff(void) | ||
57 | { | ||
58 | if (cpu_vmx_enabled()) | ||
59 | cpu_vmxoff(); | ||
60 | } | ||
61 | |||
62 | /** Disable VMX if it is supported and enabled on the current CPU | ||
63 | */ | ||
64 | static inline void cpu_emergency_vmxoff(void) | ||
65 | { | ||
66 | if (cpu_has_vmx()) | ||
67 | __cpu_emergency_vmxoff(); | ||
68 | } | ||
69 | |||
70 | |||
71 | |||
72 | |||
73 | /* | ||
74 | * SVM functions: | ||
75 | */ | ||
76 | |||
77 | /** Check if the CPU has SVM support | ||
78 | * | ||
79 | * You can use the 'msg' arg to get a message describing the problem, | ||
80 | * if the function returns zero. Simply pass NULL if you are not interested | ||
81 | * in the messages; gcc should take care of not generating code for | ||
82 | * the messages in this case. | ||
83 | */ | ||
84 | static inline int cpu_has_svm(const char **msg) | ||
85 | { | ||
86 | uint32_t eax, ebx, ecx, edx; | ||
87 | |||
88 | if (boot_cpu_data.x86_vendor != X86_VENDOR_AMD) { | ||
89 | if (msg) | ||
90 | *msg = "not amd"; | ||
91 | return 0; | ||
92 | } | ||
93 | |||
94 | cpuid(0x80000000, &eax, &ebx, &ecx, &edx); | ||
95 | if (eax < SVM_CPUID_FUNC) { | ||
96 | if (msg) | ||
97 | *msg = "can't execute cpuid_8000000a"; | ||
98 | return 0; | ||
99 | } | ||
100 | |||
101 | cpuid(0x80000001, &eax, &ebx, &ecx, &edx); | ||
102 | if (!(ecx & (1 << SVM_CPUID_FEATURE_SHIFT))) { | ||
103 | if (msg) | ||
104 | *msg = "svm not available"; | ||
105 | return 0; | ||
106 | } | ||
107 | return 1; | ||
108 | } | ||
109 | |||
110 | |||
111 | /** Disable SVM on the current CPU | ||
112 | * | ||
113 | * You should call this only if cpu_has_svm() returned true. | ||
114 | */ | ||
115 | static inline void cpu_svm_disable(void) | ||
116 | { | ||
117 | uint64_t efer; | ||
118 | |||
119 | wrmsrl(MSR_VM_HSAVE_PA, 0); | ||
120 | rdmsrl(MSR_EFER, efer); | ||
121 | wrmsrl(MSR_EFER, efer & ~MSR_EFER_SVME_MASK); | ||
122 | } | ||
123 | |||
124 | /** Makes sure SVM is disabled, if it is supported on the CPU | ||
125 | */ | ||
126 | static inline void cpu_emergency_svm_disable(void) | ||
127 | { | ||
128 | if (cpu_has_svm(NULL)) | ||
129 | cpu_svm_disable(); | ||
130 | } | ||
131 | |||
132 | #endif /* _ASM_X86_VIRTEX_H */ | ||
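The emergency variants are written to be safe to call unconditionally, which keeps the crash/kexec path trivial. A hedged sketch of that usage (the real call sites live in the reboot/crash code, outside this patch):

	/* Sketch: run on each CPU being stopped before jumping into a
	 * crash kernel.  Both calls are no-ops when the corresponding
	 * feature is absent or not enabled. */
	static void emergency_disable_virtualization(void)
	{
		cpu_emergency_vmxoff();
		cpu_emergency_svm_disable();
	}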
diff --git a/arch/x86/kvm/vmx.h b/arch/x86/include/asm/vmx.h index ec5edc339da6..d0238e6151d8 100644 --- a/arch/x86/kvm/vmx.h +++ b/arch/x86/include/asm/vmx.h | |||
@@ -63,10 +63,13 @@ | |||
63 | 63 | ||
64 | #define VM_EXIT_HOST_ADDR_SPACE_SIZE 0x00000200 | 64 | #define VM_EXIT_HOST_ADDR_SPACE_SIZE 0x00000200 |
65 | #define VM_EXIT_ACK_INTR_ON_EXIT 0x00008000 | 65 | #define VM_EXIT_ACK_INTR_ON_EXIT 0x00008000 |
66 | #define VM_EXIT_SAVE_IA32_PAT 0x00040000 | ||
67 | #define VM_EXIT_LOAD_IA32_PAT 0x00080000 | ||
66 | 68 | ||
67 | #define VM_ENTRY_IA32E_MODE 0x00000200 | 69 | #define VM_ENTRY_IA32E_MODE 0x00000200 |
68 | #define VM_ENTRY_SMM 0x00000400 | 70 | #define VM_ENTRY_SMM 0x00000400 |
69 | #define VM_ENTRY_DEACT_DUAL_MONITOR 0x00000800 | 71 | #define VM_ENTRY_DEACT_DUAL_MONITOR 0x00000800 |
72 | #define VM_ENTRY_LOAD_IA32_PAT 0x00004000 | ||
70 | 73 | ||
71 | /* VMCS Encodings */ | 74 | /* VMCS Encodings */ |
72 | enum vmcs_field { | 75 | enum vmcs_field { |
@@ -112,6 +115,8 @@ enum vmcs_field { | |||
112 | VMCS_LINK_POINTER_HIGH = 0x00002801, | 115 | VMCS_LINK_POINTER_HIGH = 0x00002801, |
113 | GUEST_IA32_DEBUGCTL = 0x00002802, | 116 | GUEST_IA32_DEBUGCTL = 0x00002802, |
114 | GUEST_IA32_DEBUGCTL_HIGH = 0x00002803, | 117 | GUEST_IA32_DEBUGCTL_HIGH = 0x00002803, |
118 | GUEST_IA32_PAT = 0x00002804, | ||
119 | GUEST_IA32_PAT_HIGH = 0x00002805, | ||
115 | GUEST_PDPTR0 = 0x0000280a, | 120 | GUEST_PDPTR0 = 0x0000280a, |
116 | GUEST_PDPTR0_HIGH = 0x0000280b, | 121 | GUEST_PDPTR0_HIGH = 0x0000280b, |
117 | GUEST_PDPTR1 = 0x0000280c, | 122 | GUEST_PDPTR1 = 0x0000280c, |
@@ -120,6 +125,8 @@ enum vmcs_field { | |||
120 | GUEST_PDPTR2_HIGH = 0x0000280f, | 125 | GUEST_PDPTR2_HIGH = 0x0000280f, |
121 | GUEST_PDPTR3 = 0x00002810, | 126 | GUEST_PDPTR3 = 0x00002810, |
122 | GUEST_PDPTR3_HIGH = 0x00002811, | 127 | GUEST_PDPTR3_HIGH = 0x00002811, |
128 | HOST_IA32_PAT = 0x00002c00, | ||
129 | HOST_IA32_PAT_HIGH = 0x00002c01, | ||
123 | PIN_BASED_VM_EXEC_CONTROL = 0x00004000, | 130 | PIN_BASED_VM_EXEC_CONTROL = 0x00004000, |
124 | CPU_BASED_VM_EXEC_CONTROL = 0x00004002, | 131 | CPU_BASED_VM_EXEC_CONTROL = 0x00004002, |
125 | EXCEPTION_BITMAP = 0x00004004, | 132 | EXCEPTION_BITMAP = 0x00004004, |
@@ -331,8 +338,9 @@ enum vmcs_field { | |||
331 | 338 | ||
332 | #define AR_RESERVD_MASK 0xfffe0f00 | 339 | #define AR_RESERVD_MASK 0xfffe0f00 |
333 | 340 | ||
334 | #define APIC_ACCESS_PAGE_PRIVATE_MEMSLOT 9 | 341 | #define TSS_PRIVATE_MEMSLOT (KVM_MEMORY_SLOTS + 0) |
335 | #define IDENTITY_PAGETABLE_PRIVATE_MEMSLOT 10 | 342 | #define APIC_ACCESS_PAGE_PRIVATE_MEMSLOT (KVM_MEMORY_SLOTS + 1) |
343 | #define IDENTITY_PAGETABLE_PRIVATE_MEMSLOT (KVM_MEMORY_SLOTS + 2) | ||
336 | 344 | ||
337 | #define VMX_NR_VPIDS (1 << 16) | 345 | #define VMX_NR_VPIDS (1 << 16) |
338 | #define VMX_VPID_EXTENT_SINGLE_CONTEXT 1 | 346 | #define VMX_VPID_EXTENT_SINGLE_CONTEXT 1 |
@@ -356,4 +364,19 @@ enum vmcs_field { | |||
356 | 364 | ||
357 | #define VMX_EPT_IDENTITY_PAGETABLE_ADDR 0xfffbc000ul | 365 | #define VMX_EPT_IDENTITY_PAGETABLE_ADDR 0xfffbc000ul |
358 | 366 | ||
367 | |||
368 | #define ASM_VMX_VMCLEAR_RAX ".byte 0x66, 0x0f, 0xc7, 0x30" | ||
369 | #define ASM_VMX_VMLAUNCH ".byte 0x0f, 0x01, 0xc2" | ||
370 | #define ASM_VMX_VMRESUME ".byte 0x0f, 0x01, 0xc3" | ||
371 | #define ASM_VMX_VMPTRLD_RAX ".byte 0x0f, 0xc7, 0x30" | ||
372 | #define ASM_VMX_VMREAD_RDX_RAX ".byte 0x0f, 0x78, 0xd0" | ||
373 | #define ASM_VMX_VMWRITE_RAX_RDX ".byte 0x0f, 0x79, 0xd0" | ||
374 | #define ASM_VMX_VMWRITE_RSP_RDX ".byte 0x0f, 0x79, 0xd4" | ||
375 | #define ASM_VMX_VMXOFF ".byte 0x0f, 0x01, 0xc4" | ||
376 | #define ASM_VMX_VMXON_RAX ".byte 0xf3, 0x0f, 0xc7, 0x30" | ||
377 | #define ASM_VMX_INVEPT ".byte 0x66, 0x0f, 0x38, 0x80, 0x08" | ||
378 | #define ASM_VMX_INVVPID ".byte 0x66, 0x0f, 0x38, 0x81, 0x08" | ||
379 | |||
380 | |||
381 | |||
359 | #endif | 382 | #endif |
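The ASM_VMX_* strings are hand-encoded opcode bytes, so VMX instructions can be emitted even by assemblers that predate the mnemonics; cpu_vmxoff() in virtext.h above uses exactly this trick. Restated minimally:

	/* VMXOFF via its raw encoding (0f 01 c4); "cc" is clobbered
	 * because the instruction updates RFLAGS. */
	static inline void do_vmxoff(void)
	{
		asm volatile (ASM_VMX_VMXOFF : : : "cc");
	}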
diff --git a/arch/x86/kernel/amd_iommu.c b/arch/x86/kernel/amd_iommu.c index 2e2da717b350..658e29e0f49b 100644 --- a/arch/x86/kernel/amd_iommu.c +++ b/arch/x86/kernel/amd_iommu.c | |||
@@ -1296,7 +1296,7 @@ static int amd_iommu_dma_supported(struct device *dev, u64 mask) | |||
1296 | * we don't need to preallocate the protection domains anymore. | 1296 | * we don't need to preallocate the protection domains anymore. |
1297 | * For now we have to. | 1297 | * For now we have to. |
1298 | */ | 1298 | */ |
1299 | void prealloc_protection_domains(void) | 1299 | static void prealloc_protection_domains(void) |
1300 | { | 1300 | { |
1301 | struct pci_dev *dev = NULL; | 1301 | struct pci_dev *dev = NULL; |
1302 | struct dma_ops_domain *dma_dom; | 1302 | struct dma_ops_domain *dma_dom; |
diff --git a/arch/x86/kernel/amd_iommu_init.c b/arch/x86/kernel/amd_iommu_init.c index c625800c55ca..fb85e8d466cc 100644 --- a/arch/x86/kernel/amd_iommu_init.c +++ b/arch/x86/kernel/amd_iommu_init.c | |||
@@ -243,7 +243,7 @@ static void __init iommu_feature_disable(struct amd_iommu *iommu, u8 bit) | |||
243 | } | 243 | } |
244 | 244 | ||
245 | /* Function to enable the hardware */ | 245 | /* Function to enable the hardware */ |
246 | void __init iommu_enable(struct amd_iommu *iommu) | 246 | static void __init iommu_enable(struct amd_iommu *iommu) |
247 | { | 247 | { |
248 | printk(KERN_INFO "AMD IOMMU: Enabling IOMMU " | 248 | printk(KERN_INFO "AMD IOMMU: Enabling IOMMU " |
249 | "at %02x:%02x.%x cap 0x%hx\n", | 249 | "at %02x:%02x.%x cap 0x%hx\n", |
@@ -256,7 +256,7 @@ void __init iommu_enable(struct amd_iommu *iommu) | |||
256 | } | 256 | } |
257 | 257 | ||
258 | /* Function to enable IOMMU event logging and event interrupts */ | 258 | /* Function to enable IOMMU event logging and event interrupts */ |
259 | void __init iommu_enable_event_logging(struct amd_iommu *iommu) | 259 | static void __init iommu_enable_event_logging(struct amd_iommu *iommu) |
260 | { | 260 | { |
261 | iommu_feature_enable(iommu, CONTROL_EVT_LOG_EN); | 261 | iommu_feature_enable(iommu, CONTROL_EVT_LOG_EN); |
262 | iommu_feature_enable(iommu, CONTROL_EVT_INT_EN); | 262 | iommu_feature_enable(iommu, CONTROL_EVT_INT_EN); |
diff --git a/arch/x86/kernel/apic.c b/arch/x86/kernel/apic.c index b5229affb953..d652515e2855 100644 --- a/arch/x86/kernel/apic.c +++ b/arch/x86/kernel/apic.c | |||
@@ -98,8 +98,8 @@ __setup("apicpmtimer", setup_apicpmtimer); | |||
98 | #ifdef HAVE_X2APIC | 98 | #ifdef HAVE_X2APIC |
99 | int x2apic; | 99 | int x2apic; |
100 | /* x2apic enabled before OS handover */ | 100 | /* x2apic enabled before OS handover */ |
101 | int x2apic_preenabled; | 101 | static int x2apic_preenabled; |
102 | int disable_x2apic; | 102 | static int disable_x2apic; |
103 | static __init int setup_nox2apic(char *str) | 103 | static __init int setup_nox2apic(char *str) |
104 | { | 104 | { |
105 | disable_x2apic = 1; | 105 | disable_x2apic = 1; |
@@ -119,8 +119,6 @@ EXPORT_SYMBOL_GPL(local_apic_timer_c2_ok); | |||
119 | 119 | ||
120 | int first_system_vector = 0xfe; | 120 | int first_system_vector = 0xfe; |
121 | 121 | ||
122 | char system_vectors[NR_VECTORS] = { [0 ... NR_VECTORS-1] = SYS_VECTOR_FREE}; | ||
123 | |||
124 | /* | 122 | /* |
125 | * Debug level, exported for io_apic.c | 123 | * Debug level, exported for io_apic.c |
126 | */ | 124 | */ |
@@ -142,7 +140,7 @@ static int lapic_next_event(unsigned long delta, | |||
142 | struct clock_event_device *evt); | 140 | struct clock_event_device *evt); |
143 | static void lapic_timer_setup(enum clock_event_mode mode, | 141 | static void lapic_timer_setup(enum clock_event_mode mode, |
144 | struct clock_event_device *evt); | 142 | struct clock_event_device *evt); |
145 | static void lapic_timer_broadcast(cpumask_t mask); | 143 | static void lapic_timer_broadcast(const cpumask_t *mask); |
146 | static void apic_pm_activate(void); | 144 | static void apic_pm_activate(void); |
147 | 145 | ||
148 | /* | 146 | /* |
@@ -228,7 +226,7 @@ void xapic_icr_write(u32 low, u32 id) | |||
228 | apic_write(APIC_ICR, low); | 226 | apic_write(APIC_ICR, low); |
229 | } | 227 | } |
230 | 228 | ||
231 | u64 xapic_icr_read(void) | 229 | static u64 xapic_icr_read(void) |
232 | { | 230 | { |
233 | u32 icr1, icr2; | 231 | u32 icr1, icr2; |
234 | 232 | ||
@@ -268,7 +266,7 @@ void x2apic_icr_write(u32 low, u32 id) | |||
268 | wrmsrl(APIC_BASE_MSR + (APIC_ICR >> 4), ((__u64) id) << 32 | low); | 266 | wrmsrl(APIC_BASE_MSR + (APIC_ICR >> 4), ((__u64) id) << 32 | low); |
269 | } | 267 | } |
270 | 268 | ||
271 | u64 x2apic_icr_read(void) | 269 | static u64 x2apic_icr_read(void) |
272 | { | 270 | { |
273 | unsigned long val; | 271 | unsigned long val; |
274 | 272 | ||
@@ -455,7 +453,7 @@ static void lapic_timer_setup(enum clock_event_mode mode, | |||
455 | /* | 453 | /* |
456 | * Local APIC timer broadcast function | 454 | * Local APIC timer broadcast function |
457 | */ | 455 | */ |
458 | static void lapic_timer_broadcast(cpumask_t mask) | 456 | static void lapic_timer_broadcast(const cpumask_t *mask) |
459 | { | 457 | { |
460 | #ifdef CONFIG_SMP | 458 | #ifdef CONFIG_SMP |
461 | send_IPI_mask(mask, LOCAL_TIMER_VECTOR); | 459 | send_IPI_mask(mask, LOCAL_TIMER_VECTOR); |
@@ -471,7 +469,7 @@ static void __cpuinit setup_APIC_timer(void) | |||
471 | struct clock_event_device *levt = &__get_cpu_var(lapic_events); | 469 | struct clock_event_device *levt = &__get_cpu_var(lapic_events); |
472 | 470 | ||
473 | memcpy(levt, &lapic_clockevent, sizeof(*levt)); | 471 | memcpy(levt, &lapic_clockevent, sizeof(*levt)); |
474 | levt->cpumask = cpumask_of_cpu(smp_processor_id()); | 472 | levt->cpumask = cpumask_of(smp_processor_id()); |
475 | 473 | ||
476 | clockevents_register_device(levt); | 474 | clockevents_register_device(levt); |
477 | } | 475 | } |
@@ -1807,28 +1805,32 @@ void disconnect_bsp_APIC(int virt_wire_setup) | |||
1807 | void __cpuinit generic_processor_info(int apicid, int version) | 1805 | void __cpuinit generic_processor_info(int apicid, int version) |
1808 | { | 1806 | { |
1809 | int cpu; | 1807 | int cpu; |
1810 | cpumask_t tmp_map; | ||
1811 | 1808 | ||
1812 | /* | 1809 | /* |
1813 | * Validate version | 1810 | * Validate version |
1814 | */ | 1811 | */ |
1815 | if (version == 0x0) { | 1812 | if (version == 0x0) { |
1816 | pr_warning("BIOS bug, APIC version is 0 for CPU#%d! " | 1813 | pr_warning("BIOS bug, APIC version is 0 for CPU#%d! " |
1817 | "fixing up to 0x10. (tell your hw vendor)\n", | 1814 | "fixing up to 0x10. (tell your hw vendor)\n", |
1818 | version); | 1815 | version); |
1819 | version = 0x10; | 1816 | version = 0x10; |
1820 | } | 1817 | } |
1821 | apic_version[apicid] = version; | 1818 | apic_version[apicid] = version; |
1822 | 1819 | ||
1823 | if (num_processors >= NR_CPUS) { | 1820 | if (num_processors >= nr_cpu_ids) { |
1824 | pr_warning("WARNING: NR_CPUS limit of %i reached." | 1821 | int max = nr_cpu_ids; |
1825 | " Processor ignored.\n", NR_CPUS); | 1822 | int thiscpu = max + disabled_cpus; |
1823 | |||
1824 | pr_warning( | ||
1825 | "ACPI: NR_CPUS/possible_cpus limit of %i reached." | ||
1826 | " Processor %d/0x%x ignored.\n", max, thiscpu, apicid); | ||
1827 | |||
1828 | disabled_cpus++; | ||
1826 | return; | 1829 | return; |
1827 | } | 1830 | } |
1828 | 1831 | ||
1829 | num_processors++; | 1832 | num_processors++; |
1830 | cpus_complement(tmp_map, cpu_present_map); | 1833 | cpu = cpumask_next_zero(-1, cpu_present_mask); |
1831 | cpu = first_cpu(tmp_map); | ||
1832 | 1834 | ||
1833 | physid_set(apicid, phys_cpu_present_map); | 1835 | physid_set(apicid, phys_cpu_present_map); |
1834 | if (apicid == boot_cpu_physical_apicid) { | 1836 | if (apicid == boot_cpu_physical_apicid) { |
@@ -1878,8 +1880,8 @@ void __cpuinit generic_processor_info(int apicid, int version) | |||
1878 | } | 1880 | } |
1879 | #endif | 1881 | #endif |
1880 | 1882 | ||
1881 | cpu_set(cpu, cpu_possible_map); | 1883 | set_cpu_possible(cpu, true); |
1882 | cpu_set(cpu, cpu_present_map); | 1884 | set_cpu_present(cpu, true); |
1883 | } | 1885 | } |
1884 | 1886 | ||
1885 | #ifdef CONFIG_X86_64 | 1887 | #ifdef CONFIG_X86_64 |
@@ -2081,7 +2083,7 @@ __cpuinit int apic_is_clustered_box(void) | |||
2081 | bios_cpu_apicid = early_per_cpu_ptr(x86_bios_cpu_apicid); | 2083 | bios_cpu_apicid = early_per_cpu_ptr(x86_bios_cpu_apicid); |
2082 | bitmap_zero(clustermap, NUM_APIC_CLUSTERS); | 2084 | bitmap_zero(clustermap, NUM_APIC_CLUSTERS); |
2083 | 2085 | ||
2084 | for (i = 0; i < NR_CPUS; i++) { | 2086 | for (i = 0; i < nr_cpu_ids; i++) { |
2085 | /* are we being called early in kernel startup? */ | 2087 | /* are we being called early in kernel startup? */ |
2086 | if (bios_cpu_apicid) { | 2088 | if (bios_cpu_apicid) { |
2087 | id = bios_cpu_apicid[i]; | 2089 | id = bios_cpu_apicid[i]; |
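generic_processor_info() previously built the complement of cpu_present_map on the stack just to locate the first free CPU number; cpumask_next_zero() performs the same scan in place. The equivalence, sketched:

	/* Old shape (needs an NR_CPUS-bit temporary):
	 *	cpus_complement(tmp_map, cpu_present_map);
	 *	cpu = first_cpu(tmp_map);
	 * New shape (no temporary at all): */
	int cpu = cpumask_next_zero(-1, cpu_present_mask);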
diff --git a/arch/x86/kernel/bios_uv.c b/arch/x86/kernel/bios_uv.c index 2a0a2a3cac26..f63882728d91 100644 --- a/arch/x86/kernel/bios_uv.c +++ b/arch/x86/kernel/bios_uv.c | |||
@@ -25,7 +25,7 @@ | |||
25 | #include <asm/uv/bios.h> | 25 | #include <asm/uv/bios.h> |
26 | #include <asm/uv/uv_hub.h> | 26 | #include <asm/uv/uv_hub.h> |
27 | 27 | ||
28 | struct uv_systab uv_systab; | 28 | static struct uv_systab uv_systab; |
29 | 29 | ||
30 | s64 uv_bios_call(enum uv_bios_cmd which, u64 a1, u64 a2, u64 a3, u64 a4, u64 a5) | 30 | s64 uv_bios_call(enum uv_bios_cmd which, u64 a1, u64 a2, u64 a3, u64 a4, u64 a5) |
31 | { | 31 | { |
diff --git a/arch/x86/kernel/cpu/intel_cacheinfo.c b/arch/x86/kernel/cpu/intel_cacheinfo.c index 68b5d8681cbb..c6ecda64f5f1 100644 --- a/arch/x86/kernel/cpu/intel_cacheinfo.c +++ b/arch/x86/kernel/cpu/intel_cacheinfo.c | |||
@@ -534,31 +534,16 @@ static void __cpuinit free_cache_attributes(unsigned int cpu) | |||
534 | per_cpu(cpuid4_info, cpu) = NULL; | 534 | per_cpu(cpuid4_info, cpu) = NULL; |
535 | } | 535 | } |
536 | 536 | ||
537 | static int __cpuinit detect_cache_attributes(unsigned int cpu) | 537 | static void get_cpu_leaves(void *_retval) |
538 | { | 538 | { |
539 | struct _cpuid4_info *this_leaf; | 539 | int j, *retval = _retval, cpu = smp_processor_id(); |
540 | unsigned long j; | ||
541 | int retval; | ||
542 | cpumask_t oldmask; | ||
543 | |||
544 | if (num_cache_leaves == 0) | ||
545 | return -ENOENT; | ||
546 | |||
547 | per_cpu(cpuid4_info, cpu) = kzalloc( | ||
548 | sizeof(struct _cpuid4_info) * num_cache_leaves, GFP_KERNEL); | ||
549 | if (per_cpu(cpuid4_info, cpu) == NULL) | ||
550 | return -ENOMEM; | ||
551 | |||
552 | oldmask = current->cpus_allowed; | ||
553 | retval = set_cpus_allowed_ptr(current, &cpumask_of_cpu(cpu)); | ||
554 | if (retval) | ||
555 | goto out; | ||
556 | 540 | ||
557 | /* Do cpuid and store the results */ | 541 | /* Do cpuid and store the results */ |
558 | for (j = 0; j < num_cache_leaves; j++) { | 542 | for (j = 0; j < num_cache_leaves; j++) { |
543 | struct _cpuid4_info *this_leaf; | ||
559 | this_leaf = CPUID4_INFO_IDX(cpu, j); | 544 | this_leaf = CPUID4_INFO_IDX(cpu, j); |
560 | retval = cpuid4_cache_lookup(j, this_leaf); | 545 | *retval = cpuid4_cache_lookup(j, this_leaf); |
561 | if (unlikely(retval < 0)) { | 546 | if (unlikely(*retval < 0)) { |
562 | int i; | 547 | int i; |
563 | 548 | ||
564 | for (i = 0; i < j; i++) | 549 | for (i = 0; i < j; i++) |
@@ -567,9 +552,21 @@ static int __cpuinit detect_cache_attributes(unsigned int cpu) | |||
567 | } | 552 | } |
568 | cache_shared_cpu_map_setup(cpu, j); | 553 | cache_shared_cpu_map_setup(cpu, j); |
569 | } | 554 | } |
570 | set_cpus_allowed_ptr(current, &oldmask); | 555 | } |
556 | |||
557 | static int __cpuinit detect_cache_attributes(unsigned int cpu) | ||
558 | { | ||
559 | int retval; | ||
560 | |||
561 | if (num_cache_leaves == 0) | ||
562 | return -ENOENT; | ||
563 | |||
564 | per_cpu(cpuid4_info, cpu) = kzalloc( | ||
565 | sizeof(struct _cpuid4_info) * num_cache_leaves, GFP_KERNEL); | ||
566 | if (per_cpu(cpuid4_info, cpu) == NULL) | ||
567 | return -ENOMEM; | ||
571 | 568 | ||
572 | out: | 569 | smp_call_function_single(cpu, get_cpu_leaves, &retval, true); |
573 | if (retval) { | 570 | if (retval) { |
574 | kfree(per_cpu(cpuid4_info, cpu)); | 571 | kfree(per_cpu(cpuid4_info, cpu)); |
575 | per_cpu(cpuid4_info, cpu) = NULL; | 572 | per_cpu(cpuid4_info, cpu) = NULL; |
@@ -626,8 +623,8 @@ static ssize_t show_shared_cpu_map_func(struct _cpuid4_info *this_leaf, | |||
626 | cpumask_t *mask = &this_leaf->shared_cpu_map; | 623 | cpumask_t *mask = &this_leaf->shared_cpu_map; |
627 | 624 | ||
628 | n = type? | 625 | n = type? |
629 | cpulist_scnprintf(buf, len-2, *mask): | 626 | cpulist_scnprintf(buf, len-2, mask) : |
630 | cpumask_scnprintf(buf, len-2, *mask); | 627 | cpumask_scnprintf(buf, len-2, mask); |
631 | buf[n++] = '\n'; | 628 | buf[n++] = '\n'; |
632 | buf[n] = '\0'; | 629 | buf[n] = '\0'; |
633 | } | 630 | } |
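The detect_cache_attributes() rework replaces the old trick of migrating current to the target CPU with set_cpus_allowed_ptr() by smp_call_function_single(), which runs the cpuid work in IPI context on the CPU that owns the data. The general shape of the conversion, sketched:

	/* Illustrative pattern: package a result pointer, run the work on
	 * the target CPU, read the result back.  The callback runs with
	 * interrupts disabled, so it must not sleep. */
	static void do_percpu_work(void *_ret)
	{
		int *ret = _ret;

		*ret = 0;	/* ... cpuid/rdmsr work on this CPU ... */
	}

	static int run_on_cpu(unsigned int cpu)
	{
		int ret = -ENODEV;

		smp_call_function_single(cpu, do_percpu_work, &ret, 1);
		return ret;
	}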
diff --git a/arch/x86/kernel/cpu/mcheck/mce_amd_64.c b/arch/x86/kernel/cpu/mcheck/mce_amd_64.c index 748c8f9e7a05..a5a5e0530370 100644 --- a/arch/x86/kernel/cpu/mcheck/mce_amd_64.c +++ b/arch/x86/kernel/cpu/mcheck/mce_amd_64.c | |||
@@ -83,34 +83,41 @@ static DEFINE_PER_CPU(unsigned char, bank_map); /* see which banks are on */ | |||
83 | * CPU Initialization | 83 | * CPU Initialization |
84 | */ | 84 | */ |
85 | 85 | ||
86 | struct thresh_restart { | ||
87 | struct threshold_block *b; | ||
88 | int reset; | ||
89 | u16 old_limit; | ||
90 | }; | ||
91 | |||
86 | /* must be called with correct cpu affinity */ | 92 | /* must be called with correct cpu affinity */ |
87 | static void threshold_restart_bank(struct threshold_block *b, | 93 | static long threshold_restart_bank(void *_tr) |
88 | int reset, u16 old_limit) | ||
89 | { | 94 | { |
95 | struct thresh_restart *tr = _tr; | ||
90 | u32 mci_misc_hi, mci_misc_lo; | 96 | u32 mci_misc_hi, mci_misc_lo; |
91 | 97 | ||
92 | rdmsr(b->address, mci_misc_lo, mci_misc_hi); | 98 | rdmsr(tr->b->address, mci_misc_lo, mci_misc_hi); |
93 | 99 | ||
94 | if (b->threshold_limit < (mci_misc_hi & THRESHOLD_MAX)) | 100 | if (tr->b->threshold_limit < (mci_misc_hi & THRESHOLD_MAX)) |
95 | reset = 1; /* limit cannot be lower than err count */ | 101 | tr->reset = 1; /* limit cannot be lower than err count */ |
96 | 102 | ||
97 | if (reset) { /* reset err count and overflow bit */ | 103 | if (tr->reset) { /* reset err count and overflow bit */ |
98 | mci_misc_hi = | 104 | mci_misc_hi = |
99 | (mci_misc_hi & ~(MASK_ERR_COUNT_HI | MASK_OVERFLOW_HI)) | | 105 | (mci_misc_hi & ~(MASK_ERR_COUNT_HI | MASK_OVERFLOW_HI)) | |
100 | (THRESHOLD_MAX - b->threshold_limit); | 106 | (THRESHOLD_MAX - tr->b->threshold_limit); |
101 | } else if (old_limit) { /* change limit w/o reset */ | 107 | } else if (tr->old_limit) { /* change limit w/o reset */ |
102 | int new_count = (mci_misc_hi & THRESHOLD_MAX) + | 108 | int new_count = (mci_misc_hi & THRESHOLD_MAX) + |
103 | (old_limit - b->threshold_limit); | 109 | (tr->old_limit - tr->b->threshold_limit); |
104 | mci_misc_hi = (mci_misc_hi & ~MASK_ERR_COUNT_HI) | | 110 | mci_misc_hi = (mci_misc_hi & ~MASK_ERR_COUNT_HI) | |
105 | (new_count & THRESHOLD_MAX); | 111 | (new_count & THRESHOLD_MAX); |
106 | } | 112 | } |
107 | 113 | ||
108 | b->interrupt_enable ? | 114 | tr->b->interrupt_enable ? |
109 | (mci_misc_hi = (mci_misc_hi & ~MASK_INT_TYPE_HI) | INT_TYPE_APIC) : | 115 | (mci_misc_hi = (mci_misc_hi & ~MASK_INT_TYPE_HI) | INT_TYPE_APIC) : |
110 | (mci_misc_hi &= ~MASK_INT_TYPE_HI); | 116 | (mci_misc_hi &= ~MASK_INT_TYPE_HI); |
111 | 117 | ||
112 | mci_misc_hi |= MASK_COUNT_EN_HI; | 118 | mci_misc_hi |= MASK_COUNT_EN_HI; |
113 | wrmsr(b->address, mci_misc_lo, mci_misc_hi); | 119 | wrmsr(tr->b->address, mci_misc_lo, mci_misc_hi); |
120 | return 0; | ||
114 | } | 121 | } |
115 | 122 | ||
116 | /* cpu init entry point, called from mce.c with preempt off */ | 123 | /* cpu init entry point, called from mce.c with preempt off */ |
@@ -120,6 +127,7 @@ void __cpuinit mce_amd_feature_init(struct cpuinfo_x86 *c) | |||
120 | unsigned int cpu = smp_processor_id(); | 127 | unsigned int cpu = smp_processor_id(); |
121 | u8 lvt_off; | 128 | u8 lvt_off; |
122 | u32 low = 0, high = 0, address = 0; | 129 | u32 low = 0, high = 0, address = 0; |
130 | struct thresh_restart tr; | ||
123 | 131 | ||
124 | for (bank = 0; bank < NR_BANKS; ++bank) { | 132 | for (bank = 0; bank < NR_BANKS; ++bank) { |
125 | for (block = 0; block < NR_BLOCKS; ++block) { | 133 | for (block = 0; block < NR_BLOCKS; ++block) { |
@@ -162,7 +170,10 @@ void __cpuinit mce_amd_feature_init(struct cpuinfo_x86 *c) | |||
162 | wrmsr(address, low, high); | 170 | wrmsr(address, low, high); |
163 | 171 | ||
164 | threshold_defaults.address = address; | 172 | threshold_defaults.address = address; |
165 | threshold_restart_bank(&threshold_defaults, 0, 0); | 173 | tr.b = &threshold_defaults; |
174 | tr.reset = 0; | ||
175 | tr.old_limit = 0; | ||
176 | threshold_restart_bank(&tr); | ||
166 | } | 177 | } |
167 | } | 178 | } |
168 | } | 179 | } |
@@ -251,20 +262,6 @@ struct threshold_attr { | |||
251 | ssize_t(*store) (struct threshold_block *, const char *, size_t count); | 262 | ssize_t(*store) (struct threshold_block *, const char *, size_t count); |
252 | }; | 263 | }; |
253 | 264 | ||
254 | static void affinity_set(unsigned int cpu, cpumask_t *oldmask, | ||
255 | cpumask_t *newmask) | ||
256 | { | ||
257 | *oldmask = current->cpus_allowed; | ||
258 | cpus_clear(*newmask); | ||
259 | cpu_set(cpu, *newmask); | ||
260 | set_cpus_allowed_ptr(current, newmask); | ||
261 | } | ||
262 | |||
263 | static void affinity_restore(const cpumask_t *oldmask) | ||
264 | { | ||
265 | set_cpus_allowed_ptr(current, oldmask); | ||
266 | } | ||
267 | |||
268 | #define SHOW_FIELDS(name) \ | 265 | #define SHOW_FIELDS(name) \ |
269 | static ssize_t show_ ## name(struct threshold_block * b, char *buf) \ | 266 | static ssize_t show_ ## name(struct threshold_block * b, char *buf) \ |
270 | { \ | 267 | { \ |
@@ -277,15 +274,16 @@ static ssize_t store_interrupt_enable(struct threshold_block *b, | |||
277 | const char *buf, size_t count) | 274 | const char *buf, size_t count) |
278 | { | 275 | { |
279 | char *end; | 276 | char *end; |
280 | cpumask_t oldmask, newmask; | 277 | struct thresh_restart tr; |
281 | unsigned long new = simple_strtoul(buf, &end, 0); | 278 | unsigned long new = simple_strtoul(buf, &end, 0); |
282 | if (end == buf) | 279 | if (end == buf) |
283 | return -EINVAL; | 280 | return -EINVAL; |
284 | b->interrupt_enable = !!new; | 281 | b->interrupt_enable = !!new; |
285 | 282 | ||
286 | affinity_set(b->cpu, &oldmask, &newmask); | 283 | tr.b = b; |
287 | threshold_restart_bank(b, 0, 0); | 284 | tr.reset = 0; |
288 | affinity_restore(&oldmask); | 285 | tr.old_limit = 0; |
286 | work_on_cpu(b->cpu, threshold_restart_bank, &tr); | ||
289 | 287 | ||
290 | return end - buf; | 288 | return end - buf; |
291 | } | 289 | } |
@@ -294,8 +292,7 @@ static ssize_t store_threshold_limit(struct threshold_block *b, | |||
294 | const char *buf, size_t count) | 292 | const char *buf, size_t count) |
295 | { | 293 | { |
296 | char *end; | 294 | char *end; |
297 | cpumask_t oldmask, newmask; | 295 | struct thresh_restart tr; |
298 | u16 old; | ||
299 | unsigned long new = simple_strtoul(buf, &end, 0); | 296 | unsigned long new = simple_strtoul(buf, &end, 0); |
300 | if (end == buf) | 297 | if (end == buf) |
301 | return -EINVAL; | 298 | return -EINVAL; |
@@ -303,34 +300,36 @@ static ssize_t store_threshold_limit(struct threshold_block *b, | |||
303 | new = THRESHOLD_MAX; | 300 | new = THRESHOLD_MAX; |
304 | if (new < 1) | 301 | if (new < 1) |
305 | new = 1; | 302 | new = 1; |
306 | old = b->threshold_limit; | 303 | tr.old_limit = b->threshold_limit; |
307 | b->threshold_limit = new; | 304 | b->threshold_limit = new; |
305 | tr.b = b; | ||
306 | tr.reset = 0; | ||
308 | 307 | ||
309 | affinity_set(b->cpu, &oldmask, &newmask); | 308 | work_on_cpu(b->cpu, threshold_restart_bank, &tr); |
310 | threshold_restart_bank(b, 0, old); | ||
311 | affinity_restore(&oldmask); | ||
312 | 309 | ||
313 | return end - buf; | 310 | return end - buf; |
314 | } | 311 | } |
315 | 312 | ||
316 | static ssize_t show_error_count(struct threshold_block *b, char *buf) | 313 | static long local_error_count(void *_b) |
317 | { | 314 | { |
318 | u32 high, low; | 315 | struct threshold_block *b = _b; |
319 | cpumask_t oldmask, newmask; | 316 | u32 low, high; |
320 | affinity_set(b->cpu, &oldmask, &newmask); | 317 | |
321 | rdmsr(b->address, low, high); | 318 | rdmsr(b->address, low, high); |
322 | affinity_restore(&oldmask); | 319 | return (high & 0xFFF) - (THRESHOLD_MAX - b->threshold_limit); |
323 | return sprintf(buf, "%x\n", | 320 | } |
324 | (high & 0xFFF) - (THRESHOLD_MAX - b->threshold_limit)); | 321 | |
322 | static ssize_t show_error_count(struct threshold_block *b, char *buf) | ||
323 | { | ||
324 | return sprintf(buf, "%lx\n", work_on_cpu(b->cpu, local_error_count, b)); | ||
325 | } | 325 | } |
326 | 326 | ||
327 | static ssize_t store_error_count(struct threshold_block *b, | 327 | static ssize_t store_error_count(struct threshold_block *b, |
328 | const char *buf, size_t count) | 328 | const char *buf, size_t count) |
329 | { | 329 | { |
330 | cpumask_t oldmask, newmask; | 330 | struct thresh_restart tr = { .b = b, .reset = 1, .old_limit = 0 }; |
331 | affinity_set(b->cpu, &oldmask, &newmask); | 331 | |
332 | threshold_restart_bank(b, 1, 0); | 332 | work_on_cpu(b->cpu, threshold_restart_bank, &tr); |
333 | affinity_restore(&oldmask); | ||
334 | return 1; | 333 | return 1; |
335 | } | 334 | } |
336 | 335 | ||
@@ -463,12 +462,19 @@ out_free: | |||
463 | return err; | 462 | return err; |
464 | } | 463 | } |
465 | 464 | ||
465 | static long local_allocate_threshold_blocks(void *_bank) | ||
466 | { | ||
467 | unsigned int *bank = _bank; | ||
468 | |||
469 | return allocate_threshold_blocks(smp_processor_id(), *bank, 0, | ||
470 | MSR_IA32_MC0_MISC + *bank * 4); | ||
471 | } | ||
472 | |||
466 | /* symlinks sibling shared banks to first core. first core owns dir/files. */ | 473 | /* symlinks sibling shared banks to first core. first core owns dir/files. */ |
467 | static __cpuinit int threshold_create_bank(unsigned int cpu, unsigned int bank) | 474 | static __cpuinit int threshold_create_bank(unsigned int cpu, unsigned int bank) |
468 | { | 475 | { |
469 | int i, err = 0; | 476 | int i, err = 0; |
470 | struct threshold_bank *b = NULL; | 477 | struct threshold_bank *b = NULL; |
471 | cpumask_t oldmask, newmask; | ||
472 | char name[32]; | 478 | char name[32]; |
473 | 479 | ||
474 | sprintf(name, "threshold_bank%i", bank); | 480 | sprintf(name, "threshold_bank%i", bank); |
@@ -519,11 +525,7 @@ static __cpuinit int threshold_create_bank(unsigned int cpu, unsigned int bank) | |||
519 | 525 | ||
520 | per_cpu(threshold_banks, cpu)[bank] = b; | 526 | per_cpu(threshold_banks, cpu)[bank] = b; |
521 | 527 | ||
522 | affinity_set(cpu, &oldmask, &newmask); | 528 | err = work_on_cpu(cpu, local_allocate_threshold_blocks, &bank); |
523 | err = allocate_threshold_blocks(cpu, bank, 0, | ||
524 | MSR_IA32_MC0_MISC + bank * 4); | ||
525 | affinity_restore(&oldmask); | ||
526 | |||
527 | if (err) | 529 | if (err) |
528 | goto out_free; | 530 | goto out_free; |
529 | 531 | ||
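
The mce_amd_64 conversion follows the same idea but uses work_on_cpu(), whose callback runs from a workqueue pinned to the target CPU and may therefore sleep. Since work_on_cpu() passes a single void pointer, the three former arguments of threshold_restart_bank() are bundled into struct thresh_restart. A sketch of the bundling pattern under invented names (msr_update, do_msr_update, update_on):

#include <linux/workqueue.h>
#include <linux/types.h>
#include <asm/msr.h>

/* Invented argument bundle; struct thresh_restart plays this
 * role in the hunks above. */
struct msr_update {
	u32 msr;
	u32 lo, hi;
};

/* work_on_cpu() callbacks return long; the value is handed back
 * to the caller. */
static long do_msr_update(void *_arg)
{
	struct msr_update *arg = _arg;

	wrmsr(arg->msr, arg->lo, arg->hi);	/* runs on the target cpu */
	return 0;
}

static long update_on(unsigned int cpu, u32 msr, u32 lo, u32 hi)
{
	struct msr_update arg = { .msr = msr, .lo = lo, .hi = hi };

	/* may sleep: only valid from process context */
	return work_on_cpu(cpu, do_msr_update, &arg);
}
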
diff --git a/arch/x86/kernel/cpu/mtrr/generic.c b/arch/x86/kernel/cpu/mtrr/generic.c index 4e8d77f01eeb..b59ddcc88cd8 100644 --- a/arch/x86/kernel/cpu/mtrr/generic.c +++ b/arch/x86/kernel/cpu/mtrr/generic.c | |||
@@ -14,14 +14,6 @@ | |||
14 | #include <asm/pat.h> | 14 | #include <asm/pat.h> |
15 | #include "mtrr.h" | 15 | #include "mtrr.h" |
16 | 16 | ||
17 | struct mtrr_state { | ||
18 | struct mtrr_var_range var_ranges[MAX_VAR_RANGES]; | ||
19 | mtrr_type fixed_ranges[NUM_FIXED_RANGES]; | ||
20 | unsigned char enabled; | ||
21 | unsigned char have_fixed; | ||
22 | mtrr_type def_type; | ||
23 | }; | ||
24 | |||
25 | struct fixed_range_block { | 17 | struct fixed_range_block { |
26 | int base_msr; /* start address of an MTRR block */ | 18 | int base_msr; /* start address of an MTRR block */ |
27 | int ranges; /* number of MTRRs in this block */ | 19 | int ranges; /* number of MTRRs in this block */ |
@@ -35,10 +27,12 @@ static struct fixed_range_block fixed_range_blocks[] = { | |||
35 | }; | 27 | }; |
36 | 28 | ||
37 | static unsigned long smp_changes_mask; | 29 | static unsigned long smp_changes_mask; |
38 | static struct mtrr_state mtrr_state = {}; | ||
39 | static int mtrr_state_set; | 30 | static int mtrr_state_set; |
40 | u64 mtrr_tom2; | 31 | u64 mtrr_tom2; |
41 | 32 | ||
33 | struct mtrr_state_type mtrr_state = {}; | ||
34 | EXPORT_SYMBOL_GPL(mtrr_state); | ||
35 | |||
42 | #undef MODULE_PARAM_PREFIX | 36 | #undef MODULE_PARAM_PREFIX |
43 | #define MODULE_PARAM_PREFIX "mtrr." | 37 | #define MODULE_PARAM_PREFIX "mtrr." |
44 | 38 | ||
diff --git a/arch/x86/kernel/cpu/mtrr/main.c b/arch/x86/kernel/cpu/mtrr/main.c index 1159e269e596..d259e5d2e054 100644 --- a/arch/x86/kernel/cpu/mtrr/main.c +++ b/arch/x86/kernel/cpu/mtrr/main.c | |||
@@ -49,7 +49,7 @@ | |||
49 | 49 | ||
50 | u32 num_var_ranges = 0; | 50 | u32 num_var_ranges = 0; |
51 | 51 | ||
52 | unsigned int mtrr_usage_table[MAX_VAR_RANGES]; | 52 | unsigned int mtrr_usage_table[MTRR_MAX_VAR_RANGES]; |
53 | static DEFINE_MUTEX(mtrr_mutex); | 53 | static DEFINE_MUTEX(mtrr_mutex); |
54 | 54 | ||
55 | u64 size_or_mask, size_and_mask; | 55 | u64 size_or_mask, size_and_mask; |
@@ -574,7 +574,7 @@ struct mtrr_value { | |||
574 | unsigned long lsize; | 574 | unsigned long lsize; |
575 | }; | 575 | }; |
576 | 576 | ||
577 | static struct mtrr_value mtrr_state[MAX_VAR_RANGES]; | 577 | static struct mtrr_value mtrr_state[MTRR_MAX_VAR_RANGES]; |
578 | 578 | ||
579 | static int mtrr_save(struct sys_device * sysdev, pm_message_t state) | 579 | static int mtrr_save(struct sys_device * sysdev, pm_message_t state) |
580 | { | 580 | { |
@@ -824,16 +824,14 @@ static int enable_mtrr_cleanup __initdata = | |||
824 | 824 | ||
825 | static int __init disable_mtrr_cleanup_setup(char *str) | 825 | static int __init disable_mtrr_cleanup_setup(char *str) |
826 | { | 826 | { |
827 | if (enable_mtrr_cleanup != -1) | 827 | enable_mtrr_cleanup = 0; |
828 | enable_mtrr_cleanup = 0; | ||
829 | return 0; | 828 | return 0; |
830 | } | 829 | } |
831 | early_param("disable_mtrr_cleanup", disable_mtrr_cleanup_setup); | 830 | early_param("disable_mtrr_cleanup", disable_mtrr_cleanup_setup); |
832 | 831 | ||
833 | static int __init enable_mtrr_cleanup_setup(char *str) | 832 | static int __init enable_mtrr_cleanup_setup(char *str) |
834 | { | 833 | { |
835 | if (enable_mtrr_cleanup != -1) | 834 | enable_mtrr_cleanup = 1; |
836 | enable_mtrr_cleanup = 1; | ||
837 | return 0; | 835 | return 0; |
838 | } | 836 | } |
839 | early_param("enable_mtrr_cleanup", enable_mtrr_cleanup_setup); | 837 | early_param("enable_mtrr_cleanup", enable_mtrr_cleanup_setup); |
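
The two handlers above previously guarded the assignment on a particular value of enable_mtrr_cleanup, which tied the command-line options to the variable's compile-time default; dropping the guards makes each parameter take effect unconditionally, with the last one on the command line winning. For reference, a minimal early_param handler (parsed before regular __setup() parameters), with "myflag" as an invented parameter name:

#include <linux/init.h>

static int myflag_enabled __initdata;	/* invented flag */

static int __init myflag_setup(char *str)
{
	myflag_enabled = 1;
	return 0;	/* 0 = consumed; nonzero is reported as malformed */
}
early_param("myflag", myflag_setup);
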
diff --git a/arch/x86/kernel/cpu/mtrr/mtrr.h b/arch/x86/kernel/cpu/mtrr/mtrr.h index 2dc4ec656b23..ffd60409cc6d 100644 --- a/arch/x86/kernel/cpu/mtrr/mtrr.h +++ b/arch/x86/kernel/cpu/mtrr/mtrr.h | |||
@@ -8,11 +8,6 @@ | |||
8 | #define MTRRcap_MSR 0x0fe | 8 | #define MTRRcap_MSR 0x0fe |
9 | #define MTRRdefType_MSR 0x2ff | 9 | #define MTRRdefType_MSR 0x2ff |
10 | 10 | ||
11 | #define MTRRphysBase_MSR(reg) (0x200 + 2 * (reg)) | ||
12 | #define MTRRphysMask_MSR(reg) (0x200 + 2 * (reg) + 1) | ||
13 | |||
14 | #define NUM_FIXED_RANGES 88 | ||
15 | #define MAX_VAR_RANGES 256 | ||
16 | #define MTRRfix64K_00000_MSR 0x250 | 11 | #define MTRRfix64K_00000_MSR 0x250 |
17 | #define MTRRfix16K_80000_MSR 0x258 | 12 | #define MTRRfix16K_80000_MSR 0x258 |
18 | #define MTRRfix16K_A0000_MSR 0x259 | 13 | #define MTRRfix16K_A0000_MSR 0x259 |
@@ -29,11 +24,7 @@ | |||
29 | #define MTRR_CHANGE_MASK_VARIABLE 0x02 | 24 | #define MTRR_CHANGE_MASK_VARIABLE 0x02 |
30 | #define MTRR_CHANGE_MASK_DEFTYPE 0x04 | 25 | #define MTRR_CHANGE_MASK_DEFTYPE 0x04 |
31 | 26 | ||
32 | /* In the Intel processor's MTRR interface, the MTRR type is always held in | 27 | extern unsigned int mtrr_usage_table[MTRR_MAX_VAR_RANGES]; |
33 | an 8 bit field: */ | ||
34 | typedef u8 mtrr_type; | ||
35 | |||
36 | extern unsigned int mtrr_usage_table[MAX_VAR_RANGES]; | ||
37 | 28 | ||
38 | struct mtrr_ops { | 29 | struct mtrr_ops { |
39 | u32 vendor; | 30 | u32 vendor; |
@@ -70,13 +61,6 @@ struct set_mtrr_context { | |||
70 | u32 ccr3; | 61 | u32 ccr3; |
71 | }; | 62 | }; |
72 | 63 | ||
73 | struct mtrr_var_range { | ||
74 | u32 base_lo; | ||
75 | u32 base_hi; | ||
76 | u32 mask_lo; | ||
77 | u32 mask_hi; | ||
78 | }; | ||
79 | |||
80 | void set_mtrr_done(struct set_mtrr_context *ctxt); | 64 | void set_mtrr_done(struct set_mtrr_context *ctxt); |
81 | void set_mtrr_cache_disable(struct set_mtrr_context *ctxt); | 65 | void set_mtrr_cache_disable(struct set_mtrr_context *ctxt); |
82 | void set_mtrr_prepare_save(struct set_mtrr_context *ctxt); | 66 | void set_mtrr_prepare_save(struct set_mtrr_context *ctxt); |
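
The MSR macros removed here (relocated to a shared header alongside mtrr_type and struct mtrr_var_range) encode the layout of the variable-range MTRR register file: each register reg owns an adjacent base/mask MSR pair starting at 0x200. A worked restatement under renamed macros:

/* Each variable-range register reg owns two consecutive MSRs. */
#define MTRR_PHYS_BASE(reg)	(0x200 + 2 * (reg))	/* PHYSBASEn */
#define MTRR_PHYS_MASK(reg)	(0x200 + 2 * (reg) + 1)	/* PHYSMASKn */

/* Worked example: reg 3 -> base MSR 0x206, mask MSR 0x207. */
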
diff --git a/arch/x86/kernel/cpuid.c b/arch/x86/kernel/cpuid.c index 72cefd1e649b..85d28d53f5d3 100644 --- a/arch/x86/kernel/cpuid.c +++ b/arch/x86/kernel/cpuid.c | |||
@@ -39,10 +39,10 @@ | |||
39 | #include <linux/device.h> | 39 | #include <linux/device.h> |
40 | #include <linux/cpu.h> | 40 | #include <linux/cpu.h> |
41 | #include <linux/notifier.h> | 41 | #include <linux/notifier.h> |
42 | #include <linux/uaccess.h> | ||
42 | 43 | ||
43 | #include <asm/processor.h> | 44 | #include <asm/processor.h> |
44 | #include <asm/msr.h> | 45 | #include <asm/msr.h> |
45 | #include <asm/uaccess.h> | ||
46 | #include <asm/system.h> | 46 | #include <asm/system.h> |
47 | 47 | ||
48 | static struct class *cpuid_class; | 48 | static struct class *cpuid_class; |
@@ -82,7 +82,7 @@ static loff_t cpuid_seek(struct file *file, loff_t offset, int orig) | |||
82 | } | 82 | } |
83 | 83 | ||
84 | static ssize_t cpuid_read(struct file *file, char __user *buf, | 84 | static ssize_t cpuid_read(struct file *file, char __user *buf, |
85 | size_t count, loff_t * ppos) | 85 | size_t count, loff_t *ppos) |
86 | { | 86 | { |
87 | char __user *tmp = buf; | 87 | char __user *tmp = buf; |
88 | struct cpuid_regs cmd; | 88 | struct cpuid_regs cmd; |
@@ -117,7 +117,7 @@ static int cpuid_open(struct inode *inode, struct file *file) | |||
117 | unsigned int cpu; | 117 | unsigned int cpu; |
118 | struct cpuinfo_x86 *c; | 118 | struct cpuinfo_x86 *c; |
119 | int ret = 0; | 119 | int ret = 0; |
120 | 120 | ||
121 | lock_kernel(); | 121 | lock_kernel(); |
122 | 122 | ||
123 | cpu = iminor(file->f_path.dentry->d_inode); | 123 | cpu = iminor(file->f_path.dentry->d_inode); |
diff --git a/arch/x86/kernel/crash.c b/arch/x86/kernel/crash.c index d84a852e4cd7..c689d19e35ab 100644 --- a/arch/x86/kernel/crash.c +++ b/arch/x86/kernel/crash.c | |||
@@ -26,6 +26,7 @@ | |||
26 | #include <linux/kdebug.h> | 26 | #include <linux/kdebug.h> |
27 | #include <asm/smp.h> | 27 | #include <asm/smp.h> |
28 | #include <asm/reboot.h> | 28 | #include <asm/reboot.h> |
29 | #include <asm/virtext.h> | ||
29 | 30 | ||
30 | #include <mach_ipi.h> | 31 | #include <mach_ipi.h> |
31 | 32 | ||
@@ -49,6 +50,15 @@ static void kdump_nmi_callback(int cpu, struct die_args *args) | |||
49 | #endif | 50 | #endif |
50 | crash_save_cpu(regs, cpu); | 51 | crash_save_cpu(regs, cpu); |
51 | 52 | ||
53 | /* Disable VMX or SVM if needed. | ||
54 | * | ||
55 | * We need to disable virtualization on all CPUs. | ||
56 | * Having VMX or SVM enabled on any CPU may break rebooting | ||
57 | * after the kdump kernel has finished its task. | ||
58 | */ | ||
59 | cpu_emergency_vmxoff(); | ||
60 | cpu_emergency_svm_disable(); | ||
61 | |||
52 | disable_local_APIC(); | 62 | disable_local_APIC(); |
53 | } | 63 | } |
54 | 64 | ||
@@ -80,6 +90,14 @@ void native_machine_crash_shutdown(struct pt_regs *regs) | |||
80 | local_irq_disable(); | 90 | local_irq_disable(); |
81 | 91 | ||
82 | kdump_nmi_shootdown_cpus(); | 92 | kdump_nmi_shootdown_cpus(); |
93 | |||
94 | /* Booting kdump kernel with VMX or SVM enabled won't work, | ||
95 | * because (among other limitations) we can't disable paging | ||
96 | * with the virt flags. | ||
97 | */ | ||
98 | cpu_emergency_vmxoff(); | ||
99 | cpu_emergency_svm_disable(); | ||
100 | |||
83 | lapic_shutdown(); | 101 | lapic_shutdown(); |
84 | #if defined(CONFIG_X86_IO_APIC) | 102 | #if defined(CONFIG_X86_IO_APIC) |
85 | disable_IO_APIC(); | 103 | disable_IO_APIC(); |
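
The crash path now force-disables VMX/SVM because, as the new comments note, leaving the virtualization flags set prevents the paging transitions the kdump boot needs. Support for these extensions is advertised through CPUID; as a side note, a user-space sketch of that detection using GCC's <cpuid.h> (the kernel itself uses its own cpu_has_* helpers, not this):

#include <cpuid.h>
#include <stdio.h>

int main(void)
{
	unsigned int eax, ebx, ecx, edx;

	/* VMX: CPUID leaf 1, ECX bit 5 */
	if (__get_cpuid(1, &eax, &ebx, &ecx, &edx))
		printf("VMX: %s\n", (ecx & (1u << 5)) ? "yes" : "no");
	/* SVM: CPUID leaf 0x80000001, ECX bit 2 */
	if (__get_cpuid(0x80000001, &eax, &ebx, &ecx, &edx))
		printf("SVM: %s\n", (ecx & (1u << 2)) ? "yes" : "no");
	return 0;
}
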
diff --git a/arch/x86/kernel/early_printk.c b/arch/x86/kernel/early_printk.c index 23b138e31e9c..504ad198e4ad 100644 --- a/arch/x86/kernel/early_printk.c +++ b/arch/x86/kernel/early_printk.c | |||
@@ -886,7 +886,7 @@ asmlinkage void early_printk(const char *fmt, ...) | |||
886 | va_list ap; | 886 | va_list ap; |
887 | 887 | ||
888 | va_start(ap, fmt); | 888 | va_start(ap, fmt); |
889 | n = vscnprintf(buf, 512, fmt, ap); | 889 | n = vscnprintf(buf, sizeof(buf), fmt, ap); |
890 | early_console->write(early_console, buf, n); | 890 | early_console->write(early_console, buf, n); |
891 | va_end(ap); | 891 | va_end(ap); |
892 | } | 892 | } |
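
The early_printk one-liner replaces a hard-coded 512 with sizeof(buf), so the bound tracks the array if it is ever resized. The idiom is only safe where buf is visible as a true array; a sketch:

#include <stdarg.h>
#include <stdio.h>

static char buf[512];

static int fmt(const char *fmtstr, ...)
{
	va_list ap;
	int n;

	va_start(ap, fmtstr);
	/* sizeof(buf) is 512 here because buf is an array in scope;
	 * inside a function taking "char *buf" it would silently be
	 * the size of a pointer instead. */
	n = vsnprintf(buf, sizeof(buf), fmtstr, ap);
	va_end(ap);
	return n;
}
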
diff --git a/arch/x86/kernel/genapic_flat_64.c b/arch/x86/kernel/genapic_flat_64.c index c0262791bda4..34185488e4fb 100644 --- a/arch/x86/kernel/genapic_flat_64.c +++ b/arch/x86/kernel/genapic_flat_64.c | |||
@@ -30,12 +30,12 @@ static int flat_acpi_madt_oem_check(char *oem_id, char *oem_table_id) | |||
30 | return 1; | 30 | return 1; |
31 | } | 31 | } |
32 | 32 | ||
33 | static cpumask_t flat_target_cpus(void) | 33 | static const struct cpumask *flat_target_cpus(void) |
34 | { | 34 | { |
35 | return cpu_online_map; | 35 | return cpu_online_mask; |
36 | } | 36 | } |
37 | 37 | ||
38 | static cpumask_t flat_vector_allocation_domain(int cpu) | 38 | static void flat_vector_allocation_domain(int cpu, struct cpumask *retmask) |
39 | { | 39 | { |
40 | /* Careful. Some cpus do not strictly honor the set of cpus | 40 | /* Careful. Some cpus do not strictly honor the set of cpus |
41 | * specified in the interrupt destination when using lowest | 41 | * specified in the interrupt destination when using lowest |
@@ -45,8 +45,8 @@ static cpumask_t flat_vector_allocation_domain(int cpu) | |||
45 | * deliver interrupts to the wrong hyperthread when only one | 45 | * deliver interrupts to the wrong hyperthread when only one |
46 | * hyperthread was specified in the interrupt destination. | 46 | * hyperthread was specified in the interrupt destination. |
47 | */ | 47 | */ |
48 | cpumask_t domain = { { [0] = APIC_ALL_CPUS, } }; | 48 | cpumask_clear(retmask); |
49 | return domain; | 49 | cpumask_bits(retmask)[0] = APIC_ALL_CPUS; |
50 | } | 50 | } |
51 | 51 | ||
52 | /* | 52 | /* |
@@ -69,9 +69,8 @@ static void flat_init_apic_ldr(void) | |||
69 | apic_write(APIC_LDR, val); | 69 | apic_write(APIC_LDR, val); |
70 | } | 70 | } |
71 | 71 | ||
72 | static void flat_send_IPI_mask(cpumask_t cpumask, int vector) | 72 | static inline void _flat_send_IPI_mask(unsigned long mask, int vector) |
73 | { | 73 | { |
74 | unsigned long mask = cpus_addr(cpumask)[0]; | ||
75 | unsigned long flags; | 74 | unsigned long flags; |
76 | 75 | ||
77 | local_irq_save(flags); | 76 | local_irq_save(flags); |
@@ -79,20 +78,41 @@ static void flat_send_IPI_mask(cpumask_t cpumask, int vector) | |||
79 | local_irq_restore(flags); | 78 | local_irq_restore(flags); |
80 | } | 79 | } |
81 | 80 | ||
81 | static void flat_send_IPI_mask(const struct cpumask *cpumask, int vector) | ||
82 | { | ||
83 | unsigned long mask = cpumask_bits(cpumask)[0]; | ||
84 | |||
85 | _flat_send_IPI_mask(mask, vector); | ||
86 | } | ||
87 | |||
88 | static void flat_send_IPI_mask_allbutself(const struct cpumask *cpumask, | ||
89 | int vector) | ||
90 | { | ||
91 | unsigned long mask = cpumask_bits(cpumask)[0]; | ||
92 | int cpu = smp_processor_id(); | ||
93 | |||
94 | if (cpu < BITS_PER_LONG) | ||
95 | clear_bit(cpu, &mask); | ||
96 | _flat_send_IPI_mask(mask, vector); | ||
97 | } | ||
98 | |||
82 | static void flat_send_IPI_allbutself(int vector) | 99 | static void flat_send_IPI_allbutself(int vector) |
83 | { | 100 | { |
101 | int cpu = smp_processor_id(); | ||
84 | #ifdef CONFIG_HOTPLUG_CPU | 102 | #ifdef CONFIG_HOTPLUG_CPU |
85 | int hotplug = 1; | 103 | int hotplug = 1; |
86 | #else | 104 | #else |
87 | int hotplug = 0; | 105 | int hotplug = 0; |
88 | #endif | 106 | #endif |
89 | if (hotplug || vector == NMI_VECTOR) { | 107 | if (hotplug || vector == NMI_VECTOR) { |
90 | cpumask_t allbutme = cpu_online_map; | 108 | if (!cpumask_equal(cpu_online_mask, cpumask_of(cpu))) { |
109 | unsigned long mask = cpumask_bits(cpu_online_mask)[0]; | ||
91 | 110 | ||
92 | cpu_clear(smp_processor_id(), allbutme); | 111 | if (cpu < BITS_PER_LONG) |
112 | clear_bit(cpu, &mask); | ||
93 | 113 | ||
94 | if (!cpus_empty(allbutme)) | 114 | _flat_send_IPI_mask(mask, vector); |
95 | flat_send_IPI_mask(allbutme, vector); | 115 | } |
96 | } else if (num_online_cpus() > 1) { | 116 | } else if (num_online_cpus() > 1) { |
97 | __send_IPI_shortcut(APIC_DEST_ALLBUT, vector,APIC_DEST_LOGICAL); | 117 | __send_IPI_shortcut(APIC_DEST_ALLBUT, vector,APIC_DEST_LOGICAL); |
98 | } | 118 | } |
@@ -101,7 +121,7 @@ static void flat_send_IPI_allbutself(int vector) | |||
101 | static void flat_send_IPI_all(int vector) | 121 | static void flat_send_IPI_all(int vector) |
102 | { | 122 | { |
103 | if (vector == NMI_VECTOR) | 123 | if (vector == NMI_VECTOR) |
104 | flat_send_IPI_mask(cpu_online_map, vector); | 124 | flat_send_IPI_mask(cpu_online_mask, vector); |
105 | else | 125 | else |
106 | __send_IPI_shortcut(APIC_DEST_ALLINC, vector, APIC_DEST_LOGICAL); | 126 | __send_IPI_shortcut(APIC_DEST_ALLINC, vector, APIC_DEST_LOGICAL); |
107 | } | 127 | } |
@@ -135,9 +155,18 @@ static int flat_apic_id_registered(void) | |||
135 | return physid_isset(read_xapic_id(), phys_cpu_present_map); | 155 | return physid_isset(read_xapic_id(), phys_cpu_present_map); |
136 | } | 156 | } |
137 | 157 | ||
138 | static unsigned int flat_cpu_mask_to_apicid(cpumask_t cpumask) | 158 | static unsigned int flat_cpu_mask_to_apicid(const struct cpumask *cpumask) |
159 | { | ||
160 | return cpumask_bits(cpumask)[0] & APIC_ALL_CPUS; | ||
161 | } | ||
162 | |||
163 | static unsigned int flat_cpu_mask_to_apicid_and(const struct cpumask *cpumask, | ||
164 | const struct cpumask *andmask) | ||
139 | { | 165 | { |
140 | return cpus_addr(cpumask)[0] & APIC_ALL_CPUS; | 166 | unsigned long mask1 = cpumask_bits(cpumask)[0] & APIC_ALL_CPUS; |
167 | unsigned long mask2 = cpumask_bits(andmask)[0] & APIC_ALL_CPUS; | ||
168 | |||
169 | return mask1 & mask2; | ||
141 | } | 170 | } |
142 | 171 | ||
143 | static unsigned int phys_pkg_id(int index_msb) | 172 | static unsigned int phys_pkg_id(int index_msb) |
@@ -157,8 +186,10 @@ struct genapic apic_flat = { | |||
157 | .send_IPI_all = flat_send_IPI_all, | 186 | .send_IPI_all = flat_send_IPI_all, |
158 | .send_IPI_allbutself = flat_send_IPI_allbutself, | 187 | .send_IPI_allbutself = flat_send_IPI_allbutself, |
159 | .send_IPI_mask = flat_send_IPI_mask, | 188 | .send_IPI_mask = flat_send_IPI_mask, |
189 | .send_IPI_mask_allbutself = flat_send_IPI_mask_allbutself, | ||
160 | .send_IPI_self = apic_send_IPI_self, | 190 | .send_IPI_self = apic_send_IPI_self, |
161 | .cpu_mask_to_apicid = flat_cpu_mask_to_apicid, | 191 | .cpu_mask_to_apicid = flat_cpu_mask_to_apicid, |
192 | .cpu_mask_to_apicid_and = flat_cpu_mask_to_apicid_and, | ||
162 | .phys_pkg_id = phys_pkg_id, | 193 | .phys_pkg_id = phys_pkg_id, |
163 | .get_apic_id = get_apic_id, | 194 | .get_apic_id = get_apic_id, |
164 | .set_apic_id = set_apic_id, | 195 | .set_apic_id = set_apic_id, |
@@ -188,35 +219,39 @@ static int physflat_acpi_madt_oem_check(char *oem_id, char *oem_table_id) | |||
188 | return 0; | 219 | return 0; |
189 | } | 220 | } |
190 | 221 | ||
191 | static cpumask_t physflat_target_cpus(void) | 222 | static const struct cpumask *physflat_target_cpus(void) |
192 | { | 223 | { |
193 | return cpu_online_map; | 224 | return cpu_online_mask; |
194 | } | 225 | } |
195 | 226 | ||
196 | static cpumask_t physflat_vector_allocation_domain(int cpu) | 227 | static void physflat_vector_allocation_domain(int cpu, struct cpumask *retmask) |
197 | { | 228 | { |
198 | return cpumask_of_cpu(cpu); | 229 | cpumask_clear(retmask); |
230 | cpumask_set_cpu(cpu, retmask); | ||
199 | } | 231 | } |
200 | 232 | ||
201 | static void physflat_send_IPI_mask(cpumask_t cpumask, int vector) | 233 | static void physflat_send_IPI_mask(const struct cpumask *cpumask, int vector) |
202 | { | 234 | { |
203 | send_IPI_mask_sequence(cpumask, vector); | 235 | send_IPI_mask_sequence(cpumask, vector); |
204 | } | 236 | } |
205 | 237 | ||
206 | static void physflat_send_IPI_allbutself(int vector) | 238 | static void physflat_send_IPI_mask_allbutself(const struct cpumask *cpumask, |
239 | int vector) | ||
207 | { | 240 | { |
208 | cpumask_t allbutme = cpu_online_map; | 241 | send_IPI_mask_allbutself(cpumask, vector); |
242 | } | ||
209 | 243 | ||
210 | cpu_clear(smp_processor_id(), allbutme); | 244 | static void physflat_send_IPI_allbutself(int vector) |
211 | physflat_send_IPI_mask(allbutme, vector); | 245 | { |
246 | send_IPI_mask_allbutself(cpu_online_mask, vector); | ||
212 | } | 247 | } |
213 | 248 | ||
214 | static void physflat_send_IPI_all(int vector) | 249 | static void physflat_send_IPI_all(int vector) |
215 | { | 250 | { |
216 | physflat_send_IPI_mask(cpu_online_map, vector); | 251 | physflat_send_IPI_mask(cpu_online_mask, vector); |
217 | } | 252 | } |
218 | 253 | ||
219 | static unsigned int physflat_cpu_mask_to_apicid(cpumask_t cpumask) | 254 | static unsigned int physflat_cpu_mask_to_apicid(const struct cpumask *cpumask) |
220 | { | 255 | { |
221 | int cpu; | 256 | int cpu; |
222 | 257 | ||
@@ -224,13 +259,31 @@ static unsigned int physflat_cpu_mask_to_apicid(cpumask_t cpumask) | |||
224 | * We're using fixed IRQ delivery, can only return one phys APIC ID. | 259 | * We're using fixed IRQ delivery, can only return one phys APIC ID. |
225 | * May as well be the first. | 260 | * May as well be the first. |
226 | */ | 261 | */ |
227 | cpu = first_cpu(cpumask); | 262 | cpu = cpumask_first(cpumask); |
228 | if ((unsigned)cpu < nr_cpu_ids) | 263 | if ((unsigned)cpu < nr_cpu_ids) |
229 | return per_cpu(x86_cpu_to_apicid, cpu); | 264 | return per_cpu(x86_cpu_to_apicid, cpu); |
230 | else | 265 | else |
231 | return BAD_APICID; | 266 | return BAD_APICID; |
232 | } | 267 | } |
233 | 268 | ||
269 | static unsigned int | ||
270 | physflat_cpu_mask_to_apicid_and(const struct cpumask *cpumask, | ||
271 | const struct cpumask *andmask) | ||
272 | { | ||
273 | int cpu; | ||
274 | |||
275 | /* | ||
276 | * We're using fixed IRQ delivery, can only return one phys APIC ID. | ||
277 | * May as well be the first. | ||
278 | */ | ||
279 | for_each_cpu_and(cpu, cpumask, andmask) | ||
280 | if (cpumask_test_cpu(cpu, cpu_online_mask)) | ||
281 | break; | ||
282 | if (cpu < nr_cpu_ids) | ||
283 | return per_cpu(x86_cpu_to_apicid, cpu); | ||
284 | return BAD_APICID; | ||
285 | } | ||
286 | |||
234 | struct genapic apic_physflat = { | 287 | struct genapic apic_physflat = { |
235 | .name = "physical flat", | 288 | .name = "physical flat", |
236 | .acpi_madt_oem_check = physflat_acpi_madt_oem_check, | 289 | .acpi_madt_oem_check = physflat_acpi_madt_oem_check, |
@@ -243,8 +296,10 @@ struct genapic apic_physflat = { | |||
243 | .send_IPI_all = physflat_send_IPI_all, | 296 | .send_IPI_all = physflat_send_IPI_all, |
244 | .send_IPI_allbutself = physflat_send_IPI_allbutself, | 297 | .send_IPI_allbutself = physflat_send_IPI_allbutself, |
245 | .send_IPI_mask = physflat_send_IPI_mask, | 298 | .send_IPI_mask = physflat_send_IPI_mask, |
299 | .send_IPI_mask_allbutself = physflat_send_IPI_mask_allbutself, | ||
246 | .send_IPI_self = apic_send_IPI_self, | 300 | .send_IPI_self = apic_send_IPI_self, |
247 | .cpu_mask_to_apicid = physflat_cpu_mask_to_apicid, | 301 | .cpu_mask_to_apicid = physflat_cpu_mask_to_apicid, |
302 | .cpu_mask_to_apicid_and = physflat_cpu_mask_to_apicid_and, | ||
248 | .phys_pkg_id = phys_pkg_id, | 303 | .phys_pkg_id = phys_pkg_id, |
249 | .get_apic_id = get_apic_id, | 304 | .get_apic_id = get_apic_id, |
250 | .set_apic_id = set_apic_id, | 305 | .set_apic_id = set_apic_id, |
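
Throughout genapic_flat_64.c the by-value cpumask_t parameters become const struct cpumask * so NR_CPUS-bit masks are no longer copied across the stack. Flat logical mode only ever needs the first word of the mask, so the helpers reduce it to one unsigned long; a condensed restatement of the allbutself trick, under an invented helper name:

#include <linux/cpumask.h>
#include <linux/bitops.h>

/* Take word 0 of the mask and drop the calling cpu from it. */
static unsigned long first_word_allbutself(const struct cpumask *cpumask,
					   unsigned int self)
{
	unsigned long mask = cpumask_bits(cpumask)[0];

	if (self < BITS_PER_LONG)	/* guard before clear_bit() */
		clear_bit(self, &mask);
	return mask;
}
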
diff --git a/arch/x86/kernel/genx2apic_cluster.c b/arch/x86/kernel/genx2apic_cluster.c index f6a2c8eb48a6..6ce497cc372d 100644 --- a/arch/x86/kernel/genx2apic_cluster.c +++ b/arch/x86/kernel/genx2apic_cluster.c | |||
@@ -22,19 +22,18 @@ static int x2apic_acpi_madt_oem_check(char *oem_id, char *oem_table_id) | |||
22 | 22 | ||
23 | /* Start with all IRQs pointing to boot CPU. IRQ balancing will shift them. */ | 23 | /* Start with all IRQs pointing to boot CPU. IRQ balancing will shift them. */ |
24 | 24 | ||
25 | static cpumask_t x2apic_target_cpus(void) | 25 | static const struct cpumask *x2apic_target_cpus(void) |
26 | { | 26 | { |
27 | return cpumask_of_cpu(0); | 27 | return cpumask_of(0); |
28 | } | 28 | } |
29 | 29 | ||
30 | /* | 30 | /* |
31 | * for now each logical cpu is in its own vector allocation domain. | 31 | * for now each logical cpu is in its own vector allocation domain. |
32 | */ | 32 | */ |
33 | static cpumask_t x2apic_vector_allocation_domain(int cpu) | 33 | static void x2apic_vector_allocation_domain(int cpu, struct cpumask *retmask) |
34 | { | 34 | { |
35 | cpumask_t domain = CPU_MASK_NONE; | 35 | cpumask_clear(retmask); |
36 | cpu_set(cpu, domain); | 36 | cpumask_set_cpu(cpu, retmask); |
37 | return domain; | ||
38 | } | 37 | } |
39 | 38 | ||
40 | static void __x2apic_send_IPI_dest(unsigned int apicid, int vector, | 39 | static void __x2apic_send_IPI_dest(unsigned int apicid, int vector, |
@@ -56,32 +55,53 @@ static void __x2apic_send_IPI_dest(unsigned int apicid, int vector, | |||
56 | * at once. We have 16 cpus in a cluster. This will minimize IPI register | 55 | * at once. We have 16 cpus in a cluster. This will minimize IPI register |
57 | * writes. | 56 | * writes. |
58 | */ | 57 | */ |
59 | static void x2apic_send_IPI_mask(cpumask_t mask, int vector) | 58 | static void x2apic_send_IPI_mask(const struct cpumask *mask, int vector) |
60 | { | 59 | { |
61 | unsigned long flags; | 60 | unsigned long flags; |
62 | unsigned long query_cpu; | 61 | unsigned long query_cpu; |
63 | 62 | ||
64 | local_irq_save(flags); | 63 | local_irq_save(flags); |
65 | for_each_cpu_mask(query_cpu, mask) { | 64 | for_each_cpu(query_cpu, mask) |
66 | __x2apic_send_IPI_dest(per_cpu(x86_cpu_to_logical_apicid, query_cpu), | 65 | __x2apic_send_IPI_dest( |
67 | vector, APIC_DEST_LOGICAL); | 66 | per_cpu(x86_cpu_to_logical_apicid, query_cpu), |
68 | } | 67 | vector, APIC_DEST_LOGICAL); |
69 | local_irq_restore(flags); | 68 | local_irq_restore(flags); |
70 | } | 69 | } |
71 | 70 | ||
72 | static void x2apic_send_IPI_allbutself(int vector) | 71 | static void x2apic_send_IPI_mask_allbutself(const struct cpumask *mask, |
72 | int vector) | ||
73 | { | 73 | { |
74 | cpumask_t mask = cpu_online_map; | 74 | unsigned long flags; |
75 | unsigned long query_cpu; | ||
76 | unsigned long this_cpu = smp_processor_id(); | ||
75 | 77 | ||
76 | cpu_clear(smp_processor_id(), mask); | 78 | local_irq_save(flags); |
79 | for_each_cpu(query_cpu, mask) | ||
80 | if (query_cpu != this_cpu) | ||
81 | __x2apic_send_IPI_dest( | ||
82 | per_cpu(x86_cpu_to_logical_apicid, query_cpu), | ||
83 | vector, APIC_DEST_LOGICAL); | ||
84 | local_irq_restore(flags); | ||
85 | } | ||
86 | |||
87 | static void x2apic_send_IPI_allbutself(int vector) | ||
88 | { | ||
89 | unsigned long flags; | ||
90 | unsigned long query_cpu; | ||
91 | unsigned long this_cpu = smp_processor_id(); | ||
77 | 92 | ||
78 | if (!cpus_empty(mask)) | 93 | local_irq_save(flags); |
79 | x2apic_send_IPI_mask(mask, vector); | 94 | for_each_online_cpu(query_cpu) |
95 | if (query_cpu != this_cpu) | ||
96 | __x2apic_send_IPI_dest( | ||
97 | per_cpu(x86_cpu_to_logical_apicid, query_cpu), | ||
98 | vector, APIC_DEST_LOGICAL); | ||
99 | local_irq_restore(flags); | ||
80 | } | 100 | } |
81 | 101 | ||
82 | static void x2apic_send_IPI_all(int vector) | 102 | static void x2apic_send_IPI_all(int vector) |
83 | { | 103 | { |
84 | x2apic_send_IPI_mask(cpu_online_map, vector); | 104 | x2apic_send_IPI_mask(cpu_online_mask, vector); |
85 | } | 105 | } |
86 | 106 | ||
87 | static int x2apic_apic_id_registered(void) | 107 | static int x2apic_apic_id_registered(void) |
@@ -89,21 +109,38 @@ static int x2apic_apic_id_registered(void) | |||
89 | return 1; | 109 | return 1; |
90 | } | 110 | } |
91 | 111 | ||
92 | static unsigned int x2apic_cpu_mask_to_apicid(cpumask_t cpumask) | 112 | static unsigned int x2apic_cpu_mask_to_apicid(const struct cpumask *cpumask) |
93 | { | 113 | { |
94 | int cpu; | 114 | int cpu; |
95 | 115 | ||
96 | /* | 116 | /* |
97 | * We're using fixed IRQ delivery, can only return one phys APIC ID. | 117 | * We're using fixed IRQ delivery, can only return one logical APIC ID. |
98 | * May as well be the first. | 118 | * May as well be the first. |
99 | */ | 119 | */ |
100 | cpu = first_cpu(cpumask); | 120 | cpu = cpumask_first(cpumask); |
101 | if ((unsigned)cpu < NR_CPUS) | 121 | if ((unsigned)cpu < nr_cpu_ids) |
102 | return per_cpu(x86_cpu_to_logical_apicid, cpu); | 122 | return per_cpu(x86_cpu_to_logical_apicid, cpu); |
103 | else | 123 | else |
104 | return BAD_APICID; | 124 | return BAD_APICID; |
105 | } | 125 | } |
106 | 126 | ||
127 | static unsigned int x2apic_cpu_mask_to_apicid_and(const struct cpumask *cpumask, | ||
128 | const struct cpumask *andmask) | ||
129 | { | ||
130 | int cpu; | ||
131 | |||
132 | /* | ||
133 | * We're using fixed IRQ delivery, can only return one logical APIC ID. | ||
134 | * May as well be the first. | ||
135 | */ | ||
136 | for_each_cpu_and(cpu, cpumask, andmask) | ||
137 | if (cpumask_test_cpu(cpu, cpu_online_mask)) | ||
138 | break; | ||
139 | if (cpu < nr_cpu_ids) | ||
140 | return per_cpu(x86_cpu_to_logical_apicid, cpu); | ||
141 | return BAD_APICID; | ||
142 | } | ||
143 | |||
107 | static unsigned int get_apic_id(unsigned long x) | 144 | static unsigned int get_apic_id(unsigned long x) |
108 | { | 145 | { |
109 | unsigned int id; | 146 | unsigned int id; |
@@ -150,8 +187,10 @@ struct genapic apic_x2apic_cluster = { | |||
150 | .send_IPI_all = x2apic_send_IPI_all, | 187 | .send_IPI_all = x2apic_send_IPI_all, |
151 | .send_IPI_allbutself = x2apic_send_IPI_allbutself, | 188 | .send_IPI_allbutself = x2apic_send_IPI_allbutself, |
152 | .send_IPI_mask = x2apic_send_IPI_mask, | 189 | .send_IPI_mask = x2apic_send_IPI_mask, |
190 | .send_IPI_mask_allbutself = x2apic_send_IPI_mask_allbutself, | ||
153 | .send_IPI_self = x2apic_send_IPI_self, | 191 | .send_IPI_self = x2apic_send_IPI_self, |
154 | .cpu_mask_to_apicid = x2apic_cpu_mask_to_apicid, | 192 | .cpu_mask_to_apicid = x2apic_cpu_mask_to_apicid, |
193 | .cpu_mask_to_apicid_and = x2apic_cpu_mask_to_apicid_and, | ||
155 | .phys_pkg_id = phys_pkg_id, | 194 | .phys_pkg_id = phys_pkg_id, |
156 | .get_apic_id = get_apic_id, | 195 | .get_apic_id = get_apic_id, |
157 | .set_apic_id = set_apic_id, | 196 | .set_apic_id = set_apic_id, |
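
Each new cpu_mask_to_apicid_and() above scans the intersection of two masks for an online CPU without building a temporary cpumask: for_each_cpu_and() performs the AND on the fly. A sketch of that lookup, with -1 as an invented "none found" sentinel:

#include <linux/cpumask.h>

static int first_online_in(const struct cpumask *a,
			   const struct cpumask *b)
{
	int cpu;

	/* walks a & b without materializing a temporary mask */
	for_each_cpu_and(cpu, a, b)
		if (cpumask_test_cpu(cpu, cpu_online_mask))
			return cpu;	/* first hit wins */
	return -1;
}
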
diff --git a/arch/x86/kernel/genx2apic_phys.c b/arch/x86/kernel/genx2apic_phys.c index d042211768b7..21bcc0e098ba 100644 --- a/arch/x86/kernel/genx2apic_phys.c +++ b/arch/x86/kernel/genx2apic_phys.c | |||
@@ -29,16 +29,15 @@ static int x2apic_acpi_madt_oem_check(char *oem_id, char *oem_table_id) | |||
29 | 29 | ||
30 | /* Start with all IRQs pointing to boot CPU. IRQ balancing will shift them. */ | 30 | /* Start with all IRQs pointing to boot CPU. IRQ balancing will shift them. */ |
31 | 31 | ||
32 | static cpumask_t x2apic_target_cpus(void) | 32 | static const struct cpumask *x2apic_target_cpus(void) |
33 | { | 33 | { |
34 | return cpumask_of_cpu(0); | 34 | return cpumask_of(0); |
35 | } | 35 | } |
36 | 36 | ||
37 | static cpumask_t x2apic_vector_allocation_domain(int cpu) | 37 | static void x2apic_vector_allocation_domain(int cpu, struct cpumask *retmask) |
38 | { | 38 | { |
39 | cpumask_t domain = CPU_MASK_NONE; | 39 | cpumask_clear(retmask); |
40 | cpu_set(cpu, domain); | 40 | cpumask_set_cpu(cpu, retmask); |
41 | return domain; | ||
42 | } | 41 | } |
43 | 42 | ||
44 | static void __x2apic_send_IPI_dest(unsigned int apicid, int vector, | 43 | static void __x2apic_send_IPI_dest(unsigned int apicid, int vector, |
@@ -54,32 +53,54 @@ static void __x2apic_send_IPI_dest(unsigned int apicid, int vector, | |||
54 | x2apic_icr_write(cfg, apicid); | 53 | x2apic_icr_write(cfg, apicid); |
55 | } | 54 | } |
56 | 55 | ||
57 | static void x2apic_send_IPI_mask(cpumask_t mask, int vector) | 56 | static void x2apic_send_IPI_mask(const struct cpumask *mask, int vector) |
58 | { | 57 | { |
59 | unsigned long flags; | 58 | unsigned long flags; |
60 | unsigned long query_cpu; | 59 | unsigned long query_cpu; |
61 | 60 | ||
62 | local_irq_save(flags); | 61 | local_irq_save(flags); |
63 | for_each_cpu_mask(query_cpu, mask) { | 62 | for_each_cpu(query_cpu, mask) { |
64 | __x2apic_send_IPI_dest(per_cpu(x86_cpu_to_apicid, query_cpu), | 63 | __x2apic_send_IPI_dest(per_cpu(x86_cpu_to_apicid, query_cpu), |
65 | vector, APIC_DEST_PHYSICAL); | 64 | vector, APIC_DEST_PHYSICAL); |
66 | } | 65 | } |
67 | local_irq_restore(flags); | 66 | local_irq_restore(flags); |
68 | } | 67 | } |
69 | 68 | ||
70 | static void x2apic_send_IPI_allbutself(int vector) | 69 | static void x2apic_send_IPI_mask_allbutself(const struct cpumask *mask, |
70 | int vector) | ||
71 | { | 71 | { |
72 | cpumask_t mask = cpu_online_map; | 72 | unsigned long flags; |
73 | unsigned long query_cpu; | ||
74 | unsigned long this_cpu = smp_processor_id(); | ||
75 | |||
76 | local_irq_save(flags); | ||
77 | for_each_cpu(query_cpu, mask) { | ||
78 | if (query_cpu != this_cpu) | ||
79 | __x2apic_send_IPI_dest( | ||
80 | per_cpu(x86_cpu_to_apicid, query_cpu), | ||
81 | vector, APIC_DEST_PHYSICAL); | ||
82 | } | ||
83 | local_irq_restore(flags); | ||
84 | } | ||
73 | 85 | ||
74 | cpu_clear(smp_processor_id(), mask); | 86 | static void x2apic_send_IPI_allbutself(int vector) |
87 | { | ||
88 | unsigned long flags; | ||
89 | unsigned long query_cpu; | ||
90 | unsigned long this_cpu = smp_processor_id(); | ||
75 | 91 | ||
76 | if (!cpus_empty(mask)) | 92 | local_irq_save(flags); |
77 | x2apic_send_IPI_mask(mask, vector); | 93 | for_each_online_cpu(query_cpu) |
94 | if (query_cpu != this_cpu) | ||
95 | __x2apic_send_IPI_dest( | ||
96 | per_cpu(x86_cpu_to_apicid, query_cpu), | ||
97 | vector, APIC_DEST_PHYSICAL); | ||
98 | local_irq_restore(flags); | ||
78 | } | 99 | } |
79 | 100 | ||
80 | static void x2apic_send_IPI_all(int vector) | 101 | static void x2apic_send_IPI_all(int vector) |
81 | { | 102 | { |
82 | x2apic_send_IPI_mask(cpu_online_map, vector); | 103 | x2apic_send_IPI_mask(cpu_online_mask, vector); |
83 | } | 104 | } |
84 | 105 | ||
85 | static int x2apic_apic_id_registered(void) | 106 | static int x2apic_apic_id_registered(void) |
@@ -87,7 +108,7 @@ static int x2apic_apic_id_registered(void) | |||
87 | return 1; | 108 | return 1; |
88 | } | 109 | } |
89 | 110 | ||
90 | static unsigned int x2apic_cpu_mask_to_apicid(cpumask_t cpumask) | 111 | static unsigned int x2apic_cpu_mask_to_apicid(const struct cpumask *cpumask) |
91 | { | 112 | { |
92 | int cpu; | 113 | int cpu; |
93 | 114 | ||
@@ -95,13 +116,30 @@ static unsigned int x2apic_cpu_mask_to_apicid(cpumask_t cpumask) | |||
95 | * We're using fixed IRQ delivery, can only return one phys APIC ID. | 116 | * We're using fixed IRQ delivery, can only return one phys APIC ID. |
96 | * May as well be the first. | 117 | * May as well be the first. |
97 | */ | 118 | */ |
98 | cpu = first_cpu(cpumask); | 119 | cpu = cpumask_first(cpumask); |
99 | if ((unsigned)cpu < NR_CPUS) | 120 | if ((unsigned)cpu < nr_cpu_ids) |
100 | return per_cpu(x86_cpu_to_apicid, cpu); | 121 | return per_cpu(x86_cpu_to_apicid, cpu); |
101 | else | 122 | else |
102 | return BAD_APICID; | 123 | return BAD_APICID; |
103 | } | 124 | } |
104 | 125 | ||
126 | static unsigned int x2apic_cpu_mask_to_apicid_and(const struct cpumask *cpumask, | ||
127 | const struct cpumask *andmask) | ||
128 | { | ||
129 | int cpu; | ||
130 | |||
131 | /* | ||
132 | * We're using fixed IRQ delivery, can only return one phys APIC ID. | ||
133 | * May as well be the first. | ||
134 | */ | ||
135 | for_each_cpu_and(cpu, cpumask, andmask) | ||
136 | if (cpumask_test_cpu(cpu, cpu_online_mask)) | ||
137 | break; | ||
138 | if (cpu < nr_cpu_ids) | ||
139 | return per_cpu(x86_cpu_to_apicid, cpu); | ||
140 | return BAD_APICID; | ||
141 | } | ||
142 | |||
105 | static unsigned int get_apic_id(unsigned long x) | 143 | static unsigned int get_apic_id(unsigned long x) |
106 | { | 144 | { |
107 | unsigned int id; | 145 | unsigned int id; |
@@ -123,12 +161,12 @@ static unsigned int phys_pkg_id(int index_msb) | |||
123 | return current_cpu_data.initial_apicid >> index_msb; | 161 | return current_cpu_data.initial_apicid >> index_msb; |
124 | } | 162 | } |
125 | 163 | ||
126 | void x2apic_send_IPI_self(int vector) | 164 | static void x2apic_send_IPI_self(int vector) |
127 | { | 165 | { |
128 | apic_write(APIC_SELF_IPI, vector); | 166 | apic_write(APIC_SELF_IPI, vector); |
129 | } | 167 | } |
130 | 168 | ||
131 | void init_x2apic_ldr(void) | 169 | static void init_x2apic_ldr(void) |
132 | { | 170 | { |
133 | return; | 171 | return; |
134 | } | 172 | } |
@@ -145,8 +183,10 @@ struct genapic apic_x2apic_phys = { | |||
145 | .send_IPI_all = x2apic_send_IPI_all, | 183 | .send_IPI_all = x2apic_send_IPI_all, |
146 | .send_IPI_allbutself = x2apic_send_IPI_allbutself, | 184 | .send_IPI_allbutself = x2apic_send_IPI_allbutself, |
147 | .send_IPI_mask = x2apic_send_IPI_mask, | 185 | .send_IPI_mask = x2apic_send_IPI_mask, |
186 | .send_IPI_mask_allbutself = x2apic_send_IPI_mask_allbutself, | ||
148 | .send_IPI_self = x2apic_send_IPI_self, | 187 | .send_IPI_self = x2apic_send_IPI_self, |
149 | .cpu_mask_to_apicid = x2apic_cpu_mask_to_apicid, | 188 | .cpu_mask_to_apicid = x2apic_cpu_mask_to_apicid, |
189 | .cpu_mask_to_apicid_and = x2apic_cpu_mask_to_apicid_and, | ||
150 | .phys_pkg_id = phys_pkg_id, | 190 | .phys_pkg_id = phys_pkg_id, |
151 | .get_apic_id = get_apic_id, | 191 | .get_apic_id = get_apic_id, |
152 | .set_apic_id = set_apic_id, | 192 | .set_apic_id = set_apic_id, |
diff --git a/arch/x86/kernel/genx2apic_uv_x.c b/arch/x86/kernel/genx2apic_uv_x.c index dece17289731..b193e082f6ce 100644 --- a/arch/x86/kernel/genx2apic_uv_x.c +++ b/arch/x86/kernel/genx2apic_uv_x.c | |||
@@ -79,16 +79,15 @@ EXPORT_SYMBOL(sn_rtc_cycles_per_second); | |||
79 | 79 | ||
80 | /* Start with all IRQs pointing to boot CPU. IRQ balancing will shift them. */ | 80 | /* Start with all IRQs pointing to boot CPU. IRQ balancing will shift them. */ |
81 | 81 | ||
82 | static cpumask_t uv_target_cpus(void) | 82 | static const struct cpumask *uv_target_cpus(void) |
83 | { | 83 | { |
84 | return cpumask_of_cpu(0); | 84 | return cpumask_of(0); |
85 | } | 85 | } |
86 | 86 | ||
87 | static cpumask_t uv_vector_allocation_domain(int cpu) | 87 | static void uv_vector_allocation_domain(int cpu, struct cpumask *retmask) |
88 | { | 88 | { |
89 | cpumask_t domain = CPU_MASK_NONE; | 89 | cpumask_clear(retmask); |
90 | cpu_set(cpu, domain); | 90 | cpumask_set_cpu(cpu, retmask); |
91 | return domain; | ||
92 | } | 91 | } |
93 | 92 | ||
94 | int uv_wakeup_secondary(int phys_apicid, unsigned int start_rip) | 93 | int uv_wakeup_secondary(int phys_apicid, unsigned int start_rip) |
@@ -127,28 +126,37 @@ static void uv_send_IPI_one(int cpu, int vector) | |||
127 | uv_write_global_mmr64(pnode, UVH_IPI_INT, val); | 126 | uv_write_global_mmr64(pnode, UVH_IPI_INT, val); |
128 | } | 127 | } |
129 | 128 | ||
130 | static void uv_send_IPI_mask(cpumask_t mask, int vector) | 129 | static void uv_send_IPI_mask(const struct cpumask *mask, int vector) |
131 | { | 130 | { |
132 | unsigned int cpu; | 131 | unsigned int cpu; |
133 | 132 | ||
134 | for_each_possible_cpu(cpu) | 133 | for_each_cpu(cpu, mask) |
135 | if (cpu_isset(cpu, mask)) | 134 | uv_send_IPI_one(cpu, vector); |
135 | } | ||
136 | |||
137 | static void uv_send_IPI_mask_allbutself(const struct cpumask *mask, int vector) | ||
138 | { | ||
139 | unsigned int cpu; | ||
140 | unsigned int this_cpu = smp_processor_id(); | ||
141 | |||
142 | for_each_cpu(cpu, mask) | ||
143 | if (cpu != this_cpu) | ||
136 | uv_send_IPI_one(cpu, vector); | 144 | uv_send_IPI_one(cpu, vector); |
137 | } | 145 | } |
138 | 146 | ||
139 | static void uv_send_IPI_allbutself(int vector) | 147 | static void uv_send_IPI_allbutself(int vector) |
140 | { | 148 | { |
141 | cpumask_t mask = cpu_online_map; | 149 | unsigned int cpu; |
142 | 150 | unsigned int this_cpu = smp_processor_id(); | |
143 | cpu_clear(smp_processor_id(), mask); | ||
144 | 151 | ||
145 | if (!cpus_empty(mask)) | 152 | for_each_online_cpu(cpu) |
146 | uv_send_IPI_mask(mask, vector); | 153 | if (cpu != this_cpu) |
154 | uv_send_IPI_one(cpu, vector); | ||
147 | } | 155 | } |
148 | 156 | ||
149 | static void uv_send_IPI_all(int vector) | 157 | static void uv_send_IPI_all(int vector) |
150 | { | 158 | { |
151 | uv_send_IPI_mask(cpu_online_map, vector); | 159 | uv_send_IPI_mask(cpu_online_mask, vector); |
152 | } | 160 | } |
153 | 161 | ||
154 | static int uv_apic_id_registered(void) | 162 | static int uv_apic_id_registered(void) |
@@ -160,7 +168,7 @@ static void uv_init_apic_ldr(void) | |||
160 | { | 168 | { |
161 | } | 169 | } |
162 | 170 | ||
163 | static unsigned int uv_cpu_mask_to_apicid(cpumask_t cpumask) | 171 | static unsigned int uv_cpu_mask_to_apicid(const struct cpumask *cpumask) |
164 | { | 172 | { |
165 | int cpu; | 173 | int cpu; |
166 | 174 | ||
@@ -168,13 +176,30 @@ static unsigned int uv_cpu_mask_to_apicid(cpumask_t cpumask) | |||
168 | * We're using fixed IRQ delivery, can only return one phys APIC ID. | 176 | * We're using fixed IRQ delivery, can only return one phys APIC ID. |
169 | * May as well be the first. | 177 | * May as well be the first. |
170 | */ | 178 | */ |
171 | cpu = first_cpu(cpumask); | 179 | cpu = cpumask_first(cpumask); |
172 | if ((unsigned)cpu < nr_cpu_ids) | 180 | if ((unsigned)cpu < nr_cpu_ids) |
173 | return per_cpu(x86_cpu_to_apicid, cpu); | 181 | return per_cpu(x86_cpu_to_apicid, cpu); |
174 | else | 182 | else |
175 | return BAD_APICID; | 183 | return BAD_APICID; |
176 | } | 184 | } |
177 | 185 | ||
186 | static unsigned int uv_cpu_mask_to_apicid_and(const struct cpumask *cpumask, | ||
187 | const struct cpumask *andmask) | ||
188 | { | ||
189 | int cpu; | ||
190 | |||
191 | /* | ||
192 | * We're using fixed IRQ delivery, can only return one phys APIC ID. | ||
193 | * May as well be the first. | ||
194 | */ | ||
195 | for_each_cpu_and(cpu, cpumask, andmask) | ||
196 | if (cpumask_test_cpu(cpu, cpu_online_mask)) | ||
197 | break; | ||
198 | if (cpu < nr_cpu_ids) | ||
199 | return per_cpu(x86_cpu_to_apicid, cpu); | ||
200 | return BAD_APICID; | ||
201 | } | ||
202 | |||
178 | static unsigned int get_apic_id(unsigned long x) | 203 | static unsigned int get_apic_id(unsigned long x) |
179 | { | 204 | { |
180 | unsigned int id; | 205 | unsigned int id; |
@@ -222,8 +247,10 @@ struct genapic apic_x2apic_uv_x = { | |||
222 | .send_IPI_all = uv_send_IPI_all, | 247 | .send_IPI_all = uv_send_IPI_all, |
223 | .send_IPI_allbutself = uv_send_IPI_allbutself, | 248 | .send_IPI_allbutself = uv_send_IPI_allbutself, |
224 | .send_IPI_mask = uv_send_IPI_mask, | 249 | .send_IPI_mask = uv_send_IPI_mask, |
250 | .send_IPI_mask_allbutself = uv_send_IPI_mask_allbutself, | ||
225 | .send_IPI_self = uv_send_IPI_self, | 251 | .send_IPI_self = uv_send_IPI_self, |
226 | .cpu_mask_to_apicid = uv_cpu_mask_to_apicid, | 252 | .cpu_mask_to_apicid = uv_cpu_mask_to_apicid, |
253 | .cpu_mask_to_apicid_and = uv_cpu_mask_to_apicid_and, | ||
227 | .phys_pkg_id = phys_pkg_id, | 254 | .phys_pkg_id = phys_pkg_id, |
228 | .get_apic_id = get_apic_id, | 255 | .get_apic_id = get_apic_id, |
229 | .set_apic_id = set_apic_id, | 256 | .set_apic_id = set_apic_id, |
diff --git a/arch/x86/kernel/hpet.c b/arch/x86/kernel/hpet.c index 845ea097383e..cd759ad90690 100644 --- a/arch/x86/kernel/hpet.c +++ b/arch/x86/kernel/hpet.c | |||
@@ -248,7 +248,7 @@ static void hpet_legacy_clockevent_register(void) | |||
248 | * Start hpet with the boot cpu mask and make it | 248 | * Start hpet with the boot cpu mask and make it |
249 | * global after the IO_APIC has been initialized. | 249 | * global after the IO_APIC has been initialized. |
250 | */ | 250 | */ |
251 | hpet_clockevent.cpumask = cpumask_of_cpu(smp_processor_id()); | 251 | hpet_clockevent.cpumask = cpumask_of(smp_processor_id()); |
252 | clockevents_register_device(&hpet_clockevent); | 252 | clockevents_register_device(&hpet_clockevent); |
253 | global_clock_event = &hpet_clockevent; | 253 | global_clock_event = &hpet_clockevent; |
254 | printk(KERN_DEBUG "hpet clockevent registered\n"); | 254 | printk(KERN_DEBUG "hpet clockevent registered\n"); |
@@ -303,7 +303,7 @@ static void hpet_set_mode(enum clock_event_mode mode, | |||
303 | struct hpet_dev *hdev = EVT_TO_HPET_DEV(evt); | 303 | struct hpet_dev *hdev = EVT_TO_HPET_DEV(evt); |
304 | hpet_setup_msi_irq(hdev->irq); | 304 | hpet_setup_msi_irq(hdev->irq); |
305 | disable_irq(hdev->irq); | 305 | disable_irq(hdev->irq); |
306 | irq_set_affinity(hdev->irq, cpumask_of_cpu(hdev->cpu)); | 306 | irq_set_affinity(hdev->irq, cpumask_of(hdev->cpu)); |
307 | enable_irq(hdev->irq); | 307 | enable_irq(hdev->irq); |
308 | } | 308 | } |
309 | break; | 309 | break; |
@@ -451,7 +451,7 @@ static int hpet_setup_irq(struct hpet_dev *dev) | |||
451 | return -1; | 451 | return -1; |
452 | 452 | ||
453 | disable_irq(dev->irq); | 453 | disable_irq(dev->irq); |
454 | irq_set_affinity(dev->irq, cpumask_of_cpu(dev->cpu)); | 454 | irq_set_affinity(dev->irq, cpumask_of(dev->cpu)); |
455 | enable_irq(dev->irq); | 455 | enable_irq(dev->irq); |
456 | 456 | ||
457 | printk(KERN_DEBUG "hpet: %s irq %d for MSI\n", | 457 | printk(KERN_DEBUG "hpet: %s irq %d for MSI\n", |
@@ -502,7 +502,7 @@ static void init_one_hpet_msi_clockevent(struct hpet_dev *hdev, int cpu) | |||
502 | /* 5 usec minimum reprogramming delta. */ | 502 | /* 5 usec minimum reprogramming delta. */ |
503 | evt->min_delta_ns = 5000; | 503 | evt->min_delta_ns = 5000; |
504 | 504 | ||
505 | evt->cpumask = cpumask_of_cpu(hdev->cpu); | 505 | evt->cpumask = cpumask_of(hdev->cpu); |
506 | clockevents_register_device(evt); | 506 | clockevents_register_device(evt); |
507 | } | 507 | } |
508 | 508 | ||
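
The hpet hunks above and the i8253 hunk below swap cpumask_of_cpu(), which constructed a full cpumask_t by value, for cpumask_of(), which returns a const pointer into a static table; the clockevent cpumask field now stores a reference rather than a copy. A sketch assuming the new pointer-typed field:

#include <linux/clockchips.h>
#include <linux/smp.h>

static void register_on_this_cpu(struct clock_event_device *evt)
{
	/* const pointer into a static per-cpu table: nothing to
	 * copy, nothing to free */
	evt->cpumask = cpumask_of(smp_processor_id());
	clockevents_register_device(evt);
}
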
diff --git a/arch/x86/kernel/i8253.c b/arch/x86/kernel/i8253.c index c1b5e3ece1f2..10f92fb532f3 100644 --- a/arch/x86/kernel/i8253.c +++ b/arch/x86/kernel/i8253.c | |||
@@ -114,7 +114,7 @@ void __init setup_pit_timer(void) | |||
114 | * Start pit with the boot cpu mask and make it global after the | 114 | * Start pit with the boot cpu mask and make it global after the |
115 | * IO_APIC has been initialized. | 115 | * IO_APIC has been initialized. |
116 | */ | 116 | */ |
117 | pit_clockevent.cpumask = cpumask_of_cpu(smp_processor_id()); | 117 | pit_clockevent.cpumask = cpumask_of(smp_processor_id()); |
118 | pit_clockevent.mult = div_sc(CLOCK_TICK_RATE, NSEC_PER_SEC, | 118 | pit_clockevent.mult = div_sc(CLOCK_TICK_RATE, NSEC_PER_SEC, |
119 | pit_clockevent.shift); | 119 | pit_clockevent.shift); |
120 | pit_clockevent.max_delta_ns = | 120 | pit_clockevent.max_delta_ns = |
diff --git a/arch/x86/kernel/io_apic.c b/arch/x86/kernel/io_apic.c index 74917658b004..69911722b9d3 100644 --- a/arch/x86/kernel/io_apic.c +++ b/arch/x86/kernel/io_apic.c | |||
@@ -136,8 +136,8 @@ static struct irq_pin_list *get_one_free_irq_2_pin(int cpu) | |||
136 | 136 | ||
137 | struct irq_cfg { | 137 | struct irq_cfg { |
138 | struct irq_pin_list *irq_2_pin; | 138 | struct irq_pin_list *irq_2_pin; |
139 | cpumask_t domain; | 139 | cpumask_var_t domain; |
140 | cpumask_t old_domain; | 140 | cpumask_var_t old_domain; |
141 | unsigned move_cleanup_count; | 141 | unsigned move_cleanup_count; |
142 | u8 vector; | 142 | u8 vector; |
143 | u8 move_in_progress : 1; | 143 | u8 move_in_progress : 1; |
@@ -152,22 +152,22 @@ static struct irq_cfg irq_cfgx[] = { | |||
152 | #else | 152 | #else |
153 | static struct irq_cfg irq_cfgx[NR_IRQS] = { | 153 | static struct irq_cfg irq_cfgx[NR_IRQS] = { |
154 | #endif | 154 | #endif |
155 | [0] = { .domain = CPU_MASK_ALL, .vector = IRQ0_VECTOR, }, | 155 | [0] = { .vector = IRQ0_VECTOR, }, |
156 | [1] = { .domain = CPU_MASK_ALL, .vector = IRQ1_VECTOR, }, | 156 | [1] = { .vector = IRQ1_VECTOR, }, |
157 | [2] = { .domain = CPU_MASK_ALL, .vector = IRQ2_VECTOR, }, | 157 | [2] = { .vector = IRQ2_VECTOR, }, |
158 | [3] = { .domain = CPU_MASK_ALL, .vector = IRQ3_VECTOR, }, | 158 | [3] = { .vector = IRQ3_VECTOR, }, |
159 | [4] = { .domain = CPU_MASK_ALL, .vector = IRQ4_VECTOR, }, | 159 | [4] = { .vector = IRQ4_VECTOR, }, |
160 | [5] = { .domain = CPU_MASK_ALL, .vector = IRQ5_VECTOR, }, | 160 | [5] = { .vector = IRQ5_VECTOR, }, |
161 | [6] = { .domain = CPU_MASK_ALL, .vector = IRQ6_VECTOR, }, | 161 | [6] = { .vector = IRQ6_VECTOR, }, |
162 | [7] = { .domain = CPU_MASK_ALL, .vector = IRQ7_VECTOR, }, | 162 | [7] = { .vector = IRQ7_VECTOR, }, |
163 | [8] = { .domain = CPU_MASK_ALL, .vector = IRQ8_VECTOR, }, | 163 | [8] = { .vector = IRQ8_VECTOR, }, |
164 | [9] = { .domain = CPU_MASK_ALL, .vector = IRQ9_VECTOR, }, | 164 | [9] = { .vector = IRQ9_VECTOR, }, |
165 | [10] = { .domain = CPU_MASK_ALL, .vector = IRQ10_VECTOR, }, | 165 | [10] = { .vector = IRQ10_VECTOR, }, |
166 | [11] = { .domain = CPU_MASK_ALL, .vector = IRQ11_VECTOR, }, | 166 | [11] = { .vector = IRQ11_VECTOR, }, |
167 | [12] = { .domain = CPU_MASK_ALL, .vector = IRQ12_VECTOR, }, | 167 | [12] = { .vector = IRQ12_VECTOR, }, |
168 | [13] = { .domain = CPU_MASK_ALL, .vector = IRQ13_VECTOR, }, | 168 | [13] = { .vector = IRQ13_VECTOR, }, |
169 | [14] = { .domain = CPU_MASK_ALL, .vector = IRQ14_VECTOR, }, | 169 | [14] = { .vector = IRQ14_VECTOR, }, |
170 | [15] = { .domain = CPU_MASK_ALL, .vector = IRQ15_VECTOR, }, | 170 | [15] = { .vector = IRQ15_VECTOR, }, |
171 | }; | 171 | }; |
172 | 172 | ||
173 | int __init arch_early_irq_init(void) | 173 | int __init arch_early_irq_init(void) |
@@ -183,6 +183,10 @@ int __init arch_early_irq_init(void) | |||
183 | for (i = 0; i < count; i++) { | 183 | for (i = 0; i < count; i++) { |
184 | desc = irq_to_desc(i); | 184 | desc = irq_to_desc(i); |
185 | desc->chip_data = &cfg[i]; | 185 | desc->chip_data = &cfg[i]; |
186 | alloc_bootmem_cpumask_var(&cfg[i].domain); | ||
187 | alloc_bootmem_cpumask_var(&cfg[i].old_domain); | ||
188 | if (i < NR_IRQS_LEGACY) | ||
189 | cpumask_setall(cfg[i].domain); | ||
186 | } | 190 | } |
187 | 191 | ||
188 | return 0; | 192 | return 0; |
@@ -209,6 +213,20 @@ static struct irq_cfg *get_one_free_irq_cfg(int cpu) | |||
209 | node = cpu_to_node(cpu); | 213 | node = cpu_to_node(cpu); |
210 | 214 | ||
211 | cfg = kzalloc_node(sizeof(*cfg), GFP_ATOMIC, node); | 215 | cfg = kzalloc_node(sizeof(*cfg), GFP_ATOMIC, node); |
216 | if (cfg) { | ||
217 | /* FIXME: needs alloc_cpumask_var_node() */ | ||
218 | if (!alloc_cpumask_var(&cfg->domain, GFP_ATOMIC)) { | ||
219 | kfree(cfg); | ||
220 | cfg = NULL; | ||
221 | } else if (!alloc_cpumask_var(&cfg->old_domain, GFP_ATOMIC)) { | ||
222 | free_cpumask_var(cfg->domain); | ||
223 | kfree(cfg); | ||
224 | cfg = NULL; | ||
225 | } else { | ||
226 | cpumask_clear(cfg->domain); | ||
227 | cpumask_clear(cfg->old_domain); | ||
228 | } | ||
229 | } | ||
212 | printk(KERN_DEBUG " alloc irq_cfg on cpu %d node %d\n", cpu, node); | 230 | printk(KERN_DEBUG " alloc irq_cfg on cpu %d node %d\n", cpu, node); |
213 | 231 | ||
214 | return cfg; | 232 | return cfg; |
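The allocation above shows the unwind pattern that the cpumask_var_t conversion forces everywhere: with CONFIG_CPUMASK_OFFSTACK each mask is its own heap object, so a partial failure must be rolled back explicitly. A minimal kernel-style sketch of the same pattern with goto-based unwinding — struct foo and foo_alloc() are hypothetical, only the cpumask and slab calls are real:

/* Illustrative only; compiles in-tree, not standalone. */
#include <linux/cpumask.h>
#include <linux/slab.h>

struct foo {
	cpumask_var_t a;
	cpumask_var_t b;
};

static struct foo *foo_alloc(gfp_t gfp)
{
	struct foo *f = kzalloc(sizeof(*f), gfp);

	if (!f)
		return NULL;
	if (!alloc_cpumask_var(&f->a, gfp))
		goto free_foo;
	if (!alloc_cpumask_var(&f->b, gfp))
		goto free_a;
	cpumask_clear(f->a);	/* alloc_cpumask_var() does not zero */
	cpumask_clear(f->b);
	return f;

free_a:
	free_cpumask_var(f->a);
free_foo:
	kfree(f);
	return NULL;
}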
@@ -333,13 +351,14 @@ void arch_free_chip_data(struct irq_desc *old_desc, struct irq_desc *desc) | |||
333 | } | 351 | } |
334 | } | 352 | } |
335 | 353 | ||
336 | static void set_extra_move_desc(struct irq_desc *desc, cpumask_t mask) | 354 | static void |
355 | set_extra_move_desc(struct irq_desc *desc, const struct cpumask *mask) | ||
337 | { | 356 | { |
338 | struct irq_cfg *cfg = desc->chip_data; | 357 | struct irq_cfg *cfg = desc->chip_data; |
339 | 358 | ||
340 | if (!cfg->move_in_progress) { | 359 | if (!cfg->move_in_progress) { |
341 | /* it means that domain is not changed */ | 360 | /* it means that domain is not changed */ |
342 | if (!cpus_intersects(desc->affinity, mask)) | 361 | if (!cpumask_intersects(&desc->affinity, mask)) |
343 | cfg->move_desc_pending = 1; | 362 | cfg->move_desc_pending = 1; |
344 | } | 363 | } |
345 | } | 364 | } |
@@ -354,7 +373,8 @@ static struct irq_cfg *irq_cfg(unsigned int irq) | |||
354 | #endif | 373 | #endif |
355 | 374 | ||
356 | #ifndef CONFIG_NUMA_MIGRATE_IRQ_DESC | 375 | #ifndef CONFIG_NUMA_MIGRATE_IRQ_DESC |
357 | static inline void set_extra_move_desc(struct irq_desc *desc, cpumask_t mask) | 376 | static inline void |
377 | set_extra_move_desc(struct irq_desc *desc, const struct cpumask *mask) | ||
358 | { | 378 | { |
359 | } | 379 | } |
360 | #endif | 380 | #endif |
@@ -485,6 +505,26 @@ static void ioapic_mask_entry(int apic, int pin) | |||
485 | } | 505 | } |
486 | 506 | ||
487 | #ifdef CONFIG_SMP | 507 | #ifdef CONFIG_SMP |
508 | static void send_cleanup_vector(struct irq_cfg *cfg) | ||
509 | { | ||
510 | cpumask_var_t cleanup_mask; | ||
511 | |||
512 | if (unlikely(!alloc_cpumask_var(&cleanup_mask, GFP_ATOMIC))) { | ||
513 | unsigned int i; | ||
514 | cfg->move_cleanup_count = 0; | ||
515 | for_each_cpu_and(i, cfg->old_domain, cpu_online_mask) | ||
516 | cfg->move_cleanup_count++; | ||
517 | for_each_cpu_and(i, cfg->old_domain, cpu_online_mask) | ||
518 | send_IPI_mask(cpumask_of(i), IRQ_MOVE_CLEANUP_VECTOR); | ||
519 | } else { | ||
520 | cpumask_and(cleanup_mask, cfg->old_domain, cpu_online_mask); | ||
521 | cfg->move_cleanup_count = cpumask_weight(cleanup_mask); | ||
522 | send_IPI_mask(cleanup_mask, IRQ_MOVE_CLEANUP_VECTOR); | ||
523 | free_cpumask_var(cleanup_mask); | ||
524 | } | ||
525 | cfg->move_in_progress = 0; | ||
526 | } | ||
527 | |||
488 | static void __target_IO_APIC_irq(unsigned int irq, unsigned int dest, struct irq_cfg *cfg) | 528 | static void __target_IO_APIC_irq(unsigned int irq, unsigned int dest, struct irq_cfg *cfg) |
489 | { | 529 | { |
490 | int apic, pin; | 530 | int apic, pin; |
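send_cleanup_vector() above also documents the GFP_ATOMIC fallback idiom: when a temporary cpumask cannot be allocated, the weight of an intersection is computed by iterating instead of by cpumask_and() plus cpumask_weight(). A hedged kernel-style sketch of just that idiom:

/* Count cpus in (a & b) without a temporary mask; this is what the
 * unlikely() branch above does for old_domain & cpu_online_mask.
 * Illustrative only. */
#include <linux/cpumask.h>

static unsigned int intersection_weight(const struct cpumask *a,
					const struct cpumask *b)
{
	unsigned int cpu, w = 0;

	for_each_cpu_and(cpu, a, b)
		w++;
	return w;
}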
@@ -520,41 +560,55 @@ static void __target_IO_APIC_irq(unsigned int irq, unsigned int dest, struct irq | |||
520 | } | 560 | } |
521 | } | 561 | } |
522 | 562 | ||
523 | static int assign_irq_vector(int irq, struct irq_cfg *cfg, cpumask_t mask); | 563 | static int |
564 | assign_irq_vector(int irq, struct irq_cfg *cfg, const struct cpumask *mask); | ||
524 | 565 | ||
525 | static void set_ioapic_affinity_irq_desc(struct irq_desc *desc, cpumask_t mask) | 566 | /* |
567 | * Either sets desc->affinity to a valid value, and returns cpu_mask_to_apicid | ||
568 | * of that, or returns BAD_APICID and leaves desc->affinity untouched. | ||
569 | */ | ||
570 | static unsigned int | ||
571 | set_desc_affinity(struct irq_desc *desc, const struct cpumask *mask) | ||
526 | { | 572 | { |
527 | struct irq_cfg *cfg; | 573 | struct irq_cfg *cfg; |
528 | unsigned long flags; | ||
529 | unsigned int dest; | ||
530 | cpumask_t tmp; | ||
531 | unsigned int irq; | 574 | unsigned int irq; |
532 | 575 | ||
533 | cpus_and(tmp, mask, cpu_online_map); | 576 | if (!cpumask_intersects(mask, cpu_online_mask)) |
534 | if (cpus_empty(tmp)) | 577 | return BAD_APICID; |
535 | return; | ||
536 | 578 | ||
537 | irq = desc->irq; | 579 | irq = desc->irq; |
538 | cfg = desc->chip_data; | 580 | cfg = desc->chip_data; |
539 | if (assign_irq_vector(irq, cfg, mask)) | 581 | if (assign_irq_vector(irq, cfg, mask)) |
540 | return; | 582 | return BAD_APICID; |
541 | 583 | ||
584 | cpumask_and(&desc->affinity, cfg->domain, mask); | ||
542 | set_extra_move_desc(desc, mask); | 585 | set_extra_move_desc(desc, mask); |
586 | return cpu_mask_to_apicid_and(&desc->affinity, cpu_online_mask); | ||
587 | } | ||
543 | 588 | ||
544 | cpus_and(tmp, cfg->domain, mask); | 589 | static void |
545 | dest = cpu_mask_to_apicid(tmp); | 590 | set_ioapic_affinity_irq_desc(struct irq_desc *desc, const struct cpumask *mask) |
546 | /* | 591 | { |
547 | * Only the high 8 bits are valid. | 592 | struct irq_cfg *cfg; |
548 | */ | 593 | unsigned long flags; |
549 | dest = SET_APIC_LOGICAL_ID(dest); | 594 | unsigned int dest; |
595 | unsigned int irq; | ||
596 | |||
597 | irq = desc->irq; | ||
598 | cfg = desc->chip_data; | ||
550 | 599 | ||
551 | spin_lock_irqsave(&ioapic_lock, flags); | 600 | spin_lock_irqsave(&ioapic_lock, flags); |
552 | __target_IO_APIC_irq(irq, dest, cfg); | 601 | dest = set_desc_affinity(desc, mask); |
553 | desc->affinity = mask; | 602 | if (dest != BAD_APICID) { |
603 | /* Only the high 8 bits are valid. */ | ||
604 | dest = SET_APIC_LOGICAL_ID(dest); | ||
605 | __target_IO_APIC_irq(irq, dest, cfg); | ||
606 | } | ||
554 | spin_unlock_irqrestore(&ioapic_lock, flags); | 607 | spin_unlock_irqrestore(&ioapic_lock, flags); |
555 | } | 608 | } |
556 | 609 | ||
557 | static void set_ioapic_affinity_irq(unsigned int irq, cpumask_t mask) | 610 | static void |
611 | set_ioapic_affinity_irq(unsigned int irq, const struct cpumask *mask) | ||
558 | { | 612 | { |
559 | struct irq_desc *desc; | 613 | struct irq_desc *desc; |
560 | 614 | ||
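After this refactoring every ->set_affinity method takes the same shape: set_desc_affinity() validates the mask against the online map, assigns a vector, updates desc->affinity, and hands back the APIC destination (or BAD_APICID on failure); only the hardware write stays chip-specific. A sketch of a hypothetical chip method — my_hw_retarget() is made up:

static void my_set_affinity(unsigned int irq, const struct cpumask *mask)
{
	struct irq_desc *desc = irq_to_desc(irq);
	unsigned int dest = set_desc_affinity(desc, mask);

	if (dest == BAD_APICID)
		return;		/* offline-only mask, or no free vector */

	my_hw_retarget(irq, dest);	/* chip-specific reprogramming */
}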
@@ -652,7 +706,7 @@ static void __unmask_IO_APIC_irq(struct irq_cfg *cfg) | |||
652 | } | 706 | } |
653 | 707 | ||
654 | #ifdef CONFIG_X86_64 | 708 | #ifdef CONFIG_X86_64 |
655 | void io_apic_sync(struct irq_pin_list *entry) | 709 | static void io_apic_sync(struct irq_pin_list *entry) |
656 | { | 710 | { |
657 | /* | 711 | /* |
658 | * Synchronize the IO-APIC and the CPU by doing | 712 | * Synchronize the IO-APIC and the CPU by doing |
@@ -1222,7 +1276,8 @@ void unlock_vector_lock(void) | |||
1222 | spin_unlock(&vector_lock); | 1276 | spin_unlock(&vector_lock); |
1223 | } | 1277 | } |
1224 | 1278 | ||
1225 | static int __assign_irq_vector(int irq, struct irq_cfg *cfg, cpumask_t mask) | 1279 | static int |
1280 | __assign_irq_vector(int irq, struct irq_cfg *cfg, const struct cpumask *mask) | ||
1226 | { | 1281 | { |
1227 | /* | 1282 | /* |
1228 | * NOTE! The local APIC isn't very good at handling | 1283 | * NOTE! The local APIC isn't very good at handling |
@@ -1237,49 +1292,49 @@ static int __assign_irq_vector(int irq, struct irq_cfg *cfg, cpumask_t mask) | |||
1237 | */ | 1292 | */ |
1238 | static int current_vector = FIRST_DEVICE_VECTOR, current_offset = 0; | 1293 | static int current_vector = FIRST_DEVICE_VECTOR, current_offset = 0; |
1239 | unsigned int old_vector; | 1294 | unsigned int old_vector; |
1240 | int cpu; | 1295 | int cpu, err; |
1296 | cpumask_var_t tmp_mask; | ||
1241 | 1297 | ||
1242 | if ((cfg->move_in_progress) || cfg->move_cleanup_count) | 1298 | if ((cfg->move_in_progress) || cfg->move_cleanup_count) |
1243 | return -EBUSY; | 1299 | return -EBUSY; |
1244 | 1300 | ||
1245 | /* Only try and allocate irqs on cpus that are present */ | 1301 | if (!alloc_cpumask_var(&tmp_mask, GFP_ATOMIC)) |
1246 | cpus_and(mask, mask, cpu_online_map); | 1302 | return -ENOMEM; |
1247 | 1303 | ||
1248 | old_vector = cfg->vector; | 1304 | old_vector = cfg->vector; |
1249 | if (old_vector) { | 1305 | if (old_vector) { |
1250 | cpumask_t tmp; | 1306 | cpumask_and(tmp_mask, mask, cpu_online_mask); |
1251 | cpus_and(tmp, cfg->domain, mask); | 1307 | cpumask_and(tmp_mask, cfg->domain, tmp_mask); |
1252 | if (!cpus_empty(tmp)) | 1308 | if (!cpumask_empty(tmp_mask)) { |
1309 | free_cpumask_var(tmp_mask); | ||
1253 | return 0; | 1310 | return 0; |
1311 | } | ||
1254 | } | 1312 | } |
1255 | 1313 | ||
1256 | for_each_cpu_mask_nr(cpu, mask) { | 1314 | /* Only try and allocate irqs on cpus that are present */ |
1257 | cpumask_t domain, new_mask; | 1315 | err = -ENOSPC; |
1316 | for_each_cpu_and(cpu, mask, cpu_online_mask) { | ||
1258 | int new_cpu; | 1317 | int new_cpu; |
1259 | int vector, offset; | 1318 | int vector, offset; |
1260 | 1319 | ||
1261 | domain = vector_allocation_domain(cpu); | 1320 | vector_allocation_domain(cpu, tmp_mask); |
1262 | cpus_and(new_mask, domain, cpu_online_map); | ||
1263 | 1321 | ||
1264 | vector = current_vector; | 1322 | vector = current_vector; |
1265 | offset = current_offset; | 1323 | offset = current_offset; |
1266 | next: | 1324 | next: |
1267 | vector += 8; | 1325 | vector += 8; |
1268 | if (vector >= first_system_vector) { | 1326 | if (vector >= first_system_vector) { |
1269 | /* If we run out of vectors on large boxen, must share them. */ | 1327 | /* If out of vectors on large boxen, must share them. */ |
1270 | offset = (offset + 1) % 8; | 1328 | offset = (offset + 1) % 8; |
1271 | vector = FIRST_DEVICE_VECTOR + offset; | 1329 | vector = FIRST_DEVICE_VECTOR + offset; |
1272 | } | 1330 | } |
1273 | if (unlikely(current_vector == vector)) | 1331 | if (unlikely(current_vector == vector)) |
1274 | continue; | 1332 | continue; |
1275 | #ifdef CONFIG_X86_64 | 1333 | |
1276 | if (vector == IA32_SYSCALL_VECTOR) | 1334 | if (test_bit(vector, used_vectors)) |
1277 | goto next; | ||
1278 | #else | ||
1279 | if (vector == SYSCALL_VECTOR) | ||
1280 | goto next; | 1335 | goto next; |
1281 | #endif | 1336 | |
1282 | for_each_cpu_mask_nr(new_cpu, new_mask) | 1337 | for_each_cpu_and(new_cpu, tmp_mask, cpu_online_mask) |
1283 | if (per_cpu(vector_irq, new_cpu)[vector] != -1) | 1338 | if (per_cpu(vector_irq, new_cpu)[vector] != -1) |
1284 | goto next; | 1339 | goto next; |
1285 | /* Found one! */ | 1340 | /* Found one! */ |
@@ -1287,18 +1342,21 @@ next: | |||
1287 | current_offset = offset; | 1342 | current_offset = offset; |
1288 | if (old_vector) { | 1343 | if (old_vector) { |
1289 | cfg->move_in_progress = 1; | 1344 | cfg->move_in_progress = 1; |
1290 | cfg->old_domain = cfg->domain; | 1345 | cpumask_copy(cfg->old_domain, cfg->domain); |
1291 | } | 1346 | } |
1292 | for_each_cpu_mask_nr(new_cpu, new_mask) | 1347 | for_each_cpu_and(new_cpu, tmp_mask, cpu_online_mask) |
1293 | per_cpu(vector_irq, new_cpu)[vector] = irq; | 1348 | per_cpu(vector_irq, new_cpu)[vector] = irq; |
1294 | cfg->vector = vector; | 1349 | cfg->vector = vector; |
1295 | cfg->domain = domain; | 1350 | cpumask_copy(cfg->domain, tmp_mask); |
1296 | return 0; | 1351 | err = 0; |
1352 | break; | ||
1297 | } | 1353 | } |
1298 | return -ENOSPC; | 1354 | free_cpumask_var(tmp_mask); |
1355 | return err; | ||
1299 | } | 1356 | } |
1300 | 1357 | ||
1301 | static int assign_irq_vector(int irq, struct irq_cfg *cfg, cpumask_t mask) | 1358 | static int |
1359 | assign_irq_vector(int irq, struct irq_cfg *cfg, const struct cpumask *mask) | ||
1302 | { | 1360 | { |
1303 | int err; | 1361 | int err; |
1304 | unsigned long flags; | 1362 | unsigned long flags; |
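The search loop in __assign_irq_vector() steps vectors by 8 so successive allocations spread across APIC priority levels, and wraps past first_system_vector with a rotated offset so later passes probe the slots skipped earlier. A small userspace model of just the walk — the numeric bounds mirror FIRST_DEVICE_VECTOR and first_system_vector of this era but are otherwise illustrative:

#include <stdio.h>

#define FIRST_DEVICE_VECTOR 0x31
#define FIRST_SYSTEM_VECTOR 0xef

int main(void)
{
	int vector = FIRST_DEVICE_VECTOR, offset = 0;
	int i;

	for (i = 0; i < 30; i++) {
		vector += 8;
		if (vector >= FIRST_SYSTEM_VECTOR) {
			/* out of vectors: wrap and share, shifted by one */
			offset = (offset + 1) % 8;
			vector = FIRST_DEVICE_VECTOR + offset;
		}
		printf("candidate 0x%02x (offset %d)\n", vector, offset);
	}
	return 0;
}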
@@ -1311,23 +1369,20 @@ static int assign_irq_vector(int irq, struct irq_cfg *cfg, cpumask_t mask) | |||
1311 | 1369 | ||
1312 | static void __clear_irq_vector(int irq, struct irq_cfg *cfg) | 1370 | static void __clear_irq_vector(int irq, struct irq_cfg *cfg) |
1313 | { | 1371 | { |
1314 | cpumask_t mask; | ||
1315 | int cpu, vector; | 1372 | int cpu, vector; |
1316 | 1373 | ||
1317 | BUG_ON(!cfg->vector); | 1374 | BUG_ON(!cfg->vector); |
1318 | 1375 | ||
1319 | vector = cfg->vector; | 1376 | vector = cfg->vector; |
1320 | cpus_and(mask, cfg->domain, cpu_online_map); | 1377 | for_each_cpu_and(cpu, cfg->domain, cpu_online_mask) |
1321 | for_each_cpu_mask_nr(cpu, mask) | ||
1322 | per_cpu(vector_irq, cpu)[vector] = -1; | 1378 | per_cpu(vector_irq, cpu)[vector] = -1; |
1323 | 1379 | ||
1324 | cfg->vector = 0; | 1380 | cfg->vector = 0; |
1325 | cpus_clear(cfg->domain); | 1381 | cpumask_clear(cfg->domain); |
1326 | 1382 | ||
1327 | if (likely(!cfg->move_in_progress)) | 1383 | if (likely(!cfg->move_in_progress)) |
1328 | return; | 1384 | return; |
1329 | cpus_and(mask, cfg->old_domain, cpu_online_map); | 1385 | for_each_cpu_and(cpu, cfg->old_domain, cpu_online_mask) { |
1330 | for_each_cpu_mask_nr(cpu, mask) { | ||
1331 | for (vector = FIRST_EXTERNAL_VECTOR; vector < NR_VECTORS; | 1386 | for (vector = FIRST_EXTERNAL_VECTOR; vector < NR_VECTORS; |
1332 | vector++) { | 1387 | vector++) { |
1333 | if (per_cpu(vector_irq, cpu)[vector] != irq) | 1388 | if (per_cpu(vector_irq, cpu)[vector] != irq) |
@@ -1350,7 +1405,7 @@ void __setup_vector_irq(int cpu) | |||
1350 | /* Mark the inuse vectors */ | 1405 | /* Mark the inuse vectors */ |
1351 | for_each_irq_desc(irq, desc) { | 1406 | for_each_irq_desc(irq, desc) { |
1352 | cfg = desc->chip_data; | 1407 | cfg = desc->chip_data; |
1353 | if (!cpu_isset(cpu, cfg->domain)) | 1408 | if (!cpumask_test_cpu(cpu, cfg->domain)) |
1354 | continue; | 1409 | continue; |
1355 | vector = cfg->vector; | 1410 | vector = cfg->vector; |
1356 | per_cpu(vector_irq, cpu)[vector] = irq; | 1411 | per_cpu(vector_irq, cpu)[vector] = irq; |
@@ -1362,7 +1417,7 @@ void __setup_vector_irq(int cpu) | |||
1362 | continue; | 1417 | continue; |
1363 | 1418 | ||
1364 | cfg = irq_cfg(irq); | 1419 | cfg = irq_cfg(irq); |
1365 | if (!cpu_isset(cpu, cfg->domain)) | 1420 | if (!cpumask_test_cpu(cpu, cfg->domain)) |
1366 | per_cpu(vector_irq, cpu)[vector] = -1; | 1421 | per_cpu(vector_irq, cpu)[vector] = -1; |
1367 | } | 1422 | } |
1368 | } | 1423 | } |
@@ -1498,18 +1553,17 @@ static void setup_IO_APIC_irq(int apic, int pin, unsigned int irq, struct irq_de | |||
1498 | { | 1553 | { |
1499 | struct irq_cfg *cfg; | 1554 | struct irq_cfg *cfg; |
1500 | struct IO_APIC_route_entry entry; | 1555 | struct IO_APIC_route_entry entry; |
1501 | cpumask_t mask; | 1556 | unsigned int dest; |
1502 | 1557 | ||
1503 | if (!IO_APIC_IRQ(irq)) | 1558 | if (!IO_APIC_IRQ(irq)) |
1504 | return; | 1559 | return; |
1505 | 1560 | ||
1506 | cfg = desc->chip_data; | 1561 | cfg = desc->chip_data; |
1507 | 1562 | ||
1508 | mask = TARGET_CPUS; | 1563 | if (assign_irq_vector(irq, cfg, TARGET_CPUS)) |
1509 | if (assign_irq_vector(irq, cfg, mask)) | ||
1510 | return; | 1564 | return; |
1511 | 1565 | ||
1512 | cpus_and(mask, cfg->domain, mask); | 1566 | dest = cpu_mask_to_apicid_and(cfg->domain, TARGET_CPUS); |
1513 | 1567 | ||
1514 | apic_printk(APIC_VERBOSE,KERN_DEBUG | 1568 | apic_printk(APIC_VERBOSE,KERN_DEBUG |
1515 | "IOAPIC[%d]: Set routing entry (%d-%d -> 0x%x -> " | 1569 | "IOAPIC[%d]: Set routing entry (%d-%d -> 0x%x -> " |
@@ -1519,8 +1573,7 @@ static void setup_IO_APIC_irq(int apic, int pin, unsigned int irq, struct irq_de | |||
1519 | 1573 | ||
1520 | 1574 | ||
1521 | if (setup_ioapic_entry(mp_ioapics[apic].mp_apicid, irq, &entry, | 1575 | if (setup_ioapic_entry(mp_ioapics[apic].mp_apicid, irq, &entry, |
1522 | cpu_mask_to_apicid(mask), trigger, polarity, | 1576 | dest, trigger, polarity, cfg->vector)) { |
1523 | cfg->vector)) { | ||
1524 | printk("Failed to setup ioapic entry for ioapic %d, pin %d\n", | 1577 | printk("Failed to setup ioapic entry for ioapic %d, pin %d\n", |
1525 | mp_ioapics[apic].mp_apicid, pin); | 1578 | mp_ioapics[apic].mp_apicid, pin); |
1526 | __clear_irq_vector(irq, cfg); | 1579 | __clear_irq_vector(irq, cfg); |
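The dest computation above is one instance of a conversion repeated throughout this patch; the equivalence it relies on is worth spelling out once:

/* Old (stack temporary, NR_CPUS/8 bytes):
 *	cpumask_t tmp;
 *	cpus_and(tmp, cfg->domain, TARGET_CPUS);
 *	dest = cpu_mask_to_apicid(tmp);
 *
 * New (no temporary; the helper ANDs internally):
 *	dest = cpu_mask_to_apicid_and(cfg->domain, TARGET_CPUS);
 */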
@@ -2240,7 +2293,7 @@ static int ioapic_retrigger_irq(unsigned int irq) | |||
2240 | unsigned long flags; | 2293 | unsigned long flags; |
2241 | 2294 | ||
2242 | spin_lock_irqsave(&vector_lock, flags); | 2295 | spin_lock_irqsave(&vector_lock, flags); |
2243 | send_IPI_mask(cpumask_of_cpu(first_cpu(cfg->domain)), cfg->vector); | 2296 | send_IPI_mask(cpumask_of(cpumask_first(cfg->domain)), cfg->vector); |
2244 | spin_unlock_irqrestore(&vector_lock, flags); | 2297 | spin_unlock_irqrestore(&vector_lock, flags); |
2245 | 2298 | ||
2246 | return 1; | 2299 | return 1; |
@@ -2289,18 +2342,17 @@ static DECLARE_DELAYED_WORK(ir_migration_work, ir_irq_migration); | |||
2289 | * as simple as edge triggered migration and we can do the irq migration | 2342 | * as simple as edge triggered migration and we can do the irq migration |
2290 | * with a simple atomic update to IO-APIC RTE. | 2343 | * with a simple atomic update to IO-APIC RTE. |
2291 | */ | 2344 | */ |
2292 | static void migrate_ioapic_irq_desc(struct irq_desc *desc, cpumask_t mask) | 2345 | static void |
2346 | migrate_ioapic_irq_desc(struct irq_desc *desc, const struct cpumask *mask) | ||
2293 | { | 2347 | { |
2294 | struct irq_cfg *cfg; | 2348 | struct irq_cfg *cfg; |
2295 | cpumask_t tmp, cleanup_mask; | ||
2296 | struct irte irte; | 2349 | struct irte irte; |
2297 | int modify_ioapic_rte; | 2350 | int modify_ioapic_rte; |
2298 | unsigned int dest; | 2351 | unsigned int dest; |
2299 | unsigned long flags; | 2352 | unsigned long flags; |
2300 | unsigned int irq; | 2353 | unsigned int irq; |
2301 | 2354 | ||
2302 | cpus_and(tmp, mask, cpu_online_map); | 2355 | if (!cpumask_intersects(mask, cpu_online_mask)) |
2303 | if (cpus_empty(tmp)) | ||
2304 | return; | 2356 | return; |
2305 | 2357 | ||
2306 | irq = desc->irq; | 2358 | irq = desc->irq; |
@@ -2313,8 +2365,7 @@ static void migrate_ioapic_irq_desc(struct irq_desc *desc, cpumask_t mask) | |||
2313 | 2365 | ||
2314 | set_extra_move_desc(desc, mask); | 2366 | set_extra_move_desc(desc, mask); |
2315 | 2367 | ||
2316 | cpus_and(tmp, cfg->domain, mask); | 2368 | dest = cpu_mask_to_apicid_and(cfg->domain, mask); |
2317 | dest = cpu_mask_to_apicid(tmp); | ||
2318 | 2369 | ||
2319 | modify_ioapic_rte = desc->status & IRQ_LEVEL; | 2370 | modify_ioapic_rte = desc->status & IRQ_LEVEL; |
2320 | if (modify_ioapic_rte) { | 2371 | if (modify_ioapic_rte) { |
@@ -2331,14 +2382,10 @@ static void migrate_ioapic_irq_desc(struct irq_desc *desc, cpumask_t mask) | |||
2331 | */ | 2382 | */ |
2332 | modify_irte(irq, &irte); | 2383 | modify_irte(irq, &irte); |
2333 | 2384 | ||
2334 | if (cfg->move_in_progress) { | 2385 | if (cfg->move_in_progress) |
2335 | cpus_and(cleanup_mask, cfg->old_domain, cpu_online_map); | 2386 | send_cleanup_vector(cfg); |
2336 | cfg->move_cleanup_count = cpus_weight(cleanup_mask); | ||
2337 | send_IPI_mask(cleanup_mask, IRQ_MOVE_CLEANUP_VECTOR); | ||
2338 | cfg->move_in_progress = 0; | ||
2339 | } | ||
2340 | 2387 | ||
2341 | desc->affinity = mask; | 2388 | cpumask_copy(&desc->affinity, mask); |
2342 | } | 2389 | } |
2343 | 2390 | ||
2344 | static int migrate_irq_remapped_level_desc(struct irq_desc *desc) | 2391 | static int migrate_irq_remapped_level_desc(struct irq_desc *desc) |
@@ -2360,11 +2407,11 @@ static int migrate_irq_remapped_level_desc(struct irq_desc *desc) | |||
2360 | } | 2407 | } |
2361 | 2408 | ||
2362 | /* everything is clear. we have right of way */ | 2409 | /* everything is clear. we have right of way */ |
2363 | migrate_ioapic_irq_desc(desc, desc->pending_mask); | 2410 | migrate_ioapic_irq_desc(desc, &desc->pending_mask); |
2364 | 2411 | ||
2365 | ret = 0; | 2412 | ret = 0; |
2366 | desc->status &= ~IRQ_MOVE_PENDING; | 2413 | desc->status &= ~IRQ_MOVE_PENDING; |
2367 | cpus_clear(desc->pending_mask); | 2414 | cpumask_clear(&desc->pending_mask); |
2368 | 2415 | ||
2369 | unmask: | 2416 | unmask: |
2370 | unmask_IO_APIC_irq_desc(desc); | 2417 | unmask_IO_APIC_irq_desc(desc); |
@@ -2389,7 +2436,7 @@ static void ir_irq_migration(struct work_struct *work) | |||
2389 | continue; | 2436 | continue; |
2390 | } | 2437 | } |
2391 | 2438 | ||
2392 | desc->chip->set_affinity(irq, desc->pending_mask); | 2439 | desc->chip->set_affinity(irq, &desc->pending_mask); |
2393 | spin_unlock_irqrestore(&desc->lock, flags); | 2440 | spin_unlock_irqrestore(&desc->lock, flags); |
2394 | } | 2441 | } |
2395 | } | 2442 | } |
@@ -2398,18 +2445,20 @@ static void ir_irq_migration(struct work_struct *work) | |||
2398 | /* | 2445 | /* |
2399 | * Migrates the IRQ destination in the process context. | 2446 | * Migrates the IRQ destination in the process context. |
2400 | */ | 2447 | */ |
2401 | static void set_ir_ioapic_affinity_irq_desc(struct irq_desc *desc, cpumask_t mask) | 2448 | static void set_ir_ioapic_affinity_irq_desc(struct irq_desc *desc, |
2449 | const struct cpumask *mask) | ||
2402 | { | 2450 | { |
2403 | if (desc->status & IRQ_LEVEL) { | 2451 | if (desc->status & IRQ_LEVEL) { |
2404 | desc->status |= IRQ_MOVE_PENDING; | 2452 | desc->status |= IRQ_MOVE_PENDING; |
2405 | desc->pending_mask = mask; | 2453 | cpumask_copy(&desc->pending_mask, mask); |
2406 | migrate_irq_remapped_level_desc(desc); | 2454 | migrate_irq_remapped_level_desc(desc); |
2407 | return; | 2455 | return; |
2408 | } | 2456 | } |
2409 | 2457 | ||
2410 | migrate_ioapic_irq_desc(desc, mask); | 2458 | migrate_ioapic_irq_desc(desc, mask); |
2411 | } | 2459 | } |
2412 | static void set_ir_ioapic_affinity_irq(unsigned int irq, cpumask_t mask) | 2460 | static void set_ir_ioapic_affinity_irq(unsigned int irq, |
2461 | const struct cpumask *mask) | ||
2413 | { | 2462 | { |
2414 | struct irq_desc *desc = irq_to_desc(irq); | 2463 | struct irq_desc *desc = irq_to_desc(irq); |
2415 | 2464 | ||
@@ -2444,7 +2493,7 @@ asmlinkage void smp_irq_move_cleanup_interrupt(void) | |||
2444 | if (!cfg->move_cleanup_count) | 2493 | if (!cfg->move_cleanup_count) |
2445 | goto unlock; | 2494 | goto unlock; |
2446 | 2495 | ||
2447 | if ((vector == cfg->vector) && cpu_isset(me, cfg->domain)) | 2496 | if (vector == cfg->vector && cpumask_test_cpu(me, cfg->domain)) |
2448 | goto unlock; | 2497 | goto unlock; |
2449 | 2498 | ||
2450 | __get_cpu_var(vector_irq)[vector] = -1; | 2499 | __get_cpu_var(vector_irq)[vector] = -1; |
@@ -2481,20 +2530,14 @@ static void irq_complete_move(struct irq_desc **descp) | |||
2481 | 2530 | ||
2482 | vector = ~get_irq_regs()->orig_ax; | 2531 | vector = ~get_irq_regs()->orig_ax; |
2483 | me = smp_processor_id(); | 2532 | me = smp_processor_id(); |
2484 | if ((vector == cfg->vector) && cpu_isset(me, cfg->domain)) { | ||
2485 | cpumask_t cleanup_mask; | ||
2486 | |||
2487 | #ifdef CONFIG_NUMA_MIGRATE_IRQ_DESC | 2533 | #ifdef CONFIG_NUMA_MIGRATE_IRQ_DESC |
2488 | *descp = desc = move_irq_desc(desc, me); | 2534 | *descp = desc = move_irq_desc(desc, me); |
2489 | /* get the new one */ | 2535 | /* get the new one */ |
2490 | cfg = desc->chip_data; | 2536 | cfg = desc->chip_data; |
2491 | #endif | 2537 | #endif |
2492 | 2538 | ||
2493 | cpus_and(cleanup_mask, cfg->old_domain, cpu_online_map); | 2539 | if (vector == cfg->vector && cpumask_test_cpu(me, cfg->domain)) |
2494 | cfg->move_cleanup_count = cpus_weight(cleanup_mask); | 2540 | send_cleanup_vector(cfg); |
2495 | send_IPI_mask(cleanup_mask, IRQ_MOVE_CLEANUP_VECTOR); | ||
2496 | cfg->move_in_progress = 0; | ||
2497 | } | ||
2498 | } | 2541 | } |
2499 | #else | 2542 | #else |
2500 | static inline void irq_complete_move(struct irq_desc **descp) {} | 2543 | static inline void irq_complete_move(struct irq_desc **descp) {} |
@@ -3216,16 +3259,13 @@ static int msi_compose_msg(struct pci_dev *pdev, unsigned int irq, struct msi_ms | |||
3216 | struct irq_cfg *cfg; | 3259 | struct irq_cfg *cfg; |
3217 | int err; | 3260 | int err; |
3218 | unsigned dest; | 3261 | unsigned dest; |
3219 | cpumask_t tmp; | ||
3220 | 3262 | ||
3221 | cfg = irq_cfg(irq); | 3263 | cfg = irq_cfg(irq); |
3222 | tmp = TARGET_CPUS; | 3264 | err = assign_irq_vector(irq, cfg, TARGET_CPUS); |
3223 | err = assign_irq_vector(irq, cfg, tmp); | ||
3224 | if (err) | 3265 | if (err) |
3225 | return err; | 3266 | return err; |
3226 | 3267 | ||
3227 | cpus_and(tmp, cfg->domain, tmp); | 3268 | dest = cpu_mask_to_apicid_and(cfg->domain, TARGET_CPUS); |
3228 | dest = cpu_mask_to_apicid(tmp); | ||
3229 | 3269 | ||
3230 | #ifdef CONFIG_INTR_REMAP | 3270 | #ifdef CONFIG_INTR_REMAP |
3231 | if (irq_remapped(irq)) { | 3271 | if (irq_remapped(irq)) { |
@@ -3279,26 +3319,18 @@ static int msi_compose_msg(struct pci_dev *pdev, unsigned int irq, struct msi_ms | |||
3279 | } | 3319 | } |
3280 | 3320 | ||
3281 | #ifdef CONFIG_SMP | 3321 | #ifdef CONFIG_SMP |
3282 | static void set_msi_irq_affinity(unsigned int irq, cpumask_t mask) | 3322 | static void set_msi_irq_affinity(unsigned int irq, const struct cpumask *mask) |
3283 | { | 3323 | { |
3284 | struct irq_desc *desc = irq_to_desc(irq); | 3324 | struct irq_desc *desc = irq_to_desc(irq); |
3285 | struct irq_cfg *cfg; | 3325 | struct irq_cfg *cfg; |
3286 | struct msi_msg msg; | 3326 | struct msi_msg msg; |
3287 | unsigned int dest; | 3327 | unsigned int dest; |
3288 | cpumask_t tmp; | ||
3289 | 3328 | ||
3290 | cpus_and(tmp, mask, cpu_online_map); | 3329 | dest = set_desc_affinity(desc, mask); |
3291 | if (cpus_empty(tmp)) | 3330 | if (dest == BAD_APICID) |
3292 | return; | 3331 | return; |
3293 | 3332 | ||
3294 | cfg = desc->chip_data; | 3333 | cfg = desc->chip_data; |
3295 | if (assign_irq_vector(irq, cfg, mask)) | ||
3296 | return; | ||
3297 | |||
3298 | set_extra_move_desc(desc, mask); | ||
3299 | |||
3300 | cpus_and(tmp, cfg->domain, mask); | ||
3301 | dest = cpu_mask_to_apicid(tmp); | ||
3302 | 3334 | ||
3303 | read_msi_msg_desc(desc, &msg); | 3335 | read_msi_msg_desc(desc, &msg); |
3304 | 3336 | ||
@@ -3308,37 +3340,27 @@ static void set_msi_irq_affinity(unsigned int irq, cpumask_t mask) | |||
3308 | msg.address_lo |= MSI_ADDR_DEST_ID(dest); | 3340 | msg.address_lo |= MSI_ADDR_DEST_ID(dest); |
3309 | 3341 | ||
3310 | write_msi_msg_desc(desc, &msg); | 3342 | write_msi_msg_desc(desc, &msg); |
3311 | desc->affinity = mask; | ||
3312 | } | 3343 | } |
3313 | #ifdef CONFIG_INTR_REMAP | 3344 | #ifdef CONFIG_INTR_REMAP |
3314 | /* | 3345 | /* |
3315 | * Migrate the MSI irq to another cpumask. This migration is | 3346 | * Migrate the MSI irq to another cpumask. This migration is |
3316 | * done in the process context using interrupt-remapping hardware. | 3347 | * done in the process context using interrupt-remapping hardware. |
3317 | */ | 3348 | */ |
3318 | static void ir_set_msi_irq_affinity(unsigned int irq, cpumask_t mask) | 3349 | static void |
3350 | ir_set_msi_irq_affinity(unsigned int irq, const struct cpumask *mask) | ||
3319 | { | 3351 | { |
3320 | struct irq_desc *desc = irq_to_desc(irq); | 3352 | struct irq_desc *desc = irq_to_desc(irq); |
3321 | struct irq_cfg *cfg; | 3353 | struct irq_cfg *cfg = desc->chip_data; |
3322 | unsigned int dest; | 3354 | unsigned int dest; |
3323 | cpumask_t tmp, cleanup_mask; | ||
3324 | struct irte irte; | 3355 | struct irte irte; |
3325 | 3356 | ||
3326 | cpus_and(tmp, mask, cpu_online_map); | ||
3327 | if (cpus_empty(tmp)) | ||
3328 | return; | ||
3329 | |||
3330 | if (get_irte(irq, &irte)) | 3357 | if (get_irte(irq, &irte)) |
3331 | return; | 3358 | return; |
3332 | 3359 | ||
3333 | cfg = desc->chip_data; | 3360 | dest = set_desc_affinity(desc, mask); |
3334 | if (assign_irq_vector(irq, cfg, mask)) | 3361 | if (dest == BAD_APICID) |
3335 | return; | 3362 | return; |
3336 | 3363 | ||
3337 | set_extra_move_desc(desc, mask); | ||
3338 | |||
3339 | cpus_and(tmp, cfg->domain, mask); | ||
3340 | dest = cpu_mask_to_apicid(tmp); | ||
3341 | |||
3342 | irte.vector = cfg->vector; | 3364 | irte.vector = cfg->vector; |
3343 | irte.dest_id = IRTE_DEST(dest); | 3365 | irte.dest_id = IRTE_DEST(dest); |
3344 | 3366 | ||
@@ -3352,14 +3374,8 @@ static void ir_set_msi_irq_affinity(unsigned int irq, cpumask_t mask) | |||
3352 | * at the new destination. So, time to cleanup the previous | 3374 | * at the new destination. So, time to cleanup the previous |
3353 | * vector allocation. | 3375 | * vector allocation. |
3354 | */ | 3376 | */ |
3355 | if (cfg->move_in_progress) { | 3377 | if (cfg->move_in_progress) |
3356 | cpus_and(cleanup_mask, cfg->old_domain, cpu_online_map); | 3378 | send_cleanup_vector(cfg); |
3357 | cfg->move_cleanup_count = cpus_weight(cleanup_mask); | ||
3358 | send_IPI_mask(cleanup_mask, IRQ_MOVE_CLEANUP_VECTOR); | ||
3359 | cfg->move_in_progress = 0; | ||
3360 | } | ||
3361 | |||
3362 | desc->affinity = mask; | ||
3363 | } | 3379 | } |
3364 | 3380 | ||
3365 | #endif | 3381 | #endif |
@@ -3550,26 +3566,18 @@ void arch_teardown_msi_irq(unsigned int irq) | |||
3550 | 3566 | ||
3551 | #ifdef CONFIG_DMAR | 3567 | #ifdef CONFIG_DMAR |
3552 | #ifdef CONFIG_SMP | 3568 | #ifdef CONFIG_SMP |
3553 | static void dmar_msi_set_affinity(unsigned int irq, cpumask_t mask) | 3569 | static void dmar_msi_set_affinity(unsigned int irq, const struct cpumask *mask) |
3554 | { | 3570 | { |
3555 | struct irq_desc *desc = irq_to_desc(irq); | 3571 | struct irq_desc *desc = irq_to_desc(irq); |
3556 | struct irq_cfg *cfg; | 3572 | struct irq_cfg *cfg; |
3557 | struct msi_msg msg; | 3573 | struct msi_msg msg; |
3558 | unsigned int dest; | 3574 | unsigned int dest; |
3559 | cpumask_t tmp; | ||
3560 | 3575 | ||
3561 | cpus_and(tmp, mask, cpu_online_map); | 3576 | dest = set_desc_affinity(desc, mask); |
3562 | if (cpus_empty(tmp)) | 3577 | if (dest == BAD_APICID) |
3563 | return; | 3578 | return; |
3564 | 3579 | ||
3565 | cfg = desc->chip_data; | 3580 | cfg = desc->chip_data; |
3566 | if (assign_irq_vector(irq, cfg, mask)) | ||
3567 | return; | ||
3568 | |||
3569 | set_extra_move_desc(desc, mask); | ||
3570 | |||
3571 | cpus_and(tmp, cfg->domain, mask); | ||
3572 | dest = cpu_mask_to_apicid(tmp); | ||
3573 | 3581 | ||
3574 | dmar_msi_read(irq, &msg); | 3582 | dmar_msi_read(irq, &msg); |
3575 | 3583 | ||
@@ -3579,7 +3587,6 @@ static void dmar_msi_set_affinity(unsigned int irq, cpumask_t mask) | |||
3579 | msg.address_lo |= MSI_ADDR_DEST_ID(dest); | 3587 | msg.address_lo |= MSI_ADDR_DEST_ID(dest); |
3580 | 3588 | ||
3581 | dmar_msi_write(irq, &msg); | 3589 | dmar_msi_write(irq, &msg); |
3582 | desc->affinity = mask; | ||
3583 | } | 3590 | } |
3584 | 3591 | ||
3585 | #endif /* CONFIG_SMP */ | 3592 | #endif /* CONFIG_SMP */ |
@@ -3613,26 +3620,18 @@ int arch_setup_dmar_msi(unsigned int irq) | |||
3613 | #ifdef CONFIG_HPET_TIMER | 3620 | #ifdef CONFIG_HPET_TIMER |
3614 | 3621 | ||
3615 | #ifdef CONFIG_SMP | 3622 | #ifdef CONFIG_SMP |
3616 | static void hpet_msi_set_affinity(unsigned int irq, cpumask_t mask) | 3623 | static void hpet_msi_set_affinity(unsigned int irq, const struct cpumask *mask) |
3617 | { | 3624 | { |
3618 | struct irq_desc *desc = irq_to_desc(irq); | 3625 | struct irq_desc *desc = irq_to_desc(irq); |
3619 | struct irq_cfg *cfg; | 3626 | struct irq_cfg *cfg; |
3620 | struct msi_msg msg; | 3627 | struct msi_msg msg; |
3621 | unsigned int dest; | 3628 | unsigned int dest; |
3622 | cpumask_t tmp; | ||
3623 | 3629 | ||
3624 | cpus_and(tmp, mask, cpu_online_map); | 3630 | dest = set_desc_affinity(desc, mask); |
3625 | if (cpus_empty(tmp)) | 3631 | if (dest == BAD_APICID) |
3626 | return; | 3632 | return; |
3627 | 3633 | ||
3628 | cfg = desc->chip_data; | 3634 | cfg = desc->chip_data; |
3629 | if (assign_irq_vector(irq, cfg, mask)) | ||
3630 | return; | ||
3631 | |||
3632 | set_extra_move_desc(desc, mask); | ||
3633 | |||
3634 | cpus_and(tmp, cfg->domain, mask); | ||
3635 | dest = cpu_mask_to_apicid(tmp); | ||
3636 | 3635 | ||
3637 | hpet_msi_read(irq, &msg); | 3636 | hpet_msi_read(irq, &msg); |
3638 | 3637 | ||
@@ -3642,7 +3641,6 @@ static void hpet_msi_set_affinity(unsigned int irq, cpumask_t mask) | |||
3642 | msg.address_lo |= MSI_ADDR_DEST_ID(dest); | 3641 | msg.address_lo |= MSI_ADDR_DEST_ID(dest); |
3643 | 3642 | ||
3644 | hpet_msi_write(irq, &msg); | 3643 | hpet_msi_write(irq, &msg); |
3645 | desc->affinity = mask; | ||
3646 | } | 3644 | } |
3647 | 3645 | ||
3648 | #endif /* CONFIG_SMP */ | 3646 | #endif /* CONFIG_SMP */ |
@@ -3697,28 +3695,19 @@ static void target_ht_irq(unsigned int irq, unsigned int dest, u8 vector) | |||
3697 | write_ht_irq_msg(irq, &msg); | 3695 | write_ht_irq_msg(irq, &msg); |
3698 | } | 3696 | } |
3699 | 3697 | ||
3700 | static void set_ht_irq_affinity(unsigned int irq, cpumask_t mask) | 3698 | static void set_ht_irq_affinity(unsigned int irq, const struct cpumask *mask) |
3701 | { | 3699 | { |
3702 | struct irq_desc *desc = irq_to_desc(irq); | 3700 | struct irq_desc *desc = irq_to_desc(irq); |
3703 | struct irq_cfg *cfg; | 3701 | struct irq_cfg *cfg; |
3704 | unsigned int dest; | 3702 | unsigned int dest; |
3705 | cpumask_t tmp; | ||
3706 | 3703 | ||
3707 | cpus_and(tmp, mask, cpu_online_map); | 3704 | dest = set_desc_affinity(desc, mask); |
3708 | if (cpus_empty(tmp)) | 3705 | if (dest == BAD_APICID) |
3709 | return; | 3706 | return; |
3710 | 3707 | ||
3711 | cfg = desc->chip_data; | 3708 | cfg = desc->chip_data; |
3712 | if (assign_irq_vector(irq, cfg, mask)) | ||
3713 | return; | ||
3714 | |||
3715 | set_extra_move_desc(desc, mask); | ||
3716 | |||
3717 | cpus_and(tmp, cfg->domain, mask); | ||
3718 | dest = cpu_mask_to_apicid(tmp); | ||
3719 | 3709 | ||
3720 | target_ht_irq(irq, dest, cfg->vector); | 3710 | target_ht_irq(irq, dest, cfg->vector); |
3721 | desc->affinity = mask; | ||
3722 | } | 3711 | } |
3723 | 3712 | ||
3724 | #endif | 3713 | #endif |
@@ -3738,17 +3727,14 @@ int arch_setup_ht_irq(unsigned int irq, struct pci_dev *dev) | |||
3738 | { | 3727 | { |
3739 | struct irq_cfg *cfg; | 3728 | struct irq_cfg *cfg; |
3740 | int err; | 3729 | int err; |
3741 | cpumask_t tmp; | ||
3742 | 3730 | ||
3743 | cfg = irq_cfg(irq); | 3731 | cfg = irq_cfg(irq); |
3744 | tmp = TARGET_CPUS; | 3732 | err = assign_irq_vector(irq, cfg, TARGET_CPUS); |
3745 | err = assign_irq_vector(irq, cfg, tmp); | ||
3746 | if (!err) { | 3733 | if (!err) { |
3747 | struct ht_irq_msg msg; | 3734 | struct ht_irq_msg msg; |
3748 | unsigned dest; | 3735 | unsigned dest; |
3749 | 3736 | ||
3750 | cpus_and(tmp, cfg->domain, tmp); | 3737 | dest = cpu_mask_to_apicid_and(cfg->domain, TARGET_CPUS); |
3751 | dest = cpu_mask_to_apicid(tmp); | ||
3752 | 3738 | ||
3753 | msg.address_hi = HT_IRQ_HIGH_DEST_ID(dest); | 3739 | msg.address_hi = HT_IRQ_HIGH_DEST_ID(dest); |
3754 | 3740 | ||
@@ -3784,7 +3770,7 @@ int arch_setup_ht_irq(unsigned int irq, struct pci_dev *dev) | |||
3784 | int arch_enable_uv_irq(char *irq_name, unsigned int irq, int cpu, int mmr_blade, | 3770 | int arch_enable_uv_irq(char *irq_name, unsigned int irq, int cpu, int mmr_blade, |
3785 | unsigned long mmr_offset) | 3771 | unsigned long mmr_offset) |
3786 | { | 3772 | { |
3787 | const cpumask_t *eligible_cpu = get_cpu_mask(cpu); | 3773 | const struct cpumask *eligible_cpu = cpumask_of(cpu); |
3788 | struct irq_cfg *cfg; | 3774 | struct irq_cfg *cfg; |
3789 | int mmr_pnode; | 3775 | int mmr_pnode; |
3790 | unsigned long mmr_value; | 3776 | unsigned long mmr_value; |
@@ -3794,7 +3780,7 @@ int arch_enable_uv_irq(char *irq_name, unsigned int irq, int cpu, int mmr_blade, | |||
3794 | 3780 | ||
3795 | cfg = irq_cfg(irq); | 3781 | cfg = irq_cfg(irq); |
3796 | 3782 | ||
3797 | err = assign_irq_vector(irq, cfg, *eligible_cpu); | 3783 | err = assign_irq_vector(irq, cfg, eligible_cpu); |
3798 | if (err != 0) | 3784 | if (err != 0) |
3799 | return err; | 3785 | return err; |
3800 | 3786 | ||
@@ -3813,7 +3799,7 @@ int arch_enable_uv_irq(char *irq_name, unsigned int irq, int cpu, int mmr_blade, | |||
3813 | entry->polarity = 0; | 3799 | entry->polarity = 0; |
3814 | entry->trigger = 0; | 3800 | entry->trigger = 0; |
3815 | entry->mask = 0; | 3801 | entry->mask = 0; |
3816 | entry->dest = cpu_mask_to_apicid(*eligible_cpu); | 3802 | entry->dest = cpu_mask_to_apicid(eligible_cpu); |
3817 | 3803 | ||
3818 | mmr_pnode = uv_blade_to_pnode(mmr_blade); | 3804 | mmr_pnode = uv_blade_to_pnode(mmr_blade); |
3819 | uv_write_global_mmr64(mmr_pnode, mmr_offset, mmr_value); | 3805 | uv_write_global_mmr64(mmr_pnode, mmr_offset, mmr_value); |
@@ -4024,7 +4010,7 @@ void __init setup_ioapic_dest(void) | |||
4024 | int pin, ioapic, irq, irq_entry; | 4010 | int pin, ioapic, irq, irq_entry; |
4025 | struct irq_desc *desc; | 4011 | struct irq_desc *desc; |
4026 | struct irq_cfg *cfg; | 4012 | struct irq_cfg *cfg; |
4027 | cpumask_t mask; | 4013 | const struct cpumask *mask; |
4028 | 4014 | ||
4029 | if (skip_ioapic_setup == 1) | 4015 | if (skip_ioapic_setup == 1) |
4030 | return; | 4016 | return; |
@@ -4055,7 +4041,7 @@ void __init setup_ioapic_dest(void) | |||
4055 | */ | 4041 | */ |
4056 | if (desc->status & | 4042 | if (desc->status & |
4057 | (IRQ_NO_BALANCING | IRQ_AFFINITY_SET)) | 4043 | (IRQ_NO_BALANCING | IRQ_AFFINITY_SET)) |
4058 | mask = desc->affinity; | 4044 | mask = &desc->affinity; |
4059 | else | 4045 | else |
4060 | mask = TARGET_CPUS; | 4046 | mask = TARGET_CPUS; |
4061 | 4047 | ||
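setup_ioapic_dest() can hold a const pointer instead of a cpumask copy because both candidates outlive the loop body — desc->affinity is embedded in the descriptor and TARGET_CPUS names a long-lived mask. The shape, isolated as an illustrative fragment:

/* Pick between two long-lived masks by pointer, no copying. */
const struct cpumask *mask;

mask = (desc->status & (IRQ_NO_BALANCING | IRQ_AFFINITY_SET))
	? &desc->affinity
	: TARGET_CPUS;
desc->chip->set_affinity(irq, mask);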
diff --git a/arch/x86/kernel/ipi.c b/arch/x86/kernel/ipi.c index f1c688e46f35..285bbf8831fa 100644 --- a/arch/x86/kernel/ipi.c +++ b/arch/x86/kernel/ipi.c | |||
@@ -116,18 +116,18 @@ static inline void __send_IPI_dest_field(unsigned long mask, int vector) | |||
116 | /* | 116 | /* |
117 | * This is only used on smaller machines. | 117 | * This is only used on smaller machines. |
118 | */ | 118 | */ |
119 | void send_IPI_mask_bitmask(cpumask_t cpumask, int vector) | 119 | void send_IPI_mask_bitmask(const struct cpumask *cpumask, int vector) |
120 | { | 120 | { |
121 | unsigned long mask = cpus_addr(cpumask)[0]; | 121 | unsigned long mask = cpumask_bits(cpumask)[0]; |
122 | unsigned long flags; | 122 | unsigned long flags; |
123 | 123 | ||
124 | local_irq_save(flags); | 124 | local_irq_save(flags); |
125 | WARN_ON(mask & ~cpus_addr(cpu_online_map)[0]); | 125 | WARN_ON(mask & ~cpumask_bits(cpu_online_mask)[0]); |
126 | __send_IPI_dest_field(mask, vector); | 126 | __send_IPI_dest_field(mask, vector); |
127 | local_irq_restore(flags); | 127 | local_irq_restore(flags); |
128 | } | 128 | } |
129 | 129 | ||
130 | void send_IPI_mask_sequence(cpumask_t mask, int vector) | 130 | void send_IPI_mask_sequence(const struct cpumask *mask, int vector) |
131 | { | 131 | { |
132 | unsigned long flags; | 132 | unsigned long flags; |
133 | unsigned int query_cpu; | 133 | unsigned int query_cpu; |
@@ -139,12 +139,24 @@ void send_IPI_mask_sequence(cpumask_t mask, int vector) | |||
139 | */ | 139 | */ |
140 | 140 | ||
141 | local_irq_save(flags); | 141 | local_irq_save(flags); |
142 | for_each_possible_cpu(query_cpu) { | 142 | for_each_cpu(query_cpu, mask) |
143 | if (cpu_isset(query_cpu, mask)) { | 143 | __send_IPI_dest_field(cpu_to_logical_apicid(query_cpu), vector); |
144 | local_irq_restore(flags); | ||
145 | } | ||
146 | |||
147 | void send_IPI_mask_allbutself(const struct cpumask *mask, int vector) | ||
148 | { | ||
149 | unsigned long flags; | ||
150 | unsigned int query_cpu; | ||
151 | unsigned int this_cpu = smp_processor_id(); | ||
152 | |||
153 | /* See Hack comment above */ | ||
154 | |||
155 | local_irq_save(flags); | ||
156 | for_each_cpu(query_cpu, mask) | ||
157 | if (query_cpu != this_cpu) | ||
144 | __send_IPI_dest_field(cpu_to_logical_apicid(query_cpu), | 158 | __send_IPI_dest_field(cpu_to_logical_apicid(query_cpu), |
145 | vector); | 159 | vector); |
146 | } | ||
147 | } | ||
148 | local_irq_restore(flags); | 160 | local_irq_restore(flags); |
149 | } | 161 | } |
150 | 162 | ||
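send_IPI_mask_allbutself() above iterates the mask and skips the executing cpu rather than building a trimmed copy. The reusable shape of that loop, as a hedged kernel-style sketch (visit_allbutself() is hypothetical):

#include <linux/cpumask.h>
#include <linux/smp.h>

/* Caller must be non-preemptible (the real code runs with irqs off),
 * or smp_processor_id() is not stable. */
static void visit_allbutself(const struct cpumask *mask,
			     void (*visit)(unsigned int cpu))
{
	unsigned int cpu, this_cpu = smp_processor_id();

	for_each_cpu(cpu, mask)
		if (cpu != this_cpu)
			visit(cpu);
}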
diff --git a/arch/x86/kernel/irq.c b/arch/x86/kernel/irq.c index 3f1d9d18df67..bce53e1352a0 100644 --- a/arch/x86/kernel/irq.c +++ b/arch/x86/kernel/irq.c | |||
@@ -9,6 +9,7 @@ | |||
9 | #include <asm/apic.h> | 9 | #include <asm/apic.h> |
10 | #include <asm/io_apic.h> | 10 | #include <asm/io_apic.h> |
11 | #include <asm/smp.h> | 11 | #include <asm/smp.h> |
12 | #include <asm/irq.h> | ||
12 | 13 | ||
13 | atomic_t irq_err_count; | 14 | atomic_t irq_err_count; |
14 | 15 | ||
@@ -190,3 +191,5 @@ u64 arch_irq_stat(void) | |||
190 | #endif | 191 | #endif |
191 | return sum; | 192 | return sum; |
192 | } | 193 | } |
194 | |||
195 | EXPORT_SYMBOL_GPL(vector_used_by_percpu_irq); | ||
diff --git a/arch/x86/kernel/irq_32.c b/arch/x86/kernel/irq_32.c index 119fc9c8ff7f..9dc5588f336a 100644 --- a/arch/x86/kernel/irq_32.c +++ b/arch/x86/kernel/irq_32.c | |||
@@ -233,27 +233,28 @@ unsigned int do_IRQ(struct pt_regs *regs) | |||
233 | #ifdef CONFIG_HOTPLUG_CPU | 233 | #ifdef CONFIG_HOTPLUG_CPU |
234 | #include <mach_apic.h> | 234 | #include <mach_apic.h> |
235 | 235 | ||
236 | void fixup_irqs(cpumask_t map) | 236 | /* A cpu has been removed from cpu_online_mask. Reset irq affinities. */ |
237 | void fixup_irqs(void) | ||
237 | { | 238 | { |
238 | unsigned int irq; | 239 | unsigned int irq; |
239 | static int warned; | 240 | static int warned; |
240 | struct irq_desc *desc; | 241 | struct irq_desc *desc; |
241 | 242 | ||
242 | for_each_irq_desc(irq, desc) { | 243 | for_each_irq_desc(irq, desc) { |
243 | cpumask_t mask; | 244 | const struct cpumask *affinity; |
244 | 245 | ||
245 | if (!desc) | 246 | if (!desc) |
246 | continue; | 247 | continue; |
247 | if (irq == 2) | 248 | if (irq == 2) |
248 | continue; | 249 | continue; |
249 | 250 | ||
250 | cpus_and(mask, desc->affinity, map); | 251 | affinity = &desc->affinity; |
251 | if (any_online_cpu(mask) == NR_CPUS) { | 252 | if (cpumask_any_and(affinity, cpu_online_mask) >= nr_cpu_ids) { |
252 | printk("Breaking affinity for irq %i\n", irq); | 253 | printk("Breaking affinity for irq %i\n", irq); |
253 | mask = map; | 254 | affinity = cpu_all_mask; |
254 | } | 255 | } |
255 | if (desc->chip->set_affinity) | 256 | if (desc->chip->set_affinity) |
256 | desc->chip->set_affinity(irq, mask); | 257 | desc->chip->set_affinity(irq, affinity); |
257 | else if (desc->action && !(warned++)) | 258 | else if (desc->action && !(warned++)) |
258 | printk("Cannot set affinity for irq %i\n", irq); | 259 | printk("Cannot set affinity for irq %i\n", irq); |
259 | } | 260 | } |
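fixup_irqs() tests "does this affinity mask contain any online cpu?" via cpumask_any_and(), which returns nr_cpu_ids (greater than any valid cpu number) when the intersection is empty. Isolated as a hedged helper:

#include <linux/cpumask.h>

/* Illustrative: true if at least one cpu in @mask is online. */
static bool mask_has_online_cpu(const struct cpumask *mask)
{
	return cpumask_any_and(mask, cpu_online_mask) < nr_cpu_ids;
}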
diff --git a/arch/x86/kernel/irq_64.c b/arch/x86/kernel/irq_64.c index a174a217eb1a..6383d50f82ea 100644 --- a/arch/x86/kernel/irq_64.c +++ b/arch/x86/kernel/irq_64.c | |||
@@ -80,16 +80,17 @@ asmlinkage unsigned int __irq_entry do_IRQ(struct pt_regs *regs) | |||
80 | } | 80 | } |
81 | 81 | ||
82 | #ifdef CONFIG_HOTPLUG_CPU | 82 | #ifdef CONFIG_HOTPLUG_CPU |
83 | void fixup_irqs(cpumask_t map) | 83 | /* A cpu has been removed from cpu_online_mask. Reset irq affinities. */ |
84 | void fixup_irqs(void) | ||
84 | { | 85 | { |
85 | unsigned int irq; | 86 | unsigned int irq; |
86 | static int warned; | 87 | static int warned; |
87 | struct irq_desc *desc; | 88 | struct irq_desc *desc; |
88 | 89 | ||
89 | for_each_irq_desc(irq, desc) { | 90 | for_each_irq_desc(irq, desc) { |
90 | cpumask_t mask; | ||
91 | int break_affinity = 0; | 91 | int break_affinity = 0; |
92 | int set_affinity = 1; | 92 | int set_affinity = 1; |
93 | const struct cpumask *affinity; | ||
93 | 94 | ||
94 | if (!desc) | 95 | if (!desc) |
95 | continue; | 96 | continue; |
@@ -99,23 +100,23 @@ void fixup_irqs(cpumask_t map) | |||
99 | /* interrupts are disabled at this point */ | 100 | /* interrupts are disabled at this point */ |
100 | spin_lock(&desc->lock); | 101 | spin_lock(&desc->lock); |
101 | 102 | ||
103 | affinity = &desc->affinity; | ||
102 | if (!irq_has_action(irq) || | 104 | if (!irq_has_action(irq) || |
103 | cpus_equal(desc->affinity, map)) { | 105 | cpumask_equal(affinity, cpu_online_mask)) { |
104 | spin_unlock(&desc->lock); | 106 | spin_unlock(&desc->lock); |
105 | continue; | 107 | continue; |
106 | } | 108 | } |
107 | 109 | ||
108 | cpus_and(mask, desc->affinity, map); | 110 | if (cpumask_any_and(affinity, cpu_online_mask) >= nr_cpu_ids) { |
109 | if (cpus_empty(mask)) { | ||
110 | break_affinity = 1; | 111 | break_affinity = 1; |
111 | mask = map; | 112 | affinity = cpu_all_mask; |
112 | } | 113 | } |
113 | 114 | ||
114 | if (desc->chip->mask) | 115 | if (desc->chip->mask) |
115 | desc->chip->mask(irq); | 116 | desc->chip->mask(irq); |
116 | 117 | ||
117 | if (desc->chip->set_affinity) | 118 | if (desc->chip->set_affinity) |
118 | desc->chip->set_affinity(irq, mask); | 119 | desc->chip->set_affinity(irq, affinity); |
119 | else if (!(warned++)) | 120 | else if (!(warned++)) |
120 | set_affinity = 0; | 121 | set_affinity = 0; |
121 | 122 | ||
diff --git a/arch/x86/kernel/irqinit_32.c b/arch/x86/kernel/irqinit_32.c index 203384ed2b5d..84723295f88a 100644 --- a/arch/x86/kernel/irqinit_32.c +++ b/arch/x86/kernel/irqinit_32.c | |||
@@ -110,6 +110,18 @@ DEFINE_PER_CPU(vector_irq_t, vector_irq) = { | |||
110 | [IRQ15_VECTOR + 1 ... NR_VECTORS - 1] = -1 | 110 | [IRQ15_VECTOR + 1 ... NR_VECTORS - 1] = -1 |
111 | }; | 111 | }; |
112 | 112 | ||
113 | int vector_used_by_percpu_irq(unsigned int vector) | ||
114 | { | ||
115 | int cpu; | ||
116 | |||
117 | for_each_online_cpu(cpu) { | ||
118 | if (per_cpu(vector_irq, cpu)[vector] != -1) | ||
119 | return 1; | ||
120 | } | ||
121 | |||
122 | return 0; | ||
123 | } | ||
124 | |||
113 | /* Overridden in paravirt.c */ | 125 | /* Overridden in paravirt.c */ |
114 | void init_IRQ(void) __attribute__((weak, alias("native_init_IRQ"))); | 126 | void init_IRQ(void) __attribute__((weak, alias("native_init_IRQ"))); |
115 | 127 | ||
@@ -146,10 +158,12 @@ void __init native_init_IRQ(void) | |||
146 | alloc_intr_gate(CALL_FUNCTION_VECTOR, call_function_interrupt); | 158 | alloc_intr_gate(CALL_FUNCTION_VECTOR, call_function_interrupt); |
147 | 159 | ||
148 | /* IPI for single call function */ | 160 | /* IPI for single call function */ |
149 | set_intr_gate(CALL_FUNCTION_SINGLE_VECTOR, call_function_single_interrupt); | 161 | alloc_intr_gate(CALL_FUNCTION_SINGLE_VECTOR, |
162 | call_function_single_interrupt); | ||
150 | 163 | ||
151 | /* Low priority IPI to cleanup after moving an irq */ | 164 | /* Low priority IPI to cleanup after moving an irq */ |
152 | set_intr_gate(IRQ_MOVE_CLEANUP_VECTOR, irq_move_cleanup_interrupt); | 165 | set_intr_gate(IRQ_MOVE_CLEANUP_VECTOR, irq_move_cleanup_interrupt); |
166 | set_bit(IRQ_MOVE_CLEANUP_VECTOR, used_vectors); | ||
153 | #endif | 167 | #endif |
154 | 168 | ||
155 | #ifdef CONFIG_X86_LOCAL_APIC | 169 | #ifdef CONFIG_X86_LOCAL_APIC |
diff --git a/arch/x86/kernel/irqinit_64.c b/arch/x86/kernel/irqinit_64.c index 6190e6ef546c..31ebfe38e96c 100644 --- a/arch/x86/kernel/irqinit_64.c +++ b/arch/x86/kernel/irqinit_64.c | |||
@@ -69,6 +69,18 @@ DEFINE_PER_CPU(vector_irq_t, vector_irq) = { | |||
69 | [IRQ15_VECTOR + 1 ... NR_VECTORS - 1] = -1 | 69 | [IRQ15_VECTOR + 1 ... NR_VECTORS - 1] = -1 |
70 | }; | 70 | }; |
71 | 71 | ||
72 | int vector_used_by_percpu_irq(unsigned int vector) | ||
73 | { | ||
74 | int cpu; | ||
75 | |||
76 | for_each_online_cpu(cpu) { | ||
77 | if (per_cpu(vector_irq, cpu)[vector] != -1) | ||
78 | return 1; | ||
79 | } | ||
80 | |||
81 | return 0; | ||
82 | } | ||
83 | |||
72 | void __init init_ISA_irqs(void) | 84 | void __init init_ISA_irqs(void) |
73 | { | 85 | { |
74 | int i; | 86 | int i; |
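vector_used_by_percpu_irq() answers whether any online cpu has bound the vector to an irq; callers use it to keep per-cpu vectors from being handed out twice. A hedged usage sketch — MY_VECTOR and my_interrupt() are hypothetical:

static void claim_percpu_vector(void)
{
	if (vector_used_by_percpu_irq(MY_VECTOR)) {
		printk(KERN_WARNING "vector 0x%x busy on some cpu\n",
		       MY_VECTOR);
		return;
	}
	alloc_intr_gate(MY_VECTOR, my_interrupt);
}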
@@ -121,6 +133,7 @@ static void __init smp_intr_init(void) | |||
121 | 133 | ||
122 | /* Low priority IPI to cleanup after moving an irq */ | 134 | /* Low priority IPI to cleanup after moving an irq */ |
123 | set_intr_gate(IRQ_MOVE_CLEANUP_VECTOR, irq_move_cleanup_interrupt); | 135 | set_intr_gate(IRQ_MOVE_CLEANUP_VECTOR, irq_move_cleanup_interrupt); |
136 | set_bit(IRQ_MOVE_CLEANUP_VECTOR, used_vectors); | ||
124 | #endif | 137 | #endif |
125 | } | 138 | } |
126 | 139 | ||
diff --git a/arch/x86/kernel/kvmclock.c b/arch/x86/kernel/kvmclock.c index e169ae9b6a62..652fce6d2cce 100644 --- a/arch/x86/kernel/kvmclock.c +++ b/arch/x86/kernel/kvmclock.c | |||
@@ -89,17 +89,17 @@ static cycle_t kvm_clock_read(void) | |||
89 | */ | 89 | */ |
90 | static unsigned long kvm_get_tsc_khz(void) | 90 | static unsigned long kvm_get_tsc_khz(void) |
91 | { | 91 | { |
92 | return preset_lpj; | 92 | struct pvclock_vcpu_time_info *src; |
93 | src = &per_cpu(hv_clock, 0); | ||
94 | return pvclock_tsc_khz(src); | ||
93 | } | 95 | } |
94 | 96 | ||
95 | static void kvm_get_preset_lpj(void) | 97 | static void kvm_get_preset_lpj(void) |
96 | { | 98 | { |
97 | struct pvclock_vcpu_time_info *src; | ||
98 | unsigned long khz; | 99 | unsigned long khz; |
99 | u64 lpj; | 100 | u64 lpj; |
100 | 101 | ||
101 | src = &per_cpu(hv_clock, 0); | 102 | khz = kvm_get_tsc_khz(); |
102 | khz = pvclock_tsc_khz(src); | ||
103 | 103 | ||
104 | lpj = ((u64)khz * 1000); | 104 | lpj = ((u64)khz * 1000); |
105 | do_div(lpj, HZ); | 105 | do_div(lpj, HZ); |
@@ -194,5 +194,7 @@ void __init kvmclock_init(void) | |||
194 | #endif | 194 | #endif |
195 | kvm_get_preset_lpj(); | 195 | kvm_get_preset_lpj(); |
196 | clocksource_register(&kvm_clock); | 196 | clocksource_register(&kvm_clock); |
197 | pv_info.paravirt_enabled = 1; | ||
198 | pv_info.name = "KVM"; | ||
197 | } | 199 | } |
198 | } | 200 | } |
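kvm_get_preset_lpj() derives loops-per-jiffy from the TSC rate as lpj = tsc_khz * 1000 / HZ. A userspace check of the arithmetic (values illustrative):

#include <stdio.h>

int main(void)
{
	unsigned long khz = 2400000;	/* 2.4 GHz TSC */
	unsigned long hz = 250;		/* models HZ */
	unsigned long long lpj = (unsigned long long)khz * 1000 / hz;

	printf("lpj = %llu\n", lpj);	/* 9600000 */
	return 0;
}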
diff --git a/arch/x86/kernel/ldt.c b/arch/x86/kernel/ldt.c index eee32b43fee3..71f1d99a635d 100644 --- a/arch/x86/kernel/ldt.c +++ b/arch/x86/kernel/ldt.c | |||
@@ -12,8 +12,8 @@ | |||
12 | #include <linux/mm.h> | 12 | #include <linux/mm.h> |
13 | #include <linux/smp.h> | 13 | #include <linux/smp.h> |
14 | #include <linux/vmalloc.h> | 14 | #include <linux/vmalloc.h> |
15 | #include <linux/uaccess.h> | ||
15 | 16 | ||
16 | #include <asm/uaccess.h> | ||
17 | #include <asm/system.h> | 17 | #include <asm/system.h> |
18 | #include <asm/ldt.h> | 18 | #include <asm/ldt.h> |
19 | #include <asm/desc.h> | 19 | #include <asm/desc.h> |
@@ -93,7 +93,7 @@ static inline int copy_ldt(mm_context_t *new, mm_context_t *old) | |||
93 | if (err < 0) | 93 | if (err < 0) |
94 | return err; | 94 | return err; |
95 | 95 | ||
96 | for(i = 0; i < old->size; i++) | 96 | for (i = 0; i < old->size; i++) |
97 | write_ldt_entry(new->ldt, i, old->ldt + i * LDT_ENTRY_SIZE); | 97 | write_ldt_entry(new->ldt, i, old->ldt + i * LDT_ENTRY_SIZE); |
98 | return 0; | 98 | return 0; |
99 | } | 99 | } |
diff --git a/arch/x86/kernel/mfgpt_32.c b/arch/x86/kernel/mfgpt_32.c index 3b599518c322..c12314c9e86f 100644 --- a/arch/x86/kernel/mfgpt_32.c +++ b/arch/x86/kernel/mfgpt_32.c | |||
@@ -287,7 +287,7 @@ static struct clock_event_device mfgpt_clockevent = { | |||
287 | .set_mode = mfgpt_set_mode, | 287 | .set_mode = mfgpt_set_mode, |
288 | .set_next_event = mfgpt_next_event, | 288 | .set_next_event = mfgpt_next_event, |
289 | .rating = 250, | 289 | .rating = 250, |
290 | .cpumask = CPU_MASK_ALL, | 290 | .cpumask = cpu_all_mask, |
291 | .shift = 32 | 291 | .shift = 32 |
292 | }; | 292 | }; |
293 | 293 | ||
diff --git a/arch/x86/kernel/mmconf-fam10h_64.c b/arch/x86/kernel/mmconf-fam10h_64.c index efc2f361fe85..666e43df51f9 100644 --- a/arch/x86/kernel/mmconf-fam10h_64.c +++ b/arch/x86/kernel/mmconf-fam10h_64.c | |||
@@ -13,8 +13,7 @@ | |||
13 | #include <asm/msr.h> | 13 | #include <asm/msr.h> |
14 | #include <asm/acpi.h> | 14 | #include <asm/acpi.h> |
15 | #include <asm/mmconfig.h> | 15 | #include <asm/mmconfig.h> |
16 | 16 | #include <asm/pci_x86.h> | |
17 | #include "../pci/pci.h" | ||
18 | 17 | ||
19 | struct pci_hostbridge_probe { | 18 | struct pci_hostbridge_probe { |
20 | u32 bus; | 19 | u32 bus; |
diff --git a/arch/x86/kernel/mpparse.c b/arch/x86/kernel/mpparse.c index 45e3b69808ba..c5c5b8df1dbc 100644 --- a/arch/x86/kernel/mpparse.c +++ b/arch/x86/kernel/mpparse.c | |||
@@ -16,14 +16,14 @@ | |||
16 | #include <linux/bitops.h> | 16 | #include <linux/bitops.h> |
17 | #include <linux/acpi.h> | 17 | #include <linux/acpi.h> |
18 | #include <linux/module.h> | 18 | #include <linux/module.h> |
19 | #include <linux/smp.h> | ||
20 | #include <linux/acpi.h> | ||
19 | 21 | ||
20 | #include <asm/smp.h> | ||
21 | #include <asm/mtrr.h> | 22 | #include <asm/mtrr.h> |
22 | #include <asm/mpspec.h> | 23 | #include <asm/mpspec.h> |
23 | #include <asm/pgalloc.h> | 24 | #include <asm/pgalloc.h> |
24 | #include <asm/io_apic.h> | 25 | #include <asm/io_apic.h> |
25 | #include <asm/proto.h> | 26 | #include <asm/proto.h> |
26 | #include <asm/acpi.h> | ||
27 | #include <asm/bios_ebda.h> | 27 | #include <asm/bios_ebda.h> |
28 | #include <asm/e820.h> | 28 | #include <asm/e820.h> |
29 | #include <asm/trampoline.h> | 29 | #include <asm/trampoline.h> |
@@ -95,8 +95,8 @@ static void __init MP_bus_info(struct mpc_config_bus *m) | |||
95 | #endif | 95 | #endif |
96 | 96 | ||
97 | if (strncmp(str, BUSTYPE_ISA, sizeof(BUSTYPE_ISA) - 1) == 0) { | 97 | if (strncmp(str, BUSTYPE_ISA, sizeof(BUSTYPE_ISA) - 1) == 0) { |
98 | set_bit(m->mpc_busid, mp_bus_not_pci); | 98 | set_bit(m->mpc_busid, mp_bus_not_pci); |
99 | #if defined(CONFIG_EISA) || defined (CONFIG_MCA) | 99 | #if defined(CONFIG_EISA) || defined(CONFIG_MCA) |
100 | mp_bus_id_to_type[m->mpc_busid] = MP_BUS_ISA; | 100 | mp_bus_id_to_type[m->mpc_busid] = MP_BUS_ISA; |
101 | #endif | 101 | #endif |
102 | } else if (strncmp(str, BUSTYPE_PCI, sizeof(BUSTYPE_PCI) - 1) == 0) { | 102 | } else if (strncmp(str, BUSTYPE_PCI, sizeof(BUSTYPE_PCI) - 1) == 0) { |
@@ -104,7 +104,7 @@ static void __init MP_bus_info(struct mpc_config_bus *m) | |||
104 | x86_quirks->mpc_oem_pci_bus(m); | 104 | x86_quirks->mpc_oem_pci_bus(m); |
105 | 105 | ||
106 | clear_bit(m->mpc_busid, mp_bus_not_pci); | 106 | clear_bit(m->mpc_busid, mp_bus_not_pci); |
107 | #if defined(CONFIG_EISA) || defined (CONFIG_MCA) | 107 | #if defined(CONFIG_EISA) || defined(CONFIG_MCA) |
108 | mp_bus_id_to_type[m->mpc_busid] = MP_BUS_PCI; | 108 | mp_bus_id_to_type[m->mpc_busid] = MP_BUS_PCI; |
109 | } else if (strncmp(str, BUSTYPE_EISA, sizeof(BUSTYPE_EISA) - 1) == 0) { | 109 | } else if (strncmp(str, BUSTYPE_EISA, sizeof(BUSTYPE_EISA) - 1) == 0) { |
110 | mp_bus_id_to_type[m->mpc_busid] = MP_BUS_EISA; | 110 | mp_bus_id_to_type[m->mpc_busid] = MP_BUS_EISA; |
diff --git a/arch/x86/kernel/nmi.c b/arch/x86/kernel/nmi.c index 8bd1bf9622a7..45a09ccdc214 100644 --- a/arch/x86/kernel/nmi.c +++ b/arch/x86/kernel/nmi.c | |||
@@ -26,11 +26,10 @@ | |||
26 | #include <linux/kernel_stat.h> | 26 | #include <linux/kernel_stat.h> |
27 | #include <linux/kdebug.h> | 27 | #include <linux/kdebug.h> |
28 | #include <linux/smp.h> | 28 | #include <linux/smp.h> |
29 | #include <linux/nmi.h> | ||
29 | 30 | ||
30 | #include <asm/i8259.h> | 31 | #include <asm/i8259.h> |
31 | #include <asm/io_apic.h> | 32 | #include <asm/io_apic.h> |
32 | #include <asm/smp.h> | ||
33 | #include <asm/nmi.h> | ||
34 | #include <asm/proto.h> | 33 | #include <asm/proto.h> |
35 | #include <asm/timer.h> | 34 | #include <asm/timer.h> |
36 | 35 | ||
diff --git a/arch/x86/kernel/pci-gart_64.c b/arch/x86/kernel/pci-gart_64.c index a35eaa379ff6..00c2bcd41463 100644 --- a/arch/x86/kernel/pci-gart_64.c +++ b/arch/x86/kernel/pci-gart_64.c | |||
@@ -52,7 +52,7 @@ static u32 *iommu_gatt_base; /* Remapping table */ | |||
52 | * to trigger bugs with some popular PCI cards, in particular 3ware (but | 52 | * to trigger bugs with some popular PCI cards, in particular 3ware (but |
53 | * has also been seen with Qlogic at least). | 53 | * has also been seen with Qlogic at least). |
54 | */ | 54 | */ |
55 | int iommu_fullflush = 1; | 55 | static int iommu_fullflush = 1; |
56 | 56 | ||
57 | /* Allocation bitmap for the remapping area: */ | 57 | /* Allocation bitmap for the remapping area: */ |
58 | static DEFINE_SPINLOCK(iommu_bitmap_lock); | 58 | static DEFINE_SPINLOCK(iommu_bitmap_lock); |
diff --git a/arch/x86/kernel/reboot.c b/arch/x86/kernel/reboot.c index 61f718df6eec..bf088c61fa40 100644 --- a/arch/x86/kernel/reboot.c +++ b/arch/x86/kernel/reboot.c | |||
@@ -12,6 +12,8 @@ | |||
12 | #include <asm/proto.h> | 12 | #include <asm/proto.h> |
13 | #include <asm/reboot_fixups.h> | 13 | #include <asm/reboot_fixups.h> |
14 | #include <asm/reboot.h> | 14 | #include <asm/reboot.h> |
15 | #include <asm/pci_x86.h> | ||
16 | #include <asm/virtext.h> | ||
15 | 17 | ||
16 | #ifdef CONFIG_X86_32 | 18 | #ifdef CONFIG_X86_32 |
17 | # include <linux/dmi.h> | 19 | # include <linux/dmi.h> |
@@ -23,7 +25,6 @@ | |||
23 | 25 | ||
24 | #include <mach_ipi.h> | 26 | #include <mach_ipi.h> |
25 | 27 | ||
26 | |||
27 | /* | 28 | /* |
28 | * Power off function, if any | 29 | * Power off function, if any |
29 | */ | 30 | */ |
@@ -39,6 +40,12 @@ int reboot_force; | |||
39 | static int reboot_cpu = -1; | 40 | static int reboot_cpu = -1; |
40 | #endif | 41 | #endif |
41 | 42 | ||
43 | /* This is set if we need to go through the 'emergency' path. | ||
44 | * When machine_emergency_restart() is called, we may be in | ||
45 | * an inconsistent state and won't be able to do a clean shutdown. | ||
46 | */ | ||
47 | static int reboot_emergency; | ||
48 | |||
42 | /* This is set by the PCI code if either type 1 or type 2 PCI is detected */ | 49 | /* This is set by the PCI code if either type 1 or type 2 PCI is detected */ |
43 | bool port_cf9_safe = false; | 50 | bool port_cf9_safe = false; |
44 | 51 | ||
@@ -368,6 +375,48 @@ static inline void kb_wait(void) | |||
368 | } | 375 | } |
369 | } | 376 | } |
370 | 377 | ||
378 | static void vmxoff_nmi(int cpu, struct die_args *args) | ||
379 | { | ||
380 | cpu_emergency_vmxoff(); | ||
381 | } | ||
382 | |||
383 | /* Use NMIs as IPIs to tell all CPUs to disable virtualization | ||
384 | */ | ||
385 | static void emergency_vmx_disable_all(void) | ||
386 | { | ||
387 | /* Just make sure we won't change CPUs while doing this */ | ||
388 | local_irq_disable(); | ||
389 | |||
390 | /* We need to disable VMX on all CPUs before rebooting, otherwise | ||
391 | * we risk hanging up the machine, because CPUs ignore INIT | ||
392 | * signals when VMX is enabled. | ||
393 | * | ||
394 | * We can't take any locks and we may be in an inconsistent | ||
395 | * state, so we use NMIs as IPIs to tell the other CPUs to disable | ||
396 | * VMX and halt. | ||
397 | * | ||
398 | * For safety, we will avoid running the nmi_shootdown_cpus() | ||
399 | * path unnecessarily, but we don't have a way to check | ||
400 | * if other CPUs have VMX enabled. So we will call it only if the | ||
401 | * CPU we are running on has VMX enabled. | ||
402 | * | ||
403 | * We will miss cases where VMX is not enabled on all CPUs. This | ||
404 | * shouldn't do much harm because KVM always enables VMX on all | ||
405 | * CPUs anyway. But we can miss it in the small window where KVM | ||
406 | * is still enabling VMX. | ||
407 | */ | ||
408 | if (cpu_has_vmx() && cpu_vmx_enabled()) { | ||
409 | /* Disable VMX on this CPU. | ||
410 | */ | ||
411 | cpu_vmxoff(); | ||
412 | |||
413 | /* Halt and disable VMX on the other CPUs */ | ||
414 | nmi_shootdown_cpus(vmxoff_nmi); | ||
415 | |||
416 | } | ||
417 | } | ||
418 | |||
419 | |||
371 | void __attribute__((weak)) mach_reboot_fixups(void) | 420 | void __attribute__((weak)) mach_reboot_fixups(void) |
372 | { | 421 | { |
373 | } | 422 | } |
@@ -376,6 +425,9 @@ static void native_machine_emergency_restart(void) | |||
376 | { | 425 | { |
377 | int i; | 426 | int i; |
378 | 427 | ||
428 | if (reboot_emergency) | ||
429 | emergency_vmx_disable_all(); | ||
430 | |||
379 | /* Tell the BIOS if we want cold or warm reboot */ | 431 | /* Tell the BIOS if we want cold or warm reboot */ |
380 | *((unsigned short *)__va(0x472)) = reboot_mode; | 432 | *((unsigned short *)__va(0x472)) = reboot_mode; |
381 | 433 | ||
@@ -482,13 +534,19 @@ void native_machine_shutdown(void) | |||
482 | #endif | 534 | #endif |
483 | } | 535 | } |
484 | 536 | ||
537 | static void __machine_emergency_restart(int emergency) | ||
538 | { | ||
539 | reboot_emergency = emergency; | ||
540 | machine_ops.emergency_restart(); | ||
541 | } | ||
542 | |||
485 | static void native_machine_restart(char *__unused) | 543 | static void native_machine_restart(char *__unused) |
486 | { | 544 | { |
487 | printk("machine restart\n"); | 545 | printk("machine restart\n"); |
488 | 546 | ||
489 | if (!reboot_force) | 547 | if (!reboot_force) |
490 | machine_shutdown(); | 548 | machine_shutdown(); |
491 | machine_emergency_restart(); | 549 | __machine_emergency_restart(0); |
492 | } | 550 | } |
493 | 551 | ||
494 | static void native_machine_halt(void) | 552 | static void native_machine_halt(void) |
@@ -532,7 +590,7 @@ void machine_shutdown(void) | |||
532 | 590 | ||
533 | void machine_emergency_restart(void) | 591 | void machine_emergency_restart(void) |
534 | { | 592 | { |
535 | machine_ops.emergency_restart(); | 593 | __machine_emergency_restart(1); |
536 | } | 594 | } |
537 | 595 | ||
538 | void machine_restart(char *cmd) | 596 | void machine_restart(char *cmd) |
@@ -592,10 +650,7 @@ static int crash_nmi_callback(struct notifier_block *self, | |||
592 | 650 | ||
593 | static void smp_send_nmi_allbutself(void) | 651 | static void smp_send_nmi_allbutself(void) |
594 | { | 652 | { |
595 | cpumask_t mask = cpu_online_map; | 653 | send_IPI_allbutself(NMI_VECTOR); |
596 | cpu_clear(safe_smp_processor_id(), mask); | ||
597 | if (!cpus_empty(mask)) | ||
598 | send_IPI_mask(mask, NMI_VECTOR); | ||
599 | } | 654 | } |
600 | 655 | ||
601 | static struct notifier_block crash_nmi_nb = { | 656 | static struct notifier_block crash_nmi_nb = { |
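
The reboot.c change above routes both restart entry points through a single helper, so the emergency flag is always set before machine_ops.emergency_restart() runs. A minimal userspace sketch of that flag-plus-function-pointer pattern, using simplified stand-in types (not the kernel's real machine_ops):

#include <stdio.h>

/* Stand-in for the kernel's machine_ops indirection (illustrative only). */
struct machine_ops {
    void (*emergency_restart)(void);
};

static int reboot_emergency;    /* set before the hook runs, read inside it */

static void native_emergency_restart(void)
{
    if (reboot_emergency)
        printf("emergency path: disable VMX on all CPUs first\n");
    printf("restarting\n");
}

static struct machine_ops machine_ops = {
    .emergency_restart = native_emergency_restart,
};

/* Both entry points funnel through one helper, so the flag can never
 * be stale when the hook fires. */
static void __machine_emergency_restart(int emergency)
{
    reboot_emergency = emergency;
    machine_ops.emergency_restart();
}

int main(void)
{
    __machine_emergency_restart(0);    /* ordinary restart */
    __machine_emergency_restart(1);    /* crash/panic path */
    return 0;
}
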
diff --git a/arch/x86/kernel/setup_percpu.c b/arch/x86/kernel/setup_percpu.c index ae0c0d3bb770..0b63b08e7530 100644 --- a/arch/x86/kernel/setup_percpu.c +++ b/arch/x86/kernel/setup_percpu.c | |||
@@ -152,6 +152,11 @@ void __init setup_per_cpu_areas(void) | |||
152 | old_size = PERCPU_ENOUGH_ROOM; | 152 | old_size = PERCPU_ENOUGH_ROOM; |
153 | align = max_t(unsigned long, PAGE_SIZE, align); | 153 | align = max_t(unsigned long, PAGE_SIZE, align); |
154 | size = roundup(old_size, align); | 154 | size = roundup(old_size, align); |
155 | |||
156 | printk(KERN_INFO | ||
157 | "NR_CPUS:%d nr_cpumask_bits:%d nr_cpu_ids:%d nr_node_ids:%d\n", | ||
158 | NR_CPUS, nr_cpumask_bits, nr_cpu_ids, nr_node_ids); | ||
159 | |||
155 | printk(KERN_INFO "PERCPU: Allocating %zd bytes of per cpu data\n", | 160 | printk(KERN_INFO "PERCPU: Allocating %zd bytes of per cpu data\n", |
156 | size); | 161 | size); |
157 | 162 | ||
@@ -168,24 +173,24 @@ void __init setup_per_cpu_areas(void) | |||
168 | "cpu %d has no node %d or node-local memory\n", | 173 | "cpu %d has no node %d or node-local memory\n", |
169 | cpu, node); | 174 | cpu, node); |
170 | if (ptr) | 175 | if (ptr) |
171 | printk(KERN_DEBUG "per cpu data for cpu%d at %016lx\n", | 176 | printk(KERN_DEBUG |
177 | "per cpu data for cpu%d at %016lx\n", | ||
172 | cpu, __pa(ptr)); | 178 | cpu, __pa(ptr)); |
173 | } | 179 | } |
174 | else { | 180 | else { |
175 | ptr = __alloc_bootmem_node(NODE_DATA(node), size, align, | 181 | ptr = __alloc_bootmem_node(NODE_DATA(node), size, align, |
176 | __pa(MAX_DMA_ADDRESS)); | 182 | __pa(MAX_DMA_ADDRESS)); |
177 | if (ptr) | 183 | if (ptr) |
178 | printk(KERN_DEBUG "per cpu data for cpu%d on node%d at %016lx\n", | 184 | printk(KERN_DEBUG |
179 | cpu, node, __pa(ptr)); | 185 | "per cpu data for cpu%d on node%d " |
186 | "at %016lx\n", | ||
187 | cpu, node, __pa(ptr)); | ||
180 | } | 188 | } |
181 | #endif | 189 | #endif |
182 | per_cpu_offset(cpu) = ptr - __per_cpu_start; | 190 | per_cpu_offset(cpu) = ptr - __per_cpu_start; |
183 | memcpy(ptr, __per_cpu_start, __per_cpu_end - __per_cpu_start); | 191 | memcpy(ptr, __per_cpu_start, __per_cpu_end - __per_cpu_start); |
184 | } | 192 | } |
185 | 193 | ||
186 | printk(KERN_DEBUG "NR_CPUS: %d, nr_cpu_ids: %d, nr_node_ids %d\n", | ||
187 | NR_CPUS, nr_cpu_ids, nr_node_ids); | ||
188 | |||
189 | /* Setup percpu data maps */ | 194 | /* Setup percpu data maps */ |
190 | setup_per_cpu_maps(); | 195 | setup_per_cpu_maps(); |
191 | 196 | ||
@@ -282,7 +287,7 @@ static void __cpuinit numa_set_cpumask(int cpu, int enable) | |||
282 | else | 287 | else |
283 | cpu_clear(cpu, *mask); | 288 | cpu_clear(cpu, *mask); |
284 | 289 | ||
285 | cpulist_scnprintf(buf, sizeof(buf), *mask); | 290 | cpulist_scnprintf(buf, sizeof(buf), mask); |
286 | printk(KERN_DEBUG "%s cpu %d node %d: mask now %s\n", | 291 | printk(KERN_DEBUG "%s cpu %d node %d: mask now %s\n", |
287 | enable? "numa_add_cpu":"numa_remove_cpu", cpu, node, buf); | 292 | enable? "numa_add_cpu":"numa_remove_cpu", cpu, node, buf); |
288 | } | 293 | } |
diff --git a/arch/x86/kernel/smp.c b/arch/x86/kernel/smp.c index 7e558db362c1..beea2649a240 100644 --- a/arch/x86/kernel/smp.c +++ b/arch/x86/kernel/smp.c | |||
@@ -118,22 +118,22 @@ static void native_smp_send_reschedule(int cpu) | |||
118 | WARN_ON(1); | 118 | WARN_ON(1); |
119 | return; | 119 | return; |
120 | } | 120 | } |
121 | send_IPI_mask(cpumask_of_cpu(cpu), RESCHEDULE_VECTOR); | 121 | send_IPI_mask(cpumask_of(cpu), RESCHEDULE_VECTOR); |
122 | } | 122 | } |
123 | 123 | ||
124 | void native_send_call_func_single_ipi(int cpu) | 124 | void native_send_call_func_single_ipi(int cpu) |
125 | { | 125 | { |
126 | send_IPI_mask(cpumask_of_cpu(cpu), CALL_FUNCTION_SINGLE_VECTOR); | 126 | send_IPI_mask(cpumask_of(cpu), CALL_FUNCTION_SINGLE_VECTOR); |
127 | } | 127 | } |
128 | 128 | ||
129 | void native_send_call_func_ipi(cpumask_t mask) | 129 | void native_send_call_func_ipi(const struct cpumask *mask) |
130 | { | 130 | { |
131 | cpumask_t allbutself; | 131 | cpumask_t allbutself; |
132 | 132 | ||
133 | allbutself = cpu_online_map; | 133 | allbutself = cpu_online_map; |
134 | cpu_clear(smp_processor_id(), allbutself); | 134 | cpu_clear(smp_processor_id(), allbutself); |
135 | 135 | ||
136 | if (cpus_equal(mask, allbutself) && | 136 | if (cpus_equal(*mask, allbutself) && |
137 | cpus_equal(cpu_online_map, cpu_callout_map)) | 137 | cpus_equal(cpu_online_map, cpu_callout_map)) |
138 | send_IPI_allbutself(CALL_FUNCTION_VECTOR); | 138 | send_IPI_allbutself(CALL_FUNCTION_VECTOR); |
139 | else | 139 | else |
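
The smp.c hunk is part of the wider migration from passing cpumask_t by value to passing const struct cpumask *: with NR_CPUS in the thousands, a by-value mask copies hundreds of bytes of stack on every IPI call. A rough userspace illustration of the size argument (the struct layout is a simplification, and __builtin_popcountl is a GCC/Clang builtin):

#include <stdio.h>
#include <string.h>

#define NR_CPUS 4096
#define BITS_PER_LONG (8 * (int)sizeof(unsigned long))
#define MASK_LONGS ((NR_CPUS + BITS_PER_LONG - 1) / BITS_PER_LONG)

/* Simplified analogue of the kernel's cpumask: one bit per CPU. */
struct cpumask {
    unsigned long bits[MASK_LONGS];
};

/* By pointer, only sizeof(void *) bytes cross the call boundary;
 * by value, the entire bits[] array would be copied per call. */
static int weight(const struct cpumask *mask)
{
    int i, w = 0;

    for (i = 0; i < MASK_LONGS; i++)
        w += __builtin_popcountl(mask->bits[i]);
    return w;
}

int main(void)
{
    static struct cpumask online;    /* static: too big to put on the stack */

    memset(&online, 0, sizeof(online));
    online.bits[0] = 0xf;            /* CPUs 0-3 online */

    printf("mask is %zu bytes (copied per call if passed by value)\n",
           sizeof(struct cpumask));
    printf("online cpus: %d\n", weight(&online));
    return 0;
}
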
diff --git a/arch/x86/kernel/smpboot.c b/arch/x86/kernel/smpboot.c index f8500c969442..31869bf5fabd 100644 --- a/arch/x86/kernel/smpboot.c +++ b/arch/x86/kernel/smpboot.c | |||
@@ -102,14 +102,8 @@ EXPORT_SYMBOL(smp_num_siblings); | |||
102 | /* Last level cache ID of each logical CPU */ | 102 | /* Last level cache ID of each logical CPU */ |
103 | DEFINE_PER_CPU(u16, cpu_llc_id) = BAD_APICID; | 103 | DEFINE_PER_CPU(u16, cpu_llc_id) = BAD_APICID; |
104 | 104 | ||
105 | /* bitmap of online cpus */ | ||
106 | cpumask_t cpu_online_map __read_mostly; | ||
107 | EXPORT_SYMBOL(cpu_online_map); | ||
108 | |||
109 | cpumask_t cpu_callin_map; | 105 | cpumask_t cpu_callin_map; |
110 | cpumask_t cpu_callout_map; | 106 | cpumask_t cpu_callout_map; |
111 | cpumask_t cpu_possible_map; | ||
112 | EXPORT_SYMBOL(cpu_possible_map); | ||
113 | 107 | ||
114 | /* representing HT siblings of each logical CPU */ | 108 | /* representing HT siblings of each logical CPU */ |
115 | DEFINE_PER_CPU(cpumask_t, cpu_sibling_map); | 109 | DEFINE_PER_CPU(cpumask_t, cpu_sibling_map); |
@@ -1260,6 +1254,15 @@ void __init native_smp_cpus_done(unsigned int max_cpus) | |||
1260 | check_nmi_watchdog(); | 1254 | check_nmi_watchdog(); |
1261 | } | 1255 | } |
1262 | 1256 | ||
1257 | static int __initdata setup_possible_cpus = -1; | ||
1258 | static int __init _setup_possible_cpus(char *str) | ||
1259 | { | ||
1260 | get_option(&str, &setup_possible_cpus); | ||
1261 | return 0; | ||
1262 | } | ||
1263 | early_param("possible_cpus", _setup_possible_cpus); | ||
1264 | |||
1265 | |||
1263 | /* | 1266 | /* |
1264 | * cpu_possible_map should be static, it cannot change as cpu's | 1267 | * cpu_possible_map should be static, it cannot change as cpu's |
1265 | * are onlined, or offlined. The reason is per-cpu data-structures | 1268 | * are onlined, or offlined. The reason is per-cpu data-structures |
@@ -1272,7 +1275,7 @@ void __init native_smp_cpus_done(unsigned int max_cpus) | |||
1272 | * | 1275 | * |
1273 | * Three ways to find out the number of additional hotplug CPUs: | 1276 | * Three ways to find out the number of additional hotplug CPUs: |
1274 | * - If the BIOS specified disabled CPUs in ACPI/mptables use that. | 1277 | * - If the BIOS specified disabled CPUs in ACPI/mptables use that. |
1275 | * - The user can overwrite it with additional_cpus=NUM | 1278 | * - The user can overwrite it with possible_cpus=NUM |
1276 | * - Otherwise don't reserve additional CPUs. | 1279 | * - Otherwise don't reserve additional CPUs. |
1277 | * We do this because additional CPUs waste a lot of memory. | 1280 | * We do this because additional CPUs waste a lot of memory. |
1278 | * -AK | 1281 | * -AK |
@@ -1285,9 +1288,17 @@ __init void prefill_possible_map(void) | |||
1285 | if (!num_processors) | 1288 | if (!num_processors) |
1286 | num_processors = 1; | 1289 | num_processors = 1; |
1287 | 1290 | ||
1288 | possible = num_processors + disabled_cpus; | 1291 | if (setup_possible_cpus == -1) |
1289 | if (possible > NR_CPUS) | 1292 | possible = num_processors + disabled_cpus; |
1290 | possible = NR_CPUS; | 1293 | else |
1294 | possible = setup_possible_cpus; | ||
1295 | |||
1296 | if (possible > CONFIG_NR_CPUS) { | ||
1297 | printk(KERN_WARNING | ||
1298 | "%d Processors exceeds NR_CPUS limit of %d\n", | ||
1299 | possible, CONFIG_NR_CPUS); | ||
1300 | possible = CONFIG_NR_CPUS; | ||
1301 | } | ||
1291 | 1302 | ||
1292 | printk(KERN_INFO "SMP: Allowing %d CPUs, %d hotplug CPUs\n", | 1303 | printk(KERN_INFO "SMP: Allowing %d CPUs, %d hotplug CPUs\n", |
1293 | possible, max_t(int, possible - num_processors, 0)); | 1304 | possible, max_t(int, possible - num_processors, 0)); |
@@ -1352,7 +1363,7 @@ void cpu_disable_common(void) | |||
1352 | lock_vector_lock(); | 1363 | lock_vector_lock(); |
1353 | remove_cpu_from_maps(cpu); | 1364 | remove_cpu_from_maps(cpu); |
1354 | unlock_vector_lock(); | 1365 | unlock_vector_lock(); |
1355 | fixup_irqs(cpu_online_map); | 1366 | fixup_irqs(); |
1356 | } | 1367 | } |
1357 | 1368 | ||
1358 | int native_cpu_disable(void) | 1369 | int native_cpu_disable(void) |
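
prefill_possible_map() above now honors a user-supplied possible_cpus= count but clamps it to the compile-time limit. The selection-and-clamp logic in isolation (a sketch; the sentinel value and the clamp mirror the hunk, everything else is made up for the demo):

#include <stdio.h>

#define NR_CPUS 8    /* stand-in for CONFIG_NR_CPUS */

static int prefill_possible(int num_processors, int disabled_cpus,
                            int setup_possible_cpus)
{
    int possible;

    /* -1 means "not set on the command line": derive the count from
     * the firmware-reported enabled + disabled processors instead. */
    if (setup_possible_cpus == -1)
        possible = num_processors + disabled_cpus;
    else
        possible = setup_possible_cpus;

    if (possible > NR_CPUS) {
        fprintf(stderr, "%d processors exceed NR_CPUS limit of %d\n",
                possible, NR_CPUS);
        possible = NR_CPUS;
    }
    return possible;
}

int main(void)
{
    printf("%d\n", prefill_possible(4, 2, -1));    /* 6: firmware-derived */
    printf("%d\n", prefill_possible(4, 2, 16));    /* 8: user value clamped */
    return 0;
}
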
diff --git a/arch/x86/kernel/tlb_32.c b/arch/x86/kernel/tlb_32.c index 8da059f949be..ce5054642247 100644 --- a/arch/x86/kernel/tlb_32.c +++ b/arch/x86/kernel/tlb_32.c | |||
@@ -163,7 +163,7 @@ void native_flush_tlb_others(const cpumask_t *cpumaskp, struct mm_struct *mm, | |||
163 | * We have to send the IPI only to | 163 | * We have to send the IPI only to |
164 | * CPUs affected. | 164 | * CPUs affected. |
165 | */ | 165 | */ |
166 | send_IPI_mask(cpumask, INVALIDATE_TLB_VECTOR); | 166 | send_IPI_mask(&cpumask, INVALIDATE_TLB_VECTOR); |
167 | 167 | ||
168 | while (!cpus_empty(flush_cpumask)) | 168 | while (!cpus_empty(flush_cpumask)) |
169 | /* nothing. lockup detection does not belong here */ | 169 | /* nothing. lockup detection does not belong here */ |
diff --git a/arch/x86/kernel/tlb_64.c b/arch/x86/kernel/tlb_64.c index 29887d7081a9..f8be6f1d2e48 100644 --- a/arch/x86/kernel/tlb_64.c +++ b/arch/x86/kernel/tlb_64.c | |||
@@ -191,7 +191,7 @@ void native_flush_tlb_others(const cpumask_t *cpumaskp, struct mm_struct *mm, | |||
191 | * We have to send the IPI only to | 191 | * We have to send the IPI only to |
192 | * CPUs affected. | 192 | * CPUs affected. |
193 | */ | 193 | */ |
194 | send_IPI_mask(cpumask, INVALIDATE_TLB_VECTOR_START + sender); | 194 | send_IPI_mask(&cpumask, INVALIDATE_TLB_VECTOR_START + sender); |
195 | 195 | ||
196 | while (!cpus_empty(f->flush_cpumask)) | 196 | while (!cpus_empty(f->flush_cpumask)) |
197 | cpu_relax(); | 197 | cpu_relax(); |
diff --git a/arch/x86/kernel/traps.c b/arch/x86/kernel/traps.c index 141907ab6e22..ce6650eb64e9 100644 --- a/arch/x86/kernel/traps.c +++ b/arch/x86/kernel/traps.c | |||
@@ -72,9 +72,6 @@ | |||
72 | 72 | ||
73 | #include "cpu/mcheck/mce.h" | 73 | #include "cpu/mcheck/mce.h" |
74 | 74 | ||
75 | DECLARE_BITMAP(used_vectors, NR_VECTORS); | ||
76 | EXPORT_SYMBOL_GPL(used_vectors); | ||
77 | |||
78 | asmlinkage int system_call(void); | 75 | asmlinkage int system_call(void); |
79 | 76 | ||
80 | /* Do we ignore FPU interrupts ? */ | 77 | /* Do we ignore FPU interrupts ? */ |
@@ -89,6 +86,9 @@ gate_desc idt_table[256] | |||
89 | __attribute__((__section__(".data.idt"))) = { { { { 0, 0 } } }, }; | 86 | __attribute__((__section__(".data.idt"))) = { { { { 0, 0 } } }, }; |
90 | #endif | 87 | #endif |
91 | 88 | ||
89 | DECLARE_BITMAP(used_vectors, NR_VECTORS); | ||
90 | EXPORT_SYMBOL_GPL(used_vectors); | ||
91 | |||
92 | static int ignore_nmis; | 92 | static int ignore_nmis; |
93 | 93 | ||
94 | static inline void conditional_sti(struct pt_regs *regs) | 94 | static inline void conditional_sti(struct pt_regs *regs) |
@@ -292,8 +292,10 @@ dotraplinkage void do_double_fault(struct pt_regs *regs, long error_code) | |||
292 | tsk->thread.error_code = error_code; | 292 | tsk->thread.error_code = error_code; |
293 | tsk->thread.trap_no = 8; | 293 | tsk->thread.trap_no = 8; |
294 | 294 | ||
295 | /* This is always a kernel trap and never fixable (and thus must | 295 | /* |
296 | never return). */ | 296 | * This is always a kernel trap and never fixable (and thus must |
297 | * never return). | ||
298 | */ | ||
297 | for (;;) | 299 | for (;;) |
298 | die(str, regs, error_code); | 300 | die(str, regs, error_code); |
299 | } | 301 | } |
@@ -520,9 +522,11 @@ dotraplinkage void __kprobes do_int3(struct pt_regs *regs, long error_code) | |||
520 | } | 522 | } |
521 | 523 | ||
522 | #ifdef CONFIG_X86_64 | 524 | #ifdef CONFIG_X86_64 |
523 | /* Help handler running on IST stack to switch back to user stack | 525 | /* |
524 | for scheduling or signal handling. The actual stack switch is done in | 526 | * Help handler running on IST stack to switch back to user stack |
525 | entry.S */ | 527 | * for scheduling or signal handling. The actual stack switch is done in |
528 | * entry.S | ||
529 | */ | ||
526 | asmlinkage __kprobes struct pt_regs *sync_regs(struct pt_regs *eregs) | 530 | asmlinkage __kprobes struct pt_regs *sync_regs(struct pt_regs *eregs) |
527 | { | 531 | { |
528 | struct pt_regs *regs = eregs; | 532 | struct pt_regs *regs = eregs; |
@@ -532,8 +536,10 @@ asmlinkage __kprobes struct pt_regs *sync_regs(struct pt_regs *eregs) | |||
532 | /* Exception from user space */ | 536 | /* Exception from user space */ |
533 | else if (user_mode(eregs)) | 537 | else if (user_mode(eregs)) |
534 | regs = task_pt_regs(current); | 538 | regs = task_pt_regs(current); |
535 | /* Exception from kernel and interrupts are enabled. Move to | 539 | /* |
536 | kernel process stack. */ | 540 | * Exception from kernel and interrupts are enabled. Move to |
541 | * kernel process stack. | ||
542 | */ | ||
537 | else if (eregs->flags & X86_EFLAGS_IF) | 543 | else if (eregs->flags & X86_EFLAGS_IF) |
538 | regs = (struct pt_regs *)(eregs->sp -= sizeof(struct pt_regs)); | 544 | regs = (struct pt_regs *)(eregs->sp -= sizeof(struct pt_regs)); |
539 | if (eregs != regs) | 545 | if (eregs != regs) |
@@ -685,12 +691,7 @@ void math_error(void __user *ip) | |||
685 | cwd = get_fpu_cwd(task); | 691 | cwd = get_fpu_cwd(task); |
686 | swd = get_fpu_swd(task); | 692 | swd = get_fpu_swd(task); |
687 | 693 | ||
688 | err = swd & ~cwd & 0x3f; | 694 | err = swd & ~cwd; |
689 | |||
690 | #ifdef CONFIG_X86_32 | ||
691 | if (!err) | ||
692 | return; | ||
693 | #endif | ||
694 | 695 | ||
695 | if (err & 0x001) { /* Invalid op */ | 696 | if (err & 0x001) { /* Invalid op */ |
696 | /* | 697 | /* |
@@ -708,7 +709,11 @@ void math_error(void __user *ip) | |||
708 | } else if (err & 0x020) { /* Precision */ | 709 | } else if (err & 0x020) { /* Precision */ |
709 | info.si_code = FPE_FLTRES; | 710 | info.si_code = FPE_FLTRES; |
710 | } else { | 711 | } else { |
711 | info.si_code = __SI_FAULT|SI_KERNEL; /* WTF? */ | 712 | /* |
713 | * If we're using IRQ 13, or supposedly even some trap 16 | ||
714 | * implementations, it's possible we get a spurious trap... | ||
715 | */ | ||
716 | return; /* Spurious trap, no error */ | ||
712 | } | 717 | } |
713 | force_sig_info(SIGFPE, &info, task); | 718 | force_sig_info(SIGFPE, &info, task); |
714 | } | 719 | } |
@@ -941,9 +946,7 @@ dotraplinkage void do_iret_error(struct pt_regs *regs, long error_code) | |||
941 | 946 | ||
942 | void __init trap_init(void) | 947 | void __init trap_init(void) |
943 | { | 948 | { |
944 | #ifdef CONFIG_X86_32 | ||
945 | int i; | 949 | int i; |
946 | #endif | ||
947 | 950 | ||
948 | #ifdef CONFIG_EISA | 951 | #ifdef CONFIG_EISA |
949 | void __iomem *p = early_ioremap(0x0FFFD9, 4); | 952 | void __iomem *p = early_ioremap(0x0FFFD9, 4); |
@@ -1000,11 +1003,15 @@ void __init trap_init(void) | |||
1000 | } | 1003 | } |
1001 | 1004 | ||
1002 | set_system_trap_gate(SYSCALL_VECTOR, &system_call); | 1005 | set_system_trap_gate(SYSCALL_VECTOR, &system_call); |
1006 | #endif | ||
1003 | 1007 | ||
1004 | /* Reserve all the builtin and the syscall vector: */ | 1008 | /* Reserve all the builtin and the syscall vector: */ |
1005 | for (i = 0; i < FIRST_EXTERNAL_VECTOR; i++) | 1009 | for (i = 0; i < FIRST_EXTERNAL_VECTOR; i++) |
1006 | set_bit(i, used_vectors); | 1010 | set_bit(i, used_vectors); |
1007 | 1011 | ||
1012 | #ifdef CONFIG_X86_64 | ||
1013 | set_bit(IA32_SYSCALL_VECTOR, used_vectors); | ||
1014 | #else | ||
1008 | set_bit(SYSCALL_VECTOR, used_vectors); | 1015 | set_bit(SYSCALL_VECTOR, used_vectors); |
1009 | #endif | 1016 | #endif |
1010 | /* | 1017 | /* |
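
trap_init() above now does the vector bookkeeping on both 32-bit and 64-bit, differing only in which syscall vector gets marked. The same bitmap reservation as a standalone sketch (vector numbers follow the usual x86 layout; a byte array stands in for the kernel's bit operations):

#include <stdio.h>

#define NR_VECTORS            256
#define FIRST_EXTERNAL_VECTOR 0x20    /* 0x00-0x1f are CPU exceptions */
#define SYSCALL_VECTOR        0x80    /* the int 0x80 entry point */

static unsigned char used_vectors[NR_VECTORS];

int main(void)
{
    int i, free_count = 0;

    /* Reserve all the builtin vectors and the syscall vector. */
    for (i = 0; i < FIRST_EXTERNAL_VECTOR; i++)
        used_vectors[i] = 1;
    used_vectors[SYSCALL_VECTOR] = 1;

    for (i = 0; i < NR_VECTORS; i++)
        if (!used_vectors[i])
            free_count++;
    printf("%d of %d vectors left for devices\n", free_count, NR_VECTORS);
    return 0;
}
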
diff --git a/arch/x86/kernel/vmiclock_32.c b/arch/x86/kernel/vmiclock_32.c index 254ee07f8635..c4c1f9e09402 100644 --- a/arch/x86/kernel/vmiclock_32.c +++ b/arch/x86/kernel/vmiclock_32.c | |||
@@ -226,7 +226,7 @@ static void __devinit vmi_time_init_clockevent(void) | |||
226 | /* Upper bound is clockevent's use of ulong for cycle deltas. */ | 226 | /* Upper bound is clockevent's use of ulong for cycle deltas. */ |
227 | evt->max_delta_ns = clockevent_delta2ns(ULONG_MAX, evt); | 227 | evt->max_delta_ns = clockevent_delta2ns(ULONG_MAX, evt); |
228 | evt->min_delta_ns = clockevent_delta2ns(1, evt); | 228 | evt->min_delta_ns = clockevent_delta2ns(1, evt); |
229 | evt->cpumask = cpumask_of_cpu(cpu); | 229 | evt->cpumask = cpumask_of(cpu); |
230 | 230 | ||
231 | printk(KERN_WARNING "vmi: registering clock event %s. mult=%lu shift=%u\n", | 231 | printk(KERN_WARNING "vmi: registering clock event %s. mult=%lu shift=%u\n", |
232 | evt->name, evt->mult, evt->shift); | 232 | evt->name, evt->mult, evt->shift); |
diff --git a/arch/x86/kernel/xsave.c b/arch/x86/kernel/xsave.c index 15c3e6999182..2b54fe002e94 100644 --- a/arch/x86/kernel/xsave.c +++ b/arch/x86/kernel/xsave.c | |||
@@ -159,7 +159,7 @@ int save_i387_xstate(void __user *buf) | |||
159 | * Restore the extended state if present. Otherwise, restore the FP/SSE | 159 | * Restore the extended state if present. Otherwise, restore the FP/SSE |
160 | * state. | 160 | * state. |
161 | */ | 161 | */ |
162 | int restore_user_xstate(void __user *buf) | 162 | static int restore_user_xstate(void __user *buf) |
163 | { | 163 | { |
164 | struct _fpx_sw_bytes fx_sw_user; | 164 | struct _fpx_sw_bytes fx_sw_user; |
165 | u64 mask; | 165 | u64 mask; |
diff --git a/arch/x86/kvm/i8254.c b/arch/x86/kvm/i8254.c index 59ebd37ad79e..e665d1c623ca 100644 --- a/arch/x86/kvm/i8254.c +++ b/arch/x86/kvm/i8254.c | |||
@@ -603,10 +603,29 @@ void kvm_free_pit(struct kvm *kvm) | |||
603 | 603 | ||
604 | static void __inject_pit_timer_intr(struct kvm *kvm) | 604 | static void __inject_pit_timer_intr(struct kvm *kvm) |
605 | { | 605 | { |
606 | struct kvm_vcpu *vcpu; | ||
607 | int i; | ||
608 | |||
606 | mutex_lock(&kvm->lock); | 609 | mutex_lock(&kvm->lock); |
607 | kvm_set_irq(kvm, kvm->arch.vpit->irq_source_id, 0, 1); | 610 | kvm_set_irq(kvm, kvm->arch.vpit->irq_source_id, 0, 1); |
608 | kvm_set_irq(kvm, kvm->arch.vpit->irq_source_id, 0, 0); | 611 | kvm_set_irq(kvm, kvm->arch.vpit->irq_source_id, 0, 0); |
609 | mutex_unlock(&kvm->lock); | 612 | mutex_unlock(&kvm->lock); |
613 | |||
614 | /* | ||
615 | * Provides NMI watchdog support via Virtual Wire mode. | ||
616 | * The route is: PIT -> PIC -> LVT0 in NMI mode. | ||
617 | * | ||
618 | * Note: Our Virtual Wire implementation is simplified, only | ||
619 | * propagating PIT interrupts to all VCPUs when they have set | ||
620 | * LVT0 to NMI delivery. Other PIC interrupts are just sent to | ||
621 | * VCPU0, and only if its LVT0 is in EXTINT mode. | ||
622 | */ | ||
623 | if (kvm->arch.vapics_in_nmi_mode > 0) | ||
624 | for (i = 0; i < KVM_MAX_VCPUS; ++i) { | ||
625 | vcpu = kvm->vcpus[i]; | ||
626 | if (vcpu) | ||
627 | kvm_apic_nmi_wd_deliver(vcpu); | ||
628 | } | ||
610 | } | 629 | } |
611 | 630 | ||
612 | void kvm_inject_pit_timer_irqs(struct kvm_vcpu *vcpu) | 631 | void kvm_inject_pit_timer_irqs(struct kvm_vcpu *vcpu) |
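
The i8254.c hunk fans each PIT tick out as an NMI to every VCPU once at least one guest LVT0 is in NMI mode, completing the PIT -> PIC -> LVT0 virtual wire. Stripped of the KVM types, the guarded fan-out reduces to a sparse-array walk (a sketch with made-up types; kvm->vcpus really is a fixed-size array with NULL holes):

#include <stdio.h>

#define MAX_VCPUS 16

struct vcpu { int id; };

static struct vcpu *vcpus[MAX_VCPUS];    /* sparse: unused slots stay NULL */
static int vapics_in_nmi_mode;           /* how many LVT0s are in NMI mode */

static void deliver_nmi_watchdog(struct vcpu *v)
{
    printf("NMI watchdog tick -> vcpu %d\n", v->id);
}

static void inject_pit_timer_intr(void)
{
    int i;

    /* The normal IRQ0 injection happens elsewhere; the NMI fan-out is
     * extra work, done only if some VCPU asked for NMI-mode LVT0. */
    if (vapics_in_nmi_mode > 0)
        for (i = 0; i < MAX_VCPUS; i++)
            if (vcpus[i])
                deliver_nmi_watchdog(vcpus[i]);
}

int main(void)
{
    static struct vcpu v0 = { 0 }, v2 = { 2 };

    vcpus[0] = &v0;
    vcpus[2] = &v2;

    inject_pit_timer_intr();    /* silent: NMI mode not enabled yet */
    vapics_in_nmi_mode = 1;
    inject_pit_timer_intr();    /* ticks reach vcpu 0 and vcpu 2 */
    return 0;
}
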
diff --git a/arch/x86/kvm/i8259.c b/arch/x86/kvm/i8259.c index 17e41e165f1a..179dcb0103fd 100644 --- a/arch/x86/kvm/i8259.c +++ b/arch/x86/kvm/i8259.c | |||
@@ -26,10 +26,40 @@ | |||
26 | * Port from Qemu. | 26 | * Port from Qemu. |
27 | */ | 27 | */ |
28 | #include <linux/mm.h> | 28 | #include <linux/mm.h> |
29 | #include <linux/bitops.h> | ||
29 | #include "irq.h" | 30 | #include "irq.h" |
30 | 31 | ||
31 | #include <linux/kvm_host.h> | 32 | #include <linux/kvm_host.h> |
32 | 33 | ||
34 | static void pic_lock(struct kvm_pic *s) | ||
35 | { | ||
36 | spin_lock(&s->lock); | ||
37 | } | ||
38 | |||
39 | static void pic_unlock(struct kvm_pic *s) | ||
40 | { | ||
41 | struct kvm *kvm = s->kvm; | ||
42 | unsigned acks = s->pending_acks; | ||
43 | bool wakeup = s->wakeup_needed; | ||
44 | struct kvm_vcpu *vcpu; | ||
45 | |||
46 | s->pending_acks = 0; | ||
47 | s->wakeup_needed = false; | ||
48 | |||
49 | spin_unlock(&s->lock); | ||
50 | |||
51 | while (acks) { | ||
52 | kvm_notify_acked_irq(kvm, __ffs(acks)); | ||
53 | acks &= acks - 1; | ||
54 | } | ||
55 | |||
56 | if (wakeup) { | ||
57 | vcpu = s->kvm->vcpus[0]; | ||
58 | if (vcpu) | ||
59 | kvm_vcpu_kick(vcpu); | ||
60 | } | ||
61 | } | ||
62 | |||
33 | static void pic_clear_isr(struct kvm_kpic_state *s, int irq) | 63 | static void pic_clear_isr(struct kvm_kpic_state *s, int irq) |
34 | { | 64 | { |
35 | s->isr &= ~(1 << irq); | 65 | s->isr &= ~(1 << irq); |
@@ -136,17 +166,21 @@ static void pic_update_irq(struct kvm_pic *s) | |||
136 | 166 | ||
137 | void kvm_pic_update_irq(struct kvm_pic *s) | 167 | void kvm_pic_update_irq(struct kvm_pic *s) |
138 | { | 168 | { |
169 | pic_lock(s); | ||
139 | pic_update_irq(s); | 170 | pic_update_irq(s); |
171 | pic_unlock(s); | ||
140 | } | 172 | } |
141 | 173 | ||
142 | void kvm_pic_set_irq(void *opaque, int irq, int level) | 174 | void kvm_pic_set_irq(void *opaque, int irq, int level) |
143 | { | 175 | { |
144 | struct kvm_pic *s = opaque; | 176 | struct kvm_pic *s = opaque; |
145 | 177 | ||
178 | pic_lock(s); | ||
146 | if (irq >= 0 && irq < PIC_NUM_PINS) { | 179 | if (irq >= 0 && irq < PIC_NUM_PINS) { |
147 | pic_set_irq1(&s->pics[irq >> 3], irq & 7, level); | 180 | pic_set_irq1(&s->pics[irq >> 3], irq & 7, level); |
148 | pic_update_irq(s); | 181 | pic_update_irq(s); |
149 | } | 182 | } |
183 | pic_unlock(s); | ||
150 | } | 184 | } |
151 | 185 | ||
152 | /* | 186 | /* |
@@ -172,6 +206,7 @@ int kvm_pic_read_irq(struct kvm *kvm) | |||
172 | int irq, irq2, intno; | 206 | int irq, irq2, intno; |
173 | struct kvm_pic *s = pic_irqchip(kvm); | 207 | struct kvm_pic *s = pic_irqchip(kvm); |
174 | 208 | ||
209 | pic_lock(s); | ||
175 | irq = pic_get_irq(&s->pics[0]); | 210 | irq = pic_get_irq(&s->pics[0]); |
176 | if (irq >= 0) { | 211 | if (irq >= 0) { |
177 | pic_intack(&s->pics[0], irq); | 212 | pic_intack(&s->pics[0], irq); |
@@ -196,6 +231,7 @@ int kvm_pic_read_irq(struct kvm *kvm) | |||
196 | intno = s->pics[0].irq_base + irq; | 231 | intno = s->pics[0].irq_base + irq; |
197 | } | 232 | } |
198 | pic_update_irq(s); | 233 | pic_update_irq(s); |
234 | pic_unlock(s); | ||
199 | kvm_notify_acked_irq(kvm, irq); | 235 | kvm_notify_acked_irq(kvm, irq); |
200 | 236 | ||
201 | return intno; | 237 | return intno; |
@@ -203,7 +239,7 @@ int kvm_pic_read_irq(struct kvm *kvm) | |||
203 | 239 | ||
204 | void kvm_pic_reset(struct kvm_kpic_state *s) | 240 | void kvm_pic_reset(struct kvm_kpic_state *s) |
205 | { | 241 | { |
206 | int irq, irqbase; | 242 | int irq, irqbase, n; |
207 | struct kvm *kvm = s->pics_state->irq_request_opaque; | 243 | struct kvm *kvm = s->pics_state->irq_request_opaque; |
208 | struct kvm_vcpu *vcpu0 = kvm->vcpus[0]; | 244 | struct kvm_vcpu *vcpu0 = kvm->vcpus[0]; |
209 | 245 | ||
@@ -214,8 +250,10 @@ void kvm_pic_reset(struct kvm_kpic_state *s) | |||
214 | 250 | ||
215 | for (irq = 0; irq < PIC_NUM_PINS/2; irq++) { | 251 | for (irq = 0; irq < PIC_NUM_PINS/2; irq++) { |
216 | if (vcpu0 && kvm_apic_accept_pic_intr(vcpu0)) | 252 | if (vcpu0 && kvm_apic_accept_pic_intr(vcpu0)) |
217 | if (s->irr & (1 << irq) || s->isr & (1 << irq)) | 253 | if (s->irr & (1 << irq) || s->isr & (1 << irq)) { |
218 | kvm_notify_acked_irq(kvm, irq+irqbase); | 254 | n = irq + irqbase; |
255 | s->pics_state->pending_acks |= 1 << n; | ||
256 | } | ||
219 | } | 257 | } |
220 | s->last_irr = 0; | 258 | s->last_irr = 0; |
221 | s->irr = 0; | 259 | s->irr = 0; |
@@ -406,6 +444,7 @@ static void picdev_write(struct kvm_io_device *this, | |||
406 | printk(KERN_ERR "PIC: non byte write\n"); | 444 | printk(KERN_ERR "PIC: non byte write\n"); |
407 | return; | 445 | return; |
408 | } | 446 | } |
447 | pic_lock(s); | ||
409 | switch (addr) { | 448 | switch (addr) { |
410 | case 0x20: | 449 | case 0x20: |
411 | case 0x21: | 450 | case 0x21: |
@@ -418,6 +457,7 @@ static void picdev_write(struct kvm_io_device *this, | |||
418 | elcr_ioport_write(&s->pics[addr & 1], addr, data); | 457 | elcr_ioport_write(&s->pics[addr & 1], addr, data); |
419 | break; | 458 | break; |
420 | } | 459 | } |
460 | pic_unlock(s); | ||
421 | } | 461 | } |
422 | 462 | ||
423 | static void picdev_read(struct kvm_io_device *this, | 463 | static void picdev_read(struct kvm_io_device *this, |
@@ -431,6 +471,7 @@ static void picdev_read(struct kvm_io_device *this, | |||
431 | printk(KERN_ERR "PIC: non byte read\n"); | 471 | printk(KERN_ERR "PIC: non byte read\n"); |
432 | return; | 472 | return; |
433 | } | 473 | } |
474 | pic_lock(s); | ||
434 | switch (addr) { | 475 | switch (addr) { |
435 | case 0x20: | 476 | case 0x20: |
436 | case 0x21: | 477 | case 0x21: |
@@ -444,6 +485,7 @@ static void picdev_read(struct kvm_io_device *this, | |||
444 | break; | 485 | break; |
445 | } | 486 | } |
446 | *(unsigned char *)val = data; | 487 | *(unsigned char *)val = data; |
488 | pic_unlock(s); | ||
447 | } | 489 | } |
448 | 490 | ||
449 | /* | 491 | /* |
@@ -459,7 +501,7 @@ static void pic_irq_request(void *opaque, int level) | |||
459 | s->output = level; | 501 | s->output = level; |
460 | if (vcpu && level && (s->pics[0].isr_ack & (1 << irq))) { | 502 | if (vcpu && level && (s->pics[0].isr_ack & (1 << irq))) { |
461 | s->pics[0].isr_ack &= ~(1 << irq); | 503 | s->pics[0].isr_ack &= ~(1 << irq); |
462 | kvm_vcpu_kick(vcpu); | 504 | s->wakeup_needed = true; |
463 | } | 505 | } |
464 | } | 506 | } |
465 | 507 | ||
@@ -469,6 +511,8 @@ struct kvm_pic *kvm_create_pic(struct kvm *kvm) | |||
469 | s = kzalloc(sizeof(struct kvm_pic), GFP_KERNEL); | 511 | s = kzalloc(sizeof(struct kvm_pic), GFP_KERNEL); |
470 | if (!s) | 512 | if (!s) |
471 | return NULL; | 513 | return NULL; |
514 | spin_lock_init(&s->lock); | ||
515 | s->kvm = kvm; | ||
472 | s->pics[0].elcr_mask = 0xf8; | 516 | s->pics[0].elcr_mask = 0xf8; |
473 | s->pics[1].elcr_mask = 0xde; | 517 | s->pics[1].elcr_mask = 0xde; |
474 | s->irq_request = pic_irq_request; | 518 | s->irq_request = pic_irq_request; |
diff --git a/arch/x86/kvm/irq.h b/arch/x86/kvm/irq.h index f17c8f5bbf31..2bf32a03ceec 100644 --- a/arch/x86/kvm/irq.h +++ b/arch/x86/kvm/irq.h | |||
@@ -25,6 +25,7 @@ | |||
25 | #include <linux/mm_types.h> | 25 | #include <linux/mm_types.h> |
26 | #include <linux/hrtimer.h> | 26 | #include <linux/hrtimer.h> |
27 | #include <linux/kvm_host.h> | 27 | #include <linux/kvm_host.h> |
28 | #include <linux/spinlock.h> | ||
28 | 29 | ||
29 | #include "iodev.h" | 30 | #include "iodev.h" |
30 | #include "ioapic.h" | 31 | #include "ioapic.h" |
@@ -59,6 +60,10 @@ struct kvm_kpic_state { | |||
59 | }; | 60 | }; |
60 | 61 | ||
61 | struct kvm_pic { | 62 | struct kvm_pic { |
63 | spinlock_t lock; | ||
64 | bool wakeup_needed; | ||
65 | unsigned pending_acks; | ||
66 | struct kvm *kvm; | ||
62 | struct kvm_kpic_state pics[2]; /* 0 is master pic, 1 is slave pic */ | 67 | struct kvm_kpic_state pics[2]; /* 0 is master pic, 1 is slave pic */ |
63 | irq_request_func *irq_request; | 68 | irq_request_func *irq_request; |
64 | void *irq_request_opaque; | 69 | void *irq_request_opaque; |
@@ -87,6 +92,7 @@ void kvm_pic_reset(struct kvm_kpic_state *s); | |||
87 | void kvm_timer_intr_post(struct kvm_vcpu *vcpu, int vec); | 92 | void kvm_timer_intr_post(struct kvm_vcpu *vcpu, int vec); |
88 | void kvm_inject_pending_timer_irqs(struct kvm_vcpu *vcpu); | 93 | void kvm_inject_pending_timer_irqs(struct kvm_vcpu *vcpu); |
89 | void kvm_inject_apic_timer_irqs(struct kvm_vcpu *vcpu); | 94 | void kvm_inject_apic_timer_irqs(struct kvm_vcpu *vcpu); |
95 | void kvm_apic_nmi_wd_deliver(struct kvm_vcpu *vcpu); | ||
90 | void __kvm_migrate_apic_timer(struct kvm_vcpu *vcpu); | 96 | void __kvm_migrate_apic_timer(struct kvm_vcpu *vcpu); |
91 | void __kvm_migrate_pit_timer(struct kvm_vcpu *vcpu); | 97 | void __kvm_migrate_pit_timer(struct kvm_vcpu *vcpu); |
92 | void __kvm_migrate_timers(struct kvm_vcpu *vcpu); | 98 | void __kvm_migrate_timers(struct kvm_vcpu *vcpu); |
diff --git a/arch/x86/kvm/kvm_svm.h b/arch/x86/kvm/kvm_svm.h index 65ef0fc2c036..8e5ee99551f6 100644 --- a/arch/x86/kvm/kvm_svm.h +++ b/arch/x86/kvm/kvm_svm.h | |||
@@ -7,7 +7,7 @@ | |||
7 | #include <linux/kvm_host.h> | 7 | #include <linux/kvm_host.h> |
8 | #include <asm/msr.h> | 8 | #include <asm/msr.h> |
9 | 9 | ||
10 | #include "svm.h" | 10 | #include <asm/svm.h> |
11 | 11 | ||
12 | static const u32 host_save_user_msrs[] = { | 12 | static const u32 host_save_user_msrs[] = { |
13 | #ifdef CONFIG_X86_64 | 13 | #ifdef CONFIG_X86_64 |
diff --git a/arch/x86/kvm/lapic.c b/arch/x86/kvm/lapic.c index 0fc3cab48943..afac68c0815c 100644 --- a/arch/x86/kvm/lapic.c +++ b/arch/x86/kvm/lapic.c | |||
@@ -130,6 +130,11 @@ static inline int apic_lvtt_period(struct kvm_lapic *apic) | |||
130 | return apic_get_reg(apic, APIC_LVTT) & APIC_LVT_TIMER_PERIODIC; | 130 | return apic_get_reg(apic, APIC_LVTT) & APIC_LVT_TIMER_PERIODIC; |
131 | } | 131 | } |
132 | 132 | ||
133 | static inline int apic_lvt_nmi_mode(u32 lvt_val) | ||
134 | { | ||
135 | return (lvt_val & (APIC_MODE_MASK | APIC_LVT_MASKED)) == APIC_DM_NMI; | ||
136 | } | ||
137 | |||
133 | static unsigned int apic_lvt_mask[APIC_LVT_NUM] = { | 138 | static unsigned int apic_lvt_mask[APIC_LVT_NUM] = { |
134 | LVT_MASK | APIC_LVT_TIMER_PERIODIC, /* LVTT */ | 139 | LVT_MASK | APIC_LVT_TIMER_PERIODIC, /* LVTT */ |
135 | LVT_MASK | APIC_MODE_MASK, /* LVTTHMR */ | 140 | LVT_MASK | APIC_MODE_MASK, /* LVTTHMR */ |
@@ -354,6 +359,7 @@ static int __apic_accept_irq(struct kvm_lapic *apic, int delivery_mode, | |||
354 | 359 | ||
355 | case APIC_DM_NMI: | 360 | case APIC_DM_NMI: |
356 | kvm_inject_nmi(vcpu); | 361 | kvm_inject_nmi(vcpu); |
362 | kvm_vcpu_kick(vcpu); | ||
357 | break; | 363 | break; |
358 | 364 | ||
359 | case APIC_DM_INIT: | 365 | case APIC_DM_INIT: |
@@ -380,6 +386,14 @@ static int __apic_accept_irq(struct kvm_lapic *apic, int delivery_mode, | |||
380 | } | 386 | } |
381 | break; | 387 | break; |
382 | 388 | ||
389 | case APIC_DM_EXTINT: | ||
390 | /* | ||
391 | * Should only be called by kvm_apic_local_deliver() with LVT0, | ||
392 | * before the NMI watchdog was enabled. Already handled by | ||
393 | * kvm_apic_accept_pic_intr(). | ||
394 | */ | ||
395 | break; | ||
396 | |||
383 | default: | 397 | default: |
384 | printk(KERN_ERR "TODO: unsupported delivery mode %x\n", | 398 | printk(KERN_ERR "TODO: unsupported delivery mode %x\n", |
385 | delivery_mode); | 399 | delivery_mode); |
@@ -663,6 +677,20 @@ static void start_apic_timer(struct kvm_lapic *apic) | |||
663 | apic->timer.period))); | 677 | apic->timer.period))); |
664 | } | 678 | } |
665 | 679 | ||
680 | static void apic_manage_nmi_watchdog(struct kvm_lapic *apic, u32 lvt0_val) | ||
681 | { | ||
682 | int nmi_wd_enabled = apic_lvt_nmi_mode(apic_get_reg(apic, APIC_LVT0)); | ||
683 | |||
684 | if (apic_lvt_nmi_mode(lvt0_val)) { | ||
685 | if (!nmi_wd_enabled) { | ||
686 | apic_debug("Receive NMI setting on APIC_LVT0 " | ||
687 | "for cpu %d\n", apic->vcpu->vcpu_id); | ||
688 | apic->vcpu->kvm->arch.vapics_in_nmi_mode++; | ||
689 | } | ||
690 | } else if (nmi_wd_enabled) | ||
691 | apic->vcpu->kvm->arch.vapics_in_nmi_mode--; | ||
692 | } | ||
693 | |||
666 | static void apic_mmio_write(struct kvm_io_device *this, | 694 | static void apic_mmio_write(struct kvm_io_device *this, |
667 | gpa_t address, int len, const void *data) | 695 | gpa_t address, int len, const void *data) |
668 | { | 696 | { |
@@ -743,10 +771,11 @@ static void apic_mmio_write(struct kvm_io_device *this, | |||
743 | apic_set_reg(apic, APIC_ICR2, val & 0xff000000); | 771 | apic_set_reg(apic, APIC_ICR2, val & 0xff000000); |
744 | break; | 772 | break; |
745 | 773 | ||
774 | case APIC_LVT0: | ||
775 | apic_manage_nmi_watchdog(apic, val); | ||
746 | case APIC_LVTT: | 776 | case APIC_LVTT: |
747 | case APIC_LVTTHMR: | 777 | case APIC_LVTTHMR: |
748 | case APIC_LVTPC: | 778 | case APIC_LVTPC: |
749 | case APIC_LVT0: | ||
750 | case APIC_LVT1: | 779 | case APIC_LVT1: |
751 | case APIC_LVTERR: | 780 | case APIC_LVTERR: |
752 | /* TODO: Check vector */ | 781 | /* TODO: Check vector */ |
@@ -961,12 +990,26 @@ int apic_has_pending_timer(struct kvm_vcpu *vcpu) | |||
961 | return 0; | 990 | return 0; |
962 | } | 991 | } |
963 | 992 | ||
964 | static int __inject_apic_timer_irq(struct kvm_lapic *apic) | 993 | static int kvm_apic_local_deliver(struct kvm_lapic *apic, int lvt_type) |
994 | { | ||
995 | u32 reg = apic_get_reg(apic, lvt_type); | ||
996 | int vector, mode, trig_mode; | ||
997 | |||
998 | if (apic_hw_enabled(apic) && !(reg & APIC_LVT_MASKED)) { | ||
999 | vector = reg & APIC_VECTOR_MASK; | ||
1000 | mode = reg & APIC_MODE_MASK; | ||
1001 | trig_mode = reg & APIC_LVT_LEVEL_TRIGGER; | ||
1002 | return __apic_accept_irq(apic, mode, vector, 1, trig_mode); | ||
1003 | } | ||
1004 | return 0; | ||
1005 | } | ||
1006 | |||
1007 | void kvm_apic_nmi_wd_deliver(struct kvm_vcpu *vcpu) | ||
965 | { | 1008 | { |
966 | int vector; | 1009 | struct kvm_lapic *apic = vcpu->arch.apic; |
967 | 1010 | ||
968 | vector = apic_lvt_vector(apic, APIC_LVTT); | 1011 | if (apic) |
969 | return __apic_accept_irq(apic, APIC_DM_FIXED, vector, 1, 0); | 1012 | kvm_apic_local_deliver(apic, APIC_LVT0); |
970 | } | 1013 | } |
971 | 1014 | ||
972 | static enum hrtimer_restart apic_timer_fn(struct hrtimer *data) | 1015 | static enum hrtimer_restart apic_timer_fn(struct hrtimer *data) |
@@ -1061,9 +1104,8 @@ void kvm_inject_apic_timer_irqs(struct kvm_vcpu *vcpu) | |||
1061 | { | 1104 | { |
1062 | struct kvm_lapic *apic = vcpu->arch.apic; | 1105 | struct kvm_lapic *apic = vcpu->arch.apic; |
1063 | 1106 | ||
1064 | if (apic && apic_lvt_enabled(apic, APIC_LVTT) && | 1107 | if (apic && atomic_read(&apic->timer.pending) > 0) { |
1065 | atomic_read(&apic->timer.pending) > 0) { | 1108 | if (kvm_apic_local_deliver(apic, APIC_LVTT)) |
1066 | if (__inject_apic_timer_irq(apic)) | ||
1067 | atomic_dec(&apic->timer.pending); | 1109 | atomic_dec(&apic->timer.pending); |
1068 | } | 1110 | } |
1069 | } | 1111 | } |
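
apic_lvt_nmi_mode() in the lapic.c hunk counts an LVT0 entry as NMI mode only when the delivery-mode field says NMI and the mask bit is clear, which is why both fields go into one masked equality test. The predicate in isolation, with the register constants spelled out (values as conventionally defined for the local APIC; verify against apicdef.h before relying on them):

#include <stdio.h>

#define APIC_MODE_MASK  0x700u        /* bits 8-10: delivery mode field */
#define APIC_DM_NMI     0x400u        /* delivery mode 100b = NMI */
#define APIC_LVT_MASKED (1u << 16)    /* entry masked: nothing delivered */

static int lvt_nmi_mode(unsigned int lvt_val)
{
    /* True only if mode == NMI and the mask bit is 0: including
     * APIC_LVT_MASKED in the mask makes a set mask bit fail the test. */
    return (lvt_val & (APIC_MODE_MASK | APIC_LVT_MASKED)) == APIC_DM_NMI;
}

int main(void)
{
    printf("%d\n", lvt_nmi_mode(APIC_DM_NMI));                      /* 1 */
    printf("%d\n", lvt_nmi_mode(APIC_DM_NMI | APIC_LVT_MASKED));    /* 0 */
    printf("%d\n", lvt_nmi_mode(0x0));    /* fixed mode: 0 */
    return 0;
}
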
diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c index 410ddbc1aa2e..83f11c7474a1 100644 --- a/arch/x86/kvm/mmu.c +++ b/arch/x86/kvm/mmu.c | |||
@@ -17,7 +17,6 @@ | |||
17 | * | 17 | * |
18 | */ | 18 | */ |
19 | 19 | ||
20 | #include "vmx.h" | ||
21 | #include "mmu.h" | 20 | #include "mmu.h" |
22 | 21 | ||
23 | #include <linux/kvm_host.h> | 22 | #include <linux/kvm_host.h> |
@@ -33,6 +32,7 @@ | |||
33 | #include <asm/page.h> | 32 | #include <asm/page.h> |
34 | #include <asm/cmpxchg.h> | 33 | #include <asm/cmpxchg.h> |
35 | #include <asm/io.h> | 34 | #include <asm/io.h> |
35 | #include <asm/vmx.h> | ||
36 | 36 | ||
37 | /* | 37 | /* |
38 | * When setting this variable to true it enables Two-Dimensional-Paging | 38 | * When setting this variable to true it enables Two-Dimensional-Paging |
@@ -168,6 +168,7 @@ static u64 __read_mostly shadow_x_mask; /* mutual exclusive with nx_mask */ | |||
168 | static u64 __read_mostly shadow_user_mask; | 168 | static u64 __read_mostly shadow_user_mask; |
169 | static u64 __read_mostly shadow_accessed_mask; | 169 | static u64 __read_mostly shadow_accessed_mask; |
170 | static u64 __read_mostly shadow_dirty_mask; | 170 | static u64 __read_mostly shadow_dirty_mask; |
171 | static u64 __read_mostly shadow_mt_mask; | ||
171 | 172 | ||
172 | void kvm_mmu_set_nonpresent_ptes(u64 trap_pte, u64 notrap_pte) | 173 | void kvm_mmu_set_nonpresent_ptes(u64 trap_pte, u64 notrap_pte) |
173 | { | 174 | { |
@@ -183,13 +184,14 @@ void kvm_mmu_set_base_ptes(u64 base_pte) | |||
183 | EXPORT_SYMBOL_GPL(kvm_mmu_set_base_ptes); | 184 | EXPORT_SYMBOL_GPL(kvm_mmu_set_base_ptes); |
184 | 185 | ||
185 | void kvm_mmu_set_mask_ptes(u64 user_mask, u64 accessed_mask, | 186 | void kvm_mmu_set_mask_ptes(u64 user_mask, u64 accessed_mask, |
186 | u64 dirty_mask, u64 nx_mask, u64 x_mask) | 187 | u64 dirty_mask, u64 nx_mask, u64 x_mask, u64 mt_mask) |
187 | { | 188 | { |
188 | shadow_user_mask = user_mask; | 189 | shadow_user_mask = user_mask; |
189 | shadow_accessed_mask = accessed_mask; | 190 | shadow_accessed_mask = accessed_mask; |
190 | shadow_dirty_mask = dirty_mask; | 191 | shadow_dirty_mask = dirty_mask; |
191 | shadow_nx_mask = nx_mask; | 192 | shadow_nx_mask = nx_mask; |
192 | shadow_x_mask = x_mask; | 193 | shadow_x_mask = x_mask; |
194 | shadow_mt_mask = mt_mask; | ||
193 | } | 195 | } |
194 | EXPORT_SYMBOL_GPL(kvm_mmu_set_mask_ptes); | 196 | EXPORT_SYMBOL_GPL(kvm_mmu_set_mask_ptes); |
195 | 197 | ||
@@ -384,7 +386,9 @@ static void account_shadowed(struct kvm *kvm, gfn_t gfn) | |||
384 | { | 386 | { |
385 | int *write_count; | 387 | int *write_count; |
386 | 388 | ||
387 | write_count = slot_largepage_idx(gfn, gfn_to_memslot(kvm, gfn)); | 389 | gfn = unalias_gfn(kvm, gfn); |
390 | write_count = slot_largepage_idx(gfn, | ||
391 | gfn_to_memslot_unaliased(kvm, gfn)); | ||
388 | *write_count += 1; | 392 | *write_count += 1; |
389 | } | 393 | } |
390 | 394 | ||
@@ -392,16 +396,20 @@ static void unaccount_shadowed(struct kvm *kvm, gfn_t gfn) | |||
392 | { | 396 | { |
393 | int *write_count; | 397 | int *write_count; |
394 | 398 | ||
395 | write_count = slot_largepage_idx(gfn, gfn_to_memslot(kvm, gfn)); | 399 | gfn = unalias_gfn(kvm, gfn); |
400 | write_count = slot_largepage_idx(gfn, | ||
401 | gfn_to_memslot_unaliased(kvm, gfn)); | ||
396 | *write_count -= 1; | 402 | *write_count -= 1; |
397 | WARN_ON(*write_count < 0); | 403 | WARN_ON(*write_count < 0); |
398 | } | 404 | } |
399 | 405 | ||
400 | static int has_wrprotected_page(struct kvm *kvm, gfn_t gfn) | 406 | static int has_wrprotected_page(struct kvm *kvm, gfn_t gfn) |
401 | { | 407 | { |
402 | struct kvm_memory_slot *slot = gfn_to_memslot(kvm, gfn); | 408 | struct kvm_memory_slot *slot; |
403 | int *largepage_idx; | 409 | int *largepage_idx; |
404 | 410 | ||
411 | gfn = unalias_gfn(kvm, gfn); | ||
412 | slot = gfn_to_memslot_unaliased(kvm, gfn); | ||
405 | if (slot) { | 413 | if (slot) { |
406 | largepage_idx = slot_largepage_idx(gfn, slot); | 414 | largepage_idx = slot_largepage_idx(gfn, slot); |
407 | return *largepage_idx; | 415 | return *largepage_idx; |
@@ -613,7 +621,7 @@ static u64 *rmap_next(struct kvm *kvm, unsigned long *rmapp, u64 *spte) | |||
613 | return NULL; | 621 | return NULL; |
614 | } | 622 | } |
615 | 623 | ||
616 | static void rmap_write_protect(struct kvm *kvm, u64 gfn) | 624 | static int rmap_write_protect(struct kvm *kvm, u64 gfn) |
617 | { | 625 | { |
618 | unsigned long *rmapp; | 626 | unsigned long *rmapp; |
619 | u64 *spte; | 627 | u64 *spte; |
@@ -659,8 +667,7 @@ static void rmap_write_protect(struct kvm *kvm, u64 gfn) | |||
659 | spte = rmap_next(kvm, rmapp, spte); | 667 | spte = rmap_next(kvm, rmapp, spte); |
660 | } | 668 | } |
661 | 669 | ||
662 | if (write_protected) | 670 | return write_protected; |
663 | kvm_flush_remote_tlbs(kvm); | ||
664 | } | 671 | } |
665 | 672 | ||
666 | static int kvm_unmap_rmapp(struct kvm *kvm, unsigned long *rmapp) | 673 | static int kvm_unmap_rmapp(struct kvm *kvm, unsigned long *rmapp) |
@@ -786,9 +793,11 @@ static struct kvm_mmu_page *kvm_mmu_alloc_page(struct kvm_vcpu *vcpu, | |||
786 | sp->gfns = mmu_memory_cache_alloc(&vcpu->arch.mmu_page_cache, PAGE_SIZE); | 793 | sp->gfns = mmu_memory_cache_alloc(&vcpu->arch.mmu_page_cache, PAGE_SIZE); |
787 | set_page_private(virt_to_page(sp->spt), (unsigned long)sp); | 794 | set_page_private(virt_to_page(sp->spt), (unsigned long)sp); |
788 | list_add(&sp->link, &vcpu->kvm->arch.active_mmu_pages); | 795 | list_add(&sp->link, &vcpu->kvm->arch.active_mmu_pages); |
796 | INIT_LIST_HEAD(&sp->oos_link); | ||
789 | ASSERT(is_empty_shadow_page(sp->spt)); | 797 | ASSERT(is_empty_shadow_page(sp->spt)); |
790 | sp->slot_bitmap = 0; | 798 | bitmap_zero(sp->slot_bitmap, KVM_MEMORY_SLOTS + KVM_PRIVATE_MEM_SLOTS); |
791 | sp->multimapped = 0; | 799 | sp->multimapped = 0; |
800 | sp->global = 1; | ||
792 | sp->parent_pte = parent_pte; | 801 | sp->parent_pte = parent_pte; |
793 | --vcpu->kvm->arch.n_free_mmu_pages; | 802 | --vcpu->kvm->arch.n_free_mmu_pages; |
794 | return sp; | 803 | return sp; |
@@ -900,8 +909,9 @@ static void kvm_mmu_update_unsync_bitmap(u64 *spte) | |||
900 | struct kvm_mmu_page *sp = page_header(__pa(spte)); | 909 | struct kvm_mmu_page *sp = page_header(__pa(spte)); |
901 | 910 | ||
902 | index = spte - sp->spt; | 911 | index = spte - sp->spt; |
903 | __set_bit(index, sp->unsync_child_bitmap); | 912 | if (!__test_and_set_bit(index, sp->unsync_child_bitmap)) |
904 | sp->unsync_children = 1; | 913 | sp->unsync_children++; |
914 | WARN_ON(!sp->unsync_children); | ||
905 | } | 915 | } |
906 | 916 | ||
907 | static void kvm_mmu_update_parents_unsync(struct kvm_mmu_page *sp) | 917 | static void kvm_mmu_update_parents_unsync(struct kvm_mmu_page *sp) |
@@ -928,7 +938,6 @@ static void kvm_mmu_update_parents_unsync(struct kvm_mmu_page *sp) | |||
928 | 938 | ||
929 | static int unsync_walk_fn(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp) | 939 | static int unsync_walk_fn(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp) |
930 | { | 940 | { |
931 | sp->unsync_children = 1; | ||
932 | kvm_mmu_update_parents_unsync(sp); | 941 | kvm_mmu_update_parents_unsync(sp); |
933 | return 1; | 942 | return 1; |
934 | } | 943 | } |
@@ -959,38 +968,66 @@ static void nonpaging_invlpg(struct kvm_vcpu *vcpu, gva_t gva) | |||
959 | { | 968 | { |
960 | } | 969 | } |
961 | 970 | ||
971 | #define KVM_PAGE_ARRAY_NR 16 | ||
972 | |||
973 | struct kvm_mmu_pages { | ||
974 | struct mmu_page_and_offset { | ||
975 | struct kvm_mmu_page *sp; | ||
976 | unsigned int idx; | ||
977 | } page[KVM_PAGE_ARRAY_NR]; | ||
978 | unsigned int nr; | ||
979 | }; | ||
980 | |||
962 | #define for_each_unsync_children(bitmap, idx) \ | 981 | #define for_each_unsync_children(bitmap, idx) \ |
963 | for (idx = find_first_bit(bitmap, 512); \ | 982 | for (idx = find_first_bit(bitmap, 512); \ |
964 | idx < 512; \ | 983 | idx < 512; \ |
965 | idx = find_next_bit(bitmap, 512, idx+1)) | 984 | idx = find_next_bit(bitmap, 512, idx+1)) |
966 | 985 | ||
967 | static int mmu_unsync_walk(struct kvm_mmu_page *sp, | 986 | int mmu_pages_add(struct kvm_mmu_pages *pvec, struct kvm_mmu_page *sp, |
968 | struct kvm_unsync_walk *walker) | 987 | int idx) |
969 | { | 988 | { |
970 | int i, ret; | 989 | int i; |
971 | 990 | ||
972 | if (!sp->unsync_children) | 991 | if (sp->unsync) |
973 | return 0; | 992 | for (i=0; i < pvec->nr; i++) |
993 | if (pvec->page[i].sp == sp) | ||
994 | return 0; | ||
995 | |||
996 | pvec->page[pvec->nr].sp = sp; | ||
997 | pvec->page[pvec->nr].idx = idx; | ||
998 | pvec->nr++; | ||
999 | return (pvec->nr == KVM_PAGE_ARRAY_NR); | ||
1000 | } | ||
1001 | |||
1002 | static int __mmu_unsync_walk(struct kvm_mmu_page *sp, | ||
1003 | struct kvm_mmu_pages *pvec) | ||
1004 | { | ||
1005 | int i, ret, nr_unsync_leaf = 0; | ||
974 | 1006 | ||
975 | for_each_unsync_children(sp->unsync_child_bitmap, i) { | 1007 | for_each_unsync_children(sp->unsync_child_bitmap, i) { |
976 | u64 ent = sp->spt[i]; | 1008 | u64 ent = sp->spt[i]; |
977 | 1009 | ||
978 | if (is_shadow_present_pte(ent)) { | 1010 | if (is_shadow_present_pte(ent) && !is_large_pte(ent)) { |
979 | struct kvm_mmu_page *child; | 1011 | struct kvm_mmu_page *child; |
980 | child = page_header(ent & PT64_BASE_ADDR_MASK); | 1012 | child = page_header(ent & PT64_BASE_ADDR_MASK); |
981 | 1013 | ||
982 | if (child->unsync_children) { | 1014 | if (child->unsync_children) { |
983 | ret = mmu_unsync_walk(child, walker); | 1015 | if (mmu_pages_add(pvec, child, i)) |
984 | if (ret) | 1016 | return -ENOSPC; |
1017 | |||
1018 | ret = __mmu_unsync_walk(child, pvec); | ||
1019 | if (!ret) | ||
1020 | __clear_bit(i, sp->unsync_child_bitmap); | ||
1021 | else if (ret > 0) | ||
1022 | nr_unsync_leaf += ret; | ||
1023 | else | ||
985 | return ret; | 1024 | return ret; |
986 | __clear_bit(i, sp->unsync_child_bitmap); | ||
987 | } | 1025 | } |
988 | 1026 | ||
989 | if (child->unsync) { | 1027 | if (child->unsync) { |
990 | ret = walker->entry(child, walker); | 1028 | nr_unsync_leaf++; |
991 | __clear_bit(i, sp->unsync_child_bitmap); | 1029 | if (mmu_pages_add(pvec, child, i)) |
992 | if (ret) | 1030 | return -ENOSPC; |
993 | return ret; | ||
994 | } | 1031 | } |
995 | } | 1032 | } |
996 | } | 1033 | } |
@@ -998,7 +1035,17 @@ static int mmu_unsync_walk(struct kvm_mmu_page *sp, | |||
998 | if (find_first_bit(sp->unsync_child_bitmap, 512) == 512) | 1035 | if (find_first_bit(sp->unsync_child_bitmap, 512) == 512) |
999 | sp->unsync_children = 0; | 1036 | sp->unsync_children = 0; |
1000 | 1037 | ||
1001 | return 0; | 1038 | return nr_unsync_leaf; |
1039 | } | ||
1040 | |||
1041 | static int mmu_unsync_walk(struct kvm_mmu_page *sp, | ||
1042 | struct kvm_mmu_pages *pvec) | ||
1043 | { | ||
1044 | if (!sp->unsync_children) | ||
1045 | return 0; | ||
1046 | |||
1047 | mmu_pages_add(pvec, sp, 0); | ||
1048 | return __mmu_unsync_walk(sp, pvec); | ||
1002 | } | 1049 | } |
1003 | 1050 | ||
1004 | static struct kvm_mmu_page *kvm_mmu_lookup_page(struct kvm *kvm, gfn_t gfn) | 1051 | static struct kvm_mmu_page *kvm_mmu_lookup_page(struct kvm *kvm, gfn_t gfn) |
@@ -1021,10 +1068,18 @@ static struct kvm_mmu_page *kvm_mmu_lookup_page(struct kvm *kvm, gfn_t gfn) | |||
1021 | return NULL; | 1068 | return NULL; |
1022 | } | 1069 | } |
1023 | 1070 | ||
1071 | static void kvm_unlink_unsync_global(struct kvm *kvm, struct kvm_mmu_page *sp) | ||
1072 | { | ||
1073 | list_del(&sp->oos_link); | ||
1074 | --kvm->stat.mmu_unsync_global; | ||
1075 | } | ||
1076 | |||
1024 | static void kvm_unlink_unsync_page(struct kvm *kvm, struct kvm_mmu_page *sp) | 1077 | static void kvm_unlink_unsync_page(struct kvm *kvm, struct kvm_mmu_page *sp) |
1025 | { | 1078 | { |
1026 | WARN_ON(!sp->unsync); | 1079 | WARN_ON(!sp->unsync); |
1027 | sp->unsync = 0; | 1080 | sp->unsync = 0; |
1081 | if (sp->global) | ||
1082 | kvm_unlink_unsync_global(kvm, sp); | ||
1028 | --kvm->stat.mmu_unsync; | 1083 | --kvm->stat.mmu_unsync; |
1029 | } | 1084 | } |
1030 | 1085 | ||
@@ -1037,7 +1092,8 @@ static int kvm_sync_page(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp) | |||
1037 | return 1; | 1092 | return 1; |
1038 | } | 1093 | } |
1039 | 1094 | ||
1040 | rmap_write_protect(vcpu->kvm, sp->gfn); | 1095 | if (rmap_write_protect(vcpu->kvm, sp->gfn)) |
1096 | kvm_flush_remote_tlbs(vcpu->kvm); | ||
1041 | kvm_unlink_unsync_page(vcpu->kvm, sp); | 1097 | kvm_unlink_unsync_page(vcpu->kvm, sp); |
1042 | if (vcpu->arch.mmu.sync_page(vcpu, sp)) { | 1098 | if (vcpu->arch.mmu.sync_page(vcpu, sp)) { |
1043 | kvm_mmu_zap_page(vcpu->kvm, sp); | 1099 | kvm_mmu_zap_page(vcpu->kvm, sp); |
@@ -1048,30 +1104,89 @@ static int kvm_sync_page(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp) | |||
1048 | return 0; | 1104 | return 0; |
1049 | } | 1105 | } |
1050 | 1106 | ||
1051 | struct sync_walker { | 1107 | struct mmu_page_path { |
1052 | struct kvm_vcpu *vcpu; | 1108 | struct kvm_mmu_page *parent[PT64_ROOT_LEVEL-1]; |
1053 | struct kvm_unsync_walk walker; | 1109 | unsigned int idx[PT64_ROOT_LEVEL-1]; |
1054 | }; | 1110 | }; |
1055 | 1111 | ||
1056 | static int mmu_sync_fn(struct kvm_mmu_page *sp, struct kvm_unsync_walk *walk) | 1112 | #define for_each_sp(pvec, sp, parents, i) \ |
1113 | for (i = mmu_pages_next(&pvec, &parents, -1), \ | ||
1114 | sp = pvec.page[i].sp; \ | ||
1115 | i < pvec.nr && ({ sp = pvec.page[i].sp; 1;}); \ | ||
1116 | i = mmu_pages_next(&pvec, &parents, i)) | ||
1117 | |||
1118 | int mmu_pages_next(struct kvm_mmu_pages *pvec, struct mmu_page_path *parents, | ||
1119 | int i) | ||
1057 | { | 1120 | { |
1058 | struct sync_walker *sync_walk = container_of(walk, struct sync_walker, | 1121 | int n; |
1059 | walker); | ||
1060 | struct kvm_vcpu *vcpu = sync_walk->vcpu; | ||
1061 | 1122 | ||
1062 | kvm_sync_page(vcpu, sp); | 1123 | for (n = i+1; n < pvec->nr; n++) { |
1063 | return (need_resched() || spin_needbreak(&vcpu->kvm->mmu_lock)); | 1124 | struct kvm_mmu_page *sp = pvec->page[n].sp; |
1125 | |||
1126 | if (sp->role.level == PT_PAGE_TABLE_LEVEL) { | ||
1127 | parents->idx[0] = pvec->page[n].idx; | ||
1128 | return n; | ||
1129 | } | ||
1130 | |||
1131 | parents->parent[sp->role.level-2] = sp; | ||
1132 | parents->idx[sp->role.level-1] = pvec->page[n].idx; | ||
1133 | } | ||
1134 | |||
1135 | return n; | ||
1064 | } | 1136 | } |
1065 | 1137 | ||
1066 | static void mmu_sync_children(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp) | 1138 | void mmu_pages_clear_parents(struct mmu_page_path *parents) |
1067 | { | 1139 | { |
1068 | struct sync_walker walker = { | 1140 | struct kvm_mmu_page *sp; |
1069 | .walker = { .entry = mmu_sync_fn, }, | 1141 | unsigned int level = 0; |
1070 | .vcpu = vcpu, | 1142 | |
1071 | }; | 1143 | do { |
1144 | unsigned int idx = parents->idx[level]; | ||
1145 | |||
1146 | sp = parents->parent[level]; | ||
1147 | if (!sp) | ||
1148 | return; | ||
1149 | |||
1150 | --sp->unsync_children; | ||
1151 | WARN_ON((int)sp->unsync_children < 0); | ||
1152 | __clear_bit(idx, sp->unsync_child_bitmap); | ||
1153 | level++; | ||
1154 | } while (level < PT64_ROOT_LEVEL-1 && !sp->unsync_children); | ||
1155 | } | ||
1156 | |||
1157 | static void kvm_mmu_pages_init(struct kvm_mmu_page *parent, | ||
1158 | struct mmu_page_path *parents, | ||
1159 | struct kvm_mmu_pages *pvec) | ||
1160 | { | ||
1161 | parents->parent[parent->role.level-1] = NULL; | ||
1162 | pvec->nr = 0; | ||
1163 | } | ||
1164 | |||
1165 | static void mmu_sync_children(struct kvm_vcpu *vcpu, | ||
1166 | struct kvm_mmu_page *parent) | ||
1167 | { | ||
1168 | int i; | ||
1169 | struct kvm_mmu_page *sp; | ||
1170 | struct mmu_page_path parents; | ||
1171 | struct kvm_mmu_pages pages; | ||
1172 | |||
1173 | kvm_mmu_pages_init(parent, &parents, &pages); | ||
1174 | while (mmu_unsync_walk(parent, &pages)) { | ||
1175 | int protected = 0; | ||
1072 | 1176 | ||
1073 | while (mmu_unsync_walk(sp, &walker.walker)) | 1177 | for_each_sp(pages, sp, parents, i) |
1178 | protected |= rmap_write_protect(vcpu->kvm, sp->gfn); | ||
1179 | |||
1180 | if (protected) | ||
1181 | kvm_flush_remote_tlbs(vcpu->kvm); | ||
1182 | |||
1183 | for_each_sp(pages, sp, parents, i) { | ||
1184 | kvm_sync_page(vcpu, sp); | ||
1185 | mmu_pages_clear_parents(&parents); | ||
1186 | } | ||
1074 | cond_resched_lock(&vcpu->kvm->mmu_lock); | 1187 | cond_resched_lock(&vcpu->kvm->mmu_lock); |
1188 | kvm_mmu_pages_init(parent, &parents, &pages); | ||
1189 | } | ||
1075 | } | 1190 | } |
1076 | 1191 | ||
1077 | static struct kvm_mmu_page *kvm_mmu_get_page(struct kvm_vcpu *vcpu, | 1192 | static struct kvm_mmu_page *kvm_mmu_get_page(struct kvm_vcpu *vcpu, |
@@ -1129,7 +1244,8 @@ static struct kvm_mmu_page *kvm_mmu_get_page(struct kvm_vcpu *vcpu, | |||
1129 | sp->role = role; | 1244 | sp->role = role; |
1130 | hlist_add_head(&sp->hash_link, bucket); | 1245 | hlist_add_head(&sp->hash_link, bucket); |
1131 | if (!metaphysical) { | 1246 | if (!metaphysical) { |
1132 | rmap_write_protect(vcpu->kvm, gfn); | 1247 | if (rmap_write_protect(vcpu->kvm, gfn)) |
1248 | kvm_flush_remote_tlbs(vcpu->kvm); | ||
1133 | account_shadowed(vcpu->kvm, gfn); | 1249 | account_shadowed(vcpu->kvm, gfn); |
1134 | } | 1250 | } |
1135 | if (shadow_trap_nonpresent_pte != shadow_notrap_nonpresent_pte) | 1251 | if (shadow_trap_nonpresent_pte != shadow_notrap_nonpresent_pte) |
@@ -1153,6 +1269,8 @@ static int walk_shadow(struct kvm_shadow_walk *walker, | |||
1153 | if (level == PT32E_ROOT_LEVEL) { | 1269 | if (level == PT32E_ROOT_LEVEL) { |
1154 | shadow_addr = vcpu->arch.mmu.pae_root[(addr >> 30) & 3]; | 1270 | shadow_addr = vcpu->arch.mmu.pae_root[(addr >> 30) & 3]; |
1155 | shadow_addr &= PT64_BASE_ADDR_MASK; | 1271 | shadow_addr &= PT64_BASE_ADDR_MASK; |
1272 | if (!shadow_addr) | ||
1273 | return 1; | ||
1156 | --level; | 1274 | --level; |
1157 | } | 1275 | } |
1158 | 1276 | ||
@@ -1237,33 +1355,29 @@ static void kvm_mmu_unlink_parents(struct kvm *kvm, struct kvm_mmu_page *sp) | |||
1237 | } | 1355 | } |
1238 | } | 1356 | } |
1239 | 1357 | ||
1240 | struct zap_walker { | 1358 | static int mmu_zap_unsync_children(struct kvm *kvm, |
1241 | struct kvm_unsync_walk walker; | 1359 | struct kvm_mmu_page *parent) |
1242 | struct kvm *kvm; | ||
1243 | int zapped; | ||
1244 | }; | ||
1245 | |||
1246 | static int mmu_zap_fn(struct kvm_mmu_page *sp, struct kvm_unsync_walk *walk) | ||
1247 | { | 1360 | { |
1248 | struct zap_walker *zap_walk = container_of(walk, struct zap_walker, | 1361 | int i, zapped = 0; |
1249 | walker); | 1362 | struct mmu_page_path parents; |
1250 | kvm_mmu_zap_page(zap_walk->kvm, sp); | 1363 | struct kvm_mmu_pages pages; |
1251 | zap_walk->zapped = 1; | ||
1252 | return 0; | ||
1253 | } | ||
1254 | 1364 | ||
1255 | static int mmu_zap_unsync_children(struct kvm *kvm, struct kvm_mmu_page *sp) | 1365 | if (parent->role.level == PT_PAGE_TABLE_LEVEL) |
1256 | { | ||
1257 | struct zap_walker walker = { | ||
1258 | .walker = { .entry = mmu_zap_fn, }, | ||
1259 | .kvm = kvm, | ||
1260 | .zapped = 0, | ||
1261 | }; | ||
1262 | |||
1263 | if (sp->role.level == PT_PAGE_TABLE_LEVEL) | ||
1264 | return 0; | 1366 | return 0; |
1265 | mmu_unsync_walk(sp, &walker.walker); | 1367 | |
1266 | return walker.zapped; | 1368 | kvm_mmu_pages_init(parent, &parents, &pages); |
1369 | while (mmu_unsync_walk(parent, &pages)) { | ||
1370 | struct kvm_mmu_page *sp; | ||
1371 | |||
1372 | for_each_sp(pages, sp, parents, i) { | ||
1373 | kvm_mmu_zap_page(kvm, sp); | ||
1374 | mmu_pages_clear_parents(&parents); | ||
1375 | } | ||
1376 | zapped += pages.nr; | ||
1377 | kvm_mmu_pages_init(parent, &parents, &pages); | ||
1378 | } | ||
1379 | |||
1380 | return zapped; | ||
1267 | } | 1381 | } |
1268 | 1382 | ||
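The rewritten mmu_zap_unsync_children() above drops the callback-based zap_walker in favour of a gather-then-process loop: mmu_unsync_walk() fills a kvm_mmu_pages batch, the caller zaps each entry with for_each_sp(), and the walk restarts until nothing is left. A minimal userspace sketch of that pattern follows; the types, batch size, and helper names are hypothetical stand-ins, not KVM's.

```c
#include <stdio.h>

#define MAX_PAGES 16

struct page_batch {                       /* stand-in for struct kvm_mmu_pages */
	int nr;
	int gfns[MAX_PAGES];
};

/* Hypothetical walker: gathers up to MAX_PAGES unsynced children per pass. */
static int gather_unsync(struct page_batch *b)
{
	static int remaining = 5;         /* pretend five pages start out unsynced */

	b->nr = 0;
	while (remaining > 0 && b->nr < MAX_PAGES)
		b->gfns[b->nr++] = remaining--;
	return b->nr;                     /* 0 terminates the outer loop */
}

int main(void)
{
	struct page_batch batch;
	int i;

	/* Gather-then-process, mirroring mmu_zap_unsync_children()'s loop. */
	while (gather_unsync(&batch))
		for (i = 0; i < batch.nr; i++)
			printf("zap gfn %d\n", batch.gfns[i]);
	return 0;
}
```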
1269 | static int kvm_mmu_zap_page(struct kvm *kvm, struct kvm_mmu_page *sp) | 1383 | static int kvm_mmu_zap_page(struct kvm *kvm, struct kvm_mmu_page *sp) |
@@ -1362,7 +1476,7 @@ static void page_header_update_slot(struct kvm *kvm, void *pte, gfn_t gfn) | |||
1362 | int slot = memslot_id(kvm, gfn_to_memslot(kvm, gfn)); | 1476 | int slot = memslot_id(kvm, gfn_to_memslot(kvm, gfn)); |
1363 | struct kvm_mmu_page *sp = page_header(__pa(pte)); | 1477 | struct kvm_mmu_page *sp = page_header(__pa(pte)); |
1364 | 1478 | ||
1365 | __set_bit(slot, &sp->slot_bitmap); | 1479 | __set_bit(slot, sp->slot_bitmap); |
1366 | } | 1480 | } |
1367 | 1481 | ||
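Dropping the `&` in the __set_bit()/test_bit() calls here and below suggests slot_bitmap changed from a single unsigned long into a real bitmap array (e.g. via DECLARE_BITMAP), so the array itself decays to the pointer the bit helpers expect. A sketch under that assumption, with simplified helpers standing in for the kernel's:

```c
#include <stdio.h>
#include <string.h>

#define KVM_MEMORY_SLOTS 32
#define BITS_PER_LONG    (8 * sizeof(unsigned long))
#define BITS_TO_LONGS(n) (((n) + BITS_PER_LONG - 1) / BITS_PER_LONG)
/* Same shape as the kernel's DECLARE_BITMAP macro. */
#define DECLARE_BITMAP(name, bits) unsigned long name[BITS_TO_LONGS(bits)]

struct shadow_page {
	DECLARE_BITMAP(slot_bitmap, KVM_MEMORY_SLOTS);
};

static void set_bit_ul(int nr, unsigned long *map)
{
	map[nr / BITS_PER_LONG] |= 1UL << (nr % BITS_PER_LONG);
}

static int test_bit_ul(int nr, const unsigned long *map)
{
	return (map[nr / BITS_PER_LONG] >> (nr % BITS_PER_LONG)) & 1;
}

int main(void)
{
	struct shadow_page sp;

	memset(&sp, 0, sizeof(sp));
	set_bit_ul(8, sp.slot_bitmap);    /* array decays to pointer: no '&' */
	printf("slot 8: %d\n", test_bit_ul(8, sp.slot_bitmap));
	return 0;
}
```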
1368 | static void mmu_convert_notrap(struct kvm_mmu_page *sp) | 1482 | static void mmu_convert_notrap(struct kvm_mmu_page *sp) |
@@ -1393,6 +1507,110 @@ struct page *gva_to_page(struct kvm_vcpu *vcpu, gva_t gva) | |||
1393 | return page; | 1507 | return page; |
1394 | } | 1508 | } |
1395 | 1509 | ||
1510 | /* | ||
1511 | * The function is based on mtrr_type_lookup() in | ||
1512 | * arch/x86/kernel/cpu/mtrr/generic.c | ||
1513 | */ | ||
1514 | static int get_mtrr_type(struct mtrr_state_type *mtrr_state, | ||
1515 | u64 start, u64 end) | ||
1516 | { | ||
1517 | int i; | ||
1518 | u64 base, mask; | ||
1519 | u8 prev_match, curr_match; | ||
1520 | int num_var_ranges = KVM_NR_VAR_MTRR; | ||
1521 | |||
1522 | if (!mtrr_state->enabled) | ||
1523 | return 0xFF; | ||
1524 | |||
1525 | /* Make end inclusive, instead of exclusive */ | ||
1526 | end--; | ||
1527 | |||
1528 | /* Look in fixed ranges. Just return the type as per start */ | ||
1529 | if (mtrr_state->have_fixed && (start < 0x100000)) { | ||
1530 | int idx; | ||
1531 | |||
1532 | if (start < 0x80000) { | ||
1533 | idx = 0; | ||
1534 | idx += (start >> 16); | ||
1535 | return mtrr_state->fixed_ranges[idx]; | ||
1536 | } else if (start < 0xC0000) { | ||
1537 | idx = 1 * 8; | ||
1538 | idx += ((start - 0x80000) >> 14); | ||
1539 | return mtrr_state->fixed_ranges[idx]; | ||
1540 | } else if (start < 0x1000000) { | ||
1541 | idx = 3 * 8; | ||
1542 | idx += ((start - 0xC0000) >> 12); | ||
1543 | return mtrr_state->fixed_ranges[idx]; | ||
1544 | } | ||
1545 | } | ||
1546 | |||
1547 | /* | ||
1548 | * Look in variable ranges | ||
1549 | * Look for multiple ranges matching this address and pick the type | ||
1550 | * as per MTRR precedence | ||
1551 | */ | ||
1552 | if (!(mtrr_state->enabled & 2)) | ||
1553 | return mtrr_state->def_type; | ||
1554 | |||
1555 | prev_match = 0xFF; | ||
1556 | for (i = 0; i < num_var_ranges; ++i) { | ||
1557 | unsigned short start_state, end_state; | ||
1558 | |||
1559 | if (!(mtrr_state->var_ranges[i].mask_lo & (1 << 11))) | ||
1560 | continue; | ||
1561 | |||
1562 | base = (((u64)mtrr_state->var_ranges[i].base_hi) << 32) + | ||
1563 | (mtrr_state->var_ranges[i].base_lo & PAGE_MASK); | ||
1564 | mask = (((u64)mtrr_state->var_ranges[i].mask_hi) << 32) + | ||
1565 | (mtrr_state->var_ranges[i].mask_lo & PAGE_MASK); | ||
1566 | |||
1567 | start_state = ((start & mask) == (base & mask)); | ||
1568 | end_state = ((end & mask) == (base & mask)); | ||
1569 | if (start_state != end_state) | ||
1570 | return 0xFE; | ||
1571 | |||
1572 | if ((start & mask) != (base & mask)) | ||
1573 | continue; | ||
1574 | |||
1575 | curr_match = mtrr_state->var_ranges[i].base_lo & 0xff; | ||
1576 | if (prev_match == 0xFF) { | ||
1577 | prev_match = curr_match; | ||
1578 | continue; | ||
1579 | } | ||
1580 | |||
1581 | if (prev_match == MTRR_TYPE_UNCACHABLE || | ||
1582 | curr_match == MTRR_TYPE_UNCACHABLE) | ||
1583 | return MTRR_TYPE_UNCACHABLE; | ||
1584 | |||
1585 | if ((prev_match == MTRR_TYPE_WRBACK && | ||
1586 | curr_match == MTRR_TYPE_WRTHROUGH) || | ||
1587 | (prev_match == MTRR_TYPE_WRTHROUGH && | ||
1588 | curr_match == MTRR_TYPE_WRBACK)) { | ||
1589 | prev_match = MTRR_TYPE_WRTHROUGH; | ||
1590 | curr_match = MTRR_TYPE_WRTHROUGH; | ||
1591 | } | ||
1592 | |||
1593 | if (prev_match != curr_match) | ||
1594 | return MTRR_TYPE_UNCACHABLE; | ||
1595 | } | ||
1596 | |||
1597 | if (prev_match != 0xFF) | ||
1598 | return prev_match; | ||
1599 | |||
1600 | return mtrr_state->def_type; | ||
1601 | } | ||
1602 | |||
1603 | static u8 get_memory_type(struct kvm_vcpu *vcpu, gfn_t gfn) | ||
1604 | { | ||
1605 | u8 mtrr; | ||
1606 | |||
1607 | mtrr = get_mtrr_type(&vcpu->arch.mtrr_state, gfn << PAGE_SHIFT, | ||
1608 | (gfn << PAGE_SHIFT) + PAGE_SIZE); | ||
1609 | if (mtrr == 0xfe || mtrr == 0xff) | ||
1610 | mtrr = MTRR_TYPE_WRBACK; | ||
1611 | return mtrr; | ||
1612 | } | ||
1613 | |||
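The fixed-range index arithmetic in get_mtrr_type() above follows the architectural layout of the fixed MTRRs: eight 64KiB entries covering 0x00000-0x7FFFF, sixteen 16KiB entries up to 0xBFFFF, then sixty-four 4KiB entries up to 0xFFFFF. The standalone restatement below reproduces just that calculation for illustration:

```c
#include <stdio.h>

/*
 * Fixed-range MTRR index, as computed in get_mtrr_type() above:
 * 8 x 64KiB entries, then 16 x 16KiB entries (starting at index 8),
 * then 64 x 4KiB entries (starting at index 24).
 */
static int fixed_mtrr_index(unsigned long addr)
{
	if (addr < 0x80000)
		return addr >> 16;                    /* MTRRfix64K_00000 */
	if (addr < 0xC0000)
		return 8 + ((addr - 0x80000) >> 14);  /* MTRRfix16K_*     */
	return 24 + ((addr - 0xC0000) >> 12);         /* MTRRfix4K_*      */
}

int main(void)
{
	printf("0x00000 -> %d\n", fixed_mtrr_index(0x00000)); /* 0  */
	printf("0x9C000 -> %d\n", fixed_mtrr_index(0x9C000)); /* 15 */
	printf("0xF0000 -> %d\n", fixed_mtrr_index(0xF0000)); /* 72 */
	return 0;
}
```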
1396 | static int kvm_unsync_page(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp) | 1614 | static int kvm_unsync_page(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp) |
1397 | { | 1615 | { |
1398 | unsigned index; | 1616 | unsigned index; |
@@ -1409,9 +1627,15 @@ static int kvm_unsync_page(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp) | |||
1409 | if (s->role.word != sp->role.word) | 1627 | if (s->role.word != sp->role.word) |
1410 | return 1; | 1628 | return 1; |
1411 | } | 1629 | } |
1412 | kvm_mmu_mark_parents_unsync(vcpu, sp); | ||
1413 | ++vcpu->kvm->stat.mmu_unsync; | 1630 | ++vcpu->kvm->stat.mmu_unsync; |
1414 | sp->unsync = 1; | 1631 | sp->unsync = 1; |
1632 | |||
1633 | if (sp->global) { | ||
1634 | list_add(&sp->oos_link, &vcpu->kvm->arch.oos_global_pages); | ||
1635 | ++vcpu->kvm->stat.mmu_unsync_global; | ||
1636 | } else | ||
1637 | kvm_mmu_mark_parents_unsync(vcpu, sp); | ||
1638 | |||
1415 | mmu_convert_notrap(sp); | 1639 | mmu_convert_notrap(sp); |
1416 | return 0; | 1640 | return 0; |
1417 | } | 1641 | } |
@@ -1437,11 +1661,24 @@ static int mmu_need_write_protect(struct kvm_vcpu *vcpu, gfn_t gfn, | |||
1437 | static int set_spte(struct kvm_vcpu *vcpu, u64 *shadow_pte, | 1661 | static int set_spte(struct kvm_vcpu *vcpu, u64 *shadow_pte, |
1438 | unsigned pte_access, int user_fault, | 1662 | unsigned pte_access, int user_fault, |
1439 | int write_fault, int dirty, int largepage, | 1663 | int write_fault, int dirty, int largepage, |
1440 | gfn_t gfn, pfn_t pfn, bool speculative, | 1664 | int global, gfn_t gfn, pfn_t pfn, bool speculative, |
1441 | bool can_unsync) | 1665 | bool can_unsync) |
1442 | { | 1666 | { |
1443 | u64 spte; | 1667 | u64 spte; |
1444 | int ret = 0; | 1668 | int ret = 0; |
1669 | u64 mt_mask = shadow_mt_mask; | ||
1670 | struct kvm_mmu_page *sp = page_header(__pa(shadow_pte)); | ||
1671 | |||
1672 | if (!(vcpu->arch.cr4 & X86_CR4_PGE)) | ||
1673 | global = 0; | ||
1674 | if (!global && sp->global) { | ||
1675 | sp->global = 0; | ||
1676 | if (sp->unsync) { | ||
1677 | kvm_unlink_unsync_global(vcpu->kvm, sp); | ||
1678 | kvm_mmu_mark_parents_unsync(vcpu, sp); | ||
1679 | } | ||
1680 | } | ||
1681 | |||
1445 | /* | 1682 | /* |
1446 | * We don't set the accessed bit, since we sometimes want to see | 1683 | * We don't set the accessed bit, since we sometimes want to see |
1447 | * whether the guest actually used the pte (in order to detect | 1684 | * whether the guest actually used the pte (in order to detect |
@@ -1460,6 +1697,11 @@ static int set_spte(struct kvm_vcpu *vcpu, u64 *shadow_pte, | |||
1460 | spte |= shadow_user_mask; | 1697 | spte |= shadow_user_mask; |
1461 | if (largepage) | 1698 | if (largepage) |
1462 | spte |= PT_PAGE_SIZE_MASK; | 1699 | spte |= PT_PAGE_SIZE_MASK; |
1700 | if (mt_mask) { | ||
1701 | mt_mask = get_memory_type(vcpu, gfn) << | ||
1702 | kvm_x86_ops->get_mt_mask_shift(); | ||
1703 | spte |= mt_mask; | ||
1704 | } | ||
1463 | 1705 | ||
1464 | spte |= (u64)pfn << PAGE_SHIFT; | 1706 | spte |= (u64)pfn << PAGE_SHIFT; |
1465 | 1707 | ||
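When shadow_mt_mask is non-zero, set_spte() folds the guest's effective memory type into the spte at a vendor-specific bit position. On EPT that position is bits 5:3 of the PTE (VMX_EPT_MT_EPTE_SHIFT, per vmx_get_mt_mask_shift() later in this patch), while svm_get_mt_mask_shift() returns 0 and shadow_mt_mask presumably stays zero on SVM, so the branch is skipped there. A sketch of the bit placement, assuming the EPT case with a shift of 3:

```c
#include <stdio.h>
#include <stdint.h>

#define VMX_EPT_MT_EPTE_SHIFT 3  /* EPT PTE memory type occupies bits 5:3 */
#define MTRR_TYPE_WRBACK      6

int main(void)
{
	uint64_t spte = 0;

	/* Mirrors the mt_mask step in set_spte() for a write-back mapping. */
	spte |= (uint64_t)MTRR_TYPE_WRBACK << VMX_EPT_MT_EPTE_SHIFT;
	printf("spte = %#llx, memory type = %llu\n",
	       (unsigned long long)spte,
	       (unsigned long long)((spte >> VMX_EPT_MT_EPTE_SHIFT) & 7));
	return 0;
}
```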
@@ -1474,6 +1716,15 @@ static int set_spte(struct kvm_vcpu *vcpu, u64 *shadow_pte, | |||
1474 | 1716 | ||
1475 | spte |= PT_WRITABLE_MASK; | 1717 | spte |= PT_WRITABLE_MASK; |
1476 | 1718 | ||
1719 | /* | ||
1720 | * Optimization: for pte sync, if spte was writable the hash | ||
1721 | * lookup is unnecessary (and expensive). Write protection | ||
1722 | * is the responsibility of mmu_get_page / kvm_sync_page. | ||
1723 | * Same reasoning can be applied to dirty page accounting. | ||
1724 | */ | ||
1725 | if (!can_unsync && is_writeble_pte(*shadow_pte)) | ||
1726 | goto set_pte; | ||
1727 | |||
1477 | if (mmu_need_write_protect(vcpu, gfn, can_unsync)) { | 1728 | if (mmu_need_write_protect(vcpu, gfn, can_unsync)) { |
1478 | pgprintk("%s: found shadow page for %lx, marking ro\n", | 1729 | pgprintk("%s: found shadow page for %lx, marking ro\n", |
1479 | __func__, gfn); | 1730 | __func__, gfn); |
@@ -1495,8 +1746,8 @@ set_pte: | |||
1495 | static void mmu_set_spte(struct kvm_vcpu *vcpu, u64 *shadow_pte, | 1746 | static void mmu_set_spte(struct kvm_vcpu *vcpu, u64 *shadow_pte, |
1496 | unsigned pt_access, unsigned pte_access, | 1747 | unsigned pt_access, unsigned pte_access, |
1497 | int user_fault, int write_fault, int dirty, | 1748 | int user_fault, int write_fault, int dirty, |
1498 | int *ptwrite, int largepage, gfn_t gfn, | 1749 | int *ptwrite, int largepage, int global, |
1499 | pfn_t pfn, bool speculative) | 1750 | gfn_t gfn, pfn_t pfn, bool speculative) |
1500 | { | 1751 | { |
1501 | int was_rmapped = 0; | 1752 | int was_rmapped = 0; |
1502 | int was_writeble = is_writeble_pte(*shadow_pte); | 1753 | int was_writeble = is_writeble_pte(*shadow_pte); |
@@ -1529,7 +1780,7 @@ static void mmu_set_spte(struct kvm_vcpu *vcpu, u64 *shadow_pte, | |||
1529 | } | 1780 | } |
1530 | } | 1781 | } |
1531 | if (set_spte(vcpu, shadow_pte, pte_access, user_fault, write_fault, | 1782 | if (set_spte(vcpu, shadow_pte, pte_access, user_fault, write_fault, |
1532 | dirty, largepage, gfn, pfn, speculative, true)) { | 1783 | dirty, largepage, global, gfn, pfn, speculative, true)) { |
1533 | if (write_fault) | 1784 | if (write_fault) |
1534 | *ptwrite = 1; | 1785 | *ptwrite = 1; |
1535 | kvm_x86_ops->tlb_flush(vcpu); | 1786 | kvm_x86_ops->tlb_flush(vcpu); |
@@ -1586,7 +1837,7 @@ static int direct_map_entry(struct kvm_shadow_walk *_walk, | |||
1586 | || (walk->largepage && level == PT_DIRECTORY_LEVEL)) { | 1837 | || (walk->largepage && level == PT_DIRECTORY_LEVEL)) { |
1587 | mmu_set_spte(vcpu, sptep, ACC_ALL, ACC_ALL, | 1838 | mmu_set_spte(vcpu, sptep, ACC_ALL, ACC_ALL, |
1588 | 0, walk->write, 1, &walk->pt_write, | 1839 | 0, walk->write, 1, &walk->pt_write, |
1589 | walk->largepage, gfn, walk->pfn, false); | 1840 | walk->largepage, 0, gfn, walk->pfn, false); |
1590 | ++vcpu->stat.pf_fixed; | 1841 | ++vcpu->stat.pf_fixed; |
1591 | return 1; | 1842 | return 1; |
1592 | } | 1843 | } |
@@ -1773,6 +2024,15 @@ static void mmu_sync_roots(struct kvm_vcpu *vcpu) | |||
1773 | } | 2024 | } |
1774 | } | 2025 | } |
1775 | 2026 | ||
2027 | static void mmu_sync_global(struct kvm_vcpu *vcpu) | ||
2028 | { | ||
2029 | struct kvm *kvm = vcpu->kvm; | ||
2030 | struct kvm_mmu_page *sp, *n; | ||
2031 | |||
2032 | list_for_each_entry_safe(sp, n, &kvm->arch.oos_global_pages, oos_link) | ||
2033 | kvm_sync_page(vcpu, sp); | ||
2034 | } | ||
2035 | |||
1776 | void kvm_mmu_sync_roots(struct kvm_vcpu *vcpu) | 2036 | void kvm_mmu_sync_roots(struct kvm_vcpu *vcpu) |
1777 | { | 2037 | { |
1778 | spin_lock(&vcpu->kvm->mmu_lock); | 2038 | spin_lock(&vcpu->kvm->mmu_lock); |
@@ -1780,6 +2040,13 @@ void kvm_mmu_sync_roots(struct kvm_vcpu *vcpu) | |||
1780 | spin_unlock(&vcpu->kvm->mmu_lock); | 2040 | spin_unlock(&vcpu->kvm->mmu_lock); |
1781 | } | 2041 | } |
1782 | 2042 | ||
2043 | void kvm_mmu_sync_global(struct kvm_vcpu *vcpu) | ||
2044 | { | ||
2045 | spin_lock(&vcpu->kvm->mmu_lock); | ||
2046 | mmu_sync_global(vcpu); | ||
2047 | spin_unlock(&vcpu->kvm->mmu_lock); | ||
2048 | } | ||
2049 | |||
1783 | static gpa_t nonpaging_gva_to_gpa(struct kvm_vcpu *vcpu, gva_t vaddr) | 2050 | static gpa_t nonpaging_gva_to_gpa(struct kvm_vcpu *vcpu, gva_t vaddr) |
1784 | { | 2051 | { |
1785 | return vaddr; | 2052 | return vaddr; |
@@ -2178,7 +2445,8 @@ static void kvm_mmu_access_page(struct kvm_vcpu *vcpu, gfn_t gfn) | |||
2178 | } | 2445 | } |
2179 | 2446 | ||
2180 | void kvm_mmu_pte_write(struct kvm_vcpu *vcpu, gpa_t gpa, | 2447 | void kvm_mmu_pte_write(struct kvm_vcpu *vcpu, gpa_t gpa, |
2181 | const u8 *new, int bytes) | 2448 | const u8 *new, int bytes, |
2449 | bool guest_initiated) | ||
2182 | { | 2450 | { |
2183 | gfn_t gfn = gpa >> PAGE_SHIFT; | 2451 | gfn_t gfn = gpa >> PAGE_SHIFT; |
2184 | struct kvm_mmu_page *sp; | 2452 | struct kvm_mmu_page *sp; |
@@ -2204,15 +2472,17 @@ void kvm_mmu_pte_write(struct kvm_vcpu *vcpu, gpa_t gpa, | |||
2204 | kvm_mmu_free_some_pages(vcpu); | 2472 | kvm_mmu_free_some_pages(vcpu); |
2205 | ++vcpu->kvm->stat.mmu_pte_write; | 2473 | ++vcpu->kvm->stat.mmu_pte_write; |
2206 | kvm_mmu_audit(vcpu, "pre pte write"); | 2474 | kvm_mmu_audit(vcpu, "pre pte write"); |
2207 | if (gfn == vcpu->arch.last_pt_write_gfn | 2475 | if (guest_initiated) { |
2208 | && !last_updated_pte_accessed(vcpu)) { | 2476 | if (gfn == vcpu->arch.last_pt_write_gfn |
2209 | ++vcpu->arch.last_pt_write_count; | 2477 | && !last_updated_pte_accessed(vcpu)) { |
2210 | if (vcpu->arch.last_pt_write_count >= 3) | 2478 | ++vcpu->arch.last_pt_write_count; |
2211 | flooded = 1; | 2479 | if (vcpu->arch.last_pt_write_count >= 3) |
2212 | } else { | 2480 | flooded = 1; |
2213 | vcpu->arch.last_pt_write_gfn = gfn; | 2481 | } else { |
2214 | vcpu->arch.last_pt_write_count = 1; | 2482 | vcpu->arch.last_pt_write_gfn = gfn; |
2215 | vcpu->arch.last_pte_updated = NULL; | 2483 | vcpu->arch.last_pt_write_count = 1; |
2484 | vcpu->arch.last_pte_updated = NULL; | ||
2485 | } | ||
2216 | } | 2486 | } |
2217 | index = kvm_page_table_hashfn(gfn); | 2487 | index = kvm_page_table_hashfn(gfn); |
2218 | bucket = &vcpu->kvm->arch.mmu_page_hash[index]; | 2488 | bucket = &vcpu->kvm->arch.mmu_page_hash[index]; |
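Gating the write-flood heuristic on guest_initiated means only real guest writes advance the per-gfn counter; KVM's own calls (the dirty-bit update in the guest walker and the new invlpg path below, both of which pass 0) no longer count toward unshadowing. A simplified model of the counter, omitting the last_updated_pte_accessed() check the real code also applies:

```c
#include <stdio.h>

/* Simplified model of the pte-write flood detector in
 * kvm_mmu_pte_write(); real KVM also consults
 * last_updated_pte_accessed() before counting. */
struct flood_state {
	unsigned long last_gfn;
	int count;
};

static int note_write(struct flood_state *f, unsigned long gfn,
		      int guest_initiated)
{
	if (!guest_initiated)
		return 0;               /* KVM-internal writes don't count */
	if (gfn == f->last_gfn) {
		++f->count;
	} else {
		f->last_gfn = gfn;
		f->count = 1;
	}
	return f->count >= 3;           /* flooded: stop shadowing the page */
}

int main(void)
{
	struct flood_state f = { 0, 0 };

	note_write(&f, 42, 1);
	note_write(&f, 42, 0);          /* e.g. from the invlpg path */
	note_write(&f, 42, 1);
	printf("flooded: %d\n", note_write(&f, 42, 1));
	return 0;
}
```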
@@ -2352,9 +2622,7 @@ EXPORT_SYMBOL_GPL(kvm_mmu_page_fault); | |||
2352 | 2622 | ||
2353 | void kvm_mmu_invlpg(struct kvm_vcpu *vcpu, gva_t gva) | 2623 | void kvm_mmu_invlpg(struct kvm_vcpu *vcpu, gva_t gva) |
2354 | { | 2624 | { |
2355 | spin_lock(&vcpu->kvm->mmu_lock); | ||
2356 | vcpu->arch.mmu.invlpg(vcpu, gva); | 2625 | vcpu->arch.mmu.invlpg(vcpu, gva); |
2357 | spin_unlock(&vcpu->kvm->mmu_lock); | ||
2358 | kvm_mmu_flush_tlb(vcpu); | 2626 | kvm_mmu_flush_tlb(vcpu); |
2359 | ++vcpu->stat.invlpg; | 2627 | ++vcpu->stat.invlpg; |
2360 | } | 2628 | } |
@@ -2451,7 +2719,7 @@ void kvm_mmu_slot_remove_write_access(struct kvm *kvm, int slot) | |||
2451 | int i; | 2719 | int i; |
2452 | u64 *pt; | 2720 | u64 *pt; |
2453 | 2721 | ||
2454 | if (!test_bit(slot, &sp->slot_bitmap)) | 2722 | if (!test_bit(slot, sp->slot_bitmap)) |
2455 | continue; | 2723 | continue; |
2456 | 2724 | ||
2457 | pt = sp->spt; | 2725 | pt = sp->spt; |
@@ -2860,8 +3128,8 @@ static void audit_write_protection(struct kvm_vcpu *vcpu) | |||
2860 | if (sp->role.metaphysical) | 3128 | if (sp->role.metaphysical) |
2861 | continue; | 3129 | continue; |
2862 | 3130 | ||
2863 | slot = gfn_to_memslot(vcpu->kvm, sp->gfn); | ||
2864 | gfn = unalias_gfn(vcpu->kvm, sp->gfn); | 3131 | gfn = unalias_gfn(vcpu->kvm, sp->gfn); |
3132 | slot = gfn_to_memslot_unaliased(vcpu->kvm, sp->gfn); | ||
2865 | rmapp = &slot->rmap[gfn - slot->base_gfn]; | 3133 | rmapp = &slot->rmap[gfn - slot->base_gfn]; |
2866 | if (*rmapp) | 3134 | if (*rmapp) |
2867 | printk(KERN_ERR "%s: (%s) shadow page has writable" | 3135 | printk(KERN_ERR "%s: (%s) shadow page has writable" |
diff --git a/arch/x86/kvm/paging_tmpl.h b/arch/x86/kvm/paging_tmpl.h index 84eee43bbe74..9fd78b6e17ad 100644 --- a/arch/x86/kvm/paging_tmpl.h +++ b/arch/x86/kvm/paging_tmpl.h | |||
@@ -82,6 +82,7 @@ struct shadow_walker { | |||
82 | int *ptwrite; | 82 | int *ptwrite; |
83 | pfn_t pfn; | 83 | pfn_t pfn; |
84 | u64 *sptep; | 84 | u64 *sptep; |
85 | gpa_t pte_gpa; | ||
85 | }; | 86 | }; |
86 | 87 | ||
87 | static gfn_t gpte_to_gfn(pt_element_t gpte) | 88 | static gfn_t gpte_to_gfn(pt_element_t gpte) |
@@ -222,7 +223,7 @@ walk: | |||
222 | if (ret) | 223 | if (ret) |
223 | goto walk; | 224 | goto walk; |
224 | pte |= PT_DIRTY_MASK; | 225 | pte |= PT_DIRTY_MASK; |
225 | kvm_mmu_pte_write(vcpu, pte_gpa, (u8 *)&pte, sizeof(pte)); | 226 | kvm_mmu_pte_write(vcpu, pte_gpa, (u8 *)&pte, sizeof(pte), 0); |
226 | walker->ptes[walker->level - 1] = pte; | 227 | walker->ptes[walker->level - 1] = pte; |
227 | } | 228 | } |
228 | 229 | ||
@@ -274,7 +275,8 @@ static void FNAME(update_pte)(struct kvm_vcpu *vcpu, struct kvm_mmu_page *page, | |||
274 | return; | 275 | return; |
275 | kvm_get_pfn(pfn); | 276 | kvm_get_pfn(pfn); |
276 | mmu_set_spte(vcpu, spte, page->role.access, pte_access, 0, 0, | 277 | mmu_set_spte(vcpu, spte, page->role.access, pte_access, 0, 0, |
277 | gpte & PT_DIRTY_MASK, NULL, largepage, gpte_to_gfn(gpte), | 278 | gpte & PT_DIRTY_MASK, NULL, largepage, |
279 | gpte & PT_GLOBAL_MASK, gpte_to_gfn(gpte), | ||
278 | pfn, true); | 280 | pfn, true); |
279 | } | 281 | } |
280 | 282 | ||
@@ -301,8 +303,9 @@ static int FNAME(shadow_walk_entry)(struct kvm_shadow_walk *_sw, | |||
301 | mmu_set_spte(vcpu, sptep, access, gw->pte_access & access, | 303 | mmu_set_spte(vcpu, sptep, access, gw->pte_access & access, |
302 | sw->user_fault, sw->write_fault, | 304 | sw->user_fault, sw->write_fault, |
303 | gw->ptes[gw->level-1] & PT_DIRTY_MASK, | 305 | gw->ptes[gw->level-1] & PT_DIRTY_MASK, |
304 | sw->ptwrite, sw->largepage, gw->gfn, sw->pfn, | 306 | sw->ptwrite, sw->largepage, |
305 | false); | 307 | gw->ptes[gw->level-1] & PT_GLOBAL_MASK, |
308 | gw->gfn, sw->pfn, false); | ||
306 | sw->sptep = sptep; | 309 | sw->sptep = sptep; |
307 | return 1; | 310 | return 1; |
308 | } | 311 | } |
@@ -466,10 +469,22 @@ static int FNAME(shadow_invlpg_entry)(struct kvm_shadow_walk *_sw, | |||
466 | struct kvm_vcpu *vcpu, u64 addr, | 469 | struct kvm_vcpu *vcpu, u64 addr, |
467 | u64 *sptep, int level) | 470 | u64 *sptep, int level) |
468 | { | 471 | { |
472 | struct shadow_walker *sw = | ||
473 | container_of(_sw, struct shadow_walker, walker); | ||
469 | 474 | ||
470 | if (level == PT_PAGE_TABLE_LEVEL) { | 475 | /* FIXME: properly handle invlpg on large guest pages */ |
471 | if (is_shadow_present_pte(*sptep)) | 476 | if (level == PT_PAGE_TABLE_LEVEL || |
477 | ((level == PT_DIRECTORY_LEVEL) && is_large_pte(*sptep))) { | ||
478 | struct kvm_mmu_page *sp = page_header(__pa(sptep)); | ||
479 | |||
480 | sw->pte_gpa = (sp->gfn << PAGE_SHIFT); | ||
481 | sw->pte_gpa += (sptep - sp->spt) * sizeof(pt_element_t); | ||
482 | |||
483 | if (is_shadow_present_pte(*sptep)) { | ||
472 | rmap_remove(vcpu->kvm, sptep); | 484 | rmap_remove(vcpu->kvm, sptep); |
485 | if (is_large_pte(*sptep)) | ||
486 | --vcpu->kvm->stat.lpages; | ||
487 | } | ||
473 | set_shadow_pte(sptep, shadow_trap_nonpresent_pte); | 488 | set_shadow_pte(sptep, shadow_trap_nonpresent_pte); |
474 | return 1; | 489 | return 1; |
475 | } | 490 | } |
@@ -480,11 +495,26 @@ static int FNAME(shadow_invlpg_entry)(struct kvm_shadow_walk *_sw, | |||
480 | 495 | ||
481 | static void FNAME(invlpg)(struct kvm_vcpu *vcpu, gva_t gva) | 496 | static void FNAME(invlpg)(struct kvm_vcpu *vcpu, gva_t gva) |
482 | { | 497 | { |
498 | pt_element_t gpte; | ||
483 | struct shadow_walker walker = { | 499 | struct shadow_walker walker = { |
484 | .walker = { .entry = FNAME(shadow_invlpg_entry), }, | 500 | .walker = { .entry = FNAME(shadow_invlpg_entry), }, |
501 | .pte_gpa = -1, | ||
485 | }; | 502 | }; |
486 | 503 | ||
504 | spin_lock(&vcpu->kvm->mmu_lock); | ||
487 | walk_shadow(&walker.walker, vcpu, gva); | 505 | walk_shadow(&walker.walker, vcpu, gva); |
506 | spin_unlock(&vcpu->kvm->mmu_lock); | ||
507 | if (walker.pte_gpa == -1) | ||
508 | return; | ||
509 | if (kvm_read_guest_atomic(vcpu->kvm, walker.pte_gpa, &gpte, | ||
510 | sizeof(pt_element_t))) | ||
511 | return; | ||
512 | if (is_present_pte(gpte) && (gpte & PT_ACCESSED_MASK)) { | ||
513 | if (mmu_topup_memory_caches(vcpu)) | ||
514 | return; | ||
515 | kvm_mmu_pte_write(vcpu, walker.pte_gpa, (const u8 *)&gpte, | ||
516 | sizeof(pt_element_t), 0); | ||
517 | } | ||
488 | } | 518 | } |
489 | 519 | ||
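The reworked invlpg keeps mmu_lock only around the shadow walk itself; afterwards it re-reads the guest PTE and, if it is present and accessed, pre-installs it through kvm_mmu_pte_write() with guest_initiated == 0 so the flood heuristic is untouched. The walker records where that guest PTE lives by combining the shadow page's gfn with the spte's index, as the sketch below shows with made-up values:

```c
#include <stdio.h>
#include <stdint.h>

#define PAGE_SHIFT 12

int main(void)
{
	/*
	 * How shadow_invlpg_entry() above locates the guest PTE: sp->gfn
	 * gives the frame of the guest page table, and the spte's index
	 * within the shadow table gives the entry offset. The numbers
	 * here are illustrative only.
	 */
	uint64_t gfn = 0x1234;            /* guest frame of the page table */
	unsigned long spte_index = 0x1f;  /* sptep - sp->spt               */
	unsigned int pte_size = 8;        /* sizeof(pt_element_t), 64-bit  */

	uint64_t pte_gpa = (gfn << PAGE_SHIFT) + spte_index * pte_size;
	printf("pte_gpa = %#llx\n", (unsigned long long)pte_gpa);
	return 0;
}
```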
490 | static gpa_t FNAME(gva_to_gpa)(struct kvm_vcpu *vcpu, gva_t vaddr) | 520 | static gpa_t FNAME(gva_to_gpa)(struct kvm_vcpu *vcpu, gva_t vaddr) |
@@ -580,7 +610,7 @@ static int FNAME(sync_page)(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp) | |||
580 | nr_present++; | 610 | nr_present++; |
581 | pte_access = sp->role.access & FNAME(gpte_access)(vcpu, gpte); | 611 | pte_access = sp->role.access & FNAME(gpte_access)(vcpu, gpte); |
582 | set_spte(vcpu, &sp->spt[i], pte_access, 0, 0, | 612 | set_spte(vcpu, &sp->spt[i], pte_access, 0, 0, |
583 | is_dirty_pte(gpte), 0, gfn, | 613 | is_dirty_pte(gpte), 0, gpte & PT_GLOBAL_MASK, gfn, |
584 | spte_to_pfn(sp->spt[i]), true, false); | 614 | spte_to_pfn(sp->spt[i]), true, false); |
585 | } | 615 | } |
586 | 616 | ||
diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c index 9c4ce657d963..1452851ae258 100644 --- a/arch/x86/kvm/svm.c +++ b/arch/x86/kvm/svm.c | |||
@@ -28,6 +28,8 @@ | |||
28 | 28 | ||
29 | #include <asm/desc.h> | 29 | #include <asm/desc.h> |
30 | 30 | ||
31 | #include <asm/virtext.h> | ||
32 | |||
31 | #define __ex(x) __kvm_handle_fault_on_reboot(x) | 33 | #define __ex(x) __kvm_handle_fault_on_reboot(x) |
32 | 34 | ||
33 | MODULE_AUTHOR("Qumranet"); | 35 | MODULE_AUTHOR("Qumranet"); |
@@ -245,34 +247,19 @@ static void skip_emulated_instruction(struct kvm_vcpu *vcpu) | |||
245 | 247 | ||
246 | static int has_svm(void) | 248 | static int has_svm(void) |
247 | { | 249 | { |
248 | uint32_t eax, ebx, ecx, edx; | 250 | const char *msg; |
249 | |||
250 | if (boot_cpu_data.x86_vendor != X86_VENDOR_AMD) { | ||
251 | printk(KERN_INFO "has_svm: not amd\n"); | ||
252 | return 0; | ||
253 | } | ||
254 | 251 | ||
255 | cpuid(0x80000000, &eax, &ebx, &ecx, &edx); | 252 | if (!cpu_has_svm(&msg)) { |
256 | if (eax < SVM_CPUID_FUNC) { | 253 | printk(KERN_INFO "has_svm: %s\n", msg); |
257 | printk(KERN_INFO "has_svm: can't execute cpuid_8000000a\n"); | ||
258 | return 0; | 254 | return 0; |
259 | } | 255 | } |
260 | 256 | ||
261 | cpuid(0x80000001, &eax, &ebx, &ecx, &edx); | ||
262 | if (!(ecx & (1 << SVM_CPUID_FEATURE_SHIFT))) { | ||
263 | printk(KERN_DEBUG "has_svm: svm not available\n"); | ||
264 | return 0; | ||
265 | } | ||
266 | return 1; | 257 | return 1; |
267 | } | 258 | } |
268 | 259 | ||
269 | static void svm_hardware_disable(void *garbage) | 260 | static void svm_hardware_disable(void *garbage) |
270 | { | 261 | { |
271 | uint64_t efer; | 262 | cpu_svm_disable(); |
272 | |||
273 | wrmsrl(MSR_VM_HSAVE_PA, 0); | ||
274 | rdmsrl(MSR_EFER, efer); | ||
275 | wrmsrl(MSR_EFER, efer & ~MSR_EFER_SVME_MASK); | ||
276 | } | 263 | } |
277 | 264 | ||
278 | static void svm_hardware_enable(void *garbage) | 265 | static void svm_hardware_enable(void *garbage) |
@@ -772,6 +759,22 @@ static void svm_get_segment(struct kvm_vcpu *vcpu, | |||
772 | var->l = (s->attrib >> SVM_SELECTOR_L_SHIFT) & 1; | 759 | var->l = (s->attrib >> SVM_SELECTOR_L_SHIFT) & 1; |
773 | var->db = (s->attrib >> SVM_SELECTOR_DB_SHIFT) & 1; | 760 | var->db = (s->attrib >> SVM_SELECTOR_DB_SHIFT) & 1; |
774 | var->g = (s->attrib >> SVM_SELECTOR_G_SHIFT) & 1; | 761 | var->g = (s->attrib >> SVM_SELECTOR_G_SHIFT) & 1; |
762 | |||
763 | /* | ||
764 | * SVM always stores 0 for the 'G' bit in the CS selector in | ||
765 | * the VMCB on a VMEXIT. This hurts cross-vendor migration: | ||
766 | * Intel's VMENTRY has a check on the 'G' bit. | ||
767 | */ | ||
768 | if (seg == VCPU_SREG_CS) | ||
769 | var->g = s->limit > 0xfffff; | ||
770 | |||
771 | /* | ||
772 | * Work around a bug where the busy flag in the tr selector | ||
773 | * isn't exposed | ||
774 | */ | ||
775 | if (seg == VCPU_SREG_TR) | ||
776 | var->type |= 0x2; | ||
777 | |||
775 | var->unusable = !var->present; | 778 | var->unusable = !var->present; |
776 | } | 779 | } |
777 | 780 | ||
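The G-bit workaround above relies on how segment limits are encoded: with G=0 the 20-bit limit field counts bytes, with G=1 it is scaled by 4KiB. Any effective limit above 0xfffff can therefore only be encoded with G=1, and limits at or below it are always encodable with G=0, so recomputing g from the limit satisfies Intel's VMENTRY consistency checks. A tiny check of the rule:

```c
#include <stdio.h>
#include <stdint.h>

/*
 * With G=0 a segment limit fits in 20 bits (<= 0xfffff); with G=1 the
 * limit is page-scaled and exceeds 0xfffff. So the granularity bit can
 * be inferred from the effective limit alone.
 */
static int infer_g_bit(uint32_t effective_limit)
{
	return effective_limit > 0xfffff;
}

int main(void)
{
	printf("limit 0x0000ffff -> G=%d\n", infer_g_bit(0x0000ffffu)); /* 0 */
	printf("limit 0xffffffff -> G=%d\n", infer_g_bit(0xffffffffu)); /* 1 */
	return 0;
}
```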
@@ -1099,6 +1102,7 @@ static int io_interception(struct vcpu_svm *svm, struct kvm_run *kvm_run) | |||
1099 | rep = (io_info & SVM_IOIO_REP_MASK) != 0; | 1102 | rep = (io_info & SVM_IOIO_REP_MASK) != 0; |
1100 | down = (svm->vmcb->save.rflags & X86_EFLAGS_DF) != 0; | 1103 | down = (svm->vmcb->save.rflags & X86_EFLAGS_DF) != 0; |
1101 | 1104 | ||
1105 | skip_emulated_instruction(&svm->vcpu); | ||
1102 | return kvm_emulate_pio(&svm->vcpu, kvm_run, in, size, port); | 1106 | return kvm_emulate_pio(&svm->vcpu, kvm_run, in, size, port); |
1103 | } | 1107 | } |
1104 | 1108 | ||
@@ -1912,6 +1916,11 @@ static int get_npt_level(void) | |||
1912 | #endif | 1916 | #endif |
1913 | } | 1917 | } |
1914 | 1918 | ||
1919 | static int svm_get_mt_mask_shift(void) | ||
1920 | { | ||
1921 | return 0; | ||
1922 | } | ||
1923 | |||
1915 | static struct kvm_x86_ops svm_x86_ops = { | 1924 | static struct kvm_x86_ops svm_x86_ops = { |
1916 | .cpu_has_kvm_support = has_svm, | 1925 | .cpu_has_kvm_support = has_svm, |
1917 | .disabled_by_bios = is_disabled, | 1926 | .disabled_by_bios = is_disabled, |
@@ -1967,6 +1976,7 @@ static struct kvm_x86_ops svm_x86_ops = { | |||
1967 | 1976 | ||
1968 | .set_tss_addr = svm_set_tss_addr, | 1977 | .set_tss_addr = svm_set_tss_addr, |
1969 | .get_tdp_level = get_npt_level, | 1978 | .get_tdp_level = get_npt_level, |
1979 | .get_mt_mask_shift = svm_get_mt_mask_shift, | ||
1970 | }; | 1980 | }; |
1971 | 1981 | ||
1972 | static int __init svm_init(void) | 1982 | static int __init svm_init(void) |
diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c index a4018b01e1f9..6259d7467648 100644 --- a/arch/x86/kvm/vmx.c +++ b/arch/x86/kvm/vmx.c | |||
@@ -16,7 +16,6 @@ | |||
16 | */ | 16 | */ |
17 | 17 | ||
18 | #include "irq.h" | 18 | #include "irq.h" |
19 | #include "vmx.h" | ||
20 | #include "mmu.h" | 19 | #include "mmu.h" |
21 | 20 | ||
22 | #include <linux/kvm_host.h> | 21 | #include <linux/kvm_host.h> |
@@ -31,6 +30,8 @@ | |||
31 | 30 | ||
32 | #include <asm/io.h> | 31 | #include <asm/io.h> |
33 | #include <asm/desc.h> | 32 | #include <asm/desc.h> |
33 | #include <asm/vmx.h> | ||
34 | #include <asm/virtext.h> | ||
34 | 35 | ||
35 | #define __ex(x) __kvm_handle_fault_on_reboot(x) | 36 | #define __ex(x) __kvm_handle_fault_on_reboot(x) |
36 | 37 | ||
@@ -90,6 +91,11 @@ struct vcpu_vmx { | |||
90 | } rmode; | 91 | } rmode; |
91 | int vpid; | 92 | int vpid; |
92 | bool emulation_required; | 93 | bool emulation_required; |
94 | |||
95 | /* Support for vnmi-less CPUs */ | ||
96 | int soft_vnmi_blocked; | ||
97 | ktime_t entry_time; | ||
98 | s64 vnmi_blocked_time; | ||
93 | }; | 99 | }; |
94 | 100 | ||
95 | static inline struct vcpu_vmx *to_vmx(struct kvm_vcpu *vcpu) | 101 | static inline struct vcpu_vmx *to_vmx(struct kvm_vcpu *vcpu) |
@@ -122,7 +128,7 @@ static struct vmcs_config { | |||
122 | u32 vmentry_ctrl; | 128 | u32 vmentry_ctrl; |
123 | } vmcs_config; | 129 | } vmcs_config; |
124 | 130 | ||
125 | struct vmx_capability { | 131 | static struct vmx_capability { |
126 | u32 ept; | 132 | u32 ept; |
127 | u32 vpid; | 133 | u32 vpid; |
128 | } vmx_capability; | 134 | } vmx_capability; |
@@ -957,6 +963,13 @@ static int vmx_set_msr(struct kvm_vcpu *vcpu, u32 msr_index, u64 data) | |||
957 | pr_unimpl(vcpu, "unimplemented perfctr wrmsr: 0x%x data 0x%llx\n", msr_index, data); | 963 | pr_unimpl(vcpu, "unimplemented perfctr wrmsr: 0x%x data 0x%llx\n", msr_index, data); |
958 | 964 | ||
959 | break; | 965 | break; |
966 | case MSR_IA32_CR_PAT: | ||
967 | if (vmcs_config.vmentry_ctrl & VM_ENTRY_LOAD_IA32_PAT) { | ||
968 | vmcs_write64(GUEST_IA32_PAT, data); | ||
969 | vcpu->arch.pat = data; | ||
970 | break; | ||
971 | } | ||
972 | /* Otherwise falls through to kvm_set_msr_common */ | ||
960 | default: | 973 | default: |
961 | vmx_load_host_state(vmx); | 974 | vmx_load_host_state(vmx); |
962 | msr = find_msr_entry(vmx, msr_index); | 975 | msr = find_msr_entry(vmx, msr_index); |
@@ -1032,8 +1045,7 @@ static int vmx_get_irq(struct kvm_vcpu *vcpu) | |||
1032 | 1045 | ||
1033 | static __init int cpu_has_kvm_support(void) | 1046 | static __init int cpu_has_kvm_support(void) |
1034 | { | 1047 | { |
1035 | unsigned long ecx = cpuid_ecx(1); | 1048 | return cpu_has_vmx(); |
1036 | return test_bit(5, &ecx); /* CPUID.1:ECX.VMX[bit 5] -> VT */ | ||
1037 | } | 1049 | } |
1038 | 1050 | ||
1039 | static __init int vmx_disabled_by_bios(void) | 1051 | static __init int vmx_disabled_by_bios(void) |
@@ -1079,13 +1091,22 @@ static void vmclear_local_vcpus(void) | |||
1079 | __vcpu_clear(vmx); | 1091 | __vcpu_clear(vmx); |
1080 | } | 1092 | } |
1081 | 1093 | ||
1082 | static void hardware_disable(void *garbage) | 1094 | |
1095 | /* Just like cpu_vmxoff(), but with the __kvm_handle_fault_on_reboot() | ||
1096 | * tricks. | ||
1097 | */ | ||
1098 | static void kvm_cpu_vmxoff(void) | ||
1083 | { | 1099 | { |
1084 | vmclear_local_vcpus(); | ||
1085 | asm volatile (__ex(ASM_VMX_VMXOFF) : : : "cc"); | 1100 | asm volatile (__ex(ASM_VMX_VMXOFF) : : : "cc"); |
1086 | write_cr4(read_cr4() & ~X86_CR4_VMXE); | 1101 | write_cr4(read_cr4() & ~X86_CR4_VMXE); |
1087 | } | 1102 | } |
1088 | 1103 | ||
1104 | static void hardware_disable(void *garbage) | ||
1105 | { | ||
1106 | vmclear_local_vcpus(); | ||
1107 | kvm_cpu_vmxoff(); | ||
1108 | } | ||
1109 | |||
1089 | static __init int adjust_vmx_controls(u32 ctl_min, u32 ctl_opt, | 1110 | static __init int adjust_vmx_controls(u32 ctl_min, u32 ctl_opt, |
1090 | u32 msr, u32 *result) | 1111 | u32 msr, u32 *result) |
1091 | { | 1112 | { |
@@ -1176,12 +1197,13 @@ static __init int setup_vmcs_config(struct vmcs_config *vmcs_conf) | |||
1176 | #ifdef CONFIG_X86_64 | 1197 | #ifdef CONFIG_X86_64 |
1177 | min |= VM_EXIT_HOST_ADDR_SPACE_SIZE; | 1198 | min |= VM_EXIT_HOST_ADDR_SPACE_SIZE; |
1178 | #endif | 1199 | #endif |
1179 | opt = 0; | 1200 | opt = VM_EXIT_SAVE_IA32_PAT | VM_EXIT_LOAD_IA32_PAT; |
1180 | if (adjust_vmx_controls(min, opt, MSR_IA32_VMX_EXIT_CTLS, | 1201 | if (adjust_vmx_controls(min, opt, MSR_IA32_VMX_EXIT_CTLS, |
1181 | &_vmexit_control) < 0) | 1202 | &_vmexit_control) < 0) |
1182 | return -EIO; | 1203 | return -EIO; |
1183 | 1204 | ||
1184 | min = opt = 0; | 1205 | min = 0; |
1206 | opt = VM_ENTRY_LOAD_IA32_PAT; | ||
1185 | if (adjust_vmx_controls(min, opt, MSR_IA32_VMX_ENTRY_CTLS, | 1207 | if (adjust_vmx_controls(min, opt, MSR_IA32_VMX_ENTRY_CTLS, |
1186 | &_vmentry_control) < 0) | 1208 | &_vmentry_control) < 0) |
1187 | return -EIO; | 1209 | return -EIO; |
@@ -2087,8 +2109,9 @@ static void vmx_disable_intercept_for_msr(struct page *msr_bitmap, u32 msr) | |||
2087 | */ | 2109 | */ |
2088 | static int vmx_vcpu_setup(struct vcpu_vmx *vmx) | 2110 | static int vmx_vcpu_setup(struct vcpu_vmx *vmx) |
2089 | { | 2111 | { |
2090 | u32 host_sysenter_cs; | 2112 | u32 host_sysenter_cs, msr_low, msr_high; |
2091 | u32 junk; | 2113 | u32 junk; |
2114 | u64 host_pat; | ||
2092 | unsigned long a; | 2115 | unsigned long a; |
2093 | struct descriptor_table dt; | 2116 | struct descriptor_table dt; |
2094 | int i; | 2117 | int i; |
@@ -2176,6 +2199,20 @@ static int vmx_vcpu_setup(struct vcpu_vmx *vmx) | |||
2176 | rdmsrl(MSR_IA32_SYSENTER_EIP, a); | 2199 | rdmsrl(MSR_IA32_SYSENTER_EIP, a); |
2177 | vmcs_writel(HOST_IA32_SYSENTER_EIP, a); /* 22.2.3 */ | 2200 | vmcs_writel(HOST_IA32_SYSENTER_EIP, a); /* 22.2.3 */ |
2178 | 2201 | ||
2202 | if (vmcs_config.vmexit_ctrl & VM_EXIT_LOAD_IA32_PAT) { | ||
2203 | rdmsr(MSR_IA32_CR_PAT, msr_low, msr_high); | ||
2204 | host_pat = msr_low | ((u64) msr_high << 32); | ||
2205 | vmcs_write64(HOST_IA32_PAT, host_pat); | ||
2206 | } | ||
2207 | if (vmcs_config.vmentry_ctrl & VM_ENTRY_LOAD_IA32_PAT) { | ||
2208 | rdmsr(MSR_IA32_CR_PAT, msr_low, msr_high); | ||
2209 | host_pat = msr_low | ((u64) msr_high << 32); | ||
2210 | /* Write the default value following the host PAT */ | ||
2211 | vmcs_write64(GUEST_IA32_PAT, host_pat); | ||
2212 | /* Keep arch.pat in sync with GUEST_IA32_PAT */ | ||
2213 | vmx->vcpu.arch.pat = host_pat; | ||
2214 | } | ||
2215 | |||
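Both PAT paths above read the host's IA32_PAT MSR in two 32-bit halves and splice them into one 64-bit value whose eight bytes each select a memory type. The sketch below uses the architectural power-on default (0x0007040600070406: WB, WT, UC-, UC, repeated) as the example value:

```c
#include <stdio.h>
#include <stdint.h>

int main(void)
{
	/* Halves as rdmsr would return them; this is the architectural
	 * power-on default for IA32_PAT, used here purely as an example. */
	uint32_t msr_low  = 0x00070406;
	uint32_t msr_high = 0x00070406;
	uint64_t host_pat = msr_low | ((uint64_t)msr_high << 32);
	int i;

	/* Each byte encodes the memory type for one PAT entry (PA0-PA7). */
	for (i = 0; i < 8; i++)
		printf("PA%d = %llu\n", i,
		       (unsigned long long)((host_pat >> (8 * i)) & 0x7));
	return 0;
}
```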
2179 | for (i = 0; i < NR_VMX_MSR; ++i) { | 2216 | for (i = 0; i < NR_VMX_MSR; ++i) { |
2180 | u32 index = vmx_msr_index[i]; | 2217 | u32 index = vmx_msr_index[i]; |
2181 | u32 data_low, data_high; | 2218 | u32 data_low, data_high; |
@@ -2230,6 +2267,8 @@ static int vmx_vcpu_reset(struct kvm_vcpu *vcpu) | |||
2230 | 2267 | ||
2231 | vmx->vcpu.arch.rmode.active = 0; | 2268 | vmx->vcpu.arch.rmode.active = 0; |
2232 | 2269 | ||
2270 | vmx->soft_vnmi_blocked = 0; | ||
2271 | |||
2233 | vmx->vcpu.arch.regs[VCPU_REGS_RDX] = get_rdx_init_val(); | 2272 | vmx->vcpu.arch.regs[VCPU_REGS_RDX] = get_rdx_init_val(); |
2234 | kvm_set_cr8(&vmx->vcpu, 0); | 2273 | kvm_set_cr8(&vmx->vcpu, 0); |
2235 | msr = 0xfee00000 | MSR_IA32_APICBASE_ENABLE; | 2274 | msr = 0xfee00000 | MSR_IA32_APICBASE_ENABLE; |
@@ -2335,6 +2374,29 @@ out: | |||
2335 | return ret; | 2374 | return ret; |
2336 | } | 2375 | } |
2337 | 2376 | ||
2377 | static void enable_irq_window(struct kvm_vcpu *vcpu) | ||
2378 | { | ||
2379 | u32 cpu_based_vm_exec_control; | ||
2380 | |||
2381 | cpu_based_vm_exec_control = vmcs_read32(CPU_BASED_VM_EXEC_CONTROL); | ||
2382 | cpu_based_vm_exec_control |= CPU_BASED_VIRTUAL_INTR_PENDING; | ||
2383 | vmcs_write32(CPU_BASED_VM_EXEC_CONTROL, cpu_based_vm_exec_control); | ||
2384 | } | ||
2385 | |||
2386 | static void enable_nmi_window(struct kvm_vcpu *vcpu) | ||
2387 | { | ||
2388 | u32 cpu_based_vm_exec_control; | ||
2389 | |||
2390 | if (!cpu_has_virtual_nmis()) { | ||
2391 | enable_irq_window(vcpu); | ||
2392 | return; | ||
2393 | } | ||
2394 | |||
2395 | cpu_based_vm_exec_control = vmcs_read32(CPU_BASED_VM_EXEC_CONTROL); | ||
2396 | cpu_based_vm_exec_control |= CPU_BASED_VIRTUAL_NMI_PENDING; | ||
2397 | vmcs_write32(CPU_BASED_VM_EXEC_CONTROL, cpu_based_vm_exec_control); | ||
2398 | } | ||
2399 | |||
2338 | static void vmx_inject_irq(struct kvm_vcpu *vcpu, int irq) | 2400 | static void vmx_inject_irq(struct kvm_vcpu *vcpu, int irq) |
2339 | { | 2401 | { |
2340 | struct vcpu_vmx *vmx = to_vmx(vcpu); | 2402 | struct vcpu_vmx *vmx = to_vmx(vcpu); |
@@ -2358,10 +2420,54 @@ static void vmx_inject_irq(struct kvm_vcpu *vcpu, int irq) | |||
2358 | 2420 | ||
2359 | static void vmx_inject_nmi(struct kvm_vcpu *vcpu) | 2421 | static void vmx_inject_nmi(struct kvm_vcpu *vcpu) |
2360 | { | 2422 | { |
2423 | struct vcpu_vmx *vmx = to_vmx(vcpu); | ||
2424 | |||
2425 | if (!cpu_has_virtual_nmis()) { | ||
2426 | /* | ||
2427 | * Tracking the NMI-blocked state in software is built upon | ||
2428 | * finding the next open IRQ window. This, in turn, depends on | ||
2429 | * well-behaving guests: They have to keep IRQs disabled at | ||
2430 | * least as long as the NMI handler runs. Otherwise we may | ||
2431 | * cause NMI nesting, maybe breaking the guest. But as this is | ||
2432 | * highly unlikely, we can live with the residual risk. | ||
2433 | */ | ||
2434 | vmx->soft_vnmi_blocked = 1; | ||
2435 | vmx->vnmi_blocked_time = 0; | ||
2436 | } | ||
2437 | |||
2438 | ++vcpu->stat.nmi_injections; | ||
2439 | if (vcpu->arch.rmode.active) { | ||
2440 | vmx->rmode.irq.pending = true; | ||
2441 | vmx->rmode.irq.vector = NMI_VECTOR; | ||
2442 | vmx->rmode.irq.rip = kvm_rip_read(vcpu); | ||
2443 | vmcs_write32(VM_ENTRY_INTR_INFO_FIELD, | ||
2444 | NMI_VECTOR | INTR_TYPE_SOFT_INTR | | ||
2445 | INTR_INFO_VALID_MASK); | ||
2446 | vmcs_write32(VM_ENTRY_INSTRUCTION_LEN, 1); | ||
2447 | kvm_rip_write(vcpu, vmx->rmode.irq.rip - 1); | ||
2448 | return; | ||
2449 | } | ||
2361 | vmcs_write32(VM_ENTRY_INTR_INFO_FIELD, | 2450 | vmcs_write32(VM_ENTRY_INTR_INFO_FIELD, |
2362 | INTR_TYPE_NMI_INTR | INTR_INFO_VALID_MASK | NMI_VECTOR); | 2451 | INTR_TYPE_NMI_INTR | INTR_INFO_VALID_MASK | NMI_VECTOR); |
2363 | } | 2452 | } |
2364 | 2453 | ||
2454 | static void vmx_update_window_states(struct kvm_vcpu *vcpu) | ||
2455 | { | ||
2456 | u32 guest_intr = vmcs_read32(GUEST_INTERRUPTIBILITY_INFO); | ||
2457 | |||
2458 | vcpu->arch.nmi_window_open = | ||
2459 | !(guest_intr & (GUEST_INTR_STATE_STI | | ||
2460 | GUEST_INTR_STATE_MOV_SS | | ||
2461 | GUEST_INTR_STATE_NMI)); | ||
2462 | if (!cpu_has_virtual_nmis() && to_vmx(vcpu)->soft_vnmi_blocked) | ||
2463 | vcpu->arch.nmi_window_open = 0; | ||
2464 | |||
2465 | vcpu->arch.interrupt_window_open = | ||
2466 | ((vmcs_readl(GUEST_RFLAGS) & X86_EFLAGS_IF) && | ||
2467 | !(guest_intr & (GUEST_INTR_STATE_STI | | ||
2468 | GUEST_INTR_STATE_MOV_SS))); | ||
2469 | } | ||
2470 | |||
2365 | static void kvm_do_inject_irq(struct kvm_vcpu *vcpu) | 2471 | static void kvm_do_inject_irq(struct kvm_vcpu *vcpu) |
2366 | { | 2472 | { |
2367 | int word_index = __ffs(vcpu->arch.irq_summary); | 2473 | int word_index = __ffs(vcpu->arch.irq_summary); |
@@ -2374,40 +2480,49 @@ static void kvm_do_inject_irq(struct kvm_vcpu *vcpu) | |||
2374 | kvm_queue_interrupt(vcpu, irq); | 2480 | kvm_queue_interrupt(vcpu, irq); |
2375 | } | 2481 | } |
2376 | 2482 | ||
2377 | |||
2378 | static void do_interrupt_requests(struct kvm_vcpu *vcpu, | 2483 | static void do_interrupt_requests(struct kvm_vcpu *vcpu, |
2379 | struct kvm_run *kvm_run) | 2484 | struct kvm_run *kvm_run) |
2380 | { | 2485 | { |
2381 | u32 cpu_based_vm_exec_control; | 2486 | vmx_update_window_states(vcpu); |
2382 | |||
2383 | vcpu->arch.interrupt_window_open = | ||
2384 | ((vmcs_readl(GUEST_RFLAGS) & X86_EFLAGS_IF) && | ||
2385 | (vmcs_read32(GUEST_INTERRUPTIBILITY_INFO) & 3) == 0); | ||
2386 | 2487 | ||
2387 | if (vcpu->arch.interrupt_window_open && | 2488 | if (vcpu->arch.nmi_pending && !vcpu->arch.nmi_injected) { |
2388 | vcpu->arch.irq_summary && !vcpu->arch.interrupt.pending) | 2489 | if (vcpu->arch.interrupt.pending) { |
2389 | kvm_do_inject_irq(vcpu); | 2490 | enable_nmi_window(vcpu); |
2491 | } else if (vcpu->arch.nmi_window_open) { | ||
2492 | vcpu->arch.nmi_pending = false; | ||
2493 | vcpu->arch.nmi_injected = true; | ||
2494 | } else { | ||
2495 | enable_nmi_window(vcpu); | ||
2496 | return; | ||
2497 | } | ||
2498 | } | ||
2499 | if (vcpu->arch.nmi_injected) { | ||
2500 | vmx_inject_nmi(vcpu); | ||
2501 | if (vcpu->arch.nmi_pending) | ||
2502 | enable_nmi_window(vcpu); | ||
2503 | else if (vcpu->arch.irq_summary | ||
2504 | || kvm_run->request_interrupt_window) | ||
2505 | enable_irq_window(vcpu); | ||
2506 | return; | ||
2507 | } | ||
2390 | 2508 | ||
2391 | if (vcpu->arch.interrupt_window_open && vcpu->arch.interrupt.pending) | 2509 | if (vcpu->arch.interrupt_window_open) { |
2392 | vmx_inject_irq(vcpu, vcpu->arch.interrupt.nr); | 2510 | if (vcpu->arch.irq_summary && !vcpu->arch.interrupt.pending) |
2511 | kvm_do_inject_irq(vcpu); | ||
2393 | 2512 | ||
2394 | cpu_based_vm_exec_control = vmcs_read32(CPU_BASED_VM_EXEC_CONTROL); | 2513 | if (vcpu->arch.interrupt.pending) |
2514 | vmx_inject_irq(vcpu, vcpu->arch.interrupt.nr); | ||
2515 | } | ||
2395 | if (!vcpu->arch.interrupt_window_open && | 2516 | if (!vcpu->arch.interrupt_window_open && |
2396 | (vcpu->arch.irq_summary || kvm_run->request_interrupt_window)) | 2517 | (vcpu->arch.irq_summary || kvm_run->request_interrupt_window)) |
2397 | /* | 2518 | enable_irq_window(vcpu); |
2398 | * Interrupts blocked. Wait for unblock. | ||
2399 | */ | ||
2400 | cpu_based_vm_exec_control |= CPU_BASED_VIRTUAL_INTR_PENDING; | ||
2401 | else | ||
2402 | cpu_based_vm_exec_control &= ~CPU_BASED_VIRTUAL_INTR_PENDING; | ||
2403 | vmcs_write32(CPU_BASED_VM_EXEC_CONTROL, cpu_based_vm_exec_control); | ||
2404 | } | 2519 | } |
2405 | 2520 | ||
2406 | static int vmx_set_tss_addr(struct kvm *kvm, unsigned int addr) | 2521 | static int vmx_set_tss_addr(struct kvm *kvm, unsigned int addr) |
2407 | { | 2522 | { |
2408 | int ret; | 2523 | int ret; |
2409 | struct kvm_userspace_memory_region tss_mem = { | 2524 | struct kvm_userspace_memory_region tss_mem = { |
2410 | .slot = 8, | 2525 | .slot = TSS_PRIVATE_MEMSLOT, |
2411 | .guest_phys_addr = addr, | 2526 | .guest_phys_addr = addr, |
2412 | .memory_size = PAGE_SIZE * 3, | 2527 | .memory_size = PAGE_SIZE * 3, |
2413 | .flags = 0, | 2528 | .flags = 0, |
@@ -2492,7 +2607,7 @@ static int handle_exception(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run) | |||
2492 | set_bit(irq / BITS_PER_LONG, &vcpu->arch.irq_summary); | 2607 | set_bit(irq / BITS_PER_LONG, &vcpu->arch.irq_summary); |
2493 | } | 2608 | } |
2494 | 2609 | ||
2495 | if ((intr_info & INTR_INFO_INTR_TYPE_MASK) == 0x200) /* nmi */ | 2610 | if ((intr_info & INTR_INFO_INTR_TYPE_MASK) == INTR_TYPE_NMI_INTR) |
2496 | return 1; /* already handled by vmx_vcpu_run() */ | 2611 | return 1; /* already handled by vmx_vcpu_run() */ |
2497 | 2612 | ||
2498 | if (is_no_device(intr_info)) { | 2613 | if (is_no_device(intr_info)) { |
@@ -2581,6 +2696,7 @@ static int handle_io(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run) | |||
2581 | rep = (exit_qualification & 32) != 0; | 2696 | rep = (exit_qualification & 32) != 0; |
2582 | port = exit_qualification >> 16; | 2697 | port = exit_qualification >> 16; |
2583 | 2698 | ||
2699 | skip_emulated_instruction(vcpu); | ||
2584 | return kvm_emulate_pio(vcpu, kvm_run, in, size, port); | 2700 | return kvm_emulate_pio(vcpu, kvm_run, in, size, port); |
2585 | } | 2701 | } |
2586 | 2702 | ||
@@ -2767,6 +2883,7 @@ static int handle_interrupt_window(struct kvm_vcpu *vcpu, | |||
2767 | vmcs_write32(CPU_BASED_VM_EXEC_CONTROL, cpu_based_vm_exec_control); | 2883 | vmcs_write32(CPU_BASED_VM_EXEC_CONTROL, cpu_based_vm_exec_control); |
2768 | 2884 | ||
2769 | KVMTRACE_0D(PEND_INTR, vcpu, handler); | 2885 | KVMTRACE_0D(PEND_INTR, vcpu, handler); |
2886 | ++vcpu->stat.irq_window_exits; | ||
2770 | 2887 | ||
2771 | /* | 2888 | /* |
2772 | * If the user space waits to inject interrupts, exit as soon as | 2889 | * If the user space waits to inject interrupts, exit as soon as |
@@ -2775,7 +2892,6 @@ static int handle_interrupt_window(struct kvm_vcpu *vcpu, | |||
2775 | if (kvm_run->request_interrupt_window && | 2892 | if (kvm_run->request_interrupt_window && |
2776 | !vcpu->arch.irq_summary) { | 2893 | !vcpu->arch.irq_summary) { |
2777 | kvm_run->exit_reason = KVM_EXIT_IRQ_WINDOW_OPEN; | 2894 | kvm_run->exit_reason = KVM_EXIT_IRQ_WINDOW_OPEN; |
2778 | ++vcpu->stat.irq_window_exits; | ||
2779 | return 0; | 2895 | return 0; |
2780 | } | 2896 | } |
2781 | return 1; | 2897 | return 1; |
@@ -2832,6 +2948,7 @@ static int handle_apic_access(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run) | |||
2832 | 2948 | ||
2833 | static int handle_task_switch(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run) | 2949 | static int handle_task_switch(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run) |
2834 | { | 2950 | { |
2951 | struct vcpu_vmx *vmx = to_vmx(vcpu); | ||
2835 | unsigned long exit_qualification; | 2952 | unsigned long exit_qualification; |
2836 | u16 tss_selector; | 2953 | u16 tss_selector; |
2837 | int reason; | 2954 | int reason; |
@@ -2839,6 +2956,15 @@ static int handle_task_switch(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run) | |||
2839 | exit_qualification = vmcs_readl(EXIT_QUALIFICATION); | 2956 | exit_qualification = vmcs_readl(EXIT_QUALIFICATION); |
2840 | 2957 | ||
2841 | reason = (u32)exit_qualification >> 30; | 2958 | reason = (u32)exit_qualification >> 30; |
2959 | if (reason == TASK_SWITCH_GATE && vmx->vcpu.arch.nmi_injected && | ||
2960 | (vmx->idt_vectoring_info & VECTORING_INFO_VALID_MASK) && | ||
2961 | (vmx->idt_vectoring_info & VECTORING_INFO_TYPE_MASK) | ||
2962 | == INTR_TYPE_NMI_INTR) { | ||
2963 | vcpu->arch.nmi_injected = false; | ||
2964 | if (cpu_has_virtual_nmis()) | ||
2965 | vmcs_set_bits(GUEST_INTERRUPTIBILITY_INFO, | ||
2966 | GUEST_INTR_STATE_NMI); | ||
2967 | } | ||
2842 | tss_selector = exit_qualification; | 2968 | tss_selector = exit_qualification; |
2843 | 2969 | ||
2844 | return kvm_task_switch(vcpu, tss_selector, reason); | 2970 | return kvm_task_switch(vcpu, tss_selector, reason); |
@@ -2927,16 +3053,12 @@ static void handle_invalid_guest_state(struct kvm_vcpu *vcpu, | |||
2927 | while (!guest_state_valid(vcpu)) { | 3053 | while (!guest_state_valid(vcpu)) { |
2928 | err = emulate_instruction(vcpu, kvm_run, 0, 0, 0); | 3054 | err = emulate_instruction(vcpu, kvm_run, 0, 0, 0); |
2929 | 3055 | ||
2930 | switch (err) { | 3056 | if (err == EMULATE_DO_MMIO) |
2931 | case EMULATE_DONE: | 3057 | break; |
2932 | break; | 3058 | |
2933 | case EMULATE_DO_MMIO: | 3059 | if (err != EMULATE_DONE) { |
2934 | kvm_report_emulation_failure(vcpu, "mmio"); | 3060 | kvm_report_emulation_failure(vcpu, "emulation failure"); |
2935 | /* TODO: Handle MMIO */ | 3061 | return; |
2936 | return; | ||
2937 | default: | ||
2938 | kvm_report_emulation_failure(vcpu, "emulation failure"); | ||
2939 | return; | ||
2940 | } | 3062 | } |
2941 | 3063 | ||
2942 | if (signal_pending(current)) | 3064 | if (signal_pending(current)) |
@@ -2948,8 +3070,10 @@ static void handle_invalid_guest_state(struct kvm_vcpu *vcpu, | |||
2948 | local_irq_disable(); | 3070 | local_irq_disable(); |
2949 | preempt_disable(); | 3071 | preempt_disable(); |
2950 | 3072 | ||
2951 | /* Guest state should be valid now, no more emulation should be needed */ | 3073 | /* Guest state should be valid now, unless we still need |
2952 | vmx->emulation_required = 0; | 3074 | * to emulate MMIO */ |
3075 | if (guest_state_valid(vcpu)) | ||
3076 | vmx->emulation_required = 0; | ||
2953 | } | 3077 | } |
2954 | 3078 | ||
2955 | /* | 3079 | /* |
@@ -2996,6 +3120,11 @@ static int kvm_handle_exit(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu) | |||
2996 | KVMTRACE_3D(VMEXIT, vcpu, exit_reason, (u32)kvm_rip_read(vcpu), | 3120 | KVMTRACE_3D(VMEXIT, vcpu, exit_reason, (u32)kvm_rip_read(vcpu), |
2997 | (u32)((u64)kvm_rip_read(vcpu) >> 32), entryexit); | 3121 | (u32)((u64)kvm_rip_read(vcpu) >> 32), entryexit); |
2998 | 3122 | ||
3123 | /* If we still need to emulate MMIO from handle_invalid_guest_state, | ||
3124 | * just return 0 */ | ||
3125 | if (vmx->emulation_required && emulate_invalid_guest_state) | ||
3126 | return 0; | ||
3127 | |||
2999 | /* Accesses to CR3 don't cause a VMEXIT in paging mode, so we need | 3128 | /* Accesses to CR3 don't cause a VMEXIT in paging mode, so we need |
3000 | * to sync with the guest's real CR3. */ | 3129 | * to sync with the guest's real CR3. */ |
3001 | if (vm_need_ept() && is_paging(vcpu)) { | 3130 | if (vm_need_ept() && is_paging(vcpu)) { |
@@ -3012,9 +3141,32 @@ static int kvm_handle_exit(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu) | |||
3012 | 3141 | ||
3013 | if ((vectoring_info & VECTORING_INFO_VALID_MASK) && | 3142 | if ((vectoring_info & VECTORING_INFO_VALID_MASK) && |
3014 | (exit_reason != EXIT_REASON_EXCEPTION_NMI && | 3143 | (exit_reason != EXIT_REASON_EXCEPTION_NMI && |
3015 | exit_reason != EXIT_REASON_EPT_VIOLATION)) | 3144 | exit_reason != EXIT_REASON_EPT_VIOLATION && |
3016 | printk(KERN_WARNING "%s: unexpected, valid vectoring info and " | 3145 | exit_reason != EXIT_REASON_TASK_SWITCH)) |
3017 | "exit reason is 0x%x\n", __func__, exit_reason); | 3146 | printk(KERN_WARNING "%s: unexpected, valid vectoring info " |
3147 | "(0x%x) and exit reason is 0x%x\n", | ||
3148 | __func__, vectoring_info, exit_reason); | ||
3149 | |||
3150 | if (unlikely(!cpu_has_virtual_nmis() && vmx->soft_vnmi_blocked)) { | ||
3151 | if (vcpu->arch.interrupt_window_open) { | ||
3152 | vmx->soft_vnmi_blocked = 0; | ||
3153 | vcpu->arch.nmi_window_open = 1; | ||
3154 | } else if (vmx->vnmi_blocked_time > 1000000000LL && | ||
3155 | vcpu->arch.nmi_pending) { | ||
3156 | /* | ||
3157 | * This CPU don't support us in finding the end of an | ||
3158 | * NMI-blocked window if the guest runs with IRQs | ||
3159 | * disabled. So we pull the trigger after 1 s of | ||
3160 | * futile waiting, but inform the user about this. | ||
3161 | */ | ||
3162 | printk(KERN_WARNING "%s: Breaking out of NMI-blocked " | ||
3163 | "state on VCPU %d after 1 s timeout\n", | ||
3164 | __func__, vcpu->vcpu_id); | ||
3165 | vmx->soft_vnmi_blocked = 0; | ||
3166 | vmx->vcpu.arch.nmi_window_open = 1; | ||
3167 | } | ||
3168 | } | ||
3169 | |||
3018 | if (exit_reason < kvm_vmx_max_exit_handlers | 3170 | if (exit_reason < kvm_vmx_max_exit_handlers |
3019 | && kvm_vmx_exit_handlers[exit_reason]) | 3171 | && kvm_vmx_exit_handlers[exit_reason]) |
3020 | return kvm_vmx_exit_handlers[exit_reason](vcpu, kvm_run); | 3172 | return kvm_vmx_exit_handlers[exit_reason](vcpu, kvm_run); |
@@ -3042,51 +3194,6 @@ static void update_tpr_threshold(struct kvm_vcpu *vcpu) | |||
3042 | vmcs_write32(TPR_THRESHOLD, (max_irr > tpr) ? tpr >> 4 : max_irr >> 4); | 3194 | vmcs_write32(TPR_THRESHOLD, (max_irr > tpr) ? tpr >> 4 : max_irr >> 4); |
3043 | } | 3195 | } |
3044 | 3196 | ||
3045 | static void enable_irq_window(struct kvm_vcpu *vcpu) | ||
3046 | { | ||
3047 | u32 cpu_based_vm_exec_control; | ||
3048 | |||
3049 | cpu_based_vm_exec_control = vmcs_read32(CPU_BASED_VM_EXEC_CONTROL); | ||
3050 | cpu_based_vm_exec_control |= CPU_BASED_VIRTUAL_INTR_PENDING; | ||
3051 | vmcs_write32(CPU_BASED_VM_EXEC_CONTROL, cpu_based_vm_exec_control); | ||
3052 | } | ||
3053 | |||
3054 | static void enable_nmi_window(struct kvm_vcpu *vcpu) | ||
3055 | { | ||
3056 | u32 cpu_based_vm_exec_control; | ||
3057 | |||
3058 | if (!cpu_has_virtual_nmis()) | ||
3059 | return; | ||
3060 | |||
3061 | cpu_based_vm_exec_control = vmcs_read32(CPU_BASED_VM_EXEC_CONTROL); | ||
3062 | cpu_based_vm_exec_control |= CPU_BASED_VIRTUAL_NMI_PENDING; | ||
3063 | vmcs_write32(CPU_BASED_VM_EXEC_CONTROL, cpu_based_vm_exec_control); | ||
3064 | } | ||
3065 | |||
3066 | static int vmx_nmi_enabled(struct kvm_vcpu *vcpu) | ||
3067 | { | ||
3068 | u32 guest_intr = vmcs_read32(GUEST_INTERRUPTIBILITY_INFO); | ||
3069 | return !(guest_intr & (GUEST_INTR_STATE_NMI | | ||
3070 | GUEST_INTR_STATE_MOV_SS | | ||
3071 | GUEST_INTR_STATE_STI)); | ||
3072 | } | ||
3073 | |||
3074 | static int vmx_irq_enabled(struct kvm_vcpu *vcpu) | ||
3075 | { | ||
3076 | u32 guest_intr = vmcs_read32(GUEST_INTERRUPTIBILITY_INFO); | ||
3077 | return (!(guest_intr & (GUEST_INTR_STATE_MOV_SS | | ||
3078 | GUEST_INTR_STATE_STI)) && | ||
3079 | (vmcs_readl(GUEST_RFLAGS) & X86_EFLAGS_IF)); | ||
3080 | } | ||
3081 | |||
3082 | static void enable_intr_window(struct kvm_vcpu *vcpu) | ||
3083 | { | ||
3084 | if (vcpu->arch.nmi_pending) | ||
3085 | enable_nmi_window(vcpu); | ||
3086 | else if (kvm_cpu_has_interrupt(vcpu)) | ||
3087 | enable_irq_window(vcpu); | ||
3088 | } | ||
3089 | |||
3090 | static void vmx_complete_interrupts(struct vcpu_vmx *vmx) | 3197 | static void vmx_complete_interrupts(struct vcpu_vmx *vmx) |
3091 | { | 3198 | { |
3092 | u32 exit_intr_info; | 3199 | u32 exit_intr_info; |
@@ -3109,7 +3216,9 @@ static void vmx_complete_interrupts(struct vcpu_vmx *vmx) | |||
3109 | if (unblock_nmi && vector != DF_VECTOR) | 3216 | if (unblock_nmi && vector != DF_VECTOR) |
3110 | vmcs_set_bits(GUEST_INTERRUPTIBILITY_INFO, | 3217 | vmcs_set_bits(GUEST_INTERRUPTIBILITY_INFO, |
3111 | GUEST_INTR_STATE_NMI); | 3218 | GUEST_INTR_STATE_NMI); |
3112 | } | 3219 | } else if (unlikely(vmx->soft_vnmi_blocked)) |
3220 | vmx->vnmi_blocked_time += | ||
3221 | ktime_to_ns(ktime_sub(ktime_get(), vmx->entry_time)); | ||
3113 | 3222 | ||
3114 | idt_vectoring_info = vmx->idt_vectoring_info; | 3223 | idt_vectoring_info = vmx->idt_vectoring_info; |
3115 | idtv_info_valid = idt_vectoring_info & VECTORING_INFO_VALID_MASK; | 3224 | idtv_info_valid = idt_vectoring_info & VECTORING_INFO_VALID_MASK; |
@@ -3147,26 +3256,29 @@ static void vmx_intr_assist(struct kvm_vcpu *vcpu) | |||
3147 | { | 3256 | { |
3148 | update_tpr_threshold(vcpu); | 3257 | update_tpr_threshold(vcpu); |
3149 | 3258 | ||
3150 | if (cpu_has_virtual_nmis()) { | 3259 | vmx_update_window_states(vcpu); |
3151 | if (vcpu->arch.nmi_pending && !vcpu->arch.nmi_injected) { | 3260 | |
3152 | if (vcpu->arch.interrupt.pending) { | 3261 | if (vcpu->arch.nmi_pending && !vcpu->arch.nmi_injected) { |
3153 | enable_nmi_window(vcpu); | 3262 | if (vcpu->arch.interrupt.pending) { |
3154 | } else if (vmx_nmi_enabled(vcpu)) { | 3263 | enable_nmi_window(vcpu); |
3155 | vcpu->arch.nmi_pending = false; | 3264 | } else if (vcpu->arch.nmi_window_open) { |
3156 | vcpu->arch.nmi_injected = true; | 3265 | vcpu->arch.nmi_pending = false; |
3157 | } else { | 3266 | vcpu->arch.nmi_injected = true; |
3158 | enable_intr_window(vcpu); | 3267 | } else { |
3159 | return; | 3268 | enable_nmi_window(vcpu); |
3160 | } | ||
3161 | } | ||
3162 | if (vcpu->arch.nmi_injected) { | ||
3163 | vmx_inject_nmi(vcpu); | ||
3164 | enable_intr_window(vcpu); | ||
3165 | return; | 3269 | return; |
3166 | } | 3270 | } |
3167 | } | 3271 | } |
3272 | if (vcpu->arch.nmi_injected) { | ||
3273 | vmx_inject_nmi(vcpu); | ||
3274 | if (vcpu->arch.nmi_pending) | ||
3275 | enable_nmi_window(vcpu); | ||
3276 | else if (kvm_cpu_has_interrupt(vcpu)) | ||
3277 | enable_irq_window(vcpu); | ||
3278 | return; | ||
3279 | } | ||
3168 | if (!vcpu->arch.interrupt.pending && kvm_cpu_has_interrupt(vcpu)) { | 3280 | if (!vcpu->arch.interrupt.pending && kvm_cpu_has_interrupt(vcpu)) { |
3169 | if (vmx_irq_enabled(vcpu)) | 3281 | if (vcpu->arch.interrupt_window_open) |
3170 | kvm_queue_interrupt(vcpu, kvm_cpu_get_interrupt(vcpu)); | 3282 | kvm_queue_interrupt(vcpu, kvm_cpu_get_interrupt(vcpu)); |
3171 | else | 3283 | else |
3172 | enable_irq_window(vcpu); | 3284 | enable_irq_window(vcpu); |
@@ -3174,6 +3286,8 @@ static void vmx_intr_assist(struct kvm_vcpu *vcpu) | |||
3174 | if (vcpu->arch.interrupt.pending) { | 3286 | if (vcpu->arch.interrupt.pending) { |
3175 | vmx_inject_irq(vcpu, vcpu->arch.interrupt.nr); | 3287 | vmx_inject_irq(vcpu, vcpu->arch.interrupt.nr); |
3176 | kvm_timer_intr_post(vcpu, vcpu->arch.interrupt.nr); | 3288 | kvm_timer_intr_post(vcpu, vcpu->arch.interrupt.nr); |
3289 | if (kvm_cpu_has_interrupt(vcpu)) | ||
3290 | enable_irq_window(vcpu); | ||
3177 | } | 3291 | } |
3178 | } | 3292 | } |
3179 | 3293 | ||
@@ -3213,6 +3327,10 @@ static void vmx_vcpu_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run) | |||
3213 | struct vcpu_vmx *vmx = to_vmx(vcpu); | 3327 | struct vcpu_vmx *vmx = to_vmx(vcpu); |
3214 | u32 intr_info; | 3328 | u32 intr_info; |
3215 | 3329 | ||
3330 | /* Record the guest's net vcpu time for enforced NMI injections. */ | ||
3331 | if (unlikely(!cpu_has_virtual_nmis() && vmx->soft_vnmi_blocked)) | ||
3332 | vmx->entry_time = ktime_get(); | ||
3333 | |||
3216 | /* Handle invalid guest state instead of entering VMX */ | 3334 | /* Handle invalid guest state instead of entering VMX */ |
3217 | if (vmx->emulation_required && emulate_invalid_guest_state) { | 3335 | if (vmx->emulation_required && emulate_invalid_guest_state) { |
3218 | handle_invalid_guest_state(vcpu, kvm_run); | 3336 | handle_invalid_guest_state(vcpu, kvm_run); |
@@ -3327,9 +3445,7 @@ static void vmx_vcpu_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run) | |||
3327 | if (vmx->rmode.irq.pending) | 3445 | if (vmx->rmode.irq.pending) |
3328 | fixup_rmode_irq(vmx); | 3446 | fixup_rmode_irq(vmx); |
3329 | 3447 | ||
3330 | vcpu->arch.interrupt_window_open = | 3448 | vmx_update_window_states(vcpu); |
3331 | (vmcs_read32(GUEST_INTERRUPTIBILITY_INFO) & | ||
3332 | (GUEST_INTR_STATE_STI | GUEST_INTR_STATE_MOV_SS)) == 0; | ||
3333 | 3449 | ||
3334 | asm("mov %0, %%ds; mov %0, %%es" : : "r"(__USER_DS)); | 3450 | asm("mov %0, %%ds; mov %0, %%es" : : "r"(__USER_DS)); |
3335 | vmx->launched = 1; | 3451 | vmx->launched = 1; |
@@ -3337,7 +3453,7 @@ static void vmx_vcpu_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run) | |||
3337 | intr_info = vmcs_read32(VM_EXIT_INTR_INFO); | 3453 | intr_info = vmcs_read32(VM_EXIT_INTR_INFO); |
3338 | 3454 | ||
3339 | /* We need to handle NMIs before interrupts are enabled */ | 3455 | /* We need to handle NMIs before interrupts are enabled */ |
3340 | if ((intr_info & INTR_INFO_INTR_TYPE_MASK) == 0x200 && | 3456 | if ((intr_info & INTR_INFO_INTR_TYPE_MASK) == INTR_TYPE_NMI_INTR && |
3341 | (intr_info & INTR_INFO_VALID_MASK)) { | 3457 | (intr_info & INTR_INFO_VALID_MASK)) { |
3342 | KVMTRACE_0D(NMI, vcpu, handler); | 3458 | KVMTRACE_0D(NMI, vcpu, handler); |
3343 | asm("int $2"); | 3459 | asm("int $2"); |
@@ -3455,6 +3571,11 @@ static int get_ept_level(void) | |||
3455 | return VMX_EPT_DEFAULT_GAW + 1; | 3571 | return VMX_EPT_DEFAULT_GAW + 1; |
3456 | } | 3572 | } |
3457 | 3573 | ||
3574 | static int vmx_get_mt_mask_shift(void) | ||
3575 | { | ||
3576 | return VMX_EPT_MT_EPTE_SHIFT; | ||
3577 | } | ||
3578 | |||
3458 | static struct kvm_x86_ops vmx_x86_ops = { | 3579 | static struct kvm_x86_ops vmx_x86_ops = { |
3459 | .cpu_has_kvm_support = cpu_has_kvm_support, | 3580 | .cpu_has_kvm_support = cpu_has_kvm_support, |
3460 | .disabled_by_bios = vmx_disabled_by_bios, | 3581 | .disabled_by_bios = vmx_disabled_by_bios, |
@@ -3510,6 +3631,7 @@ static struct kvm_x86_ops vmx_x86_ops = { | |||
3510 | 3631 | ||
3511 | .set_tss_addr = vmx_set_tss_addr, | 3632 | .set_tss_addr = vmx_set_tss_addr, |
3512 | .get_tdp_level = get_ept_level, | 3633 | .get_tdp_level = get_ept_level, |
3634 | .get_mt_mask_shift = vmx_get_mt_mask_shift, | ||
3513 | }; | 3635 | }; |
3514 | 3636 | ||
3515 | static int __init vmx_init(void) | 3637 | static int __init vmx_init(void) |
@@ -3566,10 +3688,10 @@ static int __init vmx_init(void) | |||
3566 | bypass_guest_pf = 0; | 3688 | bypass_guest_pf = 0; |
3567 | kvm_mmu_set_base_ptes(VMX_EPT_READABLE_MASK | | 3689 | kvm_mmu_set_base_ptes(VMX_EPT_READABLE_MASK | |
3568 | VMX_EPT_WRITABLE_MASK | | 3690 | VMX_EPT_WRITABLE_MASK | |
3569 | VMX_EPT_DEFAULT_MT << VMX_EPT_MT_EPTE_SHIFT | | ||
3570 | VMX_EPT_IGMT_BIT); | 3691 | VMX_EPT_IGMT_BIT); |
3571 | kvm_mmu_set_mask_ptes(0ull, 0ull, 0ull, 0ull, | 3692 | kvm_mmu_set_mask_ptes(0ull, 0ull, 0ull, 0ull, |
3572 | VMX_EPT_EXECUTABLE_MASK); | 3693 | VMX_EPT_EXECUTABLE_MASK, |
3694 | VMX_EPT_DEFAULT_MT << VMX_EPT_MT_EPTE_SHIFT); | ||
3573 | kvm_enable_tdp(); | 3695 | kvm_enable_tdp(); |
3574 | } else | 3696 | } else |
3575 | kvm_disable_tdp(); | 3697 | kvm_disable_tdp(); |
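
The vmx_init() hunk stops baking the default EPT memory type into the always-set base PTE bits and instead hands the memory-type field to the MMU as a separate mask, with the new get_mt_mask_shift() callback (returning VMX_EPT_MT_EPTE_SHIFT, i.e. bit 3) telling it where the 3-bit field sits in an EPT PTE. That is what allows the MMU to stamp per-page memory types derived from the guest's MTRR/PAT state (see the mtrr_state work in x86.c below) instead of one global default. In effect:

    /* EPT PTE memory type occupies bits 5:3; shift == VMX_EPT_MT_EPTE_SHIFT */
    u64 mt_mask = (u64)VMX_EPT_DEFAULT_MT << VMX_EPT_MT_EPTE_SHIFT;

    /* was folded into kvm_mmu_set_base_ptes(); now a separate argument, so
     * the MMU may substitute a per-page type before writing each PTE */
    kvm_mmu_set_mask_ptes(0ull, 0ull, 0ull, 0ull,
                          VMX_EPT_EXECUTABLE_MASK, mt_mask);
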
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c index f1f8ff2f1fa2..0e6aa8141dcd 100644 --- a/arch/x86/kvm/x86.c +++ b/arch/x86/kvm/x86.c | |||
@@ -39,6 +39,7 @@ | |||
39 | #include <asm/uaccess.h> | 39 | #include <asm/uaccess.h> |
40 | #include <asm/msr.h> | 40 | #include <asm/msr.h> |
41 | #include <asm/desc.h> | 41 | #include <asm/desc.h> |
42 | #include <asm/mtrr.h> | ||
42 | 43 | ||
43 | #define MAX_IO_MSRS 256 | 44 | #define MAX_IO_MSRS 256 |
44 | #define CR0_RESERVED_BITS \ | 45 | #define CR0_RESERVED_BITS \ |
@@ -86,6 +87,7 @@ struct kvm_stats_debugfs_item debugfs_entries[] = { | |||
86 | { "halt_wakeup", VCPU_STAT(halt_wakeup) }, | 87 | { "halt_wakeup", VCPU_STAT(halt_wakeup) }, |
87 | { "hypercalls", VCPU_STAT(hypercalls) }, | 88 | { "hypercalls", VCPU_STAT(hypercalls) }, |
88 | { "request_irq", VCPU_STAT(request_irq_exits) }, | 89 | { "request_irq", VCPU_STAT(request_irq_exits) }, |
90 | { "request_nmi", VCPU_STAT(request_nmi_exits) }, | ||
89 | { "irq_exits", VCPU_STAT(irq_exits) }, | 91 | { "irq_exits", VCPU_STAT(irq_exits) }, |
90 | { "host_state_reload", VCPU_STAT(host_state_reload) }, | 92 | { "host_state_reload", VCPU_STAT(host_state_reload) }, |
91 | { "efer_reload", VCPU_STAT(efer_reload) }, | 93 | { "efer_reload", VCPU_STAT(efer_reload) }, |
@@ -93,6 +95,7 @@ struct kvm_stats_debugfs_item debugfs_entries[] = { | |||
93 | { "insn_emulation", VCPU_STAT(insn_emulation) }, | 95 | { "insn_emulation", VCPU_STAT(insn_emulation) }, |
94 | { "insn_emulation_fail", VCPU_STAT(insn_emulation_fail) }, | 96 | { "insn_emulation_fail", VCPU_STAT(insn_emulation_fail) }, |
95 | { "irq_injections", VCPU_STAT(irq_injections) }, | 97 | { "irq_injections", VCPU_STAT(irq_injections) }, |
98 | { "nmi_injections", VCPU_STAT(nmi_injections) }, | ||
96 | { "mmu_shadow_zapped", VM_STAT(mmu_shadow_zapped) }, | 99 | { "mmu_shadow_zapped", VM_STAT(mmu_shadow_zapped) }, |
97 | { "mmu_pte_write", VM_STAT(mmu_pte_write) }, | 100 | { "mmu_pte_write", VM_STAT(mmu_pte_write) }, |
98 | { "mmu_pte_updated", VM_STAT(mmu_pte_updated) }, | 101 | { "mmu_pte_updated", VM_STAT(mmu_pte_updated) }, |
@@ -101,6 +104,7 @@ struct kvm_stats_debugfs_item debugfs_entries[] = { | |||
101 | { "mmu_recycled", VM_STAT(mmu_recycled) }, | 104 | { "mmu_recycled", VM_STAT(mmu_recycled) }, |
102 | { "mmu_cache_miss", VM_STAT(mmu_cache_miss) }, | 105 | { "mmu_cache_miss", VM_STAT(mmu_cache_miss) }, |
103 | { "mmu_unsync", VM_STAT(mmu_unsync) }, | 106 | { "mmu_unsync", VM_STAT(mmu_unsync) }, |
107 | { "mmu_unsync_global", VM_STAT(mmu_unsync_global) }, | ||
104 | { "remote_tlb_flush", VM_STAT(remote_tlb_flush) }, | 108 | { "remote_tlb_flush", VM_STAT(remote_tlb_flush) }, |
105 | { "largepages", VM_STAT(lpages) }, | 109 | { "largepages", VM_STAT(lpages) }, |
106 | { NULL } | 110 | { NULL } |
@@ -312,6 +316,7 @@ void kvm_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0) | |||
312 | kvm_x86_ops->set_cr0(vcpu, cr0); | 316 | kvm_x86_ops->set_cr0(vcpu, cr0); |
313 | vcpu->arch.cr0 = cr0; | 317 | vcpu->arch.cr0 = cr0; |
314 | 318 | ||
319 | kvm_mmu_sync_global(vcpu); | ||
315 | kvm_mmu_reset_context(vcpu); | 320 | kvm_mmu_reset_context(vcpu); |
316 | return; | 321 | return; |
317 | } | 322 | } |
@@ -355,6 +360,7 @@ void kvm_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4) | |||
355 | } | 360 | } |
356 | kvm_x86_ops->set_cr4(vcpu, cr4); | 361 | kvm_x86_ops->set_cr4(vcpu, cr4); |
357 | vcpu->arch.cr4 = cr4; | 362 | vcpu->arch.cr4 = cr4; |
363 | kvm_mmu_sync_global(vcpu); | ||
358 | kvm_mmu_reset_context(vcpu); | 364 | kvm_mmu_reset_context(vcpu); |
359 | } | 365 | } |
360 | EXPORT_SYMBOL_GPL(kvm_set_cr4); | 366 | EXPORT_SYMBOL_GPL(kvm_set_cr4); |
@@ -449,7 +455,7 @@ static u32 msrs_to_save[] = { | |||
449 | MSR_CSTAR, MSR_KERNEL_GS_BASE, MSR_SYSCALL_MASK, MSR_LSTAR, | 455 | MSR_CSTAR, MSR_KERNEL_GS_BASE, MSR_SYSCALL_MASK, MSR_LSTAR, |
450 | #endif | 456 | #endif |
451 | MSR_IA32_TIME_STAMP_COUNTER, MSR_KVM_SYSTEM_TIME, MSR_KVM_WALL_CLOCK, | 457 | MSR_IA32_TIME_STAMP_COUNTER, MSR_KVM_SYSTEM_TIME, MSR_KVM_WALL_CLOCK, |
452 | MSR_IA32_PERF_STATUS, | 458 | MSR_IA32_PERF_STATUS, MSR_IA32_CR_PAT |
453 | }; | 459 | }; |
454 | 460 | ||
455 | static unsigned num_msrs_to_save; | 461 | static unsigned num_msrs_to_save; |
@@ -648,10 +654,38 @@ static bool msr_mtrr_valid(unsigned msr) | |||
648 | 654 | ||
649 | static int set_msr_mtrr(struct kvm_vcpu *vcpu, u32 msr, u64 data) | 655 | static int set_msr_mtrr(struct kvm_vcpu *vcpu, u32 msr, u64 data) |
650 | { | 656 | { |
657 | u64 *p = (u64 *)&vcpu->arch.mtrr_state.fixed_ranges; | ||
658 | |||
651 | if (!msr_mtrr_valid(msr)) | 659 | if (!msr_mtrr_valid(msr)) |
652 | return 1; | 660 | return 1; |
653 | 661 | ||
654 | vcpu->arch.mtrr[msr - 0x200] = data; | 662 | if (msr == MSR_MTRRdefType) { |
663 | vcpu->arch.mtrr_state.def_type = data; | ||
664 | vcpu->arch.mtrr_state.enabled = (data & 0xc00) >> 10; | ||
665 | } else if (msr == MSR_MTRRfix64K_00000) | ||
666 | p[0] = data; | ||
667 | else if (msr == MSR_MTRRfix16K_80000 || msr == MSR_MTRRfix16K_A0000) | ||
668 | p[1 + msr - MSR_MTRRfix16K_80000] = data; | ||
669 | else if (msr >= MSR_MTRRfix4K_C0000 && msr <= MSR_MTRRfix4K_F8000) | ||
670 | p[3 + msr - MSR_MTRRfix4K_C0000] = data; | ||
671 | else if (msr == MSR_IA32_CR_PAT) | ||
672 | vcpu->arch.pat = data; | ||
673 | else { /* Variable MTRRs */ | ||
674 | int idx, is_mtrr_mask; | ||
675 | u64 *pt; | ||
676 | |||
677 | idx = (msr - 0x200) / 2; | ||
678 | is_mtrr_mask = msr - 0x200 - 2 * idx; | ||
679 | if (!is_mtrr_mask) | ||
680 | pt = | ||
681 | (u64 *)&vcpu->arch.mtrr_state.var_ranges[idx].base_lo; | ||
682 | else | ||
683 | pt = | ||
684 | (u64 *)&vcpu->arch.mtrr_state.var_ranges[idx].mask_lo; | ||
685 | *pt = data; | ||
686 | } | ||
687 | |||
688 | kvm_mmu_reset_context(vcpu); | ||
655 | return 0; | 689 | return 0; |
656 | } | 690 | } |
657 | 691 | ||
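
The rewritten set_msr_mtrr() stops treating the MTRR MSRs as an opaque array and files each write into a structured mtrr_state: default type, fixed ranges, PAT, and variable ranges. Variable-range MTRRs come in base/mask pairs starting at MSR 0x200, so the pair index and base-vs-mask selection fall out of simple arithmetic; and because memory types may have changed, every accepted write ends with kvm_mmu_reset_context() so cached translations are rebuilt. A standalone check of the index arithmetic:

    #include <stdio.h>

    int main(void)
    {
        /* MTRRphysBase0 = 0x200, MTRRphysMask0 = 0x201, Base1 = 0x202, ... */
        for (unsigned msr = 0x200; msr <= 0x205; msr++) {
            int idx = (msr - 0x200) / 2;            /* which base/mask pair */
            int is_mask = (msr - 0x200) - 2 * idx;  /* 0 = base, 1 = mask   */
            printf("msr 0x%x -> var range %d, %s\n",
                   msr, idx, is_mask ? "mask" : "base");
        }
        return 0;
    }

The get_msr_mtrr() hunk below is the exact mirror image on the read side, including the same pair arithmetic.
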
@@ -747,10 +781,37 @@ int kvm_get_msr(struct kvm_vcpu *vcpu, u32 msr_index, u64 *pdata) | |||
747 | 781 | ||
748 | static int get_msr_mtrr(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata) | 782 | static int get_msr_mtrr(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata) |
749 | { | 783 | { |
784 | u64 *p = (u64 *)&vcpu->arch.mtrr_state.fixed_ranges; | ||
785 | |||
750 | if (!msr_mtrr_valid(msr)) | 786 | if (!msr_mtrr_valid(msr)) |
751 | return 1; | 787 | return 1; |
752 | 788 | ||
753 | *pdata = vcpu->arch.mtrr[msr - 0x200]; | 789 | if (msr == MSR_MTRRdefType) |
790 | *pdata = vcpu->arch.mtrr_state.def_type + | ||
791 | (vcpu->arch.mtrr_state.enabled << 10); | ||
792 | else if (msr == MSR_MTRRfix64K_00000) | ||
793 | *pdata = p[0]; | ||
794 | else if (msr == MSR_MTRRfix16K_80000 || msr == MSR_MTRRfix16K_A0000) | ||
795 | *pdata = p[1 + msr - MSR_MTRRfix16K_80000]; | ||
796 | else if (msr >= MSR_MTRRfix4K_C0000 && msr <= MSR_MTRRfix4K_F8000) | ||
797 | *pdata = p[3 + msr - MSR_MTRRfix4K_C0000]; | ||
798 | else if (msr == MSR_IA32_CR_PAT) | ||
799 | *pdata = vcpu->arch.pat; | ||
800 | else { /* Variable MTRRs */ | ||
801 | int idx, is_mtrr_mask; | ||
802 | u64 *pt; | ||
803 | |||
804 | idx = (msr - 0x200) / 2; | ||
805 | is_mtrr_mask = msr - 0x200 - 2 * idx; | ||
806 | if (!is_mtrr_mask) | ||
807 | pt = | ||
808 | (u64 *)&vcpu->arch.mtrr_state.var_ranges[idx].base_lo; | ||
809 | else | ||
810 | pt = | ||
811 | (u64 *)&vcpu->arch.mtrr_state.var_ranges[idx].mask_lo; | ||
812 | *pdata = *pt; | ||
813 | } | ||
814 | |||
754 | return 0; | 815 | return 0; |
755 | } | 816 | } |
756 | 817 | ||
@@ -903,7 +964,6 @@ int kvm_dev_ioctl_check_extension(long ext) | |||
903 | case KVM_CAP_IRQCHIP: | 964 | case KVM_CAP_IRQCHIP: |
904 | case KVM_CAP_HLT: | 965 | case KVM_CAP_HLT: |
905 | case KVM_CAP_MMU_SHADOW_CACHE_CONTROL: | 966 | case KVM_CAP_MMU_SHADOW_CACHE_CONTROL: |
906 | case KVM_CAP_USER_MEMORY: | ||
907 | case KVM_CAP_SET_TSS_ADDR: | 967 | case KVM_CAP_SET_TSS_ADDR: |
908 | case KVM_CAP_EXT_CPUID: | 968 | case KVM_CAP_EXT_CPUID: |
909 | case KVM_CAP_CLOCKSOURCE: | 969 | case KVM_CAP_CLOCKSOURCE: |
@@ -1188,6 +1248,7 @@ static void do_cpuid_ent(struct kvm_cpuid_entry2 *entry, u32 function, | |||
1188 | int t, times = entry->eax & 0xff; | 1248 | int t, times = entry->eax & 0xff; |
1189 | 1249 | ||
1190 | entry->flags |= KVM_CPUID_FLAG_STATEFUL_FUNC; | 1250 | entry->flags |= KVM_CPUID_FLAG_STATEFUL_FUNC; |
1251 | entry->flags |= KVM_CPUID_FLAG_STATE_READ_NEXT; | ||
1191 | for (t = 1; t < times && *nent < maxnent; ++t) { | 1252 | for (t = 1; t < times && *nent < maxnent; ++t) { |
1192 | do_cpuid_1_ent(&entry[t], function, 0); | 1253 | do_cpuid_1_ent(&entry[t], function, 0); |
1193 | entry[t].flags |= KVM_CPUID_FLAG_STATEFUL_FUNC; | 1254 | entry[t].flags |= KVM_CPUID_FLAG_STATEFUL_FUNC; |
@@ -1218,7 +1279,7 @@ static void do_cpuid_ent(struct kvm_cpuid_entry2 *entry, u32 function, | |||
1218 | entry->flags |= KVM_CPUID_FLAG_SIGNIFCANT_INDEX; | 1279 | entry->flags |= KVM_CPUID_FLAG_SIGNIFCANT_INDEX; |
1219 | /* read more entries until level_type is zero */ | 1280 | /* read more entries until level_type is zero */ |
1220 | for (i = 1; *nent < maxnent; ++i) { | 1281 | for (i = 1; *nent < maxnent; ++i) { |
1221 | level_type = entry[i - 1].ecx & 0xff; | 1282 | level_type = entry[i - 1].ecx & 0xff00; |
1222 | if (!level_type) | 1283 | if (!level_type) |
1223 | break; | 1284 | break; |
1224 | do_cpuid_1_ent(&entry[i], function, i); | 1285 | do_cpuid_1_ent(&entry[i], function, i); |
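
The do_cpuid_ent() change here is a real bug fix. For CPUID leaf 0xb (extended topology), the level-type field is ECX bits 15:8, not 7:0; sub-leaf 0 echoes its level number (0) in the low byte, so masking with 0xff saw zero immediately and the loop recorded only a single entry. A quick check of the field extraction:

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        /* Leaf 0xb, sub-leaf 0: ECX[7:0] echoes the level number (== 0),
         * ECX[15:8] is the level type (1 = SMT, 2 = core, 0 = stop). */
        uint32_t ecx = 0x0100;   /* level 0, type SMT */

        printf("old mask: 0x%02x (loop broke immediately)\n",
               (unsigned)(ecx & 0xff));
        printf("new mask: 0x%04x (enumeration continues)\n",
               (unsigned)(ecx & 0xff00));
        return 0;
    }
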
@@ -1318,6 +1379,15 @@ static int kvm_vcpu_ioctl_interrupt(struct kvm_vcpu *vcpu, | |||
1318 | return 0; | 1379 | return 0; |
1319 | } | 1380 | } |
1320 | 1381 | ||
1382 | static int kvm_vcpu_ioctl_nmi(struct kvm_vcpu *vcpu) | ||
1383 | { | ||
1384 | vcpu_load(vcpu); | ||
1385 | kvm_inject_nmi(vcpu); | ||
1386 | vcpu_put(vcpu); | ||
1387 | |||
1388 | return 0; | ||
1389 | } | ||
1390 | |||
1321 | static int vcpu_ioctl_tpr_access_reporting(struct kvm_vcpu *vcpu, | 1391 | static int vcpu_ioctl_tpr_access_reporting(struct kvm_vcpu *vcpu, |
1322 | struct kvm_tpr_access_ctl *tac) | 1392 | struct kvm_tpr_access_ctl *tac) |
1323 | { | 1393 | { |
@@ -1377,6 +1447,13 @@ long kvm_arch_vcpu_ioctl(struct file *filp, | |||
1377 | r = 0; | 1447 | r = 0; |
1378 | break; | 1448 | break; |
1379 | } | 1449 | } |
1450 | case KVM_NMI: { | ||
1451 | r = kvm_vcpu_ioctl_nmi(vcpu); | ||
1452 | if (r) | ||
1453 | goto out; | ||
1454 | r = 0; | ||
1455 | break; | ||
1456 | } | ||
1380 | case KVM_SET_CPUID: { | 1457 | case KVM_SET_CPUID: { |
1381 | struct kvm_cpuid __user *cpuid_arg = argp; | 1458 | struct kvm_cpuid __user *cpuid_arg = argp; |
1382 | struct kvm_cpuid cpuid; | 1459 | struct kvm_cpuid cpuid; |
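
KVM_NMI gives userspace a way to inject an NMI into a specific vcpu, e.g. to emulate an NMI button or a watchdog. It is an argument-less vcpu ioctl; a hedged usage sketch, assuming a vcpu fd previously obtained via KVM_CREATE_VCPU:

    #include <sys/ioctl.h>
    #include <linux/kvm.h>
    #include <stdio.h>

    /* vcpu_fd: returned by ioctl(vm_fd, KVM_CREATE_VCPU, id) */
    static int inject_nmi(int vcpu_fd)
    {
        if (ioctl(vcpu_fd, KVM_NMI) < 0) {
            perror("KVM_NMI");
            return -1;
        }
        return 0;
    }
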
@@ -1968,7 +2045,7 @@ int emulator_write_phys(struct kvm_vcpu *vcpu, gpa_t gpa, | |||
1968 | ret = kvm_write_guest(vcpu->kvm, gpa, val, bytes); | 2045 | ret = kvm_write_guest(vcpu->kvm, gpa, val, bytes); |
1969 | if (ret < 0) | 2046 | if (ret < 0) |
1970 | return 0; | 2047 | return 0; |
1971 | kvm_mmu_pte_write(vcpu, gpa, val, bytes); | 2048 | kvm_mmu_pte_write(vcpu, gpa, val, bytes, 1); |
1972 | return 1; | 2049 | return 1; |
1973 | } | 2050 | } |
1974 | 2051 | ||
@@ -2404,8 +2481,6 @@ int kvm_emulate_pio(struct kvm_vcpu *vcpu, struct kvm_run *run, int in, | |||
2404 | val = kvm_register_read(vcpu, VCPU_REGS_RAX); | 2481 | val = kvm_register_read(vcpu, VCPU_REGS_RAX); |
2405 | memcpy(vcpu->arch.pio_data, &val, 4); | 2482 | memcpy(vcpu->arch.pio_data, &val, 4); |
2406 | 2483 | ||
2407 | kvm_x86_ops->skip_emulated_instruction(vcpu); | ||
2408 | |||
2409 | pio_dev = vcpu_find_pio_dev(vcpu, port, size, !in); | 2484 | pio_dev = vcpu_find_pio_dev(vcpu, port, size, !in); |
2410 | if (pio_dev) { | 2485 | if (pio_dev) { |
2411 | kernel_pio(pio_dev, vcpu, vcpu->arch.pio_data); | 2486 | kernel_pio(pio_dev, vcpu, vcpu->arch.pio_data); |
@@ -2541,7 +2616,7 @@ int kvm_arch_init(void *opaque) | |||
2541 | kvm_mmu_set_nonpresent_ptes(0ull, 0ull); | 2616 | kvm_mmu_set_nonpresent_ptes(0ull, 0ull); |
2542 | kvm_mmu_set_base_ptes(PT_PRESENT_MASK); | 2617 | kvm_mmu_set_base_ptes(PT_PRESENT_MASK); |
2543 | kvm_mmu_set_mask_ptes(PT_USER_MASK, PT_ACCESSED_MASK, | 2618 | kvm_mmu_set_mask_ptes(PT_USER_MASK, PT_ACCESSED_MASK, |
2544 | PT_DIRTY_MASK, PT64_NX_MASK, 0); | 2619 | PT_DIRTY_MASK, PT64_NX_MASK, 0, 0); |
2545 | return 0; | 2620 | return 0; |
2546 | 2621 | ||
2547 | out: | 2622 | out: |
@@ -2729,7 +2804,7 @@ static int move_to_next_stateful_cpuid_entry(struct kvm_vcpu *vcpu, int i) | |||
2729 | 2804 | ||
2730 | e->flags &= ~KVM_CPUID_FLAG_STATE_READ_NEXT; | 2805 | e->flags &= ~KVM_CPUID_FLAG_STATE_READ_NEXT; |
2731 | /* when no next entry is found, the current entry[i] is reselected */ | 2806 | /* when no next entry is found, the current entry[i] is reselected */ |
2732 | for (j = i + 1; j == i; j = (j + 1) % nent) { | 2807 | for (j = i + 1; ; j = (j + 1) % nent) { |
2733 | struct kvm_cpuid_entry2 *ej = &vcpu->arch.cpuid_entries[j]; | 2808 | struct kvm_cpuid_entry2 *ej = &vcpu->arch.cpuid_entries[j]; |
2734 | if (ej->function == e->function) { | 2809 | if (ej->function == e->function) { |
2735 | ej->flags |= KVM_CPUID_FLAG_STATE_READ_NEXT; | 2810 | ej->flags |= KVM_CPUID_FLAG_STATE_READ_NEXT; |
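
The move_to_next_stateful_cpuid_entry() change fixes a degenerate loop: in `for (j = i + 1; j == i; ...)` the condition is false on the very first test (j starts at i + 1), so the body never executed and the next stateful entry was never marked. Dropping the condition turns it into a rotating search that terminates at the latest when j wraps back to i itself, the comment's "current entry is reselected" case. A runnable model (this sketch also wraps the initial index, which the raw hunk does not):

    #include <stdio.h>

    int main(void)
    {
        int nent = 4, i = 3, func[] = { 4, 11, 11, 4 };
        int j;

        /* rotating search starting just after i; worst case stops at i */
        for (j = (i + 1) % nent; ; j = (j + 1) % nent) {
            if (func[j] == func[i]) {
                printf("next stateful entry for func %d is index %d\n",
                       func[i], j);
                break;
            }
        }
        return 0;
    }
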
@@ -2973,7 +3048,7 @@ static int __vcpu_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run) | |||
2973 | pr_debug("vcpu %d received sipi with vector # %x\n", | 3048 | pr_debug("vcpu %d received sipi with vector # %x\n", |
2974 | vcpu->vcpu_id, vcpu->arch.sipi_vector); | 3049 | vcpu->vcpu_id, vcpu->arch.sipi_vector); |
2975 | kvm_lapic_reset(vcpu); | 3050 | kvm_lapic_reset(vcpu); |
2976 | r = kvm_x86_ops->vcpu_reset(vcpu); | 3051 | r = kvm_arch_vcpu_reset(vcpu); |
2977 | if (r) | 3052 | if (r) |
2978 | return r; | 3053 | return r; |
2979 | vcpu->arch.mp_state = KVM_MP_STATE_RUNNABLE; | 3054 | vcpu->arch.mp_state = KVM_MP_STATE_RUNNABLE; |
@@ -3275,9 +3350,9 @@ static void seg_desct_to_kvm_desct(struct desc_struct *seg_desc, u16 selector, | |||
3275 | kvm_desct->padding = 0; | 3350 | kvm_desct->padding = 0; |
3276 | } | 3351 | } |
3277 | 3352 | ||
3278 | static void get_segment_descritptor_dtable(struct kvm_vcpu *vcpu, | 3353 | static void get_segment_descriptor_dtable(struct kvm_vcpu *vcpu, |
3279 | u16 selector, | 3354 | u16 selector, |
3280 | struct descriptor_table *dtable) | 3355 | struct descriptor_table *dtable) |
3281 | { | 3356 | { |
3282 | if (selector & 1 << 2) { | 3357 | if (selector & 1 << 2) { |
3283 | struct kvm_segment kvm_seg; | 3358 | struct kvm_segment kvm_seg; |
@@ -3302,7 +3377,7 @@ static int load_guest_segment_descriptor(struct kvm_vcpu *vcpu, u16 selector, | |||
3302 | struct descriptor_table dtable; | 3377 | struct descriptor_table dtable; |
3303 | u16 index = selector >> 3; | 3378 | u16 index = selector >> 3; |
3304 | 3379 | ||
3305 | get_segment_descritptor_dtable(vcpu, selector, &dtable); | 3380 | get_segment_descriptor_dtable(vcpu, selector, &dtable); |
3306 | 3381 | ||
3307 | if (dtable.limit < index * 8 + 7) { | 3382 | if (dtable.limit < index * 8 + 7) { |
3308 | kvm_queue_exception_e(vcpu, GP_VECTOR, selector & 0xfffc); | 3383 | kvm_queue_exception_e(vcpu, GP_VECTOR, selector & 0xfffc); |
@@ -3321,7 +3396,7 @@ static int save_guest_segment_descriptor(struct kvm_vcpu *vcpu, u16 selector, | |||
3321 | struct descriptor_table dtable; | 3396 | struct descriptor_table dtable; |
3322 | u16 index = selector >> 3; | 3397 | u16 index = selector >> 3; |
3323 | 3398 | ||
3324 | get_segment_descritptor_dtable(vcpu, selector, &dtable); | 3399 | get_segment_descriptor_dtable(vcpu, selector, &dtable); |
3325 | 3400 | ||
3326 | if (dtable.limit < index * 8 + 7) | 3401 | if (dtable.limit < index * 8 + 7) |
3327 | return 1; | 3402 | return 1; |
@@ -3900,6 +3975,7 @@ int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu) | |||
3900 | /* We do fxsave: this must be aligned. */ | 3975 | /* We do fxsave: this must be aligned. */ |
3901 | BUG_ON((unsigned long)&vcpu->arch.host_fx_image & 0xF); | 3976 | BUG_ON((unsigned long)&vcpu->arch.host_fx_image & 0xF); |
3902 | 3977 | ||
3978 | vcpu->arch.mtrr_state.have_fixed = 1; | ||
3903 | vcpu_load(vcpu); | 3979 | vcpu_load(vcpu); |
3904 | r = kvm_arch_vcpu_reset(vcpu); | 3980 | r = kvm_arch_vcpu_reset(vcpu); |
3905 | if (r == 0) | 3981 | if (r == 0) |
@@ -3925,6 +4001,9 @@ void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu) | |||
3925 | 4001 | ||
3926 | int kvm_arch_vcpu_reset(struct kvm_vcpu *vcpu) | 4002 | int kvm_arch_vcpu_reset(struct kvm_vcpu *vcpu) |
3927 | { | 4003 | { |
4004 | vcpu->arch.nmi_pending = false; | ||
4005 | vcpu->arch.nmi_injected = false; | ||
4006 | |||
3928 | return kvm_x86_ops->vcpu_reset(vcpu); | 4007 | return kvm_x86_ops->vcpu_reset(vcpu); |
3929 | } | 4008 | } |
3930 | 4009 | ||
@@ -4012,6 +4091,7 @@ struct kvm *kvm_arch_create_vm(void) | |||
4012 | return ERR_PTR(-ENOMEM); | 4091 | return ERR_PTR(-ENOMEM); |
4013 | 4092 | ||
4014 | INIT_LIST_HEAD(&kvm->arch.active_mmu_pages); | 4093 | INIT_LIST_HEAD(&kvm->arch.active_mmu_pages); |
4094 | INIT_LIST_HEAD(&kvm->arch.oos_global_pages); | ||
4015 | INIT_LIST_HEAD(&kvm->arch.assigned_dev_head); | 4095 | INIT_LIST_HEAD(&kvm->arch.assigned_dev_head); |
4016 | 4096 | ||
4017 | /* Reserve bit 0 of irq_sources_bitmap for userspace irq source */ | 4097 | /* Reserve bit 0 of irq_sources_bitmap for userspace irq source */ |
@@ -4048,8 +4128,8 @@ static void kvm_free_vcpus(struct kvm *kvm) | |||
4048 | 4128 | ||
4049 | void kvm_arch_destroy_vm(struct kvm *kvm) | 4129 | void kvm_arch_destroy_vm(struct kvm *kvm) |
4050 | { | 4130 | { |
4051 | kvm_iommu_unmap_guest(kvm); | ||
4052 | kvm_free_all_assigned_devices(kvm); | 4131 | kvm_free_all_assigned_devices(kvm); |
4132 | kvm_iommu_unmap_guest(kvm); | ||
4053 | kvm_free_pit(kvm); | 4133 | kvm_free_pit(kvm); |
4054 | kfree(kvm->arch.vpic); | 4134 | kfree(kvm->arch.vpic); |
4055 | kfree(kvm->arch.vioapic); | 4135 | kfree(kvm->arch.vioapic); |
@@ -4127,7 +4207,8 @@ void kvm_arch_flush_shadow(struct kvm *kvm) | |||
4127 | int kvm_arch_vcpu_runnable(struct kvm_vcpu *vcpu) | 4207 | int kvm_arch_vcpu_runnable(struct kvm_vcpu *vcpu) |
4128 | { | 4208 | { |
4129 | return vcpu->arch.mp_state == KVM_MP_STATE_RUNNABLE | 4209 | return vcpu->arch.mp_state == KVM_MP_STATE_RUNNABLE |
4130 | || vcpu->arch.mp_state == KVM_MP_STATE_SIPI_RECEIVED; | 4210 | || vcpu->arch.mp_state == KVM_MP_STATE_SIPI_RECEIVED |
4211 | || vcpu->arch.nmi_pending; | ||
4131 | } | 4212 | } |
4132 | 4213 | ||
4133 | static void vcpu_kick_intr(void *info) | 4214 | static void vcpu_kick_intr(void *info) |
diff --git a/arch/x86/kvm/x86_emulate.c b/arch/x86/kvm/x86_emulate.c index ea051173b0da..d174db7a3370 100644 --- a/arch/x86/kvm/x86_emulate.c +++ b/arch/x86/kvm/x86_emulate.c | |||
@@ -58,6 +58,7 @@ | |||
58 | #define SrcMem32 (4<<4) /* Memory operand (32-bit). */ | 58 | #define SrcMem32 (4<<4) /* Memory operand (32-bit). */ |
59 | #define SrcImm (5<<4) /* Immediate operand. */ | 59 | #define SrcImm (5<<4) /* Immediate operand. */ |
60 | #define SrcImmByte (6<<4) /* 8-bit sign-extended immediate operand. */ | 60 | #define SrcImmByte (6<<4) /* 8-bit sign-extended immediate operand. */ |
61 | #define SrcOne (7<<4) /* Implied '1' */ | ||
61 | #define SrcMask (7<<4) | 62 | #define SrcMask (7<<4) |
62 | /* Generic ModRM decode. */ | 63 | /* Generic ModRM decode. */ |
63 | #define ModRM (1<<7) | 64 | #define ModRM (1<<7) |
@@ -70,17 +71,23 @@ | |||
70 | #define Group (1<<14) /* Bits 3:5 of modrm byte extend opcode */ | 71 | #define Group (1<<14) /* Bits 3:5 of modrm byte extend opcode */ |
71 | #define GroupDual (1<<15) /* Alternate decoding of mod == 3 */ | 72 | #define GroupDual (1<<15) /* Alternate decoding of mod == 3 */ |
72 | #define GroupMask 0xff /* Group number stored in bits 0:7 */ | 73 | #define GroupMask 0xff /* Group number stored in bits 0:7 */ |
74 | /* Source 2 operand type */ | ||
75 | #define Src2None (0<<29) | ||
76 | #define Src2CL (1<<29) | ||
77 | #define Src2ImmByte (2<<29) | ||
78 | #define Src2One (3<<29) | ||
79 | #define Src2Mask (7<<29) | ||
73 | 80 | ||
74 | enum { | 81 | enum { |
75 | Group1_80, Group1_81, Group1_82, Group1_83, | 82 | Group1_80, Group1_81, Group1_82, Group1_83, |
76 | Group1A, Group3_Byte, Group3, Group4, Group5, Group7, | 83 | Group1A, Group3_Byte, Group3, Group4, Group5, Group7, |
77 | }; | 84 | }; |
78 | 85 | ||
79 | static u16 opcode_table[256] = { | 86 | static u32 opcode_table[256] = { |
80 | /* 0x00 - 0x07 */ | 87 | /* 0x00 - 0x07 */ |
81 | ByteOp | DstMem | SrcReg | ModRM, DstMem | SrcReg | ModRM, | 88 | ByteOp | DstMem | SrcReg | ModRM, DstMem | SrcReg | ModRM, |
82 | ByteOp | DstReg | SrcMem | ModRM, DstReg | SrcMem | ModRM, | 89 | ByteOp | DstReg | SrcMem | ModRM, DstReg | SrcMem | ModRM, |
83 | 0, 0, 0, 0, | 90 | ByteOp | DstAcc | SrcImm, DstAcc | SrcImm, 0, 0, |
84 | /* 0x08 - 0x0F */ | 91 | /* 0x08 - 0x0F */ |
85 | ByteOp | DstMem | SrcReg | ModRM, DstMem | SrcReg | ModRM, | 92 | ByteOp | DstMem | SrcReg | ModRM, DstMem | SrcReg | ModRM, |
86 | ByteOp | DstReg | SrcMem | ModRM, DstReg | SrcMem | ModRM, | 93 | ByteOp | DstReg | SrcMem | ModRM, DstReg | SrcMem | ModRM, |
@@ -195,7 +202,7 @@ static u16 opcode_table[256] = { | |||
195 | ImplicitOps, ImplicitOps, Group | Group4, Group | Group5, | 202 | ImplicitOps, ImplicitOps, Group | Group4, Group | Group5, |
196 | }; | 203 | }; |
197 | 204 | ||
198 | static u16 twobyte_table[256] = { | 205 | static u32 twobyte_table[256] = { |
199 | /* 0x00 - 0x0F */ | 206 | /* 0x00 - 0x0F */ |
200 | 0, Group | GroupDual | Group7, 0, 0, 0, 0, ImplicitOps, 0, | 207 | 0, Group | GroupDual | Group7, 0, 0, 0, 0, ImplicitOps, 0, |
201 | ImplicitOps, ImplicitOps, 0, 0, 0, ImplicitOps | ModRM, 0, 0, | 208 | ImplicitOps, ImplicitOps, 0, 0, 0, ImplicitOps | ModRM, 0, 0, |
@@ -230,9 +237,14 @@ static u16 twobyte_table[256] = { | |||
230 | /* 0x90 - 0x9F */ | 237 | /* 0x90 - 0x9F */ |
231 | 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, | 238 | 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, |
232 | /* 0xA0 - 0xA7 */ | 239 | /* 0xA0 - 0xA7 */ |
233 | 0, 0, 0, DstMem | SrcReg | ModRM | BitOp, 0, 0, 0, 0, | 240 | 0, 0, 0, DstMem | SrcReg | ModRM | BitOp, |
241 | DstMem | SrcReg | Src2ImmByte | ModRM, | ||
242 | DstMem | SrcReg | Src2CL | ModRM, 0, 0, | ||
234 | /* 0xA8 - 0xAF */ | 243 | /* 0xA8 - 0xAF */ |
235 | 0, 0, 0, DstMem | SrcReg | ModRM | BitOp, 0, 0, ModRM, 0, | 244 | 0, 0, 0, DstMem | SrcReg | ModRM | BitOp, |
245 | DstMem | SrcReg | Src2ImmByte | ModRM, | ||
246 | DstMem | SrcReg | Src2CL | ModRM, | ||
247 | ModRM, 0, | ||
236 | /* 0xB0 - 0xB7 */ | 248 | /* 0xB0 - 0xB7 */ |
237 | ByteOp | DstMem | SrcReg | ModRM, DstMem | SrcReg | ModRM, 0, | 249 | ByteOp | DstMem | SrcReg | ModRM, DstMem | SrcReg | ModRM, 0, |
238 | DstMem | SrcReg | ModRM | BitOp, | 250 | DstMem | SrcReg | ModRM | BitOp, |
@@ -253,7 +265,7 @@ static u16 twobyte_table[256] = { | |||
253 | 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 | 265 | 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 |
254 | }; | 266 | }; |
255 | 267 | ||
256 | static u16 group_table[] = { | 268 | static u32 group_table[] = { |
257 | [Group1_80*8] = | 269 | [Group1_80*8] = |
258 | ByteOp | DstMem | SrcImm | ModRM, ByteOp | DstMem | SrcImm | ModRM, | 270 | ByteOp | DstMem | SrcImm | ModRM, ByteOp | DstMem | SrcImm | ModRM, |
259 | ByteOp | DstMem | SrcImm | ModRM, ByteOp | DstMem | SrcImm | ModRM, | 271 | ByteOp | DstMem | SrcImm | ModRM, ByteOp | DstMem | SrcImm | ModRM, |
@@ -297,9 +309,9 @@ static u16 group_table[] = { | |||
297 | SrcMem16 | ModRM | Mov, SrcMem | ModRM | ByteOp, | 309 | SrcMem16 | ModRM | Mov, SrcMem | ModRM | ByteOp, |
298 | }; | 310 | }; |
299 | 311 | ||
300 | static u16 group2_table[] = { | 312 | static u32 group2_table[] = { |
301 | [Group7*8] = | 313 | [Group7*8] = |
302 | SrcNone | ModRM, 0, 0, 0, | 314 | SrcNone | ModRM, 0, 0, SrcNone | ModRM, |
303 | SrcNone | ModRM | DstMem | Mov, 0, | 315 | SrcNone | ModRM | DstMem | Mov, 0, |
304 | SrcMem16 | ModRM | Mov, 0, | 316 | SrcMem16 | ModRM | Mov, 0, |
305 | }; | 317 | }; |
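
The decode tables widen from u16 to u32 because the new second-source descriptors live in bits 31:29 (Src2Mask is 7<<29), far above what 16 bits can hold. A tiny standalone check of that layout, using the definitions introduced above:

    #include <assert.h>

    /* Decode-flag word, selected fields per the definitions above:
     *   bit  0      ByteOp
     *   bits 6:4    SrcMask   - first source operand type
     *   bit  7      ModRM
     *   bits 15:14  Group / GroupDual
     *   bits 31:29  Src2Mask  - second source operand type (new) */
    #define Src2CL   (1u << 29)
    #define Src2Mask (7u << 29)

    int main(void)
    {
        /* Src2 flags cannot be represented in a 16-bit table entry. */
        assert((Src2CL & 0xffffu) == 0);
        assert((unsigned short)Src2Mask == 0);
        return 0;
    }
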
@@ -359,49 +371,48 @@ static u16 group2_table[] = { | |||
359 | "andl %"_msk",%"_LO32 _tmp"; " \ | 371 | "andl %"_msk",%"_LO32 _tmp"; " \ |
360 | "orl %"_LO32 _tmp",%"_sav"; " | 372 | "orl %"_LO32 _tmp",%"_sav"; " |
361 | 373 | ||
374 | #ifdef CONFIG_X86_64 | ||
375 | #define ON64(x) x | ||
376 | #else | ||
377 | #define ON64(x) | ||
378 | #endif | ||
379 | |||
380 | #define ____emulate_2op(_op, _src, _dst, _eflags, _x, _y, _suffix) \ | ||
381 | do { \ | ||
382 | __asm__ __volatile__ ( \ | ||
383 | _PRE_EFLAGS("0", "4", "2") \ | ||
384 | _op _suffix " %"_x"3,%1; " \ | ||
385 | _POST_EFLAGS("0", "4", "2") \ | ||
386 | : "=m" (_eflags), "=m" ((_dst).val), \ | ||
387 | "=&r" (_tmp) \ | ||
388 | : _y ((_src).val), "i" (EFLAGS_MASK)); \ | ||
389 | } while (0) | ||
390 | |||
391 | |||
362 | /* Raw emulation: instruction has two explicit operands. */ | 392 | /* Raw emulation: instruction has two explicit operands. */ |
363 | #define __emulate_2op_nobyte(_op,_src,_dst,_eflags,_wx,_wy,_lx,_ly,_qx,_qy) \ | 393 | #define __emulate_2op_nobyte(_op,_src,_dst,_eflags,_wx,_wy,_lx,_ly,_qx,_qy) \ |
364 | do { \ | 394 | do { \ |
365 | unsigned long _tmp; \ | 395 | unsigned long _tmp; \ |
366 | \ | 396 | \ |
367 | switch ((_dst).bytes) { \ | 397 | switch ((_dst).bytes) { \ |
368 | case 2: \ | 398 | case 2: \ |
369 | __asm__ __volatile__ ( \ | 399 | ____emulate_2op(_op,_src,_dst,_eflags,_wx,_wy,"w"); \ |
370 | _PRE_EFLAGS("0", "4", "2") \ | 400 | break; \ |
371 | _op"w %"_wx"3,%1; " \ | 401 | case 4: \ |
372 | _POST_EFLAGS("0", "4", "2") \ | 402 | ____emulate_2op(_op,_src,_dst,_eflags,_lx,_ly,"l"); \ |
373 | : "=m" (_eflags), "=m" ((_dst).val), \ | 403 | break; \ |
374 | "=&r" (_tmp) \ | 404 | case 8: \ |
375 | : _wy ((_src).val), "i" (EFLAGS_MASK)); \ | 405 | ON64(____emulate_2op(_op,_src,_dst,_eflags,_qx,_qy,"q")); \ |
376 | break; \ | 406 | break; \ |
377 | case 4: \ | 407 | } \ |
378 | __asm__ __volatile__ ( \ | ||
379 | _PRE_EFLAGS("0", "4", "2") \ | ||
380 | _op"l %"_lx"3,%1; " \ | ||
381 | _POST_EFLAGS("0", "4", "2") \ | ||
382 | : "=m" (_eflags), "=m" ((_dst).val), \ | ||
383 | "=&r" (_tmp) \ | ||
384 | : _ly ((_src).val), "i" (EFLAGS_MASK)); \ | ||
385 | break; \ | ||
386 | case 8: \ | ||
387 | __emulate_2op_8byte(_op, _src, _dst, \ | ||
388 | _eflags, _qx, _qy); \ | ||
389 | break; \ | ||
390 | } \ | ||
391 | } while (0) | 408 | } while (0) |
392 | 409 | ||
393 | #define __emulate_2op(_op,_src,_dst,_eflags,_bx,_by,_wx,_wy,_lx,_ly,_qx,_qy) \ | 410 | #define __emulate_2op(_op,_src,_dst,_eflags,_bx,_by,_wx,_wy,_lx,_ly,_qx,_qy) \ |
394 | do { \ | 411 | do { \ |
395 | unsigned long __tmp; \ | 412 | unsigned long _tmp; \ |
396 | switch ((_dst).bytes) { \ | 413 | switch ((_dst).bytes) { \ |
397 | case 1: \ | 414 | case 1: \ |
398 | __asm__ __volatile__ ( \ | 415 | ____emulate_2op(_op,_src,_dst,_eflags,_bx,_by,"b"); \ |
399 | _PRE_EFLAGS("0", "4", "2") \ | ||
400 | _op"b %"_bx"3,%1; " \ | ||
401 | _POST_EFLAGS("0", "4", "2") \ | ||
402 | : "=m" (_eflags), "=m" ((_dst).val), \ | ||
403 | "=&r" (__tmp) \ | ||
404 | : _by ((_src).val), "i" (EFLAGS_MASK)); \ | ||
405 | break; \ | 416 | break; \ |
406 | default: \ | 417 | default: \ |
407 | __emulate_2op_nobyte(_op, _src, _dst, _eflags, \ | 418 | __emulate_2op_nobyte(_op, _src, _dst, _eflags, \ |
@@ -425,71 +436,68 @@ static u16 group2_table[] = { | |||
425 | __emulate_2op_nobyte(_op, _src, _dst, _eflags, \ | 436 | __emulate_2op_nobyte(_op, _src, _dst, _eflags, \ |
426 | "w", "r", _LO32, "r", "", "r") | 437 | "w", "r", _LO32, "r", "", "r") |
427 | 438 | ||
428 | /* Instruction has only one explicit operand (no source operand). */ | 439 | /* Instruction has three operands and one operand is stored in ECX register */ |
429 | #define emulate_1op(_op, _dst, _eflags) \ | 440 | #define __emulate_2op_cl(_op, _cl, _src, _dst, _eflags, _suffix, _type) \ |
430 | do { \ | 441 | do { \ |
431 | unsigned long _tmp; \ | 442 | unsigned long _tmp; \ |
432 | \ | 443 | _type _clv = (_cl).val; \ |
433 | switch ((_dst).bytes) { \ | 444 | _type _srcv = (_src).val; \ |
434 | case 1: \ | 445 | _type _dstv = (_dst).val; \ |
435 | __asm__ __volatile__ ( \ | 446 | \ |
436 | _PRE_EFLAGS("0", "3", "2") \ | 447 | __asm__ __volatile__ ( \ |
437 | _op"b %1; " \ | 448 | _PRE_EFLAGS("0", "5", "2") \ |
438 | _POST_EFLAGS("0", "3", "2") \ | 449 | _op _suffix " %4,%1 \n" \ |
439 | : "=m" (_eflags), "=m" ((_dst).val), \ | 450 | _POST_EFLAGS("0", "5", "2") \ |
440 | "=&r" (_tmp) \ | 451 | : "=m" (_eflags), "+r" (_dstv), "=&r" (_tmp) \ |
441 | : "i" (EFLAGS_MASK)); \ | 452 | : "c" (_clv) , "r" (_srcv), "i" (EFLAGS_MASK) \ |
442 | break; \ | 453 | ); \ |
443 | case 2: \ | 454 | \ |
444 | __asm__ __volatile__ ( \ | 455 | (_cl).val = (unsigned long) _clv; \ |
445 | _PRE_EFLAGS("0", "3", "2") \ | 456 | (_src).val = (unsigned long) _srcv; \ |
446 | _op"w %1; " \ | 457 | (_dst).val = (unsigned long) _dstv; \ |
447 | _POST_EFLAGS("0", "3", "2") \ | ||
448 | : "=m" (_eflags), "=m" ((_dst).val), \ | ||
449 | "=&r" (_tmp) \ | ||
450 | : "i" (EFLAGS_MASK)); \ | ||
451 | break; \ | ||
452 | case 4: \ | ||
453 | __asm__ __volatile__ ( \ | ||
454 | _PRE_EFLAGS("0", "3", "2") \ | ||
455 | _op"l %1; " \ | ||
456 | _POST_EFLAGS("0", "3", "2") \ | ||
457 | : "=m" (_eflags), "=m" ((_dst).val), \ | ||
458 | "=&r" (_tmp) \ | ||
459 | : "i" (EFLAGS_MASK)); \ | ||
460 | break; \ | ||
461 | case 8: \ | ||
462 | __emulate_1op_8byte(_op, _dst, _eflags); \ | ||
463 | break; \ | ||
464 | } \ | ||
465 | } while (0) | 458 | } while (0) |
466 | 459 | ||
467 | /* Emulate an instruction with quadword operands (x86/64 only). */ | 460 | #define emulate_2op_cl(_op, _cl, _src, _dst, _eflags) \ |
468 | #if defined(CONFIG_X86_64) | 461 | do { \ |
469 | #define __emulate_2op_8byte(_op, _src, _dst, _eflags, _qx, _qy) \ | 462 | switch ((_dst).bytes) { \ |
470 | do { \ | 463 | case 2: \ |
471 | __asm__ __volatile__ ( \ | 464 | __emulate_2op_cl(_op, _cl, _src, _dst, _eflags, \ |
472 | _PRE_EFLAGS("0", "4", "2") \ | 465 | "w", unsigned short); \ |
473 | _op"q %"_qx"3,%1; " \ | 466 | break; \ |
474 | _POST_EFLAGS("0", "4", "2") \ | 467 | case 4: \ |
475 | : "=m" (_eflags), "=m" ((_dst).val), "=&r" (_tmp) \ | 468 | __emulate_2op_cl(_op, _cl, _src, _dst, _eflags, \ |
476 | : _qy ((_src).val), "i" (EFLAGS_MASK)); \ | 469 | "l", unsigned int); \ |
470 | break; \ | ||
471 | case 8: \ | ||
472 | ON64(__emulate_2op_cl(_op, _cl, _src, _dst, _eflags, \ | ||
473 | "q", unsigned long)); \ | ||
474 | break; \ | ||
475 | } \ | ||
477 | } while (0) | 476 | } while (0) |
478 | 477 | ||
479 | #define __emulate_1op_8byte(_op, _dst, _eflags) \ | 478 | #define __emulate_1op(_op, _dst, _eflags, _suffix) \ |
480 | do { \ | 479 | do { \ |
481 | __asm__ __volatile__ ( \ | 480 | unsigned long _tmp; \ |
482 | _PRE_EFLAGS("0", "3", "2") \ | 481 | \ |
483 | _op"q %1; " \ | 482 | __asm__ __volatile__ ( \ |
484 | _POST_EFLAGS("0", "3", "2") \ | 483 | _PRE_EFLAGS("0", "3", "2") \ |
485 | : "=m" (_eflags), "=m" ((_dst).val), "=&r" (_tmp) \ | 484 | _op _suffix " %1; " \ |
486 | : "i" (EFLAGS_MASK)); \ | 485 | _POST_EFLAGS("0", "3", "2") \ |
486 | : "=m" (_eflags), "+m" ((_dst).val), \ | ||
487 | "=&r" (_tmp) \ | ||
488 | : "i" (EFLAGS_MASK)); \ | ||
487 | } while (0) | 489 | } while (0) |
488 | 490 | ||
489 | #elif defined(__i386__) | 491 | /* Instruction has only one explicit operand (no source operand). */ |
490 | #define __emulate_2op_8byte(_op, _src, _dst, _eflags, _qx, _qy) | 492 | #define emulate_1op(_op, _dst, _eflags) \ |
491 | #define __emulate_1op_8byte(_op, _dst, _eflags) | 493 | do { \ |
492 | #endif /* __i386__ */ | 494 | switch ((_dst).bytes) { \ |
495 | case 1: __emulate_1op(_op, _dst, _eflags, "b"); break; \ | ||
496 | case 2: __emulate_1op(_op, _dst, _eflags, "w"); break; \ | ||
497 | case 4: __emulate_1op(_op, _dst, _eflags, "l"); break; \ | ||
498 | case 8: ON64(__emulate_1op(_op, _dst, _eflags, "q")); break; \ | ||
499 | } \ | ||
500 | } while (0) | ||
493 | 501 | ||
494 | /* Fetch next part of the instruction being emulated. */ | 502 | /* Fetch next part of the instruction being emulated. */ |
495 | #define insn_fetch(_type, _size, _eip) \ | 503 | #define insn_fetch(_type, _size, _eip) \ |
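
The macro rework above collapses four nearly identical inline-asm bodies into one parameterized helper, ____emulate_2op(), taking the operand-size suffix ("b"/"w"/"l"/"q") as an argument, and replaces the #ifdef'd 8-byte variants with an ON64() guard that simply compiles the 64-bit case out on i386. The same pattern in miniature (EFLAGS handling elided; the real helper does the operation in inline asm precisely so the flags come out right):

    #include <stdint.h>

    #ifdef __x86_64__
    #define ON64(x) x           /* keep 64-bit cases on x86-64 */
    #else
    #define ON64(x)             /* compile them out on i386    */
    #endif

    /* One parameterized body replaces four hand-copied variants. */
    #define emulate_inc(_bytes, _val)                              \
        do {                                                       \
            switch (_bytes) {                                      \
            case 1: (_val) = (uint8_t)((_val) + 1);  break;        \
            case 2: (_val) = (uint16_t)((_val) + 1); break;        \
            case 4: (_val) = (uint32_t)((_val) + 1); break;        \
            case 8: ON64((_val) = (uint64_t)((_val) + 1)); break;  \
            }                                                      \
        } while (0)
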
@@ -1041,6 +1049,33 @@ done_prefixes: | |||
1041 | c->src.bytes = 1; | 1049 | c->src.bytes = 1; |
1042 | c->src.val = insn_fetch(s8, 1, c->eip); | 1050 | c->src.val = insn_fetch(s8, 1, c->eip); |
1043 | break; | 1051 | break; |
1052 | case SrcOne: | ||
1053 | c->src.bytes = 1; | ||
1054 | c->src.val = 1; | ||
1055 | break; | ||
1056 | } | ||
1057 | |||
1058 | /* | ||
1059 | * Decode and fetch the second source operand: register, memory | ||
1060 | * or immediate. | ||
1061 | */ | ||
1062 | switch (c->d & Src2Mask) { | ||
1063 | case Src2None: | ||
1064 | break; | ||
1065 | case Src2CL: | ||
1066 | c->src2.bytes = 1; | ||
1067 | c->src2.val = c->regs[VCPU_REGS_RCX] & 0x8; | ||
1068 | break; | ||
1069 | case Src2ImmByte: | ||
1070 | c->src2.type = OP_IMM; | ||
1071 | c->src2.ptr = (unsigned long *)c->eip; | ||
1072 | c->src2.bytes = 1; | ||
1073 | c->src2.val = insn_fetch(u8, 1, c->eip); | ||
1074 | break; | ||
1075 | case Src2One: | ||
1076 | c->src2.bytes = 1; | ||
1077 | c->src2.val = 1; | ||
1078 | break; | ||
1044 | } | 1079 | } |
1045 | 1080 | ||
1046 | /* Decode and fetch the destination operand: register or memory. */ | 1081 | /* Decode and fetch the destination operand: register or memory. */ |
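
Second-source decoding mirrors the first-source switch above: Src2CL takes the count from CL, Src2ImmByte fetches one extra immediate byte, and Src2One synthesizes the constant 1. One detail worth flagging: the Src2CL case masks RCX with 0x8, which keeps only bit 3 of the register; for a CL-based shift count the whole low byte would be expected, i.e. something like

    /* expected CL extraction for a shift count (sketch) */
    c->src2.val = c->regs[VCPU_REGS_RCX] & 0xff;   /* CL = low byte of RCX */

so the 0x8 mask looks like a latent typo in the hunk as posted.
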
@@ -1100,20 +1135,33 @@ static inline void emulate_push(struct x86_emulate_ctxt *ctxt) | |||
1100 | c->regs[VCPU_REGS_RSP]); | 1135 | c->regs[VCPU_REGS_RSP]); |
1101 | } | 1136 | } |
1102 | 1137 | ||
1103 | static inline int emulate_grp1a(struct x86_emulate_ctxt *ctxt, | 1138 | static int emulate_pop(struct x86_emulate_ctxt *ctxt, |
1104 | struct x86_emulate_ops *ops) | 1139 | struct x86_emulate_ops *ops) |
1105 | { | 1140 | { |
1106 | struct decode_cache *c = &ctxt->decode; | 1141 | struct decode_cache *c = &ctxt->decode; |
1107 | int rc; | 1142 | int rc; |
1108 | 1143 | ||
1109 | rc = ops->read_std(register_address(c, ss_base(ctxt), | 1144 | rc = ops->read_emulated(register_address(c, ss_base(ctxt), |
1110 | c->regs[VCPU_REGS_RSP]), | 1145 | c->regs[VCPU_REGS_RSP]), |
1111 | &c->dst.val, c->dst.bytes, ctxt->vcpu); | 1146 | &c->src.val, c->src.bytes, ctxt->vcpu); |
1112 | if (rc != 0) | 1147 | if (rc != 0) |
1113 | return rc; | 1148 | return rc; |
1114 | 1149 | ||
1115 | register_address_increment(c, &c->regs[VCPU_REGS_RSP], c->dst.bytes); | 1150 | register_address_increment(c, &c->regs[VCPU_REGS_RSP], c->src.bytes); |
1151 | return rc; | ||
1152 | } | ||
1153 | |||
1154 | static inline int emulate_grp1a(struct x86_emulate_ctxt *ctxt, | ||
1155 | struct x86_emulate_ops *ops) | ||
1156 | { | ||
1157 | struct decode_cache *c = &ctxt->decode; | ||
1158 | int rc; | ||
1116 | 1159 | ||
1160 | c->src.bytes = c->dst.bytes; | ||
1161 | rc = emulate_pop(ctxt, ops); | ||
1162 | if (rc != 0) | ||
1163 | return rc; | ||
1164 | c->dst.val = c->src.val; | ||
1117 | return 0; | 1165 | return 0; |
1118 | } | 1166 | } |
1119 | 1167 | ||
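
Factoring emulate_pop() out of the Group-1A handler also switches the stack read from ops->read_std() to ops->read_emulated(), so a pop can be satisfied through the emulated-memory path rather than only a direct guest-memory read; push (0x50-0x57), pop (0x58-0x5f), popf, and ret are then all rebuilt on the shared push/pop helpers in the hunks below. The pop sequence in outline (names as in the hunk, addressing simplified):

    /* read c->src.bytes from the top of the guest stack */
    rc = ops->read_emulated(register_address(c, ss_base(ctxt),
                                             c->regs[VCPU_REGS_RSP]),
                            &c->src.val, c->src.bytes, ctxt->vcpu);
    if (rc != 0)
        return rc;

    /* then move RSP up past what was popped */
    register_address_increment(c, &c->regs[VCPU_REGS_RSP], c->src.bytes);
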
@@ -1415,24 +1463,15 @@ special_insn: | |||
1415 | emulate_1op("dec", c->dst, ctxt->eflags); | 1463 | emulate_1op("dec", c->dst, ctxt->eflags); |
1416 | break; | 1464 | break; |
1417 | case 0x50 ... 0x57: /* push reg */ | 1465 | case 0x50 ... 0x57: /* push reg */ |
1418 | c->dst.type = OP_MEM; | 1466 | emulate_push(ctxt); |
1419 | c->dst.bytes = c->op_bytes; | ||
1420 | c->dst.val = c->src.val; | ||
1421 | register_address_increment(c, &c->regs[VCPU_REGS_RSP], | ||
1422 | -c->op_bytes); | ||
1423 | c->dst.ptr = (void *) register_address( | ||
1424 | c, ss_base(ctxt), c->regs[VCPU_REGS_RSP]); | ||
1425 | break; | 1467 | break; |
1426 | case 0x58 ... 0x5f: /* pop reg */ | 1468 | case 0x58 ... 0x5f: /* pop reg */ |
1427 | pop_instruction: | 1469 | pop_instruction: |
1428 | if ((rc = ops->read_std(register_address(c, ss_base(ctxt), | 1470 | c->src.bytes = c->op_bytes; |
1429 | c->regs[VCPU_REGS_RSP]), c->dst.ptr, | 1471 | rc = emulate_pop(ctxt, ops); |
1430 | c->op_bytes, ctxt->vcpu)) != 0) | 1472 | if (rc != 0) |
1431 | goto done; | 1473 | goto done; |
1432 | 1474 | c->dst.val = c->src.val; | |
1433 | register_address_increment(c, &c->regs[VCPU_REGS_RSP], | ||
1434 | c->op_bytes); | ||
1435 | c->dst.type = OP_NONE; /* Disable writeback. */ | ||
1436 | break; | 1475 | break; |
1437 | case 0x63: /* movsxd */ | 1476 | case 0x63: /* movsxd */ |
1438 | if (ctxt->mode != X86EMUL_MODE_PROT64) | 1477 | if (ctxt->mode != X86EMUL_MODE_PROT64) |
@@ -1591,7 +1630,9 @@ special_insn: | |||
1591 | emulate_push(ctxt); | 1630 | emulate_push(ctxt); |
1592 | break; | 1631 | break; |
1593 | case 0x9d: /* popf */ | 1632 | case 0x9d: /* popf */ |
1633 | c->dst.type = OP_REG; | ||
1594 | c->dst.ptr = (unsigned long *) &ctxt->eflags; | 1634 | c->dst.ptr = (unsigned long *) &ctxt->eflags; |
1635 | c->dst.bytes = c->op_bytes; | ||
1595 | goto pop_instruction; | 1636 | goto pop_instruction; |
1596 | case 0xa0 ... 0xa1: /* mov */ | 1637 | case 0xa0 ... 0xa1: /* mov */ |
1597 | c->dst.ptr = (unsigned long *)&c->regs[VCPU_REGS_RAX]; | 1638 | c->dst.ptr = (unsigned long *)&c->regs[VCPU_REGS_RAX]; |
@@ -1689,7 +1730,9 @@ special_insn: | |||
1689 | emulate_grp2(ctxt); | 1730 | emulate_grp2(ctxt); |
1690 | break; | 1731 | break; |
1691 | case 0xc3: /* ret */ | 1732 | case 0xc3: /* ret */ |
1733 | c->dst.type = OP_REG; | ||
1692 | c->dst.ptr = &c->eip; | 1734 | c->dst.ptr = &c->eip; |
1735 | c->dst.bytes = c->op_bytes; | ||
1693 | goto pop_instruction; | 1736 | goto pop_instruction; |
1694 | case 0xc6 ... 0xc7: /* mov (sole member of Grp11) */ | 1737 | case 0xc6 ... 0xc7: /* mov (sole member of Grp11) */ |
1695 | mov: | 1738 | mov: |
@@ -1778,7 +1821,7 @@ special_insn: | |||
1778 | c->eip = saved_eip; | 1821 | c->eip = saved_eip; |
1779 | goto cannot_emulate; | 1822 | goto cannot_emulate; |
1780 | } | 1823 | } |
1781 | return 0; | 1824 | break; |
1782 | case 0xf4: /* hlt */ | 1825 | case 0xf4: /* hlt */ |
1783 | ctxt->vcpu->arch.halt_request = 1; | 1826 | ctxt->vcpu->arch.halt_request = 1; |
1784 | break; | 1827 | break; |
@@ -1999,12 +2042,20 @@ twobyte_insn: | |||
1999 | c->src.val &= (c->dst.bytes << 3) - 1; | 2042 | c->src.val &= (c->dst.bytes << 3) - 1; |
2000 | emulate_2op_SrcV_nobyte("bt", c->src, c->dst, ctxt->eflags); | 2043 | emulate_2op_SrcV_nobyte("bt", c->src, c->dst, ctxt->eflags); |
2001 | break; | 2044 | break; |
2045 | case 0xa4: /* shld imm8, r, r/m */ | ||
2046 | case 0xa5: /* shld cl, r, r/m */ | ||
2047 | emulate_2op_cl("shld", c->src2, c->src, c->dst, ctxt->eflags); | ||
2048 | break; | ||
2002 | case 0xab: | 2049 | case 0xab: |
2003 | bts: /* bts */ | 2050 | bts: /* bts */ |
2004 | /* only subword offset */ | 2051 | /* only subword offset */ |
2005 | c->src.val &= (c->dst.bytes << 3) - 1; | 2052 | c->src.val &= (c->dst.bytes << 3) - 1; |
2006 | emulate_2op_SrcV_nobyte("bts", c->src, c->dst, ctxt->eflags); | 2053 | emulate_2op_SrcV_nobyte("bts", c->src, c->dst, ctxt->eflags); |
2007 | break; | 2054 | break; |
2055 | case 0xac: /* shrd imm8, r, r/m */ | ||
2056 | case 0xad: /* shrd cl, r, r/m */ | ||
2057 | emulate_2op_cl("shrd", c->src2, c->src, c->dst, ctxt->eflags); | ||
2058 | break; | ||
2008 | case 0xae: /* clflush */ | 2059 | case 0xae: /* clflush */ |
2009 | break; | 2060 | break; |
2010 | case 0xb0 ... 0xb1: /* cmpxchg */ | 2061 | case 0xb0 ... 0xb1: /* cmpxchg */ |
diff --git a/arch/x86/lguest/boot.c b/arch/x86/lguest/boot.c index 50a779264bb1..a7ed208f81e3 100644 --- a/arch/x86/lguest/boot.c +++ b/arch/x86/lguest/boot.c | |||
@@ -738,7 +738,7 @@ static void lguest_time_init(void) | |||
738 | 738 | ||
739 | /* We can't set cpumask in the initializer: damn C limitations! Set it | 739 | /* We can't set cpumask in the initializer: damn C limitations! Set it |
740 | * here and register our timer device. */ | 740 | * here and register our timer device. */ |
741 | lguest_clockevent.cpumask = cpumask_of_cpu(0); | 741 | lguest_clockevent.cpumask = cpumask_of(0); |
742 | clockevents_register_device(&lguest_clockevent); | 742 | clockevents_register_device(&lguest_clockevent); |
743 | 743 | ||
744 | /* Finally, we unblock the timer interrupt. */ | 744 | /* Finally, we unblock the timer interrupt. */ |
diff --git a/arch/x86/mach-default/setup.c b/arch/x86/mach-default/setup.c index 37b9ae4d44c5..df167f265622 100644 --- a/arch/x86/mach-default/setup.c +++ b/arch/x86/mach-default/setup.c | |||
@@ -133,29 +133,28 @@ void __init time_init_hook(void) | |||
133 | **/ | 133 | **/ |
134 | void mca_nmi_hook(void) | 134 | void mca_nmi_hook(void) |
135 | { | 135 | { |
136 | /* If I recall correctly, there's a whole bunch of other things that | 136 | /* |
137 | * If I recall correctly, there's a whole bunch of other things that | ||
137 | * we can do to check for NMI problems, but that's all I know about | 138 | * we can do to check for NMI problems, but that's all I know about |
138 | * at the moment. | 139 | * at the moment. |
139 | */ | 140 | */ |
140 | 141 | pr_warning("NMI generated from unknown source!\n"); | |
141 | printk("NMI generated from unknown source!\n"); | ||
142 | } | 142 | } |
143 | #endif | 143 | #endif |
144 | 144 | ||
145 | static __init int no_ipi_broadcast(char *str) | 145 | static __init int no_ipi_broadcast(char *str) |
146 | { | 146 | { |
147 | get_option(&str, &no_broadcast); | 147 | get_option(&str, &no_broadcast); |
148 | printk ("Using %s mode\n", no_broadcast ? "No IPI Broadcast" : | 148 | pr_info("Using %s mode\n", |
149 | "IPI Broadcast"); | 149 | no_broadcast ? "No IPI Broadcast" : "IPI Broadcast"); |
150 | return 1; | 150 | return 1; |
151 | } | 151 | } |
152 | |||
153 | __setup("no_ipi_broadcast=", no_ipi_broadcast); | 152 | __setup("no_ipi_broadcast=", no_ipi_broadcast); |
154 | 153 | ||
155 | static int __init print_ipi_mode(void) | 154 | static int __init print_ipi_mode(void) |
156 | { | 155 | { |
157 | printk ("Using IPI %s mode\n", no_broadcast ? "No-Shortcut" : | 156 | pr_info("Using IPI %s mode\n", |
158 | "Shortcut"); | 157 | no_broadcast ? "No-Shortcut" : "Shortcut"); |
159 | return 0; | 158 | return 0; |
160 | } | 159 | } |
161 | 160 | ||
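
The setup.c hunks above are logging hygiene: printk() calls with no log level become pr_warning()/pr_info(), which attach KERN_WARNING/KERN_INFO automatically. At this point in the tree those helpers expand to roughly the following (an assumption about the era's kernel.h definition, shown for orientation):

    #define pr_info(fmt, arg...)    printk(KERN_INFO fmt, ##arg)
    #define pr_warning(fmt, arg...) printk(KERN_WARNING fmt, ##arg)
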
diff --git a/arch/x86/mach-generic/bigsmp.c b/arch/x86/mach-generic/bigsmp.c index 3624a364b7f3..bc4c7840b2a8 100644 --- a/arch/x86/mach-generic/bigsmp.c +++ b/arch/x86/mach-generic/bigsmp.c | |||
@@ -42,9 +42,10 @@ static const struct dmi_system_id bigsmp_dmi_table[] = { | |||
42 | { } | 42 | { } |
43 | }; | 43 | }; |
44 | 44 | ||
45 | static cpumask_t vector_allocation_domain(int cpu) | 45 | static void vector_allocation_domain(int cpu, cpumask_t *retmask) |
46 | { | 46 | { |
47 | return cpumask_of_cpu(cpu); | 47 | cpus_clear(*retmask); |
48 | cpu_set(cpu, *retmask); | ||
48 | } | 49 | } |
49 | 50 | ||
50 | static int probe_bigsmp(void) | 51 | static int probe_bigsmp(void) |
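
Returning a cpumask_t by value copies an object whose size scales with NR_CPUS, so vector_allocation_domain() is converted to fill a caller-supplied mask instead; the identical conversion repeats below for es7000, numaq, and summit. The shape of the change:

    /* old: large struct returned by value (a stack copy per call) */
    static cpumask_t vector_allocation_domain(int cpu);

    /* new: caller owns the storage, callee fills it in place */
    static void vector_allocation_domain(int cpu, cpumask_t *retmask);
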
diff --git a/arch/x86/mach-generic/es7000.c b/arch/x86/mach-generic/es7000.c index 7b4e6d0d1690..4ba5ccaa1584 100644 --- a/arch/x86/mach-generic/es7000.c +++ b/arch/x86/mach-generic/es7000.c | |||
@@ -87,7 +87,7 @@ static int __init acpi_madt_oem_check(char *oem_id, char *oem_table_id) | |||
87 | } | 87 | } |
88 | #endif | 88 | #endif |
89 | 89 | ||
90 | static cpumask_t vector_allocation_domain(int cpu) | 90 | static void vector_allocation_domain(int cpu, cpumask_t *retmask) |
91 | { | 91 | { |
92 | /* Careful. Some cpus do not strictly honor the set of cpus | 92 | /* Careful. Some cpus do not strictly honor the set of cpus |
93 | * specified in the interrupt destination when using lowest | 93 | * specified in the interrupt destination when using lowest |
@@ -97,8 +97,7 @@ static cpumask_t vector_allocation_domain(int cpu) | |||
97 | * deliver interrupts to the wrong hyperthread when only one | 97 | * deliver interrupts to the wrong hyperthread when only one |
98 | * hyperthread was specified in the interrupt desitination. | 98 | * hyperthread was specified in the interrupt desitination. |
99 | */ | 99 | */ |
100 | cpumask_t domain = { { [0] = APIC_ALL_CPUS, } }; | 100 | *retmask = (cpumask_t){ { [0] = APIC_ALL_CPUS, } }; |
101 | return domain; | ||
102 | } | 101 | } |
103 | 102 | ||
104 | struct genapic __initdata_refok apic_es7000 = APIC_INIT("es7000", probe_es7000); | 103 | struct genapic __initdata_refok apic_es7000 = APIC_INIT("es7000", probe_es7000); |
diff --git a/arch/x86/mach-generic/numaq.c b/arch/x86/mach-generic/numaq.c index 71a309b122e6..511d7941364f 100644 --- a/arch/x86/mach-generic/numaq.c +++ b/arch/x86/mach-generic/numaq.c | |||
@@ -38,7 +38,7 @@ static int acpi_madt_oem_check(char *oem_id, char *oem_table_id) | |||
38 | return 0; | 38 | return 0; |
39 | } | 39 | } |
40 | 40 | ||
41 | static cpumask_t vector_allocation_domain(int cpu) | 41 | static void vector_allocation_domain(int cpu, cpumask_t *retmask) |
42 | { | 42 | { |
43 | /* Careful. Some cpus do not strictly honor the set of cpus | 43 | /* Careful. Some cpus do not strictly honor the set of cpus |
44 | * specified in the interrupt destination when using lowest | 44 | * specified in the interrupt destination when using lowest |
@@ -48,8 +48,7 @@ static cpumask_t vector_allocation_domain(int cpu) | |||
48 | * deliver interrupts to the wrong hyperthread when only one | 48 | * deliver interrupts to the wrong hyperthread when only one |
49 | * hyperthread was specified in the interrupt desitination. | 49 | * hyperthread was specified in the interrupt desitination. |
50 | */ | 50 | */ |
51 | cpumask_t domain = { { [0] = APIC_ALL_CPUS, } }; | 51 | *retmask = (cpumask_t){ { [0] = APIC_ALL_CPUS, } }; |
52 | return domain; | ||
53 | } | 52 | } |
54 | 53 | ||
55 | struct genapic apic_numaq = APIC_INIT("NUMAQ", probe_numaq); | 54 | struct genapic apic_numaq = APIC_INIT("NUMAQ", probe_numaq); |
diff --git a/arch/x86/mach-generic/summit.c b/arch/x86/mach-generic/summit.c index 2c6d234e0009..2821ffc188b5 100644 --- a/arch/x86/mach-generic/summit.c +++ b/arch/x86/mach-generic/summit.c | |||
@@ -24,7 +24,7 @@ static int probe_summit(void) | |||
24 | return 0; | 24 | return 0; |
25 | } | 25 | } |
26 | 26 | ||
27 | static cpumask_t vector_allocation_domain(int cpu) | 27 | static void vector_allocation_domain(int cpu, cpumask_t *retmask) |
28 | { | 28 | { |
29 | /* Careful. Some cpus do not strictly honor the set of cpus | 29 | /* Careful. Some cpus do not strictly honor the set of cpus |
30 | * specified in the interrupt destination when using lowest | 30 | * specified in the interrupt destination when using lowest |
@@ -34,8 +34,7 @@ static cpumask_t vector_allocation_domain(int cpu) | |||
34 | * deliver interrupts to the wrong hyperthread when only one | 34 | * deliver interrupts to the wrong hyperthread when only one |
35 | * hyperthread was specified in the interrupt desitination. | 35 | * hyperthread was specified in the interrupt desitination. |
36 | */ | 36 | */ |
37 | cpumask_t domain = { { [0] = APIC_ALL_CPUS, } }; | 37 | *retmask = (cpumask_t){ { [0] = APIC_ALL_CPUS, } }; |
38 | return domain; | ||
39 | } | 38 | } |
40 | 39 | ||
41 | struct genapic apic_summit = APIC_INIT("summit", probe_summit); | 40 | struct genapic apic_summit = APIC_INIT("summit", probe_summit); |
diff --git a/arch/x86/mach-voyager/voyager_smp.c b/arch/x86/mach-voyager/voyager_smp.c index 52145007bd7e..a5bc05492b1e 100644 --- a/arch/x86/mach-voyager/voyager_smp.c +++ b/arch/x86/mach-voyager/voyager_smp.c | |||
@@ -63,11 +63,6 @@ static int voyager_extended_cpus = 1; | |||
63 | /* Used for the invalidate map that's also checked in the spinlock */ | 63 | /* Used for the invalidate map that's also checked in the spinlock */ |
64 | static volatile unsigned long smp_invalidate_needed; | 64 | static volatile unsigned long smp_invalidate_needed; |
65 | 65 | ||
66 | /* Bitmask of currently online CPUs - used by setup.c for | ||
67 | /proc/cpuinfo, visible externally but still physical */ | ||
68 | cpumask_t cpu_online_map = CPU_MASK_NONE; | ||
69 | EXPORT_SYMBOL(cpu_online_map); | ||
70 | |||
71 | /* Bitmask of CPUs present in the system - exported by i386_syms.c, used | 66 | /* Bitmask of CPUs present in the system - exported by i386_syms.c, used |
72 | * by scheduler but indexed physically */ | 67 | * by scheduler but indexed physically */ |
73 | cpumask_t phys_cpu_present_map = CPU_MASK_NONE; | 68 | cpumask_t phys_cpu_present_map = CPU_MASK_NONE; |
@@ -218,8 +213,6 @@ static cpumask_t smp_commenced_mask = CPU_MASK_NONE; | |||
218 | /* This is for the new dynamic CPU boot code */ | 213 | /* This is for the new dynamic CPU boot code */ |
219 | cpumask_t cpu_callin_map = CPU_MASK_NONE; | 214 | cpumask_t cpu_callin_map = CPU_MASK_NONE; |
220 | cpumask_t cpu_callout_map = CPU_MASK_NONE; | 215 | cpumask_t cpu_callout_map = CPU_MASK_NONE; |
221 | cpumask_t cpu_possible_map = CPU_MASK_NONE; | ||
222 | EXPORT_SYMBOL(cpu_possible_map); | ||
223 | 216 | ||
224 | /* The per processor IRQ masks (these are usually kept in sync) */ | 217 | /* The per processor IRQ masks (these are usually kept in sync) */ |
225 | static __u16 vic_irq_mask[NR_CPUS] __cacheline_aligned; | 218 | static __u16 vic_irq_mask[NR_CPUS] __cacheline_aligned; |
@@ -679,7 +672,7 @@ void __init smp_boot_cpus(void) | |||
679 | 672 | ||
680 | /* loop over all the extended VIC CPUs and boot them. The | 673 | /* loop over all the extended VIC CPUs and boot them. The |
681 | * Quad CPUs must be bootstrapped by their extended VIC cpu */ | 674 | * Quad CPUs must be bootstrapped by their extended VIC cpu */ |
682 | for (i = 0; i < NR_CPUS; i++) { | 675 | for (i = 0; i < nr_cpu_ids; i++) { |
683 | if (i == boot_cpu_id || !cpu_isset(i, phys_cpu_present_map)) | 676 | if (i == boot_cpu_id || !cpu_isset(i, phys_cpu_present_map)) |
684 | continue; | 677 | continue; |
685 | do_boot_cpu(i); | 678 | do_boot_cpu(i); |
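
Bounding the CPU loop by nr_cpu_ids instead of NR_CPUS avoids walking the compile-time ceiling of CPU IDs on machines that can only ever have a few: nr_cpu_ids is the runtime upper bound on possible CPU numbers. The same conversion appears in numa_64.c and srat_64.c below. The general pattern:

    /* nr_cpu_ids <= NR_CPUS; IDs at or above it cannot exist this boot */
    for (i = 0; i < nr_cpu_ids; i++) {
        if (!cpu_possible(i))
            continue;
        /* ... per-cpu initialization ... */
    }
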
diff --git a/arch/x86/mm/init_32.c b/arch/x86/mm/init_32.c index 8655b5bb0963..f99a6c6c432e 100644 --- a/arch/x86/mm/init_32.c +++ b/arch/x86/mm/init_32.c | |||
@@ -435,8 +435,12 @@ static void __init set_highmem_pages_init(void) | |||
435 | #endif /* !CONFIG_NUMA */ | 435 | #endif /* !CONFIG_NUMA */ |
436 | 436 | ||
437 | #else | 437 | #else |
438 | # define permanent_kmaps_init(pgd_base) do { } while (0) | 438 | static inline void permanent_kmaps_init(pgd_t *pgd_base) |
439 | # define set_highmem_pages_init() do { } while (0) | 439 | { |
440 | } | ||
441 | static inline void set_highmem_pages_init(void) | ||
442 | { | ||
443 | } | ||
440 | #endif /* CONFIG_HIGHMEM */ | 444 | #endif /* CONFIG_HIGHMEM */ |
441 | 445 | ||
442 | void __init native_pagetable_setup_start(pgd_t *base) | 446 | void __init native_pagetable_setup_start(pgd_t *base) |
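
Replacing the empty do-while macros with empty static inline functions keeps the !CONFIG_HIGHMEM stubs type-checked: a caller passing the wrong argument now fails to compile even in configurations where the body is a no-op, while the generated code is identical. The pattern:

    /* macro stub: arguments are never even checked for type errors */
    #define set_highmem_pages_init() do { } while (0)

    /* inline stub: same generated code, full type checking retained */
    static inline void permanent_kmaps_init(pgd_t *pgd_base) { }
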
diff --git a/arch/x86/mm/numa_64.c b/arch/x86/mm/numa_64.c index cebcbf152d46..71a14f89f89e 100644 --- a/arch/x86/mm/numa_64.c +++ b/arch/x86/mm/numa_64.c | |||
@@ -278,7 +278,7 @@ void __init numa_init_array(void) | |||
278 | int rr, i; | 278 | int rr, i; |
279 | 279 | ||
280 | rr = first_node(node_online_map); | 280 | rr = first_node(node_online_map); |
281 | for (i = 0; i < NR_CPUS; i++) { | 281 | for (i = 0; i < nr_cpu_ids; i++) { |
282 | if (early_cpu_to_node(i) != NUMA_NO_NODE) | 282 | if (early_cpu_to_node(i) != NUMA_NO_NODE) |
283 | continue; | 283 | continue; |
284 | numa_set_node(i, rr); | 284 | numa_set_node(i, rr); |
@@ -549,7 +549,7 @@ void __init initmem_init(unsigned long start_pfn, unsigned long last_pfn) | |||
549 | memnodemap[0] = 0; | 549 | memnodemap[0] = 0; |
550 | node_set_online(0); | 550 | node_set_online(0); |
551 | node_set(0, node_possible_map); | 551 | node_set(0, node_possible_map); |
552 | for (i = 0; i < NR_CPUS; i++) | 552 | for (i = 0; i < nr_cpu_ids; i++) |
553 | numa_set_node(i, 0); | 553 | numa_set_node(i, 0); |
554 | e820_register_active_regions(0, start_pfn, last_pfn); | 554 | e820_register_active_regions(0, start_pfn, last_pfn); |
555 | setup_node_bootmem(0, start_pfn << PAGE_SHIFT, last_pfn << PAGE_SHIFT); | 555 | setup_node_bootmem(0, start_pfn << PAGE_SHIFT, last_pfn << PAGE_SHIFT); |
diff --git a/arch/x86/mm/srat_64.c b/arch/x86/mm/srat_64.c index 51c0a2fc14fe..09737c8af074 100644 --- a/arch/x86/mm/srat_64.c +++ b/arch/x86/mm/srat_64.c | |||
@@ -382,7 +382,7 @@ int __init acpi_scan_nodes(unsigned long start, unsigned long end) | |||
382 | if (!node_online(i)) | 382 | if (!node_online(i)) |
383 | setup_node_bootmem(i, nodes[i].start, nodes[i].end); | 383 | setup_node_bootmem(i, nodes[i].start, nodes[i].end); |
384 | 384 | ||
385 | for (i = 0; i < NR_CPUS; i++) { | 385 | for (i = 0; i < nr_cpu_ids; i++) { |
386 | int node = early_cpu_to_node(i); | 386 | int node = early_cpu_to_node(i); |
387 | 387 | ||
388 | if (node == NUMA_NO_NODE) | 388 | if (node == NUMA_NO_NODE) |
diff --git a/arch/x86/pci/acpi.c b/arch/x86/pci/acpi.c index 1d88d2b39771..9e5752fe4d15 100644 --- a/arch/x86/pci/acpi.c +++ b/arch/x86/pci/acpi.c | |||
@@ -4,7 +4,7 @@ | |||
4 | #include <linux/irq.h> | 4 | #include <linux/irq.h> |
5 | #include <linux/dmi.h> | 5 | #include <linux/dmi.h> |
6 | #include <asm/numa.h> | 6 | #include <asm/numa.h> |
7 | #include "pci.h" | 7 | #include <asm/pci_x86.h> |
8 | 8 | ||
9 | struct pci_root_info { | 9 | struct pci_root_info { |
10 | char *name; | 10 | char *name; |
diff --git a/arch/x86/pci/amd_bus.c b/arch/x86/pci/amd_bus.c index 22e057665e55..9bb09823b362 100644 --- a/arch/x86/pci/amd_bus.c +++ b/arch/x86/pci/amd_bus.c | |||
@@ -2,7 +2,7 @@ | |||
2 | #include <linux/pci.h> | 2 | #include <linux/pci.h> |
3 | #include <linux/topology.h> | 3 | #include <linux/topology.h> |
4 | #include <linux/cpu.h> | 4 | #include <linux/cpu.h> |
5 | #include "pci.h" | 5 | #include <asm/pci_x86.h> |
6 | 6 | ||
7 | #ifdef CONFIG_X86_64 | 7 | #ifdef CONFIG_X86_64 |
8 | #include <asm/pci-direct.h> | 8 | #include <asm/pci-direct.h> |
diff --git a/arch/x86/pci/common.c b/arch/x86/pci/common.c index bb1a01f089e2..62ddb73e09ed 100644 --- a/arch/x86/pci/common.c +++ b/arch/x86/pci/common.c | |||
@@ -14,8 +14,7 @@ | |||
14 | #include <asm/segment.h> | 14 | #include <asm/segment.h> |
15 | #include <asm/io.h> | 15 | #include <asm/io.h> |
16 | #include <asm/smp.h> | 16 | #include <asm/smp.h> |
17 | 17 | #include <asm/pci_x86.h> | |
18 | #include "pci.h" | ||
19 | 18 | ||
20 | unsigned int pci_probe = PCI_PROBE_BIOS | PCI_PROBE_CONF1 | PCI_PROBE_CONF2 | | 19 | unsigned int pci_probe = PCI_PROBE_BIOS | PCI_PROBE_CONF1 | PCI_PROBE_CONF2 | |
21 | PCI_PROBE_MMCONF; | 20 | PCI_PROBE_MMCONF; |
diff --git a/arch/x86/pci/direct.c b/arch/x86/pci/direct.c index 9a5af6c8fbe9..bd13c3e4c6db 100644 --- a/arch/x86/pci/direct.c +++ b/arch/x86/pci/direct.c | |||
@@ -5,7 +5,7 @@ | |||
5 | #include <linux/pci.h> | 5 | #include <linux/pci.h> |
6 | #include <linux/init.h> | 6 | #include <linux/init.h> |
7 | #include <linux/dmi.h> | 7 | #include <linux/dmi.h> |
8 | #include "pci.h" | 8 | #include <asm/pci_x86.h> |
9 | 9 | ||
10 | /* | 10 | /* |
11 | * Functions for accessing PCI base (first 256 bytes) and extended | 11 | * Functions for accessing PCI base (first 256 bytes) and extended |
diff --git a/arch/x86/pci/early.c b/arch/x86/pci/early.c index 86631ccbc25a..f6adf2c6d751 100644 --- a/arch/x86/pci/early.c +++ b/arch/x86/pci/early.c | |||
@@ -2,7 +2,7 @@ | |||
2 | #include <linux/pci.h> | 2 | #include <linux/pci.h> |
3 | #include <asm/pci-direct.h> | 3 | #include <asm/pci-direct.h> |
4 | #include <asm/io.h> | 4 | #include <asm/io.h> |
5 | #include "pci.h" | 5 | #include <asm/pci_x86.h> |
6 | 6 | ||
7 | /* Direct PCI access. This is used for PCI accesses in early boot before | 7 | /* Direct PCI access. This is used for PCI accesses in early boot before |
8 | the PCI subsystem works. */ | 8 | the PCI subsystem works. */ |
diff --git a/arch/x86/pci/fixup.c b/arch/x86/pci/fixup.c index 2051dc96b8e9..7d388d5cf548 100644 --- a/arch/x86/pci/fixup.c +++ b/arch/x86/pci/fixup.c | |||
@@ -6,8 +6,7 @@ | |||
6 | #include <linux/dmi.h> | 6 | #include <linux/dmi.h> |
7 | #include <linux/pci.h> | 7 | #include <linux/pci.h> |
8 | #include <linux/init.h> | 8 | #include <linux/init.h> |
9 | #include "pci.h" | 9 | #include <asm/pci_x86.h> |
10 | |||
11 | 10 | ||
12 | static void __devinit pci_fixup_i450nx(struct pci_dev *d) | 11 | static void __devinit pci_fixup_i450nx(struct pci_dev *d) |
13 | { | 12 | { |
diff --git a/arch/x86/pci/i386.c b/arch/x86/pci/i386.c index 844df0cbbd3e..e51bf2cda4b0 100644 --- a/arch/x86/pci/i386.c +++ b/arch/x86/pci/i386.c | |||
@@ -34,8 +34,8 @@ | |||
34 | 34 | ||
35 | #include <asm/pat.h> | 35 | #include <asm/pat.h> |
36 | #include <asm/e820.h> | 36 | #include <asm/e820.h> |
37 | #include <asm/pci_x86.h> | ||
37 | 38 | ||
38 | #include "pci.h" | ||
39 | 39 | ||
40 | static int | 40 | static int |
41 | skip_isa_ioresource_align(struct pci_dev *dev) { | 41 | skip_isa_ioresource_align(struct pci_dev *dev) { |
diff --git a/arch/x86/pci/init.c b/arch/x86/pci/init.c index d6c950f81858..bec3b048e72b 100644 --- a/arch/x86/pci/init.c +++ b/arch/x86/pci/init.c | |||
@@ -1,6 +1,6 @@ | |||
1 | #include <linux/pci.h> | 1 | #include <linux/pci.h> |
2 | #include <linux/init.h> | 2 | #include <linux/init.h> |
3 | #include "pci.h" | 3 | #include <asm/pci_x86.h> |
4 | 4 | ||
5 | /* arch_initcall has too random ordering, so call the initializers | 5 | /* arch_initcall has too random ordering, so call the initializers |
6 | in the right sequence from here. */ | 6 | in the right sequence from here. */ |
diff --git a/arch/x86/pci/irq.c b/arch/x86/pci/irq.c index bf69dbe08bff..373b9afe6d44 100644 --- a/arch/x86/pci/irq.c +++ b/arch/x86/pci/irq.c | |||
@@ -16,8 +16,7 @@ | |||
16 | #include <asm/io_apic.h> | 16 | #include <asm/io_apic.h> |
17 | #include <linux/irq.h> | 17 | #include <linux/irq.h> |
18 | #include <linux/acpi.h> | 18 | #include <linux/acpi.h> |
19 | 19 | #include <asm/pci_x86.h> | |
20 | #include "pci.h" | ||
21 | 20 | ||
22 | #define PIRQ_SIGNATURE (('$' << 0) + ('P' << 8) + ('I' << 16) + ('R' << 24)) | 21 | #define PIRQ_SIGNATURE (('$' << 0) + ('P' << 8) + ('I' << 16) + ('R' << 24)) |
23 | #define PIRQ_VERSION 0x0100 | 22 | #define PIRQ_VERSION 0x0100 |
diff --git a/arch/x86/pci/legacy.c b/arch/x86/pci/legacy.c index b722dd481b39..f1065b129e9c 100644 --- a/arch/x86/pci/legacy.c +++ b/arch/x86/pci/legacy.c | |||
@@ -3,7 +3,7 @@ | |||
3 | */ | 3 | */ |
4 | #include <linux/init.h> | 4 | #include <linux/init.h> |
5 | #include <linux/pci.h> | 5 | #include <linux/pci.h> |
6 | #include "pci.h" | 6 | #include <asm/pci_x86.h> |
7 | 7 | ||
8 | /* | 8 | /* |
9 | * Discover remaining PCI buses in case there are peer host bridges. | 9 | * Discover remaining PCI buses in case there are peer host bridges. |
diff --git a/arch/x86/pci/mmconfig-shared.c b/arch/x86/pci/mmconfig-shared.c index 654a2234f8f3..89bf9242c80a 100644 --- a/arch/x86/pci/mmconfig-shared.c +++ b/arch/x86/pci/mmconfig-shared.c | |||
@@ -15,8 +15,7 @@ | |||
15 | #include <linux/acpi.h> | 15 | #include <linux/acpi.h> |
16 | #include <linux/bitmap.h> | 16 | #include <linux/bitmap.h> |
17 | #include <asm/e820.h> | 17 | #include <asm/e820.h> |
18 | 18 | #include <asm/pci_x86.h> | |
19 | #include "pci.h" | ||
20 | 19 | ||
21 | /* aperture is up to 256MB but BIOS may reserve less */ | 20 | /* aperture is up to 256MB but BIOS may reserve less */ |
22 | #define MMCONFIG_APER_MIN (2 * 1024*1024) | 21 | #define MMCONFIG_APER_MIN (2 * 1024*1024) |
diff --git a/arch/x86/pci/mmconfig_32.c b/arch/x86/pci/mmconfig_32.c index f3c761dce695..8b2d561046a3 100644 --- a/arch/x86/pci/mmconfig_32.c +++ b/arch/x86/pci/mmconfig_32.c | |||
@@ -13,7 +13,7 @@ | |||
13 | #include <linux/init.h> | 13 | #include <linux/init.h> |
14 | #include <linux/acpi.h> | 14 | #include <linux/acpi.h> |
15 | #include <asm/e820.h> | 15 | #include <asm/e820.h> |
16 | #include "pci.h" | 16 | #include <asm/pci_x86.h> |
17 | 17 | ||
18 | /* Assume systems with more busses have correct MCFG */ | 18 | /* Assume systems with more busses have correct MCFG */ |
19 | #define mmcfg_virt_addr ((void __iomem *) fix_to_virt(FIX_PCIE_MCFG)) | 19 | #define mmcfg_virt_addr ((void __iomem *) fix_to_virt(FIX_PCIE_MCFG)) |
diff --git a/arch/x86/pci/mmconfig_64.c b/arch/x86/pci/mmconfig_64.c index a1994163c99d..30007ffc8e11 100644 --- a/arch/x86/pci/mmconfig_64.c +++ b/arch/x86/pci/mmconfig_64.c | |||
@@ -10,8 +10,7 @@ | |||
10 | #include <linux/acpi.h> | 10 | #include <linux/acpi.h> |
11 | #include <linux/bitmap.h> | 11 | #include <linux/bitmap.h> |
12 | #include <asm/e820.h> | 12 | #include <asm/e820.h> |
13 | 13 | #include <asm/pci_x86.h> | |
14 | #include "pci.h" | ||
15 | 14 | ||
16 | /* Static virtual mapping of the MMCONFIG aperture */ | 15 | /* Static virtual mapping of the MMCONFIG aperture */ |
17 | struct mmcfg_virt { | 16 | struct mmcfg_virt { |
diff --git a/arch/x86/pci/numaq_32.c b/arch/x86/pci/numaq_32.c index 1177845d3186..2089354968a2 100644 --- a/arch/x86/pci/numaq_32.c +++ b/arch/x86/pci/numaq_32.c | |||
@@ -7,7 +7,7 @@ | |||
7 | #include <linux/nodemask.h> | 7 | #include <linux/nodemask.h> |
8 | #include <mach_apic.h> | 8 | #include <mach_apic.h> |
9 | #include <asm/mpspec.h> | 9 | #include <asm/mpspec.h> |
10 | #include "pci.h" | 10 | #include <asm/pci_x86.h> |
11 | 11 | ||
12 | #define XQUAD_PORTIO_BASE 0xfe400000 | 12 | #define XQUAD_PORTIO_BASE 0xfe400000 |
13 | #define XQUAD_PORTIO_QUAD 0x40000 /* 256k per quad. */ | 13 | #define XQUAD_PORTIO_QUAD 0x40000 /* 256k per quad. */ |
diff --git a/arch/x86/pci/olpc.c b/arch/x86/pci/olpc.c index e11e9e803d5f..b889d824f7c6 100644 --- a/arch/x86/pci/olpc.c +++ b/arch/x86/pci/olpc.c | |||
@@ -29,7 +29,7 @@ | |||
29 | #include <linux/init.h> | 29 | #include <linux/init.h> |
30 | #include <asm/olpc.h> | 30 | #include <asm/olpc.h> |
31 | #include <asm/geode.h> | 31 | #include <asm/geode.h> |
32 | #include "pci.h" | 32 | #include <asm/pci_x86.h> |
33 | 33 | ||
34 | /* | 34 | /* |
35 | * In the tables below, the first two lines (8 longwords) are the | 35 | * In the tables below, the first two lines (8 longwords) are the |
diff --git a/arch/x86/pci/pcbios.c b/arch/x86/pci/pcbios.c index 37472fc6f729..b82cae970dfd 100644 --- a/arch/x86/pci/pcbios.c +++ b/arch/x86/pci/pcbios.c | |||
@@ -6,9 +6,8 @@ | |||
6 | #include <linux/init.h> | 6 | #include <linux/init.h> |
7 | #include <linux/module.h> | 7 | #include <linux/module.h> |
8 | #include <linux/uaccess.h> | 8 | #include <linux/uaccess.h> |
9 | #include "pci.h" | 9 | #include <asm/pci_x86.h> |
10 | #include "pci-functions.h" | 10 | #include <asm/mach-default/pci-functions.h> |
11 | |||
12 | 11 | ||
13 | /* BIOS32 signature: "_32_" */ | 12 | /* BIOS32 signature: "_32_" */ |
14 | #define BIOS32_SIGNATURE (('_' << 0) + ('3' << 8) + ('2' << 16) + ('_' << 24)) | 13 | #define BIOS32_SIGNATURE (('_' << 0) + ('3' << 8) + ('2' << 16) + ('_' << 24)) |
diff --git a/arch/x86/pci/visws.c b/arch/x86/pci/visws.c index 42f4cb19faca..16d0c0eb0d19 100644 --- a/arch/x86/pci/visws.c +++ b/arch/x86/pci/visws.c | |||
@@ -9,11 +9,10 @@ | |||
9 | #include <linux/init.h> | 9 | #include <linux/init.h> |
10 | 10 | ||
11 | #include <asm/setup.h> | 11 | #include <asm/setup.h> |
12 | #include <asm/pci_x86.h> | ||
12 | #include <asm/visws/cobalt.h> | 13 | #include <asm/visws/cobalt.h> |
13 | #include <asm/visws/lithium.h> | 14 | #include <asm/visws/lithium.h> |
14 | 15 | ||
15 | #include "pci.h" | ||
16 | |||
17 | static int pci_visws_enable_irq(struct pci_dev *dev) { return 0; } | 16 | static int pci_visws_enable_irq(struct pci_dev *dev) { return 0; } |
18 | static void pci_visws_disable_irq(struct pci_dev *dev) { } | 17 | static void pci_visws_disable_irq(struct pci_dev *dev) { } |
19 | 18 | ||
diff --git a/arch/x86/xen/mmu.c b/arch/x86/xen/mmu.c index 773d68d3e912..503c240e26c7 100644 --- a/arch/x86/xen/mmu.c +++ b/arch/x86/xen/mmu.c | |||
@@ -1082,7 +1082,7 @@ static void drop_other_mm_ref(void *info) | |||
1082 | 1082 | ||
1083 | static void xen_drop_mm_ref(struct mm_struct *mm) | 1083 | static void xen_drop_mm_ref(struct mm_struct *mm) |
1084 | { | 1084 | { |
1085 | cpumask_t mask; | 1085 | cpumask_var_t mask; |
1086 | unsigned cpu; | 1086 | unsigned cpu; |
1087 | 1087 | ||
1088 | if (current->active_mm == mm) { | 1088 | if (current->active_mm == mm) { |
@@ -1094,7 +1094,16 @@ static void xen_drop_mm_ref(struct mm_struct *mm) | |||
1094 | } | 1094 | } |
1095 | 1095 | ||
1096 | /* Get the "official" set of cpus referring to our pagetable. */ | 1096 | /* Get the "official" set of cpus referring to our pagetable. */ |
1097 | mask = mm->cpu_vm_mask; | 1097 | if (!alloc_cpumask_var(&mask, GFP_ATOMIC)) { |
1098 | for_each_online_cpu(cpu) { | ||
1099 | if (!cpumask_test_cpu(cpu, &mm->cpu_vm_mask) | ||
1100 | && per_cpu(xen_current_cr3, cpu) != __pa(mm->pgd)) | ||
1101 | continue; | ||
1102 | smp_call_function_single(cpu, drop_other_mm_ref, mm, 1); | ||
1103 | } | ||
1104 | return; | ||
1105 | } | ||
1106 | cpumask_copy(mask, &mm->cpu_vm_mask); | ||
1098 | 1107 | ||
1099 | /* It's possible that a vcpu may have a stale reference to our | 1108 | /* It's possible that a vcpu may have a stale reference to our |
1100 | cr3, because its in lazy mode, and it hasn't yet flushed | 1109 | cr3, because its in lazy mode, and it hasn't yet flushed |
@@ -1103,11 +1112,12 @@ static void xen_drop_mm_ref(struct mm_struct *mm) | |||
1103 | if needed. */ | 1112 | if needed. */ |
1104 | for_each_online_cpu(cpu) { | 1113 | for_each_online_cpu(cpu) { |
1105 | if (per_cpu(xen_current_cr3, cpu) == __pa(mm->pgd)) | 1114 | if (per_cpu(xen_current_cr3, cpu) == __pa(mm->pgd)) |
1106 | cpu_set(cpu, mask); | 1115 | cpumask_set_cpu(cpu, mask); |
1107 | } | 1116 | } |
1108 | 1117 | ||
1109 | if (!cpus_empty(mask)) | 1118 | if (!cpumask_empty(mask)) |
1110 | smp_call_function_mask(mask, drop_other_mm_ref, mm, 1); | 1119 | smp_call_function_many(mask, drop_other_mm_ref, mm, 1); |
1120 | free_cpumask_var(mask); | ||
1111 | } | 1121 | } |
1112 | #else | 1122 | #else |
1113 | static void xen_drop_mm_ref(struct mm_struct *mm) | 1123 | static void xen_drop_mm_ref(struct mm_struct *mm) |
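
Editor's note: the xen_drop_mm_ref() conversion above is an instance of the cpumask_var_t pattern: with CONFIG_CPUMASK_OFFSTACK the mask is heap-allocated instead of living on the stack, so the allocation can fail and needs a fallback path, and the mask must be freed afterwards. A minimal sketch of the shape, assuming a hypothetical per-cpu callback example_fn():

#include <linux/cpumask.h>
#include <linux/gfp.h>

static void example_call_on_mask(void (*example_fn)(unsigned int cpu),
				 const struct cpumask *src)
{
	cpumask_var_t mask;
	unsigned int cpu;

	if (!alloc_cpumask_var(&mask, GFP_ATOMIC)) {
		/* Off-stack allocation can fail: degrade to a direct walk. */
		for_each_cpu(cpu, src)
			example_fn(cpu);
		return;
	}

	cpumask_copy(mask, src);
	if (!cpumask_empty(mask))
		for_each_cpu(cpu, mask)
			example_fn(cpu);
	free_cpumask_var(mask);
}
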
diff --git a/arch/x86/xen/smp.c b/arch/x86/xen/smp.c index acd9b6705e02..c44e2069c7c7 100644 --- a/arch/x86/xen/smp.c +++ b/arch/x86/xen/smp.c | |||
@@ -33,7 +33,7 @@ | |||
33 | #include "xen-ops.h" | 33 | #include "xen-ops.h" |
34 | #include "mmu.h" | 34 | #include "mmu.h" |
35 | 35 | ||
36 | cpumask_t xen_cpu_initialized_map; | 36 | cpumask_var_t xen_cpu_initialized_map; |
37 | 37 | ||
38 | static DEFINE_PER_CPU(int, resched_irq); | 38 | static DEFINE_PER_CPU(int, resched_irq); |
39 | static DEFINE_PER_CPU(int, callfunc_irq); | 39 | static DEFINE_PER_CPU(int, callfunc_irq); |
@@ -158,7 +158,7 @@ static void __init xen_fill_possible_map(void) | |||
158 | { | 158 | { |
159 | int i, rc; | 159 | int i, rc; |
160 | 160 | ||
161 | for (i = 0; i < NR_CPUS; i++) { | 161 | for (i = 0; i < nr_cpu_ids; i++) { |
162 | rc = HYPERVISOR_vcpu_op(VCPUOP_is_up, i, NULL); | 162 | rc = HYPERVISOR_vcpu_op(VCPUOP_is_up, i, NULL); |
163 | if (rc >= 0) { | 163 | if (rc >= 0) { |
164 | num_processors++; | 164 | num_processors++; |
@@ -192,11 +192,14 @@ static void __init xen_smp_prepare_cpus(unsigned int max_cpus) | |||
192 | if (xen_smp_intr_init(0)) | 192 | if (xen_smp_intr_init(0)) |
193 | BUG(); | 193 | BUG(); |
194 | 194 | ||
195 | xen_cpu_initialized_map = cpumask_of_cpu(0); | 195 | if (!alloc_cpumask_var(&xen_cpu_initialized_map, GFP_KERNEL)) |
196 | panic("could not allocate xen_cpu_initialized_map\n"); | ||
197 | |||
198 | cpumask_copy(xen_cpu_initialized_map, cpumask_of(0)); | ||
196 | 199 | ||
197 | /* Restrict the possible_map according to max_cpus. */ | 200 | /* Restrict the possible_map according to max_cpus. */ |
198 | while ((num_possible_cpus() > 1) && (num_possible_cpus() > max_cpus)) { | 201 | while ((num_possible_cpus() > 1) && (num_possible_cpus() > max_cpus)) { |
199 | for (cpu = NR_CPUS - 1; !cpu_possible(cpu); cpu--) | 202 | for (cpu = nr_cpu_ids - 1; !cpu_possible(cpu); cpu--) |
200 | continue; | 203 | continue; |
201 | cpu_clear(cpu, cpu_possible_map); | 204 | cpu_clear(cpu, cpu_possible_map); |
202 | } | 205 | } |
@@ -221,7 +224,7 @@ cpu_initialize_context(unsigned int cpu, struct task_struct *idle) | |||
221 | struct vcpu_guest_context *ctxt; | 224 | struct vcpu_guest_context *ctxt; |
222 | struct desc_struct *gdt; | 225 | struct desc_struct *gdt; |
223 | 226 | ||
224 | if (cpu_test_and_set(cpu, xen_cpu_initialized_map)) | 227 | if (cpumask_test_and_set_cpu(cpu, xen_cpu_initialized_map)) |
225 | return 0; | 228 | return 0; |
226 | 229 | ||
227 | ctxt = kzalloc(sizeof(*ctxt), GFP_KERNEL); | 230 | ctxt = kzalloc(sizeof(*ctxt), GFP_KERNEL); |
@@ -408,24 +411,23 @@ static void xen_smp_send_reschedule(int cpu) | |||
408 | xen_send_IPI_one(cpu, XEN_RESCHEDULE_VECTOR); | 411 | xen_send_IPI_one(cpu, XEN_RESCHEDULE_VECTOR); |
409 | } | 412 | } |
410 | 413 | ||
411 | static void xen_send_IPI_mask(cpumask_t mask, enum ipi_vector vector) | 414 | static void xen_send_IPI_mask(const struct cpumask *mask, |
415 | enum ipi_vector vector) | ||
412 | { | 416 | { |
413 | unsigned cpu; | 417 | unsigned cpu; |
414 | 418 | ||
415 | cpus_and(mask, mask, cpu_online_map); | 419 | for_each_cpu_and(cpu, mask, cpu_online_mask) |
416 | |||
417 | for_each_cpu_mask_nr(cpu, mask) | ||
418 | xen_send_IPI_one(cpu, vector); | 420 | xen_send_IPI_one(cpu, vector); |
419 | } | 421 | } |
420 | 422 | ||
421 | static void xen_smp_send_call_function_ipi(cpumask_t mask) | 423 | static void xen_smp_send_call_function_ipi(const struct cpumask *mask) |
422 | { | 424 | { |
423 | int cpu; | 425 | int cpu; |
424 | 426 | ||
425 | xen_send_IPI_mask(mask, XEN_CALL_FUNCTION_VECTOR); | 427 | xen_send_IPI_mask(mask, XEN_CALL_FUNCTION_VECTOR); |
426 | 428 | ||
427 | /* Make sure other vcpus get a chance to run if they need to. */ | 429 | /* Make sure other vcpus get a chance to run if they need to. */ |
428 | for_each_cpu_mask_nr(cpu, mask) { | 430 | for_each_cpu(cpu, mask) { |
429 | if (xen_vcpu_stolen(cpu)) { | 431 | if (xen_vcpu_stolen(cpu)) { |
430 | HYPERVISOR_sched_op(SCHEDOP_yield, 0); | 432 | HYPERVISOR_sched_op(SCHEDOP_yield, 0); |
431 | break; | 433 | break; |
@@ -435,7 +437,8 @@ static void xen_smp_send_call_function_ipi(cpumask_t mask) | |||
435 | 437 | ||
436 | static void xen_smp_send_call_function_single_ipi(int cpu) | 438 | static void xen_smp_send_call_function_single_ipi(int cpu) |
437 | { | 439 | { |
438 | xen_send_IPI_mask(cpumask_of_cpu(cpu), XEN_CALL_FUNCTION_SINGLE_VECTOR); | 440 | xen_send_IPI_mask(cpumask_of(cpu), |
441 | XEN_CALL_FUNCTION_SINGLE_VECTOR); | ||
439 | } | 442 | } |
440 | 443 | ||
441 | static irqreturn_t xen_call_function_interrupt(int irq, void *dev_id) | 444 | static irqreturn_t xen_call_function_interrupt(int irq, void *dev_id) |
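
Editor's note: the xen_send_IPI_mask() changes above move to const struct cpumask * parameters. for_each_cpu_and() walks the intersection of two masks without materializing the temporary that the old cpus_and() copy needed, and cpumask_of(cpu) supplies a read-only single-cpu mask. A sketch under those APIs, with example_kick() as a hypothetical stand-in for the per-cpu IPI:

#include <linux/cpumask.h>

static void example_kick(unsigned int cpu, int vector)
{
	/* hypothetical stand-in for xen_send_IPI_one() */
}

static void example_send_to_mask(const struct cpumask *mask, int vector)
{
	unsigned int cpu;

	/* Intersect with the online mask on the fly; no temporary copy. */
	for_each_cpu_and(cpu, mask, cpu_online_mask)
		example_kick(cpu, vector);
}

static void example_send_to_one(unsigned int cpu, int vector)
{
	example_send_to_mask(cpumask_of(cpu), vector);
}
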
diff --git a/arch/x86/xen/suspend.c b/arch/x86/xen/suspend.c index 2a234db5949b..212ffe012b76 100644 --- a/arch/x86/xen/suspend.c +++ b/arch/x86/xen/suspend.c | |||
@@ -35,7 +35,8 @@ void xen_post_suspend(int suspend_cancelled) | |||
35 | pfn_to_mfn(xen_start_info->console.domU.mfn); | 35 | pfn_to_mfn(xen_start_info->console.domU.mfn); |
36 | } else { | 36 | } else { |
37 | #ifdef CONFIG_SMP | 37 | #ifdef CONFIG_SMP |
38 | xen_cpu_initialized_map = cpu_online_map; | 38 | BUG_ON(xen_cpu_initialized_map == NULL); |
39 | cpumask_copy(xen_cpu_initialized_map, cpu_online_mask); | ||
39 | #endif | 40 | #endif |
40 | xen_vcpu_restore(); | 41 | xen_vcpu_restore(); |
41 | } | 42 | } |
diff --git a/arch/x86/xen/time.c b/arch/x86/xen/time.c index c9f7cda48ed7..65d75a6be0ba 100644 --- a/arch/x86/xen/time.c +++ b/arch/x86/xen/time.c | |||
@@ -437,7 +437,7 @@ void xen_setup_timer(int cpu) | |||
437 | evt = &per_cpu(xen_clock_events, cpu); | 437 | evt = &per_cpu(xen_clock_events, cpu); |
438 | memcpy(evt, xen_clockevent, sizeof(*evt)); | 438 | memcpy(evt, xen_clockevent, sizeof(*evt)); |
439 | 439 | ||
440 | evt->cpumask = cpumask_of_cpu(cpu); | 440 | evt->cpumask = cpumask_of(cpu); |
441 | evt->irq = irq; | 441 | evt->irq = irq; |
442 | 442 | ||
443 | setup_runstate_info(cpu); | 443 | setup_runstate_info(cpu); |
diff --git a/arch/x86/xen/xen-ops.h b/arch/x86/xen/xen-ops.h index 9e1afae8461f..c1f8faf0a2c5 100644 --- a/arch/x86/xen/xen-ops.h +++ b/arch/x86/xen/xen-ops.h | |||
@@ -58,7 +58,7 @@ void __init xen_init_spinlocks(void); | |||
58 | __cpuinit void xen_init_lock_cpu(int cpu); | 58 | __cpuinit void xen_init_lock_cpu(int cpu); |
59 | void xen_uninit_lock_cpu(int cpu); | 59 | void xen_uninit_lock_cpu(int cpu); |
60 | 60 | ||
61 | extern cpumask_t xen_cpu_initialized_map; | 61 | extern cpumask_var_t xen_cpu_initialized_map; |
62 | #else | 62 | #else |
63 | static inline void xen_smp_init(void) {} | 63 | static inline void xen_smp_init(void) {} |
64 | #endif | 64 | #endif |
diff --git a/drivers/base/cpu.c b/drivers/base/cpu.c index 64f5d54f7edc..4259072f5bd0 100644 --- a/drivers/base/cpu.c +++ b/drivers/base/cpu.c | |||
@@ -109,7 +109,7 @@ static SYSDEV_ATTR(crash_notes, 0400, show_crash_notes, NULL); | |||
109 | */ | 109 | */ |
110 | static ssize_t print_cpus_map(char *buf, cpumask_t *map) | 110 | static ssize_t print_cpus_map(char *buf, cpumask_t *map) |
111 | { | 111 | { |
112 | int n = cpulist_scnprintf(buf, PAGE_SIZE-2, *map); | 112 | int n = cpulist_scnprintf(buf, PAGE_SIZE-2, map); |
113 | 113 | ||
114 | buf[n++] = '\n'; | 114 | buf[n++] = '\n'; |
115 | buf[n] = '\0'; | 115 | buf[n] = '\0'; |
diff --git a/drivers/base/node.c b/drivers/base/node.c index f5207090885a..91636cd8b6c9 100644 --- a/drivers/base/node.c +++ b/drivers/base/node.c | |||
@@ -30,8 +30,8 @@ static ssize_t node_read_cpumap(struct sys_device *dev, int type, char *buf) | |||
30 | BUILD_BUG_ON((NR_CPUS/32 * 9) > (PAGE_SIZE-1)); | 30 | BUILD_BUG_ON((NR_CPUS/32 * 9) > (PAGE_SIZE-1)); |
31 | 31 | ||
32 | len = type? | 32 | len = type? |
33 | cpulist_scnprintf(buf, PAGE_SIZE-2, *mask): | 33 | cpulist_scnprintf(buf, PAGE_SIZE-2, mask) : |
34 | cpumask_scnprintf(buf, PAGE_SIZE-2, *mask); | 34 | cpumask_scnprintf(buf, PAGE_SIZE-2, mask); |
35 | buf[len++] = '\n'; | 35 | buf[len++] = '\n'; |
36 | buf[len] = '\0'; | 36 | buf[len] = '\0'; |
37 | return len; | 37 | return len; |
diff --git a/drivers/base/topology.c b/drivers/base/topology.c index 199cd97e32e6..a8bc1cbcfa7c 100644 --- a/drivers/base/topology.c +++ b/drivers/base/topology.c | |||
@@ -49,8 +49,8 @@ static ssize_t show_cpumap(int type, cpumask_t *mask, char *buf) | |||
49 | 49 | ||
50 | if (len > 1) { | 50 | if (len > 1) { |
51 | n = type? | 51 | n = type? |
52 | cpulist_scnprintf(buf, len-2, *mask): | 52 | cpulist_scnprintf(buf, len-2, mask) : |
53 | cpumask_scnprintf(buf, len-2, *mask); | 53 | cpumask_scnprintf(buf, len-2, mask); |
54 | buf[n++] = '\n'; | 54 | buf[n++] = '\n'; |
55 | buf[n] = '\0'; | 55 | buf[n] = '\0'; |
56 | } | 56 | } |
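
Editor's note: the three sysfs hunks above reflect cpulist_scnprintf()/cpumask_scnprintf() now taking a const struct cpumask * instead of a by-value cpumask_t. A sketch of the resulting show helper; the name and the bool-style flag are illustrative, and the buffer sizing mirrors the code above:

#include <linux/cpumask.h>

static ssize_t example_show_mask(char *buf, size_t size,
				 const struct cpumask *mask, int as_list)
{
	/* Reserve room for the trailing newline and NUL. */
	int n = as_list ? cpulist_scnprintf(buf, size - 2, mask)
			: cpumask_scnprintf(buf, size - 2, mask);

	buf[n++] = '\n';
	buf[n] = '\0';
	return n;
}
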
diff --git a/drivers/char/Kconfig b/drivers/char/Kconfig index c602b547cc6e..1697043119bd 100644 --- a/drivers/char/Kconfig +++ b/drivers/char/Kconfig | |||
@@ -190,7 +190,7 @@ config DIGIEPCA | |||
190 | 190 | ||
191 | config ESPSERIAL | 191 | config ESPSERIAL |
192 | tristate "Hayes ESP serial port support" | 192 | tristate "Hayes ESP serial port support" |
193 | depends on SERIAL_NONSTANDARD && ISA && ISA_DMA_API | 193 | depends on SERIAL_NONSTANDARD && ISA && ISA_DMA_API && BROKEN |
194 | help | 194 | help |
195 | This is a driver which supports Hayes ESP serial ports. Both single | 195 | This is a driver which supports Hayes ESP serial ports. Both single |
196 | port cards and multiport cards are supported. Make sure to read | 196 | port cards and multiport cards are supported. Make sure to read |
@@ -443,6 +443,17 @@ config UNIX98_PTYS | |||
443 | All modern Linux systems use the Unix98 ptys. Say Y unless | 443 | All modern Linux systems use the Unix98 ptys. Say Y unless |
444 | you're on an embedded system and want to conserve memory. | 444 | you're on an embedded system and want to conserve memory. |
445 | 445 | ||
446 | config DEVPTS_MULTIPLE_INSTANCES | ||
447 | bool "Support multiple instances of devpts" | ||
448 | depends on UNIX98_PTYS | ||
449 | default n | ||
450 | ---help--- | ||
451 | Enable support for multiple instances of devpts filesystem. | ||
452 | If you want to have isolated PTY namespaces (eg: in containers), | ||
453 | say Y here. Otherwise, say N. If enabled, each mount of devpts | ||
454 | filesystem with the '-o newinstance' option will create an | ||
455 | independent PTY namespace. | ||
456 | |||
446 | config LEGACY_PTYS | 457 | config LEGACY_PTYS |
447 | bool "Legacy (BSD) PTY support" | 458 | bool "Legacy (BSD) PTY support" |
448 | default y | 459 | default y |
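
Editor's note: for illustration, a userspace sketch of the '-o newinstance' option named in the help text above, using mount(2) directly; the target directory is hypothetical and must already exist:

#include <stdio.h>
#include <sys/mount.h>

int main(void)
{
	/* Each mount with "newinstance" gets an independent PTY namespace. */
	if (mount("devpts", "/mnt/container-pts", "devpts", 0,
		  "newinstance") < 0) {
		perror("mount devpts");
		return 1;
	}
	return 0;
}
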
diff --git a/drivers/char/amiserial.c b/drivers/char/amiserial.c index b97aebd7aeb8..4e0cfdeab146 100644 --- a/drivers/char/amiserial.c +++ b/drivers/char/amiserial.c | |||
@@ -170,7 +170,7 @@ static __inline__ void rtsdtr_ctrl(int bits) | |||
170 | */ | 170 | */ |
171 | static void rs_stop(struct tty_struct *tty) | 171 | static void rs_stop(struct tty_struct *tty) |
172 | { | 172 | { |
173 | struct async_struct *info = (struct async_struct *)tty->driver_data; | 173 | struct async_struct *info = tty->driver_data; |
174 | unsigned long flags; | 174 | unsigned long flags; |
175 | 175 | ||
176 | if (serial_paranoia_check(info, tty->name, "rs_stop")) | 176 | if (serial_paranoia_check(info, tty->name, "rs_stop")) |
@@ -190,7 +190,7 @@ static void rs_stop(struct tty_struct *tty) | |||
190 | 190 | ||
191 | static void rs_start(struct tty_struct *tty) | 191 | static void rs_start(struct tty_struct *tty) |
192 | { | 192 | { |
193 | struct async_struct *info = (struct async_struct *)tty->driver_data; | 193 | struct async_struct *info = tty->driver_data; |
194 | unsigned long flags; | 194 | unsigned long flags; |
195 | 195 | ||
196 | if (serial_paranoia_check(info, tty->name, "rs_start")) | 196 | if (serial_paranoia_check(info, tty->name, "rs_start")) |
@@ -861,7 +861,7 @@ static int rs_put_char(struct tty_struct *tty, unsigned char ch) | |||
861 | 861 | ||
862 | static void rs_flush_chars(struct tty_struct *tty) | 862 | static void rs_flush_chars(struct tty_struct *tty) |
863 | { | 863 | { |
864 | struct async_struct *info = (struct async_struct *)tty->driver_data; | 864 | struct async_struct *info = tty->driver_data; |
865 | unsigned long flags; | 865 | unsigned long flags; |
866 | 866 | ||
867 | if (serial_paranoia_check(info, tty->name, "rs_flush_chars")) | 867 | if (serial_paranoia_check(info, tty->name, "rs_flush_chars")) |
@@ -934,7 +934,7 @@ static int rs_write(struct tty_struct * tty, const unsigned char *buf, int count | |||
934 | 934 | ||
935 | static int rs_write_room(struct tty_struct *tty) | 935 | static int rs_write_room(struct tty_struct *tty) |
936 | { | 936 | { |
937 | struct async_struct *info = (struct async_struct *)tty->driver_data; | 937 | struct async_struct *info = tty->driver_data; |
938 | 938 | ||
939 | if (serial_paranoia_check(info, tty->name, "rs_write_room")) | 939 | if (serial_paranoia_check(info, tty->name, "rs_write_room")) |
940 | return 0; | 940 | return 0; |
@@ -943,7 +943,7 @@ static int rs_write_room(struct tty_struct *tty) | |||
943 | 943 | ||
944 | static int rs_chars_in_buffer(struct tty_struct *tty) | 944 | static int rs_chars_in_buffer(struct tty_struct *tty) |
945 | { | 945 | { |
946 | struct async_struct *info = (struct async_struct *)tty->driver_data; | 946 | struct async_struct *info = tty->driver_data; |
947 | 947 | ||
948 | if (serial_paranoia_check(info, tty->name, "rs_chars_in_buffer")) | 948 | if (serial_paranoia_check(info, tty->name, "rs_chars_in_buffer")) |
949 | return 0; | 949 | return 0; |
@@ -952,7 +952,7 @@ static int rs_chars_in_buffer(struct tty_struct *tty) | |||
952 | 952 | ||
953 | static void rs_flush_buffer(struct tty_struct *tty) | 953 | static void rs_flush_buffer(struct tty_struct *tty) |
954 | { | 954 | { |
955 | struct async_struct *info = (struct async_struct *)tty->driver_data; | 955 | struct async_struct *info = tty->driver_data; |
956 | unsigned long flags; | 956 | unsigned long flags; |
957 | 957 | ||
958 | if (serial_paranoia_check(info, tty->name, "rs_flush_buffer")) | 958 | if (serial_paranoia_check(info, tty->name, "rs_flush_buffer")) |
@@ -969,7 +969,7 @@ static void rs_flush_buffer(struct tty_struct *tty) | |||
969 | */ | 969 | */ |
970 | static void rs_send_xchar(struct tty_struct *tty, char ch) | 970 | static void rs_send_xchar(struct tty_struct *tty, char ch) |
971 | { | 971 | { |
972 | struct async_struct *info = (struct async_struct *)tty->driver_data; | 972 | struct async_struct *info = tty->driver_data; |
973 | unsigned long flags; | 973 | unsigned long flags; |
974 | 974 | ||
975 | if (serial_paranoia_check(info, tty->name, "rs_send_char")) | 975 | if (serial_paranoia_check(info, tty->name, "rs_send_char")) |
@@ -1004,7 +1004,7 @@ static void rs_send_xchar(struct tty_struct *tty, char ch) | |||
1004 | */ | 1004 | */ |
1005 | static void rs_throttle(struct tty_struct * tty) | 1005 | static void rs_throttle(struct tty_struct * tty) |
1006 | { | 1006 | { |
1007 | struct async_struct *info = (struct async_struct *)tty->driver_data; | 1007 | struct async_struct *info = tty->driver_data; |
1008 | unsigned long flags; | 1008 | unsigned long flags; |
1009 | #ifdef SERIAL_DEBUG_THROTTLE | 1009 | #ifdef SERIAL_DEBUG_THROTTLE |
1010 | char buf[64]; | 1010 | char buf[64]; |
@@ -1029,7 +1029,7 @@ static void rs_throttle(struct tty_struct * tty) | |||
1029 | 1029 | ||
1030 | static void rs_unthrottle(struct tty_struct * tty) | 1030 | static void rs_unthrottle(struct tty_struct * tty) |
1031 | { | 1031 | { |
1032 | struct async_struct *info = (struct async_struct *)tty->driver_data; | 1032 | struct async_struct *info = tty->driver_data; |
1033 | unsigned long flags; | 1033 | unsigned long flags; |
1034 | #ifdef SERIAL_DEBUG_THROTTLE | 1034 | #ifdef SERIAL_DEBUG_THROTTLE |
1035 | char buf[64]; | 1035 | char buf[64]; |
@@ -1194,7 +1194,7 @@ static int get_lsr_info(struct async_struct * info, unsigned int __user *value) | |||
1194 | 1194 | ||
1195 | static int rs_tiocmget(struct tty_struct *tty, struct file *file) | 1195 | static int rs_tiocmget(struct tty_struct *tty, struct file *file) |
1196 | { | 1196 | { |
1197 | struct async_struct * info = (struct async_struct *)tty->driver_data; | 1197 | struct async_struct * info = tty->driver_data; |
1198 | unsigned char control, status; | 1198 | unsigned char control, status; |
1199 | unsigned long flags; | 1199 | unsigned long flags; |
1200 | 1200 | ||
@@ -1217,7 +1217,7 @@ static int rs_tiocmget(struct tty_struct *tty, struct file *file) | |||
1217 | static int rs_tiocmset(struct tty_struct *tty, struct file *file, | 1217 | static int rs_tiocmset(struct tty_struct *tty, struct file *file, |
1218 | unsigned int set, unsigned int clear) | 1218 | unsigned int set, unsigned int clear) |
1219 | { | 1219 | { |
1220 | struct async_struct * info = (struct async_struct *)tty->driver_data; | 1220 | struct async_struct * info = tty->driver_data; |
1221 | unsigned long flags; | 1221 | unsigned long flags; |
1222 | 1222 | ||
1223 | if (serial_paranoia_check(info, tty->name, "rs_ioctl")) | 1223 | if (serial_paranoia_check(info, tty->name, "rs_ioctl")) |
@@ -1244,7 +1244,7 @@ static int rs_tiocmset(struct tty_struct *tty, struct file *file, | |||
1244 | */ | 1244 | */ |
1245 | static int rs_break(struct tty_struct *tty, int break_state) | 1245 | static int rs_break(struct tty_struct *tty, int break_state) |
1246 | { | 1246 | { |
1247 | struct async_struct * info = (struct async_struct *)tty->driver_data; | 1247 | struct async_struct * info = tty->driver_data; |
1248 | unsigned long flags; | 1248 | unsigned long flags; |
1249 | 1249 | ||
1250 | if (serial_paranoia_check(info, tty->name, "rs_break")) | 1250 | if (serial_paranoia_check(info, tty->name, "rs_break")) |
@@ -1264,7 +1264,7 @@ static int rs_break(struct tty_struct *tty, int break_state) | |||
1264 | static int rs_ioctl(struct tty_struct *tty, struct file * file, | 1264 | static int rs_ioctl(struct tty_struct *tty, struct file * file, |
1265 | unsigned int cmd, unsigned long arg) | 1265 | unsigned int cmd, unsigned long arg) |
1266 | { | 1266 | { |
1267 | struct async_struct * info = (struct async_struct *)tty->driver_data; | 1267 | struct async_struct * info = tty->driver_data; |
1268 | struct async_icount cprev, cnow; /* kernel counter temps */ | 1268 | struct async_icount cprev, cnow; /* kernel counter temps */ |
1269 | struct serial_icounter_struct icount; | 1269 | struct serial_icounter_struct icount; |
1270 | void __user *argp = (void __user *)arg; | 1270 | void __user *argp = (void __user *)arg; |
@@ -1368,7 +1368,7 @@ static int rs_ioctl(struct tty_struct *tty, struct file * file, | |||
1368 | 1368 | ||
1369 | static void rs_set_termios(struct tty_struct *tty, struct ktermios *old_termios) | 1369 | static void rs_set_termios(struct tty_struct *tty, struct ktermios *old_termios) |
1370 | { | 1370 | { |
1371 | struct async_struct *info = (struct async_struct *)tty->driver_data; | 1371 | struct async_struct *info = tty->driver_data; |
1372 | unsigned long flags; | 1372 | unsigned long flags; |
1373 | unsigned int cflag = tty->termios->c_cflag; | 1373 | unsigned int cflag = tty->termios->c_cflag; |
1374 | 1374 | ||
@@ -1428,7 +1428,7 @@ static void rs_set_termios(struct tty_struct *tty, struct ktermios *old_termios) | |||
1428 | */ | 1428 | */ |
1429 | static void rs_close(struct tty_struct *tty, struct file * filp) | 1429 | static void rs_close(struct tty_struct *tty, struct file * filp) |
1430 | { | 1430 | { |
1431 | struct async_struct * info = (struct async_struct *)tty->driver_data; | 1431 | struct async_struct * info = tty->driver_data; |
1432 | struct serial_state *state; | 1432 | struct serial_state *state; |
1433 | unsigned long flags; | 1433 | unsigned long flags; |
1434 | 1434 | ||
@@ -1523,7 +1523,7 @@ static void rs_close(struct tty_struct *tty, struct file * filp) | |||
1523 | */ | 1523 | */ |
1524 | static void rs_wait_until_sent(struct tty_struct *tty, int timeout) | 1524 | static void rs_wait_until_sent(struct tty_struct *tty, int timeout) |
1525 | { | 1525 | { |
1526 | struct async_struct * info = (struct async_struct *)tty->driver_data; | 1526 | struct async_struct * info = tty->driver_data; |
1527 | unsigned long orig_jiffies, char_time; | 1527 | unsigned long orig_jiffies, char_time; |
1528 | int lsr; | 1528 | int lsr; |
1529 | 1529 | ||
@@ -1587,7 +1587,7 @@ static void rs_wait_until_sent(struct tty_struct *tty, int timeout) | |||
1587 | */ | 1587 | */ |
1588 | static void rs_hangup(struct tty_struct *tty) | 1588 | static void rs_hangup(struct tty_struct *tty) |
1589 | { | 1589 | { |
1590 | struct async_struct * info = (struct async_struct *)tty->driver_data; | 1590 | struct async_struct * info = tty->driver_data; |
1591 | struct serial_state *state = info->state; | 1591 | struct serial_state *state = info->state; |
1592 | 1592 | ||
1593 | if (serial_paranoia_check(info, tty->name, "rs_hangup")) | 1593 | if (serial_paranoia_check(info, tty->name, "rs_hangup")) |
diff --git a/drivers/char/cyclades.c b/drivers/char/cyclades.c index 5e5b1dc1a0a7..6a59f72a9c21 100644 --- a/drivers/char/cyclades.c +++ b/drivers/char/cyclades.c | |||
@@ -5010,7 +5010,7 @@ static int __devinit cy_pci_probe(struct pci_dev *pdev, | |||
5010 | if (nchan == 0) { | 5010 | if (nchan == 0) { |
5011 | dev_err(&pdev->dev, "Cyclom-Y PCI host card with no " | 5011 | dev_err(&pdev->dev, "Cyclom-Y PCI host card with no " |
5012 | "Serial-Modules\n"); | 5012 | "Serial-Modules\n"); |
5013 | return -EIO; | 5013 | goto err_unmap; |
5014 | } | 5014 | } |
5015 | } else if (device_id == PCI_DEVICE_ID_CYCLOM_Z_Hi) { | 5015 | } else if (device_id == PCI_DEVICE_ID_CYCLOM_Z_Hi) { |
5016 | struct RUNTIME_9060 __iomem *ctl_addr; | 5016 | struct RUNTIME_9060 __iomem *ctl_addr; |
diff --git a/drivers/char/epca.c b/drivers/char/epca.c index cf2461d34e5f..39ad820b2350 100644 --- a/drivers/char/epca.c +++ b/drivers/char/epca.c | |||
@@ -69,7 +69,9 @@ static int invalid_lilo_config; | |||
69 | 69 | ||
70 | /* | 70 | /* |
71 | * The ISA boards do window flipping into the same spaces so it's only sane with | 71 | * The ISA boards do window flipping into the same spaces so it's only sane with |
72 | * a single lock. It's still pretty efficient. | 72 | * a single lock. It's still pretty efficient. This lock guards the hardware |
73 | * and the tty_port lock guards the kernel side stuff like use counts. Take | ||
74 | * this lock inside the port lock if you must take both. | ||
73 | */ | 75 | */ |
74 | static DEFINE_SPINLOCK(epca_lock); | 76 | static DEFINE_SPINLOCK(epca_lock); |
75 | 77 | ||
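
Editor's note: the reworked comment documents a lock ordering: epca_lock nests inside the tty_port lock. A minimal sketch of that nesting, matching what the pc_open() hunk below actually does; the bookkeeping and hardware steps are placeholders:

#include <linux/tty.h>

static void example_locked_sequence(struct tty_port *port)
{
	unsigned long flags;

	spin_lock_irqsave(&port->lock, flags);	/* kernel-side state first */
	/* ... use counts and other tty_port bookkeeping ... */
	spin_lock(&epca_lock);			/* then the hardware window */
	/* ... window flipping / board registers ... */
	spin_unlock(&epca_lock);
	spin_unlock_irqrestore(&port->lock, flags);
}
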
@@ -156,14 +158,12 @@ static struct channel *verifyChannel(struct tty_struct *); | |||
156 | static void pc_sched_event(struct channel *, int); | 158 | static void pc_sched_event(struct channel *, int); |
157 | static void epca_error(int, char *); | 159 | static void epca_error(int, char *); |
158 | static void pc_close(struct tty_struct *, struct file *); | 160 | static void pc_close(struct tty_struct *, struct file *); |
159 | static void shutdown(struct channel *); | 161 | static void shutdown(struct channel *, struct tty_struct *tty); |
160 | static void pc_hangup(struct tty_struct *); | 162 | static void pc_hangup(struct tty_struct *); |
161 | static int pc_write_room(struct tty_struct *); | 163 | static int pc_write_room(struct tty_struct *); |
162 | static int pc_chars_in_buffer(struct tty_struct *); | 164 | static int pc_chars_in_buffer(struct tty_struct *); |
163 | static void pc_flush_buffer(struct tty_struct *); | 165 | static void pc_flush_buffer(struct tty_struct *); |
164 | static void pc_flush_chars(struct tty_struct *); | 166 | static void pc_flush_chars(struct tty_struct *); |
165 | static int block_til_ready(struct tty_struct *, struct file *, | ||
166 | struct channel *); | ||
167 | static int pc_open(struct tty_struct *, struct file *); | 167 | static int pc_open(struct tty_struct *, struct file *); |
168 | static void post_fep_init(unsigned int crd); | 168 | static void post_fep_init(unsigned int crd); |
169 | static void epcapoll(unsigned long); | 169 | static void epcapoll(unsigned long); |
@@ -173,7 +173,7 @@ static unsigned termios2digi_h(struct channel *ch, unsigned); | |||
173 | static unsigned termios2digi_i(struct channel *ch, unsigned); | 173 | static unsigned termios2digi_i(struct channel *ch, unsigned); |
174 | static unsigned termios2digi_c(struct channel *ch, unsigned); | 174 | static unsigned termios2digi_c(struct channel *ch, unsigned); |
175 | static void epcaparam(struct tty_struct *, struct channel *); | 175 | static void epcaparam(struct tty_struct *, struct channel *); |
176 | static void receive_data(struct channel *); | 176 | static void receive_data(struct channel *, struct tty_struct *tty); |
177 | static int pc_ioctl(struct tty_struct *, struct file *, | 177 | static int pc_ioctl(struct tty_struct *, struct file *, |
178 | unsigned int, unsigned long); | 178 | unsigned int, unsigned long); |
179 | static int info_ioctl(struct tty_struct *, struct file *, | 179 | static int info_ioctl(struct tty_struct *, struct file *, |
@@ -392,7 +392,7 @@ static struct channel *verifyChannel(struct tty_struct *tty) | |||
392 | * through tty->driver_data this should catch it. | 392 | * through tty->driver_data this should catch it. |
393 | */ | 393 | */ |
394 | if (tty) { | 394 | if (tty) { |
395 | struct channel *ch = (struct channel *)tty->driver_data; | 395 | struct channel *ch = tty->driver_data; |
396 | if (ch >= &digi_channels[0] && ch < &digi_channels[nbdevs]) { | 396 | if (ch >= &digi_channels[0] && ch < &digi_channels[nbdevs]) { |
397 | if (ch->magic == EPCA_MAGIC) | 397 | if (ch->magic == EPCA_MAGIC) |
398 | return ch; | 398 | return ch; |
@@ -419,76 +419,34 @@ static void epca_error(int line, char *msg) | |||
419 | static void pc_close(struct tty_struct *tty, struct file *filp) | 419 | static void pc_close(struct tty_struct *tty, struct file *filp) |
420 | { | 420 | { |
421 | struct channel *ch; | 421 | struct channel *ch; |
422 | unsigned long flags; | 422 | struct tty_port *port; |
423 | /* | 423 | /* |
424 | * verifyChannel returns the channel from the tty struct if it is | 424 | * verifyChannel returns the channel from the tty struct if it is |
425 | * valid. This serves as a sanity check. | 425 | * valid. This serves as a sanity check. |
426 | */ | 426 | */ |
427 | ch = verifyChannel(tty); | 427 | ch = verifyChannel(tty); |
428 | if (ch != NULL) { | 428 | if (ch == NULL) |
429 | spin_lock_irqsave(&epca_lock, flags); | 429 | return; |
430 | if (tty_hung_up_p(filp)) { | 430 | port = &ch->port; |
431 | spin_unlock_irqrestore(&epca_lock, flags); | ||
432 | return; | ||
433 | } | ||
434 | if (ch->port.count-- > 1) { | ||
435 | /* Begin channel is open more than once */ | ||
436 | /* | ||
437 | * Return without doing anything. Someone might still | ||
438 | * be using the channel. | ||
439 | */ | ||
440 | spin_unlock_irqrestore(&epca_lock, flags); | ||
441 | return; | ||
442 | } | ||
443 | /* Port open only once go ahead with shutdown & reset */ | ||
444 | BUG_ON(ch->port.count < 0); | ||
445 | |||
446 | /* | ||
447 | * Let the rest of the driver know the channel is being closed. | ||
448 | * This becomes important if an open is attempted before close | ||
449 | * is finished. | ||
450 | */ | ||
451 | ch->port.flags |= ASYNC_CLOSING; | ||
452 | tty->closing = 1; | ||
453 | |||
454 | spin_unlock_irqrestore(&epca_lock, flags); | ||
455 | |||
456 | if (ch->port.flags & ASYNC_INITIALIZED) { | ||
457 | /* Setup an event to indicate when the | ||
458 | transmit buffer empties */ | ||
459 | setup_empty_event(tty, ch); | ||
460 | /* 30 seconds timeout */ | ||
461 | tty_wait_until_sent(tty, 3000); | ||
462 | } | ||
463 | pc_flush_buffer(tty); | ||
464 | 431 | ||
465 | tty_ldisc_flush(tty); | 432 | if (tty_port_close_start(port, tty, filp) == 0) |
466 | shutdown(ch); | 433 | return; |
467 | 434 | ||
468 | spin_lock_irqsave(&epca_lock, flags); | 435 | pc_flush_buffer(tty); |
469 | tty->closing = 0; | 436 | shutdown(ch, tty); |
470 | ch->event = 0; | ||
471 | ch->port.tty = NULL; | ||
472 | spin_unlock_irqrestore(&epca_lock, flags); | ||
473 | 437 | ||
474 | if (ch->port.blocked_open) { | 438 | tty_port_close_end(port, tty); |
475 | if (ch->close_delay) | 439 | ch->event = 0; /* FIXME: review ch->event locking */ |
476 | msleep_interruptible(jiffies_to_msecs(ch->close_delay)); | 440 | tty_port_tty_set(port, NULL); |
477 | wake_up_interruptible(&ch->port.open_wait); | ||
478 | } | ||
479 | ch->port.flags &= ~(ASYNC_NORMAL_ACTIVE | ASYNC_INITIALIZED | | ||
480 | ASYNC_CLOSING); | ||
481 | wake_up_interruptible(&ch->port.close_wait); | ||
482 | } | ||
483 | } | 441 | } |
484 | 442 | ||
485 | static void shutdown(struct channel *ch) | 443 | static void shutdown(struct channel *ch, struct tty_struct *tty) |
486 | { | 444 | { |
487 | unsigned long flags; | 445 | unsigned long flags; |
488 | struct tty_struct *tty; | ||
489 | struct board_chan __iomem *bc; | 446 | struct board_chan __iomem *bc; |
447 | struct tty_port *port = &ch->port; | ||
490 | 448 | ||
491 | if (!(ch->port.flags & ASYNC_INITIALIZED)) | 449 | if (!(port->flags & ASYNC_INITIALIZED)) |
492 | return; | 450 | return; |
493 | 451 | ||
494 | spin_lock_irqsave(&epca_lock, flags); | 452 | spin_lock_irqsave(&epca_lock, flags); |
@@ -503,7 +461,6 @@ static void shutdown(struct channel *ch) | |||
503 | */ | 461 | */ |
504 | if (bc) | 462 | if (bc) |
505 | writeb(0, &bc->idata); | 463 | writeb(0, &bc->idata); |
506 | tty = ch->port.tty; | ||
507 | 464 | ||
508 | /* If we're a modem control device and HUPCL is on, drop RTS & DTR. */ | 465 | /* If we're a modem control device and HUPCL is on, drop RTS & DTR. */ |
509 | if (tty->termios->c_cflag & HUPCL) { | 466 | if (tty->termios->c_cflag & HUPCL) { |
@@ -517,32 +474,26 @@ static void shutdown(struct channel *ch) | |||
517 | * will have to be reinitialized. Set a flag to indicate this. | 474 | * will have to be reinitialized. Set a flag to indicate this. |
518 | */ | 475 | */ |
519 | /* Prevent future Digi programmed interrupts from coming active */ | 476 | /* Prevent future Digi programmed interrupts from coming active */ |
520 | ch->port.flags &= ~ASYNC_INITIALIZED; | 477 | port->flags &= ~ASYNC_INITIALIZED; |
521 | spin_unlock_irqrestore(&epca_lock, flags); | 478 | spin_unlock_irqrestore(&epca_lock, flags); |
522 | } | 479 | } |
523 | 480 | ||
524 | static void pc_hangup(struct tty_struct *tty) | 481 | static void pc_hangup(struct tty_struct *tty) |
525 | { | 482 | { |
526 | struct channel *ch; | 483 | struct channel *ch; |
484 | |||
527 | /* | 485 | /* |
528 | * verifyChannel returns the channel from the tty struct if it is | 486 | * verifyChannel returns the channel from the tty struct if it is |
529 | * valid. This serves as a sanity check. | 487 | * valid. This serves as a sanity check. |
530 | */ | 488 | */ |
531 | ch = verifyChannel(tty); | 489 | ch = verifyChannel(tty); |
532 | if (ch != NULL) { | 490 | if (ch != NULL) { |
533 | unsigned long flags; | ||
534 | |||
535 | pc_flush_buffer(tty); | 491 | pc_flush_buffer(tty); |
536 | tty_ldisc_flush(tty); | 492 | tty_ldisc_flush(tty); |
537 | shutdown(ch); | 493 | shutdown(ch, tty); |
538 | 494 | ||
539 | spin_lock_irqsave(&epca_lock, flags); | 495 | ch->event = 0; /* FIXME: review locking of ch->event */ |
540 | ch->port.tty = NULL; | 496 | tty_port_hangup(&ch->port); |
541 | ch->event = 0; | ||
542 | ch->port.count = 0; | ||
543 | ch->port.flags &= ~(ASYNC_NORMAL_ACTIVE | ASYNC_INITIALIZED); | ||
544 | spin_unlock_irqrestore(&epca_lock, flags); | ||
545 | wake_up_interruptible(&ch->port.open_wait); | ||
546 | } | 497 | } |
547 | } | 498 | } |
548 | 499 | ||
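
Editor's note: the pc_close() rewrite above delegates reference counting and wakeups to the tty_port helpers: tty_port_close_start() returns 0 when this is not the final close (or the filp was hung up), and tty_port_close_end() handles close_delay and wakes blocked openers. A sketch of the shape, with the driver-specific flush/shutdown left as a placeholder:

#include <linux/tty.h>

static void example_close(struct tty_port *port, struct tty_struct *tty,
			  struct file *filp)
{
	if (tty_port_close_start(port, tty, filp) == 0)
		return;			/* not the last close */

	/* ... driver-specific buffer flush and hardware shutdown ... */

	tty_port_close_end(port, tty);
	tty_port_tty_set(port, NULL);
}
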
@@ -786,100 +737,22 @@ static void pc_flush_chars(struct tty_struct *tty) | |||
786 | } | 737 | } |
787 | } | 738 | } |
788 | 739 | ||
789 | static int block_til_ready(struct tty_struct *tty, | 740 | static int epca_carrier_raised(struct tty_port *port) |
790 | struct file *filp, struct channel *ch) | ||
791 | { | 741 | { |
792 | DECLARE_WAITQUEUE(wait, current); | 742 | struct channel *ch = container_of(port, struct channel, port); |
793 | int retval, do_clocal = 0; | 743 | if (ch->imodem & ch->dcd) |
794 | unsigned long flags; | 744 | return 1; |
795 | |||
796 | if (tty_hung_up_p(filp)) { | ||
797 | if (ch->port.flags & ASYNC_HUP_NOTIFY) | ||
798 | retval = -EAGAIN; | ||
799 | else | ||
800 | retval = -ERESTARTSYS; | ||
801 | return retval; | ||
802 | } | ||
803 | |||
804 | /* | ||
805 | * If the device is in the middle of being closed, then block until | ||
806 | * it's done, and then try again. | ||
807 | */ | ||
808 | if (ch->port.flags & ASYNC_CLOSING) { | ||
809 | interruptible_sleep_on(&ch->port.close_wait); | ||
810 | |||
811 | if (ch->port.flags & ASYNC_HUP_NOTIFY) | ||
812 | return -EAGAIN; | ||
813 | else | ||
814 | return -ERESTARTSYS; | ||
815 | } | ||
816 | |||
817 | if (filp->f_flags & O_NONBLOCK) { | ||
818 | /* | ||
819 | * If non-blocking mode is set, then make the check up front | ||
820 | * and then exit. | ||
821 | */ | ||
822 | ch->port.flags |= ASYNC_NORMAL_ACTIVE; | ||
823 | return 0; | ||
824 | } | ||
825 | if (tty->termios->c_cflag & CLOCAL) | ||
826 | do_clocal = 1; | ||
827 | /* Block waiting for the carrier detect and the line to become free */ | ||
828 | |||
829 | retval = 0; | ||
830 | add_wait_queue(&ch->port.open_wait, &wait); | ||
831 | |||
832 | spin_lock_irqsave(&epca_lock, flags); | ||
833 | /* We dec count so that pc_close will know when to free things */ | ||
834 | if (!tty_hung_up_p(filp)) | ||
835 | ch->port.count--; | ||
836 | ch->port.blocked_open++; | ||
837 | while (1) { | ||
838 | set_current_state(TASK_INTERRUPTIBLE); | ||
839 | if (tty_hung_up_p(filp) || | ||
840 | !(ch->port.flags & ASYNC_INITIALIZED)) { | ||
841 | if (ch->port.flags & ASYNC_HUP_NOTIFY) | ||
842 | retval = -EAGAIN; | ||
843 | else | ||
844 | retval = -ERESTARTSYS; | ||
845 | break; | ||
846 | } | ||
847 | if (!(ch->port.flags & ASYNC_CLOSING) && | ||
848 | (do_clocal || (ch->imodem & ch->dcd))) | ||
849 | break; | ||
850 | if (signal_pending(current)) { | ||
851 | retval = -ERESTARTSYS; | ||
852 | break; | ||
853 | } | ||
854 | spin_unlock_irqrestore(&epca_lock, flags); | ||
855 | /* | ||
856 | * Allow someone else to be scheduled. We will occasionally go | ||
857 | * through this loop until one of the above conditions change. | ||
858 | * The below schedule call will allow other processes to enter | ||
859 | * and prevent this loop from hogging the cpu. | ||
860 | */ | ||
861 | schedule(); | ||
862 | spin_lock_irqsave(&epca_lock, flags); | ||
863 | } | ||
864 | |||
865 | __set_current_state(TASK_RUNNING); | ||
866 | remove_wait_queue(&ch->port.open_wait, &wait); | ||
867 | if (!tty_hung_up_p(filp)) | ||
868 | ch->port.count++; | ||
869 | ch->port.blocked_open--; | ||
870 | |||
871 | spin_unlock_irqrestore(&epca_lock, flags); | ||
872 | |||
873 | if (retval) | ||
874 | return retval; | ||
875 | |||
876 | ch->port.flags |= ASYNC_NORMAL_ACTIVE; | ||
877 | return 0; | 745 | return 0; |
878 | } | 746 | } |
879 | 747 | ||
748 | static void epca_raise_dtr_rts(struct tty_port *port) | ||
749 | { | ||
750 | } | ||
751 | |||
880 | static int pc_open(struct tty_struct *tty, struct file *filp) | 752 | static int pc_open(struct tty_struct *tty, struct file *filp) |
881 | { | 753 | { |
882 | struct channel *ch; | 754 | struct channel *ch; |
755 | struct tty_port *port; | ||
883 | unsigned long flags; | 756 | unsigned long flags; |
884 | int line, retval, boardnum; | 757 | int line, retval, boardnum; |
885 | struct board_chan __iomem *bc; | 758 | struct board_chan __iomem *bc; |
@@ -890,6 +763,7 @@ static int pc_open(struct tty_struct *tty, struct file *filp) | |||
890 | return -ENODEV; | 763 | return -ENODEV; |
891 | 764 | ||
892 | ch = &digi_channels[line]; | 765 | ch = &digi_channels[line]; |
766 | port = &ch->port; | ||
893 | boardnum = ch->boardnum; | 767 | boardnum = ch->boardnum; |
894 | 768 | ||
895 | /* Check status of board configured in system. */ | 769 | /* Check status of board configured in system. */ |
@@ -926,22 +800,24 @@ static int pc_open(struct tty_struct *tty, struct file *filp) | |||
926 | return -ENODEV; | 800 | return -ENODEV; |
927 | } | 801 | } |
928 | 802 | ||
929 | spin_lock_irqsave(&epca_lock, flags); | 803 | spin_lock_irqsave(&port->lock, flags); |
930 | /* | 804 | /* |
931 | * Every time a channel is opened, increment a counter. This is | 805 | * Every time a channel is opened, increment a counter. This is |
932 | * necessary because we do not wish to flush and shutdown the channel | 806 | * necessary because we do not wish to flush and shutdown the channel |
933 | * until the last app holding the channel open, closes it. | 807 | * until the last app holding the channel open, closes it. |
934 | */ | 808 | */ |
935 | ch->port.count++; | 809 | port->count++; |
936 | /* | 810 | /* |
937 | * Set a kernel structure's pointer to our local channel structure. This | 811 | * Set a kernel structure's pointer to our local channel structure. This |
938 | * way we can get to it when passed only a tty struct. | 812 | * way we can get to it when passed only a tty struct. |
939 | */ | 813 | */ |
940 | tty->driver_data = ch; | 814 | tty->driver_data = ch; |
815 | port->tty = tty; | ||
941 | /* | 816 | /* |
942 | * If this is the first time the channel has been opened, initialize | 817 | * If this is the first time the channel has been opened, initialize |
943 | * the tty->termios struct otherwise let pc_close handle it. | 818 | * the tty->termios struct otherwise let pc_close handle it. |
944 | */ | 819 | */ |
820 | spin_lock(&epca_lock); | ||
945 | globalwinon(ch); | 821 | globalwinon(ch); |
946 | ch->statusflags = 0; | 822 | ch->statusflags = 0; |
947 | 823 | ||
@@ -956,31 +832,33 @@ static int pc_open(struct tty_struct *tty, struct file *filp) | |||
956 | writew(head, &bc->rout); | 832 | writew(head, &bc->rout); |
957 | 833 | ||
958 | /* Set the channels associated tty structure */ | 834 | /* Set the channels associated tty structure */ |
959 | ch->port.tty = tty; | ||
960 | 835 | ||
961 | /* | 836 | /* |
962 | * The below routine generally sets up parity, baud, flow control | 837 | * The below routine generally sets up parity, baud, flow control |
963 | * issues, etc.... It affects both control flags and input flags. | 838 | * issues, etc.... It affects both control flags and input flags. |
964 | */ | 839 | */ |
965 | epcaparam(tty, ch); | 840 | epcaparam(tty, ch); |
966 | ch->port.flags |= ASYNC_INITIALIZED; | ||
967 | memoff(ch); | 841 | memoff(ch); |
968 | spin_unlock_irqrestore(&epca_lock, flags); | 842 | spin_unlock(&epca_lock); |
843 | port->flags |= ASYNC_INITIALIZED; | ||
844 | spin_unlock_irqrestore(&port->lock, flags); | ||
969 | 845 | ||
970 | retval = block_til_ready(tty, filp, ch); | 846 | retval = tty_port_block_til_ready(port, tty, filp); |
971 | if (retval) | 847 | if (retval) |
972 | return retval; | 848 | return retval; |
973 | /* | 849 | /* |
974 | * Set this again in case a hangup set it to zero while this open() was | 850 | * Set this again in case a hangup set it to zero while this open() was |
975 | * waiting for the line... | 851 | * waiting for the line... |
976 | */ | 852 | */ |
977 | spin_lock_irqsave(&epca_lock, flags); | 853 | spin_lock_irqsave(&port->lock, flags); |
978 | ch->port.tty = tty; | 854 | port->tty = tty; |
855 | spin_lock(&epca_lock); | ||
979 | globalwinon(ch); | 856 | globalwinon(ch); |
980 | /* Enable Digi Data events */ | 857 | /* Enable Digi Data events */ |
981 | writeb(1, &bc->idata); | 858 | writeb(1, &bc->idata); |
982 | memoff(ch); | 859 | memoff(ch); |
983 | spin_unlock_irqrestore(&epca_lock, flags); | 860 | spin_unlock(&epca_lock); |
861 | spin_unlock_irqrestore(&port->lock, flags); | ||
984 | return 0; | 862 | return 0; |
985 | } | 863 | } |
986 | 864 | ||
@@ -1016,8 +894,11 @@ static void __exit epca_module_exit(void) | |||
1016 | } | 894 | } |
1017 | ch = card_ptr[crd]; | 895 | ch = card_ptr[crd]; |
1018 | for (count = 0; count < bd->numports; count++, ch++) { | 896 | for (count = 0; count < bd->numports; count++, ch++) { |
1019 | if (ch && ch->port.tty) | 897 | struct tty_struct *tty = tty_port_tty_get(&ch->port); |
1020 | tty_hangup(ch->port.tty); | 898 | if (tty) { |
899 | tty_hangup(tty); | ||
900 | tty_kref_put(tty); | ||
901 | } | ||
1021 | } | 902 | } |
1022 | } | 903 | } |
1023 | pci_unregister_driver(&epca_driver); | 904 | pci_unregister_driver(&epca_driver); |
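
Editor's note: the hunk above (and the doevent()/do_softint() changes below) replace raw ch->port.tty accesses with the reference-counted accessor: tty_port_tty_get() returns the port's tty with a kref held, or NULL, and tty_kref_put() drops the reference (and accepts NULL). The pattern in isolation:

#include <linux/tty.h>

static void example_hangup_port(struct tty_port *port)
{
	struct tty_struct *tty = tty_port_tty_get(port);

	if (tty) {
		tty_hangup(tty);
		tty_kref_put(tty);	/* drop the reference from _get() */
	}
}
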
@@ -1042,6 +923,11 @@ static const struct tty_operations pc_ops = { | |||
1042 | .break_ctl = pc_send_break | 923 | .break_ctl = pc_send_break |
1043 | }; | 924 | }; |
1044 | 925 | ||
926 | static const struct tty_port_operations epca_port_ops = { | ||
927 | .carrier_raised = epca_carrier_raised, | ||
928 | .raise_dtr_rts = epca_raise_dtr_rts, | ||
929 | }; | ||
930 | |||
1045 | static int info_open(struct tty_struct *tty, struct file *filp) | 931 | static int info_open(struct tty_struct *tty, struct file *filp) |
1046 | { | 932 | { |
1047 | return 0; | 933 | return 0; |
@@ -1377,6 +1263,7 @@ static void post_fep_init(unsigned int crd) | |||
1377 | u16 tseg, rseg; | 1263 | u16 tseg, rseg; |
1378 | 1264 | ||
1379 | tty_port_init(&ch->port); | 1265 | tty_port_init(&ch->port); |
1266 | ch->port.ops = &epca_port_ops; | ||
1380 | ch->brdchan = bc; | 1267 | ch->brdchan = bc; |
1381 | ch->mailbox = gd; | 1268 | ch->mailbox = gd; |
1382 | INIT_WORK(&ch->tqueue, do_softint); | 1269 | INIT_WORK(&ch->tqueue, do_softint); |
@@ -1428,7 +1315,7 @@ static void post_fep_init(unsigned int crd) | |||
1428 | ch->boardnum = crd; | 1315 | ch->boardnum = crd; |
1429 | ch->channelnum = i; | 1316 | ch->channelnum = i; |
1430 | ch->magic = EPCA_MAGIC; | 1317 | ch->magic = EPCA_MAGIC; |
1431 | ch->port.tty = NULL; | 1318 | tty_port_tty_set(&ch->port, NULL); |
1432 | 1319 | ||
1433 | if (shrinkmem) { | 1320 | if (shrinkmem) { |
1434 | fepcmd(ch, SETBUFFER, 32, 0, 0, 0); | 1321 | fepcmd(ch, SETBUFFER, 32, 0, 0, 0); |
@@ -1510,7 +1397,7 @@ static void post_fep_init(unsigned int crd) | |||
1510 | ch->fepstartca = 0; | 1397 | ch->fepstartca = 0; |
1511 | ch->fepstopca = 0; | 1398 | ch->fepstopca = 0; |
1512 | 1399 | ||
1513 | ch->close_delay = 50; | 1400 | ch->port.close_delay = 50; |
1514 | 1401 | ||
1515 | spin_unlock_irqrestore(&epca_lock, flags); | 1402 | spin_unlock_irqrestore(&epca_lock, flags); |
1516 | } | 1403 | } |
@@ -1622,15 +1509,16 @@ static void doevent(int crd) | |||
1622 | if (bc == NULL) | 1509 | if (bc == NULL) |
1623 | goto next; | 1510 | goto next; |
1624 | 1511 | ||
1512 | tty = tty_port_tty_get(&ch->port); | ||
1625 | if (event & DATA_IND) { /* Begin DATA_IND */ | 1513 | if (event & DATA_IND) { /* Begin DATA_IND */ |
1626 | receive_data(ch); | 1514 | receive_data(ch, tty); |
1627 | assertgwinon(ch); | 1515 | assertgwinon(ch); |
1628 | } /* End DATA_IND */ | 1516 | } /* End DATA_IND */ |
1629 | /* else *//* Fix for DCD transition missed bug */ | 1517 | /* else *//* Fix for DCD transition missed bug */ |
1630 | if (event & MODEMCHG_IND) { | 1518 | if (event & MODEMCHG_IND) { |
1631 | /* A modem signal change has been indicated */ | 1519 | /* A modem signal change has been indicated */ |
1632 | ch->imodem = mstat; | 1520 | ch->imodem = mstat; |
1633 | if (ch->port.flags & ASYNC_CHECK_CD) { | 1521 | if (test_bit(ASYNC_CHECK_CD, &ch->port.flags)) { |
1634 | /* We are now receiving dcd */ | 1522 | /* We are now receiving dcd */ |
1635 | if (mstat & ch->dcd) | 1523 | if (mstat & ch->dcd) |
1636 | wake_up_interruptible(&ch->port.open_wait); | 1524 | wake_up_interruptible(&ch->port.open_wait); |
@@ -1638,7 +1526,6 @@ static void doevent(int crd) | |||
1638 | pc_sched_event(ch, EPCA_EVENT_HANGUP); | 1526 | pc_sched_event(ch, EPCA_EVENT_HANGUP); |
1639 | } | 1527 | } |
1640 | } | 1528 | } |
1641 | tty = ch->port.tty; | ||
1642 | if (tty) { | 1529 | if (tty) { |
1643 | if (event & BREAK_IND) { | 1530 | if (event & BREAK_IND) { |
1644 | /* A break has been indicated */ | 1531 | /* A break has been indicated */ |
@@ -1658,6 +1545,7 @@ static void doevent(int crd) | |||
1658 | tty_wakeup(tty); | 1545 | tty_wakeup(tty); |
1659 | } | 1546 | } |
1660 | } | 1547 | } |
1548 | tty_kref_put(tty); | ||
1661 | } | 1549 | } |
1662 | next: | 1550 | next: |
1663 | globalwinon(ch); | 1551 | globalwinon(ch); |
@@ -1877,9 +1765,9 @@ static void epcaparam(struct tty_struct *tty, struct channel *ch) | |||
1877 | * that the driver will wait on carrier detect. | 1765 | * that the driver will wait on carrier detect. |
1878 | */ | 1766 | */ |
1879 | if (ts->c_cflag & CLOCAL) | 1767 | if (ts->c_cflag & CLOCAL) |
1880 | ch->port.flags &= ~ASYNC_CHECK_CD; | 1768 | clear_bit(ASYNC_CHECK_CD, &ch->port.flags); |
1881 | else | 1769 | else |
1882 | ch->port.flags |= ASYNC_CHECK_CD; | 1770 | set_bit(ASYNC_CHECK_CD, &ch->port.flags); |
1883 | mval = ch->m_dtr | ch->m_rts; | 1771 | mval = ch->m_dtr | ch->m_rts; |
1884 | } /* End CBAUD not detected */ | 1772 | } /* End CBAUD not detected */ |
1885 | iflag = termios2digi_i(ch, ts->c_iflag); | 1773 | iflag = termios2digi_i(ch, ts->c_iflag); |
@@ -1952,11 +1840,10 @@ static void epcaparam(struct tty_struct *tty, struct channel *ch) | |||
1952 | } | 1840 | } |
1953 | 1841 | ||
1954 | /* Caller holds lock */ | 1842 | /* Caller holds lock */ |
1955 | static void receive_data(struct channel *ch) | 1843 | static void receive_data(struct channel *ch, struct tty_struct *tty) |
1956 | { | 1844 | { |
1957 | unchar *rptr; | 1845 | unchar *rptr; |
1958 | struct ktermios *ts = NULL; | 1846 | struct ktermios *ts = NULL; |
1959 | struct tty_struct *tty; | ||
1960 | struct board_chan __iomem *bc; | 1847 | struct board_chan __iomem *bc; |
1961 | int dataToRead, wrapgap, bytesAvailable; | 1848 | int dataToRead, wrapgap, bytesAvailable; |
1962 | unsigned int tail, head; | 1849 | unsigned int tail, head; |
@@ -1969,7 +1856,6 @@ static void receive_data(struct channel *ch) | |||
1969 | globalwinon(ch); | 1856 | globalwinon(ch); |
1970 | if (ch->statusflags & RXSTOPPED) | 1857 | if (ch->statusflags & RXSTOPPED) |
1971 | return; | 1858 | return; |
1972 | tty = ch->port.tty; | ||
1973 | if (tty) | 1859 | if (tty) |
1974 | ts = tty->termios; | 1860 | ts = tty->termios; |
1975 | bc = ch->brdchan; | 1861 | bc = ch->brdchan; |
@@ -2029,7 +1915,7 @@ static void receive_data(struct channel *ch) | |||
2029 | globalwinon(ch); | 1915 | globalwinon(ch); |
2030 | writew(tail, &bc->rout); | 1916 | writew(tail, &bc->rout); |
2031 | /* Must be called with global data */ | 1917 | /* Must be called with global data */ |
2032 | tty_schedule_flip(ch->port.tty); | 1918 | tty_schedule_flip(tty); |
2033 | } | 1919 | } |
2034 | 1920 | ||
2035 | static int info_ioctl(struct tty_struct *tty, struct file *file, | 1921 | static int info_ioctl(struct tty_struct *tty, struct file *file, |
@@ -2097,7 +1983,7 @@ static int info_ioctl(struct tty_struct *tty, struct file *file, | |||
2097 | 1983 | ||
2098 | static int pc_tiocmget(struct tty_struct *tty, struct file *file) | 1984 | static int pc_tiocmget(struct tty_struct *tty, struct file *file) |
2099 | { | 1985 | { |
2100 | struct channel *ch = (struct channel *) tty->driver_data; | 1986 | struct channel *ch = tty->driver_data; |
2101 | struct board_chan __iomem *bc; | 1987 | struct board_chan __iomem *bc; |
2102 | unsigned int mstat, mflag = 0; | 1988 | unsigned int mstat, mflag = 0; |
2103 | unsigned long flags; | 1989 | unsigned long flags; |
@@ -2131,7 +2017,7 @@ static int pc_tiocmget(struct tty_struct *tty, struct file *file) | |||
2131 | static int pc_tiocmset(struct tty_struct *tty, struct file *file, | 2017 | static int pc_tiocmset(struct tty_struct *tty, struct file *file, |
2132 | unsigned int set, unsigned int clear) | 2018 | unsigned int set, unsigned int clear) |
2133 | { | 2019 | { |
2134 | struct channel *ch = (struct channel *) tty->driver_data; | 2020 | struct channel *ch = tty->driver_data; |
2135 | unsigned long flags; | 2021 | unsigned long flags; |
2136 | 2022 | ||
2137 | if (!ch) | 2023 | if (!ch) |
@@ -2178,7 +2064,7 @@ static int pc_ioctl(struct tty_struct *tty, struct file *file, | |||
2178 | unsigned int mflag, mstat; | 2064 | unsigned int mflag, mstat; |
2179 | unsigned char startc, stopc; | 2065 | unsigned char startc, stopc; |
2180 | struct board_chan __iomem *bc; | 2066 | struct board_chan __iomem *bc; |
2181 | struct channel *ch = (struct channel *) tty->driver_data; | 2067 | struct channel *ch = tty->driver_data; |
2182 | void __user *argp = (void __user *)arg; | 2068 | void __user *argp = (void __user *)arg; |
2183 | 2069 | ||
2184 | if (ch) | 2070 | if (ch) |
@@ -2352,15 +2238,16 @@ static void do_softint(struct work_struct *work) | |||
2352 | struct channel *ch = container_of(work, struct channel, tqueue); | 2238 | struct channel *ch = container_of(work, struct channel, tqueue); |
2353 | /* Called in response to a modem change event */ | 2239 | /* Called in response to a modem change event */ |
2354 | if (ch && ch->magic == EPCA_MAGIC) { | 2240 | if (ch && ch->magic == EPCA_MAGIC) { |
2355 | struct tty_struct *tty = ch->port.tty; | 2241 | struct tty_struct *tty = tty_port_tty_get(&ch->port); |
2356 | 2242 | ||
2357 | if (tty && tty->driver_data) { | 2243 | if (tty && tty->driver_data) { |
2358 | if (test_and_clear_bit(EPCA_EVENT_HANGUP, &ch->event)) { | 2244 | if (test_and_clear_bit(EPCA_EVENT_HANGUP, &ch->event)) { |
2359 | tty_hangup(tty); | 2245 | tty_hangup(tty); |
2360 | wake_up_interruptible(&ch->port.open_wait); | 2246 | wake_up_interruptible(&ch->port.open_wait); |
2361 | ch->port.flags &= ~ASYNC_NORMAL_ACTIVE; | 2247 | clear_bit(ASYNCB_NORMAL_ACTIVE, &ch->port.flags); |
2362 | } | 2248 | } |
2363 | } | 2249 | } |
2250 | tty_kref_put(tty); | ||
2364 | } | 2251 | } |
2365 | } | 2252 | } |
2366 | 2253 | ||
@@ -2473,7 +2360,7 @@ static void pc_unthrottle(struct tty_struct *tty) | |||
2473 | 2360 | ||
2474 | static int pc_send_break(struct tty_struct *tty, int msec) | 2361 | static int pc_send_break(struct tty_struct *tty, int msec) |
2475 | { | 2362 | { |
2476 | struct channel *ch = (struct channel *) tty->driver_data; | 2363 | struct channel *ch = tty->driver_data; |
2477 | unsigned long flags; | 2364 | unsigned long flags; |
2478 | 2365 | ||
2479 | if (msec == -1) | 2366 | if (msec == -1) |
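
The epca changes above replace direct ch->port.tty dereferences with the ref-counted accessors, so the tty cannot be freed by a concurrent hangup while an event handler is still using it. A minimal sketch of the pattern, with example_event() as a hypothetical caller; tty_port_tty_get() and tty_kref_put() are the real helpers:

	#include <linux/tty.h>

	static void example_event(struct tty_port *port)
	{
		/* takes a reference; returns NULL if no tty is attached */
		struct tty_struct *tty = tty_port_tty_get(port);

		if (tty)
			tty_wakeup(tty);
		tty_kref_put(tty);		/* safe to call with NULL */
	}
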
diff --git a/drivers/char/esp.c b/drivers/char/esp.c index 7f077c0097f6..45ec263ec012 100644 --- a/drivers/char/esp.c +++ b/drivers/char/esp.c | |||
@@ -2054,6 +2054,15 @@ static void esp_hangup(struct tty_struct *tty) | |||
2054 | wake_up_interruptible(&info->port.open_wait); | 2054 | wake_up_interruptible(&info->port.open_wait); |
2055 | } | 2055 | } |
2056 | 2056 | ||
2057 | static int esp_carrier_raised(struct tty_port *port) | ||
2058 | { | ||
2059 | struct esp_struct *info = container_of(port, struct esp_struct, port); | ||
2060 | serial_out(info, UART_ESI_CMD1, ESI_GET_UART_STAT); | ||
2061 | if (serial_in(info, UART_ESI_STAT2) & UART_MSR_DCD) | ||
2062 | return 1; | ||
2063 | return 0; | ||
2064 | } | ||
2065 | |||
2057 | /* | 2066 | /* |
2058 | * ------------------------------------------------------------ | 2067 | * ------------------------------------------------------------ |
2059 | * esp_open() and friends | 2068 | * esp_open() and friends |
@@ -2066,17 +2075,19 @@ static int block_til_ready(struct tty_struct *tty, struct file *filp, | |||
2066 | int retval; | 2075 | int retval; |
2067 | int do_clocal = 0; | 2076 | int do_clocal = 0; |
2068 | unsigned long flags; | 2077 | unsigned long flags; |
2078 | int cd; | ||
2079 | struct tty_port *port = &info->port; | ||
2069 | 2080 | ||
2070 | /* | 2081 | /* |
2071 | * If the device is in the middle of being closed, then block | 2082 | * If the device is in the middle of being closed, then block |
2072 | * until it's done, and then try again. | 2083 | * until it's done, and then try again. |
2073 | */ | 2084 | */ |
2074 | if (tty_hung_up_p(filp) || | 2085 | if (tty_hung_up_p(filp) || |
2075 | (info->port.flags & ASYNC_CLOSING)) { | 2086 | (port->flags & ASYNC_CLOSING)) { |
2076 | if (info->port.flags & ASYNC_CLOSING) | 2087 | if (port->flags & ASYNC_CLOSING) |
2077 | interruptible_sleep_on(&info->port.close_wait); | 2088 | interruptible_sleep_on(&port->close_wait); |
2078 | #ifdef SERIAL_DO_RESTART | 2089 | #ifdef SERIAL_DO_RESTART |
2079 | if (info->port.flags & ASYNC_HUP_NOTIFY) | 2090 | if (port->flags & ASYNC_HUP_NOTIFY) |
2080 | return -EAGAIN; | 2091 | return -EAGAIN; |
2081 | else | 2092 | else |
2082 | return -ERESTARTSYS; | 2093 | return -ERESTARTSYS; |
@@ -2091,7 +2102,7 @@ static int block_til_ready(struct tty_struct *tty, struct file *filp, | |||
2091 | */ | 2102 | */ |
2092 | if ((filp->f_flags & O_NONBLOCK) || | 2103 | if ((filp->f_flags & O_NONBLOCK) || |
2093 | (tty->flags & (1 << TTY_IO_ERROR))) { | 2104 | (tty->flags & (1 << TTY_IO_ERROR))) { |
2094 | info->port.flags |= ASYNC_NORMAL_ACTIVE; | 2105 | port->flags |= ASYNC_NORMAL_ACTIVE; |
2095 | return 0; | 2106 | return 0; |
2096 | } | 2107 | } |
2097 | 2108 | ||
@@ -2101,20 +2112,20 @@ static int block_til_ready(struct tty_struct *tty, struct file *filp, | |||
2101 | /* | 2112 | /* |
2102 | * Block waiting for the carrier detect and the line to become | 2113 | * Block waiting for the carrier detect and the line to become |
2103 | * free (i.e., not in use by the callout). While we are in | 2114 | * free (i.e., not in use by the callout). While we are in |
2104 | * this loop, info->port.count is dropped by one, so that | 2115 | * this loop, port->count is dropped by one, so that |
2105 | * rs_close() knows when to free things. We restore it upon | 2116 | * rs_close() knows when to free things. We restore it upon |
2106 | * exit, either normal or abnormal. | 2117 | * exit, either normal or abnormal. |
2107 | */ | 2118 | */ |
2108 | retval = 0; | 2119 | retval = 0; |
2109 | add_wait_queue(&info->port.open_wait, &wait); | 2120 | add_wait_queue(&port->open_wait, &wait); |
2110 | #ifdef SERIAL_DEBUG_OPEN | 2121 | #ifdef SERIAL_DEBUG_OPEN |
2111 | printk(KERN_DEBUG "block_til_ready before block: ttys%d, count = %d\n", | 2122 | printk(KERN_DEBUG "block_til_ready before block: ttys%d, count = %d\n", |
2112 | info->line, info->port.count); | 2123 | info->line, port->count); |
2113 | #endif | 2124 | #endif |
2114 | spin_lock_irqsave(&info->lock, flags); | 2125 | spin_lock_irqsave(&info->lock, flags); |
2115 | if (!tty_hung_up_p(filp)) | 2126 | if (!tty_hung_up_p(filp)) |
2116 | info->port.count--; | 2127 | port->count--; |
2117 | info->port.blocked_open++; | 2128 | port->blocked_open++; |
2118 | while (1) { | 2129 | while (1) { |
2119 | if ((tty->termios->c_cflag & CBAUD)) { | 2130 | if ((tty->termios->c_cflag & CBAUD)) { |
2120 | unsigned int scratch; | 2131 | unsigned int scratch; |
@@ -2129,9 +2140,9 @@ static int block_til_ready(struct tty_struct *tty, struct file *filp, | |||
2129 | } | 2140 | } |
2130 | set_current_state(TASK_INTERRUPTIBLE); | 2141 | set_current_state(TASK_INTERRUPTIBLE); |
2131 | if (tty_hung_up_p(filp) || | 2142 | if (tty_hung_up_p(filp) || |
2132 | !(info->port.flags & ASYNC_INITIALIZED)) { | 2143 | !(port->flags & ASYNC_INITIALIZED)) { |
2133 | #ifdef SERIAL_DO_RESTART | 2144 | #ifdef SERIAL_DO_RESTART |
2134 | if (info->port.flags & ASYNC_HUP_NOTIFY) | 2145 | if (port->flags & ASYNC_HUP_NOTIFY) |
2135 | retval = -EAGAIN; | 2146 | retval = -EAGAIN; |
2136 | else | 2147 | else |
2137 | retval = -ERESTARTSYS; | 2148 | retval = -ERESTARTSYS; |
@@ -2141,11 +2152,9 @@ static int block_til_ready(struct tty_struct *tty, struct file *filp, | |||
2141 | break; | 2152 | break; |
2142 | } | 2153 | } |
2143 | 2154 | ||
2144 | serial_out(info, UART_ESI_CMD1, ESI_GET_UART_STAT); | 2155 | cd = tty_port_carrier_raised(port); |
2145 | if (serial_in(info, UART_ESI_STAT2) & UART_MSR_DCD) | ||
2146 | do_clocal = 1; | ||
2147 | 2156 | ||
2148 | if (!(info->port.flags & ASYNC_CLOSING) && | 2157 | if (!(port->flags & ASYNC_CLOSING) && |
2149 | (do_clocal)) | 2158 | (do_clocal || cd)) |
2150 | break; | 2159 | break; |
2151 | if (signal_pending(current)) { | 2160 | if (signal_pending(current)) { |
@@ -2154,25 +2163,25 @@ static int block_til_ready(struct tty_struct *tty, struct file *filp, | |||
2154 | } | 2163 | } |
2155 | #ifdef SERIAL_DEBUG_OPEN | 2164 | #ifdef SERIAL_DEBUG_OPEN |
2156 | printk(KERN_DEBUG "block_til_ready blocking: ttys%d, count = %d\n", | 2165 | printk(KERN_DEBUG "block_til_ready blocking: ttys%d, count = %d\n", |
2157 | info->line, info->port.count); | 2166 | info->line, port->count); |
2158 | #endif | 2167 | #endif |
2159 | spin_unlock_irqrestore(&info->lock, flags); | 2168 | spin_unlock_irqrestore(&info->lock, flags); |
2160 | schedule(); | 2169 | schedule(); |
2161 | spin_lock_irqsave(&info->lock, flags); | 2170 | spin_lock_irqsave(&info->lock, flags); |
2162 | } | 2171 | } |
2163 | set_current_state(TASK_RUNNING); | 2172 | set_current_state(TASK_RUNNING); |
2164 | remove_wait_queue(&info->port.open_wait, &wait); | 2173 | remove_wait_queue(&port->open_wait, &wait); |
2165 | if (!tty_hung_up_p(filp)) | 2174 | if (!tty_hung_up_p(filp)) |
2166 | info->port.count++; | 2175 | port->count++; |
2167 | info->port.blocked_open--; | 2176 | port->blocked_open--; |
2168 | spin_unlock_irqrestore(&info->lock, flags); | 2177 | spin_unlock_irqrestore(&info->lock, flags); |
2169 | #ifdef SERIAL_DEBUG_OPEN | 2178 | #ifdef SERIAL_DEBUG_OPEN |
2170 | printk(KERN_DEBUG "block_til_ready after blocking: ttys%d, count = %d\n", | 2179 | printk(KERN_DEBUG "block_til_ready after blocking: ttys%d, count = %d\n", |
2171 | info->line, info->port.count); | 2180 | info->line, port->count); |
2172 | #endif | 2181 | #endif |
2173 | if (retval) | 2182 | if (retval) |
2174 | return retval; | 2183 | return retval; |
2175 | info->port.flags |= ASYNC_NORMAL_ACTIVE; | 2184 | port->flags |= ASYNC_NORMAL_ACTIVE; |
2176 | return 0; | 2185 | return 0; |
2177 | } | 2186 | } |
2178 | 2187 | ||
@@ -2329,6 +2338,10 @@ static const struct tty_operations esp_ops = { | |||
2329 | .tiocmset = esp_tiocmset, | 2338 | .tiocmset = esp_tiocmset, |
2330 | }; | 2339 | }; |
2331 | 2340 | ||
2341 | static const struct tty_port_operations esp_port_ops = { | ||
2342 | .carrier_raised = esp_carrier_raised, | ||
2343 | }; | ||
2344 | |||
2332 | /* | 2345 | /* |
2333 | * The serial driver boot-time initialization code! | 2346 | * The serial driver boot-time initialization code! |
2334 | */ | 2347 | */ |
@@ -2415,6 +2428,8 @@ static int __init espserial_init(void) | |||
2415 | offset = 0; | 2428 | offset = 0; |
2416 | 2429 | ||
2417 | do { | 2430 | do { |
2431 | tty_port_init(&info->port); | ||
2432 | info->port.ops = &esp_port_ops; | ||
2418 | info->io_port = esp[i] + offset; | 2433 | info->io_port = esp[i] + offset; |
2419 | info->irq = irq[i]; | 2434 | info->irq = irq[i]; |
2420 | info->line = (i * 8) + (offset / 8); | 2435 | info->line = (i * 8) + (offset / 8); |
@@ -2437,8 +2452,6 @@ static int __init espserial_init(void) | |||
2437 | info->config.flow_off = flow_off; | 2452 | info->config.flow_off = flow_off; |
2438 | info->config.pio_threshold = pio_threshold; | 2453 | info->config.pio_threshold = pio_threshold; |
2439 | info->next_port = ports; | 2454 | info->next_port = ports; |
2440 | init_waitqueue_head(&info->port.open_wait); | ||
2441 | init_waitqueue_head(&info->port.close_wait); | ||
2442 | init_waitqueue_head(&info->delta_msr_wait); | 2455 | init_waitqueue_head(&info->delta_msr_wait); |
2443 | init_waitqueue_head(&info->break_wait); | 2456 | init_waitqueue_head(&info->break_wait); |
2444 | ports = info; | 2457 | ports = info; |
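
esp.c now reports carrier state through a tty_port_operations hook rather than open-coding the DCD poll inside block_til_ready(). A minimal sketch of the wiring, with my_carrier_raised() and my_setup() as hypothetical names:

	#include <linux/tty.h>

	static int my_carrier_raised(struct tty_port *port)
	{
		/* read DCD from the hardware; nonzero means carrier present */
		return 1;
	}

	static const struct tty_port_operations my_port_ops = {
		.carrier_raised = my_carrier_raised,
	};

	static void my_setup(struct tty_port *port)
	{
		tty_port_init(port);
		port->ops = &my_port_ops;	/* before the port is first opened */
	}

tty_port_carrier_raised() invokes the hook when one is provided and assumes carrier is present otherwise, so ports without modem signals keep working.
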
diff --git a/drivers/char/generic_serial.c b/drivers/char/generic_serial.c index c6090f84a2e4..9e4e569dc00d 100644 --- a/drivers/char/generic_serial.c +++ b/drivers/char/generic_serial.c | |||
@@ -376,7 +376,8 @@ static void gs_shutdown_port (struct gs_port *port) | |||
376 | 376 | ||
377 | void gs_hangup(struct tty_struct *tty) | 377 | void gs_hangup(struct tty_struct *tty) |
378 | { | 378 | { |
379 | struct gs_port *port; | 379 | struct gs_port *port; |
380 | unsigned long flags; | ||
380 | 381 | ||
381 | func_enter (); | 382 | func_enter (); |
382 | 383 | ||
@@ -386,9 +387,11 @@ void gs_hangup(struct tty_struct *tty) | |||
386 | return; | 387 | return; |
387 | 388 | ||
388 | gs_shutdown_port (port); | 389 | gs_shutdown_port (port); |
390 | spin_lock_irqsave(&port->port.lock, flags); | ||
389 | port->port.flags &= ~(ASYNC_NORMAL_ACTIVE|GS_ACTIVE); | 391 | port->port.flags &= ~(ASYNC_NORMAL_ACTIVE|GS_ACTIVE); |
390 | port->port.tty = NULL; | 392 | port->port.tty = NULL; |
391 | port->port.count = 0; | 393 | port->port.count = 0; |
394 | spin_unlock_irqrestore(&port->port.lock, flags); | ||
392 | 395 | ||
393 | wake_up_interruptible(&port->port.open_wait); | 396 | wake_up_interruptible(&port->port.open_wait); |
394 | func_exit (); | 397 | func_exit (); |
@@ -397,7 +400,8 @@ void gs_hangup(struct tty_struct *tty) | |||
397 | 400 | ||
398 | int gs_block_til_ready(void *port_, struct file * filp) | 401 | int gs_block_til_ready(void *port_, struct file * filp) |
399 | { | 402 | { |
400 | struct gs_port *port = port_; | 403 | struct gs_port *gp = port_; |
404 | struct tty_port *port = &gp->port; | ||
401 | DECLARE_WAITQUEUE(wait, current); | 405 | DECLARE_WAITQUEUE(wait, current); |
402 | int retval; | 406 | int retval; |
403 | int do_clocal = 0; | 407 | int do_clocal = 0; |
@@ -409,16 +413,16 @@ int gs_block_til_ready(void *port_, struct file * filp) | |||
409 | 413 | ||
410 | if (!port) return 0; | 414 | if (!port) return 0; |
411 | 415 | ||
412 | tty = port->port.tty; | 416 | tty = port->tty; |
413 | 417 | ||
414 | gs_dprintk (GS_DEBUG_BTR, "Entering gs_block_till_ready.\n"); | 418 | gs_dprintk (GS_DEBUG_BTR, "Entering gs_block_till_ready.\n"); |
415 | /* | 419 | /* |
416 | * If the device is in the middle of being closed, then block | 420 | * If the device is in the middle of being closed, then block |
417 | * until it's done, and then try again. | 421 | * until it's done, and then try again. |
418 | */ | 422 | */ |
419 | if (tty_hung_up_p(filp) || port->port.flags & ASYNC_CLOSING) { | 423 | if (tty_hung_up_p(filp) || port->flags & ASYNC_CLOSING) { |
420 | interruptible_sleep_on(&port->port.close_wait); | 424 | interruptible_sleep_on(&port->close_wait); |
421 | if (port->port.flags & ASYNC_HUP_NOTIFY) | 425 | if (port->flags & ASYNC_HUP_NOTIFY) |
422 | return -EAGAIN; | 426 | return -EAGAIN; |
423 | else | 427 | else |
424 | return -ERESTARTSYS; | 428 | return -ERESTARTSYS; |
@@ -432,7 +436,7 @@ int gs_block_til_ready(void *port_, struct file * filp) | |||
432 | */ | 436 | */ |
433 | if ((filp->f_flags & O_NONBLOCK) || | 437 | if ((filp->f_flags & O_NONBLOCK) || |
434 | (tty->flags & (1 << TTY_IO_ERROR))) { | 438 | (tty->flags & (1 << TTY_IO_ERROR))) { |
435 | port->port.flags |= ASYNC_NORMAL_ACTIVE; | 439 | port->flags |= ASYNC_NORMAL_ACTIVE; |
436 | return 0; | 440 | return 0; |
437 | } | 441 | } |
438 | 442 | ||
@@ -444,34 +448,34 @@ int gs_block_til_ready(void *port_, struct file * filp) | |||
444 | /* | 448 | /* |
445 | * Block waiting for the carrier detect and the line to become | 449 | * Block waiting for the carrier detect and the line to become |
446 | * free (i.e., not in use by the callout). While we are in | 450 | * free (i.e., not in use by the callout). While we are in |
447 | * this loop, port->port.count is dropped by one, so that | 451 | * this loop, port->count is dropped by one, so that |
448 | * rs_close() knows when to free things. We restore it upon | 452 | * rs_close() knows when to free things. We restore it upon |
449 | * exit, either normal or abnormal. | 453 | * exit, either normal or abnormal. |
450 | */ | 454 | */ |
451 | retval = 0; | 455 | retval = 0; |
452 | 456 | ||
453 | add_wait_queue(&port->port.open_wait, &wait); | 457 | add_wait_queue(&port->open_wait, &wait); |
454 | 458 | ||
455 | gs_dprintk (GS_DEBUG_BTR, "after add waitq.\n"); | 459 | gs_dprintk (GS_DEBUG_BTR, "after add waitq.\n"); |
456 | spin_lock_irqsave(&port->driver_lock, flags); | 460 | spin_lock_irqsave(&port->lock, flags); |
457 | if (!tty_hung_up_p(filp)) { | 461 | if (!tty_hung_up_p(filp)) { |
458 | port->port.count--; | 462 | port->count--; |
459 | } | 463 | } |
460 | spin_unlock_irqrestore(&port->driver_lock, flags); | 464 | port->blocked_open++; |
461 | port->port.blocked_open++; | 465 | spin_unlock_irqrestore(&port->lock, flags); |
462 | while (1) { | 466 | while (1) { |
463 | CD = port->rd->get_CD (port); | 467 | CD = tty_port_carrier_raised(port); |
464 | gs_dprintk (GS_DEBUG_BTR, "CD is now %d.\n", CD); | 468 | gs_dprintk (GS_DEBUG_BTR, "CD is now %d.\n", CD); |
465 | set_current_state (TASK_INTERRUPTIBLE); | 469 | set_current_state (TASK_INTERRUPTIBLE); |
466 | if (tty_hung_up_p(filp) || | 470 | if (tty_hung_up_p(filp) || |
467 | !(port->port.flags & ASYNC_INITIALIZED)) { | 471 | !(port->flags & ASYNC_INITIALIZED)) { |
468 | if (port->port.flags & ASYNC_HUP_NOTIFY) | 472 | if (port->flags & ASYNC_HUP_NOTIFY) |
469 | retval = -EAGAIN; | 473 | retval = -EAGAIN; |
470 | else | 474 | else |
471 | retval = -ERESTARTSYS; | 475 | retval = -ERESTARTSYS; |
472 | break; | 476 | break; |
473 | } | 477 | } |
474 | if (!(port->port.flags & ASYNC_CLOSING) && | 478 | if (!(port->flags & ASYNC_CLOSING) && |
475 | (do_clocal || CD)) | 479 | (do_clocal || CD)) |
476 | break; | 480 | break; |
477 | gs_dprintk (GS_DEBUG_BTR, "signal_pending is now: %d (%lx)\n", | 481 | gs_dprintk (GS_DEBUG_BTR, "signal_pending is now: %d (%lx)\n", |
@@ -483,19 +487,20 @@ int gs_block_til_ready(void *port_, struct file * filp) | |||
483 | schedule(); | 487 | schedule(); |
484 | } | 488 | } |
485 | gs_dprintk (GS_DEBUG_BTR, "Got out of the loop. (%d)\n", | 489 | gs_dprintk (GS_DEBUG_BTR, "Got out of the loop. (%d)\n", |
486 | port->port.blocked_open); | 490 | port->blocked_open); |
487 | set_current_state (TASK_RUNNING); | 491 | set_current_state (TASK_RUNNING); |
488 | remove_wait_queue(&port->port.open_wait, &wait); | 492 | remove_wait_queue(&port->open_wait, &wait); |
493 | |||
494 | spin_lock_irqsave(&port->lock, flags); | ||
489 | if (!tty_hung_up_p(filp)) { | 495 | if (!tty_hung_up_p(filp)) { |
490 | port->port.count++; | 496 | port->count++; |
491 | } | 497 | } |
492 | port->port.blocked_open--; | 498 | port->blocked_open--; |
493 | if (retval) | 499 | if (retval == 0) |
494 | return retval; | 500 | port->flags |= ASYNC_NORMAL_ACTIVE; |
495 | 501 | spin_unlock_irqrestore(&port->lock, flags); | |
496 | port->port.flags |= ASYNC_NORMAL_ACTIVE; | ||
497 | func_exit (); | 502 | func_exit (); |
498 | return 0; | 503 | return retval; |
499 | } | 504 | } |
500 | 505 | ||
501 | 506 | ||
@@ -506,7 +511,7 @@ void gs_close(struct tty_struct * tty, struct file * filp) | |||
506 | 511 | ||
507 | func_enter (); | 512 | func_enter (); |
508 | 513 | ||
509 | port = (struct gs_port *) tty->driver_data; | 514 | port = tty->driver_data; |
510 | 515 | ||
511 | if (!port) return; | 516 | if (!port) return; |
512 | 517 | ||
@@ -516,10 +521,10 @@ void gs_close(struct tty_struct * tty, struct file * filp) | |||
516 | port->port.tty = tty; | 521 | port->port.tty = tty; |
517 | } | 522 | } |
518 | 523 | ||
519 | spin_lock_irqsave(&port->driver_lock, flags); | 524 | spin_lock_irqsave(&port->port.lock, flags); |
520 | 525 | ||
521 | if (tty_hung_up_p(filp)) { | 526 | if (tty_hung_up_p(filp)) { |
522 | spin_unlock_irqrestore(&port->driver_lock, flags); | 527 | spin_unlock_irqrestore(&port->port.lock, flags); |
523 | if (port->rd->hungup) | 528 | if (port->rd->hungup) |
524 | port->rd->hungup (port); | 529 | port->rd->hungup (port); |
525 | func_exit (); | 530 | func_exit (); |
@@ -538,7 +543,7 @@ void gs_close(struct tty_struct * tty, struct file * filp) | |||
538 | 543 | ||
539 | if (port->port.count) { | 544 | if (port->port.count) { |
540 | gs_dprintk(GS_DEBUG_CLOSE, "gs_close port %p: count: %d\n", port, port->port.count); | 545 | gs_dprintk(GS_DEBUG_CLOSE, "gs_close port %p: count: %d\n", port, port->port.count); |
541 | spin_unlock_irqrestore(&port->driver_lock, flags); | 546 | spin_unlock_irqrestore(&port->port.lock, flags); |
542 | func_exit (); | 547 | func_exit (); |
543 | return; | 548 | return; |
544 | } | 549 | } |
@@ -559,8 +564,10 @@ void gs_close(struct tty_struct * tty, struct file * filp) | |||
559 | * line status register. | 564 | * line status register. |
560 | */ | 565 | */ |
561 | 566 | ||
567 | spin_lock_irqsave(&port->driver_lock, flags); | ||
562 | port->rd->disable_rx_interrupts (port); | 568 | port->rd->disable_rx_interrupts (port); |
563 | spin_unlock_irqrestore(&port->driver_lock, flags); | 569 | spin_unlock_irqrestore(&port->driver_lock, flags); |
570 | spin_unlock_irqrestore(&port->port.lock, flags); | ||
564 | 571 | ||
565 | /* close has no way of returning "EINTR", so discard return value */ | 572 | /* close has no way of returning "EINTR", so discard return value */ |
566 | if (port->closing_wait != ASYNC_CLOSING_WAIT_NONE) | 573 | if (port->closing_wait != ASYNC_CLOSING_WAIT_NONE) |
@@ -573,20 +580,25 @@ void gs_close(struct tty_struct * tty, struct file * filp) | |||
573 | tty_ldisc_flush(tty); | 580 | tty_ldisc_flush(tty); |
574 | tty->closing = 0; | 581 | tty->closing = 0; |
575 | 582 | ||
583 | spin_lock_irqsave(&port->driver_lock, flags); | ||
576 | port->event = 0; | 584 | port->event = 0; |
577 | port->rd->close (port); | 585 | port->rd->close (port); |
578 | port->rd->shutdown_port (port); | 586 | port->rd->shutdown_port (port); |
587 | spin_unlock_irqrestore(&port->driver_lock, flags); | ||
588 | |||
589 | spin_lock_irqsave(&port->port.lock, flags); | ||
579 | port->port.tty = NULL; | 590 | port->port.tty = NULL; |
580 | 591 | ||
581 | if (port->port.blocked_open) { | 592 | if (port->port.blocked_open) { |
582 | if (port->close_delay) { | 593 | if (port->close_delay) { |
583 | spin_unlock_irqrestore(&port->driver_lock, flags); | 594 | spin_unlock_irqrestore(&port->port.lock, flags); |
584 | msleep_interruptible(jiffies_to_msecs(port->close_delay)); | 595 | msleep_interruptible(jiffies_to_msecs(port->close_delay)); |
585 | spin_lock_irqsave(&port->driver_lock, flags); | 596 | spin_lock_irqsave(&port->port.lock, flags); |
586 | } | 597 | } |
587 | wake_up_interruptible(&port->port.open_wait); | 598 | wake_up_interruptible(&port->port.open_wait); |
588 | } | 599 | } |
589 | port->port.flags &= ~(ASYNC_NORMAL_ACTIVE|ASYNC_CLOSING | ASYNC_INITIALIZED); | 600 | port->port.flags &= ~(ASYNC_NORMAL_ACTIVE|ASYNC_CLOSING | ASYNC_INITIALIZED); |
601 | spin_unlock_irqrestore(&port->port.lock, flags); | ||
590 | wake_up_interruptible(&port->port.close_wait); | 602 | wake_up_interruptible(&port->port.close_wait); |
591 | 603 | ||
592 | func_exit (); | 604 | func_exit (); |
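
The generic_serial rework splits the locking in two: tty_port bookkeeping (count, blocked_open, flags) moves under port->lock, while driver_lock continues to guard hardware access. A small sketch of the bookkeeping side, assuming a hypothetical example_enter_wait() lifted from a block-till-ready path:

	#include <linux/tty.h>

	static void example_enter_wait(struct tty_port *port, struct file *filp)
	{
		unsigned long flags;

		spin_lock_irqsave(&port->lock, flags);
		if (!tty_hung_up_p(filp))
			port->count--;		/* restored once the wait ends */
		port->blocked_open++;
		spin_unlock_irqrestore(&port->lock, flags);
	}

Updating both counters under one lock closes the window in which a hangup could observe count and blocked_open out of step.
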
diff --git a/drivers/char/hvc_console.c b/drivers/char/hvc_console.c index 0587b66d6fc7..5a8a4c28c867 100644 --- a/drivers/char/hvc_console.c +++ b/drivers/char/hvc_console.c | |||
@@ -529,7 +529,7 @@ static void hvc_set_winsz(struct work_struct *work) | |||
529 | tty = tty_kref_get(hp->tty); | 529 | tty = tty_kref_get(hp->tty); |
530 | spin_unlock_irqrestore(&hp->lock, hvc_flags); | 530 | spin_unlock_irqrestore(&hp->lock, hvc_flags); |
531 | 531 | ||
532 | tty_do_resize(tty, tty, &ws); | 532 | tty_do_resize(tty, &ws); |
533 | tty_kref_put(tty); | 533 | tty_kref_put(tty); |
534 | } | 534 | } |
535 | 535 | ||
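
tty_do_resize() has lost its redundant real_tty argument, as the hvc hunk shows. A sketch of the new call; example_resize() is hypothetical:

	#include <linux/tty.h>

	static void example_resize(struct tty_struct *tty,
				   unsigned short rows, unsigned short cols)
	{
		struct winsize ws = { .ws_row = rows, .ws_col = cols };

		/* updates the stored winsize and signals SIGWINCH */
		tty_do_resize(tty, &ws);
	}
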
diff --git a/drivers/char/hvsi.c b/drivers/char/hvsi.c index af055287271a..406f8742a260 100644 --- a/drivers/char/hvsi.c +++ b/drivers/char/hvsi.c | |||
@@ -997,14 +997,14 @@ out: | |||
997 | 997 | ||
998 | static int hvsi_write_room(struct tty_struct *tty) | 998 | static int hvsi_write_room(struct tty_struct *tty) |
999 | { | 999 | { |
1000 | struct hvsi_struct *hp = (struct hvsi_struct *)tty->driver_data; | 1000 | struct hvsi_struct *hp = tty->driver_data; |
1001 | 1001 | ||
1002 | return N_OUTBUF - hp->n_outbuf; | 1002 | return N_OUTBUF - hp->n_outbuf; |
1003 | } | 1003 | } |
1004 | 1004 | ||
1005 | static int hvsi_chars_in_buffer(struct tty_struct *tty) | 1005 | static int hvsi_chars_in_buffer(struct tty_struct *tty) |
1006 | { | 1006 | { |
1007 | struct hvsi_struct *hp = (struct hvsi_struct *)tty->driver_data; | 1007 | struct hvsi_struct *hp = tty->driver_data; |
1008 | 1008 | ||
1009 | return hp->n_outbuf; | 1009 | return hp->n_outbuf; |
1010 | } | 1010 | } |
@@ -1070,7 +1070,7 @@ out: | |||
1070 | */ | 1070 | */ |
1071 | static void hvsi_throttle(struct tty_struct *tty) | 1071 | static void hvsi_throttle(struct tty_struct *tty) |
1072 | { | 1072 | { |
1073 | struct hvsi_struct *hp = (struct hvsi_struct *)tty->driver_data; | 1073 | struct hvsi_struct *hp = tty->driver_data; |
1074 | 1074 | ||
1075 | pr_debug("%s\n", __func__); | 1075 | pr_debug("%s\n", __func__); |
1076 | 1076 | ||
@@ -1079,7 +1079,7 @@ static void hvsi_throttle(struct tty_struct *tty) | |||
1079 | 1079 | ||
1080 | static void hvsi_unthrottle(struct tty_struct *tty) | 1080 | static void hvsi_unthrottle(struct tty_struct *tty) |
1081 | { | 1081 | { |
1082 | struct hvsi_struct *hp = (struct hvsi_struct *)tty->driver_data; | 1082 | struct hvsi_struct *hp = tty->driver_data; |
1083 | unsigned long flags; | 1083 | unsigned long flags; |
1084 | int shouldflip = 0; | 1084 | int shouldflip = 0; |
1085 | 1085 | ||
@@ -1100,7 +1100,7 @@ static void hvsi_unthrottle(struct tty_struct *tty) | |||
1100 | 1100 | ||
1101 | static int hvsi_tiocmget(struct tty_struct *tty, struct file *file) | 1101 | static int hvsi_tiocmget(struct tty_struct *tty, struct file *file) |
1102 | { | 1102 | { |
1103 | struct hvsi_struct *hp = (struct hvsi_struct *)tty->driver_data; | 1103 | struct hvsi_struct *hp = tty->driver_data; |
1104 | 1104 | ||
1105 | hvsi_get_mctrl(hp); | 1105 | hvsi_get_mctrl(hp); |
1106 | return hp->mctrl; | 1106 | return hp->mctrl; |
@@ -1109,7 +1109,7 @@ static int hvsi_tiocmget(struct tty_struct *tty, struct file *file) | |||
1109 | static int hvsi_tiocmset(struct tty_struct *tty, struct file *file, | 1109 | static int hvsi_tiocmset(struct tty_struct *tty, struct file *file, |
1110 | unsigned int set, unsigned int clear) | 1110 | unsigned int set, unsigned int clear) |
1111 | { | 1111 | { |
1112 | struct hvsi_struct *hp = (struct hvsi_struct *)tty->driver_data; | 1112 | struct hvsi_struct *hp = tty->driver_data; |
1113 | unsigned long flags; | 1113 | unsigned long flags; |
1114 | uint16_t new_mctrl; | 1114 | uint16_t new_mctrl; |
1115 | 1115 | ||
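
The hvsi hunks drop casts on tty->driver_data. Since driver_data is a void *, C converts it to any object pointer implicitly, so the casts were pure noise. A two-line illustration (to_hp() is hypothetical):

	#include <linux/tty.h>

	struct hvsi_struct;			/* opaque for the example */

	static struct hvsi_struct *to_hp(struct tty_struct *tty)
	{
		return tty->driver_data;	/* no cast needed from void * */
	}
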
diff --git a/drivers/char/i8k.c b/drivers/char/i8k.c index b60d425ce8d1..fc8cf7ac7f2b 100644 --- a/drivers/char/i8k.c +++ b/drivers/char/i8k.c | |||
@@ -485,7 +485,21 @@ static struct dmi_system_id __initdata i8k_dmi_table[] = { | |||
485 | DMI_MATCH(DMI_PRODUCT_NAME, "MP061"), | 485 | DMI_MATCH(DMI_PRODUCT_NAME, "MP061"), |
486 | }, | 486 | }, |
487 | }, | 487 | }, |
488 | { } | 488 | { |
489 | .ident = "Dell Precision", | ||
490 | .matches = { | ||
491 | DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."), | ||
492 | DMI_MATCH(DMI_PRODUCT_NAME, "Precision"), | ||
493 | }, | ||
494 | }, | ||
495 | { | ||
496 | .ident = "Dell Vostro", | ||
497 | .matches = { | ||
498 | DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."), | ||
499 | DMI_MATCH(DMI_PRODUCT_NAME, "Vostro"), | ||
500 | }, | ||
501 | }, | ||
502 | { } | ||
489 | }; | 503 | }; |
490 | 504 | ||
491 | /* | 505 | /* |
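
The two new i8k entries whitelist machines by DMI vendor and product-name prefix; DMI_MATCH() performs a substring match, so "Precision" covers the whole model family. A sketch of how such a table is declared and consulted at init time, with illustrative strings:

	#include <linux/dmi.h>

	static struct dmi_system_id __initdata example_dmi_table[] = {
		{
			.ident = "Example Laptop",
			.matches = {
				DMI_MATCH(DMI_SYS_VENDOR, "Example Inc."),
				DMI_MATCH(DMI_PRODUCT_NAME, "Model X"),
			},
		},
		{ }	/* zeroed terminator, required */
	};

	static int __init example_init(void)
	{
		if (!dmi_check_system(example_dmi_table))
			return -ENODEV;		/* machine not in the table */
		return 0;
	}
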
diff --git a/drivers/char/isicom.c b/drivers/char/isicom.c index 04e4549299ba..24aa6e88e223 100644 --- a/drivers/char/isicom.c +++ b/drivers/char/isicom.c | |||
@@ -328,11 +328,13 @@ static inline void drop_rts(struct isi_port *port) | |||
328 | } | 328 | } |
329 | 329 | ||
330 | /* card->lock MUST NOT be held */ | 330 | /* card->lock MUST NOT be held */ |
331 | static inline void raise_dtr_rts(struct isi_port *port) | 331 | |
332 | static void isicom_raise_dtr_rts(struct tty_port *port) | ||
332 | { | 333 | { |
333 | struct isi_board *card = port->card; | 334 | struct isi_port *ip = container_of(port, struct isi_port, port); |
335 | struct isi_board *card = ip->card; | ||
334 | unsigned long base = card->base; | 336 | unsigned long base = card->base; |
335 | u16 channel = port->channel; | 337 | u16 channel = ip->channel; |
336 | 338 | ||
337 | if (!lock_card(card)) | 339 | if (!lock_card(card)) |
338 | return; | 340 | return; |
@@ -340,7 +342,7 @@ static inline void raise_dtr_rts(struct isi_port *port) | |||
340 | outw(0x8000 | (channel << card->shift_count) | 0x02, base); | 342 | outw(0x8000 | (channel << card->shift_count) | 0x02, base); |
341 | outw(0x0f04, base); | 343 | outw(0x0f04, base); |
342 | InterruptTheCard(base); | 344 | InterruptTheCard(base); |
343 | port->status |= (ISI_DTR | ISI_RTS); | 345 | ip->status |= (ISI_DTR | ISI_RTS); |
344 | unlock_card(card); | 346 | unlock_card(card); |
345 | } | 347 | } |
346 | 348 | ||
@@ -830,80 +832,10 @@ static int isicom_setup_port(struct tty_struct *tty) | |||
830 | return 0; | 832 | return 0; |
831 | } | 833 | } |
832 | 834 | ||
833 | static int block_til_ready(struct tty_struct *tty, struct file *filp, | 835 | static int isicom_carrier_raised(struct tty_port *port) |
834 | struct isi_port *port) | ||
835 | { | 836 | { |
836 | struct isi_board *card = port->card; | 837 | struct isi_port *ip = container_of(port, struct isi_port, port); |
837 | int do_clocal = 0, retval; | 838 | return (ip->status & ISI_DCD) ? 1 : 0; |
838 | unsigned long flags; | ||
839 | DECLARE_WAITQUEUE(wait, current); | ||
840 | |||
841 | /* block if port is in the process of being closed */ | ||
842 | |||
843 | if (tty_hung_up_p(filp) || port->port.flags & ASYNC_CLOSING) { | ||
844 | pr_dbg("block_til_ready: close in progress.\n"); | ||
845 | interruptible_sleep_on(&port->port.close_wait); | ||
846 | if (port->port.flags & ASYNC_HUP_NOTIFY) | ||
847 | return -EAGAIN; | ||
848 | else | ||
849 | return -ERESTARTSYS; | ||
850 | } | ||
851 | |||
852 | /* if non-blocking mode is set ... */ | ||
853 | |||
854 | if ((filp->f_flags & O_NONBLOCK) || | ||
855 | (tty->flags & (1 << TTY_IO_ERROR))) { | ||
856 | pr_dbg("block_til_ready: non-block mode.\n"); | ||
857 | port->port.flags |= ASYNC_NORMAL_ACTIVE; | ||
858 | return 0; | ||
859 | } | ||
860 | |||
861 | if (C_CLOCAL(tty)) | ||
862 | do_clocal = 1; | ||
863 | |||
864 | /* block waiting for DCD to be asserted, and while | ||
865 | callout dev is busy */ | ||
866 | retval = 0; | ||
867 | add_wait_queue(&port->port.open_wait, &wait); | ||
868 | |||
869 | spin_lock_irqsave(&card->card_lock, flags); | ||
870 | if (!tty_hung_up_p(filp)) | ||
871 | port->port.count--; | ||
872 | port->port.blocked_open++; | ||
873 | spin_unlock_irqrestore(&card->card_lock, flags); | ||
874 | |||
875 | while (1) { | ||
876 | raise_dtr_rts(port); | ||
877 | |||
878 | set_current_state(TASK_INTERRUPTIBLE); | ||
879 | if (tty_hung_up_p(filp) || !(port->port.flags & ASYNC_INITIALIZED)) { | ||
880 | if (port->port.flags & ASYNC_HUP_NOTIFY) | ||
881 | retval = -EAGAIN; | ||
882 | else | ||
883 | retval = -ERESTARTSYS; | ||
884 | break; | ||
885 | } | ||
886 | if (!(port->port.flags & ASYNC_CLOSING) && | ||
887 | (do_clocal || (port->status & ISI_DCD))) { | ||
888 | break; | ||
889 | } | ||
890 | if (signal_pending(current)) { | ||
891 | retval = -ERESTARTSYS; | ||
892 | break; | ||
893 | } | ||
894 | schedule(); | ||
895 | } | ||
896 | set_current_state(TASK_RUNNING); | ||
897 | remove_wait_queue(&port->port.open_wait, &wait); | ||
898 | spin_lock_irqsave(&card->card_lock, flags); | ||
899 | if (!tty_hung_up_p(filp)) | ||
900 | port->port.count++; | ||
901 | port->port.blocked_open--; | ||
902 | spin_unlock_irqrestore(&card->card_lock, flags); | ||
903 | if (retval) | ||
904 | return retval; | ||
905 | port->port.flags |= ASYNC_NORMAL_ACTIVE; | ||
906 | return 0; | ||
907 | } | 839 | } |
908 | 840 | ||
909 | static int isicom_open(struct tty_struct *tty, struct file *filp) | 841 | static int isicom_open(struct tty_struct *tty, struct file *filp) |
@@ -932,12 +864,13 @@ static int isicom_open(struct tty_struct *tty, struct file *filp) | |||
932 | 864 | ||
933 | isicom_setup_board(card); | 865 | isicom_setup_board(card); |
934 | 866 | ||
867 | /* FIXME: locking on port.count etc */ | ||
935 | port->port.count++; | 868 | port->port.count++; |
936 | tty->driver_data = port; | 869 | tty->driver_data = port; |
937 | tty_port_tty_set(&port->port, tty); | 870 | tty_port_tty_set(&port->port, tty); |
938 | error = isicom_setup_port(tty); | 871 | error = isicom_setup_port(tty); |
939 | if (error == 0) | 872 | if (error == 0) |
940 | error = block_til_ready(tty, filp, port); | 873 | error = tty_port_block_til_ready(&port->port, tty, filp); |
941 | return error; | 874 | return error; |
942 | } | 875 | } |
943 | 876 | ||
@@ -1012,76 +945,30 @@ static void isicom_flush_buffer(struct tty_struct *tty) | |||
1012 | 945 | ||
1013 | static void isicom_close(struct tty_struct *tty, struct file *filp) | 946 | static void isicom_close(struct tty_struct *tty, struct file *filp) |
1014 | { | 947 | { |
1015 | struct isi_port *port = tty->driver_data; | 948 | struct isi_port *ip = tty->driver_data; |
949 | struct tty_port *port = &ip->port; | ||
1016 | struct isi_board *card; | 950 | struct isi_board *card; |
1017 | unsigned long flags; | 951 | unsigned long flags; |
1018 | 952 | ||
1019 | if (!port) | 953 | BUG_ON(!ip); |
1020 | return; | ||
1021 | card = port->card; | ||
1022 | if (isicom_paranoia_check(port, tty->name, "isicom_close")) | ||
1023 | return; | ||
1024 | |||
1025 | pr_dbg("Close start!!!.\n"); | ||
1026 | |||
1027 | spin_lock_irqsave(&card->card_lock, flags); | ||
1028 | if (tty_hung_up_p(filp)) { | ||
1029 | spin_unlock_irqrestore(&card->card_lock, flags); | ||
1030 | return; | ||
1031 | } | ||
1032 | |||
1033 | if (tty->count == 1 && port->port.count != 1) { | ||
1034 | printk(KERN_WARNING "ISICOM:(0x%lx) isicom_close: bad port " | ||
1035 | "count tty->count = 1 port count = %d.\n", | ||
1036 | card->base, port->port.count); | ||
1037 | port->port.count = 1; | ||
1038 | } | ||
1039 | if (--port->port.count < 0) { | ||
1040 | printk(KERN_WARNING "ISICOM:(0x%lx) isicom_close: bad port " | ||
1041 | "count for channel%d = %d", card->base, port->channel, | ||
1042 | port->port.count); | ||
1043 | port->port.count = 0; | ||
1044 | } | ||
1045 | 954 | ||
1046 | if (port->port.count) { | 955 | card = ip->card; |
1047 | spin_unlock_irqrestore(&card->card_lock, flags); | 956 | if (isicom_paranoia_check(ip, tty->name, "isicom_close")) |
1048 | return; | 957 | return; |
1049 | } | ||
1050 | port->port.flags |= ASYNC_CLOSING; | ||
1051 | tty->closing = 1; | ||
1052 | spin_unlock_irqrestore(&card->card_lock, flags); | ||
1053 | 958 | ||
1054 | if (port->port.closing_wait != ASYNC_CLOSING_WAIT_NONE) | ||
1055 | tty_wait_until_sent(tty, port->port.closing_wait); | ||
1056 | /* indicate to the card that no more data can be received | 959 | /* indicate to the card that no more data can be received |
1057 | on this port */ | 960 | on this port */ |
1058 | spin_lock_irqsave(&card->card_lock, flags); | 961 | spin_lock_irqsave(&card->card_lock, flags); |
1059 | if (port->port.flags & ASYNC_INITIALIZED) { | 962 | if (port->flags & ASYNC_INITIALIZED) { |
1060 | card->port_status &= ~(1 << port->channel); | 963 | card->port_status &= ~(1 << ip->channel); |
1061 | outw(card->port_status, card->base + 0x02); | 964 | outw(card->port_status, card->base + 0x02); |
1062 | } | 965 | } |
1063 | isicom_shutdown_port(port); | 966 | isicom_shutdown_port(ip); |
1064 | spin_unlock_irqrestore(&card->card_lock, flags); | 967 | spin_unlock_irqrestore(&card->card_lock, flags); |
1065 | 968 | ||
1066 | isicom_flush_buffer(tty); | 969 | isicom_flush_buffer(tty); |
1067 | tty_ldisc_flush(tty); | 970 | |
1068 | 971 | tty_port_close_end(port, tty); | |
1069 | spin_lock_irqsave(&card->card_lock, flags); | ||
1070 | tty->closing = 0; | ||
1071 | |||
1072 | if (port->port.blocked_open) { | ||
1073 | spin_unlock_irqrestore(&card->card_lock, flags); | ||
1074 | if (port->port.close_delay) { | ||
1075 | pr_dbg("scheduling until time out.\n"); | ||
1076 | msleep_interruptible( | ||
1077 | jiffies_to_msecs(port->port.close_delay)); | ||
1078 | } | ||
1079 | spin_lock_irqsave(&card->card_lock, flags); | ||
1080 | wake_up_interruptible(&port->port.open_wait); | ||
1081 | } | ||
1082 | port->port.flags &= ~(ASYNC_NORMAL_ACTIVE | ASYNC_CLOSING); | ||
1083 | wake_up_interruptible(&port->port.close_wait); | ||
1084 | spin_unlock_irqrestore(&card->card_lock, flags); | ||
1085 | } | 972 | } |
1086 | 973 | ||
1087 | /* write et al. */ | 974 | /* write et al. */ |
@@ -1420,10 +1307,7 @@ static void isicom_hangup(struct tty_struct *tty) | |||
1420 | isicom_shutdown_port(port); | 1307 | isicom_shutdown_port(port); |
1421 | spin_unlock_irqrestore(&port->card->card_lock, flags); | 1308 | spin_unlock_irqrestore(&port->card->card_lock, flags); |
1422 | 1309 | ||
1423 | port->port.count = 0; | 1310 | tty_port_hangup(&port->port); |
1424 | port->port.flags &= ~ASYNC_NORMAL_ACTIVE; | ||
1425 | tty_port_tty_set(&port->port, NULL); | ||
1426 | wake_up_interruptible(&port->port.open_wait); | ||
1427 | } | 1311 | } |
1428 | 1312 | ||
1429 | 1313 | ||
@@ -1452,6 +1336,11 @@ static const struct tty_operations isicom_ops = { | |||
1452 | .break_ctl = isicom_send_break, | 1336 | .break_ctl = isicom_send_break, |
1453 | }; | 1337 | }; |
1454 | 1338 | ||
1339 | static const struct tty_port_operations isicom_port_ops = { | ||
1340 | .carrier_raised = isicom_carrier_raised, | ||
1341 | .raise_dtr_rts = isicom_raise_dtr_rts, | ||
1342 | }; | ||
1343 | |||
1455 | static int __devinit reset_card(struct pci_dev *pdev, | 1344 | static int __devinit reset_card(struct pci_dev *pdev, |
1456 | const unsigned int card, unsigned int *signature) | 1345 | const unsigned int card, unsigned int *signature) |
1457 | { | 1346 | { |
@@ -1794,6 +1683,7 @@ static int __init isicom_init(void) | |||
1794 | spin_lock_init(&isi_card[idx].card_lock); | 1683 | spin_lock_init(&isi_card[idx].card_lock); |
1795 | for (channel = 0; channel < 16; channel++, port++) { | 1684 | for (channel = 0; channel < 16; channel++, port++) { |
1796 | tty_port_init(&port->port); | 1685 | tty_port_init(&port->port); |
1686 | port->port.ops = &isicom_port_ops; | ||
1797 | port->magic = ISICOM_MAGIC; | 1687 | port->magic = ISICOM_MAGIC; |
1798 | port->card = &isi_card[idx]; | 1688 | port->card = &isi_card[idx]; |
1799 | port->channel = channel; | 1689 | port->channel = channel; |
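
isicom's hand-rolled block_til_ready(), close and hangup paths collapse into the shared tty_port helpers, with the hardware-specific behaviour expressed through the two port ops registered above. A sketch of the resulting open/close shape; my_open() and my_close() are hypothetical:

	#include <linux/tty.h>

	static int my_open(struct tty_port *port, struct tty_struct *tty,
			struct file *filp)
	{
		port->count++;		/* isicom still carries a locking FIXME here */
		return tty_port_block_til_ready(port, tty, filp);
	}

	static void my_close(struct tty_port *port, struct tty_struct *tty,
			struct file *filp)
	{
		if (tty_port_close_start(port, tty, filp) == 0)
			return;		/* not the final close, or already hung up */
		/* hardware shutdown would go here */
		tty_port_close_end(port, tty);	/* close_delay, wakeups, flag clearing */
	}

tty_port_block_til_ready() raises DTR/RTS via the raise_dtr_rts op and polls carrier_raised in its wait loop, so a driver describes its modem lines exactly once.
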
diff --git a/drivers/char/istallion.c b/drivers/char/istallion.c index 4b10770fa937..5c3dc6b8411c 100644 --- a/drivers/char/istallion.c +++ b/drivers/char/istallion.c | |||
@@ -151,7 +151,7 @@ static char *stli_drvversion = "5.6.0"; | |||
151 | static char *stli_serialname = "ttyE"; | 151 | static char *stli_serialname = "ttyE"; |
152 | 152 | ||
153 | static struct tty_driver *stli_serial; | 153 | static struct tty_driver *stli_serial; |
154 | 154 | static const struct tty_port_operations stli_port_ops; | |
155 | 155 | ||
156 | #define STLI_TXBUFSIZE 4096 | 156 | #define STLI_TXBUFSIZE 4096 |
157 | 157 | ||
@@ -626,8 +626,6 @@ static int stli_hostcmd(struct stlibrd *brdp, struct stliport *portp); | |||
626 | static int stli_initopen(struct tty_struct *tty, struct stlibrd *brdp, struct stliport *portp); | 626 | static int stli_initopen(struct tty_struct *tty, struct stlibrd *brdp, struct stliport *portp); |
627 | static int stli_rawopen(struct stlibrd *brdp, struct stliport *portp, unsigned long arg, int wait); | 627 | static int stli_rawopen(struct stlibrd *brdp, struct stliport *portp, unsigned long arg, int wait); |
628 | static int stli_rawclose(struct stlibrd *brdp, struct stliport *portp, unsigned long arg, int wait); | 628 | static int stli_rawclose(struct stlibrd *brdp, struct stliport *portp, unsigned long arg, int wait); |
629 | static int stli_waitcarrier(struct tty_struct *tty, struct stlibrd *brdp, | ||
630 | struct stliport *portp, struct file *filp); | ||
631 | static int stli_setport(struct tty_struct *tty); | 629 | static int stli_setport(struct tty_struct *tty); |
632 | static int stli_cmdwait(struct stlibrd *brdp, struct stliport *portp, unsigned long cmd, void *arg, int size, int copyback); | 630 | static int stli_cmdwait(struct stlibrd *brdp, struct stliport *portp, unsigned long cmd, void *arg, int size, int copyback); |
633 | static void stli_sendcmd(struct stlibrd *brdp, struct stliport *portp, unsigned long cmd, void *arg, int size, int copyback); | 631 | static void stli_sendcmd(struct stlibrd *brdp, struct stliport *portp, unsigned long cmd, void *arg, int size, int copyback); |
@@ -769,7 +767,7 @@ static int stli_parsebrd(struct stlconf *confp, char **argp) | |||
769 | break; | 767 | break; |
770 | } | 768 | } |
771 | if (i == ARRAY_SIZE(stli_brdstr)) { | 769 | if (i == ARRAY_SIZE(stli_brdstr)) { |
772 | printk("STALLION: unknown board name, %s?\n", argp[0]); | 770 | printk(KERN_WARNING "istallion: unknown board name, %s?\n", argp[0]); |
773 | return 0; | 771 | return 0; |
774 | } | 772 | } |
775 | 773 | ||
@@ -787,6 +785,7 @@ static int stli_open(struct tty_struct *tty, struct file *filp) | |||
787 | { | 785 | { |
788 | struct stlibrd *brdp; | 786 | struct stlibrd *brdp; |
789 | struct stliport *portp; | 787 | struct stliport *portp; |
788 | struct tty_port *port; | ||
790 | unsigned int minordev, brdnr, portnr; | 789 | unsigned int minordev, brdnr, portnr; |
791 | int rc; | 790 | int rc; |
792 | 791 | ||
@@ -808,30 +807,19 @@ static int stli_open(struct tty_struct *tty, struct file *filp) | |||
808 | return -ENODEV; | 807 | return -ENODEV; |
809 | if (portp->devnr < 1) | 808 | if (portp->devnr < 1) |
810 | return -ENODEV; | 809 | return -ENODEV; |
811 | 810 | port = &portp->port; | |
812 | |||
813 | /* | ||
814 | * Check if this port is in the middle of closing. If so then wait | ||
815 | * until it is closed then return error status based on flag settings. | ||
816 | * The sleep here does not need interrupt protection since the wakeup | ||
817 | * for it is done with the same context. | ||
818 | */ | ||
819 | if (portp->port.flags & ASYNC_CLOSING) { | ||
820 | interruptible_sleep_on(&portp->port.close_wait); | ||
821 | if (portp->port.flags & ASYNC_HUP_NOTIFY) | ||
822 | return -EAGAIN; | ||
823 | return -ERESTARTSYS; | ||
824 | } | ||
825 | 811 | ||
826 | /* | 812 | /* |
827 | * On the first open of the device setup the port hardware, and | 813 | * On the first open of the device setup the port hardware, and |
828 | * initialize the per port data structure. Since initializing the port | 814 | * initialize the per port data structure. Since initializing the port |
829 | * requires several commands to the board we will need to wait for any | 815 | * requires several commands to the board we will need to wait for any |
830 | * other open that is already initializing the port. | 816 | * other open that is already initializing the port. |
817 | * | ||
818 | * Review - locking | ||
831 | */ | 819 | */ |
832 | tty_port_tty_set(&portp->port, tty); | 820 | tty_port_tty_set(port, tty); |
833 | tty->driver_data = portp; | 821 | tty->driver_data = portp; |
834 | portp->port.count++; | 822 | port->count++; |
835 | 823 | ||
836 | wait_event_interruptible(portp->raw_wait, | 824 | wait_event_interruptible(portp->raw_wait, |
837 | !test_bit(ST_INITIALIZING, &portp->state)); | 825 | !test_bit(ST_INITIALIZING, &portp->state)); |
@@ -841,7 +829,8 @@ static int stli_open(struct tty_struct *tty, struct file *filp) | |||
841 | if ((portp->port.flags & ASYNC_INITIALIZED) == 0) { | 829 | if ((portp->port.flags & ASYNC_INITIALIZED) == 0) { |
842 | set_bit(ST_INITIALIZING, &portp->state); | 830 | set_bit(ST_INITIALIZING, &portp->state); |
843 | if ((rc = stli_initopen(tty, brdp, portp)) >= 0) { | 831 | if ((rc = stli_initopen(tty, brdp, portp)) >= 0) { |
844 | portp->port.flags |= ASYNC_INITIALIZED; | 832 | /* Locking */ |
833 | port->flags |= ASYNC_INITIALIZED; | ||
845 | clear_bit(TTY_IO_ERROR, &tty->flags); | 834 | clear_bit(TTY_IO_ERROR, &tty->flags); |
846 | } | 835 | } |
847 | clear_bit(ST_INITIALIZING, &portp->state); | 836 | clear_bit(ST_INITIALIZING, &portp->state); |
@@ -849,31 +838,7 @@ static int stli_open(struct tty_struct *tty, struct file *filp) | |||
849 | if (rc < 0) | 838 | if (rc < 0) |
850 | return rc; | 839 | return rc; |
851 | } | 840 | } |
852 | 841 | return tty_port_block_til_ready(&portp->port, tty, filp); | |
853 | /* | ||
854 | * Check if this port is in the middle of closing. If so then wait | ||
855 | * until it is closed then return error status, based on flag settings. | ||
856 | * The sleep here does not need interrupt protection since the wakeup | ||
857 | * for it is done with the same context. | ||
858 | */ | ||
859 | if (portp->port.flags & ASYNC_CLOSING) { | ||
860 | interruptible_sleep_on(&portp->port.close_wait); | ||
861 | if (portp->port.flags & ASYNC_HUP_NOTIFY) | ||
862 | return -EAGAIN; | ||
863 | return -ERESTARTSYS; | ||
864 | } | ||
865 | |||
866 | /* | ||
867 | * Based on type of open being done check if it can overlap with any | ||
868 | * previous opens still in effect. If we are a normal serial device | ||
869 | * then also we might have to wait for carrier. | ||
870 | */ | ||
871 | if (!(filp->f_flags & O_NONBLOCK)) { | ||
872 | if ((rc = stli_waitcarrier(tty, brdp, portp, filp)) != 0) | ||
873 | return rc; | ||
874 | } | ||
875 | portp->port.flags |= ASYNC_NORMAL_ACTIVE; | ||
876 | return 0; | ||
877 | } | 842 | } |
878 | 843 | ||
879 | /*****************************************************************************/ | 844 | /*****************************************************************************/ |
@@ -882,25 +847,16 @@ static void stli_close(struct tty_struct *tty, struct file *filp) | |||
882 | { | 847 | { |
883 | struct stlibrd *brdp; | 848 | struct stlibrd *brdp; |
884 | struct stliport *portp; | 849 | struct stliport *portp; |
850 | struct tty_port *port; | ||
885 | unsigned long flags; | 851 | unsigned long flags; |
886 | 852 | ||
887 | portp = tty->driver_data; | 853 | portp = tty->driver_data; |
888 | if (portp == NULL) | 854 | if (portp == NULL) |
889 | return; | 855 | return; |
856 | port = &portp->port; | ||
890 | 857 | ||
891 | spin_lock_irqsave(&stli_lock, flags); | 858 | if (tty_port_close_start(port, tty, filp) == 0) |
892 | if (tty_hung_up_p(filp)) { | ||
893 | spin_unlock_irqrestore(&stli_lock, flags); | ||
894 | return; | ||
895 | } | ||
896 | if ((tty->count == 1) && (portp->port.count != 1)) | ||
897 | portp->port.count = 1; | ||
898 | if (portp->port.count-- > 1) { | ||
899 | spin_unlock_irqrestore(&stli_lock, flags); | ||
900 | return; | 859 | return; |
901 | } | ||
902 | |||
903 | portp->port.flags |= ASYNC_CLOSING; | ||
904 | 860 | ||
905 | /* | 861 | /* |
906 | * May want to wait for data to drain before closing. The BUSY flag | 862 | * May want to wait for data to drain before closing. The BUSY flag |
@@ -908,15 +864,19 @@ static void stli_close(struct tty_struct *tty, struct file *filp) | |||
908 | * updated by messages from the slave - indicating when all chars | 864 | * updated by messages from the slave - indicating when all chars |
909 | * really have drained. | 865 | * really have drained. |
910 | */ | 866 | */ |
867 | spin_lock_irqsave(&stli_lock, flags); | ||
911 | if (tty == stli_txcooktty) | 868 | if (tty == stli_txcooktty) |
912 | stli_flushchars(tty); | 869 | stli_flushchars(tty); |
913 | tty->closing = 1; | ||
914 | spin_unlock_irqrestore(&stli_lock, flags); | 870 | spin_unlock_irqrestore(&stli_lock, flags); |
915 | 871 | ||
872 | /* We end up doing this twice for the moment. This needs looking at | ||
873 | eventually. Note we still use portp->closing_wait as a result */ | ||
916 | if (portp->closing_wait != ASYNC_CLOSING_WAIT_NONE) | 874 | if (portp->closing_wait != ASYNC_CLOSING_WAIT_NONE) |
917 | tty_wait_until_sent(tty, portp->closing_wait); | 875 | tty_wait_until_sent(tty, portp->closing_wait); |
918 | 876 | ||
919 | portp->port.flags &= ~ASYNC_INITIALIZED; | 877 | /* FIXME: port locking here needs attending to */ |
878 | port->flags &= ~ASYNC_INITIALIZED; | ||
879 | |||
920 | brdp = stli_brds[portp->brdnr]; | 880 | brdp = stli_brds[portp->brdnr]; |
921 | stli_rawclose(brdp, portp, 0, 0); | 881 | stli_rawclose(brdp, portp, 0, 0); |
922 | if (tty->termios->c_cflag & HUPCL) { | 882 | if (tty->termios->c_cflag & HUPCL) { |
@@ -934,17 +894,8 @@ static void stli_close(struct tty_struct *tty, struct file *filp) | |||
934 | set_bit(ST_DOFLUSHRX, &portp->state); | 894 | set_bit(ST_DOFLUSHRX, &portp->state); |
935 | stli_flushbuffer(tty); | 895 | stli_flushbuffer(tty); |
936 | 896 | ||
937 | tty->closing = 0; | 897 | tty_port_close_end(port, tty); |
938 | tty_port_tty_set(&portp->port, NULL); | 898 | tty_port_tty_set(port, NULL); |
939 | |||
940 | if (portp->openwaitcnt) { | ||
941 | if (portp->close_delay) | ||
942 | msleep_interruptible(jiffies_to_msecs(portp->close_delay)); | ||
943 | wake_up_interruptible(&portp->port.open_wait); | ||
944 | } | ||
945 | |||
946 | portp->port.flags &= ~(ASYNC_NORMAL_ACTIVE|ASYNC_CLOSING); | ||
947 | wake_up_interruptible(&portp->port.close_wait); | ||
948 | } | 899 | } |
949 | 900 | ||
950 | /*****************************************************************************/ | 901 | /*****************************************************************************/ |
@@ -1183,62 +1134,23 @@ static int stli_setport(struct tty_struct *tty) | |||
1183 | 1134 | ||
1184 | /*****************************************************************************/ | 1135 | /*****************************************************************************/ |
1185 | 1136 | ||
1186 | /* | 1137 | static int stli_carrier_raised(struct tty_port *port) |
1187 | * Possibly need to wait for carrier (DCD signal) to come high. Say | ||
1188 | * maybe because if we are clocal then we don't need to wait... | ||
1189 | */ | ||
1190 | |||
1191 | static int stli_waitcarrier(struct tty_struct *tty, struct stlibrd *brdp, | ||
1192 | struct stliport *portp, struct file *filp) | ||
1193 | { | 1138 | { |
1194 | unsigned long flags; | 1139 | struct stliport *portp = container_of(port, struct stliport, port); |
1195 | int rc, doclocal; | 1140 | return (portp->sigs & TIOCM_CD) ? 1 : 0; |
1196 | 1141 | } | |
1197 | rc = 0; | ||
1198 | doclocal = 0; | ||
1199 | |||
1200 | if (tty->termios->c_cflag & CLOCAL) | ||
1201 | doclocal++; | ||
1202 | |||
1203 | spin_lock_irqsave(&stli_lock, flags); | ||
1204 | portp->openwaitcnt++; | ||
1205 | if (! tty_hung_up_p(filp)) | ||
1206 | portp->port.count--; | ||
1207 | spin_unlock_irqrestore(&stli_lock, flags); | ||
1208 | |||
1209 | for (;;) { | ||
1210 | stli_mkasysigs(&portp->asig, 1, 1); | ||
1211 | if ((rc = stli_cmdwait(brdp, portp, A_SETSIGNALS, | ||
1212 | &portp->asig, sizeof(asysigs_t), 0)) < 0) | ||
1213 | break; | ||
1214 | if (tty_hung_up_p(filp) || | ||
1215 | ((portp->port.flags & ASYNC_INITIALIZED) == 0)) { | ||
1216 | if (portp->port.flags & ASYNC_HUP_NOTIFY) | ||
1217 | rc = -EBUSY; | ||
1218 | else | ||
1219 | rc = -ERESTARTSYS; | ||
1220 | break; | ||
1221 | } | ||
1222 | if (((portp->port.flags & ASYNC_CLOSING) == 0) && | ||
1223 | (doclocal || (portp->sigs & TIOCM_CD))) { | ||
1224 | break; | ||
1225 | } | ||
1226 | if (signal_pending(current)) { | ||
1227 | rc = -ERESTARTSYS; | ||
1228 | break; | ||
1229 | } | ||
1230 | interruptible_sleep_on(&portp->port.open_wait); | ||
1231 | } | ||
1232 | |||
1233 | spin_lock_irqsave(&stli_lock, flags); | ||
1234 | if (! tty_hung_up_p(filp)) | ||
1235 | portp->port.count++; | ||
1236 | portp->openwaitcnt--; | ||
1237 | spin_unlock_irqrestore(&stli_lock, flags); | ||
1238 | 1142 | ||
1239 | return rc; | 1143 | static void stli_raise_dtr_rts(struct tty_port *port) |
1144 | { | ||
1145 | struct stliport *portp = container_of(port, struct stliport, port); | ||
1146 | struct stlibrd *brdp = stli_brds[portp->brdnr]; | ||
1147 | stli_mkasysigs(&portp->asig, 1, 1); | ||
1148 | if (stli_cmdwait(brdp, portp, A_SETSIGNALS, &portp->asig, | ||
1149 | sizeof(asysigs_t), 0) < 0) | ||
1150 | printk(KERN_WARNING "istallion: dtr raise failed.\n"); | ||
1240 | } | 1151 | } |
1241 | 1152 | ||
1153 | |||
1242 | /*****************************************************************************/ | 1154 | /*****************************************************************************/ |
1243 | 1155 | ||
1244 | /* | 1156 | /* |
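
Both new istallion hooks recover the driver's per-port structure from the embedded tty_port via container_of(). A sketch of the idiom; struct my_port and my_carrier_raised() are hypothetical:

	#include <linux/kernel.h>
	#include <linux/tty.h>

	struct my_port {
		unsigned int sigs;	/* cached TIOCM_* signal bits */
		struct tty_port port;	/* embedded, not a pointer */
	};

	static int my_carrier_raised(struct tty_port *port)
	{
		struct my_port *p = container_of(port, struct my_port, port);

		return (p->sigs & TIOCM_CD) ? 1 : 0;
	}

The arithmetic container_of() performs is only valid because the tty_port is embedded in the driver structure rather than referenced through a pointer.
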
@@ -1550,7 +1462,7 @@ static int stli_getserial(struct stliport *portp, struct serial_struct __user *s | |||
1550 | sio.irq = 0; | 1462 | sio.irq = 0; |
1551 | sio.flags = portp->port.flags; | 1463 | sio.flags = portp->port.flags; |
1552 | sio.baud_base = portp->baud_base; | 1464 | sio.baud_base = portp->baud_base; |
1553 | sio.close_delay = portp->close_delay; | 1465 | sio.close_delay = portp->port.close_delay; |
1554 | sio.closing_wait = portp->closing_wait; | 1466 | sio.closing_wait = portp->closing_wait; |
1555 | sio.custom_divisor = portp->custom_divisor; | 1467 | sio.custom_divisor = portp->custom_divisor; |
1556 | sio.xmit_fifo_size = 0; | 1468 | sio.xmit_fifo_size = 0; |
@@ -1582,7 +1494,7 @@ static int stli_setserial(struct tty_struct *tty, struct serial_struct __user *s | |||
1582 | return -EFAULT; | 1494 | return -EFAULT; |
1583 | if (!capable(CAP_SYS_ADMIN)) { | 1495 | if (!capable(CAP_SYS_ADMIN)) { |
1584 | if ((sio.baud_base != portp->baud_base) || | 1496 | if ((sio.baud_base != portp->baud_base) || |
1585 | (sio.close_delay != portp->close_delay) || | 1497 | (sio.close_delay != portp->port.close_delay) || |
1586 | ((sio.flags & ~ASYNC_USR_MASK) != | 1498 | ((sio.flags & ~ASYNC_USR_MASK) != |
1587 | (portp->port.flags & ~ASYNC_USR_MASK))) | 1499 | (portp->port.flags & ~ASYNC_USR_MASK))) |
1588 | return -EPERM; | 1500 | return -EPERM; |
@@ -1591,7 +1503,7 @@ static int stli_setserial(struct tty_struct *tty, struct serial_struct __user *s | |||
1591 | portp->port.flags = (portp->port.flags & ~ASYNC_USR_MASK) | | 1503 | portp->port.flags = (portp->port.flags & ~ASYNC_USR_MASK) | |
1592 | (sio.flags & ASYNC_USR_MASK); | 1504 | (sio.flags & ASYNC_USR_MASK); |
1593 | portp->baud_base = sio.baud_base; | 1505 | portp->baud_base = sio.baud_base; |
1594 | portp->close_delay = sio.close_delay; | 1506 | portp->port.close_delay = sio.close_delay; |
1595 | portp->closing_wait = sio.closing_wait; | 1507 | portp->closing_wait = sio.closing_wait; |
1596 | portp->custom_divisor = sio.custom_divisor; | 1508 | portp->custom_divisor = sio.custom_divisor; |
1597 | 1509 | ||
@@ -1821,6 +1733,7 @@ static void stli_hangup(struct tty_struct *tty) | |||
1821 | { | 1733 | { |
1822 | struct stliport *portp; | 1734 | struct stliport *portp; |
1823 | struct stlibrd *brdp; | 1735 | struct stlibrd *brdp; |
1736 | struct tty_port *port; | ||
1824 | unsigned long flags; | 1737 | unsigned long flags; |
1825 | 1738 | ||
1826 | portp = tty->driver_data; | 1739 | portp = tty->driver_data; |
@@ -1831,8 +1744,11 @@ static void stli_hangup(struct tty_struct *tty) | |||
1831 | brdp = stli_brds[portp->brdnr]; | 1744 | brdp = stli_brds[portp->brdnr]; |
1832 | if (brdp == NULL) | 1745 | if (brdp == NULL) |
1833 | return; | 1746 | return; |
1747 | port = &portp->port; | ||
1834 | 1748 | ||
1835 | portp->port.flags &= ~ASYNC_INITIALIZED; | 1749 | spin_lock_irqsave(&port->lock, flags); |
1750 | port->flags &= ~ASYNC_INITIALIZED; | ||
1751 | spin_unlock_irqrestore(&port->lock, flags); | ||
1836 | 1752 | ||
1837 | if (!test_bit(ST_CLOSING, &portp->state)) | 1753 | if (!test_bit(ST_CLOSING, &portp->state)) |
1838 | stli_rawclose(brdp, portp, 0, 0); | 1754 | stli_rawclose(brdp, portp, 0, 0); |
@@ -1853,12 +1769,9 @@ static void stli_hangup(struct tty_struct *tty) | |||
1853 | clear_bit(ST_TXBUSY, &portp->state); | 1769 | clear_bit(ST_TXBUSY, &portp->state); |
1854 | clear_bit(ST_RXSTOP, &portp->state); | 1770 | clear_bit(ST_RXSTOP, &portp->state); |
1855 | set_bit(TTY_IO_ERROR, &tty->flags); | 1771 | set_bit(TTY_IO_ERROR, &tty->flags); |
1856 | tty_port_tty_set(&portp->port, NULL); | ||
1857 | portp->port.flags &= ~ASYNC_NORMAL_ACTIVE; | ||
1858 | portp->port.count = 0; | ||
1859 | spin_unlock_irqrestore(&stli_lock, flags); | 1772 | spin_unlock_irqrestore(&stli_lock, flags); |
1860 | 1773 | ||
1861 | wake_up_interruptible(&portp->port.open_wait); | 1774 | tty_port_hangup(port); |
1862 | } | 1775 | } |
1863 | 1776 | ||
1864 | /*****************************************************************************/ | 1777 | /*****************************************************************************/ |
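The hunk above replaces istallion's hand-rolled hangup bookkeeping with the generic tty_port_hangup() helper. A minimal sketch of what that helper must perform, inferred from the lines removed here (an illustration only, not the kernel's exact implementation, which lives in the tty core):

	#include <linux/tty.h>

	/* Sketch: the per-port teardown that tty_port_hangup() centralizes. */
	static void port_hangup_sketch(struct tty_port *port)
	{
		unsigned long flags;

		spin_lock_irqsave(&port->lock, flags);
		port->count = 0;			/* no remaining openers */
		port->flags &= ~ASYNC_NORMAL_ACTIVE;	/* port no longer active */
		spin_unlock_irqrestore(&port->lock, flags);

		tty_port_tty_set(port, NULL);		/* drop the tty back-pointer */
		wake_up_interruptible(&port->open_wait); /* release blocked opens */
	}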
@@ -2132,7 +2045,7 @@ static void __stli_sendcmd(struct stlibrd *brdp, struct stliport *portp, unsigne | |||
2132 | unsigned char __iomem *bits; | 2045 | unsigned char __iomem *bits; |
2133 | 2046 | ||
2134 | if (test_bit(ST_CMDING, &portp->state)) { | 2047 | if (test_bit(ST_CMDING, &portp->state)) { |
2135 | printk(KERN_ERR "STALLION: command already busy, cmd=%x!\n", | 2048 | printk(KERN_ERR "istallion: command already busy, cmd=%x!\n", |
2136 | (int) cmd); | 2049 | (int) cmd); |
2137 | return; | 2050 | return; |
2138 | } | 2051 | } |
@@ -2692,16 +2605,17 @@ static int stli_initports(struct stlibrd *brdp) | |||
2692 | for (i = 0, panelnr = 0, panelport = 0; (i < brdp->nrports); i++) { | 2605 | for (i = 0, panelnr = 0, panelport = 0; (i < brdp->nrports); i++) { |
2693 | portp = kzalloc(sizeof(struct stliport), GFP_KERNEL); | 2606 | portp = kzalloc(sizeof(struct stliport), GFP_KERNEL); |
2694 | if (!portp) { | 2607 | if (!portp) { |
2695 | printk("STALLION: failed to allocate port structure\n"); | 2608 | printk(KERN_WARNING "istallion: failed to allocate port structure\n"); |
2696 | continue; | 2609 | continue; |
2697 | } | 2610 | } |
2698 | tty_port_init(&portp->port); | 2611 | tty_port_init(&portp->port); |
2612 | portp->port.ops = &stli_port_ops; | ||
2699 | portp->magic = STLI_PORTMAGIC; | 2613 | portp->magic = STLI_PORTMAGIC; |
2700 | portp->portnr = i; | 2614 | portp->portnr = i; |
2701 | portp->brdnr = brdp->brdnr; | 2615 | portp->brdnr = brdp->brdnr; |
2702 | portp->panelnr = panelnr; | 2616 | portp->panelnr = panelnr; |
2703 | portp->baud_base = STL_BAUDBASE; | 2617 | portp->baud_base = STL_BAUDBASE; |
2704 | portp->close_delay = STL_CLOSEDELAY; | 2618 | portp->port.close_delay = STL_CLOSEDELAY; |
2705 | portp->closing_wait = 30 * HZ; | 2619 | portp->closing_wait = 30 * HZ; |
2706 | init_waitqueue_head(&portp->port.open_wait); | 2620 | init_waitqueue_head(&portp->port.open_wait); |
2707 | init_waitqueue_head(&portp->port.close_wait); | 2621 | init_waitqueue_head(&portp->port.close_wait); |
@@ -2758,7 +2672,7 @@ static void __iomem *stli_ecpgetmemptr(struct stlibrd *brdp, unsigned long offse | |||
2758 | unsigned char val; | 2672 | unsigned char val; |
2759 | 2673 | ||
2760 | if (offset > brdp->memsize) { | 2674 | if (offset > brdp->memsize) { |
2761 | printk(KERN_ERR "STALLION: shared memory pointer=%x out of " | 2675 | printk(KERN_ERR "istallion: shared memory pointer=%x out of " |
2762 | "range at line=%d(%d), brd=%d\n", | 2676 | "range at line=%d(%d), brd=%d\n", |
2763 | (int) offset, line, __LINE__, brdp->brdnr); | 2677 | (int) offset, line, __LINE__, brdp->brdnr); |
2764 | ptr = NULL; | 2678 | ptr = NULL; |
@@ -2832,7 +2746,7 @@ static void __iomem *stli_ecpeigetmemptr(struct stlibrd *brdp, unsigned long off | |||
2832 | unsigned char val; | 2746 | unsigned char val; |
2833 | 2747 | ||
2834 | if (offset > brdp->memsize) { | 2748 | if (offset > brdp->memsize) { |
2835 | printk(KERN_ERR "STALLION: shared memory pointer=%x out of " | 2749 | printk(KERN_ERR "istallion: shared memory pointer=%x out of " |
2836 | "range at line=%d(%d), brd=%d\n", | 2750 | "range at line=%d(%d), brd=%d\n", |
2837 | (int) offset, line, __LINE__, brdp->brdnr); | 2751 | (int) offset, line, __LINE__, brdp->brdnr); |
2838 | ptr = NULL; | 2752 | ptr = NULL; |
@@ -2884,7 +2798,7 @@ static void __iomem *stli_ecpmcgetmemptr(struct stlibrd *brdp, unsigned long off | |||
2884 | unsigned char val; | 2798 | unsigned char val; |
2885 | 2799 | ||
2886 | if (offset > brdp->memsize) { | 2800 | if (offset > brdp->memsize) { |
2887 | printk(KERN_ERR "STALLION: shared memory pointer=%x out of " | 2801 | printk(KERN_ERR "istallion: shared memory pointer=%x out of " |
2888 | "range at line=%d(%d), brd=%d\n", | 2802 | "range at line=%d(%d), brd=%d\n", |
2889 | (int) offset, line, __LINE__, brdp->brdnr); | 2803 | (int) offset, line, __LINE__, brdp->brdnr); |
2890 | ptr = NULL; | 2804 | ptr = NULL; |
@@ -2929,7 +2843,7 @@ static void __iomem *stli_ecppcigetmemptr(struct stlibrd *brdp, unsigned long of | |||
2929 | unsigned char val; | 2843 | unsigned char val; |
2930 | 2844 | ||
2931 | if (offset > brdp->memsize) { | 2845 | if (offset > brdp->memsize) { |
2932 | printk(KERN_ERR "STALLION: shared memory pointer=%x out of " | 2846 | printk(KERN_ERR "istallion: shared memory pointer=%x out of " |
2933 | "range at line=%d(%d), board=%d\n", | 2847 | "range at line=%d(%d), board=%d\n", |
2934 | (int) offset, line, __LINE__, brdp->brdnr); | 2848 | (int) offset, line, __LINE__, brdp->brdnr); |
2935 | ptr = NULL; | 2849 | ptr = NULL; |
@@ -2994,7 +2908,7 @@ static void __iomem *stli_onbgetmemptr(struct stlibrd *brdp, unsigned long offse | |||
2994 | void __iomem *ptr; | 2908 | void __iomem *ptr; |
2995 | 2909 | ||
2996 | if (offset > brdp->memsize) { | 2910 | if (offset > brdp->memsize) { |
2997 | printk(KERN_ERR "STALLION: shared memory pointer=%x out of " | 2911 | printk(KERN_ERR "istallion: shared memory pointer=%x out of " |
2998 | "range at line=%d(%d), brd=%d\n", | 2912 | "range at line=%d(%d), brd=%d\n", |
2999 | (int) offset, line, __LINE__, brdp->brdnr); | 2913 | (int) offset, line, __LINE__, brdp->brdnr); |
3000 | ptr = NULL; | 2914 | ptr = NULL; |
@@ -3060,7 +2974,7 @@ static void __iomem *stli_onbegetmemptr(struct stlibrd *brdp, unsigned long offs | |||
3060 | unsigned char val; | 2974 | unsigned char val; |
3061 | 2975 | ||
3062 | if (offset > brdp->memsize) { | 2976 | if (offset > brdp->memsize) { |
3063 | printk(KERN_ERR "STALLION: shared memory pointer=%x out of " | 2977 | printk(KERN_ERR "istallion: shared memory pointer=%x out of " |
3064 | "range at line=%d(%d), brd=%d\n", | 2978 | "range at line=%d(%d), brd=%d\n", |
3065 | (int) offset, line, __LINE__, brdp->brdnr); | 2979 | (int) offset, line, __LINE__, brdp->brdnr); |
3066 | ptr = NULL; | 2980 | ptr = NULL; |
@@ -3499,7 +3413,7 @@ static int stli_startbrd(struct stlibrd *brdp) | |||
3499 | #endif | 3413 | #endif |
3500 | 3414 | ||
3501 | if (nrdevs < (brdp->nrports + 1)) { | 3415 | if (nrdevs < (brdp->nrports + 1)) { |
3502 | printk(KERN_ERR "STALLION: slave failed to allocate memory for " | 3416 | printk(KERN_ERR "istallion: slave failed to allocate memory for " |
3503 | "all devices, devices=%d\n", nrdevs); | 3417 | "all devices, devices=%d\n", nrdevs); |
3504 | brdp->nrports = nrdevs - 1; | 3418 | brdp->nrports = nrdevs - 1; |
3505 | } | 3419 | } |
@@ -3509,13 +3423,13 @@ static int stli_startbrd(struct stlibrd *brdp) | |||
3509 | brdp->bitsize = (nrdevs + 7) / 8; | 3423 | brdp->bitsize = (nrdevs + 7) / 8; |
3510 | memoff = readl(&hdrp->memp); | 3424 | memoff = readl(&hdrp->memp); |
3511 | if (memoff > brdp->memsize) { | 3425 | if (memoff > brdp->memsize) { |
3512 | printk(KERN_ERR "STALLION: corrupted shared memory region?\n"); | 3426 | printk(KERN_ERR "istallion: corrupted shared memory region?\n"); |
3513 | rc = -EIO; | 3427 | rc = -EIO; |
3514 | goto stli_donestartup; | 3428 | goto stli_donestartup; |
3515 | } | 3429 | } |
3516 | memp = (cdkmem_t __iomem *) EBRDGETMEMPTR(brdp, memoff); | 3430 | memp = (cdkmem_t __iomem *) EBRDGETMEMPTR(brdp, memoff); |
3517 | if (readw(&memp->dtype) != TYP_ASYNCTRL) { | 3431 | if (readw(&memp->dtype) != TYP_ASYNCTRL) { |
3518 | printk(KERN_ERR "STALLION: no slave control device found\n"); | 3432 | printk(KERN_ERR "istallion: no slave control device found\n"); |
3519 | goto stli_donestartup; | 3433 | goto stli_donestartup; |
3520 | } | 3434 | } |
3521 | memp++; | 3435 | memp++; |
@@ -3600,7 +3514,7 @@ static int __devinit stli_brdinit(struct stlibrd *brdp) | |||
3600 | retval = stli_initonb(brdp); | 3514 | retval = stli_initonb(brdp); |
3601 | break; | 3515 | break; |
3602 | default: | 3516 | default: |
3603 | printk(KERN_ERR "STALLION: board=%d is unknown board " | 3517 | printk(KERN_ERR "istallion: board=%d is unknown board " |
3604 | "type=%d\n", brdp->brdnr, brdp->brdtype); | 3518 | "type=%d\n", brdp->brdnr, brdp->brdtype); |
3605 | retval = -ENODEV; | 3519 | retval = -ENODEV; |
3606 | } | 3520 | } |
@@ -3609,7 +3523,7 @@ static int __devinit stli_brdinit(struct stlibrd *brdp) | |||
3609 | return retval; | 3523 | return retval; |
3610 | 3524 | ||
3611 | stli_initports(brdp); | 3525 | stli_initports(brdp); |
3612 | printk(KERN_INFO "STALLION: %s found, board=%d io=%x mem=%x " | 3526 | printk(KERN_INFO "istallion: %s found, board=%d io=%x mem=%x " |
3613 | "nrpanels=%d nrports=%d\n", stli_brdnames[brdp->brdtype], | 3527 | "nrpanels=%d nrports=%d\n", stli_brdnames[brdp->brdtype], |
3614 | brdp->brdnr, brdp->iobase, (int) brdp->memaddr, | 3528 | brdp->brdnr, brdp->iobase, (int) brdp->memaddr, |
3615 | brdp->nrpanels, brdp->nrports); | 3529 | brdp->nrpanels, brdp->nrports); |
@@ -3703,7 +3617,7 @@ static int stli_eisamemprobe(struct stlibrd *brdp) | |||
3703 | if (! foundit) { | 3617 | if (! foundit) { |
3704 | brdp->memaddr = 0; | 3618 | brdp->memaddr = 0; |
3705 | brdp->membase = NULL; | 3619 | brdp->membase = NULL; |
3706 | printk(KERN_ERR "STALLION: failed to probe shared memory " | 3620 | printk(KERN_ERR "istallion: failed to probe shared memory " |
3707 | "region for %s in EISA slot=%d\n", | 3621 | "region for %s in EISA slot=%d\n", |
3708 | stli_brdnames[brdp->brdtype], (brdp->iobase >> 12)); | 3622 | stli_brdnames[brdp->brdtype], (brdp->iobase >> 12)); |
3709 | return -ENODEV; | 3623 | return -ENODEV; |
@@ -3848,7 +3762,7 @@ static int __devinit stli_pciprobe(struct pci_dev *pdev, | |||
3848 | mutex_lock(&stli_brdslock); | 3762 | mutex_lock(&stli_brdslock); |
3849 | brdnr = stli_getbrdnr(); | 3763 | brdnr = stli_getbrdnr(); |
3850 | if (brdnr < 0) { | 3764 | if (brdnr < 0) { |
3851 | printk(KERN_INFO "STALLION: too many boards found, " | 3765 | printk(KERN_INFO "istallion: too many boards found, " |
3852 | "maximum supported %d\n", STL_MAXBRDS); | 3766 | "maximum supported %d\n", STL_MAXBRDS); |
3853 | mutex_unlock(&stli_brdslock); | 3767 | mutex_unlock(&stli_brdslock); |
3854 | retval = -EIO; | 3768 | retval = -EIO; |
@@ -3920,7 +3834,7 @@ static struct stlibrd *stli_allocbrd(void) | |||
3920 | 3834 | ||
3921 | brdp = kzalloc(sizeof(struct stlibrd), GFP_KERNEL); | 3835 | brdp = kzalloc(sizeof(struct stlibrd), GFP_KERNEL); |
3922 | if (!brdp) { | 3836 | if (!brdp) { |
3923 | printk(KERN_ERR "STALLION: failed to allocate memory " | 3837 | printk(KERN_ERR "istallion: failed to allocate memory " |
3924 | "(size=%Zd)\n", sizeof(struct stlibrd)); | 3838 | "(size=%Zd)\n", sizeof(struct stlibrd)); |
3925 | return NULL; | 3839 | return NULL; |
3926 | } | 3840 | } |
@@ -4518,6 +4432,11 @@ static const struct tty_operations stli_ops = { | |||
4518 | .tiocmset = stli_tiocmset, | 4432 | .tiocmset = stli_tiocmset, |
4519 | }; | 4433 | }; |
4520 | 4434 | ||
4435 | static const struct tty_port_operations stli_port_ops = { | ||
4436 | .carrier_raised = stli_carrier_raised, | ||
4437 | .raise_dtr_rts = stli_raise_dtr_rts, | ||
4438 | }; | ||
4439 | |||
4521 | /*****************************************************************************/ | 4440 | /*****************************************************************************/ |
4522 | /* | 4441 | /* |
4523 | * Loadable module initialization stuff. | 4442 | * Loadable module initialization stuff. |
@@ -4554,7 +4473,7 @@ static int __init istallion_module_init(void) | |||
4554 | 4473 | ||
4555 | stli_txcookbuf = kmalloc(STLI_TXBUFSIZE, GFP_KERNEL); | 4474 | stli_txcookbuf = kmalloc(STLI_TXBUFSIZE, GFP_KERNEL); |
4556 | if (!stli_txcookbuf) { | 4475 | if (!stli_txcookbuf) { |
4557 | printk(KERN_ERR "STALLION: failed to allocate memory " | 4476 | printk(KERN_ERR "istallion: failed to allocate memory " |
4558 | "(size=%d)\n", STLI_TXBUFSIZE); | 4477 | "(size=%d)\n", STLI_TXBUFSIZE); |
4559 | retval = -ENOMEM; | 4478 | retval = -ENOMEM; |
4560 | goto err; | 4479 | goto err; |
@@ -4579,7 +4498,7 @@ static int __init istallion_module_init(void) | |||
4579 | 4498 | ||
4580 | retval = tty_register_driver(stli_serial); | 4499 | retval = tty_register_driver(stli_serial); |
4581 | if (retval) { | 4500 | if (retval) { |
4582 | printk(KERN_ERR "STALLION: failed to register serial driver\n"); | 4501 | printk(KERN_ERR "istallion: failed to register serial driver\n"); |
4583 | goto err_ttyput; | 4502 | goto err_ttyput; |
4584 | } | 4503 | } |
4585 | 4504 | ||
@@ -4593,7 +4512,7 @@ static int __init istallion_module_init(void) | |||
4593 | */ | 4512 | */ |
4594 | retval = register_chrdev(STL_SIOMEMMAJOR, "staliomem", &stli_fsiomem); | 4513 | retval = register_chrdev(STL_SIOMEMMAJOR, "staliomem", &stli_fsiomem); |
4595 | if (retval) { | 4514 | if (retval) { |
4596 | printk(KERN_ERR "STALLION: failed to register serial memory " | 4515 | printk(KERN_ERR "istallion: failed to register serial memory " |
4597 | "device\n"); | 4516 | "device\n"); |
4598 | goto err_deinit; | 4517 | goto err_deinit; |
4599 | } | 4518 | } |
diff --git a/drivers/char/moxa.c b/drivers/char/moxa.c index 12d327a2c9ba..8b0da97d5293 100644 --- a/drivers/char/moxa.c +++ b/drivers/char/moxa.c | |||
@@ -206,6 +206,7 @@ static void moxa_poll(unsigned long); | |||
206 | static void moxa_set_tty_param(struct tty_struct *, struct ktermios *); | 206 | static void moxa_set_tty_param(struct tty_struct *, struct ktermios *); |
207 | static void moxa_setup_empty_event(struct tty_struct *); | 207 | static void moxa_setup_empty_event(struct tty_struct *); |
208 | static void moxa_shut_down(struct tty_struct *); | 208 | static void moxa_shut_down(struct tty_struct *); |
209 | static int moxa_carrier_raised(struct tty_port *); | ||
209 | /* | 210 | /* |
210 | * moxa board interface functions: | 211 | * moxa board interface functions: |
211 | */ | 212 | */ |
@@ -405,6 +406,10 @@ static const struct tty_operations moxa_ops = { | |||
405 | .tiocmset = moxa_tiocmset, | 406 | .tiocmset = moxa_tiocmset, |
406 | }; | 407 | }; |
407 | 408 | ||
409 | static const struct tty_port_operations moxa_port_ops = { | ||
410 | .carrier_raised = moxa_carrier_raised, | ||
411 | }; | ||
412 | |||
408 | static struct tty_driver *moxaDriver; | 413 | static struct tty_driver *moxaDriver; |
409 | static DEFINE_TIMER(moxaTimer, moxa_poll, 0, 0); | 414 | static DEFINE_TIMER(moxaTimer, moxa_poll, 0, 0); |
410 | static DEFINE_SPINLOCK(moxa_lock); | 415 | static DEFINE_SPINLOCK(moxa_lock); |
@@ -826,6 +831,7 @@ static int moxa_init_board(struct moxa_board_conf *brd, struct device *dev) | |||
826 | 831 | ||
827 | for (i = 0, p = brd->ports; i < MAX_PORTS_PER_BOARD; i++, p++) { | 832 | for (i = 0, p = brd->ports; i < MAX_PORTS_PER_BOARD; i++, p++) { |
828 | tty_port_init(&p->port); | 833 | tty_port_init(&p->port); |
834 | p->port.ops = &moxa_port_ops; | ||
829 | p->type = PORT_16550A; | 835 | p->type = PORT_16550A; |
830 | p->cflag = B9600 | CS8 | CREAD | CLOCAL | HUPCL; | 836 | p->cflag = B9600 | CS8 | CREAD | CLOCAL | HUPCL; |
831 | } | 837 | } |
@@ -1115,15 +1121,27 @@ static void moxa_close_port(struct tty_struct *tty) | |||
1115 | tty_port_tty_set(&ch->port, NULL); | 1121 | tty_port_tty_set(&ch->port, NULL); |
1116 | } | 1122 | } |
1117 | 1123 | ||
1124 | static int moxa_carrier_raised(struct tty_port *port) | ||
1125 | { | ||
1126 | struct moxa_port *ch = container_of(port, struct moxa_port, port); | ||
1127 | int dcd; | ||
1128 | |||
1129 | spin_lock_bh(&moxa_lock); | ||
1130 | dcd = ch->DCDState; | ||
1131 | spin_unlock_bh(&moxa_lock); | ||
1132 | return dcd; | ||
1133 | } | ||
1134 | |||
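Note the container_of() idiom in moxa_carrier_raised() above: the callback receives only the generic struct tty_port, and the driver recovers its own per-port state because the tty_port is embedded in it rather than allocated separately. A standalone illustration (struct and function names hypothetical):

	#include <linux/kernel.h>
	#include <linux/tty.h>

	struct example_port {
		int DCDState;		/* driver-private carrier state */
		struct tty_port port;	/* embedded generic port */
	};

	static struct example_port *to_example_port(struct tty_port *port)
	{
		/* Walk back from the member to the enclosing structure. */
		return container_of(port, struct example_port, port);
	}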
1118 | static int moxa_block_till_ready(struct tty_struct *tty, struct file *filp, | 1135 | static int moxa_block_till_ready(struct tty_struct *tty, struct file *filp, |
1119 | struct moxa_port *ch) | 1136 | struct moxa_port *ch) |
1120 | { | 1137 | { |
1138 | struct tty_port *port = &ch->port; | ||
1121 | DEFINE_WAIT(wait); | 1139 | DEFINE_WAIT(wait); |
1122 | int retval = 0; | 1140 | int retval = 0; |
1123 | u8 dcd; | 1141 | u8 dcd; |
1124 | 1142 | ||
1125 | while (1) { | 1143 | while (1) { |
1126 | prepare_to_wait(&ch->port.open_wait, &wait, TASK_INTERRUPTIBLE); | 1144 | prepare_to_wait(&port->open_wait, &wait, TASK_INTERRUPTIBLE); |
1127 | if (tty_hung_up_p(filp)) { | 1145 | if (tty_hung_up_p(filp)) { |
1128 | #ifdef SERIAL_DO_RESTART | 1146 | #ifdef SERIAL_DO_RESTART |
1129 | retval = -ERESTARTSYS; | 1147 | retval = -ERESTARTSYS; |
@@ -1132,9 +1150,7 @@ static int moxa_block_till_ready(struct tty_struct *tty, struct file *filp, | |||
1132 | #endif | 1150 | #endif |
1133 | break; | 1151 | break; |
1134 | } | 1152 | } |
1135 | spin_lock_bh(&moxa_lock); | 1153 | dcd = tty_port_carrier_raised(port); |
1136 | dcd = ch->DCDState; | ||
1137 | spin_unlock_bh(&moxa_lock); | ||
1138 | if (dcd) | 1154 | if (dcd) |
1139 | break; | 1155 | break; |
1140 | 1156 | ||
@@ -1144,7 +1160,7 @@ static int moxa_block_till_ready(struct tty_struct *tty, struct file *filp, | |||
1144 | } | 1160 | } |
1145 | schedule(); | 1161 | schedule(); |
1146 | } | 1162 | } |
1147 | finish_wait(&ch->port.open_wait, &wait); | 1163 | finish_wait(&port->open_wait, &wait); |
1148 | 1164 | ||
1149 | return retval; | 1165 | return retval; |
1150 | } | 1166 | } |
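The wait loop above now asks the tty core for carrier state instead of poking DCDState directly. A sketch of how the core-side tty_port_carrier_raised() presumably dispatches to the driver's new callback (assumption based on this conversion; the authoritative code is in the tty core):

	static int core_carrier_raised_sketch(struct tty_port *port)
	{
		/* Ports that supply no hook are treated as always having carrier. */
		if (port->ops == NULL || port->ops->carrier_raised == NULL)
			return 1;
		return port->ops->carrier_raised(port);
	}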
diff --git a/drivers/char/mxser.c b/drivers/char/mxser.c index 047766915411..402c9f217f83 100644 --- a/drivers/char/mxser.c +++ b/drivers/char/mxser.c | |||
@@ -541,74 +541,21 @@ static unsigned char mxser_get_msr(int baseaddr, int mode, int port) | |||
541 | return status; | 541 | return status; |
542 | } | 542 | } |
543 | 543 | ||
544 | static int mxser_block_til_ready(struct tty_struct *tty, struct file *filp, | 544 | static int mxser_carrier_raised(struct tty_port *port) |
545 | struct mxser_port *port) | ||
546 | { | 545 | { |
547 | DECLARE_WAITQUEUE(wait, current); | 546 | struct mxser_port *mp = container_of(port, struct mxser_port, port); |
548 | int retval; | 547 | return (inb(mp->ioaddr + UART_MSR) & UART_MSR_DCD)?1:0; |
549 | int do_clocal = 0; | 548 | } |
550 | unsigned long flags; | ||
551 | |||
552 | /* | ||
553 | * If non-blocking mode is set, or the port is not enabled, | ||
554 | * then make the check up front and then exit. | ||
555 | */ | ||
556 | if ((filp->f_flags & O_NONBLOCK) || | ||
557 | test_bit(TTY_IO_ERROR, &tty->flags)) { | ||
558 | port->port.flags |= ASYNC_NORMAL_ACTIVE; | ||
559 | return 0; | ||
560 | } | ||
561 | 549 | ||
562 | if (tty->termios->c_cflag & CLOCAL) | 550 | static void mxser_raise_dtr_rts(struct tty_port *port) |
563 | do_clocal = 1; | 551 | { |
552 | struct mxser_port *mp = container_of(port, struct mxser_port, port); | ||
553 | unsigned long flags; | ||
564 | 554 | ||
565 | /* | 555 | spin_lock_irqsave(&mp->slock, flags); |
566 | * Block waiting for the carrier detect and the line to become | 556 | outb(inb(mp->ioaddr + UART_MCR) | |
567 | * free (i.e., not in use by the callout). While we are in | 557 | UART_MCR_DTR | UART_MCR_RTS, mp->ioaddr + UART_MCR); |
568 | * this loop, port->port.count is dropped by one, so that | 558 | spin_unlock_irqrestore(&mp->slock, flags); |
569 | * mxser_close() knows when to free things. We restore it upon | ||
570 | * exit, either normal or abnormal. | ||
571 | */ | ||
572 | retval = 0; | ||
573 | add_wait_queue(&port->port.open_wait, &wait); | ||
574 | |||
575 | spin_lock_irqsave(&port->slock, flags); | ||
576 | if (!tty_hung_up_p(filp)) | ||
577 | port->port.count--; | ||
578 | spin_unlock_irqrestore(&port->slock, flags); | ||
579 | port->port.blocked_open++; | ||
580 | while (1) { | ||
581 | spin_lock_irqsave(&port->slock, flags); | ||
582 | outb(inb(port->ioaddr + UART_MCR) | | ||
583 | UART_MCR_DTR | UART_MCR_RTS, port->ioaddr + UART_MCR); | ||
584 | spin_unlock_irqrestore(&port->slock, flags); | ||
585 | set_current_state(TASK_INTERRUPTIBLE); | ||
586 | if (tty_hung_up_p(filp) || !(port->port.flags & ASYNC_INITIALIZED)) { | ||
587 | if (port->port.flags & ASYNC_HUP_NOTIFY) | ||
588 | retval = -EAGAIN; | ||
589 | else | ||
590 | retval = -ERESTARTSYS; | ||
591 | break; | ||
592 | } | ||
593 | if (!(port->port.flags & ASYNC_CLOSING) && | ||
594 | (do_clocal || | ||
595 | (inb(port->ioaddr + UART_MSR) & UART_MSR_DCD))) | ||
596 | break; | ||
597 | if (signal_pending(current)) { | ||
598 | retval = -ERESTARTSYS; | ||
599 | break; | ||
600 | } | ||
601 | schedule(); | ||
602 | } | ||
603 | set_current_state(TASK_RUNNING); | ||
604 | remove_wait_queue(&port->port.open_wait, &wait); | ||
605 | if (!tty_hung_up_p(filp)) | ||
606 | port->port.count++; | ||
607 | port->port.blocked_open--; | ||
608 | if (retval) | ||
609 | return retval; | ||
610 | port->port.flags |= ASYNC_NORMAL_ACTIVE; | ||
611 | return 0; | ||
612 | } | 559 | } |
613 | 560 | ||
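With mxser's open-coded wait loop gone, the two small callbacks above are all the driver contributes; the core combines them while an open() blocks for carrier. A simplified sketch of that interaction, shaped like the moxa wait loop shown earlier (hangup and CLOCAL handling omitted; the real loop is tty_port_block_til_ready()):

	#include <linux/sched.h>
	#include <linux/tty.h>
	#include <linux/wait.h>

	/* Sketch: how the core might drive the two callbacks during open(). */
	static int wait_for_carrier_sketch(struct tty_port *port)
	{
		DEFINE_WAIT(wait);
		int retval = 0;

		while (1) {
			prepare_to_wait(&port->open_wait, &wait, TASK_INTERRUPTIBLE);
			port->ops->raise_dtr_rts(port);	/* keep DTR/RTS asserted */
			if (port->ops->carrier_raised(port))
				break;			/* carrier present: proceed */
			if (signal_pending(current)) {
				retval = -ERESTARTSYS;
				break;
			}
			schedule();	/* woken when the modem status changes */
		}
		finish_wait(&port->open_wait, &wait);
		return retval;
	}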
614 | static int mxser_set_baud(struct tty_struct *tty, long newspd) | 561 | static int mxser_set_baud(struct tty_struct *tty, long newspd) |
@@ -1087,14 +1034,14 @@ static int mxser_open(struct tty_struct *tty, struct file *filp) | |||
1087 | /* | 1034 | /* |
1088 | * Start up serial port | 1035 | * Start up serial port |
1089 | */ | 1036 | */ |
1090 | spin_lock_irqsave(&info->slock, flags); | 1037 | spin_lock_irqsave(&info->port.lock, flags); |
1091 | info->port.count++; | 1038 | info->port.count++; |
1092 | spin_unlock_irqrestore(&info->slock, flags); | 1039 | spin_unlock_irqrestore(&info->port.lock, flags); |
1093 | retval = mxser_startup(tty); | 1040 | retval = mxser_startup(tty); |
1094 | if (retval) | 1041 | if (retval) |
1095 | return retval; | 1042 | return retval; |
1096 | 1043 | ||
1097 | retval = mxser_block_til_ready(tty, filp, info); | 1044 | retval = tty_port_block_til_ready(&info->port, tty, filp); |
1098 | if (retval) | 1045 | if (retval) |
1099 | return retval; | 1046 | return retval; |
1100 | 1047 | ||
@@ -1133,58 +1080,27 @@ static void mxser_flush_buffer(struct tty_struct *tty) | |||
1133 | static void mxser_close(struct tty_struct *tty, struct file *filp) | 1080 | static void mxser_close(struct tty_struct *tty, struct file *filp) |
1134 | { | 1081 | { |
1135 | struct mxser_port *info = tty->driver_data; | 1082 | struct mxser_port *info = tty->driver_data; |
1083 | struct tty_port *port = &info->port; | ||
1136 | 1084 | ||
1137 | unsigned long timeout; | 1085 | unsigned long timeout; |
1138 | unsigned long flags; | ||
1139 | 1086 | ||
1140 | if (tty->index == MXSER_PORTS) | 1087 | if (tty->index == MXSER_PORTS) |
1141 | return; | 1088 | return; |
1142 | if (!info) | 1089 | if (!info) |
1143 | return; | 1090 | return; |
1144 | 1091 | ||
1145 | spin_lock_irqsave(&info->slock, flags); | 1092 | if (tty_port_close_start(port, tty, filp) == 0) |
1146 | |||
1147 | if (tty_hung_up_p(filp)) { | ||
1148 | spin_unlock_irqrestore(&info->slock, flags); | ||
1149 | return; | ||
1150 | } | ||
1151 | if ((tty->count == 1) && (info->port.count != 1)) { | ||
1152 | /* | ||
1153 | * Uh, oh. tty->count is 1, which means that the tty | ||
1154 | * structure will be freed. Info->port.count should always | ||
1155 | * be one in these conditions. If it's greater than | ||
1156 | * one, we've got real problems, since it means the | ||
1157 | * serial port won't be shutdown. | ||
1158 | */ | ||
1159 | printk(KERN_ERR "mxser_close: bad serial port count; " | ||
1160 | "tty->count is 1, info->port.count is %d\n", info->port.count); | ||
1161 | info->port.count = 1; | ||
1162 | } | ||
1163 | if (--info->port.count < 0) { | ||
1164 | printk(KERN_ERR "mxser_close: bad serial port count for " | ||
1165 | "ttys%d: %d\n", tty->index, info->port.count); | ||
1166 | info->port.count = 0; | ||
1167 | } | ||
1168 | if (info->port.count) { | ||
1169 | spin_unlock_irqrestore(&info->slock, flags); | ||
1170 | return; | 1093 | return; |
1171 | } | 1094 | |
1172 | info->port.flags |= ASYNC_CLOSING; | ||
1173 | spin_unlock_irqrestore(&info->slock, flags); | ||
1174 | /* | 1095 | /* |
1175 | * Save the termios structure, since this port may have | 1096 | * Save the termios structure, since this port may have |
1176 | * separate termios for callout and dialin. | 1097 | * separate termios for callout and dialin. |
1098 | * | ||
1099 | * FIXME: Can this go? | ||
1177 | */ | 1100 | */ |
1178 | if (info->port.flags & ASYNC_NORMAL_ACTIVE) | 1101 | if (info->port.flags & ASYNC_NORMAL_ACTIVE) |
1179 | info->normal_termios = *tty->termios; | 1102 | info->normal_termios = *tty->termios; |
1180 | /* | 1103 | /* |
1181 | * Now we wait for the transmit buffer to clear; and we notify | ||
1182 | * the line discipline to only process XON/XOFF characters. | ||
1183 | */ | ||
1184 | tty->closing = 1; | ||
1185 | if (info->port.closing_wait != ASYNC_CLOSING_WAIT_NONE) | ||
1186 | tty_wait_until_sent(tty, info->port.closing_wait); | ||
1187 | /* | ||
1188 | * At this point we stop accepting input. To do this, we | 1104 | * At this point we stop accepting input. To do this, we |
1189 | * disable the receive line status interrupts, and tell the | 1105 | * disable the receive line status interrupts, and tell the |
1190 | * interrupt driver to stop checking the data ready bit in the | 1106 | * interrupt driver to stop checking the data ready bit in the |
@@ -1209,19 +1125,12 @@ static void mxser_close(struct tty_struct *tty, struct file *filp) | |||
1209 | } | 1125 | } |
1210 | } | 1126 | } |
1211 | mxser_shutdown(tty); | 1127 | mxser_shutdown(tty); |
1212 | |||
1213 | mxser_flush_buffer(tty); | 1128 | mxser_flush_buffer(tty); |
1214 | tty_ldisc_flush(tty); | ||
1215 | |||
1216 | tty->closing = 0; | ||
1217 | tty_port_tty_set(&info->port, NULL); | ||
1218 | if (info->port.blocked_open) { | ||
1219 | if (info->port.close_delay) | ||
1220 | schedule_timeout_interruptible(info->port.close_delay); | ||
1221 | wake_up_interruptible(&info->port.open_wait); | ||
1222 | } | ||
1223 | 1129 | ||
1224 | info->port.flags &= ~(ASYNC_NORMAL_ACTIVE | ASYNC_CLOSING); | 1130 | /* Right now the tty_port set is done outside of the close_end helper |
1131 | as we don't yet have everyone using refcounts */ | ||
1132 | tty_port_close_end(port, tty); | ||
1133 | tty_port_tty_set(port, NULL); | ||
1225 | } | 1134 | } |
1226 | 1135 | ||
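tty_port_close_start() and tty_port_close_end() now bracket the driver-specific shutdown in mxser_close(). Judging from the open-coded logic they replace, the division of labour is roughly as sketched below (an approximation; the helpers' authoritative versions live in the tty core):

	/* Sketch: what the two close helpers take over from the driver. */
	static void example_close(struct tty_port *port, struct tty_struct *tty,
				  struct file *filp)
	{
		/* close_start: hangup check, port->count bookkeeping, sets
		   ASYNC_CLOSING and waits up to closing_wait for output to
		   drain. Returns 0 when this is not the final close. */
		if (tty_port_close_start(port, tty, filp) == 0)
			return;

		/* ... driver-specific hardware shutdown goes here ... */

		/* close_end: honours close_delay, wakes blocked openers and
		   clears ASYNC_NORMAL_ACTIVE | ASYNC_CLOSING. */
		tty_port_close_end(port, tty);
		tty_port_tty_set(port, NULL);
	}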
1227 | static int mxser_write(struct tty_struct *tty, const unsigned char *buf, int count) | 1136 | static int mxser_write(struct tty_struct *tty, const unsigned char *buf, int count) |
@@ -2146,10 +2055,7 @@ static void mxser_hangup(struct tty_struct *tty) | |||
2146 | 2055 | ||
2147 | mxser_flush_buffer(tty); | 2056 | mxser_flush_buffer(tty); |
2148 | mxser_shutdown(tty); | 2057 | mxser_shutdown(tty); |
2149 | info->port.count = 0; | 2058 | tty_port_hangup(&info->port); |
2150 | info->port.flags &= ~ASYNC_NORMAL_ACTIVE; | ||
2151 | tty_port_tty_set(&info->port, NULL); | ||
2152 | wake_up_interruptible(&info->port.open_wait); | ||
2153 | } | 2059 | } |
2154 | 2060 | ||
2155 | /* | 2061 | /* |
@@ -2449,6 +2355,11 @@ static const struct tty_operations mxser_ops = { | |||
2449 | .tiocmset = mxser_tiocmset, | 2355 | .tiocmset = mxser_tiocmset, |
2450 | }; | 2356 | }; |
2451 | 2357 | ||
2358 | static const struct tty_port_operations mxser_port_ops = { | ||
2359 | .carrier_raised = mxser_carrier_raised, | ||
2360 | .raise_dtr_rts = mxser_raise_dtr_rts, | ||
2361 | }; | ||
2362 | |||
2452 | /* | 2363 | /* |
2453 | * The MOXA Smartio/Industio serial driver boot-time initialization code! | 2364 | * The MOXA Smartio/Industio serial driver boot-time initialization code! |
2454 | */ | 2365 | */ |
@@ -2482,6 +2393,7 @@ static int __devinit mxser_initbrd(struct mxser_board *brd, | |||
2482 | for (i = 0; i < brd->info->nports; i++) { | 2393 | for (i = 0; i < brd->info->nports; i++) { |
2483 | info = &brd->ports[i]; | 2394 | info = &brd->ports[i]; |
2484 | tty_port_init(&info->port); | 2395 | tty_port_init(&info->port); |
2396 | info->port.ops = &mxser_port_ops; | ||
2485 | info->board = brd; | 2397 | info->board = brd; |
2486 | info->stop_rx = 0; | 2398 | info->stop_rx = 0; |
2487 | info->ldisc_stop_rx = 0; | 2399 | info->ldisc_stop_rx = 0; |
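Taken together, the mxser conversion follows the same recipe as istallion and moxa above: embed a tty_port in the per-port structure, point port.ops at a tty_port_operations table at init time, and let the core helpers drive the callbacks. A condensed sketch of the wiring (names and the close_delay value hypothetical):

	#include <linux/tty.h>

	static int example_carrier_raised(struct tty_port *port);
	static void example_raise_dtr_rts(struct tty_port *port);

	static const struct tty_port_operations example_port_ops = {
		.carrier_raised	= example_carrier_raised,
		.raise_dtr_rts	= example_raise_dtr_rts,
	};

	static void example_init_one_port(struct example_port *p)
	{
		tty_port_init(&p->port);		/* locks, waitqueues, counts */
		p->port.ops = &example_port_ops;	/* hook up the callbacks */
		p->port.close_delay = HZ / 2;		/* example policy value */
	}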
diff --git a/drivers/char/n_r3964.c b/drivers/char/n_r3964.c index 4a8215a89ad3..d2e93e343226 100644 --- a/drivers/char/n_r3964.c +++ b/drivers/char/n_r3964.c | |||
@@ -1003,7 +1003,7 @@ static int r3964_open(struct tty_struct *tty) | |||
1003 | 1003 | ||
1004 | static void r3964_close(struct tty_struct *tty) | 1004 | static void r3964_close(struct tty_struct *tty) |
1005 | { | 1005 | { |
1006 | struct r3964_info *pInfo = (struct r3964_info *)tty->disc_data; | 1006 | struct r3964_info *pInfo = tty->disc_data; |
1007 | struct r3964_client_info *pClient, *pNext; | 1007 | struct r3964_client_info *pClient, *pNext; |
1008 | struct r3964_message *pMsg; | 1008 | struct r3964_message *pMsg; |
1009 | struct r3964_block_header *pHeader, *pNextHeader; | 1009 | struct r3964_block_header *pHeader, *pNextHeader; |
@@ -1058,7 +1058,7 @@ static void r3964_close(struct tty_struct *tty) | |||
1058 | static ssize_t r3964_read(struct tty_struct *tty, struct file *file, | 1058 | static ssize_t r3964_read(struct tty_struct *tty, struct file *file, |
1059 | unsigned char __user * buf, size_t nr) | 1059 | unsigned char __user * buf, size_t nr) |
1060 | { | 1060 | { |
1061 | struct r3964_info *pInfo = (struct r3964_info *)tty->disc_data; | 1061 | struct r3964_info *pInfo = tty->disc_data; |
1062 | struct r3964_client_info *pClient; | 1062 | struct r3964_client_info *pClient; |
1063 | struct r3964_message *pMsg; | 1063 | struct r3964_message *pMsg; |
1064 | struct r3964_client_message theMsg; | 1064 | struct r3964_client_message theMsg; |
@@ -1113,7 +1113,7 @@ static ssize_t r3964_read(struct tty_struct *tty, struct file *file, | |||
1113 | static ssize_t r3964_write(struct tty_struct *tty, struct file *file, | 1113 | static ssize_t r3964_write(struct tty_struct *tty, struct file *file, |
1114 | const unsigned char *data, size_t count) | 1114 | const unsigned char *data, size_t count) |
1115 | { | 1115 | { |
1116 | struct r3964_info *pInfo = (struct r3964_info *)tty->disc_data; | 1116 | struct r3964_info *pInfo = tty->disc_data; |
1117 | struct r3964_block_header *pHeader; | 1117 | struct r3964_block_header *pHeader; |
1118 | struct r3964_client_info *pClient; | 1118 | struct r3964_client_info *pClient; |
1119 | unsigned char *new_data; | 1119 | unsigned char *new_data; |
@@ -1182,7 +1182,7 @@ static ssize_t r3964_write(struct tty_struct *tty, struct file *file, | |||
1182 | static int r3964_ioctl(struct tty_struct *tty, struct file *file, | 1182 | static int r3964_ioctl(struct tty_struct *tty, struct file *file, |
1183 | unsigned int cmd, unsigned long arg) | 1183 | unsigned int cmd, unsigned long arg) |
1184 | { | 1184 | { |
1185 | struct r3964_info *pInfo = (struct r3964_info *)tty->disc_data; | 1185 | struct r3964_info *pInfo = tty->disc_data; |
1186 | if (pInfo == NULL) | 1186 | if (pInfo == NULL) |
1187 | return -EINVAL; | 1187 | return -EINVAL; |
1188 | switch (cmd) { | 1188 | switch (cmd) { |
@@ -1216,7 +1216,7 @@ static void r3964_set_termios(struct tty_struct *tty, struct ktermios *old) | |||
1216 | static unsigned int r3964_poll(struct tty_struct *tty, struct file *file, | 1216 | static unsigned int r3964_poll(struct tty_struct *tty, struct file *file, |
1217 | struct poll_table_struct *wait) | 1217 | struct poll_table_struct *wait) |
1218 | { | 1218 | { |
1219 | struct r3964_info *pInfo = (struct r3964_info *)tty->disc_data; | 1219 | struct r3964_info *pInfo = tty->disc_data; |
1220 | struct r3964_client_info *pClient; | 1220 | struct r3964_client_info *pClient; |
1221 | struct r3964_message *pMsg = NULL; | 1221 | struct r3964_message *pMsg = NULL; |
1222 | unsigned long flags; | 1222 | unsigned long flags; |
@@ -1241,7 +1241,7 @@ static unsigned int r3964_poll(struct tty_struct *tty, struct file *file, | |||
1241 | static void r3964_receive_buf(struct tty_struct *tty, const unsigned char *cp, | 1241 | static void r3964_receive_buf(struct tty_struct *tty, const unsigned char *cp, |
1242 | char *fp, int count) | 1242 | char *fp, int count) |
1243 | { | 1243 | { |
1244 | struct r3964_info *pInfo = (struct r3964_info *)tty->disc_data; | 1244 | struct r3964_info *pInfo = tty->disc_data; |
1245 | const unsigned char *p; | 1245 | const unsigned char *p; |
1246 | char *f, flags = 0; | 1246 | char *f, flags = 0; |
1247 | int i; | 1247 | int i; |
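The n_r3964 hunks above only drop redundant casts: tty->disc_data is a void *, and C converts void * to any object pointer type implicitly, so the cast added nothing but noise. In miniature:

	struct r3964_info_example { int dummy; };

	void assign_example(void *disc_data)
	{
		/* No cast needed: void * converts implicitly in C (unlike C++). */
		struct r3964_info_example *pInfo = disc_data;
		(void)pInfo;
	}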
diff --git a/drivers/char/n_tty.c b/drivers/char/n_tty.c index efbfe9612658..f6f0e4ec2b51 100644 --- a/drivers/char/n_tty.c +++ b/drivers/char/n_tty.c | |||
@@ -47,8 +47,8 @@ | |||
47 | #include <linux/bitops.h> | 47 | #include <linux/bitops.h> |
48 | #include <linux/audit.h> | 48 | #include <linux/audit.h> |
49 | #include <linux/file.h> | 49 | #include <linux/file.h> |
50 | #include <linux/uaccess.h> | ||
50 | 51 | ||
51 | #include <asm/uaccess.h> | ||
52 | #include <asm/system.h> | 52 | #include <asm/system.h> |
53 | 53 | ||
54 | /* number of characters left in xmit buffer before select says we have room */ | 54 | /* number of characters left in xmit buffer before select says we have room */ |
@@ -62,6 +62,17 @@ | |||
62 | #define TTY_THRESHOLD_THROTTLE 128 /* now based on remaining room */ | 62 | #define TTY_THRESHOLD_THROTTLE 128 /* now based on remaining room */ |
63 | #define TTY_THRESHOLD_UNTHROTTLE 128 | 63 | #define TTY_THRESHOLD_UNTHROTTLE 128 |
64 | 64 | ||
65 | /* | ||
66 | * Special byte codes used in the echo buffer to represent operations | ||
67 | * or special handling of characters. Bytes in the echo buffer that | ||
68 | * are not part of such special blocks are treated as normal character | ||
69 | * codes. | ||
70 | */ | ||
71 | #define ECHO_OP_START 0xff | ||
72 | #define ECHO_OP_MOVE_BACK_COL 0x80 | ||
73 | #define ECHO_OP_SET_CANON_COL 0x81 | ||
74 | #define ECHO_OP_ERASE_TAB 0x82 | ||
75 | |||
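The new defines above give the echo buffer a tiny escape protocol: 0xff (ECHO_OP_START) introduces a multi-byte operation, and a literal 0xff data byte is escaped by doubling it. The byte layouts implied by the code that follows, collected for reference:

	plain character 'a'                 one byte:    0x61
	literal 0xff data byte              two bytes:   0xff 0xff
	move back one column                two bytes:   0xff 0x80
	set canon column                    two bytes:   0xff 0x81
	erase tab (n columns used)          three bytes: 0xff 0x82 n
	control char c (echoed as "^X")     two bytes:   0xff c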
65 | static inline unsigned char *alloc_buf(void) | 76 | static inline unsigned char *alloc_buf(void) |
66 | { | 77 | { |
67 | gfp_t prio = in_interrupt() ? GFP_ATOMIC : GFP_KERNEL; | 78 | gfp_t prio = in_interrupt() ? GFP_ATOMIC : GFP_KERNEL; |
@@ -169,6 +180,7 @@ static void check_unthrottle(struct tty_struct *tty) | |||
169 | * | 180 | * |
170 | * Locking: tty_read_lock for read fields. | 181 | * Locking: tty_read_lock for read fields. |
171 | */ | 182 | */ |
183 | |||
172 | static void reset_buffer_flags(struct tty_struct *tty) | 184 | static void reset_buffer_flags(struct tty_struct *tty) |
173 | { | 185 | { |
174 | unsigned long flags; | 186 | unsigned long flags; |
@@ -176,6 +188,11 @@ static void reset_buffer_flags(struct tty_struct *tty) | |||
176 | spin_lock_irqsave(&tty->read_lock, flags); | 188 | spin_lock_irqsave(&tty->read_lock, flags); |
177 | tty->read_head = tty->read_tail = tty->read_cnt = 0; | 189 | tty->read_head = tty->read_tail = tty->read_cnt = 0; |
178 | spin_unlock_irqrestore(&tty->read_lock, flags); | 190 | spin_unlock_irqrestore(&tty->read_lock, flags); |
191 | |||
192 | mutex_lock(&tty->echo_lock); | ||
193 | tty->echo_pos = tty->echo_cnt = tty->echo_overrun = 0; | ||
194 | mutex_unlock(&tty->echo_lock); | ||
195 | |||
179 | tty->canon_head = tty->canon_data = tty->erasing = 0; | 196 | tty->canon_head = tty->canon_data = tty->erasing = 0; |
180 | memset(&tty->read_flags, 0, sizeof tty->read_flags); | 197 | memset(&tty->read_flags, 0, sizeof tty->read_flags); |
181 | n_tty_set_room(tty); | 198 | n_tty_set_room(tty); |
@@ -266,89 +283,118 @@ static inline int is_continuation(unsigned char c, struct tty_struct *tty) | |||
266 | } | 283 | } |
267 | 284 | ||
268 | /** | 285 | /** |
269 | * opost - output post processor | 286 | * do_output_char - output one character |
270 | * @c: character (or partial unicode symbol) | 287 | * @c: character (or partial unicode symbol) |
271 | * @tty: terminal device | 288 | * @tty: terminal device |
289 | * @space: space available in tty driver write buffer | ||
272 | * | 290 | * |
273 | * Perform OPOST processing. Returns -1 when the output device is | 291 | * This is a helper function that handles one output character |
274 | * full and the character must be retried. Note that Linux currently | 292 | * (including special characters like TAB, CR, LF, etc.), |
275 | * ignores TABDLY, CRDLY, VTDLY, FFDLY and NLDLY. They simply aren't | 293 | * putting the results in the tty driver's write buffer. |
276 | * relevant in the world today. If you ever need them, add them here. | 294 | * |
295 | * Note that Linux currently ignores TABDLY, CRDLY, VTDLY, FFDLY | ||
296 | * and NLDLY. They simply aren't relevant in the world today. | ||
297 | * If you ever need them, add them here. | ||
298 | * | ||
299 | * Returns the number of bytes of buffer space used or -1 if | ||
300 | * no space left. | ||
277 | * | 301 | * |
278 | * Called from both the receive and transmit sides and can be called | 302 | * Locking: should be called under the output_lock to protect |
279 | * re-entrantly. Relies on lock_kernel() for tty->column state. | 303 | * the column state and space left in the buffer |
280 | */ | 304 | */ |
281 | 305 | ||
282 | static int opost(unsigned char c, struct tty_struct *tty) | 306 | static int do_output_char(unsigned char c, struct tty_struct *tty, int space) |
283 | { | 307 | { |
284 | int space, spaces; | 308 | int spaces; |
285 | 309 | ||
286 | space = tty_write_room(tty); | ||
287 | if (!space) | 310 | if (!space) |
288 | return -1; | 311 | return -1; |
289 | 312 | ||
290 | lock_kernel(); | 313 | switch (c) { |
291 | if (O_OPOST(tty)) { | 314 | case '\n': |
292 | switch (c) { | 315 | if (O_ONLRET(tty)) |
293 | case '\n': | 316 | tty->column = 0; |
294 | if (O_ONLRET(tty)) | 317 | if (O_ONLCR(tty)) { |
295 | tty->column = 0; | 318 | if (space < 2) |
296 | if (O_ONLCR(tty)) { | 319 | return -1; |
297 | if (space < 2) { | ||
298 | unlock_kernel(); | ||
299 | return -1; | ||
300 | } | ||
301 | tty_put_char(tty, '\r'); | ||
302 | tty->column = 0; | ||
303 | } | ||
304 | tty->canon_column = tty->column; | ||
305 | break; | ||
306 | case '\r': | ||
307 | if (O_ONOCR(tty) && tty->column == 0) { | ||
308 | unlock_kernel(); | ||
309 | return 0; | ||
310 | } | ||
311 | if (O_OCRNL(tty)) { | ||
312 | c = '\n'; | ||
313 | if (O_ONLRET(tty)) | ||
314 | tty->canon_column = tty->column = 0; | ||
315 | break; | ||
316 | } | ||
317 | tty->canon_column = tty->column = 0; | 320 | tty->canon_column = tty->column = 0; |
321 | tty_put_char(tty, '\r'); | ||
322 | tty_put_char(tty, c); | ||
323 | return 2; | ||
324 | } | ||
325 | tty->canon_column = tty->column; | ||
326 | break; | ||
327 | case '\r': | ||
328 | if (O_ONOCR(tty) && tty->column == 0) | ||
329 | return 0; | ||
330 | if (O_OCRNL(tty)) { | ||
331 | c = '\n'; | ||
332 | if (O_ONLRET(tty)) | ||
333 | tty->canon_column = tty->column = 0; | ||
318 | break; | 334 | break; |
319 | case '\t': | 335 | } |
320 | spaces = 8 - (tty->column & 7); | 336 | tty->canon_column = tty->column = 0; |
321 | if (O_TABDLY(tty) == XTABS) { | 337 | break; |
322 | if (space < spaces) { | 338 | case '\t': |
323 | unlock_kernel(); | 339 | spaces = 8 - (tty->column & 7); |
324 | return -1; | 340 | if (O_TABDLY(tty) == XTABS) { |
325 | } | 341 | if (space < spaces) |
326 | tty->column += spaces; | 342 | return -1; |
327 | tty->ops->write(tty, " ", spaces); | ||
328 | unlock_kernel(); | ||
329 | return 0; | ||
330 | } | ||
331 | tty->column += spaces; | 343 | tty->column += spaces; |
332 | break; | 344 | tty->ops->write(tty, " ", spaces); |
333 | case '\b': | 345 | return spaces; |
334 | if (tty->column > 0) | 346 | } |
335 | tty->column--; | 347 | tty->column += spaces; |
336 | break; | 348 | break; |
337 | default: | 349 | case '\b': |
350 | if (tty->column > 0) | ||
351 | tty->column--; | ||
352 | break; | ||
353 | default: | ||
354 | if (!iscntrl(c)) { | ||
338 | if (O_OLCUC(tty)) | 355 | if (O_OLCUC(tty)) |
339 | c = toupper(c); | 356 | c = toupper(c); |
340 | if (!iscntrl(c) && !is_continuation(c, tty)) | 357 | if (!is_continuation(c, tty)) |
341 | tty->column++; | 358 | tty->column++; |
342 | break; | ||
343 | } | 359 | } |
360 | break; | ||
344 | } | 361 | } |
362 | |||
345 | tty_put_char(tty, c); | 363 | tty_put_char(tty, c); |
346 | unlock_kernel(); | 364 | return 1; |
347 | return 0; | ||
348 | } | 365 | } |
349 | 366 | ||
350 | /** | 367 | /** |
351 | * opost_block - block postprocess | 368 | * process_output - output post processor |
369 | * @c: character (or partial unicode symbol) | ||
370 | * @tty: terminal device | ||
371 | * | ||
372 | * Perform OPOST processing. Returns -1 when the output device is | ||
373 | * full and the character must be retried. | ||
374 | * | ||
375 | * Locking: output_lock to protect column state and space left | ||
376 | * (also, this is called from n_tty_write under the | ||
377 | * tty layer write lock) | ||
378 | */ | ||
379 | |||
380 | static int process_output(unsigned char c, struct tty_struct *tty) | ||
381 | { | ||
382 | int space, retval; | ||
383 | |||
384 | mutex_lock(&tty->output_lock); | ||
385 | |||
386 | space = tty_write_room(tty); | ||
387 | retval = do_output_char(c, tty, space); | ||
388 | |||
389 | mutex_unlock(&tty->output_lock); | ||
390 | if (retval < 0) | ||
391 | return -1; | ||
392 | else | ||
393 | return 0; | ||
394 | } | ||
395 | |||
396 | /** | ||
397 | * process_output_block - block post processor | ||
352 | * @tty: terminal device | 398 | * @tty: terminal device |
353 | * @inbuf: user buffer | 399 | * @inbuf: user buffer |
354 | * @nr: number of bytes | 400 | * @nr: number of bytes |
@@ -358,26 +404,32 @@ static int opost(unsigned char c, struct tty_struct *tty) | |||
358 | * the simple cases normally found and helps to generate blocks of | 404 | * the simple cases normally found and helps to generate blocks of |
359 | * symbols for the console driver and thus improve performance. | 405 | * symbols for the console driver and thus improve performance. |
360 | * | 406 | * |
361 | * Called from n_tty_write under the tty layer write lock. Relies | 407 | * Locking: output_lock to protect column state and space left |
362 | * on lock_kernel for the tty->column state. | 408 | * (also, this is called from n_tty_write under the |
409 | * tty layer write lock) | ||
363 | */ | 410 | */ |
364 | 411 | ||
365 | static ssize_t opost_block(struct tty_struct *tty, | 412 | static ssize_t process_output_block(struct tty_struct *tty, |
366 | const unsigned char *buf, unsigned int nr) | 413 | const unsigned char *buf, unsigned int nr) |
367 | { | 414 | { |
368 | int space; | 415 | int space; |
369 | int i; | 416 | int i; |
370 | const unsigned char *cp; | 417 | const unsigned char *cp; |
371 | 418 | ||
419 | mutex_lock(&tty->output_lock); | ||
420 | |||
372 | space = tty_write_room(tty); | 421 | space = tty_write_room(tty); |
373 | if (!space) | 422 | if (!space) { |
423 | mutex_unlock(&tty->output_lock); | ||
374 | return 0; | 424 | return 0; |
425 | } | ||
375 | if (nr > space) | 426 | if (nr > space) |
376 | nr = space; | 427 | nr = space; |
377 | 428 | ||
378 | lock_kernel(); | ||
379 | for (i = 0, cp = buf; i < nr; i++, cp++) { | 429 | for (i = 0, cp = buf; i < nr; i++, cp++) { |
380 | switch (*cp) { | 430 | unsigned char c = *cp; |
431 | |||
432 | switch (c) { | ||
381 | case '\n': | 433 | case '\n': |
382 | if (O_ONLRET(tty)) | 434 | if (O_ONLRET(tty)) |
383 | tty->column = 0; | 435 | tty->column = 0; |
@@ -399,54 +451,403 @@ static ssize_t opost_block(struct tty_struct *tty, | |||
399 | tty->column--; | 451 | tty->column--; |
400 | break; | 452 | break; |
401 | default: | 453 | default: |
402 | if (O_OLCUC(tty)) | 454 | if (!iscntrl(c)) { |
403 | goto break_out; | 455 | if (O_OLCUC(tty)) |
404 | if (!iscntrl(*cp)) | 456 | goto break_out; |
405 | tty->column++; | 457 | if (!is_continuation(c, tty)) |
458 | tty->column++; | ||
459 | } | ||
406 | break; | 460 | break; |
407 | } | 461 | } |
408 | } | 462 | } |
409 | break_out: | 463 | break_out: |
410 | if (tty->ops->flush_chars) | ||
411 | tty->ops->flush_chars(tty); | ||
412 | i = tty->ops->write(tty, buf, i); | 464 | i = tty->ops->write(tty, buf, i); |
413 | unlock_kernel(); | 465 | |
466 | mutex_unlock(&tty->output_lock); | ||
414 | return i; | 467 | return i; |
415 | } | 468 | } |
416 | 469 | ||
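process_output_block() fast-paths the common case: it scans the buffer while each byte can be emitted verbatim, then hands the whole scanned prefix to the driver in one write; any byte that would need expansion ('\n' under ONLCR, tabs under XTABS, uppercasing under OLCUC) stops the scan via break_out. The calling pattern that results, as a sketch assuming a caller shaped like n_tty_write:

	/* Sketch: consume a user buffer through the block post-processor. */
	while (nr > 0) {
		ssize_t num = process_output_block(tty, b, nr);
		b += num;
		nr -= num;
		if (nr == 0)
			break;
		/* Next byte needs expansion: take the per-character path. */
		if (process_output(*b, tty) < 0)
			break;		/* driver buffer full: wait and retry */
		b++;
		nr--;
	}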
470 | /** | ||
471 | * process_echoes - write pending echo characters | ||
472 | * @tty: terminal device | ||
473 | * | ||
474 | * Write previously buffered echo (and other ldisc-generated) | ||
475 | * characters to the tty. | ||
476 | * | ||
477 | * Characters generated by the ldisc (including echoes) need to | ||
478 | * be buffered because the driver's write buffer can fill during | ||
479 | * heavy program output. Echoing straight to the driver will | ||
480 | * often fail under these conditions, causing lost characters and | ||
481 | * resulting in mismatches of ldisc state information. | ||
482 | * | ||
483 | * Since the ldisc state must represent the characters actually sent | ||
484 | * to the driver at the time of the write, operations like certain | ||
485 | * changes in column state are also saved in the buffer and executed | ||
486 | * here. | ||
487 | * | ||
488 | * A circular fifo buffer is used so that the most recent characters | ||
489 | * are prioritized. Also, when control characters are echoed with a | ||
490 | * prefixed "^", the pair is treated atomically and thus not separated. | ||
491 | * | ||
492 | * Locking: output_lock to protect column state and space left, | ||
493 | * echo_lock to protect the echo buffer | ||
494 | */ | ||
495 | |||
496 | static void process_echoes(struct tty_struct *tty) | ||
497 | { | ||
498 | int space, nr; | ||
499 | unsigned char c; | ||
500 | unsigned char *cp, *buf_end; | ||
501 | |||
502 | if (!tty->echo_cnt) | ||
503 | return; | ||
504 | |||
505 | mutex_lock(&tty->output_lock); | ||
506 | mutex_lock(&tty->echo_lock); | ||
507 | |||
508 | space = tty_write_room(tty); | ||
509 | |||
510 | buf_end = tty->echo_buf + N_TTY_BUF_SIZE; | ||
511 | cp = tty->echo_buf + tty->echo_pos; | ||
512 | nr = tty->echo_cnt; | ||
513 | while (nr > 0) { | ||
514 | c = *cp; | ||
515 | if (c == ECHO_OP_START) { | ||
516 | unsigned char op; | ||
517 | unsigned char *opp; | ||
518 | int no_space_left = 0; | ||
519 | |||
520 | /* | ||
521 | * If the buffer byte is the start of a multi-byte | ||
522 | * operation, get the next byte, which is either the | ||
523 | * op code or a control character value. | ||
524 | */ | ||
525 | opp = cp + 1; | ||
526 | if (opp == buf_end) | ||
527 | opp -= N_TTY_BUF_SIZE; | ||
528 | op = *opp; | ||
529 | |||
530 | switch (op) { | ||
531 | unsigned int num_chars, num_bs; | ||
532 | |||
533 | case ECHO_OP_ERASE_TAB: | ||
534 | if (++opp == buf_end) | ||
535 | opp -= N_TTY_BUF_SIZE; | ||
536 | num_chars = *opp; | ||
537 | |||
538 | /* | ||
539 | * Determine how many columns to go back | ||
540 | * in order to erase the tab. | ||
541 | * This depends on the number of columns | ||
542 | * used by other characters within the tab | ||
543 | * area. If this (modulo 8) count is from | ||
544 | * the start of input rather than from a | ||
545 | * previous tab, we offset by canon column. | ||
546 | * Otherwise, tab spacing is normal. | ||
547 | */ | ||
548 | if (!(num_chars & 0x80)) | ||
549 | num_chars += tty->canon_column; | ||
550 | num_bs = 8 - (num_chars & 7); | ||
551 | |||
552 | if (num_bs > space) { | ||
553 | no_space_left = 1; | ||
554 | break; | ||
555 | } | ||
556 | space -= num_bs; | ||
557 | while (num_bs--) { | ||
558 | tty_put_char(tty, '\b'); | ||
559 | if (tty->column > 0) | ||
560 | tty->column--; | ||
561 | } | ||
562 | cp += 3; | ||
563 | nr -= 3; | ||
564 | break; | ||
565 | |||
566 | case ECHO_OP_SET_CANON_COL: | ||
567 | tty->canon_column = tty->column; | ||
568 | cp += 2; | ||
569 | nr -= 2; | ||
570 | break; | ||
571 | |||
572 | case ECHO_OP_MOVE_BACK_COL: | ||
573 | if (tty->column > 0) | ||
574 | tty->column--; | ||
575 | cp += 2; | ||
576 | nr -= 2; | ||
577 | break; | ||
578 | |||
579 | case ECHO_OP_START: | ||
580 | /* This is an escaped echo op start code */ | ||
581 | if (!space) { | ||
582 | no_space_left = 1; | ||
583 | break; | ||
584 | } | ||
585 | tty_put_char(tty, ECHO_OP_START); | ||
586 | tty->column++; | ||
587 | space--; | ||
588 | cp += 2; | ||
589 | nr -= 2; | ||
590 | break; | ||
591 | |||
592 | default: | ||
593 | if (iscntrl(op)) { | ||
594 | if (L_ECHOCTL(tty)) { | ||
595 | /* | ||
596 | * Ensure there is enough space | ||
597 | * for the whole ctrl pair. | ||
598 | */ | ||
599 | if (space < 2) { | ||
600 | no_space_left = 1; | ||
601 | break; | ||
602 | } | ||
603 | tty_put_char(tty, '^'); | ||
604 | tty_put_char(tty, op ^ 0100); | ||
605 | tty->column += 2; | ||
606 | space -= 2; | ||
607 | } else { | ||
608 | if (!space) { | ||
609 | no_space_left = 1; | ||
610 | break; | ||
611 | } | ||
612 | tty_put_char(tty, op); | ||
613 | space--; | ||
614 | } | ||
615 | } | ||
616 | /* | ||
617 | * If above falls through, this was an | ||
618 | * undefined op. | ||
619 | */ | ||
620 | cp += 2; | ||
621 | nr -= 2; | ||
622 | } | ||
623 | |||
624 | if (no_space_left) | ||
625 | break; | ||
626 | } else { | ||
627 | int retval; | ||
628 | |||
629 | retval = do_output_char(c, tty, space); | ||
630 | if (retval < 0) | ||
631 | break; | ||
632 | space -= retval; | ||
633 | cp += 1; | ||
634 | nr -= 1; | ||
635 | } | ||
636 | |||
637 | /* When end of circular buffer reached, wrap around */ | ||
638 | if (cp >= buf_end) | ||
639 | cp -= N_TTY_BUF_SIZE; | ||
640 | } | ||
641 | |||
642 | if (nr == 0) { | ||
643 | tty->echo_pos = 0; | ||
644 | tty->echo_cnt = 0; | ||
645 | tty->echo_overrun = 0; | ||
646 | } else { | ||
647 | int num_processed = tty->echo_cnt - nr; | ||
648 | tty->echo_pos += num_processed; | ||
649 | tty->echo_pos &= N_TTY_BUF_SIZE - 1; | ||
650 | tty->echo_cnt = nr; | ||
651 | if (num_processed > 0) | ||
652 | tty->echo_overrun = 0; | ||
653 | } | ||
654 | |||
655 | mutex_unlock(&tty->echo_lock); | ||
656 | mutex_unlock(&tty->output_lock); | ||
657 | |||
658 | if (tty->ops->flush_chars) | ||
659 | tty->ops->flush_chars(tty); | ||
660 | } | ||
417 | 661 | ||
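Throughout process_echoes() and add_echo_byte(), the index arithmetic relies on N_TTY_BUF_SIZE being a power of two, so a bitwise AND performs the circular wrap without a division. A minimal sketch of the idiom (buffer size value illustrative):

	#define EXAMPLE_BUF_SIZE 4096	/* must be a power of two */

	static unsigned int wrap(unsigned int pos, unsigned int advance)
	{
		/* (pos + advance) % EXAMPLE_BUF_SIZE, computed with a mask */
		return (pos + advance) & (EXAMPLE_BUF_SIZE - 1);
	}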
418 | /** | 662 | /** |
419 | * echo_char - echo characters | 663 | * add_echo_byte - add a byte to the echo buffer |
664 | * @c: unicode byte to echo | ||
665 | * @tty: terminal device | ||
666 | * | ||
667 | * Add a character or operation byte to the echo buffer. | ||
668 | * | ||
669 | * Should be called under the echo lock to protect the echo buffer. | ||
670 | */ | ||
671 | |||
672 | static void add_echo_byte(unsigned char c, struct tty_struct *tty) | ||
673 | { | ||
674 | int new_byte_pos; | ||
675 | |||
676 | if (tty->echo_cnt == N_TTY_BUF_SIZE) { | ||
677 | /* Circular buffer is already at capacity */ | ||
678 | new_byte_pos = tty->echo_pos; | ||
679 | |||
680 | /* | ||
681 | * Since the buffer start position needs to be advanced, | ||
682 | * be sure to step by a whole operation byte group. | ||
683 | */ | ||
684 | if (tty->echo_buf[tty->echo_pos] == ECHO_OP_START) { | ||
685 | if (tty->echo_buf[(tty->echo_pos + 1) & | ||
686 | (N_TTY_BUF_SIZE - 1)] == | ||
687 | ECHO_OP_ERASE_TAB) { | ||
688 | tty->echo_pos += 3; | ||
689 | tty->echo_cnt -= 2; | ||
690 | } else { | ||
691 | tty->echo_pos += 2; | ||
692 | tty->echo_cnt -= 1; | ||
693 | } | ||
694 | } else { | ||
695 | tty->echo_pos++; | ||
696 | } | ||
697 | tty->echo_pos &= N_TTY_BUF_SIZE - 1; | ||
698 | |||
699 | tty->echo_overrun = 1; | ||
700 | } else { | ||
701 | new_byte_pos = tty->echo_pos + tty->echo_cnt; | ||
702 | new_byte_pos &= N_TTY_BUF_SIZE - 1; | ||
703 | tty->echo_cnt++; | ||
704 | } | ||
705 | |||
706 | tty->echo_buf[new_byte_pos] = c; | ||
707 | } | ||
708 | |||
709 | /** | ||
710 | * echo_move_back_col - add operation to move back a column | ||
711 | * @tty: terminal device | ||
712 | * | ||
713 | * Add an operation to the echo buffer to move back one column. | ||
714 | * | ||
715 | * Locking: echo_lock to protect the echo buffer | ||
716 | */ | ||
717 | |||
718 | static void echo_move_back_col(struct tty_struct *tty) | ||
719 | { | ||
720 | mutex_lock(&tty->echo_lock); | ||
721 | |||
722 | add_echo_byte(ECHO_OP_START, tty); | ||
723 | add_echo_byte(ECHO_OP_MOVE_BACK_COL, tty); | ||
724 | |||
725 | mutex_unlock(&tty->echo_lock); | ||
726 | } | ||
727 | |||
728 | /** | ||
729 | * echo_set_canon_col - add operation to set the canon column | ||
730 | * @tty: terminal device | ||
731 | * | ||
732 | * Add an operation to the echo buffer to set the canon column | ||
733 | * to the current column. | ||
734 | * | ||
735 | * Locking: echo_lock to protect the echo buffer | ||
736 | */ | ||
737 | |||
738 | static void echo_set_canon_col(struct tty_struct *tty) | ||
739 | { | ||
740 | mutex_lock(&tty->echo_lock); | ||
741 | |||
742 | add_echo_byte(ECHO_OP_START, tty); | ||
743 | add_echo_byte(ECHO_OP_SET_CANON_COL, tty); | ||
744 | |||
745 | mutex_unlock(&tty->echo_lock); | ||
746 | } | ||
747 | |||
748 | /** | ||
749 | * echo_erase_tab - add operation to erase a tab | ||
750 | * @num_chars: number of character columns already used | ||
751 | * @after_tab: true if num_chars starts after a previous tab | ||
752 | * @tty: terminal device | ||
753 | * | ||
754 | * Add an operation to the echo buffer to erase a tab. | ||
755 | * | ||
756 | * Called by the eraser function, which knows how many character | ||
757 | * columns have been used since either a previous tab or the start | ||
758 | * of input. This information will be used later, along with | ||
759 | * canon column (if applicable), to go back the correct number | ||
760 | * of columns. | ||
761 | * | ||
762 | * Locking: echo_lock to protect the echo buffer | ||
763 | */ | ||
764 | |||
765 | static void echo_erase_tab(unsigned int num_chars, int after_tab, | ||
766 | struct tty_struct *tty) | ||
767 | { | ||
768 | mutex_lock(&tty->echo_lock); | ||
769 | |||
770 | add_echo_byte(ECHO_OP_START, tty); | ||
771 | add_echo_byte(ECHO_OP_ERASE_TAB, tty); | ||
772 | |||
773 | /* We only need to know this modulo 8 (tab spacing) */ | ||
774 | num_chars &= 7; | ||
775 | |||
776 | /* Set the high bit as a flag if num_chars is after a previous tab */ | ||
777 | if (after_tab) | ||
778 | num_chars |= 0x80; | ||
779 | |||
780 | add_echo_byte(num_chars, tty); | ||
781 | |||
782 | mutex_unlock(&tty->echo_lock); | ||
783 | } | ||
784 | |||
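A worked example of the erase-tab round trip: suppose three character columns were used since the start of input (after_tab == 0) and the canon column is 0. echo_erase_tab() stores num_chars = 3 with the high bit clear; when process_echoes() later replays the op, it computes the backspace count as sketched below, emitting 5 backspaces to return to the tab stop:

	/* Sketch of the consumer-side computation in process_echoes(). */
	unsigned int erase_tab_backspaces(unsigned int num_chars,
					  unsigned int canon_column)
	{
		if (!(num_chars & 0x80))	/* counted from start of input */
			num_chars += canon_column;
		return 8 - (num_chars & 7);	/* e.g. 8 - (3 & 7) = 5 */
	}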
785 | /** | ||
786 | * echo_char_raw - echo a character raw | ||
420 | * @c: unicode byte to echo | 787 | * @c: unicode byte to echo |
421 | * @tty: terminal device | 788 | * @tty: terminal device |
422 | * | 789 | * |
423 | * Echo user input back onto the screen. This must be called only when | 790 | * Echo user input back onto the screen. This must be called only when |
424 | * L_ECHO(tty) is true. Called from the driver receive_buf path. | 791 | * L_ECHO(tty) is true. Called from the driver receive_buf path. |
425 | * | 792 | * |
426 | * Relies on BKL for tty column locking | 793 | * This variant does not treat control characters specially. |
794 | * | ||
795 | * Locking: echo_lock to protect the echo buffer | ||
796 | */ | ||
797 | |||
798 | static void echo_char_raw(unsigned char c, struct tty_struct *tty) | ||
799 | { | ||
800 | mutex_lock(&tty->echo_lock); | ||
801 | |||
802 | if (c == ECHO_OP_START) { | ||
803 | add_echo_byte(ECHO_OP_START, tty); | ||
804 | add_echo_byte(ECHO_OP_START, tty); | ||
805 | } else { | ||
806 | add_echo_byte(c, tty); | ||
807 | } | ||
808 | |||
809 | mutex_unlock(&tty->echo_lock); | ||
810 | } | ||
811 | |||
812 | /** | ||
813 | * echo_char - echo a character | ||
814 | * @c: unicode byte to echo | ||
815 | * @tty: terminal device | ||
816 | * | ||
817 | * Echo user input back onto the screen. This must be called only when | ||
818 | * L_ECHO(tty) is true. Called from the driver receive_buf path. | ||
819 | * | ||
820 | * This variant tags control characters to be possibly echoed | ||
821 | * as "^X" (where X is the letter representing the control char). | ||
822 | * | ||
823 | * Locking: echo_lock to protect the echo buffer | ||
427 | */ | 824 | */ |
428 | 825 | ||
429 | static void echo_char(unsigned char c, struct tty_struct *tty) | 826 | static void echo_char(unsigned char c, struct tty_struct *tty) |
430 | { | 827 | { |
431 | if (L_ECHOCTL(tty) && iscntrl(c) && c != '\t') { | 828 | mutex_lock(&tty->echo_lock); |
432 | tty_put_char(tty, '^'); | 829 | |
433 | tty_put_char(tty, c ^ 0100); | 830 | if (c == ECHO_OP_START) { |
434 | tty->column += 2; | 831 | add_echo_byte(ECHO_OP_START, tty); |
435 | } else | 832 | add_echo_byte(ECHO_OP_START, tty); |
436 | opost(c, tty); | 833 | } else { |
834 | if (iscntrl(c) && c != '\t') | ||
835 | add_echo_byte(ECHO_OP_START, tty); | ||
836 | add_echo_byte(c, tty); | ||
837 | } | ||
838 | |||
839 | mutex_unlock(&tty->echo_lock); | ||
437 | } | 840 | } |
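Note: the "^X" form mentioned above comes from flipping bit 6 of the control byte, as the removed tty_put_char(tty, c ^ 0100) line shows. A few concrete mappings:

	0x03 (Ctrl-C) ^ 0x40  ->  0x43 'C'
	0x1b (ESC)    ^ 0x40  ->  0x5b '['
	0x7f (DEL)    ^ 0x40  ->  0x3f '?'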
438 | 841 | ||
439 | /** | 842 | /** |
440 | * finsh_erasing - complete erase | 843 | * finish_erasing - complete erase |
441 | * @tty: tty doing the erase | 844 | * @tty: tty doing the erase |
442 | * | ||
443 | * Relies on BKL for tty column locking | ||
444 | */ | 845 | */ |
846 | |||
445 | static inline void finish_erasing(struct tty_struct *tty) | 847 | static inline void finish_erasing(struct tty_struct *tty) |
446 | { | 848 | { |
447 | if (tty->erasing) { | 849 | if (tty->erasing) { |
448 | tty_put_char(tty, '/'); | 850 | echo_char_raw('/', tty); |
449 | tty->column++; | ||
450 | tty->erasing = 0; | 851 | tty->erasing = 0; |
451 | } | 852 | } |
452 | } | 853 | } |
@@ -460,7 +861,7 @@ static inline void finish_erasing(struct tty_struct *tty) | |||
460 | * present in the stream from the driver layer. Handles the complexities | 861 | * present in the stream from the driver layer. Handles the complexities |
461 | * of UTF-8 multibyte symbols. | 862 | * of UTF-8 multibyte symbols. |
462 | * | 863 | * |
463 | * Locking: read_lock for tty buffers, BKL for column/erasing state | 864 | * Locking: read_lock for tty buffers |
464 | */ | 865 | */ |
465 | 866 | ||
466 | static void eraser(unsigned char c, struct tty_struct *tty) | 867 | static void eraser(unsigned char c, struct tty_struct *tty) |
@@ -471,7 +872,7 @@ static void eraser(unsigned char c, struct tty_struct *tty) | |||
471 | 872 | ||
472 | /* FIXME: locking needed ? */ | 873 | /* FIXME: locking needed ? */ |
473 | if (tty->read_head == tty->canon_head) { | 874 | if (tty->read_head == tty->canon_head) { |
474 | /* opost('\a', tty); */ /* what do you think? */ | 875 | /* process_output('\a', tty); */ /* what do you think? */ |
475 | return; | 876 | return; |
476 | } | 877 | } |
477 | if (c == ERASE_CHAR(tty)) | 878 | if (c == ERASE_CHAR(tty)) |
@@ -497,7 +898,7 @@ static void eraser(unsigned char c, struct tty_struct *tty) | |||
497 | echo_char(KILL_CHAR(tty), tty); | 898 | echo_char(KILL_CHAR(tty), tty); |
498 | /* Add a newline if ECHOK is on and ECHOKE is off. */ | 899 | /* Add a newline if ECHOK is on and ECHOKE is off. */ |
499 | if (L_ECHOK(tty)) | 900 | if (L_ECHOK(tty)) |
500 | opost('\n', tty); | 901 | echo_char_raw('\n', tty); |
501 | return; | 902 | return; |
502 | } | 903 | } |
503 | kill_type = KILL; | 904 | kill_type = KILL; |
@@ -533,67 +934,61 @@ static void eraser(unsigned char c, struct tty_struct *tty) | |||
533 | if (L_ECHO(tty)) { | 934 | if (L_ECHO(tty)) { |
534 | if (L_ECHOPRT(tty)) { | 935 | if (L_ECHOPRT(tty)) { |
535 | if (!tty->erasing) { | 936 | if (!tty->erasing) { |
536 | tty_put_char(tty, '\\'); | 937 | echo_char_raw('\\', tty); |
537 | tty->column++; | ||
538 | tty->erasing = 1; | 938 | tty->erasing = 1; |
539 | } | 939 | } |
540 | /* if cnt > 1, output a multi-byte character */ | 940 | /* if cnt > 1, output a multi-byte character */ |
541 | echo_char(c, tty); | 941 | echo_char(c, tty); |
542 | while (--cnt > 0) { | 942 | while (--cnt > 0) { |
543 | head = (head+1) & (N_TTY_BUF_SIZE-1); | 943 | head = (head+1) & (N_TTY_BUF_SIZE-1); |
544 | tty_put_char(tty, tty->read_buf[head]); | 944 | echo_char_raw(tty->read_buf[head], tty); |
945 | echo_move_back_col(tty); | ||
545 | } | 946 | } |
546 | } else if (kill_type == ERASE && !L_ECHOE(tty)) { | 947 | } else if (kill_type == ERASE && !L_ECHOE(tty)) { |
547 | echo_char(ERASE_CHAR(tty), tty); | 948 | echo_char(ERASE_CHAR(tty), tty); |
548 | } else if (c == '\t') { | 949 | } else if (c == '\t') { |
549 | unsigned int col = tty->canon_column; | 950 | unsigned int num_chars = 0; |
550 | unsigned long tail = tty->canon_head; | 951 | int after_tab = 0; |
551 | 952 | unsigned long tail = tty->read_head; | |
552 | /* Find the column of the last char. */ | 953 | |
553 | while (tail != tty->read_head) { | 954 | /* |
955 | * Count the columns used for characters | ||
956 | * since the start of input or after a | ||
957 | * previous tab. | ||
958 | * This info is used to go back the correct | ||
959 | * number of columns. | ||
960 | */ | ||
961 | while (tail != tty->canon_head) { | ||
962 | tail = (tail-1) & (N_TTY_BUF_SIZE-1); | ||
554 | c = tty->read_buf[tail]; | 963 | c = tty->read_buf[tail]; |
555 | if (c == '\t') | 964 | if (c == '\t') { |
556 | col = (col | 7) + 1; | 965 | after_tab = 1; |
557 | else if (iscntrl(c)) { | 966 | break; |
967 | } else if (iscntrl(c)) { | ||
558 | if (L_ECHOCTL(tty)) | 968 | if (L_ECHOCTL(tty)) |
559 | col += 2; | 969 | num_chars += 2; |
560 | } else if (!is_continuation(c, tty)) | 970 | } else if (!is_continuation(c, tty)) { |
561 | col++; | 971 | num_chars++; |
562 | tail = (tail+1) & (N_TTY_BUF_SIZE-1); | 972 | } |
563 | } | ||
564 | |||
565 | /* should never happen */ | ||
566 | if (tty->column > 0x80000000) | ||
567 | tty->column = 0; | ||
568 | |||
569 | /* Now backup to that column. */ | ||
570 | while (tty->column > col) { | ||
571 | /* Can't use opost here. */ | ||
572 | tty_put_char(tty, '\b'); | ||
573 | if (tty->column > 0) | ||
574 | tty->column--; | ||
575 | } | 973 | } |
974 | echo_erase_tab(num_chars, after_tab, tty); | ||
576 | } else { | 975 | } else { |
577 | if (iscntrl(c) && L_ECHOCTL(tty)) { | 976 | if (iscntrl(c) && L_ECHOCTL(tty)) { |
578 | tty_put_char(tty, '\b'); | 977 | echo_char_raw('\b', tty); |
579 | tty_put_char(tty, ' '); | 978 | echo_char_raw(' ', tty); |
580 | tty_put_char(tty, '\b'); | 979 | echo_char_raw('\b', tty); |
581 | if (tty->column > 0) | ||
582 | tty->column--; | ||
583 | } | 980 | } |
584 | if (!iscntrl(c) || L_ECHOCTL(tty)) { | 981 | if (!iscntrl(c) || L_ECHOCTL(tty)) { |
585 | tty_put_char(tty, '\b'); | 982 | echo_char_raw('\b', tty); |
586 | tty_put_char(tty, ' '); | 983 | echo_char_raw(' ', tty); |
587 | tty_put_char(tty, '\b'); | 984 | echo_char_raw('\b', tty); |
588 | if (tty->column > 0) | ||
589 | tty->column--; | ||
590 | } | 985 | } |
591 | } | 986 | } |
592 | } | 987 | } |
593 | if (kill_type == ERASE) | 988 | if (kill_type == ERASE) |
594 | break; | 989 | break; |
595 | } | 990 | } |
596 | if (tty->read_head == tty->canon_head) | 991 | if (tty->read_head == tty->canon_head && L_ECHO(tty)) |
597 | finish_erasing(tty); | 992 | finish_erasing(tty); |
598 | } | 993 | } |
599 | 994 | ||
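Note: a worked example of the tab-erase accounting above, assuming the canon column is 0. Typing 'a' (column 0->1), 'b' (1->2), then '\t' (2->8) leaves the cursor at column 8. Erasing the tab walks back over 'b' and 'a', hits canon_head, and records num_chars = 2 with after_tab = 0. The decode side then knows the tab was typed at column canon_col + 2 = 2, so the terminal must be backed up 8 - 2 = 6 columns.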
@@ -712,6 +1107,7 @@ static inline void n_tty_receive_parity_error(struct tty_struct *tty, | |||
712 | static inline void n_tty_receive_char(struct tty_struct *tty, unsigned char c) | 1107 | static inline void n_tty_receive_char(struct tty_struct *tty, unsigned char c) |
713 | { | 1108 | { |
714 | unsigned long flags; | 1109 | unsigned long flags; |
1110 | int parmrk; | ||
715 | 1111 | ||
716 | if (tty->raw) { | 1112 | if (tty->raw) { |
717 | put_tty_queue(c, tty); | 1113 | put_tty_queue(c, tty); |
@@ -721,18 +1117,21 @@ static inline void n_tty_receive_char(struct tty_struct *tty, unsigned char c) | |||
721 | if (I_ISTRIP(tty)) | 1117 | if (I_ISTRIP(tty)) |
722 | c &= 0x7f; | 1118 | c &= 0x7f; |
723 | if (I_IUCLC(tty) && L_IEXTEN(tty)) | 1119 | if (I_IUCLC(tty) && L_IEXTEN(tty)) |
724 | c=tolower(c); | 1120 | c = tolower(c); |
725 | 1121 | ||
726 | if (tty->stopped && !tty->flow_stopped && I_IXON(tty) && | 1122 | if (tty->stopped && !tty->flow_stopped && I_IXON(tty) && |
727 | ((I_IXANY(tty) && c != START_CHAR(tty) && c != STOP_CHAR(tty)) || | 1123 | I_IXANY(tty) && c != START_CHAR(tty) && c != STOP_CHAR(tty) && |
728 | c == INTR_CHAR(tty) || c == QUIT_CHAR(tty) || c == SUSP_CHAR(tty))) | 1124 | c != INTR_CHAR(tty) && c != QUIT_CHAR(tty) && c != SUSP_CHAR(tty)) { |
729 | start_tty(tty); | 1125 | start_tty(tty); |
1126 | process_echoes(tty); | ||
1127 | } | ||
730 | 1128 | ||
731 | if (tty->closing) { | 1129 | if (tty->closing) { |
732 | if (I_IXON(tty)) { | 1130 | if (I_IXON(tty)) { |
733 | if (c == START_CHAR(tty)) | 1131 | if (c == START_CHAR(tty)) { |
734 | start_tty(tty); | 1132 | start_tty(tty); |
735 | else if (c == STOP_CHAR(tty)) | 1133 | process_echoes(tty); |
1134 | } else if (c == STOP_CHAR(tty)) | ||
736 | stop_tty(tty); | 1135 | stop_tty(tty); |
737 | } | 1136 | } |
738 | return; | 1137 | return; |
@@ -745,19 +1144,23 @@ static inline void n_tty_receive_char(struct tty_struct *tty, unsigned char c) | |||
745 | * up. | 1144 | * up. |
746 | */ | 1145 | */ |
747 | if (!test_bit(c, tty->process_char_map) || tty->lnext) { | 1146 | if (!test_bit(c, tty->process_char_map) || tty->lnext) { |
748 | finish_erasing(tty); | ||
749 | tty->lnext = 0; | 1147 | tty->lnext = 0; |
1148 | parmrk = (c == (unsigned char) '\377' && I_PARMRK(tty)) ? 1 : 0; | ||
1149 | if (tty->read_cnt >= (N_TTY_BUF_SIZE - parmrk - 1)) { | ||
1150 | /* beep if no space */ | ||
1151 | if (L_ECHO(tty)) | ||
1152 | process_output('\a', tty); | ||
1153 | return; | ||
1154 | } | ||
750 | if (L_ECHO(tty)) { | 1155 | if (L_ECHO(tty)) { |
751 | if (tty->read_cnt >= N_TTY_BUF_SIZE-1) { | 1156 | finish_erasing(tty); |
752 | tty_put_char(tty, '\a'); /* beep if no space */ | ||
753 | return; | ||
754 | } | ||
755 | /* Record the column of first canon char. */ | 1157 | /* Record the column of first canon char. */ |
756 | if (tty->canon_head == tty->read_head) | 1158 | if (tty->canon_head == tty->read_head) |
757 | tty->canon_column = tty->column; | 1159 | echo_set_canon_col(tty); |
758 | echo_char(c, tty); | 1160 | echo_char(c, tty); |
1161 | process_echoes(tty); | ||
759 | } | 1162 | } |
760 | if (I_PARMRK(tty) && c == (unsigned char) '\377') | 1163 | if (parmrk) |
761 | put_tty_queue(c, tty); | 1164 | put_tty_queue(c, tty); |
762 | put_tty_queue(c, tty); | 1165 | put_tty_queue(c, tty); |
763 | return; | 1166 | return; |
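Note: the parmrk bookkeeping above follows the POSIX PARMRK convention: a receive error is queued to the reader as the three-byte sequence 0xff 0x00 <byte>, so a literal 0xff data byte must be doubled to stay distinguishable. That is why an extra slot is reserved in the space check and why put_tty_queue(c, tty) runs twice when parmrk is set:

	literal 0xff from the line    ->  reader sees  0xff 0xff
	byte 0x41 with a parity error ->  reader sees  0xff 0x00 0x41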
@@ -766,6 +1169,7 @@ static inline void n_tty_receive_char(struct tty_struct *tty, unsigned char c) | |||
766 | if (I_IXON(tty)) { | 1169 | if (I_IXON(tty)) { |
767 | if (c == START_CHAR(tty)) { | 1170 | if (c == START_CHAR(tty)) { |
768 | start_tty(tty); | 1171 | start_tty(tty); |
1172 | process_echoes(tty); | ||
769 | return; | 1173 | return; |
770 | } | 1174 | } |
771 | if (c == STOP_CHAR(tty)) { | 1175 | if (c == STOP_CHAR(tty)) { |
@@ -786,7 +1190,6 @@ static inline void n_tty_receive_char(struct tty_struct *tty, unsigned char c) | |||
786 | if (c == SUSP_CHAR(tty)) { | 1190 | if (c == SUSP_CHAR(tty)) { |
787 | send_signal: | 1191 | send_signal: |
788 | /* | 1192 | /* |
789 | * Echo character, and then send the signal. | ||
790 | * Note that we do not use isig() here because we want | 1193 | * Note that we do not use isig() here because we want |
791 | * the order to be: | 1194 | * the order to be: |
792 | * 1) flush, 2) echo, 3) signal | 1195 | * 1) flush, 2) echo, 3) signal |
@@ -795,8 +1198,12 @@ send_signal: | |||
795 | n_tty_flush_buffer(tty); | 1198 | n_tty_flush_buffer(tty); |
796 | tty_driver_flush_buffer(tty); | 1199 | tty_driver_flush_buffer(tty); |
797 | } | 1200 | } |
798 | if (L_ECHO(tty)) | 1201 | if (I_IXON(tty)) |
1202 | start_tty(tty); | ||
1203 | if (L_ECHO(tty)) { | ||
799 | echo_char(c, tty); | 1204 | echo_char(c, tty); |
1205 | process_echoes(tty); | ||
1206 | } | ||
800 | if (tty->pgrp) | 1207 | if (tty->pgrp) |
801 | kill_pgrp(tty->pgrp, signal, 1); | 1208 | kill_pgrp(tty->pgrp, signal, 1); |
802 | return; | 1209 | return; |
@@ -815,6 +1222,7 @@ send_signal: | |||
815 | if (c == ERASE_CHAR(tty) || c == KILL_CHAR(tty) || | 1222 | if (c == ERASE_CHAR(tty) || c == KILL_CHAR(tty) || |
816 | (c == WERASE_CHAR(tty) && L_IEXTEN(tty))) { | 1223 | (c == WERASE_CHAR(tty) && L_IEXTEN(tty))) { |
817 | eraser(c, tty); | 1224 | eraser(c, tty); |
1225 | process_echoes(tty); | ||
818 | return; | 1226 | return; |
819 | } | 1227 | } |
820 | if (c == LNEXT_CHAR(tty) && L_IEXTEN(tty)) { | 1228 | if (c == LNEXT_CHAR(tty) && L_IEXTEN(tty)) { |
@@ -822,8 +1230,9 @@ send_signal: | |||
822 | if (L_ECHO(tty)) { | 1230 | if (L_ECHO(tty)) { |
823 | finish_erasing(tty); | 1231 | finish_erasing(tty); |
824 | if (L_ECHOCTL(tty)) { | 1232 | if (L_ECHOCTL(tty)) { |
825 | tty_put_char(tty, '^'); | 1233 | echo_char_raw('^', tty); |
826 | tty_put_char(tty, '\b'); | 1234 | echo_char_raw('\b', tty); |
1235 | process_echoes(tty); | ||
827 | } | 1236 | } |
828 | } | 1237 | } |
829 | return; | 1238 | return; |
@@ -834,22 +1243,29 @@ send_signal: | |||
834 | 1243 | ||
835 | finish_erasing(tty); | 1244 | finish_erasing(tty); |
836 | echo_char(c, tty); | 1245 | echo_char(c, tty); |
837 | opost('\n', tty); | 1246 | echo_char_raw('\n', tty); |
838 | while (tail != tty->read_head) { | 1247 | while (tail != tty->read_head) { |
839 | echo_char(tty->read_buf[tail], tty); | 1248 | echo_char(tty->read_buf[tail], tty); |
840 | tail = (tail+1) & (N_TTY_BUF_SIZE-1); | 1249 | tail = (tail+1) & (N_TTY_BUF_SIZE-1); |
841 | } | 1250 | } |
1251 | process_echoes(tty); | ||
842 | return; | 1252 | return; |
843 | } | 1253 | } |
844 | if (c == '\n') { | 1254 | if (c == '\n') { |
1255 | if (tty->read_cnt >= N_TTY_BUF_SIZE) { | ||
1256 | if (L_ECHO(tty)) | ||
1257 | process_output('\a', tty); | ||
1258 | return; | ||
1259 | } | ||
845 | if (L_ECHO(tty) || L_ECHONL(tty)) { | 1260 | if (L_ECHO(tty) || L_ECHONL(tty)) { |
846 | if (tty->read_cnt >= N_TTY_BUF_SIZE-1) | 1261 | echo_char_raw('\n', tty); |
847 | tty_put_char(tty, '\a'); | 1262 | process_echoes(tty); |
848 | opost('\n', tty); | ||
849 | } | 1263 | } |
850 | goto handle_newline; | 1264 | goto handle_newline; |
851 | } | 1265 | } |
852 | if (c == EOF_CHAR(tty)) { | 1266 | if (c == EOF_CHAR(tty)) { |
1267 | if (tty->read_cnt >= N_TTY_BUF_SIZE) | ||
1268 | return; | ||
853 | if (tty->canon_head != tty->read_head) | 1269 | if (tty->canon_head != tty->read_head) |
854 | set_bit(TTY_PUSH, &tty->flags); | 1270 | set_bit(TTY_PUSH, &tty->flags); |
855 | c = __DISABLED_CHAR; | 1271 | c = __DISABLED_CHAR; |
@@ -857,22 +1273,28 @@ send_signal: | |||
857 | } | 1273 | } |
858 | if ((c == EOL_CHAR(tty)) || | 1274 | if ((c == EOL_CHAR(tty)) || |
859 | (c == EOL2_CHAR(tty) && L_IEXTEN(tty))) { | 1275 | (c == EOL2_CHAR(tty) && L_IEXTEN(tty))) { |
1276 | parmrk = (c == (unsigned char) '\377' && I_PARMRK(tty)) | ||
1277 | ? 1 : 0; | ||
1278 | if (tty->read_cnt >= (N_TTY_BUF_SIZE - parmrk)) { | ||
1279 | if (L_ECHO(tty)) | ||
1280 | process_output('\a', tty); | ||
1281 | return; | ||
1282 | } | ||
860 | /* | 1283 | /* |
861 | * XXX are EOL_CHAR and EOL2_CHAR echoed?!? | 1284 | * XXX are EOL_CHAR and EOL2_CHAR echoed?!? |
862 | */ | 1285 | */ |
863 | if (L_ECHO(tty)) { | 1286 | if (L_ECHO(tty)) { |
864 | if (tty->read_cnt >= N_TTY_BUF_SIZE-1) | ||
865 | tty_put_char(tty, '\a'); | ||
866 | /* Record the column of first canon char. */ | 1287 | /* Record the column of first canon char. */ |
867 | if (tty->canon_head == tty->read_head) | 1288 | if (tty->canon_head == tty->read_head) |
868 | tty->canon_column = tty->column; | 1289 | echo_set_canon_col(tty); |
869 | echo_char(c, tty); | 1290 | echo_char(c, tty); |
1291 | process_echoes(tty); | ||
870 | } | 1292 | } |
871 | /* | 1293 | /* |
872 | * XXX does PARMRK doubling happen for | 1294 | * XXX does PARMRK doubling happen for |
873 | * EOL_CHAR and EOL2_CHAR? | 1295 | * EOL_CHAR and EOL2_CHAR? |
874 | */ | 1296 | */ |
875 | if (I_PARMRK(tty) && c == (unsigned char) '\377') | 1297 | if (parmrk) |
876 | put_tty_queue(c, tty); | 1298 | put_tty_queue(c, tty); |
877 | 1299 | ||
878 | handle_newline: | 1300 | handle_newline: |
@@ -889,23 +1311,27 @@ handle_newline: | |||
889 | } | 1311 | } |
890 | } | 1312 | } |
891 | 1313 | ||
892 | finish_erasing(tty); | 1314 | parmrk = (c == (unsigned char) '\377' && I_PARMRK(tty)) ? 1 : 0; |
1315 | if (tty->read_cnt >= (N_TTY_BUF_SIZE - parmrk - 1)) { | ||
1316 | /* beep if no space */ | ||
1317 | if (L_ECHO(tty)) | ||
1318 | process_output('\a', tty); | ||
1319 | return; | ||
1320 | } | ||
893 | if (L_ECHO(tty)) { | 1321 | if (L_ECHO(tty)) { |
894 | if (tty->read_cnt >= N_TTY_BUF_SIZE-1) { | 1322 | finish_erasing(tty); |
895 | tty_put_char(tty, '\a'); /* beep if no space */ | ||
896 | return; | ||
897 | } | ||
898 | if (c == '\n') | 1323 | if (c == '\n') |
899 | opost('\n', tty); | 1324 | echo_char_raw('\n', tty); |
900 | else { | 1325 | else { |
901 | /* Record the column of first canon char. */ | 1326 | /* Record the column of first canon char. */ |
902 | if (tty->canon_head == tty->read_head) | 1327 | if (tty->canon_head == tty->read_head) |
903 | tty->canon_column = tty->column; | 1328 | echo_set_canon_col(tty); |
904 | echo_char(c, tty); | 1329 | echo_char(c, tty); |
905 | } | 1330 | } |
1331 | process_echoes(tty); | ||
906 | } | 1332 | } |
907 | 1333 | ||
908 | if (I_PARMRK(tty) && c == (unsigned char) '\377') | 1334 | if (parmrk) |
909 | put_tty_queue(c, tty); | 1335 | put_tty_queue(c, tty); |
910 | 1336 | ||
911 | put_tty_queue(c, tty); | 1337 | put_tty_queue(c, tty); |
@@ -923,10 +1349,11 @@ handle_newline: | |||
923 | 1349 | ||
924 | static void n_tty_write_wakeup(struct tty_struct *tty) | 1350 | static void n_tty_write_wakeup(struct tty_struct *tty) |
925 | { | 1351 | { |
926 | if (tty->fasync) { | 1352 | /* Write out any echoed characters that are still pending */ |
927 | set_bit(TTY_DO_WRITE_WAKEUP, &tty->flags); | 1353 | process_echoes(tty); |
1354 | |||
1355 | if (tty->fasync && test_and_clear_bit(TTY_DO_WRITE_WAKEUP, &tty->flags)) | ||
928 | kill_fasync(&tty->fasync, SIGIO, POLL_OUT); | 1356 | kill_fasync(&tty->fasync, SIGIO, POLL_OUT); |
929 | } | ||
930 | } | 1357 | } |
931 | 1358 | ||
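Note: the test_and_clear_bit() above pairs with the producer side added near the end of n_tty_write() later in this patch:

	if (b - buf != nr && tty->fasync)
		set_bit(TTY_DO_WRITE_WAKEUP, &tty->flags);

so SIGIO/POLL_OUT is raised only for writers that actually stalled, and at most once per stall.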
932 | /** | 1359 | /** |
@@ -1134,6 +1561,10 @@ static void n_tty_close(struct tty_struct *tty) | |||
1134 | free_buf(tty->read_buf); | 1561 | free_buf(tty->read_buf); |
1135 | tty->read_buf = NULL; | 1562 | tty->read_buf = NULL; |
1136 | } | 1563 | } |
1564 | if (tty->echo_buf) { | ||
1565 | free_buf(tty->echo_buf); | ||
1566 | tty->echo_buf = NULL; | ||
1567 | } | ||
1137 | } | 1568 | } |
1138 | 1569 | ||
1139 | /** | 1570 | /** |
@@ -1151,13 +1582,19 @@ static int n_tty_open(struct tty_struct *tty) | |||
1151 | if (!tty) | 1582 | if (!tty) |
1152 | return -EINVAL; | 1583 | return -EINVAL; |
1153 | 1584 | ||
1154 | /* This one is ugly. Currently a malloc failure here can panic */ | 1585 | /* These are ugly. Currently a malloc failure here can panic */ |
1155 | if (!tty->read_buf) { | 1586 | if (!tty->read_buf) { |
1156 | tty->read_buf = alloc_buf(); | 1587 | tty->read_buf = alloc_buf(); |
1157 | if (!tty->read_buf) | 1588 | if (!tty->read_buf) |
1158 | return -ENOMEM; | 1589 | return -ENOMEM; |
1159 | } | 1590 | } |
1591 | if (!tty->echo_buf) { | ||
1592 | tty->echo_buf = alloc_buf(); | ||
1593 | if (!tty->echo_buf) | ||
1594 | return -ENOMEM; | ||
1595 | } | ||
1160 | memset(tty->read_buf, 0, N_TTY_BUF_SIZE); | 1596 | memset(tty->read_buf, 0, N_TTY_BUF_SIZE); |
1597 | memset(tty->echo_buf, 0, N_TTY_BUF_SIZE); | ||
1161 | reset_buffer_flags(tty); | 1598 | reset_buffer_flags(tty); |
1162 | tty->column = 0; | 1599 | tty->column = 0; |
1163 | n_tty_set_termios(tty, NULL); | 1600 | n_tty_set_termios(tty, NULL); |
@@ -1487,16 +1924,23 @@ do_it_again: | |||
1487 | * @buf: userspace buffer pointer | 1924 | * @buf: userspace buffer pointer |
1488 | * @nr: size of I/O | 1925 | * @nr: size of I/O |
1489 | * | 1926 | * |
1490 | * Write function of the terminal device. This is serialized with | 1927 | * Write function of the terminal device. This is serialized with |
1491 | * respect to other write callers but not to termios changes, reads | 1928 | * respect to other write callers but not to termios changes, reads |
1492 | * and other such events. We must be careful with N_TTY as the receive | 1929 | * and other such events. Since the receive code will echo characters, |
1493 | * code will echo characters, thus calling driver write methods. | 1930 | * thus calling driver write methods, the output_lock is used in |
1931 | * the output processing functions called here as well as in the | ||
1932 | * echo processing function to protect the column state and space | ||
1933 | * left in the buffer. | ||
1494 | * | 1934 | * |
1495 | * This code must be sure never to sleep through a hangup. | 1935 | * This code must be sure never to sleep through a hangup. |
1936 | * | ||
1937 | * Locking: output_lock to protect column state and space left | ||
1938 | * (note that the process_output*() functions take this | ||
1939 | * lock themselves) | ||
1496 | */ | 1940 | */ |
1497 | 1941 | ||
1498 | static ssize_t n_tty_write(struct tty_struct *tty, struct file *file, | 1942 | static ssize_t n_tty_write(struct tty_struct *tty, struct file *file, |
1499 | const unsigned char *buf, size_t nr) | 1943 | const unsigned char *buf, size_t nr) |
1500 | { | 1944 | { |
1501 | const unsigned char *b = buf; | 1945 | const unsigned char *b = buf; |
1502 | DECLARE_WAITQUEUE(wait, current); | 1946 | DECLARE_WAITQUEUE(wait, current); |
@@ -1510,6 +1954,9 @@ static ssize_t n_tty_write(struct tty_struct *tty, struct file *file, | |||
1510 | return retval; | 1954 | return retval; |
1511 | } | 1955 | } |
1512 | 1956 | ||
1957 | /* Write out any echoed characters that are still pending */ | ||
1958 | process_echoes(tty); | ||
1959 | |||
1513 | add_wait_queue(&tty->write_wait, &wait); | 1960 | add_wait_queue(&tty->write_wait, &wait); |
1514 | while (1) { | 1961 | while (1) { |
1515 | set_current_state(TASK_INTERRUPTIBLE); | 1962 | set_current_state(TASK_INTERRUPTIBLE); |
@@ -1523,7 +1970,7 @@ static ssize_t n_tty_write(struct tty_struct *tty, struct file *file, | |||
1523 | } | 1970 | } |
1524 | if (O_OPOST(tty) && !(test_bit(TTY_HW_COOK_OUT, &tty->flags))) { | 1971 | if (O_OPOST(tty) && !(test_bit(TTY_HW_COOK_OUT, &tty->flags))) { |
1525 | while (nr > 0) { | 1972 | while (nr > 0) { |
1526 | ssize_t num = opost_block(tty, b, nr); | 1973 | ssize_t num = process_output_block(tty, b, nr); |
1527 | if (num < 0) { | 1974 | if (num < 0) { |
1528 | if (num == -EAGAIN) | 1975 | if (num == -EAGAIN) |
1529 | break; | 1976 | break; |
@@ -1535,7 +1982,7 @@ static ssize_t n_tty_write(struct tty_struct *tty, struct file *file, | |||
1535 | if (nr == 0) | 1982 | if (nr == 0) |
1536 | break; | 1983 | break; |
1537 | c = *b; | 1984 | c = *b; |
1538 | if (opost(c, tty) < 0) | 1985 | if (process_output(c, tty) < 0) |
1539 | break; | 1986 | break; |
1540 | b++; nr--; | 1987 | b++; nr--; |
1541 | } | 1988 | } |
@@ -1565,6 +2012,8 @@ static ssize_t n_tty_write(struct tty_struct *tty, struct file *file, | |||
1565 | break_out: | 2012 | break_out: |
1566 | __set_current_state(TASK_RUNNING); | 2013 | __set_current_state(TASK_RUNNING); |
1567 | remove_wait_queue(&tty->write_wait, &wait); | 2014 | remove_wait_queue(&tty->write_wait, &wait); |
2015 | if (b - buf != nr && tty->fasync) | ||
2016 | set_bit(TTY_DO_WRITE_WAKEUP, &tty->flags); | ||
1568 | return (b - buf) ? b - buf : retval; | 2017 | return (b - buf) ? b - buf : retval; |
1569 | } | 2018 | } |
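Note: process_output() and process_output_block() are introduced by this patch in hunks outside this section. A rough sketch of the locking the comment above describes; the worker name do_output_char() and the exact shape are assumptions, not the patch's code:

	static int process_output(unsigned char c, struct tty_struct *tty)
	{
		int ret;

		mutex_lock(&tty->output_lock);	/* column state, space left */
		ret = do_output_char(c, tty);	/* assumed output worker */
		mutex_unlock(&tty->output_lock);
		return ret;
	}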
1570 | 2019 | ||
@@ -1663,4 +2112,3 @@ struct tty_ldisc_ops tty_ldisc_N_TTY = { | |||
1663 | .receive_buf = n_tty_receive_buf, | 2112 | .receive_buf = n_tty_receive_buf, |
1664 | .write_wakeup = n_tty_write_wakeup | 2113 | .write_wakeup = n_tty_write_wakeup |
1665 | }; | 2114 | }; |
1666 | |||
diff --git a/drivers/char/nozomi.c b/drivers/char/nozomi.c index 9a34a1935283..d6102b644b55 100644 --- a/drivers/char/nozomi.c +++ b/drivers/char/nozomi.c | |||
@@ -353,6 +353,7 @@ struct ctrl_ul { | |||
353 | 353 | ||
354 | /* This holds all information that is needed regarding a port */ | 354 | /* This holds all information that is needed regarding a port */ |
355 | struct port { | 355 | struct port { |
356 | struct tty_port port; | ||
356 | u8 update_flow_control; | 357 | u8 update_flow_control; |
357 | struct ctrl_ul ctrl_ul; | 358 | struct ctrl_ul ctrl_ul; |
358 | struct ctrl_dl ctrl_dl; | 359 | struct ctrl_dl ctrl_dl; |
@@ -365,8 +366,6 @@ struct port { | |||
365 | u8 toggle_ul; | 366 | u8 toggle_ul; |
366 | u16 token_dl; | 367 | u16 token_dl; |
367 | 368 | ||
368 | struct tty_struct *tty; | ||
369 | int tty_open_count; | ||
370 | /* mutex to ensure one access path to this port */ | 369 |
371 | struct mutex tty_sem; | 370 | struct mutex tty_sem; |
372 | wait_queue_head_t tty_wait; | 371 | wait_queue_head_t tty_wait; |
@@ -788,14 +787,14 @@ static void disable_transmit_dl(enum port_type port, struct nozomi *dc) | |||
788 | * Return 1 - send buffer to card and ack. | 787 | * Return 1 - send buffer to card and ack. |
789 | * Return 0 - don't ack, don't send buffer to card. | 788 | * Return 0 - don't ack, don't send buffer to card. |
790 | */ | 789 | */ |
791 | static int send_data(enum port_type index, const struct nozomi *dc) | 790 | static int send_data(enum port_type index, struct nozomi *dc) |
792 | { | 791 | { |
793 | u32 size = 0; | 792 | u32 size = 0; |
794 | const struct port *port = &dc->port[index]; | 793 | struct port *port = &dc->port[index]; |
795 | const u8 toggle = port->toggle_ul; | 794 | const u8 toggle = port->toggle_ul; |
796 | void __iomem *addr = port->ul_addr[toggle]; | 795 | void __iomem *addr = port->ul_addr[toggle]; |
797 | const u32 ul_size = port->ul_size[toggle]; | 796 | const u32 ul_size = port->ul_size[toggle]; |
798 | struct tty_struct *tty = port->tty; | 797 | struct tty_struct *tty = tty_port_tty_get(&port->port); |
799 | 798 | ||
800 | /* Get data from tty and place in buf for now */ | 799 | /* Get data from tty and place in buf for now */ |
801 | size = __kfifo_get(port->fifo_ul, dc->send_buf, | 800 | size = __kfifo_get(port->fifo_ul, dc->send_buf, |
@@ -803,6 +802,7 @@ static int send_data(enum port_type index, const struct nozomi *dc) | |||
803 | 802 | ||
804 | if (size == 0) { | 803 | if (size == 0) { |
805 | DBG4("No more data to send, disable link:"); | 804 | DBG4("No more data to send, disable link:"); |
805 | tty_kref_put(tty); | ||
806 | return 0; | 806 | return 0; |
807 | } | 807 | } |
808 | 808 | ||
@@ -815,6 +815,7 @@ static int send_data(enum port_type index, const struct nozomi *dc) | |||
815 | if (tty) | 815 | if (tty) |
816 | tty_wakeup(tty); | 816 | tty_wakeup(tty); |
817 | 817 | ||
818 | tty_kref_put(tty); | ||
818 | return 1; | 819 | return 1; |
819 | } | 820 | } |
820 | 821 | ||
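Note: the conversion pattern used throughout this driver is to take a counted tty reference for exactly as long as it is needed, instead of caching port->tty. A minimal sketch; wake_port() is a hypothetical helper, not part of the patch:

	static void wake_port(struct port *port)
	{
		struct tty_struct *tty = tty_port_tty_get(&port->port);

		if (tty)
			tty_wakeup(tty);
		tty_kref_put(tty);	/* tty_kref_put() accepts NULL */
	}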
@@ -826,7 +827,7 @@ static int receive_data(enum port_type index, struct nozomi *dc) | |||
826 | u32 offset = 4; | 827 | u32 offset = 4; |
827 | struct port *port = &dc->port[index]; | 828 | struct port *port = &dc->port[index]; |
828 | void __iomem *addr = port->dl_addr[port->toggle_dl]; | 829 | void __iomem *addr = port->dl_addr[port->toggle_dl]; |
829 | struct tty_struct *tty = port->tty; | 830 | struct tty_struct *tty = tty_port_tty_get(&port->port); |
830 | int i; | 831 | int i; |
831 | 832 | ||
832 | if (unlikely(!tty)) { | 833 | if (unlikely(!tty)) { |
@@ -870,7 +871,7 @@ static int receive_data(enum port_type index, struct nozomi *dc) | |||
870 | } | 871 | } |
871 | 872 | ||
872 | set_bit(index, &dc->flip); | 873 | set_bit(index, &dc->flip); |
873 | 874 | tty_kref_put(tty); | |
874 | return 1; | 875 | return 1; |
875 | } | 876 | } |
876 | 877 | ||
@@ -1276,9 +1277,15 @@ static irqreturn_t interrupt_handler(int irq, void *dev_id) | |||
1276 | 1277 | ||
1277 | exit_handler: | 1278 | exit_handler: |
1278 | spin_unlock(&dc->spin_mutex); | 1279 | spin_unlock(&dc->spin_mutex); |
1279 | for (a = 0; a < NOZOMI_MAX_PORTS; a++) | 1280 | for (a = 0; a < NOZOMI_MAX_PORTS; a++) { |
1280 | if (test_and_clear_bit(a, &dc->flip)) | 1281 | struct tty_struct *tty; |
1281 | tty_flip_buffer_push(dc->port[a].tty); | 1282 | if (test_and_clear_bit(a, &dc->flip)) { |
1283 | tty = tty_port_tty_get(&dc->port[a].port); | ||
1284 | if (tty) | ||
1285 | tty_flip_buffer_push(tty); | ||
1286 | tty_kref_put(tty); | ||
1287 | } | ||
1288 | } | ||
1282 | return IRQ_HANDLED; | 1289 | return IRQ_HANDLED; |
1283 | none: | 1290 | none: |
1284 | spin_unlock(&dc->spin_mutex); | 1291 | spin_unlock(&dc->spin_mutex); |
@@ -1453,12 +1460,10 @@ static int __devinit nozomi_card_init(struct pci_dev *pdev, | |||
1453 | 1460 | ||
1454 | for (i = 0; i < MAX_PORT; i++) { | 1461 | for (i = 0; i < MAX_PORT; i++) { |
1455 | mutex_init(&dc->port[i].tty_sem); | 1462 | mutex_init(&dc->port[i].tty_sem); |
1456 | dc->port[i].tty_open_count = 0; | 1463 | tty_port_init(&dc->port[i].port); |
1457 | dc->port[i].tty = NULL; | ||
1458 | tty_register_device(ntty_driver, dc->index_start + i, | 1464 | tty_register_device(ntty_driver, dc->index_start + i, |
1459 | &pdev->dev); | 1465 | &pdev->dev); |
1460 | } | 1466 | } |
1461 | |||
1462 | return 0; | 1467 | return 0; |
1463 | 1468 | ||
1464 | err_free_sbuf: | 1469 | err_free_sbuf: |
@@ -1482,14 +1487,16 @@ static void __devexit tty_exit(struct nozomi *dc) | |||
1482 | 1487 | ||
1483 | flush_scheduled_work(); | 1488 | flush_scheduled_work(); |
1484 | 1489 | ||
1485 | for (i = 0; i < MAX_PORT; ++i) | 1490 | for (i = 0; i < MAX_PORT; ++i) { |
1486 | if (dc->port[i].tty && \ | 1491 | struct tty_struct *tty = tty_port_tty_get(&dc->port[i].port); |
1487 | list_empty(&dc->port[i].tty->hangup_work.entry)) | 1492 | if (tty && list_empty(&tty->hangup_work.entry)) |
1488 | tty_hangup(dc->port[i].tty); | 1493 | tty_hangup(tty); |
1489 | 1494 | tty_kref_put(tty); | |
1495 | } | ||
1496 | /* Racy below - surely should wait for scheduled work to be done or | ||
1497 | complete off a hangup method ? */ | ||
1490 | while (dc->open_ttys) | 1498 | while (dc->open_ttys) |
1491 | msleep(1); | 1499 | msleep(1); |
1492 | |||
1493 | for (i = dc->index_start; i < dc->index_start + MAX_PORT; ++i) | 1500 | for (i = dc->index_start; i < dc->index_start + MAX_PORT; ++i) |
1494 | tty_unregister_device(ntty_driver, i); | 1501 | tty_unregister_device(ntty_driver, i); |
1495 | } | 1502 | } |
@@ -1579,23 +1586,22 @@ static int ntty_open(struct tty_struct *tty, struct file *file) | |||
1579 | if (mutex_lock_interruptible(&port->tty_sem)) | 1586 | if (mutex_lock_interruptible(&port->tty_sem)) |
1580 | return -ERESTARTSYS; | 1587 | return -ERESTARTSYS; |
1581 | 1588 | ||
1582 | port->tty_open_count++; | 1589 | port->port.count++; |
1583 | dc->open_ttys++; | 1590 | dc->open_ttys++; |
1584 | 1591 | ||
1585 | /* Enable interrupt downlink for channel */ | 1592 | /* Enable interrupt downlink for channel */ |
1586 | if (port->tty_open_count == 1) { | 1593 | if (port->port.count == 1) { |
1594 | /* FIXME: is this needed now ? */ | ||
1587 | tty->low_latency = 1; | 1595 | tty->low_latency = 1; |
1588 | tty->driver_data = port; | 1596 | tty->driver_data = port; |
1589 | port->tty = tty; | 1597 | tty_port_tty_set(&port->port, tty); |
1590 | DBG1("open: %d", port->token_dl); | 1598 | DBG1("open: %d", port->token_dl); |
1591 | spin_lock_irqsave(&dc->spin_mutex, flags); | 1599 | spin_lock_irqsave(&dc->spin_mutex, flags); |
1592 | dc->last_ier = dc->last_ier | port->token_dl; | 1600 | dc->last_ier = dc->last_ier | port->token_dl; |
1593 | writew(dc->last_ier, dc->reg_ier); | 1601 | writew(dc->last_ier, dc->reg_ier); |
1594 | spin_unlock_irqrestore(&dc->spin_mutex, flags); | 1602 | spin_unlock_irqrestore(&dc->spin_mutex, flags); |
1595 | } | 1603 | } |
1596 | |||
1597 | mutex_unlock(&port->tty_sem); | 1604 | mutex_unlock(&port->tty_sem); |
1598 | |||
1599 | return 0; | 1605 | return 0; |
1600 | } | 1606 | } |
1601 | 1607 | ||
@@ -1606,31 +1612,30 @@ static int ntty_open(struct tty_struct *tty, struct file *file) | |||
1606 | static void ntty_close(struct tty_struct *tty, struct file *file) | 1612 | static void ntty_close(struct tty_struct *tty, struct file *file) |
1607 | { | 1613 | { |
1608 | struct nozomi *dc = get_dc_by_tty(tty); | 1614 | struct nozomi *dc = get_dc_by_tty(tty); |
1609 | struct port *port = tty->driver_data; | 1615 | struct port *nport = tty->driver_data; |
1616 | struct tty_port *port = &nport->port; | ||
1610 | unsigned long flags; | 1617 | unsigned long flags; |
1611 | 1618 | ||
1612 | if (!dc || !port) | 1619 | if (!dc || !nport) |
1613 | return; | 1620 | return; |
1614 | 1621 | ||
1615 | if (mutex_lock_interruptible(&port->tty_sem)) | 1622 | /* Users cannot interrupt a close */ |
1616 | return; | 1623 | mutex_lock(&nport->tty_sem); |
1617 | 1624 | ||
1618 | if (!port->tty_open_count) | 1625 | WARN_ON(!port->count); |
1619 | goto exit; | ||
1620 | 1626 | ||
1621 | dc->open_ttys--; | 1627 | dc->open_ttys--; |
1622 | port->tty_open_count--; | 1628 | port->count--; |
1629 | tty_port_tty_set(port, NULL); | ||
1623 | 1630 | ||
1624 | if (port->tty_open_count == 0) { | 1631 | if (port->count == 0) { |
1625 | DBG1("close: %d", port->token_dl); | 1632 | DBG1("close: %d", nport->token_dl); |
1626 | spin_lock_irqsave(&dc->spin_mutex, flags); | 1633 | spin_lock_irqsave(&dc->spin_mutex, flags); |
1627 | dc->last_ier &= ~(port->token_dl); | 1634 | dc->last_ier &= ~(nport->token_dl); |
1628 | writew(dc->last_ier, dc->reg_ier); | 1635 | writew(dc->last_ier, dc->reg_ier); |
1629 | spin_unlock_irqrestore(&dc->spin_mutex, flags); | 1636 | spin_unlock_irqrestore(&dc->spin_mutex, flags); |
1630 | } | 1637 | } |
1631 | 1638 | mutex_unlock(&nport->tty_sem); | |
1632 | exit: | ||
1633 | mutex_unlock(&port->tty_sem); | ||
1634 | } | 1639 | } |
1635 | 1640 | ||
1636 | /* | 1641 | /* |
@@ -1660,7 +1665,7 @@ static int ntty_write(struct tty_struct *tty, const unsigned char *buffer, | |||
1660 | return -EAGAIN; | 1665 | return -EAGAIN; |
1661 | } | 1666 | } |
1662 | 1667 | ||
1663 | if (unlikely(!port->tty_open_count)) { | 1668 | if (unlikely(!port->port.count)) { |
1664 | DBG1(" "); | 1669 | DBG1(" "); |
1665 | goto exit; | 1670 | goto exit; |
1666 | } | 1671 | } |
@@ -1710,7 +1715,7 @@ static int ntty_write_room(struct tty_struct *tty) | |||
1710 | if (!mutex_trylock(&port->tty_sem)) | 1715 | if (!mutex_trylock(&port->tty_sem)) |
1711 | return 0; | 1716 | return 0; |
1712 | 1717 | ||
1713 | if (!port->tty_open_count) | 1718 | if (!port->port.count) |
1714 | goto exit; | 1719 | goto exit; |
1715 | 1720 | ||
1716 | room = port->fifo_ul->size - __kfifo_len(port->fifo_ul); | 1721 | room = port->fifo_ul->size - __kfifo_len(port->fifo_ul); |
@@ -1866,7 +1871,7 @@ static s32 ntty_chars_in_buffer(struct tty_struct *tty) | |||
1866 | goto exit_in_buffer; | 1871 | goto exit_in_buffer; |
1867 | } | 1872 | } |
1868 | 1873 | ||
1869 | if (unlikely(!port->tty_open_count)) { | 1874 | if (unlikely(!port->port.count)) { |
1870 | dev_err(&dc->pdev->dev, "No tty open?\n"); | 1875 | dev_err(&dc->pdev->dev, "No tty open?\n"); |
1871 | rval = -ENODEV; | 1876 | rval = -ENODEV; |
1872 | goto exit_in_buffer; | 1877 | goto exit_in_buffer; |
diff --git a/drivers/char/pcmcia/synclink_cs.c b/drivers/char/pcmcia/synclink_cs.c index 4d64a02612a4..dc073e167abc 100644 --- a/drivers/char/pcmcia/synclink_cs.c +++ b/drivers/char/pcmcia/synclink_cs.c | |||
@@ -138,20 +138,15 @@ struct _input_signal_events { | |||
138 | */ | 138 | */ |
139 | 139 | ||
140 | typedef struct _mgslpc_info { | 140 | typedef struct _mgslpc_info { |
141 | struct tty_port port; | ||
141 | void *if_ptr; /* General purpose pointer (used by SPPP) */ | 142 | void *if_ptr; /* General purpose pointer (used by SPPP) */ |
142 | int magic; | 143 | int magic; |
143 | int flags; | ||
144 | int count; /* count of opens */ | ||
145 | int line; | 144 | int line; |
146 | unsigned short close_delay; | ||
147 | unsigned short closing_wait; /* time to wait before closing */ | ||
148 | 145 | ||
149 | struct mgsl_icount icount; | 146 | struct mgsl_icount icount; |
150 | 147 | ||
151 | struct tty_struct *tty; | ||
152 | int timeout; | 148 | int timeout; |
153 | int x_char; /* xon/xoff character */ | 149 | int x_char; /* xon/xoff character */ |
154 | int blocked_open; /* # of blocked opens */ | ||
155 | unsigned char read_status_mask; | 150 | unsigned char read_status_mask; |
156 | unsigned char ignore_status_mask; | 151 | unsigned char ignore_status_mask; |
157 | 152 | ||
@@ -170,9 +165,6 @@ typedef struct _mgslpc_info { | |||
170 | int rx_buf_count; /* total number of rx buffers */ | 165 | int rx_buf_count; /* total number of rx buffers */ |
171 | int rx_frame_count; /* number of full rx buffers */ | 166 | int rx_frame_count; /* number of full rx buffers */ |
172 | 167 | ||
173 | wait_queue_head_t open_wait; | ||
174 | wait_queue_head_t close_wait; | ||
175 | |||
176 | wait_queue_head_t status_event_wait_q; | 168 | wait_queue_head_t status_event_wait_q; |
177 | wait_queue_head_t event_wait_q; | 169 | wait_queue_head_t event_wait_q; |
178 | struct timer_list tx_timer; /* HDLC transmit timeout timer */ | 170 | struct timer_list tx_timer; /* HDLC transmit timeout timer */ |
@@ -375,7 +367,7 @@ static void irq_enable(MGSLPC_INFO *info, unsigned char channel, unsigned short | |||
375 | static void rx_start(MGSLPC_INFO *info); | 367 | static void rx_start(MGSLPC_INFO *info); |
376 | static void rx_stop(MGSLPC_INFO *info); | 368 | static void rx_stop(MGSLPC_INFO *info); |
377 | 369 | ||
378 | static void tx_start(MGSLPC_INFO *info); | 370 | static void tx_start(MGSLPC_INFO *info, struct tty_struct *tty); |
379 | static void tx_stop(MGSLPC_INFO *info); | 371 | static void tx_stop(MGSLPC_INFO *info); |
380 | static void tx_set_idle(MGSLPC_INFO *info); | 372 | static void tx_set_idle(MGSLPC_INFO *info); |
381 | 373 | ||
@@ -389,7 +381,8 @@ static void async_mode(MGSLPC_INFO *info); | |||
389 | 381 | ||
390 | static void tx_timeout(unsigned long context); | 382 | static void tx_timeout(unsigned long context); |
391 | 383 | ||
392 | static int ioctl_common(MGSLPC_INFO *info, unsigned int cmd, unsigned long arg); | 384 | static int carrier_raised(struct tty_port *port); |
385 | static void raise_dtr_rts(struct tty_port *port); | ||
393 | 386 | ||
394 | #if SYNCLINK_GENERIC_HDLC | 387 | #if SYNCLINK_GENERIC_HDLC |
395 | #define dev_to_port(D) (dev_to_hdlc(D)->priv) | 388 | #define dev_to_port(D) (dev_to_hdlc(D)->priv) |
@@ -410,7 +403,7 @@ static void release_resources(MGSLPC_INFO *info); | |||
410 | static void mgslpc_add_device(MGSLPC_INFO *info); | 403 | static void mgslpc_add_device(MGSLPC_INFO *info); |
411 | static void mgslpc_remove_device(MGSLPC_INFO *info); | 404 | static void mgslpc_remove_device(MGSLPC_INFO *info); |
412 | 405 | ||
413 | static bool rx_get_frame(MGSLPC_INFO *info); | 406 | static bool rx_get_frame(MGSLPC_INFO *info, struct tty_struct *tty); |
414 | static void rx_reset_buffers(MGSLPC_INFO *info); | 407 | static void rx_reset_buffers(MGSLPC_INFO *info); |
415 | static int rx_alloc_buffers(MGSLPC_INFO *info); | 408 | static int rx_alloc_buffers(MGSLPC_INFO *info); |
416 | static void rx_free_buffers(MGSLPC_INFO *info); | 409 | static void rx_free_buffers(MGSLPC_INFO *info); |
@@ -421,7 +414,7 @@ static irqreturn_t mgslpc_isr(int irq, void *dev_id); | |||
421 | * Bottom half interrupt handlers | 414 | * Bottom half interrupt handlers |
422 | */ | 415 | */ |
423 | static void bh_handler(struct work_struct *work); | 416 | static void bh_handler(struct work_struct *work); |
424 | static void bh_transmit(MGSLPC_INFO *info); | 417 | static void bh_transmit(MGSLPC_INFO *info, struct tty_struct *tty); |
425 | static void bh_status(MGSLPC_INFO *info); | 418 | static void bh_status(MGSLPC_INFO *info); |
426 | 419 | ||
427 | /* | 420 | /* |
@@ -432,10 +425,10 @@ static int tiocmset(struct tty_struct *tty, struct file *file, | |||
432 | unsigned int set, unsigned int clear); | 425 | unsigned int set, unsigned int clear); |
433 | static int get_stats(MGSLPC_INFO *info, struct mgsl_icount __user *user_icount); | 426 | static int get_stats(MGSLPC_INFO *info, struct mgsl_icount __user *user_icount); |
434 | static int get_params(MGSLPC_INFO *info, MGSL_PARAMS __user *user_params); | 427 | static int get_params(MGSLPC_INFO *info, MGSL_PARAMS __user *user_params); |
435 | static int set_params(MGSLPC_INFO *info, MGSL_PARAMS __user *new_params); | 428 | static int set_params(MGSLPC_INFO *info, MGSL_PARAMS __user *new_params, struct tty_struct *tty); |
436 | static int get_txidle(MGSLPC_INFO *info, int __user *idle_mode); | 429 | static int get_txidle(MGSLPC_INFO *info, int __user *idle_mode); |
437 | static int set_txidle(MGSLPC_INFO *info, int idle_mode); | 430 | static int set_txidle(MGSLPC_INFO *info, int idle_mode); |
438 | static int set_txenable(MGSLPC_INFO *info, int enable); | 431 | static int set_txenable(MGSLPC_INFO *info, int enable, struct tty_struct *tty); |
439 | static int tx_abort(MGSLPC_INFO *info); | 432 | static int tx_abort(MGSLPC_INFO *info); |
440 | static int set_rxenable(MGSLPC_INFO *info, int enable); | 433 | static int set_rxenable(MGSLPC_INFO *info, int enable); |
441 | static int wait_events(MGSLPC_INFO *info, int __user *mask); | 434 | static int wait_events(MGSLPC_INFO *info, int __user *mask); |
@@ -474,7 +467,7 @@ static struct tty_driver *serial_driver; | |||
474 | /* number of characters left in xmit buffer before we ask for more */ | 467 | /* number of characters left in xmit buffer before we ask for more */ |
475 | #define WAKEUP_CHARS 256 | 468 | #define WAKEUP_CHARS 256 |
476 | 469 | ||
477 | static void mgslpc_change_params(MGSLPC_INFO *info); | 470 | static void mgslpc_change_params(MGSLPC_INFO *info, struct tty_struct *tty); |
478 | static void mgslpc_wait_until_sent(struct tty_struct *tty, int timeout); | 471 | static void mgslpc_wait_until_sent(struct tty_struct *tty, int timeout); |
479 | 472 | ||
480 | /* PCMCIA prototypes */ | 473 | /* PCMCIA prototypes */ |
@@ -517,6 +510,11 @@ static void ldisc_receive_buf(struct tty_struct *tty, | |||
517 | } | 510 | } |
518 | } | 511 | } |
519 | 512 | ||
513 | static const struct tty_port_operations mgslpc_port_ops = { | ||
514 | .carrier_raised = carrier_raised, | ||
515 | .raise_dtr_rts = raise_dtr_rts | ||
516 | }; | ||
517 | |||
520 | static int mgslpc_probe(struct pcmcia_device *link) | 518 | static int mgslpc_probe(struct pcmcia_device *link) |
521 | { | 519 | { |
522 | MGSLPC_INFO *info; | 520 | MGSLPC_INFO *info; |
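Note: the bodies of carrier_raised() and raise_dtr_rts() fall outside the hunks shown here. In tty_port conversions like this one they typically just read or drive the modem signals under the device lock; a hedged sketch of what carrier_raised() plausibly looks like:

	static int carrier_raised(struct tty_port *port)
	{
		MGSLPC_INFO *info = container_of(port, MGSLPC_INFO, port);
		unsigned long flags;
		int dcd;

		spin_lock_irqsave(&info->lock, flags);
		get_signals(info);
		dcd = (info->serial_signals & SerialSignal_DCD) ? 1 : 0;
		spin_unlock_irqrestore(&info->lock, flags);
		return dcd;
	}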
@@ -532,12 +530,12 @@ static int mgslpc_probe(struct pcmcia_device *link) | |||
532 | } | 530 | } |
533 | 531 | ||
534 | info->magic = MGSLPC_MAGIC; | 532 | info->magic = MGSLPC_MAGIC; |
533 | tty_port_init(&info->port); | ||
534 | info->port.ops = &mgslpc_port_ops; | ||
535 | INIT_WORK(&info->task, bh_handler); | 535 | INIT_WORK(&info->task, bh_handler); |
536 | info->max_frame_size = 4096; | 536 | info->max_frame_size = 4096; |
537 | info->close_delay = 5*HZ/10; | 537 | info->port.close_delay = 5*HZ/10; |
538 | info->closing_wait = 30*HZ; | 538 | info->port.closing_wait = 30*HZ; |
539 | init_waitqueue_head(&info->open_wait); | ||
540 | init_waitqueue_head(&info->close_wait); | ||
541 | init_waitqueue_head(&info->status_event_wait_q); | 539 | init_waitqueue_head(&info->status_event_wait_q); |
542 | init_waitqueue_head(&info->event_wait_q); | 540 | init_waitqueue_head(&info->event_wait_q); |
543 | spin_lock_init(&info->lock); | 541 | spin_lock_init(&info->lock); |
@@ -784,7 +782,7 @@ static void tx_release(struct tty_struct *tty) | |||
784 | 782 | ||
785 | spin_lock_irqsave(&info->lock,flags); | 783 | spin_lock_irqsave(&info->lock,flags); |
786 | if (!info->tx_enabled) | 784 | if (!info->tx_enabled) |
787 | tx_start(info); | 785 | tx_start(info, tty); |
788 | spin_unlock_irqrestore(&info->lock,flags); | 786 | spin_unlock_irqrestore(&info->lock,flags); |
789 | } | 787 | } |
790 | 788 | ||
@@ -823,6 +821,7 @@ static int bh_action(MGSLPC_INFO *info) | |||
823 | static void bh_handler(struct work_struct *work) | 821 | static void bh_handler(struct work_struct *work) |
824 | { | 822 | { |
825 | MGSLPC_INFO *info = container_of(work, MGSLPC_INFO, task); | 823 | MGSLPC_INFO *info = container_of(work, MGSLPC_INFO, task); |
824 | struct tty_struct *tty; | ||
826 | int action; | 825 | int action; |
827 | 826 | ||
828 | if (!info) | 827 | if (!info) |
@@ -833,6 +832,7 @@ static void bh_handler(struct work_struct *work) | |||
833 | __FILE__,__LINE__,info->device_name); | 832 | __FILE__,__LINE__,info->device_name); |
834 | 833 | ||
835 | info->bh_running = true; | 834 | info->bh_running = true; |
835 | tty = tty_port_tty_get(&info->port); | ||
836 | 836 | ||
837 | while((action = bh_action(info)) != 0) { | 837 | while((action = bh_action(info)) != 0) { |
838 | 838 | ||
@@ -844,10 +844,10 @@ static void bh_handler(struct work_struct *work) | |||
844 | switch (action) { | 844 | switch (action) { |
845 | 845 | ||
846 | case BH_RECEIVE: | 846 | case BH_RECEIVE: |
847 | while(rx_get_frame(info)); | 847 | while(rx_get_frame(info, tty)); |
848 | break; | 848 | break; |
849 | case BH_TRANSMIT: | 849 | case BH_TRANSMIT: |
850 | bh_transmit(info); | 850 | bh_transmit(info, tty); |
851 | break; | 851 | break; |
852 | case BH_STATUS: | 852 | case BH_STATUS: |
853 | bh_status(info); | 853 | bh_status(info); |
@@ -859,14 +859,14 @@ static void bh_handler(struct work_struct *work) | |||
859 | } | 859 | } |
860 | } | 860 | } |
861 | 861 | ||
862 | tty_kref_put(tty); | ||
862 | if (debug_level >= DEBUG_LEVEL_BH) | 863 | if (debug_level >= DEBUG_LEVEL_BH) |
863 | printk( "%s(%d):bh_handler(%s) exit\n", | 864 | printk( "%s(%d):bh_handler(%s) exit\n", |
864 | __FILE__,__LINE__,info->device_name); | 865 | __FILE__,__LINE__,info->device_name); |
865 | } | 866 | } |
866 | 867 | ||
867 | static void bh_transmit(MGSLPC_INFO *info) | 868 | static void bh_transmit(MGSLPC_INFO *info, struct tty_struct *tty) |
868 | { | 869 | { |
869 | struct tty_struct *tty = info->tty; | ||
870 | if (debug_level >= DEBUG_LEVEL_BH) | 870 | if (debug_level >= DEBUG_LEVEL_BH) |
871 | printk("bh_transmit() entry on %s\n", info->device_name); | 871 | printk("bh_transmit() entry on %s\n", info->device_name); |
872 | 872 | ||
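Note: the shape of this refactor is that the helpers stop dereferencing info->tty themselves. bh_handler() resolves the tty once with tty_port_tty_get(), holds that single reference across the whole action loop, passes it down to rx_get_frame() and bh_transmit(), and drops it with tty_kref_put() on exit; this keeps get/put out of the hot loop and avoids racing with the tty pointer being cleared mid-run.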
@@ -945,12 +945,11 @@ static void rx_ready_hdlc(MGSLPC_INFO *info, int eom) | |||
945 | issue_command(info, CHA, CMD_RXFIFO); | 945 | issue_command(info, CHA, CMD_RXFIFO); |
946 | } | 946 | } |
947 | 947 | ||
948 | static void rx_ready_async(MGSLPC_INFO *info, int tcd) | 948 | static void rx_ready_async(MGSLPC_INFO *info, int tcd, struct tty_struct *tty) |
949 | { | 949 | { |
950 | unsigned char data, status, flag; | 950 | unsigned char data, status, flag; |
951 | int fifo_count; | 951 | int fifo_count; |
952 | int work = 0; | 952 | int work = 0; |
953 | struct tty_struct *tty = info->tty; | ||
954 | struct mgsl_icount *icount = &info->icount; | 953 | struct mgsl_icount *icount = &info->icount; |
955 | 954 | ||
956 | if (tcd) { | 955 | if (tcd) { |
@@ -1013,7 +1012,7 @@ static void rx_ready_async(MGSLPC_INFO *info, int tcd) | |||
1013 | } | 1012 | } |
1014 | 1013 | ||
1015 | 1014 | ||
1016 | static void tx_done(MGSLPC_INFO *info) | 1015 | static void tx_done(MGSLPC_INFO *info, struct tty_struct *tty) |
1017 | { | 1016 | { |
1018 | if (!info->tx_active) | 1017 | if (!info->tx_active) |
1019 | return; | 1018 | return; |
@@ -1042,7 +1041,7 @@ static void tx_done(MGSLPC_INFO *info) | |||
1042 | else | 1041 | else |
1043 | #endif | 1042 | #endif |
1044 | { | 1043 | { |
1045 | if (info->tty->stopped || info->tty->hw_stopped) { | 1044 | if (tty->stopped || tty->hw_stopped) { |
1046 | tx_stop(info); | 1045 | tx_stop(info); |
1047 | return; | 1046 | return; |
1048 | } | 1047 | } |
@@ -1050,7 +1049,7 @@ static void tx_done(MGSLPC_INFO *info) | |||
1050 | } | 1049 | } |
1051 | } | 1050 | } |
1052 | 1051 | ||
1053 | static void tx_ready(MGSLPC_INFO *info) | 1052 | static void tx_ready(MGSLPC_INFO *info, struct tty_struct *tty) |
1054 | { | 1053 | { |
1055 | unsigned char fifo_count = 32; | 1054 | unsigned char fifo_count = 32; |
1056 | int c; | 1055 | int c; |
@@ -1062,7 +1061,7 @@ static void tx_ready(MGSLPC_INFO *info) | |||
1062 | if (!info->tx_active) | 1061 | if (!info->tx_active) |
1063 | return; | 1062 | return; |
1064 | } else { | 1063 | } else { |
1065 | if (info->tty->stopped || info->tty->hw_stopped) { | 1064 | if (tty->stopped || tty->hw_stopped) { |
1066 | tx_stop(info); | 1065 | tx_stop(info); |
1067 | return; | 1066 | return; |
1068 | } | 1067 | } |
@@ -1099,7 +1098,7 @@ static void tx_ready(MGSLPC_INFO *info) | |||
1099 | } | 1098 | } |
1100 | } | 1099 | } |
1101 | 1100 | ||
1102 | static void cts_change(MGSLPC_INFO *info) | 1101 | static void cts_change(MGSLPC_INFO *info, struct tty_struct *tty) |
1103 | { | 1102 | { |
1104 | get_signals(info); | 1103 | get_signals(info); |
1105 | if ((info->cts_chkcount)++ >= IO_PIN_SHUTDOWN_LIMIT) | 1104 | if ((info->cts_chkcount)++ >= IO_PIN_SHUTDOWN_LIMIT) |
@@ -1112,14 +1111,14 @@ static void cts_change(MGSLPC_INFO *info) | |||
1112 | wake_up_interruptible(&info->status_event_wait_q); | 1111 | wake_up_interruptible(&info->status_event_wait_q); |
1113 | wake_up_interruptible(&info->event_wait_q); | 1112 | wake_up_interruptible(&info->event_wait_q); |
1114 | 1113 | ||
1115 | if (info->flags & ASYNC_CTS_FLOW) { | 1114 | if (info->port.flags & ASYNC_CTS_FLOW) { |
1116 | if (info->tty->hw_stopped) { | 1115 | if (tty->hw_stopped) { |
1117 | if (info->serial_signals & SerialSignal_CTS) { | 1116 | if (info->serial_signals & SerialSignal_CTS) { |
1118 | if (debug_level >= DEBUG_LEVEL_ISR) | 1117 | if (debug_level >= DEBUG_LEVEL_ISR) |
1119 | printk("CTS tx start..."); | 1118 | printk("CTS tx start..."); |
1120 | if (info->tty) | 1119 | if (tty) |
1121 | info->tty->hw_stopped = 0; | 1120 | tty->hw_stopped = 0; |
1122 | tx_start(info); | 1121 | tx_start(info, tty); |
1123 | info->pending_bh |= BH_TRANSMIT; | 1122 | info->pending_bh |= BH_TRANSMIT; |
1124 | return; | 1123 | return; |
1125 | } | 1124 | } |
@@ -1127,8 +1126,8 @@ static void cts_change(MGSLPC_INFO *info) | |||
1127 | if (!(info->serial_signals & SerialSignal_CTS)) { | 1126 | if (!(info->serial_signals & SerialSignal_CTS)) { |
1128 | if (debug_level >= DEBUG_LEVEL_ISR) | 1127 | if (debug_level >= DEBUG_LEVEL_ISR) |
1129 | printk("CTS tx stop..."); | 1128 | printk("CTS tx stop..."); |
1130 | if (info->tty) | 1129 | if (tty) |
1131 | info->tty->hw_stopped = 1; | 1130 | tty->hw_stopped = 1; |
1132 | tx_stop(info); | 1131 | tx_stop(info); |
1133 | } | 1132 | } |
1134 | } | 1133 | } |
@@ -1136,7 +1135,7 @@ static void cts_change(MGSLPC_INFO *info) | |||
1136 | info->pending_bh |= BH_STATUS; | 1135 | info->pending_bh |= BH_STATUS; |
1137 | } | 1136 | } |
1138 | 1137 | ||
1139 | static void dcd_change(MGSLPC_INFO *info) | 1138 | static void dcd_change(MGSLPC_INFO *info, struct tty_struct *tty) |
1140 | { | 1139 | { |
1141 | get_signals(info); | 1140 | get_signals(info); |
1142 | if ((info->dcd_chkcount)++ >= IO_PIN_SHUTDOWN_LIMIT) | 1141 | if ((info->dcd_chkcount)++ >= IO_PIN_SHUTDOWN_LIMIT) |
@@ -1158,17 +1157,17 @@ static void dcd_change(MGSLPC_INFO *info) | |||
1158 | wake_up_interruptible(&info->status_event_wait_q); | 1157 | wake_up_interruptible(&info->status_event_wait_q); |
1159 | wake_up_interruptible(&info->event_wait_q); | 1158 | wake_up_interruptible(&info->event_wait_q); |
1160 | 1159 | ||
1161 | if (info->flags & ASYNC_CHECK_CD) { | 1160 | if (info->port.flags & ASYNC_CHECK_CD) { |
1162 | if (debug_level >= DEBUG_LEVEL_ISR) | 1161 | if (debug_level >= DEBUG_LEVEL_ISR) |
1163 | printk("%s CD now %s...", info->device_name, | 1162 | printk("%s CD now %s...", info->device_name, |
1164 | (info->serial_signals & SerialSignal_DCD) ? "on" : "off"); | 1163 | (info->serial_signals & SerialSignal_DCD) ? "on" : "off"); |
1165 | if (info->serial_signals & SerialSignal_DCD) | 1164 | if (info->serial_signals & SerialSignal_DCD) |
1166 | wake_up_interruptible(&info->open_wait); | 1165 | wake_up_interruptible(&info->port.open_wait); |
1167 | else { | 1166 | else { |
1168 | if (debug_level >= DEBUG_LEVEL_ISR) | 1167 | if (debug_level >= DEBUG_LEVEL_ISR) |
1169 | printk("doing serial hangup..."); | 1168 | printk("doing serial hangup..."); |
1170 | if (info->tty) | 1169 | if (tty) |
1171 | tty_hangup(info->tty); | 1170 | tty_hangup(tty); |
1172 | } | 1171 | } |
1173 | } | 1172 | } |
1174 | info->pending_bh |= BH_STATUS; | 1173 | info->pending_bh |= BH_STATUS; |
@@ -1214,6 +1213,7 @@ static void ri_change(MGSLPC_INFO *info) | |||
1214 | static irqreturn_t mgslpc_isr(int dummy, void *dev_id) | 1213 | static irqreturn_t mgslpc_isr(int dummy, void *dev_id) |
1215 | { | 1214 | { |
1216 | MGSLPC_INFO *info = dev_id; | 1215 | MGSLPC_INFO *info = dev_id; |
1216 | struct tty_struct *tty; | ||
1217 | unsigned short isr; | 1217 | unsigned short isr; |
1218 | unsigned char gis, pis; | 1218 | unsigned char gis, pis; |
1219 | int count=0; | 1219 | int count=0; |
@@ -1224,6 +1224,8 @@ static irqreturn_t mgslpc_isr(int dummy, void *dev_id) | |||
1224 | if (!(info->p_dev->_locked)) | 1224 | if (!(info->p_dev->_locked)) |
1225 | return IRQ_HANDLED; | 1225 | return IRQ_HANDLED; |
1226 | 1226 | ||
1227 | tty = tty_port_tty_get(&info->port); | ||
1228 | |||
1227 | spin_lock(&info->lock); | 1229 | spin_lock(&info->lock); |
1228 | 1230 | ||
1229 | while ((gis = read_reg(info, CHA + GIS))) { | 1231 | while ((gis = read_reg(info, CHA + GIS))) { |
@@ -1239,9 +1241,9 @@ static irqreturn_t mgslpc_isr(int dummy, void *dev_id) | |||
1239 | if (gis & (BIT1 + BIT0)) { | 1241 | if (gis & (BIT1 + BIT0)) { |
1240 | isr = read_reg16(info, CHB + ISR); | 1242 | isr = read_reg16(info, CHB + ISR); |
1241 | if (isr & IRQ_DCD) | 1243 | if (isr & IRQ_DCD) |
1242 | dcd_change(info); | 1244 | dcd_change(info, tty); |
1243 | if (isr & IRQ_CTS) | 1245 | if (isr & IRQ_CTS) |
1244 | cts_change(info); | 1246 | cts_change(info, tty); |
1245 | } | 1247 | } |
1246 | if (gis & (BIT3 + BIT2)) | 1248 | if (gis & (BIT3 + BIT2)) |
1247 | { | 1249 | { |
@@ -1258,8 +1260,8 @@ static irqreturn_t mgslpc_isr(int dummy, void *dev_id) | |||
1258 | } | 1260 | } |
1259 | if (isr & IRQ_BREAK_ON) { | 1261 | if (isr & IRQ_BREAK_ON) { |
1260 | info->icount.brk++; | 1262 | info->icount.brk++; |
1261 | if (info->flags & ASYNC_SAK) | 1263 | if (info->port.flags & ASYNC_SAK) |
1262 | do_SAK(info->tty); | 1264 | do_SAK(tty); |
1263 | } | 1265 | } |
1264 | if (isr & IRQ_RXTIME) { | 1266 | if (isr & IRQ_RXTIME) { |
1265 | issue_command(info, CHA, CMD_RXFIFO_READ); | 1267 | issue_command(info, CHA, CMD_RXFIFO_READ); |
@@ -1268,7 +1270,7 @@ static irqreturn_t mgslpc_isr(int dummy, void *dev_id) | |||
1268 | if (info->params.mode == MGSL_MODE_HDLC) | 1270 | if (info->params.mode == MGSL_MODE_HDLC) |
1269 | rx_ready_hdlc(info, isr & IRQ_RXEOM); | 1271 | rx_ready_hdlc(info, isr & IRQ_RXEOM); |
1270 | else | 1272 | else |
1271 | rx_ready_async(info, isr & IRQ_RXEOM); | 1273 | rx_ready_async(info, isr & IRQ_RXEOM, tty); |
1272 | } | 1274 | } |
1273 | 1275 | ||
1274 | /* transmit IRQs */ | 1276 | /* transmit IRQs */ |
@@ -1277,14 +1279,14 @@ static irqreturn_t mgslpc_isr(int dummy, void *dev_id) | |||
1277 | info->icount.txabort++; | 1279 | info->icount.txabort++; |
1278 | else | 1280 | else |
1279 | info->icount.txunder++; | 1281 | info->icount.txunder++; |
1280 | tx_done(info); | 1282 | tx_done(info, tty); |
1281 | } | 1283 | } |
1282 | else if (isr & IRQ_ALLSENT) { | 1284 | else if (isr & IRQ_ALLSENT) { |
1283 | info->icount.txok++; | 1285 | info->icount.txok++; |
1284 | tx_done(info); | 1286 | tx_done(info, tty); |
1285 | } | 1287 | } |
1286 | else if (isr & IRQ_TXFIFO) | 1288 | else if (isr & IRQ_TXFIFO) |
1287 | tx_ready(info); | 1289 | tx_ready(info, tty); |
1288 | } | 1290 | } |
1289 | if (gis & BIT7) { | 1291 | if (gis & BIT7) { |
1290 | pis = read_reg(info, CHA + PIS); | 1292 | pis = read_reg(info, CHA + PIS); |
@@ -1308,6 +1310,7 @@ static irqreturn_t mgslpc_isr(int dummy, void *dev_id) | |||
1308 | } | 1310 | } |
1309 | 1311 | ||
1310 | spin_unlock(&info->lock); | 1312 | spin_unlock(&info->lock); |
1313 | tty_kref_put(tty); | ||
1311 | 1314 | ||
1312 | if (debug_level >= DEBUG_LEVEL_ISR) | 1315 | if (debug_level >= DEBUG_LEVEL_ISR) |
1313 | printk("%s(%d):mgslpc_isr(%d)exit.\n", | 1316 | printk("%s(%d):mgslpc_isr(%d)exit.\n", |
@@ -1318,14 +1321,14 @@ static irqreturn_t mgslpc_isr(int dummy, void *dev_id) | |||
1318 | 1321 | ||
1319 | /* Initialize and start device. | 1322 | /* Initialize and start device. |
1320 | */ | 1323 | */ |
1321 | static int startup(MGSLPC_INFO * info) | 1324 | static int startup(MGSLPC_INFO * info, struct tty_struct *tty) |
1322 | { | 1325 | { |
1323 | int retval = 0; | 1326 | int retval = 0; |
1324 | 1327 | ||
1325 | if (debug_level >= DEBUG_LEVEL_INFO) | 1328 | if (debug_level >= DEBUG_LEVEL_INFO) |
1326 | printk("%s(%d):startup(%s)\n",__FILE__,__LINE__,info->device_name); | 1329 | printk("%s(%d):startup(%s)\n",__FILE__,__LINE__,info->device_name); |
1327 | 1330 | ||
1328 | if (info->flags & ASYNC_INITIALIZED) | 1331 | if (info->port.flags & ASYNC_INITIALIZED) |
1329 | return 0; | 1332 | return 0; |
1330 | 1333 | ||
1331 | if (!info->tx_buf) { | 1334 | if (!info->tx_buf) { |
@@ -1352,30 +1355,30 @@ static int startup(MGSLPC_INFO * info) | |||
1352 | retval = adapter_test(info); | 1355 | retval = adapter_test(info); |
1353 | 1356 | ||
1354 | if ( retval ) { | 1357 | if ( retval ) { |
1355 | if (capable(CAP_SYS_ADMIN) && info->tty) | 1358 | if (capable(CAP_SYS_ADMIN) && tty) |
1356 | set_bit(TTY_IO_ERROR, &info->tty->flags); | 1359 | set_bit(TTY_IO_ERROR, &tty->flags); |
1357 | release_resources(info); | 1360 | release_resources(info); |
1358 | return retval; | 1361 | return retval; |
1359 | } | 1362 | } |
1360 | 1363 | ||
1361 | /* program hardware for current parameters */ | 1364 | /* program hardware for current parameters */ |
1362 | mgslpc_change_params(info); | 1365 | mgslpc_change_params(info, tty); |
1363 | 1366 | ||
1364 | if (info->tty) | 1367 | if (tty) |
1365 | clear_bit(TTY_IO_ERROR, &info->tty->flags); | 1368 | clear_bit(TTY_IO_ERROR, &tty->flags); |
1366 | 1369 | ||
1367 | info->flags |= ASYNC_INITIALIZED; | 1370 | info->port.flags |= ASYNC_INITIALIZED; |
1368 | 1371 | ||
1369 | return 0; | 1372 | return 0; |
1370 | } | 1373 | } |
1371 | 1374 | ||
1372 | /* Called by mgslpc_close() and mgslpc_hangup() to shutdown hardware | 1375 | /* Called by mgslpc_close() and mgslpc_hangup() to shutdown hardware |
1373 | */ | 1376 | */ |
1374 | static void shutdown(MGSLPC_INFO * info) | 1377 | static void shutdown(MGSLPC_INFO * info, struct tty_struct *tty) |
1375 | { | 1378 | { |
1376 | unsigned long flags; | 1379 | unsigned long flags; |
1377 | 1380 | ||
1378 | if (!(info->flags & ASYNC_INITIALIZED)) | 1381 | if (!(info->port.flags & ASYNC_INITIALIZED)) |
1379 | return; | 1382 | return; |
1380 | 1383 | ||
1381 | if (debug_level >= DEBUG_LEVEL_INFO) | 1384 | if (debug_level >= DEBUG_LEVEL_INFO) |
@@ -1402,7 +1405,7 @@ static void shutdown(MGSLPC_INFO * info) | |||
1402 | /* TODO:disable interrupts instead of reset to preserve signal states */ | 1405 | /* TODO:disable interrupts instead of reset to preserve signal states */ |
1403 | reset_device(info); | 1406 | reset_device(info); |
1404 | 1407 | ||
1405 | if (!info->tty || info->tty->termios->c_cflag & HUPCL) { | 1408 | if (!tty || tty->termios->c_cflag & HUPCL) { |
1406 | info->serial_signals &= ~(SerialSignal_DTR + SerialSignal_RTS); | 1409 | info->serial_signals &= ~(SerialSignal_DTR + SerialSignal_RTS); |
1407 | set_signals(info); | 1410 | set_signals(info); |
1408 | } | 1411 | } |
@@ -1411,13 +1414,13 @@ static void shutdown(MGSLPC_INFO * info) | |||
1411 | 1414 | ||
1412 | release_resources(info); | 1415 | release_resources(info); |
1413 | 1416 | ||
1414 | if (info->tty) | 1417 | if (tty) |
1415 | set_bit(TTY_IO_ERROR, &info->tty->flags); | 1418 | set_bit(TTY_IO_ERROR, &tty->flags); |
1416 | 1419 | ||
1417 | info->flags &= ~ASYNC_INITIALIZED; | 1420 | info->port.flags &= ~ASYNC_INITIALIZED; |
1418 | } | 1421 | } |
1419 | 1422 | ||
1420 | static void mgslpc_program_hw(MGSLPC_INFO *info) | 1423 | static void mgslpc_program_hw(MGSLPC_INFO *info, struct tty_struct *tty) |
1421 | { | 1424 | { |
1422 | unsigned long flags; | 1425 | unsigned long flags; |
1423 | 1426 | ||
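[Editor's note: these hunks assume the driver state now embeds a struct tty_port named port, which is where the flags, open count, and open/close wait queues live after the conversion. A sketch of the embedding, with my_info standing in for MGSLPC_INFO:

    /* Sketch: generic port state embedded in per-device state. */
    struct my_info {
            struct tty_port port;   /* flags, count, wait queues */
            spinlock_t lock;
            /* ... hardware-specific fields ... */
    };

    /* tty_port callbacks receive only the port pointer; the
     * enclosing device is recovered with container_of(), as
     * carrier_raised() does further down. */
    struct my_info *info = container_of(port, struct my_info, port);
]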
@@ -1443,7 +1446,7 @@ static void mgslpc_program_hw(MGSLPC_INFO *info) | |||
1443 | port_irq_enable(info, (unsigned char) PVR_DSR | PVR_RI); | 1446 | port_irq_enable(info, (unsigned char) PVR_DSR | PVR_RI); |
1444 | get_signals(info); | 1447 | get_signals(info); |
1445 | 1448 | ||
1446 | if (info->netcount || info->tty->termios->c_cflag & CREAD) | 1449 | if (info->netcount || (tty && (tty->termios->c_cflag & CREAD))) |
1447 | rx_start(info); | 1450 | rx_start(info); |
1448 | 1451 | ||
1449 | spin_unlock_irqrestore(&info->lock,flags); | 1452 | spin_unlock_irqrestore(&info->lock,flags); |
@@ -1451,19 +1454,19 @@ static void mgslpc_program_hw(MGSLPC_INFO *info) | |||
1451 | 1454 | ||
1452 | /* Reconfigure adapter based on new parameters | 1455 | /* Reconfigure adapter based on new parameters |
1453 | */ | 1456 | */ |
1454 | static void mgslpc_change_params(MGSLPC_INFO *info) | 1457 | static void mgslpc_change_params(MGSLPC_INFO *info, struct tty_struct *tty) |
1455 | { | 1458 | { |
1456 | unsigned cflag; | 1459 | unsigned cflag; |
1457 | int bits_per_char; | 1460 | int bits_per_char; |
1458 | 1461 | ||
1459 | if (!info->tty || !info->tty->termios) | 1462 | if (!tty || !tty->termios) |
1460 | return; | 1463 | return; |
1461 | 1464 | ||
1462 | if (debug_level >= DEBUG_LEVEL_INFO) | 1465 | if (debug_level >= DEBUG_LEVEL_INFO) |
1463 | printk("%s(%d):mgslpc_change_params(%s)\n", | 1466 | printk("%s(%d):mgslpc_change_params(%s)\n", |
1464 | __FILE__,__LINE__, info->device_name ); | 1467 | __FILE__,__LINE__, info->device_name ); |
1465 | 1468 | ||
1466 | cflag = info->tty->termios->c_cflag; | 1469 | cflag = tty->termios->c_cflag; |
1467 | 1470 | ||
1468 | /* if B0 rate (hangup) specified then negate DTR and RTS */ | 1471 | /* if B0 rate (hangup) specified then negate DTR and RTS */ |
1469 | /* otherwise assert DTR and RTS */ | 1472 | /* otherwise assert DTR and RTS */ |
@@ -1510,7 +1513,7 @@ static void mgslpc_change_params(MGSLPC_INFO *info) | |||
1510 | * current data rate. | 1513 | * current data rate. |
1511 | */ | 1514 | */ |
1512 | if (info->params.data_rate <= 460800) { | 1515 | if (info->params.data_rate <= 460800) { |
1513 | info->params.data_rate = tty_get_baud_rate(info->tty); | 1516 | info->params.data_rate = tty_get_baud_rate(tty); |
1514 | } | 1517 | } |
1515 | 1518 | ||
1516 | if ( info->params.data_rate ) { | 1519 | if ( info->params.data_rate ) { |
@@ -1520,24 +1523,24 @@ static void mgslpc_change_params(MGSLPC_INFO *info) | |||
1520 | info->timeout += HZ/50; /* Add .02 seconds of slop */ | 1523 | info->timeout += HZ/50; /* Add .02 seconds of slop */ |
1521 | 1524 | ||
1522 | if (cflag & CRTSCTS) | 1525 | if (cflag & CRTSCTS) |
1523 | info->flags |= ASYNC_CTS_FLOW; | 1526 | info->port.flags |= ASYNC_CTS_FLOW; |
1524 | else | 1527 | else |
1525 | info->flags &= ~ASYNC_CTS_FLOW; | 1528 | info->port.flags &= ~ASYNC_CTS_FLOW; |
1526 | 1529 | ||
1527 | if (cflag & CLOCAL) | 1530 | if (cflag & CLOCAL) |
1528 | info->flags &= ~ASYNC_CHECK_CD; | 1531 | info->port.flags &= ~ASYNC_CHECK_CD; |
1529 | else | 1532 | else |
1530 | info->flags |= ASYNC_CHECK_CD; | 1533 | info->port.flags |= ASYNC_CHECK_CD; |
1531 | 1534 | ||
1532 | /* process tty input control flags */ | 1535 | /* process tty input control flags */ |
1533 | 1536 | ||
1534 | info->read_status_mask = 0; | 1537 | info->read_status_mask = 0; |
1535 | if (I_INPCK(info->tty)) | 1538 | if (I_INPCK(tty)) |
1536 | info->read_status_mask |= BIT7 | BIT6; | 1539 | info->read_status_mask |= BIT7 | BIT6; |
1537 | if (I_IGNPAR(info->tty)) | 1540 | if (I_IGNPAR(tty)) |
1538 | info->ignore_status_mask |= BIT7 | BIT6; | 1541 | info->ignore_status_mask |= BIT7 | BIT6; |
1539 | 1542 | ||
1540 | mgslpc_program_hw(info); | 1543 | mgslpc_program_hw(info, tty); |
1541 | } | 1544 | } |
1542 | 1545 | ||
1543 | /* Add a character to the transmit buffer | 1546 | /* Add a character to the transmit buffer |
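[Editor's note: the flag updates above are the standard termios-to-tty_port mapping: CRTSCTS selects hardware flow control, CLOCAL suppresses carrier tracking. Moving them into port.flags matters because both the ISR path and the generic tty_port code read them. A sketch of the consumer side, the shape a DCD-change handler such as dcd_change() typically takes (this is not the driver's exact body):

    static void dcd_change(struct my_info *info, struct tty_struct *tty)
    {
            get_signals(info);
            /* hang up only when the user did not set CLOCAL */
            if (!(info->serial_signals & SerialSignal_DCD) &&
                (info->port.flags & ASYNC_CHECK_CD)) {
                    if (tty)
                            tty_hangup(tty);        /* carrier dropped */
            }
    }
]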
@@ -1597,7 +1600,7 @@ static void mgslpc_flush_chars(struct tty_struct *tty) | |||
1597 | 1600 | ||
1598 | spin_lock_irqsave(&info->lock,flags); | 1601 | spin_lock_irqsave(&info->lock,flags); |
1599 | if (!info->tx_active) | 1602 | if (!info->tx_active) |
1600 | tx_start(info); | 1603 | tx_start(info, tty); |
1601 | spin_unlock_irqrestore(&info->lock,flags); | 1604 | spin_unlock_irqrestore(&info->lock,flags); |
1602 | } | 1605 | } |
1603 | 1606 | ||
@@ -1659,7 +1662,7 @@ start: | |||
1659 | if (info->tx_count && !tty->stopped && !tty->hw_stopped) { | 1662 | if (info->tx_count && !tty->stopped && !tty->hw_stopped) { |
1660 | spin_lock_irqsave(&info->lock,flags); | 1663 | spin_lock_irqsave(&info->lock,flags); |
1661 | if (!info->tx_active) | 1664 | if (!info->tx_active) |
1662 | tx_start(info); | 1665 | tx_start(info, tty); |
1663 | spin_unlock_irqrestore(&info->lock,flags); | 1666 | spin_unlock_irqrestore(&info->lock,flags); |
1664 | } | 1667 | } |
1665 | cleanup: | 1668 | cleanup: |
@@ -1764,7 +1767,7 @@ static void mgslpc_send_xchar(struct tty_struct *tty, char ch) | |||
1764 | if (ch) { | 1767 | if (ch) { |
1765 | spin_lock_irqsave(&info->lock,flags); | 1768 | spin_lock_irqsave(&info->lock,flags); |
1766 | if (!info->tx_enabled) | 1769 | if (!info->tx_enabled) |
1767 | tx_start(info); | 1770 | tx_start(info, tty); |
1768 | spin_unlock_irqrestore(&info->lock,flags); | 1771 | spin_unlock_irqrestore(&info->lock,flags); |
1769 | } | 1772 | } |
1770 | } | 1773 | } |
@@ -1862,7 +1865,7 @@ static int get_params(MGSLPC_INFO * info, MGSL_PARAMS __user *user_params) | |||
1862 | * | 1865 | * |
1863 | * Returns: 0 if success, otherwise error code | 1866 | * Returns: 0 if success, otherwise error code |
1864 | */ | 1867 | */ |
1865 | static int set_params(MGSLPC_INFO * info, MGSL_PARAMS __user *new_params) | 1868 | static int set_params(MGSLPC_INFO * info, MGSL_PARAMS __user *new_params, struct tty_struct *tty) |
1866 | { | 1869 | { |
1867 | unsigned long flags; | 1870 | unsigned long flags; |
1868 | MGSL_PARAMS tmp_params; | 1871 | MGSL_PARAMS tmp_params; |
@@ -1883,7 +1886,7 @@ static int set_params(MGSLPC_INFO * info, MGSL_PARAMS __user *new_params) | |||
1883 | memcpy(&info->params,&tmp_params,sizeof(MGSL_PARAMS)); | 1886 | memcpy(&info->params,&tmp_params,sizeof(MGSL_PARAMS)); |
1884 | spin_unlock_irqrestore(&info->lock,flags); | 1887 | spin_unlock_irqrestore(&info->lock,flags); |
1885 | 1888 | ||
1886 | mgslpc_change_params(info); | 1889 | mgslpc_change_params(info, tty); |
1887 | 1890 | ||
1888 | return 0; | 1891 | return 0; |
1889 | } | 1892 | } |
@@ -1944,7 +1947,7 @@ static int set_interface(MGSLPC_INFO * info, int if_mode) | |||
1944 | return 0; | 1947 | return 0; |
1945 | } | 1948 | } |
1946 | 1949 | ||
1947 | static int set_txenable(MGSLPC_INFO * info, int enable) | 1950 | static int set_txenable(MGSLPC_INFO * info, int enable, struct tty_struct *tty) |
1948 | { | 1951 | { |
1949 | unsigned long flags; | 1952 | unsigned long flags; |
1950 | 1953 | ||
@@ -1954,7 +1957,7 @@ static int set_txenable(MGSLPC_INFO * info, int enable) | |||
1954 | spin_lock_irqsave(&info->lock,flags); | 1957 | spin_lock_irqsave(&info->lock,flags); |
1955 | if (enable) { | 1958 | if (enable) { |
1956 | if (!info->tx_enabled) | 1959 | if (!info->tx_enabled) |
1957 | tx_start(info); | 1960 | tx_start(info, tty); |
1958 | } else { | 1961 | } else { |
1959 | if (info->tx_enabled) | 1962 | if (info->tx_enabled) |
1960 | tx_stop(info); | 1963 | tx_stop(info); |
@@ -2263,6 +2266,11 @@ static int mgslpc_ioctl(struct tty_struct *tty, struct file * file, | |||
2263 | unsigned int cmd, unsigned long arg) | 2266 | unsigned int cmd, unsigned long arg) |
2264 | { | 2267 | { |
2265 | MGSLPC_INFO * info = (MGSLPC_INFO *)tty->driver_data; | 2268 | MGSLPC_INFO * info = (MGSLPC_INFO *)tty->driver_data; |
2269 | int error; | ||
2270 | struct mgsl_icount cnow; /* kernel counter temps */ | ||
2271 | struct serial_icounter_struct __user *p_cuser; /* user space */ | ||
2272 | void __user *argp = (void __user *)arg; | ||
2273 | unsigned long flags; | ||
2266 | 2274 | ||
2267 | if (debug_level >= DEBUG_LEVEL_INFO) | 2275 | if (debug_level >= DEBUG_LEVEL_INFO) |
2268 | printk("%s(%d):mgslpc_ioctl %s cmd=%08X\n", __FILE__,__LINE__, | 2276 | printk("%s(%d):mgslpc_ioctl %s cmd=%08X\n", __FILE__,__LINE__, |
@@ -2277,22 +2285,11 @@ static int mgslpc_ioctl(struct tty_struct *tty, struct file * file, | |||
2277 | return -EIO; | 2285 | return -EIO; |
2278 | } | 2286 | } |
2279 | 2287 | ||
2280 | return ioctl_common(info, cmd, arg); | ||
2281 | } | ||
2282 | |||
2283 | static int ioctl_common(MGSLPC_INFO *info, unsigned int cmd, unsigned long arg) | ||
2284 | { | ||
2285 | int error; | ||
2286 | struct mgsl_icount cnow; /* kernel counter temps */ | ||
2287 | struct serial_icounter_struct __user *p_cuser; /* user space */ | ||
2288 | void __user *argp = (void __user *)arg; | ||
2289 | unsigned long flags; | ||
2290 | |||
2291 | switch (cmd) { | 2288 | switch (cmd) { |
2292 | case MGSL_IOCGPARAMS: | 2289 | case MGSL_IOCGPARAMS: |
2293 | return get_params(info, argp); | 2290 | return get_params(info, argp); |
2294 | case MGSL_IOCSPARAMS: | 2291 | case MGSL_IOCSPARAMS: |
2295 | return set_params(info, argp); | 2292 | return set_params(info, argp, tty); |
2296 | case MGSL_IOCGTXIDLE: | 2293 | case MGSL_IOCGTXIDLE: |
2297 | return get_txidle(info, argp); | 2294 | return get_txidle(info, argp); |
2298 | case MGSL_IOCSTXIDLE: | 2295 | case MGSL_IOCSTXIDLE: |
@@ -2302,7 +2299,7 @@ static int ioctl_common(MGSLPC_INFO *info, unsigned int cmd, unsigned long arg) | |||
2302 | case MGSL_IOCSIF: | 2299 | case MGSL_IOCSIF: |
2303 | return set_interface(info,(int)arg); | 2300 | return set_interface(info,(int)arg); |
2304 | case MGSL_IOCTXENABLE: | 2301 | case MGSL_IOCTXENABLE: |
2305 | return set_txenable(info,(int)arg); | 2302 | return set_txenable(info,(int)arg, tty); |
2306 | case MGSL_IOCRXENABLE: | 2303 | case MGSL_IOCRXENABLE: |
2307 | return set_rxenable(info,(int)arg); | 2304 | return set_rxenable(info,(int)arg); |
2308 | case MGSL_IOCTXABORT: | 2305 | case MGSL_IOCTXABORT: |
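[Editor's note: folding ioctl_common() back into mgslpc_ioctl() lets the tty that entered through the ioctl path flow to set_params() and set_txenable() without a detour through info->tty. From user space nothing changes; a sketch of a caller exercising the MGSL_IOCGPARAMS/MGSL_IOCSPARAMS pair (device path and error handling are illustrative):

    #include <fcntl.h>
    #include <unistd.h>
    #include <sys/ioctl.h>
    #include <linux/synclink.h>

    /* Sketch: switch a SyncLink PC Card port into HDLC mode. */
    int set_hdlc_mode(const char *dev)
    {
            MGSL_PARAMS params;
            int ret, fd = open(dev, O_RDWR | O_NONBLOCK);

            if (fd < 0)
                    return -1;
            ret = ioctl(fd, MGSL_IOCGPARAMS, &params);
            if (ret == 0) {
                    params.mode = MGSL_MODE_HDLC;
                    ret = ioctl(fd, MGSL_IOCSPARAMS, &params);
            }
            close(fd);
            return ret;
    }
]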
@@ -2369,7 +2366,7 @@ static void mgslpc_set_termios(struct tty_struct *tty, struct ktermios *old_term | |||
2369 | == RELEVANT_IFLAG(old_termios->c_iflag))) | 2366 | == RELEVANT_IFLAG(old_termios->c_iflag))) |
2370 | return; | 2367 | return; |
2371 | 2368 | ||
2372 | mgslpc_change_params(info); | 2369 | mgslpc_change_params(info, tty); |
2373 | 2370 | ||
2374 | /* Handle transition to B0 status */ | 2371 | /* Handle transition to B0 status */ |
2375 | if (old_termios->c_cflag & CBAUD && | 2372 | if (old_termios->c_cflag & CBAUD && |
@@ -2404,81 +2401,34 @@ static void mgslpc_set_termios(struct tty_struct *tty, struct ktermios *old_term | |||
2404 | static void mgslpc_close(struct tty_struct *tty, struct file * filp) | 2401 | static void mgslpc_close(struct tty_struct *tty, struct file * filp) |
2405 | { | 2402 | { |
2406 | MGSLPC_INFO * info = (MGSLPC_INFO *)tty->driver_data; | 2403 | MGSLPC_INFO * info = (MGSLPC_INFO *)tty->driver_data; |
2404 | struct tty_port *port = &info->port; | ||
2407 | 2405 | ||
2408 | if (mgslpc_paranoia_check(info, tty->name, "mgslpc_close")) | 2406 | if (mgslpc_paranoia_check(info, tty->name, "mgslpc_close")) |
2409 | return; | 2407 | return; |
2410 | 2408 | ||
2411 | if (debug_level >= DEBUG_LEVEL_INFO) | 2409 | if (debug_level >= DEBUG_LEVEL_INFO) |
2412 | printk("%s(%d):mgslpc_close(%s) entry, count=%d\n", | 2410 | printk("%s(%d):mgslpc_close(%s) entry, count=%d\n", |
2413 | __FILE__,__LINE__, info->device_name, info->count); | 2411 | __FILE__,__LINE__, info->device_name, port->count); |
2414 | |||
2415 | if (!info->count) | ||
2416 | return; | ||
2417 | 2412 | ||
2418 | if (tty_hung_up_p(filp)) | 2413 | WARN_ON(!port->count); |
2419 | goto cleanup; | ||
2420 | |||
2421 | if ((tty->count == 1) && (info->count != 1)) { | ||
2422 | /* | ||
2423 | * tty->count is 1 and the tty structure will be freed. | ||
2424 | * info->count should be one in this case. | ||
2425 | * if it's not, correct it so that the port is shutdown. | ||
2426 | */ | ||
2427 | printk("mgslpc_close: bad refcount; tty->count is 1, " | ||
2428 | "info->count is %d\n", info->count); | ||
2429 | info->count = 1; | ||
2430 | } | ||
2431 | 2414 | ||
2432 | info->count--; | 2415 | if (tty_port_close_start(port, tty, filp) == 0) |
2433 | |||
2434 | /* if at least one open remaining, leave hardware active */ | ||
2435 | if (info->count) | ||
2436 | goto cleanup; | 2416 | goto cleanup; |
2437 | 2417 | ||
2438 | info->flags |= ASYNC_CLOSING; | 2418 | if (port->flags & ASYNC_INITIALIZED) |
2439 | |||
2440 | /* set tty->closing to notify line discipline to | ||
2441 | * only process XON/XOFF characters. Only the N_TTY | ||
2442 | * discipline appears to use this (ppp does not). | ||
2443 | */ | ||
2444 | tty->closing = 1; | ||
2445 | |||
2446 | /* wait for transmit data to clear all layers */ | ||
2447 | |||
2448 | if (info->closing_wait != ASYNC_CLOSING_WAIT_NONE) { | ||
2449 | if (debug_level >= DEBUG_LEVEL_INFO) | ||
2450 | printk("%s(%d):mgslpc_close(%s) calling tty_wait_until_sent\n", | ||
2451 | __FILE__,__LINE__, info->device_name ); | ||
2452 | tty_wait_until_sent(tty, info->closing_wait); | ||
2453 | } | ||
2454 | |||
2455 | if (info->flags & ASYNC_INITIALIZED) | ||
2456 | mgslpc_wait_until_sent(tty, info->timeout); | 2419 | mgslpc_wait_until_sent(tty, info->timeout); |
2457 | 2420 | ||
2458 | mgslpc_flush_buffer(tty); | 2421 | mgslpc_flush_buffer(tty); |
2459 | 2422 | ||
2460 | tty_ldisc_flush(tty); | 2423 | tty_ldisc_flush(tty); |
2461 | 2424 | shutdown(info, tty); | |
2462 | shutdown(info); | 2425 | |
2463 | 2426 | tty_port_close_end(port, tty); | |
2464 | tty->closing = 0; | 2427 | tty_port_tty_set(port, NULL); |
2465 | info->tty = NULL; | ||
2466 | |||
2467 | if (info->blocked_open) { | ||
2468 | if (info->close_delay) { | ||
2469 | msleep_interruptible(jiffies_to_msecs(info->close_delay)); | ||
2470 | } | ||
2471 | wake_up_interruptible(&info->open_wait); | ||
2472 | } | ||
2473 | |||
2474 | info->flags &= ~(ASYNC_NORMAL_ACTIVE|ASYNC_CLOSING); | ||
2475 | |||
2476 | wake_up_interruptible(&info->close_wait); | ||
2477 | |||
2478 | cleanup: | 2428 | cleanup: |
2479 | if (debug_level >= DEBUG_LEVEL_INFO) | 2429 | if (debug_level >= DEBUG_LEVEL_INFO) |
2480 | printk("%s(%d):mgslpc_close(%s) exit, count=%d\n", __FILE__,__LINE__, | 2430 | printk("%s(%d):mgslpc_close(%s) exit, count=%d\n", __FILE__,__LINE__, |
2481 | tty->driver->name, info->count); | 2431 | tty->driver->name, port->count); |
2482 | } | 2432 | } |
2483 | 2433 | ||
2484 | /* Wait until the transmitter is empty. | 2434 | /* Wait until the transmitter is empty. |
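[Editor's note: the rewrite above replaces dozens of lines of open-coded close bookkeeping with the tty_port helpers. The resulting shape, sketched with a hypothetical my_close() (the helpers named are the real ones):

    static void my_close(struct tty_struct *tty, struct file *filp)
    {
            struct my_info *info = tty->driver_data;
            struct tty_port *port = &info->port;

            /* returns 0 if hung up or other opens remain; it also
             * handles tty->closing and the closing_wait drain */
            if (tty_port_close_start(port, tty, filp) == 0)
                    return;

            shutdown(info, tty);            /* quiesce the hardware */

            tty_port_close_end(port, tty);  /* close_delay, wake openers */
            tty_port_tty_set(port, NULL);   /* drop the port's tty ref */
    }
]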
@@ -2498,7 +2448,7 @@ static void mgslpc_wait_until_sent(struct tty_struct *tty, int timeout) | |||
2498 | if (mgslpc_paranoia_check(info, tty->name, "mgslpc_wait_until_sent")) | 2448 | if (mgslpc_paranoia_check(info, tty->name, "mgslpc_wait_until_sent")) |
2499 | return; | 2449 | return; |
2500 | 2450 | ||
2501 | if (!(info->flags & ASYNC_INITIALIZED)) | 2451 | if (!(info->port.flags & ASYNC_INITIALIZED)) |
2502 | goto exit; | 2452 | goto exit; |
2503 | 2453 | ||
2504 | orig_jiffies = jiffies; | 2454 | orig_jiffies = jiffies; |
@@ -2559,120 +2509,40 @@ static void mgslpc_hangup(struct tty_struct *tty) | |||
2559 | return; | 2509 | return; |
2560 | 2510 | ||
2561 | mgslpc_flush_buffer(tty); | 2511 | mgslpc_flush_buffer(tty); |
2562 | shutdown(info); | 2512 | shutdown(info, tty); |
2563 | 2513 | tty_port_hangup(&info->port); | |
2564 | info->count = 0; | ||
2565 | info->flags &= ~ASYNC_NORMAL_ACTIVE; | ||
2566 | info->tty = NULL; | ||
2567 | |||
2568 | wake_up_interruptible(&info->open_wait); | ||
2569 | } | 2514 | } |
2570 | 2515 | ||
2571 | /* Block the current process until the specified port | 2516 | static int carrier_raised(struct tty_port *port) |
2572 | * is ready to be opened. | ||
2573 | */ | ||
2574 | static int block_til_ready(struct tty_struct *tty, struct file *filp, | ||
2575 | MGSLPC_INFO *info) | ||
2576 | { | 2517 | { |
2577 | DECLARE_WAITQUEUE(wait, current); | 2518 | MGSLPC_INFO *info = container_of(port, MGSLPC_INFO, port); |
2578 | int retval; | 2519 | unsigned long flags; |
2579 | bool do_clocal = false; | ||
2580 | bool extra_count = false; | ||
2581 | unsigned long flags; | ||
2582 | |||
2583 | if (debug_level >= DEBUG_LEVEL_INFO) | ||
2584 | printk("%s(%d):block_til_ready on %s\n", | ||
2585 | __FILE__,__LINE__, tty->driver->name ); | ||
2586 | |||
2587 | if (filp->f_flags & O_NONBLOCK || tty->flags & (1 << TTY_IO_ERROR)){ | ||
2588 | /* nonblock mode is set or port is not enabled */ | ||
2589 | /* just verify that callout device is not active */ | ||
2590 | info->flags |= ASYNC_NORMAL_ACTIVE; | ||
2591 | return 0; | ||
2592 | } | ||
2593 | |||
2594 | if (tty->termios->c_cflag & CLOCAL) | ||
2595 | do_clocal = true; | ||
2596 | |||
2597 | /* Wait for carrier detect and the line to become | ||
2598 | * free (i.e., not in use by the callout). While we are in | ||
2599 | * this loop, info->count is dropped by one, so that | ||
2600 | * mgslpc_close() knows when to free things. We restore it upon | ||
2601 | * exit, either normal or abnormal. | ||
2602 | */ | ||
2603 | |||
2604 | retval = 0; | ||
2605 | add_wait_queue(&info->open_wait, &wait); | ||
2606 | |||
2607 | if (debug_level >= DEBUG_LEVEL_INFO) | ||
2608 | printk("%s(%d):block_til_ready before block on %s count=%d\n", | ||
2609 | __FILE__,__LINE__, tty->driver->name, info->count ); | ||
2610 | |||
2611 | spin_lock_irqsave(&info->lock, flags); | ||
2612 | if (!tty_hung_up_p(filp)) { | ||
2613 | extra_count = true; | ||
2614 | info->count--; | ||
2615 | } | ||
2616 | spin_unlock_irqrestore(&info->lock, flags); | ||
2617 | info->blocked_open++; | ||
2618 | |||
2619 | while (1) { | ||
2620 | if ((tty->termios->c_cflag & CBAUD)) { | ||
2621 | spin_lock_irqsave(&info->lock,flags); | ||
2622 | info->serial_signals |= SerialSignal_RTS + SerialSignal_DTR; | ||
2623 | set_signals(info); | ||
2624 | spin_unlock_irqrestore(&info->lock,flags); | ||
2625 | } | ||
2626 | |||
2627 | set_current_state(TASK_INTERRUPTIBLE); | ||
2628 | |||
2629 | if (tty_hung_up_p(filp) || !(info->flags & ASYNC_INITIALIZED)){ | ||
2630 | retval = (info->flags & ASYNC_HUP_NOTIFY) ? | ||
2631 | -EAGAIN : -ERESTARTSYS; | ||
2632 | break; | ||
2633 | } | ||
2634 | |||
2635 | spin_lock_irqsave(&info->lock,flags); | ||
2636 | get_signals(info); | ||
2637 | spin_unlock_irqrestore(&info->lock,flags); | ||
2638 | |||
2639 | if (!(info->flags & ASYNC_CLOSING) && | ||
2640 | (do_clocal || (info->serial_signals & SerialSignal_DCD)) ) { | ||
2641 | break; | ||
2642 | } | ||
2643 | |||
2644 | if (signal_pending(current)) { | ||
2645 | retval = -ERESTARTSYS; | ||
2646 | break; | ||
2647 | } | ||
2648 | |||
2649 | if (debug_level >= DEBUG_LEVEL_INFO) | ||
2650 | printk("%s(%d):block_til_ready blocking on %s count=%d\n", | ||
2651 | __FILE__,__LINE__, tty->driver->name, info->count ); | ||
2652 | |||
2653 | schedule(); | ||
2654 | } | ||
2655 | |||
2656 | set_current_state(TASK_RUNNING); | ||
2657 | remove_wait_queue(&info->open_wait, &wait); | ||
2658 | 2520 | ||
2659 | if (extra_count) | 2521 | spin_lock_irqsave(&info->lock,flags); |
2660 | info->count++; | 2522 | get_signals(info); |
2661 | info->blocked_open--; | 2523 | spin_unlock_irqrestore(&info->lock,flags); |
2662 | 2524 | ||
2663 | if (debug_level >= DEBUG_LEVEL_INFO) | 2525 | if (info->serial_signals & SerialSignal_DCD) |
2664 | printk("%s(%d):block_til_ready after blocking on %s count=%d\n", | 2526 | return 1; |
2665 | __FILE__,__LINE__, tty->driver->name, info->count ); | 2527 | return 0; |
2528 | } | ||
2666 | 2529 | ||
2667 | if (!retval) | 2530 | static void raise_dtr_rts(struct tty_port *port) |
2668 | info->flags |= ASYNC_NORMAL_ACTIVE; | 2531 | { |
2532 | MGSLPC_INFO *info = container_of(port, MGSLPC_INFO, port); | ||
2533 | unsigned long flags; | ||
2669 | 2534 | ||
2670 | return retval; | 2535 | spin_lock_irqsave(&info->lock,flags); |
2536 | info->serial_signals |= SerialSignal_RTS + SerialSignal_DTR; | ||
2537 | set_signals(info); | ||
2538 | spin_unlock_irqrestore(&info->lock,flags); | ||
2671 | } | 2539 | } |
2672 | 2540 | ||
2541 | |||
2673 | static int mgslpc_open(struct tty_struct *tty, struct file * filp) | 2542 | static int mgslpc_open(struct tty_struct *tty, struct file * filp) |
2674 | { | 2543 | { |
2675 | MGSLPC_INFO *info; | 2544 | MGSLPC_INFO *info; |
2545 | struct tty_port *port; | ||
2676 | int retval, line; | 2546 | int retval, line; |
2677 | unsigned long flags; | 2547 | unsigned long flags; |
2678 | 2548 | ||
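[Editor's note: carrier_raised() and raise_dtr_rts() are tty_port_operations callbacks; tty_port_block_til_ready() invokes them instead of each driver polling its own modem signals. The wiring is assumed to happen where the port is initialized, since that hunk is not shown here; a sketch of it:

    static const struct tty_port_operations mgslpc_port_ops = {
            .carrier_raised = carrier_raised,
            .raise_dtr_rts  = raise_dtr_rts,
    };

    /* at probe/attach time: */
    tty_port_init(&info->port);
    info->port.ops = &mgslpc_port_ops;
]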
@@ -2691,23 +2561,24 @@ static int mgslpc_open(struct tty_struct *tty, struct file * filp) | |||
2691 | if (mgslpc_paranoia_check(info, tty->name, "mgslpc_open")) | 2561 | if (mgslpc_paranoia_check(info, tty->name, "mgslpc_open")) |
2692 | return -ENODEV; | 2562 | return -ENODEV; |
2693 | 2563 | ||
2564 | port = &info->port; | ||
2694 | tty->driver_data = info; | 2565 | tty->driver_data = info; |
2695 | info->tty = tty; | 2566 | tty_port_tty_set(port, tty); |
2696 | 2567 | ||
2697 | if (debug_level >= DEBUG_LEVEL_INFO) | 2568 | if (debug_level >= DEBUG_LEVEL_INFO) |
2698 | printk("%s(%d):mgslpc_open(%s), old ref count = %d\n", | 2569 | printk("%s(%d):mgslpc_open(%s), old ref count = %d\n", |
2699 | __FILE__,__LINE__,tty->driver->name, info->count); | 2570 | __FILE__,__LINE__,tty->driver->name, port->count); |
2700 | 2571 | ||
2701 | /* If port is closing, signal caller to try again */ | 2572 | /* If port is closing, signal caller to try again */ |
2702 | if (tty_hung_up_p(filp) || info->flags & ASYNC_CLOSING){ | 2573 | if (tty_hung_up_p(filp) || port->flags & ASYNC_CLOSING){ |
2703 | if (info->flags & ASYNC_CLOSING) | 2574 | if (port->flags & ASYNC_CLOSING) |
2704 | interruptible_sleep_on(&info->close_wait); | 2575 | interruptible_sleep_on(&port->close_wait); |
2705 | retval = ((info->flags & ASYNC_HUP_NOTIFY) ? | 2576 | retval = ((port->flags & ASYNC_HUP_NOTIFY) ? |
2706 | -EAGAIN : -ERESTARTSYS); | 2577 | -EAGAIN : -ERESTARTSYS); |
2707 | goto cleanup; | 2578 | goto cleanup; |
2708 | } | 2579 | } |
2709 | 2580 | ||
2710 | info->tty->low_latency = (info->flags & ASYNC_LOW_LATENCY) ? 1 : 0; | 2581 | tty->low_latency = (port->flags & ASYNC_LOW_LATENCY) ? 1 : 0; |
2711 | 2582 | ||
2712 | spin_lock_irqsave(&info->netlock, flags); | 2583 | spin_lock_irqsave(&info->netlock, flags); |
2713 | if (info->netcount) { | 2584 | if (info->netcount) { |
@@ -2715,17 +2586,19 @@ static int mgslpc_open(struct tty_struct *tty, struct file * filp) | |||
2715 | spin_unlock_irqrestore(&info->netlock, flags); | 2586 | spin_unlock_irqrestore(&info->netlock, flags); |
2716 | goto cleanup; | 2587 | goto cleanup; |
2717 | } | 2588 | } |
2718 | info->count++; | 2589 | spin_lock(&port->lock); |
2590 | port->count++; | ||
2591 | spin_unlock(&port->lock); | ||
2719 | spin_unlock_irqrestore(&info->netlock, flags); | 2592 | spin_unlock_irqrestore(&info->netlock, flags); |
2720 | 2593 | ||
2721 | if (info->count == 1) { | 2594 | if (port->count == 1) { |
2722 | /* 1st open on this device, init hardware */ | 2595 | /* 1st open on this device, init hardware */ |
2723 | retval = startup(info); | 2596 | retval = startup(info, tty); |
2724 | if (retval < 0) | 2597 | if (retval < 0) |
2725 | goto cleanup; | 2598 | goto cleanup; |
2726 | } | 2599 | } |
2727 | 2600 | ||
2728 | retval = block_til_ready(tty, filp, info); | 2601 | retval = tty_port_block_til_ready(&info->port, tty, filp); |
2729 | if (retval) { | 2602 | if (retval) { |
2730 | if (debug_level >= DEBUG_LEVEL_INFO) | 2603 | if (debug_level >= DEBUG_LEVEL_INFO) |
2731 | printk("%s(%d):block_til_ready(%s) returned %d\n", | 2604 | printk("%s(%d):block_til_ready(%s) returned %d\n", |
@@ -2739,13 +2612,6 @@ static int mgslpc_open(struct tty_struct *tty, struct file * filp) | |||
2739 | retval = 0; | 2612 | retval = 0; |
2740 | 2613 | ||
2741 | cleanup: | 2614 | cleanup: |
2742 | if (retval) { | ||
2743 | if (tty->count == 1) | ||
2744 | info->tty = NULL; /* tty layer will release tty struct */ | ||
2745 | if(info->count) | ||
2746 | info->count--; | ||
2747 | } | ||
2748 | |||
2749 | return retval; | 2615 | return retval; |
2750 | } | 2616 | } |
2751 | 2617 | ||
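[Editor's note: after conversion the open path follows the canonical tty_port sequence: bind the tty, bump the count under port->lock, initialize hardware on first open, then block for carrier. A condensed sketch with lookup and error paths elided:

    tty->driver_data = info;
    tty_port_tty_set(port, tty);            /* publish tty to the port */

    spin_lock(&port->lock);
    port->count++;
    spin_unlock(&port->lock);

    if (port->count == 1) {
            retval = startup(info, tty);    /* first open: claim hw */
            if (retval < 0)
                    return retval;
    }

    /* waits for carrier via port->ops->carrier_raised(), raising
     * DTR/RTS through port->ops->raise_dtr_rts(); returns at once
     * for O_NONBLOCK or CLOCAL opens */
    return tty_port_block_til_ready(port, tty, filp);
]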
@@ -3500,7 +3366,7 @@ static void rx_start(MGSLPC_INFO *info) | |||
3500 | info->rx_enabled = true; | 3366 | info->rx_enabled = true; |
3501 | } | 3367 | } |
3502 | 3368 | ||
3503 | static void tx_start(MGSLPC_INFO *info) | 3369 | static void tx_start(MGSLPC_INFO *info, struct tty_struct *tty) |
3504 | { | 3370 | { |
3505 | if (debug_level >= DEBUG_LEVEL_ISR) | 3371 | if (debug_level >= DEBUG_LEVEL_ISR) |
3506 | printk("%s(%d):tx_start(%s)\n", | 3372 | printk("%s(%d):tx_start(%s)\n", |
@@ -3524,11 +3390,11 @@ static void tx_start(MGSLPC_INFO *info) | |||
3524 | if (info->params.mode == MGSL_MODE_ASYNC) { | 3390 | if (info->params.mode == MGSL_MODE_ASYNC) { |
3525 | if (!info->tx_active) { | 3391 | if (!info->tx_active) { |
3526 | info->tx_active = true; | 3392 | info->tx_active = true; |
3527 | tx_ready(info); | 3393 | tx_ready(info, tty); |
3528 | } | 3394 | } |
3529 | } else { | 3395 | } else { |
3530 | info->tx_active = true; | 3396 | info->tx_active = true; |
3531 | tx_ready(info); | 3397 | tx_ready(info, tty); |
3532 | mod_timer(&info->tx_timer, jiffies + | 3398 | mod_timer(&info->tx_timer, jiffies + |
3533 | msecs_to_jiffies(5000)); | 3399 | msecs_to_jiffies(5000)); |
3534 | } | 3400 | } |
@@ -3849,13 +3715,12 @@ static void rx_reset_buffers(MGSLPC_INFO *info) | |||
3849 | * | 3715 | * |
3850 | * Returns true if frame returned, otherwise false | 3716 | * Returns true if frame returned, otherwise false |
3851 | */ | 3717 | */ |
3852 | static bool rx_get_frame(MGSLPC_INFO *info) | 3718 | static bool rx_get_frame(MGSLPC_INFO *info, struct tty_struct *tty) |
3853 | { | 3719 | { |
3854 | unsigned short status; | 3720 | unsigned short status; |
3855 | RXBUF *buf; | 3721 | RXBUF *buf; |
3856 | unsigned int framesize = 0; | 3722 | unsigned int framesize = 0; |
3857 | unsigned long flags; | 3723 | unsigned long flags; |
3858 | struct tty_struct *tty = info->tty; | ||
3859 | bool return_frame = false; | 3724 | bool return_frame = false; |
3860 | 3725 | ||
3861 | if (info->rx_frame_count == 0) | 3726 | if (info->rx_frame_count == 0) |
@@ -4075,7 +3940,11 @@ static void tx_timeout(unsigned long context) | |||
4075 | hdlcdev_tx_done(info); | 3940 | hdlcdev_tx_done(info); |
4076 | else | 3941 | else |
4077 | #endif | 3942 | #endif |
4078 | bh_transmit(info); | 3943 | { |
3944 | struct tty_struct *tty = tty_port_tty_get(&info->port); | ||
3945 | bh_transmit(info, tty); | ||
3946 | tty_kref_put(tty); | ||
3947 | } | ||
4079 | } | 3948 | } |
4080 | 3949 | ||
4081 | #if SYNCLINK_GENERIC_HDLC | 3950 | #if SYNCLINK_GENERIC_HDLC |
@@ -4094,11 +3963,12 @@ static int hdlcdev_attach(struct net_device *dev, unsigned short encoding, | |||
4094 | unsigned short parity) | 3963 | unsigned short parity) |
4095 | { | 3964 | { |
4096 | MGSLPC_INFO *info = dev_to_port(dev); | 3965 | MGSLPC_INFO *info = dev_to_port(dev); |
3966 | struct tty_struct *tty; | ||
4097 | unsigned char new_encoding; | 3967 | unsigned char new_encoding; |
4098 | unsigned short new_crctype; | 3968 | unsigned short new_crctype; |
4099 | 3969 | ||
4100 | /* return error if TTY interface open */ | 3970 | /* return error if TTY interface open */ |
4101 | if (info->count) | 3971 | if (info->port.count) |
4102 | return -EBUSY; | 3972 | return -EBUSY; |
4103 | 3973 | ||
4104 | switch (encoding) | 3974 | switch (encoding) |
@@ -4123,8 +3993,11 @@ static int hdlcdev_attach(struct net_device *dev, unsigned short encoding, | |||
4123 | info->params.crc_type = new_crctype; | 3993 | info->params.crc_type = new_crctype; |
4124 | 3994 | ||
4125 | /* if network interface up, reprogram hardware */ | 3995 | /* if network interface up, reprogram hardware */ |
4126 | if (info->netcount) | 3996 | if (info->netcount) { |
4127 | mgslpc_program_hw(info); | 3997 | tty = tty_port_tty_get(&info->port); |
3998 | mgslpc_program_hw(info, tty); | ||
3999 | tty_kref_put(tty); | ||
4000 | } | ||
4128 | 4001 | ||
4129 | return 0; | 4002 | return 0; |
4130 | } | 4003 | } |
@@ -4165,8 +4038,11 @@ static int hdlcdev_xmit(struct sk_buff *skb, struct net_device *dev) | |||
4165 | 4038 | ||
4166 | /* start hardware transmitter if necessary */ | 4039 | /* start hardware transmitter if necessary */ |
4167 | spin_lock_irqsave(&info->lock,flags); | 4040 | spin_lock_irqsave(&info->lock,flags); |
4168 | if (!info->tx_active) | 4041 | if (!info->tx_active) { |
4169 | tx_start(info); | 4042 | struct tty_struct *tty = tty_port_tty_get(&info->port); |
4043 | tx_start(info, tty); | ||
4044 | tty_kref_put(tty); | ||
4045 | } | ||
4170 | spin_unlock_irqrestore(&info->lock,flags); | 4046 | spin_unlock_irqrestore(&info->lock,flags); |
4171 | 4047 | ||
4172 | return 0; | 4048 | return 0; |
@@ -4183,6 +4059,7 @@ static int hdlcdev_xmit(struct sk_buff *skb, struct net_device *dev) | |||
4183 | static int hdlcdev_open(struct net_device *dev) | 4059 | static int hdlcdev_open(struct net_device *dev) |
4184 | { | 4060 | { |
4185 | MGSLPC_INFO *info = dev_to_port(dev); | 4061 | MGSLPC_INFO *info = dev_to_port(dev); |
4062 | struct tty_struct *tty; | ||
4186 | int rc; | 4063 | int rc; |
4187 | unsigned long flags; | 4064 | unsigned long flags; |
4188 | 4065 | ||
@@ -4195,7 +4072,7 @@ static int hdlcdev_open(struct net_device *dev) | |||
4195 | 4072 | ||
4196 | /* arbitrate between network and tty opens */ | 4073 | /* arbitrate between network and tty opens */ |
4197 | spin_lock_irqsave(&info->netlock, flags); | 4074 | spin_lock_irqsave(&info->netlock, flags); |
4198 | if (info->count != 0 || info->netcount != 0) { | 4075 | if (info->port.count != 0 || info->netcount != 0) { |
4199 | printk(KERN_WARNING "%s: hdlc_open returning busy\n", dev->name); | 4076 | printk(KERN_WARNING "%s: hdlc_open returning busy\n", dev->name); |
4200 | spin_unlock_irqrestore(&info->netlock, flags); | 4077 | spin_unlock_irqrestore(&info->netlock, flags); |
4201 | return -EBUSY; | 4078 | return -EBUSY; |
@@ -4203,17 +4080,19 @@ static int hdlcdev_open(struct net_device *dev) | |||
4203 | info->netcount=1; | 4080 | info->netcount=1; |
4204 | spin_unlock_irqrestore(&info->netlock, flags); | 4081 | spin_unlock_irqrestore(&info->netlock, flags); |
4205 | 4082 | ||
4083 | tty = tty_port_tty_get(&info->port); | ||
4206 | /* claim resources and init adapter */ | 4084 | /* claim resources and init adapter */ |
4207 | if ((rc = startup(info)) != 0) { | 4085 | if ((rc = startup(info, tty)) != 0) { |
4086 | tty_kref_put(tty); | ||
4208 | spin_lock_irqsave(&info->netlock, flags); | 4087 | spin_lock_irqsave(&info->netlock, flags); |
4209 | info->netcount=0; | 4088 | info->netcount=0; |
4210 | spin_unlock_irqrestore(&info->netlock, flags); | 4089 | spin_unlock_irqrestore(&info->netlock, flags); |
4211 | return rc; | 4090 | return rc; |
4212 | } | 4091 | } |
4213 | |||
4214 | /* assert DTR and RTS, apply hardware settings */ | 4092 | /* assert DTR and RTS, apply hardware settings */ |
4215 | info->serial_signals |= SerialSignal_RTS + SerialSignal_DTR; | 4093 | info->serial_signals |= SerialSignal_RTS + SerialSignal_DTR; |
4216 | mgslpc_program_hw(info); | 4094 | mgslpc_program_hw(info, tty); |
4095 | tty_kref_put(tty); | ||
4217 | 4096 | ||
4218 | /* enable network layer transmit */ | 4097 | /* enable network layer transmit */ |
4219 | dev->trans_start = jiffies; | 4098 | dev->trans_start = jiffies; |
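[Editor's note: hdlcdev_open() now arbitrates against the tty side through info->port.count instead of the old private counter, and borrows a tty reference only for the duration of startup() and mgslpc_program_hw(). The arbitration, annotated:

    spin_lock_irqsave(&info->netlock, flags);
    if (info->port.count != 0 || info->netcount != 0) {
            /* tty side is open, or the netdev already is */
            spin_unlock_irqrestore(&info->netlock, flags);
            return -EBUSY;
    }
    info->netcount = 1;     /* claim the hardware for generic HDLC */
    spin_unlock_irqrestore(&info->netlock, flags);
]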
@@ -4241,6 +4120,7 @@ static int hdlcdev_open(struct net_device *dev) | |||
4241 | static int hdlcdev_close(struct net_device *dev) | 4120 | static int hdlcdev_close(struct net_device *dev) |
4242 | { | 4121 | { |
4243 | MGSLPC_INFO *info = dev_to_port(dev); | 4122 | MGSLPC_INFO *info = dev_to_port(dev); |
4123 | struct tty_struct *tty = tty_port_tty_get(&info->port); | ||
4244 | unsigned long flags; | 4124 | unsigned long flags; |
4245 | 4125 | ||
4246 | if (debug_level >= DEBUG_LEVEL_INFO) | 4126 | if (debug_level >= DEBUG_LEVEL_INFO) |
@@ -4249,8 +4129,8 @@ static int hdlcdev_close(struct net_device *dev) | |||
4249 | netif_stop_queue(dev); | 4129 | netif_stop_queue(dev); |
4250 | 4130 | ||
4251 | /* shutdown adapter and release resources */ | 4131 | /* shutdown adapter and release resources */ |
4252 | shutdown(info); | 4132 | shutdown(info, tty); |
4253 | 4133 | tty_kref_put(tty); | |
4254 | hdlc_close(dev); | 4134 | hdlc_close(dev); |
4255 | 4135 | ||
4256 | spin_lock_irqsave(&info->netlock, flags); | 4136 | spin_lock_irqsave(&info->netlock, flags); |
@@ -4281,7 +4161,7 @@ static int hdlcdev_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd) | |||
4281 | printk("%s:hdlcdev_ioctl(%s)\n",__FILE__,dev->name); | 4161 | printk("%s:hdlcdev_ioctl(%s)\n",__FILE__,dev->name); |
4282 | 4162 | ||
4283 | /* return error if TTY interface open */ | 4163 | /* return error if TTY interface open */ |
4284 | if (info->count) | 4164 | if (info->port.count) |
4285 | return -EBUSY; | 4165 | return -EBUSY; |
4286 | 4166 | ||
4287 | if (cmd != SIOCWANDEV) | 4167 | if (cmd != SIOCWANDEV) |
@@ -4354,8 +4234,11 @@ static int hdlcdev_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd) | |||
4354 | info->params.clock_speed = 0; | 4234 | info->params.clock_speed = 0; |
4355 | 4235 | ||
4356 | /* if network interface up, reprogram hardware */ | 4236 | /* if network interface up, reprogram hardware */ |
4357 | if (info->netcount) | 4237 | if (info->netcount) { |
4358 | mgslpc_program_hw(info); | 4238 | struct tty_struct *tty = tty_port_tty_get(&info->port); |
4239 | mgslpc_program_hw(info, tty); | ||
4240 | tty_kref_put(tty); | ||
4241 | } | ||
4359 | return 0; | 4242 | return 0; |
4360 | 4243 | ||
4361 | default: | 4244 | default: |
diff --git a/drivers/char/pty.c b/drivers/char/pty.c index 6d4582712b1f..112a6ba9a96f 100644 --- a/drivers/char/pty.c +++ b/drivers/char/pty.c | |||
@@ -5,8 +5,6 @@ | |||
5 | * | 5 | * |
6 | * Added support for a Unix98-style ptmx device. | 6 | * Added support for a Unix98-style ptmx device. |
7 | * -- C. Scott Ananian <cananian@alumni.princeton.edu>, 14-Jan-1998 | 7 | * -- C. Scott Ananian <cananian@alumni.princeton.edu>, 14-Jan-1998 |
8 | * Added TTY_DO_WRITE_WAKEUP to enable n_tty to send POLL_OUT to | ||
9 | * waiting writers -- Sapan Bhatia <sapan@corewars.org> | ||
10 | * | 8 | * |
11 | * When reading this code see also fs/devpts. In particular note that the | 9 | * When reading this code see also fs/devpts. In particular note that the |
12 | * driver_data field is used by the devpts side as a binding to the devpts | 10 | * driver_data field is used by the devpts side as a binding to the devpts |
@@ -217,7 +215,6 @@ static int pty_open(struct tty_struct *tty, struct file *filp) | |||
217 | 215 | ||
218 | clear_bit(TTY_OTHER_CLOSED, &tty->link->flags); | 216 | clear_bit(TTY_OTHER_CLOSED, &tty->link->flags); |
219 | set_bit(TTY_THROTTLED, &tty->flags); | 217 | set_bit(TTY_THROTTLED, &tty->flags); |
220 | set_bit(TTY_DO_WRITE_WAKEUP, &tty->flags); | ||
221 | retval = 0; | 218 | retval = 0; |
222 | out: | 219 | out: |
223 | return retval; | 220 | return retval; |
@@ -230,6 +227,55 @@ static void pty_set_termios(struct tty_struct *tty, | |||
230 | tty->termios->c_cflag |= (CS8 | CREAD); | 227 | tty->termios->c_cflag |= (CS8 | CREAD); |
231 | } | 228 | } |
232 | 229 | ||
230 | /** | ||
231 | * pty_resize - resize event | ||
232 | * @tty: tty being resized | ||
233 | * @ws: new window size (rows and columns, in characters) | ||
234 | * | ||
235 | * Update the termios variables on both ends of the pty pair and | ||
236 | * send the necessary SIGWINCH signals to perform a terminal | ||
237 | * resize correctly. | ||
238 | * | ||
239 | */ | ||
240 | |||
241 | int pty_resize(struct tty_struct *tty, struct winsize *ws) | ||
242 | { | ||
243 | struct pid *pgrp, *rpgrp; | ||
244 | unsigned long flags; | ||
245 | struct tty_struct *pty = tty->link; | ||
246 | |||
247 | /* For a PTY we need to lock the tty side */ | ||
248 | mutex_lock(&tty->termios_mutex); | ||
249 | if (!memcmp(ws, &tty->winsize, sizeof(*ws))) | ||
250 | goto done; | ||
251 | |||
252 | /* Get the PID values and reference them so we can | ||
253 | avoid holding the tty ctrl lock while sending signals. | ||
254 | We need to lock these individually however. */ | ||
255 | |||
256 | spin_lock_irqsave(&tty->ctrl_lock, flags); | ||
257 | pgrp = get_pid(tty->pgrp); | ||
258 | spin_unlock_irqrestore(&tty->ctrl_lock, flags); | ||
259 | |||
260 | spin_lock_irqsave(&pty->ctrl_lock, flags); | ||
261 | rpgrp = get_pid(pty->pgrp); | ||
262 | spin_unlock_irqrestore(&pty->ctrl_lock, flags); | ||
263 | |||
264 | if (pgrp) | ||
265 | kill_pgrp(pgrp, SIGWINCH, 1); | ||
266 | if (rpgrp != pgrp && rpgrp) | ||
267 | kill_pgrp(rpgrp, SIGWINCH, 1); | ||
268 | |||
269 | put_pid(pgrp); | ||
270 | put_pid(rpgrp); | ||
271 | |||
272 | tty->winsize = *ws; | ||
273 | pty->winsize = *ws; /* Never used so will go away soon */ | ||
274 | done: | ||
275 | mutex_unlock(&tty->termios_mutex); | ||
276 | return 0; | ||
277 | } | ||
278 | |||
233 | static int pty_install(struct tty_driver *driver, struct tty_struct *tty) | 279 | static int pty_install(struct tty_driver *driver, struct tty_struct *tty) |
234 | { | 280 | { |
235 | struct tty_struct *o_tty; | 281 | struct tty_struct *o_tty; |
@@ -290,6 +336,7 @@ static const struct tty_operations pty_ops = { | |||
290 | .chars_in_buffer = pty_chars_in_buffer, | 336 | .chars_in_buffer = pty_chars_in_buffer, |
291 | .unthrottle = pty_unthrottle, | 337 | .unthrottle = pty_unthrottle, |
292 | .set_termios = pty_set_termios, | 338 | .set_termios = pty_set_termios, |
339 | .resize = pty_resize | ||
293 | }; | 340 | }; |
294 | 341 | ||
295 | /* Traditional BSD devices */ | 342 | /* Traditional BSD devices */ |
@@ -319,6 +366,7 @@ static const struct tty_operations pty_ops_bsd = { | |||
319 | .unthrottle = pty_unthrottle, | 366 | .unthrottle = pty_unthrottle, |
320 | .set_termios = pty_set_termios, | 367 | .set_termios = pty_set_termios, |
321 | .ioctl = pty_bsd_ioctl, | 368 | .ioctl = pty_bsd_ioctl, |
369 | .resize = pty_resize | ||
322 | }; | 370 | }; |
323 | 371 | ||
324 | static void __init legacy_pty_init(void) | 372 | static void __init legacy_pty_init(void) |
@@ -561,7 +609,8 @@ static const struct tty_operations ptm_unix98_ops = { | |||
561 | .unthrottle = pty_unthrottle, | 609 | .unthrottle = pty_unthrottle, |
562 | .set_termios = pty_set_termios, | 610 | .set_termios = pty_set_termios, |
563 | .ioctl = pty_unix98_ioctl, | 611 | .ioctl = pty_unix98_ioctl, |
564 | .shutdown = pty_unix98_shutdown | 612 | .shutdown = pty_unix98_shutdown, |
613 | .resize = pty_resize | ||
565 | }; | 614 | }; |
566 | 615 | ||
567 | static const struct tty_operations pty_unix98_ops = { | 616 | static const struct tty_operations pty_unix98_ops = { |
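[Editor's note: with .resize wired into the pty operation tables above, a TIOCSWINSZ on either end of the pair reaches pty_resize(), which sizes both ttys and signals both foreground process groups. A user-space sketch of the trigger:

    #include <sys/ioctl.h>
    #include <termios.h>

    /* Sketch: resize a pty from the master side; the slave's
     * foreground job receives SIGWINCH via pty_resize(). */
    int resize_pty(int master_fd, unsigned short rows, unsigned short cols)
    {
            struct winsize ws = { .ws_row = rows, .ws_col = cols };

            return ioctl(master_fd, TIOCSWINSZ, &ws);
    }
]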
diff --git a/drivers/char/rio/rio_linux.c b/drivers/char/rio/rio_linux.c index a8f68a3f14dd..2e8a6eed34be 100644 --- a/drivers/char/rio/rio_linux.c +++ b/drivers/char/rio/rio_linux.c | |||
@@ -173,7 +173,7 @@ static void rio_disable_tx_interrupts(void *ptr); | |||
173 | static void rio_enable_tx_interrupts(void *ptr); | 173 | static void rio_enable_tx_interrupts(void *ptr); |
174 | static void rio_disable_rx_interrupts(void *ptr); | 174 | static void rio_disable_rx_interrupts(void *ptr); |
175 | static void rio_enable_rx_interrupts(void *ptr); | 175 | static void rio_enable_rx_interrupts(void *ptr); |
176 | static int rio_get_CD(void *ptr); | 176 | static int rio_carrier_raised(struct tty_port *port); |
177 | static void rio_shutdown_port(void *ptr); | 177 | static void rio_shutdown_port(void *ptr); |
178 | static int rio_set_real_termios(void *ptr); | 178 | static int rio_set_real_termios(void *ptr); |
179 | static void rio_hungup(void *ptr); | 179 | static void rio_hungup(void *ptr); |
@@ -224,7 +224,6 @@ static struct real_driver rio_real_driver = { | |||
224 | rio_enable_tx_interrupts, | 224 | rio_enable_tx_interrupts, |
225 | rio_disable_rx_interrupts, | 225 | rio_disable_rx_interrupts, |
226 | rio_enable_rx_interrupts, | 226 | rio_enable_rx_interrupts, |
227 | rio_get_CD, | ||
228 | rio_shutdown_port, | 227 | rio_shutdown_port, |
229 | rio_set_real_termios, | 228 | rio_set_real_termios, |
230 | rio_chars_in_buffer, | 229 | rio_chars_in_buffer, |
@@ -476,9 +475,9 @@ static void rio_enable_rx_interrupts(void *ptr) | |||
476 | 475 | ||
477 | 476 | ||
478 | /* Jeez. Isn't this simple? */ | 477 | /* Jeez. Isn't this simple? */ |
479 | static int rio_get_CD(void *ptr) | 478 | static int rio_carrier_raised(struct tty_port *port) |
480 | { | 479 | { |
481 | struct Port *PortP = ptr; | 480 | struct Port *PortP = container_of(port, struct Port, gs.port); |
482 | int rv; | 481 | int rv; |
483 | 482 | ||
484 | func_enter(); | 483 | func_enter(); |
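[Editor's note: the RIO conversion reaches the tty_port through two levels of embedding, and container_of() handles the nested member path (gs.port) in one step because offsetof accepts it. Sketched with illustrative type names, not RIO's real ones:

    struct gs_state  { /* ... */ struct tty_port port; };
    struct Port_like { /* ... */ struct gs_state gs;  };

    /* maps a tty_port pointer back to the outermost structure */
    struct Port_like *p = container_of(port, struct Port_like, gs.port);
]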
@@ -797,16 +796,9 @@ static int rio_init_drivers(void) | |||
797 | return 1; | 796 | return 1; |
798 | } | 797 | } |
799 | 798 | ||
800 | 799 | static const struct tty_port_operations rio_port_ops = { | |
801 | static void *ckmalloc(int size) | 800 | .carrier_raised = rio_carrier_raised, |
802 | { | 801 | }; |
803 | void *p; | ||
804 | |||
805 | p = kzalloc(size, GFP_KERNEL); | ||
806 | return p; | ||
807 | } | ||
808 | |||
809 | |||
810 | 802 | ||
811 | static int rio_init_datastructures(void) | 803 | static int rio_init_datastructures(void) |
812 | { | 804 | { |
@@ -826,33 +818,30 @@ static int rio_init_datastructures(void) | |||
826 | #define TMIO_SZ sizeof(struct termios *) | 818 | #define TMIO_SZ sizeof(struct termios *) |
827 | rio_dprintk(RIO_DEBUG_INIT, "getting : %Zd %Zd %Zd %Zd %Zd bytes\n", RI_SZ, RIO_HOSTS * HOST_SZ, RIO_PORTS * PORT_SZ, RIO_PORTS * TMIO_SZ, RIO_PORTS * TMIO_SZ); | 819 | rio_dprintk(RIO_DEBUG_INIT, "getting : %Zd %Zd %Zd %Zd %Zd bytes\n", RI_SZ, RIO_HOSTS * HOST_SZ, RIO_PORTS * PORT_SZ, RIO_PORTS * TMIO_SZ, RIO_PORTS * TMIO_SZ); |
828 | 820 | ||
829 | if (!(p = ckmalloc(RI_SZ))) | 821 | if (!(p = kzalloc(RI_SZ, GFP_KERNEL))) |
830 | goto free0; | 822 | goto free0; |
831 | if (!(p->RIOHosts = ckmalloc(RIO_HOSTS * HOST_SZ))) | 823 | if (!(p->RIOHosts = kzalloc(RIO_HOSTS * HOST_SZ, GFP_KERNEL))) |
832 | goto free1; | 824 | goto free1; |
833 | if (!(p->RIOPortp = ckmalloc(RIO_PORTS * PORT_SZ))) | 825 | if (!(p->RIOPortp = kzalloc(RIO_PORTS * PORT_SZ, GFP_KERNEL))) |
834 | goto free2; | 826 | goto free2; |
835 | p->RIOConf = RIOConf; | 827 | p->RIOConf = RIOConf; |
836 | rio_dprintk(RIO_DEBUG_INIT, "Got : %p %p %p\n", p, p->RIOHosts, p->RIOPortp); | 828 | rio_dprintk(RIO_DEBUG_INIT, "Got : %p %p %p\n", p, p->RIOHosts, p->RIOPortp); |
837 | 829 | ||
838 | #if 1 | 830 | #if 1 |
839 | for (i = 0; i < RIO_PORTS; i++) { | 831 | for (i = 0; i < RIO_PORTS; i++) { |
840 | port = p->RIOPortp[i] = ckmalloc(sizeof(struct Port)); | 832 | port = p->RIOPortp[i] = kzalloc(sizeof(struct Port), GFP_KERNEL); |
841 | if (!port) { | 833 | if (!port) { |
842 | goto free6; | 834 | goto free6; |
843 | } | 835 | } |
844 | rio_dprintk(RIO_DEBUG_INIT, "initing port %d (%d)\n", i, port->Mapped); | 836 | rio_dprintk(RIO_DEBUG_INIT, "initing port %d (%d)\n", i, port->Mapped); |
837 | tty_port_init(&port->gs.port); | ||
838 | port->gs.port.ops = &rio_port_ops; | ||
845 | port->PortNum = i; | 839 | port->PortNum = i; |
846 | port->gs.magic = RIO_MAGIC; | 840 | port->gs.magic = RIO_MAGIC; |
847 | port->gs.close_delay = HZ / 2; | 841 | port->gs.close_delay = HZ / 2; |
848 | port->gs.closing_wait = 30 * HZ; | 842 | port->gs.closing_wait = 30 * HZ; |
849 | port->gs.rd = &rio_real_driver; | 843 | port->gs.rd = &rio_real_driver; |
850 | spin_lock_init(&port->portSem); | 844 | spin_lock_init(&port->portSem); |
851 | /* | ||
852 | * Initializing wait queue | ||
853 | */ | ||
854 | init_waitqueue_head(&port->gs.port.open_wait); | ||
855 | init_waitqueue_head(&port->gs.port.close_wait); | ||
856 | } | 845 | } |
857 | #else | 846 | #else |
858 | /* We could postpone initializing them to when they are configured. */ | 847 | /* We could postpone initializing them to when they are configured. */ |
diff --git a/drivers/char/riscom8.c b/drivers/char/riscom8.c index 2c6c8f33d6b4..9af8d74875bc 100644 --- a/drivers/char/riscom8.c +++ b/drivers/char/riscom8.c | |||
@@ -857,98 +857,21 @@ static void rc_shutdown_port(struct tty_struct *tty, | |||
857 | rc_shutdown_board(bp); | 857 | rc_shutdown_board(bp); |
858 | } | 858 | } |
859 | 859 | ||
860 | static int block_til_ready(struct tty_struct *tty, struct file *filp, | 860 | static int carrier_raised(struct tty_port *port) |
861 | struct riscom_port *port) | ||
862 | { | 861 | { |
863 | DECLARE_WAITQUEUE(wait, current); | 862 | struct riscom_port *p = container_of(port, struct riscom_port, port); |
864 | struct riscom_board *bp = port_Board(port); | 863 | struct riscom_board *bp = port_Board(p); |
865 | int retval; | ||
866 | int do_clocal = 0; | ||
867 | int CD; | ||
868 | unsigned long flags; | 864 | unsigned long flags; |
869 | 865 | int CD; | |
870 | /* | 866 | |
871 | * If the device is in the middle of being closed, then block | ||
872 | * until it's done, and then try again. | ||
873 | */ | ||
874 | if (tty_hung_up_p(filp) || port->port.flags & ASYNC_CLOSING) { | ||
875 | interruptible_sleep_on(&port->port.close_wait); | ||
876 | if (port->port.flags & ASYNC_HUP_NOTIFY) | ||
877 | return -EAGAIN; | ||
878 | else | ||
879 | return -ERESTARTSYS; | ||
880 | } | ||
881 | |||
882 | /* | ||
883 | * If non-blocking mode is set, or the port is not enabled, | ||
884 | * then make the check up front and then exit. | ||
885 | */ | ||
886 | if ((filp->f_flags & O_NONBLOCK) || | ||
887 | (tty->flags & (1 << TTY_IO_ERROR))) { | ||
888 | port->port.flags |= ASYNC_NORMAL_ACTIVE; | ||
889 | return 0; | ||
890 | } | ||
891 | |||
892 | if (C_CLOCAL(tty)) | ||
893 | do_clocal = 1; | ||
894 | |||
895 | /* | ||
896 | * Block waiting for the carrier detect and the line to become | ||
897 | * free (i.e., not in use by the callout). While we are in | ||
898 | * this loop, info->count is dropped by one, so that | ||
899 | * rs_close() knows when to free things. We restore it upon | ||
900 | * exit, either normal or abnormal. | ||
901 | */ | ||
902 | retval = 0; | ||
903 | add_wait_queue(&port->port.open_wait, &wait); | ||
904 | |||
905 | spin_lock_irqsave(&riscom_lock, flags); | 867 | spin_lock_irqsave(&riscom_lock, flags); |
906 | 868 | rc_out(bp, CD180_CAR, port_No(p)); | |
907 | if (!tty_hung_up_p(filp)) | 869 | CD = rc_in(bp, CD180_MSVR) & MSVR_CD; |
908 | port->port.count--; | 870 | rc_out(bp, CD180_MSVR, MSVR_RTS); |
909 | 871 | bp->DTR &= ~(1u << port_No(p)); | |
872 | rc_out(bp, RC_DTR, bp->DTR); | ||
910 | spin_unlock_irqrestore(&riscom_lock, flags); | 873 | spin_unlock_irqrestore(&riscom_lock, flags); |
911 | 874 | return CD; | |
912 | port->port.blocked_open++; | ||
913 | while (1) { | ||
914 | spin_lock_irqsave(&riscom_lock, flags); | ||
915 | |||
916 | rc_out(bp, CD180_CAR, port_No(port)); | ||
917 | CD = rc_in(bp, CD180_MSVR) & MSVR_CD; | ||
918 | rc_out(bp, CD180_MSVR, MSVR_RTS); | ||
919 | bp->DTR &= ~(1u << port_No(port)); | ||
920 | rc_out(bp, RC_DTR, bp->DTR); | ||
921 | |||
922 | spin_unlock_irqrestore(&riscom_lock, flags); | ||
923 | |||
924 | set_current_state(TASK_INTERRUPTIBLE); | ||
925 | if (tty_hung_up_p(filp) || | ||
926 | !(port->port.flags & ASYNC_INITIALIZED)) { | ||
927 | if (port->port.flags & ASYNC_HUP_NOTIFY) | ||
928 | retval = -EAGAIN; | ||
929 | else | ||
930 | retval = -ERESTARTSYS; | ||
931 | break; | ||
932 | } | ||
933 | if (!(port->port.flags & ASYNC_CLOSING) && | ||
934 | (do_clocal || CD)) | ||
935 | break; | ||
936 | if (signal_pending(current)) { | ||
937 | retval = -ERESTARTSYS; | ||
938 | break; | ||
939 | } | ||
940 | schedule(); | ||
941 | } | ||
942 | __set_current_state(TASK_RUNNING); | ||
943 | remove_wait_queue(&port->port.open_wait, &wait); | ||
944 | if (!tty_hung_up_p(filp)) | ||
945 | port->port.count++; | ||
946 | port->port.blocked_open--; | ||
947 | if (retval) | ||
948 | return retval; | ||
949 | |||
950 | port->port.flags |= ASYNC_NORMAL_ACTIVE; | ||
951 | return 0; | ||
952 | } | 875 | } |
953 | 876 | ||
954 | static int rc_open(struct tty_struct *tty, struct file *filp) | 877 | static int rc_open(struct tty_struct *tty, struct file *filp) |
@@ -977,13 +900,13 @@ static int rc_open(struct tty_struct *tty, struct file *filp) | |||
977 | 900 | ||
978 | error = rc_setup_port(bp, port); | 901 | error = rc_setup_port(bp, port); |
979 | if (error == 0) | 902 | if (error == 0) |
980 | error = block_til_ready(tty, filp, port); | 903 | error = tty_port_block_til_ready(&port->port, tty, filp); |
981 | return error; | 904 | return error; |
982 | } | 905 | } |
983 | 906 | ||
984 | static void rc_flush_buffer(struct tty_struct *tty) | 907 | static void rc_flush_buffer(struct tty_struct *tty) |
985 | { | 908 | { |
986 | struct riscom_port *port = (struct riscom_port *)tty->driver_data; | 909 | struct riscom_port *port = tty->driver_data; |
987 | unsigned long flags; | 910 | unsigned long flags; |
988 | 911 | ||
989 | if (rc_paranoia_check(port, tty->name, "rc_flush_buffer")) | 912 | if (rc_paranoia_check(port, tty->name, "rc_flush_buffer")) |
@@ -998,7 +921,7 @@ static void rc_flush_buffer(struct tty_struct *tty) | |||
998 | 921 | ||
999 | static void rc_close(struct tty_struct *tty, struct file *filp) | 922 | static void rc_close(struct tty_struct *tty, struct file *filp) |
1000 | { | 923 | { |
1001 | struct riscom_port *port = (struct riscom_port *) tty->driver_data; | 924 | struct riscom_port *port = tty->driver_data; |
1002 | struct riscom_board *bp; | 925 | struct riscom_board *bp; |
1003 | unsigned long flags; | 926 | unsigned long flags; |
1004 | unsigned long timeout; | 927 | unsigned long timeout; |
@@ -1006,40 +929,19 @@ static void rc_close(struct tty_struct *tty, struct file *filp) | |||
1006 | if (!port || rc_paranoia_check(port, tty->name, "close")) | 929 | if (!port || rc_paranoia_check(port, tty->name, "close")) |
1007 | return; | 930 | return; |
1008 | 931 | ||
1009 | spin_lock_irqsave(&riscom_lock, flags); | ||
1010 | |||
1011 | if (tty_hung_up_p(filp)) | ||
1012 | goto out; | ||
1013 | |||
1014 | bp = port_Board(port); | 932 | bp = port_Board(port); |
1015 | if ((tty->count == 1) && (port->port.count != 1)) { | 933 | |
1016 | printk(KERN_INFO "rc%d: rc_close: bad port count;" | 934 | if (tty_port_close_start(&port->port, tty, filp) == 0) |
1017 | " tty->count is 1, port count is %d\n", | 935 | return; |
1018 | board_No(bp), port->port.count); | 936 | |
1019 | port->port.count = 1; | ||
1020 | } | ||
1021 | if (--port->port.count < 0) { | ||
1022 | printk(KERN_INFO "rc%d: rc_close: bad port count " | ||
1023 | "for tty%d: %d\n", | ||
1024 | board_No(bp), port_No(port), port->port.count); | ||
1025 | port->port.count = 0; | ||
1026 | } | ||
1027 | if (port->port.count) | ||
1028 | goto out; | ||
1029 | port->port.flags |= ASYNC_CLOSING; | ||
1030 | /* | ||
1031 | * Now we wait for the transmit buffer to clear; and we notify | ||
1032 | * the line discipline to only process XON/XOFF characters. | ||
1033 | */ | ||
1034 | tty->closing = 1; | ||
1035 | if (port->port.closing_wait != ASYNC_CLOSING_WAIT_NONE) | ||
1036 | tty_wait_until_sent(tty, port->port.closing_wait); | ||
1037 | /* | 937 | /* |
1038 | * At this point we stop accepting input. To do this, we | 938 | * At this point we stop accepting input. To do this, we |
1039 | * disable the receive line status interrupts, and tell the | 939 | * disable the receive line status interrupts, and tell the |
1040 | * interrupt driver to stop checking the data ready bit in the | 940 | * interrupt driver to stop checking the data ready bit in the |
1041 | * line status register. | 941 | * line status register. |
1042 | */ | 942 | */ |
943 | |||
944 | spin_lock_irqsave(&riscom_lock, flags); | ||
1043 | port->IER &= ~IER_RXD; | 945 | port->IER &= ~IER_RXD; |
1044 | if (port->port.flags & ASYNC_INITIALIZED) { | 946 | if (port->port.flags & ASYNC_INITIALIZED) { |
1045 | port->IER &= ~IER_TXRDY; | 947 | port->IER &= ~IER_TXRDY; |
@@ -1053,33 +955,24 @@ static void rc_close(struct tty_struct *tty, struct file *filp) | |||
1053 | */ | 955 | */ |
1054 | timeout = jiffies + HZ; | 956 | timeout = jiffies + HZ; |
1055 | while (port->IER & IER_TXEMPTY) { | 957 | while (port->IER & IER_TXEMPTY) { |
958 | spin_unlock_irqrestore(&riscom_lock, flags); | ||
1056 | msleep_interruptible(jiffies_to_msecs(port->timeout)); | 959 | msleep_interruptible(jiffies_to_msecs(port->timeout)); |
960 | spin_lock_irqsave(&riscom_lock, flags); | ||
1057 | if (time_after(jiffies, timeout)) | 961 | if (time_after(jiffies, timeout)) |
1058 | break; | 962 | break; |
1059 | } | 963 | } |
1060 | } | 964 | } |
1061 | rc_shutdown_port(tty, bp, port); | 965 | rc_shutdown_port(tty, bp, port); |
1062 | rc_flush_buffer(tty); | 966 | rc_flush_buffer(tty); |
1063 | tty_ldisc_flush(tty); | ||
1064 | |||
1065 | tty->closing = 0; | ||
1066 | port->port.tty = NULL; | ||
1067 | if (port->port.blocked_open) { | ||
1068 | if (port->port.close_delay) | ||
1069 | msleep_interruptible(jiffies_to_msecs(port->port.close_delay)); | ||
1070 | wake_up_interruptible(&port->port.open_wait); | ||
1071 | } | ||
1072 | port->port.flags &= ~(ASYNC_NORMAL_ACTIVE|ASYNC_CLOSING); | ||
1073 | wake_up_interruptible(&port->port.close_wait); | ||
1074 | |||
1075 | out: | ||
1076 | spin_unlock_irqrestore(&riscom_lock, flags); | 967 | spin_unlock_irqrestore(&riscom_lock, flags); |
968 | |||
969 | tty_port_close_end(&port->port, tty); | ||
1077 | } | 970 | } |
1078 | 971 | ||
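[Editor's note: besides adopting tty_port_close_start()/tty_port_close_end(), the rc_close() rework fixes a sleep-under-spinlock: msleep_interruptible() schedules, so riscom_lock is now dropped and retaken around it. The corrected wait loop, isolated:

    timeout = jiffies + HZ;
    while (port->IER & IER_TXEMPTY) {
            spin_unlock_irqrestore(&riscom_lock, flags);
            msleep_interruptible(jiffies_to_msecs(port->timeout));
            spin_lock_irqsave(&riscom_lock, flags);
            if (time_after(jiffies, timeout))
                    break;  /* give up after one second */
    }
]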
1079 | static int rc_write(struct tty_struct *tty, | 972 | static int rc_write(struct tty_struct *tty, |
1080 | const unsigned char *buf, int count) | 973 | const unsigned char *buf, int count) |
1081 | { | 974 | { |
1082 | struct riscom_port *port = (struct riscom_port *)tty->driver_data; | 975 | struct riscom_port *port = tty->driver_data; |
1083 | struct riscom_board *bp; | 976 | struct riscom_board *bp; |
1084 | int c, total = 0; | 977 | int c, total = 0; |
1085 | unsigned long flags; | 978 | unsigned long flags; |
@@ -1122,7 +1015,7 @@ static int rc_write(struct tty_struct *tty, | |||
1122 | 1015 | ||
1123 | static int rc_put_char(struct tty_struct *tty, unsigned char ch) | 1016 | static int rc_put_char(struct tty_struct *tty, unsigned char ch) |
1124 | { | 1017 | { |
1125 | struct riscom_port *port = (struct riscom_port *)tty->driver_data; | 1018 | struct riscom_port *port = tty->driver_data; |
1126 | unsigned long flags; | 1019 | unsigned long flags; |
1127 | int ret = 0; | 1020 | int ret = 0; |
1128 | 1021 | ||
@@ -1146,7 +1039,7 @@ out: | |||
1146 | 1039 | ||
1147 | static void rc_flush_chars(struct tty_struct *tty) | 1040 | static void rc_flush_chars(struct tty_struct *tty) |
1148 | { | 1041 | { |
1149 | struct riscom_port *port = (struct riscom_port *)tty->driver_data; | 1042 | struct riscom_port *port = tty->driver_data; |
1150 | unsigned long flags; | 1043 | unsigned long flags; |
1151 | 1044 | ||
1152 | if (rc_paranoia_check(port, tty->name, "rc_flush_chars")) | 1045 | if (rc_paranoia_check(port, tty->name, "rc_flush_chars")) |
@@ -1166,7 +1059,7 @@ static void rc_flush_chars(struct tty_struct *tty) | |||
1166 | 1059 | ||
1167 | static int rc_write_room(struct tty_struct *tty) | 1060 | static int rc_write_room(struct tty_struct *tty) |
1168 | { | 1061 | { |
1169 | struct riscom_port *port = (struct riscom_port *)tty->driver_data; | 1062 | struct riscom_port *port = tty->driver_data; |
1170 | int ret; | 1063 | int ret; |
1171 | 1064 | ||
1172 | if (rc_paranoia_check(port, tty->name, "rc_write_room")) | 1065 | if (rc_paranoia_check(port, tty->name, "rc_write_room")) |
@@ -1180,7 +1073,7 @@ static int rc_write_room(struct tty_struct *tty) | |||
1180 | 1073 | ||
1181 | static int rc_chars_in_buffer(struct tty_struct *tty) | 1074 | static int rc_chars_in_buffer(struct tty_struct *tty) |
1182 | { | 1075 | { |
1183 | struct riscom_port *port = (struct riscom_port *)tty->driver_data; | 1076 | struct riscom_port *port = tty->driver_data; |
1184 | 1077 | ||
1185 | if (rc_paranoia_check(port, tty->name, "rc_chars_in_buffer")) | 1078 | if (rc_paranoia_check(port, tty->name, "rc_chars_in_buffer")) |
1186 | return 0; | 1079 | return 0; |
@@ -1190,7 +1083,7 @@ static int rc_chars_in_buffer(struct tty_struct *tty) | |||
1190 | 1083 | ||
1191 | static int rc_tiocmget(struct tty_struct *tty, struct file *file) | 1084 | static int rc_tiocmget(struct tty_struct *tty, struct file *file) |
1192 | { | 1085 | { |
1193 | struct riscom_port *port = (struct riscom_port *)tty->driver_data; | 1086 | struct riscom_port *port = tty->driver_data; |
1194 | struct riscom_board *bp; | 1087 | struct riscom_board *bp; |
1195 | unsigned char status; | 1088 | unsigned char status; |
1196 | unsigned int result; | 1089 | unsigned int result; |
@@ -1220,7 +1113,7 @@ static int rc_tiocmget(struct tty_struct *tty, struct file *file) | |||
1220 | static int rc_tiocmset(struct tty_struct *tty, struct file *file, | 1113 | static int rc_tiocmset(struct tty_struct *tty, struct file *file, |
1221 | unsigned int set, unsigned int clear) | 1114 | unsigned int set, unsigned int clear) |
1222 | { | 1115 | { |
1223 | struct riscom_port *port = (struct riscom_port *)tty->driver_data; | 1116 | struct riscom_port *port = tty->driver_data; |
1224 | unsigned long flags; | 1117 | unsigned long flags; |
1225 | struct riscom_board *bp; | 1118 | struct riscom_board *bp; |
1226 | 1119 | ||
@@ -1252,7 +1145,7 @@ static int rc_tiocmset(struct tty_struct *tty, struct file *file, | |||
1252 | 1145 | ||
1253 | static int rc_send_break(struct tty_struct *tty, int length) | 1146 | static int rc_send_break(struct tty_struct *tty, int length) |
1254 | { | 1147 | { |
1255 | struct riscom_port *port = (struct riscom_port *)tty->driver_data; | 1148 | struct riscom_port *port = tty->driver_data; |
1256 | struct riscom_board *bp = port_Board(port); | 1149 | struct riscom_board *bp = port_Board(port); |
1257 | unsigned long flags; | 1150 | unsigned long flags; |
1258 | 1151 | ||
@@ -1345,7 +1238,7 @@ static int rc_get_serial_info(struct riscom_port *port, | |||
1345 | static int rc_ioctl(struct tty_struct *tty, struct file *filp, | 1238 | static int rc_ioctl(struct tty_struct *tty, struct file *filp, |
1346 | unsigned int cmd, unsigned long arg) | 1239 | unsigned int cmd, unsigned long arg) |
1347 | { | 1240 | { |
1348 | struct riscom_port *port = (struct riscom_port *)tty->driver_data; | 1241 | struct riscom_port *port = tty->driver_data; |
1349 | void __user *argp = (void __user *)arg; | 1242 | void __user *argp = (void __user *)arg; |
1350 | int retval; | 1243 | int retval; |
1351 | 1244 | ||
@@ -1371,7 +1264,7 @@ static int rc_ioctl(struct tty_struct *tty, struct file *filp, | |||
1371 | 1264 | ||
1372 | static void rc_throttle(struct tty_struct *tty) | 1265 | static void rc_throttle(struct tty_struct *tty) |
1373 | { | 1266 | { |
1374 | struct riscom_port *port = (struct riscom_port *)tty->driver_data; | 1267 | struct riscom_port *port = tty->driver_data; |
1375 | struct riscom_board *bp; | 1268 | struct riscom_board *bp; |
1376 | unsigned long flags; | 1269 | unsigned long flags; |
1377 | 1270 | ||
@@ -1393,7 +1286,7 @@ static void rc_throttle(struct tty_struct *tty) | |||
1393 | 1286 | ||
1394 | static void rc_unthrottle(struct tty_struct *tty) | 1287 | static void rc_unthrottle(struct tty_struct *tty) |
1395 | { | 1288 | { |
1396 | struct riscom_port *port = (struct riscom_port *)tty->driver_data; | 1289 | struct riscom_port *port = tty->driver_data; |
1397 | struct riscom_board *bp; | 1290 | struct riscom_board *bp; |
1398 | unsigned long flags; | 1291 | unsigned long flags; |
1399 | 1292 | ||
@@ -1415,7 +1308,7 @@ static void rc_unthrottle(struct tty_struct *tty) | |||
1415 | 1308 | ||
1416 | static void rc_stop(struct tty_struct *tty) | 1309 | static void rc_stop(struct tty_struct *tty) |
1417 | { | 1310 | { |
1418 | struct riscom_port *port = (struct riscom_port *)tty->driver_data; | 1311 | struct riscom_port *port = tty->driver_data; |
1419 | struct riscom_board *bp; | 1312 | struct riscom_board *bp; |
1420 | unsigned long flags; | 1313 | unsigned long flags; |
1421 | 1314 | ||
@@ -1433,7 +1326,7 @@ static void rc_stop(struct tty_struct *tty) | |||
1433 | 1326 | ||
1434 | static void rc_start(struct tty_struct *tty) | 1327 | static void rc_start(struct tty_struct *tty) |
1435 | { | 1328 | { |
1436 | struct riscom_port *port = (struct riscom_port *)tty->driver_data; | 1329 | struct riscom_port *port = tty->driver_data; |
1437 | struct riscom_board *bp; | 1330 | struct riscom_board *bp; |
1438 | unsigned long flags; | 1331 | unsigned long flags; |
1439 | 1332 | ||
@@ -1454,8 +1347,9 @@ static void rc_start(struct tty_struct *tty) | |||
1454 | 1347 | ||
1455 | static void rc_hangup(struct tty_struct *tty) | 1348 | static void rc_hangup(struct tty_struct *tty) |
1456 | { | 1349 | { |
1457 | struct riscom_port *port = (struct riscom_port *)tty->driver_data; | 1350 | struct riscom_port *port = tty->driver_data; |
1458 | struct riscom_board *bp; | 1351 | struct riscom_board *bp; |
1352 | unsigned long flags; | ||
1459 | 1353 | ||
1460 | if (rc_paranoia_check(port, tty->name, "rc_hangup")) | 1354 | if (rc_paranoia_check(port, tty->name, "rc_hangup")) |
1461 | return; | 1355 | return; |
@@ -1463,16 +1357,18 @@ static void rc_hangup(struct tty_struct *tty) | |||
1463 | bp = port_Board(port); | 1357 | bp = port_Board(port); |
1464 | 1358 | ||
1465 | rc_shutdown_port(tty, bp, port); | 1359 | rc_shutdown_port(tty, bp, port); |
1360 | spin_lock_irqsave(&port->port.lock, flags); | ||
1466 | port->port.count = 0; | 1361 | port->port.count = 0; |
1467 | port->port.flags &= ~ASYNC_NORMAL_ACTIVE; | 1362 | port->port.flags &= ~ASYNC_NORMAL_ACTIVE; |
1468 | port->port.tty = NULL; | 1363 | port->port.tty = NULL; |
1469 | wake_up_interruptible(&port->port.open_wait); | 1364 | wake_up_interruptible(&port->port.open_wait); |
1365 | spin_unlock_irqrestore(&port->port.lock, flags); | ||
1470 | } | 1366 | } |
1471 | 1367 | ||
1472 | static void rc_set_termios(struct tty_struct *tty, | 1368 | static void rc_set_termios(struct tty_struct *tty, |
1473 | struct ktermios *old_termios) | 1369 | struct ktermios *old_termios) |
1474 | { | 1370 | { |
1475 | struct riscom_port *port = (struct riscom_port *)tty->driver_data; | 1371 | struct riscom_port *port = tty->driver_data; |
1476 | unsigned long flags; | 1372 | unsigned long flags; |
1477 | 1373 | ||
1478 | if (rc_paranoia_check(port, tty->name, "rc_set_termios")) | 1374 | if (rc_paranoia_check(port, tty->name, "rc_set_termios")) |
@@ -1510,6 +1406,11 @@ static const struct tty_operations riscom_ops = { | |||
1510 | .break_ctl = rc_send_break, | 1406 | .break_ctl = rc_send_break, |
1511 | }; | 1407 | }; |
1512 | 1408 | ||
1409 | static const struct tty_port_operations riscom_port_ops = { | ||
1410 | .carrier_raised = carrier_raised, | ||
1411 | }; | ||
1412 | |||
1413 | |||
1513 | static int __init rc_init_drivers(void) | 1414 | static int __init rc_init_drivers(void) |
1514 | { | 1415 | { |
1515 | int error; | 1416 | int error; |
@@ -1541,6 +1442,7 @@ static int __init rc_init_drivers(void) | |||
1541 | memset(rc_port, 0, sizeof(rc_port)); | 1442 | memset(rc_port, 0, sizeof(rc_port)); |
1542 | for (i = 0; i < RC_NPORT * RC_NBOARD; i++) { | 1443 | for (i = 0; i < RC_NPORT * RC_NBOARD; i++) { |
1543 | tty_port_init(&rc_port[i].port); | 1444 | tty_port_init(&rc_port[i].port); |
1445 | rc_port[i].port.ops = &riscom_port_ops; | ||
1544 | rc_port[i].magic = RISCOM8_MAGIC; | 1446 | rc_port[i].magic = RISCOM8_MAGIC; |
1545 | } | 1447 | } |
1546 | return 0; | 1448 | return 0; |
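
The riscom8 hunks above replace the driver's hand-rolled close logic with the generic tty_port helpers. A minimal sketch of the resulting close-path shape, assuming the 2.6.29-era helpers this series uses; my_close(), my_port, my_drain() and my_shutdown() are illustrative names, not symbols from the driver:

#include <linux/tty.h>

struct my_port {
	struct tty_port port;	/* generic port state, must be embedded */
	/* ... hardware state ... */
};

static void my_drain(struct my_port *p);		/* illustrative */
static void my_shutdown(struct tty_struct *tty, struct my_port *p);

static void my_close(struct tty_struct *tty, struct file *filp)
{
	struct my_port *p = tty->driver_data;

	/*
	 * tty_port_close_start() centralises the hangup check, the
	 * port-count sanity fixups, ASYNC_CLOSING and the closing_wait
	 * drain that rc_close() used to open-code; a return of 0 means
	 * this was not the final close, so there is nothing left to do.
	 */
	if (tty_port_close_start(&p->port, tty, filp) == 0)
		return;

	my_drain(p);		/* hardware-specific: wait for the TX FIFO */
	my_shutdown(tty, p);	/* hardware-specific: quiesce the UART */
	tty_ldisc_flush(tty);

	/*
	 * tty_port_close_end() clears tty->closing, honours close_delay
	 * and wakes any opener blocked in block-til-ready.
	 */
	tty_port_close_end(&p->port, tty);
}
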
diff --git a/drivers/char/rocket.c b/drivers/char/rocket.c index 584d791e84a6..f59fc5cea067 100644 --- a/drivers/char/rocket.c +++ b/drivers/char/rocket.c | |||
@@ -135,6 +135,7 @@ static int rcktpt_type[NUM_BOARDS]; | |||
135 | static int is_PCI[NUM_BOARDS]; | 135 | static int is_PCI[NUM_BOARDS]; |
136 | static rocketModel_t rocketModel[NUM_BOARDS]; | 136 | static rocketModel_t rocketModel[NUM_BOARDS]; |
137 | static int max_board; | 137 | static int max_board; |
138 | static const struct tty_port_operations rocket_port_ops; | ||
138 | 139 | ||
139 | /* | 140 | /* |
140 | * The following arrays define the interrupt bits corresponding to each AIOP. | 141 | * The following arrays define the interrupt bits corresponding to each AIOP. |
@@ -435,15 +436,15 @@ static void rp_do_transmit(struct r_port *info) | |||
435 | #endif | 436 | #endif |
436 | if (!info) | 437 | if (!info) |
437 | return; | 438 | return; |
438 | if (!info->port.tty) { | 439 | tty = tty_port_tty_get(&info->port); |
439 | printk(KERN_WARNING "rp: WARNING %s called with " | 440 | |
440 | "info->port.tty==NULL\n", __func__); | 441 | if (tty == NULL) { |
442 | printk(KERN_WARNING "rp: WARNING %s called with tty==NULL\n", __func__); | ||
441 | clear_bit((info->aiop * 8) + info->chan, (void *) &xmit_flags[info->board]); | 443 | clear_bit((info->aiop * 8) + info->chan, (void *) &xmit_flags[info->board]); |
442 | return; | 444 | return; |
443 | } | 445 | } |
444 | 446 | ||
445 | spin_lock_irqsave(&info->slock, flags); | 447 | spin_lock_irqsave(&info->slock, flags); |
446 | tty = info->port.tty; | ||
447 | info->xmit_fifo_room = TXFIFO_SIZE - sGetTxCnt(cp); | 448 | info->xmit_fifo_room = TXFIFO_SIZE - sGetTxCnt(cp); |
448 | 449 | ||
449 | /* Loop sending data to FIFO until done or FIFO full */ | 450 | /* Loop sending data to FIFO until done or FIFO full */ |
@@ -477,6 +478,7 @@ static void rp_do_transmit(struct r_port *info) | |||
477 | } | 478 | } |
478 | 479 | ||
479 | spin_unlock_irqrestore(&info->slock, flags); | 480 | spin_unlock_irqrestore(&info->slock, flags); |
481 | tty_kref_put(tty); | ||
480 | 482 | ||
481 | #ifdef ROCKET_DEBUG_INTR | 483 | #ifdef ROCKET_DEBUG_INTR |
482 | printk(KERN_DEBUG "(%d,%d,%d,%d)...\n", info->xmit_cnt, info->xmit_head, | 484 | printk(KERN_DEBUG "(%d,%d,%d,%d)...\n", info->xmit_cnt, info->xmit_head, |
@@ -498,18 +500,18 @@ static void rp_handle_port(struct r_port *info) | |||
498 | if (!info) | 500 | if (!info) |
499 | return; | 501 | return; |
500 | 502 | ||
501 | if ((info->flags & ROCKET_INITIALIZED) == 0) { | 503 | if ((info->port.flags & ASYNC_INITIALIZED) == 0) { |
502 | printk(KERN_WARNING "rp: WARNING: rp_handle_port called with " | 504 | printk(KERN_WARNING "rp: WARNING: rp_handle_port called with " |
503 | "info->flags & NOT_INIT\n"); | 505 | "info->flags & NOT_INIT\n"); |
504 | return; | 506 | return; |
505 | } | 507 | } |
506 | if (!info->port.tty) { | 508 | tty = tty_port_tty_get(&info->port); |
509 | if (!tty) { | ||
507 | printk(KERN_WARNING "rp: WARNING: rp_handle_port called with " | 510 | printk(KERN_WARNING "rp: WARNING: rp_handle_port called with " |
508 | "info->port.tty==NULL\n"); | 511 | "tty==NULL\n"); |
509 | return; | 512 | return; |
510 | } | 513 | } |
511 | cp = &info->channel; | 514 | cp = &info->channel; |
512 | tty = info->port.tty; | ||
513 | 515 | ||
514 | IntMask = sGetChanIntID(cp) & info->intmask; | 516 | IntMask = sGetChanIntID(cp) & info->intmask; |
515 | #ifdef ROCKET_DEBUG_INTR | 517 | #ifdef ROCKET_DEBUG_INTR |
@@ -541,6 +543,7 @@ static void rp_handle_port(struct r_port *info) | |||
541 | printk(KERN_INFO "DSR change...\n"); | 543 | printk(KERN_INFO "DSR change...\n"); |
542 | } | 544 | } |
543 | #endif | 545 | #endif |
546 | tty_kref_put(tty); | ||
544 | } | 547 | } |
545 | 548 | ||
546 | /* | 549 | /* |
@@ -649,9 +652,8 @@ static void init_r_port(int board, int aiop, int chan, struct pci_dev *pci_dev) | |||
649 | info->board = board; | 652 | info->board = board; |
650 | info->aiop = aiop; | 653 | info->aiop = aiop; |
651 | info->chan = chan; | 654 | info->chan = chan; |
652 | info->port.closing_wait = 3000; | 655 | tty_port_init(&info->port); |
653 | info->port.close_delay = 50; | 656 | info->port.ops = &rocket_port_ops; |
654 | init_waitqueue_head(&info->port.open_wait); | ||
655 | init_completion(&info->close_wait); | 657 | init_completion(&info->close_wait); |
656 | info->flags &= ~ROCKET_MODE_MASK; | 658 | info->flags &= ~ROCKET_MODE_MASK; |
657 | switch (pc104[board][line]) { | 659 | switch (pc104[board][line]) { |
@@ -710,7 +712,7 @@ static void init_r_port(int board, int aiop, int chan, struct pci_dev *pci_dev) | |||
710 | * Configures a rocketport port according to its termio settings. Called from | 712 | * Configures a rocketport port according to its termio settings. Called from |
711 | * user mode into the driver (exception handler). *info CD manipulation is spinlock protected. | 713 | * user mode into the driver (exception handler). *info CD manipulation is spinlock protected. |
712 | */ | 714 | */ |
713 | static void configure_r_port(struct r_port *info, | 715 | static void configure_r_port(struct tty_struct *tty, struct r_port *info, |
714 | struct ktermios *old_termios) | 716 | struct ktermios *old_termios) |
715 | { | 717 | { |
716 | unsigned cflag; | 718 | unsigned cflag; |
@@ -718,7 +720,7 @@ static void configure_r_port(struct r_port *info, | |||
718 | unsigned rocketMode; | 720 | unsigned rocketMode; |
719 | int bits, baud, divisor; | 721 | int bits, baud, divisor; |
720 | CHANNEL_t *cp; | 722 | CHANNEL_t *cp; |
721 | struct ktermios *t = info->port.tty->termios; | 723 | struct ktermios *t = tty->termios; |
722 | 724 | ||
723 | cp = &info->channel; | 725 | cp = &info->channel; |
724 | cflag = t->c_cflag; | 726 | cflag = t->c_cflag; |
@@ -751,7 +753,7 @@ static void configure_r_port(struct r_port *info, | |||
751 | } | 753 | } |
752 | 754 | ||
753 | /* baud rate */ | 755 | /* baud rate */ |
754 | baud = tty_get_baud_rate(info->port.tty); | 756 | baud = tty_get_baud_rate(tty); |
755 | if (!baud) | 757 | if (!baud) |
756 | baud = 9600; | 758 | baud = 9600; |
757 | divisor = ((rp_baud_base[info->board] + (baud >> 1)) / baud) - 1; | 759 | divisor = ((rp_baud_base[info->board] + (baud >> 1)) / baud) - 1; |
@@ -769,7 +771,7 @@ static void configure_r_port(struct r_port *info, | |||
769 | sSetBaud(cp, divisor); | 771 | sSetBaud(cp, divisor); |
770 | 772 | ||
771 | /* FIXME: Should really back compute a baud rate from the divisor */ | 773 | /* FIXME: Should really back compute a baud rate from the divisor */ |
772 | tty_encode_baud_rate(info->port.tty, baud, baud); | 774 | tty_encode_baud_rate(tty, baud, baud); |
773 | 775 | ||
774 | if (cflag & CRTSCTS) { | 776 | if (cflag & CRTSCTS) { |
775 | info->intmask |= DELTA_CTS; | 777 | info->intmask |= DELTA_CTS; |
@@ -794,15 +796,15 @@ static void configure_r_port(struct r_port *info, | |||
794 | * Handle software flow control in the board | 796 | * Handle software flow control in the board |
795 | */ | 797 | */ |
796 | #ifdef ROCKET_SOFT_FLOW | 798 | #ifdef ROCKET_SOFT_FLOW |
797 | if (I_IXON(info->port.tty)) { | 799 | if (I_IXON(tty)) { |
798 | sEnTxSoftFlowCtl(cp); | 800 | sEnTxSoftFlowCtl(cp); |
799 | if (I_IXANY(info->port.tty)) { | 801 | if (I_IXANY(tty)) { |
800 | sEnIXANY(cp); | 802 | sEnIXANY(cp); |
801 | } else { | 803 | } else { |
802 | sDisIXANY(cp); | 804 | sDisIXANY(cp); |
803 | } | 805 | } |
804 | sSetTxXONChar(cp, START_CHAR(info->port.tty)); | 806 | sSetTxXONChar(cp, START_CHAR(tty)); |
805 | sSetTxXOFFChar(cp, STOP_CHAR(info->port.tty)); | 807 | sSetTxXOFFChar(cp, STOP_CHAR(tty)); |
806 | } else { | 808 | } else { |
807 | sDisTxSoftFlowCtl(cp); | 809 | sDisTxSoftFlowCtl(cp); |
808 | sDisIXANY(cp); | 810 | sDisIXANY(cp); |
@@ -814,24 +816,24 @@ static void configure_r_port(struct r_port *info, | |||
814 | * Set up ignore/read mask words | 816 | * Set up ignore/read mask words |
815 | */ | 817 | */ |
816 | info->read_status_mask = STMRCVROVRH | 0xFF; | 818 | info->read_status_mask = STMRCVROVRH | 0xFF; |
817 | if (I_INPCK(info->port.tty)) | 819 | if (I_INPCK(tty)) |
818 | info->read_status_mask |= STMFRAMEH | STMPARITYH; | 820 | info->read_status_mask |= STMFRAMEH | STMPARITYH; |
819 | if (I_BRKINT(info->port.tty) || I_PARMRK(info->port.tty)) | 821 | if (I_BRKINT(tty) || I_PARMRK(tty)) |
820 | info->read_status_mask |= STMBREAKH; | 822 | info->read_status_mask |= STMBREAKH; |
821 | 823 | ||
822 | /* | 824 | /* |
823 | * Characters to ignore | 825 | * Characters to ignore |
824 | */ | 826 | */ |
825 | info->ignore_status_mask = 0; | 827 | info->ignore_status_mask = 0; |
826 | if (I_IGNPAR(info->port.tty)) | 828 | if (I_IGNPAR(tty)) |
827 | info->ignore_status_mask |= STMFRAMEH | STMPARITYH; | 829 | info->ignore_status_mask |= STMFRAMEH | STMPARITYH; |
828 | if (I_IGNBRK(info->port.tty)) { | 830 | if (I_IGNBRK(tty)) { |
829 | info->ignore_status_mask |= STMBREAKH; | 831 | info->ignore_status_mask |= STMBREAKH; |
830 | /* | 832 | /* |
831 | * If we're ignoring parity and break indicators, | 833 | * If we're ignoring parity and break indicators, |
832 | * ignore overruns too. (For real raw support). | 834 | * ignore overruns too. (For real raw support). |
833 | */ | 835 | */ |
834 | if (I_IGNPAR(info->port.tty)) | 836 | if (I_IGNPAR(tty)) |
835 | info->ignore_status_mask |= STMRCVROVRH; | 837 | info->ignore_status_mask |= STMRCVROVRH; |
836 | } | 838 | } |
837 | 839 | ||
@@ -864,106 +866,17 @@ static void configure_r_port(struct r_port *info, | |||
864 | } | 866 | } |
865 | } | 867 | } |
866 | 868 | ||
867 | /* info->port.count is considered critical, protected by spinlocks. */ | 869 | static int carrier_raised(struct tty_port *port) |
868 | static int block_til_ready(struct tty_struct *tty, struct file *filp, | ||
869 | struct r_port *info) | ||
870 | { | 870 | { |
871 | DECLARE_WAITQUEUE(wait, current); | 871 | struct r_port *info = container_of(port, struct r_port, port); |
872 | int retval; | 872 | return (sGetChanStatusLo(&info->channel) & CD_ACT) ? 1 : 0; |
873 | int do_clocal = 0, extra_count = 0; | 873 | } |
874 | unsigned long flags; | ||
875 | |||
876 | /* | ||
877 | * If the device is in the middle of being closed, then block | ||
878 | * until it's done, and then try again. | ||
879 | */ | ||
880 | if (tty_hung_up_p(filp)) | ||
881 | return ((info->flags & ROCKET_HUP_NOTIFY) ? -EAGAIN : -ERESTARTSYS); | ||
882 | if (info->flags & ROCKET_CLOSING) { | ||
883 | if (wait_for_completion_interruptible(&info->close_wait)) | ||
884 | return -ERESTARTSYS; | ||
885 | return ((info->flags & ROCKET_HUP_NOTIFY) ? -EAGAIN : -ERESTARTSYS); | ||
886 | } | ||
887 | |||
888 | /* | ||
889 | * If non-blocking mode is set, or the port is not enabled, | ||
890 | * then make the check up front and then exit. | ||
891 | */ | ||
892 | if ((filp->f_flags & O_NONBLOCK) || (tty->flags & (1 << TTY_IO_ERROR))) { | ||
893 | info->flags |= ROCKET_NORMAL_ACTIVE; | ||
894 | return 0; | ||
895 | } | ||
896 | if (tty->termios->c_cflag & CLOCAL) | ||
897 | do_clocal = 1; | ||
898 | |||
899 | /* | ||
900 | * Block waiting for the carrier detect and the line to become free. While we are in | ||
901 | * this loop, info->port.count is dropped by one, so that rp_close() knows when to free things. | ||
902 | * We restore it upon exit, either normal or abnormal. | ||
903 | */ | ||
904 | retval = 0; | ||
905 | add_wait_queue(&info->port.open_wait, &wait); | ||
906 | #ifdef ROCKET_DEBUG_OPEN | ||
907 | printk(KERN_INFO "block_til_ready before block: ttyR%d, count = %d\n", info->line, info->port.count); | ||
908 | #endif | ||
909 | spin_lock_irqsave(&info->slock, flags); | ||
910 | |||
911 | #ifdef ROCKET_DISABLE_SIMUSAGE | ||
912 | info->flags |= ROCKET_NORMAL_ACTIVE; | ||
913 | #else | ||
914 | if (!tty_hung_up_p(filp)) { | ||
915 | extra_count = 1; | ||
916 | info->port.count--; | ||
917 | } | ||
918 | #endif | ||
919 | info->port.blocked_open++; | ||
920 | |||
921 | spin_unlock_irqrestore(&info->slock, flags); | ||
922 | |||
923 | while (1) { | ||
924 | if (tty->termios->c_cflag & CBAUD) { | ||
925 | sSetDTR(&info->channel); | ||
926 | sSetRTS(&info->channel); | ||
927 | } | ||
928 | set_current_state(TASK_INTERRUPTIBLE); | ||
929 | if (tty_hung_up_p(filp) || !(info->flags & ROCKET_INITIALIZED)) { | ||
930 | if (info->flags & ROCKET_HUP_NOTIFY) | ||
931 | retval = -EAGAIN; | ||
932 | else | ||
933 | retval = -ERESTARTSYS; | ||
934 | break; | ||
935 | } | ||
936 | if (!(info->flags & ROCKET_CLOSING) && (do_clocal || (sGetChanStatusLo(&info->channel) & CD_ACT))) | ||
937 | break; | ||
938 | if (signal_pending(current)) { | ||
939 | retval = -ERESTARTSYS; | ||
940 | break; | ||
941 | } | ||
942 | #ifdef ROCKET_DEBUG_OPEN | ||
943 | printk(KERN_INFO "block_til_ready blocking: ttyR%d, count = %d, flags=0x%0x\n", | ||
944 | info->line, info->port.count, info->flags); | ||
945 | #endif | ||
946 | schedule(); /* Don't hold spinlock here, will hang PC */ | ||
947 | } | ||
948 | __set_current_state(TASK_RUNNING); | ||
949 | remove_wait_queue(&info->port.open_wait, &wait); | ||
950 | |||
951 | spin_lock_irqsave(&info->slock, flags); | ||
952 | |||
953 | if (extra_count) | ||
954 | info->port.count++; | ||
955 | info->port.blocked_open--; | ||
956 | |||
957 | spin_unlock_irqrestore(&info->slock, flags); | ||
958 | 874 | ||
959 | #ifdef ROCKET_DEBUG_OPEN | 875 | static void raise_dtr_rts(struct tty_port *port) |
960 | printk(KERN_INFO "block_til_ready after blocking: ttyR%d, count = %d\n", | 876 | { |
961 | info->line, info->port.count); | 877 | struct r_port *info = container_of(port, struct r_port, port); |
962 | #endif | 878 | sSetDTR(&info->channel); |
963 | if (retval) | 879 | sSetRTS(&info->channel); |
964 | return retval; | ||
965 | info->flags |= ROCKET_NORMAL_ACTIVE; | ||
966 | return 0; | ||
967 | } | 880 | } |
968 | 881 | ||
969 | /* | 882 | /* |
@@ -973,24 +886,26 @@ static int block_til_ready(struct tty_struct *tty, struct file *filp, | |||
973 | static int rp_open(struct tty_struct *tty, struct file *filp) | 886 | static int rp_open(struct tty_struct *tty, struct file *filp) |
974 | { | 887 | { |
975 | struct r_port *info; | 888 | struct r_port *info; |
889 | struct tty_port *port; | ||
976 | int line = 0, retval; | 890 | int line = 0, retval; |
977 | CHANNEL_t *cp; | 891 | CHANNEL_t *cp; |
978 | unsigned long page; | 892 | unsigned long page; |
979 | 893 | ||
980 | line = tty->index; | 894 | line = tty->index; |
981 | if ((line < 0) || (line >= MAX_RP_PORTS) || ((info = rp_table[line]) == NULL)) | 895 | if (line < 0 || line >= MAX_RP_PORTS || ((info = rp_table[line]) == NULL)) |
982 | return -ENXIO; | 896 | return -ENXIO; |
983 | | 897 | port = &info->port; |
898 | |||
984 | page = __get_free_page(GFP_KERNEL); | 899 | page = __get_free_page(GFP_KERNEL); |
985 | if (!page) | 900 | if (!page) |
986 | return -ENOMEM; | 901 | return -ENOMEM; |
987 | 902 | ||
988 | if (info->flags & ROCKET_CLOSING) { | 903 | if (port->flags & ASYNC_CLOSING) { |
989 | retval = wait_for_completion_interruptible(&info->close_wait); | 904 | retval = wait_for_completion_interruptible(&info->close_wait); |
990 | free_page(page); | 905 | free_page(page); |
991 | if (retval) | 906 | if (retval) |
992 | return retval; | 907 | return retval; |
993 | return ((info->flags & ROCKET_HUP_NOTIFY) ? -EAGAIN : -ERESTARTSYS); | 908 | return ((port->flags & ASYNC_HUP_NOTIFY) ? -EAGAIN : -ERESTARTSYS); |
994 | } | 909 | } |
995 | 910 | ||
996 | /* | 911 | /* |
@@ -1002,9 +917,9 @@ static int rp_open(struct tty_struct *tty, struct file *filp) | |||
1002 | info->xmit_buf = (unsigned char *) page; | 917 | info->xmit_buf = (unsigned char *) page; |
1003 | 918 | ||
1004 | tty->driver_data = info; | 919 | tty->driver_data = info; |
1005 | info->port.tty = tty; | 920 | tty_port_tty_set(port, tty); |
1006 | 921 | ||
1007 | if (info->port.count++ == 0) { | 922 | if (port->count++ == 0) { |
1008 | atomic_inc(&rp_num_ports_open); | 923 | atomic_inc(&rp_num_ports_open); |
1009 | 924 | ||
1010 | #ifdef ROCKET_DEBUG_OPEN | 925 | #ifdef ROCKET_DEBUG_OPEN |
@@ -1019,7 +934,7 @@ static int rp_open(struct tty_struct *tty, struct file *filp) | |||
1019 | /* | 934 | /* |
1020 | * Info->count is now 1; so it's safe to sleep now. | 935 | * Info->count is now 1; so it's safe to sleep now. |
1021 | */ | 936 | */ |
1022 | if ((info->flags & ROCKET_INITIALIZED) == 0) { | 937 | if (!test_bit(ASYNC_INITIALIZED, &port->flags)) { |
1023 | cp = &info->channel; | 938 | cp = &info->channel; |
1024 | sSetRxTrigger(cp, TRIG_1); | 939 | sSetRxTrigger(cp, TRIG_1); |
1025 | if (sGetChanStatus(cp) & CD_ACT) | 940 | if (sGetChanStatus(cp) & CD_ACT) |
@@ -1043,21 +958,21 @@ static int rp_open(struct tty_struct *tty, struct file *filp) | |||
1043 | sEnRxFIFO(cp); | 958 | sEnRxFIFO(cp); |
1044 | sEnTransmit(cp); | 959 | sEnTransmit(cp); |
1045 | 960 | ||
1046 | info->flags |= ROCKET_INITIALIZED; | 961 | set_bit(ASYNC_INITIALIZED, &info->port.flags); |
1047 | 962 | ||
1048 | /* | 963 | /* |
1049 | * Set up the tty->alt_speed kludge | 964 | * Set up the tty->alt_speed kludge |
1050 | */ | 965 | */ |
1051 | if ((info->flags & ROCKET_SPD_MASK) == ROCKET_SPD_HI) | 966 | if ((info->flags & ROCKET_SPD_MASK) == ROCKET_SPD_HI) |
1052 | info->port.tty->alt_speed = 57600; | 967 | tty->alt_speed = 57600; |
1053 | if ((info->flags & ROCKET_SPD_MASK) == ROCKET_SPD_VHI) | 968 | if ((info->flags & ROCKET_SPD_MASK) == ROCKET_SPD_VHI) |
1054 | info->port.tty->alt_speed = 115200; | 969 | tty->alt_speed = 115200; |
1055 | if ((info->flags & ROCKET_SPD_MASK) == ROCKET_SPD_SHI) | 970 | if ((info->flags & ROCKET_SPD_MASK) == ROCKET_SPD_SHI) |
1056 | info->port.tty->alt_speed = 230400; | 971 | tty->alt_speed = 230400; |
1057 | if ((info->flags & ROCKET_SPD_MASK) == ROCKET_SPD_WARP) | 972 | if ((info->flags & ROCKET_SPD_MASK) == ROCKET_SPD_WARP) |
1058 | info->port.tty->alt_speed = 460800; | 973 | tty->alt_speed = 460800; |
1059 | 974 | ||
1060 | configure_r_port(info, NULL); | 975 | configure_r_port(tty, info, NULL); |
1061 | if (tty->termios->c_cflag & CBAUD) { | 976 | if (tty->termios->c_cflag & CBAUD) { |
1062 | sSetDTR(cp); | 977 | sSetDTR(cp); |
1063 | sSetRTS(cp); | 978 | sSetRTS(cp); |
@@ -1066,7 +981,7 @@ static int rp_open(struct tty_struct *tty, struct file *filp) | |||
1066 | /* Starts (or resets) the maint polling loop */ | 981 | /* Starts (or resets) the maint polling loop */ |
1067 | mod_timer(&rocket_timer, jiffies + POLL_PERIOD); | 982 | mod_timer(&rocket_timer, jiffies + POLL_PERIOD); |
1068 | 983 | ||
1069 | retval = block_til_ready(tty, filp, info); | 984 | retval = tty_port_block_til_ready(port, tty, filp); |
1070 | if (retval) { | 985 | if (retval) { |
1071 | #ifdef ROCKET_DEBUG_OPEN | 986 | #ifdef ROCKET_DEBUG_OPEN |
1072 | printk(KERN_INFO "rp_open returning after block_til_ready with %d\n", retval); | 987 | printk(KERN_INFO "rp_open returning after block_til_ready with %d\n", retval); |
@@ -1081,8 +996,8 @@ static int rp_open(struct tty_struct *tty, struct file *filp) | |||
1081 | */ | 996 | */ |
1082 | static void rp_close(struct tty_struct *tty, struct file *filp) | 997 | static void rp_close(struct tty_struct *tty, struct file *filp) |
1083 | { | 998 | { |
1084 | struct r_port *info = (struct r_port *) tty->driver_data; | 999 | struct r_port *info = tty->driver_data; |
1085 | unsigned long flags; | 1000 | struct tty_port *port = &info->port; |
1086 | int timeout; | 1001 | int timeout; |
1087 | CHANNEL_t *cp; | 1002 | CHANNEL_t *cp; |
1088 | 1003 | ||
@@ -1093,53 +1008,10 @@ static void rp_close(struct tty_struct *tty, struct file *filp) | |||
1093 | printk(KERN_INFO "rp_close ttyR%d, count = %d\n", info->line, info->port.count); | 1008 | printk(KERN_INFO "rp_close ttyR%d, count = %d\n", info->line, info->port.count); |
1094 | #endif | 1009 | #endif |
1095 | 1010 | ||
1096 | if (tty_hung_up_p(filp)) | 1011 | if (tty_port_close_start(port, tty, filp) == 0) |
1097 | return; | ||
1098 | spin_lock_irqsave(&info->slock, flags); | ||
1099 | |||
1100 | if ((tty->count == 1) && (info->port.count != 1)) { | ||
1101 | /* | ||
1102 | * Uh, oh. tty->count is 1, which means that the tty | ||
1103 | * structure will be freed. Info->count should always | ||
1104 | * be one in these conditions. If it's greater than | ||
1105 | * one, we've got real problems, since it means the | ||
1106 | * serial port won't be shutdown. | ||
1107 | */ | ||
1108 | printk(KERN_WARNING "rp_close: bad serial port count; " | ||
1109 | "tty->count is 1, info->port.count is %d\n", info->port.count); | ||
1110 | info->port.count = 1; | ||
1111 | } | ||
1112 | if (--info->port.count < 0) { | ||
1113 | printk(KERN_WARNING "rp_close: bad serial port count for " | ||
1114 | "ttyR%d: %d\n", info->line, info->port.count); | ||
1115 | info->port.count = 0; | ||
1116 | } | ||
1117 | if (info->port.count) { | ||
1118 | spin_unlock_irqrestore(&info->slock, flags); | ||
1119 | return; | 1012 | return; |
1120 | } | ||
1121 | info->flags |= ROCKET_CLOSING; | ||
1122 | spin_unlock_irqrestore(&info->slock, flags); | ||
1123 | 1013 | ||
1124 | cp = &info->channel; | 1014 | cp = &info->channel; |
1125 | |||
1126 | /* | ||
1127 | * Notify the line discpline to only process XON/XOFF characters | ||
1128 | */ | ||
1129 | tty->closing = 1; | ||
1130 | |||
1131 | /* | ||
1132 | * If transmission was throttled by the application request, | ||
1133 | * just flush the xmit buffer. | ||
1134 | */ | ||
1135 | if (tty->flow_stopped) | ||
1136 | rp_flush_buffer(tty); | ||
1137 | |||
1138 | /* | ||
1139 | * Wait for the transmit buffer to clear | ||
1140 | */ | ||
1141 | if (info->port.closing_wait != ROCKET_CLOSING_WAIT_NONE) | ||
1142 | tty_wait_until_sent(tty, info->port.closing_wait); | ||
1143 | /* | 1015 | /* |
1144 | * Before we drop DTR, make sure the UART transmitter | 1016 | * Before we drop DTR, make sure the UART transmitter |
1145 | * has completely drained; this is especially | 1017 | * has completely drained; this is especially |
@@ -1168,19 +1040,24 @@ static void rp_close(struct tty_struct *tty, struct file *filp) | |||
1168 | 1040 | ||
1169 | clear_bit((info->aiop * 8) + info->chan, (void *) &xmit_flags[info->board]); | 1041 | clear_bit((info->aiop * 8) + info->chan, (void *) &xmit_flags[info->board]); |
1170 | 1042 | ||
1171 | if (info->port.blocked_open) { | 1043 | /* We can't yet use tty_port_close_end as the buffer handling in this |
1172 | if (info->port.close_delay) { | 1044 | driver is a bit different to the usual */ |
1173 | msleep_interruptible(jiffies_to_msecs(info->port.close_delay)); | 1045 | |
1046 | if (port->blocked_open) { | ||
1047 | if (port->close_delay) { | ||
1048 | msleep_interruptible(jiffies_to_msecs(port->close_delay)); | ||
1174 | } | 1049 | } |
1175 | wake_up_interruptible(&info->port.open_wait); | 1050 | wake_up_interruptible(&port->open_wait); |
1176 | } else { | 1051 | } else { |
1177 | if (info->xmit_buf) { | 1052 | if (info->xmit_buf) { |
1178 | free_page((unsigned long) info->xmit_buf); | 1053 | free_page((unsigned long) info->xmit_buf); |
1179 | info->xmit_buf = NULL; | 1054 | info->xmit_buf = NULL; |
1180 | } | 1055 | } |
1181 | } | 1056 | } |
1182 | info->flags &= ~(ROCKET_INITIALIZED | ROCKET_CLOSING | ROCKET_NORMAL_ACTIVE); | 1057 | info->port.flags &= ~(ASYNC_INITIALIZED | ASYNC_CLOSING | ASYNC_NORMAL_ACTIVE); |
1183 | tty->closing = 0; | 1058 | tty->closing = 0; |
1059 | tty_port_tty_set(port, NULL); | ||
1060 | wake_up_interruptible(&port->close_wait); | ||
1184 | complete_all(&info->close_wait); | 1061 | complete_all(&info->close_wait); |
1185 | atomic_dec(&rp_num_ports_open); | 1062 | atomic_dec(&rp_num_ports_open); |
1186 | 1063 | ||
@@ -1195,7 +1072,7 @@ static void rp_close(struct tty_struct *tty, struct file *filp) | |||
1195 | static void rp_set_termios(struct tty_struct *tty, | 1072 | static void rp_set_termios(struct tty_struct *tty, |
1196 | struct ktermios *old_termios) | 1073 | struct ktermios *old_termios) |
1197 | { | 1074 | { |
1198 | struct r_port *info = (struct r_port *) tty->driver_data; | 1075 | struct r_port *info = tty->driver_data; |
1199 | CHANNEL_t *cp; | 1076 | CHANNEL_t *cp; |
1200 | unsigned cflag; | 1077 | unsigned cflag; |
1201 | 1078 | ||
@@ -1213,7 +1090,7 @@ static void rp_set_termios(struct tty_struct *tty, | |||
1213 | /* Or CMSPAR */ | 1090 | /* Or CMSPAR */ |
1214 | tty->termios->c_cflag &= ~CMSPAR; | 1091 | tty->termios->c_cflag &= ~CMSPAR; |
1215 | 1092 | ||
1216 | configure_r_port(info, old_termios); | 1093 | configure_r_port(tty, info, old_termios); |
1217 | 1094 | ||
1218 | cp = &info->channel; | 1095 | cp = &info->channel; |
1219 | 1096 | ||
@@ -1238,7 +1115,7 @@ static void rp_set_termios(struct tty_struct *tty, | |||
1238 | 1115 | ||
1239 | static int rp_break(struct tty_struct *tty, int break_state) | 1116 | static int rp_break(struct tty_struct *tty, int break_state) |
1240 | { | 1117 | { |
1241 | struct r_port *info = (struct r_port *) tty->driver_data; | 1118 | struct r_port *info = tty->driver_data; |
1242 | unsigned long flags; | 1119 | unsigned long flags; |
1243 | 1120 | ||
1244 | if (rocket_paranoia_check(info, "rp_break")) | 1121 | if (rocket_paranoia_check(info, "rp_break")) |
@@ -1284,7 +1161,7 @@ static int sGetChanRI(CHANNEL_T * ChP) | |||
1284 | */ | 1161 | */ |
1285 | static int rp_tiocmget(struct tty_struct *tty, struct file *file) | 1162 | static int rp_tiocmget(struct tty_struct *tty, struct file *file) |
1286 | { | 1163 | { |
1287 | struct r_port *info = (struct r_port *)tty->driver_data; | 1164 | struct r_port *info = tty->driver_data; |
1288 | unsigned int control, result, ChanStatus; | 1165 | unsigned int control, result, ChanStatus; |
1289 | 1166 | ||
1290 | ChanStatus = sGetChanStatusLo(&info->channel); | 1167 | ChanStatus = sGetChanStatusLo(&info->channel); |
@@ -1305,7 +1182,7 @@ static int rp_tiocmget(struct tty_struct *tty, struct file *file) | |||
1305 | static int rp_tiocmset(struct tty_struct *tty, struct file *file, | 1182 | static int rp_tiocmset(struct tty_struct *tty, struct file *file, |
1306 | unsigned int set, unsigned int clear) | 1183 | unsigned int set, unsigned int clear) |
1307 | { | 1184 | { |
1308 | struct r_port *info = (struct r_port *)tty->driver_data; | 1185 | struct r_port *info = tty->driver_data; |
1309 | 1186 | ||
1310 | if (set & TIOCM_RTS) | 1187 | if (set & TIOCM_RTS) |
1311 | info->channel.TxControl[3] |= SET_RTS; | 1188 | info->channel.TxControl[3] |= SET_RTS; |
@@ -1338,7 +1215,8 @@ static int get_config(struct r_port *info, struct rocket_config __user *retinfo) | |||
1338 | return 0; | 1215 | return 0; |
1339 | } | 1216 | } |
1340 | 1217 | ||
1341 | static int set_config(struct r_port *info, struct rocket_config __user *new_info) | 1218 | static int set_config(struct tty_struct *tty, struct r_port *info, |
1219 | struct rocket_config __user *new_info) | ||
1342 | { | 1220 | { |
1343 | struct rocket_config new_serial; | 1221 | struct rocket_config new_serial; |
1344 | 1222 | ||
@@ -1350,7 +1228,7 @@ static int set_config(struct r_port *info, struct rocket_config __user *new_info | |||
1350 | if ((new_serial.flags & ~ROCKET_USR_MASK) != (info->flags & ~ROCKET_USR_MASK)) | 1228 | if ((new_serial.flags & ~ROCKET_USR_MASK) != (info->flags & ~ROCKET_USR_MASK)) |
1351 | return -EPERM; | 1229 | return -EPERM; |
1352 | info->flags = ((info->flags & ~ROCKET_USR_MASK) | (new_serial.flags & ROCKET_USR_MASK)); | 1230 | info->flags = ((info->flags & ~ROCKET_USR_MASK) | (new_serial.flags & ROCKET_USR_MASK)); |
1353 | configure_r_port(info, NULL); | 1231 | configure_r_port(tty, info, NULL); |
1354 | return 0; | 1232 | return 0; |
1355 | } | 1233 | } |
1356 | 1234 | ||
@@ -1359,15 +1237,15 @@ static int set_config(struct r_port *info, struct rocket_config __user *new_info | |||
1359 | info->port.closing_wait = new_serial.closing_wait; | 1237 | info->port.closing_wait = new_serial.closing_wait; |
1360 | 1238 | ||
1361 | if ((info->flags & ROCKET_SPD_MASK) == ROCKET_SPD_HI) | 1239 | if ((info->flags & ROCKET_SPD_MASK) == ROCKET_SPD_HI) |
1362 | info->port.tty->alt_speed = 57600; | 1240 | tty->alt_speed = 57600; |
1363 | if ((info->flags & ROCKET_SPD_MASK) == ROCKET_SPD_VHI) | 1241 | if ((info->flags & ROCKET_SPD_MASK) == ROCKET_SPD_VHI) |
1364 | info->port.tty->alt_speed = 115200; | 1242 | tty->alt_speed = 115200; |
1365 | if ((info->flags & ROCKET_SPD_MASK) == ROCKET_SPD_SHI) | 1243 | if ((info->flags & ROCKET_SPD_MASK) == ROCKET_SPD_SHI) |
1366 | info->port.tty->alt_speed = 230400; | 1244 | tty->alt_speed = 230400; |
1367 | if ((info->flags & ROCKET_SPD_MASK) == ROCKET_SPD_WARP) | 1245 | if ((info->flags & ROCKET_SPD_MASK) == ROCKET_SPD_WARP) |
1368 | info->port.tty->alt_speed = 460800; | 1246 | tty->alt_speed = 460800; |
1369 | 1247 | ||
1370 | configure_r_port(info, NULL); | 1248 | configure_r_port(tty, info, NULL); |
1371 | return 0; | 1249 | return 0; |
1372 | } | 1250 | } |
1373 | 1251 | ||
@@ -1434,7 +1312,7 @@ static int get_version(struct r_port *info, struct rocket_version __user *retver | |||
1434 | static int rp_ioctl(struct tty_struct *tty, struct file *file, | 1312 | static int rp_ioctl(struct tty_struct *tty, struct file *file, |
1435 | unsigned int cmd, unsigned long arg) | 1313 | unsigned int cmd, unsigned long arg) |
1436 | { | 1314 | { |
1437 | struct r_port *info = (struct r_port *) tty->driver_data; | 1315 | struct r_port *info = tty->driver_data; |
1438 | void __user *argp = (void __user *)arg; | 1316 | void __user *argp = (void __user *)arg; |
1439 | int ret = 0; | 1317 | int ret = 0; |
1440 | 1318 | ||
@@ -1452,7 +1330,7 @@ static int rp_ioctl(struct tty_struct *tty, struct file *file, | |||
1452 | ret = get_config(info, argp); | 1330 | ret = get_config(info, argp); |
1453 | break; | 1331 | break; |
1454 | case RCKP_SET_CONFIG: | 1332 | case RCKP_SET_CONFIG: |
1455 | ret = set_config(info, argp); | 1333 | ret = set_config(tty, info, argp); |
1456 | break; | 1334 | break; |
1457 | case RCKP_GET_PORTS: | 1335 | case RCKP_GET_PORTS: |
1458 | ret = get_ports(info, argp); | 1336 | ret = get_ports(info, argp); |
@@ -1472,7 +1350,7 @@ static int rp_ioctl(struct tty_struct *tty, struct file *file, | |||
1472 | 1350 | ||
1473 | static void rp_send_xchar(struct tty_struct *tty, char ch) | 1351 | static void rp_send_xchar(struct tty_struct *tty, char ch) |
1474 | { | 1352 | { |
1475 | struct r_port *info = (struct r_port *) tty->driver_data; | 1353 | struct r_port *info = tty->driver_data; |
1476 | CHANNEL_t *cp; | 1354 | CHANNEL_t *cp; |
1477 | 1355 | ||
1478 | if (rocket_paranoia_check(info, "rp_send_xchar")) | 1356 | if (rocket_paranoia_check(info, "rp_send_xchar")) |
@@ -1487,7 +1365,7 @@ static void rp_send_xchar(struct tty_struct *tty, char ch) | |||
1487 | 1365 | ||
1488 | static void rp_throttle(struct tty_struct *tty) | 1366 | static void rp_throttle(struct tty_struct *tty) |
1489 | { | 1367 | { |
1490 | struct r_port *info = (struct r_port *) tty->driver_data; | 1368 | struct r_port *info = tty->driver_data; |
1491 | CHANNEL_t *cp; | 1369 | CHANNEL_t *cp; |
1492 | 1370 | ||
1493 | #ifdef ROCKET_DEBUG_THROTTLE | 1371 | #ifdef ROCKET_DEBUG_THROTTLE |
@@ -1507,7 +1385,7 @@ static void rp_throttle(struct tty_struct *tty) | |||
1507 | 1385 | ||
1508 | static void rp_unthrottle(struct tty_struct *tty) | 1386 | static void rp_unthrottle(struct tty_struct *tty) |
1509 | { | 1387 | { |
1510 | struct r_port *info = (struct r_port *) tty->driver_data; | 1388 | struct r_port *info = tty->driver_data; |
1511 | CHANNEL_t *cp; | 1389 | CHANNEL_t *cp; |
1512 | #ifdef ROCKET_DEBUG_THROTTLE | 1390 | #ifdef ROCKET_DEBUG_THROTTLE |
1513 | printk(KERN_INFO "unthrottle %s: %d....\n", tty->name, | 1391 | printk(KERN_INFO "unthrottle %s: %d....\n", tty->name, |
@@ -1534,7 +1412,7 @@ static void rp_unthrottle(struct tty_struct *tty) | |||
1534 | */ | 1412 | */ |
1535 | static void rp_stop(struct tty_struct *tty) | 1413 | static void rp_stop(struct tty_struct *tty) |
1536 | { | 1414 | { |
1537 | struct r_port *info = (struct r_port *) tty->driver_data; | 1415 | struct r_port *info = tty->driver_data; |
1538 | 1416 | ||
1539 | #ifdef ROCKET_DEBUG_FLOW | 1417 | #ifdef ROCKET_DEBUG_FLOW |
1540 | printk(KERN_INFO "stop %s: %d %d....\n", tty->name, | 1418 | printk(KERN_INFO "stop %s: %d %d....\n", tty->name, |
@@ -1550,7 +1428,7 @@ static void rp_stop(struct tty_struct *tty) | |||
1550 | 1428 | ||
1551 | static void rp_start(struct tty_struct *tty) | 1429 | static void rp_start(struct tty_struct *tty) |
1552 | { | 1430 | { |
1553 | struct r_port *info = (struct r_port *) tty->driver_data; | 1431 | struct r_port *info = tty->driver_data; |
1554 | 1432 | ||
1555 | #ifdef ROCKET_DEBUG_FLOW | 1433 | #ifdef ROCKET_DEBUG_FLOW |
1556 | printk(KERN_INFO "start %s: %d %d....\n", tty->name, | 1434 | printk(KERN_INFO "start %s: %d %d....\n", tty->name, |
@@ -1570,7 +1448,7 @@ static void rp_start(struct tty_struct *tty) | |||
1570 | */ | 1448 | */ |
1571 | static void rp_wait_until_sent(struct tty_struct *tty, int timeout) | 1449 | static void rp_wait_until_sent(struct tty_struct *tty, int timeout) |
1572 | { | 1450 | { |
1573 | struct r_port *info = (struct r_port *) tty->driver_data; | 1451 | struct r_port *info = tty->driver_data; |
1574 | CHANNEL_t *cp; | 1452 | CHANNEL_t *cp; |
1575 | unsigned long orig_jiffies; | 1453 | unsigned long orig_jiffies; |
1576 | int check_time, exit_time; | 1454 | int check_time, exit_time; |
@@ -1627,7 +1505,7 @@ static void rp_wait_until_sent(struct tty_struct *tty, int timeout) | |||
1627 | static void rp_hangup(struct tty_struct *tty) | 1505 | static void rp_hangup(struct tty_struct *tty) |
1628 | { | 1506 | { |
1629 | CHANNEL_t *cp; | 1507 | CHANNEL_t *cp; |
1630 | struct r_port *info = (struct r_port *) tty->driver_data; | 1508 | struct r_port *info = tty->driver_data; |
1631 | 1509 | ||
1632 | if (rocket_paranoia_check(info, "rp_hangup")) | 1510 | if (rocket_paranoia_check(info, "rp_hangup")) |
1633 | return; | 1511 | return; |
@@ -1636,15 +1514,13 @@ static void rp_hangup(struct tty_struct *tty) | |||
1636 | printk(KERN_INFO "rp_hangup of ttyR%d...\n", info->line); | 1514 | printk(KERN_INFO "rp_hangup of ttyR%d...\n", info->line); |
1637 | #endif | 1515 | #endif |
1638 | rp_flush_buffer(tty); | 1516 | rp_flush_buffer(tty); |
1639 | if (info->flags & ROCKET_CLOSING) | 1517 | if (info->port.flags & ASYNC_CLOSING) |
1640 | return; | 1518 | return; |
1641 | if (info->port.count) | 1519 | if (info->port.count) |
1642 | atomic_dec(&rp_num_ports_open); | 1520 | atomic_dec(&rp_num_ports_open); |
1643 | clear_bit((info->aiop * 8) + info->chan, (void *) &xmit_flags[info->board]); | 1521 | clear_bit((info->aiop * 8) + info->chan, (void *) &xmit_flags[info->board]); |
1644 | 1522 | ||
1645 | info->port.count = 0; | 1523 | tty_port_hangup(&info->port); |
1646 | info->flags &= ~ROCKET_NORMAL_ACTIVE; | ||
1647 | info->port.tty = NULL; | ||
1648 | 1524 | ||
1649 | cp = &info->channel; | 1525 | cp = &info->channel; |
1650 | sDisRxFIFO(cp); | 1526 | sDisRxFIFO(cp); |
@@ -1653,7 +1529,7 @@ static void rp_hangup(struct tty_struct *tty) | |||
1653 | sDisCTSFlowCtl(cp); | 1529 | sDisCTSFlowCtl(cp); |
1654 | sDisTxSoftFlowCtl(cp); | 1530 | sDisTxSoftFlowCtl(cp); |
1655 | sClrTxXOFF(cp); | 1531 | sClrTxXOFF(cp); |
1656 | info->flags &= ~ROCKET_INITIALIZED; | 1532 | info->port.flags &= ~ASYNC_INITIALIZED; |
1657 | 1533 | ||
1658 | wake_up_interruptible(&info->port.open_wait); | 1534 | wake_up_interruptible(&info->port.open_wait); |
1659 | } | 1535 | } |
@@ -1667,7 +1543,7 @@ static void rp_hangup(struct tty_struct *tty) | |||
1667 | */ | 1543 | */ |
1668 | static int rp_put_char(struct tty_struct *tty, unsigned char ch) | 1544 | static int rp_put_char(struct tty_struct *tty, unsigned char ch) |
1669 | { | 1545 | { |
1670 | struct r_port *info = (struct r_port *) tty->driver_data; | 1546 | struct r_port *info = tty->driver_data; |
1671 | CHANNEL_t *cp; | 1547 | CHANNEL_t *cp; |
1672 | unsigned long flags; | 1548 | unsigned long flags; |
1673 | 1549 | ||
@@ -1714,7 +1590,7 @@ static int rp_put_char(struct tty_struct *tty, unsigned char ch) | |||
1714 | static int rp_write(struct tty_struct *tty, | 1590 | static int rp_write(struct tty_struct *tty, |
1715 | const unsigned char *buf, int count) | 1591 | const unsigned char *buf, int count) |
1716 | { | 1592 | { |
1717 | struct r_port *info = (struct r_port *) tty->driver_data; | 1593 | struct r_port *info = tty->driver_data; |
1718 | CHANNEL_t *cp; | 1594 | CHANNEL_t *cp; |
1719 | const unsigned char *b; | 1595 | const unsigned char *b; |
1720 | int c, retval = 0; | 1596 | int c, retval = 0; |
@@ -1764,7 +1640,8 @@ static int rp_write(struct tty_struct *tty, | |||
1764 | 1640 | ||
1765 | /* Write remaining data into the port's xmit_buf */ | 1641 | /* Write remaining data into the port's xmit_buf */ |
1766 | while (1) { | 1642 | while (1) { |
1767 | if (!info->port.tty) /* Seemingly obligatory check... */ | 1643 | /* Hung up ? */ |
1644 | if (!test_bit(ASYNC_NORMAL_ACTIVE, &info->port.flags)) | ||
1768 | goto end; | 1645 | goto end; |
1769 | c = min(count, XMIT_BUF_SIZE - info->xmit_cnt - 1); | 1646 | c = min(count, XMIT_BUF_SIZE - info->xmit_cnt - 1); |
1770 | c = min(c, XMIT_BUF_SIZE - info->xmit_head); | 1647 | c = min(c, XMIT_BUF_SIZE - info->xmit_head); |
@@ -1806,7 +1683,7 @@ end: | |||
1806 | */ | 1683 | */ |
1807 | static int rp_write_room(struct tty_struct *tty) | 1684 | static int rp_write_room(struct tty_struct *tty) |
1808 | { | 1685 | { |
1809 | struct r_port *info = (struct r_port *) tty->driver_data; | 1686 | struct r_port *info = tty->driver_data; |
1810 | int ret; | 1687 | int ret; |
1811 | 1688 | ||
1812 | if (rocket_paranoia_check(info, "rp_write_room")) | 1689 | if (rocket_paranoia_check(info, "rp_write_room")) |
@@ -1827,7 +1704,7 @@ static int rp_write_room(struct tty_struct *tty) | |||
1827 | */ | 1704 | */ |
1828 | static int rp_chars_in_buffer(struct tty_struct *tty) | 1705 | static int rp_chars_in_buffer(struct tty_struct *tty) |
1829 | { | 1706 | { |
1830 | struct r_port *info = (struct r_port *) tty->driver_data; | 1707 | struct r_port *info = tty->driver_data; |
1831 | CHANNEL_t *cp; | 1708 | CHANNEL_t *cp; |
1832 | 1709 | ||
1833 | if (rocket_paranoia_check(info, "rp_chars_in_buffer")) | 1710 | if (rocket_paranoia_check(info, "rp_chars_in_buffer")) |
@@ -1848,7 +1725,7 @@ static int rp_chars_in_buffer(struct tty_struct *tty) | |||
1848 | */ | 1725 | */ |
1849 | static void rp_flush_buffer(struct tty_struct *tty) | 1726 | static void rp_flush_buffer(struct tty_struct *tty) |
1850 | { | 1727 | { |
1851 | struct r_port *info = (struct r_port *) tty->driver_data; | 1728 | struct r_port *info = tty->driver_data; |
1852 | CHANNEL_t *cp; | 1729 | CHANNEL_t *cp; |
1853 | unsigned long flags; | 1730 | unsigned long flags; |
1854 | 1731 | ||
@@ -2371,6 +2248,11 @@ static const struct tty_operations rocket_ops = { | |||
2371 | .tiocmset = rp_tiocmset, | 2248 | .tiocmset = rp_tiocmset, |
2372 | }; | 2249 | }; |
2373 | 2250 | ||
2251 | static const struct tty_port_operations rocket_port_ops = { | ||
2252 | .carrier_raised = carrier_raised, | ||
2253 | .raise_dtr_rts = raise_dtr_rts, | ||
2254 | }; | ||
2255 | |||
2374 | /* | 2256 | /* |
2375 | * The module "startup" routine; it's run when the module is loaded. | 2257 | * The module "startup" routine; it's run when the module is loaded. |
2376 | */ | 2258 | */ |
diff --git a/drivers/char/rocket.h b/drivers/char/rocket.h index a8b09195ebba..ec863f35f1a9 100644 --- a/drivers/char/rocket.h +++ b/drivers/char/rocket.h | |||
@@ -39,7 +39,7 @@ struct rocket_version { | |||
39 | /* | 39 | /* |
40 | * Rocketport flags | 40 | * Rocketport flags |
41 | */ | 41 | */ |
42 | #define ROCKET_CALLOUT_NOHUP 0x00000001 | 42 | /*#define ROCKET_CALLOUT_NOHUP 0x00000001 */ |
43 | #define ROCKET_FORCE_CD 0x00000002 | 43 | #define ROCKET_FORCE_CD 0x00000002 |
44 | #define ROCKET_HUP_NOTIFY 0x00000004 | 44 | #define ROCKET_HUP_NOTIFY 0x00000004 |
45 | #define ROCKET_SPLIT_TERMIOS 0x00000008 | 45 | #define ROCKET_SPLIT_TERMIOS 0x00000008 |
diff --git a/drivers/char/rocket_int.h b/drivers/char/rocket_int.h index 21f3ff53ba32..67e0f1e778a2 100644 --- a/drivers/char/rocket_int.h +++ b/drivers/char/rocket_int.h | |||
@@ -1162,11 +1162,6 @@ struct r_port { | |||
1162 | /* number of characters left in xmit buffer before we ask for more */ | 1162 | /* number of characters left in xmit buffer before we ask for more */ |
1163 | #define WAKEUP_CHARS 256 | 1163 | #define WAKEUP_CHARS 256 |
1164 | 1164 | ||
1165 | /* Internal flags used only by the rocketport driver */ | ||
1166 | #define ROCKET_INITIALIZED 0x80000000 /* Port is active */ | ||
1167 | #define ROCKET_CLOSING 0x40000000 /* Serial port is closing */ | ||
1168 | #define ROCKET_NORMAL_ACTIVE 0x20000000 /* Normal port is active */ | ||
1169 | |||
1170 | /* | 1165 | /* |
1171 | * Assigned major numbers for the Comtrol Rocketport | 1166 | * Assigned major numbers for the Comtrol Rocketport |
1172 | */ | 1167 | */ |
diff --git a/drivers/char/selection.c b/drivers/char/selection.c index 2978a49a172b..f29fbe9b8ed7 100644 --- a/drivers/char/selection.c +++ b/drivers/char/selection.c | |||
@@ -306,7 +306,7 @@ int set_selection(const struct tiocl_selection __user *sel, struct tty_struct *t | |||
306 | */ | 306 | */ |
307 | int paste_selection(struct tty_struct *tty) | 307 | int paste_selection(struct tty_struct *tty) |
308 | { | 308 | { |
309 | struct vc_data *vc = (struct vc_data *)tty->driver_data; | 309 | struct vc_data *vc = tty->driver_data; |
310 | int pasted = 0; | 310 | int pasted = 0; |
311 | unsigned int count; | 311 | unsigned int count; |
312 | struct tty_ldisc *ld; | 312 | struct tty_ldisc *ld; |
diff --git a/drivers/char/ser_a2232.c b/drivers/char/ser_a2232.c index 7b0c35207d9b..33872a219df6 100644 --- a/drivers/char/ser_a2232.c +++ b/drivers/char/ser_a2232.c | |||
@@ -122,7 +122,7 @@ static void a2232_disable_tx_interrupts(void *ptr); | |||
122 | static void a2232_enable_tx_interrupts(void *ptr); | 122 | static void a2232_enable_tx_interrupts(void *ptr); |
123 | static void a2232_disable_rx_interrupts(void *ptr); | 123 | static void a2232_disable_rx_interrupts(void *ptr); |
124 | static void a2232_enable_rx_interrupts(void *ptr); | 124 | static void a2232_enable_rx_interrupts(void *ptr); |
125 | static int a2232_get_CD(void *ptr); | 125 | static int a2232_carrier_raised(struct tty_port *port); |
126 | static void a2232_shutdown_port(void *ptr); | 126 | static void a2232_shutdown_port(void *ptr); |
127 | static int a2232_set_real_termios(void *ptr); | 127 | static int a2232_set_real_termios(void *ptr); |
128 | static int a2232_chars_in_buffer(void *ptr); | 128 | static int a2232_chars_in_buffer(void *ptr); |
@@ -148,7 +148,6 @@ static struct real_driver a2232_real_driver = { | |||
148 | a2232_enable_tx_interrupts, | 148 | a2232_enable_tx_interrupts, |
149 | a2232_disable_rx_interrupts, | 149 | a2232_disable_rx_interrupts, |
150 | a2232_enable_rx_interrupts, | 150 | a2232_enable_rx_interrupts, |
151 | a2232_get_CD, | ||
152 | a2232_shutdown_port, | 151 | a2232_shutdown_port, |
153 | a2232_set_real_termios, | 152 | a2232_set_real_termios, |
154 | a2232_chars_in_buffer, | 153 | a2232_chars_in_buffer, |
@@ -260,9 +259,10 @@ static void a2232_enable_rx_interrupts(void *ptr) | |||
260 | port->disable_rx = 0; | 259 | port->disable_rx = 0; |
261 | } | 260 | } |
262 | 261 | ||
263 | static int a2232_get_CD(void *ptr) | 262 | static int a2232_carrier_raised(struct tty_port *port) |
264 | { | 263 | { |
265 | return ((struct a2232_port *) ptr)->cd_status; | 264 | struct a2232_port *ap = container_of(port, struct a2232_port, gs.port); |
265 | return ap->cd_status; | ||
266 | } | 266 | } |
267 | 267 | ||
268 | static void a2232_shutdown_port(void *ptr) | 268 | static void a2232_shutdown_port(void *ptr) |
@@ -460,14 +460,14 @@ static void a2232_throttle(struct tty_struct *tty) | |||
460 | if switched on. So the only thing we can do at this | 460 | if switched on. So the only thing we can do at this |
461 | layer here is not taking any characters out of the | 461 | layer here is not taking any characters out of the |
462 | A2232 buffer any more. */ | 462 | A2232 buffer any more. */ |
463 | struct a2232_port *port = (struct a2232_port *) tty->driver_data; | 463 | struct a2232_port *port = tty->driver_data; |
464 | port->throttle_input = -1; | 464 | port->throttle_input = -1; |
465 | } | 465 | } |
466 | 466 | ||
467 | static void a2232_unthrottle(struct tty_struct *tty) | 467 | static void a2232_unthrottle(struct tty_struct *tty) |
468 | { | 468 | { |
469 | /* Unthrottle: dual to "throttle()" above. */ | 469 | /* Unthrottle: dual to "throttle()" above. */ |
470 | struct a2232_port *port = (struct a2232_port *) tty->driver_data; | 470 | struct a2232_port *port = tty->driver_data; |
471 | port->throttle_input = 0; | 471 | port->throttle_input = 0; |
472 | } | 472 | } |
473 | 473 | ||
@@ -638,6 +638,10 @@ int ch, err, n, p; | |||
638 | return IRQ_HANDLED; | 638 | return IRQ_HANDLED; |
639 | } | 639 | } |
640 | 640 | ||
641 | static const struct tty_port_operations a2232_port_ops = { | ||
642 | .carrier_raised = a2232_carrier_raised, | ||
643 | }; | ||
644 | |||
641 | static void a2232_init_portstructs(void) | 645 | static void a2232_init_portstructs(void) |
642 | { | 646 | { |
643 | struct a2232_port *port; | 647 | struct a2232_port *port; |
@@ -645,6 +649,8 @@ static void a2232_init_portstructs(void) | |||
645 | 649 | ||
646 | for (i = 0; i < MAX_A2232_BOARDS*NUMLINES; i++) { | 650 | for (i = 0; i < MAX_A2232_BOARDS*NUMLINES; i++) { |
647 | port = a2232_ports + i; | 651 | port = a2232_ports + i; |
652 | tty_port_init(&port->gs.port); | ||
653 | port->gs.port.ops = &a2232_port_ops; | ||
648 | port->which_a2232 = i/NUMLINES; | 654 | port->which_a2232 = i/NUMLINES; |
649 | port->which_port_on_a2232 = i%NUMLINES; | 655 | port->which_port_on_a2232 = i%NUMLINES; |
650 | port->disable_rx = port->throttle_input = port->cd_status = 0; | 656 | port->disable_rx = port->throttle_input = port->cd_status = 0; |
@@ -652,11 +658,6 @@ static void a2232_init_portstructs(void) | |||
652 | port->gs.close_delay = HZ/2; | 658 | port->gs.close_delay = HZ/2; |
653 | port->gs.closing_wait = 30 * HZ; | 659 | port->gs.closing_wait = 30 * HZ; |
654 | port->gs.rd = &a2232_real_driver; | 660 | port->gs.rd = &a2232_real_driver; |
655 | #ifdef NEW_WRITE_LOCKING | ||
656 | mutex_init(&(port->gs.port_write_mutex)); | ||
657 | #endif | ||
658 | init_waitqueue_head(&port->gs.port.open_wait); | ||
659 | init_waitqueue_head(&port->gs.port.close_wait); | ||
660 | } | 661 | } |
661 | } | 662 | } |
662 | 663 | ||
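
The a2232 conversion is the same carrier_raised pattern, except that the tty_port sits one level down inside the generic_serial state (port->gs.port); container_of() accepts the full member path, so the driver can still map back from the tty_port the core hands it. A sketch under the same assumptions, with my_layer/my_port standing in for the gs_port/a2232_port embedding:

#include <linux/kernel.h>
#include <linux/tty.h>

struct my_layer {		/* stands in for struct gs_port */
	struct tty_port port;
};

struct my_port {		/* stands in for struct a2232_port */
	struct my_layer gs;
	int cd_status;		/* last carrier state seen by the ISR */
};

static int my_carrier_raised(struct tty_port *port)
{
	/* The nested member path gs.port steps back out through
	   both embeddings to recover the driver structure. */
	struct my_port *p = container_of(port, struct my_port, gs.port);

	return p->cd_status;
}

As the a2232_init_portstructs() hunk shows, embedded ports that were previously initialised by hand (the open_wait/close_wait queues) now get tty_port_init() plus an ops assignment instead.
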
diff --git a/drivers/char/serial167.c b/drivers/char/serial167.c index a8f15e6be594..f1f24f0ee26f 100644 --- a/drivers/char/serial167.c +++ b/drivers/char/serial167.c | |||
@@ -315,7 +315,7 @@ u_short write_cy_cmd(volatile u_char * base_addr, u_char cmd) | |||
315 | 315 | ||
316 | static void cy_stop(struct tty_struct *tty) | 316 | static void cy_stop(struct tty_struct *tty) |
317 | { | 317 | { |
318 | struct cyclades_port *info = (struct cyclades_port *)tty->driver_data; | 318 | struct cyclades_port *info = tty->driver_data; |
319 | volatile unsigned char *base_addr = (unsigned char *)BASE_ADDR; | 319 | volatile unsigned char *base_addr = (unsigned char *)BASE_ADDR; |
320 | int channel; | 320 | int channel; |
321 | unsigned long flags; | 321 | unsigned long flags; |
@@ -337,7 +337,7 @@ static void cy_stop(struct tty_struct *tty) | |||
337 | 337 | ||
338 | static void cy_start(struct tty_struct *tty) | 338 | static void cy_start(struct tty_struct *tty) |
339 | { | 339 | { |
340 | struct cyclades_port *info = (struct cyclades_port *)tty->driver_data; | 340 | struct cyclades_port *info = tty->driver_data; |
341 | volatile unsigned char *base_addr = (unsigned char *)BASE_ADDR; | 341 | volatile unsigned char *base_addr = (unsigned char *)BASE_ADDR; |
342 | int channel; | 342 | int channel; |
343 | unsigned long flags; | 343 | unsigned long flags; |
@@ -1062,7 +1062,7 @@ static void config_setup(struct cyclades_port *info) | |||
1062 | 1062 | ||
1063 | static int cy_put_char(struct tty_struct *tty, unsigned char ch) | 1063 | static int cy_put_char(struct tty_struct *tty, unsigned char ch) |
1064 | { | 1064 | { |
1065 | struct cyclades_port *info = (struct cyclades_port *)tty->driver_data; | 1065 | struct cyclades_port *info = tty->driver_data; |
1066 | unsigned long flags; | 1066 | unsigned long flags; |
1067 | 1067 | ||
1068 | #ifdef SERIAL_DEBUG_IO | 1068 | #ifdef SERIAL_DEBUG_IO |
@@ -1090,7 +1090,7 @@ static int cy_put_char(struct tty_struct *tty, unsigned char ch) | |||
1090 | 1090 | ||
1091 | static void cy_flush_chars(struct tty_struct *tty) | 1091 | static void cy_flush_chars(struct tty_struct *tty) |
1092 | { | 1092 | { |
1093 | struct cyclades_port *info = (struct cyclades_port *)tty->driver_data; | 1093 | struct cyclades_port *info = tty->driver_data; |
1094 | unsigned long flags; | 1094 | unsigned long flags; |
1095 | volatile unsigned char *base_addr = (u_char *) BASE_ADDR; | 1095 | volatile unsigned char *base_addr = (u_char *) BASE_ADDR; |
1096 | int channel; | 1096 | int channel; |
@@ -1122,7 +1122,7 @@ static void cy_flush_chars(struct tty_struct *tty) | |||
1122 | */ | 1122 | */ |
1123 | static int cy_write(struct tty_struct *tty, const unsigned char *buf, int count) | 1123 | static int cy_write(struct tty_struct *tty, const unsigned char *buf, int count) |
1124 | { | 1124 | { |
1125 | struct cyclades_port *info = (struct cyclades_port *)tty->driver_data; | 1125 | struct cyclades_port *info = tty->driver_data; |
1126 | unsigned long flags; | 1126 | unsigned long flags; |
1127 | int c, total = 0; | 1127 | int c, total = 0; |
1128 | 1128 | ||
@@ -1166,7 +1166,7 @@ static int cy_write(struct tty_struct *tty, const unsigned char *buf, int count) | |||
1166 | 1166 | ||
1167 | static int cy_write_room(struct tty_struct *tty) | 1167 | static int cy_write_room(struct tty_struct *tty) |
1168 | { | 1168 | { |
1169 | struct cyclades_port *info = (struct cyclades_port *)tty->driver_data; | 1169 | struct cyclades_port *info = tty->driver_data; |
1170 | int ret; | 1170 | int ret; |
1171 | 1171 | ||
1172 | #ifdef SERIAL_DEBUG_IO | 1172 | #ifdef SERIAL_DEBUG_IO |
@@ -1183,7 +1183,7 @@ static int cy_write_room(struct tty_struct *tty) | |||
1183 | 1183 | ||
1184 | static int cy_chars_in_buffer(struct tty_struct *tty) | 1184 | static int cy_chars_in_buffer(struct tty_struct *tty) |
1185 | { | 1185 | { |
1186 | struct cyclades_port *info = (struct cyclades_port *)tty->driver_data; | 1186 | struct cyclades_port *info = tty->driver_data; |
1187 | 1187 | ||
1188 | #ifdef SERIAL_DEBUG_IO | 1188 | #ifdef SERIAL_DEBUG_IO |
1189 | printk("cy_chars_in_buffer %s %d\n", tty->name, info->xmit_cnt); /* */ | 1189 | printk("cy_chars_in_buffer %s %d\n", tty->name, info->xmit_cnt); /* */ |
@@ -1197,7 +1197,7 @@ static int cy_chars_in_buffer(struct tty_struct *tty) | |||
1197 | 1197 | ||
1198 | static void cy_flush_buffer(struct tty_struct *tty) | 1198 | static void cy_flush_buffer(struct tty_struct *tty) |
1199 | { | 1199 | { |
1200 | struct cyclades_port *info = (struct cyclades_port *)tty->driver_data; | 1200 | struct cyclades_port *info = tty->driver_data; |
1201 | unsigned long flags; | 1201 | unsigned long flags; |
1202 | 1202 | ||
1203 | #ifdef SERIAL_DEBUG_IO | 1203 | #ifdef SERIAL_DEBUG_IO |
@@ -1218,7 +1218,7 @@ static void cy_flush_buffer(struct tty_struct *tty) | |||
1218 | */ | 1218 | */ |
1219 | static void cy_throttle(struct tty_struct *tty) | 1219 | static void cy_throttle(struct tty_struct *tty) |
1220 | { | 1220 | { |
1221 | struct cyclades_port *info = (struct cyclades_port *)tty->driver_data; | 1221 | struct cyclades_port *info = tty->driver_data; |
1222 | unsigned long flags; | 1222 | unsigned long flags; |
1223 | volatile unsigned char *base_addr = (u_char *) BASE_ADDR; | 1223 | volatile unsigned char *base_addr = (u_char *) BASE_ADDR; |
1224 | int channel; | 1224 | int channel; |
@@ -1250,7 +1250,7 @@ static void cy_throttle(struct tty_struct *tty) | |||
1250 | 1250 | ||
1251 | static void cy_unthrottle(struct tty_struct *tty) | 1251 | static void cy_unthrottle(struct tty_struct *tty) |
1252 | { | 1252 | { |
1253 | struct cyclades_port *info = (struct cyclades_port *)tty->driver_data; | 1253 | struct cyclades_port *info = tty->driver_data; |
1254 | unsigned long flags; | 1254 | unsigned long flags; |
1255 | volatile unsigned char *base_addr = (u_char *) BASE_ADDR; | 1255 | volatile unsigned char *base_addr = (u_char *) BASE_ADDR; |
1256 | int channel; | 1256 | int channel; |
@@ -1345,7 +1345,7 @@ check_and_exit: | |||
1345 | 1345 | ||
1346 | static int cy_tiocmget(struct tty_struct *tty, struct file *file) | 1346 | static int cy_tiocmget(struct tty_struct *tty, struct file *file) |
1347 | { | 1347 | { |
1348 | struct cyclades_port *info = (struct cyclades_port *)tty->driver_data; | 1348 | struct cyclades_port *info = tty->driver_data; |
1349 | int channel; | 1349 | int channel; |
1350 | volatile unsigned char *base_addr = (u_char *) BASE_ADDR; | 1350 | volatile unsigned char *base_addr = (u_char *) BASE_ADDR; |
1351 | unsigned long flags; | 1351 | unsigned long flags; |
@@ -1369,7 +1369,7 @@ static int | |||
1369 | cy_tiocmset(struct tty_struct *tty, struct file *file, | 1369 | cy_tiocmset(struct tty_struct *tty, struct file *file, |
1370 | unsigned int set, unsigned int clear) | 1370 | unsigned int set, unsigned int clear) |
1371 | { | 1371 | { |
1372 | struct cyclades_port *info = (struct cyclades_port *)tty->driver_data; | 1372 | struct cyclades_port *info = tty->driver_data; |
1373 | int channel; | 1373 | int channel; |
1374 | volatile unsigned char *base_addr = (u_char *) BASE_ADDR; | 1374 | volatile unsigned char *base_addr = (u_char *) BASE_ADDR; |
1375 | unsigned long flags; | 1375 | unsigned long flags; |
@@ -1532,7 +1532,7 @@ cy_ioctl(struct tty_struct *tty, struct file *file, | |||
1532 | unsigned int cmd, unsigned long arg) | 1532 | unsigned int cmd, unsigned long arg) |
1533 | { | 1533 | { |
1534 | unsigned long val; | 1534 | unsigned long val; |
1535 | struct cyclades_port *info = (struct cyclades_port *)tty->driver_data; | 1535 | struct cyclades_port *info = tty->driver_data; |
1536 | int ret_val = 0; | 1536 | int ret_val = 0; |
1537 | void __user *argp = (void __user *)arg; | 1537 | void __user *argp = (void __user *)arg; |
1538 | 1538 | ||
@@ -1607,7 +1607,7 @@ cy_ioctl(struct tty_struct *tty, struct file *file, | |||
1607 | 1607 | ||
1608 | static void cy_set_termios(struct tty_struct *tty, struct ktermios *old_termios) | 1608 | static void cy_set_termios(struct tty_struct *tty, struct ktermios *old_termios) |
1609 | { | 1609 | { |
1610 | struct cyclades_port *info = (struct cyclades_port *)tty->driver_data; | 1610 | struct cyclades_port *info = tty->driver_data; |
1611 | 1611 | ||
1612 | #ifdef SERIAL_DEBUG_OTHER | 1612 | #ifdef SERIAL_DEBUG_OTHER |
1613 | printk("cy_set_termios %s\n", tty->name); | 1613 | printk("cy_set_termios %s\n", tty->name); |
@@ -1631,7 +1631,7 @@ static void cy_set_termios(struct tty_struct *tty, struct ktermios *old_termios) | |||
1631 | 1631 | ||
1632 | static void cy_close(struct tty_struct *tty, struct file *filp) | 1632 | static void cy_close(struct tty_struct *tty, struct file *filp) |
1633 | { | 1633 | { |
1634 | struct cyclades_port *info = (struct cyclades_port *)tty->driver_data; | 1634 | struct cyclades_port *info = tty->driver_data; |
1635 | 1635 | ||
1636 | /* CP('C'); */ | 1636 | /* CP('C'); */ |
1637 | #ifdef SERIAL_DEBUG_OTHER | 1637 | #ifdef SERIAL_DEBUG_OTHER |
@@ -1698,7 +1698,7 @@ static void cy_close(struct tty_struct *tty, struct file *filp) | |||
1698 | */ | 1698 | */ |
1699 | void cy_hangup(struct tty_struct *tty) | 1699 | void cy_hangup(struct tty_struct *tty) |
1700 | { | 1700 | { |
1701 | struct cyclades_port *info = (struct cyclades_port *)tty->driver_data; | 1701 | struct cyclades_port *info = tty->driver_data; |
1702 | 1702 | ||
1703 | #ifdef SERIAL_DEBUG_OTHER | 1703 | #ifdef SERIAL_DEBUG_OTHER |
1704 | printk("cy_hangup %s\n", tty->name); /* */ | 1704 | printk("cy_hangup %s\n", tty->name); /* */ |
diff --git a/drivers/char/specialix.c b/drivers/char/specialix.c index a16b94f12eb2..3c67c3d83de9 100644 --- a/drivers/char/specialix.c +++ b/drivers/char/specialix.c | |||
@@ -1450,7 +1450,7 @@ static int sx_open(struct tty_struct *tty, struct file *filp) | |||
1450 | 1450 | ||
1451 | static void sx_flush_buffer(struct tty_struct *tty) | 1451 | static void sx_flush_buffer(struct tty_struct *tty) |
1452 | { | 1452 | { |
1453 | struct specialix_port *port = (struct specialix_port *)tty->driver_data; | 1453 | struct specialix_port *port = tty->driver_data; |
1454 | unsigned long flags; | 1454 | unsigned long flags; |
1455 | struct specialix_board *bp; | 1455 | struct specialix_board *bp; |
1456 | 1456 | ||
@@ -1472,7 +1472,7 @@ static void sx_flush_buffer(struct tty_struct *tty) | |||
1472 | 1472 | ||
1473 | static void sx_close(struct tty_struct *tty, struct file *filp) | 1473 | static void sx_close(struct tty_struct *tty, struct file *filp) |
1474 | { | 1474 | { |
1475 | struct specialix_port *port = (struct specialix_port *)tty->driver_data; | 1475 | struct specialix_port *port = tty->driver_data; |
1476 | struct specialix_board *bp; | 1476 | struct specialix_board *bp; |
1477 | unsigned long flags; | 1477 | unsigned long flags; |
1478 | unsigned long timeout; | 1478 | unsigned long timeout; |
@@ -1585,7 +1585,7 @@ static void sx_close(struct tty_struct *tty, struct file *filp) | |||
1585 | static int sx_write(struct tty_struct *tty, | 1585 | static int sx_write(struct tty_struct *tty, |
1586 | const unsigned char *buf, int count) | 1586 | const unsigned char *buf, int count) |
1587 | { | 1587 | { |
1588 | struct specialix_port *port = (struct specialix_port *)tty->driver_data; | 1588 | struct specialix_port *port = tty->driver_data; |
1589 | struct specialix_board *bp; | 1589 | struct specialix_board *bp; |
1590 | int c, total = 0; | 1590 | int c, total = 0; |
1591 | unsigned long flags; | 1591 | unsigned long flags; |
@@ -1637,7 +1637,7 @@ static int sx_write(struct tty_struct *tty, | |||
1637 | 1637 | ||
1638 | static int sx_put_char(struct tty_struct *tty, unsigned char ch) | 1638 | static int sx_put_char(struct tty_struct *tty, unsigned char ch) |
1639 | { | 1639 | { |
1640 | struct specialix_port *port = (struct specialix_port *)tty->driver_data; | 1640 | struct specialix_port *port = tty->driver_data; |
1641 | unsigned long flags; | 1641 | unsigned long flags; |
1642 | struct specialix_board *bp; | 1642 | struct specialix_board *bp; |
1643 | 1643 | ||
@@ -1676,7 +1676,7 @@ static int sx_put_char(struct tty_struct *tty, unsigned char ch) | |||
1676 | 1676 | ||
1677 | static void sx_flush_chars(struct tty_struct *tty) | 1677 | static void sx_flush_chars(struct tty_struct *tty) |
1678 | { | 1678 | { |
1679 | struct specialix_port *port = (struct specialix_port *)tty->driver_data; | 1679 | struct specialix_port *port = tty->driver_data; |
1680 | unsigned long flags; | 1680 | unsigned long flags; |
1681 | struct specialix_board *bp = port_Board(port); | 1681 | struct specialix_board *bp = port_Board(port); |
1682 | 1682 | ||
@@ -1703,7 +1703,7 @@ static void sx_flush_chars(struct tty_struct *tty) | |||
1703 | 1703 | ||
1704 | static int sx_write_room(struct tty_struct *tty) | 1704 | static int sx_write_room(struct tty_struct *tty) |
1705 | { | 1705 | { |
1706 | struct specialix_port *port = (struct specialix_port *)tty->driver_data; | 1706 | struct specialix_port *port = tty->driver_data; |
1707 | int ret; | 1707 | int ret; |
1708 | 1708 | ||
1709 | func_enter(); | 1709 | func_enter(); |
@@ -1724,7 +1724,7 @@ static int sx_write_room(struct tty_struct *tty) | |||
1724 | 1724 | ||
1725 | static int sx_chars_in_buffer(struct tty_struct *tty) | 1725 | static int sx_chars_in_buffer(struct tty_struct *tty) |
1726 | { | 1726 | { |
1727 | struct specialix_port *port = (struct specialix_port *)tty->driver_data; | 1727 | struct specialix_port *port = tty->driver_data; |
1728 | 1728 | ||
1729 | func_enter(); | 1729 | func_enter(); |
1730 | 1730 | ||
@@ -1738,7 +1738,7 @@ static int sx_chars_in_buffer(struct tty_struct *tty) | |||
1738 | 1738 | ||
1739 | static int sx_tiocmget(struct tty_struct *tty, struct file *file) | 1739 | static int sx_tiocmget(struct tty_struct *tty, struct file *file) |
1740 | { | 1740 | { |
1741 | struct specialix_port *port = (struct specialix_port *)tty->driver_data; | 1741 | struct specialix_port *port = tty->driver_data; |
1742 | struct specialix_board *bp; | 1742 | struct specialix_board *bp; |
1743 | unsigned char status; | 1743 | unsigned char status; |
1744 | unsigned int result; | 1744 | unsigned int result; |
@@ -1780,7 +1780,7 @@ static int sx_tiocmget(struct tty_struct *tty, struct file *file) | |||
1780 | static int sx_tiocmset(struct tty_struct *tty, struct file *file, | 1780 | static int sx_tiocmset(struct tty_struct *tty, struct file *file, |
1781 | unsigned int set, unsigned int clear) | 1781 | unsigned int set, unsigned int clear) |
1782 | { | 1782 | { |
1783 | struct specialix_port *port = (struct specialix_port *)tty->driver_data; | 1783 | struct specialix_port *port = tty->driver_data; |
1784 | unsigned long flags; | 1784 | unsigned long flags; |
1785 | struct specialix_board *bp; | 1785 | struct specialix_board *bp; |
1786 | 1786 | ||
@@ -1820,7 +1820,7 @@ static int sx_tiocmset(struct tty_struct *tty, struct file *file, | |||
1820 | 1820 | ||
1821 | static int sx_send_break(struct tty_struct *tty, int length) | 1821 | static int sx_send_break(struct tty_struct *tty, int length) |
1822 | { | 1822 | { |
1823 | struct specialix_port *port = (struct specialix_port *)tty->driver_data; | 1823 | struct specialix_port *port = tty->driver_data; |
1824 | struct specialix_board *bp = port_Board(port); | 1824 | struct specialix_board *bp = port_Board(port); |
1825 | unsigned long flags; | 1825 | unsigned long flags; |
1826 | 1826 | ||
@@ -1931,7 +1931,7 @@ static int sx_get_serial_info(struct specialix_port *port, | |||
1931 | static int sx_ioctl(struct tty_struct *tty, struct file *filp, | 1931 | static int sx_ioctl(struct tty_struct *tty, struct file *filp, |
1932 | unsigned int cmd, unsigned long arg) | 1932 | unsigned int cmd, unsigned long arg) |
1933 | { | 1933 | { |
1934 | struct specialix_port *port = (struct specialix_port *)tty->driver_data; | 1934 | struct specialix_port *port = tty->driver_data; |
1935 | void __user *argp = (void __user *)arg; | 1935 | void __user *argp = (void __user *)arg; |
1936 | 1936 | ||
1937 | func_enter(); | 1937 | func_enter(); |
@@ -1959,7 +1959,7 @@ static int sx_ioctl(struct tty_struct *tty, struct file *filp, | |||
1959 | 1959 | ||
1960 | static void sx_throttle(struct tty_struct *tty) | 1960 | static void sx_throttle(struct tty_struct *tty) |
1961 | { | 1961 | { |
1962 | struct specialix_port *port = (struct specialix_port *)tty->driver_data; | 1962 | struct specialix_port *port = tty->driver_data; |
1963 | struct specialix_board *bp; | 1963 | struct specialix_board *bp; |
1964 | unsigned long flags; | 1964 | unsigned long flags; |
1965 | 1965 | ||
@@ -2004,7 +2004,7 @@ static void sx_throttle(struct tty_struct *tty) | |||
2004 | 2004 | ||
2005 | static void sx_unthrottle(struct tty_struct *tty) | 2005 | static void sx_unthrottle(struct tty_struct *tty) |
2006 | { | 2006 | { |
2007 | struct specialix_port *port = (struct specialix_port *)tty->driver_data; | 2007 | struct specialix_port *port = tty->driver_data; |
2008 | struct specialix_board *bp; | 2008 | struct specialix_board *bp; |
2009 | unsigned long flags; | 2009 | unsigned long flags; |
2010 | 2010 | ||
@@ -2045,7 +2045,7 @@ static void sx_unthrottle(struct tty_struct *tty) | |||
2045 | 2045 | ||
2046 | static void sx_stop(struct tty_struct *tty) | 2046 | static void sx_stop(struct tty_struct *tty) |
2047 | { | 2047 | { |
2048 | struct specialix_port *port = (struct specialix_port *)tty->driver_data; | 2048 | struct specialix_port *port = tty->driver_data; |
2049 | struct specialix_board *bp; | 2049 | struct specialix_board *bp; |
2050 | unsigned long flags; | 2050 | unsigned long flags; |
2051 | 2051 | ||
@@ -2072,7 +2072,7 @@ static void sx_stop(struct tty_struct *tty) | |||
2072 | 2072 | ||
2073 | static void sx_start(struct tty_struct *tty) | 2073 | static void sx_start(struct tty_struct *tty) |
2074 | { | 2074 | { |
2075 | struct specialix_port *port = (struct specialix_port *)tty->driver_data; | 2075 | struct specialix_port *port = tty->driver_data; |
2076 | struct specialix_board *bp; | 2076 | struct specialix_board *bp; |
2077 | unsigned long flags; | 2077 | unsigned long flags; |
2078 | 2078 | ||
@@ -2100,7 +2100,7 @@ static void sx_start(struct tty_struct *tty) | |||
2100 | 2100 | ||
2101 | static void sx_hangup(struct tty_struct *tty) | 2101 | static void sx_hangup(struct tty_struct *tty) |
2102 | { | 2102 | { |
2103 | struct specialix_port *port = (struct specialix_port *)tty->driver_data; | 2103 | struct specialix_port *port = tty->driver_data; |
2104 | struct specialix_board *bp; | 2104 | struct specialix_board *bp; |
2105 | unsigned long flags; | 2105 | unsigned long flags; |
2106 | 2106 | ||
@@ -2135,7 +2135,7 @@ static void sx_hangup(struct tty_struct *tty) | |||
2135 | static void sx_set_termios(struct tty_struct *tty, | 2135 | static void sx_set_termios(struct tty_struct *tty, |
2136 | struct ktermios *old_termios) | 2136 | struct ktermios *old_termios) |
2137 | { | 2137 | { |
2138 | struct specialix_port *port = (struct specialix_port *)tty->driver_data; | 2138 | struct specialix_port *port = tty->driver_data; |
2139 | unsigned long flags; | 2139 | unsigned long flags; |
2140 | struct specialix_board *bp; | 2140 | struct specialix_board *bp; |
2141 | 2141 | ||
diff --git a/drivers/char/stallion.c b/drivers/char/stallion.c index 963b03fb29e5..e1e0dd89ac9a 100644 --- a/drivers/char/stallion.c +++ b/drivers/char/stallion.c | |||
@@ -130,6 +130,8 @@ static char stl_unwanted[SC26198_RXFIFOSIZE]; | |||
130 | static DEFINE_MUTEX(stl_brdslock); | 130 | static DEFINE_MUTEX(stl_brdslock); |
131 | static struct stlbrd *stl_brds[STL_MAXBRDS]; | 131 | static struct stlbrd *stl_brds[STL_MAXBRDS]; |
132 | 132 | ||
133 | static const struct tty_port_operations stl_port_ops; | ||
134 | |||
133 | /* | 135 | /* |
134 | * Per board state flags. Used with the state field of the board struct. | 136 | * Per board state flags. Used with the state field of the board struct. |
135 | * Not really much here! | 137 | * Not really much here! |
@@ -407,7 +409,6 @@ static int stl_memioctl(struct inode *ip, struct file *fp, unsigned int cmd, uns | |||
407 | static int stl_brdinit(struct stlbrd *brdp); | 409 | static int stl_brdinit(struct stlbrd *brdp); |
408 | static int stl_getportstats(struct tty_struct *tty, struct stlport *portp, comstats_t __user *cp); | 410 | static int stl_getportstats(struct tty_struct *tty, struct stlport *portp, comstats_t __user *cp); |
409 | static int stl_clrportstats(struct stlport *portp, comstats_t __user *cp); | 411 | static int stl_clrportstats(struct stlport *portp, comstats_t __user *cp); |
410 | static int stl_waitcarrier(struct tty_struct *tty, struct stlport *portp, struct file *filp); | ||
411 | 412 | ||
412 | /* | 413 | /* |
413 | * CD1400 uart specific handling functions. | 414 | * CD1400 uart specific handling functions. |
@@ -703,8 +704,9 @@ static int stl_open(struct tty_struct *tty, struct file *filp) | |||
703 | { | 704 | { |
704 | struct stlport *portp; | 705 | struct stlport *portp; |
705 | struct stlbrd *brdp; | 706 | struct stlbrd *brdp; |
707 | struct tty_port *port; | ||
706 | unsigned int minordev, brdnr, panelnr; | 708 | unsigned int minordev, brdnr, panelnr; |
707 | int portnr, rc; | 709 | int portnr; |
708 | 710 | ||
709 | pr_debug("stl_open(tty=%p,filp=%p): device=%s\n", tty, filp, tty->name); | 711 | pr_debug("stl_open(tty=%p,filp=%p): device=%s\n", tty, filp, tty->name); |
710 | 712 | ||
@@ -715,6 +717,7 @@ static int stl_open(struct tty_struct *tty, struct file *filp) | |||
715 | brdp = stl_brds[brdnr]; | 717 | brdp = stl_brds[brdnr]; |
716 | if (brdp == NULL) | 718 | if (brdp == NULL) |
717 | return -ENODEV; | 719 | return -ENODEV; |
720 | |||
718 | minordev = MINOR2PORT(minordev); | 721 | minordev = MINOR2PORT(minordev); |
719 | for (portnr = -1, panelnr = 0; panelnr < STL_MAXPANELS; panelnr++) { | 722 | for (portnr = -1, panelnr = 0; panelnr < STL_MAXPANELS; panelnr++) { |
720 | if (brdp->panels[panelnr] == NULL) | 723 | if (brdp->panels[panelnr] == NULL) |
@@ -731,16 +734,17 @@ static int stl_open(struct tty_struct *tty, struct file *filp) | |||
731 | portp = brdp->panels[panelnr]->ports[portnr]; | 734 | portp = brdp->panels[panelnr]->ports[portnr]; |
732 | if (portp == NULL) | 735 | if (portp == NULL) |
733 | return -ENODEV; | 736 | return -ENODEV; |
737 | port = &portp->port; | ||
734 | 738 | ||
735 | /* | 739 | /* |
736 | * On the first open of the device setup the port hardware, and | 740 | * On the first open of the device setup the port hardware, and |
737 | * initialize the per port data structure. | 741 | * initialize the per port data structure. |
738 | */ | 742 | */ |
739 | tty_port_tty_set(&portp->port, tty); | 743 | tty_port_tty_set(port, tty); |
740 | tty->driver_data = portp; | 744 | tty->driver_data = portp; |
741 | portp->port.count++; | 745 | port->count++; |
742 | 746 | ||
743 | if ((portp->port.flags & ASYNC_INITIALIZED) == 0) { | 747 | if ((port->flags & ASYNC_INITIALIZED) == 0) { |
744 | if (!portp->tx.buf) { | 748 | if (!portp->tx.buf) { |
745 | portp->tx.buf = kmalloc(STL_TXBUFSIZE, GFP_KERNEL); | 749 | portp->tx.buf = kmalloc(STL_TXBUFSIZE, GFP_KERNEL); |
746 | if (!portp->tx.buf) | 750 | if (!portp->tx.buf) |
@@ -754,91 +758,24 @@ static int stl_open(struct tty_struct *tty, struct file *filp) | |||
754 | stl_enablerxtx(portp, 1, 1); | 758 | stl_enablerxtx(portp, 1, 1); |
755 | stl_startrxtx(portp, 1, 0); | 759 | stl_startrxtx(portp, 1, 0); |
756 | clear_bit(TTY_IO_ERROR, &tty->flags); | 760 | clear_bit(TTY_IO_ERROR, &tty->flags); |
757 | portp->port.flags |= ASYNC_INITIALIZED; | 761 | port->flags |= ASYNC_INITIALIZED; |
758 | } | ||
759 | |||
760 | /* | ||
761 | * Check if this port is in the middle of closing. If so then wait | ||
762 | * until it is closed then return error status, based on flag settings. | ||
763 | * The sleep here does not need interrupt protection since the wakeup | ||
764 | * for it is done with the same context. | ||
765 | */ | ||
766 | if (portp->port.flags & ASYNC_CLOSING) { | ||
767 | interruptible_sleep_on(&portp->port.close_wait); | ||
768 | if (portp->port.flags & ASYNC_HUP_NOTIFY) | ||
769 | return -EAGAIN; | ||
770 | return -ERESTARTSYS; | ||
771 | } | 762 | } |
772 | 763 | return tty_port_block_til_ready(port, tty, filp); | |
773 | /* | ||
774 | * Based on type of open being done check if it can overlap with any | ||
775 | * previous opens still in effect. If we are a normal serial device | ||
776 | * then also we might have to wait for carrier. | ||
777 | */ | ||
778 | if (!(filp->f_flags & O_NONBLOCK)) | ||
779 | if ((rc = stl_waitcarrier(tty, portp, filp)) != 0) | ||
780 | return rc; | ||
781 | |||
782 | portp->port.flags |= ASYNC_NORMAL_ACTIVE; | ||
783 | |||
784 | return 0; | ||
785 | } | 764 | } |
786 | 765 | ||
787 | /*****************************************************************************/ | 766 | /*****************************************************************************/ |
788 | 767 | ||
789 | /* | 768 | static int stl_carrier_raised(struct tty_port *port) |
790 | * Possibly need to wait for carrier (DCD signal) to come high. Say | ||
791 | * maybe because if we are clocal then we don't need to wait... | ||
792 | */ | ||
793 | |||
794 | static int stl_waitcarrier(struct tty_struct *tty, struct stlport *portp, | ||
795 | struct file *filp) | ||
796 | { | 769 | { |
797 | unsigned long flags; | 770 | struct stlport *portp = container_of(port, struct stlport, port); |
798 | int rc, doclocal; | 771 | return (portp->sigs & TIOCM_CD) ? 1 : 0; |
799 | 772 | } | |
800 | pr_debug("stl_waitcarrier(portp=%p,filp=%p)\n", portp, filp); | ||
801 | |||
802 | rc = 0; | ||
803 | doclocal = 0; | ||
804 | |||
805 | spin_lock_irqsave(&stallion_lock, flags); | ||
806 | |||
807 | if (tty->termios->c_cflag & CLOCAL) | ||
808 | doclocal++; | ||
809 | |||
810 | portp->openwaitcnt++; | ||
811 | if (! tty_hung_up_p(filp)) | ||
812 | portp->port.count--; | ||
813 | |||
814 | for (;;) { | ||
815 | /* Takes brd_lock internally */ | ||
816 | stl_setsignals(portp, 1, 1); | ||
817 | if (tty_hung_up_p(filp) || | ||
818 | ((portp->port.flags & ASYNC_INITIALIZED) == 0)) { | ||
819 | if (portp->port.flags & ASYNC_HUP_NOTIFY) | ||
820 | rc = -EBUSY; | ||
821 | else | ||
822 | rc = -ERESTARTSYS; | ||
823 | break; | ||
824 | } | ||
825 | if (((portp->port.flags & ASYNC_CLOSING) == 0) && | ||
826 | (doclocal || (portp->sigs & TIOCM_CD))) | ||
827 | break; | ||
828 | if (signal_pending(current)) { | ||
829 | rc = -ERESTARTSYS; | ||
830 | break; | ||
831 | } | ||
832 | /* FIXME */ | ||
833 | interruptible_sleep_on(&portp->port.open_wait); | ||
834 | } | ||
835 | |||
836 | if (! tty_hung_up_p(filp)) | ||
837 | portp->port.count++; | ||
838 | portp->openwaitcnt--; | ||
839 | spin_unlock_irqrestore(&stallion_lock, flags); | ||
840 | 773 | ||
841 | return rc; | 774 | static void stl_raise_dtr_rts(struct tty_port *port) |
775 | { | ||
776 | struct stlport *portp = container_of(port, struct stlport, port); | ||
777 | /* Takes brd_lock internally */ | ||
778 | stl_setsignals(portp, 1, 1); | ||
842 | } | 779 | } |
843 | 780 | ||
844 | /*****************************************************************************/ | 781 | /*****************************************************************************/ |
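
With carrier_raised and raise_dtr_rts supplied through stl_port_ops, the roughly sixty lines of stl_waitcarrier() above collapse into one call: tty_port_block_til_ready() performs the same dance (drop port->count while waiting, sleep on open_wait, honour CLOCAL and O_NONBLOCK, raise DTR/RTS through the hook, recheck carrier each pass). A sketch of what an open routine reduces to, with hypothetical foo_* names:

#include <linux/serial.h>	/* ASYNC_INITIALIZED */
#include <linux/tty.h>

struct foo_port {
	struct tty_port port;
};

static struct foo_port foo_ports[4];	/* hypothetical static port table */

static int foo_open(struct tty_struct *tty, struct file *filp)
{
	struct foo_port *fp = &foo_ports[tty->index];
	struct tty_port *port = &fp->port;

	tty_port_tty_set(port, tty);
	tty->driver_data = fp;
	port->count++;

	if (!(port->flags & ASYNC_INITIALIZED)) {
		/* one-time hardware bring-up would go here */
		port->flags |= ASYNC_INITIALIZED;
	}

	/* Blocks until carrier unless CLOCAL or O_NONBLOCK; uses the
	 * installed tty_port_operations for DTR/RTS and carrier checks. */
	return tty_port_block_til_ready(port, tty, filp);
}
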
@@ -890,47 +827,29 @@ static void stl_waituntilsent(struct tty_struct *tty, int timeout) | |||
890 | static void stl_close(struct tty_struct *tty, struct file *filp) | 827 | static void stl_close(struct tty_struct *tty, struct file *filp) |
891 | { | 828 | { |
892 | struct stlport *portp; | 829 | struct stlport *portp; |
830 | struct tty_port *port; | ||
893 | unsigned long flags; | 831 | unsigned long flags; |
894 | 832 | ||
895 | pr_debug("stl_close(tty=%p,filp=%p)\n", tty, filp); | 833 | pr_debug("stl_close(tty=%p,filp=%p)\n", tty, filp); |
896 | 834 | ||
897 | portp = tty->driver_data; | 835 | portp = tty->driver_data; |
898 | if (portp == NULL) | 836 | BUG_ON(portp == NULL); |
899 | return; | ||
900 | 837 | ||
901 | spin_lock_irqsave(&stallion_lock, flags); | 838 | port = &portp->port; |
902 | if (tty_hung_up_p(filp)) { | ||
903 | spin_unlock_irqrestore(&stallion_lock, flags); | ||
904 | return; | ||
905 | } | ||
906 | if ((tty->count == 1) && (portp->port.count != 1)) | ||
907 | portp->port.count = 1; | ||
908 | if (portp->port.count-- > 1) { | ||
909 | spin_unlock_irqrestore(&stallion_lock, flags); | ||
910 | return; | ||
911 | } | ||
912 | |||
913 | portp->port.count = 0; | ||
914 | portp->port.flags |= ASYNC_CLOSING; | ||
915 | 839 | ||
840 | if (tty_port_close_start(port, tty, filp) == 0) | ||
841 | return; | ||
916 | /* | 842 | /* |
917 | * May want to wait for any data to drain before closing. The BUSY | 843 | * May want to wait for any data to drain before closing. The BUSY |
918 | * flag keeps track of whether we are still sending or not - it is | 844 | * flag keeps track of whether we are still sending or not - it is |
919 | * very accurate for the cd1400, not quite so for the sc26198. | 845 | * very accurate for the cd1400, not quite so for the sc26198. |
920 | * (The sc26198 has no "end-of-data" interrupt only empty FIFO) | 846 | * (The sc26198 has no "end-of-data" interrupt only empty FIFO) |
921 | */ | 847 | */ |
922 | tty->closing = 1; | ||
923 | |||
924 | spin_unlock_irqrestore(&stallion_lock, flags); | ||
925 | |||
926 | if (portp->closing_wait != ASYNC_CLOSING_WAIT_NONE) | ||
927 | tty_wait_until_sent(tty, portp->closing_wait); | ||
928 | stl_waituntilsent(tty, (HZ / 2)); | 848 | stl_waituntilsent(tty, (HZ / 2)); |
929 | 849 | ||
930 | 850 | spin_lock_irqsave(&port->lock, flags); | |
931 | spin_lock_irqsave(&stallion_lock, flags); | ||
932 | portp->port.flags &= ~ASYNC_INITIALIZED; | 851 | portp->port.flags &= ~ASYNC_INITIALIZED; |
933 | spin_unlock_irqrestore(&stallion_lock, flags); | 852 | spin_unlock_irqrestore(&port->lock, flags); |
934 | 853 | ||
935 | stl_disableintrs(portp); | 854 | stl_disableintrs(portp); |
936 | if (tty->termios->c_cflag & HUPCL) | 855 | if (tty->termios->c_cflag & HUPCL) |
@@ -944,20 +863,9 @@ static void stl_close(struct tty_struct *tty, struct file *filp) | |||
944 | portp->tx.head = NULL; | 863 | portp->tx.head = NULL; |
945 | portp->tx.tail = NULL; | 864 | portp->tx.tail = NULL; |
946 | } | 865 | } |
947 | set_bit(TTY_IO_ERROR, &tty->flags); | ||
948 | tty_ldisc_flush(tty); | ||
949 | 866 | ||
950 | tty->closing = 0; | 867 | tty_port_close_end(port, tty); |
951 | tty_port_tty_set(&portp->port, NULL); | 868 | tty_port_tty_set(port, NULL); |
952 | |||
953 | if (portp->openwaitcnt) { | ||
954 | if (portp->close_delay) | ||
955 | msleep_interruptible(jiffies_to_msecs(portp->close_delay)); | ||
956 | wake_up_interruptible(&portp->port.open_wait); | ||
957 | } | ||
958 | |||
959 | portp->port.flags &= ~(ASYNC_NORMAL_ACTIVE|ASYNC_CLOSING); | ||
960 | wake_up_interruptible(&portp->port.close_wait); | ||
961 | } | 869 | } |
962 | 870 | ||
963 | /*****************************************************************************/ | 871 | /*****************************************************************************/ |
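
The close conversion above splits the old hand-rolled logic between two helpers. tty_port_close_start() returns 0 when this close must not shut the port down (the file was already hung up, or other opens remain) and otherwise takes care of the count bookkeeping, ASYNC_CLOSING, tty->closing and the closing_wait drain; tty_port_close_end() later handles close_delay, wakes blocked openers and clears the flags. In sketch form, with hypothetical foo_* names:

#include <linux/serial.h>
#include <linux/tty.h>

struct foo_port {
	struct tty_port port;
};

static void foo_shutdown_hw(struct foo_port *fp)
{
	/* hypothetical: drain the FIFO, disable interrupts, drop DTR/RTS */
}

static void foo_close(struct tty_struct *tty, struct file *filp)
{
	struct foo_port *fp = tty->driver_data;
	struct tty_port *port = &fp->port;

	if (tty_port_close_start(port, tty, filp) == 0)
		return;		/* nothing to tear down on this close */

	foo_shutdown_hw(fp);
	port->flags &= ~ASYNC_INITIALIZED;

	tty_port_close_end(port, tty);
	tty_port_tty_set(port, NULL);
}
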
@@ -1405,14 +1313,20 @@ static void stl_stop(struct tty_struct *tty) | |||
1405 | static void stl_hangup(struct tty_struct *tty) | 1313 | static void stl_hangup(struct tty_struct *tty) |
1406 | { | 1314 | { |
1407 | struct stlport *portp; | 1315 | struct stlport *portp; |
1316 | struct tty_port *port; | ||
1317 | unsigned long flags; | ||
1408 | 1318 | ||
1409 | pr_debug("stl_hangup(tty=%p)\n", tty); | 1319 | pr_debug("stl_hangup(tty=%p)\n", tty); |
1410 | 1320 | ||
1411 | portp = tty->driver_data; | 1321 | portp = tty->driver_data; |
1412 | if (portp == NULL) | 1322 | if (portp == NULL) |
1413 | return; | 1323 | return; |
1324 | port = &portp->port; | ||
1325 | |||
1326 | spin_lock_irqsave(&port->lock, flags); | ||
1327 | port->flags &= ~ASYNC_INITIALIZED; | ||
1328 | spin_unlock_irqrestore(&port->lock, flags); | ||
1414 | 1329 | ||
1415 | portp->port.flags &= ~ASYNC_INITIALIZED; | ||
1416 | stl_disableintrs(portp); | 1330 | stl_disableintrs(portp); |
1417 | if (tty->termios->c_cflag & HUPCL) | 1331 | if (tty->termios->c_cflag & HUPCL) |
1418 | stl_setsignals(portp, 0, 0); | 1332 | stl_setsignals(portp, 0, 0); |
@@ -1426,10 +1340,7 @@ static void stl_hangup(struct tty_struct *tty) | |||
1426 | portp->tx.head = NULL; | 1340 | portp->tx.head = NULL; |
1427 | portp->tx.tail = NULL; | 1341 | portp->tx.tail = NULL; |
1428 | } | 1342 | } |
1429 | tty_port_tty_set(&portp->port, NULL); | 1343 | tty_port_hangup(port); |
1430 | portp->port.flags &= ~ASYNC_NORMAL_ACTIVE; | ||
1431 | portp->port.count = 0; | ||
1432 | wake_up_interruptible(&portp->port.open_wait); | ||
1433 | } | 1344 | } |
1434 | 1345 | ||
1435 | /*****************************************************************************/ | 1346 | /*****************************************************************************/ |
@@ -1776,6 +1687,7 @@ static int __devinit stl_initports(struct stlbrd *brdp, struct stlpanel *panelp) | |||
1776 | break; | 1687 | break; |
1777 | } | 1688 | } |
1778 | tty_port_init(&portp->port); | 1689 | tty_port_init(&portp->port); |
1690 | portp->port.ops = &stl_port_ops; | ||
1779 | portp->magic = STL_PORTMAGIC; | 1691 | portp->magic = STL_PORTMAGIC; |
1780 | portp->portnr = i; | 1692 | portp->portnr = i; |
1781 | portp->brdnr = panelp->brdnr; | 1693 | portp->brdnr = panelp->brdnr; |
@@ -2659,6 +2571,11 @@ static const struct tty_operations stl_ops = { | |||
2659 | .tiocmset = stl_tiocmset, | 2571 | .tiocmset = stl_tiocmset, |
2660 | }; | 2572 | }; |
2661 | 2573 | ||
2574 | static const struct tty_port_operations stl_port_ops = { | ||
2575 | .carrier_raised = stl_carrier_raised, | ||
2576 | .raise_dtr_rts = stl_raise_dtr_rts, | ||
2577 | }; | ||
2578 | |||
2662 | /*****************************************************************************/ | 2579 | /*****************************************************************************/ |
2663 | /* CD1400 HARDWARE FUNCTIONS */ | 2580 | /* CD1400 HARDWARE FUNCTIONS */ |
2664 | /*****************************************************************************/ | 2581 | /*****************************************************************************/ |
diff --git a/drivers/char/sx.c b/drivers/char/sx.c index ba4e86281fbf..b60be7b0decf 100644 --- a/drivers/char/sx.c +++ b/drivers/char/sx.c | |||
@@ -279,7 +279,7 @@ static void sx_disable_tx_interrupts(void *ptr); | |||
279 | static void sx_enable_tx_interrupts(void *ptr); | 279 | static void sx_enable_tx_interrupts(void *ptr); |
280 | static void sx_disable_rx_interrupts(void *ptr); | 280 | static void sx_disable_rx_interrupts(void *ptr); |
281 | static void sx_enable_rx_interrupts(void *ptr); | 281 | static void sx_enable_rx_interrupts(void *ptr); |
282 | static int sx_get_CD(void *ptr); | 282 | static int sx_carrier_raised(struct tty_port *port); |
283 | static void sx_shutdown_port(void *ptr); | 283 | static void sx_shutdown_port(void *ptr); |
284 | static int sx_set_real_termios(void *ptr); | 284 | static int sx_set_real_termios(void *ptr); |
285 | static void sx_close(void *ptr); | 285 | static void sx_close(void *ptr); |
@@ -360,7 +360,6 @@ static struct real_driver sx_real_driver = { | |||
360 | sx_enable_tx_interrupts, | 360 | sx_enable_tx_interrupts, |
361 | sx_disable_rx_interrupts, | 361 | sx_disable_rx_interrupts, |
362 | sx_enable_rx_interrupts, | 362 | sx_enable_rx_interrupts, |
363 | sx_get_CD, | ||
364 | sx_shutdown_port, | 363 | sx_shutdown_port, |
365 | sx_set_real_termios, | 364 | sx_set_real_termios, |
366 | sx_chars_in_buffer, | 365 | sx_chars_in_buffer, |
@@ -791,7 +790,7 @@ static int sx_getsignals(struct sx_port *port) | |||
791 | sx_dprintk(SX_DEBUG_MODEMSIGNALS, "getsignals: %d/%d (%d/%d) " | 790 | sx_dprintk(SX_DEBUG_MODEMSIGNALS, "getsignals: %d/%d (%d/%d) " |
792 | "%02x/%02x\n", | 791 | "%02x/%02x\n", |
793 | (o_stat & OP_DTR) != 0, (o_stat & OP_RTS) != 0, | 792 | (o_stat & OP_DTR) != 0, (o_stat & OP_RTS) != 0, |
794 | port->c_dcd, sx_get_CD(port), | 793 | port->c_dcd, tty_port_carrier_raised(&port->gs.port), |
795 | sx_read_channel_byte(port, hi_ip), | 794 | sx_read_channel_byte(port, hi_ip), |
796 | sx_read_channel_byte(port, hi_state)); | 795 | sx_read_channel_byte(port, hi_state)); |
797 | 796 | ||
@@ -1190,7 +1189,7 @@ static inline void sx_check_modem_signals(struct sx_port *port) | |||
1190 | 1189 | ||
1191 | hi_state = sx_read_channel_byte(port, hi_state); | 1190 | hi_state = sx_read_channel_byte(port, hi_state); |
1192 | sx_dprintk(SX_DEBUG_MODEMSIGNALS, "Checking modem signals (%d/%d)\n", | 1191 | sx_dprintk(SX_DEBUG_MODEMSIGNALS, "Checking modem signals (%d/%d)\n", |
1193 | port->c_dcd, sx_get_CD(port)); | 1192 | port->c_dcd, tty_port_carrier_raised(&port->gs.port)); |
1194 | 1193 | ||
1195 | if (hi_state & ST_BREAK) { | 1194 | if (hi_state & ST_BREAK) { |
1196 | hi_state &= ~ST_BREAK; | 1195 | hi_state &= ~ST_BREAK; |
@@ -1202,11 +1201,11 @@ static inline void sx_check_modem_signals(struct sx_port *port) | |||
1202 | hi_state &= ~ST_DCD; | 1201 | hi_state &= ~ST_DCD; |
1203 | sx_dprintk(SX_DEBUG_MODEMSIGNALS, "got a DCD change.\n"); | 1202 | sx_dprintk(SX_DEBUG_MODEMSIGNALS, "got a DCD change.\n"); |
1204 | sx_write_channel_byte(port, hi_state, hi_state); | 1203 | sx_write_channel_byte(port, hi_state, hi_state); |
1205 | c_dcd = sx_get_CD(port); | 1204 | c_dcd = tty_port_carrier_raised(&port->gs.port); |
1206 | sx_dprintk(SX_DEBUG_MODEMSIGNALS, "DCD is now %d\n", c_dcd); | 1205 | sx_dprintk(SX_DEBUG_MODEMSIGNALS, "DCD is now %d\n", c_dcd); |
1207 | if (c_dcd != port->c_dcd) { | 1206 | if (c_dcd != port->c_dcd) { |
1208 | port->c_dcd = c_dcd; | 1207 | port->c_dcd = c_dcd; |
1209 | if (sx_get_CD(port)) { | 1208 | if (tty_port_carrier_raised(&port->gs.port)) { |
1210 | /* DCD went UP */ | 1209 | /* DCD went UP */ |
1211 | if ((sx_read_channel_byte(port, hi_hstat) != | 1210 | if ((sx_read_channel_byte(port, hi_hstat) != |
1212 | HS_IDLE_CLOSED) && | 1211 | HS_IDLE_CLOSED) && |
@@ -1415,13 +1414,10 @@ static void sx_enable_rx_interrupts(void *ptr) | |||
1415 | } | 1414 | } |
1416 | 1415 | ||
1417 | /* Jeez. Isn't this simple? */ | 1416 | /* Jeez. Isn't this simple? */ |
1418 | static int sx_get_CD(void *ptr) | 1417 | static int sx_carrier_raised(struct tty_port *port) |
1419 | { | 1418 | { |
1420 | struct sx_port *port = ptr; | 1419 | struct sx_port *sp = container_of(port, struct sx_port, gs.port); |
1421 | func_enter2(); | 1420 | return ((sx_read_channel_byte(sp, hi_ip) & IP_DCD) != 0); |
1422 | |||
1423 | func_exit(); | ||
1424 | return ((sx_read_channel_byte(port, hi_ip) & IP_DCD) != 0); | ||
1425 | } | 1421 | } |
1426 | 1422 | ||
1427 | /* Jeez. Isn't this simple? */ | 1423 | /* Jeez. Isn't this simple? */ |
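
Once sx_port_ops is installed, callers no longer need the driver-private sx_get_CD(): tty_port_carrier_raised() dispatches to port->ops->carrier_raised when a hook is present (and assumes carrier is up when none is). A hypothetical call site:

#include <linux/kernel.h>
#include <linux/tty.h>

static void foo_report_carrier(struct tty_port *port)
{
	pr_debug("DCD is %s\n",
		 tty_port_carrier_raised(port) ? "up" : "down");
}
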
@@ -1536,7 +1532,7 @@ static int sx_open(struct tty_struct *tty, struct file *filp) | |||
1536 | } | 1532 | } |
1537 | /* tty->low_latency = 1; */ | 1533 | /* tty->low_latency = 1; */ |
1538 | 1534 | ||
1539 | port->c_dcd = sx_get_CD(port); | 1535 | port->c_dcd = sx_carrier_raised(&port->gs.port); |
1540 | sx_dprintk(SX_DEBUG_OPEN, "at open: cd=%d\n", port->c_dcd); | 1536 | sx_dprintk(SX_DEBUG_OPEN, "at open: cd=%d\n", port->c_dcd); |
1541 | 1537 | ||
1542 | func_exit(); | 1538 | func_exit(); |
@@ -1945,7 +1941,7 @@ static int sx_ioctl(struct tty_struct *tty, struct file *filp, | |||
1945 | 1941 | ||
1946 | static void sx_throttle(struct tty_struct *tty) | 1942 | static void sx_throttle(struct tty_struct *tty) |
1947 | { | 1943 | { |
1948 | struct sx_port *port = (struct sx_port *)tty->driver_data; | 1944 | struct sx_port *port = tty->driver_data; |
1949 | 1945 | ||
1950 | func_enter2(); | 1946 | func_enter2(); |
1951 | /* If the port is using any type of input flow | 1947 | /* If the port is using any type of input flow |
@@ -1959,7 +1955,7 @@ static void sx_throttle(struct tty_struct *tty) | |||
1959 | 1955 | ||
1960 | static void sx_unthrottle(struct tty_struct *tty) | 1956 | static void sx_unthrottle(struct tty_struct *tty) |
1961 | { | 1957 | { |
1962 | struct sx_port *port = (struct sx_port *)tty->driver_data; | 1958 | struct sx_port *port = tty->driver_data; |
1963 | 1959 | ||
1964 | func_enter2(); | 1960 | func_enter2(); |
1965 | /* Always unthrottle even if flow control is not enabled on | 1961 | /* Always unthrottle even if flow control is not enabled on |
@@ -2354,6 +2350,10 @@ static const struct tty_operations sx_ops = { | |||
2354 | .tiocmset = sx_tiocmset, | 2350 | .tiocmset = sx_tiocmset, |
2355 | }; | 2351 | }; |
2356 | 2352 | ||
2353 | static const struct tty_port_operations sx_port_ops = { | ||
2354 | .carrier_raised = sx_carrier_raised, | ||
2355 | }; | ||
2356 | |||
2357 | static int sx_init_drivers(void) | 2357 | static int sx_init_drivers(void) |
2358 | { | 2358 | { |
2359 | int error; | 2359 | int error; |
@@ -2410,6 +2410,7 @@ static int sx_init_portstructs(int nboards, int nports) | |||
2410 | for (j = 0; j < boards[i].nports; j++) { | 2410 | for (j = 0; j < boards[i].nports; j++) { |
2411 | sx_dprintk(SX_DEBUG_INIT, "initing port %d\n", j); | 2411 | sx_dprintk(SX_DEBUG_INIT, "initing port %d\n", j); |
2412 | tty_port_init(&port->gs.port); | 2412 | tty_port_init(&port->gs.port); |
2413 | port->gs.port.ops = &sx_port_ops; | ||
2413 | port->gs.magic = SX_MAGIC; | 2414 | port->gs.magic = SX_MAGIC; |
2414 | port->gs.close_delay = HZ / 2; | 2415 | port->gs.close_delay = HZ / 2; |
2415 | port->gs.closing_wait = 30 * HZ; | 2416 | port->gs.closing_wait = 30 * HZ; |
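
One detail worth noting above: sx_open() still calls sx_carrier_raised(&port->gs.port) directly rather than going through tty_port_carrier_raised(). With sx_port_ops installed the two are equivalent, since the helper simply dispatches to the same hook.
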
diff --git a/drivers/char/synclink.c b/drivers/char/synclink.c index 500f5176b6ba..b8063d4cad32 100644 --- a/drivers/char/synclink.c +++ b/drivers/char/synclink.c | |||
@@ -977,7 +977,7 @@ static void ldisc_receive_buf(struct tty_struct *tty, | |||
977 | */ | 977 | */ |
978 | static void mgsl_stop(struct tty_struct *tty) | 978 | static void mgsl_stop(struct tty_struct *tty) |
979 | { | 979 | { |
980 | struct mgsl_struct *info = (struct mgsl_struct *)tty->driver_data; | 980 | struct mgsl_struct *info = tty->driver_data; |
981 | unsigned long flags; | 981 | unsigned long flags; |
982 | 982 | ||
983 | if (mgsl_paranoia_check(info, tty->name, "mgsl_stop")) | 983 | if (mgsl_paranoia_check(info, tty->name, "mgsl_stop")) |
@@ -1000,7 +1000,7 @@ static void mgsl_stop(struct tty_struct *tty) | |||
1000 | */ | 1000 | */ |
1001 | static void mgsl_start(struct tty_struct *tty) | 1001 | static void mgsl_start(struct tty_struct *tty) |
1002 | { | 1002 | { |
1003 | struct mgsl_struct *info = (struct mgsl_struct *)tty->driver_data; | 1003 | struct mgsl_struct *info = tty->driver_data; |
1004 | unsigned long flags; | 1004 | unsigned long flags; |
1005 | 1005 | ||
1006 | if (mgsl_paranoia_check(info, tty->name, "mgsl_start")) | 1006 | if (mgsl_paranoia_check(info, tty->name, "mgsl_start")) |
@@ -2057,7 +2057,7 @@ static int mgsl_put_char(struct tty_struct *tty, unsigned char ch) | |||
2057 | */ | 2057 | */ |
2058 | static void mgsl_flush_chars(struct tty_struct *tty) | 2058 | static void mgsl_flush_chars(struct tty_struct *tty) |
2059 | { | 2059 | { |
2060 | struct mgsl_struct *info = (struct mgsl_struct *)tty->driver_data; | 2060 | struct mgsl_struct *info = tty->driver_data; |
2061 | unsigned long flags; | 2061 | unsigned long flags; |
2062 | 2062 | ||
2063 | if ( debug_level >= DEBUG_LEVEL_INFO ) | 2063 | if ( debug_level >= DEBUG_LEVEL_INFO ) |
@@ -2109,7 +2109,7 @@ static int mgsl_write(struct tty_struct * tty, | |||
2109 | const unsigned char *buf, int count) | 2109 | const unsigned char *buf, int count) |
2110 | { | 2110 | { |
2111 | int c, ret = 0; | 2111 | int c, ret = 0; |
2112 | struct mgsl_struct *info = (struct mgsl_struct *)tty->driver_data; | 2112 | struct mgsl_struct *info = tty->driver_data; |
2113 | unsigned long flags; | 2113 | unsigned long flags; |
2114 | 2114 | ||
2115 | if ( debug_level >= DEBUG_LEVEL_INFO ) | 2115 | if ( debug_level >= DEBUG_LEVEL_INFO ) |
@@ -2232,7 +2232,7 @@ cleanup: | |||
2232 | */ | 2232 | */ |
2233 | static int mgsl_write_room(struct tty_struct *tty) | 2233 | static int mgsl_write_room(struct tty_struct *tty) |
2234 | { | 2234 | { |
2235 | struct mgsl_struct *info = (struct mgsl_struct *)tty->driver_data; | 2235 | struct mgsl_struct *info = tty->driver_data; |
2236 | int ret; | 2236 | int ret; |
2237 | 2237 | ||
2238 | if (mgsl_paranoia_check(info, tty->name, "mgsl_write_room")) | 2238 | if (mgsl_paranoia_check(info, tty->name, "mgsl_write_room")) |
@@ -2267,7 +2267,7 @@ static int mgsl_write_room(struct tty_struct *tty) | |||
2267 | */ | 2267 | */ |
2268 | static int mgsl_chars_in_buffer(struct tty_struct *tty) | 2268 | static int mgsl_chars_in_buffer(struct tty_struct *tty) |
2269 | { | 2269 | { |
2270 | struct mgsl_struct *info = (struct mgsl_struct *)tty->driver_data; | 2270 | struct mgsl_struct *info = tty->driver_data; |
2271 | 2271 | ||
2272 | if (debug_level >= DEBUG_LEVEL_INFO) | 2272 | if (debug_level >= DEBUG_LEVEL_INFO) |
2273 | printk("%s(%d):mgsl_chars_in_buffer(%s)\n", | 2273 | printk("%s(%d):mgsl_chars_in_buffer(%s)\n", |
@@ -2301,7 +2301,7 @@ static int mgsl_chars_in_buffer(struct tty_struct *tty) | |||
2301 | */ | 2301 | */ |
2302 | static void mgsl_flush_buffer(struct tty_struct *tty) | 2302 | static void mgsl_flush_buffer(struct tty_struct *tty) |
2303 | { | 2303 | { |
2304 | struct mgsl_struct *info = (struct mgsl_struct *)tty->driver_data; | 2304 | struct mgsl_struct *info = tty->driver_data; |
2305 | unsigned long flags; | 2305 | unsigned long flags; |
2306 | 2306 | ||
2307 | if (debug_level >= DEBUG_LEVEL_INFO) | 2307 | if (debug_level >= DEBUG_LEVEL_INFO) |
@@ -2329,7 +2329,7 @@ static void mgsl_flush_buffer(struct tty_struct *tty) | |||
2329 | */ | 2329 | */ |
2330 | static void mgsl_send_xchar(struct tty_struct *tty, char ch) | 2330 | static void mgsl_send_xchar(struct tty_struct *tty, char ch) |
2331 | { | 2331 | { |
2332 | struct mgsl_struct *info = (struct mgsl_struct *)tty->driver_data; | 2332 | struct mgsl_struct *info = tty->driver_data; |
2333 | unsigned long flags; | 2333 | unsigned long flags; |
2334 | 2334 | ||
2335 | if (debug_level >= DEBUG_LEVEL_INFO) | 2335 | if (debug_level >= DEBUG_LEVEL_INFO) |
@@ -2358,7 +2358,7 @@ static void mgsl_send_xchar(struct tty_struct *tty, char ch) | |||
2358 | */ | 2358 | */ |
2359 | static void mgsl_throttle(struct tty_struct * tty) | 2359 | static void mgsl_throttle(struct tty_struct * tty) |
2360 | { | 2360 | { |
2361 | struct mgsl_struct *info = (struct mgsl_struct *)tty->driver_data; | 2361 | struct mgsl_struct *info = tty->driver_data; |
2362 | unsigned long flags; | 2362 | unsigned long flags; |
2363 | 2363 | ||
2364 | if (debug_level >= DEBUG_LEVEL_INFO) | 2364 | if (debug_level >= DEBUG_LEVEL_INFO) |
@@ -2388,7 +2388,7 @@ static void mgsl_throttle(struct tty_struct * tty) | |||
2388 | */ | 2388 | */ |
2389 | static void mgsl_unthrottle(struct tty_struct * tty) | 2389 | static void mgsl_unthrottle(struct tty_struct * tty) |
2390 | { | 2390 | { |
2391 | struct mgsl_struct *info = (struct mgsl_struct *)tty->driver_data; | 2391 | struct mgsl_struct *info = tty->driver_data; |
2392 | unsigned long flags; | 2392 | unsigned long flags; |
2393 | 2393 | ||
2394 | if (debug_level >= DEBUG_LEVEL_INFO) | 2394 | if (debug_level >= DEBUG_LEVEL_INFO) |
@@ -2841,7 +2841,7 @@ static int modem_input_wait(struct mgsl_struct *info,int arg) | |||
2841 | */ | 2841 | */ |
2842 | static int tiocmget(struct tty_struct *tty, struct file *file) | 2842 | static int tiocmget(struct tty_struct *tty, struct file *file) |
2843 | { | 2843 | { |
2844 | struct mgsl_struct *info = (struct mgsl_struct *)tty->driver_data; | 2844 | struct mgsl_struct *info = tty->driver_data; |
2845 | unsigned int result; | 2845 | unsigned int result; |
2846 | unsigned long flags; | 2846 | unsigned long flags; |
2847 | 2847 | ||
@@ -2867,7 +2867,7 @@ static int tiocmget(struct tty_struct *tty, struct file *file) | |||
2867 | static int tiocmset(struct tty_struct *tty, struct file *file, | 2867 | static int tiocmset(struct tty_struct *tty, struct file *file, |
2868 | unsigned int set, unsigned int clear) | 2868 | unsigned int set, unsigned int clear) |
2869 | { | 2869 | { |
2870 | struct mgsl_struct *info = (struct mgsl_struct *)tty->driver_data; | 2870 | struct mgsl_struct *info = tty->driver_data; |
2871 | unsigned long flags; | 2871 | unsigned long flags; |
2872 | 2872 | ||
2873 | if (debug_level >= DEBUG_LEVEL_INFO) | 2873 | if (debug_level >= DEBUG_LEVEL_INFO) |
@@ -2898,7 +2898,7 @@ static int tiocmset(struct tty_struct *tty, struct file *file, | |||
2898 | */ | 2898 | */ |
2899 | static int mgsl_break(struct tty_struct *tty, int break_state) | 2899 | static int mgsl_break(struct tty_struct *tty, int break_state) |
2900 | { | 2900 | { |
2901 | struct mgsl_struct * info = (struct mgsl_struct *)tty->driver_data; | 2901 | struct mgsl_struct * info = tty->driver_data; |
2902 | unsigned long flags; | 2902 | unsigned long flags; |
2903 | 2903 | ||
2904 | if (debug_level >= DEBUG_LEVEL_INFO) | 2904 | if (debug_level >= DEBUG_LEVEL_INFO) |
@@ -2932,7 +2932,7 @@ static int mgsl_break(struct tty_struct *tty, int break_state) | |||
2932 | static int mgsl_ioctl(struct tty_struct *tty, struct file * file, | 2932 | static int mgsl_ioctl(struct tty_struct *tty, struct file * file, |
2933 | unsigned int cmd, unsigned long arg) | 2933 | unsigned int cmd, unsigned long arg) |
2934 | { | 2934 | { |
2935 | struct mgsl_struct * info = (struct mgsl_struct *)tty->driver_data; | 2935 | struct mgsl_struct * info = tty->driver_data; |
2936 | int ret; | 2936 | int ret; |
2937 | 2937 | ||
2938 | if (debug_level >= DEBUG_LEVEL_INFO) | 2938 | if (debug_level >= DEBUG_LEVEL_INFO) |
@@ -3042,7 +3042,7 @@ static int mgsl_ioctl_common(struct mgsl_struct *info, unsigned int cmd, unsigne | |||
3042 | */ | 3042 | */ |
3043 | static void mgsl_set_termios(struct tty_struct *tty, struct ktermios *old_termios) | 3043 | static void mgsl_set_termios(struct tty_struct *tty, struct ktermios *old_termios) |
3044 | { | 3044 | { |
3045 | struct mgsl_struct *info = (struct mgsl_struct *)tty->driver_data; | 3045 | struct mgsl_struct *info = tty->driver_data; |
3046 | unsigned long flags; | 3046 | unsigned long flags; |
3047 | 3047 | ||
3048 | if (debug_level >= DEBUG_LEVEL_INFO) | 3048 | if (debug_level >= DEBUG_LEVEL_INFO) |
@@ -3096,7 +3096,7 @@ static void mgsl_set_termios(struct tty_struct *tty, struct ktermios *old_termio | |||
3096 | */ | 3096 | */ |
3097 | static void mgsl_close(struct tty_struct *tty, struct file * filp) | 3097 | static void mgsl_close(struct tty_struct *tty, struct file * filp) |
3098 | { | 3098 | { |
3099 | struct mgsl_struct * info = (struct mgsl_struct *)tty->driver_data; | 3099 | struct mgsl_struct * info = tty->driver_data; |
3100 | 3100 | ||
3101 | if (mgsl_paranoia_check(info, tty->name, "mgsl_close")) | 3101 | if (mgsl_paranoia_check(info, tty->name, "mgsl_close")) |
3102 | return; | 3102 | return; |
@@ -3104,70 +3104,18 @@ static void mgsl_close(struct tty_struct *tty, struct file * filp) | |||
3104 | if (debug_level >= DEBUG_LEVEL_INFO) | 3104 | if (debug_level >= DEBUG_LEVEL_INFO) |
3105 | printk("%s(%d):mgsl_close(%s) entry, count=%d\n", | 3105 | printk("%s(%d):mgsl_close(%s) entry, count=%d\n", |
3106 | __FILE__,__LINE__, info->device_name, info->port.count); | 3106 | __FILE__,__LINE__, info->device_name, info->port.count); |
3107 | |||
3108 | if (!info->port.count) | ||
3109 | return; | ||
3110 | 3107 | ||
3111 | if (tty_hung_up_p(filp)) | 3108 | if (tty_port_close_start(&info->port, tty, filp) == 0) |
3112 | goto cleanup; | 3109 | goto cleanup; |
3113 | 3110 | ||
3114 | if ((tty->count == 1) && (info->port.count != 1)) { | ||
3115 | /* | ||
3116 | * tty->count is 1 and the tty structure will be freed. | ||
3117 | * info->port.count should be one in this case. | ||
3118 | * if it's not, correct it so that the port is shutdown. | ||
3119 | */ | ||
3120 | printk("mgsl_close: bad refcount; tty->count is 1, " | ||
3121 | "info->port.count is %d\n", info->port.count); | ||
3122 | info->port.count = 1; | ||
3123 | } | ||
3124 | |||
3125 | info->port.count--; | ||
3126 | |||
3127 | /* if at least one open remaining, leave hardware active */ | ||
3128 | if (info->port.count) | ||
3129 | goto cleanup; | ||
3130 | |||
3131 | info->port.flags |= ASYNC_CLOSING; | ||
3132 | |||
3133 | /* set tty->closing to notify line discipline to | ||
3134 | * only process XON/XOFF characters. Only the N_TTY | ||
3135 | * discipline appears to use this (ppp does not). | ||
3136 | */ | ||
3137 | tty->closing = 1; | ||
3138 | |||
3139 | /* wait for transmit data to clear all layers */ | ||
3140 | |||
3141 | if (info->port.closing_wait != ASYNC_CLOSING_WAIT_NONE) { | ||
3142 | if (debug_level >= DEBUG_LEVEL_INFO) | ||
3143 | printk("%s(%d):mgsl_close(%s) calling tty_wait_until_sent\n", | ||
3144 | __FILE__,__LINE__, info->device_name ); | ||
3145 | tty_wait_until_sent(tty, info->port.closing_wait); | ||
3146 | } | ||
3147 | |||
3148 | if (info->port.flags & ASYNC_INITIALIZED) | 3111 | if (info->port.flags & ASYNC_INITIALIZED) |
3149 | mgsl_wait_until_sent(tty, info->timeout); | 3112 | mgsl_wait_until_sent(tty, info->timeout); |
3150 | |||
3151 | mgsl_flush_buffer(tty); | 3113 | mgsl_flush_buffer(tty); |
3152 | |||
3153 | tty_ldisc_flush(tty); | 3114 | tty_ldisc_flush(tty); |
3154 | |||
3155 | shutdown(info); | 3115 | shutdown(info); |
3156 | 3116 | ||
3157 | tty->closing = 0; | 3117 | tty_port_close_end(&info->port, tty); |
3158 | info->port.tty = NULL; | 3118 | info->port.tty = NULL; |
3159 | |||
3160 | if (info->port.blocked_open) { | ||
3161 | if (info->port.close_delay) { | ||
3162 | msleep_interruptible(jiffies_to_msecs(info->port.close_delay)); | ||
3163 | } | ||
3164 | wake_up_interruptible(&info->port.open_wait); | ||
3165 | } | ||
3166 | |||
3167 | info->port.flags &= ~(ASYNC_NORMAL_ACTIVE|ASYNC_CLOSING); | ||
3168 | |||
3169 | wake_up_interruptible(&info->port.close_wait); | ||
3170 | |||
3171 | cleanup: | 3119 | cleanup: |
3172 | if (debug_level >= DEBUG_LEVEL_INFO) | 3120 | if (debug_level >= DEBUG_LEVEL_INFO) |
3173 | printk("%s(%d):mgsl_close(%s) exit, count=%d\n", __FILE__,__LINE__, | 3121 | printk("%s(%d):mgsl_close(%s) exit, count=%d\n", __FILE__,__LINE__, |
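
The mgsl_close() conversion shows how much boilerplate the helpers absorb: tty_port_close_start() now covers the tty_hung_up_p() check, the "bad refcount" repair, the ASYNC_CLOSING and tty->closing bookkeeping and the closing_wait drain, while tty_port_close_end() covers the close_delay sleep, the blocked_open wakeup and the final flag clearing, leaving the driver with just its hardware shutdown.
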
@@ -3188,7 +3136,7 @@ cleanup: | |||
3188 | */ | 3136 | */ |
3189 | static void mgsl_wait_until_sent(struct tty_struct *tty, int timeout) | 3137 | static void mgsl_wait_until_sent(struct tty_struct *tty, int timeout) |
3190 | { | 3138 | { |
3191 | struct mgsl_struct * info = (struct mgsl_struct *)tty->driver_data; | 3139 | struct mgsl_struct * info = tty->driver_data; |
3192 | unsigned long orig_jiffies, char_time; | 3140 | unsigned long orig_jiffies, char_time; |
3193 | 3141 | ||
3194 | if (!info ) | 3142 | if (!info ) |
@@ -3261,7 +3209,7 @@ exit: | |||
3261 | */ | 3209 | */ |
3262 | static void mgsl_hangup(struct tty_struct *tty) | 3210 | static void mgsl_hangup(struct tty_struct *tty) |
3263 | { | 3211 | { |
3264 | struct mgsl_struct * info = (struct mgsl_struct *)tty->driver_data; | 3212 | struct mgsl_struct * info = tty->driver_data; |
3265 | 3213 | ||
3266 | if (debug_level >= DEBUG_LEVEL_INFO) | 3214 | if (debug_level >= DEBUG_LEVEL_INFO) |
3267 | printk("%s(%d):mgsl_hangup(%s)\n", | 3215 | printk("%s(%d):mgsl_hangup(%s)\n", |
@@ -3281,6 +3229,35 @@ static void mgsl_hangup(struct tty_struct *tty) | |||
3281 | 3229 | ||
3282 | } /* end of mgsl_hangup() */ | 3230 | } /* end of mgsl_hangup() */ |
3283 | 3231 | ||
3232 | /* | ||
3233 | * carrier_raised() | ||
3234 | * | ||
3235 | * Return true if carrier is raised | ||
3236 | */ | ||
3237 | |||
3238 | static int carrier_raised(struct tty_port *port) | ||
3239 | { | ||
3240 | unsigned long flags; | ||
3241 | struct mgsl_struct *info = container_of(port, struct mgsl_struct, port); | ||
3242 | |||
3243 | spin_lock_irqsave(&info->irq_spinlock, flags); | ||
3244 | usc_get_serial_signals(info); | ||
3245 | spin_unlock_irqrestore(&info->irq_spinlock, flags); | ||
3246 | return (info->serial_signals & SerialSignal_DCD) ? 1 : 0; | ||
3247 | } | ||
3248 | |||
3249 | static void raise_dtr_rts(struct tty_port *port) | ||
3250 | { | ||
3251 | struct mgsl_struct *info = container_of(port, struct mgsl_struct, port); | ||
3252 | unsigned long flags; | ||
3253 | |||
3254 | spin_lock_irqsave(&info->irq_spinlock,flags); | ||
3255 | info->serial_signals |= SerialSignal_RTS + SerialSignal_DTR; | ||
3256 | usc_set_serial_signals(info); | ||
3257 | spin_unlock_irqrestore(&info->irq_spinlock,flags); | ||
3258 | } | ||
3259 | |||
3260 | |||
3284 | /* block_til_ready() | 3261 | /* block_til_ready() |
3285 | * | 3262 | * |
3286 | * Block the current process until the specified port | 3263 | * Block the current process until the specified port |
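
The two hooks added above let the following block_til_ready() hunks shrink: the CBAUD branch becomes a single tty_port_raise_dtr_rts() call, which dispatches to the driver's raise_dtr_rts hook, and the carrier poll becomes tty_port_carrier_raised(). A sketch of that dispatch, with hypothetical foo_* names:

#include <linux/tty.h>

static void foo_raise_dtr_rts(struct tty_port *port)
{
	/* the driver would assert DTR and RTS on the hardware here */
}

static const struct tty_port_operations foo_ops = {
	.raise_dtr_rts	= foo_raise_dtr_rts,
};

/* Inside a wait-for-carrier loop, instead of poking signal registers: */
static void foo_wait_step(struct tty_port *port, struct tty_struct *tty)
{
	if (tty->termios->c_cflag & CBAUD)
		tty_port_raise_dtr_rts(port);	/* -> foo_raise_dtr_rts(port) */
}
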
@@ -3302,6 +3279,8 @@ static int block_til_ready(struct tty_struct *tty, struct file * filp, | |||
3302 | bool do_clocal = false; | 3279 | bool do_clocal = false; |
3303 | bool extra_count = false; | 3280 | bool extra_count = false; |
3304 | unsigned long flags; | 3281 | unsigned long flags; |
3282 | int dcd; | ||
3283 | struct tty_port *port = &info->port; | ||
3305 | 3284 | ||
3306 | if (debug_level >= DEBUG_LEVEL_INFO) | 3285 | if (debug_level >= DEBUG_LEVEL_INFO) |
3307 | printk("%s(%d):block_til_ready on %s\n", | 3286 | printk("%s(%d):block_til_ready on %s\n", |
@@ -3309,7 +3288,7 @@ static int block_til_ready(struct tty_struct *tty, struct file * filp, | |||
3309 | 3288 | ||
3310 | if (filp->f_flags & O_NONBLOCK || tty->flags & (1 << TTY_IO_ERROR)){ | 3289 | if (filp->f_flags & O_NONBLOCK || tty->flags & (1 << TTY_IO_ERROR)){ |
3311 | /* nonblock mode is set or port is not enabled */ | 3290 | /* nonblock mode is set or port is not enabled */ |
3312 | info->port.flags |= ASYNC_NORMAL_ACTIVE; | 3291 | port->flags |= ASYNC_NORMAL_ACTIVE; |
3313 | return 0; | 3292 | return 0; |
3314 | } | 3293 | } |
3315 | 3294 | ||
@@ -3318,50 +3297,42 @@ static int block_til_ready(struct tty_struct *tty, struct file * filp, | |||
3318 | 3297 | ||
3319 | /* Wait for carrier detect and the line to become | 3298 | /* Wait for carrier detect and the line to become |
3320 | * free (i.e., not in use by the callout). While we are in | 3299 | * free (i.e., not in use by the callout). While we are in |
3321 | * this loop, info->port.count is dropped by one, so that | 3300 | * this loop, port->count is dropped by one, so that |
3322 | * mgsl_close() knows when to free things. We restore it upon | 3301 | * mgsl_close() knows when to free things. We restore it upon |
3323 | * exit, either normal or abnormal. | 3302 | * exit, either normal or abnormal. |
3324 | */ | 3303 | */ |
3325 | 3304 | ||
3326 | retval = 0; | 3305 | retval = 0; |
3327 | add_wait_queue(&info->port.open_wait, &wait); | 3306 | add_wait_queue(&port->open_wait, &wait); |
3328 | 3307 | ||
3329 | if (debug_level >= DEBUG_LEVEL_INFO) | 3308 | if (debug_level >= DEBUG_LEVEL_INFO) |
3330 | printk("%s(%d):block_til_ready before block on %s count=%d\n", | 3309 | printk("%s(%d):block_til_ready before block on %s count=%d\n", |
3331 | __FILE__,__LINE__, tty->driver->name, info->port.count ); | 3310 | __FILE__,__LINE__, tty->driver->name, port->count ); |
3332 | 3311 | ||
3333 | spin_lock_irqsave(&info->irq_spinlock, flags); | 3312 | spin_lock_irqsave(&info->irq_spinlock, flags); |
3334 | if (!tty_hung_up_p(filp)) { | 3313 | if (!tty_hung_up_p(filp)) { |
3335 | extra_count = true; | 3314 | extra_count = true; |
3336 | info->port.count--; | 3315 | port->count--; |
3337 | } | 3316 | } |
3338 | spin_unlock_irqrestore(&info->irq_spinlock, flags); | 3317 | spin_unlock_irqrestore(&info->irq_spinlock, flags); |
3339 | info->port.blocked_open++; | 3318 | port->blocked_open++; |
3340 | 3319 | ||
3341 | while (1) { | 3320 | while (1) { |
3342 | if (tty->termios->c_cflag & CBAUD) { | 3321 | if (tty->termios->c_cflag & CBAUD) |
3343 | spin_lock_irqsave(&info->irq_spinlock,flags); | 3322 | tty_port_raise_dtr_rts(port); |
3344 | info->serial_signals |= SerialSignal_RTS + SerialSignal_DTR; | ||
3345 | usc_set_serial_signals(info); | ||
3346 | spin_unlock_irqrestore(&info->irq_spinlock,flags); | ||
3347 | } | ||
3348 | 3323 | ||
3349 | set_current_state(TASK_INTERRUPTIBLE); | 3324 | set_current_state(TASK_INTERRUPTIBLE); |
3350 | 3325 | ||
3351 | if (tty_hung_up_p(filp) || !(info->port.flags & ASYNC_INITIALIZED)){ | 3326 | if (tty_hung_up_p(filp) || !(port->flags & ASYNC_INITIALIZED)){ |
3352 | retval = (info->port.flags & ASYNC_HUP_NOTIFY) ? | 3327 | retval = (port->flags & ASYNC_HUP_NOTIFY) ? |
3353 | -EAGAIN : -ERESTARTSYS; | 3328 | -EAGAIN : -ERESTARTSYS; |
3354 | break; | 3329 | break; |
3355 | } | 3330 | } |
3356 | 3331 | ||
3357 | spin_lock_irqsave(&info->irq_spinlock,flags); | 3332 | dcd = tty_port_carrier_raised(&info->port); |
3358 | usc_get_serial_signals(info); | ||
3359 | spin_unlock_irqrestore(&info->irq_spinlock,flags); | ||
3360 | 3333 | ||
3361 | if (!(info->port.flags & ASYNC_CLOSING) && | 3334 | if (!(port->flags & ASYNC_CLOSING) && (do_clocal || dcd)) |
3362 | (do_clocal || (info->serial_signals & SerialSignal_DCD)) ) { | ||
3363 | break; | 3335 | break; |
3364 | } | ||
3365 | 3336 | ||
3366 | if (signal_pending(current)) { | 3337 | if (signal_pending(current)) { |
3367 | retval = -ERESTARTSYS; | 3338 | retval = -ERESTARTSYS; |
@@ -3370,24 +3341,25 @@ static int block_til_ready(struct tty_struct *tty, struct file * filp, | |||
3370 | 3341 | ||
3371 | if (debug_level >= DEBUG_LEVEL_INFO) | 3342 | if (debug_level >= DEBUG_LEVEL_INFO) |
3372 | printk("%s(%d):block_til_ready blocking on %s count=%d\n", | 3343 | printk("%s(%d):block_til_ready blocking on %s count=%d\n", |
3373 | __FILE__,__LINE__, tty->driver->name, info->port.count ); | 3344 | __FILE__,__LINE__, tty->driver->name, port->count ); |
3374 | 3345 | ||
3375 | schedule(); | 3346 | schedule(); |
3376 | } | 3347 | } |
3377 | 3348 | ||
3378 | set_current_state(TASK_RUNNING); | 3349 | set_current_state(TASK_RUNNING); |
3379 | remove_wait_queue(&info->port.open_wait, &wait); | 3350 | remove_wait_queue(&port->open_wait, &wait); |
3380 | 3351 | ||
3352 | /* FIXME: Racy on hangup during close wait */ | ||
3381 | if (extra_count) | 3353 | if (extra_count) |
3382 | info->port.count++; | 3354 | port->count++; |
3383 | info->port.blocked_open--; | 3355 | port->blocked_open--; |
3384 | 3356 | ||
3385 | if (debug_level >= DEBUG_LEVEL_INFO) | 3357 | if (debug_level >= DEBUG_LEVEL_INFO) |
3386 | printk("%s(%d):block_til_ready after blocking on %s count=%d\n", | 3358 | printk("%s(%d):block_til_ready after blocking on %s count=%d\n", |
3387 | __FILE__,__LINE__, tty->driver->name, info->port.count ); | 3359 | __FILE__,__LINE__, tty->driver->name, port->count ); |
3388 | 3360 | ||
3389 | if (!retval) | 3361 | if (!retval) |
3390 | info->port.flags |= ASYNC_NORMAL_ACTIVE; | 3362 | port->flags |= ASYNC_NORMAL_ACTIVE; |
3391 | 3363 | ||
3392 | return retval; | 3364 | return retval; |
3393 | 3365 | ||
@@ -4304,6 +4276,12 @@ static void mgsl_add_device( struct mgsl_struct *info ) | |||
4304 | 4276 | ||
4305 | } /* end of mgsl_add_device() */ | 4277 | } /* end of mgsl_add_device() */ |
4306 | 4278 | ||
4279 | static const struct tty_port_operations mgsl_port_ops = { | ||
4280 | .carrier_raised = carrier_raised, | ||
4281 | .raise_dtr_rts = raise_dtr_rts, | ||
4282 | }; | ||
4283 | |||
4284 | |||
4307 | /* mgsl_allocate_device() | 4285 | /* mgsl_allocate_device() |
4308 | * | 4286 | * |
4309 | * Allocate and initialize a device instance structure | 4287 | * Allocate and initialize a device instance structure |
@@ -4322,6 +4300,7 @@ static struct mgsl_struct* mgsl_allocate_device(void) | |||
4322 | printk("Error can't allocate device instance data\n"); | 4300 | printk("Error can't allocate device instance data\n"); |
4323 | } else { | 4301 | } else { |
4324 | tty_port_init(&info->port); | 4302 | tty_port_init(&info->port); |
4303 | info->port.ops = &mgsl_port_ops; | ||
4325 | info->magic = MGSL_MAGIC; | 4304 | info->magic = MGSL_MAGIC; |
4326 | INIT_WORK(&info->task, mgsl_bh_handler); | 4305 | INIT_WORK(&info->task, mgsl_bh_handler); |
4327 | info->max_frame_size = 4096; | 4306 | info->max_frame_size = 4096; |
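All three synclink drivers receive the same conversion: the hardware-specific DCD probe and DTR/RTS raise move behind struct tty_port_operations, and the core reaches them through the tty_port_carrier_raised()/tty_port_raise_dtr_rts() wrappers added later in this series (see drivers/char/tty_port.c below). A minimal sketch of the pattern, with hypothetical names (my_info, my_get_signals):

	/* Sketch only: my_info and my_get_signals() stand in for the
	 * driver-private equivalents used by the synclink variants. */
	static int my_carrier_raised(struct tty_port *port)
	{
		struct my_info *info = container_of(port, struct my_info, port);
		unsigned long flags;

		spin_lock_irqsave(&info->lock, flags);
		my_get_signals(info);	/* latch modem lines from the hardware */
		spin_unlock_irqrestore(&info->lock, flags);
		return (info->signals & SerialSignal_DCD) ? 1 : 0;
	}

	static const struct tty_port_operations my_port_ops = {
		.carrier_raised = my_carrier_raised,
		/* .raise_dtr_rts left NULL: tty_port_raise_dtr_rts() then
		 * simply does nothing for this port */
	};

	static struct my_info *my_alloc_dev(void)
	{
		struct my_info *info = kzalloc(sizeof(*info), GFP_KERNEL);

		if (info) {
			tty_port_init(&info->port);
			info->port.ops = &my_port_ops;	/* wire up the callbacks */
		}
		return info;
	}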
diff --git a/drivers/char/synclink_gt.c b/drivers/char/synclink_gt.c index 08911ed66494..53544e21f191 100644 --- a/drivers/char/synclink_gt.c +++ b/drivers/char/synclink_gt.c | |||
@@ -720,44 +720,9 @@ static void close(struct tty_struct *tty, struct file *filp) | |||
720 | return; | 720 | return; |
721 | DBGINFO(("%s close entry, count=%d\n", info->device_name, info->port.count)); | 721 | DBGINFO(("%s close entry, count=%d\n", info->device_name, info->port.count)); |
722 | 722 | ||
723 | if (!info->port.count) | 723 | if (tty_port_close_start(&info->port, tty, filp) == 0) |
724 | return; | ||
725 | |||
726 | if (tty_hung_up_p(filp)) | ||
727 | goto cleanup; | ||
728 | |||
729 | if ((tty->count == 1) && (info->port.count != 1)) { | ||
730 | /* | ||
731 | * tty->count is 1 and the tty structure will be freed. | ||
732 | * info->port.count should be one in this case. | ||
733 | * if it's not, correct it so that the port is shutdown. | ||
734 | */ | ||
735 | DBGERR(("%s close: bad refcount; tty->count=1, " | ||
736 | "info->port.count=%d\n", info->device_name, info->port.count)); | ||
737 | info->port.count = 1; | ||
738 | } | ||
739 | |||
740 | info->port.count--; | ||
741 | |||
742 | /* if at least one open remaining, leave hardware active */ | ||
743 | if (info->port.count) | ||
744 | goto cleanup; | 724 | goto cleanup; |
745 | 725 | ||
746 | info->port.flags |= ASYNC_CLOSING; | ||
747 | |||
748 | /* set tty->closing to notify line discipline to | ||
749 | * only process XON/XOFF characters. Only the N_TTY | ||
750 | * discipline appears to use this (ppp does not). | ||
751 | */ | ||
752 | tty->closing = 1; | ||
753 | |||
754 | /* wait for transmit data to clear all layers */ | ||
755 | |||
756 | if (info->port.closing_wait != ASYNC_CLOSING_WAIT_NONE) { | ||
757 | DBGINFO(("%s call tty_wait_until_sent\n", info->device_name)); | ||
758 | tty_wait_until_sent(tty, info->port.closing_wait); | ||
759 | } | ||
760 | |||
761 | if (info->port.flags & ASYNC_INITIALIZED) | 726 | if (info->port.flags & ASYNC_INITIALIZED) |
762 | wait_until_sent(tty, info->timeout); | 727 | wait_until_sent(tty, info->timeout); |
763 | flush_buffer(tty); | 728 | flush_buffer(tty); |
@@ -765,20 +730,8 @@ static void close(struct tty_struct *tty, struct file *filp) | |||
765 | 730 | ||
766 | shutdown(info); | 731 | shutdown(info); |
767 | 732 | ||
768 | tty->closing = 0; | 733 | tty_port_close_end(&info->port, tty); |
769 | info->port.tty = NULL; | 734 | info->port.tty = NULL; |
770 | |||
771 | if (info->port.blocked_open) { | ||
772 | if (info->port.close_delay) { | ||
773 | msleep_interruptible(jiffies_to_msecs(info->port.close_delay)); | ||
774 | } | ||
775 | wake_up_interruptible(&info->port.open_wait); | ||
776 | } | ||
777 | |||
778 | info->port.flags &= ~(ASYNC_NORMAL_ACTIVE|ASYNC_CLOSING); | ||
779 | |||
780 | wake_up_interruptible(&info->port.close_wait); | ||
781 | |||
782 | cleanup: | 735 | cleanup: |
783 | DBGINFO(("%s close exit, count=%d\n", tty->driver->name, info->port.count)); | 736 | DBGINFO(("%s close exit, count=%d\n", tty->driver->name, info->port.count)); |
784 | } | 737 | } |
@@ -3132,6 +3085,29 @@ static int tiocmset(struct tty_struct *tty, struct file *file, | |||
3132 | return 0; | 3085 | return 0; |
3133 | } | 3086 | } |
3134 | 3087 | ||
3088 | static int carrier_raised(struct tty_port *port) | ||
3089 | { | ||
3090 | unsigned long flags; | ||
3091 | struct slgt_info *info = container_of(port, struct slgt_info, port); | ||
3092 | |||
3093 | spin_lock_irqsave(&info->lock,flags); | ||
3094 | get_signals(info); | ||
3095 | spin_unlock_irqrestore(&info->lock,flags); | ||
3096 | return (info->signals & SerialSignal_DCD) ? 1 : 0; | ||
3097 | } | ||
3098 | |||
3099 | static void raise_dtr_rts(struct tty_port *port) | ||
3100 | { | ||
3101 | unsigned long flags; | ||
3102 | struct slgt_info *info = container_of(port, struct slgt_info, port); | ||
3103 | |||
3104 | spin_lock_irqsave(&info->lock,flags); | ||
3105 | info->signals |= SerialSignal_RTS + SerialSignal_DTR; | ||
3106 | set_signals(info); | ||
3107 | spin_unlock_irqrestore(&info->lock,flags); | ||
3108 | } | ||
3109 | |||
3110 | |||
3135 | /* | 3111 | /* |
3136 | * block current process until the device is ready to open | 3112 | * block current process until the device is ready to open |
3137 | */ | 3113 | */ |
@@ -3143,12 +3119,14 @@ static int block_til_ready(struct tty_struct *tty, struct file *filp, | |||
3143 | bool do_clocal = false; | 3119 | bool do_clocal = false; |
3144 | bool extra_count = false; | 3120 | bool extra_count = false; |
3145 | unsigned long flags; | 3121 | unsigned long flags; |
3122 | int cd; | ||
3123 | struct tty_port *port = &info->port; | ||
3146 | 3124 | ||
3147 | DBGINFO(("%s block_til_ready\n", tty->driver->name)); | 3125 | DBGINFO(("%s block_til_ready\n", tty->driver->name)); |
3148 | 3126 | ||
3149 | if (filp->f_flags & O_NONBLOCK || tty->flags & (1 << TTY_IO_ERROR)){ | 3127 | if (filp->f_flags & O_NONBLOCK || tty->flags & (1 << TTY_IO_ERROR)){ |
3150 | /* nonblock mode is set or port is not enabled */ | 3128 | /* nonblock mode is set or port is not enabled */ |
3151 | info->port.flags |= ASYNC_NORMAL_ACTIVE; | 3129 | port->flags |= ASYNC_NORMAL_ACTIVE; |
3152 | return 0; | 3130 | return 0; |
3153 | } | 3131 | } |
3154 | 3132 | ||
@@ -3157,46 +3135,38 @@ static int block_til_ready(struct tty_struct *tty, struct file *filp, | |||
3157 | 3135 | ||
3158 | /* Wait for carrier detect and the line to become | 3136 | /* Wait for carrier detect and the line to become |
3159 | * free (i.e., not in use by the callout). While we are in | 3137 | * free (i.e., not in use by the callout). While we are in |
3160 | * this loop, info->port.count is dropped by one, so that | 3138 | * this loop, port->count is dropped by one, so that |
3161 | * close() knows when to free things. We restore it upon | 3139 | * close() knows when to free things. We restore it upon |
3162 | * exit, either normal or abnormal. | 3140 | * exit, either normal or abnormal. |
3163 | */ | 3141 | */ |
3164 | 3142 | ||
3165 | retval = 0; | 3143 | retval = 0; |
3166 | add_wait_queue(&info->port.open_wait, &wait); | 3144 | add_wait_queue(&port->open_wait, &wait); |
3167 | 3145 | ||
3168 | spin_lock_irqsave(&info->lock, flags); | 3146 | spin_lock_irqsave(&info->lock, flags); |
3169 | if (!tty_hung_up_p(filp)) { | 3147 | if (!tty_hung_up_p(filp)) { |
3170 | extra_count = true; | 3148 | extra_count = true; |
3171 | info->port.count--; | 3149 | port->count--; |
3172 | } | 3150 | } |
3173 | spin_unlock_irqrestore(&info->lock, flags); | 3151 | spin_unlock_irqrestore(&info->lock, flags); |
3174 | info->port.blocked_open++; | 3152 | port->blocked_open++; |
3175 | 3153 | ||
3176 | while (1) { | 3154 | while (1) { |
3177 | if ((tty->termios->c_cflag & CBAUD)) { | 3155 | if ((tty->termios->c_cflag & CBAUD)) |
3178 | spin_lock_irqsave(&info->lock,flags); | 3156 | tty_port_raise_dtr_rts(port); |
3179 | info->signals |= SerialSignal_RTS + SerialSignal_DTR; | ||
3180 | set_signals(info); | ||
3181 | spin_unlock_irqrestore(&info->lock,flags); | ||
3182 | } | ||
3183 | 3157 | ||
3184 | set_current_state(TASK_INTERRUPTIBLE); | 3158 | set_current_state(TASK_INTERRUPTIBLE); |
3185 | 3159 | ||
3186 | if (tty_hung_up_p(filp) || !(info->port.flags & ASYNC_INITIALIZED)){ | 3160 | if (tty_hung_up_p(filp) || !(port->flags & ASYNC_INITIALIZED)){ |
3187 | retval = (info->port.flags & ASYNC_HUP_NOTIFY) ? | 3161 | retval = (port->flags & ASYNC_HUP_NOTIFY) ? |
3188 | -EAGAIN : -ERESTARTSYS; | 3162 | -EAGAIN : -ERESTARTSYS; |
3189 | break; | 3163 | break; |
3190 | } | 3164 | } |
3191 | 3165 | ||
3192 | spin_lock_irqsave(&info->lock,flags); | 3166 | cd = tty_port_carrier_raised(port); |
3193 | get_signals(info); | ||
3194 | spin_unlock_irqrestore(&info->lock,flags); | ||
3195 | 3167 | ||
3196 | if (!(info->port.flags & ASYNC_CLOSING) && | 3168 | if (!(port->flags & ASYNC_CLOSING) && (do_clocal || cd)) |
3197 | (do_clocal || (info->signals & SerialSignal_DCD)) ) { | ||
3198 | break; | 3169 | break; |
3199 | } | ||
3200 | 3170 | ||
3201 | if (signal_pending(current)) { | 3171 | if (signal_pending(current)) { |
3202 | retval = -ERESTARTSYS; | 3172 | retval = -ERESTARTSYS; |
@@ -3208,14 +3178,14 @@ static int block_til_ready(struct tty_struct *tty, struct file *filp, | |||
3208 | } | 3178 | } |
3209 | 3179 | ||
3210 | set_current_state(TASK_RUNNING); | 3180 | set_current_state(TASK_RUNNING); |
3211 | remove_wait_queue(&info->port.open_wait, &wait); | 3181 | remove_wait_queue(&port->open_wait, &wait); |
3212 | 3182 | ||
3213 | if (extra_count) | 3183 | if (extra_count) |
3214 | info->port.count++; | 3184 | port->count++; |
3215 | info->port.blocked_open--; | 3185 | port->blocked_open--; |
3216 | 3186 | ||
3217 | if (!retval) | 3187 | if (!retval) |
3218 | info->port.flags |= ASYNC_NORMAL_ACTIVE; | 3188 | port->flags |= ASYNC_NORMAL_ACTIVE; |
3219 | 3189 | ||
3220 | DBGINFO(("%s block_til_ready ready, rc=%d\n", tty->driver->name, retval)); | 3190 | DBGINFO(("%s block_til_ready ready, rc=%d\n", tty->driver->name, retval)); |
3221 | return retval; | 3191 | return retval; |
@@ -3444,6 +3414,11 @@ static void add_device(struct slgt_info *info) | |||
3444 | #endif | 3414 | #endif |
3445 | } | 3415 | } |
3446 | 3416 | ||
3417 | static const struct tty_port_operations slgt_port_ops = { | ||
3418 | .carrier_raised = carrier_raised, | ||
3419 | .raise_dtr_rts = raise_dtr_rts, | ||
3420 | }; | ||
3421 | |||
3447 | /* | 3422 | /* |
3448 | * allocate device instance structure, return NULL on failure | 3423 | * allocate device instance structure, return NULL on failure |
3449 | */ | 3424 | */ |
@@ -3458,6 +3433,7 @@ static struct slgt_info *alloc_dev(int adapter_num, int port_num, struct pci_dev | |||
3458 | driver_name, adapter_num, port_num)); | 3433 | driver_name, adapter_num, port_num)); |
3459 | } else { | 3434 | } else { |
3460 | tty_port_init(&info->port); | 3435 | tty_port_init(&info->port); |
3436 | info->port.ops = &slgt_port_ops; | ||
3461 | info->magic = MGSL_MAGIC; | 3437 | info->magic = MGSL_MAGIC; |
3462 | INIT_WORK(&info->task, bh_handler); | 3438 | INIT_WORK(&info->task, bh_handler); |
3463 | info->max_frame_size = 4096; | 3439 | info->max_frame_size = 4096; |
diff --git a/drivers/char/synclinkmp.c b/drivers/char/synclinkmp.c index 6bdb44f7bec2..7b0c5b2dd263 100644 --- a/drivers/char/synclinkmp.c +++ b/drivers/char/synclinkmp.c | |||
@@ -558,6 +558,7 @@ static void release_resources(SLMP_INFO *info); | |||
558 | 558 | ||
559 | static int startup(SLMP_INFO *info); | 559 | static int startup(SLMP_INFO *info); |
560 | static int block_til_ready(struct tty_struct *tty, struct file * filp,SLMP_INFO *info); | 560 | static int block_til_ready(struct tty_struct *tty, struct file * filp,SLMP_INFO *info); |
561 | static int carrier_raised(struct tty_port *port); | ||
561 | static void shutdown(SLMP_INFO *info); | 562 | static void shutdown(SLMP_INFO *info); |
562 | static void program_hw(SLMP_INFO *info); | 563 | static void program_hw(SLMP_INFO *info); |
563 | static void change_params(SLMP_INFO *info); | 564 | static void change_params(SLMP_INFO *info); |
@@ -800,7 +801,7 @@ cleanup: | |||
800 | */ | 801 | */ |
801 | static void close(struct tty_struct *tty, struct file *filp) | 802 | static void close(struct tty_struct *tty, struct file *filp) |
802 | { | 803 | { |
803 | SLMP_INFO * info = (SLMP_INFO *)tty->driver_data; | 804 | SLMP_INFO * info = tty->driver_data; |
804 | 805 | ||
805 | if (sanity_check(info, tty->name, "close")) | 806 | if (sanity_check(info, tty->name, "close")) |
806 | return; | 807 | return; |
@@ -809,70 +810,18 @@ static void close(struct tty_struct *tty, struct file *filp) | |||
809 | printk("%s(%d):%s close() entry, count=%d\n", | 810 | printk("%s(%d):%s close() entry, count=%d\n", |
810 | __FILE__,__LINE__, info->device_name, info->port.count); | 811 | __FILE__,__LINE__, info->device_name, info->port.count); |
811 | 812 | ||
812 | if (!info->port.count) | 813 | if (tty_port_close_start(&info->port, tty, filp) == 0) |
813 | return; | ||
814 | |||
815 | if (tty_hung_up_p(filp)) | ||
816 | goto cleanup; | ||
817 | |||
818 | if ((tty->count == 1) && (info->port.count != 1)) { | ||
819 | /* | ||
820 | * tty->count is 1 and the tty structure will be freed. | ||
821 | * info->port.count should be one in this case. | ||
822 | * if it's not, correct it so that the port is shutdown. | ||
823 | */ | ||
824 | printk("%s(%d):%s close: bad refcount; tty->count is 1, " | ||
825 | "info->port.count is %d\n", | ||
826 | __FILE__,__LINE__, info->device_name, info->port.count); | ||
827 | info->port.count = 1; | ||
828 | } | ||
829 | |||
830 | info->port.count--; | ||
831 | |||
832 | /* if at least one open remaining, leave hardware active */ | ||
833 | if (info->port.count) | ||
834 | goto cleanup; | 814 | goto cleanup; |
835 | 815 | ||
836 | info->port.flags |= ASYNC_CLOSING; | ||
837 | |||
838 | /* set tty->closing to notify line discipline to | ||
839 | * only process XON/XOFF characters. Only the N_TTY | ||
840 | * discipline appears to use this (ppp does not). | ||
841 | */ | ||
842 | tty->closing = 1; | ||
843 | |||
844 | /* wait for transmit data to clear all layers */ | ||
845 | |||
846 | if (info->port.closing_wait != ASYNC_CLOSING_WAIT_NONE) { | ||
847 | if (debug_level >= DEBUG_LEVEL_INFO) | ||
848 | printk("%s(%d):%s close() calling tty_wait_until_sent\n", | ||
849 | __FILE__,__LINE__, info->device_name ); | ||
850 | tty_wait_until_sent(tty, info->port.closing_wait); | ||
851 | } | ||
852 | |||
853 | if (info->port.flags & ASYNC_INITIALIZED) | 816 | if (info->port.flags & ASYNC_INITIALIZED) |
854 | wait_until_sent(tty, info->timeout); | 817 | wait_until_sent(tty, info->timeout); |
855 | 818 | ||
856 | flush_buffer(tty); | 819 | flush_buffer(tty); |
857 | |||
858 | tty_ldisc_flush(tty); | 820 | tty_ldisc_flush(tty); |
859 | |||
860 | shutdown(info); | 821 | shutdown(info); |
861 | 822 | ||
862 | tty->closing = 0; | 823 | tty_port_close_end(&info->port, tty); |
863 | info->port.tty = NULL; | 824 | info->port.tty = NULL; |
864 | |||
865 | if (info->port.blocked_open) { | ||
866 | if (info->port.close_delay) { | ||
867 | msleep_interruptible(jiffies_to_msecs(info->port.close_delay)); | ||
868 | } | ||
869 | wake_up_interruptible(&info->port.open_wait); | ||
870 | } | ||
871 | |||
872 | info->port.flags &= ~(ASYNC_NORMAL_ACTIVE|ASYNC_CLOSING); | ||
873 | |||
874 | wake_up_interruptible(&info->port.close_wait); | ||
875 | |||
876 | cleanup: | 825 | cleanup: |
877 | if (debug_level >= DEBUG_LEVEL_INFO) | 826 | if (debug_level >= DEBUG_LEVEL_INFO) |
878 | printk("%s(%d):%s close() exit, count=%d\n", __FILE__,__LINE__, | 827 | printk("%s(%d):%s close() exit, count=%d\n", __FILE__,__LINE__, |
@@ -884,7 +833,7 @@ cleanup: | |||
884 | */ | 833 | */ |
885 | static void hangup(struct tty_struct *tty) | 834 | static void hangup(struct tty_struct *tty) |
886 | { | 835 | { |
887 | SLMP_INFO *info = (SLMP_INFO *)tty->driver_data; | 836 | SLMP_INFO *info = tty->driver_data; |
888 | 837 | ||
889 | if (debug_level >= DEBUG_LEVEL_INFO) | 838 | if (debug_level >= DEBUG_LEVEL_INFO) |
890 | printk("%s(%d):%s hangup()\n", | 839 | printk("%s(%d):%s hangup()\n", |
@@ -907,7 +856,7 @@ static void hangup(struct tty_struct *tty) | |||
907 | */ | 856 | */ |
908 | static void set_termios(struct tty_struct *tty, struct ktermios *old_termios) | 857 | static void set_termios(struct tty_struct *tty, struct ktermios *old_termios) |
909 | { | 858 | { |
910 | SLMP_INFO *info = (SLMP_INFO *)tty->driver_data; | 859 | SLMP_INFO *info = tty->driver_data; |
911 | unsigned long flags; | 860 | unsigned long flags; |
912 | 861 | ||
913 | if (debug_level >= DEBUG_LEVEL_INFO) | 862 | if (debug_level >= DEBUG_LEVEL_INFO) |
@@ -960,7 +909,7 @@ static int write(struct tty_struct *tty, | |||
960 | const unsigned char *buf, int count) | 909 | const unsigned char *buf, int count) |
961 | { | 910 | { |
962 | int c, ret = 0; | 911 | int c, ret = 0; |
963 | SLMP_INFO *info = (SLMP_INFO *)tty->driver_data; | 912 | SLMP_INFO *info = tty->driver_data; |
964 | unsigned long flags; | 913 | unsigned long flags; |
965 | 914 | ||
966 | if (debug_level >= DEBUG_LEVEL_INFO) | 915 | if (debug_level >= DEBUG_LEVEL_INFO) |
@@ -1038,7 +987,7 @@ cleanup: | |||
1038 | */ | 987 | */ |
1039 | static int put_char(struct tty_struct *tty, unsigned char ch) | 988 | static int put_char(struct tty_struct *tty, unsigned char ch) |
1040 | { | 989 | { |
1041 | SLMP_INFO *info = (SLMP_INFO *)tty->driver_data; | 990 | SLMP_INFO *info = tty->driver_data; |
1042 | unsigned long flags; | 991 | unsigned long flags; |
1043 | int ret = 0; | 992 | int ret = 0; |
1044 | 993 | ||
@@ -1075,7 +1024,7 @@ static int put_char(struct tty_struct *tty, unsigned char ch) | |||
1075 | */ | 1024 | */ |
1076 | static void send_xchar(struct tty_struct *tty, char ch) | 1025 | static void send_xchar(struct tty_struct *tty, char ch) |
1077 | { | 1026 | { |
1078 | SLMP_INFO *info = (SLMP_INFO *)tty->driver_data; | 1027 | SLMP_INFO *info = tty->driver_data; |
1079 | unsigned long flags; | 1028 | unsigned long flags; |
1080 | 1029 | ||
1081 | if (debug_level >= DEBUG_LEVEL_INFO) | 1030 | if (debug_level >= DEBUG_LEVEL_INFO) |
@@ -1099,7 +1048,7 @@ static void send_xchar(struct tty_struct *tty, char ch) | |||
1099 | */ | 1048 | */ |
1100 | static void wait_until_sent(struct tty_struct *tty, int timeout) | 1049 | static void wait_until_sent(struct tty_struct *tty, int timeout) |
1101 | { | 1050 | { |
1102 | SLMP_INFO * info = (SLMP_INFO *)tty->driver_data; | 1051 | SLMP_INFO * info = tty->driver_data; |
1103 | unsigned long orig_jiffies, char_time; | 1052 | unsigned long orig_jiffies, char_time; |
1104 | 1053 | ||
1105 | if (!info ) | 1054 | if (!info ) |
@@ -1166,7 +1115,7 @@ exit: | |||
1166 | */ | 1115 | */ |
1167 | static int write_room(struct tty_struct *tty) | 1116 | static int write_room(struct tty_struct *tty) |
1168 | { | 1117 | { |
1169 | SLMP_INFO *info = (SLMP_INFO *)tty->driver_data; | 1118 | SLMP_INFO *info = tty->driver_data; |
1170 | int ret; | 1119 | int ret; |
1171 | 1120 | ||
1172 | if (sanity_check(info, tty->name, "write_room")) | 1121 | if (sanity_check(info, tty->name, "write_room")) |
@@ -1193,7 +1142,7 @@ static int write_room(struct tty_struct *tty) | |||
1193 | */ | 1142 | */ |
1194 | static void flush_chars(struct tty_struct *tty) | 1143 | static void flush_chars(struct tty_struct *tty) |
1195 | { | 1144 | { |
1196 | SLMP_INFO *info = (SLMP_INFO *)tty->driver_data; | 1145 | SLMP_INFO *info = tty->driver_data; |
1197 | unsigned long flags; | 1146 | unsigned long flags; |
1198 | 1147 | ||
1199 | if ( debug_level >= DEBUG_LEVEL_INFO ) | 1148 | if ( debug_level >= DEBUG_LEVEL_INFO ) |
@@ -1232,7 +1181,7 @@ static void flush_chars(struct tty_struct *tty) | |||
1232 | */ | 1181 | */ |
1233 | static void flush_buffer(struct tty_struct *tty) | 1182 | static void flush_buffer(struct tty_struct *tty) |
1234 | { | 1183 | { |
1235 | SLMP_INFO *info = (SLMP_INFO *)tty->driver_data; | 1184 | SLMP_INFO *info = tty->driver_data; |
1236 | unsigned long flags; | 1185 | unsigned long flags; |
1237 | 1186 | ||
1238 | if (debug_level >= DEBUG_LEVEL_INFO) | 1187 | if (debug_level >= DEBUG_LEVEL_INFO) |
@@ -1254,7 +1203,7 @@ static void flush_buffer(struct tty_struct *tty) | |||
1254 | */ | 1203 | */ |
1255 | static void tx_hold(struct tty_struct *tty) | 1204 | static void tx_hold(struct tty_struct *tty) |
1256 | { | 1205 | { |
1257 | SLMP_INFO *info = (SLMP_INFO *)tty->driver_data; | 1206 | SLMP_INFO *info = tty->driver_data; |
1258 | unsigned long flags; | 1207 | unsigned long flags; |
1259 | 1208 | ||
1260 | if (sanity_check(info, tty->name, "tx_hold")) | 1209 | if (sanity_check(info, tty->name, "tx_hold")) |
@@ -1274,7 +1223,7 @@ static void tx_hold(struct tty_struct *tty) | |||
1274 | */ | 1223 | */ |
1275 | static void tx_release(struct tty_struct *tty) | 1224 | static void tx_release(struct tty_struct *tty) |
1276 | { | 1225 | { |
1277 | SLMP_INFO *info = (SLMP_INFO *)tty->driver_data; | 1226 | SLMP_INFO *info = tty->driver_data; |
1278 | unsigned long flags; | 1227 | unsigned long flags; |
1279 | 1228 | ||
1280 | if (sanity_check(info, tty->name, "tx_release")) | 1229 | if (sanity_check(info, tty->name, "tx_release")) |
@@ -1304,7 +1253,7 @@ static void tx_release(struct tty_struct *tty) | |||
1304 | static int do_ioctl(struct tty_struct *tty, struct file *file, | 1253 | static int do_ioctl(struct tty_struct *tty, struct file *file, |
1305 | unsigned int cmd, unsigned long arg) | 1254 | unsigned int cmd, unsigned long arg) |
1306 | { | 1255 | { |
1307 | SLMP_INFO *info = (SLMP_INFO *)tty->driver_data; | 1256 | SLMP_INFO *info = tty->driver_data; |
1308 | int error; | 1257 | int error; |
1309 | struct mgsl_icount cnow; /* kernel counter temps */ | 1258 | struct mgsl_icount cnow; /* kernel counter temps */ |
1310 | struct serial_icounter_struct __user *p_cuser; /* user space */ | 1259 | struct serial_icounter_struct __user *p_cuser; /* user space */ |
@@ -1515,7 +1464,7 @@ done: | |||
1515 | */ | 1464 | */ |
1516 | static int chars_in_buffer(struct tty_struct *tty) | 1465 | static int chars_in_buffer(struct tty_struct *tty) |
1517 | { | 1466 | { |
1518 | SLMP_INFO *info = (SLMP_INFO *)tty->driver_data; | 1467 | SLMP_INFO *info = tty->driver_data; |
1519 | 1468 | ||
1520 | if (sanity_check(info, tty->name, "chars_in_buffer")) | 1469 | if (sanity_check(info, tty->name, "chars_in_buffer")) |
1521 | return 0; | 1470 | return 0; |
@@ -1531,7 +1480,7 @@ static int chars_in_buffer(struct tty_struct *tty) | |||
1531 | */ | 1480 | */ |
1532 | static void throttle(struct tty_struct * tty) | 1481 | static void throttle(struct tty_struct * tty) |
1533 | { | 1482 | { |
1534 | SLMP_INFO *info = (SLMP_INFO *)tty->driver_data; | 1483 | SLMP_INFO *info = tty->driver_data; |
1535 | unsigned long flags; | 1484 | unsigned long flags; |
1536 | 1485 | ||
1537 | if (debug_level >= DEBUG_LEVEL_INFO) | 1486 | if (debug_level >= DEBUG_LEVEL_INFO) |
@@ -1556,7 +1505,7 @@ static void throttle(struct tty_struct * tty) | |||
1556 | */ | 1505 | */ |
1557 | static void unthrottle(struct tty_struct * tty) | 1506 | static void unthrottle(struct tty_struct * tty) |
1558 | { | 1507 | { |
1559 | SLMP_INFO *info = (SLMP_INFO *)tty->driver_data; | 1508 | SLMP_INFO *info = tty->driver_data; |
1560 | unsigned long flags; | 1509 | unsigned long flags; |
1561 | 1510 | ||
1562 | if (debug_level >= DEBUG_LEVEL_INFO) | 1511 | if (debug_level >= DEBUG_LEVEL_INFO) |
@@ -1587,7 +1536,7 @@ static void unthrottle(struct tty_struct * tty) | |||
1587 | static int set_break(struct tty_struct *tty, int break_state) | 1536 | static int set_break(struct tty_struct *tty, int break_state) |
1588 | { | 1537 | { |
1589 | unsigned char RegValue; | 1538 | unsigned char RegValue; |
1590 | SLMP_INFO * info = (SLMP_INFO *)tty->driver_data; | 1539 | SLMP_INFO * info = tty->driver_data; |
1591 | unsigned long flags; | 1540 | unsigned long flags; |
1592 | 1541 | ||
1593 | if (debug_level >= DEBUG_LEVEL_INFO) | 1542 | if (debug_level >= DEBUG_LEVEL_INFO) |
@@ -3269,7 +3218,7 @@ static int modem_input_wait(SLMP_INFO *info,int arg) | |||
3269 | */ | 3218 | */ |
3270 | static int tiocmget(struct tty_struct *tty, struct file *file) | 3219 | static int tiocmget(struct tty_struct *tty, struct file *file) |
3271 | { | 3220 | { |
3272 | SLMP_INFO *info = (SLMP_INFO *)tty->driver_data; | 3221 | SLMP_INFO *info = tty->driver_data; |
3273 | unsigned int result; | 3222 | unsigned int result; |
3274 | unsigned long flags; | 3223 | unsigned long flags; |
3275 | 3224 | ||
@@ -3295,7 +3244,7 @@ static int tiocmget(struct tty_struct *tty, struct file *file) | |||
3295 | static int tiocmset(struct tty_struct *tty, struct file *file, | 3244 | static int tiocmset(struct tty_struct *tty, struct file *file, |
3296 | unsigned int set, unsigned int clear) | 3245 | unsigned int set, unsigned int clear) |
3297 | { | 3246 | { |
3298 | SLMP_INFO *info = (SLMP_INFO *)tty->driver_data; | 3247 | SLMP_INFO *info = tty->driver_data; |
3299 | unsigned long flags; | 3248 | unsigned long flags; |
3300 | 3249 | ||
3301 | if (debug_level >= DEBUG_LEVEL_INFO) | 3250 | if (debug_level >= DEBUG_LEVEL_INFO) |
@@ -3318,7 +3267,28 @@ static int tiocmset(struct tty_struct *tty, struct file *file, | |||
3318 | return 0; | 3267 | return 0; |
3319 | } | 3268 | } |
3320 | 3269 | ||
3270 | static int carrier_raised(struct tty_port *port) | ||
3271 | { | ||
3272 | SLMP_INFO *info = container_of(port, SLMP_INFO, port); | ||
3273 | unsigned long flags; | ||
3274 | |||
3275 | spin_lock_irqsave(&info->lock,flags); | ||
3276 | get_signals(info); | ||
3277 | spin_unlock_irqrestore(&info->lock,flags); | ||
3321 | 3278 | ||
3279 | return (info->serial_signals & SerialSignal_DCD) ? 1 : 0; | ||
3280 | } | ||
3281 | |||
3282 | static void raise_dtr_rts(struct tty_port *port) | ||
3283 | { | ||
3284 | SLMP_INFO *info = container_of(port, SLMP_INFO, port); | ||
3285 | unsigned long flags; | ||
3286 | |||
3287 | spin_lock_irqsave(&info->lock,flags); | ||
3288 | info->serial_signals |= SerialSignal_RTS + SerialSignal_DTR; | ||
3289 | set_signals(info); | ||
3290 | spin_unlock_irqrestore(&info->lock,flags); | ||
3291 | } | ||
3322 | 3292 | ||
3323 | /* Block the current process until the specified port is ready to open. | 3293 | /* Block the current process until the specified port is ready to open. |
3324 | */ | 3294 | */ |
@@ -3330,6 +3300,8 @@ static int block_til_ready(struct tty_struct *tty, struct file *filp, | |||
3330 | bool do_clocal = false; | 3300 | bool do_clocal = false; |
3331 | bool extra_count = false; | 3301 | bool extra_count = false; |
3332 | unsigned long flags; | 3302 | unsigned long flags; |
3303 | int cd; | ||
3304 | struct tty_port *port = &info->port; | ||
3333 | 3305 | ||
3334 | if (debug_level >= DEBUG_LEVEL_INFO) | 3306 | if (debug_level >= DEBUG_LEVEL_INFO) |
3335 | printk("%s(%d):%s block_til_ready()\n", | 3307 | printk("%s(%d):%s block_til_ready()\n", |
@@ -3338,7 +3310,7 @@ static int block_til_ready(struct tty_struct *tty, struct file *filp, | |||
3338 | if (filp->f_flags & O_NONBLOCK || tty->flags & (1 << TTY_IO_ERROR)){ | 3310 | if (filp->f_flags & O_NONBLOCK || tty->flags & (1 << TTY_IO_ERROR)){ |
3339 | /* nonblock mode is set or port is not enabled */ | 3311 | /* nonblock mode is set or port is not enabled */ |
3340 | /* just verify that callout device is not active */ | 3312 | /* just verify that callout device is not active */ |
3341 | info->port.flags |= ASYNC_NORMAL_ACTIVE; | 3313 | port->flags |= ASYNC_NORMAL_ACTIVE; |
3342 | return 0; | 3314 | return 0; |
3343 | } | 3315 | } |
3344 | 3316 | ||
@@ -3347,50 +3319,42 @@ static int block_til_ready(struct tty_struct *tty, struct file *filp, | |||
3347 | 3319 | ||
3348 | /* Wait for carrier detect and the line to become | 3320 | /* Wait for carrier detect and the line to become |
3349 | * free (i.e., not in use by the callout). While we are in | 3321 | * free (i.e., not in use by the callout). While we are in |
3350 | * this loop, info->port.count is dropped by one, so that | 3322 | * this loop, port->count is dropped by one, so that |
3351 | * close() knows when to free things. We restore it upon | 3323 | * close() knows when to free things. We restore it upon |
3352 | * exit, either normal or abnormal. | 3324 | * exit, either normal or abnormal. |
3353 | */ | 3325 | */ |
3354 | 3326 | ||
3355 | retval = 0; | 3327 | retval = 0; |
3356 | add_wait_queue(&info->port.open_wait, &wait); | 3328 | add_wait_queue(&port->open_wait, &wait); |
3357 | 3329 | ||
3358 | if (debug_level >= DEBUG_LEVEL_INFO) | 3330 | if (debug_level >= DEBUG_LEVEL_INFO) |
3359 | printk("%s(%d):%s block_til_ready() before block, count=%d\n", | 3331 | printk("%s(%d):%s block_til_ready() before block, count=%d\n", |
3360 | __FILE__,__LINE__, tty->driver->name, info->port.count ); | 3332 | __FILE__,__LINE__, tty->driver->name, port->count ); |
3361 | 3333 | ||
3362 | spin_lock_irqsave(&info->lock, flags); | 3334 | spin_lock_irqsave(&info->lock, flags); |
3363 | if (!tty_hung_up_p(filp)) { | 3335 | if (!tty_hung_up_p(filp)) { |
3364 | extra_count = true; | 3336 | extra_count = true; |
3365 | info->port.count--; | 3337 | port->count--; |
3366 | } | 3338 | } |
3367 | spin_unlock_irqrestore(&info->lock, flags); | 3339 | spin_unlock_irqrestore(&info->lock, flags); |
3368 | info->port.blocked_open++; | 3340 | port->blocked_open++; |
3369 | 3341 | ||
3370 | while (1) { | 3342 | while (1) { |
3371 | if ((tty->termios->c_cflag & CBAUD)) { | 3343 | if (tty->termios->c_cflag & CBAUD) |
3372 | spin_lock_irqsave(&info->lock,flags); | 3344 | tty_port_raise_dtr_rts(port); |
3373 | info->serial_signals |= SerialSignal_RTS + SerialSignal_DTR; | ||
3374 | set_signals(info); | ||
3375 | spin_unlock_irqrestore(&info->lock,flags); | ||
3376 | } | ||
3377 | 3345 | ||
3378 | set_current_state(TASK_INTERRUPTIBLE); | 3346 | set_current_state(TASK_INTERRUPTIBLE); |
3379 | 3347 | ||
3380 | if (tty_hung_up_p(filp) || !(info->port.flags & ASYNC_INITIALIZED)){ | 3348 | if (tty_hung_up_p(filp) || !(port->flags & ASYNC_INITIALIZED)){ |
3381 | retval = (info->port.flags & ASYNC_HUP_NOTIFY) ? | 3349 | retval = (port->flags & ASYNC_HUP_NOTIFY) ? |
3382 | -EAGAIN : -ERESTARTSYS; | 3350 | -EAGAIN : -ERESTARTSYS; |
3383 | break; | 3351 | break; |
3384 | } | 3352 | } |
3385 | 3353 | ||
3386 | spin_lock_irqsave(&info->lock,flags); | 3354 | cd = tty_port_carrier_raised(port); |
3387 | get_signals(info); | ||
3388 | spin_unlock_irqrestore(&info->lock,flags); | ||
3389 | 3355 | ||
3390 | if (!(info->port.flags & ASYNC_CLOSING) && | 3356 | if (!(port->flags & ASYNC_CLOSING) && (do_clocal || cd)) |
3391 | (do_clocal || (info->serial_signals & SerialSignal_DCD)) ) { | ||
3392 | break; | 3357 | break; |
3393 | } | ||
3394 | 3358 | ||
3395 | if (signal_pending(current)) { | 3359 | if (signal_pending(current)) { |
3396 | retval = -ERESTARTSYS; | 3360 | retval = -ERESTARTSYS; |
@@ -3399,24 +3363,24 @@ static int block_til_ready(struct tty_struct *tty, struct file *filp, | |||
3399 | 3363 | ||
3400 | if (debug_level >= DEBUG_LEVEL_INFO) | 3364 | if (debug_level >= DEBUG_LEVEL_INFO) |
3401 | printk("%s(%d):%s block_til_ready() count=%d\n", | 3365 | printk("%s(%d):%s block_til_ready() count=%d\n", |
3402 | __FILE__,__LINE__, tty->driver->name, info->port.count ); | 3366 | __FILE__,__LINE__, tty->driver->name, port->count ); |
3403 | 3367 | ||
3404 | schedule(); | 3368 | schedule(); |
3405 | } | 3369 | } |
3406 | 3370 | ||
3407 | set_current_state(TASK_RUNNING); | 3371 | set_current_state(TASK_RUNNING); |
3408 | remove_wait_queue(&info->port.open_wait, &wait); | 3372 | remove_wait_queue(&port->open_wait, &wait); |
3409 | 3373 | ||
3410 | if (extra_count) | 3374 | if (extra_count) |
3411 | info->port.count++; | 3375 | port->count++; |
3412 | info->port.blocked_open--; | 3376 | port->blocked_open--; |
3413 | 3377 | ||
3414 | if (debug_level >= DEBUG_LEVEL_INFO) | 3378 | if (debug_level >= DEBUG_LEVEL_INFO) |
3415 | printk("%s(%d):%s block_til_ready() after, count=%d\n", | 3379 | printk("%s(%d):%s block_til_ready() after, count=%d\n", |
3416 | __FILE__,__LINE__, tty->driver->name, info->port.count ); | 3380 | __FILE__,__LINE__, tty->driver->name, port->count ); |
3417 | 3381 | ||
3418 | if (!retval) | 3382 | if (!retval) |
3419 | info->port.flags |= ASYNC_NORMAL_ACTIVE; | 3383 | port->flags |= ASYNC_NORMAL_ACTIVE; |
3420 | 3384 | ||
3421 | return retval; | 3385 | return retval; |
3422 | } | 3386 | } |
@@ -3782,6 +3746,11 @@ static void add_device(SLMP_INFO *info) | |||
3782 | #endif | 3746 | #endif |
3783 | } | 3747 | } |
3784 | 3748 | ||
3749 | static const struct tty_port_operations port_ops = { | ||
3750 | .carrier_raised = carrier_raised, | ||
3751 | .raise_dtr_rts = raise_dtr_rts, | ||
3752 | }; | ||
3753 | |||
3785 | /* Allocate and initialize a device instance structure | 3754 | /* Allocate and initialize a device instance structure |
3786 | * | 3755 | * |
3787 | * Return Value: pointer to SLMP_INFO if success, otherwise NULL | 3756 | * Return Value: pointer to SLMP_INFO if success, otherwise NULL |
@@ -3798,6 +3767,7 @@ static SLMP_INFO *alloc_dev(int adapter_num, int port_num, struct pci_dev *pdev) | |||
3798 | __FILE__,__LINE__, adapter_num, port_num); | 3767 | __FILE__,__LINE__, adapter_num, port_num); |
3799 | } else { | 3768 | } else { |
3800 | tty_port_init(&info->port); | 3769 | tty_port_init(&info->port); |
3770 | info->port.ops = &port_ops; | ||
3801 | info->magic = MGSL_MAGIC; | 3771 | info->magic = MGSL_MAGIC; |
3802 | INIT_WORK(&info->task, bh_handler); | 3772 | INIT_WORK(&info->task, bh_handler); |
3803 | info->max_frame_size = 4096; | 3773 | info->max_frame_size = 4096; |
@@ -3940,6 +3910,7 @@ static const struct tty_operations ops = { | |||
3940 | .tiocmset = tiocmset, | 3910 | .tiocmset = tiocmset, |
3941 | }; | 3911 | }; |
3942 | 3912 | ||
3913 | |||
3943 | static void synclinkmp_cleanup(void) | 3914 | static void synclinkmp_cleanup(void) |
3944 | { | 3915 | { |
3945 | int rc; | 3916 | int rc; |
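With the open-count bookkeeping and ASYNC_CLOSING handling factored out, synclink, synclink_gt and synclinkmp all converge on the same close() shape. A hedged sketch of that skeleton (hypothetical names; tty_port_close_start()/tty_port_close_end() are the helpers added below in drivers/char/tty_port.c):

	static void my_close(struct tty_struct *tty, struct file *filp)
	{
		struct my_info *info = tty->driver_data;

		/* 0 means nothing to do: hung up, or other opens remain */
		if (tty_port_close_start(&info->port, tty, filp) == 0)
			return;

		if (info->port.flags & ASYNC_INITIALIZED)
			wait_until_sent(tty, info->timeout);	/* drain hardware */
		flush_buffer(tty);
		tty_ldisc_flush(tty);
		shutdown(info);			/* driver hardware teardown */

		tty_port_close_end(&info->port, tty);	/* close_delay, wakeups, flags */
		info->port.tty = NULL;
	}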
diff --git a/drivers/char/tty_io.c b/drivers/char/tty_io.c index db15f9ba7c0b..d33e5ab06177 100644 --- a/drivers/char/tty_io.c +++ b/drivers/char/tty_io.c | |||
@@ -1111,9 +1111,7 @@ void tty_write_message(struct tty_struct *tty, char *msg) | |||
1111 | * Locks the line discipline as required | 1111 | * Locks the line discipline as required |
1112 | * Writes to the tty driver are serialized by the atomic_write_lock | 1112 | * Writes to the tty driver are serialized by the atomic_write_lock |
1113 | * and are then processed in chunks to the device. The line discipline | 1113 | * and are then processed in chunks to the device. The line discipline |
1114 | * write method will not be involked in parallel for each device | 1114 | * write method will not be invoked in parallel for each device. |
1115 | * The line discipline write method is called under the big | ||
1116 | * kernel lock for historical reasons. New code should not rely on this. | ||
1117 | */ | 1115 | */ |
1118 | 1116 | ||
1119 | static ssize_t tty_write(struct file *file, const char __user *buf, | 1117 | static ssize_t tty_write(struct file *file, const char __user *buf, |
@@ -1213,7 +1211,7 @@ static void tty_line_name(struct tty_driver *driver, int index, char *p) | |||
1213 | * be held until the 'fast-open' is also done. Will change once we | 1211 | * be held until the 'fast-open' is also done. Will change once we |
1214 | * have refcounting in the driver and per driver locking | 1212 | * have refcounting in the driver and per driver locking |
1215 | */ | 1213 | */ |
1216 | struct tty_struct *tty_driver_lookup_tty(struct tty_driver *driver, | 1214 | static struct tty_struct *tty_driver_lookup_tty(struct tty_driver *driver, |
1217 | struct inode *inode, int idx) | 1215 | struct inode *inode, int idx) |
1218 | { | 1216 | { |
1219 | struct tty_struct *tty; | 1217 | struct tty_struct *tty; |
@@ -2050,7 +2048,6 @@ static int tiocgwinsz(struct tty_struct *tty, struct winsize __user *arg) | |||
2050 | /** | 2048 | /** |
2051 | * tty_do_resize - resize event | 2049 | * tty_do_resize - resize event |
2052 | * @tty: tty being resized | 2050 | * @tty: tty being resized |
2053 | * @real_tty: real tty (not the same as tty if using a pty/tty pair) | ||
2054 | * @rows: rows (character) | 2051 | * @rows: rows (character) |
2055 | * @cols: cols (character) | 2052 | * @cols: cols (character) |
2056 | * | 2053 | * |
@@ -2058,41 +2055,34 @@ static int tiocgwinsz(struct tty_struct *tty, struct winsize __user *arg) | |||
2058 | * perform a terminal resize correctly | 2055 | * perform a terminal resize correctly |
2059 | */ | 2056 | */ |
2060 | 2057 | ||
2061 | int tty_do_resize(struct tty_struct *tty, struct tty_struct *real_tty, | 2058 | int tty_do_resize(struct tty_struct *tty, struct winsize *ws) |
2062 | struct winsize *ws) | ||
2063 | { | 2059 | { |
2064 | struct pid *pgrp, *rpgrp; | 2060 | struct pid *pgrp; |
2065 | unsigned long flags; | 2061 | unsigned long flags; |
2066 | 2062 | ||
2067 | /* For a PTY we need to lock the tty side */ | 2063 | /* Lock the tty */ |
2068 | mutex_lock(&real_tty->termios_mutex); | 2064 | mutex_lock(&tty->termios_mutex); |
2069 | if (!memcmp(ws, &real_tty->winsize, sizeof(*ws))) | 2065 | if (!memcmp(ws, &tty->winsize, sizeof(*ws))) |
2070 | goto done; | 2066 | goto done; |
2071 | /* Get the PID values and reference them so we can | 2067 | /* Get the PID values and reference them so we can |
2072 | avoid holding the tty ctrl lock while sending signals */ | 2068 | avoid holding the tty ctrl lock while sending signals */ |
2073 | spin_lock_irqsave(&tty->ctrl_lock, flags); | 2069 | spin_lock_irqsave(&tty->ctrl_lock, flags); |
2074 | pgrp = get_pid(tty->pgrp); | 2070 | pgrp = get_pid(tty->pgrp); |
2075 | rpgrp = get_pid(real_tty->pgrp); | ||
2076 | spin_unlock_irqrestore(&tty->ctrl_lock, flags); | 2071 | spin_unlock_irqrestore(&tty->ctrl_lock, flags); |
2077 | 2072 | ||
2078 | if (pgrp) | 2073 | if (pgrp) |
2079 | kill_pgrp(pgrp, SIGWINCH, 1); | 2074 | kill_pgrp(pgrp, SIGWINCH, 1); |
2080 | if (rpgrp != pgrp && rpgrp) | ||
2081 | kill_pgrp(rpgrp, SIGWINCH, 1); | ||
2082 | |||
2083 | put_pid(pgrp); | 2075 | put_pid(pgrp); |
2084 | put_pid(rpgrp); | ||
2085 | 2076 | ||
2086 | tty->winsize = *ws; | 2077 | tty->winsize = *ws; |
2087 | real_tty->winsize = *ws; | ||
2088 | done: | 2078 | done: |
2089 | mutex_unlock(&real_tty->termios_mutex); | 2079 | mutex_unlock(&tty->termios_mutex); |
2090 | return 0; | 2080 | return 0; |
2091 | } | 2081 | } |
2092 | 2082 | ||
2093 | /** | 2083 | /** |
2094 | * tiocswinsz - implement window size set ioctl | 2084 | * tiocswinsz - implement window size set ioctl |
2095 | * @tty: tty | 2085 | * @tty: tty side of tty |
2096 | * @arg: user buffer for result | 2086 | * @arg: user buffer for result |
2097 | * | 2087 | * |
2098 | * Copies the user idea of the window size to the kernel. Traditionally | 2088 | * Copies the user idea of the window size to the kernel. Traditionally |
@@ -2105,17 +2095,16 @@ done: | |||
2105 | * then calls into the default method. | 2095 | * then calls into the default method. |
2106 | */ | 2096 | */ |
2107 | 2097 | ||
2108 | static int tiocswinsz(struct tty_struct *tty, struct tty_struct *real_tty, | 2098 | static int tiocswinsz(struct tty_struct *tty, struct winsize __user *arg) |
2109 | struct winsize __user *arg) | ||
2110 | { | 2099 | { |
2111 | struct winsize tmp_ws; | 2100 | struct winsize tmp_ws; |
2112 | if (copy_from_user(&tmp_ws, arg, sizeof(*arg))) | 2101 | if (copy_from_user(&tmp_ws, arg, sizeof(*arg))) |
2113 | return -EFAULT; | 2102 | return -EFAULT; |
2114 | 2103 | ||
2115 | if (tty->ops->resize) | 2104 | if (tty->ops->resize) |
2116 | return tty->ops->resize(tty, real_tty, &tmp_ws); | 2105 | return tty->ops->resize(tty, &tmp_ws); |
2117 | else | 2106 | else |
2118 | return tty_do_resize(tty, real_tty, &tmp_ws); | 2107 | return tty_do_resize(tty, &tmp_ws); |
2119 | } | 2108 | } |
2120 | 2109 | ||
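The signature change ripples into any driver that implements the optional resize hook: with the pty pairing handled elsewhere, the method now sees only the tty actually being resized. A sketch under that assumption (hypothetical driver and column limit):

	static int my_resize(struct tty_struct *tty, struct winsize *ws)
	{
		if (ws->ws_col > 132)		/* made-up hardware limit */
			return -EINVAL;
		/* fall through to the default: store the size, send SIGWINCH */
		return tty_do_resize(tty, ws);
	}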
2121 | /** | 2110 | /** |
@@ -2540,7 +2529,7 @@ long tty_ioctl(struct file *file, unsigned int cmd, unsigned long arg) | |||
2540 | case TIOCGWINSZ: | 2529 | case TIOCGWINSZ: |
2541 | return tiocgwinsz(real_tty, p); | 2530 | return tiocgwinsz(real_tty, p); |
2542 | case TIOCSWINSZ: | 2531 | case TIOCSWINSZ: |
2543 | return tiocswinsz(tty, real_tty, p); | 2532 | return tiocswinsz(real_tty, p); |
2544 | case TIOCCONS: | 2533 | case TIOCCONS: |
2545 | return real_tty != tty ? -EINVAL : tioccons(file); | 2534 | return real_tty != tty ? -EINVAL : tioccons(file); |
2546 | case FIONBIO: | 2535 | case FIONBIO: |
@@ -2785,6 +2774,8 @@ void initialize_tty_struct(struct tty_struct *tty, | |||
2785 | INIT_WORK(&tty->hangup_work, do_tty_hangup); | 2774 | INIT_WORK(&tty->hangup_work, do_tty_hangup); |
2786 | mutex_init(&tty->atomic_read_lock); | 2775 | mutex_init(&tty->atomic_read_lock); |
2787 | mutex_init(&tty->atomic_write_lock); | 2776 | mutex_init(&tty->atomic_write_lock); |
2777 | mutex_init(&tty->output_lock); | ||
2778 | mutex_init(&tty->echo_lock); | ||
2788 | spin_lock_init(&tty->read_lock); | 2779 | spin_lock_init(&tty->read_lock); |
2789 | spin_lock_init(&tty->ctrl_lock); | 2780 | spin_lock_init(&tty->ctrl_lock); |
2790 | INIT_LIST_HEAD(&tty->tty_files); | 2781 | INIT_LIST_HEAD(&tty->tty_files); |
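The two new mutexes are consumed by the N_TTY line discipline (not part of this hunk): output_lock serializes multi-byte output processing, echo_lock guards the echo buffer. A rough sketch of the intended pattern, with an illustrative function name:

	static int my_process_output(struct tty_struct *tty,
				     const unsigned char *buf, int n)
	{
		int done;

		/* keep a multi-character output sequence atomic with
		 * respect to concurrent echo processing */
		mutex_lock(&tty->output_lock);
		done = tty->ops->write(tty, buf, n);
		mutex_unlock(&tty->output_lock);
		return done;
	}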
diff --git a/drivers/char/tty_ldisc.c b/drivers/char/tty_ldisc.c index f307f135cbfb..7a84b406a952 100644 --- a/drivers/char/tty_ldisc.c +++ b/drivers/char/tty_ldisc.c | |||
@@ -316,8 +316,7 @@ struct tty_ldisc *tty_ldisc_ref_wait(struct tty_struct *tty) | |||
316 | { | 316 | { |
317 | /* wait_event is a macro */ | 317 | /* wait_event is a macro */ |
318 | wait_event(tty_ldisc_wait, tty_ldisc_try(tty)); | 318 | wait_event(tty_ldisc_wait, tty_ldisc_try(tty)); |
319 | if (tty->ldisc.refcount == 0) | 319 | WARN_ON(tty->ldisc.refcount == 0); |
320 | printk(KERN_ERR "tty_ldisc_ref_wait\n"); | ||
321 | return &tty->ldisc; | 320 | return &tty->ldisc; |
322 | } | 321 | } |
323 | 322 | ||
@@ -376,15 +375,17 @@ EXPORT_SYMBOL_GPL(tty_ldisc_deref); | |||
376 | * @tty: terminal to activate ldisc on | 375 | * @tty: terminal to activate ldisc on |
377 | * | 376 | * |
378 | * Set the TTY_LDISC flag when the line discipline can be called | 377 | * Set the TTY_LDISC flag when the line discipline can be called |
379 | * again. Do necessary wakeups for existing sleepers. | 378 | * again. Do necessary wakeups for existing sleepers. Clear the LDISC |
379 | * changing flag to indicate any ldisc change is now over. | ||
380 | * | 380 | * |
381 | * Note: nobody should set this bit except via this function. Clearing | 381 | * Note: nobody should set the TTY_LDISC bit except via this function. |
382 | * directly is allowed. | 382 | * Clearing directly is allowed. |
383 | */ | 383 | */ |
384 | 384 | ||
385 | void tty_ldisc_enable(struct tty_struct *tty) | 385 | void tty_ldisc_enable(struct tty_struct *tty) |
386 | { | 386 | { |
387 | set_bit(TTY_LDISC, &tty->flags); | 387 | set_bit(TTY_LDISC, &tty->flags); |
388 | clear_bit(TTY_LDISC_CHANGING, &tty->flags); | ||
388 | wake_up(&tty_ldisc_wait); | 389 | wake_up(&tty_ldisc_wait); |
389 | } | 390 | } |
390 | 391 | ||
@@ -496,7 +497,14 @@ restart: | |||
496 | * reference to the line discipline. The TTY_LDISC bit | 497 | * reference to the line discipline. The TTY_LDISC bit |
497 | * prevents anyone taking a reference once it is clear. | 498 | * prevents anyone taking a reference once it is clear. |
498 | * We need the lock to avoid racing reference takers. | 499 | * We need the lock to avoid racing reference takers. |
500 | * | ||
501 | * We must clear the TTY_LDISC bit here to avoid a livelock | ||
502 | * with a userspace app continually trying to use the tty in | ||
503 | * parallel to the change and re-referencing the tty. | ||
499 | */ | 504 | */ |
505 | clear_bit(TTY_LDISC, &tty->flags); | ||
506 | if (o_tty) | ||
507 | clear_bit(TTY_LDISC, &o_tty->flags); | ||
500 | 508 | ||
501 | spin_lock_irqsave(&tty_ldisc_lock, flags); | 509 | spin_lock_irqsave(&tty_ldisc_lock, flags); |
502 | if (tty->ldisc.refcount || (o_tty && o_tty->ldisc.refcount)) { | 510 | if (tty->ldisc.refcount || (o_tty && o_tty->ldisc.refcount)) { |
@@ -528,7 +536,7 @@ restart: | |||
528 | * If the TTY_LDISC bit is set, then we are racing against | 536 | * If the TTY_LDISC bit is set, then we are racing against |
529 | * another ldisc change | 537 | * another ldisc change |
530 | */ | 538 | */ |
531 | if (!test_bit(TTY_LDISC, &tty->flags)) { | 539 | if (test_bit(TTY_LDISC_CHANGING, &tty->flags)) { |
532 | struct tty_ldisc *ld; | 540 | struct tty_ldisc *ld; |
533 | spin_unlock_irqrestore(&tty_ldisc_lock, flags); | 541 | spin_unlock_irqrestore(&tty_ldisc_lock, flags); |
534 | tty_ldisc_put(new_ldisc.ops); | 542 | tty_ldisc_put(new_ldisc.ops); |
@@ -536,10 +544,14 @@ restart: | |||
536 | tty_ldisc_deref(ld); | 544 | tty_ldisc_deref(ld); |
537 | goto restart; | 545 | goto restart; |
538 | } | 546 | } |
539 | 547 | /* | |
540 | clear_bit(TTY_LDISC, &tty->flags); | 548 | * This flag is used to avoid two parallel ldisc changes. Once |
549 | * open and close are fine-grained locked this may work better | ||
550 | * as a mutex shared with the open/close/hup paths | ||
551 | */ | ||
552 | set_bit(TTY_LDISC_CHANGING, &tty->flags); | ||
541 | if (o_tty) | 553 | if (o_tty) |
542 | clear_bit(TTY_LDISC, &o_tty->flags); | 554 | set_bit(TTY_LDISC_CHANGING, &o_tty->flags); |
543 | spin_unlock_irqrestore(&tty_ldisc_lock, flags); | 555 | spin_unlock_irqrestore(&tty_ldisc_lock, flags); |
544 | 556 | ||
545 | /* | 557 | /* |
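The split of duties is: TTY_LDISC gates reference takers, TTY_LDISC_CHANGING marks a change in progress, and clearing TTY_LDISC up front is what starves a re-referencing reader out. A paraphrased sketch of the reference gate these bits protect (the in-tree tty_ldisc_try() has roughly this shape):

	static int ldisc_try_sketch(struct tty_struct *tty)
	{
		unsigned long flags;
		int got = 0;

		spin_lock_irqsave(&tty_ldisc_lock, flags);
		/* no new references once TTY_LDISC is clear, so the changer
		 * only waits for existing holders to drop theirs */
		if (test_bit(TTY_LDISC, &tty->flags)) {
			tty->ldisc.refcount++;
			got = 1;
		}
		spin_unlock_irqrestore(&tty_ldisc_lock, flags);
		return got;
	}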
diff --git a/drivers/char/tty_port.c b/drivers/char/tty_port.c index c8f8024cb40e..9b8004c72686 100644 --- a/drivers/char/tty_port.c +++ b/drivers/char/tty_port.c | |||
@@ -7,6 +7,7 @@ | |||
7 | #include <linux/tty.h> | 7 | #include <linux/tty.h> |
8 | #include <linux/tty_driver.h> | 8 | #include <linux/tty_driver.h> |
9 | #include <linux/tty_flip.h> | 9 | #include <linux/tty_flip.h> |
10 | #include <linux/serial.h> | ||
10 | #include <linux/timer.h> | 11 | #include <linux/timer.h> |
11 | #include <linux/string.h> | 12 | #include <linux/string.h> |
12 | #include <linux/slab.h> | 13 | #include <linux/slab.h> |
@@ -94,3 +95,227 @@ void tty_port_tty_set(struct tty_port *port, struct tty_struct *tty) | |||
94 | spin_unlock_irqrestore(&port->lock, flags); | 95 | spin_unlock_irqrestore(&port->lock, flags); |
95 | } | 96 | } |
96 | EXPORT_SYMBOL(tty_port_tty_set); | 97 | EXPORT_SYMBOL(tty_port_tty_set); |
98 | |||
99 | /** | ||
100 | * tty_port_hangup - hangup helper | ||
101 | * @port: tty port | ||
102 | * | ||
103 | * Perform port level tty hangup flag and count changes. Drop the tty | ||
104 | * reference. | ||
105 | */ | ||
106 | |||
107 | void tty_port_hangup(struct tty_port *port) | ||
108 | { | ||
109 | unsigned long flags; | ||
110 | |||
111 | spin_lock_irqsave(&port->lock, flags); | ||
112 | port->count = 0; | ||
113 | port->flags &= ~ASYNC_NORMAL_ACTIVE; | ||
114 | if (port->tty) | ||
115 | tty_kref_put(port->tty); | ||
116 | port->tty = NULL; | ||
117 | spin_unlock_irqrestore(&port->lock, flags); | ||
118 | wake_up_interruptible(&port->open_wait); | ||
119 | } | ||
120 | EXPORT_SYMBOL(tty_port_hangup); | ||
121 | |||
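No driver in this diff is converted to call it yet; the intended division of labour is that hardware teardown stays in the driver while the count, flag and tty-reference bookkeeping happens here. A sketch with hypothetical names:

	static void my_hangup(struct tty_struct *tty)
	{
		struct my_info *info = tty->driver_data;

		shutdown(info);			/* stop the hardware */
		tty_port_hangup(&info->port);	/* zero count, clear flags,
						   drop the tty reference */
	}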
122 | /** | ||
123 | * tty_port_carrier_raised - carrier raised check | ||
124 | * @port: tty port | ||
125 | * | ||
126 | * Wrapper for the carrier detect logic. For the moment this is used | ||
127 | * to hide some internal details. This will eventually become entirely | ||
128 | * internal to the tty port. | ||
129 | */ | ||
130 | |||
131 | int tty_port_carrier_raised(struct tty_port *port) | ||
132 | { | ||
133 | if (port->ops->carrier_raised == NULL) | ||
134 | return 1; | ||
135 | return port->ops->carrier_raised(port); | ||
136 | } | ||
137 | EXPORT_SYMBOL(tty_port_carrier_raised); | ||
138 | |||
139 | /** | ||
140 | * tty_port_raise_dtr_rts - Raise DTR/RTS | ||
141 | * @port: tty port | ||
142 | * | ||
143 | * Wrapper for the DTR/RTS raise logic. For the moment this is used | ||
144 | * to hide some internal details. This will eventually become entirely | ||
145 | * internal to the tty port. | ||
146 | */ | ||
147 | |||
148 | void tty_port_raise_dtr_rts(struct tty_port *port) | ||
149 | { | ||
150 | if (port->ops->raise_dtr_rts) | ||
151 | port->ops->raise_dtr_rts(port); | ||
152 | } | ||
153 | EXPORT_SYMBOL(tty_port_raise_dtr_rts); | ||
154 | |||
155 | /** | ||
156 | * tty_port_block_til_ready - Waiting logic for tty open | ||
157 | * @port: the tty port being opened | ||
158 | * @tty: the tty device being bound | ||
159 | * @filp: the file pointer of the opener | ||
160 | * | ||
161 | * Implement the core POSIX/SuS tty behaviour when opening a tty device. | ||
162 | * Handles: | ||
163 | * - hangup (both before and during) | ||
164 | * - non blocking open | ||
165 | * - rts/dtr/dcd | ||
166 | * - signals | ||
167 | * - port flags and counts | ||
168 | * | ||
169 | * The passed tty_port must implement the carrier_raised method if it can | ||
170 | * do carrier detect and the raise_dtr_rts method if it supports software | ||
171 | * management of these lines. Note that the dtr/rts raise is done each | ||
172 | * iteration as a hangup may have previously dropped them while we wait. | ||
173 | */ | ||
174 | |||
175 | int tty_port_block_til_ready(struct tty_port *port, | ||
176 | struct tty_struct *tty, struct file *filp) | ||
177 | { | ||
178 | int do_clocal = 0, retval; | ||
179 | unsigned long flags; | ||
180 | DECLARE_WAITQUEUE(wait, current); | ||
181 | int cd; | ||
182 | |||
183 | /* block if port is in the process of being closed */ | ||
184 | if (tty_hung_up_p(filp) || port->flags & ASYNC_CLOSING) { | ||
185 | interruptible_sleep_on(&port->close_wait); | ||
186 | if (port->flags & ASYNC_HUP_NOTIFY) | ||
187 | return -EAGAIN; | ||
188 | else | ||
189 | return -ERESTARTSYS; | ||
190 | } | ||
191 | |||
192 | /* if non-blocking mode is set we can pass directly to open unless | ||
193 | the port has just hung up or is in another error state */ | ||
194 | if ((filp->f_flags & O_NONBLOCK) || | ||
195 | (tty->flags & (1 << TTY_IO_ERROR))) { | ||
196 | port->flags |= ASYNC_NORMAL_ACTIVE; | ||
197 | return 0; | ||
198 | } | ||
199 | |||
200 | if (C_CLOCAL(tty)) | ||
201 | do_clocal = 1; | ||
202 | |||
203 | /* Block waiting until we can proceed. We may need to wait for the | ||
204 | carrier, but we must also wait for any close that is in progress | ||
205 | before the next open may complete */ | ||
206 | |||
207 | retval = 0; | ||
208 | add_wait_queue(&port->open_wait, &wait); | ||
209 | |||
210 | /* The port lock protects the port counts */ | ||
211 | spin_lock_irqsave(&port->lock, flags); | ||
212 | if (!tty_hung_up_p(filp)) | ||
213 | port->count--; | ||
214 | port->blocked_open++; | ||
215 | spin_unlock_irqrestore(&port->lock, flags); | ||
216 | |||
217 | while (1) { | ||
218 | /* Indicate we are open */ | ||
219 | if (tty->termios->c_cflag & CBAUD) | ||
220 | tty_port_raise_dtr_rts(port); | ||
221 | |||
222 | set_current_state(TASK_INTERRUPTIBLE); | ||
223 | /* Check for a hangup or uninitialised port. Return accordingly */ | ||
224 | if (tty_hung_up_p(filp) || !(port->flags & ASYNC_INITIALIZED)) { | ||
225 | if (port->flags & ASYNC_HUP_NOTIFY) | ||
226 | retval = -EAGAIN; | ||
227 | else | ||
228 | retval = -ERESTARTSYS; | ||
229 | break; | ||
230 | } | ||
231 | /* Probe the carrier. For devices with no carrier detect this | ||
232 | will always return true */ | ||
233 | cd = tty_port_carrier_raised(port); | ||
234 | if (!(port->flags & ASYNC_CLOSING) && | ||
235 | (do_clocal || cd)) | ||
236 | break; | ||
237 | if (signal_pending(current)) { | ||
238 | retval = -ERESTARTSYS; | ||
239 | break; | ||
240 | } | ||
241 | schedule(); | ||
242 | } | ||
243 | set_current_state(TASK_RUNNING); | ||
244 | remove_wait_queue(&port->open_wait, &wait); | ||
245 | |||
246 | /* Update counts. A parallel hangup will have set count to zero and | ||
247 | we must not mess that up further */ | ||
248 | spin_lock_irqsave(&port->lock, flags); | ||
249 | if (!tty_hung_up_p(filp)) | ||
250 | port->count++; | ||
251 | port->blocked_open--; | ||
252 | if (retval == 0) | ||
253 | port->flags |= ASYNC_NORMAL_ACTIVE; | ||
254 | spin_unlock_irqrestore(&port->lock, flags); | ||
255 | return retval; | ||
256 | |||
257 | } | ||
258 | EXPORT_SYMBOL(tty_port_block_til_ready); | ||
259 | |||
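The intended call site is the tail of a driver's open() method: do the hardware bring-up first, then let this helper implement the blocking semantics. A sketch under the same hypothetical foo driver (foo_startup() and the foo_ports lookup are assumptions):

static int foo_open(struct tty_struct *tty, struct file *filp)
{
	struct foo_port *fp = foo_ports + tty->index;
	int ret;

	tty->driver_data = fp;
	fp->port.tty = tty;

	ret = foo_startup(fp);		/* assumed hardware init */
	if (ret)
		return ret;

	/* blocks for carrier, or returns at once under O_NONBLOCK */
	return tty_port_block_til_ready(&fp->port, tty, filp);
}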
260 | int tty_port_close_start(struct tty_port *port, struct tty_struct *tty, struct file *filp) | ||
261 | { | ||
262 | unsigned long flags; | ||
263 | |||
264 | spin_lock_irqsave(&port->lock, flags); | ||
265 | if (tty_hung_up_p(filp)) { | ||
266 | spin_unlock_irqrestore(&port->lock, flags); | ||
267 | return 0; | ||
268 | } | ||
269 | |||
270 | if (tty->count == 1 && port->count != 1) { | ||
271 | printk(KERN_WARNING | ||
272 | "tty_port_close_start: tty->count = 1 port count = %d.\n", | ||
273 | port->count); | ||
274 | port->count = 1; | ||
275 | } | ||
276 | if (--port->count < 0) { | ||
277 | printk(KERN_WARNING "tty_port_close_start: count = %d\n", | ||
278 | port->count); | ||
279 | port->count = 0; | ||
280 | } | ||
281 | |||
282 | if (port->count) { | ||
283 | spin_unlock_irqrestore(&port->lock, flags); | ||
284 | return 0; | ||
285 | } | ||
286 | port->flags |= ASYNC_CLOSING; | ||
287 | tty->closing = 1; | ||
288 | spin_unlock_irqrestore(&port->lock, flags); | ||
289 | /* Don't block on a stalled port, just pull the chain */ | ||
290 | if (tty->flow_stopped) | ||
291 | tty_driver_flush_buffer(tty); | ||
292 | if (port->flags & ASYNC_INITIALIZED && | ||
293 | port->closing_wait != ASYNC_CLOSING_WAIT_NONE) | ||
294 | tty_wait_until_sent(tty, port->closing_wait); | ||
295 | return 1; | ||
296 | } | ||
297 | EXPORT_SYMBOL(tty_port_close_start); | ||
298 | |||
299 | void tty_port_close_end(struct tty_port *port, struct tty_struct *tty) | ||
300 | { | ||
301 | unsigned long flags; | ||
302 | |||
303 | tty_ldisc_flush(tty); | ||
304 | |||
305 | spin_lock_irqsave(&port->lock, flags); | ||
306 | tty->closing = 0; | ||
307 | |||
308 | if (port->blocked_open) { | ||
309 | spin_unlock_irqrestore(&port->lock, flags); | ||
310 | if (port->close_delay) { | ||
311 | msleep_interruptible( | ||
312 | jiffies_to_msecs(port->close_delay)); | ||
313 | } | ||
314 | spin_lock_irqsave(&port->lock, flags); | ||
315 | wake_up_interruptible(&port->open_wait); | ||
316 | } | ||
317 | port->flags &= ~(ASYNC_NORMAL_ACTIVE | ASYNC_CLOSING); | ||
318 | wake_up_interruptible(&port->close_wait); | ||
319 | spin_unlock_irqrestore(&port->lock, flags); | ||
320 | } | ||
321 | EXPORT_SYMBOL(tty_port_close_end); | ||
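These two helpers are meant to bracket a driver's close() method: tty_port_close_start() returns 0 when there is nothing left for this closer to tear down (a hangup raced us, or other opens remain), and tty_port_close_end() applies the close_delay and wakes anyone parked in tty_port_block_til_ready(). A hedged sketch of the pairing, again with hypothetical foo names:

static void foo_close(struct tty_struct *tty, struct file *filp)
{
	struct foo_port *fp = tty->driver_data;

	if (tty_port_close_start(&fp->port, tty, filp) == 0)
		return;			/* not the final close */

	foo_shutdown(fp);		/* assumed hardware teardown */

	tty_port_close_end(&fp->port, tty);
	fp->port.tty = NULL;
}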
diff --git a/drivers/char/vme_scc.c b/drivers/char/vme_scc.c index 1718b3c481db..0e8234bd0e19 100644 --- a/drivers/char/vme_scc.c +++ b/drivers/char/vme_scc.c | |||
@@ -69,7 +69,7 @@ static void scc_disable_tx_interrupts(void * ptr); | |||
69 | static void scc_enable_tx_interrupts(void * ptr); | 69 | static void scc_enable_tx_interrupts(void * ptr); |
70 | static void scc_disable_rx_interrupts(void * ptr); | 70 | static void scc_disable_rx_interrupts(void * ptr); |
71 | static void scc_enable_rx_interrupts(void * ptr); | 71 | static void scc_enable_rx_interrupts(void * ptr); |
72 | static int scc_get_CD(void * ptr); | 72 | static int scc_carrier_raised(struct tty_port *port); |
73 | static void scc_shutdown_port(void * ptr); | 73 | static void scc_shutdown_port(void * ptr); |
74 | static int scc_set_real_termios(void *ptr); | 74 | static int scc_set_real_termios(void *ptr); |
75 | static void scc_hungup(void *ptr); | 75 | static void scc_hungup(void *ptr); |
@@ -100,7 +100,6 @@ static struct real_driver scc_real_driver = { | |||
100 | scc_enable_tx_interrupts, | 100 | scc_enable_tx_interrupts, |
101 | scc_disable_rx_interrupts, | 101 | scc_disable_rx_interrupts, |
102 | scc_enable_rx_interrupts, | 102 | scc_enable_rx_interrupts, |
103 | scc_get_CD, | ||
104 | scc_shutdown_port, | 103 | scc_shutdown_port, |
105 | scc_set_real_termios, | 104 | scc_set_real_termios, |
106 | scc_chars_in_buffer, | 105 | scc_chars_in_buffer, |
@@ -129,6 +128,10 @@ static const struct tty_operations scc_ops = { | |||
129 | .break_ctl = scc_break_ctl, | 128 | .break_ctl = scc_break_ctl, |
130 | }; | 129 | }; |
131 | 130 | ||
131 | static const struct tty_port_operations scc_port_ops = { | ||
132 | .carrier_raised = scc_carrier_raised, | ||
133 | }; | ||
134 | |||
132 | /*---------------------------------------------------------------------------- | 135 | /*---------------------------------------------------------------------------- |
133 | * vme_scc_init() and support functions | 136 | * vme_scc_init() and support functions |
134 | *---------------------------------------------------------------------------*/ | 137 | *---------------------------------------------------------------------------*/ |
@@ -176,6 +179,8 @@ static void scc_init_portstructs(void) | |||
176 | 179 | ||
177 | for (i = 0; i < 2; i++) { | 180 | for (i = 0; i < 2; i++) { |
178 | port = scc_ports + i; | 181 | port = scc_ports + i; |
182 | tty_port_init(&port->gs.port); | ||
183 | port->gs.port.ops = &scc_port_ops; | ||
179 | port->gs.magic = SCC_MAGIC; | 184 | port->gs.magic = SCC_MAGIC; |
180 | port->gs.close_delay = HZ/2; | 185 | port->gs.close_delay = HZ/2; |
181 | port->gs.closing_wait = 30 * HZ; | 186 | port->gs.closing_wait = 30 * HZ; |
@@ -624,10 +629,10 @@ static void scc_enable_rx_interrupts(void *ptr) | |||
624 | } | 629 | } |
625 | 630 | ||
626 | 631 | ||
627 | static int scc_get_CD(void *ptr) | 632 | static int scc_carrier_raised(struct tty_port *port) |
628 | { | 633 | { |
629 | struct scc_port *port = ptr; | 634 | struct scc_port *sc = container_of(port, struct scc_port, gs.port); |
630 | unsigned channel = port->channel; | 635 | unsigned channel = sc->channel; |
631 | 636 | ||
632 | return !!(scc_last_status_reg[channel] & SR_DCD); | 637 | return !!(scc_last_status_reg[channel] & SR_DCD); |
633 | } | 638 | } |
@@ -638,7 +643,7 @@ static void scc_shutdown_port(void *ptr) | |||
638 | struct scc_port *port = ptr; | 643 | struct scc_port *port = ptr; |
639 | 644 | ||
640 | port->gs.port.flags &= ~ GS_ACTIVE; | 645 | port->gs.port.flags &= ~ GS_ACTIVE; |
641 | if (port->gs.port.tty && port->gs.port.tty->termios->c_cflag & HUPCL) { | 646 | if (port->gs.port.tty && (port->gs.port.tty->termios->c_cflag & HUPCL)) { |
642 | scc_setsignals (port, 0, 0); | 647 | scc_setsignals (port, 0, 0); |
643 | } | 648 | } |
644 | } | 649 | } |
@@ -779,7 +784,7 @@ static void scc_setsignals(struct scc_port *port, int dtr, int rts) | |||
779 | 784 | ||
780 | static void scc_send_xchar(struct tty_struct *tty, char ch) | 785 | static void scc_send_xchar(struct tty_struct *tty, char ch) |
781 | { | 786 | { |
782 | struct scc_port *port = (struct scc_port *)tty->driver_data; | 787 | struct scc_port *port = tty->driver_data; |
783 | 788 | ||
784 | port->x_char = ch; | 789 | port->x_char = ch; |
785 | if (ch) | 790 | if (ch) |
@@ -896,7 +901,7 @@ static int scc_open (struct tty_struct * tty, struct file * filp) | |||
896 | return retval; | 901 | return retval; |
897 | } | 902 | } |
898 | 903 | ||
899 | port->c_dcd = scc_get_CD (port); | 904 | port->c_dcd = tty_port_carrier_raised(&port->gs.port); |
900 | 905 | ||
901 | scc_enable_rx_interrupts(port); | 906 | scc_enable_rx_interrupts(port); |
902 | 907 | ||
@@ -906,7 +911,7 @@ static int scc_open (struct tty_struct * tty, struct file * filp) | |||
906 | 911 | ||
907 | static void scc_throttle (struct tty_struct * tty) | 912 | static void scc_throttle (struct tty_struct * tty) |
908 | { | 913 | { |
909 | struct scc_port *port = (struct scc_port *)tty->driver_data; | 914 | struct scc_port *port = tty->driver_data; |
910 | unsigned long flags; | 915 | unsigned long flags; |
911 | SCC_ACCESS_INIT(port); | 916 | SCC_ACCESS_INIT(port); |
912 | 917 | ||
@@ -922,7 +927,7 @@ static void scc_throttle (struct tty_struct * tty) | |||
922 | 927 | ||
923 | static void scc_unthrottle (struct tty_struct * tty) | 928 | static void scc_unthrottle (struct tty_struct * tty) |
924 | { | 929 | { |
925 | struct scc_port *port = (struct scc_port *)tty->driver_data; | 930 | struct scc_port *port = tty->driver_data; |
926 | unsigned long flags; | 931 | unsigned long flags; |
927 | SCC_ACCESS_INIT(port); | 932 | SCC_ACCESS_INIT(port); |
928 | 933 | ||
@@ -945,7 +950,7 @@ static int scc_ioctl(struct tty_struct *tty, struct file *file, | |||
945 | 950 | ||
946 | static int scc_break_ctl(struct tty_struct *tty, int break_state) | 951 | static int scc_break_ctl(struct tty_struct *tty, int break_state) |
947 | { | 952 | { |
948 | struct scc_port *port = (struct scc_port *)tty->driver_data; | 953 | struct scc_port *port = tty->driver_data; |
949 | unsigned long flags; | 954 | unsigned long flags; |
950 | SCC_ACCESS_INIT(port); | 955 | SCC_ACCESS_INIT(port); |
951 | 956 | ||
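Aside from the tty_port conversion, several hunks above are pure noise reduction: tty->driver_data is a void *, and C converts void * to any object pointer type implicitly, so the casts never bought anything. The same simplification in isolation:

struct scc_port *port;

port = (struct scc_port *)tty->driver_data;	/* old: redundant cast */
port = tty->driver_data;			/* new: implicit conversion */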
diff --git a/drivers/char/vt.c b/drivers/char/vt.c index 008176edbd64..80014213fb53 100644 --- a/drivers/char/vt.c +++ b/drivers/char/vt.c | |||
@@ -819,8 +819,8 @@ static inline int resize_screen(struct vc_data *vc, int width, int height, | |||
819 | * ctrl_lock of the tty IFF a tty is passed. | 819 | * ctrl_lock of the tty IFF a tty is passed. |
820 | */ | 820 | */ |
821 | 821 | ||
822 | static int vc_do_resize(struct tty_struct *tty, struct tty_struct *real_tty, | 822 | static int vc_do_resize(struct tty_struct *tty, struct vc_data *vc, |
823 | struct vc_data *vc, unsigned int cols, unsigned int lines) | 823 | unsigned int cols, unsigned int lines) |
824 | { | 824 | { |
825 | unsigned long old_origin, new_origin, new_scr_end, rlth, rrem, err = 0; | 825 | unsigned long old_origin, new_origin, new_scr_end, rlth, rrem, err = 0; |
826 | unsigned int old_cols, old_rows, old_row_size, old_screen_size; | 826 | unsigned int old_cols, old_rows, old_row_size, old_screen_size; |
@@ -932,7 +932,7 @@ static int vc_do_resize(struct tty_struct *tty, struct tty_struct *real_tty, | |||
932 | ws.ws_row = vc->vc_rows; | 932 | ws.ws_row = vc->vc_rows; |
933 | ws.ws_col = vc->vc_cols; | 933 | ws.ws_col = vc->vc_cols; |
934 | ws.ws_ypixel = vc->vc_scan_lines; | 934 | ws.ws_ypixel = vc->vc_scan_lines; |
935 | tty_do_resize(tty, real_tty, &ws); | 935 | tty_do_resize(tty, &ws); |
936 | } | 936 | } |
937 | 937 | ||
938 | if (CON_IS_VISIBLE(vc)) | 938 | if (CON_IS_VISIBLE(vc)) |
@@ -954,13 +954,12 @@ static int vc_do_resize(struct tty_struct *tty, struct tty_struct *real_tty, | |||
954 | 954 | ||
955 | int vc_resize(struct vc_data *vc, unsigned int cols, unsigned int rows) | 955 | int vc_resize(struct vc_data *vc, unsigned int cols, unsigned int rows) |
956 | { | 956 | { |
957 | return vc_do_resize(vc->vc_tty, vc->vc_tty, vc, cols, rows); | 957 | return vc_do_resize(vc->vc_tty, vc, cols, rows); |
958 | } | 958 | } |
959 | 959 | ||
960 | /** | 960 | /** |
961 | * vt_resize - resize a VT | 961 | * vt_resize - resize a VT |
962 | * @tty: tty to resize | 962 | * @tty: tty to resize |
963 | * @real_tty: tty if a pty/tty pair | ||
964 | * @ws: winsize attributes | 963 | * @ws: winsize attributes |
965 | * | 964 | * |
966 | * Resize a virtual terminal. This is called by the tty layer as we | 965 | * Resize a virtual terminal. This is called by the tty layer as we |
@@ -971,14 +970,13 @@ int vc_resize(struct vc_data *vc, unsigned int cols, unsigned int rows) | |||
971 | * termios_mutex and the tty ctrl_lock in that order. | 970 | * termios_mutex and the tty ctrl_lock in that order. |
972 | */ | 971 | */ |
973 | 972 | ||
974 | int vt_resize(struct tty_struct *tty, struct tty_struct *real_tty, | 973 | int vt_resize(struct tty_struct *tty, struct winsize *ws) |
975 | struct winsize *ws) | ||
976 | { | 974 | { |
977 | struct vc_data *vc = tty->driver_data; | 975 | struct vc_data *vc = tty->driver_data; |
978 | int ret; | 976 | int ret; |
979 | 977 | ||
980 | acquire_console_sem(); | 978 | acquire_console_sem(); |
981 | ret = vc_do_resize(tty, real_tty, vc, ws->ws_col, ws->ws_row); | 979 | ret = vc_do_resize(tty, vc, ws->ws_col, ws->ws_row); |
982 | release_console_sem(); | 980 | release_console_sem(); |
983 | return ret; | 981 | return ret; |
984 | } | 982 | } |
@@ -2679,7 +2677,7 @@ static int con_write_room(struct tty_struct *tty) | |||
2679 | { | 2677 | { |
2680 | if (tty->stopped) | 2678 | if (tty->stopped) |
2681 | return 0; | 2679 | return 0; |
2682 | return 4096; /* No limit, really; we're not buffering */ | 2680 | return 32768; /* No limit, really; we're not buffering */ |
2683 | } | 2681 | } |
2684 | 2682 | ||
2685 | static int con_chars_in_buffer(struct tty_struct *tty) | 2683 | static int con_chars_in_buffer(struct tty_struct *tty) |
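With the pty-pair argument gone, a vt_resize() caller only needs the tty and the requested geometry; the console semaphore is taken inside. An illustrative caller (the 80x25 choice is arbitrary):

#include <linux/tty.h>
#include <linux/vt_kern.h>

static int foo_resize_vt(struct tty_struct *tty)
{
	struct winsize ws = {
		.ws_col = 80,
		.ws_row = 25,
	};

	return vt_resize(tty, &ws);
}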
diff --git a/drivers/char/vt_ioctl.c b/drivers/char/vt_ioctl.c index 8944ce508e2f..a2dee0eb6dad 100644 --- a/drivers/char/vt_ioctl.c +++ b/drivers/char/vt_ioctl.c | |||
@@ -366,7 +366,7 @@ do_unimap_ioctl(int cmd, struct unimapdesc __user *user_ud, int perm, struct vc_ | |||
366 | int vt_ioctl(struct tty_struct *tty, struct file * file, | 366 | int vt_ioctl(struct tty_struct *tty, struct file * file, |
367 | unsigned int cmd, unsigned long arg) | 367 | unsigned int cmd, unsigned long arg) |
368 | { | 368 | { |
369 | struct vc_data *vc = (struct vc_data *)tty->driver_data; | 369 | struct vc_data *vc = tty->driver_data; |
370 | struct console_font_op op; /* used in multiple places here */ | 370 | struct console_font_op op; /* used in multiple places here */ |
371 | struct kbd_struct * kbd; | 371 | struct kbd_struct * kbd; |
372 | unsigned int console; | 372 | unsigned int console; |
diff --git a/drivers/clocksource/tcb_clksrc.c b/drivers/clocksource/tcb_clksrc.c index f450588e5858..254f1064d973 100644 --- a/drivers/clocksource/tcb_clksrc.c +++ b/drivers/clocksource/tcb_clksrc.c | |||
@@ -154,7 +154,6 @@ static struct tc_clkevt_device clkevt = { | |||
154 | .shift = 32, | 154 | .shift = 32, |
155 | /* Should be lower than at91rm9200's system timer */ | 155 | /* Should be lower than at91rm9200's system timer */ |
156 | .rating = 125, | 156 | .rating = 125, |
157 | .cpumask = CPU_MASK_CPU0, | ||
158 | .set_next_event = tc_next_event, | 157 | .set_next_event = tc_next_event, |
159 | .set_mode = tc_mode, | 158 | .set_mode = tc_mode, |
160 | }, | 159 | }, |
@@ -195,6 +194,7 @@ static void __init setup_clkevents(struct atmel_tc *tc, int clk32k_divisor_idx) | |||
195 | clkevt.clkevt.max_delta_ns | 194 | clkevt.clkevt.max_delta_ns |
196 | = clockevent_delta2ns(0xffff, &clkevt.clkevt); | 195 | = clockevent_delta2ns(0xffff, &clkevt.clkevt); |
197 | clkevt.clkevt.min_delta_ns = clockevent_delta2ns(1, &clkevt.clkevt) + 1; | 196 | clkevt.clkevt.min_delta_ns = clockevent_delta2ns(1, &clkevt.clkevt) + 1; |
197 | clkevt.clkevt.cpumask = cpumask_of(0); | ||
198 | 198 | ||
199 | setup_irq(irq, &tc_irqaction); | 199 | setup_irq(irq, &tc_irqaction); |
200 | 200 | ||
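The tcb_clksrc hunks track the cpumask rework: the static CPU_MASK_CPU0 initializer is gone and the mask is assigned at setup time via cpumask_of(). The same pattern reduced to its core, for a hypothetical device whose events are bound to CPU 0:

#include <linux/clockchips.h>
#include <linux/cpumask.h>

static void foo_register_clkevt(struct clock_event_device *evt)
{
	evt->cpumask = cpumask_of(0);	/* replaces .cpumask = CPU_MASK_CPU0 */
	clockevents_register_device(evt);
}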
diff --git a/drivers/ide/Kconfig b/drivers/ide/Kconfig index c9f21e3d4ead..4ee85fcf9aaf 100644 --- a/drivers/ide/Kconfig +++ b/drivers/ide/Kconfig | |||
@@ -137,6 +137,7 @@ config BLK_DEV_DELKIN | |||
137 | 137 | ||
138 | config BLK_DEV_IDECD | 138 | config BLK_DEV_IDECD |
139 | tristate "Include IDE/ATAPI CDROM support" | 139 | tristate "Include IDE/ATAPI CDROM support" |
140 | select IDE_ATAPI | ||
140 | ---help--- | 141 | ---help--- |
141 | If you have a CD-ROM drive using the ATAPI protocol, say Y. ATAPI is | 142 | If you have a CD-ROM drive using the ATAPI protocol, say Y. ATAPI is |
142 | a newer protocol used by IDE CD-ROM and TAPE drives, similar to the | 143 | a newer protocol used by IDE CD-ROM and TAPE drives, similar to the |
@@ -185,23 +186,6 @@ config BLK_DEV_IDETAPE | |||
185 | To compile this driver as a module, choose M here: the | 186 | To compile this driver as a module, choose M here: the |
186 | module will be called ide-tape. | 187 | module will be called ide-tape. |
187 | 188 | ||
188 | config BLK_DEV_IDESCSI | ||
189 | tristate "SCSI emulation support (DEPRECATED)" | ||
190 | depends on SCSI | ||
191 | select IDE_ATAPI | ||
192 | ---help--- | ||
193 | WARNING: ide-scsi is no longer needed for cd writing applications! | ||
194 | The 2.6 kernel supports direct writing to ide-cd, which eliminates | ||
195 | the need for ide-scsi + the entire scsi stack just for writing a | ||
196 | cd. The new method is more efficient in every way. | ||
197 | |||
198 | This will provide SCSI host adapter emulation for IDE ATAPI devices, | ||
199 | and will allow you to use a SCSI device driver instead of a native | ||
200 | ATAPI driver. | ||
201 | |||
202 | If both this SCSI emulation and native ATAPI support are compiled | ||
203 | into the kernel, the native support will be used. | ||
204 | |||
205 | config BLK_DEV_IDEACPI | 189 | config BLK_DEV_IDEACPI |
206 | bool "IDE ACPI support" | 190 | bool "IDE ACPI support" |
207 | depends on ACPI | 191 | depends on ACPI |
diff --git a/drivers/ide/Makefile b/drivers/ide/Makefile index 177e3f8523ed..410728992e6a 100644 --- a/drivers/ide/Makefile +++ b/drivers/ide/Makefile | |||
@@ -5,7 +5,7 @@ | |||
5 | EXTRA_CFLAGS += -Idrivers/ide | 5 | EXTRA_CFLAGS += -Idrivers/ide |
6 | 6 | ||
7 | ide-core-y += ide.o ide-ioctls.o ide-io.o ide-iops.o ide-lib.o ide-probe.o \ | 7 | ide-core-y += ide.o ide-ioctls.o ide-io.o ide-iops.o ide-lib.o ide-probe.o \ |
8 | ide-taskfile.o ide-pm.o ide-park.o ide-pio-blacklist.o | 8 | ide-taskfile.o ide-pm.o ide-park.o ide-pio-blacklist.o ide-sysfs.o |
9 | 9 | ||
10 | # core IDE code | 10 | # core IDE code |
11 | ide-core-$(CONFIG_IDE_TIMINGS) += ide-timings.o | 11 | ide-core-$(CONFIG_IDE_TIMINGS) += ide-timings.o |
diff --git a/drivers/ide/ide-atapi.c b/drivers/ide/ide-atapi.c index 4e58b9e7a58a..e8688c0f8645 100644 --- a/drivers/ide/ide-atapi.c +++ b/drivers/ide/ide-atapi.c | |||
@@ -3,6 +3,7 @@ | |||
3 | */ | 3 | */ |
4 | 4 | ||
5 | #include <linux/kernel.h> | 5 | #include <linux/kernel.h> |
6 | #include <linux/cdrom.h> | ||
6 | #include <linux/delay.h> | 7 | #include <linux/delay.h> |
7 | #include <linux/ide.h> | 8 | #include <linux/ide.h> |
8 | #include <scsi/scsi.h> | 9 | #include <scsi/scsi.h> |
@@ -14,6 +15,13 @@ | |||
14 | #define debug_log(fmt, args...) do {} while (0) | 15 | #define debug_log(fmt, args...) do {} while (0) |
15 | #endif | 16 | #endif |
16 | 17 | ||
18 | #define ATAPI_MIN_CDB_BYTES 12 | ||
19 | |||
20 | static inline int dev_is_idecd(ide_drive_t *drive) | ||
21 | { | ||
22 | return drive->media == ide_cdrom || drive->media == ide_optical; | ||
23 | } | ||
24 | |||
17 | /* | 25 | /* |
18 | * Check whether we can support a device, | 26 | * Check whether we can support a device, |
19 | * based on the ATAPI IDENTIFY command results. | 27 | * based on the ATAPI IDENTIFY command results. |
@@ -233,18 +241,49 @@ void ide_retry_pc(ide_drive_t *drive, struct gendisk *disk) | |||
233 | } | 241 | } |
234 | EXPORT_SYMBOL_GPL(ide_retry_pc); | 242 | EXPORT_SYMBOL_GPL(ide_retry_pc); |
235 | 243 | ||
236 | int ide_scsi_expiry(ide_drive_t *drive) | 244 | int ide_cd_expiry(ide_drive_t *drive) |
237 | { | 245 | { |
238 | struct ide_atapi_pc *pc = drive->pc; | 246 | struct request *rq = HWGROUP(drive)->rq; |
247 | unsigned long wait = 0; | ||
239 | 248 | ||
240 | debug_log("%s called for %lu at %lu\n", __func__, | 249 | debug_log("%s: rq->cmd[0]: 0x%x\n", __func__, rq->cmd[0]); |
241 | pc->scsi_cmd->serial_number, jiffies); | ||
242 | 250 | ||
243 | pc->flags |= PC_FLAG_TIMEDOUT; | 251 | /* |
252 | * Some commands are *slow* and normally take a long time to complete. | ||
253 | * Usually we can use the ATAPI "disconnect" to bypass this, but not all | ||
254 | * commands/drives support that. Let ide_timer_expiry keep polling us | ||
255 | * for these. | ||
256 | */ | ||
257 | switch (rq->cmd[0]) { | ||
258 | case GPCMD_BLANK: | ||
259 | case GPCMD_FORMAT_UNIT: | ||
260 | case GPCMD_RESERVE_RZONE_TRACK: | ||
261 | case GPCMD_CLOSE_TRACK: | ||
262 | case GPCMD_FLUSH_CACHE: | ||
263 | wait = ATAPI_WAIT_PC; | ||
264 | break; | ||
265 | default: | ||
266 | if (!(rq->cmd_flags & REQ_QUIET)) | ||
267 | printk(KERN_INFO "cmd 0x%x timed out\n", | ||
268 | rq->cmd[0]); | ||
269 | wait = 0; | ||
270 | break; | ||
271 | } | ||
272 | return wait; | ||
273 | } | ||
274 | EXPORT_SYMBOL_GPL(ide_cd_expiry); | ||
244 | 275 | ||
245 | return 0; /* we do not want the IDE subsystem to retry */ | 276 | int ide_cd_get_xferlen(struct request *rq) |
277 | { | ||
278 | if (blk_fs_request(rq)) | ||
279 | return 32768; | ||
280 | else if (blk_sense_request(rq) || blk_pc_request(rq) || | ||
281 | rq->cmd_type == REQ_TYPE_ATA_PC) | ||
282 | return rq->data_len; | ||
283 | else | ||
284 | return 0; | ||
246 | } | 285 | } |
247 | EXPORT_SYMBOL_GPL(ide_scsi_expiry); | 286 | EXPORT_SYMBOL_GPL(ide_cd_get_xferlen); |
248 | 287 | ||
249 | /* | 288 | /* |
250 | * This is the usual interrupt handler which will be called during a packet | 289 | * This is the usual interrupt handler which will be called during a packet |
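The expiry contract that ide_cd_expiry() implements is worth spelling out: when the timeout armed by ide_set_handler() fires, ide_timer_expiry() consults the expiry hook; a nonzero return re-arms the timer for that many more jiffies, while 0 lets the command be failed. A hypothetical minimal hook following the same contract:

static int foo_expiry(ide_drive_t *drive)
{
	struct request *rq = HWGROUP(drive)->rq;

	if (rq->cmd[0] == GPCMD_FORMAT_UNIT)
		return ATAPI_WAIT_PC;	/* known-slow: keep polling */

	return 0;			/* anything else: time it out */
}

/* armed together with the interrupt handler, e.g.
 *	ide_set_handler(drive, foo_intr, ATAPI_WAIT_PC, foo_expiry);
 */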
@@ -258,21 +297,14 @@ static ide_startstop_t ide_pc_intr(ide_drive_t *drive) | |||
258 | struct request *rq = hwif->hwgroup->rq; | 297 | struct request *rq = hwif->hwgroup->rq; |
259 | const struct ide_tp_ops *tp_ops = hwif->tp_ops; | 298 | const struct ide_tp_ops *tp_ops = hwif->tp_ops; |
260 | xfer_func_t *xferfunc; | 299 | xfer_func_t *xferfunc; |
261 | ide_expiry_t *expiry; | ||
262 | unsigned int timeout, temp; | 300 | unsigned int timeout, temp; |
263 | u16 bcount; | 301 | u16 bcount; |
264 | u8 stat, ireason, scsi = !!(drive->dev_flags & IDE_DFLAG_SCSI), dsc = 0; | 302 | u8 stat, ireason, dsc = 0; |
265 | 303 | ||
266 | debug_log("Enter %s - interrupt handler\n", __func__); | 304 | debug_log("Enter %s - interrupt handler\n", __func__); |
267 | 305 | ||
268 | if (scsi) { | 306 | timeout = (drive->media == ide_floppy) ? WAIT_FLOPPY_CMD |
269 | timeout = ide_scsi_get_timeout(pc); | 307 | : WAIT_TAPE_CMD; |
270 | expiry = ide_scsi_expiry; | ||
271 | } else { | ||
272 | timeout = (drive->media == ide_floppy) ? WAIT_FLOPPY_CMD | ||
273 | : WAIT_TAPE_CMD; | ||
274 | expiry = NULL; | ||
275 | } | ||
276 | 308 | ||
277 | if (pc->flags & PC_FLAG_TIMEDOUT) { | 309 | if (pc->flags & PC_FLAG_TIMEDOUT) { |
278 | drive->pc_callback(drive, 0); | 310 | drive->pc_callback(drive, 0); |
@@ -284,8 +316,8 @@ static ide_startstop_t ide_pc_intr(ide_drive_t *drive) | |||
284 | 316 | ||
285 | if (pc->flags & PC_FLAG_DMA_IN_PROGRESS) { | 317 | if (pc->flags & PC_FLAG_DMA_IN_PROGRESS) { |
286 | if (hwif->dma_ops->dma_end(drive) || | 318 | if (hwif->dma_ops->dma_end(drive) || |
287 | (drive->media == ide_tape && !scsi && (stat & ATA_ERR))) { | 319 | (drive->media == ide_tape && (stat & ATA_ERR))) { |
288 | if (drive->media == ide_floppy && !scsi) | 320 | if (drive->media == ide_floppy) |
289 | printk(KERN_ERR "%s: DMA %s error\n", | 321 | printk(KERN_ERR "%s: DMA %s error\n", |
290 | drive->name, rq_data_dir(pc->rq) | 322 | drive->name, rq_data_dir(pc->rq) |
291 | ? "write" : "read"); | 323 | ? "write" : "read"); |
@@ -307,7 +339,7 @@ static ide_startstop_t ide_pc_intr(ide_drive_t *drive) | |||
307 | 339 | ||
308 | local_irq_enable_in_hardirq(); | 340 | local_irq_enable_in_hardirq(); |
309 | 341 | ||
310 | if (drive->media == ide_tape && !scsi && | 342 | if (drive->media == ide_tape && |
311 | (stat & ATA_ERR) && rq->cmd[0] == REQUEST_SENSE) | 343 | (stat & ATA_ERR) && rq->cmd[0] == REQUEST_SENSE) |
312 | stat &= ~ATA_ERR; | 344 | stat &= ~ATA_ERR; |
313 | 345 | ||
@@ -315,11 +347,8 @@ static ide_startstop_t ide_pc_intr(ide_drive_t *drive) | |||
315 | /* Error detected */ | 347 | /* Error detected */ |
316 | debug_log("%s: I/O error\n", drive->name); | 348 | debug_log("%s: I/O error\n", drive->name); |
317 | 349 | ||
318 | if (drive->media != ide_tape || scsi) { | 350 | if (drive->media != ide_tape) |
319 | pc->rq->errors++; | 351 | pc->rq->errors++; |
320 | if (scsi) | ||
321 | goto cmd_finished; | ||
322 | } | ||
323 | 352 | ||
324 | if (rq->cmd[0] == REQUEST_SENSE) { | 353 | if (rq->cmd[0] == REQUEST_SENSE) { |
325 | printk(KERN_ERR "%s: I/O error in request sense" | 354 | printk(KERN_ERR "%s: I/O error in request sense" |
@@ -335,7 +364,6 @@ static ide_startstop_t ide_pc_intr(ide_drive_t *drive) | |||
335 | /* queued, but not started */ | 364 | /* queued, but not started */ |
336 | return ide_stopped; | 365 | return ide_stopped; |
337 | } | 366 | } |
338 | cmd_finished: | ||
339 | pc->error = 0; | 367 | pc->error = 0; |
340 | 368 | ||
341 | if ((pc->flags & PC_FLAG_WAIT_FOR_DSC) && (stat & ATA_DSC) == 0) | 369 | if ((pc->flags & PC_FLAG_WAIT_FOR_DSC) && (stat & ATA_DSC) == 0) |
@@ -382,25 +410,8 @@ cmd_finished: | |||
382 | "us more data than expected - " | 410 | "us more data than expected - " |
383 | "discarding data\n", | 411 | "discarding data\n", |
384 | drive->name); | 412 | drive->name); |
385 | if (scsi) | 413 | |
386 | temp = pc->buf_size - pc->xferred; | 414 | ide_pad_transfer(drive, 0, bcount); |
387 | else | ||
388 | temp = 0; | ||
389 | if (temp) { | ||
390 | if (pc->sg) | ||
391 | drive->pc_io_buffers(drive, pc, | ||
392 | temp, 0); | ||
393 | else | ||
394 | tp_ops->input_data(drive, NULL, | ||
395 | pc->cur_pos, temp); | ||
396 | printk(KERN_ERR "%s: transferred %d of " | ||
397 | "%d bytes\n", | ||
398 | drive->name, | ||
399 | temp, bcount); | ||
400 | } | ||
401 | pc->xferred += temp; | ||
402 | pc->cur_pos += temp; | ||
403 | ide_pad_transfer(drive, 0, bcount - temp); | ||
404 | goto next_irq; | 415 | goto next_irq; |
405 | } | 416 | } |
406 | debug_log("The device wants to send us more data than " | 417 | debug_log("The device wants to send us more data than " |
@@ -410,14 +421,13 @@ cmd_finished: | |||
410 | } else | 421 | } else |
411 | xferfunc = tp_ops->output_data; | 422 | xferfunc = tp_ops->output_data; |
412 | 423 | ||
413 | if ((drive->media == ide_floppy && !scsi && !pc->buf) || | 424 | if ((drive->media == ide_floppy && !pc->buf) || |
414 | (drive->media == ide_tape && !scsi && pc->bh) || | 425 | (drive->media == ide_tape && pc->bh)) { |
415 | (scsi && pc->sg)) { | ||
416 | int done = drive->pc_io_buffers(drive, pc, bcount, | 426 | int done = drive->pc_io_buffers(drive, pc, bcount, |
417 | !!(pc->flags & PC_FLAG_WRITING)); | 427 | !!(pc->flags & PC_FLAG_WRITING)); |
418 | 428 | ||
419 | /* FIXME: don't do partial completions */ | 429 | /* FIXME: don't do partial completions */ |
420 | if (drive->media == ide_floppy && !scsi) | 430 | if (drive->media == ide_floppy) |
421 | ide_end_request(drive, 1, done >> 9); | 431 | ide_end_request(drive, 1, done >> 9); |
422 | } else | 432 | } else |
423 | xferfunc(drive, NULL, pc->cur_pos, bcount); | 433 | xferfunc(drive, NULL, pc->cur_pos, bcount); |
@@ -430,7 +440,7 @@ cmd_finished: | |||
430 | rq->cmd[0], bcount); | 440 | rq->cmd[0], bcount); |
431 | next_irq: | 441 | next_irq: |
432 | /* And set the interrupt handler again */ | 442 | /* And set the interrupt handler again */ |
433 | ide_set_handler(drive, ide_pc_intr, timeout, expiry); | 443 | ide_set_handler(drive, ide_pc_intr, timeout, NULL); |
434 | return ide_started; | 444 | return ide_started; |
435 | } | 445 | } |
436 | 446 | ||
@@ -479,11 +489,12 @@ static int ide_delayed_transfer_pc(ide_drive_t *drive) | |||
479 | 489 | ||
480 | static ide_startstop_t ide_transfer_pc(ide_drive_t *drive) | 490 | static ide_startstop_t ide_transfer_pc(ide_drive_t *drive) |
481 | { | 491 | { |
482 | struct ide_atapi_pc *pc = drive->pc; | 492 | struct ide_atapi_pc *uninitialized_var(pc); |
483 | ide_hwif_t *hwif = drive->hwif; | 493 | ide_hwif_t *hwif = drive->hwif; |
484 | struct request *rq = hwif->hwgroup->rq; | 494 | struct request *rq = hwif->hwgroup->rq; |
485 | ide_expiry_t *expiry; | 495 | ide_expiry_t *expiry; |
486 | unsigned int timeout; | 496 | unsigned int timeout; |
497 | int cmd_len; | ||
487 | ide_startstop_t startstop; | 498 | ide_startstop_t startstop; |
488 | u8 ireason; | 499 | u8 ireason; |
489 | 500 | ||
@@ -493,101 +504,124 @@ static ide_startstop_t ide_transfer_pc(ide_drive_t *drive) | |||
493 | return startstop; | 504 | return startstop; |
494 | } | 505 | } |
495 | 506 | ||
496 | ireason = ide_read_ireason(drive); | 507 | if (drive->atapi_flags & IDE_AFLAG_DRQ_INTERRUPT) { |
497 | if (drive->media == ide_tape && | 508 | if (drive->dma) |
498 | (drive->dev_flags & IDE_DFLAG_SCSI) == 0) | 509 | drive->waiting_for_dma = 1; |
499 | ireason = ide_wait_ireason(drive, ireason); | ||
500 | |||
501 | if ((ireason & ATAPI_COD) == 0 || (ireason & ATAPI_IO)) { | ||
502 | printk(KERN_ERR "%s: (IO,CoD) != (0,1) while issuing " | ||
503 | "a packet command\n", drive->name); | ||
504 | return ide_do_reset(drive); | ||
505 | } | 510 | } |
506 | 511 | ||
507 | /* | 512 | if (dev_is_idecd(drive)) { |
508 | * If necessary schedule the packet transfer to occur 'timeout' | 513 | /* ATAPI commands get padded out to 12 bytes minimum */ |
509 | * milliseconds later in ide_delayed_transfer_pc() after the device | 514 | cmd_len = COMMAND_SIZE(rq->cmd[0]); |
510 | * says it's ready for a packet. | 515 | if (cmd_len < ATAPI_MIN_CDB_BYTES) |
511 | */ | 516 | cmd_len = ATAPI_MIN_CDB_BYTES; |
512 | if (drive->atapi_flags & IDE_AFLAG_ZIP_DRIVE) { | 517 | |
513 | timeout = drive->pc_delay; | 518 | timeout = rq->timeout; |
514 | expiry = &ide_delayed_transfer_pc; | 519 | expiry = ide_cd_expiry; |
515 | } else { | 520 | } else { |
516 | if (drive->dev_flags & IDE_DFLAG_SCSI) { | 521 | pc = drive->pc; |
517 | timeout = ide_scsi_get_timeout(pc); | 522 | |
518 | expiry = ide_scsi_expiry; | 523 | cmd_len = ATAPI_MIN_CDB_BYTES; |
524 | |||
525 | /* | ||
526 | * If necessary schedule the packet transfer to occur 'timeout' | ||
527 | * milliseconds later in ide_delayed_transfer_pc() after the |||
528 | * device says it's ready for a packet. | ||
529 | */ | ||
530 | if (drive->atapi_flags & IDE_AFLAG_ZIP_DRIVE) { | ||
531 | timeout = drive->pc_delay; | ||
532 | expiry = &ide_delayed_transfer_pc; | ||
519 | } else { | 533 | } else { |
520 | timeout = (drive->media == ide_floppy) ? WAIT_FLOPPY_CMD | 534 | timeout = (drive->media == ide_floppy) ? WAIT_FLOPPY_CMD |
521 | : WAIT_TAPE_CMD; | 535 | : WAIT_TAPE_CMD; |
522 | expiry = NULL; | 536 | expiry = NULL; |
523 | } | 537 | } |
538 | |||
539 | ireason = ide_read_ireason(drive); | ||
540 | if (drive->media == ide_tape) | ||
541 | ireason = ide_wait_ireason(drive, ireason); | ||
542 | |||
543 | if ((ireason & ATAPI_COD) == 0 || (ireason & ATAPI_IO)) { | ||
544 | printk(KERN_ERR "%s: (IO,CoD) != (0,1) while issuing " | ||
545 | "a packet command\n", drive->name); | ||
546 | |||
547 | return ide_do_reset(drive); | ||
548 | } | ||
524 | } | 549 | } |
525 | 550 | ||
526 | /* Set the interrupt routine */ | 551 | /* Set the interrupt routine */ |
527 | ide_set_handler(drive, ide_pc_intr, timeout, expiry); | 552 | ide_set_handler(drive, ide_pc_intr, timeout, expiry); |
528 | 553 | ||
529 | /* Begin DMA, if necessary */ | 554 | /* Begin DMA, if necessary */ |
530 | if (pc->flags & PC_FLAG_DMA_OK) { | 555 | if (dev_is_idecd(drive)) { |
531 | pc->flags |= PC_FLAG_DMA_IN_PROGRESS; | 556 | if (drive->dma) |
532 | hwif->dma_ops->dma_start(drive); | 557 | hwif->dma_ops->dma_start(drive); |
558 | } else { | ||
559 | if (pc->flags & PC_FLAG_DMA_OK) { | ||
560 | pc->flags |= PC_FLAG_DMA_IN_PROGRESS; | ||
561 | hwif->dma_ops->dma_start(drive); | ||
562 | } | ||
533 | } | 563 | } |
534 | 564 | ||
535 | /* Send the actual packet */ | 565 | /* Send the actual packet */ |
536 | if ((drive->atapi_flags & IDE_AFLAG_ZIP_DRIVE) == 0) | 566 | if ((drive->atapi_flags & IDE_AFLAG_ZIP_DRIVE) == 0) |
537 | hwif->tp_ops->output_data(drive, NULL, rq->cmd, 12); | 567 | hwif->tp_ops->output_data(drive, NULL, rq->cmd, cmd_len); |
538 | 568 | ||
539 | return ide_started; | 569 | return ide_started; |
540 | } | 570 | } |
541 | 571 | ||
542 | ide_startstop_t ide_issue_pc(ide_drive_t *drive, unsigned int timeout, | 572 | ide_startstop_t ide_issue_pc(ide_drive_t *drive) |
543 | ide_expiry_t *expiry) | ||
544 | { | 573 | { |
545 | struct ide_atapi_pc *pc = drive->pc; | 574 | struct ide_atapi_pc *pc; |
546 | ide_hwif_t *hwif = drive->hwif; | 575 | ide_hwif_t *hwif = drive->hwif; |
576 | ide_expiry_t *expiry = NULL; | ||
577 | unsigned int timeout; | ||
547 | u32 tf_flags; | 578 | u32 tf_flags; |
548 | u16 bcount; | 579 | u16 bcount; |
549 | u8 scsi = !!(drive->dev_flags & IDE_DFLAG_SCSI); | ||
550 | 580 | ||
551 | /* We haven't transferred any data yet */ | 581 | if (dev_is_idecd(drive)) { |
552 | pc->xferred = 0; | 582 | tf_flags = IDE_TFLAG_OUT_NSECT | IDE_TFLAG_OUT_LBAL; |
553 | pc->cur_pos = pc->buf; | 583 | bcount = ide_cd_get_xferlen(hwif->hwgroup->rq); |
584 | expiry = ide_cd_expiry; | ||
585 | timeout = ATAPI_WAIT_PC; | ||
554 | 586 | ||
555 | /* Request to transfer the entire buffer at once */ | 587 | if (drive->dma) |
556 | if (drive->media == ide_tape && scsi == 0) | 588 | drive->dma = !hwif->dma_ops->dma_setup(drive); |
557 | bcount = pc->req_xfer; | 589 | } else { |
558 | else | 590 | pc = drive->pc; |
559 | bcount = min(pc->req_xfer, 63 * 1024); | ||
560 | 591 | ||
561 | if (pc->flags & PC_FLAG_DMA_ERROR) { | 592 | /* We haven't transferred any data yet */ |
562 | pc->flags &= ~PC_FLAG_DMA_ERROR; | 593 | pc->xferred = 0; |
563 | ide_dma_off(drive); | 594 | pc->cur_pos = pc->buf; |
564 | } | ||
565 | 595 | ||
566 | if ((pc->flags & PC_FLAG_DMA_OK) && | 596 | tf_flags = IDE_TFLAG_OUT_DEVICE; |
567 | (drive->dev_flags & IDE_DFLAG_USING_DMA)) { | 597 | bcount = ((drive->media == ide_tape) ? |
568 | if (scsi) | 598 | pc->req_xfer : |
569 | hwif->sg_mapped = 1; | 599 | min(pc->req_xfer, 63 * 1024)); |
570 | drive->dma = !hwif->dma_ops->dma_setup(drive); | ||
571 | if (scsi) | ||
572 | hwif->sg_mapped = 0; | ||
573 | } | ||
574 | 600 | ||
575 | if (!drive->dma) | 601 | if (pc->flags & PC_FLAG_DMA_ERROR) { |
576 | pc->flags &= ~PC_FLAG_DMA_OK; | 602 | pc->flags &= ~PC_FLAG_DMA_ERROR; |
603 | ide_dma_off(drive); | ||
604 | } | ||
577 | 605 | ||
578 | if (scsi) | 606 | if ((pc->flags & PC_FLAG_DMA_OK) && |
579 | tf_flags = 0; | 607 | (drive->dev_flags & IDE_DFLAG_USING_DMA)) |
580 | else if (drive->media == ide_cdrom || drive->media == ide_optical) | 608 | drive->dma = !hwif->dma_ops->dma_setup(drive); |
581 | tf_flags = IDE_TFLAG_OUT_NSECT | IDE_TFLAG_OUT_LBAL; | 609 | |
582 | else | 610 | if (!drive->dma) |
583 | tf_flags = IDE_TFLAG_OUT_DEVICE; | 611 | pc->flags &= ~PC_FLAG_DMA_OK; |
612 | |||
613 | timeout = (drive->media == ide_floppy) ? WAIT_FLOPPY_CMD | ||
614 | : WAIT_TAPE_CMD; | ||
615 | } | ||
584 | 616 | ||
585 | ide_pktcmd_tf_load(drive, tf_flags, bcount, drive->dma); | 617 | ide_pktcmd_tf_load(drive, tf_flags, bcount, drive->dma); |
586 | 618 | ||
587 | /* Issue the packet command */ | 619 | /* Issue the packet command */ |
588 | if (drive->atapi_flags & IDE_AFLAG_DRQ_INTERRUPT) { | 620 | if (drive->atapi_flags & IDE_AFLAG_DRQ_INTERRUPT) { |
621 | if (drive->dma) | ||
622 | drive->waiting_for_dma = 0; | ||
589 | ide_execute_command(drive, ATA_CMD_PACKET, ide_transfer_pc, | 623 | ide_execute_command(drive, ATA_CMD_PACKET, ide_transfer_pc, |
590 | timeout, NULL); | 624 | timeout, expiry); |
591 | return ide_started; | 625 | return ide_started; |
592 | } else { | 626 | } else { |
593 | ide_execute_pkt_cmd(drive); | 627 | ide_execute_pkt_cmd(drive); |
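After this refactor ide_issue_pc() picks the timeout, expiry hook and transfer length itself, so a caller merely parks its packet command in drive->pc beforehand (the ide-floppy hunk below does exactly this). Schematically, with an assumed command builder:

static ide_startstop_t foo_issue(ide_drive_t *drive,
				 struct ide_atapi_pc *pc)
{
	foo_create_mode_sense_cmd(pc);	/* assumed builder */
	drive->pc = pc;			/* ide_issue_pc() reads drive->pc */

	return ide_issue_pc(drive);
}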
diff --git a/drivers/ide/ide-cd.c b/drivers/ide/ide-cd.c index 5daa4dd1b018..1a7410f88249 100644 --- a/drivers/ide/ide-cd.c +++ b/drivers/ide/ide-cd.c | |||
@@ -53,14 +53,6 @@ | |||
53 | 53 | ||
54 | #include "ide-cd.h" | 54 | #include "ide-cd.h" |
55 | 55 | ||
56 | #define IDECD_DEBUG_LOG 1 | ||
57 | |||
58 | #if IDECD_DEBUG_LOG | ||
59 | #define ide_debug_log(lvl, fmt, args...) __ide_debug_log(lvl, fmt, args) | ||
60 | #else | ||
61 | #define ide_debug_log(lvl, fmt, args...) do {} while (0) | ||
62 | #endif | ||
63 | |||
64 | static DEFINE_MUTEX(idecd_ref_mutex); | 56 | static DEFINE_MUTEX(idecd_ref_mutex); |
65 | 57 | ||
66 | static void ide_cd_release(struct kref *); | 58 | static void ide_cd_release(struct kref *); |
@@ -519,37 +511,8 @@ end_request: | |||
519 | return 1; | 511 | return 1; |
520 | } | 512 | } |
521 | 513 | ||
522 | static int cdrom_timer_expiry(ide_drive_t *drive) | 514 | static ide_startstop_t cdrom_transfer_packet_command(ide_drive_t *); |
523 | { | 515 | static ide_startstop_t cdrom_newpc_intr(ide_drive_t *); |
524 | struct request *rq = HWGROUP(drive)->rq; | ||
525 | unsigned long wait = 0; | ||
526 | |||
527 | ide_debug_log(IDE_DBG_RQ, "Call %s: rq->cmd[0]: 0x%x\n", __func__, | ||
528 | rq->cmd[0]); | ||
529 | |||
530 | /* | ||
531 | * Some commands are *slow* and normally take a long time to complete. | ||
532 | * Usually we can use the ATAPI "disconnect" to bypass this, but not all | ||
533 | * commands/drives support that. Let ide_timer_expiry keep polling us | ||
534 | * for these. | ||
535 | */ | ||
536 | switch (rq->cmd[0]) { | ||
537 | case GPCMD_BLANK: | ||
538 | case GPCMD_FORMAT_UNIT: | ||
539 | case GPCMD_RESERVE_RZONE_TRACK: | ||
540 | case GPCMD_CLOSE_TRACK: | ||
541 | case GPCMD_FLUSH_CACHE: | ||
542 | wait = ATAPI_WAIT_PC; | ||
543 | break; | ||
544 | default: | ||
545 | if (!(rq->cmd_flags & REQ_QUIET)) | ||
546 | printk(KERN_INFO PFX "cmd 0x%x timed out\n", | ||
547 | rq->cmd[0]); | ||
548 | wait = 0; | ||
549 | break; | ||
550 | } | ||
551 | return wait; | ||
552 | } | ||
553 | 516 | ||
554 | /* | 517 | /* |
555 | * Set up the device registers for transferring a packet command on DEV, | 518 | * Set up the device registers for transferring a packet command on DEV, |
@@ -559,11 +522,13 @@ static int cdrom_timer_expiry(ide_drive_t *drive) | |||
559 | * called when the interrupt from the drive arrives. Otherwise, HANDLER | 522 | * called when the interrupt from the drive arrives. Otherwise, HANDLER |
560 | * will be called immediately after the drive is prepared for the transfer. | 523 | * will be called immediately after the drive is prepared for the transfer. |
561 | */ | 524 | */ |
562 | static ide_startstop_t cdrom_start_packet_command(ide_drive_t *drive, | 525 | static ide_startstop_t cdrom_start_packet_command(ide_drive_t *drive) |
563 | int xferlen, | ||
564 | ide_handler_t *handler) | ||
565 | { | 526 | { |
566 | ide_hwif_t *hwif = drive->hwif; | 527 | ide_hwif_t *hwif = drive->hwif; |
528 | struct request *rq = hwif->hwgroup->rq; | ||
529 | int xferlen; | ||
530 | |||
531 | xferlen = ide_cd_get_xferlen(rq); | ||
567 | 532 | ||
568 | ide_debug_log(IDE_DBG_PC, "Call %s, xferlen: %d\n", __func__, xferlen); | 533 | ide_debug_log(IDE_DBG_PC, "Call %s, xferlen: %d\n", __func__, xferlen); |
569 | 534 | ||
@@ -581,13 +546,14 @@ static ide_startstop_t cdrom_start_packet_command(ide_drive_t *drive, | |||
581 | drive->waiting_for_dma = 0; | 546 | drive->waiting_for_dma = 0; |
582 | 547 | ||
583 | /* packet command */ | 548 | /* packet command */ |
584 | ide_execute_command(drive, ATA_CMD_PACKET, handler, | 549 | ide_execute_command(drive, ATA_CMD_PACKET, |
585 | ATAPI_WAIT_PC, cdrom_timer_expiry); | 550 | cdrom_transfer_packet_command, |
551 | ATAPI_WAIT_PC, ide_cd_expiry); | ||
586 | return ide_started; | 552 | return ide_started; |
587 | } else { | 553 | } else { |
588 | ide_execute_pkt_cmd(drive); | 554 | ide_execute_pkt_cmd(drive); |
589 | 555 | ||
590 | return (*handler) (drive); | 556 | return cdrom_transfer_packet_command(drive); |
591 | } | 557 | } |
592 | } | 558 | } |
593 | 559 | ||
@@ -598,11 +564,10 @@ static ide_startstop_t cdrom_start_packet_command(ide_drive_t *drive, | |||
598 | * there's data ready. | 564 | * there's data ready. |
599 | */ | 565 | */ |
600 | #define ATAPI_MIN_CDB_BYTES 12 | 566 | #define ATAPI_MIN_CDB_BYTES 12 |
601 | static ide_startstop_t cdrom_transfer_packet_command(ide_drive_t *drive, | 567 | static ide_startstop_t cdrom_transfer_packet_command(ide_drive_t *drive) |
602 | struct request *rq, | ||
603 | ide_handler_t *handler) | ||
604 | { | 568 | { |
605 | ide_hwif_t *hwif = drive->hwif; | 569 | ide_hwif_t *hwif = drive->hwif; |
570 | struct request *rq = hwif->hwgroup->rq; | ||
606 | int cmd_len; | 571 | int cmd_len; |
607 | ide_startstop_t startstop; | 572 | ide_startstop_t startstop; |
608 | 573 | ||
@@ -629,7 +594,7 @@ static ide_startstop_t cdrom_transfer_packet_command(ide_drive_t *drive, | |||
629 | } | 594 | } |
630 | 595 | ||
631 | /* arm the interrupt handler */ | 596 | /* arm the interrupt handler */ |
632 | ide_set_handler(drive, handler, rq->timeout, cdrom_timer_expiry); | 597 | ide_set_handler(drive, cdrom_newpc_intr, rq->timeout, ide_cd_expiry); |
633 | 598 | ||
634 | /* ATAPI commands get padded out to 12 bytes minimum */ | 599 | /* ATAPI commands get padded out to 12 bytes minimum */ |
635 | cmd_len = COMMAND_SIZE(rq->cmd[0]); | 600 | cmd_len = COMMAND_SIZE(rq->cmd[0]); |
@@ -717,8 +682,6 @@ static int ide_cd_check_transfer_size(ide_drive_t *drive, int len) | |||
717 | return 1; | 682 | return 1; |
718 | } | 683 | } |
719 | 684 | ||
720 | static ide_startstop_t cdrom_newpc_intr(ide_drive_t *); | ||
721 | |||
722 | static ide_startstop_t ide_cd_prepare_rw_request(ide_drive_t *drive, | 685 | static ide_startstop_t ide_cd_prepare_rw_request(ide_drive_t *drive, |
723 | struct request *rq) | 686 | struct request *rq) |
724 | { | 687 | { |
@@ -761,20 +724,6 @@ static ide_startstop_t ide_cd_prepare_rw_request(ide_drive_t *drive, | |||
761 | } | 724 | } |
762 | 725 | ||
763 | /* | 726 | /* |
764 | * Routine to send a read/write packet command to the drive. This is usually | ||
765 | * called directly from cdrom_start_{read,write}(). However, for drq_interrupt | ||
766 | * devices, it is called from an interrupt when the drive is ready to accept | ||
767 | * the command. | ||
768 | */ | ||
769 | static ide_startstop_t cdrom_start_rw_cont(ide_drive_t *drive) | ||
770 | { | ||
771 | struct request *rq = drive->hwif->hwgroup->rq; | ||
772 | |||
773 | /* send the command to the drive and return */ | ||
774 | return cdrom_transfer_packet_command(drive, rq, cdrom_newpc_intr); | ||
775 | } | ||
776 | |||
777 | /* | ||
778 | * Fix up a possibly partially-processed request so that we can start it over | 727 | * Fix up a possibly partially-processed request so that we can start it over |
779 | * entirely, or even put it back on the request queue. | 728 | * entirely, or even put it back on the request queue. |
780 | */ | 729 | */ |
@@ -1096,7 +1045,7 @@ static ide_startstop_t cdrom_newpc_intr(ide_drive_t *drive) | |||
1096 | } else { | 1045 | } else { |
1097 | timeout = ATAPI_WAIT_PC; | 1046 | timeout = ATAPI_WAIT_PC; |
1098 | if (!blk_fs_request(rq)) | 1047 | if (!blk_fs_request(rq)) |
1099 | expiry = cdrom_timer_expiry; | 1048 | expiry = ide_cd_expiry; |
1100 | } | 1049 | } |
1101 | 1050 | ||
1102 | ide_set_handler(drive, cdrom_newpc_intr, timeout, expiry); | 1051 | ide_set_handler(drive, cdrom_newpc_intr, timeout, expiry); |
@@ -1163,13 +1112,6 @@ static ide_startstop_t cdrom_start_rw(ide_drive_t *drive, struct request *rq) | |||
1163 | return ide_started; | 1112 | return ide_started; |
1164 | } | 1113 | } |
1165 | 1114 | ||
1166 | static ide_startstop_t cdrom_do_newpc_cont(ide_drive_t *drive) | ||
1167 | { | ||
1168 | struct request *rq = HWGROUP(drive)->rq; | ||
1169 | |||
1170 | return cdrom_transfer_packet_command(drive, rq, cdrom_newpc_intr); | ||
1171 | } | ||
1172 | |||
1173 | static void cdrom_do_block_pc(ide_drive_t *drive, struct request *rq) | 1115 | static void cdrom_do_block_pc(ide_drive_t *drive, struct request *rq) |
1174 | { | 1116 | { |
1175 | 1117 | ||
@@ -1214,18 +1156,12 @@ static void cdrom_do_block_pc(ide_drive_t *drive, struct request *rq) | |||
1214 | static ide_startstop_t ide_cd_do_request(ide_drive_t *drive, struct request *rq, | 1156 | static ide_startstop_t ide_cd_do_request(ide_drive_t *drive, struct request *rq, |
1215 | sector_t block) | 1157 | sector_t block) |
1216 | { | 1158 | { |
1217 | ide_handler_t *fn; | ||
1218 | int xferlen; | ||
1219 | |||
1220 | ide_debug_log(IDE_DBG_RQ, "Call %s, rq->cmd[0]: 0x%x, " | 1159 | ide_debug_log(IDE_DBG_RQ, "Call %s, rq->cmd[0]: 0x%x, " |
1221 | "rq->cmd_type: 0x%x, block: %llu\n", | 1160 | "rq->cmd_type: 0x%x, block: %llu\n", |
1222 | __func__, rq->cmd[0], rq->cmd_type, | 1161 | __func__, rq->cmd[0], rq->cmd_type, |
1223 | (unsigned long long)block); | 1162 | (unsigned long long)block); |
1224 | 1163 | ||
1225 | if (blk_fs_request(rq)) { | 1164 | if (blk_fs_request(rq)) { |
1226 | xferlen = 32768; | ||
1227 | fn = cdrom_start_rw_cont; | ||
1228 | |||
1229 | if (cdrom_start_rw(drive, rq) == ide_stopped) | 1165 | if (cdrom_start_rw(drive, rq) == ide_stopped) |
1230 | return ide_stopped; | 1166 | return ide_stopped; |
1231 | 1167 | ||
@@ -1233,9 +1169,6 @@ static ide_startstop_t ide_cd_do_request(ide_drive_t *drive, struct request *rq, | |||
1233 | return ide_stopped; | 1169 | return ide_stopped; |
1234 | } else if (blk_sense_request(rq) || blk_pc_request(rq) || | 1170 | } else if (blk_sense_request(rq) || blk_pc_request(rq) || |
1235 | rq->cmd_type == REQ_TYPE_ATA_PC) { | 1171 | rq->cmd_type == REQ_TYPE_ATA_PC) { |
1236 | xferlen = rq->data_len; | ||
1237 | fn = cdrom_do_newpc_cont; | ||
1238 | |||
1239 | if (!rq->timeout) | 1172 | if (!rq->timeout) |
1240 | rq->timeout = ATAPI_WAIT_PC; | 1173 | rq->timeout = ATAPI_WAIT_PC; |
1241 | 1174 | ||
@@ -1250,7 +1183,7 @@ static ide_startstop_t ide_cd_do_request(ide_drive_t *drive, struct request *rq, | |||
1250 | return ide_stopped; | 1183 | return ide_stopped; |
1251 | } | 1184 | } |
1252 | 1185 | ||
1253 | return cdrom_start_packet_command(drive, xferlen, fn); | 1186 | return cdrom_start_packet_command(drive); |
1254 | } | 1187 | } |
1255 | 1188 | ||
1256 | /* | 1189 | /* |
diff --git a/drivers/ide/ide-cd.h b/drivers/ide/ide-cd.h index d5ce3362dbd1..bf676b262181 100644 --- a/drivers/ide/ide-cd.h +++ b/drivers/ide/ide-cd.h | |||
@@ -8,10 +8,14 @@ | |||
8 | #include <linux/cdrom.h> | 8 | #include <linux/cdrom.h> |
9 | #include <asm/byteorder.h> | 9 | #include <asm/byteorder.h> |
10 | 10 | ||
11 | /* | 11 | #define IDECD_DEBUG_LOG 0 |
12 | * typical timeout for packet command | 12 | |
13 | */ | 13 | #if IDECD_DEBUG_LOG |
14 | #define ATAPI_WAIT_PC (60 * HZ) | 14 | #define ide_debug_log(lvl, fmt, args...) __ide_debug_log(lvl, fmt, args) |
15 | #else | ||
16 | #define ide_debug_log(lvl, fmt, args...) do {} while (0) | ||
17 | #endif | ||
18 | |||
15 | #define ATAPI_WAIT_WRITE_BUSY (10 * HZ) | 19 | #define ATAPI_WAIT_WRITE_BUSY (10 * HZ) |
16 | 20 | ||
17 | /************************************************************************/ | 21 | /************************************************************************/ |
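With the IDECD_DEBUG_LOG block hoisted into ide-cd.h and defaulted to 0, the logging becomes a compile-time switch shared by every ide-cd source file; call sites expand to an empty do {} while (0) unless the define is flipped to 1 locally:

/* unchanged call site, compiled away when IDECD_DEBUG_LOG is 0 */
ide_debug_log(IDE_DBG_RQ, "Call %s, rq->cmd[0]: 0x%x\n",
	      __func__, rq->cmd[0]);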
diff --git a/drivers/ide/ide-floppy.c b/drivers/ide/ide-floppy.c index aeb1ad782f54..0a48e2dc53a2 100644 --- a/drivers/ide/ide-floppy.c +++ b/drivers/ide/ide-floppy.c | |||
@@ -197,7 +197,7 @@ static ide_startstop_t idefloppy_issue_pc(ide_drive_t *drive, | |||
197 | 197 | ||
198 | pc->retries++; | 198 | pc->retries++; |
199 | 199 | ||
200 | return ide_issue_pc(drive, WAIT_FLOPPY_CMD, NULL); | 200 | return ide_issue_pc(drive); |
201 | } | 201 | } |
202 | 202 | ||
203 | void ide_floppy_create_read_capacity_cmd(struct ide_atapi_pc *pc) | 203 | void ide_floppy_create_read_capacity_cmd(struct ide_atapi_pc *pc) |
@@ -342,38 +342,38 @@ static ide_startstop_t ide_floppy_do_request(ide_drive_t *drive, | |||
342 | * Look at the flexible disk page parameters. We ignore the CHS capacity | 342 | * Look at the flexible disk page parameters. We ignore the CHS capacity |
343 | * parameters and use the LBA parameters instead. | 343 | * parameters and use the LBA parameters instead. |
344 | */ | 344 | */ |
345 | static int ide_floppy_get_flexible_disk_page(ide_drive_t *drive) | 345 | static int ide_floppy_get_flexible_disk_page(ide_drive_t *drive, |
346 | struct ide_atapi_pc *pc) | ||
346 | { | 347 | { |
347 | struct ide_disk_obj *floppy = drive->driver_data; | 348 | struct ide_disk_obj *floppy = drive->driver_data; |
348 | struct gendisk *disk = floppy->disk; | 349 | struct gendisk *disk = floppy->disk; |
349 | struct ide_atapi_pc pc; | ||
350 | u8 *page; | 350 | u8 *page; |
351 | int capacity, lba_capacity; | 351 | int capacity, lba_capacity; |
352 | u16 transfer_rate, sector_size, cyls, rpm; | 352 | u16 transfer_rate, sector_size, cyls, rpm; |
353 | u8 heads, sectors; | 353 | u8 heads, sectors; |
354 | 354 | ||
355 | ide_floppy_create_mode_sense_cmd(&pc, IDEFLOPPY_FLEXIBLE_DISK_PAGE); | 355 | ide_floppy_create_mode_sense_cmd(pc, IDEFLOPPY_FLEXIBLE_DISK_PAGE); |
356 | 356 | ||
357 | if (ide_queue_pc_tail(drive, disk, &pc)) { | 357 | if (ide_queue_pc_tail(drive, disk, pc)) { |
358 | printk(KERN_ERR PFX "Can't get flexible disk page params\n"); | 358 | printk(KERN_ERR PFX "Can't get flexible disk page params\n"); |
359 | return 1; | 359 | return 1; |
360 | } | 360 | } |
361 | 361 | ||
362 | if (pc.buf[3] & 0x80) | 362 | if (pc->buf[3] & 0x80) |
363 | drive->dev_flags |= IDE_DFLAG_WP; | 363 | drive->dev_flags |= IDE_DFLAG_WP; |
364 | else | 364 | else |
365 | drive->dev_flags &= ~IDE_DFLAG_WP; | 365 | drive->dev_flags &= ~IDE_DFLAG_WP; |
366 | 366 | ||
367 | set_disk_ro(disk, !!(drive->dev_flags & IDE_DFLAG_WP)); | 367 | set_disk_ro(disk, !!(drive->dev_flags & IDE_DFLAG_WP)); |
368 | 368 | ||
369 | page = &pc.buf[8]; | 369 | page = &pc->buf[8]; |
370 | 370 | ||
371 | transfer_rate = be16_to_cpup((__be16 *)&pc.buf[8 + 2]); | 371 | transfer_rate = be16_to_cpup((__be16 *)&pc->buf[8 + 2]); |
372 | sector_size = be16_to_cpup((__be16 *)&pc.buf[8 + 6]); | 372 | sector_size = be16_to_cpup((__be16 *)&pc->buf[8 + 6]); |
373 | cyls = be16_to_cpup((__be16 *)&pc.buf[8 + 8]); | 373 | cyls = be16_to_cpup((__be16 *)&pc->buf[8 + 8]); |
374 | rpm = be16_to_cpup((__be16 *)&pc.buf[8 + 28]); | 374 | rpm = be16_to_cpup((__be16 *)&pc->buf[8 + 28]); |
375 | heads = pc.buf[8 + 4]; | 375 | heads = pc->buf[8 + 4]; |
376 | sectors = pc.buf[8 + 5]; | 376 | sectors = pc->buf[8 + 5]; |
377 | 377 | ||
378 | capacity = cyls * heads * sectors * sector_size; | 378 | capacity = cyls * heads * sectors * sector_size; |
379 | 379 | ||
@@ -499,7 +499,7 @@ static int ide_floppy_get_capacity(ide_drive_t *drive) | |||
499 | 499 | ||
500 | /* Clik! disk does not support get_flexible_disk_page */ | 500 | /* Clik! disk does not support get_flexible_disk_page */ |
501 | if (!(drive->atapi_flags & IDE_AFLAG_CLIK_DRIVE)) | 501 | if (!(drive->atapi_flags & IDE_AFLAG_CLIK_DRIVE)) |
502 | (void) ide_floppy_get_flexible_disk_page(drive); | 502 | (void) ide_floppy_get_flexible_disk_page(drive, &pc); |
503 | 503 | ||
504 | return rc; | 504 | return rc; |
505 | } | 505 | } |
diff --git a/drivers/ide/ide-floppy_ioctl.c b/drivers/ide/ide-floppy_ioctl.c index 2bc51ff73fee..8f8be8546038 100644 --- a/drivers/ide/ide-floppy_ioctl.c +++ b/drivers/ide/ide-floppy_ioctl.c | |||
@@ -31,10 +31,11 @@ | |||
31 | * On exit we set nformats to the number of records we've actually initialized. | 31 | * On exit we set nformats to the number of records we've actually initialized. |
32 | */ | 32 | */ |
33 | 33 | ||
34 | static int ide_floppy_get_format_capacities(ide_drive_t *drive, int __user *arg) | 34 | static int ide_floppy_get_format_capacities(ide_drive_t *drive, |
35 | struct ide_atapi_pc *pc, | ||
36 | int __user *arg) | ||
35 | { | 37 | { |
36 | struct ide_disk_obj *floppy = drive->driver_data; | 38 | struct ide_disk_obj *floppy = drive->driver_data; |
37 | struct ide_atapi_pc pc; | ||
38 | u8 header_len, desc_cnt; | 39 | u8 header_len, desc_cnt; |
39 | int i, blocks, length, u_array_size, u_index; | 40 | int i, blocks, length, u_array_size, u_index; |
40 | int __user *argp; | 41 | int __user *argp; |
@@ -45,13 +46,13 @@ static int ide_floppy_get_format_capacities(ide_drive_t *drive, int __user *arg) | |||
45 | if (u_array_size <= 0) | 46 | if (u_array_size <= 0) |
46 | return -EINVAL; | 47 | return -EINVAL; |
47 | 48 | ||
48 | ide_floppy_create_read_capacity_cmd(&pc); | 49 | ide_floppy_create_read_capacity_cmd(pc); |
49 | if (ide_queue_pc_tail(drive, floppy->disk, &pc)) { | 50 | if (ide_queue_pc_tail(drive, floppy->disk, pc)) { |
50 | printk(KERN_ERR "ide-floppy: Can't get floppy parameters\n"); | 51 | printk(KERN_ERR "ide-floppy: Can't get floppy parameters\n"); |
51 | return -EIO; | 52 | return -EIO; |
52 | } | 53 | } |
53 | 54 | ||
54 | header_len = pc.buf[3]; | 55 | header_len = pc->buf[3]; |
55 | desc_cnt = header_len / 8; /* capacity descriptor of 8 bytes */ | 56 | desc_cnt = header_len / 8; /* capacity descriptor of 8 bytes */ |
56 | 57 | ||
57 | u_index = 0; | 58 | u_index = 0; |
@@ -68,8 +69,8 @@ static int ide_floppy_get_format_capacities(ide_drive_t *drive, int __user *arg) | |||
68 | if (u_index >= u_array_size) | 69 | if (u_index >= u_array_size) |
69 | break; /* User-supplied buffer too small */ | 70 | break; /* User-supplied buffer too small */ |
70 | 71 | ||
71 | blocks = be32_to_cpup((__be32 *)&pc.buf[desc_start]); | 72 | blocks = be32_to_cpup((__be32 *)&pc->buf[desc_start]); |
72 | length = be16_to_cpup((__be16 *)&pc.buf[desc_start + 6]); | 73 | length = be16_to_cpup((__be16 *)&pc->buf[desc_start + 6]); |
73 | 74 | ||
74 | if (put_user(blocks, argp)) | 75 | if (put_user(blocks, argp)) |
75 | return -EFAULT; | 76 | return -EFAULT; |
@@ -111,29 +112,28 @@ static void ide_floppy_create_format_unit_cmd(struct ide_atapi_pc *pc, int b, | |||
111 | pc->flags |= PC_FLAG_WRITING; | 112 | pc->flags |= PC_FLAG_WRITING; |
112 | } | 113 | } |
113 | 114 | ||
114 | static int ide_floppy_get_sfrp_bit(ide_drive_t *drive) | 115 | static int ide_floppy_get_sfrp_bit(ide_drive_t *drive, struct ide_atapi_pc *pc) |
115 | { | 116 | { |
116 | struct ide_disk_obj *floppy = drive->driver_data; | 117 | struct ide_disk_obj *floppy = drive->driver_data; |
117 | struct ide_atapi_pc pc; | ||
118 | 118 | ||
119 | drive->atapi_flags &= ~IDE_AFLAG_SRFP; | 119 | drive->atapi_flags &= ~IDE_AFLAG_SRFP; |
120 | 120 | ||
121 | ide_floppy_create_mode_sense_cmd(&pc, IDEFLOPPY_CAPABILITIES_PAGE); | 121 | ide_floppy_create_mode_sense_cmd(pc, IDEFLOPPY_CAPABILITIES_PAGE); |
122 | pc.flags |= PC_FLAG_SUPPRESS_ERROR; | 122 | pc->flags |= PC_FLAG_SUPPRESS_ERROR; |
123 | 123 | ||
124 | if (ide_queue_pc_tail(drive, floppy->disk, &pc)) | 124 | if (ide_queue_pc_tail(drive, floppy->disk, pc)) |
125 | return 1; | 125 | return 1; |
126 | 126 | ||
127 | if (pc.buf[8 + 2] & 0x40) | 127 | if (pc->buf[8 + 2] & 0x40) |
128 | drive->atapi_flags |= IDE_AFLAG_SRFP; | 128 | drive->atapi_flags |= IDE_AFLAG_SRFP; |
129 | 129 | ||
130 | return 0; | 130 | return 0; |
131 | } | 131 | } |
132 | 132 | ||
133 | static int ide_floppy_format_unit(ide_drive_t *drive, int __user *arg) | 133 | static int ide_floppy_format_unit(ide_drive_t *drive, struct ide_atapi_pc *pc, |
134 | int __user *arg) | ||
134 | { | 135 | { |
135 | struct ide_disk_obj *floppy = drive->driver_data; | 136 | struct ide_disk_obj *floppy = drive->driver_data; |
136 | struct ide_atapi_pc pc; | ||
137 | int blocks, length, flags, err = 0; | 137 | int blocks, length, flags, err = 0; |
138 | 138 | ||
139 | if (floppy->openers > 1) { | 139 | if (floppy->openers > 1) { |
@@ -166,10 +166,10 @@ static int ide_floppy_format_unit(ide_drive_t *drive, int __user *arg) | |||
166 | goto out; | 166 | goto out; |
167 | } | 167 | } |
168 | 168 | ||
169 | (void)ide_floppy_get_sfrp_bit(drive); | 169 | ide_floppy_get_sfrp_bit(drive, pc); |
170 | ide_floppy_create_format_unit_cmd(&pc, blocks, length, flags); | 170 | ide_floppy_create_format_unit_cmd(pc, blocks, length, flags); |
171 | 171 | ||
172 | if (ide_queue_pc_tail(drive, floppy->disk, &pc)) | 172 | if (ide_queue_pc_tail(drive, floppy->disk, pc)) |
173 | err = -EIO; | 173 | err = -EIO; |
174 | 174 | ||
175 | out: | 175 | out: |
@@ -188,15 +188,16 @@ out: | |||
188 | * the dsc bit, and return either 0 or 65536. | 188 | * the dsc bit, and return either 0 or 65536. |
189 | */ | 189 | */ |
190 | 190 | ||
191 | static int ide_floppy_get_format_progress(ide_drive_t *drive, int __user *arg) | 191 | static int ide_floppy_get_format_progress(ide_drive_t *drive, |
192 | struct ide_atapi_pc *pc, | ||
193 | int __user *arg) | ||
192 | { | 194 | { |
193 | struct ide_disk_obj *floppy = drive->driver_data; | 195 | struct ide_disk_obj *floppy = drive->driver_data; |
194 | struct ide_atapi_pc pc; | ||
195 | int progress_indication = 0x10000; | 196 | int progress_indication = 0x10000; |
196 | 197 | ||
197 | if (drive->atapi_flags & IDE_AFLAG_SRFP) { | 198 | if (drive->atapi_flags & IDE_AFLAG_SRFP) { |
198 | ide_create_request_sense_cmd(drive, &pc); | 199 | ide_create_request_sense_cmd(drive, pc); |
199 | if (ide_queue_pc_tail(drive, floppy->disk, &pc)) | 200 | if (ide_queue_pc_tail(drive, floppy->disk, pc)) |
200 | return -EIO; | 201 | return -EIO; |
201 | 202 | ||
202 | if (floppy->sense_key == 2 && | 203 | if (floppy->sense_key == 2 && |
@@ -241,20 +242,21 @@ static int ide_floppy_lockdoor(ide_drive_t *drive, struct ide_atapi_pc *pc, | |||
241 | return 0; | 242 | return 0; |
242 | } | 243 | } |
243 | 244 | ||
244 | static int ide_floppy_format_ioctl(ide_drive_t *drive, fmode_t mode, | 245 | static int ide_floppy_format_ioctl(ide_drive_t *drive, struct ide_atapi_pc *pc, |
245 | unsigned int cmd, void __user *argp) | 246 | fmode_t mode, unsigned int cmd, |
247 | void __user *argp) | ||
246 | { | 248 | { |
247 | switch (cmd) { | 249 | switch (cmd) { |
248 | case IDEFLOPPY_IOCTL_FORMAT_SUPPORTED: | 250 | case IDEFLOPPY_IOCTL_FORMAT_SUPPORTED: |
249 | return 0; | 251 | return 0; |
250 | case IDEFLOPPY_IOCTL_FORMAT_GET_CAPACITY: | 252 | case IDEFLOPPY_IOCTL_FORMAT_GET_CAPACITY: |
251 | return ide_floppy_get_format_capacities(drive, argp); | 253 | return ide_floppy_get_format_capacities(drive, pc, argp); |
252 | case IDEFLOPPY_IOCTL_FORMAT_START: | 254 | case IDEFLOPPY_IOCTL_FORMAT_START: |
253 | if (!(mode & FMODE_WRITE)) | 255 | if (!(mode & FMODE_WRITE)) |
254 | return -EPERM; | 256 | return -EPERM; |
255 | return ide_floppy_format_unit(drive, (int __user *)argp); | 257 | return ide_floppy_format_unit(drive, pc, (int __user *)argp); |
256 | case IDEFLOPPY_IOCTL_FORMAT_GET_PROGRESS: | 258 | case IDEFLOPPY_IOCTL_FORMAT_GET_PROGRESS: |
257 | return ide_floppy_get_format_progress(drive, argp); | 259 | return ide_floppy_get_format_progress(drive, pc, argp); |
258 | default: | 260 | default: |
259 | return -ENOTTY; | 261 | return -ENOTTY; |
260 | } | 262 | } |
@@ -270,7 +272,7 @@ int ide_floppy_ioctl(ide_drive_t *drive, struct block_device *bdev, | |||
270 | if (cmd == CDROMEJECT || cmd == CDROM_LOCKDOOR) | 272 | if (cmd == CDROMEJECT || cmd == CDROM_LOCKDOOR) |
271 | return ide_floppy_lockdoor(drive, &pc, arg, cmd); | 273 | return ide_floppy_lockdoor(drive, &pc, arg, cmd); |
272 | 274 | ||
273 | err = ide_floppy_format_ioctl(drive, mode, cmd, argp); | 275 | err = ide_floppy_format_ioctl(drive, &pc, mode, cmd, argp); |
274 | if (err != -ENOTTY) | 276 | if (err != -ENOTTY) |
275 | return err; | 277 | return err; |
276 | 278 | ||
diff --git a/drivers/ide/ide-io.c b/drivers/ide/ide-io.c index ecacc008fdaf..1c36a8e83d36 100644 --- a/drivers/ide/ide-io.c +++ b/drivers/ide/ide-io.c | |||
@@ -426,9 +426,6 @@ void ide_map_sg(ide_drive_t *drive, struct request *rq) | |||
426 | ide_hwif_t *hwif = drive->hwif; | 426 | ide_hwif_t *hwif = drive->hwif; |
427 | struct scatterlist *sg = hwif->sg_table; | 427 | struct scatterlist *sg = hwif->sg_table; |
428 | 428 | ||
429 | if (hwif->sg_mapped) /* needed by ide-scsi */ | ||
430 | return; | ||
431 | |||
432 | if (rq->cmd_type != REQ_TYPE_ATA_TASKFILE) { | 429 | if (rq->cmd_type != REQ_TYPE_ATA_TASKFILE) { |
433 | hwif->sg_nents = blk_rq_map_sg(drive->queue, rq, sg); | 430 | hwif->sg_nents = blk_rq_map_sg(drive->queue, rq, sg); |
434 | } else { | 431 | } else { |
@@ -667,85 +664,10 @@ void ide_stall_queue (ide_drive_t *drive, unsigned long timeout) | |||
667 | drive->sleep = timeout + jiffies; | 664 | drive->sleep = timeout + jiffies; |
668 | drive->dev_flags |= IDE_DFLAG_SLEEPING; | 665 | drive->dev_flags |= IDE_DFLAG_SLEEPING; |
669 | } | 666 | } |
670 | |||
671 | EXPORT_SYMBOL(ide_stall_queue); | 667 | EXPORT_SYMBOL(ide_stall_queue); |
672 | 668 | ||
673 | #define WAKEUP(drive) ((drive)->service_start + 2 * (drive)->service_time) | ||
674 | |||
675 | /** | ||
676 | * choose_drive - select a drive to service | ||
677 | * @hwgroup: hardware group to select on | ||
678 | * | ||
679 | * choose_drive() selects the next drive which will be serviced. | ||
680 | * This is necessary because the IDE layer can't issue commands | ||
681 | * to both drives on the same cable, unlike SCSI. | ||
682 | */ | ||
683 | |||
684 | static inline ide_drive_t *choose_drive (ide_hwgroup_t *hwgroup) | ||
685 | { | ||
686 | ide_drive_t *drive, *best; | ||
687 | |||
688 | repeat: | ||
689 | best = NULL; | ||
690 | drive = hwgroup->drive; | ||
691 | |||
692 | /* | ||
693 | * drive is doing pre-flush, ordered write, post-flush sequence. even | ||
694 | * though that is 3 requests, it must be seen as a single transaction. | ||
695 | * we must not preempt this drive until that is complete | ||
696 | */ | ||
697 | if (blk_queue_flushing(drive->queue)) { | ||
698 | /* | ||
699 | * small race where queue could get replugged during | ||
700 | * the 3-request flush cycle, just yank the plug since | ||
701 | * we want it to finish asap | ||
702 | */ | ||
703 | blk_remove_plug(drive->queue); | ||
704 | return drive; | ||
705 | } | ||
706 | |||
707 | do { | ||
708 | u8 dev_s = !!(drive->dev_flags & IDE_DFLAG_SLEEPING); | ||
709 | u8 best_s = (best && !!(best->dev_flags & IDE_DFLAG_SLEEPING)); | ||
710 | |||
711 | if ((dev_s == 0 || time_after_eq(jiffies, drive->sleep)) && | ||
712 | !elv_queue_empty(drive->queue)) { | ||
713 | if (best == NULL || | ||
714 | (dev_s && (best_s == 0 || time_before(drive->sleep, best->sleep))) || | ||
715 | (best_s == 0 && time_before(WAKEUP(drive), WAKEUP(best)))) { | ||
716 | if (!blk_queue_plugged(drive->queue)) | ||
717 | best = drive; | ||
718 | } | ||
719 | } | ||
720 | } while ((drive = drive->next) != hwgroup->drive); | ||
721 | |||
722 | if (best && (best->dev_flags & IDE_DFLAG_NICE1) && | ||
723 | (best->dev_flags & IDE_DFLAG_SLEEPING) == 0 && | ||
724 | best != hwgroup->drive && best->service_time > WAIT_MIN_SLEEP) { | ||
725 | long t = (signed long)(WAKEUP(best) - jiffies); | ||
726 | if (t >= WAIT_MIN_SLEEP) { | ||
727 | /* | ||
728 | * We *may* have some time to spare, but first let's see if | ||
729 | * someone can potentially benefit from our nice mood today.. | ||
730 | */ | ||
731 | drive = best->next; | ||
732 | do { | ||
733 | if ((drive->dev_flags & IDE_DFLAG_SLEEPING) == 0 | ||
734 | && time_before(jiffies - best->service_time, WAKEUP(drive)) | ||
735 | && time_before(WAKEUP(drive), jiffies + t)) | ||
736 | { | ||
737 | ide_stall_queue(best, min_t(long, t, 10 * WAIT_MIN_SLEEP)); | ||
738 | goto repeat; | ||
739 | } | ||
740 | } while ((drive = drive->next) != best); | ||
741 | } | ||
742 | } | ||
743 | return best; | ||
744 | } | ||
745 | |||
746 | /* | 669 | /* |
747 | * Issue a new request to a drive from hwgroup | 670 | * Issue a new request to a drive from hwgroup |
748 | * Caller must have already done spin_lock_irqsave(&hwgroup->lock, ..); | ||
749 | * | 671 | * |
750 | * A hwgroup is a serialized group of IDE interfaces. Usually there is | 672 | * A hwgroup is a serialized group of IDE interfaces. Usually there is |
751 | * exactly one hwif (interface) per hwgroup, but buggy controllers (eg. CMD640) | 673 | * exactly one hwif (interface) per hwgroup, but buggy controllers (eg. CMD640) |
@@ -757,8 +679,7 @@ repeat: | |||
757 | * possibly along with many other devices. This is especially common in | 679 | * possibly along with many other devices. This is especially common in |
758 | * PCI-based systems with off-board IDE controller cards. | 680 | * PCI-based systems with off-board IDE controller cards. |
759 | * | 681 | * |
760 | * The IDE driver uses a per-hwgroup spinlock to protect | 682 | * The IDE driver uses a per-hwgroup lock to protect the hwgroup->busy flag. |
761 | * access to the request queues, and to protect the hwgroup->busy flag. | ||
762 | * | 683 | * |
763 | * The first thread into the driver for a particular hwgroup sets the | 684 | * The first thread into the driver for a particular hwgroup sets the |
764 | * hwgroup->busy flag to indicate that this hwgroup is now active, | 685 | * hwgroup->busy flag to indicate that this hwgroup is now active, |
@@ -778,69 +699,41 @@ repeat: | |||
778 | * the driver. This makes the driver much friendlier to shared IRQs | 699 | * the driver. This makes the driver much friendlier to shared IRQs |
779 | * than previous designs, while remaining 100% (?) SMP safe and capable. | 700 | * than previous designs, while remaining 100% (?) SMP safe and capable. |
780 | */ | 701 | */ |
781 | static void ide_do_request (ide_hwgroup_t *hwgroup, int masked_irq) | 702 | void do_ide_request(struct request_queue *q) |
782 | { | 703 | { |
783 | ide_drive_t *drive; | 704 | ide_drive_t *drive = q->queuedata; |
784 | ide_hwif_t *hwif; | 705 | ide_hwif_t *hwif = drive->hwif; |
706 | ide_hwgroup_t *hwgroup = hwif->hwgroup; | ||
785 | struct request *rq; | 707 | struct request *rq; |
786 | ide_startstop_t startstop; | 708 | ide_startstop_t startstop; |
787 | int loops = 0; | 709 | |
788 | 710 | /* | |
789 | /* caller must own hwgroup->lock */ | 711 | * drive is doing pre-flush, ordered write, post-flush sequence. even |
790 | BUG_ON(!irqs_disabled()); | 712 | * though that is 3 requests, it must be seen as a single transaction. |
791 | 713 | * we must not preempt this drive until that is complete | |
792 | while (!hwgroup->busy) { | 714 | */ |
793 | hwgroup->busy = 1; | 715 | if (blk_queue_flushing(q)) |
794 | /* for atari only */ | ||
795 | ide_get_lock(ide_intr, hwgroup); | ||
796 | drive = choose_drive(hwgroup); | ||
797 | if (drive == NULL) { | ||
798 | int sleeping = 0; | ||
799 | unsigned long sleep = 0; /* shut up, gcc */ | ||
800 | hwgroup->rq = NULL; | ||
801 | drive = hwgroup->drive; | ||
802 | do { | ||
803 | if ((drive->dev_flags & IDE_DFLAG_SLEEPING) && | ||
804 | (sleeping == 0 || | ||
805 | time_before(drive->sleep, sleep))) { | ||
806 | sleeping = 1; | ||
807 | sleep = drive->sleep; | ||
808 | } | ||
809 | } while ((drive = drive->next) != hwgroup->drive); | ||
810 | if (sleeping) { | ||
811 | /* | 716 | /* |
812 | * Take a short snooze, and then wake up this hwgroup again. | 717 | * small race where queue could get replugged during |
813 | * This gives other hwgroups on the same a chance to | 718 | * the 3-request flush cycle, just yank the plug since |
814 | * play fairly with us, just in case there are big differences | 719 | * we want it to finish asap |
815 | * in relative throughputs.. don't want to hog the cpu too much. | ||
816 | */ | 720 | */ |
817 | if (time_before(sleep, jiffies + WAIT_MIN_SLEEP)) | 721 | blk_remove_plug(q); |
818 | sleep = jiffies + WAIT_MIN_SLEEP; | ||
819 | #if 1 | ||
820 | if (timer_pending(&hwgroup->timer)) | ||
821 | printk(KERN_CRIT "ide_set_handler: timer already active\n"); | ||
822 | #endif | ||
823 | /* so that ide_timer_expiry knows what to do */ | ||
824 | hwgroup->sleeping = 1; | ||
825 | hwgroup->req_gen_timer = hwgroup->req_gen; | ||
826 | mod_timer(&hwgroup->timer, sleep); | ||
827 | /* we purposely leave hwgroup->busy==1 | ||
828 | * while sleeping */ | ||
829 | } else { | ||
830 | /* Ugly, but how can we sleep for the lock | ||
831 | * otherwise? perhaps from tq_disk? | ||
832 | */ | ||
833 | 722 | ||
834 | /* for atari only */ | 723 | spin_unlock_irq(q->queue_lock); |
835 | ide_release_lock(); | 724 | spin_lock_irq(&hwgroup->lock); |
836 | hwgroup->busy = 0; | 725 | |
837 | } | 726 | if (!ide_lock_hwgroup(hwgroup)) { |
727 | repeat: | ||
728 | hwgroup->rq = NULL; | ||
838 | 729 | ||
839 | /* no more work for this hwgroup (for now) */ | 730 | if (drive->dev_flags & IDE_DFLAG_SLEEPING) { |
840 | return; | 731 | if (time_before(drive->sleep, jiffies)) { |
732 | ide_unlock_hwgroup(hwgroup); | ||
733 | goto plug_device; | ||
734 | } | ||
841 | } | 735 | } |
842 | again: | 736 | |
843 | hwif = HWIF(drive); | ||
844 | if (hwif != hwgroup->hwif) { | 737 | if (hwif != hwgroup->hwif) { |
845 | /* | 738 | /* |
846 | * set nIEN for previous hwif, drives in the | 739 | * set nIEN for previous hwif, drives in the |
@@ -852,16 +745,20 @@ static void ide_do_request (ide_hwgroup_t *hwgroup, int masked_irq) | |||
852 | hwgroup->hwif = hwif; | 745 | hwgroup->hwif = hwif; |
853 | hwgroup->drive = drive; | 746 | hwgroup->drive = drive; |
854 | drive->dev_flags &= ~(IDE_DFLAG_SLEEPING | IDE_DFLAG_PARKED); | 747 | drive->dev_flags &= ~(IDE_DFLAG_SLEEPING | IDE_DFLAG_PARKED); |
855 | drive->service_start = jiffies; | ||
856 | 748 | ||
749 | spin_unlock_irq(&hwgroup->lock); | ||
750 | spin_lock_irq(q->queue_lock); | ||
857 | /* | 751 | /* |
858 | * we know that the queue isn't empty, but this can happen | 752 | * we know that the queue isn't empty, but this can happen |
859 | * if the q->prep_rq_fn() decides to kill a request | 753 | * if the q->prep_rq_fn() decides to kill a request |
860 | */ | 754 | */ |
861 | rq = elv_next_request(drive->queue); | 755 | rq = elv_next_request(drive->queue); |
756 | spin_unlock_irq(q->queue_lock); | ||
757 | spin_lock_irq(&hwgroup->lock); | ||
758 | |||
862 | if (!rq) { | 759 | if (!rq) { |
863 | hwgroup->busy = 0; | 760 | ide_unlock_hwgroup(hwgroup); |
864 | break; | 761 | goto out; |
865 | } | 762 | } |
866 | 763 | ||
867 | /* | 764 | /* |
@@ -876,53 +773,36 @@ static void ide_do_request (ide_hwgroup_t *hwgroup, int masked_irq) | |||
876 | * though. I hope that doesn't happen too much, hopefully not | 773 | * though. I hope that doesn't happen too much, hopefully not |
877 | * unless the subdriver triggers such a thing in its own PM | 774 | * unless the subdriver triggers such a thing in its own PM |
878 | * state machine. | 775 | * state machine. |
879 | * | ||
880 | * We count how many times we loop here to make sure we service | ||
881 | * all drives in the hwgroup without looping for ever | ||
882 | */ | 776 | */ |
883 | if ((drive->dev_flags & IDE_DFLAG_BLOCKED) && | 777 | if ((drive->dev_flags & IDE_DFLAG_BLOCKED) && |
884 | blk_pm_request(rq) == 0 && | 778 | blk_pm_request(rq) == 0 && |
885 | (rq->cmd_flags & REQ_PREEMPT) == 0) { | 779 | (rq->cmd_flags & REQ_PREEMPT) == 0) { |
886 | drive = drive->next ? drive->next : hwgroup->drive; | 780 | /* there should be no pending command at this point */ |
887 | if (loops++ < 4 && !blk_queue_plugged(drive->queue)) | 781 | ide_unlock_hwgroup(hwgroup); |
888 | goto again; | 782 | goto plug_device; |
889 | /* We clear busy, there should be no pending ATA command at this point. */ | ||
890 | hwgroup->busy = 0; | ||
891 | break; | ||
892 | } | 783 | } |
893 | 784 | ||
894 | hwgroup->rq = rq; | 785 | hwgroup->rq = rq; |
895 | 786 | ||
896 | /* | 787 | spin_unlock_irq(&hwgroup->lock); |
897 | * Some systems have trouble with IDE IRQs arriving while | ||
898 | * the driver is still setting things up. So, here we disable | ||
899 | * the IRQ used by this interface while the request is being started. | ||
900 | * This may look bad at first, but pretty much the same thing | ||
901 | * happens anyway when any interrupt comes in, IDE or otherwise | ||
902 | * -- the kernel masks the IRQ while it is being handled. | ||
903 | */ | ||
904 | if (masked_irq != IDE_NO_IRQ && hwif->irq != masked_irq) | ||
905 | disable_irq_nosync(hwif->irq); | ||
906 | spin_unlock(&hwgroup->lock); | ||
907 | local_irq_enable_in_hardirq(); | ||
908 | /* allow other IRQs while we start this request */ | ||
909 | startstop = start_request(drive, rq); | 788 | startstop = start_request(drive, rq); |
910 | spin_lock_irq(&hwgroup->lock); | 789 | spin_lock_irq(&hwgroup->lock); |
911 | if (masked_irq != IDE_NO_IRQ && hwif->irq != masked_irq) | 790 | |
912 | enable_irq(hwif->irq); | ||
913 | if (startstop == ide_stopped) | 791 | if (startstop == ide_stopped) |
914 | hwgroup->busy = 0; | 792 | goto repeat; |
915 | } | 793 | } else |
916 | } | 794 | goto plug_device; |
795 | out: | ||
796 | spin_unlock_irq(&hwgroup->lock); | ||
797 | spin_lock_irq(q->queue_lock); | ||
798 | return; | ||
917 | 799 | ||
918 | /* | 800 | plug_device: |
919 | * Passes the stuff to ide_do_request | 801 | spin_unlock_irq(&hwgroup->lock); |
920 | */ | 802 | spin_lock_irq(q->queue_lock); |
921 | void do_ide_request(struct request_queue *q) | ||
922 | { | ||
923 | ide_drive_t *drive = q->queuedata; | ||
924 | 803 | ||
925 | ide_do_request(HWGROUP(drive), IDE_NO_IRQ); | 804 | if (!elv_queue_empty(q)) |
805 | blk_plug_device(q); | ||
926 | } | 806 | } |
927 | 807 | ||
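The ide_lock_hwgroup()/ide_unlock_hwgroup() pair used above replaces the open-coded hwgroup->busy manipulation. As a minimal sketch, assuming the helpers reduce to a plain test-and-set on that flag (their real definitions live in the IDE headers, not in this hunk):

	/* Sketch only: the caller holds hwgroup->lock with IRQs disabled. */
	static inline int ide_lock_hwgroup(ide_hwgroup_t *hwgroup)
	{
		if (hwgroup->busy)
			return 1;	/* hwgroup already owned, back off */
		hwgroup->busy = 1;
		return 0;	/* locked; the caller may issue a request */
	}

	static inline void ide_unlock_hwgroup(ide_hwgroup_t *hwgroup)
	{
		hwgroup->busy = 0;
	}

This matches how do_ide_request() uses them: a zero return means the queue may be serviced, and every early-exit path pairs the lock with an unlock before replugging the queue.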
928 | /* | 808 | /* |
@@ -983,6 +863,17 @@ out: | |||
983 | return ret; | 863 | return ret; |
984 | } | 864 | } |
985 | 865 | ||
866 | static void ide_plug_device(ide_drive_t *drive) | ||
867 | { | ||
868 | struct request_queue *q = drive->queue; | ||
869 | unsigned long flags; | ||
870 | |||
871 | spin_lock_irqsave(q->queue_lock, flags); | ||
872 | if (!elv_queue_empty(q)) | ||
873 | blk_plug_device(q); | ||
874 | spin_unlock_irqrestore(q->queue_lock, flags); | ||
875 | } | ||
876 | |||
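Note the design choice behind ide_plug_device(): ide_timer_expiry() and ide_intr() below decide to replug under hwgroup->lock but perform the replug only after dropping it, since do_ide_request() establishes the ordering queue_lock then hwgroup->lock, and taking q->queue_lock while still holding hwgroup->lock would invert it. The pattern, condensed from the hunks that follow:

	spin_lock_irqsave(&hwgroup->lock, flags);
	if (startstop == ide_stopped) {
		ide_unlock_hwgroup(hwgroup);
		plug_device = 1;		/* defer the queue work */
	}
	spin_unlock_irqrestore(&hwgroup->lock, flags);

	if (plug_device)
		ide_plug_device(drive);		/* takes q->queue_lock safely */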
986 | /** | 877 | /** |
987 | * ide_timer_expiry - handle lack of an IDE interrupt | 878 | * ide_timer_expiry - handle lack of an IDE interrupt |
988 | * @data: timer callback magic (hwgroup) | 879 | * @data: timer callback magic (hwgroup) |
@@ -1000,10 +891,12 @@ out: | |||
1000 | void ide_timer_expiry (unsigned long data) | 891 | void ide_timer_expiry (unsigned long data) |
1001 | { | 892 | { |
1002 | ide_hwgroup_t *hwgroup = (ide_hwgroup_t *) data; | 893 | ide_hwgroup_t *hwgroup = (ide_hwgroup_t *) data; |
894 | ide_drive_t *uninitialized_var(drive); | ||
1003 | ide_handler_t *handler; | 895 | ide_handler_t *handler; |
1004 | ide_expiry_t *expiry; | 896 | ide_expiry_t *expiry; |
1005 | unsigned long flags; | 897 | unsigned long flags; |
1006 | unsigned long wait = -1; | 898 | unsigned long wait = -1; |
899 | int plug_device = 0; | ||
1007 | 900 | ||
1008 | spin_lock_irqsave(&hwgroup->lock, flags); | 901 | spin_lock_irqsave(&hwgroup->lock, flags); |
1009 | 902 | ||
@@ -1015,22 +908,15 @@ void ide_timer_expiry (unsigned long data) | |||
1015 | * or we were "sleeping" to give other devices a chance. | 908 | * or we were "sleeping" to give other devices a chance. |
1016 | * Either way, we don't really want to complain about anything. | 909 | * Either way, we don't really want to complain about anything. |
1017 | */ | 910 | */ |
1018 | if (hwgroup->sleeping) { | ||
1019 | hwgroup->sleeping = 0; | ||
1020 | hwgroup->busy = 0; | ||
1021 | } | ||
1022 | } else { | 911 | } else { |
1023 | ide_drive_t *drive = hwgroup->drive; | 912 | drive = hwgroup->drive; |
1024 | if (!drive) { | 913 | if (!drive) { |
1025 | printk(KERN_ERR "ide_timer_expiry: hwgroup->drive was NULL\n"); | 914 | printk(KERN_ERR "ide_timer_expiry: hwgroup->drive was NULL\n"); |
1026 | hwgroup->handler = NULL; | 915 | hwgroup->handler = NULL; |
1027 | } else { | 916 | } else { |
1028 | ide_hwif_t *hwif; | 917 | ide_hwif_t *hwif; |
1029 | ide_startstop_t startstop = ide_stopped; | 918 | ide_startstop_t startstop = ide_stopped; |
1030 | if (!hwgroup->busy) { | 919 | |
1031 | hwgroup->busy = 1; /* paranoia */ | ||
1032 | printk(KERN_ERR "%s: ide_timer_expiry: hwgroup->busy was 0 ??\n", drive->name); | ||
1033 | } | ||
1034 | if ((expiry = hwgroup->expiry) != NULL) { | 920 | if ((expiry = hwgroup->expiry) != NULL) { |
1035 | /* continue */ | 921 | /* continue */ |
1036 | if ((wait = expiry(drive)) > 0) { | 922 | if ((wait = expiry(drive)) > 0) { |
@@ -1071,15 +957,18 @@ void ide_timer_expiry (unsigned long data) | |||
1071 | ide_error(drive, "irq timeout", | 957 | ide_error(drive, "irq timeout", |
1072 | hwif->tp_ops->read_status(hwif)); | 958 | hwif->tp_ops->read_status(hwif)); |
1073 | } | 959 | } |
1074 | drive->service_time = jiffies - drive->service_start; | ||
1075 | spin_lock_irq(&hwgroup->lock); | 960 | spin_lock_irq(&hwgroup->lock); |
1076 | enable_irq(hwif->irq); | 961 | enable_irq(hwif->irq); |
1077 | if (startstop == ide_stopped) | 962 | if (startstop == ide_stopped) { |
1078 | hwgroup->busy = 0; | 963 | ide_unlock_hwgroup(hwgroup); |
964 | plug_device = 1; | ||
965 | } | ||
1079 | } | 966 | } |
1080 | } | 967 | } |
1081 | ide_do_request(hwgroup, IDE_NO_IRQ); | ||
1082 | spin_unlock_irqrestore(&hwgroup->lock, flags); | 968 | spin_unlock_irqrestore(&hwgroup->lock, flags); |
969 | |||
970 | if (plug_device) | ||
971 | ide_plug_device(drive); | ||
1083 | } | 972 | } |
1084 | 973 | ||
1085 | /** | 974 | /** |
@@ -1173,10 +1062,11 @@ irqreturn_t ide_intr (int irq, void *dev_id) | |||
1173 | unsigned long flags; | 1062 | unsigned long flags; |
1174 | ide_hwgroup_t *hwgroup = (ide_hwgroup_t *)dev_id; | 1063 | ide_hwgroup_t *hwgroup = (ide_hwgroup_t *)dev_id; |
1175 | ide_hwif_t *hwif = hwgroup->hwif; | 1064 | ide_hwif_t *hwif = hwgroup->hwif; |
1176 | ide_drive_t *drive; | 1065 | ide_drive_t *uninitialized_var(drive); |
1177 | ide_handler_t *handler; | 1066 | ide_handler_t *handler; |
1178 | ide_startstop_t startstop; | 1067 | ide_startstop_t startstop; |
1179 | irqreturn_t irq_ret = IRQ_NONE; | 1068 | irqreturn_t irq_ret = IRQ_NONE; |
1069 | int plug_device = 0; | ||
1180 | 1070 | ||
1181 | spin_lock_irqsave(&hwgroup->lock, flags); | 1071 | spin_lock_irqsave(&hwgroup->lock, flags); |
1182 | 1072 | ||
@@ -1241,10 +1131,6 @@ irqreturn_t ide_intr (int irq, void *dev_id) | |||
1241 | */ | 1131 | */ |
1242 | goto out; | 1132 | goto out; |
1243 | 1133 | ||
1244 | if (!hwgroup->busy) { | ||
1245 | hwgroup->busy = 1; /* paranoia */ | ||
1246 | printk(KERN_ERR "%s: ide_intr: hwgroup->busy was 0 ??\n", drive->name); | ||
1247 | } | ||
1248 | hwgroup->handler = NULL; | 1134 | hwgroup->handler = NULL; |
1249 | hwgroup->req_gen++; | 1135 | hwgroup->req_gen++; |
1250 | del_timer(&hwgroup->timer); | 1136 | del_timer(&hwgroup->timer); |
@@ -1267,20 +1153,22 @@ irqreturn_t ide_intr (int irq, void *dev_id) | |||
1267 | * same irq as is currently being serviced here, and Linux | 1153 | * same irq as is currently being serviced here, and Linux |
1268 | * won't allow another of the same (on any CPU) until we return. | 1154 | * won't allow another of the same (on any CPU) until we return. |
1269 | */ | 1155 | */ |
1270 | drive->service_time = jiffies - drive->service_start; | ||
1271 | if (startstop == ide_stopped) { | 1156 | if (startstop == ide_stopped) { |
1272 | if (hwgroup->handler == NULL) { /* paranoia */ | 1157 | if (hwgroup->handler == NULL) { /* paranoia */ |
1273 | hwgroup->busy = 0; | 1158 | ide_unlock_hwgroup(hwgroup); |
1274 | ide_do_request(hwgroup, hwif->irq); | 1159 | plug_device = 1; |
1275 | } else { | 1160 | } else |
1276 | printk(KERN_ERR "%s: ide_intr: huh? expected NULL handler " | 1161 | printk(KERN_ERR "%s: %s: huh? expected NULL handler " |
1277 | "on exit\n", drive->name); | 1162 | "on exit\n", __func__, drive->name); |
1278 | } | ||
1279 | } | 1163 | } |
1280 | out_handled: | 1164 | out_handled: |
1281 | irq_ret = IRQ_HANDLED; | 1165 | irq_ret = IRQ_HANDLED; |
1282 | out: | 1166 | out: |
1283 | spin_unlock_irqrestore(&hwgroup->lock, flags); | 1167 | spin_unlock_irqrestore(&hwgroup->lock, flags); |
1168 | |||
1169 | if (plug_device) | ||
1170 | ide_plug_device(drive); | ||
1171 | |||
1284 | return irq_ret; | 1172 | return irq_ret; |
1285 | } | 1173 | } |
1286 | 1174 | ||
diff --git a/drivers/ide/ide-ioctls.c b/drivers/ide/ide-ioctls.c index 28232c64c346..1be263eb9c07 100644 --- a/drivers/ide/ide-ioctls.c +++ b/drivers/ide/ide-ioctls.c | |||
@@ -95,8 +95,7 @@ static int ide_set_nice_ioctl(ide_drive_t *drive, unsigned long arg) | |||
95 | return -EPERM; | 95 | return -EPERM; |
96 | 96 | ||
97 | if (((arg >> IDE_NICE_DSC_OVERLAP) & 1) && | 97 | if (((arg >> IDE_NICE_DSC_OVERLAP) & 1) && |
98 | (drive->media != ide_tape || | 98 | (drive->media != ide_tape)) |
99 | (drive->dev_flags & IDE_DFLAG_SCSI))) | ||
100 | return -EPERM; | 99 | return -EPERM; |
101 | 100 | ||
102 | if ((arg >> IDE_NICE_DSC_OVERLAP) & 1) | 101 | if ((arg >> IDE_NICE_DSC_OVERLAP) & 1) |
diff --git a/drivers/ide/ide-park.c b/drivers/ide/ide-park.c index 63d01c55f865..678454ac2483 100644 --- a/drivers/ide/ide-park.c +++ b/drivers/ide/ide-park.c | |||
@@ -16,16 +16,19 @@ static void issue_park_cmd(ide_drive_t *drive, unsigned long timeout) | |||
16 | spin_lock_irq(&hwgroup->lock); | 16 | spin_lock_irq(&hwgroup->lock); |
17 | if (drive->dev_flags & IDE_DFLAG_PARKED) { | 17 | if (drive->dev_flags & IDE_DFLAG_PARKED) { |
18 | int reset_timer = time_before(timeout, drive->sleep); | 18 | int reset_timer = time_before(timeout, drive->sleep); |
19 | int start_queue = 0; | ||
19 | 20 | ||
20 | drive->sleep = timeout; | 21 | drive->sleep = timeout; |
21 | wake_up_all(&ide_park_wq); | 22 | wake_up_all(&ide_park_wq); |
22 | if (reset_timer && hwgroup->sleeping && | 23 | if (reset_timer && del_timer(&hwgroup->timer)) |
23 | del_timer(&hwgroup->timer)) { | 24 | start_queue = 1; |
24 | hwgroup->sleeping = 0; | 25 | spin_unlock_irq(&hwgroup->lock); |
25 | hwgroup->busy = 0; | 26 | |
27 | if (start_queue) { | ||
28 | spin_lock_irq(q->queue_lock); | ||
26 | blk_start_queueing(q); | 29 | blk_start_queueing(q); |
30 | spin_unlock_irq(q->queue_lock); | ||
27 | } | 31 | } |
28 | spin_unlock_irq(&hwgroup->lock); | ||
29 | return; | 32 | return; |
30 | } | 33 | } |
31 | spin_unlock_irq(&hwgroup->lock); | 34 | spin_unlock_irq(&hwgroup->lock); |
diff --git a/drivers/ide/ide-probe.c b/drivers/ide/ide-probe.c index a64ec259f3d1..c5adb7b9c5b5 100644 --- a/drivers/ide/ide-probe.c +++ b/drivers/ide/ide-probe.c | |||
@@ -101,6 +101,82 @@ static void ide_disk_init_mult_count(ide_drive_t *drive) | |||
101 | } | 101 | } |
102 | } | 102 | } |
103 | 103 | ||
104 | static void ide_classify_ata_dev(ide_drive_t *drive) | ||
105 | { | ||
106 | u16 *id = drive->id; | ||
107 | char *m = (char *)&id[ATA_ID_PROD]; | ||
108 | int is_cfa = ata_id_is_cfa(id); | ||
109 | |||
110 | /* CF devices are *not* removable in the Linux definition of the term */ | ||
111 | if (is_cfa == 0 && (id[ATA_ID_CONFIG] & (1 << 7))) | ||
112 | drive->dev_flags |= IDE_DFLAG_REMOVABLE; | ||
113 | |||
114 | drive->media = ide_disk; | ||
115 | |||
116 | if (!ata_id_has_unload(drive->id)) | ||
117 | drive->dev_flags |= IDE_DFLAG_NO_UNLOAD; | ||
118 | |||
119 | printk(KERN_INFO "%s: %s, %s DISK drive\n", drive->name, m, | ||
120 | is_cfa ? "CFA" : "ATA"); | ||
121 | } | ||
122 | |||
123 | static void ide_classify_atapi_dev(ide_drive_t *drive) | ||
124 | { | ||
125 | u16 *id = drive->id; | ||
126 | char *m = (char *)&id[ATA_ID_PROD]; | ||
127 | u8 type = (id[ATA_ID_CONFIG] >> 8) & 0x1f; | ||
128 | |||
129 | printk(KERN_INFO "%s: %s, ATAPI ", drive->name, m); | ||
130 | switch (type) { | ||
131 | case ide_floppy: | ||
132 | if (!strstr(m, "CD-ROM")) { | ||
133 | if (!strstr(m, "oppy") && | ||
134 | !strstr(m, "poyp") && | ||
135 | !strstr(m, "ZIP")) | ||
136 | printk(KERN_CONT "cdrom or floppy?, assuming "); | ||
137 | if (drive->media != ide_cdrom) { | ||
138 | printk(KERN_CONT "FLOPPY"); | ||
139 | drive->dev_flags |= IDE_DFLAG_REMOVABLE; | ||
140 | break; | ||
141 | } | ||
142 | } | ||
143 | /* Early cdrom models used zero */ | ||
144 | type = ide_cdrom; | ||
145 | case ide_cdrom: | ||
146 | drive->dev_flags |= IDE_DFLAG_REMOVABLE; | ||
147 | #ifdef CONFIG_PPC | ||
148 | /* kludge for Apple PowerBook internal zip */ | ||
149 | if (!strstr(m, "CD-ROM") && strstr(m, "ZIP")) { | ||
150 | printk(KERN_CONT "FLOPPY"); | ||
151 | type = ide_floppy; | ||
152 | break; | ||
153 | } | ||
154 | #endif | ||
155 | printk(KERN_CONT "CD/DVD-ROM"); | ||
156 | break; | ||
157 | case ide_tape: | ||
158 | printk(KERN_CONT "TAPE"); | ||
159 | break; | ||
160 | case ide_optical: | ||
161 | printk(KERN_CONT "OPTICAL"); | ||
162 | drive->dev_flags |= IDE_DFLAG_REMOVABLE; | ||
163 | break; | ||
164 | default: | ||
165 | printk(KERN_CONT "UNKNOWN (type %d)", type); | ||
166 | break; | ||
167 | } | ||
168 | |||
169 | printk(KERN_CONT " drive\n"); | ||
170 | drive->media = type; | ||
171 | /* an ATAPI device ignores DRDY */ | ||
172 | drive->ready_stat = 0; | ||
173 | if (ata_id_cdb_intr(id)) | ||
174 | drive->atapi_flags |= IDE_AFLAG_DRQ_INTERRUPT; | ||
175 | drive->dev_flags |= IDE_DFLAG_DOORLOCKING; | ||
176 | /* we don't do head unloading on ATAPI devices */ | ||
177 | drive->dev_flags |= IDE_DFLAG_NO_UNLOAD; | ||
178 | } | ||
179 | |||
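As a worked example of the type extraction in ide_classify_atapi_dev(), assuming the standard IDENTIFY PACKET DEVICE encoding in which word 0 bits 12:8 carry the ATAPI device type (the value below is a typical CD-ROM reading, not taken from this patch):

	u16 config = 0x85c0;	/* 1000 0101 1100 0000b: bits 15:14 = 10b -> ATAPI */
	u8 type = (config >> 8) & 0x1f;	/* = 0x05, the ide_cdrom case above */

A device type of zero (direct-access) lands in the ide_floppy case, which is why that case carries the "Early cdrom models used zero" fallthrough.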
104 | /** | 180 | /** |
105 | * do_identify - identify a drive | 181 | * do_identify - identify a drive |
106 | * @drive: drive to identify | 182 | * @drive: drive to identify |
@@ -117,7 +193,7 @@ static void do_identify(ide_drive_t *drive, u8 cmd) | |||
117 | u16 *id = drive->id; | 193 | u16 *id = drive->id; |
118 | char *m = (char *)&id[ATA_ID_PROD]; | 194 | char *m = (char *)&id[ATA_ID_PROD]; |
119 | unsigned long flags; | 195 | unsigned long flags; |
120 | int bswap = 1, is_cfa; | 196 | int bswap = 1; |
121 | 197 | ||
122 | /* local CPU only; some systems need this */ | 198 | /* local CPU only; some systems need this */ |
123 | local_irq_save(flags); | 199 | local_irq_save(flags); |
@@ -154,91 +230,23 @@ static void do_identify(ide_drive_t *drive, u8 cmd) | |||
154 | if (strstr(m, "E X A B Y T E N E S T")) | 230 | if (strstr(m, "E X A B Y T E N E S T")) |
155 | goto err_misc; | 231 | goto err_misc; |
156 | 232 | ||
157 | printk(KERN_INFO "%s: %s, ", drive->name, m); | ||
158 | |||
159 | drive->dev_flags |= IDE_DFLAG_PRESENT; | 233 | drive->dev_flags |= IDE_DFLAG_PRESENT; |
160 | drive->dev_flags &= ~IDE_DFLAG_DEAD; | 234 | drive->dev_flags &= ~IDE_DFLAG_DEAD; |
161 | 235 | ||
162 | /* | 236 | /* |
163 | * Check for an ATAPI device | 237 | * Check for an ATAPI device |
164 | */ | 238 | */ |
165 | if (cmd == ATA_CMD_ID_ATAPI) { | 239 | if (cmd == ATA_CMD_ID_ATAPI) |
166 | u8 type = (id[ATA_ID_CONFIG] >> 8) & 0x1f; | 240 | ide_classify_atapi_dev(drive); |
167 | 241 | else | |
168 | printk(KERN_CONT "ATAPI "); | ||
169 | switch (type) { | ||
170 | case ide_floppy: | ||
171 | if (!strstr(m, "CD-ROM")) { | ||
172 | if (!strstr(m, "oppy") && | ||
173 | !strstr(m, "poyp") && | ||
174 | !strstr(m, "ZIP")) | ||
175 | printk(KERN_CONT "cdrom or floppy?, assuming "); | ||
176 | if (drive->media != ide_cdrom) { | ||
177 | printk(KERN_CONT "FLOPPY"); | ||
178 | drive->dev_flags |= IDE_DFLAG_REMOVABLE; | ||
179 | break; | ||
180 | } | ||
181 | } | ||
182 | /* Early cdrom models used zero */ | ||
183 | type = ide_cdrom; | ||
184 | case ide_cdrom: | ||
185 | drive->dev_flags |= IDE_DFLAG_REMOVABLE; | ||
186 | #ifdef CONFIG_PPC | ||
187 | /* kludge for Apple PowerBook internal zip */ | ||
188 | if (!strstr(m, "CD-ROM") && strstr(m, "ZIP")) { | ||
189 | printk(KERN_CONT "FLOPPY"); | ||
190 | type = ide_floppy; | ||
191 | break; | ||
192 | } | ||
193 | #endif | ||
194 | printk(KERN_CONT "CD/DVD-ROM"); | ||
195 | break; | ||
196 | case ide_tape: | ||
197 | printk(KERN_CONT "TAPE"); | ||
198 | break; | ||
199 | case ide_optical: | ||
200 | printk(KERN_CONT "OPTICAL"); | ||
201 | drive->dev_flags |= IDE_DFLAG_REMOVABLE; | ||
202 | break; | ||
203 | default: | ||
204 | printk(KERN_CONT "UNKNOWN (type %d)", type); | ||
205 | break; | ||
206 | } | ||
207 | printk(KERN_CONT " drive\n"); | ||
208 | drive->media = type; | ||
209 | /* an ATAPI device ignores DRDY */ | ||
210 | drive->ready_stat = 0; | ||
211 | if (ata_id_cdb_intr(id)) | ||
212 | drive->atapi_flags |= IDE_AFLAG_DRQ_INTERRUPT; | ||
213 | drive->dev_flags |= IDE_DFLAG_DOORLOCKING; | ||
214 | /* we don't do head unloading on ATAPI devices */ | ||
215 | drive->dev_flags |= IDE_DFLAG_NO_UNLOAD; | ||
216 | return; | ||
217 | } | ||
218 | |||
219 | /* | 242 | /* |
220 | * Not an ATAPI device: looks like a "regular" hard disk | 243 | * Not an ATAPI device: looks like a "regular" hard disk |
221 | */ | 244 | */ |
222 | 245 | ide_classify_ata_dev(drive); | |
223 | is_cfa = ata_id_is_cfa(id); | ||
224 | |||
225 | /* CF devices are *not* removable in the Linux definition of the term */ | ||
226 | if (is_cfa == 0 && (id[ATA_ID_CONFIG] & (1 << 7))) | ||
227 | drive->dev_flags |= IDE_DFLAG_REMOVABLE; | ||
228 | |||
229 | drive->media = ide_disk; | ||
230 | |||
231 | if (!ata_id_has_unload(drive->id)) | ||
232 | drive->dev_flags |= IDE_DFLAG_NO_UNLOAD; | ||
233 | |||
234 | printk(KERN_CONT "%s DISK drive\n", is_cfa ? "CFA" : "ATA"); | ||
235 | |||
236 | return; | 246 | return; |
237 | |||
238 | err_misc: | 247 | err_misc: |
239 | kfree(id); | 248 | kfree(id); |
240 | drive->dev_flags &= ~IDE_DFLAG_PRESENT; | 249 | drive->dev_flags &= ~IDE_DFLAG_PRESENT; |
241 | return; | ||
242 | } | 250 | } |
243 | 251 | ||
244 | /** | 252 | /** |
@@ -641,14 +649,9 @@ static int ide_register_port(ide_hwif_t *hwif) | |||
641 | /* register with global device tree */ | 649 | /* register with global device tree */ |
642 | dev_set_name(&hwif->gendev, hwif->name); | 650 | dev_set_name(&hwif->gendev, hwif->name); |
643 | hwif->gendev.driver_data = hwif; | 651 | hwif->gendev.driver_data = hwif; |
644 | if (hwif->gendev.parent == NULL) { | 652 | hwif->gendev.parent = hwif->dev; |
645 | if (hwif->dev) | ||
646 | hwif->gendev.parent = hwif->dev; | ||
647 | else | ||
648 | /* Would like to do = &device_legacy */ | ||
649 | hwif->gendev.parent = NULL; | ||
650 | } | ||
651 | hwif->gendev.release = hwif_release_dev; | 653 | hwif->gendev.release = hwif_release_dev; |
654 | |||
652 | ret = device_register(&hwif->gendev); | 655 | ret = device_register(&hwif->gendev); |
653 | if (ret < 0) { | 656 | if (ret < 0) { |
654 | printk(KERN_WARNING "IDE: %s: device_register error: %d\n", | 657 | printk(KERN_WARNING "IDE: %s: device_register error: %d\n", |
@@ -878,8 +881,7 @@ static int ide_init_queue(ide_drive_t *drive) | |||
878 | * do not. | 881 | * do not. |
879 | */ | 882 | */ |
880 | 883 | ||
881 | q = blk_init_queue_node(do_ide_request, &hwif->hwgroup->lock, | 884 | q = blk_init_queue_node(do_ide_request, NULL, hwif_to_node(hwif)); |
882 | hwif_to_node(hwif)); | ||
883 | if (!q) | 885 | if (!q) |
884 | return 1; | 886 | return 1; |
885 | 887 | ||
@@ -1139,8 +1141,6 @@ static struct kobject *ata_probe(dev_t dev, int *part, void *data) | |||
1139 | 1141 | ||
1140 | if (drive->media == ide_disk) | 1142 | if (drive->media == ide_disk) |
1141 | request_module("ide-disk"); | 1143 | request_module("ide-disk"); |
1142 | if (drive->dev_flags & IDE_DFLAG_SCSI) | ||
1143 | request_module("ide-scsi"); | ||
1144 | if (drive->media == ide_cdrom || drive->media == ide_optical) | 1144 | if (drive->media == ide_cdrom || drive->media == ide_optical) |
1145 | request_module("ide-cd"); | 1145 | request_module("ide-cd"); |
1146 | if (drive->media == ide_tape) | 1146 | if (drive->media == ide_tape) |
@@ -1417,58 +1417,6 @@ static void ide_port_cable_detect(ide_hwif_t *hwif) | |||
1417 | } | 1417 | } |
1418 | } | 1418 | } |
1419 | 1419 | ||
1420 | static ssize_t store_delete_devices(struct device *portdev, | ||
1421 | struct device_attribute *attr, | ||
1422 | const char *buf, size_t n) | ||
1423 | { | ||
1424 | ide_hwif_t *hwif = dev_get_drvdata(portdev); | ||
1425 | |||
1426 | if (strncmp(buf, "1", n)) | ||
1427 | return -EINVAL; | ||
1428 | |||
1429 | ide_port_unregister_devices(hwif); | ||
1430 | |||
1431 | return n; | ||
1432 | }; | ||
1433 | |||
1434 | static DEVICE_ATTR(delete_devices, S_IWUSR, NULL, store_delete_devices); | ||
1435 | |||
1436 | static ssize_t store_scan(struct device *portdev, | ||
1437 | struct device_attribute *attr, | ||
1438 | const char *buf, size_t n) | ||
1439 | { | ||
1440 | ide_hwif_t *hwif = dev_get_drvdata(portdev); | ||
1441 | |||
1442 | if (strncmp(buf, "1", n)) | ||
1443 | return -EINVAL; | ||
1444 | |||
1445 | ide_port_unregister_devices(hwif); | ||
1446 | ide_port_scan(hwif); | ||
1447 | |||
1448 | return n; | ||
1449 | }; | ||
1450 | |||
1451 | static DEVICE_ATTR(scan, S_IWUSR, NULL, store_scan); | ||
1452 | |||
1453 | static struct device_attribute *ide_port_attrs[] = { | ||
1454 | &dev_attr_delete_devices, | ||
1455 | &dev_attr_scan, | ||
1456 | NULL | ||
1457 | }; | ||
1458 | |||
1459 | static int ide_sysfs_register_port(ide_hwif_t *hwif) | ||
1460 | { | ||
1461 | int i, uninitialized_var(rc); | ||
1462 | |||
1463 | for (i = 0; ide_port_attrs[i]; i++) { | ||
1464 | rc = device_create_file(hwif->portdev, ide_port_attrs[i]); | ||
1465 | if (rc) | ||
1466 | break; | ||
1467 | } | ||
1468 | |||
1469 | return rc; | ||
1470 | } | ||
1471 | |||
1472 | static unsigned int ide_indexes; | 1420 | static unsigned int ide_indexes; |
1473 | 1421 | ||
1474 | /** | 1422 | /** |
@@ -1655,9 +1603,6 @@ int ide_host_register(struct ide_host *host, const struct ide_port_info *d, | |||
1655 | if (hwif == NULL) | 1603 | if (hwif == NULL) |
1656 | continue; | 1604 | continue; |
1657 | 1605 | ||
1658 | if (hwif->chipset == ide_unknown) | ||
1659 | hwif->chipset = ide_generic; | ||
1660 | |||
1661 | if (hwif->present) | 1606 | if (hwif->present) |
1662 | hwif_register_devices(hwif); | 1607 | hwif_register_devices(hwif); |
1663 | } | 1608 | } |
diff --git a/drivers/ide/ide-sysfs.c b/drivers/ide/ide-sysfs.c new file mode 100644 index 000000000000..883ffacaf45a --- /dev/null +++ b/drivers/ide/ide-sysfs.c | |||
@@ -0,0 +1,125 @@ | |||
1 | #include <linux/kernel.h> | ||
2 | #include <linux/ide.h> | ||
3 | |||
4 | char *ide_media_string(ide_drive_t *drive) | ||
5 | { | ||
6 | switch (drive->media) { | ||
7 | case ide_disk: | ||
8 | return "disk"; | ||
9 | case ide_cdrom: | ||
10 | return "cdrom"; | ||
11 | case ide_tape: | ||
12 | return "tape"; | ||
13 | case ide_floppy: | ||
14 | return "floppy"; | ||
15 | case ide_optical: | ||
16 | return "optical"; | ||
17 | default: | ||
18 | return "UNKNOWN"; | ||
19 | } | ||
20 | } | ||
21 | |||
22 | static ssize_t media_show(struct device *dev, struct device_attribute *attr, | ||
23 | char *buf) | ||
24 | { | ||
25 | ide_drive_t *drive = to_ide_device(dev); | ||
26 | return sprintf(buf, "%s\n", ide_media_string(drive)); | ||
27 | } | ||
28 | |||
29 | static ssize_t drivename_show(struct device *dev, struct device_attribute *attr, | ||
30 | char *buf) | ||
31 | { | ||
32 | ide_drive_t *drive = to_ide_device(dev); | ||
33 | return sprintf(buf, "%s\n", drive->name); | ||
34 | } | ||
35 | |||
36 | static ssize_t modalias_show(struct device *dev, struct device_attribute *attr, | ||
37 | char *buf) | ||
38 | { | ||
39 | ide_drive_t *drive = to_ide_device(dev); | ||
40 | return sprintf(buf, "ide:m-%s\n", ide_media_string(drive)); | ||
41 | } | ||
42 | |||
43 | static ssize_t model_show(struct device *dev, struct device_attribute *attr, | ||
44 | char *buf) | ||
45 | { | ||
46 | ide_drive_t *drive = to_ide_device(dev); | ||
47 | return sprintf(buf, "%s\n", (char *)&drive->id[ATA_ID_PROD]); | ||
48 | } | ||
49 | |||
50 | static ssize_t firmware_show(struct device *dev, struct device_attribute *attr, | ||
51 | char *buf) | ||
52 | { | ||
53 | ide_drive_t *drive = to_ide_device(dev); | ||
54 | return sprintf(buf, "%s\n", (char *)&drive->id[ATA_ID_FW_REV]); | ||
55 | } | ||
56 | |||
57 | static ssize_t serial_show(struct device *dev, struct device_attribute *attr, | ||
58 | char *buf) | ||
59 | { | ||
60 | ide_drive_t *drive = to_ide_device(dev); | ||
61 | return sprintf(buf, "%s\n", (char *)&drive->id[ATA_ID_SERNO]); | ||
62 | } | ||
63 | |||
64 | struct device_attribute ide_dev_attrs[] = { | ||
65 | __ATTR_RO(media), | ||
66 | __ATTR_RO(drivename), | ||
67 | __ATTR_RO(modalias), | ||
68 | __ATTR_RO(model), | ||
69 | __ATTR_RO(firmware), | ||
70 | __ATTR(serial, 0400, serial_show, NULL), | ||
71 | __ATTR(unload_heads, 0644, ide_park_show, ide_park_store), | ||
72 | __ATTR_NULL | ||
73 | }; | ||
74 | |||
75 | static ssize_t store_delete_devices(struct device *portdev, | ||
76 | struct device_attribute *attr, | ||
77 | const char *buf, size_t n) | ||
78 | { | ||
79 | ide_hwif_t *hwif = dev_get_drvdata(portdev); | ||
80 | |||
81 | if (strncmp(buf, "1", n)) | ||
82 | return -EINVAL; | ||
83 | |||
84 | ide_port_unregister_devices(hwif); | ||
85 | |||
86 | return n; | ||
87 | }; | ||
88 | |||
89 | static DEVICE_ATTR(delete_devices, S_IWUSR, NULL, store_delete_devices); | ||
90 | |||
91 | static ssize_t store_scan(struct device *portdev, | ||
92 | struct device_attribute *attr, | ||
93 | const char *buf, size_t n) | ||
94 | { | ||
95 | ide_hwif_t *hwif = dev_get_drvdata(portdev); | ||
96 | |||
97 | if (strncmp(buf, "1", n)) | ||
98 | return -EINVAL; | ||
99 | |||
100 | ide_port_unregister_devices(hwif); | ||
101 | ide_port_scan(hwif); | ||
102 | |||
103 | return n; | ||
104 | }; | ||
105 | |||
106 | static DEVICE_ATTR(scan, S_IWUSR, NULL, store_scan); | ||
107 | |||
108 | static struct device_attribute *ide_port_attrs[] = { | ||
109 | &dev_attr_delete_devices, | ||
110 | &dev_attr_scan, | ||
111 | NULL | ||
112 | }; | ||
113 | |||
114 | int ide_sysfs_register_port(ide_hwif_t *hwif) | ||
115 | { | ||
116 | int i, uninitialized_var(rc); | ||
117 | |||
118 | for (i = 0; ide_port_attrs[i]; i++) { | ||
119 | rc = device_create_file(hwif->portdev, ide_port_attrs[i]); | ||
120 | if (rc) | ||
121 | break; | ||
122 | } | ||
123 | |||
124 | return rc; | ||
125 | } | ||
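The net effect of the new ide-sysfs.c is that each IDE device exposes media, drivename, modalias, model, firmware, serial and unload_heads attributes, and each port keeps its delete_devices and scan controls. A minimal userspace check might look like the following sketch; the device path is an assumption for illustration, since the real directory depends on bus topology:

	#include <stdio.h>

	int main(void)
	{
		char buf[64];
		/* hypothetical device directory; adjust to the actual system */
		FILE *f = fopen("/sys/bus/ide/devices/0.0/media", "r");

		if (!f)
			return 1;
		if (fgets(buf, sizeof(buf), f))
			printf("media: %s", buf);	/* e.g. "disk" */
		fclose(f);
		return 0;
	}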
diff --git a/drivers/ide/ide-tape.c b/drivers/ide/ide-tape.c index a2d470eb2b55..5d2aa22cd6e4 100644 --- a/drivers/ide/ide-tape.c +++ b/drivers/ide/ide-tape.c | |||
@@ -694,7 +694,7 @@ static ide_startstop_t idetape_issue_pc(ide_drive_t *drive, | |||
694 | 694 | ||
695 | pc->retries++; | 695 | pc->retries++; |
696 | 696 | ||
697 | return ide_issue_pc(drive, WAIT_TAPE_CMD, NULL); | 697 | return ide_issue_pc(drive); |
698 | } | 698 | } |
699 | 699 | ||
700 | /* A mode sense command is used to "sense" tape parameters. */ | 700 | /* A mode sense command is used to "sense" tape parameters. */ |
diff --git a/drivers/ide/ide.c b/drivers/ide/ide.c index f0f09f702e9c..46a2d4ca812b 100644 --- a/drivers/ide/ide.c +++ b/drivers/ide/ide.c | |||
@@ -440,81 +440,13 @@ static int ide_bus_match(struct device *dev, struct device_driver *drv) | |||
440 | return 1; | 440 | return 1; |
441 | } | 441 | } |
442 | 442 | ||
443 | static char *media_string(ide_drive_t *drive) | ||
444 | { | ||
445 | switch (drive->media) { | ||
446 | case ide_disk: | ||
447 | return "disk"; | ||
448 | case ide_cdrom: | ||
449 | return "cdrom"; | ||
450 | case ide_tape: | ||
451 | return "tape"; | ||
452 | case ide_floppy: | ||
453 | return "floppy"; | ||
454 | case ide_optical: | ||
455 | return "optical"; | ||
456 | default: | ||
457 | return "UNKNOWN"; | ||
458 | } | ||
459 | } | ||
460 | |||
461 | static ssize_t media_show(struct device *dev, struct device_attribute *attr, char *buf) | ||
462 | { | ||
463 | ide_drive_t *drive = to_ide_device(dev); | ||
464 | return sprintf(buf, "%s\n", media_string(drive)); | ||
465 | } | ||
466 | |||
467 | static ssize_t drivename_show(struct device *dev, struct device_attribute *attr, char *buf) | ||
468 | { | ||
469 | ide_drive_t *drive = to_ide_device(dev); | ||
470 | return sprintf(buf, "%s\n", drive->name); | ||
471 | } | ||
472 | |||
473 | static ssize_t modalias_show(struct device *dev, struct device_attribute *attr, char *buf) | ||
474 | { | ||
475 | ide_drive_t *drive = to_ide_device(dev); | ||
476 | return sprintf(buf, "ide:m-%s\n", media_string(drive)); | ||
477 | } | ||
478 | |||
479 | static ssize_t model_show(struct device *dev, struct device_attribute *attr, | ||
480 | char *buf) | ||
481 | { | ||
482 | ide_drive_t *drive = to_ide_device(dev); | ||
483 | return sprintf(buf, "%s\n", (char *)&drive->id[ATA_ID_PROD]); | ||
484 | } | ||
485 | |||
486 | static ssize_t firmware_show(struct device *dev, struct device_attribute *attr, | ||
487 | char *buf) | ||
488 | { | ||
489 | ide_drive_t *drive = to_ide_device(dev); | ||
490 | return sprintf(buf, "%s\n", (char *)&drive->id[ATA_ID_FW_REV]); | ||
491 | } | ||
492 | |||
493 | static ssize_t serial_show(struct device *dev, struct device_attribute *attr, | ||
494 | char *buf) | ||
495 | { | ||
496 | ide_drive_t *drive = to_ide_device(dev); | ||
497 | return sprintf(buf, "%s\n", (char *)&drive->id[ATA_ID_SERNO]); | ||
498 | } | ||
499 | |||
500 | static struct device_attribute ide_dev_attrs[] = { | ||
501 | __ATTR_RO(media), | ||
502 | __ATTR_RO(drivename), | ||
503 | __ATTR_RO(modalias), | ||
504 | __ATTR_RO(model), | ||
505 | __ATTR_RO(firmware), | ||
506 | __ATTR(serial, 0400, serial_show, NULL), | ||
507 | __ATTR(unload_heads, 0644, ide_park_show, ide_park_store), | ||
508 | __ATTR_NULL | ||
509 | }; | ||
510 | |||
511 | static int ide_uevent(struct device *dev, struct kobj_uevent_env *env) | 443 | static int ide_uevent(struct device *dev, struct kobj_uevent_env *env) |
512 | { | 444 | { |
513 | ide_drive_t *drive = to_ide_device(dev); | 445 | ide_drive_t *drive = to_ide_device(dev); |
514 | 446 | ||
515 | add_uevent_var(env, "MEDIA=%s", media_string(drive)); | 447 | add_uevent_var(env, "MEDIA=%s", ide_media_string(drive)); |
516 | add_uevent_var(env, "DRIVENAME=%s", drive->name); | 448 | add_uevent_var(env, "DRIVENAME=%s", drive->name); |
517 | add_uevent_var(env, "MODALIAS=ide:m-%s", media_string(drive)); | 449 | add_uevent_var(env, "MODALIAS=ide:m-%s", ide_media_string(drive)); |
518 | return 0; | 450 | return 0; |
519 | } | 451 | } |
520 | 452 | ||
diff --git a/drivers/ide/tx4938ide.c b/drivers/ide/tx4938ide.c index 13b63e7fa353..b4ef218072cd 100644 --- a/drivers/ide/tx4938ide.c +++ b/drivers/ide/tx4938ide.c | |||
@@ -216,16 +216,17 @@ static const struct ide_tp_ops tx4938ide_tp_ops = { | |||
216 | #endif /* __BIG_ENDIAN */ | 216 | #endif /* __BIG_ENDIAN */ |
217 | 217 | ||
218 | static const struct ide_port_ops tx4938ide_port_ops = { | 218 | static const struct ide_port_ops tx4938ide_port_ops = { |
219 | .set_pio_mode = tx4938ide_set_pio_mode, | 219 | .set_pio_mode = tx4938ide_set_pio_mode, |
220 | }; | 220 | }; |
221 | 221 | ||
222 | static const struct ide_port_info tx4938ide_port_info __initdata = { | 222 | static const struct ide_port_info tx4938ide_port_info __initdata = { |
223 | .port_ops = &tx4938ide_port_ops, | 223 | .port_ops = &tx4938ide_port_ops, |
224 | #ifdef __BIG_ENDIAN | 224 | #ifdef __BIG_ENDIAN |
225 | .tp_ops = &tx4938ide_tp_ops, | 225 | .tp_ops = &tx4938ide_tp_ops, |
226 | #endif | 226 | #endif |
227 | .host_flags = IDE_HFLAG_MMIO | IDE_HFLAG_NO_DMA, | 227 | .host_flags = IDE_HFLAG_MMIO | IDE_HFLAG_NO_DMA, |
228 | .pio_mask = ATA_PIO5, | 228 | .pio_mask = ATA_PIO5, |
229 | .chipset = ide_generic, | ||
229 | }; | 230 | }; |
230 | 231 | ||
231 | static int __init tx4938ide_probe(struct platform_device *pdev) | 232 | static int __init tx4938ide_probe(struct platform_device *pdev) |
diff --git a/drivers/ide/tx4939ide.c b/drivers/ide/tx4939ide.c index 97cd9e0f66f6..4a8c5a21bd4c 100644 --- a/drivers/ide/tx4939ide.c +++ b/drivers/ide/tx4939ide.c | |||
@@ -623,33 +623,34 @@ static const struct ide_tp_ops tx4939ide_tp_ops = { | |||
623 | #endif /* __LITTLE_ENDIAN */ | 623 | #endif /* __LITTLE_ENDIAN */ |
624 | 624 | ||
625 | static const struct ide_port_ops tx4939ide_port_ops = { | 625 | static const struct ide_port_ops tx4939ide_port_ops = { |
626 | .set_pio_mode = tx4939ide_set_pio_mode, | 626 | .set_pio_mode = tx4939ide_set_pio_mode, |
627 | .set_dma_mode = tx4939ide_set_dma_mode, | 627 | .set_dma_mode = tx4939ide_set_dma_mode, |
628 | .clear_irq = tx4939ide_clear_irq, | 628 | .clear_irq = tx4939ide_clear_irq, |
629 | .cable_detect = tx4939ide_cable_detect, | 629 | .cable_detect = tx4939ide_cable_detect, |
630 | }; | 630 | }; |
631 | 631 | ||
632 | static const struct ide_dma_ops tx4939ide_dma_ops = { | 632 | static const struct ide_dma_ops tx4939ide_dma_ops = { |
633 | .dma_host_set = tx4939ide_dma_host_set, | 633 | .dma_host_set = tx4939ide_dma_host_set, |
634 | .dma_setup = tx4939ide_dma_setup, | 634 | .dma_setup = tx4939ide_dma_setup, |
635 | .dma_exec_cmd = ide_dma_exec_cmd, | 635 | .dma_exec_cmd = ide_dma_exec_cmd, |
636 | .dma_start = ide_dma_start, | 636 | .dma_start = ide_dma_start, |
637 | .dma_end = tx4939ide_dma_end, | 637 | .dma_end = tx4939ide_dma_end, |
638 | .dma_test_irq = tx4939ide_dma_test_irq, | 638 | .dma_test_irq = tx4939ide_dma_test_irq, |
639 | .dma_lost_irq = ide_dma_lost_irq, | 639 | .dma_lost_irq = ide_dma_lost_irq, |
640 | .dma_timeout = ide_dma_timeout, | 640 | .dma_timeout = ide_dma_timeout, |
641 | }; | 641 | }; |
642 | 642 | ||
643 | static const struct ide_port_info tx4939ide_port_info __initdata = { | 643 | static const struct ide_port_info tx4939ide_port_info __initdata = { |
644 | .init_hwif = tx4939ide_init_hwif, | 644 | .init_hwif = tx4939ide_init_hwif, |
645 | .init_dma = tx4939ide_init_dma, | 645 | .init_dma = tx4939ide_init_dma, |
646 | .port_ops = &tx4939ide_port_ops, | 646 | .port_ops = &tx4939ide_port_ops, |
647 | .dma_ops = &tx4939ide_dma_ops, | 647 | .dma_ops = &tx4939ide_dma_ops, |
648 | .tp_ops = &tx4939ide_tp_ops, | 648 | .tp_ops = &tx4939ide_tp_ops, |
649 | .host_flags = IDE_HFLAG_MMIO, | 649 | .host_flags = IDE_HFLAG_MMIO, |
650 | .pio_mask = ATA_PIO4, | 650 | .pio_mask = ATA_PIO4, |
651 | .mwdma_mask = ATA_MWDMA2, | 651 | .mwdma_mask = ATA_MWDMA2, |
652 | .udma_mask = ATA_UDMA5, | 652 | .udma_mask = ATA_UDMA5, |
653 | .chipset = ide_generic, | ||
653 | }; | 654 | }; |
654 | 655 | ||
655 | static int __init tx4939ide_probe(struct platform_device *pdev) | 656 | static int __init tx4939ide_probe(struct platform_device *pdev) |
diff --git a/drivers/lguest/interrupts_and_traps.c b/drivers/lguest/interrupts_and_traps.c index a1039068f95c..415fab0125ac 100644 --- a/drivers/lguest/interrupts_and_traps.c +++ b/drivers/lguest/interrupts_and_traps.c | |||
@@ -222,11 +222,16 @@ bool check_syscall_vector(struct lguest *lg) | |||
222 | int init_interrupts(void) | 222 | int init_interrupts(void) |
223 | { | 223 | { |
224 | /* If they want some strange system call vector, reserve it now */ | 224 | /* If they want some strange system call vector, reserve it now */ |
225 | if (syscall_vector != SYSCALL_VECTOR | 225 | if (syscall_vector != SYSCALL_VECTOR) { |
226 | && test_and_set_bit(syscall_vector, used_vectors)) { | 226 | if (test_bit(syscall_vector, used_vectors) || |
227 | printk("lg: couldn't reserve syscall %u\n", syscall_vector); | 227 | vector_used_by_percpu_irq(syscall_vector)) { |
228 | return -EBUSY; | 228 | printk(KERN_ERR "lg: couldn't reserve syscall %u\n", |
229 | syscall_vector); | ||
230 | return -EBUSY; | ||
231 | } | ||
232 | set_bit(syscall_vector, used_vectors); | ||
229 | } | 233 | } |
234 | |||
230 | return 0; | 235 | return 0; |
231 | } | 236 | } |
232 | 237 | ||
diff --git a/drivers/net/usb/hso.c b/drivers/net/usb/hso.c index 9f7896a25f1b..c4918b86ed19 100644 --- a/drivers/net/usb/hso.c +++ b/drivers/net/usb/hso.c | |||
@@ -3,6 +3,8 @@ | |||
3 | * Driver for Option High Speed Mobile Devices. | 3 | * Driver for Option High Speed Mobile Devices. |
4 | * | 4 | * |
5 | * Copyright (C) 2008 Option International | 5 | * Copyright (C) 2008 Option International |
6 | * Filip Aben <f.aben@option.com> | ||
7 | * Denis Joseph Barrow <d.barow@option.com> | ||
6 | * Copyright (C) 2007 Andrew Bird (Sphere Systems Ltd) | 8 | * Copyright (C) 2007 Andrew Bird (Sphere Systems Ltd) |
7 | * <ajb@spheresystems.co.uk> | 9 | * <ajb@spheresystems.co.uk> |
8 | * Copyright (C) 2008 Greg Kroah-Hartman <gregkh@suse.de> | 10 | * Copyright (C) 2008 Greg Kroah-Hartman <gregkh@suse.de> |
@@ -39,8 +41,11 @@ | |||
39 | * port is opened, as this has a huge impact on the network | 41 | * port is opened, as this has a huge impact on the network |
40 | * throughput. | 42 | * throughput. |
41 | * | 43 | * |
42 | * Interface 2: Standard modem interface - circuit switched interface, should | 44 | * Interface 2: Standard modem interface - circuit switched interface; this |
43 | * not be used. | 45 | * can be used to make a standard PPP connection. However, it |
46 | * should not be used in conjunction with the IP network interface, | ||
47 | * for USB performance reasons; i.e. when using this interface, | ||
48 | * ideally set disable_net=1. | ||
44 | * | 49 | * |
45 | *****************************************************************************/ | 50 | *****************************************************************************/ |
46 | 51 | ||
@@ -63,6 +68,8 @@ | |||
63 | #include <linux/usb/cdc.h> | 68 | #include <linux/usb/cdc.h> |
64 | #include <net/arp.h> | 69 | #include <net/arp.h> |
65 | #include <asm/byteorder.h> | 70 | #include <asm/byteorder.h> |
71 | #include <linux/serial_core.h> | ||
72 | #include <linux/serial.h> | ||
66 | 73 | ||
67 | 74 | ||
68 | #define DRIVER_VERSION "1.2" | 75 | #define DRIVER_VERSION "1.2" |
@@ -182,6 +189,41 @@ enum rx_ctrl_state{ | |||
182 | RX_PENDING | 189 | RX_PENDING |
183 | }; | 190 | }; |
184 | 191 | ||
192 | #define BM_REQUEST_TYPE (0xa1) | ||
193 | #define B_NOTIFICATION (0x20) | ||
194 | #define W_VALUE (0x0) | ||
195 | #define W_INDEX (0x2) | ||
196 | #define W_LENGTH (0x2) | ||
197 | |||
198 | #define B_OVERRUN (0x1<<6) | ||
199 | #define B_PARITY (0x1<<5) | ||
200 | #define B_FRAMING (0x1<<4) | ||
201 | #define B_RING_SIGNAL (0x1<<3) | ||
202 | #define B_BREAK (0x1<<2) | ||
203 | #define B_TX_CARRIER (0x1<<1) | ||
204 | #define B_RX_CARRIER (0x1<<0) | ||
205 | |||
206 | struct hso_serial_state_notification { | ||
207 | u8 bmRequestType; | ||
208 | u8 bNotification; | ||
209 | u16 wValue; | ||
210 | u16 wIndex; | ||
211 | u16 wLength; | ||
212 | u16 UART_state_bitmap; | ||
213 | } __attribute__((packed)); | ||
214 | |||
215 | struct hso_tiocmget { | ||
216 | struct mutex mutex; | ||
217 | wait_queue_head_t waitq; | ||
218 | int intr_completed; | ||
219 | struct usb_endpoint_descriptor *endp; | ||
220 | struct urb *urb; | ||
221 | struct hso_serial_state_notification serial_state_notification; | ||
222 | u16 prev_UART_state_bitmap; | ||
223 | struct uart_icount icount; | ||
224 | }; | ||
225 | |||
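To see how the B_* masks and the notification layout above fit together, here is a sketch of folding a received UART state bitmap into the uart_icount counters; the helper name and the edge-versus-level choices are assumptions for illustration, not code from this patch:

	/* Hypothetical decode of a CDC serial-state notification. */
	static void hso_count_uart_state(struct hso_tiocmget *tiocmget, u16 bitmap)
	{
		u16 changed = bitmap ^ tiocmget->prev_UART_state_bitmap;

		if (changed & B_RX_CARRIER)
			tiocmget->icount.dcd++;	/* carrier transitions */
		if (changed & B_RING_SIGNAL)
			tiocmget->icount.rng++;	/* ring transitions */
		if (bitmap & B_OVERRUN)
			tiocmget->icount.overrun++;
		if (bitmap & B_PARITY)
			tiocmget->icount.parity++;
		if (bitmap & B_FRAMING)
			tiocmget->icount.frame++;
		if (bitmap & B_BREAK)
			tiocmget->icount.brk++;

		tiocmget->prev_UART_state_bitmap = bitmap;
	}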
226 | |||
185 | struct hso_serial { | 227 | struct hso_serial { |
186 | struct hso_device *parent; | 228 | struct hso_device *parent; |
187 | int magic; | 229 | int magic; |
@@ -219,6 +261,7 @@ struct hso_serial { | |||
219 | spinlock_t serial_lock; | 261 | spinlock_t serial_lock; |
220 | 262 | ||
221 | int (*write_data) (struct hso_serial *serial); | 263 | int (*write_data) (struct hso_serial *serial); |
264 | struct hso_tiocmget *tiocmget; | ||
222 | /* Hacks required to get flow control | 265 | /* Hacks required to get flow control |
223 | * working on the serial receive buffers | 266 | * working on the serial receive buffers |
224 | * so as not to drop characters on the floor. | 267 | * so as not to drop characters on the floor. |
@@ -305,7 +348,7 @@ static void async_get_intf(struct work_struct *data); | |||
305 | static void async_put_intf(struct work_struct *data); | 348 | static void async_put_intf(struct work_struct *data); |
306 | static int hso_put_activity(struct hso_device *hso_dev); | 349 | static int hso_put_activity(struct hso_device *hso_dev); |
307 | static int hso_get_activity(struct hso_device *hso_dev); | 350 | static int hso_get_activity(struct hso_device *hso_dev); |
308 | 351 | static void tiocmget_intr_callback(struct urb *urb); | |
309 | /*****************************************************************************/ | 352 | /*****************************************************************************/ |
310 | /* Helping functions */ | 353 | /* Helping functions */ |
311 | /*****************************************************************************/ | 354 | /*****************************************************************************/ |
@@ -362,8 +405,6 @@ static struct tty_driver *tty_drv; | |||
362 | static struct hso_device *serial_table[HSO_SERIAL_TTY_MINORS]; | 405 | static struct hso_device *serial_table[HSO_SERIAL_TTY_MINORS]; |
363 | static struct hso_device *network_table[HSO_MAX_NET_DEVICES]; | 406 | static struct hso_device *network_table[HSO_MAX_NET_DEVICES]; |
364 | static spinlock_t serial_table_lock; | 407 | static spinlock_t serial_table_lock; |
365 | static struct ktermios *hso_serial_termios[HSO_SERIAL_TTY_MINORS]; | ||
366 | static struct ktermios *hso_serial_termios_locked[HSO_SERIAL_TTY_MINORS]; | ||
367 | 408 | ||
368 | static const s32 default_port_spec[] = { | 409 | static const s32 default_port_spec[] = { |
369 | HSO_INTF_MUX | HSO_PORT_NETWORK, | 410 | HSO_INTF_MUX | HSO_PORT_NETWORK, |
@@ -1009,23 +1050,11 @@ static void read_bulk_callback(struct urb *urb) | |||
1009 | 1050 | ||
1010 | /* Serial driver functions */ | 1051 | /* Serial driver functions */ |
1011 | 1052 | ||
1012 | static void _hso_serial_set_termios(struct tty_struct *tty, | 1053 | static void hso_init_termios(struct ktermios *termios) |
1013 | struct ktermios *old) | ||
1014 | { | 1054 | { |
1015 | struct hso_serial *serial = get_serial_by_tty(tty); | ||
1016 | struct ktermios *termios; | ||
1017 | |||
1018 | if ((!tty) || (!tty->termios) || (!serial)) { | ||
1019 | printk(KERN_ERR "%s: no tty structures", __func__); | ||
1020 | return; | ||
1021 | } | ||
1022 | |||
1023 | D4("port %d", serial->minor); | ||
1024 | |||
1025 | /* | 1055 | /* |
1026 | * The default requirements for this device are: | 1056 | * The default requirements for this device are: |
1027 | */ | 1057 | */ |
1028 | termios = tty->termios; | ||
1029 | termios->c_iflag &= | 1058 | termios->c_iflag &= |
1030 | ~(IGNBRK /* disable ignore break */ | 1059 | ~(IGNBRK /* disable ignore break */ |
1031 | | BRKINT /* disable break causes interrupt */ | 1060 | | BRKINT /* disable break causes interrupt */ |
@@ -1057,15 +1086,38 @@ static void _hso_serial_set_termios(struct tty_struct *tty, | |||
1057 | termios->c_cflag |= CS8; /* character size 8 bits */ | 1086 | termios->c_cflag |= CS8; /* character size 8 bits */ |
1058 | 1087 | ||
1059 | /* baud rate 115200 */ | 1088 | /* baud rate 115200 */ |
1060 | tty_encode_baud_rate(serial->tty, 115200, 115200); | 1089 | tty_termios_encode_baud_rate(termios, 115200, 115200); |
1090 | } | ||
1091 | |||
1092 | static void _hso_serial_set_termios(struct tty_struct *tty, | ||
1093 | struct ktermios *old) | ||
1094 | { | ||
1095 | struct hso_serial *serial = get_serial_by_tty(tty); | ||
1096 | struct ktermios *termios; | ||
1097 | |||
1098 | if (!serial) { | ||
1099 | printk(KERN_ERR "%s: no tty structures\n", __func__); | ||
1100 | return; | ||
1101 | } | ||
1102 | |||
1103 | D4("port %d", serial->minor); | ||
1061 | 1104 | ||
1062 | /* | 1105 | /* |
1063 | * Force low_latency on; otherwise the pushes are scheduled; | 1106 | * Fix up unsupported bits |
1064 | * this is bad as it opens up the possibility of dropping bytes | ||
1065 | * on the floor. We don't want to drop bytes on the floor. :) | ||
1066 | */ | 1107 | */ |
1067 | serial->tty->low_latency = 1; | 1108 | termios = tty->termios; |
1068 | return; | 1109 | termios->c_iflag &= ~IXON; /* disable XON/XOFF flow control */ |
1110 | |||
1111 | termios->c_cflag &= | ||
1112 | ~(CSIZE /* no size */ | ||
1113 | | PARENB /* disable parity bit */ | ||
1114 | | CBAUD /* clear current baud rate */ | ||
1115 | | CBAUDEX); /* clear extended baud rate bits */ | ||
1116 | |||
1117 | termios->c_cflag |= CS8; /* character size 8 bits */ | ||
1118 | |||
1119 | /* baud rate 115200 */ | ||
1120 | tty_encode_baud_rate(tty, 115200, 115200); | ||
1069 | } | 1121 | } |
1070 | 1122 | ||
1071 | static void hso_resubmit_rx_bulk_urb(struct hso_serial *serial, struct urb *urb) | 1123 | static void hso_resubmit_rx_bulk_urb(struct hso_serial *serial, struct urb *urb) |
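The split above keeps the port's line policy in one place: hso_init_termios() seeds the driver-wide 8N1/115200 defaults once at registration time, while _hso_serial_set_termios() only strips the settings the hardware cannot honour when userspace changes them later. A minimal userspace sketch of what those defaults look like from the far side of the TTY; /dev/ttyHS0 is an assumed node name, not something this patch establishes:

/* Sketch: confirm the hso defaults (8 data bits, no parity, 115200).
 * The device path is illustrative. */
#include <fcntl.h>
#include <stdio.h>
#include <termios.h>
#include <unistd.h>

int main(void)
{
	struct termios tio;
	int fd = open("/dev/ttyHS0", O_RDWR | O_NOCTTY);

	if (fd < 0 || tcgetattr(fd, &tio) < 0) {
		perror("ttyHS0");
		return 1;
	}
	printf("8-bit: %s, parity: %s, 115200: %s\n",
	       (tio.c_cflag & CSIZE) == CS8 ? "yes" : "no",
	       (tio.c_cflag & PARENB) ? "on" : "off",
	       cfgetospeed(&tio) == B115200 ? "yes" : "no");
	close(fd);
	return 0;
}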
@@ -1228,6 +1280,7 @@ static int hso_serial_open(struct tty_struct *tty, struct file *filp) | |||
1228 | 1280 | ||
1229 | /* sanity check */ | 1281 | /* sanity check */ |
1230 | if (serial == NULL || serial->magic != HSO_SERIAL_MAGIC) { | 1282 | if (serial == NULL || serial->magic != HSO_SERIAL_MAGIC) { |
1283 | WARN_ON(1); | ||
1231 | tty->driver_data = NULL; | 1284 | tty->driver_data = NULL; |
1232 | D1("Failed to open port"); | 1285 | D1("Failed to open port"); |
1233 | return -ENODEV; | 1286 | return -ENODEV; |
@@ -1242,8 +1295,10 @@ static int hso_serial_open(struct tty_struct *tty, struct file *filp) | |||
1242 | kref_get(&serial->parent->ref); | 1295 | kref_get(&serial->parent->ref); |
1243 | 1296 | ||
1244 | /* setup */ | 1297 | /* setup */ |
1298 | spin_lock_irq(&serial->serial_lock); | ||
1245 | tty->driver_data = serial; | 1299 | tty->driver_data = serial; |
1246 | serial->tty = tty; | 1300 | serial->tty = tty_kref_get(tty); |
1301 | spin_unlock_irq(&serial->serial_lock); | ||
1247 | 1302 | ||
1248 | /* check for port already opened, if not set the termios */ | 1303 | /* check for port already opened, if not set the termios */ |
1249 | serial->open_count++; | 1304 | serial->open_count++; |
@@ -1285,6 +1340,10 @@ static void hso_serial_close(struct tty_struct *tty, struct file *filp) | |||
1285 | 1340 | ||
1286 | D1("Closing serial port"); | 1341 | D1("Closing serial port"); |
1287 | 1342 | ||
1343 | /* Open failed, no close cleanup required */ | ||
1344 | if (serial == NULL) | ||
1345 | return; | ||
1346 | |||
1288 | mutex_lock(&serial->parent->mutex); | 1347 | mutex_lock(&serial->parent->mutex); |
1289 | usb_gone = serial->parent->usb_gone; | 1348 | usb_gone = serial->parent->usb_gone; |
1290 | 1349 | ||
@@ -1297,10 +1356,13 @@ static void hso_serial_close(struct tty_struct *tty, struct file *filp) | |||
1297 | kref_put(&serial->parent->ref, hso_serial_ref_free); | 1356 | kref_put(&serial->parent->ref, hso_serial_ref_free); |
1298 | if (serial->open_count <= 0) { | 1357 | if (serial->open_count <= 0) { |
1299 | serial->open_count = 0; | 1358 | serial->open_count = 0; |
1300 | if (serial->tty) { | 1359 | spin_lock_irq(&serial->serial_lock); |
1360 | if (serial->tty == tty) { | ||
1301 | serial->tty->driver_data = NULL; | 1361 | serial->tty->driver_data = NULL; |
1302 | serial->tty = NULL; | 1362 | serial->tty = NULL; |
1363 | tty_kref_put(tty); | ||
1303 | } | 1364 | } |
1365 | spin_unlock_irq(&serial->serial_lock); | ||
1304 | if (!usb_gone) | 1366 | if (!usb_gone) |
1305 | hso_stop_serial_device(serial->parent); | 1367 | hso_stop_serial_device(serial->parent); |
1306 | tasklet_kill(&serial->unthrottle_tasklet); | 1368 | tasklet_kill(&serial->unthrottle_tasklet); |
@@ -1400,25 +1462,217 @@ static int hso_serial_chars_in_buffer(struct tty_struct *tty) | |||
1400 | 1462 | ||
1401 | return chars; | 1463 | return chars; |
1402 | } | 1464 | } |
1465 | static int tiocmget_submit_urb(struct hso_serial *serial, | ||
1466 | struct hso_tiocmget *tiocmget, | ||
1467 | struct usb_device *usb) | ||
1468 | { | ||
1469 | int result; | ||
1470 | |||
1471 | if (serial->parent->usb_gone) | ||
1472 | return -ENODEV; | ||
1473 | usb_fill_int_urb(tiocmget->urb, usb, | ||
1474 | usb_rcvintpipe(usb, | ||
1475 | tiocmget->endp-> | ||
1476 | bEndpointAddress & 0x7F), | ||
1477 | &tiocmget->serial_state_notification, | ||
1478 | sizeof(struct hso_serial_state_notification), | ||
1479 | tiocmget_intr_callback, serial, | ||
1480 | tiocmget->endp->bInterval); | ||
1481 | result = usb_submit_urb(tiocmget->urb, GFP_ATOMIC); | ||
1482 | if (result) { | ||
1483 | dev_warn(&usb->dev, "%s usb_submit_urb failed %d\n", __func__, | ||
1484 | result); | ||
1485 | } | ||
1486 | return result; | ||
1487 | |||
1488 | } | ||
1489 | |||
1490 | static void tiocmget_intr_callback(struct urb *urb) | ||
1491 | { | ||
1492 | struct hso_serial *serial = urb->context; | ||
1493 | struct hso_tiocmget *tiocmget; | ||
1494 | int status = urb->status; | ||
1495 | u16 UART_state_bitmap, prev_UART_state_bitmap; | ||
1496 | struct uart_icount *icount; | ||
1497 | struct hso_serial_state_notification *serial_state_notification; | ||
1498 | struct usb_device *usb; | ||
1499 | |||
1500 | /* Sanity checks */ | ||
1501 | if (!serial) | ||
1502 | return; | ||
1503 | if (status) { | ||
1504 | log_usb_status(status, __func__); | ||
1505 | return; | ||
1506 | } | ||
1507 | tiocmget = serial->tiocmget; | ||
1508 | if (!tiocmget) | ||
1509 | return; | ||
1510 | usb = serial->parent->usb; | ||
1511 | serial_state_notification = &tiocmget->serial_state_notification; | ||
1512 | if (serial_state_notification->bmRequestType != BM_REQUEST_TYPE || | ||
1513 | serial_state_notification->bNotification != B_NOTIFICATION || | ||
1514 | le16_to_cpu(serial_state_notification->wValue) != W_VALUE || | ||
1515 | le16_to_cpu(serial_state_notification->wIndex) != W_INDEX || | ||
1516 | le16_to_cpu(serial_state_notification->wLength) != W_LENGTH) { | ||
1517 | dev_warn(&usb->dev, | ||
1518 | "hso received invalid serial state notification\n"); | ||
1519 | DUMP(serial_state_notification, | ||
1520 | sizeof(struct hso_serial_state_notification)); | ||
1521 | } else { | ||
1522 | |||
1523 | UART_state_bitmap = le16_to_cpu(serial_state_notification-> | ||
1524 | UART_state_bitmap); | ||
1525 | prev_UART_state_bitmap = tiocmget->prev_UART_state_bitmap; | ||
1526 | icount = &tiocmget->icount; | ||
1527 | spin_lock(&serial->serial_lock); | ||
1528 | if ((UART_state_bitmap & B_OVERRUN) != | ||
1529 | (prev_UART_state_bitmap & B_OVERRUN)) | ||
1530 | icount->overrun++; | ||
1531 | if ((UART_state_bitmap & B_PARITY) != | ||
1532 | (prev_UART_state_bitmap & B_PARITY)) | ||
1533 | icount->parity++; | ||
1534 | if ((UART_state_bitmap & B_FRAMING) != | ||
1535 | (prev_UART_state_bitmap & B_FRAMING)) | ||
1536 | icount->frame++; | ||
1537 | if ((UART_state_bitmap & B_RING_SIGNAL) && | ||
1538 | !(prev_UART_state_bitmap & B_RING_SIGNAL)) | ||
1539 | icount->rng++; | ||
1540 | if ((UART_state_bitmap & B_BREAK) != | ||
1541 | (prev_UART_state_bitmap & B_BREAK)) | ||
1542 | icount->brk++; | ||
1543 | if ((UART_state_bitmap & B_TX_CARRIER) != | ||
1544 | (prev_UART_state_bitmap & B_TX_CARRIER)) | ||
1545 | icount->dsr++; | ||
1546 | if ((UART_state_bitmap & B_RX_CARRIER) != | ||
1547 | (prev_UART_state_bitmap & B_RX_CARRIER)) | ||
1548 | icount->dcd++; | ||
1549 | tiocmget->prev_UART_state_bitmap = UART_state_bitmap; | ||
1550 | spin_unlock(&serial->serial_lock); | ||
1551 | tiocmget->intr_completed = 1; | ||
1552 | wake_up_interruptible(&tiocmget->waitq); | ||
1553 | } | ||
1554 | memset(serial_state_notification, 0, | ||
1555 | sizeof(struct hso_serial_state_notification)); | ||
1556 | tiocmget_submit_urb(serial, | ||
1557 | tiocmget, | ||
1558 | serial->parent->usb); | ||
1559 | } | ||
1560 | |||
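The header checks in tiocmget_intr_callback() match the USB CDC SERIAL_STATE notification format; the struct declaration itself sits earlier in the file, outside this hunk. A sketch of the layout being validated, assuming the standard CDC encoding (bmRequestType 0xA1 = IN|CLASS|INTERFACE, bNotification 0x20 = SERIAL_STATE); treat it as an illustration, not the driver's exact declaration:

/* Sketch of the 10-byte interrupt payload the callback validates. */
struct hso_serial_state_notification {
	u8	bmRequestType;		/* expected BM_REQUEST_TYPE (0xA1) */
	u8	bNotification;		/* expected B_NOTIFICATION (0x20) */
	__le16	wValue;			/* expected W_VALUE */
	__le16	wIndex;			/* expected W_INDEX (interface) */
	__le16	wLength;		/* expected W_LENGTH (2) */
	__le16	UART_state_bitmap;	/* B_RX_CARRIER, B_RING_SIGNAL, ... */
} __packed;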
1561 | /* | ||
1562 | * next few functions largely stolen from drivers/serial/serial_core.c | ||
1563 | */ | ||
1564 | /* Wait for any of the 4 modem inputs (DCD,RI,DSR,CTS) to change | ||
1565 | * - mask passed in arg for lines of interest | ||
1566 | * (use |'ed TIOCM_RNG/DSR/CD/CTS for masking) | ||
1567 | * Caller should use TIOCGICOUNT to see which one it was | ||
1568 | */ | ||
1569 | static int | ||
1570 | hso_wait_modem_status(struct hso_serial *serial, unsigned long arg) | ||
1571 | { | ||
1572 | DECLARE_WAITQUEUE(wait, current); | ||
1573 | struct uart_icount cprev, cnow; | ||
1574 | struct hso_tiocmget *tiocmget; | ||
1575 | int ret; | ||
1576 | |||
1577 | tiocmget = serial->tiocmget; | ||
1578 | if (!tiocmget) | ||
1579 | return -ENOENT; | ||
1580 | /* | ||
1581 | * note the counters on entry | ||
1582 | */ | ||
1583 | spin_lock_irq(&serial->serial_lock); | ||
1584 | memcpy(&cprev, &tiocmget->icount, sizeof(struct uart_icount)); | ||
1585 | spin_unlock_irq(&serial->serial_lock); | ||
1586 | add_wait_queue(&tiocmget->waitq, &wait); | ||
1587 | for (;;) { | ||
1588 | spin_lock_irq(&serial->serial_lock); | ||
1589 | memcpy(&cnow, &tiocmget->icount, sizeof(struct uart_icount)); | ||
1590 | spin_unlock_irq(&serial->serial_lock); | ||
1591 | set_current_state(TASK_INTERRUPTIBLE); | ||
1592 | if (((arg & TIOCM_RNG) && (cnow.rng != cprev.rng)) || | ||
1593 | ((arg & TIOCM_DSR) && (cnow.dsr != cprev.dsr)) || | ||
1594 | ((arg & TIOCM_CD) && (cnow.dcd != cprev.dcd))) { | ||
1595 | ret = 0; | ||
1596 | break; | ||
1597 | } | ||
1598 | schedule(); | ||
1599 | /* see if a signal did it */ | ||
1600 | if (signal_pending(current)) { | ||
1601 | ret = -ERESTARTSYS; | ||
1602 | break; | ||
1603 | } | ||
1604 | cprev = cnow; | ||
1605 | } | ||
1606 | current->state = TASK_RUNNING; | ||
1607 | remove_wait_queue(&tiocmget->waitq, &wait); | ||
1608 | |||
1609 | return ret; | ||
1610 | } | ||
1611 | |||
1612 | /* | ||
1613 | * Get counter of input serial line interrupts (DCD,RI,DSR,CTS) | ||
1614 | * Return: write counters to the user passed counter struct | ||
1615 | * NB: both 1->0 and 0->1 transitions are counted except for | ||
1616 | * RI where only 0->1 is counted. | ||
1617 | */ | ||
1618 | static int hso_get_count(struct hso_serial *serial, | ||
1619 | struct serial_icounter_struct __user *icnt) | ||
1620 | { | ||
1621 | struct serial_icounter_struct icount; | ||
1622 | struct uart_icount cnow; | ||
1623 | struct hso_tiocmget *tiocmget = serial->tiocmget; | ||
1624 | |||
1625 | if (!tiocmget) | ||
1626 | return -ENOENT; | ||
1627 | spin_lock_irq(&serial->serial_lock); | ||
1628 | memcpy(&cnow, &tiocmget->icount, sizeof(struct uart_icount)); | ||
1629 | spin_unlock_irq(&serial->serial_lock); | ||
1630 | |||
1631 | icount.cts = cnow.cts; | ||
1632 | icount.dsr = cnow.dsr; | ||
1633 | icount.rng = cnow.rng; | ||
1634 | icount.dcd = cnow.dcd; | ||
1635 | icount.rx = cnow.rx; | ||
1636 | icount.tx = cnow.tx; | ||
1637 | icount.frame = cnow.frame; | ||
1638 | icount.overrun = cnow.overrun; | ||
1639 | icount.parity = cnow.parity; | ||
1640 | icount.brk = cnow.brk; | ||
1641 | icount.buf_overrun = cnow.buf_overrun; | ||
1642 | |||
1643 | return copy_to_user(icnt, &icount, sizeof(icount)) ? -EFAULT : 0; | ||
1644 | } | ||
1645 | |||
1403 | 1646 | ||
1404 | static int hso_serial_tiocmget(struct tty_struct *tty, struct file *file) | 1647 | static int hso_serial_tiocmget(struct tty_struct *tty, struct file *file) |
1405 | { | 1648 | { |
1406 | unsigned int value; | 1649 | int retval; |
1407 | struct hso_serial *serial = get_serial_by_tty(tty); | 1650 | struct hso_serial *serial = get_serial_by_tty(tty); |
1408 | unsigned long flags; | 1651 | struct hso_tiocmget *tiocmget; |
1652 | u16 UART_state_bitmap; | ||
1409 | 1653 | ||
1410 | /* sanity check */ | 1654 | /* sanity check */ |
1411 | if (!serial) { | 1655 | if (!serial) { |
1412 | D1("no tty structures"); | 1656 | D1("no tty structures"); |
1413 | return -EINVAL; | 1657 | return -EINVAL; |
1414 | } | 1658 | } |
1415 | 1659 | spin_lock_irq(&serial->serial_lock); | |
1416 | spin_lock_irqsave(&serial->serial_lock, flags); | 1660 | retval = ((serial->rts_state) ? TIOCM_RTS : 0) | |
1417 | value = ((serial->rts_state) ? TIOCM_RTS : 0) | | ||
1418 | ((serial->dtr_state) ? TIOCM_DTR : 0); | 1661 | ((serial->dtr_state) ? TIOCM_DTR : 0); |
1419 | spin_unlock_irqrestore(&serial->serial_lock, flags); | 1662 | tiocmget = serial->tiocmget; |
1663 | if (tiocmget) { | ||
1420 | 1664 | ||
1421 | return value; | 1665 | UART_state_bitmap = |
1666 | tiocmget->prev_UART_state_bitmap; /* already CPU order */ | ||
1667 | if (UART_state_bitmap & B_RING_SIGNAL) | ||
1668 | retval |= TIOCM_RNG; | ||
1669 | if (UART_state_bitmap & B_RX_CARRIER) | ||
1670 | retval |= TIOCM_CD; | ||
1671 | if (UART_state_bitmap & B_TX_CARRIER) | ||
1672 | retval |= TIOCM_DSR; | ||
1673 | } | ||
1674 | spin_unlock_irq(&serial->serial_lock); | ||
1675 | return retval; | ||
1422 | } | 1676 | } |
1423 | 1677 | ||
1424 | static int hso_serial_tiocmset(struct tty_struct *tty, struct file *file, | 1678 | static int hso_serial_tiocmset(struct tty_struct *tty, struct file *file, |
@@ -1460,6 +1714,32 @@ static int hso_serial_tiocmset(struct tty_struct *tty, struct file *file, | |||
1460 | USB_CTRL_SET_TIMEOUT); | 1714 | USB_CTRL_SET_TIMEOUT); |
1461 | } | 1715 | } |
1462 | 1716 | ||
1717 | static int hso_serial_ioctl(struct tty_struct *tty, struct file *file, | ||
1718 | unsigned int cmd, unsigned long arg) | ||
1719 | { | ||
1720 | struct hso_serial *serial = get_serial_by_tty(tty); | ||
1721 | void __user *uarg = (void __user *)arg; | ||
1722 | int ret = 0; | ||
1723 | D4("IOCTL cmd: %d, arg: %ld", cmd, arg); | ||
1724 | |||
1725 | if (!serial) | ||
1726 | return -ENODEV; | ||
1727 | switch (cmd) { | ||
1728 | case TIOCMIWAIT: | ||
1729 | ret = hso_wait_modem_status(serial, arg); | ||
1730 | break; | ||
1731 | |||
1732 | case TIOCGICOUNT: | ||
1733 | ret = hso_get_count(serial, uarg); | ||
1734 | break; | ||
1735 | default: | ||
1736 | ret = -ENOIOCTLCMD; | ||
1737 | break; | ||
1738 | } | ||
1739 | return ret; | ||
1740 | } | ||
1741 | |||
1742 | |||
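With the ioctl hook wired up, userspace can now block on modem-line transitions and then ask which counter moved, exactly as the serial_core-derived comment above describes. A usage sketch; the device path is an assumed example:

/* Sketch: wait for DCD or RI changes, then read the counters. */
#include <fcntl.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <termios.h>
#include <unistd.h>
#include <linux/serial.h>

int main(void)
{
	struct serial_icounter_struct ic;
	int fd = open("/dev/ttyHS0", O_RDWR | O_NOCTTY);

	if (fd < 0)
		return 1;
	/* TIOCMIWAIT blocks until a masked line toggles or a signal hits */
	if (ioctl(fd, TIOCMIWAIT, TIOCM_CD | TIOCM_RNG) == 0 &&
	    ioctl(fd, TIOCGICOUNT, &ic) == 0)
		printf("dcd=%d rng=%d dsr=%d\n", ic.dcd, ic.rng, ic.dsr);
	close(fd);
	return 0;
}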
1463 | /* starts a transmit */ | 1743 | /* starts a transmit */ |
1464 | static void hso_kick_transmit(struct hso_serial *serial) | 1744 | static void hso_kick_transmit(struct hso_serial *serial) |
1465 | { | 1745 | { |
@@ -1653,6 +1933,7 @@ static void hso_std_serial_write_bulk_callback(struct urb *urb) | |||
1653 | { | 1933 | { |
1654 | struct hso_serial *serial = urb->context; | 1934 | struct hso_serial *serial = urb->context; |
1655 | int status = urb->status; | 1935 | int status = urb->status; |
1936 | struct tty_struct *tty; | ||
1656 | 1937 | ||
1657 | /* sanity check */ | 1938 | /* sanity check */ |
1658 | if (!serial) { | 1939 | if (!serial) { |
@@ -1662,14 +1943,18 @@ static void hso_std_serial_write_bulk_callback(struct urb *urb) | |||
1662 | 1943 | ||
1663 | spin_lock(&serial->serial_lock); | 1944 | spin_lock(&serial->serial_lock); |
1664 | serial->tx_urb_used = 0; | 1945 | serial->tx_urb_used = 0; |
1946 | tty = tty_kref_get(serial->tty); | ||
1665 | spin_unlock(&serial->serial_lock); | 1947 | spin_unlock(&serial->serial_lock); |
1666 | if (status) { | 1948 | if (status) { |
1667 | log_usb_status(status, __func__); | 1949 | log_usb_status(status, __func__); |
1950 | tty_kref_put(tty); | ||
1668 | return; | 1951 | return; |
1669 | } | 1952 | } |
1670 | hso_put_activity(serial->parent); | 1953 | hso_put_activity(serial->parent); |
1671 | if (serial->tty) | 1954 | if (tty) { |
1672 | tty_wakeup(serial->tty); | 1955 | tty_wakeup(tty); |
1956 | tty_kref_put(tty); | ||
1957 | } | ||
1673 | hso_kick_transmit(serial); | 1958 | hso_kick_transmit(serial); |
1674 | 1959 | ||
1675 | D1(" "); | 1960 | D1(" "); |
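The same reference dance repeats in every completion path this patch touches: sample serial->tty under serial_lock, pin it with tty_kref_get(), drop the lock, use the tty, then tty_kref_put(). A condensed sketch of the idiom with a hypothetical helper name:

/* Sketch: serial->tty may be cleared concurrently by hso_serial_close(),
 * so it is only sampled under serial_lock and pinned with a kref.
 * tty_kref_get() and tty_kref_put() both tolerate NULL. */
static void hso_wake_writers_sketch(struct hso_serial *serial)
{
	struct tty_struct *tty;

	spin_lock(&serial->serial_lock);
	tty = tty_kref_get(serial->tty);
	spin_unlock(&serial->serial_lock);

	if (tty)
		tty_wakeup(tty);
	tty_kref_put(tty);
}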
@@ -1706,6 +1991,7 @@ static void ctrl_callback(struct urb *urb) | |||
1706 | struct hso_serial *serial = urb->context; | 1991 | struct hso_serial *serial = urb->context; |
1707 | struct usb_ctrlrequest *req; | 1992 | struct usb_ctrlrequest *req; |
1708 | int status = urb->status; | 1993 | int status = urb->status; |
1994 | struct tty_struct *tty; | ||
1709 | 1995 | ||
1710 | /* sanity check */ | 1996 | /* sanity check */ |
1711 | if (!serial) | 1997 | if (!serial) |
@@ -1713,9 +1999,11 @@ static void ctrl_callback(struct urb *urb) | |||
1713 | 1999 | ||
1714 | spin_lock(&serial->serial_lock); | 2000 | spin_lock(&serial->serial_lock); |
1715 | serial->tx_urb_used = 0; | 2001 | serial->tx_urb_used = 0; |
2002 | tty = tty_kref_get(serial->tty); | ||
1716 | spin_unlock(&serial->serial_lock); | 2003 | spin_unlock(&serial->serial_lock); |
1717 | if (status) { | 2004 | if (status) { |
1718 | log_usb_status(status, __func__); | 2005 | log_usb_status(status, __func__); |
2006 | tty_kref_put(tty); | ||
1719 | return; | 2007 | return; |
1720 | } | 2008 | } |
1721 | 2009 | ||
@@ -1734,25 +2022,31 @@ static void ctrl_callback(struct urb *urb) | |||
1734 | spin_unlock(&serial->serial_lock); | 2022 | spin_unlock(&serial->serial_lock); |
1735 | } else { | 2023 | } else { |
1736 | hso_put_activity(serial->parent); | 2024 | hso_put_activity(serial->parent); |
1737 | if (serial->tty) | 2025 | if (tty) |
1738 | tty_wakeup(serial->tty); | 2026 | tty_wakeup(tty); |
1739 | /* response to a write command */ | 2027 | /* response to a write command */ |
1740 | hso_kick_transmit(serial); | 2028 | hso_kick_transmit(serial); |
1741 | } | 2029 | } |
2030 | tty_kref_put(tty); | ||
1742 | } | 2031 | } |
1743 | 2032 | ||
1744 | /* handle RX data for serial port */ | 2033 | /* handle RX data for serial port */ |
1745 | static int put_rxbuf_data(struct urb *urb, struct hso_serial *serial) | 2034 | static int put_rxbuf_data(struct urb *urb, struct hso_serial *serial) |
1746 | { | 2035 | { |
1747 | struct tty_struct *tty = serial->tty; | 2036 | struct tty_struct *tty; |
1748 | int write_length_remaining = 0; | 2037 | int write_length_remaining = 0; |
1749 | int curr_write_len; | 2038 | int curr_write_len; |
2039 | |||
1750 | /* Sanity check */ | 2040 | /* Sanity check */ |
1751 | if (urb == NULL || serial == NULL) { | 2041 | if (urb == NULL || serial == NULL) { |
1752 | D1("serial = NULL"); | 2042 | D1("serial = NULL"); |
1753 | return -2; | 2043 | return -2; |
1754 | } | 2044 | } |
1755 | 2045 | ||
2046 | spin_lock(&serial->serial_lock); | ||
2047 | tty = tty_kref_get(serial->tty); | ||
2048 | spin_unlock(&serial->serial_lock); | ||
2049 | |||
1756 | /* Push data to tty */ | 2050 | /* Push data to tty */ |
1757 | if (tty) { | 2051 | if (tty) { |
1758 | write_length_remaining = urb->actual_length - | 2052 | write_length_remaining = urb->actual_length - |
@@ -1774,6 +2068,7 @@ static int put_rxbuf_data(struct urb *urb, struct hso_serial *serial) | |||
1774 | serial->curr_rx_urb_offset = 0; | 2068 | serial->curr_rx_urb_offset = 0; |
1775 | serial->rx_urb_filled[hso_urb_to_index(serial, urb)] = 0; | 2069 | serial->rx_urb_filled[hso_urb_to_index(serial, urb)] = 0; |
1776 | } | 2070 | } |
2071 | tty_kref_put(tty); | ||
1777 | return write_length_remaining; | 2072 | return write_length_remaining; |
1778 | } | 2073 | } |
1779 | 2074 | ||
@@ -1922,7 +2217,10 @@ static int hso_start_serial_device(struct hso_device *hso_dev, gfp_t flags) | |||
1922 | serial->shared_int->use_count++; | 2217 | serial->shared_int->use_count++; |
1923 | mutex_unlock(&serial->shared_int->shared_int_lock); | 2218 | mutex_unlock(&serial->shared_int->shared_int_lock); |
1924 | } | 2219 | } |
1925 | 2220 | if (serial->tiocmget) | |
2221 | tiocmget_submit_urb(serial, | ||
2222 | serial->tiocmget, | ||
2223 | serial->parent->usb); | ||
1926 | return result; | 2224 | return result; |
1927 | } | 2225 | } |
1928 | 2226 | ||
@@ -1930,6 +2228,7 @@ static int hso_stop_serial_device(struct hso_device *hso_dev) | |||
1930 | { | 2228 | { |
1931 | int i; | 2229 | int i; |
1932 | struct hso_serial *serial = dev2ser(hso_dev); | 2230 | struct hso_serial *serial = dev2ser(hso_dev); |
2231 | struct hso_tiocmget *tiocmget; | ||
1933 | 2232 | ||
1934 | if (!serial) | 2233 | if (!serial) |
1935 | return -ENODEV; | 2234 | return -ENODEV; |
@@ -1958,6 +2257,11 @@ static int hso_stop_serial_device(struct hso_device *hso_dev) | |||
1958 | } | 2257 | } |
1959 | mutex_unlock(&serial->shared_int->shared_int_lock); | 2258 | mutex_unlock(&serial->shared_int->shared_int_lock); |
1960 | } | 2259 | } |
2260 | tiocmget = serial->tiocmget; | ||
2261 | if (tiocmget) { | ||
2262 | wake_up_interruptible(&tiocmget->waitq); | ||
2263 | usb_kill_urb(tiocmget->urb); | ||
2264 | } | ||
1961 | 2265 | ||
1962 | return 0; | 2266 | return 0; |
1963 | } | 2267 | } |
@@ -2304,6 +2608,20 @@ exit: | |||
2304 | return NULL; | 2608 | return NULL; |
2305 | } | 2609 | } |
2306 | 2610 | ||
2611 | static void hso_free_tiomget(struct hso_serial *serial) | ||
2612 | { | ||
2613 | struct hso_tiocmget *tiocmget = serial->tiocmget; | ||
2614 | if (tiocmget) { | ||
2615 | if (tiocmget->urb) { | ||
2616 | usb_free_urb(tiocmget->urb); | ||
2617 | tiocmget->urb = NULL; | ||
2618 | } | ||
2619 | serial->tiocmget = NULL; | ||
2620 | /* free the container only after its URB is released */ | ||
2621 | kfree(tiocmget); | ||
2622 | } | ||
2623 | } | ||
2624 | |||
2307 | /* Frees an AT channel ( goes for both mux and non-mux ) */ | 2625 | /* Frees an AT channel ( goes for both mux and non-mux ) */ |
2308 | static void hso_free_serial_device(struct hso_device *hso_dev) | 2626 | static void hso_free_serial_device(struct hso_device *hso_dev) |
2309 | { | 2627 | { |
@@ -2322,6 +2640,7 @@ static void hso_free_serial_device(struct hso_device *hso_dev) | |||
2322 | else | 2640 | else |
2323 | mutex_unlock(&serial->shared_int->shared_int_lock); | 2641 | mutex_unlock(&serial->shared_int->shared_int_lock); |
2324 | } | 2642 | } |
2643 | hso_free_tiomget(serial); | ||
2325 | kfree(serial); | 2644 | kfree(serial); |
2326 | hso_free_device(hso_dev); | 2645 | hso_free_device(hso_dev); |
2327 | } | 2646 | } |
@@ -2333,6 +2652,7 @@ static struct hso_device *hso_create_bulk_serial_device( | |||
2333 | struct hso_device *hso_dev; | 2652 | struct hso_device *hso_dev; |
2334 | struct hso_serial *serial; | 2653 | struct hso_serial *serial; |
2335 | int num_urbs; | 2654 | int num_urbs; |
2655 | struct hso_tiocmget *tiocmget; | ||
2336 | 2656 | ||
2337 | hso_dev = hso_create_device(interface, port); | 2657 | hso_dev = hso_create_device(interface, port); |
2338 | if (!hso_dev) | 2658 | if (!hso_dev) |
@@ -2345,8 +2665,27 @@ static struct hso_device *hso_create_bulk_serial_device( | |||
2345 | serial->parent = hso_dev; | 2665 | serial->parent = hso_dev; |
2346 | hso_dev->port_data.dev_serial = serial; | 2666 | hso_dev->port_data.dev_serial = serial; |
2347 | 2667 | ||
2348 | if (port & HSO_PORT_MODEM) | 2668 | if ((port & HSO_PORT_MASK) == HSO_PORT_MODEM) { |
2349 | num_urbs = 2; | 2669 | num_urbs = 2; |
2670 | serial->tiocmget = kzalloc(sizeof(struct hso_tiocmget), | ||
2671 | GFP_KERNEL); | ||
2672 | /* A failed serial->tiocmget allocation is not fatal; the | ||
2673 | * port simply comes up without modem-status reporting. | ||
2674 | */ | ||
2675 | if (serial->tiocmget) { | ||
2676 | tiocmget = serial->tiocmget; | ||
2677 | tiocmget->urb = usb_alloc_urb(0, GFP_KERNEL); | ||
2678 | if (tiocmget->urb) { | ||
2679 | mutex_init(&tiocmget->mutex); | ||
2680 | init_waitqueue_head(&tiocmget->waitq); | ||
2681 | tiocmget->endp = hso_get_ep( | ||
2682 | interface, | ||
2683 | USB_ENDPOINT_XFER_INT, | ||
2684 | USB_DIR_IN); | ||
2685 | } else | ||
2686 | hso_free_tiomget(serial); | ||
2687 | } | ||
2688 | } | ||
2350 | else | 2689 | else |
2351 | num_urbs = 1; | 2690 | num_urbs = 1; |
2352 | 2691 | ||
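The tiocmget setup above is deliberately best-effort: if either allocation fails, the modem port still registers, just without TIOCMIWAIT/TIOCGICOUNT support. A compact sketch of that optional-feature pattern; the helper name is hypothetical and the bookkeeping condensed:

/* Sketch: optional sub-feature setup where failure downgrades the port
 * instead of aborting the probe. */
static void hso_attach_tiocmget_sketch(struct hso_serial *serial,
				       struct usb_interface *intf)
{
	struct hso_tiocmget *t = kzalloc(sizeof(*t), GFP_KERNEL);

	if (!t)
		return;			/* port works, minus status ioctls */
	t->urb = usb_alloc_urb(0, GFP_KERNEL);
	if (!t->urb) {
		kfree(t);
		return;
	}
	mutex_init(&t->mutex);
	init_waitqueue_head(&t->waitq);
	t->endp = hso_get_ep(intf, USB_ENDPOINT_XFER_INT, USB_DIR_IN);
	serial->tiocmget = t;
}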
@@ -2382,6 +2721,7 @@ static struct hso_device *hso_create_bulk_serial_device( | |||
2382 | exit2: | 2721 | exit2: |
2383 | hso_serial_common_free(serial); | 2722 | hso_serial_common_free(serial); |
2384 | exit: | 2723 | exit: |
2724 | hso_free_tiomget(serial); | ||
2385 | kfree(serial); | 2725 | kfree(serial); |
2386 | hso_free_device(hso_dev); | 2726 | hso_free_device(hso_dev); |
2387 | return NULL; | 2727 | return NULL; |
@@ -2786,15 +3126,20 @@ static void hso_serial_ref_free(struct kref *ref) | |||
2786 | static void hso_free_interface(struct usb_interface *interface) | 3126 | static void hso_free_interface(struct usb_interface *interface) |
2787 | { | 3127 | { |
2788 | struct hso_serial *hso_dev; | 3128 | struct hso_serial *hso_dev; |
3129 | struct tty_struct *tty; | ||
2789 | int i; | 3130 | int i; |
2790 | 3131 | ||
2791 | for (i = 0; i < HSO_SERIAL_TTY_MINORS; i++) { | 3132 | for (i = 0; i < HSO_SERIAL_TTY_MINORS; i++) { |
2792 | if (serial_table[i] | 3133 | if (serial_table[i] |
2793 | && (serial_table[i]->interface == interface)) { | 3134 | && (serial_table[i]->interface == interface)) { |
2794 | hso_dev = dev2ser(serial_table[i]); | 3135 | hso_dev = dev2ser(serial_table[i]); |
2795 | if (hso_dev->tty) | 3136 | spin_lock_irq(&hso_dev->serial_lock); |
2796 | tty_hangup(hso_dev->tty); | 3137 | tty = tty_kref_get(hso_dev->tty); |
3138 | spin_unlock_irq(&hso_dev->serial_lock); | ||
3139 | if (tty) | ||
3140 | tty_hangup(tty); | ||
2797 | mutex_lock(&hso_dev->parent->mutex); | 3141 | mutex_lock(&hso_dev->parent->mutex); |
3142 | tty_kref_put(tty); | ||
2798 | hso_dev->parent->usb_gone = 1; | 3143 | hso_dev->parent->usb_gone = 1; |
2799 | mutex_unlock(&hso_dev->parent->mutex); | 3144 | mutex_unlock(&hso_dev->parent->mutex); |
2800 | kref_put(&serial_table[i]->ref, hso_serial_ref_free); | 3145 | kref_put(&serial_table[i]->ref, hso_serial_ref_free); |
@@ -2887,6 +3232,7 @@ static const struct tty_operations hso_serial_ops = { | |||
2887 | .close = hso_serial_close, | 3232 | .close = hso_serial_close, |
2888 | .write = hso_serial_write, | 3233 | .write = hso_serial_write, |
2889 | .write_room = hso_serial_write_room, | 3234 | .write_room = hso_serial_write_room, |
3235 | .ioctl = hso_serial_ioctl, | ||
2890 | .set_termios = hso_serial_set_termios, | 3236 | .set_termios = hso_serial_set_termios, |
2891 | .chars_in_buffer = hso_serial_chars_in_buffer, | 3237 | .chars_in_buffer = hso_serial_chars_in_buffer, |
2892 | .tiocmget = hso_serial_tiocmget, | 3238 | .tiocmget = hso_serial_tiocmget, |
@@ -2939,9 +3285,7 @@ static int __init hso_init(void) | |||
2939 | tty_drv->subtype = SERIAL_TYPE_NORMAL; | 3285 | tty_drv->subtype = SERIAL_TYPE_NORMAL; |
2940 | tty_drv->flags = TTY_DRIVER_REAL_RAW | TTY_DRIVER_DYNAMIC_DEV; | 3286 | tty_drv->flags = TTY_DRIVER_REAL_RAW | TTY_DRIVER_DYNAMIC_DEV; |
2941 | tty_drv->init_termios = tty_std_termios; | 3287 | tty_drv->init_termios = tty_std_termios; |
2942 | tty_drv->init_termios.c_cflag = B9600 | CS8 | CREAD | HUPCL | CLOCAL; | 3288 | hso_init_termios(&tty_drv->init_termios); |
2943 | tty_drv->termios = hso_serial_termios; | ||
2944 | tty_drv->termios_locked = hso_serial_termios_locked; | ||
2945 | tty_set_operations(tty_drv, &hso_serial_ops); | 3289 | tty_set_operations(tty_drv, &hso_serial_ops); |
2946 | 3290 | ||
2947 | /* register the tty driver */ | 3291 | /* register the tty driver */ |
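The deleted termios/termios_locked arrays reflect a tty-core change: the core now allocates per-device termios storage itself, so a driver only seeds the template in init_termios. A condensed sketch of the registration flow under that assumption; the real hso_init also fills in name, major, subtype and flags, elided here:

/* Sketch: tty driver setup after the per-driver termios arrays went away. */
static int __init example_tty_init_sketch(void)
{
	struct tty_driver *drv = alloc_tty_driver(HSO_SERIAL_TTY_MINORS);

	if (!drv)
		return -ENOMEM;
	drv->init_termios = tty_std_termios;	/* start from the stock set */
	hso_init_termios(&drv->init_termios);	/* then apply 8N1/115200 */
	tty_set_operations(drv, &hso_serial_ops);
	return tty_register_driver(drv);
}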
diff --git a/drivers/parisc/iosapic.c b/drivers/parisc/iosapic.c index 7beffcab2745..9dedbbd218c3 100644 --- a/drivers/parisc/iosapic.c +++ b/drivers/parisc/iosapic.c | |||
@@ -704,16 +704,17 @@ static unsigned int iosapic_startup_irq(unsigned int irq) | |||
704 | } | 704 | } |
705 | 705 | ||
706 | #ifdef CONFIG_SMP | 706 | #ifdef CONFIG_SMP |
707 | static void iosapic_set_affinity_irq(unsigned int irq, cpumask_t dest) | 707 | static void iosapic_set_affinity_irq(unsigned int irq, |
708 | const struct cpumask *dest) | ||
708 | { | 709 | { |
709 | struct vector_info *vi = iosapic_get_vector(irq); | 710 | struct vector_info *vi = iosapic_get_vector(irq); |
710 | u32 d0, d1, dummy_d0; | 711 | u32 d0, d1, dummy_d0; |
711 | unsigned long flags; | 712 | unsigned long flags; |
712 | 713 | ||
713 | if (cpu_check_affinity(irq, &dest)) | 714 | if (cpu_check_affinity(irq, dest)) |
714 | return; | 715 | return; |
715 | 716 | ||
716 | vi->txn_addr = txn_affinity_addr(irq, first_cpu(dest)); | 717 | vi->txn_addr = txn_affinity_addr(irq, cpumask_first(dest)); |
717 | 718 | ||
718 | spin_lock_irqsave(&iosapic_lock, flags); | 719 | spin_lock_irqsave(&iosapic_lock, flags); |
719 | /* d1 contains the destination CPU, so only want to set that | 720 | /* d1 contains the destination CPU, so only want to set that |
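This hunk tracks the tree-wide cpumask rework: set_affinity handlers take a const struct cpumask * instead of a cpumask_t copied by value, and first_cpu() becomes cpumask_first(). A before/after sketch of just the calling convention, with the iosapic bookkeeping elided:

/* old style: the whole mask landed on the stack */
static void set_affinity_old_sketch(unsigned int irq, cpumask_t dest)
{
	unsigned int cpu = first_cpu(dest);
	/* ... program the vector for cpu ... */
}

/* new style: a pointer to shared, read-only mask storage */
static void set_affinity_new_sketch(unsigned int irq,
				    const struct cpumask *dest)
{
	unsigned int cpu = cpumask_first(dest);
	/* ... program the vector for cpu ... */
}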
diff --git a/drivers/pci/hotplug/cpqphp_core.c b/drivers/pci/hotplug/cpqphp_core.c index 8514c3a1746a..c2e1bcbb28a7 100644 --- a/drivers/pci/hotplug/cpqphp_core.c +++ b/drivers/pci/hotplug/cpqphp_core.c | |||
@@ -45,7 +45,7 @@ | |||
45 | 45 | ||
46 | #include "cpqphp.h" | 46 | #include "cpqphp.h" |
47 | #include "cpqphp_nvram.h" | 47 | #include "cpqphp_nvram.h" |
48 | #include "../../../arch/x86/pci/pci.h" /* horrible hack showing how processor dependent we are... */ | 48 | #include <asm/pci_x86.h> |
49 | 49 | ||
50 | 50 | ||
51 | /* Global variables */ | 51 | /* Global variables */ |
diff --git a/drivers/pci/hotplug/cpqphp_pci.c b/drivers/pci/hotplug/cpqphp_pci.c index 09021930589f..df146be9d2e9 100644 --- a/drivers/pci/hotplug/cpqphp_pci.c +++ b/drivers/pci/hotplug/cpqphp_pci.c | |||
@@ -37,7 +37,7 @@ | |||
37 | #include "../pci.h" | 37 | #include "../pci.h" |
38 | #include "cpqphp.h" | 38 | #include "cpqphp.h" |
39 | #include "cpqphp_nvram.h" | 39 | #include "cpqphp_nvram.h" |
40 | #include "../../../arch/x86/pci/pci.h" /* horrible hack showing how processor dependent we are... */ | 40 | #include <asm/pci_x86.h> |
41 | 41 | ||
42 | 42 | ||
43 | u8 cpqhp_nic_irq; | 43 | u8 cpqhp_nic_irq; |
diff --git a/drivers/pci/hotplug/ibmphp_core.c b/drivers/pci/hotplug/ibmphp_core.c index 633e743442ac..dd18f857dfb0 100644 --- a/drivers/pci/hotplug/ibmphp_core.c +++ b/drivers/pci/hotplug/ibmphp_core.c | |||
@@ -35,7 +35,7 @@ | |||
35 | #include <linux/delay.h> | 35 | #include <linux/delay.h> |
36 | #include <linux/wait.h> | 36 | #include <linux/wait.h> |
37 | #include "../pci.h" | 37 | #include "../pci.h" |
38 | #include "../../../arch/x86/pci/pci.h" /* for struct irq_routing_table */ | 38 | #include <asm/pci_x86.h> /* for struct irq_routing_table */ |
39 | #include "ibmphp.h" | 39 | #include "ibmphp.h" |
40 | 40 | ||
41 | #define attn_on(sl) ibmphp_hpc_writeslot (sl, HPC_SLOT_ATTNON) | 41 | #define attn_on(sl) ibmphp_hpc_writeslot (sl, HPC_SLOT_ATTNON) |
diff --git a/drivers/pci/pci-sysfs.c b/drivers/pci/pci-sysfs.c index 5d72866897a8..c88485860a0a 100644 --- a/drivers/pci/pci-sysfs.c +++ b/drivers/pci/pci-sysfs.c | |||
@@ -74,7 +74,7 @@ static ssize_t local_cpus_show(struct device *dev, | |||
74 | int len; | 74 | int len; |
75 | 75 | ||
76 | mask = pcibus_to_cpumask(to_pci_dev(dev)->bus); | 76 | mask = pcibus_to_cpumask(to_pci_dev(dev)->bus); |
77 | len = cpumask_scnprintf(buf, PAGE_SIZE-2, mask); | 77 | len = cpumask_scnprintf(buf, PAGE_SIZE-2, &mask); |
78 | buf[len++] = '\n'; | 78 | buf[len++] = '\n'; |
79 | buf[len] = '\0'; | 79 | buf[len] = '\0'; |
80 | return len; | 80 | return len; |
@@ -88,7 +88,7 @@ static ssize_t local_cpulist_show(struct device *dev, | |||
88 | int len; | 88 | int len; |
89 | 89 | ||
90 | mask = pcibus_to_cpumask(to_pci_dev(dev)->bus); | 90 | mask = pcibus_to_cpumask(to_pci_dev(dev)->bus); |
91 | len = cpulist_scnprintf(buf, PAGE_SIZE-2, mask); | 91 | len = cpulist_scnprintf(buf, PAGE_SIZE-2, &mask); |
92 | buf[len++] = '\n'; | 92 | buf[len++] = '\n'; |
93 | buf[len] = '\0'; | 93 | buf[len] = '\0'; |
94 | return len; | 94 | return len; |
diff --git a/drivers/pci/probe.c b/drivers/pci/probe.c index 003a9b3c293f..5b3f5937ecf5 100644 --- a/drivers/pci/probe.c +++ b/drivers/pci/probe.c | |||
@@ -55,8 +55,8 @@ static ssize_t pci_bus_show_cpuaffinity(struct device *dev, | |||
55 | 55 | ||
56 | cpumask = pcibus_to_cpumask(to_pci_bus(dev)); | 56 | cpumask = pcibus_to_cpumask(to_pci_bus(dev)); |
57 | ret = type? | 57 | ret = type? |
58 | cpulist_scnprintf(buf, PAGE_SIZE-2, cpumask): | 58 | cpulist_scnprintf(buf, PAGE_SIZE-2, &cpumask) : |
59 | cpumask_scnprintf(buf, PAGE_SIZE-2, cpumask); | 59 | cpumask_scnprintf(buf, PAGE_SIZE-2, &cpumask); |
60 | buf[ret++] = '\n'; | 60 | buf[ret++] = '\n'; |
61 | buf[ret] = '\0'; | 61 | buf[ret] = '\0'; |
62 | return ret; | 62 | return ret; |
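The sysfs hunks above are the printing side of the same rework: cpumask_scnprintf() and cpulist_scnprintf() now take the mask by pointer. A sketch of the updated show-function pattern shared by pci-sysfs.c and probe.c; the wrapper name is illustrative:

/* Sketch: format a cpumask into a sysfs buffer with the pointer API. */
static ssize_t cpus_show_sketch(char *buf, const cpumask_t *mask, int list)
{
	int len = list ?
		cpulist_scnprintf(buf, PAGE_SIZE - 2, mask) :
		cpumask_scnprintf(buf, PAGE_SIZE - 2, mask);

	buf[len++] = '\n';
	buf[len] = '\0';
	return len;
}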
diff --git a/drivers/scsi/Kconfig b/drivers/scsi/Kconfig index 152d4aa9354f..b7322976d2b7 100644 --- a/drivers/scsi/Kconfig +++ b/drivers/scsi/Kconfig | |||
@@ -21,7 +21,7 @@ config SCSI | |||
21 | You also need to say Y here if you have a device which speaks | 21 | You also need to say Y here if you have a device which speaks |
22 | the SCSI protocol. Examples of this include the parallel port | 22 | the SCSI protocol. Examples of this include the parallel port |
23 | version of the IOMEGA ZIP drive, USB storage devices, Fibre | 23 | version of the IOMEGA ZIP drive, USB storage devices, Fibre |
24 | Channel, FireWire storage and the IDE-SCSI emulation driver. | 24 | Channel, and FireWire storage. |
25 | 25 | ||
26 | To compile this driver as a module, choose M here and read | 26 | To compile this driver as a module, choose M here and read |
27 | <file:Documentation/scsi/scsi.txt>. | 27 | <file:Documentation/scsi/scsi.txt>. |
@@ -101,9 +101,9 @@ config CHR_DEV_OSST | |||
101 | ---help--- | 101 | ---help--- |
102 | The OnStream SC-x0 SCSI tape drives cannot be driven by the | 102 | The OnStream SC-x0 SCSI tape drives cannot be driven by the |
103 | standard st driver, but instead need this special osst driver and | 103 | standard st driver, but instead need this special osst driver and |
104 | use the /dev/osstX char device nodes (major 206). Via usb-storage | 104 | use the /dev/osstX char device nodes (major 206). Via usb-storage, |
105 | and ide-scsi, you may be able to drive the USB-x0 and DI-x0 drives | 105 | you may be able to drive the USB-x0 and DI-x0 drives as well. |
106 | as well. Note that there is also a second generation of OnStream | 106 | Note that there is also a second generation of OnStream |
107 | tape drives (ADR-x0) that supports the standard SCSI-2 commands for | 107 | tape drives (ADR-x0) that supports the standard SCSI-2 commands for |
108 | tapes (QIC-157) and can be driven by the standard driver st. | 108 | tapes (QIC-157) and can be driven by the standard driver st. |
109 | For more information, you may have a look at the SCSI-HOWTO | 109 | For more information, you may have a look at the SCSI-HOWTO |
diff --git a/drivers/scsi/Makefile b/drivers/scsi/Makefile index 1410697257cb..7461eb09a031 100644 --- a/drivers/scsi/Makefile +++ b/drivers/scsi/Makefile | |||
@@ -105,7 +105,6 @@ obj-$(CONFIG_SCSI_GDTH) += gdth.o | |||
105 | obj-$(CONFIG_SCSI_INITIO) += initio.o | 105 | obj-$(CONFIG_SCSI_INITIO) += initio.o |
106 | obj-$(CONFIG_SCSI_INIA100) += a100u2w.o | 106 | obj-$(CONFIG_SCSI_INIA100) += a100u2w.o |
107 | obj-$(CONFIG_SCSI_QLOGICPTI) += qlogicpti.o | 107 | obj-$(CONFIG_SCSI_QLOGICPTI) += qlogicpti.o |
108 | obj-$(CONFIG_BLK_DEV_IDESCSI) += ide-scsi.o | ||
109 | obj-$(CONFIG_SCSI_MESH) += mesh.o | 108 | obj-$(CONFIG_SCSI_MESH) += mesh.o |
110 | obj-$(CONFIG_SCSI_MAC53C94) += mac53c94.o | 109 | obj-$(CONFIG_SCSI_MAC53C94) += mac53c94.o |
111 | obj-$(CONFIG_BLK_DEV_3W_XXXX_RAID) += 3w-xxxx.o | 110 | obj-$(CONFIG_BLK_DEV_3W_XXXX_RAID) += 3w-xxxx.o |
diff --git a/drivers/scsi/ide-scsi.c b/drivers/scsi/ide-scsi.c deleted file mode 100644 index c24140aff8e7..000000000000 --- a/drivers/scsi/ide-scsi.c +++ /dev/null | |||
@@ -1,840 +0,0 @@ | |||
1 | /* | ||
2 | * Copyright (C) 1996-1999 Gadi Oxman <gadio@netvision.net.il> | ||
3 | * Copyright (C) 2004-2005 Bartlomiej Zolnierkiewicz | ||
4 | */ | ||
5 | |||
6 | /* | ||
7 | * Emulation of a SCSI host adapter for IDE ATAPI devices. | ||
8 | * | ||
9 | * With this driver, one can use the Linux SCSI drivers instead of the | ||
10 | * native IDE ATAPI drivers. | ||
11 | * | ||
12 | * Ver 0.1 Dec 3 96 Initial version. | ||
13 | * Ver 0.2 Jan 26 97 Fixed bug in cleanup_module() and added emulation | ||
14 | * of MODE_SENSE_6/MODE_SELECT_6 for cdroms. Thanks | ||
15 | * to Janos Farkas for pointing this out. | ||
16 | * Avoid using bitfields in structures for m68k. | ||
17 | * Added Scatter/Gather and DMA support. | ||
18 | * Ver 0.4 Dec 7 97 Add support for ATAPI PD/CD drives. | ||
19 | * Use variable timeout for each command. | ||
20 | * Ver 0.5 Jan 2 98 Fix previous PD/CD support. | ||
21 | * Allow disabling of SCSI-6 to SCSI-10 transformation. | ||
22 | * Ver 0.6 Jan 27 98 Allow disabling of SCSI command translation layer | ||
23 | * for access through /dev/sg. | ||
24 | * Fix MODE_SENSE_6/MODE_SELECT_6/INQUIRY translation. | ||
25 | * Ver 0.7 Dec 04 98 Ignore commands where lun != 0 to avoid multiple | ||
26 | * detection of devices with CONFIG_SCSI_MULTI_LUN | ||
27 | * Ver 0.8 Feb 05 99 Optical media need translation too. Reverse 0.7. | ||
28 | * Ver 0.9 Jul 04 99 Fix a bug in SG_SET_TRANSFORM. | ||
29 | * Ver 0.91 Jun 10 02 Fix "off by one" error in transforms | ||
30 | * Ver 0.92 Dec 31 02 Implement new SCSI mid level API | ||
31 | */ | ||
32 | |||
33 | #define IDESCSI_VERSION "0.92" | ||
34 | |||
35 | #include <linux/module.h> | ||
36 | #include <linux/types.h> | ||
37 | #include <linux/string.h> | ||
38 | #include <linux/kernel.h> | ||
39 | #include <linux/mm.h> | ||
40 | #include <linux/ioport.h> | ||
41 | #include <linux/blkdev.h> | ||
42 | #include <linux/errno.h> | ||
43 | #include <linux/slab.h> | ||
44 | #include <linux/ide.h> | ||
45 | #include <linux/scatterlist.h> | ||
46 | #include <linux/delay.h> | ||
47 | #include <linux/mutex.h> | ||
48 | #include <linux/bitops.h> | ||
49 | |||
50 | #include <asm/io.h> | ||
51 | #include <asm/uaccess.h> | ||
52 | |||
53 | #include <scsi/scsi.h> | ||
54 | #include <scsi/scsi_cmnd.h> | ||
55 | #include <scsi/scsi_device.h> | ||
56 | #include <scsi/scsi_host.h> | ||
57 | #include <scsi/scsi_tcq.h> | ||
58 | #include <scsi/sg.h> | ||
59 | |||
60 | #define IDESCSI_DEBUG_LOG 0 | ||
61 | |||
62 | #if IDESCSI_DEBUG_LOG | ||
63 | #define debug_log(fmt, args...) \ | ||
64 | printk(KERN_INFO "ide-scsi: " fmt, ## args) | ||
65 | #else | ||
66 | #define debug_log(fmt, args...) do {} while (0) | ||
67 | #endif | ||
68 | |||
69 | /* | ||
70 | * SCSI command transformation layer | ||
71 | */ | ||
72 | #define IDESCSI_SG_TRANSFORM 1 /* /dev/sg transformation */ | ||
73 | |||
74 | /* | ||
75 | * Log flags | ||
76 | */ | ||
77 | #define IDESCSI_LOG_CMD 0 /* Log SCSI commands */ | ||
78 | |||
79 | typedef struct ide_scsi_obj { | ||
80 | ide_drive_t *drive; | ||
81 | ide_driver_t *driver; | ||
82 | struct gendisk *disk; | ||
83 | struct Scsi_Host *host; | ||
84 | |||
85 | unsigned long transform; /* SCSI cmd translation layer */ | ||
86 | unsigned long log; /* log flags */ | ||
87 | } idescsi_scsi_t; | ||
88 | |||
89 | static DEFINE_MUTEX(idescsi_ref_mutex); | ||
90 | /* Set by module param to skip cd */ | ||
91 | static int idescsi_nocd; | ||
92 | |||
93 | #define ide_scsi_g(disk) \ | ||
94 | container_of((disk)->private_data, struct ide_scsi_obj, driver) | ||
95 | |||
96 | static struct ide_scsi_obj *ide_scsi_get(struct gendisk *disk) | ||
97 | { | ||
98 | struct ide_scsi_obj *scsi = NULL; | ||
99 | |||
100 | mutex_lock(&idescsi_ref_mutex); | ||
101 | scsi = ide_scsi_g(disk); | ||
102 | if (scsi) { | ||
103 | if (ide_device_get(scsi->drive)) | ||
104 | scsi = NULL; | ||
105 | else | ||
106 | scsi_host_get(scsi->host); | ||
107 | } | ||
108 | mutex_unlock(&idescsi_ref_mutex); | ||
109 | return scsi; | ||
110 | } | ||
111 | |||
112 | static void ide_scsi_put(struct ide_scsi_obj *scsi) | ||
113 | { | ||
114 | ide_drive_t *drive = scsi->drive; | ||
115 | |||
116 | mutex_lock(&idescsi_ref_mutex); | ||
117 | scsi_host_put(scsi->host); | ||
118 | ide_device_put(drive); | ||
119 | mutex_unlock(&idescsi_ref_mutex); | ||
120 | } | ||
121 | |||
122 | static inline idescsi_scsi_t *scsihost_to_idescsi(struct Scsi_Host *host) | ||
123 | { | ||
124 | return (idescsi_scsi_t*) (&host[1]); | ||
125 | } | ||
126 | |||
127 | static inline idescsi_scsi_t *drive_to_idescsi(ide_drive_t *ide_drive) | ||
128 | { | ||
129 | return scsihost_to_idescsi(ide_drive->driver_data); | ||
130 | } | ||
131 | |||
132 | static void ide_scsi_hex_dump(u8 *data, int len) | ||
133 | { | ||
134 | print_hex_dump(KERN_CONT, "", DUMP_PREFIX_NONE, 16, 1, data, len, 0); | ||
135 | } | ||
136 | |||
137 | static int idescsi_end_request(ide_drive_t *, int, int); | ||
138 | |||
139 | static void ide_scsi_callback(ide_drive_t *drive, int dsc) | ||
140 | { | ||
141 | idescsi_scsi_t *scsi = drive_to_idescsi(drive); | ||
142 | struct ide_atapi_pc *pc = drive->pc; | ||
143 | |||
144 | if (pc->flags & PC_FLAG_TIMEDOUT) | ||
145 | debug_log("%s: got timed out packet %lu at %lu\n", __func__, | ||
146 | pc->scsi_cmd->serial_number, jiffies); | ||
147 | /* end this request now - scsi should retry it*/ | ||
148 | else if (test_bit(IDESCSI_LOG_CMD, &scsi->log)) | ||
149 | printk(KERN_INFO "Packet command completed, %d bytes" | ||
150 | " transferred\n", pc->xferred); | ||
151 | |||
152 | idescsi_end_request(drive, 1, 0); | ||
153 | } | ||
154 | |||
155 | static int idescsi_check_condition(ide_drive_t *drive, | ||
156 | struct request *failed_cmd) | ||
157 | { | ||
158 | idescsi_scsi_t *scsi = drive_to_idescsi(drive); | ||
159 | struct ide_atapi_pc *pc; | ||
160 | struct request *rq; | ||
161 | u8 *buf; | ||
162 | |||
163 | /* stuff a sense request in front of our current request */ | ||
164 | pc = kzalloc(sizeof(struct ide_atapi_pc), GFP_ATOMIC); | ||
165 | rq = blk_get_request(drive->queue, READ, GFP_ATOMIC); | ||
166 | buf = kzalloc(SCSI_SENSE_BUFFERSIZE, GFP_ATOMIC); | ||
167 | if (!pc || !rq || !buf) { | ||
168 | kfree(buf); | ||
169 | if (rq) | ||
170 | blk_put_request(rq); | ||
171 | kfree(pc); | ||
172 | return -ENOMEM; | ||
173 | } | ||
174 | rq->special = (char *) pc; | ||
175 | pc->rq = rq; | ||
176 | pc->buf = buf; | ||
177 | pc->c[0] = REQUEST_SENSE; | ||
178 | pc->c[4] = pc->req_xfer = pc->buf_size = SCSI_SENSE_BUFFERSIZE; | ||
179 | rq->cmd_type = REQ_TYPE_SENSE; | ||
180 | rq->cmd_flags |= REQ_PREEMPT; | ||
181 | pc->timeout = jiffies + WAIT_READY; | ||
182 | /* NOTE! Save the failed packet command in "rq->buffer" */ | ||
183 | rq->buffer = (void *) failed_cmd->special; | ||
184 | pc->scsi_cmd = ((struct ide_atapi_pc *) failed_cmd->special)->scsi_cmd; | ||
185 | if (test_bit(IDESCSI_LOG_CMD, &scsi->log)) { | ||
186 | printk ("ide-scsi: %s: queue cmd = ", drive->name); | ||
187 | ide_scsi_hex_dump(pc->c, 6); | ||
188 | } | ||
189 | rq->rq_disk = scsi->disk; | ||
190 | rq->ref_count++; | ||
191 | memcpy(rq->cmd, pc->c, 12); | ||
192 | ide_do_drive_cmd(drive, rq); | ||
193 | return 0; | ||
194 | } | ||
195 | |||
196 | static ide_startstop_t | ||
197 | idescsi_atapi_error(ide_drive_t *drive, struct request *rq, u8 stat, u8 err) | ||
198 | { | ||
199 | ide_hwif_t *hwif = drive->hwif; | ||
200 | |||
201 | if (hwif->tp_ops->read_status(hwif) & (ATA_BUSY | ATA_DRQ)) | ||
202 | /* force an abort */ | ||
203 | hwif->tp_ops->exec_command(hwif, ATA_CMD_IDLEIMMEDIATE); | ||
204 | |||
205 | rq->errors++; | ||
206 | |||
207 | idescsi_end_request(drive, 0, 0); | ||
208 | |||
209 | return ide_stopped; | ||
210 | } | ||
211 | |||
212 | static int idescsi_end_request (ide_drive_t *drive, int uptodate, int nrsecs) | ||
213 | { | ||
214 | idescsi_scsi_t *scsi = drive_to_idescsi(drive); | ||
215 | struct request *rq = HWGROUP(drive)->rq; | ||
216 | struct ide_atapi_pc *pc = (struct ide_atapi_pc *) rq->special; | ||
217 | int log = test_bit(IDESCSI_LOG_CMD, &scsi->log); | ||
218 | struct Scsi_Host *host; | ||
219 | int errors = rq->errors; | ||
220 | unsigned long flags; | ||
221 | |||
222 | if (!blk_special_request(rq) && !blk_sense_request(rq)) { | ||
223 | ide_end_request(drive, uptodate, nrsecs); | ||
224 | return 0; | ||
225 | } | ||
226 | ide_end_drive_cmd (drive, 0, 0); | ||
227 | if (blk_sense_request(rq)) { | ||
228 | struct ide_atapi_pc *opc = (struct ide_atapi_pc *) rq->buffer; | ||
229 | if (log) { | ||
230 | printk ("ide-scsi: %s: wrap up check %lu, rst = ", drive->name, opc->scsi_cmd->serial_number); | ||
231 | ide_scsi_hex_dump(pc->buf, 16); | ||
232 | } | ||
233 | memcpy((void *) opc->scsi_cmd->sense_buffer, pc->buf, | ||
234 | SCSI_SENSE_BUFFERSIZE); | ||
235 | kfree(pc->buf); | ||
236 | kfree(pc); | ||
237 | blk_put_request(rq); | ||
238 | pc = opc; | ||
239 | rq = pc->rq; | ||
240 | pc->scsi_cmd->result = (CHECK_CONDITION << 1) | | ||
241 | (((pc->flags & PC_FLAG_TIMEDOUT) ? | ||
242 | DID_TIME_OUT : | ||
243 | DID_OK) << 16); | ||
244 | } else if (pc->flags & PC_FLAG_TIMEDOUT) { | ||
245 | if (log) | ||
246 | printk (KERN_WARNING "ide-scsi: %s: timed out for %lu\n", | ||
247 | drive->name, pc->scsi_cmd->serial_number); | ||
248 | pc->scsi_cmd->result = DID_TIME_OUT << 16; | ||
249 | } else if (errors >= ERROR_MAX) { | ||
250 | pc->scsi_cmd->result = DID_ERROR << 16; | ||
251 | if (log) | ||
252 | printk ("ide-scsi: %s: I/O error for %lu\n", drive->name, pc->scsi_cmd->serial_number); | ||
253 | } else if (errors) { | ||
254 | if (log) | ||
255 | printk ("ide-scsi: %s: check condition for %lu\n", drive->name, pc->scsi_cmd->serial_number); | ||
256 | if (!idescsi_check_condition(drive, rq)) | ||
257 | /* we started a request sense, so we'll be back, exit for now */ | ||
258 | return 0; | ||
259 | pc->scsi_cmd->result = (CHECK_CONDITION << 1) | (DID_OK << 16); | ||
260 | } else { | ||
261 | pc->scsi_cmd->result = DID_OK << 16; | ||
262 | } | ||
263 | host = pc->scsi_cmd->device->host; | ||
264 | spin_lock_irqsave(host->host_lock, flags); | ||
265 | pc->done(pc->scsi_cmd); | ||
266 | spin_unlock_irqrestore(host->host_lock, flags); | ||
267 | kfree(pc); | ||
268 | blk_put_request(rq); | ||
269 | drive->pc = NULL; | ||
270 | return 0; | ||
271 | } | ||
272 | |||
273 | static inline int idescsi_set_direction(struct ide_atapi_pc *pc) | ||
274 | { | ||
275 | switch (pc->c[0]) { | ||
276 | case READ_6: case READ_10: case READ_12: | ||
277 | pc->flags &= ~PC_FLAG_WRITING; | ||
278 | return 0; | ||
279 | case WRITE_6: case WRITE_10: case WRITE_12: | ||
280 | pc->flags |= PC_FLAG_WRITING; | ||
281 | return 0; | ||
282 | default: | ||
283 | return 1; | ||
284 | } | ||
285 | } | ||
286 | |||
287 | static int idescsi_map_sg(ide_drive_t *drive, struct ide_atapi_pc *pc) | ||
288 | { | ||
289 | ide_hwif_t *hwif = drive->hwif; | ||
290 | struct scatterlist *sg, *scsi_sg; | ||
291 | int segments; | ||
292 | |||
293 | if (!pc->req_xfer || pc->req_xfer % 1024) | ||
294 | return 1; | ||
295 | |||
296 | if (idescsi_set_direction(pc)) | ||
297 | return 1; | ||
298 | |||
299 | sg = hwif->sg_table; | ||
300 | scsi_sg = scsi_sglist(pc->scsi_cmd); | ||
301 | segments = scsi_sg_count(pc->scsi_cmd); | ||
302 | |||
303 | if (segments > hwif->sg_max_nents) | ||
304 | return 1; | ||
305 | |||
306 | hwif->sg_nents = segments; | ||
307 | memcpy(sg, scsi_sg, sizeof(*sg) * segments); | ||
308 | |||
309 | return 0; | ||
310 | } | ||
311 | |||
312 | static ide_startstop_t idescsi_issue_pc(ide_drive_t *drive, | ||
313 | struct ide_atapi_pc *pc) | ||
314 | { | ||
315 | /* Set the current packet command */ | ||
316 | drive->pc = pc; | ||
317 | |||
318 | return ide_issue_pc(drive, ide_scsi_get_timeout(pc), ide_scsi_expiry); | ||
319 | } | ||
320 | |||
321 | /* | ||
322 | * idescsi_do_request is our request handling function. | ||
323 | */ | ||
324 | static ide_startstop_t idescsi_do_request (ide_drive_t *drive, struct request *rq, sector_t block) | ||
325 | { | ||
326 | debug_log("dev: %s, cmd: %x, errors: %d\n", rq->rq_disk->disk_name, | ||
327 | rq->cmd[0], rq->errors); | ||
328 | debug_log("sector: %ld, nr_sectors: %ld, current_nr_sectors: %d\n", | ||
329 | rq->sector, rq->nr_sectors, rq->current_nr_sectors); | ||
330 | |||
331 | if (blk_sense_request(rq) || blk_special_request(rq)) { | ||
332 | struct ide_atapi_pc *pc = (struct ide_atapi_pc *)rq->special; | ||
333 | |||
334 | if ((drive->dev_flags & IDE_DFLAG_USING_DMA) && | ||
335 | idescsi_map_sg(drive, pc) == 0) | ||
336 | pc->flags |= PC_FLAG_DMA_OK; | ||
337 | |||
338 | return idescsi_issue_pc(drive, pc); | ||
339 | } | ||
340 | blk_dump_rq_flags(rq, "ide-scsi: unsup command"); | ||
341 | idescsi_end_request (drive, 0, 0); | ||
342 | return ide_stopped; | ||
343 | } | ||
344 | |||
345 | #ifdef CONFIG_IDE_PROC_FS | ||
346 | static ide_proc_entry_t idescsi_proc[] = { | ||
347 | { "capacity", S_IFREG|S_IRUGO, proc_ide_read_capacity, NULL }, | ||
348 | { NULL, 0, NULL, NULL } | ||
349 | }; | ||
350 | |||
351 | #define ide_scsi_devset_get(name, field) \ | ||
352 | static int get_##name(ide_drive_t *drive) \ | ||
353 | { \ | ||
354 | idescsi_scsi_t *scsi = drive_to_idescsi(drive); \ | ||
355 | return scsi->field; \ | ||
356 | } | ||
357 | |||
358 | #define ide_scsi_devset_set(name, field) \ | ||
359 | static int set_##name(ide_drive_t *drive, int arg) \ | ||
360 | { \ | ||
361 | idescsi_scsi_t *scsi = drive_to_idescsi(drive); \ | ||
362 | scsi->field = arg; \ | ||
363 | return 0; \ | ||
364 | } | ||
365 | |||
366 | #define ide_scsi_devset_rw_field(_name, _field) \ | ||
367 | ide_scsi_devset_get(_name, _field); \ | ||
368 | ide_scsi_devset_set(_name, _field); \ | ||
369 | IDE_DEVSET(_name, DS_SYNC, get_##_name, set_##_name); | ||
370 | |||
371 | ide_devset_rw_field(bios_cyl, bios_cyl); | ||
372 | ide_devset_rw_field(bios_head, bios_head); | ||
373 | ide_devset_rw_field(bios_sect, bios_sect); | ||
374 | |||
375 | ide_scsi_devset_rw_field(transform, transform); | ||
376 | ide_scsi_devset_rw_field(log, log); | ||
377 | |||
378 | static const struct ide_proc_devset idescsi_settings[] = { | ||
379 | IDE_PROC_DEVSET(bios_cyl, 0, 1023), | ||
380 | IDE_PROC_DEVSET(bios_head, 0, 255), | ||
381 | IDE_PROC_DEVSET(bios_sect, 0, 63), | ||
382 | IDE_PROC_DEVSET(log, 0, 1), | ||
383 | IDE_PROC_DEVSET(transform, 0, 3), | ||
384 | { 0 }, | ||
385 | }; | ||
386 | |||
387 | static ide_proc_entry_t *ide_scsi_proc_entries(ide_drive_t *drive) | ||
388 | { | ||
389 | return idescsi_proc; | ||
390 | } | ||
391 | |||
392 | static const struct ide_proc_devset *ide_scsi_proc_devsets(ide_drive_t *drive) | ||
393 | { | ||
394 | return idescsi_settings; | ||
395 | } | ||
396 | #endif | ||
397 | |||
398 | /* | ||
399 | * Driver initialization. | ||
400 | */ | ||
401 | static void idescsi_setup (ide_drive_t *drive, idescsi_scsi_t *scsi) | ||
402 | { | ||
403 | clear_bit(IDESCSI_SG_TRANSFORM, &scsi->transform); | ||
404 | #if IDESCSI_DEBUG_LOG | ||
405 | set_bit(IDESCSI_LOG_CMD, &scsi->log); | ||
406 | #endif /* IDESCSI_DEBUG_LOG */ | ||
407 | |||
408 | drive->pc_callback = ide_scsi_callback; | ||
409 | drive->pc_update_buffers = NULL; | ||
410 | drive->pc_io_buffers = ide_io_buffers; | ||
411 | |||
412 | ide_proc_register_driver(drive, scsi->driver); | ||
413 | } | ||
414 | |||
415 | static void ide_scsi_remove(ide_drive_t *drive) | ||
416 | { | ||
417 | struct Scsi_Host *scsihost = drive->driver_data; | ||
418 | struct ide_scsi_obj *scsi = scsihost_to_idescsi(scsihost); | ||
419 | struct gendisk *g = scsi->disk; | ||
420 | |||
421 | scsi_remove_host(scsihost); | ||
422 | ide_proc_unregister_driver(drive, scsi->driver); | ||
423 | |||
424 | ide_unregister_region(g); | ||
425 | |||
426 | drive->driver_data = NULL; | ||
427 | g->private_data = NULL; | ||
428 | put_disk(g); | ||
429 | |||
430 | ide_scsi_put(scsi); | ||
431 | |||
432 | drive->dev_flags &= ~IDE_DFLAG_SCSI; | ||
433 | } | ||
434 | |||
435 | static int ide_scsi_probe(ide_drive_t *); | ||
436 | |||
437 | static ide_driver_t idescsi_driver = { | ||
438 | .gen_driver = { | ||
439 | .owner = THIS_MODULE, | ||
440 | .name = "ide-scsi", | ||
441 | .bus = &ide_bus_type, | ||
442 | }, | ||
443 | .probe = ide_scsi_probe, | ||
444 | .remove = ide_scsi_remove, | ||
445 | .version = IDESCSI_VERSION, | ||
446 | .do_request = idescsi_do_request, | ||
447 | .end_request = idescsi_end_request, | ||
448 | .error = idescsi_atapi_error, | ||
449 | #ifdef CONFIG_IDE_PROC_FS | ||
450 | .proc_entries = ide_scsi_proc_entries, | ||
451 | .proc_devsets = ide_scsi_proc_devsets, | ||
452 | #endif | ||
453 | }; | ||
454 | |||
455 | static int idescsi_ide_open(struct block_device *bdev, fmode_t mode) | ||
456 | { | ||
457 | struct ide_scsi_obj *scsi = ide_scsi_get(bdev->bd_disk); | ||
458 | |||
459 | if (!scsi) | ||
460 | return -ENXIO; | ||
461 | |||
462 | return 0; | ||
463 | } | ||
464 | |||
465 | static int idescsi_ide_release(struct gendisk *disk, fmode_t mode) | ||
466 | { | ||
467 | ide_scsi_put(ide_scsi_g(disk)); | ||
468 | return 0; | ||
469 | } | ||
470 | |||
471 | static int idescsi_ide_ioctl(struct block_device *bdev, fmode_t mode, | ||
472 | unsigned int cmd, unsigned long arg) | ||
473 | { | ||
474 | struct ide_scsi_obj *scsi = ide_scsi_g(bdev->bd_disk); | ||
475 | return generic_ide_ioctl(scsi->drive, bdev, cmd, arg); | ||
476 | } | ||
477 | |||
478 | static struct block_device_operations idescsi_ops = { | ||
479 | .owner = THIS_MODULE, | ||
480 | .open = idescsi_ide_open, | ||
481 | .release = idescsi_ide_release, | ||
482 | .locked_ioctl = idescsi_ide_ioctl, | ||
483 | }; | ||
484 | |||
485 | static int idescsi_slave_configure(struct scsi_device * sdp) | ||
486 | { | ||
487 | /* Configure detected device */ | ||
488 | sdp->use_10_for_rw = 1; | ||
489 | sdp->use_10_for_ms = 1; | ||
490 | scsi_adjust_queue_depth(sdp, MSG_SIMPLE_TAG, sdp->host->cmd_per_lun); | ||
491 | return 0; | ||
492 | } | ||
493 | |||
494 | static const char *idescsi_info (struct Scsi_Host *host) | ||
495 | { | ||
496 | return "SCSI host adapter emulation for IDE ATAPI devices"; | ||
497 | } | ||
498 | |||
499 | static int idescsi_ioctl (struct scsi_device *dev, int cmd, void __user *arg) | ||
500 | { | ||
501 | idescsi_scsi_t *scsi = scsihost_to_idescsi(dev->host); | ||
502 | |||
503 | if (cmd == SG_SET_TRANSFORM) { | ||
504 | if (arg) | ||
505 | set_bit(IDESCSI_SG_TRANSFORM, &scsi->transform); | ||
506 | else | ||
507 | clear_bit(IDESCSI_SG_TRANSFORM, &scsi->transform); | ||
508 | return 0; | ||
509 | } else if (cmd == SG_GET_TRANSFORM) | ||
510 | return put_user(test_bit(IDESCSI_SG_TRANSFORM, &scsi->transform), (int __user *) arg); | ||
511 | return -EINVAL; | ||
512 | } | ||
513 | |||
514 | static int idescsi_queue (struct scsi_cmnd *cmd, | ||
515 | void (*done)(struct scsi_cmnd *)) | ||
516 | { | ||
517 | struct Scsi_Host *host = cmd->device->host; | ||
518 | idescsi_scsi_t *scsi = scsihost_to_idescsi(host); | ||
519 | ide_drive_t *drive = scsi->drive; | ||
520 | struct request *rq = NULL; | ||
521 | struct ide_atapi_pc *pc = NULL; | ||
522 | int write = cmd->sc_data_direction == DMA_TO_DEVICE; | ||
523 | |||
524 | if (!drive) { | ||
525 | scmd_printk (KERN_ERR, cmd, "drive not present\n"); | ||
526 | goto abort; | ||
527 | } | ||
528 | scsi = drive_to_idescsi(drive); | ||
529 | pc = kmalloc(sizeof(struct ide_atapi_pc), GFP_ATOMIC); | ||
530 | rq = blk_get_request(drive->queue, write, GFP_ATOMIC); | ||
531 | if (rq == NULL || pc == NULL) { | ||
532 | printk (KERN_ERR "ide-scsi: %s: out of memory\n", drive->name); | ||
533 | goto abort; | ||
534 | } | ||
535 | |||
536 | memset (pc->c, 0, 12); | ||
537 | pc->flags = 0; | ||
538 | if (cmd->sc_data_direction == DMA_TO_DEVICE) | ||
539 | pc->flags |= PC_FLAG_WRITING; | ||
540 | pc->rq = rq; | ||
541 | memcpy (pc->c, cmd->cmnd, cmd->cmd_len); | ||
542 | pc->buf = NULL; | ||
543 | pc->sg = scsi_sglist(cmd); | ||
544 | pc->sg_cnt = scsi_sg_count(cmd); | ||
545 | pc->b_count = 0; | ||
546 | pc->req_xfer = pc->buf_size = scsi_bufflen(cmd); | ||
547 | pc->scsi_cmd = cmd; | ||
548 | pc->done = done; | ||
549 | pc->timeout = jiffies + cmd->request->timeout; | ||
550 | |||
551 | if (test_bit(IDESCSI_LOG_CMD, &scsi->log)) { | ||
552 | printk ("ide-scsi: %s: que %lu, cmd = ", drive->name, cmd->serial_number); | ||
553 | ide_scsi_hex_dump(cmd->cmnd, cmd->cmd_len); | ||
554 | if (memcmp(pc->c, cmd->cmnd, cmd->cmd_len)) { | ||
555 | printk ("ide-scsi: %s: que %lu, tsl = ", drive->name, cmd->serial_number); | ||
556 | ide_scsi_hex_dump(pc->c, 12); | ||
557 | } | ||
558 | } | ||
559 | |||
560 | rq->special = (char *) pc; | ||
561 | rq->cmd_type = REQ_TYPE_SPECIAL; | ||
562 | spin_unlock_irq(host->host_lock); | ||
563 | rq->ref_count++; | ||
564 | memcpy(rq->cmd, pc->c, 12); | ||
565 | blk_execute_rq_nowait(drive->queue, scsi->disk, rq, 0, NULL); | ||
566 | spin_lock_irq(host->host_lock); | ||
567 | return 0; | ||
568 | abort: | ||
569 | kfree (pc); | ||
570 | if (rq) | ||
571 | blk_put_request(rq); | ||
572 | cmd->result = DID_ERROR << 16; | ||
573 | done(cmd); | ||
574 | return 0; | ||
575 | } | ||
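
The submit path above is an instance of a common 2.6.28-era block-layer pattern: a driver-private packet rides in rq->special and is executed asynchronously, with an extra reference taken because the driver still touches the request after completion. A condensed sketch of just that pattern, with illustrative names rather than the driver's own:

static int queue_private_packet(struct request_queue *q, struct gendisk *disk,
				void *packet, int write)
{
	struct request *rq = blk_get_request(q, write, GFP_ATOMIC);

	if (!rq)
		return -ENOMEM;
	rq->special = packet;		 /* driver-owned payload */
	rq->cmd_type = REQ_TYPE_SPECIAL; /* not a filesystem request */
	rq->ref_count++;		 /* driver keeps its own reference */
	blk_execute_rq_nowait(q, disk, rq, 0, NULL);
	return 0;
}

The extra reference is also why the error-handling paths in this file call blk_put_request() twice on the same request.
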
576 | |||
577 | static int idescsi_eh_abort (struct scsi_cmnd *cmd) | ||
578 | { | ||
579 | idescsi_scsi_t *scsi = scsihost_to_idescsi(cmd->device->host); | ||
580 | ide_drive_t *drive = scsi->drive; | ||
581 | ide_hwif_t *hwif; | ||
582 | ide_hwgroup_t *hwgroup; | ||
583 | int busy; | ||
584 | int ret = FAILED; | ||
585 | |||
586 | struct ide_atapi_pc *pc; | ||
587 | |||
588 | /* In idescsi_eh_abort we try to gently pry our command from the ide subsystem */ | ||
589 | |||
590 | if (test_bit(IDESCSI_LOG_CMD, &scsi->log)) | ||
591 | printk (KERN_WARNING "ide-scsi: abort called for %lu\n", cmd->serial_number); | ||
592 | |||
593 | if (!drive) { | ||
594 | printk (KERN_WARNING "ide-scsi: Drive not set in idescsi_eh_abort\n"); | ||
595 | WARN_ON(1); | ||
596 | goto no_drive; | ||
597 | } | ||
598 | |||
599 | hwif = drive->hwif; | ||
600 | hwgroup = hwif->hwgroup; | ||
601 | |||
602 | /* First give it some more time, how much is "right" is hard to say :-( | ||
603 | FIXME - uses mdelay which causes latency? */ | ||
604 | busy = ide_wait_not_busy(hwif, 100); | ||
605 | if (test_bit(IDESCSI_LOG_CMD, &scsi->log)) | ||
606 | printk (KERN_WARNING "ide-scsi: drive did%s become ready\n", busy?" not":""); | ||
607 | |||
608 | spin_lock_irq(&hwgroup->lock); | ||
609 | |||
610 | /* If there is no pc running we're done (our interrupt took care of it) */ | ||
611 | pc = drive->pc; | ||
612 | if (pc == NULL) { | ||
613 | ret = SUCCESS; | ||
614 | goto ide_unlock; | ||
615 | } | ||
616 | |||
617 | /* It's somewhere in flight. Does ide subsystem agree? */ | ||
618 | if (pc->scsi_cmd->serial_number == cmd->serial_number && !busy && | ||
619 | elv_queue_empty(drive->queue) && HWGROUP(drive)->rq != pc->rq) { | ||
620 | /* | ||
621 | * FIXME - not sure this condition can ever occur | ||
622 | */ | ||
623 | printk (KERN_ERR "ide-scsi: cmd aborted!\n"); | ||
624 | |||
625 | if (blk_sense_request(pc->rq)) | ||
626 | kfree(pc->buf); | ||
627 | /* we need to call blk_put_request twice. */ | ||
628 | blk_put_request(pc->rq); | ||
629 | blk_put_request(pc->rq); | ||
630 | kfree(pc); | ||
631 | drive->pc = NULL; | ||
632 | |||
633 | ret = SUCCESS; | ||
634 | } | ||
635 | |||
636 | ide_unlock: | ||
637 | spin_unlock_irq(&hwgroup->lock); | ||
638 | no_drive: | ||
639 | if (test_bit(IDESCSI_LOG_CMD, &scsi->log)) | ||
640 | printk (KERN_WARNING "ide-scsi: abort returns %s\n", ret == SUCCESS?"success":"failed"); | ||
641 | |||
642 | return ret; | ||
643 | } | ||
644 | |||
645 | static int idescsi_eh_reset (struct scsi_cmnd *cmd) | ||
646 | { | ||
647 | struct request *req; | ||
648 | idescsi_scsi_t *scsi = scsihost_to_idescsi(cmd->device->host); | ||
649 | ide_drive_t *drive = scsi->drive; | ||
650 | ide_hwgroup_t *hwgroup; | ||
651 | int ready = 0; | ||
652 | int ret = SUCCESS; | ||
653 | |||
654 | struct ide_atapi_pc *pc; | ||
655 | |||
656 | /* In idescsi_eh_reset we forcefully remove the command from the ide subsystem and reset the device. */ | ||
657 | |||
658 | if (test_bit(IDESCSI_LOG_CMD, &scsi->log)) | ||
659 | printk (KERN_WARNING "ide-scsi: reset called for %lu\n", cmd->serial_number); | ||
660 | |||
661 | if (!drive) { | ||
662 | printk (KERN_WARNING "ide-scsi: Drive not set in idescsi_eh_reset\n"); | ||
663 | WARN_ON(1); | ||
664 | return FAILED; | ||
665 | } | ||
666 | |||
667 | hwgroup = drive->hwif->hwgroup; | ||
668 | |||
669 | spin_lock_irq(cmd->device->host->host_lock); | ||
670 | spin_lock(&hwgroup->lock); | ||
671 | |||
672 | pc = drive->pc; | ||
673 | if (pc) | ||
674 | req = pc->rq; | ||
675 | |||
676 | if (pc == NULL || req != hwgroup->rq || hwgroup->handler == NULL) { | ||
677 | printk (KERN_WARNING "ide-scsi: No active request in idescsi_eh_reset\n"); | ||
678 | spin_unlock(&hwgroup->lock); | ||
679 | spin_unlock_irq(cmd->device->host->host_lock); | ||
680 | return FAILED; | ||
681 | } | ||
682 | |||
683 | /* kill current request */ | ||
684 | if (__blk_end_request(req, -EIO, 0)) | ||
685 | BUG(); | ||
686 | if (blk_sense_request(req)) | ||
687 | kfree(pc->buf); | ||
688 | kfree(pc); | ||
689 | drive->pc = NULL; | ||
690 | blk_put_request(req); | ||
691 | |||
692 | /* now nuke the drive queue */ | ||
693 | while ((req = elv_next_request(drive->queue))) { | ||
694 | if (__blk_end_request(req, -EIO, 0)) | ||
695 | BUG(); | ||
696 | } | ||
697 | |||
698 | hwgroup->rq = NULL; | ||
699 | hwgroup->handler = NULL; | ||
700 | hwgroup->busy = 1; /* will set this to zero when ide reset finished */ | ||
701 | spin_unlock(&hwgroup->lock); | ||
702 | |||
703 | ide_do_reset(drive); | ||
704 | |||
705 | /* ide_do_reset starts a polling handler which restarts itself every 50ms until the reset finishes */ | ||
706 | |||
707 | do { | ||
708 | spin_unlock_irq(cmd->device->host->host_lock); | ||
709 | msleep(50); | ||
710 | spin_lock_irq(cmd->device->host->host_lock); | ||
711 | } while ( HWGROUP(drive)->handler ); | ||
712 | |||
713 | ready = drive_is_ready(drive); | ||
714 | HWGROUP(drive)->busy--; | ||
715 | if (!ready) { | ||
716 | printk (KERN_ERR "ide-scsi: reset failed!\n"); | ||
717 | ret = FAILED; | ||
718 | } | ||
719 | |||
720 | spin_unlock_irq(cmd->device->host->host_lock); | ||
721 | return ret; | ||
722 | } | ||
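
The closing wait loop uses the standard drop-sleep-retake idiom: the SCSI midlayer holds host_lock across the handler, so it must be released while the polling reset handler (re-armed every 50 ms by ide_do_reset()) runs, and hwgroup->busy stays set so the IDE core starts no new request mid-reset. The bare pattern, as a sketch built only from what the diff shows:

static void wait_for_poll_handler(ide_drive_t *drive, spinlock_t *host_lock)
{
	do {
		spin_unlock_irq(host_lock);
		msleep(50);			/* handler re-arms at this rate */
		spin_lock_irq(host_lock);
	} while (HWGROUP(drive)->handler);	/* cleared when reset completes */
}
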
723 | |||
724 | static int idescsi_bios(struct scsi_device *sdev, struct block_device *bdev, | ||
725 | sector_t capacity, int *parm) | ||
726 | { | ||
727 | idescsi_scsi_t *idescsi = scsihost_to_idescsi(sdev->host); | ||
728 | ide_drive_t *drive = idescsi->drive; | ||
729 | |||
730 | if (drive->bios_cyl && drive->bios_head && drive->bios_sect) { | ||
731 | parm[0] = drive->bios_head; | ||
732 | parm[1] = drive->bios_sect; | ||
733 | parm[2] = drive->bios_cyl; | ||
734 | } | ||
735 | return 0; | ||
736 | } | ||
737 | |||
738 | static struct scsi_host_template idescsi_template = { | ||
739 | .module = THIS_MODULE, | ||
740 | .name = "idescsi", | ||
741 | .info = idescsi_info, | ||
742 | .slave_configure = idescsi_slave_configure, | ||
743 | .ioctl = idescsi_ioctl, | ||
744 | .queuecommand = idescsi_queue, | ||
745 | .eh_abort_handler = idescsi_eh_abort, | ||
746 | .eh_host_reset_handler = idescsi_eh_reset, | ||
747 | .bios_param = idescsi_bios, | ||
748 | .can_queue = 40, | ||
749 | .this_id = -1, | ||
750 | .sg_tablesize = 256, | ||
751 | .cmd_per_lun = 5, | ||
752 | .max_sectors = 128, | ||
753 | .use_clustering = DISABLE_CLUSTERING, | ||
754 | .emulated = 1, | ||
755 | .proc_name = "ide-scsi", | ||
756 | }; | ||
757 | |||
758 | static int ide_scsi_probe(ide_drive_t *drive) | ||
759 | { | ||
760 | idescsi_scsi_t *idescsi; | ||
761 | struct Scsi_Host *host; | ||
762 | struct gendisk *g; | ||
763 | static int warned; | ||
764 | int err = -ENOMEM; | ||
765 | u16 last_lun; | ||
766 | |||
767 | if (!warned && drive->media == ide_cdrom) { | ||
768 | printk(KERN_WARNING "ide-scsi is deprecated for cd burning! Use ide-cd and give dev=/dev/hdX as device\n"); | ||
769 | warned = 1; | ||
770 | } | ||
771 | |||
772 | if (idescsi_nocd && drive->media == ide_cdrom) | ||
773 | return -ENODEV; | ||
774 | |||
775 | if (!strstr("ide-scsi", drive->driver_req) || | ||
776 | drive->media == ide_disk || | ||
777 | !(host = scsi_host_alloc(&idescsi_template,sizeof(idescsi_scsi_t)))) | ||
778 | return -ENODEV; | ||
779 | |||
780 | drive->dev_flags |= IDE_DFLAG_SCSI; | ||
781 | |||
782 | g = alloc_disk(1 << PARTN_BITS); | ||
783 | if (!g) | ||
784 | goto out_host_put; | ||
785 | |||
786 | ide_init_disk(g, drive); | ||
787 | |||
788 | host->max_id = 1; | ||
789 | |||
790 | last_lun = drive->id[ATA_ID_LAST_LUN]; | ||
791 | if (last_lun) | ||
792 | debug_log("%s: last_lun=%u\n", drive->name, last_lun); | ||
793 | |||
794 | if ((last_lun & 7) != 7) | ||
795 | host->max_lun = (last_lun & 7) + 1; | ||
796 | else | ||
797 | host->max_lun = 1; | ||
798 | |||
799 | drive->driver_data = host; | ||
800 | idescsi = scsihost_to_idescsi(host); | ||
801 | idescsi->drive = drive; | ||
802 | idescsi->driver = &idescsi_driver; | ||
803 | idescsi->host = host; | ||
804 | idescsi->disk = g; | ||
805 | g->private_data = &idescsi->driver; | ||
806 | err = 0; | ||
807 | idescsi_setup(drive, idescsi); | ||
808 | g->fops = &idescsi_ops; | ||
809 | ide_register_region(g); | ||
810 | err = scsi_add_host(host, &drive->gendev); | ||
811 | if (!err) { | ||
812 | scsi_scan_host(host); | ||
813 | return 0; | ||
814 | } | ||
815 | /* fall through on error */ | ||
816 | ide_unregister_region(g); | ||
817 | ide_proc_unregister_driver(drive, &idescsi_driver); | ||
818 | |||
819 | put_disk(g); | ||
820 | out_host_put: | ||
821 | drive->dev_flags &= ~IDE_DFLAG_SCSI; | ||
822 | scsi_host_put(host); | ||
823 | return err; | ||
824 | } | ||
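
As a worked example of the LUN clamp above: a device whose IDENTIFY last-LUN word carries 2 in its low three bits yields host->max_lun = 3 (LUNs 0 through 2), while the all-ones value 7 is treated as not reported and falls back to a single LUN.
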
825 | |||
826 | static int __init init_idescsi_module(void) | ||
827 | { | ||
828 | return driver_register(&idescsi_driver.gen_driver); | ||
829 | } | ||
830 | |||
831 | static void __exit exit_idescsi_module(void) | ||
832 | { | ||
833 | driver_unregister(&idescsi_driver.gen_driver); | ||
834 | } | ||
835 | |||
836 | module_param(idescsi_nocd, int, 0600); | ||
837 | MODULE_PARM_DESC(idescsi_nocd, "Disable handling of CD-ROMs so they may be driven by ide-cd"); | ||
838 | module_init(init_idescsi_module); | ||
839 | module_exit(exit_idescsi_module); | ||
840 | MODULE_LICENSE("GPL"); | ||
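
With the parameter declared above, the CD handoff can be requested at load time:

	modprobe ide-scsi idescsi_nocd=1

which leaves CD-ROM drives to ide-cd, as the deprecation warning in ide_scsi_probe() suggests.
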
diff --git a/drivers/serial/8250.c b/drivers/serial/8250.c index 303272af386e..daa00567bc44 100644 --- a/drivers/serial/8250.c +++ b/drivers/serial/8250.c | |||
@@ -279,6 +279,13 @@ static const struct serial8250_config uart_config[] = { | |||
279 | .fcr = UART_FCR_ENABLE_FIFO | UART_FCR_R_TRIG_10, | 279 | .fcr = UART_FCR_ENABLE_FIFO | UART_FCR_R_TRIG_10, |
280 | .flags = UART_CAP_FIFO, | 280 | .flags = UART_CAP_FIFO, |
281 | }, | 281 | }, |
282 | [PORT_OCTEON] = { | ||
283 | .name = "OCTEON", | ||
284 | .fifo_size = 64, | ||
285 | .tx_loadsz = 64, | ||
286 | .fcr = UART_FCR_ENABLE_FIFO | UART_FCR_R_TRIG_10, | ||
287 | .flags = UART_CAP_FIFO, | ||
288 | }, | ||
282 | }; | 289 | }; |
283 | 290 | ||
284 | #if defined (CONFIG_SERIAL_8250_AU1X00) | 291 | #if defined (CONFIG_SERIAL_8250_AU1X00) |
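
The new [PORT_OCTEON] entry is consumed by type index: a platform that already knows its UART flavor can register with a fixed type and have the FIFO geometry above applied without autoprobing (see the UPF_FIXED_TYPE hunk added to serial8250_register_port() further below). A hypothetical board sketch; the regshift and clock values are purely illustrative, and membase/mapbase/irq setup is omitted:

static struct uart_port octeon_uart = {
	.type		= PORT_OCTEON,		/* index into uart_config[] */
	.iotype		= UPIO_MEM,
	.flags		= UPF_BOOT_AUTOCONF | UPF_FIXED_TYPE,
	.regshift	= 3,			/* assumed 8-byte register stride */
	.uartclk	= 500000000,		/* assumed input clock */
};

static int __init octeon_uart_register(void)
{
	/* returns the allocated line number, or a negative errno */
	return serial8250_register_port(&octeon_uart);
}
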
@@ -303,16 +310,16 @@ static const u8 au_io_out_map[] = { | |||
303 | }; | 310 | }; |
304 | 311 | ||
305 | /* sane hardware needs no mapping */ | 312 | /* sane hardware needs no mapping */ |
306 | static inline int map_8250_in_reg(struct uart_8250_port *up, int offset) | 313 | static inline int map_8250_in_reg(struct uart_port *p, int offset) |
307 | { | 314 | { |
308 | if (up->port.iotype != UPIO_AU) | 315 | if (p->iotype != UPIO_AU) |
309 | return offset; | 316 | return offset; |
310 | return au_io_in_map[offset]; | 317 | return au_io_in_map[offset]; |
311 | } | 318 | } |
312 | 319 | ||
313 | static inline int map_8250_out_reg(struct uart_8250_port *up, int offset) | 320 | static inline int map_8250_out_reg(struct uart_port *p, int offset) |
314 | { | 321 | { |
315 | if (up->port.iotype != UPIO_AU) | 322 | if (p->iotype != UPIO_AU) |
316 | return offset; | 323 | return offset; |
317 | return au_io_out_map[offset]; | 324 | return au_io_out_map[offset]; |
318 | } | 325 | } |
@@ -341,16 +348,16 @@ static const u8 | |||
341 | [UART_SCR] = 0x2c | 348 | [UART_SCR] = 0x2c |
342 | }; | 349 | }; |
343 | 350 | ||
344 | static inline int map_8250_in_reg(struct uart_8250_port *up, int offset) | 351 | static inline int map_8250_in_reg(struct uart_port *p, int offset) |
345 | { | 352 | { |
346 | if (up->port.iotype != UPIO_RM9000) | 353 | if (p->iotype != UPIO_RM9000) |
347 | return offset; | 354 | return offset; |
348 | return regmap_in[offset]; | 355 | return regmap_in[offset]; |
349 | } | 356 | } |
350 | 357 | ||
351 | static inline int map_8250_out_reg(struct uart_8250_port *up, int offset) | 358 | static inline int map_8250_out_reg(struct uart_port *p, int offset) |
352 | { | 359 | { |
353 | if (up->port.iotype != UPIO_RM9000) | 360 | if (p->iotype != UPIO_RM9000) |
354 | return offset; | 361 | return offset; |
355 | return regmap_out[offset]; | 362 | return regmap_out[offset]; |
356 | } | 363 | } |
@@ -363,108 +370,170 @@ static inline int map_8250_out_reg(struct uart_8250_port *up, int offset) | |||
363 | 370 | ||
364 | #endif | 371 | #endif |
365 | 372 | ||
366 | static unsigned int serial_in(struct uart_8250_port *up, int offset) | 373 | static unsigned int hub6_serial_in(struct uart_port *p, int offset) |
367 | { | 374 | { |
368 | unsigned int tmp; | 375 | offset = map_8250_in_reg(p, offset) << p->regshift; |
369 | offset = map_8250_in_reg(up, offset) << up->port.regshift; | 376 | outb(p->hub6 - 1 + offset, p->iobase); |
377 | return inb(p->iobase + 1); | ||
378 | } | ||
370 | 379 | ||
371 | switch (up->port.iotype) { | 380 | static void hub6_serial_out(struct uart_port *p, int offset, int value) |
372 | case UPIO_HUB6: | 381 | { |
373 | outb(up->port.hub6 - 1 + offset, up->port.iobase); | 382 | offset = map_8250_out_reg(p, offset) << p->regshift; |
374 | return inb(up->port.iobase + 1); | 383 | outb(p->hub6 - 1 + offset, p->iobase); |
384 | outb(value, p->iobase + 1); | ||
385 | } | ||
375 | 386 | ||
376 | case UPIO_MEM: | 387 | static unsigned int mem_serial_in(struct uart_port *p, int offset) |
377 | case UPIO_DWAPB: | 388 | { |
378 | return readb(up->port.membase + offset); | 389 | offset = map_8250_in_reg(p, offset) << p->regshift; |
390 | return readb(p->membase + offset); | ||
391 | } | ||
379 | 392 | ||
380 | case UPIO_RM9000: | 393 | static void mem_serial_out(struct uart_port *p, int offset, int value) |
381 | case UPIO_MEM32: | 394 | { |
382 | return readl(up->port.membase + offset); | 395 | offset = map_8250_out_reg(p, offset) << p->regshift; |
396 | writeb(value, p->membase + offset); | ||
397 | } | ||
398 | |||
399 | static void mem32_serial_out(struct uart_port *p, int offset, int value) | ||
400 | { | ||
401 | offset = map_8250_out_reg(p, offset) << p->regshift; | ||
402 | writel(value, p->membase + offset); | ||
403 | } | ||
404 | |||
405 | static unsigned int mem32_serial_in(struct uart_port *p, int offset) | ||
406 | { | ||
407 | offset = map_8250_in_reg(p, offset) << p->regshift; | ||
408 | return readl(p->membase + offset); | ||
409 | } | ||
383 | 410 | ||
384 | #ifdef CONFIG_SERIAL_8250_AU1X00 | 411 | #ifdef CONFIG_SERIAL_8250_AU1X00 |
385 | case UPIO_AU: | 412 | static unsigned int au_serial_in(struct uart_port *p, int offset) |
386 | return __raw_readl(up->port.membase + offset); | 413 | { |
414 | offset = map_8250_in_reg(p, offset) << p->regshift; | ||
415 | return __raw_readl(p->membase + offset); | ||
416 | } | ||
417 | |||
418 | static void au_serial_out(struct uart_port *p, int offset, int value) | ||
419 | { | ||
420 | offset = map_8250_out_reg(p, offset) << p->regshift; | ||
421 | __raw_writel(value, p->membase + offset); | ||
422 | } | ||
387 | #endif | 423 | #endif |
388 | 424 | ||
389 | case UPIO_TSI: | 425 | static unsigned int tsi_serial_in(struct uart_port *p, int offset) |
390 | if (offset == UART_IIR) { | 426 | { |
391 | tmp = readl(up->port.membase + (UART_IIR & ~3)); | 427 | unsigned int tmp; |
392 | return (tmp >> 16) & 0xff; /* UART_IIR % 4 == 2 */ | 428 | offset = map_8250_in_reg(p, offset) << p->regshift; |
393 | } else | 429 | if (offset == UART_IIR) { |
394 | return readb(up->port.membase + offset); | 430 | tmp = readl(p->membase + (UART_IIR & ~3)); |
431 | return (tmp >> 16) & 0xff; /* UART_IIR % 4 == 2 */ | ||
432 | } else | ||
433 | return readb(p->membase + offset); | ||
434 | } | ||
395 | 435 | ||
396 | default: | 436 | static void tsi_serial_out(struct uart_port *p, int offset, int value) |
397 | return inb(up->port.iobase + offset); | 437 | { |
398 | } | 438 | offset = map_8250_out_reg(p, offset) << p->regshift; |
439 | if (!((offset == UART_IER) && (value & UART_IER_UUE))) | ||
440 | writeb(value, p->membase + offset); | ||
399 | } | 441 | } |
400 | 442 | ||
401 | static void | 443 | static void dwapb_serial_out(struct uart_port *p, int offset, int value) |
402 | serial_out(struct uart_8250_port *up, int offset, int value) | ||
403 | { | 444 | { |
404 | /* Save the offset before it's remapped */ | ||
405 | int save_offset = offset; | 445 | int save_offset = offset; |
406 | offset = map_8250_out_reg(up, offset) << up->port.regshift; | 446 | offset = map_8250_out_reg(p, offset) << p->regshift; |
447 | /* Save the LCR value so it can be re-written when a | ||
448 | * Busy Detect interrupt occurs. */ | ||
449 | if (save_offset == UART_LCR) { | ||
450 | struct uart_8250_port *up = (struct uart_8250_port *)p; | ||
451 | up->lcr = value; | ||
452 | } | ||
453 | writeb(value, p->membase + offset); | ||
454 | /* Read the IER to ensure any interrupt is cleared before | ||
455 | * returning from ISR. */ | ||
456 | if (save_offset == UART_TX || save_offset == UART_IER) | ||
457 | value = p->serial_in(p, UART_IER); | ||
458 | } | ||
407 | 459 | ||
408 | switch (up->port.iotype) { | 460 | static unsigned int io_serial_in(struct uart_port *p, int offset) |
461 | { | ||
462 | offset = map_8250_in_reg(p, offset) << p->regshift; | ||
463 | return inb(p->iobase + offset); | ||
464 | } | ||
465 | |||
466 | static void io_serial_out(struct uart_port *p, int offset, int value) | ||
467 | { | ||
468 | offset = map_8250_out_reg(p, offset) << p->regshift; | ||
469 | outb(value, p->iobase + offset); | ||
470 | } | ||
471 | |||
472 | static void set_io_from_upio(struct uart_port *p) | ||
473 | { | ||
474 | switch (p->iotype) { | ||
409 | case UPIO_HUB6: | 475 | case UPIO_HUB6: |
410 | outb(up->port.hub6 - 1 + offset, up->port.iobase); | 476 | p->serial_in = hub6_serial_in; |
411 | outb(value, up->port.iobase + 1); | 477 | p->serial_out = hub6_serial_out; |
412 | break; | 478 | break; |
413 | 479 | ||
414 | case UPIO_MEM: | 480 | case UPIO_MEM: |
415 | writeb(value, up->port.membase + offset); | 481 | p->serial_in = mem_serial_in; |
482 | p->serial_out = mem_serial_out; | ||
416 | break; | 483 | break; |
417 | 484 | ||
418 | case UPIO_RM9000: | 485 | case UPIO_RM9000: |
419 | case UPIO_MEM32: | 486 | case UPIO_MEM32: |
420 | writel(value, up->port.membase + offset); | 487 | p->serial_in = mem32_serial_in; |
488 | p->serial_out = mem32_serial_out; | ||
421 | break; | 489 | break; |
422 | 490 | ||
423 | #ifdef CONFIG_SERIAL_8250_AU1X00 | 491 | #ifdef CONFIG_SERIAL_8250_AU1X00 |
424 | case UPIO_AU: | 492 | case UPIO_AU: |
425 | __raw_writel(value, up->port.membase + offset); | 493 | p->serial_in = au_serial_in; |
494 | p->serial_out = au_serial_out; | ||
426 | break; | 495 | break; |
427 | #endif | 496 | #endif |
428 | case UPIO_TSI: | 497 | case UPIO_TSI: |
429 | if (!((offset == UART_IER) && (value & UART_IER_UUE))) | 498 | p->serial_in = tsi_serial_in; |
430 | writeb(value, up->port.membase + offset); | 499 | p->serial_out = tsi_serial_out; |
431 | break; | 500 | break; |
432 | 501 | ||
433 | case UPIO_DWAPB: | 502 | case UPIO_DWAPB: |
434 | /* Save the LCR value so it can be re-written when a | 503 | p->serial_in = mem_serial_in; |
435 | * Busy Detect interrupt occurs. */ | 504 | p->serial_out = dwapb_serial_out; |
436 | if (save_offset == UART_LCR) | ||
437 | up->lcr = value; | ||
438 | writeb(value, up->port.membase + offset); | ||
439 | /* Read the IER to ensure any interrupt is cleared before | ||
440 | * returning from ISR. */ | ||
441 | if (save_offset == UART_TX || save_offset == UART_IER) | ||
442 | value = serial_in(up, UART_IER); | ||
443 | break; | 505 | break; |
444 | 506 | ||
445 | default: | 507 | default: |
446 | outb(value, up->port.iobase + offset); | 508 | p->serial_in = io_serial_in; |
509 | p->serial_out = io_serial_out; | ||
510 | break; | ||
447 | } | 511 | } |
448 | } | 512 | } |
449 | 513 | ||
450 | static void | 514 | static void |
451 | serial_out_sync(struct uart_8250_port *up, int offset, int value) | 515 | serial_out_sync(struct uart_8250_port *up, int offset, int value) |
452 | { | 516 | { |
453 | switch (up->port.iotype) { | 517 | struct uart_port *p = &up->port; |
518 | switch (p->iotype) { | ||
454 | case UPIO_MEM: | 519 | case UPIO_MEM: |
455 | case UPIO_MEM32: | 520 | case UPIO_MEM32: |
456 | #ifdef CONFIG_SERIAL_8250_AU1X00 | 521 | #ifdef CONFIG_SERIAL_8250_AU1X00 |
457 | case UPIO_AU: | 522 | case UPIO_AU: |
458 | #endif | 523 | #endif |
459 | case UPIO_DWAPB: | 524 | case UPIO_DWAPB: |
460 | serial_out(up, offset, value); | 525 | p->serial_out(p, offset, value); |
461 | serial_in(up, UART_LCR); /* safe, no side-effects */ | 526 | p->serial_in(p, UART_LCR); /* safe, no side-effects */ |
462 | break; | 527 | break; |
463 | default: | 528 | default: |
464 | serial_out(up, offset, value); | 529 | p->serial_out(p, offset, value); |
465 | } | 530 | } |
466 | } | 531 | } |
467 | 532 | ||
533 | #define serial_in(up, offset) \ | ||
534 | (up->port.serial_in(&(up)->port, (offset))) | ||
535 | #define serial_out(up, offset, value) \ | ||
536 | (up->port.serial_out(&(up)->port, (offset), (value))) | ||
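
These macros keep every existing serial_in(up, ...) call site intact while routing through the new per-port function pointers. The payoff is that platform code can now swap in quirky accessors before registration; a hypothetical override for a UART whose registers must be accessed 32 bits wide:

static unsigned int quirk_serial_in(struct uart_port *p, int offset)
{
	return readl(p->membase + (offset << p->regshift)) & 0xff;
}

static void quirk_serial_out(struct uart_port *p, int offset, int value)
{
	writel(value, p->membase + (offset << p->regshift));
}

static void quirk_wire_port(struct uart_port *port)
{
	/* serial8250_register_port() preserves caller-supplied accessors;
	 * see the "Possibly override default I/O functions" hunk below. */
	port->serial_in = quirk_serial_in;
	port->serial_out = quirk_serial_out;
}
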
468 | /* | 537 | /* |
469 | * We used to support using pause I/O for certain machines. We | 538 | * We used to support using pause I/O for certain machines. We |
470 | * haven't supported this for a while, but just in case it's badly | 539 | * haven't supported this for a while, but just in case it's badly |
@@ -2576,6 +2645,7 @@ static void __init serial8250_isa_init_ports(void) | |||
2576 | up->port.membase = old_serial_port[i].iomem_base; | 2645 | up->port.membase = old_serial_port[i].iomem_base; |
2577 | up->port.iotype = old_serial_port[i].io_type; | 2646 | up->port.iotype = old_serial_port[i].io_type; |
2578 | up->port.regshift = old_serial_port[i].iomem_reg_shift; | 2647 | up->port.regshift = old_serial_port[i].iomem_reg_shift; |
2648 | set_io_from_upio(&up->port); | ||
2579 | if (share_irqs) | 2649 | if (share_irqs) |
2580 | up->port.flags |= UPF_SHARE_IRQ; | 2650 | up->port.flags |= UPF_SHARE_IRQ; |
2581 | } | 2651 | } |
@@ -2752,12 +2822,30 @@ static struct uart_driver serial8250_reg = { | |||
2752 | */ | 2822 | */ |
2753 | int __init early_serial_setup(struct uart_port *port) | 2823 | int __init early_serial_setup(struct uart_port *port) |
2754 | { | 2824 | { |
2825 | struct uart_port *p; | ||
2826 | |||
2755 | if (port->line >= ARRAY_SIZE(serial8250_ports)) | 2827 | if (port->line >= ARRAY_SIZE(serial8250_ports)) |
2756 | return -ENODEV; | 2828 | return -ENODEV; |
2757 | 2829 | ||
2758 | serial8250_isa_init_ports(); | 2830 | serial8250_isa_init_ports(); |
2759 | serial8250_ports[port->line].port = *port; | 2831 | p = &serial8250_ports[port->line].port; |
2760 | serial8250_ports[port->line].port.ops = &serial8250_pops; | 2832 | p->iobase = port->iobase; |
2833 | p->membase = port->membase; | ||
2834 | p->irq = port->irq; | ||
2835 | p->uartclk = port->uartclk; | ||
2836 | p->fifosize = port->fifosize; | ||
2837 | p->regshift = port->regshift; | ||
2838 | p->iotype = port->iotype; | ||
2839 | p->flags = port->flags; | ||
2840 | p->mapbase = port->mapbase; | ||
2841 | p->private_data = port->private_data; | ||
2842 | |||
2843 | set_io_from_upio(p); | ||
2844 | if (port->serial_in) | ||
2845 | p->serial_in = port->serial_in; | ||
2846 | if (port->serial_out) | ||
2847 | p->serial_out = port->serial_out; | ||
2848 | |||
2761 | return 0; | 2849 | return 0; |
2762 | } | 2850 | } |
2763 | 2851 | ||
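
Copying the fields one by one, rather than keeping the old whole-struct assignment, is deliberate: a struct copy would clobber the serial_in/serial_out defaults just installed by set_io_from_upio() (and the ops pointer) with whatever the caller happened to leave in its uart_port, typically NULL, whereas the explicit copy lets a caller-supplied accessor win only when one was actually provided.
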
@@ -2822,6 +2910,9 @@ static int __devinit serial8250_probe(struct platform_device *dev) | |||
2822 | port.mapbase = p->mapbase; | 2910 | port.mapbase = p->mapbase; |
2823 | port.hub6 = p->hub6; | 2911 | port.hub6 = p->hub6; |
2824 | port.private_data = p->private_data; | 2912 | port.private_data = p->private_data; |
2913 | port.type = p->type; | ||
2914 | port.serial_in = p->serial_in; | ||
2915 | port.serial_out = p->serial_out; | ||
2825 | port.dev = &dev->dev; | 2916 | port.dev = &dev->dev; |
2826 | if (share_irqs) | 2917 | if (share_irqs) |
2827 | port.flags |= UPF_SHARE_IRQ; | 2918 | port.flags |= UPF_SHARE_IRQ; |
@@ -2976,6 +3067,20 @@ int serial8250_register_port(struct uart_port *port) | |||
2976 | if (port->dev) | 3067 | if (port->dev) |
2977 | uart->port.dev = port->dev; | 3068 | uart->port.dev = port->dev; |
2978 | 3069 | ||
3070 | if (port->flags & UPF_FIXED_TYPE) { | ||
3071 | uart->port.type = port->type; | ||
3072 | uart->port.fifosize = uart_config[port->type].fifo_size; | ||
3073 | uart->capabilities = uart_config[port->type].flags; | ||
3074 | uart->tx_loadsz = uart_config[port->type].tx_loadsz; | ||
3075 | } | ||
3076 | |||
3077 | set_io_from_upio(&uart->port); | ||
3078 | /* Possibly override default I/O functions. */ | ||
3079 | if (port->serial_in) | ||
3080 | uart->port.serial_in = port->serial_in; | ||
3081 | if (port->serial_out) | ||
3082 | uart->port.serial_out = port->serial_out; | ||
3083 | |||
2979 | ret = uart_add_one_port(&serial8250_reg, &uart->port); | 3084 | ret = uart_add_one_port(&serial8250_reg, &uart->port); |
2980 | if (ret == 0) | 3085 | if (ret == 0) |
2981 | ret = uart->port.line; | 3086 | ret = uart->port.line; |
diff --git a/drivers/serial/8250_pci.c b/drivers/serial/8250_pci.c index 5450a0e5ecdb..c088146b7513 100644 --- a/drivers/serial/8250_pci.c +++ b/drivers/serial/8250_pci.c | |||
@@ -42,7 +42,8 @@ struct pci_serial_quirk { | |||
42 | u32 subvendor; | 42 | u32 subvendor; |
43 | u32 subdevice; | 43 | u32 subdevice; |
44 | int (*init)(struct pci_dev *dev); | 44 | int (*init)(struct pci_dev *dev); |
45 | int (*setup)(struct serial_private *, struct pciserial_board *, | 45 | int (*setup)(struct serial_private *, |
46 | const struct pciserial_board *, | ||
46 | struct uart_port *, int); | 47 | struct uart_port *, int); |
47 | void (*exit)(struct pci_dev *dev); | 48 | void (*exit)(struct pci_dev *dev); |
48 | }; | 49 | }; |
@@ -107,7 +108,7 @@ setup_port(struct serial_private *priv, struct uart_port *port, | |||
107 | * ADDI-DATA GmbH communication cards <info@addi-data.com> | 108 | * ADDI-DATA GmbH communication cards <info@addi-data.com> |
108 | */ | 109 | */ |
109 | static int addidata_apci7800_setup(struct serial_private *priv, | 110 | static int addidata_apci7800_setup(struct serial_private *priv, |
110 | struct pciserial_board *board, | 111 | const struct pciserial_board *board, |
111 | struct uart_port *port, int idx) | 112 | struct uart_port *port, int idx) |
112 | { | 113 | { |
113 | unsigned int bar = 0, offset = board->first_offset; | 114 | unsigned int bar = 0, offset = board->first_offset; |
@@ -134,7 +135,7 @@ static int addidata_apci7800_setup(struct serial_private *priv, | |||
134 | * Not that ugly ;) -- HW | 135 | * Not that ugly ;) -- HW |
135 | */ | 136 | */ |
136 | static int | 137 | static int |
137 | afavlab_setup(struct serial_private *priv, struct pciserial_board *board, | 138 | afavlab_setup(struct serial_private *priv, const struct pciserial_board *board, |
138 | struct uart_port *port, int idx) | 139 | struct uart_port *port, int idx) |
139 | { | 140 | { |
140 | unsigned int bar, offset = board->first_offset; | 141 | unsigned int bar, offset = board->first_offset; |
@@ -188,8 +189,9 @@ static int pci_hp_diva_init(struct pci_dev *dev) | |||
188 | * some serial ports are supposed to be hidden on certain models. | 189 | * some serial ports are supposed to be hidden on certain models. |
189 | */ | 190 | */ |
190 | static int | 191 | static int |
191 | pci_hp_diva_setup(struct serial_private *priv, struct pciserial_board *board, | 192 | pci_hp_diva_setup(struct serial_private *priv, |
192 | struct uart_port *port, int idx) | 193 | const struct pciserial_board *board, |
194 | struct uart_port *port, int idx) | ||
193 | { | 195 | { |
194 | unsigned int offset = board->first_offset; | 196 | unsigned int offset = board->first_offset; |
195 | unsigned int bar = FL_GET_BASE(board->flags); | 197 | unsigned int bar = FL_GET_BASE(board->flags); |
@@ -306,7 +308,7 @@ static void __devexit pci_plx9050_exit(struct pci_dev *dev) | |||
306 | 308 | ||
307 | /* SBS Technologies Inc. PMC-OCTPRO and P-OCTAL cards */ | 309 | /* SBS Technologies Inc. PMC-OCTPRO and P-OCTAL cards */ |
308 | static int | 310 | static int |
309 | sbs_setup(struct serial_private *priv, struct pciserial_board *board, | 311 | sbs_setup(struct serial_private *priv, const struct pciserial_board *board, |
310 | struct uart_port *port, int idx) | 312 | struct uart_port *port, int idx) |
311 | { | 313 | { |
312 | unsigned int bar, offset = board->first_offset; | 314 | unsigned int bar, offset = board->first_offset; |
@@ -463,7 +465,7 @@ static int pci_siig_init(struct pci_dev *dev) | |||
463 | } | 465 | } |
464 | 466 | ||
465 | static int pci_siig_setup(struct serial_private *priv, | 467 | static int pci_siig_setup(struct serial_private *priv, |
466 | struct pciserial_board *board, | 468 | const struct pciserial_board *board, |
467 | struct uart_port *port, int idx) | 469 | struct uart_port *port, int idx) |
468 | { | 470 | { |
469 | unsigned int bar = FL_GET_BASE(board->flags) + idx, offset = 0; | 471 | unsigned int bar = FL_GET_BASE(board->flags) + idx, offset = 0; |
@@ -534,7 +536,8 @@ static int pci_timedia_init(struct pci_dev *dev) | |||
534 | * Ugh, this is ugly as all hell --- TYT | 536 | * Ugh, this is ugly as all hell --- TYT |
535 | */ | 537 | */ |
536 | static int | 538 | static int |
537 | pci_timedia_setup(struct serial_private *priv, struct pciserial_board *board, | 539 | pci_timedia_setup(struct serial_private *priv, |
540 | const struct pciserial_board *board, | ||
538 | struct uart_port *port, int idx) | 541 | struct uart_port *port, int idx) |
539 | { | 542 | { |
540 | unsigned int bar = 0, offset = board->first_offset; | 543 | unsigned int bar = 0, offset = board->first_offset; |
@@ -568,7 +571,7 @@ pci_timedia_setup(struct serial_private *priv, struct pciserial_board *board, | |||
568 | */ | 571 | */ |
569 | static int | 572 | static int |
570 | titan_400l_800l_setup(struct serial_private *priv, | 573 | titan_400l_800l_setup(struct serial_private *priv, |
571 | struct pciserial_board *board, | 574 | const struct pciserial_board *board, |
572 | struct uart_port *port, int idx) | 575 | struct uart_port *port, int idx) |
573 | { | 576 | { |
574 | unsigned int bar, offset = board->first_offset; | 577 | unsigned int bar, offset = board->first_offset; |
@@ -737,8 +740,41 @@ static void __devexit pci_ite887x_exit(struct pci_dev *dev) | |||
737 | release_region(ioport, ITE_887x_IOSIZE); | 740 | release_region(ioport, ITE_887x_IOSIZE); |
738 | } | 741 | } |
739 | 742 | ||
743 | /* | ||
744 | * Oxford Semiconductor Inc. | ||
745 | * Check that device is part of the Tornado range of devices, then determine | ||
746 | * the number of ports available on the device. | ||
747 | */ | ||
748 | static int pci_oxsemi_tornado_init(struct pci_dev *dev) | ||
749 | { | ||
750 | u8 __iomem *p; | ||
751 | unsigned long deviceID; | ||
752 | unsigned int number_uarts = 0; | ||
753 | |||
754 | /* OxSemi Tornado devices are all 0xCxxx */ | ||
755 | if (dev->vendor == PCI_VENDOR_ID_OXSEMI && | ||
756 | (dev->device & 0xF000) != 0xC000) | ||
757 | return 0; | ||
758 | |||
759 | p = pci_iomap(dev, 0, 5); | ||
760 | if (p == NULL) | ||
761 | return -ENOMEM; | ||
762 | |||
763 | deviceID = ioread32(p); | ||
764 | /* Tornado device */ | ||
765 | if (deviceID == 0x07000200) { | ||
766 | number_uarts = ioread8(p + 4); | ||
767 | printk(KERN_DEBUG | ||
768 | "%d ports detected on Oxford PCI Express device\n", | ||
769 | number_uarts); | ||
770 | } | ||
771 | pci_iounmap(dev, p); | ||
772 | return number_uarts; | ||
773 | } | ||
774 | |||
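
Moving the Tornado probe into a quirk ->init hook relies on the init-return convention: a negative value aborts the probe, zero keeps the board's static port count, and a positive value overrides it. Roughly how pciserial_init_ports() consumes it (paraphrased; the caller sits outside this hunk):

	if (quirk->init) {
		rc = quirk->init(dev);
		if (rc < 0)
			goto err_out;	/* probe failed, abort */
		if (rc)
			nr_ports = rc;	/* quirk detected the real port count */
	}
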
740 | static int | 775 | static int |
741 | pci_default_setup(struct serial_private *priv, struct pciserial_board *board, | 776 | pci_default_setup(struct serial_private *priv, |
777 | const struct pciserial_board *board, | ||
742 | struct uart_port *port, int idx) | 778 | struct uart_port *port, int idx) |
743 | { | 779 | { |
744 | unsigned int bar, offset = board->first_offset, maxnr; | 780 | unsigned int bar, offset = board->first_offset, maxnr; |
@@ -1018,6 +1054,25 @@ static struct pci_serial_quirk pci_serial_quirks[] __refdata = { | |||
1018 | .setup = pci_default_setup, | 1054 | .setup = pci_default_setup, |
1019 | }, | 1055 | }, |
1020 | /* | 1056 | /* |
1057 | * For Oxford Semiconductor and Mainpine | ||
1058 | */ | ||
1059 | { | ||
1060 | .vendor = PCI_VENDOR_ID_OXSEMI, | ||
1061 | .device = PCI_ANY_ID, | ||
1062 | .subvendor = PCI_ANY_ID, | ||
1063 | .subdevice = PCI_ANY_ID, | ||
1064 | .init = pci_oxsemi_tornado_init, | ||
1065 | .setup = pci_default_setup, | ||
1066 | }, | ||
1067 | { | ||
1068 | .vendor = PCI_VENDOR_ID_MAINPINE, | ||
1069 | .device = PCI_ANY_ID, | ||
1070 | .subvendor = PCI_ANY_ID, | ||
1071 | .subdevice = PCI_ANY_ID, | ||
1072 | .init = pci_oxsemi_tornado_init, | ||
1073 | .setup = pci_default_setup, | ||
1074 | }, | ||
1075 | /* | ||
1021 | * Default "match everything" terminator entry | 1076 | * Default "match everything" terminator entry |
1022 | */ | 1077 | */ |
1023 | { | 1078 | { |
@@ -1048,7 +1103,7 @@ static struct pci_serial_quirk *find_quirk(struct pci_dev *dev) | |||
1048 | } | 1103 | } |
1049 | 1104 | ||
1050 | static inline int get_pci_irq(struct pci_dev *dev, | 1105 | static inline int get_pci_irq(struct pci_dev *dev, |
1051 | struct pciserial_board *board) | 1106 | const struct pciserial_board *board) |
1052 | { | 1107 | { |
1053 | if (board->flags & FL_NOIRQ) | 1108 | if (board->flags & FL_NOIRQ) |
1054 | return 0; | 1109 | return 0; |
@@ -1843,8 +1898,8 @@ serial_pci_guess_board(struct pci_dev *dev, struct pciserial_board *board) | |||
1843 | } | 1898 | } |
1844 | 1899 | ||
1845 | static inline int | 1900 | static inline int |
1846 | serial_pci_matches(struct pciserial_board *board, | 1901 | serial_pci_matches(const struct pciserial_board *board, |
1847 | struct pciserial_board *guessed) | 1902 | const struct pciserial_board *guessed) |
1848 | { | 1903 | { |
1849 | return | 1904 | return |
1850 | board->num_ports == guessed->num_ports && | 1905 | board->num_ports == guessed->num_ports && |
@@ -1854,54 +1909,14 @@ serial_pci_matches(struct pciserial_board *board, | |||
1854 | board->first_offset == guessed->first_offset; | 1909 | board->first_offset == guessed->first_offset; |
1855 | } | 1910 | } |
1856 | 1911 | ||
1857 | /* | ||
1858 | * Oxford Semiconductor Inc. | ||
1859 | * Check that device is part of the Tornado range of devices, then determine | ||
1860 | * the number of ports available on the device. | ||
1861 | */ | ||
1862 | static int pci_oxsemi_tornado_init(struct pci_dev *dev, struct pciserial_board *board) | ||
1863 | { | ||
1864 | u8 __iomem *p; | ||
1865 | unsigned long deviceID; | ||
1866 | unsigned int number_uarts; | ||
1867 | |||
1868 | /* OxSemi Tornado devices are all 0xCxxx */ | ||
1869 | if (dev->vendor == PCI_VENDOR_ID_OXSEMI && | ||
1870 | (dev->device & 0xF000) != 0xC000) | ||
1871 | return 0; | ||
1872 | |||
1873 | p = pci_iomap(dev, 0, 5); | ||
1874 | if (p == NULL) | ||
1875 | return -ENOMEM; | ||
1876 | |||
1877 | deviceID = ioread32(p); | ||
1878 | /* Tornado device */ | ||
1879 | if (deviceID == 0x07000200) { | ||
1880 | number_uarts = ioread8(p + 4); | ||
1881 | board->num_ports = number_uarts; | ||
1882 | printk(KERN_DEBUG | ||
1883 | "%d ports detected on Oxford PCI Express device\n", | ||
1884 | number_uarts); | ||
1885 | } | ||
1886 | pci_iounmap(dev, p); | ||
1887 | return 0; | ||
1888 | } | ||
1889 | |||
1890 | struct serial_private * | 1912 | struct serial_private * |
1891 | pciserial_init_ports(struct pci_dev *dev, struct pciserial_board *board) | 1913 | pciserial_init_ports(struct pci_dev *dev, const struct pciserial_board *board) |
1892 | { | 1914 | { |
1893 | struct uart_port serial_port; | 1915 | struct uart_port serial_port; |
1894 | struct serial_private *priv; | 1916 | struct serial_private *priv; |
1895 | struct pci_serial_quirk *quirk; | 1917 | struct pci_serial_quirk *quirk; |
1896 | int rc, nr_ports, i; | 1918 | int rc, nr_ports, i; |
1897 | 1919 | ||
1898 | /* | ||
1899 | * Find number of ports on board | ||
1900 | */ | ||
1901 | if (dev->vendor == PCI_VENDOR_ID_OXSEMI || | ||
1902 | dev->vendor == PCI_VENDOR_ID_MAINPINE) | ||
1903 | pci_oxsemi_tornado_init(dev, board); | ||
1904 | |||
1905 | nr_ports = board->num_ports; | 1920 | nr_ports = board->num_ports; |
1906 | 1921 | ||
1907 | /* | 1922 | /* |
@@ -2028,7 +2043,8 @@ static int __devinit | |||
2028 | pciserial_init_one(struct pci_dev *dev, const struct pci_device_id *ent) | 2043 | pciserial_init_one(struct pci_dev *dev, const struct pci_device_id *ent) |
2029 | { | 2044 | { |
2030 | struct serial_private *priv; | 2045 | struct serial_private *priv; |
2031 | struct pciserial_board *board, tmp; | 2046 | const struct pciserial_board *board; |
2047 | struct pciserial_board tmp; | ||
2032 | int rc; | 2048 | int rc; |
2033 | 2049 | ||
2034 | if (ent->driver_data >= ARRAY_SIZE(pci_boards)) { | 2050 | if (ent->driver_data >= ARRAY_SIZE(pci_boards)) { |
@@ -2055,7 +2071,7 @@ pciserial_init_one(struct pci_dev *dev, const struct pci_device_id *ent) | |||
2055 | * We matched one of our class entries. Try to | 2071 | * We matched one of our class entries. Try to |
2056 | * determine the parameters of this board. | 2072 | * determine the parameters of this board. |
2057 | */ | 2073 | */ |
2058 | rc = serial_pci_guess_board(dev, board); | 2074 | rc = serial_pci_guess_board(dev, &tmp); |
2059 | if (rc) | 2075 | if (rc) |
2060 | goto disable; | 2076 | goto disable; |
2061 | } else { | 2077 | } else { |
@@ -2271,6 +2287,9 @@ static struct pci_device_id serial_pci_tbl[] = { | |||
2271 | { PCI_VENDOR_ID_SEALEVEL, PCI_DEVICE_ID_SEALEVEL_COMM8, | 2287 | { PCI_VENDOR_ID_SEALEVEL, PCI_DEVICE_ID_SEALEVEL_COMM8, |
2272 | PCI_ANY_ID, PCI_ANY_ID, 0, 0, | 2288 | PCI_ANY_ID, PCI_ANY_ID, 0, 0, |
2273 | pbn_b2_8_115200 }, | 2289 | pbn_b2_8_115200 }, |
2290 | { PCI_VENDOR_ID_SEALEVEL, PCI_DEVICE_ID_SEALEVEL_7803, | ||
2291 | PCI_ANY_ID, PCI_ANY_ID, 0, 0, | ||
2292 | pbn_b2_8_460800 }, | ||
2274 | { PCI_VENDOR_ID_SEALEVEL, PCI_DEVICE_ID_SEALEVEL_UCOMM8, | 2293 | { PCI_VENDOR_ID_SEALEVEL, PCI_DEVICE_ID_SEALEVEL_UCOMM8, |
2275 | PCI_ANY_ID, PCI_ANY_ID, 0, 0, | 2294 | PCI_ANY_ID, PCI_ANY_ID, 0, 0, |
2276 | pbn_b2_8_115200 }, | 2295 | pbn_b2_8_115200 }, |
@@ -2372,6 +2391,9 @@ static struct pci_device_id serial_pci_tbl[] = { | |||
2372 | * For now just used the hex ID 0x950a. | 2391 | * For now just used the hex ID 0x950a. |
2373 | */ | 2392 | */ |
2374 | { PCI_VENDOR_ID_OXSEMI, 0x950a, | 2393 | { PCI_VENDOR_ID_OXSEMI, 0x950a, |
2394 | PCI_SUBVENDOR_ID_SIIG, PCI_SUBDEVICE_ID_SIIG_DUAL_SERIAL, 0, 0, | ||
2395 | pbn_b0_2_115200 }, | ||
2396 | { PCI_VENDOR_ID_OXSEMI, 0x950a, | ||
2375 | PCI_ANY_ID, PCI_ANY_ID, 0, 0, | 2397 | PCI_ANY_ID, PCI_ANY_ID, 0, 0, |
2376 | pbn_b0_2_1130000 }, | 2398 | pbn_b0_2_1130000 }, |
2377 | { PCI_VENDOR_ID_OXSEMI, PCI_DEVICE_ID_OXSEMI_16PCI954, | 2399 | { PCI_VENDOR_ID_OXSEMI, PCI_DEVICE_ID_OXSEMI_16PCI954, |
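
Ordering matters in this table: the PCI core takes the first matching entry, so the new SIIG-subvendor row for device 0x950a has to sit before the existing PCI_ANY_ID catch-all for the same device, or it would never be reached.
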
diff --git a/drivers/serial/bfin_5xx.c b/drivers/serial/bfin_5xx.c index 569f0e2476c6..318d69dce8e1 100644 --- a/drivers/serial/bfin_5xx.c +++ b/drivers/serial/bfin_5xx.c | |||
@@ -22,7 +22,8 @@ | |||
22 | #include <linux/tty_flip.h> | 22 | #include <linux/tty_flip.h> |
23 | #include <linux/serial_core.h> | 23 | #include <linux/serial_core.h> |
24 | 24 | ||
25 | #ifdef CONFIG_KGDB_UART | 25 | #if defined(CONFIG_KGDB_SERIAL_CONSOLE) || \ |
26 | defined(CONFIG_KGDB_SERIAL_CONSOLE_MODULE) | ||
26 | #include <linux/kgdb.h> | 27 | #include <linux/kgdb.h> |
27 | #include <asm/irq_regs.h> | 28 | #include <asm/irq_regs.h> |
28 | #endif | 29 | #endif |
@@ -45,6 +46,16 @@ | |||
45 | static struct bfin_serial_port bfin_serial_ports[BFIN_UART_NR_PORTS]; | 46 | static struct bfin_serial_port bfin_serial_ports[BFIN_UART_NR_PORTS]; |
46 | static int nr_active_ports = ARRAY_SIZE(bfin_serial_resource); | 47 | static int nr_active_ports = ARRAY_SIZE(bfin_serial_resource); |
47 | 48 | ||
49 | #if defined(CONFIG_KGDB_SERIAL_CONSOLE) || \ | ||
50 | defined(CONFIG_KGDB_SERIAL_CONSOLE_MODULE) | ||
51 | |||
52 | # ifndef CONFIG_SERIAL_BFIN_PIO | ||
53 | # error KGDB only supports UART in PIO mode. | ||
54 | # endif | ||
55 | |||
56 | static int kgdboc_port_line; | ||
57 | static int kgdboc_break_enabled; | ||
58 | #endif | ||
48 | /* | 59 | /* |
49 | * Setup for console. Argument comes from the menuconfig | 60 | * Setup for console. Argument comes from the menuconfig |
50 | */ | 61 | */ |
@@ -62,13 +73,17 @@ static void bfin_serial_tx_chars(struct bfin_serial_port *uart); | |||
62 | 73 | ||
63 | static void bfin_serial_mctrl_check(struct bfin_serial_port *uart); | 74 | static void bfin_serial_mctrl_check(struct bfin_serial_port *uart); |
64 | 75 | ||
76 | static void bfin_serial_reset_irda(struct uart_port *port); | ||
77 | |||
65 | /* | 78 | /* |
66 | * interrupts are disabled on entry | 79 | * interrupts are disabled on entry |
67 | */ | 80 | */ |
68 | static void bfin_serial_stop_tx(struct uart_port *port) | 81 | static void bfin_serial_stop_tx(struct uart_port *port) |
69 | { | 82 | { |
70 | struct bfin_serial_port *uart = (struct bfin_serial_port *)port; | 83 | struct bfin_serial_port *uart = (struct bfin_serial_port *)port; |
84 | #ifdef CONFIG_SERIAL_BFIN_DMA | ||
71 | struct circ_buf *xmit = &uart->port.info->xmit; | 85 | struct circ_buf *xmit = &uart->port.info->xmit; |
86 | #endif | ||
72 | 87 | ||
73 | while (!(UART_GET_LSR(uart) & TEMT)) | 88 | while (!(UART_GET_LSR(uart) & TEMT)) |
74 | cpu_relax(); | 89 | cpu_relax(); |
@@ -94,6 +109,14 @@ static void bfin_serial_stop_tx(struct uart_port *port) | |||
94 | static void bfin_serial_start_tx(struct uart_port *port) | 109 | static void bfin_serial_start_tx(struct uart_port *port) |
95 | { | 110 | { |
96 | struct bfin_serial_port *uart = (struct bfin_serial_port *)port; | 111 | struct bfin_serial_port *uart = (struct bfin_serial_port *)port; |
112 | struct tty_struct *tty = uart->port.info->port.tty; | ||
113 | |||
114 | /* | ||
115 | * To avoid losting RX interrupt, we reset IR function | ||
116 | * before sending data. | ||
117 | */ | ||
118 | if (tty->termios->c_line == N_IRDA) | ||
119 | bfin_serial_reset_irda(port); | ||
97 | 120 | ||
98 | #ifdef CONFIG_SERIAL_BFIN_DMA | 121 | #ifdef CONFIG_SERIAL_BFIN_DMA |
99 | if (uart->tx_done) | 122 | if (uart->tx_done) |
@@ -110,9 +133,7 @@ static void bfin_serial_start_tx(struct uart_port *port) | |||
110 | static void bfin_serial_stop_rx(struct uart_port *port) | 133 | static void bfin_serial_stop_rx(struct uart_port *port) |
111 | { | 134 | { |
112 | struct bfin_serial_port *uart = (struct bfin_serial_port *)port; | 135 | struct bfin_serial_port *uart = (struct bfin_serial_port *)port; |
113 | #ifdef CONFIG_KGDB_UART | 136 | |
114 | if (uart->port.line != CONFIG_KGDB_UART_PORT) | ||
115 | #endif | ||
116 | UART_CLEAR_IER(uart, ERBFI); | 137 | UART_CLEAR_IER(uart, ERBFI); |
117 | } | 138 | } |
118 | 139 | ||
@@ -123,49 +144,6 @@ static void bfin_serial_enable_ms(struct uart_port *port) | |||
123 | { | 144 | { |
124 | } | 145 | } |
125 | 146 | ||
126 | #ifdef CONFIG_KGDB_UART | ||
127 | static int kgdb_entry_state; | ||
128 | |||
129 | void kgdb_put_debug_char(int chr) | ||
130 | { | ||
131 | struct bfin_serial_port *uart; | ||
132 | |||
133 | if (CONFIG_KGDB_UART_PORT < 0 | ||
134 | || CONFIG_KGDB_UART_PORT >= BFIN_UART_NR_PORTS) | ||
135 | uart = &bfin_serial_ports[0]; | ||
136 | else | ||
137 | uart = &bfin_serial_ports[CONFIG_KGDB_UART_PORT]; | ||
138 | |||
139 | while (!(UART_GET_LSR(uart) & THRE)) { | ||
140 | SSYNC(); | ||
141 | } | ||
142 | |||
143 | UART_CLEAR_DLAB(uart); | ||
144 | UART_PUT_CHAR(uart, (unsigned char)chr); | ||
145 | SSYNC(); | ||
146 | } | ||
147 | |||
148 | int kgdb_get_debug_char(void) | ||
149 | { | ||
150 | struct bfin_serial_port *uart; | ||
151 | unsigned char chr; | ||
152 | |||
153 | if (CONFIG_KGDB_UART_PORT < 0 | ||
154 | || CONFIG_KGDB_UART_PORT >= BFIN_UART_NR_PORTS) | ||
155 | uart = &bfin_serial_ports[0]; | ||
156 | else | ||
157 | uart = &bfin_serial_ports[CONFIG_KGDB_UART_PORT]; | ||
158 | |||
159 | while(!(UART_GET_LSR(uart) & DR)) { | ||
160 | SSYNC(); | ||
161 | } | ||
162 | UART_CLEAR_DLAB(uart); | ||
163 | chr = UART_GET_CHAR(uart); | ||
164 | SSYNC(); | ||
165 | |||
166 | return chr; | ||
167 | } | ||
168 | #endif | ||
169 | 147 | ||
170 | #if ANOMALY_05000363 && defined(CONFIG_SERIAL_BFIN_PIO) | 148 | #if ANOMALY_05000363 && defined(CONFIG_SERIAL_BFIN_PIO) |
171 | # define UART_GET_ANOMALY_THRESHOLD(uart) ((uart)->anomaly_threshold) | 149 | # define UART_GET_ANOMALY_THRESHOLD(uart) ((uart)->anomaly_threshold) |
@@ -178,7 +156,7 @@ int kgdb_get_debug_char(void) | |||
178 | #ifdef CONFIG_SERIAL_BFIN_PIO | 156 | #ifdef CONFIG_SERIAL_BFIN_PIO |
179 | static void bfin_serial_rx_chars(struct bfin_serial_port *uart) | 157 | static void bfin_serial_rx_chars(struct bfin_serial_port *uart) |
180 | { | 158 | { |
181 | struct tty_struct *tty = uart->port.info->port.tty; | 159 | struct tty_struct *tty = NULL; |
182 | unsigned int status, ch, flg; | 160 | unsigned int status, ch, flg; |
183 | static struct timeval anomaly_start = { .tv_sec = 0 }; | 161 | static struct timeval anomaly_start = { .tv_sec = 0 }; |
184 | 162 | ||
@@ -188,27 +166,18 @@ static void bfin_serial_rx_chars(struct bfin_serial_port *uart) | |||
188 | ch = UART_GET_CHAR(uart); | 166 | ch = UART_GET_CHAR(uart); |
189 | uart->port.icount.rx++; | 167 | uart->port.icount.rx++; |
190 | 168 | ||
191 | #ifdef CONFIG_KGDB_UART | 169 | #if defined(CONFIG_KGDB_SERIAL_CONSOLE) || \ |
192 | if (uart->port.line == CONFIG_KGDB_UART_PORT) { | 170 | defined(CONFIG_KGDB_SERIAL_CONSOLE_MODULE) |
193 | struct pt_regs *regs = get_irq_regs(); | 171 | if (kgdb_connected && kgdboc_port_line == uart->port.line) |
194 | if (uart->port.cons->index == CONFIG_KGDB_UART_PORT && ch == 0x1) { /* Ctrl + A */ | 172 | if (ch == 0x3) {/* Ctrl + C */ |
195 | kgdb_breakkey_pressed(regs); | 173 | kgdb_breakpoint(); |
196 | return; | ||
197 | } else if (kgdb_entry_state == 0 && ch == '$') {/* connection from KGDB */ | ||
198 | kgdb_entry_state = 1; | ||
199 | } else if (kgdb_entry_state == 1 && ch == 'q') { | ||
200 | kgdb_entry_state = 0; | ||
201 | kgdb_breakkey_pressed(regs); | ||
202 | return; | ||
203 | } else if (ch == 0x3) {/* Ctrl + C */ | ||
204 | kgdb_entry_state = 0; | ||
205 | kgdb_breakkey_pressed(regs); | ||
206 | return; | 174 | return; |
207 | } else { | ||
208 | kgdb_entry_state = 0; | ||
209 | } | 175 | } |
210 | } | 176 | |
177 | if (!uart->port.info || !uart->port.info->port.tty) | ||
178 | return; | ||
211 | #endif | 179 | #endif |
180 | tty = uart->port.info->port.tty; | ||
212 | 181 | ||
213 | if (ANOMALY_05000363) { | 182 | if (ANOMALY_05000363) { |
214 | /* The BF533 (and BF561) family of processors have a nice anomaly | 183 | /* The BF533 (and BF561) family of processors have a nice anomaly |
@@ -250,6 +219,7 @@ static void bfin_serial_rx_chars(struct bfin_serial_port *uart) | |||
250 | return; | 219 | return; |
251 | 220 | ||
252 | known_good_char: | 221 | known_good_char: |
222 | status &= ~BI; | ||
253 | anomaly_start.tv_sec = 0; | 223 | anomaly_start.tv_sec = 0; |
254 | } | 224 | } |
255 | } | 225 | } |
@@ -445,7 +415,9 @@ static void bfin_serial_dma_rx_chars(struct bfin_serial_port *uart) | |||
445 | 415 | ||
446 | void bfin_serial_rx_dma_timeout(struct bfin_serial_port *uart) | 416 | void bfin_serial_rx_dma_timeout(struct bfin_serial_port *uart) |
447 | { | 417 | { |
448 | int x_pos, pos; | 418 | int x_pos, pos; unsigned long flags; |
419 | |||
420 | spin_lock_irqsave(&uart->port.lock, flags); | ||
449 | 421 | ||
450 | uart->rx_dma_nrows = get_dma_curr_ycount(uart->rx_dma_channel); | 422 | uart->rx_dma_nrows = get_dma_curr_ycount(uart->rx_dma_channel); |
451 | x_pos = get_dma_curr_xcount(uart->rx_dma_channel); | 423 | x_pos = get_dma_curr_xcount(uart->rx_dma_channel); |
@@ -463,6 +435,8 @@ void bfin_serial_rx_dma_timeout(struct bfin_serial_port *uart) | |||
463 | uart->rx_dma_buf.tail = uart->rx_dma_buf.head; | 435 | uart->rx_dma_buf.tail = uart->rx_dma_buf.head; |
464 | } | 436 | } |
465 | 437 | ||
438 | spin_unlock_irqrestore(&uart->port.lock, flags); | ||
439 | |||
466 | mod_timer(&(uart->rx_dma_timer), jiffies + DMA_RX_FLUSH_JIFFIES); | 440 | mod_timer(&(uart->rx_dma_timer), jiffies + DMA_RX_FLUSH_JIFFIES); |
467 | } | 441 | } |
468 | 442 | ||
@@ -497,10 +471,9 @@ static irqreturn_t bfin_serial_dma_rx_int(int irq, void *dev_id) | |||
497 | spin_lock(&uart->port.lock); | 471 | spin_lock(&uart->port.lock); |
498 | irqstat = get_dma_curr_irqstat(uart->rx_dma_channel); | 472 | irqstat = get_dma_curr_irqstat(uart->rx_dma_channel); |
499 | clear_dma_irqstat(uart->rx_dma_channel); | 473 | clear_dma_irqstat(uart->rx_dma_channel); |
474 | bfin_serial_dma_rx_chars(uart); | ||
500 | spin_unlock(&uart->port.lock); | 475 | spin_unlock(&uart->port.lock); |
501 | 476 | ||
502 | mod_timer(&(uart->rx_dma_timer), jiffies); | ||
503 | |||
504 | return IRQ_HANDLED; | 477 | return IRQ_HANDLED; |
505 | } | 478 | } |
506 | #endif | 479 | #endif |
@@ -630,16 +603,16 @@ static int bfin_serial_startup(struct uart_port *port) | |||
630 | uart->rx_dma_timer.expires = jiffies + DMA_RX_FLUSH_JIFFIES; | 603 | uart->rx_dma_timer.expires = jiffies + DMA_RX_FLUSH_JIFFIES; |
631 | add_timer(&(uart->rx_dma_timer)); | 604 | add_timer(&(uart->rx_dma_timer)); |
632 | #else | 605 | #else |
606 | #if defined(CONFIG_KGDB_SERIAL_CONSOLE) || \ | ||
607 | defined(CONFIG_KGDB_SERIAL_CONSOLE_MODULE) | ||
608 | if (kgdboc_port_line == uart->port.line && kgdboc_break_enabled) | ||
609 | kgdboc_break_enabled = 0; | ||
610 | else { | ||
611 | # endif | ||
633 | if (request_irq(uart->port.irq, bfin_serial_rx_int, IRQF_DISABLED, | 612 | if (request_irq(uart->port.irq, bfin_serial_rx_int, IRQF_DISABLED, |
634 | "BFIN_UART_RX", uart)) { | 613 | "BFIN_UART_RX", uart)) { |
635 | # ifdef CONFIG_KGDB_UART | ||
636 | if (uart->port.line != CONFIG_KGDB_UART_PORT) { | ||
637 | # endif | ||
638 | printk(KERN_NOTICE "Unable to attach BlackFin UART RX interrupt\n"); | 614 | printk(KERN_NOTICE "Unable to attach BlackFin UART RX interrupt\n"); |
639 | return -EBUSY; | 615 | return -EBUSY; |
640 | # ifdef CONFIG_KGDB_UART | ||
641 | } | ||
642 | # endif | ||
643 | } | 616 | } |
644 | 617 | ||
645 | if (request_irq | 618 | if (request_irq |
@@ -685,6 +658,10 @@ static int bfin_serial_startup(struct uart_port *port) | |||
685 | } | 658 | } |
686 | } | 659 | } |
687 | # endif | 660 | # endif |
661 | #if defined(CONFIG_KGDB_SERIAL_CONSOLE) || \ | ||
662 | defined(CONFIG_KGDB_SERIAL_CONSOLE_MODULE) | ||
663 | } | ||
664 | # endif | ||
688 | #endif | 665 | #endif |
689 | UART_SET_IER(uart, ERBFI); | 666 | UART_SET_IER(uart, ERBFI); |
690 | return 0; | 667 | return 0; |
@@ -716,9 +693,6 @@ static void bfin_serial_shutdown(struct uart_port *port) | |||
716 | break; | 693 | break; |
717 | }; | 694 | }; |
718 | #endif | 695 | #endif |
719 | #ifdef CONFIG_KGDB_UART | ||
720 | if (uart->port.line != CONFIG_KGDB_UART_PORT) | ||
721 | #endif | ||
722 | free_irq(uart->port.irq, uart); | 696 | free_irq(uart->port.irq, uart); |
723 | free_irq(uart->port.irq+1, uart); | 697 | free_irq(uart->port.irq+1, uart); |
724 | #endif | 698 | #endif |
@@ -887,6 +861,65 @@ static void bfin_serial_set_ldisc(struct uart_port *port) | |||
887 | } | 861 | } |
888 | } | 862 | } |
889 | 863 | ||
864 | #ifdef CONFIG_CONSOLE_POLL | ||
865 | static void bfin_serial_poll_put_char(struct uart_port *port, unsigned char chr) | ||
866 | { | ||
867 | struct bfin_serial_port *uart = (struct bfin_serial_port *)port; | ||
868 | |||
869 | while (!(UART_GET_LSR(uart) & THRE)) | ||
870 | cpu_relax(); | ||
871 | |||
872 | UART_CLEAR_DLAB(uart); | ||
873 | UART_PUT_CHAR(uart, (unsigned char)chr); | ||
874 | } | ||
875 | |||
876 | static int bfin_serial_poll_get_char(struct uart_port *port) | ||
877 | { | ||
878 | struct bfin_serial_port *uart = (struct bfin_serial_port *)port; | ||
879 | unsigned char chr; | ||
880 | |||
881 | while (!(UART_GET_LSR(uart) & DR)) | ||
882 | cpu_relax(); | ||
883 | |||
884 | UART_CLEAR_DLAB(uart); | ||
885 | chr = UART_GET_CHAR(uart); | ||
886 | |||
887 | return chr; | ||
888 | } | ||
889 | #endif | ||
890 | |||
891 | #if defined(CONFIG_KGDB_SERIAL_CONSOLE) || \ | ||
892 | defined(CONFIG_KGDB_SERIAL_CONSOLE_MODULE) | ||
893 | static void bfin_kgdboc_port_shutdown(struct uart_port *port) | ||
894 | { | ||
895 | if (kgdboc_break_enabled) { | ||
896 | kgdboc_break_enabled = 0; | ||
897 | bfin_serial_shutdown(port); | ||
898 | } | ||
899 | } | ||
900 | |||
901 | static int bfin_kgdboc_port_startup(struct uart_port *port) | ||
902 | { | ||
903 | kgdboc_port_line = port->line; | ||
904 | kgdboc_break_enabled = !bfin_serial_startup(port); | ||
905 | return 0; | ||
906 | } | ||
907 | #endif | ||
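
With these two hooks exported through bfin_serial_pops below and the CONFIG_CONSOLE_POLL routines above, the port can be handed to kgdboc at boot, e.g. on the kernel command line (assuming the driver's ttyBF naming):

	kgdboc=ttyBF0,57600
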
908 | |||
909 | static void bfin_serial_reset_irda(struct uart_port *port) | ||
910 | { | ||
911 | int line = port->line; | ||
912 | unsigned short val; | ||
913 | |||
914 | val = UART_GET_GCTL(&bfin_serial_ports[line]); | ||
915 | val &= ~(IREN | RPOLC); | ||
916 | UART_PUT_GCTL(&bfin_serial_ports[line], val); | ||
917 | SSYNC(); | ||
918 | val |= (IREN | RPOLC); | ||
919 | UART_PUT_GCTL(&bfin_serial_ports[line], val); | ||
920 | SSYNC(); | ||
921 | } | ||
922 | |||
890 | static struct uart_ops bfin_serial_pops = { | 923 | static struct uart_ops bfin_serial_pops = { |
891 | .tx_empty = bfin_serial_tx_empty, | 924 | .tx_empty = bfin_serial_tx_empty, |
892 | .set_mctrl = bfin_serial_set_mctrl, | 925 | .set_mctrl = bfin_serial_set_mctrl, |
@@ -905,6 +938,15 @@ static struct uart_ops bfin_serial_pops = { | |||
905 | .request_port = bfin_serial_request_port, | 938 | .request_port = bfin_serial_request_port, |
906 | .config_port = bfin_serial_config_port, | 939 | .config_port = bfin_serial_config_port, |
907 | .verify_port = bfin_serial_verify_port, | 940 | .verify_port = bfin_serial_verify_port, |
941 | #if defined(CONFIG_KGDB_SERIAL_CONSOLE) || \ | ||
942 | defined(CONFIG_KGDB_SERIAL_CONSOLE_MODULE) | ||
943 | .kgdboc_port_startup = bfin_kgdboc_port_startup, | ||
944 | .kgdboc_port_shutdown = bfin_kgdboc_port_shutdown, | ||
945 | #endif | ||
946 | #ifdef CONFIG_CONSOLE_POLL | ||
947 | .poll_put_char = bfin_serial_poll_put_char, | ||
948 | .poll_get_char = bfin_serial_poll_get_char, | ||
949 | #endif | ||
908 | }; | 950 | }; |
909 | 951 | ||
910 | static void __init bfin_serial_init_ports(void) | 952 | static void __init bfin_serial_init_ports(void) |
@@ -950,7 +992,7 @@ static void __init bfin_serial_init_ports(void) | |||
950 | 992 | ||
951 | } | 993 | } |
952 | 994 | ||
953 | #ifdef CONFIG_SERIAL_BFIN_CONSOLE | 995 | #if defined(CONFIG_SERIAL_BFIN_CONSOLE) || defined(CONFIG_EARLY_PRINTK) |
954 | /* | 996 | /* |
955 | * If the port was already initialised (eg, by a boot loader), | 997 | * If the port was already initialised (eg, by a boot loader), |
956 | * try to determine the current setup. | 998 | * try to determine the current setup. |
@@ -994,24 +1036,20 @@ bfin_serial_console_get_options(struct bfin_serial_port *uart, int *baud, | |||
994 | } | 1036 | } |
995 | pr_debug("%s:baud = %d, parity = %c, bits= %d\n", __func__, *baud, *parity, *bits); | 1037 | pr_debug("%s:baud = %d, parity = %c, bits= %d\n", __func__, *baud, *parity, *bits); |
996 | } | 1038 | } |
997 | #endif | ||
998 | 1039 | ||
999 | #if defined(CONFIG_SERIAL_BFIN_CONSOLE) || defined(CONFIG_EARLY_PRINTK) | ||
1000 | static struct uart_driver bfin_serial_reg; | 1040 | static struct uart_driver bfin_serial_reg; |
1001 | 1041 | ||
1002 | static int __init | 1042 | static int __init |
1003 | bfin_serial_console_setup(struct console *co, char *options) | 1043 | bfin_serial_console_setup(struct console *co, char *options) |
1004 | { | 1044 | { |
1005 | struct bfin_serial_port *uart; | 1045 | struct bfin_serial_port *uart; |
1006 | # ifdef CONFIG_SERIAL_BFIN_CONSOLE | ||
1007 | int baud = 57600; | 1046 | int baud = 57600; |
1008 | int bits = 8; | 1047 | int bits = 8; |
1009 | int parity = 'n'; | 1048 | int parity = 'n'; |
1010 | # ifdef CONFIG_SERIAL_BFIN_CTSRTS | 1049 | # ifdef CONFIG_SERIAL_BFIN_CTSRTS |
1011 | int flow = 'r'; | 1050 | int flow = 'r'; |
1012 | # else | 1051 | # else |
1013 | int flow = 'n'; | 1052 | int flow = 'n'; |
1014 | # endif | ||
1015 | # endif | 1053 | # endif |
1016 | 1054 | ||
1017 | /* | 1055 | /* |
@@ -1023,16 +1061,12 @@ bfin_serial_console_setup(struct console *co, char *options) | |||
1023 | co->index = 0; | 1061 | co->index = 0; |
1024 | uart = &bfin_serial_ports[co->index]; | 1062 | uart = &bfin_serial_ports[co->index]; |
1025 | 1063 | ||
1026 | # ifdef CONFIG_SERIAL_BFIN_CONSOLE | ||
1027 | if (options) | 1064 | if (options) |
1028 | uart_parse_options(options, &baud, &parity, &bits, &flow); | 1065 | uart_parse_options(options, &baud, &parity, &bits, &flow); |
1029 | else | 1066 | else |
1030 | bfin_serial_console_get_options(uart, &baud, &parity, &bits); | 1067 | bfin_serial_console_get_options(uart, &baud, &parity, &bits); |
1031 | 1068 | ||
1032 | return uart_set_options(&uart->port, co, baud, parity, bits, flow); | 1069 | return uart_set_options(&uart->port, co, baud, parity, bits, flow); |
1033 | # else | ||
1034 | return 0; | ||
1035 | # endif | ||
1036 | } | 1070 | } |
1037 | #endif /* defined (CONFIG_SERIAL_BFIN_CONSOLE) || | 1071 | #endif /* defined (CONFIG_SERIAL_BFIN_CONSOLE) || |
1038 | defined (CONFIG_EARLY_PRINTK) */ | 1072 | defined (CONFIG_EARLY_PRINTK) */ |
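With the inner CONFIG_SERIAL_BFIN_CONSOLE conditionals removed, one setup routine now serves both the regular console and early printk. The contract it implements is the standard serial-core one; a minimal sketch for a hypothetical driver (my_ports is assumed; uart_parse_options() and uart_set_options() are the real serial-core helpers, used exactly as in the hunk above):

static int __init my_console_setup(struct console *co, char *options)
{
	struct uart_port *port = &my_ports[co->index].port;
	int baud = 57600, bits = 8, parity = 'n', flow = 'n';

	/* A "console=ttyX,57600n8" option string wins; otherwise the
	 * defaults stand in for probing the boot loader's setup. */
	if (options)
		uart_parse_options(options, &baud, &parity, &bits, &flow);
	return uart_set_options(port, co, baud, parity, bits, flow);
}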
@@ -1076,10 +1110,7 @@ static int __init bfin_serial_rs_console_init(void) | |||
1076 | { | 1110 | { |
1077 | bfin_serial_init_ports(); | 1111 | bfin_serial_init_ports(); |
1078 | register_console(&bfin_serial_console); | 1112 | register_console(&bfin_serial_console); |
1079 | #ifdef CONFIG_KGDB_UART | 1113 | |
1080 | kgdb_entry_state = 0; | ||
1081 | init_kgdb_uart(); | ||
1082 | #endif | ||
1083 | return 0; | 1114 | return 0; |
1084 | } | 1115 | } |
1085 | console_initcall(bfin_serial_rs_console_init); | 1116 | console_initcall(bfin_serial_rs_console_init); |
@@ -1144,7 +1175,7 @@ struct console __init *bfin_earlyserial_init(unsigned int port, | |||
1144 | return &bfin_early_serial_console; | 1175 | return &bfin_early_serial_console; |
1145 | } | 1176 | } |
1146 | 1177 | ||
1147 | #endif /* CONFIG_SERIAL_BFIN_CONSOLE */ | 1178 | #endif /* CONFIG_EARLY_PRINTK */ |
1148 | 1179 | ||
1149 | static struct uart_driver bfin_serial_reg = { | 1180 | static struct uart_driver bfin_serial_reg = { |
1150 | .owner = THIS_MODULE, | 1181 | .owner = THIS_MODULE, |
@@ -1235,10 +1266,6 @@ static struct platform_driver bfin_serial_driver = { | |||
1235 | static int __init bfin_serial_init(void) | 1266 | static int __init bfin_serial_init(void) |
1236 | { | 1267 | { |
1237 | int ret; | 1268 | int ret; |
1238 | #ifdef CONFIG_KGDB_UART | ||
1239 | struct bfin_serial_port *uart = &bfin_serial_ports[CONFIG_KGDB_UART_PORT]; | ||
1240 | struct ktermios t; | ||
1241 | #endif | ||
1242 | 1269 | ||
1243 | pr_info("Serial: Blackfin serial driver\n"); | 1270 | pr_info("Serial: Blackfin serial driver\n"); |
1244 | 1271 | ||
@@ -1252,21 +1279,6 @@ static int __init bfin_serial_init(void) | |||
1252 | uart_unregister_driver(&bfin_serial_reg); | 1279 | uart_unregister_driver(&bfin_serial_reg); |
1253 | } | 1280 | } |
1254 | } | 1281 | } |
1255 | #ifdef CONFIG_KGDB_UART | ||
1256 | if (uart->port.cons->index != CONFIG_KGDB_UART_PORT) { | ||
1257 | request_irq(uart->port.irq, bfin_serial_rx_int, | ||
1258 | IRQF_DISABLED, "BFIN_UART_RX", uart); | ||
1259 | pr_info("Request irq for kgdb uart port\n"); | ||
1260 | UART_SET_IER(uart, ERBFI); | ||
1261 | SSYNC(); | ||
1262 | t.c_cflag = CS8|B57600; | ||
1263 | t.c_iflag = 0; | ||
1264 | t.c_oflag = 0; | ||
1265 | t.c_lflag = ICANON; | ||
1266 | t.c_line = CONFIG_KGDB_UART_PORT; | ||
1267 | bfin_serial_set_termios(&uart->port, &t, &t); | ||
1268 | } | ||
1269 | #endif | ||
1270 | return ret; | 1282 | return ret; |
1271 | } | 1283 | } |
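This hunk removes the last of the hand-rolled CONFIG_KGDB_UART plumbing, the manual request_irq() and the ktermios assembled by hand at init time. Debugger attachment now goes through the generic kgdboc layer via the uart_ops hooks added above, selected at boot with the usual kgdboc syntax, e.g. kgdboc=ttyBF0,57600 (the ttyBF0 name is an assumption about the Blackfin console device naming).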
1272 | 1284 | ||
@@ -1276,6 +1288,7 @@ static void __exit bfin_serial_exit(void) | |||
1276 | uart_unregister_driver(&bfin_serial_reg); | 1288 | uart_unregister_driver(&bfin_serial_reg); |
1277 | } | 1289 | } |
1278 | 1290 | ||
1291 | |||
1279 | module_init(bfin_serial_init); | 1292 | module_init(bfin_serial_init); |
1280 | module_exit(bfin_serial_exit); | 1293 | module_exit(bfin_serial_exit); |
1281 | 1294 | ||
diff --git a/drivers/serial/bfin_sport_uart.c b/drivers/serial/bfin_sport_uart.c index dd8564d25051..529c0ff7952c 100644 --- a/drivers/serial/bfin_sport_uart.c +++ b/drivers/serial/bfin_sport_uart.c | |||
@@ -99,7 +99,7 @@ static void sport_stop_tx(struct uart_port *port); | |||
99 | 99 | ||
100 | static inline void tx_one_byte(struct sport_uart_port *up, unsigned int value) | 100 | static inline void tx_one_byte(struct sport_uart_port *up, unsigned int value) |
101 | { | 101 | { |
102 | pr_debug("%s value:%x\n", __FUNCTION__, value); | 102 | pr_debug("%s value:%x\n", __func__, value); |
103 | /* Place a Start and Stop bit */ | 103 | /* Place a Start and Stop bit */ |
104 | __asm__ volatile ( | 104 | __asm__ volatile ( |
105 | "R2 = b#01111111100;\n\t" | 105 | "R2 = b#01111111100;\n\t" |
@@ -110,7 +110,7 @@ static inline void tx_one_byte(struct sport_uart_port *up, unsigned int value) | |||
110 | :"=r"(value) | 110 | :"=r"(value) |
111 | :"0"(value) | 111 | :"0"(value) |
112 | :"R2", "R3"); | 112 | :"R2", "R3"); |
113 | pr_debug("%s value:%x\n", __FUNCTION__, value); | 113 | pr_debug("%s value:%x\n", __func__, value); |
114 | 114 | ||
115 | SPORT_PUT_TX(up, value); | 115 | SPORT_PUT_TX(up, value); |
116 | } | 116 | } |
@@ -120,7 +120,7 @@ static inline unsigned int rx_one_byte(struct sport_uart_port *up) | |||
120 | unsigned int value, extract; | 120 | unsigned int value, extract; |
121 | 121 | ||
122 | value = SPORT_GET_RX32(up); | 122 | value = SPORT_GET_RX32(up); |
123 | pr_debug("%s value:%x\n", __FUNCTION__, value); | 123 | pr_debug("%s value:%x\n", __func__, value); |
124 | 124 | ||
125 | /* Extract 8 bits data */ | 125 | /* Extract 8 bits data */ |
126 | __asm__ volatile ( | 126 | __asm__ volatile ( |
@@ -151,12 +151,12 @@ static int sport_uart_setup(struct sport_uart_port *up, int sclk, int baud_rate) | |||
151 | /* Set TCR1 and TCR2 */ | 151 | /* Set TCR1 and TCR2 */ |
152 | SPORT_PUT_TCR1(up, (LTFS | ITFS | TFSR | TLSBIT | ITCLK)); | 152 | SPORT_PUT_TCR1(up, (LTFS | ITFS | TFSR | TLSBIT | ITCLK)); |
153 | SPORT_PUT_TCR2(up, 10); | 153 | SPORT_PUT_TCR2(up, 10); |
154 | pr_debug("%s TCR1:%x, TCR2:%x\n", __FUNCTION__, SPORT_GET_TCR1(up), SPORT_GET_TCR2(up)); | 154 | pr_debug("%s TCR1:%x, TCR2:%x\n", __func__, SPORT_GET_TCR1(up), SPORT_GET_TCR2(up)); |
155 | 155 | ||
156 | /* Set RCR1 and RCR2 */ | 156 | /* Set RCR1 and RCR2 */ |
157 | SPORT_PUT_RCR1(up, (RCKFE | LARFS | LRFS | RFSR | IRCLK)); | 157 | SPORT_PUT_RCR1(up, (RCKFE | LARFS | LRFS | RFSR | IRCLK)); |
158 | SPORT_PUT_RCR2(up, 28); | 158 | SPORT_PUT_RCR2(up, 28); |
159 | pr_debug("%s RCR1:%x, RCR2:%x\n", __FUNCTION__, SPORT_GET_RCR1(up), SPORT_GET_RCR2(up)); | 159 | pr_debug("%s RCR1:%x, RCR2:%x\n", __func__, SPORT_GET_RCR1(up), SPORT_GET_RCR2(up)); |
160 | 160 | ||
161 | tclkdiv = sclk/(2 * baud_rate) - 1; | 161 | tclkdiv = sclk/(2 * baud_rate) - 1; |
162 | tfsdiv = 12; | 162 | tfsdiv = 12; |
@@ -166,7 +166,7 @@ static int sport_uart_setup(struct sport_uart_port *up, int sclk, int baud_rate) | |||
166 | SPORT_PUT_RCLKDIV(up, rclkdiv); | 166 | SPORT_PUT_RCLKDIV(up, rclkdiv); |
167 | SSYNC(); | 167 | SSYNC(); |
168 | pr_debug("%s sclk:%d, baud_rate:%d, tclkdiv:%d, tfsdiv:%d, rclkdiv:%d\n", | 168 | pr_debug("%s sclk:%d, baud_rate:%d, tclkdiv:%d, tfsdiv:%d, rclkdiv:%d\n", |
169 | __FUNCTION__, sclk, baud_rate, tclkdiv, tfsdiv, rclkdiv); | 169 | __func__, sclk, baud_rate, tclkdiv, tfsdiv, rclkdiv); |
170 | 170 | ||
171 | return 0; | 171 | return 0; |
172 | } | 172 | } |
@@ -231,7 +231,7 @@ static int sport_startup(struct uart_port *port) | |||
231 | char buffer[20]; | 231 | char buffer[20]; |
232 | int retval; | 232 | int retval; |
233 | 233 | ||
234 | pr_debug("%s enter\n", __FUNCTION__); | 234 | pr_debug("%s enter\n", __func__); |
235 | memset(buffer, '\0', 20); | 235 | memset(buffer, '\0', 20); |
236 | snprintf(buffer, 20, "%s rx", up->name); | 236 | snprintf(buffer, 20, "%s rx", up->name); |
237 | retval = request_irq(up->rx_irq, sport_uart_rx_irq, IRQF_SAMPLE_RANDOM, buffer, up); | 237 | retval = request_irq(up->rx_irq, sport_uart_rx_irq, IRQF_SAMPLE_RANDOM, buffer, up); |
@@ -320,7 +320,7 @@ static unsigned int sport_tx_empty(struct uart_port *port) | |||
320 | unsigned int stat; | 320 | unsigned int stat; |
321 | 321 | ||
322 | stat = SPORT_GET_STAT(up); | 322 | stat = SPORT_GET_STAT(up); |
323 | pr_debug("%s stat:%04x\n", __FUNCTION__, stat); | 323 | pr_debug("%s stat:%04x\n", __func__, stat); |
324 | if (stat & TXHRE) { | 324 | if (stat & TXHRE) { |
325 | return TIOCSER_TEMT; | 325 | return TIOCSER_TEMT; |
326 | } else | 326 | } else |
@@ -329,13 +329,13 @@ static unsigned int sport_tx_empty(struct uart_port *port) | |||
329 | 329 | ||
330 | static unsigned int sport_get_mctrl(struct uart_port *port) | 330 | static unsigned int sport_get_mctrl(struct uart_port *port) |
331 | { | 331 | { |
332 | pr_debug("%s enter\n", __FUNCTION__); | 332 | pr_debug("%s enter\n", __func__); |
333 | return (TIOCM_CTS | TIOCM_CD | TIOCM_DSR); | 333 | return (TIOCM_CTS | TIOCM_CD | TIOCM_DSR); |
334 | } | 334 | } |
335 | 335 | ||
336 | static void sport_set_mctrl(struct uart_port *port, unsigned int mctrl) | 336 | static void sport_set_mctrl(struct uart_port *port, unsigned int mctrl) |
337 | { | 337 | { |
338 | pr_debug("%s enter\n", __FUNCTION__); | 338 | pr_debug("%s enter\n", __func__); |
339 | } | 339 | } |
340 | 340 | ||
341 | static void sport_stop_tx(struct uart_port *port) | 341 | static void sport_stop_tx(struct uart_port *port) |
@@ -343,7 +343,7 @@ static void sport_stop_tx(struct uart_port *port) | |||
343 | struct sport_uart_port *up = (struct sport_uart_port *)port; | 343 | struct sport_uart_port *up = (struct sport_uart_port *)port; |
344 | unsigned int stat; | 344 | unsigned int stat; |
345 | 345 | ||
346 | pr_debug("%s enter\n", __FUNCTION__); | 346 | pr_debug("%s enter\n", __func__); |
347 | 347 | ||
348 | stat = SPORT_GET_STAT(up); | 348 | stat = SPORT_GET_STAT(up); |
349 | while (!(stat & TXHRE)) { | 349 | while (!(stat & TXHRE)) { |
@@ -366,21 +366,21 @@ static void sport_start_tx(struct uart_port *port) | |||
366 | { | 366 | { |
367 | struct sport_uart_port *up = (struct sport_uart_port *)port; | 367 | struct sport_uart_port *up = (struct sport_uart_port *)port; |
368 | 368 | ||
369 | pr_debug("%s enter\n", __FUNCTION__); | 369 | pr_debug("%s enter\n", __func__); |
370 | /* Write data into SPORT FIFO before enabling SPORT to transmit */ | 370 | /* Write data into SPORT FIFO before enabling SPORT to transmit */ |
371 | sport_uart_tx_chars(up); | 371 | sport_uart_tx_chars(up); |
372 | 372 | ||
373 | /* Enable transmit, then an interrupt will be generated */ | 373 | /* Enable transmit, then an interrupt will be generated */ |
374 | SPORT_PUT_TCR1(up, (SPORT_GET_TCR1(up) | TSPEN)); | 374 | SPORT_PUT_TCR1(up, (SPORT_GET_TCR1(up) | TSPEN)); |
375 | SSYNC(); | 375 | SSYNC(); |
376 | pr_debug("%s exit\n", __FUNCTION__); | 376 | pr_debug("%s exit\n", __func__); |
377 | } | 377 | } |
378 | 378 | ||
379 | static void sport_stop_rx(struct uart_port *port) | 379 | static void sport_stop_rx(struct uart_port *port) |
380 | { | 380 | { |
381 | struct sport_uart_port *up = (struct sport_uart_port *)port; | 381 | struct sport_uart_port *up = (struct sport_uart_port *)port; |
382 | 382 | ||
383 | pr_debug("%s enter\n", __FUNCTION__); | 383 | pr_debug("%s enter\n", __func__); |
384 | /* Disable sport to stop rx */ | 384 | /* Disable sport to stop rx */ |
385 | SPORT_PUT_RCR1(up, (SPORT_GET_RCR1(up) & ~RSPEN)); | 385 | SPORT_PUT_RCR1(up, (SPORT_GET_RCR1(up) & ~RSPEN)); |
386 | SSYNC(); | 386 | SSYNC(); |
@@ -388,19 +388,19 @@ static void sport_stop_rx(struct uart_port *port) | |||
388 | 388 | ||
389 | static void sport_enable_ms(struct uart_port *port) | 389 | static void sport_enable_ms(struct uart_port *port) |
390 | { | 390 | { |
391 | pr_debug("%s enter\n", __FUNCTION__); | 391 | pr_debug("%s enter\n", __func__); |
392 | } | 392 | } |
393 | 393 | ||
394 | static void sport_break_ctl(struct uart_port *port, int break_state) | 394 | static void sport_break_ctl(struct uart_port *port, int break_state) |
395 | { | 395 | { |
396 | pr_debug("%s enter\n", __FUNCTION__); | 396 | pr_debug("%s enter\n", __func__); |
397 | } | 397 | } |
398 | 398 | ||
399 | static void sport_shutdown(struct uart_port *port) | 399 | static void sport_shutdown(struct uart_port *port) |
400 | { | 400 | { |
401 | struct sport_uart_port *up = (struct sport_uart_port *)port; | 401 | struct sport_uart_port *up = (struct sport_uart_port *)port; |
402 | 402 | ||
403 | pr_debug("%s enter\n", __FUNCTION__); | 403 | pr_debug("%s enter\n", __func__); |
404 | 404 | ||
405 | /* Disable sport */ | 405 | /* Disable sport */ |
406 | SPORT_PUT_TCR1(up, (SPORT_GET_TCR1(up) & ~TSPEN)); | 406 | SPORT_PUT_TCR1(up, (SPORT_GET_TCR1(up) & ~TSPEN)); |
@@ -421,7 +421,7 @@ static void sport_shutdown(struct uart_port *port) | |||
421 | static void sport_set_termios(struct uart_port *port, | 421 | static void sport_set_termios(struct uart_port *port, |
422 | struct termios *termios, struct termios *old) | 422 | struct termios *termios, struct termios *old) |
423 | { | 423 | { |
424 | pr_debug("%s enter, c_cflag:%08x\n", __FUNCTION__, termios->c_cflag); | 424 | pr_debug("%s enter, c_cflag:%08x\n", __func__, termios->c_cflag); |
425 | uart_update_timeout(port, CS8, port->uartclk); | 425 | uart_update_timeout(port, CS8, port->uartclk); |
426 | } | 426 | } |
427 | 427 | ||
@@ -429,18 +429,18 @@ static const char *sport_type(struct uart_port *port) | |||
429 | { | 429 | { |
430 | struct sport_uart_port *up = (struct sport_uart_port *)port; | 430 | struct sport_uart_port *up = (struct sport_uart_port *)port; |
431 | 431 | ||
432 | pr_debug("%s enter\n", __FUNCTION__); | 432 | pr_debug("%s enter\n", __func__); |
433 | return up->name; | 433 | return up->name; |
434 | } | 434 | } |
435 | 435 | ||
436 | static void sport_release_port(struct uart_port *port) | 436 | static void sport_release_port(struct uart_port *port) |
437 | { | 437 | { |
438 | pr_debug("%s enter\n", __FUNCTION__); | 438 | pr_debug("%s enter\n", __func__); |
439 | } | 439 | } |
440 | 440 | ||
441 | static int sport_request_port(struct uart_port *port) | 441 | static int sport_request_port(struct uart_port *port) |
442 | { | 442 | { |
443 | pr_debug("%s enter\n", __FUNCTION__); | 443 | pr_debug("%s enter\n", __func__); |
444 | return 0; | 444 | return 0; |
445 | } | 445 | } |
446 | 446 | ||
@@ -448,13 +448,13 @@ static void sport_config_port(struct uart_port *port, int flags) | |||
448 | { | 448 | { |
449 | struct sport_uart_port *up = (struct sport_uart_port *)port; | 449 | struct sport_uart_port *up = (struct sport_uart_port *)port; |
450 | 450 | ||
451 | pr_debug("%s enter\n", __FUNCTION__); | 451 | pr_debug("%s enter\n", __func__); |
452 | up->port.type = PORT_BFIN_SPORT; | 452 | up->port.type = PORT_BFIN_SPORT; |
453 | } | 453 | } |
454 | 454 | ||
455 | static int sport_verify_port(struct uart_port *port, struct serial_struct *ser) | 455 | static int sport_verify_port(struct uart_port *port, struct serial_struct *ser) |
456 | { | 456 | { |
457 | pr_debug("%s enter\n", __FUNCTION__); | 457 | pr_debug("%s enter\n", __func__); |
458 | return 0; | 458 | return 0; |
459 | } | 459 | } |
460 | 460 | ||
@@ -527,7 +527,7 @@ static int sport_uart_suspend(struct platform_device *dev, pm_message_t state) | |||
527 | { | 527 | { |
528 | struct sport_uart_port *sport = platform_get_drvdata(dev); | 528 | struct sport_uart_port *sport = platform_get_drvdata(dev); |
529 | 529 | ||
530 | pr_debug("%s enter\n", __FUNCTION__); | 530 | pr_debug("%s enter\n", __func__); |
531 | if (sport) | 531 | if (sport) |
532 | uart_suspend_port(&sport_uart_reg, &sport->port); | 532 | uart_suspend_port(&sport_uart_reg, &sport->port); |
533 | 533 | ||
@@ -538,7 +538,7 @@ static int sport_uart_resume(struct platform_device *dev) | |||
538 | { | 538 | { |
539 | struct sport_uart_port *sport = platform_get_drvdata(dev); | 539 | struct sport_uart_port *sport = platform_get_drvdata(dev); |
540 | 540 | ||
541 | pr_debug("%s enter\n", __FUNCTION__); | 541 | pr_debug("%s enter\n", __func__); |
542 | if (sport) | 542 | if (sport) |
543 | uart_resume_port(&sport_uart_reg, &sport->port); | 543 | uart_resume_port(&sport_uart_reg, &sport->port); |
544 | 544 | ||
@@ -547,7 +547,7 @@ static int sport_uart_resume(struct platform_device *dev) | |||
547 | 547 | ||
548 | static int sport_uart_probe(struct platform_device *dev) | 548 | static int sport_uart_probe(struct platform_device *dev) |
549 | { | 549 | { |
550 | pr_debug("%s enter\n", __FUNCTION__); | 550 | pr_debug("%s enter\n", __func__); |
551 | sport_uart_ports[dev->id].port.dev = &dev->dev; | 551 | sport_uart_ports[dev->id].port.dev = &dev->dev; |
552 | uart_add_one_port(&sport_uart_reg, &sport_uart_ports[dev->id].port); | 552 | uart_add_one_port(&sport_uart_reg, &sport_uart_ports[dev->id].port); |
553 | platform_set_drvdata(dev, &sport_uart_ports[dev->id]); | 553 | platform_set_drvdata(dev, &sport_uart_ports[dev->id]); |
@@ -559,7 +559,7 @@ static int sport_uart_remove(struct platform_device *dev) | |||
559 | { | 559 | { |
560 | struct sport_uart_port *sport = platform_get_drvdata(dev); | 560 | struct sport_uart_port *sport = platform_get_drvdata(dev); |
561 | 561 | ||
562 | pr_debug("%s enter\n", __FUNCTION__); | 562 | pr_debug("%s enter\n", __func__); |
563 | platform_set_drvdata(dev, NULL); | 563 | platform_set_drvdata(dev, NULL); |
564 | 564 | ||
565 | if (sport) | 565 | if (sport) |
@@ -582,7 +582,7 @@ static int __init sport_uart_init(void) | |||
582 | { | 582 | { |
583 | int ret; | 583 | int ret; |
584 | 584 | ||
585 | pr_debug("%s enter\n", __FUNCTION__); | 585 | pr_debug("%s enter\n", __func__); |
586 | ret = uart_register_driver(&sport_uart_reg); | 586 | ret = uart_register_driver(&sport_uart_reg); |
587 | if (ret != 0) { | 587 | if (ret != 0) { |
588 | printk(KERN_ERR "Failed to register %s:%d\n", | 588 | printk(KERN_ERR "Failed to register %s:%d\n", |
@@ -597,13 +597,13 @@ static int __init sport_uart_init(void) | |||
597 | } | 597 | } |
598 | 598 | ||
599 | 599 | ||
600 | pr_debug("%s exit\n", __FUNCTION__); | 600 | pr_debug("%s exit\n", __func__); |
601 | return ret; | 601 | return ret; |
602 | } | 602 | } |
603 | 603 | ||
604 | static void __exit sport_uart_exit(void) | 604 | static void __exit sport_uart_exit(void) |
605 | { | 605 | { |
606 | pr_debug("%s enter\n", __FUNCTION__); | 606 | pr_debug("%s enter\n", __func__); |
607 | platform_driver_unregister(&sport_uart_driver); | 607 | platform_driver_unregister(&sport_uart_driver); |
608 | uart_unregister_driver(&sport_uart_reg); | 608 | uart_unregister_driver(&sport_uart_reg); |
609 | } | 609 | } |
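The entire bfin_sport_uart.c portion of this patch is one mechanical rename: __FUNCTION__ is the older GCC-specific spelling, while __func__ is the C99 predefined identifier the kernel has standardised on. Both expand to the enclosing function's name, as in this trivial illustration:

static void demo(void)
{
	pr_debug("%s enter\n", __func__);	/* logs "demo enter" */
}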
diff --git a/drivers/serial/jsm/jsm_tty.c b/drivers/serial/jsm/jsm_tty.c index a697914ae3d0..3547558d2caf 100644 --- a/drivers/serial/jsm/jsm_tty.c +++ b/drivers/serial/jsm/jsm_tty.c | |||
@@ -272,7 +272,7 @@ static void jsm_tty_close(struct uart_port *port) | |||
272 | jsm_printk(CLOSE, INFO, &channel->ch_bd->pci_dev, "start\n"); | 272 | jsm_printk(CLOSE, INFO, &channel->ch_bd->pci_dev, "start\n"); |
273 | 273 | ||
274 | bd = channel->ch_bd; | 274 | bd = channel->ch_bd; |
275 | ts = channel->uart_port.info->port.tty->termios; | 275 | ts = port->info->port.tty->termios; |
276 | 276 | ||
277 | channel->ch_flags &= ~(CH_STOPI); | 277 | channel->ch_flags &= ~(CH_STOPI); |
278 | 278 | ||
diff --git a/drivers/serial/serial_core.c b/drivers/serial/serial_core.c index 874786a11fe9..dc68b7e0c930 100644 --- a/drivers/serial/serial_core.c +++ b/drivers/serial/serial_core.c | |||
@@ -50,7 +50,7 @@ static struct lock_class_key port_lock_key; | |||
50 | 50 | ||
51 | #define HIGH_BITS_OFFSET ((sizeof(long)-sizeof(int))*8) | 51 | #define HIGH_BITS_OFFSET ((sizeof(long)-sizeof(int))*8) |
52 | 52 | ||
53 | #define uart_users(state) ((state)->count + ((state)->info ? (state)->info->port.blocked_open : 0)) | 53 | #define uart_users(state) ((state)->count + (state)->info.port.blocked_open) |
54 | 54 | ||
55 | #ifdef CONFIG_SERIAL_CORE_CONSOLE | 55 | #ifdef CONFIG_SERIAL_CORE_CONSOLE |
56 | #define uart_console(port) ((port)->cons && (port)->cons->index == (port)->line) | 56 | #define uart_console(port) ((port)->cons && (port)->cons->index == (port)->line) |
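Dropping the ternary from uart_users() is the visible tip of this patch's theme in serial_core.c: uart_info stops being a pointer that was allocated on first open and becomes a field embedded directly in uart_state, so state->info can never be NULL. Roughly, with the field list abbreviated (the real definition lives in include/linux/serial_core.h):

struct uart_state {
	/* ... close_delay, closing_wait, count, mutex, port ... */
	struct uart_info	info;	/* was: struct uart_info *info */
};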
@@ -94,7 +94,7 @@ static void __uart_start(struct tty_struct *tty) | |||
94 | struct uart_state *state = tty->driver_data; | 94 | struct uart_state *state = tty->driver_data; |
95 | struct uart_port *port = state->port; | 95 | struct uart_port *port = state->port; |
96 | 96 | ||
97 | if (!uart_circ_empty(&state->info->xmit) && state->info->xmit.buf && | 97 | if (!uart_circ_empty(&state->info.xmit) && state->info.xmit.buf && |
98 | !tty->stopped && !tty->hw_stopped) | 98 | !tty->stopped && !tty->hw_stopped) |
99 | port->ops->start_tx(port); | 99 | port->ops->start_tx(port); |
100 | } | 100 | } |
@@ -113,7 +113,7 @@ static void uart_start(struct tty_struct *tty) | |||
113 | static void uart_tasklet_action(unsigned long data) | 113 | static void uart_tasklet_action(unsigned long data) |
114 | { | 114 | { |
115 | struct uart_state *state = (struct uart_state *)data; | 115 | struct uart_state *state = (struct uart_state *)data; |
116 | tty_wakeup(state->info->port.tty); | 116 | tty_wakeup(state->info.port.tty); |
117 | } | 117 | } |
118 | 118 | ||
119 | static inline void | 119 | static inline void |
@@ -139,7 +139,7 @@ uart_update_mctrl(struct uart_port *port, unsigned int set, unsigned int clear) | |||
139 | */ | 139 | */ |
140 | static int uart_startup(struct uart_state *state, int init_hw) | 140 | static int uart_startup(struct uart_state *state, int init_hw) |
141 | { | 141 | { |
142 | struct uart_info *info = state->info; | 142 | struct uart_info *info = &state->info; |
143 | struct uart_port *port = state->port; | 143 | struct uart_port *port = state->port; |
144 | unsigned long page; | 144 | unsigned long page; |
145 | int retval = 0; | 145 | int retval = 0; |
@@ -212,14 +212,15 @@ static int uart_startup(struct uart_state *state, int init_hw) | |||
212 | */ | 212 | */ |
213 | static void uart_shutdown(struct uart_state *state) | 213 | static void uart_shutdown(struct uart_state *state) |
214 | { | 214 | { |
215 | struct uart_info *info = state->info; | 215 | struct uart_info *info = &state->info; |
216 | struct uart_port *port = state->port; | 216 | struct uart_port *port = state->port; |
217 | struct tty_struct *tty = info->port.tty; | ||
217 | 218 | ||
218 | /* | 219 | /* |
219 | * Set the TTY IO error marker | 220 | * Set the TTY IO error marker |
220 | */ | 221 | */ |
221 | if (info->port.tty) | 222 | if (tty) |
222 | set_bit(TTY_IO_ERROR, &info->port.tty->flags); | 223 | set_bit(TTY_IO_ERROR, &tty->flags); |
223 | 224 | ||
224 | if (info->flags & UIF_INITIALIZED) { | 225 | if (info->flags & UIF_INITIALIZED) { |
225 | info->flags &= ~UIF_INITIALIZED; | 226 | info->flags &= ~UIF_INITIALIZED; |
@@ -227,7 +228,7 @@ static void uart_shutdown(struct uart_state *state) | |||
227 | /* | 228 | /* |
228 | * Turn off DTR and RTS early. | 229 | * Turn off DTR and RTS early. |
229 | */ | 230 | */ |
230 | if (!info->port.tty || (info->port.tty->termios->c_cflag & HUPCL)) | 231 | if (!tty || (tty->termios->c_cflag & HUPCL)) |
231 | uart_clear_mctrl(port, TIOCM_DTR | TIOCM_RTS); | 232 | uart_clear_mctrl(port, TIOCM_DTR | TIOCM_RTS); |
232 | 233 | ||
233 | /* | 234 | /* |
@@ -427,7 +428,7 @@ EXPORT_SYMBOL(uart_get_divisor); | |||
427 | static void | 428 | static void |
428 | uart_change_speed(struct uart_state *state, struct ktermios *old_termios) | 429 | uart_change_speed(struct uart_state *state, struct ktermios *old_termios) |
429 | { | 430 | { |
430 | struct tty_struct *tty = state->info->port.tty; | 431 | struct tty_struct *tty = state->info.port.tty; |
431 | struct uart_port *port = state->port; | 432 | struct uart_port *port = state->port; |
432 | struct ktermios *termios; | 433 | struct ktermios *termios; |
433 | 434 | ||
@@ -444,14 +445,14 @@ uart_change_speed(struct uart_state *state, struct ktermios *old_termios) | |||
444 | * Set flags based on termios cflag | 445 | * Set flags based on termios cflag |
445 | */ | 446 | */ |
446 | if (termios->c_cflag & CRTSCTS) | 447 | if (termios->c_cflag & CRTSCTS) |
447 | state->info->flags |= UIF_CTS_FLOW; | 448 | state->info.flags |= UIF_CTS_FLOW; |
448 | else | 449 | else |
449 | state->info->flags &= ~UIF_CTS_FLOW; | 450 | state->info.flags &= ~UIF_CTS_FLOW; |
450 | 451 | ||
451 | if (termios->c_cflag & CLOCAL) | 452 | if (termios->c_cflag & CLOCAL) |
452 | state->info->flags &= ~UIF_CHECK_CD; | 453 | state->info.flags &= ~UIF_CHECK_CD; |
453 | else | 454 | else |
454 | state->info->flags |= UIF_CHECK_CD; | 455 | state->info.flags |= UIF_CHECK_CD; |
455 | 456 | ||
456 | port->ops->set_termios(port, termios, old_termios); | 457 | port->ops->set_termios(port, termios, old_termios); |
457 | } | 458 | } |
@@ -479,7 +480,7 @@ static int uart_put_char(struct tty_struct *tty, unsigned char ch) | |||
479 | { | 480 | { |
480 | struct uart_state *state = tty->driver_data; | 481 | struct uart_state *state = tty->driver_data; |
481 | 482 | ||
482 | return __uart_put_char(state->port, &state->info->xmit, ch); | 483 | return __uart_put_char(state->port, &state->info.xmit, ch); |
483 | } | 484 | } |
484 | 485 | ||
485 | static void uart_flush_chars(struct tty_struct *tty) | 486 | static void uart_flush_chars(struct tty_struct *tty) |
@@ -500,13 +501,13 @@ uart_write(struct tty_struct *tty, const unsigned char *buf, int count) | |||
500 | * This means you called this function _after_ the port was | 501 | * This means you called this function _after_ the port was |
501 | * closed. No cookie for you. | 502 | * closed. No cookie for you. |
502 | */ | 503 | */ |
503 | if (!state || !state->info) { | 504 | if (!state) { |
504 | WARN_ON(1); | 505 | WARN_ON(1); |
505 | return -EL3HLT; | 506 | return -EL3HLT; |
506 | } | 507 | } |
507 | 508 | ||
508 | port = state->port; | 509 | port = state->port; |
509 | circ = &state->info->xmit; | 510 | circ = &state->info.xmit; |
510 | 511 | ||
511 | if (!circ->buf) | 512 | if (!circ->buf) |
512 | return 0; | 513 | return 0; |
@@ -537,7 +538,7 @@ static int uart_write_room(struct tty_struct *tty) | |||
537 | int ret; | 538 | int ret; |
538 | 539 | ||
539 | spin_lock_irqsave(&state->port->lock, flags); | 540 | spin_lock_irqsave(&state->port->lock, flags); |
540 | ret = uart_circ_chars_free(&state->info->xmit); | 541 | ret = uart_circ_chars_free(&state->info.xmit); |
541 | spin_unlock_irqrestore(&state->port->lock, flags); | 542 | spin_unlock_irqrestore(&state->port->lock, flags); |
542 | return ret; | 543 | return ret; |
543 | } | 544 | } |
@@ -549,7 +550,7 @@ static int uart_chars_in_buffer(struct tty_struct *tty) | |||
549 | int ret; | 550 | int ret; |
550 | 551 | ||
551 | spin_lock_irqsave(&state->port->lock, flags); | 552 | spin_lock_irqsave(&state->port->lock, flags); |
552 | ret = uart_circ_chars_pending(&state->info->xmit); | 553 | ret = uart_circ_chars_pending(&state->info.xmit); |
553 | spin_unlock_irqrestore(&state->port->lock, flags); | 554 | spin_unlock_irqrestore(&state->port->lock, flags); |
554 | return ret; | 555 | return ret; |
555 | } | 556 | } |
@@ -564,7 +565,7 @@ static void uart_flush_buffer(struct tty_struct *tty) | |||
564 | * This means you called this function _after_ the port was | 565 | * This means you called this function _after_ the port was |
565 | * closed. No cookie for you. | 566 | * closed. No cookie for you. |
566 | */ | 567 | */ |
567 | if (!state || !state->info) { | 568 | if (!state) { |
568 | WARN_ON(1); | 569 | WARN_ON(1); |
569 | return; | 570 | return; |
570 | } | 571 | } |
@@ -573,7 +574,7 @@ static void uart_flush_buffer(struct tty_struct *tty) | |||
573 | pr_debug("uart_flush_buffer(%d) called\n", tty->index); | 574 | pr_debug("uart_flush_buffer(%d) called\n", tty->index); |
574 | 575 | ||
575 | spin_lock_irqsave(&port->lock, flags); | 576 | spin_lock_irqsave(&port->lock, flags); |
576 | uart_circ_clear(&state->info->xmit); | 577 | uart_circ_clear(&state->info.xmit); |
577 | if (port->ops->flush_buffer) | 578 | if (port->ops->flush_buffer) |
578 | port->ops->flush_buffer(port); | 579 | port->ops->flush_buffer(port); |
579 | spin_unlock_irqrestore(&port->lock, flags); | 580 | spin_unlock_irqrestore(&port->lock, flags); |
@@ -837,15 +838,15 @@ static int uart_set_info(struct uart_state *state, | |||
837 | state->closing_wait = closing_wait; | 838 | state->closing_wait = closing_wait; |
838 | if (new_serial.xmit_fifo_size) | 839 | if (new_serial.xmit_fifo_size) |
839 | port->fifosize = new_serial.xmit_fifo_size; | 840 | port->fifosize = new_serial.xmit_fifo_size; |
840 | if (state->info->port.tty) | 841 | if (state->info.port.tty) |
841 | state->info->port.tty->low_latency = | 842 | state->info.port.tty->low_latency = |
842 | (port->flags & UPF_LOW_LATENCY) ? 1 : 0; | 843 | (port->flags & UPF_LOW_LATENCY) ? 1 : 0; |
843 | 844 | ||
844 | check_and_exit: | 845 | check_and_exit: |
845 | retval = 0; | 846 | retval = 0; |
846 | if (port->type == PORT_UNKNOWN) | 847 | if (port->type == PORT_UNKNOWN) |
847 | goto exit; | 848 | goto exit; |
848 | if (state->info->flags & UIF_INITIALIZED) { | 849 | if (state->info.flags & UIF_INITIALIZED) { |
849 | if (((old_flags ^ port->flags) & UPF_SPD_MASK) || | 850 | if (((old_flags ^ port->flags) & UPF_SPD_MASK) || |
850 | old_custom_divisor != port->custom_divisor) { | 851 | old_custom_divisor != port->custom_divisor) { |
851 | /* | 852 | /* |
@@ -858,7 +859,7 @@ static int uart_set_info(struct uart_state *state, | |||
858 | printk(KERN_NOTICE | 859 | printk(KERN_NOTICE |
859 | "%s sets custom speed on %s. This " | 860 | "%s sets custom speed on %s. This " |
860 | "is deprecated.\n", current->comm, | 861 | "is deprecated.\n", current->comm, |
861 | tty_name(state->info->port.tty, buf)); | 862 | tty_name(state->info.port.tty, buf)); |
862 | } | 863 | } |
863 | uart_change_speed(state, NULL); | 864 | uart_change_speed(state, NULL); |
864 | } | 865 | } |
@@ -889,8 +890,8 @@ static int uart_get_lsr_info(struct uart_state *state, | |||
889 | * interrupt happens). | 890 | * interrupt happens). |
890 | */ | 891 | */ |
891 | if (port->x_char || | 892 | if (port->x_char || |
892 | ((uart_circ_chars_pending(&state->info->xmit) > 0) && | 893 | ((uart_circ_chars_pending(&state->info.xmit) > 0) && |
893 | !state->info->port.tty->stopped && !state->info->port.tty->hw_stopped)) | 894 | !state->info.port.tty->stopped && !state->info.port.tty->hw_stopped)) |
894 | result &= ~TIOCSER_TEMT; | 895 | result &= ~TIOCSER_TEMT; |
895 | 896 | ||
896 | return put_user(result, value); | 897 | return put_user(result, value); |
@@ -1017,7 +1018,7 @@ uart_wait_modem_status(struct uart_state *state, unsigned long arg) | |||
1017 | port->ops->enable_ms(port); | 1018 | port->ops->enable_ms(port); |
1018 | spin_unlock_irq(&port->lock); | 1019 | spin_unlock_irq(&port->lock); |
1019 | 1020 | ||
1020 | add_wait_queue(&state->info->delta_msr_wait, &wait); | 1021 | add_wait_queue(&state->info.delta_msr_wait, &wait); |
1021 | for (;;) { | 1022 | for (;;) { |
1022 | spin_lock_irq(&port->lock); | 1023 | spin_lock_irq(&port->lock); |
1023 | memcpy(&cnow, &port->icount, sizeof(struct uart_icount)); | 1024 | memcpy(&cnow, &port->icount, sizeof(struct uart_icount)); |
@@ -1045,7 +1046,7 @@ uart_wait_modem_status(struct uart_state *state, unsigned long arg) | |||
1045 | } | 1046 | } |
1046 | 1047 | ||
1047 | current->state = TASK_RUNNING; | 1048 | current->state = TASK_RUNNING; |
1048 | remove_wait_queue(&state->info->delta_msr_wait, &wait); | 1049 | remove_wait_queue(&state->info.delta_msr_wait, &wait); |
1049 | 1050 | ||
1050 | return ret; | 1051 | return ret; |
1051 | } | 1052 | } |
@@ -1241,7 +1242,7 @@ static void uart_set_termios(struct tty_struct *tty, | |||
1241 | */ | 1242 | */ |
1242 | if (!(old_termios->c_cflag & CLOCAL) && | 1243 | if (!(old_termios->c_cflag & CLOCAL) && |
1243 | (tty->termios->c_cflag & CLOCAL)) | 1244 | (tty->termios->c_cflag & CLOCAL)) |
1244 | wake_up_interruptible(&state->info->port.open_wait); | 1245 | wake_up_interruptible(&info->port.open_wait); |
1245 | #endif | 1246 | #endif |
1246 | } | 1247 | } |
1247 | 1248 | ||
@@ -1303,7 +1304,7 @@ static void uart_close(struct tty_struct *tty, struct file *filp) | |||
1303 | * At this point, we stop accepting input. To do this, we | 1304 | * At this point, we stop accepting input. To do this, we |
1304 | * disable the receive line status interrupts. | 1305 | * disable the receive line status interrupts. |
1305 | */ | 1306 | */ |
1306 | if (state->info->flags & UIF_INITIALIZED) { | 1307 | if (state->info.flags & UIF_INITIALIZED) { |
1307 | unsigned long flags; | 1308 | unsigned long flags; |
1308 | spin_lock_irqsave(&port->lock, flags); | 1309 | spin_lock_irqsave(&port->lock, flags); |
1309 | port->ops->stop_rx(port); | 1310 | port->ops->stop_rx(port); |
@@ -1322,9 +1323,9 @@ static void uart_close(struct tty_struct *tty, struct file *filp) | |||
1322 | tty_ldisc_flush(tty); | 1323 | tty_ldisc_flush(tty); |
1323 | 1324 | ||
1324 | tty->closing = 0; | 1325 | tty->closing = 0; |
1325 | state->info->port.tty = NULL; | 1326 | state->info.port.tty = NULL; |
1326 | 1327 | ||
1327 | if (state->info->port.blocked_open) { | 1328 | if (state->info.port.blocked_open) { |
1328 | if (state->close_delay) | 1329 | if (state->close_delay) |
1329 | msleep_interruptible(state->close_delay); | 1330 | msleep_interruptible(state->close_delay); |
1330 | } else if (!uart_console(port)) { | 1331 | } else if (!uart_console(port)) { |
@@ -1334,8 +1335,8 @@ static void uart_close(struct tty_struct *tty, struct file *filp) | |||
1334 | /* | 1335 | /* |
1335 | * Wake up anyone trying to open this port. | 1336 | * Wake up anyone trying to open this port. |
1336 | */ | 1337 | */ |
1337 | state->info->flags &= ~UIF_NORMAL_ACTIVE; | 1338 | state->info.flags &= ~UIF_NORMAL_ACTIVE; |
1338 | wake_up_interruptible(&state->info->port.open_wait); | 1339 | wake_up_interruptible(&state->info.port.open_wait); |
1339 | 1340 | ||
1340 | done: | 1341 | done: |
1341 | mutex_unlock(&state->mutex); | 1342 | mutex_unlock(&state->mutex); |
@@ -1409,19 +1410,20 @@ static void uart_wait_until_sent(struct tty_struct *tty, int timeout) | |||
1409 | static void uart_hangup(struct tty_struct *tty) | 1410 | static void uart_hangup(struct tty_struct *tty) |
1410 | { | 1411 | { |
1411 | struct uart_state *state = tty->driver_data; | 1412 | struct uart_state *state = tty->driver_data; |
1413 | struct uart_info *info = &state->info; | ||
1412 | 1414 | ||
1413 | BUG_ON(!kernel_locked()); | 1415 | BUG_ON(!kernel_locked()); |
1414 | pr_debug("uart_hangup(%d)\n", state->port->line); | 1416 | pr_debug("uart_hangup(%d)\n", state->port->line); |
1415 | 1417 | ||
1416 | mutex_lock(&state->mutex); | 1418 | mutex_lock(&state->mutex); |
1417 | if (state->info && state->info->flags & UIF_NORMAL_ACTIVE) { | 1419 | if (info->flags & UIF_NORMAL_ACTIVE) { |
1418 | uart_flush_buffer(tty); | 1420 | uart_flush_buffer(tty); |
1419 | uart_shutdown(state); | 1421 | uart_shutdown(state); |
1420 | state->count = 0; | 1422 | state->count = 0; |
1421 | state->info->flags &= ~UIF_NORMAL_ACTIVE; | 1423 | info->flags &= ~UIF_NORMAL_ACTIVE; |
1422 | state->info->port.tty = NULL; | 1424 | info->port.tty = NULL; |
1423 | wake_up_interruptible(&state->info->port.open_wait); | 1425 | wake_up_interruptible(&info->port.open_wait); |
1424 | wake_up_interruptible(&state->info->delta_msr_wait); | 1426 | wake_up_interruptible(&info->delta_msr_wait); |
1425 | } | 1427 | } |
1426 | mutex_unlock(&state->mutex); | 1428 | mutex_unlock(&state->mutex); |
1427 | } | 1429 | } |
@@ -1434,7 +1436,7 @@ static void uart_hangup(struct tty_struct *tty) | |||
1434 | */ | 1436 | */ |
1435 | static void uart_update_termios(struct uart_state *state) | 1437 | static void uart_update_termios(struct uart_state *state) |
1436 | { | 1438 | { |
1437 | struct tty_struct *tty = state->info->port.tty; | 1439 | struct tty_struct *tty = state->info.port.tty; |
1438 | struct uart_port *port = state->port; | 1440 | struct uart_port *port = state->port; |
1439 | 1441 | ||
1440 | if (uart_console(port) && port->cons->cflag) { | 1442 | if (uart_console(port) && port->cons->cflag) { |
@@ -1469,7 +1471,7 @@ static int | |||
1469 | uart_block_til_ready(struct file *filp, struct uart_state *state) | 1471 | uart_block_til_ready(struct file *filp, struct uart_state *state) |
1470 | { | 1472 | { |
1471 | DECLARE_WAITQUEUE(wait, current); | 1473 | DECLARE_WAITQUEUE(wait, current); |
1472 | struct uart_info *info = state->info; | 1474 | struct uart_info *info = &state->info; |
1473 | struct uart_port *port = state->port; | 1475 | struct uart_port *port = state->port; |
1474 | unsigned int mctrl; | 1476 | unsigned int mctrl; |
1475 | 1477 | ||
@@ -1563,28 +1565,6 @@ static struct uart_state *uart_get(struct uart_driver *drv, int line) | |||
1563 | ret = -ENXIO; | 1565 | ret = -ENXIO; |
1564 | goto err_unlock; | 1566 | goto err_unlock; |
1565 | } | 1567 | } |
1566 | |||
1567 | /* BKL: RACE HERE - LEAK */ | ||
1568 | /* We should move this into the uart_state structure and kill off | ||
1569 | this whole complexity */ | ||
1570 | if (!state->info) { | ||
1571 | state->info = kzalloc(sizeof(struct uart_info), GFP_KERNEL); | ||
1572 | if (state->info) { | ||
1573 | init_waitqueue_head(&state->info->port.open_wait); | ||
1574 | init_waitqueue_head(&state->info->delta_msr_wait); | ||
1575 | |||
1576 | /* | ||
1577 | * Link the info into the other structures. | ||
1578 | */ | ||
1579 | state->port->info = state->info; | ||
1580 | |||
1581 | tasklet_init(&state->info->tlet, uart_tasklet_action, | ||
1582 | (unsigned long)state); | ||
1583 | } else { | ||
1584 | ret = -ENOMEM; | ||
1585 | goto err_unlock; | ||
1586 | } | ||
1587 | } | ||
1588 | return state; | 1568 | return state; |
1589 | 1569 | ||
1590 | err_unlock: | 1570 | err_unlock: |
@@ -1641,9 +1621,10 @@ static int uart_open(struct tty_struct *tty, struct file *filp) | |||
1641 | * Any failures from here onwards should not touch the count. | 1621 | * Any failures from here onwards should not touch the count. |
1642 | */ | 1622 | */ |
1643 | tty->driver_data = state; | 1623 | tty->driver_data = state; |
1624 | state->port->info = &state->info; | ||
1644 | tty->low_latency = (state->port->flags & UPF_LOW_LATENCY) ? 1 : 0; | 1625 | tty->low_latency = (state->port->flags & UPF_LOW_LATENCY) ? 1 : 0; |
1645 | tty->alt_speed = 0; | 1626 | tty->alt_speed = 0; |
1646 | state->info->port.tty = tty; | 1627 | state->info.port.tty = tty; |
1647 | 1628 | ||
1648 | /* | 1629 | /* |
1649 | * If the port is in the middle of closing, bail out now. | 1630 | * If the port is in the middle of closing, bail out now. |
@@ -1676,8 +1657,8 @@ static int uart_open(struct tty_struct *tty, struct file *filp) | |||
1676 | /* | 1657 | /* |
1677 | * If this is the first open to succeed, adjust things to suit. | 1658 | * If this is the first open to succeed, adjust things to suit. |
1678 | */ | 1659 | */ |
1679 | if (retval == 0 && !(state->info->flags & UIF_NORMAL_ACTIVE)) { | 1660 | if (retval == 0 && !(state->info.flags & UIF_NORMAL_ACTIVE)) { |
1680 | state->info->flags |= UIF_NORMAL_ACTIVE; | 1661 | state->info.flags |= UIF_NORMAL_ACTIVE; |
1681 | 1662 | ||
1682 | uart_update_termios(state); | 1663 | uart_update_termios(state); |
1683 | } | 1664 | } |
@@ -2028,11 +2009,11 @@ int uart_suspend_port(struct uart_driver *drv, struct uart_port *port) | |||
2028 | } | 2009 | } |
2029 | port->suspended = 1; | 2010 | port->suspended = 1; |
2030 | 2011 | ||
2031 | if (state->info && state->info->flags & UIF_INITIALIZED) { | 2012 | if (state->info.flags & UIF_INITIALIZED) { |
2032 | const struct uart_ops *ops = port->ops; | 2013 | const struct uart_ops *ops = port->ops; |
2033 | int tries; | 2014 | int tries; |
2034 | 2015 | ||
2035 | state->info->flags = (state->info->flags & ~UIF_INITIALIZED) | 2016 | state->info.flags = (state->info.flags & ~UIF_INITIALIZED) |
2036 | | UIF_SUSPENDED; | 2017 | | UIF_SUSPENDED; |
2037 | 2018 | ||
2038 | spin_lock_irq(&port->lock); | 2019 | spin_lock_irq(&port->lock); |
@@ -2107,15 +2088,15 @@ int uart_resume_port(struct uart_driver *drv, struct uart_port *port) | |||
2107 | /* | 2088 | /* |
2108 | * If that's unset, use the tty termios setting. | 2089 | * If that's unset, use the tty termios setting. |
2109 | */ | 2090 | */ |
2110 | if (state->info && state->info->port.tty && termios.c_cflag == 0) | 2091 | if (state->info.port.tty && termios.c_cflag == 0) |
2111 | termios = *state->info->port.tty->termios; | 2092 | termios = *state->info.port.tty->termios; |
2112 | 2093 | ||
2113 | uart_change_pm(state, 0); | 2094 | uart_change_pm(state, 0); |
2114 | port->ops->set_termios(port, &termios, NULL); | 2095 | port->ops->set_termios(port, &termios, NULL); |
2115 | console_start(port->cons); | 2096 | console_start(port->cons); |
2116 | } | 2097 | } |
2117 | 2098 | ||
2118 | if (state->info && state->info->flags & UIF_SUSPENDED) { | 2099 | if (state->info.flags & UIF_SUSPENDED) { |
2119 | const struct uart_ops *ops = port->ops; | 2100 | const struct uart_ops *ops = port->ops; |
2120 | int ret; | 2101 | int ret; |
2121 | 2102 | ||
@@ -2130,7 +2111,7 @@ int uart_resume_port(struct uart_driver *drv, struct uart_port *port) | |||
2130 | ops->set_mctrl(port, port->mctrl); | 2111 | ops->set_mctrl(port, port->mctrl); |
2131 | ops->start_tx(port); | 2112 | ops->start_tx(port); |
2132 | spin_unlock_irq(&port->lock); | 2113 | spin_unlock_irq(&port->lock); |
2133 | state->info->flags |= UIF_INITIALIZED; | 2114 | state->info.flags |= UIF_INITIALIZED; |
2134 | } else { | 2115 | } else { |
2135 | /* | 2116 | /* |
2136 | * Failed to resume - maybe hardware went away? | 2117 | * Failed to resume - maybe hardware went away? |
@@ -2140,7 +2121,7 @@ int uart_resume_port(struct uart_driver *drv, struct uart_port *port) | |||
2140 | uart_shutdown(state); | 2121 | uart_shutdown(state); |
2141 | } | 2122 | } |
2142 | 2123 | ||
2143 | state->info->flags &= ~UIF_SUSPENDED; | 2124 | state->info.flags &= ~UIF_SUSPENDED; |
2144 | } | 2125 | } |
2145 | 2126 | ||
2146 | mutex_unlock(&state->mutex); | 2127 | mutex_unlock(&state->mutex); |
@@ -2198,11 +2179,14 @@ uart_configure_port(struct uart_driver *drv, struct uart_state *state, | |||
2198 | * Now do the auto configuration stuff. Note that config_port | 2179 | * Now do the auto configuration stuff. Note that config_port |
2199 | * is expected to claim the resources and map the port for us. | 2180 | * is expected to claim the resources and map the port for us. |
2200 | */ | 2181 | */ |
2201 | flags = UART_CONFIG_TYPE; | 2182 | flags = 0; |
2202 | if (port->flags & UPF_AUTO_IRQ) | 2183 | if (port->flags & UPF_AUTO_IRQ) |
2203 | flags |= UART_CONFIG_IRQ; | 2184 | flags |= UART_CONFIG_IRQ; |
2204 | if (port->flags & UPF_BOOT_AUTOCONF) { | 2185 | if (port->flags & UPF_BOOT_AUTOCONF) { |
2205 | port->type = PORT_UNKNOWN; | 2186 | if (!(port->flags & UPF_FIXED_TYPE)) { |
2187 | port->type = PORT_UNKNOWN; | ||
2188 | flags |= UART_CONFIG_TYPE; | ||
2189 | } | ||
2206 | port->ops->config_port(port, flags); | 2190 | port->ops->config_port(port, flags); |
2207 | } | 2191 | } |
2208 | 2192 | ||
@@ -2383,8 +2367,12 @@ int uart_register_driver(struct uart_driver *drv) | |||
2383 | 2367 | ||
2384 | state->close_delay = 500; /* .5 seconds */ | 2368 | state->close_delay = 500; /* .5 seconds */ |
2385 | state->closing_wait = 30000; /* 30 seconds */ | 2369 | state->closing_wait = 30000; /* 30 seconds */ |
2386 | |||
2387 | mutex_init(&state->mutex); | 2370 | mutex_init(&state->mutex); |
2371 | |||
2372 | tty_port_init(&state->info.port); | ||
2373 | init_waitqueue_head(&state->info.delta_msr_wait); | ||
2374 | tasklet_init(&state->info.tlet, uart_tasklet_action, | ||
2375 | (unsigned long)state); | ||
2388 | } | 2376 | } |
2389 | 2377 | ||
2390 | retval = tty_register_driver(normal); | 2378 | retval = tty_register_driver(normal); |
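Initialising the tty_port, the delta_msr_wait queue, and the wakeup tasklet once per line at driver registration removes any need for the racy first-open allocation that uart_get() used to perform (the "BKL: RACE HERE - LEAK" block deleted above). Note that tty_port_init() sets up the port's own open_wait queue itself, which is why only delta_msr_wait still gets an explicit init_waitqueue_head().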
@@ -2455,7 +2443,7 @@ int uart_add_one_port(struct uart_driver *drv, struct uart_port *port) | |||
2455 | state->pm_state = -1; | 2443 | state->pm_state = -1; |
2456 | 2444 | ||
2457 | port->cons = drv->cons; | 2445 | port->cons = drv->cons; |
2458 | port->info = state->info; | 2446 | port->info = &state->info; |
2459 | 2447 | ||
2460 | /* | 2448 | /* |
2461 | * If this port is a console, then the spinlock is already | 2449 | * If this port is a console, then the spinlock is already |
@@ -2527,18 +2515,11 @@ int uart_remove_one_port(struct uart_driver *drv, struct uart_port *port) | |||
2527 | */ | 2515 | */ |
2528 | tty_unregister_device(drv->tty_driver, port->line); | 2516 | tty_unregister_device(drv->tty_driver, port->line); |
2529 | 2517 | ||
2530 | info = state->info; | 2518 | info = &state->info; |
2531 | if (info && info->port.tty) | 2519 | if (info && info->port.tty) |
2532 | tty_vhangup(info->port.tty); | 2520 | tty_vhangup(info->port.tty); |
2533 | 2521 | ||
2534 | /* | 2522 | /* |
2535 | * All users of this port should now be disconnected from | ||
2536 | * this driver, and the port shut down. We should be the | ||
2537 | * only thread fiddling with this port from now on. | ||
2538 | */ | ||
2539 | state->info = NULL; | ||
2540 | |||
2541 | /* | ||
2542 | * Free the port IO and memory resources, if any. | 2523 | * Free the port IO and memory resources, if any. |
2543 | */ | 2524 | */ |
2544 | if (port->type != PORT_UNKNOWN) | 2525 | if (port->type != PORT_UNKNOWN) |
@@ -2552,10 +2533,8 @@ int uart_remove_one_port(struct uart_driver *drv, struct uart_port *port) | |||
2552 | /* | 2533 | /* |
2553 | * Kill the tasklet, and free resources. | 2534 | * Kill the tasklet, and free resources. |
2554 | */ | 2535 | */ |
2555 | if (info) { | 2536 | if (info) |
2556 | tasklet_kill(&info->tlet); | 2537 | tasklet_kill(&info->tlet); |
2557 | kfree(info); | ||
2558 | } | ||
2559 | 2538 | ||
2560 | state->port = NULL; | 2539 | state->port = NULL; |
2561 | mutex_unlock(&port_mutex); | 2540 | mutex_unlock(&port_mutex); |
diff --git a/drivers/usb/host/hwa-hc.c b/drivers/usb/host/hwa-hc.c index 64be4d88df11..8582236e4cad 100644 --- a/drivers/usb/host/hwa-hc.c +++ b/drivers/usb/host/hwa-hc.c | |||
@@ -54,7 +54,6 @@ | |||
54 | * DWA). | 54 | * DWA). |
55 | */ | 55 | */ |
56 | #include <linux/kernel.h> | 56 | #include <linux/kernel.h> |
57 | #include <linux/version.h> | ||
58 | #include <linux/init.h> | 57 | #include <linux/init.h> |
59 | #include <linux/module.h> | 58 | #include <linux/module.h> |
60 | #include <linux/workqueue.h> | 59 | #include <linux/workqueue.h> |
@@ -63,16 +62,12 @@ | |||
63 | #include "../wusbcore/wa-hc.h" | 62 | #include "../wusbcore/wa-hc.h" |
64 | #include "../wusbcore/wusbhc.h" | 63 | #include "../wusbcore/wusbhc.h" |
65 | 64 | ||
66 | #define D_LOCAL 0 | ||
67 | #include <linux/uwb/debug.h> | ||
68 | |||
69 | struct hwahc { | 65 | struct hwahc { |
70 | struct wusbhc wusbhc; /* has to be 1st */ | 66 | struct wusbhc wusbhc; /* has to be 1st */ |
71 | struct wahc wa; | 67 | struct wahc wa; |
72 | u8 buffer[16]; /* for misc usb transactions */ | ||
73 | }; | 68 | }; |
74 | 69 | ||
75 | /** | 70 | /* |
76 | * FIXME should be wusbhc | 71 | * FIXME should be wusbhc |
77 | * | 72 | * |
78 | * NOTE: we need to cache the Cluster ID because later...there is no | 73 | * NOTE: we need to cache the Cluster ID because later...there is no |
@@ -126,7 +121,6 @@ static int hwahc_op_reset(struct usb_hcd *usb_hcd) | |||
126 | struct hwahc *hwahc = container_of(wusbhc, struct hwahc, wusbhc); | 121 | struct hwahc *hwahc = container_of(wusbhc, struct hwahc, wusbhc); |
127 | struct device *dev = &hwahc->wa.usb_iface->dev; | 122 | struct device *dev = &hwahc->wa.usb_iface->dev; |
128 | 123 | ||
129 | d_fnstart(4, dev, "(hwahc %p)\n", hwahc); | ||
130 | mutex_lock(&wusbhc->mutex); | 124 | mutex_lock(&wusbhc->mutex); |
131 | wa_nep_disarm(&hwahc->wa); | 125 | wa_nep_disarm(&hwahc->wa); |
132 | result = __wa_set_feature(&hwahc->wa, WA_RESET); | 126 | result = __wa_set_feature(&hwahc->wa, WA_RESET); |
@@ -134,7 +128,6 @@ static int hwahc_op_reset(struct usb_hcd *usb_hcd) | |||
134 | dev_err(dev, "error commanding HC to reset: %d\n", result); | 128 | dev_err(dev, "error commanding HC to reset: %d\n", result); |
135 | goto error_unlock; | 129 | goto error_unlock; |
136 | } | 130 | } |
137 | d_printf(3, dev, "reset: waiting for device to change state\n"); | ||
138 | result = __wa_wait_status(&hwahc->wa, WA_STATUS_RESETTING, 0); | 131 | result = __wa_wait_status(&hwahc->wa, WA_STATUS_RESETTING, 0); |
139 | if (result < 0) { | 132 | if (result < 0) { |
140 | dev_err(dev, "error waiting for HC to reset: %d\n", result); | 133 | dev_err(dev, "error waiting for HC to reset: %d\n", result); |
@@ -142,7 +135,6 @@ static int hwahc_op_reset(struct usb_hcd *usb_hcd) | |||
142 | } | 135 | } |
143 | error_unlock: | 136 | error_unlock: |
144 | mutex_unlock(&wusbhc->mutex); | 137 | mutex_unlock(&wusbhc->mutex); |
145 | d_fnend(4, dev, "(hwahc %p) = %d\n", hwahc, result); | ||
146 | return result; | 138 | return result; |
147 | } | 139 | } |
148 | 140 | ||
@@ -155,15 +147,9 @@ static int hwahc_op_start(struct usb_hcd *usb_hcd) | |||
155 | int result; | 147 | int result; |
156 | struct wusbhc *wusbhc = usb_hcd_to_wusbhc(usb_hcd); | 148 | struct wusbhc *wusbhc = usb_hcd_to_wusbhc(usb_hcd); |
157 | struct hwahc *hwahc = container_of(wusbhc, struct hwahc, wusbhc); | 149 | struct hwahc *hwahc = container_of(wusbhc, struct hwahc, wusbhc); |
158 | struct device *dev = &hwahc->wa.usb_iface->dev; | ||
159 | 150 | ||
160 | /* Set up a Host Info WUSB Information Element */ | ||
161 | d_fnstart(4, dev, "(hwahc %p)\n", hwahc); | ||
162 | result = -ENOSPC; | 151 | result = -ENOSPC; |
163 | mutex_lock(&wusbhc->mutex); | 152 | mutex_lock(&wusbhc->mutex); |
164 | /* Start the numbering from the top so that the bottom | ||
165 | * range of the unauth addr space is used for devices, | ||
166 | * the top for HCs; use 0xfe - RC# */ | ||
167 | addr = wusb_cluster_id_get(); | 153 | addr = wusb_cluster_id_get(); |
168 | if (addr == 0) | 154 | if (addr == 0) |
169 | goto error_cluster_id_get; | 155 | goto error_cluster_id_get; |
@@ -171,22 +157,14 @@ static int hwahc_op_start(struct usb_hcd *usb_hcd) | |||
171 | if (result < 0) | 157 | if (result < 0) |
172 | goto error_set_cluster_id; | 158 | goto error_set_cluster_id; |
173 | 159 | ||
174 | result = wa_nep_arm(&hwahc->wa, GFP_KERNEL); | ||
175 | if (result < 0) { | ||
176 | dev_err(dev, "cannot listen to notifications: %d\n", result); | ||
177 | goto error_stop; | ||
178 | } | ||
179 | usb_hcd->uses_new_polling = 1; | 160 | usb_hcd->uses_new_polling = 1; |
180 | usb_hcd->poll_rh = 1; | 161 | usb_hcd->poll_rh = 1; |
181 | usb_hcd->state = HC_STATE_RUNNING; | 162 | usb_hcd->state = HC_STATE_RUNNING; |
182 | result = 0; | 163 | result = 0; |
183 | out: | 164 | out: |
184 | mutex_unlock(&wusbhc->mutex); | 165 | mutex_unlock(&wusbhc->mutex); |
185 | d_fnend(4, dev, "(hwahc %p) = %d\n", hwahc, result); | ||
186 | return result; | 166 | return result; |
187 | 167 | ||
188 | error_stop: | ||
189 | __wa_stop(&hwahc->wa); | ||
190 | error_set_cluster_id: | 168 | error_set_cluster_id: |
191 | wusb_cluster_id_put(wusbhc->cluster_id); | 169 | wusb_cluster_id_put(wusbhc->cluster_id); |
192 | error_cluster_id_get: | 170 | error_cluster_id_get: |
@@ -194,39 +172,6 @@ error_cluster_id_get: | |||
194 | 172 | ||
195 | } | 173 | } |
196 | 174 | ||
197 | /* | ||
198 | * FIXME: break this function up | ||
199 | */ | ||
200 | static int __hwahc_op_wusbhc_start(struct wusbhc *wusbhc) | ||
201 | { | ||
202 | int result; | ||
203 | struct hwahc *hwahc = container_of(wusbhc, struct hwahc, wusbhc); | ||
204 | struct device *dev = &hwahc->wa.usb_iface->dev; | ||
205 | |||
206 | /* Set up a Host Info WUSB Information Element */ | ||
207 | d_fnstart(4, dev, "(hwahc %p)\n", hwahc); | ||
208 | result = -ENOSPC; | ||
209 | |||
210 | result = __wa_set_feature(&hwahc->wa, WA_ENABLE); | ||
211 | if (result < 0) { | ||
212 | dev_err(dev, "error commanding HC to start: %d\n", result); | ||
213 | goto error_stop; | ||
214 | } | ||
215 | result = __wa_wait_status(&hwahc->wa, WA_ENABLE, WA_ENABLE); | ||
216 | if (result < 0) { | ||
217 | dev_err(dev, "error waiting for HC to start: %d\n", result); | ||
218 | goto error_stop; | ||
219 | } | ||
220 | result = 0; | ||
221 | out: | ||
222 | d_fnend(4, dev, "(hwahc %p) = %d\n", hwahc, result); | ||
223 | return result; | ||
224 | |||
225 | error_stop: | ||
226 | result = __wa_clear_feature(&hwahc->wa, WA_ENABLE); | ||
227 | goto out; | ||
228 | } | ||
229 | |||
230 | static int hwahc_op_suspend(struct usb_hcd *usb_hcd, pm_message_t msg) | 175 | static int hwahc_op_suspend(struct usb_hcd *usb_hcd, pm_message_t msg) |
231 | { | 176 | { |
232 | struct wusbhc *wusbhc = usb_hcd_to_wusbhc(usb_hcd); | 177 | struct wusbhc *wusbhc = usb_hcd_to_wusbhc(usb_hcd); |
@@ -246,18 +191,6 @@ static int hwahc_op_resume(struct usb_hcd *usb_hcd) | |||
246 | return -ENOSYS; | 191 | return -ENOSYS; |
247 | } | 192 | } |
248 | 193 | ||
249 | static void __hwahc_op_wusbhc_stop(struct wusbhc *wusbhc) | ||
250 | { | ||
251 | int result; | ||
252 | struct hwahc *hwahc = container_of(wusbhc, struct hwahc, wusbhc); | ||
253 | struct device *dev = &hwahc->wa.usb_iface->dev; | ||
254 | |||
255 | d_fnstart(4, dev, "(hwahc %p)\n", hwahc); | ||
256 | /* Nothing for now */ | ||
257 | d_fnend(4, dev, "(hwahc %p) = %d\n", hwahc, result); | ||
258 | return; | ||
259 | } | ||
260 | |||
261 | /* | 194 | /* |
262 | * No need to abort pipes, as when this is called, all the children | 195 | * No need to abort pipes, as when this is called, all the children |
263 | * have been disconnected and that has done it [through | 196 | * have been disconnected and that has done it [through |
@@ -266,21 +199,11 @@ static void __hwahc_op_wusbhc_stop(struct wusbhc *wusbhc) | |||
266 | */ | 199 | */ |
267 | static void hwahc_op_stop(struct usb_hcd *usb_hcd) | 200 | static void hwahc_op_stop(struct usb_hcd *usb_hcd) |
268 | { | 201 | { |
269 | int result; | ||
270 | struct wusbhc *wusbhc = usb_hcd_to_wusbhc(usb_hcd); | 202 | struct wusbhc *wusbhc = usb_hcd_to_wusbhc(usb_hcd); |
271 | struct hwahc *hwahc = container_of(wusbhc, struct hwahc, wusbhc); | ||
272 | struct wahc *wa = &hwahc->wa; | ||
273 | struct device *dev = &wa->usb_iface->dev; | ||
274 | 203 | ||
275 | d_fnstart(4, dev, "(hwahc %p)\n", hwahc); | ||
276 | mutex_lock(&wusbhc->mutex); | 204 | mutex_lock(&wusbhc->mutex); |
277 | wusbhc_stop(wusbhc); | ||
278 | wa_nep_disarm(&hwahc->wa); | ||
279 | result = __wa_stop(&hwahc->wa); | ||
280 | wusb_cluster_id_put(wusbhc->cluster_id); | 205 | wusb_cluster_id_put(wusbhc->cluster_id); |
281 | mutex_unlock(&wusbhc->mutex); | 206 | mutex_unlock(&wusbhc->mutex); |
282 | d_fnend(4, dev, "(hwahc %p) = %d\n", hwahc, result); | ||
283 | return; | ||
284 | } | 207 | } |
285 | 208 | ||
286 | static int hwahc_op_get_frame_number(struct usb_hcd *usb_hcd) | 209 | static int hwahc_op_get_frame_number(struct usb_hcd *usb_hcd) |
@@ -325,6 +248,54 @@ static void hwahc_op_endpoint_disable(struct usb_hcd *usb_hcd, | |||
325 | rpipe_ep_disable(&hwahc->wa, ep); | 248 | rpipe_ep_disable(&hwahc->wa, ep); |
326 | } | 249 | } |
327 | 250 | ||
251 | static int __hwahc_op_wusbhc_start(struct wusbhc *wusbhc) | ||
252 | { | ||
253 | int result; | ||
254 | struct hwahc *hwahc = container_of(wusbhc, struct hwahc, wusbhc); | ||
255 | struct device *dev = &hwahc->wa.usb_iface->dev; | ||
256 | |||
257 | result = __wa_set_feature(&hwahc->wa, WA_ENABLE); | ||
258 | if (result < 0) { | ||
259 | dev_err(dev, "error commanding HC to start: %d\n", result); | ||
260 | goto error_stop; | ||
261 | } | ||
262 | result = __wa_wait_status(&hwahc->wa, WA_ENABLE, WA_ENABLE); | ||
263 | if (result < 0) { | ||
264 | dev_err(dev, "error waiting for HC to start: %d\n", result); | ||
265 | goto error_stop; | ||
266 | } | ||
267 | result = wa_nep_arm(&hwahc->wa, GFP_KERNEL); | ||
268 | if (result < 0) { | ||
269 | dev_err(dev, "cannot listen to notifications: %d\n", result); | ||
270 | goto error_stop; | ||
271 | } | ||
272 | return result; | ||
273 | |||
274 | error_stop: | ||
275 | __wa_clear_feature(&hwahc->wa, WA_ENABLE); | ||
276 | return result; | ||
277 | } | ||
278 | |||
279 | static void __hwahc_op_wusbhc_stop(struct wusbhc *wusbhc, int delay) | ||
280 | { | ||
281 | struct hwahc *hwahc = container_of(wusbhc, struct hwahc, wusbhc); | ||
282 | struct wahc *wa = &hwahc->wa; | ||
283 | u8 iface_no = wa->usb_iface->cur_altsetting->desc.bInterfaceNumber; | ||
284 | int ret; | ||
285 | |||
286 | ret = usb_control_msg(wa->usb_dev, usb_sndctrlpipe(wa->usb_dev, 0), | ||
287 | WUSB_REQ_CHAN_STOP, | ||
288 | USB_DIR_OUT | USB_TYPE_CLASS | USB_RECIP_INTERFACE, | ||
289 | delay * 1000, | ||
290 | iface_no, | ||
291 | NULL, 0, 1000 /* FIXME: arbitrary */); | ||
292 | if (ret == 0) | ||
293 | msleep(delay); | ||
294 | |||
295 | wa_nep_disarm(&hwahc->wa); | ||
296 | __wa_stop(&hwahc->wa); | ||
297 | } | ||
298 | |||
328 | /* | 299 | /* |
329 | * Set the UWB MAS allocation for the WUSB cluster | 300 | * Set the UWB MAS allocation for the WUSB cluster |
330 | * | 301 | * |
@@ -581,11 +552,11 @@ static int wa_fill_descr(struct wahc *wa) | |||
581 | itr_size = le16_to_cpu(usb_dev->actconfig->desc.wTotalLength); | 552 | itr_size = le16_to_cpu(usb_dev->actconfig->desc.wTotalLength); |
582 | while (itr_size >= sizeof(*hdr)) { | 553 | while (itr_size >= sizeof(*hdr)) { |
583 | hdr = (struct usb_descriptor_header *) itr; | 554 | hdr = (struct usb_descriptor_header *) itr; |
584 | d_printf(3, dev, "Extra device descriptor: " | 555 | dev_dbg(dev, "Extra device descriptor: " |
585 | "type %02x/%u bytes @ %zu (%zu left)\n", | 556 | "type %02x/%u bytes @ %zu (%zu left)\n", |
586 | hdr->bDescriptorType, hdr->bLength, | 557 | hdr->bDescriptorType, hdr->bLength, |
587 | (itr - usb_dev->rawdescriptors[actconfig_idx]), | 558 | (itr - usb_dev->rawdescriptors[actconfig_idx]), |
588 | itr_size); | 559 | itr_size); |
589 | if (hdr->bDescriptorType == USB_DT_WIRE_ADAPTER) | 560 | if (hdr->bDescriptorType == USB_DT_WIRE_ADAPTER) |
590 | goto found; | 561 | goto found; |
591 | itr += hdr->bLength; | 562 | itr += hdr->bLength; |
@@ -794,7 +765,6 @@ static void hwahc_destroy(struct hwahc *hwahc) | |||
794 | { | 765 | { |
795 | struct wusbhc *wusbhc = &hwahc->wusbhc; | 766 | struct wusbhc *wusbhc = &hwahc->wusbhc; |
796 | 767 | ||
797 | d_fnstart(1, NULL, "(hwahc %p)\n", hwahc); | ||
798 | mutex_lock(&wusbhc->mutex); | 768 | mutex_lock(&wusbhc->mutex); |
799 | __wa_destroy(&hwahc->wa); | 769 | __wa_destroy(&hwahc->wa); |
800 | wusbhc_destroy(&hwahc->wusbhc); | 770 | wusbhc_destroy(&hwahc->wusbhc); |
@@ -804,7 +774,6 @@ static void hwahc_destroy(struct hwahc *hwahc) | |||
804 | usb_put_intf(hwahc->wa.usb_iface); | 774 | usb_put_intf(hwahc->wa.usb_iface); |
805 | usb_put_dev(hwahc->wa.usb_dev); | 775 | usb_put_dev(hwahc->wa.usb_dev); |
806 | mutex_unlock(&wusbhc->mutex); | 776 | mutex_unlock(&wusbhc->mutex); |
807 | d_fnend(1, NULL, "(hwahc %p) = void\n", hwahc); | ||
808 | } | 777 | } |
809 | 778 | ||
810 | static void hwahc_init(struct hwahc *hwahc) | 779 | static void hwahc_init(struct hwahc *hwahc) |
@@ -821,7 +790,6 @@ static int hwahc_probe(struct usb_interface *usb_iface, | |||
821 | struct hwahc *hwahc; | 790 | struct hwahc *hwahc; |
822 | struct device *dev = &usb_iface->dev; | 791 | struct device *dev = &usb_iface->dev; |
823 | 792 | ||
824 | d_fnstart(4, dev, "(%p, %p)\n", usb_iface, id); | ||
825 | result = -ENOMEM; | 793 | result = -ENOMEM; |
826 | usb_hcd = usb_create_hcd(&hwahc_hc_driver, &usb_iface->dev, "wusb-hwa"); | 794 | usb_hcd = usb_create_hcd(&hwahc_hc_driver, &usb_iface->dev, "wusb-hwa"); |
827 | if (usb_hcd == NULL) { | 795 | if (usb_hcd == NULL) { |
@@ -848,7 +816,6 @@ static int hwahc_probe(struct usb_interface *usb_iface, | |||
848 | dev_err(dev, "Cannot setup phase B of WUSBHC: %d\n", result); | 816 | dev_err(dev, "Cannot setup phase B of WUSBHC: %d\n", result); |
849 | goto error_wusbhc_b_create; | 817 | goto error_wusbhc_b_create; |
850 | } | 818 | } |
851 | d_fnend(4, dev, "(%p, %p) = 0\n", usb_iface, id); | ||
852 | return 0; | 819 | return 0; |
853 | 820 | ||
854 | error_wusbhc_b_create: | 821 | error_wusbhc_b_create: |
@@ -858,7 +825,6 @@ error_add_hcd: | |||
858 | error_hwahc_create: | 825 | error_hwahc_create: |
859 | usb_put_hcd(usb_hcd); | 826 | usb_put_hcd(usb_hcd); |
860 | error_alloc: | 827 | error_alloc: |
861 | d_fnend(4, dev, "(%p, %p) = %d\n", usb_iface, id, result); | ||
862 | return result; | 828 | return result; |
863 | } | 829 | } |
864 | 830 | ||
@@ -872,16 +838,12 @@ static void hwahc_disconnect(struct usb_interface *usb_iface) | |||
872 | wusbhc = usb_hcd_to_wusbhc(usb_hcd); | 838 | wusbhc = usb_hcd_to_wusbhc(usb_hcd); |
873 | hwahc = container_of(wusbhc, struct hwahc, wusbhc); | 839 | hwahc = container_of(wusbhc, struct hwahc, wusbhc); |
874 | 840 | ||
875 | d_fnstart(1, NULL, "(hwahc %p [usb_iface %p])\n", hwahc, usb_iface); | ||
876 | wusbhc_b_destroy(&hwahc->wusbhc); | 841 | wusbhc_b_destroy(&hwahc->wusbhc); |
877 | usb_remove_hcd(usb_hcd); | 842 | usb_remove_hcd(usb_hcd); |
878 | hwahc_destroy(hwahc); | 843 | hwahc_destroy(hwahc); |
879 | usb_put_hcd(usb_hcd); | 844 | usb_put_hcd(usb_hcd); |
880 | d_fnend(1, NULL, "(hwahc %p [usb_iface %p]) = void\n", hwahc, | ||
881 | usb_iface); | ||
882 | } | 845 | } |
883 | 846 | ||
884 | /** USB device ID's that we handle */ | ||
885 | static struct usb_device_id hwahc_id_table[] = { | 847 | static struct usb_device_id hwahc_id_table[] = { |
886 | /* FIXME: use class labels for this */ | 848 | /* FIXME: use class labels for this */ |
887 | { USB_INTERFACE_INFO(0xe0, 0x02, 0x01), }, | 849 | { USB_INTERFACE_INFO(0xe0, 0x02, 0x01), }, |
@@ -898,18 +860,7 @@ static struct usb_driver hwahc_driver = { | |||
898 | 860 | ||
899 | static int __init hwahc_driver_init(void) | 861 | static int __init hwahc_driver_init(void) |
900 | { | 862 | { |
901 | int result; | 863 | return usb_register(&hwahc_driver); |
902 | result = usb_register(&hwahc_driver); | ||
903 | if (result < 0) { | ||
904 | printk(KERN_ERR "WA-CDS: Cannot register USB driver: %d\n", | ||
905 | result); | ||
906 | goto error_usb_register; | ||
907 | } | ||
908 | return 0; | ||
909 | |||
910 | error_usb_register: | ||
911 | return result; | ||
912 | |||
913 | } | 864 | } |
914 | module_init(hwahc_driver_init); | 865 | module_init(hwahc_driver_init); |
915 | 866 | ||
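A note on the stop path introduced above: instead of tearing the cluster down immediately, the HWA is now asked to stop the WUSB channel some time in the future and the driver sleeps for that long, so devices on the channel see it linger rather than vanish. A minimal sketch of the same class-specific control transfer, assuming delay is in milliseconds and that wValue carries the stop time in microseconds (both are inferences from the code, and the value fits the 16-bit wValue only for small delays):

    #include <linux/usb.h>
    #include <linux/delay.h>
    #include <linux/usb/wusb.h>    /* for WUSB_REQ_CHAN_STOP; header location assumed */

    /*
     * Hypothetical helper: ask a host wire adapter to stop its WUSB
     * channel delay_ms milliseconds from now, then sleep so the channel
     * lingers before the caller tears anything else down.
     */
    static int example_chan_stop(struct usb_device *udev, u8 iface_no,
                                 int delay_ms)
    {
            int ret;

            ret = usb_control_msg(udev, usb_sndctrlpipe(udev, 0),
                            WUSB_REQ_CHAN_STOP,
                            USB_DIR_OUT | USB_TYPE_CLASS | USB_RECIP_INTERFACE,
                            delay_ms * 1000,    /* stop time; units assumed */
                            iface_no, NULL, 0,
                            1000 /* ms; as arbitrary as the original's */);
            if (ret == 0)
                    msleep(delay_ms);
            return ret;
    }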
diff --git a/drivers/usb/host/whci/Kbuild b/drivers/usb/host/whci/Kbuild index 26a3871ea0f9..11e5040b8337 100644 --- a/drivers/usb/host/whci/Kbuild +++ b/drivers/usb/host/whci/Kbuild | |||
@@ -2,6 +2,7 @@ obj-$(CONFIG_USB_WHCI_HCD) += whci-hcd.o | |||
2 | 2 | ||
3 | whci-hcd-y := \ | 3 | whci-hcd-y := \ |
4 | asl.o \ | 4 | asl.o \ |
5 | debug.o \ | ||
5 | hcd.o \ | 6 | hcd.o \ |
6 | hw.o \ | 7 | hw.o \ |
7 | init.o \ | 8 | init.o \ |
diff --git a/drivers/usb/host/whci/asl.c b/drivers/usb/host/whci/asl.c index 4d7078e50572..577c0d29849d 100644 --- a/drivers/usb/host/whci/asl.c +++ b/drivers/usb/host/whci/asl.c | |||
@@ -19,32 +19,11 @@ | |||
19 | #include <linux/dma-mapping.h> | 19 | #include <linux/dma-mapping.h> |
20 | #include <linux/uwb/umc.h> | 20 | #include <linux/uwb/umc.h> |
21 | #include <linux/usb.h> | 21 | #include <linux/usb.h> |
22 | #define D_LOCAL 0 | ||
23 | #include <linux/uwb/debug.h> | ||
24 | 22 | ||
25 | #include "../../wusbcore/wusbhc.h" | 23 | #include "../../wusbcore/wusbhc.h" |
26 | 24 | ||
27 | #include "whcd.h" | 25 | #include "whcd.h" |
28 | 26 | ||
29 | #if D_LOCAL >= 4 | ||
30 | static void dump_asl(struct whc *whc, const char *tag) | ||
31 | { | ||
32 | struct device *dev = &whc->umc->dev; | ||
33 | struct whc_qset *qset; | ||
34 | |||
35 | d_printf(4, dev, "ASL %s\n", tag); | ||
36 | |||
37 | list_for_each_entry(qset, &whc->async_list, list_node) { | ||
38 | dump_qset(qset, dev); | ||
39 | } | ||
40 | } | ||
41 | #else | ||
42 | static inline void dump_asl(struct whc *whc, const char *tag) | ||
43 | { | ||
44 | } | ||
45 | #endif | ||
46 | |||
47 | |||
48 | static void qset_get_next_prev(struct whc *whc, struct whc_qset *qset, | 27 | static void qset_get_next_prev(struct whc *whc, struct whc_qset *qset, |
49 | struct whc_qset **next, struct whc_qset **prev) | 28 | struct whc_qset **next, struct whc_qset **prev) |
50 | { | 29 | { |
@@ -179,11 +158,26 @@ void asl_stop(struct whc *whc) | |||
179 | 1000, "stop ASL"); | 158 | 1000, "stop ASL"); |
180 | } | 159 | } |
181 | 160 | ||
161 | /** | ||
162 | * asl_update - request an ASL update and wait for the hardware to be synced | ||
163 | * @whc: the WHCI HC | ||
164 | * @wusbcmd: WUSBCMD value to start the update. | ||
165 | * | ||
166 | * If the WUSB HC is inactive (i.e., the ASL is stopped) then the | ||
167 | * update must be skipped as the hardware may not respond to update | ||
168 | * requests. | ||
169 | */ | ||
182 | void asl_update(struct whc *whc, uint32_t wusbcmd) | 170 | void asl_update(struct whc *whc, uint32_t wusbcmd) |
183 | { | 171 | { |
184 | whc_write_wusbcmd(whc, wusbcmd, wusbcmd); | 172 | struct wusbhc *wusbhc = &whc->wusbhc; |
185 | wait_event(whc->async_list_wq, | 173 | |
186 | (le_readl(whc->base + WUSBCMD) & WUSBCMD_ASYNC_UPDATED) == 0); | 174 | mutex_lock(&wusbhc->mutex); |
175 | if (wusbhc->active) { | ||
176 | whc_write_wusbcmd(whc, wusbcmd, wusbcmd); | ||
177 | wait_event(whc->async_list_wq, | ||
178 | (le_readl(whc->base + WUSBCMD) & WUSBCMD_ASYNC_UPDATED) == 0); | ||
179 | } | ||
180 | mutex_unlock(&wusbhc->mutex); | ||
187 | } | 181 | } |
188 | 182 | ||
189 | /** | 183 | /** |
@@ -202,8 +196,6 @@ void scan_async_work(struct work_struct *work) | |||
202 | 196 | ||
203 | spin_lock_irq(&whc->lock); | 197 | spin_lock_irq(&whc->lock); |
204 | 198 | ||
205 | dump_asl(whc, "before processing"); | ||
206 | |||
207 | /* | 199 | /* |
208 | * Traverse the software list backwards so new qsets can be | 200 | * Traverse the software list backwards so new qsets can be |
209 | * safely inserted into the ASL without making it non-circular. | 201 | * safely inserted into the ASL without making it non-circular. |
@@ -217,8 +209,6 @@ void scan_async_work(struct work_struct *work) | |||
217 | update |= process_qset(whc, qset); | 209 | update |= process_qset(whc, qset); |
218 | } | 210 | } |
219 | 211 | ||
220 | dump_asl(whc, "after processing"); | ||
221 | |||
222 | spin_unlock_irq(&whc->lock); | 212 | spin_unlock_irq(&whc->lock); |
223 | 213 | ||
224 | if (update) { | 214 | if (update) { |
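The asl_update() change is the heart of this hunk: the WUSBCMD write and the wait are now done only with wusbhc->mutex held and the HC active, because a stopped controller never clears WUSBCMD_ASYNC_UPDATED and the old unconditional wait_event() could sleep forever. The guard pattern in isolation, with invented names (example_ctrl is not driver API, and the wait queue is assumed to be woken from the interrupt handler once the bit clears):

    #include <linux/mutex.h>
    #include <linux/wait.h>
    #include <linux/io.h>

    struct example_ctrl {
            struct mutex            mutex;      /* protects active */
            bool                    active;
            void __iomem            *cmd_reg;
            wait_queue_head_t       update_wq;  /* woken from the IRQ handler */
    };

    static void example_update(struct example_ctrl *ctrl, u32 update_bit)
    {
            mutex_lock(&ctrl->mutex);
            if (ctrl->active) {
                    /* kick the update, then wait for hardware to clear the bit */
                    writel(readl(ctrl->cmd_reg) | update_bit, ctrl->cmd_reg);
                    wait_event(ctrl->update_wq,
                               (readl(ctrl->cmd_reg) & update_bit) == 0);
            }
            mutex_unlock(&ctrl->mutex);
    }

The identical reasoning applies to pzl_update() in pzl.c further down.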
diff --git a/drivers/usb/host/whci/debug.c b/drivers/usb/host/whci/debug.c new file mode 100644 index 000000000000..cf2d45946c57 --- /dev/null +++ b/drivers/usb/host/whci/debug.c | |||
@@ -0,0 +1,189 @@ | |||
1 | /* | ||
2 | * Wireless Host Controller (WHC) debug. | ||
3 | * | ||
4 | * Copyright (C) 2008 Cambridge Silicon Radio Ltd. | ||
5 | * | ||
6 | * This program is free software; you can redistribute it and/or | ||
7 | * modify it under the terms of the GNU General Public License version | ||
8 | * 2 as published by the Free Software Foundation. | ||
9 | * | ||
10 | * This program is distributed in the hope that it will be useful, | ||
11 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
12 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
13 | * GNU General Public License for more details. | ||
14 | * | ||
15 | * You should have received a copy of the GNU General Public License | ||
16 | * along with this program. If not, see <http://www.gnu.org/licenses/>. | ||
17 | */ | ||
18 | #include <linux/kernel.h> | ||
19 | #include <linux/debugfs.h> | ||
20 | #include <linux/seq_file.h> | ||
21 | |||
22 | #include "../../wusbcore/wusbhc.h" | ||
23 | |||
24 | #include "whcd.h" | ||
25 | |||
26 | struct whc_dbg { | ||
27 | struct dentry *di_f; | ||
28 | struct dentry *asl_f; | ||
29 | struct dentry *pzl_f; | ||
30 | }; | ||
31 | |||
32 | void qset_print(struct seq_file *s, struct whc_qset *qset) | ||
33 | { | ||
34 | struct whc_std *std; | ||
35 | struct urb *urb = NULL; | ||
36 | int i; | ||
37 | |||
38 | seq_printf(s, "qset %08x\n", (u32)qset->qset_dma); | ||
39 | seq_printf(s, " -> %08x\n", (u32)qset->qh.link); | ||
40 | seq_printf(s, " info: %08x %08x %08x\n", | ||
41 | qset->qh.info1, qset->qh.info2, qset->qh.info3); | ||
42 | seq_printf(s, " sts: %04x errs: %d\n", qset->qh.status, qset->qh.err_count); | ||
43 | seq_printf(s, " TD: sts: %08x opts: %08x\n", | ||
44 | qset->qh.overlay.qtd.status, qset->qh.overlay.qtd.options); | ||
45 | |||
46 | for (i = 0; i < WHCI_QSET_TD_MAX; i++) { | ||
47 | seq_printf(s, " %c%c TD[%d]: sts: %08x opts: %08x ptr: %08x\n", | ||
48 | i == qset->td_start ? 'S' : ' ', | ||
49 | i == qset->td_end ? 'E' : ' ', | ||
50 | i, qset->qtd[i].status, qset->qtd[i].options, | ||
51 | (u32)qset->qtd[i].page_list_ptr); | ||
52 | } | ||
53 | seq_printf(s, " ntds: %d\n", qset->ntds); | ||
54 | list_for_each_entry(std, &qset->stds, list_node) { | ||
55 | if (urb != std->urb) { | ||
56 | urb = std->urb; | ||
57 | seq_printf(s, " urb %p transferred: %d bytes\n", urb, | ||
58 | urb->actual_length); | ||
59 | } | ||
60 | if (std->qtd) | ||
61 | seq_printf(s, " sTD[%td]: %zu bytes @ %08x\n", | ||
62 | std->qtd - &qset->qtd[0], | ||
63 | std->len, std->num_pointers ? | ||
64 | (u32)(std->pl_virt[0].buf_ptr) : (u32)std->dma_addr); | ||
65 | else | ||
66 | seq_printf(s, " sTD[-]: %zd bytes @ %08x\n", | ||
67 | std->len, std->num_pointers ? | ||
68 | (u32)(std->pl_virt[0].buf_ptr) : (u32)std->dma_addr); | ||
69 | } | ||
70 | } | ||
71 | |||
72 | static int di_print(struct seq_file *s, void *p) | ||
73 | { | ||
74 | struct whc *whc = s->private; | ||
75 | char buf[72]; | ||
76 | int d; | ||
77 | |||
78 | for (d = 0; d < whc->n_devices; d++) { | ||
79 | struct di_buf_entry *di = &whc->di_buf[d]; | ||
80 | |||
81 | bitmap_scnprintf(buf, sizeof(buf), | ||
82 | (unsigned long *)di->availability_info, UWB_NUM_MAS); | ||
83 | |||
84 | seq_printf(s, "DI[%d]\n", d); | ||
85 | seq_printf(s, " availability: %s\n", buf); | ||
86 | seq_printf(s, " %c%c key idx: %d dev addr: %d\n", | ||
87 | (di->addr_sec_info & WHC_DI_SECURE) ? 'S' : ' ', | ||
88 | (di->addr_sec_info & WHC_DI_DISABLE) ? 'D' : ' ', | ||
89 | (di->addr_sec_info & WHC_DI_KEY_IDX_MASK) >> 8, | ||
90 | (di->addr_sec_info & WHC_DI_DEV_ADDR_MASK)); | ||
91 | } | ||
92 | return 0; | ||
93 | } | ||
94 | |||
95 | static int asl_print(struct seq_file *s, void *p) | ||
96 | { | ||
97 | struct whc *whc = s->private; | ||
98 | struct whc_qset *qset; | ||
99 | |||
100 | list_for_each_entry(qset, &whc->async_list, list_node) { | ||
101 | qset_print(s, qset); | ||
102 | } | ||
103 | |||
104 | return 0; | ||
105 | } | ||
106 | |||
107 | static int pzl_print(struct seq_file *s, void *p) | ||
108 | { | ||
109 | struct whc *whc = s->private; | ||
110 | struct whc_qset *qset; | ||
111 | int period; | ||
112 | |||
113 | for (period = 0; period < 5; period++) { | ||
114 | seq_printf(s, "Period %d\n", period); | ||
115 | list_for_each_entry(qset, &whc->periodic_list[period], list_node) { | ||
116 | qset_print(s, qset); | ||
117 | } | ||
118 | } | ||
119 | return 0; | ||
120 | } | ||
121 | |||
122 | static int di_open(struct inode *inode, struct file *file) | ||
123 | { | ||
124 | return single_open(file, di_print, inode->i_private); | ||
125 | } | ||
126 | |||
127 | static int asl_open(struct inode *inode, struct file *file) | ||
128 | { | ||
129 | return single_open(file, asl_print, inode->i_private); | ||
130 | } | ||
131 | |||
132 | static int pzl_open(struct inode *inode, struct file *file) | ||
133 | { | ||
134 | return single_open(file, pzl_print, inode->i_private); | ||
135 | } | ||
136 | |||
137 | static struct file_operations di_fops = { | ||
138 | .open = di_open, | ||
139 | .read = seq_read, | ||
140 | .llseek = seq_lseek, | ||
141 | .release = single_release, | ||
142 | .owner = THIS_MODULE, | ||
143 | }; | ||
144 | |||
145 | static struct file_operations asl_fops = { | ||
146 | .open = asl_open, | ||
147 | .read = seq_read, | ||
148 | .llseek = seq_lseek, | ||
149 | .release = single_release, | ||
150 | .owner = THIS_MODULE, | ||
151 | }; | ||
152 | |||
153 | static struct file_operations pzl_fops = { | ||
154 | .open = pzl_open, | ||
155 | .read = seq_read, | ||
156 | .llseek = seq_lseek, | ||
157 | .release = single_release, | ||
158 | .owner = THIS_MODULE, | ||
159 | }; | ||
160 | |||
161 | void whc_dbg_init(struct whc *whc) | ||
162 | { | ||
163 | if (whc->wusbhc.pal.debugfs_dir == NULL) | ||
164 | return; | ||
165 | |||
166 | whc->dbg = kzalloc(sizeof(struct whc_dbg), GFP_KERNEL); | ||
167 | if (whc->dbg == NULL) | ||
168 | return; | ||
169 | |||
170 | whc->dbg->di_f = debugfs_create_file("di", 0444, | ||
171 | whc->wusbhc.pal.debugfs_dir, whc, | ||
172 | &di_fops); | ||
173 | whc->dbg->asl_f = debugfs_create_file("asl", 0444, | ||
174 | whc->wusbhc.pal.debugfs_dir, whc, | ||
175 | &asl_fops); | ||
176 | whc->dbg->pzl_f = debugfs_create_file("pzl", 0444, | ||
177 | whc->wusbhc.pal.debugfs_dir, whc, | ||
178 | &pzl_fops); | ||
179 | } | ||
180 | |||
181 | void whc_dbg_clean_up(struct whc *whc) | ||
182 | { | ||
183 | if (whc->dbg) { | ||
184 | debugfs_remove(whc->dbg->pzl_f); | ||
185 | debugfs_remove(whc->dbg->asl_f); | ||
186 | debugfs_remove(whc->dbg->di_f); | ||
187 | kfree(whc->dbg); | ||
188 | } | ||
189 | } | ||
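The new debug.c replaces the compile-time dump_asl()/dump_pzl()/dump_di() helpers with debugfs files backed by seq_file, so the same state can be read at run time without rebuilding with D_LOCAL set. The single_open() boilerplate it uses is the stock pattern; a self-contained sketch under invented names, with error checking omitted for brevity:

    #include <linux/module.h>
    #include <linux/debugfs.h>
    #include <linux/seq_file.h>

    static struct dentry *example_dir, *example_file;

    static int example_show(struct seq_file *s, void *unused)
    {
            /* s->private is the data pointer given to debugfs_create_file() */
            seq_printf(s, "state: %s\n", (char *)s->private);
            return 0;
    }

    static int example_open(struct inode *inode, struct file *file)
    {
            return single_open(file, example_show, inode->i_private);
    }

    static const struct file_operations example_fops = {
            .owner   = THIS_MODULE,
            .open    = example_open,
            .read    = seq_read,
            .llseek  = seq_lseek,
            .release = single_release,
    };

    static int __init example_init(void)
    {
            example_dir = debugfs_create_dir("example", NULL);
            example_file = debugfs_create_file("status", 0444, example_dir,
                                               "idle", &example_fops);
            return 0;
    }

    static void __exit example_exit(void)
    {
            debugfs_remove(example_file);
            debugfs_remove(example_dir);
    }

    module_init(example_init);
    module_exit(example_exit);
    MODULE_LICENSE("GPL");

Reads then show up under the debugfs mount as example/status; the driver above parks its files under the existing wusbhc PAL directory instead of creating its own.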
diff --git a/drivers/usb/host/whci/hcd.c b/drivers/usb/host/whci/hcd.c index ef3ad4dca945..1569afd6245b 100644 --- a/drivers/usb/host/whci/hcd.c +++ b/drivers/usb/host/whci/hcd.c | |||
@@ -15,7 +15,6 @@ | |||
15 | * You should have received a copy of the GNU General Public License | 15 | * You should have received a copy of the GNU General Public License |
16 | * along with this program. If not, see <http://www.gnu.org/licenses/>. | 16 | * along with this program. If not, see <http://www.gnu.org/licenses/>. |
17 | */ | 17 | */ |
18 | #include <linux/version.h> | ||
19 | #include <linux/kernel.h> | 18 | #include <linux/kernel.h> |
20 | #include <linux/init.h> | 19 | #include <linux/init.h> |
21 | #include <linux/uwb/umc.h> | 20 | #include <linux/uwb/umc.h> |
@@ -92,8 +91,6 @@ static void whc_stop(struct usb_hcd *usb_hcd) | |||
92 | 91 | ||
93 | mutex_lock(&wusbhc->mutex); | 92 | mutex_lock(&wusbhc->mutex); |
94 | 93 | ||
95 | wusbhc_stop(wusbhc); | ||
96 | |||
97 | /* stop HC */ | 94 | /* stop HC */ |
98 | le_writel(0, whc->base + WUSBINTR); | 95 | le_writel(0, whc->base + WUSBINTR); |
99 | whc_write_wusbcmd(whc, WUSBCMD_RUN, 0); | 96 | whc_write_wusbcmd(whc, WUSBCMD_RUN, 0); |
@@ -276,6 +273,8 @@ static int whc_probe(struct umc_dev *umc) | |||
276 | goto error_wusbhc_b_create; | 273 | goto error_wusbhc_b_create; |
277 | } | 274 | } |
278 | 275 | ||
276 | whc_dbg_init(whc); | ||
277 | |||
279 | return 0; | 278 | return 0; |
280 | 279 | ||
281 | error_wusbhc_b_create: | 280 | error_wusbhc_b_create: |
@@ -299,6 +298,7 @@ static void whc_remove(struct umc_dev *umc) | |||
299 | struct whc *whc = wusbhc_to_whc(wusbhc); | 298 | struct whc *whc = wusbhc_to_whc(wusbhc); |
300 | 299 | ||
301 | if (usb_hcd) { | 300 | if (usb_hcd) { |
301 | whc_dbg_clean_up(whc); | ||
302 | wusbhc_b_destroy(wusbhc); | 302 | wusbhc_b_destroy(wusbhc); |
303 | usb_remove_hcd(usb_hcd); | 303 | usb_remove_hcd(usb_hcd); |
304 | wusbhc_destroy(wusbhc); | 304 | wusbhc_destroy(wusbhc); |
diff --git a/drivers/usb/host/whci/hw.c b/drivers/usb/host/whci/hw.c index ac86e59c1225..d498e7203217 100644 --- a/drivers/usb/host/whci/hw.c +++ b/drivers/usb/host/whci/hw.c | |||
@@ -50,6 +50,7 @@ int whc_do_gencmd(struct whc *whc, u32 cmd, u32 params, void *addr, size_t len) | |||
50 | unsigned long flags; | 50 | unsigned long flags; |
51 | dma_addr_t dma_addr; | 51 | dma_addr_t dma_addr; |
52 | int t; | 52 | int t; |
53 | int ret = 0; | ||
53 | 54 | ||
54 | mutex_lock(&whc->mutex); | 55 | mutex_lock(&whc->mutex); |
55 | 56 | ||
@@ -61,7 +62,8 @@ int whc_do_gencmd(struct whc *whc, u32 cmd, u32 params, void *addr, size_t len) | |||
61 | dev_err(&whc->umc->dev, "generic command timeout (%04x/%04x)\n", | 62 | dev_err(&whc->umc->dev, "generic command timeout (%04x/%04x)\n", |
62 | le_readl(whc->base + WUSBGENCMDSTS), | 63 | le_readl(whc->base + WUSBGENCMDSTS), |
63 | le_readl(whc->base + WUSBGENCMDPARAMS)); | 64 | le_readl(whc->base + WUSBGENCMDPARAMS)); |
64 | return -ETIMEDOUT; | 65 | ret = -ETIMEDOUT; |
66 | goto out; | ||
65 | } | 67 | } |
66 | 68 | ||
67 | if (addr) { | 69 | if (addr) { |
@@ -80,8 +82,8 @@ int whc_do_gencmd(struct whc *whc, u32 cmd, u32 params, void *addr, size_t len) | |||
80 | whc->base + WUSBGENCMDSTS); | 82 | whc->base + WUSBGENCMDSTS); |
81 | 83 | ||
82 | spin_unlock_irqrestore(&whc->lock, flags); | 84 | spin_unlock_irqrestore(&whc->lock, flags); |
83 | 85 | out: | |
84 | mutex_unlock(&whc->mutex); | 86 | mutex_unlock(&whc->mutex); |
85 | 87 | ||
86 | return 0; | 88 | return ret; |
87 | } | 89 | } |
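The whc_do_gencmd() hunk is a locking fix as much as an error-propagation fix: the old code returned -ETIMEDOUT while still holding whc->mutex, so the next caller would deadlock. The shape of the fix, reduced to essentials (names invented for the sketch):

    #include <linux/mutex.h>
    #include <linux/errno.h>

    static int example_do_cmd(struct mutex *lock, bool (*hw_ready)(void))
    {
            int ret = 0;

            mutex_lock(lock);
            if (!hw_ready()) {
                    ret = -ETIMEDOUT;
                    goto out;           /* the error path still unlocks */
            }
            /* ... issue the command ... */
    out:
            mutex_unlock(lock);
            return ret;
    }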
diff --git a/drivers/usb/host/whci/int.c b/drivers/usb/host/whci/int.c index fce01174aa9b..6aae70028101 100644 --- a/drivers/usb/host/whci/int.c +++ b/drivers/usb/host/whci/int.c | |||
@@ -15,7 +15,6 @@ | |||
15 | * You should have received a copy of the GNU General Public License | 15 | * You should have received a copy of the GNU General Public License |
16 | * along with this program. If not, see <http://www.gnu.org/licenses/>. | 16 | * along with this program. If not, see <http://www.gnu.org/licenses/>. |
17 | */ | 17 | */ |
18 | #include <linux/version.h> | ||
19 | #include <linux/kernel.h> | 18 | #include <linux/kernel.h> |
20 | #include <linux/init.h> | 19 | #include <linux/init.h> |
21 | #include <linux/uwb/umc.h> | 20 | #include <linux/uwb/umc.h> |
diff --git a/drivers/usb/host/whci/pzl.c b/drivers/usb/host/whci/pzl.c index 8d62df0c330b..2ae5abf69a6a 100644 --- a/drivers/usb/host/whci/pzl.c +++ b/drivers/usb/host/whci/pzl.c | |||
@@ -19,35 +19,11 @@ | |||
19 | #include <linux/dma-mapping.h> | 19 | #include <linux/dma-mapping.h> |
20 | #include <linux/uwb/umc.h> | 20 | #include <linux/uwb/umc.h> |
21 | #include <linux/usb.h> | 21 | #include <linux/usb.h> |
22 | #define D_LOCAL 0 | ||
23 | #include <linux/uwb/debug.h> | ||
24 | 22 | ||
25 | #include "../../wusbcore/wusbhc.h" | 23 | #include "../../wusbcore/wusbhc.h" |
26 | 24 | ||
27 | #include "whcd.h" | 25 | #include "whcd.h" |
28 | 26 | ||
29 | #if D_LOCAL >= 4 | ||
30 | static void dump_pzl(struct whc *whc, const char *tag) | ||
31 | { | ||
32 | struct device *dev = &whc->umc->dev; | ||
33 | struct whc_qset *qset; | ||
34 | int period = 0; | ||
35 | |||
36 | d_printf(4, dev, "PZL %s\n", tag); | ||
37 | |||
38 | for (period = 0; period < 5; period++) { | ||
39 | d_printf(4, dev, "Period %d\n", period); | ||
40 | list_for_each_entry(qset, &whc->periodic_list[period], list_node) { | ||
41 | dump_qset(qset, dev); | ||
42 | } | ||
43 | } | ||
44 | } | ||
45 | #else | ||
46 | static inline void dump_pzl(struct whc *whc, const char *tag) | ||
47 | { | ||
48 | } | ||
49 | #endif | ||
50 | |||
51 | static void update_pzl_pointers(struct whc *whc, int period, u64 addr) | 27 | static void update_pzl_pointers(struct whc *whc, int period, u64 addr) |
52 | { | 28 | { |
53 | switch (period) { | 29 | switch (period) { |
@@ -195,11 +171,26 @@ void pzl_stop(struct whc *whc) | |||
195 | 1000, "stop PZL"); | 171 | 1000, "stop PZL"); |
196 | } | 172 | } |
197 | 173 | ||
174 | /** | ||
175 | * pzl_update - request a PZL update and wait for the hardware to be synced | ||
176 | * @whc: the WHCI HC | ||
177 | * @wusbcmd: WUSBCMD value to start the update. | ||
178 | * | ||
179 | * If the WUSB HC is inactive (i.e., the PZL is stopped) then the | ||
180 | * update must be skipped as the hardware may not respond to update | ||
181 | * requests. | ||
182 | */ | ||
198 | void pzl_update(struct whc *whc, uint32_t wusbcmd) | 183 | void pzl_update(struct whc *whc, uint32_t wusbcmd) |
199 | { | 184 | { |
200 | whc_write_wusbcmd(whc, wusbcmd, wusbcmd); | 185 | struct wusbhc *wusbhc = &whc->wusbhc; |
201 | wait_event(whc->periodic_list_wq, | 186 | |
202 | (le_readl(whc->base + WUSBCMD) & WUSBCMD_PERIODIC_UPDATED) == 0); | 187 | mutex_lock(&wusbhc->mutex); |
188 | if (wusbhc->active) { | ||
189 | whc_write_wusbcmd(whc, wusbcmd, wusbcmd); | ||
190 | wait_event(whc->periodic_list_wq, | ||
191 | (le_readl(whc->base + WUSBCMD) & WUSBCMD_PERIODIC_UPDATED) == 0); | ||
192 | } | ||
193 | mutex_unlock(&wusbhc->mutex); | ||
203 | } | 194 | } |
204 | 195 | ||
205 | static void update_pzl_hw_view(struct whc *whc) | 196 | static void update_pzl_hw_view(struct whc *whc) |
@@ -235,8 +226,6 @@ void scan_periodic_work(struct work_struct *work) | |||
235 | 226 | ||
236 | spin_lock_irq(&whc->lock); | 227 | spin_lock_irq(&whc->lock); |
237 | 228 | ||
238 | dump_pzl(whc, "before processing"); | ||
239 | |||
240 | for (period = 4; period >= 0; period--) { | 229 | for (period = 4; period >= 0; period--) { |
241 | list_for_each_entry_safe(qset, t, &whc->periodic_list[period], list_node) { | 230 | list_for_each_entry_safe(qset, t, &whc->periodic_list[period], list_node) { |
242 | if (!qset->in_hw_list) | 231 | if (!qset->in_hw_list) |
@@ -248,8 +237,6 @@ void scan_periodic_work(struct work_struct *work) | |||
248 | if (update & (WHC_UPDATE_ADDED | WHC_UPDATE_REMOVED)) | 237 | if (update & (WHC_UPDATE_ADDED | WHC_UPDATE_REMOVED)) |
249 | update_pzl_hw_view(whc); | 238 | update_pzl_hw_view(whc); |
250 | 239 | ||
251 | dump_pzl(whc, "after processing"); | ||
252 | |||
253 | spin_unlock_irq(&whc->lock); | 240 | spin_unlock_irq(&whc->lock); |
254 | 241 | ||
255 | if (update) { | 242 | if (update) { |
diff --git a/drivers/usb/host/whci/qset.c b/drivers/usb/host/whci/qset.c index 0420037d2e18..7be74314ee12 100644 --- a/drivers/usb/host/whci/qset.c +++ b/drivers/usb/host/whci/qset.c | |||
@@ -24,46 +24,6 @@ | |||
24 | 24 | ||
25 | #include "whcd.h" | 25 | #include "whcd.h" |
26 | 26 | ||
27 | void dump_qset(struct whc_qset *qset, struct device *dev) | ||
28 | { | ||
29 | struct whc_std *std; | ||
30 | struct urb *urb = NULL; | ||
31 | int i; | ||
32 | |||
33 | dev_dbg(dev, "qset %08x\n", (u32)qset->qset_dma); | ||
34 | dev_dbg(dev, " -> %08x\n", (u32)qset->qh.link); | ||
35 | dev_dbg(dev, " info: %08x %08x %08x\n", | ||
36 | qset->qh.info1, qset->qh.info2, qset->qh.info3); | ||
37 | dev_dbg(dev, " sts: %04x errs: %d\n", qset->qh.status, qset->qh.err_count); | ||
38 | dev_dbg(dev, " TD: sts: %08x opts: %08x\n", | ||
39 | qset->qh.overlay.qtd.status, qset->qh.overlay.qtd.options); | ||
40 | |||
41 | for (i = 0; i < WHCI_QSET_TD_MAX; i++) { | ||
42 | dev_dbg(dev, " %c%c TD[%d]: sts: %08x opts: %08x ptr: %08x\n", | ||
43 | i == qset->td_start ? 'S' : ' ', | ||
44 | i == qset->td_end ? 'E' : ' ', | ||
45 | i, qset->qtd[i].status, qset->qtd[i].options, | ||
46 | (u32)qset->qtd[i].page_list_ptr); | ||
47 | } | ||
48 | dev_dbg(dev, " ntds: %d\n", qset->ntds); | ||
49 | list_for_each_entry(std, &qset->stds, list_node) { | ||
50 | if (urb != std->urb) { | ||
51 | urb = std->urb; | ||
52 | dev_dbg(dev, " urb %p transferred: %d bytes\n", urb, | ||
53 | urb->actual_length); | ||
54 | } | ||
55 | if (std->qtd) | ||
56 | dev_dbg(dev, " sTD[%td]: %zu bytes @ %08x\n", | ||
57 | std->qtd - &qset->qtd[0], | ||
58 | std->len, std->num_pointers ? | ||
59 | (u32)(std->pl_virt[0].buf_ptr) : (u32)std->dma_addr); | ||
60 | else | ||
61 | dev_dbg(dev, " sTD[-]: %zd bytes @ %08x\n", | ||
62 | std->len, std->num_pointers ? | ||
63 | (u32)(std->pl_virt[0].buf_ptr) : (u32)std->dma_addr); | ||
64 | } | ||
65 | } | ||
66 | |||
67 | struct whc_qset *qset_alloc(struct whc *whc, gfp_t mem_flags) | 27 | struct whc_qset *qset_alloc(struct whc *whc, gfp_t mem_flags) |
68 | { | 28 | { |
69 | struct whc_qset *qset; | 29 | struct whc_qset *qset; |
diff --git a/drivers/usb/host/whci/whcd.h b/drivers/usb/host/whci/whcd.h index 1d2a53bd39fd..0f3540f04f53 100644 --- a/drivers/usb/host/whci/whcd.h +++ b/drivers/usb/host/whci/whcd.h | |||
@@ -21,6 +21,7 @@ | |||
21 | #define __WHCD_H | 21 | #define __WHCD_H |
22 | 22 | ||
23 | #include <linux/uwb/whci.h> | 23 | #include <linux/uwb/whci.h> |
24 | #include <linux/uwb/umc.h> | ||
24 | #include <linux/workqueue.h> | 25 | #include <linux/workqueue.h> |
25 | 26 | ||
26 | #include "whci-hc.h" | 27 | #include "whci-hc.h" |
@@ -28,6 +29,7 @@ | |||
28 | /* Generic command timeout. */ | 29 | /* Generic command timeout. */ |
29 | #define WHC_GENCMD_TIMEOUT_MS 100 | 30 | #define WHC_GENCMD_TIMEOUT_MS 100 |
30 | 31 | ||
32 | struct whc_dbg; | ||
31 | 33 | ||
32 | struct whc { | 34 | struct whc { |
33 | struct wusbhc wusbhc; | 35 | struct wusbhc wusbhc; |
@@ -69,6 +71,8 @@ struct whc { | |||
69 | struct list_head periodic_removed_list; | 71 | struct list_head periodic_removed_list; |
70 | wait_queue_head_t periodic_list_wq; | 72 | wait_queue_head_t periodic_list_wq; |
71 | struct work_struct periodic_work; | 73 | struct work_struct periodic_work; |
74 | |||
75 | struct whc_dbg *dbg; | ||
72 | }; | 76 | }; |
73 | 77 | ||
74 | #define wusbhc_to_whc(w) (container_of((w), struct whc, wusbhc)) | 78 | #define wusbhc_to_whc(w) (container_of((w), struct whc, wusbhc)) |
@@ -136,7 +140,7 @@ int whc_do_gencmd(struct whc *whc, u32 cmd, u32 params, void *addr, size_t len); | |||
136 | 140 | ||
137 | /* wusb.c */ | 141 | /* wusb.c */ |
138 | int whc_wusbhc_start(struct wusbhc *wusbhc); | 142 | int whc_wusbhc_start(struct wusbhc *wusbhc); |
139 | void whc_wusbhc_stop(struct wusbhc *wusbhc); | 143 | void whc_wusbhc_stop(struct wusbhc *wusbhc, int delay); |
140 | int whc_mmcie_add(struct wusbhc *wusbhc, u8 interval, u8 repeat_cnt, | 144 | int whc_mmcie_add(struct wusbhc *wusbhc, u8 interval, u8 repeat_cnt, |
141 | u8 handle, struct wuie_hdr *wuie); | 145 | u8 handle, struct wuie_hdr *wuie); |
142 | int whc_mmcie_rm(struct wusbhc *wusbhc, u8 handle); | 146 | int whc_mmcie_rm(struct wusbhc *wusbhc, u8 handle); |
@@ -190,8 +194,11 @@ void process_inactive_qtd(struct whc *whc, struct whc_qset *qset, | |||
190 | struct whc_qtd *qtd); | 194 | struct whc_qtd *qtd); |
191 | enum whc_update qset_add_qtds(struct whc *whc, struct whc_qset *qset); | 195 | enum whc_update qset_add_qtds(struct whc *whc, struct whc_qset *qset); |
192 | void qset_remove_complete(struct whc *whc, struct whc_qset *qset); | 196 | void qset_remove_complete(struct whc *whc, struct whc_qset *qset); |
193 | void dump_qset(struct whc_qset *qset, struct device *dev); | ||
194 | void pzl_update(struct whc *whc, uint32_t wusbcmd); | 197 | void pzl_update(struct whc *whc, uint32_t wusbcmd); |
195 | void asl_update(struct whc *whc, uint32_t wusbcmd); | 198 | void asl_update(struct whc *whc, uint32_t wusbcmd); |
196 | 199 | ||
200 | /* debug.c */ | ||
201 | void whc_dbg_init(struct whc *whc); | ||
202 | void whc_dbg_clean_up(struct whc *whc); | ||
203 | |||
197 | #endif /* #ifndef __WHCD_H */ | 204 | #endif /* #ifndef __WHCD_H */ |
diff --git a/drivers/usb/host/whci/whci-hc.h b/drivers/usb/host/whci/whci-hc.h index bff1eb7a35cf..51df7e313b38 100644 --- a/drivers/usb/host/whci/whci-hc.h +++ b/drivers/usb/host/whci/whci-hc.h | |||
@@ -410,6 +410,8 @@ struct dn_buf_entry { | |||
410 | # define WUSBDNTSCTRL_SLOTS(s) ((s) << 0) | 410 | # define WUSBDNTSCTRL_SLOTS(s) ((s) << 0) |
411 | 411 | ||
412 | #define WUSBTIME 0x68 | 412 | #define WUSBTIME 0x68 |
413 | # define WUSBTIME_CHANNEL_TIME_MASK 0x00ffffff | ||
414 | |||
413 | #define WUSBBPST 0x6c | 415 | #define WUSBBPST 0x6c |
414 | #define WUSBDIBUPDATED 0x70 | 416 | #define WUSBDIBUPDATED 0x70 |
415 | 417 | ||
diff --git a/drivers/usb/host/whci/wusb.c b/drivers/usb/host/whci/wusb.c index 66e4ddcd961d..f24efdebad17 100644 --- a/drivers/usb/host/whci/wusb.c +++ b/drivers/usb/host/whci/wusb.c | |||
@@ -15,47 +15,19 @@ | |||
15 | * You should have received a copy of the GNU General Public License | 15 | * You should have received a copy of the GNU General Public License |
16 | * along with this program. If not, see <http://www.gnu.org/licenses/>. | 16 | * along with this program. If not, see <http://www.gnu.org/licenses/>. |
17 | */ | 17 | */ |
18 | #include <linux/version.h> | ||
19 | #include <linux/kernel.h> | 18 | #include <linux/kernel.h> |
20 | #include <linux/init.h> | 19 | #include <linux/init.h> |
21 | #include <linux/uwb/umc.h> | 20 | #include <linux/uwb/umc.h> |
22 | #define D_LOCAL 1 | ||
23 | #include <linux/uwb/debug.h> | ||
24 | 21 | ||
25 | #include "../../wusbcore/wusbhc.h" | 22 | #include "../../wusbcore/wusbhc.h" |
26 | 23 | ||
27 | #include "whcd.h" | 24 | #include "whcd.h" |
28 | 25 | ||
29 | #if D_LOCAL >= 1 | ||
30 | static void dump_di(struct whc *whc, int idx) | ||
31 | { | ||
32 | struct di_buf_entry *di = &whc->di_buf[idx]; | ||
33 | struct device *dev = &whc->umc->dev; | ||
34 | char buf[128]; | ||
35 | |||
36 | bitmap_scnprintf(buf, sizeof(buf), (unsigned long *)di->availability_info, UWB_NUM_MAS); | ||
37 | |||
38 | d_printf(1, dev, "DI[%d]\n", idx); | ||
39 | d_printf(1, dev, " availability: %s\n", buf); | ||
40 | d_printf(1, dev, " %c%c key idx: %d dev addr: %d\n", | ||
41 | (di->addr_sec_info & WHC_DI_SECURE) ? 'S' : ' ', | ||
42 | (di->addr_sec_info & WHC_DI_DISABLE) ? 'D' : ' ', | ||
43 | (di->addr_sec_info & WHC_DI_KEY_IDX_MASK) >> 8, | ||
44 | (di->addr_sec_info & WHC_DI_DEV_ADDR_MASK)); | ||
45 | } | ||
46 | #else | ||
47 | static inline void dump_di(struct whc *whc, int idx) | ||
48 | { | ||
49 | } | ||
50 | #endif | ||
51 | |||
52 | static int whc_update_di(struct whc *whc, int idx) | 26 | static int whc_update_di(struct whc *whc, int idx) |
53 | { | 27 | { |
54 | int offset = idx / 32; | 28 | int offset = idx / 32; |
55 | u32 bit = 1 << (idx % 32); | 29 | u32 bit = 1 << (idx % 32); |
56 | 30 | ||
57 | dump_di(whc, idx); | ||
58 | |||
59 | le_writel(bit, whc->base + WUSBDIBUPDATED + offset); | 31 | le_writel(bit, whc->base + WUSBDIBUPDATED + offset); |
60 | 32 | ||
61 | return whci_wait_for(&whc->umc->dev, | 33 | return whci_wait_for(&whc->umc->dev, |
@@ -64,8 +36,9 @@ static int whc_update_di(struct whc *whc, int idx) | |||
64 | } | 36 | } |
65 | 37 | ||
66 | /* | 38 | /* |
67 | * WHCI starts and stops MMCs based on there being a valid GTK so | 39 | * WHCI starts MMCs based on there being a valid GTK so these need |
68 | * these need only start/stop the asynchronous and periodic schedules. | 40 | * only start/stop the asynchronous and periodic schedules and send a |
41 | * channel stop command. | ||
69 | */ | 42 | */ |
70 | 43 | ||
71 | int whc_wusbhc_start(struct wusbhc *wusbhc) | 44 | int whc_wusbhc_start(struct wusbhc *wusbhc) |
@@ -78,12 +51,20 @@ int whc_wusbhc_start(struct wusbhc *wusbhc) | |||
78 | return 0; | 51 | return 0; |
79 | } | 52 | } |
80 | 53 | ||
81 | void whc_wusbhc_stop(struct wusbhc *wusbhc) | 54 | void whc_wusbhc_stop(struct wusbhc *wusbhc, int delay) |
82 | { | 55 | { |
83 | struct whc *whc = wusbhc_to_whc(wusbhc); | 56 | struct whc *whc = wusbhc_to_whc(wusbhc); |
57 | u32 stop_time, now_time; | ||
58 | int ret; | ||
84 | 59 | ||
85 | pzl_stop(whc); | 60 | pzl_stop(whc); |
86 | asl_stop(whc); | 61 | asl_stop(whc); |
62 | |||
63 | now_time = le_readl(whc->base + WUSBTIME) & WUSBTIME_CHANNEL_TIME_MASK; | ||
64 | stop_time = (now_time + ((delay * 8) << 7)) & 0x00ffffff; | ||
65 | ret = whc_do_gencmd(whc, WUSBGENCMDSTS_CHAN_STOP, stop_time, NULL, 0); | ||
66 | if (ret == 0) | ||
67 | msleep(delay); | ||
87 | } | 68 | } |
88 | 69 | ||
89 | int whc_mmcie_add(struct wusbhc *wusbhc, u8 interval, u8 repeat_cnt, | 70 | int whc_mmcie_add(struct wusbhc *wusbhc, u8 interval, u8 repeat_cnt, |
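The stop_time arithmetic merits a note: (delay * 8) << 7 equals delay * 1024, which reads as a shift-friendly milliseconds-to-microseconds conversion (1024 in place of 1000) on the assumption that the 24-bit WUSBTIME channel time counts microseconds; the mask handles wraparound. Restated as a standalone helper (the unit interpretation is an inference, not something the patch states):

    #include <linux/types.h>

    #define EXAMPLE_CHANNEL_TIME_MASK 0x00ffffff    /* channel time is 24 bits */

    /* delay_ms * 1024, added to "now" modulo 2^24 */
    static u32 example_stop_time(u32 now_time, int delay_ms)
    {
            return (now_time + ((delay_ms * 8) << 7)) & EXAMPLE_CHANNEL_TIME_MASK;
    }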
diff --git a/drivers/usb/serial/console.c b/drivers/usb/serial/console.c index 5b95009d2fbb..19e24045b137 100644 --- a/drivers/usb/serial/console.c +++ b/drivers/usb/serial/console.c | |||
@@ -241,12 +241,25 @@ static void usb_console_write(struct console *co, | |||
241 | } | 241 | } |
242 | } | 242 | } |
243 | 243 | ||
244 | static struct tty_driver *usb_console_device(struct console *co, int *index) | ||
245 | { | ||
246 | struct tty_driver **p = (struct tty_driver **)co->data; | ||
247 | |||
248 | if (!*p) | ||
249 | return NULL; | ||
250 | |||
251 | *index = co->index; | ||
252 | return *p; | ||
253 | } | ||
254 | |||
244 | static struct console usbcons = { | 255 | static struct console usbcons = { |
245 | .name = "ttyUSB", | 256 | .name = "ttyUSB", |
246 | .write = usb_console_write, | 257 | .write = usb_console_write, |
258 | .device = usb_console_device, | ||
247 | .setup = usb_console_setup, | 259 | .setup = usb_console_setup, |
248 | .flags = CON_PRINTBUFFER, | 260 | .flags = CON_PRINTBUFFER, |
249 | .index = -1, | 261 | .index = -1, |
262 | .data = &usb_serial_tty_driver, | ||
250 | }; | 263 | }; |
251 | 264 | ||
252 | void usb_serial_console_disconnect(struct usb_serial *serial) | 265 | void usb_serial_console_disconnect(struct usb_serial *serial) |
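The usb_console_device() hook gives the tty core a way to map the console back to the tty driver providing it, which is what lets opens of /dev/console reach a USB serial port. The double indirection through co->data is the interesting part: usb_serial_tty_driver is registered after the console structure is defined, so the console stores a pointer to the pointer and checks it for NULL at lookup time. A reduced sketch of the hook with a plain static pointer instead (names invented, and the write path stubbed out):

    #include <linux/console.h>
    #include <linux/tty_driver.h>

    static struct tty_driver *example_tty_driver;   /* set at driver registration */

    static void example_console_write(struct console *co, const char *s,
                                      unsigned count)
    {
            /* hand off to the real transmit path; omitted in this sketch */
    }

    static struct tty_driver *example_console_device(struct console *co,
                                                     int *index)
    {
            *index = co->index;
            return example_tty_driver;      /* NULL until the driver loads */
    }

    static struct console example_console = {
            .name   = "ttyEX",
            .write  = example_console_write,
            .device = example_console_device,
            .flags  = CON_PRINTBUFFER,
            .index  = -1,
    };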
diff --git a/drivers/usb/serial/ftdi_sio.c b/drivers/usb/serial/ftdi_sio.c index fb6f2933b01b..ef6cfa5a447f 100644 --- a/drivers/usb/serial/ftdi_sio.c +++ b/drivers/usb/serial/ftdi_sio.c | |||
@@ -1054,6 +1054,8 @@ static int set_serial_info(struct tty_struct *tty, | |||
1054 | 1054 | ||
1055 | if (copy_from_user(&new_serial, newinfo, sizeof(new_serial))) | 1055 | if (copy_from_user(&new_serial, newinfo, sizeof(new_serial))) |
1056 | return -EFAULT; | 1056 | return -EFAULT; |
1057 | |||
1058 | lock_kernel(); | ||
1057 | old_priv = *priv; | 1059 | old_priv = *priv; |
1058 | 1060 | ||
1059 | /* Do error checking and permission checking */ | 1061 | /* Do error checking and permission checking */ |
@@ -1069,8 +1071,10 @@ static int set_serial_info(struct tty_struct *tty, | |||
1069 | } | 1071 | } |
1070 | 1072 | ||
1071 | if ((new_serial.baud_base != priv->baud_base) && | 1073 | if ((new_serial.baud_base != priv->baud_base) && |
1072 | (new_serial.baud_base < 9600)) | 1074 | (new_serial.baud_base < 9600)) { |
1075 | unlock_kernel(); | ||
1073 | return -EINVAL; | 1076 | return -EINVAL; |
1077 | } | ||
1074 | 1078 | ||
1075 | /* Make the changes - these are privileged changes! */ | 1079 | /* Make the changes - these are privileged changes! */ |
1076 | 1080 | ||
@@ -1098,8 +1102,11 @@ check_and_exit: | |||
1098 | (priv->flags & ASYNC_SPD_MASK)) || | 1102 | (priv->flags & ASYNC_SPD_MASK)) || |
1099 | (((priv->flags & ASYNC_SPD_MASK) == ASYNC_SPD_CUST) && | 1103 | (((priv->flags & ASYNC_SPD_MASK) == ASYNC_SPD_CUST) && |
1100 | (old_priv.custom_divisor != priv->custom_divisor))) { | 1104 | (old_priv.custom_divisor != priv->custom_divisor))) { |
1105 | unlock_kernel(); | ||
1101 | change_speed(tty, port); | 1106 | change_speed(tty, port); |
1102 | } | 1107 | } |
1108 | else | ||
1109 | unlock_kernel(); | ||
1103 | return 0; | 1110 | return 0; |
1104 | 1111 | ||
1105 | } /* set_serial_info */ | 1112 | } /* set_serial_info */ |
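The lock_kernel() calls added here pair with the serial_ioctl()/serial_break() hunks in usb-serial.c below: the core stops taking the Big Kernel Lock on behalf of drivers, so a driver that still depends on it takes it itself and must release it on every exit path (note the unlock before each early return above). The push-down idiom in outline (do_work is a stand-in for the real ioctl body):

    #include <linux/smp_lock.h>

    static int example_ioctl(int (*do_work)(void))
    {
            int ret;

            lock_kernel();          /* caller no longer holds the BKL for us */
            ret = do_work();
            unlock_kernel();        /* released on every path */
            return ret;
    }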
diff --git a/drivers/usb/serial/kl5kusb105.c b/drivers/usb/serial/kl5kusb105.c index dc36a052766f..fcd9082f3e7f 100644 --- a/drivers/usb/serial/kl5kusb105.c +++ b/drivers/usb/serial/kl5kusb105.c | |||
@@ -878,6 +878,7 @@ static void mct_u232_break_ctl(struct tty_struct *tty, int break_state) | |||
878 | 878 | ||
879 | dbg("%sstate=%d", __func__, break_state); | 879 | dbg("%sstate=%d", __func__, break_state); |
880 | 880 | ||
881 | /* LOCKING */ | ||
881 | if (break_state) | 882 | if (break_state) |
882 | lcr |= MCT_U232_SET_BREAK; | 883 | lcr |= MCT_U232_SET_BREAK; |
883 | 884 | ||
diff --git a/drivers/usb/serial/mct_u232.c b/drivers/usb/serial/mct_u232.c index 07710cf31d0d..82930a7d5093 100644 --- a/drivers/usb/serial/mct_u232.c +++ b/drivers/usb/serial/mct_u232.c | |||
@@ -721,10 +721,10 @@ static void mct_u232_break_ctl(struct tty_struct *tty, int break_state) | |||
721 | 721 | ||
722 | spin_lock_irqsave(&priv->lock, flags); | 722 | spin_lock_irqsave(&priv->lock, flags); |
723 | lcr = priv->last_lcr; | 723 | lcr = priv->last_lcr; |
724 | spin_unlock_irqrestore(&priv->lock, flags); | ||
725 | 724 | ||
726 | if (break_state) | 725 | if (break_state) |
727 | lcr |= MCT_U232_SET_BREAK; | 726 | lcr |= MCT_U232_SET_BREAK; |
727 | spin_unlock_irqrestore(&priv->lock, flags); | ||
728 | 728 | ||
729 | mct_u232_set_line_ctrl(serial, lcr); | 729 | mct_u232_set_line_ctrl(serial, lcr); |
730 | } /* mct_u232_break_ctl */ | 730 | } /* mct_u232_break_ctl */ |
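The mct_u232 hunk narrows a race rather than adding a lock: the read-modify of last_lcr now completes before the spinlock protecting it is dropped, instead of the value being modified after the unlock. In isolation (the names and the break bit are illustrative only):

    #include <linux/spinlock.h>

    #define EXAMPLE_SET_BREAK 0x40  /* illustrative bit, not the real define */

    static unsigned int example_break_lcr(spinlock_t *lock,
                                          unsigned int *last_lcr,
                                          int break_state)
    {
            unsigned long flags;
            unsigned int lcr;

            spin_lock_irqsave(lock, flags);
            lcr = *last_lcr;
            if (break_state)
                    lcr |= EXAMPLE_SET_BREAK;   /* modify while still locked */
            spin_unlock_irqrestore(lock, flags);

            return lcr;
    }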
diff --git a/drivers/usb/serial/mos7840.c b/drivers/usb/serial/mos7840.c index fda4a6421c44..96a8c7713212 100644 --- a/drivers/usb/serial/mos7840.c +++ b/drivers/usb/serial/mos7840.c | |||
@@ -1343,6 +1343,7 @@ static void mos7840_break(struct tty_struct *tty, int break_state) | |||
1343 | else | 1343 | else |
1344 | data = mos7840_port->shadowLCR & ~LCR_SET_BREAK; | 1344 | data = mos7840_port->shadowLCR & ~LCR_SET_BREAK; |
1345 | 1345 | ||
1346 | /* FIXME: no locking on shadowLCR anywhere in driver */ | ||
1346 | mos7840_port->shadowLCR = data; | 1347 | mos7840_port->shadowLCR = data; |
1347 | dbg("mcs7840_break mos7840_port->shadowLCR is %x\n", | 1348 | dbg("mcs7840_break mos7840_port->shadowLCR is %x\n", |
1348 | mos7840_port->shadowLCR); | 1349 | mos7840_port->shadowLCR); |
@@ -2214,10 +2215,12 @@ static int mos7840_set_modem_info(struct moschip_port *mos7840_port, | |||
2214 | break; | 2215 | break; |
2215 | } | 2216 | } |
2216 | 2217 | ||
2218 | lock_kernel(); | ||
2217 | mos7840_port->shadowMCR = mcr; | 2219 | mos7840_port->shadowMCR = mcr; |
2218 | 2220 | ||
2219 | Data = mos7840_port->shadowMCR; | 2221 | Data = mos7840_port->shadowMCR; |
2220 | status = mos7840_set_uart_reg(port, MODEM_CONTROL_REGISTER, Data); | 2222 | status = mos7840_set_uart_reg(port, MODEM_CONTROL_REGISTER, Data); |
2223 | unlock_kernel(); | ||
2221 | if (status < 0) { | 2224 | if (status < 0) { |
2222 | dbg("setting MODEM_CONTROL_REGISTER Failed\n"); | 2225 | dbg("setting MODEM_CONTROL_REGISTER Failed\n"); |
2223 | return -1; | 2226 | return -1; |
diff --git a/drivers/usb/serial/sierra.c b/drivers/usb/serial/sierra.c index 0f2b67244af6..d9bf9a5c20ec 100644 --- a/drivers/usb/serial/sierra.c +++ b/drivers/usb/serial/sierra.c | |||
@@ -442,7 +442,7 @@ static void sierra_indat_callback(struct urb *urb) | |||
442 | " endpoint %02x.", __func__, status, endpoint); | 442 | " endpoint %02x.", __func__, status, endpoint); |
443 | } else { | 443 | } else { |
444 | if (urb->actual_length) { | 444 | if (urb->actual_length) { |
445 | tty = tty_port_tty_get(&port->port); | 445 | tty = tty_port_tty_get(&port->port); |
446 | tty_buffer_request_room(tty, urb->actual_length); | 446 | tty_buffer_request_room(tty, urb->actual_length); |
447 | tty_insert_flip_string(tty, data, urb->actual_length); | 447 | tty_insert_flip_string(tty, data, urb->actual_length); |
448 | tty_flip_buffer_push(tty); | 448 | tty_flip_buffer_push(tty); |
diff --git a/drivers/usb/serial/usb-serial.c b/drivers/usb/serial/usb-serial.c index 794b5ffe4397..080ade223d53 100644 --- a/drivers/usb/serial/usb-serial.c +++ b/drivers/usb/serial/usb-serial.c | |||
@@ -269,15 +269,19 @@ static void serial_close(struct tty_struct *tty, struct file *filp) | |||
269 | return; | 269 | return; |
270 | } | 270 | } |
271 | 271 | ||
272 | --port->port.count; | 272 | if (port->port.count == 1) |
273 | if (port->port.count == 0) | ||
274 | /* only call the device specific close if this | 273 | /* only call the device specific close if this |
275 | * port is being closed by the last owner */ | 274 | * port is being closed by the last owner. Ensure we do |
275 | * this before we drop the port count. The call is protected | ||
276 | * by the port mutex | ||
277 | */ | ||
276 | port->serial->type->close(tty, port, filp); | 278 | port->serial->type->close(tty, port, filp); |
277 | 279 | ||
278 | if (port->port.count == (port->console? 1 : 0)) { | 280 | if (port->port.count == (port->console ? 2 : 1)) { |
279 | struct tty_struct *tty = tty_port_tty_get(&port->port); | 281 | struct tty_struct *tty = tty_port_tty_get(&port->port); |
280 | if (tty) { | 282 | if (tty) { |
283 | /* We must do this before we drop the port count to | ||
284 | zero. */ | ||
281 | if (tty->driver_data) | 285 | if (tty->driver_data) |
282 | tty->driver_data = NULL; | 286 | tty->driver_data = NULL; |
283 | tty_port_tty_set(&port->port, NULL); | 287 | tty_port_tty_set(&port->port, NULL); |
@@ -285,13 +289,14 @@ static void serial_close(struct tty_struct *tty, struct file *filp) | |||
285 | } | 289 | } |
286 | } | 290 | } |
287 | 291 | ||
288 | if (port->port.count == 0) { | 292 | if (port->port.count == 1) { |
289 | mutex_lock(&port->serial->disc_mutex); | 293 | mutex_lock(&port->serial->disc_mutex); |
290 | if (!port->serial->disconnected) | 294 | if (!port->serial->disconnected) |
291 | usb_autopm_put_interface(port->serial->interface); | 295 | usb_autopm_put_interface(port->serial->interface); |
292 | mutex_unlock(&port->serial->disc_mutex); | 296 | mutex_unlock(&port->serial->disc_mutex); |
293 | module_put(port->serial->type->driver.owner); | 297 | module_put(port->serial->type->driver.owner); |
294 | } | 298 | } |
299 | --port->port.count; | ||
295 | 300 | ||
296 | mutex_unlock(&port->mutex); | 301 | mutex_unlock(&port->mutex); |
297 | usb_serial_put(port->serial); | 302 | usb_serial_put(port->serial); |
@@ -334,6 +339,10 @@ static int serial_chars_in_buffer(struct tty_struct *tty) | |||
334 | dbg("%s = port %d", __func__, port->number); | 339 | dbg("%s = port %d", __func__, port->number); |
335 | 340 | ||
336 | WARN_ON(!port->port.count); | 341 | WARN_ON(!port->port.count); |
342 | /* if the device was unplugged then any remaining characters | ||
343 | fell out of the connector ;) */ | ||
344 | if (port->serial->disconnected) | ||
345 | return 0; | ||
337 | /* pass on to the driver specific version of this function */ | 346 | /* pass on to the driver specific version of this function */ |
338 | return port->serial->type->chars_in_buffer(tty); | 347 | return port->serial->type->chars_in_buffer(tty); |
339 | } | 348 | } |
@@ -373,9 +382,7 @@ static int serial_ioctl(struct tty_struct *tty, struct file *file, | |||
373 | /* pass on to the driver specific version of this function | 382 | /* pass on to the driver specific version of this function |
374 | if it is available */ | 383 | if it is available */ |
375 | if (port->serial->type->ioctl) { | 384 | if (port->serial->type->ioctl) { |
376 | lock_kernel(); | ||
377 | retval = port->serial->type->ioctl(tty, file, cmd, arg); | 385 | retval = port->serial->type->ioctl(tty, file, cmd, arg); |
378 | unlock_kernel(); | ||
379 | } else | 386 | } else |
380 | retval = -ENOIOCTLCMD; | 387 | retval = -ENOIOCTLCMD; |
381 | return retval; | 388 | return retval; |
@@ -404,11 +411,8 @@ static int serial_break(struct tty_struct *tty, int break_state) | |||
404 | WARN_ON(!port->port.count); | 411 | WARN_ON(!port->port.count); |
405 | /* pass on to the driver specific version of this function | 412 | /* pass on to the driver specific version of this function |
406 | if it is available */ | 413 | if it is available */ |
407 | if (port->serial->type->break_ctl) { | 414 | if (port->serial->type->break_ctl) |
408 | lock_kernel(); | ||
409 | port->serial->type->break_ctl(tty, break_state); | 415 | port->serial->type->break_ctl(tty, break_state); |
410 | unlock_kernel(); | ||
411 | } | ||
412 | return 0; | 416 | return 0; |
413 | } | 417 | } |
414 | 418 | ||
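The serial_close() rework is all about ordering: the device-specific close and the tty teardown now run while port.count is still 1 (or 2 with a console attached), and the decrement happens last, under port->mutex, so no path ever sees a half-closed port whose count already reads zero. The ordering in outline (struct example_port and the callbacks are invented for the sketch):

    #include <linux/mutex.h>

    struct example_port {
            struct mutex    mutex;
            int             count;
            bool            console;
    };

    static void example_close(struct example_port *port,
                              void (*driver_close)(struct example_port *),
                              void (*drop_tty)(struct example_port *))
    {
            mutex_lock(&port->mutex);
            if (port->count == 1)
                    driver_close(port);     /* last-owner close, count still 1 */
            if (port->count == (port->console ? 2 : 1))
                    drop_tty(port);         /* clear the tty before count hits 0 */
            --port->count;                  /* decrement last, under the mutex */
            mutex_unlock(&port->mutex);
    }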
diff --git a/drivers/usb/wusbcore/cbaf.c b/drivers/usb/wusbcore/cbaf.c index ab4788d1785a..1335cbe1191d 100644 --- a/drivers/usb/wusbcore/cbaf.c +++ b/drivers/usb/wusbcore/cbaf.c | |||
@@ -88,7 +88,6 @@ | |||
88 | */ | 88 | */ |
89 | #include <linux/module.h> | 89 | #include <linux/module.h> |
90 | #include <linux/ctype.h> | 90 | #include <linux/ctype.h> |
91 | #include <linux/version.h> | ||
92 | #include <linux/usb.h> | 91 | #include <linux/usb.h> |
93 | #include <linux/interrupt.h> | 92 | #include <linux/interrupt.h> |
94 | #include <linux/delay.h> | 93 | #include <linux/delay.h> |
diff --git a/drivers/usb/wusbcore/crypto.c b/drivers/usb/wusbcore/crypto.c index c36c4389baae..9ec7fd5da489 100644 --- a/drivers/usb/wusbcore/crypto.c +++ b/drivers/usb/wusbcore/crypto.c | |||
@@ -51,9 +51,17 @@ | |||
51 | #include <linux/uwb.h> | 51 | #include <linux/uwb.h> |
52 | #include <linux/usb/wusb.h> | 52 | #include <linux/usb/wusb.h> |
53 | #include <linux/scatterlist.h> | 53 | #include <linux/scatterlist.h> |
54 | #define D_LOCAL 0 | ||
55 | #include <linux/uwb/debug.h> | ||
56 | 54 | ||
55 | static int debug_crypto_verify = 0; | ||
56 | |||
57 | module_param(debug_crypto_verify, int, 0); | ||
58 | MODULE_PARM_DESC(debug_crypto_verify, "verify the key generation algorithms"); | ||
59 | |||
60 | static void wusb_key_dump(const void *buf, size_t len) | ||
61 | { | ||
62 | print_hex_dump(KERN_ERR, " ", DUMP_PREFIX_OFFSET, 16, 1, | ||
63 | buf, len, 0); | ||
64 | } | ||
57 | 65 | ||
58 | /* | 66 | /* |
59 | * Block of data, as understood by AES-CCM | 67 | * Block of data, as understood by AES-CCM |
@@ -203,9 +211,6 @@ static int wusb_ccm_mac(struct crypto_blkcipher *tfm_cbc, | |||
203 | const u8 bzero[16] = { 0 }; | 211 | const u8 bzero[16] = { 0 }; |
204 | size_t zero_padding; | 212 | size_t zero_padding; |
205 | 213 | ||
206 | d_fnstart(3, NULL, "(tfm_cbc %p, tfm_aes %p, mic %p, " | ||
207 | "n %p, a %p, b %p, blen %zu)\n", | ||
208 | tfm_cbc, tfm_aes, mic, n, a, b, blen); | ||
209 | /* | 214 | /* |
210 | * These checks should be compile time optimized out | 215 | * These checks should be compile time optimized out |
211 | * ensure @a fills b1's mac_header and following fields | 216 | * ensure @a fills b1's mac_header and following fields |
@@ -247,16 +252,6 @@ static int wusb_ccm_mac(struct crypto_blkcipher *tfm_cbc, | |||
247 | b1.la = cpu_to_be16(blen + 14); | 252 | b1.la = cpu_to_be16(blen + 14); |
248 | memcpy(&b1.mac_header, a, sizeof(*a)); | 253 | memcpy(&b1.mac_header, a, sizeof(*a)); |
249 | 254 | ||
250 | d_printf(4, NULL, "I: B0 (%zu bytes)\n", sizeof(b0)); | ||
251 | d_dump(4, NULL, &b0, sizeof(b0)); | ||
252 | d_printf(4, NULL, "I: B1 (%zu bytes)\n", sizeof(b1)); | ||
253 | d_dump(4, NULL, &b1, sizeof(b1)); | ||
254 | d_printf(4, NULL, "I: B (%zu bytes)\n", blen); | ||
255 | d_dump(4, NULL, b, blen); | ||
256 | d_printf(4, NULL, "I: B 0-padding (%zu bytes)\n", zero_padding); | ||
257 | d_printf(4, NULL, "D: IV before crypto (%zu)\n", ivsize); | ||
258 | d_dump(4, NULL, iv, ivsize); | ||
259 | |||
260 | sg_init_table(sg, ARRAY_SIZE(sg)); | 255 | sg_init_table(sg, ARRAY_SIZE(sg)); |
261 | sg_set_buf(&sg[0], &b0, sizeof(b0)); | 256 | sg_set_buf(&sg[0], &b0, sizeof(b0)); |
262 | sg_set_buf(&sg[1], &b1, sizeof(b1)); | 257 | sg_set_buf(&sg[1], &b1, sizeof(b1)); |
@@ -273,8 +268,6 @@ static int wusb_ccm_mac(struct crypto_blkcipher *tfm_cbc, | |||
273 | result); | 268 | result); |
274 | goto error_cbc_crypt; | 269 | goto error_cbc_crypt; |
275 | } | 270 | } |
276 | d_printf(4, NULL, "D: MIC tag\n"); | ||
277 | d_dump(4, NULL, iv, ivsize); | ||
278 | 271 | ||
279 | /* Now we crypt the MIC Tag (*iv) with Ax -- values per WUSB1.0[6.5] | 272 | /* Now we crypt the MIC Tag (*iv) with Ax -- values per WUSB1.0[6.5] |
280 | * The procedure is to AES crypt the A0 block and XOR the MIC | 273 | * The procedure is to AES crypt the A0 block and XOR the MIC |
@@ -289,17 +282,10 @@ static int wusb_ccm_mac(struct crypto_blkcipher *tfm_cbc, | |||
289 | ax.counter = 0; | 282 | ax.counter = 0; |
290 | crypto_cipher_encrypt_one(tfm_aes, (void *)&ax, (void *)&ax); | 283 | crypto_cipher_encrypt_one(tfm_aes, (void *)&ax, (void *)&ax); |
291 | bytewise_xor(mic, &ax, iv, 8); | 284 | bytewise_xor(mic, &ax, iv, 8); |
292 | d_printf(4, NULL, "D: CTR[MIC]\n"); | ||
293 | d_dump(4, NULL, &ax, 8); | ||
294 | d_printf(4, NULL, "D: CCM-MIC tag\n"); | ||
295 | d_dump(4, NULL, mic, 8); | ||
296 | result = 8; | 285 | result = 8; |
297 | error_cbc_crypt: | 286 | error_cbc_crypt: |
298 | kfree(dst_buf); | 287 | kfree(dst_buf); |
299 | error_dst_buf: | 288 | error_dst_buf: |
300 | d_fnend(3, NULL, "(tfm_cbc %p, tfm_aes %p, mic %p, " | ||
301 | "n %p, a %p, b %p, blen %zu)\n", | ||
302 | tfm_cbc, tfm_aes, mic, n, a, b, blen); | ||
303 | return result; | 289 | return result; |
304 | } | 290 | } |
305 | 291 | ||
@@ -321,10 +307,6 @@ ssize_t wusb_prf(void *out, size_t out_size, | |||
321 | u64 sfn = 0; | 307 | u64 sfn = 0; |
322 | __le64 sfn_le; | 308 | __le64 sfn_le; |
323 | 309 | ||
324 | d_fnstart(3, NULL, "(out %p, out_size %zu, key %p, _n %p, " | ||
325 | "a %p, b %p, blen %zu, len %zu)\n", out, out_size, | ||
326 | key, _n, a, b, blen, len); | ||
327 | |||
328 | tfm_cbc = crypto_alloc_blkcipher("cbc(aes)", 0, CRYPTO_ALG_ASYNC); | 310 | tfm_cbc = crypto_alloc_blkcipher("cbc(aes)", 0, CRYPTO_ALG_ASYNC); |
329 | if (IS_ERR(tfm_cbc)) { | 311 | if (IS_ERR(tfm_cbc)) { |
330 | result = PTR_ERR(tfm_cbc); | 312 | result = PTR_ERR(tfm_cbc); |
@@ -366,9 +348,6 @@ error_alloc_aes: | |||
366 | error_setkey_cbc: | 348 | error_setkey_cbc: |
367 | crypto_free_blkcipher(tfm_cbc); | 349 | crypto_free_blkcipher(tfm_cbc); |
368 | error_alloc_cbc: | 350 | error_alloc_cbc: |
369 | d_fnend(3, NULL, "(out %p, out_size %zu, key %p, _n %p, " | ||
370 | "a %p, b %p, blen %zu, len %zu) = %d\n", out, out_size, | ||
371 | key, _n, a, b, blen, len, (int)bytes); | ||
372 | return result; | 351 | return result; |
373 | } | 352 | } |
374 | 353 | ||
@@ -422,14 +401,14 @@ static int wusb_oob_mic_verify(void) | |||
422 | "mismatch between MIC result and WUSB1.0[A2]\n"); | 401 | "mismatch between MIC result and WUSB1.0[A2]\n"); |
423 | hs_size = sizeof(stv_hsmic_hs) - sizeof(stv_hsmic_hs.MIC); | 402 | hs_size = sizeof(stv_hsmic_hs) - sizeof(stv_hsmic_hs.MIC); |
424 | printk(KERN_ERR "E: Handshake2 in: (%zu bytes)\n", hs_size); | 403 | printk(KERN_ERR "E: Handshake2 in: (%zu bytes)\n", hs_size); |
425 | dump_bytes(NULL, &stv_hsmic_hs, hs_size); | 404 | wusb_key_dump(&stv_hsmic_hs, hs_size); |
426 | printk(KERN_ERR "E: CCM Nonce in: (%zu bytes)\n", | 405 | printk(KERN_ERR "E: CCM Nonce in: (%zu bytes)\n", |
427 | sizeof(stv_hsmic_n)); | 406 | sizeof(stv_hsmic_n)); |
428 | dump_bytes(NULL, &stv_hsmic_n, sizeof(stv_hsmic_n)); | 407 | wusb_key_dump(&stv_hsmic_n, sizeof(stv_hsmic_n)); |
429 | printk(KERN_ERR "E: MIC out:\n"); | 408 | printk(KERN_ERR "E: MIC out:\n"); |
430 | dump_bytes(NULL, mic, sizeof(mic)); | 409 | wusb_key_dump(mic, sizeof(mic)); |
431 | printk(KERN_ERR "E: MIC out (from WUSB1.0[A.2]):\n"); | 410 | printk(KERN_ERR "E: MIC out (from WUSB1.0[A.2]):\n"); |
432 | dump_bytes(NULL, stv_hsmic_hs.MIC, sizeof(stv_hsmic_hs.MIC)); | 411 | wusb_key_dump(stv_hsmic_hs.MIC, sizeof(stv_hsmic_hs.MIC)); |
433 | result = -EINVAL; | 412 | result = -EINVAL; |
434 | } else | 413 | } else |
435 | result = 0; | 414 | result = 0; |
@@ -497,19 +476,16 @@ static int wusb_key_derive_verify(void) | |||
497 | printk(KERN_ERR "E: WUSB key derivation test: " | 476 | printk(KERN_ERR "E: WUSB key derivation test: " |
498 | "mismatch between key derivation result " | 477 | "mismatch between key derivation result " |
499 | "and WUSB1.0[A1] Errata 2006/12\n"); | 478 | "and WUSB1.0[A1] Errata 2006/12\n"); |
500 | printk(KERN_ERR "E: keydvt in: key (%zu bytes)\n", | 479 | printk(KERN_ERR "E: keydvt in: key\n"); |
501 | sizeof(stv_key_a1)); | 480 | wusb_key_dump(stv_key_a1, sizeof(stv_key_a1)); |
502 | dump_bytes(NULL, stv_key_a1, sizeof(stv_key_a1)); | 481 | printk(KERN_ERR "E: keydvt in: nonce\n"); |
503 | printk(KERN_ERR "E: keydvt in: nonce (%zu bytes)\n", | 482 | wusb_key_dump(&stv_keydvt_n_a1, sizeof(stv_keydvt_n_a1)); |
504 | sizeof(stv_keydvt_n_a1)); | 483 | printk(KERN_ERR "E: keydvt in: hnonce & dnonce\n"); |
505 | dump_bytes(NULL, &stv_keydvt_n_a1, sizeof(stv_keydvt_n_a1)); | 484 | wusb_key_dump(&stv_keydvt_in_a1, sizeof(stv_keydvt_in_a1)); |
506 | printk(KERN_ERR "E: keydvt in: hnonce & dnonce (%zu bytes)\n", | ||
507 | sizeof(stv_keydvt_in_a1)); | ||
508 | dump_bytes(NULL, &stv_keydvt_in_a1, sizeof(stv_keydvt_in_a1)); | ||
509 | printk(KERN_ERR "E: keydvt out: KCK\n"); | 485 | printk(KERN_ERR "E: keydvt out: KCK\n"); |
510 | dump_bytes(NULL, &keydvt_out.kck, sizeof(keydvt_out.kck)); | 486 | wusb_key_dump(&keydvt_out.kck, sizeof(keydvt_out.kck)); |
511 | printk(KERN_ERR "E: keydvt out: PTK\n"); | 487 | printk(KERN_ERR "E: keydvt out: PTK\n"); |
512 | dump_bytes(NULL, &keydvt_out.ptk, sizeof(keydvt_out.ptk)); | 488 | wusb_key_dump(&keydvt_out.ptk, sizeof(keydvt_out.ptk)); |
513 | result = -EINVAL; | 489 | result = -EINVAL; |
514 | } else | 490 | } else |
515 | result = 0; | 491 | result = 0; |
@@ -526,10 +502,13 @@ int wusb_crypto_init(void) | |||
526 | { | 502 | { |
527 | int result; | 503 | int result; |
528 | 504 | ||
529 | result = wusb_key_derive_verify(); | 505 | if (debug_crypto_verify) { |
530 | if (result < 0) | 506 | result = wusb_key_derive_verify(); |
531 | return result; | 507 | if (result < 0) |
532 | return wusb_oob_mic_verify(); | 508 | return result; |
509 | return wusb_oob_mic_verify(); | ||
510 | } | ||
511 | return 0; | ||
533 | } | 512 | } |
534 | 513 | ||
535 | void wusb_crypto_exit(void) | 514 | void wusb_crypto_exit(void) |
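The crypto.c change turns the always-run key-derivation self-tests into an opt-in: wusb_crypto_init() runs them only when the module is loaded with debug_crypto_verify=1, and the failure dumps go through print_hex_dump() instead of the removed dump_bytes(). The same gating pattern in a self-contained module (the names and the expected[] data are placeholders):

    #include <linux/module.h>
    #include <linux/kernel.h>
    #include <linux/string.h>
    #include <linux/errno.h>

    static int debug_selftest;      /* off by default */
    module_param(debug_selftest, int, 0);
    MODULE_PARM_DESC(debug_selftest, "run the expensive self-test at load time");

    static int run_selftest(void)
    {
            static const u8 expected[4] = { 0xde, 0xad, 0xbe, 0xef };
            u8 got[4] = { 0xde, 0xad, 0xbe, 0xef };     /* stand-in computation */

            if (memcmp(got, expected, sizeof(got)) != 0) {
                    /* dump the offending buffer, as wusb_key_dump() does */
                    print_hex_dump(KERN_ERR, "  ", DUMP_PREFIX_OFFSET, 16, 1,
                                   got, sizeof(got), 0);
                    return -EINVAL;
            }
            return 0;
    }

    static int __init example_init(void)
    {
            return debug_selftest ? run_selftest() : 0;
    }
    module_init(example_init);
    MODULE_LICENSE("GPL");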
diff --git a/drivers/usb/wusbcore/dev-sysfs.c b/drivers/usb/wusbcore/dev-sysfs.c index 7897a19652e5..101834576236 100644 --- a/drivers/usb/wusbcore/dev-sysfs.c +++ b/drivers/usb/wusbcore/dev-sysfs.c | |||
@@ -28,10 +28,6 @@ | |||
28 | #include <linux/workqueue.h> | 28 | #include <linux/workqueue.h> |
29 | #include "wusbhc.h" | 29 | #include "wusbhc.h" |
30 | 30 | ||
31 | #undef D_LOCAL | ||
32 | #define D_LOCAL 4 | ||
33 | #include <linux/uwb/debug.h> | ||
34 | |||
35 | static ssize_t wusb_disconnect_store(struct device *dev, | 31 | static ssize_t wusb_disconnect_store(struct device *dev, |
36 | struct device_attribute *attr, | 32 | struct device_attribute *attr, |
37 | const char *buf, size_t size) | 33 | const char *buf, size_t size) |
diff --git a/drivers/usb/wusbcore/devconnect.c b/drivers/usb/wusbcore/devconnect.c index f45d777bef34..e2e7e4bc8463 100644 --- a/drivers/usb/wusbcore/devconnect.c +++ b/drivers/usb/wusbcore/devconnect.c | |||
@@ -57,9 +57,6 @@ | |||
57 | * Called by notif.c:wusb_handle_dn_connect() | 57 | * Called by notif.c:wusb_handle_dn_connect() |
58 | * when a DN_Connect is received. | 58 | * when a DN_Connect is received. |
59 | * | 59 | * |
60 | * wusbhc_devconnect_auth() Called by rh.c:wusbhc_rh_port_reset() when | ||
61 | * doing the device connect sequence. | ||
62 | * | ||
63 | * wusb_devconnect_acked() Ack done, release resources. | 60 | * wusb_devconnect_acked() Ack done, release resources. |
64 | * | 61 | * |
65 | * wusb_handle_dn_alive() Called by notif.c:wusb_handle_dn() | 62 | * wusb_handle_dn_alive() Called by notif.c:wusb_handle_dn() |
@@ -69,9 +66,6 @@ | |||
69 | * process a disconnect request from a | 66 | * device. |
70 | * device. | 67 | * device. |
71 | * | 68 | * |
72 | * wusb_dev_reset() Called by rh.c:wusbhc_rh_port_reset() when | ||
73 | * resetting a device. | ||
74 | * | ||
75 | * __wusb_dev_disable() Called by rh.c:wusbhc_rh_clear_port_feat() when | 69 | * __wusb_dev_disable() Called by rh.c:wusbhc_rh_clear_port_feat() when |
76 | * disabling a port. | 70 | * disabling a port. |
77 | * | 71 | * |
@@ -97,10 +91,6 @@ | |||
97 | #include <linux/workqueue.h> | 91 | #include <linux/workqueue.h> |
98 | #include "wusbhc.h" | 92 | #include "wusbhc.h" |
99 | 93 | ||
100 | #undef D_LOCAL | ||
101 | #define D_LOCAL 1 | ||
102 | #include <linux/uwb/debug.h> | ||
103 | |||
104 | static void wusbhc_devconnect_acked_work(struct work_struct *work); | 94 | static void wusbhc_devconnect_acked_work(struct work_struct *work); |
105 | 95 | ||
106 | static void wusb_dev_free(struct wusb_dev *wusb_dev) | 96 | static void wusb_dev_free(struct wusb_dev *wusb_dev) |
@@ -240,6 +230,7 @@ static struct wusb_dev *wusbhc_cack_add(struct wusbhc *wusbhc, | |||
240 | list_add_tail(&wusb_dev->cack_node, &wusbhc->cack_list); | 230 | list_add_tail(&wusb_dev->cack_node, &wusbhc->cack_list); |
241 | wusbhc->cack_count++; | 231 | wusbhc->cack_count++; |
242 | wusbhc_fill_cack_ie(wusbhc); | 232 | wusbhc_fill_cack_ie(wusbhc); |
233 | |||
243 | return wusb_dev; | 234 | return wusb_dev; |
244 | } | 235 | } |
245 | 236 | ||
@@ -250,12 +241,9 @@ static struct wusb_dev *wusbhc_cack_add(struct wusbhc *wusbhc, | |||
250 | */ | 241 | */ |
251 | static void wusbhc_cack_rm(struct wusbhc *wusbhc, struct wusb_dev *wusb_dev) | 242 | static void wusbhc_cack_rm(struct wusbhc *wusbhc, struct wusb_dev *wusb_dev) |
252 | { | 243 | { |
253 | struct device *dev = wusbhc->dev; | ||
254 | d_fnstart(3, dev, "(wusbhc %p wusb_dev %p)\n", wusbhc, wusb_dev); | ||
255 | list_del_init(&wusb_dev->cack_node); | 244 | list_del_init(&wusb_dev->cack_node); |
256 | wusbhc->cack_count--; | 245 | wusbhc->cack_count--; |
257 | wusbhc_fill_cack_ie(wusbhc); | 246 | wusbhc_fill_cack_ie(wusbhc); |
258 | d_fnend(3, dev, "(wusbhc %p wusb_dev %p) = void\n", wusbhc, wusb_dev); | ||
259 | } | 247 | } |
260 | 248 | ||
261 | /* | 249 | /* |
@@ -263,14 +251,11 @@ static void wusbhc_cack_rm(struct wusbhc *wusbhc, struct wusb_dev *wusb_dev) | |||
263 | static | 251 | static |
264 | void wusbhc_devconnect_acked(struct wusbhc *wusbhc, struct wusb_dev *wusb_dev) | 252 | void wusbhc_devconnect_acked(struct wusbhc *wusbhc, struct wusb_dev *wusb_dev) |
265 | { | 253 | { |
266 | struct device *dev = wusbhc->dev; | ||
267 | d_fnstart(3, dev, "(wusbhc %p wusb_dev %p)\n", wusbhc, wusb_dev); | ||
268 | wusbhc_cack_rm(wusbhc, wusb_dev); | 254 | wusbhc_cack_rm(wusbhc, wusb_dev); |
269 | if (wusbhc->cack_count) | 255 | if (wusbhc->cack_count) |
270 | wusbhc_mmcie_set(wusbhc, 0, 0, &wusbhc->cack_ie.hdr); | 256 | wusbhc_mmcie_set(wusbhc, 0, 0, &wusbhc->cack_ie.hdr); |
271 | else | 257 | else |
272 | wusbhc_mmcie_rm(wusbhc, &wusbhc->cack_ie.hdr); | 258 | wusbhc_mmcie_rm(wusbhc, &wusbhc->cack_ie.hdr); |
273 | d_fnend(3, dev, "(wusbhc %p wusb_dev %p) = void\n", wusbhc, wusb_dev); | ||
274 | } | 259 | } |
275 | 260 | ||
276 | static void wusbhc_devconnect_acked_work(struct work_struct *work) | 261 | static void wusbhc_devconnect_acked_work(struct work_struct *work) |
@@ -320,7 +305,6 @@ void wusbhc_devconnect_ack(struct wusbhc *wusbhc, struct wusb_dn_connect *dnc, | |||
320 | struct wusb_port *port; | 305 | struct wusb_port *port; |
321 | unsigned idx, devnum; | 306 | unsigned idx, devnum; |
322 | 307 | ||
323 | d_fnstart(3, dev, "(%p, %p, %s)\n", wusbhc, dnc, pr_cdid); | ||
324 | mutex_lock(&wusbhc->mutex); | 308 | mutex_lock(&wusbhc->mutex); |
325 | 309 | ||
326 | /* Check we are not handling it already */ | 310 | /* Check we are not handling it already */ |
@@ -366,16 +350,13 @@ void wusbhc_devconnect_ack(struct wusbhc *wusbhc, struct wusb_dn_connect *dnc, | |||
366 | port->wusb_dev = wusb_dev; | 350 | port->wusb_dev = wusb_dev; |
367 | port->status |= USB_PORT_STAT_CONNECTION; | 351 | port->status |= USB_PORT_STAT_CONNECTION; |
368 | port->change |= USB_PORT_STAT_C_CONNECTION; | 352 | port->change |= USB_PORT_STAT_C_CONNECTION; |
369 | port->reset_count = 0; | ||
370 | /* Now the port status changed to connected; khubd will | 353 | /* Now the port status changed to connected; khubd will |
371 | * pick the change up and try to reset the port to bring it to | 354 | * pick the change up and try to reset the port to bring it to |
372 | * the enabled state--so this process returns up to the stack | 355 | * the enabled state--so this process returns up to the stack |
373 | * and it calls back into wusbhc_rh_port_reset() who will call | 356 | * and it calls back into wusbhc_rh_port_reset(). |
374 | * devconnect_auth(). | ||
375 | */ | 357 | */ |
376 | error_unlock: | 358 | error_unlock: |
377 | mutex_unlock(&wusbhc->mutex); | 359 | mutex_unlock(&wusbhc->mutex); |
378 | d_fnend(3, dev, "(%p, %p, %s) = void\n", wusbhc, dnc, pr_cdid); | ||
379 | return; | 360 | return; |
380 | 361 | ||
381 | } | 362 | } |
@@ -398,10 +379,8 @@ error_unlock: | |||
398 | static void __wusbhc_dev_disconnect(struct wusbhc *wusbhc, | 379 | static void __wusbhc_dev_disconnect(struct wusbhc *wusbhc, |
399 | struct wusb_port *port) | 380 | struct wusb_port *port) |
400 | { | 381 | { |
401 | struct device *dev = wusbhc->dev; | ||
402 | struct wusb_dev *wusb_dev = port->wusb_dev; | 382 | struct wusb_dev *wusb_dev = port->wusb_dev; |
403 | 383 | ||
404 | d_fnstart(3, dev, "(wusbhc %p, port %p)\n", wusbhc, port); | ||
405 | port->status &= ~(USB_PORT_STAT_CONNECTION | USB_PORT_STAT_ENABLE | 384 | port->status &= ~(USB_PORT_STAT_CONNECTION | USB_PORT_STAT_ENABLE |
406 | | USB_PORT_STAT_SUSPEND | USB_PORT_STAT_RESET | 385 | | USB_PORT_STAT_SUSPEND | USB_PORT_STAT_RESET |
407 | | USB_PORT_STAT_LOW_SPEED | USB_PORT_STAT_HIGH_SPEED); | 386 | | USB_PORT_STAT_LOW_SPEED | USB_PORT_STAT_HIGH_SPEED); |
@@ -413,15 +392,11 @@ static void __wusbhc_dev_disconnect(struct wusbhc *wusbhc, | |||
413 | wusb_dev_put(wusb_dev); | 392 | wusb_dev_put(wusb_dev); |
414 | } | 393 | } |
415 | port->wusb_dev = NULL; | 394 | port->wusb_dev = NULL; |
416 | /* don't reset the reset_count to zero or wusbhc_rh_port_reset will get | ||
417 | * confused! We only reset to zero when we connect a new device. | ||
418 | */ | ||
419 | 395 | ||
420 | /* After a device disconnects, change the GTK (see [WUSB] | 396 | /* After a device disconnects, change the GTK (see [WUSB] |
421 | * section 6.2.11.2). */ | 397 | * section 6.2.11.2). */ |
422 | wusbhc_gtk_rekey(wusbhc); | 398 | wusbhc_gtk_rekey(wusbhc); |
423 | 399 | ||
424 | d_fnend(3, dev, "(wusbhc %p, port %p) = void\n", wusbhc, port); | ||
425 | /* The Wireless USB part has forgotten about the device already; now | 400 | /* The Wireless USB part has forgotten about the device already; now |
426 | * khubd's timer will pick up the disconnection and remove the USB | 401 | * khubd's timer will pick up the disconnection and remove the USB |
427 | * device from the system | 402 | * device from the system |
@@ -429,39 +404,6 @@ static void __wusbhc_dev_disconnect(struct wusbhc *wusbhc, | |||
429 | } | 404 | } |
430 | 405 | ||
431 | /* | 406 | /* |
432 | * Authenticate a device into the WUSB Cluster | ||
433 | * | ||
434 | * Called from the Root Hub code (rh.c:wusbhc_rh_port_reset()) when | ||
435 | * asking for a reset on a port that is not enabled (ie: first connect | ||
436 | * on the port). | ||
437 | * | ||
438 | * Performs the 4-way handshake to allow the device to communicate w/ the | ||
439 | * WUSB Cluster securely; once done, issue a request to the device for | ||
440 | * it to change to address 0. | ||
441 | * | ||
442 | * This mimics the reset step of Wired USB that once resetting a | ||
443 | * device, leaves the port in enabled state and the dev with the | ||
444 | * default address (0). | ||
445 | * | ||
446 | * WUSB1.0[7.1.2] | ||
447 | * | ||
448 | * @port_idx: port where the change happened--This is the index into | ||
449 | * the wusbhc port array, not the USB port number. | ||
450 | */ | ||
451 | int wusbhc_devconnect_auth(struct wusbhc *wusbhc, u8 port_idx) | ||
452 | { | ||
453 | struct device *dev = wusbhc->dev; | ||
454 | struct wusb_port *port = wusb_port_by_idx(wusbhc, port_idx); | ||
455 | |||
456 | d_fnstart(3, dev, "(%p, %u)\n", wusbhc, port_idx); | ||
457 | port->status &= ~USB_PORT_STAT_RESET; | ||
458 | port->status |= USB_PORT_STAT_ENABLE; | ||
459 | port->change |= USB_PORT_STAT_C_RESET | USB_PORT_STAT_C_ENABLE; | ||
460 | d_fnend(3, dev, "(%p, %u) = 0\n", wusbhc, port_idx); | ||
461 | return 0; | ||
462 | } | ||
463 | |||
464 | /* | ||
465 | * Refresh the list of keep alives to emit in the MMC | 407 | * Refresh the list of keep alives to emit in the MMC |
466 | * | 408 | * |
467 | * Some devices don't respond to keep alives unless they've been | 409 | * Some devices don't respond to keep alives unless they've been |
@@ -528,21 +470,15 @@ static void __wusbhc_keep_alive(struct wusbhc *wusbhc) | |||
528 | */ | 470 | */ |
529 | static void wusbhc_keep_alive_run(struct work_struct *ws) | 471 | static void wusbhc_keep_alive_run(struct work_struct *ws) |
530 | { | 472 | { |
531 | struct delayed_work *dw = | 473 | struct delayed_work *dw = container_of(ws, struct delayed_work, work); |
532 | container_of(ws, struct delayed_work, work); | 474 | struct wusbhc *wusbhc = container_of(dw, struct wusbhc, keep_alive_timer); |
533 | struct wusbhc *wusbhc = | 475 | |
534 | container_of(dw, struct wusbhc, keep_alive_timer); | 476 | mutex_lock(&wusbhc->mutex); |
535 | 477 | __wusbhc_keep_alive(wusbhc); | |
536 | d_fnstart(5, wusbhc->dev, "(wusbhc %p)\n", wusbhc); | 478 | mutex_unlock(&wusbhc->mutex); |
537 | if (wusbhc->active) { | 479 | |
538 | mutex_lock(&wusbhc->mutex); | 480 | queue_delayed_work(wusbd, &wusbhc->keep_alive_timer, |
539 | __wusbhc_keep_alive(wusbhc); | 481 | msecs_to_jiffies(wusbhc->trust_timeout / 2)); |
540 | mutex_unlock(&wusbhc->mutex); | ||
541 | queue_delayed_work(wusbd, &wusbhc->keep_alive_timer, | ||
542 | (wusbhc->trust_timeout * CONFIG_HZ)/1000/2); | ||
543 | } | ||
544 | d_fnend(5, wusbhc->dev, "(wusbhc %p) = void\n", wusbhc); | ||
545 | return; | ||
546 | } | 482 | } |
547 | 483 | ||
548 | /* | 484 | /* |
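Besides dropping the d_fnstart()/d_fnend() tracing, the hunk above replaces the open-coded (trust_timeout * CONFIG_HZ)/1000/2 with msecs_to_jiffies(trust_timeout / 2), which rounds up and saturates rather than silently truncating or overflowing. The delay computation in isolation (helper name hypothetical, for illustration only):

    #include <linux/jiffies.h>

    /* Re-arm the keep-alive at half the trust timeout (milliseconds). */
    static unsigned long keep_alive_delay(unsigned int trust_timeout_ms)
    {
            return msecs_to_jiffies(trust_timeout_ms / 2);
    }

The wusbhc->active check is gone as well, so the work item presumably gets cancelled elsewhere when the channel stops, rather than checking the flag on each run.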
@@ -585,10 +521,6 @@ static struct wusb_dev *wusbhc_find_dev_by_addr(struct wusbhc *wusbhc, u8 addr) | |||
585 | */ | 521 | */ |
586 | static void wusbhc_handle_dn_alive(struct wusbhc *wusbhc, struct wusb_dev *wusb_dev) | 522 | static void wusbhc_handle_dn_alive(struct wusbhc *wusbhc, struct wusb_dev *wusb_dev) |
587 | { | 523 | { |
588 | struct device *dev = wusbhc->dev; | ||
589 | |||
590 | d_printf(2, dev, "DN ALIVE: device 0x%02x pong\n", wusb_dev->addr); | ||
591 | |||
592 | mutex_lock(&wusbhc->mutex); | 524 | mutex_lock(&wusbhc->mutex); |
593 | wusb_dev->entry_ts = jiffies; | 525 | wusb_dev->entry_ts = jiffies; |
594 | __wusbhc_keep_alive(wusbhc); | 526 | __wusbhc_keep_alive(wusbhc); |
@@ -621,11 +553,10 @@ static void wusbhc_handle_dn_connect(struct wusbhc *wusbhc, | |||
621 | "no-beacon" | 553 | "no-beacon" |
622 | }; | 554 | }; |
623 | 555 | ||
624 | d_fnstart(3, dev, "(%p, %p, %zu)\n", wusbhc, dn_hdr, size); | ||
625 | if (size < sizeof(*dnc)) { | 556 | if (size < sizeof(*dnc)) { |
626 | dev_err(dev, "DN CONNECT: short notification (%zu < %zu)\n", | 557 | dev_err(dev, "DN CONNECT: short notification (%zu < %zu)\n", |
627 | size, sizeof(*dnc)); | 558 | size, sizeof(*dnc)); |
628 | goto out; | 559 | return; |
629 | } | 560 | } |
630 | 561 | ||
631 | dnc = container_of(dn_hdr, struct wusb_dn_connect, hdr); | 562 | dnc = container_of(dn_hdr, struct wusb_dn_connect, hdr); |
@@ -637,10 +568,6 @@ static void wusbhc_handle_dn_connect(struct wusbhc *wusbhc, | |||
637 | wusb_dn_connect_new_connection(dnc) ? "connect" : "reconnect"); | 568 | wusb_dn_connect_new_connection(dnc) ? "connect" : "reconnect"); |
638 | /* ACK the connect */ | 569 | /* ACK the connect */ |
639 | wusbhc_devconnect_ack(wusbhc, dnc, pr_cdid); | 570 | wusbhc_devconnect_ack(wusbhc, dnc, pr_cdid); |
640 | out: | ||
641 | d_fnend(3, dev, "(%p, %p, %zu) = void\n", | ||
642 | wusbhc, dn_hdr, size); | ||
643 | return; | ||
644 | } | 571 | } |
645 | 572 | ||
646 | /* | 573 | /* |
@@ -662,60 +589,6 @@ static void wusbhc_handle_dn_disconnect(struct wusbhc *wusbhc, struct wusb_dev * | |||
662 | } | 589 | } |
663 | 590 | ||
664 | /* | 591 | /* |
665 | * Reset a WUSB device on a HWA | ||
666 | * | ||
667 | * @wusbhc | ||
668 | * @port_idx Index of the port where the device is | ||
669 | * | ||
670 | * In Wireless USB, a reset is more or less equivalent to a full | ||
671 | * disconnect; so we just do a full disconnect and send the device a | ||
672 | * Device Reset IE (WUSB1.0[7.5.11]) giving it a few millisecs (6 MMCs). | ||
673 | * | ||
674 | * @wusbhc should be refcounted and unlocked | ||
675 | */ | ||
676 | int wusbhc_dev_reset(struct wusbhc *wusbhc, u8 port_idx) | ||
677 | { | ||
678 | int result; | ||
679 | struct device *dev = wusbhc->dev; | ||
680 | struct wusb_dev *wusb_dev; | ||
681 | struct wuie_reset *ie; | ||
682 | |||
683 | d_fnstart(3, dev, "(%p, %u)\n", wusbhc, port_idx); | ||
684 | mutex_lock(&wusbhc->mutex); | ||
685 | result = 0; | ||
686 | wusb_dev = wusb_port_by_idx(wusbhc, port_idx)->wusb_dev; | ||
687 | if (wusb_dev == NULL) { | ||
688 | /* reset no device? ignore */ | ||
689 | dev_dbg(dev, "RESET: no device at port %u, ignoring\n", | ||
690 | port_idx); | ||
691 | goto error_unlock; | ||
692 | } | ||
693 | result = -ENOMEM; | ||
694 | ie = kzalloc(sizeof(*ie), GFP_KERNEL); | ||
695 | if (ie == NULL) | ||
696 | goto error_unlock; | ||
697 | ie->hdr.bLength = sizeof(ie->hdr) + sizeof(ie->CDID); | ||
698 | ie->hdr.bIEIdentifier = WUIE_ID_RESET_DEVICE; | ||
699 | ie->CDID = wusb_dev->cdid; | ||
700 | result = wusbhc_mmcie_set(wusbhc, 0xff, 6, &ie->hdr); | ||
701 | if (result < 0) { | ||
702 | dev_err(dev, "RESET: cant's set MMC: %d\n", result); | ||
703 | goto error_kfree; | ||
704 | } | ||
705 | __wusbhc_dev_disconnect(wusbhc, wusb_port_by_idx(wusbhc, port_idx)); | ||
706 | |||
707 | /* 120ms, hopefully 6 MMCs (FIXME) */ | ||
708 | msleep(120); | ||
709 | wusbhc_mmcie_rm(wusbhc, &ie->hdr); | ||
710 | error_kfree: | ||
711 | kfree(ie); | ||
712 | error_unlock: | ||
713 | mutex_unlock(&wusbhc->mutex); | ||
714 | d_fnend(3, dev, "(%p, %u) = %d\n", wusbhc, port_idx, result); | ||
715 | return result; | ||
716 | } | ||
717 | |||
718 | /* | ||
719 | * Handle a Device Notification coming from a host | 592 |
720 | * | 593 | * |
721 | * The Device Notification comes from a host (HWA, DWA or WHCI) | 594 | * The Device Notification comes from a host (HWA, DWA or WHCI) |
@@ -735,19 +608,17 @@ void wusbhc_handle_dn(struct wusbhc *wusbhc, u8 srcaddr, | |||
735 | struct device *dev = wusbhc->dev; | 608 | struct device *dev = wusbhc->dev; |
736 | struct wusb_dev *wusb_dev; | 609 | struct wusb_dev *wusb_dev; |
737 | 610 | ||
738 | d_fnstart(3, dev, "(%p, %p)\n", wusbhc, dn_hdr); | ||
739 | |||
740 | if (size < sizeof(struct wusb_dn_hdr)) { | 611 | if (size < sizeof(struct wusb_dn_hdr)) { |
741 | dev_err(dev, "DN data shorter than DN header (%d < %d)\n", | 612 | dev_err(dev, "DN data shorter than DN header (%d < %d)\n", |
742 | (int)size, (int)sizeof(struct wusb_dn_hdr)); | 613 | (int)size, (int)sizeof(struct wusb_dn_hdr)); |
743 | goto out; | 614 | return; |
744 | } | 615 | } |
745 | 616 | ||
746 | wusb_dev = wusbhc_find_dev_by_addr(wusbhc, srcaddr); | 617 | wusb_dev = wusbhc_find_dev_by_addr(wusbhc, srcaddr); |
747 | if (wusb_dev == NULL && dn_hdr->bType != WUSB_DN_CONNECT) { | 618 | if (wusb_dev == NULL && dn_hdr->bType != WUSB_DN_CONNECT) { |
748 | dev_dbg(dev, "ignoring DN %d from unconnected device %02x\n", | 619 | dev_dbg(dev, "ignoring DN %d from unconnected device %02x\n", |
749 | dn_hdr->bType, srcaddr); | 620 | dn_hdr->bType, srcaddr); |
750 | goto out; | 621 | return; |
751 | } | 622 | } |
752 | 623 | ||
753 | switch (dn_hdr->bType) { | 624 | switch (dn_hdr->bType) { |
@@ -772,9 +643,6 @@ void wusbhc_handle_dn(struct wusbhc *wusbhc, u8 srcaddr, | |||
772 | dev_warn(dev, "unknown DN %u (%d octets) from %u\n", | 643 | dev_warn(dev, "unknown DN %u (%d octets) from %u\n", |
773 | dn_hdr->bType, (int)size, srcaddr); | 644 | dn_hdr->bType, (int)size, srcaddr); |
774 | } | 645 | } |
775 | out: | ||
776 | d_fnend(3, dev, "(%p, %p) = void\n", wusbhc, dn_hdr); | ||
777 | return; | ||
778 | } | 646 | } |
779 | EXPORT_SYMBOL_GPL(wusbhc_handle_dn); | 647 | EXPORT_SYMBOL_GPL(wusbhc_handle_dn); |
780 | 648 | ||
@@ -804,59 +672,30 @@ void __wusbhc_dev_disable(struct wusbhc *wusbhc, u8 port_idx) | |||
804 | struct wusb_dev *wusb_dev; | 672 | struct wusb_dev *wusb_dev; |
805 | struct wuie_disconnect *ie; | 673 | struct wuie_disconnect *ie; |
806 | 674 | ||
807 | d_fnstart(3, dev, "(%p, %u)\n", wusbhc, port_idx); | ||
808 | result = 0; | ||
809 | wusb_dev = wusb_port_by_idx(wusbhc, port_idx)->wusb_dev; | 675 | wusb_dev = wusb_port_by_idx(wusbhc, port_idx)->wusb_dev; |
810 | if (wusb_dev == NULL) { | 676 | if (wusb_dev == NULL) { |
811 | /* disconnect no device? ignore */ | 677 | /* disconnect no device? ignore */ |
812 | dev_dbg(dev, "DISCONNECT: no device at port %u, ignoring\n", | 678 | dev_dbg(dev, "DISCONNECT: no device at port %u, ignoring\n", |
813 | port_idx); | 679 | port_idx); |
814 | goto error; | 680 | return; |
815 | } | 681 | } |
816 | __wusbhc_dev_disconnect(wusbhc, wusb_port_by_idx(wusbhc, port_idx)); | 682 | __wusbhc_dev_disconnect(wusbhc, wusb_port_by_idx(wusbhc, port_idx)); |
817 | 683 | ||
818 | result = -ENOMEM; | ||
819 | ie = kzalloc(sizeof(*ie), GFP_KERNEL); | 684 | ie = kzalloc(sizeof(*ie), GFP_KERNEL); |
820 | if (ie == NULL) | 685 | if (ie == NULL) |
821 | goto error; | 686 | return; |
822 | ie->hdr.bLength = sizeof(*ie); | 687 | ie->hdr.bLength = sizeof(*ie); |
823 | ie->hdr.bIEIdentifier = WUIE_ID_DEVICE_DISCONNECT; | 688 | ie->hdr.bIEIdentifier = WUIE_ID_DEVICE_DISCONNECT; |
824 | ie->bDeviceAddress = wusb_dev->addr; | 689 | ie->bDeviceAddress = wusb_dev->addr; |
825 | result = wusbhc_mmcie_set(wusbhc, 0, 0, &ie->hdr); | 690 | result = wusbhc_mmcie_set(wusbhc, 0, 0, &ie->hdr); |
826 | if (result < 0) { | 691 | if (result < 0) |
827 | dev_err(dev, "DISCONNECT: can't set MMC: %d\n", result); | 692 | dev_err(dev, "DISCONNECT: can't set MMC: %d\n", result); |
828 | goto error_kfree; | 693 | else { |
694 | /* At least 6 MMCs, assuming at least 1 MMC per zone. */ | ||
695 | msleep(7*4); | ||
696 | wusbhc_mmcie_rm(wusbhc, &ie->hdr); | ||
829 | } | 697 | } |
830 | |||
831 | /* 120ms, hopefully 6 MMCs */ | ||
832 | msleep(100); | ||
833 | wusbhc_mmcie_rm(wusbhc, &ie->hdr); | ||
834 | error_kfree: | ||
835 | kfree(ie); | 698 | kfree(ie); |
836 | error: | ||
837 | d_fnend(3, dev, "(%p, %u) = %d\n", wusbhc, port_idx, result); | ||
838 | return; | ||
839 | } | ||
840 | |||
841 | static void wusb_cap_descr_printf(const unsigned level, struct device *dev, | ||
842 | const struct usb_wireless_cap_descriptor *wcd) | ||
843 | { | ||
844 | d_printf(level, dev, | ||
845 | "WUSB Capability Descriptor\n" | ||
846 | " bDevCapabilityType 0x%02x\n" | ||
847 | " bmAttributes 0x%02x\n" | ||
848 | " wPhyRates 0x%04x\n" | ||
849 | " bmTFITXPowerInfo 0x%02x\n" | ||
850 | " bmFFITXPowerInfo 0x%02x\n" | ||
851 | " bmBandGroup 0x%04x\n" | ||
852 | " bReserved 0x%02x\n", | ||
853 | wcd->bDevCapabilityType, | ||
854 | wcd->bmAttributes, | ||
855 | le16_to_cpu(wcd->wPHYRates), | ||
856 | wcd->bmTFITXPowerInfo, | ||
857 | wcd->bmFFITXPowerInfo, | ||
858 | wcd->bmBandGroup, | ||
859 | wcd->bReserved); | ||
860 | } | 699 | } |
861 | 700 | ||
862 | /* | 701 | /* |
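The msleep(7*4) above replaces the old "120ms, hopefully 6 MMCs" wait with something derived from the MMC rate. The arithmetic, assuming standard ECMA-368 timing (the constants below are illustrative, not from this patch):

    #define EX_MAS_US      256                     /* one MAS slot */
    #define EX_ZONE_US     (16 * EX_MAS_US)        /* 16 MAS/zone = 4096 us */
    #define EX_SIX_MMC_US  (6 * EX_ZONE_US)        /* 24576 us for 6 MMCs */

At one MMC or more per zone, six MMCs fit within six zones, about 24.6 ms; msleep(7*4) waits 28 ms, covering that with roughly a zone of slack.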
@@ -899,8 +738,6 @@ static int wusb_dev_bos_grok(struct usb_device *usb_dev, | |||
899 | } | 738 | } |
900 | cap_size = cap_hdr->bLength; | 739 | cap_size = cap_hdr->bLength; |
901 | cap_type = cap_hdr->bDevCapabilityType; | 740 | cap_type = cap_hdr->bDevCapabilityType; |
902 | d_printf(4, dev, "BOS Capability: 0x%02x (%zu bytes)\n", | ||
903 | cap_type, cap_size); | ||
904 | if (cap_size == 0) | 741 | if (cap_size == 0) |
905 | break; | 742 | break; |
906 | if (cap_size > top - itr) { | 743 | if (cap_size > top - itr) { |
@@ -912,7 +749,6 @@ static int wusb_dev_bos_grok(struct usb_device *usb_dev, | |||
912 | result = -EBADF; | 749 | result = -EBADF; |
913 | goto error_bad_cap; | 750 | goto error_bad_cap; |
914 | } | 751 | } |
915 | d_dump(3, dev, itr, cap_size); | ||
916 | switch (cap_type) { | 752 | switch (cap_type) { |
917 | case USB_CAP_TYPE_WIRELESS_USB: | 753 | case USB_CAP_TYPE_WIRELESS_USB: |
918 | if (cap_size != sizeof(*wusb_dev->wusb_cap_descr)) | 754 | if (cap_size != sizeof(*wusb_dev->wusb_cap_descr)) |
@@ -920,10 +756,8 @@ static int wusb_dev_bos_grok(struct usb_device *usb_dev, | |||
920 | "descriptor is %zu bytes vs %zu " | 756 | "descriptor is %zu bytes vs %zu " |
921 | "needed\n", cap_size, | 757 | "needed\n", cap_size, |
922 | sizeof(*wusb_dev->wusb_cap_descr)); | 758 | sizeof(*wusb_dev->wusb_cap_descr)); |
923 | else { | 759 | else |
924 | wusb_dev->wusb_cap_descr = itr; | 760 | wusb_dev->wusb_cap_descr = itr; |
925 | wusb_cap_descr_printf(3, dev, itr); | ||
926 | } | ||
927 | break; | 761 | break; |
928 | default: | 762 | default: |
929 | dev_err(dev, "BUG? Unknown BOS capability 0x%02x " | 763 | dev_err(dev, "BUG? Unknown BOS capability 0x%02x " |
@@ -988,9 +822,7 @@ static int wusb_dev_bos_add(struct usb_device *usb_dev, | |||
988 | "%zu bytes): %zd\n", desc_size, result); | 822 | "%zu bytes): %zd\n", desc_size, result); |
989 | goto error_get_descriptor; | 823 | goto error_get_descriptor; |
990 | } | 824 | } |
991 | d_printf(2, dev, "Got BOS descriptor %zd bytes, %u capabilities\n", | 825 | |
992 | result, bos->bNumDeviceCaps); | ||
993 | d_dump(2, dev, bos, result); | ||
994 | result = wusb_dev_bos_grok(usb_dev, wusb_dev, bos, result); | 826 | result = wusb_dev_bos_grok(usb_dev, wusb_dev, bos, result); |
995 | if (result < 0) | 827 | if (result < 0) |
996 | goto error_bad_bos; | 828 | goto error_bad_bos; |
@@ -1056,8 +888,6 @@ static void wusb_dev_add_ncb(struct usb_device *usb_dev) | |||
1056 | if (usb_dev->wusb == 0 || usb_dev->devnum == 1) | 888 | if (usb_dev->wusb == 0 || usb_dev->devnum == 1) |
1057 | return; /* skip non wusb and wusb RHs */ | 889 | return; /* skip non wusb and wusb RHs */ |
1058 | 890 | ||
1059 | d_fnstart(3, dev, "(usb_dev %p)\n", usb_dev); | ||
1060 | |||
1061 | wusbhc = wusbhc_get_by_usb_dev(usb_dev); | 891 | wusbhc = wusbhc_get_by_usb_dev(usb_dev); |
1062 | if (wusbhc == NULL) | 892 | if (wusbhc == NULL) |
1063 | goto error_nodev; | 893 | goto error_nodev; |
@@ -1087,7 +917,6 @@ out: | |||
1087 | wusb_dev_put(wusb_dev); | 917 | wusb_dev_put(wusb_dev); |
1088 | wusbhc_put(wusbhc); | 918 | wusbhc_put(wusbhc); |
1089 | error_nodev: | 919 | error_nodev: |
1090 | d_fnend(3, dev, "(usb_dev %p) = void\n", usb_dev); | ||
1091 | return; | 920 | return; |
1092 | 921 | ||
1093 | wusb_dev_sysfs_rm(wusb_dev); | 922 | wusb_dev_sysfs_rm(wusb_dev); |
@@ -1174,11 +1003,10 @@ EXPORT_SYMBOL_GPL(__wusb_dev_get_by_usb_dev); | |||
1174 | 1003 | ||
1175 | void wusb_dev_destroy(struct kref *_wusb_dev) | 1004 | void wusb_dev_destroy(struct kref *_wusb_dev) |
1176 | { | 1005 | { |
1177 | struct wusb_dev *wusb_dev | 1006 | struct wusb_dev *wusb_dev = container_of(_wusb_dev, struct wusb_dev, refcnt); |
1178 | = container_of(_wusb_dev, struct wusb_dev, refcnt); | 1007 | |
1179 | list_del_init(&wusb_dev->cack_node); | 1008 | list_del_init(&wusb_dev->cack_node); |
1180 | wusb_dev_free(wusb_dev); | 1009 | wusb_dev_free(wusb_dev); |
1181 | d_fnend(1, NULL, "%s (wusb_dev %p) = void\n", __func__, wusb_dev); | ||
1182 | } | 1010 | } |
1183 | EXPORT_SYMBOL_GPL(wusb_dev_destroy); | 1011 | EXPORT_SYMBOL_GPL(wusb_dev_destroy); |
1184 | 1012 | ||
@@ -1190,8 +1018,6 @@ EXPORT_SYMBOL_GPL(wusb_dev_destroy); | |||
1190 | */ | 1018 | */ |
1191 | int wusbhc_devconnect_create(struct wusbhc *wusbhc) | 1019 | int wusbhc_devconnect_create(struct wusbhc *wusbhc) |
1192 | { | 1020 | { |
1193 | d_fnstart(3, wusbhc->dev, "(wusbhc %p)\n", wusbhc); | ||
1194 | |||
1195 | wusbhc->keep_alive_ie.hdr.bIEIdentifier = WUIE_ID_KEEP_ALIVE; | 1021 | wusbhc->keep_alive_ie.hdr.bIEIdentifier = WUIE_ID_KEEP_ALIVE; |
1196 | wusbhc->keep_alive_ie.hdr.bLength = sizeof(wusbhc->keep_alive_ie.hdr); | 1022 | wusbhc->keep_alive_ie.hdr.bLength = sizeof(wusbhc->keep_alive_ie.hdr); |
1197 | INIT_DELAYED_WORK(&wusbhc->keep_alive_timer, wusbhc_keep_alive_run); | 1023 | INIT_DELAYED_WORK(&wusbhc->keep_alive_timer, wusbhc_keep_alive_run); |
@@ -1200,7 +1026,6 @@ int wusbhc_devconnect_create(struct wusbhc *wusbhc) | |||
1200 | wusbhc->cack_ie.hdr.bLength = sizeof(wusbhc->cack_ie.hdr); | 1026 | wusbhc->cack_ie.hdr.bLength = sizeof(wusbhc->cack_ie.hdr); |
1201 | INIT_LIST_HEAD(&wusbhc->cack_list); | 1027 | INIT_LIST_HEAD(&wusbhc->cack_list); |
1202 | 1028 | ||
1203 | d_fnend(3, wusbhc->dev, "(wusbhc %p) = void\n", wusbhc); | ||
1204 | return 0; | 1029 | return 0; |
1205 | } | 1030 | } |
1206 | 1031 | ||
@@ -1209,8 +1034,7 @@ int wusbhc_devconnect_create(struct wusbhc *wusbhc) | |||
1209 | */ | 1034 | */ |
1210 | void wusbhc_devconnect_destroy(struct wusbhc *wusbhc) | 1035 | void wusbhc_devconnect_destroy(struct wusbhc *wusbhc) |
1211 | { | 1036 | { |
1212 | d_fnstart(3, wusbhc->dev, "(wusbhc %p)\n", wusbhc); | 1037 | /* no op */ |
1213 | d_fnend(3, wusbhc->dev, "(wusbhc %p) = void\n", wusbhc); | ||
1214 | } | 1038 | } |
1215 | 1039 | ||
1216 | /* | 1040 | /* |
@@ -1222,8 +1046,7 @@ void wusbhc_devconnect_destroy(struct wusbhc *wusbhc) | |||
1222 | * FIXME: This also enables the keep alives but this is not necessary | 1046 | * FIXME: This also enables the keep alives but this is not necessary |
1223 | * until there are connected and authenticated devices. | 1047 | * until there are connected and authenticated devices. |
1224 | */ | 1048 | */ |
1225 | int wusbhc_devconnect_start(struct wusbhc *wusbhc, | 1049 | int wusbhc_devconnect_start(struct wusbhc *wusbhc) |
1226 | const struct wusb_ckhdid *chid) | ||
1227 | { | 1050 | { |
1228 | struct device *dev = wusbhc->dev; | 1051 | struct device *dev = wusbhc->dev; |
1229 | struct wuie_host_info *hi; | 1052 | struct wuie_host_info *hi; |
@@ -1236,7 +1059,7 @@ int wusbhc_devconnect_start(struct wusbhc *wusbhc, | |||
1236 | hi->hdr.bLength = sizeof(*hi); | 1059 | hi->hdr.bLength = sizeof(*hi); |
1237 | hi->hdr.bIEIdentifier = WUIE_ID_HOST_INFO; | 1060 | hi->hdr.bIEIdentifier = WUIE_ID_HOST_INFO; |
1238 | hi->attributes = cpu_to_le16((wusbhc->rsv->stream << 3) | WUIE_HI_CAP_ALL); | 1061 | hi->attributes = cpu_to_le16((wusbhc->rsv->stream << 3) | WUIE_HI_CAP_ALL); |
1239 | hi->CHID = *chid; | 1062 | hi->CHID = wusbhc->chid; |
1240 | result = wusbhc_mmcie_set(wusbhc, 0, 0, &hi->hdr); | 1063 | result = wusbhc_mmcie_set(wusbhc, 0, 0, &hi->hdr); |
1241 | if (result < 0) { | 1064 | if (result < 0) { |
1242 | dev_err(dev, "Cannot add Host Info MMCIE: %d\n", result); | 1065 | dev_err(dev, "Cannot add Host Info MMCIE: %d\n", result); |
diff --git a/drivers/usb/wusbcore/mmc.c b/drivers/usb/wusbcore/mmc.c index cfa77a01cebd..3b52161e6e9c 100644 --- a/drivers/usb/wusbcore/mmc.c +++ b/drivers/usb/wusbcore/mmc.c | |||
@@ -159,15 +159,35 @@ found: | |||
159 | } | 159 | } |
160 | EXPORT_SYMBOL_GPL(wusbhc_mmcie_rm); | 160 | EXPORT_SYMBOL_GPL(wusbhc_mmcie_rm); |
161 | 161 | ||
162 | static int wusbhc_mmc_start(struct wusbhc *wusbhc) | ||
163 | { | ||
164 | int ret; | ||
165 | |||
166 | mutex_lock(&wusbhc->mutex); | ||
167 | ret = wusbhc->start(wusbhc); | ||
168 | if (ret >= 0) | ||
169 | wusbhc->active = 1; | ||
170 | mutex_unlock(&wusbhc->mutex); | ||
171 | |||
172 | return ret; | ||
173 | } | ||
174 | |||
175 | static void wusbhc_mmc_stop(struct wusbhc *wusbhc) | ||
176 | { | ||
177 | mutex_lock(&wusbhc->mutex); | ||
178 | wusbhc->active = 0; | ||
179 | wusbhc->stop(wusbhc, WUSB_CHANNEL_STOP_DELAY_MS); | ||
180 | mutex_unlock(&wusbhc->mutex); | ||
181 | } | ||
182 | |||
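The new wusbhc_mmc_start()/wusbhc_mmc_stop() helpers flip wusbhc->active only while holding wusbhc->mutex, so any reader that takes the same mutex never observes a half-started or half-stopped host. A hypothetical reader, purely for illustration:

    static bool example_hc_is_active(struct wusbhc *wusbhc)
    {
            bool active;

            mutex_lock(&wusbhc->mutex);
            active = wusbhc->active;
            mutex_unlock(&wusbhc->mutex);
            return active;
    }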
162 | /* | 183 | /* |
163 | * wusbhc_start - start transmitting MMCs and accepting connections | 184 | * wusbhc_start - start transmitting MMCs and accepting connections |
164 | * @wusbhc: the HC to start | 185 | * @wusbhc: the HC to start |
165 | * @chid: the CHID to use for this host | ||
166 | * | 186 | * |
167 | * Establishes a cluster reservation, enables device connections, and | 187 | * Establishes a cluster reservation, enables device connections, and |
168 | * starts MMCs with appropriate DNTS parameters. | 188 | * starts MMCs with appropriate DNTS parameters. |
169 | */ | 189 | */ |
170 | int wusbhc_start(struct wusbhc *wusbhc, const struct wusb_ckhdid *chid) | 190 | int wusbhc_start(struct wusbhc *wusbhc) |
171 | { | 191 | { |
172 | int result; | 192 | int result; |
173 | struct device *dev = wusbhc->dev; | 193 | struct device *dev = wusbhc->dev; |
@@ -181,7 +201,7 @@ int wusbhc_start(struct wusbhc *wusbhc, const struct wusb_ckhdid *chid) | |||
181 | goto error_rsv_establish; | 201 | goto error_rsv_establish; |
182 | } | 202 | } |
183 | 203 | ||
184 | result = wusbhc_devconnect_start(wusbhc, chid); | 204 | result = wusbhc_devconnect_start(wusbhc); |
185 | if (result < 0) { | 205 | if (result < 0) { |
186 | dev_err(dev, "error enabling device connections: %d\n", result); | 206 | dev_err(dev, "error enabling device connections: %d\n", result); |
187 | goto error_devconnect_start; | 207 | goto error_devconnect_start; |
@@ -199,12 +219,12 @@ int wusbhc_start(struct wusbhc *wusbhc, const struct wusb_ckhdid *chid) | |||
199 | dev_err(dev, "Cannot set DNTS parameters: %d\n", result); | 219 | dev_err(dev, "Cannot set DNTS parameters: %d\n", result); |
200 | goto error_set_num_dnts; | 220 | goto error_set_num_dnts; |
201 | } | 221 | } |
202 | result = wusbhc->start(wusbhc); | 222 | result = wusbhc_mmc_start(wusbhc); |
203 | if (result < 0) { | 223 | if (result < 0) { |
204 | dev_err(dev, "error starting wusbch: %d\n", result); | 224 | dev_err(dev, "error starting wusbch: %d\n", result); |
205 | goto error_wusbhc_start; | 225 | goto error_wusbhc_start; |
206 | } | 226 | } |
207 | wusbhc->active = 1; | 227 | |
208 | return 0; | 228 | return 0; |
209 | 229 | ||
210 | error_wusbhc_start: | 230 | error_wusbhc_start: |
@@ -219,76 +239,17 @@ error_rsv_establish: | |||
219 | } | 239 | } |
220 | 240 | ||
221 | /* | 241 | /* |
222 | * Disconnect all from the WUSB Channel | ||
223 | * | ||
224 | * Send a Host Disconnect IE in the MMC, wait, don't send it any more | ||
225 | */ | ||
226 | static int __wusbhc_host_disconnect_ie(struct wusbhc *wusbhc) | ||
227 | { | ||
228 | int result = -ENOMEM; | ||
229 | struct wuie_host_disconnect *host_disconnect_ie; | ||
230 | might_sleep(); | ||
231 | host_disconnect_ie = kmalloc(sizeof(*host_disconnect_ie), GFP_KERNEL); | ||
232 | if (host_disconnect_ie == NULL) | ||
233 | goto error_alloc; | ||
234 | host_disconnect_ie->hdr.bLength = sizeof(*host_disconnect_ie); | ||
235 | host_disconnect_ie->hdr.bIEIdentifier = WUIE_ID_HOST_DISCONNECT; | ||
236 | result = wusbhc_mmcie_set(wusbhc, 0, 0, &host_disconnect_ie->hdr); | ||
237 | if (result < 0) | ||
238 | goto error_mmcie_set; | ||
239 | |||
240 | /* WUSB1.0[8.5.3.1 & 7.5.2] */ | ||
241 | msleep(100); | ||
242 | wusbhc_mmcie_rm(wusbhc, &host_disconnect_ie->hdr); | ||
243 | error_mmcie_set: | ||
244 | kfree(host_disconnect_ie); | ||
245 | error_alloc: | ||
246 | return result; | ||
247 | } | ||
248 | |||
249 | /* | ||
250 | * wusbhc_stop - stop transmitting MMCs | 242 | * wusbhc_stop - stop transmitting MMCs |
251 | * @wusbhc: the HC to stop | 243 | * @wusbhc: the HC to stop |
252 | * | 244 | * |
253 | * Send a Host Disconnect IE, wait, remove all the MMCs (stop sending MMCs). | 245 | * Stops the WUSB channel and removes the cluster reservation. |
254 | * | ||
255 | * If we can't allocate a Host Stop IE, screw it, we don't notify the | ||
256 | * devices we are disconnecting... | ||
257 | */ | 246 | */ |
258 | void wusbhc_stop(struct wusbhc *wusbhc) | 247 | void wusbhc_stop(struct wusbhc *wusbhc) |
259 | { | 248 | { |
260 | if (wusbhc->active) { | 249 | wusbhc_mmc_stop(wusbhc); |
261 | wusbhc->active = 0; | 250 | wusbhc_sec_stop(wusbhc); |
262 | wusbhc->stop(wusbhc); | 251 | wusbhc_devconnect_stop(wusbhc); |
263 | wusbhc_sec_stop(wusbhc); | 252 | wusbhc_rsv_terminate(wusbhc); |
264 | __wusbhc_host_disconnect_ie(wusbhc); | ||
265 | wusbhc_devconnect_stop(wusbhc); | ||
266 | wusbhc_rsv_terminate(wusbhc); | ||
267 | } | ||
268 | } | ||
269 | EXPORT_SYMBOL_GPL(wusbhc_stop); | ||
270 | |||
271 | /* | ||
272 | * Change the CHID in a WUSB Channel | ||
273 | * | ||
274 | * If it is just a new CHID, send a Host Disconnect IE and then change | ||
275 | * the CHID IE. | ||
276 | */ | ||
277 | static int __wusbhc_chid_change(struct wusbhc *wusbhc, | ||
278 | const struct wusb_ckhdid *chid) | ||
279 | { | ||
280 | int result = -ENOSYS; | ||
281 | struct device *dev = wusbhc->dev; | ||
282 | dev_err(dev, "%s() not implemented yet\n", __func__); | ||
283 | return result; | ||
284 | |||
285 | BUG_ON(wusbhc->wuie_host_info == NULL); | ||
286 | __wusbhc_host_disconnect_ie(wusbhc); | ||
287 | wusbhc->wuie_host_info->CHID = *chid; | ||
288 | result = wusbhc_mmcie_set(wusbhc, 0, 0, &wusbhc->wuie_host_info->hdr); | ||
289 | if (result < 0) | ||
290 | dev_err(dev, "Can't update Host Info WUSB IE: %d\n", result); | ||
291 | return result; | ||
292 | } | 253 | } |
293 | 254 | ||
294 | /* | 255 | /* |
@@ -306,16 +267,19 @@ int wusbhc_chid_set(struct wusbhc *wusbhc, const struct wusb_ckhdid *chid) | |||
306 | chid = NULL; | 267 | chid = NULL; |
307 | 268 | ||
308 | mutex_lock(&wusbhc->mutex); | 269 | mutex_lock(&wusbhc->mutex); |
309 | if (wusbhc->active) { | 270 | if (chid) { |
310 | if (chid) | 271 | if (wusbhc->active) { |
311 | result = __wusbhc_chid_change(wusbhc, chid); | 272 | mutex_unlock(&wusbhc->mutex); |
312 | else | 273 | return -EBUSY; |
313 | wusbhc_stop(wusbhc); | 274 | } |
314 | } else { | 275 | wusbhc->chid = *chid; |
315 | if (chid) | ||
316 | wusbhc_start(wusbhc, chid); | ||
317 | } | 276 | } |
318 | mutex_unlock(&wusbhc->mutex); | 277 | mutex_unlock(&wusbhc->mutex); |
278 | |||
279 | if (chid) | ||
280 | result = uwb_radio_start(&wusbhc->pal); | ||
281 | else | ||
282 | uwb_radio_stop(&wusbhc->pal); | ||
319 | return result; | 283 | return result; |
320 | } | 284 | } |
321 | EXPORT_SYMBOL_GPL(wusbhc_chid_set); | 285 | EXPORT_SYMBOL_GPL(wusbhc_chid_set); |
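Under the new semantics, writing a CHID while the host is active is refused with -EBUSY, and an all-zero CHID (mapped to chid == NULL at the top of the function) stops the radio rather than the channel directly. A hypothetical sysfs-style caller, to show the intended use (the parsing format is assumed):

    static ssize_t example_chid_store(struct wusbhc *wusbhc,
                                      const char *buf, size_t size)
    {
            struct wusb_ckhdid chid;
            int result;

            result = sscanf(buf,
                            "%02hhx %02hhx %02hhx %02hhx "
                            "%02hhx %02hhx %02hhx %02hhx "
                            "%02hhx %02hhx %02hhx %02hhx "
                            "%02hhx %02hhx %02hhx %02hhx",
                            &chid.data[0], &chid.data[1],
                            &chid.data[2], &chid.data[3],
                            &chid.data[4], &chid.data[5],
                            &chid.data[6], &chid.data[7],
                            &chid.data[8], &chid.data[9],
                            &chid.data[10], &chid.data[11],
                            &chid.data[12], &chid.data[13],
                            &chid.data[14], &chid.data[15]);
            if (result != 16)
                    return -EINVAL;

            result = wusbhc_chid_set(wusbhc, &chid);
            return result < 0 ? result : size;
    }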
diff --git a/drivers/usb/wusbcore/pal.c b/drivers/usb/wusbcore/pal.c index 7cc51e9905cf..d0b172c5ecc7 100644 --- a/drivers/usb/wusbcore/pal.c +++ b/drivers/usb/wusbcore/pal.c | |||
@@ -18,6 +18,16 @@ | |||
18 | */ | 18 | */ |
19 | #include "wusbhc.h" | 19 | #include "wusbhc.h" |
20 | 20 | ||
21 | static void wusbhc_channel_changed(struct uwb_pal *pal, int channel) | ||
22 | { | ||
23 | struct wusbhc *wusbhc = container_of(pal, struct wusbhc, pal); | ||
24 | |||
25 | if (channel < 0) | ||
26 | wusbhc_stop(wusbhc); | ||
27 | else | ||
28 | wusbhc_start(wusbhc); | ||
29 | } | ||
30 | |||
21 | /** | 31 | /** |
22 | * wusbhc_pal_register - register the WUSB HC as a UWB PAL | 32 | * wusbhc_pal_register - register the WUSB HC as a UWB PAL |
23 | * @wusbhc: the WUSB HC | 33 | * @wusbhc: the WUSB HC |
@@ -28,8 +38,10 @@ int wusbhc_pal_register(struct wusbhc *wusbhc) | |||
28 | 38 | ||
29 | wusbhc->pal.name = "wusbhc"; | 39 | wusbhc->pal.name = "wusbhc"; |
30 | wusbhc->pal.device = wusbhc->usb_hcd.self.controller; | 40 | wusbhc->pal.device = wusbhc->usb_hcd.self.controller; |
41 | wusbhc->pal.rc = wusbhc->uwb_rc; | ||
42 | wusbhc->pal.channel_changed = wusbhc_channel_changed; | ||
31 | 43 | ||
32 | return uwb_pal_register(wusbhc->uwb_rc, &wusbhc->pal); | 44 | return uwb_pal_register(&wusbhc->pal); |
33 | } | 45 | } |
34 | 46 | ||
35 | /** | 47 | /** |
@@ -38,5 +50,5 @@ int wusbhc_pal_register(struct wusbhc *wusbhc) | |||
38 | */ | 50 | */ |
39 | void wusbhc_pal_unregister(struct wusbhc *wusbhc) | 51 | void wusbhc_pal_unregister(struct wusbhc *wusbhc) |
40 | { | 52 | { |
41 | uwb_pal_unregister(wusbhc->uwb_rc, &wusbhc->pal); | 53 | uwb_pal_unregister(&wusbhc->pal); |
42 | } | 54 | } |
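uwb_pal_register() no longer takes the radio controller as an argument; the PAL carries it, along with a channel_changed callback. The registration contract this patch relies on, sketched for a hypothetical PAL (all example_* names are made up):

    struct example_pal {
            struct uwb_pal pal;
            struct device *dev;
            struct uwb_rc *uwb_rc;
    };

    static void example_start(struct example_pal *epal);
    static void example_stop(struct example_pal *epal);

    static void example_channel_changed(struct uwb_pal *pal, int channel)
    {
            struct example_pal *epal =
                    container_of(pal, struct example_pal, pal);

            if (channel < 0)
                    example_stop(epal);    /* radio lost its channel */
            else
                    example_start(epal);   /* beaconing on 'channel' */
    }

    static int example_pal_register(struct example_pal *epal)
    {
            epal->pal.name = "example";
            epal->pal.device = epal->dev;
            epal->pal.rc = epal->uwb_rc;   /* now set on the pal itself */
            epal->pal.channel_changed = example_channel_changed;

            return uwb_pal_register(&epal->pal);
    }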
diff --git a/drivers/usb/wusbcore/reservation.c b/drivers/usb/wusbcore/reservation.c index fc63e77ded2d..4ed97360c046 100644 --- a/drivers/usb/wusbcore/reservation.c +++ b/drivers/usb/wusbcore/reservation.c | |||
@@ -48,18 +48,19 @@ static void wusbhc_rsv_complete_cb(struct uwb_rsv *rsv) | |||
48 | { | 48 | { |
49 | struct wusbhc *wusbhc = rsv->pal_priv; | 49 | struct wusbhc *wusbhc = rsv->pal_priv; |
50 | struct device *dev = wusbhc->dev; | 50 | struct device *dev = wusbhc->dev; |
51 | struct uwb_mas_bm mas; | ||
51 | char buf[72]; | 52 | char buf[72]; |
52 | 53 | ||
53 | switch (rsv->state) { | 54 | switch (rsv->state) { |
54 | case UWB_RSV_STATE_O_ESTABLISHED: | 55 | case UWB_RSV_STATE_O_ESTABLISHED: |
55 | bitmap_scnprintf(buf, sizeof(buf), rsv->mas.bm, UWB_NUM_MAS); | 56 | uwb_rsv_get_usable_mas(rsv, &mas); |
57 | bitmap_scnprintf(buf, sizeof(buf), mas.bm, UWB_NUM_MAS); | ||
56 | dev_dbg(dev, "established reservation: %s\n", buf); | 58 | dev_dbg(dev, "established reservation: %s\n", buf); |
57 | wusbhc_bwa_set(wusbhc, rsv->stream, &rsv->mas); | 59 | wusbhc_bwa_set(wusbhc, rsv->stream, &mas); |
58 | break; | 60 | break; |
59 | case UWB_RSV_STATE_NONE: | 61 | case UWB_RSV_STATE_NONE: |
60 | dev_dbg(dev, "removed reservation\n"); | 62 | dev_dbg(dev, "removed reservation\n"); |
61 | wusbhc_bwa_set(wusbhc, 0, NULL); | 63 | wusbhc_bwa_set(wusbhc, 0, NULL); |
62 | wusbhc->rsv = NULL; | ||
63 | break; | 64 | break; |
64 | default: | 65 | default: |
65 | dev_dbg(dev, "unexpected reservation state: %d\n", rsv->state); | 66 | dev_dbg(dev, "unexpected reservation state: %d\n", rsv->state); |
@@ -86,13 +87,12 @@ int wusbhc_rsv_establish(struct wusbhc *wusbhc) | |||
86 | bcid.data[0] = wusbhc->cluster_id; | 87 | bcid.data[0] = wusbhc->cluster_id; |
87 | bcid.data[1] = 0; | 88 | bcid.data[1] = 0; |
88 | 89 | ||
89 | rsv->owner = &rc->uwb_dev; | ||
90 | rsv->target.type = UWB_RSV_TARGET_DEVADDR; | 90 | rsv->target.type = UWB_RSV_TARGET_DEVADDR; |
91 | rsv->target.devaddr = bcid; | 91 | rsv->target.devaddr = bcid; |
92 | rsv->type = UWB_DRP_TYPE_PRIVATE; | 92 | rsv->type = UWB_DRP_TYPE_PRIVATE; |
93 | rsv->max_mas = 256; | 93 | rsv->max_mas = 256; /* try to get as much as possible */ |
94 | rsv->min_mas = 16; /* one MAS per zone? */ | 94 | rsv->min_mas = 15; /* one MAS per zone */ |
95 | rsv->sparsity = 16; /* at least one MAS in each zone? */ | 95 | rsv->max_interval = 1; /* max latency is one zone */ |
96 | rsv->is_multicast = true; | 96 | rsv->is_multicast = true; |
97 | 97 | ||
98 | ret = uwb_rsv_establish(rsv); | 98 | ret = uwb_rsv_establish(rsv); |
@@ -105,11 +105,14 @@ int wusbhc_rsv_establish(struct wusbhc *wusbhc) | |||
105 | 105 | ||
106 | 106 | ||
107 | /** | 107 | /** |
108 | * wusbhc_rsv_terminate - terminate any cluster reservation | 108 | * wusbhc_rsv_terminate - terminate the cluster reservation |
109 | * @wusbhc: the WUSB host whose reservation is to be terminated | 109 | * @wusbhc: the WUSB host whose reservation is to be terminated |
110 | */ | 110 | */ |
111 | void wusbhc_rsv_terminate(struct wusbhc *wusbhc) | 111 | void wusbhc_rsv_terminate(struct wusbhc *wusbhc) |
112 | { | 112 | { |
113 | if (wusbhc->rsv) | 113 | if (wusbhc->rsv) { |
114 | uwb_rsv_terminate(wusbhc->rsv); | 114 | uwb_rsv_terminate(wusbhc->rsv); |
115 | uwb_rsv_destroy(wusbhc->rsv); | ||
116 | wusbhc->rsv = NULL; | ||
117 | } | ||
115 | } | 118 | } |
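The reworked reservation asks for as much bandwidth as possible while bounding latency, and the terminate path now also destroys the rsv and clears the pointer so the completion callback cannot touch a stale reservation. The parameters make sense against standard ECMA-368 superframe geometry (constants illustrative, not from this patch):

    enum {
            EX_ZONES        = 16,   /* zones per superframe */
            EX_MAS_PER_ZONE = 16,   /* 256 MAS per superframe in total */
            EX_MAS_US       = 256,  /* one MAS slot */
            EX_ZONE_US      = EX_MAS_PER_ZONE * EX_MAS_US,  /* 4096 us */
    };

max_interval = 1 zone caps the worst-case media-access latency at about 4 ms, and min_mas = 15 is presumably one MAS in each zone that can carry one, zone 0 being taken by the beacon period.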
diff --git a/drivers/usb/wusbcore/rh.c b/drivers/usb/wusbcore/rh.c index 267a64325106..95c6fa3bf6b2 100644 --- a/drivers/usb/wusbcore/rh.c +++ b/drivers/usb/wusbcore/rh.c | |||
@@ -71,19 +71,20 @@ | |||
71 | */ | 71 | */ |
72 | #include "wusbhc.h" | 72 | #include "wusbhc.h" |
73 | 73 | ||
74 | #define D_LOCAL 0 | ||
75 | #include <linux/uwb/debug.h> | ||
76 | |||
77 | /* | 74 | /* |
78 | * Reset a fake port | 75 | * Reset a fake port |
79 | * | 76 | * |
80 | * This can be called to reset a port from any other state or to reset | 77 | * Using a Reset Device IE is too heavyweight as it causes the device |
81 | * it when connecting. In Wireless USB they are different; when doing | 78 | * to enter the UnConnected state and leave the cluster; this can mean |
82 | * a new connect that involves going over the authentication. When | 79 | * that when the device reconnects it is connected to a different fake |
83 | * just resetting, it's a different story. | 80 | * port. |
81 | * | ||
82 | * Instead, reset authenticated devices with a SetAddress(0), followed | ||
83 | * by a SetAddress(AuthAddr). | ||
84 | * | 84 | * |
85 | * The Linux USB stack resets a port twice before it considers it | 85 | * For unauthenticated devices, just pretend to reset but do nothing. |
86 | * enabled, so we have to detect and ignore that. | 86 | * If the device initialization continues to fail, it will eventually |
87 | * time out after TrustTimeout and enter the UnConnected state. | ||
87 | * | 88 | * |
88 | * @wusbhc is assumed referenced and @wusbhc->mutex unlocked. | 89 | * @wusbhc is assumed referenced and @wusbhc->mutex unlocked. |
89 | * | 90 | * |
@@ -97,20 +98,20 @@ static int wusbhc_rh_port_reset(struct wusbhc *wusbhc, u8 port_idx) | |||
97 | { | 98 | { |
98 | int result = 0; | 99 | int result = 0; |
99 | struct wusb_port *port = wusb_port_by_idx(wusbhc, port_idx); | 100 | struct wusb_port *port = wusb_port_by_idx(wusbhc, port_idx); |
101 | struct wusb_dev *wusb_dev = port->wusb_dev; | ||
100 | 102 | ||
101 | d_fnstart(3, wusbhc->dev, "(wusbhc %p port_idx %u)\n", | 103 | port->status |= USB_PORT_STAT_RESET; |
102 | wusbhc, port_idx); | 104 | port->change |= USB_PORT_STAT_C_RESET; |
103 | if (port->reset_count == 0) { | 105 | |
104 | wusbhc_devconnect_auth(wusbhc, port_idx); | 106 | if (wusb_dev->addr & WUSB_DEV_ADDR_UNAUTH) |
105 | port->reset_count++; | 107 | result = 0; |
106 | } else if (port->reset_count == 1) | ||
107 | /* see header */ | ||
108 | d_printf(2, wusbhc->dev, "Ignoring second reset on port_idx " | ||
109 | "%u\n", port_idx); | ||
110 | else | 108 | else |
111 | result = wusbhc_dev_reset(wusbhc, port_idx); | 109 | result = wusb_dev_update_address(wusbhc, wusb_dev); |
112 | d_fnend(3, wusbhc->dev, "(wusbhc %p port_idx %u) = %d\n", | 110 | |
113 | wusbhc, port_idx, result); | 111 | port->status &= ~USB_PORT_STAT_RESET; |
112 | port->status |= USB_PORT_STAT_ENABLE; | ||
113 | port->change |= USB_PORT_STAT_C_RESET | USB_PORT_STAT_C_ENABLE; | ||
114 | |||
114 | return result; | 115 | return result; |
115 | } | 116 | } |
116 | 117 | ||
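The reset path now splits on authentication state: unauthenticated devices just get the status bits toggled, while authenticated ones are reset with the address bounce described in the comment, via wusb_dev_update_address() (made non-static in security.c by this patch). A rough sketch of the bounce itself, assuming plain SET_ADDRESS control requests; this is an illustration, not the exported helper's actual body:

    static int example_address_bounce(struct usb_device *usb_dev, u8 addr)
    {
            int result;

            /* back to the default address... */
            result = usb_control_msg(usb_dev, usb_sndctrlpipe(usb_dev, 0),
                                     USB_REQ_SET_ADDRESS, 0,
                                     0, 0, NULL, 0, USB_CTRL_SET_TIMEOUT);
            if (result < 0)
                    return result;

            /* ...then up to the authenticated one */
            return usb_control_msg(usb_dev, usb_sndctrlpipe(usb_dev, 0),
                                   USB_REQ_SET_ADDRESS, 0,
                                   addr, 0, NULL, 0, USB_CTRL_SET_TIMEOUT);
    }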
@@ -138,7 +139,6 @@ int wusbhc_rh_status_data(struct usb_hcd *usb_hcd, char *_buf) | |||
138 | size_t cnt, size; | 139 | size_t cnt, size; |
139 | unsigned long *buf = (unsigned long *) _buf; | 140 | unsigned long *buf = (unsigned long *) _buf; |
140 | 141 | ||
141 | d_fnstart(1, wusbhc->dev, "(wusbhc %p)\n", wusbhc); | ||
142 | /* WE DON'T LOCK, see comment */ | 142 | /* WE DON'T LOCK, see comment */ |
143 | size = wusbhc->ports_max + 1 /* hub bit */; | 143 | size = wusbhc->ports_max + 1 /* hub bit */; |
144 | size = (size + 8 - 1) / 8; /* round to bytes */ | 144 | size = (size + 8 - 1) / 8; /* round to bytes */ |
@@ -147,8 +147,6 @@ int wusbhc_rh_status_data(struct usb_hcd *usb_hcd, char *_buf) | |||
147 | set_bit(cnt + 1, buf); | 147 | set_bit(cnt + 1, buf); |
148 | else | 148 | else |
149 | clear_bit(cnt + 1, buf); | 149 | clear_bit(cnt + 1, buf); |
150 | d_fnend(1, wusbhc->dev, "(wusbhc %p) %u, buffer:\n", wusbhc, (int)size); | ||
151 | d_dump(1, wusbhc->dev, _buf, size); | ||
152 | return size; | 150 | return size; |
153 | } | 151 | } |
154 | EXPORT_SYMBOL_GPL(wusbhc_rh_status_data); | 152 | EXPORT_SYMBOL_GPL(wusbhc_rh_status_data); |
@@ -197,9 +195,7 @@ static int wusbhc_rh_get_hub_descr(struct wusbhc *wusbhc, u16 wValue, | |||
197 | static int wusbhc_rh_clear_hub_feat(struct wusbhc *wusbhc, u16 feature) | 195 | static int wusbhc_rh_clear_hub_feat(struct wusbhc *wusbhc, u16 feature) |
198 | { | 196 | { |
199 | int result; | 197 | int result; |
200 | struct device *dev = wusbhc->dev; | ||
201 | 198 | ||
202 | d_fnstart(4, dev, "(%p, feature 0x%04u)\n", wusbhc, feature); | ||
203 | switch (feature) { | 199 | switch (feature) { |
204 | case C_HUB_LOCAL_POWER: | 200 | case C_HUB_LOCAL_POWER: |
205 | /* FIXME: maybe plug bit 0 to the power input status, | 201 | /* FIXME: maybe plug bit 0 to the power input status, |
@@ -211,7 +207,6 @@ static int wusbhc_rh_clear_hub_feat(struct wusbhc *wusbhc, u16 feature) | |||
211 | default: | 207 | default: |
212 | result = -EPIPE; | 208 | result = -EPIPE; |
213 | } | 209 | } |
214 | d_fnend(4, dev, "(%p, feature 0x%04u), %d\n", wusbhc, feature, result); | ||
215 | return result; | 210 | return result; |
216 | } | 211 | } |
217 | 212 | ||
@@ -238,14 +233,10 @@ static int wusbhc_rh_get_hub_status(struct wusbhc *wusbhc, u32 *buf, | |||
238 | static int wusbhc_rh_set_port_feat(struct wusbhc *wusbhc, u16 feature, | 233 | static int wusbhc_rh_set_port_feat(struct wusbhc *wusbhc, u16 feature, |
239 | u8 selector, u8 port_idx) | 234 | u8 selector, u8 port_idx) |
240 | { | 235 | { |
241 | int result = -EINVAL; | ||
242 | struct device *dev = wusbhc->dev; | 236 | struct device *dev = wusbhc->dev; |
243 | 237 | ||
244 | d_fnstart(4, dev, "(feat 0x%04u, selector 0x%u, port_idx %d)\n", | ||
245 | feature, selector, port_idx); | ||
246 | |||
247 | if (port_idx > wusbhc->ports_max) | 238 | if (port_idx > wusbhc->ports_max) |
248 | goto error; | 239 | return -EINVAL; |
249 | 240 | ||
250 | switch (feature) { | 241 | switch (feature) { |
251 | /* According to USB2.0[11.24.2.13]p2, these features | 242 | /* According to USB2.0[11.24.2.13]p2, these features |
@@ -255,35 +246,27 @@ static int wusbhc_rh_set_port_feat(struct wusbhc *wusbhc, u16 feature, | |||
255 | case USB_PORT_FEAT_C_SUSPEND: | 246 | case USB_PORT_FEAT_C_SUSPEND: |
256 | case USB_PORT_FEAT_C_CONNECTION: | 247 | case USB_PORT_FEAT_C_CONNECTION: |
257 | case USB_PORT_FEAT_C_RESET: | 248 | case USB_PORT_FEAT_C_RESET: |
258 | result = 0; | 249 | return 0; |
259 | break; | ||
260 | |||
261 | case USB_PORT_FEAT_POWER: | 250 | case USB_PORT_FEAT_POWER: |
262 | /* No such thing, but we fake that it works */ | 251 | /* No such thing, but we fake that it works */ |
263 | mutex_lock(&wusbhc->mutex); | 252 | mutex_lock(&wusbhc->mutex); |
264 | wusb_port_by_idx(wusbhc, port_idx)->status |= USB_PORT_STAT_POWER; | 253 | wusb_port_by_idx(wusbhc, port_idx)->status |= USB_PORT_STAT_POWER; |
265 | mutex_unlock(&wusbhc->mutex); | 254 | mutex_unlock(&wusbhc->mutex); |
266 | result = 0; | 255 | return 0; |
267 | break; | ||
268 | case USB_PORT_FEAT_RESET: | 256 | case USB_PORT_FEAT_RESET: |
269 | result = wusbhc_rh_port_reset(wusbhc, port_idx); | 257 | return wusbhc_rh_port_reset(wusbhc, port_idx); |
270 | break; | ||
271 | case USB_PORT_FEAT_ENABLE: | 258 | case USB_PORT_FEAT_ENABLE: |
272 | case USB_PORT_FEAT_SUSPEND: | 259 | case USB_PORT_FEAT_SUSPEND: |
273 | dev_err(dev, "(port_idx %d) set feat %d/%d UNIMPLEMENTED\n", | 260 | dev_err(dev, "(port_idx %d) set feat %d/%d UNIMPLEMENTED\n", |
274 | port_idx, feature, selector); | 261 | port_idx, feature, selector); |
275 | result = -ENOSYS; | 262 | return -ENOSYS; |
276 | break; | ||
277 | default: | 263 | default: |
278 | dev_err(dev, "(port_idx %d) set feat %d/%d UNKNOWN\n", | 264 | dev_err(dev, "(port_idx %d) set feat %d/%d UNKNOWN\n", |
279 | port_idx, feature, selector); | 265 | port_idx, feature, selector); |
280 | result = -EPIPE; | 266 | return -EPIPE; |
281 | break; | ||
282 | } | 267 | } |
283 | error: | 268 | |
284 | d_fnend(4, dev, "(feat 0x%04u, selector 0x%u, port_idx %d) = %d\n", | 269 | return 0; |
285 | feature, selector, port_idx, result); | ||
286 | return result; | ||
287 | } | 270 | } |
288 | 271 | ||
289 | /* | 272 | /* |
@@ -294,17 +277,13 @@ error: | |||
294 | static int wusbhc_rh_clear_port_feat(struct wusbhc *wusbhc, u16 feature, | 277 | static int wusbhc_rh_clear_port_feat(struct wusbhc *wusbhc, u16 feature, |
295 | u8 selector, u8 port_idx) | 278 | u8 selector, u8 port_idx) |
296 | { | 279 | { |
297 | int result = -EINVAL; | 280 | int result = 0; |
298 | struct device *dev = wusbhc->dev; | 281 | struct device *dev = wusbhc->dev; |
299 | 282 | ||
300 | d_fnstart(4, dev, "(wusbhc %p feat 0x%04x selector %d port_idx %d)\n", | ||
301 | wusbhc, feature, selector, port_idx); | ||
302 | |||
303 | if (port_idx > wusbhc->ports_max) | 283 | if (port_idx > wusbhc->ports_max) |
304 | goto error; | 284 | return -EINVAL; |
305 | 285 | ||
306 | mutex_lock(&wusbhc->mutex); | 286 | mutex_lock(&wusbhc->mutex); |
307 | result = 0; | ||
308 | switch (feature) { | 287 | switch (feature) { |
309 | case USB_PORT_FEAT_POWER: /* fake port always on */ | 288 | case USB_PORT_FEAT_POWER: /* fake port always on */ |
310 | /* According to USB2.0[11.24.2.7.1.4], no need to implement? */ | 289 | /* According to USB2.0[11.24.2.7.1.4], no need to implement? */ |
@@ -324,10 +303,8 @@ static int wusbhc_rh_clear_port_feat(struct wusbhc *wusbhc, u16 feature, | |||
324 | break; | 303 | break; |
325 | case USB_PORT_FEAT_SUSPEND: | 304 | case USB_PORT_FEAT_SUSPEND: |
326 | case USB_PORT_FEAT_C_SUSPEND: | 305 | case USB_PORT_FEAT_C_SUSPEND: |
327 | case 0xffff: /* ??? FIXME */ | ||
328 | dev_err(dev, "(port_idx %d) Clear feat %d/%d UNIMPLEMENTED\n", | 306 | dev_err(dev, "(port_idx %d) Clear feat %d/%d UNIMPLEMENTED\n", |
329 | port_idx, feature, selector); | 307 | port_idx, feature, selector); |
330 | /* dump_stack(); */ | ||
331 | result = -ENOSYS; | 308 | result = -ENOSYS; |
332 | break; | 309 | break; |
333 | default: | 310 | default: |
@@ -337,9 +314,7 @@ static int wusbhc_rh_clear_port_feat(struct wusbhc *wusbhc, u16 feature, | |||
337 | break; | 314 | break; |
338 | } | 315 | } |
339 | mutex_unlock(&wusbhc->mutex); | 316 | mutex_unlock(&wusbhc->mutex); |
340 | error: | 317 | |
341 | d_fnend(4, dev, "(wusbhc %p feat 0x%04x selector %d port_idx %d) = " | ||
342 | "%d\n", wusbhc, feature, selector, port_idx, result); | ||
343 | return result; | 318 | return result; |
344 | } | 319 | } |
345 | 320 | ||
@@ -351,22 +326,17 @@ error: | |||
351 | static int wusbhc_rh_get_port_status(struct wusbhc *wusbhc, u16 port_idx, | 326 | static int wusbhc_rh_get_port_status(struct wusbhc *wusbhc, u16 port_idx, |
352 | u32 *_buf, u16 wLength) | 327 | u32 *_buf, u16 wLength) |
353 | { | 328 | { |
354 | int result = -EINVAL; | ||
355 | u16 *buf = (u16 *) _buf; | 329 | u16 *buf = (u16 *) _buf; |
356 | 330 | ||
357 | d_fnstart(1, wusbhc->dev, "(wusbhc %p port_idx %u wLength %u)\n", | ||
358 | wusbhc, port_idx, wLength); | ||
359 | if (port_idx > wusbhc->ports_max) | 331 | if (port_idx > wusbhc->ports_max) |
360 | goto error; | 332 | return -EINVAL; |
333 | |||
361 | mutex_lock(&wusbhc->mutex); | 334 | mutex_lock(&wusbhc->mutex); |
362 | buf[0] = cpu_to_le16(wusb_port_by_idx(wusbhc, port_idx)->status); | 335 | buf[0] = cpu_to_le16(wusb_port_by_idx(wusbhc, port_idx)->status); |
363 | buf[1] = cpu_to_le16(wusb_port_by_idx(wusbhc, port_idx)->change); | 336 | buf[1] = cpu_to_le16(wusb_port_by_idx(wusbhc, port_idx)->change); |
364 | result = 0; | ||
365 | mutex_unlock(&wusbhc->mutex); | 337 | mutex_unlock(&wusbhc->mutex); |
366 | error: | 338 | |
367 | d_fnend(1, wusbhc->dev, "(wusbhc %p) = %d, buffer:\n", wusbhc, result); | 339 | return 0; |
368 | d_dump(1, wusbhc->dev, _buf, wLength); | ||
369 | return result; | ||
370 | } | 340 | } |
371 | 341 | ||
372 | /* | 342 | /* |
diff --git a/drivers/usb/wusbcore/security.c b/drivers/usb/wusbcore/security.c index a101cad6a8d4..f4aa28eca70d 100644 --- a/drivers/usb/wusbcore/security.c +++ b/drivers/usb/wusbcore/security.c | |||
@@ -27,19 +27,6 @@ | |||
27 | #include <linux/random.h> | 27 | #include <linux/random.h> |
28 | #include "wusbhc.h" | 28 | #include "wusbhc.h" |
29 | 29 | ||
30 | /* | ||
31 | * DEBUG & SECURITY WARNING!!!! | ||
32 | * | ||
33 | * If you enable this past 1, the debug code will weaken the | ||
34 | * cryptographic safety of the system (on purpose, for debugging). | ||
35 | * | ||
36 | * Weaken means: | ||
37 | * we print secret keys and intermediate values all the way, | ||
38 | */ | ||
39 | #undef D_LOCAL | ||
40 | #define D_LOCAL 2 | ||
41 | #include <linux/uwb/debug.h> | ||
42 | |||
43 | static void wusbhc_set_gtk_callback(struct urb *urb); | 30 | static void wusbhc_set_gtk_callback(struct urb *urb); |
44 | static void wusbhc_gtk_rekey_done_work(struct work_struct *work); | 31 | static void wusbhc_gtk_rekey_done_work(struct work_struct *work); |
45 | 32 | ||
@@ -219,7 +206,6 @@ int wusb_dev_sec_add(struct wusbhc *wusbhc, | |||
219 | const void *itr, *top; | 206 | const void *itr, *top; |
220 | char buf[64]; | 207 | char buf[64]; |
221 | 208 | ||
222 | d_fnstart(3, dev, "(usb_dev %p, wusb_dev %p)\n", usb_dev, wusb_dev); | ||
223 | result = usb_get_descriptor(usb_dev, USB_DT_SECURITY, | 209 | result = usb_get_descriptor(usb_dev, USB_DT_SECURITY, |
224 | 0, &secd, sizeof(secd)); | 210 | 0, &secd, sizeof(secd)); |
225 | if (result < sizeof(secd)) { | 211 | if (result < sizeof(secd)) { |
@@ -228,8 +214,6 @@ int wusb_dev_sec_add(struct wusbhc *wusbhc, | |||
228 | goto error_secd; | 214 | goto error_secd; |
229 | } | 215 | } |
230 | secd_size = le16_to_cpu(secd.wTotalLength); | 216 | secd_size = le16_to_cpu(secd.wTotalLength); |
231 | d_printf(5, dev, "got %d bytes of sec descriptor, total is %d\n", | ||
232 | result, secd_size); | ||
233 | secd_buf = kmalloc(secd_size, GFP_KERNEL); | 217 | secd_buf = kmalloc(secd_size, GFP_KERNEL); |
234 | if (secd_buf == NULL) { | 218 | if (secd_buf == NULL) { |
235 | dev_err(dev, "Can't allocate space for security descriptors\n"); | 219 | dev_err(dev, "Can't allocate space for security descriptors\n"); |
@@ -242,7 +226,6 @@ int wusb_dev_sec_add(struct wusbhc *wusbhc, | |||
242 | "not enough data: %d\n", result); | 226 | "not enough data: %d\n", result); |
243 | goto error_secd_all; | 227 | goto error_secd_all; |
244 | } | 228 | } |
245 | d_printf(5, dev, "got %d bytes of sec descriptors\n", result); | ||
246 | bytes = 0; | 229 | bytes = 0; |
247 | itr = secd_buf + sizeof(secd); | 230 | itr = secd_buf + sizeof(secd); |
248 | top = secd_buf + result; | 231 | top = secd_buf + result; |
@@ -279,14 +262,12 @@ int wusb_dev_sec_add(struct wusbhc *wusbhc, | |||
279 | goto error_no_ccm1; | 262 | goto error_no_ccm1; |
280 | } | 263 | } |
281 | wusb_dev->ccm1_etd = *ccm1_etd; | 264 | wusb_dev->ccm1_etd = *ccm1_etd; |
282 | dev_info(dev, "supported encryption: %s; using %s (0x%02x/%02x)\n", | 265 | dev_dbg(dev, "supported encryption: %s; using %s (0x%02x/%02x)\n", |
283 | buf, wusb_et_name(ccm1_etd->bEncryptionType), | 266 | buf, wusb_et_name(ccm1_etd->bEncryptionType), |
284 | ccm1_etd->bEncryptionValue, ccm1_etd->bAuthKeyIndex); | 267 | ccm1_etd->bEncryptionValue, ccm1_etd->bAuthKeyIndex); |
285 | result = 0; | 268 | result = 0; |
286 | kfree(secd_buf); | 269 | kfree(secd_buf); |
287 | out: | 270 | out: |
288 | d_fnend(3, dev, "(usb_dev %p, wusb_dev %p) = %d\n", | ||
289 | usb_dev, wusb_dev, result); | ||
290 | return result; | 271 | return result; |
291 | 272 | ||
292 | 273 | ||
@@ -303,32 +284,6 @@ void wusb_dev_sec_rm(struct wusb_dev *wusb_dev) | |||
303 | /* Nothing so far */ | 284 | /* Nothing so far */ |
304 | } | 285 | } |
305 | 286 | ||
306 | static void hs_printk(unsigned level, struct device *dev, | ||
307 | struct usb_handshake *hs) | ||
308 | { | ||
309 | d_printf(level, dev, | ||
310 | " bMessageNumber: %u\n" | ||
311 | " bStatus: %u\n" | ||
312 | " tTKID: %02x %02x %02x\n" | ||
313 | " CDID: %02x %02x %02x %02x %02x %02x %02x %02x\n" | ||
314 | " %02x %02x %02x %02x %02x %02x %02x %02x\n" | ||
315 | " nonce: %02x %02x %02x %02x %02x %02x %02x %02x\n" | ||
316 | " %02x %02x %02x %02x %02x %02x %02x %02x\n" | ||
317 | " MIC: %02x %02x %02x %02x %02x %02x %02x %02x\n", | ||
318 | hs->bMessageNumber, hs->bStatus, | ||
319 | hs->tTKID[2], hs->tTKID[1], hs->tTKID[0], | ||
320 | hs->CDID[0], hs->CDID[1], hs->CDID[2], hs->CDID[3], | ||
321 | hs->CDID[4], hs->CDID[5], hs->CDID[6], hs->CDID[7], | ||
322 | hs->CDID[8], hs->CDID[9], hs->CDID[10], hs->CDID[11], | ||
323 | hs->CDID[12], hs->CDID[13], hs->CDID[14], hs->CDID[15], | ||
324 | hs->nonce[0], hs->nonce[1], hs->nonce[2], hs->nonce[3], | ||
325 | hs->nonce[4], hs->nonce[5], hs->nonce[6], hs->nonce[7], | ||
326 | hs->nonce[8], hs->nonce[9], hs->nonce[10], hs->nonce[11], | ||
327 | hs->nonce[12], hs->nonce[13], hs->nonce[14], hs->nonce[15], | ||
328 | hs->MIC[0], hs->MIC[1], hs->MIC[2], hs->MIC[3], | ||
329 | hs->MIC[4], hs->MIC[5], hs->MIC[6], hs->MIC[7]); | ||
330 | } | ||
331 | |||
332 | /** | 287 | /** |
333 | * Update the address of an unauthenticated WUSB device | 288 | * Update the address of an unauthenticated WUSB device |
334 | * | 289 | * |
@@ -338,8 +293,7 @@ static void hs_printk(unsigned level, struct device *dev, | |||
338 | * Before the device's address (as known by it) was usb_dev->devnum | | 293 | * Before the device's address (as known by it) was usb_dev->devnum | |
339 | * 0x80 (unauthenticated address). With this we update it to usb_dev->devnum. | 294 | * 0x80 (unauthenticated address). With this we update it to usb_dev->devnum. |
340 | */ | 295 | */ |
341 | static int wusb_dev_update_address(struct wusbhc *wusbhc, | 296 | int wusb_dev_update_address(struct wusbhc *wusbhc, struct wusb_dev *wusb_dev) |
342 | struct wusb_dev *wusb_dev) | ||
343 | { | 297 | { |
344 | int result = -ENOMEM; | 298 | int result = -ENOMEM; |
345 | struct usb_device *usb_dev = wusb_dev->usb_dev; | 299 | struct usb_device *usb_dev = wusb_dev->usb_dev; |
@@ -422,9 +376,6 @@ int wusb_dev_4way_handshake(struct wusbhc *wusbhc, struct wusb_dev *wusb_dev, | |||
422 | get_random_bytes(&hs[0].nonce, sizeof(hs[0].nonce)); | 376 | get_random_bytes(&hs[0].nonce, sizeof(hs[0].nonce)); |
423 | memset(hs[0].MIC, 0, sizeof(hs[0].MIC)); /* Per WUSB1.0[T7-22] */ | 377 | memset(hs[0].MIC, 0, sizeof(hs[0].MIC)); /* Per WUSB1.0[T7-22] */ |
424 | 378 | ||
425 | d_printf(1, dev, "I: sending hs1:\n"); | ||
426 | hs_printk(2, dev, &hs[0]); | ||
427 | |||
428 | result = usb_control_msg( | 379 | result = usb_control_msg( |
429 | usb_dev, usb_sndctrlpipe(usb_dev, 0), | 380 | usb_dev, usb_sndctrlpipe(usb_dev, 0), |
430 | USB_REQ_SET_HANDSHAKE, | 381 | USB_REQ_SET_HANDSHAKE, |
@@ -445,8 +396,6 @@ int wusb_dev_4way_handshake(struct wusbhc *wusbhc, struct wusb_dev *wusb_dev, | |||
445 | dev_err(dev, "Handshake2: request failed: %d\n", result); | 396 | dev_err(dev, "Handshake2: request failed: %d\n", result); |
446 | goto error_hs2; | 397 | goto error_hs2; |
447 | } | 398 | } |
448 | d_printf(1, dev, "got HS2:\n"); | ||
449 | hs_printk(2, dev, &hs[1]); | ||
450 | 399 | ||
451 | result = -EINVAL; | 400 | result = -EINVAL; |
452 | if (hs[1].bMessageNumber != 2) { | 401 | if (hs[1].bMessageNumber != 2) { |
@@ -487,10 +436,6 @@ int wusb_dev_4way_handshake(struct wusbhc *wusbhc, struct wusb_dev *wusb_dev, | |||
487 | result); | 436 | result); |
488 | goto error_hs2; | 437 | goto error_hs2; |
489 | } | 438 | } |
490 | d_printf(2, dev, "KCK:\n"); | ||
491 | d_dump(2, dev, keydvt_out.kck, sizeof(keydvt_out.kck)); | ||
492 | d_printf(2, dev, "PTK:\n"); | ||
493 | d_dump(2, dev, keydvt_out.ptk, sizeof(keydvt_out.ptk)); | ||
494 | 439 | ||
495 | /* Compute MIC and verify it */ | 440 | /* Compute MIC and verify it */ |
496 | result = wusb_oob_mic(mic, keydvt_out.kck, &ccm_n, &hs[1]); | 441 | result = wusb_oob_mic(mic, keydvt_out.kck, &ccm_n, &hs[1]); |
@@ -500,8 +445,6 @@ int wusb_dev_4way_handshake(struct wusbhc *wusbhc, struct wusb_dev *wusb_dev, | |||
500 | goto error_hs2; | 445 | goto error_hs2; |
501 | } | 446 | } |
502 | 447 | ||
503 | d_printf(2, dev, "MIC:\n"); | ||
504 | d_dump(2, dev, mic, sizeof(mic)); | ||
505 | if (memcmp(hs[1].MIC, mic, sizeof(hs[1].MIC))) { | 448 | if (memcmp(hs[1].MIC, mic, sizeof(hs[1].MIC))) { |
506 | dev_err(dev, "Handshake2 failed: MIC mismatch\n"); | 449 | dev_err(dev, "Handshake2 failed: MIC mismatch\n"); |
507 | goto error_hs2; | 450 | goto error_hs2; |
@@ -521,9 +464,6 @@ int wusb_dev_4way_handshake(struct wusbhc *wusbhc, struct wusb_dev *wusb_dev, | |||
521 | goto error_hs2; | 464 | goto error_hs2; |
522 | } | 465 | } |
523 | 466 | ||
524 | d_printf(1, dev, "I: sending hs3:\n"); | ||
525 | hs_printk(2, dev, &hs[2]); | ||
526 | |||
527 | result = usb_control_msg( | 467 | result = usb_control_msg( |
528 | usb_dev, usb_sndctrlpipe(usb_dev, 0), | 468 | usb_dev, usb_sndctrlpipe(usb_dev, 0), |
529 | USB_REQ_SET_HANDSHAKE, | 469 | USB_REQ_SET_HANDSHAKE, |
@@ -534,14 +474,11 @@ int wusb_dev_4way_handshake(struct wusbhc *wusbhc, struct wusb_dev *wusb_dev, | |||
534 | goto error_hs3; | 474 | goto error_hs3; |
535 | } | 475 | } |
536 | 476 | ||
537 | d_printf(1, dev, "I: turning on encryption on host for device\n"); | ||
538 | d_dump(2, dev, keydvt_out.ptk, sizeof(keydvt_out.ptk)); | ||
539 | result = wusbhc->set_ptk(wusbhc, wusb_dev->port_idx, tkid, | 477 | result = wusbhc->set_ptk(wusbhc, wusb_dev->port_idx, tkid, |
540 | keydvt_out.ptk, sizeof(keydvt_out.ptk)); | 478 | keydvt_out.ptk, sizeof(keydvt_out.ptk)); |
541 | if (result < 0) | 479 | if (result < 0) |
542 | goto error_wusbhc_set_ptk; | 480 | goto error_wusbhc_set_ptk; |
543 | 481 | ||
544 | d_printf(1, dev, "I: setting a GTK\n"); | ||
545 | result = wusb_dev_set_gtk(wusbhc, wusb_dev); | 482 | result = wusb_dev_set_gtk(wusbhc, wusb_dev); |
546 | if (result < 0) { | 483 | if (result < 0) { |
547 | dev_err(dev, "Set GTK for device: request failed: %d\n", | 484 | dev_err(dev, "Set GTK for device: request failed: %d\n", |
@@ -551,13 +488,12 @@ int wusb_dev_4way_handshake(struct wusbhc *wusbhc, struct wusb_dev *wusb_dev, | |||
551 | 488 | ||
552 | /* Update the device's address from unauth to auth */ | 489 | /* Update the device's address from unauth to auth */ |
553 | if (usb_dev->authenticated == 0) { | 490 | if (usb_dev->authenticated == 0) { |
554 | d_printf(1, dev, "I: updating addres to auth from non-auth\n"); | ||
555 | result = wusb_dev_update_address(wusbhc, wusb_dev); | 491 | result = wusb_dev_update_address(wusbhc, wusb_dev); |
556 | if (result < 0) | 492 | if (result < 0) |
557 | goto error_dev_update_address; | 493 | goto error_dev_update_address; |
558 | } | 494 | } |
559 | result = 0; | 495 | result = 0; |
560 | d_printf(1, dev, "I: 4way handshke done, device authenticated\n"); | 496 | dev_info(dev, "device authenticated\n"); |
561 | 497 | ||
562 | error_dev_update_address: | 498 | error_dev_update_address: |
563 | error_wusbhc_set_gtk: | 499 | error_wusbhc_set_gtk: |
@@ -570,10 +506,8 @@ error_hs1: | |||
570 | memset(&keydvt_in, 0, sizeof(keydvt_in)); | 506 | memset(&keydvt_in, 0, sizeof(keydvt_in)); |
571 | memset(&ccm_n, 0, sizeof(ccm_n)); | 507 | memset(&ccm_n, 0, sizeof(ccm_n)); |
572 | memset(mic, 0, sizeof(mic)); | 508 | memset(mic, 0, sizeof(mic)); |
573 | if (result < 0) { | 509 | if (result < 0) |
574 | /* error path */ | ||
575 | wusb_dev_set_encryption(usb_dev, 0); | 510 | wusb_dev_set_encryption(usb_dev, 0); |
576 | } | ||
577 | error_dev_set_encryption: | 511 | error_dev_set_encryption: |
578 | kfree(hs); | 512 | kfree(hs); |
579 | error_kzalloc: | 513 | error_kzalloc: |
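Note the error path that survives the cleanup: keydvt_in, ccm_n and mic are memset() to zero on every exit so derived key material never lingers on the stack. A minimal sketch of that scrub-on-exit pattern (function and buffer are illustrative; later kernels would use memzero_explicit(), which the compiler cannot elide):

#include <linux/random.h>
#include <linux/string.h>
#include <linux/types.h>

/* Illustrative sketch: clear secrets on every exit path. */
static int do_keyed_op(void)
{
	u8 key[16];
	int result = 0;

	get_random_bytes(key, sizeof(key));
	/* ... derive and use session keys ... */

	memset(key, 0, sizeof(key));	/* scrub before returning */
	return result;
}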
diff --git a/drivers/usb/wusbcore/wa-nep.c b/drivers/usb/wusbcore/wa-nep.c index 3f542990c73f..17d2626038be 100644 --- a/drivers/usb/wusbcore/wa-nep.c +++ b/drivers/usb/wusbcore/wa-nep.c | |||
@@ -51,7 +51,7 @@ | |||
51 | */ | 51 | */ |
52 | #include <linux/workqueue.h> | 52 | #include <linux/workqueue.h> |
53 | #include <linux/ctype.h> | 53 | #include <linux/ctype.h> |
54 | #include <linux/uwb/debug.h> | 54 | |
55 | #include "wa-hc.h" | 55 | #include "wa-hc.h" |
56 | #include "wusbhc.h" | 56 | #include "wusbhc.h" |
57 | 57 | ||
@@ -139,13 +139,10 @@ static void wa_notif_dispatch(struct work_struct *ws) | |||
139 | /* FIXME: unimplemented WA NOTIFs */ | 139 | /* FIXME: unimplemented WA NOTIFs */ |
140 | /* fallthru */ | 140 | /* fallthru */ |
141 | default: | 141 | default: |
142 | if (printk_ratelimit()) { | 142 | dev_err(dev, "HWA: unknown notification 0x%x, " |
143 | dev_err(dev, "HWA: unknown notification 0x%x, " | 143 | "%zu bytes; discarding\n", |
144 | "%zu bytes; discarding\n", | 144 | notif_hdr->bNotifyType, |
145 | notif_hdr->bNotifyType, | 145 | (size_t)notif_hdr->bLength); |
146 | (size_t)notif_hdr->bLength); | ||
147 | dump_bytes(dev, notif_hdr, 16); | ||
148 | } | ||
149 | break; | 146 | break; |
150 | } | 147 | } |
151 | } | 148 | } |
@@ -160,12 +157,9 @@ out: | |||
160 | * discard the data, as this should not happen. | 157 | * discard the data, as this should not happen. |
161 | */ | 158 | */ |
162 | exhausted_buffer: | 159 | exhausted_buffer: |
163 | if (!printk_ratelimit()) | ||
164 | goto out; | ||
165 | dev_warn(dev, "HWA: device sent short notification, " | 160 | dev_warn(dev, "HWA: device sent short notification, " |
166 | "%d bytes missing; discarding %d bytes.\n", | 161 | "%d bytes missing; discarding %d bytes.\n", |
167 | missing, (int)size); | 162 | missing, (int)size); |
168 | dump_bytes(dev, itr, size); | ||
169 | goto out; | 163 | goto out; |
170 | } | 164 | } |
171 | 165 | ||
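The unknown-notification hunk drops the printk_ratelimit() gate together with the dump_bytes() hex dump, so a misbehaving device can now log one dev_err() per event. If throttling is still wanted, the idiom of this era is the explicit gate sketched below (the helper is hypothetical; later kernels provide dev_err_ratelimited() to fold the two together):

#include <linux/device.h>
#include <linux/kernel.h>

/* Hypothetical helper: rate-limited error reporting. */
static void report_unknown_notif(struct device *dev, u8 type, size_t len)
{
	if (printk_ratelimit())
		dev_err(dev, "HWA: unknown notification 0x%x, %zu bytes; discarding\n",
			type, len);
}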
diff --git a/drivers/usb/wusbcore/wa-rpipe.c b/drivers/usb/wusbcore/wa-rpipe.c index f18e4aae66e9..7369655f69cd 100644 --- a/drivers/usb/wusbcore/wa-rpipe.c +++ b/drivers/usb/wusbcore/wa-rpipe.c | |||
@@ -60,13 +60,10 @@ | |||
60 | #include <linux/init.h> | 60 | #include <linux/init.h> |
61 | #include <asm/atomic.h> | 61 | #include <asm/atomic.h> |
62 | #include <linux/bitmap.h> | 62 | #include <linux/bitmap.h> |
63 | |||
63 | #include "wusbhc.h" | 64 | #include "wusbhc.h" |
64 | #include "wa-hc.h" | 65 | #include "wa-hc.h" |
65 | 66 | ||
66 | #define D_LOCAL 0 | ||
67 | #include <linux/uwb/debug.h> | ||
68 | |||
69 | |||
70 | static int __rpipe_get_descr(struct wahc *wa, | 67 | static int __rpipe_get_descr(struct wahc *wa, |
71 | struct usb_rpipe_descriptor *descr, u16 index) | 68 | struct usb_rpipe_descriptor *descr, u16 index) |
72 | { | 69 | { |
@@ -76,7 +73,6 @@ static int __rpipe_get_descr(struct wahc *wa, | |||
76 | /* Get the RPIPE descriptor -- we cannot use the usb_get_descriptor() | 73 | /* Get the RPIPE descriptor -- we cannot use the usb_get_descriptor() |
77 | * function because the arguments are different. | 74 | * function because the arguments are different. |
78 | */ | 75 | */ |
79 | d_printf(1, dev, "rpipe %u: get descr\n", index); | ||
80 | result = usb_control_msg( | 76 | result = usb_control_msg( |
81 | wa->usb_dev, usb_rcvctrlpipe(wa->usb_dev, 0), | 77 | wa->usb_dev, usb_rcvctrlpipe(wa->usb_dev, 0), |
82 | USB_REQ_GET_DESCRIPTOR, | 78 | USB_REQ_GET_DESCRIPTOR, |
@@ -115,7 +111,6 @@ static int __rpipe_set_descr(struct wahc *wa, | |||
115 | /* we cannot use the usb_get_descriptor() function because the | 111 | /* we cannot use the usb_get_descriptor() function because the |
116 | * arguments are different. | 112 | * arguments are different. |
117 | */ | 113 | */ |
118 | d_printf(1, dev, "rpipe %u: set descr\n", index); | ||
119 | result = usb_control_msg( | 114 | result = usb_control_msg( |
120 | wa->usb_dev, usb_sndctrlpipe(wa->usb_dev, 0), | 115 | wa->usb_dev, usb_sndctrlpipe(wa->usb_dev, 0), |
121 | USB_REQ_SET_DESCRIPTOR, | 116 | USB_REQ_SET_DESCRIPTOR, |
@@ -174,13 +169,12 @@ void rpipe_destroy(struct kref *_rpipe) | |||
174 | { | 169 | { |
175 | struct wa_rpipe *rpipe = container_of(_rpipe, struct wa_rpipe, refcnt); | 170 | struct wa_rpipe *rpipe = container_of(_rpipe, struct wa_rpipe, refcnt); |
176 | u8 index = le16_to_cpu(rpipe->descr.wRPipeIndex); | 171 | u8 index = le16_to_cpu(rpipe->descr.wRPipeIndex); |
177 | d_fnstart(1, NULL, "(rpipe %p %u)\n", rpipe, index); | 172 | |
178 | if (rpipe->ep) | 173 | if (rpipe->ep) |
179 | rpipe->ep->hcpriv = NULL; | 174 | rpipe->ep->hcpriv = NULL; |
180 | rpipe_put_idx(rpipe->wa, index); | 175 | rpipe_put_idx(rpipe->wa, index); |
181 | wa_put(rpipe->wa); | 176 | wa_put(rpipe->wa); |
182 | kfree(rpipe); | 177 | kfree(rpipe); |
183 | d_fnend(1, NULL, "(rpipe %p %u)\n", rpipe, index); | ||
184 | } | 178 | } |
185 | EXPORT_SYMBOL_GPL(rpipe_destroy); | 179 | EXPORT_SYMBOL_GPL(rpipe_destroy); |
186 | 180 | ||
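rpipe_destroy() above is never called directly: it is the kref release callback, invoked by rpipe_put() when the last reference is dropped, which is why it can unconditionally free. A minimal sketch of the pattern, with illustrative names:

#include <linux/kref.h>
#include <linux/slab.h>

struct obj {
	struct kref refcnt;
	/* ... payload ... */
};

/* Release callback: runs exactly once, when the count hits zero. */
static void obj_release(struct kref *kref)
{
	struct obj *o = container_of(kref, struct obj, refcnt);

	kfree(o);
}

static void obj_get(struct obj *o)
{
	kref_get(&o->refcnt);
}

static void obj_put(struct obj *o)
{
	kref_put(&o->refcnt, obj_release);
}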
@@ -202,7 +196,6 @@ static int rpipe_get_idle(struct wa_rpipe **prpipe, struct wahc *wa, u8 crs, | |||
202 | struct wa_rpipe *rpipe; | 196 | struct wa_rpipe *rpipe; |
203 | struct device *dev = &wa->usb_iface->dev; | 197 | struct device *dev = &wa->usb_iface->dev; |
204 | 198 | ||
205 | d_fnstart(3, dev, "(wa %p crs 0x%02x)\n", wa, crs); | ||
206 | rpipe = kzalloc(sizeof(*rpipe), gfp); | 199 | rpipe = kzalloc(sizeof(*rpipe), gfp); |
207 | if (rpipe == NULL) | 200 | if (rpipe == NULL) |
208 | return -ENOMEM; | 201 | return -ENOMEM; |
@@ -223,14 +216,12 @@ static int rpipe_get_idle(struct wa_rpipe **prpipe, struct wahc *wa, u8 crs, | |||
223 | } | 216 | } |
224 | *prpipe = NULL; | 217 | *prpipe = NULL; |
225 | kfree(rpipe); | 218 | kfree(rpipe); |
226 | d_fnend(3, dev, "(wa %p crs 0x%02x) = -ENXIO\n", wa, crs); | ||
227 | return -ENXIO; | 219 | return -ENXIO; |
228 | 220 | ||
229 | found: | 221 | found: |
230 | set_bit(rpipe_idx, wa->rpipe_bm); | 222 | set_bit(rpipe_idx, wa->rpipe_bm); |
231 | rpipe->wa = wa_get(wa); | 223 | rpipe->wa = wa_get(wa); |
232 | *prpipe = rpipe; | 224 | *prpipe = rpipe; |
233 | d_fnstart(3, dev, "(wa %p crs 0x%02x) = 0\n", wa, crs); | ||
234 | return 0; | 225 | return 0; |
235 | } | 226 | } |
236 | 227 | ||
@@ -239,7 +230,6 @@ static int __rpipe_reset(struct wahc *wa, unsigned index) | |||
239 | int result; | 230 | int result; |
240 | struct device *dev = &wa->usb_iface->dev; | 231 | struct device *dev = &wa->usb_iface->dev; |
241 | 232 | ||
242 | d_printf(1, dev, "rpipe %u: reset\n", index); | ||
243 | result = usb_control_msg( | 233 | result = usb_control_msg( |
244 | wa->usb_dev, usb_sndctrlpipe(wa->usb_dev, 0), | 234 | wa->usb_dev, usb_sndctrlpipe(wa->usb_dev, 0), |
245 | USB_REQ_RPIPE_RESET, | 235 | USB_REQ_RPIPE_RESET, |
@@ -276,7 +266,6 @@ static struct usb_wireless_ep_comp_descriptor *rpipe_epc_find( | |||
276 | struct usb_descriptor_header *hdr; | 266 | struct usb_descriptor_header *hdr; |
277 | struct usb_wireless_ep_comp_descriptor *epcd; | 267 | struct usb_wireless_ep_comp_descriptor *epcd; |
278 | 268 | ||
279 | d_fnstart(3, dev, "(ep %p)\n", ep); | ||
280 | if (ep->desc.bEndpointAddress == 0) { | 269 | if (ep->desc.bEndpointAddress == 0) { |
281 | epcd = &epc0; | 270 | epcd = &epc0; |
282 | goto out; | 271 | goto out; |
@@ -310,7 +299,6 @@ static struct usb_wireless_ep_comp_descriptor *rpipe_epc_find( | |||
310 | itr_size -= hdr->bDescriptorType; | 299 | itr_size -= hdr->bDescriptorType; |
311 | } | 300 | } |
312 | out: | 301 | out: |
313 | d_fnend(3, dev, "(ep %p) = %p\n", ep, epcd); | ||
314 | return epcd; | 302 | return epcd; |
315 | } | 303 | } |
316 | 304 | ||
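rpipe_epc_find() walks the interface's extra-descriptor blob looking for the endpoint's wireless companion descriptor. The generic shape of such a walk, sketched with illustrative names (each descriptor starts with a usb_descriptor_header, and bLength is the only safe stride):

#include <linux/usb/ch9.h>

/* Illustrative sketch: find the first extra descriptor of @type. */
static void *find_descr(void *buf, size_t len, u8 type)
{
	void *itr = buf, *top = buf + len;

	while (itr + sizeof(struct usb_descriptor_header) <= top) {
		struct usb_descriptor_header *hdr = itr;

		if (hdr->bLength < sizeof(*hdr) || itr + hdr->bLength > top)
			break;				/* malformed blob */
		if (hdr->bDescriptorType == type)
			return hdr;
		itr += hdr->bLength;			/* stride by bLength */
	}
	return NULL;
}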
@@ -329,8 +317,6 @@ static int rpipe_aim(struct wa_rpipe *rpipe, struct wahc *wa, | |||
329 | struct usb_wireless_ep_comp_descriptor *epcd; | 317 | struct usb_wireless_ep_comp_descriptor *epcd; |
330 | u8 unauth; | 318 | u8 unauth; |
331 | 319 | ||
332 | d_fnstart(3, dev, "(rpipe %p wa %p ep %p, urb %p)\n", | ||
333 | rpipe, wa, ep, urb); | ||
334 | epcd = rpipe_epc_find(dev, ep); | 320 | epcd = rpipe_epc_find(dev, ep); |
335 | if (epcd == NULL) { | 321 | if (epcd == NULL) { |
336 | dev_err(dev, "ep 0x%02x: can't find companion descriptor\n", | 322 | dev_err(dev, "ep 0x%02x: can't find companion descriptor\n", |
@@ -350,10 +336,12 @@ static int rpipe_aim(struct wa_rpipe *rpipe, struct wahc *wa, | |||
350 | /* FIXME: use maximum speed as supported or recommended by device */ | 336 | /* FIXME: use maximum speed as supported or recommended by device */ |
351 | rpipe->descr.bSpeed = usb_pipeendpoint(urb->pipe) == 0 ? | 337 | rpipe->descr.bSpeed = usb_pipeendpoint(urb->pipe) == 0 ? |
352 | UWB_PHY_RATE_53 : UWB_PHY_RATE_200; | 338 | UWB_PHY_RATE_53 : UWB_PHY_RATE_200; |
353 | d_printf(2, dev, "addr %u (0x%02x) rpipe #%u ep# %u speed %d\n", | 339 | |
354 | urb->dev->devnum, urb->dev->devnum | unauth, | 340 | dev_dbg(dev, "addr %u (0x%02x) rpipe #%u ep# %u speed %d\n", |
355 | le16_to_cpu(rpipe->descr.wRPipeIndex), | 341 | urb->dev->devnum, urb->dev->devnum | unauth, |
356 | usb_pipeendpoint(urb->pipe), rpipe->descr.bSpeed); | 342 | le16_to_cpu(rpipe->descr.wRPipeIndex), |
343 | usb_pipeendpoint(urb->pipe), rpipe->descr.bSpeed); | ||
344 | |||
357 | /* see security.c:wusb_update_address() */ | 345 | /* see security.c:wusb_update_address() */ |
358 | if (unlikely(urb->dev->devnum == 0x80)) | 346 | if (unlikely(urb->dev->devnum == 0x80)) |
359 | rpipe->descr.bDeviceAddress = 0; | 347 | rpipe->descr.bDeviceAddress = 0; |
@@ -384,8 +372,6 @@ static int rpipe_aim(struct wa_rpipe *rpipe, struct wahc *wa, | |||
384 | } | 372 | } |
385 | result = 0; | 373 | result = 0; |
386 | error: | 374 | error: |
387 | d_fnend(3, dev, "(rpipe %p wa %p ep %p urb %p) = %d\n", | ||
388 | rpipe, wa, ep, urb, result); | ||
389 | return result; | 375 | return result; |
390 | } | 376 | } |
391 | 377 | ||
@@ -405,8 +391,6 @@ static int rpipe_check_aim(const struct wa_rpipe *rpipe, const struct wahc *wa, | |||
405 | u8 unauth = (usb_dev->wusb && !usb_dev->authenticated) ? 0x80 : 0; | 391 | u8 unauth = (usb_dev->wusb && !usb_dev->authenticated) ? 0x80 : 0; |
406 | u8 portnum = wusb_port_no_to_idx(urb->dev->portnum); | 392 | u8 portnum = wusb_port_no_to_idx(urb->dev->portnum); |
407 | 393 | ||
408 | d_fnstart(3, dev, "(rpipe %p wa %p ep %p, urb %p)\n", | ||
409 | rpipe, wa, ep, urb); | ||
410 | #define AIM_CHECK(rdf, val, text) \ | 394 | #define AIM_CHECK(rdf, val, text) \ |
411 | do { \ | 395 | do { \ |
412 | if (rpipe->descr.rdf != (val)) { \ | 396 | if (rpipe->descr.rdf != (val)) { \ |
@@ -451,8 +435,6 @@ int rpipe_get_by_ep(struct wahc *wa, struct usb_host_endpoint *ep, | |||
451 | struct wa_rpipe *rpipe; | 435 | struct wa_rpipe *rpipe; |
452 | u8 eptype; | 436 | u8 eptype; |
453 | 437 | ||
454 | d_fnstart(3, dev, "(wa %p ep %p urb %p gfp 0x%08x)\n", wa, ep, urb, | ||
455 | gfp); | ||
456 | mutex_lock(&wa->rpipe_mutex); | 438 | mutex_lock(&wa->rpipe_mutex); |
457 | rpipe = ep->hcpriv; | 439 | rpipe = ep->hcpriv; |
458 | if (rpipe != NULL) { | 440 | if (rpipe != NULL) { |
@@ -462,9 +444,9 @@ int rpipe_get_by_ep(struct wahc *wa, struct usb_host_endpoint *ep, | |||
462 | goto error; | 444 | goto error; |
463 | } | 445 | } |
464 | __rpipe_get(rpipe); | 446 | __rpipe_get(rpipe); |
465 | d_printf(2, dev, "ep 0x%02x: reusing rpipe %u\n", | 447 | dev_dbg(dev, "ep 0x%02x: reusing rpipe %u\n", |
466 | ep->desc.bEndpointAddress, | 448 | ep->desc.bEndpointAddress, |
467 | le16_to_cpu(rpipe->descr.wRPipeIndex)); | 449 | le16_to_cpu(rpipe->descr.wRPipeIndex)); |
468 | } else { | 450 | } else { |
469 | /* hmm, assign idle rpipe, aim it */ | 451 | /* hmm, assign idle rpipe, aim it */ |
470 | result = -ENOBUFS; | 452 | result = -ENOBUFS; |
@@ -480,14 +462,12 @@ int rpipe_get_by_ep(struct wahc *wa, struct usb_host_endpoint *ep, | |||
480 | ep->hcpriv = rpipe; | 462 | ep->hcpriv = rpipe; |
481 | rpipe->ep = ep; | 463 | rpipe->ep = ep; |
482 | __rpipe_get(rpipe); /* for caching into ep->hcpriv */ | 464 | __rpipe_get(rpipe); /* for caching into ep->hcpriv */ |
483 | d_printf(2, dev, "ep 0x%02x: using rpipe %u\n", | 465 | dev_dbg(dev, "ep 0x%02x: using rpipe %u\n", |
484 | ep->desc.bEndpointAddress, | 466 | ep->desc.bEndpointAddress, |
485 | le16_to_cpu(rpipe->descr.wRPipeIndex)); | 467 | le16_to_cpu(rpipe->descr.wRPipeIndex)); |
486 | } | 468 | } |
487 | d_dump(4, dev, &rpipe->descr, sizeof(rpipe->descr)); | ||
488 | error: | 469 | error: |
489 | mutex_unlock(&wa->rpipe_mutex); | 470 | mutex_unlock(&wa->rpipe_mutex); |
490 | d_fnend(3, dev, "(wa %p ep %p urb %p gfp 0x%08x)\n", wa, ep, urb, gfp); | ||
491 | return result; | 471 | return result; |
492 | } | 472 | } |
493 | 473 | ||
@@ -507,7 +487,7 @@ int wa_rpipes_create(struct wahc *wa) | |||
507 | void wa_rpipes_destroy(struct wahc *wa) | 487 | void wa_rpipes_destroy(struct wahc *wa) |
508 | { | 488 | { |
509 | struct device *dev = &wa->usb_iface->dev; | 489 | struct device *dev = &wa->usb_iface->dev; |
510 | d_fnstart(3, dev, "(wa %p)\n", wa); | 490 | |
511 | if (!bitmap_empty(wa->rpipe_bm, wa->rpipes)) { | 491 | if (!bitmap_empty(wa->rpipe_bm, wa->rpipes)) { |
512 | char buf[256]; | 492 | char buf[256]; |
513 | WARN_ON(1); | 493 | WARN_ON(1); |
@@ -515,7 +495,6 @@ void wa_rpipes_destroy(struct wahc *wa) | |||
515 | dev_err(dev, "BUG: pipes not released on exit: %s\n", buf); | 495 | dev_err(dev, "BUG: pipes not released on exit: %s\n", buf); |
516 | } | 496 | } |
517 | kfree(wa->rpipe_bm); | 497 | kfree(wa->rpipe_bm); |
518 | d_fnend(3, dev, "(wa %p)\n", wa); | ||
519 | } | 498 | } |
520 | 499 | ||
521 | /* | 500 | /* |
@@ -530,33 +509,20 @@ void wa_rpipes_destroy(struct wahc *wa) | |||
530 | */ | 509 | */ |
531 | void rpipe_ep_disable(struct wahc *wa, struct usb_host_endpoint *ep) | 510 | void rpipe_ep_disable(struct wahc *wa, struct usb_host_endpoint *ep) |
532 | { | 511 | { |
533 | struct device *dev = &wa->usb_iface->dev; | ||
534 | struct wa_rpipe *rpipe; | 512 | struct wa_rpipe *rpipe; |
535 | d_fnstart(2, dev, "(wa %p ep %p)\n", wa, ep); | 513 | |
536 | mutex_lock(&wa->rpipe_mutex); | 514 | mutex_lock(&wa->rpipe_mutex); |
537 | rpipe = ep->hcpriv; | 515 | rpipe = ep->hcpriv; |
538 | if (rpipe != NULL) { | 516 | if (rpipe != NULL) { |
539 | unsigned rc = atomic_read(&rpipe->refcnt.refcount); | ||
540 | int result; | ||
541 | u16 index = le16_to_cpu(rpipe->descr.wRPipeIndex); | 517 | u16 index = le16_to_cpu(rpipe->descr.wRPipeIndex); |
542 | 518 | ||
543 | if (rc != 1) | 519 | usb_control_msg( |
544 | d_printf(1, dev, "(wa %p ep %p) rpipe %p refcnt %u\n", | ||
545 | wa, ep, rpipe, rc); | ||
546 | |||
547 | d_printf(1, dev, "rpipe %u: abort\n", index); | ||
548 | result = usb_control_msg( | ||
549 | wa->usb_dev, usb_rcvctrlpipe(wa->usb_dev, 0), | 520 | wa->usb_dev, usb_rcvctrlpipe(wa->usb_dev, 0), |
550 | USB_REQ_RPIPE_ABORT, | 521 | USB_REQ_RPIPE_ABORT, |
551 | USB_DIR_OUT | USB_TYPE_CLASS | USB_RECIP_RPIPE, | 522 | USB_DIR_OUT | USB_TYPE_CLASS | USB_RECIP_RPIPE, |
552 | 0, index, NULL, 0, 1000 /* FIXME: arbitrary */); | 523 | 0, index, NULL, 0, 1000 /* FIXME: arbitrary */); |
553 | if (result < 0 && result != -ENODEV /* dev is gone */) | ||
554 | d_printf(1, dev, "(wa %p rpipe %u): abort failed: %d\n", | ||
555 | wa, index, result); | ||
556 | rpipe_put(rpipe); | 524 | rpipe_put(rpipe); |
557 | } | 525 | } |
558 | mutex_unlock(&wa->rpipe_mutex); | 526 | mutex_unlock(&wa->rpipe_mutex); |
559 | d_fnend(2, dev, "(wa %p ep %p)\n", wa, ep); | ||
560 | return; | ||
561 | } | 527 | } |
562 | EXPORT_SYMBOL_GPL(rpipe_ep_disable); | 528 | EXPORT_SYMBOL_GPL(rpipe_ep_disable); |
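The abort in rpipe_ep_disable() is now fire-and-forget: the result check went away because by the time an endpoint is disabled the device may already have vanished (-ENODEV), and the rpipe is being torn down regardless. The request itself, extracted as a sketch (types and request constants come from the driver's wa-hc.h):

#include "wa-hc.h"

/* Sketch: the class-specific RPipe abort request, result ignored. */
static void rpipe_abort(struct wahc *wa, u16 index)
{
	usb_control_msg(wa->usb_dev, usb_rcvctrlpipe(wa->usb_dev, 0),
			USB_REQ_RPIPE_ABORT,
			USB_DIR_OUT | USB_TYPE_CLASS | USB_RECIP_RPIPE,
			0, index, NULL, 0, 1000 /* ms, arbitrary */);
}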
diff --git a/drivers/usb/wusbcore/wa-xfer.c b/drivers/usb/wusbcore/wa-xfer.c index c038635d1c64..238a96aee3a1 100644 --- a/drivers/usb/wusbcore/wa-xfer.c +++ b/drivers/usb/wusbcore/wa-xfer.c | |||
@@ -82,13 +82,10 @@ | |||
82 | #include <linux/init.h> | 82 | #include <linux/init.h> |
83 | #include <linux/spinlock.h> | 83 | #include <linux/spinlock.h> |
84 | #include <linux/hash.h> | 84 | #include <linux/hash.h> |
85 | |||
85 | #include "wa-hc.h" | 86 | #include "wa-hc.h" |
86 | #include "wusbhc.h" | 87 | #include "wusbhc.h" |
87 | 88 | ||
88 | #undef D_LOCAL | ||
89 | #define D_LOCAL 0 /* 0 disabled, > 0 different levels... */ | ||
90 | #include <linux/uwb/debug.h> | ||
91 | |||
92 | enum { | 89 | enum { |
93 | WA_SEGS_MAX = 255, | 90 | WA_SEGS_MAX = 255, |
94 | }; | 91 | }; |
@@ -180,7 +177,6 @@ static void wa_xfer_destroy(struct kref *_xfer) | |||
180 | } | 177 | } |
181 | } | 178 | } |
182 | kfree(xfer); | 179 | kfree(xfer); |
183 | d_printf(2, NULL, "xfer %p destroyed\n", xfer); | ||
184 | } | 180 | } |
185 | 181 | ||
186 | static void wa_xfer_get(struct wa_xfer *xfer) | 182 | static void wa_xfer_get(struct wa_xfer *xfer) |
@@ -190,10 +186,7 @@ static void wa_xfer_get(struct wa_xfer *xfer) | |||
190 | 186 | ||
191 | static void wa_xfer_put(struct wa_xfer *xfer) | 187 | static void wa_xfer_put(struct wa_xfer *xfer) |
192 | { | 188 | { |
193 | d_fnstart(3, NULL, "(xfer %p) -- ref count bef put %d\n", | ||
194 | xfer, atomic_read(&xfer->refcnt.refcount)); | ||
195 | kref_put(&xfer->refcnt, wa_xfer_destroy); | 189 | kref_put(&xfer->refcnt, wa_xfer_destroy); |
196 | d_fnend(3, NULL, "(xfer %p) = void\n", xfer); | ||
197 | } | 190 | } |
198 | 191 | ||
199 | /* | 192 | /* |
@@ -209,7 +202,7 @@ static void wa_xfer_put(struct wa_xfer *xfer) | |||
209 | static void wa_xfer_giveback(struct wa_xfer *xfer) | 202 | static void wa_xfer_giveback(struct wa_xfer *xfer) |
210 | { | 203 | { |
211 | unsigned long flags; | 204 | unsigned long flags; |
212 | d_fnstart(3, NULL, "(xfer %p)\n", xfer); | 205 | |
213 | spin_lock_irqsave(&xfer->wa->xfer_list_lock, flags); | 206 | spin_lock_irqsave(&xfer->wa->xfer_list_lock, flags); |
214 | list_del_init(&xfer->list_node); | 207 | list_del_init(&xfer->list_node); |
215 | spin_unlock_irqrestore(&xfer->wa->xfer_list_lock, flags); | 208 | spin_unlock_irqrestore(&xfer->wa->xfer_list_lock, flags); |
@@ -217,7 +210,6 @@ static void wa_xfer_giveback(struct wa_xfer *xfer) | |||
217 | wusbhc_giveback_urb(xfer->wa->wusb, xfer->urb, xfer->result); | 210 | wusbhc_giveback_urb(xfer->wa->wusb, xfer->urb, xfer->result); |
218 | wa_put(xfer->wa); | 211 | wa_put(xfer->wa); |
219 | wa_xfer_put(xfer); | 212 | wa_xfer_put(xfer); |
220 | d_fnend(3, NULL, "(xfer %p) = void\n", xfer); | ||
221 | } | 213 | } |
222 | 214 | ||
223 | /* | 215 | /* |
@@ -227,13 +219,10 @@ static void wa_xfer_giveback(struct wa_xfer *xfer) | |||
227 | */ | 219 | */ |
228 | static void wa_xfer_completion(struct wa_xfer *xfer) | 220 | static void wa_xfer_completion(struct wa_xfer *xfer) |
229 | { | 221 | { |
230 | d_fnstart(3, NULL, "(xfer %p)\n", xfer); | ||
231 | if (xfer->wusb_dev) | 222 | if (xfer->wusb_dev) |
232 | wusb_dev_put(xfer->wusb_dev); | 223 | wusb_dev_put(xfer->wusb_dev); |
233 | rpipe_put(xfer->ep->hcpriv); | 224 | rpipe_put(xfer->ep->hcpriv); |
234 | wa_xfer_giveback(xfer); | 225 | wa_xfer_giveback(xfer); |
235 | d_fnend(3, NULL, "(xfer %p) = void\n", xfer); | ||
236 | return; | ||
237 | } | 226 | } |
238 | 227 | ||
239 | /* | 228 | /* |
@@ -243,12 +232,12 @@ static void wa_xfer_completion(struct wa_xfer *xfer) | |||
243 | */ | 232 | */ |
244 | static unsigned __wa_xfer_is_done(struct wa_xfer *xfer) | 233 | static unsigned __wa_xfer_is_done(struct wa_xfer *xfer) |
245 | { | 234 | { |
235 | struct device *dev = &xfer->wa->usb_iface->dev; | ||
246 | unsigned result, cnt; | 236 | unsigned result, cnt; |
247 | struct wa_seg *seg; | 237 | struct wa_seg *seg; |
248 | struct urb *urb = xfer->urb; | 238 | struct urb *urb = xfer->urb; |
249 | unsigned found_short = 0; | 239 | unsigned found_short = 0; |
250 | 240 | ||
251 | d_fnstart(3, NULL, "(xfer %p)\n", xfer); | ||
252 | result = xfer->segs_done == xfer->segs_submitted; | 241 | result = xfer->segs_done == xfer->segs_submitted; |
253 | if (result == 0) | 242 | if (result == 0) |
254 | goto out; | 243 | goto out; |
@@ -258,10 +247,8 @@ static unsigned __wa_xfer_is_done(struct wa_xfer *xfer) | |||
258 | switch (seg->status) { | 247 | switch (seg->status) { |
259 | case WA_SEG_DONE: | 248 | case WA_SEG_DONE: |
260 | if (found_short && seg->result > 0) { | 249 | if (found_short && seg->result > 0) { |
261 | if (printk_ratelimit()) | 250 | dev_dbg(dev, "xfer %p#%u: bad short segments (%zu)\n", |
262 | printk(KERN_ERR "xfer %p#%u: bad short " | 251 | xfer, cnt, seg->result); |
263 | "segments (%zu)\n", xfer, cnt, | ||
264 | seg->result); | ||
265 | urb->status = -EINVAL; | 252 | urb->status = -EINVAL; |
266 | goto out; | 253 | goto out; |
267 | } | 254 | } |
@@ -269,36 +256,30 @@ static unsigned __wa_xfer_is_done(struct wa_xfer *xfer) | |||
269 | if (seg->result < xfer->seg_size | 256 | if (seg->result < xfer->seg_size |
270 | && cnt != xfer->segs-1) | 257 | && cnt != xfer->segs-1) |
271 | found_short = 1; | 258 | found_short = 1; |
272 | d_printf(2, NULL, "xfer %p#%u: DONE short %d " | 259 | dev_dbg(dev, "xfer %p#%u: DONE short %d " |
273 | "result %zu urb->actual_length %d\n", | 260 | "result %zu urb->actual_length %d\n", |
274 | xfer, seg->index, found_short, seg->result, | 261 | xfer, seg->index, found_short, seg->result, |
275 | urb->actual_length); | 262 | urb->actual_length); |
276 | break; | 263 | break; |
277 | case WA_SEG_ERROR: | 264 | case WA_SEG_ERROR: |
278 | xfer->result = seg->result; | 265 | xfer->result = seg->result; |
279 | d_printf(2, NULL, "xfer %p#%u: ERROR result %zu\n", | 266 | dev_dbg(dev, "xfer %p#%u: ERROR result %zu\n", |
280 | xfer, seg->index, seg->result); | 267 | xfer, seg->index, seg->result); |
281 | goto out; | 268 | goto out; |
282 | case WA_SEG_ABORTED: | 269 | case WA_SEG_ABORTED: |
283 | WARN_ON(urb->status != -ECONNRESET | 270 | dev_dbg(dev, "xfer %p#%u ABORTED: result %d\n", |
284 | && urb->status != -ENOENT); | 271 | xfer, seg->index, urb->status); |
285 | d_printf(2, NULL, "xfer %p#%u ABORTED: result %d\n", | ||
286 | xfer, seg->index, urb->status); | ||
287 | xfer->result = urb->status; | 272 | xfer->result = urb->status; |
288 | goto out; | 273 | goto out; |
289 | default: | 274 | default: |
290 | /* if (printk_ratelimit()) */ | 275 | dev_warn(dev, "xfer %p#%u: is_done bad state %d\n", |
291 | printk(KERN_ERR "xfer %p#%u: " | 276 | xfer, cnt, seg->status); |
292 | "is_done bad state %d\n", | ||
293 | xfer, cnt, seg->status); | ||
294 | xfer->result = -EINVAL; | 277 | xfer->result = -EINVAL; |
295 | WARN_ON(1); | ||
296 | goto out; | 278 | goto out; |
297 | } | 279 | } |
298 | } | 280 | } |
299 | xfer->result = 0; | 281 | xfer->result = 0; |
300 | out: | 282 | out: |
301 | d_fnend(3, NULL, "(xfer %p) = void\n", xfer); | ||
302 | return result; | 283 | return result; |
303 | } | 284 | } |
304 | 285 | ||
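The short-read handling above encodes a USB invariant: on an IN transfer a short segment is legal only as the last segment that carries data, so any later segment with a payload marks the whole transfer -EINVAL. As a stand-alone sketch (names illustrative):

#include <linux/types.h>

/* Illustrative sketch of the short-segment rule. */
static int check_short_seg(size_t result, size_t seg_size,
			   unsigned idx, unsigned nsegs, int *found_short)
{
	if (*found_short && result > 0)
		return -EINVAL;		/* data after a short segment */
	if (result < seg_size && idx != nsegs - 1)
		*found_short = 1;	/* later segs must carry nothing */
	return 0;
}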
@@ -424,8 +405,6 @@ static ssize_t __wa_xfer_setup_sizes(struct wa_xfer *xfer, | |||
424 | struct urb *urb = xfer->urb; | 405 | struct urb *urb = xfer->urb; |
425 | struct wa_rpipe *rpipe = xfer->ep->hcpriv; | 406 | struct wa_rpipe *rpipe = xfer->ep->hcpriv; |
426 | 407 | ||
427 | d_fnstart(3, dev, "(xfer %p [rpipe %p] urb %p)\n", | ||
428 | xfer, rpipe, urb); | ||
429 | switch (rpipe->descr.bmAttribute & 0x3) { | 408 | switch (rpipe->descr.bmAttribute & 0x3) { |
430 | case USB_ENDPOINT_XFER_CONTROL: | 409 | case USB_ENDPOINT_XFER_CONTROL: |
431 | *pxfer_type = WA_XFER_TYPE_CTL; | 410 | *pxfer_type = WA_XFER_TYPE_CTL; |
@@ -472,12 +451,10 @@ static ssize_t __wa_xfer_setup_sizes(struct wa_xfer *xfer, | |||
472 | if (xfer->segs == 0 && *pxfer_type == WA_XFER_TYPE_CTL) | 451 | if (xfer->segs == 0 && *pxfer_type == WA_XFER_TYPE_CTL) |
473 | xfer->segs = 1; | 452 | xfer->segs = 1; |
474 | error: | 453 | error: |
475 | d_fnend(3, dev, "(xfer %p [rpipe %p] urb %p) = %d\n", | ||
476 | xfer, rpipe, urb, (int)result); | ||
477 | return result; | 454 | return result; |
478 | } | 455 | } |
479 | 456 | ||
480 | /** Fill in the common request header and xfer-type specific data. */ | 457 | /* Fill in the common request header and xfer-type specific data. */ |
481 | static void __wa_xfer_setup_hdr0(struct wa_xfer *xfer, | 458 | static void __wa_xfer_setup_hdr0(struct wa_xfer *xfer, |
482 | struct wa_xfer_hdr *xfer_hdr0, | 459 | struct wa_xfer_hdr *xfer_hdr0, |
483 | enum wa_xfer_type xfer_type, | 460 | enum wa_xfer_type xfer_type, |
@@ -534,14 +511,13 @@ static void wa_seg_dto_cb(struct urb *urb) | |||
534 | unsigned rpipe_ready = 0; | 511 | unsigned rpipe_ready = 0; |
535 | u8 done = 0; | 512 | u8 done = 0; |
536 | 513 | ||
537 | d_fnstart(3, NULL, "(urb %p [%d])\n", urb, urb->status); | ||
538 | switch (urb->status) { | 514 | switch (urb->status) { |
539 | case 0: | 515 | case 0: |
540 | spin_lock_irqsave(&xfer->lock, flags); | 516 | spin_lock_irqsave(&xfer->lock, flags); |
541 | wa = xfer->wa; | 517 | wa = xfer->wa; |
542 | dev = &wa->usb_iface->dev; | 518 | dev = &wa->usb_iface->dev; |
543 | d_printf(2, dev, "xfer %p#%u: data out done (%d bytes)\n", | 519 | dev_dbg(dev, "xfer %p#%u: data out done (%d bytes)\n", |
544 | xfer, seg->index, urb->actual_length); | 520 | xfer, seg->index, urb->actual_length); |
545 | if (seg->status < WA_SEG_PENDING) | 521 | if (seg->status < WA_SEG_PENDING) |
546 | seg->status = WA_SEG_PENDING; | 522 | seg->status = WA_SEG_PENDING; |
547 | seg->result = urb->actual_length; | 523 | seg->result = urb->actual_length; |
@@ -555,9 +531,8 @@ static void wa_seg_dto_cb(struct urb *urb) | |||
555 | wa = xfer->wa; | 531 | wa = xfer->wa; |
556 | dev = &wa->usb_iface->dev; | 532 | dev = &wa->usb_iface->dev; |
557 | rpipe = xfer->ep->hcpriv; | 533 | rpipe = xfer->ep->hcpriv; |
558 | if (printk_ratelimit()) | 534 | dev_dbg(dev, "xfer %p#%u: data out error %d\n", |
559 | dev_err(dev, "xfer %p#%u: data out error %d\n", | 535 | xfer, seg->index, urb->status); |
560 | xfer, seg->index, urb->status); | ||
561 | if (edc_inc(&wa->nep_edc, EDC_MAX_ERRORS, | 536 | if (edc_inc(&wa->nep_edc, EDC_MAX_ERRORS, |
562 | EDC_ERROR_TIMEFRAME)){ | 537 | EDC_ERROR_TIMEFRAME)){ |
563 | dev_err(dev, "DTO: URB max acceptable errors " | 538 | dev_err(dev, "DTO: URB max acceptable errors " |
@@ -578,7 +553,6 @@ static void wa_seg_dto_cb(struct urb *urb) | |||
578 | if (rpipe_ready) | 553 | if (rpipe_ready) |
579 | wa_xfer_delayed_run(rpipe); | 554 | wa_xfer_delayed_run(rpipe); |
580 | } | 555 | } |
581 | d_fnend(3, NULL, "(urb %p [%d]) = void\n", urb, urb->status); | ||
582 | } | 556 | } |
583 | 557 | ||
584 | /* | 558 | /* |
@@ -610,14 +584,12 @@ static void wa_seg_cb(struct urb *urb) | |||
610 | unsigned rpipe_ready; | 584 | unsigned rpipe_ready; |
611 | u8 done = 0; | 585 | u8 done = 0; |
612 | 586 | ||
613 | d_fnstart(3, NULL, "(urb %p [%d])\n", urb, urb->status); | ||
614 | switch (urb->status) { | 587 | switch (urb->status) { |
615 | case 0: | 588 | case 0: |
616 | spin_lock_irqsave(&xfer->lock, flags); | 589 | spin_lock_irqsave(&xfer->lock, flags); |
617 | wa = xfer->wa; | 590 | wa = xfer->wa; |
618 | dev = &wa->usb_iface->dev; | 591 | dev = &wa->usb_iface->dev; |
619 | d_printf(2, dev, "xfer %p#%u: request done\n", | 592 | dev_dbg(dev, "xfer %p#%u: request done\n", xfer, seg->index); |
620 | xfer, seg->index); | ||
621 | if (xfer->is_inbound && seg->status < WA_SEG_PENDING) | 593 | if (xfer->is_inbound && seg->status < WA_SEG_PENDING) |
622 | seg->status = WA_SEG_PENDING; | 594 | seg->status = WA_SEG_PENDING; |
623 | spin_unlock_irqrestore(&xfer->lock, flags); | 595 | spin_unlock_irqrestore(&xfer->lock, flags); |
@@ -652,7 +624,6 @@ static void wa_seg_cb(struct urb *urb) | |||
652 | if (rpipe_ready) | 624 | if (rpipe_ready) |
653 | wa_xfer_delayed_run(rpipe); | 625 | wa_xfer_delayed_run(rpipe); |
654 | } | 626 | } |
655 | d_fnend(3, NULL, "(urb %p [%d]) = void\n", urb, urb->status); | ||
656 | } | 627 | } |
657 | 628 | ||
658 | /* | 629 | /* |
@@ -750,9 +721,6 @@ static int __wa_xfer_setup(struct wa_xfer *xfer, struct urb *urb) | |||
750 | size_t xfer_hdr_size, cnt, transfer_size; | 721 | size_t xfer_hdr_size, cnt, transfer_size; |
751 | struct wa_xfer_hdr *xfer_hdr0, *xfer_hdr; | 722 | struct wa_xfer_hdr *xfer_hdr0, *xfer_hdr; |
752 | 723 | ||
753 | d_fnstart(3, dev, "(xfer %p [rpipe %p] urb %p)\n", | ||
754 | xfer, xfer->ep->hcpriv, urb); | ||
755 | |||
756 | result = __wa_xfer_setup_sizes(xfer, &xfer_type); | 724 | result = __wa_xfer_setup_sizes(xfer, &xfer_type); |
757 | if (result < 0) | 725 | if (result < 0) |
758 | goto error_setup_sizes; | 726 | goto error_setup_sizes; |
@@ -788,8 +756,6 @@ static int __wa_xfer_setup(struct wa_xfer *xfer, struct urb *urb) | |||
788 | result = 0; | 756 | result = 0; |
789 | error_setup_segs: | 757 | error_setup_segs: |
790 | error_setup_sizes: | 758 | error_setup_sizes: |
791 | d_fnend(3, dev, "(xfer %p [rpipe %p] urb %p) = %d\n", | ||
792 | xfer, xfer->ep->hcpriv, urb, result); | ||
793 | return result; | 759 | return result; |
794 | } | 760 | } |
795 | 761 | ||
@@ -843,9 +809,6 @@ static void wa_xfer_delayed_run(struct wa_rpipe *rpipe) | |||
843 | struct wa_xfer *xfer; | 809 | struct wa_xfer *xfer; |
844 | unsigned long flags; | 810 | unsigned long flags; |
845 | 811 | ||
846 | d_fnstart(1, dev, "(rpipe #%d) %d segments available\n", | ||
847 | le16_to_cpu(rpipe->descr.wRPipeIndex), | ||
848 | atomic_read(&rpipe->segs_available)); | ||
849 | spin_lock_irqsave(&rpipe->seg_lock, flags); | 812 | spin_lock_irqsave(&rpipe->seg_lock, flags); |
850 | while (atomic_read(&rpipe->segs_available) > 0 | 813 | while (atomic_read(&rpipe->segs_available) > 0 |
851 | && !list_empty(&rpipe->seg_list)) { | 814 | && !list_empty(&rpipe->seg_list)) { |
@@ -854,10 +817,8 @@ static void wa_xfer_delayed_run(struct wa_rpipe *rpipe) | |||
854 | list_del(&seg->list_node); | 817 | list_del(&seg->list_node); |
855 | xfer = seg->xfer; | 818 | xfer = seg->xfer; |
856 | result = __wa_seg_submit(rpipe, xfer, seg); | 819 | result = __wa_seg_submit(rpipe, xfer, seg); |
857 | d_printf(1, dev, "xfer %p#%u submitted from delayed " | 820 | dev_dbg(dev, "xfer %p#%u submitted from delayed [%d segments available] %d\n", |
858 | "[%d segments available] %d\n", | 821 | xfer, seg->index, atomic_read(&rpipe->segs_available), result); |
859 | xfer, seg->index, | ||
860 | atomic_read(&rpipe->segs_available), result); | ||
861 | if (unlikely(result < 0)) { | 822 | if (unlikely(result < 0)) { |
862 | spin_unlock_irqrestore(&rpipe->seg_lock, flags); | 823 | spin_unlock_irqrestore(&rpipe->seg_lock, flags); |
863 | spin_lock_irqsave(&xfer->lock, flags); | 824 | spin_lock_irqsave(&xfer->lock, flags); |
@@ -868,10 +829,6 @@ static void wa_xfer_delayed_run(struct wa_rpipe *rpipe) | |||
868 | } | 829 | } |
869 | } | 830 | } |
870 | spin_unlock_irqrestore(&rpipe->seg_lock, flags); | 831 | spin_unlock_irqrestore(&rpipe->seg_lock, flags); |
871 | d_fnend(1, dev, "(rpipe #%d) = void, %d segments available\n", | ||
872 | le16_to_cpu(rpipe->descr.wRPipeIndex), | ||
873 | atomic_read(&rpipe->segs_available)); | ||
874 | |||
875 | } | 832 | } |
876 | 833 | ||
877 | /* | 834 | /* |
@@ -894,9 +851,6 @@ static int __wa_xfer_submit(struct wa_xfer *xfer) | |||
894 | u8 available; | 851 | u8 available; |
895 | u8 empty; | 852 | u8 empty; |
896 | 853 | ||
897 | d_fnstart(3, dev, "(xfer %p [rpipe %p])\n", | ||
898 | xfer, xfer->ep->hcpriv); | ||
899 | |||
900 | spin_lock_irqsave(&wa->xfer_list_lock, flags); | 854 | spin_lock_irqsave(&wa->xfer_list_lock, flags); |
901 | list_add_tail(&xfer->list_node, &wa->xfer_list); | 855 | list_add_tail(&xfer->list_node, &wa->xfer_list); |
902 | spin_unlock_irqrestore(&wa->xfer_list_lock, flags); | 856 | spin_unlock_irqrestore(&wa->xfer_list_lock, flags); |
@@ -908,30 +862,24 @@ static int __wa_xfer_submit(struct wa_xfer *xfer) | |||
908 | available = atomic_read(&rpipe->segs_available); | 862 | available = atomic_read(&rpipe->segs_available); |
909 | empty = list_empty(&rpipe->seg_list); | 863 | empty = list_empty(&rpipe->seg_list); |
910 | seg = xfer->seg[cnt]; | 864 | seg = xfer->seg[cnt]; |
911 | d_printf(2, dev, "xfer %p#%u: available %u empty %u (%s)\n", | 865 | dev_dbg(dev, "xfer %p#%u: available %u empty %u (%s)\n", |
912 | xfer, cnt, available, empty, | 866 | xfer, cnt, available, empty, |
913 | available == 0 || !empty ? "delayed" : "submitted"); | 867 | available == 0 || !empty ? "delayed" : "submitted"); |
914 | if (available == 0 || !empty) { | 868 | if (available == 0 || !empty) { |
915 | d_printf(1, dev, "xfer %p#%u: delayed\n", xfer, cnt); | 869 | dev_dbg(dev, "xfer %p#%u: delayed\n", xfer, cnt); |
916 | seg->status = WA_SEG_DELAYED; | 870 | seg->status = WA_SEG_DELAYED; |
917 | list_add_tail(&seg->list_node, &rpipe->seg_list); | 871 | list_add_tail(&seg->list_node, &rpipe->seg_list); |
918 | } else { | 872 | } else { |
919 | result = __wa_seg_submit(rpipe, xfer, seg); | 873 | result = __wa_seg_submit(rpipe, xfer, seg); |
920 | if (result < 0) | 874 | if (result < 0) { |
875 | __wa_xfer_abort(xfer); | ||
921 | goto error_seg_submit; | 876 | goto error_seg_submit; |
877 | } | ||
922 | } | 878 | } |
923 | xfer->segs_submitted++; | 879 | xfer->segs_submitted++; |
924 | } | 880 | } |
925 | spin_unlock_irqrestore(&rpipe->seg_lock, flags); | ||
926 | d_fnend(3, dev, "(xfer %p [rpipe %p]) = void\n", xfer, | ||
927 | xfer->ep->hcpriv); | ||
928 | return result; | ||
929 | |||
930 | error_seg_submit: | 881 | error_seg_submit: |
931 | __wa_xfer_abort(xfer); | ||
932 | spin_unlock_irqrestore(&rpipe->seg_lock, flags); | 882 | spin_unlock_irqrestore(&rpipe->seg_lock, flags); |
933 | d_fnend(3, dev, "(xfer %p [rpipe %p]) = void\n", xfer, | ||
934 | xfer->ep->hcpriv); | ||
935 | return result; | 883 | return result; |
936 | } | 884 | } |
937 | 885 | ||
@@ -964,11 +912,9 @@ static void wa_urb_enqueue_b(struct wa_xfer *xfer) | |||
964 | struct urb *urb = xfer->urb; | 912 | struct urb *urb = xfer->urb; |
965 | struct wahc *wa = xfer->wa; | 913 | struct wahc *wa = xfer->wa; |
966 | struct wusbhc *wusbhc = wa->wusb; | 914 | struct wusbhc *wusbhc = wa->wusb; |
967 | struct device *dev = &wa->usb_iface->dev; | ||
968 | struct wusb_dev *wusb_dev; | 915 | struct wusb_dev *wusb_dev; |
969 | unsigned done; | 916 | unsigned done; |
970 | 917 | ||
971 | d_fnstart(3, dev, "(wa %p urb %p)\n", wa, urb); | ||
972 | result = rpipe_get_by_ep(wa, xfer->ep, urb, xfer->gfp); | 918 | result = rpipe_get_by_ep(wa, xfer->ep, urb, xfer->gfp); |
973 | if (result < 0) | 919 | if (result < 0) |
974 | goto error_rpipe_get; | 920 | goto error_rpipe_get; |
@@ -997,7 +943,6 @@ static void wa_urb_enqueue_b(struct wa_xfer *xfer) | |||
997 | if (result < 0) | 943 | if (result < 0) |
998 | goto error_xfer_submit; | 944 | goto error_xfer_submit; |
999 | spin_unlock_irqrestore(&xfer->lock, flags); | 945 | spin_unlock_irqrestore(&xfer->lock, flags); |
1000 | d_fnend(3, dev, "(wa %p urb %p) = void\n", wa, urb); | ||
1001 | return; | 946 | return; |
1002 | 947 | ||
1003 | /* this is basically wa_xfer_completion() broken up wa_xfer_giveback() | 948 | /* this is basically wa_xfer_completion() broken up wa_xfer_giveback() |
@@ -1015,7 +960,6 @@ error_dev_gone: | |||
1015 | error_rpipe_get: | 960 | error_rpipe_get: |
1016 | xfer->result = result; | 961 | xfer->result = result; |
1017 | wa_xfer_giveback(xfer); | 962 | wa_xfer_giveback(xfer); |
1018 | d_fnend(3, dev, "(wa %p urb %p) = (void) %d\n", wa, urb, result); | ||
1019 | return; | 963 | return; |
1020 | 964 | ||
1021 | error_xfer_submit: | 965 | error_xfer_submit: |
@@ -1024,8 +968,6 @@ error_xfer_submit: | |||
1024 | spin_unlock_irqrestore(&xfer->lock, flags); | 968 | spin_unlock_irqrestore(&xfer->lock, flags); |
1025 | if (done) | 969 | if (done) |
1026 | wa_xfer_completion(xfer); | 970 | wa_xfer_completion(xfer); |
1027 | d_fnend(3, dev, "(wa %p urb %p) = (void) %d\n", wa, urb, result); | ||
1028 | return; | ||
1029 | } | 971 | } |
1030 | 972 | ||
1031 | /* | 973 | /* |
@@ -1041,11 +983,9 @@ error_xfer_submit: | |||
1041 | void wa_urb_enqueue_run(struct work_struct *ws) | 983 | void wa_urb_enqueue_run(struct work_struct *ws) |
1042 | { | 984 | { |
1043 | struct wahc *wa = container_of(ws, struct wahc, xfer_work); | 985 | struct wahc *wa = container_of(ws, struct wahc, xfer_work); |
1044 | struct device *dev = &wa->usb_iface->dev; | ||
1045 | struct wa_xfer *xfer, *next; | 986 | struct wa_xfer *xfer, *next; |
1046 | struct urb *urb; | 987 | struct urb *urb; |
1047 | 988 | ||
1048 | d_fnstart(3, dev, "(wa %p)\n", wa); | ||
1049 | spin_lock_irq(&wa->xfer_list_lock); | 989 | spin_lock_irq(&wa->xfer_list_lock); |
1050 | list_for_each_entry_safe(xfer, next, &wa->xfer_delayed_list, | 990 | list_for_each_entry_safe(xfer, next, &wa->xfer_delayed_list, |
1051 | list_node) { | 991 | list_node) { |
@@ -1059,7 +999,6 @@ void wa_urb_enqueue_run(struct work_struct *ws) | |||
1059 | spin_lock_irq(&wa->xfer_list_lock); | 999 | spin_lock_irq(&wa->xfer_list_lock); |
1060 | } | 1000 | } |
1061 | spin_unlock_irq(&wa->xfer_list_lock); | 1001 | spin_unlock_irq(&wa->xfer_list_lock); |
1062 | d_fnend(3, dev, "(wa %p) = void\n", wa); | ||
1063 | } | 1002 | } |
1064 | EXPORT_SYMBOL_GPL(wa_urb_enqueue_run); | 1003 | EXPORT_SYMBOL_GPL(wa_urb_enqueue_run); |
1065 | 1004 | ||
@@ -1084,9 +1023,6 @@ int wa_urb_enqueue(struct wahc *wa, struct usb_host_endpoint *ep, | |||
1084 | unsigned long my_flags; | 1023 | unsigned long my_flags; |
1085 | unsigned cant_sleep = irqs_disabled() | in_atomic(); | 1024 | unsigned cant_sleep = irqs_disabled() | in_atomic(); |
1086 | 1025 | ||
1087 | d_fnstart(3, dev, "(wa %p ep %p urb %p [%d] gfp 0x%x)\n", | ||
1088 | wa, ep, urb, urb->transfer_buffer_length, gfp); | ||
1089 | |||
1090 | if (urb->transfer_buffer == NULL | 1026 | if (urb->transfer_buffer == NULL |
1091 | && !(urb->transfer_flags & URB_NO_TRANSFER_DMA_MAP) | 1027 | && !(urb->transfer_flags & URB_NO_TRANSFER_DMA_MAP) |
1092 | && urb->transfer_buffer_length != 0) { | 1028 | && urb->transfer_buffer_length != 0) { |
@@ -1108,11 +1044,13 @@ int wa_urb_enqueue(struct wahc *wa, struct usb_host_endpoint *ep, | |||
1108 | xfer->gfp = gfp; | 1044 | xfer->gfp = gfp; |
1109 | xfer->ep = ep; | 1045 | xfer->ep = ep; |
1110 | urb->hcpriv = xfer; | 1046 | urb->hcpriv = xfer; |
1111 | d_printf(2, dev, "xfer %p urb %p pipe 0x%02x [%d bytes] %s %s %s\n", | 1047 | |
1112 | xfer, urb, urb->pipe, urb->transfer_buffer_length, | 1048 | dev_dbg(dev, "xfer %p urb %p pipe 0x%02x [%d bytes] %s %s %s\n", |
1113 | urb->transfer_flags & URB_NO_TRANSFER_DMA_MAP ? "dma" : "nodma", | 1049 | xfer, urb, urb->pipe, urb->transfer_buffer_length, |
1114 | urb->pipe & USB_DIR_IN ? "inbound" : "outbound", | 1050 | urb->transfer_flags & URB_NO_TRANSFER_DMA_MAP ? "dma" : "nodma", |
1115 | cant_sleep ? "deferred" : "inline"); | 1051 | urb->pipe & USB_DIR_IN ? "inbound" : "outbound", |
1052 | cant_sleep ? "deferred" : "inline"); | ||
1053 | |||
1116 | if (cant_sleep) { | 1054 | if (cant_sleep) { |
1117 | usb_get_urb(urb); | 1055 | usb_get_urb(urb); |
1118 | spin_lock_irqsave(&wa->xfer_list_lock, my_flags); | 1056 | spin_lock_irqsave(&wa->xfer_list_lock, my_flags); |
@@ -1122,15 +1060,11 @@ int wa_urb_enqueue(struct wahc *wa, struct usb_host_endpoint *ep, | |||
1122 | } else { | 1060 | } else { |
1123 | wa_urb_enqueue_b(xfer); | 1061 | wa_urb_enqueue_b(xfer); |
1124 | } | 1062 | } |
1125 | d_fnend(3, dev, "(wa %p ep %p urb %p [%d] gfp 0x%x) = 0\n", | ||
1126 | wa, ep, urb, urb->transfer_buffer_length, gfp); | ||
1127 | return 0; | 1063 | return 0; |
1128 | 1064 | ||
1129 | error_dequeued: | 1065 | error_dequeued: |
1130 | kfree(xfer); | 1066 | kfree(xfer); |
1131 | error_kmalloc: | 1067 | error_kmalloc: |
1132 | d_fnend(3, dev, "(wa %p ep %p urb %p [%d] gfp 0x%x) = %d\n", | ||
1133 | wa, ep, urb, urb->transfer_buffer_length, gfp, result); | ||
1134 | return result; | 1068 | return result; |
1135 | } | 1069 | } |
1136 | EXPORT_SYMBOL_GPL(wa_urb_enqueue); | 1070 | EXPORT_SYMBOL_GPL(wa_urb_enqueue); |
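wa_urb_enqueue() may be entered from atomic context -- detected with irqs_disabled() | in_atomic() -- in which case it cannot call the sleeping wa_urb_enqueue_b() directly; it takes a URB reference and parks the transfer for wa_urb_enqueue_run() instead. A sketch of that split (whether the work item is kicked at this exact point is an assumption of the sketch):

#include <linux/usb.h>
#include <linux/workqueue.h>
#include "wa-hc.h"	/* struct wahc, struct wa_xfer, wa_urb_enqueue_b() */

/* Sketch: run inline when we may sleep, else defer to the workqueue. */
static void enqueue_inline_or_deferred(struct wahc *wa, struct wa_xfer *xfer,
				       struct urb *urb, int cant_sleep)
{
	unsigned long flags;

	if (cant_sleep) {
		usb_get_urb(urb);	/* the delayed list holds a ref */
		spin_lock_irqsave(&wa->xfer_list_lock, flags);
		list_add_tail(&xfer->list_node, &wa->xfer_delayed_list);
		spin_unlock_irqrestore(&wa->xfer_list_lock, flags);
		schedule_work(&wa->xfer_work);	/* -> wa_urb_enqueue_run() */
	} else {
		wa_urb_enqueue_b(xfer);		/* process context: inline */
	}
}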
@@ -1155,7 +1089,6 @@ EXPORT_SYMBOL_GPL(wa_urb_enqueue); | |||
1155 | */ | 1089 | */ |
1156 | int wa_urb_dequeue(struct wahc *wa, struct urb *urb) | 1090 | int wa_urb_dequeue(struct wahc *wa, struct urb *urb) |
1157 | { | 1091 | { |
1158 | struct device *dev = &wa->usb_iface->dev; | ||
1159 | unsigned long flags, flags2; | 1092 | unsigned long flags, flags2; |
1160 | struct wa_xfer *xfer; | 1093 | struct wa_xfer *xfer; |
1161 | struct wa_seg *seg; | 1094 | struct wa_seg *seg; |
@@ -1163,9 +1096,6 @@ int wa_urb_dequeue(struct wahc *wa, struct urb *urb) | |||
1163 | unsigned cnt; | 1096 | unsigned cnt; |
1164 | unsigned rpipe_ready = 0; | 1097 | unsigned rpipe_ready = 0; |
1165 | 1098 | ||
1166 | d_fnstart(3, dev, "(wa %p, urb %p)\n", wa, urb); | ||
1167 | |||
1168 | d_printf(1, dev, "xfer %p urb %p: aborting\n", urb->hcpriv, urb); | ||
1169 | xfer = urb->hcpriv; | 1099 | xfer = urb->hcpriv; |
1170 | if (xfer == NULL) { | 1100 | if (xfer == NULL) { |
1171 | /* Nothing setup yet enqueue will see urb->status != | 1101 |
@@ -1234,13 +1164,11 @@ int wa_urb_dequeue(struct wahc *wa, struct urb *urb) | |||
1234 | wa_xfer_completion(xfer); | 1164 | wa_xfer_completion(xfer); |
1235 | if (rpipe_ready) | 1165 | if (rpipe_ready) |
1236 | wa_xfer_delayed_run(rpipe); | 1166 | wa_xfer_delayed_run(rpipe); |
1237 | d_fnend(3, dev, "(wa %p, urb %p) = 0\n", wa, urb); | ||
1238 | return 0; | 1167 | return 0; |
1239 | 1168 | ||
1240 | out_unlock: | 1169 | out_unlock: |
1241 | spin_unlock_irqrestore(&xfer->lock, flags); | 1170 | spin_unlock_irqrestore(&xfer->lock, flags); |
1242 | out: | 1171 | out: |
1243 | d_fnend(3, dev, "(wa %p, urb %p) = 0\n", wa, urb); | ||
1244 | return 0; | 1172 | return 0; |
1245 | 1173 | ||
1246 | dequeue_delayed: | 1174 | dequeue_delayed: |
@@ -1250,7 +1178,6 @@ dequeue_delayed: | |||
1250 | spin_unlock_irqrestore(&xfer->lock, flags); | 1178 | spin_unlock_irqrestore(&xfer->lock, flags); |
1251 | wa_xfer_giveback(xfer); | 1179 | wa_xfer_giveback(xfer); |
1252 | usb_put_urb(urb); /* we got a ref in enqueue() */ | 1180 | usb_put_urb(urb); /* we got a ref in enqueue() */ |
1253 | d_fnend(3, dev, "(wa %p, urb %p) = 0\n", wa, urb); | ||
1254 | return 0; | 1181 | return 0; |
1255 | } | 1182 | } |
1256 | EXPORT_SYMBOL_GPL(wa_urb_dequeue); | 1183 | EXPORT_SYMBOL_GPL(wa_urb_dequeue); |
@@ -1326,7 +1253,6 @@ static void wa_xfer_result_chew(struct wahc *wa, struct wa_xfer *xfer) | |||
1326 | u8 usb_status; | 1253 | u8 usb_status; |
1327 | unsigned rpipe_ready = 0; | 1254 | unsigned rpipe_ready = 0; |
1328 | 1255 | ||
1329 | d_fnstart(3, dev, "(wa %p xfer %p)\n", wa, xfer); | ||
1330 | spin_lock_irqsave(&xfer->lock, flags); | 1256 | spin_lock_irqsave(&xfer->lock, flags); |
1331 | seg_idx = xfer_result->bTransferSegment & 0x7f; | 1257 | seg_idx = xfer_result->bTransferSegment & 0x7f; |
1332 | if (unlikely(seg_idx >= xfer->segs)) | 1258 | if (unlikely(seg_idx >= xfer->segs)) |
@@ -1334,8 +1260,8 @@ static void wa_xfer_result_chew(struct wahc *wa, struct wa_xfer *xfer) | |||
1334 | seg = xfer->seg[seg_idx]; | 1260 | seg = xfer->seg[seg_idx]; |
1335 | rpipe = xfer->ep->hcpriv; | 1261 | rpipe = xfer->ep->hcpriv; |
1336 | usb_status = xfer_result->bTransferStatus; | 1262 | usb_status = xfer_result->bTransferStatus; |
1337 | d_printf(2, dev, "xfer %p#%u: bTransferStatus 0x%02x (seg %u)\n", | 1263 | dev_dbg(dev, "xfer %p#%u: bTransferStatus 0x%02x (seg %u)\n", |
1338 | xfer, seg_idx, usb_status, seg->status); | 1264 | xfer, seg_idx, usb_status, seg->status); |
1339 | if (seg->status == WA_SEG_ABORTED | 1265 | if (seg->status == WA_SEG_ABORTED |
1340 | || seg->status == WA_SEG_ERROR) /* already handled */ | 1266 | || seg->status == WA_SEG_ERROR) /* already handled */ |
1341 | goto segment_aborted; | 1267 | goto segment_aborted; |
@@ -1391,10 +1317,8 @@ static void wa_xfer_result_chew(struct wahc *wa, struct wa_xfer *xfer) | |||
1391 | wa_xfer_completion(xfer); | 1317 | wa_xfer_completion(xfer); |
1392 | if (rpipe_ready) | 1318 | if (rpipe_ready) |
1393 | wa_xfer_delayed_run(rpipe); | 1319 | wa_xfer_delayed_run(rpipe); |
1394 | d_fnend(3, dev, "(wa %p xfer %p) = void\n", wa, xfer); | ||
1395 | return; | 1320 | return; |
1396 | 1321 | ||
1397 | |||
1398 | error_submit_buf_in: | 1322 | error_submit_buf_in: |
1399 | if (edc_inc(&wa->dti_edc, EDC_MAX_ERRORS, EDC_ERROR_TIMEFRAME)) { | 1323 | if (edc_inc(&wa->dti_edc, EDC_MAX_ERRORS, EDC_ERROR_TIMEFRAME)) { |
1400 | dev_err(dev, "DTI: URB max acceptable errors " | 1324 | dev_err(dev, "DTI: URB max acceptable errors " |
@@ -1416,11 +1340,8 @@ error_complete: | |||
1416 | wa_xfer_completion(xfer); | 1340 | wa_xfer_completion(xfer); |
1417 | if (rpipe_ready) | 1341 | if (rpipe_ready) |
1418 | wa_xfer_delayed_run(rpipe); | 1342 | wa_xfer_delayed_run(rpipe); |
1419 | d_fnend(3, dev, "(wa %p xfer %p) = void [segment/DTI-submit error]\n", | ||
1420 | wa, xfer); | ||
1421 | return; | 1343 | return; |
1422 | 1344 | ||
1423 | |||
1424 | error_bad_seg: | 1345 | error_bad_seg: |
1425 | spin_unlock_irqrestore(&xfer->lock, flags); | 1346 | spin_unlock_irqrestore(&xfer->lock, flags); |
1426 | wa_urb_dequeue(wa, xfer->urb); | 1347 | wa_urb_dequeue(wa, xfer->urb); |
@@ -1431,17 +1352,11 @@ error_bad_seg: | |||
1431 | "exceeded, resetting device\n"); | 1352 | "exceeded, resetting device\n"); |
1432 | wa_reset_all(wa); | 1353 | wa_reset_all(wa); |
1433 | } | 1354 | } |
1434 | d_fnend(3, dev, "(wa %p xfer %p) = void [bad seg]\n", wa, xfer); | ||
1435 | return; | 1355 | return; |
1436 | 1356 | ||
1437 | |||
1438 | segment_aborted: | 1357 | segment_aborted: |
1439 | /* nothing to do, as the aborter did the completion */ | 1358 | /* nothing to do, as the aborter did the completion */ |
1440 | spin_unlock_irqrestore(&xfer->lock, flags); | 1359 | spin_unlock_irqrestore(&xfer->lock, flags); |
1441 | d_fnend(3, dev, "(wa %p xfer %p) = void [segment aborted]\n", | ||
1442 | wa, xfer); | ||
1443 | return; | ||
1444 | |||
1445 | } | 1360 | } |
1446 | 1361 | ||
1447 | /* | 1362 | /* |
@@ -1465,15 +1380,14 @@ static void wa_buf_in_cb(struct urb *urb) | |||
1465 | unsigned long flags; | 1380 | unsigned long flags; |
1466 | u8 done = 0; | 1381 | u8 done = 0; |
1467 | 1382 | ||
1468 | d_fnstart(3, NULL, "(urb %p [%d])\n", urb, urb->status); | ||
1469 | switch (urb->status) { | 1383 | switch (urb->status) { |
1470 | case 0: | 1384 | case 0: |
1471 | spin_lock_irqsave(&xfer->lock, flags); | 1385 | spin_lock_irqsave(&xfer->lock, flags); |
1472 | wa = xfer->wa; | 1386 | wa = xfer->wa; |
1473 | dev = &wa->usb_iface->dev; | 1387 | dev = &wa->usb_iface->dev; |
1474 | rpipe = xfer->ep->hcpriv; | 1388 | rpipe = xfer->ep->hcpriv; |
1475 | d_printf(2, dev, "xfer %p#%u: data in done (%zu bytes)\n", | 1389 | dev_dbg(dev, "xfer %p#%u: data in done (%zu bytes)\n", |
1476 | xfer, seg->index, (size_t)urb->actual_length); | 1390 | xfer, seg->index, (size_t)urb->actual_length); |
1477 | seg->status = WA_SEG_DONE; | 1391 | seg->status = WA_SEG_DONE; |
1478 | seg->result = urb->actual_length; | 1392 | seg->result = urb->actual_length; |
1479 | xfer->segs_done++; | 1393 | xfer->segs_done++; |
@@ -1514,7 +1428,6 @@ static void wa_buf_in_cb(struct urb *urb) | |||
1514 | if (rpipe_ready) | 1428 | if (rpipe_ready) |
1515 | wa_xfer_delayed_run(rpipe); | 1429 | wa_xfer_delayed_run(rpipe); |
1516 | } | 1430 | } |
1517 | d_fnend(3, NULL, "(urb %p [%d]) = void\n", urb, urb->status); | ||
1518 | } | 1431 | } |
1519 | 1432 | ||
1520 | /* | 1433 | /* |
@@ -1553,14 +1466,12 @@ static void wa_xfer_result_cb(struct urb *urb) | |||
1553 | struct wa_xfer *xfer; | 1466 | struct wa_xfer *xfer; |
1554 | u8 usb_status; | 1467 | u8 usb_status; |
1555 | 1468 | ||
1556 | d_fnstart(3, dev, "(%p)\n", wa); | ||
1557 | BUG_ON(wa->dti_urb != urb); | 1469 | BUG_ON(wa->dti_urb != urb); |
1558 | switch (wa->dti_urb->status) { | 1470 | switch (wa->dti_urb->status) { |
1559 | case 0: | 1471 | case 0: |
1560 | /* We have a xfer result buffer; check it */ | 1472 | /* We have a xfer result buffer; check it */ |
1561 | d_printf(2, dev, "DTI: xfer result %d bytes at %p\n", | 1473 | dev_dbg(dev, "DTI: xfer result %d bytes at %p\n", |
1562 | urb->actual_length, urb->transfer_buffer); | 1474 | urb->actual_length, urb->transfer_buffer); |
1563 | d_dump(3, dev, urb->transfer_buffer, urb->actual_length); | ||
1564 | if (wa->dti_urb->actual_length != sizeof(*xfer_result)) { | 1475 | if (wa->dti_urb->actual_length != sizeof(*xfer_result)) { |
1565 | dev_err(dev, "DTI Error: xfer result--bad size " | 1476 | dev_err(dev, "DTI Error: xfer result--bad size " |
1566 | "xfer result (%d bytes vs %zu needed)\n", | 1477 | "xfer result (%d bytes vs %zu needed)\n", |
@@ -1622,7 +1533,6 @@ static void wa_xfer_result_cb(struct urb *urb) | |||
1622 | wa_reset_all(wa); | 1533 | wa_reset_all(wa); |
1623 | } | 1534 | } |
1624 | out: | 1535 | out: |
1625 | d_fnend(3, dev, "(%p) = void\n", wa); | ||
1626 | return; | 1536 | return; |
1627 | } | 1537 | } |
1628 | 1538 | ||
@@ -1653,7 +1563,6 @@ void wa_handle_notif_xfer(struct wahc *wa, struct wa_notif_hdr *notif_hdr) | |||
1653 | struct wa_notif_xfer *notif_xfer; | 1563 | struct wa_notif_xfer *notif_xfer; |
1654 | const struct usb_endpoint_descriptor *dti_epd = wa->dti_epd; | 1564 | const struct usb_endpoint_descriptor *dti_epd = wa->dti_epd; |
1655 | 1565 | ||
1656 | d_fnstart(4, dev, "(%p, %p)\n", wa, notif_hdr); | ||
1657 | notif_xfer = container_of(notif_hdr, struct wa_notif_xfer, hdr); | 1566 | notif_xfer = container_of(notif_hdr, struct wa_notif_xfer, hdr); |
1658 | BUG_ON(notif_hdr->bNotifyType != WA_NOTIF_TRANSFER); | 1567 | BUG_ON(notif_hdr->bNotifyType != WA_NOTIF_TRANSFER); |
1659 | 1568 | ||
@@ -1693,7 +1602,6 @@ void wa_handle_notif_xfer(struct wahc *wa, struct wa_notif_hdr *notif_hdr) | |||
1693 | goto error_dti_urb_submit; | 1602 | goto error_dti_urb_submit; |
1694 | } | 1603 | } |
1695 | out: | 1604 | out: |
1696 | d_fnend(4, dev, "(%p, %p) = void\n", wa, notif_hdr); | ||
1697 | return; | 1605 | return; |
1698 | 1606 | ||
1699 | error_dti_urb_submit: | 1607 | error_dti_urb_submit: |
@@ -1704,6 +1612,4 @@ error_buf_in_urb_alloc: | |||
1704 | error_dti_urb_alloc: | 1612 | error_dti_urb_alloc: |
1705 | error: | 1613 | error: |
1706 | wa_reset_all(wa); | 1614 | wa_reset_all(wa); |
1707 | d_fnend(4, dev, "(%p, %p) = void\n", wa, notif_hdr); | ||
1708 | return; | ||
1709 | } | 1615 | } |
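
The wa-xfer.c hunks above replace the driver-private d_printf()/d_fnstart()/d_fnend() tracing with the kernel's standard dev_dbg(). A minimal sketch of the resulting pattern (the helper name below is made up for illustration; the struct names are those used in wa-xfer.c):

    /* dev_dbg() is compiled out unless DEBUG or dynamic debug is enabled,
     * so the verbosity level carried by the removed d_printf(level, ...)
     * calls is no longer needed. */
    static void wa_seg_log_done(struct device *dev, struct wa_xfer *xfer,
                                struct wa_seg *seg, struct urb *urb)
    {
            dev_dbg(dev, "xfer %p#%u: data in done (%zu bytes)\n",
                    xfer, seg->index, (size_t)urb->actual_length);
    }
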
diff --git a/drivers/usb/wusbcore/wusbhc.h b/drivers/usb/wusbcore/wusbhc.h index d0c132434f1b..797c2453a35b 100644 --- a/drivers/usb/wusbcore/wusbhc.h +++ b/drivers/usb/wusbcore/wusbhc.h | |||
@@ -64,6 +64,13 @@ | |||
64 | #include <linux/uwb.h> | 64 | #include <linux/uwb.h> |
65 | #include <linux/usb/wusb.h> | 65 | #include <linux/usb/wusb.h> |
66 | 66 | ||
67 | /* | ||
68 | * Time from a WUSB channel stop request to the last transmitted MMC. | ||
69 | * | ||
70 | * This needs to be > 4.096 ms in case no MMCs can be transmitted in | ||
71 | * zone 0. | ||
72 | */ | ||
73 | #define WUSB_CHANNEL_STOP_DELAY_MS 8 | ||
67 | 74 | ||
68 | /** | 75 | /** |
69 | * Wireless USB device | 76 | * Wireless USB device |
@@ -147,7 +154,6 @@ struct wusb_port { | |||
147 | u16 status; | 154 | u16 status; |
148 | u16 change; | 155 | u16 change; |
149 | struct wusb_dev *wusb_dev; /* connected device's info */ | 156 | struct wusb_dev *wusb_dev; /* connected device's info */ |
150 | unsigned reset_count; | ||
151 | u32 ptk_tkid; | 157 | u32 ptk_tkid; |
152 | }; | 158 | }; |
153 | 159 | ||
@@ -198,21 +204,18 @@ struct wusb_port { | |||
198 | * @mmcies_max Max number of Information Elements this HC can send | 204 | * @mmcies_max Max number of Information Elements this HC can send |
199 | * in its MMC. Read-only. | 205 | * in its MMC. Read-only. |
200 | * | 206 | * |
207 | * @start Start the WUSB channel. | ||
208 | * | ||
209 | * @stop Stop the WUSB channel after the specified number of | ||
210 | * milliseconds. Channel Stop IEs should be transmitted | ||
211 | * as required by [WUSB] 4.16.2.1. | ||
212 | * | ||
201 | * @mmcie_add HC specific operation (WHCI or HWA) for adding an | 213 | * @mmcie_add HC specific operation (WHCI or HWA) for adding an |
202 | * MMCIE. | 214 | * MMCIE. |
203 | * | 215 | * |
204 | * @mmcie_rm HC specific operation (WHCI or HWA) for removing an | 216 | * @mmcie_rm HC specific operation (WHCI or HWA) for removing an |
205 | * MMCIE. | 217 | * MMCIE. |
206 | * | 218 | * |
207 | * @enc_types Array which describes the encryptions methods | ||
208 | * supported by the host as described in WUSB1.0 -- | ||
209 | * one entry per supported method. As of WUSB1.0 there | ||
210 | * is only four methods, we make space for eight just in | ||
211 | * case they decide to add some more (and pray they do | ||
212 | * it in sequential order). if 'enc_types[enc_method] | ||
213 | * != 0', then it is supported by the host. enc_method | ||
214 | * is USB_ENC_TYPE*. | ||
215 | * | ||
216 | * @set_ptk: Set the PTK and enable encryption for a device. Or, if | 219 | * @set_ptk: Set the PTK and enable encryption for a device. Or, if |
217 | * the supplied key is NULL, disable encryption for that | 220 | * the supplied key is NULL, disable encryption for that |
218 | * device. | 221 | * device. |
@@ -249,7 +252,8 @@ struct wusbhc { | |||
249 | struct uwb_pal pal; | 252 | struct uwb_pal pal; |
250 | 253 | ||
251 | unsigned trust_timeout; /* in jiffies */ | 254 | unsigned trust_timeout; /* in jiffies */ |
252 | struct wuie_host_info *wuie_host_info; /* Includes CHID */ | 255 | struct wusb_ckhdid chid; |
256 | struct wuie_host_info *wuie_host_info; | ||
253 | 257 | ||
254 | struct mutex mutex; /* locks everything else */ | 258 | struct mutex mutex; /* locks everything else */ |
255 | u16 cluster_id; /* Wireless USB Cluster ID */ | 259 | u16 cluster_id; /* Wireless USB Cluster ID */ |
@@ -269,7 +273,7 @@ struct wusbhc { | |||
269 | u8 mmcies_max; | 273 | u8 mmcies_max; |
270 | /* FIXME: make wusbhc_ops? */ | 274 | /* FIXME: make wusbhc_ops? */ |
271 | int (*start)(struct wusbhc *wusbhc); | 275 | int (*start)(struct wusbhc *wusbhc); |
272 | void (*stop)(struct wusbhc *wusbhc); | 276 | void (*stop)(struct wusbhc *wusbhc, int delay); |
273 | int (*mmcie_add)(struct wusbhc *wusbhc, u8 interval, u8 repeat_cnt, | 277 | int (*mmcie_add)(struct wusbhc *wusbhc, u8 interval, u8 repeat_cnt, |
274 | u8 handle, struct wuie_hdr *wuie); | 278 | u8 handle, struct wuie_hdr *wuie); |
275 | int (*mmcie_rm)(struct wusbhc *wusbhc, u8 handle); | 279 | int (*mmcie_rm)(struct wusbhc *wusbhc, u8 handle); |
@@ -373,20 +377,17 @@ static inline void wusbhc_put(struct wusbhc *wusbhc) | |||
373 | usb_put_hcd(&wusbhc->usb_hcd); | 377 | usb_put_hcd(&wusbhc->usb_hcd); |
374 | } | 378 | } |
375 | 379 | ||
376 | int wusbhc_start(struct wusbhc *wusbhc, const struct wusb_ckhdid *chid); | 380 | int wusbhc_start(struct wusbhc *wusbhc); |
377 | void wusbhc_stop(struct wusbhc *wusbhc); | 381 | void wusbhc_stop(struct wusbhc *wusbhc); |
378 | extern int wusbhc_chid_set(struct wusbhc *, const struct wusb_ckhdid *); | 382 | extern int wusbhc_chid_set(struct wusbhc *, const struct wusb_ckhdid *); |
379 | 383 | ||
380 | /* Device connect handling */ | 384 | /* Device connect handling */ |
381 | extern int wusbhc_devconnect_create(struct wusbhc *); | 385 | extern int wusbhc_devconnect_create(struct wusbhc *); |
382 | extern void wusbhc_devconnect_destroy(struct wusbhc *); | 386 | extern void wusbhc_devconnect_destroy(struct wusbhc *); |
383 | extern int wusbhc_devconnect_start(struct wusbhc *wusbhc, | 387 | extern int wusbhc_devconnect_start(struct wusbhc *wusbhc); |
384 | const struct wusb_ckhdid *chid); | ||
385 | extern void wusbhc_devconnect_stop(struct wusbhc *wusbhc); | 388 | extern void wusbhc_devconnect_stop(struct wusbhc *wusbhc); |
386 | extern int wusbhc_devconnect_auth(struct wusbhc *, u8); | ||
387 | extern void wusbhc_handle_dn(struct wusbhc *, u8 srcaddr, | 389 | extern void wusbhc_handle_dn(struct wusbhc *, u8 srcaddr, |
388 | struct wusb_dn_hdr *dn_hdr, size_t size); | 390 | struct wusb_dn_hdr *dn_hdr, size_t size); |
389 | extern int wusbhc_dev_reset(struct wusbhc *wusbhc, u8 port); | ||
390 | extern void __wusbhc_dev_disable(struct wusbhc *wusbhc, u8 port); | 391 | extern void __wusbhc_dev_disable(struct wusbhc *wusbhc, u8 port); |
391 | extern int wusb_usb_ncb(struct notifier_block *nb, unsigned long val, | 392 | extern int wusb_usb_ncb(struct notifier_block *nb, unsigned long val, |
392 | void *priv); | 393 | void *priv); |
@@ -432,6 +433,7 @@ extern void wusb_dev_sec_rm(struct wusb_dev *) ; | |||
432 | extern int wusb_dev_4way_handshake(struct wusbhc *, struct wusb_dev *, | 433 | extern int wusb_dev_4way_handshake(struct wusbhc *, struct wusb_dev *, |
433 | struct wusb_ckhdid *ck); | 434 | struct wusb_ckhdid *ck); |
434 | void wusbhc_gtk_rekey(struct wusbhc *wusbhc); | 435 | void wusbhc_gtk_rekey(struct wusbhc *wusbhc); |
436 | int wusb_dev_update_address(struct wusbhc *wusbhc, struct wusb_dev *wusb_dev); | ||
435 | 437 | ||
436 | 438 | ||
437 | /* WUSB Cluster ID handling */ | 439 | /* WUSB Cluster ID handling */ |
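
The new @stop op and WUSB_CHANNEL_STOP_DELAY_MS are meant to work together: the channel stays up long enough to transmit the Channel Stop IEs required by [WUSB] 4.16.2.1. A hedged sketch of a caller, assuming wusbhc_stop() simply forwards the constant (the call site is not part of these hunks):

    /* Sketch: hand the stop delay to the HC-specific (WHCI or HWA) op.
     * The 8 ms delay exceeds 4.096 ms so at least one MMC can still be
     * transmitted even when none fit in zone 0. */
    static void wusbhc_stop_sketch(struct wusbhc *wusbhc)
    {
            wusbhc->stop(wusbhc, WUSB_CHANNEL_STOP_DELAY_MS);
    }
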
diff --git a/drivers/uwb/Makefile b/drivers/uwb/Makefile index 257e6908304c..2f98d080fe78 100644 --- a/drivers/uwb/Makefile +++ b/drivers/uwb/Makefile | |||
@@ -6,6 +6,7 @@ obj-$(CONFIG_UWB_I1480U) += i1480/ | |||
6 | 6 | ||
7 | uwb-objs := \ | 7 | uwb-objs := \ |
8 | address.o \ | 8 | address.o \ |
9 | allocator.o \ | ||
9 | beacon.o \ | 10 | beacon.o \ |
10 | driver.o \ | 11 | driver.o \ |
11 | drp.o \ | 12 | drp.o \ |
@@ -13,10 +14,12 @@ uwb-objs := \ | |||
13 | drp-ie.o \ | 14 | drp-ie.o \ |
14 | est.o \ | 15 | est.o \ |
15 | ie.o \ | 16 | ie.o \ |
17 | ie-rcv.o \ | ||
16 | lc-dev.o \ | 18 | lc-dev.o \ |
17 | lc-rc.o \ | 19 | lc-rc.o \ |
18 | neh.o \ | 20 | neh.o \ |
19 | pal.o \ | 21 | pal.o \ |
22 | radio.o \ | ||
20 | reset.o \ | 23 | reset.o \ |
21 | rsv.o \ | 24 | rsv.o \ |
22 | scan.o \ | 25 | scan.o \ |
diff --git a/drivers/uwb/address.c b/drivers/uwb/address.c index 1664ae5f1706..ad21b1d7218c 100644 --- a/drivers/uwb/address.c +++ b/drivers/uwb/address.c | |||
@@ -28,7 +28,7 @@ | |||
28 | #include <linux/device.h> | 28 | #include <linux/device.h> |
29 | #include <linux/random.h> | 29 | #include <linux/random.h> |
30 | #include <linux/etherdevice.h> | 30 | #include <linux/etherdevice.h> |
31 | #include <linux/uwb/debug.h> | 31 | |
32 | #include "uwb-internal.h" | 32 | #include "uwb-internal.h" |
33 | 33 | ||
34 | 34 | ||
diff --git a/drivers/uwb/allocator.c b/drivers/uwb/allocator.c new file mode 100644 index 000000000000..c8185e6b0cd5 --- /dev/null +++ b/drivers/uwb/allocator.c | |||
@@ -0,0 +1,386 @@ | |||
1 | /* | ||
2 | * UWB reservation management. | ||
3 | * | ||
4 | * Copyright (C) 2008 Cambridge Silicon Radio Ltd. | ||
5 | * | ||
6 | * This program is free software; you can redistribute it and/or | ||
7 | * modify it under the terms of the GNU General Public License version | ||
8 | * 2 as published by the Free Software Foundation. | ||
9 | * | ||
10 | * This program is distributed in the hope that it will be useful, | ||
11 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
12 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
13 | * GNU General Public License for more details. | ||
14 | * | ||
15 | * You should have received a copy of the GNU General Public License | ||
16 | * along with this program. If not, see <http://www.gnu.org/licenses/>. | ||
17 | */ | ||
18 | #include <linux/version.h> | ||
19 | #include <linux/kernel.h> | ||
20 | #include <linux/uwb.h> | ||
21 | |||
22 | #include "uwb-internal.h" | ||
23 | |||
24 | static void uwb_rsv_fill_column_alloc(struct uwb_rsv_alloc_info *ai) | ||
25 | { | ||
26 | int col, mas, safe_mas, unsafe_mas; | ||
27 | unsigned char *bm = ai->bm; | ||
28 | struct uwb_rsv_col_info *ci = ai->ci; | ||
29 | unsigned char c; | ||
30 | |||
31 | for (col = ci->csi.start_col; col < UWB_NUM_ZONES; col += ci->csi.interval) { | ||
32 | |||
33 | safe_mas = ci->csi.safe_mas_per_col; | ||
34 | unsafe_mas = ci->csi.unsafe_mas_per_col; | ||
35 | |||
36 | for (mas = 0; mas < UWB_MAS_PER_ZONE; mas++) { | ||
37 | if (bm[col * UWB_MAS_PER_ZONE + mas] == 0) { | ||
38 | |||
39 | if (safe_mas > 0) { | ||
40 | safe_mas--; | ||
41 | c = UWB_RSV_MAS_SAFE; | ||
42 | } else if (unsafe_mas > 0) { | ||
43 | unsafe_mas--; | ||
44 | c = UWB_RSV_MAS_UNSAFE; | ||
45 | } else { | ||
46 | break; | ||
47 | } | ||
48 | bm[col * UWB_MAS_PER_ZONE + mas] = c; | ||
49 | } | ||
50 | } | ||
51 | } | ||
52 | } | ||
53 | |||
54 | static void uwb_rsv_fill_row_alloc(struct uwb_rsv_alloc_info *ai) | ||
55 | { | ||
56 | int mas, col, rows; | ||
57 | unsigned char *bm = ai->bm; | ||
58 | struct uwb_rsv_row_info *ri = &ai->ri; | ||
59 | unsigned char c; | ||
60 | |||
61 | rows = 1; | ||
62 | c = UWB_RSV_MAS_SAFE; | ||
63 | for (mas = UWB_MAS_PER_ZONE - 1; mas >= 0; mas--) { | ||
64 | if (ri->avail[mas] == 1) { | ||
65 | |||
66 | if (rows > ri->used_rows) { | ||
67 | break; | ||
68 | } else if (rows > 7) { | ||
69 | c = UWB_RSV_MAS_UNSAFE; | ||
70 | } | ||
71 | |||
72 | for (col = 0; col < UWB_NUM_ZONES; col++) { | ||
73 | if (bm[col * UWB_NUM_ZONES + mas] != UWB_RSV_MAS_NOT_AVAIL) { | ||
74 | bm[col * UWB_NUM_ZONES + mas] = c; | ||
75 | if (c == UWB_RSV_MAS_SAFE) | ||
76 | ai->safe_allocated_mases++; | ||
77 | else | ||
78 | ai->unsafe_allocated_mases++; | ||
79 | } | ||
80 | } | ||
81 | rows++; | ||
82 | } | ||
83 | } | ||
84 | ai->total_allocated_mases = ai->safe_allocated_mases + ai->unsafe_allocated_mases; | ||
85 | } | ||
86 | |||
87 | /* | ||
88 | * Find the best column set for a given availability, interval, num safe mas and | ||
89 | * num unsafe mas. | ||
90 | * | ||
91 | * The different sets are tried in order as shown below, depending on the interval. | ||
92 | * | ||
93 | * interval = 16 | ||
94 | * deep = 0 | ||
95 | * set 1 -> { 8 } | ||
96 | * deep = 1 | ||
97 | * set 1 -> { 4 } | ||
98 | * set 2 -> { 12 } | ||
99 | * deep = 2 | ||
100 | * set 1 -> { 2 } | ||
101 | * set 2 -> { 6 } | ||
102 | * set 3 -> { 10 } | ||
103 | * set 4 -> { 14 } | ||
104 | * deep = 3 | ||
105 | * set 1 -> { 1 } | ||
106 | * set 2 -> { 3 } | ||
107 | * set 3 -> { 5 } | ||
108 | * set 4 -> { 7 } | ||
109 | * set 5 -> { 9 } | ||
110 | * set 6 -> { 11 } | ||
111 | * set 7 -> { 13 } | ||
112 | * set 8 -> { 15 } | ||
113 | * | ||
114 | * interval = 8 | ||
115 | * deep = 0 | ||
116 | * set 1 -> { 4 12 } | ||
117 | * deep = 1 | ||
118 | * set 1 -> { 2 10 } | ||
119 | * set 2 -> { 6 14 } | ||
120 | * deep = 2 | ||
121 | * set 1 -> { 1 9 } | ||
122 | * set 2 -> { 3 11 } | ||
123 | * set 3 -> { 5 13 } | ||
124 | * set 4 -> { 7 15 } | ||
125 | * | ||
126 | * interval = 4 | ||
127 | * deep = 0 | ||
128 | * set 1 -> { 2 6 10 14 } | ||
129 | * deep = 1 | ||
130 | * set 1 -> { 1 5 9 13 } | ||
131 | * set 2 -> { 3 7 11 15 } | ||
132 | * | ||
133 | * interval = 2 | ||
134 | * deep = 0 | ||
135 | * set 1 -> { 1 3 5 7 9 11 13 15 } | ||
136 | */ | ||
137 | static int uwb_rsv_find_best_column_set(struct uwb_rsv_alloc_info *ai, int interval, | ||
138 | int num_safe_mas, int num_unsafe_mas) | ||
139 | { | ||
140 | struct uwb_rsv_col_info *ci = ai->ci; | ||
141 | struct uwb_rsv_col_set_info *csi = &ci->csi; | ||
142 | struct uwb_rsv_col_set_info tmp_csi; | ||
143 | int deep, set, col, start_col_deep, col_start_set; | ||
144 | int start_col, max_mas_in_set, lowest_max_mas_in_deep; | ||
145 | int n_mas; | ||
146 | int found = UWB_RSV_ALLOC_NOT_FOUND; | ||
147 | |||
148 | tmp_csi.start_col = 0; | ||
149 | start_col_deep = interval; | ||
150 | n_mas = num_unsafe_mas + num_safe_mas; | ||
151 | |||
152 | for (deep = 0; ((interval >> deep) & 0x1) == 0; deep++) { | ||
153 | start_col_deep /= 2; | ||
154 | col_start_set = 0; | ||
155 | lowest_max_mas_in_deep = UWB_MAS_PER_ZONE; | ||
156 | |||
157 | for (set = 1; set <= (1 << deep); set++) { | ||
158 | max_mas_in_set = 0; | ||
159 | start_col = start_col_deep + col_start_set; | ||
160 | for (col = start_col; col < UWB_NUM_ZONES; col += interval) { | ||
161 | |||
162 | if (ci[col].max_avail_safe >= num_safe_mas && | ||
163 | ci[col].max_avail_unsafe >= n_mas) { | ||
164 | if (ci[col].highest_mas[n_mas] > max_mas_in_set) | ||
165 | max_mas_in_set = ci[col].highest_mas[n_mas]; | ||
166 | } else { | ||
167 | max_mas_in_set = 0; | ||
168 | break; | ||
169 | } | ||
170 | } | ||
171 | if ((lowest_max_mas_in_deep > max_mas_in_set) && max_mas_in_set) { | ||
172 | lowest_max_mas_in_deep = max_mas_in_set; | ||
173 | |||
174 | tmp_csi.start_col = start_col; | ||
175 | } | ||
176 | col_start_set += (interval >> deep); | ||
177 | } | ||
178 | |||
179 | if (lowest_max_mas_in_deep < 8) { | ||
180 | csi->start_col = tmp_csi.start_col; | ||
181 | found = UWB_RSV_ALLOC_FOUND; | ||
182 | break; | ||
183 | } else if ((lowest_max_mas_in_deep > 8) && | ||
184 | (lowest_max_mas_in_deep != UWB_MAS_PER_ZONE) && | ||
185 | (found == UWB_RSV_ALLOC_NOT_FOUND)) { | ||
186 | csi->start_col = tmp_csi.start_col; | ||
187 | found = UWB_RSV_ALLOC_FOUND; | ||
188 | } | ||
189 | } | ||
190 | |||
191 | if (found == UWB_RSV_ALLOC_FOUND) { | ||
192 | csi->interval = interval; | ||
193 | csi->safe_mas_per_col = num_safe_mas; | ||
194 | csi->unsafe_mas_per_col = num_unsafe_mas; | ||
195 | |||
196 | ai->safe_allocated_mases = (UWB_NUM_ZONES / interval) * num_safe_mas; | ||
197 | ai->unsafe_allocated_mases = (UWB_NUM_ZONES / interval) * num_unsafe_mas; | ||
198 | ai->total_allocated_mases = ai->safe_allocated_mases + ai->unsafe_allocated_mases; | ||
199 | ai->interval = interval; | ||
200 | } | ||
201 | return found; | ||
202 | } | ||
203 | |||
204 | static void get_row_descriptors(struct uwb_rsv_alloc_info *ai) | ||
205 | { | ||
206 | unsigned char *bm = ai->bm; | ||
207 | struct uwb_rsv_row_info *ri = &ai->ri; | ||
208 | int col, mas; | ||
209 | |||
210 | ri->free_rows = 16; | ||
211 | for (mas = 0; mas < UWB_MAS_PER_ZONE; mas++) { | ||
212 | ri->avail[mas] = 1; | ||
213 | for (col = 1; col < UWB_NUM_ZONES; col++) { | ||
214 | if (bm[col * UWB_NUM_ZONES + mas] == UWB_RSV_MAS_NOT_AVAIL) { | ||
215 | ri->free_rows--; | ||
216 | ri->avail[mas] = 0; | ||
217 | break; | ||
218 | } | ||
219 | } | ||
220 | } | ||
221 | } | ||
222 | |||
223 | static void uwb_rsv_fill_column_info(unsigned char *bm, int column, struct uwb_rsv_col_info *rci) | ||
224 | { | ||
225 | int mas; | ||
226 | int block_count = 0, start_block = 0; | ||
227 | int previous_avail = 0; | ||
228 | int available = 0; | ||
229 | int safe_mas_in_row[UWB_MAS_PER_ZONE] = { | ||
230 | 8, 7, 6, 5, 4, 4, 4, 4, 4, 4, 4, 4, 4, 3, 2, 1, | ||
231 | }; | ||
232 | |||
233 | rci->max_avail_safe = 0; | ||
234 | |||
235 | for (mas = 0; mas < UWB_MAS_PER_ZONE; mas++) { | ||
236 | if (!bm[column * UWB_NUM_ZONES + mas]) { | ||
237 | available++; | ||
238 | rci->max_avail_unsafe = available; | ||
239 | |||
240 | rci->highest_mas[available] = mas; | ||
241 | |||
242 | if (previous_avail) { | ||
243 | block_count++; | ||
244 | if ((block_count > safe_mas_in_row[start_block]) && | ||
245 | (!rci->max_avail_safe)) | ||
246 | rci->max_avail_safe = available - 1; | ||
247 | } else { | ||
248 | previous_avail = 1; | ||
249 | start_block = mas; | ||
250 | block_count = 1; | ||
251 | } | ||
252 | } else { | ||
253 | previous_avail = 0; | ||
254 | } | ||
255 | } | ||
256 | if (!rci->max_avail_safe) | ||
257 | rci->max_avail_safe = rci->max_avail_unsafe; | ||
258 | } | ||
259 | |||
260 | static void get_column_descriptors(struct uwb_rsv_alloc_info *ai) | ||
261 | { | ||
262 | unsigned char *bm = ai->bm; | ||
263 | struct uwb_rsv_col_info *ci = ai->ci; | ||
264 | int col; | ||
265 | |||
266 | for (col = 1; col < UWB_NUM_ZONES; col++) { | ||
267 | uwb_rsv_fill_column_info(bm, col, &ci[col]); | ||
268 | } | ||
269 | } | ||
270 | |||
271 | static int uwb_rsv_find_best_row_alloc(struct uwb_rsv_alloc_info *ai) | ||
272 | { | ||
273 | int n_rows; | ||
274 | int max_rows = ai->max_mas / UWB_USABLE_MAS_PER_ROW; | ||
275 | int min_rows = ai->min_mas / UWB_USABLE_MAS_PER_ROW; | ||
276 | if (ai->min_mas % UWB_USABLE_MAS_PER_ROW) | ||
277 | min_rows++; | ||
278 | for (n_rows = max_rows; n_rows >= min_rows; n_rows--) { | ||
279 | if (n_rows <= ai->ri.free_rows) { | ||
280 | ai->ri.used_rows = n_rows; | ||
281 | ai->interval = 1; /* row reservation */ | ||
282 | uwb_rsv_fill_row_alloc(ai); | ||
283 | return UWB_RSV_ALLOC_FOUND; | ||
284 | } | ||
285 | } | ||
286 | return UWB_RSV_ALLOC_NOT_FOUND; | ||
287 | } | ||
288 | |||
289 | static int uwb_rsv_find_best_col_alloc(struct uwb_rsv_alloc_info *ai, int interval) | ||
290 | { | ||
291 | int n_safe, n_unsafe, n_mas; | ||
292 | int n_column = UWB_NUM_ZONES / interval; | ||
293 | int max_per_zone = ai->max_mas / n_column; | ||
294 | int min_per_zone = ai->min_mas / n_column; | ||
295 | |||
296 | if (ai->min_mas % n_column) | ||
297 | min_per_zone++; | ||
298 | |||
299 | if (min_per_zone > UWB_MAS_PER_ZONE) { | ||
300 | return UWB_RSV_ALLOC_NOT_FOUND; | ||
301 | } | ||
302 | |||
303 | if (max_per_zone > UWB_MAS_PER_ZONE) { | ||
304 | max_per_zone = UWB_MAS_PER_ZONE; | ||
305 | } | ||
306 | |||
307 | for (n_mas = max_per_zone; n_mas >= min_per_zone; n_mas--) { | ||
308 | if (uwb_rsv_find_best_column_set(ai, interval, 0, n_mas) == UWB_RSV_ALLOC_NOT_FOUND) | ||
309 | continue; | ||
310 | for (n_safe = n_mas; n_safe >= 0; n_safe--) { | ||
311 | n_unsafe = n_mas - n_safe; | ||
312 | if (uwb_rsv_find_best_column_set(ai, interval, n_safe, n_unsafe) == UWB_RSV_ALLOC_FOUND) { | ||
313 | uwb_rsv_fill_column_alloc(ai); | ||
314 | return UWB_RSV_ALLOC_FOUND; | ||
315 | } | ||
316 | } | ||
317 | } | ||
318 | return UWB_RSV_ALLOC_NOT_FOUND; | ||
319 | } | ||
320 | |||
321 | int uwb_rsv_find_best_allocation(struct uwb_rsv *rsv, struct uwb_mas_bm *available, | ||
322 | struct uwb_mas_bm *result) | ||
323 | { | ||
324 | struct uwb_rsv_alloc_info *ai; | ||
325 | int interval; | ||
326 | int bit_index; | ||
327 | |||
328 | ai = kzalloc(sizeof(struct uwb_rsv_alloc_info), GFP_KERNEL); | ||
329 | if (ai == NULL) | ||
330 | return UWB_RSV_ALLOC_NOT_FOUND; | ||
331 | ai->min_mas = rsv->min_mas; | ||
332 | ai->max_mas = rsv->max_mas; | ||
333 | ai->max_interval = rsv->max_interval; | ||
334 | |||
335 | /* fill the not available vector from the available bm */ | ||
336 | for (bit_index = 0; bit_index < UWB_NUM_MAS; bit_index++) { | ||
337 | if (!test_bit(bit_index, available->bm)) | ||
338 | ai->bm[bit_index] = UWB_RSV_MAS_NOT_AVAIL; | ||
339 | } | ||
340 | |||
341 | if (ai->max_interval == 1) { | ||
342 | get_row_descriptors(ai); | ||
343 | if (uwb_rsv_find_best_row_alloc(ai) == UWB_RSV_ALLOC_FOUND) | ||
344 | goto alloc_found; | ||
345 | else | ||
346 | goto alloc_not_found; | ||
347 | } | ||
348 | |||
349 | get_column_descriptors(ai); | ||
350 | |||
351 | for (interval = 16; interval >= 2; interval >>= 1) { | ||
352 | if (interval > ai->max_interval) | ||
353 | continue; | ||
354 | if (uwb_rsv_find_best_col_alloc(ai, interval) == UWB_RSV_ALLOC_FOUND) | ||
355 | goto alloc_found; | ||
356 | } | ||
357 | |||
358 | /* try row reservation if no column is found */ | ||
359 | get_row_descriptors(ai); | ||
360 | if (uwb_rsv_find_best_row_alloc(ai) == UWB_RSV_ALLOC_FOUND) | ||
361 | goto alloc_found; | ||
362 | else | ||
363 | goto alloc_not_found; | ||
364 | |||
365 | alloc_found: | ||
366 | bitmap_zero(result->bm, UWB_NUM_MAS); | ||
367 | bitmap_zero(result->unsafe_bm, UWB_NUM_MAS); | ||
368 | /* fill the safe and unsafe bitmaps */ | ||
369 | for (bit_index = 0; bit_index < UWB_NUM_MAS; bit_index++) { | ||
370 | if (ai->bm[bit_index] == UWB_RSV_MAS_SAFE) | ||
371 | set_bit(bit_index, result->bm); | ||
372 | else if (ai->bm[bit_index] == UWB_RSV_MAS_UNSAFE) | ||
373 | set_bit(bit_index, result->unsafe_bm); | ||
374 | } | ||
375 | bitmap_or(result->bm, result->bm, result->unsafe_bm, UWB_NUM_MAS); | ||
376 | |||
377 | result->safe = ai->safe_allocated_mases; | ||
378 | result->unsafe = ai->unsafe_allocated_mases; | ||
379 | |||
380 | kfree(ai); | ||
381 | return UWB_RSV_ALLOC_FOUND; | ||
382 | |||
383 | alloc_not_found: | ||
384 | kfree(ai); | ||
385 | return UWB_RSV_ALLOC_NOT_FOUND; | ||
386 | } | ||
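
The (deep, set) search order documented in the comment before uwb_rsv_find_best_column_set() can be reproduced in a few lines. The standalone program below (illustration only, not kernel code) prints the first column of every set tried for a given interval; the full set is that column plus every column one interval later:

    #include <stdio.h>

    /* Mirror the loop structure of uwb_rsv_find_best_column_set():
     * start_col_deep begins at interval and is halved once per deep
     * level, and sets within a level step by interval >> deep. */
    static void enumerate_column_sets(int interval)
    {
            int deep, set;

            for (deep = 0; ((interval >> deep) & 0x1) == 0; deep++) {
                    int start_col_deep = interval >> (deep + 1);
                    int col_start_set = 0;

                    for (set = 1; set <= (1 << deep); set++) {
                            printf("interval %2d deep %d set %d -> first column %d\n",
                                   interval, deep, set,
                                   start_col_deep + col_start_set);
                            col_start_set += interval >> deep;
                    }
            }
    }

    int main(void)
    {
            enumerate_column_sets(16); /* 8; 4 12; 2 6 10 14; 1 3 ... 15 */
            enumerate_column_sets(8);  /* 4; 2 6; 1 3 5 7 */
            return 0;
    }
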
diff --git a/drivers/uwb/beacon.c b/drivers/uwb/beacon.c index 46b18eec5026..36bc3158006f 100644 --- a/drivers/uwb/beacon.c +++ b/drivers/uwb/beacon.c | |||
@@ -22,19 +22,16 @@ | |||
22 | * | 22 | * |
23 | * FIXME: docs | 23 | * FIXME: docs |
24 | */ | 24 | */ |
25 | |||
26 | #include <linux/kernel.h> | 25 | #include <linux/kernel.h> |
27 | #include <linux/init.h> | 26 | #include <linux/init.h> |
28 | #include <linux/module.h> | 27 | #include <linux/module.h> |
29 | #include <linux/device.h> | 28 | #include <linux/device.h> |
30 | #include <linux/err.h> | 29 | #include <linux/err.h> |
31 | #include <linux/kdev_t.h> | 30 | #include <linux/kdev_t.h> |
32 | #include "uwb-internal.h" | ||
33 | 31 | ||
34 | #define D_LOCAL 0 | 32 | #include "uwb-internal.h" |
35 | #include <linux/uwb/debug.h> | ||
36 | 33 | ||
37 | /** Start Beaconing command structure */ | 34 | /* Start Beaconing command structure */ |
38 | struct uwb_rc_cmd_start_beacon { | 35 | struct uwb_rc_cmd_start_beacon { |
39 | struct uwb_rccb rccb; | 36 | struct uwb_rccb rccb; |
40 | __le16 wBPSTOffset; | 37 | __le16 wBPSTOffset; |
@@ -119,7 +116,6 @@ int uwb_rc_beacon(struct uwb_rc *rc, int channel, unsigned bpst_offset) | |||
119 | int result; | 116 | int result; |
120 | struct device *dev = &rc->uwb_dev.dev; | 117 | struct device *dev = &rc->uwb_dev.dev; |
121 | 118 | ||
122 | mutex_lock(&rc->uwb_dev.mutex); | ||
123 | if (channel < 0) | 119 | if (channel < 0) |
124 | channel = -1; | 120 | channel = -1; |
125 | if (channel == -1) | 121 | if (channel == -1) |
@@ -128,7 +124,7 @@ int uwb_rc_beacon(struct uwb_rc *rc, int channel, unsigned bpst_offset) | |||
128 | /* channel >= 0...dah */ | 124 | /* channel >= 0...dah */ |
129 | result = uwb_rc_start_beacon(rc, bpst_offset, channel); | 125 | result = uwb_rc_start_beacon(rc, bpst_offset, channel); |
130 | if (result < 0) | 126 | if (result < 0) |
131 | goto out_up; | 127 | return result; |
132 | if (le16_to_cpu(rc->ies->wIELength) > 0) { | 128 | if (le16_to_cpu(rc->ies->wIELength) > 0) { |
133 | result = uwb_rc_set_ie(rc, rc->ies); | 129 | result = uwb_rc_set_ie(rc, rc->ies); |
134 | if (result < 0) { | 130 | if (result < 0) { |
@@ -137,19 +133,12 @@ int uwb_rc_beacon(struct uwb_rc *rc, int channel, unsigned bpst_offset) | |||
137 | result = uwb_rc_stop_beacon(rc); | 133 | result = uwb_rc_stop_beacon(rc); |
138 | channel = -1; | 134 | channel = -1; |
139 | bpst_offset = 0; | 135 | bpst_offset = 0; |
140 | } else | 136 | } |
141 | result = 0; | ||
142 | } | 137 | } |
143 | } | 138 | } |
144 | 139 | ||
145 | if (result < 0) | 140 | if (result >= 0) |
146 | goto out_up; | 141 | rc->beaconing = channel; |
147 | rc->beaconing = channel; | ||
148 | |||
149 | uwb_notify(rc, NULL, uwb_bg_joined(rc) ? UWB_NOTIF_BG_JOIN : UWB_NOTIF_BG_LEAVE); | ||
150 | |||
151 | out_up: | ||
152 | mutex_unlock(&rc->uwb_dev.mutex); | ||
153 | return result; | 142 | return result; |
154 | } | 143 | } |
155 | 144 | ||
@@ -168,12 +157,6 @@ out_up: | |||
168 | * FIXME: use something faster for search than a list | 157 | * FIXME: use something faster for search than a list |
169 | */ | 158 | */ |
170 | 159 | ||
171 | struct uwb_beca uwb_beca = { | ||
172 | .list = LIST_HEAD_INIT(uwb_beca.list), | ||
173 | .mutex = __MUTEX_INITIALIZER(uwb_beca.mutex) | ||
174 | }; | ||
175 | |||
176 | |||
177 | void uwb_bce_kfree(struct kref *_bce) | 160 | void uwb_bce_kfree(struct kref *_bce) |
178 | { | 161 | { |
179 | struct uwb_beca_e *bce = container_of(_bce, struct uwb_beca_e, refcnt); | 162 | struct uwb_beca_e *bce = container_of(_bce, struct uwb_beca_e, refcnt); |
@@ -185,13 +168,11 @@ void uwb_bce_kfree(struct kref *_bce) | |||
185 | 168 | ||
186 | /* Find a beacon by dev addr in the cache */ | 169 | /* Find a beacon by dev addr in the cache */ |
187 | static | 170 | static |
188 | struct uwb_beca_e *__uwb_beca_find_bydev(const struct uwb_dev_addr *dev_addr) | 171 | struct uwb_beca_e *__uwb_beca_find_bydev(struct uwb_rc *rc, |
172 | const struct uwb_dev_addr *dev_addr) | ||
189 | { | 173 | { |
190 | struct uwb_beca_e *bce, *next; | 174 | struct uwb_beca_e *bce, *next; |
191 | list_for_each_entry_safe(bce, next, &uwb_beca.list, node) { | 175 | list_for_each_entry_safe(bce, next, &rc->uwb_beca.list, node) { |
192 | d_printf(6, NULL, "looking for addr %02x:%02x in %02x:%02x\n", | ||
193 | dev_addr->data[0], dev_addr->data[1], | ||
194 | bce->dev_addr.data[0], bce->dev_addr.data[1]); | ||
195 | if (!memcmp(&bce->dev_addr, dev_addr, sizeof(bce->dev_addr))) | 176 | if (!memcmp(&bce->dev_addr, dev_addr, sizeof(bce->dev_addr))) |
196 | goto out; | 177 | goto out; |
197 | } | 178 | } |
@@ -202,10 +183,11 @@ out: | |||
202 | 183 | ||
203 | /* Find a beacon by dev addr in the cache */ | 184 | /* Find a beacon by dev addr in the cache */ |
204 | static | 185 | static |
205 | struct uwb_beca_e *__uwb_beca_find_bymac(const struct uwb_mac_addr *mac_addr) | 186 | struct uwb_beca_e *__uwb_beca_find_bymac(struct uwb_rc *rc, |
187 | const struct uwb_mac_addr *mac_addr) | ||
206 | { | 188 | { |
207 | struct uwb_beca_e *bce, *next; | 189 | struct uwb_beca_e *bce, *next; |
208 | list_for_each_entry_safe(bce, next, &uwb_beca.list, node) { | 190 | list_for_each_entry_safe(bce, next, &rc->uwb_beca.list, node) { |
209 | if (!memcmp(bce->mac_addr, mac_addr->data, | 191 | if (!memcmp(bce->mac_addr, mac_addr->data, |
210 | sizeof(struct uwb_mac_addr))) | 192 | sizeof(struct uwb_mac_addr))) |
211 | goto out; | 193 | goto out; |
@@ -229,11 +211,11 @@ struct uwb_dev *uwb_dev_get_by_devaddr(struct uwb_rc *rc, | |||
229 | struct uwb_dev *found = NULL; | 211 | struct uwb_dev *found = NULL; |
230 | struct uwb_beca_e *bce; | 212 | struct uwb_beca_e *bce; |
231 | 213 | ||
232 | mutex_lock(&uwb_beca.mutex); | 214 | mutex_lock(&rc->uwb_beca.mutex); |
233 | bce = __uwb_beca_find_bydev(devaddr); | 215 | bce = __uwb_beca_find_bydev(rc, devaddr); |
234 | if (bce) | 216 | if (bce) |
235 | found = uwb_dev_try_get(rc, bce->uwb_dev); | 217 | found = uwb_dev_try_get(rc, bce->uwb_dev); |
236 | mutex_unlock(&uwb_beca.mutex); | 218 | mutex_unlock(&rc->uwb_beca.mutex); |
237 | 219 | ||
238 | return found; | 220 | return found; |
239 | } | 221 | } |
@@ -249,11 +231,11 @@ struct uwb_dev *uwb_dev_get_by_macaddr(struct uwb_rc *rc, | |||
249 | struct uwb_dev *found = NULL; | 231 | struct uwb_dev *found = NULL; |
250 | struct uwb_beca_e *bce; | 232 | struct uwb_beca_e *bce; |
251 | 233 | ||
252 | mutex_lock(&uwb_beca.mutex); | 234 | mutex_lock(&rc->uwb_beca.mutex); |
253 | bce = __uwb_beca_find_bymac(macaddr); | 235 | bce = __uwb_beca_find_bymac(rc, macaddr); |
254 | if (bce) | 236 | if (bce) |
255 | found = uwb_dev_try_get(rc, bce->uwb_dev); | 237 | found = uwb_dev_try_get(rc, bce->uwb_dev); |
256 | mutex_unlock(&uwb_beca.mutex); | 238 | mutex_unlock(&rc->uwb_beca.mutex); |
257 | 239 | ||
258 | return found; | 240 | return found; |
259 | } | 241 | } |
@@ -274,7 +256,9 @@ static void uwb_beca_e_init(struct uwb_beca_e *bce) | |||
274 | * @bf: Beacon frame (part of b, really) | 256 | * @bf: Beacon frame (part of b, really) |
275 | * @ts_jiffies: Timestamp (in jiffies) when the beacon was received | 257 | * @ts_jiffies: Timestamp (in jiffies) when the beacon was received |
276 | */ | 258 | */ |
277 | struct uwb_beca_e *__uwb_beca_add(struct uwb_rc_evt_beacon *be, | 259 | static |
260 | struct uwb_beca_e *__uwb_beca_add(struct uwb_rc *rc, | ||
261 | struct uwb_rc_evt_beacon *be, | ||
278 | struct uwb_beacon_frame *bf, | 262 | struct uwb_beacon_frame *bf, |
279 | unsigned long ts_jiffies) | 263 | unsigned long ts_jiffies) |
280 | { | 264 | { |
@@ -286,7 +270,7 @@ struct uwb_beca_e *__uwb_beca_add(struct uwb_rc_evt_beacon *be, | |||
286 | uwb_beca_e_init(bce); | 270 | uwb_beca_e_init(bce); |
287 | bce->ts_jiffies = ts_jiffies; | 271 | bce->ts_jiffies = ts_jiffies; |
288 | bce->uwb_dev = NULL; | 272 | bce->uwb_dev = NULL; |
289 | list_add(&bce->node, &uwb_beca.list); | 273 | list_add(&bce->node, &rc->uwb_beca.list); |
290 | return bce; | 274 | return bce; |
291 | } | 275 | } |
292 | 276 | ||
@@ -295,33 +279,32 @@ struct uwb_beca_e *__uwb_beca_add(struct uwb_rc_evt_beacon *be, | |||
295 | * | 279 | * |
296 | * Remove associated devices too. | 280 | * Remove associated devices too. |
297 | */ | 281 | */ |
298 | void uwb_beca_purge(void) | 282 | void uwb_beca_purge(struct uwb_rc *rc) |
299 | { | 283 | { |
300 | struct uwb_beca_e *bce, *next; | 284 | struct uwb_beca_e *bce, *next; |
301 | unsigned long expires; | 285 | unsigned long expires; |
302 | 286 | ||
303 | mutex_lock(&uwb_beca.mutex); | 287 | mutex_lock(&rc->uwb_beca.mutex); |
304 | list_for_each_entry_safe(bce, next, &uwb_beca.list, node) { | 288 | list_for_each_entry_safe(bce, next, &rc->uwb_beca.list, node) { |
305 | expires = bce->ts_jiffies + msecs_to_jiffies(beacon_timeout_ms); | 289 | expires = bce->ts_jiffies + msecs_to_jiffies(beacon_timeout_ms); |
306 | if (time_after(jiffies, expires)) { | 290 | if (time_after(jiffies, expires)) { |
307 | uwbd_dev_offair(bce); | 291 | uwbd_dev_offair(bce); |
308 | list_del(&bce->node); | ||
309 | uwb_bce_put(bce); | ||
310 | } | 292 | } |
311 | } | 293 | } |
312 | mutex_unlock(&uwb_beca.mutex); | 294 | mutex_unlock(&rc->uwb_beca.mutex); |
313 | } | 295 | } |
314 | 296 | ||
315 | /* Clean up the whole beacon cache. Called on shutdown */ | 297 | /* Clean up the whole beacon cache. Called on shutdown */ |
316 | void uwb_beca_release(void) | 298 | void uwb_beca_release(struct uwb_rc *rc) |
317 | { | 299 | { |
318 | struct uwb_beca_e *bce, *next; | 300 | struct uwb_beca_e *bce, *next; |
319 | mutex_lock(&uwb_beca.mutex); | 301 | |
320 | list_for_each_entry_safe(bce, next, &uwb_beca.list, node) { | 302 | mutex_lock(&rc->uwb_beca.mutex); |
303 | list_for_each_entry_safe(bce, next, &rc->uwb_beca.list, node) { | ||
321 | list_del(&bce->node); | 304 | list_del(&bce->node); |
322 | uwb_bce_put(bce); | 305 | uwb_bce_put(bce); |
323 | } | 306 | } |
324 | mutex_unlock(&uwb_beca.mutex); | 307 | mutex_unlock(&rc->uwb_beca.mutex); |
325 | } | 308 | } |
326 | 309 | ||
327 | static void uwb_beacon_print(struct uwb_rc *rc, struct uwb_rc_evt_beacon *be, | 310 | static void uwb_beacon_print(struct uwb_rc *rc, struct uwb_rc_evt_beacon *be, |
@@ -349,22 +332,22 @@ ssize_t uwb_bce_print_IEs(struct uwb_dev *uwb_dev, struct uwb_beca_e *bce, | |||
349 | ssize_t result = 0; | 332 | ssize_t result = 0; |
350 | struct uwb_rc_evt_beacon *be; | 333 | struct uwb_rc_evt_beacon *be; |
351 | struct uwb_beacon_frame *bf; | 334 | struct uwb_beacon_frame *bf; |
352 | struct uwb_buf_ctx ctx = { | 335 | int ies_len; |
353 | .buf = buf, | 336 | struct uwb_ie_hdr *ies; |
354 | .bytes = 0, | ||
355 | .size = size | ||
356 | }; | ||
357 | 337 | ||
358 | mutex_lock(&bce->mutex); | 338 | mutex_lock(&bce->mutex); |
339 | |||
359 | be = bce->be; | 340 | be = bce->be; |
360 | if (be == NULL) | 341 | if (be) { |
361 | goto out; | 342 | bf = (struct uwb_beacon_frame *)bce->be->BeaconInfo; |
362 | bf = (void *) be->BeaconInfo; | 343 | ies_len = be->wBeaconInfoLength - sizeof(struct uwb_beacon_frame); |
363 | uwb_ie_for_each(uwb_dev, uwb_ie_dump_hex, &ctx, | 344 | ies = (struct uwb_ie_hdr *)bf->IEData; |
364 | bf->IEData, be->wBeaconInfoLength - sizeof(*bf)); | 345 | |
365 | result = ctx.bytes; | 346 | result = uwb_ie_dump_hex(ies, ies_len, buf, size); |
366 | out: | 347 | } |
348 | |||
367 | mutex_unlock(&bce->mutex); | 349 | mutex_unlock(&bce->mutex); |
350 | |||
368 | return result; | 351 | return result; |
369 | } | 352 | } |
370 | 353 | ||
@@ -437,18 +420,18 @@ int uwbd_evt_handle_rc_beacon(struct uwb_event *evt) | |||
437 | if (uwb_mac_addr_bcast(&bf->Device_Identifier)) | 420 | if (uwb_mac_addr_bcast(&bf->Device_Identifier)) |
438 | return 0; | 421 | return 0; |
439 | 422 | ||
440 | mutex_lock(&uwb_beca.mutex); | 423 | mutex_lock(&rc->uwb_beca.mutex); |
441 | bce = __uwb_beca_find_bymac(&bf->Device_Identifier); | 424 | bce = __uwb_beca_find_bymac(rc, &bf->Device_Identifier); |
442 | if (bce == NULL) { | 425 | if (bce == NULL) { |
443 | /* Not in there, a new device is pinging */ | 426 | /* Not in there, a new device is pinging */ |
444 | uwb_beacon_print(evt->rc, be, bf); | 427 | uwb_beacon_print(evt->rc, be, bf); |
445 | bce = __uwb_beca_add(be, bf, evt->ts_jiffies); | 428 | bce = __uwb_beca_add(rc, be, bf, evt->ts_jiffies); |
446 | if (bce == NULL) { | 429 | if (bce == NULL) { |
447 | mutex_unlock(&uwb_beca.mutex); | 430 | mutex_unlock(&rc->uwb_beca.mutex); |
448 | return -ENOMEM; | 431 | return -ENOMEM; |
449 | } | 432 | } |
450 | } | 433 | } |
451 | mutex_unlock(&uwb_beca.mutex); | 434 | mutex_unlock(&rc->uwb_beca.mutex); |
452 | 435 | ||
453 | mutex_lock(&bce->mutex); | 436 | mutex_lock(&bce->mutex); |
454 | /* purge old beacon data */ | 437 | /* purge old beacon data */ |
@@ -588,19 +571,6 @@ error: | |||
588 | return result; | 571 | return result; |
589 | } | 572 | } |
590 | 573 | ||
591 | /** | ||
592 | * uwb_bg_joined - is the RC in a beacon group? | ||
593 | * @rc: the radio controller | ||
594 | * | ||
595 | * Returns true if the radio controller is in a beacon group (even if | ||
596 | * it's the sole member). | ||
597 | */ | ||
598 | int uwb_bg_joined(struct uwb_rc *rc) | ||
599 | { | ||
600 | return rc->beaconing != -1; | ||
601 | } | ||
602 | EXPORT_SYMBOL_GPL(uwb_bg_joined); | ||
603 | |||
604 | /* | 574 | /* |
605 | * Print beaconing state. | 575 | * Print beaconing state. |
606 | */ | 576 | */ |
@@ -619,9 +589,6 @@ static ssize_t uwb_rc_beacon_show(struct device *dev, | |||
619 | 589 | ||
620 | /* | 590 | /* |
621 | * Start beaconing on the specified channel, or stop beaconing. | 591 | * Start beaconing on the specified channel, or stop beaconing. |
622 | * | ||
623 | * The BPST offset of when to start searching for a beacon group to | ||
624 | * join may be specified. | ||
625 | */ | 592 | */ |
626 | static ssize_t uwb_rc_beacon_store(struct device *dev, | 593 | static ssize_t uwb_rc_beacon_store(struct device *dev, |
627 | struct device_attribute *attr, | 594 | struct device_attribute *attr, |
@@ -630,12 +597,11 @@ static ssize_t uwb_rc_beacon_store(struct device *dev, | |||
630 | struct uwb_dev *uwb_dev = to_uwb_dev(dev); | 597 | struct uwb_dev *uwb_dev = to_uwb_dev(dev); |
631 | struct uwb_rc *rc = uwb_dev->rc; | 598 | struct uwb_rc *rc = uwb_dev->rc; |
632 | int channel; | 599 | int channel; |
633 | unsigned bpst_offset = 0; | ||
634 | ssize_t result = -EINVAL; | 600 | ssize_t result = -EINVAL; |
635 | 601 | ||
636 | result = sscanf(buf, "%d %u\n", &channel, &bpst_offset); | 602 | result = sscanf(buf, "%d", &channel); |
637 | if (result >= 1) | 603 | if (result >= 1) |
638 | result = uwb_rc_beacon(rc, channel, bpst_offset); | 604 | result = uwb_radio_force_channel(rc, channel); |
639 | 605 | ||
640 | return result < 0 ? result : size; | 606 | return result < 0 ? result : size; |
641 | } | 607 | } |
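
With the cache moved from the removed global uwb_beca into struct uwb_rc, every radio controller needs its own beacon-cache list and mutex initialized. A minimal sketch of the per-controller setup this implies; the helper name and its call site are assumptions, as neither appears in these hunks:

    /* Assumed init helper for the per-controller beacon cache. */
    static void uwb_beca_init_sketch(struct uwb_rc *rc)
    {
            mutex_init(&rc->uwb_beca.mutex);
            INIT_LIST_HEAD(&rc->uwb_beca.list);
    }
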
diff --git a/drivers/uwb/driver.c b/drivers/uwb/driver.c index 521cdeb84971..da77e41de990 100644 --- a/drivers/uwb/driver.c +++ b/drivers/uwb/driver.c | |||
@@ -53,7 +53,7 @@ | |||
53 | #include <linux/err.h> | 53 | #include <linux/err.h> |
54 | #include <linux/kdev_t.h> | 54 | #include <linux/kdev_t.h> |
55 | #include <linux/random.h> | 55 | #include <linux/random.h> |
56 | #include <linux/uwb/debug.h> | 56 | |
57 | #include "uwb-internal.h" | 57 | #include "uwb-internal.h" |
58 | 58 | ||
59 | 59 | ||
@@ -118,7 +118,6 @@ static int __init uwb_subsys_init(void) | |||
118 | result = class_register(&uwb_rc_class); | 118 | result = class_register(&uwb_rc_class); |
119 | if (result < 0) | 119 | if (result < 0) |
120 | goto error_uwb_rc_class_register; | 120 | goto error_uwb_rc_class_register; |
121 | uwbd_start(); | ||
122 | uwb_dbg_init(); | 121 | uwb_dbg_init(); |
123 | return 0; | 122 | return 0; |
124 | 123 | ||
@@ -132,7 +131,6 @@ module_init(uwb_subsys_init); | |||
132 | static void __exit uwb_subsys_exit(void) | 131 | static void __exit uwb_subsys_exit(void) |
133 | { | 132 | { |
134 | uwb_dbg_exit(); | 133 | uwb_dbg_exit(); |
135 | uwbd_stop(); | ||
136 | class_unregister(&uwb_rc_class); | 134 | class_unregister(&uwb_rc_class); |
137 | uwb_est_destroy(); | 135 | uwb_est_destroy(); |
138 | return; | 136 | return; |
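
Dropping uwbd_start()/uwbd_stop() from subsystem init and exit suggests the uwbd event thread now runs per radio controller rather than globally. A heavily hedged sketch of that shape; uwbd() and the helper name are assumptions, since this hunk only shows the removal:

    #include <linux/kthread.h>

    int uwbd(void *arg); /* assumed per-controller event loop */

    static void uwbd_start_sketch(struct uwb_rc *rc)
    {
            struct task_struct *task = kthread_run(uwbd, rc, "uwbd");

            if (IS_ERR(task))
                    dev_err(&rc->uwb_dev.dev, "cannot start uwbd thread\n");
    }
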
diff --git a/drivers/uwb/drp-avail.c b/drivers/uwb/drp-avail.c index 3febd8552808..40a540a5a72e 100644 --- a/drivers/uwb/drp-avail.c +++ b/drivers/uwb/drp-avail.c | |||
@@ -58,7 +58,7 @@ void uwb_drp_avail_init(struct uwb_rc *rc) | |||
58 | * | 58 | * |
59 | * avail = global & local & pending | 59 | * avail = global & local & pending |
60 | */ | 60 | */ |
61 | static void uwb_drp_available(struct uwb_rc *rc, struct uwb_mas_bm *avail) | 61 | void uwb_drp_available(struct uwb_rc *rc, struct uwb_mas_bm *avail) |
62 | { | 62 | { |
63 | bitmap_and(avail->bm, rc->drp_avail.global, rc->drp_avail.local, UWB_NUM_MAS); | 63 | bitmap_and(avail->bm, rc->drp_avail.global, rc->drp_avail.local, UWB_NUM_MAS); |
64 | bitmap_and(avail->bm, avail->bm, rc->drp_avail.pending, UWB_NUM_MAS); | 64 | bitmap_and(avail->bm, avail->bm, rc->drp_avail.pending, UWB_NUM_MAS); |
@@ -105,6 +105,7 @@ void uwb_drp_avail_release(struct uwb_rc *rc, struct uwb_mas_bm *mas) | |||
105 | bitmap_or(rc->drp_avail.local, rc->drp_avail.local, mas->bm, UWB_NUM_MAS); | 105 | bitmap_or(rc->drp_avail.local, rc->drp_avail.local, mas->bm, UWB_NUM_MAS); |
106 | bitmap_or(rc->drp_avail.pending, rc->drp_avail.pending, mas->bm, UWB_NUM_MAS); | 106 | bitmap_or(rc->drp_avail.pending, rc->drp_avail.pending, mas->bm, UWB_NUM_MAS); |
107 | rc->drp_avail.ie_valid = false; | 107 | rc->drp_avail.ie_valid = false; |
108 | uwb_rsv_handle_drp_avail_change(rc); | ||
108 | } | 109 | } |
109 | 110 | ||
110 | /** | 111 | /** |
@@ -280,6 +281,7 @@ int uwbd_evt_handle_rc_drp_avail(struct uwb_event *evt) | |||
280 | mutex_lock(&rc->rsvs_mutex); | 281 | mutex_lock(&rc->rsvs_mutex); |
281 | bitmap_copy(rc->drp_avail.global, bmp, UWB_NUM_MAS); | 282 | bitmap_copy(rc->drp_avail.global, bmp, UWB_NUM_MAS); |
282 | rc->drp_avail.ie_valid = false; | 283 | rc->drp_avail.ie_valid = false; |
284 | uwb_rsv_handle_drp_avail_change(rc); | ||
283 | mutex_unlock(&rc->rsvs_mutex); | 285 | mutex_unlock(&rc->rsvs_mutex); |
284 | 286 | ||
285 | uwb_rsv_sched_update(rc); | 287 | uwb_rsv_sched_update(rc); |
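
uwb_drp_available(), made non-static above, composes the usable MAS set as avail = global & local & pending. A standalone illustration of that three-way AND over the 256 MAS bits (plain C, not the kernel bitmap API):

    #include <stdint.h>

    #define UWB_NUM_MAS 256
    #define MAS_WORDS   (UWB_NUM_MAS / 64)

    /* avail = global & local & pending, computed word by word. */
    static void drp_available_sketch(uint64_t avail[MAS_WORDS],
                                     const uint64_t global[MAS_WORDS],
                                     const uint64_t local[MAS_WORDS],
                                     const uint64_t pending[MAS_WORDS])
    {
            int i;

            for (i = 0; i < MAS_WORDS; i++)
                    avail[i] = global[i] & local[i] & pending[i];
    }
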
diff --git a/drivers/uwb/drp-ie.c b/drivers/uwb/drp-ie.c index 882724c5f126..2840d7bf9e67 100644 --- a/drivers/uwb/drp-ie.c +++ b/drivers/uwb/drp-ie.c | |||
@@ -16,13 +16,102 @@ | |||
16 | * You should have received a copy of the GNU General Public License | 16 | * You should have received a copy of the GNU General Public License |
17 | * along with this program. If not, see <http://www.gnu.org/licenses/>. | 17 | * along with this program. If not, see <http://www.gnu.org/licenses/>. |
18 | */ | 18 | */ |
19 | #include <linux/version.h> | ||
20 | #include <linux/kernel.h> | 19 | #include <linux/kernel.h> |
21 | #include <linux/random.h> | 20 | #include <linux/random.h> |
22 | #include <linux/uwb.h> | 21 | #include <linux/uwb.h> |
23 | 22 | ||
24 | #include "uwb-internal.h" | 23 | #include "uwb-internal.h" |
25 | 24 | ||
25 | |||
26 | /* | ||
27 | * Return the reason code for a reservation's DRP IE. | ||
28 | */ | ||
29 | int uwb_rsv_reason_code(struct uwb_rsv *rsv) | ||
30 | { | ||
31 | static const int reason_codes[] = { | ||
32 | [UWB_RSV_STATE_O_INITIATED] = UWB_DRP_REASON_ACCEPTED, | ||
33 | [UWB_RSV_STATE_O_PENDING] = UWB_DRP_REASON_ACCEPTED, | ||
34 | [UWB_RSV_STATE_O_MODIFIED] = UWB_DRP_REASON_MODIFIED, | ||
35 | [UWB_RSV_STATE_O_ESTABLISHED] = UWB_DRP_REASON_ACCEPTED, | ||
36 | [UWB_RSV_STATE_O_TO_BE_MOVED] = UWB_DRP_REASON_ACCEPTED, | ||
37 | [UWB_RSV_STATE_O_MOVE_COMBINING] = UWB_DRP_REASON_MODIFIED, | ||
38 | [UWB_RSV_STATE_O_MOVE_REDUCING] = UWB_DRP_REASON_MODIFIED, | ||
39 | [UWB_RSV_STATE_O_MOVE_EXPANDING] = UWB_DRP_REASON_ACCEPTED, | ||
40 | [UWB_RSV_STATE_T_ACCEPTED] = UWB_DRP_REASON_ACCEPTED, | ||
41 | [UWB_RSV_STATE_T_CONFLICT] = UWB_DRP_REASON_CONFLICT, | ||
42 | [UWB_RSV_STATE_T_PENDING] = UWB_DRP_REASON_PENDING, | ||
43 | [UWB_RSV_STATE_T_DENIED] = UWB_DRP_REASON_DENIED, | ||
44 | [UWB_RSV_STATE_T_RESIZED] = UWB_DRP_REASON_ACCEPTED, | ||
45 | [UWB_RSV_STATE_T_EXPANDING_ACCEPTED] = UWB_DRP_REASON_ACCEPTED, | ||
46 | [UWB_RSV_STATE_T_EXPANDING_CONFLICT] = UWB_DRP_REASON_CONFLICT, | ||
47 | [UWB_RSV_STATE_T_EXPANDING_PENDING] = UWB_DRP_REASON_PENDING, | ||
48 | [UWB_RSV_STATE_T_EXPANDING_DENIED] = UWB_DRP_REASON_DENIED, | ||
49 | }; | ||
50 | |||
51 | return reason_codes[rsv->state]; | ||
52 | } | ||
53 | |||
54 | /* | ||
55 | * Return the reason code for a reservation's companion DRP IE. | ||
56 | */ | ||
57 | int uwb_rsv_companion_reason_code(struct uwb_rsv *rsv) | ||
58 | { | ||
59 | static const int companion_reason_codes[] = { | ||
60 | [UWB_RSV_STATE_O_MOVE_EXPANDING] = UWB_DRP_REASON_ACCEPTED, | ||
61 | [UWB_RSV_STATE_T_EXPANDING_ACCEPTED] = UWB_DRP_REASON_ACCEPTED, | ||
62 | [UWB_RSV_STATE_T_EXPANDING_CONFLICT] = UWB_DRP_REASON_CONFLICT, | ||
63 | [UWB_RSV_STATE_T_EXPANDING_PENDING] = UWB_DRP_REASON_PENDING, | ||
64 | [UWB_RSV_STATE_T_EXPANDING_DENIED] = UWB_DRP_REASON_DENIED, | ||
65 | }; | ||
66 | |||
67 | return companion_reason_codes[rsv->state]; | ||
68 | } | ||
69 | |||
70 | /* | ||
71 | * Return the status bit for a reservation's DRP IE. | ||
72 | */ | ||
73 | int uwb_rsv_status(struct uwb_rsv *rsv) | ||
74 | { | ||
75 | static const int statuses[] = { | ||
76 | [UWB_RSV_STATE_O_INITIATED] = 0, | ||
77 | [UWB_RSV_STATE_O_PENDING] = 0, | ||
78 | [UWB_RSV_STATE_O_MODIFIED] = 1, | ||
79 | [UWB_RSV_STATE_O_ESTABLISHED] = 1, | ||
80 | [UWB_RSV_STATE_O_TO_BE_MOVED] = 0, | ||
81 | [UWB_RSV_STATE_O_MOVE_COMBINING] = 1, | ||
82 | [UWB_RSV_STATE_O_MOVE_REDUCING] = 1, | ||
83 | [UWB_RSV_STATE_O_MOVE_EXPANDING] = 1, | ||
84 | [UWB_RSV_STATE_T_ACCEPTED] = 1, | ||
85 | [UWB_RSV_STATE_T_CONFLICT] = 0, | ||
86 | [UWB_RSV_STATE_T_PENDING] = 0, | ||
87 | [UWB_RSV_STATE_T_DENIED] = 0, | ||
88 | [UWB_RSV_STATE_T_RESIZED] = 1, | ||
89 | [UWB_RSV_STATE_T_EXPANDING_ACCEPTED] = 1, | ||
90 | [UWB_RSV_STATE_T_EXPANDING_CONFLICT] = 1, | ||
91 | [UWB_RSV_STATE_T_EXPANDING_PENDING] = 1, | ||
92 | [UWB_RSV_STATE_T_EXPANDING_DENIED] = 1, | ||
94 | }; | ||
95 | |||
96 | return statuses[rsv->state]; | ||
97 | } | ||
98 | |||
99 | /* | ||
100 | * Return the status bit for a reservation's companion DRP IE. | ||
101 | */ | ||
102 | int uwb_rsv_companion_status(struct uwb_rsv *rsv) | ||
103 | { | ||
104 | static const int companion_statuses[] = { | ||
105 | [UWB_RSV_STATE_O_MOVE_EXPANDING] = 0, | ||
106 | [UWB_RSV_STATE_T_EXPANDING_ACCEPTED] = 1, | ||
107 | [UWB_RSV_STATE_T_EXPANDING_CONFLICT] = 0, | ||
108 | [UWB_RSV_STATE_T_EXPANDING_PENDING] = 0, | ||
109 | [UWB_RSV_STATE_T_EXPANDING_DENIED] = 0, | ||
110 | }; | ||
111 | |||
112 | return companion_statuses[rsv->state]; | ||
113 | } | ||
114 | |||
26 | /* | 115 | /* |
27 | * Allocate a DRP IE. | 116 | * Allocate a DRP IE. |
28 | * | 117 | * |
@@ -34,16 +123,12 @@ | |||
34 | static struct uwb_ie_drp *uwb_drp_ie_alloc(void) | 123 | static struct uwb_ie_drp *uwb_drp_ie_alloc(void) |
35 | { | 124 | { |
36 | struct uwb_ie_drp *drp_ie; | 125 | struct uwb_ie_drp *drp_ie; |
37 | unsigned tiebreaker; | ||
38 | 126 | ||
39 | drp_ie = kzalloc(sizeof(struct uwb_ie_drp) + | 127 | drp_ie = kzalloc(sizeof(struct uwb_ie_drp) + |
40 | UWB_NUM_ZONES * sizeof(struct uwb_drp_alloc), | 128 | UWB_NUM_ZONES * sizeof(struct uwb_drp_alloc), |
41 | GFP_KERNEL); | 129 | GFP_KERNEL); |
42 | if (drp_ie) { | 130 | if (drp_ie) { |
43 | drp_ie->hdr.element_id = UWB_IE_DRP; | 131 | drp_ie->hdr.element_id = UWB_IE_DRP; |
44 | |||
45 | get_random_bytes(&tiebreaker, sizeof(unsigned)); | ||
46 | uwb_ie_drp_set_tiebreaker(drp_ie, tiebreaker & 1); | ||
47 | } | 132 | } |
48 | return drp_ie; | 133 | return drp_ie; |
49 | } | 134 | } |
@@ -104,43 +189,17 @@ static void uwb_drp_ie_from_bm(struct uwb_ie_drp *drp_ie, | |||
104 | */ | 189 | */ |
105 | int uwb_drp_ie_update(struct uwb_rsv *rsv) | 190 | int uwb_drp_ie_update(struct uwb_rsv *rsv) |
106 | { | 191 | { |
107 | struct device *dev = &rsv->rc->uwb_dev.dev; | ||
108 | struct uwb_ie_drp *drp_ie; | 192 | struct uwb_ie_drp *drp_ie; |
109 | int reason_code, status; | 193 | struct uwb_rsv_move *mv; |
194 | int unsafe; | ||
110 | 195 | ||
111 | switch (rsv->state) { | 196 | if (rsv->state == UWB_RSV_STATE_NONE) { |
112 | case UWB_RSV_STATE_NONE: | ||
113 | kfree(rsv->drp_ie); | 197 | kfree(rsv->drp_ie); |
114 | rsv->drp_ie = NULL; | 198 | rsv->drp_ie = NULL; |
115 | return 0; | 199 | return 0; |
116 | case UWB_RSV_STATE_O_INITIATED: | ||
117 | reason_code = UWB_DRP_REASON_ACCEPTED; | ||
118 | status = 0; | ||
119 | break; | ||
120 | case UWB_RSV_STATE_O_PENDING: | ||
121 | reason_code = UWB_DRP_REASON_ACCEPTED; | ||
122 | status = 0; | ||
123 | break; | ||
124 | case UWB_RSV_STATE_O_MODIFIED: | ||
125 | reason_code = UWB_DRP_REASON_MODIFIED; | ||
126 | status = 1; | ||
127 | break; | ||
128 | case UWB_RSV_STATE_O_ESTABLISHED: | ||
129 | reason_code = UWB_DRP_REASON_ACCEPTED; | ||
130 | status = 1; | ||
131 | break; | ||
132 | case UWB_RSV_STATE_T_ACCEPTED: | ||
133 | reason_code = UWB_DRP_REASON_ACCEPTED; | ||
134 | status = 1; | ||
135 | break; | ||
136 | case UWB_RSV_STATE_T_DENIED: | ||
137 | reason_code = UWB_DRP_REASON_DENIED; | ||
138 | status = 0; | ||
139 | break; | ||
140 | default: | ||
141 | dev_dbg(dev, "rsv with unhandled state (%d)\n", rsv->state); | ||
142 | return -EINVAL; | ||
143 | } | 200 | } |
201 | |||
202 | unsafe = rsv->mas.unsafe ? 1 : 0; | ||
144 | 203 | ||
145 | if (rsv->drp_ie == NULL) { | 204 | if (rsv->drp_ie == NULL) { |
146 | rsv->drp_ie = uwb_drp_ie_alloc(); | 205 | rsv->drp_ie = uwb_drp_ie_alloc(); |
@@ -149,9 +208,11 @@ int uwb_drp_ie_update(struct uwb_rsv *rsv) | |||
149 | } | 208 | } |
150 | drp_ie = rsv->drp_ie; | 209 | drp_ie = rsv->drp_ie; |
151 | 210 | ||
211 | uwb_ie_drp_set_unsafe(drp_ie, unsafe); | ||
212 | uwb_ie_drp_set_tiebreaker(drp_ie, rsv->tiebreaker); | ||
152 | uwb_ie_drp_set_owner(drp_ie, uwb_rsv_is_owner(rsv)); | 213 | uwb_ie_drp_set_owner(drp_ie, uwb_rsv_is_owner(rsv)); |
153 | uwb_ie_drp_set_status(drp_ie, status); | 214 | uwb_ie_drp_set_status(drp_ie, uwb_rsv_status(rsv)); |
154 | uwb_ie_drp_set_reason_code(drp_ie, reason_code); | 215 | uwb_ie_drp_set_reason_code(drp_ie, uwb_rsv_reason_code(rsv)); |
155 | uwb_ie_drp_set_stream_index(drp_ie, rsv->stream); | 216 | uwb_ie_drp_set_stream_index(drp_ie, rsv->stream); |
156 | uwb_ie_drp_set_type(drp_ie, rsv->type); | 217 | uwb_ie_drp_set_type(drp_ie, rsv->type); |
157 | 218 | ||
@@ -169,6 +230,27 @@ int uwb_drp_ie_update(struct uwb_rsv *rsv) | |||
169 | 230 | ||
170 | uwb_drp_ie_from_bm(drp_ie, &rsv->mas); | 231 | uwb_drp_ie_from_bm(drp_ie, &rsv->mas); |
171 | 232 | ||
233 | if (uwb_rsv_has_two_drp_ies(rsv)) { | ||
234 | mv = &rsv->mv; | ||
235 | if (mv->companion_drp_ie == NULL) { | ||
236 | mv->companion_drp_ie = uwb_drp_ie_alloc(); | ||
237 | if (mv->companion_drp_ie == NULL) | ||
238 | return -ENOMEM; | ||
239 | } | ||
240 | drp_ie = mv->companion_drp_ie; | ||
241 | |||
242 | /* keep all the same configuration of the main drp_ie */ | ||
243 | memcpy(drp_ie, rsv->drp_ie, sizeof(struct uwb_ie_drp)); | ||
244 | |||
245 | /* FIXME: properly handle the unsafe bit */ | ||
247 | uwb_ie_drp_set_unsafe(drp_ie, 1); | ||
248 | uwb_ie_drp_set_status(drp_ie, uwb_rsv_companion_status(rsv)); | ||
249 | uwb_ie_drp_set_reason_code(drp_ie, uwb_rsv_companion_reason_code(rsv)); | ||
250 | |||
251 | uwb_drp_ie_from_bm(drp_ie, &mv->companion_mas); | ||
252 | } | ||
253 | |||
172 | rsv->ie_valid = true; | 254 | rsv->ie_valid = true; |
173 | return 0; | 255 | return 0; |
174 | } | 256 | } |
@@ -219,6 +301,8 @@ void uwb_drp_ie_to_bm(struct uwb_mas_bm *bm, const struct uwb_ie_drp *drp_ie) | |||
219 | u8 zone; | 301 | u8 zone; |
220 | u16 zone_mask; | 302 | u16 zone_mask; |
221 | 303 | ||
304 | bitmap_zero(bm->bm, UWB_NUM_MAS); | ||
305 | |||
222 | for (cnt = 0; cnt < numallocs; cnt++) { | 306 | for (cnt = 0; cnt < numallocs; cnt++) { |
223 | alloc = &drp_ie->allocs[cnt]; | 307 | alloc = &drp_ie->allocs[cnt]; |
224 | zone_bm = le16_to_cpu(alloc->zone_bm); | 308 | zone_bm = le16_to_cpu(alloc->zone_bm); |
@@ -230,3 +314,4 @@ void uwb_drp_ie_to_bm(struct uwb_mas_bm *bm, const struct uwb_ie_drp *drp_ie) | |||
230 | } | 314 | } |
231 | } | 315 | } |
232 | } | 316 | } |
317 | |||
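
uwb_drp_ie_to_bm() now zeroes the target bitmap before expanding the DRP allocations. Each allocation pairs a zone bitmap with a per-zone MAS bitmap, and a MAS is reserved when both bits are set; a standalone sketch of that expansion, assuming this is the [ECMA-368] encoding the loop above implements:

    #include <stdint.h>

    #define UWB_NUM_ZONES    16
    #define UWB_MAS_PER_ZONE 16

    /* Expand one DRP allocation into a flat 256-entry MAS map:
     * MAS (zone, mas) is reserved iff bit 'zone' of zone_bm and
     * bit 'mas' of mas_bm are both set. */
    static void drp_alloc_to_bm_sketch(uint16_t zone_bm, uint16_t mas_bm,
                                       uint8_t bm[UWB_NUM_ZONES * UWB_MAS_PER_ZONE])
    {
            int zone, mas;

            for (zone = 0; zone < UWB_NUM_ZONES; zone++) {
                    if (!(zone_bm & (1 << zone)))
                            continue;
                    for (mas = 0; mas < UWB_MAS_PER_ZONE; mas++)
                            if (mas_bm & (1 << mas))
                                    bm[zone * UWB_MAS_PER_ZONE + mas] = 1;
            }
    }
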
diff --git a/drivers/uwb/drp.c b/drivers/uwb/drp.c index c0b1e5e2bd08..2b4f9406789d 100644 --- a/drivers/uwb/drp.c +++ b/drivers/uwb/drp.c | |||
@@ -23,6 +23,59 @@ | |||
23 | #include <linux/delay.h> | 23 | #include <linux/delay.h> |
24 | #include "uwb-internal.h" | 24 | #include "uwb-internal.h" |
25 | 25 | ||
26 | |||
27 | /* DRP Conflict Actions ([ECMA-368 2nd Edition] 17.4.6) */ | ||
28 | enum uwb_drp_conflict_action { | ||
29 | /* Reservation is mantained, no action needed */ | ||
30 | UWB_DRP_CONFLICT_MANTAIN = 0, | ||
31 | |||
32 | /* the device shall not transmit frames in conflicting MASs in | ||
33 | * the following superframe. If the device is the reservation | ||
34 | * target, it shall also set the Reason Code in its DRP IE to | ||
35 | * Conflict in its beacon in the following superframe. | ||
36 | */ | ||
37 | UWB_DRP_CONFLICT_ACT1, | ||
38 | |||
39 | /* the device shall not set the Reservation Status bit to ONE | ||
40 | * and shall not transmit frames in conflicting MASs. If the | ||
41 | * device is the reservation target, it shall also set the | ||
42 | * Reason Code in its DRP IE to Conflict. | ||
43 | */ | ||
44 | UWB_DRP_CONFLICT_ACT2, | ||
45 | |||
46 | /* the device shall not transmit frames in conflicting MASs in | ||
47 | * the following superframe. It shall remove the conflicting | ||
48 | * MASs from the reservation or set the Reservation Status to | ||
49 | * ZERO in its beacon in the following superframe. If the | ||
50 | * device is the reservation target, it shall also set the | ||
51 | * Reason Code in its DRP IE to Conflict. | ||
52 | */ | ||
53 | UWB_DRP_CONFLICT_ACT3, | ||
54 | }; | ||
55 | |||
56 | |||
57 | static void uwb_rc_set_drp_cmd_done(struct uwb_rc *rc, void *arg, | ||
58 | struct uwb_rceb *reply, ssize_t reply_size) | ||
59 | { | ||
60 | struct uwb_rc_evt_set_drp_ie *r = (struct uwb_rc_evt_set_drp_ie *)reply; | ||
61 | |||
62 | if (r != NULL) { | ||
63 | if (r->bResultCode != UWB_RC_RES_SUCCESS) | ||
64 | dev_err(&rc->uwb_dev.dev, "SET-DRP-IE failed: %s (%d)\n", | ||
65 | uwb_rc_strerror(r->bResultCode), r->bResultCode); | ||
66 | } else | ||
67 | dev_err(&rc->uwb_dev.dev, "SET-DRP-IE: timeout\n"); | ||
68 | |||
69 | spin_lock(&rc->rsvs_lock); | ||
70 | if (rc->set_drp_ie_pending > 1) { | ||
71 | rc->set_drp_ie_pending = 0; | ||
72 | uwb_rsv_queue_update(rc); | ||
73 | } else { | ||
74 | rc->set_drp_ie_pending = 0; | ||
75 | } | ||
76 | spin_unlock(&rc->rsvs_lock); | ||
77 | } | ||
78 | |||
26 | /** | 79 | /** |
27 | * Construct and send the SET DRP IE | 80 | * Construct and send the SET DRP IE |
28 | * | 81 | * |
@@ -37,28 +90,32 @@ | |||
37 | * | 90 | * |
38 | * A DRP Availability IE is appended. | 91 | * A DRP Availability IE is appended. |
39 | * | 92 | * |
40 | * rc->uwb_dev.mutex is held | 93 | * rc->rsvs_mutex is held |
41 | * | 94 | * |
42 | * FIXME We currently ignore the returned value indicating the remaining space | 95 | * FIXME We currently ignore the returned value indicating the remaining space |
43 | * in beacon. This could be used to deny reservation requests earlier if | 96 | * in beacon. This could be used to deny reservation requests earlier if |
44 | * determined that they would cause the beacon space to be exceeded. | 97 | * determined that they would cause the beacon space to be exceeded. |
45 | */ | 98 | */ |
46 | static | 99 | int uwb_rc_send_all_drp_ie(struct uwb_rc *rc) |
47 | int uwb_rc_gen_send_drp_ie(struct uwb_rc *rc) | ||
48 | { | 100 | { |
49 | int result; | 101 | int result; |
50 | struct device *dev = &rc->uwb_dev.dev; | ||
51 | struct uwb_rc_cmd_set_drp_ie *cmd; | 102 | struct uwb_rc_cmd_set_drp_ie *cmd; |
52 | struct uwb_rc_evt_set_drp_ie reply; | ||
53 | struct uwb_rsv *rsv; | 103 | struct uwb_rsv *rsv; |
104 | struct uwb_rsv_move *mv; | ||
54 | int num_bytes = 0; | 105 | int num_bytes = 0; |
55 | u8 *IEDataptr; | 106 | u8 *IEDataptr; |
56 | 107 | ||
57 | result = -ENOMEM; | 108 | result = -ENOMEM; |
58 | /* First traverse all reservations to determine memory needed. */ | 109 | /* First traverse all reservations to determine memory needed. */ |
59 | list_for_each_entry(rsv, &rc->reservations, rc_node) { | 110 | list_for_each_entry(rsv, &rc->reservations, rc_node) { |
60 | if (rsv->drp_ie != NULL) | 111 | if (rsv->drp_ie != NULL) { |
61 | num_bytes += rsv->drp_ie->hdr.length + 2; | 112 | num_bytes += rsv->drp_ie->hdr.length + 2; |
113 | if (uwb_rsv_has_two_drp_ies(rsv) && | ||
114 | (rsv->mv.companion_drp_ie != NULL)) { | ||
115 | mv = &rsv->mv; | ||
116 | num_bytes += mv->companion_drp_ie->hdr.length + 2; | ||
117 | } | ||
118 | } | ||
62 | } | 119 | } |
63 | num_bytes += sizeof(rc->drp_avail.ie); | 120 | num_bytes += sizeof(rc->drp_avail.ie); |
64 | cmd = kzalloc(sizeof(*cmd) + num_bytes, GFP_KERNEL); | 121 | cmd = kzalloc(sizeof(*cmd) + num_bytes, GFP_KERNEL); |
@@ -69,128 +126,322 @@ int uwb_rc_gen_send_drp_ie(struct uwb_rc *rc) | |||
69 | cmd->wIELength = num_bytes; | 126 | cmd->wIELength = num_bytes; |
70 | IEDataptr = (u8 *)&cmd->IEData[0]; | 127 | IEDataptr = (u8 *)&cmd->IEData[0]; |
71 | 128 | ||
129 | /* FIXME: DRP avail IE is not always needed */ | ||
130 | /* put DRP avail IE first */ | ||
131 | memcpy(IEDataptr, &rc->drp_avail.ie, sizeof(rc->drp_avail.ie)); | ||
132 | IEDataptr += sizeof(struct uwb_ie_drp_avail); | ||
133 | |||
72 | /* Next traverse all reservations to place IEs in allocated memory. */ | 134 | /* Next traverse all reservations to place IEs in allocated memory. */ |
73 | list_for_each_entry(rsv, &rc->reservations, rc_node) { | 135 | list_for_each_entry(rsv, &rc->reservations, rc_node) { |
74 | if (rsv->drp_ie != NULL) { | 136 | if (rsv->drp_ie != NULL) { |
75 | memcpy(IEDataptr, rsv->drp_ie, | 137 | memcpy(IEDataptr, rsv->drp_ie, |
76 | rsv->drp_ie->hdr.length + 2); | 138 | rsv->drp_ie->hdr.length + 2); |
77 | IEDataptr += rsv->drp_ie->hdr.length + 2; | 139 | IEDataptr += rsv->drp_ie->hdr.length + 2; |
140 | |||
141 | if (uwb_rsv_has_two_drp_ies(rsv) && | ||
142 | (rsv->mv.companion_drp_ie != NULL)) { | ||
143 | mv = &rsv->mv; | ||
144 | memcpy(IEDataptr, mv->companion_drp_ie, | ||
145 | mv->companion_drp_ie->hdr.length + 2); | ||
146 | IEDataptr += mv->companion_drp_ie->hdr.length + 2; | ||
147 | } | ||
78 | } | 148 | } |
79 | } | 149 | } |
80 | memcpy(IEDataptr, &rc->drp_avail.ie, sizeof(rc->drp_avail.ie)); | ||
81 | 150 | ||
82 | reply.rceb.bEventType = UWB_RC_CET_GENERAL; | 151 | result = uwb_rc_cmd_async(rc, "SET-DRP-IE", &cmd->rccb, sizeof(*cmd) + num_bytes, |
83 | reply.rceb.wEvent = UWB_RC_CMD_SET_DRP_IE; | 152 | UWB_RC_CET_GENERAL, UWB_RC_CMD_SET_DRP_IE, |
84 | result = uwb_rc_cmd(rc, "SET-DRP-IE", &cmd->rccb, | 153 | uwb_rc_set_drp_cmd_done, NULL); |
85 | sizeof(*cmd) + num_bytes, &reply.rceb, | 154 | |
86 | sizeof(reply)); | 155 | rc->set_drp_ie_pending = 1; |
87 | if (result < 0) | 156 | |
88 | goto error_cmd; | ||
89 | result = le16_to_cpu(reply.wRemainingSpace); | ||
90 | if (reply.bResultCode != UWB_RC_RES_SUCCESS) { | ||
91 | dev_err(&rc->uwb_dev.dev, "SET-DRP-IE: command execution " | ||
92 | "failed: %s (%d). RemainingSpace in beacon " | ||
93 | "= %d\n", uwb_rc_strerror(reply.bResultCode), | ||
94 | reply.bResultCode, result); | ||
95 | result = -EIO; | ||
96 | } else { | ||
97 | dev_dbg(dev, "SET-DRP-IE sent. RemainingSpace in beacon " | ||
98 | "= %d.\n", result); | ||
99 | result = 0; | ||
100 | } | ||
101 | error_cmd: | ||
102 | kfree(cmd); | 157 | kfree(cmd); |
103 | error: | 158 | error: |
104 | return result; | 159 | return result; |
105 | |||
106 | } | 160 | } |
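
A note on the sizing pass above: a WiMedia IE's length field counts only the payload octets, so every copy adds 2 for the element-ID and length octets, hence the recurring hdr.length + 2. Note also that cmd is freed immediately after uwb_rc_cmd_async() returns, so the asynchronous path must take its own copy of the command buffer. Below is a minimal userspace sketch of the same two-pass pattern (sum the sizes, allocate once, pack back to back); the simplified struct ie is illustrative, not the driver's type.

/* Two-pass IE packing sketch (userspace model, not the driver code).
 * Assumed simplified IE: a 2-octet header (element ID, payload length)
 * followed by hdr.length payload octets, as in ECMA-368 IEs. */
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct ie_hdr {
	uint8_t element_id;
	uint8_t length;		/* payload octets only, header excluded */
};

struct ie {
	struct ie_hdr hdr;
	uint8_t payload[16];
};

int main(void)
{
	struct ie ies[3] = {
		{ { 0x10, 4 }, { 1, 2, 3, 4 } },
		{ { 0x10, 2 }, { 5, 6 } },
		{ { 0x08, 8 }, { 0 } },
	};
	size_t num_bytes = 0, i;
	uint8_t *buf, *p;

	/* Pass 1: each IE occupies header (2) + payload (hdr.length). */
	for (i = 0; i < 3; i++)
		num_bytes += ies[i].hdr.length + 2;

	buf = malloc(num_bytes);
	if (buf == NULL)
		return 1;

	/* Pass 2: pack the IEs back to back into one allocation. */
	p = buf;
	for (i = 0; i < 3; i++) {
		memcpy(p, &ies[i], ies[i].hdr.length + 2);
		p += ies[i].hdr.length + 2;
	}
	printf("packed %zu octets\n", num_bytes);	/* 6 + 4 + 10 = 20 */
	free(buf);
	return 0;
}
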
107 | /** | 161 | |
108 | * Send all DRP IEs associated with this host | 162 | /* |
109 | * | 163 | * Evaluate the action to perform using conflict resolution rules |
110 | * @returns: >= 0 number of bytes still available in the beacon | ||
111 | * < 0 errno code on error. | ||
112 | * | 164 | * |
113 | * As per the protocol we obtain the host controller device lock to access | 165 | * Return a uwb_drp_conflict_action. |
114 | * bandwidth structures. | ||
115 | */ | 166 | */ |
116 | int uwb_rc_send_all_drp_ie(struct uwb_rc *rc) | 167 | static int evaluate_conflict_action(struct uwb_ie_drp *ext_drp_ie, int ext_beacon_slot, |
168 | struct uwb_rsv *rsv, int our_status) | ||
117 | { | 169 | { |
118 | int result; | 170 | int our_tie_breaker = rsv->tiebreaker; |
171 | int our_type = rsv->type; | ||
172 | int our_beacon_slot = rsv->rc->uwb_dev.beacon_slot; | ||
173 | |||
174 | int ext_tie_breaker = uwb_ie_drp_tiebreaker(ext_drp_ie); | ||
175 | int ext_status = uwb_ie_drp_status(ext_drp_ie); | ||
176 | int ext_type = uwb_ie_drp_type(ext_drp_ie); | ||
177 | |||
178 | |||
179 | /* [ECMA-368 2nd Edition] 17.4.6 */ | ||
180 | if (ext_type == UWB_DRP_TYPE_PCA && our_type == UWB_DRP_TYPE_PCA) { | ||
181 | return UWB_DRP_CONFLICT_MANTAIN; | ||
182 | } | ||
119 | 183 | ||
120 | mutex_lock(&rc->uwb_dev.mutex); | 184 | /* [ECMA-368 2nd Edition] 17.4.6-1 */ |
121 | result = uwb_rc_gen_send_drp_ie(rc); | 185 | if (our_type == UWB_DRP_TYPE_ALIEN_BP) { |
122 | mutex_unlock(&rc->uwb_dev.mutex); | 186 | return UWB_DRP_CONFLICT_MANTAIN; |
123 | return result; | 187 | } |
188 | |||
189 | /* [ECMA-368 2nd Edition] 17.4.6-2 */ | ||
190 | if (ext_type == UWB_DRP_TYPE_ALIEN_BP) { | ||
191 | /* here we know our_type != UWB_DRP_TYPE_ALIEN_BP */ | ||
192 | return UWB_DRP_CONFLICT_ACT1; | ||
193 | } | ||
194 | |||
195 | /* [ECMA-368 2nd Edition] 17.4.6-3 */ | ||
196 | if (our_status == 0 && ext_status == 1) { | ||
197 | return UWB_DRP_CONFLICT_ACT2; | ||
198 | } | ||
199 | |||
200 | /* [ECMA-368 2nd Edition] 17.4.6-4 */ | ||
201 | if (our_status == 1 && ext_status == 0) { | ||
202 | return UWB_DRP_CONFLICT_MANTAIN; | ||
203 | } | ||
204 | |||
205 | /* [ECMA-368 2nd Edition] 17.4.6-5a */ | ||
206 | if (our_tie_breaker == ext_tie_breaker && | ||
207 | our_beacon_slot < ext_beacon_slot) { | ||
208 | return UWB_DRP_CONFLICT_MANTAIN; | ||
209 | } | ||
210 | |||
211 | /* [ECMA-368 2nd Edition] 17.4.6-5b */ | ||
212 | if (our_tie_breaker != ext_tie_breaker && | ||
213 | our_beacon_slot > ext_beacon_slot) { | ||
214 | return UWB_DRP_CONFLICT_MANTAIN; | ||
215 | } | ||
216 | |||
217 | if (our_status == 0) { | ||
218 | if (our_tie_breaker == ext_tie_breaker) { | ||
219 | /* [ECMA-368 2nd Edition] 17.4.6-6a */ | ||
220 | if (our_beacon_slot > ext_beacon_slot) { | ||
221 | return UWB_DRP_CONFLICT_ACT2; | ||
222 | } | ||
223 | } else { | ||
224 | /* [ECMA-368 2nd Edition] 17.4.6-6b */ | ||
225 | if (our_beacon_slot < ext_beacon_slot) { | ||
226 | return UWB_DRP_CONFLICT_ACT2; | ||
227 | } | ||
228 | } | ||
229 | } else { | ||
230 | if (our_tie_breaker == ext_tie_breaker) { | ||
231 | /* [ECMA-368 2nd Edition] 17.4.6-7a */ | ||
232 | if (our_beacon_slot > ext_beacon_slot) { | ||
233 | return UWB_DRP_CONFLICT_ACT3; | ||
234 | } | ||
235 | } else { | ||
236 | /* [ECMA-368 2nd Edition] 17.4.6-7b */ | ||
237 | if (our_beacon_slot < ext_beacon_slot) { | ||
238 | return UWB_DRP_CONFLICT_ACT3; | ||
239 | } | ||
240 | } | ||
241 | } | ||
242 | return UWB_DRP_CONFLICT_MANTAIN; | ||
124 | } | 243 | } |
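
The cascade above transcribes the ECMA-368 17.4.6 rule list: the outcome depends only on the reservation types, the two status bits, the tie-breaker bits, and the relative beacon-slot order. The standalone sketch below models the same decision table with local stand-ins for the kernel's enums and accessors, so the rules can be exercised in isolation.

/* Sketch of the ECMA-368 17.4.6 conflict-resolution cascade.
 * Enum and parameters are local stand-ins for the kernel types. */
#include <stdbool.h>
#include <stdio.h>

enum action { MAINTAIN, ACT1, ACT2, ACT3 };

static enum action resolve(bool pca_vs_pca, bool we_are_alien_bp,
			   bool ext_is_alien_bp, int our_status,
			   int ext_status, int our_tb, int ext_tb,
			   int our_slot, int ext_slot)
{
	if (pca_vs_pca || we_are_alien_bp)
		return MAINTAIN;		/* 17.4.6, 17.4.6-1 */
	if (ext_is_alien_bp)
		return ACT1;			/* 17.4.6-2 */
	if (our_status == 0 && ext_status == 1)
		return ACT2;			/* 17.4.6-3 */
	if (our_status == 1 && ext_status == 0)
		return MAINTAIN;		/* 17.4.6-4 */
	/* 17.4.6-5: tie-breaker bits decide which beacon-slot order wins */
	if (our_slot == ext_slot ||
	    (our_tb == ext_tb && our_slot < ext_slot) ||
	    (our_tb != ext_tb && our_slot > ext_slot))
		return MAINTAIN;
	/* 17.4.6-6/7: the loser backs off (ACT2) or yields MAS (ACT3) */
	return our_status == 0 ? ACT2 : ACT3;
}

int main(void)
{
	/* established vs established, equal tie-breakers, we beacon later */
	printf("%d\n", resolve(false, false, false, 1, 1, 0, 0, 5, 3)); /* 3 */
	/* our pending request vs an established external reservation */
	printf("%d\n", resolve(false, false, false, 0, 1, 0, 0, 1, 9)); /* 2 */
	return 0;
}
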
125 | 244 | ||
126 | void uwb_drp_handle_timeout(struct uwb_rsv *rsv) | 245 | static void handle_conflict_normal(struct uwb_ie_drp *drp_ie, |
246 | int ext_beacon_slot, | ||
247 | struct uwb_rsv *rsv, | ||
248 | struct uwb_mas_bm *conflicting_mas) | ||
127 | { | 249 | { |
128 | struct device *dev = &rsv->rc->uwb_dev.dev; | 250 | struct uwb_rc *rc = rsv->rc; |
251 | struct uwb_rsv_move *mv = &rsv->mv; | ||
252 | struct uwb_drp_backoff_win *bow = &rc->bow; | ||
253 | int action; | ||
254 | |||
255 | action = evaluate_conflict_action(drp_ie, ext_beacon_slot, rsv, uwb_rsv_status(rsv)); | ||
256 | |||
257 | if (uwb_rsv_is_owner(rsv)) { | ||
258 | switch(action) { | ||
259 | case UWB_DRP_CONFLICT_ACT2: | ||
260 | /* try move */ | ||
261 | uwb_rsv_set_state(rsv, UWB_RSV_STATE_O_TO_BE_MOVED); | ||
262 | if (bow->can_reserve_extra_mases == false) | ||
263 | uwb_rsv_backoff_win_increment(rc); | ||
264 | |||
265 | break; | ||
266 | case UWB_DRP_CONFLICT_ACT3: | ||
267 | uwb_rsv_backoff_win_increment(rc); | ||
268 | /* drop some mases with reason modified */ | ||
269 | /* put in the companion the mases to be dropped */ | ||
270 | bitmap_and(mv->companion_mas.bm, rsv->mas.bm, conflicting_mas->bm, UWB_NUM_MAS); | ||
271 | uwb_rsv_set_state(rsv, UWB_RSV_STATE_O_MODIFIED); | ||
272 | default: | ||
273 | break; | ||
274 | } | ||
275 | } else { | ||
276 | switch(action) { | ||
277 | case UWB_DRP_CONFLICT_ACT2: | ||
278 | case UWB_DRP_CONFLICT_ACT3: | ||
279 | uwb_rsv_set_state(rsv, UWB_RSV_STATE_T_CONFLICT); | ||
280 | default: | ||
281 | break; | ||
282 | } | ||
129 | 283 | ||
130 | dev_dbg(dev, "reservation timeout in state %s (%d)\n", | 284 | } |
131 | uwb_rsv_state_str(rsv->state), rsv->state); | 285 | |
286 | } | ||
132 | 287 | ||
133 | switch (rsv->state) { | 288 | static void handle_conflict_expanding(struct uwb_ie_drp *drp_ie, int ext_beacon_slot, |
134 | case UWB_RSV_STATE_O_INITIATED: | 289 | struct uwb_rsv *rsv, bool companion_only, |
135 | if (rsv->is_multicast) { | 290 | struct uwb_mas_bm *conflicting_mas) |
136 | uwb_rsv_set_state(rsv, UWB_RSV_STATE_O_ESTABLISHED); | 291 | { |
137 | return; | 292 | struct uwb_rc *rc = rsv->rc; |
293 | struct uwb_drp_backoff_win *bow = &rc->bow; | ||
294 | struct uwb_rsv_move *mv = &rsv->mv; | ||
295 | int action; | ||
296 | |||
297 | if (companion_only) { | ||
298 | /* status of companion is 0 at this point */ | ||
299 | action = evaluate_conflict_action(drp_ie, ext_beacon_slot, rsv, 0); | ||
300 | if (uwb_rsv_is_owner(rsv)) { | ||
301 | switch(action) { | ||
302 | case UWB_DRP_CONFLICT_ACT2: | ||
303 | case UWB_DRP_CONFLICT_ACT3: | ||
304 | uwb_rsv_set_state(rsv, UWB_RSV_STATE_O_ESTABLISHED); | ||
305 | rsv->needs_release_companion_mas = false; | ||
306 | if (bow->can_reserve_extra_mases == false) | ||
307 | uwb_rsv_backoff_win_increment(rc); | ||
308 | uwb_drp_avail_release(rsv->rc, &rsv->mv.companion_mas); | ||
309 | } | ||
310 | } else { /* rsv is target */ | ||
311 | switch(action) { | ||
312 | case UWB_DRP_CONFLICT_ACT2: | ||
313 | case UWB_DRP_CONFLICT_ACT3: | ||
314 | uwb_rsv_set_state(rsv, UWB_RSV_STATE_T_EXPANDING_CONFLICT); | ||
315 | /* send_drp_avail_ie = true; */ | ||
316 | } | ||
138 | } | 317 | } |
139 | break; | 318 | } else { /* also base part of the reservation is conflicting */ |
140 | case UWB_RSV_STATE_O_ESTABLISHED: | 319 | if (uwb_rsv_is_owner(rsv)) { |
141 | if (rsv->is_multicast) | 320 | uwb_rsv_backoff_win_increment(rc); |
142 | return; | 321 | /* remove companion part */ |
143 | break; | 322 | uwb_drp_avail_release(rsv->rc, &rsv->mv.companion_mas); |
144 | default: | 323 | |
145 | break; | 324 | /* drop some mases with reason modified */ |
325 | |||
326 | /* put in the companion the mases to be dropped */ | ||
327 | bitmap_andnot(mv->companion_mas.bm, rsv->mas.bm, conflicting_mas->bm, UWB_NUM_MAS); | ||
328 | uwb_rsv_set_state(rsv, UWB_RSV_STATE_O_MODIFIED); | ||
329 | } else { /* it is a target rsv */ | ||
330 | uwb_rsv_set_state(rsv, UWB_RSV_STATE_T_CONFLICT); | ||
331 | /* send_drp_avail_ie = true; */ | ||
332 | } | ||
333 | } | ||
334 | } | ||
335 | |||
336 | static void uwb_drp_handle_conflict_rsv(struct uwb_rc *rc, struct uwb_rsv *rsv, | ||
337 | struct uwb_rc_evt_drp *drp_evt, | ||
338 | struct uwb_ie_drp *drp_ie, | ||
339 | struct uwb_mas_bm *conflicting_mas) | ||
340 | { | ||
341 | struct uwb_rsv_move *mv; | ||
342 | |||
343 | /* check if the conflicting reservation has two drp_ies */ | ||
344 | if (uwb_rsv_has_two_drp_ies(rsv)) { | ||
345 | mv = &rsv->mv; | ||
346 | if (bitmap_intersects(rsv->mas.bm, conflicting_mas->bm, UWB_NUM_MAS)) { | ||
347 | handle_conflict_expanding(drp_ie, drp_evt->beacon_slot_number, | ||
348 | rsv, false, conflicting_mas); | ||
349 | } else { | ||
350 | if (bitmap_intersects(mv->companion_mas.bm, conflicting_mas->bm, UWB_NUM_MAS)) { | ||
351 | handle_conflict_expanding(drp_ie, drp_evt->beacon_slot_number, | ||
352 | rsv, true, conflicting_mas); | ||
353 | } | ||
354 | } | ||
355 | } else if (bitmap_intersects(rsv->mas.bm, conflicting_mas->bm, UWB_NUM_MAS)) { | ||
356 | handle_conflict_normal(drp_ie, drp_evt->beacon_slot_number, rsv, conflicting_mas); | ||
146 | } | 357 | } |
147 | uwb_rsv_remove(rsv); | ||
148 | } | 358 | } |
149 | 359 | ||
360 | static void uwb_drp_handle_all_conflict_rsv(struct uwb_rc *rc, | ||
361 | struct uwb_rc_evt_drp *drp_evt, | ||
362 | struct uwb_ie_drp *drp_ie, | ||
363 | struct uwb_mas_bm *conflicting_mas) | ||
364 | { | ||
365 | struct uwb_rsv *rsv; | ||
366 | |||
367 | list_for_each_entry(rsv, &rc->reservations, rc_node) { | ||
368 | uwb_drp_handle_conflict_rsv(rc, rsv, drp_evt, drp_ie, conflicting_mas); | ||
369 | } | ||
370 | } | ||
371 | |||
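
The classification in uwb_drp_handle_conflict_rsv() is pure bitmap arithmetic over the 256-MAS superframe map: a reported conflict either overlaps the base allocation, overlaps only the companion (move/expansion) allocation, or misses the reservation entirely. A self-contained sketch of that dispatch follows, with a fixed 256-bit map and local stand-ins for the kernel's bitmap helpers.

/* MAS-bitmap conflict classification sketch (userspace model).
 * UWB_NUM_MAS is 256 in ECMA-368: 16 zones x 16 MAS per superframe. */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define NUM_MAS 256
#define WORDS (NUM_MAS / 64)

typedef struct { uint64_t w[WORDS]; } mas_bm;

static bool intersects(const mas_bm *a, const mas_bm *b)
{
	for (int i = 0; i < WORDS; i++)
		if (a->w[i] & b->w[i])
			return true;
	return false;
}

static void set_range(mas_bm *m, int from, int to)	/* [from, to) */
{
	for (int i = from; i < to; i++)
		m->w[i / 64] |= 1ULL << (i % 64);
}

int main(void)
{
	mas_bm base = { { 0 } }, companion = { { 0 } }, conflict = { { 0 } };

	set_range(&base, 0, 32);	/* established reservation */
	set_range(&companion, 32, 48);	/* extra MAS requested for a move */
	set_range(&conflict, 40, 44);	/* MAS the neighbour also claims */

	if (intersects(&base, &conflict))
		puts("base conflict: resolve on the whole reservation");
	else if (intersects(&companion, &conflict))
		puts("companion-only conflict: resolve on the expansion");
	else
		puts("no overlap with this reservation");
	return 0;
}
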
150 | /* | 372 | /* |
151 | * Based on the DRP IE, transition a target reservation to a new | 373 | * Based on the DRP IE, transition a target reservation to a new |
152 | * state. | 374 | * state. |
153 | */ | 375 | */ |
154 | static void uwb_drp_process_target(struct uwb_rc *rc, struct uwb_rsv *rsv, | 376 | static void uwb_drp_process_target(struct uwb_rc *rc, struct uwb_rsv *rsv, |
155 | struct uwb_ie_drp *drp_ie) | 377 | struct uwb_ie_drp *drp_ie, struct uwb_rc_evt_drp *drp_evt) |
156 | { | 378 | { |
157 | struct device *dev = &rc->uwb_dev.dev; | 379 | struct device *dev = &rc->uwb_dev.dev; |
380 | struct uwb_rsv_move *mv = &rsv->mv; | ||
158 | int status; | 381 | int status; |
159 | enum uwb_drp_reason reason_code; | 382 | enum uwb_drp_reason reason_code; |
160 | 383 | struct uwb_mas_bm mas; | |
384 | |||
161 | status = uwb_ie_drp_status(drp_ie); | 385 | status = uwb_ie_drp_status(drp_ie); |
162 | reason_code = uwb_ie_drp_reason_code(drp_ie); | 386 | reason_code = uwb_ie_drp_reason_code(drp_ie); |
387 | uwb_drp_ie_to_bm(&mas, drp_ie); | ||
163 | 388 | ||
164 | if (status) { | 389 | switch (reason_code) { |
165 | switch (reason_code) { | 390 | case UWB_DRP_REASON_ACCEPTED: |
166 | case UWB_DRP_REASON_ACCEPTED: | 391 | |
167 | uwb_rsv_set_state(rsv, UWB_RSV_STATE_T_ACCEPTED); | 392 | if (rsv->state == UWB_RSV_STATE_T_CONFLICT) { |
168 | break; | 393 | uwb_rsv_set_state(rsv, UWB_RSV_STATE_T_CONFLICT); |
169 | case UWB_DRP_REASON_MODIFIED: | ||
170 | dev_err(dev, "FIXME: unhandled reason code (%d/%d)\n", | ||
171 | reason_code, status); | ||
172 | break; | 394 | break; |
173 | default: | ||
174 | dev_warn(dev, "ignoring invalid DRP IE state (%d/%d)\n", | ||
175 | reason_code, status); | ||
176 | } | 395 | } |
177 | } else { | 396 | |
178 | switch (reason_code) { | 397 | if (rsv->state == UWB_RSV_STATE_T_EXPANDING_ACCEPTED) { |
179 | case UWB_DRP_REASON_ACCEPTED: | 398 | /* drp_ie is companion */ |
180 | /* New reservations are handled in uwb_rsv_find(). */ | 399 | if (!bitmap_equal(rsv->mas.bm, mas.bm, UWB_NUM_MAS)) |
181 | break; | 400 | /* stroke companion */ |
182 | case UWB_DRP_REASON_DENIED: | 401 | uwb_rsv_set_state(rsv, UWB_RSV_STATE_T_EXPANDING_ACCEPTED); |
183 | uwb_rsv_set_state(rsv, UWB_RSV_STATE_NONE); | 402 | } else { |
184 | break; | 403 | if (!bitmap_equal(rsv->mas.bm, mas.bm, UWB_NUM_MAS)) { |
185 | case UWB_DRP_REASON_CONFLICT: | 404 | if (uwb_drp_avail_reserve_pending(rc, &mas) == -EBUSY) { |
186 | case UWB_DRP_REASON_MODIFIED: | 405 | /* FIXME: there is a conflict, find |
187 | dev_err(dev, "FIXME: unhandled reason code (%d/%d)\n", | 406 | * the conflicting reservations and |
188 | reason_code, status); | 407 | * take a sensible action. Consider |
408 | * that in drp_ie there is the | ||
409 | * "neighbour" */ | ||
410 | uwb_drp_handle_all_conflict_rsv(rc, drp_evt, drp_ie, &mas); | ||
411 | } else { | ||
412 | /* accept the extra reservation */ | ||
413 | bitmap_copy(mv->companion_mas.bm, mas.bm, UWB_NUM_MAS); | ||
414 | uwb_rsv_set_state(rsv, UWB_RSV_STATE_T_EXPANDING_ACCEPTED); | ||
415 | } | ||
416 | } else { | ||
417 | if (status) { | ||
418 | uwb_rsv_set_state(rsv, UWB_RSV_STATE_T_ACCEPTED); | ||
419 | } | ||
420 | } | ||
421 | |||
422 | } | ||
423 | break; | ||
424 | |||
425 | case UWB_DRP_REASON_MODIFIED: | ||
426 | /* check to see if we have already modified the reservation */ | ||
427 | if (bitmap_equal(rsv->mas.bm, mas.bm, UWB_NUM_MAS)) { | ||
428 | uwb_rsv_set_state(rsv, UWB_RSV_STATE_T_ACCEPTED); | ||
189 | break; | 429 | break; |
190 | default: | ||
191 | dev_warn(dev, "ignoring invalid DRP IE state (%d/%d)\n", | ||
192 | reason_code, status); | ||
193 | } | 430 | } |
431 | |||
432 | /* find if the owner wants to expand or reduce */ | ||
433 | if (bitmap_subset(mas.bm, rsv->mas.bm, UWB_NUM_MAS)) { | ||
434 | /* owner is reducing */ | ||
435 | bitmap_andnot(mv->companion_mas.bm, rsv->mas.bm, mas.bm, UWB_NUM_MAS); | ||
436 | uwb_drp_avail_release(rsv->rc, &mv->companion_mas); | ||
437 | } | ||
438 | |||
439 | bitmap_copy(rsv->mas.bm, mas.bm, UWB_NUM_MAS); | ||
440 | uwb_rsv_set_state(rsv, UWB_RSV_STATE_T_RESIZED); | ||
441 | break; | ||
442 | default: | ||
443 | dev_warn(dev, "ignoring invalid DRP IE state (%d/%d)\n", | ||
444 | reason_code, status); | ||
194 | } | 445 | } |
195 | } | 446 | } |
196 | 447 | ||
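
In the UWB_DRP_REASON_MODIFIED branch just above, the target distinguishes a shrink from a grow by testing whether the announced MAS set is a subset of the currently held one; if it is, the owner is reducing, and the difference (held AND NOT announced) is exactly the set of MAS to release back to the availability pool. A small sketch of that test, using the same illustrative bitmap layout as before:

/* Reduce-vs-expand test for a MODIFIED DRP IE (userspace sketch). */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define NUM_MAS 256
#define WORDS (NUM_MAS / 64)

typedef struct { uint64_t w[WORDS]; } mas_bm;

static bool subset(const mas_bm *a, const mas_bm *b)	/* a within b? */
{
	for (int i = 0; i < WORDS; i++)
		if (a->w[i] & ~b->w[i])
			return false;
	return true;
}

static int count_bits(const mas_bm *m)
{
	int n = 0;
	for (int i = 0; i < WORDS; i++)
		n += __builtin_popcountll(m->w[i]);	/* GCC/Clang builtin */
	return n;
}

int main(void)
{
	mas_bm current = { { 0xffff } };	/* MAS 0..15 reserved */
	mas_bm announced = { { 0x00ff } };	/* owner now announces 0..7 */
	mas_bm to_release;

	if (subset(&announced, &current)) {
		/* owner is reducing: release current AND NOT announced */
		for (int i = 0; i < WORDS; i++)
			to_release.w[i] = current.w[i] & ~announced.w[i];
		printf("releasing %d MAS\n", count_bits(&to_release)); /* 8 */
	} else {
		puts("owner is expanding");
	}
	return 0;
}
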
@@ -199,23 +450,60 @@ static void uwb_drp_process_target(struct uwb_rc *rc, struct uwb_rsv *rsv, | |||
199 | * state. | 450 | * state. |
200 | */ | 451 | */ |
201 | static void uwb_drp_process_owner(struct uwb_rc *rc, struct uwb_rsv *rsv, | 452 | static void uwb_drp_process_owner(struct uwb_rc *rc, struct uwb_rsv *rsv, |
202 | struct uwb_ie_drp *drp_ie) | 453 | struct uwb_dev *src, struct uwb_ie_drp *drp_ie, |
454 | struct uwb_rc_evt_drp *drp_evt) | ||
203 | { | 455 | { |
204 | struct device *dev = &rc->uwb_dev.dev; | 456 | struct device *dev = &rc->uwb_dev.dev; |
457 | struct uwb_rsv_move *mv = &rsv->mv; | ||
205 | int status; | 458 | int status; |
206 | enum uwb_drp_reason reason_code; | 459 | enum uwb_drp_reason reason_code; |
460 | struct uwb_mas_bm mas; | ||
207 | 461 | ||
208 | status = uwb_ie_drp_status(drp_ie); | 462 | status = uwb_ie_drp_status(drp_ie); |
209 | reason_code = uwb_ie_drp_reason_code(drp_ie); | 463 | reason_code = uwb_ie_drp_reason_code(drp_ie); |
464 | uwb_drp_ie_to_bm(&mas, drp_ie); | ||
210 | 465 | ||
211 | if (status) { | 466 | if (status) { |
212 | switch (reason_code) { | 467 | switch (reason_code) { |
213 | case UWB_DRP_REASON_ACCEPTED: | 468 | case UWB_DRP_REASON_ACCEPTED: |
214 | uwb_rsv_set_state(rsv, UWB_RSV_STATE_O_ESTABLISHED); | 469 | switch (rsv->state) { |
215 | break; | 470 | case UWB_RSV_STATE_O_PENDING: |
216 | case UWB_DRP_REASON_MODIFIED: | 471 | case UWB_RSV_STATE_O_INITIATED: |
217 | dev_err(dev, "FIXME: unhandled reason code (%d/%d)\n", | 472 | case UWB_RSV_STATE_O_ESTABLISHED: |
218 | reason_code, status); | 473 | uwb_rsv_set_state(rsv, UWB_RSV_STATE_O_ESTABLISHED); |
474 | break; | ||
475 | case UWB_RSV_STATE_O_MODIFIED: | ||
476 | if (bitmap_equal(mas.bm, rsv->mas.bm, UWB_NUM_MAS)) { | ||
477 | uwb_rsv_set_state(rsv, UWB_RSV_STATE_O_ESTABLISHED); | ||
478 | } else { | ||
479 | uwb_rsv_set_state(rsv, UWB_RSV_STATE_O_MODIFIED); | ||
480 | } | ||
481 | break; | ||
482 | |||
483 | case UWB_RSV_STATE_O_MOVE_REDUCING: /* shouldn't be a problem */ | ||
484 | if (bitmap_equal(mas.bm, rsv->mas.bm, UWB_NUM_MAS)) { | ||
485 | uwb_rsv_set_state(rsv, UWB_RSV_STATE_O_ESTABLISHED); | ||
486 | } else { | ||
487 | uwb_rsv_set_state(rsv, UWB_RSV_STATE_O_MOVE_REDUCING); | ||
488 | } | ||
489 | break; | ||
490 | case UWB_RSV_STATE_O_MOVE_EXPANDING: | ||
491 | if (bitmap_equal(mas.bm, mv->companion_mas.bm, UWB_NUM_MAS)) { | ||
492 | /* Companion reservation accepted */ | ||
493 | uwb_rsv_set_state(rsv, UWB_RSV_STATE_O_MOVE_COMBINING); | ||
494 | } else { | ||
495 | uwb_rsv_set_state(rsv, UWB_RSV_STATE_O_MOVE_EXPANDING); | ||
496 | } | ||
497 | break; | ||
498 | case UWB_RSV_STATE_O_MOVE_COMBINING: | ||
499 | if (bitmap_equal(mas.bm, rsv->mas.bm, UWB_NUM_MAS)) | ||
500 | uwb_rsv_set_state(rsv, UWB_RSV_STATE_O_MOVE_REDUCING); | ||
501 | else | ||
502 | uwb_rsv_set_state(rsv, UWB_RSV_STATE_O_MOVE_COMBINING); | ||
503 | break; | ||
504 | default: | ||
505 | break; | ||
506 | } | ||
219 | break; | 507 | break; |
220 | default: | 508 | default: |
221 | dev_warn(dev, "ignoring invalid DRP IE state (%d/%d)\n", | 509 | dev_warn(dev, "ignoring invalid DRP IE state (%d/%d)\n", |
@@ -230,9 +518,10 @@ static void uwb_drp_process_owner(struct uwb_rc *rc, struct uwb_rsv *rsv, | |||
230 | uwb_rsv_set_state(rsv, UWB_RSV_STATE_NONE); | 518 | uwb_rsv_set_state(rsv, UWB_RSV_STATE_NONE); |
231 | break; | 519 | break; |
232 | case UWB_DRP_REASON_CONFLICT: | 520 | case UWB_DRP_REASON_CONFLICT: |
233 | case UWB_DRP_REASON_MODIFIED: | 521 | /* resolve the conflict */ |
234 | dev_err(dev, "FIXME: unhandled reason code (%d/%d)\n", | 522 | bitmap_complement(mas.bm, src->last_availability_bm, |
235 | reason_code, status); | 523 | UWB_NUM_MAS); |
524 | uwb_drp_handle_conflict_rsv(rc, rsv, drp_evt, drp_ie, &mas); | ||
236 | break; | 525 | break; |
237 | default: | 526 | default: |
238 | dev_warn(dev, "ignoring invalid DRP IE state (%d/%d)\n", | 527 | dev_warn(dev, "ignoring invalid DRP IE state (%d/%d)\n", |
@@ -241,12 +530,110 @@ static void uwb_drp_process_owner(struct uwb_rc *rc, struct uwb_rsv *rsv, | |||
241 | } | 530 | } |
242 | } | 531 | } |
243 | 532 | ||
533 | static void uwb_cnflt_alien_stroke_timer(struct uwb_cnflt_alien *cnflt) | ||
534 | { | ||
535 | unsigned timeout_us = UWB_MAX_LOST_BEACONS * UWB_SUPERFRAME_LENGTH_US; | ||
536 | mod_timer(&cnflt->timer, jiffies + usecs_to_jiffies(timeout_us)); | ||
537 | } | ||
538 | |||
539 | static void uwb_cnflt_update_work(struct work_struct *work) | ||
540 | { | ||
541 | struct uwb_cnflt_alien *cnflt = container_of(work, | ||
542 | struct uwb_cnflt_alien, | ||
543 | cnflt_update_work); | ||
544 | struct uwb_cnflt_alien *c; | ||
545 | struct uwb_rc *rc = cnflt->rc; | ||
546 | |||
547 | unsigned long delay_us = UWB_MAS_LENGTH_US * UWB_MAS_PER_ZONE; | ||
548 | |||
549 | mutex_lock(&rc->rsvs_mutex); | ||
550 | |||
551 | list_del(&cnflt->rc_node); | ||
552 | |||
553 | /* update rc global conflicting alien bitmap */ | ||
554 | bitmap_zero(rc->cnflt_alien_bitmap.bm, UWB_NUM_MAS); | ||
555 | |||
556 | list_for_each_entry(c, &rc->cnflt_alien_list, rc_node) { | ||
557 | bitmap_or(rc->cnflt_alien_bitmap.bm, rc->cnflt_alien_bitmap.bm, c->mas.bm, UWB_NUM_MAS); | ||
558 | } | ||
559 | |||
560 | queue_delayed_work(rc->rsv_workq, &rc->rsv_alien_bp_work, usecs_to_jiffies(delay_us)); | ||
561 | |||
562 | kfree(cnflt); | ||
563 | mutex_unlock(&rc->rsvs_mutex); | ||
564 | } | ||
565 | |||
566 | static void uwb_cnflt_timer(unsigned long arg) | ||
567 | { | ||
568 | struct uwb_cnflt_alien *cnflt = (struct uwb_cnflt_alien *)arg; | ||
569 | |||
570 | queue_work(cnflt->rc->rsv_workq, &cnflt->cnflt_update_work); | ||
571 | } | ||
572 | |||
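
The timer/work split above is the usual kernel idiom: uwb_cnflt_timer() runs in atomic context, so it only queues work, while uwb_cnflt_update_work() runs in process context where it may take rsvs_mutex and rebuild the global alien-conflict bitmap. Each fresh sighting of the same alien BP bitmap re-arms ("strokes") the timer, so an entry only expires after UWB_MAX_LOST_BEACONS superframes of silence. A sketch of that debounce arithmetic, assuming the ECMA-368 superframe length of 65536 us and the driver's limit of 3 lost beacons:

/* Debounced expiry sketch: an entry survives while it keeps being
 * "stroked", and expires UWB_MAX_LOST_BEACONS superframes after the
 * last sighting. Plain C stand-in for mod_timer()-based refresh. */
#include <stdio.h>

#define UWB_SUPERFRAME_LENGTH_US 65536	/* 256 MAS x 256 us */
#define UWB_MAX_LOST_BEACONS 3

struct entry {
	unsigned long expires_us;
};

static void stroke(struct entry *e, unsigned long now_us)
{
	/* mod_timer() equivalent: push the deadline out again */
	e->expires_us = now_us +
		UWB_MAX_LOST_BEACONS * UWB_SUPERFRAME_LENGTH_US;
}

int main(void)
{
	struct entry e;
	unsigned long now = 0;

	stroke(&e, now);	/* first sighting arms the timer */
	now += 2 * UWB_SUPERFRAME_LENGTH_US;
	stroke(&e, now);	/* seen again: deadline moves out */

	now += 2 * UWB_SUPERFRAME_LENGTH_US;
	printf("expired: %s\n", now >= e.expires_us ? "yes" : "no"); /* no */
	now += 2 * UWB_SUPERFRAME_LENGTH_US;
	printf("expired: %s\n", now >= e.expires_us ? "yes" : "no"); /* yes */
	return 0;
}
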
244 | /* | 573 | /* |
245 | * Process a received DRP IE, it's either for a reservation owned by | 574 | * We have received a DRP IE of type Alien BP and we need to make |
246 | * the RC or targeted at it (or it's for a WUSB cluster reservation). | 575 | * sure we do not transmit in conflicting MASs. |
247 | */ | 576 | */ |
248 | static void uwb_drp_process(struct uwb_rc *rc, struct uwb_dev *src, | 577 | static void uwb_drp_handle_alien_drp(struct uwb_rc *rc, struct uwb_ie_drp *drp_ie) |
249 | struct uwb_ie_drp *drp_ie) | 578 | { |
579 | struct device *dev = &rc->uwb_dev.dev; | ||
580 | struct uwb_mas_bm mas; | ||
581 | struct uwb_cnflt_alien *cnflt; | ||
582 | char buf[72]; | ||
583 | unsigned long delay_us = UWB_MAS_LENGTH_US * UWB_MAS_PER_ZONE; | ||
584 | |||
585 | uwb_drp_ie_to_bm(&mas, drp_ie); | ||
586 | bitmap_scnprintf(buf, sizeof(buf), mas.bm, UWB_NUM_MAS); | ||
587 | |||
588 | list_for_each_entry(cnflt, &rc->cnflt_alien_list, rc_node) { | ||
589 | if (bitmap_equal(cnflt->mas.bm, mas.bm, UWB_NUM_MAS)) { | ||
590 | /* Existing alien BP reservation conflicting | ||
591 | * bitmap, just reset the timer */ | ||
592 | uwb_cnflt_alien_stroke_timer(cnflt); | ||
593 | return; | ||
594 | } | ||
595 | } | ||
596 | |||
597 | /* New alien BP reservation conflicting bitmap */ | ||
598 | |||
599 | /* alloc and initialize new uwb_cnflt_alien */ | ||
600 | cnflt = kzalloc(sizeof(struct uwb_cnflt_alien), GFP_KERNEL); | ||
601 | if (!cnflt) { | ||
602 | dev_err(dev, "failed to alloc uwb_cnflt_alien struct\n"); return; } | ||
603 | INIT_LIST_HEAD(&cnflt->rc_node); | ||
604 | init_timer(&cnflt->timer); | ||
605 | cnflt->timer.function = uwb_cnflt_timer; | ||
606 | cnflt->timer.data = (unsigned long)cnflt; | ||
607 | |||
608 | cnflt->rc = rc; | ||
609 | INIT_WORK(&cnflt->cnflt_update_work, uwb_cnflt_update_work); | ||
610 | |||
611 | bitmap_copy(cnflt->mas.bm, mas.bm, UWB_NUM_MAS); | ||
612 | |||
613 | list_add_tail(&cnflt->rc_node, &rc->cnflt_alien_list); | ||
614 | |||
615 | /* update rc global conflicting alien bitmap */ | ||
616 | bitmap_or(rc->cnflt_alien_bitmap.bm, rc->cnflt_alien_bitmap.bm, mas.bm, UWB_NUM_MAS); | ||
617 | |||
618 | queue_delayed_work(rc->rsv_workq, &rc->rsv_alien_bp_work, usecs_to_jiffies(delay_us)); | ||
619 | |||
620 | /* start the timer */ | ||
621 | uwb_cnflt_alien_stroke_timer(cnflt); | ||
622 | } | ||
623 | |||
624 | static void uwb_drp_process_not_involved(struct uwb_rc *rc, | ||
625 | struct uwb_rc_evt_drp *drp_evt, | ||
626 | struct uwb_ie_drp *drp_ie) | ||
627 | { | ||
628 | struct uwb_mas_bm mas; | ||
629 | |||
630 | uwb_drp_ie_to_bm(&mas, drp_ie); | ||
631 | uwb_drp_handle_all_conflict_rsv(rc, drp_evt, drp_ie, &mas); | ||
632 | } | ||
633 | |||
634 | static void uwb_drp_process_involved(struct uwb_rc *rc, struct uwb_dev *src, | ||
635 | struct uwb_rc_evt_drp *drp_evt, | ||
636 | struct uwb_ie_drp *drp_ie) | ||
250 | { | 637 | { |
251 | struct uwb_rsv *rsv; | 638 | struct uwb_rsv *rsv; |
252 | 639 | ||
@@ -259,7 +646,7 @@ static void uwb_drp_process(struct uwb_rc *rc, struct uwb_dev *src, | |||
259 | */ | 646 | */ |
260 | return; | 647 | return; |
261 | } | 648 | } |
262 | 649 | ||
263 | /* | 650 | /* |
264 | * Do nothing with DRP IEs for reservations that have been | 651 | * Do nothing with DRP IEs for reservations that have been |
265 | * terminated. | 652 | * terminated. |
@@ -268,13 +655,43 @@ static void uwb_drp_process(struct uwb_rc *rc, struct uwb_dev *src, | |||
268 | uwb_rsv_set_state(rsv, UWB_RSV_STATE_NONE); | 655 | uwb_rsv_set_state(rsv, UWB_RSV_STATE_NONE); |
269 | return; | 656 | return; |
270 | } | 657 | } |
271 | 658 | ||
272 | if (uwb_ie_drp_owner(drp_ie)) | 659 | if (uwb_ie_drp_owner(drp_ie)) |
273 | uwb_drp_process_target(rc, rsv, drp_ie); | 660 | uwb_drp_process_target(rc, rsv, drp_ie, drp_evt); |
661 | else | ||
662 | uwb_drp_process_owner(rc, rsv, src, drp_ie, drp_evt); | ||
663 | |||
664 | } | ||
665 | |||
666 | |||
667 | static bool uwb_drp_involves_us(struct uwb_rc *rc, struct uwb_ie_drp *drp_ie) | ||
668 | { | ||
669 | return uwb_dev_addr_cmp(&rc->uwb_dev.dev_addr, &drp_ie->dev_addr) == 0; | ||
670 | } | ||
671 | |||
672 | /* | ||
673 | * Process a received DRP IE. | ||
674 | */ | ||
675 | static void uwb_drp_process(struct uwb_rc *rc, struct uwb_rc_evt_drp *drp_evt, | ||
676 | struct uwb_dev *src, struct uwb_ie_drp *drp_ie) | ||
677 | { | ||
678 | if (uwb_ie_drp_type(drp_ie) == UWB_DRP_TYPE_ALIEN_BP) | ||
679 | uwb_drp_handle_alien_drp(rc, drp_ie); | ||
680 | else if (uwb_drp_involves_us(rc, drp_ie)) | ||
681 | uwb_drp_process_involved(rc, src, drp_evt, drp_ie); | ||
274 | else | 682 | else |
275 | uwb_drp_process_owner(rc, rsv, drp_ie); | 683 | uwb_drp_process_not_involved(rc, drp_evt, drp_ie); |
276 | } | 684 | } |
277 | 685 | ||
686 | /* | ||
687 | * Process a received DRP Availability IE | ||
688 | */ | ||
689 | static void uwb_drp_availability_process(struct uwb_rc *rc, struct uwb_dev *src, | ||
690 | struct uwb_ie_drp_avail *drp_availability_ie) | ||
691 | { | ||
692 | bitmap_copy(src->last_availability_bm, | ||
693 | drp_availability_ie->bmp, UWB_NUM_MAS); | ||
694 | } | ||
278 | 695 | ||
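
uwb_drp_availability_process() only caches the neighbour's advertised availability on the source device; the payoff comes in the UWB_DRP_REASON_CONFLICT case of uwb_drp_process_owner() above, where the complement of that cached bitmap is taken as the set of MAS the neighbour considers occupied. A brief sketch of the store-then-complement flow (types are illustrative):

/* Cache-then-complement sketch: availability IEs are stored per
 * neighbour; a later conflict is resolved against the complement,
 * i.e. the MAS the neighbour does NOT advertise as free. */
#include <stdint.h>
#include <stdio.h>

#define NUM_MAS 256
#define WORDS (NUM_MAS / 64)

struct neighbour {
	uint64_t last_availability[WORDS];
};

static void availability_process(struct neighbour *n, const uint64_t *bmp)
{
	for (int i = 0; i < WORDS; i++)
		n->last_availability[i] = bmp[i];	/* just cache it */
}

static void conflicting_mas(const struct neighbour *n, uint64_t *out)
{
	for (int i = 0; i < WORDS; i++)
		out[i] = ~n->last_availability[i];	/* bitmap_complement */
}

int main(void)
{
	struct neighbour n;
	uint64_t ie[WORDS] = { ~0xffULL, ~0ULL, ~0ULL, ~0ULL }; /* 0..7 busy */
	uint64_t busy[WORDS];

	availability_process(&n, ie);
	conflicting_mas(&n, busy);
	printf("neighbour busy in MAS 0..7: %s\n",
	       (busy[0] & 0xff) == 0xff ? "yes" : "no");
	return 0;
}
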
279 | /* | 696 | /* |
280 | * Process all the DRP IEs (both DRP IEs and the DRP Availability IE) | 697 | * Process all the DRP IEs (both DRP IEs and the DRP Availability IE) |
@@ -296,10 +713,10 @@ void uwb_drp_process_all(struct uwb_rc *rc, struct uwb_rc_evt_drp *drp_evt, | |||
296 | 713 | ||
297 | switch (ie_hdr->element_id) { | 714 | switch (ie_hdr->element_id) { |
298 | case UWB_IE_DRP_AVAILABILITY: | 715 | case UWB_IE_DRP_AVAILABILITY: |
299 | /* FIXME: does something need to be done with this? */ | 716 | uwb_drp_availability_process(rc, src_dev, (struct uwb_ie_drp_avail *)ie_hdr); |
300 | break; | 717 | break; |
301 | case UWB_IE_DRP: | 718 | case UWB_IE_DRP: |
302 | uwb_drp_process(rc, src_dev, (struct uwb_ie_drp *)ie_hdr); | 719 | uwb_drp_process(rc, drp_evt, src_dev, (struct uwb_ie_drp *)ie_hdr); |
303 | break; | 720 | break; |
304 | default: | 721 | default: |
305 | dev_warn(dev, "unexpected IE in DRP notification\n"); | 722 | dev_warn(dev, "unexpected IE in DRP notification\n"); |
@@ -312,55 +729,6 @@ void uwb_drp_process_all(struct uwb_rc *rc, struct uwb_rc_evt_drp *drp_evt, | |||
312 | (int)ielen); | 729 | (int)ielen); |
313 | } | 730 | } |
314 | 731 | ||
315 | |||
316 | /* | ||
317 | * Go through all the DRP IEs and find the ones that conflict with our | ||
318 | * reservations. | ||
319 | * | ||
320 | * FIXME: must resolve the conflict according the the rules in | ||
321 | * [ECMA-368]. | ||
322 | */ | ||
323 | static | ||
324 | void uwb_drp_process_conflict_all(struct uwb_rc *rc, struct uwb_rc_evt_drp *drp_evt, | ||
325 | size_t ielen, struct uwb_dev *src_dev) | ||
326 | { | ||
327 | struct device *dev = &rc->uwb_dev.dev; | ||
328 | struct uwb_ie_hdr *ie_hdr; | ||
329 | struct uwb_ie_drp *drp_ie; | ||
330 | void *ptr; | ||
331 | |||
332 | ptr = drp_evt->ie_data; | ||
333 | for (;;) { | ||
334 | ie_hdr = uwb_ie_next(&ptr, &ielen); | ||
335 | if (!ie_hdr) | ||
336 | break; | ||
337 | |||
338 | drp_ie = container_of(ie_hdr, struct uwb_ie_drp, hdr); | ||
339 | |||
340 | /* FIXME: check if this DRP IE conflicts. */ | ||
341 | } | ||
342 | |||
343 | if (ielen > 0) | ||
344 | dev_warn(dev, "%d octets remaining in DRP notification\n", | ||
345 | (int)ielen); | ||
346 | } | ||
347 | |||
348 | |||
349 | /* | ||
350 | * Terminate all reservations owned by, or targeted at, 'uwb_dev'. | ||
351 | */ | ||
352 | static void uwb_drp_terminate_all(struct uwb_rc *rc, struct uwb_dev *uwb_dev) | ||
353 | { | ||
354 | struct uwb_rsv *rsv; | ||
355 | |||
356 | list_for_each_entry(rsv, &rc->reservations, rc_node) { | ||
357 | if (rsv->owner == uwb_dev | ||
358 | || (rsv->target.type == UWB_RSV_TARGET_DEV && rsv->target.dev == uwb_dev)) | ||
359 | uwb_rsv_remove(rsv); | ||
360 | } | ||
361 | } | ||
362 | |||
363 | |||
364 | /** | 732 | /** |
365 | * uwbd_evt_handle_rc_drp - handle a DRP_IE event | 733 | * uwbd_evt_handle_rc_drp - handle a DRP_IE event |
366 | * @evt: the DRP_IE event from the radio controller | 734 | * @evt: the DRP_IE event from the radio controller |
@@ -401,7 +769,6 @@ int uwbd_evt_handle_rc_drp(struct uwb_event *evt) | |||
401 | size_t ielength, bytes_left; | 769 | size_t ielength, bytes_left; |
402 | struct uwb_dev_addr src_addr; | 770 | struct uwb_dev_addr src_addr; |
403 | struct uwb_dev *src_dev; | 771 | struct uwb_dev *src_dev; |
404 | int reason; | ||
405 | 772 | ||
406 | /* Is there enough data to decode the event (and any IEs in | 773 | /* Is there enough data to decode the event (and any IEs in |
407 | its payload)? */ | 774 | its payload)? */ |
@@ -437,22 +804,8 @@ int uwbd_evt_handle_rc_drp(struct uwb_event *evt) | |||
437 | 804 | ||
438 | mutex_lock(&rc->rsvs_mutex); | 805 | mutex_lock(&rc->rsvs_mutex); |
439 | 806 | ||
440 | reason = uwb_rc_evt_drp_reason(drp_evt); | 807 | /* We do not distinguish on the reason code; process all IEs the same way */ |
441 | 808 | uwb_drp_process_all(rc, drp_evt, ielength, src_dev); | |
442 | switch (reason) { | ||
443 | case UWB_DRP_NOTIF_DRP_IE_RCVD: | ||
444 | uwb_drp_process_all(rc, drp_evt, ielength, src_dev); | ||
445 | break; | ||
446 | case UWB_DRP_NOTIF_CONFLICT: | ||
447 | uwb_drp_process_conflict_all(rc, drp_evt, ielength, src_dev); | ||
448 | break; | ||
449 | case UWB_DRP_NOTIF_TERMINATE: | ||
450 | uwb_drp_terminate_all(rc, src_dev); | ||
451 | break; | ||
452 | default: | ||
453 | dev_warn(dev, "ignored DRP event with reason code: %d\n", reason); | ||
454 | break; | ||
455 | } | ||
456 | 809 | ||
457 | mutex_unlock(&rc->rsvs_mutex); | 810 | mutex_unlock(&rc->rsvs_mutex); |
458 | 811 | ||
diff --git a/drivers/uwb/est.c b/drivers/uwb/est.c index 5fe566b7c845..328fcc2b6099 100644 --- a/drivers/uwb/est.c +++ b/drivers/uwb/est.c | |||
@@ -40,10 +40,8 @@ | |||
40 | * uwb_est_get_size() | 40 | * uwb_est_get_size() |
41 | */ | 41 | */ |
42 | #include <linux/spinlock.h> | 42 | #include <linux/spinlock.h> |
43 | #define D_LOCAL 0 | ||
44 | #include <linux/uwb/debug.h> | ||
45 | #include "uwb-internal.h" | ||
46 | 43 | ||
44 | #include "uwb-internal.h" | ||
47 | 45 | ||
48 | struct uwb_est { | 46 | struct uwb_est { |
49 | u16 type_event_high; | 47 | u16 type_event_high; |
@@ -52,7 +50,6 @@ struct uwb_est { | |||
52 | const struct uwb_est_entry *entry; | 50 | const struct uwb_est_entry *entry; |
53 | }; | 51 | }; |
54 | 52 | ||
55 | |||
56 | static struct uwb_est *uwb_est; | 53 | static struct uwb_est *uwb_est; |
57 | static u8 uwb_est_size; | 54 | static u8 uwb_est_size; |
58 | static u8 uwb_est_used; | 55 | static u8 uwb_est_used; |
@@ -440,21 +437,12 @@ ssize_t uwb_est_find_size(struct uwb_rc *rc, const struct uwb_rceb *rceb, | |||
440 | u8 *ptr = (u8 *) rceb; | 437 | u8 *ptr = (u8 *) rceb; |
441 | 438 | ||
442 | read_lock_irqsave(&uwb_est_lock, flags); | 439 | read_lock_irqsave(&uwb_est_lock, flags); |
443 | d_printf(2, dev, "Size query for event 0x%02x/%04x/%02x," | ||
444 | " buffer size %ld\n", | ||
445 | (unsigned) rceb->bEventType, | ||
446 | (unsigned) le16_to_cpu(rceb->wEvent), | ||
447 | (unsigned) rceb->bEventContext, | ||
448 | (long) rceb_size); | ||
449 | size = -ENOSPC; | 440 | size = -ENOSPC; |
450 | if (rceb_size < sizeof(*rceb)) | 441 | if (rceb_size < sizeof(*rceb)) |
451 | goto out; | 442 | goto out; |
452 | event = le16_to_cpu(rceb->wEvent); | 443 | event = le16_to_cpu(rceb->wEvent); |
453 | type_event_high = rceb->bEventType << 8 | (event & 0xff00) >> 8; | 444 | type_event_high = rceb->bEventType << 8 | (event & 0xff00) >> 8; |
454 | for (itr = 0; itr < uwb_est_used; itr++) { | 445 | for (itr = 0; itr < uwb_est_used; itr++) { |
455 | d_printf(3, dev, "Checking EST 0x%04x/%04x/%04x\n", | ||
456 | uwb_est[itr].type_event_high, uwb_est[itr].vendor, | ||
457 | uwb_est[itr].product); | ||
458 | if (uwb_est[itr].type_event_high != type_event_high) | 446 | if (uwb_est[itr].type_event_high != type_event_high) |
459 | continue; | 447 | continue; |
460 | size = uwb_est_get_size(rc, &uwb_est[itr], | 448 | size = uwb_est_get_size(rc, &uwb_est[itr], |
diff --git a/drivers/uwb/hwa-rc.c b/drivers/uwb/hwa-rc.c index 3d26fa0f8ae1..559f8784acf3 100644 --- a/drivers/uwb/hwa-rc.c +++ b/drivers/uwb/hwa-rc.c | |||
@@ -51,16 +51,14 @@ | |||
51 | * | 51 | * |
52 | * | 52 | * |
53 | */ | 53 | */ |
54 | #include <linux/version.h> | ||
55 | #include <linux/init.h> | 54 | #include <linux/init.h> |
56 | #include <linux/module.h> | 55 | #include <linux/module.h> |
57 | #include <linux/usb.h> | 56 | #include <linux/usb.h> |
58 | #include <linux/usb/wusb.h> | 57 | #include <linux/usb/wusb.h> |
59 | #include <linux/usb/wusb-wa.h> | 58 | #include <linux/usb/wusb-wa.h> |
60 | #include <linux/uwb.h> | 59 | #include <linux/uwb.h> |
60 | |||
61 | #include "uwb-internal.h" | 61 | #include "uwb-internal.h" |
62 | #define D_LOCAL 1 | ||
63 | #include <linux/uwb/debug.h> | ||
64 | 62 | ||
65 | /* The device uses commands and events from the WHCI specification, although | 63 | /* The device uses commands and events from the WHCI specification, although |
66 | * reporting itself as WUSB compliant. */ | 64 | * reporting itself as WUSB compliant. */ |
@@ -631,17 +629,13 @@ void hwarc_neep_cb(struct urb *urb) | |||
631 | 629 | ||
632 | switch (result = urb->status) { | 630 | switch (result = urb->status) { |
633 | case 0: | 631 | case 0: |
634 | d_printf(3, dev, "NEEP: receive stat %d, %zu bytes\n", | ||
635 | urb->status, (size_t)urb->actual_length); | ||
636 | uwb_rc_neh_grok(hwarc->uwb_rc, urb->transfer_buffer, | 632 | uwb_rc_neh_grok(hwarc->uwb_rc, urb->transfer_buffer, |
637 | urb->actual_length); | 633 | urb->actual_length); |
638 | break; | 634 | break; |
639 | case -ECONNRESET: /* Not an error, but a controlled situation; */ | 635 | case -ECONNRESET: /* Not an error, but a controlled situation; */ |
640 | case -ENOENT: /* (we killed the URB)...so, no broadcast */ | 636 | case -ENOENT: /* (we killed the URB)...so, no broadcast */ |
641 | d_printf(2, dev, "NEEP: URB reset/noent %d\n", urb->status); | ||
642 | goto out; | 637 | goto out; |
643 | case -ESHUTDOWN: /* going away! */ | 638 | case -ESHUTDOWN: /* going away! */ |
644 | d_printf(2, dev, "NEEP: URB down %d\n", urb->status); | ||
645 | goto out; | 639 | goto out; |
646 | default: /* On general errors, retry unless it gets ugly */ | 640 | default: /* On general errors, retry unless it gets ugly */ |
647 | if (edc_inc(&hwarc->neep_edc, EDC_MAX_ERRORS, | 641 | if (edc_inc(&hwarc->neep_edc, EDC_MAX_ERRORS, |
@@ -650,7 +644,6 @@ void hwarc_neep_cb(struct urb *urb) | |||
650 | dev_err(dev, "NEEP: URB error %d\n", urb->status); | 644 | dev_err(dev, "NEEP: URB error %d\n", urb->status); |
651 | } | 645 | } |
652 | result = usb_submit_urb(urb, GFP_ATOMIC); | 646 | result = usb_submit_urb(urb, GFP_ATOMIC); |
653 | d_printf(3, dev, "NEEP: submit %d\n", result); | ||
654 | if (result < 0) { | 647 | if (result < 0) { |
655 | dev_err(dev, "NEEP: Can't resubmit URB (%d) resetting device\n", | 648 | dev_err(dev, "NEEP: Can't resubmit URB (%d) resetting device\n", |
656 | result); | 649 | result); |
@@ -759,11 +752,11 @@ static int hwarc_get_version(struct uwb_rc *rc) | |||
759 | itr_size = le16_to_cpu(usb_dev->actconfig->desc.wTotalLength); | 752 | itr_size = le16_to_cpu(usb_dev->actconfig->desc.wTotalLength); |
760 | while (itr_size >= sizeof(*hdr)) { | 753 | while (itr_size >= sizeof(*hdr)) { |
761 | hdr = (struct usb_descriptor_header *) itr; | 754 | hdr = (struct usb_descriptor_header *) itr; |
762 | d_printf(3, dev, "Extra device descriptor: " | 755 | dev_dbg(dev, "Extra device descriptor: " |
763 | "type %02x/%u bytes @ %zu (%zu left)\n", | 756 | "type %02x/%u bytes @ %zu (%zu left)\n", |
764 | hdr->bDescriptorType, hdr->bLength, | 757 | hdr->bDescriptorType, hdr->bLength, |
765 | (itr - usb_dev->rawdescriptors[actconfig_idx]), | 758 | (itr - usb_dev->rawdescriptors[actconfig_idx]), |
766 | itr_size); | 759 | itr_size); |
767 | if (hdr->bDescriptorType == USB_DT_CS_RADIO_CONTROL) | 760 | if (hdr->bDescriptorType == USB_DT_CS_RADIO_CONTROL) |
768 | goto found; | 761 | goto found; |
769 | itr += hdr->bLength; | 762 | itr += hdr->bLength; |
@@ -795,8 +788,7 @@ found: | |||
795 | goto error; | 788 | goto error; |
796 | } | 789 | } |
797 | rc->version = version; | 790 | rc->version = version; |
798 | d_printf(3, dev, "Device supports WUSB protocol version 0x%04x \n", | 791 | dev_dbg(dev, "Device supports WUSB protocol version 0x%04x \n", rc->version); |
799 | rc->version); | ||
800 | result = 0; | 792 | result = 0; |
801 | error: | 793 | error: |
802 | return result; | 794 | return result; |
@@ -877,11 +869,28 @@ static void hwarc_disconnect(struct usb_interface *iface) | |||
877 | uwb_rc_rm(uwb_rc); | 869 | uwb_rc_rm(uwb_rc); |
878 | usb_put_intf(hwarc->usb_iface); | 870 | usb_put_intf(hwarc->usb_iface); |
879 | usb_put_dev(hwarc->usb_dev); | 871 | usb_put_dev(hwarc->usb_dev); |
880 | d_printf(1, &hwarc->usb_iface->dev, "freed hwarc %p\n", hwarc); | ||
881 | kfree(hwarc); | 872 | kfree(hwarc); |
882 | uwb_rc_put(uwb_rc); /* when creating the device, refcount = 1 */ | 873 | uwb_rc_put(uwb_rc); /* when creating the device, refcount = 1 */ |
883 | } | 874 | } |
884 | 875 | ||
876 | static int hwarc_pre_reset(struct usb_interface *iface) | ||
877 | { | ||
878 | struct hwarc *hwarc = usb_get_intfdata(iface); | ||
879 | struct uwb_rc *uwb_rc = hwarc->uwb_rc; | ||
880 | |||
881 | uwb_rc_pre_reset(uwb_rc); | ||
882 | return 0; | ||
883 | } | ||
884 | |||
885 | static int hwarc_post_reset(struct usb_interface *iface) | ||
886 | { | ||
887 | struct hwarc *hwarc = usb_get_intfdata(iface); | ||
888 | struct uwb_rc *uwb_rc = hwarc->uwb_rc; | ||
889 | |||
890 | uwb_rc_post_reset(uwb_rc); | ||
891 | return 0; | ||
892 | } | ||
893 | |||
885 | /** USB device ID's that we handle */ | 894 | /** USB device ID's that we handle */ |
886 | static struct usb_device_id hwarc_id_table[] = { | 895 | static struct usb_device_id hwarc_id_table[] = { |
887 | /* D-Link DUB-1210 */ | 896 | /* D-Link DUB-1210 */ |
@@ -898,20 +907,16 @@ MODULE_DEVICE_TABLE(usb, hwarc_id_table); | |||
898 | 907 | ||
899 | static struct usb_driver hwarc_driver = { | 908 | static struct usb_driver hwarc_driver = { |
900 | .name = "hwa-rc", | 909 | .name = "hwa-rc", |
910 | .id_table = hwarc_id_table, | ||
901 | .probe = hwarc_probe, | 911 | .probe = hwarc_probe, |
902 | .disconnect = hwarc_disconnect, | 912 | .disconnect = hwarc_disconnect, |
903 | .id_table = hwarc_id_table, | 913 | .pre_reset = hwarc_pre_reset, |
914 | .post_reset = hwarc_post_reset, | ||
904 | }; | 915 | }; |
905 | 916 | ||
906 | static int __init hwarc_driver_init(void) | 917 | static int __init hwarc_driver_init(void) |
907 | { | 918 | { |
908 | int result; | 919 | return usb_register(&hwarc_driver); |
909 | result = usb_register(&hwarc_driver); | ||
910 | if (result < 0) | ||
911 | printk(KERN_ERR "HWA-RC: Cannot register USB driver: %d\n", | ||
912 | result); | ||
913 | return result; | ||
914 | |||
915 | } | 920 | } |
916 | module_init(hwarc_driver_init); | 921 | module_init(hwarc_driver_init); |
917 | 922 | ||
diff --git a/drivers/uwb/i1480/dfu/dfu.c b/drivers/uwb/i1480/dfu/dfu.c index 9097b3b30385..da7b1d08003c 100644 --- a/drivers/uwb/i1480/dfu/dfu.c +++ b/drivers/uwb/i1480/dfu/dfu.c | |||
@@ -34,10 +34,7 @@ | |||
34 | #include <linux/uwb.h> | 34 | #include <linux/uwb.h> |
35 | #include <linux/random.h> | 35 | #include <linux/random.h> |
36 | 36 | ||
37 | #define D_LOCAL 0 | 37 | /* |
38 | #include <linux/uwb/debug.h> | ||
39 | |||
40 | /** | ||
41 | * i1480_rceb_check - Check RCEB for expected field values | 38 | * i1480_rceb_check - Check RCEB for expected field values |
42 | * @i1480: pointer to device for which RCEB is being checked | 39 | * @i1480: pointer to device for which RCEB is being checked |
43 | * @rceb: RCEB being checked | 40 | * @rceb: RCEB being checked |
@@ -83,7 +80,7 @@ int i1480_rceb_check(const struct i1480 *i1480, const struct uwb_rceb *rceb, | |||
83 | EXPORT_SYMBOL_GPL(i1480_rceb_check); | 80 | EXPORT_SYMBOL_GPL(i1480_rceb_check); |
84 | 81 | ||
85 | 82 | ||
86 | /** | 83 | /* |
87 | * Execute a Radio Control Command | 84 | * Execute a Radio Control Command |
88 | * | 85 | * |
89 | * Command data has to be in i1480->cmd_buf. | 86 | * Command data has to be in i1480->cmd_buf. |
@@ -101,7 +98,6 @@ ssize_t i1480_cmd(struct i1480 *i1480, const char *cmd_name, size_t cmd_size, | |||
101 | u8 expected_type = reply->bEventType; | 98 | u8 expected_type = reply->bEventType; |
102 | u8 context; | 99 | u8 context; |
103 | 100 | ||
104 | d_fnstart(3, i1480->dev, "(%p, %s, %zu)\n", i1480, cmd_name, cmd_size); | ||
105 | init_completion(&i1480->evt_complete); | 101 | init_completion(&i1480->evt_complete); |
106 | i1480->evt_result = -EINPROGRESS; | 102 | i1480->evt_result = -EINPROGRESS; |
107 | do { | 103 | do { |
@@ -150,8 +146,6 @@ ssize_t i1480_cmd(struct i1480 *i1480, const char *cmd_name, size_t cmd_size, | |||
150 | result = i1480_rceb_check(i1480, i1480->evt_buf, cmd_name, context, | 146 | result = i1480_rceb_check(i1480, i1480->evt_buf, cmd_name, context, |
151 | expected_type, expected_event); | 147 | expected_type, expected_event); |
152 | error: | 148 | error: |
153 | d_fnend(3, i1480->dev, "(%p, %s, %zu) = %zd\n", | ||
154 | i1480, cmd_name, cmd_size, result); | ||
155 | return result; | 149 | return result; |
156 | } | 150 | } |
157 | EXPORT_SYMBOL_GPL(i1480_cmd); | 151 | EXPORT_SYMBOL_GPL(i1480_cmd); |
diff --git a/drivers/uwb/i1480/dfu/mac.c b/drivers/uwb/i1480/dfu/mac.c index 2e4d8f07c165..694d0daf88ab 100644 --- a/drivers/uwb/i1480/dfu/mac.c +++ b/drivers/uwb/i1480/dfu/mac.c | |||
@@ -31,9 +31,6 @@ | |||
31 | #include <linux/uwb.h> | 31 | #include <linux/uwb.h> |
32 | #include "i1480-dfu.h" | 32 | #include "i1480-dfu.h" |
33 | 33 | ||
34 | #define D_LOCAL 0 | ||
35 | #include <linux/uwb/debug.h> | ||
36 | |||
37 | /* | 34 | /* |
38 | * Descriptor for a continuous segment of MAC fw data | 35 | * Descriptor for a continuous segment of MAC fw data |
39 | */ | 36 | */ |
@@ -184,10 +181,6 @@ ssize_t i1480_fw_cmp(struct i1480 *i1480, struct fw_hdr *hdr) | |||
184 | } | 181 | } |
185 | if (memcmp(i1480->cmd_buf, bin + src_itr, result)) { | 182 | if (memcmp(i1480->cmd_buf, bin + src_itr, result)) { |
186 | u8 *buf = i1480->cmd_buf; | 183 | u8 *buf = i1480->cmd_buf; |
187 | d_printf(2, i1480->dev, | ||
188 | "original data @ %p + %u, %zu bytes\n", | ||
189 | bin, src_itr, result); | ||
190 | d_dump(4, i1480->dev, bin + src_itr, result); | ||
191 | for (cnt = 0; cnt < result; cnt++) | 184 | for (cnt = 0; cnt < result; cnt++) |
192 | if (bin[src_itr + cnt] != buf[cnt]) { | 185 | if (bin[src_itr + cnt] != buf[cnt]) { |
193 | dev_err(i1480->dev, "byte failed at " | 186 | dev_err(i1480->dev, "byte failed at " |
@@ -224,7 +217,6 @@ int mac_fw_hdrs_push(struct i1480 *i1480, struct fw_hdr *hdr, | |||
224 | struct fw_hdr *hdr_itr; | 217 | struct fw_hdr *hdr_itr; |
225 | int verif_retry_count; | 218 | int verif_retry_count; |
226 | 219 | ||
227 | d_fnstart(3, dev, "(%p, %p)\n", i1480, hdr); | ||
228 | /* Now, header by header, push them to the hw */ | 220 | /* Now, header by header, push them to the hw */ |
229 | for (hdr_itr = hdr; hdr_itr != NULL; hdr_itr = hdr_itr->next) { | 221 | for (hdr_itr = hdr; hdr_itr != NULL; hdr_itr = hdr_itr->next) { |
230 | verif_retry_count = 0; | 222 | verif_retry_count = 0; |
@@ -264,7 +256,6 @@ retry: | |||
264 | break; | 256 | break; |
265 | } | 257 | } |
266 | } | 258 | } |
267 | d_fnend(3, dev, "(%zd)\n", result); | ||
268 | return result; | 259 | return result; |
269 | } | 260 | } |
270 | 261 | ||
@@ -337,11 +328,9 @@ int __mac_fw_upload(struct i1480 *i1480, const char *fw_name, | |||
337 | const struct firmware *fw; | 328 | const struct firmware *fw; |
338 | struct fw_hdr *fw_hdrs; | 329 | struct fw_hdr *fw_hdrs; |
339 | 330 | ||
340 | d_fnstart(3, i1480->dev, "(%p, %s, %s)\n", i1480, fw_name, fw_tag); | ||
341 | result = request_firmware(&fw, fw_name, i1480->dev); | 331 | result = request_firmware(&fw, fw_name, i1480->dev); |
342 | if (result < 0) /* Up to caller to complain on -ENOENT */ | 332 | if (result < 0) /* Up to caller to complain on -ENOENT */ |
343 | goto out; | 333 | goto out; |
344 | d_printf(3, i1480->dev, "%s fw '%s': uploading\n", fw_tag, fw_name); | ||
345 | result = fw_hdrs_load(i1480, &fw_hdrs, fw->data, fw->size); | 334 | result = fw_hdrs_load(i1480, &fw_hdrs, fw->data, fw->size); |
346 | if (result < 0) { | 335 | if (result < 0) { |
347 | dev_err(i1480->dev, "%s fw '%s': failed to parse firmware " | 336 | dev_err(i1480->dev, "%s fw '%s': failed to parse firmware " |
@@ -363,8 +352,6 @@ out_hdrs_release: | |||
363 | out_release: | 352 | out_release: |
364 | release_firmware(fw); | 353 | release_firmware(fw); |
365 | out: | 354 | out: |
366 | d_fnend(3, i1480->dev, "(%p, %s, %s) = %d\n", i1480, fw_name, fw_tag, | ||
367 | result); | ||
368 | return result; | 355 | return result; |
369 | } | 356 | } |
370 | 357 | ||
@@ -433,7 +420,6 @@ int i1480_fw_is_running_q(struct i1480 *i1480) | |||
433 | int result; | 420 | int result; |
434 | u32 *val = (u32 *) i1480->cmd_buf; | 421 | u32 *val = (u32 *) i1480->cmd_buf; |
435 | 422 | ||
436 | d_fnstart(3, i1480->dev, "(i1480 %p)\n", i1480); | ||
437 | for (cnt = 0; cnt < 10; cnt++) { | 423 | for (cnt = 0; cnt < 10; cnt++) { |
438 | msleep(100); | 424 | msleep(100); |
439 | result = i1480->read(i1480, 0x80080000, 4); | 425 | result = i1480->read(i1480, 0x80080000, 4); |
@@ -447,7 +433,6 @@ int i1480_fw_is_running_q(struct i1480 *i1480) | |||
447 | dev_err(i1480->dev, "Timed out waiting for fw to start\n"); | 433 | dev_err(i1480->dev, "Timed out waiting for fw to start\n"); |
448 | result = -ETIMEDOUT; | 434 | result = -ETIMEDOUT; |
449 | out: | 435 | out: |
450 | d_fnend(3, i1480->dev, "(i1480 %p) = %d\n", i1480, result); | ||
451 | return result; | 436 | return result; |
452 | 437 | ||
453 | } | 438 | } |
@@ -467,7 +452,6 @@ int i1480_mac_fw_upload(struct i1480 *i1480) | |||
467 | int result = 0, deprecated_name = 0; | 452 | int result = 0, deprecated_name = 0; |
468 | struct i1480_rceb *rcebe = (void *) i1480->evt_buf; | 453 | struct i1480_rceb *rcebe = (void *) i1480->evt_buf; |
469 | 454 | ||
470 | d_fnstart(3, i1480->dev, "(%p)\n", i1480); | ||
471 | result = __mac_fw_upload(i1480, i1480->mac_fw_name, "MAC"); | 455 | result = __mac_fw_upload(i1480, i1480->mac_fw_name, "MAC"); |
472 | if (result == -ENOENT) { | 456 | if (result == -ENOENT) { |
473 | result = __mac_fw_upload(i1480, i1480->mac_fw_name_deprecate, | 457 | result = __mac_fw_upload(i1480, i1480->mac_fw_name_deprecate, |
@@ -501,7 +485,6 @@ int i1480_mac_fw_upload(struct i1480 *i1480) | |||
501 | dev_err(i1480->dev, "MAC fw '%s': initialization event returns " | 485 | dev_err(i1480->dev, "MAC fw '%s': initialization event returns " |
502 | "wrong size (%zu bytes vs %zu needed)\n", | 486 | "wrong size (%zu bytes vs %zu needed)\n", |
503 | i1480->mac_fw_name, i1480->evt_result, sizeof(*rcebe)); | 487 | i1480->mac_fw_name, i1480->evt_result, sizeof(*rcebe)); |
504 | dump_bytes(i1480->dev, rcebe, min(i1480->evt_result, (ssize_t)32)); | ||
505 | goto error_size; | 488 | goto error_size; |
506 | } | 489 | } |
507 | result = -EIO; | 490 | result = -EIO; |
@@ -522,6 +505,5 @@ error_fw_not_running: | |||
522 | error_init_timeout: | 505 | error_init_timeout: |
523 | error_size: | 506 | error_size: |
524 | error_setup: | 507 | error_setup: |
525 | d_fnend(3, i1480->dev, "(i1480 %p) = %d\n", i1480, result); | ||
526 | return result; | 508 | return result; |
527 | } | 509 | } |
diff --git a/drivers/uwb/i1480/dfu/usb.c b/drivers/uwb/i1480/dfu/usb.c index 98eeeff051aa..686795e97195 100644 --- a/drivers/uwb/i1480/dfu/usb.c +++ b/drivers/uwb/i1480/dfu/usb.c | |||
@@ -35,7 +35,6 @@ | |||
35 | * the functions are i1480_usb_NAME(). | 35 | * the functions are i1480_usb_NAME(). |
36 | */ | 36 | */ |
37 | #include <linux/module.h> | 37 | #include <linux/module.h> |
38 | #include <linux/version.h> | ||
39 | #include <linux/usb.h> | 38 | #include <linux/usb.h> |
40 | #include <linux/interrupt.h> | 39 | #include <linux/interrupt.h> |
41 | #include <linux/delay.h> | 40 | #include <linux/delay.h> |
@@ -44,10 +43,6 @@ | |||
44 | #include <linux/usb/wusb-wa.h> | 43 | #include <linux/usb/wusb-wa.h> |
45 | #include "i1480-dfu.h" | 44 | #include "i1480-dfu.h" |
46 | 45 | ||
47 | #define D_LOCAL 0 | ||
48 | #include <linux/uwb/debug.h> | ||
49 | |||
50 | |||
51 | struct i1480_usb { | 46 | struct i1480_usb { |
52 | struct i1480 i1480; | 47 | struct i1480 i1480; |
53 | struct usb_device *usb_dev; | 48 | struct usb_device *usb_dev; |
@@ -118,8 +113,6 @@ int i1480_usb_write(struct i1480 *i1480, u32 memory_address, | |||
118 | struct i1480_usb *i1480_usb = container_of(i1480, struct i1480_usb, i1480); | 113 | struct i1480_usb *i1480_usb = container_of(i1480, struct i1480_usb, i1480); |
119 | size_t buffer_size, itr = 0; | 114 | size_t buffer_size, itr = 0; |
120 | 115 | ||
121 | d_fnstart(3, i1480->dev, "(%p, 0x%08x, %p, %zu)\n", | ||
122 | i1480, memory_address, buffer, size); | ||
123 | BUG_ON(size & 0x3); /* Needs to be a multiple of 4 */ | 116 | BUG_ON(size & 0x3); /* Needs to be a multiple of 4 */ |
124 | while (size > 0) { | 117 | while (size > 0) { |
125 | buffer_size = size < i1480->buf_size ? size : i1480->buf_size; | 118 | buffer_size = size < i1480->buf_size ? size : i1480->buf_size; |
@@ -132,16 +125,10 @@ int i1480_usb_write(struct i1480 *i1480, u32 memory_address, | |||
132 | i1480->cmd_buf, buffer_size, 100 /* FIXME: arbitrary */); | 125 | i1480->cmd_buf, buffer_size, 100 /* FIXME: arbitrary */); |
133 | if (result < 0) | 126 | if (result < 0) |
134 | break; | 127 | break; |
135 | d_printf(3, i1480->dev, | ||
136 | "wrote @ 0x%08x %u bytes (of %zu bytes requested)\n", | ||
137 | memory_address, result, buffer_size); | ||
138 | d_dump(4, i1480->dev, i1480->cmd_buf, result); | ||
139 | itr += result; | 128 | itr += result; |
140 | memory_address += result; | 129 | memory_address += result; |
141 | size -= result; | 130 | size -= result; |
142 | } | 131 | } |
143 | d_fnend(3, i1480->dev, "(%p, 0x%08x, %p, %zu) = %d\n", | ||
144 | i1480, memory_address, buffer, size, result); | ||
145 | return result; | 132 | return result; |
146 | } | 133 | } |
147 | 134 | ||
@@ -166,8 +153,6 @@ int i1480_usb_read(struct i1480 *i1480, u32 addr, size_t size) | |||
166 | size_t itr, read_size = i1480->buf_size; | 153 | size_t itr, read_size = i1480->buf_size; |
167 | struct i1480_usb *i1480_usb = container_of(i1480, struct i1480_usb, i1480); | 154 | struct i1480_usb *i1480_usb = container_of(i1480, struct i1480_usb, i1480); |
168 | 155 | ||
169 | d_fnstart(3, i1480->dev, "(%p, 0x%08x, %zu)\n", | ||
170 | i1480, addr, size); | ||
171 | BUG_ON(size > i1480->buf_size); | 156 | BUG_ON(size > i1480->buf_size); |
172 | BUG_ON(size & 0x3); /* Needs to be a multiple of 4 */ | 157 | BUG_ON(size & 0x3); /* Needs to be a multiple of 4 */ |
173 | BUG_ON(read_size > 512); | 158 | BUG_ON(read_size > 512); |
@@ -201,10 +186,6 @@ int i1480_usb_read(struct i1480 *i1480, u32 addr, size_t size) | |||
201 | } | 186 | } |
202 | result = bytes; | 187 | result = bytes; |
203 | out: | 188 | out: |
204 | d_fnend(3, i1480->dev, "(%p, 0x%08x, %zu) = %zd\n", | ||
205 | i1480, addr, size, result); | ||
206 | if (result > 0) | ||
207 | d_dump(4, i1480->dev, i1480->cmd_buf, result); | ||
208 | return result; | 189 | return result; |
209 | } | 190 | } |
210 | 191 | ||
@@ -260,7 +241,6 @@ int i1480_usb_wait_init_done(struct i1480 *i1480) | |||
260 | struct i1480_usb *i1480_usb = container_of(i1480, struct i1480_usb, i1480); | 241 | struct i1480_usb *i1480_usb = container_of(i1480, struct i1480_usb, i1480); |
261 | struct usb_endpoint_descriptor *epd; | 242 | struct usb_endpoint_descriptor *epd; |
262 | 243 | ||
263 | d_fnstart(3, dev, "(%p)\n", i1480); | ||
264 | init_completion(&i1480->evt_complete); | 244 | init_completion(&i1480->evt_complete); |
265 | i1480->evt_result = -EINPROGRESS; | 245 | i1480->evt_result = -EINPROGRESS; |
266 | epd = &i1480_usb->usb_iface->cur_altsetting->endpoint[0].desc; | 246 | epd = &i1480_usb->usb_iface->cur_altsetting->endpoint[0].desc; |
@@ -282,14 +262,12 @@ int i1480_usb_wait_init_done(struct i1480 *i1480) | |||
282 | goto error_wait; | 262 | goto error_wait; |
283 | } | 263 | } |
284 | usb_kill_urb(i1480_usb->neep_urb); | 264 | usb_kill_urb(i1480_usb->neep_urb); |
285 | d_fnend(3, dev, "(%p) = 0\n", i1480); | ||
286 | return 0; | 265 | return 0; |
287 | 266 | ||
288 | error_wait: | 267 | error_wait: |
289 | usb_kill_urb(i1480_usb->neep_urb); | 268 | usb_kill_urb(i1480_usb->neep_urb); |
290 | error_submit: | 269 | error_submit: |
291 | i1480->evt_result = result; | 270 | i1480->evt_result = result; |
292 | d_fnend(3, dev, "(%p) = %d\n", i1480, result); | ||
293 | return result; | 271 | return result; |
294 | } | 272 | } |
295 | 273 | ||
@@ -320,7 +298,6 @@ int i1480_usb_cmd(struct i1480 *i1480, const char *cmd_name, size_t cmd_size) | |||
320 | struct uwb_rccb *cmd = i1480->cmd_buf; | 298 | struct uwb_rccb *cmd = i1480->cmd_buf; |
321 | u8 iface_no; | 299 | u8 iface_no; |
322 | 300 | ||
323 | d_fnstart(3, dev, "(%p, %s, %zu)\n", i1480, cmd_name, cmd_size); | ||
324 | /* Post a read on the notification & event endpoint */ | 301 | /* Post a read on the notification & event endpoint */ |
325 | iface_no = i1480_usb->usb_iface->cur_altsetting->desc.bInterfaceNumber; | 302 | iface_no = i1480_usb->usb_iface->cur_altsetting->desc.bInterfaceNumber; |
326 | epd = &i1480_usb->usb_iface->cur_altsetting->endpoint[0].desc; | 303 | epd = &i1480_usb->usb_iface->cur_altsetting->endpoint[0].desc; |
@@ -348,15 +325,11 @@ int i1480_usb_cmd(struct i1480 *i1480, const char *cmd_name, size_t cmd_size) | |||
348 | cmd_name, result); | 325 | cmd_name, result); |
349 | goto error_submit_ep0; | 326 | goto error_submit_ep0; |
350 | } | 327 | } |
351 | d_fnend(3, dev, "(%p, %s, %zu) = %d\n", | ||
352 | i1480, cmd_name, cmd_size, result); | ||
353 | return result; | 328 | return result; |
354 | 329 | ||
355 | error_submit_ep0: | 330 | error_submit_ep0: |
356 | usb_kill_urb(i1480_usb->neep_urb); | 331 | usb_kill_urb(i1480_usb->neep_urb); |
357 | error_submit_ep1: | 332 | error_submit_ep1: |
358 | d_fnend(3, dev, "(%p, %s, %zu) = %d\n", | ||
359 | i1480, cmd_name, cmd_size, result); | ||
360 | return result; | 333 | return result; |
361 | } | 334 | } |
362 | 335 | ||
diff --git a/drivers/uwb/i1480/i1480u-wlp/lc.c b/drivers/uwb/i1480/i1480u-wlp/lc.c index 737d60cd5b73..049c05d4cc6a 100644 --- a/drivers/uwb/i1480/i1480u-wlp/lc.c +++ b/drivers/uwb/i1480/i1480u-wlp/lc.c | |||
@@ -55,10 +55,9 @@ | |||
55 | * is being removed. | 55 | * is being removed. |
56 | * i1480u_rm() | 56 | * i1480u_rm() |
57 | */ | 57 | */ |
58 | #include <linux/version.h> | ||
59 | #include <linux/if_arp.h> | 58 | #include <linux/if_arp.h> |
60 | #include <linux/etherdevice.h> | 59 | #include <linux/etherdevice.h> |
61 | #include <linux/uwb/debug.h> | 60 | |
62 | #include "i1480u-wlp.h" | 61 | #include "i1480u-wlp.h" |
63 | 62 | ||
64 | 63 | ||
@@ -207,7 +206,7 @@ int i1480u_add(struct i1480u *i1480u, struct usb_interface *iface) | |||
207 | wlp->fill_device_info = i1480u_fill_device_info; | 206 | wlp->fill_device_info = i1480u_fill_device_info; |
208 | wlp->stop_queue = i1480u_stop_queue; | 207 | wlp->stop_queue = i1480u_stop_queue; |
209 | wlp->start_queue = i1480u_start_queue; | 208 | wlp->start_queue = i1480u_start_queue; |
210 | result = wlp_setup(wlp, rc); | 209 | result = wlp_setup(wlp, rc, net_dev); |
211 | if (result < 0) { | 210 | if (result < 0) { |
212 | dev_err(&iface->dev, "Cannot setup WLP\n"); | 211 | dev_err(&iface->dev, "Cannot setup WLP\n"); |
213 | goto error_wlp_setup; | 212 | goto error_wlp_setup; |
diff --git a/drivers/uwb/i1480/i1480u-wlp/netdev.c b/drivers/uwb/i1480/i1480u-wlp/netdev.c index 8802ac43d872..e3873ffb942c 100644 --- a/drivers/uwb/i1480/i1480u-wlp/netdev.c +++ b/drivers/uwb/i1480/i1480u-wlp/netdev.c | |||
@@ -41,7 +41,7 @@ | |||
41 | 41 | ||
42 | #include <linux/if_arp.h> | 42 | #include <linux/if_arp.h> |
43 | #include <linux/etherdevice.h> | 43 | #include <linux/etherdevice.h> |
44 | #include <linux/uwb/debug.h> | 44 | |
45 | #include "i1480u-wlp.h" | 45 | #include "i1480u-wlp.h" |
46 | 46 | ||
47 | struct i1480u_cmd_set_ip_mas { | 47 | struct i1480u_cmd_set_ip_mas { |
@@ -207,6 +207,11 @@ int i1480u_open(struct net_device *net_dev) | |||
207 | result = i1480u_rx_setup(i1480u); /* Alloc RX stuff */ | 207 | result = i1480u_rx_setup(i1480u); /* Alloc RX stuff */ |
208 | if (result < 0) | 208 | if (result < 0) |
209 | goto error_rx_setup; | 209 | goto error_rx_setup; |
210 | |||
211 | result = uwb_radio_start(&wlp->pal); | ||
212 | if (result < 0) | ||
213 | goto error_radio_start; | ||
214 | |||
210 | netif_wake_queue(net_dev); | 215 | netif_wake_queue(net_dev); |
211 | #ifdef i1480u_FLOW_CONTROL | 216 | #ifdef i1480u_FLOW_CONTROL |
212 | result = usb_submit_urb(i1480u->notif_urb, GFP_KERNEL); | 217 | result = usb_submit_urb(i1480u->notif_urb, GFP_KERNEL); |
@@ -215,25 +220,20 @@ int i1480u_open(struct net_device *net_dev) | |||
215 | goto error_notif_urb_submit; | 220 | goto error_notif_urb_submit; |
216 | } | 221 | } |
217 | #endif | 222 | #endif |
218 | i1480u->uwb_notifs_handler.cb = i1480u_uwb_notifs_cb; | ||
219 | i1480u->uwb_notifs_handler.data = i1480u; | ||
220 | if (uwb_bg_joined(rc)) | ||
221 | netif_carrier_on(net_dev); | ||
222 | else | ||
223 | netif_carrier_off(net_dev); | ||
224 | uwb_notifs_register(rc, &i1480u->uwb_notifs_handler); | ||
225 | /* Interface is up with an address, now we can create WSS */ | 223 | /* Interface is up with an address, now we can create WSS */ |
226 | result = wlp_wss_setup(net_dev, &wlp->wss); | 224 | result = wlp_wss_setup(net_dev, &wlp->wss); |
227 | if (result < 0) { | 225 | if (result < 0) { |
228 | dev_err(dev, "Can't create WSS: %d.\n", result); | 226 | dev_err(dev, "Can't create WSS: %d.\n", result); |
229 | goto error_notif_deregister; | 227 | goto error_wss_setup; |
230 | } | 228 | } |
231 | return 0; | 229 | return 0; |
232 | error_notif_deregister: | 230 | error_wss_setup: |
233 | uwb_notifs_deregister(rc, &i1480u->uwb_notifs_handler); | ||
234 | #ifdef i1480u_FLOW_CONTROL | 231 | #ifdef i1480u_FLOW_CONTROL |
232 | usb_kill_urb(i1480u->notif_urb); | ||
235 | error_notif_urb_submit: | 233 | error_notif_urb_submit: |
236 | #endif | 234 | #endif |
235 | uwb_radio_stop(&wlp->pal); | ||
236 | error_radio_start: | ||
237 | netif_stop_queue(net_dev); | 237 | netif_stop_queue(net_dev); |
238 | i1480u_rx_release(i1480u); | 238 | i1480u_rx_release(i1480u); |
239 | error_rx_setup: | 239 | error_rx_setup: |
@@ -248,16 +248,15 @@ int i1480u_stop(struct net_device *net_dev) | |||
248 | { | 248 | { |
249 | struct i1480u *i1480u = netdev_priv(net_dev); | 249 | struct i1480u *i1480u = netdev_priv(net_dev); |
250 | struct wlp *wlp = &i1480u->wlp; | 250 | struct wlp *wlp = &i1480u->wlp; |
251 | struct uwb_rc *rc = wlp->rc; | ||
252 | 251 | ||
253 | BUG_ON(wlp->rc == NULL); | 252 | BUG_ON(wlp->rc == NULL); |
254 | wlp_wss_remove(&wlp->wss); | 253 | wlp_wss_remove(&wlp->wss); |
255 | uwb_notifs_deregister(rc, &i1480u->uwb_notifs_handler); | ||
256 | netif_carrier_off(net_dev); | 254 | netif_carrier_off(net_dev); |
257 | #ifdef i1480u_FLOW_CONTROL | 255 | #ifdef i1480u_FLOW_CONTROL |
258 | usb_kill_urb(i1480u->notif_urb); | 256 | usb_kill_urb(i1480u->notif_urb); |
259 | #endif | 257 | #endif |
260 | netif_stop_queue(net_dev); | 258 | netif_stop_queue(net_dev); |
259 | uwb_radio_stop(&wlp->pal); | ||
261 | i1480u_rx_release(i1480u); | 260 | i1480u_rx_release(i1480u); |
262 | i1480u_tx_release(i1480u); | 261 | i1480u_tx_release(i1480u); |
263 | return 0; | 262 | return 0; |
@@ -303,34 +302,6 @@ int i1480u_change_mtu(struct net_device *net_dev, int mtu) | |||
303 | return 0; | 302 | return 0; |
304 | } | 303 | } |
305 | 304 | ||
306 | |||
307 | /** | ||
308 | * Callback function to handle events from UWB | ||
309 | * When we see other devices we know the carrier is ok; | ||
310 | * if we are the only device in the beacon group we set the carrier | ||
311 | * state to off. | ||
312 | */ | ||
313 | void i1480u_uwb_notifs_cb(void *data, struct uwb_dev *uwb_dev, | ||
314 | enum uwb_notifs event) | ||
315 | { | ||
316 | struct i1480u *i1480u = data; | ||
317 | struct net_device *net_dev = i1480u->net_dev; | ||
318 | struct device *dev = &i1480u->usb_iface->dev; | ||
319 | switch (event) { | ||
320 | case UWB_NOTIF_BG_JOIN: | ||
321 | netif_carrier_on(net_dev); | ||
322 | dev_info(dev, "Link is up\n"); | ||
323 | break; | ||
324 | case UWB_NOTIF_BG_LEAVE: | ||
325 | netif_carrier_off(net_dev); | ||
326 | dev_info(dev, "Link is down\n"); | ||
327 | break; | ||
328 | default: | ||
329 | dev_err(dev, "don't know how to handle event %d from uwb\n", | ||
330 | event); | ||
331 | } | ||
332 | } | ||
333 | |||
334 | /** | 305 | /** |
335 | * Stop the network queue | 306 | * Stop the network queue |
336 | * | 307 | * |
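The removed i1480u_uwb_notifs_cb() tied the net_device carrier state to beacon-group join/leave notifications. After this change the interface brackets its lifetime with explicit radio start/stop calls through its PAL instead. A minimal sketch of the resulting pairing (pal_up()/pal_down() are made-up names for illustration; only the uwb_radio_*() calls come from the patch):

/* Sketch only: how a WLP PAL now brackets its netdev lifetime. */
static int pal_up(struct wlp *wlp, struct net_device *net_dev)
{
	int result;

	result = uwb_radio_start(&wlp->pal);	/* radio core starts beaconing */
	if (result < 0)
		return result;
	netif_wake_queue(net_dev);
	return 0;
}

static void pal_down(struct wlp *wlp, struct net_device *net_dev)
{
	netif_stop_queue(net_dev);
	uwb_radio_stop(&wlp->pal);	/* radio may stop beaconing when unused */
}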
diff --git a/drivers/uwb/i1480/i1480u-wlp/rx.c b/drivers/uwb/i1480/i1480u-wlp/rx.c index 9fc035354a76..34f4cf9a7d34 100644 --- a/drivers/uwb/i1480/i1480u-wlp/rx.c +++ b/drivers/uwb/i1480/i1480u-wlp/rx.c | |||
@@ -68,11 +68,7 @@ | |||
68 | #include <linux/etherdevice.h> | 68 | #include <linux/etherdevice.h> |
69 | #include "i1480u-wlp.h" | 69 | #include "i1480u-wlp.h" |
70 | 70 | ||
71 | #define D_LOCAL 0 | 71 | /* |
72 | #include <linux/uwb/debug.h> | ||
73 | |||
74 | |||
75 | /** | ||
76 | * Setup the RX context | 72 | * Setup the RX context |
77 | * | 73 | * |
78 | * Each URB is provided with a transfer_buffer that is the data field | 74 | * Each URB is provided with a transfer_buffer that is the data field |
@@ -129,7 +125,7 @@ error: | |||
129 | } | 125 | } |
130 | 126 | ||
131 | 127 | ||
132 | /** Release resources associated to the rx context */ | 128 | /* Release resources associated to the rx context */ |
133 | void i1480u_rx_release(struct i1480u *i1480u) | 129 | void i1480u_rx_release(struct i1480u *i1480u) |
134 | { | 130 | { |
135 | int cnt; | 131 | int cnt; |
@@ -155,7 +151,7 @@ void i1480u_rx_unlink_urbs(struct i1480u *i1480u) | |||
155 | } | 151 | } |
156 | } | 152 | } |
157 | 153 | ||
158 | /** Fix an out-of-sequence packet */ | 154 | /* Fix an out-of-sequence packet */ |
159 | #define i1480u_fix(i1480u, msg...) \ | 155 | #define i1480u_fix(i1480u, msg...) \ |
160 | do { \ | 156 | do { \ |
161 | if (printk_ratelimit()) \ | 157 | if (printk_ratelimit()) \ |
@@ -166,7 +162,7 @@ do { \ | |||
166 | } while (0) | 162 | } while (0) |
167 | 163 | ||
168 | 164 | ||
169 | /** Drop an out-of-sequence packet */ | 165 | /* Drop an out-of-sequence packet */ |
170 | #define i1480u_drop(i1480u, msg...) \ | 166 | #define i1480u_drop(i1480u, msg...) \ |
171 | do { \ | 167 | do { \ |
172 | if (printk_ratelimit()) \ | 168 | if (printk_ratelimit()) \ |
@@ -177,7 +173,7 @@ do { \ | |||
177 | 173 | ||
178 | 174 | ||
179 | 175 | ||
180 | /** Finalizes setting up the SKB and delivers it | 176 | /* Finalizes setting up the SKB and delivers it |
181 | * | 177 | * |
182 | * We first pass the incoming frame to WLP substack for verification. It | 178 | * We first pass the incoming frame to WLP substack for verification. It |
183 | * may also be a WLP association frame in which case WLP will take over the | 179 | * may also be a WLP association frame in which case WLP will take over the |
@@ -192,18 +188,11 @@ void i1480u_skb_deliver(struct i1480u *i1480u) | |||
192 | struct net_device *net_dev = i1480u->net_dev; | 188 | struct net_device *net_dev = i1480u->net_dev; |
193 | struct device *dev = &i1480u->usb_iface->dev; | 189 | struct device *dev = &i1480u->usb_iface->dev; |
194 | 190 | ||
195 | d_printf(6, dev, "RX delivered pre skb(%p), %u bytes\n", | ||
196 | i1480u->rx_skb, i1480u->rx_skb->len); | ||
197 | d_dump(7, dev, i1480u->rx_skb->data, i1480u->rx_skb->len); | ||
198 | should_parse = wlp_receive_frame(dev, &i1480u->wlp, i1480u->rx_skb, | 191 | should_parse = wlp_receive_frame(dev, &i1480u->wlp, i1480u->rx_skb, |
199 | &i1480u->rx_srcaddr); | 192 | &i1480u->rx_srcaddr); |
200 | if (!should_parse) | 193 | if (!should_parse) |
201 | goto out; | 194 | goto out; |
202 | i1480u->rx_skb->protocol = eth_type_trans(i1480u->rx_skb, net_dev); | 195 | i1480u->rx_skb->protocol = eth_type_trans(i1480u->rx_skb, net_dev); |
203 | d_printf(5, dev, "RX delivered skb(%p), %u bytes\n", | ||
204 | i1480u->rx_skb, i1480u->rx_skb->len); | ||
205 | d_dump(7, dev, i1480u->rx_skb->data, | ||
206 | i1480u->rx_skb->len > 72 ? 72 : i1480u->rx_skb->len); | ||
207 | i1480u->stats.rx_packets++; | 196 | i1480u->stats.rx_packets++; |
208 | i1480u->stats.rx_bytes += i1480u->rx_untd_pkt_size; | 197 | i1480u->stats.rx_bytes += i1480u->rx_untd_pkt_size; |
209 | net_dev->last_rx = jiffies; | 198 | net_dev->last_rx = jiffies; |
@@ -216,7 +205,7 @@ out: | |||
216 | } | 205 | } |
217 | 206 | ||
218 | 207 | ||
219 | /** | 208 | /* |
220 | * Process a buffer of data received from the USB RX endpoint | 209 | * Process a buffer of data received from the USB RX endpoint |
221 | * | 210 | * |
222 | * First fragment arrives with next or last fragment. All other fragments | 211 | * First fragment arrives with next or last fragment. All other fragments |
@@ -404,7 +393,7 @@ out: | |||
404 | } | 393 | } |
405 | 394 | ||
406 | 395 | ||
407 | /** | 396 | /* |
408 | * Called when an RX URB has finished receiving or has found some kind | 397 | * Called when an RX URB has finished receiving or has found some kind |
409 | * of error condition. | 398 | * of error condition. |
410 | * | 399 | * |
diff --git a/drivers/uwb/i1480/i1480u-wlp/sysfs.c b/drivers/uwb/i1480/i1480u-wlp/sysfs.c index a1d8ca6ac935..4ffaf546cc6c 100644 --- a/drivers/uwb/i1480/i1480u-wlp/sysfs.c +++ b/drivers/uwb/i1480/i1480u-wlp/sysfs.c | |||
@@ -25,8 +25,8 @@ | |||
25 | 25 | ||
26 | #include <linux/netdevice.h> | 26 | #include <linux/netdevice.h> |
27 | #include <linux/etherdevice.h> | 27 | #include <linux/etherdevice.h> |
28 | #include <linux/uwb/debug.h> | ||
29 | #include <linux/device.h> | 28 | #include <linux/device.h> |
29 | |||
30 | #include "i1480u-wlp.h" | 30 | #include "i1480u-wlp.h" |
31 | 31 | ||
32 | 32 | ||
@@ -226,7 +226,6 @@ ssize_t wlp_tx_inflight_store(struct i1480u_tx_inflight *inflight, | |||
226 | * (CLASS_DEVICE_ATTR or DEVICE_ATTR) and i1480u_ATTR_NAME produces a | 226 | * (CLASS_DEVICE_ATTR or DEVICE_ATTR) and i1480u_ATTR_NAME produces a |
227 | * class_device_attr_NAME or device_attr_NAME (for group registration). | 227 | * class_device_attr_NAME or device_attr_NAME (for group registration). |
228 | */ | 228 | */ |
229 | #include <linux/version.h> | ||
230 | 229 | ||
231 | #define i1480u_SHOW(name, fn, param) \ | 230 | #define i1480u_SHOW(name, fn, param) \ |
232 | static ssize_t i1480u_show_##name(struct device *dev, \ | 231 | static ssize_t i1480u_show_##name(struct device *dev, \ |
diff --git a/drivers/uwb/i1480/i1480u-wlp/tx.c b/drivers/uwb/i1480/i1480u-wlp/tx.c index 3426bfb68240..39032cc3503e 100644 --- a/drivers/uwb/i1480/i1480u-wlp/tx.c +++ b/drivers/uwb/i1480/i1480u-wlp/tx.c | |||
@@ -55,8 +55,6 @@ | |||
55 | */ | 55 | */ |
56 | 56 | ||
57 | #include "i1480u-wlp.h" | 57 | #include "i1480u-wlp.h" |
58 | #define D_LOCAL 5 | ||
59 | #include <linux/uwb/debug.h> | ||
60 | 58 | ||
61 | enum { | 59 | enum { |
62 | /* This is only for Next and Last TX packets */ | 60 | /* This is only for Next and Last TX packets */ |
@@ -64,7 +62,7 @@ enum { | |||
64 | - sizeof(struct untd_hdr_rst), | 62 | - sizeof(struct untd_hdr_rst), |
65 | }; | 63 | }; |
66 | 64 | ||
67 | /** Free resources allocated to a i1480u tx context. */ | 65 | /* Free resources allocated to a i1480u tx context. */ |
68 | static | 66 | static |
69 | void i1480u_tx_free(struct i1480u_tx *wtx) | 67 | void i1480u_tx_free(struct i1480u_tx *wtx) |
70 | { | 68 | { |
@@ -99,7 +97,7 @@ void i1480u_tx_unlink_urbs(struct i1480u *i1480u) | |||
99 | } | 97 | } |
100 | 98 | ||
101 | 99 | ||
102 | /** | 100 | /* |
103 | * Callback for a completed tx USB URB. | 101 | * Callback for a completed tx USB URB. |
104 | * | 102 | * |
105 | * TODO: | 103 | * TODO: |
@@ -149,8 +147,6 @@ void i1480u_tx_cb(struct urb *urb) | |||
149 | <= i1480u->tx_inflight.threshold | 147 | <= i1480u->tx_inflight.threshold |
150 | && netif_queue_stopped(net_dev) | 148 | && netif_queue_stopped(net_dev) |
151 | && i1480u->tx_inflight.threshold != 0) { | 149 | && i1480u->tx_inflight.threshold != 0) { |
152 | if (d_test(2) && printk_ratelimit()) | ||
153 | d_printf(2, dev, "Restart queue. \n"); | ||
154 | netif_start_queue(net_dev); | 150 | netif_start_queue(net_dev); |
155 | atomic_inc(&i1480u->tx_inflight.restart_count); | 151 | atomic_inc(&i1480u->tx_inflight.restart_count); |
156 | } | 152 | } |
@@ -158,7 +154,7 @@ void i1480u_tx_cb(struct urb *urb) | |||
158 | } | 154 | } |
159 | 155 | ||
160 | 156 | ||
161 | /** | 157 | /* |
162 | * Given a buffer that doesn't fit in a single fragment, create an | 158 | * Given a buffer that doesn't fit in a single fragment, create an |
163 | * scatter/gather structure for delivery to the USB pipe. | 159 | * scatter/gather structure for delivery to the USB pipe. |
164 | * | 160 | * |
@@ -253,15 +249,11 @@ int i1480u_tx_create_n(struct i1480u_tx *wtx, struct sk_buff *skb, | |||
253 | /* Now do each remaining fragment */ | 249 | /* Now do each remaining fragment */ |
254 | result = -EINVAL; | 250 | result = -EINVAL; |
255 | while (pl_size_left > 0) { | 251 | while (pl_size_left > 0) { |
256 | d_printf(5, NULL, "ITR HDR: pl_size_left %zu buf_itr %zu\n", | ||
257 | pl_size_left, buf_itr - wtx->buf); | ||
258 | if (buf_itr + sizeof(*untd_hdr_rst) - wtx->buf | 252 | if (buf_itr + sizeof(*untd_hdr_rst) - wtx->buf |
259 | > wtx->buf_size) { | 253 | > wtx->buf_size) { |
260 | printk(KERN_ERR "BUG: no space for header\n"); | 254 | printk(KERN_ERR "BUG: no space for header\n"); |
261 | goto error_bug; | 255 | goto error_bug; |
262 | } | 256 | } |
263 | d_printf(5, NULL, "ITR HDR 2: pl_size_left %zu buf_itr %zu\n", | ||
264 | pl_size_left, buf_itr - wtx->buf); | ||
265 | untd_hdr_rst = buf_itr; | 257 | untd_hdr_rst = buf_itr; |
266 | buf_itr += sizeof(*untd_hdr_rst); | 258 | buf_itr += sizeof(*untd_hdr_rst); |
267 | if (pl_size_left > i1480u_MAX_PL_SIZE) { | 259 | if (pl_size_left > i1480u_MAX_PL_SIZE) { |
@@ -271,9 +263,6 @@ int i1480u_tx_create_n(struct i1480u_tx *wtx, struct sk_buff *skb, | |||
271 | frg_pl_size = pl_size_left; | 263 | frg_pl_size = pl_size_left; |
272 | untd_hdr_set_type(&untd_hdr_rst->hdr, i1480u_PKT_FRAG_LST); | 264 | untd_hdr_set_type(&untd_hdr_rst->hdr, i1480u_PKT_FRAG_LST); |
273 | } | 265 | } |
274 | d_printf(5, NULL, | ||
275 | "ITR PL: pl_size_left %zu buf_itr %zu frg_pl_size %zu\n", | ||
276 | pl_size_left, buf_itr - wtx->buf, frg_pl_size); | ||
277 | untd_hdr_set_rx_tx(&untd_hdr_rst->hdr, 0); | 266 | untd_hdr_set_rx_tx(&untd_hdr_rst->hdr, 0); |
278 | untd_hdr_rst->hdr.len = cpu_to_le16(frg_pl_size); | 267 | untd_hdr_rst->hdr.len = cpu_to_le16(frg_pl_size); |
279 | untd_hdr_rst->padding = 0; | 268 | untd_hdr_rst->padding = 0; |
@@ -286,9 +275,6 @@ int i1480u_tx_create_n(struct i1480u_tx *wtx, struct sk_buff *skb, | |||
286 | buf_itr += frg_pl_size; | 275 | buf_itr += frg_pl_size; |
287 | pl_itr += frg_pl_size; | 276 | pl_itr += frg_pl_size; |
288 | pl_size_left -= frg_pl_size; | 277 | pl_size_left -= frg_pl_size; |
289 | d_printf(5, NULL, | ||
290 | "ITR PL 2: pl_size_left %zu buf_itr %zu frg_pl_size %zu\n", | ||
291 | pl_size_left, buf_itr - wtx->buf, frg_pl_size); | ||
292 | } | 278 | } |
293 | dev_kfree_skb_irq(skb); | 279 | dev_kfree_skb_irq(skb); |
294 | return 0; | 280 | return 0; |
@@ -308,7 +294,7 @@ error_buf_alloc: | |||
308 | } | 294 | } |
309 | 295 | ||
310 | 296 | ||
311 | /** | 297 | /* |
312 | * Given a buffer that fits in a single fragment, fill out a @wtx | 298 | * Given a buffer that fits in a single fragment, fill out a @wtx |
313 | * struct for transmitting it down the USB pipe. | 299 | * struct for transmitting it down the USB pipe. |
314 | * | 300 | * |
@@ -346,7 +332,7 @@ int i1480u_tx_create_1(struct i1480u_tx *wtx, struct sk_buff *skb, | |||
346 | } | 332 | } |
347 | 333 | ||
348 | 334 | ||
349 | /** | 335 | /* |
350 | * Given a skb to transmit, massage it to become palatable for the TX pipe | 336 | * Given a skb to transmit, massage it to become palatable for the TX pipe |
351 | * | 337 | * |
352 | * This will break the buffer into chunks smaller than | 338 | * This will break the buffer into chunks smaller than |
@@ -425,7 +411,7 @@ error_wtx_alloc: | |||
425 | return NULL; | 411 | return NULL; |
426 | } | 412 | } |
427 | 413 | ||
428 | /** | 414 | /* |
429 | * Actual fragmentation and transmission of frame | 415 | * Actual fragmentation and transmission of frame |
430 | * | 416 | * |
431 | * @wlp: WLP substack data structure | 417 | * @wlp: WLP substack data structure |
@@ -447,20 +433,12 @@ int i1480u_xmit_frame(struct wlp *wlp, struct sk_buff *skb, | |||
447 | struct i1480u_tx *wtx; | 433 | struct i1480u_tx *wtx; |
448 | struct wlp_tx_hdr *wlp_tx_hdr; | 434 | struct wlp_tx_hdr *wlp_tx_hdr; |
449 | static unsigned char dev_bcast[2] = { 0xff, 0xff }; | 435 | static unsigned char dev_bcast[2] = { 0xff, 0xff }; |
450 | #if 0 | ||
451 | int lockup = 50; | ||
452 | #endif | ||
453 | 436 | ||
454 | d_fnstart(6, dev, "(skb %p (%u), net_dev %p)\n", skb, skb->len, | ||
455 | net_dev); | ||
456 | BUG_ON(i1480u->wlp.rc == NULL); | 437 | BUG_ON(i1480u->wlp.rc == NULL); |
457 | if ((net_dev->flags & IFF_UP) == 0) | 438 | if ((net_dev->flags & IFF_UP) == 0) |
458 | goto out; | 439 | goto out; |
459 | result = -EBUSY; | 440 | result = -EBUSY; |
460 | if (atomic_read(&i1480u->tx_inflight.count) >= i1480u->tx_inflight.max) { | 441 | if (atomic_read(&i1480u->tx_inflight.count) >= i1480u->tx_inflight.max) { |
461 | if (d_test(2) && printk_ratelimit()) | ||
462 | d_printf(2, dev, "Max frames in flight " | ||
463 | "stopping queue.\n"); | ||
464 | netif_stop_queue(net_dev); | 442 | netif_stop_queue(net_dev); |
465 | goto error_max_inflight; | 443 | goto error_max_inflight; |
466 | } | 444 | } |
@@ -489,21 +467,6 @@ int i1480u_xmit_frame(struct wlp *wlp, struct sk_buff *skb, | |||
489 | wlp_tx_hdr_set_delivery_id_type(wlp_tx_hdr, i1480u->options.pca_base_priority); | 467 | wlp_tx_hdr_set_delivery_id_type(wlp_tx_hdr, i1480u->options.pca_base_priority); |
490 | } | 468 | } |
491 | 469 | ||
492 | #if 0 | ||
493 | dev_info(dev, "TX delivering skb -> USB, %zu bytes\n", skb->len); | ||
494 | dump_bytes(dev, skb->data, skb->len > 72 ? 72 : skb->len); | ||
495 | #endif | ||
496 | #if 0 | ||
497 | /* simulates a device lockup after every lockup# packets */ | ||
498 | if (lockup && ((i1480u->stats.tx_packets + 1) % lockup) == 0) { | ||
499 | /* Simulate a dropped transmit interrupt */ | ||
500 | net_dev->trans_start = jiffies; | ||
501 | netif_stop_queue(net_dev); | ||
502 | dev_err(dev, "Simulate lockup at %ld\n", jiffies); | ||
503 | return result; | ||
504 | } | ||
505 | #endif | ||
506 | |||
507 | result = usb_submit_urb(wtx->urb, GFP_ATOMIC); /* Go baby */ | 470 | result = usb_submit_urb(wtx->urb, GFP_ATOMIC); /* Go baby */ |
508 | if (result < 0) { | 471 | if (result < 0) { |
509 | dev_err(dev, "TX: cannot submit URB: %d\n", result); | 472 | dev_err(dev, "TX: cannot submit URB: %d\n", result); |
@@ -513,8 +476,6 @@ int i1480u_xmit_frame(struct wlp *wlp, struct sk_buff *skb, | |||
513 | } | 476 | } |
514 | atomic_inc(&i1480u->tx_inflight.count); | 477 | atomic_inc(&i1480u->tx_inflight.count); |
515 | net_dev->trans_start = jiffies; | 478 | net_dev->trans_start = jiffies; |
516 | d_fnend(6, dev, "(skb %p (%u), net_dev %p) = %d\n", skb, skb->len, | ||
517 | net_dev, result); | ||
518 | return result; | 479 | return result; |
519 | 480 | ||
520 | error_tx_urb_submit: | 481 | error_tx_urb_submit: |
@@ -522,13 +483,11 @@ error_tx_urb_submit: | |||
522 | error_wtx_alloc: | 483 | error_wtx_alloc: |
523 | error_max_inflight: | 484 | error_max_inflight: |
524 | out: | 485 | out: |
525 | d_fnend(6, dev, "(skb %p (%u), net_dev %p) = %d\n", skb, skb->len, | ||
526 | net_dev, result); | ||
527 | return result; | 486 | return result; |
528 | } | 487 | } |
529 | 488 | ||
530 | 489 | ||
531 | /** | 490 | /* |
532 | * Transmit an skb. Called when an skb has to be transmitted | 491 | * Transmit an skb. Called when an skb has to be transmitted |
533 | * | 492 | * |
534 | * The skb is first passed to WLP substack to ensure this is a valid | 493 | * The skb is first passed to WLP substack to ensure this is a valid |
@@ -551,9 +510,6 @@ int i1480u_hard_start_xmit(struct sk_buff *skb, struct net_device *net_dev) | |||
551 | struct device *dev = &i1480u->usb_iface->dev; | 510 | struct device *dev = &i1480u->usb_iface->dev; |
552 | struct uwb_dev_addr dst; | 511 | struct uwb_dev_addr dst; |
553 | 512 | ||
554 | d_fnstart(6, dev, "(skb %p (%u), net_dev %p)\n", skb, skb->len, | ||
555 | net_dev); | ||
556 | BUG_ON(i1480u->wlp.rc == NULL); | ||
557 | if ((net_dev->flags & IFF_UP) == 0) | 513 | if ((net_dev->flags & IFF_UP) == 0) |
558 | goto error; | 514 | goto error; |
559 | result = wlp_prepare_tx_frame(dev, &i1480u->wlp, skb, &dst); | 515 | result = wlp_prepare_tx_frame(dev, &i1480u->wlp, skb, &dst); |
@@ -562,31 +518,25 @@ int i1480u_hard_start_xmit(struct sk_buff *skb, struct net_device *net_dev) | |||
562 | "Dropping packet.\n", result); | 518 | "Dropping packet.\n", result); |
563 | goto error; | 519 | goto error; |
564 | } else if (result == 1) { | 520 | } else if (result == 1) { |
565 | d_printf(6, dev, "WLP will transmit frame. \n"); | ||
566 | /* trans_start time will be set when WLP actually transmits | 521 | /* trans_start time will be set when WLP actually transmits |
567 | * the frame */ | 522 | * the frame */ |
568 | goto out; | 523 | goto out; |
569 | } | 524 | } |
570 | d_printf(6, dev, "Transmitting frame. \n"); | ||
571 | result = i1480u_xmit_frame(&i1480u->wlp, skb, &dst); | 525 | result = i1480u_xmit_frame(&i1480u->wlp, skb, &dst); |
572 | if (result < 0) { | 526 | if (result < 0) { |
573 | dev_err(dev, "Frame TX failed (%d).\n", result); | 527 | dev_err(dev, "Frame TX failed (%d).\n", result); |
574 | goto error; | 528 | goto error; |
575 | } | 529 | } |
576 | d_fnend(6, dev, "(skb %p (%u), net_dev %p) = %d\n", skb, skb->len, | ||
577 | net_dev, result); | ||
578 | return NETDEV_TX_OK; | 530 | return NETDEV_TX_OK; |
579 | error: | 531 | error: |
580 | dev_kfree_skb_any(skb); | 532 | dev_kfree_skb_any(skb); |
581 | i1480u->stats.tx_dropped++; | 533 | i1480u->stats.tx_dropped++; |
582 | out: | 534 | out: |
583 | d_fnend(6, dev, "(skb %p (%u), net_dev %p) = %d\n", skb, skb->len, | ||
584 | net_dev, result); | ||
585 | return NETDEV_TX_OK; | 535 | return NETDEV_TX_OK; |
586 | } | 536 | } |
587 | 537 | ||
588 | 538 | ||
589 | /** | 539 | /* |
590 | * Called when a pkt transmission doesn't complete in a reasonable period | 540 | * Called when a pkt transmission doesn't complete in a reasonable period |
591 | * Device reset may sleep - do it outside of interrupt context (delayed) | 541 | * Device reset may sleep - do it outside of interrupt context (delayed) |
592 | */ | 542 | */ |
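With the tracing calls and the #if 0 debug blocks gone, the TX entry point reduces to the three-way decision already visible in the hunks above, restated here as a sketch (same calls and labels as the surrounding code):

result = wlp_prepare_tx_frame(dev, &i1480u->wlp, skb, &dst);
if (result < 0)
	goto error;	/* cannot prepare the frame: free the skb, bump tx_dropped */
else if (result == 1)
	goto out;	/* WLP association traffic: the WLP substack owns the skb now */
/* result == 0: a plain data frame; fragment it and push it down the USB pipe */
result = i1480u_xmit_frame(&i1480u->wlp, skb, &dst);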
diff --git a/drivers/uwb/ie-rcv.c b/drivers/uwb/ie-rcv.c new file mode 100644 index 000000000000..917e6d78a798 --- /dev/null +++ b/drivers/uwb/ie-rcv.c | |||
@@ -0,0 +1,55 @@ | |||
1 | /* | ||
2 | * Ultra Wide Band | ||
3 | * IE Received notification handling. | ||
4 | * | ||
5 | * Copyright (C) 2008 Cambridge Silicon Radio Ltd. | ||
6 | * | ||
7 | * This program is free software; you can redistribute it and/or | ||
8 | * modify it under the terms of the GNU General Public License version | ||
9 | * 2 as published by the Free Software Foundation. | ||
10 | * | ||
11 | * This program is distributed in the hope that it will be useful, | ||
12 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
13 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
14 | * GNU General Public License for more details. | ||
15 | * | ||
16 | * You should have received a copy of the GNU General Public License | ||
17 | * along with this program. If not, see <http://www.gnu.org/licenses/>. | ||
18 | */ | ||
19 | |||
20 | #include <linux/errno.h> | ||
21 | #include <linux/module.h> | ||
22 | #include <linux/device.h> | ||
23 | #include <linux/bitmap.h> | ||
24 | #include "uwb-internal.h" | ||
25 | |||
26 | /* | ||
27 | * Process an incoming IE Received notification. | ||
28 | */ | ||
29 | int uwbd_evt_handle_rc_ie_rcv(struct uwb_event *evt) | ||
30 | { | ||
31 | int result = -EINVAL; | ||
32 | struct device *dev = &evt->rc->uwb_dev.dev; | ||
33 | struct uwb_rc_evt_ie_rcv *iercv; | ||
34 | size_t iesize; | ||
35 | |||
36 | /* Is there enough data to decode it? */ | ||
37 | if (evt->notif.size < sizeof(*iercv)) { | ||
38 | dev_err(dev, "IE Received notification: Not enough data to " | ||
39 | "decode (%zu vs %zu bytes needed)\n", | ||
40 | evt->notif.size, sizeof(*iercv)); | ||
41 | goto error; | ||
42 | } | ||
43 | iercv = container_of(evt->notif.rceb, struct uwb_rc_evt_ie_rcv, rceb); | ||
44 | iesize = le16_to_cpu(iercv->wIELength); | ||
45 | |||
46 | dev_dbg(dev, "IE received, element ID=%d\n", iercv->IEData[0]); | ||
47 | |||
48 | if (iercv->IEData[0] == UWB_RELINQUISH_REQUEST_IE) { | ||
49 | dev_warn(dev, "unhandled Relinquish Request IE\n"); | ||
50 | } | ||
51 | |||
52 | return 0; | ||
53 | error: | ||
54 | return result; | ||
55 | } | ||
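The decode above assumes the usual WHCI event framing; a sketch of the expected shape of struct uwb_rc_evt_ie_rcv follows (field set abridged and partly assumed, for illustration only; the authoritative declaration is in the UWB spec headers):

struct uwb_rc_evt_ie_rcv {
	struct uwb_rceb rceb;	/* generic event header (container_of target) */
	/* ... per-spec fields such as the source address omitted here ... */
	__le16 wIELength;	/* bytes of IE data that follow */
	u8 IEData[];		/* first byte is the IE's element ID */
} __attribute__((__packed__));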
diff --git a/drivers/uwb/ie.c b/drivers/uwb/ie.c index cf6f3d152b9d..ab976686175b 100644 --- a/drivers/uwb/ie.c +++ b/drivers/uwb/ie.c | |||
@@ -25,8 +25,6 @@ | |||
25 | */ | 25 | */ |
26 | 26 | ||
27 | #include "uwb-internal.h" | 27 | #include "uwb-internal.h" |
28 | #define D_LOCAL 0 | ||
29 | #include <linux/uwb/debug.h> | ||
30 | 28 | ||
31 | /** | 29 | /** |
32 | * uwb_ie_next - get the next IE in a buffer | 30 | * uwb_ie_next - get the next IE in a buffer |
@@ -61,6 +59,42 @@ struct uwb_ie_hdr *uwb_ie_next(void **ptr, size_t *len) | |||
61 | EXPORT_SYMBOL_GPL(uwb_ie_next); | 59 | EXPORT_SYMBOL_GPL(uwb_ie_next); |
62 | 60 | ||
63 | /** | 61 | /** |
62 | * uwb_ie_dump_hex - print IEs to a character buffer | ||
63 | * @ies: the IEs to print. | ||
64 | * @len: length of all the IEs. | ||
65 | * @buf: the destination buffer. | ||
66 | * @size: size of @buf. | ||
67 | * | ||
68 | * Returns the number of characters written. | ||
69 | */ | ||
70 | int uwb_ie_dump_hex(const struct uwb_ie_hdr *ies, size_t len, | ||
71 | char *buf, size_t size) | ||
72 | { | ||
73 | void *ptr; | ||
74 | const struct uwb_ie_hdr *ie; | ||
75 | int r = 0; | ||
76 | u8 *d; | ||
77 | |||
78 | ptr = (void *)ies; | ||
79 | for (;;) { | ||
80 | ie = uwb_ie_next(&ptr, &len); | ||
81 | if (!ie) | ||
82 | break; | ||
83 | |||
84 | r += scnprintf(buf + r, size - r, "%02x %02x", | ||
85 | (unsigned)ie->element_id, | ||
86 | (unsigned)ie->length); | ||
87 | d = (uint8_t *)ie + sizeof(struct uwb_ie_hdr); | ||
88 | while (d != ptr && r < size) | ||
89 | r += scnprintf(buf + r, size - r, " %02x", (unsigned)*d++); | ||
90 | if (r < size) | ||
91 | buf[r++] = '\n'; | ||
92 | } | ||
93 | |||
94 | return r; | ||
95 | } | ||
96 | |||
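uwb_ie_dump_hex() replaces the old uwb_ie_for_each()-based dumper with a direct loop over uwb_ie_next(). A hedged usage sketch (the function name, caller-side locking and the PAGE_SIZE buffer are assumptions typical of a sysfs show method, not part of the patch):

/* Sketch: dump the radio controller's cached beacon IEs for sysfs. */
static ssize_t beacon_ies_show_sketch(struct uwb_rc *rc, char *buf)
{
	ssize_t r;

	mutex_lock(&rc->ies_mutex);
	r = uwb_ie_dump_hex((struct uwb_ie_hdr *)rc->ies->IEData,
			    le16_to_cpu(rc->ies->wIELength),
			    buf, PAGE_SIZE);
	mutex_unlock(&rc->ies_mutex);
	return r;
}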
97 | /** | ||
64 | * Get the IEs that a radio controller is sending in its beacon | 98 | * Get the IEs that a radio controller is sending in its beacon |
65 | * | 99 | * |
66 | * @uwb_rc: UWB Radio Controller | 100 | * @uwb_rc: UWB Radio Controller |
@@ -70,6 +104,7 @@ EXPORT_SYMBOL_GPL(uwb_ie_next); | |||
70 | * anything. Once done with the iedata buffer, call | 104 | * anything. Once done with the iedata buffer, call |
71 | * uwb_rc_ie_release(iedata). Don't call kfree on it. | 105 | * uwb_rc_ie_release(iedata). Don't call kfree on it. |
72 | */ | 106 | */ |
107 | static | ||
73 | ssize_t uwb_rc_get_ie(struct uwb_rc *uwb_rc, struct uwb_rc_evt_get_ie **pget_ie) | 108 | ssize_t uwb_rc_get_ie(struct uwb_rc *uwb_rc, struct uwb_rc_evt_get_ie **pget_ie) |
74 | { | 109 | { |
75 | ssize_t result; | 110 | ssize_t result; |
@@ -78,148 +113,35 @@ ssize_t uwb_rc_get_ie(struct uwb_rc *uwb_rc, struct uwb_rc_evt_get_ie **pget_ie) | |||
78 | struct uwb_rceb *reply = NULL; | 113 | struct uwb_rceb *reply = NULL; |
79 | struct uwb_rc_evt_get_ie *get_ie; | 114 | struct uwb_rc_evt_get_ie *get_ie; |
80 | 115 | ||
81 | d_fnstart(3, dev, "(%p, %p)\n", uwb_rc, pget_ie); | ||
82 | result = -ENOMEM; | ||
83 | cmd = kzalloc(sizeof(*cmd), GFP_KERNEL); | 116 | cmd = kzalloc(sizeof(*cmd), GFP_KERNEL); |
84 | if (cmd == NULL) | 117 | if (cmd == NULL) |
85 | goto error_kzalloc; | 118 | return -ENOMEM; |
119 | |||
86 | cmd->bCommandType = UWB_RC_CET_GENERAL; | 120 | cmd->bCommandType = UWB_RC_CET_GENERAL; |
87 | cmd->wCommand = cpu_to_le16(UWB_RC_CMD_GET_IE); | 121 | cmd->wCommand = cpu_to_le16(UWB_RC_CMD_GET_IE); |
88 | result = uwb_rc_vcmd(uwb_rc, "GET_IE", cmd, sizeof(*cmd), | 122 | result = uwb_rc_vcmd(uwb_rc, "GET_IE", cmd, sizeof(*cmd), |
89 | UWB_RC_CET_GENERAL, UWB_RC_CMD_GET_IE, | 123 | UWB_RC_CET_GENERAL, UWB_RC_CMD_GET_IE, |
90 | &reply); | 124 | &reply); |
125 | kfree(cmd); | ||
91 | if (result < 0) | 126 | if (result < 0) |
92 | goto error_cmd; | 127 | return result; |
128 | |||
93 | get_ie = container_of(reply, struct uwb_rc_evt_get_ie, rceb); | 129 | get_ie = container_of(reply, struct uwb_rc_evt_get_ie, rceb); |
94 | if (result < sizeof(*get_ie)) { | 130 | if (result < sizeof(*get_ie)) { |
95 | dev_err(dev, "not enough data returned for decoding GET IE " | 131 | dev_err(dev, "not enough data returned for decoding GET IE " |
96 | "(%zu bytes received vs %zu needed)\n", | 132 | "(%zu bytes received vs %zu needed)\n", |
97 | result, sizeof(*get_ie)); | 133 | result, sizeof(*get_ie)); |
98 | result = -EINVAL; | 134 | return -EINVAL; |
99 | } else if (result < sizeof(*get_ie) + le16_to_cpu(get_ie->wIELength)) { | 135 | } else if (result < sizeof(*get_ie) + le16_to_cpu(get_ie->wIELength)) { |
100 | dev_err(dev, "not enough data returned for decoding GET IE " | 136 | dev_err(dev, "not enough data returned for decoding GET IE " |
101 | "payload (%zu bytes received vs %zu needed)\n", result, | 137 | "payload (%zu bytes received vs %zu needed)\n", result, |
102 | sizeof(*get_ie) + le16_to_cpu(get_ie->wIELength)); | 138 | sizeof(*get_ie) + le16_to_cpu(get_ie->wIELength)); |
103 | result = -EINVAL; | ||
104 | } else | ||
105 | *pget_ie = get_ie; | ||
106 | error_cmd: | ||
107 | kfree(cmd); | ||
108 | error_kzalloc: | ||
109 | d_fnend(3, dev, "(%p, %p) = %d\n", uwb_rc, pget_ie, (int)result); | ||
110 | return result; | ||
111 | } | ||
112 | EXPORT_SYMBOL_GPL(uwb_rc_get_ie); | ||
113 | |||
114 | |||
115 | /* | ||
116 | * Given a pointer to an IE, print it in ASCII/hex followed by a new line | ||
117 | * | ||
118 | * @ie_hdr: pointer to the IE header. Length is in there, and it is | ||
119 | * guaranteed that the ie_hdr->length bytes following it are | ||
120 | * safely accesible. | ||
121 | * | ||
122 | * @_data: context data passed from uwb_ie_for_each(), an struct output_ctx | ||
123 | */ | ||
124 | int uwb_ie_dump_hex(struct uwb_dev *uwb_dev, const struct uwb_ie_hdr *ie_hdr, | ||
125 | size_t offset, void *_ctx) | ||
126 | { | ||
127 | struct uwb_buf_ctx *ctx = _ctx; | ||
128 | const u8 *pl = (void *)(ie_hdr + 1); | ||
129 | u8 pl_itr; | ||
130 | |||
131 | ctx->bytes += scnprintf(ctx->buf + ctx->bytes, ctx->size - ctx->bytes, | ||
132 | "%02x %02x ", (unsigned) ie_hdr->element_id, | ||
133 | (unsigned) ie_hdr->length); | ||
134 | pl_itr = 0; | ||
135 | while (pl_itr < ie_hdr->length && ctx->bytes < ctx->size) | ||
136 | ctx->bytes += scnprintf(ctx->buf + ctx->bytes, | ||
137 | ctx->size - ctx->bytes, | ||
138 | "%02x ", (unsigned) pl[pl_itr++]); | ||
139 | if (ctx->bytes < ctx->size) | ||
140 | ctx->buf[ctx->bytes++] = '\n'; | ||
141 | return 0; | ||
142 | } | ||
143 | EXPORT_SYMBOL_GPL(uwb_ie_dump_hex); | ||
144 | |||
145 | |||
146 | /** | ||
147 | * Verify that a pointer in a buffer points to valid IE | ||
148 | * | ||
149 | * @start: pointer to start of buffer in which IE appears | ||
150 | * @itr: pointer to IE inside buffer that will be verified | ||
151 | * @top: pointer to end of buffer | ||
152 | * | ||
153 | * @returns: 0 if IE is valid, <0 otherwise | ||
154 | * | ||
155 | * Verification involves checking that the buffer can contain a | ||
156 | * header and the amount of data reported in the IE header can be found in | ||
157 | * the buffer. | ||
158 | */ | ||
159 | static | ||
160 | int uwb_rc_ie_verify(struct uwb_dev *uwb_dev, const void *start, | ||
161 | const void *itr, const void *top) | ||
162 | { | ||
163 | struct device *dev = &uwb_dev->dev; | ||
164 | const struct uwb_ie_hdr *ie_hdr; | ||
165 | |||
166 | if (top - itr < sizeof(*ie_hdr)) { | ||
167 | dev_err(dev, "Bad IE: no data to decode header " | ||
168 | "(%zu bytes left vs %zu needed) at offset %zu\n", | ||
169 | top - itr, sizeof(*ie_hdr), itr - start); | ||
170 | return -EINVAL; | ||
171 | } | ||
172 | ie_hdr = itr; | ||
173 | itr += sizeof(*ie_hdr); | ||
174 | if (top - itr < ie_hdr->length) { | ||
175 | dev_err(dev, "Bad IE: not enough data for payload " | ||
176 | "(%zu bytes left vs %zu needed) at offset %zu\n", | ||
177 | top - itr, (size_t)ie_hdr->length, | ||
178 | (void *)ie_hdr - start); | ||
179 | return -EINVAL; | 139 | return -EINVAL; |
180 | } | 140 | } |
181 | return 0; | ||
182 | } | ||
183 | 141 | ||
184 | 142 | *pget_ie = get_ie; | |
185 | /** | ||
186 | * Walk a buffer filled with consecutive IE's a buffer | ||
187 | * | ||
188 | * @uwb_dev: UWB device this IEs belong to (for err messages mainly) | ||
189 | * | ||
190 | * @fn: function to call with each IE; if it returns 0, we keep | ||
191 | * traversing the buffer. If it returns !0, we'll stop and return | ||
192 | * that value. | ||
193 | * | ||
194 | * @data: pointer passed to @fn | ||
195 | * | ||
196 | * @buf: buffer where the consecutive IEs are located | ||
197 | * | ||
198 | * @size: size of @buf | ||
199 | * | ||
200 | * Each IE is checked for basic correctness (there is space left for | ||
201 | * the header and the payload). If that test is failed, we stop | ||
202 | * processing. For every good IE, @fn is called. | ||
203 | */ | ||
204 | ssize_t uwb_ie_for_each(struct uwb_dev *uwb_dev, uwb_ie_f fn, void *data, | ||
205 | const void *buf, size_t size) | ||
206 | { | ||
207 | ssize_t result = 0; | ||
208 | const struct uwb_ie_hdr *ie_hdr; | ||
209 | const void *itr = buf, *top = itr + size; | ||
210 | |||
211 | while (itr < top) { | ||
212 | if (uwb_rc_ie_verify(uwb_dev, buf, itr, top) != 0) | ||
213 | break; | ||
214 | ie_hdr = itr; | ||
215 | itr += sizeof(*ie_hdr) + ie_hdr->length; | ||
216 | result = fn(uwb_dev, ie_hdr, itr - buf, data); | ||
217 | if (result != 0) | ||
218 | break; | ||
219 | } | ||
220 | return result; | 143 | return result; |
221 | } | 144 | } |
222 | EXPORT_SYMBOL_GPL(uwb_ie_for_each); | ||
223 | 145 | ||
224 | 146 | ||
225 | /** | 147 | /** |
@@ -256,70 +178,6 @@ error_cmd: | |||
256 | return result; | 178 | return result; |
257 | } | 179 | } |
258 | 180 | ||
259 | /** | ||
260 | * Determine by IE id if IE is host settable | ||
261 | * WUSB 1.0 [8.6.2.8 Table 8.85] | ||
262 | * | ||
263 | * EXCEPTION: | ||
264 | * All but UWB_IE_WLP appear in Table 8.85 from WUSB 1.0. Setting this IE | ||
265 | * is required for the WLP substack to perform association with its WSS so | ||
266 | * we hope that the WUSB spec will be changed to reflect this. | ||
267 | */ | ||
268 | static | ||
269 | int uwb_rc_ie_is_host_settable(enum uwb_ie element_id) | ||
270 | { | ||
271 | if (element_id == UWB_PCA_AVAILABILITY || | ||
272 | element_id == UWB_BP_SWITCH_IE || | ||
273 | element_id == UWB_MAC_CAPABILITIES_IE || | ||
274 | element_id == UWB_PHY_CAPABILITIES_IE || | ||
275 | element_id == UWB_APP_SPEC_PROBE_IE || | ||
276 | element_id == UWB_IDENTIFICATION_IE || | ||
277 | element_id == UWB_MASTER_KEY_ID_IE || | ||
278 | element_id == UWB_IE_WLP || | ||
279 | element_id == UWB_APP_SPEC_IE) | ||
280 | return 1; | ||
281 | return 0; | ||
282 | } | ||
283 | |||
284 | |||
285 | /** | ||
286 | * Extract Host Settable IEs from IE | ||
287 | * | ||
288 | * @ie_data: pointer to buffer containing all IEs | ||
289 | * @size: size of buffer | ||
290 | * | ||
291 | * @returns: length of buffer that only includes host settable IEs | ||
292 | * | ||
293 | * Given a buffer of IEs we move all Host Settable IEs to front of buffer | ||
294 | * by overwriting the IEs that are not Host Settable. | ||
295 | * Buffer length is adjusted accordingly. | ||
296 | */ | ||
297 | static | ||
298 | ssize_t uwb_rc_parse_host_settable_ie(struct uwb_dev *uwb_dev, | ||
299 | void *ie_data, size_t size) | ||
300 | { | ||
301 | size_t new_len = size; | ||
302 | struct uwb_ie_hdr *ie_hdr; | ||
303 | size_t ie_length; | ||
304 | void *itr = ie_data, *top = itr + size; | ||
305 | |||
306 | while (itr < top) { | ||
307 | if (uwb_rc_ie_verify(uwb_dev, ie_data, itr, top) != 0) | ||
308 | break; | ||
309 | ie_hdr = itr; | ||
310 | ie_length = sizeof(*ie_hdr) + ie_hdr->length; | ||
311 | if (uwb_rc_ie_is_host_settable(ie_hdr->element_id)) { | ||
312 | itr += ie_length; | ||
313 | } else { | ||
314 | memmove(itr, itr + ie_length, top - (itr + ie_length)); | ||
315 | new_len -= ie_length; | ||
316 | top -= ie_length; | ||
317 | } | ||
318 | } | ||
319 | return new_len; | ||
320 | } | ||
321 | |||
322 | |||
323 | /* Initialize the IE management subsystem */ | 181 | /* Initialize the IE management subsystem */ |
324 | void uwb_rc_ie_init(struct uwb_rc *uwb_rc) | 182 | void uwb_rc_ie_init(struct uwb_rc *uwb_rc) |
325 | { | 183 | { |
@@ -328,49 +186,34 @@ void uwb_rc_ie_init(struct uwb_rc *uwb_rc) | |||
328 | 186 | ||
329 | 187 | ||
330 | /** | 188 | /** |
331 | * Set up cache for host settable IEs currently being transmitted | 189 | * uwb_rc_ie_setup - setup a radio controller's IE manager |
190 | * @uwb_rc: the radio controller. | ||
332 | * | 191 | * |
333 | * First we just call GET-IE to get the current IEs being transmitted | 192 | * The current set of IEs is obtained from the hardware with a GET-IE |
334 | * (or we work around and pretend we did) and (because the format is | 193 | * command (since the radio controller is not yet beaconing, this will |
335 | * the same) reuse that as the IE cache (with the command prefix, as | 194 | * be just the hardware's MAC and PHY Capability IEs). |
336 | * explained in 'struct uwb_rc'). | ||
337 | * | 195 | * |
338 | * @returns: size of cache created | 196 | * Returns 0 on success; -ve on error. |
339 | */ | 197 | */ |
340 | ssize_t uwb_rc_ie_setup(struct uwb_rc *uwb_rc) | 198 | int uwb_rc_ie_setup(struct uwb_rc *uwb_rc) |
341 | { | 199 | { |
342 | struct device *dev = &uwb_rc->uwb_dev.dev; | 200 | struct uwb_rc_evt_get_ie *ie_info = NULL; |
343 | ssize_t result; | 201 | int capacity; |
344 | size_t capacity; | 202 | |
345 | struct uwb_rc_evt_get_ie *ie_info; | 203 | capacity = uwb_rc_get_ie(uwb_rc, &ie_info); |
204 | if (capacity < 0) | ||
205 | return capacity; | ||
346 | 206 | ||
347 | d_fnstart(3, dev, "(%p)\n", uwb_rc); | ||
348 | mutex_lock(&uwb_rc->ies_mutex); | 207 | mutex_lock(&uwb_rc->ies_mutex); |
349 | result = uwb_rc_get_ie(uwb_rc, &ie_info); | 208 | |
350 | if (result < 0) | 209 | uwb_rc->ies = (struct uwb_rc_cmd_set_ie *)ie_info; |
351 | goto error_get_ie; | ||
352 | capacity = result; | ||
353 | d_printf(5, dev, "Got IEs %zu bytes (%zu long at %p)\n", result, | ||
354 | (size_t)le16_to_cpu(ie_info->wIELength), ie_info); | ||
355 | |||
356 | /* Remove IEs that host should not set. */ | ||
357 | result = uwb_rc_parse_host_settable_ie(&uwb_rc->uwb_dev, | ||
358 | ie_info->IEData, le16_to_cpu(ie_info->wIELength)); | ||
359 | if (result < 0) | ||
360 | goto error_parse; | ||
361 | d_printf(5, dev, "purged non-settable IEs to %zu bytes\n", result); | ||
362 | uwb_rc->ies = (void *) ie_info; | ||
363 | uwb_rc->ies->rccb.bCommandType = UWB_RC_CET_GENERAL; | 210 | uwb_rc->ies->rccb.bCommandType = UWB_RC_CET_GENERAL; |
364 | uwb_rc->ies->rccb.wCommand = cpu_to_le16(UWB_RC_CMD_SET_IE); | 211 | uwb_rc->ies->rccb.wCommand = cpu_to_le16(UWB_RC_CMD_SET_IE); |
365 | uwb_rc->ies_capacity = capacity; | 212 | uwb_rc->ies_capacity = capacity; |
366 | d_printf(5, dev, "IE cache at %p %zu bytes, %zu capacity\n", | 213 | |
367 | ie_info, result, capacity); | ||
368 | result = 0; | ||
369 | error_parse: | ||
370 | error_get_ie: | ||
371 | mutex_unlock(&uwb_rc->ies_mutex); | 214 | mutex_unlock(&uwb_rc->ies_mutex); |
372 | d_fnend(3, dev, "(%p) = %zu\n", uwb_rc, result); | 215 | |
373 | return result; | 216 | return 0; |
374 | } | 217 | } |
375 | 218 | ||
376 | 219 | ||
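The cast in uwb_rc_ie_setup() above works because the GET-IE reply and the SET-IE command share the same layout after their respective four-byte headers; abridged declarations follow (treat as illustrative, the authoritative ones live in the UWB headers):

struct uwb_rc_evt_get_ie {		/* GET-IE reply, as returned by uwb_rc_vcmd() */
	struct uwb_rceb rceb;		/* event header */
	__le16 wIELength;
	u8 IEData[];
} __attribute__((__packed__));

struct uwb_rc_cmd_set_ie {		/* SET-IE command, cached in rc->ies */
	struct uwb_rccb rccb;		/* command header, same size as the rceb */
	__le16 wIELength;
	u8 IEData[];
} __attribute__((__packed__));

/* So overwriting rccb.bCommandType and rccb.wCommand in place turns the
 * cached GET-IE reply into a ready-to-send SET-IE command. */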
@@ -383,26 +226,47 @@ void uwb_rc_ie_release(struct uwb_rc *uwb_rc) | |||
383 | } | 226 | } |
384 | 227 | ||
385 | 228 | ||
386 | static | 229 | static int uwb_rc_ie_add_one(struct uwb_rc *rc, const struct uwb_ie_hdr *new_ie) |
387 | int __acc_size(struct uwb_dev *uwb_dev, const struct uwb_ie_hdr *ie_hdr, | ||
388 | size_t offset, void *_ctx) | ||
389 | { | 230 | { |
390 | size_t *acc_size = _ctx; | 231 | struct uwb_rc_cmd_set_ie *new_ies; |
391 | *acc_size += sizeof(*ie_hdr) + ie_hdr->length; | 232 | void *ptr, *prev_ie; |
392 | d_printf(6, &uwb_dev->dev, "new acc size %zu\n", *acc_size); | 233 | struct uwb_ie_hdr *ie; |
234 | size_t length, new_ie_len, new_capacity, size, prev_size; | ||
235 | |||
236 | length = le16_to_cpu(rc->ies->wIELength); | ||
237 | new_ie_len = sizeof(struct uwb_ie_hdr) + new_ie->length; | ||
238 | new_capacity = sizeof(struct uwb_rc_cmd_set_ie) + length + new_ie_len; | ||
239 | |||
240 | if (new_capacity > rc->ies_capacity) { | ||
241 | new_ies = krealloc(rc->ies, new_capacity, GFP_KERNEL); | ||
242 | if (!new_ies) | ||
243 | return -ENOMEM; | ||
244 | rc->ies = new_ies; | ||
245 | } | ||
246 | |||
247 | ptr = rc->ies->IEData; | ||
248 | size = length; | ||
249 | for (;;) { | ||
250 | prev_ie = ptr; | ||
251 | prev_size = size; | ||
252 | ie = uwb_ie_next(&ptr, &size); | ||
253 | if (!ie || ie->element_id > new_ie->element_id) | ||
254 | break; | ||
255 | } | ||
256 | |||
257 | memmove(prev_ie + new_ie_len, prev_ie, prev_size); | ||
258 | memcpy(prev_ie, new_ie, new_ie_len); | ||
259 | rc->ies->wIELength = cpu_to_le16(length + new_ie_len); | ||
260 | |||
393 | return 0; | 261 | return 0; |
394 | } | 262 | } |
395 | 263 | ||
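uwb_rc_ie_add_one() keeps the cached IEs sorted by ascending element ID, which is what lets uwb_rc_ie_cache_rm() further down remove by ID in a single pass. A worked example of the insertion (IDs are arbitrary):

/* Cache before the call, wIELength covering three IEs:
 *	[ID 2][ID 16][ID 255]
 * Adding an IE with element ID 20:
 *	- scan: 2 <= 20, 16 <= 20, 255 > 20, so the loop stops with prev_ie
 *	  just after [ID 16] and prev_size covering only [ID 255];
 *	- memmove() shifts [ID 255] up by new_ie_len;
 *	- memcpy() drops the new IE into the gap:
 *	[ID 2][ID 16][ID 20][ID 255]
 */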
396 | |||
397 | /** | 264 | /** |
398 | * Add a new IE to IEs currently being transmitted by device | 265 | * uwb_rc_ie_add - add new IEs to the radio controller's beacon |
399 | * | 266 | * @uwb_rc: the radio controller. |
400 | * @ies: the buffer containing the new IE or IEs to be added to | 267 | * @ies: the buffer containing the new IE or IEs to be added to |
401 | * the device's beacon. The buffer will be verified for | 268 | * the device's beacon. |
402 | * consistence (meaning the headers should be right) and | 269 | * @size: length of all the IEs. |
403 | * consistent with the buffer size. | ||
404 | * @size: size of @ies (in bytes, total buffer size) | ||
405 | * @returns: 0 if ok, <0 errno code on error | ||
406 | * | 270 | * |
407 | * According to WHCI 0.95 [4.13.6] the driver will only receive the RCEB | 271 | * According to WHCI 0.95 [4.13.6] the driver will only receive the RCEB |
408 | * after the device sent the first beacon that includes the IEs specified | 272 | * after the device sent the first beacon that includes the IEs specified |
@@ -411,66 +275,40 @@ int __acc_size(struct uwb_dev *uwb_dev, const struct uwb_ie_hdr *ie_hdr, | |||
411 | * we start beaconing. | 275 | * we start beaconing. |
412 | * | 276 | * |
413 | * Setting an IE on the device will overwrite all current IEs in device. So | 277 | * Setting an IE on the device will overwrite all current IEs in device. So |
414 | * we take the current IEs being transmitted by the device, append the | 278 | * we take the current IEs being transmitted by the device, insert the |
415 | * new one, and call SET IE with all the IEs needed. | 279 | * new one, and call SET IE with all the IEs needed. |
416 | * | 280 | * |
417 | * The local IE cache will only be updated with the new IE if SET IE | 281 | * Returns 0 on success; or -ENOMEM. |
418 | * completed successfully. | ||
419 | */ | 282 | */ |
420 | int uwb_rc_ie_add(struct uwb_rc *uwb_rc, | 283 | int uwb_rc_ie_add(struct uwb_rc *uwb_rc, |
421 | const struct uwb_ie_hdr *ies, size_t size) | 284 | const struct uwb_ie_hdr *ies, size_t size) |
422 | { | 285 | { |
423 | int result = 0; | 286 | int result = 0; |
424 | struct device *dev = &uwb_rc->uwb_dev.dev; | 287 | void *ptr; |
425 | struct uwb_rc_cmd_set_ie *new_ies; | 288 | const struct uwb_ie_hdr *ie; |
426 | size_t ies_size, total_size, acc_size = 0; | 289 | |
427 | |||
428 | if (uwb_rc->ies == NULL) | ||
429 | return -ESHUTDOWN; | ||
430 | uwb_ie_for_each(&uwb_rc->uwb_dev, __acc_size, &acc_size, ies, size); | ||
431 | if (acc_size != size) { | ||
432 | dev_err(dev, "BUG: bad IEs, misconstructed headers " | ||
433 | "[%zu bytes reported vs %zu calculated]\n", | ||
434 | size, acc_size); | ||
435 | WARN_ON(1); | ||
436 | return -EINVAL; | ||
437 | } | ||
438 | mutex_lock(&uwb_rc->ies_mutex); | 290 | mutex_lock(&uwb_rc->ies_mutex); |
439 | ies_size = le16_to_cpu(uwb_rc->ies->wIELength); | 291 | |
440 | total_size = sizeof(*uwb_rc->ies) + ies_size; | 292 | ptr = (void *)ies; |
441 | if (total_size + size > uwb_rc->ies_capacity) { | 293 | for (;;) { |
442 | d_printf(4, dev, "Reallocating IE cache from %p capacity %zu " | 294 | ie = uwb_ie_next(&ptr, &size); |
443 | "to capacity %zu\n", uwb_rc->ies, uwb_rc->ies_capacity, | 295 | if (!ie) |
444 | total_size + size); | 296 | break; |
445 | new_ies = kzalloc(total_size + size, GFP_KERNEL); | 297 | |
446 | if (new_ies == NULL) { | 298 | result = uwb_rc_ie_add_one(uwb_rc, ie); |
447 | dev_err(dev, "No memory for adding new IE\n"); | 299 | if (result < 0) |
448 | result = -ENOMEM; | 300 | break; |
449 | goto error_alloc; | ||
450 | } | ||
451 | memcpy(new_ies, uwb_rc->ies, total_size); | ||
452 | uwb_rc->ies_capacity = total_size + size; | ||
453 | kfree(uwb_rc->ies); | ||
454 | uwb_rc->ies = new_ies; | ||
455 | d_printf(4, dev, "New IE cache at %p capacity %zu\n", | ||
456 | uwb_rc->ies, uwb_rc->ies_capacity); | ||
457 | } | 301 | } |
458 | memcpy((void *)uwb_rc->ies + total_size, ies, size); | 302 | if (result >= 0) { |
459 | uwb_rc->ies->wIELength = cpu_to_le16(ies_size + size); | 303 | if (size == 0) { |
460 | if (uwb_rc->beaconing != -1) { | 304 | if (uwb_rc->beaconing != -1) |
461 | result = uwb_rc_set_ie(uwb_rc, uwb_rc->ies); | 305 | result = uwb_rc_set_ie(uwb_rc, uwb_rc->ies); |
462 | if (result < 0) { | ||
463 | dev_err(dev, "Cannot set new IE on device: %d\n", | ||
464 | result); | ||
465 | uwb_rc->ies->wIELength = cpu_to_le16(ies_size); | ||
466 | } else | 306 | } else |
467 | result = 0; | 307 | result = -EINVAL; |
468 | } | 308 | } |
469 | d_printf(4, dev, "IEs now occupy %hu bytes of %zu capacity at %p\n", | 309 | |
470 | le16_to_cpu(uwb_rc->ies->wIELength), uwb_rc->ies_capacity, | ||
471 | uwb_rc->ies); | ||
472 | error_alloc: | ||
473 | mutex_unlock(&uwb_rc->ies_mutex); | 310 | mutex_unlock(&uwb_rc->ies_mutex); |
311 | |||
474 | return result; | 312 | return result; |
475 | } | 313 | } |
476 | EXPORT_SYMBOL_GPL(uwb_rc_ie_add); | 314 | EXPORT_SYMBOL_GPL(uwb_rc_ie_add); |
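Validation of the caller's buffer is now implicit: uwb_ie_next() returns NULL on a truncated IE, so if the loop ends with size != 0 the buffer was malformed and uwb_rc_ie_add() returns -EINVAL without pushing the updated cache to the hardware with SET-IE. The same consume-to-zero test could stand alone as a helper (a sketch, not part of the patch):

/* Sketch: true iff 'buf' is wholly composed of well-formed IEs. */
static bool uwb_ies_well_formed(const void *buf, size_t len)
{
	void *ptr = (void *)buf;

	while (uwb_ie_next(&ptr, &len) != NULL)
		;
	return len == 0;
}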
@@ -489,53 +327,52 @@ EXPORT_SYMBOL_GPL(uwb_rc_ie_add); | |||
489 | * beacon. We don't reallocate; we just mark the size smaller. | 327 | * beacon. We don't reallocate; we just mark the size smaller. |
490 | */ | 328 | */ |
491 | static | 329 | static |
492 | int uwb_rc_ie_cache_rm(struct uwb_rc *uwb_rc, enum uwb_ie to_remove) | 330 | void uwb_rc_ie_cache_rm(struct uwb_rc *uwb_rc, enum uwb_ie to_remove) |
493 | { | 331 | { |
494 | struct uwb_ie_hdr *ie_hdr; | 332 | struct uwb_ie_hdr *ie; |
495 | size_t new_len = le16_to_cpu(uwb_rc->ies->wIELength); | 333 | size_t len = le16_to_cpu(uwb_rc->ies->wIELength); |
496 | void *itr = uwb_rc->ies->IEData; | 334 | void *ptr; |
497 | void *top = itr + new_len; | 335 | size_t size; |
498 | 336 | ||
499 | while (itr < top) { | 337 | ptr = uwb_rc->ies->IEData; |
500 | ie_hdr = itr; | 338 | size = len; |
501 | if (ie_hdr->element_id != to_remove) { | 339 | for (;;) { |
502 | itr += sizeof(*ie_hdr) + ie_hdr->length; | 340 | ie = uwb_ie_next(&ptr, &size); |
503 | } else { | 341 | if (!ie) |
504 | int ie_length; | 342 | break; |
505 | ie_length = sizeof(*ie_hdr) + ie_hdr->length; | 343 | if (ie->element_id == to_remove) { |
506 | if (top - itr != ie_length) | 344 | len -= sizeof(struct uwb_ie_hdr) + ie->length; |
507 | memmove(itr, itr + ie_length, top - itr + ie_length); | 345 | memmove(ie, ptr, size); |
508 | top -= ie_length; | 346 | ptr = ie; |
509 | new_len -= ie_length; | ||
510 | } | 347 | } |
511 | } | 348 | } |
512 | uwb_rc->ies->wIELength = cpu_to_le16(new_len); | 349 | uwb_rc->ies->wIELength = cpu_to_le16(len); |
513 | return 0; | ||
514 | } | 350 | } |
515 | 351 | ||
516 | 352 | ||
517 | /** | 353 | /** |
518 | * Remove an IE currently being transmitted by device | 354 | * uwb_rc_ie_rm - remove an IE from the radio controller's beacon |
355 | * @uwb_rc: the radio controller. | ||
356 | * @element_id: the element ID of the IE to remove. | ||
519 | * | 357 | * |
520 | * @element_id: id of IE to be removed from device's beacon | 358 | * Only IEs previously added with uwb_rc_ie_add() may be removed. |
359 | * | ||
360 | * Returns 0 on success; or -ve if the SET-IE command to the radio | ||
361 | * controller failed. | ||
521 | */ | 362 | */ |
522 | int uwb_rc_ie_rm(struct uwb_rc *uwb_rc, enum uwb_ie element_id) | 363 | int uwb_rc_ie_rm(struct uwb_rc *uwb_rc, enum uwb_ie element_id) |
523 | { | 364 | { |
524 | struct device *dev = &uwb_rc->uwb_dev.dev; | 365 | int result = 0; |
525 | int result; | ||
526 | 366 | ||
527 | if (uwb_rc->ies == NULL) | ||
528 | return -ESHUTDOWN; | ||
529 | mutex_lock(&uwb_rc->ies_mutex); | 367 | mutex_lock(&uwb_rc->ies_mutex); |
530 | result = uwb_rc_ie_cache_rm(uwb_rc, element_id); | 368 | |
531 | if (result < 0) | 369 | uwb_rc_ie_cache_rm(uwb_rc, element_id); |
532 | dev_err(dev, "Cannot remove IE from cache.\n"); | 370 | |
533 | if (uwb_rc->beaconing != -1) { | 371 | if (uwb_rc->beaconing != -1) |
534 | result = uwb_rc_set_ie(uwb_rc, uwb_rc->ies); | 372 | result = uwb_rc_set_ie(uwb_rc, uwb_rc->ies); |
535 | if (result < 0) | 373 | |
536 | dev_err(dev, "Cannot set new IE on device.\n"); | ||
537 | } | ||
538 | mutex_unlock(&uwb_rc->ies_mutex); | 374 | mutex_unlock(&uwb_rc->ies_mutex); |
375 | |||
539 | return result; | 376 | return result; |
540 | } | 377 | } |
541 | EXPORT_SYMBOL_GPL(uwb_rc_ie_rm); | 378 | EXPORT_SYMBOL_GPL(uwb_rc_ie_rm); |
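End to end, a PAL adds its IE once and removes it again on teardown. A usage sketch (the function name and payload bytes are made up for illustration; the calls and UWB_IDENTIFICATION_IE are from the code above):

/* Sketch: advertise an Identification IE from a PAL. */
static int example_pal_advertise(struct uwb_rc *rc)
{
	struct {
		struct uwb_ie_hdr hdr;
		u8 payload[2];
	} __attribute__((__packed__)) ie = {
		.hdr = { .element_id = UWB_IDENTIFICATION_IE, .length = 2 },
		.payload = { 0x00, 0x01 },
	};

	return uwb_rc_ie_add(rc, &ie.hdr, sizeof(ie));
}

/* ...and on teardown: uwb_rc_ie_rm(rc, UWB_IDENTIFICATION_IE); */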
diff --git a/drivers/uwb/lc-dev.c b/drivers/uwb/lc-dev.c index 15f856c9689a..e9fe1bb7eb23 100644 --- a/drivers/uwb/lc-dev.c +++ b/drivers/uwb/lc-dev.c | |||
@@ -22,7 +22,6 @@ | |||
22 | * | 22 | * |
23 | * FIXME: docs | 23 | * FIXME: docs |
24 | */ | 24 | */ |
25 | |||
26 | #include <linux/kernel.h> | 25 | #include <linux/kernel.h> |
27 | #include <linux/device.h> | 26 | #include <linux/device.h> |
28 | #include <linux/err.h> | 27 | #include <linux/err.h> |
@@ -30,10 +29,6 @@ | |||
30 | #include <linux/random.h> | 29 | #include <linux/random.h> |
31 | #include "uwb-internal.h" | 30 | #include "uwb-internal.h" |
32 | 31 | ||
33 | #define D_LOCAL 1 | ||
34 | #include <linux/uwb/debug.h> | ||
35 | |||
36 | |||
37 | /* We initialize addresses to 0xff (invalid, as it is bcast) */ | 32 | /* We initialize addresses to 0xff (invalid, as it is bcast) */ |
38 | static inline void uwb_dev_addr_init(struct uwb_dev_addr *addr) | 33 | static inline void uwb_dev_addr_init(struct uwb_dev_addr *addr) |
39 | { | 34 | { |
@@ -104,12 +99,9 @@ static void uwb_dev_sys_release(struct device *dev) | |||
104 | { | 99 | { |
105 | struct uwb_dev *uwb_dev = to_uwb_dev(dev); | 100 | struct uwb_dev *uwb_dev = to_uwb_dev(dev); |
106 | 101 | ||
107 | d_fnstart(4, NULL, "(dev %p uwb_dev %p)\n", dev, uwb_dev); | ||
108 | uwb_bce_put(uwb_dev->bce); | 102 | uwb_bce_put(uwb_dev->bce); |
109 | d_printf(0, &uwb_dev->dev, "uwb_dev %p freed\n", uwb_dev); | ||
110 | memset(uwb_dev, 0x69, sizeof(*uwb_dev)); | 103 | memset(uwb_dev, 0x69, sizeof(*uwb_dev)); |
111 | kfree(uwb_dev); | 104 | kfree(uwb_dev); |
112 | d_fnend(4, NULL, "(dev %p uwb_dev %p) = void\n", dev, uwb_dev); | ||
113 | } | 105 | } |
114 | 106 | ||
115 | /* | 107 | /* |
@@ -275,12 +267,8 @@ static struct attribute_group *groups[] = { | |||
275 | */ | 267 | */ |
276 | static int __uwb_dev_sys_add(struct uwb_dev *uwb_dev, struct device *parent_dev) | 268 | static int __uwb_dev_sys_add(struct uwb_dev *uwb_dev, struct device *parent_dev) |
277 | { | 269 | { |
278 | int result; | ||
279 | struct device *dev; | 270 | struct device *dev; |
280 | 271 | ||
281 | d_fnstart(4, NULL, "(uwb_dev %p parent_dev %p)\n", uwb_dev, parent_dev); | ||
282 | BUG_ON(parent_dev == NULL); | ||
283 | |||
284 | dev = &uwb_dev->dev; | 272 | dev = &uwb_dev->dev; |
285 | /* Device sysfs files are only useful for neighbor devices not | 273 | /* Device sysfs files are only useful for neighbor devices not |
286 | local radio controllers. */ | 274 | local radio controllers. */ |
@@ -289,18 +277,14 @@ static int __uwb_dev_sys_add(struct uwb_dev *uwb_dev, struct device *parent_dev) | |||
289 | dev->parent = parent_dev; | 277 | dev->parent = parent_dev; |
290 | dev_set_drvdata(dev, uwb_dev); | 278 | dev_set_drvdata(dev, uwb_dev); |
291 | 279 | ||
292 | result = device_add(dev); | 280 | return device_add(dev); |
293 | d_fnend(4, NULL, "(uwb_dev %p parent_dev %p) = %d\n", uwb_dev, parent_dev, result); | ||
294 | return result; | ||
295 | } | 281 | } |
296 | 282 | ||
297 | 283 | ||
298 | static void __uwb_dev_sys_rm(struct uwb_dev *uwb_dev) | 284 | static void __uwb_dev_sys_rm(struct uwb_dev *uwb_dev) |
299 | { | 285 | { |
300 | d_fnstart(4, NULL, "(uwb_dev %p)\n", uwb_dev); | ||
301 | dev_set_drvdata(&uwb_dev->dev, NULL); | 286 | dev_set_drvdata(&uwb_dev->dev, NULL); |
302 | device_del(&uwb_dev->dev); | 287 | device_del(&uwb_dev->dev); |
303 | d_fnend(4, NULL, "(uwb_dev %p) = void\n", uwb_dev); | ||
304 | } | 288 | } |
305 | 289 | ||
306 | 290 | ||
@@ -384,7 +368,6 @@ int __uwb_dev_offair(struct uwb_dev *uwb_dev, struct uwb_rc *rc) | |||
384 | struct device *dev = &uwb_dev->dev; | 368 | struct device *dev = &uwb_dev->dev; |
385 | char macbuf[UWB_ADDR_STRSIZE], devbuf[UWB_ADDR_STRSIZE]; | 369 | char macbuf[UWB_ADDR_STRSIZE], devbuf[UWB_ADDR_STRSIZE]; |
386 | 370 | ||
387 | d_fnstart(3, NULL, "(dev %p [uwb_dev %p], uwb_rc %p)\n", dev, uwb_dev, rc); | ||
388 | uwb_mac_addr_print(macbuf, sizeof(macbuf), &uwb_dev->mac_addr); | 371 | uwb_mac_addr_print(macbuf, sizeof(macbuf), &uwb_dev->mac_addr); |
389 | uwb_dev_addr_print(devbuf, sizeof(devbuf), &uwb_dev->dev_addr); | 372 | uwb_dev_addr_print(devbuf, sizeof(devbuf), &uwb_dev->dev_addr); |
390 | dev_info(dev, "uwb device (mac %s dev %s) disconnected from %s %s\n", | 373 | dev_info(dev, "uwb device (mac %s dev %s) disconnected from %s %s\n", |
@@ -392,8 +375,10 @@ int __uwb_dev_offair(struct uwb_dev *uwb_dev, struct uwb_rc *rc) | |||
392 | rc ? rc->uwb_dev.dev.parent->bus->name : "n/a", | 375 | rc ? rc->uwb_dev.dev.parent->bus->name : "n/a", |
393 | rc ? dev_name(rc->uwb_dev.dev.parent) : ""); | 376 | rc ? dev_name(rc->uwb_dev.dev.parent) : ""); |
394 | uwb_dev_rm(uwb_dev); | 377 | uwb_dev_rm(uwb_dev); |
378 | list_del(&uwb_dev->bce->node); | ||
379 | uwb_bce_put(uwb_dev->bce); | ||
395 | uwb_dev_put(uwb_dev); /* for the creation in _onair() */ | 380 | uwb_dev_put(uwb_dev); /* for the creation in _onair() */ |
396 | d_fnend(3, NULL, "(dev %p [uwb_dev %p], uwb_rc %p) = 0\n", dev, uwb_dev, rc); | 381 | |
397 | return 0; | 382 | return 0; |
398 | } | 383 | } |
399 | 384 | ||
diff --git a/drivers/uwb/lc-rc.c b/drivers/uwb/lc-rc.c index ee5772f00d42..9cf21e6bb624 100644 --- a/drivers/uwb/lc-rc.c +++ b/drivers/uwb/lc-rc.c | |||
@@ -36,8 +36,6 @@ | |||
36 | #include <linux/etherdevice.h> | 36 | #include <linux/etherdevice.h> |
37 | #include <linux/usb.h> | 37 | #include <linux/usb.h> |
38 | 38 | ||
39 | #define D_LOCAL 1 | ||
40 | #include <linux/uwb/debug.h> | ||
41 | #include "uwb-internal.h" | 39 | #include "uwb-internal.h" |
42 | 40 | ||
43 | static int uwb_rc_index_match(struct device *dev, void *data) | 41 | static int uwb_rc_index_match(struct device *dev, void *data) |
@@ -81,9 +79,7 @@ static void uwb_rc_sys_release(struct device *dev) | |||
81 | struct uwb_dev *uwb_dev = container_of(dev, struct uwb_dev, dev); | 79 | struct uwb_dev *uwb_dev = container_of(dev, struct uwb_dev, dev); |
82 | struct uwb_rc *rc = container_of(uwb_dev, struct uwb_rc, uwb_dev); | 80 | struct uwb_rc *rc = container_of(uwb_dev, struct uwb_rc, uwb_dev); |
83 | 81 | ||
84 | uwb_rc_neh_destroy(rc); | ||
85 | uwb_rc_ie_release(rc); | 82 | uwb_rc_ie_release(rc); |
86 | d_printf(1, dev, "freed uwb_rc %p\n", rc); | ||
87 | kfree(rc); | 83 | kfree(rc); |
88 | } | 84 | } |
89 | 85 | ||
@@ -100,6 +96,8 @@ void uwb_rc_init(struct uwb_rc *rc) | |||
100 | rc->scan_type = UWB_SCAN_DISABLED; | 96 | rc->scan_type = UWB_SCAN_DISABLED; |
101 | INIT_LIST_HEAD(&rc->notifs_chain.list); | 97 | INIT_LIST_HEAD(&rc->notifs_chain.list); |
102 | mutex_init(&rc->notifs_chain.mutex); | 98 | mutex_init(&rc->notifs_chain.mutex); |
99 | INIT_LIST_HEAD(&rc->uwb_beca.list); | ||
100 | mutex_init(&rc->uwb_beca.mutex); | ||
103 | uwb_drp_avail_init(rc); | 101 | uwb_drp_avail_init(rc); |
104 | uwb_rc_ie_init(rc); | 102 | uwb_rc_ie_init(rc); |
105 | uwb_rsv_init(rc); | 103 | uwb_rsv_init(rc); |
@@ -191,9 +189,9 @@ static int uwb_rc_setup(struct uwb_rc *rc) | |||
191 | int result; | 189 | int result; |
192 | struct device *dev = &rc->uwb_dev.dev; | 190 | struct device *dev = &rc->uwb_dev.dev; |
193 | 191 | ||
194 | result = uwb_rc_reset(rc); | 192 | result = uwb_radio_setup(rc); |
195 | if (result < 0) { | 193 | if (result < 0) { |
196 | dev_err(dev, "cannot reset UWB radio: %d\n", result); | 194 | dev_err(dev, "cannot setup UWB radio: %d\n", result); |
197 | goto error; | 195 | goto error; |
198 | } | 196 | } |
199 | result = uwb_rc_mac_addr_setup(rc); | 197 | result = uwb_rc_mac_addr_setup(rc); |
@@ -250,6 +248,12 @@ int uwb_rc_add(struct uwb_rc *rc, struct device *parent_dev, void *priv) | |||
250 | 248 | ||
251 | rc->priv = priv; | 249 | rc->priv = priv; |
252 | 250 | ||
251 | init_waitqueue_head(&rc->uwbd.wq); | ||
252 | INIT_LIST_HEAD(&rc->uwbd.event_list); | ||
253 | spin_lock_init(&rc->uwbd.event_list_lock); | ||
254 | |||
255 | uwbd_start(rc); | ||
256 | |||
253 | result = rc->start(rc); | 257 | result = rc->start(rc); |
254 | if (result < 0) | 258 | if (result < 0) |
255 | goto error_rc_start; | 259 | goto error_rc_start; |
@@ -284,7 +288,7 @@ error_sys_add: | |||
284 | error_dev_add: | 288 | error_dev_add: |
285 | error_rc_setup: | 289 | error_rc_setup: |
286 | rc->stop(rc); | 290 | rc->stop(rc); |
287 | uwbd_flush(rc); | 291 | uwbd_stop(rc); |
288 | error_rc_start: | 292 | error_rc_start: |
289 | return result; | 293 | return result; |
290 | } | 294 | } |
@@ -306,25 +310,24 @@ void uwb_rc_rm(struct uwb_rc *rc) | |||
306 | rc->ready = 0; | 310 | rc->ready = 0; |
307 | 311 | ||
308 | uwb_dbg_del_rc(rc); | 312 | uwb_dbg_del_rc(rc); |
309 | uwb_rsv_cleanup(rc); | 313 | uwb_rsv_remove_all(rc); |
310 | uwb_rc_ie_rm(rc, UWB_IDENTIFICATION_IE); | 314 | uwb_radio_shutdown(rc); |
311 | if (rc->beaconing >= 0) | ||
312 | uwb_rc_beacon(rc, -1, 0); | ||
313 | if (rc->scan_type != UWB_SCAN_DISABLED) | ||
314 | uwb_rc_scan(rc, rc->scanning, UWB_SCAN_DISABLED, 0); | ||
315 | uwb_rc_reset(rc); | ||
316 | 315 | ||
317 | rc->stop(rc); | 316 | rc->stop(rc); |
318 | uwbd_flush(rc); | 317 | |
318 | uwbd_stop(rc); | ||
319 | uwb_rc_neh_destroy(rc); | ||
319 | 320 | ||
320 | uwb_dev_lock(&rc->uwb_dev); | 321 | uwb_dev_lock(&rc->uwb_dev); |
321 | rc->priv = NULL; | 322 | rc->priv = NULL; |
322 | rc->cmd = NULL; | 323 | rc->cmd = NULL; |
323 | uwb_dev_unlock(&rc->uwb_dev); | 324 | uwb_dev_unlock(&rc->uwb_dev); |
324 | mutex_lock(&uwb_beca.mutex); | 325 | mutex_lock(&rc->uwb_beca.mutex); |
325 | uwb_dev_for_each(rc, uwb_dev_offair_helper, NULL); | 326 | uwb_dev_for_each(rc, uwb_dev_offair_helper, NULL); |
326 | __uwb_rc_sys_rm(rc); | 327 | __uwb_rc_sys_rm(rc); |
327 | mutex_unlock(&uwb_beca.mutex); | 328 | mutex_unlock(&rc->uwb_beca.mutex); |
329 | uwb_rsv_cleanup(rc); | ||
330 | uwb_beca_release(rc); | ||
328 | uwb_dev_rm(&rc->uwb_dev); | 331 | uwb_dev_rm(&rc->uwb_dev); |
329 | } | 332 | } |
330 | EXPORT_SYMBOL_GPL(uwb_rc_rm); | 333 | EXPORT_SYMBOL_GPL(uwb_rc_rm); |
@@ -468,28 +471,3 @@ void uwb_rc_put(struct uwb_rc *rc) | |||
468 | __uwb_rc_put(rc); | 471 | __uwb_rc_put(rc); |
469 | } | 472 | } |
470 | EXPORT_SYMBOL_GPL(uwb_rc_put); | 473 | EXPORT_SYMBOL_GPL(uwb_rc_put); |
471 | |||
472 | /* | ||
473 | * | ||
474 | * | ||
475 | */ | ||
476 | ssize_t uwb_rc_print_IEs(struct uwb_rc *uwb_rc, char *buf, size_t size) | ||
477 | { | ||
478 | ssize_t result; | ||
479 | struct uwb_rc_evt_get_ie *ie_info; | ||
480 | struct uwb_buf_ctx ctx; | ||
481 | |||
482 | result = uwb_rc_get_ie(uwb_rc, &ie_info); | ||
483 | if (result < 0) | ||
484 | goto error_get_ie; | ||
485 | ctx.buf = buf; | ||
486 | ctx.size = size; | ||
487 | ctx.bytes = 0; | ||
488 | uwb_ie_for_each(&uwb_rc->uwb_dev, uwb_ie_dump_hex, &ctx, | ||
489 | ie_info->IEData, result - sizeof(*ie_info)); | ||
490 | result = ctx.bytes; | ||
491 | kfree(ie_info); | ||
492 | error_get_ie: | ||
493 | return result; | ||
494 | } | ||
495 | |||
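The lc-rc.c changes above make formerly global state per controller: the beacon cache (rc->uwb_beca) is set up in uwb_rc_init() and released in uwb_rc_rm(), and the uwbd event thread is initialized and started in uwb_rc_add() and stopped on its error path and in uwb_rc_rm(). A simplified sketch of the bring-up ordering this implies; example_rc_bringup() is illustrative, the real logic lives in uwb_rc_add():

static int example_rc_bringup(struct uwb_rc *rc)
{
        int ret;

        /* per-rc event daemon state, formerly global */
        init_waitqueue_head(&rc->uwbd.wq);
        INIT_LIST_HEAD(&rc->uwbd.event_list);
        spin_lock_init(&rc->uwbd.event_list_lock);

        uwbd_start(rc);                 /* one uwbd thread per controller */

        ret = rc->start(rc);
        if (ret < 0)
                uwbd_stop(rc);          /* pairs with uwbd_start() above */
        return ret;
}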
diff --git a/drivers/uwb/neh.c b/drivers/uwb/neh.c index 9b4eb64327ac..0af8916d9bef 100644 --- a/drivers/uwb/neh.c +++ b/drivers/uwb/neh.c | |||
@@ -86,8 +86,6 @@ | |||
86 | #include <linux/err.h> | 86 | #include <linux/err.h> |
87 | 87 | ||
88 | #include "uwb-internal.h" | 88 | #include "uwb-internal.h" |
89 | #define D_LOCAL 0 | ||
90 | #include <linux/uwb/debug.h> | ||
91 | 89 | ||
92 | /* | 90 | /* |
93 | * UWB Radio Controller Notification/Event Handle | 91 | * UWB Radio Controller Notification/Event Handle |
@@ -254,7 +252,6 @@ error_kzalloc: | |||
254 | 252 | ||
255 | static void __uwb_rc_neh_rm(struct uwb_rc *rc, struct uwb_rc_neh *neh) | 253 | static void __uwb_rc_neh_rm(struct uwb_rc *rc, struct uwb_rc_neh *neh) |
256 | { | 254 | { |
257 | del_timer(&neh->timer); | ||
258 | __uwb_rc_ctx_put(rc, neh); | 255 | __uwb_rc_ctx_put(rc, neh); |
259 | list_del(&neh->list_node); | 256 | list_del(&neh->list_node); |
260 | } | 257 | } |
@@ -275,6 +272,7 @@ void uwb_rc_neh_rm(struct uwb_rc *rc, struct uwb_rc_neh *neh) | |||
275 | __uwb_rc_neh_rm(rc, neh); | 272 | __uwb_rc_neh_rm(rc, neh); |
276 | spin_unlock_irqrestore(&rc->neh_lock, flags); | 273 | spin_unlock_irqrestore(&rc->neh_lock, flags); |
277 | 274 | ||
275 | del_timer_sync(&neh->timer); | ||
278 | uwb_rc_neh_put(neh); | 276 | uwb_rc_neh_put(neh); |
279 | } | 277 | } |
280 | 278 | ||
@@ -349,7 +347,7 @@ struct uwb_rc_neh *uwb_rc_neh_lookup(struct uwb_rc *rc, | |||
349 | } | 347 | } |
350 | 348 | ||
351 | 349 | ||
352 | /** | 350 | /* |
353 | * Process notifications coming from the radio control interface | 351 | * Process notifications coming from the radio control interface |
354 | * | 352 | * |
355 | * @rc: UWB Radio Control Interface descriptor | 353 | * @rc: UWB Radio Control Interface descriptor |
@@ -401,23 +399,6 @@ void uwb_rc_notif(struct uwb_rc *rc, struct uwb_rceb *rceb, ssize_t size) | |||
401 | uwb_evt->notif.size = size; | 399 | uwb_evt->notif.size = size; |
402 | uwb_evt->notif.rceb = rceb; | 400 | uwb_evt->notif.rceb = rceb; |
403 | 401 | ||
404 | switch (le16_to_cpu(rceb->wEvent)) { | ||
405 | /* Trap some vendor specific events | ||
406 | * | ||
407 | * FIXME: move this to handling in ptc-est, where we | ||
408 | * register a NULL event handler for these two guys | ||
409 | * using the Intel IDs. | ||
410 | */ | ||
411 | case 0x0103: | ||
412 | dev_info(dev, "FIXME: DEVICE ADD\n"); | ||
413 | return; | ||
414 | case 0x0104: | ||
415 | dev_info(dev, "FIXME: DEVICE RM\n"); | ||
416 | return; | ||
417 | default: | ||
418 | break; | ||
419 | } | ||
420 | |||
421 | uwbd_event_queue(uwb_evt); | 402 | uwbd_event_queue(uwb_evt); |
422 | } | 403 | } |
423 | 404 | ||
@@ -438,9 +419,10 @@ static void uwb_rc_neh_grok_event(struct uwb_rc *rc, struct uwb_rceb *rceb, size | |||
438 | rceb->bEventContext, size); | 419 | rceb->bEventContext, size); |
439 | } else { | 420 | } else { |
440 | neh = uwb_rc_neh_lookup(rc, rceb); | 421 | neh = uwb_rc_neh_lookup(rc, rceb); |
441 | if (neh) | 422 | if (neh) { |
423 | del_timer_sync(&neh->timer); | ||
442 | uwb_rc_neh_cb(neh, rceb, size); | 424 | uwb_rc_neh_cb(neh, rceb, size); |
443 | else | 425 | } else |
444 | dev_warn(dev, "event 0x%02x/%04x/%02x (%zu bytes): nobody cared\n", | 426 | dev_warn(dev, "event 0x%02x/%04x/%02x (%zu bytes): nobody cared\n", |
445 | rceb->bEventType, le16_to_cpu(rceb->wEvent), | 427 | rceb->bEventType, le16_to_cpu(rceb->wEvent), |
446 | rceb->bEventContext, size); | 428 | rceb->bEventContext, size); |
@@ -495,8 +477,6 @@ void uwb_rc_neh_grok(struct uwb_rc *rc, void *buf, size_t buf_size) | |||
495 | size_t size, real_size, event_size; | 477 | size_t size, real_size, event_size; |
496 | int needtofree; | 478 | int needtofree; |
497 | 479 | ||
498 | d_fnstart(3, dev, "(rc %p buf %p %zu buf_size)\n", rc, buf, buf_size); | ||
499 | d_printf(2, dev, "groking event block: %zu bytes\n", buf_size); | ||
500 | itr = buf; | 480 | itr = buf; |
501 | size = buf_size; | 481 | size = buf_size; |
502 | while (size > 0) { | 482 | while (size > 0) { |
@@ -544,10 +524,7 @@ void uwb_rc_neh_grok(struct uwb_rc *rc, void *buf, size_t buf_size) | |||
544 | 524 | ||
545 | itr += real_size; | 525 | itr += real_size; |
546 | size -= real_size; | 526 | size -= real_size; |
547 | d_printf(2, dev, "consumed %zd bytes, %zu left\n", | ||
548 | event_size, size); | ||
549 | } | 527 | } |
550 | d_fnend(3, dev, "(rc %p buf %p %zu buf_size) = void\n", rc, buf, buf_size); | ||
551 | } | 528 | } |
552 | EXPORT_SYMBOL_GPL(uwb_rc_neh_grok); | 529 | EXPORT_SYMBOL_GPL(uwb_rc_neh_grok); |
553 | 530 | ||
@@ -562,16 +539,22 @@ EXPORT_SYMBOL_GPL(uwb_rc_neh_grok); | |||
562 | */ | 539 | */ |
563 | void uwb_rc_neh_error(struct uwb_rc *rc, int error) | 540 | void uwb_rc_neh_error(struct uwb_rc *rc, int error) |
564 | { | 541 | { |
565 | struct uwb_rc_neh *neh, *next; | 542 | struct uwb_rc_neh *neh; |
566 | unsigned long flags; | 543 | unsigned long flags; |
567 | 544 | ||
568 | BUG_ON(error >= 0); | 545 | for (;;) { |
569 | spin_lock_irqsave(&rc->neh_lock, flags); | 546 | spin_lock_irqsave(&rc->neh_lock, flags); |
570 | list_for_each_entry_safe(neh, next, &rc->neh_list, list_node) { | 547 | if (list_empty(&rc->neh_list)) { |
548 | spin_unlock_irqrestore(&rc->neh_lock, flags); | ||
549 | break; | ||
550 | } | ||
551 | neh = list_first_entry(&rc->neh_list, struct uwb_rc_neh, list_node); | ||
571 | __uwb_rc_neh_rm(rc, neh); | 552 | __uwb_rc_neh_rm(rc, neh); |
553 | spin_unlock_irqrestore(&rc->neh_lock, flags); | ||
554 | |||
555 | del_timer_sync(&neh->timer); | ||
572 | uwb_rc_neh_cb(neh, NULL, error); | 556 | uwb_rc_neh_cb(neh, NULL, error); |
573 | } | 557 | } |
574 | spin_unlock_irqrestore(&rc->neh_lock, flags); | ||
575 | } | 558 | } |
576 | EXPORT_SYMBOL_GPL(uwb_rc_neh_error); | 559 | EXPORT_SYMBOL_GPL(uwb_rc_neh_error); |
577 | 560 | ||
@@ -583,10 +566,14 @@ static void uwb_rc_neh_timer(unsigned long arg) | |||
583 | unsigned long flags; | 566 | unsigned long flags; |
584 | 567 | ||
585 | spin_lock_irqsave(&rc->neh_lock, flags); | 568 | spin_lock_irqsave(&rc->neh_lock, flags); |
586 | __uwb_rc_neh_rm(rc, neh); | 569 | if (neh->context) |
570 | __uwb_rc_neh_rm(rc, neh); | ||
571 | else | ||
572 | neh = NULL; | ||
587 | spin_unlock_irqrestore(&rc->neh_lock, flags); | 573 | spin_unlock_irqrestore(&rc->neh_lock, flags); |
588 | 574 | ||
589 | uwb_rc_neh_cb(neh, NULL, -ETIMEDOUT); | 575 | if (neh) |
576 | uwb_rc_neh_cb(neh, NULL, -ETIMEDOUT); | ||
590 | } | 577 | } |
591 | 578 | ||
592 | /** Initializes the @rc's neh subsystem | 579 | /** Initializes the @rc's neh subsystem |
@@ -605,12 +592,19 @@ void uwb_rc_neh_create(struct uwb_rc *rc) | |||
605 | void uwb_rc_neh_destroy(struct uwb_rc *rc) | 592 | void uwb_rc_neh_destroy(struct uwb_rc *rc) |
606 | { | 593 | { |
607 | unsigned long flags; | 594 | unsigned long flags; |
608 | struct uwb_rc_neh *neh, *next; | 595 | struct uwb_rc_neh *neh; |
609 | 596 | ||
610 | spin_lock_irqsave(&rc->neh_lock, flags); | 597 | for (;;) { |
611 | list_for_each_entry_safe(neh, next, &rc->neh_list, list_node) { | 598 | spin_lock_irqsave(&rc->neh_lock, flags); |
599 | if (list_empty(&rc->neh_list)) { | ||
600 | spin_unlock_irqrestore(&rc->neh_lock, flags); | ||
601 | break; | ||
602 | } | ||
603 | neh = list_first_entry(&rc->neh_list, struct uwb_rc_neh, list_node); | ||
612 | __uwb_rc_neh_rm(rc, neh); | 604 | __uwb_rc_neh_rm(rc, neh); |
605 | spin_unlock_irqrestore(&rc->neh_lock, flags); | ||
606 | |||
607 | del_timer_sync(&neh->timer); | ||
613 | uwb_rc_neh_put(neh); | 608 | uwb_rc_neh_put(neh); |
614 | } | 609 | } |
615 | spin_unlock_irqrestore(&rc->neh_lock, flags); | ||
616 | } | 610 | } |
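The recurring pattern in neh.c above is to unlink the neh under rc->neh_lock, drop the lock, and only then call del_timer_sync(): uwb_rc_neh_timer() takes rc->neh_lock itself, so waiting for the timer while still holding the lock could deadlock against a handler spinning on it. A minimal sketch of the pattern; the example_neh names are illustrative, not from this patch:

#include <linux/list.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/timer.h>

struct example_neh {
        struct list_head list_node;
        struct timer_list timer;
};

static void example_neh_remove(spinlock_t *lock, struct example_neh *neh)
{
        unsigned long flags;

        spin_lock_irqsave(lock, flags);
        list_del(&neh->list_node);      /* timer handler can no longer find it */
        spin_unlock_irqrestore(lock, flags);

        del_timer_sync(&neh->timer);    /* wait out any handler already running */
        kfree(neh);                     /* or drop a reference, as neh.c does */
}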
diff --git a/drivers/uwb/pal.c b/drivers/uwb/pal.c index 1afb38eacb9a..99a19c199095 100644 --- a/drivers/uwb/pal.c +++ b/drivers/uwb/pal.c | |||
@@ -16,6 +16,7 @@ | |||
16 | * along with this program. If not, see <http://www.gnu.org/licenses/>. | 16 | * along with this program. If not, see <http://www.gnu.org/licenses/>. |
17 | */ | 17 | */ |
18 | #include <linux/kernel.h> | 18 | #include <linux/kernel.h> |
19 | #include <linux/debugfs.h> | ||
19 | #include <linux/uwb.h> | 20 | #include <linux/uwb.h> |
20 | 21 | ||
21 | #include "uwb-internal.h" | 22 | #include "uwb-internal.h" |
@@ -32,13 +33,13 @@ EXPORT_SYMBOL_GPL(uwb_pal_init); | |||
32 | 33 | ||
33 | /** | 34 | /** |
34 | * uwb_pal_register - register a UWB PAL | 35 | * uwb_pal_register - register a UWB PAL |
35 | * @rc: the radio controller the PAL will be using | ||
36 | * @pal: the PAL | 36 | * @pal: the PAL |
37 | * | 37 | * |
38 | * The PAL must be initialized with uwb_pal_init(). | 38 | * The PAL must be initialized with uwb_pal_init(). |
39 | */ | 39 | */ |
40 | int uwb_pal_register(struct uwb_rc *rc, struct uwb_pal *pal) | 40 | int uwb_pal_register(struct uwb_pal *pal) |
41 | { | 41 | { |
42 | struct uwb_rc *rc = pal->rc; | ||
42 | int ret; | 43 | int ret; |
43 | 44 | ||
44 | if (pal->device) { | 45 | if (pal->device) { |
@@ -54,9 +55,11 @@ int uwb_pal_register(struct uwb_rc *rc, struct uwb_pal *pal) | |||
54 | } | 55 | } |
55 | } | 56 | } |
56 | 57 | ||
57 | spin_lock(&rc->pal_lock); | 58 | pal->debugfs_dir = uwb_dbg_create_pal_dir(pal); |
59 | |||
60 | mutex_lock(&rc->uwb_dev.mutex); | ||
58 | list_add(&pal->node, &rc->pals); | 61 | list_add(&pal->node, &rc->pals); |
59 | spin_unlock(&rc->pal_lock); | 62 | mutex_unlock(&rc->uwb_dev.mutex); |
60 | 63 | ||
61 | return 0; | 64 | return 0; |
62 | } | 65 | } |
@@ -64,14 +67,19 @@ EXPORT_SYMBOL_GPL(uwb_pal_register); | |||
64 | 67 | ||
65 | /** | 68 | /** |
66 | * uwb_pal_unregister - unregister a UWB PAL | 69 | * uwb_pal_unregister - unregister a UWB PAL |
67 | * @rc: the radio controller the PAL was using | ||
68 | * @pal: the PAL | 70 | * @pal: the PAL |
69 | */ | 71 | */ |
70 | void uwb_pal_unregister(struct uwb_rc *rc, struct uwb_pal *pal) | 72 | void uwb_pal_unregister(struct uwb_pal *pal) |
71 | { | 73 | { |
72 | spin_lock(&rc->pal_lock); | 74 | struct uwb_rc *rc = pal->rc; |
75 | |||
76 | uwb_radio_stop(pal); | ||
77 | |||
78 | mutex_lock(&rc->uwb_dev.mutex); | ||
73 | list_del(&pal->node); | 79 | list_del(&pal->node); |
74 | spin_unlock(&rc->pal_lock); | 80 | mutex_unlock(&rc->uwb_dev.mutex); |
81 | |||
82 | debugfs_remove(pal->debugfs_dir); | ||
75 | 83 | ||
76 | if (pal->device) { | 84 | if (pal->device) { |
77 | sysfs_remove_link(&rc->uwb_dev.dev.kobj, pal->name); | 85 | sysfs_remove_link(&rc->uwb_dev.dev.kobj, pal->name); |
@@ -86,6 +94,5 @@ EXPORT_SYMBOL_GPL(uwb_pal_unregister); | |||
86 | */ | 94 | */ |
87 | void uwb_rc_pal_init(struct uwb_rc *rc) | 95 | void uwb_rc_pal_init(struct uwb_rc *rc) |
88 | { | 96 | { |
89 | spin_lock_init(&rc->pal_lock); | ||
90 | INIT_LIST_HEAD(&rc->pals); | 97 | INIT_LIST_HEAD(&rc->pals); |
91 | } | 98 | } |
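uwb_pal_register() now takes only the PAL: the radio controller travels in pal->rc, the pals list is protected by rc->uwb_dev.mutex rather than a dedicated spinlock, and each PAL gains a debugfs directory. A hedged sketch of the new calling convention; struct example_pal and example_new_rsv are illustrative:

#include <linux/uwb.h>

struct example_pal {
        struct uwb_pal pal;
};

static void example_new_rsv(struct uwb_pal *pal, struct uwb_rsv *rsv)
{
        /* accept (uwb_rsv_accept()) or ignore the reservation request */
}

static int example_pal_probe(struct example_pal *xp, struct uwb_rc *rc)
{
        uwb_pal_init(&xp->pal);
        xp->pal.name = "example";
        xp->pal.rc = rc;                /* must be set before registering */
        xp->pal.new_rsv = example_new_rsv;

        return uwb_pal_register(&xp->pal);
}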
diff --git a/drivers/uwb/radio.c b/drivers/uwb/radio.c new file mode 100644 index 000000000000..f0d55495f5e9 --- /dev/null +++ b/drivers/uwb/radio.c | |||
@@ -0,0 +1,202 @@ | |||
1 | /* | ||
2 | * UWB radio (channel) management. | ||
3 | * | ||
4 | * Copyright (C) 2008 Cambridge Silicon Radio Ltd. | ||
5 | * | ||
6 | * This program is free software; you can redistribute it and/or | ||
7 | * modify it under the terms of the GNU General Public License version | ||
8 | * 2 as published by the Free Software Foundation. | ||
9 | * | ||
10 | * This program is distributed in the hope that it will be useful, | ||
11 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
12 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
13 | * GNU General Public License for more details. | ||
14 | * | ||
15 | * You should have received a copy of the GNU General Public License | ||
16 | * along with this program. If not, see <http://www.gnu.org/licenses/>. | ||
17 | */ | ||
18 | #include <linux/kernel.h> | ||
19 | #include <linux/uwb.h> | ||
20 | |||
21 | #include "uwb-internal.h" | ||
22 | |||
23 | |||
24 | static int uwb_radio_select_channel(struct uwb_rc *rc) | ||
25 | { | ||
26 | /* | ||
27 | * Default to channel 9 (BG1, TFC1) unless the user has | ||
28 | * selected a specific channel or there are no active PALs. | ||
29 | */ | ||
30 | if (rc->active_pals == 0) | ||
31 | return -1; | ||
32 | if (rc->beaconing_forced) | ||
33 | return rc->beaconing_forced; | ||
34 | return 9; | ||
35 | } | ||
36 | |||
37 | |||
38 | /* | ||
39 | * Notify all active PALs that the channel has changed. | ||
40 | */ | ||
41 | static void uwb_radio_channel_changed(struct uwb_rc *rc, int channel) | ||
42 | { | ||
43 | struct uwb_pal *pal; | ||
44 | |||
45 | list_for_each_entry(pal, &rc->pals, node) { | ||
46 | if (pal->channel && channel != pal->channel) { | ||
47 | pal->channel = channel; | ||
48 | if (pal->channel_changed) | ||
49 | pal->channel_changed(pal, pal->channel); | ||
50 | } | ||
51 | } | ||
52 | } | ||
53 | |||
54 | /* | ||
55 | * Change to a new channel and notify any active PALs of the new | ||
56 | * channel. | ||
57 | * | ||
58 | * When stopping the radio, PALs need to be notified first so they can | ||
59 | * terminate any active reservations. | ||
60 | */ | ||
61 | static int uwb_radio_change_channel(struct uwb_rc *rc, int channel) | ||
62 | { | ||
63 | int ret = 0; | ||
64 | |||
65 | if (channel == -1) | ||
66 | uwb_radio_channel_changed(rc, channel); | ||
67 | |||
68 | if (channel != rc->beaconing) { | ||
69 | if (rc->beaconing != -1 && channel != -1) { | ||
70 | /* | ||
71 | * FIXME: should signal the channel change | ||
72 | * with a Channel Change IE. | ||
73 | */ | ||
74 | ret = uwb_radio_change_channel(rc, -1); | ||
75 | if (ret < 0) | ||
76 | return ret; | ||
77 | } | ||
78 | ret = uwb_rc_beacon(rc, channel, 0); | ||
79 | } | ||
80 | |||
81 | if (channel != -1) | ||
82 | uwb_radio_channel_changed(rc, rc->beaconing); | ||
83 | |||
84 | return ret; | ||
85 | } | ||
86 | |||
87 | /** | ||
88 | * uwb_radio_start - request that the radio be started | ||
89 | * @pal: the PAL making the request. | ||
90 | * | ||
91 | * If the radio is not already active, a suitable channel is selected | ||
92 | * and beacons are started. | ||
93 | */ | ||
94 | int uwb_radio_start(struct uwb_pal *pal) | ||
95 | { | ||
96 | struct uwb_rc *rc = pal->rc; | ||
97 | int ret = 0; | ||
98 | |||
99 | mutex_lock(&rc->uwb_dev.mutex); | ||
100 | |||
101 | if (!pal->channel) { | ||
102 | pal->channel = -1; | ||
103 | rc->active_pals++; | ||
104 | ret = uwb_radio_change_channel(rc, uwb_radio_select_channel(rc)); | ||
105 | } | ||
106 | |||
107 | mutex_unlock(&rc->uwb_dev.mutex); | ||
108 | return ret; | ||
109 | } | ||
110 | EXPORT_SYMBOL_GPL(uwb_radio_start); | ||
111 | |||
112 | /** | ||
113 | * uwb_radio_stop - request that the radio be stopped. | ||
114 | * @pal: the PAL making the request. | ||
115 | * | ||
116 | * Stops the radio if no other PAL is making use of it. | ||
117 | */ | ||
118 | void uwb_radio_stop(struct uwb_pal *pal) | ||
119 | { | ||
120 | struct uwb_rc *rc = pal->rc; | ||
121 | |||
122 | mutex_lock(&rc->uwb_dev.mutex); | ||
123 | |||
124 | if (pal->channel) { | ||
125 | rc->active_pals--; | ||
126 | uwb_radio_change_channel(rc, uwb_radio_select_channel(rc)); | ||
127 | pal->channel = 0; | ||
128 | } | ||
129 | |||
130 | mutex_unlock(&rc->uwb_dev.mutex); | ||
131 | } | ||
132 | EXPORT_SYMBOL_GPL(uwb_radio_stop); | ||
133 | |||
134 | /* | ||
135 | * uwb_radio_force_channel - force a specific channel to be used | ||
136 | * @rc: the radio controller. | ||
137 | * @channel: the channel to use; -1 to force the radio to stop; 0 to | ||
138 | * use the default channel selection algorithm. | ||
139 | */ | ||
140 | int uwb_radio_force_channel(struct uwb_rc *rc, int channel) | ||
141 | { | ||
142 | int ret = 0; | ||
143 | |||
144 | mutex_lock(&rc->uwb_dev.mutex); | ||
145 | |||
146 | rc->beaconing_forced = channel; | ||
147 | ret = uwb_radio_change_channel(rc, uwb_radio_select_channel(rc)); | ||
148 | |||
149 | mutex_unlock(&rc->uwb_dev.mutex); | ||
150 | return ret; | ||
151 | } | ||
152 | |||
153 | /* | ||
154 | * uwb_radio_setup - setup the radio manager | ||
155 | * @rc: the radio controller. | ||
156 | * | ||
157 | * The radio controller is reset to ensure it's in a known state | ||
158 | * before it's used. | ||
159 | */ | ||
160 | int uwb_radio_setup(struct uwb_rc *rc) | ||
161 | { | ||
162 | return uwb_rc_reset(rc); | ||
163 | } | ||
164 | |||
165 | /* | ||
166 | * uwb_radio_reset_state - reset any radio manager state | ||
167 | * @rc: the radio controller. | ||
168 | * | ||
169 | * All internal radio manager state is reset to values corresponding | ||
170 | * to a reset radio controller. | ||
171 | */ | ||
172 | void uwb_radio_reset_state(struct uwb_rc *rc) | ||
173 | { | ||
174 | struct uwb_pal *pal; | ||
175 | |||
176 | mutex_lock(&rc->uwb_dev.mutex); | ||
177 | |||
178 | list_for_each_entry(pal, &rc->pals, node) { | ||
179 | if (pal->channel) { | ||
180 | pal->channel = -1; | ||
181 | if (pal->channel_changed) | ||
182 | pal->channel_changed(pal, -1); | ||
183 | } | ||
184 | } | ||
185 | |||
186 | rc->beaconing = -1; | ||
187 | rc->scanning = -1; | ||
188 | |||
189 | mutex_unlock(&rc->uwb_dev.mutex); | ||
190 | } | ||
191 | |||
192 | /* | ||
193 | * uwb_radio_shutdown - shutdown the radio manager | ||
194 | * @rc: the radio controller. | ||
195 | * | ||
196 | * The radio controller is reset. | ||
197 | */ | ||
198 | void uwb_radio_shutdown(struct uwb_rc *rc) | ||
199 | { | ||
200 | uwb_radio_reset_state(rc); | ||
201 | uwb_rc_reset(rc); | ||
202 | } | ||
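With the radio manager a PAL no longer beacons directly: it asks for the radio with uwb_radio_start() and learns the channel through its channel_changed hook, where -1 means the radio has stopped. A sketch of the expected PAL usage; the example_* names are illustrative:

#include <linux/kernel.h>
#include <linux/uwb.h>

static void example_channel_changed(struct uwb_pal *pal, int channel)
{
        if (channel < 0)
                pr_info("example: radio stopped, halt traffic\n");
        else
                pr_info("example: now beaconing on channel %d\n", channel);
}

static int example_pal_up(struct uwb_pal *pal)
{
        pal->channel_changed = example_channel_changed;
        return uwb_radio_start(pal);    /* picks a channel and beacons */
}

static void example_pal_down(struct uwb_pal *pal)
{
        uwb_radio_stop(pal);            /* radio stops once no PAL needs it */
}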
diff --git a/drivers/uwb/reset.c b/drivers/uwb/reset.c index 8de856fa7958..70f8050221ff 100644 --- a/drivers/uwb/reset.c +++ b/drivers/uwb/reset.c | |||
@@ -32,8 +32,6 @@ | |||
32 | #include <linux/err.h> | 32 | #include <linux/err.h> |
33 | 33 | ||
34 | #include "uwb-internal.h" | 34 | #include "uwb-internal.h" |
35 | #define D_LOCAL 0 | ||
36 | #include <linux/uwb/debug.h> | ||
37 | 35 | ||
38 | /** | 36 | /** |
39 | * Command result codes (WUSB1.0[T8-69]) | 37 | * Command result codes (WUSB1.0[T8-69]) |
@@ -323,17 +321,16 @@ int uwbd_msg_handle_reset(struct uwb_event *evt) | |||
323 | struct uwb_rc *rc = evt->rc; | 321 | struct uwb_rc *rc = evt->rc; |
324 | int ret; | 322 | int ret; |
325 | 323 | ||
326 | /* Need to prevent the RC hardware module going away while in | ||
327 | the rc->reset() call. */ | ||
328 | if (!try_module_get(rc->owner)) | ||
329 | return 0; | ||
330 | |||
331 | dev_info(&rc->uwb_dev.dev, "resetting radio controller\n"); | 324 | dev_info(&rc->uwb_dev.dev, "resetting radio controller\n"); |
332 | ret = rc->reset(rc); | 325 | ret = rc->reset(rc); |
333 | if (ret) | 326 | if (ret) { |
334 | dev_err(&rc->uwb_dev.dev, "failed to reset hardware: %d\n", ret); | 327 | dev_err(&rc->uwb_dev.dev, "failed to reset hardware: %d\n", ret); |
335 | 328 | goto error; | |
336 | module_put(rc->owner); | 329 | } |
330 | return 0; | ||
331 | error: | ||
332 | /* Nothing can be done except try the reset again. */ | ||
333 | uwb_rc_reset_all(rc); | ||
337 | return ret; | 334 | return ret; |
338 | } | 335 | } |
339 | 336 | ||
@@ -360,3 +357,33 @@ void uwb_rc_reset_all(struct uwb_rc *rc) | |||
360 | uwbd_event_queue(evt); | 357 | uwbd_event_queue(evt); |
361 | } | 358 | } |
362 | EXPORT_SYMBOL_GPL(uwb_rc_reset_all); | 359 | EXPORT_SYMBOL_GPL(uwb_rc_reset_all); |
360 | |||
361 | void uwb_rc_pre_reset(struct uwb_rc *rc) | ||
362 | { | ||
363 | rc->stop(rc); | ||
364 | uwbd_flush(rc); | ||
365 | |||
366 | uwb_radio_reset_state(rc); | ||
367 | uwb_rsv_remove_all(rc); | ||
368 | } | ||
369 | EXPORT_SYMBOL_GPL(uwb_rc_pre_reset); | ||
370 | |||
371 | void uwb_rc_post_reset(struct uwb_rc *rc) | ||
372 | { | ||
373 | int ret; | ||
374 | |||
375 | ret = rc->start(rc); | ||
376 | if (ret) | ||
377 | goto error; | ||
378 | ret = uwb_rc_mac_addr_set(rc, &rc->uwb_dev.mac_addr); | ||
379 | if (ret) | ||
380 | goto error; | ||
381 | ret = uwb_rc_dev_addr_set(rc, &rc->uwb_dev.dev_addr); | ||
382 | if (ret) | ||
383 | goto error; | ||
384 | return; | ||
385 | error: | ||
386 | /* Nothing can be done except try the reset again. */ | ||
387 | uwb_rc_reset_all(rc); | ||
388 | } | ||
389 | EXPORT_SYMBOL_GPL(uwb_rc_post_reset); | ||
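uwb_rc_pre_reset() and uwb_rc_post_reset() are intended to bracket a hardware reset driven by the HC driver: radio and reservation state is torn down first, then the controller is restarted and its MAC and device addresses reprogrammed (post_reset falls back to uwb_rc_reset_all() if that fails). A sketch of how a driver might wrap its bus-level reset; example_hw_reset() is illustrative:

static int example_rc_hw_reset(struct uwb_rc *rc)
{
        int ret;

        uwb_rc_pre_reset(rc);           /* stop, flush uwbd, drop rsvs */
        ret = example_hw_reset(rc->priv);       /* bus-specific reset */
        uwb_rc_post_reset(rc);          /* restart, reprogram addresses */

        return ret;
}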
diff --git a/drivers/uwb/rsv.c b/drivers/uwb/rsv.c index bae16204576d..ec6eecb32f30 100644 --- a/drivers/uwb/rsv.c +++ b/drivers/uwb/rsv.c | |||
@@ -15,23 +15,33 @@ | |||
15 | * You should have received a copy of the GNU General Public License | 15 | * You should have received a copy of the GNU General Public License |
16 | * along with this program. If not, see <http://www.gnu.org/licenses/>. | 16 | * along with this program. If not, see <http://www.gnu.org/licenses/>. |
17 | */ | 17 | */ |
18 | #include <linux/version.h> | ||
19 | #include <linux/kernel.h> | 18 | #include <linux/kernel.h> |
20 | #include <linux/uwb.h> | 19 | #include <linux/uwb.h> |
20 | #include <linux/random.h> | ||
21 | 21 | ||
22 | #include "uwb-internal.h" | 22 | #include "uwb-internal.h" |
23 | 23 | ||
24 | static void uwb_rsv_timer(unsigned long arg); | 24 | static void uwb_rsv_timer(unsigned long arg); |
25 | 25 | ||
26 | static const char *rsv_states[] = { | 26 | static const char *rsv_states[] = { |
27 | [UWB_RSV_STATE_NONE] = "none", | 27 | [UWB_RSV_STATE_NONE] = "none ", |
28 | [UWB_RSV_STATE_O_INITIATED] = "initiated", | 28 | [UWB_RSV_STATE_O_INITIATED] = "o initiated ", |
29 | [UWB_RSV_STATE_O_PENDING] = "pending", | 29 | [UWB_RSV_STATE_O_PENDING] = "o pending ", |
30 | [UWB_RSV_STATE_O_MODIFIED] = "modified", | 30 | [UWB_RSV_STATE_O_MODIFIED] = "o modified ", |
31 | [UWB_RSV_STATE_O_ESTABLISHED] = "established", | 31 | [UWB_RSV_STATE_O_ESTABLISHED] = "o established ", |
32 | [UWB_RSV_STATE_T_ACCEPTED] = "accepted", | 32 | [UWB_RSV_STATE_O_TO_BE_MOVED] = "o to be moved ", |
33 | [UWB_RSV_STATE_T_DENIED] = "denied", | 33 | [UWB_RSV_STATE_O_MOVE_EXPANDING] = "o move expanding", |
34 | [UWB_RSV_STATE_T_PENDING] = "pending", | 34 | [UWB_RSV_STATE_O_MOVE_COMBINING] = "o move combining", |
35 | [UWB_RSV_STATE_O_MOVE_REDUCING] = "o move reducing ", | ||
36 | [UWB_RSV_STATE_T_ACCEPTED] = "t accepted ", | ||
37 | [UWB_RSV_STATE_T_CONFLICT] = "t conflict ", | ||
38 | [UWB_RSV_STATE_T_PENDING] = "t pending ", | ||
39 | [UWB_RSV_STATE_T_DENIED] = "t denied ", | ||
40 | [UWB_RSV_STATE_T_RESIZED] = "t resized ", | ||
41 | [UWB_RSV_STATE_T_EXPANDING_ACCEPTED] = "t expanding acc ", | ||
42 | [UWB_RSV_STATE_T_EXPANDING_CONFLICT] = "t expanding conf", | ||
43 | [UWB_RSV_STATE_T_EXPANDING_PENDING] = "t expanding pend", | ||
44 | [UWB_RSV_STATE_T_EXPANDING_DENIED] = "t expanding den ", | ||
35 | }; | 45 | }; |
36 | 46 | ||
37 | static const char *rsv_types[] = { | 47 | static const char *rsv_types[] = { |
@@ -42,6 +52,31 @@ static const char *rsv_types[] = { | |||
42 | [UWB_DRP_TYPE_PCA] = "pca", | 52 | [UWB_DRP_TYPE_PCA] = "pca", |
43 | }; | 53 | }; |
44 | 54 | ||
55 | bool uwb_rsv_has_two_drp_ies(struct uwb_rsv *rsv) | ||
56 | { | ||
57 | static const bool has_two_drp_ies[] = { | ||
58 | [UWB_RSV_STATE_O_INITIATED] = false, | ||
59 | [UWB_RSV_STATE_O_PENDING] = false, | ||
60 | [UWB_RSV_STATE_O_MODIFIED] = false, | ||
61 | [UWB_RSV_STATE_O_ESTABLISHED] = false, | ||
62 | [UWB_RSV_STATE_O_TO_BE_MOVED] = false, | ||
63 | [UWB_RSV_STATE_O_MOVE_COMBINING] = false, | ||
64 | [UWB_RSV_STATE_O_MOVE_REDUCING] = false, | ||
65 | [UWB_RSV_STATE_O_MOVE_EXPANDING] = true, | ||
66 | [UWB_RSV_STATE_T_ACCEPTED] = false, | ||
67 | [UWB_RSV_STATE_T_CONFLICT] = false, | ||
68 | [UWB_RSV_STATE_T_PENDING] = false, | ||
69 | [UWB_RSV_STATE_T_DENIED] = false, | ||
70 | [UWB_RSV_STATE_T_RESIZED] = false, | ||
71 | [UWB_RSV_STATE_T_EXPANDING_ACCEPTED] = true, | ||
72 | [UWB_RSV_STATE_T_EXPANDING_CONFLICT] = true, | ||
73 | [UWB_RSV_STATE_T_EXPANDING_PENDING] = true, | ||
74 | [UWB_RSV_STATE_T_EXPANDING_DENIED] = true, | ||
75 | }; | ||
76 | |||
77 | return has_two_drp_ies[rsv->state]; | ||
78 | } | ||
79 | |||
45 | /** | 80 | /** |
46 | * uwb_rsv_state_str - return a string for a reservation state | 81 | * uwb_rsv_state_str - return a string for a reservation state |
47 | * @state: the reservation state. | 82 | * @state: the reservation state. |
@@ -66,7 +101,7 @@ const char *uwb_rsv_type_str(enum uwb_drp_type type) | |||
66 | } | 101 | } |
67 | EXPORT_SYMBOL_GPL(uwb_rsv_type_str); | 102 | EXPORT_SYMBOL_GPL(uwb_rsv_type_str); |
68 | 103 | ||
69 | static void uwb_rsv_dump(struct uwb_rsv *rsv) | 104 | void uwb_rsv_dump(char *text, struct uwb_rsv *rsv) |
70 | { | 105 | { |
71 | struct device *dev = &rsv->rc->uwb_dev.dev; | 106 | struct device *dev = &rsv->rc->uwb_dev.dev; |
72 | struct uwb_dev_addr devaddr; | 107 | struct uwb_dev_addr devaddr; |
@@ -82,6 +117,23 @@ static void uwb_rsv_dump(struct uwb_rsv *rsv) | |||
82 | dev_dbg(dev, "rsv %s -> %s: %s\n", owner, target, uwb_rsv_state_str(rsv->state)); | 117 | dev_dbg(dev, "rsv %s -> %s: %s\n", owner, target, uwb_rsv_state_str(rsv->state)); |
83 | } | 118 | } |
84 | 119 | ||
120 | static void uwb_rsv_release(struct kref *kref) | ||
121 | { | ||
122 | struct uwb_rsv *rsv = container_of(kref, struct uwb_rsv, kref); | ||
123 | |||
124 | kfree(rsv); | ||
125 | } | ||
126 | |||
127 | void uwb_rsv_get(struct uwb_rsv *rsv) | ||
128 | { | ||
129 | kref_get(&rsv->kref); | ||
130 | } | ||
131 | |||
132 | void uwb_rsv_put(struct uwb_rsv *rsv) | ||
133 | { | ||
134 | kref_put(&rsv->kref, uwb_rsv_release); | ||
135 | } | ||
136 | |||
85 | /* | 137 | /* |
86 | * Get a free stream index for a reservation. | 138 | * Get a free stream index for a reservation. |
87 | * | 139 | * |
@@ -92,6 +144,7 @@ static void uwb_rsv_dump(struct uwb_rsv *rsv) | |||
92 | static int uwb_rsv_get_stream(struct uwb_rsv *rsv) | 144 | static int uwb_rsv_get_stream(struct uwb_rsv *rsv) |
93 | { | 145 | { |
94 | struct uwb_rc *rc = rsv->rc; | 146 | struct uwb_rc *rc = rsv->rc; |
147 | struct device *dev = &rc->uwb_dev.dev; | ||
95 | unsigned long *streams_bm; | 148 | unsigned long *streams_bm; |
96 | int stream; | 149 | int stream; |
97 | 150 | ||
@@ -113,12 +166,15 @@ static int uwb_rsv_get_stream(struct uwb_rsv *rsv) | |||
113 | rsv->stream = stream; | 166 | rsv->stream = stream; |
114 | set_bit(stream, streams_bm); | 167 | set_bit(stream, streams_bm); |
115 | 168 | ||
169 | dev_dbg(dev, "get stream %d\n", rsv->stream); | ||
170 | |||
116 | return 0; | 171 | return 0; |
117 | } | 172 | } |
118 | 173 | ||
119 | static void uwb_rsv_put_stream(struct uwb_rsv *rsv) | 174 | static void uwb_rsv_put_stream(struct uwb_rsv *rsv) |
120 | { | 175 | { |
121 | struct uwb_rc *rc = rsv->rc; | 176 | struct uwb_rc *rc = rsv->rc; |
177 | struct device *dev = &rc->uwb_dev.dev; | ||
122 | unsigned long *streams_bm; | 178 | unsigned long *streams_bm; |
123 | 179 | ||
124 | switch (rsv->target.type) { | 180 | switch (rsv->target.type) { |
@@ -133,86 +189,52 @@ static void uwb_rsv_put_stream(struct uwb_rsv *rsv) | |||
133 | } | 189 | } |
134 | 190 | ||
135 | clear_bit(rsv->stream, streams_bm); | 191 | clear_bit(rsv->stream, streams_bm); |
192 | |||
193 | dev_dbg(dev, "put stream %d\n", rsv->stream); | ||
136 | } | 194 | } |
137 | 195 | ||
138 | /* | 196 | void uwb_rsv_backoff_win_timer(unsigned long arg) |
139 | * Generate a MAS allocation with a single row component. | ||
140 | */ | ||
141 | static void uwb_rsv_gen_alloc_row(struct uwb_mas_bm *mas, | ||
142 | int first_mas, int mas_per_zone, | ||
143 | int zs, int ze) | ||
144 | { | 197 | { |
145 | struct uwb_mas_bm col; | 198 | struct uwb_drp_backoff_win *bow = (struct uwb_drp_backoff_win *)arg; |
146 | int z; | 199 | struct uwb_rc *rc = container_of(bow, struct uwb_rc, bow); |
147 | 200 | struct device *dev = &rc->uwb_dev.dev; | |
148 | bitmap_zero(mas->bm, UWB_NUM_MAS); | 201 | |
149 | bitmap_zero(col.bm, UWB_NUM_MAS); | 202 | bow->can_reserve_extra_mases = true; |
150 | bitmap_fill(col.bm, mas_per_zone); | 203 | if (bow->total_expired <= 4) { |
151 | bitmap_shift_left(col.bm, col.bm, first_mas + zs * UWB_MAS_PER_ZONE, UWB_NUM_MAS); | 204 | bow->total_expired++; |
152 | 205 | } else { | |
153 | for (z = zs; z <= ze; z++) { | 206 | /* after the backoff window has expired 4 times we can |
154 | bitmap_or(mas->bm, mas->bm, col.bm, UWB_NUM_MAS); | 207 | * exit from the backoff procedure */ |
155 | bitmap_shift_left(col.bm, col.bm, UWB_MAS_PER_ZONE, UWB_NUM_MAS); | 208 | bow->total_expired = 0; |
209 | bow->window = UWB_DRP_BACKOFF_WIN_MIN >> 1; | ||
156 | } | 210 | } |
211 | dev_dbg(dev, "backoff_win_timer total_expired=%d, n=%d\n: ", bow->total_expired, bow->n); | ||
212 | |||
213 | /* try to relocate all the "to be moved" relocations */ | ||
214 | uwb_rsv_handle_drp_avail_change(rc); | ||
157 | } | 215 | } |
158 | 216 | ||
159 | /* | 217 | void uwb_rsv_backoff_win_increment(struct uwb_rc *rc) |
160 | * Allocate some MAS for this reservation based on current local | ||
161 | * availability, the reservation parameters (max_mas, min_mas, | ||
162 | * sparsity), and the WiMedia rules for MAS allocations. | ||
163 | * | ||
164 | * Returns -EBUSY is insufficient free MAS are available. | ||
165 | * | ||
166 | * FIXME: to simplify this, only safe reservations with a single row | ||
167 | * component in zones 1 to 15 are tried (zone 0 is skipped to avoid | ||
168 | * problems with the MAS reserved for the BP). | ||
169 | * | ||
170 | * [ECMA-368] section B.2. | ||
171 | */ | ||
172 | static int uwb_rsv_alloc_mas(struct uwb_rsv *rsv) | ||
173 | { | 218 | { |
174 | static const int safe_mas_in_row[UWB_NUM_ZONES] = { | 219 | struct uwb_drp_backoff_win *bow = &rc->bow; |
175 | 8, 7, 6, 5, 4, 4, 4, 4, 4, 4, 4, 4, 4, 3, 2, 1, | 220 | struct device *dev = &rc->uwb_dev.dev; |
176 | }; | 221 | unsigned timeout_us; |
177 | int n, r; | ||
178 | struct uwb_mas_bm mas; | ||
179 | bool found = false; | ||
180 | 222 | ||
181 | /* | 223 | dev_dbg(dev, "backoff_win_increment: window=%d\n", bow->window); |
182 | * Search all valid safe allocations until either: too few MAS | ||
183 | * are available; or the smallest allocation with sufficient | ||
184 | * MAS is found. | ||
185 | * | ||
186 | * The top of the zones are preferred, so space for larger | ||
187 | * allocations is available in the bottom of the zone (e.g., a | ||
188 | * 15 MAS allocation should start in row 14 leaving space for | ||
189 | * a 120 MAS allocation at row 0). | ||
190 | */ | ||
191 | for (n = safe_mas_in_row[0]; n >= 1; n--) { | ||
192 | int num_mas; | ||
193 | 224 | ||
194 | num_mas = n * (UWB_NUM_ZONES - 1); | 225 | bow->can_reserve_extra_mases = false; |
195 | if (num_mas < rsv->min_mas) | ||
196 | break; | ||
197 | if (found && num_mas < rsv->max_mas) | ||
198 | break; | ||
199 | 226 | ||
200 | for (r = UWB_MAS_PER_ZONE-1; r >= 0; r--) { | 227 | if ((bow->window << 1) == UWB_DRP_BACKOFF_WIN_MAX) |
201 | if (safe_mas_in_row[r] < n) | 228 | return; |
202 | continue; | ||
203 | uwb_rsv_gen_alloc_row(&mas, r, n, 1, UWB_NUM_ZONES); | ||
204 | if (uwb_drp_avail_reserve_pending(rsv->rc, &mas) == 0) { | ||
205 | found = true; | ||
206 | break; | ||
207 | } | ||
208 | } | ||
209 | } | ||
210 | 229 | ||
211 | if (!found) | 230 | bow->window <<= 1; |
212 | return -EBUSY; | 231 | bow->n = random32() & (bow->window - 1); |
232 | dev_dbg(dev, "new_window=%d, n=%d\n: ", bow->window, bow->n); | ||
213 | 233 | ||
214 | bitmap_copy(rsv->mas.bm, mas.bm, UWB_NUM_MAS); | 234 | /* reset the timer associated variables */ |
215 | return 0; | 235 | timeout_us = bow->n * UWB_SUPERFRAME_LENGTH_US; |
236 | bow->total_expired = 0; | ||
237 | mod_timer(&bow->timer, jiffies + usecs_to_jiffies(timeout_us)); | ||
216 | } | 238 | } |
217 | 239 | ||
218 | static void uwb_rsv_stroke_timer(struct uwb_rsv *rsv) | 240 | static void uwb_rsv_stroke_timer(struct uwb_rsv *rsv) |
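uwb_rsv_backoff_win_increment() above implements a doubling backoff: each new conflict doubles the window (until UWB_DRP_BACKOFF_WIN_MAX) and draws a slot n uniformly in [0, window), which is only uniform because the window is kept a power of two. A standalone model of the arithmetic; the WIN_MIN/WIN_MAX values are placeholders, not taken from this patch:

#include <stdlib.h>

#define WIN_MIN 16      /* placeholder for UWB_DRP_BACKOFF_WIN_MIN */
#define WIN_MAX 256     /* placeholder for UWB_DRP_BACKOFF_WIN_MAX */

struct backoff_win {
        int window;     /* current window, in superframes */
        int n;          /* slots to wait before retrying */
};

static void backoff_win_init(struct backoff_win *bow)
{
        bow->window = WIN_MIN;
        bow->n = 0;
}

static void backoff_win_increment(struct backoff_win *bow)
{
        if ((bow->window << 1) > WIN_MAX)
                return;                 /* hold at the maximum */
        bow->window <<= 1;              /* double on each conflict */
        bow->n = rand() & (bow->window - 1);    /* uniform: window is 2^k */
}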
@@ -225,13 +247,16 @@ static void uwb_rsv_stroke_timer(struct uwb_rsv *rsv) | |||
225 | * received. | 247 | * received. |
226 | */ | 248 | */ |
227 | if (rsv->is_multicast) { | 249 | if (rsv->is_multicast) { |
228 | if (rsv->state == UWB_RSV_STATE_O_INITIATED) | 250 | if (rsv->state == UWB_RSV_STATE_O_INITIATED |
251 | || rsv->state == UWB_RSV_STATE_O_MOVE_EXPANDING | ||
252 | || rsv->state == UWB_RSV_STATE_O_MOVE_COMBINING | ||
253 | || rsv->state == UWB_RSV_STATE_O_MOVE_REDUCING) | ||
229 | sframes = 1; | 254 | sframes = 1; |
230 | if (rsv->state == UWB_RSV_STATE_O_ESTABLISHED) | 255 | if (rsv->state == UWB_RSV_STATE_O_ESTABLISHED) |
231 | sframes = 0; | 256 | sframes = 0; |
257 | |||
232 | } | 258 | } |
233 | 259 | ||
234 | rsv->expired = false; | ||
235 | if (sframes > 0) { | 260 | if (sframes > 0) { |
236 | /* | 261 | /* |
237 | * Add an additional 2 superframes to account for the | 262 | * Add an additional 2 superframes to account for the |
@@ -253,7 +278,7 @@ static void uwb_rsv_state_update(struct uwb_rsv *rsv, | |||
253 | rsv->state = new_state; | 278 | rsv->state = new_state; |
254 | rsv->ie_valid = false; | 279 | rsv->ie_valid = false; |
255 | 280 | ||
256 | uwb_rsv_dump(rsv); | 281 | uwb_rsv_dump("SU", rsv); |
257 | 282 | ||
258 | uwb_rsv_stroke_timer(rsv); | 283 | uwb_rsv_stroke_timer(rsv); |
259 | uwb_rsv_sched_update(rsv->rc); | 284 | uwb_rsv_sched_update(rsv->rc); |
@@ -267,10 +292,17 @@ static void uwb_rsv_callback(struct uwb_rsv *rsv) | |||
267 | 292 | ||
268 | void uwb_rsv_set_state(struct uwb_rsv *rsv, enum uwb_rsv_state new_state) | 293 | void uwb_rsv_set_state(struct uwb_rsv *rsv, enum uwb_rsv_state new_state) |
269 | { | 294 | { |
295 | struct uwb_rsv_move *mv = &rsv->mv; | ||
296 | |||
270 | if (rsv->state == new_state) { | 297 | if (rsv->state == new_state) { |
271 | switch (rsv->state) { | 298 | switch (rsv->state) { |
272 | case UWB_RSV_STATE_O_ESTABLISHED: | 299 | case UWB_RSV_STATE_O_ESTABLISHED: |
300 | case UWB_RSV_STATE_O_MOVE_EXPANDING: | ||
301 | case UWB_RSV_STATE_O_MOVE_COMBINING: | ||
302 | case UWB_RSV_STATE_O_MOVE_REDUCING: | ||
273 | case UWB_RSV_STATE_T_ACCEPTED: | 303 | case UWB_RSV_STATE_T_ACCEPTED: |
304 | case UWB_RSV_STATE_T_EXPANDING_ACCEPTED: | ||
305 | case UWB_RSV_STATE_T_RESIZED: | ||
274 | case UWB_RSV_STATE_NONE: | 306 | case UWB_RSV_STATE_NONE: |
275 | uwb_rsv_stroke_timer(rsv); | 307 | uwb_rsv_stroke_timer(rsv); |
276 | break; | 308 | break; |
@@ -282,10 +314,10 @@ void uwb_rsv_set_state(struct uwb_rsv *rsv, enum uwb_rsv_state new_state) | |||
282 | return; | 314 | return; |
283 | } | 315 | } |
284 | 316 | ||
317 | uwb_rsv_dump("SC", rsv); | ||
318 | |||
285 | switch (new_state) { | 319 | switch (new_state) { |
286 | case UWB_RSV_STATE_NONE: | 320 | case UWB_RSV_STATE_NONE: |
287 | uwb_drp_avail_release(rsv->rc, &rsv->mas); | ||
288 | uwb_rsv_put_stream(rsv); | ||
289 | uwb_rsv_state_update(rsv, UWB_RSV_STATE_NONE); | 321 | uwb_rsv_state_update(rsv, UWB_RSV_STATE_NONE); |
290 | uwb_rsv_callback(rsv); | 322 | uwb_rsv_callback(rsv); |
291 | break; | 323 | break; |
@@ -295,12 +327,45 @@ void uwb_rsv_set_state(struct uwb_rsv *rsv, enum uwb_rsv_state new_state) | |||
295 | case UWB_RSV_STATE_O_PENDING: | 327 | case UWB_RSV_STATE_O_PENDING: |
296 | uwb_rsv_state_update(rsv, UWB_RSV_STATE_O_PENDING); | 328 | uwb_rsv_state_update(rsv, UWB_RSV_STATE_O_PENDING); |
297 | break; | 329 | break; |
330 | case UWB_RSV_STATE_O_MODIFIED: | ||
331 | /* in the companion there are the MASes to drop */ | ||
332 | bitmap_andnot(rsv->mas.bm, rsv->mas.bm, mv->companion_mas.bm, UWB_NUM_MAS); | ||
333 | uwb_rsv_state_update(rsv, UWB_RSV_STATE_O_MODIFIED); | ||
334 | break; | ||
298 | case UWB_RSV_STATE_O_ESTABLISHED: | 335 | case UWB_RSV_STATE_O_ESTABLISHED: |
336 | if (rsv->state == UWB_RSV_STATE_O_MODIFIED | ||
337 | || rsv->state == UWB_RSV_STATE_O_MOVE_REDUCING) { | ||
338 | uwb_drp_avail_release(rsv->rc, &mv->companion_mas); | ||
339 | rsv->needs_release_companion_mas = false; | ||
340 | } | ||
299 | uwb_drp_avail_reserve(rsv->rc, &rsv->mas); | 341 | uwb_drp_avail_reserve(rsv->rc, &rsv->mas); |
300 | uwb_rsv_state_update(rsv, UWB_RSV_STATE_O_ESTABLISHED); | 342 | uwb_rsv_state_update(rsv, UWB_RSV_STATE_O_ESTABLISHED); |
301 | uwb_rsv_callback(rsv); | 343 | uwb_rsv_callback(rsv); |
302 | break; | 344 | break; |
345 | case UWB_RSV_STATE_O_MOVE_EXPANDING: | ||
346 | rsv->needs_release_companion_mas = true; | ||
347 | uwb_rsv_state_update(rsv, UWB_RSV_STATE_O_MOVE_EXPANDING); | ||
348 | break; | ||
349 | case UWB_RSV_STATE_O_MOVE_COMBINING: | ||
350 | rsv->needs_release_companion_mas = false; | ||
351 | uwb_drp_avail_reserve(rsv->rc, &mv->companion_mas); | ||
352 | bitmap_or(rsv->mas.bm, rsv->mas.bm, mv->companion_mas.bm, UWB_NUM_MAS); | ||
353 | rsv->mas.safe += mv->companion_mas.safe; | ||
354 | rsv->mas.unsafe += mv->companion_mas.unsafe; | ||
355 | uwb_rsv_state_update(rsv, UWB_RSV_STATE_O_MOVE_COMBINING); | ||
356 | break; | ||
357 | case UWB_RSV_STATE_O_MOVE_REDUCING: | ||
358 | bitmap_andnot(mv->companion_mas.bm, rsv->mas.bm, mv->final_mas.bm, UWB_NUM_MAS); | ||
359 | rsv->needs_release_companion_mas = true; | ||
360 | rsv->mas.safe = mv->final_mas.safe; | ||
361 | rsv->mas.unsafe = mv->final_mas.unsafe; | ||
362 | bitmap_copy(rsv->mas.bm, mv->final_mas.bm, UWB_NUM_MAS); | ||
363 | bitmap_copy(rsv->mas.unsafe_bm, mv->final_mas.unsafe_bm, UWB_NUM_MAS); | ||
364 | uwb_rsv_state_update(rsv, UWB_RSV_STATE_O_MOVE_REDUCING); | ||
365 | break; | ||
303 | case UWB_RSV_STATE_T_ACCEPTED: | 366 | case UWB_RSV_STATE_T_ACCEPTED: |
367 | case UWB_RSV_STATE_T_RESIZED: | ||
368 | rsv->needs_release_companion_mas = false; | ||
304 | uwb_drp_avail_reserve(rsv->rc, &rsv->mas); | 369 | uwb_drp_avail_reserve(rsv->rc, &rsv->mas); |
305 | uwb_rsv_state_update(rsv, UWB_RSV_STATE_T_ACCEPTED); | 370 | uwb_rsv_state_update(rsv, UWB_RSV_STATE_T_ACCEPTED); |
306 | uwb_rsv_callback(rsv); | 371 | uwb_rsv_callback(rsv); |
@@ -308,12 +373,82 @@ void uwb_rsv_set_state(struct uwb_rsv *rsv, enum uwb_rsv_state new_state) | |||
308 | case UWB_RSV_STATE_T_DENIED: | 373 | case UWB_RSV_STATE_T_DENIED: |
309 | uwb_rsv_state_update(rsv, UWB_RSV_STATE_T_DENIED); | 374 | uwb_rsv_state_update(rsv, UWB_RSV_STATE_T_DENIED); |
310 | break; | 375 | break; |
376 | case UWB_RSV_STATE_T_CONFLICT: | ||
377 | uwb_rsv_state_update(rsv, UWB_RSV_STATE_T_CONFLICT); | ||
378 | break; | ||
379 | case UWB_RSV_STATE_T_PENDING: | ||
380 | uwb_rsv_state_update(rsv, UWB_RSV_STATE_T_PENDING); | ||
381 | break; | ||
382 | case UWB_RSV_STATE_T_EXPANDING_ACCEPTED: | ||
383 | rsv->needs_release_companion_mas = true; | ||
384 | uwb_drp_avail_reserve(rsv->rc, &mv->companion_mas); | ||
385 | uwb_rsv_state_update(rsv, UWB_RSV_STATE_T_EXPANDING_ACCEPTED); | ||
386 | break; | ||
311 | default: | 387 | default: |
312 | dev_err(&rsv->rc->uwb_dev.dev, "unhandled state: %s (%d)\n", | 388 | dev_err(&rsv->rc->uwb_dev.dev, "unhandled state: %s (%d)\n", |
313 | uwb_rsv_state_str(new_state), new_state); | 389 | uwb_rsv_state_str(new_state), new_state); |
314 | } | 390 | } |
315 | } | 391 | } |
316 | 392 | ||
393 | static void uwb_rsv_handle_timeout_work(struct work_struct *work) | ||
394 | { | ||
395 | struct uwb_rsv *rsv = container_of(work, struct uwb_rsv, | ||
396 | handle_timeout_work); | ||
397 | struct uwb_rc *rc = rsv->rc; | ||
398 | |||
399 | mutex_lock(&rc->rsvs_mutex); | ||
400 | |||
401 | uwb_rsv_dump("TO", rsv); | ||
402 | |||
403 | switch (rsv->state) { | ||
404 | case UWB_RSV_STATE_O_INITIATED: | ||
405 | if (rsv->is_multicast) { | ||
406 | uwb_rsv_set_state(rsv, UWB_RSV_STATE_O_ESTABLISHED); | ||
407 | goto unlock; | ||
408 | } | ||
409 | break; | ||
410 | case UWB_RSV_STATE_O_MOVE_EXPANDING: | ||
411 | if (rsv->is_multicast) { | ||
412 | uwb_rsv_set_state(rsv, UWB_RSV_STATE_O_MOVE_COMBINING); | ||
413 | goto unlock; | ||
414 | } | ||
415 | break; | ||
416 | case UWB_RSV_STATE_O_MOVE_COMBINING: | ||
417 | if (rsv->is_multicast) { | ||
418 | uwb_rsv_set_state(rsv, UWB_RSV_STATE_O_MOVE_REDUCING); | ||
419 | goto unlock; | ||
420 | } | ||
421 | break; | ||
422 | case UWB_RSV_STATE_O_MOVE_REDUCING: | ||
423 | if (rsv->is_multicast) { | ||
424 | uwb_rsv_set_state(rsv, UWB_RSV_STATE_O_ESTABLISHED); | ||
425 | goto unlock; | ||
426 | } | ||
427 | break; | ||
428 | case UWB_RSV_STATE_O_ESTABLISHED: | ||
429 | if (rsv->is_multicast) | ||
430 | goto unlock; | ||
431 | break; | ||
432 | case UWB_RSV_STATE_T_EXPANDING_ACCEPTED: | ||
433 | /* | ||
434 | * The time out could be for the main or of the | ||
435 | * companion DRP, assume it's for the companion and | ||
436 | * drop that first. A further time out is required to | ||
437 | * drop the main. | ||
438 | */ | ||
439 | uwb_rsv_set_state(rsv, UWB_RSV_STATE_T_ACCEPTED); | ||
440 | uwb_drp_avail_release(rsv->rc, &rsv->mv.companion_mas); | ||
441 | goto unlock; | ||
442 | default: | ||
443 | break; | ||
444 | } | ||
445 | |||
446 | uwb_rsv_remove(rsv); | ||
447 | |||
448 | unlock: | ||
449 | mutex_unlock(&rc->rsvs_mutex); | ||
450 | } | ||
451 | |||
317 | static struct uwb_rsv *uwb_rsv_alloc(struct uwb_rc *rc) | 452 | static struct uwb_rsv *uwb_rsv_alloc(struct uwb_rc *rc) |
318 | { | 453 | { |
319 | struct uwb_rsv *rsv; | 454 | struct uwb_rsv *rsv; |
@@ -324,23 +459,17 @@ static struct uwb_rsv *uwb_rsv_alloc(struct uwb_rc *rc) | |||
324 | 459 | ||
325 | INIT_LIST_HEAD(&rsv->rc_node); | 460 | INIT_LIST_HEAD(&rsv->rc_node); |
326 | INIT_LIST_HEAD(&rsv->pal_node); | 461 | INIT_LIST_HEAD(&rsv->pal_node); |
462 | kref_init(&rsv->kref); | ||
327 | init_timer(&rsv->timer); | 463 | init_timer(&rsv->timer); |
328 | rsv->timer.function = uwb_rsv_timer; | 464 | rsv->timer.function = uwb_rsv_timer; |
329 | rsv->timer.data = (unsigned long)rsv; | 465 | rsv->timer.data = (unsigned long)rsv; |
330 | 466 | ||
331 | rsv->rc = rc; | 467 | rsv->rc = rc; |
468 | INIT_WORK(&rsv->handle_timeout_work, uwb_rsv_handle_timeout_work); | ||
332 | 469 | ||
333 | return rsv; | 470 | return rsv; |
334 | } | 471 | } |
335 | 472 | ||
336 | static void uwb_rsv_free(struct uwb_rsv *rsv) | ||
337 | { | ||
338 | uwb_dev_put(rsv->owner); | ||
339 | if (rsv->target.type == UWB_RSV_TARGET_DEV) | ||
340 | uwb_dev_put(rsv->target.dev); | ||
341 | kfree(rsv); | ||
342 | } | ||
343 | |||
344 | /** | 473 | /** |
345 | * uwb_rsv_create - allocate and initialize a UWB reservation structure | 474 | * uwb_rsv_create - allocate and initialize a UWB reservation structure |
346 | * @rc: the radio controller | 475 | * @rc: the radio controller |
@@ -371,26 +500,36 @@ EXPORT_SYMBOL_GPL(uwb_rsv_create); | |||
371 | 500 | ||
372 | void uwb_rsv_remove(struct uwb_rsv *rsv) | 501 | void uwb_rsv_remove(struct uwb_rsv *rsv) |
373 | { | 502 | { |
503 | uwb_rsv_dump("RM", rsv); | ||
504 | |||
374 | if (rsv->state != UWB_RSV_STATE_NONE) | 505 | if (rsv->state != UWB_RSV_STATE_NONE) |
375 | uwb_rsv_set_state(rsv, UWB_RSV_STATE_NONE); | 506 | uwb_rsv_set_state(rsv, UWB_RSV_STATE_NONE); |
507 | |||
508 | if (rsv->needs_release_companion_mas) | ||
509 | uwb_drp_avail_release(rsv->rc, &rsv->mv.companion_mas); | ||
510 | uwb_drp_avail_release(rsv->rc, &rsv->mas); | ||
511 | |||
512 | if (uwb_rsv_is_owner(rsv)) | ||
513 | uwb_rsv_put_stream(rsv); | ||
514 | |||
376 | del_timer_sync(&rsv->timer); | 515 | del_timer_sync(&rsv->timer); |
377 | list_del(&rsv->rc_node); | 516 | uwb_dev_put(rsv->owner); |
378 | uwb_rsv_free(rsv); | 517 | if (rsv->target.type == UWB_RSV_TARGET_DEV) |
518 | uwb_dev_put(rsv->target.dev); | ||
519 | |||
520 | list_del_init(&rsv->rc_node); | ||
521 | uwb_rsv_put(rsv); | ||
379 | } | 522 | } |
380 | 523 | ||
381 | /** | 524 | /** |
382 | * uwb_rsv_destroy - free a UWB reservation structure | 525 | * uwb_rsv_destroy - free a UWB reservation structure |
383 | * @rsv: the reservation to free | 526 | * @rsv: the reservation to free |
384 | * | 527 | * |
385 | * The reservation will be terminated if it is pending or established. | 528 | * The reservation must already be terminated. |
386 | */ | 529 | */ |
387 | void uwb_rsv_destroy(struct uwb_rsv *rsv) | 530 | void uwb_rsv_destroy(struct uwb_rsv *rsv) |
388 | { | 531 | { |
389 | struct uwb_rc *rc = rsv->rc; | 532 | uwb_rsv_put(rsv); |
390 | |||
391 | mutex_lock(&rc->rsvs_mutex); | ||
392 | uwb_rsv_remove(rsv); | ||
393 | mutex_unlock(&rc->rsvs_mutex); | ||
394 | } | 533 | } |
395 | EXPORT_SYMBOL_GPL(uwb_rsv_destroy); | 534 | EXPORT_SYMBOL_GPL(uwb_rsv_destroy); |
396 | 535 | ||
@@ -399,7 +538,7 @@ EXPORT_SYMBOL_GPL(uwb_rsv_destroy); | |||
399 | * @rsv: the reservation | 538 | * @rsv: the reservation |
400 | * | 539 | * |
401 | * The PAL should fill in @rsv's owner, target, type, max_mas, | 540 | * The PAL should fill in @rsv's owner, target, type, max_mas, |
402 | * min_mas, sparsity and is_multicast fields. If the target is a | 541 | * min_mas, max_interval and is_multicast fields. If the target is a |
403 | * uwb_dev it must be referenced. | 542 | * uwb_dev it must be referenced. |
404 | * | 543 | * |
405 | * The reservation's callback will be called when the reservation is | 544 | * The reservation's callback will be called when the reservation is |
@@ -408,20 +547,32 @@ EXPORT_SYMBOL_GPL(uwb_rsv_destroy); | |||
408 | int uwb_rsv_establish(struct uwb_rsv *rsv) | 547 | int uwb_rsv_establish(struct uwb_rsv *rsv) |
409 | { | 548 | { |
410 | struct uwb_rc *rc = rsv->rc; | 549 | struct uwb_rc *rc = rsv->rc; |
550 | struct uwb_mas_bm available; | ||
411 | int ret; | 551 | int ret; |
412 | 552 | ||
413 | mutex_lock(&rc->rsvs_mutex); | 553 | mutex_lock(&rc->rsvs_mutex); |
414 | |||
415 | ret = uwb_rsv_get_stream(rsv); | 554 | ret = uwb_rsv_get_stream(rsv); |
416 | if (ret) | 555 | if (ret) |
417 | goto out; | 556 | goto out; |
418 | 557 | ||
419 | ret = uwb_rsv_alloc_mas(rsv); | 558 | rsv->tiebreaker = random32() & 1; |
420 | if (ret) { | 559 | /* get available mas bitmap */ |
560 | uwb_drp_available(rc, &available); | ||
561 | |||
562 | ret = uwb_rsv_find_best_allocation(rsv, &available, &rsv->mas); | ||
563 | if (ret == UWB_RSV_ALLOC_NOT_FOUND) { | ||
564 | ret = -EBUSY; | ||
565 | uwb_rsv_put_stream(rsv); | ||
566 | goto out; | ||
567 | } | ||
568 | |||
569 | ret = uwb_drp_avail_reserve_pending(rc, &rsv->mas); | ||
570 | if (ret != 0) { | ||
421 | uwb_rsv_put_stream(rsv); | 571 | uwb_rsv_put_stream(rsv); |
422 | goto out; | 572 | goto out; |
423 | } | 573 | } |
424 | 574 | ||
575 | uwb_rsv_get(rsv); | ||
425 | list_add_tail(&rsv->rc_node, &rc->reservations); | 576 | list_add_tail(&rsv->rc_node, &rc->reservations); |
426 | rsv->owner = &rc->uwb_dev; | 577 | rsv->owner = &rc->uwb_dev; |
427 | uwb_dev_get(rsv->owner); | 578 | uwb_dev_get(rsv->owner); |
@@ -437,16 +588,71 @@ EXPORT_SYMBOL_GPL(uwb_rsv_establish); | |||
437 | * @rsv: the reservation to modify | 588 | * @rsv: the reservation to modify |
438 | * @max_mas: new maximum MAS to reserve | 589 | * @max_mas: new maximum MAS to reserve |
439 | * @min_mas: new minimum MAS to reserve | 590 | * @min_mas: new minimum MAS to reserve |
440 | * @sparsity: new sparsity to use | 591 | * @max_interval: new max_interval to use |
441 | * | 592 | * |
442 | * FIXME: implement this once there are PALs that use it. | 593 | * FIXME: implement this once there are PALs that use it. |
443 | */ | 594 | */ |
444 | int uwb_rsv_modify(struct uwb_rsv *rsv, int max_mas, int min_mas, int sparsity) | 595 | int uwb_rsv_modify(struct uwb_rsv *rsv, int max_mas, int min_mas, int max_interval) |
445 | { | 596 | { |
446 | return -ENOSYS; | 597 | return -ENOSYS; |
447 | } | 598 | } |
448 | EXPORT_SYMBOL_GPL(uwb_rsv_modify); | 599 | EXPORT_SYMBOL_GPL(uwb_rsv_modify); |
449 | 600 | ||
601 | /* | ||
602 | * move an already established reservation (rc->rsvs_mutex must be | ||
603 | * held when this function is called) | ||
604 | */ | ||
605 | int uwb_rsv_try_move(struct uwb_rsv *rsv, struct uwb_mas_bm *available) | ||
606 | { | ||
607 | struct uwb_rc *rc = rsv->rc; | ||
608 | struct uwb_drp_backoff_win *bow = &rc->bow; | ||
609 | struct device *dev = &rc->uwb_dev.dev; | ||
610 | struct uwb_rsv_move *mv; | ||
611 | int ret = 0; | ||
612 | |||
613 | if (bow->can_reserve_extra_mases == false) | ||
614 | return -EBUSY; | ||
615 | |||
616 | mv = &rsv->mv; | ||
617 | |||
618 | if (uwb_rsv_find_best_allocation(rsv, available, &mv->final_mas) == UWB_RSV_ALLOC_FOUND) { | ||
619 | |||
620 | if (!bitmap_equal(rsv->mas.bm, mv->final_mas.bm, UWB_NUM_MAS)) { | ||
621 | /* We want to move the reservation */ | ||
622 | bitmap_andnot(mv->companion_mas.bm, mv->final_mas.bm, rsv->mas.bm, UWB_NUM_MAS); | ||
623 | uwb_drp_avail_reserve_pending(rc, &mv->companion_mas); | ||
624 | uwb_rsv_set_state(rsv, UWB_RSV_STATE_O_MOVE_EXPANDING); | ||
625 | } | ||
626 | } else { | ||
627 | dev_dbg(dev, "new allocation not found\n"); | ||
628 | } | ||
629 | |||
630 | return ret; | ||
631 | } | ||
632 | |||
633 | /* Try to move every reservation in state O_ESTABLISHED, giving the | ||
634 | * MAS allocator an availability bitmap that is the real availability | ||
635 | * plus the MAS already allocated to the reservation. */ | ||
636 | void uwb_rsv_handle_drp_avail_change(struct uwb_rc *rc) | ||
637 | { | ||
638 | struct uwb_drp_backoff_win *bow = &rc->bow; | ||
639 | struct uwb_rsv *rsv; | ||
640 | struct uwb_mas_bm mas; | ||
641 | |||
642 | if (bow->can_reserve_extra_mases == false) | ||
643 | return; | ||
644 | |||
645 | list_for_each_entry(rsv, &rc->reservations, rc_node) { | ||
646 | if (rsv->state == UWB_RSV_STATE_O_ESTABLISHED || | ||
647 | rsv->state == UWB_RSV_STATE_O_TO_BE_MOVED) { | ||
648 | uwb_drp_available(rc, &mas); | ||
649 | bitmap_or(mas.bm, mas.bm, rsv->mas.bm, UWB_NUM_MAS); | ||
650 | uwb_rsv_try_move(rsv, &mas); | ||
651 | } | ||
652 | } | ||
653 | |||
654 | } | ||
655 | |||
450 | /** | 656 | /** |
451 | * uwb_rsv_terminate - terminate an established reservation | 657 | * uwb_rsv_terminate - terminate an established reservation |
452 | * @rsv: the reservation to terminate | 658 | * @rsv: the reservation to terminate |
@@ -463,7 +669,8 @@ void uwb_rsv_terminate(struct uwb_rsv *rsv) | |||
463 | 669 | ||
464 | mutex_lock(&rc->rsvs_mutex); | 670 | mutex_lock(&rc->rsvs_mutex); |
465 | 671 | ||
466 | uwb_rsv_set_state(rsv, UWB_RSV_STATE_NONE); | 672 | if (rsv->state != UWB_RSV_STATE_NONE) |
673 | uwb_rsv_set_state(rsv, UWB_RSV_STATE_NONE); | ||
467 | 674 | ||
468 | mutex_unlock(&rc->rsvs_mutex); | 675 | mutex_unlock(&rc->rsvs_mutex); |
469 | } | 676 | } |
@@ -477,9 +684,14 @@ EXPORT_SYMBOL_GPL(uwb_rsv_terminate); | |||
477 | * | 684 | * |
478 | * Reservation requests from peers are denied unless a PAL accepts it | 685 | * Reservation requests from peers are denied unless a PAL accepts it |
479 | * by calling this function. | 686 | * by calling this function. |
687 | * | ||
688 | * The PAL must call uwb_rsv_destroy() for all accepted reservations before | ||
689 | * calling uwb_pal_unregister(). | ||
480 | */ | 690 | */ |
481 | void uwb_rsv_accept(struct uwb_rsv *rsv, uwb_rsv_cb_f cb, void *pal_priv) | 691 | void uwb_rsv_accept(struct uwb_rsv *rsv, uwb_rsv_cb_f cb, void *pal_priv) |
482 | { | 692 | { |
693 | uwb_rsv_get(rsv); | ||
694 | |||
483 | rsv->callback = cb; | 695 | rsv->callback = cb; |
484 | rsv->pal_priv = pal_priv; | 696 | rsv->pal_priv = pal_priv; |
485 | rsv->state = UWB_RSV_STATE_T_ACCEPTED; | 697 | rsv->state = UWB_RSV_STATE_T_ACCEPTED; |
@@ -530,9 +742,9 @@ static struct uwb_rsv *uwb_rsv_new_target(struct uwb_rc *rc, | |||
530 | uwb_dev_get(rsv->owner); | 742 | uwb_dev_get(rsv->owner); |
531 | rsv->target.type = UWB_RSV_TARGET_DEV; | 743 | rsv->target.type = UWB_RSV_TARGET_DEV; |
532 | rsv->target.dev = &rc->uwb_dev; | 744 | rsv->target.dev = &rc->uwb_dev; |
745 | uwb_dev_get(&rc->uwb_dev); | ||
533 | rsv->type = uwb_ie_drp_type(drp_ie); | 746 | rsv->type = uwb_ie_drp_type(drp_ie); |
534 | rsv->stream = uwb_ie_drp_stream_index(drp_ie); | 747 | rsv->stream = uwb_ie_drp_stream_index(drp_ie); |
535 | set_bit(rsv->stream, rsv->owner->streams); | ||
536 | uwb_drp_ie_to_bm(&rsv->mas, drp_ie); | 748 | uwb_drp_ie_to_bm(&rsv->mas, drp_ie); |
537 | 749 | ||
538 | /* | 750 | /* |
@@ -540,24 +752,46 @@ static struct uwb_rsv *uwb_rsv_new_target(struct uwb_rc *rc, | |||
540 | * deny the request. | 752 | * deny the request. |
541 | */ | 753 | */ |
542 | rsv->state = UWB_RSV_STATE_T_DENIED; | 754 | rsv->state = UWB_RSV_STATE_T_DENIED; |
543 | spin_lock(&rc->pal_lock); | 755 | mutex_lock(&rc->uwb_dev.mutex); |
544 | list_for_each_entry(pal, &rc->pals, node) { | 756 | list_for_each_entry(pal, &rc->pals, node) { |
545 | if (pal->new_rsv) | 757 | if (pal->new_rsv) |
546 | pal->new_rsv(rsv); | 758 | pal->new_rsv(pal, rsv); |
547 | if (rsv->state == UWB_RSV_STATE_T_ACCEPTED) | 759 | if (rsv->state == UWB_RSV_STATE_T_ACCEPTED) |
548 | break; | 760 | break; |
549 | } | 761 | } |
550 | spin_unlock(&rc->pal_lock); | 762 | mutex_unlock(&rc->uwb_dev.mutex); |
551 | 763 | ||
552 | list_add_tail(&rsv->rc_node, &rc->reservations); | 764 | list_add_tail(&rsv->rc_node, &rc->reservations); |
553 | state = rsv->state; | 765 | state = rsv->state; |
554 | rsv->state = UWB_RSV_STATE_NONE; | 766 | rsv->state = UWB_RSV_STATE_NONE; |
555 | uwb_rsv_set_state(rsv, state); | 767 | |
768 | /* FIXME: do something sensible here */ | ||
769 | if (state == UWB_RSV_STATE_T_ACCEPTED | ||
770 | && uwb_drp_avail_reserve_pending(rc, &rsv->mas) == -EBUSY) { | ||
771 | /* FIXME: do something sensible here */ | ||
772 | } else { | ||
773 | uwb_rsv_set_state(rsv, state); | ||
774 | } | ||
556 | 775 | ||
557 | return rsv; | 776 | return rsv; |
558 | } | 777 | } |
559 | 778 | ||
560 | /** | 779 | /** |
780 | * uwb_rsv_get_usable_mas - get the bitmap of the usable MAS of a reservation | ||
781 | * @rsv: the reservation. | ||
782 | * @mas: returns the available MAS. | ||
783 | * | ||
784 | * The usable MAS of a reservation may be less than the negotiated MAS | ||
785 | * if alien BPs are present. | ||
786 | */ | ||
787 | void uwb_rsv_get_usable_mas(struct uwb_rsv *rsv, struct uwb_mas_bm *mas) | ||
788 | { | ||
789 | bitmap_zero(mas->bm, UWB_NUM_MAS); | ||
790 | bitmap_andnot(mas->bm, rsv->mas.bm, rsv->rc->cnflt_alien_bitmap.bm, UWB_NUM_MAS); | ||
791 | } | ||
792 | EXPORT_SYMBOL_GPL(uwb_rsv_get_usable_mas); | ||
793 | |||
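For example, a PAL that needs the schedule it can actually transmit in
would do the following (a sketch; the bitmap relationship is exactly the
one computed above):

	struct uwb_mas_bm usable;

	uwb_rsv_get_usable_mas(rsv, &usable);
	/* usable.bm == rsv->mas.bm & ~rc->cnflt_alien_bitmap.bm:
	 * the negotiated MAS minus slots claimed by an alien BP. */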
794 | /** | ||
561 | * uwb_rsv_find - find a reservation for a received DRP IE. | 795 | * uwb_rsv_find - find a reservation for a received DRP IE. |
562 | * @rc: the radio controller | 796 | * @rc: the radio controller |
563 | * @src: source of the DRP IE | 797 | * @src: source of the DRP IE |
@@ -596,8 +830,6 @@ static bool uwb_rsv_update_all(struct uwb_rc *rc) | |||
596 | bool ie_updated = false; | 830 | bool ie_updated = false; |
597 | 831 | ||
598 | list_for_each_entry_safe(rsv, t, &rc->reservations, rc_node) { | 832 | list_for_each_entry_safe(rsv, t, &rc->reservations, rc_node) { |
599 | if (rsv->expired) | ||
600 | uwb_drp_handle_timeout(rsv); | ||
601 | if (!rsv->ie_valid) { | 833 | if (!rsv->ie_valid) { |
602 | uwb_drp_ie_update(rsv); | 834 | uwb_drp_ie_update(rsv); |
603 | ie_updated = true; | 835 | ie_updated = true; |
@@ -607,9 +839,47 @@ static bool uwb_rsv_update_all(struct uwb_rc *rc) | |||
607 | return ie_updated; | 839 | return ie_updated; |
608 | } | 840 | } |
609 | 841 | ||
842 | void uwb_rsv_queue_update(struct uwb_rc *rc) | ||
843 | { | ||
844 | unsigned long delay_us = UWB_MAS_LENGTH_US * UWB_MAS_PER_ZONE; | ||
845 | |||
846 | queue_delayed_work(rc->rsv_workq, &rc->rsv_update_work, usecs_to_jiffies(delay_us)); | ||
847 | } | ||
848 | |||
849 | /** | ||
850 | * uwb_rsv_sched_update - schedule an update of the DRP IEs | ||
851 | * @rc: the radio controller. | ||
852 | * | ||
853 | * To improve performance and ensure correctness with [ECMA-368], the | ||
854 | * number of SET-DRP-IE commands issued is limited. | ||
855 | * | ||
856 | * DRP IE updates come from two sources: DRP events from the hardware | ||
857 | * which all occur at the beginning of the superframe ('synchronous' | ||
858 | * events) and reservation establishment/termination requests from | ||
859 | * PALs or timers ('asynchronous' events). | ||
860 | * | ||
861 | * A delayed work ensures that all the synchronous events result in | ||
862 | * one SET-DRP-IE command. | ||
863 | * | ||
864 | * Additional logic (the set_drp_ie_pending and rsv_updated_postponed | ||
865 | * flags) will prevent an asynchronous event from starting a SET-DRP-IE | ||
866 | * command if one is currently awaiting a response. | ||
867 | * | ||
868 | * FIXME: this does leave a window where an asynchronous event can delay | ||
869 | * the SET-DRP-IE for a synchronous event by one superframe. | ||
870 | */ | ||
610 | void uwb_rsv_sched_update(struct uwb_rc *rc) | 871 | void uwb_rsv_sched_update(struct uwb_rc *rc) |
611 | { | 872 | { |
612 | queue_work(rc->rsv_workq, &rc->rsv_update_work); | 873 | spin_lock(&rc->rsvs_lock); |
874 | if (!delayed_work_pending(&rc->rsv_update_work)) { | ||
875 | if (rc->set_drp_ie_pending > 0) { | ||
876 | rc->set_drp_ie_pending++; | ||
877 | goto unlock; | ||
878 | } | ||
879 | uwb_rsv_queue_update(rc); | ||
880 | } | ||
881 | unlock: | ||
882 | spin_unlock(&rc->rsvs_lock); | ||
613 | } | 883 | } |
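The same coalescing idiom, reduced to a generic form (all names
hypothetical; compare with uwb_rsv_sched_update() above):

	struct coalescer {
		spinlock_t lock;
		int cmd_in_flight;              /* like set_drp_ie_pending */
		unsigned long delay;
		struct workqueue_struct *wq;
		struct delayed_work work;
	};

	static void sched_update(struct coalescer *c)
	{
		spin_lock(&c->lock);
		if (!delayed_work_pending(&c->work)) {
			if (c->cmd_in_flight > 0)
				c->cmd_in_flight++;  /* replay when the reply arrives */
			else
				queue_delayed_work(c->wq, &c->work, c->delay);
		}
		spin_unlock(&c->lock);
	}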
614 | 884 | ||
615 | /* | 885 | /* |
@@ -618,7 +888,8 @@ void uwb_rsv_sched_update(struct uwb_rc *rc) | |||
618 | */ | 888 | */ |
619 | static void uwb_rsv_update_work(struct work_struct *work) | 889 | static void uwb_rsv_update_work(struct work_struct *work) |
620 | { | 890 | { |
621 | struct uwb_rc *rc = container_of(work, struct uwb_rc, rsv_update_work); | 891 | struct uwb_rc *rc = container_of(work, struct uwb_rc, |
892 | rsv_update_work.work); | ||
622 | bool ie_updated; | 893 | bool ie_updated; |
623 | 894 | ||
624 | mutex_lock(&rc->rsvs_mutex); | 895 | mutex_lock(&rc->rsvs_mutex); |
@@ -630,25 +901,71 @@ static void uwb_rsv_update_work(struct work_struct *work) | |||
630 | ie_updated = true; | 901 | ie_updated = true; |
631 | } | 902 | } |
632 | 903 | ||
633 | if (ie_updated) | 904 | if (ie_updated && (rc->set_drp_ie_pending == 0)) |
634 | uwb_rc_send_all_drp_ie(rc); | 905 | uwb_rc_send_all_drp_ie(rc); |
635 | 906 | ||
636 | mutex_unlock(&rc->rsvs_mutex); | 907 | mutex_unlock(&rc->rsvs_mutex); |
637 | } | 908 | } |
638 | 909 | ||
910 | static void uwb_rsv_alien_bp_work(struct work_struct *work) | ||
911 | { | ||
912 | struct uwb_rc *rc = container_of(work, struct uwb_rc, | ||
913 | rsv_alien_bp_work.work); | ||
914 | struct uwb_rsv *rsv; | ||
915 | |||
916 | mutex_lock(&rc->rsvs_mutex); | ||
917 | |||
918 | list_for_each_entry(rsv, &rc->reservations, rc_node) { | ||
919 | if (rsv->type != UWB_DRP_TYPE_ALIEN_BP) { | ||
920 | rsv->callback(rsv); | ||
921 | } | ||
922 | } | ||
923 | |||
924 | mutex_unlock(&rc->rsvs_mutex); | ||
925 | } | ||
926 | |||
639 | static void uwb_rsv_timer(unsigned long arg) | 927 | static void uwb_rsv_timer(unsigned long arg) |
640 | { | 928 | { |
641 | struct uwb_rsv *rsv = (struct uwb_rsv *)arg; | 929 | struct uwb_rsv *rsv = (struct uwb_rsv *)arg; |
642 | 930 | ||
643 | rsv->expired = true; | 931 | queue_work(rsv->rc->rsv_workq, &rsv->handle_timeout_work); |
644 | uwb_rsv_sched_update(rsv->rc); | 932 | } |
933 | |||
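The timeout handling moved out of the timer because timers fire in atomic
(softirq) context, where rsvs_mutex cannot be taken; the timer now only
queues work. The general pattern, with hypothetical names:

	static void my_timeout_work(struct work_struct *work)
	{
		struct my_obj *obj = container_of(work, struct my_obj,
						  timeout_work);

		mutex_lock(&obj->mutex);        /* legal here, not in the timer */
		/* ... handle the timeout ... */
		mutex_unlock(&obj->mutex);
	}

	static void my_timer_fn(unsigned long arg)
	{
		struct my_obj *obj = (struct my_obj *)arg;

		queue_work(obj->wq, &obj->timeout_work);
	}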
934 | /** | ||
935 | * uwb_rsv_remove_all - remove all reservations | ||
936 | * @rc: the radio controller | ||
937 | * | ||
938 | * A DRP IE update is not done. | ||
939 | */ | ||
940 | void uwb_rsv_remove_all(struct uwb_rc *rc) | ||
941 | { | ||
942 | struct uwb_rsv *rsv, *t; | ||
943 | |||
944 | mutex_lock(&rc->rsvs_mutex); | ||
945 | list_for_each_entry_safe(rsv, t, &rc->reservations, rc_node) { | ||
946 | uwb_rsv_remove(rsv); | ||
947 | } | ||
948 | /* Cancel any postponed update. */ | ||
949 | rc->set_drp_ie_pending = 0; | ||
950 | mutex_unlock(&rc->rsvs_mutex); | ||
951 | |||
952 | cancel_delayed_work_sync(&rc->rsv_update_work); | ||
645 | } | 953 | } |
646 | 954 | ||
647 | void uwb_rsv_init(struct uwb_rc *rc) | 955 | void uwb_rsv_init(struct uwb_rc *rc) |
648 | { | 956 | { |
649 | INIT_LIST_HEAD(&rc->reservations); | 957 | INIT_LIST_HEAD(&rc->reservations); |
958 | INIT_LIST_HEAD(&rc->cnflt_alien_list); | ||
650 | mutex_init(&rc->rsvs_mutex); | 959 | mutex_init(&rc->rsvs_mutex); |
651 | INIT_WORK(&rc->rsv_update_work, uwb_rsv_update_work); | 960 | spin_lock_init(&rc->rsvs_lock); |
961 | INIT_DELAYED_WORK(&rc->rsv_update_work, uwb_rsv_update_work); | ||
962 | INIT_DELAYED_WORK(&rc->rsv_alien_bp_work, uwb_rsv_alien_bp_work); | ||
963 | rc->bow.can_reserve_extra_mases = true; | ||
964 | rc->bow.total_expired = 0; | ||
965 | rc->bow.window = UWB_DRP_BACKOFF_WIN_MIN >> 1; | ||
966 | init_timer(&rc->bow.timer); | ||
967 | rc->bow.timer.function = uwb_rsv_backoff_win_timer; | ||
968 | rc->bow.timer.data = (unsigned long)&rc->bow; | ||
652 | 969 | ||
653 | bitmap_complement(rc->uwb_dev.streams, rc->uwb_dev.streams, UWB_NUM_STREAMS); | 970 | bitmap_complement(rc->uwb_dev.streams, rc->uwb_dev.streams, UWB_NUM_STREAMS); |
654 | } | 971 | } |
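The backoff-window timer uses the timer API of this kernel generation,
where the callback and an unsigned long cookie are wired up by hand. A
sketch of the pattern (arming with mod_timer() is assumed to happen in
the backoff-window code, which is not part of this hunk):

	init_timer(&t);
	t.function = my_timer_fn;          /* void my_timer_fn(unsigned long) */
	t.data     = (unsigned long)obj;   /* cookie passed back to the callback */
	mod_timer(&t, jiffies + msecs_to_jiffies(10));   /* armed later */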
@@ -667,14 +984,6 @@ int uwb_rsv_setup(struct uwb_rc *rc) | |||
667 | 984 | ||
668 | void uwb_rsv_cleanup(struct uwb_rc *rc) | 985 | void uwb_rsv_cleanup(struct uwb_rc *rc) |
669 | { | 986 | { |
670 | struct uwb_rsv *rsv, *t; | 987 | uwb_rsv_remove_all(rc); |
671 | |||
672 | mutex_lock(&rc->rsvs_mutex); | ||
673 | list_for_each_entry_safe(rsv, t, &rc->reservations, rc_node) { | ||
674 | uwb_rsv_remove(rsv); | ||
675 | } | ||
676 | mutex_unlock(&rc->rsvs_mutex); | ||
677 | |||
678 | cancel_work_sync(&rc->rsv_update_work); | ||
679 | destroy_workqueue(rc->rsv_workq); | 988 | destroy_workqueue(rc->rsv_workq); |
680 | } | 989 | } |
diff --git a/drivers/uwb/umc-bus.c b/drivers/uwb/umc-bus.c index 2d8d62d9f53e..5ad36164c13b 100644 --- a/drivers/uwb/umc-bus.c +++ b/drivers/uwb/umc-bus.c | |||
@@ -11,23 +11,48 @@ | |||
11 | #include <linux/uwb/umc.h> | 11 | #include <linux/uwb/umc.h> |
12 | #include <linux/pci.h> | 12 | #include <linux/pci.h> |
13 | 13 | ||
14 | static int umc_bus_unbind_helper(struct device *dev, void *data) | 14 | static int umc_bus_pre_reset_helper(struct device *dev, void *data) |
15 | { | 15 | { |
16 | struct device *parent = data; | 16 | int ret = 0; |
17 | 17 | ||
18 | if (dev->parent == parent && dev->driver) | 18 | if (dev->driver) { |
19 | device_release_driver(dev); | 19 | struct umc_dev *umc = to_umc_dev(dev); |
20 | return 0; | 20 | struct umc_driver *umc_drv = to_umc_driver(dev->driver); |
21 | |||
22 | if (umc_drv->pre_reset) | ||
23 | ret = umc_drv->pre_reset(umc); | ||
24 | else | ||
25 | device_release_driver(dev); | ||
26 | } | ||
27 | return ret; | ||
28 | } | ||
29 | |||
30 | static int umc_bus_post_reset_helper(struct device *dev, void *data) | ||
31 | { | ||
32 | int ret = 0; | ||
33 | |||
34 | if (dev->driver) { | ||
35 | struct umc_dev *umc = to_umc_dev(dev); | ||
36 | struct umc_driver *umc_drv = to_umc_driver(dev->driver); | ||
37 | |||
38 | if (umc_drv->post_reset) | ||
39 | ret = umc_drv->post_reset(umc); | ||
40 | } else | ||
41 | ret = device_attach(dev); | ||
42 | |||
43 | return ret; | ||
21 | } | 44 | } |
22 | 45 | ||
23 | /** | 46 | /** |
24 | * umc_controller_reset - reset the whole UMC controller | 47 | * umc_controller_reset - reset the whole UMC controller |
25 | * @umc: the UMC device for the radio controller. | 48 | * @umc: the UMC device for the radio controller. |
26 | * | 49 | * |
27 | * Drivers will be unbound from all UMC devices belonging to the | 50 | * Drivers for all capabilities of the controller will have their |
28 | * controller and then the radio controller will be rebound. The | 51 | * pre_reset methods called or be unbound from their device. Then all |
29 | * radio controller is expected to do a full hardware reset when it is | 52 | * post_reset methods will be called or the drivers will be rebound. |
30 | * probed. | 53 | * |
54 | * Radio controllers must provide pre_reset and post_reset methods and | ||
55 | * reset the hardware in their start method. | ||
31 | * | 56 | * |
32 | * If this is called while a probe() or remove() is in progress it | 57 | * If this is called while a probe() or remove() is in progress it |
33 | * will return -EAGAIN and not perform the reset. | 58 | * will return -EAGAIN and not perform the reset. |
@@ -35,14 +60,13 @@ static int umc_bus_unbind_helper(struct device *dev, void *data) | |||
35 | int umc_controller_reset(struct umc_dev *umc) | 60 | int umc_controller_reset(struct umc_dev *umc) |
36 | { | 61 | { |
37 | struct device *parent = umc->dev.parent; | 62 | struct device *parent = umc->dev.parent; |
38 | int ret; | 63 | int ret = 0; |
39 | 64 | ||
40 | if (down_trylock(&parent->sem)) | 65 | if (down_trylock(&parent->sem)) |
41 | return -EAGAIN; | 66 | return -EAGAIN; |
42 | bus_for_each_dev(&umc_bus_type, NULL, parent, umc_bus_unbind_helper); | 67 | ret = device_for_each_child(parent, parent, umc_bus_pre_reset_helper); |
43 | ret = device_attach(&umc->dev); | 68 | if (ret >= 0) |
44 | if (ret == 1) | 69 | device_for_each_child(parent, parent, umc_bus_post_reset_helper); |
45 | ret = 0; | ||
46 | up(&parent->sem); | 70 | up(&parent->sem); |
47 | 71 | ||
48 | return ret; | 72 | return ret; |
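A capability driver that wants to survive a controller reset in place
supplies both hooks; a driver without them keeps the old unbind/rebind
behaviour. A hypothetical driver (only the pre_reset/post_reset members
are introduced by this patch; the rest of the struct is assumed):

	static int my_cap_pre_reset(struct umc_dev *umc)
	{
		/* quiesce the capability: stop DMA, mask interrupts */
		return 0;
	}

	static int my_cap_post_reset(struct umc_dev *umc)
	{
		/* hardware was reset: reprogram registers, restart queues */
		return 0;
	}

	static struct umc_driver my_cap_driver = {
		.name       = "my-cap",
		.pre_reset  = my_cap_pre_reset,
		.post_reset = my_cap_post_reset,
		/* .cap_id, .probe, .remove ... */
	};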
@@ -75,10 +99,10 @@ static int umc_bus_rescan_helper(struct device *dev, void *data) | |||
75 | if (!dev->driver) | 99 | if (!dev->driver) |
76 | ret = device_attach(dev); | 100 | ret = device_attach(dev); |
77 | 101 | ||
78 | return ret < 0 ? ret : 0; | 102 | return ret; |
79 | } | 103 | } |
80 | 104 | ||
81 | static void umc_bus_rescan(void) | 105 | static void umc_bus_rescan(struct device *parent) |
82 | { | 106 | { |
83 | int err; | 107 | int err; |
84 | 108 | ||
@@ -86,7 +110,7 @@ static void umc_bus_rescan(void) | |||
86 | * We can't use bus_rescan_devices() here as it deadlocks when | 110 | * We can't use bus_rescan_devices() here as it deadlocks when |
87 | * it tries to retake the dev->parent semaphore. | 111 | * it tries to retake the dev->parent semaphore. |
88 | */ | 112 | */ |
89 | err = bus_for_each_dev(&umc_bus_type, NULL, NULL, umc_bus_rescan_helper); | 113 | err = device_for_each_child(parent, NULL, umc_bus_rescan_helper); |
90 | if (err < 0) | 114 | if (err < 0) |
91 | printk(KERN_WARNING "%s: rescan of bus failed: %d\n", | 115 | printk(KERN_WARNING "%s: rescan of bus failed: %d\n", |
92 | KBUILD_MODNAME, err); | 116 | KBUILD_MODNAME, err); |
@@ -120,7 +144,7 @@ static int umc_device_probe(struct device *dev) | |||
120 | if (err) | 144 | if (err) |
121 | put_device(dev); | 145 | put_device(dev); |
122 | else | 146 | else |
123 | umc_bus_rescan(); | 147 | umc_bus_rescan(dev->parent); |
124 | 148 | ||
125 | return err; | 149 | return err; |
126 | } | 150 | } |
diff --git a/drivers/uwb/umc-dev.c b/drivers/uwb/umc-dev.c index aa44e1c1a102..1fc7d8270bb8 100644 --- a/drivers/uwb/umc-dev.c +++ b/drivers/uwb/umc-dev.c | |||
@@ -7,8 +7,6 @@ | |||
7 | */ | 7 | */ |
8 | #include <linux/kernel.h> | 8 | #include <linux/kernel.h> |
9 | #include <linux/uwb/umc.h> | 9 | #include <linux/uwb/umc.h> |
10 | #define D_LOCAL 0 | ||
11 | #include <linux/uwb/debug.h> | ||
12 | 10 | ||
13 | static void umc_device_release(struct device *dev) | 11 | static void umc_device_release(struct device *dev) |
14 | { | 12 | { |
@@ -31,8 +29,7 @@ struct umc_dev *umc_device_create(struct device *parent, int n) | |||
31 | 29 | ||
32 | umc = kzalloc(sizeof(struct umc_dev), GFP_KERNEL); | 30 | umc = kzalloc(sizeof(struct umc_dev), GFP_KERNEL); |
33 | if (umc) { | 31 | if (umc) { |
34 | snprintf(umc->dev.bus_id, sizeof(umc->dev.bus_id), "%s-%d", | 32 | dev_set_name(&umc->dev, "%s-%d", dev_name(parent), n); |
35 | parent->bus_id, n); | ||
36 | umc->dev.parent = parent; | 33 | umc->dev.parent = parent; |
37 | umc->dev.bus = &umc_bus_type; | 34 | umc->dev.bus = &umc_bus_type; |
38 | umc->dev.release = umc_device_release; | 35 | umc->dev.release = umc_device_release; |
@@ -54,8 +51,6 @@ int umc_device_register(struct umc_dev *umc) | |||
54 | { | 51 | { |
55 | int err; | 52 | int err; |
56 | 53 | ||
57 | d_fnstart(3, &umc->dev, "(umc_dev %p)\n", umc); | ||
58 | |||
59 | err = request_resource(umc->resource.parent, &umc->resource); | 54 | err = request_resource(umc->resource.parent, &umc->resource); |
60 | if (err < 0) { | 55 | if (err < 0) { |
61 | dev_err(&umc->dev, "can't allocate resource range " | 56 | dev_err(&umc->dev, "can't allocate resource range " |
@@ -69,13 +64,11 @@ int umc_device_register(struct umc_dev *umc) | |||
69 | err = device_register(&umc->dev); | 64 | err = device_register(&umc->dev); |
70 | if (err < 0) | 65 | if (err < 0) |
71 | goto error_device_register; | 66 | goto error_device_register; |
72 | d_fnend(3, &umc->dev, "(umc_dev %p) = 0\n", umc); | ||
73 | return 0; | 67 | return 0; |
74 | 68 | ||
75 | error_device_register: | 69 | error_device_register: |
76 | release_resource(&umc->resource); | 70 | release_resource(&umc->resource); |
77 | error_request_resource: | 71 | error_request_resource: |
78 | d_fnend(3, &umc->dev, "(umc_dev %p) = %d\n", umc, err); | ||
79 | return err; | 72 | return err; |
80 | } | 73 | } |
81 | EXPORT_SYMBOL_GPL(umc_device_register); | 74 | EXPORT_SYMBOL_GPL(umc_device_register); |
@@ -95,10 +88,8 @@ void umc_device_unregister(struct umc_dev *umc) | |||
95 | if (!umc) | 88 | if (!umc) |
96 | return; | 89 | return; |
97 | dev = get_device(&umc->dev); | 90 | dev = get_device(&umc->dev); |
98 | d_fnstart(3, dev, "(umc_dev %p)\n", umc); | ||
99 | device_unregister(&umc->dev); | 91 | device_unregister(&umc->dev); |
100 | release_resource(&umc->resource); | 92 | release_resource(&umc->resource); |
101 | d_fnend(3, dev, "(umc_dev %p) = void\n", umc); | ||
102 | put_device(dev); | 93 | put_device(dev); |
103 | } | 94 | } |
104 | EXPORT_SYMBOL_GPL(umc_device_unregister); | 95 | EXPORT_SYMBOL_GPL(umc_device_unregister); |
diff --git a/drivers/uwb/uwb-debug.c b/drivers/uwb/uwb-debug.c index 6d232c35d07d..4a42993700c1 100644 --- a/drivers/uwb/uwb-debug.c +++ b/drivers/uwb/uwb-debug.c | |||
@@ -4,6 +4,7 @@ | |||
4 | * | 4 | * |
5 | * Copyright (C) 2005-2006 Intel Corporation | 5 | * Copyright (C) 2005-2006 Intel Corporation |
6 | * Inaky Perez-Gonzalez <inaky.perez-gonzalez@intel.com> | 6 | * Inaky Perez-Gonzalez <inaky.perez-gonzalez@intel.com> |
7 | * Copyright (C) 2008 Cambridge Silicon Radio Ltd. | ||
7 | * | 8 | * |
8 | * This program is free software; you can redistribute it and/or | 9 | * This program is free software; you can redistribute it and/or |
9 | * modify it under the terms of the GNU General Public License version | 10 | * modify it under the terms of the GNU General Public License version |
@@ -33,31 +34,9 @@ | |||
33 | #include <linux/seq_file.h> | 34 | #include <linux/seq_file.h> |
34 | 35 | ||
35 | #include <linux/uwb/debug-cmd.h> | 36 | #include <linux/uwb/debug-cmd.h> |
36 | #define D_LOCAL 0 | ||
37 | #include <linux/uwb/debug.h> | ||
38 | 37 | ||
39 | #include "uwb-internal.h" | 38 | #include "uwb-internal.h" |
40 | 39 | ||
41 | void dump_bytes(struct device *dev, const void *_buf, size_t rsize) | ||
42 | { | ||
43 | const char *buf = _buf; | ||
44 | char line[32]; | ||
45 | size_t offset = 0; | ||
46 | int cnt, cnt2; | ||
47 | for (cnt = 0; cnt < rsize; cnt += 8) { | ||
48 | size_t rtop = rsize - cnt < 8 ? rsize - cnt : 8; | ||
49 | for (offset = cnt2 = 0; cnt2 < rtop; cnt2++) { | ||
50 | offset += scnprintf(line + offset, sizeof(line) - offset, | ||
51 | "%02x ", buf[cnt + cnt2] & 0xff); | ||
52 | } | ||
53 | if (dev) | ||
54 | dev_info(dev, "%s\n", line); | ||
55 | else | ||
56 | printk(KERN_INFO "%s\n", line); | ||
57 | } | ||
58 | } | ||
59 | EXPORT_SYMBOL_GPL(dump_bytes); | ||
60 | |||
61 | /* | 40 | /* |
62 | * Debug interface | 41 | * Debug interface |
63 | * | 42 | * |
@@ -84,26 +63,23 @@ struct uwb_dbg { | |||
84 | struct dentry *reservations_f; | 63 | struct dentry *reservations_f; |
85 | struct dentry *accept_f; | 64 | struct dentry *accept_f; |
86 | struct dentry *drp_avail_f; | 65 | struct dentry *drp_avail_f; |
66 | spinlock_t list_lock; | ||
87 | }; | 67 | }; |
88 | 68 | ||
89 | static struct dentry *root_dir; | 69 | static struct dentry *root_dir; |
90 | 70 | ||
91 | static void uwb_dbg_rsv_cb(struct uwb_rsv *rsv) | 71 | static void uwb_dbg_rsv_cb(struct uwb_rsv *rsv) |
92 | { | 72 | { |
93 | struct uwb_rc *rc = rsv->rc; | 73 | struct uwb_dbg *dbg = rsv->pal_priv; |
94 | struct device *dev = &rc->uwb_dev.dev; | ||
95 | struct uwb_dev_addr devaddr; | ||
96 | char owner[UWB_ADDR_STRSIZE], target[UWB_ADDR_STRSIZE]; | ||
97 | |||
98 | uwb_dev_addr_print(owner, sizeof(owner), &rsv->owner->dev_addr); | ||
99 | if (rsv->target.type == UWB_RSV_TARGET_DEV) | ||
100 | devaddr = rsv->target.dev->dev_addr; | ||
101 | else | ||
102 | devaddr = rsv->target.devaddr; | ||
103 | uwb_dev_addr_print(target, sizeof(target), &devaddr); | ||
104 | 74 | ||
105 | dev_dbg(dev, "debug: rsv %s -> %s: %s\n", | 75 | uwb_rsv_dump("debug", rsv); |
106 | owner, target, uwb_rsv_state_str(rsv->state)); | 76 | |
77 | if (rsv->state == UWB_RSV_STATE_NONE) { | ||
78 | spin_lock(&dbg->list_lock); | ||
79 | list_del(&rsv->pal_node); | ||
80 | spin_unlock(&dbg->list_lock); | ||
81 | uwb_rsv_destroy(rsv); | ||
82 | } | ||
107 | } | 83 | } |
108 | 84 | ||
109 | static int cmd_rsv_establish(struct uwb_rc *rc, | 85 | static int cmd_rsv_establish(struct uwb_rc *rc, |
@@ -119,26 +95,27 @@ static int cmd_rsv_establish(struct uwb_rc *rc, | |||
119 | if (target == NULL) | 95 | if (target == NULL) |
120 | return -ENODEV; | 96 | return -ENODEV; |
121 | 97 | ||
122 | rsv = uwb_rsv_create(rc, uwb_dbg_rsv_cb, NULL); | 98 | rsv = uwb_rsv_create(rc, uwb_dbg_rsv_cb, rc->dbg); |
123 | if (rsv == NULL) { | 99 | if (rsv == NULL) { |
124 | uwb_dev_put(target); | 100 | uwb_dev_put(target); |
125 | return -ENOMEM; | 101 | return -ENOMEM; |
126 | } | 102 | } |
127 | 103 | ||
128 | rsv->owner = &rc->uwb_dev; | 104 | rsv->target.type = UWB_RSV_TARGET_DEV; |
129 | rsv->target.type = UWB_RSV_TARGET_DEV; | 105 | rsv->target.dev = target; |
130 | rsv->target.dev = target; | 106 | rsv->type = cmd->type; |
131 | rsv->type = cmd->type; | 107 | rsv->max_mas = cmd->max_mas; |
132 | rsv->max_mas = cmd->max_mas; | 108 | rsv->min_mas = cmd->min_mas; |
133 | rsv->min_mas = cmd->min_mas; | 109 | rsv->max_interval = cmd->max_interval; |
134 | rsv->sparsity = cmd->sparsity; | ||
135 | 110 | ||
136 | ret = uwb_rsv_establish(rsv); | 111 | ret = uwb_rsv_establish(rsv); |
137 | if (ret) | 112 | if (ret) |
138 | uwb_rsv_destroy(rsv); | 113 | uwb_rsv_destroy(rsv); |
139 | else | 114 | else { |
115 | spin_lock(&(rc->dbg)->list_lock); | ||
140 | list_add_tail(&rsv->pal_node, &rc->dbg->rsvs); | 116 | list_add_tail(&rsv->pal_node, &rc->dbg->rsvs); |
141 | 117 | spin_unlock(&(rc->dbg)->list_lock); | |
118 | } | ||
142 | return ret; | 119 | return ret; |
143 | } | 120 | } |
144 | 121 | ||
@@ -148,21 +125,40 @@ static int cmd_rsv_terminate(struct uwb_rc *rc, | |||
148 | struct uwb_rsv *rsv, *found = NULL; | 125 | struct uwb_rsv *rsv, *found = NULL; |
149 | int i = 0; | 126 | int i = 0; |
150 | 127 | ||
128 | spin_lock(&(rc->dbg)->list_lock); | ||
129 | |||
151 | list_for_each_entry(rsv, &rc->dbg->rsvs, pal_node) { | 130 | list_for_each_entry(rsv, &rc->dbg->rsvs, pal_node) { |
152 | if (i == cmd->index) { | 131 | if (i == cmd->index) { |
153 | found = rsv; | 132 | found = rsv; |
133 | uwb_rsv_get(found); | ||
154 | break; | 134 | break; |
155 | } | 135 | } |
136 | i++; | ||
156 | } | 137 | } |
138 | |||
139 | spin_unlock(&(rc->dbg)->list_lock); | ||
140 | |||
157 | if (!found) | 141 | if (!found) |
158 | return -EINVAL; | 142 | return -EINVAL; |
159 | 143 | ||
160 | list_del(&found->pal_node); | ||
161 | uwb_rsv_terminate(found); | 144 | uwb_rsv_terminate(found); |
145 | uwb_rsv_put(found); | ||
162 | 146 | ||
163 | return 0; | 147 | return 0; |
164 | } | 148 | } |
165 | 149 | ||
150 | static int cmd_ie_add(struct uwb_rc *rc, struct uwb_dbg_cmd_ie *ie_to_add) | ||
151 | { | ||
152 | return uwb_rc_ie_add(rc, | ||
153 | (const struct uwb_ie_hdr *) ie_to_add->data, | ||
154 | ie_to_add->len); | ||
155 | } | ||
156 | |||
157 | static int cmd_ie_rm(struct uwb_rc *rc, struct uwb_dbg_cmd_ie *ie_to_rm) | ||
158 | { | ||
159 | return uwb_rc_ie_rm(rc, ie_to_rm->data[0]); | ||
160 | } | ||
161 | |||
166 | static int command_open(struct inode *inode, struct file *file) | 162 | static int command_open(struct inode *inode, struct file *file) |
167 | { | 163 | { |
168 | file->private_data = inode->i_private; | 164 | file->private_data = inode->i_private; |
@@ -175,8 +171,8 @@ static ssize_t command_write(struct file *file, const char __user *buf, | |||
175 | { | 171 | { |
176 | struct uwb_rc *rc = file->private_data; | 172 | struct uwb_rc *rc = file->private_data; |
177 | struct uwb_dbg_cmd cmd; | 173 | struct uwb_dbg_cmd cmd; |
178 | int ret; | 174 | int ret = 0; |
179 | 175 | ||
180 | if (len != sizeof(struct uwb_dbg_cmd)) | 176 | if (len != sizeof(struct uwb_dbg_cmd)) |
181 | return -EINVAL; | 177 | return -EINVAL; |
182 | 178 | ||
@@ -190,6 +186,18 @@ static ssize_t command_write(struct file *file, const char __user *buf, | |||
190 | case UWB_DBG_CMD_RSV_TERMINATE: | 186 | case UWB_DBG_CMD_RSV_TERMINATE: |
191 | ret = cmd_rsv_terminate(rc, &cmd.rsv_terminate); | 187 | ret = cmd_rsv_terminate(rc, &cmd.rsv_terminate); |
192 | break; | 188 | break; |
189 | case UWB_DBG_CMD_IE_ADD: | ||
190 | ret = cmd_ie_add(rc, &cmd.ie_add); | ||
191 | break; | ||
192 | case UWB_DBG_CMD_IE_RM: | ||
193 | ret = cmd_ie_rm(rc, &cmd.ie_rm); | ||
194 | break; | ||
195 | case UWB_DBG_CMD_RADIO_START: | ||
196 | ret = uwb_radio_start(&rc->dbg->pal); | ||
197 | break; | ||
198 | case UWB_DBG_CMD_RADIO_STOP: | ||
199 | uwb_radio_stop(&rc->dbg->pal); | ||
200 | break; | ||
193 | default: | 201 | default: |
194 | return -EINVAL; | 202 | return -EINVAL; |
195 | } | 203 | } |
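Userspace drives this interface by writing exactly one struct uwb_dbg_cmd
per write() to the debugfs command file. A hedged sketch (it assumes the
struct's discriminator is named "type", as the case labels suggest; the
full layout lives in linux/uwb/debug-cmd.h):

	#include <fcntl.h>
	#include <unistd.h>
	#include <linux/uwb/debug-cmd.h>

	static int radio_start(const char *cmd_file)
	{
		struct uwb_dbg_cmd cmd = { .type = UWB_DBG_CMD_RADIO_START };
		int fd = open(cmd_file, O_WRONLY);

		if (fd < 0)
			return -1;
		/* command_write() rejects anything but sizeof(cmd) bytes */
		if (write(fd, &cmd, sizeof(cmd)) != sizeof(cmd)) {
			close(fd);
			return -1;
		}
		return close(fd);
	}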
@@ -283,12 +291,26 @@ static struct file_operations drp_avail_fops = { | |||
283 | .owner = THIS_MODULE, | 291 | .owner = THIS_MODULE, |
284 | }; | 292 | }; |
285 | 293 | ||
286 | static void uwb_dbg_new_rsv(struct uwb_rsv *rsv) | 294 | static void uwb_dbg_channel_changed(struct uwb_pal *pal, int channel) |
295 | { | ||
296 | struct device *dev = &pal->rc->uwb_dev.dev; | ||
297 | |||
298 | if (channel > 0) | ||
299 | dev_info(dev, "debug: channel %d started\n", channel); | ||
300 | else | ||
301 | dev_info(dev, "debug: channel stopped\n"); | ||
302 | } | ||
303 | |||
304 | static void uwb_dbg_new_rsv(struct uwb_pal *pal, struct uwb_rsv *rsv) | ||
287 | { | 305 | { |
288 | struct uwb_rc *rc = rsv->rc; | 306 | struct uwb_dbg *dbg = container_of(pal, struct uwb_dbg, pal); |
289 | 307 | ||
290 | if (rc->dbg->accept) | 308 | if (dbg->accept) { |
291 | uwb_rsv_accept(rsv, uwb_dbg_rsv_cb, NULL); | 309 | spin_lock(&dbg->list_lock); |
310 | list_add_tail(&rsv->pal_node, &dbg->rsvs); | ||
311 | spin_unlock(&dbg->list_lock); | ||
312 | uwb_rsv_accept(rsv, uwb_dbg_rsv_cb, dbg); | ||
313 | } | ||
292 | } | 314 | } |
293 | 315 | ||
294 | /** | 316 | /** |
@@ -302,10 +324,14 @@ void uwb_dbg_add_rc(struct uwb_rc *rc) | |||
302 | return; | 324 | return; |
303 | 325 | ||
304 | INIT_LIST_HEAD(&rc->dbg->rsvs); | 326 | INIT_LIST_HEAD(&rc->dbg->rsvs); |
327 | spin_lock_init(&(rc->dbg)->list_lock); | ||
305 | 328 | ||
306 | uwb_pal_init(&rc->dbg->pal); | 329 | uwb_pal_init(&rc->dbg->pal); |
330 | rc->dbg->pal.rc = rc; | ||
331 | rc->dbg->pal.channel_changed = uwb_dbg_channel_changed; | ||
307 | rc->dbg->pal.new_rsv = uwb_dbg_new_rsv; | 332 | rc->dbg->pal.new_rsv = uwb_dbg_new_rsv; |
308 | uwb_pal_register(rc, &rc->dbg->pal); | 333 | uwb_pal_register(&rc->dbg->pal); |
334 | |||
309 | if (root_dir) { | 335 | if (root_dir) { |
310 | rc->dbg->root_d = debugfs_create_dir(dev_name(&rc->uwb_dev.dev), | 336 | rc->dbg->root_d = debugfs_create_dir(dev_name(&rc->uwb_dev.dev), |
311 | root_dir); | 337 | root_dir); |
@@ -325,7 +351,7 @@ void uwb_dbg_add_rc(struct uwb_rc *rc) | |||
325 | } | 351 | } |
326 | 352 | ||
327 | /** | 353 | /** |
328 | * uwb_dbg_add_rc - remove a radio controller's debug interface | 354 | * uwb_dbg_del_rc - remove a radio controller's debug interface |
329 | * @rc: the radio controller | 355 | * @rc: the radio controller |
330 | */ | 356 | */ |
331 | void uwb_dbg_del_rc(struct uwb_rc *rc) | 357 | void uwb_dbg_del_rc(struct uwb_rc *rc) |
@@ -336,10 +362,10 @@ void uwb_dbg_del_rc(struct uwb_rc *rc) | |||
336 | return; | 362 | return; |
337 | 363 | ||
338 | list_for_each_entry_safe(rsv, t, &rc->dbg->rsvs, pal_node) { | 364 | list_for_each_entry_safe(rsv, t, &rc->dbg->rsvs, pal_node) { |
339 | uwb_rsv_destroy(rsv); | 365 | uwb_rsv_terminate(rsv); |
340 | } | 366 | } |
341 | 367 | ||
342 | uwb_pal_unregister(rc, &rc->dbg->pal); | 368 | uwb_pal_unregister(&rc->dbg->pal); |
343 | 369 | ||
344 | if (root_dir) { | 370 | if (root_dir) { |
345 | debugfs_remove(rc->dbg->drp_avail_f); | 371 | debugfs_remove(rc->dbg->drp_avail_f); |
@@ -365,3 +391,16 @@ void uwb_dbg_exit(void) | |||
365 | { | 391 | { |
366 | debugfs_remove(root_dir); | 392 | debugfs_remove(root_dir); |
367 | } | 393 | } |
394 | |||
395 | /** | ||
396 | * uwb_dbg_create_pal_dir - create a debugfs directory for a PAL | ||
397 | * @pal: The PAL. | ||
398 | */ | ||
399 | struct dentry *uwb_dbg_create_pal_dir(struct uwb_pal *pal) | ||
400 | { | ||
401 | struct uwb_rc *rc = pal->rc; | ||
402 | |||
403 | if (root_dir && rc->dbg && rc->dbg->root_d && pal->name) | ||
404 | return debugfs_create_dir(pal->name, rc->dbg->root_d); | ||
405 | return NULL; | ||
406 | } | ||
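A PAL can then hang its own files under the radio controller's debug
directory. A sketch (pal->name must be set beforehand or no directory is
created; the "status" file and my_status_fops are hypothetical):

	pal->name = "my-pal";
	d = uwb_dbg_create_pal_dir(pal);
	if (d)
		debugfs_create_file("status", 0444, d, priv, &my_status_fops);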
diff --git a/drivers/uwb/uwb-internal.h b/drivers/uwb/uwb-internal.h index 2ad307d12961..d5bcfc1c227a 100644 --- a/drivers/uwb/uwb-internal.h +++ b/drivers/uwb/uwb-internal.h | |||
@@ -66,14 +66,14 @@ extern int uwb_rc_scan(struct uwb_rc *rc, | |||
66 | unsigned channel, enum uwb_scan_type type, | 66 | unsigned channel, enum uwb_scan_type type, |
67 | unsigned bpst_offset); | 67 | unsigned bpst_offset); |
68 | extern int uwb_rc_send_all_drp_ie(struct uwb_rc *rc); | 68 | extern int uwb_rc_send_all_drp_ie(struct uwb_rc *rc); |
69 | extern ssize_t uwb_rc_print_IEs(struct uwb_rc *rc, char *, size_t); | 69 | |
70 | extern void uwb_rc_ie_init(struct uwb_rc *); | 70 | void uwb_rc_ie_init(struct uwb_rc *); |
71 | extern void uwb_rc_ie_init(struct uwb_rc *); | 71 | int uwb_rc_ie_setup(struct uwb_rc *); |
72 | extern ssize_t uwb_rc_ie_setup(struct uwb_rc *); | 72 | void uwb_rc_ie_release(struct uwb_rc *); |
73 | extern void uwb_rc_ie_release(struct uwb_rc *); | 73 | int uwb_ie_dump_hex(const struct uwb_ie_hdr *ies, size_t len, |
74 | extern int uwb_rc_ie_add(struct uwb_rc *, | 74 | char *buf, size_t size); |
75 | const struct uwb_ie_hdr *, size_t); | 75 | int uwb_rc_set_ie(struct uwb_rc *, struct uwb_rc_cmd_set_ie *); |
76 | extern int uwb_rc_ie_rm(struct uwb_rc *, enum uwb_ie); | 76 | |
77 | 77 | ||
78 | extern const char *uwb_rc_strerror(unsigned code); | 78 | extern const char *uwb_rc_strerror(unsigned code); |
79 | 79 | ||
@@ -92,6 +92,12 @@ extern const char *uwb_rc_strerror(unsigned code); | |||
92 | 92 | ||
93 | struct uwb_rc_neh; | 93 | struct uwb_rc_neh; |
94 | 94 | ||
95 | extern int uwb_rc_cmd_async(struct uwb_rc *rc, const char *cmd_name, | ||
96 | struct uwb_rccb *cmd, size_t cmd_size, | ||
97 | u8 expected_type, u16 expected_event, | ||
98 | uwb_rc_cmd_cb_f cb, void *arg); | ||
99 | |||
100 | |||
95 | void uwb_rc_neh_create(struct uwb_rc *rc); | 101 | void uwb_rc_neh_create(struct uwb_rc *rc); |
96 | void uwb_rc_neh_destroy(struct uwb_rc *rc); | 102 | void uwb_rc_neh_destroy(struct uwb_rc *rc); |
97 | 103 | ||
@@ -106,7 +112,69 @@ void uwb_rc_neh_put(struct uwb_rc_neh *neh); | |||
106 | extern int uwb_est_create(void); | 112 | extern int uwb_est_create(void); |
107 | extern void uwb_est_destroy(void); | 113 | extern void uwb_est_destroy(void); |
108 | 114 | ||
115 | /* | ||
116 | * UWB conflicting alien reservations | ||
117 | */ | ||
118 | struct uwb_cnflt_alien { | ||
119 | struct uwb_rc *rc; | ||
120 | struct list_head rc_node; | ||
121 | struct uwb_mas_bm mas; | ||
122 | struct timer_list timer; | ||
123 | struct work_struct cnflt_update_work; | ||
124 | }; | ||
125 | |||
126 | enum uwb_uwb_rsv_alloc_result { | ||
127 | UWB_RSV_ALLOC_FOUND = 0, | ||
128 | UWB_RSV_ALLOC_NOT_FOUND, | ||
129 | }; | ||
130 | |||
131 | enum uwb_rsv_mas_status { | ||
132 | UWB_RSV_MAS_NOT_AVAIL = 1, | ||
133 | UWB_RSV_MAS_SAFE, | ||
134 | UWB_RSV_MAS_UNSAFE, | ||
135 | }; | ||
136 | |||
137 | struct uwb_rsv_col_set_info { | ||
138 | unsigned char start_col; | ||
139 | unsigned char interval; | ||
140 | unsigned char safe_mas_per_col; | ||
141 | unsigned char unsafe_mas_per_col; | ||
142 | }; | ||
143 | |||
144 | struct uwb_rsv_col_info { | ||
145 | unsigned char max_avail_safe; | ||
146 | unsigned char max_avail_unsafe; | ||
147 | unsigned char highest_mas[UWB_MAS_PER_ZONE]; | ||
148 | struct uwb_rsv_col_set_info csi; | ||
149 | }; | ||
150 | |||
151 | struct uwb_rsv_row_info { | ||
152 | unsigned char avail[UWB_MAS_PER_ZONE]; | ||
153 | unsigned char free_rows; | ||
154 | unsigned char used_rows; | ||
155 | }; | ||
156 | |||
157 | /* | ||
158 | * UWB find allocation | ||
159 | */ | ||
160 | struct uwb_rsv_alloc_info { | ||
161 | unsigned char bm[UWB_MAS_PER_ZONE * UWB_NUM_ZONES]; | ||
162 | struct uwb_rsv_col_info ci[UWB_NUM_ZONES]; | ||
163 | struct uwb_rsv_row_info ri; | ||
164 | struct uwb_mas_bm *not_available; | ||
165 | struct uwb_mas_bm *result; | ||
166 | int min_mas; | ||
167 | int max_mas; | ||
168 | int max_interval; | ||
169 | int total_allocated_mases; | ||
170 | int safe_allocated_mases; | ||
171 | int unsafe_allocated_mases; | ||
172 | int interval; | ||
173 | }; | ||
109 | 174 | ||
175 | int uwb_rsv_find_best_allocation(struct uwb_rsv *rsv, struct uwb_mas_bm *available, | ||
176 | struct uwb_mas_bm *result); | ||
177 | void uwb_rsv_handle_drp_avail_change(struct uwb_rc *rc); | ||
110 | /* | 178 | /* |
111 | * UWB Events & management daemon | 179 | * UWB Events & management daemon |
112 | */ | 180 | */ |
@@ -160,13 +228,14 @@ struct uwb_event { | |||
160 | }; | 228 | }; |
161 | }; | 229 | }; |
162 | 230 | ||
163 | extern void uwbd_start(void); | 231 | extern void uwbd_start(struct uwb_rc *rc); |
164 | extern void uwbd_stop(void); | 232 | extern void uwbd_stop(struct uwb_rc *rc); |
165 | extern struct uwb_event *uwb_event_alloc(size_t, gfp_t gfp_mask); | 233 | extern struct uwb_event *uwb_event_alloc(size_t, gfp_t gfp_mask); |
166 | extern void uwbd_event_queue(struct uwb_event *); | 234 | extern void uwbd_event_queue(struct uwb_event *); |
167 | void uwbd_flush(struct uwb_rc *rc); | 235 | void uwbd_flush(struct uwb_rc *rc); |
168 | 236 | ||
169 | /* UWB event handlers */ | 237 | /* UWB event handlers */ |
238 | extern int uwbd_evt_handle_rc_ie_rcv(struct uwb_event *); | ||
170 | extern int uwbd_evt_handle_rc_beacon(struct uwb_event *); | 239 | extern int uwbd_evt_handle_rc_beacon(struct uwb_event *); |
171 | extern int uwbd_evt_handle_rc_beacon_size(struct uwb_event *); | 240 | extern int uwbd_evt_handle_rc_beacon_size(struct uwb_event *); |
172 | extern int uwbd_evt_handle_rc_bpoie_change(struct uwb_event *); | 241 | extern int uwbd_evt_handle_rc_bpoie_change(struct uwb_event *); |
@@ -193,15 +262,6 @@ int uwbd_evt_handle_rc_dev_addr_conflict(struct uwb_event *evt); | |||
193 | 262 | ||
194 | extern unsigned long beacon_timeout_ms; | 263 | extern unsigned long beacon_timeout_ms; |
195 | 264 | ||
196 | /** Beacon cache list */ | ||
197 | struct uwb_beca { | ||
198 | struct list_head list; | ||
199 | size_t entries; | ||
200 | struct mutex mutex; | ||
201 | }; | ||
202 | |||
203 | extern struct uwb_beca uwb_beca; | ||
204 | |||
205 | /** | 265 | /** |
206 | * Beacon cache entry | 266 | * Beacon cache entry |
207 | * | 267 | * |
@@ -228,9 +288,6 @@ struct uwb_beca_e { | |||
228 | struct uwb_beacon_frame; | 288 | struct uwb_beacon_frame; |
229 | extern ssize_t uwb_bce_print_IEs(struct uwb_dev *, struct uwb_beca_e *, | 289 | extern ssize_t uwb_bce_print_IEs(struct uwb_dev *, struct uwb_beca_e *, |
230 | char *, size_t); | 290 | char *, size_t); |
231 | extern struct uwb_beca_e *__uwb_beca_add(struct uwb_rc_evt_beacon *, | ||
232 | struct uwb_beacon_frame *, | ||
233 | unsigned long); | ||
234 | 291 | ||
235 | extern void uwb_bce_kfree(struct kref *_bce); | 292 | extern void uwb_bce_kfree(struct kref *_bce); |
236 | static inline void uwb_bce_get(struct uwb_beca_e *bce) | 293 | static inline void uwb_bce_get(struct uwb_beca_e *bce) |
@@ -241,14 +298,19 @@ static inline void uwb_bce_put(struct uwb_beca_e *bce) | |||
241 | { | 298 | { |
242 | kref_put(&bce->refcnt, uwb_bce_kfree); | 299 | kref_put(&bce->refcnt, uwb_bce_kfree); |
243 | } | 300 | } |
244 | extern void uwb_beca_purge(void); | 301 | extern void uwb_beca_purge(struct uwb_rc *rc); |
245 | extern void uwb_beca_release(void); | 302 | extern void uwb_beca_release(struct uwb_rc *rc); |
246 | 303 | ||
247 | struct uwb_dev *uwb_dev_get_by_devaddr(struct uwb_rc *rc, | 304 | struct uwb_dev *uwb_dev_get_by_devaddr(struct uwb_rc *rc, |
248 | const struct uwb_dev_addr *devaddr); | 305 | const struct uwb_dev_addr *devaddr); |
249 | struct uwb_dev *uwb_dev_get_by_macaddr(struct uwb_rc *rc, | 306 | struct uwb_dev *uwb_dev_get_by_macaddr(struct uwb_rc *rc, |
250 | const struct uwb_mac_addr *macaddr); | 307 | const struct uwb_mac_addr *macaddr); |
251 | 308 | ||
309 | int uwb_radio_setup(struct uwb_rc *rc); | ||
310 | void uwb_radio_reset_state(struct uwb_rc *rc); | ||
311 | void uwb_radio_shutdown(struct uwb_rc *rc); | ||
312 | int uwb_radio_force_channel(struct uwb_rc *rc, int channel); | ||
313 | |||
252 | /* -- UWB Sysfs representation */ | 314 | /* -- UWB Sysfs representation */ |
253 | extern struct class uwb_rc_class; | 315 | extern struct class uwb_rc_class; |
254 | extern struct device_attribute dev_attr_mac_address; | 316 | extern struct device_attribute dev_attr_mac_address; |
@@ -259,18 +321,29 @@ extern struct device_attribute dev_attr_scan; | |||
259 | void uwb_rsv_init(struct uwb_rc *rc); | 321 | void uwb_rsv_init(struct uwb_rc *rc); |
260 | int uwb_rsv_setup(struct uwb_rc *rc); | 322 | int uwb_rsv_setup(struct uwb_rc *rc); |
261 | void uwb_rsv_cleanup(struct uwb_rc *rc); | 323 | void uwb_rsv_cleanup(struct uwb_rc *rc); |
324 | void uwb_rsv_remove_all(struct uwb_rc *rc); | ||
325 | void uwb_rsv_get(struct uwb_rsv *rsv); | ||
326 | void uwb_rsv_put(struct uwb_rsv *rsv); | ||
327 | bool uwb_rsv_has_two_drp_ies(struct uwb_rsv *rsv); | ||
328 | void uwb_rsv_dump(char *text, struct uwb_rsv *rsv); | ||
329 | int uwb_rsv_try_move(struct uwb_rsv *rsv, struct uwb_mas_bm *available); | ||
330 | void uwb_rsv_backoff_win_timer(unsigned long arg); | ||
331 | void uwb_rsv_backoff_win_increment(struct uwb_rc *rc); | ||
332 | int uwb_rsv_status(struct uwb_rsv *rsv); | ||
333 | int uwb_rsv_companion_status(struct uwb_rsv *rsv); | ||
262 | 334 | ||
263 | void uwb_rsv_set_state(struct uwb_rsv *rsv, enum uwb_rsv_state new_state); | 335 | void uwb_rsv_set_state(struct uwb_rsv *rsv, enum uwb_rsv_state new_state); |
264 | void uwb_rsv_remove(struct uwb_rsv *rsv); | 336 | void uwb_rsv_remove(struct uwb_rsv *rsv); |
265 | struct uwb_rsv *uwb_rsv_find(struct uwb_rc *rc, struct uwb_dev *src, | 337 | struct uwb_rsv *uwb_rsv_find(struct uwb_rc *rc, struct uwb_dev *src, |
266 | struct uwb_ie_drp *drp_ie); | 338 | struct uwb_ie_drp *drp_ie); |
267 | void uwb_rsv_sched_update(struct uwb_rc *rc); | 339 | void uwb_rsv_sched_update(struct uwb_rc *rc); |
340 | void uwb_rsv_queue_update(struct uwb_rc *rc); | ||
268 | 341 | ||
269 | void uwb_drp_handle_timeout(struct uwb_rsv *rsv); | ||
270 | int uwb_drp_ie_update(struct uwb_rsv *rsv); | 342 | int uwb_drp_ie_update(struct uwb_rsv *rsv); |
271 | void uwb_drp_ie_to_bm(struct uwb_mas_bm *bm, const struct uwb_ie_drp *drp_ie); | 343 | void uwb_drp_ie_to_bm(struct uwb_mas_bm *bm, const struct uwb_ie_drp *drp_ie); |
272 | 344 | ||
273 | void uwb_drp_avail_init(struct uwb_rc *rc); | 345 | void uwb_drp_avail_init(struct uwb_rc *rc); |
346 | void uwb_drp_available(struct uwb_rc *rc, struct uwb_mas_bm *avail); | ||
274 | int uwb_drp_avail_reserve_pending(struct uwb_rc *rc, struct uwb_mas_bm *mas); | 347 | int uwb_drp_avail_reserve_pending(struct uwb_rc *rc, struct uwb_mas_bm *mas); |
275 | void uwb_drp_avail_reserve(struct uwb_rc *rc, struct uwb_mas_bm *mas); | 348 | void uwb_drp_avail_reserve(struct uwb_rc *rc, struct uwb_mas_bm *mas); |
276 | void uwb_drp_avail_release(struct uwb_rc *rc, struct uwb_mas_bm *mas); | 349 | void uwb_drp_avail_release(struct uwb_rc *rc, struct uwb_mas_bm *mas); |
@@ -289,8 +362,7 @@ void uwb_dbg_init(void); | |||
289 | void uwb_dbg_exit(void); | 362 | void uwb_dbg_exit(void); |
290 | void uwb_dbg_add_rc(struct uwb_rc *rc); | 363 | void uwb_dbg_add_rc(struct uwb_rc *rc); |
291 | void uwb_dbg_del_rc(struct uwb_rc *rc); | 364 | void uwb_dbg_del_rc(struct uwb_rc *rc); |
292 | 365 | struct dentry *uwb_dbg_create_pal_dir(struct uwb_pal *pal); | |
293 | /* Workarounds for version specific stuff */ | ||
294 | 366 | ||
295 | static inline void uwb_dev_lock(struct uwb_dev *uwb_dev) | 367 | static inline void uwb_dev_lock(struct uwb_dev *uwb_dev) |
296 | { | 368 | { |
diff --git a/drivers/uwb/uwbd.c b/drivers/uwb/uwbd.c index 78908416e42c..57bd6bfef37e 100644 --- a/drivers/uwb/uwbd.c +++ b/drivers/uwb/uwbd.c | |||
@@ -68,17 +68,13 @@ | |||
68 | * | 68 | * |
69 | * Handler functions are normally called uwbd_evt_handle_*(). | 69 | * Handler functions are normally called uwbd_evt_handle_*(). |
70 | */ | 70 | */ |
71 | |||
72 | #include <linux/kthread.h> | 71 | #include <linux/kthread.h> |
73 | #include <linux/module.h> | 72 | #include <linux/module.h> |
74 | #include <linux/freezer.h> | 73 | #include <linux/freezer.h> |
75 | #include "uwb-internal.h" | ||
76 | |||
77 | #define D_LOCAL 1 | ||
78 | #include <linux/uwb/debug.h> | ||
79 | 74 | ||
75 | #include "uwb-internal.h" | ||
80 | 76 | ||
81 | /** | 77 | /* |
82 | * UWBD Event handler function signature | 78 | * UWBD Event handler function signature |
83 | * | 79 | * |
84 | * Return !0 if the event need not be freed (i.e. the handler | 80 | * Return !0 if the event need not be freed (i.e. the handler |
@@ -101,9 +97,12 @@ struct uwbd_event { | |||
101 | const char *name; | 97 | const char *name; |
102 | }; | 98 | }; |
103 | 99 | ||
104 | /** Table of handlers for and properties of the UWBD Radio Control Events */ | 100 | /* Table of handlers for and properties of the UWBD Radio Control Events */ |
105 | static | 101 | static struct uwbd_event uwbd_urc_events[] = { |
106 | struct uwbd_event uwbd_events[] = { | 102 | [UWB_RC_EVT_IE_RCV] = { |
103 | .handler = uwbd_evt_handle_rc_ie_rcv, | ||
104 | .name = "IE_RECEIVED" | ||
105 | }, | ||
107 | [UWB_RC_EVT_BEACON] = { | 106 | [UWB_RC_EVT_BEACON] = { |
108 | .handler = uwbd_evt_handle_rc_beacon, | 107 | .handler = uwbd_evt_handle_rc_beacon, |
109 | .name = "BEACON_RECEIVED" | 108 | .name = "BEACON_RECEIVED" |
@@ -142,23 +141,15 @@ struct uwbd_evt_type_handler { | |||
142 | size_t size; | 141 | size_t size; |
143 | }; | 142 | }; |
144 | 143 | ||
145 | #define UWBD_EVT_TYPE_HANDLER(n,a) { \ | 144 | /* Table of handlers for each UWBD Event type. */ |
146 | .name = (n), \ | 145 | static struct uwbd_evt_type_handler uwbd_urc_evt_type_handlers[] = { |
147 | .uwbd_events = (a), \ | 146 | [UWB_RC_CET_GENERAL] = { |
148 | .size = sizeof(a)/sizeof((a)[0]) \ | 147 | .name = "URC", |
149 | } | 148 | .uwbd_events = uwbd_urc_events, |
150 | 149 | .size = ARRAY_SIZE(uwbd_urc_events), | |
151 | 150 | }, | |
152 | /** Table of handlers for each UWBD Event type. */ | ||
153 | static | ||
154 | struct uwbd_evt_type_handler uwbd_evt_type_handlers[] = { | ||
155 | [UWB_RC_CET_GENERAL] = UWBD_EVT_TYPE_HANDLER("RC", uwbd_events) | ||
156 | }; | 151 | }; |
157 | 152 | ||
158 | static const | ||
159 | size_t uwbd_evt_type_handlers_len = | ||
160 | sizeof(uwbd_evt_type_handlers) / sizeof(uwbd_evt_type_handlers[0]); | ||
161 | |||
162 | static const struct uwbd_event uwbd_message_handlers[] = { | 153 | static const struct uwbd_event uwbd_message_handlers[] = { |
163 | [UWB_EVT_MSG_RESET] = { | 154 | [UWB_EVT_MSG_RESET] = { |
164 | .handler = uwbd_msg_handle_reset, | 155 | .handler = uwbd_msg_handle_reset, |
@@ -166,9 +157,7 @@ static const struct uwbd_event uwbd_message_handlers[] = { | |||
166 | }, | 157 | }, |
167 | }; | 158 | }; |
168 | 159 | ||
169 | static DEFINE_MUTEX(uwbd_event_mutex); | 160 | /* |
170 | |||
171 | /** | ||
172 | * Handle an URC event passed to the UWB Daemon | 161 | * Handle an URC event passed to the UWB Daemon |
173 | * | 162 | * |
174 | * @evt: the event to handle | 163 | * @evt: the event to handle |
@@ -188,6 +177,7 @@ static DEFINE_MUTEX(uwbd_event_mutex); | |||
188 | static | 177 | static |
189 | int uwbd_event_handle_urc(struct uwb_event *evt) | 178 | int uwbd_event_handle_urc(struct uwb_event *evt) |
190 | { | 179 | { |
180 | int result = -EINVAL; | ||
191 | struct uwbd_evt_type_handler *type_table; | 181 | struct uwbd_evt_type_handler *type_table; |
192 | uwbd_evt_handler_f handler; | 182 | uwbd_evt_handler_f handler; |
193 | u8 type, context; | 183 | u8 type, context; |
@@ -197,26 +187,24 @@ int uwbd_event_handle_urc(struct uwb_event *evt) | |||
197 | event = le16_to_cpu(evt->notif.rceb->wEvent); | 187 | event = le16_to_cpu(evt->notif.rceb->wEvent); |
198 | context = evt->notif.rceb->bEventContext; | 188 | context = evt->notif.rceb->bEventContext; |
199 | 189 | ||
200 | if (type > uwbd_evt_type_handlers_len) { | 190 | if (type >= ARRAY_SIZE(uwbd_urc_evt_type_handlers)) |
201 | printk(KERN_ERR "UWBD: event type %u: unknown (too high)\n", type); | 191 | goto out; |
202 | return -EINVAL; | 192 | type_table = &uwbd_urc_evt_type_handlers[type]; |
203 | } | 193 | if (type_table->uwbd_events == NULL) |
204 | type_table = &uwbd_evt_type_handlers[type]; | 194 | goto out; |
205 | if (type_table->uwbd_events == NULL) { | 195 | if (event > type_table->size) |
206 | printk(KERN_ERR "UWBD: event type %u: unknown\n", type); | 196 | goto out; |
207 | return -EINVAL; | ||
208 | } | ||
209 | if (event > type_table->size) { | ||
210 | printk(KERN_ERR "UWBD: event %s[%u]: unknown (too high)\n", | ||
211 | type_table->name, event); | ||
212 | return -EINVAL; | ||
213 | } | ||
214 | handler = type_table->uwbd_events[event].handler; | 197 | handler = type_table->uwbd_events[event].handler; |
215 | if (handler == NULL) { | 198 | if (handler == NULL) |
216 | printk(KERN_ERR "UWBD: event %s[%u]: unknown\n", type_table->name, event); | 199 | goto out; |
217 | return -EINVAL; | 200 | |
218 | } | 201 | result = (*handler)(evt); |
219 | return (*handler)(evt); | 202 | out: |
203 | if (result < 0) | ||
204 | dev_err(&evt->rc->uwb_dev.dev, | ||
205 | "UWBD: event 0x%02x/%04x/%02x, handling failed: %d\n", | ||
206 | type, event, context, result); | ||
207 | return result; | ||
220 | } | 208 | } |
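The rewritten dispatch is two bounds-checked table lookups plus a
populated-slot check, with every failure funnelled into one error report.
The idiom in isolation (names hypothetical):

	/* valid indices run 0 .. ARRAY_SIZE(table) - 1, hence >= */
	if (type >= ARRAY_SIZE(table) || table[type].uwbd_events == NULL)
		return -EINVAL;
	if (event >= table[type].size
	    || table[type].uwbd_events[event].handler == NULL)
		return -EINVAL;
	return table[type].uwbd_events[event].handler(evt);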
221 | 209 | ||
222 | static void uwbd_event_handle_message(struct uwb_event *evt) | 210 | static void uwbd_event_handle_message(struct uwb_event *evt) |
@@ -231,19 +219,10 @@ static void uwbd_event_handle_message(struct uwb_event *evt) | |||
231 | return; | 219 | return; |
232 | } | 220 | } |
233 | 221 | ||
234 | /* If this is a reset event we need to drop the | ||
235 | * uwbd_event_mutex or it deadlocks when the reset handler | ||
236 | * attempts to flush the uwbd events. */ | ||
237 | if (evt->message == UWB_EVT_MSG_RESET) | ||
238 | mutex_unlock(&uwbd_event_mutex); | ||
239 | |||
240 | result = uwbd_message_handlers[evt->message].handler(evt); | 222 | result = uwbd_message_handlers[evt->message].handler(evt); |
241 | if (result < 0) | 223 | if (result < 0) |
242 | dev_err(&rc->uwb_dev.dev, "UWBD: '%s' message failed: %d\n", | 224 | dev_err(&rc->uwb_dev.dev, "UWBD: '%s' message failed: %d\n", |
243 | uwbd_message_handlers[evt->message].name, result); | 225 | uwbd_message_handlers[evt->message].name, result); |
244 | |||
245 | if (evt->message == UWB_EVT_MSG_RESET) | ||
246 | mutex_lock(&uwbd_event_mutex); | ||
247 | } | 226 | } |
248 | 227 | ||
249 | static void uwbd_event_handle(struct uwb_event *evt) | 228 | static void uwbd_event_handle(struct uwb_event *evt) |
@@ -271,20 +250,6 @@ static void uwbd_event_handle(struct uwb_event *evt) | |||
271 | 250 | ||
272 | __uwb_rc_put(rc); /* for the __uwb_rc_get() in uwb_rc_notif_cb() */ | 251 | __uwb_rc_put(rc); /* for the __uwb_rc_get() in uwb_rc_notif_cb() */ |
273 | } | 252 | } |
274 | /* The UWB Daemon */ | ||
275 | |||
276 | |||
277 | /** Daemon's PID: used to decide if we can queue or not */ | ||
278 | static int uwbd_pid; | ||
279 | /** Daemon's task struct for managing the kthread */ | ||
280 | static struct task_struct *uwbd_task; | ||
281 | /** Daemon's waitqueue for waiting for new events */ | ||
282 | static DECLARE_WAIT_QUEUE_HEAD(uwbd_wq); | ||
283 | /** Daemon's list of events; we queue/dequeue here */ | ||
284 | static struct list_head uwbd_event_list = LIST_HEAD_INIT(uwbd_event_list); | ||
285 | /** Daemon's list lock to protect concurent access */ | ||
286 | static DEFINE_SPINLOCK(uwbd_event_list_lock); | ||
287 | |||
288 | 253 | ||
289 | /** | 254 | /** |
290 | * UWB Daemon | 255 | * UWB Daemon |
@@ -298,65 +263,58 @@ static DEFINE_SPINLOCK(uwbd_event_list_lock); | |||
298 | * FIXME: should change so we don't have a 1HZ timer all the time, but | 263 | * FIXME: should change so we don't have a 1HZ timer all the time, but |
299 | * only if there are devices. | 264 | * only if there are devices. |
300 | */ | 265 | */ |
301 | static int uwbd(void *unused) | 266 | static int uwbd(void *param) |
302 | { | 267 | { |
268 | struct uwb_rc *rc = param; | ||
303 | unsigned long flags; | 269 | unsigned long flags; |
304 | struct list_head list = LIST_HEAD_INIT(list); | 270 | struct uwb_event *evt; |
305 | struct uwb_event *evt, *nxt; | ||
306 | int should_stop = 0; | 271 | int should_stop = 0; |
272 | |||
307 | while (1) { | 273 | while (1) { |
308 | wait_event_interruptible_timeout( | 274 | wait_event_interruptible_timeout( |
309 | uwbd_wq, | 275 | rc->uwbd.wq, |
310 | !list_empty(&uwbd_event_list) | 276 | !list_empty(&rc->uwbd.event_list) |
311 | || (should_stop = kthread_should_stop()), | 277 | || (should_stop = kthread_should_stop()), |
312 | HZ); | 278 | HZ); |
313 | if (should_stop) | 279 | if (should_stop) |
314 | break; | 280 | break; |
315 | try_to_freeze(); | 281 | try_to_freeze(); |
316 | 282 | ||
317 | mutex_lock(&uwbd_event_mutex); | 283 | spin_lock_irqsave(&rc->uwbd.event_list_lock, flags); |
318 | spin_lock_irqsave(&uwbd_event_list_lock, flags); | 284 | if (!list_empty(&rc->uwbd.event_list)) { |
319 | list_splice_init(&uwbd_event_list, &list); | 285 | evt = list_first_entry(&rc->uwbd.event_list, struct uwb_event, list_node); |
320 | spin_unlock_irqrestore(&uwbd_event_list_lock, flags); | ||
321 | list_for_each_entry_safe(evt, nxt, &list, list_node) { | ||
322 | list_del(&evt->list_node); | 286 | list_del(&evt->list_node); |
287 | } else | ||
288 | evt = NULL; | ||
289 | spin_unlock_irqrestore(&rc->uwbd.event_list_lock, flags); | ||
290 | |||
291 | if (evt) { | ||
323 | uwbd_event_handle(evt); | 292 | uwbd_event_handle(evt); |
324 | kfree(evt); | 293 | kfree(evt); |
325 | } | 294 | } |
326 | mutex_unlock(&uwbd_event_mutex); | ||
327 | 295 | ||
328 | uwb_beca_purge(); /* Purge devices that left */ | 296 | uwb_beca_purge(rc); /* Purge devices that left */ |
329 | } | 297 | } |
330 | return 0; | 298 | return 0; |
331 | } | 299 | } |
332 | 300 | ||
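With the daemon made per radio controller, each uwb_rc now owns its task,
waitqueue, event list and lock. The loop is the stock kthread pattern:
sleep until an event or a stop request arrives, pop one event under the
lock, handle it unlocked. Reduced to a sketch (pop_one_locked() is a
hypothetical helper for the locked list_first_entry/list_del step):

	while (!kthread_should_stop()) {
		wait_event_interruptible_timeout(rc->uwbd.wq,
			!list_empty(&rc->uwbd.event_list)
			|| kthread_should_stop(), HZ);
		evt = pop_one_locked(&rc->uwbd.event_list);
		if (evt) {
			uwbd_event_handle(evt);
			kfree(evt);
		}
	}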
333 | 301 | ||
334 | /** Start the UWB daemon */ | 302 | /** Start the UWB daemon */ |
335 | void uwbd_start(void) | 303 | void uwbd_start(struct uwb_rc *rc) |
336 | { | 304 | { |
337 | uwbd_task = kthread_run(uwbd, NULL, "uwbd"); | 305 | rc->uwbd.task = kthread_run(uwbd, rc, "uwbd"); |
338 | if (uwbd_task == NULL) | 306 | if (IS_ERR(rc->uwbd.task)) |
339 | printk(KERN_ERR "UWB: Cannot start management daemon; " | 307 | printk(KERN_ERR "UWB: Cannot start management daemon; " |
340 | "UWB won't work\n"); | 308 | "UWB won't work\n"); |
341 | else | 309 | else |
342 | uwbd_pid = uwbd_task->pid; | 310 | rc->uwbd.pid = rc->uwbd.task->pid; |
343 | } | 311 | } |
344 | 312 | ||
345 | /* Stop the UWB daemon and free any unprocessed events */ | 313 | /* Stop the UWB daemon and free any unprocessed events */ |
346 | void uwbd_stop(void) | 314 | void uwbd_stop(struct uwb_rc *rc) |
347 | { | 315 | { |
348 | unsigned long flags; | 316 | kthread_stop(rc->uwbd.task); |
349 | struct uwb_event *evt, *nxt; | 317 | uwbd_flush(rc); |
350 | kthread_stop(uwbd_task); | ||
351 | spin_lock_irqsave(&uwbd_event_list_lock, flags); | ||
352 | uwbd_pid = 0; | ||
353 | list_for_each_entry_safe(evt, nxt, &uwbd_event_list, list_node) { | ||
354 | if (evt->type == UWB_EVT_TYPE_NOTIF) | ||
355 | kfree(evt->notif.rceb); | ||
356 | kfree(evt); | ||
357 | } | ||
358 | spin_unlock_irqrestore(&uwbd_event_list_lock, flags); | ||
359 | uwb_beca_release(); | ||
360 | } | 318 | } |
361 | 319 | ||
362 | /* | 320 | /* |
@@ -373,18 +331,20 @@ void uwbd_stop(void) | |||
373 | */ | 331 | */ |
374 | void uwbd_event_queue(struct uwb_event *evt) | 332 | void uwbd_event_queue(struct uwb_event *evt) |
375 | { | 333 | { |
334 | struct uwb_rc *rc = evt->rc; | ||
376 | unsigned long flags; | 335 | unsigned long flags; |
377 | spin_lock_irqsave(&uwbd_event_list_lock, flags); | 336 | |
378 | if (uwbd_pid != 0) { | 337 | spin_lock_irqsave(&rc->uwbd.event_list_lock, flags); |
379 | list_add(&evt->list_node, &uwbd_event_list); | 338 | if (rc->uwbd.pid != 0) { |
380 | wake_up_all(&uwbd_wq); | 339 | list_add(&evt->list_node, &rc->uwbd.event_list); |
340 | wake_up_all(&rc->uwbd.wq); | ||
381 | } else { | 341 | } else { |
382 | __uwb_rc_put(evt->rc); | 342 | __uwb_rc_put(evt->rc); |
383 | if (evt->type == UWB_EVT_TYPE_NOTIF) | 343 | if (evt->type == UWB_EVT_TYPE_NOTIF) |
384 | kfree(evt->notif.rceb); | 344 | kfree(evt->notif.rceb); |
385 | kfree(evt); | 345 | kfree(evt); |
386 | } | 346 | } |
387 | spin_unlock_irqrestore(&uwbd_event_list_lock, flags); | 347 | spin_unlock_irqrestore(&rc->uwbd.event_list_lock, flags); |
388 | return; | 348 | return; |
389 | } | 349 | } |
390 | 350 | ||
@@ -392,10 +352,8 @@ void uwbd_flush(struct uwb_rc *rc) | |||
392 | { | 352 | { |
393 | struct uwb_event *evt, *nxt; | 353 | struct uwb_event *evt, *nxt; |
394 | 354 | ||
395 | mutex_lock(&uwbd_event_mutex); | 355 | spin_lock_irq(&rc->uwbd.event_list_lock); |
396 | 356 | list_for_each_entry_safe(evt, nxt, &rc->uwbd.event_list, list_node) { | |
397 | spin_lock_irq(&uwbd_event_list_lock); | ||
398 | list_for_each_entry_safe(evt, nxt, &uwbd_event_list, list_node) { | ||
399 | if (evt->rc == rc) { | 357 | if (evt->rc == rc) { |
400 | __uwb_rc_put(rc); | 358 | __uwb_rc_put(rc); |
401 | list_del(&evt->list_node); | 359 | list_del(&evt->list_node); |
@@ -404,7 +362,5 @@ void uwbd_flush(struct uwb_rc *rc) | |||
404 | kfree(evt); | 362 | kfree(evt); |
405 | } | 363 | } |
406 | } | 364 | } |
407 | spin_unlock_irq(&uwbd_event_list_lock); | 365 | spin_unlock_irq(&rc->uwbd.event_list_lock); |
408 | |||
409 | mutex_unlock(&uwbd_event_mutex); | ||
410 | } | 366 | } |
diff --git a/drivers/uwb/whc-rc.c b/drivers/uwb/whc-rc.c index 1711deadb114..19a1dd129212 100644 --- a/drivers/uwb/whc-rc.c +++ b/drivers/uwb/whc-rc.c | |||
@@ -39,7 +39,6 @@ | |||
39 | * them to the hw and transfer the replies/notifications back to the | 39 | * them to the hw and transfer the replies/notifications back to the |
40 | * UWB stack through the UWB daemon (UWBD). | 40 | * UWB stack through the UWB daemon (UWBD). |
41 | */ | 41 | */ |
42 | #include <linux/version.h> | ||
43 | #include <linux/init.h> | 42 | #include <linux/init.h> |
44 | #include <linux/module.h> | 43 | #include <linux/module.h> |
45 | #include <linux/pci.h> | 44 | #include <linux/pci.h> |
@@ -49,10 +48,8 @@ | |||
49 | #include <linux/uwb.h> | 48 | #include <linux/uwb.h> |
50 | #include <linux/uwb/whci.h> | 49 | #include <linux/uwb/whci.h> |
51 | #include <linux/uwb/umc.h> | 50 | #include <linux/uwb/umc.h> |
52 | #include "uwb-internal.h" | ||
53 | 51 | ||
54 | #define D_LOCAL 0 | 52 | #include "uwb-internal.h" |
55 | #include <linux/uwb/debug.h> | ||
56 | 53 | ||
57 | /** | 54 | /** |
58 | * Descriptor for an instance of the UWB Radio Control Driver that | 55 | * Descriptor for an instance of the UWB Radio Control Driver that |
@@ -98,13 +95,8 @@ static int whcrc_cmd(struct uwb_rc *uwb_rc, | |||
98 | struct device *dev = &whcrc->umc_dev->dev; | 95 | struct device *dev = &whcrc->umc_dev->dev; |
99 | u32 urccmd; | 96 | u32 urccmd; |
100 | 97 | ||
101 | d_fnstart(3, dev, "(%p, %p, %zu)\n", uwb_rc, cmd, cmd_size); | 98 | if (cmd_size >= 4096) |
102 | might_sleep(); | 99 | return -EINVAL; |
103 | |||
104 | if (cmd_size >= 4096) { | ||
105 | result = -E2BIG; | ||
106 | goto error; | ||
107 | } | ||
108 | 100 | ||
109 | /* | 101 | /* |
110 | * If the URC is halted, then the hardware has reset itself. | 102 | * If the URC is halted, then the hardware has reset itself. |
@@ -115,16 +107,14 @@ static int whcrc_cmd(struct uwb_rc *uwb_rc, | |||
115 | if (le_readl(whcrc->rc_base + URCSTS) & URCSTS_HALTED) { | 107 | if (le_readl(whcrc->rc_base + URCSTS) & URCSTS_HALTED) { |
116 | dev_err(dev, "requesting reset of halted radio controller\n"); | 108 | dev_err(dev, "requesting reset of halted radio controller\n"); |
117 | uwb_rc_reset_all(uwb_rc); | 109 | uwb_rc_reset_all(uwb_rc); |
118 | result = -EIO; | 110 | return -EIO; |
119 | goto error; | ||
120 | } | 111 | } |
121 | 112 | ||
122 | result = wait_event_timeout(whcrc->cmd_wq, | 113 | result = wait_event_timeout(whcrc->cmd_wq, |
123 | !(le_readl(whcrc->rc_base + URCCMD) & URCCMD_ACTIVE), HZ/2); | 114 | !(le_readl(whcrc->rc_base + URCCMD) & URCCMD_ACTIVE), HZ/2); |
124 | if (result == 0) { | 115 | if (result == 0) { |
125 | dev_err(dev, "device is not ready to execute commands\n"); | 116 | dev_err(dev, "device is not ready to execute commands\n"); |
126 | result = -ETIMEDOUT; | 117 | return -ETIMEDOUT; |
127 | goto error; | ||
128 | } | 118 | } |
129 | 119 | ||
130 | memmove(whcrc->cmd_buf, cmd, cmd_size); | 120 | memmove(whcrc->cmd_buf, cmd, cmd_size); |
@@ -137,10 +127,7 @@ static int whcrc_cmd(struct uwb_rc *uwb_rc, | |||
137 | whcrc->rc_base + URCCMD); | 127 | whcrc->rc_base + URCCMD); |
138 | spin_unlock(&whcrc->irq_lock); | 128 | spin_unlock(&whcrc->irq_lock); |
139 | 129 | ||
140 | error: | 130 | return 0; |
141 | d_fnend(3, dev, "(%p, %p, %zu) = %d\n", | ||
142 | uwb_rc, cmd, cmd_size, result); | ||
143 | return result; | ||
144 | } | 131 | } |
145 | 132 | ||
146 | static int whcrc_reset(struct uwb_rc *rc) | 133 | static int whcrc_reset(struct uwb_rc *rc) |
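
The whcrc_cmd() rework above does more than strip the d_fnstart()/d_fnend() tracing: once the exit trace is gone there is nothing left for the shared `error:` label to do, so every failure path returns directly, and the oversize-command error changes from -E2BIG to -EINVAL. A condensed sketch of the resulting flow, using the register accessors and field names shown in the hunk:

    if (cmd_size >= 4096)
            return -EINVAL;                 /* larger than the command buffer */

    /* a halted URC means the hardware reset itself; ask for recovery */
    if (le_readl(whcrc->rc_base + URCSTS) & URCSTS_HALTED) {
            dev_err(dev, "requesting reset of halted radio controller\n");
            uwb_rc_reset_all(uwb_rc);
            return -EIO;
    }

    /* wait (at most HZ/2) for the previous command to drain */
    if (wait_event_timeout(whcrc->cmd_wq,
                    !(le_readl(whcrc->rc_base + URCCMD) & URCCMD_ACTIVE),
                    HZ/2) == 0) {
            dev_err(dev, "device is not ready to execute commands\n");
            return -ETIMEDOUT;
    }

    memmove(whcrc->cmd_buf, cmd, cmd_size);
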
@@ -167,34 +154,25 @@ static int whcrc_reset(struct uwb_rc *rc) | |||
167 | static | 154 | static |
168 | void whcrc_enable_events(struct whcrc *whcrc) | 155 | void whcrc_enable_events(struct whcrc *whcrc) |
169 | { | 156 | { |
170 | struct device *dev = &whcrc->umc_dev->dev; | ||
171 | u32 urccmd; | 157 | u32 urccmd; |
172 | 158 | ||
173 | d_fnstart(4, dev, "(whcrc %p)\n", whcrc); | ||
174 | |||
175 | le_writeq(whcrc->evt_dma_buf, whcrc->rc_base + URCEVTADDR); | 159 | le_writeq(whcrc->evt_dma_buf, whcrc->rc_base + URCEVTADDR); |
176 | 160 | ||
177 | spin_lock(&whcrc->irq_lock); | 161 | spin_lock(&whcrc->irq_lock); |
178 | urccmd = le_readl(whcrc->rc_base + URCCMD) & ~URCCMD_ACTIVE; | 162 | urccmd = le_readl(whcrc->rc_base + URCCMD) & ~URCCMD_ACTIVE; |
179 | le_writel(urccmd | URCCMD_EARV, whcrc->rc_base + URCCMD); | 163 | le_writel(urccmd | URCCMD_EARV, whcrc->rc_base + URCCMD); |
180 | spin_unlock(&whcrc->irq_lock); | 164 | spin_unlock(&whcrc->irq_lock); |
181 | |||
182 | d_fnend(4, dev, "(whcrc %p) = void\n", whcrc); | ||
183 | } | 165 | } |
184 | 166 | ||
185 | static void whcrc_event_work(struct work_struct *work) | 167 | static void whcrc_event_work(struct work_struct *work) |
186 | { | 168 | { |
187 | struct whcrc *whcrc = container_of(work, struct whcrc, event_work); | 169 | struct whcrc *whcrc = container_of(work, struct whcrc, event_work); |
188 | struct device *dev = &whcrc->umc_dev->dev; | ||
189 | size_t size; | 170 | size_t size; |
190 | u64 urcevtaddr; | 171 | u64 urcevtaddr; |
191 | 172 | ||
192 | urcevtaddr = le_readq(whcrc->rc_base + URCEVTADDR); | 173 | urcevtaddr = le_readq(whcrc->rc_base + URCEVTADDR); |
193 | size = urcevtaddr & URCEVTADDR_OFFSET_MASK; | 174 | size = urcevtaddr & URCEVTADDR_OFFSET_MASK; |
194 | 175 | ||
195 | d_printf(3, dev, "received %zu octet event\n", size); | ||
196 | d_dump(4, dev, whcrc->evt_buf, size > 32 ? 32 : size); | ||
197 | |||
198 | uwb_rc_neh_grok(whcrc->uwb_rc, whcrc->evt_buf, size); | 176 | uwb_rc_neh_grok(whcrc->uwb_rc, whcrc->evt_buf, size); |
199 | whcrc_enable_events(whcrc); | 177 | whcrc_enable_events(whcrc); |
200 | } | 178 | } |
@@ -217,22 +195,15 @@ irqreturn_t whcrc_irq_cb(int irq, void *_whcrc) | |||
217 | return IRQ_NONE; | 195 | return IRQ_NONE; |
218 | le_writel(urcsts & URCSTS_INT_MASK, whcrc->rc_base + URCSTS); | 196 | le_writel(urcsts & URCSTS_INT_MASK, whcrc->rc_base + URCSTS); |
219 | 197 | ||
220 | d_printf(4, dev, "acked 0x%08x, urcsts 0x%08x\n", | ||
221 | le_readl(whcrc->rc_base + URCSTS), urcsts); | ||
222 | |||
223 | if (urcsts & URCSTS_HSE) { | 198 | if (urcsts & URCSTS_HSE) { |
224 | dev_err(dev, "host system error -- hardware halted\n"); | 199 | dev_err(dev, "host system error -- hardware halted\n"); |
225 | /* FIXME: do something sensible here */ | 200 | /* FIXME: do something sensible here */ |
226 | goto out; | 201 | goto out; |
227 | } | 202 | } |
228 | if (urcsts & URCSTS_ER) { | 203 | if (urcsts & URCSTS_ER) |
229 | d_printf(3, dev, "ER: event ready\n"); | ||
230 | schedule_work(&whcrc->event_work); | 204 | schedule_work(&whcrc->event_work); |
231 | } | 205 | if (urcsts & URCSTS_RCI) |
232 | if (urcsts & URCSTS_RCI) { | ||
233 | d_printf(3, dev, "RCI: ready to execute another command\n"); | ||
234 | wake_up_all(&whcrc->cmd_wq); | 206 | wake_up_all(&whcrc->cmd_wq); |
235 | } | ||
236 | out: | 207 | out: |
237 | return IRQ_HANDLED; | 208 | return IRQ_HANDLED; |
238 | } | 209 | } |
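
With its d_printf() chatter removed, whcrc_irq_cb() is reduced to the usual ack-then-dispatch interrupt shape: acknowledge the asserted status bits, defer event parsing to a work item, and wake anyone waiting to submit a command. A condensed sketch -- the URCSTS read and the IRQ_NONE test sit above the hunk and are reconstructed here, so treat those first lines as assumed:

    urcsts = le_readl(whcrc->rc_base + URCSTS);         /* assumed: above hunk */
    if (!(urcsts & URCSTS_INT_MASK))                    /* assumed: above hunk */
            return IRQ_NONE;
    le_writel(urcsts & URCSTS_INT_MASK, whcrc->rc_base + URCSTS);   /* ack */

    if (urcsts & URCSTS_HSE) {
            dev_err(dev, "host system error -- hardware halted\n");
            goto out;                           /* FIXME in the driver: recover */
    }
    if (urcsts & URCSTS_ER)
            schedule_work(&whcrc->event_work);  /* parse events in process context */
    if (urcsts & URCSTS_RCI)
            wake_up_all(&whcrc->cmd_wq);        /* command register is free again */
    out:
            return IRQ_HANDLED;
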
@@ -251,8 +222,7 @@ int whcrc_setup_rc_umc(struct whcrc *whcrc) | |||
251 | whcrc->area = umc_dev->resource.start; | 222 | whcrc->area = umc_dev->resource.start; |
252 | whcrc->rc_len = umc_dev->resource.end - umc_dev->resource.start + 1; | 223 | whcrc->rc_len = umc_dev->resource.end - umc_dev->resource.start + 1; |
253 | result = -EBUSY; | 224 | result = -EBUSY; |
254 | if (request_mem_region(whcrc->area, whcrc->rc_len, KBUILD_MODNAME) | 225 | if (request_mem_region(whcrc->area, whcrc->rc_len, KBUILD_MODNAME) == NULL) { |
255 | == NULL) { | ||
256 | dev_err(dev, "can't request URC region (%zu bytes @ 0x%lx): %d\n", | 226 | dev_err(dev, "can't request URC region (%zu bytes @ 0x%lx): %d\n", |
257 | whcrc->rc_len, whcrc->area, result); | 227 | whcrc->rc_len, whcrc->area, result); |
258 | goto error_request_region; | 228 | goto error_request_region; |
@@ -287,8 +257,6 @@ int whcrc_setup_rc_umc(struct whcrc *whcrc) | |||
287 | dev_err(dev, "Can't allocate evt transfer buffer\n"); | 257 | dev_err(dev, "Can't allocate evt transfer buffer\n"); |
288 | goto error_evt_buffer; | 258 | goto error_evt_buffer; |
289 | } | 259 | } |
290 | d_printf(3, dev, "UWB RC Interface: %zu bytes at 0x%p, irq %u\n", | ||
291 | whcrc->rc_len, whcrc->rc_base, umc_dev->irq); | ||
292 | return 0; | 260 | return 0; |
293 | 261 | ||
294 | error_evt_buffer: | 262 | error_evt_buffer: |
@@ -333,47 +301,23 @@ void whcrc_release_rc_umc(struct whcrc *whcrc) | |||
333 | static int whcrc_start_rc(struct uwb_rc *rc) | 301 | static int whcrc_start_rc(struct uwb_rc *rc) |
334 | { | 302 | { |
335 | struct whcrc *whcrc = rc->priv; | 303 | struct whcrc *whcrc = rc->priv; |
336 | int result = 0; | ||
337 | struct device *dev = &whcrc->umc_dev->dev; | 304 | struct device *dev = &whcrc->umc_dev->dev; |
338 | unsigned long start, duration; | ||
339 | 305 | ||
340 | /* Reset the thing */ | 306 | /* Reset the thing */ |
341 | le_writel(URCCMD_RESET, whcrc->rc_base + URCCMD); | 307 | le_writel(URCCMD_RESET, whcrc->rc_base + URCCMD); |
342 | if (d_test(3)) | ||
343 | start = jiffies; | ||
344 | if (whci_wait_for(dev, whcrc->rc_base + URCCMD, URCCMD_RESET, 0, | 308 | if (whci_wait_for(dev, whcrc->rc_base + URCCMD, URCCMD_RESET, 0, |
345 | 5000, "device to reset at init") < 0) { | 309 | 5000, "hardware reset") < 0) |
346 | result = -EBUSY; | 310 | return -EBUSY; |
347 | goto error; | ||
348 | } else if (d_test(3)) { | ||
349 | duration = jiffies - start; | ||
350 | if (duration > msecs_to_jiffies(40)) | ||
351 | dev_err(dev, "Device took %ums to " | ||
352 | "reset. MAX expected: 40ms\n", | ||
353 | jiffies_to_msecs(duration)); | ||
354 | } | ||
355 | 311 | ||
356 | /* Set the event buffer, start the controller (enable IRQs later) */ | 312 | /* Set the event buffer, start the controller (enable IRQs later) */ |
357 | le_writel(0, whcrc->rc_base + URCINTR); | 313 | le_writel(0, whcrc->rc_base + URCINTR); |
358 | le_writel(URCCMD_RS, whcrc->rc_base + URCCMD); | 314 | le_writel(URCCMD_RS, whcrc->rc_base + URCCMD); |
359 | result = -ETIMEDOUT; | ||
360 | if (d_test(3)) | ||
361 | start = jiffies; | ||
362 | if (whci_wait_for(dev, whcrc->rc_base + URCSTS, URCSTS_HALTED, 0, | 315 | if (whci_wait_for(dev, whcrc->rc_base + URCSTS, URCSTS_HALTED, 0, |
363 | 5000, "device to start") < 0) | 316 | 5000, "radio controller start") < 0) |
364 | goto error; | 317 | return -ETIMEDOUT; |
365 | if (d_test(3)) { | ||
366 | duration = jiffies - start; | ||
367 | if (duration > msecs_to_jiffies(40)) | ||
368 | dev_err(dev, "Device took %ums to start. " | ||
369 | "MAX expected: 40ms\n", | ||
370 | jiffies_to_msecs(duration)); | ||
371 | } | ||
372 | whcrc_enable_events(whcrc); | 318 | whcrc_enable_events(whcrc); |
373 | result = 0; | ||
374 | le_writel(URCINTR_EN_ALL, whcrc->rc_base + URCINTR); | 319 | le_writel(URCINTR_EN_ALL, whcrc->rc_base + URCINTR); |
375 | error: | 320 | return 0; |
376 | return result; | ||
377 | } | 321 | } |
378 | 322 | ||
379 | 323 | ||
@@ -395,7 +339,7 @@ void whcrc_stop_rc(struct uwb_rc *rc) | |||
395 | 339 | ||
396 | le_writel(0, whcrc->rc_base + URCCMD); | 340 | le_writel(0, whcrc->rc_base + URCCMD); |
397 | whci_wait_for(&umc_dev->dev, whcrc->rc_base + URCSTS, | 341 | whci_wait_for(&umc_dev->dev, whcrc->rc_base + URCSTS, |
398 | URCSTS_HALTED, 0, 40, "URCSTS.HALTED"); | 342 | URCSTS_HALTED, URCSTS_HALTED, 100, "radio controller stop"); |
399 | } | 343 | } |
400 | 344 | ||
401 | static void whcrc_init(struct whcrc *whcrc) | 345 | static void whcrc_init(struct whcrc *whcrc) |
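
The whcrc_stop_rc() hunk above is a behavioral fix, not just message polish. whci_wait_for() succeeds when (val & mask) == result, so the old arguments (mask URCSTS_HALTED, result 0) made the stop path return as soon as the HALTED bit was *clear* -- i.e. immediately, while the controller was still running. Passing URCSTS_HALTED for both mask and result waits for the bit to actually come up, and the budget grows from 40 ms to 100 ms:

    /* old: returned once HALTED was clear -- wrong polarity for a stop */
    whci_wait_for(&umc_dev->dev, whcrc->rc_base + URCSTS,
                  URCSTS_HALTED, 0, 40, "URCSTS.HALTED");

    /* new: wait until the controller reports it has halted */
    whci_wait_for(&umc_dev->dev, whcrc->rc_base + URCSTS,
                  URCSTS_HALTED, URCSTS_HALTED, 100, "radio controller stop");
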
@@ -421,7 +365,6 @@ int whcrc_probe(struct umc_dev *umc_dev) | |||
421 | struct whcrc *whcrc; | 365 | struct whcrc *whcrc; |
422 | struct device *dev = &umc_dev->dev; | 366 | struct device *dev = &umc_dev->dev; |
423 | 367 | ||
424 | d_fnstart(3, dev, "(umc_dev %p)\n", umc_dev); | ||
425 | result = -ENOMEM; | 368 | result = -ENOMEM; |
426 | uwb_rc = uwb_rc_alloc(); | 369 | uwb_rc = uwb_rc_alloc(); |
427 | if (uwb_rc == NULL) { | 370 | if (uwb_rc == NULL) { |
@@ -453,7 +396,6 @@ int whcrc_probe(struct umc_dev *umc_dev) | |||
453 | if (result < 0) | 396 | if (result < 0) |
454 | goto error_rc_add; | 397 | goto error_rc_add; |
455 | umc_set_drvdata(umc_dev, whcrc); | 398 | umc_set_drvdata(umc_dev, whcrc); |
456 | d_fnend(3, dev, "(umc_dev %p) = 0\n", umc_dev); | ||
457 | return 0; | 399 | return 0; |
458 | 400 | ||
459 | error_rc_add: | 401 | error_rc_add: |
@@ -463,7 +405,6 @@ error_setup_rc_umc: | |||
463 | error_alloc: | 405 | error_alloc: |
464 | uwb_rc_put(uwb_rc); | 406 | uwb_rc_put(uwb_rc); |
465 | error_rc_alloc: | 407 | error_rc_alloc: |
466 | d_fnend(3, dev, "(umc_dev %p) = %d\n", umc_dev, result); | ||
467 | return result; | 408 | return result; |
468 | } | 409 | } |
469 | 410 | ||
@@ -486,7 +427,24 @@ static void whcrc_remove(struct umc_dev *umc_dev) | |||
486 | whcrc_release_rc_umc(whcrc); | 427 | whcrc_release_rc_umc(whcrc); |
487 | kfree(whcrc); | 428 | kfree(whcrc); |
488 | uwb_rc_put(uwb_rc); | 429 | uwb_rc_put(uwb_rc); |
489 | d_printf(1, &umc_dev->dev, "freed whcrc %p\n", whcrc); | 430 | } |
431 | |||
432 | static int whcrc_pre_reset(struct umc_dev *umc) | ||
433 | { | ||
434 | struct whcrc *whcrc = umc_get_drvdata(umc); | ||
435 | struct uwb_rc *uwb_rc = whcrc->uwb_rc; | ||
436 | |||
437 | uwb_rc_pre_reset(uwb_rc); | ||
438 | return 0; | ||
439 | } | ||
440 | |||
441 | static int whcrc_post_reset(struct umc_dev *umc) | ||
442 | { | ||
443 | struct whcrc *whcrc = umc_get_drvdata(umc); | ||
444 | struct uwb_rc *uwb_rc = whcrc->uwb_rc; | ||
445 | |||
446 | uwb_rc_post_reset(uwb_rc); | ||
447 | return 0; | ||
490 | } | 448 | } |
491 | 449 | ||
492 | /* PCI device ID's that we handle [so it gets loaded] */ | 450 | /* PCI device ID's that we handle [so it gets loaded] */ |
@@ -497,10 +455,12 @@ static struct pci_device_id whcrc_id_table[] = { | |||
497 | MODULE_DEVICE_TABLE(pci, whcrc_id_table); | 455 | MODULE_DEVICE_TABLE(pci, whcrc_id_table); |
498 | 456 | ||
499 | static struct umc_driver whcrc_driver = { | 457 | static struct umc_driver whcrc_driver = { |
500 | .name = "whc-rc", | 458 | .name = "whc-rc", |
501 | .cap_id = UMC_CAP_ID_WHCI_RC, | 459 | .cap_id = UMC_CAP_ID_WHCI_RC, |
502 | .probe = whcrc_probe, | 460 | .probe = whcrc_probe, |
503 | .remove = whcrc_remove, | 461 | .remove = whcrc_remove, |
462 | .pre_reset = whcrc_pre_reset, | ||
463 | .post_reset = whcrc_post_reset, | ||
504 | }; | 464 | }; |
505 | 465 | ||
506 | static int __init whcrc_driver_init(void) | 466 | static int __init whcrc_driver_init(void) |
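
The new .pre_reset/.post_reset hooks give the UMC core a way to quiesce the UWB stack before a bus reset and re-arm it afterwards; the driver-side implementations simply forward to uwb_rc_pre_reset()/uwb_rc_post_reset(). A sketch of how the pieces hang together -- umc_driver_register() is an assumed name for the registration helper called from whcrc_driver_init():

    static struct umc_driver whcrc_driver = {
            .name       = "whc-rc",
            .cap_id     = UMC_CAP_ID_WHCI_RC,
            .probe      = whcrc_probe,
            .remove     = whcrc_remove,
            .pre_reset  = whcrc_pre_reset,    /* quiesce before the bus reset */
            .post_reset = whcrc_post_reset,   /* re-arm once it completes */
    };

    static int __init whcrc_driver_init(void)
    {
            return umc_driver_register(&whcrc_driver);   /* assumed helper */
    }
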
diff --git a/drivers/uwb/whci.c b/drivers/uwb/whci.c index 3df2388f908f..1f8964ed9882 100644 --- a/drivers/uwb/whci.c +++ b/drivers/uwb/whci.c | |||
@@ -67,11 +67,11 @@ int whci_wait_for(struct device *dev, u32 __iomem *reg, u32 mask, u32 result, | |||
67 | val = le_readl(reg); | 67 | val = le_readl(reg); |
68 | if ((val & mask) == result) | 68 | if ((val & mask) == result) |
69 | break; | 69 | break; |
70 | msleep(10); | ||
71 | if (t >= max_ms) { | 70 | if (t >= max_ms) { |
72 | dev_err(dev, "timed out waiting for %s ", tag); | 71 | dev_err(dev, "%s timed out\n", tag); |
73 | return -ETIMEDOUT; | 72 | return -ETIMEDOUT; |
74 | } | 73 | } |
74 | msleep(10); | ||
75 | t += 10; | 75 | t += 10; |
76 | } | 76 | } |
77 | return 0; | 77 | return 0; |
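
Moving msleep() below the deadline test fixes a small inefficiency in this poll loop: the old ordering slept a final 10 ms and then declared the timeout without sampling the register again, so every timeout cost one useless sleep. After the swap the loop reads, tests the deadline, and only then sleeps. The full loop, condensed from the hunk (the loop framing above the hunk is assumed):

    t = 0;
    for (;;) {
            val = le_readl(reg);
            if ((val & mask) == result)
                    break;                      /* condition satisfied */
            if (t >= max_ms) {
                    dev_err(dev, "%s timed out\n", tag);
                    return -ETIMEDOUT;          /* fail before sleeping again */
            }
            msleep(10);
            t += 10;
    }
    return 0;
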
@@ -111,7 +111,7 @@ static int whci_add_cap(struct whci_card *card, int n) | |||
111 | + UWBCAPDATA_TO_OFFSET(capdata); | 111 | + UWBCAPDATA_TO_OFFSET(capdata); |
112 | umc->resource.end = umc->resource.start | 112 | umc->resource.end = umc->resource.start |
113 | + (n == 0 ? 0x20 : UWBCAPDATA_TO_SIZE(capdata)) - 1; | 113 | + (n == 0 ? 0x20 : UWBCAPDATA_TO_SIZE(capdata)) - 1; |
114 | umc->resource.name = umc->dev.bus_id; | 114 | umc->resource.name = dev_name(&umc->dev); |
115 | umc->resource.flags = card->pci->resource[bar].flags; | 115 | umc->resource.flags = card->pci->resource[bar].flags; |
116 | umc->resource.parent = &card->pci->resource[bar]; | 116 | umc->resource.parent = &card->pci->resource[bar]; |
117 | umc->irq = card->pci->irq; | 117 | umc->irq = card->pci->irq; |
diff --git a/drivers/uwb/wlp/eda.c b/drivers/uwb/wlp/eda.c index 10985fa233cc..69e020039718 100644 --- a/drivers/uwb/wlp/eda.c +++ b/drivers/uwb/wlp/eda.c | |||
@@ -51,9 +51,7 @@ | |||
51 | * the tag and address of the transmitting neighbor. | 51 | * the tag and address of the transmitting neighbor. |
52 | */ | 52 | */ |
53 | 53 | ||
54 | #define D_LOCAL 5 | ||
55 | #include <linux/netdevice.h> | 54 | #include <linux/netdevice.h> |
56 | #include <linux/uwb/debug.h> | ||
57 | #include <linux/etherdevice.h> | 55 | #include <linux/etherdevice.h> |
58 | #include <linux/wlp.h> | 56 | #include <linux/wlp.h> |
59 | #include "wlp-internal.h" | 57 | #include "wlp-internal.h" |
@@ -304,7 +302,6 @@ int wlp_eda_for_virtual(struct wlp_eda *eda, | |||
304 | { | 302 | { |
305 | int result = 0; | 303 | int result = 0; |
306 | struct wlp *wlp = container_of(eda, struct wlp, eda); | 304 | struct wlp *wlp = container_of(eda, struct wlp, eda); |
307 | struct device *dev = &wlp->rc->uwb_dev.dev; | ||
308 | struct wlp_eda_node *itr; | 305 | struct wlp_eda_node *itr; |
309 | unsigned long flags; | 306 | unsigned long flags; |
310 | int found = 0; | 307 | int found = 0; |
@@ -313,26 +310,14 @@ int wlp_eda_for_virtual(struct wlp_eda *eda, | |||
313 | list_for_each_entry(itr, &eda->cache, list_node) { | 310 | list_for_each_entry(itr, &eda->cache, list_node) { |
314 | if (!memcmp(itr->virt_addr, virt_addr, | 311 | if (!memcmp(itr->virt_addr, virt_addr, |
315 | sizeof(itr->virt_addr))) { | 312 | sizeof(itr->virt_addr))) { |
316 | d_printf(6, dev, "EDA: looking for %pM hit %02x:%02x " | ||
317 | "wss %p tag 0x%02x state %u\n", | ||
318 | virt_addr, | ||
319 | itr->dev_addr.data[1], | ||
320 | itr->dev_addr.data[0], itr->wss, | ||
321 | itr->tag, itr->state); | ||
322 | result = (*function)(wlp, itr, priv); | 313 | result = (*function)(wlp, itr, priv); |
323 | *dev_addr = itr->dev_addr; | 314 | *dev_addr = itr->dev_addr; |
324 | found = 1; | 315 | found = 1; |
325 | break; | 316 | break; |
326 | } else | 317 | } |
327 | d_printf(6, dev, "EDA: looking for %pM against %pM miss\n", | ||
328 | virt_addr, itr->virt_addr); | ||
329 | } | 318 | } |
330 | if (!found) { | 319 | if (!found) |
331 | if (printk_ratelimit()) | ||
332 | dev_err(dev, "EDA: Eth addr %pM not found.\n", | ||
333 | virt_addr); | ||
334 | result = -ENODEV; | 320 | result = -ENODEV; |
335 | } | ||
336 | spin_unlock_irqrestore(&eda->lock, flags); | 321 | spin_unlock_irqrestore(&eda->lock, flags); |
337 | return result; | 322 | return result; |
338 | } | 323 | } |
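
wlp_eda_for_virtual() keeps its contract: walk the EDA cache under the spinlock, run the callback on the entry whose virtual (Ethernet) address matches, copy out the device address, and return -ENODEV on a miss -- the miss is simply no longer printed. The caller shape, taken from the transmit path later in this patch (the goto target and surrounding error handling are assumed):

    result = wlp_eda_for_virtual(&wlp->eda, eth_hdr->h_dest, dst,
                                 wlp_wss_prep_hdr, skb);
    if (unlikely(result < 0))
            goto error;     /* -ENODEV: destination not in the EDA cache */
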
diff --git a/drivers/uwb/wlp/messages.c b/drivers/uwb/wlp/messages.c index a64cb8241713..aa42fcee4c4f 100644 --- a/drivers/uwb/wlp/messages.c +++ b/drivers/uwb/wlp/messages.c | |||
@@ -24,8 +24,7 @@ | |||
24 | */ | 24 | */ |
25 | 25 | ||
26 | #include <linux/wlp.h> | 26 | #include <linux/wlp.h> |
27 | #define D_LOCAL 6 | 27 | |
28 | #include <linux/uwb/debug.h> | ||
29 | #include "wlp-internal.h" | 28 | #include "wlp-internal.h" |
30 | 29 | ||
31 | static | 30 | static |
@@ -105,24 +104,18 @@ static inline void wlp_set_attr_hdr(struct wlp_attr_hdr *hdr, unsigned type, | |||
105 | #define wlp_set(type, type_code, name) \ | 104 | #define wlp_set(type, type_code, name) \ |
106 | static size_t wlp_set_##name(struct wlp_attr_##name *attr, type value) \ | 105 | static size_t wlp_set_##name(struct wlp_attr_##name *attr, type value) \ |
107 | { \ | 106 | { \ |
108 | d_fnstart(6, NULL, "(attribute %p)\n", attr); \ | ||
109 | wlp_set_attr_hdr(&attr->hdr, type_code, \ | 107 | wlp_set_attr_hdr(&attr->hdr, type_code, \ |
110 | sizeof(*attr) - sizeof(struct wlp_attr_hdr)); \ | 108 | sizeof(*attr) - sizeof(struct wlp_attr_hdr)); \ |
111 | attr->name = value; \ | 109 | attr->name = value; \ |
112 | d_dump(6, NULL, attr, sizeof(*attr)); \ | ||
113 | d_fnend(6, NULL, "(attribute %p)\n", attr); \ | ||
114 | return sizeof(*attr); \ | 110 | return sizeof(*attr); \ |
115 | } | 111 | } |
116 | 112 | ||
117 | #define wlp_pset(type, type_code, name) \ | 113 | #define wlp_pset(type, type_code, name) \ |
118 | static size_t wlp_set_##name(struct wlp_attr_##name *attr, type value) \ | 114 | static size_t wlp_set_##name(struct wlp_attr_##name *attr, type value) \ |
119 | { \ | 115 | { \ |
120 | d_fnstart(6, NULL, "(attribute %p)\n", attr); \ | ||
121 | wlp_set_attr_hdr(&attr->hdr, type_code, \ | 116 | wlp_set_attr_hdr(&attr->hdr, type_code, \ |
122 | sizeof(*attr) - sizeof(struct wlp_attr_hdr)); \ | 117 | sizeof(*attr) - sizeof(struct wlp_attr_hdr)); \ |
123 | attr->name = *value; \ | 118 | attr->name = *value; \ |
124 | d_dump(6, NULL, attr, sizeof(*attr)); \ | ||
125 | d_fnend(6, NULL, "(attribute %p)\n", attr); \ | ||
126 | return sizeof(*attr); \ | 119 | return sizeof(*attr); \ |
127 | } | 120 | } |
128 | 121 | ||
@@ -139,11 +132,8 @@ static size_t wlp_set_##name(struct wlp_attr_##name *attr, type value) \ | |||
139 | static size_t wlp_set_##name(struct wlp_attr_##name *attr, type value, \ | 132 | static size_t wlp_set_##name(struct wlp_attr_##name *attr, type value, \ |
140 | size_t len) \ | 133 | size_t len) \ |
141 | { \ | 134 | { \ |
142 | d_fnstart(6, NULL, "(attribute %p)\n", attr); \ | ||
143 | wlp_set_attr_hdr(&attr->hdr, type_code, len); \ | 135 | wlp_set_attr_hdr(&attr->hdr, type_code, len); \ |
144 | memcpy(attr->name, value, len); \ | 136 | memcpy(attr->name, value, len); \ |
145 | d_dump(6, NULL, attr, sizeof(*attr) + len); \ | ||
146 | d_fnend(6, NULL, "(attribute %p)\n", attr); \ | ||
147 | return sizeof(*attr) + len; \ | 137 | return sizeof(*attr) + len; \ |
148 | } | 138 | } |
149 | 139 | ||
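
The three generator macros above stamp out one setter per WLP attribute: wlp_set() for scalars, wlp_pset() for by-pointer values, and the array form for variable-length payloads. With the tracing gone, each generated function is three lines: fill the attribute header, store the payload, report the bytes consumed. For example, an instantiation such as wlp_set(u8, WLP_ATTR_ACC_ENRL, accept_enrl) -- the type-code identifier is assumed -- expands to roughly:

    static size_t wlp_set_accept_enrl(struct wlp_attr_accept_enrl *attr, u8 value)
    {
            /* header = attribute type + payload length */
            wlp_set_attr_hdr(&attr->hdr, WLP_ATTR_ACC_ENRL,
                             sizeof(*attr) - sizeof(struct wlp_attr_hdr));
            attr->accept_enrl = value;      /* scalar payload */
            return sizeof(*attr);           /* bytes written into the frame */
    }
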
@@ -182,7 +172,7 @@ static size_t wlp_set_wss_info(struct wlp_attr_wss_info *attr, | |||
182 | size_t datalen; | 172 | size_t datalen; |
183 | void *ptr = attr->wss_info; | 173 | void *ptr = attr->wss_info; |
184 | size_t used = sizeof(*attr); | 174 | size_t used = sizeof(*attr); |
185 | d_fnstart(6, NULL, "(attribute %p)\n", attr); | 175 | |
186 | datalen = sizeof(struct wlp_wss_info) + strlen(wss->name); | 176 | datalen = sizeof(struct wlp_wss_info) + strlen(wss->name); |
187 | wlp_set_attr_hdr(&attr->hdr, WLP_ATTR_WSS_INFO, datalen); | 177 | wlp_set_attr_hdr(&attr->hdr, WLP_ATTR_WSS_INFO, datalen); |
188 | used = wlp_set_wssid(ptr, &wss->wssid); | 178 | used = wlp_set_wssid(ptr, &wss->wssid); |
@@ -190,9 +180,6 @@ static size_t wlp_set_wss_info(struct wlp_attr_wss_info *attr, | |||
190 | used += wlp_set_accept_enrl(ptr + used, wss->accept_enroll); | 180 | used += wlp_set_accept_enrl(ptr + used, wss->accept_enroll); |
191 | used += wlp_set_wss_sec_status(ptr + used, wss->secure_status); | 181 | used += wlp_set_wss_sec_status(ptr + used, wss->secure_status); |
192 | used += wlp_set_wss_bcast(ptr + used, &wss->bcast); | 182 | used += wlp_set_wss_bcast(ptr + used, &wss->bcast); |
193 | d_dump(6, NULL, attr, sizeof(*attr) + datalen); | ||
194 | d_fnend(6, NULL, "(attribute %p, used %d)\n", | ||
195 | attr, (int)(sizeof(*attr) + used)); | ||
196 | return sizeof(*attr) + used; | 183 | return sizeof(*attr) + used; |
197 | } | 184 | } |
198 | 185 | ||
@@ -414,7 +401,6 @@ static ssize_t wlp_get_wss_info_attrs(struct wlp *wlp, | |||
414 | size_t used = 0; | 401 | size_t used = 0; |
415 | ssize_t result = -EINVAL; | 402 | ssize_t result = -EINVAL; |
416 | 403 | ||
417 | d_printf(6, dev, "WLP: WSS info: Retrieving WSS name\n"); | ||
418 | result = wlp_get_wss_name(wlp, ptr, info->name, buflen); | 404 | result = wlp_get_wss_name(wlp, ptr, info->name, buflen); |
419 | if (result < 0) { | 405 | if (result < 0) { |
420 | dev_err(dev, "WLP: unable to obtain WSS name from " | 406 | dev_err(dev, "WLP: unable to obtain WSS name from " |
@@ -422,7 +408,7 @@ static ssize_t wlp_get_wss_info_attrs(struct wlp *wlp, | |||
422 | goto error_parse; | 408 | goto error_parse; |
423 | } | 409 | } |
424 | used += result; | 410 | used += result; |
425 | d_printf(6, dev, "WLP: WSS info: Retrieving accept enroll\n"); | 411 | |
426 | result = wlp_get_accept_enrl(wlp, ptr + used, &info->accept_enroll, | 412 | result = wlp_get_accept_enrl(wlp, ptr + used, &info->accept_enroll, |
427 | buflen - used); | 413 | buflen - used); |
428 | if (result < 0) { | 414 | if (result < 0) { |
@@ -437,7 +423,7 @@ static ssize_t wlp_get_wss_info_attrs(struct wlp *wlp, | |||
437 | goto error_parse; | 423 | goto error_parse; |
438 | } | 424 | } |
439 | used += result; | 425 | used += result; |
440 | d_printf(6, dev, "WLP: WSS info: Retrieving secure status\n"); | 426 | |
441 | result = wlp_get_wss_sec_status(wlp, ptr + used, &info->sec_status, | 427 | result = wlp_get_wss_sec_status(wlp, ptr + used, &info->sec_status, |
442 | buflen - used); | 428 | buflen - used); |
443 | if (result < 0) { | 429 | if (result < 0) { |
@@ -452,7 +438,7 @@ static ssize_t wlp_get_wss_info_attrs(struct wlp *wlp, | |||
452 | goto error_parse; | 438 | goto error_parse; |
453 | } | 439 | } |
454 | used += result; | 440 | used += result; |
455 | d_printf(6, dev, "WLP: WSS info: Retrieving broadcast\n"); | 441 | |
456 | result = wlp_get_wss_bcast(wlp, ptr + used, &info->bcast, | 442 | result = wlp_get_wss_bcast(wlp, ptr + used, &info->bcast, |
457 | buflen - used); | 443 | buflen - used); |
458 | if (result < 0) { | 444 | if (result < 0) { |
@@ -530,7 +516,7 @@ static ssize_t wlp_get_wss_info(struct wlp *wlp, struct wlp_attr_wss_info *attr, | |||
530 | len = result; | 516 | len = result; |
531 | used = sizeof(*attr); | 517 | used = sizeof(*attr); |
532 | ptr = attr; | 518 | ptr = attr; |
533 | d_printf(6, dev, "WLP: WSS info: Retrieving WSSID\n"); | 519 | |
534 | result = wlp_get_wssid(wlp, ptr + used, wssid, buflen - used); | 520 | result = wlp_get_wssid(wlp, ptr + used, wssid, buflen - used); |
535 | if (result < 0) { | 521 | if (result < 0) { |
536 | dev_err(dev, "WLP: unable to obtain WSSID from WSS info.\n"); | 522 | dev_err(dev, "WLP: unable to obtain WSSID from WSS info.\n"); |
@@ -553,8 +539,6 @@ static ssize_t wlp_get_wss_info(struct wlp *wlp, struct wlp_attr_wss_info *attr, | |||
553 | goto out; | 539 | goto out; |
554 | } | 540 | } |
555 | result = used; | 541 | result = used; |
556 | d_printf(6, dev, "WLP: Successfully parsed WLP information " | ||
557 | "attribute. used %zu bytes\n", used); | ||
558 | out: | 542 | out: |
559 | return result; | 543 | return result; |
560 | } | 544 | } |
@@ -598,8 +582,6 @@ static ssize_t wlp_get_all_wss_info(struct wlp *wlp, | |||
598 | struct wlp_wssid_e *wssid_e; | 582 | struct wlp_wssid_e *wssid_e; |
599 | char buf[WLP_WSS_UUID_STRSIZE]; | 583 | char buf[WLP_WSS_UUID_STRSIZE]; |
600 | 584 | ||
601 | d_fnstart(6, dev, "wlp %p, attr %p, neighbor %p, wss %p, buflen %d \n", | ||
602 | wlp, attr, neighbor, wss, (int)buflen); | ||
603 | if (buflen < 0) | 585 | if (buflen < 0) |
604 | goto out; | 586 | goto out; |
605 | 587 | ||
@@ -638,8 +620,7 @@ static ssize_t wlp_get_all_wss_info(struct wlp *wlp, | |||
638 | wss->accept_enroll = wss_info.accept_enroll; | 620 | wss->accept_enroll = wss_info.accept_enroll; |
639 | wss->state = WLP_WSS_STATE_PART_ENROLLED; | 621 | wss->state = WLP_WSS_STATE_PART_ENROLLED; |
640 | wlp_wss_uuid_print(buf, sizeof(buf), &wssid); | 622 | wlp_wss_uuid_print(buf, sizeof(buf), &wssid); |
641 | d_printf(2, dev, "WLP: Found WSS %s. Enrolling.\n", | 623 | dev_dbg(dev, "WLP: Found WSS %s. Enrolling.\n", buf); |
642 | buf); | ||
643 | } else { | 624 | } else { |
644 | wssid_e = wlp_create_wssid_e(wlp, neighbor); | 625 | wssid_e = wlp_create_wssid_e(wlp, neighbor); |
645 | if (wssid_e == NULL) { | 626 | if (wssid_e == NULL) { |
@@ -660,9 +641,6 @@ error_parse: | |||
660 | if (result < 0 && !enroll) /* this was a discovery */ | 641 | if (result < 0 && !enroll) /* this was a discovery */ |
661 | wlp_remove_neighbor_tmp_info(neighbor); | 642 | wlp_remove_neighbor_tmp_info(neighbor); |
662 | out: | 643 | out: |
663 | d_fnend(6, dev, "wlp %p, attr %p, neighbor %p, wss %p, buflen %d, " | ||
664 | "result %d \n", wlp, attr, neighbor, wss, (int)buflen, | ||
665 | (int)result); | ||
666 | return result; | 644 | return result; |
667 | 645 | ||
668 | } | 646 | } |
@@ -718,7 +696,6 @@ static int wlp_build_assoc_d1(struct wlp *wlp, struct wlp_wss *wss, | |||
718 | struct sk_buff *_skb; | 696 | struct sk_buff *_skb; |
719 | void *d1_itr; | 697 | void *d1_itr; |
720 | 698 | ||
721 | d_fnstart(6, dev, "wlp %p\n", wlp); | ||
722 | if (wlp->dev_info == NULL) { | 699 | if (wlp->dev_info == NULL) { |
723 | result = __wlp_setup_device_info(wlp); | 700 | result = __wlp_setup_device_info(wlp); |
724 | if (result < 0) { | 701 | if (result < 0) { |
@@ -728,24 +705,6 @@ static int wlp_build_assoc_d1(struct wlp *wlp, struct wlp_wss *wss, | |||
728 | } | 705 | } |
729 | } | 706 | } |
730 | info = wlp->dev_info; | 707 | info = wlp->dev_info; |
731 | d_printf(6, dev, "Local properties:\n" | ||
732 | "Device name (%d bytes): %s\n" | ||
733 | "Model name (%d bytes): %s\n" | ||
734 | "Manufacturer (%d bytes): %s\n" | ||
735 | "Model number (%d bytes): %s\n" | ||
736 | "Serial number (%d bytes): %s\n" | ||
737 | "Primary device type: \n" | ||
738 | " Category: %d \n" | ||
739 | " OUI: %02x:%02x:%02x \n" | ||
740 | " OUI Subdivision: %u \n", | ||
741 | (int)strlen(info->name), info->name, | ||
742 | (int)strlen(info->model_name), info->model_name, | ||
743 | (int)strlen(info->manufacturer), info->manufacturer, | ||
744 | (int)strlen(info->model_nr), info->model_nr, | ||
745 | (int)strlen(info->serial), info->serial, | ||
746 | info->prim_dev_type.category, | ||
747 | info->prim_dev_type.OUI[0], info->prim_dev_type.OUI[1], | ||
748 | info->prim_dev_type.OUI[2], info->prim_dev_type.OUIsubdiv); | ||
749 | _skb = dev_alloc_skb(sizeof(*_d1) | 708 | _skb = dev_alloc_skb(sizeof(*_d1) |
750 | + sizeof(struct wlp_attr_uuid_e) | 709 | + sizeof(struct wlp_attr_uuid_e) |
751 | + sizeof(struct wlp_attr_wss_sel_mthd) | 710 | + sizeof(struct wlp_attr_wss_sel_mthd) |
@@ -768,7 +727,6 @@ static int wlp_build_assoc_d1(struct wlp *wlp, struct wlp_wss *wss, | |||
768 | goto error; | 727 | goto error; |
769 | } | 728 | } |
770 | _d1 = (void *) _skb->data; | 729 | _d1 = (void *) _skb->data; |
771 | d_printf(6, dev, "D1 starts at %p \n", _d1); | ||
772 | _d1->hdr.mux_hdr = cpu_to_le16(WLP_PROTOCOL_ID); | 730 | _d1->hdr.mux_hdr = cpu_to_le16(WLP_PROTOCOL_ID); |
773 | _d1->hdr.type = WLP_FRAME_ASSOCIATION; | 731 | _d1->hdr.type = WLP_FRAME_ASSOCIATION; |
774 | _d1->type = WLP_ASSOC_D1; | 732 | _d1->type = WLP_ASSOC_D1; |
@@ -791,25 +749,8 @@ static int wlp_build_assoc_d1(struct wlp *wlp, struct wlp_wss *wss, | |||
791 | used += wlp_set_prim_dev_type(d1_itr + used, &info->prim_dev_type); | 749 | used += wlp_set_prim_dev_type(d1_itr + used, &info->prim_dev_type); |
792 | used += wlp_set_wlp_assc_err(d1_itr + used, WLP_ASSOC_ERROR_NONE); | 750 | used += wlp_set_wlp_assc_err(d1_itr + used, WLP_ASSOC_ERROR_NONE); |
793 | skb_put(_skb, sizeof(*_d1) + used); | 751 | skb_put(_skb, sizeof(*_d1) + used); |
794 | d_printf(6, dev, "D1 message:\n"); | ||
795 | d_dump(6, dev, _d1, sizeof(*_d1) | ||
796 | + sizeof(struct wlp_attr_uuid_e) | ||
797 | + sizeof(struct wlp_attr_wss_sel_mthd) | ||
798 | + sizeof(struct wlp_attr_dev_name) | ||
799 | + strlen(info->name) | ||
800 | + sizeof(struct wlp_attr_manufacturer) | ||
801 | + strlen(info->manufacturer) | ||
802 | + sizeof(struct wlp_attr_model_name) | ||
803 | + strlen(info->model_name) | ||
804 | + sizeof(struct wlp_attr_model_nr) | ||
805 | + strlen(info->model_nr) | ||
806 | + sizeof(struct wlp_attr_serial) | ||
807 | + strlen(info->serial) | ||
808 | + sizeof(struct wlp_attr_prim_dev_type) | ||
809 | + sizeof(struct wlp_attr_wlp_assc_err)); | ||
810 | *skb = _skb; | 752 | *skb = _skb; |
811 | error: | 753 | error: |
812 | d_fnend(6, dev, "wlp %p, result = %d\n", wlp, result); | ||
813 | return result; | 754 | return result; |
814 | } | 755 | } |
815 | 756 | ||
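
Minus the dumped "Local properties" banner, wlp_build_assoc_d1() keeps the standard frame-assembly pattern: allocate an skb for the worst case, fill the fixed association header, append attributes through the generated setters while accumulating `used`, then commit the real length with skb_put(). A condensed sketch -- most attributes are elided, `mem_needed` stands in for the inline sizeof sum, and the uuid_e setter name is assumed from the generator macros:

    _skb = dev_alloc_skb(mem_needed);           /* worst-case frame size */
    if (_skb == NULL)
            goto error;
    _d1 = (void *) _skb->data;
    _d1->hdr.mux_hdr = cpu_to_le16(WLP_PROTOCOL_ID);
    _d1->hdr.type = WLP_FRAME_ASSOCIATION;
    _d1->type = WLP_ASSOC_D1;

    used  = wlp_set_uuid_e(d1_itr, &wlp->uuid);               /* assumed name */
    used += wlp_set_prim_dev_type(d1_itr + used, &info->prim_dev_type);
    used += wlp_set_wlp_assc_err(d1_itr + used, WLP_ASSOC_ERROR_NONE);
    skb_put(_skb, sizeof(*_d1) + used);         /* commit the actual length */
    *skb = _skb;
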
@@ -837,7 +778,6 @@ int wlp_build_assoc_d2(struct wlp *wlp, struct wlp_wss *wss, | |||
837 | void *d2_itr; | 778 | void *d2_itr; |
838 | size_t mem_needed; | 779 | size_t mem_needed; |
839 | 780 | ||
840 | d_fnstart(6, dev, "wlp %p\n", wlp); | ||
841 | if (wlp->dev_info == NULL) { | 781 | if (wlp->dev_info == NULL) { |
842 | result = __wlp_setup_device_info(wlp); | 782 | result = __wlp_setup_device_info(wlp); |
843 | if (result < 0) { | 783 | if (result < 0) { |
@@ -847,24 +787,6 @@ int wlp_build_assoc_d2(struct wlp *wlp, struct wlp_wss *wss, | |||
847 | } | 787 | } |
848 | } | 788 | } |
849 | info = wlp->dev_info; | 789 | info = wlp->dev_info; |
850 | d_printf(6, dev, "Local properties:\n" | ||
851 | "Device name (%d bytes): %s\n" | ||
852 | "Model name (%d bytes): %s\n" | ||
853 | "Manufacturer (%d bytes): %s\n" | ||
854 | "Model number (%d bytes): %s\n" | ||
855 | "Serial number (%d bytes): %s\n" | ||
856 | "Primary device type: \n" | ||
857 | " Category: %d \n" | ||
858 | " OUI: %02x:%02x:%02x \n" | ||
859 | " OUI Subdivision: %u \n", | ||
860 | (int)strlen(info->name), info->name, | ||
861 | (int)strlen(info->model_name), info->model_name, | ||
862 | (int)strlen(info->manufacturer), info->manufacturer, | ||
863 | (int)strlen(info->model_nr), info->model_nr, | ||
864 | (int)strlen(info->serial), info->serial, | ||
865 | info->prim_dev_type.category, | ||
866 | info->prim_dev_type.OUI[0], info->prim_dev_type.OUI[1], | ||
867 | info->prim_dev_type.OUI[2], info->prim_dev_type.OUIsubdiv); | ||
868 | mem_needed = sizeof(*_d2) | 790 | mem_needed = sizeof(*_d2) |
869 | + sizeof(struct wlp_attr_uuid_e) | 791 | + sizeof(struct wlp_attr_uuid_e) |
870 | + sizeof(struct wlp_attr_uuid_r) | 792 | + sizeof(struct wlp_attr_uuid_r) |
@@ -892,7 +814,6 @@ int wlp_build_assoc_d2(struct wlp *wlp, struct wlp_wss *wss, | |||
892 | goto error; | 814 | goto error; |
893 | } | 815 | } |
894 | _d2 = (void *) _skb->data; | 816 | _d2 = (void *) _skb->data; |
895 | d_printf(6, dev, "D2 starts at %p \n", _d2); | ||
896 | _d2->hdr.mux_hdr = cpu_to_le16(WLP_PROTOCOL_ID); | 817 | _d2->hdr.mux_hdr = cpu_to_le16(WLP_PROTOCOL_ID); |
897 | _d2->hdr.type = WLP_FRAME_ASSOCIATION; | 818 | _d2->hdr.type = WLP_FRAME_ASSOCIATION; |
898 | _d2->type = WLP_ASSOC_D2; | 819 | _d2->type = WLP_ASSOC_D2; |
@@ -917,11 +838,8 @@ int wlp_build_assoc_d2(struct wlp *wlp, struct wlp_wss *wss, | |||
917 | used += wlp_set_prim_dev_type(d2_itr + used, &info->prim_dev_type); | 838 | used += wlp_set_prim_dev_type(d2_itr + used, &info->prim_dev_type); |
918 | used += wlp_set_wlp_assc_err(d2_itr + used, WLP_ASSOC_ERROR_NONE); | 839 | used += wlp_set_wlp_assc_err(d2_itr + used, WLP_ASSOC_ERROR_NONE); |
919 | skb_put(_skb, sizeof(*_d2) + used); | 840 | skb_put(_skb, sizeof(*_d2) + used); |
920 | d_printf(6, dev, "D2 message:\n"); | ||
921 | d_dump(6, dev, _d2, mem_needed); | ||
922 | *skb = _skb; | 841 | *skb = _skb; |
923 | error: | 842 | error: |
924 | d_fnend(6, dev, "wlp %p, result = %d\n", wlp, result); | ||
925 | return result; | 843 | return result; |
926 | } | 844 | } |
927 | 845 | ||
@@ -947,7 +865,6 @@ int wlp_build_assoc_f0(struct wlp *wlp, struct sk_buff **skb, | |||
947 | struct sk_buff *_skb; | 865 | struct sk_buff *_skb; |
948 | struct wlp_nonce tmp; | 866 | struct wlp_nonce tmp; |
949 | 867 | ||
950 | d_fnstart(6, dev, "wlp %p\n", wlp); | ||
951 | _skb = dev_alloc_skb(sizeof(*f0)); | 868 | _skb = dev_alloc_skb(sizeof(*f0)); |
952 | if (_skb == NULL) { | 869 | if (_skb == NULL) { |
953 | dev_err(dev, "WLP: Unable to allocate memory for F0 " | 870 | dev_err(dev, "WLP: Unable to allocate memory for F0 " |
@@ -955,7 +872,6 @@ int wlp_build_assoc_f0(struct wlp *wlp, struct sk_buff **skb, | |||
955 | goto error_alloc; | 872 | goto error_alloc; |
956 | } | 873 | } |
957 | f0 = (void *) _skb->data; | 874 | f0 = (void *) _skb->data; |
958 | d_printf(6, dev, "F0 starts at %p \n", f0); | ||
959 | f0->f0_hdr.hdr.mux_hdr = cpu_to_le16(WLP_PROTOCOL_ID); | 875 | f0->f0_hdr.hdr.mux_hdr = cpu_to_le16(WLP_PROTOCOL_ID); |
960 | f0->f0_hdr.hdr.type = WLP_FRAME_ASSOCIATION; | 876 | f0->f0_hdr.hdr.type = WLP_FRAME_ASSOCIATION; |
961 | f0->f0_hdr.type = WLP_ASSOC_F0; | 877 | f0->f0_hdr.type = WLP_ASSOC_F0; |
@@ -969,7 +885,6 @@ int wlp_build_assoc_f0(struct wlp *wlp, struct sk_buff **skb, | |||
969 | *skb = _skb; | 885 | *skb = _skb; |
970 | result = 0; | 886 | result = 0; |
971 | error_alloc: | 887 | error_alloc: |
972 | d_fnend(6, dev, "wlp %p, result %d \n", wlp, result); | ||
973 | return result; | 888 | return result; |
974 | } | 889 | } |
975 | 890 | ||
@@ -1242,12 +1157,9 @@ void wlp_handle_d1_frame(struct work_struct *ws) | |||
1242 | enum wlp_wss_sel_mthd sel_mthd = 0; | 1157 | enum wlp_wss_sel_mthd sel_mthd = 0; |
1243 | struct wlp_device_info dev_info; | 1158 | struct wlp_device_info dev_info; |
1244 | enum wlp_assc_error assc_err; | 1159 | enum wlp_assc_error assc_err; |
1245 | char uuid[WLP_WSS_UUID_STRSIZE]; | ||
1246 | struct sk_buff *resp = NULL; | 1160 | struct sk_buff *resp = NULL; |
1247 | 1161 | ||
1248 | /* Parse D1 frame */ | 1162 | /* Parse D1 frame */ |
1249 | d_fnstart(6, dev, "WLP: handle D1 frame. wlp = %p, skb = %p\n", | ||
1250 | wlp, skb); | ||
1251 | mutex_lock(&wss->mutex); | 1163 | mutex_lock(&wss->mutex); |
1252 | mutex_lock(&wlp->mutex); /* to access wlp->uuid */ | 1164 | mutex_lock(&wlp->mutex); /* to access wlp->uuid */ |
1253 | memset(&dev_info, 0, sizeof(dev_info)); | 1165 | memset(&dev_info, 0, sizeof(dev_info)); |
@@ -1258,30 +1170,6 @@ void wlp_handle_d1_frame(struct work_struct *ws) | |||
1258 | kfree_skb(skb); | 1170 | kfree_skb(skb); |
1259 | goto out; | 1171 | goto out; |
1260 | } | 1172 | } |
1261 | wlp_wss_uuid_print(uuid, sizeof(uuid), &uuid_e); | ||
1262 | d_printf(6, dev, "From D1 frame:\n" | ||
1263 | "UUID-E: %s\n" | ||
1264 | "Selection method: %d\n" | ||
1265 | "Device name (%d bytes): %s\n" | ||
1266 | "Model name (%d bytes): %s\n" | ||
1267 | "Manufacturer (%d bytes): %s\n" | ||
1268 | "Model number (%d bytes): %s\n" | ||
1269 | "Serial number (%d bytes): %s\n" | ||
1270 | "Primary device type: \n" | ||
1271 | " Category: %d \n" | ||
1272 | " OUI: %02x:%02x:%02x \n" | ||
1273 | " OUI Subdivision: %u \n", | ||
1274 | uuid, sel_mthd, | ||
1275 | (int)strlen(dev_info.name), dev_info.name, | ||
1276 | (int)strlen(dev_info.model_name), dev_info.model_name, | ||
1277 | (int)strlen(dev_info.manufacturer), dev_info.manufacturer, | ||
1278 | (int)strlen(dev_info.model_nr), dev_info.model_nr, | ||
1279 | (int)strlen(dev_info.serial), dev_info.serial, | ||
1280 | dev_info.prim_dev_type.category, | ||
1281 | dev_info.prim_dev_type.OUI[0], | ||
1282 | dev_info.prim_dev_type.OUI[1], | ||
1283 | dev_info.prim_dev_type.OUI[2], | ||
1284 | dev_info.prim_dev_type.OUIsubdiv); | ||
1285 | 1173 | ||
1286 | kfree_skb(skb); | 1174 | kfree_skb(skb); |
1287 | if (!wlp_uuid_is_set(&wlp->uuid)) { | 1175 | if (!wlp_uuid_is_set(&wlp->uuid)) { |
@@ -1316,7 +1204,6 @@ out: | |||
1316 | kfree(frame_ctx); | 1204 | kfree(frame_ctx); |
1317 | mutex_unlock(&wlp->mutex); | 1205 | mutex_unlock(&wlp->mutex); |
1318 | mutex_unlock(&wss->mutex); | 1206 | mutex_unlock(&wss->mutex); |
1319 | d_fnend(6, dev, "WLP: handle D1 frame. wlp = %p\n", wlp); | ||
1320 | } | 1207 | } |
1321 | 1208 | ||
1322 | /** | 1209 | /** |
@@ -1546,10 +1433,8 @@ int wlp_parse_c3c4_frame(struct wlp *wlp, struct sk_buff *skb, | |||
1546 | void *ptr = skb->data; | 1433 | void *ptr = skb->data; |
1547 | size_t len = skb->len; | 1434 | size_t len = skb->len; |
1548 | size_t used; | 1435 | size_t used; |
1549 | char buf[WLP_WSS_UUID_STRSIZE]; | ||
1550 | struct wlp_frame_assoc *assoc = ptr; | 1436 | struct wlp_frame_assoc *assoc = ptr; |
1551 | 1437 | ||
1552 | d_fnstart(6, dev, "wlp %p, skb %p \n", wlp, skb); | ||
1553 | used = sizeof(*assoc); | 1438 | used = sizeof(*assoc); |
1554 | result = wlp_get_wssid(wlp, ptr + used, wssid, len - used); | 1439 | result = wlp_get_wssid(wlp, ptr + used, wssid, len - used); |
1555 | if (result < 0) { | 1440 | if (result < 0) { |
@@ -1572,14 +1457,7 @@ int wlp_parse_c3c4_frame(struct wlp *wlp, struct sk_buff *skb, | |||
1572 | wlp_assoc_frame_str(assoc->type)); | 1457 | wlp_assoc_frame_str(assoc->type)); |
1573 | goto error_parse; | 1458 | goto error_parse; |
1574 | } | 1459 | } |
1575 | wlp_wss_uuid_print(buf, sizeof(buf), wssid); | ||
1576 | d_printf(6, dev, "WLP: parsed: WSSID %s, tag 0x%02x, virt " | ||
1577 | "%02x:%02x:%02x:%02x:%02x:%02x \n", buf, *tag, | ||
1578 | virt_addr->data[0], virt_addr->data[1], virt_addr->data[2], | ||
1579 | virt_addr->data[3], virt_addr->data[4], virt_addr->data[5]); | ||
1580 | |||
1581 | error_parse: | 1460 | error_parse: |
1582 | d_fnend(6, dev, "wlp %p, skb %p, result = %d \n", wlp, skb, result); | ||
1583 | return result; | 1461 | return result; |
1584 | } | 1462 | } |
1585 | 1463 | ||
@@ -1600,7 +1478,6 @@ int wlp_build_assoc_c1c2(struct wlp *wlp, struct wlp_wss *wss, | |||
1600 | } *c; | 1478 | } *c; |
1601 | struct sk_buff *_skb; | 1479 | struct sk_buff *_skb; |
1602 | 1480 | ||
1603 | d_fnstart(6, dev, "wlp %p, wss %p \n", wlp, wss); | ||
1604 | _skb = dev_alloc_skb(sizeof(*c)); | 1481 | _skb = dev_alloc_skb(sizeof(*c)); |
1605 | if (_skb == NULL) { | 1482 | if (_skb == NULL) { |
1606 | dev_err(dev, "WLP: Unable to allocate memory for C1/C2 " | 1483 | dev_err(dev, "WLP: Unable to allocate memory for C1/C2 " |
@@ -1608,7 +1485,6 @@ int wlp_build_assoc_c1c2(struct wlp *wlp, struct wlp_wss *wss, | |||
1608 | goto error_alloc; | 1485 | goto error_alloc; |
1609 | } | 1486 | } |
1610 | c = (void *) _skb->data; | 1487 | c = (void *) _skb->data; |
1611 | d_printf(6, dev, "C1/C2 starts at %p \n", c); | ||
1612 | c->c_hdr.hdr.mux_hdr = cpu_to_le16(WLP_PROTOCOL_ID); | 1488 | c->c_hdr.hdr.mux_hdr = cpu_to_le16(WLP_PROTOCOL_ID); |
1613 | c->c_hdr.hdr.type = WLP_FRAME_ASSOCIATION; | 1489 | c->c_hdr.hdr.type = WLP_FRAME_ASSOCIATION; |
1614 | c->c_hdr.type = type; | 1490 | c->c_hdr.type = type; |
@@ -1616,12 +1492,9 @@ int wlp_build_assoc_c1c2(struct wlp *wlp, struct wlp_wss *wss, | |||
1616 | wlp_set_msg_type(&c->c_hdr.msg_type, type); | 1492 | wlp_set_msg_type(&c->c_hdr.msg_type, type); |
1617 | wlp_set_wssid(&c->wssid, &wss->wssid); | 1493 | wlp_set_wssid(&c->wssid, &wss->wssid); |
1618 | skb_put(_skb, sizeof(*c)); | 1494 | skb_put(_skb, sizeof(*c)); |
1619 | d_printf(6, dev, "C1/C2 message:\n"); | ||
1620 | d_dump(6, dev, c, sizeof(*c)); | ||
1621 | *skb = _skb; | 1495 | *skb = _skb; |
1622 | result = 0; | 1496 | result = 0; |
1623 | error_alloc: | 1497 | error_alloc: |
1624 | d_fnend(6, dev, "wlp %p, wss %p, result %d \n", wlp, wss, result); | ||
1625 | return result; | 1498 | return result; |
1626 | } | 1499 | } |
1627 | 1500 | ||
@@ -1660,7 +1533,6 @@ int wlp_build_assoc_c3c4(struct wlp *wlp, struct wlp_wss *wss, | |||
1660 | } *c; | 1533 | } *c; |
1661 | struct sk_buff *_skb; | 1534 | struct sk_buff *_skb; |
1662 | 1535 | ||
1663 | d_fnstart(6, dev, "wlp %p, wss %p \n", wlp, wss); | ||
1664 | _skb = dev_alloc_skb(sizeof(*c)); | 1536 | _skb = dev_alloc_skb(sizeof(*c)); |
1665 | if (_skb == NULL) { | 1537 | if (_skb == NULL) { |
1666 | dev_err(dev, "WLP: Unable to allocate memory for C3/C4 " | 1538 | dev_err(dev, "WLP: Unable to allocate memory for C3/C4 " |
@@ -1668,7 +1540,6 @@ int wlp_build_assoc_c3c4(struct wlp *wlp, struct wlp_wss *wss, | |||
1668 | goto error_alloc; | 1540 | goto error_alloc; |
1669 | } | 1541 | } |
1670 | c = (void *) _skb->data; | 1542 | c = (void *) _skb->data; |
1671 | d_printf(6, dev, "C3/C4 starts at %p \n", c); | ||
1672 | c->c_hdr.hdr.mux_hdr = cpu_to_le16(WLP_PROTOCOL_ID); | 1543 | c->c_hdr.hdr.mux_hdr = cpu_to_le16(WLP_PROTOCOL_ID); |
1673 | c->c_hdr.hdr.type = WLP_FRAME_ASSOCIATION; | 1544 | c->c_hdr.hdr.type = WLP_FRAME_ASSOCIATION; |
1674 | c->c_hdr.type = type; | 1545 | c->c_hdr.type = type; |
@@ -1678,12 +1549,9 @@ int wlp_build_assoc_c3c4(struct wlp *wlp, struct wlp_wss *wss, | |||
1678 | wlp_set_wss_tag(&c->wss_tag, wss->tag); | 1549 | wlp_set_wss_tag(&c->wss_tag, wss->tag); |
1679 | wlp_set_wss_virt(&c->wss_virt, &wss->virtual_addr); | 1550 | wlp_set_wss_virt(&c->wss_virt, &wss->virtual_addr); |
1680 | skb_put(_skb, sizeof(*c)); | 1551 | skb_put(_skb, sizeof(*c)); |
1681 | d_printf(6, dev, "C3/C4 message:\n"); | ||
1682 | d_dump(6, dev, c, sizeof(*c)); | ||
1683 | *skb = _skb; | 1552 | *skb = _skb; |
1684 | result = 0; | 1553 | result = 0; |
1685 | error_alloc: | 1554 | error_alloc: |
1686 | d_fnend(6, dev, "wlp %p, wss %p, result %d \n", wlp, wss, result); | ||
1687 | return result; | 1555 | return result; |
1688 | } | 1556 | } |
1689 | 1557 | ||
@@ -1709,10 +1577,7 @@ static int wlp_send_assoc_##type(struct wlp *wlp, struct wlp_wss *wss, \ | |||
1709 | struct device *dev = &wlp->rc->uwb_dev.dev; \ | 1577 | struct device *dev = &wlp->rc->uwb_dev.dev; \ |
1710 | int result; \ | 1578 | int result; \ |
1711 | struct sk_buff *skb = NULL; \ | 1579 | struct sk_buff *skb = NULL; \ |
1712 | d_fnstart(6, dev, "wlp %p, wss %p, neighbor: %02x:%02x\n", \ | 1580 | \ |
1713 | wlp, wss, dev_addr->data[1], dev_addr->data[0]); \ | ||
1714 | d_printf(6, dev, "WLP: Constructing %s frame. \n", \ | ||
1715 | wlp_assoc_frame_str(id)); \ | ||
1716 | /* Build the frame */ \ | 1581 | /* Build the frame */ \ |
1717 | result = wlp_build_assoc_##type(wlp, wss, &skb); \ | 1582 | result = wlp_build_assoc_##type(wlp, wss, &skb); \ |
1718 | if (result < 0) { \ | 1583 | if (result < 0) { \ |
@@ -1721,9 +1586,6 @@ static int wlp_send_assoc_##type(struct wlp *wlp, struct wlp_wss *wss, \ | |||
1721 | goto error_build_assoc; \ | 1586 | goto error_build_assoc; \ |
1722 | } \ | 1587 | } \ |
1723 | /* Send the frame */ \ | 1588 | /* Send the frame */ \ |
1724 | d_printf(6, dev, "Transmitting %s frame to %02x:%02x \n", \ | ||
1725 | wlp_assoc_frame_str(id), \ | ||
1726 | dev_addr->data[1], dev_addr->data[0]); \ | ||
1727 | BUG_ON(wlp->xmit_frame == NULL); \ | 1589 | BUG_ON(wlp->xmit_frame == NULL); \ |
1728 | result = wlp->xmit_frame(wlp, skb, dev_addr); \ | 1590 | result = wlp->xmit_frame(wlp, skb, dev_addr); \ |
1729 | if (result < 0) { \ | 1591 | if (result < 0) { \ |
@@ -1740,8 +1602,6 @@ error_xmit: \ | |||
1740 | /* We could try again ... */ \ | 1602 | /* We could try again ... */ \ |
1741 | dev_kfree_skb_any(skb);/*we need to free if tx fails*/ \ | 1603 | dev_kfree_skb_any(skb);/*we need to free if tx fails*/ \ |
1742 | error_build_assoc: \ | 1604 | error_build_assoc: \ |
1743 | d_fnend(6, dev, "wlp %p, wss %p, neighbor: %02x:%02x\n", \ | ||
1744 | wlp, wss, dev_addr->data[1], dev_addr->data[0]); \ | ||
1745 | return result; \ | 1605 | return result; \ |
1746 | } | 1606 | } |
1747 | 1607 | ||
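
The wlp_send_assoc() generator above produces one wlp_send_assoc_<type>() per association frame, with exactly two failure exits: if the build fails nothing was allocated, and if wlp->xmit_frame() fails the sender still owns the skb and must dev_kfree_skb_any() it itself, since ownership only transfers on a successful transmit. Instantiation is one line per frame type (the invocation form is assumed):

    wlp_send_assoc(d1, WLP_ASSOC_D1);     /* emits wlp_send_assoc_d1() */
    wlp_send_assoc(c1, WLP_ASSOC_C1);     /* emits wlp_send_assoc_c1() */
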
@@ -1794,12 +1654,9 @@ void wlp_handle_c1_frame(struct work_struct *ws) | |||
1794 | struct uwb_dev_addr *src = &frame_ctx->src; | 1654 | struct uwb_dev_addr *src = &frame_ctx->src; |
1795 | int result; | 1655 | int result; |
1796 | struct wlp_uuid wssid; | 1656 | struct wlp_uuid wssid; |
1797 | char buf[WLP_WSS_UUID_STRSIZE]; | ||
1798 | struct sk_buff *resp = NULL; | 1657 | struct sk_buff *resp = NULL; |
1799 | 1658 | ||
1800 | /* Parse C1 frame */ | 1659 | /* Parse C1 frame */ |
1801 | d_fnstart(6, dev, "WLP: handle C1 frame. wlp = %p, c1 = %p\n", | ||
1802 | wlp, c1); | ||
1803 | mutex_lock(&wss->mutex); | 1660 | mutex_lock(&wss->mutex); |
1804 | result = wlp_get_wssid(wlp, (void *)c1 + sizeof(*c1), &wssid, | 1661 | result = wlp_get_wssid(wlp, (void *)c1 + sizeof(*c1), &wssid, |
1805 | len - sizeof(*c1)); | 1662 | len - sizeof(*c1)); |
@@ -1807,12 +1664,8 @@ void wlp_handle_c1_frame(struct work_struct *ws) | |||
1807 | dev_err(dev, "WLP: unable to obtain WSSID from C1 frame.\n"); | 1664 | dev_err(dev, "WLP: unable to obtain WSSID from C1 frame.\n"); |
1808 | goto out; | 1665 | goto out; |
1809 | } | 1666 | } |
1810 | wlp_wss_uuid_print(buf, sizeof(buf), &wssid); | ||
1811 | d_printf(6, dev, "Received C1 frame with WSSID %s \n", buf); | ||
1812 | if (!memcmp(&wssid, &wss->wssid, sizeof(wssid)) | 1667 | if (!memcmp(&wssid, &wss->wssid, sizeof(wssid)) |
1813 | && wss->state == WLP_WSS_STATE_ACTIVE) { | 1668 | && wss->state == WLP_WSS_STATE_ACTIVE) { |
1814 | d_printf(6, dev, "WSSID from C1 frame is known locally " | ||
1815 | "and is active\n"); | ||
1816 | /* Construct C2 frame */ | 1669 | /* Construct C2 frame */ |
1817 | result = wlp_build_assoc_c2(wlp, wss, &resp); | 1670 | result = wlp_build_assoc_c2(wlp, wss, &resp); |
1818 | if (result < 0) { | 1671 | if (result < 0) { |
@@ -1820,8 +1673,6 @@ void wlp_handle_c1_frame(struct work_struct *ws) | |||
1820 | goto out; | 1673 | goto out; |
1821 | } | 1674 | } |
1822 | } else { | 1675 | } else { |
1823 | d_printf(6, dev, "WSSID from C1 frame is not known locally " | ||
1824 | "or is not active\n"); | ||
1825 | /* Construct F0 frame */ | 1676 | /* Construct F0 frame */ |
1826 | result = wlp_build_assoc_f0(wlp, &resp, WLP_ASSOC_ERROR_INV); | 1677 | result = wlp_build_assoc_f0(wlp, &resp, WLP_ASSOC_ERROR_INV); |
1827 | if (result < 0) { | 1678 | if (result < 0) { |
@@ -1830,8 +1681,6 @@ void wlp_handle_c1_frame(struct work_struct *ws) | |||
1830 | } | 1681 | } |
1831 | } | 1682 | } |
1832 | /* Send C2 frame */ | 1683 | /* Send C2 frame */ |
1833 | d_printf(6, dev, "Transmitting response (C2/F0) frame to %02x:%02x \n", | ||
1834 | src->data[1], src->data[0]); | ||
1835 | BUG_ON(wlp->xmit_frame == NULL); | 1684 | BUG_ON(wlp->xmit_frame == NULL); |
1836 | result = wlp->xmit_frame(wlp, resp, src); | 1685 | result = wlp->xmit_frame(wlp, resp, src); |
1837 | if (result < 0) { | 1686 | if (result < 0) { |
@@ -1846,7 +1695,6 @@ out: | |||
1846 | kfree_skb(frame_ctx->skb); | 1695 | kfree_skb(frame_ctx->skb); |
1847 | kfree(frame_ctx); | 1696 | kfree(frame_ctx); |
1848 | mutex_unlock(&wss->mutex); | 1697 | mutex_unlock(&wss->mutex); |
1849 | d_fnend(6, dev, "WLP: handle C1 frame. wlp = %p\n", wlp); | ||
1850 | } | 1698 | } |
1851 | 1699 | ||
1852 | /** | 1700 | /** |
@@ -1868,27 +1716,20 @@ void wlp_handle_c3_frame(struct work_struct *ws) | |||
1868 | struct sk_buff *skb = frame_ctx->skb; | 1716 | struct sk_buff *skb = frame_ctx->skb; |
1869 | struct uwb_dev_addr *src = &frame_ctx->src; | 1717 | struct uwb_dev_addr *src = &frame_ctx->src; |
1870 | int result; | 1718 | int result; |
1871 | char buf[WLP_WSS_UUID_STRSIZE]; | ||
1872 | struct sk_buff *resp = NULL; | 1719 | struct sk_buff *resp = NULL; |
1873 | struct wlp_uuid wssid; | 1720 | struct wlp_uuid wssid; |
1874 | u8 tag; | 1721 | u8 tag; |
1875 | struct uwb_mac_addr virt_addr; | 1722 | struct uwb_mac_addr virt_addr; |
1876 | 1723 | ||
1877 | /* Parse C3 frame */ | 1724 | /* Parse C3 frame */ |
1878 | d_fnstart(6, dev, "WLP: handle C3 frame. wlp = %p, skb = %p\n", | ||
1879 | wlp, skb); | ||
1880 | mutex_lock(&wss->mutex); | 1725 | mutex_lock(&wss->mutex); |
1881 | result = wlp_parse_c3c4_frame(wlp, skb, &wssid, &tag, &virt_addr); | 1726 | result = wlp_parse_c3c4_frame(wlp, skb, &wssid, &tag, &virt_addr); |
1882 | if (result < 0) { | 1727 | if (result < 0) { |
1883 | dev_err(dev, "WLP: unable to obtain values from C3 frame.\n"); | 1728 | dev_err(dev, "WLP: unable to obtain values from C3 frame.\n"); |
1884 | goto out; | 1729 | goto out; |
1885 | } | 1730 | } |
1886 | wlp_wss_uuid_print(buf, sizeof(buf), &wssid); | ||
1887 | d_printf(6, dev, "Received C3 frame with WSSID %s \n", buf); | ||
1888 | if (!memcmp(&wssid, &wss->wssid, sizeof(wssid)) | 1731 | if (!memcmp(&wssid, &wss->wssid, sizeof(wssid)) |
1889 | && wss->state >= WLP_WSS_STATE_ACTIVE) { | 1732 | && wss->state >= WLP_WSS_STATE_ACTIVE) { |
1890 | d_printf(6, dev, "WSSID from C3 frame is known locally " | ||
1891 | "and is active\n"); | ||
1892 | result = wlp_eda_update_node(&wlp->eda, src, wss, | 1733 | result = wlp_eda_update_node(&wlp->eda, src, wss, |
1893 | (void *) virt_addr.data, tag, | 1734 | (void *) virt_addr.data, tag, |
1894 | WLP_WSS_CONNECTED); | 1735 | WLP_WSS_CONNECTED); |
@@ -1913,8 +1754,6 @@ void wlp_handle_c3_frame(struct work_struct *ws) | |||
1913 | } | 1754 | } |
1914 | } | 1755 | } |
1915 | } else { | 1756 | } else { |
1916 | d_printf(6, dev, "WSSID from C3 frame is not known locally " | ||
1917 | "or is not active\n"); | ||
1918 | /* Construct F0 frame */ | 1757 | /* Construct F0 frame */ |
1919 | result = wlp_build_assoc_f0(wlp, &resp, WLP_ASSOC_ERROR_INV); | 1758 | result = wlp_build_assoc_f0(wlp, &resp, WLP_ASSOC_ERROR_INV); |
1920 | if (result < 0) { | 1759 | if (result < 0) { |
@@ -1923,8 +1762,6 @@ void wlp_handle_c3_frame(struct work_struct *ws) | |||
1923 | } | 1762 | } |
1924 | } | 1763 | } |
1925 | /* Send C4 frame */ | 1764 | /* Send C4 frame */ |
1926 | d_printf(6, dev, "Transmitting response (C4/F0) frame to %02x:%02x \n", | ||
1927 | src->data[1], src->data[0]); | ||
1928 | BUG_ON(wlp->xmit_frame == NULL); | 1765 | BUG_ON(wlp->xmit_frame == NULL); |
1929 | result = wlp->xmit_frame(wlp, resp, src); | 1766 | result = wlp->xmit_frame(wlp, resp, src); |
1930 | if (result < 0) { | 1767 | if (result < 0) { |
@@ -1939,8 +1776,6 @@ out: | |||
1939 | kfree_skb(frame_ctx->skb); | 1776 | kfree_skb(frame_ctx->skb); |
1940 | kfree(frame_ctx); | 1777 | kfree(frame_ctx); |
1941 | mutex_unlock(&wss->mutex); | 1778 | mutex_unlock(&wss->mutex); |
1942 | d_fnend(6, dev, "WLP: handle C3 frame. wlp = %p, skb = %p\n", | ||
1943 | wlp, skb); | ||
1944 | } | 1779 | } |
1945 | 1780 | ||
1946 | 1781 | ||
diff --git a/drivers/uwb/wlp/sysfs.c b/drivers/uwb/wlp/sysfs.c index 1bb9b1f97d47..0370399ff4bb 100644 --- a/drivers/uwb/wlp/sysfs.c +++ b/drivers/uwb/wlp/sysfs.c | |||
@@ -23,8 +23,8 @@ | |||
23 | * FIXME: Docs | 23 | * FIXME: Docs |
24 | * | 24 | * |
25 | */ | 25 | */ |
26 | |||
27 | #include <linux/wlp.h> | 26 | #include <linux/wlp.h> |
27 | |||
28 | #include "wlp-internal.h" | 28 | #include "wlp-internal.h" |
29 | 29 | ||
30 | static | 30 | static |
diff --git a/drivers/uwb/wlp/txrx.c b/drivers/uwb/wlp/txrx.c index c701bd1a2887..cd2035768b47 100644 --- a/drivers/uwb/wlp/txrx.c +++ b/drivers/uwb/wlp/txrx.c | |||
@@ -26,12 +26,10 @@ | |||
26 | 26 | ||
27 | #include <linux/etherdevice.h> | 27 | #include <linux/etherdevice.h> |
28 | #include <linux/wlp.h> | 28 | #include <linux/wlp.h> |
29 | #define D_LOCAL 5 | ||
30 | #include <linux/uwb/debug.h> | ||
31 | #include "wlp-internal.h" | ||
32 | 29 | ||
30 | #include "wlp-internal.h" | ||
33 | 31 | ||
34 | /** | 32 | /* |
35 | * Direct incoming association msg to correct parsing routine | 33 | * Direct incoming association msg to correct parsing routine |
36 | * | 34 | * |
37 | * We only expect D1, E1, C1, C3 messages as new. All other incoming | 35 | * We only expect D1, E1, C1, C3 messages as new. All other incoming |
@@ -48,35 +46,31 @@ void wlp_direct_assoc_frame(struct wlp *wlp, struct sk_buff *skb, | |||
48 | struct device *dev = &wlp->rc->uwb_dev.dev; | 46 | struct device *dev = &wlp->rc->uwb_dev.dev; |
49 | struct wlp_frame_assoc *assoc = (void *) skb->data; | 47 | struct wlp_frame_assoc *assoc = (void *) skb->data; |
50 | struct wlp_assoc_frame_ctx *frame_ctx; | 48 | struct wlp_assoc_frame_ctx *frame_ctx; |
51 | d_fnstart(5, dev, "wlp %p, skb %p\n", wlp, skb); | 49 | |
52 | frame_ctx = kmalloc(sizeof(*frame_ctx), GFP_ATOMIC); | 50 | frame_ctx = kmalloc(sizeof(*frame_ctx), GFP_ATOMIC); |
53 | if (frame_ctx == NULL) { | 51 | if (frame_ctx == NULL) { |
54 | dev_err(dev, "WLP: Unable to allocate memory for association " | 52 | dev_err(dev, "WLP: Unable to allocate memory for association " |
55 | "frame handling.\n"); | 53 | "frame handling.\n"); |
56 | kfree_skb(skb); | 54 | kfree_skb(skb); |
57 | goto out; | 55 | return; |
58 | } | 56 | } |
59 | frame_ctx->wlp = wlp; | 57 | frame_ctx->wlp = wlp; |
60 | frame_ctx->skb = skb; | 58 | frame_ctx->skb = skb; |
61 | frame_ctx->src = *src; | 59 | frame_ctx->src = *src; |
62 | switch (assoc->type) { | 60 | switch (assoc->type) { |
63 | case WLP_ASSOC_D1: | 61 | case WLP_ASSOC_D1: |
64 | d_printf(5, dev, "Received a D1 frame.\n"); | ||
65 | INIT_WORK(&frame_ctx->ws, wlp_handle_d1_frame); | 62 | INIT_WORK(&frame_ctx->ws, wlp_handle_d1_frame); |
66 | schedule_work(&frame_ctx->ws); | 63 | schedule_work(&frame_ctx->ws); |
67 | break; | 64 | break; |
68 | case WLP_ASSOC_E1: | 65 | case WLP_ASSOC_E1: |
69 | d_printf(5, dev, "Received a E1 frame. FIXME?\n"); | ||
70 | kfree_skb(skb); /* Temporary until we handle it */ | 66 | kfree_skb(skb); /* Temporary until we handle it */ |
71 | kfree(frame_ctx); /* Temporary until we handle it */ | 67 | kfree(frame_ctx); /* Temporary until we handle it */ |
72 | break; | 68 | break; |
73 | case WLP_ASSOC_C1: | 69 | case WLP_ASSOC_C1: |
74 | d_printf(5, dev, "Received a C1 frame.\n"); | ||
75 | INIT_WORK(&frame_ctx->ws, wlp_handle_c1_frame); | 70 | INIT_WORK(&frame_ctx->ws, wlp_handle_c1_frame); |
76 | schedule_work(&frame_ctx->ws); | 71 | schedule_work(&frame_ctx->ws); |
77 | break; | 72 | break; |
78 | case WLP_ASSOC_C3: | 73 | case WLP_ASSOC_C3: |
79 | d_printf(5, dev, "Received a C3 frame.\n"); | ||
80 | INIT_WORK(&frame_ctx->ws, wlp_handle_c3_frame); | 74 | INIT_WORK(&frame_ctx->ws, wlp_handle_c3_frame); |
81 | schedule_work(&frame_ctx->ws); | 75 | schedule_work(&frame_ctx->ws); |
82 | break; | 76 | break; |
@@ -87,11 +81,9 @@ void wlp_direct_assoc_frame(struct wlp *wlp, struct sk_buff *skb, | |||
87 | kfree(frame_ctx); | 81 | kfree(frame_ctx); |
88 | break; | 82 | break; |
89 | } | 83 | } |
90 | out: | ||
91 | d_fnend(5, dev, "wlp %p\n", wlp); | ||
92 | } | 84 | } |
93 | 85 | ||
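
wlp_direct_assoc_frame() keeps the deferral pattern intact: the context is allocated with GFP_ATOMIC because this runs in the receive path, the skb and source address are captured, and parsing is pushed off to a workqueue; the old `goto out`, which existed only to reach the exit trace, becomes a plain return. The per-frame hand-off in isolation (D1 shown; C1/C3 differ only in the handler):

    frame_ctx = kmalloc(sizeof(*frame_ctx), GFP_ATOMIC);   /* rx context */
    if (frame_ctx == NULL) {
            kfree_skb(skb);             /* drop the frame; nothing to undo */
            return;
    }
    frame_ctx->wlp = wlp;
    frame_ctx->skb = skb;
    frame_ctx->src = *src;
    INIT_WORK(&frame_ctx->ws, wlp_handle_d1_frame);
    schedule_work(&frame_ctx->ws);      /* parse in process context */
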
94 | /** | 86 | /* |
95 | * Process incoming association frame | 87 | * Process incoming association frame |
96 | * | 88 | * |
97 | * Although it could be possible to deal with some incoming association | 89 | * Although it could be possible to deal with some incoming association |
@@ -112,7 +104,6 @@ void wlp_receive_assoc_frame(struct wlp *wlp, struct sk_buff *skb, | |||
112 | struct wlp_frame_assoc *assoc = (void *) skb->data; | 104 | struct wlp_frame_assoc *assoc = (void *) skb->data; |
113 | struct wlp_session *session = wlp->session; | 105 | struct wlp_session *session = wlp->session; |
114 | u8 version; | 106 | u8 version; |
115 | d_fnstart(5, dev, "wlp %p, skb %p\n", wlp, skb); | ||
116 | 107 | ||
117 | if (wlp_get_version(wlp, &assoc->version, &version, | 108 | if (wlp_get_version(wlp, &assoc->version, &version, |
118 | sizeof(assoc->version)) < 0) | 109 | sizeof(assoc->version)) < 0) |
@@ -150,14 +141,12 @@ void wlp_receive_assoc_frame(struct wlp *wlp, struct sk_buff *skb, | |||
150 | } else { | 141 | } else { |
151 | wlp_direct_assoc_frame(wlp, skb, src); | 142 | wlp_direct_assoc_frame(wlp, skb, src); |
152 | } | 143 | } |
153 | d_fnend(5, dev, "wlp %p\n", wlp); | ||
154 | return; | 144 | return; |
155 | error: | 145 | error: |
156 | kfree_skb(skb); | 146 | kfree_skb(skb); |
157 | d_fnend(5, dev, "wlp %p\n", wlp); | ||
158 | } | 147 | } |
159 | 148 | ||
160 | /** | 149 | /* |
161 | * Verify incoming frame is from connected neighbor, prep to pass to WLP client | 150 | * Verify incoming frame is from connected neighbor, prep to pass to WLP client |
162 | * | 151 | * |
163 | * Verification proceeds according to WLP 0.99 [7.3.1]. The source address | 152 | * Verification proceeds according to WLP 0.99 [7.3.1]. The source address |
@@ -176,7 +165,6 @@ int wlp_verify_prep_rx_frame(struct wlp *wlp, struct sk_buff *skb, | |||
176 | struct wlp_eda_node eda_entry; | 165 | struct wlp_eda_node eda_entry; |
177 | struct wlp_frame_std_abbrv_hdr *hdr = (void *) skb->data; | 166 | struct wlp_frame_std_abbrv_hdr *hdr = (void *) skb->data; |
178 | 167 | ||
179 | d_fnstart(6, dev, "wlp %p, skb %p \n", wlp, skb); | ||
180 | /*verify*/ | 168 | /*verify*/ |
181 | result = wlp_copy_eda_node(&wlp->eda, src, &eda_entry); | 169 | result = wlp_copy_eda_node(&wlp->eda, src, &eda_entry); |
182 | if (result < 0) { | 170 | if (result < 0) { |
@@ -207,11 +195,10 @@ int wlp_verify_prep_rx_frame(struct wlp *wlp, struct sk_buff *skb, | |||
207 | /*prep*/ | 195 | /*prep*/ |
208 | skb_pull(skb, sizeof(*hdr)); | 196 | skb_pull(skb, sizeof(*hdr)); |
209 | out: | 197 | out: |
210 | d_fnend(6, dev, "wlp %p, skb %p, result = %d \n", wlp, skb, result); | ||
211 | return result; | 198 | return result; |
212 | } | 199 | } |
213 | 200 | ||
214 | /** | 201 | /* |
215 | * Receive a WLP frame from device | 202 | * Receive a WLP frame from device |
216 | * | 203 | * |
217 | * @returns: 1 if calling function should free the skb | 204 | * @returns: 1 if calling function should free the skb |
@@ -226,14 +213,12 @@ int wlp_receive_frame(struct device *dev, struct wlp *wlp, struct sk_buff *skb, | |||
226 | struct wlp_frame_hdr *hdr; | 213 | struct wlp_frame_hdr *hdr; |
227 | int result = 0; | 214 | int result = 0; |
228 | 215 | ||
229 | d_fnstart(6, dev, "skb (%p), len (%u)\n", skb, len); | ||
230 | if (len < sizeof(*hdr)) { | 216 | if (len < sizeof(*hdr)) { |
231 | dev_err(dev, "Not enough data to parse WLP header.\n"); | 217 | dev_err(dev, "Not enough data to parse WLP header.\n"); |
232 | result = -EINVAL; | 218 | result = -EINVAL; |
233 | goto out; | 219 | goto out; |
234 | } | 220 | } |
235 | hdr = ptr; | 221 | hdr = ptr; |
236 | d_dump(6, dev, hdr, sizeof(*hdr)); | ||
237 | if (le16_to_cpu(hdr->mux_hdr) != WLP_PROTOCOL_ID) { | 222 | if (le16_to_cpu(hdr->mux_hdr) != WLP_PROTOCOL_ID) { |
238 | dev_err(dev, "Not a WLP frame type.\n"); | 223 | dev_err(dev, "Not a WLP frame type.\n"); |
239 | result = -EINVAL; | 224 | result = -EINVAL; |
@@ -270,7 +255,6 @@ int wlp_receive_frame(struct device *dev, struct wlp *wlp, struct sk_buff *skb, | |||
270 | "WLP header.\n"); | 255 | "WLP header.\n"); |
271 | goto out; | 256 | goto out; |
272 | } | 257 | } |
273 | d_printf(5, dev, "Association frame received.\n"); | ||
274 | wlp_receive_assoc_frame(wlp, skb, src); | 258 | wlp_receive_assoc_frame(wlp, skb, src); |
275 | break; | 259 | break; |
276 | default: | 260 | default: |
@@ -283,13 +267,12 @@ out: | |||
283 | kfree_skb(skb); | 267 | kfree_skb(skb); |
284 | result = 0; | 268 | result = 0; |
285 | } | 269 | } |
286 | d_fnend(6, dev, "skb (%p)\n", skb); | ||
287 | return result; | 270 | return result; |
288 | } | 271 | } |
289 | EXPORT_SYMBOL_GPL(wlp_receive_frame); | 272 | EXPORT_SYMBOL_GPL(wlp_receive_frame); |
290 | 273 | ||
291 | 274 | ||
292 | /** | 275 | /* |
293 | * Verify frame from network stack, prepare for further transmission | 276 | * Verify frame from network stack, prepare for further transmission |
294 | * | 277 | * |
295 | * @skb: the socket buffer that needs to be prepared for transmission (it | 278 | * @skb: the socket buffer that needs to be prepared for transmission (it |
@@ -343,9 +326,7 @@ int wlp_prepare_tx_frame(struct device *dev, struct wlp *wlp, | |||
343 | int result = -EINVAL; | 326 | int result = -EINVAL; |
344 | struct ethhdr *eth_hdr = (void *) skb->data; | 327 | struct ethhdr *eth_hdr = (void *) skb->data; |
345 | 328 | ||
346 | d_fnstart(6, dev, "wlp (%p), skb (%p) \n", wlp, skb); | ||
347 | if (is_broadcast_ether_addr(eth_hdr->h_dest)) { | 329 | if (is_broadcast_ether_addr(eth_hdr->h_dest)) { |
348 | d_printf(6, dev, "WLP: handling broadcast frame. \n"); | ||
349 | result = wlp_eda_for_each(&wlp->eda, wlp_wss_send_copy, skb); | 330 | result = wlp_eda_for_each(&wlp->eda, wlp_wss_send_copy, skb); |
350 | if (result < 0) { | 331 | if (result < 0) { |
351 | if (printk_ratelimit()) | 332 | if (printk_ratelimit()) |
@@ -357,7 +338,6 @@ int wlp_prepare_tx_frame(struct device *dev, struct wlp *wlp, | |||
357 | result = 1; | 338 | result = 1; |
358 | /* Frame will be transmitted by WLP. */ | 339 | /* Frame will be transmitted by WLP. */ |
359 | } else { | 340 | } else { |
360 | d_printf(6, dev, "WLP: handling unicast frame. \n"); | ||
361 | result = wlp_eda_for_virtual(&wlp->eda, eth_hdr->h_dest, dst, | 341 | result = wlp_eda_for_virtual(&wlp->eda, eth_hdr->h_dest, dst, |
362 | wlp_wss_prep_hdr, skb); | 342 | wlp_wss_prep_hdr, skb); |
363 | if (unlikely(result < 0)) { | 343 | if (unlikely(result < 0)) { |
@@ -368,7 +348,6 @@ int wlp_prepare_tx_frame(struct device *dev, struct wlp *wlp, | |||
368 | } | 348 | } |
369 | } | 349 | } |
370 | out: | 350 | out: |
371 | d_fnend(6, dev, "wlp (%p), skb (%p). result = %d \n", wlp, skb, result); | ||
372 | return result; | 351 | return result; |
373 | } | 352 | } |
374 | EXPORT_SYMBOL_GPL(wlp_prepare_tx_frame); | 353 | EXPORT_SYMBOL_GPL(wlp_prepare_tx_frame); |
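
What survives the debug strip-down in txrx.c are the two entry points between the network driver and the WLP core: wlp_receive_frame(), whose return of 1 hands skb ownership back to the caller, and wlp_prepare_tx_frame(), which either transmits broadcast copies itself (returning 1) or prepends the WLP header for the driver to send (returning 0). A minimal caller sketch follows; example_wlp_rx()/example_wlp_tx() are illustrative names only, and the trailing src/dst arguments are assumed from the hunk bodies since the hunk headers truncate both signatures:

    static void example_wlp_rx(struct device *dev, struct wlp *wlp,
                               struct sk_buff *skb, struct uwb_dev_addr *src)
    {
            /* wlp_receive_frame() frees the skb on its own error paths;
             * only a return of 1 leaves it with the caller */
            if (wlp_receive_frame(dev, wlp, skb, src) == 1)
                    kfree_skb(skb);
    }

    static int example_wlp_tx(struct device *dev, struct wlp *wlp,
                              struct sk_buff *skb, struct uwb_dev_addr *dst)
    {
            int result = wlp_prepare_tx_frame(dev, wlp, skb, dst);

            if (result == 1) {
                    dev_kfree_skb(skb);     /* WLP already sent copies */
                    return 0;
            }
            if (result < 0)
                    return result;          /* EDA lookup or header prep failed */
            /* result == 0: skb now carries a WLP header; hand it to the
             * radio hardware here */
            return 0;
    }
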
diff --git a/drivers/uwb/wlp/wlp-internal.h b/drivers/uwb/wlp/wlp-internal.h index 1c94fabfb1a7..3e8d5de7c5b9 100644 --- a/drivers/uwb/wlp/wlp-internal.h +++ b/drivers/uwb/wlp/wlp-internal.h | |||
@@ -42,10 +42,6 @@ enum wlp_wss_connect { | |||
42 | extern struct kobj_type wss_ktype; | 42 | extern struct kobj_type wss_ktype; |
43 | extern struct attribute_group wss_attr_group; | 43 | extern struct attribute_group wss_attr_group; |
44 | 44 | ||
45 | extern int uwb_rc_ie_add(struct uwb_rc *, const struct uwb_ie_hdr *, size_t); | ||
46 | extern int uwb_rc_ie_rm(struct uwb_rc *, enum uwb_ie); | ||
47 | |||
48 | |||
49 | /* This should be changed to a dynamic array where entries are sorted | 45 | /* This should be changed to a dynamic array where entries are sorted |
50 | * by eth_addr and search is done in a binary form | 46 | * by eth_addr and search is done in a binary form |
51 | * | 47 | * |
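
The two externs dropped here duplicated prototypes that the UWB core already exports through its shared header (presumably include/linux/uwb.h); a second private copy could only drift out of sync with the real signatures. For reference, the now single-sourced declarations, exactly as removed:

    extern int uwb_rc_ie_add(struct uwb_rc *, const struct uwb_ie_hdr *, size_t);
    extern int uwb_rc_ie_rm(struct uwb_rc *, enum uwb_ie);
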
diff --git a/drivers/uwb/wlp/wlp-lc.c b/drivers/uwb/wlp/wlp-lc.c index 0799402e73fb..13db739c4e39 100644 --- a/drivers/uwb/wlp/wlp-lc.c +++ b/drivers/uwb/wlp/wlp-lc.c | |||
@@ -21,12 +21,9 @@ | |||
21 | * | 21 | * |
22 | * FIXME: docs | 22 | * FIXME: docs |
23 | */ | 23 | */ |
24 | |||
25 | #include <linux/wlp.h> | 24 | #include <linux/wlp.h> |
26 | #define D_LOCAL 6 | ||
27 | #include <linux/uwb/debug.h> | ||
28 | #include "wlp-internal.h" | ||
29 | 25 | ||
26 | #include "wlp-internal.h" | ||
30 | 27 | ||
31 | static | 28 | static |
32 | void wlp_neighbor_init(struct wlp_neighbor_e *neighbor) | 29 | void wlp_neighbor_init(struct wlp_neighbor_e *neighbor) |
@@ -61,11 +58,6 @@ int __wlp_alloc_device_info(struct wlp *wlp) | |||
61 | static | 58 | static |
62 | void __wlp_fill_device_info(struct wlp *wlp) | 59 | void __wlp_fill_device_info(struct wlp *wlp) |
63 | { | 60 | { |
64 | struct device *dev = &wlp->rc->uwb_dev.dev; | ||
65 | |||
66 | BUG_ON(wlp->fill_device_info == NULL); | ||
67 | d_printf(6, dev, "Retrieving device information " | ||
68 | "from device driver.\n"); | ||
69 | wlp->fill_device_info(wlp, wlp->dev_info); | 61 | wlp->fill_device_info(wlp, wlp->dev_info); |
70 | } | 62 | } |
71 | 63 | ||
@@ -127,7 +119,7 @@ void wlp_remove_neighbor_tmp_info(struct wlp_neighbor_e *neighbor) | |||
127 | } | 119 | } |
128 | } | 120 | } |
129 | 121 | ||
130 | /** | 122 | /* |
131 | * Populate WLP neighborhood cache with neighbor information | 123 | * Populate WLP neighborhood cache with neighbor information |
132 | * | 124 | * |
133 | * A new neighbor is found. If it is discoverable then we add it to the | 125 | * A new neighbor is found. If it is discoverable then we add it to the |
@@ -141,10 +133,7 @@ int wlp_add_neighbor(struct wlp *wlp, struct uwb_dev *dev) | |||
141 | int discoverable; | 133 | int discoverable; |
142 | struct wlp_neighbor_e *neighbor; | 134 | struct wlp_neighbor_e *neighbor; |
143 | 135 | ||
144 | d_fnstart(6, &dev->dev, "uwb %p \n", dev); | 136 | /* |
145 | d_printf(6, &dev->dev, "Found neighbor device %02x:%02x \n", | ||
146 | dev->dev_addr.data[1], dev->dev_addr.data[0]); | ||
147 | /** | ||
148 | * FIXME: | 137 | * FIXME: |
149 | * Use contents of WLP IE found in beacon cache to determine if | 138 | * Use contents of WLP IE found in beacon cache to determine if |
150 | * neighbor is discoverable. | 139 | * neighbor is discoverable. |
@@ -167,7 +156,6 @@ int wlp_add_neighbor(struct wlp *wlp, struct uwb_dev *dev) | |||
167 | list_add(&neighbor->node, &wlp->neighbors); | 156 | list_add(&neighbor->node, &wlp->neighbors); |
168 | } | 157 | } |
169 | error_no_mem: | 158 | error_no_mem: |
170 | d_fnend(6, &dev->dev, "uwb %p, result = %d \n", dev, result); | ||
171 | return result; | 159 | return result; |
172 | } | 160 | } |
173 | 161 | ||
@@ -255,8 +243,6 @@ int wlp_d1d2_exchange(struct wlp *wlp, struct wlp_neighbor_e *neighbor, | |||
255 | dev_err(dev, "Unable to send D1 frame to neighbor " | 243 | dev_err(dev, "Unable to send D1 frame to neighbor " |
256 | "%02x:%02x (%d)\n", dev_addr->data[1], | 244 | "%02x:%02x (%d)\n", dev_addr->data[1], |
257 | dev_addr->data[0], result); | 245 | dev_addr->data[0], result); |
258 | d_printf(6, dev, "Add placeholders into buffer next to " | ||
259 | "neighbor information we have (dev address).\n"); | ||
260 | goto out; | 246 | goto out; |
261 | } | 247 | } |
262 | /* Create session, wait for response */ | 248 | /* Create session, wait for response */ |
@@ -284,8 +270,6 @@ int wlp_d1d2_exchange(struct wlp *wlp, struct wlp_neighbor_e *neighbor, | |||
284 | /* Parse message in session->data: it will be either D2 or F0 */ | 270 | /* Parse message in session->data: it will be either D2 or F0 */ |
285 | skb = session.data; | 271 | skb = session.data; |
286 | resp = (void *) skb->data; | 272 | resp = (void *) skb->data; |
287 | d_printf(6, dev, "Received response to D1 frame. \n"); | ||
288 | d_dump(6, dev, skb->data, skb->len > 72 ? 72 : skb->len); | ||
289 | 273 | ||
290 | if (resp->type == WLP_ASSOC_F0) { | 274 | if (resp->type == WLP_ASSOC_F0) { |
291 | result = wlp_parse_f0(wlp, skb); | 275 | result = wlp_parse_f0(wlp, skb); |
@@ -337,10 +321,9 @@ int wlp_enroll_neighbor(struct wlp *wlp, struct wlp_neighbor_e *neighbor, | |||
337 | struct device *dev = &wlp->rc->uwb_dev.dev; | 321 | struct device *dev = &wlp->rc->uwb_dev.dev; |
338 | char buf[WLP_WSS_UUID_STRSIZE]; | 322 | char buf[WLP_WSS_UUID_STRSIZE]; |
339 | struct uwb_dev_addr *dev_addr = &neighbor->uwb_dev->dev_addr; | 323 | struct uwb_dev_addr *dev_addr = &neighbor->uwb_dev->dev_addr; |
324 | |||
340 | wlp_wss_uuid_print(buf, sizeof(buf), wssid); | 325 | wlp_wss_uuid_print(buf, sizeof(buf), wssid); |
341 | d_fnstart(6, dev, "wlp %p, neighbor %p, wss %p, wssid %p (%s)\n", | 326 | |
342 | wlp, neighbor, wss, wssid, buf); | ||
343 | d_printf(6, dev, "Complete me.\n"); | ||
344 | result = wlp_d1d2_exchange(wlp, neighbor, wss, wssid); | 327 | result = wlp_d1d2_exchange(wlp, neighbor, wss, wssid); |
345 | if (result < 0) { | 328 | if (result < 0) { |
346 | dev_err(dev, "WLP: D1/D2 message exchange for enrollment " | 329 | dev_err(dev, "WLP: D1/D2 message exchange for enrollment " |
@@ -360,13 +343,10 @@ int wlp_enroll_neighbor(struct wlp *wlp, struct wlp_neighbor_e *neighbor, | |||
360 | goto error; | 343 | goto error; |
361 | } else { | 344 | } else { |
362 | wss->state = WLP_WSS_STATE_ENROLLED; | 345 | wss->state = WLP_WSS_STATE_ENROLLED; |
363 | d_printf(2, dev, "WLP: Success Enrollment into unsecure WSS " | 346 | dev_dbg(dev, "WLP: Success Enrollment into unsecure WSS " |
364 | "%s using neighbor %02x:%02x. \n", buf, | 347 | "%s using neighbor %02x:%02x. \n", |
365 | dev_addr->data[1], dev_addr->data[0]); | 348 | buf, dev_addr->data[1], dev_addr->data[0]); |
366 | } | 349 | } |
367 | |||
368 | d_fnend(6, dev, "wlp %p, neighbor %p, wss %p, wssid %p (%s)\n", | ||
369 | wlp, neighbor, wss, wssid, buf); | ||
370 | out: | 350 | out: |
371 | return result; | 351 | return result; |
372 | error: | 352 | error: |
@@ -449,7 +429,6 @@ ssize_t wlp_discover(struct wlp *wlp) | |||
449 | int result = 0; | 429 | int result = 0; |
450 | struct device *dev = &wlp->rc->uwb_dev.dev; | 430 | struct device *dev = &wlp->rc->uwb_dev.dev; |
451 | 431 | ||
452 | d_fnstart(6, dev, "wlp %p \n", wlp); | ||
453 | mutex_lock(&wlp->nbmutex); | 432 | mutex_lock(&wlp->nbmutex); |
454 | /* Clear current neighborhood cache. */ | 433 | /* Clear current neighborhood cache. */ |
455 | __wlp_neighbors_release(wlp); | 434 | __wlp_neighbors_release(wlp); |
@@ -469,7 +448,6 @@ ssize_t wlp_discover(struct wlp *wlp) | |||
469 | } | 448 | } |
470 | error_dev_for_each: | 449 | error_dev_for_each: |
471 | mutex_unlock(&wlp->nbmutex); | 450 | mutex_unlock(&wlp->nbmutex); |
472 | d_fnend(6, dev, "wlp %p \n", wlp); | ||
473 | return result; | 451 | return result; |
474 | } | 452 | } |
475 | 453 | ||
@@ -492,9 +470,6 @@ void wlp_uwb_notifs_cb(void *_wlp, struct uwb_dev *uwb_dev, | |||
492 | int result; | 470 | int result; |
493 | switch (event) { | 471 | switch (event) { |
494 | case UWB_NOTIF_ONAIR: | 472 | case UWB_NOTIF_ONAIR: |
495 | d_printf(6, dev, "UWB device %02x:%02x is onair\n", | ||
496 | uwb_dev->dev_addr.data[1], | ||
497 | uwb_dev->dev_addr.data[0]); | ||
498 | result = wlp_eda_create_node(&wlp->eda, | 473 | result = wlp_eda_create_node(&wlp->eda, |
499 | uwb_dev->mac_addr.data, | 474 | uwb_dev->mac_addr.data, |
500 | &uwb_dev->dev_addr); | 475 | &uwb_dev->dev_addr); |
@@ -505,18 +480,11 @@ void wlp_uwb_notifs_cb(void *_wlp, struct uwb_dev *uwb_dev, | |||
505 | uwb_dev->dev_addr.data[0]); | 480 | uwb_dev->dev_addr.data[0]); |
506 | break; | 481 | break; |
507 | case UWB_NOTIF_OFFAIR: | 482 | case UWB_NOTIF_OFFAIR: |
508 | d_printf(6, dev, "UWB device %02x:%02x is offair\n", | ||
509 | uwb_dev->dev_addr.data[1], | ||
510 | uwb_dev->dev_addr.data[0]); | ||
511 | wlp_eda_rm_node(&wlp->eda, &uwb_dev->dev_addr); | 483 | wlp_eda_rm_node(&wlp->eda, &uwb_dev->dev_addr); |
512 | mutex_lock(&wlp->nbmutex); | 484 | mutex_lock(&wlp->nbmutex); |
513 | list_for_each_entry_safe(neighbor, next, &wlp->neighbors, | 485 | list_for_each_entry_safe(neighbor, next, &wlp->neighbors, node) { |
514 | node) { | 486 | if (neighbor->uwb_dev == uwb_dev) |
515 | if (neighbor->uwb_dev == uwb_dev) { | ||
516 | d_printf(6, dev, "Removing device from " | ||
517 | "neighborhood.\n"); | ||
518 | __wlp_neighbor_release(neighbor); | 487 | __wlp_neighbor_release(neighbor); |
519 | } | ||
520 | } | 488 | } |
521 | mutex_unlock(&wlp->nbmutex); | 489 | mutex_unlock(&wlp->nbmutex); |
522 | break; | 490 | break; |
@@ -526,38 +494,47 @@ void wlp_uwb_notifs_cb(void *_wlp, struct uwb_dev *uwb_dev, | |||
526 | } | 494 | } |
527 | } | 495 | } |
528 | 496 | ||
529 | int wlp_setup(struct wlp *wlp, struct uwb_rc *rc) | 497 | static void wlp_channel_changed(struct uwb_pal *pal, int channel) |
498 | { | ||
499 | struct wlp *wlp = container_of(pal, struct wlp, pal); | ||
500 | |||
501 | if (channel < 0) | ||
502 | netif_carrier_off(wlp->ndev); | ||
503 | else | ||
504 | netif_carrier_on(wlp->ndev); | ||
505 | } | ||
506 | |||
507 | int wlp_setup(struct wlp *wlp, struct uwb_rc *rc, struct net_device *ndev) | ||
530 | { | 508 | { |
531 | struct device *dev = &rc->uwb_dev.dev; | ||
532 | int result; | 509 | int result; |
533 | 510 | ||
534 | d_fnstart(6, dev, "wlp %p\n", wlp); | ||
535 | BUG_ON(wlp->fill_device_info == NULL); | 511 | BUG_ON(wlp->fill_device_info == NULL); |
536 | BUG_ON(wlp->xmit_frame == NULL); | 512 | BUG_ON(wlp->xmit_frame == NULL); |
537 | BUG_ON(wlp->stop_queue == NULL); | 513 | BUG_ON(wlp->stop_queue == NULL); |
538 | BUG_ON(wlp->start_queue == NULL); | 514 | BUG_ON(wlp->start_queue == NULL); |
515 | |||
539 | wlp->rc = rc; | 516 | wlp->rc = rc; |
517 | wlp->ndev = ndev; | ||
540 | wlp_eda_init(&wlp->eda);/* Set up address cache */ | 518 | wlp_eda_init(&wlp->eda);/* Set up address cache */ |
541 | wlp->uwb_notifs_handler.cb = wlp_uwb_notifs_cb; | 519 | wlp->uwb_notifs_handler.cb = wlp_uwb_notifs_cb; |
542 | wlp->uwb_notifs_handler.data = wlp; | 520 | wlp->uwb_notifs_handler.data = wlp; |
543 | uwb_notifs_register(rc, &wlp->uwb_notifs_handler); | 521 | uwb_notifs_register(rc, &wlp->uwb_notifs_handler); |
544 | 522 | ||
545 | uwb_pal_init(&wlp->pal); | 523 | uwb_pal_init(&wlp->pal); |
546 | result = uwb_pal_register(rc, &wlp->pal); | 524 | wlp->pal.rc = rc; |
525 | wlp->pal.channel_changed = wlp_channel_changed; | ||
526 | result = uwb_pal_register(&wlp->pal); | ||
547 | if (result < 0) | 527 | if (result < 0) |
548 | uwb_notifs_deregister(wlp->rc, &wlp->uwb_notifs_handler); | 528 | uwb_notifs_deregister(wlp->rc, &wlp->uwb_notifs_handler); |
549 | 529 | ||
550 | d_fnend(6, dev, "wlp %p, result = %d\n", wlp, result); | ||
551 | return result; | 530 | return result; |
552 | } | 531 | } |
553 | EXPORT_SYMBOL_GPL(wlp_setup); | 532 | EXPORT_SYMBOL_GPL(wlp_setup); |
554 | 533 | ||
555 | void wlp_remove(struct wlp *wlp) | 534 | void wlp_remove(struct wlp *wlp) |
556 | { | 535 | { |
557 | struct device *dev = &wlp->rc->uwb_dev.dev; | ||
558 | d_fnstart(6, dev, "wlp %p\n", wlp); | ||
559 | wlp_neighbors_release(wlp); | 536 | wlp_neighbors_release(wlp); |
560 | uwb_pal_unregister(wlp->rc, &wlp->pal); | 537 | uwb_pal_unregister(&wlp->pal); |
561 | uwb_notifs_deregister(wlp->rc, &wlp->uwb_notifs_handler); | 538 | uwb_notifs_deregister(wlp->rc, &wlp->uwb_notifs_handler); |
562 | wlp_eda_release(&wlp->eda); | 539 | wlp_eda_release(&wlp->eda); |
563 | mutex_lock(&wlp->mutex); | 540 | mutex_lock(&wlp->mutex); |
@@ -565,9 +542,6 @@ void wlp_remove(struct wlp *wlp) | |||
565 | kfree(wlp->dev_info); | 542 | kfree(wlp->dev_info); |
566 | mutex_unlock(&wlp->mutex); | 543 | mutex_unlock(&wlp->mutex); |
567 | wlp->rc = NULL; | 544 | wlp->rc = NULL; |
568 | /* We have to use NULL here because this function can be called | ||
569 | * when the device disappeared. */ | ||
570 | d_fnend(6, NULL, "wlp %p\n", wlp); | ||
571 | } | 545 | } |
572 | EXPORT_SYMBOL_GPL(wlp_remove); | 546 | EXPORT_SYMBOL_GPL(wlp_remove); |
573 | 547 | ||
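
Beyond the tracing removal, wlp-lc.c carries the one behavioral change of the WLP series: PAL registration. struct uwb_pal now records its radio controller, uwb_pal_register()/uwb_pal_unregister() take only the pal, and a channel_changed hook lets WLP mirror beacon state onto the carrier, so the network interface goes link-down whenever the radio stops beaconing. A condensed sketch of the new contract, using only names visible in the hunks (headers <linux/uwb.h> and <linux/netdevice.h> assumed):

    static void wlp_channel_changed(struct uwb_pal *pal, int channel)
    {
            struct wlp *wlp = container_of(pal, struct wlp, pal);

            if (channel < 0)                /* radio stopped beaconing */
                    netif_carrier_off(wlp->ndev);
            else
                    netif_carrier_on(wlp->ndev);
    }

    /* in wlp_setup(), after stashing rc and ndev: */
    uwb_pal_init(&wlp->pal);
    wlp->pal.rc = rc;                       /* the pal carries its rc now */
    wlp->pal.channel_changed = wlp_channel_changed;
    result = uwb_pal_register(&wlp->pal);   /* single-argument form */

This is also why wlp_setup() grows the struct net_device *ndev parameter: the callback needs a device whose carrier it can toggle.
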
diff --git a/drivers/uwb/wlp/wss-lc.c b/drivers/uwb/wlp/wss-lc.c index 96b18c9bd6e9..5913c7a5d922 100644 --- a/drivers/uwb/wlp/wss-lc.c +++ b/drivers/uwb/wlp/wss-lc.c | |||
@@ -43,14 +43,11 @@ | |||
43 | * wlp_wss_release() | 43 | * wlp_wss_release() |
44 | * wlp_wss_reset() | 44 | * wlp_wss_reset() |
45 | */ | 45 | */ |
46 | |||
47 | #include <linux/etherdevice.h> /* for is_valid_ether_addr */ | 46 | #include <linux/etherdevice.h> /* for is_valid_ether_addr */ |
48 | #include <linux/skbuff.h> | 47 | #include <linux/skbuff.h> |
49 | #include <linux/wlp.h> | 48 | #include <linux/wlp.h> |
50 | #define D_LOCAL 5 | ||
51 | #include <linux/uwb/debug.h> | ||
52 | #include "wlp-internal.h" | ||
53 | 49 | ||
50 | #include "wlp-internal.h" | ||
54 | 51 | ||
55 | size_t wlp_wss_key_print(char *buf, size_t bufsize, u8 *key) | 52 | size_t wlp_wss_key_print(char *buf, size_t bufsize, u8 *key) |
56 | { | 53 | { |
@@ -116,9 +113,6 @@ struct uwb_mac_addr wlp_wss_sel_bcast_addr(struct wlp_wss *wss) | |||
116 | */ | 113 | */ |
117 | void wlp_wss_reset(struct wlp_wss *wss) | 114 | void wlp_wss_reset(struct wlp_wss *wss) |
118 | { | 115 | { |
119 | struct wlp *wlp = container_of(wss, struct wlp, wss); | ||
120 | struct device *dev = &wlp->rc->uwb_dev.dev; | ||
121 | d_fnstart(5, dev, "wss (%p) \n", wss); | ||
122 | memset(&wss->wssid, 0, sizeof(wss->wssid)); | 116 | memset(&wss->wssid, 0, sizeof(wss->wssid)); |
123 | wss->hash = 0; | 117 | wss->hash = 0; |
124 | memset(&wss->name[0], 0, sizeof(wss->name)); | 118 | memset(&wss->name[0], 0, sizeof(wss->name)); |
@@ -127,7 +121,6 @@ void wlp_wss_reset(struct wlp_wss *wss) | |||
127 | memset(&wss->master_key[0], 0, sizeof(wss->master_key)); | 121 | memset(&wss->master_key[0], 0, sizeof(wss->master_key)); |
128 | wss->tag = 0; | 122 | wss->tag = 0; |
129 | wss->state = WLP_WSS_STATE_NONE; | 123 | wss->state = WLP_WSS_STATE_NONE; |
130 | d_fnend(5, dev, "wss (%p) \n", wss); | ||
131 | } | 124 | } |
132 | 125 | ||
133 | /** | 126 | /** |
@@ -145,7 +138,6 @@ int wlp_wss_sysfs_add(struct wlp_wss *wss, char *wssid_str) | |||
145 | struct device *dev = &wlp->rc->uwb_dev.dev; | 138 | struct device *dev = &wlp->rc->uwb_dev.dev; |
146 | int result; | 139 | int result; |
147 | 140 | ||
148 | d_fnstart(5, dev, "wss (%p), wssid: %s\n", wss, wssid_str); | ||
149 | result = kobject_set_name(&wss->kobj, "wss-%s", wssid_str); | 141 | result = kobject_set_name(&wss->kobj, "wss-%s", wssid_str); |
150 | if (result < 0) | 142 | if (result < 0) |
151 | return result; | 143 | return result; |
@@ -162,7 +154,6 @@ int wlp_wss_sysfs_add(struct wlp_wss *wss, char *wssid_str) | |||
162 | result); | 154 | result); |
163 | goto error_sysfs_create_group; | 155 | goto error_sysfs_create_group; |
164 | } | 156 | } |
165 | d_fnend(5, dev, "Completed. result = %d \n", result); | ||
166 | return 0; | 157 | return 0; |
167 | error_sysfs_create_group: | 158 | error_sysfs_create_group: |
168 | 159 | ||
@@ -214,22 +205,14 @@ int wlp_wss_enroll_target(struct wlp_wss *wss, struct wlp_uuid *wssid, | |||
214 | struct wlp *wlp = container_of(wss, struct wlp, wss); | 205 | struct wlp *wlp = container_of(wss, struct wlp, wss); |
215 | struct device *dev = &wlp->rc->uwb_dev.dev; | 206 | struct device *dev = &wlp->rc->uwb_dev.dev; |
216 | struct wlp_neighbor_e *neighbor; | 207 | struct wlp_neighbor_e *neighbor; |
217 | char buf[WLP_WSS_UUID_STRSIZE]; | ||
218 | int result = -ENXIO; | 208 | int result = -ENXIO; |
219 | struct uwb_dev_addr *dev_addr; | 209 | struct uwb_dev_addr *dev_addr; |
220 | 210 | ||
221 | wlp_wss_uuid_print(buf, sizeof(buf), wssid); | ||
222 | d_fnstart(5, dev, "wss %p, wssid %s, registrar %02x:%02x \n", | ||
223 | wss, buf, dest->data[1], dest->data[0]); | ||
224 | mutex_lock(&wlp->nbmutex); | 211 | mutex_lock(&wlp->nbmutex); |
225 | list_for_each_entry(neighbor, &wlp->neighbors, node) { | 212 | list_for_each_entry(neighbor, &wlp->neighbors, node) { |
226 | dev_addr = &neighbor->uwb_dev->dev_addr; | 213 | dev_addr = &neighbor->uwb_dev->dev_addr; |
227 | if (!memcmp(dest, dev_addr, sizeof(*dest))) { | 214 | if (!memcmp(dest, dev_addr, sizeof(*dest))) { |
228 | d_printf(5, dev, "Neighbor %02x:%02x is valid, " | 215 | result = wlp_enroll_neighbor(wlp, neighbor, wss, wssid); |
229 | "enrolling. \n", | ||
230 | dev_addr->data[1], dev_addr->data[0]); | ||
231 | result = wlp_enroll_neighbor(wlp, neighbor, wss, | ||
232 | wssid); | ||
233 | break; | 216 | break; |
234 | } | 217 | } |
235 | } | 218 | } |
@@ -237,8 +220,6 @@ int wlp_wss_enroll_target(struct wlp_wss *wss, struct wlp_uuid *wssid, | |||
237 | dev_err(dev, "WLP: Cannot find neighbor %02x:%02x. \n", | 220 | dev_err(dev, "WLP: Cannot find neighbor %02x:%02x. \n", |
238 | dest->data[1], dest->data[0]); | 221 | dest->data[1], dest->data[0]); |
239 | mutex_unlock(&wlp->nbmutex); | 222 | mutex_unlock(&wlp->nbmutex); |
240 | d_fnend(5, dev, "wss %p, wssid %s, registrar %02x:%02x, result %d \n", | ||
241 | wss, buf, dest->data[1], dest->data[0], result); | ||
242 | return result; | 223 | return result; |
243 | } | 224 | } |
244 | 225 | ||
@@ -260,16 +241,11 @@ int wlp_wss_enroll_discovered(struct wlp_wss *wss, struct wlp_uuid *wssid) | |||
260 | char buf[WLP_WSS_UUID_STRSIZE]; | 241 | char buf[WLP_WSS_UUID_STRSIZE]; |
261 | int result = -ENXIO; | 242 | int result = -ENXIO; |
262 | 243 | ||
263 | wlp_wss_uuid_print(buf, sizeof(buf), wssid); | 244 | |
264 | d_fnstart(5, dev, "wss %p, wssid %s \n", wss, buf); | ||
265 | mutex_lock(&wlp->nbmutex); | 245 | mutex_lock(&wlp->nbmutex); |
266 | list_for_each_entry(neighbor, &wlp->neighbors, node) { | 246 | list_for_each_entry(neighbor, &wlp->neighbors, node) { |
267 | list_for_each_entry(wssid_e, &neighbor->wssid, node) { | 247 | list_for_each_entry(wssid_e, &neighbor->wssid, node) { |
268 | if (!memcmp(wssid, &wssid_e->wssid, sizeof(*wssid))) { | 248 | if (!memcmp(wssid, &wssid_e->wssid, sizeof(*wssid))) { |
269 | d_printf(5, dev, "Found WSSID %s in neighbor " | ||
270 | "%02x:%02x cache. \n", buf, | ||
271 | neighbor->uwb_dev->dev_addr.data[1], | ||
272 | neighbor->uwb_dev->dev_addr.data[0]); | ||
273 | result = wlp_enroll_neighbor(wlp, neighbor, | 249 | result = wlp_enroll_neighbor(wlp, neighbor, |
274 | wss, wssid); | 250 | wss, wssid); |
275 | if (result == 0) /* enrollment success */ | 251 | if (result == 0) /* enrollment success */ |
@@ -279,10 +255,11 @@ int wlp_wss_enroll_discovered(struct wlp_wss *wss, struct wlp_uuid *wssid) | |||
279 | } | 255 | } |
280 | } | 256 | } |
281 | out: | 257 | out: |
282 | if (result == -ENXIO) | 258 | if (result == -ENXIO) { |
259 | wlp_wss_uuid_print(buf, sizeof(buf), wssid); | ||
283 | dev_err(dev, "WLP: Cannot find WSSID %s in cache. \n", buf); | 260 | dev_err(dev, "WLP: Cannot find WSSID %s in cache. \n", buf); |
261 | } | ||
284 | mutex_unlock(&wlp->nbmutex); | 262 | mutex_unlock(&wlp->nbmutex); |
285 | d_fnend(5, dev, "wss %p, wssid %s, result %d \n", wss, buf, result); | ||
286 | return result; | 263 | return result; |
287 | } | 264 | } |
288 | 265 | ||
@@ -307,27 +284,22 @@ int wlp_wss_enroll(struct wlp_wss *wss, struct wlp_uuid *wssid, | |||
307 | struct uwb_dev_addr bcast = {.data = {0xff, 0xff} }; | 284 | struct uwb_dev_addr bcast = {.data = {0xff, 0xff} }; |
308 | 285 | ||
309 | wlp_wss_uuid_print(buf, sizeof(buf), wssid); | 286 | wlp_wss_uuid_print(buf, sizeof(buf), wssid); |
287 | |||
310 | if (wss->state != WLP_WSS_STATE_NONE) { | 288 | if (wss->state != WLP_WSS_STATE_NONE) { |
311 | dev_err(dev, "WLP: Already enrolled in WSS %s.\n", buf); | 289 | dev_err(dev, "WLP: Already enrolled in WSS %s.\n", buf); |
312 | result = -EEXIST; | 290 | result = -EEXIST; |
313 | goto error; | 291 | goto error; |
314 | } | 292 | } |
315 | if (!memcmp(&bcast, devaddr, sizeof(bcast))) { | 293 | if (!memcmp(&bcast, devaddr, sizeof(bcast))) |
316 | d_printf(5, dev, "Request to enroll in discovered WSS " | ||
317 | "with WSSID %s \n", buf); | ||
318 | result = wlp_wss_enroll_discovered(wss, wssid); | 294 | result = wlp_wss_enroll_discovered(wss, wssid); |
319 | } else { | 295 | else |
320 | d_printf(5, dev, "Request to enroll in WSSID %s with " | ||
321 | "registrar %02x:%02x\n", buf, devaddr->data[1], | ||
322 | devaddr->data[0]); | ||
323 | result = wlp_wss_enroll_target(wss, wssid, devaddr); | 296 | result = wlp_wss_enroll_target(wss, wssid, devaddr); |
324 | } | ||
325 | if (result < 0) { | 297 | if (result < 0) { |
326 | dev_err(dev, "WLP: Unable to enroll into WSS %s, result %d \n", | 298 | dev_err(dev, "WLP: Unable to enroll into WSS %s, result %d \n", |
327 | buf, result); | 299 | buf, result); |
328 | goto error; | 300 | goto error; |
329 | } | 301 | } |
330 | d_printf(2, dev, "Successfully enrolled into WSS %s \n", buf); | 302 | dev_dbg(dev, "Successfully enrolled into WSS %s \n", buf); |
331 | result = wlp_wss_sysfs_add(wss, buf); | 303 | result = wlp_wss_sysfs_add(wss, buf); |
332 | if (result < 0) { | 304 | if (result < 0) { |
333 | dev_err(dev, "WLP: Unable to set up sysfs for WSS kobject.\n"); | 305 | dev_err(dev, "WLP: Unable to set up sysfs for WSS kobject.\n"); |
@@ -363,7 +335,6 @@ int wlp_wss_activate(struct wlp_wss *wss) | |||
363 | u8 hash; /* only include one hash */ | 335 | u8 hash; /* only include one hash */ |
364 | } ie_data; | 336 | } ie_data; |
365 | 337 | ||
366 | d_fnstart(5, dev, "Activating WSS %p. \n", wss); | ||
367 | BUG_ON(wss->state != WLP_WSS_STATE_ENROLLED); | 338 | BUG_ON(wss->state != WLP_WSS_STATE_ENROLLED); |
368 | wss->hash = wlp_wss_comp_wssid_hash(&wss->wssid); | 339 | wss->hash = wlp_wss_comp_wssid_hash(&wss->wssid); |
369 | wss->tag = wss->hash; | 340 | wss->tag = wss->hash; |
@@ -382,7 +353,6 @@ int wlp_wss_activate(struct wlp_wss *wss) | |||
382 | wss->state = WLP_WSS_STATE_ACTIVE; | 353 | wss->state = WLP_WSS_STATE_ACTIVE; |
383 | result = 0; | 354 | result = 0; |
384 | error_wlp_ie: | 355 | error_wlp_ie: |
385 | d_fnend(5, dev, "Activating WSS %p, result = %d \n", wss, result); | ||
386 | return result; | 356 | return result; |
387 | } | 357 | } |
388 | 358 | ||
@@ -405,7 +375,6 @@ int wlp_wss_enroll_activate(struct wlp_wss *wss, struct wlp_uuid *wssid, | |||
405 | int result = 0; | 375 | int result = 0; |
406 | char buf[WLP_WSS_UUID_STRSIZE]; | 376 | char buf[WLP_WSS_UUID_STRSIZE]; |
407 | 377 | ||
408 | d_fnstart(5, dev, "Enrollment and activation requested. \n"); | ||
409 | mutex_lock(&wss->mutex); | 378 | mutex_lock(&wss->mutex); |
410 | result = wlp_wss_enroll(wss, wssid, devaddr); | 379 | result = wlp_wss_enroll(wss, wssid, devaddr); |
411 | if (result < 0) { | 380 | if (result < 0) { |
@@ -424,7 +393,6 @@ int wlp_wss_enroll_activate(struct wlp_wss *wss, struct wlp_uuid *wssid, | |||
424 | error_activate: | 393 | error_activate: |
425 | error_enroll: | 394 | error_enroll: |
426 | mutex_unlock(&wss->mutex); | 395 | mutex_unlock(&wss->mutex); |
427 | d_fnend(5, dev, "Completed. result = %d \n", result); | ||
428 | return result; | 396 | return result; |
429 | } | 397 | } |
430 | 398 | ||
@@ -447,11 +415,9 @@ int wlp_wss_create_activate(struct wlp_wss *wss, struct wlp_uuid *wssid, | |||
447 | struct device *dev = &wlp->rc->uwb_dev.dev; | 415 | struct device *dev = &wlp->rc->uwb_dev.dev; |
448 | int result = 0; | 416 | int result = 0; |
449 | char buf[WLP_WSS_UUID_STRSIZE]; | 417 | char buf[WLP_WSS_UUID_STRSIZE]; |
450 | d_fnstart(5, dev, "Request to create new WSS.\n"); | 418 | |
451 | result = wlp_wss_uuid_print(buf, sizeof(buf), wssid); | 419 | result = wlp_wss_uuid_print(buf, sizeof(buf), wssid); |
452 | d_printf(5, dev, "Request to create WSS: WSSID=%s, name=%s, " | 420 | |
453 | "sec_status=%u, accepting enrollment=%u \n", | ||
454 | buf, name, sec_status, accept); | ||
455 | if (!mutex_trylock(&wss->mutex)) { | 421 | if (!mutex_trylock(&wss->mutex)) { |
456 | dev_err(dev, "WLP: WLP association session in progress.\n"); | 422 | dev_err(dev, "WLP: WLP association session in progress.\n"); |
457 | return -EBUSY; | 423 | return -EBUSY; |
@@ -498,7 +464,6 @@ int wlp_wss_create_activate(struct wlp_wss *wss, struct wlp_uuid *wssid, | |||
498 | result = 0; | 464 | result = 0; |
499 | out: | 465 | out: |
500 | mutex_unlock(&wss->mutex); | 466 | mutex_unlock(&wss->mutex); |
501 | d_fnend(5, dev, "Completed. result = %d \n", result); | ||
502 | return result; | 467 | return result; |
503 | } | 468 | } |
504 | 469 | ||
@@ -520,16 +485,12 @@ int wlp_wss_is_active(struct wlp *wlp, struct wlp_wss *wss, | |||
520 | { | 485 | { |
521 | int result = 0; | 486 | int result = 0; |
522 | struct device *dev = &wlp->rc->uwb_dev.dev; | 487 | struct device *dev = &wlp->rc->uwb_dev.dev; |
523 | char buf[WLP_WSS_UUID_STRSIZE]; | ||
524 | DECLARE_COMPLETION_ONSTACK(completion); | 488 | DECLARE_COMPLETION_ONSTACK(completion); |
525 | struct wlp_session session; | 489 | struct wlp_session session; |
526 | struct sk_buff *skb; | 490 | struct sk_buff *skb; |
527 | struct wlp_frame_assoc *resp; | 491 | struct wlp_frame_assoc *resp; |
528 | struct wlp_uuid wssid; | 492 | struct wlp_uuid wssid; |
529 | 493 | ||
530 | wlp_wss_uuid_print(buf, sizeof(buf), &wss->wssid); | ||
531 | d_fnstart(5, dev, "wlp %p, wss %p (wssid %s), neighbor %02x:%02x \n", | ||
532 | wlp, wss, buf, dev_addr->data[1], dev_addr->data[0]); | ||
533 | mutex_lock(&wlp->mutex); | 494 | mutex_lock(&wlp->mutex); |
534 | /* Send C1 association frame */ | 495 | /* Send C1 association frame */ |
535 | result = wlp_send_assoc_frame(wlp, wss, dev_addr, WLP_ASSOC_C1); | 496 | result = wlp_send_assoc_frame(wlp, wss, dev_addr, WLP_ASSOC_C1); |
@@ -565,8 +526,6 @@ int wlp_wss_is_active(struct wlp *wlp, struct wlp_wss *wss, | |||
565 | /* Parse message in session->data: it will be either C2 or F0 */ | 526 | /* Parse message in session->data: it will be either C2 or F0 */ |
566 | skb = session.data; | 527 | skb = session.data; |
567 | resp = (void *) skb->data; | 528 | resp = (void *) skb->data; |
568 | d_printf(5, dev, "Received response to C1 frame. \n"); | ||
569 | d_dump(5, dev, skb->data, skb->len > 72 ? 72 : skb->len); | ||
570 | if (resp->type == WLP_ASSOC_F0) { | 529 | if (resp->type == WLP_ASSOC_F0) { |
571 | result = wlp_parse_f0(wlp, skb); | 530 | result = wlp_parse_f0(wlp, skb); |
572 | if (result < 0) | 531 | if (result < 0) |
@@ -584,11 +543,9 @@ int wlp_wss_is_active(struct wlp *wlp, struct wlp_wss *wss, | |||
584 | result = 0; | 543 | result = 0; |
585 | goto error_resp_parse; | 544 | goto error_resp_parse; |
586 | } | 545 | } |
587 | if (!memcmp(&wssid, &wss->wssid, sizeof(wssid))) { | 546 | if (!memcmp(&wssid, &wss->wssid, sizeof(wssid))) |
588 | d_printf(5, dev, "WSSID in C2 frame matches local " | ||
589 | "active WSS.\n"); | ||
590 | result = 1; | 547 | result = 1; |
591 | } else { | 548 | else { |
592 | dev_err(dev, "WLP: Received a C2 frame without matching " | 549 | dev_err(dev, "WLP: Received a C2 frame without matching " |
593 | "WSSID.\n"); | 550 | "WSSID.\n"); |
594 | result = 0; | 551 | result = 0; |
@@ -598,8 +555,6 @@ error_resp_parse: | |||
598 | out: | 555 | out: |
599 | wlp->session = NULL; | 556 | wlp->session = NULL; |
600 | mutex_unlock(&wlp->mutex); | 557 | mutex_unlock(&wlp->mutex); |
601 | d_fnend(5, dev, "wlp %p, wss %p (wssid %s), neighbor %02x:%02x \n", | ||
602 | wlp, wss, buf, dev_addr->data[1], dev_addr->data[0]); | ||
603 | return result; | 558 | return result; |
604 | } | 559 | } |
605 | 560 | ||
@@ -620,16 +575,8 @@ int wlp_wss_activate_connection(struct wlp *wlp, struct wlp_wss *wss, | |||
620 | { | 575 | { |
621 | struct device *dev = &wlp->rc->uwb_dev.dev; | 576 | struct device *dev = &wlp->rc->uwb_dev.dev; |
622 | int result = 0; | 577 | int result = 0; |
623 | char buf[WLP_WSS_UUID_STRSIZE]; | ||
624 | wlp_wss_uuid_print(buf, sizeof(buf), wssid); | ||
625 | d_fnstart(5, dev, "wlp %p, wss %p, wssid %s, tag %u, virtual " | ||
626 | "%02x:%02x:%02x:%02x:%02x:%02x \n", wlp, wss, buf, *tag, | ||
627 | virt_addr->data[0], virt_addr->data[1], virt_addr->data[2], | ||
628 | virt_addr->data[3], virt_addr->data[4], virt_addr->data[5]); | ||
629 | 578 | ||
630 | if (!memcmp(wssid, &wss->wssid, sizeof(*wssid))) { | 579 | if (!memcmp(wssid, &wss->wssid, sizeof(*wssid))) { |
631 | d_printf(5, dev, "WSSID from neighbor frame matches local " | ||
632 | "active WSS.\n"); | ||
633 | /* Update EDA cache */ | 580 | /* Update EDA cache */ |
634 | result = wlp_eda_update_node(&wlp->eda, dev_addr, wss, | 581 | result = wlp_eda_update_node(&wlp->eda, dev_addr, wss, |
635 | (void *) virt_addr->data, *tag, | 582 | (void *) virt_addr->data, *tag, |
@@ -638,18 +585,9 @@ int wlp_wss_activate_connection(struct wlp *wlp, struct wlp_wss *wss, | |||
638 | dev_err(dev, "WLP: Unable to update EDA cache " | 585 | dev_err(dev, "WLP: Unable to update EDA cache " |
639 | "with new connected neighbor information.\n"); | 586 | "with new connected neighbor information.\n"); |
640 | } else { | 587 | } else { |
641 | dev_err(dev, "WLP: Neighbor does not have matching " | 588 | dev_err(dev, "WLP: Neighbor does not have matching WSSID.\n"); |
642 | "WSSID.\n"); | ||
643 | result = -EINVAL; | 589 | result = -EINVAL; |
644 | } | 590 | } |
645 | |||
646 | d_fnend(5, dev, "wlp %p, wss %p, wssid %s, tag %u, virtual " | ||
647 | "%02x:%02x:%02x:%02x:%02x:%02x, result = %d \n", | ||
648 | wlp, wss, buf, *tag, | ||
649 | virt_addr->data[0], virt_addr->data[1], virt_addr->data[2], | ||
650 | virt_addr->data[3], virt_addr->data[4], virt_addr->data[5], | ||
651 | result); | ||
652 | |||
653 | return result; | 591 | return result; |
654 | } | 592 | } |
655 | 593 | ||
@@ -665,7 +603,6 @@ int wlp_wss_connect_neighbor(struct wlp *wlp, struct wlp_wss *wss, | |||
665 | { | 603 | { |
666 | int result; | 604 | int result; |
667 | struct device *dev = &wlp->rc->uwb_dev.dev; | 605 | struct device *dev = &wlp->rc->uwb_dev.dev; |
668 | char buf[WLP_WSS_UUID_STRSIZE]; | ||
669 | struct wlp_uuid wssid; | 606 | struct wlp_uuid wssid; |
670 | u8 tag; | 607 | u8 tag; |
671 | struct uwb_mac_addr virt_addr; | 608 | struct uwb_mac_addr virt_addr; |
@@ -674,9 +611,6 @@ int wlp_wss_connect_neighbor(struct wlp *wlp, struct wlp_wss *wss, | |||
674 | struct wlp_frame_assoc *resp; | 611 | struct wlp_frame_assoc *resp; |
675 | struct sk_buff *skb; | 612 | struct sk_buff *skb; |
676 | 613 | ||
677 | wlp_wss_uuid_print(buf, sizeof(buf), &wss->wssid); | ||
678 | d_fnstart(5, dev, "wlp %p, wss %p (wssid %s), neighbor %02x:%02x \n", | ||
679 | wlp, wss, buf, dev_addr->data[1], dev_addr->data[0]); | ||
680 | mutex_lock(&wlp->mutex); | 614 | mutex_lock(&wlp->mutex); |
681 | /* Send C3 association frame */ | 615 | /* Send C3 association frame */ |
682 | result = wlp_send_assoc_frame(wlp, wss, dev_addr, WLP_ASSOC_C3); | 616 | result = wlp_send_assoc_frame(wlp, wss, dev_addr, WLP_ASSOC_C3); |
@@ -711,8 +645,6 @@ int wlp_wss_connect_neighbor(struct wlp *wlp, struct wlp_wss *wss, | |||
711 | /* Parse message in session->data: it will be either C4 or F0 */ | 645 | /* Parse message in session->data: it will be either C4 or F0 */ |
712 | skb = session.data; | 646 | skb = session.data; |
713 | resp = (void *) skb->data; | 647 | resp = (void *) skb->data; |
714 | d_printf(5, dev, "Received response to C3 frame. \n"); | ||
715 | d_dump(5, dev, skb->data, skb->len > 72 ? 72 : skb->len); | ||
716 | if (resp->type == WLP_ASSOC_F0) { | 648 | if (resp->type == WLP_ASSOC_F0) { |
717 | result = wlp_parse_f0(wlp, skb); | 649 | result = wlp_parse_f0(wlp, skb); |
718 | if (result < 0) | 650 | if (result < 0) |
@@ -744,8 +676,6 @@ out: | |||
744 | WLP_WSS_CONNECT_FAILED); | 676 | WLP_WSS_CONNECT_FAILED); |
745 | wlp->session = NULL; | 677 | wlp->session = NULL; |
746 | mutex_unlock(&wlp->mutex); | 678 | mutex_unlock(&wlp->mutex); |
747 | d_fnend(5, dev, "wlp %p, wss %p (wssid %s), neighbor %02x:%02x \n", | ||
748 | wlp, wss, buf, dev_addr->data[1], dev_addr->data[0]); | ||
749 | return result; | 679 | return result; |
750 | } | 680 | } |
751 | 681 | ||
@@ -780,12 +710,8 @@ void wlp_wss_connect_send(struct work_struct *ws) | |||
780 | struct wlp_wss *wss = &wlp->wss; | 710 | struct wlp_wss *wss = &wlp->wss; |
781 | int result; | 711 | int result; |
782 | struct device *dev = &wlp->rc->uwb_dev.dev; | 712 | struct device *dev = &wlp->rc->uwb_dev.dev; |
783 | char buf[WLP_WSS_UUID_STRSIZE]; | ||
784 | 713 | ||
785 | mutex_lock(&wss->mutex); | 714 | mutex_lock(&wss->mutex); |
786 | wlp_wss_uuid_print(buf, sizeof(buf), &wss->wssid); | ||
787 | d_fnstart(5, dev, "wlp %p, wss %p (wssid %s), neighbor %02x:%02x \n", | ||
788 | wlp, wss, buf, dev_addr->data[1], dev_addr->data[0]); | ||
789 | if (wss->state < WLP_WSS_STATE_ACTIVE) { | 715 | if (wss->state < WLP_WSS_STATE_ACTIVE) { |
790 | if (printk_ratelimit()) | 716 | if (printk_ratelimit()) |
791 | dev_err(dev, "WLP: Attempting to connect with " | 717 | dev_err(dev, "WLP: Attempting to connect with " |
@@ -836,7 +762,6 @@ out: | |||
836 | BUG_ON(wlp->start_queue == NULL); | 762 | BUG_ON(wlp->start_queue == NULL); |
837 | wlp->start_queue(wlp); | 763 | wlp->start_queue(wlp); |
838 | mutex_unlock(&wss->mutex); | 764 | mutex_unlock(&wss->mutex); |
839 | d_fnend(5, dev, "wlp %p, wss %p (wssid %s)\n", wlp, wss, buf); | ||
840 | } | 765 | } |
841 | 766 | ||
842 | /** | 767 | /** |
@@ -855,7 +780,6 @@ int wlp_wss_prep_hdr(struct wlp *wlp, struct wlp_eda_node *eda_entry, | |||
855 | struct sk_buff *skb = _skb; | 780 | struct sk_buff *skb = _skb; |
856 | struct wlp_frame_std_abbrv_hdr *std_hdr; | 781 | struct wlp_frame_std_abbrv_hdr *std_hdr; |
857 | 782 | ||
858 | d_fnstart(6, dev, "wlp %p \n", wlp); | ||
859 | if (eda_entry->state == WLP_WSS_CONNECTED) { | 783 | if (eda_entry->state == WLP_WSS_CONNECTED) { |
860 | /* Add WLP header */ | 784 | /* Add WLP header */ |
861 | BUG_ON(skb_headroom(skb) < sizeof(*std_hdr)); | 785 | BUG_ON(skb_headroom(skb) < sizeof(*std_hdr)); |
@@ -873,7 +797,6 @@ int wlp_wss_prep_hdr(struct wlp *wlp, struct wlp_eda_node *eda_entry, | |||
873 | dev_addr->data[0]); | 797 | dev_addr->data[0]); |
874 | result = -EINVAL; | 798 | result = -EINVAL; |
875 | } | 799 | } |
876 | d_fnend(6, dev, "wlp %p \n", wlp); | ||
877 | return result; | 800 | return result; |
878 | } | 801 | } |
879 | 802 | ||
@@ -893,16 +816,9 @@ int wlp_wss_connect_prep(struct wlp *wlp, struct wlp_eda_node *eda_entry, | |||
893 | { | 816 | { |
894 | int result = 0; | 817 | int result = 0; |
895 | struct device *dev = &wlp->rc->uwb_dev.dev; | 818 | struct device *dev = &wlp->rc->uwb_dev.dev; |
896 | struct uwb_dev_addr *dev_addr = &eda_entry->dev_addr; | ||
897 | unsigned char *eth_addr = eda_entry->eth_addr; | ||
898 | struct sk_buff *skb = _skb; | 819 | struct sk_buff *skb = _skb; |
899 | struct wlp_assoc_conn_ctx *conn_ctx; | 820 | struct wlp_assoc_conn_ctx *conn_ctx; |
900 | 821 | ||
901 | d_fnstart(5, dev, "wlp %p\n", wlp); | ||
902 | d_printf(5, dev, "To neighbor %02x:%02x with eth " | ||
903 | "%02x:%02x:%02x:%02x:%02x:%02x\n", dev_addr->data[1], | ||
904 | dev_addr->data[0], eth_addr[0], eth_addr[1], eth_addr[2], | ||
905 | eth_addr[3], eth_addr[4], eth_addr[5]); | ||
906 | if (eda_entry->state == WLP_WSS_UNCONNECTED) { | 822 | if (eda_entry->state == WLP_WSS_UNCONNECTED) { |
907 | /* We don't want any more packets while we set up connection */ | 823 | /* We don't want any more packets while we set up connection */ |
908 | BUG_ON(wlp->stop_queue == NULL); | 824 | BUG_ON(wlp->stop_queue == NULL); |
@@ -929,12 +845,9 @@ int wlp_wss_connect_prep(struct wlp *wlp, struct wlp_eda_node *eda_entry, | |||
929 | "previously. Not retrying. \n"); | 845 | "previously. Not retrying. \n"); |
930 | result = -ENONET; | 846 | result = -ENONET; |
931 | goto out; | 847 | goto out; |
932 | } else { /* eda_entry->state == WLP_WSS_CONNECTED */ | 848 | } else /* eda_entry->state == WLP_WSS_CONNECTED */ |
933 | d_printf(5, dev, "Neighbor is connected, preparing frame.\n"); | ||
934 | result = wlp_wss_prep_hdr(wlp, eda_entry, skb); | 849 | result = wlp_wss_prep_hdr(wlp, eda_entry, skb); |
935 | } | ||
936 | out: | 850 | out: |
937 | d_fnend(5, dev, "wlp %p, result = %d \n", wlp, result); | ||
938 | return result; | 851 | return result; |
939 | } | 852 | } |
940 | 853 | ||
@@ -957,8 +870,6 @@ int wlp_wss_send_copy(struct wlp *wlp, struct wlp_eda_node *eda_entry, | |||
957 | struct sk_buff *copy; | 870 | struct sk_buff *copy; |
958 | struct uwb_dev_addr *dev_addr = &eda_entry->dev_addr; | 871 | struct uwb_dev_addr *dev_addr = &eda_entry->dev_addr; |
959 | 872 | ||
960 | d_fnstart(5, dev, "to neighbor %02x:%02x, skb (%p) \n", | ||
961 | dev_addr->data[1], dev_addr->data[0], skb); | ||
962 | copy = skb_copy(skb, GFP_ATOMIC); | 873 | copy = skb_copy(skb, GFP_ATOMIC); |
963 | if (copy == NULL) { | 874 | if (copy == NULL) { |
964 | if (printk_ratelimit()) | 875 | if (printk_ratelimit()) |
@@ -988,8 +899,6 @@ int wlp_wss_send_copy(struct wlp *wlp, struct wlp_eda_node *eda_entry, | |||
988 | dev_kfree_skb_irq(copy);/*we need to free if tx fails */ | 899 | dev_kfree_skb_irq(copy);/*we need to free if tx fails */ |
989 | } | 900 | } |
990 | out: | 901 | out: |
991 | d_fnend(5, dev, "to neighbor %02x:%02x \n", dev_addr->data[1], | ||
992 | dev_addr->data[0]); | ||
993 | return result; | 902 | return result; |
994 | } | 903 | } |
995 | 904 | ||
@@ -1005,7 +914,7 @@ int wlp_wss_setup(struct net_device *net_dev, struct wlp_wss *wss) | |||
1005 | struct wlp *wlp = container_of(wss, struct wlp, wss); | 914 | struct wlp *wlp = container_of(wss, struct wlp, wss); |
1006 | struct device *dev = &wlp->rc->uwb_dev.dev; | 915 | struct device *dev = &wlp->rc->uwb_dev.dev; |
1007 | int result = 0; | 916 | int result = 0; |
1008 | d_fnstart(5, dev, "wss (%p) \n", wss); | 917 | |
1009 | mutex_lock(&wss->mutex); | 918 | mutex_lock(&wss->mutex); |
1010 | wss->kobj.parent = &net_dev->dev.kobj; | 919 | wss->kobj.parent = &net_dev->dev.kobj; |
1011 | if (!is_valid_ether_addr(net_dev->dev_addr)) { | 920 | if (!is_valid_ether_addr(net_dev->dev_addr)) { |
@@ -1018,7 +927,6 @@ int wlp_wss_setup(struct net_device *net_dev, struct wlp_wss *wss) | |||
1018 | sizeof(wss->virtual_addr.data)); | 927 | sizeof(wss->virtual_addr.data)); |
1019 | out: | 928 | out: |
1020 | mutex_unlock(&wss->mutex); | 929 | mutex_unlock(&wss->mutex); |
1021 | d_fnend(5, dev, "wss (%p) \n", wss); | ||
1022 | return result; | 930 | return result; |
1023 | } | 931 | } |
1024 | EXPORT_SYMBOL_GPL(wlp_wss_setup); | 932 | EXPORT_SYMBOL_GPL(wlp_wss_setup); |
@@ -1035,8 +943,7 @@ EXPORT_SYMBOL_GPL(wlp_wss_setup); | |||
1035 | void wlp_wss_remove(struct wlp_wss *wss) | 943 | void wlp_wss_remove(struct wlp_wss *wss) |
1036 | { | 944 | { |
1037 | struct wlp *wlp = container_of(wss, struct wlp, wss); | 945 | struct wlp *wlp = container_of(wss, struct wlp, wss); |
1038 | struct device *dev = &wlp->rc->uwb_dev.dev; | 946 | |
1039 | d_fnstart(5, dev, "wss (%p) \n", wss); | ||
1040 | mutex_lock(&wss->mutex); | 947 | mutex_lock(&wss->mutex); |
1041 | if (wss->state == WLP_WSS_STATE_ACTIVE) | 948 | if (wss->state == WLP_WSS_STATE_ACTIVE) |
1042 | uwb_rc_ie_rm(wlp->rc, UWB_IE_WLP); | 949 | uwb_rc_ie_rm(wlp->rc, UWB_IE_WLP); |
@@ -1050,6 +957,5 @@ void wlp_wss_remove(struct wlp_wss *wss) | |||
1050 | wlp_eda_release(&wlp->eda); | 957 | wlp_eda_release(&wlp->eda); |
1051 | wlp_eda_init(&wlp->eda); | 958 | wlp_eda_init(&wlp->eda); |
1052 | mutex_unlock(&wss->mutex); | 959 | mutex_unlock(&wss->mutex); |
1053 | d_fnend(5, dev, "wss (%p) \n", wss); | ||
1054 | } | 960 | } |
1055 | EXPORT_SYMBOL_GPL(wlp_wss_remove); | 961 | EXPORT_SYMBOL_GPL(wlp_wss_remove); |
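
Nearly every wss-lc.c hunk is the same mechanical change: d_fnstart()/d_fnend() entry and exit tracing disappears outright, and the few d_printf() messages worth keeping become dev_dbg(). The old macros picked their verbosity at build time from the per-file D_LOCAL define; dev_dbg() is compiled in once and, assuming CONFIG_DYNAMIC_DEBUG, can be flipped per call site at run time:

    /* before: gated by '#define D_LOCAL 5' at the top of the file */
    d_printf(2, dev, "Successfully enrolled into WSS %s \n", buf);

    /* after: standard device-prefixed debug; with dynamic debug enabled,
     * e.g. echo 'file wss-lc.c +p' > <debugfs>/dynamic_debug/control */
    dev_dbg(dev, "Successfully enrolled into WSS %s \n", buf);
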
diff --git a/drivers/xen/events.c b/drivers/xen/events.c index e26733a9df21..eb0dfdeaa949 100644 --- a/drivers/xen/events.c +++ b/drivers/xen/events.c | |||
@@ -585,7 +585,7 @@ void rebind_evtchn_irq(int evtchn, int irq) | |||
585 | spin_unlock(&irq_mapping_update_lock); | 585 | spin_unlock(&irq_mapping_update_lock); |
586 | 586 | ||
587 | /* new event channels are always bound to cpu 0 */ | 587 | /* new event channels are always bound to cpu 0 */ |
588 | irq_set_affinity(irq, cpumask_of_cpu(0)); | 588 | irq_set_affinity(irq, cpumask_of(0)); |
589 | 589 | ||
590 | /* Unmask the event channel. */ | 590 | /* Unmask the event channel. */ |
591 | enable_irq(irq); | 591 | enable_irq(irq); |
@@ -614,9 +614,9 @@ static void rebind_irq_to_cpu(unsigned irq, unsigned tcpu) | |||
614 | } | 614 | } |
615 | 615 | ||
616 | 616 | ||
617 | static void set_affinity_irq(unsigned irq, cpumask_t dest) | 617 | static void set_affinity_irq(unsigned irq, const struct cpumask *dest) |
618 | { | 618 | { |
619 | unsigned tcpu = first_cpu(dest); | 619 | unsigned tcpu = cpumask_first(dest); |
620 | rebind_irq_to_cpu(irq, tcpu); | 620 | rebind_irq_to_cpu(irq, tcpu); |
621 | } | 621 | } |
622 | 622 | ||
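
The two xen/events.c hunks belong to the tree-wide cpumask rework: passing cpumask_t by value copies an NR_CPUS-sized bitmap through the stack, so the API moves to const struct cpumask pointers with matching accessor renames. The mapping used here, as a before/after sketch (the old forms stop compiling once the conversion lands):

    cpumask_t old_dest = cpumask_of_cpu(0);          /* old: by-value copy */
    unsigned old_cpu = first_cpu(old_dest);

    const struct cpumask *new_dest = cpumask_of(0);  /* new: borrowed pointer */
    unsigned new_cpu = cpumask_first(new_dest);
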
diff --git a/fs/anon_inodes.c b/fs/anon_inodes.c index c16d9be1b017..3bbdb9d02376 100644 --- a/fs/anon_inodes.c +++ b/fs/anon_inodes.c | |||
@@ -79,9 +79,12 @@ int anon_inode_getfd(const char *name, const struct file_operations *fops, | |||
79 | if (IS_ERR(anon_inode_inode)) | 79 | if (IS_ERR(anon_inode_inode)) |
80 | return -ENODEV; | 80 | return -ENODEV; |
81 | 81 | ||
82 | if (fops->owner && !try_module_get(fops->owner)) | ||
83 | return -ENOENT; | ||
84 | |||
82 | error = get_unused_fd_flags(flags); | 85 | error = get_unused_fd_flags(flags); |
83 | if (error < 0) | 86 | if (error < 0) |
84 | return error; | 87 | goto err_module; |
85 | fd = error; | 88 | fd = error; |
86 | 89 | ||
87 | /* | 90 | /* |
@@ -128,6 +131,8 @@ err_dput: | |||
128 | dput(dentry); | 131 | dput(dentry); |
129 | err_put_unused_fd: | 132 | err_put_unused_fd: |
130 | put_unused_fd(fd); | 133 | put_unused_fd(fd); |
134 | err_module: | ||
135 | module_put(fops->owner); | ||
131 | return error; | 136 | return error; |
132 | } | 137 | } |
133 | EXPORT_SYMBOL_GPL(anon_inode_getfd); | 138 | EXPORT_SYMBOL_GPL(anon_inode_getfd); |
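
The anon_inodes change closes a module-lifetime hole: once the new fd is installed, userspace can keep the file open long after the module providing the file_operations has unloaded, leaving ->release() pointing into freed text. Pinning fops->owner before the fd is allocated, and dropping the pin on every failure path, is the usual fix; condensed from the hunks (labels abbreviated):

    if (fops->owner && !try_module_get(fops->owner))
            return -ENOENT;             /* provider is going away */

    error = get_unused_fd_flags(flags);
    if (error < 0)
            goto err_module;
    /* ... allocate the file, fd_install(), return the fd ... */

    err_module:
            module_put(fops->owner);    /* module_put(NULL) is a no-op */
            return error;

On success the reference travels with the installed file and lives as long as the file does.
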
diff --git a/fs/devpts/inode.c b/fs/devpts/inode.c index 5d61b7c06e13..fff96e152c0c 100644 --- a/fs/devpts/inode.c +++ b/fs/devpts/inode.c | |||
@@ -27,25 +27,32 @@ | |||
27 | #define DEVPTS_SUPER_MAGIC 0x1cd1 | 27 | #define DEVPTS_SUPER_MAGIC 0x1cd1 |
28 | 28 | ||
29 | #define DEVPTS_DEFAULT_MODE 0600 | 29 | #define DEVPTS_DEFAULT_MODE 0600 |
30 | /* | ||
31 | * ptmx is a new node in /dev/pts and will be unused in legacy (single- | ||
32 | * instance) mode. To prevent surprises in user space, set permissions of | ||
33 | * ptmx to 0. Use 'chmod' or remount with '-o ptmxmode' to set meaningful | ||
34 | * permissions. | ||
35 | */ | ||
36 | #define DEVPTS_DEFAULT_PTMX_MODE 0000 | ||
30 | #define PTMX_MINOR 2 | 37 | #define PTMX_MINOR 2 |
31 | 38 | ||
32 | extern int pty_limit; /* Config limit on Unix98 ptys */ | 39 | extern int pty_limit; /* Config limit on Unix98 ptys */ |
33 | static DEFINE_IDA(allocated_ptys); | ||
34 | static DEFINE_MUTEX(allocated_ptys_lock); | 40 | static DEFINE_MUTEX(allocated_ptys_lock); |
35 | 41 | ||
36 | static struct vfsmount *devpts_mnt; | 42 | static struct vfsmount *devpts_mnt; |
37 | static struct dentry *devpts_root; | ||
38 | 43 | ||
39 | static struct { | 44 | struct pts_mount_opts { |
40 | int setuid; | 45 | int setuid; |
41 | int setgid; | 46 | int setgid; |
42 | uid_t uid; | 47 | uid_t uid; |
43 | gid_t gid; | 48 | gid_t gid; |
44 | umode_t mode; | 49 | umode_t mode; |
45 | } config = {.mode = DEVPTS_DEFAULT_MODE}; | 50 | umode_t ptmxmode; |
51 | int newinstance; | ||
52 | }; | ||
46 | 53 | ||
47 | enum { | 54 | enum { |
48 | Opt_uid, Opt_gid, Opt_mode, | 55 | Opt_uid, Opt_gid, Opt_mode, Opt_ptmxmode, Opt_newinstance, |
49 | Opt_err | 56 | Opt_err |
50 | }; | 57 | }; |
51 | 58 | ||
@@ -53,18 +60,50 @@ static const match_table_t tokens = { | |||
53 | {Opt_uid, "uid=%u"}, | 60 | {Opt_uid, "uid=%u"}, |
54 | {Opt_gid, "gid=%u"}, | 61 | {Opt_gid, "gid=%u"}, |
55 | {Opt_mode, "mode=%o"}, | 62 | {Opt_mode, "mode=%o"}, |
63 | #ifdef CONFIG_DEVPTS_MULTIPLE_INSTANCES | ||
64 | {Opt_ptmxmode, "ptmxmode=%o"}, | ||
65 | {Opt_newinstance, "newinstance"}, | ||
66 | #endif | ||
56 | {Opt_err, NULL} | 67 | {Opt_err, NULL} |
57 | }; | 68 | }; |
58 | 69 | ||
59 | static int devpts_remount(struct super_block *sb, int *flags, char *data) | 70 | struct pts_fs_info { |
71 | struct ida allocated_ptys; | ||
72 | struct pts_mount_opts mount_opts; | ||
73 | struct dentry *ptmx_dentry; | ||
74 | }; | ||
75 | |||
76 | static inline struct pts_fs_info *DEVPTS_SB(struct super_block *sb) | ||
77 | { | ||
78 | return sb->s_fs_info; | ||
79 | } | ||
80 | |||
81 | static inline struct super_block *pts_sb_from_inode(struct inode *inode) | ||
82 | { | ||
83 | #ifdef CONFIG_DEVPTS_MULTIPLE_INSTANCES | ||
84 | if (inode->i_sb->s_magic == DEVPTS_SUPER_MAGIC) | ||
85 | return inode->i_sb; | ||
86 | #endif | ||
87 | return devpts_mnt->mnt_sb; | ||
88 | } | ||
89 | |||
90 | #define PARSE_MOUNT 0 | ||
91 | #define PARSE_REMOUNT 1 | ||
92 | |||
93 | static int parse_mount_options(char *data, int op, struct pts_mount_opts *opts) | ||
60 | { | 94 | { |
61 | char *p; | 95 | char *p; |
62 | 96 | ||
63 | config.setuid = 0; | 97 | opts->setuid = 0; |
64 | config.setgid = 0; | 98 | opts->setgid = 0; |
65 | config.uid = 0; | 99 | opts->uid = 0; |
66 | config.gid = 0; | 100 | opts->gid = 0; |
67 | config.mode = DEVPTS_DEFAULT_MODE; | 101 | opts->mode = DEVPTS_DEFAULT_MODE; |
102 | opts->ptmxmode = DEVPTS_DEFAULT_PTMX_MODE; | ||
103 | |||
104 | /* newinstance makes sense only on initial mount */ | ||
105 | if (op == PARSE_MOUNT) | ||
106 | opts->newinstance = 0; | ||
68 | 107 | ||
69 | while ((p = strsep(&data, ",")) != NULL) { | 108 | while ((p = strsep(&data, ",")) != NULL) { |
70 | substring_t args[MAX_OPT_ARGS]; | 109 | substring_t args[MAX_OPT_ARGS]; |
@@ -79,20 +118,32 @@ static int devpts_remount(struct super_block *sb, int *flags, char *data) | |||
79 | case Opt_uid: | 118 | case Opt_uid: |
80 | if (match_int(&args[0], &option)) | 119 | if (match_int(&args[0], &option)) |
81 | return -EINVAL; | 120 | return -EINVAL; |
82 | config.uid = option; | 121 | opts->uid = option; |
83 | config.setuid = 1; | 122 | opts->setuid = 1; |
84 | break; | 123 | break; |
85 | case Opt_gid: | 124 | case Opt_gid: |
86 | if (match_int(&args[0], &option)) | 125 | if (match_int(&args[0], &option)) |
87 | return -EINVAL; | 126 | return -EINVAL; |
88 | config.gid = option; | 127 | opts->gid = option; |
89 | config.setgid = 1; | 128 | opts->setgid = 1; |
90 | break; | 129 | break; |
91 | case Opt_mode: | 130 | case Opt_mode: |
92 | if (match_octal(&args[0], &option)) | 131 | if (match_octal(&args[0], &option)) |
93 | return -EINVAL; | 132 | return -EINVAL; |
94 | config.mode = option & S_IALLUGO; | 133 | opts->mode = option & S_IALLUGO; |
134 | break; | ||
135 | #ifdef CONFIG_DEVPTS_MULTIPLE_INSTANCES | ||
136 | case Opt_ptmxmode: | ||
137 | if (match_octal(&args[0], &option)) | ||
138 | return -EINVAL; | ||
139 | opts->ptmxmode = option & S_IALLUGO; | ||
140 | break; | ||
141 | case Opt_newinstance: | ||
142 | /* newinstance makes sense only on initial mount */ | ||
143 | if (op == PARSE_MOUNT) | ||
144 | opts->newinstance = 1; | ||
95 | break; | 145 | break; |
146 | #endif | ||
96 | default: | 147 | default: |
97 | printk(KERN_ERR "devpts: called with bogus options\n"); | 148 | printk(KERN_ERR "devpts: called with bogus options\n"); |
98 | return -EINVAL; | 149 | return -EINVAL; |
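
Structurally this is the pivot of the devpts rework: option parsing no longer mutates one file-scope config, it fills a caller-owned struct pts_mount_opts, and that struct, together with the pty ida and the ptmx dentry, becomes per-superblock state in pts_fs_info. Everything that used to read the global now walks from an inode to its own mount's options, roughly as below (a sketch; inode stands for any devpts inode):

    struct super_block *sb = pts_sb_from_inode(inode);
    struct pts_fs_info *fsi = DEVPTS_SB(sb);
    struct pts_mount_opts *opts = &fsi->mount_opts;  /* this mount's uid, gid,
                                                        mode, ptmxmode */

That indirection is what makes independent, differently configured devpts instances possible at all.
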
@@ -102,13 +153,108 @@ static int devpts_remount(struct super_block *sb, int *flags, char *data) | |||
102 | return 0; | 153 | return 0; |
103 | } | 154 | } |
104 | 155 | ||
156 | #ifdef CONFIG_DEVPTS_MULTIPLE_INSTANCES | ||
157 | static int mknod_ptmx(struct super_block *sb) | ||
158 | { | ||
159 | int mode; | ||
160 | int rc = -ENOMEM; | ||
161 | struct dentry *dentry; | ||
162 | struct inode *inode; | ||
163 | struct dentry *root = sb->s_root; | ||
164 | struct pts_fs_info *fsi = DEVPTS_SB(sb); | ||
165 | struct pts_mount_opts *opts = &fsi->mount_opts; | ||
166 | |||
167 | mutex_lock(&root->d_inode->i_mutex); | ||
168 | |||
169 | /* If we have already created ptmx node, return */ | ||
170 | if (fsi->ptmx_dentry) { | ||
171 | rc = 0; | ||
172 | goto out; | ||
173 | } | ||
174 | |||
175 | dentry = d_alloc_name(root, "ptmx"); | ||
176 | if (!dentry) { | ||
177 | printk(KERN_NOTICE "Unable to alloc dentry for ptmx node\n"); | ||
178 | goto out; | ||
179 | } | ||
180 | |||
181 | /* | ||
182 | * Create a new 'ptmx' node in this mount of devpts. | ||
183 | */ | ||
184 | inode = new_inode(sb); | ||
185 | if (!inode) { | ||
186 | printk(KERN_ERR "Unable to alloc inode for ptmx node\n"); | ||
187 | dput(dentry); | ||
188 | goto out; | ||
189 | } | ||
190 | |||
191 | inode->i_ino = 2; | ||
192 | inode->i_uid = inode->i_gid = 0; | ||
193 | inode->i_blocks = 0; | ||
194 | inode->i_mtime = inode->i_atime = inode->i_ctime = CURRENT_TIME; | ||
195 | |||
196 | mode = S_IFCHR|opts->ptmxmode; | ||
197 | init_special_inode(inode, mode, MKDEV(TTYAUX_MAJOR, 2)); | ||
198 | |||
199 | d_add(dentry, inode); | ||
200 | |||
201 | fsi->ptmx_dentry = dentry; | ||
202 | rc = 0; | ||
203 | |||
204 | printk(KERN_DEBUG "Created ptmx node in devpts ino %lu\n", | ||
205 | inode->i_ino); | ||
206 | out: | ||
207 | mutex_unlock(&root->d_inode->i_mutex); | ||
208 | return rc; | ||
209 | } | ||
210 | |||
211 | static void update_ptmx_mode(struct pts_fs_info *fsi) | ||
212 | { | ||
213 | struct inode *inode; | ||
214 | if (fsi->ptmx_dentry) { | ||
215 | inode = fsi->ptmx_dentry->d_inode; | ||
216 | inode->i_mode = S_IFCHR|fsi->mount_opts.ptmxmode; | ||
217 | } | ||
218 | } | ||
219 | #else | ||
220 | static inline void update_ptmx_mode(struct pts_fs_info *fsi) | ||
221 | { | ||
222 | return; | ||
223 | } | ||
224 | #endif | ||
225 | |||
226 | static int devpts_remount(struct super_block *sb, int *flags, char *data) | ||
227 | { | ||
228 | int err; | ||
229 | struct pts_fs_info *fsi = DEVPTS_SB(sb); | ||
230 | struct pts_mount_opts *opts = &fsi->mount_opts; | ||
231 | |||
232 | err = parse_mount_options(data, PARSE_REMOUNT, opts); | ||
233 | |||
234 | /* | ||
235 | * parse_mount_options() restores options to default values | ||
236 | * before parsing and may have changed ptmxmode. So, update the | ||
237 | * mode in the inode too. Bogus options don't fail the remount, | ||
238 | * so do this even on error return. | ||
239 | */ | ||
240 | update_ptmx_mode(fsi); | ||
241 | |||
242 | return err; | ||
243 | } | ||
244 | |||
105 | static int devpts_show_options(struct seq_file *seq, struct vfsmount *vfs) | 245 | static int devpts_show_options(struct seq_file *seq, struct vfsmount *vfs) |
106 | { | 246 | { |
107 | if (config.setuid) | 247 | struct pts_fs_info *fsi = DEVPTS_SB(vfs->mnt_sb); |
108 | seq_printf(seq, ",uid=%u", config.uid); | 248 | struct pts_mount_opts *opts = &fsi->mount_opts; |
109 | if (config.setgid) | 249 | |
110 | seq_printf(seq, ",gid=%u", config.gid); | 250 | if (opts->setuid) |
111 | seq_printf(seq, ",mode=%03o", config.mode); | 251 | seq_printf(seq, ",uid=%u", opts->uid); |
252 | if (opts->setgid) | ||
253 | seq_printf(seq, ",gid=%u", opts->gid); | ||
254 | seq_printf(seq, ",mode=%03o", opts->mode); | ||
255 | #ifdef CONFIG_DEVPTS_MULTIPLE_INSTANCES | ||
256 | seq_printf(seq, ",ptmxmode=%03o", opts->ptmxmode); | ||
257 | #endif | ||
112 | 258 | ||
113 | return 0; | 259 | return 0; |
114 | } | 260 | } |
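
With remount and show_options handling the new options, a container or login manager can give itself a private pty namespace per mount. A hypothetical user-space invocation, assuming a kernel built with CONFIG_DEVPTS_MULTIPLE_INSTANCES (gid 5 as the tty group is convention, not something this patch sets):

    #include <sys/mount.h>

    /* a private instance: its own pty index space and its own ptmx node,
     * which stays at the deliberately restrictive default mode 0000 unless
     * ptmxmode (or a later chmod) says otherwise */
    int rc = mount("devpts", "/dev/pts", "devpts", 0,
                   "newinstance,ptmxmode=0666,mode=0620,gid=5");
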
@@ -119,10 +265,25 @@ static const struct super_operations devpts_sops = { | |||
119 | .show_options = devpts_show_options, | 265 | .show_options = devpts_show_options, |
120 | }; | 266 | }; |
121 | 267 | ||
268 | static void *new_pts_fs_info(void) | ||
269 | { | ||
270 | struct pts_fs_info *fsi; | ||
271 | |||
272 | fsi = kzalloc(sizeof(struct pts_fs_info), GFP_KERNEL); | ||
273 | if (!fsi) | ||
274 | return NULL; | ||
275 | |||
276 | ida_init(&fsi->allocated_ptys); | ||
277 | fsi->mount_opts.mode = DEVPTS_DEFAULT_MODE; | ||
278 | fsi->mount_opts.ptmxmode = DEVPTS_DEFAULT_PTMX_MODE; | ||
279 | |||
280 | return fsi; | ||
281 | } | ||
282 | |||
122 | static int | 283 | static int |
123 | devpts_fill_super(struct super_block *s, void *data, int silent) | 284 | devpts_fill_super(struct super_block *s, void *data, int silent) |
124 | { | 285 | { |
125 | struct inode * inode; | 286 | struct inode *inode; |
126 | 287 | ||
127 | s->s_blocksize = 1024; | 288 | s->s_blocksize = 1024; |
128 | s->s_blocksize_bits = 10; | 289 | s->s_blocksize_bits = 10; |
@@ -130,9 +291,13 @@ devpts_fill_super(struct super_block *s, void *data, int silent) | |||
130 | s->s_op = &devpts_sops; | 291 | s->s_op = &devpts_sops; |
131 | s->s_time_gran = 1; | 292 | s->s_time_gran = 1; |
132 | 293 | ||
294 | s->s_fs_info = new_pts_fs_info(); | ||
295 | if (!s->s_fs_info) | ||
296 | goto fail; | ||
297 | |||
133 | inode = new_inode(s); | 298 | inode = new_inode(s); |
134 | if (!inode) | 299 | if (!inode) |
135 | goto fail; | 300 | goto free_fsi; |
136 | inode->i_ino = 1; | 301 | inode->i_ino = 1; |
137 | inode->i_mtime = inode->i_atime = inode->i_ctime = CURRENT_TIME; | 302 | inode->i_mtime = inode->i_atime = inode->i_ctime = CURRENT_TIME; |
138 | inode->i_blocks = 0; | 303 | inode->i_blocks = 0; |
@@ -142,27 +307,226 @@ devpts_fill_super(struct super_block *s, void *data, int silent) | |||
142 | inode->i_fop = &simple_dir_operations; | 307 | inode->i_fop = &simple_dir_operations; |
143 | inode->i_nlink = 2; | 308 | inode->i_nlink = 2; |
144 | 309 | ||
145 | devpts_root = s->s_root = d_alloc_root(inode); | 310 | s->s_root = d_alloc_root(inode); |
146 | if (s->s_root) | 311 | if (s->s_root) |
147 | return 0; | 312 | return 0; |
148 | 313 | ||
149 | printk("devpts: get root dentry failed\n"); | 314 | printk(KERN_ERR "devpts: get root dentry failed\n"); |
150 | iput(inode); | 315 | iput(inode); |
316 | |||
317 | free_fsi: | ||
318 | kfree(s->s_fs_info); | ||
151 | fail: | 319 | fail: |
152 | return -ENOMEM; | 320 | return -ENOMEM; |
153 | } | 321 | } |
154 | 322 | ||
323 | #ifdef CONFIG_DEVPTS_MULTIPLE_INSTANCES | ||
324 | static int compare_init_pts_sb(struct super_block *s, void *p) | ||
325 | { | ||
326 | if (devpts_mnt) | ||
327 | return devpts_mnt->mnt_sb == s; | ||
328 | return 0; | ||
329 | } | ||
330 | |||
331 | /* | ||
332 | * Safely parse the mount options in @data and update @opts. | ||
333 | * | ||
334 | * devpts ends up parsing options two times during mount, due to the | ||
335 | * two modes of operation it supports. The first parse occurs in | ||
336 | * devpts_get_sb() when determining the mode (single-instance or | ||
337 | * multi-instance mode). The second parse happens in devpts_remount() | ||
338 | * or new_pts_mount() depending on the mode. | ||
339 | * | ||
340 | * Parsing of options modifies @data, making subsequent parsing | ||
341 | * incorrect. So make a local copy of @data and parse it. | ||
342 | * | ||
343 | * Return: 0 On success, -errno on error | ||
344 | */ | ||
345 | static int safe_parse_mount_options(void *data, struct pts_mount_opts *opts) | ||
346 | { | ||
347 | int rc; | ||
348 | void *datacp; | ||
349 | |||
350 | if (!data) | ||
351 | return 0; | ||
352 | |||
353 | /* Use kstrdup() ? */ | ||
354 | datacp = kmalloc(PAGE_SIZE, GFP_KERNEL); | ||
355 | if (!datacp) | ||
356 | return -ENOMEM; | ||
357 | |||
358 | memcpy(datacp, data, PAGE_SIZE); | ||
359 | rc = parse_mount_options((char *)datacp, PARSE_MOUNT, opts); | ||
360 | kfree(datacp); | ||
361 | |||
362 | return rc; | ||
363 | } | ||
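
The PAGE_SIZE copy exists because token parsers consume their input in place. A standalone illustration in plain C, assuming a glibc-style strsep(); this is not the kernel's parser, just the same mutation effect:

    #define _GNU_SOURCE
    #include <stdio.h>
    #include <string.h>

    int main(void)
    {
            /* strsep() overwrites each ',' with a NUL as it scans,
             * so the buffer only survives one parsing pass. */
            char opts[] = "newinstance,ptmxmode=0666";
            char *p = opts, *tok;

            while ((tok = strsep(&p, ",")) != NULL)
                    printf("token: %s\n", tok);

            /* opts now reads just "newinstance". */
            return 0;
    }
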
364 | |||
365 | /* | ||
366 | * Mount a new (private) instance of devpts. PTYs created in this | ||
367 | * instance are independent of the PTYs in other devpts instances. | ||
368 | */ | ||
369 | static int new_pts_mount(struct file_system_type *fs_type, int flags, | ||
370 | void *data, struct vfsmount *mnt) | ||
371 | { | ||
372 | int err; | ||
373 | struct pts_fs_info *fsi; | ||
374 | struct pts_mount_opts *opts; | ||
375 | |||
376 | printk(KERN_NOTICE "devpts: newinstance mount\n"); | ||
377 | |||
378 | err = get_sb_nodev(fs_type, flags, data, devpts_fill_super, mnt); | ||
379 | if (err) | ||
380 | return err; | ||
381 | |||
382 | fsi = DEVPTS_SB(mnt->mnt_sb); | ||
383 | opts = &fsi->mount_opts; | ||
384 | |||
385 | err = parse_mount_options(data, PARSE_MOUNT, opts); | ||
386 | if (err) | ||
387 | goto fail; | ||
388 | |||
389 | err = mknod_ptmx(mnt->mnt_sb); | ||
390 | if (err) | ||
391 | goto fail; | ||
392 | |||
393 | return 0; | ||
394 | |||
395 | fail: | ||
396 | dput(mnt->mnt_sb->s_root); | ||
397 | deactivate_super(mnt->mnt_sb); | ||
398 | return err; | ||
399 | } | ||
400 | |||
401 | /* | ||
402 | * Check if 'newinstance' mount option was specified in @data. | ||
403 | * | ||
404 | * Return: -errno on error (e.g. invalid mount options specified) | ||
405 | * : 1 if 'newinstance' mount option was specified | ||
406 | * : 0 if 'newinstance' mount option was NOT specified | ||
407 | */ | ||
408 | static int is_new_instance_mount(void *data) | ||
409 | { | ||
410 | int rc; | ||
411 | struct pts_mount_opts opts; | ||
412 | |||
413 | if (!data) | ||
414 | return 0; | ||
415 | |||
416 | rc = safe_parse_mount_options(data, &opts); | ||
417 | if (!rc) | ||
418 | rc = opts.newinstance; | ||
419 | |||
420 | return rc; | ||
421 | } | ||
422 | |||
423 | /* | ||
424 | * get_init_pts_sb() | ||
425 | * | ||
426 | * This interface is needed to support multiple namespace semantics in | ||
427 | * devpts while preserving backward compatibility of the current 'single- | ||
428 | * namespace' semantics. i.e all mounts of devpts without the 'newinstance' | ||
429 | * mount option should bind to the initial kernel mount, like | ||
430 | * get_sb_single(). | ||
431 | * | ||
432 | * Mounts with 'newinstance' option create a new private namespace. | ||
433 | * | ||
434 | * But for single-mount semantics, devpts cannot use get_sb_single(), | ||
435 | * because get_sb_single()/sget() find and use the super-block from | ||
436 | * the most recent mount of devpts. But that recent mount may be a | ||
437 | * 'newinstance' mount and get_sb_single() would pick the newinstance | ||
438 | * super-block instead of the initial super-block. | ||
439 | * | ||
440 | * This interface is identical to get_sb_single() except that it | ||
441 | * consistently selects the 'single-namespace' superblock even in the | ||
442 | * presence of the private namespace (i.e. 'newinstance') super-blocks. | ||
443 | */ | ||
444 | static int get_init_pts_sb(struct file_system_type *fs_type, int flags, | ||
445 | void *data, struct vfsmount *mnt) | ||
446 | { | ||
447 | struct super_block *s; | ||
448 | int error; | ||
449 | |||
450 | s = sget(fs_type, compare_init_pts_sb, set_anon_super, NULL); | ||
451 | if (IS_ERR(s)) | ||
452 | return PTR_ERR(s); | ||
453 | |||
454 | if (!s->s_root) { | ||
455 | s->s_flags = flags; | ||
456 | error = devpts_fill_super(s, data, flags & MS_SILENT ? 1 : 0); | ||
457 | if (error) { | ||
458 | up_write(&s->s_umount); | ||
459 | deactivate_super(s); | ||
460 | return error; | ||
461 | } | ||
462 | s->s_flags |= MS_ACTIVE; | ||
463 | } | ||
464 | do_remount_sb(s, flags, data, 0); | ||
465 | return simple_set_mnt(mnt, s); | ||
466 | } | ||
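
The contrast with get_sb_single() comes down to the compare callback handed to sget(). A paraphrased, non-verbatim summary:

    /*
     * get_sb_single():   sget(type, compare_single, ...) -- compare_single()
     *                    matches any existing superblock, so the most recent
     *                    mount (possibly a 'newinstance' one) would win.
     *
     * get_init_pts_sb(): sget(type, compare_init_pts_sb, ...) -- matches only
     *                    devpts_mnt->mnt_sb, so plain mounts always bind to
     *                    the initial kernel mount.
     */
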
467 | |||
468 | /* | ||
469 | * Mount or remount the initial kernel mount of devpts. This type of | ||
470 | * mount maintains the legacy, single-instance semantics, while the | ||
471 | * kernel still allows multiple-instances. | ||
472 | */ | ||
473 | static int init_pts_mount(struct file_system_type *fs_type, int flags, | ||
474 | void *data, struct vfsmount *mnt) | ||
475 | { | ||
476 | int err; | ||
477 | |||
478 | err = get_init_pts_sb(fs_type, flags, data, mnt); | ||
479 | if (err) | ||
480 | return err; | ||
481 | |||
482 | err = mknod_ptmx(mnt->mnt_sb); | ||
483 | if (err) { | ||
484 | dput(mnt->mnt_sb->s_root); | ||
485 | deactivate_super(mnt->mnt_sb); | ||
486 | } | ||
487 | |||
488 | return err; | ||
489 | } | ||
490 | |||
155 | static int devpts_get_sb(struct file_system_type *fs_type, | 491 | static int devpts_get_sb(struct file_system_type *fs_type, |
156 | int flags, const char *dev_name, void *data, struct vfsmount *mnt) | 492 | int flags, const char *dev_name, void *data, struct vfsmount *mnt) |
157 | { | 493 | { |
494 | int new; | ||
495 | |||
496 | new = is_new_instance_mount(data); | ||
497 | if (new < 0) | ||
498 | return new; | ||
499 | |||
500 | if (new) | ||
501 | return new_pts_mount(fs_type, flags, data, mnt); | ||
502 | |||
503 | return init_pts_mount(fs_type, flags, data, mnt); | ||
504 | } | ||
505 | #else | ||
506 | /* | ||
507 | * This supports only the legacy single-instance semantics (no | ||
508 | * multiple-instance semantics) | ||
509 | */ | ||
510 | static int devpts_get_sb(struct file_system_type *fs_type, int flags, | ||
511 | const char *dev_name, void *data, struct vfsmount *mnt) | ||
512 | { | ||
158 | return get_sb_single(fs_type, flags, data, devpts_fill_super, mnt); | 513 | return get_sb_single(fs_type, flags, data, devpts_fill_super, mnt); |
159 | } | 514 | } |
515 | #endif | ||
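
From userspace the two branches correspond to plain and 'newinstance' mounts. A hedged illustration; the mount points and options are assumptions:

    #include <sys/mount.h>

    static void devpts_mount_examples(void)
    {
            /* Legacy shared instance: binds to the initial kernel mount. */
            mount("devpts", "/dev/pts", "devpts", 0, NULL);

            /* Private instance: dispatched to new_pts_mount() above. */
            mount("devpts", "/mnt/pts", "devpts", 0,
                  "newinstance,ptmxmode=0666");
    }
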
516 | |||
517 | static void devpts_kill_sb(struct super_block *sb) | ||
518 | { | ||
519 | struct pts_fs_info *fsi = DEVPTS_SB(sb); | ||
520 | |||
521 | kfree(fsi); | ||
522 | kill_litter_super(sb); | ||
523 | } | ||
160 | 524 | ||
161 | static struct file_system_type devpts_fs_type = { | 525 | static struct file_system_type devpts_fs_type = { |
162 | .owner = THIS_MODULE, | 526 | .owner = THIS_MODULE, |
163 | .name = "devpts", | 527 | .name = "devpts", |
164 | .get_sb = devpts_get_sb, | 528 | .get_sb = devpts_get_sb, |
165 | .kill_sb = kill_anon_super, | 529 | .kill_sb = devpts_kill_sb, |
166 | }; | 530 | }; |
167 | 531 | ||
168 | /* | 532 | /* |
@@ -172,16 +536,17 @@ static struct file_system_type devpts_fs_type = { | |||
172 | 536 | ||
173 | int devpts_new_index(struct inode *ptmx_inode) | 537 | int devpts_new_index(struct inode *ptmx_inode) |
174 | { | 538 | { |
539 | struct super_block *sb = pts_sb_from_inode(ptmx_inode); | ||
540 | struct pts_fs_info *fsi = DEVPTS_SB(sb); | ||
175 | int index; | 541 | int index; |
176 | int ida_ret; | 542 | int ida_ret; |
177 | 543 | ||
178 | retry: | 544 | retry: |
179 | if (!ida_pre_get(&allocated_ptys, GFP_KERNEL)) { | 545 | if (!ida_pre_get(&fsi->allocated_ptys, GFP_KERNEL)) |
180 | return -ENOMEM; | 546 | return -ENOMEM; |
181 | } | ||
182 | 547 | ||
183 | mutex_lock(&allocated_ptys_lock); | 548 | mutex_lock(&allocated_ptys_lock); |
184 | ida_ret = ida_get_new(&allocated_ptys, &index); | 549 | ida_ret = ida_get_new(&fsi->allocated_ptys, &index); |
185 | if (ida_ret < 0) { | 550 | if (ida_ret < 0) { |
186 | mutex_unlock(&allocated_ptys_lock); | 551 | mutex_unlock(&allocated_ptys_lock); |
187 | if (ida_ret == -EAGAIN) | 552 | if (ida_ret == -EAGAIN) |
@@ -190,7 +555,7 @@ retry: | |||
190 | } | 555 | } |
191 | 556 | ||
192 | if (index >= pty_limit) { | 557 | if (index >= pty_limit) { |
193 | ida_remove(&allocated_ptys, index); | 558 | ida_remove(&fsi->allocated_ptys, index); |
194 | mutex_unlock(&allocated_ptys_lock); | 559 | mutex_unlock(&allocated_ptys_lock); |
195 | return -EIO; | 560 | return -EIO; |
196 | } | 561 | } |
@@ -200,18 +565,26 @@ retry: | |||
200 | 565 | ||
201 | void devpts_kill_index(struct inode *ptmx_inode, int idx) | 566 | void devpts_kill_index(struct inode *ptmx_inode, int idx) |
202 | { | 567 | { |
568 | struct super_block *sb = pts_sb_from_inode(ptmx_inode); | ||
569 | struct pts_fs_info *fsi = DEVPTS_SB(sb); | ||
570 | |||
203 | mutex_lock(&allocated_ptys_lock); | 571 | mutex_lock(&allocated_ptys_lock); |
204 | ida_remove(&allocated_ptys, idx); | 572 | ida_remove(&fsi->allocated_ptys, idx); |
205 | mutex_unlock(&allocated_ptys_lock); | 573 | mutex_unlock(&allocated_ptys_lock); |
206 | } | 574 | } |
207 | 575 | ||
208 | int devpts_pty_new(struct inode *ptmx_inode, struct tty_struct *tty) | 576 | int devpts_pty_new(struct inode *ptmx_inode, struct tty_struct *tty) |
209 | { | 577 | { |
210 | int number = tty->index; /* tty layer puts index from devpts_new_index() in here */ | 578 | /* tty layer puts index from devpts_new_index() in here */ |
579 | int number = tty->index; | ||
211 | struct tty_driver *driver = tty->driver; | 580 | struct tty_driver *driver = tty->driver; |
212 | dev_t device = MKDEV(driver->major, driver->minor_start+number); | 581 | dev_t device = MKDEV(driver->major, driver->minor_start+number); |
213 | struct dentry *dentry; | 582 | struct dentry *dentry; |
214 | struct inode *inode = new_inode(devpts_mnt->mnt_sb); | 583 | struct super_block *sb = pts_sb_from_inode(ptmx_inode); |
584 | struct inode *inode = new_inode(sb); | ||
585 | struct dentry *root = sb->s_root; | ||
586 | struct pts_fs_info *fsi = DEVPTS_SB(sb); | ||
587 | struct pts_mount_opts *opts = &fsi->mount_opts; | ||
215 | char s[12]; | 588 | char s[12]; |
216 | 589 | ||
217 | /* We're supposed to be given the slave end of a pty */ | 590 | /* We're supposed to be given the slave end of a pty */ |
@@ -221,25 +594,25 @@ int devpts_pty_new(struct inode *ptmx_inode, struct tty_struct *tty) | |||
221 | if (!inode) | 594 | if (!inode) |
222 | return -ENOMEM; | 595 | return -ENOMEM; |
223 | 596 | ||
224 | inode->i_ino = number+2; | 597 | inode->i_ino = number + 3; |
225 | inode->i_uid = config.setuid ? config.uid : current_fsuid(); | 598 | inode->i_uid = opts->setuid ? opts->uid : current_fsuid(); |
226 | inode->i_gid = config.setgid ? config.gid : current_fsgid(); | 599 | inode->i_gid = opts->setgid ? opts->gid : current_fsgid(); |
227 | inode->i_mtime = inode->i_atime = inode->i_ctime = CURRENT_TIME; | 600 | inode->i_mtime = inode->i_atime = inode->i_ctime = CURRENT_TIME; |
228 | init_special_inode(inode, S_IFCHR|config.mode, device); | 601 | init_special_inode(inode, S_IFCHR|opts->mode, device); |
229 | inode->i_private = tty; | 602 | inode->i_private = tty; |
230 | tty->driver_data = inode; | 603 | tty->driver_data = inode; |
231 | 604 | ||
232 | sprintf(s, "%d", number); | 605 | sprintf(s, "%d", number); |
233 | 606 | ||
234 | mutex_lock(&devpts_root->d_inode->i_mutex); | 607 | mutex_lock(&root->d_inode->i_mutex); |
235 | 608 | ||
236 | dentry = d_alloc_name(devpts_root, s); | 609 | dentry = d_alloc_name(root, s); |
237 | if (!IS_ERR(dentry)) { | 610 | if (!IS_ERR(dentry)) { |
238 | d_add(dentry, inode); | 611 | d_add(dentry, inode); |
239 | fsnotify_create(devpts_root->d_inode, dentry); | 612 | fsnotify_create(root->d_inode, dentry); |
240 | } | 613 | } |
241 | 614 | ||
242 | mutex_unlock(&devpts_root->d_inode->i_mutex); | 615 | mutex_unlock(&root->d_inode->i_mutex); |
243 | 616 | ||
244 | return 0; | 617 | return 0; |
245 | } | 618 | } |
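
The switch from number+2 to number+3 keeps slave inode numbers clear of the fixed nodes created elsewhere in this patch:

    /*
     * devpts inode-number layout after this change:
     *   ino 1     -> devpts root directory   (devpts_fill_super)
     *   ino 2     -> ptmx node               (mknod_ptmx)
     *   ino n + 3 -> slave pty with index n  (devpts_pty_new)
     */
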
@@ -256,20 +629,27 @@ struct tty_struct *devpts_get_tty(struct inode *pts_inode, int number) | |||
256 | void devpts_pty_kill(struct tty_struct *tty) | 629 | void devpts_pty_kill(struct tty_struct *tty) |
257 | { | 630 | { |
258 | struct inode *inode = tty->driver_data; | 631 | struct inode *inode = tty->driver_data; |
632 | struct super_block *sb = pts_sb_from_inode(inode); | ||
633 | struct dentry *root = sb->s_root; | ||
259 | struct dentry *dentry; | 634 | struct dentry *dentry; |
260 | 635 | ||
261 | BUG_ON(inode->i_rdev == MKDEV(TTYAUX_MAJOR, PTMX_MINOR)); | 636 | BUG_ON(inode->i_rdev == MKDEV(TTYAUX_MAJOR, PTMX_MINOR)); |
262 | 637 | ||
263 | mutex_lock(&devpts_root->d_inode->i_mutex); | 638 | mutex_lock(&root->d_inode->i_mutex); |
264 | 639 | ||
265 | dentry = d_find_alias(inode); | 640 | dentry = d_find_alias(inode); |
266 | if (dentry && !IS_ERR(dentry)) { | 641 | if (IS_ERR(dentry)) |
642 | goto out; | ||
643 | |||
644 | if (dentry) { | ||
267 | inode->i_nlink--; | 645 | inode->i_nlink--; |
268 | d_delete(dentry); | 646 | d_delete(dentry); |
269 | dput(dentry); | 647 | dput(dentry); /* d_alloc_name() in devpts_pty_new() */ |
270 | } | 648 | } |
271 | 649 | ||
272 | mutex_unlock(&devpts_root->d_inode->i_mutex); | 650 | dput(dentry); /* d_find_alias above */ |
651 | out: | ||
652 | mutex_unlock(&root->d_inode->i_mutex); | ||
273 | } | 653 | } |
274 | 654 | ||
275 | static int __init init_devpts_fs(void) | 655 | static int __init init_devpts_fs(void) |
diff --git a/include/asm-generic/topology.h b/include/asm-generic/topology.h index 54bbf6e04ee8..0e9e2bc0ee96 100644 --- a/include/asm-generic/topology.h +++ b/include/asm-generic/topology.h | |||
@@ -40,6 +40,9 @@ | |||
40 | #ifndef node_to_cpumask | 40 | #ifndef node_to_cpumask |
41 | #define node_to_cpumask(node) ((void)node, cpu_online_map) | 41 | #define node_to_cpumask(node) ((void)node, cpu_online_map) |
42 | #endif | 42 | #endif |
43 | #ifndef cpumask_of_node | ||
44 | #define cpumask_of_node(node) ((void)node, cpu_online_mask) | ||
45 | #endif | ||
43 | #ifndef node_to_first_cpu | 46 | #ifndef node_to_first_cpu |
44 | #define node_to_first_cpu(node) ((void)(node),0) | 47 | #define node_to_first_cpu(node) ((void)(node),0) |
45 | #endif | 48 | #endif |
@@ -54,9 +57,18 @@ | |||
54 | ) | 57 | ) |
55 | #endif | 58 | #endif |
56 | 59 | ||
60 | #ifndef cpumask_of_pcibus | ||
61 | #define cpumask_of_pcibus(bus) (pcibus_to_node(bus) == -1 ? \ | ||
62 | cpu_all_mask : \ | ||
63 | cpumask_of_node(pcibus_to_node(bus))) | ||
64 | #endif | ||
65 | |||
57 | #endif /* CONFIG_NUMA */ | 66 | #endif /* CONFIG_NUMA */ |
58 | 67 | ||
59 | /* returns pointer to cpumask for specified node */ | 68 | /* |
69 | * returns pointer to cpumask for specified node | ||
70 | * Deprecated: use "const struct cpumask *mask = cpumask_of_node(node)" | ||
71 | */ | ||
60 | #ifndef node_to_cpumask_ptr | 72 | #ifndef node_to_cpumask_ptr |
61 | 73 | ||
62 | #define node_to_cpumask_ptr(v, node) \ | 74 | #define node_to_cpumask_ptr(v, node) \ |
diff --git a/include/asm-m32r/smp.h b/include/asm-m32r/smp.h index c5dd66916692..b96a6d2ffbc3 100644 --- a/include/asm-m32r/smp.h +++ b/include/asm-m32r/smp.h | |||
@@ -63,8 +63,6 @@ extern volatile int cpu_2_physid[NR_CPUS]; | |||
63 | #define raw_smp_processor_id() (current_thread_info()->cpu) | 63 | #define raw_smp_processor_id() (current_thread_info()->cpu) |
64 | 64 | ||
65 | extern cpumask_t cpu_callout_map; | 65 | extern cpumask_t cpu_callout_map; |
66 | extern cpumask_t cpu_possible_map; | ||
67 | extern cpumask_t cpu_present_map; | ||
68 | 66 | ||
69 | static __inline__ int hard_smp_processor_id(void) | 67 | static __inline__ int hard_smp_processor_id(void) |
70 | { | 68 | { |
diff --git a/include/linux/8250_pci.h b/include/linux/8250_pci.h index 3209dd46ea7d..b24ff086a662 100644 --- a/include/linux/8250_pci.h +++ b/include/linux/8250_pci.h | |||
@@ -31,7 +31,7 @@ struct pciserial_board { | |||
31 | struct serial_private; | 31 | struct serial_private; |
32 | 32 | ||
33 | struct serial_private * | 33 | struct serial_private * |
34 | pciserial_init_ports(struct pci_dev *dev, struct pciserial_board *board); | 34 | pciserial_init_ports(struct pci_dev *dev, const struct pciserial_board *board); |
35 | void pciserial_remove_ports(struct serial_private *priv); | 35 | void pciserial_remove_ports(struct serial_private *priv); |
36 | void pciserial_suspend_ports(struct serial_private *priv); | 36 | void pciserial_suspend_ports(struct serial_private *priv); |
37 | void pciserial_resume_ports(struct serial_private *priv); | 37 | void pciserial_resume_ports(struct serial_private *priv); |
diff --git a/include/linux/clockchips.h b/include/linux/clockchips.h index ed3a5d473e52..cea153697ec7 100644 --- a/include/linux/clockchips.h +++ b/include/linux/clockchips.h | |||
@@ -82,13 +82,13 @@ struct clock_event_device { | |||
82 | int shift; | 82 | int shift; |
83 | int rating; | 83 | int rating; |
84 | int irq; | 84 | int irq; |
85 | cpumask_t cpumask; | 85 | const struct cpumask *cpumask; |
86 | int (*set_next_event)(unsigned long evt, | 86 | int (*set_next_event)(unsigned long evt, |
87 | struct clock_event_device *); | 87 | struct clock_event_device *); |
88 | void (*set_mode)(enum clock_event_mode mode, | 88 | void (*set_mode)(enum clock_event_mode mode, |
89 | struct clock_event_device *); | 89 | struct clock_event_device *); |
90 | void (*event_handler)(struct clock_event_device *); | 90 | void (*event_handler)(struct clock_event_device *); |
91 | void (*broadcast)(cpumask_t mask); | 91 | void (*broadcast)(const struct cpumask *mask); |
92 | struct list_head list; | 92 | struct list_head list; |
93 | enum clock_event_mode mode; | 93 | enum clock_event_mode mode; |
94 | ktime_t next_event; | 94 | ktime_t next_event; |
diff --git a/include/linux/compiler-gcc.h b/include/linux/compiler-gcc.h index 5c8351b859f0..af40f8eb86f0 100644 --- a/include/linux/compiler-gcc.h +++ b/include/linux/compiler-gcc.h | |||
@@ -61,3 +61,8 @@ | |||
61 | #define noinline __attribute__((noinline)) | 61 | #define noinline __attribute__((noinline)) |
62 | #define __attribute_const__ __attribute__((__const__)) | 62 | #define __attribute_const__ __attribute__((__const__)) |
63 | #define __maybe_unused __attribute__((unused)) | 63 | #define __maybe_unused __attribute__((unused)) |
64 | |||
65 | #define __gcc_header(x) #x | ||
66 | #define _gcc_header(x) __gcc_header(linux/compiler-gcc##x.h) | ||
67 | #define gcc_header(x) _gcc_header(x) | ||
68 | #include gcc_header(__GNUC__) | ||
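
The stringize/paste pair builds the version-specific header name at preprocessing time; the extra _gcc_header() level exists so __GNUC__ is expanded before ## pastes it. A worked expansion, assuming __GNUC__ is 4:

    /*
     * gcc_header(__GNUC__)
     *   -> _gcc_header(4)                       (__GNUC__ expands first)
     *   -> __gcc_header(linux/compiler-gcc4.h)  (## pastes 'gcc' and '4')
     *   -> "linux/compiler-gcc4.h"              (# stringizes)
     *
     * so the include line resolves to:
     *   #include "linux/compiler-gcc4.h"
     */
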
diff --git a/include/linux/compiler-gcc3.h b/include/linux/compiler-gcc3.h index e5eb795f78a1..2befe6513ce4 100644 --- a/include/linux/compiler-gcc3.h +++ b/include/linux/compiler-gcc3.h | |||
@@ -2,9 +2,6 @@ | |||
2 | #error "Please don't include <linux/compiler-gcc3.h> directly, include <linux/compiler.h> instead." | 2 | #error "Please don't include <linux/compiler-gcc3.h> directly, include <linux/compiler.h> instead." |
3 | #endif | 3 | #endif |
4 | 4 | ||
5 | /* These definitions are for GCC v3.x. */ | ||
6 | #include <linux/compiler-gcc.h> | ||
7 | |||
8 | #if __GNUC_MINOR__ >= 3 | 5 | #if __GNUC_MINOR__ >= 3 |
9 | # define __used __attribute__((__used__)) | 6 | # define __used __attribute__((__used__)) |
10 | #else | 7 | #else |
diff --git a/include/linux/compiler-gcc4.h b/include/linux/compiler-gcc4.h index 974f5b7bb205..09992718f9e8 100644 --- a/include/linux/compiler-gcc4.h +++ b/include/linux/compiler-gcc4.h | |||
@@ -2,8 +2,10 @@ | |||
2 | #error "Please don't include <linux/compiler-gcc4.h> directly, include <linux/compiler.h> instead." | 2 | #error "Please don't include <linux/compiler-gcc4.h> directly, include <linux/compiler.h> instead." |
3 | #endif | 3 | #endif |
4 | 4 | ||
5 | /* These definitions are for GCC v4.x. */ | 5 | /* GCC 4.1.[01] miscompiles __weak */ |
6 | #include <linux/compiler-gcc.h> | 6 | #if __GNUC_MINOR__ == 1 && __GNUC_PATCHLEVEL__ <= 1 |
7 | # error Your version of gcc miscompiles the __weak directive | ||
8 | #endif | ||
7 | 9 | ||
8 | #define __used __attribute__((__used__)) | 10 | #define __used __attribute__((__used__)) |
9 | #define __must_check __attribute__((warn_unused_result)) | 11 | #define __must_check __attribute__((warn_unused_result)) |
@@ -16,7 +18,7 @@ | |||
16 | */ | 18 | */ |
17 | #define uninitialized_var(x) x = x | 19 | #define uninitialized_var(x) x = x |
18 | 20 | ||
19 | #if !(__GNUC__ == 4 && __GNUC_MINOR__ < 3) | 21 | #if __GNUC_MINOR__ >= 3 |
20 | /* Mark functions as cold. gcc will assume any path leading to a call | 22 | /* Mark functions as cold. gcc will assume any path leading to a call |
21 | to them will be unlikely. This means a lot of manual unlikely()s | 23 | to them will be unlikely. This means a lot of manual unlikely()s |
22 | are unnecessary now for any paths leading to the usual suspects | 24 | are unnecessary now for any paths leading to the usual suspects |
diff --git a/include/linux/compiler.h b/include/linux/compiler.h index ea7c6be354b7..d95da1020f1c 100644 --- a/include/linux/compiler.h +++ b/include/linux/compiler.h | |||
@@ -36,12 +36,8 @@ extern void __chk_io_ptr(const volatile void __iomem *); | |||
36 | 36 | ||
37 | #ifdef __KERNEL__ | 37 | #ifdef __KERNEL__ |
38 | 38 | ||
39 | #if __GNUC__ >= 4 | 39 | #ifdef __GNUC__ |
40 | # include <linux/compiler-gcc4.h> | 40 | #include <linux/compiler-gcc.h> |
41 | #elif __GNUC__ == 3 && __GNUC_MINOR__ >= 2 | ||
42 | # include <linux/compiler-gcc3.h> | ||
43 | #else | ||
44 | # error Sorry, your compiler is too old/not recognized. | ||
45 | #endif | 41 | #endif |
46 | 42 | ||
47 | #define notrace __attribute__((no_instrument_function)) | 43 | #define notrace __attribute__((no_instrument_function)) |
diff --git a/include/linux/cpumask.h b/include/linux/cpumask.h index 21e1dd43e52a..d4bf52603e6b 100644 --- a/include/linux/cpumask.h +++ b/include/linux/cpumask.h | |||
@@ -339,36 +339,6 @@ extern cpumask_t cpu_mask_all; | |||
339 | #endif | 339 | #endif |
340 | #define CPUMASK_PTR(v, m) cpumask_t *v = &(m->v) | 340 | #define CPUMASK_PTR(v, m) cpumask_t *v = &(m->v) |
341 | 341 | ||
342 | #define cpumask_scnprintf(buf, len, src) \ | ||
343 | __cpumask_scnprintf((buf), (len), &(src), NR_CPUS) | ||
344 | static inline int __cpumask_scnprintf(char *buf, int len, | ||
345 | const cpumask_t *srcp, int nbits) | ||
346 | { | ||
347 | return bitmap_scnprintf(buf, len, srcp->bits, nbits); | ||
348 | } | ||
349 | |||
350 | #define cpumask_parse_user(ubuf, ulen, dst) \ | ||
351 | __cpumask_parse_user((ubuf), (ulen), &(dst), NR_CPUS) | ||
352 | static inline int __cpumask_parse_user(const char __user *buf, int len, | ||
353 | cpumask_t *dstp, int nbits) | ||
354 | { | ||
355 | return bitmap_parse_user(buf, len, dstp->bits, nbits); | ||
356 | } | ||
357 | |||
358 | #define cpulist_scnprintf(buf, len, src) \ | ||
359 | __cpulist_scnprintf((buf), (len), &(src), NR_CPUS) | ||
360 | static inline int __cpulist_scnprintf(char *buf, int len, | ||
361 | const cpumask_t *srcp, int nbits) | ||
362 | { | ||
363 | return bitmap_scnlistprintf(buf, len, srcp->bits, nbits); | ||
364 | } | ||
365 | |||
366 | #define cpulist_parse(buf, dst) __cpulist_parse((buf), &(dst), NR_CPUS) | ||
367 | static inline int __cpulist_parse(const char *buf, cpumask_t *dstp, int nbits) | ||
368 | { | ||
369 | return bitmap_parselist(buf, dstp->bits, nbits); | ||
370 | } | ||
371 | |||
372 | #define cpu_remap(oldbit, old, new) \ | 342 | #define cpu_remap(oldbit, old, new) \ |
373 | __cpu_remap((oldbit), &(old), &(new), NR_CPUS) | 343 | __cpu_remap((oldbit), &(old), &(new), NR_CPUS) |
374 | static inline int __cpu_remap(int oldbit, | 344 | static inline int __cpu_remap(int oldbit, |
@@ -540,9 +510,6 @@ extern cpumask_t cpu_active_map; | |||
540 | [BITS_TO_LONGS(NR_CPUS)-1] = CPU_MASK_LAST_WORD \ | 510 | [BITS_TO_LONGS(NR_CPUS)-1] = CPU_MASK_LAST_WORD \ |
541 | } | 511 | } |
542 | 512 | ||
543 | /* This produces more efficient code. */ | ||
544 | #define nr_cpumask_bits NR_CPUS | ||
545 | |||
546 | #else /* NR_CPUS > BITS_PER_LONG */ | 513 | #else /* NR_CPUS > BITS_PER_LONG */ |
547 | 514 | ||
548 | #define CPU_BITS_ALL \ | 515 | #define CPU_BITS_ALL \ |
@@ -550,9 +517,15 @@ extern cpumask_t cpu_active_map; | |||
550 | [0 ... BITS_TO_LONGS(NR_CPUS)-2] = ~0UL, \ | 517 | [0 ... BITS_TO_LONGS(NR_CPUS)-2] = ~0UL, \ |
551 | [BITS_TO_LONGS(NR_CPUS)-1] = CPU_MASK_LAST_WORD \ | 518 | [BITS_TO_LONGS(NR_CPUS)-1] = CPU_MASK_LAST_WORD \ |
552 | } | 519 | } |
520 | #endif /* NR_CPUS > BITS_PER_LONG */ | ||
553 | 521 | ||
522 | #ifdef CONFIG_CPUMASK_OFFSTACK | ||
523 | /* Assuming NR_CPUS is huge, a runtime limit is more efficient. Also, | ||
524 | * not all bits may be allocated. */ | ||
554 | #define nr_cpumask_bits nr_cpu_ids | 525 | #define nr_cpumask_bits nr_cpu_ids |
555 | #endif /* NR_CPUS > BITS_PER_LONG */ | 526 | #else |
527 | #define nr_cpumask_bits NR_CPUS | ||
528 | #endif | ||
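
The practical effect: every bitmap scan is bounded by nr_cpumask_bits, which with CONFIG_CPUMASK_OFFSTACK is the runtime CPU count rather than the compile-time maximum. A minimal sketch, kernel context assumed:

    /* Sketch only: with OFFSTACK, nr_cpumask_bits == nr_cpu_ids (e.g. 8);
     * otherwise nr_cpumask_bits == NR_CPUS (e.g. 4096). */
    static inline int count_set_cpus(const struct cpumask *srcp)
    {
            return bitmap_weight(srcp->bits, nr_cpumask_bits);
    }
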
556 | 529 | ||
557 | /* verify cpu argument to cpumask_* operators */ | 530 | /* verify cpu argument to cpumask_* operators */ |
558 | static inline unsigned int cpumask_check(unsigned int cpu) | 531 | static inline unsigned int cpumask_check(unsigned int cpu) |
@@ -946,6 +919,63 @@ static inline void cpumask_copy(struct cpumask *dstp, | |||
946 | #define cpumask_of(cpu) (get_cpu_mask(cpu)) | 919 | #define cpumask_of(cpu) (get_cpu_mask(cpu)) |
947 | 920 | ||
948 | /** | 921 | /** |
922 | * cpumask_scnprintf - print a cpumask into a string as comma-separated hex | ||
923 | * @buf: the buffer to sprintf into | ||
924 | * @len: the length of the buffer | ||
925 | * @srcp: the cpumask to print | ||
926 | * | ||
927 | * If len is zero, returns zero. Otherwise returns the length of the | ||
928 | * (nul-terminated) @buf string. | ||
929 | */ | ||
930 | static inline int cpumask_scnprintf(char *buf, int len, | ||
931 | const struct cpumask *srcp) | ||
932 | { | ||
933 | return bitmap_scnprintf(buf, len, srcp->bits, nr_cpumask_bits); | ||
934 | } | ||
935 | |||
936 | /** | ||
937 | * cpumask_parse_user - extract a cpumask from a user string | ||
938 | * @buf: the buffer to extract from | ||
939 | * @len: the length of the buffer | ||
940 | * @dstp: the cpumask to set. | ||
941 | * | ||
942 | * Returns -errno, or 0 for success. | ||
943 | */ | ||
944 | static inline int cpumask_parse_user(const char __user *buf, int len, | ||
945 | struct cpumask *dstp) | ||
946 | { | ||
947 | return bitmap_parse_user(buf, len, dstp->bits, nr_cpumask_bits); | ||
948 | } | ||
949 | |||
950 | /** | ||
951 | * cpulist_scnprintf - print a cpumask into a string as comma-separated list | ||
952 | * @buf: the buffer to sprintf into | ||
953 | * @len: the length of the buffer | ||
954 | * @srcp: the cpumask to print | ||
955 | * | ||
956 | * If len is zero, returns zero. Otherwise returns the length of the | ||
957 | * (nul-terminated) @buf string. | ||
958 | */ | ||
959 | static inline int cpulist_scnprintf(char *buf, int len, | ||
960 | const struct cpumask *srcp) | ||
961 | { | ||
962 | return bitmap_scnlistprintf(buf, len, srcp->bits, nr_cpumask_bits); | ||
963 | } | ||
964 | |||
965 | /** | ||
966 | * cpulist_parse - extract a cpumask from a string of comma-separated ranges | ||
967 | * @buf: the buffer to extract from | ||
969 | * @dstp: the cpumask to set. | ||
970 | * | ||
971 | * Returns -errno, or 0 for success. | ||
972 | */ | ||
973 | static inline int cpulist_parse(const char *buf, struct cpumask *dstp) | ||
974 | { | ||
975 | return bitmap_parselist(buf, dstp->bits, nr_cpumask_bits); | ||
976 | } | ||
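
Typical use of the relocated helpers; the buffer size and log level here are arbitrary example choices:

    static void dump_online_cpus(void)
    {
            char buf[128];

            /* Renders e.g. "0-3,8" for CPUs 0,1,2,3,8. */
            cpulist_scnprintf(buf, sizeof(buf), cpu_online_mask);
            printk(KERN_DEBUG "online cpus: %s\n", buf);
    }
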
977 | |||
978 | /** | ||
949 | * to_cpumask - convert an NR_CPUS bitmap to a struct cpumask * | 979 | * to_cpumask - convert an NR_CPUS bitmap to a struct cpumask * |
950 | * @bitmap: the bitmap | 980 | * @bitmap: the bitmap |
951 | * | 981 | * |
diff --git a/include/linux/generic_serial.h b/include/linux/generic_serial.h index 4cc913939817..fadff28505bb 100644 --- a/include/linux/generic_serial.h +++ b/include/linux/generic_serial.h | |||
@@ -21,7 +21,6 @@ struct real_driver { | |||
21 | void (*enable_tx_interrupts) (void *); | 21 | void (*enable_tx_interrupts) (void *); |
22 | void (*disable_rx_interrupts) (void *); | 22 | void (*disable_rx_interrupts) (void *); |
23 | void (*enable_rx_interrupts) (void *); | 23 | void (*enable_rx_interrupts) (void *); |
24 | int (*get_CD) (void *); | ||
25 | void (*shutdown_port) (void*); | 24 | void (*shutdown_port) (void*); |
26 | int (*set_real_termios) (void*); | 25 | int (*set_real_termios) (void*); |
27 | int (*chars_in_buffer) (void*); | 26 | int (*chars_in_buffer) (void*); |
diff --git a/include/linux/ide.h b/include/linux/ide.h index e99c56de7f56..db5ef8ae1ab9 100644 --- a/include/linux/ide.h +++ b/include/linux/ide.h | |||
@@ -32,13 +32,6 @@ | |||
32 | # define SUPPORT_VLB_SYNC 1 | 32 | # define SUPPORT_VLB_SYNC 1 |
33 | #endif | 33 | #endif |
34 | 34 | ||
35 | /* | ||
36 | * Used to indicate "no IRQ", should be a value that cannot be an IRQ | ||
37 | * number. | ||
38 | */ | ||
39 | |||
40 | #define IDE_NO_IRQ (-1) | ||
41 | |||
42 | typedef unsigned char byte; /* used everywhere */ | 35 | typedef unsigned char byte; /* used everywhere */ |
43 | 36 | ||
44 | /* | 37 | /* |
@@ -403,6 +396,7 @@ enum { | |||
403 | * This is used for several packet commands (not for READ/WRITE commands). | 396 | * This is used for several packet commands (not for READ/WRITE commands). |
404 | */ | 397 | */ |
405 | #define IDE_PC_BUFFER_SIZE 256 | 398 | #define IDE_PC_BUFFER_SIZE 256 |
399 | #define ATAPI_WAIT_PC (60 * HZ) | ||
406 | 400 | ||
407 | struct ide_atapi_pc { | 401 | struct ide_atapi_pc { |
408 | /* actual packet bytes */ | 402 | /* actual packet bytes */ |
@@ -480,53 +474,53 @@ enum { | |||
480 | 474 | ||
481 | /* ide-cd */ | 475 | /* ide-cd */ |
482 | /* Drive cannot eject the disc. */ | 476 | /* Drive cannot eject the disc. */ |
483 | IDE_AFLAG_NO_EJECT = (1 << 3), | 477 | IDE_AFLAG_NO_EJECT = (1 << 1), |
484 | /* Drive is a pre ATAPI 1.2 drive. */ | 478 | /* Drive is a pre ATAPI 1.2 drive. */ |
485 | IDE_AFLAG_PRE_ATAPI12 = (1 << 4), | 479 | IDE_AFLAG_PRE_ATAPI12 = (1 << 2), |
486 | /* TOC addresses are in BCD. */ | 480 | /* TOC addresses are in BCD. */ |
487 | IDE_AFLAG_TOCADDR_AS_BCD = (1 << 5), | 481 | IDE_AFLAG_TOCADDR_AS_BCD = (1 << 3), |
488 | /* TOC track numbers are in BCD. */ | 482 | /* TOC track numbers are in BCD. */ |
489 | IDE_AFLAG_TOCTRACKS_AS_BCD = (1 << 6), | 483 | IDE_AFLAG_TOCTRACKS_AS_BCD = (1 << 4), |
490 | /* | 484 | /* |
491 | * Drive does not provide data in multiples of SECTOR_SIZE | 485 | * Drive does not provide data in multiples of SECTOR_SIZE |
492 | * when more than one interrupt is needed. | 486 | * when more than one interrupt is needed. |
493 | */ | 487 | */ |
494 | IDE_AFLAG_LIMIT_NFRAMES = (1 << 7), | 488 | IDE_AFLAG_LIMIT_NFRAMES = (1 << 5), |
495 | /* Saved TOC information is current. */ | 489 | /* Saved TOC information is current. */ |
496 | IDE_AFLAG_TOC_VALID = (1 << 9), | 490 | IDE_AFLAG_TOC_VALID = (1 << 6), |
497 | /* We think that the drive door is locked. */ | 491 | /* We think that the drive door is locked. */ |
498 | IDE_AFLAG_DOOR_LOCKED = (1 << 10), | 492 | IDE_AFLAG_DOOR_LOCKED = (1 << 7), |
499 | /* SET_CD_SPEED command is unsupported. */ | 493 | /* SET_CD_SPEED command is unsupported. */ |
500 | IDE_AFLAG_NO_SPEED_SELECT = (1 << 11), | 494 | IDE_AFLAG_NO_SPEED_SELECT = (1 << 8), |
501 | IDE_AFLAG_VERTOS_300_SSD = (1 << 12), | 495 | IDE_AFLAG_VERTOS_300_SSD = (1 << 9), |
502 | IDE_AFLAG_VERTOS_600_ESD = (1 << 13), | 496 | IDE_AFLAG_VERTOS_600_ESD = (1 << 10), |
503 | IDE_AFLAG_SANYO_3CD = (1 << 14), | 497 | IDE_AFLAG_SANYO_3CD = (1 << 11), |
504 | IDE_AFLAG_FULL_CAPS_PAGE = (1 << 15), | 498 | IDE_AFLAG_FULL_CAPS_PAGE = (1 << 12), |
505 | IDE_AFLAG_PLAY_AUDIO_OK = (1 << 16), | 499 | IDE_AFLAG_PLAY_AUDIO_OK = (1 << 13), |
506 | IDE_AFLAG_LE_SPEED_FIELDS = (1 << 17), | 500 | IDE_AFLAG_LE_SPEED_FIELDS = (1 << 14), |
507 | 501 | ||
508 | /* ide-floppy */ | 502 | /* ide-floppy */ |
509 | /* Avoid commands not supported in Clik drive */ | 503 | /* Avoid commands not supported in Clik drive */ |
510 | IDE_AFLAG_CLIK_DRIVE = (1 << 19), | 504 | IDE_AFLAG_CLIK_DRIVE = (1 << 15), |
511 | /* Requires BH algorithm for packets */ | 505 | /* Requires BH algorithm for packets */ |
512 | IDE_AFLAG_ZIP_DRIVE = (1 << 20), | 506 | IDE_AFLAG_ZIP_DRIVE = (1 << 16), |
513 | /* Supports format progress report */ | 507 | /* Supports format progress report */ |
514 | IDE_AFLAG_SRFP = (1 << 22), | 508 | IDE_AFLAG_SRFP = (1 << 17), |
515 | 509 | ||
516 | /* ide-tape */ | 510 | /* ide-tape */ |
517 | IDE_AFLAG_IGNORE_DSC = (1 << 23), | 511 | IDE_AFLAG_IGNORE_DSC = (1 << 18), |
518 | /* 0 When the tape position is unknown */ | 512 | /* 0 When the tape position is unknown */ |
519 | IDE_AFLAG_ADDRESS_VALID = (1 << 24), | 513 | IDE_AFLAG_ADDRESS_VALID = (1 << 19), |
520 | /* Device already opened */ | 514 | /* Device already opened */ |
521 | IDE_AFLAG_BUSY = (1 << 25), | 515 | IDE_AFLAG_BUSY = (1 << 20), |
522 | /* Attempt to auto-detect the current user block size */ | 516 | /* Attempt to auto-detect the current user block size */ |
523 | IDE_AFLAG_DETECT_BS = (1 << 26), | 517 | IDE_AFLAG_DETECT_BS = (1 << 21), |
524 | /* Currently on a filemark */ | 518 | /* Currently on a filemark */ |
525 | IDE_AFLAG_FILEMARK = (1 << 27), | 519 | IDE_AFLAG_FILEMARK = (1 << 22), |
526 | /* 0 = no tape is loaded, so we don't rewind after ejecting */ | 520 | /* 0 = no tape is loaded, so we don't rewind after ejecting */ |
527 | IDE_AFLAG_MEDIUM_PRESENT = (1 << 28), | 521 | IDE_AFLAG_MEDIUM_PRESENT = (1 << 23), |
528 | 522 | ||
529 | IDE_AFLAG_NO_AUTOCLOSE = (1 << 29), | 523 | IDE_AFLAG_NO_AUTOCLOSE = (1 << 24), |
530 | }; | 524 | }; |
531 | 525 | ||
532 | /* device flags */ | 526 | /* device flags */ |
@@ -565,28 +559,26 @@ enum { | |||
565 | IDE_DFLAG_NODMA = (1 << 16), | 559 | IDE_DFLAG_NODMA = (1 << 16), |
566 | /* power management told us not to do anything, so sleep nicely */ | 560 | /* power management told us not to do anything, so sleep nicely */ |
567 | IDE_DFLAG_BLOCKED = (1 << 17), | 561 | IDE_DFLAG_BLOCKED = (1 << 17), |
568 | /* ide-scsi emulation */ | ||
569 | IDE_DFLAG_SCSI = (1 << 18), | ||
570 | /* sleeping & sleep field valid */ | 562 | /* sleeping & sleep field valid */ |
571 | IDE_DFLAG_SLEEPING = (1 << 19), | 563 | IDE_DFLAG_SLEEPING = (1 << 18), |
572 | IDE_DFLAG_POST_RESET = (1 << 20), | 564 | IDE_DFLAG_POST_RESET = (1 << 19), |
573 | IDE_DFLAG_UDMA33_WARNED = (1 << 21), | 565 | IDE_DFLAG_UDMA33_WARNED = (1 << 20), |
574 | IDE_DFLAG_LBA48 = (1 << 22), | 566 | IDE_DFLAG_LBA48 = (1 << 21), |
575 | /* status of write cache */ | 567 | /* status of write cache */ |
576 | IDE_DFLAG_WCACHE = (1 << 23), | 568 | IDE_DFLAG_WCACHE = (1 << 22), |
577 | /* used for ignoring ATA_DF */ | 569 | /* used for ignoring ATA_DF */ |
578 | IDE_DFLAG_NOWERR = (1 << 24), | 570 | IDE_DFLAG_NOWERR = (1 << 23), |
579 | /* retrying in PIO */ | 571 | /* retrying in PIO */ |
580 | IDE_DFLAG_DMA_PIO_RETRY = (1 << 25), | 572 | IDE_DFLAG_DMA_PIO_RETRY = (1 << 24), |
581 | IDE_DFLAG_LBA = (1 << 26), | 573 | IDE_DFLAG_LBA = (1 << 25), |
582 | /* don't unload heads */ | 574 | /* don't unload heads */ |
583 | IDE_DFLAG_NO_UNLOAD = (1 << 27), | 575 | IDE_DFLAG_NO_UNLOAD = (1 << 26), |
584 | /* heads unloaded, please don't reset port */ | 576 | /* heads unloaded, please don't reset port */ |
585 | IDE_DFLAG_PARKED = (1 << 28), | 577 | IDE_DFLAG_PARKED = (1 << 27), |
586 | IDE_DFLAG_MEDIA_CHANGED = (1 << 29), | 578 | IDE_DFLAG_MEDIA_CHANGED = (1 << 28), |
587 | /* write protect */ | 579 | /* write protect */ |
588 | IDE_DFLAG_WP = (1 << 30), | 580 | IDE_DFLAG_WP = (1 << 29), |
589 | IDE_DFLAG_FORMAT_IN_PROGRESS = (1 << 31), | 581 | IDE_DFLAG_FORMAT_IN_PROGRESS = (1 << 30), |
590 | }; | 582 | }; |
591 | 583 | ||
592 | struct ide_drive_s { | 584 | struct ide_drive_s { |
@@ -610,8 +602,6 @@ struct ide_drive_s { | |||
610 | unsigned long dev_flags; | 602 | unsigned long dev_flags; |
611 | 603 | ||
612 | unsigned long sleep; /* sleep until this time */ | 604 | unsigned long sleep; /* sleep until this time */ |
613 | unsigned long service_start; /* time we started last request */ | ||
614 | unsigned long service_time; /* service time of last request */ | ||
615 | unsigned long timeout; /* max time to wait for irq */ | 605 | unsigned long timeout; /* max time to wait for irq */ |
616 | 606 | ||
617 | special_t special; /* special action flags */ | 607 | special_t special; /* special action flags */ |
@@ -879,8 +869,6 @@ typedef struct hwgroup_s { | |||
879 | 869 | ||
880 | /* BOOL: protects all fields below */ | 870 | /* BOOL: protects all fields below */ |
881 | volatile int busy; | 871 | volatile int busy; |
882 | /* BOOL: wake us up on timer expiry */ | ||
883 | unsigned int sleeping : 1; | ||
884 | /* BOOL: polling active & poll_timeout field valid */ | 872 | /* BOOL: polling active & poll_timeout field valid */ |
885 | unsigned int polling : 1; | 873 | unsigned int polling : 1; |
886 | 874 | ||
@@ -1258,14 +1246,11 @@ int ide_set_media_lock(ide_drive_t *, struct gendisk *, int); | |||
1258 | void ide_create_request_sense_cmd(ide_drive_t *, struct ide_atapi_pc *); | 1246 | void ide_create_request_sense_cmd(ide_drive_t *, struct ide_atapi_pc *); |
1259 | void ide_retry_pc(ide_drive_t *, struct gendisk *); | 1247 | void ide_retry_pc(ide_drive_t *, struct gendisk *); |
1260 | 1248 | ||
1261 | static inline unsigned long ide_scsi_get_timeout(struct ide_atapi_pc *pc) | 1249 | int ide_cd_expiry(ide_drive_t *); |
1262 | { | ||
1263 | return max_t(unsigned long, WAIT_CMD, pc->timeout - jiffies); | ||
1264 | } | ||
1265 | 1250 | ||
1266 | int ide_scsi_expiry(ide_drive_t *); | 1251 | int ide_cd_get_xferlen(struct request *); |
1267 | 1252 | ||
1268 | ide_startstop_t ide_issue_pc(ide_drive_t *, unsigned int, ide_expiry_t *); | 1253 | ide_startstop_t ide_issue_pc(ide_drive_t *); |
1269 | 1254 | ||
1270 | ide_startstop_t do_rw_taskfile(ide_drive_t *, ide_task_t *); | 1255 | ide_startstop_t do_rw_taskfile(ide_drive_t *, ide_task_t *); |
1271 | 1256 | ||
@@ -1287,6 +1272,26 @@ extern void ide_stall_queue(ide_drive_t *drive, unsigned long timeout); | |||
1287 | 1272 | ||
1288 | extern void ide_timer_expiry(unsigned long); | 1273 | extern void ide_timer_expiry(unsigned long); |
1289 | extern irqreturn_t ide_intr(int irq, void *dev_id); | 1274 | extern irqreturn_t ide_intr(int irq, void *dev_id); |
1275 | |||
1276 | static inline int ide_lock_hwgroup(ide_hwgroup_t *hwgroup) | ||
1277 | { | ||
1278 | if (hwgroup->busy) | ||
1279 | return 1; | ||
1280 | |||
1281 | hwgroup->busy = 1; | ||
1282 | /* for atari only */ | ||
1283 | ide_get_lock(ide_intr, hwgroup); | ||
1284 | |||
1285 | return 0; | ||
1286 | } | ||
1287 | |||
1288 | static inline void ide_unlock_hwgroup(ide_hwgroup_t *hwgroup) | ||
1289 | { | ||
1290 | /* for atari only */ | ||
1291 | ide_release_lock(); | ||
1292 | hwgroup->busy = 0; | ||
1293 | } | ||
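
Intended usage of the new helpers, as a hedged sketch; the caller shown is illustrative, not lifted from this patch:

    static void example_do_request(ide_hwgroup_t *hwgroup)
    {
            /* ide_lock_hwgroup() returns 1 if another path owns the group. */
            if (ide_lock_hwgroup(hwgroup))
                    return; /* busy -- requeue and try again later */

            /* ... start the next request on this hwgroup ... */

            ide_unlock_hwgroup(hwgroup);
    }
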
1294 | |||
1290 | extern void do_ide_request(struct request_queue *); | 1295 | extern void do_ide_request(struct request_queue *); |
1291 | 1296 | ||
1292 | void ide_init_disk(struct gendisk *, ide_drive_t *); | 1297 | void ide_init_disk(struct gendisk *, ide_drive_t *); |
@@ -1533,6 +1538,7 @@ void ide_unregister_region(struct gendisk *); | |||
1533 | void ide_undecoded_slave(ide_drive_t *); | 1538 | void ide_undecoded_slave(ide_drive_t *); |
1534 | 1539 | ||
1535 | void ide_port_apply_params(ide_hwif_t *); | 1540 | void ide_port_apply_params(ide_hwif_t *); |
1541 | int ide_sysfs_register_port(ide_hwif_t *); | ||
1536 | 1542 | ||
1537 | struct ide_host *ide_host_alloc(const struct ide_port_info *, hw_regs_t **); | 1543 | struct ide_host *ide_host_alloc(const struct ide_port_info *, hw_regs_t **); |
1538 | void ide_host_free(struct ide_host *); | 1544 | void ide_host_free(struct ide_host *); |
@@ -1627,6 +1633,9 @@ extern struct mutex ide_cfg_mtx; | |||
1627 | 1633 | ||
1628 | #define local_irq_set(flags) do { local_save_flags((flags)); local_irq_enable_in_hardirq(); } while (0) | 1634 | #define local_irq_set(flags) do { local_save_flags((flags)); local_irq_enable_in_hardirq(); } while (0) |
1629 | 1635 | ||
1636 | char *ide_media_string(ide_drive_t *); | ||
1637 | |||
1638 | extern struct device_attribute ide_dev_attrs[]; | ||
1630 | extern struct bus_type ide_bus_type; | 1639 | extern struct bus_type ide_bus_type; |
1631 | extern struct class *ide_port_class; | 1640 | extern struct class *ide_port_class; |
1632 | 1641 | ||
diff --git a/include/linux/interrupt.h b/include/linux/interrupt.h index 8cc8ef47f5b6..990355fbc54e 100644 --- a/include/linux/interrupt.h +++ b/include/linux/interrupt.h | |||
@@ -111,13 +111,13 @@ extern void enable_irq(unsigned int irq); | |||
111 | 111 | ||
112 | extern cpumask_t irq_default_affinity; | 112 | extern cpumask_t irq_default_affinity; |
113 | 113 | ||
114 | extern int irq_set_affinity(unsigned int irq, cpumask_t cpumask); | 114 | extern int irq_set_affinity(unsigned int irq, const struct cpumask *cpumask); |
115 | extern int irq_can_set_affinity(unsigned int irq); | 115 | extern int irq_can_set_affinity(unsigned int irq); |
116 | extern int irq_select_affinity(unsigned int irq); | 116 | extern int irq_select_affinity(unsigned int irq); |
117 | 117 | ||
118 | #else /* CONFIG_SMP */ | 118 | #else /* CONFIG_SMP */ |
119 | 119 | ||
120 | static inline int irq_set_affinity(unsigned int irq, cpumask_t cpumask) | 120 | static inline int irq_set_affinity(unsigned int irq, const struct cpumask *m) |
121 | { | 121 | { |
122 | return -EINVAL; | 122 | return -EINVAL; |
123 | } | 123 | } |
diff --git a/include/linux/irq.h b/include/linux/irq.h index d64a6d49bdef..f899b502f186 100644 --- a/include/linux/irq.h +++ b/include/linux/irq.h | |||
@@ -113,7 +113,8 @@ struct irq_chip { | |||
113 | void (*eoi)(unsigned int irq); | 113 | void (*eoi)(unsigned int irq); |
114 | 114 | ||
115 | void (*end)(unsigned int irq); | 115 | void (*end)(unsigned int irq); |
116 | void (*set_affinity)(unsigned int irq, cpumask_t dest); | 116 | void (*set_affinity)(unsigned int irq, |
117 | const struct cpumask *dest); | ||
117 | int (*retrigger)(unsigned int irq); | 118 | int (*retrigger)(unsigned int irq); |
118 | int (*set_type)(unsigned int irq, unsigned int flow_type); | 119 | int (*set_type)(unsigned int irq, unsigned int flow_type); |
119 | int (*set_wake)(unsigned int irq, unsigned int on); | 120 | int (*set_wake)(unsigned int irq, unsigned int on); |
diff --git a/include/linux/istallion.h b/include/linux/istallion.h index 0d1840723249..7faca98c7d14 100644 --- a/include/linux/istallion.h +++ b/include/linux/istallion.h | |||
@@ -59,9 +59,7 @@ struct stliport { | |||
59 | unsigned int devnr; | 59 | unsigned int devnr; |
60 | int baud_base; | 60 | int baud_base; |
61 | int custom_divisor; | 61 | int custom_divisor; |
62 | int close_delay; | ||
63 | int closing_wait; | 62 | int closing_wait; |
64 | int openwaitcnt; | ||
65 | int rc; | 63 | int rc; |
66 | int argsize; | 64 | int argsize; |
67 | void *argp; | 65 | void *argp; |
diff --git a/include/linux/kvm.h b/include/linux/kvm.h index f18b86fa8655..35525ac63337 100644 --- a/include/linux/kvm.h +++ b/include/linux/kvm.h | |||
@@ -83,6 +83,7 @@ struct kvm_irqchip { | |||
83 | #define KVM_EXIT_S390_SIEIC 13 | 83 | #define KVM_EXIT_S390_SIEIC 13 |
84 | #define KVM_EXIT_S390_RESET 14 | 84 | #define KVM_EXIT_S390_RESET 14 |
85 | #define KVM_EXIT_DCR 15 | 85 | #define KVM_EXIT_DCR 15 |
86 | #define KVM_EXIT_NMI 16 | ||
86 | 87 | ||
87 | /* for KVM_RUN, returned by mmap(vcpu_fd, offset=0) */ | 88 | /* for KVM_RUN, returned by mmap(vcpu_fd, offset=0) */ |
88 | struct kvm_run { | 89 | struct kvm_run { |
@@ -387,6 +388,14 @@ struct kvm_trace_rec { | |||
387 | #define KVM_CAP_DEVICE_ASSIGNMENT 17 | 388 | #define KVM_CAP_DEVICE_ASSIGNMENT 17 |
388 | #endif | 389 | #endif |
389 | #define KVM_CAP_IOMMU 18 | 390 | #define KVM_CAP_IOMMU 18 |
391 | #if defined(CONFIG_X86) | ||
392 | #define KVM_CAP_DEVICE_MSI 20 | ||
393 | #endif | ||
394 | /* Bug in KVM_SET_USER_MEMORY_REGION fixed: */ | ||
395 | #define KVM_CAP_DESTROY_MEMORY_REGION_WORKS 21 | ||
396 | #if defined(CONFIG_X86) | ||
397 | #define KVM_CAP_USER_NMI 22 | ||
398 | #endif | ||
390 | 399 | ||
391 | /* | 400 | /* |
392 | * ioctls for VM fds | 401 | * ioctls for VM fds |
@@ -458,6 +467,8 @@ struct kvm_trace_rec { | |||
458 | #define KVM_S390_INITIAL_RESET _IO(KVMIO, 0x97) | 467 | #define KVM_S390_INITIAL_RESET _IO(KVMIO, 0x97) |
459 | #define KVM_GET_MP_STATE _IOR(KVMIO, 0x98, struct kvm_mp_state) | 468 | #define KVM_GET_MP_STATE _IOR(KVMIO, 0x98, struct kvm_mp_state) |
460 | #define KVM_SET_MP_STATE _IOW(KVMIO, 0x99, struct kvm_mp_state) | 469 | #define KVM_SET_MP_STATE _IOW(KVMIO, 0x99, struct kvm_mp_state) |
470 | /* Available with KVM_CAP_NMI */ | ||
471 | #define KVM_NMI _IO(KVMIO, 0x9a) | ||
461 | 472 | ||
462 | #define KVM_TRC_INJ_VIRQ (KVM_TRC_HANDLER + 0x02) | 473 | #define KVM_TRC_INJ_VIRQ (KVM_TRC_HANDLER + 0x02) |
463 | #define KVM_TRC_REDELIVER_EVT (KVM_TRC_HANDLER + 0x03) | 474 | #define KVM_TRC_REDELIVER_EVT (KVM_TRC_HANDLER + 0x03) |
@@ -500,10 +511,17 @@ struct kvm_assigned_irq { | |||
500 | __u32 guest_irq; | 511 | __u32 guest_irq; |
501 | __u32 flags; | 512 | __u32 flags; |
502 | union { | 513 | union { |
514 | struct { | ||
515 | __u32 addr_lo; | ||
516 | __u32 addr_hi; | ||
517 | __u32 data; | ||
518 | } guest_msi; | ||
503 | __u32 reserved[12]; | 519 | __u32 reserved[12]; |
504 | }; | 520 | }; |
505 | }; | 521 | }; |
506 | 522 | ||
507 | #define KVM_DEV_ASSIGN_ENABLE_IOMMU (1 << 0) | 523 | #define KVM_DEV_ASSIGN_ENABLE_IOMMU (1 << 0) |
508 | 524 | ||
525 | #define KVM_DEV_IRQ_ASSIGN_ENABLE_MSI (1 << 0) | ||
526 | |||
509 | #endif | 527 | #endif |
diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h index bb92be2153bc..eafabd5c66b2 100644 --- a/include/linux/kvm_host.h +++ b/include/linux/kvm_host.h | |||
@@ -16,6 +16,7 @@ | |||
16 | #include <linux/mm.h> | 16 | #include <linux/mm.h> |
17 | #include <linux/preempt.h> | 17 | #include <linux/preempt.h> |
18 | #include <linux/marker.h> | 18 | #include <linux/marker.h> |
19 | #include <linux/msi.h> | ||
19 | #include <asm/signal.h> | 20 | #include <asm/signal.h> |
20 | 21 | ||
21 | #include <linux/kvm.h> | 22 | #include <linux/kvm.h> |
@@ -306,8 +307,14 @@ struct kvm_assigned_dev_kernel { | |||
306 | int host_busnr; | 307 | int host_busnr; |
307 | int host_devfn; | 308 | int host_devfn; |
308 | int host_irq; | 309 | int host_irq; |
310 | bool host_irq_disabled; | ||
309 | int guest_irq; | 311 | int guest_irq; |
310 | int irq_requested; | 312 | struct msi_msg guest_msi; |
313 | #define KVM_ASSIGNED_DEV_GUEST_INTX (1 << 0) | ||
314 | #define KVM_ASSIGNED_DEV_GUEST_MSI (1 << 1) | ||
315 | #define KVM_ASSIGNED_DEV_HOST_INTX (1 << 8) | ||
316 | #define KVM_ASSIGNED_DEV_HOST_MSI (1 << 9) | ||
317 | unsigned long irq_requested_type; | ||
311 | int irq_source_id; | 318 | int irq_source_id; |
312 | struct pci_dev *dev; | 319 | struct pci_dev *dev; |
313 | struct kvm *kvm; | 320 | struct kvm *kvm; |
@@ -316,8 +323,7 @@ void kvm_set_irq(struct kvm *kvm, int irq_source_id, int irq, int level); | |||
316 | void kvm_notify_acked_irq(struct kvm *kvm, unsigned gsi); | 323 | void kvm_notify_acked_irq(struct kvm *kvm, unsigned gsi); |
317 | void kvm_register_irq_ack_notifier(struct kvm *kvm, | 324 | void kvm_register_irq_ack_notifier(struct kvm *kvm, |
318 | struct kvm_irq_ack_notifier *kian); | 325 | struct kvm_irq_ack_notifier *kian); |
319 | void kvm_unregister_irq_ack_notifier(struct kvm *kvm, | 326 | void kvm_unregister_irq_ack_notifier(struct kvm_irq_ack_notifier *kian); |
320 | struct kvm_irq_ack_notifier *kian); | ||
321 | int kvm_request_irq_source_id(struct kvm *kvm); | 327 | int kvm_request_irq_source_id(struct kvm *kvm); |
322 | void kvm_free_irq_source_id(struct kvm *kvm, int irq_source_id); | 328 | void kvm_free_irq_source_id(struct kvm *kvm, int irq_source_id); |
323 | 329 | ||
diff --git a/include/linux/pci_ids.h b/include/linux/pci_ids.h index b6e694454280..218c73b1e6d4 100644 --- a/include/linux/pci_ids.h +++ b/include/linux/pci_ids.h | |||
@@ -1766,6 +1766,7 @@ | |||
1766 | #define PCI_DEVICE_ID_SIIG_8S_20x_650 0x2081 | 1766 | #define PCI_DEVICE_ID_SIIG_8S_20x_650 0x2081 |
1767 | #define PCI_DEVICE_ID_SIIG_8S_20x_850 0x2082 | 1767 | #define PCI_DEVICE_ID_SIIG_8S_20x_850 0x2082 |
1768 | #define PCI_SUBDEVICE_ID_SIIG_QUARTET_SERIAL 0x2050 | 1768 | #define PCI_SUBDEVICE_ID_SIIG_QUARTET_SERIAL 0x2050 |
1769 | #define PCI_SUBDEVICE_ID_SIIG_DUAL_SERIAL 0x2530 | ||
1769 | 1770 | ||
1770 | #define PCI_VENDOR_ID_RADISYS 0x1331 | 1771 | #define PCI_VENDOR_ID_RADISYS 0x1331 |
1771 | 1772 | ||
@@ -1795,6 +1796,7 @@ | |||
1795 | #define PCI_DEVICE_ID_SEALEVEL_UCOMM232 0x7202 | 1796 | #define PCI_DEVICE_ID_SEALEVEL_UCOMM232 0x7202 |
1796 | #define PCI_DEVICE_ID_SEALEVEL_COMM4 0x7401 | 1797 | #define PCI_DEVICE_ID_SEALEVEL_COMM4 0x7401 |
1797 | #define PCI_DEVICE_ID_SEALEVEL_COMM8 0x7801 | 1798 | #define PCI_DEVICE_ID_SEALEVEL_COMM8 0x7801 |
1799 | #define PCI_DEVICE_ID_SEALEVEL_7803 0x7803 | ||
1798 | #define PCI_DEVICE_ID_SEALEVEL_UCOMM8 0x7804 | 1800 | #define PCI_DEVICE_ID_SEALEVEL_UCOMM8 0x7804 |
1799 | 1801 | ||
1800 | #define PCI_VENDOR_ID_HYPERCOPE 0x1365 | 1802 | #define PCI_VENDOR_ID_HYPERCOPE 0x1365 |
diff --git a/include/linux/sched.h b/include/linux/sched.h index 8395e715809d..158d53d07765 100644 --- a/include/linux/sched.h +++ b/include/linux/sched.h | |||
@@ -250,7 +250,7 @@ extern void init_idle_bootup_task(struct task_struct *idle); | |||
250 | extern int runqueue_is_locked(void); | 250 | extern int runqueue_is_locked(void); |
251 | extern void task_rq_unlock_wait(struct task_struct *p); | 251 | extern void task_rq_unlock_wait(struct task_struct *p); |
252 | 252 | ||
253 | extern cpumask_t nohz_cpu_mask; | 253 | extern cpumask_var_t nohz_cpu_mask; |
254 | #if defined(CONFIG_SMP) && defined(CONFIG_NO_HZ) | 254 | #if defined(CONFIG_SMP) && defined(CONFIG_NO_HZ) |
255 | extern int select_nohz_load_balancer(int cpu); | 255 | extern int select_nohz_load_balancer(int cpu); |
256 | #else | 256 | #else |
@@ -758,20 +758,51 @@ enum cpu_idle_type { | |||
758 | #define SD_SERIALIZE 1024 /* Only a single load balancing instance */ | 758 | #define SD_SERIALIZE 1024 /* Only a single load balancing instance */ |
759 | #define SD_WAKE_IDLE_FAR 2048 /* Gain latency sacrificing cache hit */ | 759 | #define SD_WAKE_IDLE_FAR 2048 /* Gain latency sacrificing cache hit */ |
760 | 760 | ||
761 | #define BALANCE_FOR_MC_POWER \ | 761 | enum powersavings_balance_level { |
762 | (sched_smt_power_savings ? SD_POWERSAVINGS_BALANCE : 0) | 762 | POWERSAVINGS_BALANCE_NONE = 0, /* No power saving load balance */ |
763 | POWERSAVINGS_BALANCE_BASIC, /* Fill one thread/core/package | ||
764 | * first for long running threads | ||
765 | */ | ||
766 | POWERSAVINGS_BALANCE_WAKEUP, /* Also bias task wakeups to semi-idle | ||
767 | * cpu package for power savings | ||
768 | */ | ||
769 | MAX_POWERSAVINGS_BALANCE_LEVELS | ||
770 | }; | ||
763 | 771 | ||
764 | #define BALANCE_FOR_PKG_POWER \ | 772 | extern int sched_mc_power_savings, sched_smt_power_savings; |
765 | ((sched_mc_power_savings || sched_smt_power_savings) ? \ | ||
766 | SD_POWERSAVINGS_BALANCE : 0) | ||
767 | 773 | ||
768 | #define test_sd_parent(sd, flag) ((sd->parent && \ | 774 | static inline int sd_balance_for_mc_power(void) |
769 | (sd->parent->flags & flag)) ? 1 : 0) | 775 | { |
776 | if (sched_smt_power_savings) | ||
777 | return SD_POWERSAVINGS_BALANCE; | ||
770 | 778 | ||
779 | return 0; | ||
780 | } | ||
781 | |||
782 | static inline int sd_balance_for_package_power(void) | ||
783 | { | ||
784 | if (sched_mc_power_savings | sched_smt_power_savings) | ||
785 | return SD_POWERSAVINGS_BALANCE; | ||
786 | |||
787 | return 0; | ||
788 | } | ||
789 | |||
790 | /* | ||
791 | * Optimise SD flags for power savings: | ||
792 | * SD_BALANCE_NEWIDLE helps aggressive task consolidation and power savings. | ||
793 | * Keep default SD flags if sched_{smt,mc}_power_saving=0 | ||
794 | */ | ||
795 | |||
796 | static inline int sd_power_saving_flags(void) | ||
797 | { | ||
798 | if (sched_mc_power_savings | sched_smt_power_savings) | ||
799 | return SD_BALANCE_NEWIDLE; | ||
800 | |||
801 | return 0; | ||
802 | } | ||
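
The old macros become inline helpers that callers OR into a domain's flags. A hedged sketch of the call-site pattern; the wrapper function is an assumption:

    static int example_domain_flags(void)
    {
            return SD_LOAD_BALANCE
                   | sd_balance_for_package_power() /* SD_POWERSAVINGS_BALANCE or 0 */
                   | sd_power_saving_flags();       /* SD_BALANCE_NEWIDLE or 0 */
    }
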
771 | 803 | ||
772 | struct sched_group { | 804 | struct sched_group { |
773 | struct sched_group *next; /* Must be a circular list */ | 805 | struct sched_group *next; /* Must be a circular list */ |
774 | cpumask_t cpumask; | ||
775 | 806 | ||
776 | /* | 807 | /* |
777 | * CPU power of this group, SCHED_LOAD_SCALE being max power for a | 808 | * CPU power of this group, SCHED_LOAD_SCALE being max power for a |
@@ -784,8 +815,15 @@ struct sched_group { | |||
784 | * (see include/linux/reciprocal_div.h) | 815 | * (see include/linux/reciprocal_div.h) |
785 | */ | 816 | */ |
786 | u32 reciprocal_cpu_power; | 817 | u32 reciprocal_cpu_power; |
818 | |||
819 | unsigned long cpumask[]; | ||
787 | }; | 820 | }; |
788 | 821 | ||
822 | static inline struct cpumask *sched_group_cpus(struct sched_group *sg) | ||
823 | { | ||
824 | return to_cpumask(sg->cpumask); | ||
825 | } | ||
826 | |||
789 | enum sched_domain_level { | 827 | enum sched_domain_level { |
790 | SD_LV_NONE = 0, | 828 | SD_LV_NONE = 0, |
791 | SD_LV_SIBLING, | 829 | SD_LV_SIBLING, |
@@ -809,7 +847,6 @@ struct sched_domain { | |||
809 | struct sched_domain *parent; /* top domain must be null terminated */ | 847 | struct sched_domain *parent; /* top domain must be null terminated */ |
810 | struct sched_domain *child; /* bottom domain must be null terminated */ | 848 | struct sched_domain *child; /* bottom domain must be null terminated */ |
811 | struct sched_group *groups; /* the balancing groups of the domain */ | 849 | struct sched_group *groups; /* the balancing groups of the domain */ |
812 | cpumask_t span; /* span of all CPUs in this domain */ | ||
813 | unsigned long min_interval; /* Minimum balance interval ms */ | 850 | unsigned long min_interval; /* Minimum balance interval ms */ |
814 | unsigned long max_interval; /* Maximum balance interval ms */ | 851 | unsigned long max_interval; /* Maximum balance interval ms */ |
815 | unsigned int busy_factor; /* less balancing by factor if busy */ | 852 | unsigned int busy_factor; /* less balancing by factor if busy */ |
@@ -864,18 +901,35 @@ struct sched_domain { | |||
864 | #ifdef CONFIG_SCHED_DEBUG | 901 | #ifdef CONFIG_SCHED_DEBUG |
865 | char *name; | 902 | char *name; |
866 | #endif | 903 | #endif |
904 | |||
905 | /* span of all CPUs in this domain */ | ||
906 | unsigned long span[]; | ||
867 | }; | 907 | }; |
868 | 908 | ||
869 | extern void partition_sched_domains(int ndoms_new, cpumask_t *doms_new, | 909 | static inline struct cpumask *sched_domain_span(struct sched_domain *sd) |
910 | { | ||
911 | return to_cpumask(sd->span); | ||
912 | } | ||
913 | |||
914 | extern void partition_sched_domains(int ndoms_new, struct cpumask *doms_new, | ||
870 | struct sched_domain_attr *dattr_new); | 915 | struct sched_domain_attr *dattr_new); |
871 | extern int arch_reinit_sched_domains(void); | 916 | extern int arch_reinit_sched_domains(void); |
872 | 917 | ||
918 | /* Test a flag in parent sched domain */ | ||
919 | static inline int test_sd_parent(struct sched_domain *sd, int flag) | ||
920 | { | ||
921 | if (sd->parent && (sd->parent->flags & flag)) | ||
922 | return 1; | ||
923 | |||
924 | return 0; | ||
925 | } | ||
926 | |||
873 | #else /* CONFIG_SMP */ | 927 | #else /* CONFIG_SMP */ |
874 | 928 | ||
875 | struct sched_domain_attr; | 929 | struct sched_domain_attr; |
876 | 930 | ||
877 | static inline void | 931 | static inline void |
878 | partition_sched_domains(int ndoms_new, cpumask_t *doms_new, | 932 | partition_sched_domains(int ndoms_new, struct cpumask *doms_new, |
879 | struct sched_domain_attr *dattr_new) | 933 | struct sched_domain_attr *dattr_new) |
880 | { | 934 | { |
881 | } | 935 | } |
@@ -926,7 +980,7 @@ struct sched_class { | |||
926 | void (*task_wake_up) (struct rq *this_rq, struct task_struct *task); | 980 | void (*task_wake_up) (struct rq *this_rq, struct task_struct *task); |
927 | 981 | ||
928 | void (*set_cpus_allowed)(struct task_struct *p, | 982 | void (*set_cpus_allowed)(struct task_struct *p, |
929 | const cpumask_t *newmask); | 983 | const struct cpumask *newmask); |
930 | 984 | ||
931 | void (*rq_online)(struct rq *rq); | 985 | void (*rq_online)(struct rq *rq); |
932 | void (*rq_offline)(struct rq *rq); | 986 | void (*rq_offline)(struct rq *rq); |
@@ -1579,12 +1633,12 @@ extern cputime_t task_gtime(struct task_struct *p); | |||
1579 | 1633 | ||
1580 | #ifdef CONFIG_SMP | 1634 | #ifdef CONFIG_SMP |
1581 | extern int set_cpus_allowed_ptr(struct task_struct *p, | 1635 | extern int set_cpus_allowed_ptr(struct task_struct *p, |
1582 | const cpumask_t *new_mask); | 1636 | const struct cpumask *new_mask); |
1583 | #else | 1637 | #else |
1584 | static inline int set_cpus_allowed_ptr(struct task_struct *p, | 1638 | static inline int set_cpus_allowed_ptr(struct task_struct *p, |
1585 | const cpumask_t *new_mask) | 1639 | const struct cpumask *new_mask) |
1586 | { | 1640 | { |
1587 | if (!cpu_isset(0, *new_mask)) | 1641 | if (!cpumask_test_cpu(0, new_mask)) |
1588 | return -EINVAL; | 1642 | return -EINVAL; |
1589 | return 0; | 1643 | return 0; |
1590 | } | 1644 | } |
@@ -2195,10 +2249,8 @@ __trace_special(void *__tr, void *__data, | |||
2195 | } | 2249 | } |
2196 | #endif | 2250 | #endif |
2197 | 2251 | ||
2198 | extern long sched_setaffinity(pid_t pid, const cpumask_t *new_mask); | 2252 | extern long sched_setaffinity(pid_t pid, const struct cpumask *new_mask); |
2199 | extern long sched_getaffinity(pid_t pid, cpumask_t *mask); | 2253 | extern long sched_getaffinity(pid_t pid, struct cpumask *mask); |
2200 | |||
2201 | extern int sched_mc_power_savings, sched_smt_power_savings; | ||
2202 | 2254 | ||
2203 | extern void normalize_rt_tasks(void); | 2255 | extern void normalize_rt_tasks(void); |
2204 | 2256 | ||
diff --git a/include/linux/serial.h b/include/linux/serial.h index 1ea8d9265bf6..9136cc5608c3 100644 --- a/include/linux/serial.h +++ b/include/linux/serial.h | |||
@@ -10,8 +10,9 @@ | |||
10 | #ifndef _LINUX_SERIAL_H | 10 | #ifndef _LINUX_SERIAL_H |
11 | #define _LINUX_SERIAL_H | 11 | #define _LINUX_SERIAL_H |
12 | 12 | ||
13 | #ifdef __KERNEL__ | ||
14 | #include <linux/types.h> | 13 | #include <linux/types.h> |
14 | |||
15 | #ifdef __KERNEL__ | ||
15 | #include <asm/page.h> | 16 | #include <asm/page.h> |
16 | 17 | ||
17 | /* | 18 | /* |
diff --git a/include/linux/serial_8250.h b/include/linux/serial_8250.h index 3d37c94abbc8..d4d2a78ad43e 100644 --- a/include/linux/serial_8250.h +++ b/include/linux/serial_8250.h | |||
@@ -28,6 +28,9 @@ struct plat_serial8250_port { | |||
28 | unsigned char iotype; /* UPIO_* */ | 28 | unsigned char iotype; /* UPIO_* */ |
29 | unsigned char hub6; | 29 | unsigned char hub6; |
30 | upf_t flags; /* UPF_* flags */ | 30 | upf_t flags; /* UPF_* flags */ |
31 | unsigned int type; /* If UPF_FIXED_TYPE */ | ||
32 | unsigned int (*serial_in)(struct uart_port *, int); | ||
33 | void (*serial_out)(struct uart_port *, int, int); | ||
31 | }; | 34 | }; |
32 | 35 | ||
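The new fields let platform code pin the exact UART type (together with UPF_FIXED_TYPE, added in serial_core.h below) and override register access. A sketch of a board file using them; MY_UART_* and my_serial_in are invented names:

	/* Hypothetical platform data: fixed type, 32-bit register reads. */
	static unsigned int my_serial_in(struct uart_port *port, int offset)
	{
		return readl(port->membase + (offset << port->regshift));
	}

	static struct plat_serial8250_port my_uart_data[] = {
		{
			.mapbase	= MY_UART_BASE,
			.irq		= MY_UART_IRQ,
			.iotype		= UPIO_MEM,
			.flags		= UPF_BOOT_AUTOCONF | UPF_FIXED_TYPE,
			.type		= PORT_OCTEON,	/* no autoprobing */
			.serial_in	= my_serial_in,
		},
		{ /* terminator */ },
	};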
33 | /* | 36 | /* |
diff --git a/include/linux/serial_core.h b/include/linux/serial_core.h index feb3b939ec4b..b4199841f1fc 100644 --- a/include/linux/serial_core.h +++ b/include/linux/serial_core.h | |||
@@ -40,7 +40,8 @@ | |||
40 | #define PORT_NS16550A 14 | 40 | #define PORT_NS16550A 14 |
41 | #define PORT_XSCALE 15 | 41 | #define PORT_XSCALE 15 |
42 | #define PORT_RM9000 16 /* PMC-Sierra RM9xxx internal UART */ | 42 | #define PORT_RM9000 16 /* PMC-Sierra RM9xxx internal UART */ |
43 | #define PORT_MAX_8250 16 /* max port ID */ | 43 | #define PORT_OCTEON 17 /* Cavium OCTEON internal UART */ |
44 | #define PORT_MAX_8250 17 /* max port ID */ | ||
44 | 45 | ||
45 | /* | 46 | /* |
46 | * ARM specific type numbers. These are not currently guaranteed | 47 | * ARM specific type numbers. These are not currently guaranteed |
@@ -248,6 +249,8 @@ struct uart_port { | |||
248 | spinlock_t lock; /* port lock */ | 249 | spinlock_t lock; /* port lock */ |
249 | unsigned long iobase; /* in/out[bwl] */ | 250 | unsigned long iobase; /* in/out[bwl] */ |
250 | unsigned char __iomem *membase; /* read/write[bwl] */ | 251 | unsigned char __iomem *membase; /* read/write[bwl] */ |
252 | unsigned int (*serial_in)(struct uart_port *, int); | ||
253 | void (*serial_out)(struct uart_port *, int, int); | ||
251 | unsigned int irq; /* irq number */ | 254 | unsigned int irq; /* irq number */ |
252 | unsigned int uartclk; /* base uart clock */ | 255 | unsigned int uartclk; /* base uart clock */ |
253 | unsigned int fifosize; /* tx fifo size */ | 256 | unsigned int fifosize; /* tx fifo size */ |
@@ -293,6 +296,8 @@ struct uart_port { | |||
293 | #define UPF_MAGIC_MULTIPLIER ((__force upf_t) (1 << 16)) | 296 | #define UPF_MAGIC_MULTIPLIER ((__force upf_t) (1 << 16)) |
294 | #define UPF_CONS_FLOW ((__force upf_t) (1 << 23)) | 297 | #define UPF_CONS_FLOW ((__force upf_t) (1 << 23)) |
295 | #define UPF_SHARE_IRQ ((__force upf_t) (1 << 24)) | 298 | #define UPF_SHARE_IRQ ((__force upf_t) (1 << 24)) |
299 | /* The exact UART type is known and should not be probed. */ | ||
300 | #define UPF_FIXED_TYPE ((__force upf_t) (1 << 27)) | ||
296 | #define UPF_BOOT_AUTOCONF ((__force upf_t) (1 << 28)) | 301 | #define UPF_BOOT_AUTOCONF ((__force upf_t) (1 << 28)) |
297 | #define UPF_FIXED_PORT ((__force upf_t) (1 << 29)) | 302 | #define UPF_FIXED_PORT ((__force upf_t) (1 << 29)) |
298 | #define UPF_DEAD ((__force upf_t) (1 << 30)) | 303 | #define UPF_DEAD ((__force upf_t) (1 << 30)) |
@@ -316,35 +321,13 @@ struct uart_port { | |||
316 | }; | 321 | }; |
317 | 322 | ||
318 | /* | 323 | /* |
319 | * This is the state information which is persistent across opens. | ||
320 | * The low level driver must not touch any elements | ||
321 | * within. | ||
322 | */ | ||
323 | struct uart_state { | ||
324 | unsigned int close_delay; /* msec */ | ||
325 | unsigned int closing_wait; /* msec */ | ||
326 | |||
327 | #define USF_CLOSING_WAIT_INF (0) | ||
328 | #define USF_CLOSING_WAIT_NONE (~0U) | ||
329 | |||
330 | int count; | ||
331 | int pm_state; | ||
332 | struct uart_info *info; | ||
333 | struct uart_port *port; | ||
334 | |||
335 | struct mutex mutex; | ||
336 | }; | ||
337 | |||
338 | #define UART_XMIT_SIZE PAGE_SIZE | ||
339 | |||
340 | typedef unsigned int __bitwise__ uif_t; | ||
341 | |||
342 | /* | ||
343 | * This is the state information which is only valid when the port | 324 | * is open; it may be cleared by the core driver once the device has
344 | * is open; it may be freed by the core driver once the device has | 325 | * is open; it may be cleared the core driver once the device has |
345 | * been closed. Either the low level driver or the core can modify | 326 | * been closed. Either the low level driver or the core can modify |
346 | * stuff here. | 327 | * stuff here. |
347 | */ | 328 | */ |
329 | typedef unsigned int __bitwise__ uif_t; | ||
330 | |||
348 | struct uart_info { | 331 | struct uart_info { |
349 | struct tty_port port; | 332 | struct tty_port port; |
350 | struct circ_buf xmit; | 333 | struct circ_buf xmit; |
@@ -366,6 +349,29 @@ struct uart_info { | |||
366 | wait_queue_head_t delta_msr_wait; | 349 | wait_queue_head_t delta_msr_wait; |
367 | }; | 350 | }; |
368 | 351 | ||
352 | /* | ||
353 | * This is the state information which is persistent across opens. | ||
354 | * The low level driver must not touch any elements contained | ||
355 | * within. | ||
356 | */ | ||
357 | struct uart_state { | ||
358 | unsigned int close_delay; /* msec */ | ||
359 | unsigned int closing_wait; /* msec */ | ||
360 | |||
361 | #define USF_CLOSING_WAIT_INF (0) | ||
362 | #define USF_CLOSING_WAIT_NONE (~0U) | ||
363 | |||
364 | int count; | ||
365 | int pm_state; | ||
366 | struct uart_info info; | ||
367 | struct uart_port *port; | ||
368 | |||
369 | struct mutex mutex; | ||
370 | }; | ||
371 | |||
372 | #define UART_XMIT_SIZE PAGE_SIZE | ||
373 | |||
374 | |||
369 | /* number of characters left in xmit buffer before we ask for more */ | 375 | /* number of characters left in xmit buffer before we ask for more */ |
370 | #define WAKEUP_CHARS 256 | 376 | #define WAKEUP_CHARS 256 |
371 | 377 | ||
@@ -439,8 +445,13 @@ int uart_resume_port(struct uart_driver *reg, struct uart_port *port); | |||
439 | #define uart_circ_chars_free(circ) \ | 445 | #define uart_circ_chars_free(circ) \ |
440 | (CIRC_SPACE((circ)->head, (circ)->tail, UART_XMIT_SIZE)) | 446 | (CIRC_SPACE((circ)->head, (circ)->tail, UART_XMIT_SIZE)) |
441 | 447 | ||
442 | #define uart_tx_stopped(portp) \ | 448 | static inline int uart_tx_stopped(struct uart_port *port) |
443 | ((portp)->info->port.tty->stopped || (portp)->info->port.tty->hw_stopped) | 449 | { |
450 | struct tty_struct *tty = port->info->port.tty; | ||
451 | if (tty->stopped || tty->hw_stopped) | ||
452 | return 1; | ||
453 | return 0; | ||
454 | } | ||
444 | 455 | ||
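A typical transmit-path call site is unchanged in spirit by the macro-to-inline conversion (sketch; my_stop_tx is hypothetical):

	if (uart_circ_empty(&port->info->xmit) || uart_tx_stopped(port)) {
		my_stop_tx(port);
		return;
	}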
445 | /* | 456 | /* |
446 | * The following are helper functions for the low level drivers. | 457 | * The following are helper functions for the low level drivers. |
@@ -451,7 +462,7 @@ uart_handle_sysrq_char(struct uart_port *port, unsigned int ch) | |||
451 | #ifdef SUPPORT_SYSRQ | 462 | #ifdef SUPPORT_SYSRQ |
452 | if (port->sysrq) { | 463 | if (port->sysrq) { |
453 | if (ch && time_before(jiffies, port->sysrq)) { | 464 | if (ch && time_before(jiffies, port->sysrq)) { |
454 | handle_sysrq(ch, port->info ? port->info->port.tty : NULL); | 465 | handle_sysrq(ch, port->info->port.tty); |
455 | port->sysrq = 0; | 466 | port->sysrq = 0; |
456 | return 1; | 467 | return 1; |
457 | } | 468 | } |
diff --git a/include/linux/topology.h b/include/linux/topology.h index 0c5b5ac36d8e..e632d29f0544 100644 --- a/include/linux/topology.h +++ b/include/linux/topology.h | |||
@@ -125,7 +125,8 @@ int arch_update_cpu_topology(void); | |||
125 | | SD_WAKE_AFFINE \ | 125 | | SD_WAKE_AFFINE \ |
126 | | SD_WAKE_BALANCE \ | 126 | | SD_WAKE_BALANCE \ |
127 | | SD_SHARE_PKG_RESOURCES\ | 127 | | SD_SHARE_PKG_RESOURCES\ |
128 | | BALANCE_FOR_MC_POWER, \ | 128 | | sd_balance_for_mc_power()\ |
129 | | sd_power_saving_flags(),\ | ||
129 | .last_balance = jiffies, \ | 130 | .last_balance = jiffies, \ |
130 | .balance_interval = 1, \ | 131 | .balance_interval = 1, \ |
131 | } | 132 | } |
@@ -150,7 +151,8 @@ int arch_update_cpu_topology(void); | |||
150 | | SD_BALANCE_FORK \ | 151 | | SD_BALANCE_FORK \ |
151 | | SD_WAKE_AFFINE \ | 152 | | SD_WAKE_AFFINE \ |
152 | | SD_WAKE_BALANCE \ | 153 | | SD_WAKE_BALANCE \ |
153 | | BALANCE_FOR_PKG_POWER,\ | 154 | | sd_balance_for_package_power()\ |
155 | | sd_power_saving_flags(),\ | ||
154 | .last_balance = jiffies, \ | 156 | .last_balance = jiffies, \ |
155 | .balance_interval = 1, \ | 157 | .balance_interval = 1, \ |
156 | } | 158 | } |
diff --git a/include/linux/tty.h b/include/linux/tty.h index 3f4954c55e53..fc39db95499f 100644 --- a/include/linux/tty.h +++ b/include/linux/tty.h | |||
@@ -180,8 +180,17 @@ struct signal_struct; | |||
180 | * until a hangup so don't use the wrong path. | 180 | * until a hangup so don't use the wrong path. |
181 | */ | 181 | */ |
182 | 182 | ||
183 | struct tty_port; | ||
184 | |||
185 | struct tty_port_operations { | ||
186 | /* Return 1 if the carrier is raised */ | ||
187 | int (*carrier_raised)(struct tty_port *port); | ||
188 | void (*raise_dtr_rts)(struct tty_port *port); | ||
189 | }; | ||
190 | |||
183 | struct tty_port { | 191 | struct tty_port { |
184 | struct tty_struct *tty; /* Back pointer */ | 192 | struct tty_struct *tty; /* Back pointer */ |
193 | const struct tty_port_operations *ops; /* Port operations */ | ||
185 | spinlock_t lock; /* Lock protecting tty field */ | 194 | spinlock_t lock; /* Lock protecting tty field */ |
186 | int blocked_open; /* Waiting to open */ | 195 | int blocked_open; /* Waiting to open */ |
187 | int count; /* Usage count */ | 196 | int count; /* Usage count */ |
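The new ops vector lets the tty core query carrier state and raise DTR/RTS without driver-specific code in the open path. A sketch of a driver wiring it up (all my_*/MY_* names are invented):

	static int my_carrier_raised(struct tty_port *port)
	{
		return (my_read_modem_status() & MY_DCD) ? 1 : 0;
	}

	static void my_raise_dtr_rts(struct tty_port *port)
	{
		my_set_modem_ctrl(MY_DTR | MY_RTS);
	}

	static const struct tty_port_operations my_port_ops = {
		.carrier_raised	= my_carrier_raised,
		.raise_dtr_rts	= my_raise_dtr_rts,
	};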
@@ -253,6 +262,7 @@ struct tty_struct { | |||
253 | unsigned int column; | 262 | unsigned int column; |
254 | unsigned char lnext:1, erasing:1, raw:1, real_raw:1, icanon:1; | 263 | unsigned char lnext:1, erasing:1, raw:1, real_raw:1, icanon:1; |
255 | unsigned char closing:1; | 264 | unsigned char closing:1; |
265 | unsigned char echo_overrun:1; | ||
256 | unsigned short minimum_to_wake; | 266 | unsigned short minimum_to_wake; |
257 | unsigned long overrun_time; | 267 | unsigned long overrun_time; |
258 | int num_overrun; | 268 | int num_overrun; |
@@ -262,11 +272,16 @@ struct tty_struct { | |||
262 | int read_tail; | 272 | int read_tail; |
263 | int read_cnt; | 273 | int read_cnt; |
264 | unsigned long read_flags[N_TTY_BUF_SIZE/(8*sizeof(unsigned long))]; | 274 | unsigned long read_flags[N_TTY_BUF_SIZE/(8*sizeof(unsigned long))]; |
275 | unsigned char *echo_buf; | ||
276 | unsigned int echo_pos; | ||
277 | unsigned int echo_cnt; | ||
265 | int canon_data; | 278 | int canon_data; |
266 | unsigned long canon_head; | 279 | unsigned long canon_head; |
267 | unsigned int canon_column; | 280 | unsigned int canon_column; |
268 | struct mutex atomic_read_lock; | 281 | struct mutex atomic_read_lock; |
269 | struct mutex atomic_write_lock; | 282 | struct mutex atomic_write_lock; |
283 | struct mutex output_lock; | ||
284 | struct mutex echo_lock; | ||
270 | unsigned char *write_buf; | 285 | unsigned char *write_buf; |
271 | int write_cnt; | 286 | int write_cnt; |
272 | spinlock_t read_lock; | 287 | spinlock_t read_lock; |
@@ -295,6 +310,7 @@ struct tty_struct { | |||
295 | #define TTY_PUSH 6 /* n_tty private */ | 310 | #define TTY_PUSH 6 /* n_tty private */ |
296 | #define TTY_CLOSING 7 /* ->close() in progress */ | 311 | #define TTY_CLOSING 7 /* ->close() in progress */ |
297 | #define TTY_LDISC 9 /* Line discipline attached */ | 312 | #define TTY_LDISC 9 /* Line discipline attached */ |
313 | #define TTY_LDISC_CHANGING 10 /* Line discipline changing */ | ||
298 | #define TTY_HW_COOK_OUT 14 /* Hardware can do output cooking */ | 314 | #define TTY_HW_COOK_OUT 14 /* Hardware can do output cooking */ |
299 | #define TTY_HW_COOK_IN 15 /* Hardware can do input cooking */ | 315 | #define TTY_HW_COOK_IN 15 /* Hardware can do input cooking */ |
300 | #define TTY_PTY_LOCK 16 /* pty private */ | 316 | #define TTY_PTY_LOCK 16 /* pty private */ |
@@ -354,8 +370,7 @@ extern int tty_write_room(struct tty_struct *tty); | |||
354 | extern void tty_driver_flush_buffer(struct tty_struct *tty); | 370 | extern void tty_driver_flush_buffer(struct tty_struct *tty); |
355 | extern void tty_throttle(struct tty_struct *tty); | 371 | extern void tty_throttle(struct tty_struct *tty); |
356 | extern void tty_unthrottle(struct tty_struct *tty); | 372 | extern void tty_unthrottle(struct tty_struct *tty); |
357 | extern int tty_do_resize(struct tty_struct *tty, struct tty_struct *real_tty, | 373 | extern int tty_do_resize(struct tty_struct *tty, struct winsize *ws); |
358 | struct winsize *ws); | ||
359 | extern void tty_shutdown(struct tty_struct *tty); | 374 | extern void tty_shutdown(struct tty_struct *tty); |
360 | extern void tty_free_termios(struct tty_struct *tty); | 375 | extern void tty_free_termios(struct tty_struct *tty); |
361 | extern int is_current_pgrp_orphaned(void); | 376 | extern int is_current_pgrp_orphaned(void); |
@@ -421,6 +436,14 @@ extern int tty_port_alloc_xmit_buf(struct tty_port *port); | |||
421 | extern void tty_port_free_xmit_buf(struct tty_port *port); | 436 | extern void tty_port_free_xmit_buf(struct tty_port *port); |
422 | extern struct tty_struct *tty_port_tty_get(struct tty_port *port); | 437 | extern struct tty_struct *tty_port_tty_get(struct tty_port *port); |
423 | extern void tty_port_tty_set(struct tty_port *port, struct tty_struct *tty); | 438 | extern void tty_port_tty_set(struct tty_port *port, struct tty_struct *tty); |
439 | extern int tty_port_carrier_raised(struct tty_port *port); | ||
440 | extern void tty_port_raise_dtr_rts(struct tty_port *port); | ||
441 | extern void tty_port_hangup(struct tty_port *port); | ||
442 | extern int tty_port_block_til_ready(struct tty_port *port, | ||
443 | struct tty_struct *tty, struct file *filp); | ||
444 | extern int tty_port_close_start(struct tty_port *port, | ||
445 | struct tty_struct *tty, struct file *filp); | ||
446 | extern void tty_port_close_end(struct tty_port *port, struct tty_struct *tty); | ||
424 | 447 | ||
425 | extern int tty_register_ldisc(int disc, struct tty_ldisc_ops *new_ldisc); | 448 | extern int tty_register_ldisc(int disc, struct tty_ldisc_ops *new_ldisc); |
426 | extern int tty_unregister_ldisc(int disc); | 449 | extern int tty_unregister_ldisc(int disc); |
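Taken together, the tty_port_* helpers declared above factor the common open/close logic out of individual drivers. A hedged sketch of an open() built on them (the my_* driver structure is invented):

	static int my_tty_open(struct tty_struct *tty, struct file *filp)
	{
		struct my_device *dev = my_device_from_tty(tty);

		tty_port_tty_set(&dev->port, tty);
		my_hw_enable(dev);

		/* Blocks until carrier, using ops->carrier_raised. */
		return tty_port_block_til_ready(&dev->port, tty, filp);
	}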
diff --git a/include/linux/tty_driver.h b/include/linux/tty_driver.h index 78416b901589..08e088334dba 100644 --- a/include/linux/tty_driver.h +++ b/include/linux/tty_driver.h | |||
@@ -196,8 +196,7 @@ | |||
196 | * Optional: If not provided then the write method is called under | 196 | * Optional: If not provided then the write method is called under |
197 | * the atomic write lock to keep it serialized with the ldisc. | 197 | * the atomic write lock to keep it serialized with the ldisc. |
198 | * | 198 | * |
199 | * int (*resize)(struct tty_struct *tty, struct tty_struct *real_tty, | 199 | * int (*resize)(struct tty_struct *tty, struct winsize *ws) |
200 | * unsigned int rows, unsigned int cols); | ||
201 | * | 200 | * |
202 | * Called when a termios request is issued which changes the | 201 | * Called when a termios request is issued which changes the |
203 | * requested terminal geometry. | 202 | * requested terminal geometry. |
@@ -258,8 +257,7 @@ struct tty_operations { | |||
258 | int (*tiocmget)(struct tty_struct *tty, struct file *file); | 257 | int (*tiocmget)(struct tty_struct *tty, struct file *file); |
259 | int (*tiocmset)(struct tty_struct *tty, struct file *file, | 258 | int (*tiocmset)(struct tty_struct *tty, struct file *file, |
260 | unsigned int set, unsigned int clear); | 259 | unsigned int set, unsigned int clear); |
261 | int (*resize)(struct tty_struct *tty, struct tty_struct *real_tty, | 260 | int (*resize)(struct tty_struct *tty, struct winsize *ws); |
262 | struct winsize *ws); | ||
263 | int (*set_termiox)(struct tty_struct *tty, struct termiox *tnew); | 261 | int (*set_termiox)(struct tty_struct *tty, struct termiox *tnew); |
264 | #ifdef CONFIG_CONSOLE_POLL | 262 | #ifdef CONFIG_CONSOLE_POLL |
265 | int (*poll_init)(struct tty_driver *driver, int line, char *options); | 263 | int (*poll_init)(struct tty_driver *driver, int line, char *options); |
diff --git a/include/linux/usb/wusb-wa.h b/include/linux/usb/wusb-wa.h index a102561e7026..fb7c359bdfba 100644 --- a/include/linux/usb/wusb-wa.h +++ b/include/linux/usb/wusb-wa.h | |||
@@ -51,6 +51,7 @@ enum { | |||
51 | WUSB_REQ_GET_TIME = 25, | 51 | WUSB_REQ_GET_TIME = 25, |
52 | WUSB_REQ_SET_STREAM_IDX = 26, | 52 | WUSB_REQ_SET_STREAM_IDX = 26, |
53 | WUSB_REQ_SET_WUSB_MAS = 27, | 53 | WUSB_REQ_SET_WUSB_MAS = 27, |
54 | WUSB_REQ_CHAN_STOP = 28, | ||
54 | }; | 55 | }; |
55 | 56 | ||
56 | 57 | ||
diff --git a/include/linux/uwb.h b/include/linux/uwb.h index f9ccbd9a2ced..c02128991ff7 100644 --- a/include/linux/uwb.h +++ b/include/linux/uwb.h | |||
@@ -30,6 +30,7 @@ | |||
30 | #include <linux/device.h> | 30 | #include <linux/device.h> |
31 | #include <linux/mutex.h> | 31 | #include <linux/mutex.h> |
32 | #include <linux/timer.h> | 32 | #include <linux/timer.h> |
33 | #include <linux/wait.h> | ||
33 | #include <linux/workqueue.h> | 34 | #include <linux/workqueue.h> |
34 | #include <linux/uwb/spec.h> | 35 | #include <linux/uwb/spec.h> |
35 | 36 | ||
@@ -66,6 +67,7 @@ struct uwb_dev { | |||
66 | struct uwb_dev_addr dev_addr; | 67 | struct uwb_dev_addr dev_addr; |
67 | int beacon_slot; | 68 | int beacon_slot; |
68 | DECLARE_BITMAP(streams, UWB_NUM_STREAMS); | 69 | DECLARE_BITMAP(streams, UWB_NUM_STREAMS); |
70 | DECLARE_BITMAP(last_availability_bm, UWB_NUM_MAS); | ||
69 | }; | 71 | }; |
70 | #define to_uwb_dev(d) container_of(d, struct uwb_dev, dev) | 72 | #define to_uwb_dev(d) container_of(d, struct uwb_dev, dev) |
71 | 73 | ||
@@ -86,12 +88,31 @@ struct uwb_notifs_chain { | |||
86 | struct mutex mutex; | 88 | struct mutex mutex; |
87 | }; | 89 | }; |
88 | 90 | ||
91 | /* Beacon cache list */ | ||
92 | struct uwb_beca { | ||
93 | struct list_head list; | ||
94 | size_t entries; | ||
95 | struct mutex mutex; | ||
96 | }; | ||
97 | |||
98 | /* Event handling thread. */ | ||
99 | struct uwbd { | ||
100 | int pid; | ||
101 | struct task_struct *task; | ||
102 | wait_queue_head_t wq; | ||
103 | struct list_head event_list; | ||
104 | spinlock_t event_list_lock; | ||
105 | }; | ||
106 | |||
89 | /** | 107 | /** |
90 | * struct uwb_mas_bm - a bitmap of all MAS in a superframe | 108 | * struct uwb_mas_bm - a bitmap of all MAS in a superframe |
91 | * @bm: a bitmap of length #UWB_NUM_MAS | 109 | * @bm: a bitmap of length #UWB_NUM_MAS |
92 | */ | 110 | */ |
93 | struct uwb_mas_bm { | 111 | struct uwb_mas_bm { |
94 | DECLARE_BITMAP(bm, UWB_NUM_MAS); | 112 | DECLARE_BITMAP(bm, UWB_NUM_MAS); |
113 | DECLARE_BITMAP(unsafe_bm, UWB_NUM_MAS); | ||
114 | int safe; | ||
115 | int unsafe; | ||
95 | }; | 116 | }; |
96 | 117 | ||
97 | /** | 118 | /** |
@@ -117,14 +138,24 @@ struct uwb_mas_bm { | |||
117 | * FIXME: further target states TBD. | 138 | * FIXME: further target states TBD. |
118 | */ | 139 | */ |
119 | enum uwb_rsv_state { | 140 | enum uwb_rsv_state { |
120 | UWB_RSV_STATE_NONE, | 141 | UWB_RSV_STATE_NONE = 0, |
121 | UWB_RSV_STATE_O_INITIATED, | 142 | UWB_RSV_STATE_O_INITIATED, |
122 | UWB_RSV_STATE_O_PENDING, | 143 | UWB_RSV_STATE_O_PENDING, |
123 | UWB_RSV_STATE_O_MODIFIED, | 144 | UWB_RSV_STATE_O_MODIFIED, |
124 | UWB_RSV_STATE_O_ESTABLISHED, | 145 | UWB_RSV_STATE_O_ESTABLISHED, |
146 | UWB_RSV_STATE_O_TO_BE_MOVED, | ||
147 | UWB_RSV_STATE_O_MOVE_EXPANDING, | ||
148 | UWB_RSV_STATE_O_MOVE_COMBINING, | ||
149 | UWB_RSV_STATE_O_MOVE_REDUCING, | ||
125 | UWB_RSV_STATE_T_ACCEPTED, | 150 | UWB_RSV_STATE_T_ACCEPTED, |
126 | UWB_RSV_STATE_T_DENIED, | 151 | UWB_RSV_STATE_T_DENIED, |
152 | UWB_RSV_STATE_T_CONFLICT, | ||
127 | UWB_RSV_STATE_T_PENDING, | 153 | UWB_RSV_STATE_T_PENDING, |
154 | UWB_RSV_STATE_T_EXPANDING_ACCEPTED, | ||
155 | UWB_RSV_STATE_T_EXPANDING_CONFLICT, | ||
156 | UWB_RSV_STATE_T_EXPANDING_PENDING, | ||
157 | UWB_RSV_STATE_T_EXPANDING_DENIED, | ||
158 | UWB_RSV_STATE_T_RESIZED, | ||
128 | 159 | ||
129 | UWB_RSV_STATE_LAST, | 160 | UWB_RSV_STATE_LAST, |
130 | }; | 161 | }; |
@@ -149,6 +180,12 @@ struct uwb_rsv_target { | |||
149 | }; | 180 | }; |
150 | }; | 181 | }; |
151 | 182 | ||
183 | struct uwb_rsv_move { | ||
184 | struct uwb_mas_bm final_mas; | ||
185 | struct uwb_ie_drp *companion_drp_ie; | ||
186 | struct uwb_mas_bm companion_mas; | ||
187 | }; | ||
188 | |||
152 | /* | 189 | /* |
153 | * Number of streams reserved for reservations targeted at DevAddrs. | 190 | * Number of streams reserved for reservations targeted at DevAddrs. |
154 | */ | 191 | */ |
@@ -186,6 +223,7 @@ typedef void (*uwb_rsv_cb_f)(struct uwb_rsv *rsv); | |||
186 | * | 223 | * |
187 | * @status: negotiation status | 224 | * @status: negotiation status |
188 | * @stream: stream index allocated for this reservation | 225 | * @stream: stream index allocated for this reservation |
226 | * @tiebreaker: conflict tiebreaker for this reservation | ||
189 | * @mas: reserved MAS | 227 | * @mas: reserved MAS |
190 | * @drp_ie: the DRP IE | 228 | * @drp_ie: the DRP IE |
191 | * @ie_valid: true iff the DRP IE matches the reservation parameters | 229 | * @ie_valid: true iff the DRP IE matches the reservation parameters |
@@ -201,25 +239,29 @@ struct uwb_rsv { | |||
201 | struct uwb_rc *rc; | 239 | struct uwb_rc *rc; |
202 | struct list_head rc_node; | 240 | struct list_head rc_node; |
203 | struct list_head pal_node; | 241 | struct list_head pal_node; |
242 | struct kref kref; | ||
204 | 243 | ||
205 | struct uwb_dev *owner; | 244 | struct uwb_dev *owner; |
206 | struct uwb_rsv_target target; | 245 | struct uwb_rsv_target target; |
207 | enum uwb_drp_type type; | 246 | enum uwb_drp_type type; |
208 | int max_mas; | 247 | int max_mas; |
209 | int min_mas; | 248 | int min_mas; |
210 | int sparsity; | 249 | int max_interval; |
211 | bool is_multicast; | 250 | bool is_multicast; |
212 | 251 | ||
213 | uwb_rsv_cb_f callback; | 252 | uwb_rsv_cb_f callback; |
214 | void *pal_priv; | 253 | void *pal_priv; |
215 | 254 | ||
216 | enum uwb_rsv_state state; | 255 | enum uwb_rsv_state state; |
256 | bool needs_release_companion_mas; | ||
217 | u8 stream; | 257 | u8 stream; |
258 | u8 tiebreaker; | ||
218 | struct uwb_mas_bm mas; | 259 | struct uwb_mas_bm mas; |
219 | struct uwb_ie_drp *drp_ie; | 260 | struct uwb_ie_drp *drp_ie; |
261 | struct uwb_rsv_move mv; | ||
220 | bool ie_valid; | 262 | bool ie_valid; |
221 | struct timer_list timer; | 263 | struct timer_list timer; |
222 | bool expired; | 264 | struct work_struct handle_timeout_work; |
223 | }; | 265 | }; |
224 | 266 | ||
225 | static const | 267 | static const |
@@ -261,6 +303,13 @@ struct uwb_drp_avail { | |||
261 | bool ie_valid; | 303 | bool ie_valid; |
262 | }; | 304 | }; |
263 | 305 | ||
306 | struct uwb_drp_backoff_win { | ||
307 | u8 window; | ||
308 | u8 n; | ||
309 | int total_expired; | ||
310 | struct timer_list timer; | ||
311 | bool can_reserve_extra_mases; | ||
312 | }; | ||
264 | 313 | ||
265 | const char *uwb_rsv_state_str(enum uwb_rsv_state state); | 314 | const char *uwb_rsv_state_str(enum uwb_rsv_state state); |
266 | const char *uwb_rsv_type_str(enum uwb_drp_type type); | 315 | const char *uwb_rsv_type_str(enum uwb_drp_type type); |
@@ -276,6 +325,8 @@ void uwb_rsv_terminate(struct uwb_rsv *rsv); | |||
276 | 325 | ||
277 | void uwb_rsv_accept(struct uwb_rsv *rsv, uwb_rsv_cb_f cb, void *pal_priv); | 326 | void uwb_rsv_accept(struct uwb_rsv *rsv, uwb_rsv_cb_f cb, void *pal_priv); |
278 | 327 | ||
328 | void uwb_rsv_get_usable_mas(struct uwb_rsv *orig_rsv, struct uwb_mas_bm *mas); | ||
329 | |||
279 | /** | 330 | /** |
280 | * Radio Control Interface instance | 331 | * Radio Control Interface instance |
281 | * | 332 | * |
@@ -337,23 +388,33 @@ struct uwb_rc { | |||
337 | u8 ctx_roll; | 388 | u8 ctx_roll; |
338 | 389 | ||
339 | int beaconing; /* Beaconing state [channel number] */ | 390 | int beaconing; /* Beaconing state [channel number] */ |
391 | int beaconing_forced; | ||
340 | int scanning; | 392 | int scanning; |
341 | enum uwb_scan_type scan_type:3; | 393 | enum uwb_scan_type scan_type:3; |
342 | unsigned ready:1; | 394 | unsigned ready:1; |
343 | struct uwb_notifs_chain notifs_chain; | 395 | struct uwb_notifs_chain notifs_chain; |
396 | struct uwb_beca uwb_beca; | ||
397 | |||
398 | struct uwbd uwbd; | ||
344 | 399 | ||
400 | struct uwb_drp_backoff_win bow; | ||
345 | struct uwb_drp_avail drp_avail; | 401 | struct uwb_drp_avail drp_avail; |
346 | struct list_head reservations; | 402 | struct list_head reservations; |
403 | struct list_head cnflt_alien_list; | ||
404 | struct uwb_mas_bm cnflt_alien_bitmap; | ||
347 | struct mutex rsvs_mutex; | 405 | struct mutex rsvs_mutex; |
406 | spinlock_t rsvs_lock; | ||
348 | struct workqueue_struct *rsv_workq; | 407 | struct workqueue_struct *rsv_workq; |
349 | struct work_struct rsv_update_work; | ||
350 | 408 | ||
409 | struct delayed_work rsv_update_work; | ||
410 | struct delayed_work rsv_alien_bp_work; | ||
411 | int set_drp_ie_pending; | ||
351 | struct mutex ies_mutex; | 412 | struct mutex ies_mutex; |
352 | struct uwb_rc_cmd_set_ie *ies; | 413 | struct uwb_rc_cmd_set_ie *ies; |
353 | size_t ies_capacity; | 414 | size_t ies_capacity; |
354 | 415 | ||
355 | spinlock_t pal_lock; | ||
356 | struct list_head pals; | 416 | struct list_head pals; |
417 | int active_pals; | ||
357 | 418 | ||
358 | struct uwb_dbg *dbg; | 419 | struct uwb_dbg *dbg; |
359 | }; | 420 | }; |
@@ -361,11 +422,19 @@ struct uwb_rc { | |||
361 | 422 | ||
362 | /** | 423 | /** |
363 | * struct uwb_pal - a UWB PAL | 424 | * struct uwb_pal - a UWB PAL |
364 | * @name: descriptive name for this PAL (wushc, wlp, etc.). | 425 | * @name: descriptive name for this PAL (wusbhc, wlp, etc.). |
365 | * @device: a device for the PAL. Used to link the PAL and the radio | 426 | * @device: a device for the PAL. Used to link the PAL and the radio |
366 | * controller in sysfs. | 427 | * controller in sysfs. |
428 | * @rc: the radio controller the PAL uses. | ||
429 | * @channel_changed: called when the channel used by the radio changes. | ||
430 | * A channel of -1 means the channel has been stopped. | ||
367 | * @new_rsv: called when a peer requests a reservation (may be NULL if | 431 | * @new_rsv: called when a peer requests a reservation (may be NULL if |
368 | * the PAL cannot accept reservation requests). | 432 | * the PAL cannot accept reservation requests). |
433 | * @channel: channel being used by the PAL; 0 if the PAL isn't using | ||
434 | * the radio; -1 if the PAL wishes to use the radio but | ||
435 | * cannot. | ||
436 | * @debugfs_dir: a debugfs directory which the PAL can use for its own | ||
437 | * debugfs files. | ||
369 | * | 438 | * |
370 | * A Protocol Adaptation Layer (PAL) is a user of the WiMedia UWB | 439 | * A Protocol Adaptation Layer (PAL) is a user of the WiMedia UWB |
371 | * radio platform (e.g., WUSB, WLP or Bluetooth UWB AMP). | 440 | * radio platform (e.g., WUSB, WLP or Bluetooth UWB AMP). |
@@ -384,12 +453,21 @@ struct uwb_pal { | |||
384 | struct list_head node; | 453 | struct list_head node; |
385 | const char *name; | 454 | const char *name; |
386 | struct device *device; | 455 | struct device *device; |
387 | void (*new_rsv)(struct uwb_rsv *rsv); | 456 | struct uwb_rc *rc; |
457 | |||
458 | void (*channel_changed)(struct uwb_pal *pal, int channel); | ||
459 | void (*new_rsv)(struct uwb_pal *pal, struct uwb_rsv *rsv); | ||
460 | |||
461 | int channel; | ||
462 | struct dentry *debugfs_dir; | ||
388 | }; | 463 | }; |
389 | 464 | ||
390 | void uwb_pal_init(struct uwb_pal *pal); | 465 | void uwb_pal_init(struct uwb_pal *pal); |
391 | int uwb_pal_register(struct uwb_rc *rc, struct uwb_pal *pal); | 466 | int uwb_pal_register(struct uwb_pal *pal); |
392 | void uwb_pal_unregister(struct uwb_rc *rc, struct uwb_pal *pal); | 467 | void uwb_pal_unregister(struct uwb_pal *pal); |
468 | |||
469 | int uwb_radio_start(struct uwb_pal *pal); | ||
470 | void uwb_radio_stop(struct uwb_pal *pal); | ||
393 | 471 | ||
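Registration now binds the radio controller into the PAL itself, and PALs must start the radio explicitly. A sketch of the revised bring-up flow (field values illustrative):

	uwb_pal_init(&my_pal);
	my_pal.name = "mypal";
	my_pal.device = &my_dev->dev;
	my_pal.rc = rc;				/* rc now lives in the PAL */
	my_pal.channel_changed = my_channel_changed;

	if (uwb_pal_register(&my_pal) == 0)
		uwb_radio_start(&my_pal);	/* request beaconing when needed */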
394 | /* | 472 | /* |
395 | * General public API | 473 | * General public API |
@@ -443,8 +521,6 @@ ssize_t uwb_rc_vcmd(struct uwb_rc *rc, const char *cmd_name, | |||
443 | struct uwb_rccb *cmd, size_t cmd_size, | 521 | struct uwb_rccb *cmd, size_t cmd_size, |
444 | u8 expected_type, u16 expected_event, | 522 | u8 expected_type, u16 expected_event, |
445 | struct uwb_rceb **preply); | 523 | struct uwb_rceb **preply); |
446 | ssize_t uwb_rc_get_ie(struct uwb_rc *, struct uwb_rc_evt_get_ie **); | ||
447 | int uwb_bg_joined(struct uwb_rc *rc); | ||
448 | 524 | ||
449 | size_t __uwb_addr_print(char *, size_t, const unsigned char *, int); | 525 | size_t __uwb_addr_print(char *, size_t, const unsigned char *, int); |
450 | 526 | ||
@@ -520,6 +596,8 @@ void uwb_rc_rm(struct uwb_rc *); | |||
520 | void uwb_rc_neh_grok(struct uwb_rc *, void *, size_t); | 596 | void uwb_rc_neh_grok(struct uwb_rc *, void *, size_t); |
521 | void uwb_rc_neh_error(struct uwb_rc *, int); | 597 | void uwb_rc_neh_error(struct uwb_rc *, int); |
522 | void uwb_rc_reset_all(struct uwb_rc *rc); | 598 | void uwb_rc_reset_all(struct uwb_rc *rc); |
599 | void uwb_rc_pre_reset(struct uwb_rc *rc); | ||
600 | void uwb_rc_post_reset(struct uwb_rc *rc); | ||
523 | 601 | ||
524 | /** | 602 | /** |
525 | * uwb_rsv_is_owner - is the owner of this reservation the RC? | 603 | * uwb_rsv_is_owner - is the owner of this reservation the RC? |
@@ -531,7 +609,9 @@ static inline bool uwb_rsv_is_owner(struct uwb_rsv *rsv) | |||
531 | } | 609 | } |
532 | 610 | ||
533 | /** | 611 | /** |
534 | * Events generated by UWB that can be passed to any listeners | 612 | * enum uwb_notifs - UWB events that can be passed to any listeners |
613 | * @UWB_NOTIF_ONAIR: a new neighbour has joined the beacon group. | ||
614 | * @UWB_NOTIF_OFFAIR: a neighbour has left the beacon group. | ||
535 | * | 615 | * |
536 | * Higher layers can register callback functions with the radio | 616 | * Higher layers can register callback functions with the radio |
537 | * controller using uwb_notifs_register(). The radio controller | 617 | * controller using uwb_notifs_register(). The radio controller |
@@ -539,8 +619,6 @@ static inline bool uwb_rsv_is_owner(struct uwb_rsv *rsv) | |||
539 | * nodes when an event occurs. | 619 | * nodes when an event occurs. |
540 | */ | 620 | */ |
541 | enum uwb_notifs { | 621 | enum uwb_notifs { |
542 | UWB_NOTIF_BG_JOIN = 0, /* radio controller joined a beacon group */ | ||
543 | UWB_NOTIF_BG_LEAVE = 1, /* radio controller left a beacon group */ | ||
544 | UWB_NOTIF_ONAIR, | 622 | UWB_NOTIF_ONAIR, |
545 | UWB_NOTIF_OFFAIR, | 623 | UWB_NOTIF_OFFAIR, |
546 | }; | 624 | }; |
@@ -652,22 +730,9 @@ static inline int edc_inc(struct edc *err_hist, u16 max_err, u16 timeframe) | |||
652 | 730 | ||
653 | /* Information Element handling */ | 731 | /* Information Element handling */ |
654 | 732 | ||
655 | /* For representing the state of writing to a buffer when iterating */ | ||
656 | struct uwb_buf_ctx { | ||
657 | char *buf; | ||
658 | size_t bytes, size; | ||
659 | }; | ||
660 | |||
661 | typedef int (*uwb_ie_f)(struct uwb_dev *, const struct uwb_ie_hdr *, | ||
662 | size_t, void *); | ||
663 | struct uwb_ie_hdr *uwb_ie_next(void **ptr, size_t *len); | 733 | struct uwb_ie_hdr *uwb_ie_next(void **ptr, size_t *len); |
664 | ssize_t uwb_ie_for_each(struct uwb_dev *uwb_dev, uwb_ie_f fn, void *data, | 734 | int uwb_rc_ie_add(struct uwb_rc *uwb_rc, const struct uwb_ie_hdr *ies, size_t size); |
665 | const void *buf, size_t size); | 735 | int uwb_rc_ie_rm(struct uwb_rc *uwb_rc, enum uwb_ie element_id); |
666 | int uwb_ie_dump_hex(struct uwb_dev *, const struct uwb_ie_hdr *, | ||
667 | size_t, void *); | ||
668 | int uwb_rc_set_ie(struct uwb_rc *, struct uwb_rc_cmd_set_ie *); | ||
669 | struct uwb_ie_hdr *uwb_ie_next(void **ptr, size_t *len); | ||
670 | |||
671 | 736 | ||
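The iterator/callback machinery gives way to two direct calls. A sketch of adding and later removing an IE (buf/len construction elided; rc and dev assumed in scope):

	struct uwb_ie_hdr *hdr = (struct uwb_ie_hdr *)buf;

	if (uwb_rc_ie_add(rc, hdr, len) < 0)
		dev_err(dev, "failed to add IE\n");

	uwb_rc_ie_rm(rc, UWB_IDENTIFICATION_IE);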
672 | /* | 737 | /* |
673 | * Transmission statistics | 738 | * Transmission statistics |
diff --git a/include/linux/uwb/debug-cmd.h b/include/linux/uwb/debug-cmd.h index 1141f41bab5c..8da004e25628 100644 --- a/include/linux/uwb/debug-cmd.h +++ b/include/linux/uwb/debug-cmd.h | |||
@@ -32,6 +32,10 @@ | |||
32 | enum uwb_dbg_cmd_type { | 32 | enum uwb_dbg_cmd_type { |
33 | UWB_DBG_CMD_RSV_ESTABLISH = 1, | 33 | UWB_DBG_CMD_RSV_ESTABLISH = 1, |
34 | UWB_DBG_CMD_RSV_TERMINATE = 2, | 34 | UWB_DBG_CMD_RSV_TERMINATE = 2, |
35 | UWB_DBG_CMD_IE_ADD = 3, | ||
36 | UWB_DBG_CMD_IE_RM = 4, | ||
37 | UWB_DBG_CMD_RADIO_START = 5, | ||
38 | UWB_DBG_CMD_RADIO_STOP = 6, | ||
35 | }; | 39 | }; |
36 | 40 | ||
37 | struct uwb_dbg_cmd_rsv_establish { | 41 | struct uwb_dbg_cmd_rsv_establish { |
@@ -39,18 +43,25 @@ struct uwb_dbg_cmd_rsv_establish { | |||
39 | __u8 type; | 43 | __u8 type; |
40 | __u16 max_mas; | 44 | __u16 max_mas; |
41 | __u16 min_mas; | 45 | __u16 min_mas; |
42 | __u8 sparsity; | 46 | __u8 max_interval; |
43 | }; | 47 | }; |
44 | 48 | ||
45 | struct uwb_dbg_cmd_rsv_terminate { | 49 | struct uwb_dbg_cmd_rsv_terminate { |
46 | int index; | 50 | int index; |
47 | }; | 51 | }; |
48 | 52 | ||
53 | struct uwb_dbg_cmd_ie { | ||
54 | __u8 data[128]; | ||
55 | int len; | ||
56 | }; | ||
57 | |||
49 | struct uwb_dbg_cmd { | 58 | struct uwb_dbg_cmd { |
50 | __u32 type; | 59 | __u32 type; |
51 | union { | 60 | union { |
52 | struct uwb_dbg_cmd_rsv_establish rsv_establish; | 61 | struct uwb_dbg_cmd_rsv_establish rsv_establish; |
53 | struct uwb_dbg_cmd_rsv_terminate rsv_terminate; | 62 | struct uwb_dbg_cmd_rsv_terminate rsv_terminate; |
63 | struct uwb_dbg_cmd_ie ie_add; | ||
64 | struct uwb_dbg_cmd_ie ie_rm; | ||
54 | }; | 65 | }; |
55 | }; | 66 | }; |
56 | 67 | ||
diff --git a/include/linux/uwb/debug.h b/include/linux/uwb/debug.h deleted file mode 100644 index a86a73fe303f..000000000000 --- a/include/linux/uwb/debug.h +++ /dev/null | |||
@@ -1,82 +0,0 @@ | |||
1 | /* | ||
2 | * Ultra Wide Band | ||
3 | * Debug Support | ||
4 | * | ||
5 | * Copyright (C) 2005-2006 Intel Corporation | ||
6 | * Inaky Perez-Gonzalez <inaky.perez-gonzalez@intel.com> | ||
7 | * | ||
8 | * This program is free software; you can redistribute it and/or | ||
9 | * modify it under the terms of the GNU General Public License version | ||
10 | * 2 as published by the Free Software Foundation. | ||
11 | * | ||
12 | * This program is distributed in the hope that it will be useful, | ||
13 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
14 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
15 | * GNU General Public License for more details. | ||
16 | * | ||
17 | * You should have received a copy of the GNU General Public License | ||
18 | * along with this program; if not, write to the Free Software | ||
19 | * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA | ||
20 | * 02110-1301, USA. | ||
21 | * | ||
22 | * | ||
23 | * FIXME: doc | ||
24 | * Invoke like: | ||
25 | * | ||
26 | * #define D_LOCAL 4 | ||
27 | * #include <linux/uwb/debug.h> | ||
28 | * | ||
29 | * At the end of your include files. | ||
30 | */ | ||
31 | #include <linux/types.h> | ||
32 | |||
33 | struct device; | ||
34 | extern void dump_bytes(struct device *dev, const void *_buf, size_t rsize); | ||
35 | |||
36 | /* Master debug switch; !0 enables, 0 disables */ | ||
37 | #define D_MASTER (!0) | ||
38 | |||
39 | /* Local (per-file) debug switch; #define before #including */ | ||
40 | #ifndef D_LOCAL | ||
41 | #define D_LOCAL 0 | ||
42 | #endif | ||
43 | |||
44 | #undef __d_printf | ||
45 | #undef d_fnstart | ||
46 | #undef d_fnend | ||
47 | #undef d_printf | ||
48 | #undef d_dump | ||
49 | |||
50 | #define __d_printf(l, _tag, _dev, f, a...) \ | ||
51 | do { \ | ||
52 | struct device *__dev = (_dev); \ | ||
53 | if (D_MASTER && D_LOCAL >= (l)) { \ | ||
54 | char __head[64] = ""; \ | ||
55 | if (_dev != NULL) { \ | ||
56 | if ((unsigned long)__dev < 4096) \ | ||
57 | printk(KERN_ERR "E: Corrupt dev %p\n", \ | ||
58 | __dev); \ | ||
59 | else \ | ||
60 | snprintf(__head, sizeof(__head), \ | ||
61 | "%s %s: ", \ | ||
62 | dev_driver_string(__dev), \ | ||
63 | __dev->bus_id); \ | ||
64 | } \ | ||
65 | printk(KERN_ERR "%s%s" _tag ": " f, __head, \ | ||
66 | __func__, ## a); \ | ||
67 | } \ | ||
68 | } while (0 && _dev) | ||
69 | |||
70 | #define d_fnstart(l, _dev, f, a...) \ | ||
71 | __d_printf(l, " FNSTART", _dev, f, ## a) | ||
72 | #define d_fnend(l, _dev, f, a...) \ | ||
73 | __d_printf(l, " FNEND", _dev, f, ## a) | ||
74 | #define d_printf(l, _dev, f, a...) \ | ||
75 | __d_printf(l, "", _dev, f, ## a) | ||
76 | #define d_dump(l, _dev, ptr, size) \ | ||
77 | do { \ | ||
78 | struct device *__dev = _dev; \ | ||
79 | if (D_MASTER && D_LOCAL >= (l)) \ | ||
80 | dump_bytes(__dev, ptr, size); \ | ||
81 | } while (0 && _dev) | ||
82 | #define d_test(l) (D_MASTER && D_LOCAL >= (l)) | ||
diff --git a/include/linux/uwb/spec.h b/include/linux/uwb/spec.h index 198c15f8e251..b52e44f1bd33 100644 --- a/include/linux/uwb/spec.h +++ b/include/linux/uwb/spec.h | |||
@@ -59,6 +59,11 @@ enum { UWB_NUM_ZONES = 16 }; | |||
59 | #define UWB_MAS_PER_ZONE (UWB_NUM_MAS / UWB_NUM_ZONES) | 59 | #define UWB_MAS_PER_ZONE (UWB_NUM_MAS / UWB_NUM_ZONES) |
60 | 60 | ||
61 | /* | 61 | /* |
62 | * Number of MAS required before a row can be considered available. | ||
63 | */ | ||
64 | #define UWB_USABLE_MAS_PER_ROW (UWB_NUM_ZONES - 1) | ||
65 | |||
66 | /* | ||
62 | * Number of streams per DRP reservation between a pair of devices. | 67 | * Number of streams per DRP reservation between a pair of devices. |
63 | * | 68 | * |
64 | * [ECMA-368] section 16.8.6. | 69 | * [ECMA-368] section 16.8.6. |
@@ -94,6 +99,26 @@ enum { UWB_BEACON_SLOT_LENGTH_US = 85 }; | |||
94 | enum { UWB_MAX_LOST_BEACONS = 3 }; | 99 | enum { UWB_MAX_LOST_BEACONS = 3 }; |
95 | 100 | ||
96 | /* | 101 | /* |
102 | * mDRPBackOffWinMin | ||
103 | * | ||
104 | * The minimum number of superframes to wait before trying to reserve | ||
105 | * extra MAS. | ||
106 | * | ||
107 | * [ECMA-368] section 17.16 | ||
108 | */ | ||
109 | enum { UWB_DRP_BACKOFF_WIN_MIN = 2 }; | ||
110 | |||
111 | /* | ||
112 | * mDRPBackOffWinMax | ||
113 | * | ||
114 | * The maximum number of superframes to wait before trying to reserve | ||
115 | * extra MAS. | ||
116 | * | ||
117 | * [ECMA-368] section 17.16 | ||
118 | */ | ||
119 | enum { UWB_DRP_BACKOFF_WIN_MAX = 16 }; | ||
120 | |||
121 | /* | ||
97 | * Length of a superframe in microseconds. | 122 | * Length of a superframe in microseconds. |
98 | */ | 123 | */ |
99 | #define UWB_SUPERFRAME_LENGTH_US (UWB_MAS_LENGTH_US * UWB_NUM_MAS) | 124 | #define UWB_SUPERFRAME_LENGTH_US (UWB_MAS_LENGTH_US * UWB_NUM_MAS) |
@@ -200,6 +225,12 @@ enum uwb_drp_reason { | |||
200 | UWB_DRP_REASON_MODIFIED, | 225 | UWB_DRP_REASON_MODIFIED, |
201 | }; | 226 | }; |
202 | 227 | ||
228 | /** Relinquish Request Reason Codes ([ECMA-368] table 113) */ | ||
229 | enum uwb_relinquish_req_reason { | ||
230 | UWB_RELINQUISH_REQ_REASON_NON_SPECIFIC = 0, | ||
231 | UWB_RELINQUISH_REQ_REASON_OVER_ALLOCATION, | ||
232 | }; | ||
233 | |||
203 | /** | 234 | /** |
204 | * DRP Notification Reason Codes (WHCI 0.95 [3.1.4.9]) | 235 | * DRP Notification Reason Codes (WHCI 0.95 [3.1.4.9]) |
205 | */ | 236 | */ |
@@ -252,6 +283,7 @@ enum uwb_ie { | |||
252 | UWB_APP_SPEC_PROBE_IE = 15, | 283 | UWB_APP_SPEC_PROBE_IE = 15, |
253 | UWB_IDENTIFICATION_IE = 19, | 284 | UWB_IDENTIFICATION_IE = 19, |
254 | UWB_MASTER_KEY_ID_IE = 20, | 285 | UWB_MASTER_KEY_ID_IE = 20, |
286 | UWB_RELINQUISH_REQUEST_IE = 21, | ||
255 | UWB_IE_WLP = 250, /* WiMedia Logical Link Control Protocol WLP 0.99 */ | 287 | UWB_IE_WLP = 250, /* WiMedia Logical Link Control Protocol WLP 0.99 */ |
256 | UWB_APP_SPEC_IE = 255, | 288 | UWB_APP_SPEC_IE = 255, |
257 | }; | 289 | }; |
@@ -365,6 +397,27 @@ struct uwb_ie_drp_avail { | |||
365 | DECLARE_BITMAP(bmp, UWB_NUM_MAS); | 397 | DECLARE_BITMAP(bmp, UWB_NUM_MAS); |
366 | } __attribute__((packed)); | 398 | } __attribute__((packed)); |
367 | 399 | ||
400 | /* Relinquish Request IE ([ECMA-368] section 16.8.19). */ | ||
401 | struct uwb_relinquish_request_ie { | ||
402 | struct uwb_ie_hdr hdr; | ||
403 | __le16 relinquish_req_control; | ||
404 | struct uwb_dev_addr dev_addr; | ||
405 | struct uwb_drp_alloc allocs[]; | ||
406 | } __attribute__((packed)); | ||
407 | |||
408 | static inline int uwb_ie_relinquish_req_reason_code(struct uwb_relinquish_request_ie *ie) | ||
409 | { | ||
410 | return (le16_to_cpu(ie->relinquish_req_control) >> 0) & 0xf; | ||
411 | } | ||
412 | |||
413 | static inline void uwb_ie_relinquish_req_set_reason_code(struct uwb_relinquish_request_ie *ie, | ||
414 | int reason_code) | ||
415 | { | ||
416 | u16 ctrl = le16_to_cpu(ie->relinquish_req_control); | ||
417 | ctrl = (ctrl & ~(0xf << 0)) | (reason_code << 0); | ||
418 | ie->relinquish_req_control = cpu_to_le16(ctrl); | ||
419 | } | ||
420 | |||
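The accessor pair packs the reason code into the low four bits of relinquish_req_control. A short round-trip sketch (ie assumed allocated and zeroed by the caller):

	uwb_ie_relinquish_req_set_reason_code(ie,
			UWB_RELINQUISH_REQ_REASON_OVER_ALLOCATION);

	WARN_ON(uwb_ie_relinquish_req_reason_code(ie) !=
			UWB_RELINQUISH_REQ_REASON_OVER_ALLOCATION);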
368 | /** | 421 | /** |
369 | * The Vendor ID is set to an OUI that indicates the vendor of the device. | 422 | * The Vendor ID is set to an OUI that indicates the vendor of the device. |
370 | * ECMA-368 [16.8.10] | 423 | * ECMA-368 [16.8.10] |
diff --git a/include/linux/uwb/umc.h b/include/linux/uwb/umc.h index 36a39e34f8d7..4b4fc0f43855 100644 --- a/include/linux/uwb/umc.h +++ b/include/linux/uwb/umc.h | |||
@@ -89,6 +89,8 @@ struct umc_driver { | |||
89 | void (*remove)(struct umc_dev *); | 89 | void (*remove)(struct umc_dev *); |
90 | int (*suspend)(struct umc_dev *, pm_message_t state); | 90 | int (*suspend)(struct umc_dev *, pm_message_t state); |
91 | int (*resume)(struct umc_dev *); | 91 | int (*resume)(struct umc_dev *); |
92 | int (*pre_reset)(struct umc_dev *); | ||
93 | int (*post_reset)(struct umc_dev *); | ||
92 | 94 | ||
93 | struct device_driver driver; | 95 | struct device_driver driver; |
94 | }; | 96 | }; |
diff --git a/include/linux/wlp.h b/include/linux/wlp.h index 033545e145c7..ac95ce6606ac 100644 --- a/include/linux/wlp.h +++ b/include/linux/wlp.h | |||
@@ -646,6 +646,7 @@ struct wlp_wss { | |||
646 | struct wlp { | 646 | struct wlp { |
647 | struct mutex mutex; | 647 | struct mutex mutex; |
648 | struct uwb_rc *rc; /* UWB radio controller */ | 648 | struct uwb_rc *rc; /* UWB radio controller */ |
649 | struct net_device *ndev; | ||
649 | struct uwb_pal pal; | 650 | struct uwb_pal pal; |
650 | struct wlp_eda eda; | 651 | struct wlp_eda eda; |
651 | struct wlp_uuid uuid; | 652 | struct wlp_uuid uuid; |
@@ -675,7 +676,7 @@ struct wlp_wss_attribute { | |||
675 | static struct wlp_wss_attribute wss_attr_##_name = __ATTR(_name, _mode, \ | 676 | static struct wlp_wss_attribute wss_attr_##_name = __ATTR(_name, _mode, \ |
676 | _show, _store) | 677 | _show, _store) |
677 | 678 | ||
678 | extern int wlp_setup(struct wlp *, struct uwb_rc *); | 679 | extern int wlp_setup(struct wlp *, struct uwb_rc *, struct net_device *ndev); |
679 | extern void wlp_remove(struct wlp *); | 680 | extern void wlp_remove(struct wlp *); |
680 | extern ssize_t wlp_neighborhood_show(struct wlp *, char *); | 681 | extern ssize_t wlp_neighborhood_show(struct wlp *, char *); |
681 | extern int wlp_wss_setup(struct net_device *, struct wlp_wss *); | 682 | extern int wlp_wss_setup(struct net_device *, struct wlp_wss *); |
diff --git a/init/Kconfig b/init/Kconfig index 13627191a60d..f6281711166d 100644 --- a/init/Kconfig +++ b/init/Kconfig | |||
@@ -924,6 +924,15 @@ config KMOD | |||
924 | 924 | ||
925 | endif # MODULES | 925 | endif # MODULES |
926 | 926 | ||
927 | config INIT_ALL_POSSIBLE | ||
928 | bool | ||
929 | help | ||
930 | Back when each arch used to define their own cpu_online_map and | ||
931 | cpu_possible_map, some of them chose to initialize cpu_possible_map | ||
932 | with all 1s, and others with all 0s. When they were centralised, | ||
933 | it was better to provide this option than to break all the archs | ||
934 | and have several arch maintainers pursuing me down dark alleys. | ||
935 | |||
927 | config STOP_MACHINE | 936 | config STOP_MACHINE |
928 | bool | 937 | bool |
929 | default y | 938 | default y |
diff --git a/init/main.c b/init/main.c index f5e64f20d2b0..ad8f9f53f8d1 100644 --- a/init/main.c +++ b/init/main.c | |||
@@ -75,15 +75,6 @@ | |||
75 | #include <asm/smp.h> | 75 | #include <asm/smp.h> |
76 | #endif | 76 | #endif |
77 | 77 | ||
78 | /* | ||
79 | * This is one of the first .c files built. Error out early if we have compiler | ||
80 | * trouble. | ||
81 | */ | ||
82 | |||
83 | #if __GNUC__ == 4 && __GNUC_MINOR__ == 1 && __GNUC_PATCHLEVEL__ == 0 | ||
84 | #warning gcc-4.1.0 is known to miscompile the kernel. A different compiler version is recommended. | ||
85 | #endif | ||
86 | |||
87 | static int kernel_init(void *); | 78 | static int kernel_init(void *); |
88 | 79 | ||
89 | extern void init_IRQ(void); | 80 | extern void init_IRQ(void); |
diff --git a/kernel/cpu.c b/kernel/cpu.c index 8ea32e8d68b0..bae131a1211b 100644 --- a/kernel/cpu.c +++ b/kernel/cpu.c | |||
@@ -24,19 +24,20 @@ | |||
24 | cpumask_t cpu_present_map __read_mostly; | 24 | cpumask_t cpu_present_map __read_mostly; |
25 | EXPORT_SYMBOL(cpu_present_map); | 25 | EXPORT_SYMBOL(cpu_present_map); |
26 | 26 | ||
27 | #ifndef CONFIG_SMP | ||
28 | |||
29 | /* | 27 | /* |
30 | * Represents all cpu's that are currently online. | 28 | * Represents all cpu's that are currently online. |
31 | */ | 29 | */ |
32 | cpumask_t cpu_online_map __read_mostly = CPU_MASK_ALL; | 30 | cpumask_t cpu_online_map __read_mostly; |
33 | EXPORT_SYMBOL(cpu_online_map); | 31 | EXPORT_SYMBOL(cpu_online_map); |
34 | 32 | ||
33 | #ifdef CONFIG_INIT_ALL_POSSIBLE | ||
35 | cpumask_t cpu_possible_map __read_mostly = CPU_MASK_ALL; | 34 | cpumask_t cpu_possible_map __read_mostly = CPU_MASK_ALL; |
35 | #else | ||
36 | cpumask_t cpu_possible_map __read_mostly; | ||
37 | #endif | ||
36 | EXPORT_SYMBOL(cpu_possible_map); | 38 | EXPORT_SYMBOL(cpu_possible_map); |
37 | 39 | ||
38 | #else /* CONFIG_SMP */ | 40 | #ifdef CONFIG_SMP |
39 | |||
40 | /* Serializes the updates to cpu_online_map, cpu_present_map */ | 41 | /* Serializes the updates to cpu_online_map, cpu_present_map */ |
41 | static DEFINE_MUTEX(cpu_add_remove_lock); | 42 | static DEFINE_MUTEX(cpu_add_remove_lock); |
42 | 43 | ||
diff --git a/kernel/cpuset.c b/kernel/cpuset.c index 96c0ba13b8cd..39c1a4c1c5a9 100644 --- a/kernel/cpuset.c +++ b/kernel/cpuset.c | |||
@@ -896,7 +896,7 @@ static int update_cpumask(struct cpuset *cs, const char *buf) | |||
896 | if (!*buf) { | 896 | if (!*buf) { |
897 | cpus_clear(trialcs.cpus_allowed); | 897 | cpus_clear(trialcs.cpus_allowed); |
898 | } else { | 898 | } else { |
899 | retval = cpulist_parse(buf, trialcs.cpus_allowed); | 899 | retval = cpulist_parse(buf, &trialcs.cpus_allowed); |
900 | if (retval < 0) | 900 | if (retval < 0) |
901 | return retval; | 901 | return retval; |
902 | 902 | ||
@@ -1482,7 +1482,7 @@ static int cpuset_sprintf_cpulist(char *page, struct cpuset *cs) | |||
1482 | mask = cs->cpus_allowed; | 1482 | mask = cs->cpus_allowed; |
1483 | mutex_unlock(&callback_mutex); | 1483 | mutex_unlock(&callback_mutex); |
1484 | 1484 | ||
1485 | return cpulist_scnprintf(page, PAGE_SIZE, mask); | 1485 | return cpulist_scnprintf(page, PAGE_SIZE, &mask); |
1486 | } | 1486 | } |
1487 | 1487 | ||
1488 | static int cpuset_sprintf_memlist(char *page, struct cpuset *cs) | 1488 | static int cpuset_sprintf_memlist(char *page, struct cpuset *cs) |
diff --git a/kernel/irq/chip.c b/kernel/irq/chip.c index 6eb3c7952b64..f63c706d25e1 100644 --- a/kernel/irq/chip.c +++ b/kernel/irq/chip.c | |||
@@ -46,7 +46,7 @@ void dynamic_irq_init(unsigned int irq) | |||
46 | desc->irq_count = 0; | 46 | desc->irq_count = 0; |
47 | desc->irqs_unhandled = 0; | 47 | desc->irqs_unhandled = 0; |
48 | #ifdef CONFIG_SMP | 48 | #ifdef CONFIG_SMP |
49 | cpus_setall(desc->affinity); | 49 | cpumask_setall(&desc->affinity); |
50 | #endif | 50 | #endif |
51 | spin_unlock_irqrestore(&desc->lock, flags); | 51 | spin_unlock_irqrestore(&desc->lock, flags); |
52 | } | 52 | } |
diff --git a/kernel/irq/manage.c b/kernel/irq/manage.c index 540f6c49f3fa..61c4a9b62165 100644 --- a/kernel/irq/manage.c +++ b/kernel/irq/manage.c | |||
@@ -79,7 +79,7 @@ int irq_can_set_affinity(unsigned int irq) | |||
79 | * @cpumask: cpumask | 79 | * @cpumask: cpumask |
80 | * | 80 | * |
81 | */ | 81 | */ |
82 | int irq_set_affinity(unsigned int irq, cpumask_t cpumask) | 82 | int irq_set_affinity(unsigned int irq, const struct cpumask *cpumask) |
83 | { | 83 | { |
84 | struct irq_desc *desc = irq_to_desc(irq); | 84 | struct irq_desc *desc = irq_to_desc(irq); |
85 | unsigned long flags; | 85 | unsigned long flags; |
@@ -91,14 +91,14 @@ int irq_set_affinity(unsigned int irq, cpumask_t cpumask) | |||
91 | 91 | ||
92 | #ifdef CONFIG_GENERIC_PENDING_IRQ | 92 | #ifdef CONFIG_GENERIC_PENDING_IRQ |
93 | if (desc->status & IRQ_MOVE_PCNTXT || desc->status & IRQ_DISABLED) { | 93 | if (desc->status & IRQ_MOVE_PCNTXT || desc->status & IRQ_DISABLED) { |
94 | desc->affinity = cpumask; | 94 | cpumask_copy(&desc->affinity, cpumask); |
95 | desc->chip->set_affinity(irq, cpumask); | 95 | desc->chip->set_affinity(irq, cpumask); |
96 | } else { | 96 | } else { |
97 | desc->status |= IRQ_MOVE_PENDING; | 97 | desc->status |= IRQ_MOVE_PENDING; |
98 | desc->pending_mask = cpumask; | 98 | cpumask_copy(&desc->pending_mask, cpumask); |
99 | } | 99 | } |
100 | #else | 100 | #else |
101 | desc->affinity = cpumask; | 101 | cpumask_copy(&desc->affinity, cpumask); |
102 | desc->chip->set_affinity(irq, cpumask); | 102 | desc->chip->set_affinity(irq, cpumask); |
103 | #endif | 103 | #endif |
104 | desc->status |= IRQ_AFFINITY_SET; | 104 | desc->status |= IRQ_AFFINITY_SET; |
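Callers now pass masks by pointer instead of by value; for a single CPU, this series' cpumask_of() helper yields a suitable const struct cpumask pointer (illustrative before/after):

	/* before: irq_set_affinity(irq, cpumask_of_cpu(cpu)); */
	irq_set_affinity(irq, cpumask_of(cpu));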
@@ -112,26 +112,24 @@ int irq_set_affinity(unsigned int irq, cpumask_t cpumask) | |||
112 | */ | 112 | */ |
113 | int do_irq_select_affinity(unsigned int irq, struct irq_desc *desc) | 113 | int do_irq_select_affinity(unsigned int irq, struct irq_desc *desc) |
114 | { | 114 | { |
115 | cpumask_t mask; | ||
116 | |||
117 | if (!irq_can_set_affinity(irq)) | 115 | if (!irq_can_set_affinity(irq)) |
118 | return 0; | 116 | return 0; |
119 | 117 | ||
120 | cpus_and(mask, cpu_online_map, irq_default_affinity); | ||
121 | |||
122 | /* | 118 | /* |
123 | * Preserve an userspace affinity setup, but make sure that | 119 | * Preserve an userspace affinity setup, but make sure that |
124 | * one of the targets is online. | 120 | * one of the targets is online. |
125 | */ | 121 | */ |
126 | if (desc->status & (IRQ_AFFINITY_SET | IRQ_NO_BALANCING)) { | 122 | if (desc->status & (IRQ_AFFINITY_SET | IRQ_NO_BALANCING)) { |
127 | if (cpus_intersects(desc->affinity, cpu_online_map)) | 123 | if (cpumask_any_and(&desc->affinity, cpu_online_mask) |
128 | mask = desc->affinity; | 124 | < nr_cpu_ids) |
125 | goto set_affinity; | ||
129 | else | 126 | else |
130 | desc->status &= ~IRQ_AFFINITY_SET; | 127 | desc->status &= ~IRQ_AFFINITY_SET; |
131 | } | 128 | } |
132 | 129 | ||
133 | desc->affinity = mask; | 130 | cpumask_and(&desc->affinity, cpu_online_mask, &irq_default_affinity); |
134 | desc->chip->set_affinity(irq, mask); | 131 | set_affinity: |
132 | desc->chip->set_affinity(irq, &desc->affinity); | ||
135 | 133 | ||
136 | return 0; | 134 | return 0; |
137 | } | 135 | } |
diff --git a/kernel/irq/migration.c b/kernel/irq/migration.c index 9db681d95814..bd72329e630c 100644 --- a/kernel/irq/migration.c +++ b/kernel/irq/migration.c | |||
@@ -4,7 +4,6 @@ | |||
4 | void move_masked_irq(int irq) | 4 | void move_masked_irq(int irq) |
5 | { | 5 | { |
6 | struct irq_desc *desc = irq_to_desc(irq); | 6 | struct irq_desc *desc = irq_to_desc(irq); |
7 | cpumask_t tmp; | ||
8 | 7 | ||
9 | if (likely(!(desc->status & IRQ_MOVE_PENDING))) | 8 | if (likely(!(desc->status & IRQ_MOVE_PENDING))) |
10 | return; | 9 | return; |
@@ -19,7 +18,7 @@ void move_masked_irq(int irq) | |||
19 | 18 | ||
20 | desc->status &= ~IRQ_MOVE_PENDING; | 19 | desc->status &= ~IRQ_MOVE_PENDING; |
21 | 20 | ||
22 | if (unlikely(cpus_empty(desc->pending_mask))) | 21 | if (unlikely(cpumask_empty(&desc->pending_mask))) |
23 | return; | 22 | return; |
24 | 23 | ||
25 | if (!desc->chip->set_affinity) | 24 | if (!desc->chip->set_affinity) |
@@ -27,8 +26,6 @@ void move_masked_irq(int irq) | |||
27 | 26 | ||
28 | assert_spin_locked(&desc->lock); | 27 | assert_spin_locked(&desc->lock); |
29 | 28 | ||
30 | cpus_and(tmp, desc->pending_mask, cpu_online_map); | ||
31 | |||
32 | /* | 29 | /* |
33 | * If there was a valid mask to work with, please | 30 | * If there was a valid mask to work with, please |
34 | * do the disable, re-program, enable sequence. | 31 | * do the disable, re-program, enable sequence. |
@@ -41,10 +38,13 @@ void move_masked_irq(int irq) | |||
41 | * For correct operation this depends on the caller | 38 | * For correct operation this depends on the caller |
42 | * masking the irqs. | 39 | * masking the irqs. |
43 | */ | 40 | */ |
44 | if (likely(!cpus_empty(tmp))) { | 41 | if (likely(cpumask_any_and(&desc->pending_mask, cpu_online_mask) |
45 | desc->chip->set_affinity(irq,tmp); | 42 | < nr_cpu_ids)) { |
43 | cpumask_and(&desc->affinity, | ||
44 | &desc->pending_mask, cpu_online_mask); | ||
45 | desc->chip->set_affinity(irq, &desc->affinity); | ||
46 | } | 46 | } |
47 | cpus_clear(desc->pending_mask); | 47 | cpumask_clear(&desc->pending_mask); |
48 | } | 48 | } |
49 | 49 | ||
50 | void move_native_irq(int irq) | 50 | void move_native_irq(int irq) |
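move_masked_irq() used to build the pending & online intersection in a stack temporary; with NR_CPUS=4096 a cpumask_t is 512 bytes, which is precisely the stack load this series is chasing out. The rewrite first checks that a usable target exists, then computes the intersection straight into desc->affinity, which as a side effect keeps the recorded affinity in step with what gets programmed into the chip. The resulting flow, as a fragment mirroring the hunk:

        if (likely(cpumask_any_and(&desc->pending_mask,
                                   cpu_online_mask) < nr_cpu_ids)) {
                cpumask_and(&desc->affinity,
                            &desc->pending_mask, cpu_online_mask);
                desc->chip->set_affinity(irq, &desc->affinity);
        }
        cpumask_clear(&desc->pending_mask);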
diff --git a/kernel/irq/proc.c b/kernel/irq/proc.c index f6b3440f05bc..d2c0e5ee53c5 100644 --- a/kernel/irq/proc.c +++ b/kernel/irq/proc.c | |||
@@ -40,33 +40,42 @@ static ssize_t irq_affinity_proc_write(struct file *file, | |||
40 | const char __user *buffer, size_t count, loff_t *pos) | 40 | const char __user *buffer, size_t count, loff_t *pos) |
41 | { | 41 | { |
42 | unsigned int irq = (int)(long)PDE(file->f_path.dentry->d_inode)->data; | 42 | unsigned int irq = (int)(long)PDE(file->f_path.dentry->d_inode)->data; |
43 | cpumask_t new_value; | 43 | cpumask_var_t new_value; |
44 | int err; | 44 | int err; |
45 | 45 | ||
46 | if (!irq_to_desc(irq)->chip->set_affinity || no_irq_affinity || | 46 | if (!irq_to_desc(irq)->chip->set_affinity || no_irq_affinity || |
47 | irq_balancing_disabled(irq)) | 47 | irq_balancing_disabled(irq)) |
48 | return -EIO; | 48 | return -EIO; |
49 | 49 | ||
50 | if (!alloc_cpumask_var(&new_value, GFP_KERNEL)) | ||
51 | return -ENOMEM; | ||
52 | |||
50 | err = cpumask_parse_user(buffer, count, new_value); | 53 | err = cpumask_parse_user(buffer, count, new_value); |
51 | if (err) | 54 | if (err) |
52 | return err; | 55 | goto free_cpumask; |
53 | 56 | ||
54 | if (!is_affinity_mask_valid(new_value)) | 57 | if (!is_affinity_mask_valid(*new_value)) { |
55 | return -EINVAL; | 58 | err = -EINVAL; |
59 | goto free_cpumask; | ||
60 | } | ||
56 | 61 | ||
57 | /* | 62 | /* |
58 | * Do not allow disabling IRQs completely - it's a too easy | 63 | * Do not allow disabling IRQs completely - it's a too easy |
59 | * way to make the system unusable accidentally :-) At least | 64 | * way to make the system unusable accidentally :-) At least |
60 | * one online CPU still has to be targeted. | 65 | * one online CPU still has to be targeted. |
61 | */ | 66 | */ |
62 | if (!cpus_intersects(new_value, cpu_online_map)) | 67 | if (!cpumask_intersects(new_value, cpu_online_mask)) { |
63 | /* Special case for empty set - allow the architecture | 68 | /* Special case for empty set - allow the architecture |
64 | code to set default SMP affinity. */ | 69 | code to set default SMP affinity. */ |
65 | return irq_select_affinity_usr(irq) ? -EINVAL : count; | 70 | err = irq_select_affinity_usr(irq) ? -EINVAL : count; |
66 | 71 | } else { | |
67 | irq_set_affinity(irq, new_value); | 72 | irq_set_affinity(irq, new_value); |
73 | err = count; | ||
74 | } | ||
68 | 75 | ||
69 | return count; | 76 | free_cpumask: |
77 | free_cpumask_var(new_value); | ||
78 | return err; | ||
70 | } | 79 | } |
71 | 80 | ||
72 | static int irq_affinity_proc_open(struct inode *inode, struct file *file) | 81 | static int irq_affinity_proc_open(struct inode *inode, struct file *file) |
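This hunk is the first user of cpumask_var_t, which is roughly (illustrative; the real definitions live in linux/cpumask.h):

        #ifdef CONFIG_CPUMASK_OFFSTACK
        typedef struct cpumask *cpumask_var_t;   /* kmalloc'd, nr_cpu_ids bits */
        #else
        typedef struct cpumask cpumask_var_t[1]; /* on the stack, as before */
        #endif

With CONFIG_CPUMASK_OFFSTACK unset, alloc_cpumask_var() compiles to "return true" and free_cpumask_var() to a no-op, so small configurations pay nothing for the -ENOMEM path added above. The handler's shape changes accordingly: the early returns become gotos so that every exit funnels through free_cpumask_var().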
@@ -95,7 +104,7 @@ static ssize_t default_affinity_write(struct file *file, | |||
95 | cpumask_t new_value; | 104 | cpumask_t new_value; |
96 | int err; | 105 | int err; |
97 | 106 | ||
98 | err = cpumask_parse_user(buffer, count, new_value); | 107 | err = cpumask_parse_user(buffer, count, &new_value); |
99 | if (err) | 108 | if (err) |
100 | return err; | 109 | return err; |
101 | 110 | ||
diff --git a/kernel/profile.c b/kernel/profile.c index 60adefb59b5e..4cb7d68fed82 100644 --- a/kernel/profile.c +++ b/kernel/profile.c | |||
@@ -442,7 +442,7 @@ void profile_tick(int type) | |||
442 | static int prof_cpu_mask_read_proc(char *page, char **start, off_t off, | 442 | static int prof_cpu_mask_read_proc(char *page, char **start, off_t off, |
443 | int count, int *eof, void *data) | 443 | int count, int *eof, void *data) |
444 | { | 444 | { |
445 | int len = cpumask_scnprintf(page, count, *(cpumask_t *)data); | 445 | int len = cpumask_scnprintf(page, count, (cpumask_t *)data); |
446 | if (count - len < 2) | 446 | if (count - len < 2) |
447 | return -EINVAL; | 447 | return -EINVAL; |
448 | len += sprintf(page + len, "\n"); | 448 | len += sprintf(page + len, "\n"); |
@@ -456,7 +456,7 @@ static int prof_cpu_mask_write_proc(struct file *file, | |||
456 | unsigned long full_count = count, err; | 456 | unsigned long full_count = count, err; |
457 | cpumask_t new_value; | 457 | cpumask_t new_value; |
458 | 458 | ||
459 | err = cpumask_parse_user(buffer, count, new_value); | 459 | err = cpumask_parse_user(buffer, count, &new_value); |
460 | if (err) | 460 | if (err) |
461 | return err; | 461 | return err; |
462 | 462 | ||
diff --git a/kernel/rcuclassic.c b/kernel/rcuclassic.c index e503a002f330..c03ca3e61919 100644 --- a/kernel/rcuclassic.c +++ b/kernel/rcuclassic.c | |||
@@ -393,7 +393,7 @@ static void rcu_start_batch(struct rcu_ctrlblk *rcp) | |||
393 | * unnecessarily. | 393 | * unnecessarily. |
394 | */ | 394 | */ |
395 | smp_mb(); | 395 | smp_mb(); |
396 | cpus_andnot(rcp->cpumask, cpu_online_map, nohz_cpu_mask); | 396 | cpumask_andnot(&rcp->cpumask, cpu_online_mask, nohz_cpu_mask); |
397 | 397 | ||
398 | rcp->signaled = 0; | 398 | rcp->signaled = 0; |
399 | } | 399 | } |
diff --git a/kernel/sched.c b/kernel/sched.c index fff1c4a20b65..27ba1d642f0f 100644 --- a/kernel/sched.c +++ b/kernel/sched.c | |||
@@ -498,18 +498,26 @@ struct rt_rq { | |||
498 | */ | 498 | */ |
499 | struct root_domain { | 499 | struct root_domain { |
500 | atomic_t refcount; | 500 | atomic_t refcount; |
501 | cpumask_t span; | 501 | cpumask_var_t span; |
502 | cpumask_t online; | 502 | cpumask_var_t online; |
503 | 503 | ||
504 | /* | 504 | /* |
505 | * The "RT overload" flag: it gets set if a CPU has more than | 505 | * The "RT overload" flag: it gets set if a CPU has more than |
506 | * one runnable RT task. | 506 | * one runnable RT task. |
507 | */ | 507 | */ |
508 | cpumask_t rto_mask; | 508 | cpumask_var_t rto_mask; |
509 | atomic_t rto_count; | 509 | atomic_t rto_count; |
510 | #ifdef CONFIG_SMP | 510 | #ifdef CONFIG_SMP |
511 | struct cpupri cpupri; | 511 | struct cpupri cpupri; |
512 | #endif | 512 | #endif |
513 | #if defined(CONFIG_SCHED_MC) || defined(CONFIG_SCHED_SMT) | ||
514 | /* | ||
515 | * Preferred wake up cpu nominated by sched_mc balance that will be | ||
516 | * used when most cpus are idle in the system indicating overall very | ||
517 | * low system utilisation. Triggered at POWERSAVINGS_BALANCE_WAKEUP(2) | ||
518 | */ | ||
519 | unsigned int sched_mc_preferred_wakeup_cpu; | ||
520 | #endif | ||
513 | }; | 521 | }; |
514 | 522 | ||
515 | /* | 523 | /* |
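The three root_domain masks turning into cpumask_var_t means that, with CONFIG_CPUMASK_OFFSTACK=y, the structure now carries three pointers which must be allocated wherever a root_domain is built; that call site is not in this hunk. A hypothetical init sketch with the usual unwind ordering:

        static int example_init_rd_masks(struct root_domain *rd)
        {
                if (!alloc_cpumask_var(&rd->span, GFP_KERNEL))
                        return -ENOMEM;
                if (!alloc_cpumask_var(&rd->online, GFP_KERNEL))
                        goto free_span;
                if (!alloc_cpumask_var(&rd->rto_mask, GFP_KERNEL))
                        goto free_online;
                return 0;

        free_online:
                free_cpumask_var(rd->online);
        free_span:
                free_cpumask_var(rd->span);
                return -ENOMEM;
        }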
@@ -1514,7 +1522,7 @@ static int tg_shares_up(struct task_group *tg, void *data) | |||
1514 | struct sched_domain *sd = data; | 1522 | struct sched_domain *sd = data; |
1515 | int i; | 1523 | int i; |
1516 | 1524 | ||
1517 | for_each_cpu_mask(i, sd->span) { | 1525 | for_each_cpu(i, sched_domain_span(sd)) { |
1518 | /* | 1526 | /* |
1519 | * If there are currently no tasks on the cpu pretend there | 1527 | * If there are currently no tasks on the cpu pretend there |
1520 | * is one of average load so that when a new task gets to | 1528 | * is one of average load so that when a new task gets to |
@@ -1535,7 +1543,7 @@ static int tg_shares_up(struct task_group *tg, void *data) | |||
1535 | if (!sd->parent || !(sd->parent->flags & SD_LOAD_BALANCE)) | 1543 | if (!sd->parent || !(sd->parent->flags & SD_LOAD_BALANCE)) |
1536 | shares = tg->shares; | 1544 | shares = tg->shares; |
1537 | 1545 | ||
1538 | for_each_cpu_mask(i, sd->span) | 1546 | for_each_cpu(i, sched_domain_span(sd)) |
1539 | update_group_shares_cpu(tg, i, shares, rq_weight); | 1547 | update_group_shares_cpu(tg, i, shares, rq_weight); |
1540 | 1548 | ||
1541 | return 0; | 1549 | return 0; |
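tg_shares_up() shows the read side of the sched_domain conversion: sd->span is no longer touched as a value but reached through the sched_domain_span() accessor, so the by-value for_each_cpu_mask() walk becomes for_each_cpu() over a pointer. The accessor shape, sketched with a stand-in struct (the real layout lives in the sched headers, where the span bitmap sits at the end of struct sched_domain):

        struct example_domain {
                /* ... fixed-size fields ... */
                unsigned long span[];           /* must remain last */
        };

        static inline struct cpumask *example_span(struct example_domain *d)
        {
                return to_cpumask(d->span);
        }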
@@ -2101,15 +2109,17 @@ find_idlest_group(struct sched_domain *sd, struct task_struct *p, int this_cpu) | |||
2101 | int i; | 2109 | int i; |
2102 | 2110 | ||
2103 | /* Skip over this group if it has no CPUs allowed */ | 2111 | /* Skip over this group if it has no CPUs allowed */ |
2104 | if (!cpus_intersects(group->cpumask, p->cpus_allowed)) | 2112 | if (!cpumask_intersects(sched_group_cpus(group), |
2113 | &p->cpus_allowed)) | ||
2105 | continue; | 2114 | continue; |
2106 | 2115 | ||
2107 | local_group = cpu_isset(this_cpu, group->cpumask); | 2116 | local_group = cpumask_test_cpu(this_cpu, |
2117 | sched_group_cpus(group)); | ||
2108 | 2118 | ||
2109 | /* Tally up the load of all CPUs in the group */ | 2119 | /* Tally up the load of all CPUs in the group */ |
2110 | avg_load = 0; | 2120 | avg_load = 0; |
2111 | 2121 | ||
2112 | for_each_cpu_mask_nr(i, group->cpumask) { | 2122 | for_each_cpu(i, sched_group_cpus(group)) { |
2113 | /* Bias balancing toward cpus of our domain */ | 2123 | /* Bias balancing toward cpus of our domain */ |
2114 | if (local_group) | 2124 | if (local_group) |
2115 | load = source_load(i, load_idx); | 2125 | load = source_load(i, load_idx); |
@@ -2141,17 +2151,14 @@ find_idlest_group(struct sched_domain *sd, struct task_struct *p, int this_cpu) | |||
2141 | * find_idlest_cpu - find the idlest cpu among the cpus in group. | 2151 | * find_idlest_cpu - find the idlest cpu among the cpus in group. |
2142 | */ | 2152 | */ |
2143 | static int | 2153 | static int |
2144 | find_idlest_cpu(struct sched_group *group, struct task_struct *p, int this_cpu, | 2154 | find_idlest_cpu(struct sched_group *group, struct task_struct *p, int this_cpu) |
2145 | cpumask_t *tmp) | ||
2146 | { | 2155 | { |
2147 | unsigned long load, min_load = ULONG_MAX; | 2156 | unsigned long load, min_load = ULONG_MAX; |
2148 | int idlest = -1; | 2157 | int idlest = -1; |
2149 | int i; | 2158 | int i; |
2150 | 2159 | ||
2151 | /* Traverse only the allowed CPUs */ | 2160 | /* Traverse only the allowed CPUs */ |
2152 | cpus_and(*tmp, group->cpumask, p->cpus_allowed); | 2161 | for_each_cpu_and(i, sched_group_cpus(group), &p->cpus_allowed) { |
2153 | |||
2154 | for_each_cpu_mask_nr(i, *tmp) { | ||
2155 | load = weighted_cpuload(i); | 2162 | load = weighted_cpuload(i); |
2156 | 2163 | ||
2157 | if (load < min_load || (load == min_load && i == this_cpu)) { | 2164 | if (load < min_load || (load == min_load && i == this_cpu)) { |
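find_idlest_cpu() loses both its scratch-mask argument and the cpus_and() that filled it: for_each_cpu_and() filters while iterating instead of materialising the intersection first. Before and after, with visit() standing in for the loop body:

        /* before: intersection built into a caller-supplied temporary */
        cpus_and(*tmp, group->cpumask, p->cpus_allowed);
        for_each_cpu_mask_nr(i, *tmp)
                visit(i);

        /* after: one pass, no temporary */
        for_each_cpu_and(i, sched_group_cpus(group), &p->cpus_allowed)
                visit(i);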
@@ -2193,7 +2200,6 @@ static int sched_balance_self(int cpu, int flag) | |||
2193 | update_shares(sd); | 2200 | update_shares(sd); |
2194 | 2201 | ||
2195 | while (sd) { | 2202 | while (sd) { |
2196 | cpumask_t span, tmpmask; | ||
2197 | struct sched_group *group; | 2203 | struct sched_group *group; |
2198 | int new_cpu, weight; | 2204 | int new_cpu, weight; |
2199 | 2205 | ||
@@ -2202,14 +2208,13 @@ static int sched_balance_self(int cpu, int flag) | |||
2202 | continue; | 2208 | continue; |
2203 | } | 2209 | } |
2204 | 2210 | ||
2205 | span = sd->span; | ||
2206 | group = find_idlest_group(sd, t, cpu); | 2211 | group = find_idlest_group(sd, t, cpu); |
2207 | if (!group) { | 2212 | if (!group) { |
2208 | sd = sd->child; | 2213 | sd = sd->child; |
2209 | continue; | 2214 | continue; |
2210 | } | 2215 | } |
2211 | 2216 | ||
2212 | new_cpu = find_idlest_cpu(group, t, cpu, &tmpmask); | 2217 | new_cpu = find_idlest_cpu(group, t, cpu); |
2213 | if (new_cpu == -1 || new_cpu == cpu) { | 2218 | if (new_cpu == -1 || new_cpu == cpu) { |
2214 | /* Now try balancing at a lower domain level of cpu */ | 2219 | /* Now try balancing at a lower domain level of cpu */ |
2215 | sd = sd->child; | 2220 | sd = sd->child; |
@@ -2218,10 +2223,10 @@ static int sched_balance_self(int cpu, int flag) | |||
2218 | 2223 | ||
2219 | /* Now try balancing at a lower domain level of new_cpu */ | 2224 | /* Now try balancing at a lower domain level of new_cpu */ |
2220 | cpu = new_cpu; | 2225 | cpu = new_cpu; |
2226 | weight = cpumask_weight(sched_domain_span(sd)); | ||
2221 | sd = NULL; | 2227 | sd = NULL; |
2222 | weight = cpus_weight(span); | ||
2223 | for_each_domain(cpu, tmp) { | 2228 | for_each_domain(cpu, tmp) { |
2224 | if (weight <= cpus_weight(tmp->span)) | 2229 | if (weight <= cpumask_weight(sched_domain_span(tmp))) |
2225 | break; | 2230 | break; |
2226 | if (tmp->flags & flag) | 2231 | if (tmp->flags & flag) |
2227 | sd = tmp; | 2232 | sd = tmp; |
@@ -2266,7 +2271,7 @@ static int try_to_wake_up(struct task_struct *p, unsigned int state, int sync) | |||
2266 | cpu = task_cpu(p); | 2271 | cpu = task_cpu(p); |
2267 | 2272 | ||
2268 | for_each_domain(this_cpu, sd) { | 2273 | for_each_domain(this_cpu, sd) { |
2269 | if (cpu_isset(cpu, sd->span)) { | 2274 | if (cpumask_test_cpu(cpu, sched_domain_span(sd))) { |
2270 | update_shares(sd); | 2275 | update_shares(sd); |
2271 | break; | 2276 | break; |
2272 | } | 2277 | } |
@@ -2315,7 +2320,7 @@ static int try_to_wake_up(struct task_struct *p, unsigned int state, int sync) | |||
2315 | else { | 2320 | else { |
2316 | struct sched_domain *sd; | 2321 | struct sched_domain *sd; |
2317 | for_each_domain(this_cpu, sd) { | 2322 | for_each_domain(this_cpu, sd) { |
2318 | if (cpu_isset(cpu, sd->span)) { | 2323 | if (cpumask_test_cpu(cpu, sched_domain_span(sd))) { |
2319 | schedstat_inc(sd, ttwu_wake_remote); | 2324 | schedstat_inc(sd, ttwu_wake_remote); |
2320 | break; | 2325 | break; |
2321 | } | 2326 | } |
@@ -2846,7 +2851,7 @@ static void sched_migrate_task(struct task_struct *p, int dest_cpu) | |||
2846 | struct rq *rq; | 2851 | struct rq *rq; |
2847 | 2852 | ||
2848 | rq = task_rq_lock(p, &flags); | 2853 | rq = task_rq_lock(p, &flags); |
2849 | if (!cpu_isset(dest_cpu, p->cpus_allowed) | 2854 | if (!cpumask_test_cpu(dest_cpu, &p->cpus_allowed) |
2850 | || unlikely(!cpu_active(dest_cpu))) | 2855 | || unlikely(!cpu_active(dest_cpu))) |
2851 | goto out; | 2856 | goto out; |
2852 | 2857 | ||
@@ -2911,7 +2916,7 @@ int can_migrate_task(struct task_struct *p, struct rq *rq, int this_cpu, | |||
2911 | * 2) cannot be migrated to this CPU due to cpus_allowed, or | 2916 | * 2) cannot be migrated to this CPU due to cpus_allowed, or |
2912 | * 3) are cache-hot on their current CPU. | 2917 | * 3) are cache-hot on their current CPU. |
2913 | */ | 2918 | */ |
2914 | if (!cpu_isset(this_cpu, p->cpus_allowed)) { | 2919 | if (!cpumask_test_cpu(this_cpu, &p->cpus_allowed)) { |
2915 | schedstat_inc(p, se.nr_failed_migrations_affine); | 2920 | schedstat_inc(p, se.nr_failed_migrations_affine); |
2916 | return 0; | 2921 | return 0; |
2917 | } | 2922 | } |
@@ -3086,7 +3091,7 @@ static int move_one_task(struct rq *this_rq, int this_cpu, struct rq *busiest, | |||
3086 | static struct sched_group * | 3091 | static struct sched_group * |
3087 | find_busiest_group(struct sched_domain *sd, int this_cpu, | 3092 | find_busiest_group(struct sched_domain *sd, int this_cpu, |
3088 | unsigned long *imbalance, enum cpu_idle_type idle, | 3093 | unsigned long *imbalance, enum cpu_idle_type idle, |
3089 | int *sd_idle, const cpumask_t *cpus, int *balance) | 3094 | int *sd_idle, const struct cpumask *cpus, int *balance) |
3090 | { | 3095 | { |
3091 | struct sched_group *busiest = NULL, *this = NULL, *group = sd->groups; | 3096 | struct sched_group *busiest = NULL, *this = NULL, *group = sd->groups; |
3092 | unsigned long max_load, avg_load, total_load, this_load, total_pwr; | 3097 | unsigned long max_load, avg_load, total_load, this_load, total_pwr; |
@@ -3122,10 +3127,11 @@ find_busiest_group(struct sched_domain *sd, int this_cpu, | |||
3122 | unsigned long sum_avg_load_per_task; | 3127 | unsigned long sum_avg_load_per_task; |
3123 | unsigned long avg_load_per_task; | 3128 | unsigned long avg_load_per_task; |
3124 | 3129 | ||
3125 | local_group = cpu_isset(this_cpu, group->cpumask); | 3130 | local_group = cpumask_test_cpu(this_cpu, |
3131 | sched_group_cpus(group)); | ||
3126 | 3132 | ||
3127 | if (local_group) | 3133 | if (local_group) |
3128 | balance_cpu = first_cpu(group->cpumask); | 3134 | balance_cpu = cpumask_first(sched_group_cpus(group)); |
3129 | 3135 | ||
3130 | /* Tally up the load of all CPUs in the group */ | 3136 | /* Tally up the load of all CPUs in the group */ |
3131 | sum_weighted_load = sum_nr_running = avg_load = 0; | 3137 | sum_weighted_load = sum_nr_running = avg_load = 0; |
@@ -3134,13 +3140,8 @@ find_busiest_group(struct sched_domain *sd, int this_cpu, | |||
3134 | max_cpu_load = 0; | 3140 | max_cpu_load = 0; |
3135 | min_cpu_load = ~0UL; | 3141 | min_cpu_load = ~0UL; |
3136 | 3142 | ||
3137 | for_each_cpu_mask_nr(i, group->cpumask) { | 3143 | for_each_cpu_and(i, sched_group_cpus(group), cpus) { |
3138 | struct rq *rq; | 3144 | struct rq *rq = cpu_rq(i); |
3139 | |||
3140 | if (!cpu_isset(i, *cpus)) | ||
3141 | continue; | ||
3142 | |||
3143 | rq = cpu_rq(i); | ||
3144 | 3145 | ||
3145 | if (*sd_idle && rq->nr_running) | 3146 | if (*sd_idle && rq->nr_running) |
3146 | *sd_idle = 0; | 3147 | *sd_idle = 0; |
@@ -3251,8 +3252,8 @@ find_busiest_group(struct sched_domain *sd, int this_cpu, | |||
3251 | */ | 3252 | */ |
3252 | if ((sum_nr_running < min_nr_running) || | 3253 | if ((sum_nr_running < min_nr_running) || |
3253 | (sum_nr_running == min_nr_running && | 3254 | (sum_nr_running == min_nr_running && |
3254 | first_cpu(group->cpumask) < | 3255 | cpumask_first(sched_group_cpus(group)) > |
3255 | first_cpu(group_min->cpumask))) { | 3256 | cpumask_first(sched_group_cpus(group_min)))) { |
3256 | group_min = group; | 3257 | group_min = group; |
3257 | min_nr_running = sum_nr_running; | 3258 | min_nr_running = sum_nr_running; |
3258 | min_load_per_task = sum_weighted_load / | 3259 | min_load_per_task = sum_weighted_load / |
@@ -3267,8 +3268,8 @@ find_busiest_group(struct sched_domain *sd, int this_cpu, | |||
3267 | if (sum_nr_running <= group_capacity - 1) { | 3268 | if (sum_nr_running <= group_capacity - 1) { |
3268 | if (sum_nr_running > leader_nr_running || | 3269 | if (sum_nr_running > leader_nr_running || |
3269 | (sum_nr_running == leader_nr_running && | 3270 | (sum_nr_running == leader_nr_running && |
3270 | first_cpu(group->cpumask) > | 3271 | cpumask_first(sched_group_cpus(group)) < |
3271 | first_cpu(group_leader->cpumask))) { | 3272 | cpumask_first(sched_group_cpus(group_leader)))) { |
3272 | group_leader = group; | 3273 | group_leader = group; |
3273 | leader_nr_running = sum_nr_running; | 3274 | leader_nr_running = sum_nr_running; |
3274 | } | 3275 | } |
@@ -3394,6 +3395,10 @@ out_balanced: | |||
3394 | 3395 | ||
3395 | if (this == group_leader && group_leader != group_min) { | 3396 | if (this == group_leader && group_leader != group_min) { |
3396 | *imbalance = min_load_per_task; | 3397 | *imbalance = min_load_per_task; |
3398 | if (sched_mc_power_savings >= POWERSAVINGS_BALANCE_WAKEUP) { | ||
3399 | cpu_rq(this_cpu)->rd->sched_mc_preferred_wakeup_cpu = | ||
3400 | cpumask_first(sched_group_cpus(group_leader)); | ||
3401 | } | ||
3397 | return group_min; | 3402 | return group_min; |
3398 | } | 3403 | } |
3399 | #endif | 3404 | #endif |
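At sched_mc=POWERSAVINGS_BALANCE_WAKEUP, find_busiest_group() now also nominates the first cpu of the group it wants to keep loaded, stashing it in the root_domain field added earlier in this patch. The consumer is elsewhere in the series and not shown here; a purely hypothetical wakeup-side use might look like:

        /* hypothetical: bias a wakeup toward the nominated cpu */
        int target = cpu_rq(this_cpu)->rd->sched_mc_preferred_wakeup_cpu;

        if (sched_mc_power_savings >= POWERSAVINGS_BALANCE_WAKEUP &&
            cpumask_test_cpu(target, &p->cpus_allowed))
                return target;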
@@ -3407,16 +3412,16 @@ ret: | |||
3407 | */ | 3412 | */ |
3408 | static struct rq * | 3413 | static struct rq * |
3409 | find_busiest_queue(struct sched_group *group, enum cpu_idle_type idle, | 3414 | find_busiest_queue(struct sched_group *group, enum cpu_idle_type idle, |
3410 | unsigned long imbalance, const cpumask_t *cpus) | 3415 | unsigned long imbalance, const struct cpumask *cpus) |
3411 | { | 3416 | { |
3412 | struct rq *busiest = NULL, *rq; | 3417 | struct rq *busiest = NULL, *rq; |
3413 | unsigned long max_load = 0; | 3418 | unsigned long max_load = 0; |
3414 | int i; | 3419 | int i; |
3415 | 3420 | ||
3416 | for_each_cpu_mask_nr(i, group->cpumask) { | 3421 | for_each_cpu(i, sched_group_cpus(group)) { |
3417 | unsigned long wl; | 3422 | unsigned long wl; |
3418 | 3423 | ||
3419 | if (!cpu_isset(i, *cpus)) | 3424 | if (!cpumask_test_cpu(i, cpus)) |
3420 | continue; | 3425 | continue; |
3421 | 3426 | ||
3422 | rq = cpu_rq(i); | 3427 | rq = cpu_rq(i); |
@@ -3446,7 +3451,7 @@ find_busiest_queue(struct sched_group *group, enum cpu_idle_type idle, | |||
3446 | */ | 3451 | */ |
3447 | static int load_balance(int this_cpu, struct rq *this_rq, | 3452 | static int load_balance(int this_cpu, struct rq *this_rq, |
3448 | struct sched_domain *sd, enum cpu_idle_type idle, | 3453 | struct sched_domain *sd, enum cpu_idle_type idle, |
3449 | int *balance, cpumask_t *cpus) | 3454 | int *balance, struct cpumask *cpus) |
3450 | { | 3455 | { |
3451 | int ld_moved, all_pinned = 0, active_balance = 0, sd_idle = 0; | 3456 | int ld_moved, all_pinned = 0, active_balance = 0, sd_idle = 0; |
3452 | struct sched_group *group; | 3457 | struct sched_group *group; |
@@ -3454,7 +3459,7 @@ static int load_balance(int this_cpu, struct rq *this_rq, | |||
3454 | struct rq *busiest; | 3459 | struct rq *busiest; |
3455 | unsigned long flags; | 3460 | unsigned long flags; |
3456 | 3461 | ||
3457 | cpus_setall(*cpus); | 3462 | cpumask_setall(cpus); |
3458 | 3463 | ||
3459 | /* | 3464 | /* |
3460 | * When power savings policy is enabled for the parent domain, idle | 3465 | * When power savings policy is enabled for the parent domain, idle |
@@ -3514,8 +3519,8 @@ redo: | |||
3514 | 3519 | ||
3515 | /* All tasks on this runqueue were pinned by CPU affinity */ | 3520 | /* All tasks on this runqueue were pinned by CPU affinity */ |
3516 | if (unlikely(all_pinned)) { | 3521 | if (unlikely(all_pinned)) { |
3517 | cpu_clear(cpu_of(busiest), *cpus); | 3522 | cpumask_clear_cpu(cpu_of(busiest), cpus); |
3518 | if (!cpus_empty(*cpus)) | 3523 | if (!cpumask_empty(cpus)) |
3519 | goto redo; | 3524 | goto redo; |
3520 | goto out_balanced; | 3525 | goto out_balanced; |
3521 | } | 3526 | } |
@@ -3532,7 +3537,8 @@ redo: | |||
3532 | /* don't kick the migration_thread, if the curr | 3537 | /* don't kick the migration_thread, if the curr |
3533 | * task on busiest cpu can't be moved to this_cpu | 3538 | * task on busiest cpu can't be moved to this_cpu |
3534 | */ | 3539 | */ |
3535 | if (!cpu_isset(this_cpu, busiest->curr->cpus_allowed)) { | 3540 | if (!cpumask_test_cpu(this_cpu, |
3541 | &busiest->curr->cpus_allowed)) { | ||
3536 | spin_unlock_irqrestore(&busiest->lock, flags); | 3542 | spin_unlock_irqrestore(&busiest->lock, flags); |
3537 | all_pinned = 1; | 3543 | all_pinned = 1; |
3538 | goto out_one_pinned; | 3544 | goto out_one_pinned; |
@@ -3607,7 +3613,7 @@ out: | |||
3607 | */ | 3613 | */ |
3608 | static int | 3614 | static int |
3609 | load_balance_newidle(int this_cpu, struct rq *this_rq, struct sched_domain *sd, | 3615 | load_balance_newidle(int this_cpu, struct rq *this_rq, struct sched_domain *sd, |
3610 | cpumask_t *cpus) | 3616 | struct cpumask *cpus) |
3611 | { | 3617 | { |
3612 | struct sched_group *group; | 3618 | struct sched_group *group; |
3613 | struct rq *busiest = NULL; | 3619 | struct rq *busiest = NULL; |
@@ -3616,7 +3622,7 @@ load_balance_newidle(int this_cpu, struct rq *this_rq, struct sched_domain *sd, | |||
3616 | int sd_idle = 0; | 3622 | int sd_idle = 0; |
3617 | int all_pinned = 0; | 3623 | int all_pinned = 0; |
3618 | 3624 | ||
3619 | cpus_setall(*cpus); | 3625 | cpumask_setall(cpus); |
3620 | 3626 | ||
3621 | /* | 3627 | /* |
3622 | * When power savings policy is enabled for the parent domain, idle | 3628 | * When power savings policy is enabled for the parent domain, idle |
@@ -3660,17 +3666,71 @@ redo: | |||
3660 | double_unlock_balance(this_rq, busiest); | 3666 | double_unlock_balance(this_rq, busiest); |
3661 | 3667 | ||
3662 | if (unlikely(all_pinned)) { | 3668 | if (unlikely(all_pinned)) { |
3663 | cpu_clear(cpu_of(busiest), *cpus); | 3669 | cpumask_clear_cpu(cpu_of(busiest), cpus); |
3664 | if (!cpus_empty(*cpus)) | 3670 | if (!cpumask_empty(cpus)) |
3665 | goto redo; | 3671 | goto redo; |
3666 | } | 3672 | } |
3667 | } | 3673 | } |
3668 | 3674 | ||
3669 | if (!ld_moved) { | 3675 | if (!ld_moved) { |
3676 | int active_balance = 0; | ||
3677 | |||
3670 | schedstat_inc(sd, lb_failed[CPU_NEWLY_IDLE]); | 3678 | schedstat_inc(sd, lb_failed[CPU_NEWLY_IDLE]); |
3671 | if (!sd_idle && sd->flags & SD_SHARE_CPUPOWER && | 3679 | if (!sd_idle && sd->flags & SD_SHARE_CPUPOWER && |
3672 | !test_sd_parent(sd, SD_POWERSAVINGS_BALANCE)) | 3680 | !test_sd_parent(sd, SD_POWERSAVINGS_BALANCE)) |
3673 | return -1; | 3681 | return -1; |
3682 | |||
3683 | if (sched_mc_power_savings < POWERSAVINGS_BALANCE_WAKEUP) | ||
3684 | return -1; | ||
3685 | |||
3686 | if (sd->nr_balance_failed++ < 2) | ||
3687 | return -1; | ||
3688 | |||
3689 | /* | ||
3690 | * The only task running in a non-idle cpu can be moved to this | ||
3691 | * cpu in an attempt to completely freeup the other CPU | ||
3692 | * package. The same method used to move task in load_balance() | ||
3693 | * have been extended for load_balance_newidle() to speedup | ||
3694 | * consolidation at sched_mc=POWERSAVINGS_BALANCE_WAKEUP (2) | ||
3695 | * | ||
3696 | * The package power saving logic comes from | ||
3697 | * find_busiest_group(). If there are no imbalance, then | ||
3698 | * f_b_g() will return NULL. However when sched_mc={1,2} then | ||
3699 | * f_b_g() will select a group from which a running task may be | ||
3700 | * pulled to this cpu in order to make the other package idle. | ||
3701 | * If there is no opportunity to make a package idle and if | ||
3702 | * there are no imbalance, then f_b_g() will return NULL and no | ||
3703 | * action will be taken in load_balance_newidle(). | ||
3704 | * | ||
3705 | * Under normal task pull operation due to imbalance, there | ||
3706 | * will be more than one task in the source run queue and | ||
3707 | * move_tasks() will succeed. ld_moved will be true and this | ||
3708 | * active balance code will not be triggered. | ||
3709 | */ | ||
3710 | |||
3711 | /* Lock busiest in correct order while this_rq is held */ | ||
3712 | double_lock_balance(this_rq, busiest); | ||
3713 | |||
3714 | /* | ||
3715 | * don't kick the migration_thread, if the curr | ||
3716 | * task on busiest cpu can't be moved to this_cpu | ||
3717 | */ | ||
3718 | if (!cpu_isset(this_cpu, busiest->curr->cpus_allowed)) { | ||
3719 | double_unlock_balance(this_rq, busiest); | ||
3720 | all_pinned = 1; | ||
3721 | return ld_moved; | ||
3722 | } | ||
3723 | |||
3724 | if (!busiest->active_balance) { | ||
3725 | busiest->active_balance = 1; | ||
3726 | busiest->push_cpu = this_cpu; | ||
3727 | active_balance = 1; | ||
3728 | } | ||
3729 | |||
3730 | double_unlock_balance(this_rq, busiest); | ||
3731 | if (active_balance) | ||
3732 | wake_up_process(busiest->migration_thread); | ||
3733 | |||
3674 | } else | 3734 | } else |
3675 | sd->nr_balance_failed = 0; | 3735 | sd->nr_balance_failed = 0; |
3676 | 3736 | ||
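The new ld_moved==0 branch implements what the long comment promises: after two failed newidle attempts at sched_mc=2, the lone running task on the busiest cpu is pushed away via its migration thread rather than waiting for a periodic balance. The handshake is the standard active-balance one, as a fragment:

        double_lock_balance(this_rq, busiest);
        if (!busiest->active_balance) {
                busiest->active_balance = 1;    /* claim the push */
                busiest->push_cpu = this_cpu;   /* destination hint */
                active_balance = 1;
        }
        double_unlock_balance(this_rq, busiest);
        if (active_balance)
                wake_up_process(busiest->migration_thread);

Note the added block still spells its cpus_allowed test as cpu_isset(); presumably a later patch in the series converts such stragglers.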
@@ -3696,7 +3756,10 @@ static void idle_balance(int this_cpu, struct rq *this_rq) | |||
3696 | struct sched_domain *sd; | 3756 | struct sched_domain *sd; |
3697 | int pulled_task = 0; | 3757 | int pulled_task = 0; |
3698 | unsigned long next_balance = jiffies + HZ; | 3758 | unsigned long next_balance = jiffies + HZ; |
3699 | cpumask_t tmpmask; | 3759 | cpumask_var_t tmpmask; |
3760 | |||
3761 | if (!alloc_cpumask_var(&tmpmask, GFP_ATOMIC)) | ||
3762 | return; | ||
3700 | 3763 | ||
3701 | for_each_domain(this_cpu, sd) { | 3764 | for_each_domain(this_cpu, sd) { |
3702 | unsigned long interval; | 3765 | unsigned long interval; |
@@ -3707,7 +3770,7 @@ static void idle_balance(int this_cpu, struct rq *this_rq) | |||
3707 | if (sd->flags & SD_BALANCE_NEWIDLE) | 3770 | if (sd->flags & SD_BALANCE_NEWIDLE) |
3708 | /* If we've pulled tasks over stop searching: */ | 3771 | /* If we've pulled tasks over stop searching: */ |
3709 | pulled_task = load_balance_newidle(this_cpu, this_rq, | 3772 | pulled_task = load_balance_newidle(this_cpu, this_rq, |
3710 | sd, &tmpmask); | 3773 | sd, tmpmask); |
3711 | 3774 | ||
3712 | interval = msecs_to_jiffies(sd->balance_interval); | 3775 | interval = msecs_to_jiffies(sd->balance_interval); |
3713 | if (time_after(next_balance, sd->last_balance + interval)) | 3776 | if (time_after(next_balance, sd->last_balance + interval)) |
@@ -3722,6 +3785,7 @@ static void idle_balance(int this_cpu, struct rq *this_rq) | |||
3722 | */ | 3785 | */ |
3723 | this_rq->next_balance = next_balance; | 3786 | this_rq->next_balance = next_balance; |
3724 | } | 3787 | } |
3788 | free_cpumask_var(tmpmask); | ||
3725 | } | 3789 | } |
3726 | 3790 | ||
3727 | /* | 3791 | /* |
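idle_balance() runs from schedule() with the runqueue locked, so its scratch mask must come from GFP_ATOMIC and a failed allocation simply forfeits this balance pass; rebalance_domains() below makes the same trade and says so in its comment. The policy, sketched:

        cpumask_var_t tmpmask;

        if (!alloc_cpumask_var(&tmpmask, GFP_ATOMIC))
                return;                 /* skip; the next tick retries */

        /* ... balance using tmpmask ... */

        free_cpumask_var(tmpmask);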
@@ -3759,7 +3823,7 @@ static void active_load_balance(struct rq *busiest_rq, int busiest_cpu) | |||
3759 | /* Search for an sd spanning us and the target CPU. */ | 3823 | /* Search for an sd spanning us and the target CPU. */ |
3760 | for_each_domain(target_cpu, sd) { | 3824 | for_each_domain(target_cpu, sd) { |
3761 | if ((sd->flags & SD_LOAD_BALANCE) && | 3825 | if ((sd->flags & SD_LOAD_BALANCE) && |
3762 | cpu_isset(busiest_cpu, sd->span)) | 3826 | cpumask_test_cpu(busiest_cpu, sched_domain_span(sd))) |
3763 | break; | 3827 | break; |
3764 | } | 3828 | } |
3765 | 3829 | ||
@@ -3778,10 +3842,9 @@ static void active_load_balance(struct rq *busiest_rq, int busiest_cpu) | |||
3778 | #ifdef CONFIG_NO_HZ | 3842 | #ifdef CONFIG_NO_HZ |
3779 | static struct { | 3843 | static struct { |
3780 | atomic_t load_balancer; | 3844 | atomic_t load_balancer; |
3781 | cpumask_t cpu_mask; | 3845 | cpumask_var_t cpu_mask; |
3782 | } nohz ____cacheline_aligned = { | 3846 | } nohz ____cacheline_aligned = { |
3783 | .load_balancer = ATOMIC_INIT(-1), | 3847 | .load_balancer = ATOMIC_INIT(-1), |
3784 | .cpu_mask = CPU_MASK_NONE, | ||
3785 | }; | 3848 | }; |
3786 | 3849 | ||
3787 | /* | 3850 | /* |
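A static cpumask_var_t cannot carry the old CPU_MASK_NONE initializer, since under CONFIG_CPUMASK_OFFSTACK it is only a pointer; nohz.cpu_mask therefore has to be allocated during boot, at a call site outside this hunk. The general shape for a file-scope mask, sketched with illustrative names:

        static cpumask_var_t example_mask;

        static int __init example_setup(void)
        {
                if (!alloc_cpumask_var(&example_mask, GFP_KERNEL))
                        return -ENOMEM;
                cpumask_clear(example_mask);    /* alloc does not zero */
                return 0;
        }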
@@ -3809,7 +3872,7 @@ int select_nohz_load_balancer(int stop_tick) | |||
3809 | int cpu = smp_processor_id(); | 3872 | int cpu = smp_processor_id(); |
3810 | 3873 | ||
3811 | if (stop_tick) { | 3874 | if (stop_tick) { |
3812 | cpu_set(cpu, nohz.cpu_mask); | 3875 | cpumask_set_cpu(cpu, nohz.cpu_mask); |
3813 | cpu_rq(cpu)->in_nohz_recently = 1; | 3876 | cpu_rq(cpu)->in_nohz_recently = 1; |
3814 | 3877 | ||
3815 | /* | 3878 | /* |
@@ -3823,7 +3886,7 @@ int select_nohz_load_balancer(int stop_tick) | |||
3823 | } | 3886 | } |
3824 | 3887 | ||
3825 | /* time for ilb owner also to sleep */ | 3888 | /* time for ilb owner also to sleep */ |
3826 | if (cpus_weight(nohz.cpu_mask) == num_online_cpus()) { | 3889 | if (cpumask_weight(nohz.cpu_mask) == num_online_cpus()) { |
3827 | if (atomic_read(&nohz.load_balancer) == cpu) | 3890 | if (atomic_read(&nohz.load_balancer) == cpu) |
3828 | atomic_set(&nohz.load_balancer, -1); | 3891 | atomic_set(&nohz.load_balancer, -1); |
3829 | return 0; | 3892 | return 0; |
@@ -3836,10 +3899,10 @@ int select_nohz_load_balancer(int stop_tick) | |||
3836 | } else if (atomic_read(&nohz.load_balancer) == cpu) | 3899 | } else if (atomic_read(&nohz.load_balancer) == cpu) |
3837 | return 1; | 3900 | return 1; |
3838 | } else { | 3901 | } else { |
3839 | if (!cpu_isset(cpu, nohz.cpu_mask)) | 3902 | if (!cpumask_test_cpu(cpu, nohz.cpu_mask)) |
3840 | return 0; | 3903 | return 0; |
3841 | 3904 | ||
3842 | cpu_clear(cpu, nohz.cpu_mask); | 3905 | cpumask_clear_cpu(cpu, nohz.cpu_mask); |
3843 | 3906 | ||
3844 | if (atomic_read(&nohz.load_balancer) == cpu) | 3907 | if (atomic_read(&nohz.load_balancer) == cpu) |
3845 | if (atomic_cmpxchg(&nohz.load_balancer, cpu, -1) != cpu) | 3908 | if (atomic_cmpxchg(&nohz.load_balancer, cpu, -1) != cpu) |
@@ -3867,7 +3930,11 @@ static void rebalance_domains(int cpu, enum cpu_idle_type idle) | |||
3867 | unsigned long next_balance = jiffies + 60*HZ; | 3930 | unsigned long next_balance = jiffies + 60*HZ; |
3868 | int update_next_balance = 0; | 3931 | int update_next_balance = 0; |
3869 | int need_serialize; | 3932 | int need_serialize; |
3870 | cpumask_t tmp; | 3933 | cpumask_var_t tmp; |
3934 | |||
3935 | /* Fails alloc? Rebalancing probably not a priority right now. */ | ||
3936 | if (!alloc_cpumask_var(&tmp, GFP_ATOMIC)) | ||
3937 | return; | ||
3871 | 3938 | ||
3872 | for_each_domain(cpu, sd) { | 3939 | for_each_domain(cpu, sd) { |
3873 | if (!(sd->flags & SD_LOAD_BALANCE)) | 3940 | if (!(sd->flags & SD_LOAD_BALANCE)) |
@@ -3892,7 +3959,7 @@ static void rebalance_domains(int cpu, enum cpu_idle_type idle) | |||
3892 | } | 3959 | } |
3893 | 3960 | ||
3894 | if (time_after_eq(jiffies, sd->last_balance + interval)) { | 3961 | if (time_after_eq(jiffies, sd->last_balance + interval)) { |
3895 | if (load_balance(cpu, rq, sd, idle, &balance, &tmp)) { | 3962 | if (load_balance(cpu, rq, sd, idle, &balance, tmp)) { |
3896 | /* | 3963 | /* |
3897 | * We've pulled tasks over so either we're no | 3964 | * We've pulled tasks over so either we're no |
3898 | * longer idle, or one of our SMT siblings is | 3965 | * longer idle, or one of our SMT siblings is |
@@ -3926,6 +3993,8 @@ out: | |||
3926 | */ | 3993 | */ |
3927 | if (likely(update_next_balance)) | 3994 | if (likely(update_next_balance)) |
3928 | rq->next_balance = next_balance; | 3995 | rq->next_balance = next_balance; |
3996 | |||
3997 | free_cpumask_var(tmp); | ||
3929 | } | 3998 | } |
3930 | 3999 | ||
3931 | /* | 4000 | /* |
@@ -3950,12 +4019,13 @@ static void run_rebalance_domains(struct softirq_action *h) | |||
3950 | */ | 4019 | */ |
3951 | if (this_rq->idle_at_tick && | 4020 | if (this_rq->idle_at_tick && |
3952 | atomic_read(&nohz.load_balancer) == this_cpu) { | 4021 | atomic_read(&nohz.load_balancer) == this_cpu) { |
3953 | cpumask_t cpus = nohz.cpu_mask; | ||
3954 | struct rq *rq; | 4022 | struct rq *rq; |
3955 | int balance_cpu; | 4023 | int balance_cpu; |
3956 | 4024 | ||
3957 | cpu_clear(this_cpu, cpus); | 4025 | for_each_cpu(balance_cpu, nohz.cpu_mask) { |
3958 | for_each_cpu_mask_nr(balance_cpu, cpus) { | 4026 | if (balance_cpu == this_cpu) |
4027 | continue; | ||
4028 | |||
3959 | /* | 4029 | /* |
3960 | * If this cpu gets work to do, stop the load balancing | 4030 | * If this cpu gets work to do, stop the load balancing |
3961 | * work being done for other cpus. Next load | 4031 | * work being done for other cpus. Next load |
@@ -3993,7 +4063,7 @@ static inline void trigger_load_balance(struct rq *rq, int cpu) | |||
3993 | rq->in_nohz_recently = 0; | 4063 | rq->in_nohz_recently = 0; |
3994 | 4064 | ||
3995 | if (atomic_read(&nohz.load_balancer) == cpu) { | 4065 | if (atomic_read(&nohz.load_balancer) == cpu) { |
3996 | cpu_clear(cpu, nohz.cpu_mask); | 4066 | cpumask_clear_cpu(cpu, nohz.cpu_mask); |
3997 | atomic_set(&nohz.load_balancer, -1); | 4067 | atomic_set(&nohz.load_balancer, -1); |
3998 | } | 4068 | } |
3999 | 4069 | ||
@@ -4006,7 +4076,7 @@ static inline void trigger_load_balance(struct rq *rq, int cpu) | |||
4006 | * TBD: Traverse the sched domains and nominate | 4076 | * TBD: Traverse the sched domains and nominate |
4007 | * the nearest cpu in the nohz.cpu_mask. | 4077 | * the nearest cpu in the nohz.cpu_mask. |
4008 | */ | 4078 | */ |
4009 | int ilb = first_cpu(nohz.cpu_mask); | 4079 | int ilb = cpumask_first(nohz.cpu_mask); |
4010 | 4080 | ||
4011 | if (ilb < nr_cpu_ids) | 4081 | if (ilb < nr_cpu_ids) |
4012 | resched_cpu(ilb); | 4082 | resched_cpu(ilb); |
@@ -4018,7 +4088,7 @@ static inline void trigger_load_balance(struct rq *rq, int cpu) | |||
4018 | * cpus with ticks stopped, is it time for that to stop? | 4088 | * cpus with ticks stopped, is it time for that to stop? |
4019 | */ | 4089 | */ |
4020 | if (rq->idle_at_tick && atomic_read(&nohz.load_balancer) == cpu && | 4090 | if (rq->idle_at_tick && atomic_read(&nohz.load_balancer) == cpu && |
4021 | cpus_weight(nohz.cpu_mask) == num_online_cpus()) { | 4091 | cpumask_weight(nohz.cpu_mask) == num_online_cpus()) { |
4022 | resched_cpu(cpu); | 4092 | resched_cpu(cpu); |
4023 | return; | 4093 | return; |
4024 | } | 4094 | } |
@@ -4028,7 +4098,7 @@ static inline void trigger_load_balance(struct rq *rq, int cpu) | |||
4028 | * someone else, then no need raise the SCHED_SOFTIRQ | 4098 | * someone else, then no need raise the SCHED_SOFTIRQ |
4029 | */ | 4099 | */ |
4030 | if (rq->idle_at_tick && atomic_read(&nohz.load_balancer) != cpu && | 4100 | if (rq->idle_at_tick && atomic_read(&nohz.load_balancer) != cpu && |
4031 | cpu_isset(cpu, nohz.cpu_mask)) | 4101 | cpumask_test_cpu(cpu, nohz.cpu_mask)) |
4032 | return; | 4102 | return; |
4033 | #endif | 4103 | #endif |
4034 | if (time_after_eq(jiffies, rq->next_balance)) | 4104 | if (time_after_eq(jiffies, rq->next_balance)) |
@@ -5401,10 +5471,9 @@ out_unlock: | |||
5401 | return retval; | 5471 | return retval; |
5402 | } | 5472 | } |
5403 | 5473 | ||
5404 | long sched_setaffinity(pid_t pid, const cpumask_t *in_mask) | 5474 | long sched_setaffinity(pid_t pid, const struct cpumask *in_mask) |
5405 | { | 5475 | { |
5406 | cpumask_t cpus_allowed; | 5476 | cpumask_var_t cpus_allowed, new_mask; |
5407 | cpumask_t new_mask = *in_mask; | ||
5408 | struct task_struct *p; | 5477 | struct task_struct *p; |
5409 | int retval; | 5478 | int retval; |
5410 | 5479 | ||
@@ -5426,6 +5495,14 @@ long sched_setaffinity(pid_t pid, const cpumask_t *in_mask) | |||
5426 | get_task_struct(p); | 5495 | get_task_struct(p); |
5427 | read_unlock(&tasklist_lock); | 5496 | read_unlock(&tasklist_lock); |
5428 | 5497 | ||
5498 | if (!alloc_cpumask_var(&cpus_allowed, GFP_KERNEL)) { | ||
5499 | retval = -ENOMEM; | ||
5500 | goto out_put_task; | ||
5501 | } | ||
5502 | if (!alloc_cpumask_var(&new_mask, GFP_KERNEL)) { | ||
5503 | retval = -ENOMEM; | ||
5504 | goto out_free_cpus_allowed; | ||
5505 | } | ||
5429 | retval = -EPERM; | 5506 | retval = -EPERM; |
5430 | if (!check_same_owner(p) && !capable(CAP_SYS_NICE)) | 5507 | if (!check_same_owner(p) && !capable(CAP_SYS_NICE)) |
5431 | goto out_unlock; | 5508 | goto out_unlock; |
@@ -5434,37 +5511,41 @@ long sched_setaffinity(pid_t pid, const cpumask_t *in_mask) | |||
5434 | if (retval) | 5511 | if (retval) |
5435 | goto out_unlock; | 5512 | goto out_unlock; |
5436 | 5513 | ||
5437 | cpuset_cpus_allowed(p, &cpus_allowed); | 5514 | cpuset_cpus_allowed(p, cpus_allowed); |
5438 | cpus_and(new_mask, new_mask, cpus_allowed); | 5515 | cpumask_and(new_mask, in_mask, cpus_allowed); |
5439 | again: | 5516 | again: |
5440 | retval = set_cpus_allowed_ptr(p, &new_mask); | 5517 | retval = set_cpus_allowed_ptr(p, new_mask); |
5441 | 5518 | ||
5442 | if (!retval) { | 5519 | if (!retval) { |
5443 | cpuset_cpus_allowed(p, &cpus_allowed); | 5520 | cpuset_cpus_allowed(p, cpus_allowed); |
5444 | if (!cpus_subset(new_mask, cpus_allowed)) { | 5521 | if (!cpumask_subset(new_mask, cpus_allowed)) { |
5445 | /* | 5522 | /* |
5446 | * We must have raced with a concurrent cpuset | 5523 | * We must have raced with a concurrent cpuset |
5447 | * update. Just reset the cpus_allowed to the | 5524 | * update. Just reset the cpus_allowed to the |
5448 | * cpuset's cpus_allowed | 5525 | * cpuset's cpus_allowed |
5449 | */ | 5526 | */ |
5450 | new_mask = cpus_allowed; | 5527 | cpumask_copy(new_mask, cpus_allowed); |
5451 | goto again; | 5528 | goto again; |
5452 | } | 5529 | } |
5453 | } | 5530 | } |
5454 | out_unlock: | 5531 | out_unlock: |
5532 | free_cpumask_var(new_mask); | ||
5533 | out_free_cpus_allowed: | ||
5534 | free_cpumask_var(cpus_allowed); | ||
5535 | out_put_task: | ||
5455 | put_task_struct(p); | 5536 | put_task_struct(p); |
5456 | put_online_cpus(); | 5537 | put_online_cpus(); |
5457 | return retval; | 5538 | return retval; |
5458 | } | 5539 | } |
5459 | 5540 | ||
5460 | static int get_user_cpu_mask(unsigned long __user *user_mask_ptr, unsigned len, | 5541 | static int get_user_cpu_mask(unsigned long __user *user_mask_ptr, unsigned len, |
5461 | cpumask_t *new_mask) | 5542 | struct cpumask *new_mask) |
5462 | { | 5543 | { |
5463 | if (len < sizeof(cpumask_t)) { | 5544 | if (len < cpumask_size()) |
5464 | memset(new_mask, 0, sizeof(cpumask_t)); | 5545 | cpumask_clear(new_mask); |
5465 | } else if (len > sizeof(cpumask_t)) { | 5546 | else if (len > cpumask_size()) |
5466 | len = sizeof(cpumask_t); | 5547 | len = cpumask_size(); |
5467 | } | 5548 | |
5468 | return copy_from_user(new_mask, user_mask_ptr, len) ? -EFAULT : 0; | 5549 | return copy_from_user(new_mask, user_mask_ptr, len) ? -EFAULT : 0; |
5469 | } | 5550 | } |
5470 | 5551 | ||
@@ -5477,17 +5558,20 @@ static int get_user_cpu_mask(unsigned long __user *user_mask_ptr, unsigned len, | |||
5477 | asmlinkage long sys_sched_setaffinity(pid_t pid, unsigned int len, | 5558 | asmlinkage long sys_sched_setaffinity(pid_t pid, unsigned int len, |
5478 | unsigned long __user *user_mask_ptr) | 5559 | unsigned long __user *user_mask_ptr) |
5479 | { | 5560 | { |
5480 | cpumask_t new_mask; | 5561 | cpumask_var_t new_mask; |
5481 | int retval; | 5562 | int retval; |
5482 | 5563 | ||
5483 | retval = get_user_cpu_mask(user_mask_ptr, len, &new_mask); | 5564 | if (!alloc_cpumask_var(&new_mask, GFP_KERNEL)) |
5484 | if (retval) | 5565 | return -ENOMEM; |
5485 | return retval; | ||
5486 | 5566 | ||
5487 | return sched_setaffinity(pid, &new_mask); | 5567 | retval = get_user_cpu_mask(user_mask_ptr, len, new_mask); |
5568 | if (retval == 0) | ||
5569 | retval = sched_setaffinity(pid, new_mask); | ||
5570 | free_cpumask_var(new_mask); | ||
5571 | return retval; | ||
5488 | } | 5572 | } |
5489 | 5573 | ||
5490 | long sched_getaffinity(pid_t pid, cpumask_t *mask) | 5574 | long sched_getaffinity(pid_t pid, struct cpumask *mask) |
5491 | { | 5575 | { |
5492 | struct task_struct *p; | 5576 | struct task_struct *p; |
5493 | int retval; | 5577 | int retval; |
@@ -5504,7 +5588,7 @@ long sched_getaffinity(pid_t pid, cpumask_t *mask) | |||
5504 | if (retval) | 5588 | if (retval) |
5505 | goto out_unlock; | 5589 | goto out_unlock; |
5506 | 5590 | ||
5507 | cpus_and(*mask, p->cpus_allowed, cpu_online_map); | 5591 | cpumask_and(mask, &p->cpus_allowed, cpu_online_mask); |
5508 | 5592 | ||
5509 | out_unlock: | 5593 | out_unlock: |
5510 | read_unlock(&tasklist_lock); | 5594 | read_unlock(&tasklist_lock); |
@@ -5523,19 +5607,24 @@ asmlinkage long sys_sched_getaffinity(pid_t pid, unsigned int len, | |||
5523 | unsigned long __user *user_mask_ptr) | 5607 | unsigned long __user *user_mask_ptr) |
5524 | { | 5608 | { |
5525 | int ret; | 5609 | int ret; |
5526 | cpumask_t mask; | 5610 | cpumask_var_t mask; |
5527 | 5611 | ||
5528 | if (len < sizeof(cpumask_t)) | 5612 | if (len < cpumask_size()) |
5529 | return -EINVAL; | 5613 | return -EINVAL; |
5530 | 5614 | ||
5531 | ret = sched_getaffinity(pid, &mask); | 5615 | if (!alloc_cpumask_var(&mask, GFP_KERNEL)) |
5532 | if (ret < 0) | 5616 | return -ENOMEM; |
5533 | return ret; | ||
5534 | 5617 | ||
5535 | if (copy_to_user(user_mask_ptr, &mask, sizeof(cpumask_t))) | 5618 | ret = sched_getaffinity(pid, mask); |
5536 | return -EFAULT; | 5619 | if (ret == 0) { |
5620 | if (copy_to_user(user_mask_ptr, mask, cpumask_size())) | ||
5621 | ret = -EFAULT; | ||
5622 | else | ||
5623 | ret = cpumask_size(); | ||
5624 | } | ||
5625 | free_cpumask_var(mask); | ||
5537 | 5626 | ||
5538 | return sizeof(cpumask_t); | 5627 | return ret; |
5539 | } | 5628 | } |
5540 | 5629 | ||
5541 | /** | 5630 | /** |
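sys_sched_getaffinity() picks up the same sizing, and its success return changes with it: userspace now receives cpumask_size() rather than sizeof(cpumask_t), a smaller number on offstack configurations since it tracks nr_cpu_ids. The reworked tail, with the copy and the return folded under one error path:

        ret = sched_getaffinity(pid, mask);
        if (ret == 0) {
                if (copy_to_user(user_mask_ptr, mask, cpumask_size()))
                        ret = -EFAULT;
                else
                        ret = cpumask_size();   /* bytes copied out */
        }
        free_cpumask_var(mask);
        return ret;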
@@ -5877,7 +5966,7 @@ void __cpuinit init_idle(struct task_struct *idle, int cpu) | |||
5877 | idle->se.exec_start = sched_clock(); | 5966 | idle->se.exec_start = sched_clock(); |
5878 | 5967 | ||
5879 | idle->prio = idle->normal_prio = MAX_PRIO; | 5968 | idle->prio = idle->normal_prio = MAX_PRIO; |
5880 | idle->cpus_allowed = cpumask_of_cpu(cpu); | 5969 | cpumask_copy(&idle->cpus_allowed, cpumask_of(cpu)); |
5881 | __set_task_cpu(idle, cpu); | 5970 | __set_task_cpu(idle, cpu); |
5882 | 5971 | ||
5883 | rq->curr = rq->idle = idle; | 5972 | rq->curr = rq->idle = idle; |
@@ -5904,9 +5993,9 @@ void __cpuinit init_idle(struct task_struct *idle, int cpu) | |||
5904 | * indicates which cpus entered this state. This is used | 5993 | * indicates which cpus entered this state. This is used |
5905 | * in the rcu update to wait only for active cpus. For system | 5994 | * in the rcu update to wait only for active cpus. For system |
5906 | * which do not switch off the HZ timer nohz_cpu_mask should | 5995 | * which do not switch off the HZ timer nohz_cpu_mask should |
5907 | * always be CPU_MASK_NONE. | 5996 | * always be CPU_BITS_NONE. |
5908 | */ | 5997 | */ |
5909 | cpumask_t nohz_cpu_mask = CPU_MASK_NONE; | 5998 | cpumask_var_t nohz_cpu_mask; |
5910 | 5999 | ||
5911 | /* | 6000 | /* |
5912 | * Increase the granularity value when there are more CPUs, | 6001 | * Increase the granularity value when there are more CPUs, |
@@ -5961,7 +6050,7 @@ static inline void sched_init_granularity(void) | |||
5961 | * task must not exit() & deallocate itself prematurely. The | 6050 | * task must not exit() & deallocate itself prematurely. The |
5962 | * call is not atomic; no spinlocks may be held. | 6051 | * call is not atomic; no spinlocks may be held. |
5963 | */ | 6052 | */ |
5964 | int set_cpus_allowed_ptr(struct task_struct *p, const cpumask_t *new_mask) | 6053 | int set_cpus_allowed_ptr(struct task_struct *p, const struct cpumask *new_mask) |
5965 | { | 6054 | { |
5966 | struct migration_req req; | 6055 | struct migration_req req; |
5967 | unsigned long flags; | 6056 | unsigned long flags; |
@@ -5969,13 +6058,13 @@ int set_cpus_allowed_ptr(struct task_struct *p, const cpumask_t *new_mask) | |||
5969 | int ret = 0; | 6058 | int ret = 0; |
5970 | 6059 | ||
5971 | rq = task_rq_lock(p, &flags); | 6060 | rq = task_rq_lock(p, &flags); |
5972 | if (!cpus_intersects(*new_mask, cpu_online_map)) { | 6061 | if (!cpumask_intersects(new_mask, cpu_online_mask)) { |
5973 | ret = -EINVAL; | 6062 | ret = -EINVAL; |
5974 | goto out; | 6063 | goto out; |
5975 | } | 6064 | } |
5976 | 6065 | ||
5977 | if (unlikely((p->flags & PF_THREAD_BOUND) && p != current && | 6066 | if (unlikely((p->flags & PF_THREAD_BOUND) && p != current && |
5978 | !cpus_equal(p->cpus_allowed, *new_mask))) { | 6067 | !cpumask_equal(&p->cpus_allowed, new_mask))) { |
5979 | ret = -EINVAL; | 6068 | ret = -EINVAL; |
5980 | goto out; | 6069 | goto out; |
5981 | } | 6070 | } |
@@ -5983,15 +6072,15 @@ int set_cpus_allowed_ptr(struct task_struct *p, const cpumask_t *new_mask) | |||
5983 | if (p->sched_class->set_cpus_allowed) | 6072 | if (p->sched_class->set_cpus_allowed) |
5984 | p->sched_class->set_cpus_allowed(p, new_mask); | 6073 | p->sched_class->set_cpus_allowed(p, new_mask); |
5985 | else { | 6074 | else { |
5986 | p->cpus_allowed = *new_mask; | 6075 | cpumask_copy(&p->cpus_allowed, new_mask); |
5987 | p->rt.nr_cpus_allowed = cpus_weight(*new_mask); | 6076 | p->rt.nr_cpus_allowed = cpumask_weight(new_mask); |
5988 | } | 6077 | } |
5989 | 6078 | ||
5990 | /* Can the task run on the task's current CPU? If so, we're done */ | 6079 | /* Can the task run on the task's current CPU? If so, we're done */ |
5991 | if (cpu_isset(task_cpu(p), *new_mask)) | 6080 | if (cpumask_test_cpu(task_cpu(p), new_mask)) |
5992 | goto out; | 6081 | goto out; |
5993 | 6082 | ||
5994 | if (migrate_task(p, any_online_cpu(*new_mask), &req)) { | 6083 | if (migrate_task(p, cpumask_any_and(cpu_online_mask, new_mask), &req)) { |
5995 | /* Need help from migration thread: drop lock and wait. */ | 6084 | /* Need help from migration thread: drop lock and wait. */ |
5996 | task_rq_unlock(rq, &flags); | 6085 | task_rq_unlock(rq, &flags); |
5997 | wake_up_process(rq->migration_thread); | 6086 | wake_up_process(rq->migration_thread); |
@@ -6033,7 +6122,7 @@ static int __migrate_task(struct task_struct *p, int src_cpu, int dest_cpu) | |||
6033 | if (task_cpu(p) != src_cpu) | 6122 | if (task_cpu(p) != src_cpu) |
6034 | goto done; | 6123 | goto done; |
6035 | /* Affinity changed (again). */ | 6124 | /* Affinity changed (again). */ |
6036 | if (!cpu_isset(dest_cpu, p->cpus_allowed)) | 6125 | if (!cpumask_test_cpu(dest_cpu, &p->cpus_allowed)) |
6037 | goto fail; | 6126 | goto fail; |
6038 | 6127 | ||
6039 | on_rq = p->se.on_rq; | 6128 | on_rq = p->se.on_rq; |
@@ -6130,50 +6219,43 @@ static int __migrate_task_irq(struct task_struct *p, int src_cpu, int dest_cpu) | |||
6130 | */ | 6219 | */ |
6131 | static void move_task_off_dead_cpu(int dead_cpu, struct task_struct *p) | 6220 | static void move_task_off_dead_cpu(int dead_cpu, struct task_struct *p) |
6132 | { | 6221 | { |
6133 | unsigned long flags; | ||
6134 | cpumask_t mask; | ||
6135 | struct rq *rq; | ||
6136 | int dest_cpu; | 6222 | int dest_cpu; |
6223 | /* FIXME: Use cpumask_of_node here. */ | ||
6224 | cpumask_t _nodemask = node_to_cpumask(cpu_to_node(dead_cpu)); | ||
6225 | const struct cpumask *nodemask = &_nodemask; | ||
6226 | |||
6227 | again: | ||
6228 | /* Look for allowed, online CPU in same node. */ | ||
6229 | for_each_cpu_and(dest_cpu, nodemask, cpu_online_mask) | ||
6230 | if (cpumask_test_cpu(dest_cpu, &p->cpus_allowed)) | ||
6231 | goto move; | ||
6232 | |||
6233 | /* Any allowed, online CPU? */ | ||
6234 | dest_cpu = cpumask_any_and(&p->cpus_allowed, cpu_online_mask); | ||
6235 | if (dest_cpu < nr_cpu_ids) | ||
6236 | goto move; | ||
6237 | |||
6238 | /* No more Mr. Nice Guy. */ | ||
6239 | if (dest_cpu >= nr_cpu_ids) { | ||
6240 | cpuset_cpus_allowed_locked(p, &p->cpus_allowed); | ||
6241 | dest_cpu = cpumask_any_and(cpu_online_mask, &p->cpus_allowed); | ||
6137 | 6242 | ||
6138 | do { | 6243 | /* |
6139 | /* On same node? */ | 6244 | * Don't tell them about moving exiting tasks or |
6140 | mask = node_to_cpumask(cpu_to_node(dead_cpu)); | 6245 | * kernel threads (both mm NULL), since they never |
6141 | cpus_and(mask, mask, p->cpus_allowed); | 6246 | * leave kernel. |
6142 | dest_cpu = any_online_cpu(mask); | 6247 | */ |
6143 | 6248 | if (p->mm && printk_ratelimit()) { | |
6144 | /* On any allowed CPU? */ | 6249 | printk(KERN_INFO "process %d (%s) no " |
6145 | if (dest_cpu >= nr_cpu_ids) | 6250 | "longer affine to cpu%d\n", |
6146 | dest_cpu = any_online_cpu(p->cpus_allowed); | 6251 | task_pid_nr(p), p->comm, dead_cpu); |
6147 | |||
6148 | /* No more Mr. Nice Guy. */ | ||
6149 | if (dest_cpu >= nr_cpu_ids) { | ||
6150 | cpumask_t cpus_allowed; | ||
6151 | |||
6152 | cpuset_cpus_allowed_locked(p, &cpus_allowed); | ||
6153 | /* | ||
6154 | * Try to stay on the same cpuset, where the | ||
6155 | * current cpuset may be a subset of all cpus. | ||
6156 | * The cpuset_cpus_allowed_locked() variant of | ||
6157 | * cpuset_cpus_allowed() will not block. It must be | ||
6158 | * called within calls to cpuset_lock/cpuset_unlock. | ||
6159 | */ | ||
6160 | rq = task_rq_lock(p, &flags); | ||
6161 | p->cpus_allowed = cpus_allowed; | ||
6162 | dest_cpu = any_online_cpu(p->cpus_allowed); | ||
6163 | task_rq_unlock(rq, &flags); | ||
6164 | |||
6165 | /* | ||
6166 | * Don't tell them about moving exiting tasks or | ||
6167 | * kernel threads (both mm NULL), since they never | ||
6168 | * leave kernel. | ||
6169 | */ | ||
6170 | if (p->mm && printk_ratelimit()) { | ||
6171 | printk(KERN_INFO "process %d (%s) no " | ||
6172 | "longer affine to cpu%d\n", | ||
6173 | task_pid_nr(p), p->comm, dead_cpu); | ||
6174 | } | ||
6175 | } | 6252 | } |
6176 | } while (!__migrate_task_irq(p, dead_cpu, dest_cpu)); | 6253 | } |
6254 | |||
6255 | move: | ||
6256 | /* It can have affinity changed while we were choosing. */ | ||
6257 | if (unlikely(!__migrate_task_irq(p, dead_cpu, dest_cpu))) | ||
6258 | goto again; | ||
6177 | } | 6259 | } |
6178 | 6260 | ||
6179 | /* | 6261 | /* |
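move_task_off_dead_cpu() is rewritten from a do/while retry loop into goto form, and it no longer takes the runqueue lock to install the cpuset fallback: cpuset_cpus_allowed_locked() now writes directly into p->cpus_allowed. The FIXME flags the remaining by-value temporary, which stays until a pointer-returning cpumask_of_node() is available. The surviving retry skeleton, with pick_dest() standing in for the node/any/cpuset cascade:

        again:
                dest_cpu = pick_dest(p, dead_cpu);      /* node, any, then cpuset */

                /* affinity may have changed while we were choosing */
                if (unlikely(!__migrate_task_irq(p, dead_cpu, dest_cpu)))
                        goto again;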
@@ -6185,7 +6267,7 @@ static void move_task_off_dead_cpu(int dead_cpu, struct task_struct *p) | |||
6185 | */ | 6267 | */ |
6186 | static void migrate_nr_uninterruptible(struct rq *rq_src) | 6268 | static void migrate_nr_uninterruptible(struct rq *rq_src) |
6187 | { | 6269 | { |
6188 | struct rq *rq_dest = cpu_rq(any_online_cpu(*CPU_MASK_ALL_PTR)); | 6270 | struct rq *rq_dest = cpu_rq(cpumask_any(cpu_online_mask)); |
6189 | unsigned long flags; | 6271 | unsigned long flags; |
6190 | 6272 | ||
6191 | local_irq_save(flags); | 6273 | local_irq_save(flags); |
@@ -6475,7 +6557,7 @@ static void set_rq_online(struct rq *rq) | |||
6475 | if (!rq->online) { | 6557 | if (!rq->online) { |
6476 | const struct sched_class *class; | 6558 | const struct sched_class *class; |
6477 | 6559 | ||
6478 | cpu_set(rq->cpu, rq->rd->online); | 6560 | cpumask_set_cpu(rq->cpu, rq->rd->online); |
6479 | rq->online = 1; | 6561 | rq->online = 1; |
6480 | 6562 | ||
6481 | for_each_class(class) { | 6563 | for_each_class(class) { |
@@ -6495,7 +6577,7 @@ static void set_rq_offline(struct rq *rq) | |||
6495 | class->rq_offline(rq); | 6577 | class->rq_offline(rq); |
6496 | } | 6578 | } |
6497 | 6579 | ||
6498 | cpu_clear(rq->cpu, rq->rd->online); | 6580 | cpumask_clear_cpu(rq->cpu, rq->rd->online); |
6499 | rq->online = 0; | 6581 | rq->online = 0; |
6500 | } | 6582 | } |
6501 | } | 6583 | } |
@@ -6536,7 +6618,7 @@ migration_call(struct notifier_block *nfb, unsigned long action, void *hcpu) | |||
6536 | rq = cpu_rq(cpu); | 6618 | rq = cpu_rq(cpu); |
6537 | spin_lock_irqsave(&rq->lock, flags); | 6619 | spin_lock_irqsave(&rq->lock, flags); |
6538 | if (rq->rd) { | 6620 | if (rq->rd) { |
6539 | BUG_ON(!cpu_isset(cpu, rq->rd->span)); | 6621 | BUG_ON(!cpumask_test_cpu(cpu, rq->rd->span)); |
6540 | 6622 | ||
6541 | set_rq_online(rq); | 6623 | set_rq_online(rq); |
6542 | } | 6624 | } |
@@ -6550,7 +6632,7 @@ migration_call(struct notifier_block *nfb, unsigned long action, void *hcpu) | |||
6550 | break; | 6632 | break; |
6551 | /* Unbind it from offline cpu so it can run. Fall thru. */ | 6633 | /* Unbind it from offline cpu so it can run. Fall thru. */ |
6552 | kthread_bind(cpu_rq(cpu)->migration_thread, | 6634 | kthread_bind(cpu_rq(cpu)->migration_thread, |
6553 | any_online_cpu(cpu_online_map)); | 6635 | cpumask_any(cpu_online_mask)); |
6554 | kthread_stop(cpu_rq(cpu)->migration_thread); | 6636 | kthread_stop(cpu_rq(cpu)->migration_thread); |
6555 | cpu_rq(cpu)->migration_thread = NULL; | 6637 | cpu_rq(cpu)->migration_thread = NULL; |
6556 | break; | 6638 | break; |
@@ -6600,7 +6682,7 @@ migration_call(struct notifier_block *nfb, unsigned long action, void *hcpu) | |||
6600 | rq = cpu_rq(cpu); | 6682 | rq = cpu_rq(cpu); |
6601 | spin_lock_irqsave(&rq->lock, flags); | 6683 | spin_lock_irqsave(&rq->lock, flags); |
6602 | if (rq->rd) { | 6684 | if (rq->rd) { |
6603 | BUG_ON(!cpu_isset(cpu, rq->rd->span)); | 6685 | BUG_ON(!cpumask_test_cpu(cpu, rq->rd->span)); |
6604 | set_rq_offline(rq); | 6686 | set_rq_offline(rq); |
6605 | } | 6687 | } |
6606 | spin_unlock_irqrestore(&rq->lock, flags); | 6688 | spin_unlock_irqrestore(&rq->lock, flags); |
@@ -6639,13 +6721,13 @@ early_initcall(migration_init); | |||
6639 | #ifdef CONFIG_SCHED_DEBUG | 6721 | #ifdef CONFIG_SCHED_DEBUG |
6640 | 6722 | ||
6641 | static int sched_domain_debug_one(struct sched_domain *sd, int cpu, int level, | 6723 | static int sched_domain_debug_one(struct sched_domain *sd, int cpu, int level, |
6642 | cpumask_t *groupmask) | 6724 | struct cpumask *groupmask) |
6643 | { | 6725 | { |
6644 | struct sched_group *group = sd->groups; | 6726 | struct sched_group *group = sd->groups; |
6645 | char str[256]; | 6727 | char str[256]; |
6646 | 6728 | ||
6647 | cpulist_scnprintf(str, sizeof(str), sd->span); | 6729 | cpulist_scnprintf(str, sizeof(str), sched_domain_span(sd)); |
6648 | cpus_clear(*groupmask); | 6730 | cpumask_clear(groupmask); |
6649 | 6731 | ||
6650 | printk(KERN_DEBUG "%*s domain %d: ", level, "", level); | 6732 | printk(KERN_DEBUG "%*s domain %d: ", level, "", level); |
6651 | 6733 | ||
@@ -6659,11 +6741,11 @@ static int sched_domain_debug_one(struct sched_domain *sd, int cpu, int level, | |||
6659 | 6741 | ||
6660 | printk(KERN_CONT "span %s level %s\n", str, sd->name); | 6742 | printk(KERN_CONT "span %s level %s\n", str, sd->name); |
6661 | 6743 | ||
6662 | if (!cpu_isset(cpu, sd->span)) { | 6744 | if (!cpumask_test_cpu(cpu, sched_domain_span(sd))) { |
6663 | printk(KERN_ERR "ERROR: domain->span does not contain " | 6745 | printk(KERN_ERR "ERROR: domain->span does not contain " |
6664 | "CPU%d\n", cpu); | 6746 | "CPU%d\n", cpu); |
6665 | } | 6747 | } |
6666 | if (!cpu_isset(cpu, group->cpumask)) { | 6748 | if (!cpumask_test_cpu(cpu, sched_group_cpus(group))) { |
6667 | printk(KERN_ERR "ERROR: domain->groups does not contain" | 6749 | printk(KERN_ERR "ERROR: domain->groups does not contain" |
6668 | " CPU%d\n", cpu); | 6750 | " CPU%d\n", cpu); |
6669 | } | 6751 | } |
@@ -6683,31 +6765,32 @@ static int sched_domain_debug_one(struct sched_domain *sd, int cpu, int level, | |||
6683 | break; | 6765 | break; |
6684 | } | 6766 | } |
6685 | 6767 | ||
6686 | if (!cpus_weight(group->cpumask)) { | 6768 | if (!cpumask_weight(sched_group_cpus(group))) { |
6687 | printk(KERN_CONT "\n"); | 6769 | printk(KERN_CONT "\n"); |
6688 | printk(KERN_ERR "ERROR: empty group\n"); | 6770 | printk(KERN_ERR "ERROR: empty group\n"); |
6689 | break; | 6771 | break; |
6690 | } | 6772 | } |
6691 | 6773 | ||
6692 | if (cpus_intersects(*groupmask, group->cpumask)) { | 6774 | if (cpumask_intersects(groupmask, sched_group_cpus(group))) { |
6693 | printk(KERN_CONT "\n"); | 6775 | printk(KERN_CONT "\n"); |
6694 | printk(KERN_ERR "ERROR: repeated CPUs\n"); | 6776 | printk(KERN_ERR "ERROR: repeated CPUs\n"); |
6695 | break; | 6777 | break; |
6696 | } | 6778 | } |
6697 | 6779 | ||
6698 | cpus_or(*groupmask, *groupmask, group->cpumask); | 6780 | cpumask_or(groupmask, groupmask, sched_group_cpus(group)); |
6699 | 6781 | ||
6700 | cpulist_scnprintf(str, sizeof(str), group->cpumask); | 6782 | cpulist_scnprintf(str, sizeof(str), sched_group_cpus(group)); |
6701 | printk(KERN_CONT " %s", str); | 6783 | printk(KERN_CONT " %s", str); |
6702 | 6784 | ||
6703 | group = group->next; | 6785 | group = group->next; |
6704 | } while (group != sd->groups); | 6786 | } while (group != sd->groups); |
6705 | printk(KERN_CONT "\n"); | 6787 | printk(KERN_CONT "\n"); |
6706 | 6788 | ||
6707 | if (!cpus_equal(sd->span, *groupmask)) | 6789 | if (!cpumask_equal(sched_domain_span(sd), groupmask)) |
6708 | printk(KERN_ERR "ERROR: groups don't span domain->span\n"); | 6790 | printk(KERN_ERR "ERROR: groups don't span domain->span\n"); |
6709 | 6791 | ||
6710 | if (sd->parent && !cpus_subset(*groupmask, sd->parent->span)) | 6792 | if (sd->parent && |
6793 | !cpumask_subset(groupmask, sched_domain_span(sd->parent))) | ||
6711 | printk(KERN_ERR "ERROR: parent span is not a superset " | 6794 | printk(KERN_ERR "ERROR: parent span is not a superset " |
6712 | "of domain->span\n"); | 6795 | "of domain->span\n"); |
6713 | return 0; | 6796 | return 0; |
@@ -6715,7 +6798,7 @@ static int sched_domain_debug_one(struct sched_domain *sd, int cpu, int level, | |||
6715 | 6798 | ||
6716 | static void sched_domain_debug(struct sched_domain *sd, int cpu) | 6799 | static void sched_domain_debug(struct sched_domain *sd, int cpu) |
6717 | { | 6800 | { |
6718 | cpumask_t *groupmask; | 6801 | cpumask_var_t groupmask; |
6719 | int level = 0; | 6802 | int level = 0; |
6720 | 6803 | ||
6721 | if (!sd) { | 6804 | if (!sd) { |
@@ -6725,8 +6808,7 @@ static void sched_domain_debug(struct sched_domain *sd, int cpu) | |||
6725 | 6808 | ||
6726 | printk(KERN_DEBUG "CPU%d attaching sched-domain:\n", cpu); | 6809 | printk(KERN_DEBUG "CPU%d attaching sched-domain:\n", cpu); |
6727 | 6810 | ||
6728 | groupmask = kmalloc(sizeof(cpumask_t), GFP_KERNEL); | 6811 | if (!alloc_cpumask_var(&groupmask, GFP_KERNEL)) { |
6729 | if (!groupmask) { | ||
6730 | printk(KERN_DEBUG "Cannot load-balance (out of memory)\n"); | 6812 | printk(KERN_DEBUG "Cannot load-balance (out of memory)\n"); |
6731 | return; | 6813 | return; |
6732 | } | 6814 | } |
@@ -6739,7 +6821,7 @@ static void sched_domain_debug(struct sched_domain *sd, int cpu) | |||
6739 | if (!sd) | 6821 | if (!sd) |
6740 | break; | 6822 | break; |
6741 | } | 6823 | } |
6742 | kfree(groupmask); | 6824 | free_cpumask_var(groupmask); |
6743 | } | 6825 | } |
6744 | #else /* !CONFIG_SCHED_DEBUG */ | 6826 | #else /* !CONFIG_SCHED_DEBUG */ |
6745 | # define sched_domain_debug(sd, cpu) do { } while (0) | 6827 | # define sched_domain_debug(sd, cpu) do { } while (0) |
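sched_domain_debug() above swaps an explicit kmalloc(sizeof(cpumask_t)) for cpumask_var_t plus the alloc_cpumask_var()/free_cpumask_var() pair. The point of the idiom is that the variable compiles to an on-stack array when CONFIG_CPUMASK_OFFSTACK is unset and to a real allocation when it is set, so callers write a single code path. A sketch under those assumptions, with a hypothetical walk_domains():

/*
 * Sketch of the cpumask_var_t idiom: 'scratch' is a pointer when
 * CONFIG_CPUMASK_OFFSTACK=y and an on-stack array otherwise, so the
 * alloc/free pair is always written and sizeof is never taken.
 */
#include <linux/cpumask.h>
#include <linux/slab.h>

static void walk_domains(int cpu)
{
	cpumask_var_t scratch;

	if (!alloc_cpumask_var(&scratch, GFP_KERNEL))
		return;			/* may fail in the off-stack case */

	/* ... use 'scratch' wherever a struct cpumask * is wanted ... */

	free_cpumask_var(scratch);	/* no-op in the on-stack case */
}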
@@ -6747,7 +6829,7 @@ static void sched_domain_debug(struct sched_domain *sd, int cpu) | |||
6747 | 6829 | ||
6748 | static int sd_degenerate(struct sched_domain *sd) | 6830 | static int sd_degenerate(struct sched_domain *sd) |
6749 | { | 6831 | { |
6750 | if (cpus_weight(sd->span) == 1) | 6832 | if (cpumask_weight(sched_domain_span(sd)) == 1) |
6751 | return 1; | 6833 | return 1; |
6752 | 6834 | ||
6753 | /* Following flags need at least 2 groups */ | 6835 | /* Following flags need at least 2 groups */ |
@@ -6778,7 +6860,7 @@ sd_parent_degenerate(struct sched_domain *sd, struct sched_domain *parent) | |||
6778 | if (sd_degenerate(parent)) | 6860 | if (sd_degenerate(parent)) |
6779 | return 1; | 6861 | return 1; |
6780 | 6862 | ||
6781 | if (!cpus_equal(sd->span, parent->span)) | 6863 | if (!cpumask_equal(sched_domain_span(sd), sched_domain_span(parent))) |
6782 | return 0; | 6864 | return 0; |
6783 | 6865 | ||
6784 | /* Does parent contain flags not in child? */ | 6866 | /* Does parent contain flags not in child? */ |
@@ -6802,6 +6884,16 @@ sd_parent_degenerate(struct sched_domain *sd, struct sched_domain *parent) | |||
6802 | return 1; | 6884 | return 1; |
6803 | } | 6885 | } |
6804 | 6886 | ||
6887 | static void free_rootdomain(struct root_domain *rd) | ||
6888 | { | ||
6889 | cpupri_cleanup(&rd->cpupri); | ||
6890 | |||
6891 | free_cpumask_var(rd->rto_mask); | ||
6892 | free_cpumask_var(rd->online); | ||
6893 | free_cpumask_var(rd->span); | ||
6894 | kfree(rd); | ||
6895 | } | ||
6896 | |||
6805 | static void rq_attach_root(struct rq *rq, struct root_domain *rd) | 6897 | static void rq_attach_root(struct rq *rq, struct root_domain *rd) |
6806 | { | 6898 | { |
6807 | unsigned long flags; | 6899 | unsigned long flags; |
@@ -6811,38 +6903,63 @@ static void rq_attach_root(struct rq *rq, struct root_domain *rd) | |||
6811 | if (rq->rd) { | 6903 | if (rq->rd) { |
6812 | struct root_domain *old_rd = rq->rd; | 6904 | struct root_domain *old_rd = rq->rd; |
6813 | 6905 | ||
6814 | if (cpu_isset(rq->cpu, old_rd->online)) | 6906 | if (cpumask_test_cpu(rq->cpu, old_rd->online)) |
6815 | set_rq_offline(rq); | 6907 | set_rq_offline(rq); |
6816 | 6908 | ||
6817 | cpu_clear(rq->cpu, old_rd->span); | 6909 | cpumask_clear_cpu(rq->cpu, old_rd->span); |
6818 | 6910 | ||
6819 | if (atomic_dec_and_test(&old_rd->refcount)) | 6911 | if (atomic_dec_and_test(&old_rd->refcount)) |
6820 | kfree(old_rd); | 6912 | free_rootdomain(old_rd); |
6821 | } | 6913 | } |
6822 | 6914 | ||
6823 | atomic_inc(&rd->refcount); | 6915 | atomic_inc(&rd->refcount); |
6824 | rq->rd = rd; | 6916 | rq->rd = rd; |
6825 | 6917 | ||
6826 | cpu_set(rq->cpu, rd->span); | 6918 | cpumask_set_cpu(rq->cpu, rd->span); |
6827 | if (cpu_isset(rq->cpu, cpu_online_map)) | 6919 | if (cpumask_test_cpu(rq->cpu, cpu_online_mask)) |
6828 | set_rq_online(rq); | 6920 | set_rq_online(rq); |
6829 | 6921 | ||
6830 | spin_unlock_irqrestore(&rq->lock, flags); | 6922 | spin_unlock_irqrestore(&rq->lock, flags); |
6831 | } | 6923 | } |
6832 | 6924 | ||
6833 | static void init_rootdomain(struct root_domain *rd) | 6925 | static int init_rootdomain(struct root_domain *rd, bool bootmem) |
6834 | { | 6926 | { |
6835 | memset(rd, 0, sizeof(*rd)); | 6927 | memset(rd, 0, sizeof(*rd)); |
6836 | 6928 | ||
6837 | cpus_clear(rd->span); | 6929 | if (bootmem) { |
6838 | cpus_clear(rd->online); | 6930 | alloc_bootmem_cpumask_var(&def_root_domain.span); |
6931 | alloc_bootmem_cpumask_var(&def_root_domain.online); | ||
6932 | alloc_bootmem_cpumask_var(&def_root_domain.rto_mask); | ||
6933 | cpupri_init(&rd->cpupri, true); | ||
6934 | return 0; | ||
6935 | } | ||
6936 | |||
6937 | if (!alloc_cpumask_var(&rd->span, GFP_KERNEL)) | ||
6938 | goto free_rd; | ||
6939 | if (!alloc_cpumask_var(&rd->online, GFP_KERNEL)) | ||
6940 | goto free_span; | ||
6941 | if (!alloc_cpumask_var(&rd->rto_mask, GFP_KERNEL)) | ||
6942 | goto free_online; | ||
6943 | |||
6944 | if (cpupri_init(&rd->cpupri, false) != 0) | ||
6945 | goto free_rto_mask; | ||
6946 | return 0; | ||
6839 | 6947 | ||
6840 | cpupri_init(&rd->cpupri); | 6948 | free_rto_mask: |
6949 | free_cpumask_var(rd->rto_mask); | ||
6950 | free_online: | ||
6951 | free_cpumask_var(rd->online); | ||
6952 | free_span: | ||
6953 | free_cpumask_var(rd->span); | ||
6954 | free_rd: | ||
6955 | kfree(rd); | ||
6956 | return -ENOMEM; | ||
6841 | } | 6957 | } |
6842 | 6958 | ||
6843 | static void init_defrootdomain(void) | 6959 | static void init_defrootdomain(void) |
6844 | { | 6960 | { |
6845 | init_rootdomain(&def_root_domain); | 6961 | init_rootdomain(&def_root_domain, true); |
6962 | |||
6846 | atomic_set(&def_root_domain.refcount, 1); | 6963 | atomic_set(&def_root_domain.refcount, 1); |
6847 | } | 6964 | } |
6848 | 6965 | ||
@@ -6854,7 +6971,10 @@ static struct root_domain *alloc_rootdomain(void) | |||
6854 | if (!rd) | 6971 | if (!rd) |
6855 | return NULL; | 6972 | return NULL; |
6856 | 6973 | ||
6857 | init_rootdomain(rd); | 6974 | if (init_rootdomain(rd, false) != 0) { |
6975 | kfree(rd); | ||
6976 | return NULL; | ||
6977 | } | ||
6858 | 6978 | ||
6859 | return rd; | 6979 | return rd; |
6860 | } | 6980 | } |
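init_rootdomain() above takes on the classic goto-unwind shape: each allocation gets a matching label, and a failure frees only what already succeeded, in reverse order, mirroring what free_rootdomain() does. A stripped-down sketch with an illustrative demo_domain type (not kernel API):

/*
 * Stripped-down sketch of the goto-unwind pattern: every allocation
 * gets a label, and each failure path releases only what already
 * succeeded, in reverse order.
 */
#include <linux/cpumask.h>
#include <linux/errno.h>
#include <linux/slab.h>

struct demo_domain {			/* hypothetical stand-in */
	cpumask_var_t span;
	cpumask_var_t online;
};

static int demo_init(struct demo_domain *d)
{
	if (!alloc_cpumask_var(&d->span, GFP_KERNEL))
		goto fail;
	if (!alloc_cpumask_var(&d->online, GFP_KERNEL))
		goto free_span;
	return 0;

free_span:
	free_cpumask_var(d->span);
fail:
	return -ENOMEM;
}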
@@ -6896,19 +7016,12 @@ cpu_attach_domain(struct sched_domain *sd, struct root_domain *rd, int cpu) | |||
6896 | } | 7016 | } |
6897 | 7017 | ||
6898 | /* cpus with isolated domains */ | 7018 | /* cpus with isolated domains */ |
6899 | static cpumask_t cpu_isolated_map = CPU_MASK_NONE; | 7019 | static cpumask_var_t cpu_isolated_map; |
6900 | 7020 | ||
6901 | /* Setup the mask of cpus configured for isolated domains */ | 7021 | /* Setup the mask of cpus configured for isolated domains */ |
6902 | static int __init isolated_cpu_setup(char *str) | 7022 | static int __init isolated_cpu_setup(char *str) |
6903 | { | 7023 | { |
6904 | static int __initdata ints[NR_CPUS]; | 7024 | cpulist_parse(str, cpu_isolated_map); |
6905 | int i; | ||
6906 | |||
6907 | str = get_options(str, ARRAY_SIZE(ints), ints); | ||
6908 | cpus_clear(cpu_isolated_map); | ||
6909 | for (i = 1; i <= ints[0]; i++) | ||
6910 | if (ints[i] < NR_CPUS) | ||
6911 | cpu_set(ints[i], cpu_isolated_map); | ||
6912 | return 1; | 7025 | return 1; |
6913 | } | 7026 | } |
6914 | 7027 | ||
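The rewritten isolcpus= handler above replaces a hand-rolled get_options() loop with cpulist_parse(), which accepts human-readable lists such as "0,2,4-6". A sketch of the same shape for a hypothetical demo_mask= boot parameter; the target mask is assumed to have been allocated earlier in boot, as this patch does for cpu_isolated_map in sched_init():

/*
 * Sketch: cpulist_parse() understands "0,2,4-6"-style CPU lists.
 * 'demo_mask' and the demo_mask= parameter are hypothetical.
 */
#include <linux/cpumask.h>
#include <linux/init.h>

static cpumask_var_t demo_mask;		/* assumed allocated earlier */

static int __init demo_mask_setup(char *str)
{
	cpulist_parse(str, demo_mask);	/* parse list into the bitmap */
	return 1;
}
__setup("demo_mask=", demo_mask_setup);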
@@ -6917,42 +7030,43 @@ __setup("isolcpus=", isolated_cpu_setup); | |||
6917 | /* | 7030 | /* |
6918 | * init_sched_build_groups takes the cpumask we wish to span, and a pointer | 7031 | * init_sched_build_groups takes the cpumask we wish to span, and a pointer |
6919 | * to a function which identifies what group (along with sched group) a CPU | 7032 | * to a function which identifies what group (along with sched group) a CPU |
6920 | * belongs to. The return value of group_fn must be >= 0 and < NR_CPUS | 7033 | * belongs to. The return value of group_fn must be >= 0 and < nr_cpu_ids |
6921 | * (due to the fact that we keep track of groups covered with a cpumask_t). | 7034 | * (due to the fact that we keep track of groups covered with a struct cpumask). |
6922 | * | 7035 | * |
6923 | * init_sched_build_groups will build a circular linked list of the groups | 7036 | * init_sched_build_groups will build a circular linked list of the groups |
6924 | * covered by the given span, and will set each group's ->cpumask correctly, | 7037 | * covered by the given span, and will set each group's ->cpumask correctly, |
6925 | * and ->cpu_power to 0. | 7038 | * and ->cpu_power to 0. |
6926 | */ | 7039 | */ |
6927 | static void | 7040 | static void |
6928 | init_sched_build_groups(const cpumask_t *span, const cpumask_t *cpu_map, | 7041 | init_sched_build_groups(const struct cpumask *span, |
6929 | int (*group_fn)(int cpu, const cpumask_t *cpu_map, | 7042 | const struct cpumask *cpu_map, |
7043 | int (*group_fn)(int cpu, const struct cpumask *cpu_map, | ||
6930 | struct sched_group **sg, | 7044 | struct sched_group **sg, |
6931 | cpumask_t *tmpmask), | 7045 | struct cpumask *tmpmask), |
6932 | cpumask_t *covered, cpumask_t *tmpmask) | 7046 | struct cpumask *covered, struct cpumask *tmpmask) |
6933 | { | 7047 | { |
6934 | struct sched_group *first = NULL, *last = NULL; | 7048 | struct sched_group *first = NULL, *last = NULL; |
6935 | int i; | 7049 | int i; |
6936 | 7050 | ||
6937 | cpus_clear(*covered); | 7051 | cpumask_clear(covered); |
6938 | 7052 | ||
6939 | for_each_cpu_mask_nr(i, *span) { | 7053 | for_each_cpu(i, span) { |
6940 | struct sched_group *sg; | 7054 | struct sched_group *sg; |
6941 | int group = group_fn(i, cpu_map, &sg, tmpmask); | 7055 | int group = group_fn(i, cpu_map, &sg, tmpmask); |
6942 | int j; | 7056 | int j; |
6943 | 7057 | ||
6944 | if (cpu_isset(i, *covered)) | 7058 | if (cpumask_test_cpu(i, covered)) |
6945 | continue; | 7059 | continue; |
6946 | 7060 | ||
6947 | cpus_clear(sg->cpumask); | 7061 | cpumask_clear(sched_group_cpus(sg)); |
6948 | sg->__cpu_power = 0; | 7062 | sg->__cpu_power = 0; |
6949 | 7063 | ||
6950 | for_each_cpu_mask_nr(j, *span) { | 7064 | for_each_cpu(j, span) { |
6951 | if (group_fn(j, cpu_map, NULL, tmpmask) != group) | 7065 | if (group_fn(j, cpu_map, NULL, tmpmask) != group) |
6952 | continue; | 7066 | continue; |
6953 | 7067 | ||
6954 | cpu_set(j, *covered); | 7068 | cpumask_set_cpu(j, covered); |
6955 | cpu_set(j, sg->cpumask); | 7069 | cpumask_set_cpu(j, sched_group_cpus(sg)); |
6956 | } | 7070 | } |
6957 | if (!first) | 7071 | if (!first) |
6958 | first = sg; | 7072 | first = sg; |
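init_sched_build_groups() also changes its iteration primitive: for_each_cpu_mask_nr() over a by-value cpumask_t becomes for_each_cpu() over a struct cpumask pointer, bounded by nr_cpu_ids rather than NR_CPUS. A minimal sketch, assuming a caller-owned span mask and an illustrative count_cpus():

/*
 * Minimal sketch: for_each_cpu() walks a struct cpumask * and stops
 * at nr_cpu_ids, replacing for_each_cpu_mask_nr() over a copied mask.
 */
#include <linux/cpumask.h>

static unsigned int count_cpus(const struct cpumask *span)
{
	unsigned int n = 0;
	int i;

	for_each_cpu(i, span)		/* each set bit below nr_cpu_ids */
		n++;
	return n;
}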
@@ -7016,9 +7130,10 @@ static int find_next_best_node(int node, nodemask_t *used_nodes) | |||
7016 | * should be one that prevents unnecessary balancing, but also spreads tasks | 7130 | * should be one that prevents unnecessary balancing, but also spreads tasks |
7017 | * out optimally. | 7131 | * out optimally. |
7018 | */ | 7132 | */ |
7019 | static void sched_domain_node_span(int node, cpumask_t *span) | 7133 | static void sched_domain_node_span(int node, struct cpumask *span) |
7020 | { | 7134 | { |
7021 | nodemask_t used_nodes; | 7135 | nodemask_t used_nodes; |
7136 | /* FIXME: use cpumask_of_node() */ | ||
7022 | node_to_cpumask_ptr(nodemask, node); | 7137 | node_to_cpumask_ptr(nodemask, node); |
7023 | int i; | 7138 | int i; |
7024 | 7139 | ||
@@ -7040,18 +7155,33 @@ static void sched_domain_node_span(int node, cpumask_t *span) | |||
7040 | int sched_smt_power_savings = 0, sched_mc_power_savings = 0; | 7155 | int sched_smt_power_savings = 0, sched_mc_power_savings = 0; |
7041 | 7156 | ||
7042 | /* | 7157 | /* |
7158 | * The cpus mask in sched_group and sched_domain hangs off the end. | ||
7159 | * FIXME: use cpumask_var_t or dynamic percpu alloc to avoid wasting space | ||
7160 | * for nr_cpu_ids < CONFIG_NR_CPUS. | ||
7161 | */ | ||
7162 | struct static_sched_group { | ||
7163 | struct sched_group sg; | ||
7164 | DECLARE_BITMAP(cpus, CONFIG_NR_CPUS); | ||
7165 | }; | ||
7166 | |||
7167 | struct static_sched_domain { | ||
7168 | struct sched_domain sd; | ||
7169 | DECLARE_BITMAP(span, CONFIG_NR_CPUS); | ||
7170 | }; | ||
7171 | |||
7172 | /* | ||
7043 | * SMT sched-domains: | 7173 | * SMT sched-domains: |
7044 | */ | 7174 | */ |
7045 | #ifdef CONFIG_SCHED_SMT | 7175 | #ifdef CONFIG_SCHED_SMT |
7046 | static DEFINE_PER_CPU(struct sched_domain, cpu_domains); | 7176 | static DEFINE_PER_CPU(struct static_sched_domain, cpu_domains); |
7047 | static DEFINE_PER_CPU(struct sched_group, sched_group_cpus); | 7177 | static DEFINE_PER_CPU(struct static_sched_group, sched_group_cpus); |
7048 | 7178 | ||
7049 | static int | 7179 | static int |
7050 | cpu_to_cpu_group(int cpu, const cpumask_t *cpu_map, struct sched_group **sg, | 7180 | cpu_to_cpu_group(int cpu, const struct cpumask *cpu_map, |
7051 | cpumask_t *unused) | 7181 | struct sched_group **sg, struct cpumask *unused) |
7052 | { | 7182 | { |
7053 | if (sg) | 7183 | if (sg) |
7054 | *sg = &per_cpu(sched_group_cpus, cpu); | 7184 | *sg = &per_cpu(sched_group_cpus, cpu).sg; |
7055 | return cpu; | 7185 | return cpu; |
7056 | } | 7186 | } |
7057 | #endif /* CONFIG_SCHED_SMT */ | 7187 | #endif /* CONFIG_SCHED_SMT */ |
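The static_sched_group/static_sched_domain wrappers above implement the "mask hangs off the end" layout the comment describes: the struct ends in an unsized cpumask, backed either by a trailing DECLARE_BITMAP() (the static per-cpu case) or by allocating cpumask_size() extra bytes (the dynamic case, as the node-group code does further down). A sketch of the dynamic variant with an illustrative demo_group type:

/*
 * Sketch of the trailing-bitmap layout; demo_group is hypothetical.
 */
#include <linux/cpumask.h>
#include <linux/slab.h>

struct demo_group {
	struct demo_group *next;
	unsigned long cpus[0];		/* sized at allocation time */
};

static struct demo_group *demo_group_alloc(int node)
{
	/* With CONFIG_CPUMASK_OFFSTACK, cpumask_size() scales with
	 * nr_cpu_ids rather than CONFIG_NR_CPUS. */
	return kmalloc_node(sizeof(struct demo_group) + cpumask_size(),
			    GFP_KERNEL, node);
}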
@@ -7060,56 +7190,55 @@ cpu_to_cpu_group(int cpu, const cpumask_t *cpu_map, struct sched_group **sg, | |||
7060 | * multi-core sched-domains: | 7190 | * multi-core sched-domains: |
7061 | */ | 7191 | */ |
7062 | #ifdef CONFIG_SCHED_MC | 7192 | #ifdef CONFIG_SCHED_MC |
7063 | static DEFINE_PER_CPU(struct sched_domain, core_domains); | 7193 | static DEFINE_PER_CPU(struct static_sched_domain, core_domains); |
7064 | static DEFINE_PER_CPU(struct sched_group, sched_group_core); | 7194 | static DEFINE_PER_CPU(struct static_sched_group, sched_group_core); |
7065 | #endif /* CONFIG_SCHED_MC */ | 7195 | #endif /* CONFIG_SCHED_MC */ |
7066 | 7196 | ||
7067 | #if defined(CONFIG_SCHED_MC) && defined(CONFIG_SCHED_SMT) | 7197 | #if defined(CONFIG_SCHED_MC) && defined(CONFIG_SCHED_SMT) |
7068 | static int | 7198 | static int |
7069 | cpu_to_core_group(int cpu, const cpumask_t *cpu_map, struct sched_group **sg, | 7199 | cpu_to_core_group(int cpu, const struct cpumask *cpu_map, |
7070 | cpumask_t *mask) | 7200 | struct sched_group **sg, struct cpumask *mask) |
7071 | { | 7201 | { |
7072 | int group; | 7202 | int group; |
7073 | 7203 | ||
7074 | *mask = per_cpu(cpu_sibling_map, cpu); | 7204 | cpumask_and(mask, &per_cpu(cpu_sibling_map, cpu), cpu_map); |
7075 | cpus_and(*mask, *mask, *cpu_map); | 7205 | group = cpumask_first(mask); |
7076 | group = first_cpu(*mask); | ||
7077 | if (sg) | 7206 | if (sg) |
7078 | *sg = &per_cpu(sched_group_core, group); | 7207 | *sg = &per_cpu(sched_group_core, group).sg; |
7079 | return group; | 7208 | return group; |
7080 | } | 7209 | } |
7081 | #elif defined(CONFIG_SCHED_MC) | 7210 | #elif defined(CONFIG_SCHED_MC) |
7082 | static int | 7211 | static int |
7083 | cpu_to_core_group(int cpu, const cpumask_t *cpu_map, struct sched_group **sg, | 7212 | cpu_to_core_group(int cpu, const struct cpumask *cpu_map, |
7084 | cpumask_t *unused) | 7213 | struct sched_group **sg, struct cpumask *unused) |
7085 | { | 7214 | { |
7086 | if (sg) | 7215 | if (sg) |
7087 | *sg = &per_cpu(sched_group_core, cpu); | 7216 | *sg = &per_cpu(sched_group_core, cpu).sg; |
7088 | return cpu; | 7217 | return cpu; |
7089 | } | 7218 | } |
7090 | #endif | 7219 | #endif |
7091 | 7220 | ||
7092 | static DEFINE_PER_CPU(struct sched_domain, phys_domains); | 7221 | static DEFINE_PER_CPU(struct static_sched_domain, phys_domains); |
7093 | static DEFINE_PER_CPU(struct sched_group, sched_group_phys); | 7222 | static DEFINE_PER_CPU(struct static_sched_group, sched_group_phys); |
7094 | 7223 | ||
7095 | static int | 7224 | static int |
7096 | cpu_to_phys_group(int cpu, const cpumask_t *cpu_map, struct sched_group **sg, | 7225 | cpu_to_phys_group(int cpu, const struct cpumask *cpu_map, |
7097 | cpumask_t *mask) | 7226 | struct sched_group **sg, struct cpumask *mask) |
7098 | { | 7227 | { |
7099 | int group; | 7228 | int group; |
7100 | #ifdef CONFIG_SCHED_MC | 7229 | #ifdef CONFIG_SCHED_MC |
7230 | /* FIXME: Use cpu_coregroup_mask. */ | ||
7101 | *mask = cpu_coregroup_map(cpu); | 7231 | *mask = cpu_coregroup_map(cpu); |
7102 | cpus_and(*mask, *mask, *cpu_map); | 7232 | cpus_and(*mask, *mask, *cpu_map); |
7103 | group = first_cpu(*mask); | 7233 | group = cpumask_first(mask); |
7104 | #elif defined(CONFIG_SCHED_SMT) | 7234 | #elif defined(CONFIG_SCHED_SMT) |
7105 | *mask = per_cpu(cpu_sibling_map, cpu); | 7235 | cpumask_and(mask, &per_cpu(cpu_sibling_map, cpu), cpu_map); |
7106 | cpus_and(*mask, *mask, *cpu_map); | 7236 | group = cpumask_first(mask); |
7107 | group = first_cpu(*mask); | ||
7108 | #else | 7237 | #else |
7109 | group = cpu; | 7238 | group = cpu; |
7110 | #endif | 7239 | #endif |
7111 | if (sg) | 7240 | if (sg) |
7112 | *sg = &per_cpu(sched_group_phys, group); | 7241 | *sg = &per_cpu(sched_group_phys, group).sg; |
7113 | return group; | 7242 | return group; |
7114 | } | 7243 | } |
7115 | 7244 | ||
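All of the cpu_to_*_group() helpers above now share one pattern for picking a group representative: intersect the topology mask with cpu_map in caller-supplied scratch space, then take the first set bit. A sketch with hypothetical names:

/*
 * Sketch of the representative-picking pattern; group_leader() is
 * illustrative, not kernel API.
 */
#include <linux/cpumask.h>

static int group_leader(const struct cpumask *topology,
			const struct cpumask *cpu_map,
			struct cpumask *tmp)
{
	cpumask_and(tmp, topology, cpu_map);
	return cpumask_first(tmp);	/* >= nr_cpu_ids if empty */
}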
@@ -7123,19 +7252,21 @@ static DEFINE_PER_CPU(struct sched_domain, node_domains); | |||
7123 | static struct sched_group ***sched_group_nodes_bycpu; | 7252 | static struct sched_group ***sched_group_nodes_bycpu; |
7124 | 7253 | ||
7125 | static DEFINE_PER_CPU(struct sched_domain, allnodes_domains); | 7254 | static DEFINE_PER_CPU(struct sched_domain, allnodes_domains); |
7126 | static DEFINE_PER_CPU(struct sched_group, sched_group_allnodes); | 7255 | static DEFINE_PER_CPU(struct static_sched_group, sched_group_allnodes); |
7127 | 7256 | ||
7128 | static int cpu_to_allnodes_group(int cpu, const cpumask_t *cpu_map, | 7257 | static int cpu_to_allnodes_group(int cpu, const struct cpumask *cpu_map, |
7129 | struct sched_group **sg, cpumask_t *nodemask) | 7258 | struct sched_group **sg, |
7259 | struct cpumask *nodemask) | ||
7130 | { | 7260 | { |
7131 | int group; | 7261 | int group; |
7262 | /* FIXME: use cpumask_of_node */ | ||
7263 | node_to_cpumask_ptr(pnodemask, cpu_to_node(cpu)); | ||
7132 | 7264 | ||
7133 | *nodemask = node_to_cpumask(cpu_to_node(cpu)); | 7265 | cpumask_and(nodemask, pnodemask, cpu_map); |
7134 | cpus_and(*nodemask, *nodemask, *cpu_map); | 7266 | group = cpumask_first(nodemask); |
7135 | group = first_cpu(*nodemask); | ||
7136 | 7267 | ||
7137 | if (sg) | 7268 | if (sg) |
7138 | *sg = &per_cpu(sched_group_allnodes, group); | 7269 | *sg = &per_cpu(sched_group_allnodes, group).sg; |
7139 | return group; | 7270 | return group; |
7140 | } | 7271 | } |
7141 | 7272 | ||
@@ -7147,11 +7278,11 @@ static void init_numa_sched_groups_power(struct sched_group *group_head) | |||
7147 | if (!sg) | 7278 | if (!sg) |
7148 | return; | 7279 | return; |
7149 | do { | 7280 | do { |
7150 | for_each_cpu_mask_nr(j, sg->cpumask) { | 7281 | for_each_cpu(j, sched_group_cpus(sg)) { |
7151 | struct sched_domain *sd; | 7282 | struct sched_domain *sd; |
7152 | 7283 | ||
7153 | sd = &per_cpu(phys_domains, j); | 7284 | sd = &per_cpu(phys_domains, j).sd; |
7154 | if (j != first_cpu(sd->groups->cpumask)) { | 7285 | if (j != cpumask_first(sched_group_cpus(sd->groups))) { |
7155 | /* | 7286 | /* |
7156 | * Only add "power" once for each | 7287 | * Only add "power" once for each |
7157 | * physical package. | 7288 | * physical package. |
@@ -7168,11 +7299,12 @@ static void init_numa_sched_groups_power(struct sched_group *group_head) | |||
7168 | 7299 | ||
7169 | #ifdef CONFIG_NUMA | 7300 | #ifdef CONFIG_NUMA |
7170 | /* Free memory allocated for various sched_group structures */ | 7301 | /* Free memory allocated for various sched_group structures */ |
7171 | static void free_sched_groups(const cpumask_t *cpu_map, cpumask_t *nodemask) | 7302 | static void free_sched_groups(const struct cpumask *cpu_map, |
7303 | struct cpumask *nodemask) | ||
7172 | { | 7304 | { |
7173 | int cpu, i; | 7305 | int cpu, i; |
7174 | 7306 | ||
7175 | for_each_cpu_mask_nr(cpu, *cpu_map) { | 7307 | for_each_cpu(cpu, cpu_map) { |
7176 | struct sched_group **sched_group_nodes | 7308 | struct sched_group **sched_group_nodes |
7177 | = sched_group_nodes_bycpu[cpu]; | 7309 | = sched_group_nodes_bycpu[cpu]; |
7178 | 7310 | ||
@@ -7181,10 +7313,11 @@ static void free_sched_groups(const cpumask_t *cpu_map, cpumask_t *nodemask) | |||
7181 | 7313 | ||
7182 | for (i = 0; i < nr_node_ids; i++) { | 7314 | for (i = 0; i < nr_node_ids; i++) { |
7183 | struct sched_group *oldsg, *sg = sched_group_nodes[i]; | 7315 | struct sched_group *oldsg, *sg = sched_group_nodes[i]; |
7316 | /* FIXME: Use cpumask_of_node */ | ||
7317 | node_to_cpumask_ptr(pnodemask, i); | ||
7184 | 7318 | ||
7185 | *nodemask = node_to_cpumask(i); | 7319 | cpus_and(*nodemask, *pnodemask, *cpu_map); |
7186 | cpus_and(*nodemask, *nodemask, *cpu_map); | 7320 | if (cpumask_empty(nodemask)) |
7187 | if (cpus_empty(*nodemask)) | ||
7188 | continue; | 7321 | continue; |
7189 | 7322 | ||
7190 | if (sg == NULL) | 7323 | if (sg == NULL) |
@@ -7202,7 +7335,8 @@ next_sg: | |||
7202 | } | 7335 | } |
7203 | } | 7336 | } |
7204 | #else /* !CONFIG_NUMA */ | 7337 | #else /* !CONFIG_NUMA */ |
7205 | static void free_sched_groups(const cpumask_t *cpu_map, cpumask_t *nodemask) | 7338 | static void free_sched_groups(const struct cpumask *cpu_map, |
7339 | struct cpumask *nodemask) | ||
7206 | { | 7340 | { |
7207 | } | 7341 | } |
7208 | #endif /* CONFIG_NUMA */ | 7342 | #endif /* CONFIG_NUMA */ |
@@ -7228,7 +7362,7 @@ static void init_sched_groups_power(int cpu, struct sched_domain *sd) | |||
7228 | 7362 | ||
7229 | WARN_ON(!sd || !sd->groups); | 7363 | WARN_ON(!sd || !sd->groups); |
7230 | 7364 | ||
7231 | if (cpu != first_cpu(sd->groups->cpumask)) | 7365 | if (cpu != cpumask_first(sched_group_cpus(sd->groups))) |
7232 | return; | 7366 | return; |
7233 | 7367 | ||
7234 | child = sd->child; | 7368 | child = sd->child; |
@@ -7293,48 +7427,6 @@ SD_INIT_FUNC(CPU) | |||
7293 | SD_INIT_FUNC(MC) | 7427 | SD_INIT_FUNC(MC) |
7294 | #endif | 7428 | #endif |
7295 | 7429 | ||
7296 | /* | ||
7297 | * To minimize stack usage kmalloc room for cpumasks and share the | ||
7298 | * space as the usage in build_sched_domains() dictates. Used only | ||
7299 | * if the amount of space is significant. | ||
7300 | */ | ||
7301 | struct allmasks { | ||
7302 | cpumask_t tmpmask; /* make this one first */ | ||
7303 | union { | ||
7304 | cpumask_t nodemask; | ||
7305 | cpumask_t this_sibling_map; | ||
7306 | cpumask_t this_core_map; | ||
7307 | }; | ||
7308 | cpumask_t send_covered; | ||
7309 | |||
7310 | #ifdef CONFIG_NUMA | ||
7311 | cpumask_t domainspan; | ||
7312 | cpumask_t covered; | ||
7313 | cpumask_t notcovered; | ||
7314 | #endif | ||
7315 | }; | ||
7316 | |||
7317 | #if NR_CPUS > 128 | ||
7318 | #define SCHED_CPUMASK_DECLARE(v) struct allmasks *v | ||
7319 | static inline void sched_cpumask_alloc(struct allmasks **masks) | ||
7320 | { | ||
7321 | *masks = kmalloc(sizeof(**masks), GFP_KERNEL); | ||
7322 | } | ||
7323 | static inline void sched_cpumask_free(struct allmasks *masks) | ||
7324 | { | ||
7325 | kfree(masks); | ||
7326 | } | ||
7327 | #else | ||
7328 | #define SCHED_CPUMASK_DECLARE(v) struct allmasks _v, *v = &_v | ||
7329 | static inline void sched_cpumask_alloc(struct allmasks **masks) | ||
7330 | { } | ||
7331 | static inline void sched_cpumask_free(struct allmasks *masks) | ||
7332 | { } | ||
7333 | #endif | ||
7334 | |||
7335 | #define SCHED_CPUMASK_VAR(v, a) cpumask_t *v = (cpumask_t *) \ | ||
7336 | ((unsigned long)(a) + offsetof(struct allmasks, v)) | ||
7337 | |||
7338 | static int default_relax_domain_level = -1; | 7430 | static int default_relax_domain_level = -1; |
7339 | 7431 | ||
7340 | static int __init setup_relax_domain_level(char *str) | 7432 | static int __init setup_relax_domain_level(char *str) |
@@ -7374,17 +7466,38 @@ static void set_domain_attribute(struct sched_domain *sd, | |||
7374 | * Build sched domains for a given set of cpus and attach the sched domains | 7466 | * Build sched domains for a given set of cpus and attach the sched domains |
7375 | * to the individual cpus | 7467 | * to the individual cpus |
7376 | */ | 7468 | */ |
7377 | static int __build_sched_domains(const cpumask_t *cpu_map, | 7469 | static int __build_sched_domains(const struct cpumask *cpu_map, |
7378 | struct sched_domain_attr *attr) | 7470 | struct sched_domain_attr *attr) |
7379 | { | 7471 | { |
7380 | int i; | 7472 | int i, err = -ENOMEM; |
7381 | struct root_domain *rd; | 7473 | struct root_domain *rd; |
7382 | SCHED_CPUMASK_DECLARE(allmasks); | 7474 | cpumask_var_t nodemask, this_sibling_map, this_core_map, send_covered, |
7383 | cpumask_t *tmpmask; | 7475 | tmpmask; |
7384 | #ifdef CONFIG_NUMA | 7476 | #ifdef CONFIG_NUMA |
7477 | cpumask_var_t domainspan, covered, notcovered; | ||
7385 | struct sched_group **sched_group_nodes = NULL; | 7478 | struct sched_group **sched_group_nodes = NULL; |
7386 | int sd_allnodes = 0; | 7479 | int sd_allnodes = 0; |
7387 | 7480 | ||
7481 | if (!alloc_cpumask_var(&domainspan, GFP_KERNEL)) | ||
7482 | goto out; | ||
7483 | if (!alloc_cpumask_var(&covered, GFP_KERNEL)) | ||
7484 | goto free_domainspan; | ||
7485 | if (!alloc_cpumask_var(¬covered, GFP_KERNEL)) | ||
7486 | goto free_covered; | ||
7487 | #endif | ||
7488 | |||
7489 | if (!alloc_cpumask_var(&nodemask, GFP_KERNEL)) | ||
7490 | goto free_notcovered; | ||
7491 | if (!alloc_cpumask_var(&this_sibling_map, GFP_KERNEL)) | ||
7492 | goto free_nodemask; | ||
7493 | if (!alloc_cpumask_var(&this_core_map, GFP_KERNEL)) | ||
7494 | goto free_this_sibling_map; | ||
7495 | if (!alloc_cpumask_var(&send_covered, GFP_KERNEL)) | ||
7496 | goto free_this_core_map; | ||
7497 | if (!alloc_cpumask_var(&tmpmask, GFP_KERNEL)) | ||
7498 | goto free_send_covered; | ||
7499 | |||
7500 | #ifdef CONFIG_NUMA | ||
7388 | /* | 7501 | /* |
7389 | * Allocate the per-node list of sched groups | 7502 | * Allocate the per-node list of sched groups |
7390 | */ | 7503 | */ |
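__build_sched_domains() now allocates every scratch mask up front, with err preset to -ENOMEM and a single chain of labels, further down in this patch, to release whatever was obtained. A compact sketch of the shape, with an illustrative demo_build():

/*
 * Compact sketch of the allocate-up-front shape: err defaults to
 * -ENOMEM and one label chain frees whatever was obtained.
 */
#include <linux/cpumask.h>
#include <linux/errno.h>

static int demo_build(void)
{
	int err = -ENOMEM;
	cpumask_var_t a, b;

	if (!alloc_cpumask_var(&a, GFP_KERNEL))
		goto out;
	if (!alloc_cpumask_var(&b, GFP_KERNEL))
		goto free_a;

	/* ... the real work would run here ... */
	err = 0;

	free_cpumask_var(b);
free_a:
	free_cpumask_var(a);
out:
	return err;
}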
@@ -7392,54 +7505,37 @@ static int __build_sched_domains(const cpumask_t *cpu_map, | |||
7392 | GFP_KERNEL); | 7505 | GFP_KERNEL); |
7393 | if (!sched_group_nodes) { | 7506 | if (!sched_group_nodes) { |
7394 | printk(KERN_WARNING "Can not alloc sched group node list\n"); | 7507 | printk(KERN_WARNING "Can not alloc sched group node list\n"); |
7395 | return -ENOMEM; | 7508 | goto free_tmpmask; |
7396 | } | 7509 | } |
7397 | #endif | 7510 | #endif |
7398 | 7511 | ||
7399 | rd = alloc_rootdomain(); | 7512 | rd = alloc_rootdomain(); |
7400 | if (!rd) { | 7513 | if (!rd) { |
7401 | printk(KERN_WARNING "Cannot alloc root domain\n"); | 7514 | printk(KERN_WARNING "Cannot alloc root domain\n"); |
7402 | #ifdef CONFIG_NUMA | 7515 | goto free_sched_groups; |
7403 | kfree(sched_group_nodes); | ||
7404 | #endif | ||
7405 | return -ENOMEM; | ||
7406 | } | 7516 | } |
7407 | 7517 | ||
7408 | /* get space for all scratch cpumask variables */ | ||
7409 | sched_cpumask_alloc(&allmasks); | ||
7410 | if (!allmasks) { | ||
7411 | printk(KERN_WARNING "Cannot alloc cpumask array\n"); | ||
7412 | kfree(rd); | ||
7413 | #ifdef CONFIG_NUMA | 7518 | #ifdef CONFIG_NUMA |
7414 | kfree(sched_group_nodes); | 7519 | sched_group_nodes_bycpu[cpumask_first(cpu_map)] = sched_group_nodes; |
7415 | #endif | ||
7416 | return -ENOMEM; | ||
7417 | } | ||
7418 | |||
7419 | tmpmask = (cpumask_t *)allmasks; | ||
7420 | |||
7421 | |||
7422 | #ifdef CONFIG_NUMA | ||
7423 | sched_group_nodes_bycpu[first_cpu(*cpu_map)] = sched_group_nodes; | ||
7424 | #endif | 7520 | #endif |
7425 | 7521 | ||
7426 | /* | 7522 | /* |
7427 | * Set up domains for cpus specified by the cpu_map. | 7523 | * Set up domains for cpus specified by the cpu_map. |
7428 | */ | 7524 | */ |
7429 | for_each_cpu_mask_nr(i, *cpu_map) { | 7525 | for_each_cpu(i, cpu_map) { |
7430 | struct sched_domain *sd = NULL, *p; | 7526 | struct sched_domain *sd = NULL, *p; |
7431 | SCHED_CPUMASK_VAR(nodemask, allmasks); | ||
7432 | 7527 | ||
7528 | /* FIXME: use cpumask_of_node */ | ||
7433 | *nodemask = node_to_cpumask(cpu_to_node(i)); | 7529 | *nodemask = node_to_cpumask(cpu_to_node(i)); |
7434 | cpus_and(*nodemask, *nodemask, *cpu_map); | 7530 | cpus_and(*nodemask, *nodemask, *cpu_map); |
7435 | 7531 | ||
7436 | #ifdef CONFIG_NUMA | 7532 | #ifdef CONFIG_NUMA |
7437 | if (cpus_weight(*cpu_map) > | 7533 | if (cpumask_weight(cpu_map) > |
7438 | SD_NODES_PER_DOMAIN*cpus_weight(*nodemask)) { | 7534 | SD_NODES_PER_DOMAIN*cpumask_weight(nodemask)) { |
7439 | sd = &per_cpu(allnodes_domains, i); | 7535 | sd = &per_cpu(allnodes_domains, i); |
7440 | SD_INIT(sd, ALLNODES); | 7536 | SD_INIT(sd, ALLNODES); |
7441 | set_domain_attribute(sd, attr); | 7537 | set_domain_attribute(sd, attr); |
7442 | sd->span = *cpu_map; | 7538 | cpumask_copy(sched_domain_span(sd), cpu_map); |
7443 | cpu_to_allnodes_group(i, cpu_map, &sd->groups, tmpmask); | 7539 | cpu_to_allnodes_group(i, cpu_map, &sd->groups, tmpmask); |
7444 | p = sd; | 7540 | p = sd; |
7445 | sd_allnodes = 1; | 7541 | sd_allnodes = 1; |
@@ -7449,18 +7545,19 @@ static int __build_sched_domains(const cpumask_t *cpu_map, | |||
7449 | sd = &per_cpu(node_domains, i); | 7545 | sd = &per_cpu(node_domains, i); |
7450 | SD_INIT(sd, NODE); | 7546 | SD_INIT(sd, NODE); |
7451 | set_domain_attribute(sd, attr); | 7547 | set_domain_attribute(sd, attr); |
7452 | sched_domain_node_span(cpu_to_node(i), &sd->span); | 7548 | sched_domain_node_span(cpu_to_node(i), sched_domain_span(sd)); |
7453 | sd->parent = p; | 7549 | sd->parent = p; |
7454 | if (p) | 7550 | if (p) |
7455 | p->child = sd; | 7551 | p->child = sd; |
7456 | cpus_and(sd->span, sd->span, *cpu_map); | 7552 | cpumask_and(sched_domain_span(sd), |
7553 | sched_domain_span(sd), cpu_map); | ||
7457 | #endif | 7554 | #endif |
7458 | 7555 | ||
7459 | p = sd; | 7556 | p = sd; |
7460 | sd = &per_cpu(phys_domains, i); | 7557 | sd = &per_cpu(phys_domains, i).sd; |
7461 | SD_INIT(sd, CPU); | 7558 | SD_INIT(sd, CPU); |
7462 | set_domain_attribute(sd, attr); | 7559 | set_domain_attribute(sd, attr); |
7463 | sd->span = *nodemask; | 7560 | cpumask_copy(sched_domain_span(sd), nodemask); |
7464 | sd->parent = p; | 7561 | sd->parent = p; |
7465 | if (p) | 7562 | if (p) |
7466 | p->child = sd; | 7563 | p->child = sd; |
@@ -7468,11 +7565,12 @@ static int __build_sched_domains(const cpumask_t *cpu_map, | |||
7468 | 7565 | ||
7469 | #ifdef CONFIG_SCHED_MC | 7566 | #ifdef CONFIG_SCHED_MC |
7470 | p = sd; | 7567 | p = sd; |
7471 | sd = &per_cpu(core_domains, i); | 7568 | sd = &per_cpu(core_domains, i).sd; |
7472 | SD_INIT(sd, MC); | 7569 | SD_INIT(sd, MC); |
7473 | set_domain_attribute(sd, attr); | 7570 | set_domain_attribute(sd, attr); |
7474 | sd->span = cpu_coregroup_map(i); | 7571 | *sched_domain_span(sd) = cpu_coregroup_map(i); |
7475 | cpus_and(sd->span, sd->span, *cpu_map); | 7572 | cpumask_and(sched_domain_span(sd), |
7573 | sched_domain_span(sd), cpu_map); | ||
7476 | sd->parent = p; | 7574 | sd->parent = p; |
7477 | p->child = sd; | 7575 | p->child = sd; |
7478 | cpu_to_core_group(i, cpu_map, &sd->groups, tmpmask); | 7576 | cpu_to_core_group(i, cpu_map, &sd->groups, tmpmask); |
@@ -7480,11 +7578,11 @@ static int __build_sched_domains(const cpumask_t *cpu_map, | |||
7480 | 7578 | ||
7481 | #ifdef CONFIG_SCHED_SMT | 7579 | #ifdef CONFIG_SCHED_SMT |
7482 | p = sd; | 7580 | p = sd; |
7483 | sd = &per_cpu(cpu_domains, i); | 7581 | sd = &per_cpu(cpu_domains, i).sd; |
7484 | SD_INIT(sd, SIBLING); | 7582 | SD_INIT(sd, SIBLING); |
7485 | set_domain_attribute(sd, attr); | 7583 | set_domain_attribute(sd, attr); |
7486 | sd->span = per_cpu(cpu_sibling_map, i); | 7584 | cpumask_and(sched_domain_span(sd), |
7487 | cpus_and(sd->span, sd->span, *cpu_map); | 7585 | &per_cpu(cpu_sibling_map, i), cpu_map); |
7488 | sd->parent = p; | 7586 | sd->parent = p; |
7489 | p->child = sd; | 7587 | p->child = sd; |
7490 | cpu_to_cpu_group(i, cpu_map, &sd->groups, tmpmask); | 7588 | cpu_to_cpu_group(i, cpu_map, &sd->groups, tmpmask); |
@@ -7493,13 +7591,10 @@ static int __build_sched_domains(const cpumask_t *cpu_map, | |||
7493 | 7591 | ||
7494 | #ifdef CONFIG_SCHED_SMT | 7592 | #ifdef CONFIG_SCHED_SMT |
7495 | /* Set up CPU (sibling) groups */ | 7593 | /* Set up CPU (sibling) groups */ |
7496 | for_each_cpu_mask_nr(i, *cpu_map) { | 7594 | for_each_cpu(i, cpu_map) { |
7497 | SCHED_CPUMASK_VAR(this_sibling_map, allmasks); | 7595 | cpumask_and(this_sibling_map, |
7498 | SCHED_CPUMASK_VAR(send_covered, allmasks); | 7596 | &per_cpu(cpu_sibling_map, i), cpu_map); |
7499 | 7597 | if (i != cpumask_first(this_sibling_map)) | |
7500 | *this_sibling_map = per_cpu(cpu_sibling_map, i); | ||
7501 | cpus_and(*this_sibling_map, *this_sibling_map, *cpu_map); | ||
7502 | if (i != first_cpu(*this_sibling_map)) | ||
7503 | continue; | 7598 | continue; |
7504 | 7599 | ||
7505 | init_sched_build_groups(this_sibling_map, cpu_map, | 7600 | init_sched_build_groups(this_sibling_map, cpu_map, |
@@ -7510,13 +7605,11 @@ static int __build_sched_domains(const cpumask_t *cpu_map, | |||
7510 | 7605 | ||
7511 | #ifdef CONFIG_SCHED_MC | 7606 | #ifdef CONFIG_SCHED_MC |
7512 | /* Set up multi-core groups */ | 7607 | /* Set up multi-core groups */ |
7513 | for_each_cpu_mask_nr(i, *cpu_map) { | 7608 | for_each_cpu(i, cpu_map) { |
7514 | SCHED_CPUMASK_VAR(this_core_map, allmasks); | 7609 | /* FIXME: Use cpu_coregroup_mask */ |
7515 | SCHED_CPUMASK_VAR(send_covered, allmasks); | ||
7516 | |||
7517 | *this_core_map = cpu_coregroup_map(i); | 7610 | *this_core_map = cpu_coregroup_map(i); |
7518 | cpus_and(*this_core_map, *this_core_map, *cpu_map); | 7611 | cpus_and(*this_core_map, *this_core_map, *cpu_map); |
7519 | if (i != first_cpu(*this_core_map)) | 7612 | if (i != cpumask_first(this_core_map)) |
7520 | continue; | 7613 | continue; |
7521 | 7614 | ||
7522 | init_sched_build_groups(this_core_map, cpu_map, | 7615 | init_sched_build_groups(this_core_map, cpu_map, |
@@ -7527,12 +7620,10 @@ static int __build_sched_domains(const cpumask_t *cpu_map, | |||
7527 | 7620 | ||
7528 | /* Set up physical groups */ | 7621 | /* Set up physical groups */ |
7529 | for (i = 0; i < nr_node_ids; i++) { | 7622 | for (i = 0; i < nr_node_ids; i++) { |
7530 | SCHED_CPUMASK_VAR(nodemask, allmasks); | 7623 | /* FIXME: Use cpumask_of_node */ |
7531 | SCHED_CPUMASK_VAR(send_covered, allmasks); | ||
7532 | |||
7533 | *nodemask = node_to_cpumask(i); | 7624 | *nodemask = node_to_cpumask(i); |
7534 | cpus_and(*nodemask, *nodemask, *cpu_map); | 7625 | cpus_and(*nodemask, *nodemask, *cpu_map); |
7535 | if (cpus_empty(*nodemask)) | 7626 | if (cpumask_empty(nodemask)) |
7536 | continue; | 7627 | continue; |
7537 | 7628 | ||
7538 | init_sched_build_groups(nodemask, cpu_map, | 7629 | init_sched_build_groups(nodemask, cpu_map, |
@@ -7543,8 +7634,6 @@ static int __build_sched_domains(const cpumask_t *cpu_map, | |||
7543 | #ifdef CONFIG_NUMA | 7634 | #ifdef CONFIG_NUMA |
7544 | /* Set up node groups */ | 7635 | /* Set up node groups */ |
7545 | if (sd_allnodes) { | 7636 | if (sd_allnodes) { |
7546 | SCHED_CPUMASK_VAR(send_covered, allmasks); | ||
7547 | |||
7548 | init_sched_build_groups(cpu_map, cpu_map, | 7637 | init_sched_build_groups(cpu_map, cpu_map, |
7549 | &cpu_to_allnodes_group, | 7638 | &cpu_to_allnodes_group, |
7550 | send_covered, tmpmask); | 7639 | send_covered, tmpmask); |
@@ -7553,58 +7642,58 @@ static int __build_sched_domains(const cpumask_t *cpu_map, | |||
7553 | for (i = 0; i < nr_node_ids; i++) { | 7642 | for (i = 0; i < nr_node_ids; i++) { |
7554 | /* Set up node groups */ | 7643 | /* Set up node groups */ |
7555 | struct sched_group *sg, *prev; | 7644 | struct sched_group *sg, *prev; |
7556 | SCHED_CPUMASK_VAR(nodemask, allmasks); | ||
7557 | SCHED_CPUMASK_VAR(domainspan, allmasks); | ||
7558 | SCHED_CPUMASK_VAR(covered, allmasks); | ||
7559 | int j; | 7645 | int j; |
7560 | 7646 | ||
7647 | /* FIXME: Use cpumask_of_node */ | ||
7561 | *nodemask = node_to_cpumask(i); | 7648 | *nodemask = node_to_cpumask(i); |
7562 | cpus_clear(*covered); | 7649 | cpumask_clear(covered); |
7563 | 7650 | ||
7564 | cpus_and(*nodemask, *nodemask, *cpu_map); | 7651 | cpus_and(*nodemask, *nodemask, *cpu_map); |
7565 | if (cpus_empty(*nodemask)) { | 7652 | if (cpumask_empty(nodemask)) { |
7566 | sched_group_nodes[i] = NULL; | 7653 | sched_group_nodes[i] = NULL; |
7567 | continue; | 7654 | continue; |
7568 | } | 7655 | } |
7569 | 7656 | ||
7570 | sched_domain_node_span(i, domainspan); | 7657 | sched_domain_node_span(i, domainspan); |
7571 | cpus_and(*domainspan, *domainspan, *cpu_map); | 7658 | cpumask_and(domainspan, domainspan, cpu_map); |
7572 | 7659 | ||
7573 | sg = kmalloc_node(sizeof(struct sched_group), GFP_KERNEL, i); | 7660 | sg = kmalloc_node(sizeof(struct sched_group) + cpumask_size(), |
7661 | GFP_KERNEL, i); | ||
7574 | if (!sg) { | 7662 | if (!sg) { |
7575 | printk(KERN_WARNING "Can not alloc domain group for " | 7663 | printk(KERN_WARNING "Can not alloc domain group for " |
7576 | "node %d\n", i); | 7664 | "node %d\n", i); |
7577 | goto error; | 7665 | goto error; |
7578 | } | 7666 | } |
7579 | sched_group_nodes[i] = sg; | 7667 | sched_group_nodes[i] = sg; |
7580 | for_each_cpu_mask_nr(j, *nodemask) { | 7668 | for_each_cpu(j, nodemask) { |
7581 | struct sched_domain *sd; | 7669 | struct sched_domain *sd; |
7582 | 7670 | ||
7583 | sd = &per_cpu(node_domains, j); | 7671 | sd = &per_cpu(node_domains, j); |
7584 | sd->groups = sg; | 7672 | sd->groups = sg; |
7585 | } | 7673 | } |
7586 | sg->__cpu_power = 0; | 7674 | sg->__cpu_power = 0; |
7587 | sg->cpumask = *nodemask; | 7675 | cpumask_copy(sched_group_cpus(sg), nodemask); |
7588 | sg->next = sg; | 7676 | sg->next = sg; |
7589 | cpus_or(*covered, *covered, *nodemask); | 7677 | cpumask_or(covered, covered, nodemask); |
7590 | prev = sg; | 7678 | prev = sg; |
7591 | 7679 | ||
7592 | for (j = 0; j < nr_node_ids; j++) { | 7680 | for (j = 0; j < nr_node_ids; j++) { |
7593 | SCHED_CPUMASK_VAR(notcovered, allmasks); | ||
7594 | int n = (i + j) % nr_node_ids; | 7681 | int n = (i + j) % nr_node_ids; |
7682 | /* FIXME: Use cpumask_of_node */ | ||
7595 | node_to_cpumask_ptr(pnodemask, n); | 7683 | node_to_cpumask_ptr(pnodemask, n); |
7596 | 7684 | ||
7597 | cpus_complement(*notcovered, *covered); | 7685 | cpumask_complement(notcovered, covered); |
7598 | cpus_and(*tmpmask, *notcovered, *cpu_map); | 7686 | cpumask_and(tmpmask, notcovered, cpu_map); |
7599 | cpus_and(*tmpmask, *tmpmask, *domainspan); | 7687 | cpumask_and(tmpmask, tmpmask, domainspan); |
7600 | if (cpus_empty(*tmpmask)) | 7688 | if (cpumask_empty(tmpmask)) |
7601 | break; | 7689 | break; |
7602 | 7690 | ||
7603 | cpus_and(*tmpmask, *tmpmask, *pnodemask); | 7691 | cpumask_and(tmpmask, tmpmask, pnodemask); |
7604 | if (cpus_empty(*tmpmask)) | 7692 | if (cpumask_empty(tmpmask)) |
7605 | continue; | 7693 | continue; |
7606 | 7694 | ||
7607 | sg = kmalloc_node(sizeof(struct sched_group), | 7695 | sg = kmalloc_node(sizeof(struct sched_group) + |
7696 | cpumask_size(), | ||
7608 | GFP_KERNEL, i); | 7697 | GFP_KERNEL, i); |
7609 | if (!sg) { | 7698 | if (!sg) { |
7610 | printk(KERN_WARNING | 7699 | printk(KERN_WARNING |
@@ -7612,9 +7701,9 @@ static int __build_sched_domains(const cpumask_t *cpu_map, | |||
7612 | goto error; | 7701 | goto error; |
7613 | } | 7702 | } |
7614 | sg->__cpu_power = 0; | 7703 | sg->__cpu_power = 0; |
7615 | sg->cpumask = *tmpmask; | 7704 | cpumask_copy(sched_group_cpus(sg), tmpmask); |
7616 | sg->next = prev->next; | 7705 | sg->next = prev->next; |
7617 | cpus_or(*covered, *covered, *tmpmask); | 7706 | cpumask_or(covered, covered, tmpmask); |
7618 | prev->next = sg; | 7707 | prev->next = sg; |
7619 | prev = sg; | 7708 | prev = sg; |
7620 | } | 7709 | } |
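The node-group loop above composes its masks step by step: complement the covered set, intersect with cpu_map and the domain span, bail out if nothing remains, then restrict to the node in question. A sketch that compresses the same algebra into one helper; the original's break-versus-continue distinction is folded into a single boolean here, and all masks are caller-allocated scratch:

/*
 * Sketch of the mask algebra: "CPUs in the domain span not yet
 * covered, restricted to node n". next_uncovered() is illustrative.
 */
#include <linux/cpumask.h>
#include <linux/types.h>

static bool next_uncovered(struct cpumask *tmp,
			   const struct cpumask *covered,
			   const struct cpumask *cpu_map,
			   const struct cpumask *domainspan,
			   const struct cpumask *nodemask)
{
	cpumask_complement(tmp, covered);	/* not yet covered */
	cpumask_and(tmp, tmp, cpu_map);
	cpumask_and(tmp, tmp, domainspan);
	if (cpumask_empty(tmp))
		return false;			/* span exhausted */
	cpumask_and(tmp, tmp, nodemask);	/* restrict to node */
	return !cpumask_empty(tmp);
}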
@@ -7623,22 +7712,22 @@ static int __build_sched_domains(const cpumask_t *cpu_map, | |||
7623 | 7712 | ||
7624 | /* Calculate CPU power for physical packages and nodes */ | 7713 | /* Calculate CPU power for physical packages and nodes */ |
7625 | #ifdef CONFIG_SCHED_SMT | 7714 | #ifdef CONFIG_SCHED_SMT |
7626 | for_each_cpu_mask_nr(i, *cpu_map) { | 7715 | for_each_cpu(i, cpu_map) { |
7627 | struct sched_domain *sd = &per_cpu(cpu_domains, i); | 7716 | struct sched_domain *sd = &per_cpu(cpu_domains, i).sd; |
7628 | 7717 | ||
7629 | init_sched_groups_power(i, sd); | 7718 | init_sched_groups_power(i, sd); |
7630 | } | 7719 | } |
7631 | #endif | 7720 | #endif |
7632 | #ifdef CONFIG_SCHED_MC | 7721 | #ifdef CONFIG_SCHED_MC |
7633 | for_each_cpu_mask_nr(i, *cpu_map) { | 7722 | for_each_cpu(i, cpu_map) { |
7634 | struct sched_domain *sd = &per_cpu(core_domains, i); | 7723 | struct sched_domain *sd = &per_cpu(core_domains, i).sd; |
7635 | 7724 | ||
7636 | init_sched_groups_power(i, sd); | 7725 | init_sched_groups_power(i, sd); |
7637 | } | 7726 | } |
7638 | #endif | 7727 | #endif |
7639 | 7728 | ||
7640 | for_each_cpu_mask_nr(i, *cpu_map) { | 7729 | for_each_cpu(i, cpu_map) { |
7641 | struct sched_domain *sd = &per_cpu(phys_domains, i); | 7730 | struct sched_domain *sd = &per_cpu(phys_domains, i).sd; |
7642 | 7731 | ||
7643 | init_sched_groups_power(i, sd); | 7732 | init_sched_groups_power(i, sd); |
7644 | } | 7733 | } |
@@ -7650,53 +7739,78 @@ static int __build_sched_domains(const cpumask_t *cpu_map, | |||
7650 | if (sd_allnodes) { | 7739 | if (sd_allnodes) { |
7651 | struct sched_group *sg; | 7740 | struct sched_group *sg; |
7652 | 7741 | ||
7653 | cpu_to_allnodes_group(first_cpu(*cpu_map), cpu_map, &sg, | 7742 | cpu_to_allnodes_group(cpumask_first(cpu_map), cpu_map, &sg, |
7654 | tmpmask); | 7743 | tmpmask); |
7655 | init_numa_sched_groups_power(sg); | 7744 | init_numa_sched_groups_power(sg); |
7656 | } | 7745 | } |
7657 | #endif | 7746 | #endif |
7658 | 7747 | ||
7659 | /* Attach the domains */ | 7748 | /* Attach the domains */ |
7660 | for_each_cpu_mask_nr(i, *cpu_map) { | 7749 | for_each_cpu(i, cpu_map) { |
7661 | struct sched_domain *sd; | 7750 | struct sched_domain *sd; |
7662 | #ifdef CONFIG_SCHED_SMT | 7751 | #ifdef CONFIG_SCHED_SMT |
7663 | sd = &per_cpu(cpu_domains, i); | 7752 | sd = &per_cpu(cpu_domains, i).sd; |
7664 | #elif defined(CONFIG_SCHED_MC) | 7753 | #elif defined(CONFIG_SCHED_MC) |
7665 | sd = &per_cpu(core_domains, i); | 7754 | sd = &per_cpu(core_domains, i).sd; |
7666 | #else | 7755 | #else |
7667 | sd = &per_cpu(phys_domains, i); | 7756 | sd = &per_cpu(phys_domains, i).sd; |
7668 | #endif | 7757 | #endif |
7669 | cpu_attach_domain(sd, rd, i); | 7758 | cpu_attach_domain(sd, rd, i); |
7670 | } | 7759 | } |
7671 | 7760 | ||
7672 | sched_cpumask_free(allmasks); | 7761 | err = 0; |
7673 | return 0; | 7762 | |
7763 | free_tmpmask: | ||
7764 | free_cpumask_var(tmpmask); | ||
7765 | free_send_covered: | ||
7766 | free_cpumask_var(send_covered); | ||
7767 | free_this_core_map: | ||
7768 | free_cpumask_var(this_core_map); | ||
7769 | free_this_sibling_map: | ||
7770 | free_cpumask_var(this_sibling_map); | ||
7771 | free_nodemask: | ||
7772 | free_cpumask_var(nodemask); | ||
7773 | free_notcovered: | ||
7774 | #ifdef CONFIG_NUMA | ||
7775 | free_cpumask_var(notcovered); | ||
7776 | free_covered: | ||
7777 | free_cpumask_var(covered); | ||
7778 | free_domainspan: | ||
7779 | free_cpumask_var(domainspan); | ||
7780 | out: | ||
7781 | #endif | ||
7782 | return err; | ||
7783 | |||
7784 | free_sched_groups: | ||
7785 | #ifdef CONFIG_NUMA | ||
7786 | kfree(sched_group_nodes); | ||
7787 | #endif | ||
7788 | goto free_tmpmask; | ||
7674 | 7789 | ||
7675 | #ifdef CONFIG_NUMA | 7790 | #ifdef CONFIG_NUMA |
7676 | error: | 7791 | error: |
7677 | free_sched_groups(cpu_map, tmpmask); | 7792 | free_sched_groups(cpu_map, tmpmask); |
7678 | sched_cpumask_free(allmasks); | 7793 | free_rootdomain(rd); |
7679 | kfree(rd); | 7794 | goto free_tmpmask; |
7680 | return -ENOMEM; | ||
7681 | #endif | 7795 | #endif |
7682 | } | 7796 | } |
7683 | 7797 | ||
7684 | static int build_sched_domains(const cpumask_t *cpu_map) | 7798 | static int build_sched_domains(const struct cpumask *cpu_map) |
7685 | { | 7799 | { |
7686 | return __build_sched_domains(cpu_map, NULL); | 7800 | return __build_sched_domains(cpu_map, NULL); |
7687 | } | 7801 | } |
7688 | 7802 | ||
7689 | static cpumask_t *doms_cur; /* current sched domains */ | 7803 | static struct cpumask *doms_cur; /* current sched domains */ |
7690 | static int ndoms_cur; /* number of sched domains in 'doms_cur' */ | 7804 | static int ndoms_cur; /* number of sched domains in 'doms_cur' */ |
7691 | static struct sched_domain_attr *dattr_cur; | 7805 | static struct sched_domain_attr *dattr_cur; |
7692 | /* attributes of custom domains in 'doms_cur' */ | 7806 | /* attributes of custom domains in 'doms_cur' */ |
7693 | 7807 | ||
7694 | /* | 7808 | /* |
7695 | * Special case: If a kmalloc of a doms_cur partition (array of | 7809 | * Special case: If a kmalloc of a doms_cur partition (array of |
7696 | * cpumask_t) fails, then fall back to a single sched domain, | 7810 | * cpumask) fails, then fall back to a single sched domain, |
7697 | * as determined by the single cpumask_t fallback_doms. | 7811 | * as determined by the single cpumask fallback_doms. |
7698 | */ | 7812 | */ |
7699 | static cpumask_t fallback_doms; | 7813 | static cpumask_var_t fallback_doms; |
7700 | 7814 | ||
7701 | /* | 7815 | /* |
7702 | * arch_update_cpu_topology lets virtualized architectures update the | 7816 | * arch_update_cpu_topology lets virtualized architectures update the |
@@ -7713,16 +7827,16 @@ int __attribute__((weak)) arch_update_cpu_topology(void) | |||
7713 | * For now this just excludes isolated cpus, but could be used to | 7827 | * For now this just excludes isolated cpus, but could be used to |
7714 | * exclude other special cases in the future. | 7828 | * exclude other special cases in the future. |
7715 | */ | 7829 | */ |
7716 | static int arch_init_sched_domains(const cpumask_t *cpu_map) | 7830 | static int arch_init_sched_domains(const struct cpumask *cpu_map) |
7717 | { | 7831 | { |
7718 | int err; | 7832 | int err; |
7719 | 7833 | ||
7720 | arch_update_cpu_topology(); | 7834 | arch_update_cpu_topology(); |
7721 | ndoms_cur = 1; | 7835 | ndoms_cur = 1; |
7722 | doms_cur = kmalloc(sizeof(cpumask_t), GFP_KERNEL); | 7836 | doms_cur = kmalloc(cpumask_size(), GFP_KERNEL); |
7723 | if (!doms_cur) | 7837 | if (!doms_cur) |
7724 | doms_cur = &fallback_doms; | 7838 | doms_cur = fallback_doms; |
7725 | cpus_andnot(*doms_cur, *cpu_map, cpu_isolated_map); | 7839 | cpumask_andnot(doms_cur, cpu_map, cpu_isolated_map); |
7726 | dattr_cur = NULL; | 7840 | dattr_cur = NULL; |
7727 | err = build_sched_domains(doms_cur); | 7841 | err = build_sched_domains(doms_cur); |
7728 | register_sched_domain_sysctl(); | 7842 | register_sched_domain_sysctl(); |
@@ -7730,8 +7844,8 @@ static int arch_init_sched_domains(const cpumask_t *cpu_map) | |||
7730 | return err; | 7844 | return err; |
7731 | } | 7845 | } |
7732 | 7846 | ||
7733 | static void arch_destroy_sched_domains(const cpumask_t *cpu_map, | 7847 | static void arch_destroy_sched_domains(const struct cpumask *cpu_map, |
7734 | cpumask_t *tmpmask) | 7848 | struct cpumask *tmpmask) |
7735 | { | 7849 | { |
7736 | free_sched_groups(cpu_map, tmpmask); | 7850 | free_sched_groups(cpu_map, tmpmask); |
7737 | } | 7851 | } |
@@ -7740,15 +7854,16 @@ static void arch_destroy_sched_domains(const cpumask_t *cpu_map, | |||
7740 | * Detach sched domains from a group of cpus specified in cpu_map | 7854 | * Detach sched domains from a group of cpus specified in cpu_map |
7741 | * These cpus will now be attached to the NULL domain | 7855 | * These cpus will now be attached to the NULL domain |
7742 | */ | 7856 | */ |
7743 | static void detach_destroy_domains(const cpumask_t *cpu_map) | 7857 | static void detach_destroy_domains(const struct cpumask *cpu_map) |
7744 | { | 7858 | { |
7745 | cpumask_t tmpmask; | 7859 | /* Save because hotplug lock held. */ |
7860 | static DECLARE_BITMAP(tmpmask, CONFIG_NR_CPUS); | ||
7746 | int i; | 7861 | int i; |
7747 | 7862 | ||
7748 | for_each_cpu_mask_nr(i, *cpu_map) | 7863 | for_each_cpu(i, cpu_map) |
7749 | cpu_attach_domain(NULL, &def_root_domain, i); | 7864 | cpu_attach_domain(NULL, &def_root_domain, i); |
7750 | synchronize_sched(); | 7865 | synchronize_sched(); |
7751 | arch_destroy_sched_domains(cpu_map, &tmpmask); | 7866 | arch_destroy_sched_domains(cpu_map, to_cpumask(tmpmask)); |
7752 | } | 7867 | } |
7753 | 7868 | ||
7754 | /* handle null as "default" */ | 7869 | /* handle null as "default" */ |
@@ -7773,7 +7888,7 @@ static int dattrs_equal(struct sched_domain_attr *cur, int idx_cur, | |||
7773 | * doms_new[] to the current sched domain partitioning, doms_cur[]. | 7888 | * doms_new[] to the current sched domain partitioning, doms_cur[]. |
7774 | * It destroys each deleted domain and builds each new domain. | 7889 | * It destroys each deleted domain and builds each new domain. |
7775 | * | 7890 | * |
7776 | * 'doms_new' is an array of cpumask_t's of length 'ndoms_new'. | 7891 | * 'doms_new' is an array of cpumask's of length 'ndoms_new'. |
7777 | * The masks don't intersect (don't overlap). We should set up one | 7892 | * The masks don't intersect (don't overlap). We should set up one |
7778 | * sched domain for each mask. CPUs not in any of the cpumasks will | 7893 | * sched domain for each mask. CPUs not in any of the cpumasks will |
7779 | * not be load balanced. If the same cpumask appears both in the | 7894 | * not be load balanced. If the same cpumask appears both in the |
@@ -7787,13 +7902,14 @@ static int dattrs_equal(struct sched_domain_attr *cur, int idx_cur, | |||
7787 | * the single partition 'fallback_doms', it also forces the domains | 7902 | * the single partition 'fallback_doms', it also forces the domains |
7788 | * to be rebuilt. | 7903 | * to be rebuilt. |
7789 | * | 7904 | * |
7790 | * If doms_new == NULL it will be replaced with cpu_online_map. | 7905 | * If doms_new == NULL it will be replaced with cpu_online_mask. |
7791 | * ndoms_new == 0 is a special case for destroying existing domains, | 7906 | * ndoms_new == 0 is a special case for destroying existing domains, |
7792 | * and it will not create the default domain. | 7907 | * and it will not create the default domain. |
7793 | * | 7908 | * |
7794 | * Call with hotplug lock held | 7909 | * Call with hotplug lock held |
7795 | */ | 7910 | */ |
7796 | void partition_sched_domains(int ndoms_new, cpumask_t *doms_new, | 7911 | /* FIXME: Change to struct cpumask *doms_new[] */ |
7912 | void partition_sched_domains(int ndoms_new, struct cpumask *doms_new, | ||
7797 | struct sched_domain_attr *dattr_new) | 7913 | struct sched_domain_attr *dattr_new) |
7798 | { | 7914 | { |
7799 | int i, j, n; | 7915 | int i, j, n; |
@@ -7812,7 +7928,7 @@ void partition_sched_domains(int ndoms_new, cpumask_t *doms_new, | |||
7812 | /* Destroy deleted domains */ | 7928 | /* Destroy deleted domains */ |
7813 | for (i = 0; i < ndoms_cur; i++) { | 7929 | for (i = 0; i < ndoms_cur; i++) { |
7814 | for (j = 0; j < n && !new_topology; j++) { | 7930 | for (j = 0; j < n && !new_topology; j++) { |
7815 | if (cpus_equal(doms_cur[i], doms_new[j]) | 7931 | if (cpumask_equal(&doms_cur[i], &doms_new[j]) |
7816 | && dattrs_equal(dattr_cur, i, dattr_new, j)) | 7932 | && dattrs_equal(dattr_cur, i, dattr_new, j)) |
7817 | goto match1; | 7933 | goto match1; |
7818 | } | 7934 | } |
@@ -7824,15 +7940,15 @@ match1: | |||
7824 | 7940 | ||
7825 | if (doms_new == NULL) { | 7941 | if (doms_new == NULL) { |
7826 | ndoms_cur = 0; | 7942 | ndoms_cur = 0; |
7827 | doms_new = &fallback_doms; | 7943 | doms_new = fallback_doms; |
7828 | cpus_andnot(doms_new[0], cpu_online_map, cpu_isolated_map); | 7944 | cpumask_andnot(&doms_new[0], cpu_online_mask, cpu_isolated_map); |
7829 | WARN_ON_ONCE(dattr_new); | 7945 | WARN_ON_ONCE(dattr_new); |
7830 | } | 7946 | } |
7831 | 7947 | ||
7832 | /* Build new domains */ | 7948 | /* Build new domains */ |
7833 | for (i = 0; i < ndoms_new; i++) { | 7949 | for (i = 0; i < ndoms_new; i++) { |
7834 | for (j = 0; j < ndoms_cur && !new_topology; j++) { | 7950 | for (j = 0; j < ndoms_cur && !new_topology; j++) { |
7835 | if (cpus_equal(doms_new[i], doms_cur[j]) | 7951 | if (cpumask_equal(&doms_new[i], &doms_cur[j]) |
7836 | && dattrs_equal(dattr_new, i, dattr_cur, j)) | 7952 | && dattrs_equal(dattr_new, i, dattr_cur, j)) |
7837 | goto match2; | 7953 | goto match2; |
7838 | } | 7954 | } |
@@ -7844,7 +7960,7 @@ match2: | |||
7844 | } | 7960 | } |
7845 | 7961 | ||
7846 | /* Remember the new sched domains */ | 7962 | /* Remember the new sched domains */ |
7847 | if (doms_cur != &fallback_doms) | 7963 | if (doms_cur != fallback_doms) |
7848 | kfree(doms_cur); | 7964 | kfree(doms_cur); |
7849 | kfree(dattr_cur); /* kfree(NULL) is safe */ | 7965 | kfree(dattr_cur); /* kfree(NULL) is safe */ |
7850 | doms_cur = doms_new; | 7966 | doms_cur = doms_new; |
@@ -7873,14 +7989,25 @@ int arch_reinit_sched_domains(void) | |||
7873 | static ssize_t sched_power_savings_store(const char *buf, size_t count, int smt) | 7989 | static ssize_t sched_power_savings_store(const char *buf, size_t count, int smt) |
7874 | { | 7990 | { |
7875 | int ret; | 7991 | int ret; |
7992 | unsigned int level = 0; | ||
7876 | 7993 | ||
7877 | if (buf[0] != '0' && buf[0] != '1') | 7994 | if (sscanf(buf, "%u", &level) != 1) |
7995 | return -EINVAL; | ||
7996 | |||
7997 | /* | ||
7998 | * level is always positive, so don't check for | ||
7999 | * level < POWERSAVINGS_BALANCE_NONE, which is 0. | ||
8000 | * What happens on a 0 or 1 byte write - do we | ||
8001 | * need to check count as well? | ||
8002 | */ | ||
8003 | |||
8004 | if (level >= MAX_POWERSAVINGS_BALANCE_LEVELS) | ||
7878 | return -EINVAL; | 8005 | return -EINVAL; |
7879 | 8006 | ||
7880 | if (smt) | 8007 | if (smt) |
7881 | sched_smt_power_savings = (buf[0] == '1'); | 8008 | sched_smt_power_savings = level; |
7882 | else | 8009 | else |
7883 | sched_mc_power_savings = (buf[0] == '1'); | 8010 | sched_mc_power_savings = level; |
7884 | 8011 | ||
7885 | ret = arch_reinit_sched_domains(); | 8012 | ret = arch_reinit_sched_domains(); |
7886 | 8013 | ||
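sched_power_savings_store() above widens its input handling from a literal '0'/'1' check to sscanf() plus an explicit upper-bound test, so multi-level power-savings values can be written through sysfs. A sketch of that parse-then-range-check shape, with a hypothetical DEMO_MAX_LEVELS bound and demo_store() helper:

/*
 * Sketch: sscanf() accepts any unsigned value; only the explicit
 * bound keeps it in range. Names here are hypothetical.
 */
#include <linux/kernel.h>
#include <linux/errno.h>

#define DEMO_MAX_LEVELS	3

static ssize_t demo_store(const char *buf, size_t count, int *level_out)
{
	unsigned int level;

	if (sscanf(buf, "%u", &level) != 1)
		return -EINVAL;		/* not a number */
	if (level >= DEMO_MAX_LEVELS)
		return -EINVAL;		/* out of range */

	*level_out = level;
	return count;
}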
@@ -7984,7 +8111,9 @@ static int update_runtime(struct notifier_block *nfb, | |||
7984 | 8111 | ||
7985 | void __init sched_init_smp(void) | 8112 | void __init sched_init_smp(void) |
7986 | { | 8113 | { |
7987 | cpumask_t non_isolated_cpus; | 8114 | cpumask_var_t non_isolated_cpus; |
8115 | |||
8116 | alloc_cpumask_var(&non_isolated_cpus, GFP_KERNEL); | ||
7988 | 8117 | ||
7989 | #if defined(CONFIG_NUMA) | 8118 | #if defined(CONFIG_NUMA) |
7990 | sched_group_nodes_bycpu = kzalloc(nr_cpu_ids * sizeof(void **), | 8119 | sched_group_nodes_bycpu = kzalloc(nr_cpu_ids * sizeof(void **), |
@@ -7993,10 +8122,10 @@ void __init sched_init_smp(void) | |||
7993 | #endif | 8122 | #endif |
7994 | get_online_cpus(); | 8123 | get_online_cpus(); |
7995 | mutex_lock(&sched_domains_mutex); | 8124 | mutex_lock(&sched_domains_mutex); |
7996 | arch_init_sched_domains(&cpu_online_map); | 8125 | arch_init_sched_domains(cpu_online_mask); |
7997 | cpus_andnot(non_isolated_cpus, cpu_possible_map, cpu_isolated_map); | 8126 | cpumask_andnot(non_isolated_cpus, cpu_possible_mask, cpu_isolated_map); |
7998 | if (cpus_empty(non_isolated_cpus)) | 8127 | if (cpumask_empty(non_isolated_cpus)) |
7999 | cpu_set(smp_processor_id(), non_isolated_cpus); | 8128 | cpumask_set_cpu(smp_processor_id(), non_isolated_cpus); |
8000 | mutex_unlock(&sched_domains_mutex); | 8129 | mutex_unlock(&sched_domains_mutex); |
8001 | put_online_cpus(); | 8130 | put_online_cpus(); |
8002 | 8131 | ||
@@ -8011,9 +8140,13 @@ void __init sched_init_smp(void) | |||
8011 | init_hrtick(); | 8140 | init_hrtick(); |
8012 | 8141 | ||
8013 | /* Move init over to a non-isolated CPU */ | 8142 | /* Move init over to a non-isolated CPU */ |
8014 | if (set_cpus_allowed_ptr(current, &non_isolated_cpus) < 0) | 8143 | if (set_cpus_allowed_ptr(current, non_isolated_cpus) < 0) |
8015 | BUG(); | 8144 | BUG(); |
8016 | sched_init_granularity(); | 8145 | sched_init_granularity(); |
8146 | free_cpumask_var(non_isolated_cpus); | ||
8147 | |||
8148 | alloc_cpumask_var(&fallback_doms, GFP_KERNEL); | ||
8149 | init_sched_rt_class(); | ||
8017 | } | 8150 | } |
8018 | #else | 8151 | #else |
8019 | void __init sched_init_smp(void) | 8152 | void __init sched_init_smp(void) |
@@ -8328,6 +8461,15 @@ void __init sched_init(void) | |||
8328 | */ | 8461 | */ |
8329 | current->sched_class = &fair_sched_class; | 8462 | current->sched_class = &fair_sched_class; |
8330 | 8463 | ||
8464 | /* Allocate the nohz_cpu_mask if CONFIG_CPUMASK_OFFSTACK */ | ||
8465 | alloc_bootmem_cpumask_var(&nohz_cpu_mask); | ||
8466 | #ifdef CONFIG_SMP | ||
8467 | #ifdef CONFIG_NO_HZ | ||
8468 | alloc_bootmem_cpumask_var(&nohz.cpu_mask); | ||
8469 | #endif | ||
8470 | alloc_bootmem_cpumask_var(&cpu_isolated_map); | ||
8471 | #endif /* SMP */ | ||
8472 | |||
8331 | scheduler_running = 1; | 8473 | scheduler_running = 1; |
8332 | } | 8474 | } |
8333 | 8475 | ||
diff --git a/kernel/sched_cpupri.c b/kernel/sched_cpupri.c index 52154fefab7e..018b7be1db2e 100644 --- a/kernel/sched_cpupri.c +++ b/kernel/sched_cpupri.c | |||
@@ -67,24 +67,21 @@ static int convert_prio(int prio) | |||
67 | * Returns: (int)bool - CPUs were found | 67 | * Returns: (int)bool - CPUs were found |
68 | */ | 68 | */ |
69 | int cpupri_find(struct cpupri *cp, struct task_struct *p, | 69 | int cpupri_find(struct cpupri *cp, struct task_struct *p, |
70 | cpumask_t *lowest_mask) | 70 | struct cpumask *lowest_mask) |
71 | { | 71 | { |
72 | int idx = 0; | 72 | int idx = 0; |
73 | int task_pri = convert_prio(p->prio); | 73 | int task_pri = convert_prio(p->prio); |
74 | 74 | ||
75 | for_each_cpupri_active(cp->pri_active, idx) { | 75 | for_each_cpupri_active(cp->pri_active, idx) { |
76 | struct cpupri_vec *vec = &cp->pri_to_cpu[idx]; | 76 | struct cpupri_vec *vec = &cp->pri_to_cpu[idx]; |
77 | cpumask_t mask; | ||
78 | 77 | ||
79 | if (idx >= task_pri) | 78 | if (idx >= task_pri) |
80 | break; | 79 | break; |
81 | 80 | ||
82 | cpus_and(mask, p->cpus_allowed, vec->mask); | 81 | if (cpumask_any_and(&p->cpus_allowed, vec->mask) >= nr_cpu_ids) |
83 | |||
84 | if (cpus_empty(mask)) | ||
85 | continue; | 82 | continue; |
86 | 83 | ||
87 | *lowest_mask = mask; | 84 | cpumask_and(lowest_mask, &p->cpus_allowed, vec->mask); |
88 | return 1; | 85 | return 1; |
89 | } | 86 | } |
90 | 87 | ||
@@ -126,7 +123,7 @@ void cpupri_set(struct cpupri *cp, int cpu, int newpri) | |||
126 | vec->count--; | 123 | vec->count--; |
127 | if (!vec->count) | 124 | if (!vec->count) |
128 | clear_bit(oldpri, cp->pri_active); | 125 | clear_bit(oldpri, cp->pri_active); |
129 | cpu_clear(cpu, vec->mask); | 126 | cpumask_clear_cpu(cpu, vec->mask); |
130 | 127 | ||
131 | spin_unlock_irqrestore(&vec->lock, flags); | 128 | spin_unlock_irqrestore(&vec->lock, flags); |
132 | } | 129 | } |
@@ -136,7 +133,7 @@ void cpupri_set(struct cpupri *cp, int cpu, int newpri) | |||
136 | 133 | ||
137 | spin_lock_irqsave(&vec->lock, flags); | 134 | spin_lock_irqsave(&vec->lock, flags); |
138 | 135 | ||
139 | cpu_set(cpu, vec->mask); | 136 | cpumask_set_cpu(cpu, vec->mask); |
140 | vec->count++; | 137 | vec->count++; |
141 | if (vec->count == 1) | 138 | if (vec->count == 1) |
142 | set_bit(newpri, cp->pri_active); | 139 | set_bit(newpri, cp->pri_active); |
@@ -150,10 +147,11 @@ void cpupri_set(struct cpupri *cp, int cpu, int newpri) | |||
150 | /** | 147 | /** |
151 | * cpupri_init - initialize the cpupri structure | 148 | * cpupri_init - initialize the cpupri structure |
152 | * @cp: The cpupri context | 149 | * @cp: The cpupri context |
150 | * @bootmem: true if allocations need to use bootmem | ||
153 | * | 151 | * |
154 | * Returns: (void) | 152 | * Returns: 0 on success, -ENOMEM if allocation fails. |
155 | */ | 153 | */ |
156 | void cpupri_init(struct cpupri *cp) | 154 | int cpupri_init(struct cpupri *cp, bool bootmem) |
157 | { | 155 | { |
158 | int i; | 156 | int i; |
159 | 157 | ||
@@ -164,11 +162,30 @@ void cpupri_init(struct cpupri *cp) | |||
164 | 162 | ||
165 | spin_lock_init(&vec->lock); | 163 | spin_lock_init(&vec->lock); |
166 | vec->count = 0; | 164 | vec->count = 0; |
167 | cpus_clear(vec->mask); | 165 | if (bootmem) |
166 | alloc_bootmem_cpumask_var(&vec->mask); | ||
167 | else if (!alloc_cpumask_var(&vec->mask, GFP_KERNEL)) | ||
168 | goto cleanup; | ||
168 | } | 169 | } |
169 | 170 | ||
170 | for_each_possible_cpu(i) | 171 | for_each_possible_cpu(i) |
171 | cp->cpu_to_pri[i] = CPUPRI_INVALID; | 172 | cp->cpu_to_pri[i] = CPUPRI_INVALID; |
173 | return 0; | ||
174 | |||
175 | cleanup: | ||
176 | for (i--; i >= 0; i--) | ||
177 | free_cpumask_var(cp->pri_to_cpu[i].mask); | ||
178 | return -ENOMEM; | ||
172 | } | 179 | } |
173 | 180 | ||
181 | /** | ||
182 | * cpupri_cleanup - clean up the cpupri structure | ||
183 | * @cp: The cpupri context | ||
184 | */ | ||
185 | void cpupri_cleanup(struct cpupri *cp) | ||
186 | { | ||
187 | int i; | ||
174 | 188 | ||
189 | for (i = 0; i < CPUPRI_NR_PRIORITIES; i++) | ||
190 | free_cpumask_var(cp->pri_to_cpu[i].mask); | ||
191 | } | ||
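
cpupri_init() above can now fail halfway through its allocation loop, so it unwinds in reverse, freeing only what succeeded. The same idiom as a generic hedged sketch (alloc_all is an illustrative name):

    /* Unwind-on-failure: i points at the failed slot, so walk back
     * from i-1 and free only the masks actually allocated. */
    static int alloc_all(cpumask_var_t *masks, int n)
    {
            int i;

            for (i = 0; i < n; i++)
                    if (!alloc_cpumask_var(&masks[i], GFP_KERNEL))
                            goto cleanup;
            return 0;

    cleanup:
            for (i--; i >= 0; i--)
                    free_cpumask_var(masks[i]);
            return -ENOMEM;
    }
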
diff --git a/kernel/sched_cpupri.h b/kernel/sched_cpupri.h index f25811b0f931..642a94ef8a0a 100644 --- a/kernel/sched_cpupri.h +++ b/kernel/sched_cpupri.h | |||
@@ -14,7 +14,7 @@ | |||
14 | struct cpupri_vec { | 14 | struct cpupri_vec { |
15 | spinlock_t lock; | 15 | spinlock_t lock; |
16 | int count; | 16 | int count; |
17 | cpumask_t mask; | 17 | cpumask_var_t mask; |
18 | }; | 18 | }; |
19 | 19 | ||
20 | struct cpupri { | 20 | struct cpupri { |
@@ -27,7 +27,8 @@ struct cpupri { | |||
27 | int cpupri_find(struct cpupri *cp, | 27 | int cpupri_find(struct cpupri *cp, |
28 | struct task_struct *p, cpumask_t *lowest_mask); | 28 | struct task_struct *p, cpumask_t *lowest_mask); |
29 | void cpupri_set(struct cpupri *cp, int cpu, int pri); | 29 | void cpupri_set(struct cpupri *cp, int cpu, int pri); |
30 | void cpupri_init(struct cpupri *cp); | 30 | int cpupri_init(struct cpupri *cp, bool bootmem); |
31 | void cpupri_cleanup(struct cpupri *cp); | ||
31 | #else | 32 | #else |
32 | #define cpupri_set(cp, cpu, pri) do { } while (0) | 33 | #define cpupri_set(cp, cpu, pri) do { } while (0) |
33 | #define cpupri_init() do { } while (0) | 34 | #define cpupri_init() do { } while (0) |
diff --git a/kernel/sched_fair.c b/kernel/sched_fair.c index 5ad4440f0fc4..56c0efe902a7 100644 --- a/kernel/sched_fair.c +++ b/kernel/sched_fair.c | |||
@@ -1019,16 +1019,33 @@ static void yield_task_fair(struct rq *rq) | |||
1019 | * search starts with cpus closest then further out as needed, | 1019 | * search starts with cpus closest then further out as needed, |
1020 | * so we always favor a closer, idle cpu. | 1020 | * so we always favor a closer, idle cpu. |
1021 | * Domains may include CPUs that are not usable for migration, | 1021 | * Domains may include CPUs that are not usable for migration, |
1022 | * hence we need to mask them out (cpu_active_map) | 1022 | * hence we need to mask them out (cpu_active_mask) |
1023 | * | 1023 | * |
1024 | * Returns the CPU we should wake onto. | 1024 | * Returns the CPU we should wake onto. |
1025 | */ | 1025 | */ |
1026 | #if defined(ARCH_HAS_SCHED_WAKE_IDLE) | 1026 | #if defined(ARCH_HAS_SCHED_WAKE_IDLE) |
1027 | static int wake_idle(int cpu, struct task_struct *p) | 1027 | static int wake_idle(int cpu, struct task_struct *p) |
1028 | { | 1028 | { |
1029 | cpumask_t tmp; | ||
1030 | struct sched_domain *sd; | 1029 | struct sched_domain *sd; |
1031 | int i; | 1030 | int i; |
1031 | unsigned int chosen_wakeup_cpu; | ||
1032 | int this_cpu; | ||
1033 | |||
1034 | /* | ||
1035 | * At POWERSAVINGS_BALANCE_WAKEUP level, if both this_cpu and prev_cpu | ||
1036 | * are idle and this is not a kernel thread and this task's affinity | ||
1037 | * allows it to be moved to preferred cpu, then just move! | ||
1038 | */ | ||
1039 | |||
1040 | this_cpu = smp_processor_id(); | ||
1041 | chosen_wakeup_cpu = | ||
1042 | cpu_rq(this_cpu)->rd->sched_mc_preferred_wakeup_cpu; | ||
1043 | |||
1044 | if (sched_mc_power_savings >= POWERSAVINGS_BALANCE_WAKEUP && | ||
1045 | idle_cpu(cpu) && idle_cpu(this_cpu) && | ||
1046 | p->mm && !(p->flags & PF_KTHREAD) && | ||
1047 | cpu_isset(chosen_wakeup_cpu, p->cpus_allowed)) | ||
1048 | return chosen_wakeup_cpu; | ||
1032 | 1049 | ||
1033 | /* | 1050 | /* |
1034 | * If it is idle, then it is the best cpu to run this task. | 1051 | * If it is idle, then it is the best cpu to run this task. |
@@ -1046,10 +1063,9 @@ static int wake_idle(int cpu, struct task_struct *p) | |||
1046 | if ((sd->flags & SD_WAKE_IDLE) | 1063 | if ((sd->flags & SD_WAKE_IDLE) |
1047 | || ((sd->flags & SD_WAKE_IDLE_FAR) | 1064 | || ((sd->flags & SD_WAKE_IDLE_FAR) |
1048 | && !task_hot(p, task_rq(p)->clock, sd))) { | 1065 | && !task_hot(p, task_rq(p)->clock, sd))) { |
1049 | cpus_and(tmp, sd->span, p->cpus_allowed); | 1066 | for_each_cpu_and(i, sched_domain_span(sd), |
1050 | cpus_and(tmp, tmp, cpu_active_map); | 1067 | &p->cpus_allowed) { |
1051 | for_each_cpu_mask_nr(i, tmp) { | 1068 | if (cpu_active(i) && idle_cpu(i)) { |
1052 | if (idle_cpu(i)) { | ||
1053 | if (i != task_cpu(p)) { | 1069 | if (i != task_cpu(p)) { |
1054 | schedstat_inc(p, | 1070 | schedstat_inc(p, |
1055 | se.nr_wakeups_idle); | 1071 | se.nr_wakeups_idle); |
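
The loop above now uses for_each_cpu_and(), which walks the intersection of two masks directly instead of materializing it with cpus_and() into a stack-local cpumask_t. The idiom, sketched:

    /* Iterate only cpus present in both the domain span and the
     * task's affinity mask; no temporary mask is built. */
    int i;

    for_each_cpu_and(i, sched_domain_span(sd), &p->cpus_allowed) {
            if (!cpu_active(i) || !idle_cpu(i))
                    continue;
            /* i is an active, idle candidate cpu */
    }
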
@@ -1242,13 +1258,13 @@ static int select_task_rq_fair(struct task_struct *p, int sync) | |||
1242 | * this_cpu and prev_cpu are present in: | 1258 | * this_cpu and prev_cpu are present in: |
1243 | */ | 1259 | */ |
1244 | for_each_domain(this_cpu, sd) { | 1260 | for_each_domain(this_cpu, sd) { |
1245 | if (cpu_isset(prev_cpu, sd->span)) { | 1261 | if (cpumask_test_cpu(prev_cpu, sched_domain_span(sd))) { |
1246 | this_sd = sd; | 1262 | this_sd = sd; |
1247 | break; | 1263 | break; |
1248 | } | 1264 | } |
1249 | } | 1265 | } |
1250 | 1266 | ||
1251 | if (unlikely(!cpu_isset(this_cpu, p->cpus_allowed))) | 1267 | if (unlikely(!cpumask_test_cpu(this_cpu, &p->cpus_allowed))) |
1252 | goto out; | 1268 | goto out; |
1253 | 1269 | ||
1254 | /* | 1270 | /* |
diff --git a/kernel/sched_rt.c b/kernel/sched_rt.c index 51d2af3e6191..833b6d44483c 100644 --- a/kernel/sched_rt.c +++ b/kernel/sched_rt.c | |||
@@ -15,7 +15,7 @@ static inline void rt_set_overload(struct rq *rq) | |||
15 | if (!rq->online) | 15 | if (!rq->online) |
16 | return; | 16 | return; |
17 | 17 | ||
18 | cpu_set(rq->cpu, rq->rd->rto_mask); | 18 | cpumask_set_cpu(rq->cpu, rq->rd->rto_mask); |
19 | /* | 19 | /* |
20 | * Make sure the mask is visible before we set | 20 | * Make sure the mask is visible before we set |
21 | * the overload count. That is checked to determine | 21 | * the overload count. That is checked to determine |
@@ -34,7 +34,7 @@ static inline void rt_clear_overload(struct rq *rq) | |||
34 | 34 | ||
35 | /* the order here really doesn't matter */ | 35 | /* the order here really doesn't matter */ |
36 | atomic_dec(&rq->rd->rto_count); | 36 | atomic_dec(&rq->rd->rto_count); |
37 | cpu_clear(rq->cpu, rq->rd->rto_mask); | 37 | cpumask_clear_cpu(rq->cpu, rq->rd->rto_mask); |
38 | } | 38 | } |
39 | 39 | ||
40 | static void update_rt_migration(struct rq *rq) | 40 | static void update_rt_migration(struct rq *rq) |
@@ -139,14 +139,14 @@ static int rt_se_boosted(struct sched_rt_entity *rt_se) | |||
139 | } | 139 | } |
140 | 140 | ||
141 | #ifdef CONFIG_SMP | 141 | #ifdef CONFIG_SMP |
142 | static inline cpumask_t sched_rt_period_mask(void) | 142 | static inline const struct cpumask *sched_rt_period_mask(void) |
143 | { | 143 | { |
144 | return cpu_rq(smp_processor_id())->rd->span; | 144 | return cpu_rq(smp_processor_id())->rd->span; |
145 | } | 145 | } |
146 | #else | 146 | #else |
147 | static inline cpumask_t sched_rt_period_mask(void) | 147 | static inline const struct cpumask *sched_rt_period_mask(void) |
148 | { | 148 | { |
149 | return cpu_online_map; | 149 | return cpu_online_mask; |
150 | } | 150 | } |
151 | #endif | 151 | #endif |
152 | 152 | ||
@@ -212,9 +212,9 @@ static inline int rt_rq_throttled(struct rt_rq *rt_rq) | |||
212 | return rt_rq->rt_throttled; | 212 | return rt_rq->rt_throttled; |
213 | } | 213 | } |
214 | 214 | ||
215 | static inline cpumask_t sched_rt_period_mask(void) | 215 | static inline const struct cpumask *sched_rt_period_mask(void) |
216 | { | 216 | { |
217 | return cpu_online_map; | 217 | return cpu_online_mask; |
218 | } | 218 | } |
219 | 219 | ||
220 | static inline | 220 | static inline |
@@ -241,11 +241,11 @@ static int do_balance_runtime(struct rt_rq *rt_rq) | |||
241 | int i, weight, more = 0; | 241 | int i, weight, more = 0; |
242 | u64 rt_period; | 242 | u64 rt_period; |
243 | 243 | ||
244 | weight = cpus_weight(rd->span); | 244 | weight = cpumask_weight(rd->span); |
245 | 245 | ||
246 | spin_lock(&rt_b->rt_runtime_lock); | 246 | spin_lock(&rt_b->rt_runtime_lock); |
247 | rt_period = ktime_to_ns(rt_b->rt_period); | 247 | rt_period = ktime_to_ns(rt_b->rt_period); |
248 | for_each_cpu_mask_nr(i, rd->span) { | 248 | for_each_cpu(i, rd->span) { |
249 | struct rt_rq *iter = sched_rt_period_rt_rq(rt_b, i); | 249 | struct rt_rq *iter = sched_rt_period_rt_rq(rt_b, i); |
250 | s64 diff; | 250 | s64 diff; |
251 | 251 | ||
@@ -324,7 +324,7 @@ static void __disable_runtime(struct rq *rq) | |||
324 | /* | 324 | /* |
325 | * Greedy reclaim, take back as much as we can. | 325 | * Greedy reclaim, take back as much as we can. |
326 | */ | 326 | */ |
327 | for_each_cpu_mask(i, rd->span) { | 327 | for_each_cpu(i, rd->span) { |
328 | struct rt_rq *iter = sched_rt_period_rt_rq(rt_b, i); | 328 | struct rt_rq *iter = sched_rt_period_rt_rq(rt_b, i); |
329 | s64 diff; | 329 | s64 diff; |
330 | 330 | ||
@@ -429,13 +429,13 @@ static inline int balance_runtime(struct rt_rq *rt_rq) | |||
429 | static int do_sched_rt_period_timer(struct rt_bandwidth *rt_b, int overrun) | 429 | static int do_sched_rt_period_timer(struct rt_bandwidth *rt_b, int overrun) |
430 | { | 430 | { |
431 | int i, idle = 1; | 431 | int i, idle = 1; |
432 | cpumask_t span; | 432 | const struct cpumask *span; |
433 | 433 | ||
434 | if (!rt_bandwidth_enabled() || rt_b->rt_runtime == RUNTIME_INF) | 434 | if (!rt_bandwidth_enabled() || rt_b->rt_runtime == RUNTIME_INF) |
435 | return 1; | 435 | return 1; |
436 | 436 | ||
437 | span = sched_rt_period_mask(); | 437 | span = sched_rt_period_mask(); |
438 | for_each_cpu_mask(i, span) { | 438 | for_each_cpu(i, span) { |
439 | int enqueue = 0; | 439 | int enqueue = 0; |
440 | struct rt_rq *rt_rq = sched_rt_period_rt_rq(rt_b, i); | 440 | struct rt_rq *rt_rq = sched_rt_period_rt_rq(rt_b, i); |
441 | struct rq *rq = rq_of_rt_rq(rt_rq); | 441 | struct rq *rq = rq_of_rt_rq(rt_rq); |
@@ -805,17 +805,20 @@ static int select_task_rq_rt(struct task_struct *p, int sync) | |||
805 | 805 | ||
806 | static void check_preempt_equal_prio(struct rq *rq, struct task_struct *p) | 806 | static void check_preempt_equal_prio(struct rq *rq, struct task_struct *p) |
807 | { | 807 | { |
808 | cpumask_t mask; | 808 | cpumask_var_t mask; |
809 | 809 | ||
810 | if (rq->curr->rt.nr_cpus_allowed == 1) | 810 | if (rq->curr->rt.nr_cpus_allowed == 1) |
811 | return; | 811 | return; |
812 | 812 | ||
813 | if (p->rt.nr_cpus_allowed != 1 | 813 | if (!alloc_cpumask_var(&mask, GFP_ATOMIC)) |
814 | && cpupri_find(&rq->rd->cpupri, p, &mask)) | ||
815 | return; | 814 | return; |
816 | 815 | ||
817 | if (!cpupri_find(&rq->rd->cpupri, rq->curr, &mask)) | 816 | if (p->rt.nr_cpus_allowed != 1 |
818 | return; | 817 | && cpupri_find(&rq->rd->cpupri, p, mask)) |
818 | goto free; | ||
819 | |||
820 | if (!cpupri_find(&rq->rd->cpupri, rq->curr, mask)) | ||
821 | goto free; | ||
819 | 822 | ||
820 | /* | 823 | /* |
821 | * There appears to be other cpus that can accept | 824 | * There appears to be other cpus that can accept |
@@ -824,6 +827,8 @@ static void check_preempt_equal_prio(struct rq *rq, struct task_struct *p) | |||
824 | */ | 827 | */ |
825 | requeue_task_rt(rq, p, 1); | 828 | requeue_task_rt(rq, p, 1); |
826 | resched_task(rq->curr); | 829 | resched_task(rq->curr); |
830 | free: | ||
831 | free_cpumask_var(mask); | ||
827 | } | 832 | } |
828 | 833 | ||
829 | #endif /* CONFIG_SMP */ | 834 | #endif /* CONFIG_SMP */ |
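
check_preempt_equal_prio() runs in a non-sleepable path, so its scratch mask is allocated with GFP_ATOMIC and a failure simply skips the optimization. The shape of that best-effort pattern, as a sketch:

    cpumask_var_t mask;

    /* Atomic context: on allocation failure, silently skip the
     * optional work rather than treating it as a hard error. */
    if (!alloc_cpumask_var(&mask, GFP_ATOMIC))
            return;

    /* ... optional work using mask ... */

    free_cpumask_var(mask);
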
@@ -914,7 +919,7 @@ static void deactivate_task(struct rq *rq, struct task_struct *p, int sleep); | |||
914 | static int pick_rt_task(struct rq *rq, struct task_struct *p, int cpu) | 919 | static int pick_rt_task(struct rq *rq, struct task_struct *p, int cpu) |
915 | { | 920 | { |
916 | if (!task_running(rq, p) && | 921 | if (!task_running(rq, p) && |
917 | (cpu < 0 || cpu_isset(cpu, p->cpus_allowed)) && | 922 | (cpu < 0 || cpumask_test_cpu(cpu, &p->cpus_allowed)) && |
918 | (p->rt.nr_cpus_allowed > 1)) | 923 | (p->rt.nr_cpus_allowed > 1)) |
919 | return 1; | 924 | return 1; |
920 | return 0; | 925 | return 0; |
@@ -953,7 +958,7 @@ static struct task_struct *pick_next_highest_task_rt(struct rq *rq, int cpu) | |||
953 | return next; | 958 | return next; |
954 | } | 959 | } |
955 | 960 | ||
956 | static DEFINE_PER_CPU(cpumask_t, local_cpu_mask); | 961 | static DEFINE_PER_CPU(cpumask_var_t, local_cpu_mask); |
957 | 962 | ||
958 | static inline int pick_optimal_cpu(int this_cpu, cpumask_t *mask) | 963 | static inline int pick_optimal_cpu(int this_cpu, cpumask_t *mask) |
959 | { | 964 | { |
@@ -973,7 +978,7 @@ static inline int pick_optimal_cpu(int this_cpu, cpumask_t *mask) | |||
973 | static int find_lowest_rq(struct task_struct *task) | 978 | static int find_lowest_rq(struct task_struct *task) |
974 | { | 979 | { |
975 | struct sched_domain *sd; | 980 | struct sched_domain *sd; |
976 | cpumask_t *lowest_mask = &__get_cpu_var(local_cpu_mask); | 981 | struct cpumask *lowest_mask = __get_cpu_var(local_cpu_mask); |
977 | int this_cpu = smp_processor_id(); | 982 | int this_cpu = smp_processor_id(); |
978 | int cpu = task_cpu(task); | 983 | int cpu = task_cpu(task); |
979 | 984 | ||
@@ -988,7 +993,7 @@ static int find_lowest_rq(struct task_struct *task) | |||
988 | * I guess we might want to change cpupri_find() to ignore those | 993 | * I guess we might want to change cpupri_find() to ignore those |
989 | * in the first place. | 994 | * in the first place. |
990 | */ | 995 | */ |
991 | cpus_and(*lowest_mask, *lowest_mask, cpu_active_map); | 996 | cpumask_and(lowest_mask, lowest_mask, cpu_active_mask); |
992 | 997 | ||
993 | /* | 998 | /* |
994 | * At this point we have built a mask of cpus representing the | 999 | * At this point we have built a mask of cpus representing the |
@@ -998,7 +1003,7 @@ static int find_lowest_rq(struct task_struct *task) | |||
998 | * We prioritize the last cpu that the task executed on since | 1003 | * We prioritize the last cpu that the task executed on since |
999 | * it is most likely cache-hot in that location. | 1004 | * it is most likely cache-hot in that location. |
1000 | */ | 1005 | */ |
1001 | if (cpu_isset(cpu, *lowest_mask)) | 1006 | if (cpumask_test_cpu(cpu, lowest_mask)) |
1002 | return cpu; | 1007 | return cpu; |
1003 | 1008 | ||
1004 | /* | 1009 | /* |
@@ -1013,7 +1018,8 @@ static int find_lowest_rq(struct task_struct *task) | |||
1013 | cpumask_t domain_mask; | 1018 | cpumask_t domain_mask; |
1014 | int best_cpu; | 1019 | int best_cpu; |
1015 | 1020 | ||
1016 | cpus_and(domain_mask, sd->span, *lowest_mask); | 1021 | cpumask_and(&domain_mask, sched_domain_span(sd), |
1022 | lowest_mask); | ||
1017 | 1023 | ||
1018 | best_cpu = pick_optimal_cpu(this_cpu, | 1024 | best_cpu = pick_optimal_cpu(this_cpu, |
1019 | &domain_mask); | 1025 | &domain_mask); |
@@ -1054,8 +1060,8 @@ static struct rq *find_lock_lowest_rq(struct task_struct *task, struct rq *rq) | |||
1054 | * Also make sure that it wasn't scheduled on its rq. | 1060 | * Also make sure that it wasn't scheduled on its rq. |
1055 | */ | 1061 | */ |
1056 | if (unlikely(task_rq(task) != rq || | 1062 | if (unlikely(task_rq(task) != rq || |
1057 | !cpu_isset(lowest_rq->cpu, | 1063 | !cpumask_test_cpu(lowest_rq->cpu, |
1058 | task->cpus_allowed) || | 1064 | &task->cpus_allowed) || |
1059 | task_running(rq, task) || | 1065 | task_running(rq, task) || |
1060 | !task->se.on_rq)) { | 1066 | !task->se.on_rq)) { |
1061 | 1067 | ||
@@ -1176,7 +1182,7 @@ static int pull_rt_task(struct rq *this_rq) | |||
1176 | 1182 | ||
1177 | next = pick_next_task_rt(this_rq); | 1183 | next = pick_next_task_rt(this_rq); |
1178 | 1184 | ||
1179 | for_each_cpu_mask_nr(cpu, this_rq->rd->rto_mask) { | 1185 | for_each_cpu(cpu, this_rq->rd->rto_mask) { |
1180 | if (this_cpu == cpu) | 1186 | if (this_cpu == cpu) |
1181 | continue; | 1187 | continue; |
1182 | 1188 | ||
@@ -1305,9 +1311,9 @@ move_one_task_rt(struct rq *this_rq, int this_cpu, struct rq *busiest, | |||
1305 | } | 1311 | } |
1306 | 1312 | ||
1307 | static void set_cpus_allowed_rt(struct task_struct *p, | 1313 | static void set_cpus_allowed_rt(struct task_struct *p, |
1308 | const cpumask_t *new_mask) | 1314 | const struct cpumask *new_mask) |
1309 | { | 1315 | { |
1310 | int weight = cpus_weight(*new_mask); | 1316 | int weight = cpumask_weight(new_mask); |
1311 | 1317 | ||
1312 | BUG_ON(!rt_task(p)); | 1318 | BUG_ON(!rt_task(p)); |
1313 | 1319 | ||
@@ -1328,7 +1334,7 @@ static void set_cpus_allowed_rt(struct task_struct *p, | |||
1328 | update_rt_migration(rq); | 1334 | update_rt_migration(rq); |
1329 | } | 1335 | } |
1330 | 1336 | ||
1331 | p->cpus_allowed = *new_mask; | 1337 | cpumask_copy(&p->cpus_allowed, new_mask); |
1332 | p->rt.nr_cpus_allowed = weight; | 1338 | p->rt.nr_cpus_allowed = weight; |
1333 | } | 1339 | } |
1334 | 1340 | ||
@@ -1371,6 +1377,14 @@ static void switched_from_rt(struct rq *rq, struct task_struct *p, | |||
1371 | if (!rq->rt.rt_nr_running) | 1377 | if (!rq->rt.rt_nr_running) |
1372 | pull_rt_task(rq); | 1378 | pull_rt_task(rq); |
1373 | } | 1379 | } |
1380 | |||
1381 | static inline void init_sched_rt_class(void) | ||
1382 | { | ||
1383 | unsigned int i; | ||
1384 | |||
1385 | for_each_possible_cpu(i) | ||
1386 | alloc_cpumask_var(&per_cpu(local_cpu_mask, i), GFP_KERNEL); | ||
1387 | } | ||
1374 | #endif /* CONFIG_SMP */ | 1388 | #endif /* CONFIG_SMP */ |
1375 | 1389 | ||
1376 | /* | 1390 | /* |
@@ -1541,3 +1555,4 @@ static void print_rt_stats(struct seq_file *m, int cpu) | |||
1541 | rcu_read_unlock(); | 1555 | rcu_read_unlock(); |
1542 | } | 1556 | } |
1543 | #endif /* CONFIG_SCHED_DEBUG */ | 1557 | #endif /* CONFIG_SCHED_DEBUG */ |
1558 | |||
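
Since local_cpu_mask is now a per-cpu cpumask_var_t, it needs the one-time allocation pass that init_sched_rt_class() provides. The per-cpu pattern, with illustrative names:

    static DEFINE_PER_CPU(cpumask_var_t, example_mask);

    static void example_init(void)
    {
            unsigned int i;

            /* One allocation per possible cpu; each call is a no-op
             * when CONFIG_CPUMASK_OFFSTACK is off. */
            for_each_possible_cpu(i)
                    alloc_cpumask_var(&per_cpu(example_mask, i),
                                      GFP_KERNEL);
    }
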
diff --git a/kernel/sched_stats.h b/kernel/sched_stats.h index 3b01098164c8..f2773b5d1226 100644 --- a/kernel/sched_stats.h +++ b/kernel/sched_stats.h | |||
@@ -42,7 +42,8 @@ static int show_schedstat(struct seq_file *seq, void *v) | |||
42 | for_each_domain(cpu, sd) { | 42 | for_each_domain(cpu, sd) { |
43 | enum cpu_idle_type itype; | 43 | enum cpu_idle_type itype; |
44 | 44 | ||
45 | cpumask_scnprintf(mask_str, mask_len, sd->span); | 45 | cpumask_scnprintf(mask_str, mask_len, |
46 | sched_domain_span(sd)); | ||
46 | seq_printf(seq, "domain%d %s", dcount++, mask_str); | 47 | seq_printf(seq, "domain%d %s", dcount++, mask_str); |
47 | for (itype = CPU_IDLE; itype < CPU_MAX_IDLE_TYPES; | 48 | for (itype = CPU_IDLE; itype < CPU_MAX_IDLE_TYPES; |
48 | itype++) { | 49 | itype++) { |
diff --git a/kernel/taskstats.c b/kernel/taskstats.c index bd6be76303cf..6d7dc4ec4aa5 100644 --- a/kernel/taskstats.c +++ b/kernel/taskstats.c | |||
@@ -352,7 +352,7 @@ static int parse(struct nlattr *na, cpumask_t *mask) | |||
352 | if (!data) | 352 | if (!data) |
353 | return -ENOMEM; | 353 | return -ENOMEM; |
354 | nla_strlcpy(data, na, len); | 354 | nla_strlcpy(data, na, len); |
355 | ret = cpulist_parse(data, *mask); | 355 | ret = cpulist_parse(data, mask); |
356 | kfree(data); | 356 | kfree(data); |
357 | return ret; | 357 | return ret; |
358 | } | 358 | } |
diff --git a/kernel/time/clockevents.c b/kernel/time/clockevents.c index f8d968063cea..ea2f48af83cf 100644 --- a/kernel/time/clockevents.c +++ b/kernel/time/clockevents.c | |||
@@ -166,6 +166,8 @@ static void clockevents_notify_released(void) | |||
166 | void clockevents_register_device(struct clock_event_device *dev) | 166 | void clockevents_register_device(struct clock_event_device *dev) |
167 | { | 167 | { |
168 | BUG_ON(dev->mode != CLOCK_EVT_MODE_UNUSED); | 168 | BUG_ON(dev->mode != CLOCK_EVT_MODE_UNUSED); |
169 | BUG_ON(!dev->cpumask); | ||
170 | |||
169 | /* | 171 | /* |
170 | * A nsec2cyc multiplicator of 0 is invalid and we'd crash | 172 | * A nsec2cyc multiplicator of 0 is invalid and we'd crash |
171 | * on it, so fix it up and emit a warning: | 173 | * on it, so fix it up and emit a warning: |
diff --git a/kernel/time/tick-broadcast.c b/kernel/time/tick-broadcast.c index f98a1b7b16e9..9590af2327be 100644 --- a/kernel/time/tick-broadcast.c +++ b/kernel/time/tick-broadcast.c | |||
@@ -150,7 +150,7 @@ static void tick_do_broadcast(cpumask_t mask) | |||
150 | */ | 150 | */ |
151 | cpu = first_cpu(mask); | 151 | cpu = first_cpu(mask); |
152 | td = &per_cpu(tick_cpu_device, cpu); | 152 | td = &per_cpu(tick_cpu_device, cpu); |
153 | td->evtdev->broadcast(mask); | 153 | td->evtdev->broadcast(&mask); |
154 | } | 154 | } |
155 | } | 155 | } |
156 | 156 | ||
diff --git a/kernel/time/tick-common.c b/kernel/time/tick-common.c index df12434b43ca..f8372be74122 100644 --- a/kernel/time/tick-common.c +++ b/kernel/time/tick-common.c | |||
@@ -136,7 +136,7 @@ void tick_setup_periodic(struct clock_event_device *dev, int broadcast) | |||
136 | */ | 136 | */ |
137 | static void tick_setup_device(struct tick_device *td, | 137 | static void tick_setup_device(struct tick_device *td, |
138 | struct clock_event_device *newdev, int cpu, | 138 | struct clock_event_device *newdev, int cpu, |
139 | const cpumask_t *cpumask) | 139 | const struct cpumask *cpumask) |
140 | { | 140 | { |
141 | ktime_t next_event; | 141 | ktime_t next_event; |
142 | void (*handler)(struct clock_event_device *) = NULL; | 142 | void (*handler)(struct clock_event_device *) = NULL; |
@@ -171,8 +171,8 @@ static void tick_setup_device(struct tick_device *td, | |||
171 | * When the device is not per cpu, pin the interrupt to the | 171 | * When the device is not per cpu, pin the interrupt to the |
172 | * current cpu: | 172 | * current cpu: |
173 | */ | 173 | */ |
174 | if (!cpus_equal(newdev->cpumask, *cpumask)) | 174 | if (!cpumask_equal(newdev->cpumask, cpumask)) |
175 | irq_set_affinity(newdev->irq, *cpumask); | 175 | irq_set_affinity(newdev->irq, cpumask); |
176 | 176 | ||
177 | /* | 177 | /* |
178 | * When global broadcasting is active, check if the current | 178 | * When global broadcasting is active, check if the current |
@@ -202,14 +202,14 @@ static int tick_check_new_device(struct clock_event_device *newdev) | |||
202 | spin_lock_irqsave(&tick_device_lock, flags); | 202 | spin_lock_irqsave(&tick_device_lock, flags); |
203 | 203 | ||
204 | cpu = smp_processor_id(); | 204 | cpu = smp_processor_id(); |
205 | if (!cpu_isset(cpu, newdev->cpumask)) | 205 | if (!cpumask_test_cpu(cpu, newdev->cpumask)) |
206 | goto out_bc; | 206 | goto out_bc; |
207 | 207 | ||
208 | td = &per_cpu(tick_cpu_device, cpu); | 208 | td = &per_cpu(tick_cpu_device, cpu); |
209 | curdev = td->evtdev; | 209 | curdev = td->evtdev; |
210 | 210 | ||
211 | /* cpu local device ? */ | 211 | /* cpu local device ? */ |
212 | if (!cpus_equal(newdev->cpumask, cpumask_of_cpu(cpu))) { | 212 | if (!cpumask_equal(newdev->cpumask, cpumask_of(cpu))) { |
213 | 213 | ||
214 | /* | 214 | /* |
215 | * If the cpu affinity of the device interrupt can not | 215 | * If the cpu affinity of the device interrupt can not |
@@ -222,7 +222,7 @@ static int tick_check_new_device(struct clock_event_device *newdev) | |||
222 | * If we have a cpu local device already, do not replace it | 222 | * If we have a cpu local device already, do not replace it |
223 | * by a non cpu local device | 223 | * by a non cpu local device |
224 | */ | 224 | */ |
225 | if (curdev && cpus_equal(curdev->cpumask, cpumask_of_cpu(cpu))) | 225 | if (curdev && cpumask_equal(curdev->cpumask, cpumask_of(cpu))) |
226 | goto out_bc; | 226 | goto out_bc; |
227 | } | 227 | } |
228 | 228 | ||
diff --git a/kernel/time/tick-sched.c b/kernel/time/tick-sched.c index 8f3fc2582d38..76a574bbef97 100644 --- a/kernel/time/tick-sched.c +++ b/kernel/time/tick-sched.c | |||
@@ -144,7 +144,7 @@ void tick_nohz_update_jiffies(void) | |||
144 | if (!ts->tick_stopped) | 144 | if (!ts->tick_stopped) |
145 | return; | 145 | return; |
146 | 146 | ||
147 | cpu_clear(cpu, nohz_cpu_mask); | 147 | cpumask_clear_cpu(cpu, nohz_cpu_mask); |
148 | now = ktime_get(); | 148 | now = ktime_get(); |
149 | ts->idle_waketime = now; | 149 | ts->idle_waketime = now; |
150 | 150 | ||
@@ -301,7 +301,7 @@ void tick_nohz_stop_sched_tick(int inidle) | |||
301 | tick_do_timer_cpu = TICK_DO_TIMER_NONE; | 301 | tick_do_timer_cpu = TICK_DO_TIMER_NONE; |
302 | 302 | ||
303 | if (delta_jiffies > 1) | 303 | if (delta_jiffies > 1) |
304 | cpu_set(cpu, nohz_cpu_mask); | 304 | cpumask_set_cpu(cpu, nohz_cpu_mask); |
305 | 305 | ||
306 | /* Skip reprogram of event if its not changed */ | 306 | /* Skip reprogram of event if its not changed */ |
307 | if (ts->tick_stopped && ktime_equal(expires, dev->next_event)) | 307 | if (ts->tick_stopped && ktime_equal(expires, dev->next_event)) |
@@ -319,7 +319,7 @@ void tick_nohz_stop_sched_tick(int inidle) | |||
319 | /* | 319 | /* |
320 | * sched tick not stopped! | 320 | * sched tick not stopped! |
321 | */ | 321 | */ |
322 | cpu_clear(cpu, nohz_cpu_mask); | 322 | cpumask_clear_cpu(cpu, nohz_cpu_mask); |
323 | goto out; | 323 | goto out; |
324 | } | 324 | } |
325 | 325 | ||
@@ -361,7 +361,7 @@ void tick_nohz_stop_sched_tick(int inidle) | |||
361 | * softirq. | 361 | * softirq. |
362 | */ | 362 | */ |
363 | tick_do_update_jiffies64(ktime_get()); | 363 | tick_do_update_jiffies64(ktime_get()); |
364 | cpu_clear(cpu, nohz_cpu_mask); | 364 | cpumask_clear_cpu(cpu, nohz_cpu_mask); |
365 | } | 365 | } |
366 | raise_softirq_irqoff(TIMER_SOFTIRQ); | 366 | raise_softirq_irqoff(TIMER_SOFTIRQ); |
367 | out: | 367 | out: |
@@ -439,7 +439,7 @@ void tick_nohz_restart_sched_tick(void) | |||
439 | select_nohz_load_balancer(0); | 439 | select_nohz_load_balancer(0); |
440 | now = ktime_get(); | 440 | now = ktime_get(); |
441 | tick_do_update_jiffies64(now); | 441 | tick_do_update_jiffies64(now); |
442 | cpu_clear(cpu, nohz_cpu_mask); | 442 | cpumask_clear_cpu(cpu, nohz_cpu_mask); |
443 | 443 | ||
444 | /* | 444 | /* |
445 | * We stopped the tick in idle. Update process times would miss the | 445 | * We stopped the tick in idle. Update process times would miss the |
diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c index 4185d5221633..0e91f43b6baf 100644 --- a/kernel/trace/trace.c +++ b/kernel/trace/trace.c | |||
@@ -2674,7 +2674,7 @@ tracing_cpumask_read(struct file *filp, char __user *ubuf, | |||
2674 | 2674 | ||
2675 | mutex_lock(&tracing_cpumask_update_lock); | 2675 | mutex_lock(&tracing_cpumask_update_lock); |
2676 | 2676 | ||
2677 | len = cpumask_scnprintf(mask_str, count, tracing_cpumask); | 2677 | len = cpumask_scnprintf(mask_str, count, &tracing_cpumask); |
2678 | if (count - len < 2) { | 2678 | if (count - len < 2) { |
2679 | count = -EINVAL; | 2679 | count = -EINVAL; |
2680 | goto out_err; | 2680 | goto out_err; |
@@ -2695,7 +2695,7 @@ tracing_cpumask_write(struct file *filp, const char __user *ubuf, | |||
2695 | int err, cpu; | 2695 | int err, cpu; |
2696 | 2696 | ||
2697 | mutex_lock(&tracing_cpumask_update_lock); | 2697 | mutex_lock(&tracing_cpumask_update_lock); |
2698 | err = cpumask_parse_user(ubuf, count, tracing_cpumask_new); | 2698 | err = cpumask_parse_user(ubuf, count, &tracing_cpumask_new); |
2699 | if (err) | 2699 | if (err) |
2700 | goto err_unlock; | 2700 | goto err_unlock; |
2701 | 2701 | ||
diff --git a/lib/Kconfig b/lib/Kconfig index fd4118e097f0..2ba43c4a5b07 100644 --- a/lib/Kconfig +++ b/lib/Kconfig | |||
@@ -159,4 +159,11 @@ config CHECK_SIGNATURE | |||
159 | config HAVE_LMB | 159 | config HAVE_LMB |
160 | boolean | 160 | boolean |
161 | 161 | ||
162 | config CPUMASK_OFFSTACK | ||
163 | bool "Force CPU masks off stack" if DEBUG_PER_CPU_MAPS | ||
164 | help | ||
165 | Use dynamic allocation for cpumask_var_t, instead of putting | ||
166 | them on the stack. This is a bit more expensive, but avoids | ||
167 | stack overflow. | ||
168 | |||
162 | endmenu | 169 | endmenu |
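
This option is what gives alloc_cpumask_var() its dual behavior. Conceptually the definitions look like the following (simplified; the real ones live in <linux/cpumask.h>):

    #ifdef CONFIG_CPUMASK_OFFSTACK
    /* a pointer that alloc_cpumask_var() must fill */
    typedef struct cpumask *cpumask_var_t;
    #else
    /* a one-element array: usable like a pointer in expressions,
     * stored wherever the variable lives, allocation is a no-op */
    typedef struct cpumask cpumask_var_t[1];
    #endif
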
@@ -3642,7 +3642,7 @@ static int list_locations(struct kmem_cache *s, char *buf, | |||
3642 | len < PAGE_SIZE - 60) { | 3642 | len < PAGE_SIZE - 60) { |
3643 | len += sprintf(buf + len, " cpus="); | 3643 | len += sprintf(buf + len, " cpus="); |
3644 | len += cpulist_scnprintf(buf + len, PAGE_SIZE - len - 50, | 3644 | len += cpulist_scnprintf(buf + len, PAGE_SIZE - len - 50, |
3645 | l->cpus); | 3645 | &l->cpus); |
3646 | } | 3646 | } |
3647 | 3647 | ||
3648 | if (num_online_nodes() > 1 && !nodes_empty(l->nodes) && | 3648 | if (num_online_nodes() > 1 && !nodes_empty(l->nodes) && |
diff --git a/net/irda/ircomm/ircomm_tty.c b/net/irda/ircomm/ircomm_tty.c index e4e2caeb9d82..086d5ef098fd 100644 --- a/net/irda/ircomm/ircomm_tty.c +++ b/net/irda/ircomm/ircomm_tty.c | |||
@@ -371,9 +371,8 @@ static int ircomm_tty_open(struct tty_struct *tty, struct file *filp) | |||
371 | IRDA_DEBUG(2, "%s()\n", __func__ ); | 371 | IRDA_DEBUG(2, "%s()\n", __func__ ); |
372 | 372 | ||
373 | line = tty->index; | 373 | line = tty->index; |
374 | if ((line < 0) || (line >= IRCOMM_TTY_PORTS)) { | 374 | if (line >= IRCOMM_TTY_PORTS) |
375 | return -ENODEV; | 375 | return -ENODEV; |
376 | } | ||
377 | 376 | ||
378 | /* Check if instance already exists */ | 377 | /* Check if instance already exists */ |
379 | self = hashbin_lock_find(ircomm_tty, line, NULL); | 378 | self = hashbin_lock_find(ircomm_tty, line, NULL); |
@@ -405,6 +404,8 @@ static int ircomm_tty_open(struct tty_struct *tty, struct file *filp) | |||
405 | * Force TTY into raw mode by default which is usually what | 404 | * Force TTY into raw mode by default which is usually what |
406 | * we want for IrCOMM and IrLPT. This way applications will | 405 | * we want for IrCOMM and IrLPT. This way applications will |
407 | * not have to twiddle with printcap etc. | 406 | * not have to twiddle with printcap etc. |
407 | * | ||
408 | * Note this is completely unsafe and doesn't work properly | ||
408 | */ | 409 | */ |
409 | tty->termios->c_iflag = 0; | 410 | tty->termios->c_iflag = 0; |
410 | tty->termios->c_oflag = 0; | 411 | tty->termios->c_oflag = 0; |
diff --git a/virt/kvm/ioapic.c b/virt/kvm/ioapic.c index 53772bb46320..23b81cf242af 100644 --- a/virt/kvm/ioapic.c +++ b/virt/kvm/ioapic.c | |||
@@ -150,10 +150,11 @@ static int ioapic_inj_irq(struct kvm_ioapic *ioapic, | |||
150 | static void ioapic_inj_nmi(struct kvm_vcpu *vcpu) | 150 | static void ioapic_inj_nmi(struct kvm_vcpu *vcpu) |
151 | { | 151 | { |
152 | kvm_inject_nmi(vcpu); | 152 | kvm_inject_nmi(vcpu); |
153 | kvm_vcpu_kick(vcpu); | ||
153 | } | 154 | } |
154 | 155 | ||
155 | static u32 ioapic_get_delivery_bitmask(struct kvm_ioapic *ioapic, u8 dest, | 156 | u32 kvm_ioapic_get_delivery_bitmask(struct kvm_ioapic *ioapic, u8 dest, |
156 | u8 dest_mode) | 157 | u8 dest_mode) |
157 | { | 158 | { |
158 | u32 mask = 0; | 159 | u32 mask = 0; |
159 | int i; | 160 | int i; |
@@ -207,7 +208,8 @@ static int ioapic_deliver(struct kvm_ioapic *ioapic, int irq) | |||
207 | "vector=%x trig_mode=%x\n", | 208 | "vector=%x trig_mode=%x\n", |
208 | dest, dest_mode, delivery_mode, vector, trig_mode); | 209 | dest, dest_mode, delivery_mode, vector, trig_mode); |
209 | 210 | ||
210 | deliver_bitmask = ioapic_get_delivery_bitmask(ioapic, dest, dest_mode); | 211 | deliver_bitmask = kvm_ioapic_get_delivery_bitmask(ioapic, dest, |
212 | dest_mode); | ||
211 | if (!deliver_bitmask) { | 213 | if (!deliver_bitmask) { |
212 | ioapic_debug("no target on destination\n"); | 214 | ioapic_debug("no target on destination\n"); |
213 | return 0; | 215 | return 0; |
diff --git a/virt/kvm/ioapic.h b/virt/kvm/ioapic.h index cd7ae7691c9d..49c9581d2586 100644 --- a/virt/kvm/ioapic.h +++ b/virt/kvm/ioapic.h | |||
@@ -85,5 +85,7 @@ void kvm_ioapic_update_eoi(struct kvm *kvm, int vector, int trigger_mode); | |||
85 | int kvm_ioapic_init(struct kvm *kvm); | 85 | int kvm_ioapic_init(struct kvm *kvm); |
86 | void kvm_ioapic_set_irq(struct kvm_ioapic *ioapic, int irq, int level); | 86 | void kvm_ioapic_set_irq(struct kvm_ioapic *ioapic, int irq, int level); |
87 | void kvm_ioapic_reset(struct kvm_ioapic *ioapic); | 87 | void kvm_ioapic_reset(struct kvm_ioapic *ioapic); |
88 | u32 kvm_ioapic_get_delivery_bitmask(struct kvm_ioapic *ioapic, u8 dest, | ||
89 | u8 dest_mode); | ||
88 | 90 | ||
89 | #endif | 91 | #endif |
diff --git a/virt/kvm/irq_comm.c b/virt/kvm/irq_comm.c index 55ad76ee2d09..aa5d1e5c497e 100644 --- a/virt/kvm/irq_comm.c +++ b/virt/kvm/irq_comm.c | |||
@@ -61,10 +61,9 @@ void kvm_register_irq_ack_notifier(struct kvm *kvm, | |||
61 | hlist_add_head(&kian->link, &kvm->arch.irq_ack_notifier_list); | 61 | hlist_add_head(&kian->link, &kvm->arch.irq_ack_notifier_list); |
62 | } | 62 | } |
63 | 63 | ||
64 | void kvm_unregister_irq_ack_notifier(struct kvm *kvm, | 64 | void kvm_unregister_irq_ack_notifier(struct kvm_irq_ack_notifier *kian) |
65 | struct kvm_irq_ack_notifier *kian) | ||
66 | { | 65 | { |
67 | hlist_del(&kian->link); | 66 | hlist_del_init(&kian->link); |
68 | } | 67 | } |
69 | 68 | ||
70 | /* The caller must hold kvm->lock mutex */ | 69 | /* The caller must hold kvm->lock mutex */ |
@@ -73,11 +72,15 @@ int kvm_request_irq_source_id(struct kvm *kvm) | |||
73 | unsigned long *bitmap = &kvm->arch.irq_sources_bitmap; | 72 | unsigned long *bitmap = &kvm->arch.irq_sources_bitmap; |
74 | int irq_source_id = find_first_zero_bit(bitmap, | 73 | int irq_source_id = find_first_zero_bit(bitmap, |
75 | sizeof(kvm->arch.irq_sources_bitmap)); | 74 | sizeof(kvm->arch.irq_sources_bitmap)); |
75 | |||
76 | if (irq_source_id >= sizeof(kvm->arch.irq_sources_bitmap)) { | 76 | if (irq_source_id >= sizeof(kvm->arch.irq_sources_bitmap)) { |
77 | printk(KERN_WARNING "kvm: exhaust allocatable IRQ sources!\n"); | 77 | printk(KERN_WARNING "kvm: exhaust allocatable IRQ sources!\n"); |
78 | irq_source_id = -EFAULT; | 78 | return -EFAULT; |
79 | } else | 79 | } |
80 | set_bit(irq_source_id, bitmap); | 80 | |
81 | ASSERT(irq_source_id != KVM_USERSPACE_IRQ_SOURCE_ID); | ||
82 | set_bit(irq_source_id, bitmap); | ||
83 | |||
81 | return irq_source_id; | 84 | return irq_source_id; |
82 | } | 85 | } |
83 | 86 | ||
@@ -85,7 +88,9 @@ void kvm_free_irq_source_id(struct kvm *kvm, int irq_source_id) | |||
85 | { | 88 | { |
86 | int i; | 89 | int i; |
87 | 90 | ||
88 | if (irq_source_id <= 0 || | 91 | ASSERT(irq_source_id != KVM_USERSPACE_IRQ_SOURCE_ID); |
92 | |||
93 | if (irq_source_id < 0 || | ||
89 | irq_source_id >= sizeof(kvm->arch.irq_sources_bitmap)) { | 94 | irq_source_id >= sizeof(kvm->arch.irq_sources_bitmap)) { |
90 | printk(KERN_ERR "kvm: IRQ source ID out of range!\n"); | 95 | printk(KERN_ERR "kvm: IRQ source ID out of range!\n"); |
91 | return; | 96 | return; |
diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c index a87f45edfae8..fc6127cbea1f 100644 --- a/virt/kvm/kvm_main.c +++ b/virt/kvm/kvm_main.c | |||
@@ -47,6 +47,10 @@ | |||
47 | #include <asm/uaccess.h> | 47 | #include <asm/uaccess.h> |
48 | #include <asm/pgtable.h> | 48 | #include <asm/pgtable.h> |
49 | 49 | ||
50 | #ifdef CONFIG_X86 | ||
51 | #include <asm/msidef.h> | ||
52 | #endif | ||
53 | |||
50 | #ifdef KVM_COALESCED_MMIO_PAGE_OFFSET | 54 | #ifdef KVM_COALESCED_MMIO_PAGE_OFFSET |
51 | #include "coalesced_mmio.h" | 55 | #include "coalesced_mmio.h" |
52 | #endif | 56 | #endif |
@@ -60,10 +64,13 @@ | |||
60 | MODULE_AUTHOR("Qumranet"); | 64 | MODULE_AUTHOR("Qumranet"); |
61 | MODULE_LICENSE("GPL"); | 65 | MODULE_LICENSE("GPL"); |
62 | 66 | ||
67 | static int msi2intx = 1; | ||
68 | module_param(msi2intx, bool, 0); | ||
69 | |||
63 | DEFINE_SPINLOCK(kvm_lock); | 70 | DEFINE_SPINLOCK(kvm_lock); |
64 | LIST_HEAD(vm_list); | 71 | LIST_HEAD(vm_list); |
65 | 72 | ||
66 | static cpumask_t cpus_hardware_enabled; | 73 | static cpumask_var_t cpus_hardware_enabled; |
67 | 74 | ||
68 | struct kmem_cache *kvm_vcpu_cache; | 75 | struct kmem_cache *kvm_vcpu_cache; |
69 | EXPORT_SYMBOL_GPL(kvm_vcpu_cache); | 76 | EXPORT_SYMBOL_GPL(kvm_vcpu_cache); |
@@ -75,9 +82,60 @@ struct dentry *kvm_debugfs_dir; | |||
75 | static long kvm_vcpu_ioctl(struct file *file, unsigned int ioctl, | 82 | static long kvm_vcpu_ioctl(struct file *file, unsigned int ioctl, |
76 | unsigned long arg); | 83 | unsigned long arg); |
77 | 84 | ||
78 | bool kvm_rebooting; | 85 | static bool kvm_rebooting; |
79 | 86 | ||
80 | #ifdef KVM_CAP_DEVICE_ASSIGNMENT | 87 | #ifdef KVM_CAP_DEVICE_ASSIGNMENT |
88 | |||
89 | #ifdef CONFIG_X86 | ||
90 | static void assigned_device_msi_dispatch(struct kvm_assigned_dev_kernel *dev) | ||
91 | { | ||
92 | int vcpu_id; | ||
93 | struct kvm_vcpu *vcpu; | ||
94 | struct kvm_ioapic *ioapic = ioapic_irqchip(dev->kvm); | ||
95 | int dest_id = (dev->guest_msi.address_lo & MSI_ADDR_DEST_ID_MASK) | ||
96 | >> MSI_ADDR_DEST_ID_SHIFT; | ||
97 | int vector = (dev->guest_msi.data & MSI_DATA_VECTOR_MASK) | ||
98 | >> MSI_DATA_VECTOR_SHIFT; | ||
99 | int dest_mode = test_bit(MSI_ADDR_DEST_MODE_SHIFT, | ||
100 | (unsigned long *)&dev->guest_msi.address_lo); | ||
101 | int trig_mode = test_bit(MSI_DATA_TRIGGER_SHIFT, | ||
102 | (unsigned long *)&dev->guest_msi.data); | ||
103 | int delivery_mode = test_bit(MSI_DATA_DELIVERY_MODE_SHIFT, | ||
104 | (unsigned long *)&dev->guest_msi.data); | ||
105 | u32 deliver_bitmask; | ||
106 | |||
107 | BUG_ON(!ioapic); | ||
108 | |||
109 | deliver_bitmask = kvm_ioapic_get_delivery_bitmask(ioapic, | ||
110 | dest_id, dest_mode); | ||
111 | /* IOAPIC delivery mode value is the same as MSI here */ | ||
112 | switch (delivery_mode) { | ||
113 | case IOAPIC_LOWEST_PRIORITY: | ||
114 | vcpu = kvm_get_lowest_prio_vcpu(ioapic->kvm, vector, | ||
115 | deliver_bitmask); | ||
116 | if (vcpu != NULL) | ||
117 | kvm_apic_set_irq(vcpu, vector, trig_mode); | ||
118 | else | ||
119 | printk(KERN_INFO "kvm: null lowest priority vcpu!\n"); | ||
120 | break; | ||
121 | case IOAPIC_FIXED: | ||
122 | for (vcpu_id = 0; deliver_bitmask != 0; vcpu_id++) { | ||
123 | if (!(deliver_bitmask & (1 << vcpu_id))) | ||
124 | continue; | ||
125 | deliver_bitmask &= ~(1 << vcpu_id); | ||
126 | vcpu = ioapic->kvm->vcpus[vcpu_id]; | ||
127 | if (vcpu) | ||
128 | kvm_apic_set_irq(vcpu, vector, trig_mode); | ||
129 | } | ||
130 | break; | ||
131 | default: | ||
132 | printk(KERN_INFO "kvm: unsupported MSI delivery mode\n"); | ||
133 | } | ||
134 | } | ||
135 | #else | ||
136 | static void assigned_device_msi_dispatch(struct kvm_assigned_dev_kernel *dev) {} | ||
137 | #endif | ||
138 | |||
81 | static struct kvm_assigned_dev_kernel *kvm_find_assigned_dev(struct list_head *head, | 139 | static struct kvm_assigned_dev_kernel *kvm_find_assigned_dev(struct list_head *head, |
82 | int assigned_dev_id) | 140 | int assigned_dev_id) |
83 | { | 141 | { |
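
As a worked decode of the field extraction in assigned_device_msi_dispatch(), assuming the standard x86 MSI layout (destination ID in address bits 19:12, vector in data bits 7:0); the sample values are purely illustrative:

    /* address_lo = 0xfee01000, data = 0x00000041 (examples)
     * dest_id = (0xfee01000 >> 12) & 0xff = 0x01
     * vector  =  0x00000041        & 0xff = 0x41
     * so the message targets APIC ID 1 with vector 0x41. */
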
@@ -104,9 +162,16 @@ static void kvm_assigned_dev_interrupt_work_handler(struct work_struct *work) | |||
104 | * finer-grained lock, update this | 162 | * finer-grained lock, update this |
105 | */ | 163 | */ |
106 | mutex_lock(&assigned_dev->kvm->lock); | 164 | mutex_lock(&assigned_dev->kvm->lock); |
107 | kvm_set_irq(assigned_dev->kvm, | 165 | if (assigned_dev->irq_requested_type & KVM_ASSIGNED_DEV_GUEST_INTX) |
108 | assigned_dev->irq_source_id, | 166 | kvm_set_irq(assigned_dev->kvm, |
109 | assigned_dev->guest_irq, 1); | 167 | assigned_dev->irq_source_id, |
168 | assigned_dev->guest_irq, 1); | ||
169 | else if (assigned_dev->irq_requested_type & | ||
170 | KVM_ASSIGNED_DEV_GUEST_MSI) { | ||
171 | assigned_device_msi_dispatch(assigned_dev); | ||
172 | enable_irq(assigned_dev->host_irq); | ||
173 | assigned_dev->host_irq_disabled = false; | ||
174 | } | ||
110 | mutex_unlock(&assigned_dev->kvm->lock); | 175 | mutex_unlock(&assigned_dev->kvm->lock); |
111 | kvm_put_kvm(assigned_dev->kvm); | 176 | kvm_put_kvm(assigned_dev->kvm); |
112 | } | 177 | } |
@@ -117,8 +182,12 @@ static irqreturn_t kvm_assigned_dev_intr(int irq, void *dev_id) | |||
117 | (struct kvm_assigned_dev_kernel *) dev_id; | 182 | (struct kvm_assigned_dev_kernel *) dev_id; |
118 | 183 | ||
119 | kvm_get_kvm(assigned_dev->kvm); | 184 | kvm_get_kvm(assigned_dev->kvm); |
185 | |||
120 | schedule_work(&assigned_dev->interrupt_work); | 186 | schedule_work(&assigned_dev->interrupt_work); |
187 | |||
121 | disable_irq_nosync(irq); | 188 | disable_irq_nosync(irq); |
189 | assigned_dev->host_irq_disabled = true; | ||
190 | |||
122 | return IRQ_HANDLED; | 191 | return IRQ_HANDLED; |
123 | } | 192 | } |
124 | 193 | ||
@@ -132,19 +201,32 @@ static void kvm_assigned_dev_ack_irq(struct kvm_irq_ack_notifier *kian) | |||
132 | 201 | ||
133 | dev = container_of(kian, struct kvm_assigned_dev_kernel, | 202 | dev = container_of(kian, struct kvm_assigned_dev_kernel, |
134 | ack_notifier); | 203 | ack_notifier); |
204 | |||
135 | kvm_set_irq(dev->kvm, dev->irq_source_id, dev->guest_irq, 0); | 205 | kvm_set_irq(dev->kvm, dev->irq_source_id, dev->guest_irq, 0); |
136 | enable_irq(dev->host_irq); | 206 | |
207 | /* The guest irq may be shared so this ack may be | ||
208 | * from another device. | ||
209 | */ | ||
210 | if (dev->host_irq_disabled) { | ||
211 | enable_irq(dev->host_irq); | ||
212 | dev->host_irq_disabled = false; | ||
213 | } | ||
137 | } | 214 | } |
138 | 215 | ||
139 | static void kvm_free_assigned_device(struct kvm *kvm, | 216 | static void kvm_free_assigned_irq(struct kvm *kvm, |
140 | struct kvm_assigned_dev_kernel | 217 | struct kvm_assigned_dev_kernel *assigned_dev) |
141 | *assigned_dev) | ||
142 | { | 218 | { |
143 | if (irqchip_in_kernel(kvm) && assigned_dev->irq_requested) | 219 | if (!irqchip_in_kernel(kvm)) |
144 | free_irq(assigned_dev->host_irq, (void *)assigned_dev); | 220 | return; |
221 | |||
222 | kvm_unregister_irq_ack_notifier(&assigned_dev->ack_notifier); | ||
145 | 223 | ||
146 | kvm_unregister_irq_ack_notifier(kvm, &assigned_dev->ack_notifier); | 224 | if (assigned_dev->irq_source_id != -1) |
147 | kvm_free_irq_source_id(kvm, assigned_dev->irq_source_id); | 225 | kvm_free_irq_source_id(kvm, assigned_dev->irq_source_id); |
226 | assigned_dev->irq_source_id = -1; | ||
227 | |||
228 | if (!assigned_dev->irq_requested_type) | ||
229 | return; | ||
148 | 230 | ||
149 | if (cancel_work_sync(&assigned_dev->interrupt_work)) | 231 | if (cancel_work_sync(&assigned_dev->interrupt_work)) |
150 | /* We had pending work. That means we will have to take | 232 | /* We had pending work. That means we will have to take |
@@ -152,6 +234,23 @@ static void kvm_free_assigned_device(struct kvm *kvm, | |||
152 | */ | 234 | */ |
153 | kvm_put_kvm(kvm); | 235 | kvm_put_kvm(kvm); |
154 | 236 | ||
237 | free_irq(assigned_dev->host_irq, (void *)assigned_dev); | ||
238 | |||
239 | if (assigned_dev->irq_requested_type & KVM_ASSIGNED_DEV_HOST_MSI) | ||
240 | pci_disable_msi(assigned_dev->dev); | ||
241 | |||
242 | assigned_dev->irq_requested_type = 0; | ||
243 | } | ||
244 | |||
245 | |||
246 | static void kvm_free_assigned_device(struct kvm *kvm, | ||
247 | struct kvm_assigned_dev_kernel | ||
248 | *assigned_dev) | ||
249 | { | ||
250 | kvm_free_assigned_irq(kvm, assigned_dev); | ||
251 | |||
252 | pci_reset_function(assigned_dev->dev); | ||
253 | |||
155 | pci_release_regions(assigned_dev->dev); | 254 | pci_release_regions(assigned_dev->dev); |
156 | pci_disable_device(assigned_dev->dev); | 255 | pci_disable_device(assigned_dev->dev); |
157 | pci_dev_put(assigned_dev->dev); | 256 | pci_dev_put(assigned_dev->dev); |
@@ -174,6 +273,95 @@ void kvm_free_all_assigned_devices(struct kvm *kvm) | |||
174 | } | 273 | } |
175 | } | 274 | } |
176 | 275 | ||
276 | static int assigned_device_update_intx(struct kvm *kvm, | ||
277 | struct kvm_assigned_dev_kernel *adev, | ||
278 | struct kvm_assigned_irq *airq) | ||
279 | { | ||
280 | adev->guest_irq = airq->guest_irq; | ||
281 | adev->ack_notifier.gsi = airq->guest_irq; | ||
282 | |||
283 | if (adev->irq_requested_type & KVM_ASSIGNED_DEV_HOST_INTX) | ||
284 | return 0; | ||
285 | |||
286 | if (irqchip_in_kernel(kvm)) { | ||
287 | if (!msi2intx && | ||
288 | adev->irq_requested_type & KVM_ASSIGNED_DEV_HOST_MSI) { | ||
289 | free_irq(adev->host_irq, (void *)kvm); | ||
290 | pci_disable_msi(adev->dev); | ||
291 | } | ||
292 | |||
293 | if (!capable(CAP_SYS_RAWIO)) | ||
294 | return -EPERM; | ||
295 | |||
296 | if (airq->host_irq) | ||
297 | adev->host_irq = airq->host_irq; | ||
298 | else | ||
299 | adev->host_irq = adev->dev->irq; | ||
300 | |||
301 | /* Even though this is PCI, we don't want to use shared | ||
302 | * interrupts. Sharing host devices with guest-assigned devices | ||
303 | * on the same interrupt line is not a happy situation: there | ||
304 | * are going to be long delays in accepting, acking, etc. | ||
305 | */ | ||
306 | if (request_irq(adev->host_irq, kvm_assigned_dev_intr, | ||
307 | 0, "kvm_assigned_intx_device", (void *)adev)) | ||
308 | return -EIO; | ||
309 | } | ||
310 | |||
311 | adev->irq_requested_type = KVM_ASSIGNED_DEV_GUEST_INTX | | ||
312 | KVM_ASSIGNED_DEV_HOST_INTX; | ||
313 | return 0; | ||
314 | } | ||
315 | |||
316 | #ifdef CONFIG_X86 | ||
317 | static int assigned_device_update_msi(struct kvm *kvm, | ||
318 | struct kvm_assigned_dev_kernel *adev, | ||
319 | struct kvm_assigned_irq *airq) | ||
320 | { | ||
321 | int r; | ||
322 | |||
323 | if (airq->flags & KVM_DEV_IRQ_ASSIGN_ENABLE_MSI) { | ||
324 | /* x86 doesn't care about the upper address of the guest MSI message */ | ||
325 | adev->irq_requested_type |= KVM_ASSIGNED_DEV_GUEST_MSI; | ||
326 | adev->irq_requested_type &= ~KVM_ASSIGNED_DEV_GUEST_INTX; | ||
327 | adev->guest_msi.address_lo = airq->guest_msi.addr_lo; | ||
328 | adev->guest_msi.data = airq->guest_msi.data; | ||
329 | adev->ack_notifier.gsi = -1; | ||
330 | } else if (msi2intx) { | ||
331 | adev->irq_requested_type |= KVM_ASSIGNED_DEV_GUEST_INTX; | ||
332 | adev->irq_requested_type &= ~KVM_ASSIGNED_DEV_GUEST_MSI; | ||
333 | adev->guest_irq = airq->guest_irq; | ||
334 | adev->ack_notifier.gsi = airq->guest_irq; | ||
335 | } | ||
336 | |||
337 | if (adev->irq_requested_type & KVM_ASSIGNED_DEV_HOST_MSI) | ||
338 | return 0; | ||
339 | |||
340 | if (irqchip_in_kernel(kvm)) { | ||
341 | if (!msi2intx) { | ||
342 | if (adev->irq_requested_type & | ||
343 | KVM_ASSIGNED_DEV_HOST_INTX) | ||
344 | free_irq(adev->host_irq, (void *)adev); | ||
345 | |||
346 | r = pci_enable_msi(adev->dev); | ||
347 | if (r) | ||
348 | return r; | ||
349 | } | ||
350 | |||
351 | adev->host_irq = adev->dev->irq; | ||
352 | if (request_irq(adev->host_irq, kvm_assigned_dev_intr, 0, | ||
353 | "kvm_assigned_msi_device", (void *)adev)) | ||
354 | return -EIO; | ||
355 | } | ||
356 | |||
357 | if (!msi2intx) | ||
358 | adev->irq_requested_type = KVM_ASSIGNED_DEV_GUEST_MSI; | ||
359 | |||
360 | adev->irq_requested_type |= KVM_ASSIGNED_DEV_HOST_MSI; | ||
361 | return 0; | ||
362 | } | ||
363 | #endif | ||
364 | |||
177 | static int kvm_vm_ioctl_assign_irq(struct kvm *kvm, | 365 | static int kvm_vm_ioctl_assign_irq(struct kvm *kvm, |
178 | struct kvm_assigned_irq | 366 | struct kvm_assigned_irq |
179 | *assigned_irq) | 367 | *assigned_irq) |
@@ -190,49 +378,68 @@ static int kvm_vm_ioctl_assign_irq(struct kvm *kvm, | |||
190 | return -EINVAL; | 378 | return -EINVAL; |
191 | } | 379 | } |
192 | 380 | ||
193 | if (match->irq_requested) { | 381 | if (!match->irq_requested_type) { |
194 | match->guest_irq = assigned_irq->guest_irq; | 382 | INIT_WORK(&match->interrupt_work, |
195 | match->ack_notifier.gsi = assigned_irq->guest_irq; | 383 | kvm_assigned_dev_interrupt_work_handler); |
196 | mutex_unlock(&kvm->lock); | 384 | if (irqchip_in_kernel(kvm)) { |
197 | return 0; | 385 | /* Register ack notifier */ |
198 | } | 386 | match->ack_notifier.gsi = -1; |
387 | match->ack_notifier.irq_acked = | ||
388 | kvm_assigned_dev_ack_irq; | ||
389 | kvm_register_irq_ack_notifier(kvm, | ||
390 | &match->ack_notifier); | ||
391 | |||
392 | /* Request IRQ source ID */ | ||
393 | r = kvm_request_irq_source_id(kvm); | ||
394 | if (r < 0) | ||
395 | goto out_release; | ||
396 | else | ||
397 | match->irq_source_id = r; | ||
199 | 398 | ||
200 | INIT_WORK(&match->interrupt_work, | 399 | #ifdef CONFIG_X86 |
201 | kvm_assigned_dev_interrupt_work_handler); | 400 | /* Determine host device irq type, we can know the |
401 | * result from dev->msi_enabled */ | ||
402 | if (msi2intx) | ||
403 | pci_enable_msi(match->dev); | ||
404 | #endif | ||
405 | } | ||
406 | } | ||
202 | 407 | ||
203 | if (irqchip_in_kernel(kvm)) { | 408 | if ((!msi2intx && |
204 | if (!capable(CAP_SYS_RAWIO)) { | 409 | (assigned_irq->flags & KVM_DEV_IRQ_ASSIGN_ENABLE_MSI)) || |
205 | r = -EPERM; | 410 | (msi2intx && match->dev->msi_enabled)) { |
411 | #ifdef CONFIG_X86 | ||
412 | r = assigned_device_update_msi(kvm, match, assigned_irq); | ||
413 | if (r) { | ||
414 | printk(KERN_WARNING "kvm: failed to enable " | ||
415 | "MSI device!\n"); | ||
206 | goto out_release; | 416 | goto out_release; |
207 | } | 417 | } |
208 | 418 | #else | |
209 | if (assigned_irq->host_irq) | 419 | r = -ENOTTY; |
210 | match->host_irq = assigned_irq->host_irq; | 420 | #endif |
211 | else | 421 | } else if (assigned_irq->host_irq == 0 && match->dev->irq == 0) { |
212 | match->host_irq = match->dev->irq; | 422 | /* Host device IRQ 0 means don't support INTx */ |
213 | match->guest_irq = assigned_irq->guest_irq; | 423 | if (!msi2intx) { |
214 | match->ack_notifier.gsi = assigned_irq->guest_irq; | 424 | printk(KERN_WARNING |
215 | match->ack_notifier.irq_acked = kvm_assigned_dev_ack_irq; | 425 | "kvm: wait for device to enable MSI!\n"); |
216 | kvm_register_irq_ack_notifier(kvm, &match->ack_notifier); | 426 | r = 0; |
217 | r = kvm_request_irq_source_id(kvm); | 427 | } else { |
218 | if (r < 0) | 428 | printk(KERN_WARNING |
429 | "kvm: failed to enable MSI device!\n"); | ||
430 | r = -ENOTTY; | ||
219 | goto out_release; | 431 | goto out_release; |
220 | else | 432 | } |
221 | match->irq_source_id = r; | 433 | } else { |
222 | 434 | /* Non-sharing INTx mode */ | |
223 | /* Even though this is PCI, we don't want to use shared | 435 | r = assigned_device_update_intx(kvm, match, assigned_irq); |
224 | * interrupts. Sharing host devices with guest-assigned devices | 436 | if (r) { |
225 | * on the same interrupt line is not a happy situation: there | 437 | printk(KERN_WARNING "kvm: failed to enable " |
226 | * are going to be long delays in accepting, acking, etc. | 438 | "INTx device!\n"); |
227 | */ | ||
228 | if (request_irq(match->host_irq, kvm_assigned_dev_intr, 0, | ||
229 | "kvm_assigned_device", (void *)match)) { | ||
230 | r = -EIO; | ||
231 | goto out_release; | 439 | goto out_release; |
232 | } | 440 | } |
233 | } | 441 | } |
234 | 442 | ||
235 | match->irq_requested = true; | ||
236 | mutex_unlock(&kvm->lock); | 443 | mutex_unlock(&kvm->lock); |
237 | return r; | 444 | return r; |
238 | out_release: | 445 | out_release: |
@@ -283,11 +490,14 @@ static int kvm_vm_ioctl_assign_device(struct kvm *kvm, | |||
283 | __func__); | 490 | __func__); |
284 | goto out_disable; | 491 | goto out_disable; |
285 | } | 492 | } |
493 | |||
494 | pci_reset_function(dev); | ||
495 | |||
286 | match->assigned_dev_id = assigned_dev->assigned_dev_id; | 496 | match->assigned_dev_id = assigned_dev->assigned_dev_id; |
287 | match->host_busnr = assigned_dev->busnr; | 497 | match->host_busnr = assigned_dev->busnr; |
288 | match->host_devfn = assigned_dev->devfn; | 498 | match->host_devfn = assigned_dev->devfn; |
289 | match->dev = dev; | 499 | match->dev = dev; |
290 | 500 | match->irq_source_id = -1; | |
291 | match->kvm = kvm; | 501 | match->kvm = kvm; |
292 | 502 | ||
293 | list_add(&match->list, &kvm->arch.assigned_dev_head); | 503 | list_add(&match->list, &kvm->arch.assigned_dev_head); |
@@ -355,57 +565,48 @@ static void ack_flush(void *_completed) | |||
355 | { | 565 | { |
356 | } | 566 | } |
357 | 567 | ||
358 | void kvm_flush_remote_tlbs(struct kvm *kvm) | 568 | static bool make_all_cpus_request(struct kvm *kvm, unsigned int req) |
359 | { | 569 | { |
360 | int i, cpu, me; | 570 | int i, cpu, me; |
361 | cpumask_t cpus; | 571 | cpumask_var_t cpus; |
572 | bool called = true; | ||
362 | struct kvm_vcpu *vcpu; | 573 | struct kvm_vcpu *vcpu; |
363 | 574 | ||
575 | if (alloc_cpumask_var(&cpus, GFP_ATOMIC)) | ||
576 | cpumask_clear(cpus); | ||
577 | |||
364 | me = get_cpu(); | 578 | me = get_cpu(); |
365 | cpus_clear(cpus); | ||
366 | for (i = 0; i < KVM_MAX_VCPUS; ++i) { | 579 | for (i = 0; i < KVM_MAX_VCPUS; ++i) { |
367 | vcpu = kvm->vcpus[i]; | 580 | vcpu = kvm->vcpus[i]; |
368 | if (!vcpu) | 581 | if (!vcpu) |
369 | continue; | 582 | continue; |
370 | if (test_and_set_bit(KVM_REQ_TLB_FLUSH, &vcpu->requests)) | 583 | if (test_and_set_bit(req, &vcpu->requests)) |
371 | continue; | 584 | continue; |
372 | cpu = vcpu->cpu; | 585 | cpu = vcpu->cpu; |
373 | if (cpu != -1 && cpu != me) | 586 | if (cpus != NULL && cpu != -1 && cpu != me) |
374 | cpu_set(cpu, cpus); | 587 | cpumask_set_cpu(cpu, cpus); |
375 | } | 588 | } |
376 | if (cpus_empty(cpus)) | 589 | if (unlikely(cpus == NULL)) |
377 | goto out; | 590 | smp_call_function_many(cpu_online_mask, ack_flush, NULL, 1); |
378 | ++kvm->stat.remote_tlb_flush; | 591 | else if (!cpumask_empty(cpus)) |
379 | smp_call_function_mask(cpus, ack_flush, NULL, 1); | 592 | smp_call_function_many(cpus, ack_flush, NULL, 1); |
380 | out: | 593 | else |
594 | called = false; | ||
381 | put_cpu(); | 595 | put_cpu(); |
596 | free_cpumask_var(cpus); | ||
597 | return called; | ||
382 | } | 598 | } |
383 | 599 | ||
384 | void kvm_reload_remote_mmus(struct kvm *kvm) | 600 | void kvm_flush_remote_tlbs(struct kvm *kvm) |
385 | { | 601 | { |
386 | int i, cpu, me; | 602 | if (make_all_cpus_request(kvm, KVM_REQ_TLB_FLUSH)) |
387 | cpumask_t cpus; | 603 | ++kvm->stat.remote_tlb_flush; |
388 | struct kvm_vcpu *vcpu; | ||
389 | |||
390 | me = get_cpu(); | ||
391 | cpus_clear(cpus); | ||
392 | for (i = 0; i < KVM_MAX_VCPUS; ++i) { | ||
393 | vcpu = kvm->vcpus[i]; | ||
394 | if (!vcpu) | ||
395 | continue; | ||
396 | if (test_and_set_bit(KVM_REQ_MMU_RELOAD, &vcpu->requests)) | ||
397 | continue; | ||
398 | cpu = vcpu->cpu; | ||
399 | if (cpu != -1 && cpu != me) | ||
400 | cpu_set(cpu, cpus); | ||
401 | } | ||
402 | if (cpus_empty(cpus)) | ||
403 | goto out; | ||
404 | smp_call_function_mask(cpus, ack_flush, NULL, 1); | ||
405 | out: | ||
406 | put_cpu(); | ||
407 | } | 604 | } |
408 | 605 | ||
606 | void kvm_reload_remote_mmus(struct kvm *kvm) | ||
607 | { | ||
608 | make_all_cpus_request(kvm, KVM_REQ_MMU_RELOAD); | ||
609 | } | ||
409 | 610 | ||
410 | int kvm_vcpu_init(struct kvm_vcpu *vcpu, struct kvm *kvm, unsigned id) | 611 | int kvm_vcpu_init(struct kvm_vcpu *vcpu, struct kvm *kvm, unsigned id) |
411 | { | 612 | { |
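The two hand-rolled IPI loops above collapse into one helper: set the request bit on every vcpu, collect the CPUs currently running one, and kick only those; if the scratch cpumask cannot be allocated, fall back to kicking every online CPU. A userspace analogue of that shape, with toy worker/CPU structures standing in for kvm_vcpu (cpu ids are assumed to be < n):

    #include <stdbool.h>
    #include <stdio.h>
    #include <stdlib.h>

    struct worker { int cpu; unsigned requests; };  /* toy stand-in for kvm_vcpu */

    /* Returns true if any kick was sent (cf. make_all_cpus_request()). */
    static bool post_request_all(struct worker *w, int n, unsigned bit, int me)
    {
            bool *cpus = calloc(n, sizeof(*cpus));  /* scratch set, may fail */
            bool called = true;
            int targets = 0;

            for (int i = 0; i < n; i++) {
                    if (w[i].requests & bit)        /* already pending: no kick needed */
                            continue;
                    w[i].requests |= bit;
                    if (cpus && w[i].cpu != -1 && w[i].cpu != me) {
                            if (!cpus[w[i].cpu])
                                    targets++;
                            cpus[w[i].cpu] = true;
                    }
            }
            if (!cpus)
                    printf("broadcast kick (allocation failed)\n");
            else if (targets)
                    printf("targeted kick to %d CPU(s)\n", targets);
            else
                    called = false;                 /* every worker already had the bit */
            free(cpus);
            return called;
    }

    int main(void)
    {
            struct worker w[2] = { { .cpu = 1 }, { .cpu = -1 } };
            return !post_request_all(w, 2, 1u << 0, 0);
    }

The return value is what lets kvm_flush_remote_tlbs() bump the remote_tlb_flush statistic only when a flush was actually requested somewhere.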
@@ -710,6 +911,8 @@ int __kvm_set_memory_region(struct kvm *kvm, | |||
710 | goto out; | 911 | goto out; |
711 | if (mem->guest_phys_addr & (PAGE_SIZE - 1)) | 912 | if (mem->guest_phys_addr & (PAGE_SIZE - 1)) |
712 | goto out; | 913 | goto out; |
914 | if (user_alloc && (mem->userspace_addr & (PAGE_SIZE - 1))) | ||
915 | goto out; | ||
713 | if (mem->slot >= KVM_MEMORY_SLOTS + KVM_PRIVATE_MEM_SLOTS) | 916 | if (mem->slot >= KVM_MEMORY_SLOTS + KVM_PRIVATE_MEM_SLOTS) |
714 | goto out; | 917 | goto out; |
715 | if (mem->guest_phys_addr + mem->memory_size < mem->guest_phys_addr) | 918 | if (mem->guest_phys_addr + mem->memory_size < mem->guest_phys_addr) |
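The new userspace_addr check applies the same mask test the guest_phys_addr line above it uses: an address is page-aligned exactly when its low PAGE_SIZE-1 bits are clear. In isolation:

    #include <stdbool.h>

    #define PAGE_SIZE 4096UL

    /* True when addr sits on a page boundary: no low bits set. */
    static bool page_aligned(unsigned long addr)
    {
            return (addr & (PAGE_SIZE - 1)) == 0;
    }

    int main(void)
    {
            return page_aligned(8192) ? 0 : 1;      /* 8192 = 2 pages, aligned */
    }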
@@ -821,7 +1024,10 @@ int __kvm_set_memory_region(struct kvm *kvm, | |||
821 | goto out_free; | 1024 | goto out_free; |
822 | } | 1025 | } |
823 | 1026 | ||
824 | kvm_free_physmem_slot(&old, &new); | 1027 | kvm_free_physmem_slot(&old, npages ? &new : NULL); |
1028 | /* Slot deletion case: we have to update the current slot */ | ||
1029 | if (!npages) | ||
1030 | *memslot = old; | ||
825 | #ifdef CONFIG_DMAR | 1031 | #ifdef CONFIG_DMAR |
826 | /* map the pages in iommu page table */ | 1032 | /* map the pages in iommu page table */ |
827 | r = kvm_iommu_map_pages(kvm, base_gfn, npages); | 1033 | r = kvm_iommu_map_pages(kvm, base_gfn, npages); |
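The deletion fix hinges on kvm_free_physmem_slot()'s two-argument contract: it releases only the parts of its first slot that the second slot does not also reference, so passing NULL (the npages == 0 case) frees everything, after which the emptied descriptor is copied back over the live slot. A userspace analogue of that contract, assuming the era's rmap/dirty_bitmap slot fields:

    #include <stdlib.h>

    struct slot { unsigned long npages; void *rmap; void *dirty_bitmap; };

    /* Free the parts of *f that *dont does not share; dont == NULL frees all. */
    static void free_slot(struct slot *f, const struct slot *dont)
    {
            if (!dont || f->rmap != dont->rmap)
                    free(f->rmap);
            if (!dont || f->dirty_bitmap != dont->dirty_bitmap)
                    free(f->dirty_bitmap);
            f->npages = 0;
            f->rmap = NULL;
            f->dirty_bitmap = NULL;
    }

    int main(void)
    {
            struct slot live = { 1, malloc(8), malloc(8) };
            struct slot old = live;     /* deletion: the new slot has npages == 0 */
            free_slot(&old, NULL);      /* release everything the old slot held */
            live = old;                 /* copy the emptied descriptor back */
            return (int)live.npages;    /* 0 */
    }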
@@ -918,7 +1124,7 @@ int kvm_is_error_hva(unsigned long addr) | |||
918 | } | 1124 | } |
919 | EXPORT_SYMBOL_GPL(kvm_is_error_hva); | 1125 | EXPORT_SYMBOL_GPL(kvm_is_error_hva); |
920 | 1126 | ||
921 | static struct kvm_memory_slot *__gfn_to_memslot(struct kvm *kvm, gfn_t gfn) | 1127 | struct kvm_memory_slot *gfn_to_memslot_unaliased(struct kvm *kvm, gfn_t gfn) |
922 | { | 1128 | { |
923 | int i; | 1129 | int i; |
924 | 1130 | ||
@@ -931,11 +1137,12 @@ static struct kvm_memory_slot *__gfn_to_memslot(struct kvm *kvm, gfn_t gfn) | |||
931 | } | 1137 | } |
932 | return NULL; | 1138 | return NULL; |
933 | } | 1139 | } |
1140 | EXPORT_SYMBOL_GPL(gfn_to_memslot_unaliased); | ||
934 | 1141 | ||
935 | struct kvm_memory_slot *gfn_to_memslot(struct kvm *kvm, gfn_t gfn) | 1142 | struct kvm_memory_slot *gfn_to_memslot(struct kvm *kvm, gfn_t gfn) |
936 | { | 1143 | { |
937 | gfn = unalias_gfn(kvm, gfn); | 1144 | gfn = unalias_gfn(kvm, gfn); |
938 | return __gfn_to_memslot(kvm, gfn); | 1145 | return gfn_to_memslot_unaliased(kvm, gfn); |
939 | } | 1146 | } |
940 | 1147 | ||
941 | int kvm_is_visible_gfn(struct kvm *kvm, gfn_t gfn) | 1148 | int kvm_is_visible_gfn(struct kvm *kvm, gfn_t gfn) |
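The rename from __gfn_to_memslot() to the exported gfn_to_memslot_unaliased() makes the two-step lookup explicit at every call site: resolve aliases with unalias_gfn() first, then do a linear scan of the registered slots. A sketch of the scan itself, with the slot fields abridged:

    #include <stddef.h>

    typedef unsigned long gfn_t;

    struct memslot { gfn_t base_gfn; unsigned long npages; };

    /* Linear scan: return the slot covering gfn, or NULL
     * (cf. gfn_to_memslot_unaliased()). */
    static struct memslot *lookup_unaliased(struct memslot *s, int n, gfn_t gfn)
    {
            for (int i = 0; i < n; i++)
                    if (gfn >= s[i].base_gfn &&
                        gfn < s[i].base_gfn + s[i].npages)
                            return &s[i];
            return NULL;
    }

    int main(void)
    {
            struct memslot slots[] = { { 0x100, 16 } };
            return lookup_unaliased(slots, 1, 0x105) ? 0 : 1;
    }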
@@ -959,7 +1166,7 @@ unsigned long gfn_to_hva(struct kvm *kvm, gfn_t gfn) | |||
959 | struct kvm_memory_slot *slot; | 1166 | struct kvm_memory_slot *slot; |
960 | 1167 | ||
961 | gfn = unalias_gfn(kvm, gfn); | 1168 | gfn = unalias_gfn(kvm, gfn); |
962 | slot = __gfn_to_memslot(kvm, gfn); | 1169 | slot = gfn_to_memslot_unaliased(kvm, gfn); |
963 | if (!slot) | 1170 | if (!slot) |
964 | return bad_hva(); | 1171 | return bad_hva(); |
965 | return (slot->userspace_addr + (gfn - slot->base_gfn) * PAGE_SIZE); | 1172 | return (slot->userspace_addr + (gfn - slot->base_gfn) * PAGE_SIZE); |
@@ -1210,7 +1417,7 @@ void mark_page_dirty(struct kvm *kvm, gfn_t gfn) | |||
1210 | struct kvm_memory_slot *memslot; | 1417 | struct kvm_memory_slot *memslot; |
1211 | 1418 | ||
1212 | gfn = unalias_gfn(kvm, gfn); | 1419 | gfn = unalias_gfn(kvm, gfn); |
1213 | memslot = __gfn_to_memslot(kvm, gfn); | 1420 | memslot = gfn_to_memslot_unaliased(kvm, gfn); |
1214 | if (memslot && memslot->dirty_bitmap) { | 1421 | if (memslot && memslot->dirty_bitmap) { |
1215 | unsigned long rel_gfn = gfn - memslot->base_gfn; | 1422 | unsigned long rel_gfn = gfn - memslot->base_gfn; |
1216 | 1423 | ||
@@ -1295,7 +1502,7 @@ static int kvm_vcpu_release(struct inode *inode, struct file *filp) | |||
1295 | return 0; | 1502 | return 0; |
1296 | } | 1503 | } |
1297 | 1504 | ||
1298 | static const struct file_operations kvm_vcpu_fops = { | 1505 | static struct file_operations kvm_vcpu_fops = { |
1299 | .release = kvm_vcpu_release, | 1506 | .release = kvm_vcpu_release, |
1300 | .unlocked_ioctl = kvm_vcpu_ioctl, | 1507 | .unlocked_ioctl = kvm_vcpu_ioctl, |
1301 | .compat_ioctl = kvm_vcpu_ioctl, | 1508 | .compat_ioctl = kvm_vcpu_ioctl, |
@@ -1689,7 +1896,7 @@ static int kvm_vm_mmap(struct file *file, struct vm_area_struct *vma) | |||
1689 | return 0; | 1896 | return 0; |
1690 | } | 1897 | } |
1691 | 1898 | ||
1692 | static const struct file_operations kvm_vm_fops = { | 1899 | static struct file_operations kvm_vm_fops = { |
1693 | .release = kvm_vm_release, | 1900 | .release = kvm_vm_release, |
1694 | .unlocked_ioctl = kvm_vm_ioctl, | 1901 | .unlocked_ioctl = kvm_vm_ioctl, |
1695 | .compat_ioctl = kvm_vm_ioctl, | 1902 | .compat_ioctl = kvm_vm_ioctl, |
@@ -1711,6 +1918,18 @@ static int kvm_dev_ioctl_create_vm(void) | |||
1711 | return fd; | 1918 | return fd; |
1712 | } | 1919 | } |
1713 | 1920 | ||
1921 | static long kvm_dev_ioctl_check_extension_generic(long arg) | ||
1922 | { | ||
1923 | switch (arg) { | ||
1924 | case KVM_CAP_USER_MEMORY: | ||
1925 | case KVM_CAP_DESTROY_MEMORY_REGION_WORKS: | ||
1926 | return 1; | ||
1927 | default: | ||
1928 | break; | ||
1929 | } | ||
1930 | return kvm_dev_ioctl_check_extension(arg); | ||
1931 | } | ||
1932 | |||
1714 | static long kvm_dev_ioctl(struct file *filp, | 1933 | static long kvm_dev_ioctl(struct file *filp, |
1715 | unsigned int ioctl, unsigned long arg) | 1934 | unsigned int ioctl, unsigned long arg) |
1716 | { | 1935 | { |
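The new wrapper answers capabilities every architecture supports (KVM_CAP_USER_MEMORY, KVM_CAP_DESTROY_MEMORY_REGION_WORKS) before deferring to the per-arch kvm_dev_ioctl_check_extension(). From userspace the split is invisible; a probe still looks like this (error handling abridged):

    #include <fcntl.h>
    #include <stdio.h>
    #include <sys/ioctl.h>
    #include <linux/kvm.h>

    int main(void)
    {
            int fd = open("/dev/kvm", O_RDWR);
            if (fd < 0) {
                    perror("open /dev/kvm");
                    return 1;
            }
            /* A return value > 0 means the capability is present. */
            int r = ioctl(fd, KVM_CHECK_EXTENSION, KVM_CAP_USER_MEMORY);
            printf("KVM_CAP_USER_MEMORY: %s\n", r > 0 ? "yes" : "no");
            return 0;
    }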
@@ -1730,7 +1949,7 @@ static long kvm_dev_ioctl(struct file *filp, | |||
1730 | r = kvm_dev_ioctl_create_vm(); | 1949 | r = kvm_dev_ioctl_create_vm(); |
1731 | break; | 1950 | break; |
1732 | case KVM_CHECK_EXTENSION: | 1951 | case KVM_CHECK_EXTENSION: |
1733 | r = kvm_dev_ioctl_check_extension(arg); | 1952 | r = kvm_dev_ioctl_check_extension_generic(arg); |
1734 | break; | 1953 | break; |
1735 | case KVM_GET_VCPU_MMAP_SIZE: | 1954 | case KVM_GET_VCPU_MMAP_SIZE: |
1736 | r = -EINVAL; | 1955 | r = -EINVAL; |
@@ -1771,9 +1990,9 @@ static void hardware_enable(void *junk) | |||
1771 | { | 1990 | { |
1772 | int cpu = raw_smp_processor_id(); | 1991 | int cpu = raw_smp_processor_id(); |
1773 | 1992 | ||
1774 | if (cpu_isset(cpu, cpus_hardware_enabled)) | 1993 | if (cpumask_test_cpu(cpu, cpus_hardware_enabled)) |
1775 | return; | 1994 | return; |
1776 | cpu_set(cpu, cpus_hardware_enabled); | 1995 | cpumask_set_cpu(cpu, cpus_hardware_enabled); |
1777 | kvm_arch_hardware_enable(NULL); | 1996 | kvm_arch_hardware_enable(NULL); |
1778 | } | 1997 | } |
1779 | 1998 | ||
@@ -1781,9 +2000,9 @@ static void hardware_disable(void *junk) | |||
1781 | { | 2000 | { |
1782 | int cpu = raw_smp_processor_id(); | 2001 | int cpu = raw_smp_processor_id(); |
1783 | 2002 | ||
1784 | if (!cpu_isset(cpu, cpus_hardware_enabled)) | 2003 | if (!cpumask_test_cpu(cpu, cpus_hardware_enabled)) |
1785 | return; | 2004 | return; |
1786 | cpu_clear(cpu, cpus_hardware_enabled); | 2005 | cpumask_clear_cpu(cpu, cpus_hardware_enabled); |
1787 | kvm_arch_hardware_disable(NULL); | 2006 | kvm_arch_hardware_disable(NULL); |
1788 | } | 2007 | } |
1789 | 2008 | ||
@@ -2017,9 +2236,14 @@ int kvm_init(void *opaque, unsigned int vcpu_size, | |||
2017 | 2236 | ||
2018 | bad_pfn = page_to_pfn(bad_page); | 2237 | bad_pfn = page_to_pfn(bad_page); |
2019 | 2238 | ||
2239 | if (!alloc_cpumask_var(&cpus_hardware_enabled, GFP_KERNEL)) { | ||
2240 | r = -ENOMEM; | ||
2241 | goto out_free_0; | ||
2242 | } | ||
2243 | |||
2020 | r = kvm_arch_hardware_setup(); | 2244 | r = kvm_arch_hardware_setup(); |
2021 | if (r < 0) | 2245 | if (r < 0) |
2022 | goto out_free_0; | 2246 | goto out_free_0a; |
2023 | 2247 | ||
2024 | for_each_online_cpu(cpu) { | 2248 | for_each_online_cpu(cpu) { |
2025 | smp_call_function_single(cpu, | 2249 | smp_call_function_single(cpu, |
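The cpumask allocation slots into kvm_init()'s goto-based unwind: each setup step that can fail jumps to the label that releases exactly what preceded it, which is why a fresh out_free_0a label appears between out_free_1 and out_free_0 further down. The shape in miniature, with hypothetical step names:

    #include <stdlib.h>

    static void *step_a, *step_b;

    static int init_in_order(void)
    {
            step_a = malloc(64);            /* like alloc_cpumask_var() */
            if (!step_a)
                    goto out;
            step_b = malloc(64);            /* like kvm_arch_hardware_setup() */
            if (!step_b)
                    goto out_free_a;        /* the out_free_0a analogue */
            return 0;                       /* success: both stay live */

    out_free_a:
            free(step_a);
    out:
            return -1;
    }

    int main(void)
    {
            return init_in_order();
    }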
@@ -2053,6 +2277,8 @@ int kvm_init(void *opaque, unsigned int vcpu_size, | |||
2053 | } | 2277 | } |
2054 | 2278 | ||
2055 | kvm_chardev_ops.owner = module; | 2279 | kvm_chardev_ops.owner = module; |
2280 | kvm_vm_fops.owner = module; | ||
2281 | kvm_vcpu_fops.owner = module; | ||
2056 | 2282 | ||
2057 | r = misc_register(&kvm_dev); | 2283 | r = misc_register(&kvm_dev); |
2058 | if (r) { | 2284 | if (r) { |
@@ -2062,6 +2288,9 @@ int kvm_init(void *opaque, unsigned int vcpu_size, | |||
2062 | 2288 | ||
2063 | kvm_preempt_ops.sched_in = kvm_sched_in; | 2289 | kvm_preempt_ops.sched_in = kvm_sched_in; |
2064 | kvm_preempt_ops.sched_out = kvm_sched_out; | 2290 | kvm_preempt_ops.sched_out = kvm_sched_out; |
2291 | #ifndef CONFIG_X86 | ||
2292 | msi2intx = 0; | ||
2293 | #endif | ||
2065 | 2294 | ||
2066 | return 0; | 2295 | return 0; |
2067 | 2296 | ||
@@ -2078,6 +2307,8 @@ out_free_2: | |||
2078 | on_each_cpu(hardware_disable, NULL, 1); | 2307 | on_each_cpu(hardware_disable, NULL, 1); |
2079 | out_free_1: | 2308 | out_free_1: |
2080 | kvm_arch_hardware_unsetup(); | 2309 | kvm_arch_hardware_unsetup(); |
2310 | out_free_0a: | ||
2311 | free_cpumask_var(cpus_hardware_enabled); | ||
2081 | out_free_0: | 2312 | out_free_0: |
2082 | __free_page(bad_page); | 2313 | __free_page(bad_page); |
2083 | out: | 2314 | out: |
@@ -2101,6 +2332,7 @@ void kvm_exit(void) | |||
2101 | kvm_arch_hardware_unsetup(); | 2332 | kvm_arch_hardware_unsetup(); |
2102 | kvm_arch_exit(); | 2333 | kvm_arch_exit(); |
2103 | kvm_exit_debug(); | 2334 | kvm_exit_debug(); |
2335 | free_cpumask_var(cpus_hardware_enabled); | ||
2104 | __free_page(bad_page); | 2336 | __free_page(bad_page); |
2105 | } | 2337 | } |
2106 | EXPORT_SYMBOL_GPL(kvm_exit); | 2338 | EXPORT_SYMBOL_GPL(kvm_exit); |
diff --git a/virt/kvm/kvm_trace.c b/virt/kvm/kvm_trace.c index 41dcc845f78c..f59874446440 100644 --- a/virt/kvm/kvm_trace.c +++ b/virt/kvm/kvm_trace.c | |||
@@ -252,6 +252,7 @@ void kvm_trace_cleanup(void) | |||
252 | struct kvm_trace_probe *p = &kvm_trace_probes[i]; | 252 | struct kvm_trace_probe *p = &kvm_trace_probes[i]; |
253 | marker_probe_unregister(p->name, p->probe_func, p); | 253 | marker_probe_unregister(p->name, p->probe_func, p); |
254 | } | 254 | } |
255 | marker_synchronize_unregister(); | ||
255 | 256 | ||
256 | relay_close(kt->rchan); | 257 | relay_close(kt->rchan); |
257 | debugfs_remove(kt->lost_file); | 258 | debugfs_remove(kt->lost_file); |
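The added marker_synchronize_unregister() closes a teardown race: after the probes are unregistered, it waits until no CPU can still be executing one before the relay channel and debugfs files are torn down below. A userspace analogue of unregister-then-synchronize using a reader/writer lock (all names illustrative; the kernel primitive is RCU-based, not a lock):

    #include <pthread.h>
    #include <stddef.h>

    static pthread_rwlock_t probe_lock = PTHREAD_RWLOCK_INITIALIZER;
    static void (*probe)(void);

    static void fire_probe(void)            /* hot path: read side */
    {
            pthread_rwlock_rdlock(&probe_lock);
            if (probe)
                    probe();
            pthread_rwlock_unlock(&probe_lock);
    }

    static void unregister_and_synchronize(void)
    {
            /* Taking the write side waits out every in-flight caller,
             * so the probe's data can be freed safely afterwards. */
            pthread_rwlock_wrlock(&probe_lock);
            probe = NULL;
            pthread_rwlock_unlock(&probe_lock);
    }

    int main(void)
    {
            fire_probe();
            unregister_and_synchronize();
            return 0;
    }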