100 files changed, 1580 insertions, 2682 deletions
diff --git a/Documentation/ide/ide-tape.txt b/Documentation/ide/ide-tape.txt
index 658f271a373f..3f348a0b21d8 100644
--- a/Documentation/ide/ide-tape.txt
+++ b/Documentation/ide/ide-tape.txt
@@ -1,146 +1,65 @@ | |||
1 | /* | 1 | IDE ATAPI streaming tape driver. |
2 | * IDE ATAPI streaming tape driver. | 2 | |
3 | * | 3 | This driver is a part of the Linux ide driver. |
4 | * This driver is a part of the Linux ide driver. | 4 | |
5 | * | 5 | The driver, in co-operation with ide.c, basically traverses the |
6 | * The driver, in co-operation with ide.c, basically traverses the | 6 | request-list for the block device interface. The character device |
7 | * request-list for the block device interface. The character device | 7 | interface, on the other hand, creates new requests, adds them |
8 | * interface, on the other hand, creates new requests, adds them | 8 | to the request-list of the block device, and waits for their completion. |
9 | * to the request-list of the block device, and waits for their completion. | 9 | |
10 | * | 10 | The block device major and minor numbers are determined from the |
11 | * Pipelined operation mode is now supported on both reads and writes. | 11 | tape's relative position in the ide interfaces, as explained in ide.c. |
12 | * | 12 | |
13 | * The block device major and minor numbers are determined from the | 13 | The character device interface consists of the following devices: |
14 | * tape's relative position in the ide interfaces, as explained in ide.c. | 14 | |
15 | * | 15 | ht0 major 37, minor 0 first IDE tape, rewind on close. |
16 | * The character device interface consists of the following devices: | 16 | ht1 major 37, minor 1 second IDE tape, rewind on close. |
17 | * | 17 | ... |
18 | * ht0 major 37, minor 0 first IDE tape, rewind on close. | 18 | nht0 major 37, minor 128 first IDE tape, no rewind on close. |
19 | * ht1 major 37, minor 1 second IDE tape, rewind on close. | 19 | nht1 major 37, minor 129 second IDE tape, no rewind on close. |
20 | * ... | 20 | ... |
21 | * nht0 major 37, minor 128 first IDE tape, no rewind on close. | 21 | |
22 | * nht1 major 37, minor 129 second IDE tape, no rewind on close. | 22 | The general magnetic tape commands compatible interface, as defined by |
23 | * ... | 23 | include/linux/mtio.h, is accessible through the character device. |
24 | * | 24 | |
25 | * The general magnetic tape commands compatible interface, as defined by | 25 | General ide driver configuration options, such as the interrupt-unmask |
26 | * include/linux/mtio.h, is accessible through the character device. | 26 | flag, can be configured by issuing an ioctl to the block device interface, |
27 | * | 27 | as any other ide device. |
28 | * General ide driver configuration options, such as the interrupt-unmask | 28 | |
29 | * flag, can be configured by issuing an ioctl to the block device interface, | 29 | Our own ide-tape ioctl's can be issued to either the block device or |
30 | * as any other ide device. | 30 | the character device interface. |
31 | * | 31 | |
32 | * Our own ide-tape ioctl's can be issued to either the block device or | 32 | Maximal throughput with minimal bus load will usually be achieved in the |
33 | * the character device interface. | 33 | following scenario: |
34 | * | 34 | |
35 | * Maximal throughput with minimal bus load will usually be achieved in the | 35 | 1. ide-tape is operating in the pipelined operation mode. |
36 | * following scenario: | 36 | 2. No buffering is performed by the user backup program. |
37 | * | 37 | |
38 | * 1. ide-tape is operating in the pipelined operation mode. | 38 | Testing was done with a 2 GB CONNER CTMA 4000 IDE ATAPI Streaming Tape Drive. |
39 | * 2. No buffering is performed by the user backup program. | 39 | |
40 | * | 40 | Here are some words from the first releases of hd.c, which are quoted |
41 | * Testing was done with a 2 GB CONNER CTMA 4000 IDE ATAPI Streaming Tape Drive. | 41 | in ide.c and apply here as well: |
42 | * | 42 | |
43 | * Here are some words from the first releases of hd.c, which are quoted | 43 | | Special care is recommended. Have Fun! |
44 | * in ide.c and apply here as well: | 44 | |
45 | * | 45 | Possible improvements: |
46 | * | Special care is recommended. Have Fun! | 46 | |
47 | * | 47 | 1. Support for the ATAPI overlap protocol. |
48 | * | 48 | |
49 | * An overview of the pipelined operation mode. | 49 | In order to maximize bus throughput, we currently use the DSC |
50 | * | 50 | overlap method which enables ide.c to service requests from the |
51 | * In the pipelined write mode, we will usually just add requests to our | 51 | other device while the tape is busy executing a command. The |
52 | * pipeline and return immediately, before we even start to service them. The | 52 | DSC overlap method involves polling the tape's status register |
53 | * user program will then have enough time to prepare the next request while | 53 | for the DSC bit, and servicing the other device while the tape |
54 | * we are still busy servicing previous requests. In the pipelined read mode, | 54 | isn't ready. |
55 | * the situation is similar - we add read-ahead requests into the pipeline, | 55 | |
56 | * before the user even requested them. | 56 | In the current QIC development standard (December 1995), |
57 | * | 57 | it is recommended that new tape drives will *in addition* |
58 | * The pipeline can be viewed as a "safety net" which will be activated when | 58 | implement the ATAPI overlap protocol, which is used for the |
59 | * the system load is high and prevents the user backup program from keeping up | 59 | same purpose - efficient use of the IDE bus, but is interrupt |
60 | * with the current tape speed. At this point, the pipeline will get | 60 | driven and thus has much less CPU overhead. |
61 | * shorter and shorter but the tape will still be streaming at the same speed. | 61 | |
62 | * Assuming we have enough pipeline stages, the system load will hopefully | 62 | ATAPI overlap is likely to be supported in most new ATAPI |
63 | * decrease before the pipeline is completely empty, and the backup program | 63 | devices, including new ATAPI cdroms, and thus provides us |
64 | * will be able to "catch up" and refill the pipeline again. | 64 | a method by which we can achieve higher throughput when |
65 | * | 65 | sharing a (fast) ATA-2 disk with any (slow) new ATAPI device. |
66 | * When using the pipelined mode, it would be best to disable any type of | ||
67 | * buffering done by the user program, as ide-tape already provides all the | ||
68 | * benefits in the kernel, where it can be done in a more efficient way. | ||
69 | * As we will usually not block the user program on a request, the most | ||
70 | * efficient user code will then be a simple read-write-read-... cycle. | ||
71 | * Any additional logic will usually just slow down the backup process. | ||
72 | * | ||
73 | * Using the pipelined mode, I get a constant over 400 KBps throughput, | ||
74 | * which seems to be the maximum throughput supported by my tape. | ||
75 | * | ||
76 | * However, there are some downfalls: | ||
77 | * | ||
78 | * 1. We use memory (for data buffers) in proportional to the number | ||
79 | * of pipeline stages (each stage is about 26 KB with my tape). | ||
80 | * 2. In the pipelined write mode, we cheat and postpone error codes | ||
81 | * to the user task. In read mode, the actual tape position | ||
82 | * will be a bit further than the last requested block. | ||
83 | * | ||
84 | * Concerning (1): | ||
85 | * | ||
86 | * 1. We allocate stages dynamically only when we need them. When | ||
87 | * we don't need them, we don't consume additional memory. In | ||
88 | * case we can't allocate stages, we just manage without them | ||
89 | * (at the expense of decreased throughput) so when Linux is | ||
90 | * tight in memory, we will not pose additional difficulties. | ||
91 | * | ||
92 | * 2. The maximum number of stages (which is, in fact, the maximum | ||
93 | * amount of memory) which we allocate is limited by the compile | ||
94 | * time parameter IDETAPE_MAX_PIPELINE_STAGES. | ||
95 | * | ||
96 | * 3. The maximum number of stages is a controlled parameter - We | ||
97 | * don't start from the user defined maximum number of stages | ||
98 | * but from the lower IDETAPE_MIN_PIPELINE_STAGES (again, we | ||
99 | * will not even allocate this amount of stages if the user | ||
100 | * program can't handle the speed). We then implement a feedback | ||
101 | * loop which checks if the pipeline is empty, and if it is, we | ||
102 | * increase the maximum number of stages as necessary until we | ||
103 | * reach the optimum value which just manages to keep the tape | ||
104 | * busy with minimum allocated memory or until we reach | ||
105 | * IDETAPE_MAX_PIPELINE_STAGES. | ||
106 | * | ||
107 | * Concerning (2): | ||
108 | * | ||
109 | * In pipelined write mode, ide-tape can not return accurate error codes | ||
110 | * to the user program since we usually just add the request to the | ||
111 | * pipeline without waiting for it to be serviced. In case an error | ||
112 | * occurs, I will report it on the next user request. | ||
113 | * | ||
114 | * In the pipelined read mode, subsequent read requests or forward | ||
115 | * filemark spacing will perform correctly, as we preserve all blocks | ||
116 | * and filemarks which we encountered during our excess read-ahead. | ||
117 | * | ||
118 | * For accurate tape positioning and error reporting, disabling | ||
119 | * pipelined mode might be the best option. | ||
120 | * | ||
121 | * You can enable/disable/tune the pipelined operation mode by adjusting | ||
122 | * the compile time parameters below. | ||
123 | * | ||
124 | * | ||
125 | * Possible improvements. | ||
126 | * | ||
127 | * 1. Support for the ATAPI overlap protocol. | ||
128 | * | ||
129 | * In order to maximize bus throughput, we currently use the DSC | ||
130 | * overlap method which enables ide.c to service requests from the | ||
131 | * other device while the tape is busy executing a command. The | ||
132 | * DSC overlap method involves polling the tape's status register | ||
133 | * for the DSC bit, and servicing the other device while the tape | ||
134 | * isn't ready. | ||
135 | * | ||
136 | * In the current QIC development standard (December 1995), | ||
137 | * it is recommended that new tape drives will *in addition* | ||
138 | * implement the ATAPI overlap protocol, which is used for the | ||
139 | * same purpose - efficient use of the IDE bus, but is interrupt | ||
140 | * driven and thus has much less CPU overhead. | ||
141 | * | ||
142 | * ATAPI overlap is likely to be supported in most new ATAPI | ||
143 | * devices, including new ATAPI cdroms, and thus provides us | ||
144 | * a method by which we can achieve higher throughput when | ||
145 | * sharing a (fast) ATA-2 disk with any (slow) new ATAPI device. | ||
146 | */ | ||
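
As a usage illustration of the character-device numbers documented above (the mknod/mt invocations are conventional examples, not taken from the patch itself):

    mknod /dev/ht0  c 37 0       # first IDE tape, rewind on close
    mknod /dev/nht0 c 37 128     # first IDE tape, no rewind on close
    mt -f /dev/nht0 status       # mtio.h-style commands via mt(1)
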
diff --git a/Documentation/ide/ide.txt b/Documentation/ide/ide.txt
index 486c699f4aea..0c78f4b1d9d9 100644
--- a/Documentation/ide/ide.txt
+++ b/Documentation/ide/ide.txt
@@ -82,27 +82,26 @@ Drives are normally found by auto-probing and/or examining the CMOS/BIOS data. | |||
82 | For really weird situations, the apparent (fdisk) geometry can also be specified | 82 | For really weird situations, the apparent (fdisk) geometry can also be specified |
83 | on the kernel "command line" using LILO. The format of such lines is: | 83 | on the kernel "command line" using LILO. The format of such lines is: |
84 | 84 | ||
85 | hdx=cyls,heads,sects | 85 | ide_core.chs=[interface_number.device_number]:cyls,heads,sects |
86 | or hdx=cdrom | 86 | or ide_core.cdrom=[interface_number.device_number] |
87 | 87 | ||
88 | where hdx can be any of hda through hdh, Three values are required | 88 | For example: |
89 | (cyls,heads,sects). For example: | ||
90 | 89 | ||
91 | hdc=1050,32,64 hdd=cdrom | 90 | ide_core.chs=1.0:1050,32,64 ide_core.cdrom=1.1 |
92 | 91 | ||
93 | either {hda,hdb} or {hdc,hdd}. The results of successful auto-probing may | 92 | The results of successful auto-probing may override the physical geometry/irq |
94 | override the physical geometry/irq specified, though the "original" geometry | 93 | specified, though the "original" geometry may be retained as the "logical" |
95 | may be retained as the "logical" geometry for partitioning purposes (fdisk). | 94 | geometry for partitioning purposes (fdisk). |
96 | 95 | ||
97 | If the auto-probing during boot time confuses a drive (ie. the drive works | 96 | If the auto-probing during boot time confuses a drive (ie. the drive works |
98 | with hd.c but not with ide.c), then an command line option may be specified | 97 | with hd.c but not with ide.c), then an command line option may be specified |
99 | for each drive for which you'd like the drive to skip the hardware | 98 | for each drive for which you'd like the drive to skip the hardware |
100 | probe/identification sequence. For example: | 99 | probe/identification sequence. For example: |
101 | 100 | ||
102 | hdb=noprobe | 101 | ide_core.noprobe=0.1 |
103 | or | 102 | or |
104 | hdc=768,16,32 | 103 | ide_core.chs=1.0:768,16,32 |
105 | hdc=noprobe | 104 | ide_core.noprobe=1.0 |
106 | 105 | ||
107 | Note that when only one IDE device is attached to an interface, it should be | 106 | Note that when only one IDE device is attached to an interface, it should be |
108 | jumpered as "single" or "master", *not* "slave". Many folks have had | 107 | jumpered as "single" or "master", *not* "slave". Many folks have had |
@@ -118,9 +117,9 @@ If for some reason your cdrom drive is *not* found at boot time, you can force | |||
118 | the probe to look harder by supplying a kernel command line parameter | 117 | the probe to look harder by supplying a kernel command line parameter |
119 | via LILO, such as: | 118 | via LILO, such as: |
120 | 119 | ||
121 | hdc=cdrom /* hdc = "master" on second interface */ | 120 | ide_core.cdrom=1.0 /* "master" on second interface (hdc) */ |
122 | or | 121 | or |
123 | hdd=cdrom /* hdd = "slave" on second interface */ | 122 | ide_core.cdrom=1.1 /* "slave" on second interface (hdd) */ |
124 | 123 | ||
125 | For example, a GW2000 system might have a hard drive on the primary | 124 | For example, a GW2000 system might have a hard drive on the primary |
126 | interface (/dev/hda) and an IDE cdrom drive on the secondary interface | 125 | interface (/dev/hda) and an IDE cdrom drive on the secondary interface |
@@ -174,9 +173,7 @@ to /etc/modprobe.conf. | |||
174 | 173 | ||
175 | When ide.c is used as a module, you can pass command line parameters to the | 174 | When ide.c is used as a module, you can pass command line parameters to the |
176 | driver using the "options=" keyword to insmod, while replacing any ',' with | 175 | driver using the "options=" keyword to insmod, while replacing any ',' with |
177 | ';'. For example: | 176 | ';'. |
178 | |||
179 | insmod ide.o options="hda=nodma hdb=nodma" | ||
180 | 177 | ||
181 | 178 | ||
182 | ================================================================================ | 179 | ================================================================================ |
@@ -184,57 +181,6 @@ driver using the "options=" keyword to insmod, while replacing any ',' with | |||
184 | Summary of ide driver parameters for kernel command line | 181 | Summary of ide driver parameters for kernel command line |
185 | -------------------------------------------------------- | 182 | -------------------------------------------------------- |
186 | 183 | ||
187 | "hdx=" is recognized for all "x" from "a" to "u", such as "hdc". | ||
188 | |||
189 | "idex=" is recognized for all "x" from "0" to "9", such as "ide1". | ||
190 | |||
191 | "hdx=noprobe" : drive may be present, but do not probe for it | ||
192 | |||
193 | "hdx=none" : drive is NOT present, ignore cmos and do not probe | ||
194 | |||
195 | "hdx=nowerr" : ignore the WRERR_STAT bit on this drive | ||
196 | |||
197 | "hdx=cdrom" : drive is present, and is a cdrom drive | ||
198 | |||
199 | "hdx=cyl,head,sect" : disk drive is present, with specified geometry | ||
200 | |||
201 | "hdx=autotune" : driver will attempt to tune interface speed | ||
202 | to the fastest PIO mode supported, | ||
203 | if possible for this drive only. | ||
204 | Not fully supported by all chipset types, | ||
205 | and quite likely to cause trouble with | ||
206 | older/odd IDE drives. | ||
207 | |||
208 | "hdx=nodma" : disallow DMA | ||
209 | |||
210 | "idebus=xx" : inform IDE driver of VESA/PCI bus speed in MHz, | ||
211 | where "xx" is between 20 and 66 inclusive, | ||
212 | used when tuning chipset PIO modes. | ||
213 | For PCI bus, 25 is correct for a P75 system, | ||
214 | 30 is correct for P90,P120,P180 systems, | ||
215 | and 33 is used for P100,P133,P166 systems. | ||
216 | If in doubt, use idebus=33 for PCI. | ||
217 | As for VLB, it is safest to not specify it. | ||
218 | Bigger values are safer than smaller ones. | ||
219 | |||
220 | "idex=serialize" : do not overlap operations on idex. Please note | ||
221 | that you will have to specify this option for | ||
222 | both the respective primary and secondary channel | ||
223 | to take effect. | ||
224 | |||
225 | "idex=reset" : reset interface after probe | ||
226 | |||
227 | "idex=ata66" : informs the interface that it has an 80c cable | ||
228 | for chipsets that are ATA-66 capable, but the | ||
229 | ability to bit test for detection is currently | ||
230 | unknown. | ||
231 | |||
232 | "ide=doubler" : probe/support IDE doublers on Amiga | ||
233 | |||
234 | There may be more options than shown -- use the source, Luke! | ||
235 | |||
236 | Everything else is rejected with a "BAD OPTION" message. | ||
237 | |||
238 | For legacy IDE VLB host drivers (ali14xx/dtc2278/ht6560b/qd65xx/umc8672) | 184 | For legacy IDE VLB host drivers (ali14xx/dtc2278/ht6560b/qd65xx/umc8672) |
239 | you need to explicitly enable probing by using "probe" kernel parameter, | 185 | you need to explicitly enable probing by using "probe" kernel parameter, |
240 | i.e. to enable probing for ALI M14xx chipsets (ali14xx host driver) use: | 186 | i.e. to enable probing for ALI M14xx chipsets (ali14xx host driver) use: |
@@ -251,6 +197,33 @@ are detected automatically). | |||
251 | You also need to use "probe" kernel parameter for ide-4drives driver | 197 | You also need to use "probe" kernel parameter for ide-4drives driver |
252 | (support for IDE generic chipset with four drives on one port). | 198 | (support for IDE generic chipset with four drives on one port). |
253 | 199 | ||
200 | To enable support for IDE doublers on Amiga use "doubler" kernel parameter | ||
201 | for gayle host driver (i.e. "gayle.doubler" if the driver is built-in). | ||
202 | |||
203 | To force ignoring cable detection (this should be needed only if you're using | ||
204 | short 40-wires cable which cannot be automatically detected - if this is not | ||
205 | a case please report it as a bug instead) use "ignore_cable" kernel parameter: | ||
206 | |||
207 | * "ide_core.ignore_cable=[interface_number]" boot option if IDE is built-in | ||
208 | (i.e. "ide_core.ignore_cable=1" to force ignoring cable for "ide1") | ||
209 | |||
210 | * "ignore_cable=[interface_number]" module parameter (for ide_core module) | ||
211 | if IDE is compiled as module | ||
212 | |||
213 | Other kernel parameters for ide_core are: | ||
214 | |||
215 | * "nodma=[interface_number.device_number]" to disallow DMA for a device | ||
216 | |||
217 | * "noflush=[interface_number.device_number]" to disable flush requests | ||
218 | |||
219 | * "noprobe=[interface_number.device_number]" to skip probing | ||
220 | |||
221 | * "nowerr=[interface_number.device_number]" to ignore the WRERR_STAT bit | ||
222 | |||
223 | * "cdrom=[interface_number.device_number]" to force device as a CD-ROM | ||
224 | |||
225 | * "chs=[interface_number.device_number]" to force device as a disk (using CHS) | ||
226 | |||
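
Taken together, a hypothetical boot line combining several of the ide_core parameters listed above (the parameter names and the interface_number.device_number notation come from the list; the specific values are only an example):

    ide_core.nodma=0.1 ide_core.cdrom=1.0 ide_core.ignore_cable=1
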
254 | ================================================================================ | 227 | ================================================================================ |
255 | 228 | ||
256 | Some Terminology | 229 | Some Terminology |
diff --git a/Documentation/kernel-parameters.txt b/Documentation/kernel-parameters.txt
index bf6303ec0bde..e5f3d918316f 100644
--- a/Documentation/kernel-parameters.txt
+++ b/Documentation/kernel-parameters.txt
@@ -772,10 +772,6 @@ and is between 256 and 4096 characters. It is defined in the file | |||
772 | Format: ide=nodma or ide=doubler | 772 | Format: ide=nodma or ide=doubler |
773 | See Documentation/ide/ide.txt. | 773 | See Documentation/ide/ide.txt. |
774 | 774 | ||
775 | ide?= [HW] (E)IDE subsystem | ||
776 | Format: ide?=ata66 or chipset specific parameters. | ||
777 | See Documentation/ide/ide.txt. | ||
778 | |||
779 | idebus= [HW] (E)IDE subsystem - VLB/PCI bus speed | 775 | idebus= [HW] (E)IDE subsystem - VLB/PCI bus speed |
780 | See Documentation/ide/ide.txt. | 776 | See Documentation/ide/ide.txt. |
781 | 777 | ||
diff --git a/arch/x86/kernel/apm_32.c b/arch/x86/kernel/apm_32.c
index f0030a0999c7..e4ea362e8480 100644
--- a/arch/x86/kernel/apm_32.c
+++ b/arch/x86/kernel/apm_32.c
@@ -904,6 +904,7 @@ recalc: | |||
904 | original_pm_idle(); | 904 | original_pm_idle(); |
905 | else | 905 | else |
906 | default_idle(); | 906 | default_idle(); |
907 | local_irq_disable(); | ||
907 | jiffies_since_last_check = jiffies - last_jiffies; | 908 | jiffies_since_last_check = jiffies - last_jiffies; |
908 | if (jiffies_since_last_check > idle_period) | 909 | if (jiffies_since_last_check > idle_period) |
909 | goto recalc; | 910 | goto recalc; |
@@ -911,6 +912,8 @@ recalc: | |||
911 | 912 | ||
912 | if (apm_idle_done) | 913 | if (apm_idle_done) |
913 | apm_do_busy(); | 914 | apm_do_busy(); |
915 | |||
916 | local_irq_enable(); | ||
914 | } | 917 | } |
915 | 918 | ||
916 | /** | 919 | /** |
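
The interrupt changes above follow what appears to be the convention adopted throughout this patch: idle routines are entered with interrupts disabled and return with interrupts enabled, so apm_cpu_idle() must disable them again before re-reading jiffies and explicitly re-enable them on exit. A minimal sketch of that assumed contract (not literal kernel code):

	local_irq_disable();
	if (!need_resched())
		default_idle();		/* halts; returns with IRQs enabled */
	else
		local_irq_enable();
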
diff --git a/arch/x86/kernel/process.c b/arch/x86/kernel/process.c
index 3004d716539d..67e9b4a1e89d 100644
--- a/arch/x86/kernel/process.c
+++ b/arch/x86/kernel/process.c
@@ -4,6 +4,8 @@ | |||
4 | #include <linux/smp.h> | 4 | #include <linux/smp.h> |
5 | #include <linux/slab.h> | 5 | #include <linux/slab.h> |
6 | #include <linux/sched.h> | 6 | #include <linux/sched.h> |
7 | #include <linux/module.h> | ||
8 | #include <linux/pm.h> | ||
7 | 9 | ||
8 | struct kmem_cache *task_xstate_cachep; | 10 | struct kmem_cache *task_xstate_cachep; |
9 | 11 | ||
@@ -42,3 +44,118 @@ void arch_task_cache_init(void) | |||
42 | __alignof__(union thread_xstate), | 44 | __alignof__(union thread_xstate), |
43 | SLAB_PANIC, NULL); | 45 | SLAB_PANIC, NULL); |
44 | } | 46 | } |
47 | |||
48 | static void do_nothing(void *unused) | ||
49 | { | ||
50 | } | ||
51 | |||
52 | /* | ||
53 | * cpu_idle_wait - Used to ensure that all the CPUs discard old value of | ||
54 | * pm_idle and update to new pm_idle value. Required while changing pm_idle | ||
55 | * handler on SMP systems. | ||
56 | * | ||
57 | * Caller must have changed pm_idle to the new value before the call. Old | ||
58 | * pm_idle value will not be used by any CPU after the return of this function. | ||
59 | */ | ||
60 | void cpu_idle_wait(void) | ||
61 | { | ||
62 | smp_mb(); | ||
63 | /* kick all the CPUs so that they exit out of pm_idle */ | ||
64 | smp_call_function(do_nothing, NULL, 0, 1); | ||
65 | } | ||
66 | EXPORT_SYMBOL_GPL(cpu_idle_wait); | ||
67 | |||
68 | /* | ||
69 | * This uses new MONITOR/MWAIT instructions on P4 processors with PNI, | ||
70 | * which can obviate IPI to trigger checking of need_resched. | ||
71 | * We execute MONITOR against need_resched and enter optimized wait state | ||
72 | * through MWAIT. Whenever someone changes need_resched, we would be woken | ||
73 | * up from MWAIT (without an IPI). | ||
74 | * | ||
75 | * New with Core Duo processors, MWAIT can take some hints based on CPU | ||
76 | * capability. | ||
77 | */ | ||
78 | void mwait_idle_with_hints(unsigned long ax, unsigned long cx) | ||
79 | { | ||
80 | if (!need_resched()) { | ||
81 | __monitor((void *)&current_thread_info()->flags, 0, 0); | ||
82 | smp_mb(); | ||
83 | if (!need_resched()) | ||
84 | __mwait(ax, cx); | ||
85 | } | ||
86 | } | ||
87 | |||
88 | /* Default MONITOR/MWAIT with no hints, used for default C1 state */ | ||
89 | static void mwait_idle(void) | ||
90 | { | ||
91 | if (!need_resched()) { | ||
92 | __monitor((void *)&current_thread_info()->flags, 0, 0); | ||
93 | smp_mb(); | ||
94 | if (!need_resched()) | ||
95 | __sti_mwait(0, 0); | ||
96 | else | ||
97 | local_irq_enable(); | ||
98 | } else | ||
99 | local_irq_enable(); | ||
100 | } | ||
101 | |||
102 | |||
103 | static int __cpuinit mwait_usable(const struct cpuinfo_x86 *c) | ||
104 | { | ||
105 | if (force_mwait) | ||
106 | return 1; | ||
107 | /* Any C1 states supported? */ | ||
108 | return c->cpuid_level >= 5 && ((cpuid_edx(5) >> 4) & 0xf) > 0; | ||
109 | } | ||
110 | |||
111 | /* | ||
112 | * On SMP it's slightly faster (but much more power-consuming!) | ||
113 | * to poll the ->work.need_resched flag instead of waiting for the | ||
114 | * cross-CPU IPI to arrive. Use this option with caution. | ||
115 | */ | ||
116 | static void poll_idle(void) | ||
117 | { | ||
118 | local_irq_enable(); | ||
119 | cpu_relax(); | ||
120 | } | ||
121 | |||
122 | void __cpuinit select_idle_routine(const struct cpuinfo_x86 *c) | ||
123 | { | ||
124 | static int selected; | ||
125 | |||
126 | if (selected) | ||
127 | return; | ||
128 | #ifdef CONFIG_X86_SMP | ||
129 | if (pm_idle == poll_idle && smp_num_siblings > 1) { | ||
130 | printk(KERN_WARNING "WARNING: polling idle and HT enabled," | ||
131 | " performance may degrade.\n"); | ||
132 | } | ||
133 | #endif | ||
134 | if (cpu_has(c, X86_FEATURE_MWAIT) && mwait_usable(c)) { | ||
135 | /* | ||
136 | * Skip, if setup has overridden idle. | ||
137 | * One CPU supports mwait => All CPUs supports mwait | ||
138 | */ | ||
139 | if (!pm_idle) { | ||
140 | printk(KERN_INFO "using mwait in idle threads.\n"); | ||
141 | pm_idle = mwait_idle; | ||
142 | } | ||
143 | } | ||
144 | selected = 1; | ||
145 | } | ||
146 | |||
147 | static int __init idle_setup(char *str) | ||
148 | { | ||
149 | if (!strcmp(str, "poll")) { | ||
150 | printk("using polling idle threads.\n"); | ||
151 | pm_idle = poll_idle; | ||
152 | } else if (!strcmp(str, "mwait")) | ||
153 | force_mwait = 1; | ||
154 | else | ||
155 | return -1; | ||
156 | |||
157 | boot_option_idle_override = 1; | ||
158 | return 0; | ||
159 | } | ||
160 | early_param("idle", idle_setup); | ||
161 | |||
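
As set up by idle_setup() above, the "idle=" early boot parameter accepts "poll" (install poll_idle as the pm_idle handler) and "mwait" (set force_mwait, so mwait_usable() always succeeds); for example:

    idle=poll
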
diff --git a/arch/x86/kernel/process_32.c b/arch/x86/kernel/process_32.c
index 77de848bd1fb..f8476dfbb60d 100644
--- a/arch/x86/kernel/process_32.c
+++ b/arch/x86/kernel/process_32.c
@@ -111,12 +111,10 @@ void default_idle(void) | |||
111 | */ | 111 | */ |
112 | smp_mb(); | 112 | smp_mb(); |
113 | 113 | ||
114 | local_irq_disable(); | 114 | if (!need_resched()) |
115 | if (!need_resched()) { | ||
116 | safe_halt(); /* enables interrupts racelessly */ | 115 | safe_halt(); /* enables interrupts racelessly */ |
117 | local_irq_disable(); | 116 | else |
118 | } | 117 | local_irq_enable(); |
119 | local_irq_enable(); | ||
120 | current_thread_info()->status |= TS_POLLING; | 118 | current_thread_info()->status |= TS_POLLING; |
121 | } else { | 119 | } else { |
122 | local_irq_enable(); | 120 | local_irq_enable(); |
@@ -128,17 +126,6 @@ void default_idle(void) | |||
128 | EXPORT_SYMBOL(default_idle); | 126 | EXPORT_SYMBOL(default_idle); |
129 | #endif | 127 | #endif |
130 | 128 | ||
131 | /* | ||
132 | * On SMP it's slightly faster (but much more power-consuming!) | ||
133 | * to poll the ->work.need_resched flag instead of waiting for the | ||
134 | * cross-CPU IPI to arrive. Use this option with caution. | ||
135 | */ | ||
136 | static void poll_idle(void) | ||
137 | { | ||
138 | local_irq_enable(); | ||
139 | cpu_relax(); | ||
140 | } | ||
141 | |||
142 | #ifdef CONFIG_HOTPLUG_CPU | 129 | #ifdef CONFIG_HOTPLUG_CPU |
143 | #include <asm/nmi.h> | 130 | #include <asm/nmi.h> |
144 | /* We don't actually take CPU down, just spin without interrupts. */ | 131 | /* We don't actually take CPU down, just spin without interrupts. */ |
@@ -196,6 +183,7 @@ void cpu_idle(void) | |||
196 | if (cpu_is_offline(cpu)) | 183 | if (cpu_is_offline(cpu)) |
197 | play_dead(); | 184 | play_dead(); |
198 | 185 | ||
186 | local_irq_disable(); | ||
199 | __get_cpu_var(irq_stat).idle_timestamp = jiffies; | 187 | __get_cpu_var(irq_stat).idle_timestamp = jiffies; |
200 | idle(); | 188 | idle(); |
201 | } | 189 | } |
@@ -206,104 +194,6 @@ void cpu_idle(void) | |||
206 | } | 194 | } |
207 | } | 195 | } |
208 | 196 | ||
209 | static void do_nothing(void *unused) | ||
210 | { | ||
211 | } | ||
212 | |||
213 | /* | ||
214 | * cpu_idle_wait - Used to ensure that all the CPUs discard old value of | ||
215 | * pm_idle and update to new pm_idle value. Required while changing pm_idle | ||
216 | * handler on SMP systems. | ||
217 | * | ||
218 | * Caller must have changed pm_idle to the new value before the call. Old | ||
219 | * pm_idle value will not be used by any CPU after the return of this function. | ||
220 | */ | ||
221 | void cpu_idle_wait(void) | ||
222 | { | ||
223 | smp_mb(); | ||
224 | /* kick all the CPUs so that they exit out of pm_idle */ | ||
225 | smp_call_function(do_nothing, NULL, 0, 1); | ||
226 | } | ||
227 | EXPORT_SYMBOL_GPL(cpu_idle_wait); | ||
228 | |||
229 | /* | ||
230 | * This uses new MONITOR/MWAIT instructions on P4 processors with PNI, | ||
231 | * which can obviate IPI to trigger checking of need_resched. | ||
232 | * We execute MONITOR against need_resched and enter optimized wait state | ||
233 | * through MWAIT. Whenever someone changes need_resched, we would be woken | ||
234 | * up from MWAIT (without an IPI). | ||
235 | * | ||
236 | * New with Core Duo processors, MWAIT can take some hints based on CPU | ||
237 | * capability. | ||
238 | */ | ||
239 | void mwait_idle_with_hints(unsigned long ax, unsigned long cx) | ||
240 | { | ||
241 | if (!need_resched()) { | ||
242 | __monitor((void *)&current_thread_info()->flags, 0, 0); | ||
243 | smp_mb(); | ||
244 | if (!need_resched()) | ||
245 | __sti_mwait(ax, cx); | ||
246 | else | ||
247 | local_irq_enable(); | ||
248 | } else | ||
249 | local_irq_enable(); | ||
250 | } | ||
251 | |||
252 | /* Default MONITOR/MWAIT with no hints, used for default C1 state */ | ||
253 | static void mwait_idle(void) | ||
254 | { | ||
255 | local_irq_enable(); | ||
256 | mwait_idle_with_hints(0, 0); | ||
257 | } | ||
258 | |||
259 | static int __cpuinit mwait_usable(const struct cpuinfo_x86 *c) | ||
260 | { | ||
261 | if (force_mwait) | ||
262 | return 1; | ||
263 | /* Any C1 states supported? */ | ||
264 | return c->cpuid_level >= 5 && ((cpuid_edx(5) >> 4) & 0xf) > 0; | ||
265 | } | ||
266 | |||
267 | void __cpuinit select_idle_routine(const struct cpuinfo_x86 *c) | ||
268 | { | ||
269 | static int selected; | ||
270 | |||
271 | if (selected) | ||
272 | return; | ||
273 | #ifdef CONFIG_X86_SMP | ||
274 | if (pm_idle == poll_idle && smp_num_siblings > 1) { | ||
275 | printk(KERN_WARNING "WARNING: polling idle and HT enabled," | ||
276 | " performance may degrade.\n"); | ||
277 | } | ||
278 | #endif | ||
279 | if (cpu_has(c, X86_FEATURE_MWAIT) && mwait_usable(c)) { | ||
280 | /* | ||
281 | * Skip, if setup has overridden idle. | ||
282 | * One CPU supports mwait => All CPUs supports mwait | ||
283 | */ | ||
284 | if (!pm_idle) { | ||
285 | printk(KERN_INFO "using mwait in idle threads.\n"); | ||
286 | pm_idle = mwait_idle; | ||
287 | } | ||
288 | } | ||
289 | selected = 1; | ||
290 | } | ||
291 | |||
292 | static int __init idle_setup(char *str) | ||
293 | { | ||
294 | if (!strcmp(str, "poll")) { | ||
295 | printk("using polling idle threads.\n"); | ||
296 | pm_idle = poll_idle; | ||
297 | } else if (!strcmp(str, "mwait")) | ||
298 | force_mwait = 1; | ||
299 | else | ||
300 | return -1; | ||
301 | |||
302 | boot_option_idle_override = 1; | ||
303 | return 0; | ||
304 | } | ||
305 | early_param("idle", idle_setup); | ||
306 | |||
307 | void __show_registers(struct pt_regs *regs, int all) | 197 | void __show_registers(struct pt_regs *regs, int all) |
308 | { | 198 | { |
309 | unsigned long cr0 = 0L, cr2 = 0L, cr3 = 0L, cr4 = 0L; | 199 | unsigned long cr0 = 0L, cr2 = 0L, cr3 = 0L, cr4 = 0L; |
diff --git a/arch/x86/kernel/process_64.c b/arch/x86/kernel/process_64.c
index 131c2ee7ac56..e2319f39988b 100644
--- a/arch/x86/kernel/process_64.c
+++ b/arch/x86/kernel/process_64.c
@@ -106,26 +106,13 @@ void default_idle(void) | |||
106 | * test NEED_RESCHED: | 106 | * test NEED_RESCHED: |
107 | */ | 107 | */ |
108 | smp_mb(); | 108 | smp_mb(); |
109 | local_irq_disable(); | 109 | if (!need_resched()) |
110 | if (!need_resched()) { | ||
111 | safe_halt(); /* enables interrupts racelessly */ | 110 | safe_halt(); /* enables interrupts racelessly */ |
112 | local_irq_disable(); | 111 | else |
113 | } | 112 | local_irq_enable(); |
114 | local_irq_enable(); | ||
115 | current_thread_info()->status |= TS_POLLING; | 113 | current_thread_info()->status |= TS_POLLING; |
116 | } | 114 | } |
117 | 115 | ||
118 | /* | ||
119 | * On SMP it's slightly faster (but much more power-consuming!) | ||
120 | * to poll the ->need_resched flag instead of waiting for the | ||
121 | * cross-CPU IPI to arrive. Use this option with caution. | ||
122 | */ | ||
123 | static void poll_idle(void) | ||
124 | { | ||
125 | local_irq_enable(); | ||
126 | cpu_relax(); | ||
127 | } | ||
128 | |||
129 | #ifdef CONFIG_HOTPLUG_CPU | 116 | #ifdef CONFIG_HOTPLUG_CPU |
130 | DECLARE_PER_CPU(int, cpu_state); | 117 | DECLARE_PER_CPU(int, cpu_state); |
131 | 118 | ||
@@ -192,110 +179,6 @@ void cpu_idle(void) | |||
192 | } | 179 | } |
193 | } | 180 | } |
194 | 181 | ||
195 | static void do_nothing(void *unused) | ||
196 | { | ||
197 | } | ||
198 | |||
199 | /* | ||
200 | * cpu_idle_wait - Used to ensure that all the CPUs discard old value of | ||
201 | * pm_idle and update to new pm_idle value. Required while changing pm_idle | ||
202 | * handler on SMP systems. | ||
203 | * | ||
204 | * Caller must have changed pm_idle to the new value before the call. Old | ||
205 | * pm_idle value will not be used by any CPU after the return of this function. | ||
206 | */ | ||
207 | void cpu_idle_wait(void) | ||
208 | { | ||
209 | smp_mb(); | ||
210 | /* kick all the CPUs so that they exit out of pm_idle */ | ||
211 | smp_call_function(do_nothing, NULL, 0, 1); | ||
212 | } | ||
213 | EXPORT_SYMBOL_GPL(cpu_idle_wait); | ||
214 | |||
215 | /* | ||
216 | * This uses new MONITOR/MWAIT instructions on P4 processors with PNI, | ||
217 | * which can obviate IPI to trigger checking of need_resched. | ||
218 | * We execute MONITOR against need_resched and enter optimized wait state | ||
219 | * through MWAIT. Whenever someone changes need_resched, we would be woken | ||
220 | * up from MWAIT (without an IPI). | ||
221 | * | ||
222 | * New with Core Duo processors, MWAIT can take some hints based on CPU | ||
223 | * capability. | ||
224 | */ | ||
225 | void mwait_idle_with_hints(unsigned long ax, unsigned long cx) | ||
226 | { | ||
227 | if (!need_resched()) { | ||
228 | __monitor((void *)&current_thread_info()->flags, 0, 0); | ||
229 | smp_mb(); | ||
230 | if (!need_resched()) | ||
231 | __mwait(ax, cx); | ||
232 | } | ||
233 | } | ||
234 | |||
235 | /* Default MONITOR/MWAIT with no hints, used for default C1 state */ | ||
236 | static void mwait_idle(void) | ||
237 | { | ||
238 | if (!need_resched()) { | ||
239 | __monitor((void *)&current_thread_info()->flags, 0, 0); | ||
240 | smp_mb(); | ||
241 | if (!need_resched()) | ||
242 | __sti_mwait(0, 0); | ||
243 | else | ||
244 | local_irq_enable(); | ||
245 | } else { | ||
246 | local_irq_enable(); | ||
247 | } | ||
248 | } | ||
249 | |||
250 | |||
251 | static int __cpuinit mwait_usable(const struct cpuinfo_x86 *c) | ||
252 | { | ||
253 | if (force_mwait) | ||
254 | return 1; | ||
255 | /* Any C1 states supported? */ | ||
256 | return c->cpuid_level >= 5 && ((cpuid_edx(5) >> 4) & 0xf) > 0; | ||
257 | } | ||
258 | |||
259 | void __cpuinit select_idle_routine(const struct cpuinfo_x86 *c) | ||
260 | { | ||
261 | static int selected; | ||
262 | |||
263 | if (selected) | ||
264 | return; | ||
265 | #ifdef CONFIG_X86_SMP | ||
266 | if (pm_idle == poll_idle && smp_num_siblings > 1) { | ||
267 | printk(KERN_WARNING "WARNING: polling idle and HT enabled," | ||
268 | " performance may degrade.\n"); | ||
269 | } | ||
270 | #endif | ||
271 | if (cpu_has(c, X86_FEATURE_MWAIT) && mwait_usable(c)) { | ||
272 | /* | ||
273 | * Skip, if setup has overridden idle. | ||
274 | * One CPU supports mwait => All CPUs supports mwait | ||
275 | */ | ||
276 | if (!pm_idle) { | ||
277 | printk(KERN_INFO "using mwait in idle threads.\n"); | ||
278 | pm_idle = mwait_idle; | ||
279 | } | ||
280 | } | ||
281 | selected = 1; | ||
282 | } | ||
283 | |||
284 | static int __init idle_setup(char *str) | ||
285 | { | ||
286 | if (!strcmp(str, "poll")) { | ||
287 | printk("using polling idle threads.\n"); | ||
288 | pm_idle = poll_idle; | ||
289 | } else if (!strcmp(str, "mwait")) | ||
290 | force_mwait = 1; | ||
291 | else | ||
292 | return -1; | ||
293 | |||
294 | boot_option_idle_override = 1; | ||
295 | return 0; | ||
296 | } | ||
297 | early_param("idle", idle_setup); | ||
298 | |||
299 | /* Prints also some state that isn't saved in the pt_regs */ | 182 | /* Prints also some state that isn't saved in the pt_regs */ |
300 | void __show_regs(struct pt_regs * regs) | 183 | void __show_regs(struct pt_regs * regs) |
301 | { | 184 | { |
diff --git a/drivers/acpi/processor_idle.c b/drivers/acpi/processor_idle.c
index 788da9781f80..0d90ff5fd117 100644
--- a/drivers/acpi/processor_idle.c
+++ b/drivers/acpi/processor_idle.c
@@ -418,13 +418,12 @@ static void acpi_processor_idle(void) | |||
418 | 418 | ||
419 | cx = pr->power.state; | 419 | cx = pr->power.state; |
420 | if (!cx || acpi_idle_suspend) { | 420 | if (!cx || acpi_idle_suspend) { |
421 | if (pm_idle_save) | 421 | if (pm_idle_save) { |
422 | pm_idle_save(); | 422 | pm_idle_save(); /* enables IRQs */ |
423 | else | 423 | } else { |
424 | acpi_safe_halt(); | 424 | acpi_safe_halt(); |
425 | |||
426 | if (irqs_disabled()) | ||
427 | local_irq_enable(); | 425 | local_irq_enable(); |
426 | } | ||
428 | 427 | ||
429 | return; | 428 | return; |
430 | } | 429 | } |
@@ -520,10 +519,12 @@ static void acpi_processor_idle(void) | |||
520 | * Use the appropriate idle routine, the one that would | 519 | * Use the appropriate idle routine, the one that would |
521 | * be used without acpi C-states. | 520 | * be used without acpi C-states. |
522 | */ | 521 | */ |
523 | if (pm_idle_save) | 522 | if (pm_idle_save) { |
524 | pm_idle_save(); | 523 | pm_idle_save(); /* enables IRQs */ |
525 | else | 524 | } else { |
526 | acpi_safe_halt(); | 525 | acpi_safe_halt(); |
526 | local_irq_enable(); | ||
527 | } | ||
527 | 528 | ||
528 | /* | 529 | /* |
529 | * TBD: Can't get time duration while in C1, as resumes | 530 | * TBD: Can't get time duration while in C1, as resumes |
@@ -534,8 +535,6 @@ static void acpi_processor_idle(void) | |||
534 | * skew otherwise. | 535 | * skew otherwise. |
535 | */ | 536 | */ |
536 | sleep_ticks = 0xFFFFFFFF; | 537 | sleep_ticks = 0xFFFFFFFF; |
537 | if (irqs_disabled()) | ||
538 | local_irq_enable(); | ||
539 | 538 | ||
540 | break; | 539 | break; |
541 | 540 | ||
diff --git a/drivers/ide/arm/bast-ide.c b/drivers/ide/arm/bast-ide.c
index d158f579bde2..713cef20622e 100644
--- a/drivers/ide/arm/bast-ide.c
+++ b/drivers/ide/arm/bast-ide.c
@@ -35,12 +35,12 @@ static int __init bastide_register(unsigned int base, unsigned int aux, int irq) | |||
35 | base += BAST_IDE_CS; | 35 | base += BAST_IDE_CS; |
36 | aux += BAST_IDE_CS; | 36 | aux += BAST_IDE_CS; |
37 | 37 | ||
38 | for (i = IDE_DATA_OFFSET; i <= IDE_STATUS_OFFSET; i++) { | 38 | for (i = 0; i <= 7; i++) { |
39 | hw.io_ports[i] = (unsigned long)base; | 39 | hw.io_ports_array[i] = (unsigned long)base; |
40 | base += 0x20; | 40 | base += 0x20; |
41 | } | 41 | } |
42 | 42 | ||
43 | hw.io_ports[IDE_CONTROL_OFFSET] = aux + (6 * 0x20); | 43 | hw.io_ports.ctl_addr = aux + (6 * 0x20); |
44 | hw.irq = irq; | 44 | hw.irq = irq; |
45 | 45 | ||
46 | hwif = ide_find_port(); | 46 | hwif = ide_find_port(); |
@@ -49,11 +49,7 @@ static int __init bastide_register(unsigned int base, unsigned int aux, int irq) | |||
49 | 49 | ||
50 | i = hwif->index; | 50 | i = hwif->index; |
51 | 51 | ||
52 | if (hwif->present) | 52 | ide_init_port_data(hwif, i); |
53 | ide_unregister(i); | ||
54 | else | ||
55 | ide_init_port_data(hwif, i); | ||
56 | |||
57 | ide_init_port_hw(hwif, &hw); | 53 | ide_init_port_hw(hwif, &hw); |
58 | hwif->port_ops = NULL; | 54 | hwif->port_ops = NULL; |
59 | 55 | ||
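
The io_ports[] array indexing replaced above (and in the drivers that follow) turns into named accessors such as io_ports_array[i] and io_ports.ctl_addr. A minimal sketch of the kind of layout that makes both spellings address the same storage; the exact field list is an assumption inferred from the accessors used in this series, not a verbatim copy of <linux/ide.h>:

	struct ide_io_ports {
		unsigned long data_addr;	/* was io_ports[IDE_DATA_OFFSET]    */
		unsigned long error_addr;
		unsigned long nsect_addr;
		unsigned long lbal_addr;
		unsigned long lbam_addr;
		unsigned long lbah_addr;
		unsigned long device_addr;
		unsigned long command_addr;	/* command/status taskfile register */
		unsigned long ctl_addr;		/* was io_ports[IDE_CONTROL_OFFSET] */
		unsigned long irq_addr;
	};

	/* assumed: hw_regs_t and ide_hwif_t expose the same words both ways,   */
	/* so the "for (i = 0; i <= 7; i++)" loops fill data_addr..command_addr */
	union {
		struct ide_io_ports	io_ports;
		unsigned long		io_ports_array[10];
	};
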
diff --git a/drivers/ide/arm/icside.c b/drivers/ide/arm/icside.c
index 7d642f44e35b..124445c20921 100644
--- a/drivers/ide/arm/icside.c
+++ b/drivers/ide/arm/icside.c
@@ -426,11 +426,12 @@ icside_setup(void __iomem *base, struct cardinfo *info, struct expansion_card *e | |||
426 | */ | 426 | */ |
427 | default_hwif_mmiops(hwif); | 427 | default_hwif_mmiops(hwif); |
428 | 428 | ||
429 | for (i = IDE_DATA_OFFSET; i <= IDE_STATUS_OFFSET; i++) { | 429 | for (i = 0; i <= 7; i++) { |
430 | hwif->io_ports[i] = port; | 430 | hwif->io_ports_array[i] = port; |
431 | port += 1 << info->stepping; | 431 | port += 1 << info->stepping; |
432 | } | 432 | } |
433 | hwif->io_ports[IDE_CONTROL_OFFSET] = (unsigned long)base + info->ctrloffset; | 433 | hwif->io_ports.ctl_addr = |
434 | (unsigned long)base + info->ctrloffset; | ||
434 | hwif->irq = ec->irq; | 435 | hwif->irq = ec->irq; |
435 | hwif->chipset = ide_acorn; | 436 | hwif->chipset = ide_acorn; |
436 | hwif->gendev.parent = &ec->dev; | 437 | hwif->gendev.parent = &ec->dev; |
@@ -480,8 +481,7 @@ static const struct ide_port_info icside_v6_port_info __initdata = { | |||
480 | .init_dma = icside_dma_off_init, | 481 | .init_dma = icside_dma_off_init, |
481 | .port_ops = &icside_v6_no_dma_port_ops, | 482 | .port_ops = &icside_v6_no_dma_port_ops, |
482 | .dma_ops = &icside_v6_dma_ops, | 483 | .dma_ops = &icside_v6_dma_ops, |
483 | .host_flags = IDE_HFLAG_SERIALIZE | | 484 | .host_flags = IDE_HFLAG_SERIALIZE, |
484 | IDE_HFLAG_NO_AUTOTUNE, | ||
485 | .mwdma_mask = ATA_MWDMA2, | 485 | .mwdma_mask = ATA_MWDMA2, |
486 | .swdma_mask = ATA_SWDMA2, | 486 | .swdma_mask = ATA_SWDMA2, |
487 | }; | 487 | }; |
@@ -547,14 +547,13 @@ icside_register_v6(struct icside_state *state, struct expansion_card *ec) | |||
547 | hwif->config_data = (unsigned long)ioc_base; | 547 | hwif->config_data = (unsigned long)ioc_base; |
548 | hwif->select_data = sel; | 548 | hwif->select_data = sel; |
549 | 549 | ||
550 | mate->maskproc = icside_maskproc; | ||
551 | mate->hwif_data = state; | 550 | mate->hwif_data = state; |
552 | mate->config_data = (unsigned long)ioc_base; | 551 | mate->config_data = (unsigned long)ioc_base; |
553 | mate->select_data = sel | 1; | 552 | mate->select_data = sel | 1; |
554 | 553 | ||
555 | if (ec->dma != NO_DMA && !request_dma(ec->dma, hwif->name)) { | 554 | if (ec->dma != NO_DMA && !request_dma(ec->dma, hwif->name)) { |
556 | d.init_dma = icside_dma_init; | 555 | d.init_dma = icside_dma_init; |
557 | d.port_ops = &icside_v6_dma_port_ops; | 556 | d.port_ops = &icside_v6_port_ops; |
558 | d.dma_ops = NULL; | 557 | d.dma_ops = NULL; |
559 | } | 558 | } |
560 | 559 | ||
diff --git a/drivers/ide/arm/palm_bk3710.c b/drivers/ide/arm/palm_bk3710.c
index 8fa34e26443a..aaf32541622d 100644
--- a/drivers/ide/arm/palm_bk3710.c
+++ b/drivers/ide/arm/palm_bk3710.c
@@ -321,7 +321,7 @@ static int __devinit palm_bk3710_init_dma(ide_hwif_t *hwif, | |||
321 | const struct ide_port_info *d) | 321 | const struct ide_port_info *d) |
322 | { | 322 | { |
323 | unsigned long base = | 323 | unsigned long base = |
324 | hwif->io_ports[IDE_DATA_OFFSET] - IDE_PALM_ATA_PRI_REG_OFFSET; | 324 | hwif->io_ports.data_addr - IDE_PALM_ATA_PRI_REG_OFFSET; |
325 | 325 | ||
326 | printk(KERN_INFO " %s: MMIO-DMA\n", hwif->name); | 326 | printk(KERN_INFO " %s: MMIO-DMA\n", hwif->name); |
327 | 327 | ||
@@ -386,8 +386,8 @@ static int __devinit palm_bk3710_probe(struct platform_device *pdev) | |||
386 | 386 | ||
387 | pribase = mem->start + IDE_PALM_ATA_PRI_REG_OFFSET; | 387 | pribase = mem->start + IDE_PALM_ATA_PRI_REG_OFFSET; |
388 | for (i = 0; i < IDE_NR_PORTS - 2; i++) | 388 | for (i = 0; i < IDE_NR_PORTS - 2; i++) |
389 | hw.io_ports[i] = pribase + i; | 389 | hw.io_ports_array[i] = pribase + i; |
390 | hw.io_ports[IDE_CONTROL_OFFSET] = mem->start + | 390 | hw.io_ports.ctl_addr = mem->start + |
391 | IDE_PALM_ATA_PRI_CTL_OFFSET; | 391 | IDE_PALM_ATA_PRI_CTL_OFFSET; |
392 | hw.irq = irq->start; | 392 | hw.irq = irq->start; |
393 | hw.chipset = ide_palm3710; | 393 | hw.chipset = ide_palm3710; |
@@ -398,11 +398,7 @@ static int __devinit palm_bk3710_probe(struct platform_device *pdev) | |||
398 | 398 | ||
399 | i = hwif->index; | 399 | i = hwif->index; |
400 | 400 | ||
401 | if (hwif->present) | 401 | ide_init_port_data(hwif, i); |
402 | ide_unregister(i); | ||
403 | else | ||
404 | ide_init_port_data(hwif, i); | ||
405 | |||
406 | ide_init_port_hw(hwif, &hw); | 402 | ide_init_port_hw(hwif, &hw); |
407 | 403 | ||
408 | hwif->mmio = 1; | 404 | hwif->mmio = 1; |
diff --git a/drivers/ide/arm/rapide.c b/drivers/ide/arm/rapide.c
index c0581bd98d0d..babc1a5e128d 100644
--- a/drivers/ide/arm/rapide.c
+++ b/drivers/ide/arm/rapide.c
@@ -17,11 +17,11 @@ static void rapide_setup_ports(hw_regs_t *hw, void __iomem *base, | |||
17 | unsigned long port = (unsigned long)base; | 17 | unsigned long port = (unsigned long)base; |
18 | int i; | 18 | int i; |
19 | 19 | ||
20 | for (i = IDE_DATA_OFFSET; i <= IDE_STATUS_OFFSET; i++) { | 20 | for (i = 0; i <= 7; i++) { |
21 | hw->io_ports[i] = port; | 21 | hw->io_ports_array[i] = port; |
22 | port += sz; | 22 | port += sz; |
23 | } | 23 | } |
24 | hw->io_ports[IDE_CONTROL_OFFSET] = (unsigned long)ctrl; | 24 | hw->io_ports.ctl_addr = (unsigned long)ctrl; |
25 | hw->irq = irq; | 25 | hw->irq = irq; |
26 | } | 26 | } |
27 | 27 | ||
@@ -75,7 +75,7 @@ static void __devexit rapide_remove(struct expansion_card *ec) | |||
75 | 75 | ||
76 | ecard_set_drvdata(ec, NULL); | 76 | ecard_set_drvdata(ec, NULL); |
77 | 77 | ||
78 | ide_unregister(hwif->index); | 78 | ide_unregister(hwif); |
79 | 79 | ||
80 | ecard_release_resources(ec); | 80 | ecard_release_resources(ec); |
81 | } | 81 | } |
diff --git a/drivers/ide/cris/ide-cris.c b/drivers/ide/cris/ide-cris.c
index a62ca75c7e28..9df26855bc05 100644
--- a/drivers/ide/cris/ide-cris.c
+++ b/drivers/ide/cris/ide-cris.c
@@ -88,8 +88,8 @@ enum /* Transfer types */ | |||
88 | int | 88 | int |
89 | cris_ide_ack_intr(ide_hwif_t* hwif) | 89 | cris_ide_ack_intr(ide_hwif_t* hwif) |
90 | { | 90 | { |
91 | reg_ata_rw_ctrl2 ctrl2 = REG_TYPE_CONV(reg_ata_rw_ctrl2, | 91 | reg_ata_rw_ctrl2 ctrl2 = REG_TYPE_CONV(reg_ata_rw_ctrl2, int, |
92 | int, hwif->io_ports[0]); | 92 | hwif->io_ports.data_addr); |
93 | REG_WR_INT(ata, regi_ata, rw_ack_intr, 1 << ctrl2.sel); | 93 | REG_WR_INT(ata, regi_ata, rw_ack_intr, 1 << ctrl2.sel); |
94 | return 1; | 94 | return 1; |
95 | } | 95 | } |
@@ -231,7 +231,7 @@ cris_ide_start_dma(ide_drive_t *drive, cris_dma_descr_type *d, int dir,int type, | |||
231 | ide_hwif_t *hwif = drive->hwif; | 231 | ide_hwif_t *hwif = drive->hwif; |
232 | 232 | ||
233 | reg_ata_rw_ctrl2 ctrl2 = REG_TYPE_CONV(reg_ata_rw_ctrl2, int, | 233 | reg_ata_rw_ctrl2 ctrl2 = REG_TYPE_CONV(reg_ata_rw_ctrl2, int, |
234 | hwif->io_ports[IDE_DATA_OFFSET]); | 234 | hwif->io_ports.data_addr); |
235 | reg_ata_rw_trf_cnt trf_cnt = {0}; | 235 | reg_ata_rw_trf_cnt trf_cnt = {0}; |
236 | 236 | ||
237 | mycontext.saved_data = (dma_descr_data*)virt_to_phys(d); | 237 | mycontext.saved_data = (dma_descr_data*)virt_to_phys(d); |
@@ -271,7 +271,7 @@ static int cris_dma_test_irq(ide_drive_t *drive) | |||
271 | int intr = REG_RD_INT(ata, regi_ata, r_intr); | 271 | int intr = REG_RD_INT(ata, regi_ata, r_intr); |
272 | 272 | ||
273 | reg_ata_rw_ctrl2 ctrl2 = REG_TYPE_CONV(reg_ata_rw_ctrl2, int, | 273 | reg_ata_rw_ctrl2 ctrl2 = REG_TYPE_CONV(reg_ata_rw_ctrl2, int, |
274 | hwif->io_ports[IDE_DATA_OFFSET]); | 274 | hwif->io_ports.data_addr); |
275 | 275 | ||
276 | return intr & (1 << ctrl2.sel) ? 1 : 0; | 276 | return intr & (1 << ctrl2.sel) ? 1 : 0; |
277 | } | 277 | } |
@@ -531,7 +531,7 @@ static void cris_ide_start_dma(ide_drive_t *drive, cris_dma_descr_type *d, int d | |||
531 | *R_ATA_CTRL_DATA = | 531 | *R_ATA_CTRL_DATA = |
532 | cmd | | 532 | cmd | |
533 | IO_FIELD(R_ATA_CTRL_DATA, data, | 533 | IO_FIELD(R_ATA_CTRL_DATA, data, |
534 | drive->hwif->io_ports[IDE_DATA_OFFSET]) | | 534 | drive->hwif->io_ports.data_addr) | |
535 | IO_STATE(R_ATA_CTRL_DATA, src_dst, dma) | | 535 | IO_STATE(R_ATA_CTRL_DATA, src_dst, dma) | |
536 | IO_STATE(R_ATA_CTRL_DATA, multi, on) | | 536 | IO_STATE(R_ATA_CTRL_DATA, multi, on) | |
537 | IO_STATE(R_ATA_CTRL_DATA, dma_size, word); | 537 | IO_STATE(R_ATA_CTRL_DATA, dma_size, word); |
@@ -550,7 +550,7 @@ static int cris_dma_test_irq(ide_drive_t *drive) | |||
550 | { | 550 | { |
551 | int intr = *R_IRQ_MASK0_RD; | 551 | int intr = *R_IRQ_MASK0_RD; |
552 | int bus = IO_EXTRACT(R_ATA_CTRL_DATA, sel, | 552 | int bus = IO_EXTRACT(R_ATA_CTRL_DATA, sel, |
553 | drive->hwif->io_ports[IDE_DATA_OFFSET]); | 553 | drive->hwif->io_ports.data_addr); |
554 | 554 | ||
555 | return intr & (1 << (bus + IO_BITNR(R_IRQ_MASK0_RD, ata_irq0))) ? 1 : 0; | 555 | return intr & (1 << (bus + IO_BITNR(R_IRQ_MASK0_RD, ata_irq0))) ? 1 : 0; |
556 | } | 556 | } |
@@ -644,7 +644,7 @@ cris_ide_inw(unsigned long reg) { | |||
644 | * call will also timeout on busy, but as long as the | 644 | * call will also timeout on busy, but as long as the |
645 | * write is still performed, everything will be fine. | 645 | * write is still performed, everything will be fine. |
646 | */ | 646 | */ |
647 | if (cris_ide_get_reg(reg) == IDE_STATUS_OFFSET) | 647 | if (cris_ide_get_reg(reg) == 7) |
648 | return BUSY_STAT; | 648 | return BUSY_STAT; |
649 | else | 649 | else |
650 | /* For other rare cases we assume 0 is good enough. */ | 650 | /* For other rare cases we assume 0 is good enough. */ |
@@ -765,13 +765,13 @@ static void __init cris_setup_ports(hw_regs_t *hw, unsigned long base) | |||
765 | memset(hw, 0, sizeof(*hw)); | 765 | memset(hw, 0, sizeof(*hw)); |
766 | 766 | ||
767 | for (i = 0; i <= 7; i++) | 767 | for (i = 0; i <= 7; i++) |
768 | hw->io_ports[i] = base + cris_ide_reg_addr(i, 0, 1); | 768 | hw->io_ports_array[i] = base + cris_ide_reg_addr(i, 0, 1); |
769 | 769 | ||
770 | /* | 770 | /* |
771 | * the IDE control register is at ATA address 6, | 771 | * the IDE control register is at ATA address 6, |
772 | * with CS1 active instead of CS0 | 772 | * with CS1 active instead of CS0 |
773 | */ | 773 | */ |
774 | hw->io_ports[IDE_CONTROL_OFFSET] = base + cris_ide_reg_addr(6, 1, 0); | 774 | hw->io_ports.ctl_addr = base + cris_ide_reg_addr(6, 1, 0); |
775 | 775 | ||
776 | hw->irq = ide_default_irq(0); | 776 | hw->irq = ide_default_irq(0); |
777 | hw->ack_intr = cris_ide_ack_intr; | 777 | hw->ack_intr = cris_ide_ack_intr; |
diff --git a/drivers/ide/h8300/ide-h8300.c b/drivers/ide/h8300/ide-h8300.c
index 0708b29cdb17..fd23f12e17aa 100644
--- a/drivers/ide/h8300/ide-h8300.c
+++ b/drivers/ide/h8300/ide-h8300.c
@@ -63,9 +63,9 @@ static inline void hw_setup(hw_regs_t *hw) | |||
63 | int i; | 63 | int i; |
64 | 64 | ||
65 | memset(hw, 0, sizeof(hw_regs_t)); | 65 | memset(hw, 0, sizeof(hw_regs_t)); |
66 | for (i = 0; i <= IDE_STATUS_OFFSET; i++) | 66 | for (i = 0; i <= 7; i++) |
67 | hw->io_ports[i] = CONFIG_H8300_IDE_BASE + H8300_IDE_GAP*i; | 67 | hw->io_ports_array[i] = CONFIG_H8300_IDE_BASE + H8300_IDE_GAP*i; |
68 | hw->io_ports[IDE_CONTROL_OFFSET] = CONFIG_H8300_IDE_ALT; | 68 | hw->io_ports.ctl_addr = CONFIG_H8300_IDE_ALT; |
69 | hw->irq = EXT_IRQ0 + CONFIG_H8300_IDE_IRQ; | 69 | hw->irq = EXT_IRQ0 + CONFIG_H8300_IDE_IRQ; |
70 | hw->chipset = ide_generic; | 70 | hw->chipset = ide_generic; |
71 | } | 71 | } |
diff --git a/drivers/ide/ide-acpi.c b/drivers/ide/ide-acpi.c
index e4ad26e4fce7..9d3601fa5680 100644
--- a/drivers/ide/ide-acpi.c
+++ b/drivers/ide/ide-acpi.c
@@ -60,9 +60,17 @@ struct ide_acpi_hwif_link { | |||
60 | #define DEBPRINT(fmt, args...) do {} while (0) | 60 | #define DEBPRINT(fmt, args...) do {} while (0) |
61 | #endif /* DEBUGGING */ | 61 | #endif /* DEBUGGING */ |
62 | 62 | ||
63 | extern int ide_noacpi; | 63 | int ide_noacpi; |
64 | extern int ide_noacpitfs; | 64 | module_param_named(noacpi, ide_noacpi, bool, 0); |
65 | extern int ide_noacpionboot; | 65 | MODULE_PARM_DESC(noacpi, "disable IDE ACPI support"); |
66 | |||
67 | int ide_acpigtf; | ||
68 | module_param_named(acpigtf, ide_acpigtf, bool, 0); | ||
69 | MODULE_PARM_DESC(acpigtf, "enable IDE ACPI _GTF support"); | ||
70 | |||
71 | int ide_acpionboot; | ||
72 | module_param_named(acpionboot, ide_acpionboot, bool, 0); | ||
73 | MODULE_PARM_DESC(acpionboot, "call IDE ACPI methods on boot"); | ||
66 | 74 | ||
67 | static bool ide_noacpi_psx; | 75 | static bool ide_noacpi_psx; |
68 | static int no_acpi_psx(const struct dmi_system_id *id) | 76 | static int no_acpi_psx(const struct dmi_system_id *id) |
@@ -376,7 +384,7 @@ static int taskfile_load_raw(ide_drive_t *drive, | |||
376 | memcpy(&args.tf_array[7], &gtf->tfa, 7); | 384 | memcpy(&args.tf_array[7], &gtf->tfa, 7); |
377 | args.tf_flags = IDE_TFLAG_TF | IDE_TFLAG_DEVICE; | 385 | args.tf_flags = IDE_TFLAG_TF | IDE_TFLAG_DEVICE; |
378 | 386 | ||
379 | if (ide_noacpitfs) { | 387 | if (!ide_acpigtf) { |
380 | DEBPRINT("_GTF execution disabled\n"); | 388 | DEBPRINT("_GTF execution disabled\n"); |
381 | return err; | 389 | return err; |
382 | } | 390 | } |
@@ -721,7 +729,7 @@ void ide_acpi_port_init_devices(ide_hwif_t *hwif) | |||
721 | drive->name, err); | 729 | drive->name, err); |
722 | } | 730 | } |
723 | 731 | ||
724 | if (ide_noacpionboot) { | 732 | if (!ide_acpionboot) { |
725 | DEBPRINT("ACPI methods disabled on boot\n"); | 733 | DEBPRINT("ACPI methods disabled on boot\n"); |
726 | return; | 734 | return; |
727 | } | 735 | } |
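
With the module_param_named() declarations above, ide_noacpi, ide_acpigtf and ide_acpionboot become module parameters named noacpi, acpigtf and acpionboot. Assuming ide-acpi.c is linked into the ide_core module, hypothetical usage would be:

    ide_core.noacpi=1 ide_core.acpigtf=1 ide_core.acpionboot=1   (IDE built-in)
    modprobe ide-core noacpi=1 acpigtf=1 acpionboot=1             (IDE modular)
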
diff --git a/drivers/ide/ide-cd.c b/drivers/ide/ide-cd.c
index ad984322da94..b34fd2bde96f 100644
--- a/drivers/ide/ide-cd.c
+++ b/drivers/ide/ide-cd.c
@@ -560,7 +560,7 @@ static ide_startstop_t cdrom_start_packet_command(ide_drive_t *drive, | |||
560 | /* packet command */ | 560 | /* packet command */ |
561 | spin_lock_irqsave(&ide_lock, flags); | 561 | spin_lock_irqsave(&ide_lock, flags); |
562 | hwif->OUTBSYNC(drive, WIN_PACKETCMD, | 562 | hwif->OUTBSYNC(drive, WIN_PACKETCMD, |
563 | hwif->io_ports[IDE_COMMAND_OFFSET]); | 563 | hwif->io_ports.command_addr); |
564 | ndelay(400); | 564 | ndelay(400); |
565 | spin_unlock_irqrestore(&ide_lock, flags); | 565 | spin_unlock_irqrestore(&ide_lock, flags); |
566 | 566 | ||
@@ -952,9 +952,9 @@ static ide_startstop_t cdrom_newpc_intr(ide_drive_t *drive) | |||
952 | } | 952 | } |
953 | 953 | ||
954 | /* ok we fall to pio :/ */ | 954 | /* ok we fall to pio :/ */ |
955 | ireason = hwif->INB(hwif->io_ports[IDE_IREASON_OFFSET]) & 0x3; | 955 | ireason = hwif->INB(hwif->io_ports.nsect_addr) & 0x3; |
956 | lowcyl = hwif->INB(hwif->io_ports[IDE_BCOUNTL_OFFSET]); | 956 | lowcyl = hwif->INB(hwif->io_ports.lbam_addr); |
957 | highcyl = hwif->INB(hwif->io_ports[IDE_BCOUNTH_OFFSET]); | 957 | highcyl = hwif->INB(hwif->io_ports.lbah_addr); |
958 | 958 | ||
959 | len = lowcyl + (256 * highcyl); | 959 | len = lowcyl + (256 * highcyl); |
960 | 960 | ||
@@ -1909,9 +1909,7 @@ static int ide_cdrom_setup(ide_drive_t *drive) | |||
1909 | /* set correct block size */ | 1909 | /* set correct block size */ |
1910 | blk_queue_hardsect_size(drive->queue, CD_FRAMESIZE); | 1910 | blk_queue_hardsect_size(drive->queue, CD_FRAMESIZE); |
1911 | 1911 | ||
1912 | if (drive->autotune == IDE_TUNE_DEFAULT || | 1912 | drive->dsc_overlap = (drive->next != drive); |
1913 | drive->autotune == IDE_TUNE_AUTO) | ||
1914 | drive->dsc_overlap = (drive->next != drive); | ||
1915 | 1913 | ||
1916 | if (ide_cdrom_register(drive, nslots)) { | 1914 | if (ide_cdrom_register(drive, nslots)) { |
1917 | printk(KERN_ERR "%s: %s failed to register device with the" | 1915 | printk(KERN_ERR "%s: %s failed to register device with the" |
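
The ATAPI-side replacements in this file (and in ide-floppy.c below) pair the old numeric offsets with the new named fields as follows; the pairing is taken directly from the substitutions in the patch, with the ATAPI packet-command meanings of the same taskfile registers noted for context:

    IDE_DATA_OFFSET     -> io_ports.data_addr
    IDE_IREASON_OFFSET  -> io_ports.nsect_addr    (interrupt reason)
    IDE_BCOUNTL_OFFSET  -> io_ports.lbam_addr     (byte count low)
    IDE_BCOUNTH_OFFSET  -> io_ports.lbah_addr     (byte count high)
    IDE_COMMAND_OFFSET  -> io_ports.command_addr
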
diff --git a/drivers/ide/ide-floppy.c b/drivers/ide/ide-floppy.c
index 6e891bccd052..489079b8ed03 100644
--- a/drivers/ide/ide-floppy.c
+++ b/drivers/ide/ide-floppy.c
@@ -465,10 +465,10 @@ static ide_startstop_t idefloppy_pc_intr(ide_drive_t *drive) | |||
465 | } | 465 | } |
466 | 466 | ||
467 | /* Get the number of bytes to transfer */ | 467 | /* Get the number of bytes to transfer */ |
468 | bcount = (hwif->INB(hwif->io_ports[IDE_BCOUNTH_OFFSET]) << 8) | | 468 | bcount = (hwif->INB(hwif->io_ports.lbah_addr) << 8) | |
469 | hwif->INB(hwif->io_ports[IDE_BCOUNTL_OFFSET]); | 469 | hwif->INB(hwif->io_ports.lbam_addr); |
470 | /* on this interrupt */ | 470 | /* on this interrupt */ |
471 | ireason = hwif->INB(hwif->io_ports[IDE_IREASON_OFFSET]); | 471 | ireason = hwif->INB(hwif->io_ports.nsect_addr); |
472 | 472 | ||
473 | if (ireason & CD) { | 473 | if (ireason & CD) { |
474 | printk(KERN_ERR "ide-floppy: CoD != 0 in %s\n", __func__); | 474 | printk(KERN_ERR "ide-floppy: CoD != 0 in %s\n", __func__); |
@@ -539,7 +539,7 @@ static ide_startstop_t idefloppy_transfer_pc(ide_drive_t *drive) | |||
539 | "initiated yet DRQ isn't asserted\n"); | 539 | "initiated yet DRQ isn't asserted\n"); |
540 | return startstop; | 540 | return startstop; |
541 | } | 541 | } |
542 | ireason = hwif->INB(hwif->io_ports[IDE_IREASON_OFFSET]); | 542 | ireason = hwif->INB(hwif->io_ports.nsect_addr); |
543 | if ((ireason & CD) == 0 || (ireason & IO)) { | 543 | if ((ireason & CD) == 0 || (ireason & IO)) { |
544 | printk(KERN_ERR "ide-floppy: (IO,CoD) != (0,1) while " | 544 | printk(KERN_ERR "ide-floppy: (IO,CoD) != (0,1) while " |
545 | "issuing a packet command\n"); | 545 | "issuing a packet command\n"); |
@@ -586,7 +586,7 @@ static ide_startstop_t idefloppy_transfer_pc1(ide_drive_t *drive) | |||
586 | "initiated yet DRQ isn't asserted\n"); | 586 | "initiated yet DRQ isn't asserted\n"); |
587 | return startstop; | 587 | return startstop; |
588 | } | 588 | } |
589 | ireason = hwif->INB(hwif->io_ports[IDE_IREASON_OFFSET]); | 589 | ireason = hwif->INB(hwif->io_ports.nsect_addr); |
590 | if ((ireason & CD) == 0 || (ireason & IO)) { | 590 | if ((ireason & CD) == 0 || (ireason & IO)) { |
591 | printk(KERN_ERR "ide-floppy: (IO,CoD) != (0,1) " | 591 | printk(KERN_ERR "ide-floppy: (IO,CoD) != (0,1) " |
592 | "while issuing a packet command\n"); | 592 | "while issuing a packet command\n"); |
@@ -692,7 +692,7 @@ static ide_startstop_t idefloppy_issue_pc(ide_drive_t *drive, | |||
692 | return ide_started; | 692 | return ide_started; |
693 | } else { | 693 | } else { |
694 | /* Issue the packet command */ | 694 | /* Issue the packet command */ |
695 | hwif->OUTB(WIN_PACKETCMD, hwif->io_ports[IDE_COMMAND_OFFSET]); | 695 | hwif->OUTB(WIN_PACKETCMD, hwif->io_ports.command_addr); |
696 | return (*pkt_xfer_routine) (drive); | 696 | return (*pkt_xfer_routine) (drive); |
697 | } | 697 | } |
698 | } | 698 | } |
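
[Editor's note] The ide-floppy hunks (and the ide-tape ones further down) keep testing the CD and IO bits of the ATAPI interrupt-reason value, which after this change is read from nsect_addr. For reference, a sketch of what those driver-local masks stand for; the bit values follow the ATAPI spec, and the exact #define lines are assumptions, only the macro names appear above:

        #define CD      0x01    /* CoD: 1 = command-packet phase, 0 = data phase   */
        #define IO      0x02    /* IO:  1 = device-to-host,       0 = host-to-dev  */

        /* Hence "(ireason & CD) == 0 || (ireason & IO)" rejects everything except
         * the host-to-device command-packet phase expected right after the
         * WIN_PACKETCMD command has been written. */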
diff --git a/drivers/ide/ide-io.c b/drivers/ide/ide-io.c index 0fe89a599275..3a2d8930d17f 100644 --- a/drivers/ide/ide-io.c +++ b/drivers/ide/ide-io.c | |||
@@ -298,48 +298,43 @@ static void ide_complete_pm_request (ide_drive_t *drive, struct request *rq) | |||
298 | void ide_tf_read(ide_drive_t *drive, ide_task_t *task) | 298 | void ide_tf_read(ide_drive_t *drive, ide_task_t *task) |
299 | { | 299 | { |
300 | ide_hwif_t *hwif = drive->hwif; | 300 | ide_hwif_t *hwif = drive->hwif; |
301 | struct ide_io_ports *io_ports = &hwif->io_ports; | ||
301 | struct ide_taskfile *tf = &task->tf; | 302 | struct ide_taskfile *tf = &task->tf; |
302 | 303 | ||
303 | if (task->tf_flags & IDE_TFLAG_IN_DATA) { | 304 | if (task->tf_flags & IDE_TFLAG_IN_DATA) { |
304 | u16 data = hwif->INW(hwif->io_ports[IDE_DATA_OFFSET]); | 305 | u16 data = hwif->INW(io_ports->data_addr); |
305 | 306 | ||
306 | tf->data = data & 0xff; | 307 | tf->data = data & 0xff; |
307 | tf->hob_data = (data >> 8) & 0xff; | 308 | tf->hob_data = (data >> 8) & 0xff; |
308 | } | 309 | } |
309 | 310 | ||
310 | /* be sure we're looking at the low order bits */ | 311 | /* be sure we're looking at the low order bits */ |
311 | hwif->OUTB(drive->ctl & ~0x80, hwif->io_ports[IDE_CONTROL_OFFSET]); | 312 | hwif->OUTB(drive->ctl & ~0x80, io_ports->ctl_addr); |
312 | 313 | ||
313 | if (task->tf_flags & IDE_TFLAG_IN_NSECT) | 314 | if (task->tf_flags & IDE_TFLAG_IN_NSECT) |
314 | tf->nsect = hwif->INB(hwif->io_ports[IDE_NSECTOR_OFFSET]); | 315 | tf->nsect = hwif->INB(io_ports->nsect_addr); |
315 | if (task->tf_flags & IDE_TFLAG_IN_LBAL) | 316 | if (task->tf_flags & IDE_TFLAG_IN_LBAL) |
316 | tf->lbal = hwif->INB(hwif->io_ports[IDE_SECTOR_OFFSET]); | 317 | tf->lbal = hwif->INB(io_ports->lbal_addr); |
317 | if (task->tf_flags & IDE_TFLAG_IN_LBAM) | 318 | if (task->tf_flags & IDE_TFLAG_IN_LBAM) |
318 | tf->lbam = hwif->INB(hwif->io_ports[IDE_LCYL_OFFSET]); | 319 | tf->lbam = hwif->INB(io_ports->lbam_addr); |
319 | if (task->tf_flags & IDE_TFLAG_IN_LBAH) | 320 | if (task->tf_flags & IDE_TFLAG_IN_LBAH) |
320 | tf->lbah = hwif->INB(hwif->io_ports[IDE_HCYL_OFFSET]); | 321 | tf->lbah = hwif->INB(io_ports->lbah_addr); |
321 | if (task->tf_flags & IDE_TFLAG_IN_DEVICE) | 322 | if (task->tf_flags & IDE_TFLAG_IN_DEVICE) |
322 | tf->device = hwif->INB(hwif->io_ports[IDE_SELECT_OFFSET]); | 323 | tf->device = hwif->INB(io_ports->device_addr); |
323 | 324 | ||
324 | if (task->tf_flags & IDE_TFLAG_LBA48) { | 325 | if (task->tf_flags & IDE_TFLAG_LBA48) { |
325 | hwif->OUTB(drive->ctl | 0x80, | 326 | hwif->OUTB(drive->ctl | 0x80, io_ports->ctl_addr); |
326 | hwif->io_ports[IDE_CONTROL_OFFSET]); | ||
327 | 327 | ||
328 | if (task->tf_flags & IDE_TFLAG_IN_HOB_FEATURE) | 328 | if (task->tf_flags & IDE_TFLAG_IN_HOB_FEATURE) |
329 | tf->hob_feature = | 329 | tf->hob_feature = hwif->INB(io_ports->feature_addr); |
330 | hwif->INB(hwif->io_ports[IDE_FEATURE_OFFSET]); | ||
331 | if (task->tf_flags & IDE_TFLAG_IN_HOB_NSECT) | 330 | if (task->tf_flags & IDE_TFLAG_IN_HOB_NSECT) |
332 | tf->hob_nsect = | 331 | tf->hob_nsect = hwif->INB(io_ports->nsect_addr); |
333 | hwif->INB(hwif->io_ports[IDE_NSECTOR_OFFSET]); | ||
334 | if (task->tf_flags & IDE_TFLAG_IN_HOB_LBAL) | 332 | if (task->tf_flags & IDE_TFLAG_IN_HOB_LBAL) |
335 | tf->hob_lbal = | 333 | tf->hob_lbal = hwif->INB(io_ports->lbal_addr); |
336 | hwif->INB(hwif->io_ports[IDE_SECTOR_OFFSET]); | ||
337 | if (task->tf_flags & IDE_TFLAG_IN_HOB_LBAM) | 334 | if (task->tf_flags & IDE_TFLAG_IN_HOB_LBAM) |
338 | tf->hob_lbam = | 335 | tf->hob_lbam = hwif->INB(io_ports->lbam_addr); |
339 | hwif->INB(hwif->io_ports[IDE_LCYL_OFFSET]); | ||
340 | if (task->tf_flags & IDE_TFLAG_IN_HOB_LBAH) | 336 | if (task->tf_flags & IDE_TFLAG_IN_HOB_LBAH) |
341 | tf->hob_lbah = | 337 | tf->hob_lbah = hwif->INB(io_ports->lbah_addr); |
342 | hwif->INB(hwif->io_ports[IDE_HCYL_OFFSET]); | ||
343 | } | 338 | } |
344 | } | 339 | } |
345 | 340 | ||
@@ -454,7 +449,7 @@ static ide_startstop_t ide_ata_error(ide_drive_t *drive, struct request *rq, u8 | |||
454 | if (err == ABRT_ERR) { | 449 | if (err == ABRT_ERR) { |
455 | if (drive->select.b.lba && | 450 | if (drive->select.b.lba && |
456 | /* some newer drives don't support WIN_SPECIFY */ | 451 | /* some newer drives don't support WIN_SPECIFY */ |
457 | hwif->INB(hwif->io_ports[IDE_COMMAND_OFFSET]) == | 452 | hwif->INB(hwif->io_ports.command_addr) == |
458 | WIN_SPECIFY) | 453 | WIN_SPECIFY) |
459 | return ide_stopped; | 454 | return ide_stopped; |
460 | } else if ((err & BAD_CRC) == BAD_CRC) { | 455 | } else if ((err & BAD_CRC) == BAD_CRC) { |
@@ -507,8 +502,7 @@ static ide_startstop_t ide_atapi_error(ide_drive_t *drive, struct request *rq, u | |||
507 | 502 | ||
508 | if (ide_read_status(drive) & (BUSY_STAT | DRQ_STAT)) | 503 | if (ide_read_status(drive) & (BUSY_STAT | DRQ_STAT)) |
509 | /* force an abort */ | 504 | /* force an abort */ |
510 | hwif->OUTB(WIN_IDLEIMMEDIATE, | 505 | hwif->OUTB(WIN_IDLEIMMEDIATE, hwif->io_ports.command_addr); |
511 | hwif->io_ports[IDE_COMMAND_OFFSET]); | ||
512 | 506 | ||
513 | if (rq->errors >= ERROR_MAX) { | 507 | if (rq->errors >= ERROR_MAX) { |
514 | ide_kill_rq(drive, rq); | 508 | ide_kill_rq(drive, rq); |
@@ -1421,7 +1415,7 @@ static void unexpected_intr (int irq, ide_hwgroup_t *hwgroup) | |||
1421 | */ | 1415 | */ |
1422 | do { | 1416 | do { |
1423 | if (hwif->irq == irq) { | 1417 | if (hwif->irq == irq) { |
1424 | stat = hwif->INB(hwif->io_ports[IDE_STATUS_OFFSET]); | 1418 | stat = hwif->INB(hwif->io_ports.status_addr); |
1425 | if (!OK_STAT(stat, READY_STAT, BAD_STAT)) { | 1419 | if (!OK_STAT(stat, READY_STAT, BAD_STAT)) { |
1426 | /* Try to not flood the console with msgs */ | 1420 | /* Try to not flood the console with msgs */ |
1427 | static unsigned long last_msgtime, count; | 1421 | static unsigned long last_msgtime, count; |
@@ -1511,7 +1505,7 @@ irqreturn_t ide_intr (int irq, void *dev_id) | |||
1511 | * Whack the status register, just in case | 1505 | * Whack the status register, just in case |
1512 | * we have a leftover pending IRQ. | 1506 | * we have a leftover pending IRQ. |
1513 | */ | 1507 | */ |
1514 | (void) hwif->INB(hwif->io_ports[IDE_STATUS_OFFSET]); | 1508 | (void) hwif->INB(hwif->io_ports.status_addr); |
1515 | #endif /* CONFIG_BLK_DEV_IDEPCI */ | 1509 | #endif /* CONFIG_BLK_DEV_IDEPCI */ |
1516 | } | 1510 | } |
1517 | spin_unlock_irqrestore(&ide_lock, flags); | 1511 | spin_unlock_irqrestore(&ide_lock, flags); |
diff --git a/drivers/ide/ide-iops.c b/drivers/ide/ide-iops.c index 9c646bd63549..5425d3038ec2 100644 --- a/drivers/ide/ide-iops.c +++ b/drivers/ide/ide-iops.c | |||
@@ -164,7 +164,7 @@ void SELECT_DRIVE (ide_drive_t *drive) | |||
164 | if (port_ops && port_ops->selectproc) | 164 | if (port_ops && port_ops->selectproc) |
165 | port_ops->selectproc(drive); | 165 | port_ops->selectproc(drive); |
166 | 166 | ||
167 | hwif->OUTB(drive->select.all, hwif->io_ports[IDE_SELECT_OFFSET]); | 167 | hwif->OUTB(drive->select.all, hwif->io_ports.device_addr); |
168 | } | 168 | } |
169 | 169 | ||
170 | void SELECT_MASK (ide_drive_t *drive, int mask) | 170 | void SELECT_MASK (ide_drive_t *drive, int mask) |
@@ -194,24 +194,22 @@ static void ata_vlb_sync(ide_drive_t *drive, unsigned long port) | |||
194 | */ | 194 | */ |
195 | static void ata_input_data(ide_drive_t *drive, void *buffer, u32 wcount) | 195 | static void ata_input_data(ide_drive_t *drive, void *buffer, u32 wcount) |
196 | { | 196 | { |
197 | ide_hwif_t *hwif = HWIF(drive); | 197 | ide_hwif_t *hwif = drive->hwif; |
198 | u8 io_32bit = drive->io_32bit; | 198 | struct ide_io_ports *io_ports = &hwif->io_ports; |
199 | u8 io_32bit = drive->io_32bit; | ||
199 | 200 | ||
200 | if (io_32bit) { | 201 | if (io_32bit) { |
201 | if (io_32bit & 2) { | 202 | if (io_32bit & 2) { |
202 | unsigned long flags; | 203 | unsigned long flags; |
203 | 204 | ||
204 | local_irq_save(flags); | 205 | local_irq_save(flags); |
205 | ata_vlb_sync(drive, hwif->io_ports[IDE_NSECTOR_OFFSET]); | 206 | ata_vlb_sync(drive, io_ports->nsect_addr); |
206 | hwif->INSL(hwif->io_ports[IDE_DATA_OFFSET], buffer, | 207 | hwif->INSL(io_ports->data_addr, buffer, wcount); |
207 | wcount); | ||
208 | local_irq_restore(flags); | 208 | local_irq_restore(flags); |
209 | } else | 209 | } else |
210 | hwif->INSL(hwif->io_ports[IDE_DATA_OFFSET], buffer, | 210 | hwif->INSL(io_ports->data_addr, buffer, wcount); |
211 | wcount); | ||
212 | } else | 211 | } else |
213 | hwif->INSW(hwif->io_ports[IDE_DATA_OFFSET], buffer, | 212 | hwif->INSW(io_ports->data_addr, buffer, wcount << 1); |
214 | wcount << 1); | ||
215 | } | 213 | } |
216 | 214 | ||
217 | /* | 215 | /* |
@@ -219,24 +217,22 @@ static void ata_input_data(ide_drive_t *drive, void *buffer, u32 wcount) | |||
219 | */ | 217 | */ |
220 | static void ata_output_data(ide_drive_t *drive, void *buffer, u32 wcount) | 218 | static void ata_output_data(ide_drive_t *drive, void *buffer, u32 wcount) |
221 | { | 219 | { |
222 | ide_hwif_t *hwif = HWIF(drive); | 220 | ide_hwif_t *hwif = drive->hwif; |
223 | u8 io_32bit = drive->io_32bit; | 221 | struct ide_io_ports *io_ports = &hwif->io_ports; |
222 | u8 io_32bit = drive->io_32bit; | ||
224 | 223 | ||
225 | if (io_32bit) { | 224 | if (io_32bit) { |
226 | if (io_32bit & 2) { | 225 | if (io_32bit & 2) { |
227 | unsigned long flags; | 226 | unsigned long flags; |
228 | 227 | ||
229 | local_irq_save(flags); | 228 | local_irq_save(flags); |
230 | ata_vlb_sync(drive, hwif->io_ports[IDE_NSECTOR_OFFSET]); | 229 | ata_vlb_sync(drive, io_ports->nsect_addr); |
231 | hwif->OUTSL(hwif->io_ports[IDE_DATA_OFFSET], buffer, | 230 | hwif->OUTSL(io_ports->data_addr, buffer, wcount); |
232 | wcount); | ||
233 | local_irq_restore(flags); | 231 | local_irq_restore(flags); |
234 | } else | 232 | } else |
235 | hwif->OUTSL(hwif->io_ports[IDE_DATA_OFFSET], buffer, | 233 | hwif->OUTSL(io_ports->data_addr, buffer, wcount); |
236 | wcount); | ||
237 | } else | 234 | } else |
238 | hwif->OUTSW(hwif->io_ports[IDE_DATA_OFFSET], buffer, | 235 | hwif->OUTSW(io_ports->data_addr, buffer, wcount << 1); |
239 | wcount << 1); | ||
240 | } | 236 | } |
241 | 237 | ||
242 | /* | 238 | /* |
@@ -255,14 +251,13 @@ static void atapi_input_bytes(ide_drive_t *drive, void *buffer, u32 bytecount) | |||
255 | #if defined(CONFIG_ATARI) || defined(CONFIG_Q40) | 251 | #if defined(CONFIG_ATARI) || defined(CONFIG_Q40) |
256 | if (MACH_IS_ATARI || MACH_IS_Q40) { | 252 | if (MACH_IS_ATARI || MACH_IS_Q40) { |
257 | /* Atari has a byte-swapped IDE interface */ | 253 | /* Atari has a byte-swapped IDE interface */ |
258 | insw_swapw(hwif->io_ports[IDE_DATA_OFFSET], buffer, | 254 | insw_swapw(hwif->io_ports.data_addr, buffer, bytecount / 2); |
259 | bytecount / 2); | ||
260 | return; | 255 | return; |
261 | } | 256 | } |
262 | #endif /* CONFIG_ATARI || CONFIG_Q40 */ | 257 | #endif /* CONFIG_ATARI || CONFIG_Q40 */ |
263 | hwif->ata_input_data(drive, buffer, bytecount / 4); | 258 | hwif->ata_input_data(drive, buffer, bytecount / 4); |
264 | if ((bytecount & 0x03) >= 2) | 259 | if ((bytecount & 0x03) >= 2) |
265 | hwif->INSW(hwif->io_ports[IDE_DATA_OFFSET], | 260 | hwif->INSW(hwif->io_ports.data_addr, |
266 | (u8 *)buffer + (bytecount & ~0x03), 1); | 261 | (u8 *)buffer + (bytecount & ~0x03), 1); |
267 | } | 262 | } |
268 | 263 | ||
@@ -274,14 +269,13 @@ static void atapi_output_bytes(ide_drive_t *drive, void *buffer, u32 bytecount) | |||
274 | #if defined(CONFIG_ATARI) || defined(CONFIG_Q40) | 269 | #if defined(CONFIG_ATARI) || defined(CONFIG_Q40) |
275 | if (MACH_IS_ATARI || MACH_IS_Q40) { | 270 | if (MACH_IS_ATARI || MACH_IS_Q40) { |
276 | /* Atari has a byte-swapped IDE interface */ | 271 | /* Atari has a byte-swapped IDE interface */ |
277 | outsw_swapw(hwif->io_ports[IDE_DATA_OFFSET], buffer, | 272 | outsw_swapw(hwif->io_ports.data_addr, buffer, bytecount / 2); |
278 | bytecount / 2); | ||
279 | return; | 273 | return; |
280 | } | 274 | } |
281 | #endif /* CONFIG_ATARI || CONFIG_Q40 */ | 275 | #endif /* CONFIG_ATARI || CONFIG_Q40 */ |
282 | hwif->ata_output_data(drive, buffer, bytecount / 4); | 276 | hwif->ata_output_data(drive, buffer, bytecount / 4); |
283 | if ((bytecount & 0x03) >= 2) | 277 | if ((bytecount & 0x03) >= 2) |
284 | hwif->OUTSW(hwif->io_ports[IDE_DATA_OFFSET], | 278 | hwif->OUTSW(hwif->io_ports.data_addr, |
285 | (u8 *)buffer + (bytecount & ~0x03), 1); | 279 | (u8 *)buffer + (bytecount & ~0x03), 1); |
286 | } | 280 | } |
287 | 281 | ||
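
[Editor's note] atapi_input_bytes()/atapi_output_bytes() above still split an arbitrary byte count into 32-bit-word transfers plus an optional trailing 16-bit word; only the port lookup changed. A worked example of the arithmetic, with an illustrative value:

        u32 bytecount = 510;                    /* odd-sized ATAPI DRQ block        */
        u32 dwords    = bytecount / 4;          /* 127 dwords via ata_input_data()  */
        u32 tail      = bytecount & 0x03;       /* 2 -> one extra INSW of one word  */
        /* bytes 508..509 are picked up by the single INSW at buffer + 508, i.e.
         * buffer + (bytecount & ~0x03); a tail of 0 or 1 triggers no extra word. */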
@@ -445,7 +439,7 @@ int drive_is_ready (ide_drive_t *drive) | |||
445 | * an interrupt with another pci card/device. We make no assumptions | 439 | * an interrupt with another pci card/device. We make no assumptions |
446 | * about possible isa-pnp and pci-pnp issues yet. | 440 | * about possible isa-pnp and pci-pnp issues yet. |
447 | */ | 441 | */ |
448 | if (hwif->io_ports[IDE_CONTROL_OFFSET]) | 442 | if (hwif->io_ports.ctl_addr) |
449 | stat = ide_read_altstatus(drive); | 443 | stat = ide_read_altstatus(drive); |
450 | else | 444 | else |
451 | /* Note: this may clear a pending IRQ!! */ | 445 | /* Note: this may clear a pending IRQ!! */ |
@@ -647,7 +641,7 @@ int ide_driveid_update(ide_drive_t *drive) | |||
647 | SELECT_MASK(drive, 1); | 641 | SELECT_MASK(drive, 1); |
648 | ide_set_irq(drive, 1); | 642 | ide_set_irq(drive, 1); |
649 | msleep(50); | 643 | msleep(50); |
650 | hwif->OUTB(WIN_IDENTIFY, hwif->io_ports[IDE_COMMAND_OFFSET]); | 644 | hwif->OUTB(WIN_IDENTIFY, hwif->io_ports.command_addr); |
651 | timeout = jiffies + WAIT_WORSTCASE; | 645 | timeout = jiffies + WAIT_WORSTCASE; |
652 | do { | 646 | do { |
653 | if (time_after(jiffies, timeout)) { | 647 | if (time_after(jiffies, timeout)) { |
@@ -696,6 +690,7 @@ int ide_driveid_update(ide_drive_t *drive) | |||
696 | int ide_config_drive_speed(ide_drive_t *drive, u8 speed) | 690 | int ide_config_drive_speed(ide_drive_t *drive, u8 speed) |
697 | { | 691 | { |
698 | ide_hwif_t *hwif = drive->hwif; | 692 | ide_hwif_t *hwif = drive->hwif; |
693 | struct ide_io_ports *io_ports = &hwif->io_ports; | ||
699 | int error = 0; | 694 | int error = 0; |
700 | u8 stat; | 695 | u8 stat; |
701 | 696 | ||
@@ -734,10 +729,9 @@ int ide_config_drive_speed(ide_drive_t *drive, u8 speed) | |||
734 | SELECT_MASK(drive, 0); | 729 | SELECT_MASK(drive, 0); |
735 | udelay(1); | 730 | udelay(1); |
736 | ide_set_irq(drive, 0); | 731 | ide_set_irq(drive, 0); |
737 | hwif->OUTB(speed, hwif->io_ports[IDE_NSECTOR_OFFSET]); | 732 | hwif->OUTB(speed, io_ports->nsect_addr); |
738 | hwif->OUTB(SETFEATURES_XFER, hwif->io_ports[IDE_FEATURE_OFFSET]); | 733 | hwif->OUTB(SETFEATURES_XFER, io_ports->feature_addr); |
739 | hwif->OUTBSYNC(drive, WIN_SETFEATURES, | 734 | hwif->OUTBSYNC(drive, WIN_SETFEATURES, io_ports->command_addr); |
740 | hwif->io_ports[IDE_COMMAND_OFFSET]); | ||
741 | if (drive->quirk_list == 2) | 735 | if (drive->quirk_list == 2) |
742 | ide_set_irq(drive, 1); | 736 | ide_set_irq(drive, 1); |
743 | 737 | ||
@@ -845,7 +839,7 @@ void ide_execute_command(ide_drive_t *drive, u8 cmd, ide_handler_t *handler, | |||
845 | 839 | ||
846 | spin_lock_irqsave(&ide_lock, flags); | 840 | spin_lock_irqsave(&ide_lock, flags); |
847 | __ide_set_handler(drive, handler, timeout, expiry); | 841 | __ide_set_handler(drive, handler, timeout, expiry); |
848 | hwif->OUTBSYNC(drive, cmd, hwif->io_ports[IDE_COMMAND_OFFSET]); | 842 | hwif->OUTBSYNC(drive, cmd, hwif->io_ports.command_addr); |
849 | /* | 843 | /* |
850 | * Drive takes 400nS to respond, we must avoid the IRQ being | 844 | * Drive takes 400nS to respond, we must avoid the IRQ being |
851 | * serviced before that. | 845 | * serviced before that. |
@@ -1029,6 +1023,7 @@ static ide_startstop_t do_reset1 (ide_drive_t *drive, int do_not_try_atapi) | |||
1029 | unsigned long flags; | 1023 | unsigned long flags; |
1030 | ide_hwif_t *hwif; | 1024 | ide_hwif_t *hwif; |
1031 | ide_hwgroup_t *hwgroup; | 1025 | ide_hwgroup_t *hwgroup; |
1026 | struct ide_io_ports *io_ports; | ||
1032 | const struct ide_port_ops *port_ops; | 1027 | const struct ide_port_ops *port_ops; |
1033 | u8 ctl; | 1028 | u8 ctl; |
1034 | 1029 | ||
@@ -1036,6 +1031,8 @@ static ide_startstop_t do_reset1 (ide_drive_t *drive, int do_not_try_atapi) | |||
1036 | hwif = HWIF(drive); | 1031 | hwif = HWIF(drive); |
1037 | hwgroup = HWGROUP(drive); | 1032 | hwgroup = HWGROUP(drive); |
1038 | 1033 | ||
1034 | io_ports = &hwif->io_ports; | ||
1035 | |||
1039 | /* We must not reset with running handlers */ | 1036 | /* We must not reset with running handlers */ |
1040 | BUG_ON(hwgroup->handler != NULL); | 1037 | BUG_ON(hwgroup->handler != NULL); |
1041 | 1038 | ||
@@ -1045,8 +1042,7 @@ static ide_startstop_t do_reset1 (ide_drive_t *drive, int do_not_try_atapi) | |||
1045 | pre_reset(drive); | 1042 | pre_reset(drive); |
1046 | SELECT_DRIVE(drive); | 1043 | SELECT_DRIVE(drive); |
1047 | udelay (20); | 1044 | udelay (20); |
1048 | hwif->OUTBSYNC(drive, WIN_SRST, | 1045 | hwif->OUTBSYNC(drive, WIN_SRST, io_ports->command_addr); |
1049 | hwif->io_ports[IDE_COMMAND_OFFSET]); | ||
1050 | ndelay(400); | 1046 | ndelay(400); |
1051 | hwgroup->poll_timeout = jiffies + WAIT_WORSTCASE; | 1047 | hwgroup->poll_timeout = jiffies + WAIT_WORSTCASE; |
1052 | hwgroup->polling = 1; | 1048 | hwgroup->polling = 1; |
@@ -1062,7 +1058,7 @@ static ide_startstop_t do_reset1 (ide_drive_t *drive, int do_not_try_atapi) | |||
1062 | for (unit = 0; unit < MAX_DRIVES; ++unit) | 1058 | for (unit = 0; unit < MAX_DRIVES; ++unit) |
1063 | pre_reset(&hwif->drives[unit]); | 1059 | pre_reset(&hwif->drives[unit]); |
1064 | 1060 | ||
1065 | if (hwif->io_ports[IDE_CONTROL_OFFSET] == 0) { | 1061 | if (io_ports->ctl_addr == 0) { |
1066 | spin_unlock_irqrestore(&ide_lock, flags); | 1062 | spin_unlock_irqrestore(&ide_lock, flags); |
1067 | return ide_stopped; | 1063 | return ide_stopped; |
1068 | } | 1064 | } |
@@ -1077,14 +1073,14 @@ static ide_startstop_t do_reset1 (ide_drive_t *drive, int do_not_try_atapi) | |||
1077 | * recover from reset very quickly, saving us the first 50ms wait time. | 1073 | * recover from reset very quickly, saving us the first 50ms wait time. |
1078 | */ | 1074 | */ |
1079 | /* set SRST and nIEN */ | 1075 | /* set SRST and nIEN */ |
1080 | hwif->OUTBSYNC(drive, drive->ctl|6, hwif->io_ports[IDE_CONTROL_OFFSET]); | 1076 | hwif->OUTBSYNC(drive, drive->ctl|6, io_ports->ctl_addr); |
1081 | /* more than enough time */ | 1077 | /* more than enough time */ |
1082 | udelay(10); | 1078 | udelay(10); |
1083 | if (drive->quirk_list == 2) | 1079 | if (drive->quirk_list == 2) |
1084 | ctl = drive->ctl; /* clear SRST and nIEN */ | 1080 | ctl = drive->ctl; /* clear SRST and nIEN */ |
1085 | else | 1081 | else |
1086 | ctl = drive->ctl | 2; /* clear SRST, leave nIEN */ | 1082 | ctl = drive->ctl | 2; /* clear SRST, leave nIEN */ |
1087 | hwif->OUTBSYNC(drive, ctl, hwif->io_ports[IDE_CONTROL_OFFSET]); | 1083 | hwif->OUTBSYNC(drive, ctl, io_ports->ctl_addr); |
1088 | /* more than enough time */ | 1084 | /* more than enough time */ |
1089 | udelay(10); | 1085 | udelay(10); |
1090 | hwgroup->poll_timeout = jiffies + WAIT_WORSTCASE; | 1086 | hwgroup->poll_timeout = jiffies + WAIT_WORSTCASE; |
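
[Editor's note] The reset sequence in do_reset1() writes the device-control register twice through ctl_addr. A short annotated restatement of those two writes; the ATA_SRST/ATA_NIEN names are introduced here only for readability, the driver itself uses the literal masks |6 and |2 shown above:

        #define ATA_NIEN        0x02    /* mask drive interrupts */
        #define ATA_SRST        0x04    /* host software reset   */

        /* assert SRST with interrupts masked ... */
        hwif->OUTBSYNC(drive, drive->ctl | ATA_SRST | ATA_NIEN, io_ports->ctl_addr);
        udelay(10);
        /* ... then release SRST; nIEN stays set unless quirk_list == 2 */
        hwif->OUTBSYNC(drive, drive->ctl | ATA_NIEN, io_ports->ctl_addr);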
@@ -1129,7 +1125,7 @@ int ide_wait_not_busy(ide_hwif_t *hwif, unsigned long timeout) | |||
1129 | * about locking issues (2.5 work ?). | 1125 | * about locking issues (2.5 work ?). |
1130 | */ | 1126 | */ |
1131 | mdelay(1); | 1127 | mdelay(1); |
1132 | stat = hwif->INB(hwif->io_ports[IDE_STATUS_OFFSET]); | 1128 | stat = hwif->INB(hwif->io_ports.status_addr); |
1133 | if ((stat & BUSY_STAT) == 0) | 1129 | if ((stat & BUSY_STAT) == 0) |
1134 | return 0; | 1130 | return 0; |
1135 | /* | 1131 | /* |
diff --git a/drivers/ide/ide-pnp.c b/drivers/ide/ide-pnp.c index 10c20e9a5785..6a8953f68e9f 100644 --- a/drivers/ide/ide-pnp.c +++ b/drivers/ide/ide-pnp.c | |||
@@ -82,10 +82,7 @@ static void idepnp_remove(struct pnp_dev *dev) | |||
82 | { | 82 | { |
83 | ide_hwif_t *hwif = pnp_get_drvdata(dev); | 83 | ide_hwif_t *hwif = pnp_get_drvdata(dev); |
84 | 84 | ||
85 | if (hwif) | 85 | ide_unregister(hwif); |
86 | ide_unregister(hwif->index); | ||
87 | else | ||
88 | printk(KERN_ERR "idepnp: Unable to remove device, please report.\n"); | ||
89 | 86 | ||
90 | release_region(pnp_port_start(dev, 1), 1); | 87 | release_region(pnp_port_start(dev, 1), 1); |
91 | release_region(pnp_port_start(dev, 0), 8); | 88 | release_region(pnp_port_start(dev, 0), 8); |
diff --git a/drivers/ide/ide-probe.c b/drivers/ide/ide-probe.c index a4b65b321f51..862f02603f9b 100644 --- a/drivers/ide/ide-probe.c +++ b/drivers/ide/ide-probe.c | |||
@@ -264,6 +264,7 @@ err_misc: | |||
264 | static int actual_try_to_identify (ide_drive_t *drive, u8 cmd) | 264 | static int actual_try_to_identify (ide_drive_t *drive, u8 cmd) |
265 | { | 265 | { |
266 | ide_hwif_t *hwif = HWIF(drive); | 266 | ide_hwif_t *hwif = HWIF(drive); |
267 | struct ide_io_ports *io_ports = &hwif->io_ports; | ||
267 | int use_altstatus = 0, rc; | 268 | int use_altstatus = 0, rc; |
268 | unsigned long timeout; | 269 | unsigned long timeout; |
269 | u8 s = 0, a = 0; | 270 | u8 s = 0, a = 0; |
@@ -271,7 +272,7 @@ static int actual_try_to_identify (ide_drive_t *drive, u8 cmd) | |||
271 | /* take a deep breath */ | 272 | /* take a deep breath */ |
272 | msleep(50); | 273 | msleep(50); |
273 | 274 | ||
274 | if (hwif->io_ports[IDE_CONTROL_OFFSET]) { | 275 | if (io_ports->ctl_addr) { |
275 | a = ide_read_altstatus(drive); | 276 | a = ide_read_altstatus(drive); |
276 | s = ide_read_status(drive); | 277 | s = ide_read_status(drive); |
277 | if ((a ^ s) & ~INDEX_STAT) | 278 | if ((a ^ s) & ~INDEX_STAT) |
@@ -289,10 +290,10 @@ static int actual_try_to_identify (ide_drive_t *drive, u8 cmd) | |||
289 | */ | 290 | */ |
290 | if ((cmd == WIN_PIDENTIFY)) | 291 | if ((cmd == WIN_PIDENTIFY)) |
291 | /* disable dma & overlap */ | 292 | /* disable dma & overlap */ |
292 | hwif->OUTB(0, hwif->io_ports[IDE_FEATURE_OFFSET]); | 293 | hwif->OUTB(0, io_ports->feature_addr); |
293 | 294 | ||
294 | /* ask drive for ID */ | 295 | /* ask drive for ID */ |
295 | hwif->OUTB(cmd, hwif->io_ports[IDE_COMMAND_OFFSET]); | 296 | hwif->OUTB(cmd, io_ports->command_addr); |
296 | 297 | ||
297 | timeout = ((cmd == WIN_IDENTIFY) ? WAIT_WORSTCASE : WAIT_PIDENTIFY) / 2; | 298 | timeout = ((cmd == WIN_IDENTIFY) ? WAIT_WORSTCASE : WAIT_PIDENTIFY) / 2; |
298 | timeout += jiffies; | 299 | timeout += jiffies; |
@@ -353,7 +354,7 @@ static int try_to_identify (ide_drive_t *drive, u8 cmd) | |||
353 | * interrupts during the identify-phase that | 354 | * interrupts during the identify-phase that |
354 | * the irq handler isn't expecting. | 355 | * the irq handler isn't expecting. |
355 | */ | 356 | */ |
356 | if (hwif->io_ports[IDE_CONTROL_OFFSET]) { | 357 | if (hwif->io_ports.ctl_addr) { |
357 | if (!hwif->irq) { | 358 | if (!hwif->irq) { |
358 | autoprobe = 1; | 359 | autoprobe = 1; |
359 | cookie = probe_irq_on(); | 360 | cookie = probe_irq_on(); |
@@ -393,7 +394,7 @@ static int ide_busy_sleep(ide_hwif_t *hwif) | |||
393 | 394 | ||
394 | do { | 395 | do { |
395 | msleep(50); | 396 | msleep(50); |
396 | stat = hwif->INB(hwif->io_ports[IDE_STATUS_OFFSET]); | 397 | stat = hwif->INB(hwif->io_ports.status_addr); |
397 | if ((stat & BUSY_STAT) == 0) | 398 | if ((stat & BUSY_STAT) == 0) |
398 | return 0; | 399 | return 0; |
399 | } while (time_before(jiffies, timeout)); | 400 | } while (time_before(jiffies, timeout)); |
@@ -425,6 +426,7 @@ static int ide_busy_sleep(ide_hwif_t *hwif) | |||
425 | static int do_probe (ide_drive_t *drive, u8 cmd) | 426 | static int do_probe (ide_drive_t *drive, u8 cmd) |
426 | { | 427 | { |
427 | ide_hwif_t *hwif = HWIF(drive); | 428 | ide_hwif_t *hwif = HWIF(drive); |
429 | struct ide_io_ports *io_ports = &hwif->io_ports; | ||
428 | int rc; | 430 | int rc; |
429 | u8 stat; | 431 | u8 stat; |
430 | 432 | ||
@@ -445,7 +447,7 @@ static int do_probe (ide_drive_t *drive, u8 cmd) | |||
445 | msleep(50); | 447 | msleep(50); |
446 | SELECT_DRIVE(drive); | 448 | SELECT_DRIVE(drive); |
447 | msleep(50); | 449 | msleep(50); |
448 | if (hwif->INB(hwif->io_ports[IDE_SELECT_OFFSET]) != drive->select.all && | 450 | if (hwif->INB(io_ports->device_addr) != drive->select.all && |
449 | !drive->present) { | 451 | !drive->present) { |
450 | if (drive->select.b.unit != 0) { | 452 | if (drive->select.b.unit != 0) { |
451 | /* exit with drive0 selected */ | 453 | /* exit with drive0 selected */ |
@@ -472,17 +474,13 @@ static int do_probe (ide_drive_t *drive, u8 cmd) | |||
472 | if (stat == (BUSY_STAT | READY_STAT)) | 474 | if (stat == (BUSY_STAT | READY_STAT)) |
473 | return 4; | 475 | return 4; |
474 | 476 | ||
475 | if ((rc == 1 && cmd == WIN_PIDENTIFY) && | 477 | if (rc == 1 && cmd == WIN_PIDENTIFY) { |
476 | ((drive->autotune == IDE_TUNE_DEFAULT) || | ||
477 | (drive->autotune == IDE_TUNE_AUTO))) { | ||
478 | printk(KERN_ERR "%s: no response (status = 0x%02x), " | 478 | printk(KERN_ERR "%s: no response (status = 0x%02x), " |
479 | "resetting drive\n", drive->name, stat); | 479 | "resetting drive\n", drive->name, stat); |
480 | msleep(50); | 480 | msleep(50); |
481 | hwif->OUTB(drive->select.all, | 481 | hwif->OUTB(drive->select.all, io_ports->device_addr); |
482 | hwif->io_ports[IDE_SELECT_OFFSET]); | ||
483 | msleep(50); | 482 | msleep(50); |
484 | hwif->OUTB(WIN_SRST, | 483 | hwif->OUTB(WIN_SRST, io_ports->command_addr); |
485 | hwif->io_ports[IDE_COMMAND_OFFSET]); | ||
486 | (void)ide_busy_sleep(hwif); | 484 | (void)ide_busy_sleep(hwif); |
487 | rc = try_to_identify(drive, cmd); | 485 | rc = try_to_identify(drive, cmd); |
488 | } | 486 | } |
@@ -518,7 +516,7 @@ static void enable_nest (ide_drive_t *drive) | |||
518 | printk("%s: enabling %s -- ", hwif->name, drive->id->model); | 516 | printk("%s: enabling %s -- ", hwif->name, drive->id->model); |
519 | SELECT_DRIVE(drive); | 517 | SELECT_DRIVE(drive); |
520 | msleep(50); | 518 | msleep(50); |
521 | hwif->OUTB(EXABYTE_ENABLE_NEST, hwif->io_ports[IDE_COMMAND_OFFSET]); | 519 | hwif->OUTB(EXABYTE_ENABLE_NEST, hwif->io_ports.command_addr); |
522 | 520 | ||
523 | if (ide_busy_sleep(hwif)) { | 521 | if (ide_busy_sleep(hwif)) { |
524 | printk(KERN_CONT "failed (timeout)\n"); | 522 | printk(KERN_CONT "failed (timeout)\n"); |
@@ -800,14 +798,9 @@ static int ide_probe_port(ide_hwif_t *hwif) | |||
800 | if (drive->present) | 798 | if (drive->present) |
801 | rc = 0; | 799 | rc = 0; |
802 | } | 800 | } |
803 | if (hwif->io_ports[IDE_CONTROL_OFFSET] && hwif->reset) { | 801 | |
804 | printk(KERN_WARNING "%s: reset\n", hwif->name); | ||
805 | hwif->OUTB(12, hwif->io_ports[IDE_CONTROL_OFFSET]); | ||
806 | udelay(10); | ||
807 | hwif->OUTB(8, hwif->io_ports[IDE_CONTROL_OFFSET]); | ||
808 | (void)ide_busy_sleep(hwif); | ||
809 | } | ||
810 | local_irq_restore(flags); | 802 | local_irq_restore(flags); |
803 | |||
811 | /* | 804 | /* |
812 | * Use cached IRQ number. It might be (and is...) changed by probe | 805 | * Use cached IRQ number. It might be (and is...) changed by probe |
813 | * code above | 806 | * code above |
@@ -834,12 +827,7 @@ static void ide_port_tune_devices(ide_hwif_t *hwif) | |||
834 | ide_drive_t *drive = &hwif->drives[unit]; | 827 | ide_drive_t *drive = &hwif->drives[unit]; |
835 | 828 | ||
836 | if (drive->present) { | 829 | if (drive->present) { |
837 | if (drive->autotune == IDE_TUNE_AUTO) | 830 | ide_set_max_pio(drive); |
838 | ide_set_max_pio(drive); | ||
839 | |||
840 | if (drive->autotune != IDE_TUNE_DEFAULT && | ||
841 | drive->autotune != IDE_TUNE_AUTO) | ||
842 | continue; | ||
843 | 831 | ||
844 | drive->nice1 = 1; | 832 | drive->nice1 = 1; |
845 | 833 | ||
@@ -994,6 +982,7 @@ static void ide_port_setup_devices(ide_hwif_t *hwif) | |||
994 | */ | 982 | */ |
995 | static int init_irq (ide_hwif_t *hwif) | 983 | static int init_irq (ide_hwif_t *hwif) |
996 | { | 984 | { |
985 | struct ide_io_ports *io_ports = &hwif->io_ports; | ||
997 | unsigned int index; | 986 | unsigned int index; |
998 | ide_hwgroup_t *hwgroup; | 987 | ide_hwgroup_t *hwgroup; |
999 | ide_hwif_t *match = NULL; | 988 | ide_hwif_t *match = NULL; |
@@ -1077,9 +1066,9 @@ static int init_irq (ide_hwif_t *hwif) | |||
1077 | if (IDE_CHIPSET_IS_PCI(hwif->chipset)) | 1066 | if (IDE_CHIPSET_IS_PCI(hwif->chipset)) |
1078 | sa = IRQF_SHARED; | 1067 | sa = IRQF_SHARED; |
1079 | 1068 | ||
1080 | if (hwif->io_ports[IDE_CONTROL_OFFSET]) | 1069 | if (io_ports->ctl_addr) |
1081 | /* clear nIEN */ | 1070 | /* clear nIEN */ |
1082 | hwif->OUTB(0x08, hwif->io_ports[IDE_CONTROL_OFFSET]); | 1071 | hwif->OUTB(0x08, io_ports->ctl_addr); |
1083 | 1072 | ||
1084 | if (request_irq(hwif->irq,&ide_intr,sa,hwif->name,hwgroup)) | 1073 | if (request_irq(hwif->irq,&ide_intr,sa,hwif->name,hwgroup)) |
1085 | goto out_unlink; | 1074 | goto out_unlink; |
@@ -1095,12 +1084,11 @@ static int init_irq (ide_hwif_t *hwif) | |||
1095 | 1084 | ||
1096 | #if !defined(__mc68000__) | 1085 | #if !defined(__mc68000__) |
1097 | printk("%s at 0x%03lx-0x%03lx,0x%03lx on irq %d", hwif->name, | 1086 | printk("%s at 0x%03lx-0x%03lx,0x%03lx on irq %d", hwif->name, |
1098 | hwif->io_ports[IDE_DATA_OFFSET], | 1087 | io_ports->data_addr, io_ports->status_addr, |
1099 | hwif->io_ports[IDE_DATA_OFFSET]+7, | 1088 | io_ports->ctl_addr, hwif->irq); |
1100 | hwif->io_ports[IDE_CONTROL_OFFSET], hwif->irq); | ||
1101 | #else | 1089 | #else |
1102 | printk("%s at 0x%08lx on irq %d", hwif->name, | 1090 | printk("%s at 0x%08lx on irq %d", hwif->name, |
1103 | hwif->io_ports[IDE_DATA_OFFSET], hwif->irq); | 1091 | io_ports->data_addr, hwif->irq); |
1104 | #endif /* __mc68000__ */ | 1092 | #endif /* __mc68000__ */ |
1105 | if (match) | 1093 | if (match) |
1106 | printk(" (%sed with %s)", | 1094 | printk(" (%sed with %s)", |
@@ -1242,8 +1230,8 @@ static int hwif_init(ide_hwif_t *hwif) | |||
1242 | int old_irq; | 1230 | int old_irq; |
1243 | 1231 | ||
1244 | if (!hwif->irq) { | 1232 | if (!hwif->irq) { |
1245 | if (!(hwif->irq = ide_default_irq(hwif->io_ports[IDE_DATA_OFFSET]))) | 1233 | hwif->irq = ide_default_irq(hwif->io_ports.data_addr); |
1246 | { | 1234 | if (!hwif->irq) { |
1247 | printk("%s: DISABLED, NO IRQ\n", hwif->name); | 1235 | printk("%s: DISABLED, NO IRQ\n", hwif->name); |
1248 | return 0; | 1236 | return 0; |
1249 | } | 1237 | } |
@@ -1272,7 +1260,8 @@ static int hwif_init(ide_hwif_t *hwif) | |||
1272 | * It failed to initialise. Find the default IRQ for | 1260 | * It failed to initialise. Find the default IRQ for |
1273 | * this port and try that. | 1261 | * this port and try that. |
1274 | */ | 1262 | */ |
1275 | if (!(hwif->irq = ide_default_irq(hwif->io_ports[IDE_DATA_OFFSET]))) { | 1263 | hwif->irq = ide_default_irq(hwif->io_ports.data_addr); |
1264 | if (!hwif->irq) { | ||
1276 | printk("%s: Disabled unable to get IRQ %d.\n", | 1265 | printk("%s: Disabled unable to get IRQ %d.\n", |
1277 | hwif->name, old_irq); | 1266 | hwif->name, old_irq); |
1278 | goto out; | 1267 | goto out; |
@@ -1336,8 +1325,6 @@ static void ide_port_init_devices(ide_hwif_t *hwif) | |||
1336 | drive->unmask = 1; | 1325 | drive->unmask = 1; |
1337 | if (hwif->host_flags & IDE_HFLAG_NO_UNMASK_IRQS) | 1326 | if (hwif->host_flags & IDE_HFLAG_NO_UNMASK_IRQS) |
1338 | drive->no_unmask = 1; | 1327 | drive->no_unmask = 1; |
1339 | if ((hwif->host_flags & IDE_HFLAG_NO_AUTOTUNE) == 0) | ||
1340 | drive->autotune = 1; | ||
1341 | } | 1328 | } |
1342 | 1329 | ||
1343 | if (port_ops && port_ops->port_init_devs) | 1330 | if (port_ops && port_ops->port_init_devs) |
@@ -1518,13 +1505,20 @@ int ide_device_add_all(u8 *idx, const struct ide_port_info *d) | |||
1518 | int i, rc = 0; | 1505 | int i, rc = 0; |
1519 | 1506 | ||
1520 | for (i = 0; i < MAX_HWIFS; i++) { | 1507 | for (i = 0; i < MAX_HWIFS; i++) { |
1521 | if (d == NULL || idx[i] == 0xff) { | 1508 | if (idx[i] == 0xff) { |
1522 | mate = NULL; | 1509 | mate = NULL; |
1523 | continue; | 1510 | continue; |
1524 | } | 1511 | } |
1525 | 1512 | ||
1526 | hwif = &ide_hwifs[idx[i]]; | 1513 | hwif = &ide_hwifs[idx[i]]; |
1527 | 1514 | ||
1515 | ide_port_apply_params(hwif); | ||
1516 | |||
1517 | if (d == NULL) { | ||
1518 | mate = NULL; | ||
1519 | continue; | ||
1520 | } | ||
1521 | |||
1528 | if (d->chipset != ide_etrax100 && (i & 1) && mate) { | 1522 | if (d->chipset != ide_etrax100 && (i & 1) && mate) { |
1529 | hwif->mate = mate; | 1523 | hwif->mate = mate; |
1530 | mate->mate = hwif; | 1524 | mate->mate = hwif; |
@@ -1621,6 +1615,7 @@ EXPORT_SYMBOL_GPL(ide_device_add); | |||
1621 | 1615 | ||
1622 | void ide_port_scan(ide_hwif_t *hwif) | 1616 | void ide_port_scan(ide_hwif_t *hwif) |
1623 | { | 1617 | { |
1618 | ide_port_apply_params(hwif); | ||
1624 | ide_port_cable_detect(hwif); | 1619 | ide_port_cable_detect(hwif); |
1625 | ide_port_init_devices(hwif); | 1620 | ide_port_init_devices(hwif); |
1626 | 1621 | ||
diff --git a/drivers/ide/ide-proc.c b/drivers/ide/ide-proc.c index d9d98ac85b29..7b2f3815a838 100644 --- a/drivers/ide/ide-proc.c +++ b/drivers/ide/ide-proc.c | |||
@@ -786,14 +786,6 @@ void ide_proc_register_port(ide_hwif_t *hwif) | |||
786 | } | 786 | } |
787 | } | 787 | } |
788 | 788 | ||
789 | #ifdef CONFIG_BLK_DEV_IDEPCI | ||
790 | void ide_pci_create_host_proc(const char *name, get_info_t *get_info) | ||
791 | { | ||
792 | create_proc_info_entry(name, 0, proc_ide_root, get_info); | ||
793 | } | ||
794 | EXPORT_SYMBOL_GPL(ide_pci_create_host_proc); | ||
795 | #endif | ||
796 | |||
797 | void ide_proc_unregister_port(ide_hwif_t *hwif) | 789 | void ide_proc_unregister_port(ide_hwif_t *hwif) |
798 | { | 790 | { |
799 | if (hwif->proc) { | 791 | if (hwif->proc) { |
diff --git a/drivers/ide/ide-tape.c b/drivers/ide/ide-tape.c index d3d8b8d5157c..29870c415110 100644 --- a/drivers/ide/ide-tape.c +++ b/drivers/ide/ide-tape.c | |||
@@ -72,26 +72,6 @@ enum { | |||
72 | #endif | 72 | #endif |
73 | 73 | ||
74 | /**************************** Tunable parameters *****************************/ | 74 | /**************************** Tunable parameters *****************************/ |
75 | |||
76 | |||
77 | /* | ||
78 | * Pipelined mode parameters. | ||
79 | * | ||
80 | * We try to use the minimum number of stages which is enough to keep the tape | ||
81 | * constantly streaming. To accomplish that, we implement a feedback loop around | ||
82 | * the maximum number of stages: | ||
83 | * | ||
84 | * We start from MIN maximum stages (we will not even use MIN stages if we don't | ||
85 | * need them), increment it by RATE*(MAX-MIN) whenever we sense that the | ||
86 | * pipeline is empty, until we reach the optimum value or until we reach MAX. | ||
87 | * | ||
88 | * Setting the following parameter to 0 is illegal: the pipelined mode cannot be | ||
89 | * disabled (idetape_calculate_speeds() divides by tape->max_stages.) | ||
90 | */ | ||
91 | #define IDETAPE_MIN_PIPELINE_STAGES 1 | ||
92 | #define IDETAPE_MAX_PIPELINE_STAGES 400 | ||
93 | #define IDETAPE_INCREASE_STAGES_RATE 20 | ||
94 | |||
95 | /* | 75 | /* |
96 | * After each failed packet command we issue a request sense command and retry | 76 | * After each failed packet command we issue a request sense command and retry |
97 | * the packet command IDETAPE_MAX_PC_RETRIES times. | 77 | * the packet command IDETAPE_MAX_PC_RETRIES times. |
@@ -224,28 +204,17 @@ enum { | |||
224 | /* 0 When the tape position is unknown */ | 204 | /* 0 When the tape position is unknown */ |
225 | IDETAPE_FLAG_ADDRESS_VALID = (1 << 1), | 205 | IDETAPE_FLAG_ADDRESS_VALID = (1 << 1), |
226 | /* Device already opened */ | 206 | /* Device already opened */ |
227 | IDETAPE_FLAG_BUSY = (1 << 2), | 207 | IDETAPE_FLAG_BUSY = (1 << 2), |
228 | /* Error detected in a pipeline stage */ | ||
229 | IDETAPE_FLAG_PIPELINE_ERR = (1 << 3), | ||
230 | /* Attempt to auto-detect the current user block size */ | 208 | /* Attempt to auto-detect the current user block size */ |
231 | IDETAPE_FLAG_DETECT_BS = (1 << 4), | 209 | IDETAPE_FLAG_DETECT_BS = (1 << 3), |
232 | /* Currently on a filemark */ | 210 | /* Currently on a filemark */ |
233 | IDETAPE_FLAG_FILEMARK = (1 << 5), | 211 | IDETAPE_FLAG_FILEMARK = (1 << 4), |
234 | /* DRQ interrupt device */ | 212 | /* DRQ interrupt device */ |
235 | IDETAPE_FLAG_DRQ_INTERRUPT = (1 << 6), | 213 | IDETAPE_FLAG_DRQ_INTERRUPT = (1 << 5), |
236 | /* pipeline active */ | ||
237 | IDETAPE_FLAG_PIPELINE_ACTIVE = (1 << 7), | ||
238 | /* 0 = no tape is loaded, so we don't rewind after ejecting */ | 214 | /* 0 = no tape is loaded, so we don't rewind after ejecting */ |
239 | IDETAPE_FLAG_MEDIUM_PRESENT = (1 << 8), | 215 | IDETAPE_FLAG_MEDIUM_PRESENT = (1 << 6), |
240 | }; | 216 | }; |
241 | 217 | ||
242 | /* A pipeline stage. */ | ||
243 | typedef struct idetape_stage_s { | ||
244 | struct request rq; /* The corresponding request */ | ||
245 | struct idetape_bh *bh; /* The data buffers */ | ||
246 | struct idetape_stage_s *next; /* Pointer to the next stage */ | ||
247 | } idetape_stage_t; | ||
248 | |||
249 | /* | 218 | /* |
250 | * Most of our global data which we need to save even as we leave the driver due | 219 | * Most of our global data which we need to save even as we leave the driver due |
251 | * to an interrupt or a timer event is stored in the struct defined below. | 220 | * to an interrupt or a timer event is stored in the struct defined below. |
@@ -289,9 +258,7 @@ typedef struct ide_tape_obj { | |||
289 | * While polling for DSC we use postponed_rq to postpone the current | 258 | * While polling for DSC we use postponed_rq to postpone the current |
290 | * request so that ide.c will be able to service pending requests on the | 259 | * request so that ide.c will be able to service pending requests on the |
291 | * other device. Note that at most we will have only one DSC (usually | 260 | * other device. Note that at most we will have only one DSC (usually |
292 | * data transfer) request in the device request queue. Additional | 261 | * data transfer) request in the device request queue. |
293 | * requests can be queued in our internal pipeline, but they will be | ||
294 | * visible to ide.c only one at a time. | ||
295 | */ | 262 | */ |
296 | struct request *postponed_rq; | 263 | struct request *postponed_rq; |
297 | /* The time in which we started polling for DSC */ | 264 | /* The time in which we started polling for DSC */ |
@@ -331,43 +298,20 @@ typedef struct ide_tape_obj { | |||
331 | * At most, there is only one ide-tape originated data transfer request | 298 | * At most, there is only one ide-tape originated data transfer request |
332 | * in the device request queue. This allows ide.c to easily service | 299 | * in the device request queue. This allows ide.c to easily service |
333 | * requests from the other device when we postpone our active request. | 300 | * requests from the other device when we postpone our active request. |
334 | * In the pipelined operation mode, we use our internal pipeline | ||
335 | * structure to hold more data requests. The data buffer size is chosen | ||
336 | * based on the tape's recommendation. | ||
337 | */ | 301 | */ |
338 | /* ptr to the request which is waiting in the device request queue */ | 302 | |
339 | struct request *active_data_rq; | ||
340 | /* Data buffer size chosen based on the tape's recommendation */ | 303 | /* Data buffer size chosen based on the tape's recommendation */ |
341 | int stage_size; | 304 | int buffer_size; |
342 | idetape_stage_t *merge_stage; | 305 | /* merge buffer */ |
343 | int merge_stage_size; | 306 | struct idetape_bh *merge_bh; |
307 | /* size of the merge buffer */ | ||
308 | int merge_bh_size; | ||
309 | /* pointer to current buffer head within the merge buffer */ | ||
344 | struct idetape_bh *bh; | 310 | struct idetape_bh *bh; |
345 | char *b_data; | 311 | char *b_data; |
346 | int b_count; | 312 | int b_count; |
347 | 313 | ||
348 | /* | 314 | int pages_per_buffer; |
349 | * Pipeline parameters. | ||
350 | * | ||
351 | * To accomplish non-pipelined mode, we simply set the following | ||
352 | * variables to zero (or NULL, where appropriate). | ||
353 | */ | ||
354 | /* Number of currently used stages */ | ||
355 | int nr_stages; | ||
356 | /* Number of pending stages */ | ||
357 | int nr_pending_stages; | ||
358 | /* We will not allocate more than this number of stages */ | ||
359 | int max_stages, min_pipeline, max_pipeline; | ||
360 | /* The first stage which will be removed from the pipeline */ | ||
361 | idetape_stage_t *first_stage; | ||
362 | /* The currently active stage */ | ||
363 | idetape_stage_t *active_stage; | ||
364 | /* Will be serviced after the currently active request */ | ||
365 | idetape_stage_t *next_stage; | ||
366 | /* New requests will be added to the pipeline here */ | ||
367 | idetape_stage_t *last_stage; | ||
368 | /* Optional free stage which we can use */ | ||
369 | idetape_stage_t *cache_stage; | ||
370 | int pages_per_stage; | ||
371 | /* Wasted space in each stage */ | 315 | /* Wasted space in each stage */ |
372 | int excess_bh_size; | 316 | int excess_bh_size; |
373 | 317 | ||
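
[Editor's note] With the pipeline gone, ide-tape keeps a single merge buffer per tape, and the fields introduced above replace the old per-stage bookkeeping. A rough summary of how the new fields presumably relate; only the field names and the "tape's recommendation" comment come from the diff, the relationships are assumptions:

        /* One transfer's worth of data now lives in a single buffer-head chain:
         *
         *   tape->buffer_size       bytes per data transfer, chosen from the
         *                           tape's recommended transfer size
         *   tape->pages_per_buffer  pages backing that buffer
         *   tape->merge_bh          head of the idetape_bh chain
         *   tape->merge_bh_size     bytes currently accumulated in it
         *   tape->bh/b_data/b_count cursor into the chain during copy-in/out
         *   tape->excess_bh_size    bytes wasted at the tail of the chain
         */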
@@ -388,45 +332,6 @@ typedef struct ide_tape_obj { | |||
388 | /* the tape is write protected (hardware or opened as read-only) */ | 332 | /* the tape is write protected (hardware or opened as read-only) */ |
389 | char write_prot; | 333 | char write_prot; |
390 | 334 | ||
391 | /* | ||
392 | * Limit the number of times a request can be postponed, to avoid an | ||
393 | * infinite postpone deadlock. | ||
394 | */ | ||
395 | int postpone_cnt; | ||
396 | |||
397 | /* | ||
398 | * Measures number of frames: | ||
399 | * | ||
400 | * 1. written/read to/from the driver pipeline (pipeline_head). | ||
401 | * 2. written/read to/from the tape buffers (idetape_bh). | ||
402 | * 3. written/read by the tape to/from the media (tape_head). | ||
403 | */ | ||
404 | int pipeline_head; | ||
405 | int buffer_head; | ||
406 | int tape_head; | ||
407 | int last_tape_head; | ||
408 | |||
409 | /* Speed control at the tape buffers input/output */ | ||
410 | unsigned long insert_time; | ||
411 | int insert_size; | ||
412 | int insert_speed; | ||
413 | int max_insert_speed; | ||
414 | int measure_insert_time; | ||
415 | |||
416 | /* Speed regulation negative feedback loop */ | ||
417 | int speed_control; | ||
418 | int pipeline_head_speed; | ||
419 | int controlled_pipeline_head_speed; | ||
420 | int uncontrolled_pipeline_head_speed; | ||
421 | int controlled_last_pipeline_head; | ||
422 | unsigned long uncontrolled_pipeline_head_time; | ||
423 | unsigned long controlled_pipeline_head_time; | ||
424 | int controlled_previous_pipeline_head; | ||
425 | int uncontrolled_previous_pipeline_head; | ||
426 | unsigned long controlled_previous_head_time; | ||
427 | unsigned long uncontrolled_previous_head_time; | ||
428 | int restart_speed_control_req; | ||
429 | |||
430 | u32 debug_mask; | 335 | u32 debug_mask; |
431 | } idetape_tape_t; | 336 | } idetape_tape_t; |
432 | 337 | ||
@@ -674,128 +579,36 @@ static void idetape_analyze_error(ide_drive_t *drive, u8 *sense) | |||
674 | } | 579 | } |
675 | } | 580 | } |
676 | 581 | ||
677 | static void idetape_activate_next_stage(ide_drive_t *drive) | 582 | /* Free data buffers completely. */ |
583 | static void ide_tape_kfree_buffer(idetape_tape_t *tape) | ||
678 | { | 584 | { |
679 | idetape_tape_t *tape = drive->driver_data; | 585 | struct idetape_bh *prev_bh, *bh = tape->merge_bh; |
680 | idetape_stage_t *stage = tape->next_stage; | ||
681 | struct request *rq = &stage->rq; | ||
682 | 586 | ||
683 | debug_log(DBG_PROCS, "Enter %s\n", __func__); | 587 | while (bh) { |
588 | u32 size = bh->b_size; | ||
684 | 589 | ||
685 | if (stage == NULL) { | 590 | while (size) { |
686 | printk(KERN_ERR "ide-tape: bug: Trying to activate a non" | 591 | unsigned int order = fls(size >> PAGE_SHIFT)-1; |
687 | " existing stage\n"); | ||
688 | return; | ||
689 | } | ||
690 | 592 | ||
691 | rq->rq_disk = tape->disk; | 593 | if (bh->b_data) |
692 | rq->buffer = NULL; | 594 | free_pages((unsigned long)bh->b_data, order); |
693 | rq->special = (void *)stage->bh; | 595 | |
694 | tape->active_data_rq = rq; | 596 | size &= (order-1); |
695 | tape->active_stage = stage; | 597 | bh->b_data += (1 << order) * PAGE_SIZE; |
696 | tape->next_stage = stage->next; | ||
697 | } | ||
698 | |||
699 | /* Free a stage along with its related buffers completely. */ | ||
700 | static void __idetape_kfree_stage(idetape_stage_t *stage) | ||
701 | { | ||
702 | struct idetape_bh *prev_bh, *bh = stage->bh; | ||
703 | int size; | ||
704 | |||
705 | while (bh != NULL) { | ||
706 | if (bh->b_data != NULL) { | ||
707 | size = (int) bh->b_size; | ||
708 | while (size > 0) { | ||
709 | free_page((unsigned long) bh->b_data); | ||
710 | size -= PAGE_SIZE; | ||
711 | bh->b_data += PAGE_SIZE; | ||
712 | } | ||
713 | } | 598 | } |
714 | prev_bh = bh; | 599 | prev_bh = bh; |
715 | bh = bh->b_reqnext; | 600 | bh = bh->b_reqnext; |
716 | kfree(prev_bh); | 601 | kfree(prev_bh); |
717 | } | 602 | } |
718 | kfree(stage); | 603 | kfree(tape->merge_bh); |
719 | } | ||
720 | |||
721 | static void idetape_kfree_stage(idetape_tape_t *tape, idetape_stage_t *stage) | ||
722 | { | ||
723 | __idetape_kfree_stage(stage); | ||
724 | } | ||
725 | |||
726 | /* | ||
727 | * Remove tape->first_stage from the pipeline. The caller should avoid race | ||
728 | * conditions. | ||
729 | */ | ||
730 | static void idetape_remove_stage_head(ide_drive_t *drive) | ||
731 | { | ||
732 | idetape_tape_t *tape = drive->driver_data; | ||
733 | idetape_stage_t *stage; | ||
734 | |||
735 | debug_log(DBG_PROCS, "Enter %s\n", __func__); | ||
736 | |||
737 | if (tape->first_stage == NULL) { | ||
738 | printk(KERN_ERR "ide-tape: bug: tape->first_stage is NULL\n"); | ||
739 | return; | ||
740 | } | ||
741 | if (tape->active_stage == tape->first_stage) { | ||
742 | printk(KERN_ERR "ide-tape: bug: Trying to free our active " | ||
743 | "pipeline stage\n"); | ||
744 | return; | ||
745 | } | ||
746 | stage = tape->first_stage; | ||
747 | tape->first_stage = stage->next; | ||
748 | idetape_kfree_stage(tape, stage); | ||
749 | tape->nr_stages--; | ||
750 | if (tape->first_stage == NULL) { | ||
751 | tape->last_stage = NULL; | ||
752 | if (tape->next_stage != NULL) | ||
753 | printk(KERN_ERR "ide-tape: bug: tape->next_stage !=" | ||
754 | " NULL\n"); | ||
755 | if (tape->nr_stages) | ||
756 | printk(KERN_ERR "ide-tape: bug: nr_stages should be 0 " | ||
757 | "now\n"); | ||
758 | } | ||
759 | } | 604 | } |
760 | 605 | ||
761 | /* | ||
762 | * This will free all the pipeline stages starting from new_last_stage->next | ||
763 | * to the end of the list, and point tape->last_stage to new_last_stage. | ||
764 | */ | ||
765 | static void idetape_abort_pipeline(ide_drive_t *drive, | ||
766 | idetape_stage_t *new_last_stage) | ||
767 | { | ||
768 | idetape_tape_t *tape = drive->driver_data; | ||
769 | idetape_stage_t *stage = new_last_stage->next; | ||
770 | idetape_stage_t *nstage; | ||
771 | |||
772 | debug_log(DBG_PROCS, "%s: Enter %s\n", tape->name, __func__); | ||
773 | |||
774 | while (stage) { | ||
775 | nstage = stage->next; | ||
776 | idetape_kfree_stage(tape, stage); | ||
777 | --tape->nr_stages; | ||
778 | --tape->nr_pending_stages; | ||
779 | stage = nstage; | ||
780 | } | ||
781 | if (new_last_stage) | ||
782 | new_last_stage->next = NULL; | ||
783 | tape->last_stage = new_last_stage; | ||
784 | tape->next_stage = NULL; | ||
785 | } | ||
786 | |||
787 | /* | ||
788 | * Finish servicing a request and insert a pending pipeline request into the | ||
789 | * main device queue. | ||
790 | */ | ||
791 | static int idetape_end_request(ide_drive_t *drive, int uptodate, int nr_sects) | 606 | static int idetape_end_request(ide_drive_t *drive, int uptodate, int nr_sects) |
792 | { | 607 | { |
793 | struct request *rq = HWGROUP(drive)->rq; | 608 | struct request *rq = HWGROUP(drive)->rq; |
794 | idetape_tape_t *tape = drive->driver_data; | 609 | idetape_tape_t *tape = drive->driver_data; |
795 | unsigned long flags; | 610 | unsigned long flags; |
796 | int error; | 611 | int error; |
797 | int remove_stage = 0; | ||
798 | idetape_stage_t *active_stage; | ||
799 | 612 | ||
800 | debug_log(DBG_PROCS, "Enter %s\n", __func__); | 613 | debug_log(DBG_PROCS, "Enter %s\n", __func__); |
801 | 614 | ||
@@ -815,58 +628,8 @@ static int idetape_end_request(ide_drive_t *drive, int uptodate, int nr_sects) | |||
815 | 628 | ||
816 | spin_lock_irqsave(&tape->lock, flags); | 629 | spin_lock_irqsave(&tape->lock, flags); |
817 | 630 | ||
818 | /* The request was a pipelined data transfer request */ | ||
819 | if (tape->active_data_rq == rq) { | ||
820 | active_stage = tape->active_stage; | ||
821 | tape->active_stage = NULL; | ||
822 | tape->active_data_rq = NULL; | ||
823 | tape->nr_pending_stages--; | ||
824 | if (rq->cmd[0] & REQ_IDETAPE_WRITE) { | ||
825 | remove_stage = 1; | ||
826 | if (error) { | ||
827 | set_bit(IDETAPE_FLAG_PIPELINE_ERR, | ||
828 | &tape->flags); | ||
829 | if (error == IDETAPE_ERROR_EOD) | ||
830 | idetape_abort_pipeline(drive, | ||
831 | active_stage); | ||
832 | } | ||
833 | } else if (rq->cmd[0] & REQ_IDETAPE_READ) { | ||
834 | if (error == IDETAPE_ERROR_EOD) { | ||
835 | set_bit(IDETAPE_FLAG_PIPELINE_ERR, | ||
836 | &tape->flags); | ||
837 | idetape_abort_pipeline(drive, active_stage); | ||
838 | } | ||
839 | } | ||
840 | if (tape->next_stage != NULL) { | ||
841 | idetape_activate_next_stage(drive); | ||
842 | |||
843 | /* Insert the next request into the request queue. */ | ||
844 | (void)ide_do_drive_cmd(drive, tape->active_data_rq, | ||
845 | ide_end); | ||
846 | } else if (!error) { | ||
847 | /* | ||
848 | * This is a part of the feedback loop which tries to | ||
849 | * find the optimum number of stages. We are starting | ||
850 | * from a minimum maximum number of stages, and if we | ||
851 | * sense that the pipeline is empty, we try to increase | ||
852 | * it, until we reach the user compile time memory | ||
853 | * limit. | ||
854 | */ | ||
855 | int i = (tape->max_pipeline - tape->min_pipeline) / 10; | ||
856 | |||
857 | tape->max_stages += max(i, 1); | ||
858 | tape->max_stages = max(tape->max_stages, | ||
859 | tape->min_pipeline); | ||
860 | tape->max_stages = min(tape->max_stages, | ||
861 | tape->max_pipeline); | ||
862 | } | ||
863 | } | ||
864 | ide_end_drive_cmd(drive, 0, 0); | 631 | ide_end_drive_cmd(drive, 0, 0); |
865 | 632 | ||
866 | if (remove_stage) | ||
867 | idetape_remove_stage_head(drive); | ||
868 | if (tape->active_data_rq == NULL) | ||
869 | clear_bit(IDETAPE_FLAG_PIPELINE_ACTIVE, &tape->flags); | ||
870 | spin_unlock_irqrestore(&tape->lock, flags); | 633 | spin_unlock_irqrestore(&tape->lock, flags); |
871 | return 0; | 634 | return 0; |
872 | } | 635 | } |
@@ -1083,10 +846,10 @@ static ide_startstop_t idetape_pc_intr(ide_drive_t *drive) | |||
1083 | return ide_do_reset(drive); | 846 | return ide_do_reset(drive); |
1084 | } | 847 | } |
1085 | /* Get the number of bytes to transfer on this interrupt. */ | 848 | /* Get the number of bytes to transfer on this interrupt. */ |
1086 | bcount = (hwif->INB(hwif->io_ports[IDE_BCOUNTH_OFFSET]) << 8) | | 849 | bcount = (hwif->INB(hwif->io_ports.lbah_addr) << 8) | |
1087 | hwif->INB(hwif->io_ports[IDE_BCOUNTL_OFFSET]); | 850 | hwif->INB(hwif->io_ports.lbam_addr); |
1088 | 851 | ||
1089 | ireason = hwif->INB(hwif->io_ports[IDE_IREASON_OFFSET]); | 852 | ireason = hwif->INB(hwif->io_ports.nsect_addr); |
1090 | 853 | ||
1091 | if (ireason & CD) { | 854 | if (ireason & CD) { |
1092 | printk(KERN_ERR "ide-tape: CoD != 0 in %s\n", __func__); | 855 | printk(KERN_ERR "ide-tape: CoD != 0 in %s\n", __func__); |
@@ -1190,12 +953,12 @@ static ide_startstop_t idetape_transfer_pc(ide_drive_t *drive) | |||
1190 | "yet DRQ isn't asserted\n"); | 953 | "yet DRQ isn't asserted\n"); |
1191 | return startstop; | 954 | return startstop; |
1192 | } | 955 | } |
1193 | ireason = hwif->INB(hwif->io_ports[IDE_IREASON_OFFSET]); | 956 | ireason = hwif->INB(hwif->io_ports.nsect_addr); |
1194 | while (retries-- && ((ireason & CD) == 0 || (ireason & IO))) { | 957 | while (retries-- && ((ireason & CD) == 0 || (ireason & IO))) { |
1195 | printk(KERN_ERR "ide-tape: (IO,CoD != (0,1) while issuing " | 958 | printk(KERN_ERR "ide-tape: (IO,CoD != (0,1) while issuing " |
1196 | "a packet command, retrying\n"); | 959 | "a packet command, retrying\n"); |
1197 | udelay(100); | 960 | udelay(100); |
1198 | ireason = hwif->INB(hwif->io_ports[IDE_IREASON_OFFSET]); | 961 | ireason = hwif->INB(hwif->io_ports.nsect_addr); |
1199 | if (retries == 0) { | 962 | if (retries == 0) { |
1200 | printk(KERN_ERR "ide-tape: (IO,CoD != (0,1) while " | 963 | printk(KERN_ERR "ide-tape: (IO,CoD != (0,1) while " |
1201 | "issuing a packet command, ignoring\n"); | 964 | "issuing a packet command, ignoring\n"); |
@@ -1292,7 +1055,7 @@ static ide_startstop_t idetape_issue_pc(ide_drive_t *drive, | |||
1292 | IDETAPE_WAIT_CMD, NULL); | 1055 | IDETAPE_WAIT_CMD, NULL); |
1293 | return ide_started; | 1056 | return ide_started; |
1294 | } else { | 1057 | } else { |
1295 | hwif->OUTB(WIN_PACKETCMD, hwif->io_ports[IDE_COMMAND_OFFSET]); | 1058 | hwif->OUTB(WIN_PACKETCMD, hwif->io_ports.command_addr); |
1296 | return idetape_transfer_pc(drive); | 1059 | return idetape_transfer_pc(drive); |
1297 | } | 1060 | } |
1298 | } | 1061 | } |
@@ -1335,69 +1098,6 @@ static void idetape_create_mode_sense_cmd(struct ide_atapi_pc *pc, u8 page_code) | |||
1335 | pc->idetape_callback = &idetape_pc_callback; | 1098 | pc->idetape_callback = &idetape_pc_callback; |
1336 | } | 1099 | } |
1337 | 1100 | ||
1338 | static void idetape_calculate_speeds(ide_drive_t *drive) | ||
1339 | { | ||
1340 | idetape_tape_t *tape = drive->driver_data; | ||
1341 | |||
1342 | if (time_after(jiffies, | ||
1343 | tape->controlled_pipeline_head_time + 120 * HZ)) { | ||
1344 | tape->controlled_previous_pipeline_head = | ||
1345 | tape->controlled_last_pipeline_head; | ||
1346 | tape->controlled_previous_head_time = | ||
1347 | tape->controlled_pipeline_head_time; | ||
1348 | tape->controlled_last_pipeline_head = tape->pipeline_head; | ||
1349 | tape->controlled_pipeline_head_time = jiffies; | ||
1350 | } | ||
1351 | if (time_after(jiffies, tape->controlled_pipeline_head_time + 60 * HZ)) | ||
1352 | tape->controlled_pipeline_head_speed = (tape->pipeline_head - | ||
1353 | tape->controlled_last_pipeline_head) * 32 * HZ / | ||
1354 | (jiffies - tape->controlled_pipeline_head_time); | ||
1355 | else if (time_after(jiffies, tape->controlled_previous_head_time)) | ||
1356 | tape->controlled_pipeline_head_speed = (tape->pipeline_head - | ||
1357 | tape->controlled_previous_pipeline_head) * 32 * | ||
1358 | HZ / (jiffies - tape->controlled_previous_head_time); | ||
1359 | |||
1360 | if (tape->nr_pending_stages < tape->max_stages/*- 1 */) { | ||
1361 | /* -1 for read mode error recovery */ | ||
1362 | if (time_after(jiffies, tape->uncontrolled_previous_head_time + | ||
1363 | 10 * HZ)) { | ||
1364 | tape->uncontrolled_pipeline_head_time = jiffies; | ||
1365 | tape->uncontrolled_pipeline_head_speed = | ||
1366 | (tape->pipeline_head - | ||
1367 | tape->uncontrolled_previous_pipeline_head) * | ||
1368 | 32 * HZ / (jiffies - | ||
1369 | tape->uncontrolled_previous_head_time); | ||
1370 | } | ||
1371 | } else { | ||
1372 | tape->uncontrolled_previous_head_time = jiffies; | ||
1373 | tape->uncontrolled_previous_pipeline_head = tape->pipeline_head; | ||
1374 | if (time_after(jiffies, tape->uncontrolled_pipeline_head_time + | ||
1375 | 30 * HZ)) | ||
1376 | tape->uncontrolled_pipeline_head_time = jiffies; | ||
1377 | |||
1378 | } | ||
1379 | tape->pipeline_head_speed = max(tape->uncontrolled_pipeline_head_speed, | ||
1380 | tape->controlled_pipeline_head_speed); | ||
1381 | |||
1382 | if (tape->speed_control == 1) { | ||
1383 | if (tape->nr_pending_stages >= tape->max_stages / 2) | ||
1384 | tape->max_insert_speed = tape->pipeline_head_speed + | ||
1385 | (1100 - tape->pipeline_head_speed) * 2 * | ||
1386 | (tape->nr_pending_stages - tape->max_stages / 2) | ||
1387 | / tape->max_stages; | ||
1388 | else | ||
1389 | tape->max_insert_speed = 500 + | ||
1390 | (tape->pipeline_head_speed - 500) * 2 * | ||
1391 | tape->nr_pending_stages / tape->max_stages; | ||
1392 | |||
1393 | if (tape->nr_pending_stages >= tape->max_stages * 99 / 100) | ||
1394 | tape->max_insert_speed = 5000; | ||
1395 | } else | ||
1396 | tape->max_insert_speed = tape->speed_control; | ||
1397 | |||
1398 | tape->max_insert_speed = max(tape->max_insert_speed, 500); | ||
1399 | } | ||
1400 | |||
1401 | static ide_startstop_t idetape_media_access_finished(ide_drive_t *drive) | 1101 | static ide_startstop_t idetape_media_access_finished(ide_drive_t *drive) |
1402 | { | 1102 | { |
1403 | idetape_tape_t *tape = drive->driver_data; | 1103 | idetape_tape_t *tape = drive->driver_data; |
@@ -1432,17 +1132,7 @@ static ide_startstop_t idetape_rw_callback(ide_drive_t *drive) | |||
1432 | int blocks = tape->pc->xferred / tape->blk_size; | 1132 | int blocks = tape->pc->xferred / tape->blk_size; |
1433 | 1133 | ||
1434 | tape->avg_size += blocks * tape->blk_size; | 1134 | tape->avg_size += blocks * tape->blk_size; |
1435 | tape->insert_size += blocks * tape->blk_size; | 1135 | |
1436 | if (tape->insert_size > 1024 * 1024) | ||
1437 | tape->measure_insert_time = 1; | ||
1438 | if (tape->measure_insert_time) { | ||
1439 | tape->measure_insert_time = 0; | ||
1440 | tape->insert_time = jiffies; | ||
1441 | tape->insert_size = 0; | ||
1442 | } | ||
1443 | if (time_after(jiffies, tape->insert_time)) | ||
1444 | tape->insert_speed = tape->insert_size / 1024 * HZ / | ||
1445 | (jiffies - tape->insert_time); | ||
1446 | if (time_after_eq(jiffies, tape->avg_time + HZ)) { | 1136 | if (time_after_eq(jiffies, tape->avg_time + HZ)) { |
1447 | tape->avg_speed = tape->avg_size * HZ / | 1137 | tape->avg_speed = tape->avg_size * HZ / |
1448 | (jiffies - tape->avg_time) / 1024; | 1138 | (jiffies - tape->avg_time) / 1024; |
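Editor's note: with the insert-speed bookkeeping gone, the callback only keeps the running average: bytes moved since avg_time, scaled by HZ and divided by the elapsed jiffies and by 1024, i.e. KB/s. A worked sketch of the same arithmetic (the HZ value is an assumption for the example):

#include <stdio.h>

#define HZ 250                  /* assumed tick rate for this example */

int main(void)
{
        unsigned long avg_size = 512 * 1024;    /* bytes since avg_time */
        unsigned long elapsed  = 2 * HZ;        /* jiffies, two seconds */

        /* same expression as idetape_rw_callback(): KB per second */
        unsigned long avg_speed = avg_size * HZ / elapsed / 1024;

        printf("%lu KB/s\n", avg_speed);        /* prints 256 */
        return 0;
}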
@@ -1475,7 +1165,7 @@ static void idetape_create_read_cmd(idetape_tape_t *tape, | |||
1475 | pc->buf = NULL; | 1165 | pc->buf = NULL; |
1476 | pc->buf_size = length * tape->blk_size; | 1166 | pc->buf_size = length * tape->blk_size; |
1477 | pc->req_xfer = pc->buf_size; | 1167 | pc->req_xfer = pc->buf_size; |
1478 | if (pc->req_xfer == tape->stage_size) | 1168 | if (pc->req_xfer == tape->buffer_size) |
1479 | pc->flags |= PC_FLAG_DMA_RECOMMENDED; | 1169 | pc->flags |= PC_FLAG_DMA_RECOMMENDED; |
1480 | } | 1170 | } |
1481 | 1171 | ||
@@ -1495,7 +1185,7 @@ static void idetape_create_write_cmd(idetape_tape_t *tape, | |||
1495 | pc->buf = NULL; | 1185 | pc->buf = NULL; |
1496 | pc->buf_size = length * tape->blk_size; | 1186 | pc->buf_size = length * tape->blk_size; |
1497 | pc->req_xfer = pc->buf_size; | 1187 | pc->req_xfer = pc->buf_size; |
1498 | if (pc->req_xfer == tape->stage_size) | 1188 | if (pc->req_xfer == tape->buffer_size) |
1499 | pc->flags |= PC_FLAG_DMA_RECOMMENDED; | 1189 | pc->flags |= PC_FLAG_DMA_RECOMMENDED; |
1500 | } | 1190 | } |
1501 | 1191 | ||
@@ -1547,10 +1237,6 @@ static ide_startstop_t idetape_do_request(ide_drive_t *drive, | |||
1547 | drive->post_reset = 0; | 1237 | drive->post_reset = 0; |
1548 | } | 1238 | } |
1549 | 1239 | ||
1550 | if (time_after(jiffies, tape->insert_time)) | ||
1551 | tape->insert_speed = tape->insert_size / 1024 * HZ / | ||
1552 | (jiffies - tape->insert_time); | ||
1553 | idetape_calculate_speeds(drive); | ||
1554 | if (!test_and_clear_bit(IDETAPE_FLAG_IGNORE_DSC, &tape->flags) && | 1240 | if (!test_and_clear_bit(IDETAPE_FLAG_IGNORE_DSC, &tape->flags) && |
1555 | (stat & SEEK_STAT) == 0) { | 1241 | (stat & SEEK_STAT) == 0) { |
1556 | if (postponed_rq == NULL) { | 1242 | if (postponed_rq == NULL) { |
@@ -1574,16 +1260,12 @@ static ide_startstop_t idetape_do_request(ide_drive_t *drive, | |||
1574 | return ide_stopped; | 1260 | return ide_stopped; |
1575 | } | 1261 | } |
1576 | if (rq->cmd[0] & REQ_IDETAPE_READ) { | 1262 | if (rq->cmd[0] & REQ_IDETAPE_READ) { |
1577 | tape->buffer_head++; | ||
1578 | tape->postpone_cnt = 0; | ||
1579 | pc = idetape_next_pc_storage(drive); | 1263 | pc = idetape_next_pc_storage(drive); |
1580 | idetape_create_read_cmd(tape, pc, rq->current_nr_sectors, | 1264 | idetape_create_read_cmd(tape, pc, rq->current_nr_sectors, |
1581 | (struct idetape_bh *)rq->special); | 1265 | (struct idetape_bh *)rq->special); |
1582 | goto out; | 1266 | goto out; |
1583 | } | 1267 | } |
1584 | if (rq->cmd[0] & REQ_IDETAPE_WRITE) { | 1268 | if (rq->cmd[0] & REQ_IDETAPE_WRITE) { |
1585 | tape->buffer_head++; | ||
1586 | tape->postpone_cnt = 0; | ||
1587 | pc = idetape_next_pc_storage(drive); | 1269 | pc = idetape_next_pc_storage(drive); |
1588 | idetape_create_write_cmd(tape, pc, rq->current_nr_sectors, | 1270 | idetape_create_write_cmd(tape, pc, rq->current_nr_sectors, |
1589 | (struct idetape_bh *)rq->special); | 1271 | (struct idetape_bh *)rq->special); |
@@ -1604,103 +1286,91 @@ out: | |||
1604 | return idetape_issue_pc(drive, pc); | 1286 | return idetape_issue_pc(drive, pc); |
1605 | } | 1287 | } |
1606 | 1288 | ||
1607 | /* Pipeline related functions */ | ||
1608 | |||
1609 | /* | 1289 | /* |
1610 | * The function below uses __get_free_page to allocate a pipeline stage, along | 1290 | * The function below uses __get_free_pages to allocate a data buffer of size |
1611 | * with all the necessary small buffers which together make a buffer of size | 1291 | * tape->buffer_size (or a bit more). We attempt to combine sequential pages as |
1612 | * tape->stage_size (or a bit more). We attempt to combine sequential pages as | ||
1613 | * much as possible. | 1292 | * much as possible. |
1614 | * | 1293 | * |
1615 | * It returns a pointer to the new allocated stage, or NULL if we can't (or | 1294 | * It returns a pointer to the newly allocated buffer, or NULL in case of |
1616 | * don't want to) allocate a stage. | 1295 | * failure. |
1617 | * | ||
1618 | * Pipeline stages are optional and are used to increase performance. If we | ||
1619 | * can't allocate them, we'll manage without them. | ||
1620 | */ | 1296 | */ |
1621 | static idetape_stage_t *__idetape_kmalloc_stage(idetape_tape_t *tape, int full, | 1297 | static struct idetape_bh *ide_tape_kmalloc_buffer(idetape_tape_t *tape, |
1622 | int clear) | 1298 | int full, int clear) |
1623 | { | 1299 | { |
1624 | idetape_stage_t *stage; | 1300 | struct idetape_bh *prev_bh, *bh, *merge_bh; |
1625 | struct idetape_bh *prev_bh, *bh; | 1301 | int pages = tape->pages_per_buffer; |
1626 | int pages = tape->pages_per_stage; | 1302 | unsigned int order, b_allocd; |
1627 | char *b_data = NULL; | 1303 | char *b_data = NULL; |
1628 | 1304 | ||
1629 | stage = kmalloc(sizeof(idetape_stage_t), GFP_KERNEL); | 1305 | merge_bh = kmalloc(sizeof(struct idetape_bh), GFP_KERNEL); |
1630 | if (!stage) | 1306 | bh = merge_bh; |
1631 | return NULL; | ||
1632 | stage->next = NULL; | ||
1633 | |||
1634 | stage->bh = kmalloc(sizeof(struct idetape_bh), GFP_KERNEL); | ||
1635 | bh = stage->bh; | ||
1636 | if (bh == NULL) | 1307 | if (bh == NULL) |
1637 | goto abort; | 1308 | goto abort; |
1638 | bh->b_reqnext = NULL; | 1309 | |
1639 | bh->b_data = (char *) __get_free_page(GFP_KERNEL); | 1310 | order = fls(pages) - 1; |
1311 | bh->b_data = (char *) __get_free_pages(GFP_KERNEL, order); | ||
1640 | if (!bh->b_data) | 1312 | if (!bh->b_data) |
1641 | goto abort; | 1313 | goto abort; |
1314 | b_allocd = (1 << order) * PAGE_SIZE; | ||
1315 | pages &= (1 << order) - 1; | ||
1316 | |||
1642 | if (clear) | 1317 | if (clear) |
1643 | memset(bh->b_data, 0, PAGE_SIZE); | 1318 | memset(bh->b_data, 0, b_allocd); |
1644 | bh->b_size = PAGE_SIZE; | 1319 | bh->b_reqnext = NULL; |
1320 | bh->b_size = b_allocd; | ||
1645 | atomic_set(&bh->b_count, full ? bh->b_size : 0); | 1321 | atomic_set(&bh->b_count, full ? bh->b_size : 0); |
1646 | 1322 | ||
1647 | while (--pages) { | 1323 | while (pages) { |
1648 | b_data = (char *) __get_free_page(GFP_KERNEL); | 1324 | order = fls(pages) - 1; |
1325 | b_data = (char *) __get_free_pages(GFP_KERNEL, order); | ||
1649 | if (!b_data) | 1326 | if (!b_data) |
1650 | goto abort; | 1327 | goto abort; |
1328 | b_allocd = (1 << order) * PAGE_SIZE; | ||
1329 | |||
1651 | if (clear) | 1330 | if (clear) |
1652 | memset(b_data, 0, PAGE_SIZE); | 1331 | memset(b_data, 0, b_allocd); |
1653 | if (bh->b_data == b_data + PAGE_SIZE) { | 1332 | |
1654 | bh->b_size += PAGE_SIZE; | 1333 | /* newly allocated page frames below buffer header or ...*/ |
1655 | bh->b_data -= PAGE_SIZE; | 1334 | if (bh->b_data == b_data + b_allocd) { |
1335 | bh->b_size += b_allocd; | ||
1336 | bh->b_data -= b_allocd; | ||
1656 | if (full) | 1337 | if (full) |
1657 | atomic_add(PAGE_SIZE, &bh->b_count); | 1338 | atomic_add(b_allocd, &bh->b_count); |
1658 | continue; | 1339 | continue; |
1659 | } | 1340 | } |
1341 | /* they are above the header */ | ||
1660 | if (b_data == bh->b_data + bh->b_size) { | 1342 | if (b_data == bh->b_data + bh->b_size) { |
1661 | bh->b_size += PAGE_SIZE; | 1343 | bh->b_size += b_allocd; |
1662 | if (full) | 1344 | if (full) |
1663 | atomic_add(PAGE_SIZE, &bh->b_count); | 1345 | atomic_add(b_allocd, &bh->b_count); |
1664 | continue; | 1346 | continue; |
1665 | } | 1347 | } |
1666 | prev_bh = bh; | 1348 | prev_bh = bh; |
1667 | bh = kmalloc(sizeof(struct idetape_bh), GFP_KERNEL); | 1349 | bh = kmalloc(sizeof(struct idetape_bh), GFP_KERNEL); |
1668 | if (!bh) { | 1350 | if (!bh) { |
1669 | free_page((unsigned long) b_data); | 1351 | free_pages((unsigned long) b_data, order); |
1670 | goto abort; | 1352 | goto abort; |
1671 | } | 1353 | } |
1672 | bh->b_reqnext = NULL; | 1354 | bh->b_reqnext = NULL; |
1673 | bh->b_data = b_data; | 1355 | bh->b_data = b_data; |
1674 | bh->b_size = PAGE_SIZE; | 1356 | bh->b_size = b_allocd; |
1675 | atomic_set(&bh->b_count, full ? bh->b_size : 0); | 1357 | atomic_set(&bh->b_count, full ? bh->b_size : 0); |
1676 | prev_bh->b_reqnext = bh; | 1358 | prev_bh->b_reqnext = bh; |
1359 | |||
1360 | pages &= (1 << order) - 1; | ||
1677 | } | 1361 | } |
1362 | |||
1678 | bh->b_size -= tape->excess_bh_size; | 1363 | bh->b_size -= tape->excess_bh_size; |
1679 | if (full) | 1364 | if (full) |
1680 | atomic_sub(tape->excess_bh_size, &bh->b_count); | 1365 | atomic_sub(tape->excess_bh_size, &bh->b_count); |
1681 | return stage; | 1366 | return merge_bh; |
1682 | abort: | 1367 | abort: |
1683 | __idetape_kfree_stage(stage); | 1368 | ide_tape_kfree_buffer(tape); |
1684 | return NULL; | 1369 | return NULL; |
1685 | } | 1370 | } |
1686 | 1371 | ||
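Editor's note: the new allocator asks for the largest power-of-two block of pages that still fits, then repeats on whatever is left, merging chunks that turn out to be physically adjacent. A rough userspace sketch of the chunking idea only; the accounting here simply subtracts the chunk it just grabbed and fls() is re-implemented, so this is not the driver's exact bookkeeping:

#include <stdio.h>

/* Userspace stand-in for the kernel's fls(): 1-based index of the most
 * significant set bit, with fls(0) == 0. */
static int fls_sketch(unsigned int x)
{
        int r = 0;

        while (x) {
                r++;
                x >>= 1;
        }
        return r;
}

int main(void)
{
        unsigned int pages = 13;        /* e.g. tape->pages_per_buffer */

        /* 13 pages -> allocations of 8, 4 and 1 pages */
        while (pages) {
                unsigned int order = fls_sketch(pages) - 1;

                printf("allocate 2^%u = %u pages\n", order, 1u << order);
                pages -= 1u << order;   /* pages still needed */
        }
        return 0;
}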
1687 | static idetape_stage_t *idetape_kmalloc_stage(idetape_tape_t *tape) | ||
1688 | { | ||
1689 | idetape_stage_t *cache_stage = tape->cache_stage; | ||
1690 | |||
1691 | debug_log(DBG_PROCS, "Enter %s\n", __func__); | ||
1692 | |||
1693 | if (tape->nr_stages >= tape->max_stages) | ||
1694 | return NULL; | ||
1695 | if (cache_stage != NULL) { | ||
1696 | tape->cache_stage = NULL; | ||
1697 | return cache_stage; | ||
1698 | } | ||
1699 | return __idetape_kmalloc_stage(tape, 0, 0); | ||
1700 | } | ||
1701 | |||
1702 | static int idetape_copy_stage_from_user(idetape_tape_t *tape, | 1372 | static int idetape_copy_stage_from_user(idetape_tape_t *tape, |
1703 | idetape_stage_t *stage, const char __user *buf, int n) | 1373 | const char __user *buf, int n) |
1704 | { | 1374 | { |
1705 | struct idetape_bh *bh = tape->bh; | 1375 | struct idetape_bh *bh = tape->bh; |
1706 | int count; | 1376 | int count; |
@@ -1732,7 +1402,7 @@ static int idetape_copy_stage_from_user(idetape_tape_t *tape, | |||
1732 | } | 1402 | } |
1733 | 1403 | ||
1734 | static int idetape_copy_stage_to_user(idetape_tape_t *tape, char __user *buf, | 1404 | static int idetape_copy_stage_to_user(idetape_tape_t *tape, char __user *buf, |
1735 | idetape_stage_t *stage, int n) | 1405 | int n) |
1736 | { | 1406 | { |
1737 | struct idetape_bh *bh = tape->bh; | 1407 | struct idetape_bh *bh = tape->bh; |
1738 | int count; | 1408 | int count; |
@@ -1763,11 +1433,11 @@ static int idetape_copy_stage_to_user(idetape_tape_t *tape, char __user *buf, | |||
1763 | return ret; | 1433 | return ret; |
1764 | } | 1434 | } |
1765 | 1435 | ||
1766 | static void idetape_init_merge_stage(idetape_tape_t *tape) | 1436 | static void idetape_init_merge_buffer(idetape_tape_t *tape) |
1767 | { | 1437 | { |
1768 | struct idetape_bh *bh = tape->merge_stage->bh; | 1438 | struct idetape_bh *bh = tape->merge_bh; |
1439 | tape->bh = tape->merge_bh; | ||
1769 | 1440 | ||
1770 | tape->bh = bh; | ||
1771 | if (tape->chrdev_dir == IDETAPE_DIR_WRITE) | 1441 | if (tape->chrdev_dir == IDETAPE_DIR_WRITE) |
1772 | atomic_set(&bh->b_count, 0); | 1442 | atomic_set(&bh->b_count, 0); |
1773 | else { | 1443 | else { |
@@ -1776,61 +1446,6 @@ static void idetape_init_merge_stage(idetape_tape_t *tape) | |||
1776 | } | 1446 | } |
1777 | } | 1447 | } |
1778 | 1448 | ||
1779 | static void idetape_switch_buffers(idetape_tape_t *tape, idetape_stage_t *stage) | ||
1780 | { | ||
1781 | struct idetape_bh *tmp; | ||
1782 | |||
1783 | tmp = stage->bh; | ||
1784 | stage->bh = tape->merge_stage->bh; | ||
1785 | tape->merge_stage->bh = tmp; | ||
1786 | idetape_init_merge_stage(tape); | ||
1787 | } | ||
1788 | |||
1789 | /* Add a new stage at the end of the pipeline. */ | ||
1790 | static void idetape_add_stage_tail(ide_drive_t *drive, idetape_stage_t *stage) | ||
1791 | { | ||
1792 | idetape_tape_t *tape = drive->driver_data; | ||
1793 | unsigned long flags; | ||
1794 | |||
1795 | debug_log(DBG_PROCS, "Enter %s\n", __func__); | ||
1796 | |||
1797 | spin_lock_irqsave(&tape->lock, flags); | ||
1798 | stage->next = NULL; | ||
1799 | if (tape->last_stage != NULL) | ||
1800 | tape->last_stage->next = stage; | ||
1801 | else | ||
1802 | tape->first_stage = stage; | ||
1803 | tape->next_stage = stage; | ||
1804 | tape->last_stage = stage; | ||
1805 | if (tape->next_stage == NULL) | ||
1806 | tape->next_stage = tape->last_stage; | ||
1807 | tape->nr_stages++; | ||
1808 | tape->nr_pending_stages++; | ||
1809 | spin_unlock_irqrestore(&tape->lock, flags); | ||
1810 | } | ||
1811 | |||
1812 | /* Install a completion in a pending request and sleep until it is serviced. The | ||
1813 | * caller should ensure that the request will not be serviced before we install | ||
1814 | * the completion (usually by disabling interrupts). | ||
1815 | */ | ||
1816 | static void idetape_wait_for_request(ide_drive_t *drive, struct request *rq) | ||
1817 | { | ||
1818 | DECLARE_COMPLETION_ONSTACK(wait); | ||
1819 | idetape_tape_t *tape = drive->driver_data; | ||
1820 | |||
1821 | if (rq == NULL || !blk_special_request(rq)) { | ||
1822 | printk(KERN_ERR "ide-tape: bug: Trying to sleep on non-valid" | ||
1823 | " request\n"); | ||
1824 | return; | ||
1825 | } | ||
1826 | rq->end_io_data = &wait; | ||
1827 | rq->end_io = blk_end_sync_rq; | ||
1828 | spin_unlock_irq(&tape->lock); | ||
1829 | wait_for_completion(&wait); | ||
1830 | /* The stage and its struct request have been deallocated */ | ||
1831 | spin_lock_irq(&tape->lock); | ||
1832 | } | ||
1833 | |||
1834 | static ide_startstop_t idetape_read_position_callback(ide_drive_t *drive) | 1449 | static ide_startstop_t idetape_read_position_callback(ide_drive_t *drive) |
1835 | { | 1450 | { |
1836 | idetape_tape_t *tape = drive->driver_data; | 1451 | idetape_tape_t *tape = drive->driver_data; |
@@ -1899,7 +1514,7 @@ static void idetape_create_test_unit_ready_cmd(struct ide_atapi_pc *pc) | |||
1899 | * to the request list without waiting for it to be serviced! In that case, we | 1514 | * to the request list without waiting for it to be serviced! In that case, we |
1900 | * usually use idetape_queue_pc_head(). | 1515 | * usually use idetape_queue_pc_head(). |
1901 | */ | 1516 | */ |
1902 | static int __idetape_queue_pc_tail(ide_drive_t *drive, struct ide_atapi_pc *pc) | 1517 | static int idetape_queue_pc_tail(ide_drive_t *drive, struct ide_atapi_pc *pc) |
1903 | { | 1518 | { |
1904 | struct ide_tape_obj *tape = drive->driver_data; | 1519 | struct ide_tape_obj *tape = drive->driver_data; |
1905 | struct request rq; | 1520 | struct request rq; |
@@ -1931,7 +1546,7 @@ static int idetape_wait_ready(ide_drive_t *drive, unsigned long timeout) | |||
1931 | timeout += jiffies; | 1546 | timeout += jiffies; |
1932 | while (time_before(jiffies, timeout)) { | 1547 | while (time_before(jiffies, timeout)) { |
1933 | idetape_create_test_unit_ready_cmd(&pc); | 1548 | idetape_create_test_unit_ready_cmd(&pc); |
1934 | if (!__idetape_queue_pc_tail(drive, &pc)) | 1549 | if (!idetape_queue_pc_tail(drive, &pc)) |
1935 | return 0; | 1550 | return 0; |
1936 | if ((tape->sense_key == 2 && tape->asc == 4 && tape->ascq == 2) | 1551 | if ((tape->sense_key == 2 && tape->asc == 4 && tape->ascq == 2) |
1937 | || (tape->asc == 0x3A)) { | 1552 | || (tape->asc == 0x3A)) { |
@@ -1940,7 +1555,7 @@ static int idetape_wait_ready(ide_drive_t *drive, unsigned long timeout) | |||
1940 | return -ENOMEDIUM; | 1555 | return -ENOMEDIUM; |
1941 | idetape_create_load_unload_cmd(drive, &pc, | 1556 | idetape_create_load_unload_cmd(drive, &pc, |
1942 | IDETAPE_LU_LOAD_MASK); | 1557 | IDETAPE_LU_LOAD_MASK); |
1943 | __idetape_queue_pc_tail(drive, &pc); | 1558 | idetape_queue_pc_tail(drive, &pc); |
1944 | load_attempted = 1; | 1559 | load_attempted = 1; |
1945 | /* not about to be ready */ | 1560 | /* not about to be ready */ |
1946 | } else if (!(tape->sense_key == 2 && tape->asc == 4 && | 1561 | } else if (!(tape->sense_key == 2 && tape->asc == 4 && |
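Editor's note: the wait-ready loop keys on a handful of sense codes: 2/04/02 (or ASC 0x3A, no medium) means the drive wants an explicit load, 2/04/anything-else means it is still becoming ready and worth polling again, and anything else is treated as a hard error. A compact sketch of that decision, with the autoload/already-attempted details elided:

#include <stdio.h>

struct sense { unsigned char key, asc, ascq; };

/* Simplified classification of the sense data idetape_wait_ready()
 * looks at; the real function also tracks whether a load was already
 * attempted and whether the drive can autoload at all. */
static const char *classify(struct sense s)
{
        if ((s.key == 2 && s.asc == 4 && s.ascq == 2) || s.asc == 0x3A)
                return "issue a load command";
        if (s.key == 2 && s.asc == 4)
                return "becoming ready, keep polling";
        return "hard error";
}

int main(void)
{
        struct sense s = { 2, 4, 1 };

        printf("%s\n", classify(s));    /* becoming ready, keep polling */
        return 0;
}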
@@ -1951,11 +1566,6 @@ static int idetape_wait_ready(ide_drive_t *drive, unsigned long timeout) | |||
1951 | return -EIO; | 1566 | return -EIO; |
1952 | } | 1567 | } |
1953 | 1568 | ||
1954 | static int idetape_queue_pc_tail(ide_drive_t *drive, struct ide_atapi_pc *pc) | ||
1955 | { | ||
1956 | return __idetape_queue_pc_tail(drive, pc); | ||
1957 | } | ||
1958 | |||
1959 | static int idetape_flush_tape_buffers(ide_drive_t *drive) | 1569 | static int idetape_flush_tape_buffers(ide_drive_t *drive) |
1960 | { | 1570 | { |
1961 | struct ide_atapi_pc pc; | 1571 | struct ide_atapi_pc pc; |
@@ -2021,50 +1631,21 @@ static int idetape_create_prevent_cmd(ide_drive_t *drive, | |||
2021 | return 1; | 1631 | return 1; |
2022 | } | 1632 | } |
2023 | 1633 | ||
2024 | static int __idetape_discard_read_pipeline(ide_drive_t *drive) | 1634 | static void __ide_tape_discard_merge_buffer(ide_drive_t *drive) |
2025 | { | 1635 | { |
2026 | idetape_tape_t *tape = drive->driver_data; | 1636 | idetape_tape_t *tape = drive->driver_data; |
2027 | unsigned long flags; | ||
2028 | int cnt; | ||
2029 | 1637 | ||
2030 | if (tape->chrdev_dir != IDETAPE_DIR_READ) | 1638 | if (tape->chrdev_dir != IDETAPE_DIR_READ) |
2031 | return 0; | 1639 | return; |
2032 | 1640 | ||
2033 | /* Remove merge stage. */ | 1641 | clear_bit(IDETAPE_FLAG_FILEMARK, &tape->flags); |
2034 | cnt = tape->merge_stage_size / tape->blk_size; | 1642 | tape->merge_bh_size = 0; |
2035 | if (test_and_clear_bit(IDETAPE_FLAG_FILEMARK, &tape->flags)) | 1643 | if (tape->merge_bh != NULL) { |
2036 | ++cnt; /* Filemarks count as 1 sector */ | 1644 | ide_tape_kfree_buffer(tape); |
2037 | tape->merge_stage_size = 0; | 1645 | tape->merge_bh = NULL; |
2038 | if (tape->merge_stage != NULL) { | ||
2039 | __idetape_kfree_stage(tape->merge_stage); | ||
2040 | tape->merge_stage = NULL; | ||
2041 | } | 1646 | } |
2042 | 1647 | ||
2043 | /* Clear pipeline flags. */ | ||
2044 | clear_bit(IDETAPE_FLAG_PIPELINE_ERR, &tape->flags); | ||
2045 | tape->chrdev_dir = IDETAPE_DIR_NONE; | 1648 | tape->chrdev_dir = IDETAPE_DIR_NONE; |
2046 | |||
2047 | /* Remove pipeline stages. */ | ||
2048 | if (tape->first_stage == NULL) | ||
2049 | return 0; | ||
2050 | |||
2051 | spin_lock_irqsave(&tape->lock, flags); | ||
2052 | tape->next_stage = NULL; | ||
2053 | if (test_bit(IDETAPE_FLAG_PIPELINE_ACTIVE, &tape->flags)) | ||
2054 | idetape_wait_for_request(drive, tape->active_data_rq); | ||
2055 | spin_unlock_irqrestore(&tape->lock, flags); | ||
2056 | |||
2057 | while (tape->first_stage != NULL) { | ||
2058 | struct request *rq_ptr = &tape->first_stage->rq; | ||
2059 | |||
2060 | cnt += rq_ptr->nr_sectors - rq_ptr->current_nr_sectors; | ||
2061 | if (rq_ptr->errors == IDETAPE_ERROR_FILEMARK) | ||
2062 | ++cnt; | ||
2063 | idetape_remove_stage_head(drive); | ||
2064 | } | ||
2065 | tape->nr_pending_stages = 0; | ||
2066 | tape->max_stages = tape->min_pipeline; | ||
2067 | return cnt; | ||
2068 | } | 1649 | } |
2069 | 1650 | ||
2070 | /* | 1651 | /* |
@@ -2081,7 +1662,7 @@ static int idetape_position_tape(ide_drive_t *drive, unsigned int block, | |||
2081 | struct ide_atapi_pc pc; | 1662 | struct ide_atapi_pc pc; |
2082 | 1663 | ||
2083 | if (tape->chrdev_dir == IDETAPE_DIR_READ) | 1664 | if (tape->chrdev_dir == IDETAPE_DIR_READ) |
2084 | __idetape_discard_read_pipeline(drive); | 1665 | __ide_tape_discard_merge_buffer(drive); |
2085 | idetape_wait_ready(drive, 60 * 5 * HZ); | 1666 | idetape_wait_ready(drive, 60 * 5 * HZ); |
2086 | idetape_create_locate_cmd(drive, &pc, block, partition, skip); | 1667 | idetape_create_locate_cmd(drive, &pc, block, partition, skip); |
2087 | retval = idetape_queue_pc_tail(drive, &pc); | 1668 | retval = idetape_queue_pc_tail(drive, &pc); |
@@ -2092,20 +1673,19 @@ static int idetape_position_tape(ide_drive_t *drive, unsigned int block, | |||
2092 | return (idetape_queue_pc_tail(drive, &pc)); | 1673 | return (idetape_queue_pc_tail(drive, &pc)); |
2093 | } | 1674 | } |
2094 | 1675 | ||
2095 | static void idetape_discard_read_pipeline(ide_drive_t *drive, | 1676 | static void ide_tape_discard_merge_buffer(ide_drive_t *drive, |
2096 | int restore_position) | 1677 | int restore_position) |
2097 | { | 1678 | { |
2098 | idetape_tape_t *tape = drive->driver_data; | 1679 | idetape_tape_t *tape = drive->driver_data; |
2099 | int cnt; | ||
2100 | int seek, position; | 1680 | int seek, position; |
2101 | 1681 | ||
2102 | cnt = __idetape_discard_read_pipeline(drive); | 1682 | __ide_tape_discard_merge_buffer(drive); |
2103 | if (restore_position) { | 1683 | if (restore_position) { |
2104 | position = idetape_read_position(drive); | 1684 | position = idetape_read_position(drive); |
2105 | seek = position > cnt ? position - cnt : 0; | 1685 | seek = position > 0 ? position : 0; |
2106 | if (idetape_position_tape(drive, seek, 0, 0)) { | 1686 | if (idetape_position_tape(drive, seek, 0, 0)) { |
2107 | printk(KERN_INFO "ide-tape: %s: position_tape failed in" | 1687 | printk(KERN_INFO "ide-tape: %s: position_tape failed in" |
2108 | " discard_pipeline()\n", tape->name); | 1688 | " %s\n", tape->name, __func__); |
2109 | return; | 1689 | return; |
2110 | } | 1690 | } |
2111 | } | 1691 | } |
@@ -2123,12 +1703,6 @@ static int idetape_queue_rw_tail(ide_drive_t *drive, int cmd, int blocks, | |||
2123 | 1703 | ||
2124 | debug_log(DBG_SENSE, "%s: cmd=%d\n", __func__, cmd); | 1704 | debug_log(DBG_SENSE, "%s: cmd=%d\n", __func__, cmd); |
2125 | 1705 | ||
2126 | if (test_bit(IDETAPE_FLAG_PIPELINE_ACTIVE, &tape->flags)) { | ||
2127 | printk(KERN_ERR "ide-tape: bug: the pipeline is active in %s\n", | ||
2128 | __func__); | ||
2129 | return (0); | ||
2130 | } | ||
2131 | |||
2132 | idetape_init_rq(&rq, cmd); | 1706 | idetape_init_rq(&rq, cmd); |
2133 | rq.rq_disk = tape->disk; | 1707 | rq.rq_disk = tape->disk; |
2134 | rq.special = (void *)bh; | 1708 | rq.special = (void *)bh; |
@@ -2140,26 +1714,13 @@ static int idetape_queue_rw_tail(ide_drive_t *drive, int cmd, int blocks, | |||
2140 | if ((cmd & (REQ_IDETAPE_READ | REQ_IDETAPE_WRITE)) == 0) | 1714 | if ((cmd & (REQ_IDETAPE_READ | REQ_IDETAPE_WRITE)) == 0) |
2141 | return 0; | 1715 | return 0; |
2142 | 1716 | ||
2143 | if (tape->merge_stage) | 1717 | if (tape->merge_bh) |
2144 | idetape_init_merge_stage(tape); | 1718 | idetape_init_merge_buffer(tape); |
2145 | if (rq.errors == IDETAPE_ERROR_GENERAL) | 1719 | if (rq.errors == IDETAPE_ERROR_GENERAL) |
2146 | return -EIO; | 1720 | return -EIO; |
2147 | return (tape->blk_size * (blocks-rq.current_nr_sectors)); | 1721 | return (tape->blk_size * (blocks-rq.current_nr_sectors)); |
2148 | } | 1722 | } |
2149 | 1723 | ||
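Editor's note: on success the helper reports progress in bytes: the blocks it was asked for minus whatever is still pending in the request, times the block size. A tiny worked example with made-up numbers:

#include <stdio.h>

int main(void)
{
        unsigned int blk_size  = 512;   /* tape->blk_size                */
        unsigned int blocks    = 64;    /* blocks handed to the request  */
        unsigned int remaining = 4;     /* rq.current_nr_sectors left    */

        /* bytes actually transferred, as returned by the helper */
        printf("%u\n", blk_size * (blocks - remaining));        /* 30720 */
        return 0;
}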
2150 | /* start servicing the pipeline stages, starting from tape->next_stage. */ | ||
2151 | static void idetape_plug_pipeline(ide_drive_t *drive) | ||
2152 | { | ||
2153 | idetape_tape_t *tape = drive->driver_data; | ||
2154 | |||
2155 | if (tape->next_stage == NULL) | ||
2156 | return; | ||
2157 | if (!test_and_set_bit(IDETAPE_FLAG_PIPELINE_ACTIVE, &tape->flags)) { | ||
2158 | idetape_activate_next_stage(drive); | ||
2159 | (void) ide_do_drive_cmd(drive, tape->active_data_rq, ide_end); | ||
2160 | } | ||
2161 | } | ||
2162 | |||
2163 | static void idetape_create_inquiry_cmd(struct ide_atapi_pc *pc) | 1724 | static void idetape_create_inquiry_cmd(struct ide_atapi_pc *pc) |
2164 | { | 1725 | { |
2165 | idetape_init_pc(pc); | 1726 | idetape_init_pc(pc); |
@@ -2197,137 +1758,39 @@ static void idetape_create_space_cmd(struct ide_atapi_pc *pc, int count, u8 cmd) | |||
2197 | pc->idetape_callback = &idetape_pc_callback; | 1758 | pc->idetape_callback = &idetape_pc_callback; |
2198 | } | 1759 | } |
2199 | 1760 | ||
2200 | static void idetape_wait_first_stage(ide_drive_t *drive) | 1761 | /* Queue up a character device originated write request. */ |
2201 | { | ||
2202 | idetape_tape_t *tape = drive->driver_data; | ||
2203 | unsigned long flags; | ||
2204 | |||
2205 | if (tape->first_stage == NULL) | ||
2206 | return; | ||
2207 | spin_lock_irqsave(&tape->lock, flags); | ||
2208 | if (tape->active_stage == tape->first_stage) | ||
2209 | idetape_wait_for_request(drive, tape->active_data_rq); | ||
2210 | spin_unlock_irqrestore(&tape->lock, flags); | ||
2211 | } | ||
2212 | |||
2213 | /* | ||
2214 | * Try to add a character device originated write request to our pipeline. In | ||
2215 | * case we don't succeed, we revert to non-pipelined operation mode for this | ||
2216 | * request. In order to accomplish that, we | ||
2217 | * | ||
2218 | * 1. Try to allocate a new pipeline stage. | ||
2219 | * 2. If we can't, wait for more and more requests to be serviced and try again | ||
2220 | * each time. | ||
2221 | * 3. If we still can't allocate a stage, fallback to non-pipelined operation | ||
2222 | * mode for this request. | ||
2223 | */ | ||
2224 | static int idetape_add_chrdev_write_request(ide_drive_t *drive, int blocks) | 1762 | static int idetape_add_chrdev_write_request(ide_drive_t *drive, int blocks) |
2225 | { | 1763 | { |
2226 | idetape_tape_t *tape = drive->driver_data; | 1764 | idetape_tape_t *tape = drive->driver_data; |
2227 | idetape_stage_t *new_stage; | ||
2228 | unsigned long flags; | ||
2229 | struct request *rq; | ||
2230 | 1765 | ||
2231 | debug_log(DBG_CHRDEV, "Enter %s\n", __func__); | 1766 | debug_log(DBG_CHRDEV, "Enter %s\n", __func__); |
2232 | 1767 | ||
2233 | /* Attempt to allocate a new stage. Beware possible race conditions. */ | 1768 | return idetape_queue_rw_tail(drive, REQ_IDETAPE_WRITE, |
2234 | while ((new_stage = idetape_kmalloc_stage(tape)) == NULL) { | 1769 | blocks, tape->merge_bh); |
2235 | spin_lock_irqsave(&tape->lock, flags); | ||
2236 | if (test_bit(IDETAPE_FLAG_PIPELINE_ACTIVE, &tape->flags)) { | ||
2237 | idetape_wait_for_request(drive, tape->active_data_rq); | ||
2238 | spin_unlock_irqrestore(&tape->lock, flags); | ||
2239 | } else { | ||
2240 | spin_unlock_irqrestore(&tape->lock, flags); | ||
2241 | idetape_plug_pipeline(drive); | ||
2242 | if (test_bit(IDETAPE_FLAG_PIPELINE_ACTIVE, | ||
2243 | &tape->flags)) | ||
2244 | continue; | ||
2245 | /* | ||
2246 | * The machine is short on memory. Fallback to non- | ||
2247 | * pipelined operation mode for this request. | ||
2248 | */ | ||
2249 | return idetape_queue_rw_tail(drive, REQ_IDETAPE_WRITE, | ||
2250 | blocks, tape->merge_stage->bh); | ||
2251 | } | ||
2252 | } | ||
2253 | rq = &new_stage->rq; | ||
2254 | idetape_init_rq(rq, REQ_IDETAPE_WRITE); | ||
2255 | /* Doesn't actually matter - We always assume sequential access */ | ||
2256 | rq->sector = tape->first_frame; | ||
2257 | rq->current_nr_sectors = blocks; | ||
2258 | rq->nr_sectors = blocks; | ||
2259 | |||
2260 | idetape_switch_buffers(tape, new_stage); | ||
2261 | idetape_add_stage_tail(drive, new_stage); | ||
2262 | tape->pipeline_head++; | ||
2263 | idetape_calculate_speeds(drive); | ||
2264 | |||
2265 | /* | ||
2266 | * Estimate whether the tape has stopped writing by checking if our | ||
2267 | * write pipeline is currently empty. If we are not writing anymore, | ||
2268 | * wait for the pipeline to be almost completely full (90%) before | ||
2269 | * starting to service requests, so that we will be able to keep up with | ||
2270 | * the higher speeds of the tape. | ||
2271 | */ | ||
2272 | if (!test_bit(IDETAPE_FLAG_PIPELINE_ACTIVE, &tape->flags)) { | ||
2273 | if (tape->nr_stages >= tape->max_stages * 9 / 10 || | ||
2274 | tape->nr_stages >= tape->max_stages - | ||
2275 | tape->uncontrolled_pipeline_head_speed * 3 * 1024 / | ||
2276 | tape->blk_size) { | ||
2277 | tape->measure_insert_time = 1; | ||
2278 | tape->insert_time = jiffies; | ||
2279 | tape->insert_size = 0; | ||
2280 | tape->insert_speed = 0; | ||
2281 | idetape_plug_pipeline(drive); | ||
2282 | } | ||
2283 | } | ||
2284 | if (test_and_clear_bit(IDETAPE_FLAG_PIPELINE_ERR, &tape->flags)) | ||
2285 | /* Return a deferred error */ | ||
2286 | return -EIO; | ||
2287 | return blocks; | ||
2288 | } | 1770 | } |
2289 | 1771 | ||
2290 | /* | 1772 | static void ide_tape_flush_merge_buffer(ide_drive_t *drive) |
2291 | * Wait until all pending pipeline requests are serviced. Typically called on | ||
2292 | * device close. | ||
2293 | */ | ||
2294 | static void idetape_wait_for_pipeline(ide_drive_t *drive) | ||
2295 | { | ||
2296 | idetape_tape_t *tape = drive->driver_data; | ||
2297 | unsigned long flags; | ||
2298 | |||
2299 | while (tape->next_stage || test_bit(IDETAPE_FLAG_PIPELINE_ACTIVE, | ||
2300 | &tape->flags)) { | ||
2301 | idetape_plug_pipeline(drive); | ||
2302 | spin_lock_irqsave(&tape->lock, flags); | ||
2303 | if (test_bit(IDETAPE_FLAG_PIPELINE_ACTIVE, &tape->flags)) | ||
2304 | idetape_wait_for_request(drive, tape->active_data_rq); | ||
2305 | spin_unlock_irqrestore(&tape->lock, flags); | ||
2306 | } | ||
2307 | } | ||
2308 | |||
2309 | static void idetape_empty_write_pipeline(ide_drive_t *drive) | ||
2310 | { | 1773 | { |
2311 | idetape_tape_t *tape = drive->driver_data; | 1774 | idetape_tape_t *tape = drive->driver_data; |
2312 | int blocks, min; | 1775 | int blocks, min; |
2313 | struct idetape_bh *bh; | 1776 | struct idetape_bh *bh; |
2314 | 1777 | ||
2315 | if (tape->chrdev_dir != IDETAPE_DIR_WRITE) { | 1778 | if (tape->chrdev_dir != IDETAPE_DIR_WRITE) { |
2316 | printk(KERN_ERR "ide-tape: bug: Trying to empty write pipeline," | 1779 | printk(KERN_ERR "ide-tape: bug: Trying to empty merge buffer" |
2317 | " but we are not writing.\n"); | 1780 | " but we are not writing.\n"); |
2318 | return; | 1781 | return; |
2319 | } | 1782 | } |
2320 | if (tape->merge_stage_size > tape->stage_size) { | 1783 | if (tape->merge_bh_size > tape->buffer_size) { |
2321 | printk(KERN_ERR "ide-tape: bug: merge_buffer too big\n"); | 1784 | printk(KERN_ERR "ide-tape: bug: merge_buffer too big\n"); |
2322 | tape->merge_stage_size = tape->stage_size; | 1785 | tape->merge_bh_size = tape->buffer_size; |
2323 | } | 1786 | } |
2324 | if (tape->merge_stage_size) { | 1787 | if (tape->merge_bh_size) { |
2325 | blocks = tape->merge_stage_size / tape->blk_size; | 1788 | blocks = tape->merge_bh_size / tape->blk_size; |
2326 | if (tape->merge_stage_size % tape->blk_size) { | 1789 | if (tape->merge_bh_size % tape->blk_size) { |
2327 | unsigned int i; | 1790 | unsigned int i; |
2328 | 1791 | ||
2329 | blocks++; | 1792 | blocks++; |
2330 | i = tape->blk_size - tape->merge_stage_size % | 1793 | i = tape->blk_size - tape->merge_bh_size % |
2331 | tape->blk_size; | 1794 | tape->blk_size; |
2332 | bh = tape->bh->b_reqnext; | 1795 | bh = tape->bh->b_reqnext; |
2333 | while (bh) { | 1796 | while (bh) { |
@@ -2351,74 +1814,33 @@ static void idetape_empty_write_pipeline(ide_drive_t *drive) | |||
2351 | } | 1814 | } |
2352 | } | 1815 | } |
2353 | (void) idetape_add_chrdev_write_request(drive, blocks); | 1816 | (void) idetape_add_chrdev_write_request(drive, blocks); |
2354 | tape->merge_stage_size = 0; | 1817 | tape->merge_bh_size = 0; |
2355 | } | 1818 | } |
2356 | idetape_wait_for_pipeline(drive); | 1819 | if (tape->merge_bh != NULL) { |
2357 | if (tape->merge_stage != NULL) { | 1820 | ide_tape_kfree_buffer(tape); |
2358 | __idetape_kfree_stage(tape->merge_stage); | 1821 | tape->merge_bh = NULL; |
2359 | tape->merge_stage = NULL; | ||
2360 | } | 1822 | } |
2361 | clear_bit(IDETAPE_FLAG_PIPELINE_ERR, &tape->flags); | ||
2362 | tape->chrdev_dir = IDETAPE_DIR_NONE; | 1823 | tape->chrdev_dir = IDETAPE_DIR_NONE; |
2363 | |||
2364 | /* | ||
2365 | * On the next backup, perform the feedback loop again. (I don't want to | ||
2366 | * keep sense information between backups, as some systems are | ||
2367 | * constantly on, and the system load can be totally different on the | ||
2368 | * next backup). | ||
2369 | */ | ||
2370 | tape->max_stages = tape->min_pipeline; | ||
2371 | if (tape->first_stage != NULL || | ||
2372 | tape->next_stage != NULL || | ||
2373 | tape->last_stage != NULL || | ||
2374 | tape->nr_stages != 0) { | ||
2375 | printk(KERN_ERR "ide-tape: ide-tape pipeline bug, " | ||
2376 | "first_stage %p, next_stage %p, " | ||
2377 | "last_stage %p, nr_stages %d\n", | ||
2378 | tape->first_stage, tape->next_stage, | ||
2379 | tape->last_stage, tape->nr_stages); | ||
2380 | } | ||
2381 | } | 1824 | } |
2382 | 1825 | ||
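Editor's note: when the merge buffer ends on a partial block, the flush path rounds the block count up and zero-fills the tail of the last block before queueing the write. A small worked sketch of that arithmetic with invented sizes:

#include <stdio.h>

int main(void)
{
        unsigned int blk_size = 512;    /* tape->blk_size       */
        unsigned int merge    = 1300;   /* tape->merge_bh_size  */
        unsigned int blocks   = merge / blk_size;       /* 2 full blocks */

        if (merge % blk_size) {
                unsigned int pad = blk_size - merge % blk_size;

                blocks++;                               /* round up to 3 */
                printf("zero-pad the last block with %u bytes\n", pad);
        }
        printf("flush %u blocks\n", blocks);
        return 0;
}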
2383 | static void idetape_restart_speed_control(ide_drive_t *drive) | 1826 | static int idetape_init_read(ide_drive_t *drive) |
2384 | { | 1827 | { |
2385 | idetape_tape_t *tape = drive->driver_data; | 1828 | idetape_tape_t *tape = drive->driver_data; |
2386 | |||
2387 | tape->restart_speed_control_req = 0; | ||
2388 | tape->pipeline_head = 0; | ||
2389 | tape->controlled_last_pipeline_head = 0; | ||
2390 | tape->controlled_previous_pipeline_head = 0; | ||
2391 | tape->uncontrolled_previous_pipeline_head = 0; | ||
2392 | tape->controlled_pipeline_head_speed = 5000; | ||
2393 | tape->pipeline_head_speed = 5000; | ||
2394 | tape->uncontrolled_pipeline_head_speed = 0; | ||
2395 | tape->controlled_pipeline_head_time = | ||
2396 | tape->uncontrolled_pipeline_head_time = jiffies; | ||
2397 | tape->controlled_previous_head_time = | ||
2398 | tape->uncontrolled_previous_head_time = jiffies; | ||
2399 | } | ||
2400 | |||
2401 | static int idetape_init_read(ide_drive_t *drive, int max_stages) | ||
2402 | { | ||
2403 | idetape_tape_t *tape = drive->driver_data; | ||
2404 | idetape_stage_t *new_stage; | ||
2405 | struct request rq; | ||
2406 | int bytes_read; | 1829 | int bytes_read; |
2407 | u16 blocks = *(u16 *)&tape->caps[12]; | ||
2408 | 1830 | ||
2409 | /* Initialize read operation */ | 1831 | /* Initialize read operation */ |
2410 | if (tape->chrdev_dir != IDETAPE_DIR_READ) { | 1832 | if (tape->chrdev_dir != IDETAPE_DIR_READ) { |
2411 | if (tape->chrdev_dir == IDETAPE_DIR_WRITE) { | 1833 | if (tape->chrdev_dir == IDETAPE_DIR_WRITE) { |
2412 | idetape_empty_write_pipeline(drive); | 1834 | ide_tape_flush_merge_buffer(drive); |
2413 | idetape_flush_tape_buffers(drive); | 1835 | idetape_flush_tape_buffers(drive); |
2414 | } | 1836 | } |
2415 | if (tape->merge_stage || tape->merge_stage_size) { | 1837 | if (tape->merge_bh || tape->merge_bh_size) { |
2416 | printk(KERN_ERR "ide-tape: merge_stage_size should be" | 1838 | printk(KERN_ERR "ide-tape: merge_bh_size should be" |
2417 | " 0 now\n"); | 1839 | " 0 now\n"); |
2418 | tape->merge_stage_size = 0; | 1840 | tape->merge_bh_size = 0; |
2419 | } | 1841 | } |
2420 | tape->merge_stage = __idetape_kmalloc_stage(tape, 0, 0); | 1842 | tape->merge_bh = ide_tape_kmalloc_buffer(tape, 0, 0); |
2421 | if (!tape->merge_stage) | 1843 | if (!tape->merge_bh) |
2422 | return -ENOMEM; | 1844 | return -ENOMEM; |
2423 | tape->chrdev_dir = IDETAPE_DIR_READ; | 1845 | tape->chrdev_dir = IDETAPE_DIR_READ; |
2424 | 1846 | ||
@@ -2431,54 +1853,23 @@ static int idetape_init_read(ide_drive_t *drive, int max_stages) | |||
2431 | if (drive->dsc_overlap) { | 1853 | if (drive->dsc_overlap) { |
2432 | bytes_read = idetape_queue_rw_tail(drive, | 1854 | bytes_read = idetape_queue_rw_tail(drive, |
2433 | REQ_IDETAPE_READ, 0, | 1855 | REQ_IDETAPE_READ, 0, |
2434 | tape->merge_stage->bh); | 1856 | tape->merge_bh); |
2435 | if (bytes_read < 0) { | 1857 | if (bytes_read < 0) { |
2436 | __idetape_kfree_stage(tape->merge_stage); | 1858 | ide_tape_kfree_buffer(tape); |
2437 | tape->merge_stage = NULL; | 1859 | tape->merge_bh = NULL; |
2438 | tape->chrdev_dir = IDETAPE_DIR_NONE; | 1860 | tape->chrdev_dir = IDETAPE_DIR_NONE; |
2439 | return bytes_read; | 1861 | return bytes_read; |
2440 | } | 1862 | } |
2441 | } | 1863 | } |
2442 | } | 1864 | } |
2443 | if (tape->restart_speed_control_req) | 1865 | |
2444 | idetape_restart_speed_control(drive); | ||
2445 | idetape_init_rq(&rq, REQ_IDETAPE_READ); | ||
2446 | rq.sector = tape->first_frame; | ||
2447 | rq.nr_sectors = blocks; | ||
2448 | rq.current_nr_sectors = blocks; | ||
2449 | if (!test_bit(IDETAPE_FLAG_PIPELINE_ERR, &tape->flags) && | ||
2450 | tape->nr_stages < max_stages) { | ||
2451 | new_stage = idetape_kmalloc_stage(tape); | ||
2452 | while (new_stage != NULL) { | ||
2453 | new_stage->rq = rq; | ||
2454 | idetape_add_stage_tail(drive, new_stage); | ||
2455 | if (tape->nr_stages >= max_stages) | ||
2456 | break; | ||
2457 | new_stage = idetape_kmalloc_stage(tape); | ||
2458 | } | ||
2459 | } | ||
2460 | if (!test_bit(IDETAPE_FLAG_PIPELINE_ACTIVE, &tape->flags)) { | ||
2461 | if (tape->nr_pending_stages >= 3 * max_stages / 4) { | ||
2462 | tape->measure_insert_time = 1; | ||
2463 | tape->insert_time = jiffies; | ||
2464 | tape->insert_size = 0; | ||
2465 | tape->insert_speed = 0; | ||
2466 | idetape_plug_pipeline(drive); | ||
2467 | } | ||
2468 | } | ||
2469 | return 0; | 1866 | return 0; |
2470 | } | 1867 | } |
2471 | 1868 | ||
2472 | /* | 1869 | /* called from idetape_chrdev_read() to service a chrdev read request. */ |
2473 | * Called from idetape_chrdev_read() to service a character device read request | ||
2474 | * and add read-ahead requests to our pipeline. | ||
2475 | */ | ||
2476 | static int idetape_add_chrdev_read_request(ide_drive_t *drive, int blocks) | 1870 | static int idetape_add_chrdev_read_request(ide_drive_t *drive, int blocks) |
2477 | { | 1871 | { |
2478 | idetape_tape_t *tape = drive->driver_data; | 1872 | idetape_tape_t *tape = drive->driver_data; |
2479 | unsigned long flags; | ||
2480 | struct request *rq_ptr; | ||
2481 | int bytes_read; | ||
2482 | 1873 | ||
2483 | debug_log(DBG_PROCS, "Enter %s, %d blocks\n", __func__, blocks); | 1874 | debug_log(DBG_PROCS, "Enter %s, %d blocks\n", __func__, blocks); |
2484 | 1875 | ||
@@ -2486,39 +1877,10 @@ static int idetape_add_chrdev_read_request(ide_drive_t *drive, int blocks) | |||
2486 | if (test_bit(IDETAPE_FLAG_FILEMARK, &tape->flags)) | 1877 | if (test_bit(IDETAPE_FLAG_FILEMARK, &tape->flags)) |
2487 | return 0; | 1878 | return 0; |
2488 | 1879 | ||
2489 | /* Wait for the next block to reach the head of the pipeline. */ | 1880 | idetape_init_read(drive); |
2490 | idetape_init_read(drive, tape->max_stages); | ||
2491 | if (tape->first_stage == NULL) { | ||
2492 | if (test_bit(IDETAPE_FLAG_PIPELINE_ERR, &tape->flags)) | ||
2493 | return 0; | ||
2494 | return idetape_queue_rw_tail(drive, REQ_IDETAPE_READ, blocks, | ||
2495 | tape->merge_stage->bh); | ||
2496 | } | ||
2497 | idetape_wait_first_stage(drive); | ||
2498 | rq_ptr = &tape->first_stage->rq; | ||
2499 | bytes_read = tape->blk_size * (rq_ptr->nr_sectors - | ||
2500 | rq_ptr->current_nr_sectors); | ||
2501 | rq_ptr->nr_sectors = 0; | ||
2502 | rq_ptr->current_nr_sectors = 0; | ||
2503 | 1881 | ||
2504 | if (rq_ptr->errors == IDETAPE_ERROR_EOD) | 1882 | return idetape_queue_rw_tail(drive, REQ_IDETAPE_READ, blocks, |
2505 | return 0; | 1883 | tape->merge_bh); |
2506 | else { | ||
2507 | idetape_switch_buffers(tape, tape->first_stage); | ||
2508 | if (rq_ptr->errors == IDETAPE_ERROR_FILEMARK) | ||
2509 | set_bit(IDETAPE_FLAG_FILEMARK, &tape->flags); | ||
2510 | spin_lock_irqsave(&tape->lock, flags); | ||
2511 | idetape_remove_stage_head(drive); | ||
2512 | spin_unlock_irqrestore(&tape->lock, flags); | ||
2513 | tape->pipeline_head++; | ||
2514 | idetape_calculate_speeds(drive); | ||
2515 | } | ||
2516 | if (bytes_read > blocks * tape->blk_size) { | ||
2517 | printk(KERN_ERR "ide-tape: bug: trying to return more bytes" | ||
2518 | " than requested\n"); | ||
2519 | bytes_read = blocks * tape->blk_size; | ||
2520 | } | ||
2521 | return (bytes_read); | ||
2522 | } | 1884 | } |
2523 | 1885 | ||
2524 | static void idetape_pad_zeros(ide_drive_t *drive, int bcount) | 1886 | static void idetape_pad_zeros(ide_drive_t *drive, int bcount) |
@@ -2530,8 +1892,8 @@ static void idetape_pad_zeros(ide_drive_t *drive, int bcount) | |||
2530 | while (bcount) { | 1892 | while (bcount) { |
2531 | unsigned int count; | 1893 | unsigned int count; |
2532 | 1894 | ||
2533 | bh = tape->merge_stage->bh; | 1895 | bh = tape->merge_bh; |
2534 | count = min(tape->stage_size, bcount); | 1896 | count = min(tape->buffer_size, bcount); |
2535 | bcount -= count; | 1897 | bcount -= count; |
2536 | blocks = count / tape->blk_size; | 1898 | blocks = count / tape->blk_size; |
2537 | while (count) { | 1899 | while (count) { |
@@ -2542,29 +1904,8 @@ static void idetape_pad_zeros(ide_drive_t *drive, int bcount) | |||
2542 | bh = bh->b_reqnext; | 1904 | bh = bh->b_reqnext; |
2543 | } | 1905 | } |
2544 | idetape_queue_rw_tail(drive, REQ_IDETAPE_WRITE, blocks, | 1906 | idetape_queue_rw_tail(drive, REQ_IDETAPE_WRITE, blocks, |
2545 | tape->merge_stage->bh); | 1907 | tape->merge_bh); |
2546 | } | ||
2547 | } | ||
2548 | |||
2549 | static int idetape_pipeline_size(ide_drive_t *drive) | ||
2550 | { | ||
2551 | idetape_tape_t *tape = drive->driver_data; | ||
2552 | idetape_stage_t *stage; | ||
2553 | struct request *rq; | ||
2554 | int size = 0; | ||
2555 | |||
2556 | idetape_wait_for_pipeline(drive); | ||
2557 | stage = tape->first_stage; | ||
2558 | while (stage != NULL) { | ||
2559 | rq = &stage->rq; | ||
2560 | size += tape->blk_size * (rq->nr_sectors - | ||
2561 | rq->current_nr_sectors); | ||
2562 | if (rq->errors == IDETAPE_ERROR_FILEMARK) | ||
2563 | size += tape->blk_size; | ||
2564 | stage = stage->next; | ||
2565 | } | 1908 | } |
2566 | size += tape->merge_stage_size; | ||
2567 | return size; | ||
2568 | } | 1909 | } |
2569 | 1910 | ||
2570 | /* | 1911 | /* |
@@ -2612,11 +1953,10 @@ static int idetape_blkdev_ioctl(ide_drive_t *drive, unsigned int cmd, | |||
2612 | if (copy_from_user(&config, argp, sizeof(config))) | 1953 | if (copy_from_user(&config, argp, sizeof(config))) |
2613 | return -EFAULT; | 1954 | return -EFAULT; |
2614 | tape->best_dsc_rw_freq = config.dsc_rw_frequency; | 1955 | tape->best_dsc_rw_freq = config.dsc_rw_frequency; |
2615 | tape->max_stages = config.nr_stages; | ||
2616 | break; | 1956 | break; |
2617 | case 0x0350: | 1957 | case 0x0350: |
2618 | config.dsc_rw_frequency = (int) tape->best_dsc_rw_freq; | 1958 | config.dsc_rw_frequency = (int) tape->best_dsc_rw_freq; |
2619 | config.nr_stages = tape->max_stages; | 1959 | config.nr_stages = 1; |
2620 | if (copy_to_user(argp, &config, sizeof(config))) | 1960 | if (copy_to_user(argp, &config, sizeof(config))) |
2621 | return -EFAULT; | 1961 | return -EFAULT; |
2622 | break; | 1962 | break; |
@@ -2626,19 +1966,11 @@ static int idetape_blkdev_ioctl(ide_drive_t *drive, unsigned int cmd, | |||
2626 | return 0; | 1966 | return 0; |
2627 | } | 1967 | } |
2628 | 1968 | ||
2629 | /* | ||
2630 | * The function below is now a bit more complicated than just passing the | ||
2631 | * command to the tape since we may have crossed some filemarks during our | ||
2632 | * pipelined read-ahead mode. As a minor side effect, the pipeline enables us to | ||
2633 | * support MTFSFM when the filemark is in our internal pipeline even if the tape | ||
2634 | * doesn't support spacing over filemarks in the reverse direction. | ||
2635 | */ | ||
2636 | static int idetape_space_over_filemarks(ide_drive_t *drive, short mt_op, | 1969 | static int idetape_space_over_filemarks(ide_drive_t *drive, short mt_op, |
2637 | int mt_count) | 1970 | int mt_count) |
2638 | { | 1971 | { |
2639 | idetape_tape_t *tape = drive->driver_data; | 1972 | idetape_tape_t *tape = drive->driver_data; |
2640 | struct ide_atapi_pc pc; | 1973 | struct ide_atapi_pc pc; |
2641 | unsigned long flags; | ||
2642 | int retval, count = 0; | 1974 | int retval, count = 0; |
2643 | int sprev = !!(tape->caps[4] & 0x20); | 1975 | int sprev = !!(tape->caps[4] & 0x20); |
2644 | 1976 | ||
@@ -2651,48 +1983,12 @@ static int idetape_space_over_filemarks(ide_drive_t *drive, short mt_op, | |||
2651 | } | 1983 | } |
2652 | 1984 | ||
2653 | if (tape->chrdev_dir == IDETAPE_DIR_READ) { | 1985 | if (tape->chrdev_dir == IDETAPE_DIR_READ) { |
2654 | /* its a read-ahead buffer, scan it for crossed filemarks. */ | 1986 | tape->merge_bh_size = 0; |
2655 | tape->merge_stage_size = 0; | ||
2656 | if (test_and_clear_bit(IDETAPE_FLAG_FILEMARK, &tape->flags)) | 1987 | if (test_and_clear_bit(IDETAPE_FLAG_FILEMARK, &tape->flags)) |
2657 | ++count; | 1988 | ++count; |
2658 | while (tape->first_stage != NULL) { | 1989 | ide_tape_discard_merge_buffer(drive, 0); |
2659 | if (count == mt_count) { | ||
2660 | if (mt_op == MTFSFM) | ||
2661 | set_bit(IDETAPE_FLAG_FILEMARK, | ||
2662 | &tape->flags); | ||
2663 | return 0; | ||
2664 | } | ||
2665 | spin_lock_irqsave(&tape->lock, flags); | ||
2666 | if (tape->first_stage == tape->active_stage) { | ||
2667 | /* | ||
2668 | * We have reached the active stage in the read | ||
2669 | * pipeline. There is no point in allowing the | ||
2670 | * drive to continue reading any farther, so we | ||
2671 | * stop the pipeline. | ||
2672 | * | ||
2673 | * This section should be moved to a separate | ||
2674 | * subroutine because similar operations are | ||
2675 | * done in __idetape_discard_read_pipeline(), | ||
2676 | * for example. | ||
2677 | */ | ||
2678 | tape->next_stage = NULL; | ||
2679 | spin_unlock_irqrestore(&tape->lock, flags); | ||
2680 | idetape_wait_first_stage(drive); | ||
2681 | tape->next_stage = tape->first_stage->next; | ||
2682 | } else | ||
2683 | spin_unlock_irqrestore(&tape->lock, flags); | ||
2684 | if (tape->first_stage->rq.errors == | ||
2685 | IDETAPE_ERROR_FILEMARK) | ||
2686 | ++count; | ||
2687 | idetape_remove_stage_head(drive); | ||
2688 | } | ||
2689 | idetape_discard_read_pipeline(drive, 0); | ||
2690 | } | 1990 | } |
2691 | 1991 | ||
2692 | /* | ||
2693 | * The filemark was not found in our internal pipeline; now we can issue | ||
2694 | * the space command. | ||
2695 | */ | ||
2696 | switch (mt_op) { | 1992 | switch (mt_op) { |
2697 | case MTFSF: | 1993 | case MTFSF: |
2698 | case MTBSF: | 1994 | case MTBSF: |
@@ -2748,27 +2044,25 @@ static ssize_t idetape_chrdev_read(struct file *file, char __user *buf, | |||
2748 | (count % tape->blk_size) == 0) | 2044 | (count % tape->blk_size) == 0) |
2749 | tape->user_bs_factor = count / tape->blk_size; | 2045 | tape->user_bs_factor = count / tape->blk_size; |
2750 | } | 2046 | } |
2751 | rc = idetape_init_read(drive, tape->max_stages); | 2047 | rc = idetape_init_read(drive); |
2752 | if (rc < 0) | 2048 | if (rc < 0) |
2753 | return rc; | 2049 | return rc; |
2754 | if (count == 0) | 2050 | if (count == 0) |
2755 | return (0); | 2051 | return (0); |
2756 | if (tape->merge_stage_size) { | 2052 | if (tape->merge_bh_size) { |
2757 | actually_read = min((unsigned int)(tape->merge_stage_size), | 2053 | actually_read = min((unsigned int)(tape->merge_bh_size), |
2758 | (unsigned int)count); | 2054 | (unsigned int)count); |
2759 | if (idetape_copy_stage_to_user(tape, buf, tape->merge_stage, | 2055 | if (idetape_copy_stage_to_user(tape, buf, actually_read)) |
2760 | actually_read)) | ||
2761 | ret = -EFAULT; | 2056 | ret = -EFAULT; |
2762 | buf += actually_read; | 2057 | buf += actually_read; |
2763 | tape->merge_stage_size -= actually_read; | 2058 | tape->merge_bh_size -= actually_read; |
2764 | count -= actually_read; | 2059 | count -= actually_read; |
2765 | } | 2060 | } |
2766 | while (count >= tape->stage_size) { | 2061 | while (count >= tape->buffer_size) { |
2767 | bytes_read = idetape_add_chrdev_read_request(drive, ctl); | 2062 | bytes_read = idetape_add_chrdev_read_request(drive, ctl); |
2768 | if (bytes_read <= 0) | 2063 | if (bytes_read <= 0) |
2769 | goto finish; | 2064 | goto finish; |
2770 | if (idetape_copy_stage_to_user(tape, buf, tape->merge_stage, | 2065 | if (idetape_copy_stage_to_user(tape, buf, bytes_read)) |
2771 | bytes_read)) | ||
2772 | ret = -EFAULT; | 2066 | ret = -EFAULT; |
2773 | buf += bytes_read; | 2067 | buf += bytes_read; |
2774 | count -= bytes_read; | 2068 | count -= bytes_read; |
@@ -2779,11 +2073,10 @@ static ssize_t idetape_chrdev_read(struct file *file, char __user *buf, | |||
2779 | if (bytes_read <= 0) | 2073 | if (bytes_read <= 0) |
2780 | goto finish; | 2074 | goto finish; |
2781 | temp = min((unsigned long)count, (unsigned long)bytes_read); | 2075 | temp = min((unsigned long)count, (unsigned long)bytes_read); |
2782 | if (idetape_copy_stage_to_user(tape, buf, tape->merge_stage, | 2076 | if (idetape_copy_stage_to_user(tape, buf, temp)) |
2783 | temp)) | ||
2784 | ret = -EFAULT; | 2077 | ret = -EFAULT; |
2785 | actually_read += temp; | 2078 | actually_read += temp; |
2786 | tape->merge_stage_size = bytes_read-temp; | 2079 | tape->merge_bh_size = bytes_read-temp; |
2787 | } | 2080 | } |
2788 | finish: | 2081 | finish: |
2789 | if (!actually_read && test_bit(IDETAPE_FLAG_FILEMARK, &tape->flags)) { | 2082 | if (!actually_read && test_bit(IDETAPE_FLAG_FILEMARK, &tape->flags)) { |
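Editor's note: stripped of the read-ahead stages, the chrdev read path reduces to a classic buffered read: drain any bytes left over in the merge buffer, service whole-buffer transfers directly, then do one final buffer-sized read and park the unconsumed remainder. A self-contained model of that flow; all names and sizes are invented, and the last step assumes a full buffer actually comes back from the tape:

#include <stdio.h>

#define BUFFER_SIZE 32          /* stands in for tape->buffer_size   */

static unsigned int merge_size; /* bytes parked in the merge buffer  */

static unsigned int chrdev_read(unsigned int count)
{
        unsigned int done = 0, chunk;

        /* 1. leftovers from the previous call come first */
        chunk = merge_size < count ? merge_size : count;
        done += chunk;
        merge_size -= chunk;
        count -= chunk;

        /* 2. whole-buffer transfers straight into the caller's buffer */
        while (count >= BUFFER_SIZE) {
                done += BUFFER_SIZE;
                count -= BUFFER_SIZE;
        }

        /* 3. final partial buffer: keep what the caller did not ask for */
        if (count) {
                done += count;
                merge_size = BUFFER_SIZE - count;
        }
        return done;
}

int main(void)
{
        unsigned int got;

        merge_size = 10;
        got = chrdev_read(50);
        printf("%u bytes returned, %u left over\n", got, merge_size);
        return 0;
}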
@@ -2814,17 +2107,17 @@ static ssize_t idetape_chrdev_write(struct file *file, const char __user *buf, | |||
2814 | /* Initialize write operation */ | 2107 | /* Initialize write operation */ |
2815 | if (tape->chrdev_dir != IDETAPE_DIR_WRITE) { | 2108 | if (tape->chrdev_dir != IDETAPE_DIR_WRITE) { |
2816 | if (tape->chrdev_dir == IDETAPE_DIR_READ) | 2109 | if (tape->chrdev_dir == IDETAPE_DIR_READ) |
2817 | idetape_discard_read_pipeline(drive, 1); | 2110 | ide_tape_discard_merge_buffer(drive, 1); |
2818 | if (tape->merge_stage || tape->merge_stage_size) { | 2111 | if (tape->merge_bh || tape->merge_bh_size) { |
2819 | printk(KERN_ERR "ide-tape: merge_stage_size " | 2112 | printk(KERN_ERR "ide-tape: merge_bh_size " |
2820 | "should be 0 now\n"); | 2113 | "should be 0 now\n"); |
2821 | tape->merge_stage_size = 0; | 2114 | tape->merge_bh_size = 0; |
2822 | } | 2115 | } |
2823 | tape->merge_stage = __idetape_kmalloc_stage(tape, 0, 0); | 2116 | tape->merge_bh = ide_tape_kmalloc_buffer(tape, 0, 0); |
2824 | if (!tape->merge_stage) | 2117 | if (!tape->merge_bh) |
2825 | return -ENOMEM; | 2118 | return -ENOMEM; |
2826 | tape->chrdev_dir = IDETAPE_DIR_WRITE; | 2119 | tape->chrdev_dir = IDETAPE_DIR_WRITE; |
2827 | idetape_init_merge_stage(tape); | 2120 | idetape_init_merge_buffer(tape); |
2828 | 2121 | ||
2829 | /* | 2122 | /* |
2830 | * Issue a write 0 command to ensure that DSC handshake is | 2123 | * Issue a write 0 command to ensure that DSC handshake is |
@@ -2835,10 +2128,10 @@ static ssize_t idetape_chrdev_write(struct file *file, const char __user *buf, | |||
2835 | if (drive->dsc_overlap) { | 2128 | if (drive->dsc_overlap) { |
2836 | ssize_t retval = idetape_queue_rw_tail(drive, | 2129 | ssize_t retval = idetape_queue_rw_tail(drive, |
2837 | REQ_IDETAPE_WRITE, 0, | 2130 | REQ_IDETAPE_WRITE, 0, |
2838 | tape->merge_stage->bh); | 2131 | tape->merge_bh); |
2839 | if (retval < 0) { | 2132 | if (retval < 0) { |
2840 | __idetape_kfree_stage(tape->merge_stage); | 2133 | ide_tape_kfree_buffer(tape); |
2841 | tape->merge_stage = NULL; | 2134 | tape->merge_bh = NULL; |
2842 | tape->chrdev_dir = IDETAPE_DIR_NONE; | 2135 | tape->chrdev_dir = IDETAPE_DIR_NONE; |
2843 | return retval; | 2136 | return retval; |
2844 | } | 2137 | } |
@@ -2846,49 +2139,44 @@ static ssize_t idetape_chrdev_write(struct file *file, const char __user *buf, | |||
2846 | } | 2139 | } |
2847 | if (count == 0) | 2140 | if (count == 0) |
2848 | return (0); | 2141 | return (0); |
2849 | if (tape->restart_speed_control_req) | 2142 | if (tape->merge_bh_size) { |
2850 | idetape_restart_speed_control(drive); | 2143 | if (tape->merge_bh_size >= tape->buffer_size) { |
2851 | if (tape->merge_stage_size) { | ||
2852 | if (tape->merge_stage_size >= tape->stage_size) { | ||
2853 | printk(KERN_ERR "ide-tape: bug: merge buf too big\n"); | 2144 | printk(KERN_ERR "ide-tape: bug: merge buf too big\n"); |
2854 | tape->merge_stage_size = 0; | 2145 | tape->merge_bh_size = 0; |
2855 | } | 2146 | } |
2856 | actually_written = min((unsigned int) | 2147 | actually_written = min((unsigned int) |
2857 | (tape->stage_size - tape->merge_stage_size), | 2148 | (tape->buffer_size - tape->merge_bh_size), |
2858 | (unsigned int)count); | 2149 | (unsigned int)count); |
2859 | if (idetape_copy_stage_from_user(tape, tape->merge_stage, buf, | 2150 | if (idetape_copy_stage_from_user(tape, buf, actually_written)) |
2860 | actually_written)) | ||
2861 | ret = -EFAULT; | 2151 | ret = -EFAULT; |
2862 | buf += actually_written; | 2152 | buf += actually_written; |
2863 | tape->merge_stage_size += actually_written; | 2153 | tape->merge_bh_size += actually_written; |
2864 | count -= actually_written; | 2154 | count -= actually_written; |
2865 | 2155 | ||
2866 | if (tape->merge_stage_size == tape->stage_size) { | 2156 | if (tape->merge_bh_size == tape->buffer_size) { |
2867 | ssize_t retval; | 2157 | ssize_t retval; |
2868 | tape->merge_stage_size = 0; | 2158 | tape->merge_bh_size = 0; |
2869 | retval = idetape_add_chrdev_write_request(drive, ctl); | 2159 | retval = idetape_add_chrdev_write_request(drive, ctl); |
2870 | if (retval <= 0) | 2160 | if (retval <= 0) |
2871 | return (retval); | 2161 | return (retval); |
2872 | } | 2162 | } |
2873 | } | 2163 | } |
2874 | while (count >= tape->stage_size) { | 2164 | while (count >= tape->buffer_size) { |
2875 | ssize_t retval; | 2165 | ssize_t retval; |
2876 | if (idetape_copy_stage_from_user(tape, tape->merge_stage, buf, | 2166 | if (idetape_copy_stage_from_user(tape, buf, tape->buffer_size)) |
2877 | tape->stage_size)) | ||
2878 | ret = -EFAULT; | 2167 | ret = -EFAULT; |
2879 | buf += tape->stage_size; | 2168 | buf += tape->buffer_size; |
2880 | count -= tape->stage_size; | 2169 | count -= tape->buffer_size; |
2881 | retval = idetape_add_chrdev_write_request(drive, ctl); | 2170 | retval = idetape_add_chrdev_write_request(drive, ctl); |
2882 | actually_written += tape->stage_size; | 2171 | actually_written += tape->buffer_size; |
2883 | if (retval <= 0) | 2172 | if (retval <= 0) |
2884 | return (retval); | 2173 | return (retval); |
2885 | } | 2174 | } |
2886 | if (count) { | 2175 | if (count) { |
2887 | actually_written += count; | 2176 | actually_written += count; |
2888 | if (idetape_copy_stage_from_user(tape, tape->merge_stage, buf, | 2177 | if (idetape_copy_stage_from_user(tape, buf, count)) |
2889 | count)) | ||
2890 | ret = -EFAULT; | 2178 | ret = -EFAULT; |
2891 | tape->merge_stage_size += count; | 2179 | tape->merge_bh_size += count; |
2892 | } | 2180 | } |
2893 | return ret ? ret : actually_written; | 2181 | return ret ? ret : actually_written; |
2894 | } | 2182 | } |
@@ -2912,8 +2200,7 @@ static int idetape_write_filemark(ide_drive_t *drive) | |||
2912 | * | 2200 | * |
2913 | * Note: MTBSF and MTBSFM are not supported when the tape doesn't support | 2201 | * Note: MTBSF and MTBSFM are not supported when the tape doesn't support |
2914 | * spacing over filemarks in the reverse direction. In this case, MTFSFM is also | 2202 | * spacing over filemarks in the reverse direction. In this case, MTFSFM is also |
2915 | * usually not supported (it is supported in the rare case in which we crossed | 2203 | * usually not supported. |
2916 | * the filemark during our read-ahead pipelined operation mode). | ||
2917 | * | 2204 | * |
2918 | * The following commands are currently not supported: | 2205 | * The following commands are currently not supported: |
2919 | * | 2206 | * |
@@ -2929,7 +2216,6 @@ static int idetape_mtioctop(ide_drive_t *drive, short mt_op, int mt_count) | |||
2929 | debug_log(DBG_ERR, "Handling MTIOCTOP ioctl: mt_op=%d, mt_count=%d\n", | 2216 | debug_log(DBG_ERR, "Handling MTIOCTOP ioctl: mt_op=%d, mt_count=%d\n", |
2930 | mt_op, mt_count); | 2217 | mt_op, mt_count); |
2931 | 2218 | ||
2932 | /* Commands which need our pipelined read-ahead stages. */ | ||
2933 | switch (mt_op) { | 2219 | switch (mt_op) { |
2934 | case MTFSF: | 2220 | case MTFSF: |
2935 | case MTFSFM: | 2221 | case MTFSFM: |
@@ -2946,7 +2232,7 @@ static int idetape_mtioctop(ide_drive_t *drive, short mt_op, int mt_count) | |||
2946 | case MTWEOF: | 2232 | case MTWEOF: |
2947 | if (tape->write_prot) | 2233 | if (tape->write_prot) |
2948 | return -EACCES; | 2234 | return -EACCES; |
2949 | idetape_discard_read_pipeline(drive, 1); | 2235 | ide_tape_discard_merge_buffer(drive, 1); |
2950 | for (i = 0; i < mt_count; i++) { | 2236 | for (i = 0; i < mt_count; i++) { |
2951 | retval = idetape_write_filemark(drive); | 2237 | retval = idetape_write_filemark(drive); |
2952 | if (retval) | 2238 | if (retval) |
@@ -2954,12 +2240,12 @@ static int idetape_mtioctop(ide_drive_t *drive, short mt_op, int mt_count) | |||
2954 | } | 2240 | } |
2955 | return 0; | 2241 | return 0; |
2956 | case MTREW: | 2242 | case MTREW: |
2957 | idetape_discard_read_pipeline(drive, 0); | 2243 | ide_tape_discard_merge_buffer(drive, 0); |
2958 | if (idetape_rewind_tape(drive)) | 2244 | if (idetape_rewind_tape(drive)) |
2959 | return -EIO; | 2245 | return -EIO; |
2960 | return 0; | 2246 | return 0; |
2961 | case MTLOAD: | 2247 | case MTLOAD: |
2962 | idetape_discard_read_pipeline(drive, 0); | 2248 | ide_tape_discard_merge_buffer(drive, 0); |
2963 | idetape_create_load_unload_cmd(drive, &pc, | 2249 | idetape_create_load_unload_cmd(drive, &pc, |
2964 | IDETAPE_LU_LOAD_MASK); | 2250 | IDETAPE_LU_LOAD_MASK); |
2965 | return idetape_queue_pc_tail(drive, &pc); | 2251 | return idetape_queue_pc_tail(drive, &pc); |
@@ -2974,7 +2260,7 @@ static int idetape_mtioctop(ide_drive_t *drive, short mt_op, int mt_count) | |||
2974 | if (!idetape_queue_pc_tail(drive, &pc)) | 2260 | if (!idetape_queue_pc_tail(drive, &pc)) |
2975 | tape->door_locked = DOOR_UNLOCKED; | 2261 | tape->door_locked = DOOR_UNLOCKED; |
2976 | } | 2262 | } |
2977 | idetape_discard_read_pipeline(drive, 0); | 2263 | ide_tape_discard_merge_buffer(drive, 0); |
2978 | idetape_create_load_unload_cmd(drive, &pc, | 2264 | idetape_create_load_unload_cmd(drive, &pc, |
2979 | !IDETAPE_LU_LOAD_MASK); | 2265 | !IDETAPE_LU_LOAD_MASK); |
2980 | retval = idetape_queue_pc_tail(drive, &pc); | 2266 | retval = idetape_queue_pc_tail(drive, &pc); |
@@ -2982,10 +2268,10 @@ static int idetape_mtioctop(ide_drive_t *drive, short mt_op, int mt_count) | |||
2982 | clear_bit(IDETAPE_FLAG_MEDIUM_PRESENT, &tape->flags); | 2268 | clear_bit(IDETAPE_FLAG_MEDIUM_PRESENT, &tape->flags); |
2983 | return retval; | 2269 | return retval; |
2984 | case MTNOP: | 2270 | case MTNOP: |
2985 | idetape_discard_read_pipeline(drive, 0); | 2271 | ide_tape_discard_merge_buffer(drive, 0); |
2986 | return idetape_flush_tape_buffers(drive); | 2272 | return idetape_flush_tape_buffers(drive); |
2987 | case MTRETEN: | 2273 | case MTRETEN: |
2988 | idetape_discard_read_pipeline(drive, 0); | 2274 | ide_tape_discard_merge_buffer(drive, 0); |
2989 | idetape_create_load_unload_cmd(drive, &pc, | 2275 | idetape_create_load_unload_cmd(drive, &pc, |
2990 | IDETAPE_LU_RETENSION_MASK | IDETAPE_LU_LOAD_MASK); | 2276 | IDETAPE_LU_RETENSION_MASK | IDETAPE_LU_LOAD_MASK); |
2991 | return idetape_queue_pc_tail(drive, &pc); | 2277 | return idetape_queue_pc_tail(drive, &pc); |
@@ -3007,11 +2293,11 @@ static int idetape_mtioctop(ide_drive_t *drive, short mt_op, int mt_count) | |||
3007 | set_bit(IDETAPE_FLAG_DETECT_BS, &tape->flags); | 2293 | set_bit(IDETAPE_FLAG_DETECT_BS, &tape->flags); |
3008 | return 0; | 2294 | return 0; |
3009 | case MTSEEK: | 2295 | case MTSEEK: |
3010 | idetape_discard_read_pipeline(drive, 0); | 2296 | ide_tape_discard_merge_buffer(drive, 0); |
3011 | return idetape_position_tape(drive, | 2297 | return idetape_position_tape(drive, |
3012 | mt_count * tape->user_bs_factor, tape->partition, 0); | 2298 | mt_count * tape->user_bs_factor, tape->partition, 0); |
3013 | case MTSETPART: | 2299 | case MTSETPART: |
3014 | idetape_discard_read_pipeline(drive, 0); | 2300 | ide_tape_discard_merge_buffer(drive, 0); |
3015 | return idetape_position_tape(drive, 0, mt_count, 0); | 2301 | return idetape_position_tape(drive, 0, mt_count, 0); |
3016 | case MTFSR: | 2302 | case MTFSR: |
3017 | case MTBSR: | 2303 | case MTBSR: |
@@ -3056,13 +2342,12 @@ static int idetape_chrdev_ioctl(struct inode *inode, struct file *file, | |||
3056 | 2342 | ||
3057 | debug_log(DBG_CHRDEV, "Enter %s, cmd=%u\n", __func__, cmd); | 2343 | debug_log(DBG_CHRDEV, "Enter %s, cmd=%u\n", __func__, cmd); |
3058 | 2344 | ||
3059 | tape->restart_speed_control_req = 1; | ||
3060 | if (tape->chrdev_dir == IDETAPE_DIR_WRITE) { | 2345 | if (tape->chrdev_dir == IDETAPE_DIR_WRITE) { |
3061 | idetape_empty_write_pipeline(drive); | 2346 | ide_tape_flush_merge_buffer(drive); |
3062 | idetape_flush_tape_buffers(drive); | 2347 | idetape_flush_tape_buffers(drive); |
3063 | } | 2348 | } |
3064 | if (cmd == MTIOCGET || cmd == MTIOCPOS) { | 2349 | if (cmd == MTIOCGET || cmd == MTIOCPOS) { |
3065 | block_offset = idetape_pipeline_size(drive) / | 2350 | block_offset = tape->merge_bh_size / |
3066 | (tape->blk_size * tape->user_bs_factor); | 2351 | (tape->blk_size * tape->user_bs_factor); |
3067 | position = idetape_read_position(drive); | 2352 | position = idetape_read_position(drive); |
3068 | if (position < 0) | 2353 | if (position < 0) |
@@ -3094,7 +2379,7 @@ static int idetape_chrdev_ioctl(struct inode *inode, struct file *file, | |||
3094 | return 0; | 2379 | return 0; |
3095 | default: | 2380 | default: |
3096 | if (tape->chrdev_dir == IDETAPE_DIR_READ) | 2381 | if (tape->chrdev_dir == IDETAPE_DIR_READ) |
3097 | idetape_discard_read_pipeline(drive, 1); | 2382 | ide_tape_discard_merge_buffer(drive, 1); |
3098 | return idetape_blkdev_ioctl(drive, cmd, arg); | 2383 | return idetape_blkdev_ioctl(drive, cmd, arg); |
3099 | } | 2384 | } |
3100 | } | 2385 | } |
@@ -3168,9 +2453,6 @@ static int idetape_chrdev_open(struct inode *inode, struct file *filp) | |||
3168 | if (!test_bit(IDETAPE_FLAG_ADDRESS_VALID, &tape->flags)) | 2453 | if (!test_bit(IDETAPE_FLAG_ADDRESS_VALID, &tape->flags)) |
3169 | (void)idetape_rewind_tape(drive); | 2454 | (void)idetape_rewind_tape(drive); |
3170 | 2455 | ||
3171 | if (tape->chrdev_dir != IDETAPE_DIR_READ) | ||
3172 | clear_bit(IDETAPE_FLAG_PIPELINE_ERR, &tape->flags); | ||
3173 | |||
3174 | /* Read block size and write protect status from drive. */ | 2456 | /* Read block size and write protect status from drive. */ |
3175 | ide_tape_get_bsize_from_bdesc(drive); | 2457 | ide_tape_get_bsize_from_bdesc(drive); |
3176 | 2458 | ||
@@ -3199,8 +2481,6 @@ static int idetape_chrdev_open(struct inode *inode, struct file *filp) | |||
3199 | } | 2481 | } |
3200 | } | 2482 | } |
3201 | } | 2483 | } |
3202 | idetape_restart_speed_control(drive); | ||
3203 | tape->restart_speed_control_req = 0; | ||
3204 | return 0; | 2484 | return 0; |
3205 | 2485 | ||
3206 | out_put_tape: | 2486 | out_put_tape: |
@@ -3212,13 +2492,13 @@ static void idetape_write_release(ide_drive_t *drive, unsigned int minor) | |||
3212 | { | 2492 | { |
3213 | idetape_tape_t *tape = drive->driver_data; | 2493 | idetape_tape_t *tape = drive->driver_data; |
3214 | 2494 | ||
3215 | idetape_empty_write_pipeline(drive); | 2495 | ide_tape_flush_merge_buffer(drive); |
3216 | tape->merge_stage = __idetape_kmalloc_stage(tape, 1, 0); | 2496 | tape->merge_bh = ide_tape_kmalloc_buffer(tape, 1, 0); |
3217 | if (tape->merge_stage != NULL) { | 2497 | if (tape->merge_bh != NULL) { |
3218 | idetape_pad_zeros(drive, tape->blk_size * | 2498 | idetape_pad_zeros(drive, tape->blk_size * |
3219 | (tape->user_bs_factor - 1)); | 2499 | (tape->user_bs_factor - 1)); |
3220 | __idetape_kfree_stage(tape->merge_stage); | 2500 | ide_tape_kfree_buffer(tape); |
3221 | tape->merge_stage = NULL; | 2501 | tape->merge_bh = NULL; |
3222 | } | 2502 | } |
3223 | idetape_write_filemark(drive); | 2503 | idetape_write_filemark(drive); |
3224 | idetape_flush_tape_buffers(drive); | 2504 | idetape_flush_tape_buffers(drive); |
@@ -3241,14 +2521,9 @@ static int idetape_chrdev_release(struct inode *inode, struct file *filp) | |||
3241 | idetape_write_release(drive, minor); | 2521 | idetape_write_release(drive, minor); |
3242 | if (tape->chrdev_dir == IDETAPE_DIR_READ) { | 2522 | if (tape->chrdev_dir == IDETAPE_DIR_READ) { |
3243 | if (minor < 128) | 2523 | if (minor < 128) |
3244 | idetape_discard_read_pipeline(drive, 1); | 2524 | ide_tape_discard_merge_buffer(drive, 1); |
3245 | else | ||
3246 | idetape_wait_for_pipeline(drive); | ||
3247 | } | ||
3248 | if (tape->cache_stage != NULL) { | ||
3249 | __idetape_kfree_stage(tape->cache_stage); | ||
3250 | tape->cache_stage = NULL; | ||
3251 | } | 2525 | } |
2526 | |||
3252 | if (minor < 128 && test_bit(IDETAPE_FLAG_MEDIUM_PRESENT, &tape->flags)) | 2527 | if (minor < 128 && test_bit(IDETAPE_FLAG_MEDIUM_PRESENT, &tape->flags)) |
3253 | (void) idetape_rewind_tape(drive); | 2528 | (void) idetape_rewind_tape(drive); |
3254 | if (tape->chrdev_dir == IDETAPE_DIR_NONE) { | 2529 | if (tape->chrdev_dir == IDETAPE_DIR_NONE) { |
@@ -3385,33 +2660,15 @@ static void idetape_add_settings(ide_drive_t *drive) | |||
3385 | 2660 | ||
3386 | ide_add_setting(drive, "buffer", SETTING_READ, TYPE_SHORT, 0, 0xffff, | 2661 | ide_add_setting(drive, "buffer", SETTING_READ, TYPE_SHORT, 0, 0xffff, |
3387 | 1, 2, (u16 *)&tape->caps[16], NULL); | 2662 | 1, 2, (u16 *)&tape->caps[16], NULL); |
3388 | ide_add_setting(drive, "pipeline_min", SETTING_RW, TYPE_INT, 1, 0xffff, | ||
3389 | tape->stage_size / 1024, 1, &tape->min_pipeline, NULL); | ||
3390 | ide_add_setting(drive, "pipeline", SETTING_RW, TYPE_INT, 1, 0xffff, | ||
3391 | tape->stage_size / 1024, 1, &tape->max_stages, NULL); | ||
3392 | ide_add_setting(drive, "pipeline_max", SETTING_RW, TYPE_INT, 1, 0xffff, | ||
3393 | tape->stage_size / 1024, 1, &tape->max_pipeline, NULL); | ||
3394 | ide_add_setting(drive, "pipeline_used", SETTING_READ, TYPE_INT, 0, | ||
3395 | 0xffff, tape->stage_size / 1024, 1, &tape->nr_stages, | ||
3396 | NULL); | ||
3397 | ide_add_setting(drive, "pipeline_pending", SETTING_READ, TYPE_INT, 0, | ||
3398 | 0xffff, tape->stage_size / 1024, 1, | ||
3399 | &tape->nr_pending_stages, NULL); | ||
3400 | ide_add_setting(drive, "speed", SETTING_READ, TYPE_SHORT, 0, 0xffff, | 2663 | ide_add_setting(drive, "speed", SETTING_READ, TYPE_SHORT, 0, 0xffff, |
3401 | 1, 1, (u16 *)&tape->caps[14], NULL); | 2664 | 1, 1, (u16 *)&tape->caps[14], NULL); |
3402 | ide_add_setting(drive, "stage", SETTING_READ, TYPE_INT, 0, 0xffff, 1, | 2665 | ide_add_setting(drive, "buffer_size", SETTING_READ, TYPE_INT, 0, 0xffff, |
3403 | 1024, &tape->stage_size, NULL); | 2666 | 1, 1024, &tape->buffer_size, NULL); |
3404 | ide_add_setting(drive, "tdsc", SETTING_RW, TYPE_INT, IDETAPE_DSC_RW_MIN, | 2667 | ide_add_setting(drive, "tdsc", SETTING_RW, TYPE_INT, IDETAPE_DSC_RW_MIN, |
3405 | IDETAPE_DSC_RW_MAX, 1000, HZ, &tape->best_dsc_rw_freq, | 2668 | IDETAPE_DSC_RW_MAX, 1000, HZ, &tape->best_dsc_rw_freq, |
3406 | NULL); | 2669 | NULL); |
3407 | ide_add_setting(drive, "dsc_overlap", SETTING_RW, TYPE_BYTE, 0, 1, 1, | 2670 | ide_add_setting(drive, "dsc_overlap", SETTING_RW, TYPE_BYTE, 0, 1, 1, |
3408 | 1, &drive->dsc_overlap, NULL); | 2671 | 1, &drive->dsc_overlap, NULL); |
3409 | ide_add_setting(drive, "pipeline_head_speed_c", SETTING_READ, TYPE_INT, | ||
3410 | 0, 0xffff, 1, 1, &tape->controlled_pipeline_head_speed, | ||
3411 | NULL); | ||
3412 | ide_add_setting(drive, "pipeline_head_speed_u", SETTING_READ, TYPE_INT, | ||
3413 | 0, 0xffff, 1, 1, | ||
3414 | &tape->uncontrolled_pipeline_head_speed, NULL); | ||
3415 | ide_add_setting(drive, "avg_speed", SETTING_READ, TYPE_INT, 0, 0xffff, | 2672 | ide_add_setting(drive, "avg_speed", SETTING_READ, TYPE_INT, 0, 0xffff, |
3416 | 1, 1, &tape->avg_speed, NULL); | 2673 | 1, 1, &tape->avg_speed, NULL); |
3417 | ide_add_setting(drive, "debug_mask", SETTING_RW, TYPE_INT, 0, 0xffff, 1, | 2674 | ide_add_setting(drive, "debug_mask", SETTING_RW, TYPE_INT, 0, 0xffff, 1, |
@@ -3434,11 +2691,10 @@ static inline void idetape_add_settings(ide_drive_t *drive) { ; } | |||
3434 | */ | 2691 | */ |
3435 | static void idetape_setup(ide_drive_t *drive, idetape_tape_t *tape, int minor) | 2692 | static void idetape_setup(ide_drive_t *drive, idetape_tape_t *tape, int minor) |
3436 | { | 2693 | { |
3437 | unsigned long t1, tmid, tn, t; | 2694 | unsigned long t; |
3438 | int speed; | 2695 | int speed; |
3439 | int stage_size; | 2696 | int buffer_size; |
3440 | u8 gcw[2]; | 2697 | u8 gcw[2]; |
3441 | struct sysinfo si; | ||
3442 | u16 *ctl = (u16 *)&tape->caps[12]; | 2698 | u16 *ctl = (u16 *)&tape->caps[12]; |
3443 | 2699 | ||
3444 | spin_lock_init(&tape->lock); | 2700 | spin_lock_init(&tape->lock); |
@@ -3457,65 +2713,33 @@ static void idetape_setup(ide_drive_t *drive, idetape_tape_t *tape, int minor) | |||
3457 | tape->name[2] = '0' + minor; | 2713 | tape->name[2] = '0' + minor; |
3458 | tape->chrdev_dir = IDETAPE_DIR_NONE; | 2714 | tape->chrdev_dir = IDETAPE_DIR_NONE; |
3459 | tape->pc = tape->pc_stack; | 2715 | tape->pc = tape->pc_stack; |
3460 | tape->max_insert_speed = 10000; | ||
3461 | tape->speed_control = 1; | ||
3462 | *((unsigned short *) &gcw) = drive->id->config; | 2716 | *((unsigned short *) &gcw) = drive->id->config; |
3463 | 2717 | ||
3464 | /* Command packet DRQ type */ | 2718 | /* Command packet DRQ type */ |
3465 | if (((gcw[0] & 0x60) >> 5) == 1) | 2719 | if (((gcw[0] & 0x60) >> 5) == 1) |
3466 | set_bit(IDETAPE_FLAG_DRQ_INTERRUPT, &tape->flags); | 2720 | set_bit(IDETAPE_FLAG_DRQ_INTERRUPT, &tape->flags); |
3467 | 2721 | ||
3468 | tape->min_pipeline = 10; | ||
3469 | tape->max_pipeline = 10; | ||
3470 | tape->max_stages = 10; | ||
3471 | |||
3472 | idetape_get_inquiry_results(drive); | 2722 | idetape_get_inquiry_results(drive); |
3473 | idetape_get_mode_sense_results(drive); | 2723 | idetape_get_mode_sense_results(drive); |
3474 | ide_tape_get_bsize_from_bdesc(drive); | 2724 | ide_tape_get_bsize_from_bdesc(drive); |
3475 | tape->user_bs_factor = 1; | 2725 | tape->user_bs_factor = 1; |
3476 | tape->stage_size = *ctl * tape->blk_size; | 2726 | tape->buffer_size = *ctl * tape->blk_size; |
3477 | while (tape->stage_size > 0xffff) { | 2727 | while (tape->buffer_size > 0xffff) { |
3478 | printk(KERN_NOTICE "ide-tape: decreasing stage size\n"); | 2728 | printk(KERN_NOTICE "ide-tape: decreasing stage size\n"); |
3479 | *ctl /= 2; | 2729 | *ctl /= 2; |
3480 | tape->stage_size = *ctl * tape->blk_size; | 2730 | tape->buffer_size = *ctl * tape->blk_size; |
3481 | } | 2731 | } |
3482 | stage_size = tape->stage_size; | 2732 | buffer_size = tape->buffer_size; |
3483 | tape->pages_per_stage = stage_size / PAGE_SIZE; | 2733 | tape->pages_per_buffer = buffer_size / PAGE_SIZE; |
3484 | if (stage_size % PAGE_SIZE) { | 2734 | if (buffer_size % PAGE_SIZE) { |
3485 | tape->pages_per_stage++; | 2735 | tape->pages_per_buffer++; |
3486 | tape->excess_bh_size = PAGE_SIZE - stage_size % PAGE_SIZE; | 2736 | tape->excess_bh_size = PAGE_SIZE - buffer_size % PAGE_SIZE; |
3487 | } | 2737 | } |
3488 | 2738 | ||
3489 | /* Select the "best" DSC read/write polling freq and pipeline size. */ | 2739 | /* select the "best" DSC read/write polling freq */ |
3490 | speed = max(*(u16 *)&tape->caps[14], *(u16 *)&tape->caps[8]); | 2740 | speed = max(*(u16 *)&tape->caps[14], *(u16 *)&tape->caps[8]); |
3491 | 2741 | ||
3492 | tape->max_stages = speed * 1000 * 10 / tape->stage_size; | 2742 | t = (IDETAPE_FIFO_THRESHOLD * tape->buffer_size * HZ) / (speed * 1000); |
3493 | |||
3494 | /* Limit memory use for pipeline to 10% of physical memory */ | ||
3495 | si_meminfo(&si); | ||
3496 | if (tape->max_stages * tape->stage_size > | ||
3497 | si.totalram * si.mem_unit / 10) | ||
3498 | tape->max_stages = | ||
3499 | si.totalram * si.mem_unit / (10 * tape->stage_size); | ||
3500 | |||
3501 | tape->max_stages = min(tape->max_stages, IDETAPE_MAX_PIPELINE_STAGES); | ||
3502 | tape->min_pipeline = min(tape->max_stages, IDETAPE_MIN_PIPELINE_STAGES); | ||
3503 | tape->max_pipeline = | ||
3504 | min(tape->max_stages * 2, IDETAPE_MAX_PIPELINE_STAGES); | ||
3505 | if (tape->max_stages == 0) { | ||
3506 | tape->max_stages = 1; | ||
3507 | tape->min_pipeline = 1; | ||
3508 | tape->max_pipeline = 1; | ||
3509 | } | ||
3510 | |||
3511 | t1 = (tape->stage_size * HZ) / (speed * 1000); | ||
3512 | tmid = (*(u16 *)&tape->caps[16] * 32 * HZ) / (speed * 125); | ||
3513 | tn = (IDETAPE_FIFO_THRESHOLD * tape->stage_size * HZ) / (speed * 1000); | ||
3514 | |||
3515 | if (tape->max_stages) | ||
3516 | t = tn; | ||
3517 | else | ||
3518 | t = t1; | ||
3519 | 2743 | ||
3520 | /* | 2744 | /* |
3521 | * Ensure that the number we got makes sense; limit it within | 2745 | * Ensure that the number we got makes sense; limit it within |
@@ -3525,11 +2749,10 @@ static void idetape_setup(ide_drive_t *drive, idetape_tape_t *tape, int minor) | |||
3525 | min_t(unsigned long, t, IDETAPE_DSC_RW_MAX), | 2749 | min_t(unsigned long, t, IDETAPE_DSC_RW_MAX), |
3526 | IDETAPE_DSC_RW_MIN); | 2750 | IDETAPE_DSC_RW_MIN); |
3527 | printk(KERN_INFO "ide-tape: %s <-> %s: %dKBps, %d*%dkB buffer, " | 2751 | printk(KERN_INFO "ide-tape: %s <-> %s: %dKBps, %d*%dkB buffer, " |
3528 | "%dkB pipeline, %lums tDSC%s\n", | 2752 | "%lums tDSC%s\n", |
3529 | drive->name, tape->name, *(u16 *)&tape->caps[14], | 2753 | drive->name, tape->name, *(u16 *)&tape->caps[14], |
3530 | (*(u16 *)&tape->caps[16] * 512) / tape->stage_size, | 2754 | (*(u16 *)&tape->caps[16] * 512) / tape->buffer_size, |
3531 | tape->stage_size / 1024, | 2755 | tape->buffer_size / 1024, |
3532 | tape->max_stages * tape->stage_size / 1024, | ||
3533 | tape->best_dsc_rw_freq * 1000 / HZ, | 2756 | tape->best_dsc_rw_freq * 1000 / HZ, |
3534 | drive->using_dma ? ", DMA":""); | 2757 | drive->using_dma ? ", DMA":""); |
3535 | 2758 | ||
@@ -3553,7 +2776,7 @@ static void ide_tape_release(struct kref *kref) | |||
3553 | ide_drive_t *drive = tape->drive; | 2776 | ide_drive_t *drive = tape->drive; |
3554 | struct gendisk *g = tape->disk; | 2777 | struct gendisk *g = tape->disk; |
3555 | 2778 | ||
3556 | BUG_ON(tape->first_stage != NULL || tape->merge_stage_size); | 2779 | BUG_ON(tape->merge_bh_size); |
3557 | 2780 | ||
3558 | drive->dsc_overlap = 0; | 2781 | drive->dsc_overlap = 0; |
3559 | drive->driver_data = NULL; | 2782 | drive->driver_data = NULL; |
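
The ide-tape.c hunks above drop the multi-stage read/write pipeline (cache_stage, the pipeline_* settings, speed control) in favour of a single merge buffer (tape->merge_bh / merge_bh_size) sized to tape->buffer_size. The following user-space sketch mirrors only the chunking logic of idetape_chrdev_write(); all names and the flush step are stand-ins, not the kernel API:

    #include <stdio.h>
    #include <string.h>

    #define BUF_SIZE 32768              /* stands in for tape->buffer_size */

    static char merge_buf[BUF_SIZE];    /* stands in for the merge_bh pages */
    static size_t merge_len;            /* stands in for tape->merge_bh_size */

    /* Stand-in for idetape_add_chrdev_write_request(): pretend the full
     * buffer was handed to the block layer and written to the medium. */
    static void flush_merge_buf(void)
    {
        printf("flushing %zu bytes\n", merge_len);
        merge_len = 0;
    }

    /* Mirrors the chunking in idetape_chrdev_write(): top up a partially
     * filled merge buffer, stream out whole buffers, keep the tail. */
    static size_t tape_write(const char *ubuf, size_t count)
    {
        size_t written = 0;

        if (merge_len) {                        /* top up the partial buffer */
            size_t n = BUF_SIZE - merge_len;
            if (n > count)
                n = count;
            memcpy(merge_buf + merge_len, ubuf, n);
            merge_len += n;
            ubuf += n;
            count -= n;
            written += n;
            if (merge_len == BUF_SIZE)
                flush_merge_buf();
        }
        while (count >= BUF_SIZE) {             /* whole buffers go straight out */
            memcpy(merge_buf, ubuf, BUF_SIZE);
            merge_len = BUF_SIZE;
            flush_merge_buf();
            ubuf += BUF_SIZE;
            count -= BUF_SIZE;
            written += BUF_SIZE;
        }
        if (count) {                            /* remainder waits for the next write */
            memcpy(merge_buf, ubuf, count);
            merge_len = count;
            written += count;
        }
        return written;
    }

    int main(void)
    {
        static char data[100000];

        memset(data, 'x', sizeof(data));
        printf("wrote %zu bytes, %zu left in merge buffer\n",
               tape_write(data, sizeof(data)), merge_len);
        return 0;
    }
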
diff --git a/drivers/ide/ide-taskfile.c b/drivers/ide/ide-taskfile.c index a317ca9c46e5..9f9ad9fb6b89 100644 --- a/drivers/ide/ide-taskfile.c +++ b/drivers/ide/ide-taskfile.c | |||
@@ -36,6 +36,7 @@ | |||
36 | void ide_tf_load(ide_drive_t *drive, ide_task_t *task) | 36 | void ide_tf_load(ide_drive_t *drive, ide_task_t *task) |
37 | { | 37 | { |
38 | ide_hwif_t *hwif = drive->hwif; | 38 | ide_hwif_t *hwif = drive->hwif; |
39 | struct ide_io_ports *io_ports = &hwif->io_ports; | ||
39 | struct ide_taskfile *tf = &task->tf; | 40 | struct ide_taskfile *tf = &task->tf; |
40 | u8 HIHI = (task->tf_flags & IDE_TFLAG_LBA48) ? 0xE0 : 0xEF; | 41 | u8 HIHI = (task->tf_flags & IDE_TFLAG_LBA48) ? 0xE0 : 0xEF; |
41 | 42 | ||
@@ -59,34 +60,33 @@ void ide_tf_load(ide_drive_t *drive, ide_task_t *task) | |||
59 | SELECT_MASK(drive, 0); | 60 | SELECT_MASK(drive, 0); |
60 | 61 | ||
61 | if (task->tf_flags & IDE_TFLAG_OUT_DATA) | 62 | if (task->tf_flags & IDE_TFLAG_OUT_DATA) |
62 | hwif->OUTW((tf->hob_data << 8) | tf->data, | 63 | hwif->OUTW((tf->hob_data << 8) | tf->data, io_ports->data_addr); |
63 | hwif->io_ports[IDE_DATA_OFFSET]); | ||
64 | 64 | ||
65 | if (task->tf_flags & IDE_TFLAG_OUT_HOB_FEATURE) | 65 | if (task->tf_flags & IDE_TFLAG_OUT_HOB_FEATURE) |
66 | hwif->OUTB(tf->hob_feature, hwif->io_ports[IDE_FEATURE_OFFSET]); | 66 | hwif->OUTB(tf->hob_feature, io_ports->feature_addr); |
67 | if (task->tf_flags & IDE_TFLAG_OUT_HOB_NSECT) | 67 | if (task->tf_flags & IDE_TFLAG_OUT_HOB_NSECT) |
68 | hwif->OUTB(tf->hob_nsect, hwif->io_ports[IDE_NSECTOR_OFFSET]); | 68 | hwif->OUTB(tf->hob_nsect, io_ports->nsect_addr); |
69 | if (task->tf_flags & IDE_TFLAG_OUT_HOB_LBAL) | 69 | if (task->tf_flags & IDE_TFLAG_OUT_HOB_LBAL) |
70 | hwif->OUTB(tf->hob_lbal, hwif->io_ports[IDE_SECTOR_OFFSET]); | 70 | hwif->OUTB(tf->hob_lbal, io_ports->lbal_addr); |
71 | if (task->tf_flags & IDE_TFLAG_OUT_HOB_LBAM) | 71 | if (task->tf_flags & IDE_TFLAG_OUT_HOB_LBAM) |
72 | hwif->OUTB(tf->hob_lbam, hwif->io_ports[IDE_LCYL_OFFSET]); | 72 | hwif->OUTB(tf->hob_lbam, io_ports->lbam_addr); |
73 | if (task->tf_flags & IDE_TFLAG_OUT_HOB_LBAH) | 73 | if (task->tf_flags & IDE_TFLAG_OUT_HOB_LBAH) |
74 | hwif->OUTB(tf->hob_lbah, hwif->io_ports[IDE_HCYL_OFFSET]); | 74 | hwif->OUTB(tf->hob_lbah, io_ports->lbah_addr); |
75 | 75 | ||
76 | if (task->tf_flags & IDE_TFLAG_OUT_FEATURE) | 76 | if (task->tf_flags & IDE_TFLAG_OUT_FEATURE) |
77 | hwif->OUTB(tf->feature, hwif->io_ports[IDE_FEATURE_OFFSET]); | 77 | hwif->OUTB(tf->feature, io_ports->feature_addr); |
78 | if (task->tf_flags & IDE_TFLAG_OUT_NSECT) | 78 | if (task->tf_flags & IDE_TFLAG_OUT_NSECT) |
79 | hwif->OUTB(tf->nsect, hwif->io_ports[IDE_NSECTOR_OFFSET]); | 79 | hwif->OUTB(tf->nsect, io_ports->nsect_addr); |
80 | if (task->tf_flags & IDE_TFLAG_OUT_LBAL) | 80 | if (task->tf_flags & IDE_TFLAG_OUT_LBAL) |
81 | hwif->OUTB(tf->lbal, hwif->io_ports[IDE_SECTOR_OFFSET]); | 81 | hwif->OUTB(tf->lbal, io_ports->lbal_addr); |
82 | if (task->tf_flags & IDE_TFLAG_OUT_LBAM) | 82 | if (task->tf_flags & IDE_TFLAG_OUT_LBAM) |
83 | hwif->OUTB(tf->lbam, hwif->io_ports[IDE_LCYL_OFFSET]); | 83 | hwif->OUTB(tf->lbam, io_ports->lbam_addr); |
84 | if (task->tf_flags & IDE_TFLAG_OUT_LBAH) | 84 | if (task->tf_flags & IDE_TFLAG_OUT_LBAH) |
85 | hwif->OUTB(tf->lbah, hwif->io_ports[IDE_HCYL_OFFSET]); | 85 | hwif->OUTB(tf->lbah, io_ports->lbah_addr); |
86 | 86 | ||
87 | if (task->tf_flags & IDE_TFLAG_OUT_DEVICE) | 87 | if (task->tf_flags & IDE_TFLAG_OUT_DEVICE) |
88 | hwif->OUTB((tf->device & HIHI) | drive->select.all, | 88 | hwif->OUTB((tf->device & HIHI) | drive->select.all, |
89 | hwif->io_ports[IDE_SELECT_OFFSET]); | 89 | io_ports->device_addr); |
90 | } | 90 | } |
91 | 91 | ||
92 | int taskfile_lib_get_identify (ide_drive_t *drive, u8 *buf) | 92 | int taskfile_lib_get_identify (ide_drive_t *drive, u8 *buf) |
@@ -155,8 +155,7 @@ ide_startstop_t do_rw_taskfile (ide_drive_t *drive, ide_task_t *task) | |||
155 | switch (task->data_phase) { | 155 | switch (task->data_phase) { |
156 | case TASKFILE_MULTI_OUT: | 156 | case TASKFILE_MULTI_OUT: |
157 | case TASKFILE_OUT: | 157 | case TASKFILE_OUT: |
158 | hwif->OUTBSYNC(drive, tf->command, | 158 | hwif->OUTBSYNC(drive, tf->command, hwif->io_ports.command_addr); |
159 | hwif->io_ports[IDE_COMMAND_OFFSET]); | ||
160 | ndelay(400); /* FIXME */ | 159 | ndelay(400); /* FIXME */ |
161 | return pre_task_out_intr(drive, task->rq); | 160 | return pre_task_out_intr(drive, task->rq); |
162 | case TASKFILE_MULTI_IN: | 161 | case TASKFILE_MULTI_IN: |
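
The ide-taskfile.c hunks (and the legacy Amiga/Atari setup helpers further down) replace raw hwif->io_ports[IDE_*_OFFSET] indexing with named fields such as io_ports->data_addr, while the port-setup loops still fill hw->io_ports_array[i]. The sketch below only illustrates the idea of two views over the same storage; the field order and count are an assumption, not the struct ide_io_ports definition from <linux/ide.h>:

    #include <stdio.h>

    /* Named-field view and array view over the same task-file port
     * addresses (illustrative layout only). */
    union io_ports {
        struct {
            unsigned long data_addr;
            unsigned long error_addr;   /* feature_addr on writes */
            unsigned long nsect_addr;
            unsigned long lbal_addr;
            unsigned long lbam_addr;
            unsigned long lbah_addr;
            unsigned long device_addr;
            unsigned long status_addr;  /* command_addr on writes */
            unsigned long ctl_addr;
            unsigned long irq_addr;
        } named;
        unsigned long array[10];
    };

    int main(void)
    {
        union io_ports p = { { 0 } };
        unsigned long base = 0x1f0;

        p.named.data_addr = base;
        for (int i = 1; i < 8; i++)     /* same loop shape as the setup helpers */
            p.array[i] = base + i;      /* the array view fills the named fields */
        p.named.ctl_addr = 0x3f6;

        printf("status port: %#lx\n", p.named.status_addr);   /* prints 0x1f7 */
        return 0;
    }
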
diff --git a/drivers/ide/ide.c b/drivers/ide/ide.c index bced02f9f2c3..999584c03d97 100644 --- a/drivers/ide/ide.c +++ b/drivers/ide/ide.c | |||
@@ -94,12 +94,6 @@ DEFINE_MUTEX(ide_cfg_mtx); | |||
94 | 94 | ||
95 | int noautodma = 0; | 95 | int noautodma = 0; |
96 | 96 | ||
97 | #ifdef CONFIG_BLK_DEV_IDEACPI | ||
98 | int ide_noacpi = 0; | ||
99 | int ide_noacpitfs = 1; | ||
100 | int ide_noacpionboot = 1; | ||
101 | #endif | ||
102 | |||
103 | ide_hwif_t ide_hwifs[MAX_HWIFS]; /* master data repository */ | 97 | ide_hwif_t ide_hwifs[MAX_HWIFS]; /* master data repository */ |
104 | 98 | ||
105 | static void ide_port_init_devices_data(ide_hwif_t *); | 99 | static void ide_port_init_devices_data(ide_hwif_t *); |
@@ -293,7 +287,7 @@ EXPORT_SYMBOL_GPL(ide_port_unregister_devices); | |||
293 | 287 | ||
294 | /** | 288 | /** |
295 | * ide_unregister - free an IDE interface | 289 | * ide_unregister - free an IDE interface |
296 | * @index: index of interface (will change soon to a pointer) | 290 | * @hwif: IDE interface |
297 | * | 291 | * |
298 | * Perform the final unregister of an IDE interface. At the moment | 292 | * Perform the final unregister of an IDE interface. At the moment |
299 | * we don't refcount interfaces so this will also get split up. | 293 | * we don't refcount interfaces so this will also get split up. |
@@ -313,19 +307,16 @@ EXPORT_SYMBOL_GPL(ide_port_unregister_devices); | |||
313 | * This is raving bonkers. | 307 | * This is raving bonkers. |
314 | */ | 308 | */ |
315 | 309 | ||
316 | void ide_unregister(unsigned int index) | 310 | void ide_unregister(ide_hwif_t *hwif) |
317 | { | 311 | { |
318 | ide_hwif_t *hwif, *g; | 312 | ide_hwif_t *g; |
319 | ide_hwgroup_t *hwgroup; | 313 | ide_hwgroup_t *hwgroup; |
320 | int irq_count = 0; | 314 | int irq_count = 0; |
321 | 315 | ||
322 | BUG_ON(index >= MAX_HWIFS); | ||
323 | |||
324 | BUG_ON(in_interrupt()); | 316 | BUG_ON(in_interrupt()); |
325 | BUG_ON(irqs_disabled()); | 317 | BUG_ON(irqs_disabled()); |
326 | mutex_lock(&ide_cfg_mtx); | 318 | mutex_lock(&ide_cfg_mtx); |
327 | spin_lock_irq(&ide_lock); | 319 | spin_lock_irq(&ide_lock); |
328 | hwif = &ide_hwifs[index]; | ||
329 | if (!hwif->present) | 320 | if (!hwif->present) |
330 | goto abort; | 321 | goto abort; |
331 | __ide_port_unregister_devices(hwif); | 322 | __ide_port_unregister_devices(hwif); |
@@ -366,7 +357,7 @@ void ide_unregister(unsigned int index) | |||
366 | ide_release_dma_engine(hwif); | 357 | ide_release_dma_engine(hwif); |
367 | 358 | ||
368 | /* restore hwif data to pristine status */ | 359 | /* restore hwif data to pristine status */ |
369 | ide_init_port_data(hwif, index); | 360 | ide_init_port_data(hwif, hwif->index); |
370 | 361 | ||
371 | abort: | 362 | abort: |
372 | spin_unlock_irq(&ide_lock); | 363 | spin_unlock_irq(&ide_lock); |
@@ -377,7 +368,7 @@ EXPORT_SYMBOL(ide_unregister); | |||
377 | 368 | ||
378 | void ide_init_port_hw(ide_hwif_t *hwif, hw_regs_t *hw) | 369 | void ide_init_port_hw(ide_hwif_t *hwif, hw_regs_t *hw) |
379 | { | 370 | { |
380 | memcpy(hwif->io_ports, hw->io_ports, sizeof(hwif->io_ports)); | 371 | memcpy(&hwif->io_ports, &hw->io_ports, sizeof(hwif->io_ports)); |
381 | hwif->irq = hw->irq; | 372 | hwif->irq = hw->irq; |
382 | hwif->chipset = hw->chipset; | 373 | hwif->chipset = hw->chipset; |
383 | hwif->gendev.parent = hw->dev; | 374 | hwif->gendev.parent = hw->dev; |
@@ -837,16 +828,6 @@ static int __init match_parm (char *s, const char *keywords[], int vals[], int m | |||
837 | return 0; /* zero = nothing matched */ | 828 | return 0; /* zero = nothing matched */ |
838 | } | 829 | } |
839 | 830 | ||
840 | extern int probe_ali14xx; | ||
841 | extern int probe_umc8672; | ||
842 | extern int probe_dtc2278; | ||
843 | extern int probe_ht6560b; | ||
844 | extern int probe_qd65xx; | ||
845 | extern int cmd640_vlb; | ||
846 | extern int probe_4drives; | ||
847 | |||
848 | static int __initdata is_chipset_set; | ||
849 | |||
850 | /* | 831 | /* |
851 | * ide_setup() gets called VERY EARLY during initialization, | 832 | * ide_setup() gets called VERY EARLY during initialization, |
852 | * to handle kernel "command line" strings beginning with "hdx=" or "ide". | 833 | * to handle kernel "command line" strings beginning with "hdx=" or "ide". |
@@ -855,14 +836,12 @@ static int __initdata is_chipset_set; | |||
855 | */ | 836 | */ |
856 | static int __init ide_setup(char *s) | 837 | static int __init ide_setup(char *s) |
857 | { | 838 | { |
858 | int i, vals[3]; | ||
859 | ide_hwif_t *hwif; | 839 | ide_hwif_t *hwif; |
860 | ide_drive_t *drive; | 840 | ide_drive_t *drive; |
861 | unsigned int hw, unit; | 841 | unsigned int hw, unit; |
842 | int vals[3]; | ||
862 | const char max_drive = 'a' + ((MAX_HWIFS * MAX_DRIVES) - 1); | 843 | const char max_drive = 'a' + ((MAX_HWIFS * MAX_DRIVES) - 1); |
863 | const char max_hwif = '0' + (MAX_HWIFS - 1); | ||
864 | 844 | ||
865 | |||
866 | if (strncmp(s,"hd",2) == 0 && s[2] == '=') /* hd= is for hd.c */ | 845 | if (strncmp(s,"hd",2) == 0 && s[2] == '=') /* hd= is for hd.c */ |
867 | return 0; /* driver and not us */ | 846 | return 0; /* driver and not us */ |
868 | 847 | ||
@@ -878,7 +857,7 @@ static int __init ide_setup(char *s) | |||
878 | 857 | ||
879 | printk(" : Enabled support for IDE doublers\n"); | 858 | printk(" : Enabled support for IDE doublers\n"); |
880 | ide_doubler = 1; | 859 | ide_doubler = 1; |
881 | return 1; | 860 | goto obsolete_option; |
882 | } | 861 | } |
883 | #endif /* CONFIG_BLK_DEV_IDEDOUBLER */ | 862 | #endif /* CONFIG_BLK_DEV_IDEDOUBLER */ |
884 | 863 | ||
@@ -892,17 +871,17 @@ static int __init ide_setup(char *s) | |||
892 | if (!strcmp(s, "ide=noacpi")) { | 871 | if (!strcmp(s, "ide=noacpi")) { |
893 | //printk(" : Disable IDE ACPI support.\n"); | 872 | //printk(" : Disable IDE ACPI support.\n"); |
894 | ide_noacpi = 1; | 873 | ide_noacpi = 1; |
895 | return 1; | 874 | goto obsolete_option; |
896 | } | 875 | } |
897 | if (!strcmp(s, "ide=acpigtf")) { | 876 | if (!strcmp(s, "ide=acpigtf")) { |
898 | //printk(" : Enable IDE ACPI _GTF support.\n"); | 877 | //printk(" : Enable IDE ACPI _GTF support.\n"); |
899 | ide_noacpitfs = 0; | 878 | ide_acpigtf = 1; |
900 | return 1; | 879 | goto obsolete_option; |
901 | } | 880 | } |
902 | if (!strcmp(s, "ide=acpionboot")) { | 881 | if (!strcmp(s, "ide=acpionboot")) { |
903 | //printk(" : Call IDE ACPI methods on boot.\n"); | 882 | //printk(" : Call IDE ACPI methods on boot.\n"); |
904 | ide_noacpionboot = 0; | 883 | ide_acpionboot = 1; |
905 | return 1; | 884 | goto obsolete_option; |
906 | } | 885 | } |
907 | #endif /* CONFIG_BLK_DEV_IDEACPI */ | 886 | #endif /* CONFIG_BLK_DEV_IDEACPI */ |
908 | 887 | ||
@@ -912,7 +891,7 @@ static int __init ide_setup(char *s) | |||
912 | if (s[0] == 'h' && s[1] == 'd' && s[2] >= 'a' && s[2] <= max_drive) { | 891 | if (s[0] == 'h' && s[1] == 'd' && s[2] >= 'a' && s[2] <= max_drive) { |
913 | const char *hd_words[] = { | 892 | const char *hd_words[] = { |
914 | "none", "noprobe", "nowerr", "cdrom", "nodma", | 893 | "none", "noprobe", "nowerr", "cdrom", "nodma", |
915 | "autotune", "noautotune", "-8", "-9", "-10", | 894 | "-6", "-7", "-8", "-9", "-10", |
916 | "noflush", "remap", "remap63", "scsi", NULL }; | 895 | "noflush", "remap", "remap63", "scsi", NULL }; |
917 | unit = s[2] - 'a'; | 896 | unit = s[2] - 'a'; |
918 | hw = unit / MAX_DRIVES; | 897 | hw = unit / MAX_DRIVES; |
@@ -927,28 +906,22 @@ static int __init ide_setup(char *s) | |||
927 | case -1: /* "none" */ | 906 | case -1: /* "none" */ |
928 | case -2: /* "noprobe" */ | 907 | case -2: /* "noprobe" */ |
929 | drive->noprobe = 1; | 908 | drive->noprobe = 1; |
930 | goto done; | 909 | goto obsolete_option; |
931 | case -3: /* "nowerr" */ | 910 | case -3: /* "nowerr" */ |
932 | drive->bad_wstat = BAD_R_STAT; | 911 | drive->bad_wstat = BAD_R_STAT; |
933 | goto done; | 912 | goto obsolete_option; |
934 | case -4: /* "cdrom" */ | 913 | case -4: /* "cdrom" */ |
935 | drive->present = 1; | 914 | drive->present = 1; |
936 | drive->media = ide_cdrom; | 915 | drive->media = ide_cdrom; |
937 | /* an ATAPI device ignores DRDY */ | 916 | /* an ATAPI device ignores DRDY */ |
938 | drive->ready_stat = 0; | 917 | drive->ready_stat = 0; |
939 | goto done; | 918 | goto obsolete_option; |
940 | case -5: /* nodma */ | 919 | case -5: /* nodma */ |
941 | drive->nodma = 1; | 920 | drive->nodma = 1; |
942 | goto done; | ||
943 | case -6: /* "autotune" */ | ||
944 | drive->autotune = IDE_TUNE_AUTO; | ||
945 | goto obsolete_option; | ||
946 | case -7: /* "noautotune" */ | ||
947 | drive->autotune = IDE_TUNE_NOAUTO; | ||
948 | goto obsolete_option; | 921 | goto obsolete_option; |
949 | case -11: /* noflush */ | 922 | case -11: /* noflush */ |
950 | drive->noflush = 1; | 923 | drive->noflush = 1; |
951 | goto done; | 924 | goto obsolete_option; |
952 | case -12: /* "remap" */ | 925 | case -12: /* "remap" */ |
953 | drive->remap_0_to_1 = 1; | 926 | drive->remap_0_to_1 = 1; |
954 | goto obsolete_option; | 927 | goto obsolete_option; |
@@ -966,7 +939,7 @@ static int __init ide_setup(char *s) | |||
966 | drive->sect = drive->bios_sect = vals[2]; | 939 | drive->sect = drive->bios_sect = vals[2]; |
967 | drive->present = 1; | 940 | drive->present = 1; |
968 | drive->forced_geom = 1; | 941 | drive->forced_geom = 1; |
969 | goto done; | 942 | goto obsolete_option; |
970 | default: | 943 | default: |
971 | goto bad_option; | 944 | goto bad_option; |
972 | } | 945 | } |
@@ -984,126 +957,15 @@ static int __init ide_setup(char *s) | |||
984 | idebus_parameter = vals[0]; | 957 | idebus_parameter = vals[0]; |
985 | } else | 958 | } else |
986 | printk(" -- BAD BUS SPEED! Expected value from 20 to 66"); | 959 | printk(" -- BAD BUS SPEED! Expected value from 20 to 66"); |
987 | goto done; | 960 | goto obsolete_option; |
988 | } | 961 | } |
989 | /* | ||
990 | * Look for interface options: "idex=" | ||
991 | */ | ||
992 | if (s[3] >= '0' && s[3] <= max_hwif) { | ||
993 | /* | ||
994 | * Be VERY CAREFUL changing this: note hardcoded indexes below | ||
995 | * (-8, -9, -10) are reserved to ease the hardcoding. | ||
996 | */ | ||
997 | static const char *ide_words[] = { | ||
998 | "minus1", "serialize", "minus3", "minus4", | ||
999 | "reset", "minus6", "ata66", "minus8", "minus9", | ||
1000 | "minus10", "four", "qd65xx", "ht6560b", "cmd640_vlb", | ||
1001 | "dtc2278", "umc8672", "ali14xx", NULL }; | ||
1002 | |||
1003 | hw = s[3] - '0'; | ||
1004 | hwif = &ide_hwifs[hw]; | ||
1005 | i = match_parm(&s[4], ide_words, vals, 3); | ||
1006 | |||
1007 | /* | ||
1008 | * Cryptic check to ensure chipset not already set for hwif. | ||
1009 | * Note: we can't depend on hwif->chipset here. | ||
1010 | */ | ||
1011 | if (i >= -18 && i <= -11) { | ||
1012 | /* chipset already specified */ | ||
1013 | if (is_chipset_set) | ||
1014 | goto bad_option; | ||
1015 | /* these drivers are for "ide0=" only */ | ||
1016 | if (hw != 0) | ||
1017 | goto bad_hwif; | ||
1018 | is_chipset_set = 1; | ||
1019 | printk("\n"); | ||
1020 | } | ||
1021 | |||
1022 | switch (i) { | ||
1023 | #ifdef CONFIG_BLK_DEV_ALI14XX | ||
1024 | case -17: /* "ali14xx" */ | ||
1025 | probe_ali14xx = 1; | ||
1026 | goto obsolete_option; | ||
1027 | #endif | ||
1028 | #ifdef CONFIG_BLK_DEV_UMC8672 | ||
1029 | case -16: /* "umc8672" */ | ||
1030 | probe_umc8672 = 1; | ||
1031 | goto obsolete_option; | ||
1032 | #endif | ||
1033 | #ifdef CONFIG_BLK_DEV_DTC2278 | ||
1034 | case -15: /* "dtc2278" */ | ||
1035 | probe_dtc2278 = 1; | ||
1036 | goto obsolete_option; | ||
1037 | #endif | ||
1038 | #ifdef CONFIG_BLK_DEV_CMD640 | ||
1039 | case -14: /* "cmd640_vlb" */ | ||
1040 | cmd640_vlb = 1; | ||
1041 | goto obsolete_option; | ||
1042 | #endif | ||
1043 | #ifdef CONFIG_BLK_DEV_HT6560B | ||
1044 | case -13: /* "ht6560b" */ | ||
1045 | probe_ht6560b = 1; | ||
1046 | goto obsolete_option; | ||
1047 | #endif | ||
1048 | #ifdef CONFIG_BLK_DEV_QD65XX | ||
1049 | case -12: /* "qd65xx" */ | ||
1050 | probe_qd65xx = 1; | ||
1051 | goto obsolete_option; | ||
1052 | #endif | ||
1053 | #ifdef CONFIG_BLK_DEV_4DRIVES | ||
1054 | case -11: /* "four" drives on one set of ports */ | ||
1055 | probe_4drives = 1; | ||
1056 | goto obsolete_option; | ||
1057 | #endif | ||
1058 | case -10: /* minus10 */ | ||
1059 | case -9: /* minus9 */ | ||
1060 | case -8: /* minus8 */ | ||
1061 | case -6: | ||
1062 | case -4: | ||
1063 | case -3: | ||
1064 | goto bad_option; | ||
1065 | case -7: /* ata66 */ | ||
1066 | #ifdef CONFIG_BLK_DEV_IDEPCI | ||
1067 | /* | ||
1068 | * Use ATA_CBL_PATA40_SHORT so drive side | ||
1069 | * cable detection is also overriden. | ||
1070 | */ | ||
1071 | hwif->cbl = ATA_CBL_PATA40_SHORT; | ||
1072 | goto obsolete_option; | ||
1073 | #else | ||
1074 | goto bad_hwif; | ||
1075 | #endif | ||
1076 | case -5: /* "reset" */ | ||
1077 | hwif->reset = 1; | ||
1078 | goto obsolete_option; | ||
1079 | case -2: /* "serialize" */ | ||
1080 | hwif->mate = &ide_hwifs[hw^1]; | ||
1081 | hwif->mate->mate = hwif; | ||
1082 | hwif->serialized = hwif->mate->serialized = 1; | ||
1083 | goto obsolete_option; | ||
1084 | 962 | ||
1085 | case -1: | ||
1086 | case 0: | ||
1087 | case 1: | ||
1088 | case 2: | ||
1089 | case 3: | ||
1090 | goto bad_option; | ||
1091 | default: | ||
1092 | printk(" -- SUPPORT NOT CONFIGURED IN THIS KERNEL\n"); | ||
1093 | return 1; | ||
1094 | } | ||
1095 | } | ||
1096 | bad_option: | 963 | bad_option: |
1097 | printk(" -- BAD OPTION\n"); | 964 | printk(" -- BAD OPTION\n"); |
1098 | return 1; | 965 | return 1; |
1099 | obsolete_option: | 966 | obsolete_option: |
1100 | printk(" -- OBSOLETE OPTION, WILL BE REMOVED SOON!\n"); | 967 | printk(" -- OBSOLETE OPTION, WILL BE REMOVED SOON!\n"); |
1101 | return 1; | 968 | return 1; |
1102 | bad_hwif: | ||
1103 | printk("-- NOT SUPPORTED ON ide%d", hw); | ||
1104 | done: | ||
1105 | printk("\n"); | ||
1106 | return 1; | ||
1107 | } | 969 | } |
1108 | 970 | ||
1109 | EXPORT_SYMBOL(ide_lock); | 971 | EXPORT_SYMBOL(ide_lock); |
@@ -1239,6 +1101,185 @@ static void ide_port_class_release(struct device *portdev) | |||
1239 | put_device(&hwif->gendev); | 1101 | put_device(&hwif->gendev); |
1240 | } | 1102 | } |
1241 | 1103 | ||
1104 | int ide_vlb_clk; | ||
1105 | EXPORT_SYMBOL_GPL(ide_vlb_clk); | ||
1106 | |||
1107 | module_param_named(vlb_clock, ide_vlb_clk, int, 0); | ||
1108 | MODULE_PARM_DESC(vlb_clock, "VLB clock frequency (in MHz)"); | ||
1109 | |||
1110 | int ide_pci_clk; | ||
1111 | EXPORT_SYMBOL_GPL(ide_pci_clk); | ||
1112 | |||
1113 | module_param_named(pci_clock, ide_pci_clk, int, 0); | ||
1114 | MODULE_PARM_DESC(pci_clock, "PCI bus clock frequency (in MHz)"); | ||
1115 | |||
1116 | static int ide_set_dev_param_mask(const char *s, struct kernel_param *kp) | ||
1117 | { | ||
1118 | int a, b, i, j = 1; | ||
1119 | unsigned int *dev_param_mask = (unsigned int *)kp->arg; | ||
1120 | |||
1121 | if (sscanf(s, "%d.%d:%d", &a, &b, &j) != 3 && | ||
1122 | sscanf(s, "%d.%d", &a, &b) != 2) | ||
1123 | return -EINVAL; | ||
1124 | |||
1125 | i = a * MAX_DRIVES + b; | ||
1126 | |||
1127 | if (i >= MAX_HWIFS * MAX_DRIVES || j < 0 || j > 1) | ||
1128 | return -EINVAL; | ||
1129 | |||
1130 | if (j) | ||
1131 | *dev_param_mask |= (1 << i); | ||
1132 | else | ||
1133 | *dev_param_mask &= (1 << i); | ||
1134 | |||
1135 | return 0; | ||
1136 | } | ||
1137 | |||
1138 | static unsigned int ide_nodma; | ||
1139 | |||
1140 | module_param_call(nodma, ide_set_dev_param_mask, NULL, &ide_nodma, 0); | ||
1141 | MODULE_PARM_DESC(nodma, "disallow DMA for a device"); | ||
1142 | |||
1143 | static unsigned int ide_noflush; | ||
1144 | |||
1145 | module_param_call(noflush, ide_set_dev_param_mask, NULL, &ide_noflush, 0); | ||
1146 | MODULE_PARM_DESC(noflush, "disable flush requests for a device"); | ||
1147 | |||
1148 | static unsigned int ide_noprobe; | ||
1149 | |||
1150 | module_param_call(noprobe, ide_set_dev_param_mask, NULL, &ide_noprobe, 0); | ||
1151 | MODULE_PARM_DESC(noprobe, "skip probing for a device"); | ||
1152 | |||
1153 | static unsigned int ide_nowerr; | ||
1154 | |||
1155 | module_param_call(nowerr, ide_set_dev_param_mask, NULL, &ide_nowerr, 0); | ||
1156 | MODULE_PARM_DESC(nowerr, "ignore the WRERR_STAT bit for a device"); | ||
1157 | |||
1158 | static unsigned int ide_cdroms; | ||
1159 | |||
1160 | module_param_call(cdrom, ide_set_dev_param_mask, NULL, &ide_cdroms, 0); | ||
1161 | MODULE_PARM_DESC(cdrom, "force device as a CD-ROM"); | ||
1162 | |||
1163 | struct chs_geom { | ||
1164 | unsigned int cyl; | ||
1165 | u8 head; | ||
1166 | u8 sect; | ||
1167 | }; | ||
1168 | |||
1169 | static unsigned int ide_disks; | ||
1170 | static struct chs_geom ide_disks_chs[MAX_HWIFS * MAX_DRIVES]; | ||
1171 | |||
1172 | static int ide_set_disk_chs(const char *str, struct kernel_param *kp) | ||
1173 | { | ||
1174 | int a, b, c = 0, h = 0, s = 0, i, j = 1; | ||
1175 | |||
1176 | if (sscanf(str, "%d.%d:%d,%d,%d", &a, &b, &c, &h, &s) != 5 && | ||
1177 | sscanf(str, "%d.%d:%d", &a, &b, &j) != 3) | ||
1178 | return -EINVAL; | ||
1179 | |||
1180 | i = a * MAX_DRIVES + b; | ||
1181 | |||
1182 | if (i >= MAX_HWIFS * MAX_DRIVES || j < 0 || j > 1) | ||
1183 | return -EINVAL; | ||
1184 | |||
1185 | if (c > INT_MAX || h > 255 || s > 255) | ||
1186 | return -EINVAL; | ||
1187 | |||
1188 | if (j) | ||
1189 | ide_disks |= (1 << i); | ||
1190 | else | ||
1191 | ide_disks &= (1 << i); | ||
1192 | |||
1193 | ide_disks_chs[i].cyl = c; | ||
1194 | ide_disks_chs[i].head = h; | ||
1195 | ide_disks_chs[i].sect = s; | ||
1196 | |||
1197 | return 0; | ||
1198 | } | ||
1199 | |||
1200 | module_param_call(chs, ide_set_disk_chs, NULL, NULL, 0); | ||
1201 | MODULE_PARM_DESC(chs, "force device as a disk (using CHS)"); | ||
1202 | |||
1203 | static void ide_dev_apply_params(ide_drive_t *drive) | ||
1204 | { | ||
1205 | int i = drive->hwif->index * MAX_DRIVES + drive->select.b.unit; | ||
1206 | |||
1207 | if (ide_nodma & (1 << i)) { | ||
1208 | printk(KERN_INFO "ide: disallowing DMA for %s\n", drive->name); | ||
1209 | drive->nodma = 1; | ||
1210 | } | ||
1211 | if (ide_noflush & (1 << i)) { | ||
1212 | printk(KERN_INFO "ide: disabling flush requests for %s\n", | ||
1213 | drive->name); | ||
1214 | drive->noflush = 1; | ||
1215 | } | ||
1216 | if (ide_noprobe & (1 << i)) { | ||
1217 | printk(KERN_INFO "ide: skipping probe for %s\n", drive->name); | ||
1218 | drive->noprobe = 1; | ||
1219 | } | ||
1220 | if (ide_nowerr & (1 << i)) { | ||
1221 | printk(KERN_INFO "ide: ignoring the WRERR_STAT bit for %s\n", | ||
1222 | drive->name); | ||
1223 | drive->bad_wstat = BAD_R_STAT; | ||
1224 | } | ||
1225 | if (ide_cdroms & (1 << i)) { | ||
1226 | printk(KERN_INFO "ide: forcing %s as a CD-ROM\n", drive->name); | ||
1227 | drive->present = 1; | ||
1228 | drive->media = ide_cdrom; | ||
1229 | /* an ATAPI device ignores DRDY */ | ||
1230 | drive->ready_stat = 0; | ||
1231 | } | ||
1232 | if (ide_disks & (1 << i)) { | ||
1233 | drive->cyl = drive->bios_cyl = ide_disks_chs[i].cyl; | ||
1234 | drive->head = drive->bios_head = ide_disks_chs[i].head; | ||
1235 | drive->sect = drive->bios_sect = ide_disks_chs[i].sect; | ||
1236 | drive->forced_geom = 1; | ||
1237 | printk(KERN_INFO "ide: forcing %s as a disk (%d/%d/%d)\n", | ||
1238 | drive->name, | ||
1239 | drive->cyl, drive->head, drive->sect); | ||
1240 | drive->present = 1; | ||
1241 | drive->media = ide_disk; | ||
1242 | drive->ready_stat = READY_STAT; | ||
1243 | } | ||
1244 | } | ||
1245 | |||
1246 | static unsigned int ide_ignore_cable; | ||
1247 | |||
1248 | static int ide_set_ignore_cable(const char *s, struct kernel_param *kp) | ||
1249 | { | ||
1250 | int i, j = 1; | ||
1251 | |||
1252 | if (sscanf(s, "%d:%d", &i, &j) != 2 && sscanf(s, "%d", &i) != 1) | ||
1253 | return -EINVAL; | ||
1254 | |||
1255 | if (i >= MAX_HWIFS || j < 0 || j > 1) | ||
1256 | return -EINVAL; | ||
1257 | |||
1258 | if (j) | ||
1259 | ide_ignore_cable |= (1 << i); | ||
1260 | else | ||
1261 | ide_ignore_cable &= (1 << i); | ||
1262 | |||
1263 | return 0; | ||
1264 | } | ||
1265 | |||
1266 | module_param_call(ignore_cable, ide_set_ignore_cable, NULL, NULL, 0); | ||
1267 | MODULE_PARM_DESC(ignore_cable, "ignore cable detection"); | ||
1268 | |||
1269 | void ide_port_apply_params(ide_hwif_t *hwif) | ||
1270 | { | ||
1271 | int i; | ||
1272 | |||
1273 | if (ide_ignore_cable & (1 << hwif->index)) { | ||
1274 | printk(KERN_INFO "ide: ignoring cable detection for %s\n", | ||
1275 | hwif->name); | ||
1276 | hwif->cbl = ATA_CBL_PATA40_SHORT; | ||
1277 | } | ||
1278 | |||
1279 | for (i = 0; i < MAX_DRIVES; i++) | ||
1280 | ide_dev_apply_params(&hwif->drives[i]); | ||
1281 | } | ||
1282 | |||
1242 | /* | 1283 | /* |
1243 | * This gets invoked once during initialization, to set *everything* up | 1284 | * This gets invoked once during initialization, to set *everything* up |
1244 | */ | 1285 | */ |
@@ -1305,11 +1346,6 @@ int __init init_module (void) | |||
1305 | 1346 | ||
1306 | void __exit cleanup_module (void) | 1347 | void __exit cleanup_module (void) |
1307 | { | 1348 | { |
1308 | int index; | ||
1309 | |||
1310 | for (index = 0; index < MAX_HWIFS; ++index) | ||
1311 | ide_unregister(index); | ||
1312 | |||
1313 | proc_ide_destroy(); | 1349 | proc_ide_destroy(); |
1314 | 1350 | ||
1315 | class_destroy(ide_port_class); | 1351 | class_destroy(ide_port_class); |
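
The ide.c hunks retire most of the old "hdx=" / "idex=" boot options and add per-device module parameters (nodma, noflush, noprobe, nowerr, cdrom, chs, ignore_cable) that take a "hwif.unit[:value]" string and set a bit in a mask that ide_dev_apply_params() and ide_port_apply_params() consult later. A minimal standalone sketch of that addressing scheme follows; the constants, the helper name, and the bit-clearing path are illustrative, not copied from the patch:

    #include <stdio.h>

    #define MAX_HWIFS  10
    #define MAX_DRIVES 2

    /* Parse "H.D" or "H.D:V" (e.g. "0.1:1") into a per-device bitmask,
     * using the same bit numbering as the new parameters:
     * bit = hwif_index * MAX_DRIVES + unit. */
    static int set_dev_param_mask(const char *s, unsigned int *mask)
    {
        int h, d, v = 1;

        if (sscanf(s, "%d.%d:%d", &h, &d, &v) != 3 &&
            sscanf(s, "%d.%d", &h, &d) != 2)
            return -1;

        int bit = h * MAX_DRIVES + d;
        if (bit < 0 || bit >= MAX_HWIFS * MAX_DRIVES || v < 0 || v > 1)
            return -1;

        if (v)
            *mask |= 1u << bit;     /* e.g. nodma=0.1 sets the bit for hdb */
        else
            *mask &= ~(1u << bit);  /* clearing needs the complement of the bit */

        return 0;
    }

    int main(void)
    {
        unsigned int nodma = 0;

        set_dev_param_mask("0.1", &nodma);    /* hdb */
        set_dev_param_mask("1.0:1", &nodma);  /* hdc */
        printf("nodma mask: %#x\n", nodma);   /* prints 0x6 */
        return 0;
    }
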
diff --git a/drivers/ide/legacy/ali14xx.c b/drivers/ide/legacy/ali14xx.c index 6efbf947c6db..90c65cf97448 100644 --- a/drivers/ide/legacy/ali14xx.c +++ b/drivers/ide/legacy/ali14xx.c | |||
@@ -116,7 +116,7 @@ static void ali14xx_set_pio_mode(ide_drive_t *drive, const u8 pio) | |||
116 | int time1, time2; | 116 | int time1, time2; |
117 | u8 param1, param2, param3, param4; | 117 | u8 param1, param2, param3, param4; |
118 | unsigned long flags; | 118 | unsigned long flags; |
119 | int bus_speed = system_bus_clock(); | 119 | int bus_speed = ide_vlb_clk ? ide_vlb_clk : system_bus_clock(); |
120 | 120 | ||
121 | /* calculate timing, according to PIO mode */ | 121 | /* calculate timing, according to PIO mode */ |
122 | time1 = ide_pio_cycle_time(drive, pio); | 122 | time1 = ide_pio_cycle_time(drive, pio); |
@@ -202,7 +202,7 @@ static const struct ide_port_info ali14xx_port_info = { | |||
202 | .name = DRV_NAME, | 202 | .name = DRV_NAME, |
203 | .chipset = ide_ali14xx, | 203 | .chipset = ide_ali14xx, |
204 | .port_ops = &ali14xx_port_ops, | 204 | .port_ops = &ali14xx_port_ops, |
205 | .host_flags = IDE_HFLAG_NO_DMA | IDE_HFLAG_NO_AUTOTUNE, | 205 | .host_flags = IDE_HFLAG_NO_DMA, |
206 | .pio_mask = ATA_PIO4, | 206 | .pio_mask = ATA_PIO4, |
207 | }; | 207 | }; |
208 | 208 | ||
@@ -220,7 +220,7 @@ static int __init ali14xx_probe(void) | |||
220 | return ide_legacy_device_add(&ali14xx_port_info, 0); | 220 | return ide_legacy_device_add(&ali14xx_port_info, 0); |
221 | } | 221 | } |
222 | 222 | ||
223 | int probe_ali14xx; | 223 | static int probe_ali14xx; |
224 | 224 | ||
225 | module_param_named(probe, probe_ali14xx, bool, 0); | 225 | module_param_named(probe, probe_ali14xx, bool, 0); |
226 | MODULE_PARM_DESC(probe, "probe for ALI M14xx chipsets"); | 226 | MODULE_PARM_DESC(probe, "probe for ALI M14xx chipsets"); |
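
In the legacy VLB drivers (ali14xx above, ht6560b below) the PIO timing math now prefers the new vlb_clock module parameter (ide_vlb_clk) and only falls back to system_bus_clock(). A small sketch of the nanoseconds-to-bus-cycles conversion those drivers perform, with an assumed 33 MHz constant standing in for the system_bus_clock() fallback:

    #include <stdio.h>

    static int ide_vlb_clk;     /* 0 unless the user passed vlb_clock=<MHz> */

    /* ceil(time_ns * clk_mhz / 1000): bus cycles needed to cover time_ns */
    static int ns_to_cycles(int time_ns, int clk_mhz)
    {
        return (time_ns * clk_mhz + 999) / 1000;
    }

    int main(void)
    {
        /* assumed fallback; the real driver calls system_bus_clock() here */
        int bus_speed = ide_vlb_clk ? ide_vlb_clk : 33;

        /* ~165 ns active time for a slow PIO mode, purely illustrative */
        printf("active cycles at %d MHz: %d\n", bus_speed,
               ns_to_cycles(165, bus_speed));
        return 0;
    }
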
diff --git a/drivers/ide/legacy/buddha.c b/drivers/ide/legacy/buddha.c index f51433bce8e4..5c730e4dd735 100644 --- a/drivers/ide/legacy/buddha.c +++ b/drivers/ide/legacy/buddha.c | |||
@@ -102,7 +102,7 @@ static int buddha_ack_intr(ide_hwif_t *hwif) | |||
102 | { | 102 | { |
103 | unsigned char ch; | 103 | unsigned char ch; |
104 | 104 | ||
105 | ch = z_readb(hwif->io_ports[IDE_IRQ_OFFSET]); | 105 | ch = z_readb(hwif->io_ports.irq_addr); |
106 | if (!(ch & 0x80)) | 106 | if (!(ch & 0x80)) |
107 | return 0; | 107 | return 0; |
108 | return 1; | 108 | return 1; |
@@ -112,9 +112,9 @@ static int xsurf_ack_intr(ide_hwif_t *hwif) | |||
112 | { | 112 | { |
113 | unsigned char ch; | 113 | unsigned char ch; |
114 | 114 | ||
115 | ch = z_readb(hwif->io_ports[IDE_IRQ_OFFSET]); | 115 | ch = z_readb(hwif->io_ports.irq_addr); |
116 | /* X-Surf needs a 0 written to IRQ register to ensure ISA bit A11 stays at 0 */ | 116 | /* X-Surf needs a 0 written to IRQ register to ensure ISA bit A11 stays at 0 */ |
117 | z_writeb(0, hwif->io_ports[IDE_IRQ_OFFSET]); | 117 | z_writeb(0, hwif->io_ports.irq_addr); |
118 | if (!(ch & 0x80)) | 118 | if (!(ch & 0x80)) |
119 | return 0; | 119 | return 0; |
120 | return 1; | 120 | return 1; |
@@ -128,13 +128,13 @@ static void __init buddha_setup_ports(hw_regs_t *hw, unsigned long base, | |||
128 | 128 | ||
129 | memset(hw, 0, sizeof(*hw)); | 129 | memset(hw, 0, sizeof(*hw)); |
130 | 130 | ||
131 | hw->io_ports[IDE_DATA_OFFSET] = base; | 131 | hw->io_ports.data_addr = base; |
132 | 132 | ||
133 | for (i = 1; i < 8; i++) | 133 | for (i = 1; i < 8; i++) |
134 | hw->io_ports[i] = base + 2 + i * 4; | 134 | hw->io_ports_array[i] = base + 2 + i * 4; |
135 | 135 | ||
136 | hw->io_ports[IDE_CONTROL_OFFSET] = ctl; | 136 | hw->io_ports.ctl_addr = ctl; |
137 | hw->io_ports[IDE_IRQ_OFFSET] = irq_port; | 137 | hw->io_ports.irq_addr = irq_port; |
138 | 138 | ||
139 | hw->irq = IRQ_AMIGA_PORTS; | 139 | hw->irq = IRQ_AMIGA_PORTS; |
140 | hw->ack_intr = ack_intr; | 140 | hw->ack_intr = ack_intr; |
diff --git a/drivers/ide/legacy/dtc2278.c b/drivers/ide/legacy/dtc2278.c index f7c4ad1c57c0..af791a02a120 100644 --- a/drivers/ide/legacy/dtc2278.c +++ b/drivers/ide/legacy/dtc2278.c | |||
@@ -101,8 +101,7 @@ static const struct ide_port_info dtc2278_port_info __initdata = { | |||
101 | IDE_HFLAG_IO_32BIT | | 101 | IDE_HFLAG_IO_32BIT | |
102 | /* disallow ->io_32bit changes */ | 102 | /* disallow ->io_32bit changes */ |
103 | IDE_HFLAG_NO_IO_32BIT | | 103 | IDE_HFLAG_NO_IO_32BIT | |
104 | IDE_HFLAG_NO_DMA | | 104 | IDE_HFLAG_NO_DMA, |
105 | IDE_HFLAG_NO_AUTOTUNE, | ||
106 | .pio_mask = ATA_PIO4, | 105 | .pio_mask = ATA_PIO4, |
107 | }; | 106 | }; |
108 | 107 | ||
@@ -131,7 +130,7 @@ static int __init dtc2278_probe(void) | |||
131 | return ide_legacy_device_add(&dtc2278_port_info, 0); | 130 | return ide_legacy_device_add(&dtc2278_port_info, 0); |
132 | } | 131 | } |
133 | 132 | ||
134 | int probe_dtc2278 = 0; | 133 | static int probe_dtc2278; |
135 | 134 | ||
136 | module_param_named(probe, probe_dtc2278, bool, 0); | 135 | module_param_named(probe, probe_dtc2278, bool, 0); |
137 | MODULE_PARM_DESC(probe, "probe for DTC2278xx chipsets"); | 136 | MODULE_PARM_DESC(probe, "probe for DTC2278xx chipsets"); |
diff --git a/drivers/ide/legacy/falconide.c b/drivers/ide/legacy/falconide.c index 5c19c422c5cc..56cdaa0eeea5 100644 --- a/drivers/ide/legacy/falconide.c +++ b/drivers/ide/legacy/falconide.c | |||
@@ -50,12 +50,12 @@ static void __init falconide_setup_ports(hw_regs_t *hw) | |||
50 | 50 | ||
51 | memset(hw, 0, sizeof(*hw)); | 51 | memset(hw, 0, sizeof(*hw)); |
52 | 52 | ||
53 | hw->io_ports[IDE_DATA_OFFSET] = ATA_HD_BASE; | 53 | hw->io_ports.data_addr = ATA_HD_BASE; |
54 | 54 | ||
55 | for (i = 1; i < 8; i++) | 55 | for (i = 1; i < 8; i++) |
56 | hw->io_ports[i] = ATA_HD_BASE + 1 + i * 4; | 56 | hw->io_ports_array[i] = ATA_HD_BASE + 1 + i * 4; |
57 | 57 | ||
58 | hw->io_ports[IDE_CONTROL_OFFSET] = ATA_HD_BASE + ATA_HD_CONTROL; | 58 | hw->io_ports.ctl_addr = ATA_HD_BASE + ATA_HD_CONTROL; |
59 | 59 | ||
60 | hw->irq = IRQ_MFP_IDE; | 60 | hw->irq = IRQ_MFP_IDE; |
61 | hw->ack_intr = NULL; | 61 | hw->ack_intr = NULL; |
diff --git a/drivers/ide/legacy/gayle.c b/drivers/ide/legacy/gayle.c index a0c9601bdaf0..a9c2593a898c 100644 --- a/drivers/ide/legacy/gayle.c +++ b/drivers/ide/legacy/gayle.c | |||
@@ -63,6 +63,8 @@ | |||
63 | #define GAYLE_HAS_CONTROL_REG (!ide_doubler) | 63 | #define GAYLE_HAS_CONTROL_REG (!ide_doubler) |
64 | #define GAYLE_IDEREG_SIZE (ide_doubler ? 0x1000 : 0x2000) | 64 | #define GAYLE_IDEREG_SIZE (ide_doubler ? 0x1000 : 0x2000) |
65 | int ide_doubler = 0; /* support IDE doublers? */ | 65 | int ide_doubler = 0; /* support IDE doublers? */ |
66 | module_param_named(doubler, ide_doubler, bool, 0); | ||
67 | MODULE_PARM_DESC(doubler, "enable support for IDE doublers"); | ||
66 | #endif /* CONFIG_BLK_DEV_IDEDOUBLER */ | 68 | #endif /* CONFIG_BLK_DEV_IDEDOUBLER */ |
67 | 69 | ||
68 | 70 | ||
@@ -74,7 +76,7 @@ static int gayle_ack_intr_a4000(ide_hwif_t *hwif) | |||
74 | { | 76 | { |
75 | unsigned char ch; | 77 | unsigned char ch; |
76 | 78 | ||
77 | ch = z_readb(hwif->io_ports[IDE_IRQ_OFFSET]); | 79 | ch = z_readb(hwif->io_ports.irq_addr); |
78 | if (!(ch & GAYLE_IRQ_IDE)) | 80 | if (!(ch & GAYLE_IRQ_IDE)) |
79 | return 0; | 81 | return 0; |
80 | return 1; | 82 | return 1; |
@@ -84,11 +86,11 @@ static int gayle_ack_intr_a1200(ide_hwif_t *hwif) | |||
84 | { | 86 | { |
85 | unsigned char ch; | 87 | unsigned char ch; |
86 | 88 | ||
87 | ch = z_readb(hwif->io_ports[IDE_IRQ_OFFSET]); | 89 | ch = z_readb(hwif->io_ports.irq_addr); |
88 | if (!(ch & GAYLE_IRQ_IDE)) | 90 | if (!(ch & GAYLE_IRQ_IDE)) |
89 | return 0; | 91 | return 0; |
90 | (void)z_readb(hwif->io_ports[IDE_STATUS_OFFSET]); | 92 | (void)z_readb(hwif->io_ports.status_addr); |
91 | z_writeb(0x7c, hwif->io_ports[IDE_IRQ_OFFSET]); | 93 | z_writeb(0x7c, hwif->io_ports.irq_addr); |
92 | return 1; | 94 | return 1; |
93 | } | 95 | } |
94 | 96 | ||
@@ -100,13 +102,13 @@ static void __init gayle_setup_ports(hw_regs_t *hw, unsigned long base, | |||
100 | 102 | ||
101 | memset(hw, 0, sizeof(*hw)); | 103 | memset(hw, 0, sizeof(*hw)); |
102 | 104 | ||
103 | hw->io_ports[IDE_DATA_OFFSET] = base; | 105 | hw->io_ports.data_addr = base; |
104 | 106 | ||
105 | for (i = 1; i < 8; i++) | 107 | for (i = 1; i < 8; i++) |
106 | hw->io_ports[i] = base + 2 + i * 4; | 108 | hw->io_ports_array[i] = base + 2 + i * 4; |
107 | 109 | ||
108 | hw->io_ports[IDE_CONTROL_OFFSET] = ctl; | 110 | hw->io_ports.ctl_addr = ctl; |
109 | hw->io_ports[IDE_IRQ_OFFSET] = irq_port; | 111 | hw->io_ports.irq_addr = irq_port; |
110 | 112 | ||
111 | hw->irq = IRQ_AMIGA_PORTS; | 113 | hw->irq = IRQ_AMIGA_PORTS; |
112 | hw->ack_intr = ack_intr; | 114 | hw->ack_intr = ack_intr; |
diff --git a/drivers/ide/legacy/ht6560b.c b/drivers/ide/legacy/ht6560b.c index 702d8deb5780..4fe516df9f74 100644 --- a/drivers/ide/legacy/ht6560b.c +++ b/drivers/ide/legacy/ht6560b.c | |||
@@ -157,8 +157,8 @@ static void ht6560b_selectproc (ide_drive_t *drive) | |||
157 | /* | 157 | /* |
158 | * Set timing for this drive: | 158 | * Set timing for this drive: |
159 | */ | 159 | */ |
160 | outb(timing, hwif->io_ports[IDE_SELECT_OFFSET]); | 160 | outb(timing, hwif->io_ports.device_addr); |
161 | (void)inb(hwif->io_ports[IDE_STATUS_OFFSET]); | 161 | (void)inb(hwif->io_ports.status_addr); |
162 | #ifdef DEBUG | 162 | #ifdef DEBUG |
163 | printk("ht6560b: %s: select=%#x timing=%#x\n", | 163 | printk("ht6560b: %s: select=%#x timing=%#x\n", |
164 | drive->name, select, timing); | 164 | drive->name, select, timing); |
@@ -212,8 +212,8 @@ static u8 ht_pio2timings(ide_drive_t *drive, const u8 pio) | |||
212 | { | 212 | { |
213 | int active_time, recovery_time; | 213 | int active_time, recovery_time; |
214 | int active_cycles, recovery_cycles; | 214 | int active_cycles, recovery_cycles; |
215 | int bus_speed = system_bus_clock(); | 215 | int bus_speed = ide_vlb_clk ? ide_vlb_clk : system_bus_clock(); |
216 | 216 | ||
217 | if (pio) { | 217 | if (pio) { |
218 | unsigned int cycle_time; | 218 | unsigned int cycle_time; |
219 | 219 | ||
@@ -323,7 +323,7 @@ static void __init ht6560b_port_init_devs(ide_hwif_t *hwif) | |||
323 | hwif->drives[1].drive_data = t; | 323 | hwif->drives[1].drive_data = t; |
324 | } | 324 | } |
325 | 325 | ||
326 | int probe_ht6560b = 0; | 326 | static int probe_ht6560b; |
327 | 327 | ||
328 | module_param_named(probe, probe_ht6560b, bool, 0); | 328 | module_param_named(probe, probe_ht6560b, bool, 0); |
329 | MODULE_PARM_DESC(probe, "probe for HT6560B chipset"); | 329 | MODULE_PARM_DESC(probe, "probe for HT6560B chipset"); |
@@ -340,7 +340,6 @@ static const struct ide_port_info ht6560b_port_info __initdata = { | |||
340 | .port_ops = &ht6560b_port_ops, | 340 | .port_ops = &ht6560b_port_ops, |
341 | .host_flags = IDE_HFLAG_SERIALIZE | /* is this needed? */ | 341 | .host_flags = IDE_HFLAG_SERIALIZE | /* is this needed? */ |
342 | IDE_HFLAG_NO_DMA | | 342 | IDE_HFLAG_NO_DMA | |
343 | IDE_HFLAG_NO_AUTOTUNE | | ||
344 | IDE_HFLAG_ABUSE_PREFETCH, | 343 | IDE_HFLAG_ABUSE_PREFETCH, |
345 | .pio_mask = ATA_PIO4, | 344 | .pio_mask = ATA_PIO4, |
346 | }; | 345 | }; |
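
The ht6560b timing hunk above (and the qd65xx one below) prefers a user-supplied ide_vlb_clk over the kernel's system_bus_clock() estimate. Below is a standalone model of that fallback; both symbols here are stand-ins, the helper name is hypothetical since the drivers simply open-code the ternary, and the assumption is that ide_vlb_clk stays 0 when no VLB clock was given.

#include <stdio.h>

static int ide_vlb_clk;			/* stand-in for the module parameter; 0 = not given */

static int system_bus_clock(void)	/* stub for the kernel's estimate */
{
	return 50;			/* pretend the kernel guessed 50 MHz */
}

static int vlb_bus_clock_mhz(void)	/* hypothetical helper; drivers open-code this */
{
	return ide_vlb_clk ? ide_vlb_clk : system_bus_clock();
}

int main(void)
{
	printf("no parameter: %d MHz\n", vlb_bus_clock_mhz());
	ide_vlb_clk = 33;		/* user supplied a VLB clock */
	printf("with ide_vlb_clk=33: %d MHz\n", vlb_bus_clock_mhz());
	return 0;
}
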
diff --git a/drivers/ide/legacy/ide-4drives.c b/drivers/ide/legacy/ide-4drives.c index 17f94d0cb539..ecae916a3385 100644 --- a/drivers/ide/legacy/ide-4drives.c +++ b/drivers/ide/legacy/ide-4drives.c | |||
@@ -6,7 +6,7 @@ | |||
6 | 6 | ||
7 | #define DRV_NAME "ide-4drives" | 7 | #define DRV_NAME "ide-4drives" |
8 | 8 | ||
9 | int probe_4drives; | 9 | static int probe_4drives; |
10 | 10 | ||
11 | module_param_named(probe, probe_4drives, bool, 0); | 11 | module_param_named(probe, probe_4drives, bool, 0); |
12 | MODULE_PARM_DESC(probe, "probe for generic IDE chipset with 4 drives/port"); | 12 | MODULE_PARM_DESC(probe, "probe for generic IDE chipset with 4 drives/port"); |
diff --git a/drivers/ide/legacy/ide-cs.c b/drivers/ide/legacy/ide-cs.c index 855e157b18d3..aa2ea3deac85 100644 --- a/drivers/ide/legacy/ide-cs.c +++ b/drivers/ide/legacy/ide-cs.c | |||
@@ -140,8 +140,8 @@ static void ide_detach(struct pcmcia_device *link) | |||
140 | 140 | ||
141 | ide_release(link); | 141 | ide_release(link); |
142 | 142 | ||
143 | release_region(hwif->io_ports[IDE_CONTROL_OFFSET], 1); | 143 | release_region(hwif->io_ports.ctl_addr, 1); |
144 | release_region(hwif->io_ports[IDE_DATA_OFFSET], 8); | 144 | release_region(hwif->io_ports.data_addr, 8); |
145 | 145 | ||
146 | kfree(info); | 146 | kfree(info); |
147 | } /* ide_detach */ | 147 | } /* ide_detach */ |
@@ -183,11 +183,7 @@ static ide_hwif_t *idecs_register(unsigned long io, unsigned long ctl, | |||
183 | 183 | ||
184 | i = hwif->index; | 184 | i = hwif->index; |
185 | 185 | ||
186 | if (hwif->present) | 186 | ide_init_port_data(hwif, i); |
187 | ide_unregister(i); | ||
188 | else | ||
189 | ide_init_port_data(hwif, i); | ||
190 | |||
191 | ide_init_port_hw(hwif, &hw); | 187 | ide_init_port_hw(hwif, &hw); |
192 | hwif->port_ops = &idecs_port_ops; | 188 | hwif->port_ops = &idecs_port_ops; |
193 | 189 | ||
@@ -390,7 +386,7 @@ void ide_release(struct pcmcia_device *link) | |||
390 | if (info->ndev) { | 386 | if (info->ndev) { |
391 | /* FIXME: if this fails we need to queue the cleanup somehow | 387 | /* FIXME: if this fails we need to queue the cleanup somehow |
392 | -- need to investigate the required PCMCIA magic */ | 388 | -- need to investigate the required PCMCIA magic */ |
393 | ide_unregister(hwif->index); | 389 | ide_unregister(hwif); |
394 | } | 390 | } |
395 | info->ndev = 0; | 391 | info->ndev = 0; |
396 | 392 | ||
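
The ide-cs hunks above (and the delkin_cb and scc_pata hunks below) drop the present/unregister dance in probe and switch ide_unregister() from an index argument to the hwif pointer itself. A hedged kernel-context fragment of the resulting attach/detach pairing follows; the my_* names are made up for illustration, while the ide_* calls are the ones visible in this diff.

/* Kernel-context fragment, not standalone. */
static ide_hwif_t *my_port;

static int my_card_attach(hw_regs_t *hw)
{
	ide_hwif_t *hwif = ide_find_port();

	if (hwif == NULL)
		return -ENODEV;

	ide_init_port_data(hwif, hwif->index);	/* reset per-port data */
	ide_init_port_hw(hwif, hw);		/* install the I/O resources */

	my_port = hwif;
	return 0;
}

static void my_card_detach(void)
{
	ide_unregister(my_port);	/* takes the hwif pointer now, not an index */
	my_port = NULL;
}
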
diff --git a/drivers/ide/legacy/ide_platform.c b/drivers/ide/legacy/ide_platform.c index 822f48b05c70..8279dc7ca4c0 100644 --- a/drivers/ide/legacy/ide_platform.c +++ b/drivers/ide/legacy/ide_platform.c | |||
@@ -30,14 +30,14 @@ static void __devinit plat_ide_setup_ports(hw_regs_t *hw, | |||
30 | unsigned long port = (unsigned long)base; | 30 | unsigned long port = (unsigned long)base; |
31 | int i; | 31 | int i; |
32 | 32 | ||
33 | hw->io_ports[IDE_DATA_OFFSET] = port; | 33 | hw->io_ports.data_addr = port; |
34 | 34 | ||
35 | port += (1 << pdata->ioport_shift); | 35 | port += (1 << pdata->ioport_shift); |
36 | for (i = IDE_ERROR_OFFSET; i <= IDE_STATUS_OFFSET; | 36 | for (i = 1; i <= 7; |
37 | i++, port += (1 << pdata->ioport_shift)) | 37 | i++, port += (1 << pdata->ioport_shift)) |
38 | hw->io_ports[i] = port; | 38 | hw->io_ports_array[i] = port; |
39 | 39 | ||
40 | hw->io_ports[IDE_CONTROL_OFFSET] = (unsigned long)ctrl; | 40 | hw->io_ports.ctl_addr = (unsigned long)ctrl; |
41 | 41 | ||
42 | hw->irq = irq; | 42 | hw->irq = irq; |
43 | 43 | ||
@@ -120,7 +120,7 @@ static int __devexit plat_ide_remove(struct platform_device *pdev) | |||
120 | { | 120 | { |
121 | ide_hwif_t *hwif = pdev->dev.driver_data; | 121 | ide_hwif_t *hwif = pdev->dev.driver_data; |
122 | 122 | ||
123 | ide_unregister(hwif->index); | 123 | ide_unregister(hwif); |
124 | 124 | ||
125 | return 0; | 125 | return 0; |
126 | } | 126 | } |
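
In plat_ide_setup_ports() above, task-file register n ends up at base + (n << ioport_shift), so one loop serves both byte-packed and wider-spaced buses. A standalone illustration with made-up values:

#include <stdio.h>

int main(void)
{
	unsigned long base = 0x20000000;	/* illustrative MMIO base */
	unsigned int shift = 1;			/* stand-in for pdata->ioport_shift */
	unsigned long port = base;
	int i;

	for (i = 0; i <= 7; i++, port += (1 << shift))
		printf("reg %d at %#lx\n", i, port);	/* shift=1 spaces them 2 bytes apart */
	return 0;
}
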
diff --git a/drivers/ide/legacy/macide.c b/drivers/ide/legacy/macide.c index 26546d0afc7f..1f527bbf8d96 100644 --- a/drivers/ide/legacy/macide.c +++ b/drivers/ide/legacy/macide.c | |||
@@ -72,9 +72,9 @@ static void __init macide_setup_ports(hw_regs_t *hw, unsigned long base, | |||
72 | memset(hw, 0, sizeof(*hw)); | 72 | memset(hw, 0, sizeof(*hw)); |
73 | 73 | ||
74 | for (i = 0; i < 8; i++) | 74 | for (i = 0; i < 8; i++) |
75 | hw->io_ports[i] = base + i * 4; | 75 | hw->io_ports_array[i] = base + i * 4; |
76 | 76 | ||
77 | hw->io_ports[IDE_CONTROL_OFFSET] = base + IDE_CONTROL; | 77 | hw->io_ports.ctl_addr = base + IDE_CONTROL; |
78 | 78 | ||
79 | hw->irq = irq; | 79 | hw->irq = irq; |
80 | hw->ack_intr = ack_intr; | 80 | hw->ack_intr = ack_intr; |
diff --git a/drivers/ide/legacy/q40ide.c b/drivers/ide/legacy/q40ide.c index f23999dd3d46..a3573d40b4b7 100644 --- a/drivers/ide/legacy/q40ide.c +++ b/drivers/ide/legacy/q40ide.c | |||
@@ -80,10 +80,10 @@ void q40_ide_setup_ports ( hw_regs_t *hw, | |||
80 | for (i = 0; i < IDE_NR_PORTS; i++) { | 80 | for (i = 0; i < IDE_NR_PORTS; i++) { |
81 | /* BIG FAT WARNING: | 81 | /* BIG FAT WARNING: |
82 | assumption: only DATA port is ever used in 16 bit mode */ | 82 | assumption: only DATA port is ever used in 16 bit mode */ |
83 | if ( i==0 ) | 83 | if (i == 0) |
84 | hw->io_ports[i] = Q40_ISA_IO_W(base + offsets[i]); | 84 | hw->io_ports_array[i] = Q40_ISA_IO_W(base + offsets[i]); |
85 | else | 85 | else |
86 | hw->io_ports[i] = Q40_ISA_IO_B(base + offsets[i]); | 86 | hw->io_ports_array[i] = Q40_ISA_IO_B(base + offsets[i]); |
87 | } | 87 | } |
88 | 88 | ||
89 | hw->irq = irq; | 89 | hw->irq = irq; |
diff --git a/drivers/ide/legacy/qd65xx.c b/drivers/ide/legacy/qd65xx.c index 15a99aae0cf9..6424af154325 100644 --- a/drivers/ide/legacy/qd65xx.c +++ b/drivers/ide/legacy/qd65xx.c | |||
@@ -11,11 +11,7 @@ | |||
11 | * | 11 | * |
12 | * QDI QD6500/QD6580 EIDE controller fast support | 12 | * QDI QD6500/QD6580 EIDE controller fast support |
13 | * | 13 | * |
14 | * Please set local bus speed using kernel parameter idebus | ||
15 | * for example, "idebus=33" stands for 33Mhz VLbus | ||
16 | * To activate controller support, use "ide0=qd65xx" | 14 | * To activate controller support, use "ide0=qd65xx" |
17 | * To enable tuning, use "hda=autotune hdb=autotune" | ||
18 | * To enable 2nd channel tuning (qd6580 only), use "hdc=autotune hdd=autotune" | ||
19 | */ | 15 | */ |
20 | 16 | ||
21 | /* | 17 | /* |
@@ -114,17 +110,18 @@ static void qd65xx_select(ide_drive_t *drive) | |||
114 | 110 | ||
115 | static u8 qd6500_compute_timing (ide_hwif_t *hwif, int active_time, int recovery_time) | 111 | static u8 qd6500_compute_timing (ide_hwif_t *hwif, int active_time, int recovery_time) |
116 | { | 112 | { |
117 | u8 active_cycle,recovery_cycle; | 113 | int clk = ide_vlb_clk ? ide_vlb_clk : system_bus_clock(); |
114 | u8 act_cyc, rec_cyc; | ||
118 | 115 | ||
119 | if (system_bus_clock()<=33) { | 116 | if (clk <= 33) { |
120 | active_cycle = 9 - IDE_IN(active_time * system_bus_clock() / 1000 + 1, 2, 9); | 117 | act_cyc = 9 - IDE_IN(active_time * clk / 1000 + 1, 2, 9); |
121 | recovery_cycle = 15 - IDE_IN(recovery_time * system_bus_clock() / 1000 + 1, 0, 15); | 118 | rec_cyc = 15 - IDE_IN(recovery_time * clk / 1000 + 1, 0, 15); |
122 | } else { | 119 | } else { |
123 | active_cycle = 8 - IDE_IN(active_time * system_bus_clock() / 1000 + 1, 1, 8); | 120 | act_cyc = 8 - IDE_IN(active_time * clk / 1000 + 1, 1, 8); |
124 | recovery_cycle = 18 - IDE_IN(recovery_time * system_bus_clock() / 1000 + 1, 3, 18); | 121 | rec_cyc = 18 - IDE_IN(recovery_time * clk / 1000 + 1, 3, 18); |
125 | } | 122 | } |
126 | 123 | ||
127 | return((recovery_cycle<<4) | 0x08 | active_cycle); | 124 | return (rec_cyc << 4) | 0x08 | act_cyc; |
128 | } | 125 | } |
129 | 126 | ||
130 | /* | 127 | /* |
@@ -135,10 +132,13 @@ static u8 qd6500_compute_timing (ide_hwif_t *hwif, int active_time, int recovery | |||
135 | 132 | ||
136 | static u8 qd6580_compute_timing (int active_time, int recovery_time) | 133 | static u8 qd6580_compute_timing (int active_time, int recovery_time) |
137 | { | 134 | { |
138 | u8 active_cycle = 17 - IDE_IN(active_time * system_bus_clock() / 1000 + 1, 2, 17); | 135 | int clk = ide_vlb_clk ? ide_vlb_clk : system_bus_clock(); |
139 | u8 recovery_cycle = 15 - IDE_IN(recovery_time * system_bus_clock() / 1000 + 1, 2, 15); | 136 | u8 act_cyc, rec_cyc; |
140 | 137 | ||
141 | return((recovery_cycle<<4) | active_cycle); | 138 | act_cyc = 17 - IDE_IN(active_time * clk / 1000 + 1, 2, 17); |
139 | rec_cyc = 15 - IDE_IN(recovery_time * clk / 1000 + 1, 2, 15); | ||
140 | |||
141 | return (rec_cyc << 4) | act_cyc; | ||
142 | } | 142 | } |
143 | 143 | ||
144 | /* | 144 | /* |
@@ -322,8 +322,7 @@ static const struct ide_port_info qd65xx_port_info __initdata = { | |||
322 | .name = DRV_NAME, | 322 | .name = DRV_NAME, |
323 | .chipset = ide_qd65xx, | 323 | .chipset = ide_qd65xx, |
324 | .host_flags = IDE_HFLAG_IO_32BIT | | 324 | .host_flags = IDE_HFLAG_IO_32BIT | |
325 | IDE_HFLAG_NO_DMA | | 325 | IDE_HFLAG_NO_DMA, |
326 | IDE_HFLAG_NO_AUTOTUNE, | ||
327 | .pio_mask = ATA_PIO4, | 326 | .pio_mask = ATA_PIO4, |
328 | }; | 327 | }; |
329 | 328 | ||
@@ -399,7 +398,7 @@ static int __init qd_probe(int base) | |||
399 | return rc; | 398 | return rc; |
400 | } | 399 | } |
401 | 400 | ||
402 | int probe_qd65xx = 0; | 401 | static int probe_qd65xx; |
403 | 402 | ||
404 | module_param_named(probe, probe_qd65xx, bool, 0); | 403 | module_param_named(probe, probe_qd65xx, bool, 0); |
405 | MODULE_PARM_DESC(probe, "probe for QD65xx chipsets"); | 404 | MODULE_PARM_DESC(probe, "probe for QD65xx chipsets"); |
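
The qd6500/qd6580 hunks feed the chosen clock (ide_vlb_clk if given, otherwise system_bus_clock()) into the cycle-count formulas shown above. The standalone re-computation below walks the clk <= 33 branch; IDE_IN() is assumed here to be a plain clamp into [lo, hi], and the nanosecond timings are illustrative (the program prints 0xbd for them).

#include <stdio.h>

#define IDE_IN(x, lo, hi)  ((x) < (lo) ? (lo) : ((x) > (hi) ? (hi) : (x)))	/* assumed clamp */

int main(void)
{
	int clk = 33;			/* MHz: ide_vlb_clk if set, else the kernel's guess */
	int active_time = 110;		/* ns, illustrative */
	int recovery_time = 120;	/* ns, illustrative */
	int act_cyc, rec_cyc;

	/* clk <= 33 branch of qd6500_compute_timing() */
	act_cyc = 9 - IDE_IN(active_time * clk / 1000 + 1, 2, 9);
	rec_cyc = 15 - IDE_IN(recovery_time * clk / 1000 + 1, 0, 15);

	printf("timing byte = %#x\n", (rec_cyc << 4) | 0x08 | act_cyc);
	return 0;
}
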
diff --git a/drivers/ide/legacy/umc8672.c b/drivers/ide/legacy/umc8672.c index 17d515329fe0..b54a14a57755 100644 --- a/drivers/ide/legacy/umc8672.c +++ b/drivers/ide/legacy/umc8672.c | |||
@@ -130,7 +130,7 @@ static const struct ide_port_info umc8672_port_info __initdata = { | |||
130 | .name = DRV_NAME, | 130 | .name = DRV_NAME, |
131 | .chipset = ide_umc8672, | 131 | .chipset = ide_umc8672, |
132 | .port_ops = &umc8672_port_ops, | 132 | .port_ops = &umc8672_port_ops, |
133 | .host_flags = IDE_HFLAG_NO_DMA | IDE_HFLAG_NO_AUTOTUNE, | 133 | .host_flags = IDE_HFLAG_NO_DMA, |
134 | .pio_mask = ATA_PIO4, | 134 | .pio_mask = ATA_PIO4, |
135 | }; | 135 | }; |
136 | 136 | ||
@@ -158,7 +158,7 @@ static int __init umc8672_probe(void) | |||
158 | return ide_legacy_device_add(&umc8672_port_info, 0); | 158 | return ide_legacy_device_add(&umc8672_port_info, 0); |
159 | } | 159 | } |
160 | 160 | ||
161 | int probe_umc8672; | 161 | static int probe_umc8672; |
162 | 162 | ||
163 | module_param_named(probe, probe_umc8672, bool, 0); | 163 | module_param_named(probe, probe_umc8672, bool, 0); |
164 | MODULE_PARM_DESC(probe, "probe for UMC8672 chipset"); | 164 | MODULE_PARM_DESC(probe, "probe for UMC8672 chipset"); |
diff --git a/drivers/ide/mips/au1xxx-ide.c b/drivers/ide/mips/au1xxx-ide.c index 3485a310c95b..296b9c674bae 100644 --- a/drivers/ide/mips/au1xxx-ide.c +++ b/drivers/ide/mips/au1xxx-ide.c | |||
@@ -502,12 +502,11 @@ static int auide_ddma_init(ide_hwif_t *hwif, const struct ide_port_info *d) | |||
502 | static void auide_setup_ports(hw_regs_t *hw, _auide_hwif *ahwif) | 502 | static void auide_setup_ports(hw_regs_t *hw, _auide_hwif *ahwif) |
503 | { | 503 | { |
504 | int i; | 504 | int i; |
505 | unsigned long *ata_regs = hw->io_ports; | 505 | unsigned long *ata_regs = hw->io_ports_array; |
506 | 506 | ||
507 | /* FIXME? */ | 507 | /* FIXME? */ |
508 | for (i = 0; i < IDE_CONTROL_OFFSET; i++) { | 508 | for (i = 0; i < 8; i++) |
509 | *ata_regs++ = ahwif->regbase + (i << AU1XXX_ATA_REG_OFFSET); | 509 | *ata_regs++ = ahwif->regbase + (i << AU1XXX_ATA_REG_OFFSET); |
510 | } | ||
511 | 510 | ||
512 | /* set the Alternative Status register */ | 511 | /* set the Alternative Status register */ |
513 | *ata_regs = ahwif->regbase + (14 << AU1XXX_ATA_REG_OFFSET); | 512 | *ata_regs = ahwif->regbase + (14 << AU1XXX_ATA_REG_OFFSET); |
@@ -627,7 +626,7 @@ static int au_ide_remove(struct device *dev) | |||
627 | ide_hwif_t *hwif = dev_get_drvdata(dev); | 626 | ide_hwif_t *hwif = dev_get_drvdata(dev); |
628 | _auide_hwif *ahwif = &auide_hwif; | 627 | _auide_hwif *ahwif = &auide_hwif; |
629 | 628 | ||
630 | ide_unregister(hwif->index); | 629 | ide_unregister(hwif); |
631 | 630 | ||
632 | iounmap((void *)ahwif->regbase); | 631 | iounmap((void *)ahwif->regbase); |
633 | 632 | ||
diff --git a/drivers/ide/mips/swarm.c b/drivers/ide/mips/swarm.c index 112fe566bb93..68947626e4aa 100644 --- a/drivers/ide/mips/swarm.c +++ b/drivers/ide/mips/swarm.c | |||
@@ -113,10 +113,10 @@ static int __devinit swarm_ide_probe(struct device *dev) | |||
113 | 113 | ||
114 | hwif->chipset = ide_generic; | 114 | hwif->chipset = ide_generic; |
115 | 115 | ||
116 | for (i = IDE_DATA_OFFSET; i <= IDE_STATUS_OFFSET; i++) | 116 | for (i = 0; i <= 7; i++) |
117 | hwif->io_ports[i] = | 117 | hwif->io_ports_array[i] = |
118 | (unsigned long)(base + ((0x1f0 + i) << 5)); | 118 | (unsigned long)(base + ((0x1f0 + i) << 5)); |
119 | hwif->io_ports[IDE_CONTROL_OFFSET] = | 119 | hwif->io_ports.ctl_addr = |
120 | (unsigned long)(base + (0x3f6 << 5)); | 120 | (unsigned long)(base + (0x3f6 << 5)); |
121 | hwif->irq = K_INT_GB_IDE; | 121 | hwif->irq = K_INT_GB_IDE; |
122 | 122 | ||
diff --git a/drivers/ide/pci/aec62xx.c b/drivers/ide/pci/aec62xx.c index ca16f37f9486..7f46c224b7c4 100644 --- a/drivers/ide/pci/aec62xx.c +++ b/drivers/ide/pci/aec62xx.c | |||
@@ -140,7 +140,7 @@ static void aec_set_pio_mode(ide_drive_t *drive, const u8 pio) | |||
140 | 140 | ||
141 | static unsigned int __devinit init_chipset_aec62xx(struct pci_dev *dev, const char *name) | 141 | static unsigned int __devinit init_chipset_aec62xx(struct pci_dev *dev, const char *name) |
142 | { | 142 | { |
143 | int bus_speed = system_bus_clock(); | 143 | int bus_speed = ide_pci_clk ? ide_pci_clk : system_bus_clock(); |
144 | 144 | ||
145 | if (bus_speed <= 33) | 145 | if (bus_speed <= 33) |
146 | pci_set_drvdata(dev, (void *) aec6xxx_33_base); | 146 | pci_set_drvdata(dev, (void *) aec6xxx_33_base); |
diff --git a/drivers/ide/pci/alim15x3.c b/drivers/ide/pci/alim15x3.c index b5a3bc33e167..b36a22b8c213 100644 --- a/drivers/ide/pci/alim15x3.c +++ b/drivers/ide/pci/alim15x3.c | |||
@@ -38,8 +38,6 @@ | |||
38 | 38 | ||
39 | #include <asm/io.h> | 39 | #include <asm/io.h> |
40 | 40 | ||
41 | #define DISPLAY_ALI_TIMINGS | ||
42 | |||
43 | /* | 41 | /* |
44 | * ALi devices are not plug in. Otherwise these static values would | 42 | * ALi devices are not plug in. Otherwise these static values would |
45 | * need to go. They ought to go away anyway | 43 | * need to go. They ought to go away anyway |
@@ -49,236 +47,6 @@ static u8 m5229_revision; | |||
49 | static u8 chip_is_1543c_e; | 47 | static u8 chip_is_1543c_e; |
50 | static struct pci_dev *isa_dev; | 48 | static struct pci_dev *isa_dev; |
51 | 49 | ||
52 | #if defined(DISPLAY_ALI_TIMINGS) && defined(CONFIG_IDE_PROC_FS) | ||
53 | #include <linux/stat.h> | ||
54 | #include <linux/proc_fs.h> | ||
55 | |||
56 | static u8 ali_proc = 0; | ||
57 | |||
58 | static struct pci_dev *bmide_dev; | ||
59 | |||
60 | static char *fifo[4] = { | ||
61 | "FIFO Off", | ||
62 | "FIFO On ", | ||
63 | "DMA mode", | ||
64 | "PIO mode" }; | ||
65 | |||
66 | static char *udmaT[8] = { | ||
67 | "1.5T", | ||
68 | " 2T", | ||
69 | "2.5T", | ||
70 | " 3T", | ||
71 | "3.5T", | ||
72 | " 4T", | ||
73 | " 6T", | ||
74 | " 8T" | ||
75 | }; | ||
76 | |||
77 | static char *channel_status[8] = { | ||
78 | "OK ", | ||
79 | "busy ", | ||
80 | "DRQ ", | ||
81 | "DRQ busy ", | ||
82 | "error ", | ||
83 | "error busy ", | ||
84 | "error DRQ ", | ||
85 | "error DRQ busy" | ||
86 | }; | ||
87 | |||
88 | /** | ||
89 | * ali_get_info - generate proc file for ALi IDE | ||
90 | * @buffer: buffer to fill | ||
91 | * @addr: address of user start in buffer | ||
92 | * @offset: offset into 'file' | ||
93 | * @count: buffer count | ||
94 | * | ||
95 | * Walks the Ali devices and outputs summary data on the tuning and | ||
96 | * anything else that will help with debugging | ||
97 | */ | ||
98 | |||
99 | static int ali_get_info (char *buffer, char **addr, off_t offset, int count) | ||
100 | { | ||
101 | unsigned long bibma; | ||
102 | u8 reg53h, reg5xh, reg5yh, reg5xh1, reg5yh1, c0, c1, rev, tmp; | ||
103 | char *q, *p = buffer; | ||
104 | |||
105 | /* fetch rev. */ | ||
106 | pci_read_config_byte(bmide_dev, 0x08, &rev); | ||
107 | if (rev >= 0xc1) /* M1543C or newer */ | ||
108 | udmaT[7] = " ???"; | ||
109 | else | ||
110 | fifo[3] = " ??? "; | ||
111 | |||
112 | /* first fetch bibma: */ | ||
113 | |||
114 | bibma = pci_resource_start(bmide_dev, 4); | ||
115 | |||
116 | /* | ||
117 | * at that point bibma+0x2 et bibma+0xa are byte | ||
118 | * registers to investigate: | ||
119 | */ | ||
120 | c0 = inb(bibma + 0x02); | ||
121 | c1 = inb(bibma + 0x0a); | ||
122 | |||
123 | p += sprintf(p, | ||
124 | "\n Ali M15x3 Chipset.\n"); | ||
125 | p += sprintf(p, | ||
126 | " ------------------\n"); | ||
127 | pci_read_config_byte(bmide_dev, 0x78, ®53h); | ||
128 | p += sprintf(p, "PCI Clock: %d.\n", reg53h); | ||
129 | |||
130 | pci_read_config_byte(bmide_dev, 0x53, ®53h); | ||
131 | p += sprintf(p, | ||
132 | "CD_ROM FIFO:%s, CD_ROM DMA:%s\n", | ||
133 | (reg53h & 0x02) ? "Yes" : "No ", | ||
134 | (reg53h & 0x01) ? "Yes" : "No " ); | ||
135 | pci_read_config_byte(bmide_dev, 0x74, ®53h); | ||
136 | p += sprintf(p, | ||
137 | "FIFO Status: contains %d Words, runs%s%s\n\n", | ||
138 | (reg53h & 0x3f), | ||
139 | (reg53h & 0x40) ? " OVERWR" : "", | ||
140 | (reg53h & 0x80) ? " OVERRD." : "." ); | ||
141 | |||
142 | p += sprintf(p, | ||
143 | "-------------------primary channel" | ||
144 | "-------------------secondary channel" | ||
145 | "---------\n\n"); | ||
146 | |||
147 | pci_read_config_byte(bmide_dev, 0x09, ®53h); | ||
148 | p += sprintf(p, | ||
149 | "channel status: %s" | ||
150 | " %s\n", | ||
151 | (reg53h & 0x20) ? "On " : "Off", | ||
152 | (reg53h & 0x10) ? "On " : "Off" ); | ||
153 | |||
154 | p += sprintf(p, | ||
155 | "both channels togth: %s" | ||
156 | " %s\n", | ||
157 | (c0&0x80) ? "No " : "Yes", | ||
158 | (c1&0x80) ? "No " : "Yes" ); | ||
159 | |||
160 | pci_read_config_byte(bmide_dev, 0x76, ®53h); | ||
161 | p += sprintf(p, | ||
162 | "Channel state: %s %s\n", | ||
163 | channel_status[reg53h & 0x07], | ||
164 | channel_status[(reg53h & 0x70) >> 4] ); | ||
165 | |||
166 | pci_read_config_byte(bmide_dev, 0x58, ®5xh); | ||
167 | pci_read_config_byte(bmide_dev, 0x5c, ®5yh); | ||
168 | p += sprintf(p, | ||
169 | "Add. Setup Timing: %dT" | ||
170 | " %dT\n", | ||
171 | (reg5xh & 0x07) ? (reg5xh & 0x07) : 8, | ||
172 | (reg5yh & 0x07) ? (reg5yh & 0x07) : 8 ); | ||
173 | |||
174 | pci_read_config_byte(bmide_dev, 0x59, ®5xh); | ||
175 | pci_read_config_byte(bmide_dev, 0x5d, ®5yh); | ||
176 | p += sprintf(p, | ||
177 | "Command Act. Count: %dT" | ||
178 | " %dT\n" | ||
179 | "Command Rec. Count: %dT" | ||
180 | " %dT\n\n", | ||
181 | (reg5xh & 0x70) ? ((reg5xh & 0x70) >> 4) : 8, | ||
182 | (reg5yh & 0x70) ? ((reg5yh & 0x70) >> 4) : 8, | ||
183 | (reg5xh & 0x0f) ? (reg5xh & 0x0f) : 16, | ||
184 | (reg5yh & 0x0f) ? (reg5yh & 0x0f) : 16 ); | ||
185 | |||
186 | p += sprintf(p, | ||
187 | "----------------drive0-----------drive1" | ||
188 | "------------drive0-----------drive1------\n\n"); | ||
189 | p += sprintf(p, | ||
190 | "DMA enabled: %s %s" | ||
191 | " %s %s\n", | ||
192 | (c0&0x20) ? "Yes" : "No ", | ||
193 | (c0&0x40) ? "Yes" : "No ", | ||
194 | (c1&0x20) ? "Yes" : "No ", | ||
195 | (c1&0x40) ? "Yes" : "No " ); | ||
196 | |||
197 | pci_read_config_byte(bmide_dev, 0x54, ®5xh); | ||
198 | pci_read_config_byte(bmide_dev, 0x55, ®5yh); | ||
199 | q = "FIFO threshold: %2d Words %2d Words" | ||
200 | " %2d Words %2d Words\n"; | ||
201 | if (rev < 0xc1) { | ||
202 | if ((rev == 0x20) && | ||
203 | (pci_read_config_byte(bmide_dev, 0x4f, &tmp), (tmp &= 0x20))) { | ||
204 | p += sprintf(p, q, 8, 8, 8, 8); | ||
205 | } else { | ||
206 | p += sprintf(p, q, | ||
207 | (reg5xh & 0x03) + 12, | ||
208 | ((reg5xh & 0x30)>>4) + 12, | ||
209 | (reg5yh & 0x03) + 12, | ||
210 | ((reg5yh & 0x30)>>4) + 12 ); | ||
211 | } | ||
212 | } else { | ||
213 | int t1 = (tmp = (reg5xh & 0x03)) ? (tmp << 3) : 4; | ||
214 | int t2 = (tmp = ((reg5xh & 0x30)>>4)) ? (tmp << 3) : 4; | ||
215 | int t3 = (tmp = (reg5yh & 0x03)) ? (tmp << 3) : 4; | ||
216 | int t4 = (tmp = ((reg5yh & 0x30)>>4)) ? (tmp << 3) : 4; | ||
217 | p += sprintf(p, q, t1, t2, t3, t4); | ||
218 | } | ||
219 | |||
220 | #if 0 | ||
221 | p += sprintf(p, | ||
222 | "FIFO threshold: %2d Words %2d Words" | ||
223 | " %2d Words %2d Words\n", | ||
224 | (reg5xh & 0x03) + 12, | ||
225 | ((reg5xh & 0x30)>>4) + 12, | ||
226 | (reg5yh & 0x03) + 12, | ||
227 | ((reg5yh & 0x30)>>4) + 12 ); | ||
228 | #endif | ||
229 | |||
230 | p += sprintf(p, | ||
231 | "FIFO mode: %s %s %s %s\n", | ||
232 | fifo[((reg5xh & 0x0c) >> 2)], | ||
233 | fifo[((reg5xh & 0xc0) >> 6)], | ||
234 | fifo[((reg5yh & 0x0c) >> 2)], | ||
235 | fifo[((reg5yh & 0xc0) >> 6)] ); | ||
236 | |||
237 | pci_read_config_byte(bmide_dev, 0x5a, ®5xh); | ||
238 | pci_read_config_byte(bmide_dev, 0x5b, ®5xh1); | ||
239 | pci_read_config_byte(bmide_dev, 0x5e, ®5yh); | ||
240 | pci_read_config_byte(bmide_dev, 0x5f, ®5yh1); | ||
241 | |||
242 | p += sprintf(p,/* | ||
243 | "------------------drive0-----------drive1" | ||
244 | "------------drive0-----------drive1------\n")*/ | ||
245 | "Dt RW act. Cnt %2dT %2dT" | ||
246 | " %2dT %2dT\n" | ||
247 | "Dt RW rec. Cnt %2dT %2dT" | ||
248 | " %2dT %2dT\n\n", | ||
249 | (reg5xh & 0x70) ? ((reg5xh & 0x70) >> 4) : 8, | ||
250 | (reg5xh1 & 0x70) ? ((reg5xh1 & 0x70) >> 4) : 8, | ||
251 | (reg5yh & 0x70) ? ((reg5yh & 0x70) >> 4) : 8, | ||
252 | (reg5yh1 & 0x70) ? ((reg5yh1 & 0x70) >> 4) : 8, | ||
253 | (reg5xh & 0x0f) ? (reg5xh & 0x0f) : 16, | ||
254 | (reg5xh1 & 0x0f) ? (reg5xh1 & 0x0f) : 16, | ||
255 | (reg5yh & 0x0f) ? (reg5yh & 0x0f) : 16, | ||
256 | (reg5yh1 & 0x0f) ? (reg5yh1 & 0x0f) : 16 ); | ||
257 | |||
258 | p += sprintf(p, | ||
259 | "-----------------------------------UDMA Timings" | ||
260 | "--------------------------------\n\n"); | ||
261 | |||
262 | pci_read_config_byte(bmide_dev, 0x56, ®5xh); | ||
263 | pci_read_config_byte(bmide_dev, 0x57, ®5yh); | ||
264 | p += sprintf(p, | ||
265 | "UDMA: %s %s" | ||
266 | " %s %s\n" | ||
267 | "UDMA timings: %s %s" | ||
268 | " %s %s\n\n", | ||
269 | (reg5xh & 0x08) ? "OK" : "No", | ||
270 | (reg5xh & 0x80) ? "OK" : "No", | ||
271 | (reg5yh & 0x08) ? "OK" : "No", | ||
272 | (reg5yh & 0x80) ? "OK" : "No", | ||
273 | udmaT[(reg5xh & 0x07)], | ||
274 | udmaT[(reg5xh & 0x70) >> 4], | ||
275 | udmaT[reg5yh & 0x07], | ||
276 | udmaT[(reg5yh & 0x70) >> 4] ); | ||
277 | |||
278 | return p-buffer; /* => must be less than 4k! */ | ||
279 | } | ||
280 | #endif /* defined(DISPLAY_ALI_TIMINGS) && defined(CONFIG_IDE_PROC_FS) */ | ||
281 | |||
282 | /** | 50 | /** |
283 | * ali_set_pio_mode - set host controller for PIO mode | 51 | * ali_set_pio_mode - set host controller for PIO mode |
284 | * @drive: drive | 52 | * @drive: drive |
@@ -294,7 +62,7 @@ static void ali_set_pio_mode(ide_drive_t *drive, const u8 pio) | |||
294 | int s_time, a_time, c_time; | 62 | int s_time, a_time, c_time; |
295 | u8 s_clc, a_clc, r_clc; | 63 | u8 s_clc, a_clc, r_clc; |
296 | unsigned long flags; | 64 | unsigned long flags; |
297 | int bus_speed = system_bus_clock(); | 65 | int bus_speed = ide_pci_clk ? ide_pci_clk : system_bus_clock(); |
298 | int port = hwif->channel ? 0x5c : 0x58; | 66 | int port = hwif->channel ? 0x5c : 0x58; |
299 | int portFIFO = hwif->channel ? 0x55 : 0x54; | 67 | int portFIFO = hwif->channel ? 0x55 : 0x54; |
300 | u8 cd_dma_fifo = 0; | 68 | u8 cd_dma_fifo = 0; |
@@ -465,14 +233,6 @@ static unsigned int __devinit init_chipset_ali15x3 (struct pci_dev *dev, const c | |||
465 | 233 | ||
466 | isa_dev = pci_get_device(PCI_VENDOR_ID_AL, PCI_DEVICE_ID_AL_M1533, NULL); | 234 | isa_dev = pci_get_device(PCI_VENDOR_ID_AL, PCI_DEVICE_ID_AL_M1533, NULL); |
467 | 235 | ||
468 | #if defined(DISPLAY_ALI_TIMINGS) && defined(CONFIG_IDE_PROC_FS) | ||
469 | if (!ali_proc) { | ||
470 | ali_proc = 1; | ||
471 | bmide_dev = dev; | ||
472 | ide_pci_create_host_proc("ali", ali_get_info); | ||
473 | } | ||
474 | #endif /* defined(DISPLAY_ALI_TIMINGS) && defined(CONFIG_IDE_PROC_FS) */ | ||
475 | |||
476 | local_irq_save(flags); | 236 | local_irq_save(flags); |
477 | 237 | ||
478 | if (m5229_revision < 0xC2) { | 238 | if (m5229_revision < 0xC2) { |
diff --git a/drivers/ide/pci/amd74xx.c b/drivers/ide/pci/amd74xx.c index f7c883808b02..efcf54338be7 100644 --- a/drivers/ide/pci/amd74xx.c +++ b/drivers/ide/pci/amd74xx.c | |||
@@ -179,7 +179,7 @@ static unsigned int __devinit init_chipset_amd74xx(struct pci_dev *dev, | |||
179 | * Determine the system bus clock. | 179 | * Determine the system bus clock. |
180 | */ | 180 | */ |
181 | 181 | ||
182 | amd_clock = system_bus_clock() * 1000; | 182 | amd_clock = (ide_pci_clk ? ide_pci_clk : system_bus_clock()) * 1000; |
183 | 183 | ||
184 | switch (amd_clock) { | 184 | switch (amd_clock) { |
185 | case 33000: amd_clock = 33333; break; | 185 | case 33000: amd_clock = 33333; break; |
diff --git a/drivers/ide/pci/cmd640.c b/drivers/ide/pci/cmd640.c index 25c2f1bd175f..aaf38109eaec 100644 --- a/drivers/ide/pci/cmd640.c +++ b/drivers/ide/pci/cmd640.c | |||
@@ -111,10 +111,7 @@ | |||
111 | 111 | ||
112 | #define DRV_NAME "cmd640" | 112 | #define DRV_NAME "cmd640" |
113 | 113 | ||
114 | /* | 114 | static int cmd640_vlb; |
115 | * This flag is set in ide.c by the parameter: ide0=cmd640_vlb | ||
116 | */ | ||
117 | int cmd640_vlb; | ||
118 | 115 | ||
119 | /* | 116 | /* |
120 | * CMD640 specific registers definition. | 117 | * CMD640 specific registers definition. |
@@ -350,12 +347,12 @@ static int __init secondary_port_responding(void) | |||
350 | 347 | ||
351 | spin_lock_irqsave(&cmd640_lock, flags); | 348 | spin_lock_irqsave(&cmd640_lock, flags); |
352 | 349 | ||
353 | outb_p(0x0a, 0x170 + IDE_SELECT_OFFSET); /* select drive0 */ | 350 | outb_p(0x0a, 0x176); /* select drive0 */ |
354 | udelay(100); | 351 | udelay(100); |
355 | if ((inb_p(0x170 + IDE_SELECT_OFFSET) & 0x1f) != 0x0a) { | 352 | if ((inb_p(0x176) & 0x1f) != 0x0a) { |
356 | outb_p(0x1a, 0x170 + IDE_SELECT_OFFSET); /* select drive1 */ | 353 | outb_p(0x1a, 0x176); /* select drive1 */ |
357 | udelay(100); | 354 | udelay(100); |
358 | if ((inb_p(0x170 + IDE_SELECT_OFFSET) & 0x1f) != 0x1a) { | 355 | if ((inb_p(0x176) & 0x1f) != 0x1a) { |
359 | spin_unlock_irqrestore(&cmd640_lock, flags); | 356 | spin_unlock_irqrestore(&cmd640_lock, flags); |
360 | return 0; /* nothing responded */ | 357 | return 0; /* nothing responded */ |
361 | } | 358 | } |
@@ -383,6 +380,7 @@ static void cmd640_dump_regs(void) | |||
383 | } | 380 | } |
384 | #endif | 381 | #endif |
385 | 382 | ||
383 | #ifndef CONFIG_BLK_DEV_CMD640_ENHANCED | ||
386 | /* | 384 | /* |
387 | * Check whether prefetch is on for a drive, | 385 | * Check whether prefetch is on for a drive, |
388 | * and initialize the unmask flags for safe operation. | 386 | * and initialize the unmask flags for safe operation. |
@@ -403,9 +401,7 @@ static void __init check_prefetch(ide_drive_t *drive, unsigned int index) | |||
403 | drive->no_io_32bit = 0; | 401 | drive->no_io_32bit = 0; |
404 | } | 402 | } |
405 | } | 403 | } |
406 | 404 | #else | |
407 | #ifdef CONFIG_BLK_DEV_CMD640_ENHANCED | ||
408 | |||
409 | /* | 405 | /* |
410 | * Sets prefetch mode for a drive. | 406 | * Sets prefetch mode for a drive. |
411 | */ | 407 | */ |
@@ -462,34 +458,6 @@ static inline u8 pack_nibbles(u8 upper, u8 lower) | |||
462 | } | 458 | } |
463 | 459 | ||
464 | /* | 460 | /* |
465 | * This routine retrieves the initial drive timings from the chipset. | ||
466 | */ | ||
467 | static void __init retrieve_drive_counts(unsigned int index) | ||
468 | { | ||
469 | u8 b; | ||
470 | |||
471 | /* | ||
472 | * Get the internal setup timing, and convert to clock count | ||
473 | */ | ||
474 | b = get_cmd640_reg(arttim_regs[index]) & ~0x3f; | ||
475 | switch (b) { | ||
476 | case 0x00: b = 4; break; | ||
477 | case 0x80: b = 3; break; | ||
478 | case 0x40: b = 2; break; | ||
479 | default: b = 5; break; | ||
480 | } | ||
481 | setup_counts[index] = b; | ||
482 | |||
483 | /* | ||
484 | * Get the active/recovery counts | ||
485 | */ | ||
486 | b = get_cmd640_reg(drwtim_regs[index]); | ||
487 | active_counts[index] = (b >> 4) ? (b >> 4) : 0x10; | ||
488 | recovery_counts[index] = (b & 0x0f) ? (b & 0x0f) : 0x10; | ||
489 | } | ||
490 | |||
491 | |||
492 | /* | ||
493 | * This routine writes the prepared setup/active/recovery counts | 461 | * This routine writes the prepared setup/active/recovery counts |
494 | * for a drive into the cmd640 chipset registers to active them. | 462 | * for a drive into the cmd640 chipset registers to active them. |
495 | */ | 463 | */ |
@@ -555,7 +523,14 @@ static void cmd640_set_mode(ide_drive_t *drive, unsigned int index, | |||
555 | { | 523 | { |
556 | int setup_time, active_time, recovery_time, clock_time; | 524 | int setup_time, active_time, recovery_time, clock_time; |
557 | u8 setup_count, active_count, recovery_count, recovery_count2, cycle_count; | 525 | u8 setup_count, active_count, recovery_count, recovery_count2, cycle_count; |
558 | int bus_speed = system_bus_clock(); | 526 | int bus_speed; |
527 | |||
528 | if (cmd640_vlb && ide_vlb_clk) | ||
529 | bus_speed = ide_vlb_clk; | ||
530 | else if (!cmd640_vlb && ide_pci_clk) | ||
531 | bus_speed = ide_pci_clk; | ||
532 | else | ||
533 | bus_speed = system_bus_clock(); | ||
559 | 534 | ||
560 | if (pio_mode > 5) | 535 | if (pio_mode > 5) |
561 | pio_mode = 5; | 536 | pio_mode = 5; |
@@ -679,7 +654,6 @@ static const struct ide_port_info cmd640_port_info __initdata = { | |||
679 | .chipset = ide_cmd640, | 654 | .chipset = ide_cmd640, |
680 | .host_flags = IDE_HFLAG_SERIALIZE | | 655 | .host_flags = IDE_HFLAG_SERIALIZE | |
681 | IDE_HFLAG_NO_DMA | | 656 | IDE_HFLAG_NO_DMA | |
682 | IDE_HFLAG_NO_AUTOTUNE | | ||
683 | IDE_HFLAG_ABUSE_PREFETCH | | 657 | IDE_HFLAG_ABUSE_PREFETCH | |
684 | IDE_HFLAG_ABUSE_FAST_DEVSEL, | 658 | IDE_HFLAG_ABUSE_FAST_DEVSEL, |
685 | #ifdef CONFIG_BLK_DEV_CMD640_ENHANCED | 659 | #ifdef CONFIG_BLK_DEV_CMD640_ENHANCED |
@@ -862,29 +836,16 @@ static int __init cmd640x_init(void) | |||
862 | } | 836 | } |
863 | 837 | ||
864 | #ifdef CONFIG_BLK_DEV_CMD640_ENHANCED | 838 | #ifdef CONFIG_BLK_DEV_CMD640_ENHANCED |
865 | if (drive->autotune || ((index > 1) && second_port_toggled)) { | 839 | /* |
866 | /* | 840 | * Reset timing to the slowest speed and turn off prefetch. |
867 | * Reset timing to the slowest speed and turn off | 841 | * This way, the drive identify code has a better chance. |
868 | * prefetch. This way, the drive identify code has | 842 | */ |
869 | * a better chance. | 843 | setup_counts [index] = 4; /* max possible */ |
870 | */ | 844 | active_counts [index] = 16; /* max possible */ |
871 | setup_counts [index] = 4; /* max possible */ | 845 | recovery_counts [index] = 16; /* max possible */ |
872 | active_counts [index] = 16; /* max possible */ | 846 | program_drive_counts(drive, index); |
873 | recovery_counts [index] = 16; /* max possible */ | 847 | set_prefetch_mode(drive, index, 0); |
874 | program_drive_counts(drive, index); | 848 | printk("cmd640: drive%d timings/prefetch cleared\n", index); |
875 | set_prefetch_mode(drive, index, 0); | ||
876 | printk("cmd640: drive%d timings/prefetch cleared\n", index); | ||
877 | } else { | ||
878 | /* | ||
879 | * Record timings/prefetch without changing them. | ||
880 | * This preserves any prior BIOS setup. | ||
881 | */ | ||
882 | retrieve_drive_counts (index); | ||
883 | check_prefetch(drive, index); | ||
884 | printk("cmd640: drive%d timings/prefetch(%s) preserved", | ||
885 | index, drive->no_io_32bit ? "off" : "on"); | ||
886 | display_clocks(index); | ||
887 | } | ||
888 | #else | 849 | #else |
889 | /* | 850 | /* |
890 | * Set the drive unmask flags to match the prefetch setting | 851 | * Set the drive unmask flags to match the prefetch setting |
diff --git a/drivers/ide/pci/cmd64x.c b/drivers/ide/pci/cmd64x.c index 006fb62656bc..08674711d089 100644 --- a/drivers/ide/pci/cmd64x.c +++ b/drivers/ide/pci/cmd64x.c | |||
@@ -68,8 +68,8 @@ static u8 quantize_timing(int timing, int quant) | |||
68 | */ | 68 | */ |
69 | static void program_cycle_times (ide_drive_t *drive, int cycle_time, int active_time) | 69 | static void program_cycle_times (ide_drive_t *drive, int cycle_time, int active_time) |
70 | { | 70 | { |
71 | struct pci_dev *dev = to_pci_dev(drive->hwif->dev); | 71 | struct pci_dev *dev = to_pci_dev(drive->hwif->dev); |
72 | int clock_time = 1000 / system_bus_clock(); | 72 | int clock_time = 1000 / (ide_pci_clk ? ide_pci_clk : system_bus_clock()); |
73 | u8 cycle_count, active_count, recovery_count, drwtim; | 73 | u8 cycle_count, active_count, recovery_count, drwtim; |
74 | static const u8 recovery_values[] = | 74 | static const u8 recovery_values[] = |
75 | {15, 15, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 0}; | 75 | {15, 15, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 0}; |
@@ -128,7 +128,7 @@ static void cmd64x_tune_pio(ide_drive_t *drive, const u8 pio) | |||
128 | ide_pio_timings[pio].active_time); | 128 | ide_pio_timings[pio].active_time); |
129 | 129 | ||
130 | setup_count = quantize_timing(ide_pio_timings[pio].setup_time, | 130 | setup_count = quantize_timing(ide_pio_timings[pio].setup_time, |
131 | 1000 / system_bus_clock()); | 131 | 1000 / (ide_pci_clk ? ide_pci_clk : system_bus_clock())); |
132 | 132 | ||
133 | /* | 133 | /* |
134 | * The primary channel has individual address setup timing registers | 134 | * The primary channel has individual address setup timing registers |
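
The cmd64x hunks above (and the aec62xx, cy82c693, opti621, and alim15x3 ones) all rely on the same conversion: one bus clock lasts 1000/clk nanoseconds, and nanosecond timings are quantized into whole clocks. A standalone illustration follows; the round-up helper is a stand-in for the driver's quantize_timing(), whose exact rounding is not shown in this diff.

#include <stdio.h>

static int quantize_up(int time_ns, int clock_ns)	/* assumed round-up behaviour */
{
	return (time_ns + clock_ns - 1) / clock_ns;
}

int main(void)
{
	int clk = 33;			/* ide_pci_clk if set, else system_bus_clock() */
	int clock_time = 1000 / clk;	/* 30 ns per clock at 33 MHz */

	/* 70 ns of address setup needs 3 clocks at 33 MHz ... */
	printf("setup clocks @33MHz: %d\n", quantize_up(70, clock_time));

	/* ... but only 2 clocks if the bus really runs at 25 MHz. */
	printf("setup clocks @25MHz: %d\n", quantize_up(70, 1000 / 25));
	return 0;
}
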
diff --git a/drivers/ide/pci/cy82c693.c b/drivers/ide/pci/cy82c693.c index e30eae5a01b6..77cc22c2ad45 100644 --- a/drivers/ide/pci/cy82c693.c +++ b/drivers/ide/pci/cy82c693.c | |||
@@ -18,8 +18,6 @@ | |||
18 | * hdparm -t reports 8.17 MB/sec at about 6% CPU usage for the DTTA | 18 | * hdparm -t reports 8.17 MB/sec at about 6% CPU usage for the DTTA |
19 | * - this is my first linux driver, so there's probably a lot of room | 19 | * - this is my first linux driver, so there's probably a lot of room |
20 | * for optimizations and bug fixing, so feel free to do it. | 20 | * for optimizations and bug fixing, so feel free to do it. |
21 | * - use idebus=xx parameter to set PCI bus speed - needed to calc | ||
22 | * timings for PIO modes (default will be 40) | ||
23 | * - if using PIO mode it's a good idea to set the PIO mode and | 21 | * - if using PIO mode it's a good idea to set the PIO mode and |
24 | * 32-bit I/O support (if possible), e.g. hdparm -p2 -c1 /dev/hda | 22 | * 32-bit I/O support (if possible), e.g. hdparm -p2 -c1 /dev/hda |
25 | * - I had some problems with my IBM DHEA with PIO modes < 2 | 23 | * - I had some problems with my IBM DHEA with PIO modes < 2 |
@@ -136,7 +134,7 @@ static int calc_clk(int time, int bus_speed) | |||
136 | static void compute_clocks(u8 pio, pio_clocks_t *p_pclk) | 134 | static void compute_clocks(u8 pio, pio_clocks_t *p_pclk) |
137 | { | 135 | { |
138 | int clk1, clk2; | 136 | int clk1, clk2; |
139 | int bus_speed = system_bus_clock(); /* get speed of PCI bus */ | 137 | int bus_speed = ide_pci_clk ? ide_pci_clk : system_bus_clock(); |
140 | 138 | ||
141 | /* we don't check against CY82C693's min and max speed, | 139 | /* we don't check against CY82C693's min and max speed, |
142 | * so you can play with the idebus=xx parameter | 140 | * so you can play with the idebus=xx parameter |
diff --git a/drivers/ide/pci/delkin_cb.c b/drivers/ide/pci/delkin_cb.c index c7b7e0483287..b9e457996d0e 100644 --- a/drivers/ide/pci/delkin_cb.c +++ b/drivers/ide/pci/delkin_cb.c | |||
@@ -87,11 +87,7 @@ delkin_cb_probe (struct pci_dev *dev, const struct pci_device_id *id) | |||
87 | 87 | ||
88 | i = hwif->index; | 88 | i = hwif->index; |
89 | 89 | ||
90 | if (hwif->present) | 90 | ide_init_port_data(hwif, i); |
91 | ide_unregister(i); | ||
92 | else | ||
93 | ide_init_port_data(hwif, i); | ||
94 | |||
95 | ide_init_port_hw(hwif, &hw); | 91 | ide_init_port_hw(hwif, &hw); |
96 | hwif->port_ops = &delkin_cb_port_ops; | 92 | hwif->port_ops = &delkin_cb_port_ops; |
97 | 93 | ||
@@ -123,8 +119,7 @@ delkin_cb_remove (struct pci_dev *dev) | |||
123 | { | 119 | { |
124 | ide_hwif_t *hwif = pci_get_drvdata(dev); | 120 | ide_hwif_t *hwif = pci_get_drvdata(dev); |
125 | 121 | ||
126 | if (hwif) | 122 | ide_unregister(hwif); |
127 | ide_unregister(hwif->index); | ||
128 | 123 | ||
129 | pci_release_regions(dev); | 124 | pci_release_regions(dev); |
130 | pci_disable_device(dev); | 125 | pci_disable_device(dev); |
diff --git a/drivers/ide/pci/hpt366.c b/drivers/ide/pci/hpt366.c index 8c02961d0188..c929dadaaaff 100644 --- a/drivers/ide/pci/hpt366.c +++ b/drivers/ide/pci/hpt366.c | |||
@@ -760,7 +760,7 @@ static void hpt3xx_maskproc(ide_drive_t *drive, int mask) | |||
760 | } | 760 | } |
761 | } else | 761 | } else |
762 | outb(mask ? (drive->ctl | 2) : (drive->ctl & ~2), | 762 | outb(mask ? (drive->ctl | 2) : (drive->ctl & ~2), |
763 | hwif->io_ports[IDE_CONTROL_OFFSET]); | 763 | hwif->io_ports.ctl_addr); |
764 | } | 764 | } |
765 | 765 | ||
766 | /* | 766 | /* |
diff --git a/drivers/ide/pci/ns87415.c b/drivers/ide/pci/ns87415.c index e1b0c9a9ab9c..c13e299077ec 100644 --- a/drivers/ide/pci/ns87415.c +++ b/drivers/ide/pci/ns87415.c | |||
@@ -72,8 +72,8 @@ static void __devinit superio_ide_init_iops (struct hwif_s *hwif) | |||
72 | base = pci_resource_start(pdev, port * 2) & ~3; | 72 | base = pci_resource_start(pdev, port * 2) & ~3; |
73 | dmabase = pci_resource_start(pdev, 4) & ~3; | 73 | dmabase = pci_resource_start(pdev, 4) & ~3; |
74 | 74 | ||
75 | superio_ide_status[port] = base + IDE_STATUS_OFFSET; | 75 | superio_ide_status[port] = base + 7; |
76 | superio_ide_select[port] = base + IDE_SELECT_OFFSET; | 76 | superio_ide_select[port] = base + 6; |
77 | superio_ide_dma_status[port] = dmabase + (!port ? 2 : 0xa); | 77 | superio_ide_dma_status[port] = dmabase + (!port ? 2 : 0xa); |
78 | 78 | ||
79 | /* Clear error/interrupt, enable dma */ | 79 | /* Clear error/interrupt, enable dma */ |
@@ -231,12 +231,12 @@ static void __devinit init_hwif_ns87415 (ide_hwif_t *hwif) | |||
231 | * SELECT_DRIVE() properly during first ide_probe_port(). | 231 | * SELECT_DRIVE() properly during first ide_probe_port(). |
232 | */ | 232 | */ |
233 | timeout = 10000; | 233 | timeout = 10000; |
234 | outb(12, hwif->io_ports[IDE_CONTROL_OFFSET]); | 234 | outb(12, hwif->io_ports.ctl_addr); |
235 | udelay(10); | 235 | udelay(10); |
236 | outb(8, hwif->io_ports[IDE_CONTROL_OFFSET]); | 236 | outb(8, hwif->io_ports.ctl_addr); |
237 | do { | 237 | do { |
238 | udelay(50); | 238 | udelay(50); |
239 | stat = hwif->INB(hwif->io_ports[IDE_STATUS_OFFSET]); | 239 | stat = hwif->INB(hwif->io_ports.status_addr); |
240 | if (stat == 0xff) | 240 | if (stat == 0xff) |
241 | break; | 241 | break; |
242 | } while ((stat & BUSY_STAT) && --timeout); | 242 | } while ((stat & BUSY_STAT) && --timeout); |
@@ -244,7 +244,7 @@ static void __devinit init_hwif_ns87415 (ide_hwif_t *hwif) | |||
244 | } | 244 | } |
245 | 245 | ||
246 | if (!using_inta) | 246 | if (!using_inta) |
247 | hwif->irq = ide_default_irq(hwif->io_ports[IDE_DATA_OFFSET]); | 247 | hwif->irq = ide_default_irq(hwif->io_ports.data_addr); |
248 | else if (!hwif->irq && hwif->mate && hwif->mate->irq) | 248 | else if (!hwif->irq && hwif->mate && hwif->mate->irq) |
249 | hwif->irq = hwif->mate->irq; /* share IRQ with mate */ | 249 | hwif->irq = hwif->mate->irq; /* share IRQ with mate */ |
250 | 250 | ||
diff --git a/drivers/ide/pci/opti621.c b/drivers/ide/pci/opti621.c index 9edacba20ffb..6e99080497bf 100644 --- a/drivers/ide/pci/opti621.c +++ b/drivers/ide/pci/opti621.c | |||
@@ -53,8 +53,7 @@ | |||
53 | * If you then set the second drive to another PIO, the old value | 53 | * If you then set the second drive to another PIO, the old value |
54 | * (automatically selected) will be overrided by yours. | 54 | * (automatically selected) will be overrided by yours. |
55 | * There is a 25/33MHz switch in configuration | 55 | * There is a 25/33MHz switch in configuration |
56 | * register, but driver is written for use at any frequency which get | 56 | * register, but driver is written for use at any frequency. |
57 | * (use idebus=xx to select PCI bus speed). | ||
58 | * | 57 | * |
59 | * Version 0.1, Nov 8, 1996 | 58 | * Version 0.1, Nov 8, 1996 |
60 | * by Jaromir Koutek, for 2.1.8. | 59 | * by Jaromir Koutek, for 2.1.8. |
@@ -210,7 +209,7 @@ static void compute_clocks(int pio, pio_clocks_t *clks) | |||
210 | { | 209 | { |
211 | if (pio != PIO_NOT_EXIST) { | 210 | if (pio != PIO_NOT_EXIST) { |
212 | int adr_setup, data_pls; | 211 | int adr_setup, data_pls; |
213 | int bus_speed = system_bus_clock(); | 212 | int bus_speed = ide_pci_clk ? ide_pci_clk : system_bus_clock(); |
214 | 213 | ||
215 | adr_setup = ide_pio_timings[pio].setup_time; | 214 | adr_setup = ide_pio_timings[pio].setup_time; |
216 | data_pls = ide_pio_timings[pio].active_time; | 215 | data_pls = ide_pio_timings[pio].active_time; |
@@ -280,7 +279,7 @@ static void opti621_set_pio_mode(ide_drive_t *drive, const u8 pio) | |||
280 | 279 | ||
281 | spin_lock_irqsave(&opti621_lock, flags); | 280 | spin_lock_irqsave(&opti621_lock, flags); |
282 | 281 | ||
283 | reg_base = hwif->io_ports[IDE_DATA_OFFSET]; | 282 | reg_base = hwif->io_ports.data_addr; |
284 | 283 | ||
285 | /* allow Register-B */ | 284 | /* allow Register-B */ |
286 | outb(0xc0, reg_base + CNTRL_REG); | 285 | outb(0xc0, reg_base + CNTRL_REG); |
diff --git a/drivers/ide/pci/scc_pata.c b/drivers/ide/pci/scc_pata.c index 17cf86490d59..ad7cdf9060ca 100644 --- a/drivers/ide/pci/scc_pata.c +++ b/drivers/ide/pci/scc_pata.c | |||
@@ -334,7 +334,7 @@ static int scc_dma_end(ide_drive_t *drive) | |||
334 | 334 | ||
335 | /* errata A308 workaround: Step5 (check data loss) */ | 335 | /* errata A308 workaround: Step5 (check data loss) */ |
336 | /* We don't check non ide_disk because it is limited to UDMA4 */ | 336 | /* We don't check non ide_disk because it is limited to UDMA4 */ |
337 | if (!(in_be32((void __iomem *)hwif->io_ports[IDE_ALTSTATUS_OFFSET]) | 337 | if (!(in_be32((void __iomem *)hwif->io_ports.ctl_addr) |
338 | & ERR_STAT) && | 338 | & ERR_STAT) && |
339 | drive->media == ide_disk && drive->current_speed > XFER_UDMA_4) { | 339 | drive->media == ide_disk && drive->current_speed > XFER_UDMA_4) { |
340 | reg = in_be32((void __iomem *)intsts_port); | 340 | reg = in_be32((void __iomem *)intsts_port); |
@@ -438,7 +438,7 @@ static int scc_dma_test_irq(ide_drive_t *drive) | |||
438 | u32 int_stat = in_be32((void __iomem *)hwif->dma_base + 0x014); | 438 | u32 int_stat = in_be32((void __iomem *)hwif->dma_base + 0x014); |
439 | 439 | ||
440 | /* SCC errata A252,A308 workaround: Step4 */ | 440 | /* SCC errata A252,A308 workaround: Step4 */ |
441 | if ((in_be32((void __iomem *)hwif->io_ports[IDE_ALTSTATUS_OFFSET]) | 441 | if ((in_be32((void __iomem *)hwif->io_ports.ctl_addr) |
442 | & ERR_STAT) && | 442 | & ERR_STAT) && |
443 | (int_stat & INTSTS_INTRQ)) | 443 | (int_stat & INTSTS_INTRQ)) |
444 | return 1; | 444 | return 1; |
@@ -534,8 +534,8 @@ static int scc_ide_setup_pci_device(struct pci_dev *dev, | |||
534 | } | 534 | } |
535 | 535 | ||
536 | memset(&hw, 0, sizeof(hw)); | 536 | memset(&hw, 0, sizeof(hw)); |
537 | for (i = IDE_DATA_OFFSET; i <= IDE_CONTROL_OFFSET; i++) | 537 | for (i = 0; i <= 8; i++) |
538 | hw.io_ports[i] = ports->dma + 0x20 + i * 4; | 538 | hw.io_ports_array[i] = ports->dma + 0x20 + i * 4; |
539 | hw.irq = dev->irq; | 539 | hw.irq = dev->irq; |
540 | hw.dev = &dev->dev; | 540 | hw.dev = &dev->dev; |
541 | hw.chipset = ide_pci; | 541 | hw.chipset = ide_pci; |
@@ -763,9 +763,8 @@ static void __devexit scc_remove(struct pci_dev *dev) | |||
763 | hwif->dmatable_cpu = NULL; | 763 | hwif->dmatable_cpu = NULL; |
764 | } | 764 | } |
765 | 765 | ||
766 | ide_unregister(hwif->index); | 766 | ide_unregister(hwif); |
767 | 767 | ||
768 | hwif->chipset = ide_unknown; | ||
769 | iounmap((void*)ports->dma); | 768 | iounmap((void*)ports->dma); |
770 | iounmap((void*)ports->ctl); | 769 | iounmap((void*)ports->ctl); |
771 | pci_release_selected_regions(dev, (1 << 2) - 1); | 770 | pci_release_selected_regions(dev, (1 << 2) - 1); |
diff --git a/drivers/ide/pci/sgiioc4.c b/drivers/ide/pci/sgiioc4.c index 321a4e28ac19..63e28f4e6d3b 100644 --- a/drivers/ide/pci/sgiioc4.c +++ b/drivers/ide/pci/sgiioc4.c | |||
@@ -98,28 +98,28 @@ sgiioc4_init_hwif_ports(hw_regs_t * hw, unsigned long data_port, | |||
98 | int i; | 98 | int i; |
99 | 99 | ||
100 | /* Registers are word (32 bit) aligned */ | 100 | /* Registers are word (32 bit) aligned */ |
101 | for (i = IDE_DATA_OFFSET; i <= IDE_STATUS_OFFSET; i++) | 101 | for (i = 0; i <= 7; i++) |
102 | hw->io_ports[i] = reg + i * 4; | 102 | hw->io_ports_array[i] = reg + i * 4; |
103 | 103 | ||
104 | if (ctrl_port) | 104 | if (ctrl_port) |
105 | hw->io_ports[IDE_CONTROL_OFFSET] = ctrl_port; | 105 | hw->io_ports.ctl_addr = ctrl_port; |
106 | 106 | ||
107 | if (irq_port) | 107 | if (irq_port) |
108 | hw->io_ports[IDE_IRQ_OFFSET] = irq_port; | 108 | hw->io_ports.irq_addr = irq_port; |
109 | } | 109 | } |
110 | 110 | ||
111 | static void | 111 | static void |
112 | sgiioc4_maskproc(ide_drive_t * drive, int mask) | 112 | sgiioc4_maskproc(ide_drive_t * drive, int mask) |
113 | { | 113 | { |
114 | writeb(mask ? (drive->ctl | 2) : (drive->ctl & ~2), | 114 | writeb(mask ? (drive->ctl | 2) : (drive->ctl & ~2), |
115 | (void __iomem *)drive->hwif->io_ports[IDE_CONTROL_OFFSET]); | 115 | (void __iomem *)drive->hwif->io_ports.ctl_addr); |
116 | } | 116 | } |
117 | 117 | ||
118 | static int | 118 | static int |
119 | sgiioc4_checkirq(ide_hwif_t * hwif) | 119 | sgiioc4_checkirq(ide_hwif_t * hwif) |
120 | { | 120 | { |
121 | unsigned long intr_addr = | 121 | unsigned long intr_addr = |
122 | hwif->io_ports[IDE_IRQ_OFFSET] + IOC4_INTR_REG * 4; | 122 | hwif->io_ports.irq_addr + IOC4_INTR_REG * 4; |
123 | 123 | ||
124 | if ((u8)readl((void __iomem *)intr_addr) & 0x03) | 124 | if ((u8)readl((void __iomem *)intr_addr) & 0x03) |
125 | return 1; | 125 | return 1; |
@@ -134,8 +134,8 @@ sgiioc4_clearirq(ide_drive_t * drive) | |||
134 | { | 134 | { |
135 | u32 intr_reg; | 135 | u32 intr_reg; |
136 | ide_hwif_t *hwif = HWIF(drive); | 136 | ide_hwif_t *hwif = HWIF(drive); |
137 | unsigned long other_ir = | 137 | struct ide_io_ports *io_ports = &hwif->io_ports; |
138 | hwif->io_ports[IDE_IRQ_OFFSET] + (IOC4_INTR_REG << 2); | 138 | unsigned long other_ir = io_ports->irq_addr + (IOC4_INTR_REG << 2); |
139 | 139 | ||
140 | /* Code to check for PCI error conditions */ | 140 | /* Code to check for PCI error conditions */ |
141 | intr_reg = readl((void __iomem *)other_ir); | 141 | intr_reg = readl((void __iomem *)other_ir); |
@@ -147,12 +147,12 @@ sgiioc4_clearirq(ide_drive_t * drive) | |||
147 | * a "clear" status if it got cleared. If not, then spin | 147 | * a "clear" status if it got cleared. If not, then spin |
148 | * for a bit trying to clear it. | 148 | * for a bit trying to clear it. |
149 | */ | 149 | */ |
150 | u8 stat = sgiioc4_INB(hwif->io_ports[IDE_STATUS_OFFSET]); | 150 | u8 stat = sgiioc4_INB(io_ports->status_addr); |
151 | int count = 0; | 151 | int count = 0; |
152 | stat = sgiioc4_INB(hwif->io_ports[IDE_STATUS_OFFSET]); | 152 | stat = sgiioc4_INB(io_ports->status_addr); |
153 | while ((stat & 0x80) && (count++ < 100)) { | 153 | while ((stat & 0x80) && (count++ < 100)) { |
154 | udelay(1); | 154 | udelay(1); |
155 | stat = sgiioc4_INB(hwif->io_ports[IDE_STATUS_OFFSET]); | 155 | stat = sgiioc4_INB(io_ports->status_addr); |
156 | } | 156 | } |
157 | 157 | ||
158 | if (intr_reg & 0x02) { | 158 | if (intr_reg & 0x02) { |
@@ -162,9 +162,9 @@ sgiioc4_clearirq(ide_drive_t * drive) | |||
162 | pci_stat_cmd_reg; | 162 | pci_stat_cmd_reg; |
163 | 163 | ||
164 | pci_err_addr_low = | 164 | pci_err_addr_low = |
165 | readl((void __iomem *)hwif->io_ports[IDE_IRQ_OFFSET]); | 165 | readl((void __iomem *)io_ports->irq_addr); |
166 | pci_err_addr_high = | 166 | pci_err_addr_high = |
167 | readl((void __iomem *)(hwif->io_ports[IDE_IRQ_OFFSET] + 4)); | 167 | readl((void __iomem *)(io_ports->irq_addr + 4)); |
168 | pci_read_config_dword(dev, PCI_COMMAND, | 168 | pci_read_config_dword(dev, PCI_COMMAND, |
169 | &pci_stat_cmd_reg); | 169 | &pci_stat_cmd_reg); |
170 | printk(KERN_ERR | 170 | printk(KERN_ERR |
@@ -573,7 +573,6 @@ static const struct ide_port_info sgiioc4_port_info __devinitdata = { | |||
573 | .init_dma = ide_dma_sgiioc4, | 573 | .init_dma = ide_dma_sgiioc4, |
574 | .port_ops = &sgiioc4_port_ops, | 574 | .port_ops = &sgiioc4_port_ops, |
575 | .dma_ops = &sgiioc4_dma_ops, | 575 | .dma_ops = &sgiioc4_dma_ops, |
576 | .host_flags = IDE_HFLAG_NO_AUTOTUNE, | ||
577 | .mwdma_mask = ATA_MWDMA2_ONLY, | 576 | .mwdma_mask = ATA_MWDMA2_ONLY, |
578 | }; | 577 | }; |
579 | 578 | ||
diff --git a/drivers/ide/pci/siimage.c b/drivers/ide/pci/siimage.c index 1fffea3211bd..c2040a017f47 100644 --- a/drivers/ide/pci/siimage.c +++ b/drivers/ide/pci/siimage.c | |||
@@ -622,9 +622,10 @@ static void __devinit init_mmio_iops_siimage(ide_hwif_t *hwif) | |||
622 | struct pci_dev *dev = to_pci_dev(hwif->dev); | 622 | struct pci_dev *dev = to_pci_dev(hwif->dev); |
623 | void *addr = pci_get_drvdata(dev); | 623 | void *addr = pci_get_drvdata(dev); |
624 | u8 ch = hwif->channel; | 624 | u8 ch = hwif->channel; |
625 | hw_regs_t hw; | ||
626 | unsigned long base; | 625 | unsigned long base; |
627 | 626 | ||
627 | struct ide_io_ports *io_ports = &hwif->io_ports; | ||
628 | |||
628 | /* | 629 | /* |
629 | * Fill in the basic HWIF bits | 630 | * Fill in the basic HWIF bits |
630 | */ | 631 | */ |
@@ -638,7 +639,7 @@ static void __devinit init_mmio_iops_siimage(ide_hwif_t *hwif) | |||
638 | * based I/O | 639 | * based I/O |
639 | */ | 640 | */ |
640 | 641 | ||
641 | memset(&hw, 0, sizeof(hw_regs_t)); | 642 | memset(io_ports, 0, sizeof(*io_ports)); |
642 | 643 | ||
643 | base = (unsigned long)addr; | 644 | base = (unsigned long)addr; |
644 | if (ch) | 645 | if (ch) |
@@ -651,17 +652,15 @@ static void __devinit init_mmio_iops_siimage(ide_hwif_t *hwif) | |||
651 | * so we can't currently use it sanely since we want to | 652 | * so we can't currently use it sanely since we want to |
652 | * use LBA48 mode. | 653 | * use LBA48 mode. |
653 | */ | 654 | */ |
654 | hw.io_ports[IDE_DATA_OFFSET] = base; | 655 | io_ports->data_addr = base; |
655 | hw.io_ports[IDE_ERROR_OFFSET] = base + 1; | 656 | io_ports->error_addr = base + 1; |
656 | hw.io_ports[IDE_NSECTOR_OFFSET] = base + 2; | 657 | io_ports->nsect_addr = base + 2; |
657 | hw.io_ports[IDE_SECTOR_OFFSET] = base + 3; | 658 | io_ports->lbal_addr = base + 3; |
658 | hw.io_ports[IDE_LCYL_OFFSET] = base + 4; | 659 | io_ports->lbam_addr = base + 4; |
659 | hw.io_ports[IDE_HCYL_OFFSET] = base + 5; | 660 | io_ports->lbah_addr = base + 5; |
660 | hw.io_ports[IDE_SELECT_OFFSET] = base + 6; | 661 | io_ports->device_addr = base + 6; |
661 | hw.io_ports[IDE_STATUS_OFFSET] = base + 7; | 662 | io_ports->status_addr = base + 7; |
662 | hw.io_ports[IDE_CONTROL_OFFSET] = base + 10; | 663 | io_ports->ctl_addr = base + 10; |
663 | |||
664 | hw.io_ports[IDE_IRQ_OFFSET] = 0; | ||
665 | 664 | ||
666 | if (pdev_is_sata(dev)) { | 665 | if (pdev_is_sata(dev)) { |
667 | base = (unsigned long)addr; | 666 | base = (unsigned long)addr; |
@@ -672,8 +671,6 @@ static void __devinit init_mmio_iops_siimage(ide_hwif_t *hwif) | |||
672 | hwif->sata_scr[SATA_CONTROL_OFFSET] = base + 0x100; | 671 | hwif->sata_scr[SATA_CONTROL_OFFSET] = base + 0x100; |
673 | } | 672 | } |
674 | 673 | ||
675 | memcpy(hwif->io_ports, hw.io_ports, sizeof(hwif->io_ports)); | ||
676 | |||
677 | hwif->irq = dev->irq; | 674 | hwif->irq = dev->irq; |
678 | 675 | ||
679 | hwif->dma_base = (unsigned long)addr + (ch ? 0x08 : 0x00); | 676 | hwif->dma_base = (unsigned long)addr + (ch ? 0x08 : 0x00); |
diff --git a/drivers/ide/pci/trm290.c b/drivers/ide/pci/trm290.c index 15ee38f7ad3f..a8a3138682ef 100644 --- a/drivers/ide/pci/trm290.c +++ b/drivers/ide/pci/trm290.c | |||
@@ -298,7 +298,7 @@ static void __devinit init_hwif_trm290(ide_hwif_t *hwif) | |||
298 | if (old != compat && old_mask == 0xff) { | 298 | if (old != compat && old_mask == 0xff) { |
299 | /* leave lower 10 bits untouched */ | 299 | /* leave lower 10 bits untouched */ |
300 | compat += (next_offset += 0x400); | 300 | compat += (next_offset += 0x400); |
301 | hwif->io_ports[IDE_CONTROL_OFFSET] = compat + 2; | 301 | hwif->io_ports.ctl_addr = compat + 2; |
302 | outw(compat | 1, hwif->config_data); | 302 | outw(compat | 1, hwif->config_data); |
303 | new = inw(hwif->config_data); | 303 | new = inw(hwif->config_data); |
304 | printk(KERN_INFO "%s: control basereg workaround: " | 304 | printk(KERN_INFO "%s: control basereg workaround: " |
diff --git a/drivers/ide/pci/via82cxxx.c b/drivers/ide/pci/via82cxxx.c index bbd17bec6ffe..566e0ecb8db1 100644 --- a/drivers/ide/pci/via82cxxx.c +++ b/drivers/ide/pci/via82cxxx.c | |||
@@ -340,7 +340,7 @@ static unsigned int __devinit init_chipset_via82cxxx(struct pci_dev *dev, const | |||
340 | * Determine system bus clock. | 340 | * Determine system bus clock. |
341 | */ | 341 | */ |
342 | 342 | ||
343 | via_clock = system_bus_clock() * 1000; | 343 | via_clock = (ide_pci_clk ? ide_pci_clk : system_bus_clock()) * 1000; |
344 | 344 | ||
345 | switch (via_clock) { | 345 | switch (via_clock) { |
346 | case 33000: via_clock = 33333; break; | 346 | case 33000: via_clock = 33333; break; |
diff --git a/drivers/ide/ppc/mpc8xx.c b/drivers/ide/ppc/mpc8xx.c index a82f6efb660b..f0e638dcc3ab 100644 --- a/drivers/ide/ppc/mpc8xx.c +++ b/drivers/ide/ppc/mpc8xx.c | |||
@@ -131,7 +131,7 @@ static int pcmcia_schlvl = PCMCIA_SCHLVL; | |||
131 | #if defined(CONFIG_IDE_8xx_PCCARD) || defined(CONFIG_IDE_8xx_DIRECT) | 131 | #if defined(CONFIG_IDE_8xx_PCCARD) || defined(CONFIG_IDE_8xx_DIRECT) |
132 | static int __init m8xx_ide_init_ports(hw_regs_t *hw, unsigned long data_port) | 132 | static int __init m8xx_ide_init_ports(hw_regs_t *hw, unsigned long data_port) |
133 | { | 133 | { |
134 | unsigned long *p = hw->io_ports; | 134 | unsigned long *p = hw->io_ports_array; |
135 | int i; | 135 | int i; |
136 | 136 | ||
137 | typedef struct { | 137 | typedef struct { |
@@ -314,7 +314,7 @@ static int __init m8xx_ide_init_ports(hw_regs_t *hw, unsigned long data_port) | |||
314 | #if defined(CONFIG_IDE_EXT_DIRECT) | 314 | #if defined(CONFIG_IDE_EXT_DIRECT) |
315 | static int __init m8xx_ide_init_ports(hw_regs_t *hw, unsigned long data_port) | 315 | static int __init m8xx_ide_init_ports(hw_regs_t *hw, unsigned long data_port) |
316 | { | 316 | { |
317 | unsigned long *p = hw->io_ports; | 317 | unsigned long *p = hw->io_ports_array; |
318 | int i; | 318 | int i; |
319 | 319 | ||
320 | u32 ide_phy_base; | 320 | u32 ide_phy_base; |
@@ -811,24 +811,28 @@ static int __init mpc8xx_ide_probe(void) | |||
811 | #ifdef IDE0_BASE_OFFSET | 811 | #ifdef IDE0_BASE_OFFSET |
812 | memset(&hw, 0, sizeof(hw)); | 812 | memset(&hw, 0, sizeof(hw)); |
813 | if (!m8xx_ide_init_ports(&hw, 0)) { | 813 | if (!m8xx_ide_init_ports(&hw, 0)) { |
814 | ide_hwif_t *hwif = &ide_hwifs[0]; | 814 | ide_hwif_t *hwif = ide_find_port(); |
815 | 815 | ||
816 | ide_init_port_hw(hwif, &hw); | 816 | if (hwif) { |
817 | hwif->pio_mask = ATA_PIO4; | 817 | ide_init_port_hw(hwif, &hw); |
818 | hwif->port_ops = &m8xx_port_ops; | 818 | hwif->pio_mask = ATA_PIO4; |
819 | hwif->port_ops = &m8xx_port_ops; | ||
819 | 820 | ||
820 | idx[0] = 0; | 821 | idx[0] = hwif->index; |
822 | } | ||
821 | } | 823 | } |
822 | #ifdef IDE1_BASE_OFFSET | 824 | #ifdef IDE1_BASE_OFFSET |
823 | memset(&hw, 0, sizeof(hw)); | 825 | memset(&hw, 0, sizeof(hw)); |
824 | if (!m8xx_ide_init_ports(&hw, 1)) { | 826 | if (!m8xx_ide_init_ports(&hw, 1)) { |
825 | ide_hwif_t *mate = &ide_hwifs[1]; | 827 | ide_hwif_t *mate = ide_find_port(); |
826 | 828 | ||
827 | ide_init_port_hw(mate, &hw); | 829 | if (mate) { |
828 | mate->pio_mask = ATA_PIO4; | 830 | ide_init_port_hw(mate, &hw); |
829 | mate->port_ops = &m8xx_port_ops; | 831 | mate->pio_mask = ATA_PIO4; |
832 | mate->port_ops = &m8xx_port_ops; | ||
830 | 833 | ||
831 | idx[1] = 1; | 834 | idx[1] = mate->index; |
835 | } | ||
832 | } | 836 | } |
833 | #endif | 837 | #endif |
834 | #endif | 838 | #endif |
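
The mpc8xx hunk above stops hardwiring &ide_hwifs[0] and &ide_hwifs[1] and instead asks the core for a free slot, recording whatever index comes back. A kernel-context fragment restating that pattern; idx[] and its 0xff sentinel follow the usual probe-array convention and are an assumption here, while the other calls mirror the hunk.

/* Kernel-context fragment, not standalone. */
u8 idx[4] = { 0xff, 0xff, 0xff, 0xff };		/* 0xff = no port found (assumed convention) */
ide_hwif_t *hwif = ide_find_port();

if (hwif) {
	ide_init_port_hw(hwif, &hw);		/* hw was filled by m8xx_ide_init_ports() */
	hwif->pio_mask = ATA_PIO4;
	hwif->port_ops = &m8xx_port_ops;
	idx[0] = hwif->index;			/* may not be 0 if other ports registered first */
}
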
diff --git a/drivers/ide/ppc/pmac.c b/drivers/ide/ppc/pmac.c index 185faa0dce94..3cac6b2790dd 100644 --- a/drivers/ide/ppc/pmac.c +++ b/drivers/ide/ppc/pmac.c | |||
@@ -417,7 +417,7 @@ static void pmac_ide_kauai_selectproc(ide_drive_t *drive); | |||
417 | #endif /* CONFIG_BLK_DEV_IDEDMA_PMAC */ | 417 | #endif /* CONFIG_BLK_DEV_IDEDMA_PMAC */ |
418 | 418 | ||
419 | #define PMAC_IDE_REG(x) \ | 419 | #define PMAC_IDE_REG(x) \ |
420 | ((void __iomem *)((drive)->hwif->io_ports[IDE_DATA_OFFSET] + (x))) | 420 | ((void __iomem *)((drive)->hwif->io_ports.data_addr + (x))) |
421 | 421 | ||
422 | /* | 422 | /* |
423 | * Apply the timings of the proper unit (master/slave) to the shared | 423 | * Apply the timings of the proper unit (master/slave) to the shared |
@@ -1086,8 +1086,9 @@ static void __devinit pmac_ide_init_ports(hw_regs_t *hw, unsigned long base) | |||
1086 | int i; | 1086 | int i; |
1087 | 1087 | ||
1088 | for (i = 0; i < 8; ++i) | 1088 | for (i = 0; i < 8; ++i) |
1089 | hw->io_ports[i] = base + i * 0x10; | 1089 | hw->io_ports_array[i] = base + i * 0x10; |
1090 | hw->io_ports[8] = base + 0x160; | 1090 | |
1091 | hw->io_ports.ctl_addr = base + 0x160; | ||
1091 | } | 1092 | } |
1092 | 1093 | ||
1093 | /* | 1094 | /* |
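Editor's note: the pmac hunks above track the io_ports layout change — the task-file registers can be walked as an array (io_ports_array) or addressed by name (e.g. ctl_addr). The union below is only an illustration of that dual view, not the real hw_regs_t definition.

#include <stdio.h>

struct ide_io_ports {
	unsigned long data_addr;
	unsigned long error_addr;
	unsigned long nsect_addr;
	unsigned long lbal_addr;
	unsigned long lbam_addr;
	unsigned long lbah_addr;
	unsigned long device_addr;
	unsigned long status_addr;
	unsigned long ctl_addr;
};

union hw_io_ports {
	struct ide_io_ports named;            /* access registers by name... */
	unsigned long io_ports_array[9];      /* ...or walk them as an array */
};

int main(void)
{
	union hw_io_ports hw = { { 0 } };
	unsigned long base = 0x1000;
	int i;

	for (i = 0; i < 8; ++i)
		hw.io_ports_array[i] = base + i * 0x10;
	hw.named.ctl_addr = base + 0x160;     /* what used to be io_ports[8] */

	printf("data=%#lx ctl=%#lx\n", hw.named.data_addr, hw.named.ctl_addr);
	return 0;
}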
diff --git a/drivers/infiniband/hw/ehca/ehca_classes.h b/drivers/infiniband/hw/ehca/ehca_classes.h index 0d13fe0a260b..3d6d9461c31d 100644 --- a/drivers/infiniband/hw/ehca/ehca_classes.h +++ b/drivers/infiniband/hw/ehca/ehca_classes.h | |||
@@ -160,6 +160,7 @@ struct ehca_qp { | |||
160 | }; | 160 | }; |
161 | u32 qp_type; | 161 | u32 qp_type; |
162 | enum ehca_ext_qp_type ext_type; | 162 | enum ehca_ext_qp_type ext_type; |
163 | enum ib_qp_state state; | ||
163 | struct ipz_queue ipz_squeue; | 164 | struct ipz_queue ipz_squeue; |
164 | struct ipz_queue ipz_rqueue; | 165 | struct ipz_queue ipz_rqueue; |
165 | struct h_galpas galpas; | 166 | struct h_galpas galpas; |
diff --git a/drivers/infiniband/hw/ehca/ehca_irq.c b/drivers/infiniband/hw/ehca/ehca_irq.c index b5ca94c6b8d9..ca5eb0cb628c 100644 --- a/drivers/infiniband/hw/ehca/ehca_irq.c +++ b/drivers/infiniband/hw/ehca/ehca_irq.c | |||
@@ -633,7 +633,7 @@ static inline int find_next_online_cpu(struct ehca_comp_pool *pool) | |||
633 | unsigned long flags; | 633 | unsigned long flags; |
634 | 634 | ||
635 | WARN_ON_ONCE(!in_interrupt()); | 635 | WARN_ON_ONCE(!in_interrupt()); |
636 | if (ehca_debug_level) | 636 | if (ehca_debug_level >= 3) |
637 | ehca_dmp(&cpu_online_map, sizeof(cpumask_t), ""); | 637 | ehca_dmp(&cpu_online_map, sizeof(cpumask_t), ""); |
638 | 638 | ||
639 | spin_lock_irqsave(&pool->last_cpu_lock, flags); | 639 | spin_lock_irqsave(&pool->last_cpu_lock, flags); |
diff --git a/drivers/infiniband/hw/ehca/ehca_main.c b/drivers/infiniband/hw/ehca/ehca_main.c index 65b3362cdb9b..65048976198c 100644 --- a/drivers/infiniband/hw/ehca/ehca_main.c +++ b/drivers/infiniband/hw/ehca/ehca_main.c | |||
@@ -50,7 +50,7 @@ | |||
50 | #include "ehca_tools.h" | 50 | #include "ehca_tools.h" |
51 | #include "hcp_if.h" | 51 | #include "hcp_if.h" |
52 | 52 | ||
53 | #define HCAD_VERSION "0025" | 53 | #define HCAD_VERSION "0026" |
54 | 54 | ||
55 | MODULE_LICENSE("Dual BSD/GPL"); | 55 | MODULE_LICENSE("Dual BSD/GPL"); |
56 | MODULE_AUTHOR("Christoph Raisch <raisch@de.ibm.com>"); | 56 | MODULE_AUTHOR("Christoph Raisch <raisch@de.ibm.com>"); |
@@ -60,7 +60,6 @@ MODULE_VERSION(HCAD_VERSION); | |||
60 | static int ehca_open_aqp1 = 0; | 60 | static int ehca_open_aqp1 = 0; |
61 | static int ehca_hw_level = 0; | 61 | static int ehca_hw_level = 0; |
62 | static int ehca_poll_all_eqs = 1; | 62 | static int ehca_poll_all_eqs = 1; |
63 | static int ehca_mr_largepage = 1; | ||
64 | 63 | ||
65 | int ehca_debug_level = 0; | 64 | int ehca_debug_level = 0; |
66 | int ehca_nr_ports = 2; | 65 | int ehca_nr_ports = 2; |
@@ -70,45 +69,40 @@ int ehca_static_rate = -1; | |||
70 | int ehca_scaling_code = 0; | 69 | int ehca_scaling_code = 0; |
71 | int ehca_lock_hcalls = -1; | 70 | int ehca_lock_hcalls = -1; |
72 | 71 | ||
73 | module_param_named(open_aqp1, ehca_open_aqp1, int, S_IRUGO); | 72 | module_param_named(open_aqp1, ehca_open_aqp1, bool, S_IRUGO); |
74 | module_param_named(debug_level, ehca_debug_level, int, S_IRUGO); | 73 | module_param_named(debug_level, ehca_debug_level, int, S_IRUGO); |
75 | module_param_named(hw_level, ehca_hw_level, int, S_IRUGO); | 74 | module_param_named(hw_level, ehca_hw_level, int, S_IRUGO); |
76 | module_param_named(nr_ports, ehca_nr_ports, int, S_IRUGO); | 75 | module_param_named(nr_ports, ehca_nr_ports, int, S_IRUGO); |
77 | module_param_named(use_hp_mr, ehca_use_hp_mr, int, S_IRUGO); | 76 | module_param_named(use_hp_mr, ehca_use_hp_mr, bool, S_IRUGO); |
78 | module_param_named(port_act_time, ehca_port_act_time, int, S_IRUGO); | 77 | module_param_named(port_act_time, ehca_port_act_time, int, S_IRUGO); |
79 | module_param_named(poll_all_eqs, ehca_poll_all_eqs, int, S_IRUGO); | 78 | module_param_named(poll_all_eqs, ehca_poll_all_eqs, bool, S_IRUGO); |
80 | module_param_named(static_rate, ehca_static_rate, int, S_IRUGO); | 79 | module_param_named(static_rate, ehca_static_rate, int, S_IRUGO); |
81 | module_param_named(scaling_code, ehca_scaling_code, int, S_IRUGO); | 80 | module_param_named(scaling_code, ehca_scaling_code, bool, S_IRUGO); |
82 | module_param_named(mr_largepage, ehca_mr_largepage, int, S_IRUGO); | ||
83 | module_param_named(lock_hcalls, ehca_lock_hcalls, bool, S_IRUGO); | 81 | module_param_named(lock_hcalls, ehca_lock_hcalls, bool, S_IRUGO); |
84 | 82 | ||
85 | MODULE_PARM_DESC(open_aqp1, | 83 | MODULE_PARM_DESC(open_aqp1, |
86 | "AQP1 on startup (0: no (default), 1: yes)"); | 84 | "Open AQP1 on startup (default: no)"); |
87 | MODULE_PARM_DESC(debug_level, | 85 | MODULE_PARM_DESC(debug_level, |
88 | "debug level" | 86 | "Amount of debug output (0: none (default), 1: traces, " |
89 | " (0: no debug traces (default), 1: with debug traces)"); | 87 | "2: some dumps, 3: lots)"); |
90 | MODULE_PARM_DESC(hw_level, | 88 | MODULE_PARM_DESC(hw_level, |
91 | "hardware level" | 89 | "Hardware level (0: autosensing (default), " |
92 | " (0: autosensing (default), 1: v. 0.20, 2: v. 0.21)"); | 90 | "0x10..0x14: eHCA, 0x20..0x23: eHCA2)"); |
93 | MODULE_PARM_DESC(nr_ports, | 91 | MODULE_PARM_DESC(nr_ports, |
94 | "number of connected ports (-1: autodetect, 1: port one only, " | 92 | "number of connected ports (-1: autodetect, 1: port one only, " |
95 | "2: two ports (default)"); | 93 | "2: two ports (default)"); |
96 | MODULE_PARM_DESC(use_hp_mr, | 94 | MODULE_PARM_DESC(use_hp_mr, |
97 | "high performance MRs (0: no (default), 1: yes)"); | 95 | "Use high performance MRs (default: no)"); |
98 | MODULE_PARM_DESC(port_act_time, | 96 | MODULE_PARM_DESC(port_act_time, |
99 | "time to wait for port activation (default: 30 sec)"); | 97 | "Time to wait for port activation (default: 30 sec)"); |
100 | MODULE_PARM_DESC(poll_all_eqs, | 98 | MODULE_PARM_DESC(poll_all_eqs, |
101 | "polls all event queues periodically" | 99 | "Poll all event queues periodically (default: yes)"); |
102 | " (0: no, 1: yes (default))"); | ||
103 | MODULE_PARM_DESC(static_rate, | 100 | MODULE_PARM_DESC(static_rate, |
104 | "set permanent static rate (default: disabled)"); | 101 | "Set permanent static rate (default: no static rate)"); |
105 | MODULE_PARM_DESC(scaling_code, | 102 | MODULE_PARM_DESC(scaling_code, |
106 | "set scaling code (0: disabled/default, 1: enabled)"); | 103 | "Enable scaling code (default: no)"); |
107 | MODULE_PARM_DESC(mr_largepage, | ||
108 | "use large page for MR (0: use PAGE_SIZE (default), " | ||
109 | "1: use large page depending on MR size"); | ||
110 | MODULE_PARM_DESC(lock_hcalls, | 104 | MODULE_PARM_DESC(lock_hcalls, |
111 | "serialize all hCalls made by the driver " | 105 | "Serialize all hCalls made by the driver " |
112 | "(default: autodetect)"); | 106 | "(default: autodetect)"); |
113 | 107 | ||
114 | DEFINE_RWLOCK(ehca_qp_idr_lock); | 108 | DEFINE_RWLOCK(ehca_qp_idr_lock); |
@@ -275,6 +269,7 @@ static int ehca_sense_attributes(struct ehca_shca *shca) | |||
275 | u64 h_ret; | 269 | u64 h_ret; |
276 | struct hipz_query_hca *rblock; | 270 | struct hipz_query_hca *rblock; |
277 | struct hipz_query_port *port; | 271 | struct hipz_query_port *port; |
272 | const char *loc_code; | ||
278 | 273 | ||
279 | static const u32 pgsize_map[] = { | 274 | static const u32 pgsize_map[] = { |
280 | HCA_CAP_MR_PGSIZE_4K, 0x1000, | 275 | HCA_CAP_MR_PGSIZE_4K, 0x1000, |
@@ -283,6 +278,12 @@ static int ehca_sense_attributes(struct ehca_shca *shca) | |||
283 | HCA_CAP_MR_PGSIZE_16M, 0x1000000, | 278 | HCA_CAP_MR_PGSIZE_16M, 0x1000000, |
284 | }; | 279 | }; |
285 | 280 | ||
281 | ehca_gen_dbg("Probing adapter %s...", | ||
282 | shca->ofdev->node->full_name); | ||
283 | loc_code = of_get_property(shca->ofdev->node, "ibm,loc-code", NULL); | ||
284 | if (loc_code) | ||
285 | ehca_gen_dbg(" ... location lode=%s", loc_code); | ||
286 | |||
286 | rblock = ehca_alloc_fw_ctrlblock(GFP_KERNEL); | 287 | rblock = ehca_alloc_fw_ctrlblock(GFP_KERNEL); |
287 | if (!rblock) { | 288 | if (!rblock) { |
288 | ehca_gen_err("Cannot allocate rblock memory."); | 289 | ehca_gen_err("Cannot allocate rblock memory."); |
@@ -350,11 +351,9 @@ static int ehca_sense_attributes(struct ehca_shca *shca) | |||
350 | 351 | ||
351 | /* translate supported MR page sizes; always support 4K */ | 352 | /* translate supported MR page sizes; always support 4K */ |
352 | shca->hca_cap_mr_pgsize = EHCA_PAGESIZE; | 353 | shca->hca_cap_mr_pgsize = EHCA_PAGESIZE; |
353 | if (ehca_mr_largepage) { /* support extra sizes only if enabled */ | 354 | for (i = 0; i < ARRAY_SIZE(pgsize_map); i += 2) |
354 | for (i = 0; i < ARRAY_SIZE(pgsize_map); i += 2) | 355 | if (rblock->memory_page_size_supported & pgsize_map[i]) |
355 | if (rblock->memory_page_size_supported & pgsize_map[i]) | 356 | shca->hca_cap_mr_pgsize |= pgsize_map[i + 1]; |
356 | shca->hca_cap_mr_pgsize |= pgsize_map[i + 1]; | ||
357 | } | ||
358 | 357 | ||
359 | /* query max MTU from first port -- it's the same for all ports */ | 358 | /* query max MTU from first port -- it's the same for all ports */ |
360 | port = (struct hipz_query_port *)rblock; | 359 | port = (struct hipz_query_port *)rblock; |
@@ -567,8 +566,7 @@ static int ehca_destroy_aqp1(struct ehca_sport *sport) | |||
567 | 566 | ||
568 | static ssize_t ehca_show_debug_level(struct device_driver *ddp, char *buf) | 567 | static ssize_t ehca_show_debug_level(struct device_driver *ddp, char *buf) |
569 | { | 568 | { |
570 | return snprintf(buf, PAGE_SIZE, "%d\n", | 569 | return snprintf(buf, PAGE_SIZE, "%d\n", ehca_debug_level); |
571 | ehca_debug_level); | ||
572 | } | 570 | } |
573 | 571 | ||
574 | static ssize_t ehca_store_debug_level(struct device_driver *ddp, | 572 | static ssize_t ehca_store_debug_level(struct device_driver *ddp, |
@@ -657,14 +655,6 @@ static ssize_t ehca_show_adapter_handle(struct device *dev, | |||
657 | } | 655 | } |
658 | static DEVICE_ATTR(adapter_handle, S_IRUGO, ehca_show_adapter_handle, NULL); | 656 | static DEVICE_ATTR(adapter_handle, S_IRUGO, ehca_show_adapter_handle, NULL); |
659 | 657 | ||
660 | static ssize_t ehca_show_mr_largepage(struct device *dev, | ||
661 | struct device_attribute *attr, | ||
662 | char *buf) | ||
663 | { | ||
664 | return sprintf(buf, "%d\n", ehca_mr_largepage); | ||
665 | } | ||
666 | static DEVICE_ATTR(mr_largepage, S_IRUGO, ehca_show_mr_largepage, NULL); | ||
667 | |||
668 | static struct attribute *ehca_dev_attrs[] = { | 658 | static struct attribute *ehca_dev_attrs[] = { |
669 | &dev_attr_adapter_handle.attr, | 659 | &dev_attr_adapter_handle.attr, |
670 | &dev_attr_num_ports.attr, | 660 | &dev_attr_num_ports.attr, |
@@ -681,7 +671,6 @@ static struct attribute *ehca_dev_attrs[] = { | |||
681 | &dev_attr_cur_mw.attr, | 671 | &dev_attr_cur_mw.attr, |
682 | &dev_attr_max_pd.attr, | 672 | &dev_attr_max_pd.attr, |
683 | &dev_attr_max_ah.attr, | 673 | &dev_attr_max_ah.attr, |
684 | &dev_attr_mr_largepage.attr, | ||
685 | NULL | 674 | NULL |
686 | }; | 675 | }; |
687 | 676 | ||
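Editor's note: the ehca changes above and below converge on a tiered debug_level convention — level >= 1 traces errors, >= 2 adds control-block dumps, >= 3 dumps everything, including per-WQE/CQE data. The user-space sketch below illustrates the gating idea; the macro name and levels are stand-ins for the driver's ehca_dbg/ehca_dmp helpers.

#include <stdio.h>

static int ehca_debug_level = 2;        /* the "debug_level" module parameter */

#define ehca_trace(level, fmt, ...)                              \
	do {                                                     \
		if (ehca_debug_level >= (level))                 \
			printf(fmt "\n", ##__VA_ARGS__);         \
	} while (0)

int main(void)
{
	ehca_trace(1, "level 1: error traces");
	ehca_trace(2, "level 2: control block dump (qp_num=%x)", 0x42);
	ehca_trace(3, "level 3: per-WQE/CQE dump (suppressed at level 2)");
	return 0;
}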
diff --git a/drivers/infiniband/hw/ehca/ehca_mrmw.c b/drivers/infiniband/hw/ehca/ehca_mrmw.c index f26997fc00f8..46ae4eb2c4e1 100644 --- a/drivers/infiniband/hw/ehca/ehca_mrmw.c +++ b/drivers/infiniband/hw/ehca/ehca_mrmw.c | |||
@@ -1794,8 +1794,9 @@ static int ehca_check_kpages_per_ate(struct scatterlist *page_list, | |||
1794 | int t; | 1794 | int t; |
1795 | for (t = start_idx; t <= end_idx; t++) { | 1795 | for (t = start_idx; t <= end_idx; t++) { |
1796 | u64 pgaddr = page_to_pfn(sg_page(&page_list[t])) << PAGE_SHIFT; | 1796 | u64 pgaddr = page_to_pfn(sg_page(&page_list[t])) << PAGE_SHIFT; |
1797 | ehca_gen_dbg("chunk_page=%lx value=%016lx", pgaddr, | 1797 | if (ehca_debug_level >= 3) |
1798 | *(u64 *)abs_to_virt(phys_to_abs(pgaddr))); | 1798 | ehca_gen_dbg("chunk_page=%lx value=%016lx", pgaddr, |
1799 | *(u64 *)abs_to_virt(phys_to_abs(pgaddr))); | ||
1799 | if (pgaddr - PAGE_SIZE != *prev_pgaddr) { | 1800 | if (pgaddr - PAGE_SIZE != *prev_pgaddr) { |
1800 | ehca_gen_err("uncontiguous page found pgaddr=%lx " | 1801 | ehca_gen_err("uncontiguous page found pgaddr=%lx " |
1801 | "prev_pgaddr=%lx page_list_i=%x", | 1802 | "prev_pgaddr=%lx page_list_i=%x", |
@@ -1862,10 +1863,13 @@ static int ehca_set_pagebuf_user2(struct ehca_mr_pginfo *pginfo, | |||
1862 | pgaddr & | 1863 | pgaddr & |
1863 | ~(pginfo->hwpage_size - 1)); | 1864 | ~(pginfo->hwpage_size - 1)); |
1864 | } | 1865 | } |
1865 | ehca_gen_dbg("kpage=%lx chunk_page=%lx " | 1866 | if (ehca_debug_level >= 3) { |
1866 | "value=%016lx", *kpage, pgaddr, | 1867 | u64 val = *(u64 *)abs_to_virt( |
1867 | *(u64 *)abs_to_virt( | 1868 | phys_to_abs(pgaddr)); |
1868 | phys_to_abs(pgaddr))); | 1869 | ehca_gen_dbg("kpage=%lx chunk_page=%lx " |
1870 | "value=%016lx", | ||
1871 | *kpage, pgaddr, val); | ||
1872 | } | ||
1869 | prev_pgaddr = pgaddr; | 1873 | prev_pgaddr = pgaddr; |
1870 | i++; | 1874 | i++; |
1871 | pginfo->kpage_cnt++; | 1875 | pginfo->kpage_cnt++; |
diff --git a/drivers/infiniband/hw/ehca/ehca_qp.c b/drivers/infiniband/hw/ehca/ehca_qp.c index 3eb14a52cbf2..57bef1152cc2 100644 --- a/drivers/infiniband/hw/ehca/ehca_qp.c +++ b/drivers/infiniband/hw/ehca/ehca_qp.c | |||
@@ -550,6 +550,7 @@ static struct ehca_qp *internal_create_qp( | |||
550 | spin_lock_init(&my_qp->spinlock_r); | 550 | spin_lock_init(&my_qp->spinlock_r); |
551 | my_qp->qp_type = qp_type; | 551 | my_qp->qp_type = qp_type; |
552 | my_qp->ext_type = parms.ext_type; | 552 | my_qp->ext_type = parms.ext_type; |
553 | my_qp->state = IB_QPS_RESET; | ||
553 | 554 | ||
554 | if (init_attr->recv_cq) | 555 | if (init_attr->recv_cq) |
555 | my_qp->recv_cq = | 556 | my_qp->recv_cq = |
@@ -965,7 +966,7 @@ static int prepare_sqe_rts(struct ehca_qp *my_qp, struct ehca_shca *shca, | |||
965 | qp_num, bad_send_wqe_p); | 966 | qp_num, bad_send_wqe_p); |
966 | /* convert wqe pointer to vadr */ | 967 | /* convert wqe pointer to vadr */ |
967 | bad_send_wqe_v = abs_to_virt((u64)bad_send_wqe_p); | 968 | bad_send_wqe_v = abs_to_virt((u64)bad_send_wqe_p); |
968 | if (ehca_debug_level) | 969 | if (ehca_debug_level >= 2) |
969 | ehca_dmp(bad_send_wqe_v, 32, "qp_num=%x bad_wqe", qp_num); | 970 | ehca_dmp(bad_send_wqe_v, 32, "qp_num=%x bad_wqe", qp_num); |
970 | squeue = &my_qp->ipz_squeue; | 971 | squeue = &my_qp->ipz_squeue; |
971 | if (ipz_queue_abs_to_offset(squeue, (u64)bad_send_wqe_p, &q_ofs)) { | 972 | if (ipz_queue_abs_to_offset(squeue, (u64)bad_send_wqe_p, &q_ofs)) { |
@@ -978,7 +979,7 @@ static int prepare_sqe_rts(struct ehca_qp *my_qp, struct ehca_shca *shca, | |||
978 | wqe = (struct ehca_wqe *)ipz_qeit_calc(squeue, q_ofs); | 979 | wqe = (struct ehca_wqe *)ipz_qeit_calc(squeue, q_ofs); |
979 | *bad_wqe_cnt = 0; | 980 | *bad_wqe_cnt = 0; |
980 | while (wqe->optype != 0xff && wqe->wqef != 0xff) { | 981 | while (wqe->optype != 0xff && wqe->wqef != 0xff) { |
981 | if (ehca_debug_level) | 982 | if (ehca_debug_level >= 2) |
982 | ehca_dmp(wqe, 32, "qp_num=%x wqe", qp_num); | 983 | ehca_dmp(wqe, 32, "qp_num=%x wqe", qp_num); |
983 | wqe->nr_of_data_seg = 0; /* suppress data access */ | 984 | wqe->nr_of_data_seg = 0; /* suppress data access */ |
984 | wqe->wqef = WQEF_PURGE; /* WQE to be purged */ | 985 | wqe->wqef = WQEF_PURGE; /* WQE to be purged */ |
@@ -1450,7 +1451,7 @@ static int internal_modify_qp(struct ib_qp *ibqp, | |||
1450 | /* no support for max_send/recv_sge yet */ | 1451 | /* no support for max_send/recv_sge yet */ |
1451 | } | 1452 | } |
1452 | 1453 | ||
1453 | if (ehca_debug_level) | 1454 | if (ehca_debug_level >= 2) |
1454 | ehca_dmp(mqpcb, 4*70, "qp_num=%x", ibqp->qp_num); | 1455 | ehca_dmp(mqpcb, 4*70, "qp_num=%x", ibqp->qp_num); |
1455 | 1456 | ||
1456 | h_ret = hipz_h_modify_qp(shca->ipz_hca_handle, | 1457 | h_ret = hipz_h_modify_qp(shca->ipz_hca_handle, |
@@ -1508,6 +1509,8 @@ static int internal_modify_qp(struct ib_qp *ibqp, | |||
1508 | if (attr_mask & IB_QP_QKEY) | 1509 | if (attr_mask & IB_QP_QKEY) |
1509 | my_qp->qkey = attr->qkey; | 1510 | my_qp->qkey = attr->qkey; |
1510 | 1511 | ||
1512 | my_qp->state = qp_new_state; | ||
1513 | |||
1511 | modify_qp_exit2: | 1514 | modify_qp_exit2: |
1512 | if (squeue_locked) { /* this means: sqe -> rts */ | 1515 | if (squeue_locked) { /* this means: sqe -> rts */ |
1513 | spin_unlock_irqrestore(&my_qp->spinlock_s, flags); | 1516 | spin_unlock_irqrestore(&my_qp->spinlock_s, flags); |
@@ -1763,7 +1766,7 @@ int ehca_query_qp(struct ib_qp *qp, | |||
1763 | if (qp_init_attr) | 1766 | if (qp_init_attr) |
1764 | *qp_init_attr = my_qp->init_attr; | 1767 | *qp_init_attr = my_qp->init_attr; |
1765 | 1768 | ||
1766 | if (ehca_debug_level) | 1769 | if (ehca_debug_level >= 2) |
1767 | ehca_dmp(qpcb, 4*70, "qp_num=%x", qp->qp_num); | 1770 | ehca_dmp(qpcb, 4*70, "qp_num=%x", qp->qp_num); |
1768 | 1771 | ||
1769 | query_qp_exit1: | 1772 | query_qp_exit1: |
@@ -1811,7 +1814,7 @@ int ehca_modify_srq(struct ib_srq *ibsrq, struct ib_srq_attr *attr, | |||
1811 | goto modify_srq_exit0; | 1814 | goto modify_srq_exit0; |
1812 | } | 1815 | } |
1813 | 1816 | ||
1814 | if (ehca_debug_level) | 1817 | if (ehca_debug_level >= 2) |
1815 | ehca_dmp(mqpcb, 4*70, "qp_num=%x", my_qp->real_qp_num); | 1818 | ehca_dmp(mqpcb, 4*70, "qp_num=%x", my_qp->real_qp_num); |
1816 | 1819 | ||
1817 | h_ret = hipz_h_modify_qp(shca->ipz_hca_handle, my_qp->ipz_qp_handle, | 1820 | h_ret = hipz_h_modify_qp(shca->ipz_hca_handle, my_qp->ipz_qp_handle, |
@@ -1864,7 +1867,7 @@ int ehca_query_srq(struct ib_srq *srq, struct ib_srq_attr *srq_attr) | |||
1864 | srq_attr->srq_limit = EHCA_BMASK_GET( | 1867 | srq_attr->srq_limit = EHCA_BMASK_GET( |
1865 | MQPCB_CURR_SRQ_LIMIT, qpcb->curr_srq_limit); | 1868 | MQPCB_CURR_SRQ_LIMIT, qpcb->curr_srq_limit); |
1866 | 1869 | ||
1867 | if (ehca_debug_level) | 1870 | if (ehca_debug_level >= 2) |
1868 | ehca_dmp(qpcb, 4*70, "qp_num=%x", my_qp->real_qp_num); | 1871 | ehca_dmp(qpcb, 4*70, "qp_num=%x", my_qp->real_qp_num); |
1869 | 1872 | ||
1870 | query_srq_exit1: | 1873 | query_srq_exit1: |
diff --git a/drivers/infiniband/hw/ehca/ehca_reqs.c b/drivers/infiniband/hw/ehca/ehca_reqs.c index a20bbf466188..bbe0436f4f75 100644 --- a/drivers/infiniband/hw/ehca/ehca_reqs.c +++ b/drivers/infiniband/hw/ehca/ehca_reqs.c | |||
@@ -81,7 +81,7 @@ static inline int ehca_write_rwqe(struct ipz_queue *ipz_rqueue, | |||
81 | recv_wr->sg_list[cnt_ds].length; | 81 | recv_wr->sg_list[cnt_ds].length; |
82 | } | 82 | } |
83 | 83 | ||
84 | if (ehca_debug_level) { | 84 | if (ehca_debug_level >= 3) { |
85 | ehca_gen_dbg("RECEIVE WQE written into ipz_rqueue=%p", | 85 | ehca_gen_dbg("RECEIVE WQE written into ipz_rqueue=%p", |
86 | ipz_rqueue); | 86 | ipz_rqueue); |
87 | ehca_dmp(wqe_p, 16*(6 + wqe_p->nr_of_data_seg), "recv wqe"); | 87 | ehca_dmp(wqe_p, 16*(6 + wqe_p->nr_of_data_seg), "recv wqe"); |
@@ -281,7 +281,7 @@ static inline int ehca_write_swqe(struct ehca_qp *qp, | |||
281 | return -EINVAL; | 281 | return -EINVAL; |
282 | } | 282 | } |
283 | 283 | ||
284 | if (ehca_debug_level) { | 284 | if (ehca_debug_level >= 3) { |
285 | ehca_gen_dbg("SEND WQE written into queue qp=%p ", qp); | 285 | ehca_gen_dbg("SEND WQE written into queue qp=%p ", qp); |
286 | ehca_dmp( wqe_p, 16*(6 + wqe_p->nr_of_data_seg), "send wqe"); | 286 | ehca_dmp( wqe_p, 16*(6 + wqe_p->nr_of_data_seg), "send wqe"); |
287 | } | 287 | } |
@@ -421,6 +421,11 @@ int ehca_post_send(struct ib_qp *qp, | |||
421 | int ret = 0; | 421 | int ret = 0; |
422 | unsigned long flags; | 422 | unsigned long flags; |
423 | 423 | ||
424 | if (unlikely(my_qp->state != IB_QPS_RTS)) { | ||
425 | ehca_err(qp->device, "QP not in RTS state qpn=%x", qp->qp_num); | ||
426 | return -EINVAL; | ||
427 | } | ||
428 | |||
424 | /* LOCK the QUEUE */ | 429 | /* LOCK the QUEUE */ |
425 | spin_lock_irqsave(&my_qp->spinlock_s, flags); | 430 | spin_lock_irqsave(&my_qp->spinlock_s, flags); |
426 | 431 | ||
@@ -454,13 +459,14 @@ int ehca_post_send(struct ib_qp *qp, | |||
454 | goto post_send_exit0; | 459 | goto post_send_exit0; |
455 | } | 460 | } |
456 | wqe_cnt++; | 461 | wqe_cnt++; |
457 | ehca_dbg(qp->device, "ehca_qp=%p qp_num=%x wqe_cnt=%d", | ||
458 | my_qp, qp->qp_num, wqe_cnt); | ||
459 | } /* eof for cur_send_wr */ | 462 | } /* eof for cur_send_wr */ |
460 | 463 | ||
461 | post_send_exit0: | 464 | post_send_exit0: |
462 | iosync(); /* serialize GAL register access */ | 465 | iosync(); /* serialize GAL register access */ |
463 | hipz_update_sqa(my_qp, wqe_cnt); | 466 | hipz_update_sqa(my_qp, wqe_cnt); |
467 | if (unlikely(ret || ehca_debug_level >= 2)) | ||
468 | ehca_dbg(qp->device, "ehca_qp=%p qp_num=%x wqe_cnt=%d ret=%i", | ||
469 | my_qp, qp->qp_num, wqe_cnt, ret); | ||
464 | my_qp->message_count += wqe_cnt; | 470 | my_qp->message_count += wqe_cnt; |
465 | spin_unlock_irqrestore(&my_qp->spinlock_s, flags); | 471 | spin_unlock_irqrestore(&my_qp->spinlock_s, flags); |
466 | return ret; | 472 | return ret; |
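Editor's note: the ehca_post_send() hunk above adds a fast-path check so that posting to a QP that is not in RTS is rejected up front instead of being handed to the hardware. A minimal sketch of that pattern, with stand-in types rather than the driver's structures:

#include <stdio.h>
#include <errno.h>

enum qp_state { QPS_RESET, QPS_INIT, QPS_RTR, QPS_RTS };

struct qp { enum qp_state state; unsigned int qp_num; };

static int post_send(struct qp *qp)
{
	if (qp->state != QPS_RTS) {
		fprintf(stderr, "QP not in RTS state qpn=%x\n", qp->qp_num);
		return -EINVAL;
	}
	/* ...lock the send queue and write WQEs here... */
	return 0;
}

int main(void)
{
	struct qp qp = { .state = QPS_INIT, .qp_num = 0x42 };

	printf("post_send -> %d\n", post_send(&qp));    /* rejected */
	qp.state = QPS_RTS;
	printf("post_send -> %d\n", post_send(&qp));    /* accepted */
	return 0;
}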
@@ -520,13 +526,14 @@ static int internal_post_recv(struct ehca_qp *my_qp, | |||
520 | goto post_recv_exit0; | 526 | goto post_recv_exit0; |
521 | } | 527 | } |
522 | wqe_cnt++; | 528 | wqe_cnt++; |
523 | ehca_dbg(dev, "ehca_qp=%p qp_num=%x wqe_cnt=%d", | ||
524 | my_qp, my_qp->real_qp_num, wqe_cnt); | ||
525 | } /* eof for cur_recv_wr */ | 529 | } /* eof for cur_recv_wr */ |
526 | 530 | ||
527 | post_recv_exit0: | 531 | post_recv_exit0: |
528 | iosync(); /* serialize GAL register access */ | 532 | iosync(); /* serialize GAL register access */ |
529 | hipz_update_rqa(my_qp, wqe_cnt); | 533 | hipz_update_rqa(my_qp, wqe_cnt); |
534 | if (unlikely(ret || ehca_debug_level >= 2)) | ||
535 | ehca_dbg(dev, "ehca_qp=%p qp_num=%x wqe_cnt=%d ret=%i", | ||
536 | my_qp, my_qp->real_qp_num, wqe_cnt, ret); | ||
530 | spin_unlock_irqrestore(&my_qp->spinlock_r, flags); | 537 | spin_unlock_irqrestore(&my_qp->spinlock_r, flags); |
531 | return ret; | 538 | return ret; |
532 | } | 539 | } |
@@ -570,16 +577,17 @@ static inline int ehca_poll_cq_one(struct ib_cq *cq, struct ib_wc *wc) | |||
570 | struct ehca_cq *my_cq = container_of(cq, struct ehca_cq, ib_cq); | 577 | struct ehca_cq *my_cq = container_of(cq, struct ehca_cq, ib_cq); |
571 | struct ehca_cqe *cqe; | 578 | struct ehca_cqe *cqe; |
572 | struct ehca_qp *my_qp; | 579 | struct ehca_qp *my_qp; |
573 | int cqe_count = 0; | 580 | int cqe_count = 0, is_error; |
574 | 581 | ||
575 | poll_cq_one_read_cqe: | 582 | poll_cq_one_read_cqe: |
576 | cqe = (struct ehca_cqe *) | 583 | cqe = (struct ehca_cqe *) |
577 | ipz_qeit_get_inc_valid(&my_cq->ipz_queue); | 584 | ipz_qeit_get_inc_valid(&my_cq->ipz_queue); |
578 | if (!cqe) { | 585 | if (!cqe) { |
579 | ret = -EAGAIN; | 586 | ret = -EAGAIN; |
580 | ehca_dbg(cq->device, "Completion queue is empty ehca_cq=%p " | 587 | if (ehca_debug_level >= 3) |
581 | "cq_num=%x ret=%i", my_cq, my_cq->cq_number, ret); | 588 | ehca_dbg(cq->device, "Completion queue is empty " |
582 | goto poll_cq_one_exit0; | 589 | "my_cq=%p cq_num=%x", my_cq, my_cq->cq_number); |
590 | goto poll_cq_one_exit0; | ||
583 | } | 591 | } |
584 | 592 | ||
585 | /* prevents loads being reordered across this point */ | 593 | /* prevents loads being reordered across this point */ |
@@ -609,7 +617,7 @@ poll_cq_one_read_cqe: | |||
609 | ehca_dbg(cq->device, | 617 | ehca_dbg(cq->device, |
610 | "Got CQE with purged bit qp_num=%x src_qp=%x", | 618 | "Got CQE with purged bit qp_num=%x src_qp=%x", |
611 | cqe->local_qp_number, cqe->remote_qp_number); | 619 | cqe->local_qp_number, cqe->remote_qp_number); |
612 | if (ehca_debug_level) | 620 | if (ehca_debug_level >= 2) |
613 | ehca_dmp(cqe, 64, "qp_num=%x src_qp=%x", | 621 | ehca_dmp(cqe, 64, "qp_num=%x src_qp=%x", |
614 | cqe->local_qp_number, | 622 | cqe->local_qp_number, |
615 | cqe->remote_qp_number); | 623 | cqe->remote_qp_number); |
@@ -622,11 +630,13 @@ poll_cq_one_read_cqe: | |||
622 | } | 630 | } |
623 | } | 631 | } |
624 | 632 | ||
625 | /* tracing cqe */ | 633 | is_error = cqe->status & WC_STATUS_ERROR_BIT; |
626 | if (unlikely(ehca_debug_level)) { | 634 | |
635 | /* trace error CQEs if debug_level >= 1, trace all CQEs if >= 3 */ | ||
636 | if (unlikely(ehca_debug_level >= 3 || (ehca_debug_level && is_error))) { | ||
627 | ehca_dbg(cq->device, | 637 | ehca_dbg(cq->device, |
628 | "Received COMPLETION ehca_cq=%p cq_num=%x -----", | 638 | "Received %sCOMPLETION ehca_cq=%p cq_num=%x -----", |
629 | my_cq, my_cq->cq_number); | 639 | is_error ? "ERROR " : "", my_cq, my_cq->cq_number); |
630 | ehca_dmp(cqe, 64, "ehca_cq=%p cq_num=%x", | 640 | ehca_dmp(cqe, 64, "ehca_cq=%p cq_num=%x", |
631 | my_cq, my_cq->cq_number); | 641 | my_cq, my_cq->cq_number); |
632 | ehca_dbg(cq->device, | 642 | ehca_dbg(cq->device, |
@@ -649,8 +659,9 @@ poll_cq_one_read_cqe: | |||
649 | /* update also queue adder to throw away this entry!!! */ | 659 | /* update also queue adder to throw away this entry!!! */ |
650 | goto poll_cq_one_exit0; | 660 | goto poll_cq_one_exit0; |
651 | } | 661 | } |
662 | |||
652 | /* eval ib_wc_status */ | 663 | /* eval ib_wc_status */ |
653 | if (unlikely(cqe->status & WC_STATUS_ERROR_BIT)) { | 664 | if (unlikely(is_error)) { |
654 | /* complete with errors */ | 665 | /* complete with errors */ |
655 | map_ib_wc_status(cqe->status, &wc->status); | 666 | map_ib_wc_status(cqe->status, &wc->status); |
656 | wc->vendor_err = wc->status; | 667 | wc->vendor_err = wc->status; |
@@ -671,14 +682,6 @@ poll_cq_one_read_cqe: | |||
671 | wc->imm_data = cpu_to_be32(cqe->immediate_data); | 682 | wc->imm_data = cpu_to_be32(cqe->immediate_data); |
672 | wc->sl = cqe->service_level; | 683 | wc->sl = cqe->service_level; |
673 | 684 | ||
674 | if (unlikely(wc->status != IB_WC_SUCCESS)) | ||
675 | ehca_dbg(cq->device, | ||
676 | "ehca_cq=%p cq_num=%x WARNING unsuccessful cqe " | ||
677 | "OPType=%x status=%x qp_num=%x src_qp=%x wr_id=%lx " | ||
678 | "cqe=%p", my_cq, my_cq->cq_number, cqe->optype, | ||
679 | cqe->status, cqe->local_qp_number, | ||
680 | cqe->remote_qp_number, cqe->work_request_id, cqe); | ||
681 | |||
682 | poll_cq_one_exit0: | 685 | poll_cq_one_exit0: |
683 | if (cqe_count > 0) | 686 | if (cqe_count > 0) |
684 | hipz_update_feca(my_cq, cqe_count); | 687 | hipz_update_feca(my_cq, cqe_count); |
diff --git a/drivers/infiniband/hw/ehca/ehca_uverbs.c b/drivers/infiniband/hw/ehca/ehca_uverbs.c index 1b07f2beafaf..e43ed8f8a0c8 100644 --- a/drivers/infiniband/hw/ehca/ehca_uverbs.c +++ b/drivers/infiniband/hw/ehca/ehca_uverbs.c | |||
@@ -211,8 +211,7 @@ static int ehca_mmap_qp(struct vm_area_struct *vma, struct ehca_qp *qp, | |||
211 | break; | 211 | break; |
212 | 212 | ||
213 | case 1: /* qp rqueue_addr */ | 213 | case 1: /* qp rqueue_addr */ |
214 | ehca_dbg(qp->ib_qp.device, "qp_num=%x rqueue", | 214 | ehca_dbg(qp->ib_qp.device, "qp_num=%x rq", qp->ib_qp.qp_num); |
215 | qp->ib_qp.qp_num); | ||
216 | ret = ehca_mmap_queue(vma, &qp->ipz_rqueue, | 215 | ret = ehca_mmap_queue(vma, &qp->ipz_rqueue, |
217 | &qp->mm_count_rqueue); | 216 | &qp->mm_count_rqueue); |
218 | if (unlikely(ret)) { | 217 | if (unlikely(ret)) { |
@@ -224,8 +223,7 @@ static int ehca_mmap_qp(struct vm_area_struct *vma, struct ehca_qp *qp, | |||
224 | break; | 223 | break; |
225 | 224 | ||
226 | case 2: /* qp squeue_addr */ | 225 | case 2: /* qp squeue_addr */ |
227 | ehca_dbg(qp->ib_qp.device, "qp_num=%x squeue", | 226 | ehca_dbg(qp->ib_qp.device, "qp_num=%x sq", qp->ib_qp.qp_num); |
228 | qp->ib_qp.qp_num); | ||
229 | ret = ehca_mmap_queue(vma, &qp->ipz_squeue, | 227 | ret = ehca_mmap_queue(vma, &qp->ipz_squeue, |
230 | &qp->mm_count_squeue); | 228 | &qp->mm_count_squeue); |
231 | if (unlikely(ret)) { | 229 | if (unlikely(ret)) { |
diff --git a/drivers/infiniband/hw/ehca/hcp_if.c b/drivers/infiniband/hw/ehca/hcp_if.c index 7029aa653751..5245e13c3a30 100644 --- a/drivers/infiniband/hw/ehca/hcp_if.c +++ b/drivers/infiniband/hw/ehca/hcp_if.c | |||
@@ -123,8 +123,9 @@ static long ehca_plpar_hcall_norets(unsigned long opcode, | |||
123 | int i, sleep_msecs; | 123 | int i, sleep_msecs; |
124 | unsigned long flags = 0; | 124 | unsigned long flags = 0; |
125 | 125 | ||
126 | ehca_gen_dbg("opcode=%lx " HCALL7_REGS_FORMAT, | 126 | if (unlikely(ehca_debug_level >= 2)) |
127 | opcode, arg1, arg2, arg3, arg4, arg5, arg6, arg7); | 127 | ehca_gen_dbg("opcode=%lx " HCALL7_REGS_FORMAT, |
128 | opcode, arg1, arg2, arg3, arg4, arg5, arg6, arg7); | ||
128 | 129 | ||
129 | for (i = 0; i < 5; i++) { | 130 | for (i = 0; i < 5; i++) { |
130 | /* serialize hCalls to work around firmware issue */ | 131 | /* serialize hCalls to work around firmware issue */ |
@@ -148,7 +149,8 @@ static long ehca_plpar_hcall_norets(unsigned long opcode, | |||
148 | opcode, ret, arg1, arg2, arg3, | 149 | opcode, ret, arg1, arg2, arg3, |
149 | arg4, arg5, arg6, arg7); | 150 | arg4, arg5, arg6, arg7); |
150 | else | 151 | else |
151 | ehca_gen_dbg("opcode=%lx ret=%li", opcode, ret); | 152 | if (unlikely(ehca_debug_level >= 2)) |
153 | ehca_gen_dbg("opcode=%lx ret=%li", opcode, ret); | ||
152 | 154 | ||
153 | return ret; | 155 | return ret; |
154 | } | 156 | } |
@@ -172,8 +174,10 @@ static long ehca_plpar_hcall9(unsigned long opcode, | |||
172 | int i, sleep_msecs; | 174 | int i, sleep_msecs; |
173 | unsigned long flags = 0; | 175 | unsigned long flags = 0; |
174 | 176 | ||
175 | ehca_gen_dbg("INPUT -- opcode=%lx " HCALL9_REGS_FORMAT, opcode, | 177 | if (unlikely(ehca_debug_level >= 2)) |
176 | arg1, arg2, arg3, arg4, arg5, arg6, arg7, arg8, arg9); | 178 | ehca_gen_dbg("INPUT -- opcode=%lx " HCALL9_REGS_FORMAT, opcode, |
179 | arg1, arg2, arg3, arg4, arg5, | ||
180 | arg6, arg7, arg8, arg9); | ||
177 | 181 | ||
178 | for (i = 0; i < 5; i++) { | 182 | for (i = 0; i < 5; i++) { |
179 | /* serialize hCalls to work around firmware issue */ | 183 | /* serialize hCalls to work around firmware issue */ |
@@ -201,7 +205,7 @@ static long ehca_plpar_hcall9(unsigned long opcode, | |||
201 | ret, outs[0], outs[1], outs[2], outs[3], | 205 | ret, outs[0], outs[1], outs[2], outs[3], |
202 | outs[4], outs[5], outs[6], outs[7], | 206 | outs[4], outs[5], outs[6], outs[7], |
203 | outs[8]); | 207 | outs[8]); |
204 | } else | 208 | } else if (unlikely(ehca_debug_level >= 2)) |
205 | ehca_gen_dbg("OUTPUT -- ret=%li " HCALL9_REGS_FORMAT, | 209 | ehca_gen_dbg("OUTPUT -- ret=%li " HCALL9_REGS_FORMAT, |
206 | ret, outs[0], outs[1], outs[2], outs[3], | 210 | ret, outs[0], outs[1], outs[2], outs[3], |
207 | outs[4], outs[5], outs[6], outs[7], | 211 | outs[4], outs[5], outs[6], outs[7], |
@@ -381,7 +385,7 @@ u64 hipz_h_query_port(const struct ipz_adapter_handle adapter_handle, | |||
381 | r_cb, /* r6 */ | 385 | r_cb, /* r6 */ |
382 | 0, 0, 0, 0); | 386 | 0, 0, 0, 0); |
383 | 387 | ||
384 | if (ehca_debug_level) | 388 | if (ehca_debug_level >= 2) |
385 | ehca_dmp(query_port_response_block, 64, "response_block"); | 389 | ehca_dmp(query_port_response_block, 64, "response_block"); |
386 | 390 | ||
387 | return ret; | 391 | return ret; |
@@ -731,9 +735,6 @@ u64 hipz_h_alloc_resource_mr(const struct ipz_adapter_handle adapter_handle, | |||
731 | u64 ret; | 735 | u64 ret; |
732 | u64 outs[PLPAR_HCALL9_BUFSIZE]; | 736 | u64 outs[PLPAR_HCALL9_BUFSIZE]; |
733 | 737 | ||
734 | ehca_gen_dbg("kernel PAGE_SIZE=%x access_ctrl=%016x " | ||
735 | "vaddr=%lx length=%lx", | ||
736 | (u32)PAGE_SIZE, access_ctrl, vaddr, length); | ||
737 | ret = ehca_plpar_hcall9(H_ALLOC_RESOURCE, outs, | 738 | ret = ehca_plpar_hcall9(H_ALLOC_RESOURCE, outs, |
738 | adapter_handle.handle, /* r4 */ | 739 | adapter_handle.handle, /* r4 */ |
739 | 5, /* r5 */ | 740 | 5, /* r5 */ |
@@ -758,7 +759,7 @@ u64 hipz_h_register_rpage_mr(const struct ipz_adapter_handle adapter_handle, | |||
758 | { | 759 | { |
759 | u64 ret; | 760 | u64 ret; |
760 | 761 | ||
761 | if (unlikely(ehca_debug_level >= 2)) { | 762 | if (unlikely(ehca_debug_level >= 3)) { |
762 | if (count > 1) { | 763 | if (count > 1) { |
763 | u64 *kpage; | 764 | u64 *kpage; |
764 | int i; | 765 | int i; |
diff --git a/drivers/infiniband/hw/mlx4/cq.c b/drivers/infiniband/hw/mlx4/cq.c index 3557e7edc9b6..5e570bb0bb6f 100644 --- a/drivers/infiniband/hw/mlx4/cq.c +++ b/drivers/infiniband/hw/mlx4/cq.c | |||
@@ -204,7 +204,7 @@ struct ib_cq *mlx4_ib_create_cq(struct ib_device *ibdev, int entries, int vector | |||
204 | 204 | ||
205 | uar = &to_mucontext(context)->uar; | 205 | uar = &to_mucontext(context)->uar; |
206 | } else { | 206 | } else { |
207 | err = mlx4_ib_db_alloc(dev, &cq->db, 1); | 207 | err = mlx4_db_alloc(dev->dev, &cq->db, 1); |
208 | if (err) | 208 | if (err) |
209 | goto err_cq; | 209 | goto err_cq; |
210 | 210 | ||
@@ -250,7 +250,7 @@ err_mtt: | |||
250 | 250 | ||
251 | err_db: | 251 | err_db: |
252 | if (!context) | 252 | if (!context) |
253 | mlx4_ib_db_free(dev, &cq->db); | 253 | mlx4_db_free(dev->dev, &cq->db); |
254 | 254 | ||
255 | err_cq: | 255 | err_cq: |
256 | kfree(cq); | 256 | kfree(cq); |
@@ -435,7 +435,7 @@ int mlx4_ib_destroy_cq(struct ib_cq *cq) | |||
435 | ib_umem_release(mcq->umem); | 435 | ib_umem_release(mcq->umem); |
436 | } else { | 436 | } else { |
437 | mlx4_ib_free_cq_buf(dev, &mcq->buf, cq->cqe + 1); | 437 | mlx4_ib_free_cq_buf(dev, &mcq->buf, cq->cqe + 1); |
438 | mlx4_ib_db_free(dev, &mcq->db); | 438 | mlx4_db_free(dev->dev, &mcq->db); |
439 | } | 439 | } |
440 | 440 | ||
441 | kfree(mcq); | 441 | kfree(mcq); |
diff --git a/drivers/infiniband/hw/mlx4/doorbell.c b/drivers/infiniband/hw/mlx4/doorbell.c index 1c36087aef14..8e342cc9baec 100644 --- a/drivers/infiniband/hw/mlx4/doorbell.c +++ b/drivers/infiniband/hw/mlx4/doorbell.c | |||
@@ -34,124 +34,6 @@ | |||
34 | 34 | ||
35 | #include "mlx4_ib.h" | 35 | #include "mlx4_ib.h" |
36 | 36 | ||
37 | struct mlx4_ib_db_pgdir { | ||
38 | struct list_head list; | ||
39 | DECLARE_BITMAP(order0, MLX4_IB_DB_PER_PAGE); | ||
40 | DECLARE_BITMAP(order1, MLX4_IB_DB_PER_PAGE / 2); | ||
41 | unsigned long *bits[2]; | ||
42 | __be32 *db_page; | ||
43 | dma_addr_t db_dma; | ||
44 | }; | ||
45 | |||
46 | static struct mlx4_ib_db_pgdir *mlx4_ib_alloc_db_pgdir(struct mlx4_ib_dev *dev) | ||
47 | { | ||
48 | struct mlx4_ib_db_pgdir *pgdir; | ||
49 | |||
50 | pgdir = kzalloc(sizeof *pgdir, GFP_KERNEL); | ||
51 | if (!pgdir) | ||
52 | return NULL; | ||
53 | |||
54 | bitmap_fill(pgdir->order1, MLX4_IB_DB_PER_PAGE / 2); | ||
55 | pgdir->bits[0] = pgdir->order0; | ||
56 | pgdir->bits[1] = pgdir->order1; | ||
57 | pgdir->db_page = dma_alloc_coherent(dev->ib_dev.dma_device, | ||
58 | PAGE_SIZE, &pgdir->db_dma, | ||
59 | GFP_KERNEL); | ||
60 | if (!pgdir->db_page) { | ||
61 | kfree(pgdir); | ||
62 | return NULL; | ||
63 | } | ||
64 | |||
65 | return pgdir; | ||
66 | } | ||
67 | |||
68 | static int mlx4_ib_alloc_db_from_pgdir(struct mlx4_ib_db_pgdir *pgdir, | ||
69 | struct mlx4_ib_db *db, int order) | ||
70 | { | ||
71 | int o; | ||
72 | int i; | ||
73 | |||
74 | for (o = order; o <= 1; ++o) { | ||
75 | i = find_first_bit(pgdir->bits[o], MLX4_IB_DB_PER_PAGE >> o); | ||
76 | if (i < MLX4_IB_DB_PER_PAGE >> o) | ||
77 | goto found; | ||
78 | } | ||
79 | |||
80 | return -ENOMEM; | ||
81 | |||
82 | found: | ||
83 | clear_bit(i, pgdir->bits[o]); | ||
84 | |||
85 | i <<= o; | ||
86 | |||
87 | if (o > order) | ||
88 | set_bit(i ^ 1, pgdir->bits[order]); | ||
89 | |||
90 | db->u.pgdir = pgdir; | ||
91 | db->index = i; | ||
92 | db->db = pgdir->db_page + db->index; | ||
93 | db->dma = pgdir->db_dma + db->index * 4; | ||
94 | db->order = order; | ||
95 | |||
96 | return 0; | ||
97 | } | ||
98 | |||
99 | int mlx4_ib_db_alloc(struct mlx4_ib_dev *dev, struct mlx4_ib_db *db, int order) | ||
100 | { | ||
101 | struct mlx4_ib_db_pgdir *pgdir; | ||
102 | int ret = 0; | ||
103 | |||
104 | mutex_lock(&dev->pgdir_mutex); | ||
105 | |||
106 | list_for_each_entry(pgdir, &dev->pgdir_list, list) | ||
107 | if (!mlx4_ib_alloc_db_from_pgdir(pgdir, db, order)) | ||
108 | goto out; | ||
109 | |||
110 | pgdir = mlx4_ib_alloc_db_pgdir(dev); | ||
111 | if (!pgdir) { | ||
112 | ret = -ENOMEM; | ||
113 | goto out; | ||
114 | } | ||
115 | |||
116 | list_add(&pgdir->list, &dev->pgdir_list); | ||
117 | |||
118 | /* This should never fail -- we just allocated an empty page: */ | ||
119 | WARN_ON(mlx4_ib_alloc_db_from_pgdir(pgdir, db, order)); | ||
120 | |||
121 | out: | ||
122 | mutex_unlock(&dev->pgdir_mutex); | ||
123 | |||
124 | return ret; | ||
125 | } | ||
126 | |||
127 | void mlx4_ib_db_free(struct mlx4_ib_dev *dev, struct mlx4_ib_db *db) | ||
128 | { | ||
129 | int o; | ||
130 | int i; | ||
131 | |||
132 | mutex_lock(&dev->pgdir_mutex); | ||
133 | |||
134 | o = db->order; | ||
135 | i = db->index; | ||
136 | |||
137 | if (db->order == 0 && test_bit(i ^ 1, db->u.pgdir->order0)) { | ||
138 | clear_bit(i ^ 1, db->u.pgdir->order0); | ||
139 | ++o; | ||
140 | } | ||
141 | |||
142 | i >>= o; | ||
143 | set_bit(i, db->u.pgdir->bits[o]); | ||
144 | |||
145 | if (bitmap_full(db->u.pgdir->order1, MLX4_IB_DB_PER_PAGE / 2)) { | ||
146 | dma_free_coherent(dev->ib_dev.dma_device, PAGE_SIZE, | ||
147 | db->u.pgdir->db_page, db->u.pgdir->db_dma); | ||
148 | list_del(&db->u.pgdir->list); | ||
149 | kfree(db->u.pgdir); | ||
150 | } | ||
151 | |||
152 | mutex_unlock(&dev->pgdir_mutex); | ||
153 | } | ||
154 | |||
155 | struct mlx4_ib_user_db_page { | 37 | struct mlx4_ib_user_db_page { |
156 | struct list_head list; | 38 | struct list_head list; |
157 | struct ib_umem *umem; | 39 | struct ib_umem *umem; |
@@ -160,7 +42,7 @@ struct mlx4_ib_user_db_page { | |||
160 | }; | 42 | }; |
161 | 43 | ||
162 | int mlx4_ib_db_map_user(struct mlx4_ib_ucontext *context, unsigned long virt, | 44 | int mlx4_ib_db_map_user(struct mlx4_ib_ucontext *context, unsigned long virt, |
163 | struct mlx4_ib_db *db) | 45 | struct mlx4_db *db) |
164 | { | 46 | { |
165 | struct mlx4_ib_user_db_page *page; | 47 | struct mlx4_ib_user_db_page *page; |
166 | struct ib_umem_chunk *chunk; | 48 | struct ib_umem_chunk *chunk; |
@@ -202,7 +84,7 @@ out: | |||
202 | return err; | 84 | return err; |
203 | } | 85 | } |
204 | 86 | ||
205 | void mlx4_ib_db_unmap_user(struct mlx4_ib_ucontext *context, struct mlx4_ib_db *db) | 87 | void mlx4_ib_db_unmap_user(struct mlx4_ib_ucontext *context, struct mlx4_db *db) |
206 | { | 88 | { |
207 | mutex_lock(&context->db_page_mutex); | 89 | mutex_lock(&context->db_page_mutex); |
208 | 90 | ||
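Editor's note: the doorbell allocator removed from mlx4_ib above moves into mlx4_core (the callers now use mlx4_db_alloc/mlx4_db_free). The scheme itself is a small buddy-style allocator: a 4 KB page holds 1024 32-bit doorbell records, order-0 requests take one slot and order-1 requests take an aligned pair, splitting a pair when needed. The sketch below is a simplified, self-contained version of that allocation step, not the kernel code.

#include <stdio.h>
#include <string.h>

#define DB_PER_PAGE 1024

static unsigned char order0[DB_PER_PAGE];       /* 1 = free single slot */
static unsigned char order1[DB_PER_PAGE / 2];   /* 1 = free aligned pair */

static int db_alloc(int order)
{
	int o, i;

	for (o = order; o <= 1; ++o) {
		int n = DB_PER_PAGE >> o;
		unsigned char *bits = o ? order1 : order0;

		for (i = 0; i < n; ++i)
			if (bits[i])
				goto found;
	}
	return -1;                      /* page is full */
found:
	(o ? order1 : order0)[i] = 0;
	i <<= o;
	if (o > order)
		order0[i ^ 1] = 1;      /* split a pair: its buddy stays free */
	return i;
}

int main(void)
{
	memset(order1, 1, sizeof(order1));      /* start with all pairs free */
	printf("order-0 index %d\n", db_alloc(0));
	printf("order-0 index %d\n", db_alloc(0));
	printf("order-1 index %d\n", db_alloc(1));
	return 0;
}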
diff --git a/drivers/infiniband/hw/mlx4/main.c b/drivers/infiniband/hw/mlx4/main.c index 4d9b5ac42202..4d61e32866c6 100644 --- a/drivers/infiniband/hw/mlx4/main.c +++ b/drivers/infiniband/hw/mlx4/main.c | |||
@@ -557,9 +557,6 @@ static void *mlx4_ib_add(struct mlx4_dev *dev) | |||
557 | goto err_uar; | 557 | goto err_uar; |
558 | MLX4_INIT_DOORBELL_LOCK(&ibdev->uar_lock); | 558 | MLX4_INIT_DOORBELL_LOCK(&ibdev->uar_lock); |
559 | 559 | ||
560 | INIT_LIST_HEAD(&ibdev->pgdir_list); | ||
561 | mutex_init(&ibdev->pgdir_mutex); | ||
562 | |||
563 | ibdev->dev = dev; | 560 | ibdev->dev = dev; |
564 | 561 | ||
565 | strlcpy(ibdev->ib_dev.name, "mlx4_%d", IB_DEVICE_NAME_MAX); | 562 | strlcpy(ibdev->ib_dev.name, "mlx4_%d", IB_DEVICE_NAME_MAX); |
diff --git a/drivers/infiniband/hw/mlx4/mlx4_ib.h b/drivers/infiniband/hw/mlx4/mlx4_ib.h index 9e637323c155..5cf994794d25 100644 --- a/drivers/infiniband/hw/mlx4/mlx4_ib.h +++ b/drivers/infiniband/hw/mlx4/mlx4_ib.h | |||
@@ -43,24 +43,6 @@ | |||
43 | #include <linux/mlx4/device.h> | 43 | #include <linux/mlx4/device.h> |
44 | #include <linux/mlx4/doorbell.h> | 44 | #include <linux/mlx4/doorbell.h> |
45 | 45 | ||
46 | enum { | ||
47 | MLX4_IB_DB_PER_PAGE = PAGE_SIZE / 4 | ||
48 | }; | ||
49 | |||
50 | struct mlx4_ib_db_pgdir; | ||
51 | struct mlx4_ib_user_db_page; | ||
52 | |||
53 | struct mlx4_ib_db { | ||
54 | __be32 *db; | ||
55 | union { | ||
56 | struct mlx4_ib_db_pgdir *pgdir; | ||
57 | struct mlx4_ib_user_db_page *user_page; | ||
58 | } u; | ||
59 | dma_addr_t dma; | ||
60 | int index; | ||
61 | int order; | ||
62 | }; | ||
63 | |||
64 | struct mlx4_ib_ucontext { | 46 | struct mlx4_ib_ucontext { |
65 | struct ib_ucontext ibucontext; | 47 | struct ib_ucontext ibucontext; |
66 | struct mlx4_uar uar; | 48 | struct mlx4_uar uar; |
@@ -88,7 +70,7 @@ struct mlx4_ib_cq { | |||
88 | struct mlx4_cq mcq; | 70 | struct mlx4_cq mcq; |
89 | struct mlx4_ib_cq_buf buf; | 71 | struct mlx4_ib_cq_buf buf; |
90 | struct mlx4_ib_cq_resize *resize_buf; | 72 | struct mlx4_ib_cq_resize *resize_buf; |
91 | struct mlx4_ib_db db; | 73 | struct mlx4_db db; |
92 | spinlock_t lock; | 74 | spinlock_t lock; |
93 | struct mutex resize_mutex; | 75 | struct mutex resize_mutex; |
94 | struct ib_umem *umem; | 76 | struct ib_umem *umem; |
@@ -127,7 +109,7 @@ struct mlx4_ib_qp { | |||
127 | struct mlx4_qp mqp; | 109 | struct mlx4_qp mqp; |
128 | struct mlx4_buf buf; | 110 | struct mlx4_buf buf; |
129 | 111 | ||
130 | struct mlx4_ib_db db; | 112 | struct mlx4_db db; |
131 | struct mlx4_ib_wq rq; | 113 | struct mlx4_ib_wq rq; |
132 | 114 | ||
133 | u32 doorbell_qpn; | 115 | u32 doorbell_qpn; |
@@ -154,7 +136,7 @@ struct mlx4_ib_srq { | |||
154 | struct ib_srq ibsrq; | 136 | struct ib_srq ibsrq; |
155 | struct mlx4_srq msrq; | 137 | struct mlx4_srq msrq; |
156 | struct mlx4_buf buf; | 138 | struct mlx4_buf buf; |
157 | struct mlx4_ib_db db; | 139 | struct mlx4_db db; |
158 | u64 *wrid; | 140 | u64 *wrid; |
159 | spinlock_t lock; | 141 | spinlock_t lock; |
160 | int head; | 142 | int head; |
@@ -175,9 +157,6 @@ struct mlx4_ib_dev { | |||
175 | struct mlx4_dev *dev; | 157 | struct mlx4_dev *dev; |
176 | void __iomem *uar_map; | 158 | void __iomem *uar_map; |
177 | 159 | ||
178 | struct list_head pgdir_list; | ||
179 | struct mutex pgdir_mutex; | ||
180 | |||
181 | struct mlx4_uar priv_uar; | 160 | struct mlx4_uar priv_uar; |
182 | u32 priv_pdn; | 161 | u32 priv_pdn; |
183 | MLX4_DECLARE_DOORBELL_LOCK(uar_lock); | 162 | MLX4_DECLARE_DOORBELL_LOCK(uar_lock); |
@@ -248,11 +227,9 @@ static inline struct mlx4_ib_ah *to_mah(struct ib_ah *ibah) | |||
248 | return container_of(ibah, struct mlx4_ib_ah, ibah); | 227 | return container_of(ibah, struct mlx4_ib_ah, ibah); |
249 | } | 228 | } |
250 | 229 | ||
251 | int mlx4_ib_db_alloc(struct mlx4_ib_dev *dev, struct mlx4_ib_db *db, int order); | ||
252 | void mlx4_ib_db_free(struct mlx4_ib_dev *dev, struct mlx4_ib_db *db); | ||
253 | int mlx4_ib_db_map_user(struct mlx4_ib_ucontext *context, unsigned long virt, | 230 | int mlx4_ib_db_map_user(struct mlx4_ib_ucontext *context, unsigned long virt, |
254 | struct mlx4_ib_db *db); | 231 | struct mlx4_db *db); |
255 | void mlx4_ib_db_unmap_user(struct mlx4_ib_ucontext *context, struct mlx4_ib_db *db); | 232 | void mlx4_ib_db_unmap_user(struct mlx4_ib_ucontext *context, struct mlx4_db *db); |
256 | 233 | ||
257 | struct ib_mr *mlx4_ib_get_dma_mr(struct ib_pd *pd, int acc); | 234 | struct ib_mr *mlx4_ib_get_dma_mr(struct ib_pd *pd, int acc); |
258 | int mlx4_ib_umem_write_mtt(struct mlx4_ib_dev *dev, struct mlx4_mtt *mtt, | 235 | int mlx4_ib_umem_write_mtt(struct mlx4_ib_dev *dev, struct mlx4_mtt *mtt, |
diff --git a/drivers/infiniband/hw/mlx4/qp.c b/drivers/infiniband/hw/mlx4/qp.c index b75efae7e449..80ea8b9e7761 100644 --- a/drivers/infiniband/hw/mlx4/qp.c +++ b/drivers/infiniband/hw/mlx4/qp.c | |||
@@ -514,7 +514,7 @@ static int create_qp_common(struct mlx4_ib_dev *dev, struct ib_pd *pd, | |||
514 | goto err; | 514 | goto err; |
515 | 515 | ||
516 | if (!init_attr->srq) { | 516 | if (!init_attr->srq) { |
517 | err = mlx4_ib_db_alloc(dev, &qp->db, 0); | 517 | err = mlx4_db_alloc(dev->dev, &qp->db, 0); |
518 | if (err) | 518 | if (err) |
519 | goto err; | 519 | goto err; |
520 | 520 | ||
@@ -580,7 +580,7 @@ err_buf: | |||
580 | 580 | ||
581 | err_db: | 581 | err_db: |
582 | if (!pd->uobject && !init_attr->srq) | 582 | if (!pd->uobject && !init_attr->srq) |
583 | mlx4_ib_db_free(dev, &qp->db); | 583 | mlx4_db_free(dev->dev, &qp->db); |
584 | 584 | ||
585 | err: | 585 | err: |
586 | return err; | 586 | return err; |
@@ -666,7 +666,7 @@ static void destroy_qp_common(struct mlx4_ib_dev *dev, struct mlx4_ib_qp *qp, | |||
666 | kfree(qp->rq.wrid); | 666 | kfree(qp->rq.wrid); |
667 | mlx4_buf_free(dev->dev, qp->buf_size, &qp->buf); | 667 | mlx4_buf_free(dev->dev, qp->buf_size, &qp->buf); |
668 | if (!qp->ibqp.srq) | 668 | if (!qp->ibqp.srq) |
669 | mlx4_ib_db_free(dev, &qp->db); | 669 | mlx4_db_free(dev->dev, &qp->db); |
670 | } | 670 | } |
671 | } | 671 | } |
672 | 672 | ||
diff --git a/drivers/infiniband/hw/mlx4/srq.c b/drivers/infiniband/hw/mlx4/srq.c index beaa3b06cf58..204619702f9d 100644 --- a/drivers/infiniband/hw/mlx4/srq.c +++ b/drivers/infiniband/hw/mlx4/srq.c | |||
@@ -129,7 +129,7 @@ struct ib_srq *mlx4_ib_create_srq(struct ib_pd *pd, | |||
129 | if (err) | 129 | if (err) |
130 | goto err_mtt; | 130 | goto err_mtt; |
131 | } else { | 131 | } else { |
132 | err = mlx4_ib_db_alloc(dev, &srq->db, 0); | 132 | err = mlx4_db_alloc(dev->dev, &srq->db, 0); |
133 | if (err) | 133 | if (err) |
134 | goto err_srq; | 134 | goto err_srq; |
135 | 135 | ||
@@ -200,7 +200,7 @@ err_buf: | |||
200 | 200 | ||
201 | err_db: | 201 | err_db: |
202 | if (!pd->uobject) | 202 | if (!pd->uobject) |
203 | mlx4_ib_db_free(dev, &srq->db); | 203 | mlx4_db_free(dev->dev, &srq->db); |
204 | 204 | ||
205 | err_srq: | 205 | err_srq: |
206 | kfree(srq); | 206 | kfree(srq); |
@@ -267,7 +267,7 @@ int mlx4_ib_destroy_srq(struct ib_srq *srq) | |||
267 | kfree(msrq->wrid); | 267 | kfree(msrq->wrid); |
268 | mlx4_buf_free(dev->dev, msrq->msrq.max << msrq->msrq.wqe_shift, | 268 | mlx4_buf_free(dev->dev, msrq->msrq.max << msrq->msrq.wqe_shift, |
269 | &msrq->buf); | 269 | &msrq->buf); |
270 | mlx4_ib_db_free(dev, &msrq->db); | 270 | mlx4_db_free(dev->dev, &msrq->db); |
271 | } | 271 | } |
272 | 272 | ||
273 | kfree(msrq); | 273 | kfree(msrq); |
diff --git a/drivers/infiniband/hw/nes/nes.c b/drivers/infiniband/hw/nes/nes.c index b046262ed638..a4e9269a29bd 100644 --- a/drivers/infiniband/hw/nes/nes.c +++ b/drivers/infiniband/hw/nes/nes.c | |||
@@ -139,8 +139,9 @@ static int nes_inetaddr_event(struct notifier_block *notifier, | |||
139 | 139 | ||
140 | addr = ntohl(ifa->ifa_address); | 140 | addr = ntohl(ifa->ifa_address); |
141 | mask = ntohl(ifa->ifa_mask); | 141 | mask = ntohl(ifa->ifa_mask); |
142 | nes_debug(NES_DBG_NETDEV, "nes_inetaddr_event: ip address %08X, netmask %08X.\n", | 142 | nes_debug(NES_DBG_NETDEV, "nes_inetaddr_event: ip address " NIPQUAD_FMT |
143 | addr, mask); | 143 | ", netmask " NIPQUAD_FMT ".\n", |
144 | HIPQUAD(addr), HIPQUAD(mask)); | ||
144 | list_for_each_entry(nesdev, &nes_dev_list, list) { | 145 | list_for_each_entry(nesdev, &nes_dev_list, list) { |
145 | nes_debug(NES_DBG_NETDEV, "Nesdev list entry = 0x%p. (%s)\n", | 146 | nes_debug(NES_DBG_NETDEV, "Nesdev list entry = 0x%p. (%s)\n", |
146 | nesdev, nesdev->netdev[0]->name); | 147 | nesdev, nesdev->netdev[0]->name); |
@@ -353,13 +354,11 @@ struct ib_qp *nes_get_qp(struct ib_device *device, int qpn) | |||
353 | */ | 354 | */ |
354 | static void nes_print_macaddr(struct net_device *netdev) | 355 | static void nes_print_macaddr(struct net_device *netdev) |
355 | { | 356 | { |
356 | nes_debug(NES_DBG_INIT, "%s: MAC %02X:%02X:%02X:%02X:%02X:%02X, IRQ %u\n", | 357 | DECLARE_MAC_BUF(mac); |
357 | netdev->name, | ||
358 | netdev->dev_addr[0], netdev->dev_addr[1], netdev->dev_addr[2], | ||
359 | netdev->dev_addr[3], netdev->dev_addr[4], netdev->dev_addr[5], | ||
360 | netdev->irq); | ||
361 | } | ||
362 | 358 | ||
359 | nes_debug(NES_DBG_INIT, "%s: %s, IRQ %u\n", | ||
360 | netdev->name, print_mac(mac, netdev->dev_addr), netdev->irq); | ||
361 | } | ||
363 | 362 | ||
364 | /** | 363 | /** |
365 | * nes_interrupt - handle interrupts | 364 | * nes_interrupt - handle interrupts |
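Editor's note: the nes hunks above replace hand-rolled six-byte MAC printing with print_mac()/DECLARE_MAC_BUF, which format into a caller-supplied buffer. The helper below sketches that idea in plain C; format_mac() is a stand-in, not the kernel API.

#include <stdio.h>

static const char *format_mac(char buf[18], const unsigned char addr[6])
{
	snprintf(buf, 18, "%02x:%02x:%02x:%02x:%02x:%02x",
		 addr[0], addr[1], addr[2], addr[3], addr[4], addr[5]);
	return buf;
}

int main(void)
{
	unsigned char addr[6] = { 0x00, 0x12, 0x55, 0xaa, 0xbb, 0x01 };
	char mac[18];

	printf("eth0: MAC %s, IRQ %u\n", format_mac(mac, addr), 17u);
	return 0;
}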
diff --git a/drivers/infiniband/hw/nes/nes_cm.c b/drivers/infiniband/hw/nes/nes_cm.c index d0738623bcf3..d940fc27129a 100644 --- a/drivers/infiniband/hw/nes/nes_cm.c +++ b/drivers/infiniband/hw/nes/nes_cm.c | |||
@@ -852,8 +852,8 @@ static struct nes_cm_node *find_node(struct nes_cm_core *cm_core, | |||
852 | /* get a handle on the hte */ | 852 | /* get a handle on the hte */ |
853 | hte = &cm_core->connected_nodes; | 853 | hte = &cm_core->connected_nodes; |
854 | 854 | ||
855 | nes_debug(NES_DBG_CM, "Searching for an owner node:%x:%x from core %p->%p\n", | 855 | nes_debug(NES_DBG_CM, "Searching for an owner node: " NIPQUAD_FMT ":%x from core %p->%p\n", |
856 | loc_addr, loc_port, cm_core, hte); | 856 | HIPQUAD(loc_addr), loc_port, cm_core, hte); |
857 | 857 | ||
858 | /* walk list and find cm_node associated with this session ID */ | 858 | /* walk list and find cm_node associated with this session ID */ |
859 | spin_lock_irqsave(&cm_core->ht_lock, flags); | 859 | spin_lock_irqsave(&cm_core->ht_lock, flags); |
@@ -902,8 +902,8 @@ static struct nes_cm_listener *find_listener(struct nes_cm_core *cm_core, | |||
902 | } | 902 | } |
903 | spin_unlock_irqrestore(&cm_core->listen_list_lock, flags); | 903 | spin_unlock_irqrestore(&cm_core->listen_list_lock, flags); |
904 | 904 | ||
905 | nes_debug(NES_DBG_CM, "Unable to find listener- %x:%x\n", | 905 | nes_debug(NES_DBG_CM, "Unable to find listener for " NIPQUAD_FMT ":%x\n", |
906 | dst_addr, dst_port); | 906 | HIPQUAD(dst_addr), dst_port); |
907 | 907 | ||
908 | /* no listener */ | 908 | /* no listener */ |
909 | return NULL; | 909 | return NULL; |
@@ -1054,6 +1054,7 @@ static struct nes_cm_node *make_cm_node(struct nes_cm_core *cm_core, | |||
1054 | int arpindex = 0; | 1054 | int arpindex = 0; |
1055 | struct nes_device *nesdev; | 1055 | struct nes_device *nesdev; |
1056 | struct nes_adapter *nesadapter; | 1056 | struct nes_adapter *nesadapter; |
1057 | DECLARE_MAC_BUF(mac); | ||
1057 | 1058 | ||
1058 | /* create an hte and cm_node for this instance */ | 1059 | /* create an hte and cm_node for this instance */ |
1059 | cm_node = kzalloc(sizeof(*cm_node), GFP_ATOMIC); | 1060 | cm_node = kzalloc(sizeof(*cm_node), GFP_ATOMIC); |
@@ -1066,8 +1067,9 @@ static struct nes_cm_node *make_cm_node(struct nes_cm_core *cm_core, | |||
1066 | cm_node->loc_port = cm_info->loc_port; | 1067 | cm_node->loc_port = cm_info->loc_port; |
1067 | cm_node->rem_port = cm_info->rem_port; | 1068 | cm_node->rem_port = cm_info->rem_port; |
1068 | cm_node->send_write0 = send_first; | 1069 | cm_node->send_write0 = send_first; |
1069 | nes_debug(NES_DBG_CM, "Make node addresses : loc = %x:%x, rem = %x:%x\n", | 1070 | nes_debug(NES_DBG_CM, "Make node addresses : loc = " NIPQUAD_FMT ":%x, rem = " NIPQUAD_FMT ":%x\n", |
1070 | cm_node->loc_addr, cm_node->loc_port, cm_node->rem_addr, cm_node->rem_port); | 1071 | HIPQUAD(cm_node->loc_addr), cm_node->loc_port, |
1072 | HIPQUAD(cm_node->rem_addr), cm_node->rem_port); | ||
1071 | cm_node->listener = listener; | 1073 | cm_node->listener = listener; |
1072 | cm_node->netdev = nesvnic->netdev; | 1074 | cm_node->netdev = nesvnic->netdev; |
1073 | cm_node->cm_id = cm_info->cm_id; | 1075 | cm_node->cm_id = cm_info->cm_id; |
@@ -1116,11 +1118,8 @@ static struct nes_cm_node *make_cm_node(struct nes_cm_core *cm_core, | |||
1116 | 1118 | ||
1117 | /* copy the mac addr to node context */ | 1119 | /* copy the mac addr to node context */ |
1118 | memcpy(cm_node->rem_mac, nesadapter->arp_table[arpindex].mac_addr, ETH_ALEN); | 1120 | memcpy(cm_node->rem_mac, nesadapter->arp_table[arpindex].mac_addr, ETH_ALEN); |
1119 | nes_debug(NES_DBG_CM, "Remote mac addr from arp table:%02x," | 1121 | nes_debug(NES_DBG_CM, "Remote mac addr from arp table: %s\n", |
1120 | " %02x, %02x, %02x, %02x, %02x\n", | 1122 | print_mac(mac, cm_node->rem_mac)); |
1121 | cm_node->rem_mac[0], cm_node->rem_mac[1], | ||
1122 | cm_node->rem_mac[2], cm_node->rem_mac[3], | ||
1123 | cm_node->rem_mac[4], cm_node->rem_mac[5]); | ||
1124 | 1123 | ||
1125 | add_hte_node(cm_core, cm_node); | 1124 | add_hte_node(cm_core, cm_node); |
1126 | atomic_inc(&cm_nodes_created); | 1125 | atomic_inc(&cm_nodes_created); |
@@ -1850,8 +1849,10 @@ static int mini_cm_recv_pkt(struct nes_cm_core *cm_core, struct nes_vnic *nesvni | |||
1850 | nfo.rem_addr = ntohl(iph->saddr); | 1849 | nfo.rem_addr = ntohl(iph->saddr); |
1851 | nfo.rem_port = ntohs(tcph->source); | 1850 | nfo.rem_port = ntohs(tcph->source); |
1852 | 1851 | ||
1853 | nes_debug(NES_DBG_CM, "Received packet: dest=0x%08X:0x%04X src=0x%08X:0x%04X\n", | 1852 | nes_debug(NES_DBG_CM, "Received packet: dest=" NIPQUAD_FMT |
1854 | iph->daddr, tcph->dest, iph->saddr, tcph->source); | 1853 | ":0x%04X src=" NIPQUAD_FMT ":0x%04X\n", |
1854 | NIPQUAD(iph->daddr), tcph->dest, | ||
1855 | NIPQUAD(iph->saddr), tcph->source); | ||
1855 | 1856 | ||
1856 | /* note: this call is going to increment cm_node ref count */ | 1857 | /* note: this call is going to increment cm_node ref count */ |
1857 | cm_node = find_node(cm_core, | 1858 | cm_node = find_node(cm_core, |
diff --git a/drivers/infiniband/hw/nes/nes_hw.c b/drivers/infiniband/hw/nes/nes_hw.c index aa53aab91bf8..08964cc7e98a 100644 --- a/drivers/infiniband/hw/nes/nes_hw.c +++ b/drivers/infiniband/hw/nes/nes_hw.c | |||
@@ -636,6 +636,15 @@ static unsigned int nes_reset_adapter_ne020(struct nes_device *nesdev, u8 *OneG_ | |||
636 | nes_debug(NES_DBG_INIT, "Did not see full soft reset done.\n"); | 636 | nes_debug(NES_DBG_INIT, "Did not see full soft reset done.\n"); |
637 | return 0; | 637 | return 0; |
638 | } | 638 | } |
639 | |||
640 | i = 0; | ||
641 | while ((nes_read_indexed(nesdev, NES_IDX_INT_CPU_STATUS) != 0x80) && i++ < 10000) | ||
642 | mdelay(1); | ||
643 | if (i >= 10000) { | ||
644 | printk(KERN_ERR PFX "Internal CPU not ready, status = %02X\n", | ||
645 | nes_read_indexed(nesdev, NES_IDX_INT_CPU_STATUS)); | ||
646 | return 0; | ||
647 | } | ||
639 | } | 648 | } |
640 | 649 | ||
641 | /* port reset */ | 650 | /* port reset */ |
@@ -684,17 +693,6 @@ static unsigned int nes_reset_adapter_ne020(struct nes_device *nesdev, u8 *OneG_ | |||
684 | } | 693 | } |
685 | } | 694 | } |
686 | 695 | ||
687 | |||
688 | |||
689 | i = 0; | ||
690 | while ((nes_read_indexed(nesdev, NES_IDX_INT_CPU_STATUS) != 0x80) && i++ < 10000) | ||
691 | mdelay(1); | ||
692 | if (i >= 10000) { | ||
693 | printk(KERN_ERR PFX "Internal CPU not ready, status = %02X\n", | ||
694 | nes_read_indexed(nesdev, NES_IDX_INT_CPU_STATUS)); | ||
695 | return 0; | ||
696 | } | ||
697 | |||
698 | return port_count; | 696 | return port_count; |
699 | } | 697 | } |
700 | 698 | ||
diff --git a/drivers/infiniband/hw/nes/nes_hw.h b/drivers/infiniband/hw/nes/nes_hw.h index b7e2844f096b..8f36e231bdf5 100644 --- a/drivers/infiniband/hw/nes/nes_hw.h +++ b/drivers/infiniband/hw/nes/nes_hw.h | |||
@@ -905,7 +905,7 @@ struct nes_hw_qp { | |||
905 | }; | 905 | }; |
906 | 906 | ||
907 | struct nes_hw_cq { | 907 | struct nes_hw_cq { |
908 | struct nes_hw_cqe volatile *cq_vbase; /* PCI memory for host rings */ | 908 | struct nes_hw_cqe *cq_vbase; /* PCI memory for host rings */ |
909 | void (*ce_handler)(struct nes_device *nesdev, struct nes_hw_cq *cq); | 909 | void (*ce_handler)(struct nes_device *nesdev, struct nes_hw_cq *cq); |
910 | dma_addr_t cq_pbase; /* PCI memory for host rings */ | 910 | dma_addr_t cq_pbase; /* PCI memory for host rings */ |
911 | u16 cq_head; | 911 | u16 cq_head; |
diff --git a/drivers/infiniband/hw/nes/nes_nic.c b/drivers/infiniband/hw/nes/nes_nic.c index 01cd0effc492..e5366b013c1a 100644 --- a/drivers/infiniband/hw/nes/nes_nic.c +++ b/drivers/infiniband/hw/nes/nes_nic.c | |||
@@ -787,16 +787,14 @@ static int nes_netdev_set_mac_address(struct net_device *netdev, void *p) | |||
787 | int i; | 787 | int i; |
788 | u32 macaddr_low; | 788 | u32 macaddr_low; |
789 | u16 macaddr_high; | 789 | u16 macaddr_high; |
790 | DECLARE_MAC_BUF(mac); | ||
790 | 791 | ||
791 | if (!is_valid_ether_addr(mac_addr->sa_data)) | 792 | if (!is_valid_ether_addr(mac_addr->sa_data)) |
792 | return -EADDRNOTAVAIL; | 793 | return -EADDRNOTAVAIL; |
793 | 794 | ||
794 | memcpy(netdev->dev_addr, mac_addr->sa_data, netdev->addr_len); | 795 | memcpy(netdev->dev_addr, mac_addr->sa_data, netdev->addr_len); |
795 | printk(PFX "%s: Address length = %d, Address = %02X%02X%02X%02X%02X%02X..\n", | 796 | printk(PFX "%s: Address length = %d, Address = %s\n", |
796 | __func__, netdev->addr_len, | 797 | __func__, netdev->addr_len, print_mac(mac, mac_addr->sa_data)); |
797 | mac_addr->sa_data[0], mac_addr->sa_data[1], | ||
798 | mac_addr->sa_data[2], mac_addr->sa_data[3], | ||
799 | mac_addr->sa_data[4], mac_addr->sa_data[5]); | ||
800 | macaddr_high = ((u16)netdev->dev_addr[0]) << 8; | 798 | macaddr_high = ((u16)netdev->dev_addr[0]) << 8; |
801 | macaddr_high += (u16)netdev->dev_addr[1]; | 799 | macaddr_high += (u16)netdev->dev_addr[1]; |
802 | macaddr_low = ((u32)netdev->dev_addr[2]) << 24; | 800 | macaddr_low = ((u32)netdev->dev_addr[2]) << 24; |
@@ -878,11 +876,11 @@ static void nes_netdev_set_multicast_list(struct net_device *netdev) | |||
878 | if (mc_nic_index < 0) | 876 | if (mc_nic_index < 0) |
879 | mc_nic_index = nesvnic->nic_index; | 877 | mc_nic_index = nesvnic->nic_index; |
880 | if (multicast_addr) { | 878 | if (multicast_addr) { |
881 | nes_debug(NES_DBG_NIC_RX, "Assigning MC Address = %02X%02X%02X%02X%02X%02X to register 0x%04X nic_idx=%d\n", | 879 | DECLARE_MAC_BUF(mac); |
882 | multicast_addr->dmi_addr[0], multicast_addr->dmi_addr[1], | 880 | nes_debug(NES_DBG_NIC_RX, "Assigning MC Address %s to register 0x%04X nic_idx=%d\n", |
883 | multicast_addr->dmi_addr[2], multicast_addr->dmi_addr[3], | 881 | print_mac(mac, multicast_addr->dmi_addr), |
884 | multicast_addr->dmi_addr[4], multicast_addr->dmi_addr[5], | 882 | perfect_filter_register_address+(mc_index * 8), |
885 | perfect_filter_register_address+(mc_index * 8), mc_nic_index); | 883 | mc_nic_index); |
886 | macaddr_high = ((u16)multicast_addr->dmi_addr[0]) << 8; | 884 | macaddr_high = ((u16)multicast_addr->dmi_addr[0]) << 8; |
887 | macaddr_high += (u16)multicast_addr->dmi_addr[1]; | 885 | macaddr_high += (u16)multicast_addr->dmi_addr[1]; |
888 | macaddr_low = ((u32)multicast_addr->dmi_addr[2]) << 24; | 886 | macaddr_low = ((u32)multicast_addr->dmi_addr[2]) << 24; |
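
The two nes_nic.c hunks above swap open-coded "%02X%02X..." formatting for the
shared MAC-printing helpers. A minimal sketch of that pattern, assuming the
DECLARE_MAC_BUF()/print_mac() helpers from linux/kernel.h and linux/if_ether.h
of this kernel generation; the function below is illustrative, not part of the
patch:

    #include <linux/kernel.h>
    #include <linux/if_ether.h>

    /* Hypothetical helper: log a 6-byte MAC without six %02X arguments. */
    static void example_log_mac(const unsigned char *addr)
    {
            DECLARE_MAC_BUF(mac);   /* 18-byte scratch buffer, "xx:xx:...:xx" */

            printk(KERN_INFO "MAC address = %s\n", print_mac(mac, addr));
    }
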
diff --git a/drivers/infiniband/hw/nes/nes_utils.c b/drivers/infiniband/hw/nes/nes_utils.c index f9db07c2717d..c6d5631a6995 100644 --- a/drivers/infiniband/hw/nes/nes_utils.c +++ b/drivers/infiniband/hw/nes/nes_utils.c | |||
@@ -660,7 +660,9 @@ int nes_arp_table(struct nes_device *nesdev, u32 ip_addr, u8 *mac_addr, u32 acti | |||
660 | 660 | ||
661 | /* DELETE or RESOLVE */ | 661 | /* DELETE or RESOLVE */ |
662 | if (arp_index == nesadapter->arp_table_size) { | 662 | if (arp_index == nesadapter->arp_table_size) { |
663 | nes_debug(NES_DBG_NETDEV, "mac address not in ARP table - cannot delete or resolve\n"); | 663 | nes_debug(NES_DBG_NETDEV, "MAC for " NIPQUAD_FMT " not in ARP table - cannot %s\n", |
664 | HIPQUAD(ip_addr), | ||
665 | action == NES_ARP_RESOLVE ? "resolve" : "delete"); | ||
664 | return -1; | 666 | return -1; |
665 | } | 667 | } |
666 | 668 | ||
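
The reworked message above also reports which IPv4 address missed the ARP
table. NIPQUAD_FMT expands to "%u.%u.%u.%u" and HIPQUAD() splits a
host-byte-order u32 into its four octets, so the same idiom works anywhere a
host-order address needs logging. A small sketch under that assumption
(hypothetical helper, macros taken from linux/kernel.h of this era):

    #include <linux/kernel.h>

    /* Hypothetical helper: log an IPv4 address kept in host byte order. */
    static void example_log_ipv4(u32 ip_addr)
    {
            printk(KERN_DEBUG "looking up " NIPQUAD_FMT "\n", HIPQUAD(ip_addr));
    }
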
diff --git a/drivers/infiniband/hw/nes/nes_verbs.c b/drivers/infiniband/hw/nes/nes_verbs.c index f9a5d4390892..ee74f7c7a6da 100644 --- a/drivers/infiniband/hw/nes/nes_verbs.c +++ b/drivers/infiniband/hw/nes/nes_verbs.c | |||
@@ -1976,7 +1976,7 @@ static int nes_destroy_cq(struct ib_cq *ib_cq) | |||
1976 | 1976 | ||
1977 | if (nescq->cq_mem_size) | 1977 | if (nescq->cq_mem_size) |
1978 | pci_free_consistent(nesdev->pcidev, nescq->cq_mem_size, | 1978 | pci_free_consistent(nesdev->pcidev, nescq->cq_mem_size, |
1979 | (void *)nescq->hw_cq.cq_vbase, nescq->hw_cq.cq_pbase); | 1979 | nescq->hw_cq.cq_vbase, nescq->hw_cq.cq_pbase); |
1980 | kfree(nescq); | 1980 | kfree(nescq); |
1981 | 1981 | ||
1982 | return ret; | 1982 | return ret; |
@@ -3610,6 +3610,12 @@ static int nes_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *entry) | |||
3610 | while (cqe_count < num_entries) { | 3610 | while (cqe_count < num_entries) { |
3611 | if (le32_to_cpu(nescq->hw_cq.cq_vbase[head].cqe_words[NES_CQE_OPCODE_IDX]) & | 3611 | if (le32_to_cpu(nescq->hw_cq.cq_vbase[head].cqe_words[NES_CQE_OPCODE_IDX]) & |
3612 | NES_CQE_VALID) { | 3612 | NES_CQE_VALID) { |
3613 | /* | ||
3614 | * Make sure we read CQ entry contents *after* | ||
3615 | * we've checked the valid bit. | ||
3616 | */ | ||
3617 | rmb(); | ||
3618 | |||
3613 | cqe = nescq->hw_cq.cq_vbase[head]; | 3619 | cqe = nescq->hw_cq.cq_vbase[head]; |
3614 | nescq->hw_cq.cq_vbase[head].cqe_words[NES_CQE_OPCODE_IDX] = 0; | 3620 | nescq->hw_cq.cq_vbase[head].cqe_words[NES_CQE_OPCODE_IDX] = 0; |
3615 | u32temp = le32_to_cpu(cqe.cqe_words[NES_CQE_COMP_COMP_CTX_LOW_IDX]); | 3621 | u32temp = le32_to_cpu(cqe.cqe_words[NES_CQE_COMP_COMP_CTX_LOW_IDX]); |
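
The rmb() added here pairs with the nes_hw.h hunk above that drops the
volatile qualifier from cq_vbase: rather than leaning on volatile reads, the
poll loop now orders its accesses explicitly -- test the valid/ownership bit
first, then read the rest of the DMA-written entry only after a read barrier.
A minimal sketch of that pattern with made-up structure and function names
(not the driver's own; rmb() lives in asm/system.h in kernels of this
vintage):

    #include <linux/types.h>
    #include <asm/byteorder.h>
    #include <asm/system.h>

    struct example_cqe {
            __le32 flags;           /* bit 31 = valid bit, written by the HCA */
            __le32 payload[15];
    };

    static int example_poll_one(struct example_cqe *cqe)
    {
            if (!(le32_to_cpu(cqe->flags) & (1u << 31)))
                    return 0;               /* no new completion */

            rmb();  /* read the payload only after the valid bit was seen */

            return (int)le32_to_cpu(cqe->payload[0]);
    }
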
diff --git a/drivers/infiniband/ulp/ipoib/ipoib.h b/drivers/infiniband/ulp/ipoib/ipoib.h index 73b2b176ad0e..f1f142dc64b1 100644 --- a/drivers/infiniband/ulp/ipoib/ipoib.h +++ b/drivers/infiniband/ulp/ipoib/ipoib.h | |||
@@ -56,11 +56,11 @@ | |||
56 | /* constants */ | 56 | /* constants */ |
57 | 57 | ||
58 | enum { | 58 | enum { |
59 | IPOIB_PACKET_SIZE = 2048, | ||
60 | IPOIB_BUF_SIZE = IPOIB_PACKET_SIZE + IB_GRH_BYTES, | ||
61 | |||
62 | IPOIB_ENCAP_LEN = 4, | 59 | IPOIB_ENCAP_LEN = 4, |
63 | 60 | ||
61 | IPOIB_UD_HEAD_SIZE = IB_GRH_BYTES + IPOIB_ENCAP_LEN, | ||
62 | IPOIB_UD_RX_SG = 2, /* max buffer needed for 4K mtu */ | ||
63 | |||
64 | IPOIB_CM_MTU = 0x10000 - 0x10, /* padding to align header to 16 */ | 64 | IPOIB_CM_MTU = 0x10000 - 0x10, /* padding to align header to 16 */ |
65 | IPOIB_CM_BUF_SIZE = IPOIB_CM_MTU + IPOIB_ENCAP_LEN, | 65 | IPOIB_CM_BUF_SIZE = IPOIB_CM_MTU + IPOIB_ENCAP_LEN, |
66 | IPOIB_CM_HEAD_SIZE = IPOIB_CM_BUF_SIZE % PAGE_SIZE, | 66 | IPOIB_CM_HEAD_SIZE = IPOIB_CM_BUF_SIZE % PAGE_SIZE, |
@@ -139,7 +139,7 @@ struct ipoib_mcast { | |||
139 | 139 | ||
140 | struct ipoib_rx_buf { | 140 | struct ipoib_rx_buf { |
141 | struct sk_buff *skb; | 141 | struct sk_buff *skb; |
142 | u64 mapping; | 142 | u64 mapping[IPOIB_UD_RX_SG]; |
143 | }; | 143 | }; |
144 | 144 | ||
145 | struct ipoib_tx_buf { | 145 | struct ipoib_tx_buf { |
@@ -294,6 +294,7 @@ struct ipoib_dev_priv { | |||
294 | 294 | ||
295 | unsigned int admin_mtu; | 295 | unsigned int admin_mtu; |
296 | unsigned int mcast_mtu; | 296 | unsigned int mcast_mtu; |
297 | unsigned int max_ib_mtu; | ||
297 | 298 | ||
298 | struct ipoib_rx_buf *rx_ring; | 299 | struct ipoib_rx_buf *rx_ring; |
299 | 300 | ||
@@ -305,6 +306,9 @@ struct ipoib_dev_priv { | |||
305 | struct ib_send_wr tx_wr; | 306 | struct ib_send_wr tx_wr; |
306 | unsigned tx_outstanding; | 307 | unsigned tx_outstanding; |
307 | 308 | ||
309 | struct ib_recv_wr rx_wr; | ||
310 | struct ib_sge rx_sge[IPOIB_UD_RX_SG]; | ||
311 | |||
308 | struct ib_wc ibwc[IPOIB_NUM_WC]; | 312 | struct ib_wc ibwc[IPOIB_NUM_WC]; |
309 | 313 | ||
310 | struct list_head dead_ahs; | 314 | struct list_head dead_ahs; |
@@ -366,6 +370,14 @@ struct ipoib_neigh { | |||
366 | struct list_head list; | 370 | struct list_head list; |
367 | }; | 371 | }; |
368 | 372 | ||
373 | #define IPOIB_UD_MTU(ib_mtu) (ib_mtu - IPOIB_ENCAP_LEN) | ||
374 | #define IPOIB_UD_BUF_SIZE(ib_mtu) (ib_mtu + IB_GRH_BYTES) | ||
375 | |||
376 | static inline int ipoib_ud_need_sg(unsigned int ib_mtu) | ||
377 | { | ||
378 | return IPOIB_UD_BUF_SIZE(ib_mtu) > PAGE_SIZE; | ||
379 | } | ||
380 | |||
369 | /* | 381 | /* |
370 | * We stash a pointer to our private neighbour information after our | 382 | * We stash a pointer to our private neighbour information after our |
371 | * hardware address in neigh->ha. The ALIGN() expression here makes | 383 | * hardware address in neigh->ha. The ALIGN() expression here makes |
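
The new macros make the UD buffer arithmetic explicit and motivate the
two-entry mapping[] array. Worked example (illustrative, assuming the common
4096-byte PAGE_SIZE): with the 4 KB IB MTU this series enables,
IPOIB_UD_BUF_SIZE(4096) = 4096 + 40 bytes of GRH = 4136 bytes, which no longer
fits one page, so ipoib_ud_need_sg() is true and each receive is split into an
IPOIB_UD_HEAD_SIZE linear part plus one page fragment. With the traditional
2 KB MTU, IPOIB_UD_BUF_SIZE(2048) = 2088 bytes still fits in a single page and
only one scatter/gather entry is posted; on architectures with larger pages
the single-buffer path is taken even at 4 KB MTU.
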
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_ib.c b/drivers/infiniband/ulp/ipoib/ipoib_ib.c index 0205eb7c1bd3..7cf1fa7074ab 100644 --- a/drivers/infiniband/ulp/ipoib/ipoib_ib.c +++ b/drivers/infiniband/ulp/ipoib/ipoib_ib.c | |||
@@ -89,28 +89,59 @@ void ipoib_free_ah(struct kref *kref) | |||
89 | spin_unlock_irqrestore(&priv->lock, flags); | 89 | spin_unlock_irqrestore(&priv->lock, flags); |
90 | } | 90 | } |
91 | 91 | ||
92 | static void ipoib_ud_dma_unmap_rx(struct ipoib_dev_priv *priv, | ||
93 | u64 mapping[IPOIB_UD_RX_SG]) | ||
94 | { | ||
95 | if (ipoib_ud_need_sg(priv->max_ib_mtu)) { | ||
96 | ib_dma_unmap_single(priv->ca, mapping[0], IPOIB_UD_HEAD_SIZE, | ||
97 | DMA_FROM_DEVICE); | ||
98 | ib_dma_unmap_page(priv->ca, mapping[1], PAGE_SIZE, | ||
99 | DMA_FROM_DEVICE); | ||
100 | } else | ||
101 | ib_dma_unmap_single(priv->ca, mapping[0], | ||
102 | IPOIB_UD_BUF_SIZE(priv->max_ib_mtu), | ||
103 | DMA_FROM_DEVICE); | ||
104 | } | ||
105 | |||
106 | static void ipoib_ud_skb_put_frags(struct ipoib_dev_priv *priv, | ||
107 | struct sk_buff *skb, | ||
108 | unsigned int length) | ||
109 | { | ||
110 | if (ipoib_ud_need_sg(priv->max_ib_mtu)) { | ||
111 | skb_frag_t *frag = &skb_shinfo(skb)->frags[0]; | ||
112 | unsigned int size; | ||
113 | /* | ||
114 | * Only two buffers are needed for max_payload = 4K; | ||
115 | * the first buffer is IPOIB_UD_HEAD_SIZE bytes. | ||
116 | */ | ||
117 | skb->tail += IPOIB_UD_HEAD_SIZE; | ||
118 | skb->len += length; | ||
119 | |||
120 | size = length - IPOIB_UD_HEAD_SIZE; | ||
121 | |||
122 | frag->size = size; | ||
123 | skb->data_len += size; | ||
124 | skb->truesize += size; | ||
125 | } else | ||
126 | skb_put(skb, length); | ||
127 | |||
128 | } | ||
129 | |||
92 | static int ipoib_ib_post_receive(struct net_device *dev, int id) | 130 | static int ipoib_ib_post_receive(struct net_device *dev, int id) |
93 | { | 131 | { |
94 | struct ipoib_dev_priv *priv = netdev_priv(dev); | 132 | struct ipoib_dev_priv *priv = netdev_priv(dev); |
95 | struct ib_sge list; | ||
96 | struct ib_recv_wr param; | ||
97 | struct ib_recv_wr *bad_wr; | 133 | struct ib_recv_wr *bad_wr; |
98 | int ret; | 134 | int ret; |
99 | 135 | ||
100 | list.addr = priv->rx_ring[id].mapping; | 136 | priv->rx_wr.wr_id = id | IPOIB_OP_RECV; |
101 | list.length = IPOIB_BUF_SIZE; | 137 | priv->rx_sge[0].addr = priv->rx_ring[id].mapping[0]; |
102 | list.lkey = priv->mr->lkey; | 138 | priv->rx_sge[1].addr = priv->rx_ring[id].mapping[1]; |
103 | 139 | ||
104 | param.next = NULL; | ||
105 | param.wr_id = id | IPOIB_OP_RECV; | ||
106 | param.sg_list = &list; | ||
107 | param.num_sge = 1; | ||
108 | 140 | ||
109 | ret = ib_post_recv(priv->qp, ¶m, &bad_wr); | 141 | ret = ib_post_recv(priv->qp, &priv->rx_wr, &bad_wr); |
110 | if (unlikely(ret)) { | 142 | if (unlikely(ret)) { |
111 | ipoib_warn(priv, "receive failed for buf %d (%d)\n", id, ret); | 143 | ipoib_warn(priv, "receive failed for buf %d (%d)\n", id, ret); |
112 | ib_dma_unmap_single(priv->ca, priv->rx_ring[id].mapping, | 144 | ipoib_ud_dma_unmap_rx(priv, priv->rx_ring[id].mapping); |
113 | IPOIB_BUF_SIZE, DMA_FROM_DEVICE); | ||
114 | dev_kfree_skb_any(priv->rx_ring[id].skb); | 145 | dev_kfree_skb_any(priv->rx_ring[id].skb); |
115 | priv->rx_ring[id].skb = NULL; | 146 | priv->rx_ring[id].skb = NULL; |
116 | } | 147 | } |
@@ -118,15 +149,21 @@ static int ipoib_ib_post_receive(struct net_device *dev, int id) | |||
118 | return ret; | 149 | return ret; |
119 | } | 150 | } |
120 | 151 | ||
121 | static int ipoib_alloc_rx_skb(struct net_device *dev, int id) | 152 | static struct sk_buff *ipoib_alloc_rx_skb(struct net_device *dev, int id) |
122 | { | 153 | { |
123 | struct ipoib_dev_priv *priv = netdev_priv(dev); | 154 | struct ipoib_dev_priv *priv = netdev_priv(dev); |
124 | struct sk_buff *skb; | 155 | struct sk_buff *skb; |
125 | u64 addr; | 156 | int buf_size; |
157 | u64 *mapping; | ||
126 | 158 | ||
127 | skb = dev_alloc_skb(IPOIB_BUF_SIZE + 4); | 159 | if (ipoib_ud_need_sg(priv->max_ib_mtu)) |
128 | if (!skb) | 160 | buf_size = IPOIB_UD_HEAD_SIZE; |
129 | return -ENOMEM; | 161 | else |
162 | buf_size = IPOIB_UD_BUF_SIZE(priv->max_ib_mtu); | ||
163 | |||
164 | skb = dev_alloc_skb(buf_size + 4); | ||
165 | if (unlikely(!skb)) | ||
166 | return NULL; | ||
130 | 167 | ||
131 | /* | 168 | /* |
132 | * IB will leave a 40 byte gap for a GRH and IPoIB adds a 4 byte | 169 | * IB will leave a 40 byte gap for a GRH and IPoIB adds a 4 byte |
@@ -135,17 +172,32 @@ static int ipoib_alloc_rx_skb(struct net_device *dev, int id) | |||
135 | */ | 172 | */ |
136 | skb_reserve(skb, 4); | 173 | skb_reserve(skb, 4); |
137 | 174 | ||
138 | addr = ib_dma_map_single(priv->ca, skb->data, IPOIB_BUF_SIZE, | 175 | mapping = priv->rx_ring[id].mapping; |
139 | DMA_FROM_DEVICE); | 176 | mapping[0] = ib_dma_map_single(priv->ca, skb->data, buf_size, |
140 | if (unlikely(ib_dma_mapping_error(priv->ca, addr))) { | 177 | DMA_FROM_DEVICE); |
141 | dev_kfree_skb_any(skb); | 178 | if (unlikely(ib_dma_mapping_error(priv->ca, mapping[0]))) |
142 | return -EIO; | 179 | goto error; |
180 | |||
181 | if (ipoib_ud_need_sg(priv->max_ib_mtu)) { | ||
182 | struct page *page = alloc_page(GFP_ATOMIC); | ||
183 | if (!page) | ||
184 | goto partial_error; | ||
185 | skb_fill_page_desc(skb, 0, page, 0, PAGE_SIZE); | ||
186 | mapping[1] = | ||
187 | ib_dma_map_page(priv->ca, skb_shinfo(skb)->frags[0].page, | ||
188 | 0, PAGE_SIZE, DMA_FROM_DEVICE); | ||
189 | if (unlikely(ib_dma_mapping_error(priv->ca, mapping[1]))) | ||
190 | goto partial_error; | ||
143 | } | 191 | } |
144 | 192 | ||
145 | priv->rx_ring[id].skb = skb; | 193 | priv->rx_ring[id].skb = skb; |
146 | priv->rx_ring[id].mapping = addr; | 194 | return skb; |
147 | 195 | ||
148 | return 0; | 196 | partial_error: |
197 | ib_dma_unmap_single(priv->ca, mapping[0], buf_size, DMA_FROM_DEVICE); | ||
198 | error: | ||
199 | dev_kfree_skb_any(skb); | ||
200 | return NULL; | ||
149 | } | 201 | } |
150 | 202 | ||
151 | static int ipoib_ib_post_receives(struct net_device *dev) | 203 | static int ipoib_ib_post_receives(struct net_device *dev) |
@@ -154,7 +206,7 @@ static int ipoib_ib_post_receives(struct net_device *dev) | |||
154 | int i; | 206 | int i; |
155 | 207 | ||
156 | for (i = 0; i < ipoib_recvq_size; ++i) { | 208 | for (i = 0; i < ipoib_recvq_size; ++i) { |
157 | if (ipoib_alloc_rx_skb(dev, i)) { | 209 | if (!ipoib_alloc_rx_skb(dev, i)) { |
158 | ipoib_warn(priv, "failed to allocate receive buffer %d\n", i); | 210 | ipoib_warn(priv, "failed to allocate receive buffer %d\n", i); |
159 | return -ENOMEM; | 211 | return -ENOMEM; |
160 | } | 212 | } |
@@ -172,7 +224,7 @@ static void ipoib_ib_handle_rx_wc(struct net_device *dev, struct ib_wc *wc) | |||
172 | struct ipoib_dev_priv *priv = netdev_priv(dev); | 224 | struct ipoib_dev_priv *priv = netdev_priv(dev); |
173 | unsigned int wr_id = wc->wr_id & ~IPOIB_OP_RECV; | 225 | unsigned int wr_id = wc->wr_id & ~IPOIB_OP_RECV; |
174 | struct sk_buff *skb; | 226 | struct sk_buff *skb; |
175 | u64 addr; | 227 | u64 mapping[IPOIB_UD_RX_SG]; |
176 | 228 | ||
177 | ipoib_dbg_data(priv, "recv completion: id %d, status: %d\n", | 229 | ipoib_dbg_data(priv, "recv completion: id %d, status: %d\n", |
178 | wr_id, wc->status); | 230 | wr_id, wc->status); |
@@ -184,15 +236,13 @@ static void ipoib_ib_handle_rx_wc(struct net_device *dev, struct ib_wc *wc) | |||
184 | } | 236 | } |
185 | 237 | ||
186 | skb = priv->rx_ring[wr_id].skb; | 238 | skb = priv->rx_ring[wr_id].skb; |
187 | addr = priv->rx_ring[wr_id].mapping; | ||
188 | 239 | ||
189 | if (unlikely(wc->status != IB_WC_SUCCESS)) { | 240 | if (unlikely(wc->status != IB_WC_SUCCESS)) { |
190 | if (wc->status != IB_WC_WR_FLUSH_ERR) | 241 | if (wc->status != IB_WC_WR_FLUSH_ERR) |
191 | ipoib_warn(priv, "failed recv event " | 242 | ipoib_warn(priv, "failed recv event " |
192 | "(status=%d, wrid=%d vend_err %x)\n", | 243 | "(status=%d, wrid=%d vend_err %x)\n", |
193 | wc->status, wr_id, wc->vendor_err); | 244 | wc->status, wr_id, wc->vendor_err); |
194 | ib_dma_unmap_single(priv->ca, addr, | 245 | ipoib_ud_dma_unmap_rx(priv, priv->rx_ring[wr_id].mapping); |
195 | IPOIB_BUF_SIZE, DMA_FROM_DEVICE); | ||
196 | dev_kfree_skb_any(skb); | 246 | dev_kfree_skb_any(skb); |
197 | priv->rx_ring[wr_id].skb = NULL; | 247 | priv->rx_ring[wr_id].skb = NULL; |
198 | return; | 248 | return; |
@@ -205,11 +255,14 @@ static void ipoib_ib_handle_rx_wc(struct net_device *dev, struct ib_wc *wc) | |||
205 | if (wc->slid == priv->local_lid && wc->src_qp == priv->qp->qp_num) | 255 | if (wc->slid == priv->local_lid && wc->src_qp == priv->qp->qp_num) |
206 | goto repost; | 256 | goto repost; |
207 | 257 | ||
258 | memcpy(mapping, priv->rx_ring[wr_id].mapping, | ||
259 | IPOIB_UD_RX_SG * sizeof *mapping); | ||
260 | |||
208 | /* | 261 | /* |
209 | * If we can't allocate a new RX buffer, dump | 262 | * If we can't allocate a new RX buffer, dump |
210 | * this packet and reuse the old buffer. | 263 | * this packet and reuse the old buffer. |
211 | */ | 264 | */ |
212 | if (unlikely(ipoib_alloc_rx_skb(dev, wr_id))) { | 265 | if (unlikely(!ipoib_alloc_rx_skb(dev, wr_id))) { |
213 | ++dev->stats.rx_dropped; | 266 | ++dev->stats.rx_dropped; |
214 | goto repost; | 267 | goto repost; |
215 | } | 268 | } |
@@ -217,9 +270,9 @@ static void ipoib_ib_handle_rx_wc(struct net_device *dev, struct ib_wc *wc) | |||
217 | ipoib_dbg_data(priv, "received %d bytes, SLID 0x%04x\n", | 270 | ipoib_dbg_data(priv, "received %d bytes, SLID 0x%04x\n", |
218 | wc->byte_len, wc->slid); | 271 | wc->byte_len, wc->slid); |
219 | 272 | ||
220 | ib_dma_unmap_single(priv->ca, addr, IPOIB_BUF_SIZE, DMA_FROM_DEVICE); | 273 | ipoib_ud_dma_unmap_rx(priv, mapping); |
274 | ipoib_ud_skb_put_frags(priv, skb, wc->byte_len); | ||
221 | 275 | ||
222 | skb_put(skb, wc->byte_len); | ||
223 | skb_pull(skb, IB_GRH_BYTES); | 276 | skb_pull(skb, IB_GRH_BYTES); |
224 | 277 | ||
225 | skb->protocol = ((struct ipoib_header *) skb->data)->proto; | 278 | skb->protocol = ((struct ipoib_header *) skb->data)->proto; |
@@ -733,10 +786,8 @@ int ipoib_ib_dev_stop(struct net_device *dev, int flush) | |||
733 | rx_req = &priv->rx_ring[i]; | 786 | rx_req = &priv->rx_ring[i]; |
734 | if (!rx_req->skb) | 787 | if (!rx_req->skb) |
735 | continue; | 788 | continue; |
736 | ib_dma_unmap_single(priv->ca, | 789 | ipoib_ud_dma_unmap_rx(priv, |
737 | rx_req->mapping, | 790 | priv->rx_ring[i].mapping); |
738 | IPOIB_BUF_SIZE, | ||
739 | DMA_FROM_DEVICE); | ||
740 | dev_kfree_skb_any(rx_req->skb); | 791 | dev_kfree_skb_any(rx_req->skb); |
741 | rx_req->skb = NULL; | 792 | rx_req->skb = NULL; |
742 | } | 793 | } |
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_main.c b/drivers/infiniband/ulp/ipoib/ipoib_main.c index bd07f02cf02b..7a4ed9d3d844 100644 --- a/drivers/infiniband/ulp/ipoib/ipoib_main.c +++ b/drivers/infiniband/ulp/ipoib/ipoib_main.c | |||
@@ -195,7 +195,7 @@ static int ipoib_change_mtu(struct net_device *dev, int new_mtu) | |||
195 | return 0; | 195 | return 0; |
196 | } | 196 | } |
197 | 197 | ||
198 | if (new_mtu > IPOIB_PACKET_SIZE - IPOIB_ENCAP_LEN) | 198 | if (new_mtu > IPOIB_UD_MTU(priv->max_ib_mtu)) |
199 | return -EINVAL; | 199 | return -EINVAL; |
200 | 200 | ||
201 | priv->admin_mtu = new_mtu; | 201 | priv->admin_mtu = new_mtu; |
@@ -971,10 +971,6 @@ static void ipoib_setup(struct net_device *dev) | |||
971 | NETIF_F_LLTX | | 971 | NETIF_F_LLTX | |
972 | NETIF_F_HIGHDMA); | 972 | NETIF_F_HIGHDMA); |
973 | 973 | ||
974 | /* MTU will be reset when mcast join happens */ | ||
975 | dev->mtu = IPOIB_PACKET_SIZE - IPOIB_ENCAP_LEN; | ||
976 | priv->mcast_mtu = priv->admin_mtu = dev->mtu; | ||
977 | |||
978 | memcpy(dev->broadcast, ipv4_bcast_addr, INFINIBAND_ALEN); | 974 | memcpy(dev->broadcast, ipv4_bcast_addr, INFINIBAND_ALEN); |
979 | 975 | ||
980 | netif_carrier_off(dev); | 976 | netif_carrier_off(dev); |
@@ -1107,6 +1103,7 @@ static struct net_device *ipoib_add_port(const char *format, | |||
1107 | { | 1103 | { |
1108 | struct ipoib_dev_priv *priv; | 1104 | struct ipoib_dev_priv *priv; |
1109 | struct ib_device_attr *device_attr; | 1105 | struct ib_device_attr *device_attr; |
1106 | struct ib_port_attr attr; | ||
1110 | int result = -ENOMEM; | 1107 | int result = -ENOMEM; |
1111 | 1108 | ||
1112 | priv = ipoib_intf_alloc(format); | 1109 | priv = ipoib_intf_alloc(format); |
@@ -1115,6 +1112,18 @@ static struct net_device *ipoib_add_port(const char *format, | |||
1115 | 1112 | ||
1116 | SET_NETDEV_DEV(priv->dev, hca->dma_device); | 1113 | SET_NETDEV_DEV(priv->dev, hca->dma_device); |
1117 | 1114 | ||
1115 | if (!ib_query_port(hca, port, &attr)) | ||
1116 | priv->max_ib_mtu = ib_mtu_enum_to_int(attr.max_mtu); | ||
1117 | else { | ||
1118 | printk(KERN_WARNING "%s: ib_query_port %d failed\n", | ||
1119 | hca->name, port); | ||
1120 | goto device_init_failed; | ||
1121 | } | ||
1122 | |||
1123 | /* MTU will be reset when mcast join happens */ | ||
1124 | priv->dev->mtu = IPOIB_UD_MTU(priv->max_ib_mtu); | ||
1125 | priv->mcast_mtu = priv->admin_mtu = priv->dev->mtu; | ||
1126 | |||
1118 | result = ib_query_pkey(hca, port, 0, &priv->pkey); | 1127 | result = ib_query_pkey(hca, port, 0, &priv->pkey); |
1119 | if (result) { | 1128 | if (result) { |
1120 | printk(KERN_WARNING "%s: ib_query_pkey port %d failed (ret = %d)\n", | 1129 | printk(KERN_WARNING "%s: ib_query_pkey port %d failed (ret = %d)\n", |
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_multicast.c b/drivers/infiniband/ulp/ipoib/ipoib_multicast.c index 31a53c5bcb13..d00a2c174aee 100644 --- a/drivers/infiniband/ulp/ipoib/ipoib_multicast.c +++ b/drivers/infiniband/ulp/ipoib/ipoib_multicast.c | |||
@@ -567,8 +567,7 @@ void ipoib_mcast_join_task(struct work_struct *work) | |||
567 | return; | 567 | return; |
568 | } | 568 | } |
569 | 569 | ||
570 | priv->mcast_mtu = ib_mtu_enum_to_int(priv->broadcast->mcmember.mtu) - | 570 | priv->mcast_mtu = IPOIB_UD_MTU(ib_mtu_enum_to_int(priv->broadcast->mcmember.mtu)); |
571 | IPOIB_ENCAP_LEN; | ||
572 | 571 | ||
573 | if (!ipoib_cm_admin_enabled(dev)) | 572 | if (!ipoib_cm_admin_enabled(dev)) |
574 | dev->mtu = min(priv->mcast_mtu, priv->admin_mtu); | 573 | dev->mtu = min(priv->mcast_mtu, priv->admin_mtu); |
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_verbs.c b/drivers/infiniband/ulp/ipoib/ipoib_verbs.c index 8a20e3742c43..07c03f178a49 100644 --- a/drivers/infiniband/ulp/ipoib/ipoib_verbs.c +++ b/drivers/infiniband/ulp/ipoib/ipoib_verbs.c | |||
@@ -150,7 +150,7 @@ int ipoib_transport_dev_init(struct net_device *dev, struct ib_device *ca) | |||
150 | .max_send_wr = ipoib_sendq_size, | 150 | .max_send_wr = ipoib_sendq_size, |
151 | .max_recv_wr = ipoib_recvq_size, | 151 | .max_recv_wr = ipoib_recvq_size, |
152 | .max_send_sge = 1, | 152 | .max_send_sge = 1, |
153 | .max_recv_sge = 1 | 153 | .max_recv_sge = IPOIB_UD_RX_SG |
154 | }, | 154 | }, |
155 | .sq_sig_type = IB_SIGNAL_ALL_WR, | 155 | .sq_sig_type = IB_SIGNAL_ALL_WR, |
156 | .qp_type = IB_QPT_UD | 156 | .qp_type = IB_QPT_UD |
@@ -215,6 +215,19 @@ int ipoib_transport_dev_init(struct net_device *dev, struct ib_device *ca) | |||
215 | priv->tx_wr.sg_list = priv->tx_sge; | 215 | priv->tx_wr.sg_list = priv->tx_sge; |
216 | priv->tx_wr.send_flags = IB_SEND_SIGNALED; | 216 | priv->tx_wr.send_flags = IB_SEND_SIGNALED; |
217 | 217 | ||
218 | priv->rx_sge[0].lkey = priv->mr->lkey; | ||
219 | if (ipoib_ud_need_sg(priv->max_ib_mtu)) { | ||
220 | priv->rx_sge[0].length = IPOIB_UD_HEAD_SIZE; | ||
221 | priv->rx_sge[1].length = PAGE_SIZE; | ||
222 | priv->rx_sge[1].lkey = priv->mr->lkey; | ||
223 | priv->rx_wr.num_sge = IPOIB_UD_RX_SG; | ||
224 | } else { | ||
225 | priv->rx_sge[0].length = IPOIB_UD_BUF_SIZE(priv->max_ib_mtu); | ||
226 | priv->rx_wr.num_sge = 1; | ||
227 | } | ||
228 | priv->rx_wr.next = NULL; | ||
229 | priv->rx_wr.sg_list = priv->rx_sge; | ||
230 | |||
218 | return 0; | 231 | return 0; |
219 | 232 | ||
220 | out_free_cq: | 233 | out_free_cq: |
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_vlan.c b/drivers/infiniband/ulp/ipoib/ipoib_vlan.c index 293f5b892e3f..431fdeaa2dc4 100644 --- a/drivers/infiniband/ulp/ipoib/ipoib_vlan.c +++ b/drivers/infiniband/ulp/ipoib/ipoib_vlan.c | |||
@@ -89,6 +89,7 @@ int ipoib_vlan_add(struct net_device *pdev, unsigned short pkey) | |||
89 | goto err; | 89 | goto err; |
90 | } | 90 | } |
91 | 91 | ||
92 | priv->max_ib_mtu = ppriv->max_ib_mtu; | ||
92 | set_bit(IPOIB_FLAG_SUBINTERFACE, &priv->flags); | 93 | set_bit(IPOIB_FLAG_SUBINTERFACE, &priv->flags); |
93 | 94 | ||
94 | priv->pkey = pkey; | 95 | priv->pkey = pkey; |
diff --git a/drivers/input/joystick/xpad.c b/drivers/input/joystick/xpad.c index 4b07bdadb81e..b29e3affb805 100644 --- a/drivers/input/joystick/xpad.c +++ b/drivers/input/joystick/xpad.c | |||
@@ -444,6 +444,23 @@ exit: | |||
444 | __FUNCTION__, retval); | 444 | __FUNCTION__, retval); |
445 | } | 445 | } |
446 | 446 | ||
447 | static void xpad_bulk_out(struct urb *urb) | ||
448 | { | ||
449 | switch (urb->status) { | ||
450 | case 0: | ||
451 | /* success */ | ||
452 | break; | ||
453 | case -ECONNRESET: | ||
454 | case -ENOENT: | ||
455 | case -ESHUTDOWN: | ||
456 | /* this urb is terminated, clean up */ | ||
457 | dbg("%s - urb shutting down with status: %d", __FUNCTION__, urb->status); | ||
458 | break; | ||
459 | default: | ||
460 | dbg("%s - nonzero urb status received: %d", __FUNCTION__, urb->status); | ||
461 | } | ||
462 | } | ||
463 | |||
447 | #if defined(CONFIG_JOYSTICK_XPAD_FF) || defined(CONFIG_JOYSTICK_XPAD_LEDS) | 464 | #if defined(CONFIG_JOYSTICK_XPAD_FF) || defined(CONFIG_JOYSTICK_XPAD_LEDS) |
448 | static void xpad_irq_out(struct urb *urb) | 465 | static void xpad_irq_out(struct urb *urb) |
449 | { | 466 | { |
@@ -475,23 +492,6 @@ exit: | |||
475 | __FUNCTION__, retval); | 492 | __FUNCTION__, retval); |
476 | } | 493 | } |
477 | 494 | ||
478 | static void xpad_bulk_out(struct urb *urb) | ||
479 | { | ||
480 | switch (urb->status) { | ||
481 | case 0: | ||
482 | /* success */ | ||
483 | break; | ||
484 | case -ECONNRESET: | ||
485 | case -ENOENT: | ||
486 | case -ESHUTDOWN: | ||
487 | /* this urb is terminated, clean up */ | ||
488 | dbg("%s - urb shutting down with status: %d", __FUNCTION__, urb->status); | ||
489 | break; | ||
490 | default: | ||
491 | dbg("%s - nonzero urb status received: %d", __FUNCTION__, urb->status); | ||
492 | } | ||
493 | } | ||
494 | |||
495 | static int xpad_init_output(struct usb_interface *intf, struct usb_xpad *xpad) | 495 | static int xpad_init_output(struct usb_interface *intf, struct usb_xpad *xpad) |
496 | { | 496 | { |
497 | struct usb_endpoint_descriptor *ep_irq_out; | 497 | struct usb_endpoint_descriptor *ep_irq_out; |
diff --git a/drivers/net/Kconfig b/drivers/net/Kconfig index 0697aa8ea774..8082c1d142df 100644 --- a/drivers/net/Kconfig +++ b/drivers/net/Kconfig | |||
@@ -2011,7 +2011,7 @@ config E1000_DISABLE_PACKET_SPLIT | |||
2011 | 2011 | ||
2012 | config E1000E | 2012 | config E1000E |
2013 | tristate "Intel(R) PRO/1000 PCI-Express Gigabit Ethernet support" | 2013 | tristate "Intel(R) PRO/1000 PCI-Express Gigabit Ethernet support" |
2014 | depends on PCI | 2014 | depends on PCI && (!SPARC32 || BROKEN) |
2015 | ---help--- | 2015 | ---help--- |
2016 | This driver supports the PCI-Express Intel(R) PRO/1000 gigabit | 2016 | This driver supports the PCI-Express Intel(R) PRO/1000 gigabit |
2017 | ethernet family of adapters. For PCI or PCI-X e1000 adapters, | 2017 | ethernet family of adapters. For PCI or PCI-X e1000 adapters, |
diff --git a/drivers/net/mlx4/alloc.c b/drivers/net/mlx4/alloc.c index 75ef9d0d974d..f9d6b4dca180 100644 --- a/drivers/net/mlx4/alloc.c +++ b/drivers/net/mlx4/alloc.c | |||
@@ -196,3 +196,160 @@ void mlx4_buf_free(struct mlx4_dev *dev, int size, struct mlx4_buf *buf) | |||
196 | } | 196 | } |
197 | } | 197 | } |
198 | EXPORT_SYMBOL_GPL(mlx4_buf_free); | 198 | EXPORT_SYMBOL_GPL(mlx4_buf_free); |
199 | |||
200 | static struct mlx4_db_pgdir *mlx4_alloc_db_pgdir(struct device *dma_device) | ||
201 | { | ||
202 | struct mlx4_db_pgdir *pgdir; | ||
203 | |||
204 | pgdir = kzalloc(sizeof *pgdir, GFP_KERNEL); | ||
205 | if (!pgdir) | ||
206 | return NULL; | ||
207 | |||
208 | bitmap_fill(pgdir->order1, MLX4_DB_PER_PAGE / 2); | ||
209 | pgdir->bits[0] = pgdir->order0; | ||
210 | pgdir->bits[1] = pgdir->order1; | ||
211 | pgdir->db_page = dma_alloc_coherent(dma_device, PAGE_SIZE, | ||
212 | &pgdir->db_dma, GFP_KERNEL); | ||
213 | if (!pgdir->db_page) { | ||
214 | kfree(pgdir); | ||
215 | return NULL; | ||
216 | } | ||
217 | |||
218 | return pgdir; | ||
219 | } | ||
220 | |||
221 | static int mlx4_alloc_db_from_pgdir(struct mlx4_db_pgdir *pgdir, | ||
222 | struct mlx4_db *db, int order) | ||
223 | { | ||
224 | int o; | ||
225 | int i; | ||
226 | |||
227 | for (o = order; o <= 1; ++o) { | ||
228 | i = find_first_bit(pgdir->bits[o], MLX4_DB_PER_PAGE >> o); | ||
229 | if (i < MLX4_DB_PER_PAGE >> o) | ||
230 | goto found; | ||
231 | } | ||
232 | |||
233 | return -ENOMEM; | ||
234 | |||
235 | found: | ||
236 | clear_bit(i, pgdir->bits[o]); | ||
237 | |||
238 | i <<= o; | ||
239 | |||
240 | if (o > order) | ||
241 | set_bit(i ^ 1, pgdir->bits[order]); | ||
242 | |||
243 | db->u.pgdir = pgdir; | ||
244 | db->index = i; | ||
245 | db->db = pgdir->db_page + db->index; | ||
246 | db->dma = pgdir->db_dma + db->index * 4; | ||
247 | db->order = order; | ||
248 | |||
249 | return 0; | ||
250 | } | ||
251 | |||
252 | int mlx4_db_alloc(struct mlx4_dev *dev, struct mlx4_db *db, int order) | ||
253 | { | ||
254 | struct mlx4_priv *priv = mlx4_priv(dev); | ||
255 | struct mlx4_db_pgdir *pgdir; | ||
256 | int ret = 0; | ||
257 | |||
258 | mutex_lock(&priv->pgdir_mutex); | ||
259 | |||
260 | list_for_each_entry(pgdir, &priv->pgdir_list, list) | ||
261 | if (!mlx4_alloc_db_from_pgdir(pgdir, db, order)) | ||
262 | goto out; | ||
263 | |||
264 | pgdir = mlx4_alloc_db_pgdir(&(dev->pdev->dev)); | ||
265 | if (!pgdir) { | ||
266 | ret = -ENOMEM; | ||
267 | goto out; | ||
268 | } | ||
269 | |||
270 | list_add(&pgdir->list, &priv->pgdir_list); | ||
271 | |||
272 | /* This should never fail -- we just allocated an empty page: */ | ||
273 | WARN_ON(mlx4_alloc_db_from_pgdir(pgdir, db, order)); | ||
274 | |||
275 | out: | ||
276 | mutex_unlock(&priv->pgdir_mutex); | ||
277 | |||
278 | return ret; | ||
279 | } | ||
280 | EXPORT_SYMBOL_GPL(mlx4_db_alloc); | ||
281 | |||
282 | void mlx4_db_free(struct mlx4_dev *dev, struct mlx4_db *db) | ||
283 | { | ||
284 | struct mlx4_priv *priv = mlx4_priv(dev); | ||
285 | int o; | ||
286 | int i; | ||
287 | |||
288 | mutex_lock(&priv->pgdir_mutex); | ||
289 | |||
290 | o = db->order; | ||
291 | i = db->index; | ||
292 | |||
293 | if (db->order == 0 && test_bit(i ^ 1, db->u.pgdir->order0)) { | ||
294 | clear_bit(i ^ 1, db->u.pgdir->order0); | ||
295 | ++o; | ||
296 | } | ||
297 | i >>= o; | ||
298 | set_bit(i, db->u.pgdir->bits[o]); | ||
299 | |||
300 | if (bitmap_full(db->u.pgdir->order1, MLX4_DB_PER_PAGE / 2)) { | ||
301 | dma_free_coherent(&(dev->pdev->dev), PAGE_SIZE, | ||
302 | db->u.pgdir->db_page, db->u.pgdir->db_dma); | ||
303 | list_del(&db->u.pgdir->list); | ||
304 | kfree(db->u.pgdir); | ||
305 | } | ||
306 | |||
307 | mutex_unlock(&priv->pgdir_mutex); | ||
308 | } | ||
309 | EXPORT_SYMBOL_GPL(mlx4_db_free); | ||
310 | |||
311 | int mlx4_alloc_hwq_res(struct mlx4_dev *dev, struct mlx4_hwq_resources *wqres, | ||
312 | int size, int max_direct) | ||
313 | { | ||
314 | int err; | ||
315 | |||
316 | err = mlx4_db_alloc(dev, &wqres->db, 1); | ||
317 | if (err) | ||
318 | return err; | ||
319 | |||
320 | *wqres->db.db = 0; | ||
321 | |||
322 | err = mlx4_buf_alloc(dev, size, max_direct, &wqres->buf); | ||
323 | if (err) | ||
324 | goto err_db; | ||
325 | |||
326 | err = mlx4_mtt_init(dev, wqres->buf.npages, wqres->buf.page_shift, | ||
327 | &wqres->mtt); | ||
328 | if (err) | ||
329 | goto err_buf; | ||
330 | |||
331 | err = mlx4_buf_write_mtt(dev, &wqres->mtt, &wqres->buf); | ||
332 | if (err) | ||
333 | goto err_mtt; | ||
334 | |||
335 | return 0; | ||
336 | |||
337 | err_mtt: | ||
338 | mlx4_mtt_cleanup(dev, &wqres->mtt); | ||
339 | err_buf: | ||
340 | mlx4_buf_free(dev, size, &wqres->buf); | ||
341 | err_db: | ||
342 | mlx4_db_free(dev, &wqres->db); | ||
343 | |||
344 | return err; | ||
345 | } | ||
346 | EXPORT_SYMBOL_GPL(mlx4_alloc_hwq_res); | ||
347 | |||
348 | void mlx4_free_hwq_res(struct mlx4_dev *dev, struct mlx4_hwq_resources *wqres, | ||
349 | int size) | ||
350 | { | ||
351 | mlx4_mtt_cleanup(dev, &wqres->mtt); | ||
352 | mlx4_buf_free(dev, size, &wqres->buf); | ||
353 | mlx4_db_free(dev, &wqres->db); | ||
354 | } | ||
355 | EXPORT_SYMBOL_GPL(mlx4_free_hwq_res); | ||
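
The new allocator hands out 4-byte doorbell records from shared DMA-coherent
pages using a small two-level buddy scheme (order 0 = a single record, order 1
= a naturally aligned pair), and mlx4_alloc_hwq_res() bundles the doorbell,
descriptor buffer and MTT setup that every work-queue consumer otherwise
repeats. A hedged sketch of a consumer, with illustrative names and sizes and
no claim about the real hardware-context programming:

    #include <linux/mlx4/device.h>

    /* Illustrative consumer: resources for a queue of nent 64-byte entries. */
    static int example_create_queue(struct mlx4_dev *dev, int nent)
    {
            struct mlx4_hwq_resources wqres;
            int err;

            err = mlx4_alloc_hwq_res(dev, &wqres, nent * 64, 2 * PAGE_SIZE);
            if (err)
                    return err;

            /*
             * wqres.db.db  - CPU pointer to the (already zeroed) doorbell record
             * wqres.db.dma - bus address to program into the hardware context
             * wqres.mtt    - MTT describing wqres.buf for the HCA
             */

            /* ... program the hardware context and use the queue ... */

            mlx4_free_hwq_res(dev, &wqres, nent * 64);
            return 0;
    }
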
diff --git a/drivers/net/mlx4/cq.c b/drivers/net/mlx4/cq.c index caa5bcf54e35..6fda0af9d0a6 100644 --- a/drivers/net/mlx4/cq.c +++ b/drivers/net/mlx4/cq.c | |||
@@ -180,7 +180,7 @@ int mlx4_cq_resize(struct mlx4_dev *dev, struct mlx4_cq *cq, | |||
180 | cq_context->mtt_base_addr_h = mtt_addr >> 32; | 180 | cq_context->mtt_base_addr_h = mtt_addr >> 32; |
181 | cq_context->mtt_base_addr_l = cpu_to_be32(mtt_addr & 0xffffffff); | 181 | cq_context->mtt_base_addr_l = cpu_to_be32(mtt_addr & 0xffffffff); |
182 | 182 | ||
183 | err = mlx4_MODIFY_CQ(dev, mailbox, cq->cqn, 1); | 183 | err = mlx4_MODIFY_CQ(dev, mailbox, cq->cqn, 0); |
184 | 184 | ||
185 | mlx4_free_cmd_mailbox(dev, mailbox); | 185 | mlx4_free_cmd_mailbox(dev, mailbox); |
186 | return err; | 186 | return err; |
diff --git a/drivers/net/mlx4/main.c b/drivers/net/mlx4/main.c index 49a4acab5e82..a6aa49fc1d68 100644 --- a/drivers/net/mlx4/main.c +++ b/drivers/net/mlx4/main.c | |||
@@ -798,6 +798,9 @@ static int __mlx4_init_one(struct pci_dev *pdev, const struct pci_device_id *id) | |||
798 | INIT_LIST_HEAD(&priv->ctx_list); | 798 | INIT_LIST_HEAD(&priv->ctx_list); |
799 | spin_lock_init(&priv->ctx_lock); | 799 | spin_lock_init(&priv->ctx_lock); |
800 | 800 | ||
801 | INIT_LIST_HEAD(&priv->pgdir_list); | ||
802 | mutex_init(&priv->pgdir_mutex); | ||
803 | |||
801 | /* | 804 | /* |
802 | * Now reset the HCA before we touch the PCI capabilities or | 805 | * Now reset the HCA before we touch the PCI capabilities or |
803 | * attempt a firmware command, since a boot ROM may have left | 806 | * attempt a firmware command, since a boot ROM may have left |
diff --git a/drivers/net/mlx4/mlx4.h b/drivers/net/mlx4/mlx4.h index 73336810e652..a4023c2dd050 100644 --- a/drivers/net/mlx4/mlx4.h +++ b/drivers/net/mlx4/mlx4.h | |||
@@ -257,6 +257,9 @@ struct mlx4_priv { | |||
257 | struct list_head ctx_list; | 257 | struct list_head ctx_list; |
258 | spinlock_t ctx_lock; | 258 | spinlock_t ctx_lock; |
259 | 259 | ||
260 | struct list_head pgdir_list; | ||
261 | struct mutex pgdir_mutex; | ||
262 | |||
260 | struct mlx4_fw fw; | 263 | struct mlx4_fw fw; |
261 | struct mlx4_cmd cmd; | 264 | struct mlx4_cmd cmd; |
262 | 265 | ||
diff --git a/drivers/net/mlx4/qp.c b/drivers/net/mlx4/qp.c index fa24e6597591..ee5484c44a18 100644 --- a/drivers/net/mlx4/qp.c +++ b/drivers/net/mlx4/qp.c | |||
@@ -299,3 +299,34 @@ int mlx4_qp_query(struct mlx4_dev *dev, struct mlx4_qp *qp, | |||
299 | } | 299 | } |
300 | EXPORT_SYMBOL_GPL(mlx4_qp_query); | 300 | EXPORT_SYMBOL_GPL(mlx4_qp_query); |
301 | 301 | ||
302 | int mlx4_qp_to_ready(struct mlx4_dev *dev, struct mlx4_mtt *mtt, | ||
303 | struct mlx4_qp_context *context, | ||
304 | struct mlx4_qp *qp, enum mlx4_qp_state *qp_state) | ||
305 | { | ||
306 | int err; | ||
307 | int i; | ||
308 | enum mlx4_qp_state states[] = { | ||
309 | MLX4_QP_STATE_RST, | ||
310 | MLX4_QP_STATE_INIT, | ||
311 | MLX4_QP_STATE_RTR, | ||
312 | MLX4_QP_STATE_RTS | ||
313 | }; | ||
314 | |||
315 | for (i = 0; i < ARRAY_SIZE(states) - 1; i++) { | ||
316 | context->flags &= cpu_to_be32(~(0xf << 28)); | ||
317 | context->flags |= cpu_to_be32(states[i + 1] << 28); | ||
318 | err = mlx4_qp_modify(dev, mtt, states[i], states[i + 1], | ||
319 | context, 0, 0, qp); | ||
320 | if (err) { | ||
321 | mlx4_err(dev, "Failed to bring QP to state: " | ||
322 | "%d with error: %d\n", | ||
323 | states[i + 1], err); | ||
324 | return err; | ||
325 | } | ||
326 | |||
327 | *qp_state = states[i + 1]; | ||
328 | } | ||
329 | |||
330 | return 0; | ||
331 | } | ||
332 | EXPORT_SYMBOL_GPL(mlx4_qp_to_ready); | ||
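
mlx4_qp_to_ready() factors out the RST -> INIT -> RTR -> RTS walk that kernel
consumers of mlx4 QPs would otherwise spell out one transition at a time. A
hedged usage sketch (illustrative names; a real caller must populate a valid
QP context before the walk):

    #include <linux/mlx4/qp.h>

    static int example_bring_up_qp(struct mlx4_dev *dev, struct mlx4_mtt *mtt,
                                   struct mlx4_qp *qp,
                                   struct mlx4_qp_context *context)
    {
            enum mlx4_qp_state state;
            int err;

            /* Walks RST -> INIT -> RTR -> RTS, updating 'state' at each step. */
            err = mlx4_qp_to_ready(dev, mtt, context, qp, &state);
            if (err)
                    return err;

            WARN_ON(state != MLX4_QP_STATE_RTS);
            return 0;
    }
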
diff --git a/drivers/scsi/ide-scsi.c b/drivers/scsi/ide-scsi.c index 3638fa808ded..32553639aded 100644 --- a/drivers/scsi/ide-scsi.c +++ b/drivers/scsi/ide-scsi.c | |||
@@ -258,8 +258,7 @@ idescsi_atapi_error(ide_drive_t *drive, struct request *rq, u8 stat, u8 err) | |||
258 | 258 | ||
259 | if (ide_read_status(drive) & (BUSY_STAT | DRQ_STAT)) | 259 | if (ide_read_status(drive) & (BUSY_STAT | DRQ_STAT)) |
260 | /* force an abort */ | 260 | /* force an abort */ |
261 | hwif->OUTB(WIN_IDLEIMMEDIATE, | 261 | hwif->OUTB(WIN_IDLEIMMEDIATE, hwif->io_ports.command_addr); |
262 | hwif->io_ports[IDE_COMMAND_OFFSET]); | ||
263 | 262 | ||
264 | rq->errors++; | 263 | rq->errors++; |
265 | 264 | ||
@@ -410,9 +409,9 @@ static ide_startstop_t idescsi_pc_intr (ide_drive_t *drive) | |||
410 | idescsi_end_request (drive, 1, 0); | 409 | idescsi_end_request (drive, 1, 0); |
411 | return ide_stopped; | 410 | return ide_stopped; |
412 | } | 411 | } |
413 | bcount = (hwif->INB(hwif->io_ports[IDE_BCOUNTH_OFFSET]) << 8) | | 412 | bcount = (hwif->INB(hwif->io_ports.lbah_addr) << 8) | |
414 | hwif->INB(hwif->io_ports[IDE_BCOUNTL_OFFSET]); | 413 | hwif->INB(hwif->io_ports.lbam_addr); |
415 | ireason = hwif->INB(hwif->io_ports[IDE_IREASON_OFFSET]); | 414 | ireason = hwif->INB(hwif->io_ports.nsect_addr); |
416 | 415 | ||
417 | if (ireason & CD) { | 416 | if (ireason & CD) { |
418 | printk(KERN_ERR "ide-scsi: CoD != 0 in idescsi_pc_intr\n"); | 417 | printk(KERN_ERR "ide-scsi: CoD != 0 in idescsi_pc_intr\n"); |
@@ -485,7 +484,7 @@ static ide_startstop_t idescsi_transfer_pc(ide_drive_t *drive) | |||
485 | "initiated yet DRQ isn't asserted\n"); | 484 | "initiated yet DRQ isn't asserted\n"); |
486 | return startstop; | 485 | return startstop; |
487 | } | 486 | } |
488 | ireason = hwif->INB(hwif->io_ports[IDE_IREASON_OFFSET]); | 487 | ireason = hwif->INB(hwif->io_ports.nsect_addr); |
489 | if ((ireason & CD) == 0 || (ireason & IO)) { | 488 | if ((ireason & CD) == 0 || (ireason & IO)) { |
490 | printk(KERN_ERR "ide-scsi: (IO,CoD) != (0,1) while " | 489 | printk(KERN_ERR "ide-scsi: (IO,CoD) != (0,1) while " |
491 | "issuing a packet command\n"); | 490 | "issuing a packet command\n"); |
@@ -575,7 +574,7 @@ static ide_startstop_t idescsi_issue_pc(ide_drive_t *drive, | |||
575 | return ide_started; | 574 | return ide_started; |
576 | } else { | 575 | } else { |
577 | /* Issue the packet command */ | 576 | /* Issue the packet command */ |
578 | hwif->OUTB(WIN_PACKETCMD, hwif->io_ports[IDE_COMMAND_OFFSET]); | 577 | hwif->OUTB(WIN_PACKETCMD, hwif->io_ports.command_addr); |
579 | return idescsi_transfer_pc(drive); | 578 | return idescsi_transfer_pc(drive); |
580 | } | 579 | } |
581 | } | 580 | } |
diff --git a/include/asm-arm/arch-sa1100/ide.h b/include/asm-arm/arch-sa1100/ide.h index 98b10bcf9f1b..b14cbda01dc3 100644 --- a/include/asm-arm/arch-sa1100/ide.h +++ b/include/asm-arm/arch-sa1100/ide.h | |||
@@ -37,12 +37,12 @@ static inline void ide_init_hwif_ports(hw_regs_t *hw, unsigned long data_port, | |||
37 | 37 | ||
38 | memset(hw, 0, sizeof(*hw)); | 38 | memset(hw, 0, sizeof(*hw)); |
39 | 39 | ||
40 | for (i = IDE_DATA_OFFSET; i <= IDE_STATUS_OFFSET; i++) { | 40 | for (i = 0; i <= 7; i++) { |
41 | hw->io_ports[i] = reg; | 41 | hw->io_ports_array[i] = reg; |
42 | reg += regincr; | 42 | reg += regincr; |
43 | } | 43 | } |
44 | 44 | ||
45 | hw->io_ports[IDE_CONTROL_OFFSET] = ctrl_port; | 45 | hw->io_ports.ctl_addr = ctrl_port; |
46 | 46 | ||
47 | if (irq) | 47 | if (irq) |
48 | *irq = 0; | 48 | *irq = 0; |
diff --git a/include/asm-cris/arch-v10/ide.h b/include/asm-cris/arch-v10/ide.h index ea34e0d0a388..5366e6239328 100644 --- a/include/asm-cris/arch-v10/ide.h +++ b/include/asm-cris/arch-v10/ide.h | |||
@@ -59,22 +59,19 @@ static inline void ide_init_hwif_ports(hw_regs_t *hw, unsigned long data_port, u | |||
59 | int i; | 59 | int i; |
60 | 60 | ||
61 | /* fill in ports for ATA addresses 0 to 7 */ | 61 | /* fill in ports for ATA addresses 0 to 7 */ |
62 | 62 | for (i = 0; i <= 7; i++) { | |
63 | for (i = IDE_DATA_OFFSET; i <= IDE_STATUS_OFFSET; i++) { | 63 | hw->io_ports_array[i] = data_port | |
64 | hw->io_ports[i] = data_port | | ||
65 | IO_FIELD(R_ATA_CTRL_DATA, addr, i) | | 64 | IO_FIELD(R_ATA_CTRL_DATA, addr, i) | |
66 | IO_STATE(R_ATA_CTRL_DATA, cs0, active); | 65 | IO_STATE(R_ATA_CTRL_DATA, cs0, active); |
67 | } | 66 | } |
68 | 67 | ||
69 | /* the IDE control register is at ATA address 6, with CS1 active instead of CS0 */ | 68 | /* the IDE control register is at ATA address 6, with CS1 active instead of CS0 */ |
70 | 69 | hw->io_ports.ctl_addr = data_port | | |
71 | hw->io_ports[IDE_CONTROL_OFFSET] = data_port | | ||
72 | IO_FIELD(R_ATA_CTRL_DATA, addr, 6) | | 70 | IO_FIELD(R_ATA_CTRL_DATA, addr, 6) | |
73 | IO_STATE(R_ATA_CTRL_DATA, cs1, active); | 71 | IO_STATE(R_ATA_CTRL_DATA, cs1, active); |
74 | 72 | ||
75 | /* whats this for ? */ | 73 | /* whats this for ? */ |
76 | 74 | hw->io_ports.irq_addr = 0; | |
77 | hw->io_ports[IDE_IRQ_OFFSET] = 0; | ||
78 | } | 75 | } |
79 | 76 | ||
80 | static inline void ide_init_default_hwifs(void) | 77 | static inline void ide_init_default_hwifs(void) |
diff --git a/include/asm-x86/processor.h b/include/asm-x86/processor.h index 117343b0c271..2e7974ec77ec 100644 --- a/include/asm-x86/processor.h +++ b/include/asm-x86/processor.h | |||
@@ -722,6 +722,7 @@ static inline void __mwait(unsigned long eax, unsigned long ecx) | |||
722 | 722 | ||
723 | static inline void __sti_mwait(unsigned long eax, unsigned long ecx) | 723 | static inline void __sti_mwait(unsigned long eax, unsigned long ecx) |
724 | { | 724 | { |
725 | trace_hardirqs_on(); | ||
725 | /* "mwait %eax, %ecx;" */ | 726 | /* "mwait %eax, %ecx;" */ |
726 | asm volatile("sti; .byte 0x0f, 0x01, 0xc9;" | 727 | asm volatile("sti; .byte 0x0f, 0x01, 0xc9;" |
727 | :: "a" (eax), "c" (ecx)); | 728 | :: "a" (eax), "c" (ecx)); |
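
The added trace_hardirqs_on() keeps lockdep's picture of the IRQ state
accurate: the "sti; mwait" sequence enables interrupts from inside inline
assembly, bypassing local_irq_enable() and the annotation it would normally
emit. Any code that sets IF directly with sti wants the same treatment; a
minimal illustrative sketch (hypothetical function, not part of the patch):

    #include <linux/irqflags.h>

    static inline void example_sti_hlt(void)
    {
            trace_hardirqs_on();    /* tell lockdep before the sti below */
            asm volatile("sti; hlt" ::: "memory");
    }
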
diff --git a/include/linux/ide.h b/include/linux/ide.h index f0af504dfa42..32fd77bb4436 100644 --- a/include/linux/ide.h +++ b/include/linux/ide.h | |||
@@ -48,13 +48,6 @@ typedef unsigned char byte; /* used everywhere */ | |||
48 | #define ERROR_RECAL 1 /* Recalibrate every 2nd retry */ | 48 | #define ERROR_RECAL 1 /* Recalibrate every 2nd retry */ |
49 | 49 | ||
50 | /* | 50 | /* |
51 | * Tune flags | ||
52 | */ | ||
53 | #define IDE_TUNE_NOAUTO 2 | ||
54 | #define IDE_TUNE_AUTO 1 | ||
55 | #define IDE_TUNE_DEFAULT 0 | ||
56 | |||
57 | /* | ||
58 | * state flags | 51 | * state flags |
59 | */ | 52 | */ |
60 | 53 | ||
@@ -68,23 +61,30 @@ typedef unsigned char byte; /* used everywhere */ | |||
68 | */ | 61 | */ |
69 | #define IDE_NR_PORTS (10) | 62 | #define IDE_NR_PORTS (10) |
70 | 63 | ||
71 | #define IDE_DATA_OFFSET (0) | 64 | struct ide_io_ports { |
72 | #define IDE_ERROR_OFFSET (1) | 65 | unsigned long data_addr; |
73 | #define IDE_NSECTOR_OFFSET (2) | 66 | |
74 | #define IDE_SECTOR_OFFSET (3) | 67 | union { |
75 | #define IDE_LCYL_OFFSET (4) | 68 | unsigned long error_addr; /* read: error */ |
76 | #define IDE_HCYL_OFFSET (5) | 69 | unsigned long feature_addr; /* write: feature */ |
77 | #define IDE_SELECT_OFFSET (6) | 70 | }; |
78 | #define IDE_STATUS_OFFSET (7) | 71 | |
79 | #define IDE_CONTROL_OFFSET (8) | 72 | unsigned long nsect_addr; |
80 | #define IDE_IRQ_OFFSET (9) | 73 | unsigned long lbal_addr; |
81 | 74 | unsigned long lbam_addr; | |
82 | #define IDE_FEATURE_OFFSET IDE_ERROR_OFFSET | 75 | unsigned long lbah_addr; |
83 | #define IDE_COMMAND_OFFSET IDE_STATUS_OFFSET | 76 | |
84 | #define IDE_ALTSTATUS_OFFSET IDE_CONTROL_OFFSET | 77 | unsigned long device_addr; |
85 | #define IDE_IREASON_OFFSET IDE_NSECTOR_OFFSET | 78 | |
86 | #define IDE_BCOUNTL_OFFSET IDE_LCYL_OFFSET | 79 | union { |
87 | #define IDE_BCOUNTH_OFFSET IDE_HCYL_OFFSET | 80 | unsigned long status_addr; /*  read: status  */ |
81 | unsigned long command_addr; /* write: command */ | ||
82 | }; | ||
83 | |||
84 | unsigned long ctl_addr; | ||
85 | |||
86 | unsigned long irq_addr; | ||
87 | }; | ||
88 | 88 | ||
89 | #define OK_STAT(stat,good,bad) (((stat)&((good)|(bad)))==(good)) | 89 | #define OK_STAT(stat,good,bad) (((stat)&((good)|(bad)))==(good)) |
90 | #define BAD_R_STAT (BUSY_STAT | ERR_STAT) | 90 | #define BAD_R_STAT (BUSY_STAT | ERR_STAT) |
@@ -163,7 +163,11 @@ typedef u8 hwif_chipset_t; | |||
163 | * Structure to hold all information about the location of this port | 163 | * Structure to hold all information about the location of this port |
164 | */ | 164 | */ |
165 | typedef struct hw_regs_s { | 165 | typedef struct hw_regs_s { |
166 | unsigned long io_ports[IDE_NR_PORTS]; /* task file registers */ | 166 | union { |
167 | struct ide_io_ports io_ports; | ||
168 | unsigned long io_ports_array[IDE_NR_PORTS]; | ||
169 | }; | ||
170 | |||
167 | int irq; /* our irq number */ | 171 | int irq; /* our irq number */ |
168 | ide_ack_intr_t *ack_intr; /* acknowledge interrupt */ | 172 | ide_ack_intr_t *ack_intr; /* acknowledge interrupt */ |
169 | hwif_chipset_t chipset; | 173 | hwif_chipset_t chipset; |
@@ -179,10 +183,10 @@ static inline void ide_std_init_ports(hw_regs_t *hw, | |||
179 | { | 183 | { |
180 | unsigned int i; | 184 | unsigned int i; |
181 | 185 | ||
182 | for (i = IDE_DATA_OFFSET; i <= IDE_STATUS_OFFSET; i++) | 186 | for (i = 0; i <= 7; i++) |
183 | hw->io_ports[i] = io_addr++; | 187 | hw->io_ports_array[i] = io_addr++; |
184 | 188 | ||
185 | hw->io_ports[IDE_CONTROL_OFFSET] = ctl_addr; | 189 | hw->io_ports.ctl_addr = ctl_addr; |
186 | } | 190 | } |
187 | 191 | ||
188 | #include <asm/ide.h> | 192 | #include <asm/ide.h> |
@@ -328,7 +332,6 @@ typedef struct ide_drive_s { | |||
328 | unsigned atapi_overlap : 1; /* ATAPI overlap (not supported) */ | 332 | unsigned atapi_overlap : 1; /* ATAPI overlap (not supported) */ |
329 | unsigned doorlocking : 1; /* for removable only: door lock/unlock works */ | 333 | unsigned doorlocking : 1; /* for removable only: door lock/unlock works */ |
330 | unsigned nodma : 1; /* disallow DMA */ | 334 | unsigned nodma : 1; /* disallow DMA */ |
331 | unsigned autotune : 2; /* 0=default, 1=autotune, 2=noautotune */ | ||
332 | unsigned remap_0_to_1 : 1; /* 0=noremap, 1=remap 0->1 (for EZDrive) */ | 335 | unsigned remap_0_to_1 : 1; /* 0=noremap, 1=remap 0->1 (for EZDrive) */ |
333 | unsigned blocked : 1; /* 1=powermanagment told us not to do anything, so sleep nicely */ | 336 | unsigned blocked : 1; /* 1=powermanagment told us not to do anything, so sleep nicely */ |
334 | unsigned vdma : 1; /* 1=doing PIO over DMA 0=doing normal DMA */ | 337 | unsigned vdma : 1; /* 1=doing PIO over DMA 0=doing normal DMA */ |
@@ -432,8 +435,8 @@ typedef struct hwif_s { | |||
432 | 435 | ||
433 | char name[6]; /* name of interface, eg. "ide0" */ | 436 | char name[6]; /* name of interface, eg. "ide0" */ |
434 | 437 | ||
435 | /* task file registers for pata and sata */ | 438 | struct ide_io_ports io_ports; |
436 | unsigned long io_ports[IDE_NR_PORTS]; | 439 | |
437 | unsigned long sata_scr[SATA_NR_PORTS]; | 440 | unsigned long sata_scr[SATA_NR_PORTS]; |
438 | 441 | ||
439 | ide_drive_t drives[MAX_DRIVES]; /* drive info */ | 442 | ide_drive_t drives[MAX_DRIVES]; /* drive info */ |
@@ -520,7 +523,6 @@ typedef struct hwif_s { | |||
520 | unsigned present : 1; /* this interface exists */ | 523 | unsigned present : 1; /* this interface exists */ |
521 | unsigned serialized : 1; /* serialized all channel operation */ | 524 | unsigned serialized : 1; /* serialized all channel operation */ |
522 | unsigned sharing_irq: 1; /* 1 = sharing irq with another hwif */ | 525 | unsigned sharing_irq: 1; /* 1 = sharing irq with another hwif */ |
523 | unsigned reset : 1; /* reset after probe */ | ||
524 | unsigned sg_mapped : 1; /* sg_table and sg_nents are ready */ | 526 | unsigned sg_mapped : 1; /* sg_table and sg_nents are ready */ |
525 | unsigned mmio : 1; /* host uses MMIO */ | 527 | unsigned mmio : 1; /* host uses MMIO */ |
526 | 528 | ||
@@ -703,10 +705,6 @@ void ide_add_generic_settings(ide_drive_t *); | |||
703 | read_proc_t proc_ide_read_capacity; | 705 | read_proc_t proc_ide_read_capacity; |
704 | read_proc_t proc_ide_read_geometry; | 706 | read_proc_t proc_ide_read_geometry; |
705 | 707 | ||
706 | #ifdef CONFIG_BLK_DEV_IDEPCI | ||
707 | void ide_pci_create_host_proc(const char *, get_info_t *); | ||
708 | #endif | ||
709 | |||
710 | /* | 708 | /* |
711 | * Standard exit stuff: | 709 | * Standard exit stuff: |
712 | */ | 710 | */ |
@@ -807,8 +805,14 @@ int generic_ide_ioctl(ide_drive_t *, struct file *, struct block_device *, unsig | |||
807 | #ifndef _IDE_C | 805 | #ifndef _IDE_C |
808 | extern ide_hwif_t ide_hwifs[]; /* master data repository */ | 806 | extern ide_hwif_t ide_hwifs[]; /* master data repository */ |
809 | #endif | 807 | #endif |
808 | extern int ide_noacpi; | ||
809 | extern int ide_acpigtf; | ||
810 | extern int ide_acpionboot; | ||
810 | extern int noautodma; | 811 | extern int noautodma; |
811 | 812 | ||
813 | extern int ide_vlb_clk; | ||
814 | extern int ide_pci_clk; | ||
815 | |||
812 | ide_hwif_t *ide_find_port_slot(const struct ide_port_info *); | 816 | ide_hwif_t *ide_find_port_slot(const struct ide_port_info *); |
813 | 817 | ||
814 | static inline ide_hwif_t *ide_find_port(void) | 818 | static inline ide_hwif_t *ide_find_port(void) |
@@ -1068,8 +1072,6 @@ enum { | |||
1068 | IDE_HFLAG_NO_DMA = (1 << 14), | 1072 | IDE_HFLAG_NO_DMA = (1 << 14), |
1069 | /* check if host is PCI IDE device before allowing DMA */ | 1073 | /* check if host is PCI IDE device before allowing DMA */ |
1070 | IDE_HFLAG_NO_AUTODMA = (1 << 15), | 1074 | IDE_HFLAG_NO_AUTODMA = (1 << 15), |
1071 | /* don't autotune PIO */ | ||
1072 | IDE_HFLAG_NO_AUTOTUNE = (1 << 16), | ||
1073 | /* host is CS5510/CS5520 */ | 1075 | /* host is CS5510/CS5520 */ |
1074 | IDE_HFLAG_CS5520 = IDE_HFLAG_VDMA, | 1076 | IDE_HFLAG_CS5520 = IDE_HFLAG_VDMA, |
1075 | /* no LBA48 */ | 1077 | /* no LBA48 */ |
@@ -1215,13 +1217,15 @@ static inline void ide_acpi_set_state(ide_hwif_t *hwif, int on) {} | |||
1215 | #endif | 1217 | #endif |
1216 | 1218 | ||
1217 | void ide_remove_port_from_hwgroup(ide_hwif_t *); | 1219 | void ide_remove_port_from_hwgroup(ide_hwif_t *); |
1218 | void ide_unregister(unsigned int); | 1220 | void ide_unregister(ide_hwif_t *); |
1219 | 1221 | ||
1220 | void ide_register_region(struct gendisk *); | 1222 | void ide_register_region(struct gendisk *); |
1221 | void ide_unregister_region(struct gendisk *); | 1223 | void ide_unregister_region(struct gendisk *); |
1222 | 1224 | ||
1223 | void ide_undecoded_slave(ide_drive_t *); | 1225 | void ide_undecoded_slave(ide_drive_t *); |
1224 | 1226 | ||
1227 | void ide_port_apply_params(ide_hwif_t *); | ||
1228 | |||
1225 | int ide_device_add_all(u8 *idx, const struct ide_port_info *); | 1229 | int ide_device_add_all(u8 *idx, const struct ide_port_info *); |
1226 | int ide_device_add(u8 idx[4], const struct ide_port_info *); | 1230 | int ide_device_add(u8 idx[4], const struct ide_port_info *); |
1227 | int ide_legacy_device_add(const struct ide_port_info *, unsigned long); | 1231 | int ide_legacy_device_add(const struct ide_port_info *, unsigned long); |
@@ -1333,29 +1337,28 @@ static inline void ide_set_irq(ide_drive_t *drive, int on) | |||
1333 | { | 1337 | { |
1334 | ide_hwif_t *hwif = drive->hwif; | 1338 | ide_hwif_t *hwif = drive->hwif; |
1335 | 1339 | ||
1336 | hwif->OUTB(drive->ctl | (on ? 0 : 2), | 1340 | hwif->OUTB(drive->ctl | (on ? 0 : 2), hwif->io_ports.ctl_addr); |
1337 | hwif->io_ports[IDE_CONTROL_OFFSET]); | ||
1338 | } | 1341 | } |
1339 | 1342 | ||
1340 | static inline u8 ide_read_status(ide_drive_t *drive) | 1343 | static inline u8 ide_read_status(ide_drive_t *drive) |
1341 | { | 1344 | { |
1342 | ide_hwif_t *hwif = drive->hwif; | 1345 | ide_hwif_t *hwif = drive->hwif; |
1343 | 1346 | ||
1344 | return hwif->INB(hwif->io_ports[IDE_STATUS_OFFSET]); | 1347 | return hwif->INB(hwif->io_ports.status_addr); |
1345 | } | 1348 | } |
1346 | 1349 | ||
1347 | static inline u8 ide_read_altstatus(ide_drive_t *drive) | 1350 | static inline u8 ide_read_altstatus(ide_drive_t *drive) |
1348 | { | 1351 | { |
1349 | ide_hwif_t *hwif = drive->hwif; | 1352 | ide_hwif_t *hwif = drive->hwif; |
1350 | 1353 | ||
1351 | return hwif->INB(hwif->io_ports[IDE_CONTROL_OFFSET]); | 1354 | return hwif->INB(hwif->io_ports.ctl_addr); |
1352 | } | 1355 | } |
1353 | 1356 | ||
1354 | static inline u8 ide_read_error(ide_drive_t *drive) | 1357 | static inline u8 ide_read_error(ide_drive_t *drive) |
1355 | { | 1358 | { |
1356 | ide_hwif_t *hwif = drive->hwif; | 1359 | ide_hwif_t *hwif = drive->hwif; |
1357 | 1360 | ||
1358 | return hwif->INB(hwif->io_ports[IDE_ERROR_OFFSET]); | 1361 | return hwif->INB(hwif->io_ports.error_addr); |
1359 | } | 1362 | } |
1360 | 1363 | ||
1361 | /* | 1364 | /* |
@@ -1368,7 +1371,7 @@ static inline void ide_atapi_discard_data(ide_drive_t *drive, unsigned bcount) | |||
1368 | 1371 | ||
1369 | /* FIXME: use ->atapi_input_bytes */ | 1372 | /* FIXME: use ->atapi_input_bytes */ |
1370 | while (bcount--) | 1373 | while (bcount--) |
1371 | (void)hwif->INB(hwif->io_ports[IDE_DATA_OFFSET]); | 1374 | (void)hwif->INB(hwif->io_ports.data_addr); |
1372 | } | 1375 | } |
1373 | 1376 | ||
1374 | static inline void ide_atapi_write_zeros(ide_drive_t *drive, unsigned bcount) | 1377 | static inline void ide_atapi_write_zeros(ide_drive_t *drive, unsigned bcount) |
@@ -1377,7 +1380,7 @@ static inline void ide_atapi_write_zeros(ide_drive_t *drive, unsigned bcount) | |||
1377 | 1380 | ||
1378 | /* FIXME: use ->atapi_output_bytes */ | 1381 | /* FIXME: use ->atapi_output_bytes */ |
1379 | while (bcount--) | 1382 | while (bcount--) |
1380 | hwif->OUTB(0, hwif->io_ports[IDE_DATA_OFFSET]); | 1383 | hwif->OUTB(0, hwif->io_ports.data_addr); |
1381 | } | 1384 | } |
1382 | 1385 | ||
1383 | #endif /* _IDE_H */ | 1386 | #endif /* _IDE_H */ |
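
The anonymous union in hw_regs_s lets the old array-style port setup coexist
with the new named fields: io_ports_array[0..9] and struct ide_io_ports alias
the same storage, with data_addr at index 0, the status/command pair at index
7, ctl_addr at 8 and irq_addr at 9. A sketch of that aliasing, with the caveat
that the helper below is purely illustrative and not part of the patch:

    #include <linux/ide.h>

    /* Illustrative only: the array view and the named view address the same
     * storage, so filling one fills the other. */
    static void example_init_ports(hw_regs_t *hw, unsigned long base,
                                   unsigned long ctl)
    {
            int i;

            for (i = 0; i <= 7; i++)        /* data_addr .. status_addr */
                    hw->io_ports_array[i] = base + i;

            hw->io_ports.ctl_addr = ctl;

            /* hw->io_ports.status_addr now reads back as base + 7, the same
             * word the loop stored through io_ports_array[7]. */
    }
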
diff --git a/include/linux/mlx4/device.h b/include/linux/mlx4/device.h index ff7df1a2222f..9fa1a8002ce2 100644 --- a/include/linux/mlx4/device.h +++ b/include/linux/mlx4/device.h | |||
@@ -208,6 +208,38 @@ struct mlx4_mtt { | |||
208 | int page_shift; | 208 | int page_shift; |
209 | }; | 209 | }; |
210 | 210 | ||
211 | enum { | ||
212 | MLX4_DB_PER_PAGE = PAGE_SIZE / 4 | ||
213 | }; | ||
214 | |||
215 | struct mlx4_db_pgdir { | ||
216 | struct list_head list; | ||
217 | DECLARE_BITMAP(order0, MLX4_DB_PER_PAGE); | ||
218 | DECLARE_BITMAP(order1, MLX4_DB_PER_PAGE / 2); | ||
219 | unsigned long *bits[2]; | ||
220 | __be32 *db_page; | ||
221 | dma_addr_t db_dma; | ||
222 | }; | ||
223 | |||
224 | struct mlx4_ib_user_db_page; | ||
225 | |||
226 | struct mlx4_db { | ||
227 | __be32 *db; | ||
228 | union { | ||
229 | struct mlx4_db_pgdir *pgdir; | ||
230 | struct mlx4_ib_user_db_page *user_page; | ||
231 | } u; | ||
232 | dma_addr_t dma; | ||
233 | int index; | ||
234 | int order; | ||
235 | }; | ||
236 | |||
237 | struct mlx4_hwq_resources { | ||
238 | struct mlx4_db db; | ||
239 | struct mlx4_mtt mtt; | ||
240 | struct mlx4_buf buf; | ||
241 | }; | ||
242 | |||
211 | struct mlx4_mr { | 243 | struct mlx4_mr { |
212 | struct mlx4_mtt mtt; | 244 | struct mlx4_mtt mtt; |
213 | u64 iova; | 245 | u64 iova; |
@@ -341,6 +373,14 @@ int mlx4_write_mtt(struct mlx4_dev *dev, struct mlx4_mtt *mtt, | |||
341 | int mlx4_buf_write_mtt(struct mlx4_dev *dev, struct mlx4_mtt *mtt, | 373 | int mlx4_buf_write_mtt(struct mlx4_dev *dev, struct mlx4_mtt *mtt, |
342 | struct mlx4_buf *buf); | 374 | struct mlx4_buf *buf); |
343 | 375 | ||
376 | int mlx4_db_alloc(struct mlx4_dev *dev, struct mlx4_db *db, int order); | ||
377 | void mlx4_db_free(struct mlx4_dev *dev, struct mlx4_db *db); | ||
378 | |||
379 | int mlx4_alloc_hwq_res(struct mlx4_dev *dev, struct mlx4_hwq_resources *wqres, | ||
380 | int size, int max_direct); | ||
381 | void mlx4_free_hwq_res(struct mlx4_dev *mdev, struct mlx4_hwq_resources *wqres, | ||
382 | int size); | ||
383 | |||
344 | int mlx4_cq_alloc(struct mlx4_dev *dev, int nent, struct mlx4_mtt *mtt, | 384 | int mlx4_cq_alloc(struct mlx4_dev *dev, int nent, struct mlx4_mtt *mtt, |
345 | struct mlx4_uar *uar, u64 db_rec, struct mlx4_cq *cq); | 385 | struct mlx4_uar *uar, u64 db_rec, struct mlx4_cq *cq); |
346 | void mlx4_cq_free(struct mlx4_dev *dev, struct mlx4_cq *cq); | 386 | void mlx4_cq_free(struct mlx4_dev *dev, struct mlx4_cq *cq); |
diff --git a/include/linux/mlx4/qp.h b/include/linux/mlx4/qp.h index a5e43febee4f..7f128b266faa 100644 --- a/include/linux/mlx4/qp.h +++ b/include/linux/mlx4/qp.h | |||
@@ -296,6 +296,10 @@ int mlx4_qp_modify(struct mlx4_dev *dev, struct mlx4_mtt *mtt, | |||
296 | int mlx4_qp_query(struct mlx4_dev *dev, struct mlx4_qp *qp, | 296 | int mlx4_qp_query(struct mlx4_dev *dev, struct mlx4_qp *qp, |
297 | struct mlx4_qp_context *context); | 297 | struct mlx4_qp_context *context); |
298 | 298 | ||
299 | int mlx4_qp_to_ready(struct mlx4_dev *dev, struct mlx4_mtt *mtt, | ||
300 | struct mlx4_qp_context *context, | ||
301 | struct mlx4_qp *qp, enum mlx4_qp_state *qp_state); | ||
302 | |||
299 | static inline struct mlx4_qp *__mlx4_qp_lookup(struct mlx4_dev *dev, u32 qpn) | 303 | static inline struct mlx4_qp *__mlx4_qp_lookup(struct mlx4_dev *dev, u32 qpn) |
300 | { | 304 | { |
301 | return radix_tree_lookup(&dev->qp_table_tree, qpn & (dev->caps.num_qps - 1)); | 305 | return radix_tree_lookup(&dev->qp_table_tree, qpn & (dev->caps.num_qps - 1)); |
diff --git a/net/mac80211/mesh.h b/net/mac80211/mesh.h index 742003d3a841..9ee3affab346 100644 --- a/net/mac80211/mesh.h +++ b/net/mac80211/mesh.h | |||
@@ -13,6 +13,7 @@ | |||
13 | 13 | ||
14 | #include <linux/types.h> | 14 | #include <linux/types.h> |
15 | #include <linux/jhash.h> | 15 | #include <linux/jhash.h> |
16 | #include <asm/unaligned.h> | ||
16 | #include "ieee80211_i.h" | 17 | #include "ieee80211_i.h" |
17 | 18 | ||
18 | 19 | ||
diff --git a/net/mac80211/mesh_hwmp.c b/net/mac80211/mesh_hwmp.c index 02de8f1522a3..3df809222d1c 100644 --- a/net/mac80211/mesh_hwmp.c +++ b/net/mac80211/mesh_hwmp.c | |||
@@ -7,7 +7,6 @@ | |||
7 | * published by the Free Software Foundation. | 7 | * published by the Free Software Foundation. |
8 | */ | 8 | */ |
9 | 9 | ||
10 | #include <asm/unaligned.h> | ||
11 | #include "mesh.h" | 10 | #include "mesh.h" |
12 | 11 | ||
13 | #define TEST_FRAME_LEN 8192 | 12 | #define TEST_FRAME_LEN 8192 |