Diffstat (limited to 'drivers')
-rw-r--r--drivers/cdrom/viocd.c14
-rw-r--r--drivers/cpufreq/Kconfig24
-rw-r--r--drivers/cpufreq/Makefile1
-rw-r--r--drivers/cpufreq/cpufreq.c8
-rw-r--r--drivers/cpufreq/cpufreq_conservative.c586
-rw-r--r--drivers/cpufreq/cpufreq_ondemand.c180
-rw-r--r--drivers/cpufreq/cpufreq_stats.c47
-rw-r--r--drivers/firmware/pcdp.c1
-rw-r--r--drivers/ide/ide-cd.c2
-rw-r--r--drivers/input/mousedev.c15
-rw-r--r--drivers/macintosh/via-pmu.c8
-rw-r--r--drivers/media/dvb/bt8xx/dst.c122
-rw-r--r--drivers/net/tg3.c571
-rw-r--r--drivers/scsi/ahci.c3
-rw-r--r--drivers/scsi/aic7xxx/aic79xx_osm.c2
-rw-r--r--drivers/scsi/ata_piix.c2
-rw-r--r--drivers/scsi/libata-core.c15
-rw-r--r--drivers/scsi/libata.h2
-rw-r--r--drivers/scsi/sata_nv.c2
-rw-r--r--drivers/scsi/sata_promise.c1
-rw-r--r--drivers/scsi/sata_qstor.c2
-rw-r--r--drivers/scsi/sata_sil.c1
-rw-r--r--drivers/scsi/sata_sis.c1
-rw-r--r--drivers/scsi/sata_svw.c1
-rw-r--r--drivers/scsi/sata_sx4.c2
-rw-r--r--drivers/scsi/sata_uli.c1
-rw-r--r--drivers/scsi/sata_via.c1
-rw-r--r--drivers/scsi/sata_vsc.c2
-rw-r--r--drivers/usb/media/pwc/pwc-ctrl.c2
-rw-r--r--drivers/usb/media/pwc/pwc-uncompress.c2
-rw-r--r--drivers/video/intelfb/intelfbdrv.c22
31 files changed, 1427 insertions, 216 deletions
diff --git a/drivers/cdrom/viocd.c b/drivers/cdrom/viocd.c
index fcca26c89bbc..38dd9ffbe8bc 100644
--- a/drivers/cdrom/viocd.c
+++ b/drivers/cdrom/viocd.c
@@ -488,6 +488,20 @@ static int viocd_packet(struct cdrom_device_info *cdi,
488 & (CDC_DVD_RAM | CDC_RAM)) != 0; 488 & (CDC_DVD_RAM | CDC_RAM)) != 0;
489 } 489 }
490 break; 490 break;
491 case GPCMD_GET_CONFIGURATION:
492 if (cgc->cmd[3] == CDF_RWRT) {
493 struct rwrt_feature_desc *rfd = (struct rwrt_feature_desc *)(cgc->buffer + sizeof(struct feature_header));
494
495 if ((buflen >=
496 (sizeof(struct feature_header) + sizeof(*rfd))) &&
497 (cdi->ops->capability & ~cdi->mask
498 & (CDC_DVD_RAM | CDC_RAM))) {
499 rfd->feature_code = cpu_to_be16(CDF_RWRT);
500 rfd->curr = 1;
501 ret = 0;
502 }
503 }
504 break;
491 default: 505 default:
492 if (cgc->sense) { 506 if (cgc->sense) {
493 /* indicate Unknown code */ 507 /* indicate Unknown code */
diff --git a/drivers/cpufreq/Kconfig b/drivers/cpufreq/Kconfig
index 95882bb1950e..60c9be99c6d9 100644
--- a/drivers/cpufreq/Kconfig
+++ b/drivers/cpufreq/Kconfig
@@ -46,6 +46,10 @@ config CPU_FREQ_STAT_DETAILS
46 This will show detail CPU frequency translation table in sysfs file 46 This will show detail CPU frequency translation table in sysfs file
47 system 47 system
48 48
49# Note that it is not currently possible to set the other governors (such as ondemand)
50# as the default, since if they fail to initialise, cpufreq will be
51# left in an undefined state.
52
49choice 53choice
50 prompt "Default CPUFreq governor" 54 prompt "Default CPUFreq governor"
51 default CPU_FREQ_DEFAULT_GOV_USERSPACE if CPU_FREQ_SA1100 || CPU_FREQ_SA1110 55 default CPU_FREQ_DEFAULT_GOV_USERSPACE if CPU_FREQ_SA1100 || CPU_FREQ_SA1110
@@ -115,4 +119,24 @@ config CPU_FREQ_GOV_ONDEMAND
115 119
116 If in doubt, say N. 120 If in doubt, say N.
117 121
122config CPU_FREQ_GOV_CONSERVATIVE
123 tristate "'conservative' cpufreq governor"
124 depends on CPU_FREQ
125 help
126 'conservative' - this driver is rather similar to the 'ondemand'
127 governor both in its source code and its purpose, the difference is
128 its optimisation for better suitability in a battery powered
129 environment. The frequency is gracefully increased and decreased
130 rather than jumping to 100% when speed is required.
131
132 If you have a desktop machine then you should really be considering
133 the 'ondemand' governor instead, however if you are using a laptop,
134 PDA or even an AMD64 based computer (due to the unacceptable
135 step-by-step latency issues between the minimum and maximum frequency
136 transitions in the CPU) you will probably want to use this governor.
137
138 For details, take a look at linux/Documentation/cpu-freq.
139
140 If in doubt, say N.
141
118endif # CPU_FREQ 142endif # CPU_FREQ
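
The Kconfig help above describes the behaviour in words; the sketch below only illustrates that stepping idea. The names conservative_step and load_pct are invented for the illustration — the real governor added in the new file further down works on per-CPU idle jiffies measured each sampling interval, not on a ready-made load percentage — but the 80%/20% thresholds and the 5% step match the patch defaults.

/* Illustrative sketch only, not part of the patch: gradual stepping as
 * described in the Kconfig help, instead of jumping straight to 100%. */
static unsigned int conservative_step(unsigned int cur, unsigned int min_freq,
				      unsigned int max_freq, unsigned int load_pct)
{
	unsigned int step = (5 * max_freq) / 100;	/* default freq_step: 5% of max */

	if (step == 0)
		step = 5;				/* same fallback as the governor uses */
	if (load_pct > 80)				/* mostly busy: one step up, not a jump to max */
		return (cur + step > max_freq) ? max_freq : cur + step;
	if (load_pct < 20)				/* mostly idle: one step down */
		return (cur < min_freq + step) ? min_freq : cur - step;
	return cur;					/* inside the 20-80% band: hold */
}
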
diff --git a/drivers/cpufreq/Makefile b/drivers/cpufreq/Makefile
index 67b16e5a41a7..71fc3b4173f1 100644
--- a/drivers/cpufreq/Makefile
+++ b/drivers/cpufreq/Makefile
@@ -8,6 +8,7 @@ obj-$(CONFIG_CPU_FREQ_GOV_PERFORMANCE) += cpufreq_performance.o
8obj-$(CONFIG_CPU_FREQ_GOV_POWERSAVE) += cpufreq_powersave.o 8obj-$(CONFIG_CPU_FREQ_GOV_POWERSAVE) += cpufreq_powersave.o
9obj-$(CONFIG_CPU_FREQ_GOV_USERSPACE) += cpufreq_userspace.o 9obj-$(CONFIG_CPU_FREQ_GOV_USERSPACE) += cpufreq_userspace.o
10obj-$(CONFIG_CPU_FREQ_GOV_ONDEMAND) += cpufreq_ondemand.o 10obj-$(CONFIG_CPU_FREQ_GOV_ONDEMAND) += cpufreq_ondemand.o
11obj-$(CONFIG_CPU_FREQ_GOV_CONSERVATIVE) += cpufreq_conservative.o
11 12
12# CPUfreq cross-arch helpers 13# CPUfreq cross-arch helpers
13obj-$(CONFIG_CPU_FREQ_TABLE) += freq_table.o 14obj-$(CONFIG_CPU_FREQ_TABLE) += freq_table.o
diff --git a/drivers/cpufreq/cpufreq.c b/drivers/cpufreq/cpufreq.c
index 8e561313d094..03b5fb2ddcf4 100644
--- a/drivers/cpufreq/cpufreq.c
+++ b/drivers/cpufreq/cpufreq.c
@@ -258,7 +258,7 @@ void cpufreq_notify_transition(struct cpufreq_freqs *freqs, unsigned int state)
258 (likely(cpufreq_cpu_data[freqs->cpu]->cur)) && 258 (likely(cpufreq_cpu_data[freqs->cpu]->cur)) &&
259 (unlikely(freqs->old != cpufreq_cpu_data[freqs->cpu]->cur))) 259 (unlikely(freqs->old != cpufreq_cpu_data[freqs->cpu]->cur)))
260 { 260 {
261 printk(KERN_WARNING "Warning: CPU frequency is %u, " 261 dprintk(KERN_WARNING "Warning: CPU frequency is %u, "
262 "cpufreq assumed %u kHz.\n", freqs->old, cpufreq_cpu_data[freqs->cpu]->cur); 262 "cpufreq assumed %u kHz.\n", freqs->old, cpufreq_cpu_data[freqs->cpu]->cur);
263 freqs->old = cpufreq_cpu_data[freqs->cpu]->cur; 263 freqs->old = cpufreq_cpu_data[freqs->cpu]->cur;
264 } 264 }
@@ -814,7 +814,7 @@ static void cpufreq_out_of_sync(unsigned int cpu, unsigned int old_freq, unsigne
814{ 814{
815 struct cpufreq_freqs freqs; 815 struct cpufreq_freqs freqs;
816 816
817 printk(KERN_WARNING "Warning: CPU frequency out of sync: cpufreq and timing " 817 dprintk(KERN_WARNING "Warning: CPU frequency out of sync: cpufreq and timing "
818 "core thinks of %u, is %u kHz.\n", old_freq, new_freq); 818 "core thinks of %u, is %u kHz.\n", old_freq, new_freq);
819 819
820 freqs.cpu = cpu; 820 freqs.cpu = cpu;
@@ -923,7 +923,7 @@ static int cpufreq_suspend(struct sys_device * sysdev, u32 state)
923 struct cpufreq_freqs freqs; 923 struct cpufreq_freqs freqs;
924 924
925 if (!(cpufreq_driver->flags & CPUFREQ_PM_NO_WARN)) 925 if (!(cpufreq_driver->flags & CPUFREQ_PM_NO_WARN))
926 printk(KERN_DEBUG "Warning: CPU frequency is %u, " 926 dprintk(KERN_DEBUG "Warning: CPU frequency is %u, "
927 "cpufreq assumed %u kHz.\n", 927 "cpufreq assumed %u kHz.\n",
928 cur_freq, cpu_policy->cur); 928 cur_freq, cpu_policy->cur);
929 929
@@ -1004,7 +1004,7 @@ static int cpufreq_resume(struct sys_device * sysdev)
1004 struct cpufreq_freqs freqs; 1004 struct cpufreq_freqs freqs;
1005 1005
1006 if (!(cpufreq_driver->flags & CPUFREQ_PM_NO_WARN)) 1006 if (!(cpufreq_driver->flags & CPUFREQ_PM_NO_WARN))
1007 printk(KERN_WARNING "Warning: CPU frequency" 1007 dprintk(KERN_WARNING "Warning: CPU frequency"
1008 "is %u, cpufreq assumed %u kHz.\n", 1008 "is %u, cpufreq assumed %u kHz.\n",
1009 cur_freq, cpu_policy->cur); 1009 cur_freq, cpu_policy->cur);
1010 1010
diff --git a/drivers/cpufreq/cpufreq_conservative.c b/drivers/cpufreq/cpufreq_conservative.c
new file mode 100644
index 000000000000..e1df376e709e
--- /dev/null
+++ b/drivers/cpufreq/cpufreq_conservative.c
@@ -0,0 +1,586 @@
1/*
2 * drivers/cpufreq/cpufreq_conservative.c
3 *
4 * Copyright (C) 2001 Russell King
5 * (C) 2003 Venkatesh Pallipadi <venkatesh.pallipadi@intel.com>.
6 * Jun Nakajima <jun.nakajima@intel.com>
7 * (C) 2004 Alexander Clouter <alex-kernel@digriz.org.uk>
8 *
9 * This program is free software; you can redistribute it and/or modify
10 * it under the terms of the GNU General Public License version 2 as
11 * published by the Free Software Foundation.
12 */
13
14#include <linux/kernel.h>
15#include <linux/module.h>
16#include <linux/smp.h>
17#include <linux/init.h>
18#include <linux/interrupt.h>
19#include <linux/ctype.h>
20#include <linux/cpufreq.h>
21#include <linux/sysctl.h>
22#include <linux/types.h>
23#include <linux/fs.h>
24#include <linux/sysfs.h>
25#include <linux/sched.h>
26#include <linux/kmod.h>
27#include <linux/workqueue.h>
28#include <linux/jiffies.h>
29#include <linux/kernel_stat.h>
30#include <linux/percpu.h>
31
32/*
33 * dbs is used in this file as a shortform for demandbased switching
34 * It helps to keep variable names smaller, simpler
35 */
36
37#define DEF_FREQUENCY_UP_THRESHOLD (80)
38#define MIN_FREQUENCY_UP_THRESHOLD (0)
39#define MAX_FREQUENCY_UP_THRESHOLD (100)
40
41#define DEF_FREQUENCY_DOWN_THRESHOLD (20)
42#define MIN_FREQUENCY_DOWN_THRESHOLD (0)
43#define MAX_FREQUENCY_DOWN_THRESHOLD (100)
44
45/*
46 * The polling frequency of this governor depends on the capability of
47 * the processor. Default polling frequency is 1000 times the transition
48 * latency of the processor. The governor will work on any processor with
49 * transition latency <= 10mS, using appropriate sampling
50 * rate.
51 * For CPUs with transition latency > 10mS (mostly drivers with CPUFREQ_ETERNAL)
52 * this governor will not work.
53 * All times here are in uS.
54 */
55static unsigned int def_sampling_rate;
56#define MIN_SAMPLING_RATE (def_sampling_rate / 2)
57#define MAX_SAMPLING_RATE (500 * def_sampling_rate)
58#define DEF_SAMPLING_RATE_LATENCY_MULTIPLIER (100000)
59#define DEF_SAMPLING_DOWN_FACTOR (5)
60#define TRANSITION_LATENCY_LIMIT (10 * 1000)
61
62static void do_dbs_timer(void *data);
63
64struct cpu_dbs_info_s {
65 struct cpufreq_policy *cur_policy;
66 unsigned int prev_cpu_idle_up;
67 unsigned int prev_cpu_idle_down;
68 unsigned int enable;
69};
70static DEFINE_PER_CPU(struct cpu_dbs_info_s, cpu_dbs_info);
71
72static unsigned int dbs_enable; /* number of CPUs using this policy */
73
74static DECLARE_MUTEX (dbs_sem);
75static DECLARE_WORK (dbs_work, do_dbs_timer, NULL);
76
77struct dbs_tuners {
78 unsigned int sampling_rate;
79 unsigned int sampling_down_factor;
80 unsigned int up_threshold;
81 unsigned int down_threshold;
82 unsigned int ignore_nice;
83 unsigned int freq_step;
84};
85
86static struct dbs_tuners dbs_tuners_ins = {
87 .up_threshold = DEF_FREQUENCY_UP_THRESHOLD,
88 .down_threshold = DEF_FREQUENCY_DOWN_THRESHOLD,
89 .sampling_down_factor = DEF_SAMPLING_DOWN_FACTOR,
90};
91
92static inline unsigned int get_cpu_idle_time(unsigned int cpu)
93{
94 return kstat_cpu(cpu).cpustat.idle +
95 kstat_cpu(cpu).cpustat.iowait +
96 ( !dbs_tuners_ins.ignore_nice ?
97 kstat_cpu(cpu).cpustat.nice :
98 0);
99}
100
101/************************** sysfs interface ************************/
102static ssize_t show_sampling_rate_max(struct cpufreq_policy *policy, char *buf)
103{
104 return sprintf (buf, "%u\n", MAX_SAMPLING_RATE);
105}
106
107static ssize_t show_sampling_rate_min(struct cpufreq_policy *policy, char *buf)
108{
109 return sprintf (buf, "%u\n", MIN_SAMPLING_RATE);
110}
111
112#define define_one_ro(_name) \
113static struct freq_attr _name = \
114__ATTR(_name, 0444, show_##_name, NULL)
115
116define_one_ro(sampling_rate_max);
117define_one_ro(sampling_rate_min);
118
119/* cpufreq_conservative Governor Tunables */
120#define show_one(file_name, object) \
121static ssize_t show_##file_name \
122(struct cpufreq_policy *unused, char *buf) \
123{ \
124 return sprintf(buf, "%u\n", dbs_tuners_ins.object); \
125}
126show_one(sampling_rate, sampling_rate);
127show_one(sampling_down_factor, sampling_down_factor);
128show_one(up_threshold, up_threshold);
129show_one(down_threshold, down_threshold);
130show_one(ignore_nice, ignore_nice);
131show_one(freq_step, freq_step);
132
133static ssize_t store_sampling_down_factor(struct cpufreq_policy *unused,
134 const char *buf, size_t count)
135{
136 unsigned int input;
137 int ret;
138 ret = sscanf (buf, "%u", &input);
139 if (ret != 1 )
140 return -EINVAL;
141
142 down(&dbs_sem);
143 dbs_tuners_ins.sampling_down_factor = input;
144 up(&dbs_sem);
145
146 return count;
147}
148
149static ssize_t store_sampling_rate(struct cpufreq_policy *unused,
150 const char *buf, size_t count)
151{
152 unsigned int input;
153 int ret;
154 ret = sscanf (buf, "%u", &input);
155
156 down(&dbs_sem);
157 if (ret != 1 || input > MAX_SAMPLING_RATE || input < MIN_SAMPLING_RATE) {
158 up(&dbs_sem);
159 return -EINVAL;
160 }
161
162 dbs_tuners_ins.sampling_rate = input;
163 up(&dbs_sem);
164
165 return count;
166}
167
168static ssize_t store_up_threshold(struct cpufreq_policy *unused,
169 const char *buf, size_t count)
170{
171 unsigned int input;
172 int ret;
173 ret = sscanf (buf, "%u", &input);
174
175 down(&dbs_sem);
176 if (ret != 1 || input > MAX_FREQUENCY_UP_THRESHOLD ||
177 input < MIN_FREQUENCY_UP_THRESHOLD ||
178 input <= dbs_tuners_ins.down_threshold) {
179 up(&dbs_sem);
180 return -EINVAL;
181 }
182
183 dbs_tuners_ins.up_threshold = input;
184 up(&dbs_sem);
185
186 return count;
187}
188
189static ssize_t store_down_threshold(struct cpufreq_policy *unused,
190 const char *buf, size_t count)
191{
192 unsigned int input;
193 int ret;
194 ret = sscanf (buf, "%u", &input);
195
196 down(&dbs_sem);
197 if (ret != 1 || input > MAX_FREQUENCY_DOWN_THRESHOLD ||
198 input < MIN_FREQUENCY_DOWN_THRESHOLD ||
199 input >= dbs_tuners_ins.up_threshold) {
200 up(&dbs_sem);
201 return -EINVAL;
202 }
203
204 dbs_tuners_ins.down_threshold = input;
205 up(&dbs_sem);
206
207 return count;
208}
209
210static ssize_t store_ignore_nice(struct cpufreq_policy *policy,
211 const char *buf, size_t count)
212{
213 unsigned int input;
214 int ret;
215
216 unsigned int j;
217
218 ret = sscanf (buf, "%u", &input);
219 if ( ret != 1 )
220 return -EINVAL;
221
222 if ( input > 1 )
223 input = 1;
224
225 down(&dbs_sem);
226 if ( input == dbs_tuners_ins.ignore_nice ) { /* nothing to do */
227 up(&dbs_sem);
228 return count;
229 }
230 dbs_tuners_ins.ignore_nice = input;
231
232 /* we need to re-evaluate prev_cpu_idle_up and prev_cpu_idle_down */
233 for_each_online_cpu(j) {
234 struct cpu_dbs_info_s *j_dbs_info;
235 j_dbs_info = &per_cpu(cpu_dbs_info, j);
236 j_dbs_info->prev_cpu_idle_up = get_cpu_idle_time(j);
237 j_dbs_info->prev_cpu_idle_down = j_dbs_info->prev_cpu_idle_up;
238 }
239 up(&dbs_sem);
240
241 return count;
242}
243
244static ssize_t store_freq_step(struct cpufreq_policy *policy,
245 const char *buf, size_t count)
246{
247 unsigned int input;
248 int ret;
249
250 ret = sscanf (buf, "%u", &input);
251
252 if ( ret != 1 )
253 return -EINVAL;
254
255 if ( input > 100 )
256 input = 100;
257
258 /* no need to test here if freq_step is zero as the user might actually
259 * want this, they would be crazy though :) */
260 down(&dbs_sem);
261 dbs_tuners_ins.freq_step = input;
262 up(&dbs_sem);
263
264 return count;
265}
266
267#define define_one_rw(_name) \
268static struct freq_attr _name = \
269__ATTR(_name, 0644, show_##_name, store_##_name)
270
271define_one_rw(sampling_rate);
272define_one_rw(sampling_down_factor);
273define_one_rw(up_threshold);
274define_one_rw(down_threshold);
275define_one_rw(ignore_nice);
276define_one_rw(freq_step);
277
278static struct attribute * dbs_attributes[] = {
279 &sampling_rate_max.attr,
280 &sampling_rate_min.attr,
281 &sampling_rate.attr,
282 &sampling_down_factor.attr,
283 &up_threshold.attr,
284 &down_threshold.attr,
285 &ignore_nice.attr,
286 &freq_step.attr,
287 NULL
288};
289
290static struct attribute_group dbs_attr_group = {
291 .attrs = dbs_attributes,
292 .name = "conservative",
293};
294
295/************************** sysfs end ************************/
296
297static void dbs_check_cpu(int cpu)
298{
299 unsigned int idle_ticks, up_idle_ticks, down_idle_ticks;
300 unsigned int freq_step;
301 unsigned int freq_down_sampling_rate;
302 static int down_skip[NR_CPUS];
303 static int requested_freq[NR_CPUS];
304 static unsigned short init_flag = 0;
305 struct cpu_dbs_info_s *this_dbs_info;
306 struct cpu_dbs_info_s *dbs_info;
307
308 struct cpufreq_policy *policy;
309 unsigned int j;
310
311 this_dbs_info = &per_cpu(cpu_dbs_info, cpu);
312 if (!this_dbs_info->enable)
313 return;
314
315 policy = this_dbs_info->cur_policy;
316
317 if ( init_flag == 0 ) {
318 for ( /* NULL */; init_flag < NR_CPUS; init_flag++ ) {
319 dbs_info = &per_cpu(cpu_dbs_info, init_flag);
320 requested_freq[cpu] = dbs_info->cur_policy->cur;
321 }
322 init_flag = 1;
323 }
324
325 /*
326 * The default safe range is 20% to 80%
327 * Every sampling_rate, we check
328 * - If current idle time is less than 20%, then we try to
329 * increase frequency
330 * Every sampling_rate*sampling_down_factor, we check
331 * - If current idle time is more than 80%, then we try to
332 * decrease frequency
333 *
334 * Any frequency increase takes it to the maximum frequency.
335 * Frequency reduction happens at minimum steps of
336 * 5% (default) of max_frequency
337 */
338
339 /* Check for frequency increase */
340
341 idle_ticks = UINT_MAX;
342 for_each_cpu_mask(j, policy->cpus) {
343 unsigned int tmp_idle_ticks, total_idle_ticks;
344 struct cpu_dbs_info_s *j_dbs_info;
345
346 j_dbs_info = &per_cpu(cpu_dbs_info, j);
347 /* Check for frequency increase */
348 total_idle_ticks = get_cpu_idle_time(j);
349 tmp_idle_ticks = total_idle_ticks -
350 j_dbs_info->prev_cpu_idle_up;
351 j_dbs_info->prev_cpu_idle_up = total_idle_ticks;
352
353 if (tmp_idle_ticks < idle_ticks)
354 idle_ticks = tmp_idle_ticks;
355 }
356
357 /* Scale idle ticks by 100 and compare with up and down ticks */
358 idle_ticks *= 100;
359 up_idle_ticks = (100 - dbs_tuners_ins.up_threshold) *
360 usecs_to_jiffies(dbs_tuners_ins.sampling_rate);
361
362 if (idle_ticks < up_idle_ticks) {
363 down_skip[cpu] = 0;
364 for_each_cpu_mask(j, policy->cpus) {
365 struct cpu_dbs_info_s *j_dbs_info;
366
367 j_dbs_info = &per_cpu(cpu_dbs_info, j);
368 j_dbs_info->prev_cpu_idle_down =
369 j_dbs_info->prev_cpu_idle_up;
370 }
371 /* if we are already at full speed then break out early */
372 if (requested_freq[cpu] == policy->max)
373 return;
374
375 freq_step = (dbs_tuners_ins.freq_step * policy->max) / 100;
376
377 /* max freq cannot be less than 100. But who knows.... */
378 if (unlikely(freq_step == 0))
379 freq_step = 5;
380
381 requested_freq[cpu] += freq_step;
382 if (requested_freq[cpu] > policy->max)
383 requested_freq[cpu] = policy->max;
384
385 __cpufreq_driver_target(policy, requested_freq[cpu],
386 CPUFREQ_RELATION_H);
387 return;
388 }
389
390 /* Check for frequency decrease */
391 down_skip[cpu]++;
392 if (down_skip[cpu] < dbs_tuners_ins.sampling_down_factor)
393 return;
394
395 idle_ticks = UINT_MAX;
396 for_each_cpu_mask(j, policy->cpus) {
397 unsigned int tmp_idle_ticks, total_idle_ticks;
398 struct cpu_dbs_info_s *j_dbs_info;
399
400 j_dbs_info = &per_cpu(cpu_dbs_info, j);
401 total_idle_ticks = j_dbs_info->prev_cpu_idle_up;
402 tmp_idle_ticks = total_idle_ticks -
403 j_dbs_info->prev_cpu_idle_down;
404 j_dbs_info->prev_cpu_idle_down = total_idle_ticks;
405
406 if (tmp_idle_ticks < idle_ticks)
407 idle_ticks = tmp_idle_ticks;
408 }
409
410 /* Scale idle ticks by 100 and compare with up and down ticks */
411 idle_ticks *= 100;
412 down_skip[cpu] = 0;
413
414 freq_down_sampling_rate = dbs_tuners_ins.sampling_rate *
415 dbs_tuners_ins.sampling_down_factor;
416 down_idle_ticks = (100 - dbs_tuners_ins.down_threshold) *
417 usecs_to_jiffies(freq_down_sampling_rate);
418
419 if (idle_ticks > down_idle_ticks) {
420 /* if we are already at the lowest speed then break out early
421 * or if we 'cannot' reduce the speed as the user might want
422 * freq_step to be zero */
423 if (requested_freq[cpu] == policy->min
424 || dbs_tuners_ins.freq_step == 0)
425 return;
426
427 freq_step = (dbs_tuners_ins.freq_step * policy->max) / 100;
428
429 /* max freq cannot be less than 100. But who knows.... */
430 if (unlikely(freq_step == 0))
431 freq_step = 5;
432
433 requested_freq[cpu] -= freq_step;
434 if (requested_freq[cpu] < policy->min)
435 requested_freq[cpu] = policy->min;
436
437 __cpufreq_driver_target(policy,
438 requested_freq[cpu],
439 CPUFREQ_RELATION_H);
440 return;
441 }
442}
443
444static void do_dbs_timer(void *data)
445{
446 int i;
447 down(&dbs_sem);
448 for_each_online_cpu(i)
449 dbs_check_cpu(i);
450 schedule_delayed_work(&dbs_work,
451 usecs_to_jiffies(dbs_tuners_ins.sampling_rate));
452 up(&dbs_sem);
453}
454
455static inline void dbs_timer_init(void)
456{
457 INIT_WORK(&dbs_work, do_dbs_timer, NULL);
458 schedule_delayed_work(&dbs_work,
459 usecs_to_jiffies(dbs_tuners_ins.sampling_rate));
460 return;
461}
462
463static inline void dbs_timer_exit(void)
464{
465 cancel_delayed_work(&dbs_work);
466 return;
467}
468
469static int cpufreq_governor_dbs(struct cpufreq_policy *policy,
470 unsigned int event)
471{
472 unsigned int cpu = policy->cpu;
473 struct cpu_dbs_info_s *this_dbs_info;
474 unsigned int j;
475
476 this_dbs_info = &per_cpu(cpu_dbs_info, cpu);
477
478 switch (event) {
479 case CPUFREQ_GOV_START:
480 if ((!cpu_online(cpu)) ||
481 (!policy->cur))
482 return -EINVAL;
483
484 if (policy->cpuinfo.transition_latency >
485 (TRANSITION_LATENCY_LIMIT * 1000))
486 return -EINVAL;
487 if (this_dbs_info->enable) /* Already enabled */
488 break;
489
490 down(&dbs_sem);
491 for_each_cpu_mask(j, policy->cpus) {
492 struct cpu_dbs_info_s *j_dbs_info;
493 j_dbs_info = &per_cpu(cpu_dbs_info, j);
494 j_dbs_info->cur_policy = policy;
495
496 j_dbs_info->prev_cpu_idle_up = get_cpu_idle_time(j);
497 j_dbs_info->prev_cpu_idle_down
498 = j_dbs_info->prev_cpu_idle_up;
499 }
500 this_dbs_info->enable = 1;
501 sysfs_create_group(&policy->kobj, &dbs_attr_group);
502 dbs_enable++;
503 /*
504 * Start the timerschedule work, when this governor
505 * is used for first time
506 */
507 if (dbs_enable == 1) {
508 unsigned int latency;
509 /* policy latency is in nS. Convert it to uS first */
510
511 latency = policy->cpuinfo.transition_latency;
512 if (latency < 1000)
513 latency = 1000;
514
515 def_sampling_rate = (latency / 1000) *
516 DEF_SAMPLING_RATE_LATENCY_MULTIPLIER;
517 dbs_tuners_ins.sampling_rate = def_sampling_rate;
518 dbs_tuners_ins.ignore_nice = 0;
519 dbs_tuners_ins.freq_step = 5;
520
521 dbs_timer_init();
522 }
523
524 up(&dbs_sem);
525 break;
526
527 case CPUFREQ_GOV_STOP:
528 down(&dbs_sem);
529 this_dbs_info->enable = 0;
530 sysfs_remove_group(&policy->kobj, &dbs_attr_group);
531 dbs_enable--;
532 /*
533 * Stop the timerschedule work, when this governor
534 * is used for first time
535 */
536 if (dbs_enable == 0)
537 dbs_timer_exit();
538
539 up(&dbs_sem);
540
541 break;
542
543 case CPUFREQ_GOV_LIMITS:
544 down(&dbs_sem);
545 if (policy->max < this_dbs_info->cur_policy->cur)
546 __cpufreq_driver_target(
547 this_dbs_info->cur_policy,
548 policy->max, CPUFREQ_RELATION_H);
549 else if (policy->min > this_dbs_info->cur_policy->cur)
550 __cpufreq_driver_target(
551 this_dbs_info->cur_policy,
552 policy->min, CPUFREQ_RELATION_L);
553 up(&dbs_sem);
554 break;
555 }
556 return 0;
557}
558
559static struct cpufreq_governor cpufreq_gov_dbs = {
560 .name = "conservative",
561 .governor = cpufreq_governor_dbs,
562 .owner = THIS_MODULE,
563};
564
565static int __init cpufreq_gov_dbs_init(void)
566{
567 return cpufreq_register_governor(&cpufreq_gov_dbs);
568}
569
570static void __exit cpufreq_gov_dbs_exit(void)
571{
572 /* Make sure that the scheduled work is indeed not running */
573 flush_scheduled_work();
574
575 cpufreq_unregister_governor(&cpufreq_gov_dbs);
576}
577
578
579MODULE_AUTHOR ("Alexander Clouter <alex-kernel@digriz.org.uk>");
580MODULE_DESCRIPTION ("'cpufreq_conservative' - A dynamic cpufreq governor for "
581 "Low Latency Frequency Transition capable processors "
582 "optimised for use in a battery environment");
583MODULE_LICENSE ("GPL");
584
585module_init(cpufreq_gov_dbs_init);
586module_exit(cpufreq_gov_dbs_exit);
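
A rough worked example of the threshold arithmetic in dbs_check_cpu() above, assuming usecs_to_jiffies(dbs_tuners_ins.sampling_rate) works out to 100 jiffies (the real value depends on HZ and on the driver's transition latency): with up_threshold = 80, up_idle_ticks = (100 - 80) * 100 = 2000, so a CPU that was idle for fewer than 20 of those 100 jiffies (idle_ticks * 100 < 2000) raises requested_freq by freq_step, 5% of policy->max by default. The decrease check only runs every sampling_down_factor = 5 samples, a 500-jiffy window; with down_threshold = 20, down_idle_ticks = (100 - 20) * 500 = 40000, so requested_freq is stepped back down only if even the least idle CPU in the policy was idle for more than 400 of those 500 jiffies.
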
diff --git a/drivers/cpufreq/cpufreq_ondemand.c b/drivers/cpufreq/cpufreq_ondemand.c
index 8d83a21c6477..c1fc9c62bb51 100644
--- a/drivers/cpufreq/cpufreq_ondemand.c
+++ b/drivers/cpufreq/cpufreq_ondemand.c
@@ -34,13 +34,9 @@
34 */ 34 */
35 35
36#define DEF_FREQUENCY_UP_THRESHOLD (80) 36#define DEF_FREQUENCY_UP_THRESHOLD (80)
37#define MIN_FREQUENCY_UP_THRESHOLD (0) 37#define MIN_FREQUENCY_UP_THRESHOLD (11)
38#define MAX_FREQUENCY_UP_THRESHOLD (100) 38#define MAX_FREQUENCY_UP_THRESHOLD (100)
39 39
40#define DEF_FREQUENCY_DOWN_THRESHOLD (20)
41#define MIN_FREQUENCY_DOWN_THRESHOLD (0)
42#define MAX_FREQUENCY_DOWN_THRESHOLD (100)
43
44/* 40/*
45 * The polling frequency of this governor depends on the capability of 41 * The polling frequency of this governor depends on the capability of
46 * the processor. Default polling frequency is 1000 times the transition 42 * the processor. Default polling frequency is 1000 times the transition
@@ -55,9 +51,9 @@ static unsigned int def_sampling_rate;
55#define MIN_SAMPLING_RATE (def_sampling_rate / 2) 51#define MIN_SAMPLING_RATE (def_sampling_rate / 2)
56#define MAX_SAMPLING_RATE (500 * def_sampling_rate) 52#define MAX_SAMPLING_RATE (500 * def_sampling_rate)
57#define DEF_SAMPLING_RATE_LATENCY_MULTIPLIER (1000) 53#define DEF_SAMPLING_RATE_LATENCY_MULTIPLIER (1000)
58#define DEF_SAMPLING_DOWN_FACTOR (10) 54#define DEF_SAMPLING_DOWN_FACTOR (1)
55#define MAX_SAMPLING_DOWN_FACTOR (10)
59#define TRANSITION_LATENCY_LIMIT (10 * 1000) 56#define TRANSITION_LATENCY_LIMIT (10 * 1000)
60#define sampling_rate_in_HZ(x) (((x * HZ) < (1000 * 1000))?1:((x * HZ) / (1000 * 1000)))
61 57
62static void do_dbs_timer(void *data); 58static void do_dbs_timer(void *data);
63 59
@@ -78,15 +74,23 @@ struct dbs_tuners {
78 unsigned int sampling_rate; 74 unsigned int sampling_rate;
79 unsigned int sampling_down_factor; 75 unsigned int sampling_down_factor;
80 unsigned int up_threshold; 76 unsigned int up_threshold;
81 unsigned int down_threshold; 77 unsigned int ignore_nice;
82}; 78};
83 79
84static struct dbs_tuners dbs_tuners_ins = { 80static struct dbs_tuners dbs_tuners_ins = {
85 .up_threshold = DEF_FREQUENCY_UP_THRESHOLD, 81 .up_threshold = DEF_FREQUENCY_UP_THRESHOLD,
86 .down_threshold = DEF_FREQUENCY_DOWN_THRESHOLD,
87 .sampling_down_factor = DEF_SAMPLING_DOWN_FACTOR, 82 .sampling_down_factor = DEF_SAMPLING_DOWN_FACTOR,
88}; 83};
89 84
85static inline unsigned int get_cpu_idle_time(unsigned int cpu)
86{
87 return kstat_cpu(cpu).cpustat.idle +
88 kstat_cpu(cpu).cpustat.iowait +
89 ( !dbs_tuners_ins.ignore_nice ?
90 kstat_cpu(cpu).cpustat.nice :
91 0);
92}
93
90/************************** sysfs interface ************************/ 94/************************** sysfs interface ************************/
91static ssize_t show_sampling_rate_max(struct cpufreq_policy *policy, char *buf) 95static ssize_t show_sampling_rate_max(struct cpufreq_policy *policy, char *buf)
92{ 96{
@@ -115,7 +119,7 @@ static ssize_t show_##file_name \
115show_one(sampling_rate, sampling_rate); 119show_one(sampling_rate, sampling_rate);
116show_one(sampling_down_factor, sampling_down_factor); 120show_one(sampling_down_factor, sampling_down_factor);
117show_one(up_threshold, up_threshold); 121show_one(up_threshold, up_threshold);
118show_one(down_threshold, down_threshold); 122show_one(ignore_nice, ignore_nice);
119 123
120static ssize_t store_sampling_down_factor(struct cpufreq_policy *unused, 124static ssize_t store_sampling_down_factor(struct cpufreq_policy *unused,
121 const char *buf, size_t count) 125 const char *buf, size_t count)
@@ -126,6 +130,9 @@ static ssize_t store_sampling_down_factor(struct cpufreq_policy *unused,
126 if (ret != 1 ) 130 if (ret != 1 )
127 return -EINVAL; 131 return -EINVAL;
128 132
133 if (input > MAX_SAMPLING_DOWN_FACTOR || input < 1)
134 return -EINVAL;
135
129 down(&dbs_sem); 136 down(&dbs_sem);
130 dbs_tuners_ins.sampling_down_factor = input; 137 dbs_tuners_ins.sampling_down_factor = input;
131 up(&dbs_sem); 138 up(&dbs_sem);
@@ -161,8 +168,7 @@ static ssize_t store_up_threshold(struct cpufreq_policy *unused,
161 168
162 down(&dbs_sem); 169 down(&dbs_sem);
163 if (ret != 1 || input > MAX_FREQUENCY_UP_THRESHOLD || 170 if (ret != 1 || input > MAX_FREQUENCY_UP_THRESHOLD ||
164 input < MIN_FREQUENCY_UP_THRESHOLD || 171 input < MIN_FREQUENCY_UP_THRESHOLD) {
165 input <= dbs_tuners_ins.down_threshold) {
166 up(&dbs_sem); 172 up(&dbs_sem);
167 return -EINVAL; 173 return -EINVAL;
168 } 174 }
@@ -173,22 +179,35 @@ static ssize_t store_up_threshold(struct cpufreq_policy *unused,
173 return count; 179 return count;
174} 180}
175 181
176static ssize_t store_down_threshold(struct cpufreq_policy *unused, 182static ssize_t store_ignore_nice(struct cpufreq_policy *policy,
177 const char *buf, size_t count) 183 const char *buf, size_t count)
178{ 184{
179 unsigned int input; 185 unsigned int input;
180 int ret; 186 int ret;
187
188 unsigned int j;
189
181 ret = sscanf (buf, "%u", &input); 190 ret = sscanf (buf, "%u", &input);
191 if ( ret != 1 )
192 return -EINVAL;
182 193
194 if ( input > 1 )
195 input = 1;
196
183 down(&dbs_sem); 197 down(&dbs_sem);
184 if (ret != 1 || input > MAX_FREQUENCY_DOWN_THRESHOLD || 198 if ( input == dbs_tuners_ins.ignore_nice ) { /* nothing to do */
185 input < MIN_FREQUENCY_DOWN_THRESHOLD ||
186 input >= dbs_tuners_ins.up_threshold) {
187 up(&dbs_sem); 199 up(&dbs_sem);
188 return -EINVAL; 200 return count;
189 } 201 }
202 dbs_tuners_ins.ignore_nice = input;
190 203
191 dbs_tuners_ins.down_threshold = input; 204 /* we need to re-evaluate prev_cpu_idle_up and prev_cpu_idle_down */
205 for_each_online_cpu(j) {
206 struct cpu_dbs_info_s *j_dbs_info;
207 j_dbs_info = &per_cpu(cpu_dbs_info, j);
208 j_dbs_info->prev_cpu_idle_up = get_cpu_idle_time(j);
209 j_dbs_info->prev_cpu_idle_down = j_dbs_info->prev_cpu_idle_up;
210 }
192 up(&dbs_sem); 211 up(&dbs_sem);
193 212
194 return count; 213 return count;
@@ -201,7 +220,7 @@ __ATTR(_name, 0644, show_##_name, store_##_name)
201define_one_rw(sampling_rate); 220define_one_rw(sampling_rate);
202define_one_rw(sampling_down_factor); 221define_one_rw(sampling_down_factor);
203define_one_rw(up_threshold); 222define_one_rw(up_threshold);
204define_one_rw(down_threshold); 223define_one_rw(ignore_nice);
205 224
206static struct attribute * dbs_attributes[] = { 225static struct attribute * dbs_attributes[] = {
207 &sampling_rate_max.attr, 226 &sampling_rate_max.attr,
@@ -209,7 +228,7 @@ static struct attribute * dbs_attributes[] = {
209 &sampling_rate.attr, 228 &sampling_rate.attr,
210 &sampling_down_factor.attr, 229 &sampling_down_factor.attr,
211 &up_threshold.attr, 230 &up_threshold.attr,
212 &down_threshold.attr, 231 &ignore_nice.attr,
213 NULL 232 NULL
214}; 233};
215 234
@@ -222,9 +241,8 @@ static struct attribute_group dbs_attr_group = {
222 241
223static void dbs_check_cpu(int cpu) 242static void dbs_check_cpu(int cpu)
224{ 243{
225 unsigned int idle_ticks, up_idle_ticks, down_idle_ticks; 244 unsigned int idle_ticks, up_idle_ticks, total_ticks;
226 unsigned int total_idle_ticks; 245 unsigned int freq_next;
227 unsigned int freq_down_step;
228 unsigned int freq_down_sampling_rate; 246 unsigned int freq_down_sampling_rate;
229 static int down_skip[NR_CPUS]; 247 static int down_skip[NR_CPUS];
230 struct cpu_dbs_info_s *this_dbs_info; 248 struct cpu_dbs_info_s *this_dbs_info;
@@ -238,38 +256,25 @@ static void dbs_check_cpu(int cpu)
238 256
239 policy = this_dbs_info->cur_policy; 257 policy = this_dbs_info->cur_policy;
240 /* 258 /*
241 * The default safe range is 20% to 80% 259 * Every sampling_rate, we check, if current idle time is less
242 * Every sampling_rate, we check 260 * than 20% (default), then we try to increase frequency
243 * - If current idle time is less than 20%, then we try to 261 * Every sampling_rate*sampling_down_factor, we look for a the lowest
244 * increase frequency 262 * frequency which can sustain the load while keeping idle time over
245 * Every sampling_rate*sampling_down_factor, we check 263 * 30%. If such a frequency exist, we try to decrease to this frequency.
246 * - If current idle time is more than 80%, then we try to
247 * decrease frequency
248 * 264 *
249 * Any frequency increase takes it to the maximum frequency. 265 * Any frequency increase takes it to the maximum frequency.
250 * Frequency reduction happens at minimum steps of 266 * Frequency reduction happens at minimum steps of
251 * 5% of max_frequency 267 * 5% (default) of current frequency
252 */ 268 */
253 269
254 /* Check for frequency increase */ 270 /* Check for frequency increase */
255 total_idle_ticks = kstat_cpu(cpu).cpustat.idle + 271 idle_ticks = UINT_MAX;
256 kstat_cpu(cpu).cpustat.iowait;
257 idle_ticks = total_idle_ticks -
258 this_dbs_info->prev_cpu_idle_up;
259 this_dbs_info->prev_cpu_idle_up = total_idle_ticks;
260
261
262 for_each_cpu_mask(j, policy->cpus) { 272 for_each_cpu_mask(j, policy->cpus) {
263 unsigned int tmp_idle_ticks; 273 unsigned int tmp_idle_ticks, total_idle_ticks;
264 struct cpu_dbs_info_s *j_dbs_info; 274 struct cpu_dbs_info_s *j_dbs_info;
265 275
266 if (j == cpu)
267 continue;
268
269 j_dbs_info = &per_cpu(cpu_dbs_info, j); 276 j_dbs_info = &per_cpu(cpu_dbs_info, j);
270 /* Check for frequency increase */ 277 total_idle_ticks = get_cpu_idle_time(j);
271 total_idle_ticks = kstat_cpu(j).cpustat.idle +
272 kstat_cpu(j).cpustat.iowait;
273 tmp_idle_ticks = total_idle_ticks - 278 tmp_idle_ticks = total_idle_ticks -
274 j_dbs_info->prev_cpu_idle_up; 279 j_dbs_info->prev_cpu_idle_up;
275 j_dbs_info->prev_cpu_idle_up = total_idle_ticks; 280 j_dbs_info->prev_cpu_idle_up = total_idle_ticks;
@@ -281,13 +286,23 @@ static void dbs_check_cpu(int cpu)
281 /* Scale idle ticks by 100 and compare with up and down ticks */ 286 /* Scale idle ticks by 100 and compare with up and down ticks */
282 idle_ticks *= 100; 287 idle_ticks *= 100;
283 up_idle_ticks = (100 - dbs_tuners_ins.up_threshold) * 288 up_idle_ticks = (100 - dbs_tuners_ins.up_threshold) *
284 sampling_rate_in_HZ(dbs_tuners_ins.sampling_rate); 289 usecs_to_jiffies(dbs_tuners_ins.sampling_rate);
285 290
286 if (idle_ticks < up_idle_ticks) { 291 if (idle_ticks < up_idle_ticks) {
292 down_skip[cpu] = 0;
293 for_each_cpu_mask(j, policy->cpus) {
294 struct cpu_dbs_info_s *j_dbs_info;
295
296 j_dbs_info = &per_cpu(cpu_dbs_info, j);
297 j_dbs_info->prev_cpu_idle_down =
298 j_dbs_info->prev_cpu_idle_up;
299 }
300 /* if we are already at full speed then break out early */
301 if (policy->cur == policy->max)
302 return;
303
287 __cpufreq_driver_target(policy, policy->max, 304 __cpufreq_driver_target(policy, policy->max,
288 CPUFREQ_RELATION_H); 305 CPUFREQ_RELATION_H);
289 down_skip[cpu] = 0;
290 this_dbs_info->prev_cpu_idle_down = total_idle_ticks;
291 return; 306 return;
292 } 307 }
293 308
@@ -296,23 +311,14 @@ static void dbs_check_cpu(int cpu)
296 if (down_skip[cpu] < dbs_tuners_ins.sampling_down_factor) 311 if (down_skip[cpu] < dbs_tuners_ins.sampling_down_factor)
297 return; 312 return;
298 313
299 total_idle_ticks = kstat_cpu(cpu).cpustat.idle + 314 idle_ticks = UINT_MAX;
300 kstat_cpu(cpu).cpustat.iowait;
301 idle_ticks = total_idle_ticks -
302 this_dbs_info->prev_cpu_idle_down;
303 this_dbs_info->prev_cpu_idle_down = total_idle_ticks;
304
305 for_each_cpu_mask(j, policy->cpus) { 315 for_each_cpu_mask(j, policy->cpus) {
306 unsigned int tmp_idle_ticks; 316 unsigned int tmp_idle_ticks, total_idle_ticks;
307 struct cpu_dbs_info_s *j_dbs_info; 317 struct cpu_dbs_info_s *j_dbs_info;
308 318
309 if (j == cpu)
310 continue;
311
312 j_dbs_info = &per_cpu(cpu_dbs_info, j); 319 j_dbs_info = &per_cpu(cpu_dbs_info, j);
313 /* Check for frequency increase */ 320 /* Check for frequency decrease */
314 total_idle_ticks = kstat_cpu(j).cpustat.idle + 321 total_idle_ticks = j_dbs_info->prev_cpu_idle_up;
315 kstat_cpu(j).cpustat.iowait;
316 tmp_idle_ticks = total_idle_ticks - 322 tmp_idle_ticks = total_idle_ticks -
317 j_dbs_info->prev_cpu_idle_down; 323 j_dbs_info->prev_cpu_idle_down;
318 j_dbs_info->prev_cpu_idle_down = total_idle_ticks; 324 j_dbs_info->prev_cpu_idle_down = total_idle_ticks;
@@ -321,38 +327,37 @@ static void dbs_check_cpu(int cpu)
321 idle_ticks = tmp_idle_ticks; 327 idle_ticks = tmp_idle_ticks;
322 } 328 }
323 329
324 /* Scale idle ticks by 100 and compare with up and down ticks */
325 idle_ticks *= 100;
326 down_skip[cpu] = 0; 330 down_skip[cpu] = 0;
331 /* if we cannot reduce the frequency anymore, break out early */
332 if (policy->cur == policy->min)
333 return;
327 334
335 /* Compute how many ticks there are between two measurements */
328 freq_down_sampling_rate = dbs_tuners_ins.sampling_rate * 336 freq_down_sampling_rate = dbs_tuners_ins.sampling_rate *
329 dbs_tuners_ins.sampling_down_factor; 337 dbs_tuners_ins.sampling_down_factor;
330 down_idle_ticks = (100 - dbs_tuners_ins.down_threshold) * 338 total_ticks = usecs_to_jiffies(freq_down_sampling_rate);
331 sampling_rate_in_HZ(freq_down_sampling_rate);
332 339
333 if (idle_ticks > down_idle_ticks ) { 340 /*
334 freq_down_step = (5 * policy->max) / 100; 341 * The optimal frequency is the frequency that is the lowest that
335 342 * can support the current CPU usage without triggering the up
336 /* max freq cannot be less than 100. But who knows.... */ 343 * policy. To be safe, we focus 10 points under the threshold.
337 if (unlikely(freq_down_step == 0)) 344 */
338 freq_down_step = 5; 345 freq_next = ((total_ticks - idle_ticks) * 100) / total_ticks;
346 freq_next = (freq_next * policy->cur) /
347 (dbs_tuners_ins.up_threshold - 10);
339 348
340 __cpufreq_driver_target(policy, 349 if (freq_next <= ((policy->cur * 95) / 100))
341 policy->cur - freq_down_step, 350 __cpufreq_driver_target(policy, freq_next, CPUFREQ_RELATION_L);
342 CPUFREQ_RELATION_H);
343 return;
344 }
345} 351}
346 352
347static void do_dbs_timer(void *data) 353static void do_dbs_timer(void *data)
348{ 354{
349 int i; 355 int i;
350 down(&dbs_sem); 356 down(&dbs_sem);
351 for (i = 0; i < NR_CPUS; i++) 357 for_each_online_cpu(i)
352 if (cpu_online(i)) 358 dbs_check_cpu(i);
353 dbs_check_cpu(i);
354 schedule_delayed_work(&dbs_work, 359 schedule_delayed_work(&dbs_work,
355 sampling_rate_in_HZ(dbs_tuners_ins.sampling_rate)); 360 usecs_to_jiffies(dbs_tuners_ins.sampling_rate));
356 up(&dbs_sem); 361 up(&dbs_sem);
357} 362}
358 363
@@ -360,7 +365,7 @@ static inline void dbs_timer_init(void)
360{ 365{
361 INIT_WORK(&dbs_work, do_dbs_timer, NULL); 366 INIT_WORK(&dbs_work, do_dbs_timer, NULL);
362 schedule_delayed_work(&dbs_work, 367 schedule_delayed_work(&dbs_work,
363 sampling_rate_in_HZ(dbs_tuners_ins.sampling_rate)); 368 usecs_to_jiffies(dbs_tuners_ins.sampling_rate));
364 return; 369 return;
365} 370}
366 371
@@ -397,12 +402,9 @@ static int cpufreq_governor_dbs(struct cpufreq_policy *policy,
397 j_dbs_info = &per_cpu(cpu_dbs_info, j); 402 j_dbs_info = &per_cpu(cpu_dbs_info, j);
398 j_dbs_info->cur_policy = policy; 403 j_dbs_info->cur_policy = policy;
399 404
400 j_dbs_info->prev_cpu_idle_up = 405 j_dbs_info->prev_cpu_idle_up = get_cpu_idle_time(j);
401 kstat_cpu(j).cpustat.idle + 406 j_dbs_info->prev_cpu_idle_down
402 kstat_cpu(j).cpustat.iowait; 407 = j_dbs_info->prev_cpu_idle_up;
403 j_dbs_info->prev_cpu_idle_down =
404 kstat_cpu(j).cpustat.idle +
405 kstat_cpu(j).cpustat.iowait;
406 } 408 }
407 this_dbs_info->enable = 1; 409 this_dbs_info->enable = 1;
408 sysfs_create_group(&policy->kobj, &dbs_attr_group); 410 sysfs_create_group(&policy->kobj, &dbs_attr_group);
@@ -422,6 +424,7 @@ static int cpufreq_governor_dbs(struct cpufreq_policy *policy,
422 def_sampling_rate = (latency / 1000) * 424 def_sampling_rate = (latency / 1000) *
423 DEF_SAMPLING_RATE_LATENCY_MULTIPLIER; 425 DEF_SAMPLING_RATE_LATENCY_MULTIPLIER;
424 dbs_tuners_ins.sampling_rate = def_sampling_rate; 426 dbs_tuners_ins.sampling_rate = def_sampling_rate;
427 dbs_tuners_ins.ignore_nice = 0;
425 428
426 dbs_timer_init(); 429 dbs_timer_init();
427 } 430 }
@@ -461,12 +464,11 @@ static int cpufreq_governor_dbs(struct cpufreq_policy *policy,
461 return 0; 464 return 0;
462} 465}
463 466
464struct cpufreq_governor cpufreq_gov_dbs = { 467static struct cpufreq_governor cpufreq_gov_dbs = {
465 .name = "ondemand", 468 .name = "ondemand",
466 .governor = cpufreq_governor_dbs, 469 .governor = cpufreq_governor_dbs,
467 .owner = THIS_MODULE, 470 .owner = THIS_MODULE,
468}; 471};
469EXPORT_SYMBOL(cpufreq_gov_dbs);
470 472
471static int __init cpufreq_gov_dbs_init(void) 473static int __init cpufreq_gov_dbs_init(void)
472{ 474{
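
For the reworked ondemand down path above, a rough worked example with illustrative numbers (assuming the down sampling interval converts to 100 jiffies): with up_threshold = 80 and the new default sampling_down_factor = 1, suppose the least idle CPU was idle for 60 of those 100 jiffies. Then freq_next = ((100 - 60) * 100) / 100 = 40 (percent busy), and freq_next = (40 * policy->cur) / (80 - 10), roughly 57% of the current frequency. Since that is below 95% of policy->cur, the governor requests the lowest available frequency at or above it (CPUFREQ_RELATION_L); at that frequency the same work would keep the CPU about 40/57, i.e. roughly 70%, busy — ten points under the up threshold — so the decrease should not immediately bounce back up.
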
diff --git a/drivers/cpufreq/cpufreq_stats.c b/drivers/cpufreq/cpufreq_stats.c
index 2084593937c6..741b6b191e6a 100644
--- a/drivers/cpufreq/cpufreq_stats.c
+++ b/drivers/cpufreq/cpufreq_stats.c
@@ -19,6 +19,7 @@
19#include <linux/percpu.h> 19#include <linux/percpu.h>
20#include <linux/kobject.h> 20#include <linux/kobject.h>
21#include <linux/spinlock.h> 21#include <linux/spinlock.h>
22#include <asm/cputime.h>
22 23
23static spinlock_t cpufreq_stats_lock; 24static spinlock_t cpufreq_stats_lock;
24 25
@@ -29,20 +30,14 @@ static struct freq_attr _attr_##_name = {\
29 .show = _show,\ 30 .show = _show,\
30}; 31};
31 32
32static unsigned long
33delta_time(unsigned long old, unsigned long new)
34{
35 return (old > new) ? (old - new): (new + ~old + 1);
36}
37
38struct cpufreq_stats { 33struct cpufreq_stats {
39 unsigned int cpu; 34 unsigned int cpu;
40 unsigned int total_trans; 35 unsigned int total_trans;
41 unsigned long long last_time; 36 unsigned long long last_time;
42 unsigned int max_state; 37 unsigned int max_state;
43 unsigned int state_num; 38 unsigned int state_num;
44 unsigned int last_index; 39 unsigned int last_index;
45 unsigned long long *time_in_state; 40 cputime64_t *time_in_state;
46 unsigned int *freq_table; 41 unsigned int *freq_table;
47#ifdef CONFIG_CPU_FREQ_STAT_DETAILS 42#ifdef CONFIG_CPU_FREQ_STAT_DETAILS
48 unsigned int *trans_table; 43 unsigned int *trans_table;
@@ -60,12 +55,16 @@ static int
60cpufreq_stats_update (unsigned int cpu) 55cpufreq_stats_update (unsigned int cpu)
61{ 56{
62 struct cpufreq_stats *stat; 57 struct cpufreq_stats *stat;
58 unsigned long long cur_time;
59
60 cur_time = get_jiffies_64();
63 spin_lock(&cpufreq_stats_lock); 61 spin_lock(&cpufreq_stats_lock);
64 stat = cpufreq_stats_table[cpu]; 62 stat = cpufreq_stats_table[cpu];
65 if (stat->time_in_state) 63 if (stat->time_in_state)
66 stat->time_in_state[stat->last_index] += 64 stat->time_in_state[stat->last_index] =
67 delta_time(stat->last_time, jiffies); 65 cputime64_add(stat->time_in_state[stat->last_index],
68 stat->last_time = jiffies; 66 cputime_sub(cur_time, stat->last_time));
67 stat->last_time = cur_time;
69 spin_unlock(&cpufreq_stats_lock); 68 spin_unlock(&cpufreq_stats_lock);
70 return 0; 69 return 0;
71} 70}
@@ -90,8 +89,8 @@ show_time_in_state(struct cpufreq_policy *policy, char *buf)
90 return 0; 89 return 0;
91 cpufreq_stats_update(stat->cpu); 90 cpufreq_stats_update(stat->cpu);
92 for (i = 0; i < stat->state_num; i++) { 91 for (i = 0; i < stat->state_num; i++) {
93 len += sprintf(buf + len, "%u %llu\n", 92 len += sprintf(buf + len, "%u %llu\n", stat->freq_table[i],
94 stat->freq_table[i], stat->time_in_state[i]); 93 (unsigned long long)cputime64_to_clock_t(stat->time_in_state[i]));
95 } 94 }
96 return len; 95 return len;
97} 96}
@@ -107,16 +106,30 @@ show_trans_table(struct cpufreq_policy *policy, char *buf)
107 if(!stat) 106 if(!stat)
108 return 0; 107 return 0;
109 cpufreq_stats_update(stat->cpu); 108 cpufreq_stats_update(stat->cpu);
109 len += snprintf(buf + len, PAGE_SIZE - len, " From : To\n");
110 len += snprintf(buf + len, PAGE_SIZE - len, " : ");
111 for (i = 0; i < stat->state_num; i++) {
112 if (len >= PAGE_SIZE)
113 break;
114 len += snprintf(buf + len, PAGE_SIZE - len, "%9u ",
115 stat->freq_table[i]);
116 }
117 if (len >= PAGE_SIZE)
118 return len;
119
120 len += snprintf(buf + len, PAGE_SIZE - len, "\n");
121
110 for (i = 0; i < stat->state_num; i++) { 122 for (i = 0; i < stat->state_num; i++) {
111 if (len >= PAGE_SIZE) 123 if (len >= PAGE_SIZE)
112 break; 124 break;
113 len += snprintf(buf + len, PAGE_SIZE - len, "%9u:\t", 125
126 len += snprintf(buf + len, PAGE_SIZE - len, "%9u: ",
114 stat->freq_table[i]); 127 stat->freq_table[i]);
115 128
116 for (j = 0; j < stat->state_num; j++) { 129 for (j = 0; j < stat->state_num; j++) {
117 if (len >= PAGE_SIZE) 130 if (len >= PAGE_SIZE)
118 break; 131 break;
119 len += snprintf(buf + len, PAGE_SIZE - len, "%u\t", 132 len += snprintf(buf + len, PAGE_SIZE - len, "%9u ",
120 stat->trans_table[i*stat->max_state+j]); 133 stat->trans_table[i*stat->max_state+j]);
121 } 134 }
122 len += snprintf(buf + len, PAGE_SIZE - len, "\n"); 135 len += snprintf(buf + len, PAGE_SIZE - len, "\n");
@@ -197,7 +210,7 @@ cpufreq_stats_create_table (struct cpufreq_policy *policy,
197 count++; 210 count++;
198 } 211 }
199 212
200 alloc_size = count * sizeof(int) + count * sizeof(long long); 213 alloc_size = count * sizeof(int) + count * sizeof(cputime64_t);
201 214
202#ifdef CONFIG_CPU_FREQ_STAT_DETAILS 215#ifdef CONFIG_CPU_FREQ_STAT_DETAILS
203 alloc_size += count * count * sizeof(int); 216 alloc_size += count * count * sizeof(int);
@@ -224,7 +237,7 @@ cpufreq_stats_create_table (struct cpufreq_policy *policy,
224 } 237 }
225 stat->state_num = j; 238 stat->state_num = j;
226 spin_lock(&cpufreq_stats_lock); 239 spin_lock(&cpufreq_stats_lock);
227 stat->last_time = jiffies; 240 stat->last_time = get_jiffies_64();
228 stat->last_index = freq_table_get_index(stat, policy->cur); 241 stat->last_index = freq_table_get_index(stat, policy->cur);
229 spin_unlock(&cpufreq_stats_lock); 242 spin_unlock(&cpufreq_stats_lock);
230 cpufreq_cpu_put(data); 243 cpufreq_cpu_put(data);
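
The cpufreq_stats change above moves time accounting from open-coded unsigned long jiffies deltas to 64-bit jiffies stored as cputime64_t. A simplified view of the new cpufreq_stats_update() bookkeeping, with the cputime64_add()/cputime_sub() helpers inlined for clarity (sketch only, not the literal patch code):

	u64 now = get_jiffies_64();
	stat->time_in_state[stat->last_index] += now - stat->last_time;
	stat->last_time = now;

get_jiffies_64() avoids the jiffies wraparound case that the removed delta_time() helper tried to handle, and reporting through cputime64_to_clock_t() means show_time_in_state() prints the per-frequency times in clock_t (USER_HZ) ticks rather than raw jiffies.
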
diff --git a/drivers/firmware/pcdp.c b/drivers/firmware/pcdp.c
index 6d5df6c2efa2..df1b721154d2 100644
--- a/drivers/firmware/pcdp.c
+++ b/drivers/firmware/pcdp.c
@@ -11,6 +11,7 @@
11 * published by the Free Software Foundation. 11 * published by the Free Software Foundation.
12 */ 12 */
13 13
14#include <linux/config.h>
14#include <linux/acpi.h> 15#include <linux/acpi.h>
15#include <linux/console.h> 16#include <linux/console.h>
16#include <linux/efi.h> 17#include <linux/efi.h>
diff --git a/drivers/ide/ide-cd.c b/drivers/ide/ide-cd.c
index 78e3e7b24d7d..39f3e9101ed4 100644
--- a/drivers/ide/ide-cd.c
+++ b/drivers/ide/ide-cd.c
@@ -1936,7 +1936,7 @@ static ide_startstop_t cdrom_do_block_pc(ide_drive_t *drive, struct request *rq)
1936 * NOTE! The "len" and "addr" checks should possibly have 1936 * NOTE! The "len" and "addr" checks should possibly have
1937 * separate masks. 1937 * separate masks.
1938 */ 1938 */
1939 if ((rq->data_len & mask) || (addr & mask)) 1939 if ((rq->data_len & 15) || (addr & mask))
1940 info->dma = 0; 1940 info->dma = 0;
1941 } 1941 }
1942 1942
diff --git a/drivers/input/mousedev.c b/drivers/input/mousedev.c
index 564974ce5793..96fb9870834a 100644
--- a/drivers/input/mousedev.c
+++ b/drivers/input/mousedev.c
@@ -101,6 +101,7 @@ struct mousedev_list {
101 unsigned char ready, buffer, bufsiz; 101 unsigned char ready, buffer, bufsiz;
102 unsigned char imexseq, impsseq; 102 unsigned char imexseq, impsseq;
103 enum mousedev_emul mode; 103 enum mousedev_emul mode;
104 unsigned long last_buttons;
104}; 105};
105 106
106#define MOUSEDEV_SEQ_LEN 6 107#define MOUSEDEV_SEQ_LEN 6
@@ -224,7 +225,7 @@ static void mousedev_notify_readers(struct mousedev *mousedev, struct mousedev_h
224 spin_lock_irqsave(&list->packet_lock, flags); 225 spin_lock_irqsave(&list->packet_lock, flags);
225 226
226 p = &list->packets[list->head]; 227 p = &list->packets[list->head];
227 if (list->ready && p->buttons != packet->buttons) { 228 if (list->ready && p->buttons != mousedev->packet.buttons) {
228 unsigned int new_head = (list->head + 1) % PACKET_QUEUE_LEN; 229 unsigned int new_head = (list->head + 1) % PACKET_QUEUE_LEN;
229 if (new_head != list->tail) { 230 if (new_head != list->tail) {
230 p = &list->packets[list->head = new_head]; 231 p = &list->packets[list->head = new_head];
@@ -249,10 +250,13 @@ static void mousedev_notify_readers(struct mousedev *mousedev, struct mousedev_h
249 p->dz += packet->dz; 250 p->dz += packet->dz;
250 p->buttons = mousedev->packet.buttons; 251 p->buttons = mousedev->packet.buttons;
251 252
252 list->ready = 1; 253 if (p->dx || p->dy || p->dz || p->buttons != list->last_buttons)
254 list->ready = 1;
253 255
254 spin_unlock_irqrestore(&list->packet_lock, flags); 256 spin_unlock_irqrestore(&list->packet_lock, flags);
255 kill_fasync(&list->fasync, SIGIO, POLL_IN); 257
258 if (list->ready)
259 kill_fasync(&list->fasync, SIGIO, POLL_IN);
256 } 260 }
257 261
258 wake_up_interruptible(&mousedev->wait); 262 wake_up_interruptible(&mousedev->wait);
@@ -477,9 +481,10 @@ static void mousedev_packet(struct mousedev_list *list, signed char *ps2_data)
477 } 481 }
478 482
479 if (!p->dx && !p->dy && !p->dz) { 483 if (!p->dx && !p->dy && !p->dz) {
480 if (list->tail == list->head) 484 if (list->tail == list->head) {
481 list->ready = 0; 485 list->ready = 0;
482 else 486 list->last_buttons = p->buttons;
487 } else
483 list->tail = (list->tail + 1) % PACKET_QUEUE_LEN; 488 list->tail = (list->tail + 1) % PACKET_QUEUE_LEN;
484 } 489 }
485 490
diff --git a/drivers/macintosh/via-pmu.c b/drivers/macintosh/via-pmu.c
index e654aa5eecd4..bb9f4044c74d 100644
--- a/drivers/macintosh/via-pmu.c
+++ b/drivers/macintosh/via-pmu.c
@@ -2421,7 +2421,7 @@ pmac_wakeup_devices(void)
2421 2421
2422 /* Re-enable local CPU interrupts */ 2422 /* Re-enable local CPU interrupts */
2423 local_irq_enable(); 2423 local_irq_enable();
2424 mdelay(100); 2424 mdelay(10);
2425 preempt_enable(); 2425 preempt_enable();
2426 2426
2427 /* Re-enable clock spreading on some machines */ 2427 /* Re-enable clock spreading on some machines */
@@ -2549,7 +2549,9 @@ powerbook_sleep_Core99(void)
2549 return ret; 2549 return ret;
2550 } 2550 }
2551 2551
2552 printk(KERN_DEBUG "HID1, before: %x\n", mfspr(SPRN_HID1)); 2552 /* Stop environment and ADB interrupts */
2553 pmu_request(&req, NULL, 2, PMU_SET_INTR_MASK, 0);
2554 pmu_wait_complete(&req);
2553 2555
2554 /* Tell PMU what events will wake us up */ 2556 /* Tell PMU what events will wake us up */
2555 pmu_request(&req, NULL, 4, PMU_POWER_EVENTS, PMU_PWR_CLR_WAKEUP_EVENTS, 2557 pmu_request(&req, NULL, 4, PMU_POWER_EVENTS, PMU_PWR_CLR_WAKEUP_EVENTS,
@@ -2611,8 +2613,6 @@ powerbook_sleep_Core99(void)
2611 pmu_request(&req, NULL, 2, PMU_SET_INTR_MASK, pmu_intr_mask); 2613 pmu_request(&req, NULL, 2, PMU_SET_INTR_MASK, pmu_intr_mask);
2612 pmu_wait_complete(&req); 2614 pmu_wait_complete(&req);
2613 2615
2614 printk(KERN_DEBUG "HID1, after: %x\n", mfspr(SPRN_HID1));
2615
2616 pmac_wakeup_devices(); 2616 pmac_wakeup_devices();
2617 2617
2618 return 0; 2618 return 0;
diff --git a/drivers/media/dvb/bt8xx/dst.c b/drivers/media/dvb/bt8xx/dst.c
index d047e349d706..1339912c308b 100644
--- a/drivers/media/dvb/bt8xx/dst.c
+++ b/drivers/media/dvb/bt8xx/dst.c
@@ -906,22 +906,12 @@ static int dst_tone_power_cmd(struct dst_state* state)
906 if (state->dst_type == DST_TYPE_IS_TERR) 906 if (state->dst_type == DST_TYPE_IS_TERR)
907 return 0; 907 return 0;
908 908
909 if (state->voltage == SEC_VOLTAGE_OFF) 909 paket[4] = state->tx_tuna[4];
910 paket[4] = 0; 910 paket[2] = state->tx_tuna[2];
911 else 911 paket[3] = state->tx_tuna[3];
912 paket[4] = 1;
913
914 if (state->tone == SEC_TONE_ON)
915 paket[2] = 0x02;
916 else
917 paket[2] = 0;
918 if (state->minicmd == SEC_MINI_A)
919 paket[3] = 0x02;
920 else
921 paket[3] = 0;
922
923 paket[7] = dst_check_sum (paket, 7); 912 paket[7] = dst_check_sum (paket, 7);
924 dst_command(state, paket, 8); 913 dst_command(state, paket, 8);
914
925 return 0; 915 return 0;
926} 916}
927 917
@@ -980,7 +970,7 @@ static int dst_set_voltage(struct dvb_frontend* fe, fe_sec_voltage_t voltage);
980 970
981static int dst_write_tuna(struct dvb_frontend* fe) 971static int dst_write_tuna(struct dvb_frontend* fe)
982{ 972{
983 struct dst_state* state = (struct dst_state*) fe->demodulator_priv; 973 struct dst_state* state = fe->demodulator_priv;
984 int retval; 974 int retval;
985 u8 reply; 975 u8 reply;
986 976
@@ -1048,10 +1038,10 @@ static int dst_write_tuna(struct dvb_frontend* fe)
1048 1038
1049static int dst_set_diseqc(struct dvb_frontend* fe, struct dvb_diseqc_master_cmd* cmd) 1039static int dst_set_diseqc(struct dvb_frontend* fe, struct dvb_diseqc_master_cmd* cmd)
1050{ 1040{
1051 struct dst_state* state = (struct dst_state*) fe->demodulator_priv; 1041 struct dst_state* state = fe->demodulator_priv;
1052 u8 paket[8] = { 0x00, 0x08, 0x04, 0xe0, 0x10, 0x38, 0xf0, 0xec }; 1042 u8 paket[8] = { 0x00, 0x08, 0x04, 0xe0, 0x10, 0x38, 0xf0, 0xec };
1053 1043
1054 if (state->dst_type == DST_TYPE_IS_TERR) 1044 if (state->dst_type != DST_TYPE_IS_SAT)
1055 return 0; 1045 return 0;
1056 1046
1057 if (cmd->msg_len == 0 || cmd->msg_len > 4) 1047 if (cmd->msg_len == 0 || cmd->msg_len > 4)
@@ -1064,39 +1054,32 @@ static int dst_set_diseqc(struct dvb_frontend* fe, struct dvb_diseqc_master_cmd*
1064 1054
1065static int dst_set_voltage(struct dvb_frontend* fe, fe_sec_voltage_t voltage) 1055static int dst_set_voltage(struct dvb_frontend* fe, fe_sec_voltage_t voltage)
1066{ 1056{
1067 u8 *val;
1068 int need_cmd; 1057 int need_cmd;
1069 struct dst_state* state = (struct dst_state*) fe->demodulator_priv; 1058 struct dst_state* state = fe->demodulator_priv;
1070 1059
1071 state->voltage = voltage; 1060 state->voltage = voltage;
1072 1061
1073 if (state->dst_type == DST_TYPE_IS_TERR) 1062 if (state->dst_type != DST_TYPE_IS_SAT)
1074 return 0; 1063 return 0;
1075 1064
1076 need_cmd = 0; 1065 need_cmd = 0;
1077 val = &state->tx_tuna[0];
1078 val[8] &= ~0x40;
1079 switch (voltage) { 1066 switch (voltage) {
1080 case SEC_VOLTAGE_13: 1067 case SEC_VOLTAGE_13:
1081 if ((state->diseq_flags & HAS_POWER) == 0) 1068 case SEC_VOLTAGE_18:
1082 need_cmd = 1; 1069 if ((state->diseq_flags & HAS_POWER) == 0)
1083 state->diseq_flags |= HAS_POWER; 1070 need_cmd = 1;
1084 break; 1071 state->diseq_flags |= HAS_POWER;
1072 state->tx_tuna[4] = 0x01;
1073 break;
1085 1074
1086 case SEC_VOLTAGE_18: 1075 case SEC_VOLTAGE_OFF:
1087 if ((state->diseq_flags & HAS_POWER) == 0)
1088 need_cmd = 1; 1076 need_cmd = 1;
1089 state->diseq_flags |= HAS_POWER; 1077 state->diseq_flags &= ~(HAS_POWER | HAS_LOCK | ATTEMPT_TUNE);
1090 val[8] |= 0x40; 1078 state->tx_tuna[4] = 0x00;
1091 break; 1079 break;
1092
1093 case SEC_VOLTAGE_OFF:
1094 need_cmd = 1;
1095 state->diseq_flags &= ~(HAS_POWER | HAS_LOCK | ATTEMPT_TUNE);
1096 break;
1097 1080
1098 default: 1081 default:
1099 return -EINVAL; 1082 return -EINVAL;
1100 } 1083 }
1101 if (need_cmd) 1084 if (need_cmd)
1102 dst_tone_power_cmd(state); 1085 dst_tone_power_cmd(state);
@@ -1106,37 +1089,56 @@ static int dst_set_voltage(struct dvb_frontend* fe, fe_sec_voltage_t voltage)

 static int dst_set_tone(struct dvb_frontend* fe, fe_sec_tone_mode_t tone)
 {
-	u8 *val;
-	struct dst_state* state = (struct dst_state*) fe->demodulator_priv;
+	struct dst_state* state = fe->demodulator_priv;

 	state->tone = tone;

-	if (state->dst_type == DST_TYPE_IS_TERR)
+	if (state->dst_type != DST_TYPE_IS_SAT)
 		return 0;

-	val = &state->tx_tuna[0];
-
-	val[8] &= ~0x1;
-
 	switch (tone) {
 	case SEC_TONE_OFF:
+		state->tx_tuna[2] = 0xff;
 		break;

 	case SEC_TONE_ON:
-		val[8] |= 1;
+		state->tx_tuna[2] = 0x02;
 		break;

 	default:
 		return -EINVAL;
 	}
 	dst_tone_power_cmd(state);

 	return 0;
 }

+static int dst_send_burst(struct dvb_frontend *fe, fe_sec_mini_cmd_t minicmd)
+{
+	struct dst_state *state = fe->demodulator_priv;
+
+	if (state->dst_type != DST_TYPE_IS_SAT)
+		return 0;
+
+	state->minicmd = minicmd;
+
+	switch (minicmd) {
+	case SEC_MINI_A:
+		state->tx_tuna[3] = 0x02;
+		break;
+	case SEC_MINI_B:
+		state->tx_tuna[3] = 0xff;
+		break;
+	}
+	dst_tone_power_cmd(state);
+
+	return 0;
+}
+
+
 static int dst_init(struct dvb_frontend* fe)
 {
-	struct dst_state* state = (struct dst_state*) fe->demodulator_priv;
+	struct dst_state* state = fe->demodulator_priv;
 	static u8 ini_satci_tuna[] = { 9, 0, 3, 0xb6, 1, 0, 0x73, 0x21, 0, 0 };
 	static u8 ini_satfta_tuna[] = { 0, 0, 3, 0xb6, 1, 0x55, 0xbd, 0x50, 0, 0 };
 	static u8 ini_tvfta_tuna[] = { 0, 0, 3, 0xb6, 1, 7, 0x0, 0x0, 0, 0 };
@@ -1168,7 +1170,7 @@ static int dst_init(struct dvb_frontend* fe)
1168 1170
1169static int dst_read_status(struct dvb_frontend* fe, fe_status_t* status) 1171static int dst_read_status(struct dvb_frontend* fe, fe_status_t* status)
1170{ 1172{
1171 struct dst_state* state = (struct dst_state*) fe->demodulator_priv; 1173 struct dst_state* state = fe->demodulator_priv;
1172 1174
1173 *status = 0; 1175 *status = 0;
1174 if (state->diseq_flags & HAS_LOCK) { 1176 if (state->diseq_flags & HAS_LOCK) {
@@ -1182,7 +1184,7 @@ static int dst_read_status(struct dvb_frontend* fe, fe_status_t* status)
1182 1184
1183static int dst_read_signal_strength(struct dvb_frontend* fe, u16* strength) 1185static int dst_read_signal_strength(struct dvb_frontend* fe, u16* strength)
1184{ 1186{
1185 struct dst_state* state = (struct dst_state*) fe->demodulator_priv; 1187 struct dst_state* state = fe->demodulator_priv;
1186 1188
1187 dst_get_signal(state); 1189 dst_get_signal(state);
1188 *strength = state->decode_strength; 1190 *strength = state->decode_strength;
@@ -1192,7 +1194,7 @@ static int dst_read_signal_strength(struct dvb_frontend* fe, u16* strength)
1192 1194
1193static int dst_read_snr(struct dvb_frontend* fe, u16* snr) 1195static int dst_read_snr(struct dvb_frontend* fe, u16* snr)
1194{ 1196{
1195 struct dst_state* state = (struct dst_state*) fe->demodulator_priv; 1197 struct dst_state* state = fe->demodulator_priv;
1196 1198
1197 dst_get_signal(state); 1199 dst_get_signal(state);
1198 *snr = state->decode_snr; 1200 *snr = state->decode_snr;
@@ -1202,7 +1204,7 @@ static int dst_read_snr(struct dvb_frontend* fe, u16* snr)
1202 1204
1203static int dst_set_frontend(struct dvb_frontend* fe, struct dvb_frontend_parameters *p) 1205static int dst_set_frontend(struct dvb_frontend* fe, struct dvb_frontend_parameters *p)
1204{ 1206{
1205 struct dst_state* state = (struct dst_state*) fe->demodulator_priv; 1207 struct dst_state* state = fe->demodulator_priv;
1206 1208
1207 dst_set_freq(state, p->frequency); 1209 dst_set_freq(state, p->frequency);
1208 if (verbose > 4) 1210 if (verbose > 4)
@@ -1228,7 +1230,7 @@ static int dst_set_frontend(struct dvb_frontend* fe, struct dvb_frontend_paramet
1228 1230
1229static int dst_get_frontend(struct dvb_frontend* fe, struct dvb_frontend_parameters *p) 1231static int dst_get_frontend(struct dvb_frontend* fe, struct dvb_frontend_parameters *p)
1230{ 1232{
1231 struct dst_state* state = (struct dst_state*) fe->demodulator_priv; 1233 struct dst_state* state = fe->demodulator_priv;
1232 1234
1233 p->frequency = state->decode_freq; 1235 p->frequency = state->decode_freq;
1234 p->inversion = state->inversion; 1236 p->inversion = state->inversion;
@@ -1248,7 +1250,7 @@ static int dst_get_frontend(struct dvb_frontend* fe, struct dvb_frontend_paramet
1248 1250
1249static void dst_release(struct dvb_frontend* fe) 1251static void dst_release(struct dvb_frontend* fe)
1250{ 1252{
1251 struct dst_state* state = (struct dst_state*) fe->demodulator_priv; 1253 struct dst_state* state = fe->demodulator_priv;
1252 kfree(state); 1254 kfree(state);
1253} 1255}
1254 1256
@@ -1346,7 +1348,7 @@ static struct dvb_frontend_ops dst_dvbs_ops = {
1346 .read_signal_strength = dst_read_signal_strength, 1348 .read_signal_strength = dst_read_signal_strength,
1347 .read_snr = dst_read_snr, 1349 .read_snr = dst_read_snr,
1348 1350
1349 .diseqc_send_burst = dst_set_tone, 1351 .diseqc_send_burst = dst_send_burst,
1350 .diseqc_send_master_cmd = dst_set_diseqc, 1352 .diseqc_send_master_cmd = dst_set_diseqc,
1351 .set_voltage = dst_set_voltage, 1353 .set_voltage = dst_set_voltage,
1352 .set_tone = dst_set_tone, 1354 .set_tone = dst_set_tone,
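The net effect of the dst.c changes above: tone, 22 kHz burst and LNB power are no longer encoded as individual bits of tx_tuna[8] but as whole bytes of the command block (tx_tuna[2], [3] and [4] respectively), and diseqc_send_burst now has its own dst_send_burst() entry point instead of reusing dst_set_tone(). Below is a minimal standalone sketch of just the new byte encodings; the struct and enum names here are illustrative stand-ins, not the driver's types, and the real code then pushes the refreshed block to the frontend via dst_tone_power_cmd().

#include <stdio.h>
#include <stdint.h>

/* Illustrative stand-in for the tx_tuna command block of struct dst_state. */
struct fake_dst_state {
	uint8_t tx_tuna[10];
};

enum tone  { TONE_OFF, TONE_ON };
enum burst { MINI_A, MINI_B };
enum volt  { V13, V18, V_OFF };

/* Byte values taken from the hunks above. */
static void encode_tone(struct fake_dst_state *s, enum tone t)
{
	s->tx_tuna[2] = (t == TONE_ON) ? 0x02 : 0xff;
}

static void encode_burst(struct fake_dst_state *s, enum burst b)
{
	s->tx_tuna[3] = (b == MINI_A) ? 0x02 : 0xff;
}

static void encode_voltage(struct fake_dst_state *s, enum volt v)
{
	/* 13 V and 18 V both map to "power on"; only off clears the byte. */
	s->tx_tuna[4] = (v == V_OFF) ? 0x00 : 0x01;
}

int main(void)
{
	struct fake_dst_state s = { { 0 } };

	encode_tone(&s, TONE_ON);
	encode_burst(&s, MINI_A);
	encode_voltage(&s, V18);
	printf("tx_tuna[2..4] = %02x %02x %02x\n",
	       s.tx_tuna[2], s.tx_tuna[3], s.tx_tuna[4]);
	return 0;
}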
diff --git a/drivers/net/tg3.c b/drivers/net/tg3.c
index f10dd74988c4..fc9b5cd957aa 100644
--- a/drivers/net/tg3.c
+++ b/drivers/net/tg3.c
@@ -133,6 +133,8 @@
133/* number of ETHTOOL_GSTATS u64's */ 133/* number of ETHTOOL_GSTATS u64's */
134#define TG3_NUM_STATS (sizeof(struct tg3_ethtool_stats)/sizeof(u64)) 134#define TG3_NUM_STATS (sizeof(struct tg3_ethtool_stats)/sizeof(u64))
135 135
136#define TG3_NUM_TEST 6
137
136static char version[] __devinitdata = 138static char version[] __devinitdata =
137 DRV_MODULE_NAME ".c:v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n"; 139 DRV_MODULE_NAME ".c:v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";
138 140
@@ -316,6 +318,17 @@ static struct {
316 { "nic_tx_threshold_hit" } 318 { "nic_tx_threshold_hit" }
317}; 319};
318 320
321static struct {
322 const char string[ETH_GSTRING_LEN];
323} ethtool_test_keys[TG3_NUM_TEST] = {
324 { "nvram test (online) " },
325 { "link test (online) " },
326 { "register test (offline)" },
327 { "memory test (offline)" },
328 { "loopback test (offline)" },
329 { "interrupt test (offline)" },
330};
331
319static void tg3_write_indirect_reg32(struct tg3 *tp, u32 off, u32 val) 332static void tg3_write_indirect_reg32(struct tg3 *tp, u32 off, u32 val)
320{ 333{
321 if ((tp->tg3_flags & TG3_FLAG_PCIX_TARGET_HWBUG) != 0) { 334 if ((tp->tg3_flags & TG3_FLAG_PCIX_TARGET_HWBUG) != 0) {
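The position of each string in ethtool_test_keys[] is what gives meaning to the per-test result slots that tg3_self_test() fills in further down (data[0] = nvram, ..., data[5] = interrupt). A tiny illustrative mapping; the enum names below are made up for readability, the driver itself just uses bare indices 0..5.

/* Hypothetical names for the result slots reported through ETH_SS_TEST. */
enum tg3_selftest_index {
	TG3_TEST_NVRAM = 0,	/* "nvram test (online) "     */
	TG3_TEST_LINK,		/* "link test (online) "      */
	TG3_TEST_REGS,		/* "register test (offline)"  */
	TG3_TEST_MEM,		/* "memory test (offline)"    */
	TG3_TEST_LOOPBACK,	/* "loopback test (offline)"  */
	TG3_TEST_INTR,		/* "interrupt test (offline)" */
};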
@@ -3070,7 +3083,7 @@ static irqreturn_t tg3_test_isr(int irq, void *dev_id,
3070} 3083}
3071 3084
3072static int tg3_init_hw(struct tg3 *); 3085static int tg3_init_hw(struct tg3 *);
3073static int tg3_halt(struct tg3 *, int); 3086static int tg3_halt(struct tg3 *, int, int);
3074 3087
3075#ifdef CONFIG_NET_POLL_CONTROLLER 3088#ifdef CONFIG_NET_POLL_CONTROLLER
3076static void tg3_poll_controller(struct net_device *dev) 3089static void tg3_poll_controller(struct net_device *dev)
@@ -3094,7 +3107,7 @@ static void tg3_reset_task(void *_data)
3094 restart_timer = tp->tg3_flags2 & TG3_FLG2_RESTART_TIMER; 3107 restart_timer = tp->tg3_flags2 & TG3_FLG2_RESTART_TIMER;
3095 tp->tg3_flags2 &= ~TG3_FLG2_RESTART_TIMER; 3108 tp->tg3_flags2 &= ~TG3_FLG2_RESTART_TIMER;
3096 3109
3097 tg3_halt(tp, 0); 3110 tg3_halt(tp, RESET_KIND_SHUTDOWN, 0);
3098 tg3_init_hw(tp); 3111 tg3_init_hw(tp);
3099 3112
3100 tg3_netif_start(tp); 3113 tg3_netif_start(tp);
@@ -3440,7 +3453,7 @@ static int tg3_change_mtu(struct net_device *dev, int new_mtu)
3440 spin_lock_irq(&tp->lock); 3453 spin_lock_irq(&tp->lock);
3441 spin_lock(&tp->tx_lock); 3454 spin_lock(&tp->tx_lock);
3442 3455
3443 tg3_halt(tp, 1); 3456 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
3444 3457
3445 tg3_set_mtu(dev, tp, new_mtu); 3458 tg3_set_mtu(dev, tp, new_mtu);
3446 3459
@@ -4131,19 +4144,19 @@ static void tg3_stop_fw(struct tg3 *tp)
4131} 4144}
4132 4145
4133/* tp->lock is held. */ 4146/* tp->lock is held. */
4134static int tg3_halt(struct tg3 *tp, int silent) 4147static int tg3_halt(struct tg3 *tp, int kind, int silent)
4135{ 4148{
4136 int err; 4149 int err;
4137 4150
4138 tg3_stop_fw(tp); 4151 tg3_stop_fw(tp);
4139 4152
4140 tg3_write_sig_pre_reset(tp, RESET_KIND_SHUTDOWN); 4153 tg3_write_sig_pre_reset(tp, kind);
4141 4154
4142 tg3_abort_hw(tp, silent); 4155 tg3_abort_hw(tp, silent);
4143 err = tg3_chip_reset(tp); 4156 err = tg3_chip_reset(tp);
4144 4157
4145 tg3_write_sig_legacy(tp, RESET_KIND_SHUTDOWN); 4158 tg3_write_sig_legacy(tp, kind);
4146 tg3_write_sig_post_reset(tp, RESET_KIND_SHUTDOWN); 4159 tg3_write_sig_post_reset(tp, kind);
4147 4160
4148 if (err) 4161 if (err)
4149 return err; 4162 return err;
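With the extra kind argument, the reset signatures written before and after the chip reset are no longer hard-coded to RESET_KIND_SHUTDOWN. The two call shapes, as they appear at the call sites in this diff (the offline self-test added later in the patch is the one that wants the suspend-style signature):

	/* Normal teardown paths (close, suspend, MTU/ring/pause changes): */
	tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);

	/* Offline ethtool self-test, before poking registers and memory: */
	tg3_halt(tp, RESET_KIND_SUSPEND, 1);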
@@ -4357,7 +4370,12 @@ static int tg3_load_firmware_cpu(struct tg3 *tp, u32 cpu_base, u32 cpu_scratch_b
4357 */ 4370 */
4358 tp->tg3_flags |= TG3_FLAG_PCIX_TARGET_HWBUG; 4371 tp->tg3_flags |= TG3_FLAG_PCIX_TARGET_HWBUG;
4359 4372
4373 /* It is possible that bootcode is still loading at this point.
4374 * Get the nvram lock first before halting the cpu.
4375 */
4376 tg3_nvram_lock(tp);
4360 err = tg3_halt_cpu(tp, cpu_base); 4377 err = tg3_halt_cpu(tp, cpu_base);
4378 tg3_nvram_unlock(tp);
4361 if (err) 4379 if (err)
4362 goto out; 4380 goto out;
4363 4381
@@ -5881,6 +5899,9 @@ static int tg3_test_interrupt(struct tg3 *tp)
5881 int err, i; 5899 int err, i;
5882 u32 int_mbox = 0; 5900 u32 int_mbox = 0;
5883 5901
5902 if (!netif_running(dev))
5903 return -ENODEV;
5904
5884 tg3_disable_ints(tp); 5905 tg3_disable_ints(tp);
5885 5906
5886 free_irq(tp->pdev->irq, dev); 5907 free_irq(tp->pdev->irq, dev);
@@ -5984,7 +6005,7 @@ static int tg3_test_msi(struct tg3 *tp)
5984 spin_lock_irq(&tp->lock); 6005 spin_lock_irq(&tp->lock);
5985 spin_lock(&tp->tx_lock); 6006 spin_lock(&tp->tx_lock);
5986 6007
5987 tg3_halt(tp, 1); 6008 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
5988 err = tg3_init_hw(tp); 6009 err = tg3_init_hw(tp);
5989 6010
5990 spin_unlock(&tp->tx_lock); 6011 spin_unlock(&tp->tx_lock);
@@ -6060,7 +6081,7 @@ static int tg3_open(struct net_device *dev)
6060 6081
6061 err = tg3_init_hw(tp); 6082 err = tg3_init_hw(tp);
6062 if (err) { 6083 if (err) {
6063 tg3_halt(tp, 1); 6084 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
6064 tg3_free_rings(tp); 6085 tg3_free_rings(tp);
6065 } else { 6086 } else {
6066 if (tp->tg3_flags & TG3_FLAG_TAGGED_STATUS) 6087 if (tp->tg3_flags & TG3_FLAG_TAGGED_STATUS)
@@ -6104,7 +6125,7 @@ static int tg3_open(struct net_device *dev)
6104 pci_disable_msi(tp->pdev); 6125 pci_disable_msi(tp->pdev);
6105 tp->tg3_flags2 &= ~TG3_FLG2_USING_MSI; 6126 tp->tg3_flags2 &= ~TG3_FLG2_USING_MSI;
6106 } 6127 }
6107 tg3_halt(tp, 1); 6128 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
6108 tg3_free_rings(tp); 6129 tg3_free_rings(tp);
6109 tg3_free_consistent(tp); 6130 tg3_free_consistent(tp);
6110 6131
@@ -6377,7 +6398,7 @@ static int tg3_close(struct net_device *dev)
6377 6398
6378 tg3_disable_ints(tp); 6399 tg3_disable_ints(tp);
6379 6400
6380 tg3_halt(tp, 1); 6401 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
6381 tg3_free_rings(tp); 6402 tg3_free_rings(tp);
6382 tp->tg3_flags &= 6403 tp->tg3_flags &=
6383 ~(TG3_FLAG_INIT_COMPLETE | 6404 ~(TG3_FLAG_INIT_COMPLETE |
@@ -7097,7 +7118,7 @@ static int tg3_set_ringparam(struct net_device *dev, struct ethtool_ringparam *e
7097 tp->tx_pending = ering->tx_pending; 7118 tp->tx_pending = ering->tx_pending;
7098 7119
7099 if (netif_running(dev)) { 7120 if (netif_running(dev)) {
7100 tg3_halt(tp, 1); 7121 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
7101 tg3_init_hw(tp); 7122 tg3_init_hw(tp);
7102 tg3_netif_start(tp); 7123 tg3_netif_start(tp);
7103 } 7124 }
@@ -7140,7 +7161,7 @@ static int tg3_set_pauseparam(struct net_device *dev, struct ethtool_pauseparam
7140 tp->tg3_flags &= ~TG3_FLAG_TX_PAUSE; 7161 tp->tg3_flags &= ~TG3_FLAG_TX_PAUSE;
7141 7162
7142 if (netif_running(dev)) { 7163 if (netif_running(dev)) {
7143 tg3_halt(tp, 1); 7164 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
7144 tg3_init_hw(tp); 7165 tg3_init_hw(tp);
7145 tg3_netif_start(tp); 7166 tg3_netif_start(tp);
7146 } 7167 }
@@ -7199,12 +7220,20 @@ static int tg3_get_stats_count (struct net_device *dev)
7199 return TG3_NUM_STATS; 7220 return TG3_NUM_STATS;
7200} 7221}
7201 7222
7223static int tg3_get_test_count (struct net_device *dev)
7224{
7225 return TG3_NUM_TEST;
7226}
7227
7202static void tg3_get_strings (struct net_device *dev, u32 stringset, u8 *buf) 7228static void tg3_get_strings (struct net_device *dev, u32 stringset, u8 *buf)
7203{ 7229{
7204 switch (stringset) { 7230 switch (stringset) {
7205 case ETH_SS_STATS: 7231 case ETH_SS_STATS:
7206 memcpy(buf, &ethtool_stats_keys, sizeof(ethtool_stats_keys)); 7232 memcpy(buf, &ethtool_stats_keys, sizeof(ethtool_stats_keys));
7207 break; 7233 break;
7234 case ETH_SS_TEST:
7235 memcpy(buf, &ethtool_test_keys, sizeof(ethtool_test_keys));
7236 break;
7208 default: 7237 default:
7209 WARN_ON(1); /* we need a WARN() */ 7238 WARN_ON(1); /* we need a WARN() */
7210 break; 7239 break;
@@ -7218,6 +7247,516 @@ static void tg3_get_ethtool_stats (struct net_device *dev,
7218 memcpy(tmp_stats, tg3_get_estats(tp), sizeof(tp->estats)); 7247 memcpy(tmp_stats, tg3_get_estats(tp), sizeof(tp->estats));
7219} 7248}
7220 7249
7250#define NVRAM_TEST_SIZE 0x100
7251
7252static int tg3_test_nvram(struct tg3 *tp)
7253{
7254 u32 *buf, csum;
7255 int i, j, err = 0;
7256
7257 buf = kmalloc(NVRAM_TEST_SIZE, GFP_KERNEL);
7258 if (buf == NULL)
7259 return -ENOMEM;
7260
7261 for (i = 0, j = 0; i < NVRAM_TEST_SIZE; i += 4, j++) {
7262 u32 val;
7263
7264 if ((err = tg3_nvram_read(tp, i, &val)) != 0)
7265 break;
7266 buf[j] = cpu_to_le32(val);
7267 }
7268 if (i < NVRAM_TEST_SIZE)
7269 goto out;
7270
7271 err = -EIO;
7272 if (cpu_to_be32(buf[0]) != TG3_EEPROM_MAGIC)
7273 goto out;
7274
7275 /* Bootstrap checksum at offset 0x10 */
7276 csum = calc_crc((unsigned char *) buf, 0x10);
7277 if(csum != cpu_to_le32(buf[0x10/4]))
7278 goto out;
7279
7280 /* Manufacturing block starts at offset 0x74, checksum at 0xfc */
7281 csum = calc_crc((unsigned char *) &buf[0x74/4], 0x88);
7282 if (csum != cpu_to_le32(buf[0xfc/4]))
7283 goto out;
7284
7285 err = 0;
7286
7287out:
7288 kfree(buf);
7289 return err;
7290}
7291
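tg3_test_nvram() above reads the first 0x100 bytes of NVRAM, checks the magic word, and then verifies two checksums: one over the 16-byte bootstrap header, stored at offset 0x10, and one over the 0x88-byte manufacturing block at 0x74, stored at 0xfc. The userspace sketch below shows the same layout check over an in-memory image; it assumes a plain reflected CRC-32 in place of the driver's calc_crc() helper (which may use a different polynomial) and simplifies the endianness handling, so it illustrates the layout rather than reproducing the exact arithmetic.

#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define NVRAM_TEST_SIZE 0x100

/* Stand-in for calc_crc(): standard reflected CRC-32. */
static uint32_t crc32_le(const uint8_t *p, size_t len)
{
	uint32_t crc = ~0u;

	while (len--) {
		crc ^= *p++;
		for (int i = 0; i < 8; i++)
			crc = (crc >> 1) ^ (0xEDB88320u & -(crc & 1));
	}
	return ~crc;
}

/* The driver additionally checks TG3_EEPROM_MAGIC at offset 0; omitted here. */
static int check_nvram_image(const uint8_t *buf)
{
	uint32_t csum, stored;

	/* Bootstrap header: CRC over bytes 0x00-0x0f, stored at 0x10. */
	csum = crc32_le(buf, 0x10);
	memcpy(&stored, buf + 0x10, 4);
	if (csum != stored)
		return -1;

	/* Manufacturing block: CRC over 0x88 bytes at 0x74, stored at 0xfc. */
	csum = crc32_le(buf + 0x74, 0x88);
	memcpy(&stored, buf + 0xfc, 4);
	if (csum != stored)
		return -1;

	return 0;
}

int main(void)
{
	uint8_t img[NVRAM_TEST_SIZE] = { 0 };
	uint32_t crc;

	/* Build a self-consistent dummy image so the check passes. */
	crc = crc32_le(img, 0x10);
	memcpy(img + 0x10, &crc, 4);
	crc = crc32_le(img + 0x74, 0x88);
	memcpy(img + 0xfc, &crc, 4);

	printf("layout check: %s\n", check_nvram_image(img) ? "FAIL" : "ok");
	return 0;
}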
7292#define TG3_SERDES_TIMEOUT_SEC 2
7293#define TG3_COPPER_TIMEOUT_SEC 6
7294
7295static int tg3_test_link(struct tg3 *tp)
7296{
7297 int i, max;
7298
7299 if (!netif_running(tp->dev))
7300 return -ENODEV;
7301
7302 if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)
7303 max = TG3_SERDES_TIMEOUT_SEC;
7304 else
7305 max = TG3_COPPER_TIMEOUT_SEC;
7306
7307 for (i = 0; i < max; i++) {
7308 if (netif_carrier_ok(tp->dev))
7309 return 0;
7310
7311 if (msleep_interruptible(1000))
7312 break;
7313 }
7314
7315 return -EIO;
7316}
7317
7318/* Only test the commonly used registers */
7319static int tg3_test_registers(struct tg3 *tp)
7320{
7321 int i, is_5705;
7322 u32 offset, read_mask, write_mask, val, save_val, read_val;
7323 static struct {
7324 u16 offset;
7325 u16 flags;
7326#define TG3_FL_5705 0x1
7327#define TG3_FL_NOT_5705 0x2
7328#define TG3_FL_NOT_5788 0x4
7329 u32 read_mask;
7330 u32 write_mask;
7331 } reg_tbl[] = {
7332 /* MAC Control Registers */
7333 { MAC_MODE, TG3_FL_NOT_5705,
7334 0x00000000, 0x00ef6f8c },
7335 { MAC_MODE, TG3_FL_5705,
7336 0x00000000, 0x01ef6b8c },
7337 { MAC_STATUS, TG3_FL_NOT_5705,
7338 0x03800107, 0x00000000 },
7339 { MAC_STATUS, TG3_FL_5705,
7340 0x03800100, 0x00000000 },
7341 { MAC_ADDR_0_HIGH, 0x0000,
7342 0x00000000, 0x0000ffff },
7343 { MAC_ADDR_0_LOW, 0x0000,
7344 0x00000000, 0xffffffff },
7345 { MAC_RX_MTU_SIZE, 0x0000,
7346 0x00000000, 0x0000ffff },
7347 { MAC_TX_MODE, 0x0000,
7348 0x00000000, 0x00000070 },
7349 { MAC_TX_LENGTHS, 0x0000,
7350 0x00000000, 0x00003fff },
7351 { MAC_RX_MODE, TG3_FL_NOT_5705,
7352 0x00000000, 0x000007fc },
7353 { MAC_RX_MODE, TG3_FL_5705,
7354 0x00000000, 0x000007dc },
7355 { MAC_HASH_REG_0, 0x0000,
7356 0x00000000, 0xffffffff },
7357 { MAC_HASH_REG_1, 0x0000,
7358 0x00000000, 0xffffffff },
7359 { MAC_HASH_REG_2, 0x0000,
7360 0x00000000, 0xffffffff },
7361 { MAC_HASH_REG_3, 0x0000,
7362 0x00000000, 0xffffffff },
7363
7364 /* Receive Data and Receive BD Initiator Control Registers. */
7365 { RCVDBDI_JUMBO_BD+0, TG3_FL_NOT_5705,
7366 0x00000000, 0xffffffff },
7367 { RCVDBDI_JUMBO_BD+4, TG3_FL_NOT_5705,
7368 0x00000000, 0xffffffff },
7369 { RCVDBDI_JUMBO_BD+8, TG3_FL_NOT_5705,
7370 0x00000000, 0x00000003 },
7371 { RCVDBDI_JUMBO_BD+0xc, TG3_FL_NOT_5705,
7372 0x00000000, 0xffffffff },
7373 { RCVDBDI_STD_BD+0, 0x0000,
7374 0x00000000, 0xffffffff },
7375 { RCVDBDI_STD_BD+4, 0x0000,
7376 0x00000000, 0xffffffff },
7377 { RCVDBDI_STD_BD+8, 0x0000,
7378 0x00000000, 0xffff0002 },
7379 { RCVDBDI_STD_BD+0xc, 0x0000,
7380 0x00000000, 0xffffffff },
7381
7382 /* Receive BD Initiator Control Registers. */
7383 { RCVBDI_STD_THRESH, TG3_FL_NOT_5705,
7384 0x00000000, 0xffffffff },
7385 { RCVBDI_STD_THRESH, TG3_FL_5705,
7386 0x00000000, 0x000003ff },
7387 { RCVBDI_JUMBO_THRESH, TG3_FL_NOT_5705,
7388 0x00000000, 0xffffffff },
7389
7390 /* Host Coalescing Control Registers. */
7391 { HOSTCC_MODE, TG3_FL_NOT_5705,
7392 0x00000000, 0x00000004 },
7393 { HOSTCC_MODE, TG3_FL_5705,
7394 0x00000000, 0x000000f6 },
7395 { HOSTCC_RXCOL_TICKS, TG3_FL_NOT_5705,
7396 0x00000000, 0xffffffff },
7397 { HOSTCC_RXCOL_TICKS, TG3_FL_5705,
7398 0x00000000, 0x000003ff },
7399 { HOSTCC_TXCOL_TICKS, TG3_FL_NOT_5705,
7400 0x00000000, 0xffffffff },
7401 { HOSTCC_TXCOL_TICKS, TG3_FL_5705,
7402 0x00000000, 0x000003ff },
7403 { HOSTCC_RXMAX_FRAMES, TG3_FL_NOT_5705,
7404 0x00000000, 0xffffffff },
7405 { HOSTCC_RXMAX_FRAMES, TG3_FL_5705 | TG3_FL_NOT_5788,
7406 0x00000000, 0x000000ff },
7407 { HOSTCC_TXMAX_FRAMES, TG3_FL_NOT_5705,
7408 0x00000000, 0xffffffff },
7409 { HOSTCC_TXMAX_FRAMES, TG3_FL_5705 | TG3_FL_NOT_5788,
7410 0x00000000, 0x000000ff },
7411 { HOSTCC_RXCOAL_TICK_INT, TG3_FL_NOT_5705,
7412 0x00000000, 0xffffffff },
7413 { HOSTCC_TXCOAL_TICK_INT, TG3_FL_NOT_5705,
7414 0x00000000, 0xffffffff },
7415 { HOSTCC_RXCOAL_MAXF_INT, TG3_FL_NOT_5705,
7416 0x00000000, 0xffffffff },
7417 { HOSTCC_RXCOAL_MAXF_INT, TG3_FL_5705 | TG3_FL_NOT_5788,
7418 0x00000000, 0x000000ff },
7419 { HOSTCC_TXCOAL_MAXF_INT, TG3_FL_NOT_5705,
7420 0x00000000, 0xffffffff },
7421 { HOSTCC_TXCOAL_MAXF_INT, TG3_FL_5705 | TG3_FL_NOT_5788,
7422 0x00000000, 0x000000ff },
7423 { HOSTCC_STAT_COAL_TICKS, TG3_FL_NOT_5705,
7424 0x00000000, 0xffffffff },
7425 { HOSTCC_STATS_BLK_HOST_ADDR, TG3_FL_NOT_5705,
7426 0x00000000, 0xffffffff },
7427 { HOSTCC_STATS_BLK_HOST_ADDR+4, TG3_FL_NOT_5705,
7428 0x00000000, 0xffffffff },
7429 { HOSTCC_STATUS_BLK_HOST_ADDR, 0x0000,
7430 0x00000000, 0xffffffff },
7431 { HOSTCC_STATUS_BLK_HOST_ADDR+4, 0x0000,
7432 0x00000000, 0xffffffff },
7433 { HOSTCC_STATS_BLK_NIC_ADDR, 0x0000,
7434 0xffffffff, 0x00000000 },
7435 { HOSTCC_STATUS_BLK_NIC_ADDR, 0x0000,
7436 0xffffffff, 0x00000000 },
7437
7438 /* Buffer Manager Control Registers. */
7439 { BUFMGR_MB_POOL_ADDR, 0x0000,
7440 0x00000000, 0x007fff80 },
7441 { BUFMGR_MB_POOL_SIZE, 0x0000,
7442 0x00000000, 0x007fffff },
7443 { BUFMGR_MB_RDMA_LOW_WATER, 0x0000,
7444 0x00000000, 0x0000003f },
7445 { BUFMGR_MB_MACRX_LOW_WATER, 0x0000,
7446 0x00000000, 0x000001ff },
7447 { BUFMGR_MB_HIGH_WATER, 0x0000,
7448 0x00000000, 0x000001ff },
7449 { BUFMGR_DMA_DESC_POOL_ADDR, TG3_FL_NOT_5705,
7450 0xffffffff, 0x00000000 },
7451 { BUFMGR_DMA_DESC_POOL_SIZE, TG3_FL_NOT_5705,
7452 0xffffffff, 0x00000000 },
7453
7454 /* Mailbox Registers */
7455 { GRCMBOX_RCVSTD_PROD_IDX+4, 0x0000,
7456 0x00000000, 0x000001ff },
7457 { GRCMBOX_RCVJUMBO_PROD_IDX+4, TG3_FL_NOT_5705,
7458 0x00000000, 0x000001ff },
7459 { GRCMBOX_RCVRET_CON_IDX_0+4, 0x0000,
7460 0x00000000, 0x000007ff },
7461 { GRCMBOX_SNDHOST_PROD_IDX_0+4, 0x0000,
7462 0x00000000, 0x000001ff },
7463
7464 { 0xffff, 0x0000, 0x00000000, 0x00000000 },
7465 };
7466
7467 if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS)
7468 is_5705 = 1;
7469 else
7470 is_5705 = 0;
7471
7472 for (i = 0; reg_tbl[i].offset != 0xffff; i++) {
7473 if (is_5705 && (reg_tbl[i].flags & TG3_FL_NOT_5705))
7474 continue;
7475
7476 if (!is_5705 && (reg_tbl[i].flags & TG3_FL_5705))
7477 continue;
7478
7479 if ((tp->tg3_flags2 & TG3_FLG2_IS_5788) &&
7480 (reg_tbl[i].flags & TG3_FL_NOT_5788))
7481 continue;
7482
7483 offset = (u32) reg_tbl[i].offset;
7484 read_mask = reg_tbl[i].read_mask;
7485 write_mask = reg_tbl[i].write_mask;
7486
7487 /* Save the original register content */
7488 save_val = tr32(offset);
7489
7490 /* Determine the read-only value. */
7491 read_val = save_val & read_mask;
7492
7493 /* Write zero to the register, then make sure the read-only bits
7494 * are not changed and the read/write bits are all zeros.
7495 */
7496 tw32(offset, 0);
7497
7498 val = tr32(offset);
7499
7500 /* Test the read-only and read/write bits. */
7501 if (((val & read_mask) != read_val) || (val & write_mask))
7502 goto out;
7503
7504 /* Write ones to all the bits defined by RdMask and WrMask, then
7505 * make sure the read-only bits are not changed and the
7506 * read/write bits are all ones.
7507 */
7508 tw32(offset, read_mask | write_mask);
7509
7510 val = tr32(offset);
7511
7512 /* Test the read-only bits. */
7513 if ((val & read_mask) != read_val)
7514 goto out;
7515
7516 /* Test the read/write bits. */
7517 if ((val & write_mask) != write_mask)
7518 goto out;
7519
7520 tw32(offset, save_val);
7521 }
7522
7523 return 0;
7524
7525out:
7526 printk(KERN_ERR PFX "Register test failed at offset %x\n", offset);
7527 tw32(offset, save_val);
7528 return -EIO;
7529}
7530
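The register test walks a table of (offset, read_mask, write_mask) entries, writes zero and then all the maskable bits, and checks that read-only bits keep their saved value while read/write bits take the written value. The table entries above are the driver's; the compact standalone illustration below runs the same check against a simulated register whose masks are made up for the example.

#include <stdint.h>
#include <stdio.h>

/* A fake register: the low byte is read/write, bits 8-11 are read-only. */
static uint32_t hw_reg = 0x0500 | 0x3c;

static uint32_t mock_read(void)
{
	return hw_reg;
}

static void mock_write(uint32_t v)
{
	/* Writes to the read-only bits are ignored, as on real hardware. */
	hw_reg = (hw_reg & 0x0f00) | (v & 0x00ff);
}

static int test_one(uint32_t read_mask, uint32_t write_mask)
{
	uint32_t save = mock_read();
	uint32_t ro   = save & read_mask;
	uint32_t val;

	/* Zero pass: RO bits must survive, RW bits must read back as 0. */
	mock_write(0);
	val = mock_read();
	if ((val & read_mask) != ro || (val & write_mask))
		return -1;

	/* Ones pass: RO bits must survive, RW bits must read back as 1. */
	mock_write(read_mask | write_mask);
	val = mock_read();
	if ((val & read_mask) != ro || (val & write_mask) != write_mask)
		return -1;

	mock_write(save);
	return 0;
}

int main(void)
{
	/* Masks chosen to match the mock register above. */
	printf("register test: %s\n",
	       test_one(0x00000f00, 0x000000ff) ? "FAIL" : "ok");
	return 0;
}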
7531static int tg3_do_mem_test(struct tg3 *tp, u32 offset, u32 len)
7532{
7533 static u32 test_pattern[] = { 0x00000000, 0xffffffff, 0xaa55a55a };
7534 int i;
7535 u32 j;
7536
7537 for (i = 0; i < sizeof(test_pattern)/sizeof(u32); i++) {
7538 for (j = 0; j < len; j += 4) {
7539 u32 val;
7540
7541 tg3_write_mem(tp, offset + j, test_pattern[i]);
7542 tg3_read_mem(tp, offset + j, &val);
7543 if (val != test_pattern[i])
7544 return -EIO;
7545 }
7546 }
7547 return 0;
7548}
7549
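tg3_do_mem_test() is a simple write/read-back pattern check (all zeros, all ones, 0xaa55a55a) over a window of NIC-internal memory; the interesting part in the driver is that the round trip goes through tg3_write_mem()/tg3_read_mem(), i.e. through the chip rather than host RAM. The same loop structure over an ordinary buffer, where it of course trivially passes, just to show the shape:

#include <stdint.h>
#include <stdio.h>

static int mem_pattern_test(uint32_t *mem, size_t words)
{
	static const uint32_t pattern[] = { 0x00000000, 0xffffffff, 0xaa55a55a };

	for (size_t p = 0; p < sizeof(pattern) / sizeof(pattern[0]); p++) {
		for (size_t i = 0; i < words; i++) {
			mem[i] = pattern[p];
			if (mem[i] != pattern[p])	/* read back and compare */
				return -1;
		}
	}
	return 0;
}

int main(void)
{
	uint32_t buf[256];

	printf("memory test: %s\n", mem_pattern_test(buf, 256) ? "FAIL" : "ok");
	return 0;
}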
7550static int tg3_test_memory(struct tg3 *tp)
7551{
7552 static struct mem_entry {
7553 u32 offset;
7554 u32 len;
7555 } mem_tbl_570x[] = {
7556 { 0x00000000, 0x01000},
7557 { 0x00002000, 0x1c000},
7558 { 0xffffffff, 0x00000}
7559 }, mem_tbl_5705[] = {
7560 { 0x00000100, 0x0000c},
7561 { 0x00000200, 0x00008},
7562 { 0x00000b50, 0x00400},
7563 { 0x00004000, 0x00800},
7564 { 0x00006000, 0x01000},
7565 { 0x00008000, 0x02000},
7566 { 0x00010000, 0x0e000},
7567 { 0xffffffff, 0x00000}
7568 };
7569 struct mem_entry *mem_tbl;
7570 int err = 0;
7571 int i;
7572
7573 if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS)
7574 mem_tbl = mem_tbl_5705;
7575 else
7576 mem_tbl = mem_tbl_570x;
7577
7578 for (i = 0; mem_tbl[i].offset != 0xffffffff; i++) {
7579 if ((err = tg3_do_mem_test(tp, mem_tbl[i].offset,
7580 mem_tbl[i].len)) != 0)
7581 break;
7582 }
7583
7584 return err;
7585}
7586
7587static int tg3_test_loopback(struct tg3 *tp)
7588{
7589 u32 mac_mode, send_idx, rx_start_idx, rx_idx, tx_idx, opaque_key;
7590 u32 desc_idx;
7591 struct sk_buff *skb, *rx_skb;
7592 u8 *tx_data;
7593 dma_addr_t map;
7594 int num_pkts, tx_len, rx_len, i, err;
7595 struct tg3_rx_buffer_desc *desc;
7596
7597 if (!netif_running(tp->dev))
7598 return -ENODEV;
7599
7600 err = -EIO;
7601
7602 tg3_abort_hw(tp, 1);
7603
7604 /* Clearing this flag to keep interrupts disabled */
7605 tp->tg3_flags &= ~TG3_FLAG_INIT_COMPLETE;
7606 tg3_reset_hw(tp);
7607
7608 mac_mode = (tp->mac_mode & ~MAC_MODE_PORT_MODE_MASK) |
7609 MAC_MODE_PORT_INT_LPBACK | MAC_MODE_LINK_POLARITY |
7610 MAC_MODE_PORT_MODE_GMII;
7611 tw32(MAC_MODE, mac_mode);
7612
7613 tx_len = 1514;
7614 skb = dev_alloc_skb(tx_len);
7615 tx_data = skb_put(skb, tx_len);
7616 memcpy(tx_data, tp->dev->dev_addr, 6);
7617 memset(tx_data + 6, 0x0, 8);
7618
7619 tw32(MAC_RX_MTU_SIZE, tx_len + 4);
7620
7621 for (i = 14; i < tx_len; i++)
7622 tx_data[i] = (u8) (i & 0xff);
7623
7624 map = pci_map_single(tp->pdev, skb->data, tx_len, PCI_DMA_TODEVICE);
7625
7626 tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
7627 HOSTCC_MODE_NOW);
7628
7629 udelay(10);
7630
7631 rx_start_idx = tp->hw_status->idx[0].rx_producer;
7632
7633 send_idx = 0;
7634 num_pkts = 0;
7635
7636 tg3_set_txd(tp, send_idx, map, tx_len, 0, 1);
7637
7638 send_idx++;
7639 num_pkts++;
7640
7641 tw32_tx_mbox(MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW, send_idx);
7642 tr32(MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW);
7643
7644 udelay(10);
7645
7646 for (i = 0; i < 10; i++) {
7647 tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
7648 HOSTCC_MODE_NOW);
7649
7650 udelay(10);
7651
7652 tx_idx = tp->hw_status->idx[0].tx_consumer;
7653 rx_idx = tp->hw_status->idx[0].rx_producer;
7654 if ((tx_idx == send_idx) &&
7655 (rx_idx == (rx_start_idx + num_pkts)))
7656 break;
7657 }
7658
7659 pci_unmap_single(tp->pdev, map, tx_len, PCI_DMA_TODEVICE);
7660 dev_kfree_skb(skb);
7661
7662 if (tx_idx != send_idx)
7663 goto out;
7664
7665 if (rx_idx != rx_start_idx + num_pkts)
7666 goto out;
7667
7668 desc = &tp->rx_rcb[rx_start_idx];
7669 desc_idx = desc->opaque & RXD_OPAQUE_INDEX_MASK;
7670 opaque_key = desc->opaque & RXD_OPAQUE_RING_MASK;
7671 if (opaque_key != RXD_OPAQUE_RING_STD)
7672 goto out;
7673
7674 if ((desc->err_vlan & RXD_ERR_MASK) != 0 &&
7675 (desc->err_vlan != RXD_ERR_ODD_NIBBLE_RCVD_MII))
7676 goto out;
7677
7678 rx_len = ((desc->idx_len & RXD_LEN_MASK) >> RXD_LEN_SHIFT) - 4;
7679 if (rx_len != tx_len)
7680 goto out;
7681
7682 rx_skb = tp->rx_std_buffers[desc_idx].skb;
7683
7684 map = pci_unmap_addr(&tp->rx_std_buffers[desc_idx], mapping);
7685 pci_dma_sync_single_for_cpu(tp->pdev, map, rx_len, PCI_DMA_FROMDEVICE);
7686
7687 for (i = 14; i < tx_len; i++) {
7688 if (*(rx_skb->data + i) != (u8) (i & 0xff))
7689 goto out;
7690 }
7691 err = 0;
7692
7693 /* tg3_free_rings will unmap and free the rx_skb */
7694out:
7695 return err;
7696}
7697
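The loopback test puts the MAC into internal loopback (MAC_MODE_PORT_INT_LPBACK), transmits one 1514-byte frame whose payload is a counting pattern, and compares the received copy byte for byte. Below is a standalone sketch of just the frame construction and verification that the function performs around the DMA mechanics; the MAC address is a placeholder.

#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define TX_LEN 1514

/* Build the test frame the way the driver does: destination MAC, then
 * 8 zero bytes, then a counting pattern from offset 14 onwards. */
static void build_test_frame(uint8_t *buf, const uint8_t *dev_addr)
{
	memcpy(buf, dev_addr, 6);
	memset(buf + 6, 0, 8);
	for (int i = 14; i < TX_LEN; i++)
		buf[i] = (uint8_t)(i & 0xff);
}

/* Verify only the pattern area, as tg3_test_loopback() does on the rx side. */
static int check_test_frame(const uint8_t *rx, int rx_len)
{
	if (rx_len != TX_LEN)
		return -1;
	for (int i = 14; i < TX_LEN; i++)
		if (rx[i] != (uint8_t)(i & 0xff))
			return -1;
	return 0;
}

int main(void)
{
	static const uint8_t mac[6] = { 0x02, 0x00, 0x00, 0x00, 0x00, 0x01 };
	uint8_t frame[TX_LEN];

	build_test_frame(frame, mac);
	printf("loopback payload check: %s\n",
	       check_test_frame(frame, TX_LEN) ? "FAIL" : "ok");
	return 0;
}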
7698static void tg3_self_test(struct net_device *dev, struct ethtool_test *etest,
7699 u64 *data)
7700{
7701 struct tg3 *tp = netdev_priv(dev);
7702
7703 memset(data, 0, sizeof(u64) * TG3_NUM_TEST);
7704
7705 if (tg3_test_nvram(tp) != 0) {
7706 etest->flags |= ETH_TEST_FL_FAILED;
7707 data[0] = 1;
7708 }
7709 if (tg3_test_link(tp) != 0) {
7710 etest->flags |= ETH_TEST_FL_FAILED;
7711 data[1] = 1;
7712 }
7713 if (etest->flags & ETH_TEST_FL_OFFLINE) {
7714 if (netif_running(dev))
7715 tg3_netif_stop(tp);
7716
7717 spin_lock_irq(&tp->lock);
7718 spin_lock(&tp->tx_lock);
7719
7720 tg3_halt(tp, RESET_KIND_SUSPEND, 1);
7721 tg3_nvram_lock(tp);
7722 tg3_halt_cpu(tp, RX_CPU_BASE);
7723 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
7724 tg3_halt_cpu(tp, TX_CPU_BASE);
7725 tg3_nvram_unlock(tp);
7726
7727 if (tg3_test_registers(tp) != 0) {
7728 etest->flags |= ETH_TEST_FL_FAILED;
7729 data[2] = 1;
7730 }
7731 if (tg3_test_memory(tp) != 0) {
7732 etest->flags |= ETH_TEST_FL_FAILED;
7733 data[3] = 1;
7734 }
7735 if (tg3_test_loopback(tp) != 0) {
7736 etest->flags |= ETH_TEST_FL_FAILED;
7737 data[4] = 1;
7738 }
7739
7740 spin_unlock(&tp->tx_lock);
7741 spin_unlock_irq(&tp->lock);
7742 if (tg3_test_interrupt(tp) != 0) {
7743 etest->flags |= ETH_TEST_FL_FAILED;
7744 data[5] = 1;
7745 }
7746 spin_lock_irq(&tp->lock);
7747 spin_lock(&tp->tx_lock);
7748
7749 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
7750 if (netif_running(dev)) {
7751 tp->tg3_flags |= TG3_FLAG_INIT_COMPLETE;
7752 tg3_init_hw(tp);
7753 tg3_netif_start(tp);
7754 }
7755 spin_unlock(&tp->tx_lock);
7756 spin_unlock_irq(&tp->lock);
7757 }
7758}
7759
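Once .self_test/.self_test_count are wired into tg3_ethtool_ops below, these tests become reachable through the standard ETHTOOL_TEST ioctl (which is what an `ethtool -t ethX offline` style request issues). The six u64 results come back in the same order as ethtool_test_keys, and data[2]..data[5] are only exercised when ETH_TEST_FL_OFFLINE was requested. A rough userspace sketch of that call follows; the interface name, the hard-coded test count and the error handling are deliberately minimal and would normally be queried from the driver.

#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <net/if.h>
#include <linux/ethtool.h>
#include <linux/sockios.h>

int main(int argc, char **argv)
{
	const char *ifname = (argc > 1) ? argv[1] : "eth0";	/* placeholder */
	int num_tests = 6;	/* tg3's TG3_NUM_TEST; real code queries it */
	struct ethtool_test *etest;
	struct ifreq ifr;
	int fd, i;

	fd = socket(AF_INET, SOCK_DGRAM, 0);
	if (fd < 0) {
		perror("socket");
		return 1;
	}

	etest = calloc(1, sizeof(*etest) + num_tests * sizeof(etest->data[0]));
	etest->cmd = ETHTOOL_TEST;
	etest->flags = ETH_TEST_FL_OFFLINE;	/* request the offline tests too */
	etest->len = num_tests;

	memset(&ifr, 0, sizeof(ifr));
	strncpy(ifr.ifr_name, ifname, IFNAMSIZ - 1);
	ifr.ifr_data = (char *)etest;

	if (ioctl(fd, SIOCETHTOOL, &ifr) < 0) {
		perror("SIOCETHTOOL");
		return 1;
	}

	printf("overall: %s\n",
	       (etest->flags & ETH_TEST_FL_FAILED) ? "FAILED" : "PASSED");
	for (i = 0; i < num_tests; i++)
		printf("  test %d: %llu\n", i,
		       (unsigned long long)etest->data[i]);

	free(etest);
	close(fd);
	return 0;
}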
7221static int tg3_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd) 7760static int tg3_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
7222{ 7761{
7223 struct mii_ioctl_data *data = if_mii(ifr); 7762 struct mii_ioctl_data *data = if_mii(ifr);
@@ -7331,6 +7870,8 @@ static struct ethtool_ops tg3_ethtool_ops = {
7331 .get_tso = ethtool_op_get_tso, 7870 .get_tso = ethtool_op_get_tso,
7332 .set_tso = tg3_set_tso, 7871 .set_tso = tg3_set_tso,
7333#endif 7872#endif
7873 .self_test_count = tg3_get_test_count,
7874 .self_test = tg3_self_test,
7334 .get_strings = tg3_get_strings, 7875 .get_strings = tg3_get_strings,
7335 .get_stats_count = tg3_get_stats_count, 7876 .get_stats_count = tg3_get_stats_count,
7336 .get_ethtool_stats = tg3_get_ethtool_stats, 7877 .get_ethtool_stats = tg3_get_ethtool_stats,
@@ -9478,7 +10019,7 @@ static int __devinit tg3_init_one(struct pci_dev *pdev,
9478 (tr32(WDMAC_MODE) & WDMAC_MODE_ENABLE)) { 10019 (tr32(WDMAC_MODE) & WDMAC_MODE_ENABLE)) {
9479 pci_save_state(tp->pdev); 10020 pci_save_state(tp->pdev);
9480 tw32(MEMARB_MODE, MEMARB_MODE_ENABLE); 10021 tw32(MEMARB_MODE, MEMARB_MODE_ENABLE);
9481 tg3_halt(tp, 1); 10022 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
9482 } 10023 }
9483 10024
9484 err = tg3_test_dma(tp); 10025 err = tg3_test_dma(tp);
@@ -9605,7 +10146,7 @@ static int tg3_suspend(struct pci_dev *pdev, pm_message_t state)
9605 10146
9606 spin_lock_irq(&tp->lock); 10147 spin_lock_irq(&tp->lock);
9607 spin_lock(&tp->tx_lock); 10148 spin_lock(&tp->tx_lock);
9608 tg3_halt(tp, 1); 10149 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
9609 spin_unlock(&tp->tx_lock); 10150 spin_unlock(&tp->tx_lock);
9610 spin_unlock_irq(&tp->lock); 10151 spin_unlock_irq(&tp->lock);
9611 10152
diff --git a/drivers/scsi/ahci.c b/drivers/scsi/ahci.c
index da5bd33d982d..fc5263c6b102 100644
--- a/drivers/scsi/ahci.c
+++ b/drivers/scsi/ahci.c
@@ -32,6 +32,7 @@
32#include <linux/delay.h> 32#include <linux/delay.h>
33#include <linux/interrupt.h> 33#include <linux/interrupt.h>
34#include <linux/sched.h> 34#include <linux/sched.h>
35#include <linux/dma-mapping.h>
35#include "scsi.h" 36#include "scsi.h"
36#include <scsi/scsi_host.h> 37#include <scsi/scsi_host.h>
37#include <linux/libata.h> 38#include <linux/libata.h>
@@ -289,6 +290,8 @@ static void ahci_host_stop(struct ata_host_set *host_set)
289{ 290{
290 struct ahci_host_priv *hpriv = host_set->private_data; 291 struct ahci_host_priv *hpriv = host_set->private_data;
291 kfree(hpriv); 292 kfree(hpriv);
293
294 ata_host_stop(host_set);
292} 295}
293 296
294static int ahci_port_start(struct ata_port *ap) 297static int ahci_port_start(struct ata_port *ap)
diff --git a/drivers/scsi/aic7xxx/aic79xx_osm.c b/drivers/scsi/aic7xxx/aic79xx_osm.c
index 550c9921691a..7c02b7dc7098 100644
--- a/drivers/scsi/aic7xxx/aic79xx_osm.c
+++ b/drivers/scsi/aic7xxx/aic79xx_osm.c
@@ -2488,7 +2488,7 @@ ahd_linux_dv_thread(void *data)
2488 sprintf(current->comm, "ahd_dv_%d", ahd->unit); 2488 sprintf(current->comm, "ahd_dv_%d", ahd->unit);
2489#else 2489#else
2490 daemonize("ahd_dv_%d", ahd->unit); 2490 daemonize("ahd_dv_%d", ahd->unit);
2491 current->flags |= PF_FREEZE; 2491 current->flags |= PF_NOFREEZE;
2492#endif 2492#endif
2493 unlock_kernel(); 2493 unlock_kernel();
2494 2494
diff --git a/drivers/scsi/ata_piix.c b/drivers/scsi/ata_piix.c
index 3867f91ef8c7..54c52349adc5 100644
--- a/drivers/scsi/ata_piix.c
+++ b/drivers/scsi/ata_piix.c
@@ -153,6 +153,7 @@ static struct ata_port_operations piix_pata_ops = {
153 153
154 .port_start = ata_port_start, 154 .port_start = ata_port_start,
155 .port_stop = ata_port_stop, 155 .port_stop = ata_port_stop,
156 .host_stop = ata_host_stop,
156}; 157};
157 158
158static struct ata_port_operations piix_sata_ops = { 159static struct ata_port_operations piix_sata_ops = {
@@ -180,6 +181,7 @@ static struct ata_port_operations piix_sata_ops = {
180 181
181 .port_start = ata_port_start, 182 .port_start = ata_port_start,
182 .port_stop = ata_port_stop, 183 .port_stop = ata_port_stop,
184 .host_stop = ata_host_stop,
183}; 185};
184 186
185static struct ata_port_info piix_port_info[] = { 187static struct ata_port_info piix_port_info[] = {
diff --git a/drivers/scsi/libata-core.c b/drivers/scsi/libata-core.c
index 63d3f70d06e1..30a88f0e7bd6 100644
--- a/drivers/scsi/libata-core.c
+++ b/drivers/scsi/libata-core.c
@@ -3322,6 +3322,13 @@ void ata_port_stop (struct ata_port *ap)
3322 dma_free_coherent(dev, ATA_PRD_TBL_SZ, ap->prd, ap->prd_dma); 3322 dma_free_coherent(dev, ATA_PRD_TBL_SZ, ap->prd, ap->prd_dma);
3323} 3323}
3324 3324
3325void ata_host_stop (struct ata_host_set *host_set)
3326{
3327 if (host_set->mmio_base)
3328 iounmap(host_set->mmio_base);
3329}
3330
3331
3325/** 3332/**
3326 * ata_host_remove - Unregister SCSI host structure with upper layers 3333 * ata_host_remove - Unregister SCSI host structure with upper layers
3327 * @ap: Port to unregister 3334 * @ap: Port to unregister
@@ -3878,10 +3885,6 @@ void ata_pci_remove_one (struct pci_dev *pdev)
3878 } 3885 }
3879 3886
3880 free_irq(host_set->irq, host_set); 3887 free_irq(host_set->irq, host_set);
3881 if (host_set->ops->host_stop)
3882 host_set->ops->host_stop(host_set);
3883 if (host_set->mmio_base)
3884 iounmap(host_set->mmio_base);
3885 3888
3886 for (i = 0; i < host_set->n_ports; i++) { 3889 for (i = 0; i < host_set->n_ports; i++) {
3887 ap = host_set->ports[i]; 3890 ap = host_set->ports[i];
@@ -3900,6 +3903,9 @@ void ata_pci_remove_one (struct pci_dev *pdev)
3900 scsi_host_put(ap->host); 3903 scsi_host_put(ap->host);
3901 } 3904 }
3902 3905
3906 if (host_set->ops->host_stop)
3907 host_set->ops->host_stop(host_set);
3908
3903 kfree(host_set); 3909 kfree(host_set);
3904 3910
3905 pci_release_regions(pdev); 3911 pci_release_regions(pdev);
@@ -3997,6 +4003,7 @@ EXPORT_SYMBOL_GPL(ata_chk_err);
3997EXPORT_SYMBOL_GPL(ata_exec_command); 4003EXPORT_SYMBOL_GPL(ata_exec_command);
3998EXPORT_SYMBOL_GPL(ata_port_start); 4004EXPORT_SYMBOL_GPL(ata_port_start);
3999EXPORT_SYMBOL_GPL(ata_port_stop); 4005EXPORT_SYMBOL_GPL(ata_port_stop);
4006EXPORT_SYMBOL_GPL(ata_host_stop);
4000EXPORT_SYMBOL_GPL(ata_interrupt); 4007EXPORT_SYMBOL_GPL(ata_interrupt);
4001EXPORT_SYMBOL_GPL(ata_qc_prep); 4008EXPORT_SYMBOL_GPL(ata_qc_prep);
4002EXPORT_SYMBOL_GPL(ata_bmdma_setup); 4009EXPORT_SYMBOL_GPL(ata_bmdma_setup);
diff --git a/drivers/scsi/libata.h b/drivers/scsi/libata.h
index 6518226b8f87..d90430bbb0de 100644
--- a/drivers/scsi/libata.h
+++ b/drivers/scsi/libata.h
@@ -26,7 +26,7 @@
26#define __LIBATA_H__ 26#define __LIBATA_H__
27 27
28#define DRV_NAME "libata" 28#define DRV_NAME "libata"
29#define DRV_VERSION "1.10" /* must be exactly four chars */ 29#define DRV_VERSION "1.11" /* must be exactly four chars */
30 30
31struct ata_scsi_args { 31struct ata_scsi_args {
32 u16 *id; 32 u16 *id;
diff --git a/drivers/scsi/sata_nv.c b/drivers/scsi/sata_nv.c
index 69009f853a49..b0403ccd8a25 100644
--- a/drivers/scsi/sata_nv.c
+++ b/drivers/scsi/sata_nv.c
@@ -329,6 +329,8 @@ static void nv_host_stop (struct ata_host_set *host_set)
329 host->host_desc->disable_hotplug(host_set); 329 host->host_desc->disable_hotplug(host_set);
330 330
331 kfree(host); 331 kfree(host);
332
333 ata_host_stop(host_set);
332} 334}
333 335
334static int nv_init_one (struct pci_dev *pdev, const struct pci_device_id *ent) 336static int nv_init_one (struct pci_dev *pdev, const struct pci_device_id *ent)
diff --git a/drivers/scsi/sata_promise.c b/drivers/scsi/sata_promise.c
index c4e9e0298122..b18c90582e67 100644
--- a/drivers/scsi/sata_promise.c
+++ b/drivers/scsi/sata_promise.c
@@ -122,6 +122,7 @@ static struct ata_port_operations pdc_ata_ops = {
122 .scr_write = pdc_sata_scr_write, 122 .scr_write = pdc_sata_scr_write,
123 .port_start = pdc_port_start, 123 .port_start = pdc_port_start,
124 .port_stop = pdc_port_stop, 124 .port_stop = pdc_port_stop,
125 .host_stop = ata_host_stop,
125}; 126};
126 127
127static struct ata_port_info pdc_port_info[] = { 128static struct ata_port_info pdc_port_info[] = {
diff --git a/drivers/scsi/sata_qstor.c b/drivers/scsi/sata_qstor.c
index dfd362104717..1383e8a28d72 100644
--- a/drivers/scsi/sata_qstor.c
+++ b/drivers/scsi/sata_qstor.c
@@ -536,6 +536,8 @@ static void qs_host_stop(struct ata_host_set *host_set)
536 536
537 writeb(0, mmio_base + QS_HCT_CTRL); /* disable host interrupts */ 537 writeb(0, mmio_base + QS_HCT_CTRL); /* disable host interrupts */
538 writeb(QS_CNFG3_GSRST, mmio_base + QS_HCF_CNFG3); /* global reset */ 538 writeb(QS_CNFG3_GSRST, mmio_base + QS_HCF_CNFG3); /* global reset */
539
540 ata_host_stop(host_set);
539} 541}
540 542
541static void qs_host_init(unsigned int chip_id, struct ata_probe_ent *pe) 543static void qs_host_init(unsigned int chip_id, struct ata_probe_ent *pe)
diff --git a/drivers/scsi/sata_sil.c b/drivers/scsi/sata_sil.c
index 2b2ff48be396..238580d244e6 100644
--- a/drivers/scsi/sata_sil.c
+++ b/drivers/scsi/sata_sil.c
@@ -161,6 +161,7 @@ static struct ata_port_operations sil_ops = {
161 .scr_write = sil_scr_write, 161 .scr_write = sil_scr_write,
162 .port_start = ata_port_start, 162 .port_start = ata_port_start,
163 .port_stop = ata_port_stop, 163 .port_stop = ata_port_stop,
164 .host_stop = ata_host_stop,
164}; 165};
165 166
166static struct ata_port_info sil_port_info[] = { 167static struct ata_port_info sil_port_info[] = {
diff --git a/drivers/scsi/sata_sis.c b/drivers/scsi/sata_sis.c
index 5105ddd08447..e418b89c6b9d 100644
--- a/drivers/scsi/sata_sis.c
+++ b/drivers/scsi/sata_sis.c
@@ -114,6 +114,7 @@ static struct ata_port_operations sis_ops = {
114 .scr_write = sis_scr_write, 114 .scr_write = sis_scr_write,
115 .port_start = ata_port_start, 115 .port_start = ata_port_start,
116 .port_stop = ata_port_stop, 116 .port_stop = ata_port_stop,
117 .host_stop = ata_host_stop,
117}; 118};
118 119
119static struct ata_port_info sis_port_info = { 120static struct ata_port_info sis_port_info = {
diff --git a/drivers/scsi/sata_svw.c b/drivers/scsi/sata_svw.c
index 05075bd3a893..edef1fa969fc 100644
--- a/drivers/scsi/sata_svw.c
+++ b/drivers/scsi/sata_svw.c
@@ -313,6 +313,7 @@ static struct ata_port_operations k2_sata_ops = {
313 .scr_write = k2_sata_scr_write, 313 .scr_write = k2_sata_scr_write,
314 .port_start = ata_port_start, 314 .port_start = ata_port_start,
315 .port_stop = ata_port_stop, 315 .port_stop = ata_port_stop,
316 .host_stop = ata_host_stop,
316}; 317};
317 318
318static void k2_sata_setup_port(struct ata_ioports *port, unsigned long base) 319static void k2_sata_setup_port(struct ata_ioports *port, unsigned long base)
diff --git a/drivers/scsi/sata_sx4.c b/drivers/scsi/sata_sx4.c
index 70118650c461..140cea05de3f 100644
--- a/drivers/scsi/sata_sx4.c
+++ b/drivers/scsi/sata_sx4.c
@@ -245,6 +245,8 @@ static void pdc20621_host_stop(struct ata_host_set *host_set)
245 245
246 iounmap(dimm_mmio); 246 iounmap(dimm_mmio);
247 kfree(hpriv); 247 kfree(hpriv);
248
249 ata_host_stop(host_set);
248} 250}
249 251
250static int pdc_port_start(struct ata_port *ap) 252static int pdc_port_start(struct ata_port *ap)
diff --git a/drivers/scsi/sata_uli.c b/drivers/scsi/sata_uli.c
index 0bff4f475f26..a71fb54eebd3 100644
--- a/drivers/scsi/sata_uli.c
+++ b/drivers/scsi/sata_uli.c
@@ -113,6 +113,7 @@ static struct ata_port_operations uli_ops = {
113 113
114 .port_start = ata_port_start, 114 .port_start = ata_port_start,
115 .port_stop = ata_port_stop, 115 .port_stop = ata_port_stop,
116 .host_stop = ata_host_stop,
116}; 117};
117 118
118static struct ata_port_info uli_port_info = { 119static struct ata_port_info uli_port_info = {
diff --git a/drivers/scsi/sata_via.c b/drivers/scsi/sata_via.c
index 3a7830667277..f43183c19a12 100644
--- a/drivers/scsi/sata_via.c
+++ b/drivers/scsi/sata_via.c
@@ -134,6 +134,7 @@ static struct ata_port_operations svia_sata_ops = {
134 134
135 .port_start = ata_port_start, 135 .port_start = ata_port_start,
136 .port_stop = ata_port_stop, 136 .port_stop = ata_port_stop,
137 .host_stop = ata_host_stop,
137}; 138};
138 139
139static struct ata_port_info svia_port_info = { 140static struct ata_port_info svia_port_info = {
diff --git a/drivers/scsi/sata_vsc.c b/drivers/scsi/sata_vsc.c
index 2c28f0ad73c2..c5e09dc6f3de 100644
--- a/drivers/scsi/sata_vsc.c
+++ b/drivers/scsi/sata_vsc.c
@@ -21,6 +21,7 @@
21#include <linux/blkdev.h> 21#include <linux/blkdev.h>
22#include <linux/delay.h> 22#include <linux/delay.h>
23#include <linux/interrupt.h> 23#include <linux/interrupt.h>
24#include <linux/dma-mapping.h>
24#include "scsi.h" 25#include "scsi.h"
25#include <scsi/scsi_host.h> 26#include <scsi/scsi_host.h>
26#include <linux/libata.h> 27#include <linux/libata.h>
@@ -230,6 +231,7 @@ static struct ata_port_operations vsc_sata_ops = {
230 .scr_write = vsc_sata_scr_write, 231 .scr_write = vsc_sata_scr_write,
231 .port_start = ata_port_start, 232 .port_start = ata_port_start,
232 .port_stop = ata_port_stop, 233 .port_stop = ata_port_stop,
234 .host_stop = ata_host_stop,
233}; 235};
234 236
235static void __devinit vsc_sata_setup_port(struct ata_ioports *port, unsigned long base) 237static void __devinit vsc_sata_setup_port(struct ata_ioports *port, unsigned long base)
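The libata-core change earlier in this diff adds ata_host_stop(), which simply iounmaps host_set->mmio_base, and moves the ->host_stop() call in ata_pci_remove_one() to after the per-port teardown; every low-level driver touched here then either points .host_stop straight at ata_host_stop or calls it at the end of its own hook. A minimal skeleton of that second pattern, modelled on the ahci/sata_nv/sata_sx4 hunks; this is not compilable on its own since it depends on libata's types, and the example_* names are placeholders.

static void example_host_stop(struct ata_host_set *host_set)
{
	struct example_host_priv *hpriv = host_set->private_data;

	/* driver-private teardown first ... */
	kfree(hpriv);

	/* ... then let libata unmap host_set->mmio_base */
	ata_host_stop(host_set);
}

static struct ata_port_operations example_ops = {
	/* ... */
	.port_start	= ata_port_start,
	.port_stop	= ata_port_stop,
	.host_stop	= example_host_stop,	/* or ata_host_stop directly */
};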
diff --git a/drivers/usb/media/pwc/pwc-ctrl.c b/drivers/usb/media/pwc/pwc-ctrl.c
index 3e1e4fe20d85..53099190952c 100644
--- a/drivers/usb/media/pwc/pwc-ctrl.c
+++ b/drivers/usb/media/pwc/pwc-ctrl.c
@@ -48,8 +48,6 @@
48#include "pwc-uncompress.h" 48#include "pwc-uncompress.h"
49#include "pwc-kiara.h" 49#include "pwc-kiara.h"
50#include "pwc-timon.h" 50#include "pwc-timon.h"
51#include "pwc-dec1.h"
52#include "pwc-dec23.h"
53 51
54/* Request types: video */ 52/* Request types: video */
55#define SET_LUM_CTL 0x01 53#define SET_LUM_CTL 0x01
diff --git a/drivers/usb/media/pwc/pwc-uncompress.c b/drivers/usb/media/pwc/pwc-uncompress.c
index c596083f06ba..bc3b1635eab0 100644
--- a/drivers/usb/media/pwc/pwc-uncompress.c
+++ b/drivers/usb/media/pwc/pwc-uncompress.c
@@ -29,8 +29,6 @@
29 29
30#include "pwc.h" 30#include "pwc.h"
31#include "pwc-uncompress.h" 31#include "pwc-uncompress.h"
32#include "pwc-dec1.h"
33#include "pwc-dec23.h"
34 32
35int pwc_decompress(struct pwc_device *pdev) 33int pwc_decompress(struct pwc_device *pdev)
36{ 34{
diff --git a/drivers/video/intelfb/intelfbdrv.c b/drivers/video/intelfb/intelfbdrv.c
index 549e22939260..25f9a9a65c24 100644
--- a/drivers/video/intelfb/intelfbdrv.c
+++ b/drivers/video/intelfb/intelfbdrv.c
@@ -228,17 +228,17 @@ MODULE_DESCRIPTION(
228MODULE_LICENSE("Dual BSD/GPL"); 228MODULE_LICENSE("Dual BSD/GPL");
229MODULE_DEVICE_TABLE(pci, intelfb_pci_table); 229MODULE_DEVICE_TABLE(pci, intelfb_pci_table);
230 230
231static int accel __initdata = 1; 231static int accel = 1;
232static int vram __initdata = 4; 232static int vram = 4;
233static int hwcursor __initdata = 1; 233static int hwcursor = 1;
234static int mtrr __initdata = 1; 234static int mtrr = 1;
235static int fixed __initdata = 0; 235static int fixed = 0;
236static int noinit __initdata = 0; 236static int noinit = 0;
237static int noregister __initdata = 0; 237static int noregister = 0;
238static int probeonly __initdata = 0; 238static int probeonly = 0;
239static int idonly __initdata = 0; 239static int idonly = 0;
240static int bailearly __initdata = 0; 240static int bailearly = 0;
241static char *mode __initdata = NULL; 241static char *mode = NULL;
242 242
243module_param(accel, bool, S_IRUGO); 243module_param(accel, bool, S_IRUGO);
244MODULE_PARM_DESC(accel, "Enable console acceleration"); 244MODULE_PARM_DESC(accel, "Enable console acceleration");