author	Andy Henroid <andrew.d.henroid@intel.com>	2008-10-09 14:45:22 -0400
committer	Len Brown <len.brown@intel.com>	2008-10-21 23:58:41 -0400
commit	27471fdb32e77ecb92f09d4ac5757785b4dc33bc (patch)
tree	2895d9918c019f48db32d479a2fc9f8725a3ed6e /drivers/idle/i7300_idle.c
parent	c7d87d79d14cecab7a34dedf1b133377cf5a0203 (diff)
i7300_idle driver v1.55
The Intel 7300 Memory Controller supports dynamic throttling of memory
which can be used to save power when the system is idle. This driver does
the memory throttling when all CPUs are idle on such a system.

Refer to the "Intel 7300 Memory Controller Hub (MCH)" datasheet for the
config space description.

Signed-off-by: Andy Henroid <andrew.d.henroid@intel.com>
Signed-off-by: Len Brown <len.brown@intel.com>
Signed-off-by: Venkatesh Pallipadi <venkatesh.pallipadi@intel.com>
Diffstat (limited to 'drivers/idle/i7300_idle.c')
-rw-r--r--	drivers/idle/i7300_idle.c	674
1 file changed, 674 insertions(+), 0 deletions(-)
diff --git a/drivers/idle/i7300_idle.c b/drivers/idle/i7300_idle.c
new file mode 100644
index 00000000000..59d1bbc3cd3
--- /dev/null
+++ b/drivers/idle/i7300_idle.c
@@ -0,0 +1,674 @@
/*
 * (C) Copyright 2008 Intel Corporation
 * Authors:
 * Andy Henroid <andrew.d.henroid@intel.com>
 * Venkatesh Pallipadi <venkatesh.pallipadi@intel.com>
 */

/*
 * Save DIMM power on Intel 7300-based platforms when all CPUs/cores
 * are idle, using the DIMM thermal throttling capability.
 *
 * This driver depends on the Intel integrated DMA controller (I/O AT).
 * If the driver for I/O AT (drivers/dma/ioatdma*) is also enabled,
 * this driver should work cooperatively.
 */

/* #define DEBUG */

#include <linux/module.h>
#include <linux/pci.h>
#include <linux/sched.h>
#include <linux/notifier.h>
#include <linux/cpumask.h>
#include <linux/ktime.h>
#include <linux/delay.h>
#include <linux/debugfs.h>
#include <linux/stop_machine.h>

#include <asm/idle.h>

#include "../dma/ioatdma_hw.h"
#include "../dma/ioatdma_registers.h"

#define I7300_IDLE_DRIVER_VERSION	"1.55"
#define I7300_PRINT			"i7300_idle:"

static unsigned int debug;	/* unsigned to match the uint module_param type */
module_param_named(debug, debug, uint, 0644);
MODULE_PARM_DESC(debug, "Enable debug printks in this driver");

#define dprintk(fmt, arg...) \
	do { if (debug) printk(KERN_INFO I7300_PRINT fmt, ##arg); } while (0)

/*
 * Value to set THRTLOW to when initiating throttling
 *   0 = No throttling
 *   1 = Throttle when > 4 activations per eval window (Maximum throttling)
 *   2 = Throttle when > 8 activations
 * 168 = Throttle when > 168 activations (Minimum throttling)
 */
#define MAX_THRTLWLIMIT	168
static uint i7300_idle_thrtlowlm = 1;
module_param_named(thrtlwlimit, i7300_idle_thrtlowlm, uint, 0644);
MODULE_PARM_DESC(thrtlwlimit,
	"Value for THRTLOWLM activation field "
	"(0 = disable throttle, 1 = Max throttle, 168 = Min throttle)");

/*
 * Simple invocation and duration statistics
 */
static unsigned long total_starts;
static unsigned long total_us;

#ifdef DEBUG
static unsigned long past_skip;
#endif

static struct pci_dev *fbd_dev;

static spinlock_t i7300_idle_lock;
static int i7300_idle_active;

static u8 i7300_idle_thrtctl_saved;
static u8 i7300_idle_thrtlow_saved;
static u32 i7300_idle_mc_saved;

static cpumask_t idle_cpumask;
static ktime_t start_ktime;
static unsigned long avg_idle_us;

static struct dentry *debugfs_dir;

/* Begin: I/O AT Helper routines */

#define IOAT_CHANBASE(ioat_ctl, chan) ((ioat_ctl) + 0x80 + 0x80 * (chan))
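/*
 * Illustrative arithmetic for the macro above: with the 0x80-byte register
 * stride, channel 0's registers start at ioat_ctl + 0x80 and channel 2's
 * at ioat_ctl + 0x180.
 */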
/* Snoop control (disable snoops when coherency is not important) */
#define IOAT_DESC_SADDR_SNP_CTL	(1UL << 1)
#define IOAT_DESC_DADDR_SNP_CTL	(1UL << 2)
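/*
 * These bits go into the descriptor 'ctl' word; setting them marks the
 * source and destination of the dummy copy as non-snooped, so the
 * background transfers avoid disturbing CPU caches.
 */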

static struct pci_dev *ioat_dev;
static struct ioat_dma_descriptor *ioat_desc; /* I/O AT desc & data (1 page) */
static unsigned long ioat_desc_phys;
static u8 *ioat_iomap; /* I/O AT memory-mapped control regs (aka CB_BAR) */
static u8 *ioat_chanbase;

/* Start I/O AT memory copy */
static int i7300_idle_ioat_start(void)
{
	u32 err;
	/* Clear error (due to circular descriptor pointer) */
	err = readl(ioat_chanbase + IOAT_CHANERR_OFFSET);
	if (err)
		writel(err, ioat_chanbase + IOAT_CHANERR_OFFSET);

	writeb(IOAT_CHANCMD_START, ioat_chanbase + IOAT1_CHANCMD_OFFSET);
	return 0;
}

/* Stop I/O AT memory copy */
static void i7300_idle_ioat_stop(void)
{
	int i;
	u8 sts;

	for (i = 0; i < 5; i++) {
		writeb(IOAT_CHANCMD_RESET,
			ioat_chanbase + IOAT1_CHANCMD_OFFSET);

		udelay(10);

		sts = readq(ioat_chanbase + IOAT1_CHANSTS_OFFSET) &
			IOAT_CHANSTS_DMA_TRANSFER_STATUS;

		if (sts != IOAT_CHANSTS_DMA_TRANSFER_STATUS_ACTIVE)
			break;
	}

	if (i == 5)
		dprintk("failed to suspend+reset I/O AT after 5 retries\n");
}

/* Test I/O AT by copying 1024 bytes from offset 2k to offset 1k */
static int __init i7300_idle_ioat_selftest(u8 *ctl,
		struct ioat_dma_descriptor *desc, unsigned long desc_phys)
{
	u64 chan_sts;

	memset(desc, 0, 2048);
	memset((u8 *) desc + 2048, 0xab, 1024);

	desc[0].size = 1024;
	desc[0].ctl = 0;
	desc[0].src_addr = desc_phys + 2048;
	desc[0].dst_addr = desc_phys + 1024;
	desc[0].next = 0;

	writeb(IOAT_CHANCMD_RESET, ioat_chanbase + IOAT1_CHANCMD_OFFSET);
	writeb(IOAT_CHANCMD_START, ioat_chanbase + IOAT1_CHANCMD_OFFSET);

	udelay(1000);

	chan_sts = readq(ioat_chanbase + IOAT1_CHANSTS_OFFSET) &
			IOAT_CHANSTS_DMA_TRANSFER_STATUS;

	if (chan_sts != IOAT_CHANSTS_DMA_TRANSFER_STATUS_DONE) {
		/* Not complete, reset the channel */
		writeb(IOAT_CHANCMD_RESET,
			ioat_chanbase + IOAT1_CHANCMD_OFFSET);
		return -1;
	}

	if (*(u32 *) ((u8 *) desc + 3068) != 0xabababab ||
	    *(u32 *) ((u8 *) desc + 2044) != 0xabababab) {
		dprintk("Data values src 0x%x, dest 0x%x, memset 0x%x\n",
			*(u32 *) ((u8 *) desc + 2048),
			*(u32 *) ((u8 *) desc + 1024),
			*(u32 *) ((u8 *) desc + 3072));
		return -1;
	}
	return 0;
}

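/*
 * There is no struct device for the I/O AT function here, since this
 * driver does not bind to it through the usual driver model; a dummy
 * device with a 64-bit coherent DMA mask stands in for it when calling
 * dma_alloc_coherent() below.
 */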
static struct device dummy_dma_dev = {
	.bus_id = "fallback device",
	.coherent_dma_mask = DMA_64BIT_MASK,
	.dma_mask = &dummy_dma_dev.coherent_dma_mask,
};

/*
 * Set up and initialize I/O AT.
 *
 * This driver needs I/O AT because the throttling takes effect only when
 * there is some memory activity. We use I/O AT to set up a dummy copy,
 * while all CPUs go idle and memory is throttled.
 */
static int __init i7300_idle_ioat_init(void)
{
	u8 ver, chan_count, ioat_chan;
	u16 chan_ctl;

	ioat_iomap = (u8 *) ioremap_nocache(pci_resource_start(ioat_dev, 0),
					    pci_resource_len(ioat_dev, 0));

	if (!ioat_iomap) {
		printk(KERN_ERR I7300_PRINT "failed to map I/O AT registers\n");
		goto err_ret;
	}

	ver = readb(ioat_iomap + IOAT_VER_OFFSET);
	if (ver != IOAT_VER_1_2) {
		printk(KERN_ERR I7300_PRINT "unknown I/O AT version (%u.%u)\n",
			ver >> 4, ver & 0xf);
		goto err_unmap;
	}

	chan_count = readb(ioat_iomap + IOAT_CHANCNT_OFFSET);
	if (!chan_count) {
		printk(KERN_ERR I7300_PRINT "unexpected # of I/O AT channels "
			"(%u)\n",
			chan_count);
		goto err_unmap;
	}

	ioat_chan = chan_count - 1;
	ioat_chanbase = IOAT_CHANBASE(ioat_iomap, ioat_chan);

	chan_ctl = readw(ioat_chanbase + IOAT_CHANCTRL_OFFSET);
	if (chan_ctl & IOAT_CHANCTRL_CHANNEL_IN_USE) {
		printk(KERN_ERR I7300_PRINT "channel %d in use\n", ioat_chan);
		goto err_unmap;
	}

	writew(IOAT_CHANCTRL_CHANNEL_IN_USE,
		ioat_chanbase + IOAT_CHANCTRL_OFFSET);

	ioat_desc = (struct ioat_dma_descriptor *)dma_alloc_coherent(
			&dummy_dma_dev, 4096,
			(dma_addr_t *)&ioat_desc_phys, GFP_KERNEL);
	if (!ioat_desc) {
		printk(KERN_ERR I7300_PRINT "failed to allocate I/O AT desc\n");
		goto err_mark_unused;
	}

	writel(ioat_desc_phys & 0xffffffffUL,
	       ioat_chanbase + IOAT1_CHAINADDR_OFFSET_LOW);
	writel(ioat_desc_phys >> 32,
	       ioat_chanbase + IOAT1_CHAINADDR_OFFSET_HIGH);

	if (i7300_idle_ioat_selftest(ioat_iomap, ioat_desc, ioat_desc_phys)) {
		printk(KERN_ERR I7300_PRINT "I/O AT self-test failed\n");
		goto err_free;
	}

	/* Set up the circular I/O AT descriptor chain */
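	/*
	 * Two descriptors point at each other: desc[0].next refers to
	 * desc[1] and desc[1].next back to desc[0]. Once started, the
	 * engine cycles through them indefinitely, copying 128 bytes each
	 * pass, which provides the memory activity the throttling hardware
	 * needs while the CPUs are idle.
	 */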
	ioat_desc[0].ctl = IOAT_DESC_SADDR_SNP_CTL | IOAT_DESC_DADDR_SNP_CTL;
	ioat_desc[0].src_addr = ioat_desc_phys + 2048;
	ioat_desc[0].dst_addr = ioat_desc_phys + 3072;
	ioat_desc[0].size = 128;
	ioat_desc[0].next = ioat_desc_phys + sizeof(struct ioat_dma_descriptor);

	ioat_desc[1].ctl = ioat_desc[0].ctl;
	ioat_desc[1].src_addr = ioat_desc[0].src_addr;
	ioat_desc[1].dst_addr = ioat_desc[0].dst_addr;
	ioat_desc[1].size = ioat_desc[0].size;
	ioat_desc[1].next = ioat_desc_phys;

	return 0;

err_free:
	dma_free_coherent(&dummy_dma_dev, 4096, (void *)ioat_desc, 0);
err_mark_unused:
	writew(0, ioat_chanbase + IOAT_CHANCTRL_OFFSET);
err_unmap:
	iounmap(ioat_iomap);
err_ret:
	return -ENODEV;
}

/* Clean up I/O AT */
static void __exit i7300_idle_ioat_exit(void)
{
	int i;
	u64 chan_sts;

	i7300_idle_ioat_stop();

	/* Wait a while for the channel to halt before releasing */
	for (i = 0; i < 10; i++) {
		writeb(IOAT_CHANCMD_RESET,
			ioat_chanbase + IOAT1_CHANCMD_OFFSET);

		chan_sts = readq(ioat_chanbase + IOAT1_CHANSTS_OFFSET) &
			IOAT_CHANSTS_DMA_TRANSFER_STATUS;

		if (chan_sts != IOAT_CHANSTS_DMA_TRANSFER_STATUS_ACTIVE) {
			writew(0, ioat_chanbase + IOAT_CHANCTRL_OFFSET);
			break;
		}
		udelay(1000);
	}

	chan_sts = readq(ioat_chanbase + IOAT1_CHANSTS_OFFSET) &
			IOAT_CHANSTS_DMA_TRANSFER_STATUS;

	/*
	 * We tried to reset multiple times. If the I/O AT channel is still
	 * active, flag an error and return without cleanup. A memory leak
	 * is better than random corruption in that extreme error situation.
	 */
	if (chan_sts == IOAT_CHANSTS_DMA_TRANSFER_STATUS_ACTIVE) {
		printk(KERN_ERR I7300_PRINT "Unable to stop I/O AT channel."
			" Not freeing resources\n");
		return;
	}

	dma_free_coherent(&dummy_dma_dev, 4096, (void *)ioat_desc, 0);
	iounmap(ioat_iomap);
}

/* End: I/O AT Helper routines */

#define DIMM_THRTLOW	0x64
#define DIMM_THRTCTL	0x67
#define DIMM_THRTCTL_THRMHUNT	(1UL << 0)
#define DIMM_MC		0x40
#define DIMM_GTW_MODE	(1UL << 17)
#define DIMM_GBLACT	0x60
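/*
 * The offsets above are registers in the FBD memory controller's PCI
 * config space; the "Intel 7300 Memory Controller Hub (MCH)" datasheet
 * describes the config space layout.
 */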

/*
 * Keep track of an exponentially decaying average of recent idle durations.
 * The latest duration gets DURATION_WEIGHT_PCT percent of the weight in
 * this average, with the old average getting the remaining weight.
 *
 * A high weight emphasizes recent history; a low weight includes older
 * history.
 */
#define DURATION_WEIGHT_PCT 55
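/*
 * Worked example: with a 55% weight, a running average of 200 us and a
 * latest idle duration of 50 us yield (45 * 200 + 55 * 50) / 100 = 117 us
 * (integer math) as the new average.
 */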

/*
 * When the decaying average of recent durations or the predicted duration
 * of the next timer interrupt is shorter than duration_threshold, the
 * driver will decline to throttle.
 */
#define DURATION_THRESHOLD_US 100

/* Store DIMM thermal throttle configuration */
static int i7300_idle_thrt_save(void)
{
	u32 new_mc_val;
	u8 gblactlm;

	pci_read_config_byte(fbd_dev, DIMM_THRTCTL, &i7300_idle_thrtctl_saved);
	pci_read_config_byte(fbd_dev, DIMM_THRTLOW, &i7300_idle_thrtlow_saved);
	pci_read_config_dword(fbd_dev, DIMM_MC, &i7300_idle_mc_saved);
	/*
	 * Make sure we have Global Throttling Window Mode set to have a
	 * "short" window. This (mostly) works around an issue where
	 * throttling persists until the end of the global throttling window
	 * size. On the tested system, this was resulting in a maximum of
	 * 64 ms to exit throttling (average 32 ms). The actual numbers
	 * depend on system frequencies. Setting the short window reduces
	 * this by a factor of 4096.
	 *
	 * We will only do this if the system is set for
	 * unlimited-activations while in open-loop throttling (i.e., when
	 * Global Activation Throttle Limit is zero).
	 */
	pci_read_config_byte(fbd_dev, DIMM_GBLACT, &gblactlm);
	dprintk("thrtctl_saved = 0x%02x, thrtlow_saved = 0x%02x\n",
		i7300_idle_thrtctl_saved,
		i7300_idle_thrtlow_saved);
	dprintk("mc_saved = 0x%08x, gblactlm = 0x%02x\n",
		i7300_idle_mc_saved,
		gblactlm);
	if (gblactlm == 0) {
		new_mc_val = i7300_idle_mc_saved | DIMM_GTW_MODE;
		pci_write_config_dword(fbd_dev, DIMM_MC, new_mc_val);
		return 0;
	} else {
		dprintk("could not set GTW_MODE = 1 (OLTT enabled)\n");
		return -ENODEV;
	}
}

/* Restore DIMM thermal throttle configuration */
static void i7300_idle_thrt_restore(void)
{
	pci_write_config_dword(fbd_dev, DIMM_MC, i7300_idle_mc_saved);
	pci_write_config_byte(fbd_dev, DIMM_THRTLOW, i7300_idle_thrtlow_saved);
	pci_write_config_byte(fbd_dev, DIMM_THRTCTL, i7300_idle_thrtctl_saved);
}

/* Enable DIMM thermal throttling */
static void i7300_idle_start(void)
{
	u8 new_ctl;
	u8 limit;

	new_ctl = i7300_idle_thrtctl_saved & ~DIMM_THRTCTL_THRMHUNT;
	pci_write_config_byte(fbd_dev, DIMM_THRTCTL, new_ctl);

	limit = i7300_idle_thrtlowlm;
	if (unlikely(limit > MAX_THRTLWLIMIT))
		limit = MAX_THRTLWLIMIT;

	pci_write_config_byte(fbd_dev, DIMM_THRTLOW, limit);

	new_ctl = i7300_idle_thrtctl_saved | DIMM_THRTCTL_THRMHUNT;
	pci_write_config_byte(fbd_dev, DIMM_THRTCTL, new_ctl);
}

/* Disable DIMM thermal throttling */
static void i7300_idle_stop(void)
{
	u8 new_ctl;
	u8 got_ctl;

	new_ctl = i7300_idle_thrtctl_saved & ~DIMM_THRTCTL_THRMHUNT;
	pci_write_config_byte(fbd_dev, DIMM_THRTCTL, new_ctl);

	pci_write_config_byte(fbd_dev, DIMM_THRTLOW, i7300_idle_thrtlow_saved);
	pci_write_config_byte(fbd_dev, DIMM_THRTCTL, i7300_idle_thrtctl_saved);
	pci_read_config_byte(fbd_dev, DIMM_THRTCTL, &got_ctl);
	WARN_ON_ONCE(got_ctl != i7300_idle_thrtctl_saved);
}

/*
 * i7300_avg_duration_check()
 * Returns 0 if the decaying average of recent idle durations is
 * at least DURATION_THRESHOLD_US, nonzero otherwise.
 */
static int i7300_avg_duration_check(void)
{
	if (avg_idle_us >= DURATION_THRESHOLD_US)
		return 0;

#ifdef DEBUG
	past_skip++;
#endif
	return 1;
}

/* Idle notifier to look at idle CPUs */
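/*
 * Throttling is started only when the last online CPU enters idle (and
 * recent idle durations look long enough to be worthwhile), and stopped
 * as soon as the first CPU comes back out of idle.
 */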
static int i7300_idle_notifier(struct notifier_block *nb, unsigned long val,
				void *data)
{
	unsigned long flags;
	ktime_t now_ktime;
	static ktime_t idle_begin_time;
	static int time_init = 1;

	if (!i7300_idle_thrtlowlm)
		return 0;

	if (unlikely(time_init)) {
		time_init = 0;
		idle_begin_time = ktime_get();
	}

	spin_lock_irqsave(&i7300_idle_lock, flags);
	if (val == IDLE_START) {
		cpu_set(smp_processor_id(), idle_cpumask);

		if (cpus_weight(idle_cpumask) != num_online_cpus())
			goto end;

		now_ktime = ktime_get();
		idle_begin_time = now_ktime;

		if (i7300_avg_duration_check())
			goto end;

		i7300_idle_active = 1;
		total_starts++;
		start_ktime = now_ktime;

		i7300_idle_start();
		i7300_idle_ioat_start();

	} else if (val == IDLE_END) {
		cpu_clear(smp_processor_id(), idle_cpumask);
		if (cpus_weight(idle_cpumask) == (num_online_cpus() - 1)) {
			/* First CPU coming out of idle */
			u64 idle_duration_us;

			now_ktime = ktime_get();

			idle_duration_us = ktime_to_us(ktime_sub
						(now_ktime, idle_begin_time));

			avg_idle_us =
				((100 - DURATION_WEIGHT_PCT) * avg_idle_us +
				 DURATION_WEIGHT_PCT * idle_duration_us) / 100;

			if (i7300_idle_active) {
				ktime_t idle_ktime;

				idle_ktime = ktime_sub(now_ktime, start_ktime);
				total_us += ktime_to_us(idle_ktime);

				i7300_idle_ioat_stop();
				i7300_idle_stop();
				i7300_idle_active = 0;
			}
		}
	}
end:
	spin_unlock_irqrestore(&i7300_idle_lock, flags);
	return 0;
}

static struct notifier_block i7300_idle_nb = {
	.notifier_call = i7300_idle_notifier,
};

/*
 * I/O AT controls (PCI bus 0, device 8, function 0)
 * DIMM controls (PCI bus 0, device 16, function 1)
 */
#define IOAT_BUS 0
#define IOAT_DEVFN PCI_DEVFN(8, 0)
#define MEMCTL_BUS 0
#define MEMCTL_DEVFN PCI_DEVFN(16, 1)

struct fbd_ioat {
	unsigned int vendor;
	unsigned int ioat_dev;
};

/*
 * The i5000 chipset has the same hooks as the i7300,
 * but support is disabled by default because this driver
 * has not been validated on that platform.
 */
#define SUPPORT_I5000 0

static const struct fbd_ioat fbd_ioat_list[] = {
	{PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_IOAT_CNB},
#if SUPPORT_I5000
	{PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_IOAT},
#endif
	{0, 0}
};

/* Table of devices that work with this driver */
static const struct pci_device_id pci_tbl[] = {
	{ PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_FBD_CNB) },
#if SUPPORT_I5000
	{ PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_5000_ERR) },
#endif
	{ } /* Terminating entry */
};

MODULE_DEVICE_TABLE(pci, pci_tbl);

/* Check for known platforms with I/O AT */
static int __init i7300_idle_platform_probe(void)
{
	int i;

	fbd_dev = pci_get_bus_and_slot(MEMCTL_BUS, MEMCTL_DEVFN);
	if (!fbd_dev)
		return -ENODEV;

	for (i = 0; pci_tbl[i].vendor != 0; i++) {
		if (fbd_dev->vendor == pci_tbl[i].vendor &&
		    fbd_dev->device == pci_tbl[i].device) {
			break;
		}
	}
	if (pci_tbl[i].vendor == 0)
		return -ENODEV;

	ioat_dev = pci_get_bus_and_slot(IOAT_BUS, IOAT_DEVFN);
	if (!ioat_dev)
		return -ENODEV;

	for (i = 0; fbd_ioat_list[i].vendor != 0; i++) {
		if (ioat_dev->vendor == fbd_ioat_list[i].vendor &&
		    ioat_dev->device == fbd_ioat_list[i].ioat_dev) {
			return 0;
		}
	}
	return -ENODEV;
}

static int stats_open_generic(struct inode *inode, struct file *fp)
{
	fp->private_data = inode->i_private;
	return 0;
}

static ssize_t stats_read_ul(struct file *fp, char __user *ubuf, size_t count,
				loff_t *off)
{
	unsigned long *p = fp->private_data;
	char buf[32];
	int len;

	len = snprintf(buf, 32, "%lu\n", *p);
	return simple_read_from_buffer(ubuf, count, off, buf, len);
}

static const struct file_operations idle_fops = {
	.open	= stats_open_generic,
	.read	= stats_read_ul,
};

static struct debugfs_file_info {
	void *ptr;
	char name[32];
	struct dentry *file;
} debugfs_file_list[] = {
	{&total_starts, "total_starts", NULL},
	{&total_us, "total_us", NULL},
#ifdef DEBUG
	{&past_skip, "past_skip", NULL},
#endif
	{NULL, "", NULL}
};

static int __init i7300_idle_init(void)
{
	spin_lock_init(&i7300_idle_lock);
	cpus_clear(idle_cpumask);
	total_us = 0;

	if (i7300_idle_platform_probe())
		return -ENODEV;

	if (i7300_idle_thrt_save())
		return -ENODEV;

	if (i7300_idle_ioat_init())
		return -ENODEV;

	debugfs_dir = debugfs_create_dir("i7300_idle", NULL);
	if (debugfs_dir) {
		int i = 0;

		while (debugfs_file_list[i].ptr != NULL) {
			debugfs_file_list[i].file = debugfs_create_file(
					debugfs_file_list[i].name,
					S_IRUSR,
					debugfs_dir,
					debugfs_file_list[i].ptr,
					&idle_fops);
			i++;
		}
	}

	idle_notifier_register(&i7300_idle_nb);

	printk(KERN_INFO "i7300_idle: loaded v%s\n", I7300_IDLE_DRIVER_VERSION);
	return 0;
}

static void __exit i7300_idle_exit(void)
{
	idle_notifier_unregister(&i7300_idle_nb);

	if (debugfs_dir) {
		int i = 0;

		while (debugfs_file_list[i].file != NULL) {
			debugfs_remove(debugfs_file_list[i].file);
			i++;
		}

		debugfs_remove(debugfs_dir);
	}
	i7300_idle_thrt_restore();
	i7300_idle_ioat_exit();
}

module_init(i7300_idle_init);
module_exit(i7300_idle_exit);

MODULE_AUTHOR("Andy Henroid <andrew.d.henroid@intel.com>");
MODULE_DESCRIPTION("Intel Chipset DIMM Idle Power Saving Driver v"
			I7300_IDLE_DRIVER_VERSION);
MODULE_LICENSE("GPL");