Diffstat (limited to 'drivers/edac/i5100_edac.c')
 -rw-r--r--  drivers/edac/i5100_edac.c | 112
 1 file changed, 110 insertions(+), 2 deletions(-)
diff --git a/drivers/edac/i5100_edac.c b/drivers/edac/i5100_edac.c
index b2fbb4567dc6..9a933180b434 100644
--- a/drivers/edac/i5100_edac.c
+++ b/drivers/edac/i5100_edac.c
@@ -25,6 +25,8 @@
 
 /* device 16, func 1 */
 #define I5100_MC		0x40	/* Memory Control Register */
+#define I5100_MC_SCRBEN_MASK	(1 << 7)
+#define I5100_MC_SCRBDONE_MASK	(1 << 4)
 #define I5100_MS		0x44	/* Memory Status Register */
 #define I5100_SPDDATA		0x48	/* Serial Presence Detect Status Reg */
 #define I5100_SPDCMD		0x4c	/* Serial Presence Detect Command Reg */
@@ -72,11 +74,21 @@
 
 /* bit field accessors */
 
+static inline u32 i5100_mc_scrben(u32 mc)
+{
+	return mc >> 7 & 1;
+}
+
 static inline u32 i5100_mc_errdeten(u32 mc)
 {
 	return mc >> 5 & 1;
 }
 
+static inline u32 i5100_mc_scrbdone(u32 mc)
+{
+	return mc >> 4 & 1;
+}
+
 static inline u16 i5100_spddata_rdo(u16 a)
 {
 	return a >> 15 & 1;
@@ -272,6 +284,7 @@ static inline u32 i5100_recmemb_ras(u32 a)
 #define I5100_MAX_DIMM_SLOTS_PER_CHAN	4
 #define I5100_MAX_RANK_INTERLEAVE	4
 #define I5100_MAX_DMIRS			5
+#define I5100_SCRUB_REFRESH_RATE	(5 * 60 * HZ)
 
 struct i5100_priv {
 	/* ranks on each dimm -- 0 maps to not present -- obtained via SPD */
@@ -318,6 +331,9 @@ struct i5100_priv {
 	struct pci_dev *mc;	/* device 16 func 1 */
 	struct pci_dev *ch0mm;	/* device 21 func 0 */
 	struct pci_dev *ch1mm;	/* device 22 func 0 */
+
+	struct delayed_work i5100_scrubbing;
+	int scrub_enable;
 };
 
 /* map a rank/chan to a slot number on the mainboard */
@@ -534,6 +550,80 @@ static void i5100_check_error(struct mem_ctl_info *mci)
 	}
 }
 
+/* The i5100 chipset will scrub the entire memory once, then
+ * set a done bit. Continuous scrubbing is achieved by enqueueing
+ * delayed work to a workqueue, checking every few minutes whether
+ * the scrubbing has completed and, if so, reinitiating it.
+ */
+
+static void i5100_refresh_scrubbing(struct work_struct *work)
+{
+	struct delayed_work *i5100_scrubbing = container_of(work,
+							    struct delayed_work,
+							    work);
+	struct i5100_priv *priv = container_of(i5100_scrubbing,
+					       struct i5100_priv,
+					       i5100_scrubbing);
+	u32 dw;
+
+	pci_read_config_dword(priv->mc, I5100_MC, &dw);
+
+	if (priv->scrub_enable) {
+
+		pci_read_config_dword(priv->mc, I5100_MC, &dw);
+
+		if (i5100_mc_scrbdone(dw)) {
+			dw |= I5100_MC_SCRBEN_MASK;
+			pci_write_config_dword(priv->mc, I5100_MC, dw);
+			pci_read_config_dword(priv->mc, I5100_MC, &dw);
+		}
+
+		schedule_delayed_work(&(priv->i5100_scrubbing),
+				      I5100_SCRUB_REFRESH_RATE);
+	}
+}
+/*
+ * The bandwidth is based on experimentation; feel free to refine it.
+ */
+static int i5100_set_scrub_rate(struct mem_ctl_info *mci,
+				u32 *bandwidth)
+{
+	struct i5100_priv *priv = mci->pvt_info;
+	u32 dw;
+
+	pci_read_config_dword(priv->mc, I5100_MC, &dw);
+	if (*bandwidth) {
+		priv->scrub_enable = 1;
+		dw |= I5100_MC_SCRBEN_MASK;
+		schedule_delayed_work(&(priv->i5100_scrubbing),
+				      I5100_SCRUB_REFRESH_RATE);
+	} else {
+		priv->scrub_enable = 0;
+		dw &= ~I5100_MC_SCRBEN_MASK;
+		cancel_delayed_work(&(priv->i5100_scrubbing));
+	}
+	pci_write_config_dword(priv->mc, I5100_MC, dw);
+
+	pci_read_config_dword(priv->mc, I5100_MC, &dw);
+
+	*bandwidth = 5900000 * i5100_mc_scrben(dw);
+
+	return 0;
+}
+
+static int i5100_get_scrub_rate(struct mem_ctl_info *mci,
+				u32 *bandwidth)
+{
+	struct i5100_priv *priv = mci->pvt_info;
+	u32 dw;
+
+	pci_read_config_dword(priv->mc, I5100_MC, &dw);
+
+	*bandwidth = 5900000 * i5100_mc_scrben(dw);
+
+	return 0;
+}
+
 static struct pci_dev *pci_get_device_func(unsigned vendor,
 					   unsigned device,
 					   unsigned func)
@@ -869,6 +959,16 @@ static int __devinit i5100_init_one(struct pci_dev *pdev,
 	priv->ch0mm = ch0mm;
 	priv->ch1mm = ch1mm;
 
+	INIT_DELAYED_WORK(&(priv->i5100_scrubbing), i5100_refresh_scrubbing);
+
+	/* If scrubbing was already enabled by the BIOS, start maintaining it */
+	pci_read_config_dword(pdev, I5100_MC, &dw);
+	if (i5100_mc_scrben(dw)) {
+		priv->scrub_enable = 1;
+		schedule_delayed_work(&(priv->i5100_scrubbing),
+				      I5100_SCRUB_REFRESH_RATE);
+	}
+
 	i5100_init_dimm_layout(pdev, mci);
 	i5100_init_interleaving(pdev, mci);
 
@@ -882,6 +982,8 @@ static int __devinit i5100_init_one(struct pci_dev *pdev,
 	mci->ctl_page_to_phys = NULL;
 
 	mci->edac_check = i5100_check_error;
+	mci->set_sdram_scrub_rate = i5100_set_scrub_rate;
+	mci->get_sdram_scrub_rate = i5100_get_scrub_rate;
 
 	i5100_init_csrows(mci);
 
@@ -897,12 +999,14 @@ static int __devinit i5100_init_one(struct pci_dev *pdev,
 
 	if (edac_mc_add_mc(mci)) {
 		ret = -ENODEV;
-		goto bail_mc;
+		goto bail_scrub;
 	}
 
 	return ret;
 
-bail_mc:
+bail_scrub:
+	priv->scrub_enable = 0;
+	cancel_delayed_work_sync(&(priv->i5100_scrubbing));
 	edac_mc_free(mci);
 
 bail_disable_ch1:
@@ -935,6 +1039,10 @@ static void __devexit i5100_remove_one(struct pci_dev *pdev)
 		return;
 
 	priv = mci->pvt_info;
+
+	priv->scrub_enable = 0;
+	cancel_delayed_work_sync(&(priv->i5100_scrubbing));
+
 	pci_disable_device(pdev);
 	pci_disable_device(priv->ch0mm);
 	pci_disable_device(priv->ch1mm);
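
A note on the pattern the patch is built around (not part of the commit): the scrub refresh is a self-rearming delayed work item. The work function re-queues itself only while an enable flag is set, so teardown clears the flag first and then cancels the work synchronously, which is what the bail_scrub path and i5100_remove_one() do above. Below is a minimal, self-contained sketch of that idiom as a standalone module; the names my_ctx, my_work_fn and MY_REFRESH_RATE are invented for this illustration and do not appear in the driver.

#include <linux/module.h>
#include <linux/workqueue.h>
#include <linux/jiffies.h>

#define MY_REFRESH_RATE	(5 * 60 * HZ)	/* re-check every five minutes */

struct my_ctx {
	struct delayed_work work;
	int enable;
};

static struct my_ctx ctx;

static void my_work_fn(struct work_struct *work)
{
	/* recover the delayed_work, then the enclosing context */
	struct delayed_work *dw = container_of(work, struct delayed_work, work);
	struct my_ctx *c = container_of(dw, struct my_ctx, work);

	/* ... perform the periodic check here ... */

	if (c->enable)		/* rearm only while still enabled */
		schedule_delayed_work(&c->work, MY_REFRESH_RATE);
}

static int __init my_init(void)
{
	INIT_DELAYED_WORK(&ctx.work, my_work_fn);
	ctx.enable = 1;
	schedule_delayed_work(&ctx.work, MY_REFRESH_RATE);
	return 0;
}

static void __exit my_exit(void)
{
	ctx.enable = 0;				/* stop the work from rearming itself */
	cancel_delayed_work_sync(&ctx.work);	/* then wait for any running instance */
}

module_init(my_init);
module_exit(my_exit);
MODULE_LICENSE("GPL");

Clearing the flag before cancel_delayed_work_sync() matters: a callback that is already running sees the cleared flag and does not re-queue itself, so the synchronous cancel can wait for it and return with nothing left pending.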