diff options
author | Arnd Bergmann <arnd@arndb.de> | 2011-11-23 15:47:41 -0500 |
---|---|---|
committer | Arnd Bergmann <arnd@arndb.de> | 2011-11-23 15:47:41 -0500 |
commit | 58a273745fbb2fbd01d26e7a60f0acc8c1d99469 (patch) | |
tree | bc16200f3b6ea150b298422754e32959eaa339bc /drivers | |
parent | 951c486f62490e032da0ad17e93270b0cfb6687f (diff) | |
parent | 0116da4fcc1ae8a80d9002441e98768f2a6fa2fe (diff) |
Merge branches 'drivers/macb-gem' and 'drivers/pxa-gpio' into next/drivers
Diffstat (limited to 'drivers')
295 files changed, 11766 insertions, 7337 deletions
diff --git a/drivers/acpi/acpica/hwregs.c b/drivers/acpi/acpica/hwregs.c index 55accb7018bb..cc70f3fdcdd1 100644 --- a/drivers/acpi/acpica/hwregs.c +++ b/drivers/acpi/acpica/hwregs.c | |||
@@ -269,16 +269,17 @@ acpi_status acpi_hw_clear_acpi_status(void) | |||
269 | 269 | ||
270 | status = acpi_hw_register_write(ACPI_REGISTER_PM1_STATUS, | 270 | status = acpi_hw_register_write(ACPI_REGISTER_PM1_STATUS, |
271 | ACPI_BITMASK_ALL_FIXED_STATUS); | 271 | ACPI_BITMASK_ALL_FIXED_STATUS); |
272 | if (ACPI_FAILURE(status)) { | 272 | |
273 | goto unlock_and_exit; | 273 | acpi_os_release_lock(acpi_gbl_hardware_lock, lock_flags); |
274 | } | 274 | |
275 | if (ACPI_FAILURE(status)) | ||
276 | goto exit; | ||
275 | 277 | ||
276 | /* Clear the GPE Bits in all GPE registers in all GPE blocks */ | 278 | /* Clear the GPE Bits in all GPE registers in all GPE blocks */ |
277 | 279 | ||
278 | status = acpi_ev_walk_gpe_list(acpi_hw_clear_gpe_block, NULL); | 280 | status = acpi_ev_walk_gpe_list(acpi_hw_clear_gpe_block, NULL); |
279 | 281 | ||
280 | unlock_and_exit: | 282 | exit: |
281 | acpi_os_release_lock(acpi_gbl_hardware_lock, lock_flags); | ||
282 | return_ACPI_STATUS(status); | 283 | return_ACPI_STATUS(status); |
283 | } | 284 | } |
284 | 285 | ||
diff --git a/drivers/acpi/atomicio.c b/drivers/acpi/atomicio.c index 04ae1c88c03c..cfc0cc10af39 100644 --- a/drivers/acpi/atomicio.c +++ b/drivers/acpi/atomicio.c | |||
@@ -76,7 +76,7 @@ static void __iomem *__acpi_ioremap_fast(phys_addr_t paddr, | |||
76 | { | 76 | { |
77 | struct acpi_iomap *map; | 77 | struct acpi_iomap *map; |
78 | 78 | ||
79 | map = __acpi_find_iomap(paddr, size); | 79 | map = __acpi_find_iomap(paddr, size/8); |
80 | if (map) | 80 | if (map) |
81 | return map->vaddr + (paddr - map->paddr); | 81 | return map->vaddr + (paddr - map->paddr); |
82 | else | 82 | else |
diff --git a/drivers/acpi/bus.c b/drivers/acpi/bus.c index 437ddbf0c49a..9ecec98bc76e 100644 --- a/drivers/acpi/bus.c +++ b/drivers/acpi/bus.c | |||
@@ -911,10 +911,7 @@ void __init acpi_early_init(void) | |||
911 | } | 911 | } |
912 | #endif | 912 | #endif |
913 | 913 | ||
914 | status = | 914 | status = acpi_enable_subsystem(~ACPI_NO_ACPI_ENABLE); |
915 | acpi_enable_subsystem(~ | ||
916 | (ACPI_NO_HARDWARE_INIT | | ||
917 | ACPI_NO_ACPI_ENABLE)); | ||
918 | if (ACPI_FAILURE(status)) { | 915 | if (ACPI_FAILURE(status)) { |
919 | printk(KERN_ERR PREFIX "Unable to enable ACPI\n"); | 916 | printk(KERN_ERR PREFIX "Unable to enable ACPI\n"); |
920 | goto error0; | 917 | goto error0; |
@@ -935,8 +932,7 @@ static int __init acpi_bus_init(void) | |||
935 | 932 | ||
936 | acpi_os_initialize1(); | 933 | acpi_os_initialize1(); |
937 | 934 | ||
938 | status = | 935 | status = acpi_enable_subsystem(ACPI_NO_ACPI_ENABLE); |
939 | acpi_enable_subsystem(ACPI_NO_HARDWARE_INIT | ACPI_NO_ACPI_ENABLE); | ||
940 | if (ACPI_FAILURE(status)) { | 936 | if (ACPI_FAILURE(status)) { |
941 | printk(KERN_ERR PREFIX | 937 | printk(KERN_ERR PREFIX |
942 | "Unable to start the ACPI Interpreter\n"); | 938 | "Unable to start the ACPI Interpreter\n"); |
diff --git a/drivers/acpi/processor_driver.c b/drivers/acpi/processor_driver.c index a4e0f1ba6040..9d7bc9f6b6cc 100644 --- a/drivers/acpi/processor_driver.c +++ b/drivers/acpi/processor_driver.c | |||
@@ -426,7 +426,7 @@ static int acpi_cpu_soft_notify(struct notifier_block *nfb, | |||
426 | 426 | ||
427 | if (action == CPU_ONLINE && pr) { | 427 | if (action == CPU_ONLINE && pr) { |
428 | acpi_processor_ppc_has_changed(pr, 0); | 428 | acpi_processor_ppc_has_changed(pr, 0); |
429 | acpi_processor_cst_has_changed(pr); | 429 | acpi_processor_hotplug(pr); |
430 | acpi_processor_reevaluate_tstate(pr, action); | 430 | acpi_processor_reevaluate_tstate(pr, action); |
431 | acpi_processor_tstate_has_changed(pr); | 431 | acpi_processor_tstate_has_changed(pr); |
432 | } | 432 | } |
@@ -503,8 +503,7 @@ static int __cpuinit acpi_processor_add(struct acpi_device *device) | |||
503 | acpi_processor_get_throttling_info(pr); | 503 | acpi_processor_get_throttling_info(pr); |
504 | acpi_processor_get_limit_info(pr); | 504 | acpi_processor_get_limit_info(pr); |
505 | 505 | ||
506 | 506 | if (!cpuidle_get_driver() || cpuidle_get_driver() == &acpi_idle_driver) | |
507 | if (cpuidle_get_driver() == &acpi_idle_driver) | ||
508 | acpi_processor_power_init(pr, device); | 507 | acpi_processor_power_init(pr, device); |
509 | 508 | ||
510 | pr->cdev = thermal_cooling_device_register("Processor", device, | 509 | pr->cdev = thermal_cooling_device_register("Processor", device, |
@@ -800,17 +799,9 @@ static int __init acpi_processor_init(void) | |||
800 | 799 | ||
801 | memset(&errata, 0, sizeof(errata)); | 800 | memset(&errata, 0, sizeof(errata)); |
802 | 801 | ||
803 | if (!cpuidle_register_driver(&acpi_idle_driver)) { | ||
804 | printk(KERN_DEBUG "ACPI: %s registered with cpuidle\n", | ||
805 | acpi_idle_driver.name); | ||
806 | } else { | ||
807 | printk(KERN_DEBUG "ACPI: acpi_idle yielding to %s\n", | ||
808 | cpuidle_get_driver()->name); | ||
809 | } | ||
810 | |||
811 | result = acpi_bus_register_driver(&acpi_processor_driver); | 802 | result = acpi_bus_register_driver(&acpi_processor_driver); |
812 | if (result < 0) | 803 | if (result < 0) |
813 | goto out_cpuidle; | 804 | return result; |
814 | 805 | ||
815 | acpi_processor_install_hotplug_notify(); | 806 | acpi_processor_install_hotplug_notify(); |
816 | 807 | ||
@@ -821,11 +812,6 @@ static int __init acpi_processor_init(void) | |||
821 | acpi_processor_throttling_init(); | 812 | acpi_processor_throttling_init(); |
822 | 813 | ||
823 | return 0; | 814 | return 0; |
824 | |||
825 | out_cpuidle: | ||
826 | cpuidle_unregister_driver(&acpi_idle_driver); | ||
827 | |||
828 | return result; | ||
829 | } | 815 | } |
830 | 816 | ||
831 | static void __exit acpi_processor_exit(void) | 817 | static void __exit acpi_processor_exit(void) |
diff --git a/drivers/acpi/processor_idle.c b/drivers/acpi/processor_idle.c index 9b88f9828d8c..0e8e2de2ed3e 100644 --- a/drivers/acpi/processor_idle.c +++ b/drivers/acpi/processor_idle.c | |||
@@ -224,7 +224,6 @@ static void lapic_timer_state_broadcast(struct acpi_processor *pr, | |||
224 | /* | 224 | /* |
225 | * Suspend / resume control | 225 | * Suspend / resume control |
226 | */ | 226 | */ |
227 | static int acpi_idle_suspend; | ||
228 | static u32 saved_bm_rld; | 227 | static u32 saved_bm_rld; |
229 | 228 | ||
230 | static void acpi_idle_bm_rld_save(void) | 229 | static void acpi_idle_bm_rld_save(void) |
@@ -243,21 +242,13 @@ static void acpi_idle_bm_rld_restore(void) | |||
243 | 242 | ||
244 | int acpi_processor_suspend(struct acpi_device * device, pm_message_t state) | 243 | int acpi_processor_suspend(struct acpi_device * device, pm_message_t state) |
245 | { | 244 | { |
246 | if (acpi_idle_suspend == 1) | ||
247 | return 0; | ||
248 | |||
249 | acpi_idle_bm_rld_save(); | 245 | acpi_idle_bm_rld_save(); |
250 | acpi_idle_suspend = 1; | ||
251 | return 0; | 246 | return 0; |
252 | } | 247 | } |
253 | 248 | ||
254 | int acpi_processor_resume(struct acpi_device * device) | 249 | int acpi_processor_resume(struct acpi_device * device) |
255 | { | 250 | { |
256 | if (acpi_idle_suspend == 0) | ||
257 | return 0; | ||
258 | |||
259 | acpi_idle_bm_rld_restore(); | 251 | acpi_idle_bm_rld_restore(); |
260 | acpi_idle_suspend = 0; | ||
261 | return 0; | 252 | return 0; |
262 | } | 253 | } |
263 | 254 | ||
@@ -741,66 +732,65 @@ static inline void acpi_idle_do_entry(struct acpi_processor_cx *cx) | |||
741 | /** | 732 | /** |
742 | * acpi_idle_enter_c1 - enters an ACPI C1 state-type | 733 | * acpi_idle_enter_c1 - enters an ACPI C1 state-type |
743 | * @dev: the target CPU | 734 | * @dev: the target CPU |
744 | * @state: the state data | 735 | * @drv: cpuidle driver containing cpuidle state info |
736 | * @index: index of target state | ||
745 | * | 737 | * |
746 | * This is equivalent to the HALT instruction. | 738 | * This is equivalent to the HALT instruction. |
747 | */ | 739 | */ |
748 | static int acpi_idle_enter_c1(struct cpuidle_device *dev, | 740 | static int acpi_idle_enter_c1(struct cpuidle_device *dev, |
749 | struct cpuidle_state *state) | 741 | struct cpuidle_driver *drv, int index) |
750 | { | 742 | { |
751 | ktime_t kt1, kt2; | 743 | ktime_t kt1, kt2; |
752 | s64 idle_time; | 744 | s64 idle_time; |
753 | struct acpi_processor *pr; | 745 | struct acpi_processor *pr; |
754 | struct acpi_processor_cx *cx = cpuidle_get_statedata(state); | 746 | struct cpuidle_state_usage *state_usage = &dev->states_usage[index]; |
747 | struct acpi_processor_cx *cx = cpuidle_get_statedata(state_usage); | ||
755 | 748 | ||
756 | pr = __this_cpu_read(processors); | 749 | pr = __this_cpu_read(processors); |
750 | dev->last_residency = 0; | ||
757 | 751 | ||
758 | if (unlikely(!pr)) | 752 | if (unlikely(!pr)) |
759 | return 0; | 753 | return -EINVAL; |
760 | 754 | ||
761 | local_irq_disable(); | 755 | local_irq_disable(); |
762 | 756 | ||
763 | /* Do not access any ACPI IO ports in suspend path */ | ||
764 | if (acpi_idle_suspend) { | ||
765 | local_irq_enable(); | ||
766 | cpu_relax(); | ||
767 | return 0; | ||
768 | } | ||
769 | |||
770 | lapic_timer_state_broadcast(pr, cx, 1); | 757 | lapic_timer_state_broadcast(pr, cx, 1); |
771 | kt1 = ktime_get_real(); | 758 | kt1 = ktime_get_real(); |
772 | acpi_idle_do_entry(cx); | 759 | acpi_idle_do_entry(cx); |
773 | kt2 = ktime_get_real(); | 760 | kt2 = ktime_get_real(); |
774 | idle_time = ktime_to_us(ktime_sub(kt2, kt1)); | 761 | idle_time = ktime_to_us(ktime_sub(kt2, kt1)); |
775 | 762 | ||
763 | /* Update device last_residency*/ | ||
764 | dev->last_residency = (int)idle_time; | ||
765 | |||
776 | local_irq_enable(); | 766 | local_irq_enable(); |
777 | cx->usage++; | 767 | cx->usage++; |
778 | lapic_timer_state_broadcast(pr, cx, 0); | 768 | lapic_timer_state_broadcast(pr, cx, 0); |
779 | 769 | ||
780 | return idle_time; | 770 | return index; |
781 | } | 771 | } |
782 | 772 | ||
783 | /** | 773 | /** |
784 | * acpi_idle_enter_simple - enters an ACPI state without BM handling | 774 | * acpi_idle_enter_simple - enters an ACPI state without BM handling |
785 | * @dev: the target CPU | 775 | * @dev: the target CPU |
786 | * @state: the state data | 776 | * @drv: cpuidle driver with cpuidle state information |
777 | * @index: the index of suggested state | ||
787 | */ | 778 | */ |
788 | static int acpi_idle_enter_simple(struct cpuidle_device *dev, | 779 | static int acpi_idle_enter_simple(struct cpuidle_device *dev, |
789 | struct cpuidle_state *state) | 780 | struct cpuidle_driver *drv, int index) |
790 | { | 781 | { |
791 | struct acpi_processor *pr; | 782 | struct acpi_processor *pr; |
792 | struct acpi_processor_cx *cx = cpuidle_get_statedata(state); | 783 | struct cpuidle_state_usage *state_usage = &dev->states_usage[index]; |
784 | struct acpi_processor_cx *cx = cpuidle_get_statedata(state_usage); | ||
793 | ktime_t kt1, kt2; | 785 | ktime_t kt1, kt2; |
794 | s64 idle_time_ns; | 786 | s64 idle_time_ns; |
795 | s64 idle_time; | 787 | s64 idle_time; |
796 | 788 | ||
797 | pr = __this_cpu_read(processors); | 789 | pr = __this_cpu_read(processors); |
790 | dev->last_residency = 0; | ||
798 | 791 | ||
799 | if (unlikely(!pr)) | 792 | if (unlikely(!pr)) |
800 | return 0; | 793 | return -EINVAL; |
801 | |||
802 | if (acpi_idle_suspend) | ||
803 | return(acpi_idle_enter_c1(dev, state)); | ||
804 | 794 | ||
805 | local_irq_disable(); | 795 | local_irq_disable(); |
806 | 796 | ||
@@ -815,7 +805,7 @@ static int acpi_idle_enter_simple(struct cpuidle_device *dev, | |||
815 | if (unlikely(need_resched())) { | 805 | if (unlikely(need_resched())) { |
816 | current_thread_info()->status |= TS_POLLING; | 806 | current_thread_info()->status |= TS_POLLING; |
817 | local_irq_enable(); | 807 | local_irq_enable(); |
818 | return 0; | 808 | return -EINVAL; |
819 | } | 809 | } |
820 | } | 810 | } |
821 | 811 | ||
@@ -837,6 +827,9 @@ static int acpi_idle_enter_simple(struct cpuidle_device *dev, | |||
837 | idle_time = idle_time_ns; | 827 | idle_time = idle_time_ns; |
838 | do_div(idle_time, NSEC_PER_USEC); | 828 | do_div(idle_time, NSEC_PER_USEC); |
839 | 829 | ||
830 | /* Update device last_residency*/ | ||
831 | dev->last_residency = (int)idle_time; | ||
832 | |||
840 | /* Tell the scheduler how much we idled: */ | 833 | /* Tell the scheduler how much we idled: */ |
841 | sched_clock_idle_wakeup_event(idle_time_ns); | 834 | sched_clock_idle_wakeup_event(idle_time_ns); |
842 | 835 | ||
@@ -848,7 +841,7 @@ static int acpi_idle_enter_simple(struct cpuidle_device *dev, | |||
848 | 841 | ||
849 | lapic_timer_state_broadcast(pr, cx, 0); | 842 | lapic_timer_state_broadcast(pr, cx, 0); |
850 | cx->time += idle_time; | 843 | cx->time += idle_time; |
851 | return idle_time; | 844 | return index; |
852 | } | 845 | } |
853 | 846 | ||
854 | static int c3_cpu_count; | 847 | static int c3_cpu_count; |
@@ -857,37 +850,37 @@ static DEFINE_RAW_SPINLOCK(c3_lock); | |||
857 | /** | 850 | /** |
858 | * acpi_idle_enter_bm - enters C3 with proper BM handling | 851 | * acpi_idle_enter_bm - enters C3 with proper BM handling |
859 | * @dev: the target CPU | 852 | * @dev: the target CPU |
860 | * @state: the state data | 853 | * @drv: cpuidle driver containing state data |
854 | * @index: the index of suggested state | ||
861 | * | 855 | * |
862 | * If BM is detected, the deepest non-C3 idle state is entered instead. | 856 | * If BM is detected, the deepest non-C3 idle state is entered instead. |
863 | */ | 857 | */ |
864 | static int acpi_idle_enter_bm(struct cpuidle_device *dev, | 858 | static int acpi_idle_enter_bm(struct cpuidle_device *dev, |
865 | struct cpuidle_state *state) | 859 | struct cpuidle_driver *drv, int index) |
866 | { | 860 | { |
867 | struct acpi_processor *pr; | 861 | struct acpi_processor *pr; |
868 | struct acpi_processor_cx *cx = cpuidle_get_statedata(state); | 862 | struct cpuidle_state_usage *state_usage = &dev->states_usage[index]; |
863 | struct acpi_processor_cx *cx = cpuidle_get_statedata(state_usage); | ||
869 | ktime_t kt1, kt2; | 864 | ktime_t kt1, kt2; |
870 | s64 idle_time_ns; | 865 | s64 idle_time_ns; |
871 | s64 idle_time; | 866 | s64 idle_time; |
872 | 867 | ||
873 | 868 | ||
874 | pr = __this_cpu_read(processors); | 869 | pr = __this_cpu_read(processors); |
870 | dev->last_residency = 0; | ||
875 | 871 | ||
876 | if (unlikely(!pr)) | 872 | if (unlikely(!pr)) |
877 | return 0; | 873 | return -EINVAL; |
878 | |||
879 | if (acpi_idle_suspend) | ||
880 | return(acpi_idle_enter_c1(dev, state)); | ||
881 | 874 | ||
882 | if (!cx->bm_sts_skip && acpi_idle_bm_check()) { | 875 | if (!cx->bm_sts_skip && acpi_idle_bm_check()) { |
883 | if (dev->safe_state) { | 876 | if (drv->safe_state_index >= 0) { |
884 | dev->last_state = dev->safe_state; | 877 | return drv->states[drv->safe_state_index].enter(dev, |
885 | return dev->safe_state->enter(dev, dev->safe_state); | 878 | drv, drv->safe_state_index); |
886 | } else { | 879 | } else { |
887 | local_irq_disable(); | 880 | local_irq_disable(); |
888 | acpi_safe_halt(); | 881 | acpi_safe_halt(); |
889 | local_irq_enable(); | 882 | local_irq_enable(); |
890 | return 0; | 883 | return -EINVAL; |
891 | } | 884 | } |
892 | } | 885 | } |
893 | 886 | ||
@@ -904,7 +897,7 @@ static int acpi_idle_enter_bm(struct cpuidle_device *dev, | |||
904 | if (unlikely(need_resched())) { | 897 | if (unlikely(need_resched())) { |
905 | current_thread_info()->status |= TS_POLLING; | 898 | current_thread_info()->status |= TS_POLLING; |
906 | local_irq_enable(); | 899 | local_irq_enable(); |
907 | return 0; | 900 | return -EINVAL; |
908 | } | 901 | } |
909 | } | 902 | } |
910 | 903 | ||
@@ -954,6 +947,9 @@ static int acpi_idle_enter_bm(struct cpuidle_device *dev, | |||
954 | idle_time = idle_time_ns; | 947 | idle_time = idle_time_ns; |
955 | do_div(idle_time, NSEC_PER_USEC); | 948 | do_div(idle_time, NSEC_PER_USEC); |
956 | 949 | ||
950 | /* Update device last_residency*/ | ||
951 | dev->last_residency = (int)idle_time; | ||
952 | |||
957 | /* Tell the scheduler how much we idled: */ | 953 | /* Tell the scheduler how much we idled: */ |
958 | sched_clock_idle_wakeup_event(idle_time_ns); | 954 | sched_clock_idle_wakeup_event(idle_time_ns); |
959 | 955 | ||
@@ -965,7 +961,7 @@ static int acpi_idle_enter_bm(struct cpuidle_device *dev, | |||
965 | 961 | ||
966 | lapic_timer_state_broadcast(pr, cx, 0); | 962 | lapic_timer_state_broadcast(pr, cx, 0); |
967 | cx->time += idle_time; | 963 | cx->time += idle_time; |
968 | return idle_time; | 964 | return index; |
969 | } | 965 | } |
970 | 966 | ||
971 | struct cpuidle_driver acpi_idle_driver = { | 967 | struct cpuidle_driver acpi_idle_driver = { |
@@ -974,14 +970,16 @@ struct cpuidle_driver acpi_idle_driver = { | |||
974 | }; | 970 | }; |
975 | 971 | ||
976 | /** | 972 | /** |
977 | * acpi_processor_setup_cpuidle - prepares and configures CPUIDLE | 973 | * acpi_processor_setup_cpuidle_cx - prepares and configures CPUIDLE |
974 | * device i.e. per-cpu data | ||
975 | * | ||
978 | * @pr: the ACPI processor | 976 | * @pr: the ACPI processor |
979 | */ | 977 | */ |
980 | static int acpi_processor_setup_cpuidle(struct acpi_processor *pr) | 978 | static int acpi_processor_setup_cpuidle_cx(struct acpi_processor *pr) |
981 | { | 979 | { |
982 | int i, count = CPUIDLE_DRIVER_STATE_START; | 980 | int i, count = CPUIDLE_DRIVER_STATE_START; |
983 | struct acpi_processor_cx *cx; | 981 | struct acpi_processor_cx *cx; |
984 | struct cpuidle_state *state; | 982 | struct cpuidle_state_usage *state_usage; |
985 | struct cpuidle_device *dev = &pr->power.dev; | 983 | struct cpuidle_device *dev = &pr->power.dev; |
986 | 984 | ||
987 | if (!pr->flags.power_setup_done) | 985 | if (!pr->flags.power_setup_done) |
@@ -992,9 +990,62 @@ static int acpi_processor_setup_cpuidle(struct acpi_processor *pr) | |||
992 | } | 990 | } |
993 | 991 | ||
994 | dev->cpu = pr->id; | 992 | dev->cpu = pr->id; |
993 | |||
994 | if (max_cstate == 0) | ||
995 | max_cstate = 1; | ||
996 | |||
997 | for (i = 1; i < ACPI_PROCESSOR_MAX_POWER && i <= max_cstate; i++) { | ||
998 | cx = &pr->power.states[i]; | ||
999 | state_usage = &dev->states_usage[count]; | ||
1000 | |||
1001 | if (!cx->valid) | ||
1002 | continue; | ||
1003 | |||
1004 | #ifdef CONFIG_HOTPLUG_CPU | ||
1005 | if ((cx->type != ACPI_STATE_C1) && (num_online_cpus() > 1) && | ||
1006 | !pr->flags.has_cst && | ||
1007 | !(acpi_gbl_FADT.flags & ACPI_FADT_C2_MP_SUPPORTED)) | ||
1008 | continue; | ||
1009 | #endif | ||
1010 | |||
1011 | cpuidle_set_statedata(state_usage, cx); | ||
1012 | |||
1013 | count++; | ||
1014 | if (count == CPUIDLE_STATE_MAX) | ||
1015 | break; | ||
1016 | } | ||
1017 | |||
1018 | dev->state_count = count; | ||
1019 | |||
1020 | if (!count) | ||
1021 | return -EINVAL; | ||
1022 | |||
1023 | return 0; | ||
1024 | } | ||
1025 | |||
1026 | /** | ||
1027 | * acpi_processor_setup_cpuidle states- prepares and configures cpuidle | ||
1028 | * global state data i.e. idle routines | ||
1029 | * | ||
1030 | * @pr: the ACPI processor | ||
1031 | */ | ||
1032 | static int acpi_processor_setup_cpuidle_states(struct acpi_processor *pr) | ||
1033 | { | ||
1034 | int i, count = CPUIDLE_DRIVER_STATE_START; | ||
1035 | struct acpi_processor_cx *cx; | ||
1036 | struct cpuidle_state *state; | ||
1037 | struct cpuidle_driver *drv = &acpi_idle_driver; | ||
1038 | |||
1039 | if (!pr->flags.power_setup_done) | ||
1040 | return -EINVAL; | ||
1041 | |||
1042 | if (pr->flags.power == 0) | ||
1043 | return -EINVAL; | ||
1044 | |||
1045 | drv->safe_state_index = -1; | ||
995 | for (i = 0; i < CPUIDLE_STATE_MAX; i++) { | 1046 | for (i = 0; i < CPUIDLE_STATE_MAX; i++) { |
996 | dev->states[i].name[0] = '\0'; | 1047 | drv->states[i].name[0] = '\0'; |
997 | dev->states[i].desc[0] = '\0'; | 1048 | drv->states[i].desc[0] = '\0'; |
998 | } | 1049 | } |
999 | 1050 | ||
1000 | if (max_cstate == 0) | 1051 | if (max_cstate == 0) |
@@ -1002,7 +1053,6 @@ static int acpi_processor_setup_cpuidle(struct acpi_processor *pr) | |||
1002 | 1053 | ||
1003 | for (i = 1; i < ACPI_PROCESSOR_MAX_POWER && i <= max_cstate; i++) { | 1054 | for (i = 1; i < ACPI_PROCESSOR_MAX_POWER && i <= max_cstate; i++) { |
1004 | cx = &pr->power.states[i]; | 1055 | cx = &pr->power.states[i]; |
1005 | state = &dev->states[count]; | ||
1006 | 1056 | ||
1007 | if (!cx->valid) | 1057 | if (!cx->valid) |
1008 | continue; | 1058 | continue; |
@@ -1013,8 +1063,8 @@ static int acpi_processor_setup_cpuidle(struct acpi_processor *pr) | |||
1013 | !(acpi_gbl_FADT.flags & ACPI_FADT_C2_MP_SUPPORTED)) | 1063 | !(acpi_gbl_FADT.flags & ACPI_FADT_C2_MP_SUPPORTED)) |
1014 | continue; | 1064 | continue; |
1015 | #endif | 1065 | #endif |
1016 | cpuidle_set_statedata(state, cx); | ||
1017 | 1066 | ||
1067 | state = &drv->states[count]; | ||
1018 | snprintf(state->name, CPUIDLE_NAME_LEN, "C%d", i); | 1068 | snprintf(state->name, CPUIDLE_NAME_LEN, "C%d", i); |
1019 | strncpy(state->desc, cx->desc, CPUIDLE_DESC_LEN); | 1069 | strncpy(state->desc, cx->desc, CPUIDLE_DESC_LEN); |
1020 | state->exit_latency = cx->latency; | 1070 | state->exit_latency = cx->latency; |
@@ -1027,13 +1077,13 @@ static int acpi_processor_setup_cpuidle(struct acpi_processor *pr) | |||
1027 | state->flags |= CPUIDLE_FLAG_TIME_VALID; | 1077 | state->flags |= CPUIDLE_FLAG_TIME_VALID; |
1028 | 1078 | ||
1029 | state->enter = acpi_idle_enter_c1; | 1079 | state->enter = acpi_idle_enter_c1; |
1030 | dev->safe_state = state; | 1080 | drv->safe_state_index = count; |
1031 | break; | 1081 | break; |
1032 | 1082 | ||
1033 | case ACPI_STATE_C2: | 1083 | case ACPI_STATE_C2: |
1034 | state->flags |= CPUIDLE_FLAG_TIME_VALID; | 1084 | state->flags |= CPUIDLE_FLAG_TIME_VALID; |
1035 | state->enter = acpi_idle_enter_simple; | 1085 | state->enter = acpi_idle_enter_simple; |
1036 | dev->safe_state = state; | 1086 | drv->safe_state_index = count; |
1037 | break; | 1087 | break; |
1038 | 1088 | ||
1039 | case ACPI_STATE_C3: | 1089 | case ACPI_STATE_C3: |
@@ -1049,7 +1099,7 @@ static int acpi_processor_setup_cpuidle(struct acpi_processor *pr) | |||
1049 | break; | 1099 | break; |
1050 | } | 1100 | } |
1051 | 1101 | ||
1052 | dev->state_count = count; | 1102 | drv->state_count = count; |
1053 | 1103 | ||
1054 | if (!count) | 1104 | if (!count) |
1055 | return -EINVAL; | 1105 | return -EINVAL; |
@@ -1057,7 +1107,7 @@ static int acpi_processor_setup_cpuidle(struct acpi_processor *pr) | |||
1057 | return 0; | 1107 | return 0; |
1058 | } | 1108 | } |
1059 | 1109 | ||
1060 | int acpi_processor_cst_has_changed(struct acpi_processor *pr) | 1110 | int acpi_processor_hotplug(struct acpi_processor *pr) |
1061 | { | 1111 | { |
1062 | int ret = 0; | 1112 | int ret = 0; |
1063 | 1113 | ||
@@ -1078,7 +1128,7 @@ int acpi_processor_cst_has_changed(struct acpi_processor *pr) | |||
1078 | cpuidle_disable_device(&pr->power.dev); | 1128 | cpuidle_disable_device(&pr->power.dev); |
1079 | acpi_processor_get_power_info(pr); | 1129 | acpi_processor_get_power_info(pr); |
1080 | if (pr->flags.power) { | 1130 | if (pr->flags.power) { |
1081 | acpi_processor_setup_cpuidle(pr); | 1131 | acpi_processor_setup_cpuidle_cx(pr); |
1082 | ret = cpuidle_enable_device(&pr->power.dev); | 1132 | ret = cpuidle_enable_device(&pr->power.dev); |
1083 | } | 1133 | } |
1084 | cpuidle_resume_and_unlock(); | 1134 | cpuidle_resume_and_unlock(); |
@@ -1086,10 +1136,72 @@ int acpi_processor_cst_has_changed(struct acpi_processor *pr) | |||
1086 | return ret; | 1136 | return ret; |
1087 | } | 1137 | } |
1088 | 1138 | ||
1139 | int acpi_processor_cst_has_changed(struct acpi_processor *pr) | ||
1140 | { | ||
1141 | int cpu; | ||
1142 | struct acpi_processor *_pr; | ||
1143 | |||
1144 | if (disabled_by_idle_boot_param()) | ||
1145 | return 0; | ||
1146 | |||
1147 | if (!pr) | ||
1148 | return -EINVAL; | ||
1149 | |||
1150 | if (nocst) | ||
1151 | return -ENODEV; | ||
1152 | |||
1153 | if (!pr->flags.power_setup_done) | ||
1154 | return -ENODEV; | ||
1155 | |||
1156 | /* | ||
1157 | * FIXME: Design the ACPI notification to make it once per | ||
1158 | * system instead of once per-cpu. This condition is a hack | ||
1159 | * to make the code that updates C-States be called once. | ||
1160 | */ | ||
1161 | |||
1162 | if (smp_processor_id() == 0 && | ||
1163 | cpuidle_get_driver() == &acpi_idle_driver) { | ||
1164 | |||
1165 | cpuidle_pause_and_lock(); | ||
1166 | /* Protect against cpu-hotplug */ | ||
1167 | get_online_cpus(); | ||
1168 | |||
1169 | /* Disable all cpuidle devices */ | ||
1170 | for_each_online_cpu(cpu) { | ||
1171 | _pr = per_cpu(processors, cpu); | ||
1172 | if (!_pr || !_pr->flags.power_setup_done) | ||
1173 | continue; | ||
1174 | cpuidle_disable_device(&_pr->power.dev); | ||
1175 | } | ||
1176 | |||
1177 | /* Populate Updated C-state information */ | ||
1178 | acpi_processor_setup_cpuidle_states(pr); | ||
1179 | |||
1180 | /* Enable all cpuidle devices */ | ||
1181 | for_each_online_cpu(cpu) { | ||
1182 | _pr = per_cpu(processors, cpu); | ||
1183 | if (!_pr || !_pr->flags.power_setup_done) | ||
1184 | continue; | ||
1185 | acpi_processor_get_power_info(_pr); | ||
1186 | if (_pr->flags.power) { | ||
1187 | acpi_processor_setup_cpuidle_cx(_pr); | ||
1188 | cpuidle_enable_device(&_pr->power.dev); | ||
1189 | } | ||
1190 | } | ||
1191 | put_online_cpus(); | ||
1192 | cpuidle_resume_and_unlock(); | ||
1193 | } | ||
1194 | |||
1195 | return 0; | ||
1196 | } | ||
1197 | |||
1198 | static int acpi_processor_registered; | ||
1199 | |||
1089 | int __cpuinit acpi_processor_power_init(struct acpi_processor *pr, | 1200 | int __cpuinit acpi_processor_power_init(struct acpi_processor *pr, |
1090 | struct acpi_device *device) | 1201 | struct acpi_device *device) |
1091 | { | 1202 | { |
1092 | acpi_status status = 0; | 1203 | acpi_status status = 0; |
1204 | int retval; | ||
1093 | static int first_run; | 1205 | static int first_run; |
1094 | 1206 | ||
1095 | if (disabled_by_idle_boot_param()) | 1207 | if (disabled_by_idle_boot_param()) |
@@ -1126,9 +1238,26 @@ int __cpuinit acpi_processor_power_init(struct acpi_processor *pr, | |||
1126 | * platforms that only support C1. | 1238 | * platforms that only support C1. |
1127 | */ | 1239 | */ |
1128 | if (pr->flags.power) { | 1240 | if (pr->flags.power) { |
1129 | acpi_processor_setup_cpuidle(pr); | 1241 | /* Register acpi_idle_driver if not already registered */ |
1130 | if (cpuidle_register_device(&pr->power.dev)) | 1242 | if (!acpi_processor_registered) { |
1131 | return -EIO; | 1243 | acpi_processor_setup_cpuidle_states(pr); |
1244 | retval = cpuidle_register_driver(&acpi_idle_driver); | ||
1245 | if (retval) | ||
1246 | return retval; | ||
1247 | printk(KERN_DEBUG "ACPI: %s registered with cpuidle\n", | ||
1248 | acpi_idle_driver.name); | ||
1249 | } | ||
1250 | /* Register per-cpu cpuidle_device. Cpuidle driver | ||
1251 | * must already be registered before registering device | ||
1252 | */ | ||
1253 | acpi_processor_setup_cpuidle_cx(pr); | ||
1254 | retval = cpuidle_register_device(&pr->power.dev); | ||
1255 | if (retval) { | ||
1256 | if (acpi_processor_registered == 0) | ||
1257 | cpuidle_unregister_driver(&acpi_idle_driver); | ||
1258 | return retval; | ||
1259 | } | ||
1260 | acpi_processor_registered++; | ||
1132 | } | 1261 | } |
1133 | return 0; | 1262 | return 0; |
1134 | } | 1263 | } |
@@ -1139,8 +1268,13 @@ int acpi_processor_power_exit(struct acpi_processor *pr, | |||
1139 | if (disabled_by_idle_boot_param()) | 1268 | if (disabled_by_idle_boot_param()) |
1140 | return 0; | 1269 | return 0; |
1141 | 1270 | ||
1142 | cpuidle_unregister_device(&pr->power.dev); | 1271 | if (pr->flags.power) { |
1143 | pr->flags.power_setup_done = 0; | 1272 | cpuidle_unregister_device(&pr->power.dev); |
1273 | acpi_processor_registered--; | ||
1274 | if (acpi_processor_registered == 0) | ||
1275 | cpuidle_unregister_driver(&acpi_idle_driver); | ||
1276 | } | ||
1144 | 1277 | ||
1278 | pr->flags.power_setup_done = 0; | ||
1145 | return 0; | 1279 | return 0; |
1146 | } | 1280 | } |
diff --git a/drivers/acpi/scan.c b/drivers/acpi/scan.c index 449c556274c0..8ab80bafe3f1 100644 --- a/drivers/acpi/scan.c +++ b/drivers/acpi/scan.c | |||
@@ -1062,13 +1062,12 @@ static void acpi_add_id(struct acpi_device *device, const char *dev_id) | |||
1062 | if (!id) | 1062 | if (!id) |
1063 | return; | 1063 | return; |
1064 | 1064 | ||
1065 | id->id = kmalloc(strlen(dev_id) + 1, GFP_KERNEL); | 1065 | id->id = kstrdup(dev_id, GFP_KERNEL); |
1066 | if (!id->id) { | 1066 | if (!id->id) { |
1067 | kfree(id); | 1067 | kfree(id); |
1068 | return; | 1068 | return; |
1069 | } | 1069 | } |
1070 | 1070 | ||
1071 | strcpy(id->id, dev_id); | ||
1072 | list_add_tail(&id->list, &device->pnp.ids); | 1071 | list_add_tail(&id->list, &device->pnp.ids); |
1073 | } | 1072 | } |
1074 | 1073 | ||
diff --git a/drivers/acpi/sysfs.c b/drivers/acpi/sysfs.c index c538d0ef10ff..9f66181c814e 100644 --- a/drivers/acpi/sysfs.c +++ b/drivers/acpi/sysfs.c | |||
@@ -706,11 +706,23 @@ static void __exit interrupt_stats_exit(void) | |||
706 | return; | 706 | return; |
707 | } | 707 | } |
708 | 708 | ||
709 | static ssize_t | ||
710 | acpi_show_profile(struct device *dev, struct device_attribute *attr, | ||
711 | char *buf) | ||
712 | { | ||
713 | return sprintf(buf, "%d\n", acpi_gbl_FADT.preferred_profile); | ||
714 | } | ||
715 | |||
716 | static const struct device_attribute pm_profile_attr = | ||
717 | __ATTR(pm_profile, S_IRUGO, acpi_show_profile, NULL); | ||
718 | |||
709 | int __init acpi_sysfs_init(void) | 719 | int __init acpi_sysfs_init(void) |
710 | { | 720 | { |
711 | int result; | 721 | int result; |
712 | 722 | ||
713 | result = acpi_tables_sysfs_init(); | 723 | result = acpi_tables_sysfs_init(); |
714 | 724 | if (result) | |
725 | return result; | ||
726 | result = sysfs_create_file(acpi_kobj, &pm_profile_attr.attr); | ||
715 | return result; | 727 | return result; |
716 | } | 728 | } |
diff --git a/drivers/ata/ahci.c b/drivers/ata/ahci.c index fb7b90b05922..cf26222a93c5 100644 --- a/drivers/ata/ahci.c +++ b/drivers/ata/ahci.c | |||
@@ -390,6 +390,9 @@ static const struct pci_device_id ahci_pci_tbl[] = { | |||
390 | /* Promise */ | 390 | /* Promise */ |
391 | { PCI_VDEVICE(PROMISE, 0x3f20), board_ahci }, /* PDC42819 */ | 391 | { PCI_VDEVICE(PROMISE, 0x3f20), board_ahci }, /* PDC42819 */ |
392 | 392 | ||
393 | /* Asmedia */ | ||
394 | { PCI_VDEVICE(ASMEDIA, 0x0612), board_ahci }, /* ASM1061 */ | ||
395 | |||
393 | /* Generic, PCI class code for AHCI */ | 396 | /* Generic, PCI class code for AHCI */ |
394 | { PCI_ANY_ID, PCI_ANY_ID, PCI_ANY_ID, PCI_ANY_ID, | 397 | { PCI_ANY_ID, PCI_ANY_ID, PCI_ANY_ID, PCI_ANY_ID, |
395 | PCI_CLASS_STORAGE_SATA_AHCI, 0xffffff, board_ahci }, | 398 | PCI_CLASS_STORAGE_SATA_AHCI, 0xffffff, board_ahci }, |
diff --git a/drivers/ata/ahci_platform.c b/drivers/ata/ahci_platform.c index 004f2ce3dc73..ec555951176e 100644 --- a/drivers/ata/ahci_platform.c +++ b/drivers/ata/ahci_platform.c | |||
@@ -65,7 +65,7 @@ static struct scsi_host_template ahci_platform_sht = { | |||
65 | static int __init ahci_probe(struct platform_device *pdev) | 65 | static int __init ahci_probe(struct platform_device *pdev) |
66 | { | 66 | { |
67 | struct device *dev = &pdev->dev; | 67 | struct device *dev = &pdev->dev; |
68 | struct ahci_platform_data *pdata = dev->platform_data; | 68 | struct ahci_platform_data *pdata = dev_get_platdata(dev); |
69 | const struct platform_device_id *id = platform_get_device_id(pdev); | 69 | const struct platform_device_id *id = platform_get_device_id(pdev); |
70 | struct ata_port_info pi = ahci_port_info[id->driver_data]; | 70 | struct ata_port_info pi = ahci_port_info[id->driver_data]; |
71 | const struct ata_port_info *ppi[] = { &pi, NULL }; | 71 | const struct ata_port_info *ppi[] = { &pi, NULL }; |
@@ -191,7 +191,7 @@ err0: | |||
191 | static int __devexit ahci_remove(struct platform_device *pdev) | 191 | static int __devexit ahci_remove(struct platform_device *pdev) |
192 | { | 192 | { |
193 | struct device *dev = &pdev->dev; | 193 | struct device *dev = &pdev->dev; |
194 | struct ahci_platform_data *pdata = dev->platform_data; | 194 | struct ahci_platform_data *pdata = dev_get_platdata(dev); |
195 | struct ata_host *host = dev_get_drvdata(dev); | 195 | struct ata_host *host = dev_get_drvdata(dev); |
196 | 196 | ||
197 | ata_host_detach(host); | 197 | ata_host_detach(host); |
diff --git a/drivers/ata/libata-eh.c b/drivers/ata/libata-eh.c index f22957c2769a..a9b282038000 100644 --- a/drivers/ata/libata-eh.c +++ b/drivers/ata/libata-eh.c | |||
@@ -2883,7 +2883,7 @@ int ata_eh_reset(struct ata_link *link, int classify, | |||
2883 | sata_scr_read(link, SCR_STATUS, &sstatus)) | 2883 | sata_scr_read(link, SCR_STATUS, &sstatus)) |
2884 | rc = -ERESTART; | 2884 | rc = -ERESTART; |
2885 | 2885 | ||
2886 | if (rc == -ERESTART || try >= max_tries) { | 2886 | if (try >= max_tries) { |
2887 | /* | 2887 | /* |
2888 | * Thaw host port even if reset failed, so that the port | 2888 | * Thaw host port even if reset failed, so that the port |
2889 | * can be retried on the next phy event. This risks | 2889 | * can be retried on the next phy event. This risks |
@@ -2909,6 +2909,16 @@ int ata_eh_reset(struct ata_link *link, int classify, | |||
2909 | ata_eh_acquire(ap); | 2909 | ata_eh_acquire(ap); |
2910 | } | 2910 | } |
2911 | 2911 | ||
2912 | /* | ||
2913 | * While disks spinup behind PMP, some controllers fail sending SRST. | ||
2914 | * They need to be reset - as well as the PMP - before retrying. | ||
2915 | */ | ||
2916 | if (rc == -ERESTART) { | ||
2917 | if (ata_is_host_link(link)) | ||
2918 | ata_eh_thaw_port(ap); | ||
2919 | goto out; | ||
2920 | } | ||
2921 | |||
2912 | if (try == max_tries - 1) { | 2922 | if (try == max_tries - 1) { |
2913 | sata_down_spd_limit(link, 0); | 2923 | sata_down_spd_limit(link, 0); |
2914 | if (slave) | 2924 | if (slave) |
diff --git a/drivers/ata/libata-pmp.c b/drivers/ata/libata-pmp.c index 104462dbc524..21b80c555c60 100644 --- a/drivers/ata/libata-pmp.c +++ b/drivers/ata/libata-pmp.c | |||
@@ -389,12 +389,9 @@ static void sata_pmp_quirks(struct ata_port *ap) | |||
389 | /* link reports offline after LPM */ | 389 | /* link reports offline after LPM */ |
390 | link->flags |= ATA_LFLAG_NO_LPM; | 390 | link->flags |= ATA_LFLAG_NO_LPM; |
391 | 391 | ||
392 | /* Class code report is unreliable and SRST | 392 | /* Class code report is unreliable. */ |
393 | * times out under certain configurations. | ||
394 | */ | ||
395 | if (link->pmp < 5) | 393 | if (link->pmp < 5) |
396 | link->flags |= ATA_LFLAG_NO_SRST | | 394 | link->flags |= ATA_LFLAG_ASSUME_ATA; |
397 | ATA_LFLAG_ASSUME_ATA; | ||
398 | 395 | ||
399 | /* port 5 is for SEMB device and it doesn't like SRST */ | 396 | /* port 5 is for SEMB device and it doesn't like SRST */ |
400 | if (link->pmp == 5) | 397 | if (link->pmp == 5) |
diff --git a/drivers/ata/libata-scsi.c b/drivers/ata/libata-scsi.c index 72a9770ac42f..2a5412e7e9c1 100644 --- a/drivers/ata/libata-scsi.c +++ b/drivers/ata/libata-scsi.c | |||
@@ -1217,6 +1217,10 @@ void ata_scsi_slave_destroy(struct scsi_device *sdev) | |||
1217 | 1217 | ||
1218 | /** | 1218 | /** |
1219 | * __ata_change_queue_depth - helper for ata_scsi_change_queue_depth | 1219 | * __ata_change_queue_depth - helper for ata_scsi_change_queue_depth |
1220 | * @ap: ATA port to which the device change the queue depth | ||
1221 | * @sdev: SCSI device to configure queue depth for | ||
1222 | * @queue_depth: new queue depth | ||
1223 | * @reason: calling context | ||
1220 | * | 1224 | * |
1221 | * libsas and libata have different approaches for associating a sdev to | 1225 | * libsas and libata have different approaches for associating a sdev to |
1222 | * its ata_port. | 1226 | * its ata_port. |
diff --git a/drivers/ata/pata_of_platform.c b/drivers/ata/pata_of_platform.c index a72ab0dde4e5..2a472c5bb7db 100644 --- a/drivers/ata/pata_of_platform.c +++ b/drivers/ata/pata_of_platform.c | |||
@@ -52,7 +52,7 @@ static int __devinit pata_of_platform_probe(struct platform_device *ofdev) | |||
52 | } | 52 | } |
53 | 53 | ||
54 | ret = of_irq_to_resource(dn, 0, &irq_res); | 54 | ret = of_irq_to_resource(dn, 0, &irq_res); |
55 | if (ret == NO_IRQ) | 55 | if (!ret) |
56 | irq_res.start = irq_res.end = 0; | 56 | irq_res.start = irq_res.end = 0; |
57 | else | 57 | else |
58 | irq_res.flags = 0; | 58 | irq_res.flags = 0; |
diff --git a/drivers/ata/sata_sis.c b/drivers/ata/sata_sis.c index 447d9c05fb5a..95ec435f0eb4 100644 --- a/drivers/ata/sata_sis.c +++ b/drivers/ata/sata_sis.c | |||
@@ -104,7 +104,7 @@ static const struct ata_port_info sis_port_info = { | |||
104 | }; | 104 | }; |
105 | 105 | ||
106 | MODULE_AUTHOR("Uwe Koziolek"); | 106 | MODULE_AUTHOR("Uwe Koziolek"); |
107 | MODULE_DESCRIPTION("low-level driver for Silicon Integratad Systems SATA controller"); | 107 | MODULE_DESCRIPTION("low-level driver for Silicon Integrated Systems SATA controller"); |
108 | MODULE_LICENSE("GPL"); | 108 | MODULE_LICENSE("GPL"); |
109 | MODULE_DEVICE_TABLE(pci, sis_pci_tbl); | 109 | MODULE_DEVICE_TABLE(pci, sis_pci_tbl); |
110 | MODULE_VERSION(DRV_VERSION); | 110 | MODULE_VERSION(DRV_VERSION); |
diff --git a/drivers/base/power/opp.c b/drivers/base/power/opp.c index 434a6c011675..95706fa24c73 100644 --- a/drivers/base/power/opp.c +++ b/drivers/base/power/opp.c | |||
@@ -669,7 +669,7 @@ struct srcu_notifier_head *opp_get_notifier(struct device *dev) | |||
669 | struct device_opp *dev_opp = find_device_opp(dev); | 669 | struct device_opp *dev_opp = find_device_opp(dev); |
670 | 670 | ||
671 | if (IS_ERR(dev_opp)) | 671 | if (IS_ERR(dev_opp)) |
672 | return ERR_PTR(PTR_ERR(dev_opp)); /* matching type */ | 672 | return ERR_CAST(dev_opp); /* matching type */ |
673 | 673 | ||
674 | return &dev_opp->head; | 674 | return &dev_opp->head; |
675 | } | 675 | } |
diff --git a/drivers/bluetooth/ath3k.c b/drivers/bluetooth/ath3k.c index db7cb8111fbe..106beb194f3c 100644 --- a/drivers/bluetooth/ath3k.c +++ b/drivers/bluetooth/ath3k.c | |||
@@ -105,7 +105,7 @@ static int ath3k_load_firmware(struct usb_device *udev, | |||
105 | 105 | ||
106 | pipe = usb_sndctrlpipe(udev, 0); | 106 | pipe = usb_sndctrlpipe(udev, 0); |
107 | 107 | ||
108 | send_buf = kmalloc(BULK_SIZE, GFP_ATOMIC); | 108 | send_buf = kmalloc(BULK_SIZE, GFP_KERNEL); |
109 | if (!send_buf) { | 109 | if (!send_buf) { |
110 | BT_ERR("Can't allocate memory chunk for firmware"); | 110 | BT_ERR("Can't allocate memory chunk for firmware"); |
111 | return -ENOMEM; | 111 | return -ENOMEM; |
@@ -176,7 +176,7 @@ static int ath3k_load_fwfile(struct usb_device *udev, | |||
176 | 176 | ||
177 | count = firmware->size; | 177 | count = firmware->size; |
178 | 178 | ||
179 | send_buf = kmalloc(BULK_SIZE, GFP_ATOMIC); | 179 | send_buf = kmalloc(BULK_SIZE, GFP_KERNEL); |
180 | if (!send_buf) { | 180 | if (!send_buf) { |
181 | BT_ERR("Can't allocate memory chunk for firmware"); | 181 | BT_ERR("Can't allocate memory chunk for firmware"); |
182 | return -ENOMEM; | 182 | return -ENOMEM; |
diff --git a/drivers/bluetooth/bcm203x.c b/drivers/bluetooth/bcm203x.c index 8b1b643a519b..54952ab800b8 100644 --- a/drivers/bluetooth/bcm203x.c +++ b/drivers/bluetooth/bcm203x.c | |||
@@ -24,6 +24,7 @@ | |||
24 | 24 | ||
25 | #include <linux/module.h> | 25 | #include <linux/module.h> |
26 | 26 | ||
27 | #include <linux/atomic.h> | ||
27 | #include <linux/kernel.h> | 28 | #include <linux/kernel.h> |
28 | #include <linux/init.h> | 29 | #include <linux/init.h> |
29 | #include <linux/slab.h> | 30 | #include <linux/slab.h> |
@@ -65,6 +66,7 @@ struct bcm203x_data { | |||
65 | unsigned long state; | 66 | unsigned long state; |
66 | 67 | ||
67 | struct work_struct work; | 68 | struct work_struct work; |
69 | atomic_t shutdown; | ||
68 | 70 | ||
69 | struct urb *urb; | 71 | struct urb *urb; |
70 | unsigned char *buffer; | 72 | unsigned char *buffer; |
@@ -97,6 +99,7 @@ static void bcm203x_complete(struct urb *urb) | |||
97 | 99 | ||
98 | data->state = BCM203X_SELECT_MEMORY; | 100 | data->state = BCM203X_SELECT_MEMORY; |
99 | 101 | ||
102 | /* use workqueue to have a small delay */ | ||
100 | schedule_work(&data->work); | 103 | schedule_work(&data->work); |
101 | break; | 104 | break; |
102 | 105 | ||
@@ -155,7 +158,10 @@ static void bcm203x_work(struct work_struct *work) | |||
155 | struct bcm203x_data *data = | 158 | struct bcm203x_data *data = |
156 | container_of(work, struct bcm203x_data, work); | 159 | container_of(work, struct bcm203x_data, work); |
157 | 160 | ||
158 | if (usb_submit_urb(data->urb, GFP_ATOMIC) < 0) | 161 | if (atomic_read(&data->shutdown)) |
162 | return; | ||
163 | |||
164 | if (usb_submit_urb(data->urb, GFP_KERNEL) < 0) | ||
159 | BT_ERR("Can't submit URB"); | 165 | BT_ERR("Can't submit URB"); |
160 | } | 166 | } |
161 | 167 | ||
@@ -243,6 +249,7 @@ static int bcm203x_probe(struct usb_interface *intf, const struct usb_device_id | |||
243 | 249 | ||
244 | usb_set_intfdata(intf, data); | 250 | usb_set_intfdata(intf, data); |
245 | 251 | ||
252 | /* use workqueue to have a small delay */ | ||
246 | schedule_work(&data->work); | 253 | schedule_work(&data->work); |
247 | 254 | ||
248 | return 0; | 255 | return 0; |
@@ -254,6 +261,9 @@ static void bcm203x_disconnect(struct usb_interface *intf) | |||
254 | 261 | ||
255 | BT_DBG("intf %p", intf); | 262 | BT_DBG("intf %p", intf); |
256 | 263 | ||
264 | atomic_inc(&data->shutdown); | ||
265 | cancel_work_sync(&data->work); | ||
266 | |||
257 | usb_kill_urb(data->urb); | 267 | usb_kill_urb(data->urb); |
258 | 268 | ||
259 | usb_set_intfdata(intf, NULL); | 269 | usb_set_intfdata(intf, NULL); |
diff --git a/drivers/bluetooth/bfusb.c b/drivers/bluetooth/bfusb.c index 005919ab043c..61b591470a90 100644 --- a/drivers/bluetooth/bfusb.c +++ b/drivers/bluetooth/bfusb.c | |||
@@ -568,22 +568,23 @@ static int bfusb_load_firmware(struct bfusb_data *data, | |||
568 | 568 | ||
569 | BT_INFO("BlueFRITZ! USB loading firmware"); | 569 | BT_INFO("BlueFRITZ! USB loading firmware"); |
570 | 570 | ||
571 | buf = kmalloc(BFUSB_MAX_BLOCK_SIZE + 3, GFP_KERNEL); | ||
572 | if (!buf) { | ||
573 | BT_ERR("Can't allocate memory chunk for firmware"); | ||
574 | return -ENOMEM; | ||
575 | } | ||
576 | |||
571 | pipe = usb_sndctrlpipe(data->udev, 0); | 577 | pipe = usb_sndctrlpipe(data->udev, 0); |
572 | 578 | ||
573 | if (usb_control_msg(data->udev, pipe, USB_REQ_SET_CONFIGURATION, | 579 | if (usb_control_msg(data->udev, pipe, USB_REQ_SET_CONFIGURATION, |
574 | 0, 1, 0, NULL, 0, USB_CTRL_SET_TIMEOUT) < 0) { | 580 | 0, 1, 0, NULL, 0, USB_CTRL_SET_TIMEOUT) < 0) { |
575 | BT_ERR("Can't change to loading configuration"); | 581 | BT_ERR("Can't change to loading configuration"); |
582 | kfree(buf); | ||
576 | return -EBUSY; | 583 | return -EBUSY; |
577 | } | 584 | } |
578 | 585 | ||
579 | data->udev->toggle[0] = data->udev->toggle[1] = 0; | 586 | data->udev->toggle[0] = data->udev->toggle[1] = 0; |
580 | 587 | ||
581 | buf = kmalloc(BFUSB_MAX_BLOCK_SIZE + 3, GFP_ATOMIC); | ||
582 | if (!buf) { | ||
583 | BT_ERR("Can't allocate memory chunk for firmware"); | ||
584 | return -ENOMEM; | ||
585 | } | ||
586 | |||
587 | pipe = usb_sndbulkpipe(data->udev, data->bulk_out_ep); | 588 | pipe = usb_sndbulkpipe(data->udev, data->bulk_out_ep); |
588 | 589 | ||
589 | while (count) { | 590 | while (count) { |
diff --git a/drivers/char/agp/intel-gtt.c b/drivers/char/agp/intel-gtt.c index 66cd0b8096ca..c92424ca1a55 100644 --- a/drivers/char/agp/intel-gtt.c +++ b/drivers/char/agp/intel-gtt.c | |||
@@ -1186,10 +1186,11 @@ static void gen6_cleanup(void) | |||
1186 | /* Certain Gen5 chipsets require require idling the GPU before | 1186 | /* Certain Gen5 chipsets require require idling the GPU before |
1187 | * unmapping anything from the GTT when VT-d is enabled. | 1187 | * unmapping anything from the GTT when VT-d is enabled. |
1188 | */ | 1188 | */ |
1189 | extern int intel_iommu_gfx_mapped; | ||
1190 | static inline int needs_idle_maps(void) | 1189 | static inline int needs_idle_maps(void) |
1191 | { | 1190 | { |
1191 | #ifdef CONFIG_INTEL_IOMMU | ||
1192 | const unsigned short gpu_devid = intel_private.pcidev->device; | 1192 | const unsigned short gpu_devid = intel_private.pcidev->device; |
1193 | extern int intel_iommu_gfx_mapped; | ||
1193 | 1194 | ||
1194 | /* Query intel_iommu to see if we need the workaround. Presumably that | 1195 | /* Query intel_iommu to see if we need the workaround. Presumably that |
1195 | * was loaded first. | 1196 | * was loaded first. |
@@ -1198,7 +1199,7 @@ static inline int needs_idle_maps(void) | |||
1198 | gpu_devid == PCI_DEVICE_ID_INTEL_IRONLAKE_M_IG) && | 1199 | gpu_devid == PCI_DEVICE_ID_INTEL_IRONLAKE_M_IG) && |
1199 | intel_iommu_gfx_mapped) | 1200 | intel_iommu_gfx_mapped) |
1200 | return 1; | 1201 | return 1; |
1201 | 1202 | #endif | |
1202 | return 0; | 1203 | return 0; |
1203 | } | 1204 | } |
1204 | 1205 | ||
@@ -1236,7 +1237,7 @@ static int i9xx_setup(void) | |||
1236 | intel_private.gtt_bus_addr = reg_addr + gtt_offset; | 1237 | intel_private.gtt_bus_addr = reg_addr + gtt_offset; |
1237 | } | 1238 | } |
1238 | 1239 | ||
1239 | if (needs_idle_maps()); | 1240 | if (needs_idle_maps()) |
1240 | intel_private.base.do_idle_maps = 1; | 1241 | intel_private.base.do_idle_maps = 1; |
1241 | 1242 | ||
1242 | intel_i9xx_setup_flush(); | 1243 | intel_i9xx_setup_flush(); |
diff --git a/drivers/cpufreq/db8500-cpufreq.c b/drivers/cpufreq/db8500-cpufreq.c index edaa987621ea..f5002015d82e 100644 --- a/drivers/cpufreq/db8500-cpufreq.c +++ b/drivers/cpufreq/db8500-cpufreq.c | |||
@@ -109,7 +109,7 @@ static unsigned int db8500_cpufreq_getspeed(unsigned int cpu) | |||
109 | 109 | ||
110 | static int __cpuinit db8500_cpufreq_init(struct cpufreq_policy *policy) | 110 | static int __cpuinit db8500_cpufreq_init(struct cpufreq_policy *policy) |
111 | { | 111 | { |
112 | int res; | 112 | int i, res; |
113 | 113 | ||
114 | BUILD_BUG_ON(ARRAY_SIZE(idx2opp) + 1 != ARRAY_SIZE(freq_table)); | 114 | BUILD_BUG_ON(ARRAY_SIZE(idx2opp) + 1 != ARRAY_SIZE(freq_table)); |
115 | 115 | ||
@@ -120,8 +120,8 @@ static int __cpuinit db8500_cpufreq_init(struct cpufreq_policy *policy) | |||
120 | freq_table[3].frequency = 1000000; | 120 | freq_table[3].frequency = 1000000; |
121 | } | 121 | } |
122 | pr_info("db8500-cpufreq : Available frequencies:\n"); | 122 | pr_info("db8500-cpufreq : Available frequencies:\n"); |
123 | while (freq_table[i].frequency != CPUFREQ_TABLE_END) | 123 | for (i = 0; freq_table[i].frequency != CPUFREQ_TABLE_END; i++) |
124 | pr_info(" %d Mhz\n", freq_table[i++].frequency/1000); | 124 | pr_info(" %d Mhz\n", freq_table[i].frequency/1000); |
125 | 125 | ||
126 | /* get policy fields based on the table */ | 126 | /* get policy fields based on the table */ |
127 | res = cpufreq_frequency_table_cpuinfo(policy, freq_table); | 127 | res = cpufreq_frequency_table_cpuinfo(policy, freq_table); |
diff --git a/drivers/cpuidle/cpuidle.c b/drivers/cpuidle/cpuidle.c index becd6d99203b..06ce2680d00d 100644 --- a/drivers/cpuidle/cpuidle.c +++ b/drivers/cpuidle/cpuidle.c | |||
@@ -62,8 +62,9 @@ static int __cpuidle_register_device(struct cpuidle_device *dev); | |||
62 | int cpuidle_idle_call(void) | 62 | int cpuidle_idle_call(void) |
63 | { | 63 | { |
64 | struct cpuidle_device *dev = __this_cpu_read(cpuidle_devices); | 64 | struct cpuidle_device *dev = __this_cpu_read(cpuidle_devices); |
65 | struct cpuidle_driver *drv = cpuidle_get_driver(); | ||
65 | struct cpuidle_state *target_state; | 66 | struct cpuidle_state *target_state; |
66 | int next_state; | 67 | int next_state, entered_state; |
67 | 68 | ||
68 | if (off) | 69 | if (off) |
69 | return -ENODEV; | 70 | return -ENODEV; |
@@ -84,45 +85,36 @@ int cpuidle_idle_call(void) | |||
84 | hrtimer_peek_ahead_timers(); | 85 | hrtimer_peek_ahead_timers(); |
85 | #endif | 86 | #endif |
86 | 87 | ||
87 | /* | ||
88 | * Call the device's prepare function before calling the | ||
89 | * governor's select function. ->prepare gives the device's | ||
90 | * cpuidle driver a chance to update any dynamic information | ||
91 | * of its cpuidle states for the current idle period, e.g. | ||
92 | * state availability, latencies, residencies, etc. | ||
93 | */ | ||
94 | if (dev->prepare) | ||
95 | dev->prepare(dev); | ||
96 | |||
97 | /* ask the governor for the next state */ | 88 | /* ask the governor for the next state */ |
98 | next_state = cpuidle_curr_governor->select(dev); | 89 | next_state = cpuidle_curr_governor->select(drv, dev); |
99 | if (need_resched()) { | 90 | if (need_resched()) { |
100 | local_irq_enable(); | 91 | local_irq_enable(); |
101 | return 0; | 92 | return 0; |
102 | } | 93 | } |
103 | 94 | ||
104 | target_state = &dev->states[next_state]; | 95 | target_state = &drv->states[next_state]; |
105 | |||
106 | /* enter the state and update stats */ | ||
107 | dev->last_state = target_state; | ||
108 | 96 | ||
109 | trace_power_start(POWER_CSTATE, next_state, dev->cpu); | 97 | trace_power_start(POWER_CSTATE, next_state, dev->cpu); |
110 | trace_cpu_idle(next_state, dev->cpu); | 98 | trace_cpu_idle(next_state, dev->cpu); |
111 | 99 | ||
112 | dev->last_residency = target_state->enter(dev, target_state); | 100 | entered_state = target_state->enter(dev, drv, next_state); |
113 | 101 | ||
114 | trace_power_end(dev->cpu); | 102 | trace_power_end(dev->cpu); |
115 | trace_cpu_idle(PWR_EVENT_EXIT, dev->cpu); | 103 | trace_cpu_idle(PWR_EVENT_EXIT, dev->cpu); |
116 | 104 | ||
117 | if (dev->last_state) | 105 | if (entered_state >= 0) { |
118 | target_state = dev->last_state; | 106 | /* Update cpuidle counters */ |
119 | 107 | /* This can be moved to within driver enter routine | |
120 | target_state->time += (unsigned long long)dev->last_residency; | 108 | * but that results in multiple copies of same code. |
121 | target_state->usage++; | 109 | */ |
110 | dev->states_usage[entered_state].time += | ||
111 | (unsigned long long)dev->last_residency; | ||
112 | dev->states_usage[entered_state].usage++; | ||
113 | } | ||
122 | 114 | ||
123 | /* give the governor an opportunity to reflect on the outcome */ | 115 | /* give the governor an opportunity to reflect on the outcome */ |
124 | if (cpuidle_curr_governor->reflect) | 116 | if (cpuidle_curr_governor->reflect) |
125 | cpuidle_curr_governor->reflect(dev); | 117 | cpuidle_curr_governor->reflect(dev, entered_state); |
126 | 118 | ||
127 | return 0; | 119 | return 0; |
128 | } | 120 | } |
@@ -173,11 +165,11 @@ void cpuidle_resume_and_unlock(void) | |||
173 | EXPORT_SYMBOL_GPL(cpuidle_resume_and_unlock); | 165 | EXPORT_SYMBOL_GPL(cpuidle_resume_and_unlock); |
174 | 166 | ||
175 | #ifdef CONFIG_ARCH_HAS_CPU_RELAX | 167 | #ifdef CONFIG_ARCH_HAS_CPU_RELAX |
176 | static int poll_idle(struct cpuidle_device *dev, struct cpuidle_state *st) | 168 | static int poll_idle(struct cpuidle_device *dev, |
169 | struct cpuidle_driver *drv, int index) | ||
177 | { | 170 | { |
178 | ktime_t t1, t2; | 171 | ktime_t t1, t2; |
179 | s64 diff; | 172 | s64 diff; |
180 | int ret; | ||
181 | 173 | ||
182 | t1 = ktime_get(); | 174 | t1 = ktime_get(); |
183 | local_irq_enable(); | 175 | local_irq_enable(); |
@@ -189,15 +181,14 @@ static int poll_idle(struct cpuidle_device *dev, struct cpuidle_state *st) | |||
189 | if (diff > INT_MAX) | 181 | if (diff > INT_MAX) |
190 | diff = INT_MAX; | 182 | diff = INT_MAX; |
191 | 183 | ||
192 | ret = (int) diff; | 184 | dev->last_residency = (int) diff; |
193 | return ret; | 185 | |
186 | return index; | ||
194 | } | 187 | } |
195 | 188 | ||
196 | static void poll_idle_init(struct cpuidle_device *dev) | 189 | static void poll_idle_init(struct cpuidle_driver *drv) |
197 | { | 190 | { |
198 | struct cpuidle_state *state = &dev->states[0]; | 191 | struct cpuidle_state *state = &drv->states[0]; |
199 | |||
200 | cpuidle_set_statedata(state, NULL); | ||
201 | 192 | ||
202 | snprintf(state->name, CPUIDLE_NAME_LEN, "POLL"); | 193 | snprintf(state->name, CPUIDLE_NAME_LEN, "POLL"); |
203 | snprintf(state->desc, CPUIDLE_DESC_LEN, "CPUIDLE CORE POLL IDLE"); | 194 | snprintf(state->desc, CPUIDLE_DESC_LEN, "CPUIDLE CORE POLL IDLE"); |
@@ -208,7 +199,7 @@ static void poll_idle_init(struct cpuidle_device *dev) | |||
208 | state->enter = poll_idle; | 199 | state->enter = poll_idle; |
209 | } | 200 | } |
210 | #else | 201 | #else |
211 | static void poll_idle_init(struct cpuidle_device *dev) {} | 202 | static void poll_idle_init(struct cpuidle_driver *drv) {} |
212 | #endif /* CONFIG_ARCH_HAS_CPU_RELAX */ | 203 | #endif /* CONFIG_ARCH_HAS_CPU_RELAX */ |
213 | 204 | ||
214 | /** | 205 | /** |
@@ -235,21 +226,20 @@ int cpuidle_enable_device(struct cpuidle_device *dev) | |||
235 | return ret; | 226 | return ret; |
236 | } | 227 | } |
237 | 228 | ||
238 | poll_idle_init(dev); | 229 | poll_idle_init(cpuidle_get_driver()); |
239 | 230 | ||
240 | if ((ret = cpuidle_add_state_sysfs(dev))) | 231 | if ((ret = cpuidle_add_state_sysfs(dev))) |
241 | return ret; | 232 | return ret; |
242 | 233 | ||
243 | if (cpuidle_curr_governor->enable && | 234 | if (cpuidle_curr_governor->enable && |
244 | (ret = cpuidle_curr_governor->enable(dev))) | 235 | (ret = cpuidle_curr_governor->enable(cpuidle_get_driver(), dev))) |
245 | goto fail_sysfs; | 236 | goto fail_sysfs; |
246 | 237 | ||
247 | for (i = 0; i < dev->state_count; i++) { | 238 | for (i = 0; i < dev->state_count; i++) { |
248 | dev->states[i].usage = 0; | 239 | dev->states_usage[i].usage = 0; |
249 | dev->states[i].time = 0; | 240 | dev->states_usage[i].time = 0; |
250 | } | 241 | } |
251 | dev->last_residency = 0; | 242 | dev->last_residency = 0; |
252 | dev->last_state = NULL; | ||
253 | 243 | ||
254 | smp_wmb(); | 244 | smp_wmb(); |
255 | 245 | ||
@@ -283,7 +273,7 @@ void cpuidle_disable_device(struct cpuidle_device *dev) | |||
283 | dev->enabled = 0; | 273 | dev->enabled = 0; |
284 | 274 | ||
285 | if (cpuidle_curr_governor->disable) | 275 | if (cpuidle_curr_governor->disable) |
286 | cpuidle_curr_governor->disable(dev); | 276 | cpuidle_curr_governor->disable(cpuidle_get_driver(), dev); |
287 | 277 | ||
288 | cpuidle_remove_state_sysfs(dev); | 278 | cpuidle_remove_state_sysfs(dev); |
289 | enabled_devices--; | 279 | enabled_devices--; |
@@ -311,26 +301,6 @@ static int __cpuidle_register_device(struct cpuidle_device *dev) | |||
311 | 301 | ||
312 | init_completion(&dev->kobj_unregister); | 302 | init_completion(&dev->kobj_unregister); |
313 | 303 | ||
314 | /* | ||
315 | * cpuidle driver should set the dev->power_specified bit | ||
316 | * before registering the device if the driver provides | ||
317 | * power_usage numbers. | ||
318 | * | ||
319 | * For those devices whose ->power_specified is not set, | ||
320 | * we fill in power_usage with decreasing values as the | ||
321 | * cpuidle code has an implicit assumption that state Cn | ||
322 | * uses less power than C(n-1). | ||
323 | * | ||
324 | * With CONFIG_ARCH_HAS_CPU_RELAX, C0 is already assigned | ||
325 | * an power value of -1. So we use -2, -3, etc, for other | ||
326 | * c-states. | ||
327 | */ | ||
328 | if (!dev->power_specified) { | ||
329 | int i; | ||
330 | for (i = CPUIDLE_DRIVER_STATE_START; i < dev->state_count; i++) | ||
331 | dev->states[i].power_usage = -1 - i; | ||
332 | } | ||
333 | |||
334 | per_cpu(cpuidle_devices, dev->cpu) = dev; | 304 | per_cpu(cpuidle_devices, dev->cpu) = dev; |
335 | list_add(&dev->device_list, &cpuidle_detected_devices); | 305 | list_add(&dev->device_list, &cpuidle_detected_devices); |
336 | if ((ret = cpuidle_add_sysfs(sys_dev))) { | 306 | if ((ret = cpuidle_add_sysfs(sys_dev))) { |
diff --git a/drivers/cpuidle/driver.c b/drivers/cpuidle/driver.c index 3f7e3cedd133..284d7af5a9c8 100644 --- a/drivers/cpuidle/driver.c +++ b/drivers/cpuidle/driver.c | |||
@@ -17,6 +17,30 @@ | |||
17 | static struct cpuidle_driver *cpuidle_curr_driver; | 17 | static struct cpuidle_driver *cpuidle_curr_driver; |
18 | DEFINE_SPINLOCK(cpuidle_driver_lock); | 18 | DEFINE_SPINLOCK(cpuidle_driver_lock); |
19 | 19 | ||
20 | static void __cpuidle_register_driver(struct cpuidle_driver *drv) | ||
21 | { | ||
22 | int i; | ||
23 | /* | ||
24 | * cpuidle driver should set the drv->power_specified bit | ||
25 | * before registering if the driver provides | ||
26 | * power_usage numbers. | ||
27 | * | ||
28 | * If power_specified is not set, | ||
29 | * we fill in power_usage with decreasing values as the | ||
30 | * cpuidle code has an implicit assumption that state Cn | ||
31 | * uses less power than C(n-1). | ||
32 | * | ||
33 | * With CONFIG_ARCH_HAS_CPU_RELAX, C0 is already assigned | ||
34 | * an power value of -1. So we use -2, -3, etc, for other | ||
35 | * c-states. | ||
36 | */ | ||
37 | if (!drv->power_specified) { | ||
38 | for (i = CPUIDLE_DRIVER_STATE_START; i < drv->state_count; i++) | ||
39 | drv->states[i].power_usage = -1 - i; | ||
40 | } | ||
41 | } | ||
42 | |||
43 | |||
20 | /** | 44 | /** |
21 | * cpuidle_register_driver - registers a driver | 45 | * cpuidle_register_driver - registers a driver |
22 | * @drv: the driver | 46 | * @drv: the driver |
@@ -34,6 +58,7 @@ int cpuidle_register_driver(struct cpuidle_driver *drv) | |||
34 | spin_unlock(&cpuidle_driver_lock); | 58 | spin_unlock(&cpuidle_driver_lock); |
35 | return -EBUSY; | 59 | return -EBUSY; |
36 | } | 60 | } |
61 | __cpuidle_register_driver(drv); | ||
37 | cpuidle_curr_driver = drv; | 62 | cpuidle_curr_driver = drv; |
38 | spin_unlock(&cpuidle_driver_lock); | 63 | spin_unlock(&cpuidle_driver_lock); |
39 | 64 | ||
diff --git a/drivers/cpuidle/governors/ladder.c b/drivers/cpuidle/governors/ladder.c index 3b8fce20f023..b6a09ea859b1 100644 --- a/drivers/cpuidle/governors/ladder.c +++ b/drivers/cpuidle/governors/ladder.c | |||
@@ -60,9 +60,11 @@ static inline void ladder_do_selection(struct ladder_device *ldev, | |||
60 | 60 | ||
61 | /** | 61 | /** |
62 | * ladder_select_state - selects the next state to enter | 62 | * ladder_select_state - selects the next state to enter |
63 | * @drv: cpuidle driver | ||
63 | * @dev: the CPU | 64 | * @dev: the CPU |
64 | */ | 65 | */ |
65 | static int ladder_select_state(struct cpuidle_device *dev) | 66 | static int ladder_select_state(struct cpuidle_driver *drv, |
67 | struct cpuidle_device *dev) | ||
66 | { | 68 | { |
67 | struct ladder_device *ldev = &__get_cpu_var(ladder_devices); | 69 | struct ladder_device *ldev = &__get_cpu_var(ladder_devices); |
68 | struct ladder_device_state *last_state; | 70 | struct ladder_device_state *last_state; |
@@ -77,15 +79,17 @@ static int ladder_select_state(struct cpuidle_device *dev) | |||
77 | 79 | ||
78 | last_state = &ldev->states[last_idx]; | 80 | last_state = &ldev->states[last_idx]; |
79 | 81 | ||
80 | if (dev->states[last_idx].flags & CPUIDLE_FLAG_TIME_VALID) | 82 | if (drv->states[last_idx].flags & CPUIDLE_FLAG_TIME_VALID) { |
81 | last_residency = cpuidle_get_last_residency(dev) - dev->states[last_idx].exit_latency; | 83 | last_residency = cpuidle_get_last_residency(dev) - \ |
84 | drv->states[last_idx].exit_latency; | ||
85 | } | ||
82 | else | 86 | else |
83 | last_residency = last_state->threshold.promotion_time + 1; | 87 | last_residency = last_state->threshold.promotion_time + 1; |
84 | 88 | ||
85 | /* consider promotion */ | 89 | /* consider promotion */ |
86 | if (last_idx < dev->state_count - 1 && | 90 | if (last_idx < drv->state_count - 1 && |
87 | last_residency > last_state->threshold.promotion_time && | 91 | last_residency > last_state->threshold.promotion_time && |
88 | dev->states[last_idx + 1].exit_latency <= latency_req) { | 92 | drv->states[last_idx + 1].exit_latency <= latency_req) { |
89 | last_state->stats.promotion_count++; | 93 | last_state->stats.promotion_count++; |
90 | last_state->stats.demotion_count = 0; | 94 | last_state->stats.demotion_count = 0; |
91 | if (last_state->stats.promotion_count >= last_state->threshold.promotion_count) { | 95 | if (last_state->stats.promotion_count >= last_state->threshold.promotion_count) { |
@@ -96,11 +100,11 @@ static int ladder_select_state(struct cpuidle_device *dev) | |||
96 | 100 | ||
97 | /* consider demotion */ | 101 | /* consider demotion */ |
98 | if (last_idx > CPUIDLE_DRIVER_STATE_START && | 102 | if (last_idx > CPUIDLE_DRIVER_STATE_START && |
99 | dev->states[last_idx].exit_latency > latency_req) { | 103 | drv->states[last_idx].exit_latency > latency_req) { |
100 | int i; | 104 | int i; |
101 | 105 | ||
102 | for (i = last_idx - 1; i > CPUIDLE_DRIVER_STATE_START; i--) { | 106 | for (i = last_idx - 1; i > CPUIDLE_DRIVER_STATE_START; i--) { |
103 | if (dev->states[i].exit_latency <= latency_req) | 107 | if (drv->states[i].exit_latency <= latency_req) |
104 | break; | 108 | break; |
105 | } | 109 | } |
106 | ladder_do_selection(ldev, last_idx, i); | 110 | ladder_do_selection(ldev, last_idx, i); |
@@ -123,9 +127,11 @@ static int ladder_select_state(struct cpuidle_device *dev) | |||
123 | 127 | ||
124 | /** | 128 | /** |
125 | * ladder_enable_device - setup for the governor | 129 | * ladder_enable_device - setup for the governor |
130 | * @drv: cpuidle driver | ||
126 | * @dev: the CPU | 131 | * @dev: the CPU |
127 | */ | 132 | */ |
128 | static int ladder_enable_device(struct cpuidle_device *dev) | 133 | static int ladder_enable_device(struct cpuidle_driver *drv, |
134 | struct cpuidle_device *dev) | ||
129 | { | 135 | { |
130 | int i; | 136 | int i; |
131 | struct ladder_device *ldev = &per_cpu(ladder_devices, dev->cpu); | 137 | struct ladder_device *ldev = &per_cpu(ladder_devices, dev->cpu); |
@@ -134,8 +140,8 @@ static int ladder_enable_device(struct cpuidle_device *dev) | |||
134 | 140 | ||
135 | ldev->last_state_idx = CPUIDLE_DRIVER_STATE_START; | 141 | ldev->last_state_idx = CPUIDLE_DRIVER_STATE_START; |
136 | 142 | ||
137 | for (i = 0; i < dev->state_count; i++) { | 143 | for (i = 0; i < drv->state_count; i++) { |
138 | state = &dev->states[i]; | 144 | state = &drv->states[i]; |
139 | lstate = &ldev->states[i]; | 145 | lstate = &ldev->states[i]; |
140 | 146 | ||
141 | lstate->stats.promotion_count = 0; | 147 | lstate->stats.promotion_count = 0; |
@@ -144,7 +150,7 @@ static int ladder_enable_device(struct cpuidle_device *dev) | |||
144 | lstate->threshold.promotion_count = PROMOTION_COUNT; | 150 | lstate->threshold.promotion_count = PROMOTION_COUNT; |
145 | lstate->threshold.demotion_count = DEMOTION_COUNT; | 151 | lstate->threshold.demotion_count = DEMOTION_COUNT; |
146 | 152 | ||
147 | if (i < dev->state_count - 1) | 153 | if (i < drv->state_count - 1) |
148 | lstate->threshold.promotion_time = state->exit_latency; | 154 | lstate->threshold.promotion_time = state->exit_latency; |
149 | if (i > 0) | 155 | if (i > 0) |
150 | lstate->threshold.demotion_time = state->exit_latency; | 156 | lstate->threshold.demotion_time = state->exit_latency; |
@@ -153,11 +159,24 @@ static int ladder_enable_device(struct cpuidle_device *dev) | |||
153 | return 0; | 159 | return 0; |
154 | } | 160 | } |
155 | 161 | ||
162 | /** | ||
163 | * ladder_reflect - update the correct last_state_idx | ||
164 | * @dev: the CPU | ||
165 | * @index: the index of actual state entered | ||
166 | */ | ||
167 | static void ladder_reflect(struct cpuidle_device *dev, int index) | ||
168 | { | ||
169 | struct ladder_device *ldev = &__get_cpu_var(ladder_devices); | ||
170 | if (index > 0) | ||
171 | ldev->last_state_idx = index; | ||
172 | } | ||
173 | |||
156 | static struct cpuidle_governor ladder_governor = { | 174 | static struct cpuidle_governor ladder_governor = { |
157 | .name = "ladder", | 175 | .name = "ladder", |
158 | .rating = 10, | 176 | .rating = 10, |
159 | .enable = ladder_enable_device, | 177 | .enable = ladder_enable_device, |
160 | .select = ladder_select_state, | 178 | .select = ladder_select_state, |
179 | .reflect = ladder_reflect, | ||
161 | .owner = THIS_MODULE, | 180 | .owner = THIS_MODULE, |
162 | }; | 181 | }; |
163 | 182 | ||
diff --git a/drivers/cpuidle/governors/menu.c b/drivers/cpuidle/governors/menu.c index 00275244ce2f..ad0952601ae2 100644 --- a/drivers/cpuidle/governors/menu.c +++ b/drivers/cpuidle/governors/menu.c | |||
@@ -183,7 +183,7 @@ static inline int performance_multiplier(void) | |||
183 | 183 | ||
184 | static DEFINE_PER_CPU(struct menu_device, menu_devices); | 184 | static DEFINE_PER_CPU(struct menu_device, menu_devices); |
185 | 185 | ||
186 | static void menu_update(struct cpuidle_device *dev); | 186 | static void menu_update(struct cpuidle_driver *drv, struct cpuidle_device *dev); |
187 | 187 | ||
188 | /* This implements DIV_ROUND_CLOSEST but avoids 64 bit division */ | 188 | /* This implements DIV_ROUND_CLOSEST but avoids 64 bit division */ |
189 | static u64 div_round64(u64 dividend, u32 divisor) | 189 | static u64 div_round64(u64 dividend, u32 divisor) |
@@ -229,9 +229,10 @@ static void detect_repeating_patterns(struct menu_device *data) | |||
229 | 229 | ||
230 | /** | 230 | /** |
231 | * menu_select - selects the next idle state to enter | 231 | * menu_select - selects the next idle state to enter |
232 | * @drv: cpuidle driver containing state data | ||
232 | * @dev: the CPU | 233 | * @dev: the CPU |
233 | */ | 234 | */ |
234 | static int menu_select(struct cpuidle_device *dev) | 235 | static int menu_select(struct cpuidle_driver *drv, struct cpuidle_device *dev) |
235 | { | 236 | { |
236 | struct menu_device *data = &__get_cpu_var(menu_devices); | 237 | struct menu_device *data = &__get_cpu_var(menu_devices); |
237 | int latency_req = pm_qos_request(PM_QOS_CPU_DMA_LATENCY); | 238 | int latency_req = pm_qos_request(PM_QOS_CPU_DMA_LATENCY); |
@@ -241,7 +242,7 @@ static int menu_select(struct cpuidle_device *dev) | |||
241 | struct timespec t; | 242 | struct timespec t; |
242 | 243 | ||
243 | if (data->needs_update) { | 244 | if (data->needs_update) { |
244 | menu_update(dev); | 245 | menu_update(drv, dev); |
245 | data->needs_update = 0; | 246 | data->needs_update = 0; |
246 | } | 247 | } |
247 | 248 | ||
@@ -286,11 +287,9 @@ static int menu_select(struct cpuidle_device *dev) | |||
286 | * Find the idle state with the lowest power while satisfying | 287 | * Find the idle state with the lowest power while satisfying |
287 | * our constraints. | 288 | * our constraints. |
288 | */ | 289 | */ |
289 | for (i = CPUIDLE_DRIVER_STATE_START; i < dev->state_count; i++) { | 290 | for (i = CPUIDLE_DRIVER_STATE_START; i < drv->state_count; i++) { |
290 | struct cpuidle_state *s = &dev->states[i]; | 291 | struct cpuidle_state *s = &drv->states[i]; |
291 | 292 | ||
292 | if (s->flags & CPUIDLE_FLAG_IGNORE) | ||
293 | continue; | ||
294 | if (s->target_residency > data->predicted_us) | 293 | if (s->target_residency > data->predicted_us) |
295 | continue; | 294 | continue; |
296 | if (s->exit_latency > latency_req) | 295 | if (s->exit_latency > latency_req) |
@@ -311,26 +310,30 @@ static int menu_select(struct cpuidle_device *dev) | |||
311 | /** | 310 | /** |
312 | * menu_reflect - records that data structures need update | 311 | * menu_reflect - records that data structures need update |
313 | * @dev: the CPU | 312 | * @dev: the CPU |
313 | * @index: the index of actual entered state | ||
314 | * | 314 | * |
315 | * NOTE: it's important to be fast here because this operation will add to | 315 | * NOTE: it's important to be fast here because this operation will add to |
316 | * the overall exit latency. | 316 | * the overall exit latency. |
317 | */ | 317 | */ |
318 | static void menu_reflect(struct cpuidle_device *dev) | 318 | static void menu_reflect(struct cpuidle_device *dev, int index) |
319 | { | 319 | { |
320 | struct menu_device *data = &__get_cpu_var(menu_devices); | 320 | struct menu_device *data = &__get_cpu_var(menu_devices); |
321 | data->needs_update = 1; | 321 | data->last_state_idx = index; |
322 | if (index >= 0) | ||
323 | data->needs_update = 1; | ||
322 | } | 324 | } |
323 | 325 | ||
324 | /** | 326 | /** |
325 | * menu_update - attempts to guess what happened after entry | 327 | * menu_update - attempts to guess what happened after entry |
328 | * @drv: cpuidle driver containing state data | ||
326 | * @dev: the CPU | 329 | * @dev: the CPU |
327 | */ | 330 | */ |
328 | static void menu_update(struct cpuidle_device *dev) | 331 | static void menu_update(struct cpuidle_driver *drv, struct cpuidle_device *dev) |
329 | { | 332 | { |
330 | struct menu_device *data = &__get_cpu_var(menu_devices); | 333 | struct menu_device *data = &__get_cpu_var(menu_devices); |
331 | int last_idx = data->last_state_idx; | 334 | int last_idx = data->last_state_idx; |
332 | unsigned int last_idle_us = cpuidle_get_last_residency(dev); | 335 | unsigned int last_idle_us = cpuidle_get_last_residency(dev); |
333 | struct cpuidle_state *target = &dev->states[last_idx]; | 336 | struct cpuidle_state *target = &drv->states[last_idx]; |
334 | unsigned int measured_us; | 337 | unsigned int measured_us; |
335 | u64 new_factor; | 338 | u64 new_factor; |
336 | 339 | ||
@@ -384,9 +387,11 @@ static void menu_update(struct cpuidle_device *dev) | |||
384 | 387 | ||
385 | /** | 388 | /** |
386 | * menu_enable_device - scans a CPU's states and does setup | 389 | * menu_enable_device - scans a CPU's states and does setup |
390 | * @drv: cpuidle driver | ||
387 | * @dev: the CPU | 391 | * @dev: the CPU |
388 | */ | 392 | */ |
389 | static int menu_enable_device(struct cpuidle_device *dev) | 393 | static int menu_enable_device(struct cpuidle_driver *drv, |
394 | struct cpuidle_device *dev) | ||
390 | { | 395 | { |
391 | struct menu_device *data = &per_cpu(menu_devices, dev->cpu); | 396 | struct menu_device *data = &per_cpu(menu_devices, dev->cpu); |
392 | 397 | ||
diff --git a/drivers/cpuidle/sysfs.c b/drivers/cpuidle/sysfs.c index be7917ec40c9..1e756e160dca 100644 --- a/drivers/cpuidle/sysfs.c +++ b/drivers/cpuidle/sysfs.c | |||
@@ -216,7 +216,8 @@ static struct kobj_type ktype_cpuidle = { | |||
216 | 216 | ||
217 | struct cpuidle_state_attr { | 217 | struct cpuidle_state_attr { |
218 | struct attribute attr; | 218 | struct attribute attr; |
219 | ssize_t (*show)(struct cpuidle_state *, char *); | 219 | ssize_t (*show)(struct cpuidle_state *, \ |
220 | struct cpuidle_state_usage *, char *); | ||
220 | ssize_t (*store)(struct cpuidle_state *, const char *, size_t); | 221 | ssize_t (*store)(struct cpuidle_state *, const char *, size_t); |
221 | }; | 222 | }; |
222 | 223 | ||
@@ -224,19 +225,22 @@ struct cpuidle_state_attr { | |||
224 | static struct cpuidle_state_attr attr_##_name = __ATTR(_name, 0444, show, NULL) | 225 | static struct cpuidle_state_attr attr_##_name = __ATTR(_name, 0444, show, NULL) |
225 | 226 | ||
226 | #define define_show_state_function(_name) \ | 227 | #define define_show_state_function(_name) \ |
227 | static ssize_t show_state_##_name(struct cpuidle_state *state, char *buf) \ | 228 | static ssize_t show_state_##_name(struct cpuidle_state *state, \ |
229 | struct cpuidle_state_usage *state_usage, char *buf) \ | ||
228 | { \ | 230 | { \ |
229 | return sprintf(buf, "%u\n", state->_name);\ | 231 | return sprintf(buf, "%u\n", state->_name);\ |
230 | } | 232 | } |
231 | 233 | ||
232 | #define define_show_state_ull_function(_name) \ | 234 | #define define_show_state_ull_function(_name) \ |
233 | static ssize_t show_state_##_name(struct cpuidle_state *state, char *buf) \ | 235 | static ssize_t show_state_##_name(struct cpuidle_state *state, \ |
236 | struct cpuidle_state_usage *state_usage, char *buf) \ | ||
234 | { \ | 237 | { \ |
235 | return sprintf(buf, "%llu\n", state->_name);\ | 238 | return sprintf(buf, "%llu\n", state_usage->_name);\ |
236 | } | 239 | } |
237 | 240 | ||
238 | #define define_show_state_str_function(_name) \ | 241 | #define define_show_state_str_function(_name) \ |
239 | static ssize_t show_state_##_name(struct cpuidle_state *state, char *buf) \ | 242 | static ssize_t show_state_##_name(struct cpuidle_state *state, \ |
243 | struct cpuidle_state_usage *state_usage, char *buf) \ | ||
240 | { \ | 244 | { \ |
241 | if (state->_name[0] == '\0')\ | 245 | if (state->_name[0] == '\0')\ |
242 | return sprintf(buf, "<null>\n");\ | 246 | return sprintf(buf, "<null>\n");\ |
@@ -269,16 +273,18 @@ static struct attribute *cpuidle_state_default_attrs[] = { | |||
269 | 273 | ||
270 | #define kobj_to_state_obj(k) container_of(k, struct cpuidle_state_kobj, kobj) | 274 | #define kobj_to_state_obj(k) container_of(k, struct cpuidle_state_kobj, kobj) |
271 | #define kobj_to_state(k) (kobj_to_state_obj(k)->state) | 275 | #define kobj_to_state(k) (kobj_to_state_obj(k)->state) |
276 | #define kobj_to_state_usage(k) (kobj_to_state_obj(k)->state_usage) | ||
272 | #define attr_to_stateattr(a) container_of(a, struct cpuidle_state_attr, attr) | 277 | #define attr_to_stateattr(a) container_of(a, struct cpuidle_state_attr, attr) |
273 | static ssize_t cpuidle_state_show(struct kobject * kobj, | 278 | static ssize_t cpuidle_state_show(struct kobject * kobj, |
274 | struct attribute * attr ,char * buf) | 279 | struct attribute * attr ,char * buf) |
275 | { | 280 | { |
276 | int ret = -EIO; | 281 | int ret = -EIO; |
277 | struct cpuidle_state *state = kobj_to_state(kobj); | 282 | struct cpuidle_state *state = kobj_to_state(kobj); |
283 | struct cpuidle_state_usage *state_usage = kobj_to_state_usage(kobj); | ||
278 | struct cpuidle_state_attr * cattr = attr_to_stateattr(attr); | 284 | struct cpuidle_state_attr * cattr = attr_to_stateattr(attr); |
279 | 285 | ||
280 | if (cattr->show) | 286 | if (cattr->show) |
281 | ret = cattr->show(state, buf); | 287 | ret = cattr->show(state, state_usage, buf); |
282 | 288 | ||
283 | return ret; | 289 | return ret; |
284 | } | 290 | } |
@@ -316,13 +322,15 @@ int cpuidle_add_state_sysfs(struct cpuidle_device *device) | |||
316 | { | 322 | { |
317 | int i, ret = -ENOMEM; | 323 | int i, ret = -ENOMEM; |
318 | struct cpuidle_state_kobj *kobj; | 324 | struct cpuidle_state_kobj *kobj; |
325 | struct cpuidle_driver *drv = cpuidle_get_driver(); | ||
319 | 326 | ||
320 | /* state statistics */ | 327 | /* state statistics */ |
321 | for (i = 0; i < device->state_count; i++) { | 328 | for (i = 0; i < device->state_count; i++) { |
322 | kobj = kzalloc(sizeof(struct cpuidle_state_kobj), GFP_KERNEL); | 329 | kobj = kzalloc(sizeof(struct cpuidle_state_kobj), GFP_KERNEL); |
323 | if (!kobj) | 330 | if (!kobj) |
324 | goto error_state; | 331 | goto error_state; |
325 | kobj->state = &device->states[i]; | 332 | kobj->state = &drv->states[i]; |
333 | kobj->state_usage = &device->states_usage[i]; | ||
326 | init_completion(&kobj->kobj_unregister); | 334 | init_completion(&kobj->kobj_unregister); |
327 | 335 | ||
328 | ret = kobject_init_and_add(&kobj->kobj, &ktype_state_cpuidle, &device->kobj, | 336 | ret = kobject_init_and_add(&kobj->kobj, &ktype_state_cpuidle, &device->kobj, |
diff --git a/drivers/gpio/gpio-omap.c b/drivers/gpio/gpio-omap.c index 0e49d87f6c60..0b0562979171 100644 --- a/drivers/gpio/gpio-omap.c +++ b/drivers/gpio/gpio-omap.c | |||
@@ -148,13 +148,17 @@ static int _get_gpio_dataout(struct gpio_bank *bank, int gpio) | |||
148 | return (__raw_readl(reg) & GPIO_BIT(bank, gpio)) != 0; | 148 | return (__raw_readl(reg) & GPIO_BIT(bank, gpio)) != 0; |
149 | } | 149 | } |
150 | 150 | ||
151 | #define MOD_REG_BIT(reg, bit_mask, set) \ | 151 | static inline void _gpio_rmw(void __iomem *base, u32 reg, u32 mask, bool set) |
152 | do { \ | 152 | { |
153 | int l = __raw_readl(base + reg); \ | 153 | int l = __raw_readl(base + reg); |
154 | if (set) l |= bit_mask; \ | 154 | |
155 | else l &= ~bit_mask; \ | 155 | if (set) |
156 | __raw_writel(l, base + reg); \ | 156 | l |= mask; |
157 | } while(0) | 157 | else |
158 | l &= ~mask; | ||
159 | |||
160 | __raw_writel(l, base + reg); | ||
161 | } | ||
158 | 162 | ||
159 | /** | 163 | /** |
160 | * _set_gpio_debounce - low level gpio debounce time | 164 | * _set_gpio_debounce - low level gpio debounce time |
@@ -210,28 +214,28 @@ static inline void set_24xx_gpio_triggering(struct gpio_bank *bank, int gpio, | |||
210 | u32 gpio_bit = 1 << gpio; | 214 | u32 gpio_bit = 1 << gpio; |
211 | 215 | ||
212 | if (cpu_is_omap44xx()) { | 216 | if (cpu_is_omap44xx()) { |
213 | MOD_REG_BIT(OMAP4_GPIO_LEVELDETECT0, gpio_bit, | 217 | _gpio_rmw(base, OMAP4_GPIO_LEVELDETECT0, gpio_bit, |
214 | trigger & IRQ_TYPE_LEVEL_LOW); | 218 | trigger & IRQ_TYPE_LEVEL_LOW); |
215 | MOD_REG_BIT(OMAP4_GPIO_LEVELDETECT1, gpio_bit, | 219 | _gpio_rmw(base, OMAP4_GPIO_LEVELDETECT1, gpio_bit, |
216 | trigger & IRQ_TYPE_LEVEL_HIGH); | 220 | trigger & IRQ_TYPE_LEVEL_HIGH); |
217 | MOD_REG_BIT(OMAP4_GPIO_RISINGDETECT, gpio_bit, | 221 | _gpio_rmw(base, OMAP4_GPIO_RISINGDETECT, gpio_bit, |
218 | trigger & IRQ_TYPE_EDGE_RISING); | 222 | trigger & IRQ_TYPE_EDGE_RISING); |
219 | MOD_REG_BIT(OMAP4_GPIO_FALLINGDETECT, gpio_bit, | 223 | _gpio_rmw(base, OMAP4_GPIO_FALLINGDETECT, gpio_bit, |
220 | trigger & IRQ_TYPE_EDGE_FALLING); | 224 | trigger & IRQ_TYPE_EDGE_FALLING); |
221 | } else { | 225 | } else { |
222 | MOD_REG_BIT(OMAP24XX_GPIO_LEVELDETECT0, gpio_bit, | 226 | _gpio_rmw(base, OMAP24XX_GPIO_LEVELDETECT0, gpio_bit, |
223 | trigger & IRQ_TYPE_LEVEL_LOW); | 227 | trigger & IRQ_TYPE_LEVEL_LOW); |
224 | MOD_REG_BIT(OMAP24XX_GPIO_LEVELDETECT1, gpio_bit, | 228 | _gpio_rmw(base, OMAP24XX_GPIO_LEVELDETECT1, gpio_bit, |
225 | trigger & IRQ_TYPE_LEVEL_HIGH); | 229 | trigger & IRQ_TYPE_LEVEL_HIGH); |
226 | MOD_REG_BIT(OMAP24XX_GPIO_RISINGDETECT, gpio_bit, | 230 | _gpio_rmw(base, OMAP24XX_GPIO_RISINGDETECT, gpio_bit, |
227 | trigger & IRQ_TYPE_EDGE_RISING); | 231 | trigger & IRQ_TYPE_EDGE_RISING); |
228 | MOD_REG_BIT(OMAP24XX_GPIO_FALLINGDETECT, gpio_bit, | 232 | _gpio_rmw(base, OMAP24XX_GPIO_FALLINGDETECT, gpio_bit, |
229 | trigger & IRQ_TYPE_EDGE_FALLING); | 233 | trigger & IRQ_TYPE_EDGE_FALLING); |
230 | } | 234 | } |
231 | if (likely(!(bank->non_wakeup_gpios & gpio_bit))) { | 235 | if (likely(!(bank->non_wakeup_gpios & gpio_bit))) { |
232 | if (cpu_is_omap44xx()) { | 236 | if (cpu_is_omap44xx()) { |
233 | MOD_REG_BIT(OMAP4_GPIO_IRQWAKEN0, gpio_bit, | 237 | _gpio_rmw(base, OMAP4_GPIO_IRQWAKEN0, gpio_bit, |
234 | trigger != 0); | 238 | trigger != 0); |
235 | } else { | 239 | } else { |
236 | /* | 240 | /* |
237 | * GPIO wakeup request can only be generated on edge | 241 | * GPIO wakeup request can only be generated on edge |
@@ -1086,6 +1090,11 @@ omap_mpuio_alloc_gc(struct gpio_bank *bank, unsigned int irq_start, | |||
1086 | 1090 | ||
1087 | gc = irq_alloc_generic_chip("MPUIO", 1, irq_start, bank->base, | 1091 | gc = irq_alloc_generic_chip("MPUIO", 1, irq_start, bank->base, |
1088 | handle_simple_irq); | 1092 | handle_simple_irq); |
1093 | if (!gc) { | ||
1094 | dev_err(bank->dev, "Memory alloc failed for gc\n"); | ||
1095 | return; | ||
1096 | } | ||
1097 | |||
1089 | ct = gc->chip_types; | 1098 | ct = gc->chip_types; |
1090 | 1099 | ||
1091 | /* NOTE: No ack required, reading IRQ status clears it. */ | 1100 | /* NOTE: No ack required, reading IRQ status clears it. */ |
diff --git a/drivers/gpio/gpio-pca953x.c b/drivers/gpio/gpio-pca953x.c index 0550dcb85814..147df8ae79db 100644 --- a/drivers/gpio/gpio-pca953x.c +++ b/drivers/gpio/gpio-pca953x.c | |||
@@ -596,9 +596,6 @@ static int __devinit device_pca953x_init(struct pca953x_chip *chip, int invert) | |||
596 | 596 | ||
597 | /* set platform specific polarity inversion */ | 597 | /* set platform specific polarity inversion */ |
598 | ret = pca953x_write_reg(chip, PCA953X_INVERT, invert); | 598 | ret = pca953x_write_reg(chip, PCA953X_INVERT, invert); |
599 | if (ret) | ||
600 | goto out; | ||
601 | return 0; | ||
602 | out: | 599 | out: |
603 | return ret; | 600 | return ret; |
604 | } | 601 | } |
@@ -640,7 +637,7 @@ static int __devinit pca953x_probe(struct i2c_client *client, | |||
640 | struct pca953x_platform_data *pdata; | 637 | struct pca953x_platform_data *pdata; |
641 | struct pca953x_chip *chip; | 638 | struct pca953x_chip *chip; |
642 | int irq_base=0, invert=0; | 639 | int irq_base=0, invert=0; |
643 | int ret = 0; | 640 | int ret; |
644 | 641 | ||
645 | chip = kzalloc(sizeof(struct pca953x_chip), GFP_KERNEL); | 642 | chip = kzalloc(sizeof(struct pca953x_chip), GFP_KERNEL); |
646 | if (chip == NULL) | 643 | if (chip == NULL) |
@@ -673,10 +670,10 @@ static int __devinit pca953x_probe(struct i2c_client *client, | |||
673 | pca953x_setup_gpio(chip, id->driver_data & PCA_GPIO_MASK); | 670 | pca953x_setup_gpio(chip, id->driver_data & PCA_GPIO_MASK); |
674 | 671 | ||
675 | if (chip->chip_type == PCA953X_TYPE) | 672 | if (chip->chip_type == PCA953X_TYPE) |
676 | device_pca953x_init(chip, invert); | 673 | ret = device_pca953x_init(chip, invert); |
677 | else if (chip->chip_type == PCA957X_TYPE) | ||
678 | device_pca957x_init(chip, invert); | ||
679 | else | 674 | else |
675 | ret = device_pca957x_init(chip, invert); | ||
676 | if (ret) | ||
680 | goto out_failed; | 677 | goto out_failed; |
681 | 678 | ||
682 | ret = pca953x_irq_setup(chip, id, irq_base); | 679 | ret = pca953x_irq_setup(chip, id, irq_base); |
diff --git a/drivers/gpu/drm/Kconfig b/drivers/gpu/drm/Kconfig index 785127cb281b..1368826ef284 100644 --- a/drivers/gpu/drm/Kconfig +++ b/drivers/gpu/drm/Kconfig | |||
@@ -9,7 +9,6 @@ menuconfig DRM | |||
9 | depends on (AGP || AGP=n) && !EMULATED_CMPXCHG && MMU | 9 | depends on (AGP || AGP=n) && !EMULATED_CMPXCHG && MMU |
10 | select I2C | 10 | select I2C |
11 | select I2C_ALGOBIT | 11 | select I2C_ALGOBIT |
12 | select SLOW_WORK | ||
13 | help | 12 | help |
14 | Kernel-level support for the Direct Rendering Infrastructure (DRI) | 13 | Kernel-level support for the Direct Rendering Infrastructure (DRI) |
15 | introduced in XFree86 4.0. If you say Y here, you need to select | 14 | introduced in XFree86 4.0. If you say Y here, you need to select |
@@ -96,6 +95,7 @@ config DRM_I915 | |||
96 | select FB_CFB_IMAGEBLIT | 95 | select FB_CFB_IMAGEBLIT |
97 | # i915 depends on ACPI_VIDEO when ACPI is enabled | 96 | # i915 depends on ACPI_VIDEO when ACPI is enabled |
98 | # but for select to work, need to select ACPI_VIDEO's dependencies, ick | 97 | # but for select to work, need to select ACPI_VIDEO's dependencies, ick |
98 | select BACKLIGHT_LCD_SUPPORT if ACPI | ||
99 | select BACKLIGHT_CLASS_DEVICE if ACPI | 99 | select BACKLIGHT_CLASS_DEVICE if ACPI |
100 | select VIDEO_OUTPUT_CONTROL if ACPI | 100 | select VIDEO_OUTPUT_CONTROL if ACPI |
101 | select INPUT if ACPI | 101 | select INPUT if ACPI |
diff --git a/drivers/gpu/drm/drm_crtc.c b/drivers/gpu/drm/drm_crtc.c index 711d9653abd0..405c63b9d539 100644 --- a/drivers/gpu/drm/drm_crtc.c +++ b/drivers/gpu/drm/drm_crtc.c | |||
@@ -163,6 +163,7 @@ static struct drm_conn_prop_enum_list drm_connector_enum_list[] = | |||
163 | { DRM_MODE_CONNECTOR_HDMIB, "HDMI-B", 0 }, | 163 | { DRM_MODE_CONNECTOR_HDMIB, "HDMI-B", 0 }, |
164 | { DRM_MODE_CONNECTOR_TV, "TV", 0 }, | 164 | { DRM_MODE_CONNECTOR_TV, "TV", 0 }, |
165 | { DRM_MODE_CONNECTOR_eDP, "eDP", 0 }, | 165 | { DRM_MODE_CONNECTOR_eDP, "eDP", 0 }, |
166 | { DRM_MODE_CONNECTOR_VIRTUAL, "Virtual", 0}, | ||
166 | }; | 167 | }; |
167 | 168 | ||
168 | static struct drm_prop_enum_list drm_encoder_enum_list[] = | 169 | static struct drm_prop_enum_list drm_encoder_enum_list[] = |
@@ -171,6 +172,7 @@ static struct drm_prop_enum_list drm_encoder_enum_list[] = | |||
171 | { DRM_MODE_ENCODER_TMDS, "TMDS" }, | 172 | { DRM_MODE_ENCODER_TMDS, "TMDS" }, |
172 | { DRM_MODE_ENCODER_LVDS, "LVDS" }, | 173 | { DRM_MODE_ENCODER_LVDS, "LVDS" }, |
173 | { DRM_MODE_ENCODER_TVDAC, "TV" }, | 174 | { DRM_MODE_ENCODER_TVDAC, "TV" }, |
175 | { DRM_MODE_ENCODER_VIRTUAL, "Virtual" }, | ||
174 | }; | 176 | }; |
175 | 177 | ||
176 | char *drm_get_encoder_name(struct drm_encoder *encoder) | 178 | char *drm_get_encoder_name(struct drm_encoder *encoder) |
@@ -464,8 +466,10 @@ void drm_connector_init(struct drm_device *dev, | |||
464 | list_add_tail(&connector->head, &dev->mode_config.connector_list); | 466 | list_add_tail(&connector->head, &dev->mode_config.connector_list); |
465 | dev->mode_config.num_connector++; | 467 | dev->mode_config.num_connector++; |
466 | 468 | ||
467 | drm_connector_attach_property(connector, | 469 | if (connector_type != DRM_MODE_CONNECTOR_VIRTUAL) |
468 | dev->mode_config.edid_property, 0); | 470 | drm_connector_attach_property(connector, |
471 | dev->mode_config.edid_property, | ||
472 | 0); | ||
469 | 473 | ||
470 | drm_connector_attach_property(connector, | 474 | drm_connector_attach_property(connector, |
471 | dev->mode_config.dpms_property, 0); | 475 | dev->mode_config.dpms_property, 0); |
@@ -2114,8 +2118,10 @@ struct drm_property *drm_property_create(struct drm_device *dev, int flags, | |||
2114 | property->num_values = num_values; | 2118 | property->num_values = num_values; |
2115 | INIT_LIST_HEAD(&property->enum_blob_list); | 2119 | INIT_LIST_HEAD(&property->enum_blob_list); |
2116 | 2120 | ||
2117 | if (name) | 2121 | if (name) { |
2118 | strncpy(property->name, name, DRM_PROP_NAME_LEN); | 2122 | strncpy(property->name, name, DRM_PROP_NAME_LEN); |
2123 | property->name[DRM_PROP_NAME_LEN-1] = '\0'; | ||
2124 | } | ||
2119 | 2125 | ||
2120 | list_add_tail(&property->head, &dev->mode_config.property_list); | 2126 | list_add_tail(&property->head, &dev->mode_config.property_list); |
2121 | return property; | 2127 | return property; |
diff --git a/drivers/gpu/drm/drm_crtc_helper.c b/drivers/gpu/drm/drm_crtc_helper.c index 2957636161e8..3969f7553fe7 100644 --- a/drivers/gpu/drm/drm_crtc_helper.c +++ b/drivers/gpu/drm/drm_crtc_helper.c | |||
@@ -484,6 +484,7 @@ int drm_crtc_helper_set_config(struct drm_mode_set *set) | |||
484 | struct drm_connector *save_connectors, *connector; | 484 | struct drm_connector *save_connectors, *connector; |
485 | int count = 0, ro, fail = 0; | 485 | int count = 0, ro, fail = 0; |
486 | struct drm_crtc_helper_funcs *crtc_funcs; | 486 | struct drm_crtc_helper_funcs *crtc_funcs; |
487 | struct drm_mode_set save_set; | ||
487 | int ret = 0; | 488 | int ret = 0; |
488 | int i; | 489 | int i; |
489 | 490 | ||
@@ -556,6 +557,12 @@ int drm_crtc_helper_set_config(struct drm_mode_set *set) | |||
556 | save_connectors[count++] = *connector; | 557 | save_connectors[count++] = *connector; |
557 | } | 558 | } |
558 | 559 | ||
560 | save_set.crtc = set->crtc; | ||
561 | save_set.mode = &set->crtc->mode; | ||
562 | save_set.x = set->crtc->x; | ||
563 | save_set.y = set->crtc->y; | ||
564 | save_set.fb = set->crtc->fb; | ||
565 | |||
559 | /* We should be able to check here if the fb has the same properties | 566 | /* We should be able to check here if the fb has the same properties |
560 | * and then just flip_or_move it */ | 567 | * and then just flip_or_move it */ |
561 | if (set->crtc->fb != set->fb) { | 568 | if (set->crtc->fb != set->fb) { |
@@ -721,6 +728,12 @@ fail: | |||
721 | *connector = save_connectors[count++]; | 728 | *connector = save_connectors[count++]; |
722 | } | 729 | } |
723 | 730 | ||
731 | /* Try to restore the config */ | ||
732 | if (mode_changed && | ||
733 | !drm_crtc_helper_set_mode(save_set.crtc, save_set.mode, save_set.x, | ||
734 | save_set.y, save_set.fb)) | ||
735 | DRM_ERROR("failed to restore config after modeset failure\n"); | ||
736 | |||
724 | kfree(save_connectors); | 737 | kfree(save_connectors); |
725 | kfree(save_encoders); | 738 | kfree(save_encoders); |
726 | kfree(save_crtcs); | 739 | kfree(save_crtcs); |
diff --git a/drivers/gpu/drm/drm_debugfs.c b/drivers/gpu/drm/drm_debugfs.c index d067c12ba940..1c7a1c0d3edd 100644 --- a/drivers/gpu/drm/drm_debugfs.c +++ b/drivers/gpu/drm/drm_debugfs.c | |||
@@ -118,7 +118,10 @@ int drm_debugfs_create_files(struct drm_info_list *files, int count, | |||
118 | tmp->minor = minor; | 118 | tmp->minor = minor; |
119 | tmp->dent = ent; | 119 | tmp->dent = ent; |
120 | tmp->info_ent = &files[i]; | 120 | tmp->info_ent = &files[i]; |
121 | list_add(&(tmp->list), &(minor->debugfs_nodes.list)); | 121 | |
122 | mutex_lock(&minor->debugfs_lock); | ||
123 | list_add(&tmp->list, &minor->debugfs_list); | ||
124 | mutex_unlock(&minor->debugfs_lock); | ||
122 | } | 125 | } |
123 | return 0; | 126 | return 0; |
124 | 127 | ||
@@ -146,7 +149,8 @@ int drm_debugfs_init(struct drm_minor *minor, int minor_id, | |||
146 | char name[64]; | 149 | char name[64]; |
147 | int ret; | 150 | int ret; |
148 | 151 | ||
149 | INIT_LIST_HEAD(&minor->debugfs_nodes.list); | 152 | INIT_LIST_HEAD(&minor->debugfs_list); |
153 | mutex_init(&minor->debugfs_lock); | ||
150 | sprintf(name, "%d", minor_id); | 154 | sprintf(name, "%d", minor_id); |
151 | minor->debugfs_root = debugfs_create_dir(name, root); | 155 | minor->debugfs_root = debugfs_create_dir(name, root); |
152 | if (!minor->debugfs_root) { | 156 | if (!minor->debugfs_root) { |
@@ -192,8 +196,9 @@ int drm_debugfs_remove_files(struct drm_info_list *files, int count, | |||
192 | struct drm_info_node *tmp; | 196 | struct drm_info_node *tmp; |
193 | int i; | 197 | int i; |
194 | 198 | ||
199 | mutex_lock(&minor->debugfs_lock); | ||
195 | for (i = 0; i < count; i++) { | 200 | for (i = 0; i < count; i++) { |
196 | list_for_each_safe(pos, q, &minor->debugfs_nodes.list) { | 201 | list_for_each_safe(pos, q, &minor->debugfs_list) { |
197 | tmp = list_entry(pos, struct drm_info_node, list); | 202 | tmp = list_entry(pos, struct drm_info_node, list); |
198 | if (tmp->info_ent == &files[i]) { | 203 | if (tmp->info_ent == &files[i]) { |
199 | debugfs_remove(tmp->dent); | 204 | debugfs_remove(tmp->dent); |
@@ -202,6 +207,7 @@ int drm_debugfs_remove_files(struct drm_info_list *files, int count, | |||
202 | } | 207 | } |
203 | } | 208 | } |
204 | } | 209 | } |
210 | mutex_unlock(&minor->debugfs_lock); | ||
205 | return 0; | 211 | return 0; |
206 | } | 212 | } |
207 | EXPORT_SYMBOL(drm_debugfs_remove_files); | 213 | EXPORT_SYMBOL(drm_debugfs_remove_files); |
diff --git a/drivers/gpu/drm/drm_drv.c b/drivers/gpu/drm/drm_drv.c index fc81af9dbf42..40c187c60f44 100644 --- a/drivers/gpu/drm/drm_drv.c +++ b/drivers/gpu/drm/drm_drv.c | |||
@@ -125,7 +125,7 @@ static struct drm_ioctl_desc drm_ioctls[] = { | |||
125 | DRM_IOCTL_DEF(DRM_IOCTL_SG_ALLOC, drm_sg_alloc_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), | 125 | DRM_IOCTL_DEF(DRM_IOCTL_SG_ALLOC, drm_sg_alloc_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), |
126 | DRM_IOCTL_DEF(DRM_IOCTL_SG_FREE, drm_sg_free, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), | 126 | DRM_IOCTL_DEF(DRM_IOCTL_SG_FREE, drm_sg_free, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), |
127 | 127 | ||
128 | DRM_IOCTL_DEF(DRM_IOCTL_WAIT_VBLANK, drm_wait_vblank, 0), | 128 | DRM_IOCTL_DEF(DRM_IOCTL_WAIT_VBLANK, drm_wait_vblank, DRM_UNLOCKED), |
129 | 129 | ||
130 | DRM_IOCTL_DEF(DRM_IOCTL_MODESET_CTL, drm_modeset_ctl, 0), | 130 | DRM_IOCTL_DEF(DRM_IOCTL_MODESET_CTL, drm_modeset_ctl, 0), |
131 | 131 | ||
diff --git a/drivers/gpu/drm/drm_irq.c b/drivers/gpu/drm/drm_irq.c index cb3794a00f98..68b756253f9f 100644 --- a/drivers/gpu/drm/drm_irq.c +++ b/drivers/gpu/drm/drm_irq.c | |||
@@ -407,13 +407,16 @@ int drm_irq_uninstall(struct drm_device *dev) | |||
407 | /* | 407 | /* |
408 | * Wake up any waiters so they don't hang. | 408 | * Wake up any waiters so they don't hang. |
409 | */ | 409 | */ |
410 | spin_lock_irqsave(&dev->vbl_lock, irqflags); | 410 | if (dev->num_crtcs) { |
411 | for (i = 0; i < dev->num_crtcs; i++) { | 411 | spin_lock_irqsave(&dev->vbl_lock, irqflags); |
412 | DRM_WAKEUP(&dev->vbl_queue[i]); | 412 | for (i = 0; i < dev->num_crtcs; i++) { |
413 | dev->vblank_enabled[i] = 0; | 413 | DRM_WAKEUP(&dev->vbl_queue[i]); |
414 | dev->last_vblank[i] = dev->driver->get_vblank_counter(dev, i); | 414 | dev->vblank_enabled[i] = 0; |
415 | dev->last_vblank[i] = | ||
416 | dev->driver->get_vblank_counter(dev, i); | ||
417 | } | ||
418 | spin_unlock_irqrestore(&dev->vbl_lock, irqflags); | ||
415 | } | 419 | } |
416 | spin_unlock_irqrestore(&dev->vbl_lock, irqflags); | ||
417 | 420 | ||
418 | if (!irq_enabled) | 421 | if (!irq_enabled) |
419 | return -EINVAL; | 422 | return -EINVAL; |
@@ -1125,6 +1128,7 @@ static int drm_queue_vblank_event(struct drm_device *dev, int pipe, | |||
1125 | trace_drm_vblank_event_delivered(current->pid, pipe, | 1128 | trace_drm_vblank_event_delivered(current->pid, pipe, |
1126 | vblwait->request.sequence); | 1129 | vblwait->request.sequence); |
1127 | } else { | 1130 | } else { |
1131 | /* drm_handle_vblank_events will call drm_vblank_put */ | ||
1128 | list_add_tail(&e->base.link, &dev->vblank_event_list); | 1132 | list_add_tail(&e->base.link, &dev->vblank_event_list); |
1129 | vblwait->reply.sequence = vblwait->request.sequence; | 1133 | vblwait->reply.sequence = vblwait->request.sequence; |
1130 | } | 1134 | } |
@@ -1205,8 +1209,12 @@ int drm_wait_vblank(struct drm_device *dev, void *data, | |||
1205 | goto done; | 1209 | goto done; |
1206 | } | 1210 | } |
1207 | 1211 | ||
1208 | if (flags & _DRM_VBLANK_EVENT) | 1212 | if (flags & _DRM_VBLANK_EVENT) { |
1213 | /* must hold on to the vblank ref until the event fires | ||
1214 | * drm_vblank_put will be called asynchronously | ||
1215 | */ | ||
1209 | return drm_queue_vblank_event(dev, crtc, vblwait, file_priv); | 1216 | return drm_queue_vblank_event(dev, crtc, vblwait, file_priv); |
1217 | } | ||
1210 | 1218 | ||
1211 | if ((flags & _DRM_VBLANK_NEXTONMISS) && | 1219 | if ((flags & _DRM_VBLANK_NEXTONMISS) && |
1212 | (seq - vblwait->request.sequence) <= (1<<23)) { | 1220 | (seq - vblwait->request.sequence) <= (1<<23)) { |
diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c index d14b44e13f51..4f40f1ce1d8e 100644 --- a/drivers/gpu/drm/i915/i915_debugfs.c +++ b/drivers/gpu/drm/i915/i915_debugfs.c | |||
@@ -1506,7 +1506,10 @@ drm_add_fake_info_node(struct drm_minor *minor, | |||
1506 | node->minor = minor; | 1506 | node->minor = minor; |
1507 | node->dent = ent; | 1507 | node->dent = ent; |
1508 | node->info_ent = (void *) key; | 1508 | node->info_ent = (void *) key; |
1509 | list_add(&node->list, &minor->debugfs_nodes.list); | 1509 | |
1510 | mutex_lock(&minor->debugfs_lock); | ||
1511 | list_add(&node->list, &minor->debugfs_list); | ||
1512 | mutex_unlock(&minor->debugfs_lock); | ||
1510 | 1513 | ||
1511 | return 0; | 1514 | return 0; |
1512 | } | 1515 | } |
diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c index cc531bb59c26..e9c2cfe45daa 100644 --- a/drivers/gpu/drm/i915/i915_drv.c +++ b/drivers/gpu/drm/i915/i915_drv.c | |||
@@ -789,8 +789,8 @@ static struct vm_operations_struct i915_gem_vm_ops = { | |||
789 | }; | 789 | }; |
790 | 790 | ||
791 | static struct drm_driver driver = { | 791 | static struct drm_driver driver = { |
792 | /* don't use mtrr's here, the Xserver or user space app should | 792 | /* Don't use MTRRs here; the Xserver or userspace app should |
793 | * deal with them for intel hardware. | 793 | * deal with them for Intel hardware. |
794 | */ | 794 | */ |
795 | .driver_features = | 795 | .driver_features = |
796 | DRIVER_USE_AGP | DRIVER_REQUIRE_AGP | /* DRIVER_USE_MTRR |*/ | 796 | DRIVER_USE_AGP | DRIVER_REQUIRE_AGP | /* DRIVER_USE_MTRR |*/ |
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c index 6651c36b6e8a..d18b07adcffa 100644 --- a/drivers/gpu/drm/i915/i915_gem.c +++ b/drivers/gpu/drm/i915/i915_gem.c | |||
@@ -1396,7 +1396,7 @@ i915_gem_mmap_gtt(struct drm_file *file, | |||
1396 | 1396 | ||
1397 | if (obj->base.size > dev_priv->mm.gtt_mappable_end) { | 1397 | if (obj->base.size > dev_priv->mm.gtt_mappable_end) { |
1398 | ret = -E2BIG; | 1398 | ret = -E2BIG; |
1399 | goto unlock; | 1399 | goto out; |
1400 | } | 1400 | } |
1401 | 1401 | ||
1402 | if (obj->madv != I915_MADV_WILLNEED) { | 1402 | if (obj->madv != I915_MADV_WILLNEED) { |
diff --git a/drivers/gpu/drm/nouveau/nouveau_bios.c b/drivers/gpu/drm/nouveau/nouveau_bios.c index 032a82098136..5fc201b49d30 100644 --- a/drivers/gpu/drm/nouveau/nouveau_bios.c +++ b/drivers/gpu/drm/nouveau/nouveau_bios.c | |||
@@ -640,10 +640,9 @@ static int | |||
640 | nv50_pll_set(struct drm_device *dev, uint32_t reg, uint32_t clk) | 640 | nv50_pll_set(struct drm_device *dev, uint32_t reg, uint32_t clk) |
641 | { | 641 | { |
642 | struct drm_nouveau_private *dev_priv = dev->dev_private; | 642 | struct drm_nouveau_private *dev_priv = dev->dev_private; |
643 | uint32_t reg0 = nv_rd32(dev, reg + 0); | ||
644 | uint32_t reg1 = nv_rd32(dev, reg + 4); | ||
645 | struct nouveau_pll_vals pll; | 643 | struct nouveau_pll_vals pll; |
646 | struct pll_lims pll_limits; | 644 | struct pll_lims pll_limits; |
645 | u32 ctrl, mask, coef; | ||
647 | int ret; | 646 | int ret; |
648 | 647 | ||
649 | ret = get_pll_limits(dev, reg, &pll_limits); | 648 | ret = get_pll_limits(dev, reg, &pll_limits); |
@@ -654,15 +653,20 @@ nv50_pll_set(struct drm_device *dev, uint32_t reg, uint32_t clk) | |||
654 | if (!clk) | 653 | if (!clk) |
655 | return -ERANGE; | 654 | return -ERANGE; |
656 | 655 | ||
657 | reg0 = (reg0 & 0xfff8ffff) | (pll.log2P << 16); | 656 | coef = pll.N1 << 8 | pll.M1; |
658 | reg1 = (reg1 & 0xffff0000) | (pll.N1 << 8) | pll.M1; | 657 | ctrl = pll.log2P << 16; |
659 | 658 | mask = 0x00070000; | |
660 | if (dev_priv->vbios.execute) { | 659 | if (reg == 0x004008) { |
661 | still_alive(); | 660 | mask |= 0x01f80000; |
662 | nv_wr32(dev, reg + 4, reg1); | 661 | ctrl |= (pll_limits.log2p_bias << 19); |
663 | nv_wr32(dev, reg + 0, reg0); | 662 | ctrl |= (pll.log2P << 22); |
664 | } | 663 | } |
665 | 664 | ||
665 | if (!dev_priv->vbios.execute) | ||
666 | return 0; | ||
667 | |||
668 | nv_mask(dev, reg + 0, mask, ctrl); | ||
669 | nv_wr32(dev, reg + 4, coef); | ||
666 | return 0; | 670 | return 0; |
667 | } | 671 | } |
668 | 672 | ||
diff --git a/drivers/gpu/drm/nouveau/nouveau_bo.c b/drivers/gpu/drm/nouveau/nouveau_bo.c index 7226f419e178..7cc37e690860 100644 --- a/drivers/gpu/drm/nouveau/nouveau_bo.c +++ b/drivers/gpu/drm/nouveau/nouveau_bo.c | |||
@@ -148,7 +148,7 @@ set_placement_range(struct nouveau_bo *nvbo, uint32_t type) | |||
148 | 148 | ||
149 | if (dev_priv->card_type == NV_10 && | 149 | if (dev_priv->card_type == NV_10 && |
150 | nvbo->tile_mode && (type & TTM_PL_FLAG_VRAM) && | 150 | nvbo->tile_mode && (type & TTM_PL_FLAG_VRAM) && |
151 | nvbo->bo.mem.num_pages < vram_pages / 2) { | 151 | nvbo->bo.mem.num_pages < vram_pages / 4) { |
152 | /* | 152 | /* |
153 | * Make sure that the color and depth buffers are handled | 153 | * Make sure that the color and depth buffers are handled |
154 | * by independent memory controller units. Up to a 9x | 154 | * by independent memory controller units. Up to a 9x |
diff --git a/drivers/gpu/drm/nouveau/nouveau_channel.c b/drivers/gpu/drm/nouveau/nouveau_channel.c index a319d5646ea9..bb6ec9ef8676 100644 --- a/drivers/gpu/drm/nouveau/nouveau_channel.c +++ b/drivers/gpu/drm/nouveau/nouveau_channel.c | |||
@@ -158,6 +158,7 @@ nouveau_channel_alloc(struct drm_device *dev, struct nouveau_channel **chan_ret, | |||
158 | INIT_LIST_HEAD(&chan->nvsw.vbl_wait); | 158 | INIT_LIST_HEAD(&chan->nvsw.vbl_wait); |
159 | INIT_LIST_HEAD(&chan->nvsw.flip); | 159 | INIT_LIST_HEAD(&chan->nvsw.flip); |
160 | INIT_LIST_HEAD(&chan->fence.pending); | 160 | INIT_LIST_HEAD(&chan->fence.pending); |
161 | spin_lock_init(&chan->fence.lock); | ||
161 | 162 | ||
162 | /* setup channel's memory and vm */ | 163 | /* setup channel's memory and vm */ |
163 | ret = nouveau_gpuobj_channel_init(chan, vram_handle, gart_handle); | 164 | ret = nouveau_gpuobj_channel_init(chan, vram_handle, gart_handle); |
diff --git a/drivers/gpu/drm/nouveau/nouveau_connector.c b/drivers/gpu/drm/nouveau/nouveau_connector.c index e0d275e1c96c..cea6696b1906 100644 --- a/drivers/gpu/drm/nouveau/nouveau_connector.c +++ b/drivers/gpu/drm/nouveau/nouveau_connector.c | |||
@@ -710,7 +710,7 @@ nouveau_connector_mode_valid(struct drm_connector *connector, | |||
710 | case OUTPUT_DP: | 710 | case OUTPUT_DP: |
711 | max_clock = nv_encoder->dp.link_nr; | 711 | max_clock = nv_encoder->dp.link_nr; |
712 | max_clock *= nv_encoder->dp.link_bw; | 712 | max_clock *= nv_encoder->dp.link_bw; |
713 | clock = clock * nouveau_connector_bpp(connector) / 8; | 713 | clock = clock * nouveau_connector_bpp(connector) / 10; |
714 | break; | 714 | break; |
715 | default: | 715 | default: |
716 | BUG_ON(1); | 716 | BUG_ON(1); |
diff --git a/drivers/gpu/drm/nouveau/nouveau_fbcon.c b/drivers/gpu/drm/nouveau/nouveau_fbcon.c index 14a8627efe4d..3a4cc32b9e44 100644 --- a/drivers/gpu/drm/nouveau/nouveau_fbcon.c +++ b/drivers/gpu/drm/nouveau/nouveau_fbcon.c | |||
@@ -487,6 +487,7 @@ int nouveau_fbcon_init(struct drm_device *dev) | |||
487 | { | 487 | { |
488 | struct drm_nouveau_private *dev_priv = dev->dev_private; | 488 | struct drm_nouveau_private *dev_priv = dev->dev_private; |
489 | struct nouveau_fbdev *nfbdev; | 489 | struct nouveau_fbdev *nfbdev; |
490 | int preferred_bpp; | ||
490 | int ret; | 491 | int ret; |
491 | 492 | ||
492 | nfbdev = kzalloc(sizeof(struct nouveau_fbdev), GFP_KERNEL); | 493 | nfbdev = kzalloc(sizeof(struct nouveau_fbdev), GFP_KERNEL); |
@@ -505,7 +506,15 @@ int nouveau_fbcon_init(struct drm_device *dev) | |||
505 | } | 506 | } |
506 | 507 | ||
507 | drm_fb_helper_single_add_all_connectors(&nfbdev->helper); | 508 | drm_fb_helper_single_add_all_connectors(&nfbdev->helper); |
508 | drm_fb_helper_initial_config(&nfbdev->helper, 32); | 509 | |
510 | if (dev_priv->vram_size <= 32 * 1024 * 1024) | ||
511 | preferred_bpp = 8; | ||
512 | else if (dev_priv->vram_size <= 64 * 1024 * 1024) | ||
513 | preferred_bpp = 16; | ||
514 | else | ||
515 | preferred_bpp = 32; | ||
516 | |||
517 | drm_fb_helper_initial_config(&nfbdev->helper, preferred_bpp); | ||
509 | return 0; | 518 | return 0; |
510 | } | 519 | } |
511 | 520 | ||
diff --git a/drivers/gpu/drm/nouveau/nouveau_fence.c b/drivers/gpu/drm/nouveau/nouveau_fence.c index 81116cfea275..2f6daae68b9d 100644 --- a/drivers/gpu/drm/nouveau/nouveau_fence.c +++ b/drivers/gpu/drm/nouveau/nouveau_fence.c | |||
@@ -539,8 +539,6 @@ nouveau_fence_channel_init(struct nouveau_channel *chan) | |||
539 | return ret; | 539 | return ret; |
540 | } | 540 | } |
541 | 541 | ||
542 | INIT_LIST_HEAD(&chan->fence.pending); | ||
543 | spin_lock_init(&chan->fence.lock); | ||
544 | atomic_set(&chan->fence.last_sequence_irq, 0); | 542 | atomic_set(&chan->fence.last_sequence_irq, 0); |
545 | return 0; | 543 | return 0; |
546 | } | 544 | } |
diff --git a/drivers/gpu/drm/nouveau/nouveau_i2c.c b/drivers/gpu/drm/nouveau/nouveau_i2c.c index c6143df48b9f..d39b2202b197 100644 --- a/drivers/gpu/drm/nouveau/nouveau_i2c.c +++ b/drivers/gpu/drm/nouveau/nouveau_i2c.c | |||
@@ -333,7 +333,7 @@ nouveau_i2c_identify(struct drm_device *dev, const char *what, | |||
333 | 333 | ||
334 | NV_DEBUG(dev, "Probing %ss on I2C bus: %d\n", what, index); | 334 | NV_DEBUG(dev, "Probing %ss on I2C bus: %d\n", what, index); |
335 | 335 | ||
336 | for (i = 0; info[i].addr; i++) { | 336 | for (i = 0; i2c && info[i].addr; i++) { |
337 | if (nouveau_probe_i2c_addr(i2c, info[i].addr) && | 337 | if (nouveau_probe_i2c_addr(i2c, info[i].addr) && |
338 | (!match || match(i2c, &info[i]))) { | 338 | (!match || match(i2c, &info[i]))) { |
339 | NV_INFO(dev, "Detected %s: %s\n", what, info[i].type); | 339 | NV_INFO(dev, "Detected %s: %s\n", what, info[i].type); |
diff --git a/drivers/gpu/drm/nouveau/nouveau_perf.c b/drivers/gpu/drm/nouveau/nouveau_perf.c index 9f178aa94162..33d03fbf00df 100644 --- a/drivers/gpu/drm/nouveau/nouveau_perf.c +++ b/drivers/gpu/drm/nouveau/nouveau_perf.c | |||
@@ -239,7 +239,7 @@ nouveau_perf_init(struct drm_device *dev) | |||
239 | if(version == 0x15) { | 239 | if(version == 0x15) { |
240 | memtimings->timing = | 240 | memtimings->timing = |
241 | kcalloc(entries, sizeof(*memtimings->timing), GFP_KERNEL); | 241 | kcalloc(entries, sizeof(*memtimings->timing), GFP_KERNEL); |
242 | if(!memtimings) { | 242 | if (!memtimings->timing) { |
243 | NV_WARN(dev,"Could not allocate memtiming table\n"); | 243 | NV_WARN(dev,"Could not allocate memtiming table\n"); |
244 | return; | 244 | return; |
245 | } | 245 | } |
diff --git a/drivers/gpu/drm/nouveau/nouveau_state.c b/drivers/gpu/drm/nouveau/nouveau_state.c index 82478e0998e5..d8831ab42bb9 100644 --- a/drivers/gpu/drm/nouveau/nouveau_state.c +++ b/drivers/gpu/drm/nouveau/nouveau_state.c | |||
@@ -579,6 +579,14 @@ nouveau_card_init(struct drm_device *dev) | |||
579 | if (ret) | 579 | if (ret) |
580 | goto out_display_early; | 580 | goto out_display_early; |
581 | 581 | ||
582 | /* workaround an odd issue on nvc1 by disabling the device's | ||
583 | * nosnoop capability. hopefully won't cause issues until a | ||
584 | * better fix is found - assuming there is one... | ||
585 | */ | ||
586 | if (dev_priv->chipset == 0xc1) { | ||
587 | nv_mask(dev, 0x00088080, 0x00000800, 0x00000000); | ||
588 | } | ||
589 | |||
582 | nouveau_pm_init(dev); | 590 | nouveau_pm_init(dev); |
583 | 591 | ||
584 | ret = engine->vram.init(dev); | 592 | ret = engine->vram.init(dev); |
@@ -1102,12 +1110,13 @@ int nouveau_load(struct drm_device *dev, unsigned long flags) | |||
1102 | dev_priv->noaccel = !!nouveau_noaccel; | 1110 | dev_priv->noaccel = !!nouveau_noaccel; |
1103 | if (nouveau_noaccel == -1) { | 1111 | if (nouveau_noaccel == -1) { |
1104 | switch (dev_priv->chipset) { | 1112 | switch (dev_priv->chipset) { |
1105 | case 0xc1: /* known broken */ | 1113 | #if 0 |
1106 | case 0xc8: /* never tested */ | 1114 | case 0xXX: /* known broken */ |
1107 | NV_INFO(dev, "acceleration disabled by default, pass " | 1115 | NV_INFO(dev, "acceleration disabled by default, pass " |
1108 | "noaccel=0 to force enable\n"); | 1116 | "noaccel=0 to force enable\n"); |
1109 | dev_priv->noaccel = true; | 1117 | dev_priv->noaccel = true; |
1110 | break; | 1118 | break; |
1119 | #endif | ||
1111 | default: | 1120 | default: |
1112 | dev_priv->noaccel = false; | 1121 | dev_priv->noaccel = false; |
1113 | break; | 1122 | break; |
diff --git a/drivers/gpu/drm/nouveau/nv40_pm.c b/drivers/gpu/drm/nouveau/nv40_pm.c index bbc0b9c7e1f7..e676b0d53478 100644 --- a/drivers/gpu/drm/nouveau/nv40_pm.c +++ b/drivers/gpu/drm/nouveau/nv40_pm.c | |||
@@ -57,12 +57,14 @@ read_pll_2(struct drm_device *dev, u32 reg) | |||
57 | int P = (ctrl & 0x00070000) >> 16; | 57 | int P = (ctrl & 0x00070000) >> 16; |
58 | u32 ref = 27000, clk = 0; | 58 | u32 ref = 27000, clk = 0; |
59 | 59 | ||
60 | if (ctrl & 0x80000000) | 60 | if ((ctrl & 0x80000000) && M1) { |
61 | clk = ref * N1 / M1; | 61 | clk = ref * N1 / M1; |
62 | 62 | if ((ctrl & 0x40000100) == 0x40000000) { | |
63 | if (!(ctrl & 0x00000100)) { | 63 | if (M2) |
64 | if (ctrl & 0x40000000) | 64 | clk = clk * N2 / M2; |
65 | clk = clk * N2 / M2; | 65 | else |
66 | clk = 0; | ||
67 | } | ||
66 | } | 68 | } |
67 | 69 | ||
68 | return clk >> P; | 70 | return clk >> P; |
@@ -177,6 +179,11 @@ nv40_pm_clocks_pre(struct drm_device *dev, struct nouveau_pm_level *perflvl) | |||
177 | } | 179 | } |
178 | 180 | ||
179 | /* memory clock */ | 181 | /* memory clock */ |
182 | if (!perflvl->memory) { | ||
183 | info->mpll_ctrl = 0x00000000; | ||
184 | goto out; | ||
185 | } | ||
186 | |||
180 | ret = nv40_calc_pll(dev, 0x004020, &pll, perflvl->memory, | 187 | ret = nv40_calc_pll(dev, 0x004020, &pll, perflvl->memory, |
181 | &N1, &M1, &N2, &M2, &log2P); | 188 | &N1, &M1, &N2, &M2, &log2P); |
182 | if (ret < 0) | 189 | if (ret < 0) |
@@ -264,6 +271,9 @@ nv40_pm_clocks_set(struct drm_device *dev, void *pre_state) | |||
264 | mdelay(5); | 271 | mdelay(5); |
265 | nv_mask(dev, 0x00c040, 0x00000333, info->ctrl); | 272 | nv_mask(dev, 0x00c040, 0x00000333, info->ctrl); |
266 | 273 | ||
274 | if (!info->mpll_ctrl) | ||
275 | goto resume; | ||
276 | |||
267 | /* wait for vblank start on active crtcs, disable memory access */ | 277 | /* wait for vblank start on active crtcs, disable memory access */ |
268 | for (i = 0; i < 2; i++) { | 278 | for (i = 0; i < 2; i++) { |
269 | if (!(crtc_mask & (1 << i))) | 279 | if (!(crtc_mask & (1 << i))) |
diff --git a/drivers/gpu/drm/nouveau/nv50_graph.c b/drivers/gpu/drm/nouveau/nv50_graph.c index 8c979b31ff61..ac601f7c4e1a 100644 --- a/drivers/gpu/drm/nouveau/nv50_graph.c +++ b/drivers/gpu/drm/nouveau/nv50_graph.c | |||
@@ -131,8 +131,8 @@ nv50_graph_init(struct drm_device *dev, int engine) | |||
131 | NV_DEBUG(dev, "\n"); | 131 | NV_DEBUG(dev, "\n"); |
132 | 132 | ||
133 | /* master reset */ | 133 | /* master reset */ |
134 | nv_mask(dev, 0x000200, 0x00200100, 0x00000000); | 134 | nv_mask(dev, 0x000200, 0x00201000, 0x00000000); |
135 | nv_mask(dev, 0x000200, 0x00200100, 0x00200100); | 135 | nv_mask(dev, 0x000200, 0x00201000, 0x00201000); |
136 | nv_wr32(dev, 0x40008c, 0x00000004); /* HW_CTX_SWITCH_ENABLED */ | 136 | nv_wr32(dev, 0x40008c, 0x00000004); /* HW_CTX_SWITCH_ENABLED */ |
137 | 137 | ||
138 | /* reset/enable traps and interrupts */ | 138 | /* reset/enable traps and interrupts */ |
diff --git a/drivers/gpu/drm/nouveau/nv50_grctx.c b/drivers/gpu/drm/nouveau/nv50_grctx.c index d05c2c3b2444..4b46d6968566 100644 --- a/drivers/gpu/drm/nouveau/nv50_grctx.c +++ b/drivers/gpu/drm/nouveau/nv50_grctx.c | |||
@@ -601,7 +601,7 @@ nv50_graph_construct_mmio(struct nouveau_grctx *ctx) | |||
601 | gr_def(ctx, offset + 0x1c, 0x00880000); | 601 | gr_def(ctx, offset + 0x1c, 0x00880000); |
602 | break; | 602 | break; |
603 | case 0x86: | 603 | case 0x86: |
604 | gr_def(ctx, offset + 0x1c, 0x008c0000); | 604 | gr_def(ctx, offset + 0x1c, 0x018c0000); |
605 | break; | 605 | break; |
606 | case 0x92: | 606 | case 0x92: |
607 | case 0x96: | 607 | case 0x96: |
diff --git a/drivers/gpu/drm/nouveau/nv50_vram.c b/drivers/gpu/drm/nouveau/nv50_vram.c index 9da23838e63e..2e45e57fd869 100644 --- a/drivers/gpu/drm/nouveau/nv50_vram.c +++ b/drivers/gpu/drm/nouveau/nv50_vram.c | |||
@@ -160,7 +160,7 @@ nv50_vram_rblock(struct drm_device *dev) | |||
160 | colbits = (r4 & 0x0000f000) >> 12; | 160 | colbits = (r4 & 0x0000f000) >> 12; |
161 | rowbitsa = ((r4 & 0x000f0000) >> 16) + 8; | 161 | rowbitsa = ((r4 & 0x000f0000) >> 16) + 8; |
162 | rowbitsb = ((r4 & 0x00f00000) >> 20) + 8; | 162 | rowbitsb = ((r4 & 0x00f00000) >> 20) + 8; |
163 | banks = ((r4 & 0x01000000) ? 8 : 4); | 163 | banks = 1 << (((r4 & 0x03000000) >> 24) + 2); |
164 | 164 | ||
165 | rowsize = parts * banks * (1 << colbits) * 8; | 165 | rowsize = parts * banks * (1 << colbits) * 8; |
166 | predicted = rowsize << rowbitsa; | 166 | predicted = rowsize << rowbitsa; |
diff --git a/drivers/gpu/drm/nouveau/nvc0_graph.c b/drivers/gpu/drm/nouveau/nvc0_graph.c index bbdbc51830c8..a74e501afd25 100644 --- a/drivers/gpu/drm/nouveau/nvc0_graph.c +++ b/drivers/gpu/drm/nouveau/nvc0_graph.c | |||
@@ -157,8 +157,8 @@ nvc0_graph_create_context_mmio_list(struct nouveau_channel *chan) | |||
157 | struct nvc0_graph_priv *priv = nv_engine(chan->dev, NVOBJ_ENGINE_GR); | 157 | struct nvc0_graph_priv *priv = nv_engine(chan->dev, NVOBJ_ENGINE_GR); |
158 | struct nvc0_graph_chan *grch = chan->engctx[NVOBJ_ENGINE_GR]; | 158 | struct nvc0_graph_chan *grch = chan->engctx[NVOBJ_ENGINE_GR]; |
159 | struct drm_device *dev = chan->dev; | 159 | struct drm_device *dev = chan->dev; |
160 | struct drm_nouveau_private *dev_priv = dev->dev_private; | ||
160 | int i = 0, gpc, tp, ret; | 161 | int i = 0, gpc, tp, ret; |
161 | u32 magic; | ||
162 | 162 | ||
163 | ret = nouveau_gpuobj_new(dev, chan, 0x2000, 256, NVOBJ_FLAG_VM, | 163 | ret = nouveau_gpuobj_new(dev, chan, 0x2000, 256, NVOBJ_FLAG_VM, |
164 | &grch->unk408004); | 164 | &grch->unk408004); |
@@ -207,14 +207,37 @@ nvc0_graph_create_context_mmio_list(struct nouveau_channel *chan) | |||
207 | nv_wo32(grch->mmio, i++ * 4, 0x0041880c); | 207 | nv_wo32(grch->mmio, i++ * 4, 0x0041880c); |
208 | nv_wo32(grch->mmio, i++ * 4, 0x80000018); | 208 | nv_wo32(grch->mmio, i++ * 4, 0x80000018); |
209 | 209 | ||
210 | magic = 0x02180000; | 210 | if (dev_priv->chipset != 0xc1) { |
211 | nv_wo32(grch->mmio, i++ * 4, 0x00405830); | 211 | u32 magic = 0x02180000; |
212 | nv_wo32(grch->mmio, i++ * 4, magic); | 212 | nv_wo32(grch->mmio, i++ * 4, 0x00405830); |
213 | for (gpc = 0; gpc < priv->gpc_nr; gpc++) { | 213 | nv_wo32(grch->mmio, i++ * 4, magic); |
214 | for (tp = 0; tp < priv->tp_nr[gpc]; tp++, magic += 0x0324) { | 214 | for (gpc = 0; gpc < priv->gpc_nr; gpc++) { |
215 | u32 reg = 0x504520 + (gpc * 0x8000) + (tp * 0x0800); | 215 | for (tp = 0; tp < priv->tp_nr[gpc]; tp++) { |
216 | nv_wo32(grch->mmio, i++ * 4, reg); | 216 | u32 reg = TP_UNIT(gpc, tp, 0x520); |
217 | nv_wo32(grch->mmio, i++ * 4, magic); | 217 | nv_wo32(grch->mmio, i++ * 4, reg); |
218 | nv_wo32(grch->mmio, i++ * 4, magic); | ||
219 | magic += 0x0324; | ||
220 | } | ||
221 | } | ||
222 | } else { | ||
223 | u32 magic = 0x02180000; | ||
224 | nv_wo32(grch->mmio, i++ * 4, 0x00405830); | ||
225 | nv_wo32(grch->mmio, i++ * 4, magic | 0x0000218); | ||
226 | nv_wo32(grch->mmio, i++ * 4, 0x004064c4); | ||
227 | nv_wo32(grch->mmio, i++ * 4, 0x0086ffff); | ||
228 | for (gpc = 0; gpc < priv->gpc_nr; gpc++) { | ||
229 | for (tp = 0; tp < priv->tp_nr[gpc]; tp++) { | ||
230 | u32 reg = TP_UNIT(gpc, tp, 0x520); | ||
231 | nv_wo32(grch->mmio, i++ * 4, reg); | ||
232 | nv_wo32(grch->mmio, i++ * 4, (1 << 28) | magic); | ||
233 | magic += 0x0324; | ||
234 | } | ||
235 | for (tp = 0; tp < priv->tp_nr[gpc]; tp++) { | ||
236 | u32 reg = TP_UNIT(gpc, tp, 0x544); | ||
237 | nv_wo32(grch->mmio, i++ * 4, reg); | ||
238 | nv_wo32(grch->mmio, i++ * 4, magic); | ||
239 | magic += 0x0324; | ||
240 | } | ||
218 | } | 241 | } |
219 | } | 242 | } |
220 | 243 | ||
diff --git a/drivers/gpu/drm/nouveau/nvc0_grctx.c b/drivers/gpu/drm/nouveau/nvc0_grctx.c index dd0e6a736b3b..96b0b93d94ca 100644 --- a/drivers/gpu/drm/nouveau/nvc0_grctx.c +++ b/drivers/gpu/drm/nouveau/nvc0_grctx.c | |||
@@ -1812,6 +1812,7 @@ nvc0_grctx_generate(struct nouveau_channel *chan) | |||
1812 | /* calculate first set of magics */ | 1812 | /* calculate first set of magics */ |
1813 | memcpy(tpnr, priv->tp_nr, sizeof(priv->tp_nr)); | 1813 | memcpy(tpnr, priv->tp_nr, sizeof(priv->tp_nr)); |
1814 | 1814 | ||
1815 | gpc = -1; | ||
1815 | for (tp = 0; tp < priv->tp_total; tp++) { | 1816 | for (tp = 0; tp < priv->tp_total; tp++) { |
1816 | do { | 1817 | do { |
1817 | gpc = (gpc + 1) % priv->gpc_nr; | 1818 | gpc = (gpc + 1) % priv->gpc_nr; |
@@ -1861,30 +1862,26 @@ nvc0_grctx_generate(struct nouveau_channel *chan) | |||
1861 | 1862 | ||
1862 | if (1) { | 1863 | if (1) { |
1863 | u32 tp_mask = 0, tp_set = 0; | 1864 | u32 tp_mask = 0, tp_set = 0; |
1864 | u8 tpnr[GPC_MAX]; | 1865 | u8 tpnr[GPC_MAX], a, b; |
1865 | 1866 | ||
1866 | memcpy(tpnr, priv->tp_nr, sizeof(priv->tp_nr)); | 1867 | memcpy(tpnr, priv->tp_nr, sizeof(priv->tp_nr)); |
1867 | for (gpc = 0; gpc < priv->gpc_nr; gpc++) | 1868 | for (gpc = 0; gpc < priv->gpc_nr; gpc++) |
1868 | tp_mask |= ((1 << priv->tp_nr[gpc]) - 1) << (gpc * 8); | 1869 | tp_mask |= ((1 << priv->tp_nr[gpc]) - 1) << (gpc * 8); |
1869 | 1870 | ||
1870 | gpc = -1; | 1871 | for (i = 0, gpc = -1, b = -1; i < 32; i++) { |
1871 | for (i = 0, gpc = -1; i < 32; i++) { | 1872 | a = (i * (priv->tp_total - 1)) / 32; |
1872 | int ltp = i * (priv->tp_total - 1) / 32; | 1873 | if (a != b) { |
1873 | 1874 | b = a; | |
1874 | do { | 1875 | do { |
1875 | gpc = (gpc + 1) % priv->gpc_nr; | 1876 | gpc = (gpc + 1) % priv->gpc_nr; |
1876 | } while (!tpnr[gpc]); | 1877 | } while (!tpnr[gpc]); |
1877 | tp = priv->tp_nr[gpc] - tpnr[gpc]--; | 1878 | tp = priv->tp_nr[gpc] - tpnr[gpc]--; |
1878 | 1879 | ||
1879 | tp_set |= 1 << ((gpc * 8) + tp); | 1880 | tp_set |= 1 << ((gpc * 8) + tp); |
1881 | } | ||
1880 | 1882 | ||
1881 | do { | 1883 | nv_wr32(dev, 0x406800 + (i * 0x20), tp_set); |
1882 | nv_wr32(dev, 0x406800 + (i * 0x20), tp_set); | 1884 | nv_wr32(dev, 0x406c00 + (i * 0x20), tp_set ^ tp_mask); |
1883 | tp_set ^= tp_mask; | ||
1884 | nv_wr32(dev, 0x406c00 + (i * 0x20), tp_set); | ||
1885 | tp_set ^= tp_mask; | ||
1886 | } while (ltp == (++i * (priv->tp_total - 1) / 32)); | ||
1887 | i--; | ||
1888 | } | 1885 | } |
1889 | } | 1886 | } |
1890 | 1887 | ||
diff --git a/drivers/gpu/drm/nouveau/nvc0_vram.c b/drivers/gpu/drm/nouveau/nvc0_vram.c index edbfe9360ae2..ce984d573a51 100644 --- a/drivers/gpu/drm/nouveau/nvc0_vram.c +++ b/drivers/gpu/drm/nouveau/nvc0_vram.c | |||
@@ -43,7 +43,7 @@ static const u8 types[256] = { | |||
43 | 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, | 43 | 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, |
44 | 0, 0, 0, 3, 3, 3, 3, 1, 1, 1, 1, 0, 0, 0, 0, 0, | 44 | 0, 0, 0, 3, 3, 3, 3, 1, 1, 1, 1, 0, 0, 0, 0, 0, |
45 | 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 3, | 45 | 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 3, |
46 | 3, 3, 3, 3, 0, 0, 0, 0, 0, 0, 0, 0, 3, 3, 3, 3, | 46 | 3, 3, 3, 1, 0, 0, 0, 0, 0, 0, 0, 0, 3, 3, 3, 3, |
47 | 3, 3, 0, 0, 0, 0, 0, 0, 3, 0, 0, 3, 0, 3, 0, 3, | 47 | 3, 3, 0, 0, 0, 0, 0, 0, 3, 0, 0, 3, 0, 3, 0, 3, |
48 | 3, 0, 3, 3, 3, 3, 3, 0, 0, 3, 0, 3, 0, 3, 3, 0, | 48 | 3, 0, 3, 3, 3, 3, 3, 0, 0, 3, 0, 3, 0, 3, 3, 0, |
49 | 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 0, 1, 1, 0 | 49 | 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 0, 1, 1, 0 |
@@ -110,22 +110,26 @@ nvc0_vram_init(struct drm_device *dev) | |||
110 | u32 bsize = nv_rd32(dev, 0x10f20c); | 110 | u32 bsize = nv_rd32(dev, 0x10f20c); |
111 | u32 offset, length; | 111 | u32 offset, length; |
112 | bool uniform = true; | 112 | bool uniform = true; |
113 | int ret, i; | 113 | int ret, part; |
114 | 114 | ||
115 | NV_DEBUG(dev, "0x100800: 0x%08x\n", nv_rd32(dev, 0x100800)); | 115 | NV_DEBUG(dev, "0x100800: 0x%08x\n", nv_rd32(dev, 0x100800)); |
116 | NV_DEBUG(dev, "parts 0x%08x bcast_mem_amount 0x%08x\n", parts, bsize); | 116 | NV_DEBUG(dev, "parts 0x%08x bcast_mem_amount 0x%08x\n", parts, bsize); |
117 | 117 | ||
118 | /* read amount of vram attached to each memory controller */ | 118 | /* read amount of vram attached to each memory controller */ |
119 | for (i = 0; i < parts; i++) { | 119 | part = 0; |
120 | u32 psize = nv_rd32(dev, 0x11020c + (i * 0x1000)); | 120 | while (parts) { |
121 | u32 psize = nv_rd32(dev, 0x11020c + (part++ * 0x1000)); | ||
122 | if (psize == 0) | ||
123 | continue; | ||
124 | parts--; | ||
125 | |||
121 | if (psize != bsize) { | 126 | if (psize != bsize) { |
122 | if (psize < bsize) | 127 | if (psize < bsize) |
123 | bsize = psize; | 128 | bsize = psize; |
124 | uniform = false; | 129 | uniform = false; |
125 | } | 130 | } |
126 | 131 | ||
127 | NV_DEBUG(dev, "%d: mem_amount 0x%08x\n", i, psize); | 132 | NV_DEBUG(dev, "%d: mem_amount 0x%08x\n", part, psize); |
128 | |||
129 | dev_priv->vram_size += (u64)psize << 20; | 133 | dev_priv->vram_size += (u64)psize << 20; |
130 | } | 134 | } |
131 | 135 | ||
diff --git a/drivers/gpu/drm/radeon/Makefile b/drivers/gpu/drm/radeon/Makefile index 9f363e0c4b60..cf8b4bc3e73d 100644 --- a/drivers/gpu/drm/radeon/Makefile +++ b/drivers/gpu/drm/radeon/Makefile | |||
@@ -70,7 +70,7 @@ radeon-y += radeon_device.o radeon_asic.o radeon_kms.o \ | |||
70 | r200.o radeon_legacy_tv.o r600_cs.o r600_blit.o r600_blit_shaders.o \ | 70 | r200.o radeon_legacy_tv.o r600_cs.o r600_blit.o r600_blit_shaders.o \ |
71 | r600_blit_kms.o radeon_pm.o atombios_dp.o r600_audio.o r600_hdmi.o \ | 71 | r600_blit_kms.o radeon_pm.o atombios_dp.o r600_audio.o r600_hdmi.o \ |
72 | evergreen.o evergreen_cs.o evergreen_blit_shaders.o evergreen_blit_kms.o \ | 72 | evergreen.o evergreen_cs.o evergreen_blit_shaders.o evergreen_blit_kms.o \ |
73 | radeon_trace_points.o ni.o cayman_blit_shaders.o | 73 | radeon_trace_points.o ni.o cayman_blit_shaders.o atombios_encoders.o |
74 | 74 | ||
75 | radeon-$(CONFIG_COMPAT) += radeon_ioc32.o | 75 | radeon-$(CONFIG_COMPAT) += radeon_ioc32.o |
76 | radeon-$(CONFIG_VGA_SWITCHEROO) += radeon_atpx_handler.o | 76 | radeon-$(CONFIG_VGA_SWITCHEROO) += radeon_atpx_handler.o |
diff --git a/drivers/gpu/drm/radeon/atombios_crtc.c b/drivers/gpu/drm/radeon/atombios_crtc.c index a515b2a09d85..87631fede1f8 100644 --- a/drivers/gpu/drm/radeon/atombios_crtc.c +++ b/drivers/gpu/drm/radeon/atombios_crtc.c | |||
@@ -558,7 +558,7 @@ static u32 atombios_adjust_pll(struct drm_crtc *crtc, | |||
558 | bpc = connector->display_info.bpc; | 558 | bpc = connector->display_info.bpc; |
559 | encoder_mode = atombios_get_encoder_mode(encoder); | 559 | encoder_mode = atombios_get_encoder_mode(encoder); |
560 | if ((radeon_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT | ATOM_DEVICE_DFP_SUPPORT)) || | 560 | if ((radeon_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT | ATOM_DEVICE_DFP_SUPPORT)) || |
561 | radeon_encoder_is_dp_bridge(encoder)) { | 561 | (radeon_encoder_get_dp_bridge_encoder_id(encoder) != ENCODER_OBJECT_ID_NONE)) { |
562 | if (connector) { | 562 | if (connector) { |
563 | struct radeon_connector *radeon_connector = to_radeon_connector(connector); | 563 | struct radeon_connector *radeon_connector = to_radeon_connector(connector); |
564 | struct radeon_connector_atom_dig *dig_connector = | 564 | struct radeon_connector_atom_dig *dig_connector = |
@@ -638,44 +638,29 @@ static u32 atombios_adjust_pll(struct drm_crtc *crtc, | |||
638 | if (ss_enabled && ss->percentage) | 638 | if (ss_enabled && ss->percentage) |
639 | args.v3.sInput.ucDispPllConfig |= | 639 | args.v3.sInput.ucDispPllConfig |= |
640 | DISPPLL_CONFIG_SS_ENABLE; | 640 | DISPPLL_CONFIG_SS_ENABLE; |
641 | if (radeon_encoder->devices & (ATOM_DEVICE_DFP_SUPPORT) || | 641 | if (ENCODER_MODE_IS_DP(encoder_mode)) { |
642 | radeon_encoder_is_dp_bridge(encoder)) { | 642 | args.v3.sInput.ucDispPllConfig |= |
643 | DISPPLL_CONFIG_COHERENT_MODE; | ||
644 | /* 16200 or 27000 */ | ||
645 | args.v3.sInput.usPixelClock = cpu_to_le16(dp_clock / 10); | ||
646 | } else if (radeon_encoder->devices & (ATOM_DEVICE_DFP_SUPPORT)) { | ||
643 | struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv; | 647 | struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv; |
644 | if (encoder_mode == ATOM_ENCODER_MODE_DP) { | 648 | if (encoder_mode == ATOM_ENCODER_MODE_HDMI) |
649 | /* deep color support */ | ||
650 | args.v3.sInput.usPixelClock = | ||
651 | cpu_to_le16((mode->clock * bpc / 8) / 10); | ||
652 | if (dig->coherent_mode) | ||
645 | args.v3.sInput.ucDispPllConfig |= | 653 | args.v3.sInput.ucDispPllConfig |= |
646 | DISPPLL_CONFIG_COHERENT_MODE; | 654 | DISPPLL_CONFIG_COHERENT_MODE; |
647 | /* 16200 or 27000 */ | 655 | if (mode->clock > 165000) |
648 | args.v3.sInput.usPixelClock = cpu_to_le16(dp_clock / 10); | ||
649 | } else { | ||
650 | if (encoder_mode == ATOM_ENCODER_MODE_HDMI) { | ||
651 | /* deep color support */ | ||
652 | args.v3.sInput.usPixelClock = | ||
653 | cpu_to_le16((mode->clock * bpc / 8) / 10); | ||
654 | } | ||
655 | if (dig->coherent_mode) | ||
656 | args.v3.sInput.ucDispPllConfig |= | ||
657 | DISPPLL_CONFIG_COHERENT_MODE; | ||
658 | if (mode->clock > 165000) | ||
659 | args.v3.sInput.ucDispPllConfig |= | ||
660 | DISPPLL_CONFIG_DUAL_LINK; | ||
661 | } | ||
662 | } else if (radeon_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT)) { | ||
663 | if (encoder_mode == ATOM_ENCODER_MODE_DP) { | ||
664 | args.v3.sInput.ucDispPllConfig |= | 656 | args.v3.sInput.ucDispPllConfig |= |
665 | DISPPLL_CONFIG_COHERENT_MODE; | 657 | DISPPLL_CONFIG_DUAL_LINK; |
666 | /* 16200 or 27000 */ | ||
667 | args.v3.sInput.usPixelClock = cpu_to_le16(dp_clock / 10); | ||
668 | } else if (encoder_mode != ATOM_ENCODER_MODE_LVDS) { | ||
669 | if (mode->clock > 165000) | ||
670 | args.v3.sInput.ucDispPllConfig |= | ||
671 | DISPPLL_CONFIG_DUAL_LINK; | ||
672 | } | ||
673 | } | 658 | } |
674 | if (radeon_encoder_is_dp_bridge(encoder)) { | 659 | if (radeon_encoder_get_dp_bridge_encoder_id(encoder) != |
675 | struct drm_encoder *ext_encoder = radeon_atom_get_external_encoder(encoder); | 660 | ENCODER_OBJECT_ID_NONE) |
676 | struct radeon_encoder *ext_radeon_encoder = to_radeon_encoder(ext_encoder); | 661 | args.v3.sInput.ucExtTransmitterID = |
677 | args.v3.sInput.ucExtTransmitterID = ext_radeon_encoder->encoder_id; | 662 | radeon_encoder_get_dp_bridge_encoder_id(encoder); |
678 | } else | 663 | else |
679 | args.v3.sInput.ucExtTransmitterID = 0; | 664 | args.v3.sInput.ucExtTransmitterID = 0; |
680 | 665 | ||
681 | atom_execute_table(rdev->mode_info.atom_context, | 666 | atom_execute_table(rdev->mode_info.atom_context, |
@@ -945,6 +930,7 @@ static void atombios_crtc_set_pll(struct drm_crtc *crtc, struct drm_display_mode | |||
945 | bpc = connector->display_info.bpc; | 930 | bpc = connector->display_info.bpc; |
946 | 931 | ||
947 | switch (encoder_mode) { | 932 | switch (encoder_mode) { |
933 | case ATOM_ENCODER_MODE_DP_MST: | ||
948 | case ATOM_ENCODER_MODE_DP: | 934 | case ATOM_ENCODER_MODE_DP: |
949 | /* DP/eDP */ | 935 | /* DP/eDP */ |
950 | dp_clock = dig_connector->dp_clock / 10; | 936 | dp_clock = dig_connector->dp_clock / 10; |
@@ -1450,7 +1436,7 @@ static int radeon_atom_pick_pll(struct drm_crtc *crtc) | |||
1450 | * PPLL/DCPLL programming and only program the DP DTO for the | 1436 | * PPLL/DCPLL programming and only program the DP DTO for the |
1451 | * crtc virtual pixel clock. | 1437 | * crtc virtual pixel clock. |
1452 | */ | 1438 | */ |
1453 | if (atombios_get_encoder_mode(test_encoder) == ATOM_ENCODER_MODE_DP) { | 1439 | if (ENCODER_MODE_IS_DP(atombios_get_encoder_mode(test_encoder))) { |
1454 | if (ASIC_IS_DCE5(rdev) || rdev->clock.dp_extclk) | 1440 | if (ASIC_IS_DCE5(rdev) || rdev->clock.dp_extclk) |
1455 | return ATOM_PPLL_INVALID; | 1441 | return ATOM_PPLL_INVALID; |
1456 | } | 1442 | } |
@@ -1536,12 +1522,6 @@ static bool atombios_crtc_mode_fixup(struct drm_crtc *crtc, | |||
1536 | struct drm_display_mode *mode, | 1522 | struct drm_display_mode *mode, |
1537 | struct drm_display_mode *adjusted_mode) | 1523 | struct drm_display_mode *adjusted_mode) |
1538 | { | 1524 | { |
1539 | struct drm_device *dev = crtc->dev; | ||
1540 | struct radeon_device *rdev = dev->dev_private; | ||
1541 | |||
1542 | /* adjust pm to upcoming mode change */ | ||
1543 | radeon_pm_compute_clocks(rdev); | ||
1544 | |||
1545 | if (!radeon_crtc_scaling_mode_fixup(crtc, mode, adjusted_mode)) | 1525 | if (!radeon_crtc_scaling_mode_fixup(crtc, mode, adjusted_mode)) |
1546 | return false; | 1526 | return false; |
1547 | return true; | 1527 | return true; |
diff --git a/drivers/gpu/drm/radeon/atombios_dp.c b/drivers/gpu/drm/radeon/atombios_dp.c index 79e8ebc05307..6fb335a4fdda 100644 --- a/drivers/gpu/drm/radeon/atombios_dp.c +++ b/drivers/gpu/drm/radeon/atombios_dp.c | |||
@@ -283,7 +283,7 @@ int radeon_dp_i2c_aux_ch(struct i2c_adapter *adapter, int mode, | |||
283 | } | 283 | } |
284 | } | 284 | } |
285 | 285 | ||
286 | DRM_ERROR("aux i2c too many retries, giving up\n"); | 286 | DRM_DEBUG_KMS("aux i2c too many retries, giving up\n"); |
287 | return -EREMOTEIO; | 287 | return -EREMOTEIO; |
288 | } | 288 | } |
289 | 289 | ||
@@ -482,7 +482,8 @@ static int radeon_dp_get_dp_link_clock(struct drm_connector *connector, | |||
482 | int bpp = convert_bpc_to_bpp(connector->display_info.bpc); | 482 | int bpp = convert_bpc_to_bpp(connector->display_info.bpc); |
483 | int lane_num, max_pix_clock; | 483 | int lane_num, max_pix_clock; |
484 | 484 | ||
485 | if (radeon_connector_encoder_is_dp_bridge(connector)) | 485 | if (radeon_connector_encoder_get_dp_bridge_encoder_id(connector) == |
486 | ENCODER_OBJECT_ID_NUTMEG) | ||
486 | return 270000; | 487 | return 270000; |
487 | 488 | ||
488 | lane_num = radeon_dp_get_dp_lane_number(connector, dpcd, pix_clock); | 489 | lane_num = radeon_dp_get_dp_lane_number(connector, dpcd, pix_clock); |
@@ -553,17 +554,32 @@ static void radeon_dp_set_panel_mode(struct drm_encoder *encoder, | |||
553 | { | 554 | { |
554 | struct drm_device *dev = encoder->dev; | 555 | struct drm_device *dev = encoder->dev; |
555 | struct radeon_device *rdev = dev->dev_private; | 556 | struct radeon_device *rdev = dev->dev_private; |
557 | struct radeon_connector *radeon_connector = to_radeon_connector(connector); | ||
556 | int panel_mode = DP_PANEL_MODE_EXTERNAL_DP_MODE; | 558 | int panel_mode = DP_PANEL_MODE_EXTERNAL_DP_MODE; |
557 | 559 | ||
558 | if (!ASIC_IS_DCE4(rdev)) | 560 | if (!ASIC_IS_DCE4(rdev)) |
559 | return; | 561 | return; |
560 | 562 | ||
561 | if (radeon_connector_encoder_is_dp_bridge(connector)) | 563 | if (radeon_connector_encoder_get_dp_bridge_encoder_id(connector) == |
564 | ENCODER_OBJECT_ID_NUTMEG) | ||
562 | panel_mode = DP_PANEL_MODE_INTERNAL_DP1_MODE; | 565 | panel_mode = DP_PANEL_MODE_INTERNAL_DP1_MODE; |
566 | else if (radeon_connector_encoder_get_dp_bridge_encoder_id(connector) == | ||
567 | ENCODER_OBJECT_ID_TRAVIS) | ||
568 | panel_mode = DP_PANEL_MODE_INTERNAL_DP2_MODE; | ||
569 | else if (connector->connector_type == DRM_MODE_CONNECTOR_eDP) { | ||
570 | u8 tmp = radeon_read_dpcd_reg(radeon_connector, DP_EDP_CONFIGURATION_CAP); | ||
571 | if (tmp & 1) | ||
572 | panel_mode = DP_PANEL_MODE_INTERNAL_DP2_MODE; | ||
573 | } | ||
563 | 574 | ||
564 | atombios_dig_encoder_setup(encoder, | 575 | atombios_dig_encoder_setup(encoder, |
565 | ATOM_ENCODER_CMD_SETUP_PANEL_MODE, | 576 | ATOM_ENCODER_CMD_SETUP_PANEL_MODE, |
566 | panel_mode); | 577 | panel_mode); |
578 | |||
579 | if ((connector->connector_type == DRM_MODE_CONNECTOR_eDP) && | ||
580 | (panel_mode == DP_PANEL_MODE_INTERNAL_DP2_MODE)) { | ||
581 | radeon_write_dpcd_reg(radeon_connector, DP_EDP_CONFIGURATION_SET, 1); | ||
582 | } | ||
567 | } | 583 | } |
568 | 584 | ||
569 | void radeon_dp_set_link_config(struct drm_connector *connector, | 585 | void radeon_dp_set_link_config(struct drm_connector *connector, |
diff --git a/drivers/gpu/drm/radeon/atombios_encoders.c b/drivers/gpu/drm/radeon/atombios_encoders.c new file mode 100644 index 000000000000..39c04c1b8472 --- /dev/null +++ b/drivers/gpu/drm/radeon/atombios_encoders.c | |||
@@ -0,0 +1,2369 @@ | |||
1 | /* | ||
2 | * Copyright 2007-11 Advanced Micro Devices, Inc. | ||
3 | * Copyright 2008 Red Hat Inc. | ||
4 | * | ||
5 | * Permission is hereby granted, free of charge, to any person obtaining a | ||
6 | * copy of this software and associated documentation files (the "Software"), | ||
7 | * to deal in the Software without restriction, including without limitation | ||
8 | * the rights to use, copy, modify, merge, publish, distribute, sublicense, | ||
9 | * and/or sell copies of the Software, and to permit persons to whom the | ||
10 | * Software is furnished to do so, subject to the following conditions: | ||
11 | * | ||
12 | * The above copyright notice and this permission notice shall be included in | ||
13 | * all copies or substantial portions of the Software. | ||
14 | * | ||
15 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR | ||
16 | * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, | ||
17 | * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL | ||
18 | * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR | ||
19 | * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, | ||
20 | * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR | ||
21 | * OTHER DEALINGS IN THE SOFTWARE. | ||
22 | * | ||
23 | * Authors: Dave Airlie | ||
24 | * Alex Deucher | ||
25 | */ | ||
26 | #include "drmP.h" | ||
27 | #include "drm_crtc_helper.h" | ||
28 | #include "radeon_drm.h" | ||
29 | #include "radeon.h" | ||
30 | #include "atom.h" | ||
31 | |||
32 | extern int atom_debug; | ||
33 | |||
34 | /* evil but including atombios.h is much worse */ | ||
35 | bool radeon_atom_get_tv_timings(struct radeon_device *rdev, int index, | ||
36 | struct drm_display_mode *mode); | ||
37 | |||
38 | |||
39 | static inline bool radeon_encoder_is_digital(struct drm_encoder *encoder) | ||
40 | { | ||
41 | struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder); | ||
42 | switch (radeon_encoder->encoder_id) { | ||
43 | case ENCODER_OBJECT_ID_INTERNAL_LVDS: | ||
44 | case ENCODER_OBJECT_ID_INTERNAL_TMDS1: | ||
45 | case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_TMDS1: | ||
46 | case ENCODER_OBJECT_ID_INTERNAL_LVTM1: | ||
47 | case ENCODER_OBJECT_ID_INTERNAL_DVO1: | ||
48 | case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DVO1: | ||
49 | case ENCODER_OBJECT_ID_INTERNAL_DDI: | ||
50 | case ENCODER_OBJECT_ID_INTERNAL_UNIPHY: | ||
51 | case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_LVTMA: | ||
52 | case ENCODER_OBJECT_ID_INTERNAL_UNIPHY1: | ||
53 | case ENCODER_OBJECT_ID_INTERNAL_UNIPHY2: | ||
54 | return true; | ||
55 | default: | ||
56 | return false; | ||
57 | } | ||
58 | } | ||
59 | |||
60 | static struct drm_connector * | ||
61 | radeon_get_connector_for_encoder_init(struct drm_encoder *encoder) | ||
62 | { | ||
63 | struct drm_device *dev = encoder->dev; | ||
64 | struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder); | ||
65 | struct drm_connector *connector; | ||
66 | struct radeon_connector *radeon_connector; | ||
67 | |||
68 | list_for_each_entry(connector, &dev->mode_config.connector_list, head) { | ||
69 | radeon_connector = to_radeon_connector(connector); | ||
70 | if (radeon_encoder->devices & radeon_connector->devices) | ||
71 | return connector; | ||
72 | } | ||
73 | return NULL; | ||
74 | } | ||
75 | |||
76 | static bool radeon_atom_mode_fixup(struct drm_encoder *encoder, | ||
77 | struct drm_display_mode *mode, | ||
78 | struct drm_display_mode *adjusted_mode) | ||
79 | { | ||
80 | struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder); | ||
81 | struct drm_device *dev = encoder->dev; | ||
82 | struct radeon_device *rdev = dev->dev_private; | ||
83 | |||
84 | /* set the active encoder to connector routing */ | ||
85 | radeon_encoder_set_active_device(encoder); | ||
86 | drm_mode_set_crtcinfo(adjusted_mode, 0); | ||
87 | |||
88 | /* hw bug */ | ||
89 | if ((mode->flags & DRM_MODE_FLAG_INTERLACE) | ||
90 | && (mode->crtc_vsync_start < (mode->crtc_vdisplay + 2))) | ||
91 | adjusted_mode->crtc_vsync_start = adjusted_mode->crtc_vdisplay + 2; | ||
92 | |||
93 | /* get the native mode for LVDS */ | ||
94 | if (radeon_encoder->active_device & (ATOM_DEVICE_LCD_SUPPORT)) | ||
95 | radeon_panel_mode_fixup(encoder, adjusted_mode); | ||
96 | |||
97 | /* get the native mode for TV */ | ||
98 | if (radeon_encoder->active_device & (ATOM_DEVICE_TV_SUPPORT)) { | ||
99 | struct radeon_encoder_atom_dac *tv_dac = radeon_encoder->enc_priv; | ||
100 | if (tv_dac) { | ||
101 | if (tv_dac->tv_std == TV_STD_NTSC || | ||
102 | tv_dac->tv_std == TV_STD_NTSC_J || | ||
103 | tv_dac->tv_std == TV_STD_PAL_M) | ||
104 | radeon_atom_get_tv_timings(rdev, 0, adjusted_mode); | ||
105 | else | ||
106 | radeon_atom_get_tv_timings(rdev, 1, adjusted_mode); | ||
107 | } | ||
108 | } | ||
109 | |||
110 | if (ASIC_IS_DCE3(rdev) && | ||
111 | ((radeon_encoder->active_device & (ATOM_DEVICE_DFP_SUPPORT | ATOM_DEVICE_LCD_SUPPORT)) || | ||
112 | (radeon_encoder_get_dp_bridge_encoder_id(encoder) != ENCODER_OBJECT_ID_NONE))) { | ||
113 | struct drm_connector *connector = radeon_get_connector_for_encoder(encoder); | ||
114 | radeon_dp_set_link_config(connector, mode); | ||
115 | } | ||
116 | |||
117 | return true; | ||
118 | } | ||
119 | |||
120 | static void | ||
121 | atombios_dac_setup(struct drm_encoder *encoder, int action) | ||
122 | { | ||
123 | struct drm_device *dev = encoder->dev; | ||
124 | struct radeon_device *rdev = dev->dev_private; | ||
125 | struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder); | ||
126 | DAC_ENCODER_CONTROL_PS_ALLOCATION args; | ||
127 | int index = 0; | ||
128 | struct radeon_encoder_atom_dac *dac_info = radeon_encoder->enc_priv; | ||
129 | |||
130 | memset(&args, 0, sizeof(args)); | ||
131 | |||
132 | switch (radeon_encoder->encoder_id) { | ||
133 | case ENCODER_OBJECT_ID_INTERNAL_DAC1: | ||
134 | case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC1: | ||
135 | index = GetIndexIntoMasterTable(COMMAND, DAC1EncoderControl); | ||
136 | break; | ||
137 | case ENCODER_OBJECT_ID_INTERNAL_DAC2: | ||
138 | case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC2: | ||
139 | index = GetIndexIntoMasterTable(COMMAND, DAC2EncoderControl); | ||
140 | break; | ||
141 | } | ||
142 | |||
143 | args.ucAction = action; | ||
144 | |||
145 | if (radeon_encoder->active_device & (ATOM_DEVICE_CRT_SUPPORT)) | ||
146 | args.ucDacStandard = ATOM_DAC1_PS2; | ||
147 | else if (radeon_encoder->active_device & (ATOM_DEVICE_CV_SUPPORT)) | ||
148 | args.ucDacStandard = ATOM_DAC1_CV; | ||
149 | else { | ||
150 | switch (dac_info->tv_std) { | ||
151 | case TV_STD_PAL: | ||
152 | case TV_STD_PAL_M: | ||
153 | case TV_STD_SCART_PAL: | ||
154 | case TV_STD_SECAM: | ||
155 | case TV_STD_PAL_CN: | ||
156 | args.ucDacStandard = ATOM_DAC1_PAL; | ||
157 | break; | ||
158 | case TV_STD_NTSC: | ||
159 | case TV_STD_NTSC_J: | ||
160 | case TV_STD_PAL_60: | ||
161 | default: | ||
162 | args.ucDacStandard = ATOM_DAC1_NTSC; | ||
163 | break; | ||
164 | } | ||
165 | } | ||
166 | args.usPixelClock = cpu_to_le16(radeon_encoder->pixel_clock / 10); | ||
167 | |||
168 | atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args); | ||
169 | |||
170 | } | ||
171 | |||
172 | static void | ||
173 | atombios_tv_setup(struct drm_encoder *encoder, int action) | ||
174 | { | ||
175 | struct drm_device *dev = encoder->dev; | ||
176 | struct radeon_device *rdev = dev->dev_private; | ||
177 | struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder); | ||
178 | TV_ENCODER_CONTROL_PS_ALLOCATION args; | ||
179 | int index = 0; | ||
180 | struct radeon_encoder_atom_dac *dac_info = radeon_encoder->enc_priv; | ||
181 | |||
182 | memset(&args, 0, sizeof(args)); | ||
183 | |||
184 | index = GetIndexIntoMasterTable(COMMAND, TVEncoderControl); | ||
185 | |||
186 | args.sTVEncoder.ucAction = action; | ||
187 | |||
188 | if (radeon_encoder->active_device & (ATOM_DEVICE_CV_SUPPORT)) | ||
189 | args.sTVEncoder.ucTvStandard = ATOM_TV_CV; | ||
190 | else { | ||
191 | switch (dac_info->tv_std) { | ||
192 | case TV_STD_NTSC: | ||
193 | args.sTVEncoder.ucTvStandard = ATOM_TV_NTSC; | ||
194 | break; | ||
195 | case TV_STD_PAL: | ||
196 | args.sTVEncoder.ucTvStandard = ATOM_TV_PAL; | ||
197 | break; | ||
198 | case TV_STD_PAL_M: | ||
199 | args.sTVEncoder.ucTvStandard = ATOM_TV_PALM; | ||
200 | break; | ||
201 | case TV_STD_PAL_60: | ||
202 | args.sTVEncoder.ucTvStandard = ATOM_TV_PAL60; | ||
203 | break; | ||
204 | case TV_STD_NTSC_J: | ||
205 | args.sTVEncoder.ucTvStandard = ATOM_TV_NTSCJ; | ||
206 | break; | ||
207 | case TV_STD_SCART_PAL: | ||
208 | args.sTVEncoder.ucTvStandard = ATOM_TV_PAL; /* ??? */ | ||
209 | break; | ||
210 | case TV_STD_SECAM: | ||
211 | args.sTVEncoder.ucTvStandard = ATOM_TV_SECAM; | ||
212 | break; | ||
213 | case TV_STD_PAL_CN: | ||
214 | args.sTVEncoder.ucTvStandard = ATOM_TV_PALCN; | ||
215 | break; | ||
216 | default: | ||
217 | args.sTVEncoder.ucTvStandard = ATOM_TV_NTSC; | ||
218 | break; | ||
219 | } | ||
220 | } | ||
221 | |||
222 | args.sTVEncoder.usPixelClock = cpu_to_le16(radeon_encoder->pixel_clock / 10); | ||
223 | |||
224 | atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args); | ||
225 | |||
226 | } | ||
227 | |||
228 | union dvo_encoder_control { | ||
229 | ENABLE_EXTERNAL_TMDS_ENCODER_PS_ALLOCATION ext_tmds; | ||
230 | DVO_ENCODER_CONTROL_PS_ALLOCATION dvo; | ||
231 | DVO_ENCODER_CONTROL_PS_ALLOCATION_V3 dvo_v3; | ||
232 | }; | ||
233 | |||
234 | void | ||
235 | atombios_dvo_setup(struct drm_encoder *encoder, int action) | ||
236 | { | ||
237 | struct drm_device *dev = encoder->dev; | ||
238 | struct radeon_device *rdev = dev->dev_private; | ||
239 | struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder); | ||
240 | union dvo_encoder_control args; | ||
241 | int index = GetIndexIntoMasterTable(COMMAND, DVOEncoderControl); | ||
242 | uint8_t frev, crev; | ||
243 | |||
244 | memset(&args, 0, sizeof(args)); | ||
245 | |||
246 | if (!atom_parse_cmd_header(rdev->mode_info.atom_context, index, &frev, &crev)) | ||
247 | return; | ||
248 | |||
249 | switch (frev) { | ||
250 | case 1: | ||
251 | switch (crev) { | ||
252 | case 1: | ||
253 | /* R4xx, R5xx */ | ||
254 | args.ext_tmds.sXTmdsEncoder.ucEnable = action; | ||
255 | |||
256 | if (radeon_encoder->pixel_clock > 165000) | ||
257 | args.ext_tmds.sXTmdsEncoder.ucMisc |= PANEL_ENCODER_MISC_DUAL; | ||
258 | |||
259 | args.ext_tmds.sXTmdsEncoder.ucMisc |= ATOM_PANEL_MISC_888RGB; | ||
260 | break; | ||
261 | case 2: | ||
262 | /* RS600/690/740 */ | ||
263 | args.dvo.sDVOEncoder.ucAction = action; | ||
264 | args.dvo.sDVOEncoder.usPixelClock = cpu_to_le16(radeon_encoder->pixel_clock / 10); | ||
265 | /* DFP1, CRT1, TV1 depending on the type of port */ | ||
266 | args.dvo.sDVOEncoder.ucDeviceType = ATOM_DEVICE_DFP1_INDEX; | ||
267 | |||
268 | if (radeon_encoder->pixel_clock > 165000) | ||
269 | args.dvo.sDVOEncoder.usDevAttr.sDigAttrib.ucAttribute |= PANEL_ENCODER_MISC_DUAL; | ||
270 | break; | ||
271 | case 3: | ||
272 | /* R6xx */ | ||
273 | args.dvo_v3.ucAction = action; | ||
274 | args.dvo_v3.usPixelClock = cpu_to_le16(radeon_encoder->pixel_clock / 10); | ||
275 | args.dvo_v3.ucDVOConfig = 0; /* XXX */ | ||
276 | break; | ||
277 | default: | ||
278 | DRM_ERROR("Unknown table version %d, %d\n", frev, crev); | ||
279 | break; | ||
280 | } | ||
281 | break; | ||
282 | default: | ||
283 | DRM_ERROR("Unknown table version %d, %d\n", frev, crev); | ||
284 | break; | ||
285 | } | ||
286 | |||
287 | atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args); | ||
288 | } | ||
289 | |||
290 | union lvds_encoder_control { | ||
291 | LVDS_ENCODER_CONTROL_PS_ALLOCATION v1; | ||
292 | LVDS_ENCODER_CONTROL_PS_ALLOCATION_V2 v2; | ||
293 | }; | ||
294 | |||
295 | void | ||
296 | atombios_digital_setup(struct drm_encoder *encoder, int action) | ||
297 | { | ||
298 | struct drm_device *dev = encoder->dev; | ||
299 | struct radeon_device *rdev = dev->dev_private; | ||
300 | struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder); | ||
301 | struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv; | ||
302 | union lvds_encoder_control args; | ||
303 | int index = 0; | ||
304 | int hdmi_detected = 0; | ||
305 | uint8_t frev, crev; | ||
306 | |||
307 | if (!dig) | ||
308 | return; | ||
309 | |||
310 | if (atombios_get_encoder_mode(encoder) == ATOM_ENCODER_MODE_HDMI) | ||
311 | hdmi_detected = 1; | ||
312 | |||
313 | memset(&args, 0, sizeof(args)); | ||
314 | |||
315 | switch (radeon_encoder->encoder_id) { | ||
316 | case ENCODER_OBJECT_ID_INTERNAL_LVDS: | ||
317 | index = GetIndexIntoMasterTable(COMMAND, LVDSEncoderControl); | ||
318 | break; | ||
319 | case ENCODER_OBJECT_ID_INTERNAL_TMDS1: | ||
320 | case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_TMDS1: | ||
321 | index = GetIndexIntoMasterTable(COMMAND, TMDS1EncoderControl); | ||
322 | break; | ||
323 | case ENCODER_OBJECT_ID_INTERNAL_LVTM1: | ||
324 | if (radeon_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT)) | ||
325 | index = GetIndexIntoMasterTable(COMMAND, LVDSEncoderControl); | ||
326 | else | ||
327 | index = GetIndexIntoMasterTable(COMMAND, TMDS2EncoderControl); | ||
328 | break; | ||
329 | } | ||
330 | |||
331 | if (!atom_parse_cmd_header(rdev->mode_info.atom_context, index, &frev, &crev)) | ||
332 | return; | ||
333 | |||
334 | switch (frev) { | ||
335 | case 1: | ||
336 | case 2: | ||
337 | switch (crev) { | ||
338 | case 1: | ||
339 | args.v1.ucMisc = 0; | ||
340 | args.v1.ucAction = action; | ||
341 | if (hdmi_detected) | ||
342 | args.v1.ucMisc |= PANEL_ENCODER_MISC_HDMI_TYPE; | ||
343 | args.v1.usPixelClock = cpu_to_le16(radeon_encoder->pixel_clock / 10); | ||
344 | if (radeon_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT)) { | ||
345 | if (dig->lcd_misc & ATOM_PANEL_MISC_DUAL) | ||
346 | args.v1.ucMisc |= PANEL_ENCODER_MISC_DUAL; | ||
347 | if (dig->lcd_misc & ATOM_PANEL_MISC_888RGB) | ||
348 | args.v1.ucMisc |= ATOM_PANEL_MISC_888RGB; | ||
349 | } else { | ||
350 | if (dig->linkb) | ||
351 | args.v1.ucMisc |= PANEL_ENCODER_MISC_TMDS_LINKB; | ||
352 | if (radeon_encoder->pixel_clock > 165000) | ||
353 | args.v1.ucMisc |= PANEL_ENCODER_MISC_DUAL; | ||
354 | /*if (pScrn->rgbBits == 8) */ | ||
355 | args.v1.ucMisc |= ATOM_PANEL_MISC_888RGB; | ||
356 | } | ||
357 | break; | ||
358 | case 2: | ||
359 | case 3: | ||
360 | args.v2.ucMisc = 0; | ||
361 | args.v2.ucAction = action; | ||
362 | if (crev == 3) { | ||
363 | if (dig->coherent_mode) | ||
364 | args.v2.ucMisc |= PANEL_ENCODER_MISC_COHERENT; | ||
365 | } | ||
366 | if (hdmi_detected) | ||
367 | args.v2.ucMisc |= PANEL_ENCODER_MISC_HDMI_TYPE; | ||
368 | args.v2.usPixelClock = cpu_to_le16(radeon_encoder->pixel_clock / 10); | ||
369 | args.v2.ucTruncate = 0; | ||
370 | args.v2.ucSpatial = 0; | ||
371 | args.v2.ucTemporal = 0; | ||
372 | args.v2.ucFRC = 0; | ||
373 | if (radeon_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT)) { | ||
374 | if (dig->lcd_misc & ATOM_PANEL_MISC_DUAL) | ||
375 | args.v2.ucMisc |= PANEL_ENCODER_MISC_DUAL; | ||
376 | if (dig->lcd_misc & ATOM_PANEL_MISC_SPATIAL) { | ||
377 | args.v2.ucSpatial = PANEL_ENCODER_SPATIAL_DITHER_EN; | ||
378 | if (dig->lcd_misc & ATOM_PANEL_MISC_888RGB) | ||
379 | args.v2.ucSpatial |= PANEL_ENCODER_SPATIAL_DITHER_DEPTH; | ||
380 | } | ||
381 | if (dig->lcd_misc & ATOM_PANEL_MISC_TEMPORAL) { | ||
382 | args.v2.ucTemporal = PANEL_ENCODER_TEMPORAL_DITHER_EN; | ||
383 | if (dig->lcd_misc & ATOM_PANEL_MISC_888RGB) | ||
384 | args.v2.ucTemporal |= PANEL_ENCODER_TEMPORAL_DITHER_DEPTH; | ||
385 | if (((dig->lcd_misc >> ATOM_PANEL_MISC_GREY_LEVEL_SHIFT) & 0x3) == 2) | ||
386 | args.v2.ucTemporal |= PANEL_ENCODER_TEMPORAL_LEVEL_4; | ||
387 | } | ||
388 | } else { | ||
389 | if (dig->linkb) | ||
390 | args.v2.ucMisc |= PANEL_ENCODER_MISC_TMDS_LINKB; | ||
391 | if (radeon_encoder->pixel_clock > 165000) | ||
392 | args.v2.ucMisc |= PANEL_ENCODER_MISC_DUAL; | ||
393 | } | ||
394 | break; | ||
395 | default: | ||
396 | DRM_ERROR("Unknown table version %d, %d\n", frev, crev); | ||
397 | break; | ||
398 | } | ||
399 | break; | ||
400 | default: | ||
401 | DRM_ERROR("Unknown table version %d, %d\n", frev, crev); | ||
402 | break; | ||
403 | } | ||
404 | |||
405 | atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args); | ||
406 | } | ||
407 | |||
408 | int | ||
409 | atombios_get_encoder_mode(struct drm_encoder *encoder) | ||
410 | { | ||
411 | struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder); | ||
412 | struct drm_device *dev = encoder->dev; | ||
413 | struct radeon_device *rdev = dev->dev_private; | ||
414 | struct drm_connector *connector; | ||
415 | struct radeon_connector *radeon_connector; | ||
416 | struct radeon_connector_atom_dig *dig_connector; | ||
417 | |||
418 | /* dp bridges are always DP */ | ||
419 | if (radeon_encoder_get_dp_bridge_encoder_id(encoder) != ENCODER_OBJECT_ID_NONE) | ||
420 | return ATOM_ENCODER_MODE_DP; | ||
421 | |||
422 | /* DVO is always DVO */ | ||
423 | if (radeon_encoder->encoder_id == ATOM_ENCODER_MODE_DVO) | ||
424 | return ATOM_ENCODER_MODE_DVO; | ||
425 | |||
426 | connector = radeon_get_connector_for_encoder(encoder); | ||
427 | /* if we don't have an active device yet, just use one of | ||
428 | * the connectors tied to the encoder. | ||
429 | */ | ||
430 | if (!connector) | ||
431 | connector = radeon_get_connector_for_encoder_init(encoder); | ||
432 | radeon_connector = to_radeon_connector(connector); | ||
433 | |||
434 | switch (connector->connector_type) { | ||
435 | case DRM_MODE_CONNECTOR_DVII: | ||
436 | case DRM_MODE_CONNECTOR_HDMIB: /* HDMI-B is basically DL-DVI; analog works fine */ | ||
437 | if (drm_detect_monitor_audio(radeon_connector->edid) && radeon_audio) { | ||
438 | /* fix me */ | ||
439 | if (ASIC_IS_DCE4(rdev)) | ||
440 | return ATOM_ENCODER_MODE_DVI; | ||
441 | else | ||
442 | return ATOM_ENCODER_MODE_HDMI; | ||
443 | } else if (radeon_connector->use_digital) | ||
444 | return ATOM_ENCODER_MODE_DVI; | ||
445 | else | ||
446 | return ATOM_ENCODER_MODE_CRT; | ||
447 | break; | ||
448 | case DRM_MODE_CONNECTOR_DVID: | ||
449 | case DRM_MODE_CONNECTOR_HDMIA: | ||
450 | default: | ||
451 | if (drm_detect_monitor_audio(radeon_connector->edid) && radeon_audio) { | ||
452 | /* fix me */ | ||
453 | if (ASIC_IS_DCE4(rdev)) | ||
454 | return ATOM_ENCODER_MODE_DVI; | ||
455 | else | ||
456 | return ATOM_ENCODER_MODE_HDMI; | ||
457 | } else | ||
458 | return ATOM_ENCODER_MODE_DVI; | ||
459 | break; | ||
460 | case DRM_MODE_CONNECTOR_LVDS: | ||
461 | return ATOM_ENCODER_MODE_LVDS; | ||
462 | break; | ||
463 | case DRM_MODE_CONNECTOR_DisplayPort: | ||
464 | dig_connector = radeon_connector->con_priv; | ||
465 | if ((dig_connector->dp_sink_type == CONNECTOR_OBJECT_ID_DISPLAYPORT) || | ||
466 | (dig_connector->dp_sink_type == CONNECTOR_OBJECT_ID_eDP)) | ||
467 | return ATOM_ENCODER_MODE_DP; | ||
468 | else if (drm_detect_monitor_audio(radeon_connector->edid) && radeon_audio) { | ||
469 | /* fix me */ | ||
470 | if (ASIC_IS_DCE4(rdev)) | ||
471 | return ATOM_ENCODER_MODE_DVI; | ||
472 | else | ||
473 | return ATOM_ENCODER_MODE_HDMI; | ||
474 | } else | ||
475 | return ATOM_ENCODER_MODE_DVI; | ||
476 | break; | ||
477 | case DRM_MODE_CONNECTOR_eDP: | ||
478 | return ATOM_ENCODER_MODE_DP; | ||
479 | case DRM_MODE_CONNECTOR_DVIA: | ||
480 | case DRM_MODE_CONNECTOR_VGA: | ||
481 | return ATOM_ENCODER_MODE_CRT; | ||
482 | break; | ||
483 | case DRM_MODE_CONNECTOR_Composite: | ||
484 | case DRM_MODE_CONNECTOR_SVIDEO: | ||
485 | case DRM_MODE_CONNECTOR_9PinDIN: | ||
486 | /* fix me */ | ||
487 | return ATOM_ENCODER_MODE_TV; | ||
488 | /*return ATOM_ENCODER_MODE_CV;*/ | ||
489 | break; | ||
490 | } | ||
491 | } | ||
492 | |||
493 | /* | ||
494 | * DIG Encoder/Transmitter Setup | ||
495 | * | ||
496 | * DCE 3.0/3.1 | ||
497 | * - 2 DIG transmitter blocks. UNIPHY (links A and B) and LVTMA. | ||
498 | * Supports up to 3 digital outputs | ||
499 | * - 2 DIG encoder blocks. | ||
500 | * DIG1 can drive UNIPHY link A or link B | ||
501 | * DIG2 can drive UNIPHY link B or LVTMA | ||
502 | * | ||
503 | * DCE 3.2 | ||
504 | * - 3 DIG transmitter blocks. UNIPHY0/1/2 (links A and B). | ||
505 | * Supports up to 5 digital outputs | ||
506 | * - 2 DIG encoder blocks. | ||
507 | * DIG1/2 can drive UNIPHY0/1/2 link A or link B | ||
508 | * | ||
509 | * DCE 4.0/5.0 | ||
510 | * - 3 DIG transmitter blocks UNIPHY0/1/2 (links A and B). | ||
511 | * Supports up to 6 digital outputs | ||
512 | * - 6 DIG encoder blocks. | ||
513 | * - DIG to PHY mapping is hardcoded | ||
514 | * DIG1 drives UNIPHY0 link A, A+B | ||
515 | * DIG2 drives UNIPHY0 link B | ||
516 | * DIG3 drives UNIPHY1 link A, A+B | ||
517 | * DIG4 drives UNIPHY1 link B | ||
518 | * DIG5 drives UNIPHY2 link A, A+B | ||
519 | * DIG6 drives UNIPHY2 link B | ||
520 | * | ||
521 | * DCE 4.1 | ||
522 | * - 3 DIG transmitter blocks UNIPHY0/1/2 (links A and B). | ||
523 | * Supports up to 6 digital outputs | ||
524 | * - 2 DIG encoder blocks. | ||
525 | * DIG1/2 can drive UNIPHY0/1/2 link A or link B | ||
526 | * | ||
527 | * Routing | ||
528 | * crtc -> dig encoder -> UNIPHY/LVTMA (1 or 2 links) | ||
529 | * Examples: | ||
530 | * crtc0 -> dig2 -> LVTMA links A+B -> TMDS/HDMI | ||
531 | * crtc1 -> dig1 -> UNIPHY0 link B -> DP | ||
532 | * crtc0 -> dig1 -> UNIPHY2 link A -> LVDS | ||
533 | * crtc1 -> dig2 -> UNIPHY1 link B+A -> TMDS/HDMI | ||
534 | */ | ||
535 | |||
536 | union dig_encoder_control { | ||
537 | DIG_ENCODER_CONTROL_PS_ALLOCATION v1; | ||
538 | DIG_ENCODER_CONTROL_PARAMETERS_V2 v2; | ||
539 | DIG_ENCODER_CONTROL_PARAMETERS_V3 v3; | ||
540 | DIG_ENCODER_CONTROL_PARAMETERS_V4 v4; | ||
541 | }; | ||
542 | |||
543 | void | ||
544 | atombios_dig_encoder_setup(struct drm_encoder *encoder, int action, int panel_mode) | ||
545 | { | ||
546 | struct drm_device *dev = encoder->dev; | ||
547 | struct radeon_device *rdev = dev->dev_private; | ||
548 | struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder); | ||
549 | struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv; | ||
550 | struct drm_connector *connector = radeon_get_connector_for_encoder(encoder); | ||
551 | union dig_encoder_control args; | ||
552 | int index = 0; | ||
553 | uint8_t frev, crev; | ||
554 | int dp_clock = 0; | ||
555 | int dp_lane_count = 0; | ||
556 | int hpd_id = RADEON_HPD_NONE; | ||
557 | int bpc = 8; | ||
558 | |||
559 | if (connector) { | ||
560 | struct radeon_connector *radeon_connector = to_radeon_connector(connector); | ||
561 | struct radeon_connector_atom_dig *dig_connector = | ||
562 | radeon_connector->con_priv; | ||
563 | |||
564 | dp_clock = dig_connector->dp_clock; | ||
565 | dp_lane_count = dig_connector->dp_lane_count; | ||
566 | hpd_id = radeon_connector->hpd.hpd; | ||
567 | bpc = connector->display_info.bpc; | ||
568 | } | ||
569 | |||
570 | /* no dig encoder assigned */ | ||
571 | if (dig->dig_encoder == -1) | ||
572 | return; | ||
573 | |||
574 | memset(&args, 0, sizeof(args)); | ||
575 | |||
576 | if (ASIC_IS_DCE4(rdev)) | ||
577 | index = GetIndexIntoMasterTable(COMMAND, DIGxEncoderControl); | ||
578 | else { | ||
579 | if (dig->dig_encoder) | ||
580 | index = GetIndexIntoMasterTable(COMMAND, DIG2EncoderControl); | ||
581 | else | ||
582 | index = GetIndexIntoMasterTable(COMMAND, DIG1EncoderControl); | ||
583 | } | ||
584 | |||
585 | if (!atom_parse_cmd_header(rdev->mode_info.atom_context, index, &frev, &crev)) | ||
586 | return; | ||
587 | |||
588 | switch (frev) { | ||
589 | case 1: | ||
590 | switch (crev) { | ||
591 | case 1: | ||
592 | args.v1.ucAction = action; | ||
593 | args.v1.usPixelClock = cpu_to_le16(radeon_encoder->pixel_clock / 10); | ||
594 | if (action == ATOM_ENCODER_CMD_SETUP_PANEL_MODE) | ||
595 | args.v3.ucPanelMode = panel_mode; | ||
596 | else | ||
597 | args.v1.ucEncoderMode = atombios_get_encoder_mode(encoder); | ||
598 | |||
599 | if (ENCODER_MODE_IS_DP(args.v1.ucEncoderMode)) | ||
600 | args.v1.ucLaneNum = dp_lane_count; | ||
601 | else if (radeon_encoder->pixel_clock > 165000) | ||
602 | args.v1.ucLaneNum = 8; | ||
603 | else | ||
604 | args.v1.ucLaneNum = 4; | ||
605 | |||
606 | if (ENCODER_MODE_IS_DP(args.v1.ucEncoderMode) && (dp_clock == 270000)) | ||
607 | args.v1.ucConfig |= ATOM_ENCODER_CONFIG_DPLINKRATE_2_70GHZ; | ||
608 | switch (radeon_encoder->encoder_id) { | ||
609 | case ENCODER_OBJECT_ID_INTERNAL_UNIPHY: | ||
610 | args.v1.ucConfig = ATOM_ENCODER_CONFIG_V2_TRANSMITTER1; | ||
611 | break; | ||
612 | case ENCODER_OBJECT_ID_INTERNAL_UNIPHY1: | ||
613 | case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_LVTMA: | ||
614 | args.v1.ucConfig = ATOM_ENCODER_CONFIG_V2_TRANSMITTER2; | ||
615 | break; | ||
616 | case ENCODER_OBJECT_ID_INTERNAL_UNIPHY2: | ||
617 | args.v1.ucConfig = ATOM_ENCODER_CONFIG_V2_TRANSMITTER3; | ||
618 | break; | ||
619 | } | ||
620 | if (dig->linkb) | ||
621 | args.v1.ucConfig |= ATOM_ENCODER_CONFIG_LINKB; | ||
622 | else | ||
623 | args.v1.ucConfig |= ATOM_ENCODER_CONFIG_LINKA; | ||
624 | break; | ||
625 | case 2: | ||
626 | case 3: | ||
627 | args.v3.ucAction = action; | ||
628 | args.v3.usPixelClock = cpu_to_le16(radeon_encoder->pixel_clock / 10); | ||
629 | if (action == ATOM_ENCODER_CMD_SETUP_PANEL_MODE) | ||
630 | args.v3.ucPanelMode = panel_mode; | ||
631 | else | ||
632 | args.v3.ucEncoderMode = atombios_get_encoder_mode(encoder); | ||
633 | |||
634 | if (ENCODER_MODE_IS_DP(args.v1.ucEncoderMode)) | ||
635 | args.v3.ucLaneNum = dp_lane_count; | ||
636 | else if (radeon_encoder->pixel_clock > 165000) | ||
637 | args.v3.ucLaneNum = 8; | ||
638 | else | ||
639 | args.v3.ucLaneNum = 4; | ||
640 | |||
641 | if (ENCODER_MODE_IS_DP(args.v1.ucEncoderMode) && (dp_clock == 270000)) | ||
642 | args.v1.ucConfig |= ATOM_ENCODER_CONFIG_V3_DPLINKRATE_2_70GHZ; | ||
643 | args.v3.acConfig.ucDigSel = dig->dig_encoder; | ||
644 | switch (bpc) { | ||
645 | case 0: | ||
646 | args.v3.ucBitPerColor = PANEL_BPC_UNDEFINE; | ||
647 | break; | ||
648 | case 6: | ||
649 | args.v3.ucBitPerColor = PANEL_6BIT_PER_COLOR; | ||
650 | break; | ||
651 | case 8: | ||
652 | default: | ||
653 | args.v3.ucBitPerColor = PANEL_8BIT_PER_COLOR; | ||
654 | break; | ||
655 | case 10: | ||
656 | args.v3.ucBitPerColor = PANEL_10BIT_PER_COLOR; | ||
657 | break; | ||
658 | case 12: | ||
659 | args.v3.ucBitPerColor = PANEL_12BIT_PER_COLOR; | ||
660 | break; | ||
661 | case 16: | ||
662 | args.v3.ucBitPerColor = PANEL_16BIT_PER_COLOR; | ||
663 | break; | ||
664 | } | ||
665 | break; | ||
666 | case 4: | ||
667 | args.v4.ucAction = action; | ||
668 | args.v4.usPixelClock = cpu_to_le16(radeon_encoder->pixel_clock / 10); | ||
669 | if (action == ATOM_ENCODER_CMD_SETUP_PANEL_MODE) | ||
670 | args.v4.ucPanelMode = panel_mode; | ||
671 | else | ||
672 | args.v4.ucEncoderMode = atombios_get_encoder_mode(encoder); | ||
673 | |||
674 | if (ENCODER_MODE_IS_DP(args.v1.ucEncoderMode)) | ||
675 | args.v4.ucLaneNum = dp_lane_count; | ||
676 | else if (radeon_encoder->pixel_clock > 165000) | ||
677 | args.v4.ucLaneNum = 8; | ||
678 | else | ||
679 | args.v4.ucLaneNum = 4; | ||
680 | |||
681 | if (ENCODER_MODE_IS_DP(args.v1.ucEncoderMode)) { | ||
682 | if (dp_clock == 270000) | ||
683 | args.v1.ucConfig |= ATOM_ENCODER_CONFIG_V4_DPLINKRATE_2_70GHZ; | ||
684 | else if (dp_clock == 540000) | ||
685 | args.v1.ucConfig |= ATOM_ENCODER_CONFIG_V4_DPLINKRATE_5_40GHZ; | ||
686 | } | ||
687 | args.v4.acConfig.ucDigSel = dig->dig_encoder; | ||
688 | switch (bpc) { | ||
689 | case 0: | ||
690 | args.v4.ucBitPerColor = PANEL_BPC_UNDEFINE; | ||
691 | break; | ||
692 | case 6: | ||
693 | args.v4.ucBitPerColor = PANEL_6BIT_PER_COLOR; | ||
694 | break; | ||
695 | case 8: | ||
696 | default: | ||
697 | args.v4.ucBitPerColor = PANEL_8BIT_PER_COLOR; | ||
698 | break; | ||
699 | case 10: | ||
700 | args.v4.ucBitPerColor = PANEL_10BIT_PER_COLOR; | ||
701 | break; | ||
702 | case 12: | ||
703 | args.v4.ucBitPerColor = PANEL_12BIT_PER_COLOR; | ||
704 | break; | ||
705 | case 16: | ||
706 | args.v4.ucBitPerColor = PANEL_16BIT_PER_COLOR; | ||
707 | break; | ||
708 | } | ||
709 | if (hpd_id == RADEON_HPD_NONE) | ||
710 | args.v4.ucHPD_ID = 0; | ||
711 | else | ||
712 | args.v4.ucHPD_ID = hpd_id + 1; | ||
713 | break; | ||
714 | default: | ||
715 | DRM_ERROR("Unknown table version %d, %d\n", frev, crev); | ||
716 | break; | ||
717 | } | ||
718 | break; | ||
719 | default: | ||
720 | DRM_ERROR("Unknown table version %d, %d\n", frev, crev); | ||
721 | break; | ||
722 | } | ||
723 | |||
724 | atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args); | ||
725 | |||
726 | } | ||
727 | |||
728 | union dig_transmitter_control { | ||
729 | DIG_TRANSMITTER_CONTROL_PS_ALLOCATION v1; | ||
730 | DIG_TRANSMITTER_CONTROL_PARAMETERS_V2 v2; | ||
731 | DIG_TRANSMITTER_CONTROL_PARAMETERS_V3 v3; | ||
732 | DIG_TRANSMITTER_CONTROL_PARAMETERS_V4 v4; | ||
733 | }; | ||
734 | |||
/**
 * atombios_dig_transmitter_setup - drive a DIG transmitter through AtomBIOS
 * @encoder: drm encoder whose transmitter is being programmed
 * @action: ATOM_TRANSMITTER_ACTION_* command (INIT, SETUP_VSEMPH, enable, ...)
 * @lane_num: lane selector; only consumed for ATOM_TRANSMITTER_ACTION_SETUP_VSEMPH
 * @lane_set: voltage swing / pre-emphasis setting; only for SETUP_VSEMPH
 *
 * Builds the revision-specific parameter block (v1-v4, chosen from the
 * frev/crev the BIOS reports for the table) and executes the
 * UNIPHY/LVTMA/DVO TransmitterControl command table.
 */
void
atombios_dig_transmitter_setup(struct drm_encoder *encoder, int action, uint8_t lane_num, uint8_t lane_set)
{
	struct drm_device *dev = encoder->dev;
	struct radeon_device *rdev = dev->dev_private;
	struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
	struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv;
	struct drm_connector *connector;
	union dig_transmitter_control args;
	int index = 0;
	uint8_t frev, crev;
	bool is_dp = false;
	int pll_id = 0;
	int dp_clock = 0;
	int dp_lane_count = 0;
	int connector_object_id = 0;
	int igp_lane_info = 0;
	int dig_encoder = dig->dig_encoder;

	if (action == ATOM_TRANSMITTER_ACTION_INIT) {
		connector = radeon_get_connector_for_encoder_init(encoder);
		/* just needed to avoid bailing in the encoder check. the encoder
		 * isn't used for init
		 */
		dig_encoder = 0;
	} else
		connector = radeon_get_connector_for_encoder(encoder);

	if (connector) {
		struct radeon_connector *radeon_connector = to_radeon_connector(connector);
		struct radeon_connector_atom_dig *dig_connector =
			radeon_connector->con_priv;

		/* pull DP link parameters and IGP lane routing off the connector */
		dp_clock = dig_connector->dp_clock;
		dp_lane_count = dig_connector->dp_lane_count;
		connector_object_id =
			(radeon_connector->connector_object_id & OBJECT_ID_MASK) >> OBJECT_ID_SHIFT;
		igp_lane_info = dig_connector->igp_lane_info;
	}

	if (encoder->crtc) {
		struct radeon_crtc *radeon_crtc = to_radeon_crtc(encoder->crtc);
		pll_id = radeon_crtc->pll_id;
	}

	/* no dig encoder assigned */
	if (dig_encoder == -1)
		return;

	if (ENCODER_MODE_IS_DP(atombios_get_encoder_mode(encoder)))
		is_dp = true;

	memset(&args, 0, sizeof(args));

	/* pick the command table that controls this transmitter */
	switch (radeon_encoder->encoder_id) {
	case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DVO1:
		index = GetIndexIntoMasterTable(COMMAND, DVOOutputControl);
		break;
	case ENCODER_OBJECT_ID_INTERNAL_UNIPHY:
	case ENCODER_OBJECT_ID_INTERNAL_UNIPHY1:
	case ENCODER_OBJECT_ID_INTERNAL_UNIPHY2:
		index = GetIndexIntoMasterTable(COMMAND, UNIPHYTransmitterControl);
		break;
	case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_LVTMA:
		index = GetIndexIntoMasterTable(COMMAND, LVTMATransmitterControl);
		break;
	}

	if (!atom_parse_cmd_header(rdev->mode_info.atom_context, index, &frev, &crev))
		return;

	switch (frev) {
	case 1:
		switch (crev) {
		case 1:
			args.v1.ucAction = action;
			if (action == ATOM_TRANSMITTER_ACTION_INIT) {
				args.v1.usInitInfo = cpu_to_le16(connector_object_id);
			} else if (action == ATOM_TRANSMITTER_ACTION_SETUP_VSEMPH) {
				args.v1.asMode.ucLaneSel = lane_num;
				args.v1.asMode.ucLaneSet = lane_set;
			} else {
				if (is_dp)
					args.v1.usPixelClock =
						cpu_to_le16(dp_clock / 10);
				else if (radeon_encoder->pixel_clock > 165000)
					/* dual link: each link carries half the pixel clock */
					args.v1.usPixelClock = cpu_to_le16((radeon_encoder->pixel_clock / 2) / 10);
				else
					args.v1.usPixelClock = cpu_to_le16(radeon_encoder->pixel_clock / 10);
			}

			args.v1.ucConfig = ATOM_TRANSMITTER_CONFIG_CLKSRC_PPLL;

			if (dig_encoder)
				args.v1.ucConfig |= ATOM_TRANSMITTER_CONFIG_DIG2_ENCODER;
			else
				args.v1.ucConfig |= ATOM_TRANSMITTER_CONFIG_DIG1_ENCODER;

			/* IGP parts expose a lane-routing bitmap; select the lane
			 * group(s) this UNIPHY actually owns.
			 */
			if ((rdev->flags & RADEON_IS_IGP) &&
			    (radeon_encoder->encoder_id == ENCODER_OBJECT_ID_INTERNAL_UNIPHY)) {
				if (is_dp || (radeon_encoder->pixel_clock <= 165000)) {
					if (igp_lane_info & 0x1)
						args.v1.ucConfig |= ATOM_TRANSMITTER_CONFIG_LANE_0_3;
					else if (igp_lane_info & 0x2)
						args.v1.ucConfig |= ATOM_TRANSMITTER_CONFIG_LANE_4_7;
					else if (igp_lane_info & 0x4)
						args.v1.ucConfig |= ATOM_TRANSMITTER_CONFIG_LANE_8_11;
					else if (igp_lane_info & 0x8)
						args.v1.ucConfig |= ATOM_TRANSMITTER_CONFIG_LANE_12_15;
				} else {
					/* dual-link clocks consume two lane groups */
					if (igp_lane_info & 0x3)
						args.v1.ucConfig |= ATOM_TRANSMITTER_CONFIG_LANE_0_7;
					else if (igp_lane_info & 0xc)
						args.v1.ucConfig |= ATOM_TRANSMITTER_CONFIG_LANE_8_15;
				}
			}

			if (dig->linkb)
				args.v1.ucConfig |= ATOM_TRANSMITTER_CONFIG_LINKB;
			else
				args.v1.ucConfig |= ATOM_TRANSMITTER_CONFIG_LINKA;

			if (is_dp)
				args.v1.ucConfig |= ATOM_TRANSMITTER_CONFIG_COHERENT;
			else if (radeon_encoder->devices & (ATOM_DEVICE_DFP_SUPPORT)) {
				if (dig->coherent_mode)
					args.v1.ucConfig |= ATOM_TRANSMITTER_CONFIG_COHERENT;
				if (radeon_encoder->pixel_clock > 165000)
					args.v1.ucConfig |= ATOM_TRANSMITTER_CONFIG_8LANE_LINK;
			}
			break;
		case 2:
			args.v2.ucAction = action;
			if (action == ATOM_TRANSMITTER_ACTION_INIT) {
				args.v2.usInitInfo = cpu_to_le16(connector_object_id);
			} else if (action == ATOM_TRANSMITTER_ACTION_SETUP_VSEMPH) {
				args.v2.asMode.ucLaneSel = lane_num;
				args.v2.asMode.ucLaneSet = lane_set;
			} else {
				if (is_dp)
					args.v2.usPixelClock =
						cpu_to_le16(dp_clock / 10);
				else if (radeon_encoder->pixel_clock > 165000)
					args.v2.usPixelClock = cpu_to_le16((radeon_encoder->pixel_clock / 2) / 10);
				else
					args.v2.usPixelClock = cpu_to_le16(radeon_encoder->pixel_clock / 10);
			}

			args.v2.acConfig.ucEncoderSel = dig_encoder;
			if (dig->linkb)
				args.v2.acConfig.ucLinkSel = 1;

			switch (radeon_encoder->encoder_id) {
			case ENCODER_OBJECT_ID_INTERNAL_UNIPHY:
				args.v2.acConfig.ucTransmitterSel = 0;
				break;
			case ENCODER_OBJECT_ID_INTERNAL_UNIPHY1:
				args.v2.acConfig.ucTransmitterSel = 1;
				break;
			case ENCODER_OBJECT_ID_INTERNAL_UNIPHY2:
				args.v2.acConfig.ucTransmitterSel = 2;
				break;
			}

			if (is_dp) {
				args.v2.acConfig.fCoherentMode = 1;
				args.v2.acConfig.fDPConnector = 1;
			} else if (radeon_encoder->devices & (ATOM_DEVICE_DFP_SUPPORT)) {
				if (dig->coherent_mode)
					args.v2.acConfig.fCoherentMode = 1;
				if (radeon_encoder->pixel_clock > 165000)
					args.v2.acConfig.fDualLinkConnector = 1;
			}
			break;
		case 3:
			args.v3.ucAction = action;
			if (action == ATOM_TRANSMITTER_ACTION_INIT) {
				args.v3.usInitInfo = cpu_to_le16(connector_object_id);
			} else if (action == ATOM_TRANSMITTER_ACTION_SETUP_VSEMPH) {
				args.v3.asMode.ucLaneSel = lane_num;
				args.v3.asMode.ucLaneSet = lane_set;
			} else {
				if (is_dp)
					args.v3.usPixelClock =
						cpu_to_le16(dp_clock / 10);
				else if (radeon_encoder->pixel_clock > 165000)
					args.v3.usPixelClock = cpu_to_le16((radeon_encoder->pixel_clock / 2) / 10);
				else
					args.v3.usPixelClock = cpu_to_le16(radeon_encoder->pixel_clock / 10);
			}

			if (is_dp)
				args.v3.ucLaneNum = dp_lane_count;
			else if (radeon_encoder->pixel_clock > 165000)
				args.v3.ucLaneNum = 8;
			else
				args.v3.ucLaneNum = 4;

			if (dig->linkb)
				args.v3.acConfig.ucLinkSel = 1;
			if (dig_encoder & 1)
				args.v3.acConfig.ucEncoderSel = 1;

			/* Select the PLL for the PHY
			 * DP PHY should be clocked from external src if there is
			 * one.
			 */
			/* On DCE4, if there is an external clock, it generates the DP ref clock */
			if (is_dp && rdev->clock.dp_extclk)
				args.v3.acConfig.ucRefClkSource = 2; /* external src */
			else
				args.v3.acConfig.ucRefClkSource = pll_id;

			switch (radeon_encoder->encoder_id) {
			case ENCODER_OBJECT_ID_INTERNAL_UNIPHY:
				args.v3.acConfig.ucTransmitterSel = 0;
				break;
			case ENCODER_OBJECT_ID_INTERNAL_UNIPHY1:
				args.v3.acConfig.ucTransmitterSel = 1;
				break;
			case ENCODER_OBJECT_ID_INTERNAL_UNIPHY2:
				args.v3.acConfig.ucTransmitterSel = 2;
				break;
			}

			if (is_dp)
				args.v3.acConfig.fCoherentMode = 1; /* DP requires coherent */
			else if (radeon_encoder->devices & (ATOM_DEVICE_DFP_SUPPORT)) {
				if (dig->coherent_mode)
					args.v3.acConfig.fCoherentMode = 1;
				if (radeon_encoder->pixel_clock > 165000)
					args.v3.acConfig.fDualLinkConnector = 1;
			}
			break;
		case 4:
			args.v4.ucAction = action;
			if (action == ATOM_TRANSMITTER_ACTION_INIT) {
				args.v4.usInitInfo = cpu_to_le16(connector_object_id);
			} else if (action == ATOM_TRANSMITTER_ACTION_SETUP_VSEMPH) {
				args.v4.asMode.ucLaneSel = lane_num;
				args.v4.asMode.ucLaneSet = lane_set;
			} else {
				if (is_dp)
					args.v4.usPixelClock =
						cpu_to_le16(dp_clock / 10);
				else if (radeon_encoder->pixel_clock > 165000)
					args.v4.usPixelClock = cpu_to_le16((radeon_encoder->pixel_clock / 2) / 10);
				else
					args.v4.usPixelClock = cpu_to_le16(radeon_encoder->pixel_clock / 10);
			}

			if (is_dp)
				args.v4.ucLaneNum = dp_lane_count;
			else if (radeon_encoder->pixel_clock > 165000)
				args.v4.ucLaneNum = 8;
			else
				args.v4.ucLaneNum = 4;

			if (dig->linkb)
				args.v4.acConfig.ucLinkSel = 1;
			if (dig_encoder & 1)
				args.v4.acConfig.ucEncoderSel = 1;

			/* Select the PLL for the PHY
			 * DP PHY should be clocked from external src if there is
			 * one.
			 */
			/* On DCE5 DCPLL usually generates the DP ref clock */
			if (is_dp) {
				if (rdev->clock.dp_extclk)
					args.v4.acConfig.ucRefClkSource = ENCODER_REFCLK_SRC_EXTCLK;
				else
					args.v4.acConfig.ucRefClkSource = ENCODER_REFCLK_SRC_DCPLL;
			} else
				args.v4.acConfig.ucRefClkSource = pll_id;

			switch (radeon_encoder->encoder_id) {
			case ENCODER_OBJECT_ID_INTERNAL_UNIPHY:
				args.v4.acConfig.ucTransmitterSel = 0;
				break;
			case ENCODER_OBJECT_ID_INTERNAL_UNIPHY1:
				args.v4.acConfig.ucTransmitterSel = 1;
				break;
			case ENCODER_OBJECT_ID_INTERNAL_UNIPHY2:
				args.v4.acConfig.ucTransmitterSel = 2;
				break;
			}

			if (is_dp)
				args.v4.acConfig.fCoherentMode = 1; /* DP requires coherent */
			else if (radeon_encoder->devices & (ATOM_DEVICE_DFP_SUPPORT)) {
				if (dig->coherent_mode)
					args.v4.acConfig.fCoherentMode = 1;
				if (radeon_encoder->pixel_clock > 165000)
					args.v4.acConfig.fDualLinkConnector = 1;
			}
			break;
		default:
			DRM_ERROR("Unknown table version %d, %d\n", frev, crev);
			break;
		}
		break;
	default:
		DRM_ERROR("Unknown table version %d, %d\n", frev, crev);
		break;
	}

	atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
}
1044 | |||
1045 | bool | ||
1046 | atombios_set_edp_panel_power(struct drm_connector *connector, int action) | ||
1047 | { | ||
1048 | struct radeon_connector *radeon_connector = to_radeon_connector(connector); | ||
1049 | struct drm_device *dev = radeon_connector->base.dev; | ||
1050 | struct radeon_device *rdev = dev->dev_private; | ||
1051 | union dig_transmitter_control args; | ||
1052 | int index = GetIndexIntoMasterTable(COMMAND, UNIPHYTransmitterControl); | ||
1053 | uint8_t frev, crev; | ||
1054 | |||
1055 | if (connector->connector_type != DRM_MODE_CONNECTOR_eDP) | ||
1056 | goto done; | ||
1057 | |||
1058 | if (!ASIC_IS_DCE4(rdev)) | ||
1059 | goto done; | ||
1060 | |||
1061 | if ((action != ATOM_TRANSMITTER_ACTION_POWER_ON) && | ||
1062 | (action != ATOM_TRANSMITTER_ACTION_POWER_OFF)) | ||
1063 | goto done; | ||
1064 | |||
1065 | if (!atom_parse_cmd_header(rdev->mode_info.atom_context, index, &frev, &crev)) | ||
1066 | goto done; | ||
1067 | |||
1068 | memset(&args, 0, sizeof(args)); | ||
1069 | |||
1070 | args.v1.ucAction = action; | ||
1071 | |||
1072 | atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args); | ||
1073 | |||
1074 | /* wait for the panel to power up */ | ||
1075 | if (action == ATOM_TRANSMITTER_ACTION_POWER_ON) { | ||
1076 | int i; | ||
1077 | |||
1078 | for (i = 0; i < 300; i++) { | ||
1079 | if (radeon_hpd_sense(rdev, radeon_connector->hpd.hpd)) | ||
1080 | return true; | ||
1081 | mdelay(1); | ||
1082 | } | ||
1083 | return false; | ||
1084 | } | ||
1085 | done: | ||
1086 | return true; | ||
1087 | } | ||
1088 | |||
/* Parameter layouts for the ExternalEncoderControl AtomBIOS command table;
 * v1 vs v3 is selected from the table revision at call time.
 */
union external_encoder_control {
	EXTERNAL_ENCODER_CONTROL_PS_ALLOCATION v1;
	EXTERNAL_ENCODER_CONTROL_PS_ALLOCATION_V3 v3;
};
1093 | |||
/**
 * atombios_external_encoder_setup - program an external encoder via AtomBIOS
 * @encoder: internal drm encoder the external encoder hangs off
 * @ext_encoder: the external encoder object itself
 * @action: encoder action (ATOM_ENABLE/DISABLE or EXTERNAL_ENCODER_ACTION_V3_*)
 *
 * Builds the revision-specific ExternalEncoderControl parameters (DP link
 * rate, lane count, encoder instance, panel bpc) and executes the table.
 */
static void
atombios_external_encoder_setup(struct drm_encoder *encoder,
				struct drm_encoder *ext_encoder,
				int action)
{
	struct drm_device *dev = encoder->dev;
	struct radeon_device *rdev = dev->dev_private;
	struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
	struct radeon_encoder *ext_radeon_encoder = to_radeon_encoder(ext_encoder);
	union external_encoder_control args;
	struct drm_connector *connector;
	int index = GetIndexIntoMasterTable(COMMAND, ExternalEncoderControl);
	u8 frev, crev;
	int dp_clock = 0;
	int dp_lane_count = 0;
	int connector_object_id = 0;
	u32 ext_enum = (ext_radeon_encoder->encoder_enum & ENUM_ID_MASK) >> ENUM_ID_SHIFT;
	int bpc = 8;	/* default panel depth when no connector info is available */

	if (action == EXTERNAL_ENCODER_ACTION_V3_ENCODER_INIT)
		connector = radeon_get_connector_for_encoder_init(encoder);
	else
		connector = radeon_get_connector_for_encoder(encoder);

	if (connector) {
		struct radeon_connector *radeon_connector = to_radeon_connector(connector);
		struct radeon_connector_atom_dig *dig_connector =
			radeon_connector->con_priv;

		dp_clock = dig_connector->dp_clock;
		dp_lane_count = dig_connector->dp_lane_count;
		connector_object_id =
			(radeon_connector->connector_object_id & OBJECT_ID_MASK) >> OBJECT_ID_SHIFT;
		bpc = connector->display_info.bpc;
	}

	memset(&args, 0, sizeof(args));

	if (!atom_parse_cmd_header(rdev->mode_info.atom_context, index, &frev, &crev))
		return;

	switch (frev) {
	case 1:
		/* no params on frev 1 */
		break;
	case 2:
		switch (crev) {
		case 1:
		case 2:
			args.v1.sDigEncoder.ucAction = action;
			args.v1.sDigEncoder.usPixelClock = cpu_to_le16(radeon_encoder->pixel_clock / 10);
			args.v1.sDigEncoder.ucEncoderMode = atombios_get_encoder_mode(encoder);

			if (ENCODER_MODE_IS_DP(args.v1.sDigEncoder.ucEncoderMode)) {
				if (dp_clock == 270000)
					args.v1.sDigEncoder.ucConfig |= ATOM_ENCODER_CONFIG_DPLINKRATE_2_70GHZ;
				args.v1.sDigEncoder.ucLaneNum = dp_lane_count;
			} else if (radeon_encoder->pixel_clock > 165000)
				/* >165 MHz TMDS needs a dual (8-lane) link */
				args.v1.sDigEncoder.ucLaneNum = 8;
			else
				args.v1.sDigEncoder.ucLaneNum = 4;
			break;
		case 3:
			args.v3.sExtEncoder.ucAction = action;
			if (action == EXTERNAL_ENCODER_ACTION_V3_ENCODER_INIT)
				args.v3.sExtEncoder.usConnectorId = cpu_to_le16(connector_object_id);
			else
				args.v3.sExtEncoder.usPixelClock = cpu_to_le16(radeon_encoder->pixel_clock / 10);
			args.v3.sExtEncoder.ucEncoderMode = atombios_get_encoder_mode(encoder);

			if (ENCODER_MODE_IS_DP(args.v3.sExtEncoder.ucEncoderMode)) {
				if (dp_clock == 270000)
					args.v3.sExtEncoder.ucConfig |= EXTERNAL_ENCODER_CONFIG_V3_DPLINKRATE_2_70GHZ;
				else if (dp_clock == 540000)
					args.v3.sExtEncoder.ucConfig |= EXTERNAL_ENCODER_CONFIG_V3_DPLINKRATE_5_40GHZ;
				args.v3.sExtEncoder.ucLaneNum = dp_lane_count;
			} else if (radeon_encoder->pixel_clock > 165000)
				args.v3.sExtEncoder.ucLaneNum = 8;
			else
				args.v3.sExtEncoder.ucLaneNum = 4;
			/* tell the table which external encoder instance this is */
			switch (ext_enum) {
			case GRAPH_OBJECT_ENUM_ID1:
				args.v3.sExtEncoder.ucConfig |= EXTERNAL_ENCODER_CONFIG_V3_ENCODER1;
				break;
			case GRAPH_OBJECT_ENUM_ID2:
				args.v3.sExtEncoder.ucConfig |= EXTERNAL_ENCODER_CONFIG_V3_ENCODER2;
				break;
			case GRAPH_OBJECT_ENUM_ID3:
				args.v3.sExtEncoder.ucConfig |= EXTERNAL_ENCODER_CONFIG_V3_ENCODER3;
				break;
			}
			/* map connector bpc to the table's panel depth enum;
			 * unknown depths fall back to 8 bpc
			 */
			switch (bpc) {
			case 0:
				args.v3.sExtEncoder.ucBitPerColor = PANEL_BPC_UNDEFINE;
				break;
			case 6:
				args.v3.sExtEncoder.ucBitPerColor = PANEL_6BIT_PER_COLOR;
				break;
			case 8:
			default:
				args.v3.sExtEncoder.ucBitPerColor = PANEL_8BIT_PER_COLOR;
				break;
			case 10:
				args.v3.sExtEncoder.ucBitPerColor = PANEL_10BIT_PER_COLOR;
				break;
			case 12:
				args.v3.sExtEncoder.ucBitPerColor = PANEL_12BIT_PER_COLOR;
				break;
			case 16:
				args.v3.sExtEncoder.ucBitPerColor = PANEL_16BIT_PER_COLOR;
				break;
			}
			break;
		default:
			DRM_ERROR("Unknown table version: %d, %d\n", frev, crev);
			return;
		}
		break;
	default:
		DRM_ERROR("Unknown table version: %d, %d\n", frev, crev);
		return;
	}
	atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
}
1218 | |||
1219 | static void | ||
1220 | atombios_yuv_setup(struct drm_encoder *encoder, bool enable) | ||
1221 | { | ||
1222 | struct drm_device *dev = encoder->dev; | ||
1223 | struct radeon_device *rdev = dev->dev_private; | ||
1224 | struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder); | ||
1225 | struct radeon_crtc *radeon_crtc = to_radeon_crtc(encoder->crtc); | ||
1226 | ENABLE_YUV_PS_ALLOCATION args; | ||
1227 | int index = GetIndexIntoMasterTable(COMMAND, EnableYUV); | ||
1228 | uint32_t temp, reg; | ||
1229 | |||
1230 | memset(&args, 0, sizeof(args)); | ||
1231 | |||
1232 | if (rdev->family >= CHIP_R600) | ||
1233 | reg = R600_BIOS_3_SCRATCH; | ||
1234 | else | ||
1235 | reg = RADEON_BIOS_3_SCRATCH; | ||
1236 | |||
1237 | /* XXX: fix up scratch reg handling */ | ||
1238 | temp = RREG32(reg); | ||
1239 | if (radeon_encoder->active_device & (ATOM_DEVICE_TV_SUPPORT)) | ||
1240 | WREG32(reg, (ATOM_S3_TV1_ACTIVE | | ||
1241 | (radeon_crtc->crtc_id << 18))); | ||
1242 | else if (radeon_encoder->active_device & (ATOM_DEVICE_CV_SUPPORT)) | ||
1243 | WREG32(reg, (ATOM_S3_CV_ACTIVE | (radeon_crtc->crtc_id << 24))); | ||
1244 | else | ||
1245 | WREG32(reg, 0); | ||
1246 | |||
1247 | if (enable) | ||
1248 | args.ucEnable = ATOM_ENABLE; | ||
1249 | args.ucCRTC = radeon_crtc->crtc_id; | ||
1250 | |||
1251 | atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args); | ||
1252 | |||
1253 | WREG32(reg, temp); | ||
1254 | } | ||
1255 | |||
/**
 * radeon_atom_encoder_dpms_avivo - DPMS for legacy/AVIVO encoders
 * @encoder: drm encoder
 * @mode: DRM_MODE_DPMS_* power state
 *
 * Selects the AtomBIOS *OutputControl command table matching the encoder
 * type (and, for DACs, the currently active device) and executes it with
 * ATOM_ENABLE or ATOM_DISABLE.  LCD panels get an additional backlight
 * on/off action after the output action.
 */
static void
radeon_atom_encoder_dpms_avivo(struct drm_encoder *encoder, int mode)
{
	struct drm_device *dev = encoder->dev;
	struct radeon_device *rdev = dev->dev_private;
	struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
	DISPLAY_DEVICE_OUTPUT_CONTROL_PS_ALLOCATION args;
	int index = 0;

	memset(&args, 0, sizeof(args));

	/* map encoder object -> output control command table */
	switch (radeon_encoder->encoder_id) {
	case ENCODER_OBJECT_ID_INTERNAL_TMDS1:
	case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_TMDS1:
		index = GetIndexIntoMasterTable(COMMAND, TMDSAOutputControl);
		break;
	case ENCODER_OBJECT_ID_INTERNAL_DVO1:
	case ENCODER_OBJECT_ID_INTERNAL_DDI:
	case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DVO1:
		index = GetIndexIntoMasterTable(COMMAND, DVOOutputControl);
		break;
	case ENCODER_OBJECT_ID_INTERNAL_LVDS:
		index = GetIndexIntoMasterTable(COMMAND, LCD1OutputControl);
		break;
	case ENCODER_OBJECT_ID_INTERNAL_LVTM1:
		if (radeon_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT))
			index = GetIndexIntoMasterTable(COMMAND, LCD1OutputControl);
		else
			index = GetIndexIntoMasterTable(COMMAND, LVTMAOutputControl);
		break;
	case ENCODER_OBJECT_ID_INTERNAL_DAC1:
	case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC1:
		/* DACs are shared between TV, CV and CRT; pick by active device */
		if (radeon_encoder->active_device & (ATOM_DEVICE_TV_SUPPORT))
			index = GetIndexIntoMasterTable(COMMAND, TV1OutputControl);
		else if (radeon_encoder->active_device & (ATOM_DEVICE_CV_SUPPORT))
			index = GetIndexIntoMasterTable(COMMAND, CV1OutputControl);
		else
			index = GetIndexIntoMasterTable(COMMAND, DAC1OutputControl);
		break;
	case ENCODER_OBJECT_ID_INTERNAL_DAC2:
	case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC2:
		if (radeon_encoder->active_device & (ATOM_DEVICE_TV_SUPPORT))
			index = GetIndexIntoMasterTable(COMMAND, TV1OutputControl);
		else if (radeon_encoder->active_device & (ATOM_DEVICE_CV_SUPPORT))
			index = GetIndexIntoMasterTable(COMMAND, CV1OutputControl);
		else
			index = GetIndexIntoMasterTable(COMMAND, DAC2OutputControl);
		break;
	default:
		return;
	}

	switch (mode) {
	case DRM_MODE_DPMS_ON:
		args.ucAction = ATOM_ENABLE;
		/* workaround for DVOOutputControl on some RS690 systems */
		if (radeon_encoder->encoder_id == ENCODER_OBJECT_ID_INTERNAL_DDI) {
			/* mask DFP2I active in the scratch reg around the table call,
			 * then restore it
			 */
			u32 reg = RREG32(RADEON_BIOS_3_SCRATCH);
			WREG32(RADEON_BIOS_3_SCRATCH, reg & ~ATOM_S3_DFP2I_ACTIVE);
			atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
			WREG32(RADEON_BIOS_3_SCRATCH, reg);
		} else
			atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
		if (radeon_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT)) {
			/* turn the panel backlight on after enabling the output */
			args.ucAction = ATOM_LCD_BLON;
			atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
		}
		break;
	case DRM_MODE_DPMS_STANDBY:
	case DRM_MODE_DPMS_SUSPEND:
	case DRM_MODE_DPMS_OFF:
		args.ucAction = ATOM_DISABLE;
		atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
		if (radeon_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT)) {
			args.ucAction = ATOM_LCD_BLOFF;
			atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
		}
		break;
	}
}
1336 | |||
/**
 * radeon_atom_encoder_dpms_dig - DPMS for DIG (UNIPHY/LVTMA) encoders
 * @encoder: drm encoder
 * @mode: DRM_MODE_DPMS_* power state
 *
 * Powers the transmitter up or down.  For DP links this also sequences
 * eDP panel power, video stream off/on (DCE4+) and link training; LCD
 * panels additionally get backlight on/off.  The call order within each
 * branch is significant.
 */
static void
radeon_atom_encoder_dpms_dig(struct drm_encoder *encoder, int mode)
{
	struct drm_device *dev = encoder->dev;
	struct radeon_device *rdev = dev->dev_private;
	struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
	struct drm_connector *connector = radeon_get_connector_for_encoder(encoder);
	struct radeon_connector *radeon_connector = NULL;
	struct radeon_connector_atom_dig *radeon_dig_connector = NULL;

	if (connector) {
		radeon_connector = to_radeon_connector(connector);
		radeon_dig_connector = radeon_connector->con_priv;
	}

	switch (mode) {
	case DRM_MODE_DPMS_ON:
		/* some early dce3.2 boards have a bug in their transmitter control table */
		if ((rdev->family == CHIP_RV710) || (rdev->family == CHIP_RV730))
			atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_ENABLE, 0, 0);
		else
			atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_ENABLE_OUTPUT, 0, 0);
		if (ENCODER_MODE_IS_DP(atombios_get_encoder_mode(encoder)) && connector) {
			/* eDP panels must be powered before link training */
			if (connector->connector_type == DRM_MODE_CONNECTOR_eDP) {
				atombios_set_edp_panel_power(connector,
							     ATOM_TRANSMITTER_ACTION_POWER_ON);
				radeon_dig_connector->edp_on = true;
			}
			/* on DCE4+ the video stream is stopped around link training */
			if (ASIC_IS_DCE4(rdev))
				atombios_dig_encoder_setup(encoder, ATOM_ENCODER_CMD_DP_VIDEO_OFF, 0);
			radeon_dp_link_train(encoder, connector);
			if (ASIC_IS_DCE4(rdev))
				atombios_dig_encoder_setup(encoder, ATOM_ENCODER_CMD_DP_VIDEO_ON, 0);
		}
		if (radeon_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT))
			atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_LCD_BLON, 0, 0);
		break;
	case DRM_MODE_DPMS_STANDBY:
	case DRM_MODE_DPMS_SUSPEND:
	case DRM_MODE_DPMS_OFF:
		atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_DISABLE_OUTPUT, 0, 0);
		if (ENCODER_MODE_IS_DP(atombios_get_encoder_mode(encoder)) && connector) {
			if (ASIC_IS_DCE4(rdev))
				atombios_dig_encoder_setup(encoder, ATOM_ENCODER_CMD_DP_VIDEO_OFF, 0);
			/* cut eDP panel power last, after the stream is down */
			if (connector->connector_type == DRM_MODE_CONNECTOR_eDP) {
				atombios_set_edp_panel_power(connector,
							     ATOM_TRANSMITTER_ACTION_POWER_OFF);
				radeon_dig_connector->edp_on = false;
			}
		}
		if (radeon_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT))
			atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_LCD_BLOFF, 0, 0);
		break;
	}
}
1392 | |||
1393 | static void | ||
1394 | radeon_atom_encoder_dpms_ext(struct drm_encoder *encoder, | ||
1395 | struct drm_encoder *ext_encoder, | ||
1396 | int mode) | ||
1397 | { | ||
1398 | struct drm_device *dev = encoder->dev; | ||
1399 | struct radeon_device *rdev = dev->dev_private; | ||
1400 | |||
1401 | switch (mode) { | ||
1402 | case DRM_MODE_DPMS_ON: | ||
1403 | default: | ||
1404 | if (ASIC_IS_DCE41(rdev)) { | ||
1405 | atombios_external_encoder_setup(encoder, ext_encoder, | ||
1406 | EXTERNAL_ENCODER_ACTION_V3_ENABLE_OUTPUT); | ||
1407 | atombios_external_encoder_setup(encoder, ext_encoder, | ||
1408 | EXTERNAL_ENCODER_ACTION_V3_ENCODER_BLANKING_OFF); | ||
1409 | } else | ||
1410 | atombios_external_encoder_setup(encoder, ext_encoder, ATOM_ENABLE); | ||
1411 | break; | ||
1412 | case DRM_MODE_DPMS_STANDBY: | ||
1413 | case DRM_MODE_DPMS_SUSPEND: | ||
1414 | case DRM_MODE_DPMS_OFF: | ||
1415 | if (ASIC_IS_DCE41(rdev)) { | ||
1416 | atombios_external_encoder_setup(encoder, ext_encoder, | ||
1417 | EXTERNAL_ENCODER_ACTION_V3_ENCODER_BLANKING); | ||
1418 | atombios_external_encoder_setup(encoder, ext_encoder, | ||
1419 | EXTERNAL_ENCODER_ACTION_V3_DISABLE_OUTPUT); | ||
1420 | } else | ||
1421 | atombios_external_encoder_setup(encoder, ext_encoder, ATOM_DISABLE); | ||
1422 | break; | ||
1423 | } | ||
1424 | } | ||
1425 | |||
/**
 * radeon_atom_encoder_dpms - top-level DPMS dispatch for AtomBIOS encoders
 * @encoder: drm encoder
 * @mode: DRM_MODE_DPMS_* power state
 *
 * Routes the DPMS request to the AVIVO, DIG, DVO or DAC handler depending
 * on encoder type and ASIC generation, forwards it to any attached
 * external encoder, and finally updates the BIOS scratch registers.
 */
static void
radeon_atom_encoder_dpms(struct drm_encoder *encoder, int mode)
{
	struct drm_device *dev = encoder->dev;
	struct radeon_device *rdev = dev->dev_private;
	struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
	struct drm_encoder *ext_encoder = radeon_get_external_encoder(encoder);

	DRM_DEBUG_KMS("encoder dpms %d to mode %d, devices %08x, active_devices %08x\n",
		      radeon_encoder->encoder_id, mode, radeon_encoder->devices,
		      radeon_encoder->active_device);
	switch (radeon_encoder->encoder_id) {
	case ENCODER_OBJECT_ID_INTERNAL_TMDS1:
	case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_TMDS1:
	case ENCODER_OBJECT_ID_INTERNAL_LVDS:
	case ENCODER_OBJECT_ID_INTERNAL_LVTM1:
	case ENCODER_OBJECT_ID_INTERNAL_DVO1:
	case ENCODER_OBJECT_ID_INTERNAL_DDI:
	case ENCODER_OBJECT_ID_INTERNAL_DAC2:
	case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC2:
		radeon_atom_encoder_dpms_avivo(encoder, mode);
		break;
	case ENCODER_OBJECT_ID_INTERNAL_UNIPHY:
	case ENCODER_OBJECT_ID_INTERNAL_UNIPHY1:
	case ENCODER_OBJECT_ID_INTERNAL_UNIPHY2:
	case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_LVTMA:
		radeon_atom_encoder_dpms_dig(encoder, mode);
		break;
	case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DVO1:
		/* DVO handling differs by ASIC generation */
		if (ASIC_IS_DCE5(rdev)) {
			switch (mode) {
			case DRM_MODE_DPMS_ON:
				atombios_dvo_setup(encoder, ATOM_ENABLE);
				break;
			case DRM_MODE_DPMS_STANDBY:
			case DRM_MODE_DPMS_SUSPEND:
			case DRM_MODE_DPMS_OFF:
				atombios_dvo_setup(encoder, ATOM_DISABLE);
				break;
			}
		} else if (ASIC_IS_DCE3(rdev))
			radeon_atom_encoder_dpms_dig(encoder, mode);
		else
			radeon_atom_encoder_dpms_avivo(encoder, mode);
		break;
	case ENCODER_OBJECT_ID_INTERNAL_DAC1:
	case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC1:
		/* DCE5 has a dedicated DAC setup table */
		if (ASIC_IS_DCE5(rdev)) {
			switch (mode) {
			case DRM_MODE_DPMS_ON:
				atombios_dac_setup(encoder, ATOM_ENABLE);
				break;
			case DRM_MODE_DPMS_STANDBY:
			case DRM_MODE_DPMS_SUSPEND:
			case DRM_MODE_DPMS_OFF:
				atombios_dac_setup(encoder, ATOM_DISABLE);
				break;
			}
		} else
			radeon_atom_encoder_dpms_avivo(encoder, mode);
		break;
	default:
		return;
	}

	if (ext_encoder)
		radeon_atom_encoder_dpms_ext(encoder, ext_encoder, mode);

	radeon_atombios_encoder_dpms_scratch_regs(encoder, (mode == DRM_MODE_DPMS_ON) ? true : false);

}
1497 | |||
/* Parameter layouts for the SelectCRTC_Source AtomBIOS command table;
 * v1 vs v2 is selected from the table revision at call time.
 */
union crtc_source_param {
	SELECT_CRTC_SOURCE_PS_ALLOCATION v1;
	SELECT_CRTC_SOURCE_PARAMETERS_V2 v2;
};
1502 | |||
/* Execute the SelectCRTC_Source atom command table so the BIOS routes
 * this encoder to the CRTC it is currently attached to, then mirror the
 * new routing into the BIOS scratch registers.  The argument layout
 * (v1 vs v2) is chosen from the table revision. */
static void
atombios_set_encoder_crtc_source(struct drm_encoder *encoder)
{
	struct drm_device *dev = encoder->dev;
	struct radeon_device *rdev = dev->dev_private;
	struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
	struct radeon_crtc *radeon_crtc = to_radeon_crtc(encoder->crtc);
	union crtc_source_param args;
	int index = GetIndexIntoMasterTable(COMMAND, SelectCRTC_Source);
	uint8_t frev, crev;
	struct radeon_encoder_atom_dig *dig;

	memset(&args, 0, sizeof(args));

	/* bail out if the BIOS does not provide this command table */
	if (!atom_parse_cmd_header(rdev->mode_info.atom_context, index, &frev, &crev))
		return;

	switch (frev) {
	case 1:
		switch (crev) {
		case 1:
		default:
			if (ASIC_IS_AVIVO(rdev))
				args.v1.ucCRTC = radeon_crtc->crtc_id;
			else {
				/* pre-AVIVO: all encoders except DAC1 encode the
				 * crtc id shifted left by two */
				if (radeon_encoder->encoder_id == ENCODER_OBJECT_ID_INTERNAL_DAC1) {
					args.v1.ucCRTC = radeon_crtc->crtc_id;
				} else {
					args.v1.ucCRTC = radeon_crtc->crtc_id << 2;
				}
			}
			/* v1 identifies the target by device index */
			switch (radeon_encoder->encoder_id) {
			case ENCODER_OBJECT_ID_INTERNAL_TMDS1:
			case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_TMDS1:
				args.v1.ucDevice = ATOM_DEVICE_DFP1_INDEX;
				break;
			case ENCODER_OBJECT_ID_INTERNAL_LVDS:
			case ENCODER_OBJECT_ID_INTERNAL_LVTM1:
				if (radeon_encoder->devices & ATOM_DEVICE_LCD1_SUPPORT)
					args.v1.ucDevice = ATOM_DEVICE_LCD1_INDEX;
				else
					args.v1.ucDevice = ATOM_DEVICE_DFP3_INDEX;
				break;
			case ENCODER_OBJECT_ID_INTERNAL_DVO1:
			case ENCODER_OBJECT_ID_INTERNAL_DDI:
			case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DVO1:
				args.v1.ucDevice = ATOM_DEVICE_DFP2_INDEX;
				break;
			case ENCODER_OBJECT_ID_INTERNAL_DAC1:
			case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC1:
				/* DACs are shared between TV, CV and CRT; pick by
				 * whichever device is currently active */
				if (radeon_encoder->active_device & (ATOM_DEVICE_TV_SUPPORT))
					args.v1.ucDevice = ATOM_DEVICE_TV1_INDEX;
				else if (radeon_encoder->active_device & (ATOM_DEVICE_CV_SUPPORT))
					args.v1.ucDevice = ATOM_DEVICE_CV_INDEX;
				else
					args.v1.ucDevice = ATOM_DEVICE_CRT1_INDEX;
				break;
			case ENCODER_OBJECT_ID_INTERNAL_DAC2:
			case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC2:
				if (radeon_encoder->active_device & (ATOM_DEVICE_TV_SUPPORT))
					args.v1.ucDevice = ATOM_DEVICE_TV1_INDEX;
				else if (radeon_encoder->active_device & (ATOM_DEVICE_CV_SUPPORT))
					args.v1.ucDevice = ATOM_DEVICE_CV_INDEX;
				else
					args.v1.ucDevice = ATOM_DEVICE_CRT2_INDEX;
				break;
			}
			break;
		case 2:
			/* v2 identifies the target by encoder id + encode mode */
			args.v2.ucCRTC = radeon_crtc->crtc_id;
			if (radeon_encoder_get_dp_bridge_encoder_id(encoder) != ENCODER_OBJECT_ID_NONE) {
				struct drm_connector *connector = radeon_get_connector_for_encoder(encoder);

				/* behind a DP bridge the connector type decides the mode */
				if (connector->connector_type == DRM_MODE_CONNECTOR_LVDS)
					args.v2.ucEncodeMode = ATOM_ENCODER_MODE_LVDS;
				else if (connector->connector_type == DRM_MODE_CONNECTOR_VGA)
					args.v2.ucEncodeMode = ATOM_ENCODER_MODE_CRT;
				else
					args.v2.ucEncodeMode = atombios_get_encoder_mode(encoder);
			} else
				args.v2.ucEncodeMode = atombios_get_encoder_mode(encoder);
			switch (radeon_encoder->encoder_id) {
			case ENCODER_OBJECT_ID_INTERNAL_UNIPHY:
			case ENCODER_OBJECT_ID_INTERNAL_UNIPHY1:
			case ENCODER_OBJECT_ID_INTERNAL_UNIPHY2:
			case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_LVTMA:
				/* map the assigned DIG block to the atom encoder id */
				dig = radeon_encoder->enc_priv;
				switch (dig->dig_encoder) {
				case 0:
					args.v2.ucEncoderID = ASIC_INT_DIG1_ENCODER_ID;
					break;
				case 1:
					args.v2.ucEncoderID = ASIC_INT_DIG2_ENCODER_ID;
					break;
				case 2:
					args.v2.ucEncoderID = ASIC_INT_DIG3_ENCODER_ID;
					break;
				case 3:
					args.v2.ucEncoderID = ASIC_INT_DIG4_ENCODER_ID;
					break;
				case 4:
					args.v2.ucEncoderID = ASIC_INT_DIG5_ENCODER_ID;
					break;
				case 5:
					args.v2.ucEncoderID = ASIC_INT_DIG6_ENCODER_ID;
					break;
				}
				break;
			case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DVO1:
				args.v2.ucEncoderID = ASIC_INT_DVO_ENCODER_ID;
				break;
			case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC1:
				if (radeon_encoder->active_device & (ATOM_DEVICE_TV_SUPPORT))
					args.v2.ucEncoderID = ASIC_INT_TV_ENCODER_ID;
				else if (radeon_encoder->active_device & (ATOM_DEVICE_CV_SUPPORT))
					args.v2.ucEncoderID = ASIC_INT_TV_ENCODER_ID;
				else
					args.v2.ucEncoderID = ASIC_INT_DAC1_ENCODER_ID;
				break;
			case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC2:
				if (radeon_encoder->active_device & (ATOM_DEVICE_TV_SUPPORT))
					args.v2.ucEncoderID = ASIC_INT_TV_ENCODER_ID;
				else if (radeon_encoder->active_device & (ATOM_DEVICE_CV_SUPPORT))
					args.v2.ucEncoderID = ASIC_INT_TV_ENCODER_ID;
				else
					args.v2.ucEncoderID = ASIC_INT_DAC2_ENCODER_ID;
				break;
			}
			break;
		}
		break;
	default:
		DRM_ERROR("Unknown table version: %d, %d\n", frev, crev);
		return;
	}

	atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);

	/* update scratch regs with new routing */
	radeon_atombios_encoder_crtc_scratch_regs(encoder, radeon_crtc->crtc_id);
}
1644 | |||
/* Apply board/chip specific register fixups after a mode set:
 * disable LVDS dithering on a specific Macbook panel, and re-program
 * the interlace data-format bit that the scaler setup can clear. */
static void
atombios_apply_encoder_quirks(struct drm_encoder *encoder,
			      struct drm_display_mode *mode)
{
	struct drm_device *dev = encoder->dev;
	struct radeon_device *rdev = dev->dev_private;
	struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
	struct radeon_crtc *radeon_crtc = to_radeon_crtc(encoder->crtc);

	/* Funky macbooks */
	if ((dev->pdev->device == 0x71C5) &&
	    (dev->pdev->subsystem_vendor == 0x106b) &&
	    (dev->pdev->subsystem_device == 0x0080)) {
		if (radeon_encoder->devices & ATOM_DEVICE_LCD1_SUPPORT) {
			uint32_t lvtma_bit_depth_control = RREG32(AVIVO_LVTMA_BIT_DEPTH_CONTROL);

			/* turn off truncation and spatial dithering on the panel */
			lvtma_bit_depth_control &= ~AVIVO_LVTMA_BIT_DEPTH_CONTROL_TRUNCATE_EN;
			lvtma_bit_depth_control &= ~AVIVO_LVTMA_BIT_DEPTH_CONTROL_SPATIAL_DITHER_EN;

			WREG32(AVIVO_LVTMA_BIT_DEPTH_CONTROL, lvtma_bit_depth_control);
		}
	}

	/* set scaler clears this on some chips */
	if (ASIC_IS_AVIVO(rdev) &&
	    (!(radeon_encoder->active_device & (ATOM_DEVICE_TV_SUPPORT)))) {
		if (ASIC_IS_DCE4(rdev)) {
			if (mode->flags & DRM_MODE_FLAG_INTERLACE)
				WREG32(EVERGREEN_DATA_FORMAT + radeon_crtc->crtc_offset,
				       EVERGREEN_INTERLEAVE_EN);
			else
				WREG32(EVERGREEN_DATA_FORMAT + radeon_crtc->crtc_offset, 0);
		} else {
			if (mode->flags & DRM_MODE_FLAG_INTERLACE)
				WREG32(AVIVO_D1MODE_DATA_FORMAT + radeon_crtc->crtc_offset,
				       AVIVO_D1MODE_INTERLEAVE_EN);
			else
				WREG32(AVIVO_D1MODE_DATA_FORMAT + radeon_crtc->crtc_offset, 0);
		}
	}
}
1686 | |||
/* Choose which DIG encoder block should drive this encoder.
 * The mapping rules differ per display-controller generation:
 * fixed per-PHY/link on DCE4/5, crtc-id based on DCE3.2/llano,
 * and first-free (with LVTMA pinned to DIGB) on DCE3.
 * Returns the DIG block index. */
static int radeon_atom_pick_dig_encoder(struct drm_encoder *encoder)
{
	struct drm_device *dev = encoder->dev;
	struct radeon_device *rdev = dev->dev_private;
	struct radeon_crtc *radeon_crtc = to_radeon_crtc(encoder->crtc);
	struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
	struct drm_encoder *test_encoder;
	struct radeon_encoder_atom_dig *dig;
	uint32_t dig_enc_in_use = 0;	/* bitmask of DIG blocks claimed by other encoders */

	/* DCE4/5 */
	if (ASIC_IS_DCE4(rdev)) {
		dig = radeon_encoder->enc_priv;
		if (ASIC_IS_DCE41(rdev)) {
			/* ontario follows DCE4 */
			if (rdev->family == CHIP_PALM) {
				if (dig->linkb)
					return 1;
				else
					return 0;
			} else
				/* llano follows DCE3.2 */
				return radeon_crtc->crtc_id;
		} else {
			/* fixed mapping: two DIG blocks per UNIPHY, link B gets the odd one */
			switch (radeon_encoder->encoder_id) {
			case ENCODER_OBJECT_ID_INTERNAL_UNIPHY:
				if (dig->linkb)
					return 1;
				else
					return 0;
				break;
			case ENCODER_OBJECT_ID_INTERNAL_UNIPHY1:
				if (dig->linkb)
					return 3;
				else
					return 2;
				break;
			case ENCODER_OBJECT_ID_INTERNAL_UNIPHY2:
				if (dig->linkb)
					return 5;
				else
					return 4;
				break;
			}
		}
	}

	/* on DCE32 an encoder can drive any block so just use the crtc id */
	if (ASIC_IS_DCE32(rdev)) {
		return radeon_crtc->crtc_id;
	}

	/* on DCE3 - LVTMA can only be driven by DIGB */
	list_for_each_entry(test_encoder, &dev->mode_config.encoder_list, head) {
		struct radeon_encoder *radeon_test_encoder;

		if (encoder == test_encoder)
			continue;

		if (!radeon_encoder_is_digital(test_encoder))
			continue;

		radeon_test_encoder = to_radeon_encoder(test_encoder);
		dig = radeon_test_encoder->enc_priv;

		if (dig->dig_encoder >= 0)
			dig_enc_in_use |= (1 << dig->dig_encoder);
	}

	if (radeon_encoder->encoder_id == ENCODER_OBJECT_ID_INTERNAL_KLDSCP_LVTMA) {
		if (dig_enc_in_use & 0x2)
			DRM_ERROR("LVDS required digital encoder 2 but it was in use - stealing\n");
		return 1;
	}
	if (!(dig_enc_in_use & 1))
		return 0;
	return 1;
}
1765 | |||
/* This only needs to be called once at startup: issue the transmitter
 * INIT action for every DIG encoder, and on DCE4.1 also run the init
 * action on any attached external encoder. */
void
radeon_atom_encoder_init(struct radeon_device *rdev)
{
	struct drm_device *dev = rdev->ddev;
	struct drm_encoder *encoder;

	list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
		struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
		struct drm_encoder *ext_encoder = radeon_get_external_encoder(encoder);

		switch (radeon_encoder->encoder_id) {
		case ENCODER_OBJECT_ID_INTERNAL_UNIPHY:
		case ENCODER_OBJECT_ID_INTERNAL_UNIPHY1:
		case ENCODER_OBJECT_ID_INTERNAL_UNIPHY2:
		case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_LVTMA:
			atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_INIT, 0, 0);
			break;
		default:
			break;
		}

		if (ext_encoder && ASIC_IS_DCE41(rdev))
			atombios_external_encoder_setup(encoder, ext_encoder,
							EXTERNAL_ENCODER_ACTION_V3_ENCODER_INIT);
	}
}
1793 | |||
/* drm_encoder_helper_funcs.mode_set: program the encoder (and any
 * external encoder) for the adjusted mode.  The disable/setup/enable
 * ordering of the DIG encoder and transmitter calls differs between
 * DCE4+ and earlier parts and must not be reordered. */
static void
radeon_atom_encoder_mode_set(struct drm_encoder *encoder,
			     struct drm_display_mode *mode,
			     struct drm_display_mode *adjusted_mode)
{
	struct drm_device *dev = encoder->dev;
	struct radeon_device *rdev = dev->dev_private;
	struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
	struct drm_encoder *ext_encoder = radeon_get_external_encoder(encoder);

	radeon_encoder->pixel_clock = adjusted_mode->clock;

	/* enable YUV output only for TV/CV on pre-DCE4 AVIVO parts */
	if (ASIC_IS_AVIVO(rdev) && !ASIC_IS_DCE4(rdev)) {
		if (radeon_encoder->active_device & (ATOM_DEVICE_CV_SUPPORT | ATOM_DEVICE_TV_SUPPORT))
			atombios_yuv_setup(encoder, true);
		else
			atombios_yuv_setup(encoder, false);
	}

	switch (radeon_encoder->encoder_id) {
	case ENCODER_OBJECT_ID_INTERNAL_TMDS1:
	case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_TMDS1:
	case ENCODER_OBJECT_ID_INTERNAL_LVDS:
	case ENCODER_OBJECT_ID_INTERNAL_LVTM1:
		atombios_digital_setup(encoder, PANEL_ENCODER_ACTION_ENABLE);
		break;
	case ENCODER_OBJECT_ID_INTERNAL_UNIPHY:
	case ENCODER_OBJECT_ID_INTERNAL_UNIPHY1:
	case ENCODER_OBJECT_ID_INTERNAL_UNIPHY2:
	case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_LVTMA:
		if (ASIC_IS_DCE4(rdev)) {
			/* disable the transmitter */
			atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_DISABLE, 0, 0);
			/* setup and enable the encoder */
			atombios_dig_encoder_setup(encoder, ATOM_ENCODER_CMD_SETUP, 0);

			/* enable the transmitter */
			atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_ENABLE, 0, 0);
		} else {
			/* disable the encoder and transmitter */
			atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_DISABLE, 0, 0);
			atombios_dig_encoder_setup(encoder, ATOM_DISABLE, 0);

			/* setup and enable the encoder and transmitter */
			atombios_dig_encoder_setup(encoder, ATOM_ENABLE, 0);
			atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_SETUP, 0, 0);
			atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_ENABLE, 0, 0);
		}
		break;
	case ENCODER_OBJECT_ID_INTERNAL_DDI:
	case ENCODER_OBJECT_ID_INTERNAL_DVO1:
	case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DVO1:
		atombios_dvo_setup(encoder, ATOM_ENABLE);
		break;
	case ENCODER_OBJECT_ID_INTERNAL_DAC1:
	case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC1:
	case ENCODER_OBJECT_ID_INTERNAL_DAC2:
	case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC2:
		atombios_dac_setup(encoder, ATOM_ENABLE);
		if (radeon_encoder->devices & (ATOM_DEVICE_TV_SUPPORT | ATOM_DEVICE_CV_SUPPORT)) {
			if (radeon_encoder->active_device & (ATOM_DEVICE_TV_SUPPORT | ATOM_DEVICE_CV_SUPPORT))
				atombios_tv_setup(encoder, ATOM_ENABLE);
			else
				atombios_tv_setup(encoder, ATOM_DISABLE);
		}
		break;
	}

	if (ext_encoder) {
		if (ASIC_IS_DCE41(rdev))
			atombios_external_encoder_setup(encoder, ext_encoder,
							EXTERNAL_ENCODER_ACTION_V3_ENCODER_SETUP);
		else
			atombios_external_encoder_setup(encoder, ext_encoder, ATOM_ENABLE);
	}

	atombios_apply_encoder_quirks(encoder, adjusted_mode);

	/* bring up HDMI audio/infoframes when the encoder is in HDMI mode */
	if (atombios_get_encoder_mode(encoder) == ATOM_ENCODER_MODE_HDMI) {
		r600_hdmi_enable(encoder);
		r600_hdmi_setmode(encoder, adjusted_mode);
	}
}
1877 | |||
1878 | static bool | ||
1879 | atombios_dac_load_detect(struct drm_encoder *encoder, struct drm_connector *connector) | ||
1880 | { | ||
1881 | struct drm_device *dev = encoder->dev; | ||
1882 | struct radeon_device *rdev = dev->dev_private; | ||
1883 | struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder); | ||
1884 | struct radeon_connector *radeon_connector = to_radeon_connector(connector); | ||
1885 | |||
1886 | if (radeon_encoder->devices & (ATOM_DEVICE_TV_SUPPORT | | ||
1887 | ATOM_DEVICE_CV_SUPPORT | | ||
1888 | ATOM_DEVICE_CRT_SUPPORT)) { | ||
1889 | DAC_LOAD_DETECTION_PS_ALLOCATION args; | ||
1890 | int index = GetIndexIntoMasterTable(COMMAND, DAC_LoadDetection); | ||
1891 | uint8_t frev, crev; | ||
1892 | |||
1893 | memset(&args, 0, sizeof(args)); | ||
1894 | |||
1895 | if (!atom_parse_cmd_header(rdev->mode_info.atom_context, index, &frev, &crev)) | ||
1896 | return false; | ||
1897 | |||
1898 | args.sDacload.ucMisc = 0; | ||
1899 | |||
1900 | if ((radeon_encoder->encoder_id == ENCODER_OBJECT_ID_INTERNAL_DAC1) || | ||
1901 | (radeon_encoder->encoder_id == ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC1)) | ||
1902 | args.sDacload.ucDacType = ATOM_DAC_A; | ||
1903 | else | ||
1904 | args.sDacload.ucDacType = ATOM_DAC_B; | ||
1905 | |||
1906 | if (radeon_connector->devices & ATOM_DEVICE_CRT1_SUPPORT) | ||
1907 | args.sDacload.usDeviceID = cpu_to_le16(ATOM_DEVICE_CRT1_SUPPORT); | ||
1908 | else if (radeon_connector->devices & ATOM_DEVICE_CRT2_SUPPORT) | ||
1909 | args.sDacload.usDeviceID = cpu_to_le16(ATOM_DEVICE_CRT2_SUPPORT); | ||
1910 | else if (radeon_connector->devices & ATOM_DEVICE_CV_SUPPORT) { | ||
1911 | args.sDacload.usDeviceID = cpu_to_le16(ATOM_DEVICE_CV_SUPPORT); | ||
1912 | if (crev >= 3) | ||
1913 | args.sDacload.ucMisc = DAC_LOAD_MISC_YPrPb; | ||
1914 | } else if (radeon_connector->devices & ATOM_DEVICE_TV1_SUPPORT) { | ||
1915 | args.sDacload.usDeviceID = cpu_to_le16(ATOM_DEVICE_TV1_SUPPORT); | ||
1916 | if (crev >= 3) | ||
1917 | args.sDacload.ucMisc = DAC_LOAD_MISC_YPrPb; | ||
1918 | } | ||
1919 | |||
1920 | atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args); | ||
1921 | |||
1922 | return true; | ||
1923 | } else | ||
1924 | return false; | ||
1925 | } | ||
1926 | |||
1927 | static enum drm_connector_status | ||
1928 | radeon_atom_dac_detect(struct drm_encoder *encoder, struct drm_connector *connector) | ||
1929 | { | ||
1930 | struct drm_device *dev = encoder->dev; | ||
1931 | struct radeon_device *rdev = dev->dev_private; | ||
1932 | struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder); | ||
1933 | struct radeon_connector *radeon_connector = to_radeon_connector(connector); | ||
1934 | uint32_t bios_0_scratch; | ||
1935 | |||
1936 | if (!atombios_dac_load_detect(encoder, connector)) { | ||
1937 | DRM_DEBUG_KMS("detect returned false \n"); | ||
1938 | return connector_status_unknown; | ||
1939 | } | ||
1940 | |||
1941 | if (rdev->family >= CHIP_R600) | ||
1942 | bios_0_scratch = RREG32(R600_BIOS_0_SCRATCH); | ||
1943 | else | ||
1944 | bios_0_scratch = RREG32(RADEON_BIOS_0_SCRATCH); | ||
1945 | |||
1946 | DRM_DEBUG_KMS("Bios 0 scratch %x %08x\n", bios_0_scratch, radeon_encoder->devices); | ||
1947 | if (radeon_connector->devices & ATOM_DEVICE_CRT1_SUPPORT) { | ||
1948 | if (bios_0_scratch & ATOM_S0_CRT1_MASK) | ||
1949 | return connector_status_connected; | ||
1950 | } | ||
1951 | if (radeon_connector->devices & ATOM_DEVICE_CRT2_SUPPORT) { | ||
1952 | if (bios_0_scratch & ATOM_S0_CRT2_MASK) | ||
1953 | return connector_status_connected; | ||
1954 | } | ||
1955 | if (radeon_connector->devices & ATOM_DEVICE_CV_SUPPORT) { | ||
1956 | if (bios_0_scratch & (ATOM_S0_CV_MASK|ATOM_S0_CV_MASK_A)) | ||
1957 | return connector_status_connected; | ||
1958 | } | ||
1959 | if (radeon_connector->devices & ATOM_DEVICE_TV1_SUPPORT) { | ||
1960 | if (bios_0_scratch & (ATOM_S0_TV1_COMPOSITE | ATOM_S0_TV1_COMPOSITE_A)) | ||
1961 | return connector_status_connected; /* CTV */ | ||
1962 | else if (bios_0_scratch & (ATOM_S0_TV1_SVIDEO | ATOM_S0_TV1_SVIDEO_A)) | ||
1963 | return connector_status_connected; /* STV */ | ||
1964 | } | ||
1965 | return connector_status_disconnected; | ||
1966 | } | ||
1967 | |||
1968 | static enum drm_connector_status | ||
1969 | radeon_atom_dig_detect(struct drm_encoder *encoder, struct drm_connector *connector) | ||
1970 | { | ||
1971 | struct drm_device *dev = encoder->dev; | ||
1972 | struct radeon_device *rdev = dev->dev_private; | ||
1973 | struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder); | ||
1974 | struct radeon_connector *radeon_connector = to_radeon_connector(connector); | ||
1975 | struct drm_encoder *ext_encoder = radeon_get_external_encoder(encoder); | ||
1976 | u32 bios_0_scratch; | ||
1977 | |||
1978 | if (!ASIC_IS_DCE4(rdev)) | ||
1979 | return connector_status_unknown; | ||
1980 | |||
1981 | if (!ext_encoder) | ||
1982 | return connector_status_unknown; | ||
1983 | |||
1984 | if ((radeon_connector->devices & ATOM_DEVICE_CRT_SUPPORT) == 0) | ||
1985 | return connector_status_unknown; | ||
1986 | |||
1987 | /* load detect on the dp bridge */ | ||
1988 | atombios_external_encoder_setup(encoder, ext_encoder, | ||
1989 | EXTERNAL_ENCODER_ACTION_V3_DACLOAD_DETECTION); | ||
1990 | |||
1991 | bios_0_scratch = RREG32(R600_BIOS_0_SCRATCH); | ||
1992 | |||
1993 | DRM_DEBUG_KMS("Bios 0 scratch %x %08x\n", bios_0_scratch, radeon_encoder->devices); | ||
1994 | if (radeon_connector->devices & ATOM_DEVICE_CRT1_SUPPORT) { | ||
1995 | if (bios_0_scratch & ATOM_S0_CRT1_MASK) | ||
1996 | return connector_status_connected; | ||
1997 | } | ||
1998 | if (radeon_connector->devices & ATOM_DEVICE_CRT2_SUPPORT) { | ||
1999 | if (bios_0_scratch & ATOM_S0_CRT2_MASK) | ||
2000 | return connector_status_connected; | ||
2001 | } | ||
2002 | if (radeon_connector->devices & ATOM_DEVICE_CV_SUPPORT) { | ||
2003 | if (bios_0_scratch & (ATOM_S0_CV_MASK|ATOM_S0_CV_MASK_A)) | ||
2004 | return connector_status_connected; | ||
2005 | } | ||
2006 | if (radeon_connector->devices & ATOM_DEVICE_TV1_SUPPORT) { | ||
2007 | if (bios_0_scratch & (ATOM_S0_TV1_COMPOSITE | ATOM_S0_TV1_COMPOSITE_A)) | ||
2008 | return connector_status_connected; /* CTV */ | ||
2009 | else if (bios_0_scratch & (ATOM_S0_TV1_SVIDEO | ATOM_S0_TV1_SVIDEO_A)) | ||
2010 | return connector_status_connected; /* STV */ | ||
2011 | } | ||
2012 | return connector_status_disconnected; | ||
2013 | } | ||
2014 | |||
2015 | void | ||
2016 | radeon_atom_ext_encoder_setup_ddc(struct drm_encoder *encoder) | ||
2017 | { | ||
2018 | struct drm_encoder *ext_encoder = radeon_get_external_encoder(encoder); | ||
2019 | |||
2020 | if (ext_encoder) | ||
2021 | /* ddc_setup on the dp bridge */ | ||
2022 | atombios_external_encoder_setup(encoder, ext_encoder, | ||
2023 | EXTERNAL_ENCODER_ACTION_V3_DDC_SETUP); | ||
2024 | |||
2025 | } | ||
2026 | |||
/* drm_encoder_helper_funcs.prepare: assign a DIG block where needed,
 * lock the output and power the encoder down before the mode set, and
 * program the CRTC routing.  The lock/dpms/crtc-source ordering is
 * deliberate. */
static void radeon_atom_encoder_prepare(struct drm_encoder *encoder)
{
	struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
	struct drm_connector *connector = radeon_get_connector_for_encoder(encoder);

	/* digital outputs (and DP bridges) need a DIG block assigned
	 * before the mode set */
	if ((radeon_encoder->active_device &
	     (ATOM_DEVICE_DFP_SUPPORT | ATOM_DEVICE_LCD_SUPPORT)) ||
	    (radeon_encoder_get_dp_bridge_encoder_id(encoder) !=
	     ENCODER_OBJECT_ID_NONE)) {
		struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv;
		if (dig)
			dig->dig_encoder = radeon_atom_pick_dig_encoder(encoder);
	}

	radeon_atom_output_lock(encoder, true);
	radeon_atom_encoder_dpms(encoder, DRM_MODE_DPMS_OFF);

	if (connector) {
		struct radeon_connector *radeon_connector = to_radeon_connector(connector);

		/* select the clock/data port if it uses a router */
		if (radeon_connector->router.cd_valid)
			radeon_router_select_cd_port(radeon_connector);

		/* turn eDP panel on for mode set */
		if (connector->connector_type == DRM_MODE_CONNECTOR_eDP)
			atombios_set_edp_panel_power(connector,
						     ATOM_TRANSMITTER_ACTION_POWER_ON);
	}

	/* this is needed for the pll/ss setup to work correctly in some cases */
	atombios_set_encoder_crtc_source(encoder);
}
2060 | |||
/* drm_encoder_helper_funcs.commit: power the encoder back on after the
 * mode set and release the output lock taken in prepare(). */
static void radeon_atom_encoder_commit(struct drm_encoder *encoder)
{
	radeon_atom_encoder_dpms(encoder, DRM_MODE_DPMS_ON);
	radeon_atom_output_lock(encoder, false);
}
2066 | |||
2067 | static void radeon_atom_encoder_disable(struct drm_encoder *encoder) | ||
2068 | { | ||
2069 | struct drm_device *dev = encoder->dev; | ||
2070 | struct radeon_device *rdev = dev->dev_private; | ||
2071 | struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder); | ||
2072 | struct radeon_encoder_atom_dig *dig; | ||
2073 | |||
2074 | /* check for pre-DCE3 cards with shared encoders; | ||
2075 | * can't really use the links individually, so don't disable | ||
2076 | * the encoder if it's in use by another connector | ||
2077 | */ | ||
2078 | if (!ASIC_IS_DCE3(rdev)) { | ||
2079 | struct drm_encoder *other_encoder; | ||
2080 | struct radeon_encoder *other_radeon_encoder; | ||
2081 | |||
2082 | list_for_each_entry(other_encoder, &dev->mode_config.encoder_list, head) { | ||
2083 | other_radeon_encoder = to_radeon_encoder(other_encoder); | ||
2084 | if ((radeon_encoder->encoder_id == other_radeon_encoder->encoder_id) && | ||
2085 | drm_helper_encoder_in_use(other_encoder)) | ||
2086 | goto disable_done; | ||
2087 | } | ||
2088 | } | ||
2089 | |||
2090 | radeon_atom_encoder_dpms(encoder, DRM_MODE_DPMS_OFF); | ||
2091 | |||
2092 | switch (radeon_encoder->encoder_id) { | ||
2093 | case ENCODER_OBJECT_ID_INTERNAL_TMDS1: | ||
2094 | case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_TMDS1: | ||
2095 | case ENCODER_OBJECT_ID_INTERNAL_LVDS: | ||
2096 | case ENCODER_OBJECT_ID_INTERNAL_LVTM1: | ||
2097 | atombios_digital_setup(encoder, PANEL_ENCODER_ACTION_DISABLE); | ||
2098 | break; | ||
2099 | case ENCODER_OBJECT_ID_INTERNAL_UNIPHY: | ||
2100 | case ENCODER_OBJECT_ID_INTERNAL_UNIPHY1: | ||
2101 | case ENCODER_OBJECT_ID_INTERNAL_UNIPHY2: | ||
2102 | case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_LVTMA: | ||
2103 | if (ASIC_IS_DCE4(rdev)) | ||
2104 | /* disable the transmitter */ | ||
2105 | atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_DISABLE, 0, 0); | ||
2106 | else { | ||
2107 | /* disable the encoder and transmitter */ | ||
2108 | atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_DISABLE, 0, 0); | ||
2109 | atombios_dig_encoder_setup(encoder, ATOM_DISABLE, 0); | ||
2110 | } | ||
2111 | break; | ||
2112 | case ENCODER_OBJECT_ID_INTERNAL_DDI: | ||
2113 | case ENCODER_OBJECT_ID_INTERNAL_DVO1: | ||
2114 | case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DVO1: | ||
2115 | atombios_dvo_setup(encoder, ATOM_DISABLE); | ||
2116 | break; | ||
2117 | case ENCODER_OBJECT_ID_INTERNAL_DAC1: | ||
2118 | case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC1: | ||
2119 | case ENCODER_OBJECT_ID_INTERNAL_DAC2: | ||
2120 | case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC2: | ||
2121 | atombios_dac_setup(encoder, ATOM_DISABLE); | ||
2122 | if (radeon_encoder->devices & (ATOM_DEVICE_TV_SUPPORT | ATOM_DEVICE_CV_SUPPORT)) | ||
2123 | atombios_tv_setup(encoder, ATOM_DISABLE); | ||
2124 | break; | ||
2125 | } | ||
2126 | |||
2127 | disable_done: | ||
2128 | if (radeon_encoder_is_digital(encoder)) { | ||
2129 | if (atombios_get_encoder_mode(encoder) == ATOM_ENCODER_MODE_HDMI) | ||
2130 | r600_hdmi_disable(encoder); | ||
2131 | dig = radeon_encoder->enc_priv; | ||
2132 | dig->dig_encoder = -1; | ||
2133 | } | ||
2134 | radeon_encoder->active_device = 0; | ||
2135 | } | ||
2136 | |||
/* these are handled by the primary encoders */
/* no-op: the primary encoder's .prepare does the work */
static void radeon_atom_ext_prepare(struct drm_encoder *encoder)
{

}
2142 | |||
/* no-op: the primary encoder's .commit does the work */
static void radeon_atom_ext_commit(struct drm_encoder *encoder)
{

}
2147 | |||
/* no-op: the primary encoder's .mode_set programs the external encoder */
static void
radeon_atom_ext_mode_set(struct drm_encoder *encoder,
			 struct drm_display_mode *mode,
			 struct drm_display_mode *adjusted_mode)
{

}
2155 | |||
/* no-op: the primary encoder's .disable does the work */
static void radeon_atom_ext_disable(struct drm_encoder *encoder)
{

}
2160 | |||
/* no-op: dpms for external encoders is driven by the primary encoder */
static void
radeon_atom_ext_dpms(struct drm_encoder *encoder, int mode)
{

}
2166 | |||
/* external encoders need no mode fixup of their own; accept as-is */
static bool radeon_atom_ext_mode_fixup(struct drm_encoder *encoder,
				       struct drm_display_mode *mode,
				       struct drm_display_mode *adjusted_mode)
{
	return true;
}
2173 | |||
/* helper vtable for external (bridge) encoders - all stubs, since the
 * primary encoder drives them */
static const struct drm_encoder_helper_funcs radeon_atom_ext_helper_funcs = {
	.dpms = radeon_atom_ext_dpms,
	.mode_fixup = radeon_atom_ext_mode_fixup,
	.prepare = radeon_atom_ext_prepare,
	.mode_set = radeon_atom_ext_mode_set,
	.commit = radeon_atom_ext_commit,
	.disable = radeon_atom_ext_disable,
	/* no detect for TMDS/LVDS yet */
};
2183 | |||
/* helper vtable for digital (TMDS/LVDS/DP) encoders */
static const struct drm_encoder_helper_funcs radeon_atom_dig_helper_funcs = {
	.dpms = radeon_atom_encoder_dpms,
	.mode_fixup = radeon_atom_mode_fixup,
	.prepare = radeon_atom_encoder_prepare,
	.mode_set = radeon_atom_encoder_mode_set,
	.commit = radeon_atom_encoder_commit,
	.disable = radeon_atom_encoder_disable,
	.detect = radeon_atom_dig_detect,
};
2193 | |||
/* helper vtable for analog (DAC) encoders; no .disable hook */
static const struct drm_encoder_helper_funcs radeon_atom_dac_helper_funcs = {
	.dpms = radeon_atom_encoder_dpms,
	.mode_fixup = radeon_atom_mode_fixup,
	.prepare = radeon_atom_encoder_prepare,
	.mode_set = radeon_atom_encoder_mode_set,
	.commit = radeon_atom_encoder_commit,
	.detect = radeon_atom_dac_detect,
};
2202 | |||
/* drm_encoder_funcs.destroy: free the encoder private data, detach the
 * encoder from the drm core, then free the wrapper itself (cleanup must
 * run before the final kfree). */
void radeon_enc_destroy(struct drm_encoder *encoder)
{
	struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
	kfree(radeon_encoder->enc_priv);
	drm_encoder_cleanup(encoder);
	kfree(radeon_encoder);
}
2210 | |||
/* core encoder ops shared by all atom encoder types */
static const struct drm_encoder_funcs radeon_atom_enc_funcs = {
	.destroy = radeon_enc_destroy,
};
2214 | |||
2215 | struct radeon_encoder_atom_dac * | ||
2216 | radeon_atombios_set_dac_info(struct radeon_encoder *radeon_encoder) | ||
2217 | { | ||
2218 | struct drm_device *dev = radeon_encoder->base.dev; | ||
2219 | struct radeon_device *rdev = dev->dev_private; | ||
2220 | struct radeon_encoder_atom_dac *dac = kzalloc(sizeof(struct radeon_encoder_atom_dac), GFP_KERNEL); | ||
2221 | |||
2222 | if (!dac) | ||
2223 | return NULL; | ||
2224 | |||
2225 | dac->tv_std = radeon_atombios_get_tv_info(rdev); | ||
2226 | return dac; | ||
2227 | } | ||
2228 | |||
2229 | struct radeon_encoder_atom_dig * | ||
2230 | radeon_atombios_set_dig_info(struct radeon_encoder *radeon_encoder) | ||
2231 | { | ||
2232 | int encoder_enum = (radeon_encoder->encoder_enum & ENUM_ID_MASK) >> ENUM_ID_SHIFT; | ||
2233 | struct radeon_encoder_atom_dig *dig = kzalloc(sizeof(struct radeon_encoder_atom_dig), GFP_KERNEL); | ||
2234 | |||
2235 | if (!dig) | ||
2236 | return NULL; | ||
2237 | |||
2238 | /* coherent mode by default */ | ||
2239 | dig->coherent_mode = true; | ||
2240 | dig->dig_encoder = -1; | ||
2241 | |||
2242 | if (encoder_enum == 2) | ||
2243 | dig->linkb = true; | ||
2244 | else | ||
2245 | dig->linkb = false; | ||
2246 | |||
2247 | return dig; | ||
2248 | } | ||
2249 | |||
2250 | void | ||
2251 | radeon_add_atom_encoder(struct drm_device *dev, | ||
2252 | uint32_t encoder_enum, | ||
2253 | uint32_t supported_device, | ||
2254 | u16 caps) | ||
2255 | { | ||
2256 | struct radeon_device *rdev = dev->dev_private; | ||
2257 | struct drm_encoder *encoder; | ||
2258 | struct radeon_encoder *radeon_encoder; | ||
2259 | |||
2260 | /* see if we already added it */ | ||
2261 | list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) { | ||
2262 | radeon_encoder = to_radeon_encoder(encoder); | ||
2263 | if (radeon_encoder->encoder_enum == encoder_enum) { | ||
2264 | radeon_encoder->devices |= supported_device; | ||
2265 | return; | ||
2266 | } | ||
2267 | |||
2268 | } | ||
2269 | |||
2270 | /* add a new one */ | ||
2271 | radeon_encoder = kzalloc(sizeof(struct radeon_encoder), GFP_KERNEL); | ||
2272 | if (!radeon_encoder) | ||
2273 | return; | ||
2274 | |||
2275 | encoder = &radeon_encoder->base; | ||
2276 | switch (rdev->num_crtc) { | ||
2277 | case 1: | ||
2278 | encoder->possible_crtcs = 0x1; | ||
2279 | break; | ||
2280 | case 2: | ||
2281 | default: | ||
2282 | encoder->possible_crtcs = 0x3; | ||
2283 | break; | ||
2284 | case 4: | ||
2285 | encoder->possible_crtcs = 0xf; | ||
2286 | break; | ||
2287 | case 6: | ||
2288 | encoder->possible_crtcs = 0x3f; | ||
2289 | break; | ||
2290 | } | ||
2291 | |||
2292 | radeon_encoder->enc_priv = NULL; | ||
2293 | |||
2294 | radeon_encoder->encoder_enum = encoder_enum; | ||
2295 | radeon_encoder->encoder_id = (encoder_enum & OBJECT_ID_MASK) >> OBJECT_ID_SHIFT; | ||
2296 | radeon_encoder->devices = supported_device; | ||
2297 | radeon_encoder->rmx_type = RMX_OFF; | ||
2298 | radeon_encoder->underscan_type = UNDERSCAN_OFF; | ||
2299 | radeon_encoder->is_ext_encoder = false; | ||
2300 | radeon_encoder->caps = caps; | ||
2301 | |||
2302 | switch (radeon_encoder->encoder_id) { | ||
2303 | case ENCODER_OBJECT_ID_INTERNAL_LVDS: | ||
2304 | case ENCODER_OBJECT_ID_INTERNAL_TMDS1: | ||
2305 | case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_TMDS1: | ||
2306 | case ENCODER_OBJECT_ID_INTERNAL_LVTM1: | ||
2307 | if (radeon_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT)) { | ||
2308 | radeon_encoder->rmx_type = RMX_FULL; | ||
2309 | drm_encoder_init(dev, encoder, &radeon_atom_enc_funcs, DRM_MODE_ENCODER_LVDS); | ||
2310 | radeon_encoder->enc_priv = radeon_atombios_get_lvds_info(radeon_encoder); | ||
2311 | } else { | ||
2312 | drm_encoder_init(dev, encoder, &radeon_atom_enc_funcs, DRM_MODE_ENCODER_TMDS); | ||
2313 | radeon_encoder->enc_priv = radeon_atombios_set_dig_info(radeon_encoder); | ||
2314 | } | ||
2315 | drm_encoder_helper_add(encoder, &radeon_atom_dig_helper_funcs); | ||
2316 | break; | ||
2317 | case ENCODER_OBJECT_ID_INTERNAL_DAC1: | ||
2318 | drm_encoder_init(dev, encoder, &radeon_atom_enc_funcs, DRM_MODE_ENCODER_DAC); | ||
2319 | radeon_encoder->enc_priv = radeon_atombios_set_dac_info(radeon_encoder); | ||
2320 | drm_encoder_helper_add(encoder, &radeon_atom_dac_helper_funcs); | ||
2321 | break; | ||
2322 | case ENCODER_OBJECT_ID_INTERNAL_DAC2: | ||
2323 | case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC1: | ||
2324 | case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC2: | ||
2325 | drm_encoder_init(dev, encoder, &radeon_atom_enc_funcs, DRM_MODE_ENCODER_TVDAC); | ||
2326 | radeon_encoder->enc_priv = radeon_atombios_set_dac_info(radeon_encoder); | ||
2327 | drm_encoder_helper_add(encoder, &radeon_atom_dac_helper_funcs); | ||
2328 | break; | ||
2329 | case ENCODER_OBJECT_ID_INTERNAL_DVO1: | ||
2330 | case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DVO1: | ||
2331 | case ENCODER_OBJECT_ID_INTERNAL_DDI: | ||
2332 | case ENCODER_OBJECT_ID_INTERNAL_UNIPHY: | ||
2333 | case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_LVTMA: | ||
2334 | case ENCODER_OBJECT_ID_INTERNAL_UNIPHY1: | ||
2335 | case ENCODER_OBJECT_ID_INTERNAL_UNIPHY2: | ||
2336 | if (radeon_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT)) { | ||
2337 | radeon_encoder->rmx_type = RMX_FULL; | ||
2338 | drm_encoder_init(dev, encoder, &radeon_atom_enc_funcs, DRM_MODE_ENCODER_LVDS); | ||
2339 | radeon_encoder->enc_priv = radeon_atombios_get_lvds_info(radeon_encoder); | ||
2340 | } else if (radeon_encoder->devices & (ATOM_DEVICE_CRT_SUPPORT)) { | ||
2341 | drm_encoder_init(dev, encoder, &radeon_atom_enc_funcs, DRM_MODE_ENCODER_DAC); | ||
2342 | radeon_encoder->enc_priv = radeon_atombios_set_dig_info(radeon_encoder); | ||
2343 | } else { | ||
2344 | drm_encoder_init(dev, encoder, &radeon_atom_enc_funcs, DRM_MODE_ENCODER_TMDS); | ||
2345 | radeon_encoder->enc_priv = radeon_atombios_set_dig_info(radeon_encoder); | ||
2346 | } | ||
2347 | drm_encoder_helper_add(encoder, &radeon_atom_dig_helper_funcs); | ||
2348 | break; | ||
2349 | case ENCODER_OBJECT_ID_SI170B: | ||
2350 | case ENCODER_OBJECT_ID_CH7303: | ||
2351 | case ENCODER_OBJECT_ID_EXTERNAL_SDVOA: | ||
2352 | case ENCODER_OBJECT_ID_EXTERNAL_SDVOB: | ||
2353 | case ENCODER_OBJECT_ID_TITFP513: | ||
2354 | case ENCODER_OBJECT_ID_VT1623: | ||
2355 | case ENCODER_OBJECT_ID_HDMI_SI1930: | ||
2356 | case ENCODER_OBJECT_ID_TRAVIS: | ||
2357 | case ENCODER_OBJECT_ID_NUTMEG: | ||
2358 | /* these are handled by the primary encoders */ | ||
2359 | radeon_encoder->is_ext_encoder = true; | ||
2360 | if (radeon_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT)) | ||
2361 | drm_encoder_init(dev, encoder, &radeon_atom_enc_funcs, DRM_MODE_ENCODER_LVDS); | ||
2362 | else if (radeon_encoder->devices & (ATOM_DEVICE_CRT_SUPPORT)) | ||
2363 | drm_encoder_init(dev, encoder, &radeon_atom_enc_funcs, DRM_MODE_ENCODER_DAC); | ||
2364 | else | ||
2365 | drm_encoder_init(dev, encoder, &radeon_atom_enc_funcs, DRM_MODE_ENCODER_TMDS); | ||
2366 | drm_encoder_helper_add(encoder, &radeon_atom_ext_helper_funcs); | ||
2367 | break; | ||
2368 | } | ||
2369 | } | ||
diff --git a/drivers/gpu/drm/radeon/evergreen.c b/drivers/gpu/drm/radeon/evergreen.c index ed406e8404a3..1d603a3335db 100644 --- a/drivers/gpu/drm/radeon/evergreen.c +++ b/drivers/gpu/drm/radeon/evergreen.c | |||
@@ -157,6 +157,57 @@ int sumo_get_temp(struct radeon_device *rdev) | |||
157 | return actual_temp * 1000; | 157 | return actual_temp * 1000; |
158 | } | 158 | } |
159 | 159 | ||
160 | void sumo_pm_init_profile(struct radeon_device *rdev) | ||
161 | { | ||
162 | int idx; | ||
163 | |||
164 | /* default */ | ||
165 | rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_off_ps_idx = rdev->pm.default_power_state_index; | ||
166 | rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index; | ||
167 | rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_off_cm_idx = 0; | ||
168 | rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_on_cm_idx = 0; | ||
169 | |||
170 | /* low,mid sh/mh */ | ||
171 | if (rdev->flags & RADEON_IS_MOBILITY) | ||
172 | idx = radeon_pm_get_type_index(rdev, POWER_STATE_TYPE_BATTERY, 0); | ||
173 | else | ||
174 | idx = radeon_pm_get_type_index(rdev, POWER_STATE_TYPE_PERFORMANCE, 0); | ||
175 | |||
176 | rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_ps_idx = idx; | ||
177 | rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_ps_idx = idx; | ||
178 | rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_cm_idx = 0; | ||
179 | rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_cm_idx = 0; | ||
180 | |||
181 | rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_ps_idx = idx; | ||
182 | rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_ps_idx = idx; | ||
183 | rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_cm_idx = 0; | ||
184 | rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_cm_idx = 0; | ||
185 | |||
186 | rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_off_ps_idx = idx; | ||
187 | rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_on_ps_idx = idx; | ||
188 | rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_off_cm_idx = 0; | ||
189 | rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_on_cm_idx = 0; | ||
190 | |||
191 | rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_off_ps_idx = idx; | ||
192 | rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_on_ps_idx = idx; | ||
193 | rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_off_cm_idx = 0; | ||
194 | rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_on_cm_idx = 0; | ||
195 | |||
196 | /* high sh/mh */ | ||
197 | idx = radeon_pm_get_type_index(rdev, POWER_STATE_TYPE_PERFORMANCE, 0); | ||
198 | rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_off_ps_idx = idx; | ||
199 | rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_on_ps_idx = idx; | ||
200 | rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_off_cm_idx = 0; | ||
201 | rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_on_cm_idx = | ||
202 | rdev->pm.power_state[idx].num_clock_modes - 1; | ||
203 | |||
204 | rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_off_ps_idx = idx; | ||
205 | rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_ps_idx = idx; | ||
206 | rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_off_cm_idx = 0; | ||
207 | rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_cm_idx = | ||
208 | rdev->pm.power_state[idx].num_clock_modes - 1; | ||
209 | } | ||
210 | |||
160 | void evergreen_pm_misc(struct radeon_device *rdev) | 211 | void evergreen_pm_misc(struct radeon_device *rdev) |
161 | { | 212 | { |
162 | int req_ps_idx = rdev->pm.requested_power_state_index; | 213 | int req_ps_idx = rdev->pm.requested_power_state_index; |
@@ -353,6 +404,7 @@ void evergreen_hpd_init(struct radeon_device *rdev) | |||
353 | default: | 404 | default: |
354 | break; | 405 | break; |
355 | } | 406 | } |
407 | radeon_hpd_set_polarity(rdev, radeon_connector->hpd.hpd); | ||
356 | } | 408 | } |
357 | if (rdev->irq.installed) | 409 | if (rdev->irq.installed) |
358 | evergreen_irq_set(rdev); | 410 | evergreen_irq_set(rdev); |
@@ -893,7 +945,7 @@ int evergreen_pcie_gart_enable(struct radeon_device *rdev) | |||
893 | u32 tmp; | 945 | u32 tmp; |
894 | int r; | 946 | int r; |
895 | 947 | ||
896 | if (rdev->gart.table.vram.robj == NULL) { | 948 | if (rdev->gart.robj == NULL) { |
897 | dev_err(rdev->dev, "No VRAM object for PCIE GART.\n"); | 949 | dev_err(rdev->dev, "No VRAM object for PCIE GART.\n"); |
898 | return -EINVAL; | 950 | return -EINVAL; |
899 | } | 951 | } |
@@ -945,7 +997,6 @@ int evergreen_pcie_gart_enable(struct radeon_device *rdev) | |||
945 | void evergreen_pcie_gart_disable(struct radeon_device *rdev) | 997 | void evergreen_pcie_gart_disable(struct radeon_device *rdev) |
946 | { | 998 | { |
947 | u32 tmp; | 999 | u32 tmp; |
948 | int r; | ||
949 | 1000 | ||
950 | /* Disable all tables */ | 1001 | /* Disable all tables */ |
951 | WREG32(VM_CONTEXT0_CNTL, 0); | 1002 | WREG32(VM_CONTEXT0_CNTL, 0); |
@@ -965,14 +1016,7 @@ void evergreen_pcie_gart_disable(struct radeon_device *rdev) | |||
965 | WREG32(MC_VM_MB_L1_TLB1_CNTL, tmp); | 1016 | WREG32(MC_VM_MB_L1_TLB1_CNTL, tmp); |
966 | WREG32(MC_VM_MB_L1_TLB2_CNTL, tmp); | 1017 | WREG32(MC_VM_MB_L1_TLB2_CNTL, tmp); |
967 | WREG32(MC_VM_MB_L1_TLB3_CNTL, tmp); | 1018 | WREG32(MC_VM_MB_L1_TLB3_CNTL, tmp); |
968 | if (rdev->gart.table.vram.robj) { | 1019 | radeon_gart_table_vram_unpin(rdev); |
969 | r = radeon_bo_reserve(rdev->gart.table.vram.robj, false); | ||
970 | if (likely(r == 0)) { | ||
971 | radeon_bo_kunmap(rdev->gart.table.vram.robj); | ||
972 | radeon_bo_unpin(rdev->gart.table.vram.robj); | ||
973 | radeon_bo_unreserve(rdev->gart.table.vram.robj); | ||
974 | } | ||
975 | } | ||
976 | } | 1020 | } |
977 | 1021 | ||
978 | void evergreen_pcie_gart_fini(struct radeon_device *rdev) | 1022 | void evergreen_pcie_gart_fini(struct radeon_device *rdev) |
@@ -1226,7 +1270,7 @@ void evergreen_mc_program(struct radeon_device *rdev) | |||
1226 | WREG32(MC_VM_SYSTEM_APERTURE_HIGH_ADDR, | 1270 | WREG32(MC_VM_SYSTEM_APERTURE_HIGH_ADDR, |
1227 | rdev->mc.vram_end >> 12); | 1271 | rdev->mc.vram_end >> 12); |
1228 | } | 1272 | } |
1229 | WREG32(MC_VM_SYSTEM_APERTURE_DEFAULT_ADDR, 0); | 1273 | WREG32(MC_VM_SYSTEM_APERTURE_DEFAULT_ADDR, rdev->vram_scratch.gpu_addr >> 12); |
1230 | if (rdev->flags & RADEON_IS_IGP) { | 1274 | if (rdev->flags & RADEON_IS_IGP) { |
1231 | tmp = RREG32(MC_FUS_VM_FB_OFFSET) & 0x000FFFFF; | 1275 | tmp = RREG32(MC_FUS_VM_FB_OFFSET) & 0x000FFFFF; |
1232 | tmp |= ((rdev->mc.vram_end >> 20) & 0xF) << 24; | 1276 | tmp |= ((rdev->mc.vram_end >> 20) & 0xF) << 24; |
@@ -3031,6 +3075,10 @@ static int evergreen_startup(struct radeon_device *rdev) | |||
3031 | } | 3075 | } |
3032 | } | 3076 | } |
3033 | 3077 | ||
3078 | r = r600_vram_scratch_init(rdev); | ||
3079 | if (r) | ||
3080 | return r; | ||
3081 | |||
3034 | evergreen_mc_program(rdev); | 3082 | evergreen_mc_program(rdev); |
3035 | if (rdev->flags & RADEON_IS_AGP) { | 3083 | if (rdev->flags & RADEON_IS_AGP) { |
3036 | evergreen_agp_enable(rdev); | 3084 | evergreen_agp_enable(rdev); |
@@ -3235,6 +3283,7 @@ void evergreen_fini(struct radeon_device *rdev) | |||
3235 | radeon_ib_pool_fini(rdev); | 3283 | radeon_ib_pool_fini(rdev); |
3236 | radeon_irq_kms_fini(rdev); | 3284 | radeon_irq_kms_fini(rdev); |
3237 | evergreen_pcie_gart_fini(rdev); | 3285 | evergreen_pcie_gart_fini(rdev); |
3286 | r600_vram_scratch_fini(rdev); | ||
3238 | radeon_gem_fini(rdev); | 3287 | radeon_gem_fini(rdev); |
3239 | radeon_fence_driver_fini(rdev); | 3288 | radeon_fence_driver_fini(rdev); |
3240 | radeon_agp_fini(rdev); | 3289 | radeon_agp_fini(rdev); |
diff --git a/drivers/gpu/drm/radeon/evergreen_blit_kms.c b/drivers/gpu/drm/radeon/evergreen_blit_kms.c index dcf11bbc06d9..914e5af84163 100644 --- a/drivers/gpu/drm/radeon/evergreen_blit_kms.c +++ b/drivers/gpu/drm/radeon/evergreen_blit_kms.c | |||
@@ -94,6 +94,15 @@ cp_set_surface_sync(struct radeon_device *rdev, | |||
94 | else | 94 | else |
95 | cp_coher_size = ((size + 255) >> 8); | 95 | cp_coher_size = ((size + 255) >> 8); |
96 | 96 | ||
97 | if (rdev->family >= CHIP_CAYMAN) { | ||
98 | /* CP_COHER_CNTL2 has to be set manually when submitting a surface_sync | ||
99 | * to the RB directly. For IBs, the CP programs this as part of the | ||
100 | * surface_sync packet. | ||
101 | */ | ||
102 | radeon_ring_write(rdev, PACKET3(PACKET3_SET_CONFIG_REG, 1)); | ||
103 | radeon_ring_write(rdev, (0x85e8 - PACKET3_SET_CONFIG_REG_START) >> 2); | ||
104 | radeon_ring_write(rdev, 0); /* CP_COHER_CNTL2 */ | ||
105 | } | ||
97 | radeon_ring_write(rdev, PACKET3(PACKET3_SURFACE_SYNC, 3)); | 106 | radeon_ring_write(rdev, PACKET3(PACKET3_SURFACE_SYNC, 3)); |
98 | radeon_ring_write(rdev, sync_type); | 107 | radeon_ring_write(rdev, sync_type); |
99 | radeon_ring_write(rdev, cp_coher_size); | 108 | radeon_ring_write(rdev, cp_coher_size); |
@@ -174,7 +183,7 @@ set_vtx_resource(struct radeon_device *rdev, u64 gpu_addr) | |||
174 | static void | 183 | static void |
175 | set_tex_resource(struct radeon_device *rdev, | 184 | set_tex_resource(struct radeon_device *rdev, |
176 | int format, int w, int h, int pitch, | 185 | int format, int w, int h, int pitch, |
177 | u64 gpu_addr) | 186 | u64 gpu_addr, u32 size) |
178 | { | 187 | { |
179 | u32 sq_tex_resource_word0, sq_tex_resource_word1; | 188 | u32 sq_tex_resource_word0, sq_tex_resource_word1; |
180 | u32 sq_tex_resource_word4, sq_tex_resource_word7; | 189 | u32 sq_tex_resource_word4, sq_tex_resource_word7; |
@@ -196,6 +205,9 @@ set_tex_resource(struct radeon_device *rdev, | |||
196 | sq_tex_resource_word7 = format | | 205 | sq_tex_resource_word7 = format | |
197 | S__SQ_CONSTANT_TYPE(SQ_TEX_VTX_VALID_TEXTURE); | 206 | S__SQ_CONSTANT_TYPE(SQ_TEX_VTX_VALID_TEXTURE); |
198 | 207 | ||
208 | cp_set_surface_sync(rdev, | ||
209 | PACKET3_TC_ACTION_ENA, size, gpu_addr); | ||
210 | |||
199 | radeon_ring_write(rdev, PACKET3(PACKET3_SET_RESOURCE, 8)); | 211 | radeon_ring_write(rdev, PACKET3(PACKET3_SET_RESOURCE, 8)); |
200 | radeon_ring_write(rdev, 0); | 212 | radeon_ring_write(rdev, 0); |
201 | radeon_ring_write(rdev, sq_tex_resource_word0); | 213 | radeon_ring_write(rdev, sq_tex_resource_word0); |
@@ -613,11 +625,13 @@ int evergreen_blit_init(struct radeon_device *rdev) | |||
613 | rdev->r600_blit.primitives.set_default_state = set_default_state; | 625 | rdev->r600_blit.primitives.set_default_state = set_default_state; |
614 | 626 | ||
615 | rdev->r600_blit.ring_size_common = 55; /* shaders + def state */ | 627 | rdev->r600_blit.ring_size_common = 55; /* shaders + def state */ |
616 | rdev->r600_blit.ring_size_common += 10; /* fence emit for VB IB */ | 628 | rdev->r600_blit.ring_size_common += 16; /* fence emit for VB IB */ |
617 | rdev->r600_blit.ring_size_common += 5; /* done copy */ | 629 | rdev->r600_blit.ring_size_common += 5; /* done copy */ |
618 | rdev->r600_blit.ring_size_common += 10; /* fence emit for done copy */ | 630 | rdev->r600_blit.ring_size_common += 16; /* fence emit for done copy */ |
619 | 631 | ||
620 | rdev->r600_blit.ring_size_per_loop = 74; | 632 | rdev->r600_blit.ring_size_per_loop = 74; |
633 | if (rdev->family >= CHIP_CAYMAN) | ||
634 | rdev->r600_blit.ring_size_per_loop += 9; /* additional DWs for surface sync */ | ||
621 | 635 | ||
622 | rdev->r600_blit.max_dim = 16384; | 636 | rdev->r600_blit.max_dim = 16384; |
623 | 637 | ||
diff --git a/drivers/gpu/drm/radeon/ni.c b/drivers/gpu/drm/radeon/ni.c index fdb93f884575..0e5799857465 100644 --- a/drivers/gpu/drm/radeon/ni.c +++ b/drivers/gpu/drm/radeon/ni.c | |||
@@ -262,8 +262,11 @@ int ni_mc_load_microcode(struct radeon_device *rdev) | |||
262 | WREG32(MC_SEQ_SUP_CNTL, 0x00000001); | 262 | WREG32(MC_SEQ_SUP_CNTL, 0x00000001); |
263 | 263 | ||
264 | /* wait for training to complete */ | 264 | /* wait for training to complete */ |
265 | while (!(RREG32(MC_IO_PAD_CNTL_D0) & MEM_FALL_OUT_CMD)) | 265 | for (i = 0; i < rdev->usec_timeout; i++) { |
266 | udelay(10); | 266 | if (RREG32(MC_IO_PAD_CNTL_D0) & MEM_FALL_OUT_CMD) |
267 | break; | ||
268 | udelay(1); | ||
269 | } | ||
267 | 270 | ||
268 | if (running) | 271 | if (running) |
269 | WREG32(MC_SHARED_BLACKOUT_CNTL, blackout); | 272 | WREG32(MC_SHARED_BLACKOUT_CNTL, blackout); |
@@ -933,7 +936,7 @@ int cayman_pcie_gart_enable(struct radeon_device *rdev) | |||
933 | { | 936 | { |
934 | int r; | 937 | int r; |
935 | 938 | ||
936 | if (rdev->gart.table.vram.robj == NULL) { | 939 | if (rdev->gart.robj == NULL) { |
937 | dev_err(rdev->dev, "No VRAM object for PCIE GART.\n"); | 940 | dev_err(rdev->dev, "No VRAM object for PCIE GART.\n"); |
938 | return -EINVAL; | 941 | return -EINVAL; |
939 | } | 942 | } |
@@ -978,8 +981,6 @@ int cayman_pcie_gart_enable(struct radeon_device *rdev) | |||
978 | 981 | ||
979 | void cayman_pcie_gart_disable(struct radeon_device *rdev) | 982 | void cayman_pcie_gart_disable(struct radeon_device *rdev) |
980 | { | 983 | { |
981 | int r; | ||
982 | |||
983 | /* Disable all tables */ | 984 | /* Disable all tables */ |
984 | WREG32(VM_CONTEXT0_CNTL, 0); | 985 | WREG32(VM_CONTEXT0_CNTL, 0); |
985 | WREG32(VM_CONTEXT1_CNTL, 0); | 986 | WREG32(VM_CONTEXT1_CNTL, 0); |
@@ -995,14 +996,7 @@ void cayman_pcie_gart_disable(struct radeon_device *rdev) | |||
995 | WREG32(VM_L2_CNTL2, 0); | 996 | WREG32(VM_L2_CNTL2, 0); |
996 | WREG32(VM_L2_CNTL3, L2_CACHE_BIGK_ASSOCIATIVITY | | 997 | WREG32(VM_L2_CNTL3, L2_CACHE_BIGK_ASSOCIATIVITY | |
997 | L2_CACHE_BIGK_FRAGMENT_SIZE(6)); | 998 | L2_CACHE_BIGK_FRAGMENT_SIZE(6)); |
998 | if (rdev->gart.table.vram.robj) { | 999 | radeon_gart_table_vram_unpin(rdev); |
999 | r = radeon_bo_reserve(rdev->gart.table.vram.robj, false); | ||
1000 | if (likely(r == 0)) { | ||
1001 | radeon_bo_kunmap(rdev->gart.table.vram.robj); | ||
1002 | radeon_bo_unpin(rdev->gart.table.vram.robj); | ||
1003 | radeon_bo_unreserve(rdev->gart.table.vram.robj); | ||
1004 | } | ||
1005 | } | ||
1006 | } | 1000 | } |
1007 | 1001 | ||
1008 | void cayman_pcie_gart_fini(struct radeon_device *rdev) | 1002 | void cayman_pcie_gart_fini(struct radeon_device *rdev) |
@@ -1362,6 +1356,10 @@ static int cayman_startup(struct radeon_device *rdev) | |||
1362 | return r; | 1356 | return r; |
1363 | } | 1357 | } |
1364 | 1358 | ||
1359 | r = r600_vram_scratch_init(rdev); | ||
1360 | if (r) | ||
1361 | return r; | ||
1362 | |||
1365 | evergreen_mc_program(rdev); | 1363 | evergreen_mc_program(rdev); |
1366 | r = cayman_pcie_gart_enable(rdev); | 1364 | r = cayman_pcie_gart_enable(rdev); |
1367 | if (r) | 1365 | if (r) |
@@ -1557,6 +1555,7 @@ void cayman_fini(struct radeon_device *rdev) | |||
1557 | radeon_ib_pool_fini(rdev); | 1555 | radeon_ib_pool_fini(rdev); |
1558 | radeon_irq_kms_fini(rdev); | 1556 | radeon_irq_kms_fini(rdev); |
1559 | cayman_pcie_gart_fini(rdev); | 1557 | cayman_pcie_gart_fini(rdev); |
1558 | r600_vram_scratch_fini(rdev); | ||
1560 | radeon_gem_fini(rdev); | 1559 | radeon_gem_fini(rdev); |
1561 | radeon_fence_driver_fini(rdev); | 1560 | radeon_fence_driver_fini(rdev); |
1562 | radeon_bo_fini(rdev); | 1561 | radeon_bo_fini(rdev); |
diff --git a/drivers/gpu/drm/radeon/r100.c b/drivers/gpu/drm/radeon/r100.c index cbf49f4f408e..ad158ea49901 100644 --- a/drivers/gpu/drm/radeon/r100.c +++ b/drivers/gpu/drm/radeon/r100.c | |||
@@ -537,6 +537,7 @@ void r100_hpd_init(struct radeon_device *rdev) | |||
537 | default: | 537 | default: |
538 | break; | 538 | break; |
539 | } | 539 | } |
540 | radeon_hpd_set_polarity(rdev, radeon_connector->hpd.hpd); | ||
540 | } | 541 | } |
541 | if (rdev->irq.installed) | 542 | if (rdev->irq.installed) |
542 | r100_irq_set(rdev); | 543 | r100_irq_set(rdev); |
@@ -577,7 +578,7 @@ int r100_pci_gart_init(struct radeon_device *rdev) | |||
577 | { | 578 | { |
578 | int r; | 579 | int r; |
579 | 580 | ||
580 | if (rdev->gart.table.ram.ptr) { | 581 | if (rdev->gart.ptr) { |
581 | WARN(1, "R100 PCI GART already initialized\n"); | 582 | WARN(1, "R100 PCI GART already initialized\n"); |
582 | return 0; | 583 | return 0; |
583 | } | 584 | } |
@@ -636,10 +637,12 @@ void r100_pci_gart_disable(struct radeon_device *rdev) | |||
636 | 637 | ||
637 | int r100_pci_gart_set_page(struct radeon_device *rdev, int i, uint64_t addr) | 638 | int r100_pci_gart_set_page(struct radeon_device *rdev, int i, uint64_t addr) |
638 | { | 639 | { |
640 | u32 *gtt = rdev->gart.ptr; | ||
641 | |||
639 | if (i < 0 || i > rdev->gart.num_gpu_pages) { | 642 | if (i < 0 || i > rdev->gart.num_gpu_pages) { |
640 | return -EINVAL; | 643 | return -EINVAL; |
641 | } | 644 | } |
642 | rdev->gart.table.ram.ptr[i] = cpu_to_le32(lower_32_bits(addr)); | 645 | gtt[i] = cpu_to_le32(lower_32_bits(addr)); |
643 | return 0; | 646 | return 0; |
644 | } | 647 | } |
645 | 648 | ||
diff --git a/drivers/gpu/drm/radeon/r300.c b/drivers/gpu/drm/radeon/r300.c index 33f2b68c680b..400b26df652a 100644 --- a/drivers/gpu/drm/radeon/r300.c +++ b/drivers/gpu/drm/radeon/r300.c | |||
@@ -74,7 +74,7 @@ void rv370_pcie_gart_tlb_flush(struct radeon_device *rdev) | |||
74 | 74 | ||
75 | int rv370_pcie_gart_set_page(struct radeon_device *rdev, int i, uint64_t addr) | 75 | int rv370_pcie_gart_set_page(struct radeon_device *rdev, int i, uint64_t addr) |
76 | { | 76 | { |
77 | void __iomem *ptr = (void *)rdev->gart.table.vram.ptr; | 77 | void __iomem *ptr = rdev->gart.ptr; |
78 | 78 | ||
79 | if (i < 0 || i > rdev->gart.num_gpu_pages) { | 79 | if (i < 0 || i > rdev->gart.num_gpu_pages) { |
80 | return -EINVAL; | 80 | return -EINVAL; |
@@ -93,7 +93,7 @@ int rv370_pcie_gart_init(struct radeon_device *rdev) | |||
93 | { | 93 | { |
94 | int r; | 94 | int r; |
95 | 95 | ||
96 | if (rdev->gart.table.vram.robj) { | 96 | if (rdev->gart.robj) { |
97 | WARN(1, "RV370 PCIE GART already initialized\n"); | 97 | WARN(1, "RV370 PCIE GART already initialized\n"); |
98 | return 0; | 98 | return 0; |
99 | } | 99 | } |
@@ -116,7 +116,7 @@ int rv370_pcie_gart_enable(struct radeon_device *rdev) | |||
116 | uint32_t tmp; | 116 | uint32_t tmp; |
117 | int r; | 117 | int r; |
118 | 118 | ||
119 | if (rdev->gart.table.vram.robj == NULL) { | 119 | if (rdev->gart.robj == NULL) { |
120 | dev_err(rdev->dev, "No VRAM object for PCIE GART.\n"); | 120 | dev_err(rdev->dev, "No VRAM object for PCIE GART.\n"); |
121 | return -EINVAL; | 121 | return -EINVAL; |
122 | } | 122 | } |
@@ -154,7 +154,6 @@ int rv370_pcie_gart_enable(struct radeon_device *rdev) | |||
154 | void rv370_pcie_gart_disable(struct radeon_device *rdev) | 154 | void rv370_pcie_gart_disable(struct radeon_device *rdev) |
155 | { | 155 | { |
156 | u32 tmp; | 156 | u32 tmp; |
157 | int r; | ||
158 | 157 | ||
159 | WREG32_PCIE(RADEON_PCIE_TX_GART_START_LO, 0); | 158 | WREG32_PCIE(RADEON_PCIE_TX_GART_START_LO, 0); |
160 | WREG32_PCIE(RADEON_PCIE_TX_GART_END_LO, 0); | 159 | WREG32_PCIE(RADEON_PCIE_TX_GART_END_LO, 0); |
@@ -163,14 +162,7 @@ void rv370_pcie_gart_disable(struct radeon_device *rdev) | |||
163 | tmp = RREG32_PCIE(RADEON_PCIE_TX_GART_CNTL); | 162 | tmp = RREG32_PCIE(RADEON_PCIE_TX_GART_CNTL); |
164 | tmp |= RADEON_PCIE_TX_GART_UNMAPPED_ACCESS_DISCARD; | 163 | tmp |= RADEON_PCIE_TX_GART_UNMAPPED_ACCESS_DISCARD; |
165 | WREG32_PCIE(RADEON_PCIE_TX_GART_CNTL, tmp & ~RADEON_PCIE_TX_GART_EN); | 164 | WREG32_PCIE(RADEON_PCIE_TX_GART_CNTL, tmp & ~RADEON_PCIE_TX_GART_EN); |
166 | if (rdev->gart.table.vram.robj) { | 165 | radeon_gart_table_vram_unpin(rdev); |
167 | r = radeon_bo_reserve(rdev->gart.table.vram.robj, false); | ||
168 | if (likely(r == 0)) { | ||
169 | radeon_bo_kunmap(rdev->gart.table.vram.robj); | ||
170 | radeon_bo_unpin(rdev->gart.table.vram.robj); | ||
171 | radeon_bo_unreserve(rdev->gart.table.vram.robj); | ||
172 | } | ||
173 | } | ||
174 | } | 166 | } |
175 | 167 | ||
176 | void rv370_pcie_gart_fini(struct radeon_device *rdev) | 168 | void rv370_pcie_gart_fini(struct radeon_device *rdev) |
diff --git a/drivers/gpu/drm/radeon/r600.c b/drivers/gpu/drm/radeon/r600.c index 4e777c1e4b7b..9cdda0b3b081 100644 --- a/drivers/gpu/drm/radeon/r600.c +++ b/drivers/gpu/drm/radeon/r600.c | |||
@@ -288,24 +288,6 @@ void r600_pm_get_dynpm_state(struct radeon_device *rdev) | |||
288 | pcie_lanes); | 288 | pcie_lanes); |
289 | } | 289 | } |
290 | 290 | ||
291 | static int r600_pm_get_type_index(struct radeon_device *rdev, | ||
292 | enum radeon_pm_state_type ps_type, | ||
293 | int instance) | ||
294 | { | ||
295 | int i; | ||
296 | int found_instance = -1; | ||
297 | |||
298 | for (i = 0; i < rdev->pm.num_power_states; i++) { | ||
299 | if (rdev->pm.power_state[i].type == ps_type) { | ||
300 | found_instance++; | ||
301 | if (found_instance == instance) | ||
302 | return i; | ||
303 | } | ||
304 | } | ||
305 | /* return default if no match */ | ||
306 | return rdev->pm.default_power_state_index; | ||
307 | } | ||
308 | |||
309 | void rs780_pm_init_profile(struct radeon_device *rdev) | 291 | void rs780_pm_init_profile(struct radeon_device *rdev) |
310 | { | 292 | { |
311 | if (rdev->pm.num_power_states == 2) { | 293 | if (rdev->pm.num_power_states == 2) { |
@@ -421,6 +403,8 @@ void rs780_pm_init_profile(struct radeon_device *rdev) | |||
421 | 403 | ||
422 | void r600_pm_init_profile(struct radeon_device *rdev) | 404 | void r600_pm_init_profile(struct radeon_device *rdev) |
423 | { | 405 | { |
406 | int idx; | ||
407 | |||
424 | if (rdev->family == CHIP_R600) { | 408 | if (rdev->family == CHIP_R600) { |
425 | /* XXX */ | 409 | /* XXX */ |
426 | /* default */ | 410 | /* default */ |
@@ -502,81 +486,43 @@ void r600_pm_init_profile(struct radeon_device *rdev) | |||
502 | rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_off_cm_idx = 0; | 486 | rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_off_cm_idx = 0; |
503 | rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_on_cm_idx = 2; | 487 | rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_on_cm_idx = 2; |
504 | /* low sh */ | 488 | /* low sh */ |
505 | if (rdev->flags & RADEON_IS_MOBILITY) { | 489 | if (rdev->flags & RADEON_IS_MOBILITY) |
506 | rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_ps_idx = | 490 | idx = radeon_pm_get_type_index(rdev, POWER_STATE_TYPE_BATTERY, 0); |
507 | r600_pm_get_type_index(rdev, POWER_STATE_TYPE_BATTERY, 0); | 491 | else |
508 | rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_ps_idx = | 492 | idx = radeon_pm_get_type_index(rdev, POWER_STATE_TYPE_PERFORMANCE, 0); |
509 | r600_pm_get_type_index(rdev, POWER_STATE_TYPE_BATTERY, 0); | 493 | rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_ps_idx = idx; |
510 | rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_cm_idx = 0; | 494 | rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_ps_idx = idx; |
511 | rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_cm_idx = 0; | 495 | rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_cm_idx = 0; |
512 | } else { | 496 | rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_cm_idx = 0; |
513 | rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_ps_idx = | ||
514 | r600_pm_get_type_index(rdev, POWER_STATE_TYPE_PERFORMANCE, 0); | ||
515 | rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_ps_idx = | ||
516 | r600_pm_get_type_index(rdev, POWER_STATE_TYPE_PERFORMANCE, 0); | ||
517 | rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_cm_idx = 0; | ||
518 | rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_cm_idx = 0; | ||
519 | } | ||
520 | /* mid sh */ | 497 | /* mid sh */ |
521 | if (rdev->flags & RADEON_IS_MOBILITY) { | 498 | rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_off_ps_idx = idx; |
522 | rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_off_ps_idx = | 499 | rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_on_ps_idx = idx; |
523 | r600_pm_get_type_index(rdev, POWER_STATE_TYPE_BATTERY, 0); | 500 | rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_off_cm_idx = 0; |
524 | rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_on_ps_idx = | 501 | rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_on_cm_idx = 1; |
525 | r600_pm_get_type_index(rdev, POWER_STATE_TYPE_BATTERY, 0); | ||
526 | rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_off_cm_idx = 0; | ||
527 | rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_on_cm_idx = 1; | ||
528 | } else { | ||
529 | rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_off_ps_idx = | ||
530 | r600_pm_get_type_index(rdev, POWER_STATE_TYPE_PERFORMANCE, 0); | ||
531 | rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_on_ps_idx = | ||
532 | r600_pm_get_type_index(rdev, POWER_STATE_TYPE_PERFORMANCE, 0); | ||
533 | rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_off_cm_idx = 0; | ||
534 | rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_on_cm_idx = 1; | ||
535 | } | ||
536 | /* high sh */ | 502 | /* high sh */ |
537 | rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_off_ps_idx = | 503 | idx = radeon_pm_get_type_index(rdev, POWER_STATE_TYPE_PERFORMANCE, 0); |
538 | r600_pm_get_type_index(rdev, POWER_STATE_TYPE_PERFORMANCE, 0); | 504 | rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_off_ps_idx = idx; |
539 | rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_on_ps_idx = | 505 | rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_on_ps_idx = idx; |
540 | r600_pm_get_type_index(rdev, POWER_STATE_TYPE_PERFORMANCE, 0); | ||
541 | rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_off_cm_idx = 0; | 506 | rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_off_cm_idx = 0; |
542 | rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_on_cm_idx = 2; | 507 | rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_on_cm_idx = 2; |
543 | /* low mh */ | 508 | /* low mh */ |
544 | if (rdev->flags & RADEON_IS_MOBILITY) { | 509 | if (rdev->flags & RADEON_IS_MOBILITY) |
545 | rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_ps_idx = | 510 | idx = radeon_pm_get_type_index(rdev, POWER_STATE_TYPE_BATTERY, 1); |
546 | r600_pm_get_type_index(rdev, POWER_STATE_TYPE_BATTERY, 1); | 511 | else |
547 | rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_ps_idx = | 512 | idx = radeon_pm_get_type_index(rdev, POWER_STATE_TYPE_PERFORMANCE, 1); |
548 | r600_pm_get_type_index(rdev, POWER_STATE_TYPE_BATTERY, 1); | 513 | rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_ps_idx = idx; |
549 | rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_cm_idx = 0; | 514 | rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_ps_idx = idx; |
550 | rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_cm_idx = 0; | 515 | rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_cm_idx = 0; |
551 | } else { | 516 | rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_cm_idx = 0; |
552 | rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_ps_idx = | ||
553 | r600_pm_get_type_index(rdev, POWER_STATE_TYPE_PERFORMANCE, 1); | ||
554 | rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_ps_idx = | ||
555 | r600_pm_get_type_index(rdev, POWER_STATE_TYPE_PERFORMANCE, 1); | ||
556 | rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_cm_idx = 0; | ||
557 | rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_cm_idx = 0; | ||
558 | } | ||
559 | /* mid mh */ | 517 | /* mid mh */ |
560 | if (rdev->flags & RADEON_IS_MOBILITY) { | 518 | rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_off_ps_idx = idx; |
561 | rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_off_ps_idx = | 519 | rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_on_ps_idx = idx; |
562 | r600_pm_get_type_index(rdev, POWER_STATE_TYPE_BATTERY, 1); | 520 | rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_off_cm_idx = 0; |
563 | rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_on_ps_idx = | 521 | rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_on_cm_idx = 1; |
564 | r600_pm_get_type_index(rdev, POWER_STATE_TYPE_BATTERY, 1); | ||
565 | rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_off_cm_idx = 0; | ||
566 | rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_on_cm_idx = 1; | ||
567 | } else { | ||
568 | rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_off_ps_idx = | ||
569 | r600_pm_get_type_index(rdev, POWER_STATE_TYPE_PERFORMANCE, 1); | ||
570 | rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_on_ps_idx = | ||
571 | r600_pm_get_type_index(rdev, POWER_STATE_TYPE_PERFORMANCE, 1); | ||
572 | rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_off_cm_idx = 0; | ||
573 | rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_on_cm_idx = 1; | ||
574 | } | ||
575 | /* high mh */ | 522 | /* high mh */ |
576 | rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_off_ps_idx = | 523 | idx = radeon_pm_get_type_index(rdev, POWER_STATE_TYPE_PERFORMANCE, 1); |
577 | r600_pm_get_type_index(rdev, POWER_STATE_TYPE_PERFORMANCE, 1); | 524 | rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_off_ps_idx = idx; |
578 | rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_ps_idx = | 525 | rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_ps_idx = idx; |
579 | r600_pm_get_type_index(rdev, POWER_STATE_TYPE_PERFORMANCE, 1); | ||
580 | rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_off_cm_idx = 0; | 526 | rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_off_cm_idx = 0; |
581 | rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_cm_idx = 2; | 527 | rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_cm_idx = 2; |
582 | } | 528 | } |
@@ -763,13 +709,14 @@ void r600_hpd_init(struct radeon_device *rdev) | |||
763 | struct drm_device *dev = rdev->ddev; | 709 | struct drm_device *dev = rdev->ddev; |
764 | struct drm_connector *connector; | 710 | struct drm_connector *connector; |
765 | 711 | ||
766 | if (ASIC_IS_DCE3(rdev)) { | 712 | list_for_each_entry(connector, &dev->mode_config.connector_list, head) { |
767 | u32 tmp = DC_HPDx_CONNECTION_TIMER(0x9c4) | DC_HPDx_RX_INT_TIMER(0xfa); | 713 | struct radeon_connector *radeon_connector = to_radeon_connector(connector); |
768 | if (ASIC_IS_DCE32(rdev)) | 714 | |
769 | tmp |= DC_HPDx_EN; | 715 | if (ASIC_IS_DCE3(rdev)) { |
716 | u32 tmp = DC_HPDx_CONNECTION_TIMER(0x9c4) | DC_HPDx_RX_INT_TIMER(0xfa); | ||
717 | if (ASIC_IS_DCE32(rdev)) | ||
718 | tmp |= DC_HPDx_EN; | ||
770 | 719 | ||
771 | list_for_each_entry(connector, &dev->mode_config.connector_list, head) { | ||
772 | struct radeon_connector *radeon_connector = to_radeon_connector(connector); | ||
773 | switch (radeon_connector->hpd.hpd) { | 720 | switch (radeon_connector->hpd.hpd) { |
774 | case RADEON_HPD_1: | 721 | case RADEON_HPD_1: |
775 | WREG32(DC_HPD1_CONTROL, tmp); | 722 | WREG32(DC_HPD1_CONTROL, tmp); |
@@ -799,10 +746,7 @@ void r600_hpd_init(struct radeon_device *rdev) | |||
799 | default: | 746 | default: |
800 | break; | 747 | break; |
801 | } | 748 | } |
802 | } | 749 | } else { |
803 | } else { | ||
804 | list_for_each_entry(connector, &dev->mode_config.connector_list, head) { | ||
805 | struct radeon_connector *radeon_connector = to_radeon_connector(connector); | ||
806 | switch (radeon_connector->hpd.hpd) { | 750 | switch (radeon_connector->hpd.hpd) { |
807 | case RADEON_HPD_1: | 751 | case RADEON_HPD_1: |
808 | WREG32(DC_HOT_PLUG_DETECT1_CONTROL, DC_HOT_PLUG_DETECTx_EN); | 752 | WREG32(DC_HOT_PLUG_DETECT1_CONTROL, DC_HOT_PLUG_DETECTx_EN); |
@@ -820,6 +764,7 @@ void r600_hpd_init(struct radeon_device *rdev) | |||
820 | break; | 764 | break; |
821 | } | 765 | } |
822 | } | 766 | } |
767 | radeon_hpd_set_polarity(rdev, radeon_connector->hpd.hpd); | ||
823 | } | 768 | } |
824 | if (rdev->irq.installed) | 769 | if (rdev->irq.installed) |
825 | r600_irq_set(rdev); | 770 | r600_irq_set(rdev); |
@@ -897,7 +842,7 @@ void r600_pcie_gart_tlb_flush(struct radeon_device *rdev) | |||
897 | /* flush hdp cache so updates hit vram */ | 842 | /* flush hdp cache so updates hit vram */ |
898 | if ((rdev->family >= CHIP_RV770) && (rdev->family <= CHIP_RV740) && | 843 | if ((rdev->family >= CHIP_RV770) && (rdev->family <= CHIP_RV740) && |
899 | !(rdev->flags & RADEON_IS_AGP)) { | 844 | !(rdev->flags & RADEON_IS_AGP)) { |
900 | void __iomem *ptr = (void *)rdev->gart.table.vram.ptr; | 845 | void __iomem *ptr = (void *)rdev->gart.ptr; |
901 | u32 tmp; | 846 | u32 tmp; |
902 | 847 | ||
903 | /* r7xx hw bug. write to HDP_DEBUG1 followed by fb read | 848 | /* r7xx hw bug. write to HDP_DEBUG1 followed by fb read |
@@ -932,7 +877,7 @@ int r600_pcie_gart_init(struct radeon_device *rdev) | |||
932 | { | 877 | { |
933 | int r; | 878 | int r; |
934 | 879 | ||
935 | if (rdev->gart.table.vram.robj) { | 880 | if (rdev->gart.robj) { |
936 | WARN(1, "R600 PCIE GART already initialized\n"); | 881 | WARN(1, "R600 PCIE GART already initialized\n"); |
937 | return 0; | 882 | return 0; |
938 | } | 883 | } |
@@ -949,7 +894,7 @@ int r600_pcie_gart_enable(struct radeon_device *rdev) | |||
949 | u32 tmp; | 894 | u32 tmp; |
950 | int r, i; | 895 | int r, i; |
951 | 896 | ||
952 | if (rdev->gart.table.vram.robj == NULL) { | 897 | if (rdev->gart.robj == NULL) { |
953 | dev_err(rdev->dev, "No VRAM object for PCIE GART.\n"); | 898 | dev_err(rdev->dev, "No VRAM object for PCIE GART.\n"); |
954 | return -EINVAL; | 899 | return -EINVAL; |
955 | } | 900 | } |
@@ -1004,7 +949,7 @@ int r600_pcie_gart_enable(struct radeon_device *rdev) | |||
1004 | void r600_pcie_gart_disable(struct radeon_device *rdev) | 949 | void r600_pcie_gart_disable(struct radeon_device *rdev) |
1005 | { | 950 | { |
1006 | u32 tmp; | 951 | u32 tmp; |
1007 | int i, r; | 952 | int i; |
1008 | 953 | ||
1009 | /* Disable all tables */ | 954 | /* Disable all tables */ |
1010 | for (i = 0; i < 7; i++) | 955 | for (i = 0; i < 7; i++) |
@@ -1031,14 +976,7 @@ void r600_pcie_gart_disable(struct radeon_device *rdev) | |||
1031 | WREG32(MC_VM_L1_TLB_MCB_WR_SYS_CNTL, tmp); | 976 | WREG32(MC_VM_L1_TLB_MCB_WR_SYS_CNTL, tmp); |
1032 | WREG32(MC_VM_L1_TLB_MCB_RD_HDP_CNTL, tmp); | 977 | WREG32(MC_VM_L1_TLB_MCB_RD_HDP_CNTL, tmp); |
1033 | WREG32(MC_VM_L1_TLB_MCB_WR_HDP_CNTL, tmp); | 978 | WREG32(MC_VM_L1_TLB_MCB_WR_HDP_CNTL, tmp); |
1034 | if (rdev->gart.table.vram.robj) { | 979 | radeon_gart_table_vram_unpin(rdev); |
1035 | r = radeon_bo_reserve(rdev->gart.table.vram.robj, false); | ||
1036 | if (likely(r == 0)) { | ||
1037 | radeon_bo_kunmap(rdev->gart.table.vram.robj); | ||
1038 | radeon_bo_unpin(rdev->gart.table.vram.robj); | ||
1039 | radeon_bo_unreserve(rdev->gart.table.vram.robj); | ||
1040 | } | ||
1041 | } | ||
1042 | } | 980 | } |
1043 | 981 | ||
1044 | void r600_pcie_gart_fini(struct radeon_device *rdev) | 982 | void r600_pcie_gart_fini(struct radeon_device *rdev) |
@@ -1138,7 +1076,7 @@ static void r600_mc_program(struct radeon_device *rdev) | |||
1138 | WREG32(MC_VM_SYSTEM_APERTURE_LOW_ADDR, rdev->mc.vram_start >> 12); | 1076 | WREG32(MC_VM_SYSTEM_APERTURE_LOW_ADDR, rdev->mc.vram_start >> 12); |
1139 | WREG32(MC_VM_SYSTEM_APERTURE_HIGH_ADDR, rdev->mc.vram_end >> 12); | 1077 | WREG32(MC_VM_SYSTEM_APERTURE_HIGH_ADDR, rdev->mc.vram_end >> 12); |
1140 | } | 1078 | } |
1141 | WREG32(MC_VM_SYSTEM_APERTURE_DEFAULT_ADDR, 0); | 1079 | WREG32(MC_VM_SYSTEM_APERTURE_DEFAULT_ADDR, rdev->vram_scratch.gpu_addr >> 12); |
1142 | tmp = ((rdev->mc.vram_end >> 24) & 0xFFFF) << 16; | 1080 | tmp = ((rdev->mc.vram_end >> 24) & 0xFFFF) << 16; |
1143 | tmp |= ((rdev->mc.vram_start >> 24) & 0xFFFF); | 1081 | tmp |= ((rdev->mc.vram_start >> 24) & 0xFFFF); |
1144 | WREG32(MC_VM_FB_LOCATION, tmp); | 1082 | WREG32(MC_VM_FB_LOCATION, tmp); |
@@ -1277,6 +1215,53 @@ int r600_mc_init(struct radeon_device *rdev) | |||
1277 | return 0; | 1215 | return 0; |
1278 | } | 1216 | } |
1279 | 1217 | ||
1218 | int r600_vram_scratch_init(struct radeon_device *rdev) | ||
1219 | { | ||
1220 | int r; | ||
1221 | |||
1222 | if (rdev->vram_scratch.robj == NULL) { | ||
1223 | r = radeon_bo_create(rdev, RADEON_GPU_PAGE_SIZE, | ||
1224 | PAGE_SIZE, true, RADEON_GEM_DOMAIN_VRAM, | ||
1225 | &rdev->vram_scratch.robj); | ||
1226 | if (r) { | ||
1227 | return r; | ||
1228 | } | ||
1229 | } | ||
1230 | |||
1231 | r = radeon_bo_reserve(rdev->vram_scratch.robj, false); | ||
1232 | if (unlikely(r != 0)) | ||
1233 | return r; | ||
1234 | r = radeon_bo_pin(rdev->vram_scratch.robj, | ||
1235 | RADEON_GEM_DOMAIN_VRAM, &rdev->vram_scratch.gpu_addr); | ||
1236 | if (r) { | ||
1237 | radeon_bo_unreserve(rdev->vram_scratch.robj); | ||
1238 | return r; | ||
1239 | } | ||
1240 | r = radeon_bo_kmap(rdev->vram_scratch.robj, | ||
1241 | (void **)&rdev->vram_scratch.ptr); | ||
1242 | if (r) | ||
1243 | radeon_bo_unpin(rdev->vram_scratch.robj); | ||
1244 | radeon_bo_unreserve(rdev->vram_scratch.robj); | ||
1245 | |||
1246 | return r; | ||
1247 | } | ||
1248 | |||
1249 | void r600_vram_scratch_fini(struct radeon_device *rdev) | ||
1250 | { | ||
1251 | int r; | ||
1252 | |||
1253 | if (rdev->vram_scratch.robj == NULL) { | ||
1254 | return; | ||
1255 | } | ||
1256 | r = radeon_bo_reserve(rdev->vram_scratch.robj, false); | ||
1257 | if (likely(r == 0)) { | ||
1258 | radeon_bo_kunmap(rdev->vram_scratch.robj); | ||
1259 | radeon_bo_unpin(rdev->vram_scratch.robj); | ||
1260 | radeon_bo_unreserve(rdev->vram_scratch.robj); | ||
1261 | } | ||
1262 | radeon_bo_unref(&rdev->vram_scratch.robj); | ||
1263 | } | ||
1264 | |||
1280 | /* We doesn't check that the GPU really needs a reset we simply do the | 1265 | /* We doesn't check that the GPU really needs a reset we simply do the |
1281 | * reset, it's up to the caller to determine if the GPU needs one. We | 1266 | * reset, it's up to the caller to determine if the GPU needs one. We |
1282 | * might add an helper function to check that. | 1267 | * might add an helper function to check that. |
@@ -2332,6 +2317,14 @@ void r600_fence_ring_emit(struct radeon_device *rdev, | |||
2332 | if (rdev->wb.use_event) { | 2317 | if (rdev->wb.use_event) { |
2333 | u64 addr = rdev->wb.gpu_addr + R600_WB_EVENT_OFFSET + | 2318 | u64 addr = rdev->wb.gpu_addr + R600_WB_EVENT_OFFSET + |
2334 | (u64)(rdev->fence_drv.scratch_reg - rdev->scratch.reg_base); | 2319 | (u64)(rdev->fence_drv.scratch_reg - rdev->scratch.reg_base); |
2320 | /* flush read cache over gart */ | ||
2321 | radeon_ring_write(rdev, PACKET3(PACKET3_SURFACE_SYNC, 3)); | ||
2322 | radeon_ring_write(rdev, PACKET3_TC_ACTION_ENA | | ||
2323 | PACKET3_VC_ACTION_ENA | | ||
2324 | PACKET3_SH_ACTION_ENA); | ||
2325 | radeon_ring_write(rdev, 0xFFFFFFFF); | ||
2326 | radeon_ring_write(rdev, 0); | ||
2327 | radeon_ring_write(rdev, 10); /* poll interval */ | ||
2335 | /* EVENT_WRITE_EOP - flush caches, send int */ | 2328 | /* EVENT_WRITE_EOP - flush caches, send int */ |
2336 | radeon_ring_write(rdev, PACKET3(PACKET3_EVENT_WRITE_EOP, 4)); | 2329 | radeon_ring_write(rdev, PACKET3(PACKET3_EVENT_WRITE_EOP, 4)); |
2337 | radeon_ring_write(rdev, EVENT_TYPE(CACHE_FLUSH_AND_INV_EVENT_TS) | EVENT_INDEX(5)); | 2330 | radeon_ring_write(rdev, EVENT_TYPE(CACHE_FLUSH_AND_INV_EVENT_TS) | EVENT_INDEX(5)); |
@@ -2340,6 +2333,14 @@ void r600_fence_ring_emit(struct radeon_device *rdev, | |||
2340 | radeon_ring_write(rdev, fence->seq); | 2333 | radeon_ring_write(rdev, fence->seq); |
2341 | radeon_ring_write(rdev, 0); | 2334 | radeon_ring_write(rdev, 0); |
2342 | } else { | 2335 | } else { |
2336 | /* flush read cache over gart */ | ||
2337 | radeon_ring_write(rdev, PACKET3(PACKET3_SURFACE_SYNC, 3)); | ||
2338 | radeon_ring_write(rdev, PACKET3_TC_ACTION_ENA | | ||
2339 | PACKET3_VC_ACTION_ENA | | ||
2340 | PACKET3_SH_ACTION_ENA); | ||
2341 | radeon_ring_write(rdev, 0xFFFFFFFF); | ||
2342 | radeon_ring_write(rdev, 0); | ||
2343 | radeon_ring_write(rdev, 10); /* poll interval */ | ||
2343 | radeon_ring_write(rdev, PACKET3(PACKET3_EVENT_WRITE, 0)); | 2344 | radeon_ring_write(rdev, PACKET3(PACKET3_EVENT_WRITE, 0)); |
2344 | radeon_ring_write(rdev, EVENT_TYPE(CACHE_FLUSH_AND_INV_EVENT) | EVENT_INDEX(0)); | 2345 | radeon_ring_write(rdev, EVENT_TYPE(CACHE_FLUSH_AND_INV_EVENT) | EVENT_INDEX(0)); |
2345 | /* wait for 3D idle clean */ | 2346 | /* wait for 3D idle clean */ |
@@ -2421,6 +2422,10 @@ int r600_startup(struct radeon_device *rdev) | |||
2421 | } | 2422 | } |
2422 | } | 2423 | } |
2423 | 2424 | ||
2425 | r = r600_vram_scratch_init(rdev); | ||
2426 | if (r) | ||
2427 | return r; | ||
2428 | |||
2424 | r600_mc_program(rdev); | 2429 | r600_mc_program(rdev); |
2425 | if (rdev->flags & RADEON_IS_AGP) { | 2430 | if (rdev->flags & RADEON_IS_AGP) { |
2426 | r600_agp_enable(rdev); | 2431 | r600_agp_enable(rdev); |
@@ -2641,6 +2646,7 @@ void r600_fini(struct radeon_device *rdev) | |||
2641 | radeon_ib_pool_fini(rdev); | 2646 | radeon_ib_pool_fini(rdev); |
2642 | radeon_irq_kms_fini(rdev); | 2647 | radeon_irq_kms_fini(rdev); |
2643 | r600_pcie_gart_fini(rdev); | 2648 | r600_pcie_gart_fini(rdev); |
2649 | r600_vram_scratch_fini(rdev); | ||
2644 | radeon_agp_fini(rdev); | 2650 | radeon_agp_fini(rdev); |
2645 | radeon_gem_fini(rdev); | 2651 | radeon_gem_fini(rdev); |
2646 | radeon_fence_driver_fini(rdev); | 2652 | radeon_fence_driver_fini(rdev); |
diff --git a/drivers/gpu/drm/radeon/r600_blit_kms.c b/drivers/gpu/drm/radeon/r600_blit_kms.c index c4cf1308d4a1..e09d2818f949 100644 --- a/drivers/gpu/drm/radeon/r600_blit_kms.c +++ b/drivers/gpu/drm/radeon/r600_blit_kms.c | |||
@@ -201,7 +201,7 @@ set_vtx_resource(struct radeon_device *rdev, u64 gpu_addr) | |||
201 | static void | 201 | static void |
202 | set_tex_resource(struct radeon_device *rdev, | 202 | set_tex_resource(struct radeon_device *rdev, |
203 | int format, int w, int h, int pitch, | 203 | int format, int w, int h, int pitch, |
204 | u64 gpu_addr) | 204 | u64 gpu_addr, u32 size) |
205 | { | 205 | { |
206 | uint32_t sq_tex_resource_word0, sq_tex_resource_word1, sq_tex_resource_word4; | 206 | uint32_t sq_tex_resource_word0, sq_tex_resource_word1, sq_tex_resource_word4; |
207 | 207 | ||
@@ -222,6 +222,9 @@ set_tex_resource(struct radeon_device *rdev, | |||
222 | S_038010_DST_SEL_Z(SQ_SEL_Z) | | 222 | S_038010_DST_SEL_Z(SQ_SEL_Z) | |
223 | S_038010_DST_SEL_W(SQ_SEL_W); | 223 | S_038010_DST_SEL_W(SQ_SEL_W); |
224 | 224 | ||
225 | cp_set_surface_sync(rdev, | ||
226 | PACKET3_TC_ACTION_ENA, size, gpu_addr); | ||
227 | |||
225 | radeon_ring_write(rdev, PACKET3(PACKET3_SET_RESOURCE, 7)); | 228 | radeon_ring_write(rdev, PACKET3(PACKET3_SET_RESOURCE, 7)); |
226 | radeon_ring_write(rdev, 0); | 229 | radeon_ring_write(rdev, 0); |
227 | radeon_ring_write(rdev, sq_tex_resource_word0); | 230 | radeon_ring_write(rdev, sq_tex_resource_word0); |
@@ -500,9 +503,9 @@ int r600_blit_init(struct radeon_device *rdev) | |||
500 | rdev->r600_blit.primitives.set_default_state = set_default_state; | 503 | rdev->r600_blit.primitives.set_default_state = set_default_state; |
501 | 504 | ||
502 | rdev->r600_blit.ring_size_common = 40; /* shaders + def state */ | 505 | rdev->r600_blit.ring_size_common = 40; /* shaders + def state */ |
503 | rdev->r600_blit.ring_size_common += 10; /* fence emit for VB IB */ | 506 | rdev->r600_blit.ring_size_common += 16; /* fence emit for VB IB */ |
504 | rdev->r600_blit.ring_size_common += 5; /* done copy */ | 507 | rdev->r600_blit.ring_size_common += 5; /* done copy */ |
505 | rdev->r600_blit.ring_size_common += 10; /* fence emit for done copy */ | 508 | rdev->r600_blit.ring_size_common += 16; /* fence emit for done copy */ |
506 | 509 | ||
507 | rdev->r600_blit.ring_size_per_loop = 76; | 510 | rdev->r600_blit.ring_size_per_loop = 76; |
508 | /* set_render_target emits 2 extra dwords on rv6xx */ | 511 | /* set_render_target emits 2 extra dwords on rv6xx */ |
@@ -760,10 +763,7 @@ void r600_kms_blit_copy(struct radeon_device *rdev, | |||
760 | vb[11] = i2f(h); | 763 | vb[11] = i2f(h); |
761 | 764 | ||
762 | rdev->r600_blit.primitives.set_tex_resource(rdev, FMT_8_8_8_8, | 765 | rdev->r600_blit.primitives.set_tex_resource(rdev, FMT_8_8_8_8, |
763 | w, h, w, src_gpu_addr); | 766 | w, h, w, src_gpu_addr, size_in_bytes); |
764 | rdev->r600_blit.primitives.cp_set_surface_sync(rdev, | ||
765 | PACKET3_TC_ACTION_ENA, | ||
766 | size_in_bytes, src_gpu_addr); | ||
767 | rdev->r600_blit.primitives.set_render_target(rdev, COLOR_8_8_8_8, | 767 | rdev->r600_blit.primitives.set_render_target(rdev, COLOR_8_8_8_8, |
768 | w, h, dst_gpu_addr); | 768 | w, h, dst_gpu_addr); |
769 | rdev->r600_blit.primitives.set_scissors(rdev, 0, 0, w, h); | 769 | rdev->r600_blit.primitives.set_scissors(rdev, 0, 0, w, h); |
diff --git a/drivers/gpu/drm/radeon/radeon.h b/drivers/gpu/drm/radeon/radeon.h index e3170c794c1d..fc5a1d642cb5 100644 --- a/drivers/gpu/drm/radeon/radeon.h +++ b/drivers/gpu/drm/radeon/radeon.h | |||
@@ -93,6 +93,7 @@ extern int radeon_audio; | |||
93 | extern int radeon_disp_priority; | 93 | extern int radeon_disp_priority; |
94 | extern int radeon_hw_i2c; | 94 | extern int radeon_hw_i2c; |
95 | extern int radeon_pcie_gen2; | 95 | extern int radeon_pcie_gen2; |
96 | extern int radeon_msi; | ||
96 | 97 | ||
97 | /* | 98 | /* |
98 | * Copy from radeon_drv.h so we don't have to include both and have conflicting | 99 | * Copy from radeon_drv.h so we don't have to include both and have conflicting |
@@ -306,30 +307,17 @@ int radeon_mode_dumb_destroy(struct drm_file *file_priv, | |||
306 | */ | 307 | */ |
307 | struct radeon_mc; | 308 | struct radeon_mc; |
308 | 309 | ||
309 | struct radeon_gart_table_ram { | ||
310 | volatile uint32_t *ptr; | ||
311 | }; | ||
312 | |||
313 | struct radeon_gart_table_vram { | ||
314 | struct radeon_bo *robj; | ||
315 | volatile uint32_t *ptr; | ||
316 | }; | ||
317 | |||
318 | union radeon_gart_table { | ||
319 | struct radeon_gart_table_ram ram; | ||
320 | struct radeon_gart_table_vram vram; | ||
321 | }; | ||
322 | |||
323 | #define RADEON_GPU_PAGE_SIZE 4096 | 310 | #define RADEON_GPU_PAGE_SIZE 4096 |
324 | #define RADEON_GPU_PAGE_MASK (RADEON_GPU_PAGE_SIZE - 1) | 311 | #define RADEON_GPU_PAGE_MASK (RADEON_GPU_PAGE_SIZE - 1) |
325 | #define RADEON_GPU_PAGE_SHIFT 12 | 312 | #define RADEON_GPU_PAGE_SHIFT 12 |
326 | 313 | ||
327 | struct radeon_gart { | 314 | struct radeon_gart { |
328 | dma_addr_t table_addr; | 315 | dma_addr_t table_addr; |
316 | struct radeon_bo *robj; | ||
317 | void *ptr; | ||
329 | unsigned num_gpu_pages; | 318 | unsigned num_gpu_pages; |
330 | unsigned num_cpu_pages; | 319 | unsigned num_cpu_pages; |
331 | unsigned table_size; | 320 | unsigned table_size; |
332 | union radeon_gart_table table; | ||
333 | struct page **pages; | 321 | struct page **pages; |
334 | dma_addr_t *pages_addr; | 322 | dma_addr_t *pages_addr; |
335 | bool *ttm_alloced; | 323 | bool *ttm_alloced; |
@@ -340,6 +328,8 @@ int radeon_gart_table_ram_alloc(struct radeon_device *rdev); | |||
340 | void radeon_gart_table_ram_free(struct radeon_device *rdev); | 328 | void radeon_gart_table_ram_free(struct radeon_device *rdev); |
341 | int radeon_gart_table_vram_alloc(struct radeon_device *rdev); | 329 | int radeon_gart_table_vram_alloc(struct radeon_device *rdev); |
342 | void radeon_gart_table_vram_free(struct radeon_device *rdev); | 330 | void radeon_gart_table_vram_free(struct radeon_device *rdev); |
331 | int radeon_gart_table_vram_pin(struct radeon_device *rdev); | ||
332 | void radeon_gart_table_vram_unpin(struct radeon_device *rdev); | ||
343 | int radeon_gart_init(struct radeon_device *rdev); | 333 | int radeon_gart_init(struct radeon_device *rdev); |
344 | void radeon_gart_fini(struct radeon_device *rdev); | 334 | void radeon_gart_fini(struct radeon_device *rdev); |
345 | void radeon_gart_unbind(struct radeon_device *rdev, unsigned offset, | 335 | void radeon_gart_unbind(struct radeon_device *rdev, unsigned offset, |
@@ -347,6 +337,7 @@ void radeon_gart_unbind(struct radeon_device *rdev, unsigned offset, | |||
347 | int radeon_gart_bind(struct radeon_device *rdev, unsigned offset, | 337 | int radeon_gart_bind(struct radeon_device *rdev, unsigned offset, |
348 | int pages, struct page **pagelist, | 338 | int pages, struct page **pagelist, |
349 | dma_addr_t *dma_addr); | 339 | dma_addr_t *dma_addr); |
340 | void radeon_gart_restore(struct radeon_device *rdev); | ||
350 | 341 | ||
351 | 342 | ||
352 | /* | 343 | /* |
@@ -437,25 +428,26 @@ union radeon_irq_stat_regs { | |||
437 | struct evergreen_irq_stat_regs evergreen; | 428 | struct evergreen_irq_stat_regs evergreen; |
438 | }; | 429 | }; |
439 | 430 | ||
431 | #define RADEON_MAX_HPD_PINS 6 | ||
432 | #define RADEON_MAX_CRTCS 6 | ||
433 | #define RADEON_MAX_HDMI_BLOCKS 2 | ||
434 | |||
440 | struct radeon_irq { | 435 | struct radeon_irq { |
441 | bool installed; | 436 | bool installed; |
442 | bool sw_int; | 437 | bool sw_int; |
443 | /* FIXME: use a define max crtc rather than hardcode it */ | 438 | bool crtc_vblank_int[RADEON_MAX_CRTCS]; |
444 | bool crtc_vblank_int[6]; | 439 | bool pflip[RADEON_MAX_CRTCS]; |
445 | bool pflip[6]; | ||
446 | wait_queue_head_t vblank_queue; | 440 | wait_queue_head_t vblank_queue; |
447 | /* FIXME: use defines for max hpd/dacs */ | 441 | bool hpd[RADEON_MAX_HPD_PINS]; |
448 | bool hpd[6]; | ||
449 | bool gui_idle; | 442 | bool gui_idle; |
450 | bool gui_idle_acked; | 443 | bool gui_idle_acked; |
451 | wait_queue_head_t idle_queue; | 444 | wait_queue_head_t idle_queue; |
452 | /* FIXME: use defines for max HDMI blocks */ | 445 | bool hdmi[RADEON_MAX_HDMI_BLOCKS]; |
453 | bool hdmi[2]; | ||
454 | spinlock_t sw_lock; | 446 | spinlock_t sw_lock; |
455 | int sw_refcount; | 447 | int sw_refcount; |
456 | union radeon_irq_stat_regs stat_regs; | 448 | union radeon_irq_stat_regs stat_regs; |
457 | spinlock_t pflip_lock[6]; | 449 | spinlock_t pflip_lock[RADEON_MAX_CRTCS]; |
458 | int pflip_refcount[6]; | 450 | int pflip_refcount[RADEON_MAX_CRTCS]; |
459 | }; | 451 | }; |
460 | 452 | ||
461 | int radeon_irq_kms_init(struct radeon_device *rdev); | 453 | int radeon_irq_kms_init(struct radeon_device *rdev); |
@@ -533,7 +525,7 @@ struct r600_blit_cp_primitives { | |||
533 | void (*set_vtx_resource)(struct radeon_device *rdev, u64 gpu_addr); | 525 | void (*set_vtx_resource)(struct radeon_device *rdev, u64 gpu_addr); |
534 | void (*set_tex_resource)(struct radeon_device *rdev, | 526 | void (*set_tex_resource)(struct radeon_device *rdev, |
535 | int format, int w, int h, int pitch, | 527 | int format, int w, int h, int pitch, |
536 | u64 gpu_addr); | 528 | u64 gpu_addr, u32 size); |
537 | void (*set_scissors)(struct radeon_device *rdev, int x1, int y1, | 529 | void (*set_scissors)(struct radeon_device *rdev, int x1, int y1, |
538 | int x2, int y2); | 530 | int x2, int y2); |
539 | void (*draw_auto)(struct radeon_device *rdev); | 531 | void (*draw_auto)(struct radeon_device *rdev); |
@@ -792,8 +784,7 @@ struct radeon_pm_clock_info { | |||
792 | 784 | ||
793 | struct radeon_power_state { | 785 | struct radeon_power_state { |
794 | enum radeon_pm_state_type type; | 786 | enum radeon_pm_state_type type; |
795 | /* XXX: use a define for num clock modes */ | 787 | struct radeon_pm_clock_info *clock_info; |
796 | struct radeon_pm_clock_info clock_info[8]; | ||
797 | /* number of valid clock modes in this power state */ | 788 | /* number of valid clock modes in this power state */ |
798 | int num_clock_modes; | 789 | int num_clock_modes; |
799 | struct radeon_pm_clock_info *default_clock_mode; | 790 | struct radeon_pm_clock_info *default_clock_mode; |
@@ -863,6 +854,9 @@ struct radeon_pm { | |||
863 | struct device *int_hwmon_dev; | 854 | struct device *int_hwmon_dev; |
864 | }; | 855 | }; |
865 | 856 | ||
857 | int radeon_pm_get_type_index(struct radeon_device *rdev, | ||
858 | enum radeon_pm_state_type ps_type, | ||
859 | int instance); | ||
866 | 860 | ||
867 | /* | 861 | /* |
868 | * Benchmarking | 862 | * Benchmarking |
@@ -1143,12 +1137,55 @@ int radeon_gem_set_tiling_ioctl(struct drm_device *dev, void *data, | |||
1143 | int radeon_gem_get_tiling_ioctl(struct drm_device *dev, void *data, | 1137 | int radeon_gem_get_tiling_ioctl(struct drm_device *dev, void *data, |
1144 | struct drm_file *filp); | 1138 | struct drm_file *filp); |
1145 | 1139 | ||
1146 | /* VRAM scratch page for HDP bug */ | 1140 | /* VRAM scratch page for HDP bug, default vram page */ |
1147 | struct r700_vram_scratch { | 1141 | struct r600_vram_scratch { |
1148 | struct radeon_bo *robj; | 1142 | struct radeon_bo *robj; |
1149 | volatile uint32_t *ptr; | 1143 | volatile uint32_t *ptr; |
1144 | u64 gpu_addr; | ||
1150 | }; | 1145 | }; |
1151 | 1146 | ||
1147 | |||
1148 | /* | ||
1149 | * Mutex which allows recursive locking from the same process. | ||
1150 | */ | ||
1151 | struct radeon_mutex { | ||
1152 | struct mutex mutex; | ||
1153 | struct task_struct *owner; | ||
1154 | int level; | ||
1155 | }; | ||
1156 | |||
1157 | static inline void radeon_mutex_init(struct radeon_mutex *mutex) | ||
1158 | { | ||
1159 | mutex_init(&mutex->mutex); | ||
1160 | mutex->owner = NULL; | ||
1161 | mutex->level = 0; | ||
1162 | } | ||
1163 | |||
1164 | static inline void radeon_mutex_lock(struct radeon_mutex *mutex) | ||
1165 | { | ||
1166 | if (mutex_trylock(&mutex->mutex)) { | ||
1167 | /* The mutex was unlocked before, so it's ours now */ | ||
1168 | mutex->owner = current; | ||
1169 | } else if (mutex->owner != current) { | ||
1170 | /* Another process locked the mutex, take it */ | ||
1171 | mutex_lock(&mutex->mutex); | ||
1172 | mutex->owner = current; | ||
1173 | } | ||
1174 | /* Otherwise the mutex was already locked by this process */ | ||
1175 | |||
1176 | mutex->level++; | ||
1177 | } | ||
1178 | |||
1179 | static inline void radeon_mutex_unlock(struct radeon_mutex *mutex) | ||
1180 | { | ||
1181 | if (--mutex->level > 0) | ||
1182 | return; | ||
1183 | |||
1184 | mutex->owner = NULL; | ||
1185 | mutex_unlock(&mutex->mutex); | ||
1186 | } | ||
1187 | |||
1188 | |||
1152 | /* | 1189 | /* |
1153 | * Core structure, functions and helpers. | 1190 | * Core structure, functions and helpers. |
1154 | */ | 1191 | */ |
@@ -1204,7 +1241,7 @@ struct radeon_device { | |||
1204 | struct radeon_gem gem; | 1241 | struct radeon_gem gem; |
1205 | struct radeon_pm pm; | 1242 | struct radeon_pm pm; |
1206 | uint32_t bios_scratch[RADEON_BIOS_NUM_SCRATCH]; | 1243 | uint32_t bios_scratch[RADEON_BIOS_NUM_SCRATCH]; |
1207 | struct mutex cs_mutex; | 1244 | struct radeon_mutex cs_mutex; |
1208 | struct radeon_wb wb; | 1245 | struct radeon_wb wb; |
1209 | struct radeon_dummy_page dummy_page; | 1246 | struct radeon_dummy_page dummy_page; |
1210 | bool gpu_lockup; | 1247 | bool gpu_lockup; |
@@ -1218,7 +1255,7 @@ struct radeon_device { | |||
1218 | const struct firmware *rlc_fw; /* r6/700 RLC firmware */ | 1255 | const struct firmware *rlc_fw; /* r6/700 RLC firmware */ |
1219 | const struct firmware *mc_fw; /* NI MC firmware */ | 1256 | const struct firmware *mc_fw; /* NI MC firmware */ |
1220 | struct r600_blit r600_blit; | 1257 | struct r600_blit r600_blit; |
1221 | struct r700_vram_scratch vram_scratch; | 1258 | struct r600_vram_scratch vram_scratch; |
1222 | int msi_enabled; /* msi enabled */ | 1259 | int msi_enabled; /* msi enabled */ |
1223 | struct r600_ih ih; /* r6/700 interrupt ring */ | 1260 | struct r600_ih ih; /* r6/700 interrupt ring */ |
1224 | struct work_struct hotplug_work; | 1261 | struct work_struct hotplug_work; |
@@ -1442,8 +1479,6 @@ void radeon_ring_write(struct radeon_device *rdev, uint32_t v); | |||
1442 | /* AGP */ | 1479 | /* AGP */ |
1443 | extern int radeon_gpu_reset(struct radeon_device *rdev); | 1480 | extern int radeon_gpu_reset(struct radeon_device *rdev); |
1444 | extern void radeon_agp_disable(struct radeon_device *rdev); | 1481 | extern void radeon_agp_disable(struct radeon_device *rdev); |
1445 | extern int radeon_gart_table_vram_pin(struct radeon_device *rdev); | ||
1446 | extern void radeon_gart_restore(struct radeon_device *rdev); | ||
1447 | extern int radeon_modeset_init(struct radeon_device *rdev); | 1482 | extern int radeon_modeset_init(struct radeon_device *rdev); |
1448 | extern void radeon_modeset_fini(struct radeon_device *rdev); | 1483 | extern void radeon_modeset_fini(struct radeon_device *rdev); |
1449 | extern bool radeon_card_posted(struct radeon_device *rdev); | 1484 | extern bool radeon_card_posted(struct radeon_device *rdev); |
@@ -1467,6 +1502,12 @@ extern int radeon_suspend_kms(struct drm_device *dev, pm_message_t state); | |||
1467 | extern void radeon_ttm_set_active_vram_size(struct radeon_device *rdev, u64 size); | 1502 | extern void radeon_ttm_set_active_vram_size(struct radeon_device *rdev, u64 size); |
1468 | 1503 | ||
1469 | /* | 1504 | /* |
1505 | * R600 vram scratch functions | ||
1506 | */ | ||
1507 | int r600_vram_scratch_init(struct radeon_device *rdev); | ||
1508 | void r600_vram_scratch_fini(struct radeon_device *rdev); | ||
1509 | |||
1510 | /* | ||
1470 | * r600 functions used by radeon_encoder.c | 1511 | * r600 functions used by radeon_encoder.c |
1471 | */ | 1512 | */ |
1472 | extern void r600_hdmi_enable(struct drm_encoder *encoder); | 1513 | extern void r600_hdmi_enable(struct drm_encoder *encoder); |
diff --git a/drivers/gpu/drm/radeon/radeon_asic.c b/drivers/gpu/drm/radeon/radeon_asic.c index e2944566ffea..a2e1eae114ef 100644 --- a/drivers/gpu/drm/radeon/radeon_asic.c +++ b/drivers/gpu/drm/radeon/radeon_asic.c | |||
@@ -834,7 +834,7 @@ static struct radeon_asic sumo_asic = { | |||
834 | .pm_misc = &evergreen_pm_misc, | 834 | .pm_misc = &evergreen_pm_misc, |
835 | .pm_prepare = &evergreen_pm_prepare, | 835 | .pm_prepare = &evergreen_pm_prepare, |
836 | .pm_finish = &evergreen_pm_finish, | 836 | .pm_finish = &evergreen_pm_finish, |
837 | .pm_init_profile = &rs780_pm_init_profile, | 837 | .pm_init_profile = &sumo_pm_init_profile, |
838 | .pm_get_dynpm_state = &r600_pm_get_dynpm_state, | 838 | .pm_get_dynpm_state = &r600_pm_get_dynpm_state, |
839 | .pre_page_flip = &evergreen_pre_page_flip, | 839 | .pre_page_flip = &evergreen_pre_page_flip, |
840 | .page_flip = &evergreen_page_flip, | 840 | .page_flip = &evergreen_page_flip, |
diff --git a/drivers/gpu/drm/radeon/radeon_asic.h b/drivers/gpu/drm/radeon/radeon_asic.h index 85f14f0337e4..59914842a729 100644 --- a/drivers/gpu/drm/radeon/radeon_asic.h +++ b/drivers/gpu/drm/radeon/radeon_asic.h | |||
@@ -413,6 +413,7 @@ extern int evergreen_cs_parse(struct radeon_cs_parser *p); | |||
413 | extern void evergreen_pm_misc(struct radeon_device *rdev); | 413 | extern void evergreen_pm_misc(struct radeon_device *rdev); |
414 | extern void evergreen_pm_prepare(struct radeon_device *rdev); | 414 | extern void evergreen_pm_prepare(struct radeon_device *rdev); |
415 | extern void evergreen_pm_finish(struct radeon_device *rdev); | 415 | extern void evergreen_pm_finish(struct radeon_device *rdev); |
416 | extern void sumo_pm_init_profile(struct radeon_device *rdev); | ||
416 | extern void evergreen_pre_page_flip(struct radeon_device *rdev, int crtc); | 417 | extern void evergreen_pre_page_flip(struct radeon_device *rdev, int crtc); |
417 | extern u32 evergreen_page_flip(struct radeon_device *rdev, int crtc, u64 crtc_base); | 418 | extern u32 evergreen_page_flip(struct radeon_device *rdev, int crtc, u64 crtc_base); |
418 | extern void evergreen_post_page_flip(struct radeon_device *rdev, int crtc); | 419 | extern void evergreen_post_page_flip(struct radeon_device *rdev, int crtc); |
diff --git a/drivers/gpu/drm/radeon/radeon_atombios.c b/drivers/gpu/drm/radeon/radeon_atombios.c index 08d0b94332e6..d2d179267af3 100644 --- a/drivers/gpu/drm/radeon/radeon_atombios.c +++ b/drivers/gpu/drm/radeon/radeon_atombios.c | |||
@@ -1999,6 +1999,10 @@ static int radeon_atombios_parse_power_table_1_3(struct radeon_device *rdev) | |||
1999 | rdev->pm.power_state[state_index].clock_info[0].voltage.type = VOLTAGE_NONE; | 1999 | rdev->pm.power_state[state_index].clock_info[0].voltage.type = VOLTAGE_NONE; |
2000 | switch (frev) { | 2000 | switch (frev) { |
2001 | case 1: | 2001 | case 1: |
2002 | rdev->pm.power_state[state_index].clock_info = | ||
2003 | kzalloc(sizeof(struct radeon_pm_clock_info) * 1, GFP_KERNEL); | ||
2004 | if (!rdev->pm.power_state[state_index].clock_info) | ||
2005 | return state_index; | ||
2002 | rdev->pm.power_state[state_index].num_clock_modes = 1; | 2006 | rdev->pm.power_state[state_index].num_clock_modes = 1; |
2003 | rdev->pm.power_state[state_index].clock_info[0].mclk = | 2007 | rdev->pm.power_state[state_index].clock_info[0].mclk = |
2004 | le16_to_cpu(power_info->info.asPowerPlayInfo[i].usMemoryClock); | 2008 | le16_to_cpu(power_info->info.asPowerPlayInfo[i].usMemoryClock); |
@@ -2035,6 +2039,10 @@ static int radeon_atombios_parse_power_table_1_3(struct radeon_device *rdev) | |||
2035 | state_index++; | 2039 | state_index++; |
2036 | break; | 2040 | break; |
2037 | case 2: | 2041 | case 2: |
2042 | rdev->pm.power_state[state_index].clock_info = | ||
2043 | kzalloc(sizeof(struct radeon_pm_clock_info) * 1, GFP_KERNEL); | ||
2044 | if (!rdev->pm.power_state[state_index].clock_info) | ||
2045 | return state_index; | ||
2038 | rdev->pm.power_state[state_index].num_clock_modes = 1; | 2046 | rdev->pm.power_state[state_index].num_clock_modes = 1; |
2039 | rdev->pm.power_state[state_index].clock_info[0].mclk = | 2047 | rdev->pm.power_state[state_index].clock_info[0].mclk = |
2040 | le32_to_cpu(power_info->info_2.asPowerPlayInfo[i].ulMemoryClock); | 2048 | le32_to_cpu(power_info->info_2.asPowerPlayInfo[i].ulMemoryClock); |
@@ -2072,6 +2080,10 @@ static int radeon_atombios_parse_power_table_1_3(struct radeon_device *rdev) | |||
2072 | state_index++; | 2080 | state_index++; |
2073 | break; | 2081 | break; |
2074 | case 3: | 2082 | case 3: |
2083 | rdev->pm.power_state[state_index].clock_info = | ||
2084 | kzalloc(sizeof(struct radeon_pm_clock_info) * 1, GFP_KERNEL); | ||
2085 | if (!rdev->pm.power_state[state_index].clock_info) | ||
2086 | return state_index; | ||
2075 | rdev->pm.power_state[state_index].num_clock_modes = 1; | 2087 | rdev->pm.power_state[state_index].num_clock_modes = 1; |
2076 | rdev->pm.power_state[state_index].clock_info[0].mclk = | 2088 | rdev->pm.power_state[state_index].clock_info[0].mclk = |
2077 | le32_to_cpu(power_info->info_3.asPowerPlayInfo[i].ulMemoryClock); | 2089 | le32_to_cpu(power_info->info_3.asPowerPlayInfo[i].ulMemoryClock); |
@@ -2257,7 +2269,7 @@ static void radeon_atombios_parse_pplib_non_clock_info(struct radeon_device *rde | |||
2257 | rdev->pm.default_power_state_index = state_index; | 2269 | rdev->pm.default_power_state_index = state_index; |
2258 | rdev->pm.power_state[state_index].default_clock_mode = | 2270 | rdev->pm.power_state[state_index].default_clock_mode = |
2259 | &rdev->pm.power_state[state_index].clock_info[mode_index - 1]; | 2271 | &rdev->pm.power_state[state_index].clock_info[mode_index - 1]; |
2260 | if (ASIC_IS_DCE5(rdev)) { | 2272 | if (ASIC_IS_DCE5(rdev) && !(rdev->flags & RADEON_IS_IGP)) { |
2261 | /* NI chips post without MC ucode, so default clocks are strobe mode only */ | 2273 | /* NI chips post without MC ucode, so default clocks are strobe mode only */ |
2262 | rdev->pm.default_sclk = rdev->pm.power_state[state_index].clock_info[0].sclk; | 2274 | rdev->pm.default_sclk = rdev->pm.power_state[state_index].clock_info[0].sclk; |
2263 | rdev->pm.default_mclk = rdev->pm.power_state[state_index].clock_info[0].mclk; | 2275 | rdev->pm.default_mclk = rdev->pm.power_state[state_index].clock_info[0].mclk; |
@@ -2377,17 +2389,31 @@ static int radeon_atombios_parse_power_table_4_5(struct radeon_device *rdev) | |||
2377 | le16_to_cpu(power_info->pplib.usNonClockInfoArrayOffset) + | 2389 | le16_to_cpu(power_info->pplib.usNonClockInfoArrayOffset) + |
2378 | (power_state->v1.ucNonClockStateIndex * | 2390 | (power_state->v1.ucNonClockStateIndex * |
2379 | power_info->pplib.ucNonClockSize)); | 2391 | power_info->pplib.ucNonClockSize)); |
2380 | for (j = 0; j < (power_info->pplib.ucStateEntrySize - 1); j++) { | 2392 | rdev->pm.power_state[i].clock_info = kzalloc(sizeof(struct radeon_pm_clock_info) * |
2381 | clock_info = (union pplib_clock_info *) | 2393 | ((power_info->pplib.ucStateEntrySize - 1) ? |
2382 | (mode_info->atom_context->bios + data_offset + | 2394 | (power_info->pplib.ucStateEntrySize - 1) : 1), |
2383 | le16_to_cpu(power_info->pplib.usClockInfoArrayOffset) + | 2395 | GFP_KERNEL); |
2384 | (power_state->v1.ucClockStateIndices[j] * | 2396 | if (!rdev->pm.power_state[i].clock_info) |
2385 | power_info->pplib.ucClockInfoSize)); | 2397 | return state_index; |
2386 | valid = radeon_atombios_parse_pplib_clock_info(rdev, | 2398 | if (power_info->pplib.ucStateEntrySize - 1) { |
2387 | state_index, mode_index, | 2399 | for (j = 0; j < (power_info->pplib.ucStateEntrySize - 1); j++) { |
2388 | clock_info); | 2400 | clock_info = (union pplib_clock_info *) |
2389 | if (valid) | 2401 | (mode_info->atom_context->bios + data_offset + |
2390 | mode_index++; | 2402 | le16_to_cpu(power_info->pplib.usClockInfoArrayOffset) + |
2403 | (power_state->v1.ucClockStateIndices[j] * | ||
2404 | power_info->pplib.ucClockInfoSize)); | ||
2405 | valid = radeon_atombios_parse_pplib_clock_info(rdev, | ||
2406 | state_index, mode_index, | ||
2407 | clock_info); | ||
2408 | if (valid) | ||
2409 | mode_index++; | ||
2410 | } | ||
2411 | } else { | ||
2412 | rdev->pm.power_state[state_index].clock_info[0].mclk = | ||
2413 | rdev->clock.default_mclk; | ||
2414 | rdev->pm.power_state[state_index].clock_info[0].sclk = | ||
2415 | rdev->clock.default_sclk; | ||
2416 | mode_index++; | ||
2391 | } | 2417 | } |
2392 | rdev->pm.power_state[state_index].num_clock_modes = mode_index; | 2418 | rdev->pm.power_state[state_index].num_clock_modes = mode_index; |
2393 | if (mode_index) { | 2419 | if (mode_index) { |
@@ -2456,18 +2482,32 @@ static int radeon_atombios_parse_power_table_6(struct radeon_device *rdev) | |||
2456 | non_clock_array_index = i; /* power_state->v2.nonClockInfoIndex */ | 2482 | non_clock_array_index = i; /* power_state->v2.nonClockInfoIndex */ |
2457 | non_clock_info = (struct _ATOM_PPLIB_NONCLOCK_INFO *) | 2483 | non_clock_info = (struct _ATOM_PPLIB_NONCLOCK_INFO *) |
2458 | &non_clock_info_array->nonClockInfo[non_clock_array_index]; | 2484 | &non_clock_info_array->nonClockInfo[non_clock_array_index]; |
2459 | for (j = 0; j < power_state->v2.ucNumDPMLevels; j++) { | 2485 | rdev->pm.power_state[i].clock_info = kzalloc(sizeof(struct radeon_pm_clock_info) * |
2460 | clock_array_index = power_state->v2.clockInfoIndex[j]; | 2486 | (power_state->v2.ucNumDPMLevels ? |
2461 | /* XXX this might be an inagua bug... */ | 2487 | power_state->v2.ucNumDPMLevels : 1), |
2462 | if (clock_array_index >= clock_info_array->ucNumEntries) | 2488 | GFP_KERNEL); |
2463 | continue; | 2489 | if (!rdev->pm.power_state[i].clock_info) |
2464 | clock_info = (union pplib_clock_info *) | 2490 | return state_index; |
2465 | &clock_info_array->clockInfo[clock_array_index]; | 2491 | if (power_state->v2.ucNumDPMLevels) { |
2466 | valid = radeon_atombios_parse_pplib_clock_info(rdev, | 2492 | for (j = 0; j < power_state->v2.ucNumDPMLevels; j++) { |
2467 | state_index, mode_index, | 2493 | clock_array_index = power_state->v2.clockInfoIndex[j]; |
2468 | clock_info); | 2494 | /* XXX this might be an inagua bug... */ |
2469 | if (valid) | 2495 | if (clock_array_index >= clock_info_array->ucNumEntries) |
2470 | mode_index++; | 2496 | continue; |
2497 | clock_info = (union pplib_clock_info *) | ||
2498 | &clock_info_array->clockInfo[clock_array_index]; | ||
2499 | valid = radeon_atombios_parse_pplib_clock_info(rdev, | ||
2500 | state_index, mode_index, | ||
2501 | clock_info); | ||
2502 | if (valid) | ||
2503 | mode_index++; | ||
2504 | } | ||
2505 | } else { | ||
2506 | rdev->pm.power_state[state_index].clock_info[0].mclk = | ||
2507 | rdev->clock.default_mclk; | ||
2508 | rdev->pm.power_state[state_index].clock_info[0].sclk = | ||
2509 | rdev->clock.default_sclk; | ||
2510 | mode_index++; | ||
2471 | } | 2511 | } |
2472 | rdev->pm.power_state[state_index].num_clock_modes = mode_index; | 2512 | rdev->pm.power_state[state_index].num_clock_modes = mode_index; |
2473 | if (mode_index) { | 2513 | if (mode_index) { |
@@ -2524,19 +2564,23 @@ void radeon_atombios_get_power_modes(struct radeon_device *rdev) | |||
2524 | } else { | 2564 | } else { |
2525 | rdev->pm.power_state = kzalloc(sizeof(struct radeon_power_state), GFP_KERNEL); | 2565 | rdev->pm.power_state = kzalloc(sizeof(struct radeon_power_state), GFP_KERNEL); |
2526 | if (rdev->pm.power_state) { | 2566 | if (rdev->pm.power_state) { |
2527 | /* add the default mode */ | 2567 | rdev->pm.power_state[0].clock_info = |
2528 | rdev->pm.power_state[state_index].type = | 2568 | kzalloc(sizeof(struct radeon_pm_clock_info) * 1, GFP_KERNEL); |
2529 | POWER_STATE_TYPE_DEFAULT; | 2569 | if (rdev->pm.power_state[0].clock_info) { |
2530 | rdev->pm.power_state[state_index].num_clock_modes = 1; | 2570 | /* add the default mode */ |
2531 | rdev->pm.power_state[state_index].clock_info[0].mclk = rdev->clock.default_mclk; | 2571 | rdev->pm.power_state[state_index].type = |
2532 | rdev->pm.power_state[state_index].clock_info[0].sclk = rdev->clock.default_sclk; | 2572 | POWER_STATE_TYPE_DEFAULT; |
2533 | rdev->pm.power_state[state_index].default_clock_mode = | 2573 | rdev->pm.power_state[state_index].num_clock_modes = 1; |
2534 | &rdev->pm.power_state[state_index].clock_info[0]; | 2574 | rdev->pm.power_state[state_index].clock_info[0].mclk = rdev->clock.default_mclk; |
2535 | rdev->pm.power_state[state_index].clock_info[0].voltage.type = VOLTAGE_NONE; | 2575 | rdev->pm.power_state[state_index].clock_info[0].sclk = rdev->clock.default_sclk; |
2536 | rdev->pm.power_state[state_index].pcie_lanes = 16; | 2576 | rdev->pm.power_state[state_index].default_clock_mode = |
2537 | rdev->pm.default_power_state_index = state_index; | 2577 | &rdev->pm.power_state[state_index].clock_info[0]; |
2538 | rdev->pm.power_state[state_index].flags = 0; | 2578 | rdev->pm.power_state[state_index].clock_info[0].voltage.type = VOLTAGE_NONE; |
2539 | state_index++; | 2579 | rdev->pm.power_state[state_index].pcie_lanes = 16; |
2580 | rdev->pm.default_power_state_index = state_index; | ||
2581 | rdev->pm.power_state[state_index].flags = 0; | ||
2582 | state_index++; | ||
2583 | } | ||
2540 | } | 2584 | } |
2541 | } | 2585 | } |
2542 | 2586 | ||
diff --git a/drivers/gpu/drm/radeon/radeon_benchmark.c b/drivers/gpu/drm/radeon/radeon_benchmark.c index 5cafc90de7f8..17e1a9b2d8fb 100644 --- a/drivers/gpu/drm/radeon/radeon_benchmark.c +++ b/drivers/gpu/drm/radeon/radeon_benchmark.c | |||
@@ -98,7 +98,7 @@ static void radeon_benchmark_move(struct radeon_device *rdev, unsigned size, | |||
98 | struct radeon_bo *sobj = NULL; | 98 | struct radeon_bo *sobj = NULL; |
99 | uint64_t saddr, daddr; | 99 | uint64_t saddr, daddr; |
100 | int r, n; | 100 | int r, n; |
101 | unsigned int time; | 101 | int time; |
102 | 102 | ||
103 | n = RADEON_BENCHMARK_ITERATIONS; | 103 | n = RADEON_BENCHMARK_ITERATIONS; |
104 | r = radeon_bo_create(rdev, size, PAGE_SIZE, true, sdomain, &sobj); | 104 | r = radeon_bo_create(rdev, size, PAGE_SIZE, true, sdomain, &sobj); |
diff --git a/drivers/gpu/drm/radeon/radeon_combios.c b/drivers/gpu/drm/radeon/radeon_combios.c index 8bf83c4b4147..81fc100be7e1 100644 --- a/drivers/gpu/drm/radeon/radeon_combios.c +++ b/drivers/gpu/drm/radeon/radeon_combios.c | |||
@@ -2563,14 +2563,17 @@ void radeon_combios_get_power_modes(struct radeon_device *rdev) | |||
2563 | 2563 | ||
2564 | /* allocate 2 power states */ | 2564 | /* allocate 2 power states */ |
2565 | rdev->pm.power_state = kzalloc(sizeof(struct radeon_power_state) * 2, GFP_KERNEL); | 2565 | rdev->pm.power_state = kzalloc(sizeof(struct radeon_power_state) * 2, GFP_KERNEL); |
2566 | if (!rdev->pm.power_state) { | 2566 | if (rdev->pm.power_state) { |
2567 | rdev->pm.default_power_state_index = state_index; | 2567 | /* allocate 1 clock mode per state */ |
2568 | rdev->pm.num_power_states = 0; | 2568 | rdev->pm.power_state[0].clock_info = |
2569 | 2569 | kzalloc(sizeof(struct radeon_pm_clock_info) * 1, GFP_KERNEL); | |
2570 | rdev->pm.current_power_state_index = rdev->pm.default_power_state_index; | 2570 | rdev->pm.power_state[1].clock_info = |
2571 | rdev->pm.current_clock_mode_index = 0; | 2571 | kzalloc(sizeof(struct radeon_pm_clock_info) * 1, GFP_KERNEL); |
2572 | return; | 2572 | if (!rdev->pm.power_state[0].clock_info || |
2573 | } | 2573 | !rdev->pm.power_state[1].clock_info) |
2574 | goto pm_failed; | ||
2575 | } else | ||
2576 | goto pm_failed; | ||
2574 | 2577 | ||
2575 | /* check for a thermal chip */ | 2578 | /* check for a thermal chip */ |
2576 | offset = combios_get_table_offset(dev, COMBIOS_OVERDRIVE_INFO_TABLE); | 2579 | offset = combios_get_table_offset(dev, COMBIOS_OVERDRIVE_INFO_TABLE); |
@@ -2735,6 +2738,14 @@ default_mode: | |||
2735 | 2738 | ||
2736 | rdev->pm.current_power_state_index = rdev->pm.default_power_state_index; | 2739 | rdev->pm.current_power_state_index = rdev->pm.default_power_state_index; |
2737 | rdev->pm.current_clock_mode_index = 0; | 2740 | rdev->pm.current_clock_mode_index = 0; |
2741 | return; | ||
2742 | |||
2743 | pm_failed: | ||
2744 | rdev->pm.default_power_state_index = state_index; | ||
2745 | rdev->pm.num_power_states = 0; | ||
2746 | |||
2747 | rdev->pm.current_power_state_index = rdev->pm.default_power_state_index; | ||
2748 | rdev->pm.current_clock_mode_index = 0; | ||
2738 | } | 2749 | } |
2739 | 2750 | ||
2740 | void radeon_external_tmds_setup(struct drm_encoder *encoder) | 2751 | void radeon_external_tmds_setup(struct drm_encoder *encoder) |
diff --git a/drivers/gpu/drm/radeon/radeon_connectors.c b/drivers/gpu/drm/radeon/radeon_connectors.c index dec6cbe6a0a6..e7cb3ab09243 100644 --- a/drivers/gpu/drm/radeon/radeon_connectors.c +++ b/drivers/gpu/drm/radeon/radeon_connectors.c | |||
@@ -44,8 +44,6 @@ extern void | |||
44 | radeon_legacy_backlight_init(struct radeon_encoder *radeon_encoder, | 44 | radeon_legacy_backlight_init(struct radeon_encoder *radeon_encoder, |
45 | struct drm_connector *drm_connector); | 45 | struct drm_connector *drm_connector); |
46 | 46 | ||
47 | bool radeon_connector_encoder_is_dp_bridge(struct drm_connector *connector); | ||
48 | |||
49 | void radeon_connector_hotplug(struct drm_connector *connector) | 47 | void radeon_connector_hotplug(struct drm_connector *connector) |
50 | { | 48 | { |
51 | struct drm_device *dev = connector->dev; | 49 | struct drm_device *dev = connector->dev; |
@@ -432,55 +430,6 @@ int radeon_connector_set_property(struct drm_connector *connector, struct drm_pr | |||
432 | return 0; | 430 | return 0; |
433 | } | 431 | } |
434 | 432 | ||
435 | /* | ||
436 | * Some integrated ATI Radeon chipset implementations (e. g. | ||
437 | * Asus M2A-VM HDMI) may indicate the availability of a DDC, | ||
438 | * even when there's no monitor connected. For these connectors | ||
439 | * following DDC probe extension will be applied: check also for the | ||
440 | * availability of EDID with at least a correct EDID header. Only then, | ||
441 | * DDC is assumed to be available. This prevents drm_get_edid() and | ||
442 | * drm_edid_block_valid() from periodically dumping data and kernel | ||
443 | * errors into the logs and onto the terminal. | ||
444 | */ | ||
445 | static bool radeon_connector_needs_extended_probe(struct radeon_device *dev, | ||
446 | uint32_t supported_device, | ||
447 | int connector_type) | ||
448 | { | ||
449 | /* Asus M2A-VM HDMI board sends data to i2c bus even, | ||
450 | * if HDMI add-on card is not plugged in or HDMI is disabled in | ||
451 | * BIOS. Valid DDC can only be assumed, if also a valid EDID header | ||
452 | * can be retrieved via i2c bus during DDC probe */ | ||
453 | if ((dev->pdev->device == 0x791e) && | ||
454 | (dev->pdev->subsystem_vendor == 0x1043) && | ||
455 | (dev->pdev->subsystem_device == 0x826d)) { | ||
456 | if ((connector_type == DRM_MODE_CONNECTOR_HDMIA) && | ||
457 | (supported_device == ATOM_DEVICE_DFP2_SUPPORT)) | ||
458 | return true; | ||
459 | } | ||
460 | /* ECS A740GM-M with ATI RADEON 2100 sends data to i2c bus | ||
461 | * for a DVI connector that is not implemented */ | ||
462 | if ((dev->pdev->device == 0x796e) && | ||
463 | (dev->pdev->subsystem_vendor == 0x1019) && | ||
464 | (dev->pdev->subsystem_device == 0x2615)) { | ||
465 | if ((connector_type == DRM_MODE_CONNECTOR_DVID) && | ||
466 | (supported_device == ATOM_DEVICE_DFP2_SUPPORT)) | ||
467 | return true; | ||
468 | } | ||
469 | /* TOSHIBA Satellite L300D with ATI Mobility Radeon x1100 | ||
470 | * (RS690M) sends data to i2c bus for a HDMI connector that | ||
471 | * is not implemented */ | ||
472 | if ((dev->pdev->device == 0x791f) && | ||
473 | (dev->pdev->subsystem_vendor == 0x1179) && | ||
474 | (dev->pdev->subsystem_device == 0xff68)) { | ||
475 | if ((connector_type == DRM_MODE_CONNECTOR_HDMIA) && | ||
476 | (supported_device == ATOM_DEVICE_DFP2_SUPPORT)) | ||
477 | return true; | ||
478 | } | ||
479 | |||
480 | /* Default: no EDID header probe required for DDC probing */ | ||
481 | return false; | ||
482 | } | ||
483 | |||
484 | static void radeon_fixup_lvds_native_mode(struct drm_encoder *encoder, | 433 | static void radeon_fixup_lvds_native_mode(struct drm_encoder *encoder, |
485 | struct drm_connector *connector) | 434 | struct drm_connector *connector) |
486 | { | 435 | { |
@@ -721,8 +670,7 @@ radeon_vga_detect(struct drm_connector *connector, bool force) | |||
721 | ret = connector_status_disconnected; | 670 | ret = connector_status_disconnected; |
722 | 671 | ||
723 | if (radeon_connector->ddc_bus) | 672 | if (radeon_connector->ddc_bus) |
724 | dret = radeon_ddc_probe(radeon_connector, | 673 | dret = radeon_ddc_probe(radeon_connector); |
725 | radeon_connector->requires_extended_probe); | ||
726 | if (dret) { | 674 | if (dret) { |
727 | radeon_connector->detected_by_load = false; | 675 | radeon_connector->detected_by_load = false; |
728 | if (radeon_connector->edid) { | 676 | if (radeon_connector->edid) { |
@@ -764,7 +712,7 @@ radeon_vga_detect(struct drm_connector *connector, bool force) | |||
764 | if (radeon_connector->dac_load_detect && encoder) { | 712 | if (radeon_connector->dac_load_detect && encoder) { |
765 | encoder_funcs = encoder->helper_private; | 713 | encoder_funcs = encoder->helper_private; |
766 | ret = encoder_funcs->detect(encoder, connector); | 714 | ret = encoder_funcs->detect(encoder, connector); |
767 | if (ret == connector_status_connected) | 715 | if (ret != connector_status_disconnected) |
768 | radeon_connector->detected_by_load = true; | 716 | radeon_connector->detected_by_load = true; |
769 | } | 717 | } |
770 | } | 718 | } |
@@ -904,8 +852,7 @@ radeon_dvi_detect(struct drm_connector *connector, bool force) | |||
904 | bool dret = false; | 852 | bool dret = false; |
905 | 853 | ||
906 | if (radeon_connector->ddc_bus) | 854 | if (radeon_connector->ddc_bus) |
907 | dret = radeon_ddc_probe(radeon_connector, | 855 | dret = radeon_ddc_probe(radeon_connector); |
908 | radeon_connector->requires_extended_probe); | ||
909 | if (dret) { | 856 | if (dret) { |
910 | radeon_connector->detected_by_load = false; | 857 | radeon_connector->detected_by_load = false; |
911 | if (radeon_connector->edid) { | 858 | if (radeon_connector->edid) { |
@@ -1005,8 +952,9 @@ radeon_dvi_detect(struct drm_connector *connector, bool force) | |||
1005 | ret = encoder_funcs->detect(encoder, connector); | 952 | ret = encoder_funcs->detect(encoder, connector); |
1006 | if (ret == connector_status_connected) { | 953 | if (ret == connector_status_connected) { |
1007 | radeon_connector->use_digital = false; | 954 | radeon_connector->use_digital = false; |
1008 | radeon_connector->detected_by_load = true; | ||
1009 | } | 955 | } |
956 | if (ret != connector_status_disconnected) | ||
957 | radeon_connector->detected_by_load = true; | ||
1010 | } | 958 | } |
1011 | break; | 959 | break; |
1012 | } | 960 | } |
@@ -1203,7 +1151,8 @@ static int radeon_dp_get_modes(struct drm_connector *connector) | |||
1203 | } | 1151 | } |
1204 | } else { | 1152 | } else { |
1205 | /* need to setup ddc on the bridge */ | 1153 | /* need to setup ddc on the bridge */ |
1206 | if (radeon_connector_encoder_is_dp_bridge(connector)) { | 1154 | if (radeon_connector_encoder_get_dp_bridge_encoder_id(connector) != |
1155 | ENCODER_OBJECT_ID_NONE) { | ||
1207 | if (encoder) | 1156 | if (encoder) |
1208 | radeon_atom_ext_encoder_setup_ddc(encoder); | 1157 | radeon_atom_ext_encoder_setup_ddc(encoder); |
1209 | } | 1158 | } |
@@ -1213,13 +1162,12 @@ static int radeon_dp_get_modes(struct drm_connector *connector) | |||
1213 | return ret; | 1162 | return ret; |
1214 | } | 1163 | } |
1215 | 1164 | ||
1216 | bool radeon_connector_encoder_is_dp_bridge(struct drm_connector *connector) | 1165 | u16 radeon_connector_encoder_get_dp_bridge_encoder_id(struct drm_connector *connector) |
1217 | { | 1166 | { |
1218 | struct drm_mode_object *obj; | 1167 | struct drm_mode_object *obj; |
1219 | struct drm_encoder *encoder; | 1168 | struct drm_encoder *encoder; |
1220 | struct radeon_encoder *radeon_encoder; | 1169 | struct radeon_encoder *radeon_encoder; |
1221 | int i; | 1170 | int i; |
1222 | bool found = false; | ||
1223 | 1171 | ||
1224 | for (i = 0; i < DRM_CONNECTOR_MAX_ENCODER; i++) { | 1172 | for (i = 0; i < DRM_CONNECTOR_MAX_ENCODER; i++) { |
1225 | if (connector->encoder_ids[i] == 0) | 1173 | if (connector->encoder_ids[i] == 0) |
@@ -1235,14 +1183,13 @@ bool radeon_connector_encoder_is_dp_bridge(struct drm_connector *connector) | |||
1235 | switch (radeon_encoder->encoder_id) { | 1183 | switch (radeon_encoder->encoder_id) { |
1236 | case ENCODER_OBJECT_ID_TRAVIS: | 1184 | case ENCODER_OBJECT_ID_TRAVIS: |
1237 | case ENCODER_OBJECT_ID_NUTMEG: | 1185 | case ENCODER_OBJECT_ID_NUTMEG: |
1238 | found = true; | 1186 | return radeon_encoder->encoder_id; |
1239 | break; | ||
1240 | default: | 1187 | default: |
1241 | break; | 1188 | break; |
1242 | } | 1189 | } |
1243 | } | 1190 | } |
1244 | 1191 | ||
1245 | return found; | 1192 | return ENCODER_OBJECT_ID_NONE; |
1246 | } | 1193 | } |
1247 | 1194 | ||
1248 | bool radeon_connector_encoder_is_hbr2(struct drm_connector *connector) | 1195 | bool radeon_connector_encoder_is_hbr2(struct drm_connector *connector) |
@@ -1319,7 +1266,8 @@ radeon_dp_detect(struct drm_connector *connector, bool force) | |||
1319 | if (!radeon_dig_connector->edp_on) | 1266 | if (!radeon_dig_connector->edp_on) |
1320 | atombios_set_edp_panel_power(connector, | 1267 | atombios_set_edp_panel_power(connector, |
1321 | ATOM_TRANSMITTER_ACTION_POWER_OFF); | 1268 | ATOM_TRANSMITTER_ACTION_POWER_OFF); |
1322 | } else if (radeon_connector_encoder_is_dp_bridge(connector)) { | 1269 | } else if (radeon_connector_encoder_get_dp_bridge_encoder_id(connector) != |
1270 | ENCODER_OBJECT_ID_NONE) { | ||
1323 | /* DP bridges are always DP */ | 1271 | /* DP bridges are always DP */ |
1324 | radeon_dig_connector->dp_sink_type = CONNECTOR_OBJECT_ID_DISPLAYPORT; | 1272 | radeon_dig_connector->dp_sink_type = CONNECTOR_OBJECT_ID_DISPLAYPORT; |
1325 | /* get the DPCD from the bridge */ | 1273 | /* get the DPCD from the bridge */ |
@@ -1328,8 +1276,7 @@ radeon_dp_detect(struct drm_connector *connector, bool force) | |||
1328 | if (encoder) { | 1276 | if (encoder) { |
1329 | /* setup ddc on the bridge */ | 1277 | /* setup ddc on the bridge */ |
1330 | radeon_atom_ext_encoder_setup_ddc(encoder); | 1278 | radeon_atom_ext_encoder_setup_ddc(encoder); |
1331 | if (radeon_ddc_probe(radeon_connector, | 1279 | if (radeon_ddc_probe(radeon_connector)) /* try DDC */ |
1332 | radeon_connector->requires_extended_probe)) /* try DDC */ | ||
1333 | ret = connector_status_connected; | 1280 | ret = connector_status_connected; |
1334 | else if (radeon_connector->dac_load_detect) { /* try load detection */ | 1281 | else if (radeon_connector->dac_load_detect) { /* try load detection */ |
1335 | struct drm_encoder_helper_funcs *encoder_funcs = encoder->helper_private; | 1282 | struct drm_encoder_helper_funcs *encoder_funcs = encoder->helper_private; |
@@ -1347,8 +1294,7 @@ radeon_dp_detect(struct drm_connector *connector, bool force) | |||
1347 | if (radeon_dp_getdpcd(radeon_connector)) | 1294 | if (radeon_dp_getdpcd(radeon_connector)) |
1348 | ret = connector_status_connected; | 1295 | ret = connector_status_connected; |
1349 | } else { | 1296 | } else { |
1350 | if (radeon_ddc_probe(radeon_connector, | 1297 | if (radeon_ddc_probe(radeon_connector)) |
1351 | radeon_connector->requires_extended_probe)) | ||
1352 | ret = connector_status_connected; | 1298 | ret = connector_status_connected; |
1353 | } | 1299 | } |
1354 | } | 1300 | } |
@@ -1493,9 +1439,7 @@ radeon_add_atom_connector(struct drm_device *dev, | |||
1493 | radeon_connector->shared_ddc = shared_ddc; | 1439 | radeon_connector->shared_ddc = shared_ddc; |
1494 | radeon_connector->connector_object_id = connector_object_id; | 1440 | radeon_connector->connector_object_id = connector_object_id; |
1495 | radeon_connector->hpd = *hpd; | 1441 | radeon_connector->hpd = *hpd; |
1496 | radeon_connector->requires_extended_probe = | 1442 | |
1497 | radeon_connector_needs_extended_probe(rdev, supported_device, | ||
1498 | connector_type); | ||
1499 | radeon_connector->router = *router; | 1443 | radeon_connector->router = *router; |
1500 | if (router->ddc_valid || router->cd_valid) { | 1444 | if (router->ddc_valid || router->cd_valid) { |
1501 | radeon_connector->router_bus = radeon_i2c_lookup(rdev, &router->i2c_info); | 1445 | radeon_connector->router_bus = radeon_i2c_lookup(rdev, &router->i2c_info); |
@@ -1842,9 +1786,7 @@ radeon_add_legacy_connector(struct drm_device *dev, | |||
1842 | radeon_connector->devices = supported_device; | 1786 | radeon_connector->devices = supported_device; |
1843 | radeon_connector->connector_object_id = connector_object_id; | 1787 | radeon_connector->connector_object_id = connector_object_id; |
1844 | radeon_connector->hpd = *hpd; | 1788 | radeon_connector->hpd = *hpd; |
1845 | radeon_connector->requires_extended_probe = | 1789 | |
1846 | radeon_connector_needs_extended_probe(rdev, supported_device, | ||
1847 | connector_type); | ||
1848 | switch (connector_type) { | 1790 | switch (connector_type) { |
1849 | case DRM_MODE_CONNECTOR_VGA: | 1791 | case DRM_MODE_CONNECTOR_VGA: |
1850 | drm_connector_init(dev, &radeon_connector->base, &radeon_vga_connector_funcs, connector_type); | 1792 | drm_connector_init(dev, &radeon_connector->base, &radeon_vga_connector_funcs, connector_type); |
diff --git a/drivers/gpu/drm/radeon/radeon_cs.c b/drivers/gpu/drm/radeon/radeon_cs.c index fae00c0d75aa..ccaa243c1442 100644 --- a/drivers/gpu/drm/radeon/radeon_cs.c +++ b/drivers/gpu/drm/radeon/radeon_cs.c | |||
@@ -222,7 +222,7 @@ int radeon_cs_ioctl(struct drm_device *dev, void *data, struct drm_file *filp) | |||
222 | struct radeon_cs_chunk *ib_chunk; | 222 | struct radeon_cs_chunk *ib_chunk; |
223 | int r; | 223 | int r; |
224 | 224 | ||
225 | mutex_lock(&rdev->cs_mutex); | 225 | radeon_mutex_lock(&rdev->cs_mutex); |
226 | /* initialize parser */ | 226 | /* initialize parser */ |
227 | memset(&parser, 0, sizeof(struct radeon_cs_parser)); | 227 | memset(&parser, 0, sizeof(struct radeon_cs_parser)); |
228 | parser.filp = filp; | 228 | parser.filp = filp; |
@@ -233,14 +233,14 @@ int radeon_cs_ioctl(struct drm_device *dev, void *data, struct drm_file *filp) | |||
233 | if (r) { | 233 | if (r) { |
234 | DRM_ERROR("Failed to initialize parser !\n"); | 234 | DRM_ERROR("Failed to initialize parser !\n"); |
235 | radeon_cs_parser_fini(&parser, r); | 235 | radeon_cs_parser_fini(&parser, r); |
236 | mutex_unlock(&rdev->cs_mutex); | 236 | radeon_mutex_unlock(&rdev->cs_mutex); |
237 | return r; | 237 | return r; |
238 | } | 238 | } |
239 | r = radeon_ib_get(rdev, &parser.ib); | 239 | r = radeon_ib_get(rdev, &parser.ib); |
240 | if (r) { | 240 | if (r) { |
241 | DRM_ERROR("Failed to get ib !\n"); | 241 | DRM_ERROR("Failed to get ib !\n"); |
242 | radeon_cs_parser_fini(&parser, r); | 242 | radeon_cs_parser_fini(&parser, r); |
243 | mutex_unlock(&rdev->cs_mutex); | 243 | radeon_mutex_unlock(&rdev->cs_mutex); |
244 | return r; | 244 | return r; |
245 | } | 245 | } |
246 | r = radeon_cs_parser_relocs(&parser); | 246 | r = radeon_cs_parser_relocs(&parser); |
@@ -248,7 +248,7 @@ int radeon_cs_ioctl(struct drm_device *dev, void *data, struct drm_file *filp) | |||
248 | if (r != -ERESTARTSYS) | 248 | if (r != -ERESTARTSYS) |
249 | DRM_ERROR("Failed to parse relocation %d!\n", r); | 249 | DRM_ERROR("Failed to parse relocation %d!\n", r); |
250 | radeon_cs_parser_fini(&parser, r); | 250 | radeon_cs_parser_fini(&parser, r); |
251 | mutex_unlock(&rdev->cs_mutex); | 251 | radeon_mutex_unlock(&rdev->cs_mutex); |
252 | return r; | 252 | return r; |
253 | } | 253 | } |
254 | /* Copy the packet into the IB, the parser will read from the | 254 | /* Copy the packet into the IB, the parser will read from the |
@@ -260,14 +260,14 @@ int radeon_cs_ioctl(struct drm_device *dev, void *data, struct drm_file *filp) | |||
260 | if (r || parser.parser_error) { | 260 | if (r || parser.parser_error) { |
261 | DRM_ERROR("Invalid command stream !\n"); | 261 | DRM_ERROR("Invalid command stream !\n"); |
262 | radeon_cs_parser_fini(&parser, r); | 262 | radeon_cs_parser_fini(&parser, r); |
263 | mutex_unlock(&rdev->cs_mutex); | 263 | radeon_mutex_unlock(&rdev->cs_mutex); |
264 | return r; | 264 | return r; |
265 | } | 265 | } |
266 | r = radeon_cs_finish_pages(&parser); | 266 | r = radeon_cs_finish_pages(&parser); |
267 | if (r) { | 267 | if (r) { |
268 | DRM_ERROR("Invalid command stream !\n"); | 268 | DRM_ERROR("Invalid command stream !\n"); |
269 | radeon_cs_parser_fini(&parser, r); | 269 | radeon_cs_parser_fini(&parser, r); |
270 | mutex_unlock(&rdev->cs_mutex); | 270 | radeon_mutex_unlock(&rdev->cs_mutex); |
271 | return r; | 271 | return r; |
272 | } | 272 | } |
273 | r = radeon_ib_schedule(rdev, parser.ib); | 273 | r = radeon_ib_schedule(rdev, parser.ib); |
@@ -275,7 +275,7 @@ int radeon_cs_ioctl(struct drm_device *dev, void *data, struct drm_file *filp) | |||
275 | DRM_ERROR("Failed to schedule IB !\n"); | 275 | DRM_ERROR("Failed to schedule IB !\n"); |
276 | } | 276 | } |
277 | radeon_cs_parser_fini(&parser, r); | 277 | radeon_cs_parser_fini(&parser, r); |
278 | mutex_unlock(&rdev->cs_mutex); | 278 | radeon_mutex_unlock(&rdev->cs_mutex); |
279 | return r; | 279 | return r; |
280 | } | 280 | } |
281 | 281 | ||
diff --git a/drivers/gpu/drm/radeon/radeon_device.c b/drivers/gpu/drm/radeon/radeon_device.c index c33bc914d93d..c4d00a171411 100644 --- a/drivers/gpu/drm/radeon/radeon_device.c +++ b/drivers/gpu/drm/radeon/radeon_device.c | |||
@@ -716,7 +716,7 @@ int radeon_device_init(struct radeon_device *rdev, | |||
716 | 716 | ||
717 | /* mutex initialization are all done here so we | 717 | /* mutex initialization are all done here so we |
718 | * can recall function without having locking issues */ | 718 | * can recall function without having locking issues */ |
719 | mutex_init(&rdev->cs_mutex); | 719 | radeon_mutex_init(&rdev->cs_mutex); |
720 | mutex_init(&rdev->ib_pool.mutex); | 720 | mutex_init(&rdev->ib_pool.mutex); |
721 | mutex_init(&rdev->cp.mutex); | 721 | mutex_init(&rdev->cp.mutex); |
722 | mutex_init(&rdev->dc_hw_i2c_mutex); | 722 | mutex_init(&rdev->dc_hw_i2c_mutex); |
@@ -955,6 +955,9 @@ int radeon_gpu_reset(struct radeon_device *rdev) | |||
955 | int r; | 955 | int r; |
956 | int resched; | 956 | int resched; |
957 | 957 | ||
958 | /* Prevent CS ioctl from interfering */ | ||
959 | radeon_mutex_lock(&rdev->cs_mutex); | ||
960 | |||
958 | radeon_save_bios_scratch_regs(rdev); | 961 | radeon_save_bios_scratch_regs(rdev); |
959 | /* block TTM */ | 962 | /* block TTM */ |
960 | resched = ttm_bo_lock_delayed_workqueue(&rdev->mman.bdev); | 963 | resched = ttm_bo_lock_delayed_workqueue(&rdev->mman.bdev); |
@@ -967,10 +970,15 @@ int radeon_gpu_reset(struct radeon_device *rdev) | |||
967 | radeon_restore_bios_scratch_regs(rdev); | 970 | radeon_restore_bios_scratch_regs(rdev); |
968 | drm_helper_resume_force_mode(rdev->ddev); | 971 | drm_helper_resume_force_mode(rdev->ddev); |
969 | ttm_bo_unlock_delayed_workqueue(&rdev->mman.bdev, resched); | 972 | ttm_bo_unlock_delayed_workqueue(&rdev->mman.bdev, resched); |
970 | return 0; | ||
971 | } | 973 | } |
972 | /* bad news, how to tell it to userspace ? */ | 974 | |
973 | dev_info(rdev->dev, "GPU reset failed\n"); | 975 | radeon_mutex_unlock(&rdev->cs_mutex); |
976 | |||
977 | if (r) { | ||
978 | /* bad news, how to tell it to userspace ? */ | ||
979 | dev_info(rdev->dev, "GPU reset failed\n"); | ||
980 | } | ||
981 | |||
974 | return r; | 982 | return r; |
975 | } | 983 | } |
976 | 984 | ||
diff --git a/drivers/gpu/drm/radeon/radeon_display.c b/drivers/gpu/drm/radeon/radeon_display.c index 6adb3e58affd..a22d6e6a49a2 100644 --- a/drivers/gpu/drm/radeon/radeon_display.c +++ b/drivers/gpu/drm/radeon/radeon_display.c | |||
@@ -33,8 +33,6 @@ | |||
33 | #include "drm_crtc_helper.h" | 33 | #include "drm_crtc_helper.h" |
34 | #include "drm_edid.h" | 34 | #include "drm_edid.h" |
35 | 35 | ||
36 | static int radeon_ddc_dump(struct drm_connector *connector); | ||
37 | |||
38 | static void avivo_crtc_load_lut(struct drm_crtc *crtc) | 36 | static void avivo_crtc_load_lut(struct drm_crtc *crtc) |
39 | { | 37 | { |
40 | struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc); | 38 | struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc); |
@@ -669,7 +667,6 @@ static void radeon_print_display_setup(struct drm_device *dev) | |||
669 | static bool radeon_setup_enc_conn(struct drm_device *dev) | 667 | static bool radeon_setup_enc_conn(struct drm_device *dev) |
670 | { | 668 | { |
671 | struct radeon_device *rdev = dev->dev_private; | 669 | struct radeon_device *rdev = dev->dev_private; |
672 | struct drm_connector *drm_connector; | ||
673 | bool ret = false; | 670 | bool ret = false; |
674 | 671 | ||
675 | if (rdev->bios) { | 672 | if (rdev->bios) { |
@@ -689,8 +686,6 @@ static bool radeon_setup_enc_conn(struct drm_device *dev) | |||
689 | if (ret) { | 686 | if (ret) { |
690 | radeon_setup_encoder_clones(dev); | 687 | radeon_setup_encoder_clones(dev); |
691 | radeon_print_display_setup(dev); | 688 | radeon_print_display_setup(dev); |
692 | list_for_each_entry(drm_connector, &dev->mode_config.connector_list, head) | ||
693 | radeon_ddc_dump(drm_connector); | ||
694 | } | 689 | } |
695 | 690 | ||
696 | return ret; | 691 | return ret; |
@@ -708,7 +703,8 @@ int radeon_ddc_get_modes(struct radeon_connector *radeon_connector) | |||
708 | 703 | ||
709 | if ((radeon_connector->base.connector_type == DRM_MODE_CONNECTOR_DisplayPort) || | 704 | if ((radeon_connector->base.connector_type == DRM_MODE_CONNECTOR_DisplayPort) || |
710 | (radeon_connector->base.connector_type == DRM_MODE_CONNECTOR_eDP) || | 705 | (radeon_connector->base.connector_type == DRM_MODE_CONNECTOR_eDP) || |
711 | radeon_connector_encoder_is_dp_bridge(&radeon_connector->base)) { | 706 | (radeon_connector_encoder_get_dp_bridge_encoder_id(&radeon_connector->base) != |
707 | ENCODER_OBJECT_ID_NONE)) { | ||
712 | struct radeon_connector_atom_dig *dig = radeon_connector->con_priv; | 708 | struct radeon_connector_atom_dig *dig = radeon_connector->con_priv; |
713 | 709 | ||
714 | if ((dig->dp_sink_type == CONNECTOR_OBJECT_ID_DISPLAYPORT || | 710 | if ((dig->dp_sink_type == CONNECTOR_OBJECT_ID_DISPLAYPORT || |
@@ -743,34 +739,6 @@ int radeon_ddc_get_modes(struct radeon_connector *radeon_connector) | |||
743 | return 0; | 739 | return 0; |
744 | } | 740 | } |
745 | 741 | ||
746 | static int radeon_ddc_dump(struct drm_connector *connector) | ||
747 | { | ||
748 | struct edid *edid; | ||
749 | struct radeon_connector *radeon_connector = to_radeon_connector(connector); | ||
750 | int ret = 0; | ||
751 | |||
752 | /* on hw with routers, select right port */ | ||
753 | if (radeon_connector->router.ddc_valid) | ||
754 | radeon_router_select_ddc_port(radeon_connector); | ||
755 | |||
756 | if (!radeon_connector->ddc_bus) | ||
757 | return -1; | ||
758 | edid = drm_get_edid(connector, &radeon_connector->ddc_bus->adapter); | ||
759 | /* Log EDID retrieval status here. In particular with regard to | ||
760 | * connectors with requires_extended_probe flag set, that will prevent | ||
761 | * function radeon_dvi_detect() to fetch EDID on this connector, | ||
762 | * as long as there is no valid EDID header found */ | ||
763 | if (edid) { | ||
764 | DRM_INFO("Radeon display connector %s: Found valid EDID", | ||
765 | drm_get_connector_name(connector)); | ||
766 | kfree(edid); | ||
767 | } else { | ||
768 | DRM_INFO("Radeon display connector %s: No monitor connected or invalid EDID", | ||
769 | drm_get_connector_name(connector)); | ||
770 | } | ||
771 | return ret; | ||
772 | } | ||
773 | |||
774 | /* avivo */ | 742 | /* avivo */ |
775 | static void avivo_get_fb_div(struct radeon_pll *pll, | 743 | static void avivo_get_fb_div(struct radeon_pll *pll, |
776 | u32 target_clock, | 744 | u32 target_clock, |
diff --git a/drivers/gpu/drm/radeon/radeon_drv.c b/drivers/gpu/drm/radeon/radeon_drv.c index 969933833ccb..a0b35e909489 100644 --- a/drivers/gpu/drm/radeon/radeon_drv.c +++ b/drivers/gpu/drm/radeon/radeon_drv.c | |||
@@ -119,6 +119,7 @@ int radeon_audio = 0; | |||
119 | int radeon_disp_priority = 0; | 119 | int radeon_disp_priority = 0; |
120 | int radeon_hw_i2c = 0; | 120 | int radeon_hw_i2c = 0; |
121 | int radeon_pcie_gen2 = 0; | 121 | int radeon_pcie_gen2 = 0; |
122 | int radeon_msi = -1; | ||
122 | 123 | ||
123 | MODULE_PARM_DESC(no_wb, "Disable AGP writeback for scratch registers"); | 124 | MODULE_PARM_DESC(no_wb, "Disable AGP writeback for scratch registers"); |
124 | module_param_named(no_wb, radeon_no_wb, int, 0444); | 125 | module_param_named(no_wb, radeon_no_wb, int, 0444); |
@@ -165,6 +166,9 @@ module_param_named(hw_i2c, radeon_hw_i2c, int, 0444); | |||
165 | MODULE_PARM_DESC(pcie_gen2, "PCIE Gen2 mode (1 = enable)"); | 166 | MODULE_PARM_DESC(pcie_gen2, "PCIE Gen2 mode (1 = enable)"); |
166 | module_param_named(pcie_gen2, radeon_pcie_gen2, int, 0444); | 167 | module_param_named(pcie_gen2, radeon_pcie_gen2, int, 0444); |
167 | 168 | ||
169 | MODULE_PARM_DESC(msi, "MSI support (1 = enable, 0 = disable, -1 = auto)"); | ||
170 | module_param_named(msi, radeon_msi, int, 0444); | ||
171 | |||
168 | static int radeon_suspend(struct drm_device *dev, pm_message_t state) | 172 | static int radeon_suspend(struct drm_device *dev, pm_message_t state) |
169 | { | 173 | { |
170 | drm_radeon_private_t *dev_priv = dev->dev_private; | 174 | drm_radeon_private_t *dev_priv = dev->dev_private; |
diff --git a/drivers/gpu/drm/radeon/radeon_encoders.c b/drivers/gpu/drm/radeon/radeon_encoders.c index eb3f6dc6df83..06e413e6a920 100644 --- a/drivers/gpu/drm/radeon/radeon_encoders.c +++ b/drivers/gpu/drm/radeon/radeon_encoders.c | |||
@@ -29,12 +29,6 @@ | |||
29 | #include "radeon.h" | 29 | #include "radeon.h" |
30 | #include "atom.h" | 30 | #include "atom.h" |
31 | 31 | ||
32 | extern int atom_debug; | ||
33 | |||
34 | /* evil but including atombios.h is much worse */ | ||
35 | bool radeon_atom_get_tv_timings(struct radeon_device *rdev, int index, | ||
36 | struct drm_display_mode *mode); | ||
37 | |||
38 | static uint32_t radeon_encoder_clones(struct drm_encoder *encoder) | 32 | static uint32_t radeon_encoder_clones(struct drm_encoder *encoder) |
39 | { | 33 | { |
40 | struct drm_device *dev = encoder->dev; | 34 | struct drm_device *dev = encoder->dev; |
@@ -156,27 +150,6 @@ radeon_get_encoder_enum(struct drm_device *dev, uint32_t supported_device, uint8 | |||
156 | return ret; | 150 | return ret; |
157 | } | 151 | } |
158 | 152 | ||
159 | static inline bool radeon_encoder_is_digital(struct drm_encoder *encoder) | ||
160 | { | ||
161 | struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder); | ||
162 | switch (radeon_encoder->encoder_id) { | ||
163 | case ENCODER_OBJECT_ID_INTERNAL_LVDS: | ||
164 | case ENCODER_OBJECT_ID_INTERNAL_TMDS1: | ||
165 | case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_TMDS1: | ||
166 | case ENCODER_OBJECT_ID_INTERNAL_LVTM1: | ||
167 | case ENCODER_OBJECT_ID_INTERNAL_DVO1: | ||
168 | case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DVO1: | ||
169 | case ENCODER_OBJECT_ID_INTERNAL_DDI: | ||
170 | case ENCODER_OBJECT_ID_INTERNAL_UNIPHY: | ||
171 | case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_LVTMA: | ||
172 | case ENCODER_OBJECT_ID_INTERNAL_UNIPHY1: | ||
173 | case ENCODER_OBJECT_ID_INTERNAL_UNIPHY2: | ||
174 | return true; | ||
175 | default: | ||
176 | return false; | ||
177 | } | ||
178 | } | ||
179 | |||
180 | void | 153 | void |
181 | radeon_link_encoder_connector(struct drm_device *dev) | 154 | radeon_link_encoder_connector(struct drm_device *dev) |
182 | { | 155 | { |
@@ -229,23 +202,7 @@ radeon_get_connector_for_encoder(struct drm_encoder *encoder) | |||
229 | return NULL; | 202 | return NULL; |
230 | } | 203 | } |
231 | 204 | ||
232 | static struct drm_connector * | 205 | struct drm_encoder *radeon_get_external_encoder(struct drm_encoder *encoder) |
233 | radeon_get_connector_for_encoder_init(struct drm_encoder *encoder) | ||
234 | { | ||
235 | struct drm_device *dev = encoder->dev; | ||
236 | struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder); | ||
237 | struct drm_connector *connector; | ||
238 | struct radeon_connector *radeon_connector; | ||
239 | |||
240 | list_for_each_entry(connector, &dev->mode_config.connector_list, head) { | ||
241 | radeon_connector = to_radeon_connector(connector); | ||
242 | if (radeon_encoder->devices & radeon_connector->devices) | ||
243 | return connector; | ||
244 | } | ||
245 | return NULL; | ||
246 | } | ||
247 | |||
248 | struct drm_encoder *radeon_atom_get_external_encoder(struct drm_encoder *encoder) | ||
249 | { | 206 | { |
250 | struct drm_device *dev = encoder->dev; | 207 | struct drm_device *dev = encoder->dev; |
251 | struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder); | 208 | struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder); |
@@ -266,9 +223,9 @@ struct drm_encoder *radeon_atom_get_external_encoder(struct drm_encoder *encoder | |||
266 | return NULL; | 223 | return NULL; |
267 | } | 224 | } |
268 | 225 | ||
269 | bool radeon_encoder_is_dp_bridge(struct drm_encoder *encoder) | 226 | u16 radeon_encoder_get_dp_bridge_encoder_id(struct drm_encoder *encoder) |
270 | { | 227 | { |
271 | struct drm_encoder *other_encoder = radeon_atom_get_external_encoder(encoder); | 228 | struct drm_encoder *other_encoder = radeon_get_external_encoder(encoder); |
272 | 229 | ||
273 | if (other_encoder) { | 230 | if (other_encoder) { |
274 | struct radeon_encoder *radeon_encoder = to_radeon_encoder(other_encoder); | 231 | struct radeon_encoder *radeon_encoder = to_radeon_encoder(other_encoder); |
@@ -332,2105 +289,3 @@ void radeon_panel_mode_fixup(struct drm_encoder *encoder, | |||
332 | 289 | ||
333 | } | 290 | } |
334 | 291 | ||
335 | static bool radeon_atom_mode_fixup(struct drm_encoder *encoder, | ||
336 | struct drm_display_mode *mode, | ||
337 | struct drm_display_mode *adjusted_mode) | ||
338 | { | ||
339 | struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder); | ||
340 | struct drm_device *dev = encoder->dev; | ||
341 | struct radeon_device *rdev = dev->dev_private; | ||
342 | |||
343 | /* set the active encoder to connector routing */ | ||
344 | radeon_encoder_set_active_device(encoder); | ||
345 | drm_mode_set_crtcinfo(adjusted_mode, 0); | ||
346 | |||
347 | /* hw bug */ | ||
348 | if ((mode->flags & DRM_MODE_FLAG_INTERLACE) | ||
349 | && (mode->crtc_vsync_start < (mode->crtc_vdisplay + 2))) | ||
350 | adjusted_mode->crtc_vsync_start = adjusted_mode->crtc_vdisplay + 2; | ||
351 | |||
352 | /* get the native mode for LVDS */ | ||
353 | if (radeon_encoder->active_device & (ATOM_DEVICE_LCD_SUPPORT)) | ||
354 | radeon_panel_mode_fixup(encoder, adjusted_mode); | ||
355 | |||
356 | /* get the native mode for TV */ | ||
357 | if (radeon_encoder->active_device & (ATOM_DEVICE_TV_SUPPORT)) { | ||
358 | struct radeon_encoder_atom_dac *tv_dac = radeon_encoder->enc_priv; | ||
359 | if (tv_dac) { | ||
360 | if (tv_dac->tv_std == TV_STD_NTSC || | ||
361 | tv_dac->tv_std == TV_STD_NTSC_J || | ||
362 | tv_dac->tv_std == TV_STD_PAL_M) | ||
363 | radeon_atom_get_tv_timings(rdev, 0, adjusted_mode); | ||
364 | else | ||
365 | radeon_atom_get_tv_timings(rdev, 1, adjusted_mode); | ||
366 | } | ||
367 | } | ||
368 | |||
369 | if (ASIC_IS_DCE3(rdev) && | ||
370 | ((radeon_encoder->active_device & (ATOM_DEVICE_DFP_SUPPORT | ATOM_DEVICE_LCD_SUPPORT)) || | ||
371 | radeon_encoder_is_dp_bridge(encoder))) { | ||
372 | struct drm_connector *connector = radeon_get_connector_for_encoder(encoder); | ||
373 | radeon_dp_set_link_config(connector, mode); | ||
374 | } | ||
375 | |||
376 | return true; | ||
377 | } | ||
378 | |||
379 | static void | ||
380 | atombios_dac_setup(struct drm_encoder *encoder, int action) | ||
381 | { | ||
382 | struct drm_device *dev = encoder->dev; | ||
383 | struct radeon_device *rdev = dev->dev_private; | ||
384 | struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder); | ||
385 | DAC_ENCODER_CONTROL_PS_ALLOCATION args; | ||
386 | int index = 0; | ||
387 | struct radeon_encoder_atom_dac *dac_info = radeon_encoder->enc_priv; | ||
388 | |||
389 | memset(&args, 0, sizeof(args)); | ||
390 | |||
391 | switch (radeon_encoder->encoder_id) { | ||
392 | case ENCODER_OBJECT_ID_INTERNAL_DAC1: | ||
393 | case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC1: | ||
394 | index = GetIndexIntoMasterTable(COMMAND, DAC1EncoderControl); | ||
395 | break; | ||
396 | case ENCODER_OBJECT_ID_INTERNAL_DAC2: | ||
397 | case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC2: | ||
398 | index = GetIndexIntoMasterTable(COMMAND, DAC2EncoderControl); | ||
399 | break; | ||
400 | } | ||
401 | |||
402 | args.ucAction = action; | ||
403 | |||
404 | if (radeon_encoder->active_device & (ATOM_DEVICE_CRT_SUPPORT)) | ||
405 | args.ucDacStandard = ATOM_DAC1_PS2; | ||
406 | else if (radeon_encoder->active_device & (ATOM_DEVICE_CV_SUPPORT)) | ||
407 | args.ucDacStandard = ATOM_DAC1_CV; | ||
408 | else { | ||
409 | switch (dac_info->tv_std) { | ||
410 | case TV_STD_PAL: | ||
411 | case TV_STD_PAL_M: | ||
412 | case TV_STD_SCART_PAL: | ||
413 | case TV_STD_SECAM: | ||
414 | case TV_STD_PAL_CN: | ||
415 | args.ucDacStandard = ATOM_DAC1_PAL; | ||
416 | break; | ||
417 | case TV_STD_NTSC: | ||
418 | case TV_STD_NTSC_J: | ||
419 | case TV_STD_PAL_60: | ||
420 | default: | ||
421 | args.ucDacStandard = ATOM_DAC1_NTSC; | ||
422 | break; | ||
423 | } | ||
424 | } | ||
425 | args.usPixelClock = cpu_to_le16(radeon_encoder->pixel_clock / 10); | ||
426 | |||
427 | atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args); | ||
428 | |||
429 | } | ||
430 | |||
431 | static void | ||
432 | atombios_tv_setup(struct drm_encoder *encoder, int action) | ||
433 | { | ||
434 | struct drm_device *dev = encoder->dev; | ||
435 | struct radeon_device *rdev = dev->dev_private; | ||
436 | struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder); | ||
437 | TV_ENCODER_CONTROL_PS_ALLOCATION args; | ||
438 | int index = 0; | ||
439 | struct radeon_encoder_atom_dac *dac_info = radeon_encoder->enc_priv; | ||
440 | |||
441 | memset(&args, 0, sizeof(args)); | ||
442 | |||
443 | index = GetIndexIntoMasterTable(COMMAND, TVEncoderControl); | ||
444 | |||
445 | args.sTVEncoder.ucAction = action; | ||
446 | |||
447 | if (radeon_encoder->active_device & (ATOM_DEVICE_CV_SUPPORT)) | ||
448 | args.sTVEncoder.ucTvStandard = ATOM_TV_CV; | ||
449 | else { | ||
450 | switch (dac_info->tv_std) { | ||
451 | case TV_STD_NTSC: | ||
452 | args.sTVEncoder.ucTvStandard = ATOM_TV_NTSC; | ||
453 | break; | ||
454 | case TV_STD_PAL: | ||
455 | args.sTVEncoder.ucTvStandard = ATOM_TV_PAL; | ||
456 | break; | ||
457 | case TV_STD_PAL_M: | ||
458 | args.sTVEncoder.ucTvStandard = ATOM_TV_PALM; | ||
459 | break; | ||
460 | case TV_STD_PAL_60: | ||
461 | args.sTVEncoder.ucTvStandard = ATOM_TV_PAL60; | ||
462 | break; | ||
463 | case TV_STD_NTSC_J: | ||
464 | args.sTVEncoder.ucTvStandard = ATOM_TV_NTSCJ; | ||
465 | break; | ||
466 | case TV_STD_SCART_PAL: | ||
467 | args.sTVEncoder.ucTvStandard = ATOM_TV_PAL; /* ??? */ | ||
468 | break; | ||
469 | case TV_STD_SECAM: | ||
470 | args.sTVEncoder.ucTvStandard = ATOM_TV_SECAM; | ||
471 | break; | ||
472 | case TV_STD_PAL_CN: | ||
473 | args.sTVEncoder.ucTvStandard = ATOM_TV_PALCN; | ||
474 | break; | ||
475 | default: | ||
476 | args.sTVEncoder.ucTvStandard = ATOM_TV_NTSC; | ||
477 | break; | ||
478 | } | ||
479 | } | ||
480 | |||
481 | args.sTVEncoder.usPixelClock = cpu_to_le16(radeon_encoder->pixel_clock / 10); | ||
482 | |||
483 | atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args); | ||
484 | |||
485 | } | ||
486 | |||
487 | union dvo_encoder_control { | ||
488 | ENABLE_EXTERNAL_TMDS_ENCODER_PS_ALLOCATION ext_tmds; | ||
489 | DVO_ENCODER_CONTROL_PS_ALLOCATION dvo; | ||
490 | DVO_ENCODER_CONTROL_PS_ALLOCATION_V3 dvo_v3; | ||
491 | }; | ||
492 | |||
493 | void | ||
494 | atombios_dvo_setup(struct drm_encoder *encoder, int action) | ||
495 | { | ||
496 | struct drm_device *dev = encoder->dev; | ||
497 | struct radeon_device *rdev = dev->dev_private; | ||
498 | struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder); | ||
499 | union dvo_encoder_control args; | ||
500 | int index = GetIndexIntoMasterTable(COMMAND, DVOEncoderControl); | ||
501 | |||
502 | memset(&args, 0, sizeof(args)); | ||
503 | |||
504 | if (ASIC_IS_DCE3(rdev)) { | ||
505 | /* DCE3+ */ | ||
506 | args.dvo_v3.ucAction = action; | ||
507 | args.dvo_v3.usPixelClock = cpu_to_le16(radeon_encoder->pixel_clock / 10); | ||
508 | args.dvo_v3.ucDVOConfig = 0; /* XXX */ | ||
509 | } else if (ASIC_IS_DCE2(rdev)) { | ||
510 | /* DCE2 (pre-DCE3 R6xx, RS600/690/740 */ | ||
511 | args.dvo.sDVOEncoder.ucAction = action; | ||
512 | args.dvo.sDVOEncoder.usPixelClock = cpu_to_le16(radeon_encoder->pixel_clock / 10); | ||
513 | /* DFP1, CRT1, TV1 depending on the type of port */ | ||
514 | args.dvo.sDVOEncoder.ucDeviceType = ATOM_DEVICE_DFP1_INDEX; | ||
515 | |||
516 | if (radeon_encoder->pixel_clock > 165000) | ||
517 | args.dvo.sDVOEncoder.usDevAttr.sDigAttrib.ucAttribute |= PANEL_ENCODER_MISC_DUAL; | ||
518 | } else { | ||
519 | /* R4xx, R5xx */ | ||
520 | args.ext_tmds.sXTmdsEncoder.ucEnable = action; | ||
521 | |||
522 | if (radeon_encoder->pixel_clock > 165000) | ||
523 | args.ext_tmds.sXTmdsEncoder.ucMisc |= PANEL_ENCODER_MISC_DUAL; | ||
524 | |||
525 | /*if (pScrn->rgbBits == 8)*/ | ||
526 | args.ext_tmds.sXTmdsEncoder.ucMisc |= ATOM_PANEL_MISC_888RGB; | ||
527 | } | ||
528 | |||
529 | atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args); | ||
530 | } | ||
531 | |||
532 | union lvds_encoder_control { | ||
533 | LVDS_ENCODER_CONTROL_PS_ALLOCATION v1; | ||
534 | LVDS_ENCODER_CONTROL_PS_ALLOCATION_V2 v2; | ||
535 | }; | ||
536 | |||
537 | void | ||
538 | atombios_digital_setup(struct drm_encoder *encoder, int action) | ||
539 | { | ||
540 | struct drm_device *dev = encoder->dev; | ||
541 | struct radeon_device *rdev = dev->dev_private; | ||
542 | struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder); | ||
543 | struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv; | ||
544 | union lvds_encoder_control args; | ||
545 | int index = 0; | ||
546 | int hdmi_detected = 0; | ||
547 | uint8_t frev, crev; | ||
548 | |||
549 | if (!dig) | ||
550 | return; | ||
551 | |||
552 | if (atombios_get_encoder_mode(encoder) == ATOM_ENCODER_MODE_HDMI) | ||
553 | hdmi_detected = 1; | ||
554 | |||
555 | memset(&args, 0, sizeof(args)); | ||
556 | |||
557 | switch (radeon_encoder->encoder_id) { | ||
558 | case ENCODER_OBJECT_ID_INTERNAL_LVDS: | ||
559 | index = GetIndexIntoMasterTable(COMMAND, LVDSEncoderControl); | ||
560 | break; | ||
561 | case ENCODER_OBJECT_ID_INTERNAL_TMDS1: | ||
562 | case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_TMDS1: | ||
563 | index = GetIndexIntoMasterTable(COMMAND, TMDS1EncoderControl); | ||
564 | break; | ||
565 | case ENCODER_OBJECT_ID_INTERNAL_LVTM1: | ||
566 | if (radeon_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT)) | ||
567 | index = GetIndexIntoMasterTable(COMMAND, LVDSEncoderControl); | ||
568 | else | ||
569 | index = GetIndexIntoMasterTable(COMMAND, TMDS2EncoderControl); | ||
570 | break; | ||
571 | } | ||
572 | |||
573 | if (!atom_parse_cmd_header(rdev->mode_info.atom_context, index, &frev, &crev)) | ||
574 | return; | ||
575 | |||
576 | switch (frev) { | ||
577 | case 1: | ||
578 | case 2: | ||
579 | switch (crev) { | ||
580 | case 1: | ||
581 | args.v1.ucMisc = 0; | ||
582 | args.v1.ucAction = action; | ||
583 | if (hdmi_detected) | ||
584 | args.v1.ucMisc |= PANEL_ENCODER_MISC_HDMI_TYPE; | ||
585 | args.v1.usPixelClock = cpu_to_le16(radeon_encoder->pixel_clock / 10); | ||
586 | if (radeon_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT)) { | ||
587 | if (dig->lcd_misc & ATOM_PANEL_MISC_DUAL) | ||
588 | args.v1.ucMisc |= PANEL_ENCODER_MISC_DUAL; | ||
589 | if (dig->lcd_misc & ATOM_PANEL_MISC_888RGB) | ||
590 | args.v1.ucMisc |= ATOM_PANEL_MISC_888RGB; | ||
591 | } else { | ||
592 | if (dig->linkb) | ||
593 | args.v1.ucMisc |= PANEL_ENCODER_MISC_TMDS_LINKB; | ||
594 | if (radeon_encoder->pixel_clock > 165000) | ||
595 | args.v1.ucMisc |= PANEL_ENCODER_MISC_DUAL; | ||
596 | /*if (pScrn->rgbBits == 8) */ | ||
597 | args.v1.ucMisc |= ATOM_PANEL_MISC_888RGB; | ||
598 | } | ||
599 | break; | ||
600 | case 2: | ||
601 | case 3: | ||
602 | args.v2.ucMisc = 0; | ||
603 | args.v2.ucAction = action; | ||
604 | if (crev == 3) { | ||
605 | if (dig->coherent_mode) | ||
606 | args.v2.ucMisc |= PANEL_ENCODER_MISC_COHERENT; | ||
607 | } | ||
608 | if (hdmi_detected) | ||
609 | args.v2.ucMisc |= PANEL_ENCODER_MISC_HDMI_TYPE; | ||
610 | args.v2.usPixelClock = cpu_to_le16(radeon_encoder->pixel_clock / 10); | ||
611 | args.v2.ucTruncate = 0; | ||
612 | args.v2.ucSpatial = 0; | ||
613 | args.v2.ucTemporal = 0; | ||
614 | args.v2.ucFRC = 0; | ||
615 | if (radeon_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT)) { | ||
616 | if (dig->lcd_misc & ATOM_PANEL_MISC_DUAL) | ||
617 | args.v2.ucMisc |= PANEL_ENCODER_MISC_DUAL; | ||
618 | if (dig->lcd_misc & ATOM_PANEL_MISC_SPATIAL) { | ||
619 | args.v2.ucSpatial = PANEL_ENCODER_SPATIAL_DITHER_EN; | ||
620 | if (dig->lcd_misc & ATOM_PANEL_MISC_888RGB) | ||
621 | args.v2.ucSpatial |= PANEL_ENCODER_SPATIAL_DITHER_DEPTH; | ||
622 | } | ||
623 | if (dig->lcd_misc & ATOM_PANEL_MISC_TEMPORAL) { | ||
624 | args.v2.ucTemporal = PANEL_ENCODER_TEMPORAL_DITHER_EN; | ||
625 | if (dig->lcd_misc & ATOM_PANEL_MISC_888RGB) | ||
626 | args.v2.ucTemporal |= PANEL_ENCODER_TEMPORAL_DITHER_DEPTH; | ||
627 | if (((dig->lcd_misc >> ATOM_PANEL_MISC_GREY_LEVEL_SHIFT) & 0x3) == 2) | ||
628 | args.v2.ucTemporal |= PANEL_ENCODER_TEMPORAL_LEVEL_4; | ||
629 | } | ||
630 | } else { | ||
631 | if (dig->linkb) | ||
632 | args.v2.ucMisc |= PANEL_ENCODER_MISC_TMDS_LINKB; | ||
633 | if (radeon_encoder->pixel_clock > 165000) | ||
634 | args.v2.ucMisc |= PANEL_ENCODER_MISC_DUAL; | ||
635 | } | ||
636 | break; | ||
637 | default: | ||
638 | DRM_ERROR("Unknown table version %d, %d\n", frev, crev); | ||
639 | break; | ||
640 | } | ||
641 | break; | ||
642 | default: | ||
643 | DRM_ERROR("Unknown table version %d, %d\n", frev, crev); | ||
644 | break; | ||
645 | } | ||
646 | |||
647 | atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args); | ||
648 | } | ||
649 | |||
650 | int | ||
651 | atombios_get_encoder_mode(struct drm_encoder *encoder) | ||
652 | { | ||
653 | struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder); | ||
654 | struct drm_device *dev = encoder->dev; | ||
655 | struct radeon_device *rdev = dev->dev_private; | ||
656 | struct drm_connector *connector; | ||
657 | struct radeon_connector *radeon_connector; | ||
658 | struct radeon_connector_atom_dig *dig_connector; | ||
659 | |||
660 | /* dp bridges are always DP */ | ||
661 | if (radeon_encoder_is_dp_bridge(encoder)) | ||
662 | return ATOM_ENCODER_MODE_DP; | ||
663 | |||
664 | /* DVO is always DVO */ | ||
665 | if (radeon_encoder->encoder_id == ATOM_ENCODER_MODE_DVO) | ||
666 | return ATOM_ENCODER_MODE_DVO; | ||
667 | |||
668 | connector = radeon_get_connector_for_encoder(encoder); | ||
669 | /* if we don't have an active device yet, just use one of | ||
670 | * the connectors tied to the encoder. | ||
671 | */ | ||
672 | if (!connector) | ||
673 | connector = radeon_get_connector_for_encoder_init(encoder); | ||
674 | radeon_connector = to_radeon_connector(connector); | ||
675 | |||
676 | switch (connector->connector_type) { | ||
677 | case DRM_MODE_CONNECTOR_DVII: | ||
678 | case DRM_MODE_CONNECTOR_HDMIB: /* HDMI-B is basically DL-DVI; analog works fine */ | ||
679 | if (drm_detect_monitor_audio(radeon_connector->edid) && radeon_audio) { | ||
680 | /* fix me */ | ||
681 | if (ASIC_IS_DCE4(rdev)) | ||
682 | return ATOM_ENCODER_MODE_DVI; | ||
683 | else | ||
684 | return ATOM_ENCODER_MODE_HDMI; | ||
685 | } else if (radeon_connector->use_digital) | ||
686 | return ATOM_ENCODER_MODE_DVI; | ||
687 | else | ||
688 | return ATOM_ENCODER_MODE_CRT; | ||
689 | break; | ||
690 | case DRM_MODE_CONNECTOR_DVID: | ||
691 | case DRM_MODE_CONNECTOR_HDMIA: | ||
692 | default: | ||
693 | if (drm_detect_monitor_audio(radeon_connector->edid) && radeon_audio) { | ||
694 | /* fix me */ | ||
695 | if (ASIC_IS_DCE4(rdev)) | ||
696 | return ATOM_ENCODER_MODE_DVI; | ||
697 | else | ||
698 | return ATOM_ENCODER_MODE_HDMI; | ||
699 | } else | ||
700 | return ATOM_ENCODER_MODE_DVI; | ||
701 | break; | ||
702 | case DRM_MODE_CONNECTOR_LVDS: | ||
703 | return ATOM_ENCODER_MODE_LVDS; | ||
704 | break; | ||
705 | case DRM_MODE_CONNECTOR_DisplayPort: | ||
706 | dig_connector = radeon_connector->con_priv; | ||
707 | if ((dig_connector->dp_sink_type == CONNECTOR_OBJECT_ID_DISPLAYPORT) || | ||
708 | (dig_connector->dp_sink_type == CONNECTOR_OBJECT_ID_eDP)) | ||
709 | return ATOM_ENCODER_MODE_DP; | ||
710 | else if (drm_detect_monitor_audio(radeon_connector->edid) && radeon_audio) { | ||
711 | /* fix me */ | ||
712 | if (ASIC_IS_DCE4(rdev)) | ||
713 | return ATOM_ENCODER_MODE_DVI; | ||
714 | else | ||
715 | return ATOM_ENCODER_MODE_HDMI; | ||
716 | } else | ||
717 | return ATOM_ENCODER_MODE_DVI; | ||
718 | break; | ||
719 | case DRM_MODE_CONNECTOR_eDP: | ||
720 | return ATOM_ENCODER_MODE_DP; | ||
721 | case DRM_MODE_CONNECTOR_DVIA: | ||
722 | case DRM_MODE_CONNECTOR_VGA: | ||
723 | return ATOM_ENCODER_MODE_CRT; | ||
724 | break; | ||
725 | case DRM_MODE_CONNECTOR_Composite: | ||
726 | case DRM_MODE_CONNECTOR_SVIDEO: | ||
727 | case DRM_MODE_CONNECTOR_9PinDIN: | ||
728 | /* fix me */ | ||
729 | return ATOM_ENCODER_MODE_TV; | ||
730 | /*return ATOM_ENCODER_MODE_CV;*/ | ||
731 | break; | ||
732 | } | ||
733 | } | ||
734 | |||
735 | /* | ||
736 | * DIG Encoder/Transmitter Setup | ||
737 | * | ||
738 | * DCE 3.0/3.1 | ||
739 | * - 2 DIG transmitter blocks. UNIPHY (links A and B) and LVTMA. | ||
740 | * Supports up to 3 digital outputs | ||
741 | * - 2 DIG encoder blocks. | ||
742 | * DIG1 can drive UNIPHY link A or link B | ||
743 | * DIG2 can drive UNIPHY link B or LVTMA | ||
744 | * | ||
745 | * DCE 3.2 | ||
746 | * - 3 DIG transmitter blocks. UNIPHY0/1/2 (links A and B). | ||
747 | * Supports up to 5 digital outputs | ||
748 | * - 2 DIG encoder blocks. | ||
749 | * DIG1/2 can drive UNIPHY0/1/2 link A or link B | ||
750 | * | ||
751 | * DCE 4.0/5.0 | ||
752 | * - 3 DIG transmitter blocks UNIPHY0/1/2 (links A and B). | ||
753 | * Supports up to 6 digital outputs | ||
754 | * - 6 DIG encoder blocks. | ||
755 | * - DIG to PHY mapping is hardcoded | ||
756 | * DIG1 drives UNIPHY0 link A, A+B | ||
757 | * DIG2 drives UNIPHY0 link B | ||
758 | * DIG3 drives UNIPHY1 link A, A+B | ||
759 | * DIG4 drives UNIPHY1 link B | ||
760 | * DIG5 drives UNIPHY2 link A, A+B | ||
761 | * DIG6 drives UNIPHY2 link B | ||
762 | * | ||
763 | * DCE 4.1 | ||
764 | * - 3 DIG transmitter blocks UNIPHY0/1/2 (links A and B). | ||
765 | * Supports up to 6 digital outputs | ||
766 | * - 2 DIG encoder blocks. | ||
767 | * DIG1/2 can drive UNIPHY0/1/2 link A or link B | ||
768 | * | ||
769 | * Routing | ||
770 | * crtc -> dig encoder -> UNIPHY/LVTMA (1 or 2 links) | ||
771 | * Examples: | ||
772 | * crtc0 -> dig2 -> LVTMA links A+B -> TMDS/HDMI | ||
773 | * crtc1 -> dig1 -> UNIPHY0 link B -> DP | ||
774 | * crtc0 -> dig1 -> UNIPHY2 link A -> LVDS | ||
775 | * crtc1 -> dig2 -> UNIPHY1 link B+A -> TMDS/HDMI | ||
776 | */ | ||
777 | |||
778 | union dig_encoder_control { | ||
779 | DIG_ENCODER_CONTROL_PS_ALLOCATION v1; | ||
780 | DIG_ENCODER_CONTROL_PARAMETERS_V2 v2; | ||
781 | DIG_ENCODER_CONTROL_PARAMETERS_V3 v3; | ||
782 | DIG_ENCODER_CONTROL_PARAMETERS_V4 v4; | ||
783 | }; | ||
784 | |||
785 | void | ||
786 | atombios_dig_encoder_setup(struct drm_encoder *encoder, int action, int panel_mode) | ||
787 | { | ||
788 | struct drm_device *dev = encoder->dev; | ||
789 | struct radeon_device *rdev = dev->dev_private; | ||
790 | struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder); | ||
791 | struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv; | ||
792 | struct drm_connector *connector = radeon_get_connector_for_encoder(encoder); | ||
793 | union dig_encoder_control args; | ||
794 | int index = 0; | ||
795 | uint8_t frev, crev; | ||
796 | int dp_clock = 0; | ||
797 | int dp_lane_count = 0; | ||
798 | int hpd_id = RADEON_HPD_NONE; | ||
799 | int bpc = 8; | ||
800 | |||
801 | if (connector) { | ||
802 | struct radeon_connector *radeon_connector = to_radeon_connector(connector); | ||
803 | struct radeon_connector_atom_dig *dig_connector = | ||
804 | radeon_connector->con_priv; | ||
805 | |||
806 | dp_clock = dig_connector->dp_clock; | ||
807 | dp_lane_count = dig_connector->dp_lane_count; | ||
808 | hpd_id = radeon_connector->hpd.hpd; | ||
809 | bpc = connector->display_info.bpc; | ||
810 | } | ||
811 | |||
812 | /* no dig encoder assigned */ | ||
813 | if (dig->dig_encoder == -1) | ||
814 | return; | ||
815 | |||
816 | memset(&args, 0, sizeof(args)); | ||
817 | |||
818 | if (ASIC_IS_DCE4(rdev)) | ||
819 | index = GetIndexIntoMasterTable(COMMAND, DIGxEncoderControl); | ||
820 | else { | ||
821 | if (dig->dig_encoder) | ||
822 | index = GetIndexIntoMasterTable(COMMAND, DIG2EncoderControl); | ||
823 | else | ||
824 | index = GetIndexIntoMasterTable(COMMAND, DIG1EncoderControl); | ||
825 | } | ||
826 | |||
827 | if (!atom_parse_cmd_header(rdev->mode_info.atom_context, index, &frev, &crev)) | ||
828 | return; | ||
829 | |||
830 | args.v1.ucAction = action; | ||
831 | args.v1.usPixelClock = cpu_to_le16(radeon_encoder->pixel_clock / 10); | ||
832 | if (action == ATOM_ENCODER_CMD_SETUP_PANEL_MODE) | ||
833 | args.v3.ucPanelMode = panel_mode; | ||
834 | else | ||
835 | args.v1.ucEncoderMode = atombios_get_encoder_mode(encoder); | ||
836 | |||
837 | if ((args.v1.ucEncoderMode == ATOM_ENCODER_MODE_DP) || | ||
838 | (args.v1.ucEncoderMode == ATOM_ENCODER_MODE_DP_MST)) | ||
839 | args.v1.ucLaneNum = dp_lane_count; | ||
840 | else if (radeon_encoder->pixel_clock > 165000) | ||
841 | args.v1.ucLaneNum = 8; | ||
842 | else | ||
843 | args.v1.ucLaneNum = 4; | ||
844 | |||
845 | if (ASIC_IS_DCE5(rdev)) { | ||
846 | if ((args.v1.ucEncoderMode == ATOM_ENCODER_MODE_DP) || | ||
847 | (args.v1.ucEncoderMode == ATOM_ENCODER_MODE_DP_MST)) { | ||
848 | if (dp_clock == 270000) | ||
849 | args.v1.ucConfig |= ATOM_ENCODER_CONFIG_V4_DPLINKRATE_2_70GHZ; | ||
850 | else if (dp_clock == 540000) | ||
851 | args.v1.ucConfig |= ATOM_ENCODER_CONFIG_V4_DPLINKRATE_5_40GHZ; | ||
852 | } | ||
853 | args.v4.acConfig.ucDigSel = dig->dig_encoder; | ||
854 | switch (bpc) { | ||
855 | case 0: | ||
856 | args.v4.ucBitPerColor = PANEL_BPC_UNDEFINE; | ||
857 | break; | ||
858 | case 6: | ||
859 | args.v4.ucBitPerColor = PANEL_6BIT_PER_COLOR; | ||
860 | break; | ||
861 | case 8: | ||
862 | default: | ||
863 | args.v4.ucBitPerColor = PANEL_8BIT_PER_COLOR; | ||
864 | break; | ||
865 | case 10: | ||
866 | args.v4.ucBitPerColor = PANEL_10BIT_PER_COLOR; | ||
867 | break; | ||
868 | case 12: | ||
869 | args.v4.ucBitPerColor = PANEL_12BIT_PER_COLOR; | ||
870 | break; | ||
871 | case 16: | ||
872 | args.v4.ucBitPerColor = PANEL_16BIT_PER_COLOR; | ||
873 | break; | ||
874 | } | ||
875 | if (hpd_id == RADEON_HPD_NONE) | ||
876 | args.v4.ucHPD_ID = 0; | ||
877 | else | ||
878 | args.v4.ucHPD_ID = hpd_id + 1; | ||
879 | } else if (ASIC_IS_DCE4(rdev)) { | ||
880 | if ((args.v1.ucEncoderMode == ATOM_ENCODER_MODE_DP) && (dp_clock == 270000)) | ||
881 | args.v1.ucConfig |= ATOM_ENCODER_CONFIG_V3_DPLINKRATE_2_70GHZ; | ||
882 | args.v3.acConfig.ucDigSel = dig->dig_encoder; | ||
883 | switch (bpc) { | ||
884 | case 0: | ||
885 | args.v3.ucBitPerColor = PANEL_BPC_UNDEFINE; | ||
886 | break; | ||
887 | case 6: | ||
888 | args.v3.ucBitPerColor = PANEL_6BIT_PER_COLOR; | ||
889 | break; | ||
890 | case 8: | ||
891 | default: | ||
892 | args.v3.ucBitPerColor = PANEL_8BIT_PER_COLOR; | ||
893 | break; | ||
894 | case 10: | ||
895 | args.v3.ucBitPerColor = PANEL_10BIT_PER_COLOR; | ||
896 | break; | ||
897 | case 12: | ||
898 | args.v3.ucBitPerColor = PANEL_12BIT_PER_COLOR; | ||
899 | break; | ||
900 | case 16: | ||
901 | args.v3.ucBitPerColor = PANEL_16BIT_PER_COLOR; | ||
902 | break; | ||
903 | } | ||
904 | } else { | ||
905 | if ((args.v1.ucEncoderMode == ATOM_ENCODER_MODE_DP) && (dp_clock == 270000)) | ||
906 | args.v1.ucConfig |= ATOM_ENCODER_CONFIG_DPLINKRATE_2_70GHZ; | ||
907 | switch (radeon_encoder->encoder_id) { | ||
908 | case ENCODER_OBJECT_ID_INTERNAL_UNIPHY: | ||
909 | args.v1.ucConfig = ATOM_ENCODER_CONFIG_V2_TRANSMITTER1; | ||
910 | break; | ||
911 | case ENCODER_OBJECT_ID_INTERNAL_UNIPHY1: | ||
912 | case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_LVTMA: | ||
913 | args.v1.ucConfig = ATOM_ENCODER_CONFIG_V2_TRANSMITTER2; | ||
914 | break; | ||
915 | case ENCODER_OBJECT_ID_INTERNAL_UNIPHY2: | ||
916 | args.v1.ucConfig = ATOM_ENCODER_CONFIG_V2_TRANSMITTER3; | ||
917 | break; | ||
918 | } | ||
919 | if (dig->linkb) | ||
920 | args.v1.ucConfig |= ATOM_ENCODER_CONFIG_LINKB; | ||
921 | else | ||
922 | args.v1.ucConfig |= ATOM_ENCODER_CONFIG_LINKA; | ||
923 | } | ||
924 | |||
925 | atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args); | ||
926 | |||
927 | } | ||
928 | |||
/* Parameter block for the UNIPHY/LVTMA TransmitterControl AtomBIOS command
 * table.  One member per table parameter revision; which one is valid is
 * decided at runtime from the crev returned by atom_parse_cmd_header().
 * The v1 member doubles as the common prefix (ucAction etc.) shared by
 * all revisions.
 */
union dig_transmitter_control {
	DIG_TRANSMITTER_CONTROL_PS_ALLOCATION v1;
	DIG_TRANSMITTER_CONTROL_PARAMETERS_V2 v2;
	DIG_TRANSMITTER_CONTROL_PARAMETERS_V3 v3;
	DIG_TRANSMITTER_CONTROL_PARAMETERS_V4 v4;
};
935 | |||
/* Program a DIG transmitter through the AtomBIOS transmitter-control
 * command table (UNIPHY/LVTMA/DVO depending on the encoder object id).
 *
 * @encoder:  the DRM encoder whose transmitter is being configured
 * @action:   ATOM_TRANSMITTER_ACTION_* opcode (INIT, ENABLE, SETUP_VSEMPH, ...)
 * @lane_num: lane selector, only used for SETUP_VSEMPH (DP link training)
 * @lane_set: voltage-swing/pre-emphasis setting, only for SETUP_VSEMPH
 *
 * The union member written (v1/v2/v3/v4) is chosen per ASIC family:
 * pre-DCE3.2 uses v1, DCE3.2 uses v2, DCE4 uses v3, and DCE5 additionally
 * touches the v4 ref-clock field.  Bails out early if no DIG encoder has
 * been assigned or the command table header cannot be parsed.
 */
void
atombios_dig_transmitter_setup(struct drm_encoder *encoder, int action, uint8_t lane_num, uint8_t lane_set)
{
	struct drm_device *dev = encoder->dev;
	struct radeon_device *rdev = dev->dev_private;
	struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
	struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv;
	struct drm_connector *connector;
	union dig_transmitter_control args;
	int index = 0;
	uint8_t frev, crev;
	bool is_dp = false;
	int pll_id = 0;
	int dp_clock = 0;
	int dp_lane_count = 0;
	int connector_object_id = 0;
	int igp_lane_info = 0;
	int dig_encoder = dig->dig_encoder;

	if (action == ATOM_TRANSMITTER_ACTION_INIT) {
		connector = radeon_get_connector_for_encoder_init(encoder);
		/* just needed to avoid bailing in the encoder check. the encoder
		 * isn't used for init
		 */
		dig_encoder = 0;
	} else
		connector = radeon_get_connector_for_encoder(encoder);

	if (connector) {
		struct radeon_connector *radeon_connector = to_radeon_connector(connector);
		struct radeon_connector_atom_dig *dig_connector =
			radeon_connector->con_priv;

		/* pull DP link parameters and IGP lane routing off the connector */
		dp_clock = dig_connector->dp_clock;
		dp_lane_count = dig_connector->dp_lane_count;
		connector_object_id =
			(radeon_connector->connector_object_id & OBJECT_ID_MASK) >> OBJECT_ID_SHIFT;
		igp_lane_info = dig_connector->igp_lane_info;
	}

	/* no dig encoder assigned */
	if (dig_encoder == -1)
		return;

	if (atombios_get_encoder_mode(encoder) == ATOM_ENCODER_MODE_DP)
		is_dp = true;

	memset(&args, 0, sizeof(args));

	/* pick the command table matching the transmitter hardware */
	switch (radeon_encoder->encoder_id) {
	case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DVO1:
		index = GetIndexIntoMasterTable(COMMAND, DVOOutputControl);
		break;
	case ENCODER_OBJECT_ID_INTERNAL_UNIPHY:
	case ENCODER_OBJECT_ID_INTERNAL_UNIPHY1:
	case ENCODER_OBJECT_ID_INTERNAL_UNIPHY2:
		index = GetIndexIntoMasterTable(COMMAND, UNIPHYTransmitterControl);
		break;
	case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_LVTMA:
		index = GetIndexIntoMasterTable(COMMAND, LVTMATransmitterControl);
		break;
	}

	if (!atom_parse_cmd_header(rdev->mode_info.atom_context, index, &frev, &crev))
		return;

	/* common (v1) fields are valid for every table revision */
	args.v1.ucAction = action;
	if (action == ATOM_TRANSMITTER_ACTION_INIT) {
		args.v1.usInitInfo = cpu_to_le16(connector_object_id);
	} else if (action == ATOM_TRANSMITTER_ACTION_SETUP_VSEMPH) {
		/* DP link training: pass swing/pre-emphasis instead of a clock */
		args.v1.asMode.ucLaneSel = lane_num;
		args.v1.asMode.ucLaneSet = lane_set;
	} else {
		if (is_dp)
			args.v1.usPixelClock =
				cpu_to_le16(dp_clock / 10);
		else if (radeon_encoder->pixel_clock > 165000)
			/* dual-link TMDS: each link carries half the pixel clock */
			args.v1.usPixelClock = cpu_to_le16((radeon_encoder->pixel_clock / 2) / 10);
		else
			args.v1.usPixelClock = cpu_to_le16(radeon_encoder->pixel_clock / 10);
	}
	if (ASIC_IS_DCE4(rdev)) {
		/* DCE4/5: v3 layout (v4 for the DCE5 ref-clock source field) */
		if (is_dp)
			args.v3.ucLaneNum = dp_lane_count;
		else if (radeon_encoder->pixel_clock > 165000)
			args.v3.ucLaneNum = 8;
		else
			args.v3.ucLaneNum = 4;

		if (dig->linkb)
			args.v3.acConfig.ucLinkSel = 1;
		if (dig_encoder & 1)
			args.v3.acConfig.ucEncoderSel = 1;

		/* Select the PLL for the PHY
		 * DP PHY should be clocked from external src if there is
		 * one.
		 */
		if (encoder->crtc) {
			struct radeon_crtc *radeon_crtc = to_radeon_crtc(encoder->crtc);
			pll_id = radeon_crtc->pll_id;
		}

		if (ASIC_IS_DCE5(rdev)) {
			/* On DCE5 DCPLL usually generates the DP ref clock */
			if (is_dp) {
				if (rdev->clock.dp_extclk)
					args.v4.acConfig.ucRefClkSource = ENCODER_REFCLK_SRC_EXTCLK;
				else
					args.v4.acConfig.ucRefClkSource = ENCODER_REFCLK_SRC_DCPLL;
			} else
				args.v4.acConfig.ucRefClkSource = pll_id;
		} else {
			/* On DCE4, if there is an external clock, it generates the DP ref clock */
			if (is_dp && rdev->clock.dp_extclk)
				args.v3.acConfig.ucRefClkSource = 2; /* external src */
			else
				args.v3.acConfig.ucRefClkSource = pll_id;
		}

		switch (radeon_encoder->encoder_id) {
		case ENCODER_OBJECT_ID_INTERNAL_UNIPHY:
			args.v3.acConfig.ucTransmitterSel = 0;
			break;
		case ENCODER_OBJECT_ID_INTERNAL_UNIPHY1:
			args.v3.acConfig.ucTransmitterSel = 1;
			break;
		case ENCODER_OBJECT_ID_INTERNAL_UNIPHY2:
			args.v3.acConfig.ucTransmitterSel = 2;
			break;
		}

		if (is_dp)
			args.v3.acConfig.fCoherentMode = 1; /* DP requires coherent */
		else if (radeon_encoder->devices & (ATOM_DEVICE_DFP_SUPPORT)) {
			if (dig->coherent_mode)
				args.v3.acConfig.fCoherentMode = 1;
			if (radeon_encoder->pixel_clock > 165000)
				args.v3.acConfig.fDualLinkConnector = 1;
		}
	} else if (ASIC_IS_DCE32(rdev)) {
		/* DCE3.2: v2 layout */
		args.v2.acConfig.ucEncoderSel = dig_encoder;
		if (dig->linkb)
			args.v2.acConfig.ucLinkSel = 1;

		switch (radeon_encoder->encoder_id) {
		case ENCODER_OBJECT_ID_INTERNAL_UNIPHY:
			args.v2.acConfig.ucTransmitterSel = 0;
			break;
		case ENCODER_OBJECT_ID_INTERNAL_UNIPHY1:
			args.v2.acConfig.ucTransmitterSel = 1;
			break;
		case ENCODER_OBJECT_ID_INTERNAL_UNIPHY2:
			args.v2.acConfig.ucTransmitterSel = 2;
			break;
		}

		if (is_dp) {
			args.v2.acConfig.fCoherentMode = 1;
			args.v2.acConfig.fDPConnector = 1;
		} else if (radeon_encoder->devices & (ATOM_DEVICE_DFP_SUPPORT)) {
			if (dig->coherent_mode)
				args.v2.acConfig.fCoherentMode = 1;
			if (radeon_encoder->pixel_clock > 165000)
				args.v2.acConfig.fDualLinkConnector = 1;
		}
	} else {
		/* pre-DCE3.2: v1 layout, config packed into a bitmask */
		args.v1.ucConfig = ATOM_TRANSMITTER_CONFIG_CLKSRC_PPLL;

		if (dig_encoder)
			args.v1.ucConfig |= ATOM_TRANSMITTER_CONFIG_DIG2_ENCODER;
		else
			args.v1.ucConfig |= ATOM_TRANSMITTER_CONFIG_DIG1_ENCODER;

		if ((rdev->flags & RADEON_IS_IGP) &&
		    (radeon_encoder->encoder_id == ENCODER_OBJECT_ID_INTERNAL_UNIPHY)) {
			/* IGP boards route a subset of the PCIe lanes to the PHY;
			 * pick the lane group advertised by the connector */
			if (is_dp || (radeon_encoder->pixel_clock <= 165000)) {
				if (igp_lane_info & 0x1)
					args.v1.ucConfig |= ATOM_TRANSMITTER_CONFIG_LANE_0_3;
				else if (igp_lane_info & 0x2)
					args.v1.ucConfig |= ATOM_TRANSMITTER_CONFIG_LANE_4_7;
				else if (igp_lane_info & 0x4)
					args.v1.ucConfig |= ATOM_TRANSMITTER_CONFIG_LANE_8_11;
				else if (igp_lane_info & 0x8)
					args.v1.ucConfig |= ATOM_TRANSMITTER_CONFIG_LANE_12_15;
			} else {
				/* dual link needs 8 lanes */
				if (igp_lane_info & 0x3)
					args.v1.ucConfig |= ATOM_TRANSMITTER_CONFIG_LANE_0_7;
				else if (igp_lane_info & 0xc)
					args.v1.ucConfig |= ATOM_TRANSMITTER_CONFIG_LANE_8_15;
			}
		}

		if (dig->linkb)
			args.v1.ucConfig |= ATOM_TRANSMITTER_CONFIG_LINKB;
		else
			args.v1.ucConfig |= ATOM_TRANSMITTER_CONFIG_LINKA;

		if (is_dp)
			args.v1.ucConfig |= ATOM_TRANSMITTER_CONFIG_COHERENT;
		else if (radeon_encoder->devices & (ATOM_DEVICE_DFP_SUPPORT)) {
			if (dig->coherent_mode)
				args.v1.ucConfig |= ATOM_TRANSMITTER_CONFIG_COHERENT;
			if (radeon_encoder->pixel_clock > 165000)
				args.v1.ucConfig |= ATOM_TRANSMITTER_CONFIG_8LANE_LINK;
		}
	}

	atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
}
1146 | |||
1147 | bool | ||
1148 | atombios_set_edp_panel_power(struct drm_connector *connector, int action) | ||
1149 | { | ||
1150 | struct radeon_connector *radeon_connector = to_radeon_connector(connector); | ||
1151 | struct drm_device *dev = radeon_connector->base.dev; | ||
1152 | struct radeon_device *rdev = dev->dev_private; | ||
1153 | union dig_transmitter_control args; | ||
1154 | int index = GetIndexIntoMasterTable(COMMAND, UNIPHYTransmitterControl); | ||
1155 | uint8_t frev, crev; | ||
1156 | |||
1157 | if (connector->connector_type != DRM_MODE_CONNECTOR_eDP) | ||
1158 | goto done; | ||
1159 | |||
1160 | if (!ASIC_IS_DCE4(rdev)) | ||
1161 | goto done; | ||
1162 | |||
1163 | if ((action != ATOM_TRANSMITTER_ACTION_POWER_ON) && | ||
1164 | (action != ATOM_TRANSMITTER_ACTION_POWER_OFF)) | ||
1165 | goto done; | ||
1166 | |||
1167 | if (!atom_parse_cmd_header(rdev->mode_info.atom_context, index, &frev, &crev)) | ||
1168 | goto done; | ||
1169 | |||
1170 | memset(&args, 0, sizeof(args)); | ||
1171 | |||
1172 | args.v1.ucAction = action; | ||
1173 | |||
1174 | atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args); | ||
1175 | |||
1176 | /* wait for the panel to power up */ | ||
1177 | if (action == ATOM_TRANSMITTER_ACTION_POWER_ON) { | ||
1178 | int i; | ||
1179 | |||
1180 | for (i = 0; i < 300; i++) { | ||
1181 | if (radeon_hpd_sense(rdev, radeon_connector->hpd.hpd)) | ||
1182 | return true; | ||
1183 | mdelay(1); | ||
1184 | } | ||
1185 | return false; | ||
1186 | } | ||
1187 | done: | ||
1188 | return true; | ||
1189 | } | ||
1190 | |||
/* Parameter block for the ExternalEncoderControl AtomBIOS command table.
 * v1 serves table revisions 2.1/2.2, v3 serves revision 2.3; the active
 * member is selected from the crev reported by atom_parse_cmd_header().
 */
union external_encoder_control {
	EXTERNAL_ENCODER_CONTROL_PS_ALLOCATION v1;
	EXTERNAL_ENCODER_CONTROL_PS_ALLOCATION_V3 v3;
};
1195 | |||
/* Configure an external (off-chip) encoder attached behind a DIG encoder
 * via the ExternalEncoderControl AtomBIOS command table.
 *
 * @encoder:     the internal DRM encoder driving the external one
 * @ext_encoder: the external encoder object being programmed
 * @action:      ATOM_ENABLE/ATOM_DISABLE or EXTERNAL_ENCODER_ACTION_V3_*
 *
 * Fills the v1 or v3 parameter layout depending on the table revision
 * reported by atom_parse_cmd_header(); unknown revisions are logged and
 * the call is dropped.
 */
static void
atombios_external_encoder_setup(struct drm_encoder *encoder,
				struct drm_encoder *ext_encoder,
				int action)
{
	struct drm_device *dev = encoder->dev;
	struct radeon_device *rdev = dev->dev_private;
	struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
	struct radeon_encoder *ext_radeon_encoder = to_radeon_encoder(ext_encoder);
	union external_encoder_control args;
	struct drm_connector *connector;
	int index = GetIndexIntoMasterTable(COMMAND, ExternalEncoderControl);
	u8 frev, crev;
	int dp_clock = 0;
	int dp_lane_count = 0;
	int connector_object_id = 0;
	u32 ext_enum = (ext_radeon_encoder->encoder_enum & ENUM_ID_MASK) >> ENUM_ID_SHIFT;
	int bpc = 8;

	/* during INIT the encoder->connector mapping isn't established yet */
	if (action == EXTERNAL_ENCODER_ACTION_V3_ENCODER_INIT)
		connector = radeon_get_connector_for_encoder_init(encoder);
	else
		connector = radeon_get_connector_for_encoder(encoder);

	if (connector) {
		struct radeon_connector *radeon_connector = to_radeon_connector(connector);
		struct radeon_connector_atom_dig *dig_connector =
			radeon_connector->con_priv;

		/* DP link parameters and panel depth come from the connector */
		dp_clock = dig_connector->dp_clock;
		dp_lane_count = dig_connector->dp_lane_count;
		connector_object_id =
			(radeon_connector->connector_object_id & OBJECT_ID_MASK) >> OBJECT_ID_SHIFT;
		bpc = connector->display_info.bpc;
	}

	memset(&args, 0, sizeof(args));

	if (!atom_parse_cmd_header(rdev->mode_info.atom_context, index, &frev, &crev))
		return;

	switch (frev) {
	case 1:
		/* no params on frev 1 */
		break;
	case 2:
		switch (crev) {
		case 1:
		case 2:
			/* revision 2.1/2.2: external encoder described via an
			 * embedded DIG-encoder parameter block (v1 layout) */
			args.v1.sDigEncoder.ucAction = action;
			args.v1.sDigEncoder.usPixelClock = cpu_to_le16(radeon_encoder->pixel_clock / 10);
			args.v1.sDigEncoder.ucEncoderMode = atombios_get_encoder_mode(encoder);

			if (args.v1.sDigEncoder.ucEncoderMode == ATOM_ENCODER_MODE_DP) {
				if (dp_clock == 270000)
					args.v1.sDigEncoder.ucConfig |= ATOM_ENCODER_CONFIG_DPLINKRATE_2_70GHZ;
				args.v1.sDigEncoder.ucLaneNum = dp_lane_count;
			} else if (radeon_encoder->pixel_clock > 165000)
				args.v1.sDigEncoder.ucLaneNum = 8;
			else
				args.v1.sDigEncoder.ucLaneNum = 4;
			break;
		case 3:
			/* revision 2.3: dedicated external-encoder block (v3) */
			args.v3.sExtEncoder.ucAction = action;
			if (action == EXTERNAL_ENCODER_ACTION_V3_ENCODER_INIT)
				args.v3.sExtEncoder.usConnectorId = cpu_to_le16(connector_object_id);
			else
				args.v3.sExtEncoder.usPixelClock = cpu_to_le16(radeon_encoder->pixel_clock / 10);
			args.v3.sExtEncoder.ucEncoderMode = atombios_get_encoder_mode(encoder);

			if (args.v3.sExtEncoder.ucEncoderMode == ATOM_ENCODER_MODE_DP) {
				if (dp_clock == 270000)
					args.v3.sExtEncoder.ucConfig |= EXTERNAL_ENCODER_CONFIG_V3_DPLINKRATE_2_70GHZ;
				else if (dp_clock == 540000)
					args.v3.sExtEncoder.ucConfig |= EXTERNAL_ENCODER_CONFIG_V3_DPLINKRATE_5_40GHZ;
				args.v3.sExtEncoder.ucLaneNum = dp_lane_count;
			} else if (radeon_encoder->pixel_clock > 165000)
				args.v3.sExtEncoder.ucLaneNum = 8;
			else
				args.v3.sExtEncoder.ucLaneNum = 4;
			switch (ext_enum) {
			case GRAPH_OBJECT_ENUM_ID1:
				args.v3.sExtEncoder.ucConfig |= EXTERNAL_ENCODER_CONFIG_V3_ENCODER1;
				break;
			case GRAPH_OBJECT_ENUM_ID2:
				args.v3.sExtEncoder.ucConfig |= EXTERNAL_ENCODER_CONFIG_V3_ENCODER2;
				break;
			case GRAPH_OBJECT_ENUM_ID3:
				args.v3.sExtEncoder.ucConfig |= EXTERNAL_ENCODER_CONFIG_V3_ENCODER3;
				break;
			}
			/* map connector bpc to the table's panel-depth enum;
			 * unknown depths deliberately fall back to 8 bpc */
			switch (bpc) {
			case 0:
				args.v3.sExtEncoder.ucBitPerColor = PANEL_BPC_UNDEFINE;
				break;
			case 6:
				args.v3.sExtEncoder.ucBitPerColor = PANEL_6BIT_PER_COLOR;
				break;
			case 8:
			default:
				args.v3.sExtEncoder.ucBitPerColor = PANEL_8BIT_PER_COLOR;
				break;
			case 10:
				args.v3.sExtEncoder.ucBitPerColor = PANEL_10BIT_PER_COLOR;
				break;
			case 12:
				args.v3.sExtEncoder.ucBitPerColor = PANEL_12BIT_PER_COLOR;
				break;
			case 16:
				args.v3.sExtEncoder.ucBitPerColor = PANEL_16BIT_PER_COLOR;
				break;
			}
			break;
		default:
			DRM_ERROR("Unknown table version: %d, %d\n", frev, crev);
			return;
		}
		break;
	default:
		DRM_ERROR("Unknown table version: %d, %d\n", frev, crev);
		return;
	}
	atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
}
1320 | |||
1321 | static void | ||
1322 | atombios_yuv_setup(struct drm_encoder *encoder, bool enable) | ||
1323 | { | ||
1324 | struct drm_device *dev = encoder->dev; | ||
1325 | struct radeon_device *rdev = dev->dev_private; | ||
1326 | struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder); | ||
1327 | struct radeon_crtc *radeon_crtc = to_radeon_crtc(encoder->crtc); | ||
1328 | ENABLE_YUV_PS_ALLOCATION args; | ||
1329 | int index = GetIndexIntoMasterTable(COMMAND, EnableYUV); | ||
1330 | uint32_t temp, reg; | ||
1331 | |||
1332 | memset(&args, 0, sizeof(args)); | ||
1333 | |||
1334 | if (rdev->family >= CHIP_R600) | ||
1335 | reg = R600_BIOS_3_SCRATCH; | ||
1336 | else | ||
1337 | reg = RADEON_BIOS_3_SCRATCH; | ||
1338 | |||
1339 | /* XXX: fix up scratch reg handling */ | ||
1340 | temp = RREG32(reg); | ||
1341 | if (radeon_encoder->active_device & (ATOM_DEVICE_TV_SUPPORT)) | ||
1342 | WREG32(reg, (ATOM_S3_TV1_ACTIVE | | ||
1343 | (radeon_crtc->crtc_id << 18))); | ||
1344 | else if (radeon_encoder->active_device & (ATOM_DEVICE_CV_SUPPORT)) | ||
1345 | WREG32(reg, (ATOM_S3_CV_ACTIVE | (radeon_crtc->crtc_id << 24))); | ||
1346 | else | ||
1347 | WREG32(reg, 0); | ||
1348 | |||
1349 | if (enable) | ||
1350 | args.ucEnable = ATOM_ENABLE; | ||
1351 | args.ucCRTC = radeon_crtc->crtc_id; | ||
1352 | |||
1353 | atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args); | ||
1354 | |||
1355 | WREG32(reg, temp); | ||
1356 | } | ||
1357 | |||
/* DPMS (power-management) entry point for AtomBIOS encoders.
 *
 * Routes the requested mode to the mechanism each encoder family uses:
 * DIG encoders go through the transmitter-control table (with DP link
 * training and eDP panel power sequencing on the ON path), DCE5 DAC/DVO
 * use their dedicated setup helpers, and legacy encoders run the matching
 * *OutputControl command table directly.  An attached external encoder,
 * if any, is switched afterwards, and the BIOS scratch registers are
 * updated last to reflect the new state.
 */
static void
radeon_atom_encoder_dpms(struct drm_encoder *encoder, int mode)
{
	struct drm_device *dev = encoder->dev;
	struct radeon_device *rdev = dev->dev_private;
	struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
	struct drm_encoder *ext_encoder = radeon_atom_get_external_encoder(encoder);
	DISPLAY_DEVICE_OUTPUT_CONTROL_PS_ALLOCATION args;
	int index = 0;
	bool is_dig = false;
	bool is_dce5_dac = false;
	bool is_dce5_dvo = false;

	memset(&args, 0, sizeof(args));

	DRM_DEBUG_KMS("encoder dpms %d to mode %d, devices %08x, active_devices %08x\n",
		  radeon_encoder->encoder_id, mode, radeon_encoder->devices,
		  radeon_encoder->active_device);
	/* classify the encoder: either pick a legacy output-control table
	 * index, or flag it for the DIG / DCE5-specific paths below */
	switch (radeon_encoder->encoder_id) {
	case ENCODER_OBJECT_ID_INTERNAL_TMDS1:
	case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_TMDS1:
		index = GetIndexIntoMasterTable(COMMAND, TMDSAOutputControl);
		break;
	case ENCODER_OBJECT_ID_INTERNAL_UNIPHY:
	case ENCODER_OBJECT_ID_INTERNAL_UNIPHY1:
	case ENCODER_OBJECT_ID_INTERNAL_UNIPHY2:
	case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_LVTMA:
		is_dig = true;
		break;
	case ENCODER_OBJECT_ID_INTERNAL_DVO1:
	case ENCODER_OBJECT_ID_INTERNAL_DDI:
		index = GetIndexIntoMasterTable(COMMAND, DVOOutputControl);
		break;
	case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DVO1:
		if (ASIC_IS_DCE5(rdev))
			is_dce5_dvo = true;
		else if (ASIC_IS_DCE3(rdev))
			is_dig = true;
		else
			index = GetIndexIntoMasterTable(COMMAND, DVOOutputControl);
		break;
	case ENCODER_OBJECT_ID_INTERNAL_LVDS:
		index = GetIndexIntoMasterTable(COMMAND, LCD1OutputControl);
		break;
	case ENCODER_OBJECT_ID_INTERNAL_LVTM1:
		if (radeon_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT))
			index = GetIndexIntoMasterTable(COMMAND, LCD1OutputControl);
		else
			index = GetIndexIntoMasterTable(COMMAND, LVTMAOutputControl);
		break;
	case ENCODER_OBJECT_ID_INTERNAL_DAC1:
	case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC1:
		if (ASIC_IS_DCE5(rdev))
			is_dce5_dac = true;
		else {
			if (radeon_encoder->active_device & (ATOM_DEVICE_TV_SUPPORT))
				index = GetIndexIntoMasterTable(COMMAND, TV1OutputControl);
			else if (radeon_encoder->active_device & (ATOM_DEVICE_CV_SUPPORT))
				index = GetIndexIntoMasterTable(COMMAND, CV1OutputControl);
			else
				index = GetIndexIntoMasterTable(COMMAND, DAC1OutputControl);
		}
		break;
	case ENCODER_OBJECT_ID_INTERNAL_DAC2:
	case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC2:
		if (radeon_encoder->active_device & (ATOM_DEVICE_TV_SUPPORT))
			index = GetIndexIntoMasterTable(COMMAND, TV1OutputControl);
		else if (radeon_encoder->active_device & (ATOM_DEVICE_CV_SUPPORT))
			index = GetIndexIntoMasterTable(COMMAND, CV1OutputControl);
		else
			index = GetIndexIntoMasterTable(COMMAND, DAC2OutputControl);
		break;
	}

	if (is_dig) {
		switch (mode) {
		case DRM_MODE_DPMS_ON:
			/* some early dce3.2 boards have a bug in their transmitter control table */
			if ((rdev->family == CHIP_RV710) || (rdev->family == CHIP_RV730))
				atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_ENABLE, 0, 0);
			else
				atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_ENABLE_OUTPUT, 0, 0);
			if (atombios_get_encoder_mode(encoder) == ATOM_ENCODER_MODE_DP) {
				struct drm_connector *connector = radeon_get_connector_for_encoder(encoder);

				/* eDP panels must be powered before link training */
				if (connector &&
				    (connector->connector_type == DRM_MODE_CONNECTOR_eDP)) {
					struct radeon_connector *radeon_connector = to_radeon_connector(connector);
					struct radeon_connector_atom_dig *radeon_dig_connector =
						radeon_connector->con_priv;
					atombios_set_edp_panel_power(connector,
								     ATOM_TRANSMITTER_ACTION_POWER_ON);
					radeon_dig_connector->edp_on = true;
				}
				/* blank video while training, unblank after */
				if (ASIC_IS_DCE4(rdev))
					atombios_dig_encoder_setup(encoder, ATOM_ENCODER_CMD_DP_VIDEO_OFF, 0);
				radeon_dp_link_train(encoder, connector);
				if (ASIC_IS_DCE4(rdev))
					atombios_dig_encoder_setup(encoder, ATOM_ENCODER_CMD_DP_VIDEO_ON, 0);
			}
			if (radeon_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT))
				atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_LCD_BLON, 0, 0);
			break;
		case DRM_MODE_DPMS_STANDBY:
		case DRM_MODE_DPMS_SUSPEND:
		case DRM_MODE_DPMS_OFF:
			atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_DISABLE_OUTPUT, 0, 0);
			if (atombios_get_encoder_mode(encoder) == ATOM_ENCODER_MODE_DP) {
				struct drm_connector *connector = radeon_get_connector_for_encoder(encoder);

				if (ASIC_IS_DCE4(rdev))
					atombios_dig_encoder_setup(encoder, ATOM_ENCODER_CMD_DP_VIDEO_OFF, 0);
				/* power the eDP panel down last */
				if (connector &&
				    (connector->connector_type == DRM_MODE_CONNECTOR_eDP)) {
					struct radeon_connector *radeon_connector = to_radeon_connector(connector);
					struct radeon_connector_atom_dig *radeon_dig_connector =
						radeon_connector->con_priv;
					atombios_set_edp_panel_power(connector,
								     ATOM_TRANSMITTER_ACTION_POWER_OFF);
					radeon_dig_connector->edp_on = false;
				}
			}
			if (radeon_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT))
				atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_LCD_BLOFF, 0, 0);
			break;
		}
	} else if (is_dce5_dac) {
		switch (mode) {
		case DRM_MODE_DPMS_ON:
			atombios_dac_setup(encoder, ATOM_ENABLE);
			break;
		case DRM_MODE_DPMS_STANDBY:
		case DRM_MODE_DPMS_SUSPEND:
		case DRM_MODE_DPMS_OFF:
			atombios_dac_setup(encoder, ATOM_DISABLE);
			break;
		}
	} else if (is_dce5_dvo) {
		switch (mode) {
		case DRM_MODE_DPMS_ON:
			atombios_dvo_setup(encoder, ATOM_ENABLE);
			break;
		case DRM_MODE_DPMS_STANDBY:
		case DRM_MODE_DPMS_SUSPEND:
		case DRM_MODE_DPMS_OFF:
			atombios_dvo_setup(encoder, ATOM_DISABLE);
			break;
		}
	} else {
		/* legacy path: drive the selected *OutputControl table */
		switch (mode) {
		case DRM_MODE_DPMS_ON:
			args.ucAction = ATOM_ENABLE;
			/* workaround for DVOOutputControl on some RS690 systems */
			if (radeon_encoder->encoder_id == ENCODER_OBJECT_ID_INTERNAL_DDI) {
				u32 reg = RREG32(RADEON_BIOS_3_SCRATCH);
				WREG32(RADEON_BIOS_3_SCRATCH, reg & ~ATOM_S3_DFP2I_ACTIVE);
				atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
				WREG32(RADEON_BIOS_3_SCRATCH, reg);
			} else
				atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
			if (radeon_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT)) {
				args.ucAction = ATOM_LCD_BLON;
				atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
			}
			break;
		case DRM_MODE_DPMS_STANDBY:
		case DRM_MODE_DPMS_SUSPEND:
		case DRM_MODE_DPMS_OFF:
			args.ucAction = ATOM_DISABLE;
			atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
			if (radeon_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT)) {
				args.ucAction = ATOM_LCD_BLOFF;
				atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
			}
			break;
		}
	}

	if (ext_encoder) {
		switch (mode) {
		case DRM_MODE_DPMS_ON:
		default:
			if (ASIC_IS_DCE41(rdev)) {
				atombios_external_encoder_setup(encoder, ext_encoder,
								EXTERNAL_ENCODER_ACTION_V3_ENABLE_OUTPUT);
				atombios_external_encoder_setup(encoder, ext_encoder,
								EXTERNAL_ENCODER_ACTION_V3_ENCODER_BLANKING_OFF);
			} else
				atombios_external_encoder_setup(encoder, ext_encoder, ATOM_ENABLE);
			break;
		case DRM_MODE_DPMS_STANDBY:
		case DRM_MODE_DPMS_SUSPEND:
		case DRM_MODE_DPMS_OFF:
			if (ASIC_IS_DCE41(rdev)) {
				atombios_external_encoder_setup(encoder, ext_encoder,
								EXTERNAL_ENCODER_ACTION_V3_ENCODER_BLANKING);
				atombios_external_encoder_setup(encoder, ext_encoder,
								EXTERNAL_ENCODER_ACTION_V3_DISABLE_OUTPUT);
			} else
				atombios_external_encoder_setup(encoder, ext_encoder, ATOM_DISABLE);
			break;
		}
	}

	radeon_atombios_encoder_dpms_scratch_regs(encoder, (mode == DRM_MODE_DPMS_ON) ? true : false);

}
1565 | |||
/* Parameter block for the SelectCRTC_Source AtomBIOS command table;
 * v1 vs v2 layout is chosen from the crev of the parsed table header.
 */
union crtc_source_param {
	SELECT_CRTC_SOURCE_PS_ALLOCATION v1;
	SELECT_CRTC_SOURCE_PARAMETERS_V2 v2;
};
1570 | |||
1571 | static void | ||
1572 | atombios_set_encoder_crtc_source(struct drm_encoder *encoder) | ||
1573 | { | ||
1574 | struct drm_device *dev = encoder->dev; | ||
1575 | struct radeon_device *rdev = dev->dev_private; | ||
1576 | struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder); | ||
1577 | struct radeon_crtc *radeon_crtc = to_radeon_crtc(encoder->crtc); | ||
1578 | union crtc_source_param args; | ||
1579 | int index = GetIndexIntoMasterTable(COMMAND, SelectCRTC_Source); | ||
1580 | uint8_t frev, crev; | ||
1581 | struct radeon_encoder_atom_dig *dig; | ||
1582 | |||
1583 | memset(&args, 0, sizeof(args)); | ||
1584 | |||
1585 | if (!atom_parse_cmd_header(rdev->mode_info.atom_context, index, &frev, &crev)) | ||
1586 | return; | ||
1587 | |||
1588 | switch (frev) { | ||
1589 | case 1: | ||
1590 | switch (crev) { | ||
1591 | case 1: | ||
1592 | default: | ||
1593 | if (ASIC_IS_AVIVO(rdev)) | ||
1594 | args.v1.ucCRTC = radeon_crtc->crtc_id; | ||
1595 | else { | ||
1596 | if (radeon_encoder->encoder_id == ENCODER_OBJECT_ID_INTERNAL_DAC1) { | ||
1597 | args.v1.ucCRTC = radeon_crtc->crtc_id; | ||
1598 | } else { | ||
1599 | args.v1.ucCRTC = radeon_crtc->crtc_id << 2; | ||
1600 | } | ||
1601 | } | ||
1602 | switch (radeon_encoder->encoder_id) { | ||
1603 | case ENCODER_OBJECT_ID_INTERNAL_TMDS1: | ||
1604 | case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_TMDS1: | ||
1605 | args.v1.ucDevice = ATOM_DEVICE_DFP1_INDEX; | ||
1606 | break; | ||
1607 | case ENCODER_OBJECT_ID_INTERNAL_LVDS: | ||
1608 | case ENCODER_OBJECT_ID_INTERNAL_LVTM1: | ||
1609 | if (radeon_encoder->devices & ATOM_DEVICE_LCD1_SUPPORT) | ||
1610 | args.v1.ucDevice = ATOM_DEVICE_LCD1_INDEX; | ||
1611 | else | ||
1612 | args.v1.ucDevice = ATOM_DEVICE_DFP3_INDEX; | ||
1613 | break; | ||
1614 | case ENCODER_OBJECT_ID_INTERNAL_DVO1: | ||
1615 | case ENCODER_OBJECT_ID_INTERNAL_DDI: | ||
1616 | case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DVO1: | ||
1617 | args.v1.ucDevice = ATOM_DEVICE_DFP2_INDEX; | ||
1618 | break; | ||
1619 | case ENCODER_OBJECT_ID_INTERNAL_DAC1: | ||
1620 | case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC1: | ||
1621 | if (radeon_encoder->active_device & (ATOM_DEVICE_TV_SUPPORT)) | ||
1622 | args.v1.ucDevice = ATOM_DEVICE_TV1_INDEX; | ||
1623 | else if (radeon_encoder->active_device & (ATOM_DEVICE_CV_SUPPORT)) | ||
1624 | args.v1.ucDevice = ATOM_DEVICE_CV_INDEX; | ||
1625 | else | ||
1626 | args.v1.ucDevice = ATOM_DEVICE_CRT1_INDEX; | ||
1627 | break; | ||
1628 | case ENCODER_OBJECT_ID_INTERNAL_DAC2: | ||
1629 | case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC2: | ||
1630 | if (radeon_encoder->active_device & (ATOM_DEVICE_TV_SUPPORT)) | ||
1631 | args.v1.ucDevice = ATOM_DEVICE_TV1_INDEX; | ||
1632 | else if (radeon_encoder->active_device & (ATOM_DEVICE_CV_SUPPORT)) | ||
1633 | args.v1.ucDevice = ATOM_DEVICE_CV_INDEX; | ||
1634 | else | ||
1635 | args.v1.ucDevice = ATOM_DEVICE_CRT2_INDEX; | ||
1636 | break; | ||
1637 | } | ||
1638 | break; | ||
1639 | case 2: | ||
1640 | args.v2.ucCRTC = radeon_crtc->crtc_id; | ||
1641 | if (radeon_encoder_is_dp_bridge(encoder)) { | ||
1642 | struct drm_connector *connector = radeon_get_connector_for_encoder(encoder); | ||
1643 | |||
1644 | if (connector->connector_type == DRM_MODE_CONNECTOR_LVDS) | ||
1645 | args.v2.ucEncodeMode = ATOM_ENCODER_MODE_LVDS; | ||
1646 | else if (connector->connector_type == DRM_MODE_CONNECTOR_VGA) | ||
1647 | args.v2.ucEncodeMode = ATOM_ENCODER_MODE_CRT; | ||
1648 | else | ||
1649 | args.v2.ucEncodeMode = atombios_get_encoder_mode(encoder); | ||
1650 | } else | ||
1651 | args.v2.ucEncodeMode = atombios_get_encoder_mode(encoder); | ||
1652 | switch (radeon_encoder->encoder_id) { | ||
1653 | case ENCODER_OBJECT_ID_INTERNAL_UNIPHY: | ||
1654 | case ENCODER_OBJECT_ID_INTERNAL_UNIPHY1: | ||
1655 | case ENCODER_OBJECT_ID_INTERNAL_UNIPHY2: | ||
1656 | case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_LVTMA: | ||
1657 | dig = radeon_encoder->enc_priv; | ||
1658 | switch (dig->dig_encoder) { | ||
1659 | case 0: | ||
1660 | args.v2.ucEncoderID = ASIC_INT_DIG1_ENCODER_ID; | ||
1661 | break; | ||
1662 | case 1: | ||
1663 | args.v2.ucEncoderID = ASIC_INT_DIG2_ENCODER_ID; | ||
1664 | break; | ||
1665 | case 2: | ||
1666 | args.v2.ucEncoderID = ASIC_INT_DIG3_ENCODER_ID; | ||
1667 | break; | ||
1668 | case 3: | ||
1669 | args.v2.ucEncoderID = ASIC_INT_DIG4_ENCODER_ID; | ||
1670 | break; | ||
1671 | case 4: | ||
1672 | args.v2.ucEncoderID = ASIC_INT_DIG5_ENCODER_ID; | ||
1673 | break; | ||
1674 | case 5: | ||
1675 | args.v2.ucEncoderID = ASIC_INT_DIG6_ENCODER_ID; | ||
1676 | break; | ||
1677 | } | ||
1678 | break; | ||
1679 | case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DVO1: | ||
1680 | args.v2.ucEncoderID = ASIC_INT_DVO_ENCODER_ID; | ||
1681 | break; | ||
1682 | case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC1: | ||
1683 | if (radeon_encoder->active_device & (ATOM_DEVICE_TV_SUPPORT)) | ||
1684 | args.v2.ucEncoderID = ASIC_INT_TV_ENCODER_ID; | ||
1685 | else if (radeon_encoder->active_device & (ATOM_DEVICE_CV_SUPPORT)) | ||
1686 | args.v2.ucEncoderID = ASIC_INT_TV_ENCODER_ID; | ||
1687 | else | ||
1688 | args.v2.ucEncoderID = ASIC_INT_DAC1_ENCODER_ID; | ||
1689 | break; | ||
1690 | case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC2: | ||
1691 | if (radeon_encoder->active_device & (ATOM_DEVICE_TV_SUPPORT)) | ||
1692 | args.v2.ucEncoderID = ASIC_INT_TV_ENCODER_ID; | ||
1693 | else if (radeon_encoder->active_device & (ATOM_DEVICE_CV_SUPPORT)) | ||
1694 | args.v2.ucEncoderID = ASIC_INT_TV_ENCODER_ID; | ||
1695 | else | ||
1696 | args.v2.ucEncoderID = ASIC_INT_DAC2_ENCODER_ID; | ||
1697 | break; | ||
1698 | } | ||
1699 | break; | ||
1700 | } | ||
1701 | break; | ||
1702 | default: | ||
1703 | DRM_ERROR("Unknown table version: %d, %d\n", frev, crev); | ||
1704 | return; | ||
1705 | } | ||
1706 | |||
1707 | atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args); | ||
1708 | |||
1709 | /* update scratch regs with new routing */ | ||
1710 | radeon_atombios_encoder_crtc_scratch_regs(encoder, radeon_crtc->crtc_id); | ||
1711 | } | ||
1712 | |||
/* Apply per-board and per-chip fixups after a mode set.
 * @encoder: encoder just programmed
 * @mode:    the mode being set (interlace flag is consulted)
 */
static void
atombios_apply_encoder_quirks(struct drm_encoder *encoder,
			      struct drm_display_mode *mode)
{
	struct drm_device *dev = encoder->dev;
	struct radeon_device *rdev = dev->dev_private;
	struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
	struct radeon_crtc *radeon_crtc = to_radeon_crtc(encoder->crtc);

	/* Funky macbooks: on the Apple 0x71C5 variant, force truncation and
	 * spatial dithering off on the LVDS (LVTMA) path.
	 */
	if ((dev->pdev->device == 0x71C5) &&
	    (dev->pdev->subsystem_vendor == 0x106b) &&
	    (dev->pdev->subsystem_device == 0x0080)) {
		if (radeon_encoder->devices & ATOM_DEVICE_LCD1_SUPPORT) {
			uint32_t lvtma_bit_depth_control = RREG32(AVIVO_LVTMA_BIT_DEPTH_CONTROL);

			lvtma_bit_depth_control &= ~AVIVO_LVTMA_BIT_DEPTH_CONTROL_TRUNCATE_EN;
			lvtma_bit_depth_control &= ~AVIVO_LVTMA_BIT_DEPTH_CONTROL_SPATIAL_DITHER_EN;

			WREG32(AVIVO_LVTMA_BIT_DEPTH_CONTROL, lvtma_bit_depth_control);
		}
	}

	/* set scaler clears this on some chips: re-program the interleave
	 * (interlace) enable in the data-format register, except for TV where
	 * the scaler setup owns it.  DCE4 uses a different register offset.
	 */
	if (ASIC_IS_AVIVO(rdev) &&
	    (!(radeon_encoder->active_device & (ATOM_DEVICE_TV_SUPPORT)))) {
		if (ASIC_IS_DCE4(rdev)) {
			if (mode->flags & DRM_MODE_FLAG_INTERLACE)
				WREG32(EVERGREEN_DATA_FORMAT + radeon_crtc->crtc_offset,
				       EVERGREEN_INTERLEAVE_EN);
			else
				WREG32(EVERGREEN_DATA_FORMAT + radeon_crtc->crtc_offset, 0);
		} else {
			if (mode->flags & DRM_MODE_FLAG_INTERLACE)
				WREG32(AVIVO_D1MODE_DATA_FORMAT + radeon_crtc->crtc_offset,
				       AVIVO_D1MODE_INTERLEAVE_EN);
			else
				WREG32(AVIVO_D1MODE_DATA_FORMAT + radeon_crtc->crtc_offset, 0);
		}
	}
}
1754 | |||
1755 | static int radeon_atom_pick_dig_encoder(struct drm_encoder *encoder) | ||
1756 | { | ||
1757 | struct drm_device *dev = encoder->dev; | ||
1758 | struct radeon_device *rdev = dev->dev_private; | ||
1759 | struct radeon_crtc *radeon_crtc = to_radeon_crtc(encoder->crtc); | ||
1760 | struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder); | ||
1761 | struct drm_encoder *test_encoder; | ||
1762 | struct radeon_encoder_atom_dig *dig; | ||
1763 | uint32_t dig_enc_in_use = 0; | ||
1764 | |||
1765 | /* DCE4/5 */ | ||
1766 | if (ASIC_IS_DCE4(rdev)) { | ||
1767 | dig = radeon_encoder->enc_priv; | ||
1768 | if (ASIC_IS_DCE41(rdev)) { | ||
1769 | /* ontario follows DCE4 */ | ||
1770 | if (rdev->family == CHIP_PALM) { | ||
1771 | if (dig->linkb) | ||
1772 | return 1; | ||
1773 | else | ||
1774 | return 0; | ||
1775 | } else | ||
1776 | /* llano follows DCE3.2 */ | ||
1777 | return radeon_crtc->crtc_id; | ||
1778 | } else { | ||
1779 | switch (radeon_encoder->encoder_id) { | ||
1780 | case ENCODER_OBJECT_ID_INTERNAL_UNIPHY: | ||
1781 | if (dig->linkb) | ||
1782 | return 1; | ||
1783 | else | ||
1784 | return 0; | ||
1785 | break; | ||
1786 | case ENCODER_OBJECT_ID_INTERNAL_UNIPHY1: | ||
1787 | if (dig->linkb) | ||
1788 | return 3; | ||
1789 | else | ||
1790 | return 2; | ||
1791 | break; | ||
1792 | case ENCODER_OBJECT_ID_INTERNAL_UNIPHY2: | ||
1793 | if (dig->linkb) | ||
1794 | return 5; | ||
1795 | else | ||
1796 | return 4; | ||
1797 | break; | ||
1798 | } | ||
1799 | } | ||
1800 | } | ||
1801 | |||
1802 | /* on DCE32 and encoder can driver any block so just crtc id */ | ||
1803 | if (ASIC_IS_DCE32(rdev)) { | ||
1804 | return radeon_crtc->crtc_id; | ||
1805 | } | ||
1806 | |||
1807 | /* on DCE3 - LVTMA can only be driven by DIGB */ | ||
1808 | list_for_each_entry(test_encoder, &dev->mode_config.encoder_list, head) { | ||
1809 | struct radeon_encoder *radeon_test_encoder; | ||
1810 | |||
1811 | if (encoder == test_encoder) | ||
1812 | continue; | ||
1813 | |||
1814 | if (!radeon_encoder_is_digital(test_encoder)) | ||
1815 | continue; | ||
1816 | |||
1817 | radeon_test_encoder = to_radeon_encoder(test_encoder); | ||
1818 | dig = radeon_test_encoder->enc_priv; | ||
1819 | |||
1820 | if (dig->dig_encoder >= 0) | ||
1821 | dig_enc_in_use |= (1 << dig->dig_encoder); | ||
1822 | } | ||
1823 | |||
1824 | if (radeon_encoder->encoder_id == ENCODER_OBJECT_ID_INTERNAL_KLDSCP_LVTMA) { | ||
1825 | if (dig_enc_in_use & 0x2) | ||
1826 | DRM_ERROR("LVDS required digital encoder 2 but it was in use - stealing\n"); | ||
1827 | return 1; | ||
1828 | } | ||
1829 | if (!(dig_enc_in_use & 1)) | ||
1830 | return 0; | ||
1831 | return 1; | ||
1832 | } | ||
1833 | |||
/* This only needs to be called once at startup */
void
radeon_atom_encoder_init(struct radeon_device *rdev)
{
	struct drm_device *dev = rdev->ddev;
	struct drm_encoder *encoder;

	/* Run the one-time INIT action on every internal digital transmitter */
	list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
		struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
		struct drm_encoder *ext_encoder = radeon_atom_get_external_encoder(encoder);

		switch (radeon_encoder->encoder_id) {
		case ENCODER_OBJECT_ID_INTERNAL_UNIPHY:
		case ENCODER_OBJECT_ID_INTERNAL_UNIPHY1:
		case ENCODER_OBJECT_ID_INTERNAL_UNIPHY2:
		case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_LVTMA:
			atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_INIT, 0, 0);
			break;
		default:
			break;
		}

		/* DCE4.1 routes through an external encoder that also needs init */
		if (ext_encoder && ASIC_IS_DCE41(rdev))
			atombios_external_encoder_setup(encoder, ext_encoder,
							EXTERNAL_ENCODER_ACTION_V3_ENCODER_INIT);
	}
}
1861 | |||
/* drm_encoder_helper_funcs.mode_set: program the encoder (and any external
 * encoder / transmitter) for the given adjusted mode.
 *
 * NOTE: the enable sequencing for DIG encoders differs by generation —
 * DCE4+ disables only the transmitter around encoder setup, while older
 * parts tear down and re-enable both the encoder and the transmitter.
 */
static void
radeon_atom_encoder_mode_set(struct drm_encoder *encoder,
			     struct drm_display_mode *mode,
			     struct drm_display_mode *adjusted_mode)
{
	struct drm_device *dev = encoder->dev;
	struct radeon_device *rdev = dev->dev_private;
	struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
	struct drm_encoder *ext_encoder = radeon_atom_get_external_encoder(encoder);

	radeon_encoder->pixel_clock = adjusted_mode->clock;

	/* AVIVO (pre-DCE4): enable YUV output only when driving TV/CV */
	if (ASIC_IS_AVIVO(rdev) && !ASIC_IS_DCE4(rdev)) {
		if (radeon_encoder->active_device & (ATOM_DEVICE_CV_SUPPORT | ATOM_DEVICE_TV_SUPPORT))
			atombios_yuv_setup(encoder, true);
		else
			atombios_yuv_setup(encoder, false);
	}

	switch (radeon_encoder->encoder_id) {
	case ENCODER_OBJECT_ID_INTERNAL_TMDS1:
	case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_TMDS1:
	case ENCODER_OBJECT_ID_INTERNAL_LVDS:
	case ENCODER_OBJECT_ID_INTERNAL_LVTM1:
		/* legacy panel encoders */
		atombios_digital_setup(encoder, PANEL_ENCODER_ACTION_ENABLE);
		break;
	case ENCODER_OBJECT_ID_INTERNAL_UNIPHY:
	case ENCODER_OBJECT_ID_INTERNAL_UNIPHY1:
	case ENCODER_OBJECT_ID_INTERNAL_UNIPHY2:
	case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_LVTMA:
		if (ASIC_IS_DCE4(rdev)) {
			/* disable the transmitter */
			atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_DISABLE, 0, 0);
			/* setup and enable the encoder */
			atombios_dig_encoder_setup(encoder, ATOM_ENCODER_CMD_SETUP, 0);

			/* enable the transmitter */
			atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_ENABLE, 0, 0);
		} else {
			/* disable the encoder and transmitter */
			atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_DISABLE, 0, 0);
			atombios_dig_encoder_setup(encoder, ATOM_DISABLE, 0);

			/* setup and enable the encoder and transmitter */
			atombios_dig_encoder_setup(encoder, ATOM_ENABLE, 0);
			atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_SETUP, 0, 0);
			atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_ENABLE, 0, 0);
		}
		break;
	case ENCODER_OBJECT_ID_INTERNAL_DDI:
	case ENCODER_OBJECT_ID_INTERNAL_DVO1:
	case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DVO1:
		atombios_dvo_setup(encoder, ATOM_ENABLE);
		break;
	case ENCODER_OBJECT_ID_INTERNAL_DAC1:
	case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC1:
	case ENCODER_OBJECT_ID_INTERNAL_DAC2:
	case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC2:
		atombios_dac_setup(encoder, ATOM_ENABLE);
		/* TV encoder is enabled only when actually driving TV/CV,
		 * otherwise it is explicitly disabled.
		 */
		if (radeon_encoder->devices & (ATOM_DEVICE_TV_SUPPORT | ATOM_DEVICE_CV_SUPPORT)) {
			if (radeon_encoder->active_device & (ATOM_DEVICE_TV_SUPPORT | ATOM_DEVICE_CV_SUPPORT))
				atombios_tv_setup(encoder, ATOM_ENABLE);
			else
				atombios_tv_setup(encoder, ATOM_DISABLE);
		}
		break;
	}

	if (ext_encoder) {
		if (ASIC_IS_DCE41(rdev))
			atombios_external_encoder_setup(encoder, ext_encoder,
							EXTERNAL_ENCODER_ACTION_V3_ENCODER_SETUP);
		else
			atombios_external_encoder_setup(encoder, ext_encoder, ATOM_ENABLE);
	}

	atombios_apply_encoder_quirks(encoder, adjusted_mode);

	/* HDMI audio/infoframes are configured after the video path is up */
	if (atombios_get_encoder_mode(encoder) == ATOM_ENCODER_MODE_HDMI) {
		r600_hdmi_enable(encoder);
		r600_hdmi_setmode(encoder, adjusted_mode);
	}
}
1945 | |||
/* Execute the AtomBIOS DAC_LoadDetection table for @connector.
 *
 * Returns true if detection was triggered (results land in the BIOS
 * scratch registers, read back by the caller), false if this encoder
 * has no analog devices or the table header cannot be parsed.
 */
static bool
atombios_dac_load_detect(struct drm_encoder *encoder, struct drm_connector *connector)
{
	struct drm_device *dev = encoder->dev;
	struct radeon_device *rdev = dev->dev_private;
	struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
	struct radeon_connector *radeon_connector = to_radeon_connector(connector);

	if (radeon_encoder->devices & (ATOM_DEVICE_TV_SUPPORT |
				       ATOM_DEVICE_CV_SUPPORT |
				       ATOM_DEVICE_CRT_SUPPORT)) {
		DAC_LOAD_DETECTION_PS_ALLOCATION args;
		int index = GetIndexIntoMasterTable(COMMAND, DAC_LoadDetection);
		uint8_t frev, crev;

		memset(&args, 0, sizeof(args));

		if (!atom_parse_cmd_header(rdev->mode_info.atom_context, index, &frev, &crev))
			return false;

		args.sDacload.ucMisc = 0;

		/* pick which physical DAC this encoder id maps to */
		if ((radeon_encoder->encoder_id == ENCODER_OBJECT_ID_INTERNAL_DAC1) ||
		    (radeon_encoder->encoder_id == ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC1))
			args.sDacload.ucDacType = ATOM_DAC_A;
		else
			args.sDacload.ucDacType = ATOM_DAC_B;

		/* device id selects what load to test; component (CV) and TV
		 * paths additionally request YPrPb sensing on crev >= 3
		 */
		if (radeon_connector->devices & ATOM_DEVICE_CRT1_SUPPORT)
			args.sDacload.usDeviceID = cpu_to_le16(ATOM_DEVICE_CRT1_SUPPORT);
		else if (radeon_connector->devices & ATOM_DEVICE_CRT2_SUPPORT)
			args.sDacload.usDeviceID = cpu_to_le16(ATOM_DEVICE_CRT2_SUPPORT);
		else if (radeon_connector->devices & ATOM_DEVICE_CV_SUPPORT) {
			args.sDacload.usDeviceID = cpu_to_le16(ATOM_DEVICE_CV_SUPPORT);
			if (crev >= 3)
				args.sDacload.ucMisc = DAC_LOAD_MISC_YPrPb;
		} else if (radeon_connector->devices & ATOM_DEVICE_TV1_SUPPORT) {
			args.sDacload.usDeviceID = cpu_to_le16(ATOM_DEVICE_TV1_SUPPORT);
			if (crev >= 3)
				args.sDacload.ucMisc = DAC_LOAD_MISC_YPrPb;
		}

		atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);

		return true;
	} else
		return false;
}
1994 | |||
/* Analog (DAC) load detection: run the BIOS DAC_LoadDetection table and
 * decode the connection state from the BIOS 0 scratch register.
 */
static enum drm_connector_status
radeon_atom_dac_detect(struct drm_encoder *encoder, struct drm_connector *connector)
{
	struct drm_device *dev = encoder->dev;
	struct radeon_device *rdev = dev->dev_private;
	struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
	struct radeon_connector *radeon_connector = to_radeon_connector(connector);
	uint32_t bios_0_scratch;

	if (!atombios_dac_load_detect(encoder, connector)) {
		DRM_DEBUG_KMS("detect returned false \n");
		return connector_status_unknown;
	}

	/* scratch register moved on R600+ */
	if (rdev->family >= CHIP_R600)
		bios_0_scratch = RREG32(R600_BIOS_0_SCRATCH);
	else
		bios_0_scratch = RREG32(RADEON_BIOS_0_SCRATCH);

	DRM_DEBUG_KMS("Bios 0 scratch %x %08x\n", bios_0_scratch, radeon_encoder->devices);
	if (radeon_connector->devices & ATOM_DEVICE_CRT1_SUPPORT) {
		if (bios_0_scratch & ATOM_S0_CRT1_MASK)
			return connector_status_connected;
	}
	if (radeon_connector->devices & ATOM_DEVICE_CRT2_SUPPORT) {
		if (bios_0_scratch & ATOM_S0_CRT2_MASK)
			return connector_status_connected;
	}
	if (radeon_connector->devices & ATOM_DEVICE_CV_SUPPORT) {
		if (bios_0_scratch & (ATOM_S0_CV_MASK|ATOM_S0_CV_MASK_A))
			return connector_status_connected;
	}
	if (radeon_connector->devices & ATOM_DEVICE_TV1_SUPPORT) {
		if (bios_0_scratch & (ATOM_S0_TV1_COMPOSITE | ATOM_S0_TV1_COMPOSITE_A))
			return connector_status_connected; /* CTV */
		else if (bios_0_scratch & (ATOM_S0_TV1_SVIDEO | ATOM_S0_TV1_SVIDEO_A))
			return connector_status_connected; /* STV */
	}
	return connector_status_disconnected;
}
2035 | |||
/* Load detection through an external DP bridge (DCE4+ with an external
 * encoder only).  Triggers the bridge's DAC-load-detect action and decodes
 * the result from the BIOS 0 scratch register, like the analog path.
 */
static enum drm_connector_status
radeon_atom_dig_detect(struct drm_encoder *encoder, struct drm_connector *connector)
{
	struct drm_device *dev = encoder->dev;
	struct radeon_device *rdev = dev->dev_private;
	struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
	struct radeon_connector *radeon_connector = to_radeon_connector(connector);
	struct drm_encoder *ext_encoder = radeon_atom_get_external_encoder(encoder);
	u32 bios_0_scratch;

	/* only supported on DCE4+ parts with a DP bridge and a CRT device */
	if (!ASIC_IS_DCE4(rdev))
		return connector_status_unknown;

	if (!ext_encoder)
		return connector_status_unknown;

	if ((radeon_connector->devices & ATOM_DEVICE_CRT_SUPPORT) == 0)
		return connector_status_unknown;

	/* load detect on the dp bridge */
	atombios_external_encoder_setup(encoder, ext_encoder,
					EXTERNAL_ENCODER_ACTION_V3_DACLOAD_DETECTION);

	bios_0_scratch = RREG32(R600_BIOS_0_SCRATCH);

	DRM_DEBUG_KMS("Bios 0 scratch %x %08x\n", bios_0_scratch, radeon_encoder->devices);
	if (radeon_connector->devices & ATOM_DEVICE_CRT1_SUPPORT) {
		if (bios_0_scratch & ATOM_S0_CRT1_MASK)
			return connector_status_connected;
	}
	if (radeon_connector->devices & ATOM_DEVICE_CRT2_SUPPORT) {
		if (bios_0_scratch & ATOM_S0_CRT2_MASK)
			return connector_status_connected;
	}
	if (radeon_connector->devices & ATOM_DEVICE_CV_SUPPORT) {
		if (bios_0_scratch & (ATOM_S0_CV_MASK|ATOM_S0_CV_MASK_A))
			return connector_status_connected;
	}
	if (radeon_connector->devices & ATOM_DEVICE_TV1_SUPPORT) {
		if (bios_0_scratch & (ATOM_S0_TV1_COMPOSITE | ATOM_S0_TV1_COMPOSITE_A))
			return connector_status_connected; /* CTV */
		else if (bios_0_scratch & (ATOM_S0_TV1_SVIDEO | ATOM_S0_TV1_SVIDEO_A))
			return connector_status_connected; /* STV */
	}
	return connector_status_disconnected;
}
2082 | |||
2083 | void | ||
2084 | radeon_atom_ext_encoder_setup_ddc(struct drm_encoder *encoder) | ||
2085 | { | ||
2086 | struct drm_encoder *ext_encoder = radeon_atom_get_external_encoder(encoder); | ||
2087 | |||
2088 | if (ext_encoder) | ||
2089 | /* ddc_setup on the dp bridge */ | ||
2090 | atombios_external_encoder_setup(encoder, ext_encoder, | ||
2091 | EXTERNAL_ENCODER_ACTION_V3_DDC_SETUP); | ||
2092 | |||
2093 | } | ||
2094 | |||
/* drm_encoder_helper_funcs.prepare: get the encoder ready for a mode set —
 * assign a DIG block, lock the output, power the encoder off, power eDP
 * panels on, and program the crtc source routing.
 */
static void radeon_atom_encoder_prepare(struct drm_encoder *encoder)
{
	struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
	struct drm_connector *connector = radeon_get_connector_for_encoder(encoder);

	/* digital paths (and DP bridges) need a DIG encoder block picked
	 * before the mode set programs it
	 */
	if ((radeon_encoder->active_device &
	     (ATOM_DEVICE_DFP_SUPPORT | ATOM_DEVICE_LCD_SUPPORT)) ||
	    radeon_encoder_is_dp_bridge(encoder)) {
		struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv;
		if (dig)
			dig->dig_encoder = radeon_atom_pick_dig_encoder(encoder);
	}

	radeon_atom_output_lock(encoder, true);
	radeon_atom_encoder_dpms(encoder, DRM_MODE_DPMS_OFF);

	if (connector) {
		struct radeon_connector *radeon_connector = to_radeon_connector(connector);

		/* select the clock/data port if it uses a router */
		if (radeon_connector->router.cd_valid)
			radeon_router_select_cd_port(radeon_connector);

		/* turn eDP panel on for mode set */
		if (connector->connector_type == DRM_MODE_CONNECTOR_eDP)
			atombios_set_edp_panel_power(connector,
						     ATOM_TRANSMITTER_ACTION_POWER_ON);
	}

	/* this is needed for the pll/ss setup to work correctly in some cases */
	atombios_set_encoder_crtc_source(encoder);
}
2127 | |||
/* drm_encoder_helper_funcs.commit: power the encoder back on after the mode
 * set and release the output lock taken in prepare().
 */
static void radeon_atom_encoder_commit(struct drm_encoder *encoder)
{
	radeon_atom_encoder_dpms(encoder, DRM_MODE_DPMS_ON);
	radeon_atom_output_lock(encoder, false);
}
2133 | |||
2134 | static void radeon_atom_encoder_disable(struct drm_encoder *encoder) | ||
2135 | { | ||
2136 | struct drm_device *dev = encoder->dev; | ||
2137 | struct radeon_device *rdev = dev->dev_private; | ||
2138 | struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder); | ||
2139 | struct radeon_encoder_atom_dig *dig; | ||
2140 | |||
2141 | /* check for pre-DCE3 cards with shared encoders; | ||
2142 | * can't really use the links individually, so don't disable | ||
2143 | * the encoder if it's in use by another connector | ||
2144 | */ | ||
2145 | if (!ASIC_IS_DCE3(rdev)) { | ||
2146 | struct drm_encoder *other_encoder; | ||
2147 | struct radeon_encoder *other_radeon_encoder; | ||
2148 | |||
2149 | list_for_each_entry(other_encoder, &dev->mode_config.encoder_list, head) { | ||
2150 | other_radeon_encoder = to_radeon_encoder(other_encoder); | ||
2151 | if ((radeon_encoder->encoder_id == other_radeon_encoder->encoder_id) && | ||
2152 | drm_helper_encoder_in_use(other_encoder)) | ||
2153 | goto disable_done; | ||
2154 | } | ||
2155 | } | ||
2156 | |||
2157 | radeon_atom_encoder_dpms(encoder, DRM_MODE_DPMS_OFF); | ||
2158 | |||
2159 | switch (radeon_encoder->encoder_id) { | ||
2160 | case ENCODER_OBJECT_ID_INTERNAL_TMDS1: | ||
2161 | case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_TMDS1: | ||
2162 | case ENCODER_OBJECT_ID_INTERNAL_LVDS: | ||
2163 | case ENCODER_OBJECT_ID_INTERNAL_LVTM1: | ||
2164 | atombios_digital_setup(encoder, PANEL_ENCODER_ACTION_DISABLE); | ||
2165 | break; | ||
2166 | case ENCODER_OBJECT_ID_INTERNAL_UNIPHY: | ||
2167 | case ENCODER_OBJECT_ID_INTERNAL_UNIPHY1: | ||
2168 | case ENCODER_OBJECT_ID_INTERNAL_UNIPHY2: | ||
2169 | case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_LVTMA: | ||
2170 | if (ASIC_IS_DCE4(rdev)) | ||
2171 | /* disable the transmitter */ | ||
2172 | atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_DISABLE, 0, 0); | ||
2173 | else { | ||
2174 | /* disable the encoder and transmitter */ | ||
2175 | atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_DISABLE, 0, 0); | ||
2176 | atombios_dig_encoder_setup(encoder, ATOM_DISABLE, 0); | ||
2177 | } | ||
2178 | break; | ||
2179 | case ENCODER_OBJECT_ID_INTERNAL_DDI: | ||
2180 | case ENCODER_OBJECT_ID_INTERNAL_DVO1: | ||
2181 | case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DVO1: | ||
2182 | atombios_dvo_setup(encoder, ATOM_DISABLE); | ||
2183 | break; | ||
2184 | case ENCODER_OBJECT_ID_INTERNAL_DAC1: | ||
2185 | case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC1: | ||
2186 | case ENCODER_OBJECT_ID_INTERNAL_DAC2: | ||
2187 | case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC2: | ||
2188 | atombios_dac_setup(encoder, ATOM_DISABLE); | ||
2189 | if (radeon_encoder->devices & (ATOM_DEVICE_TV_SUPPORT | ATOM_DEVICE_CV_SUPPORT)) | ||
2190 | atombios_tv_setup(encoder, ATOM_DISABLE); | ||
2191 | break; | ||
2192 | } | ||
2193 | |||
2194 | disable_done: | ||
2195 | if (radeon_encoder_is_digital(encoder)) { | ||
2196 | if (atombios_get_encoder_mode(encoder) == ATOM_ENCODER_MODE_HDMI) | ||
2197 | r600_hdmi_disable(encoder); | ||
2198 | dig = radeon_encoder->enc_priv; | ||
2199 | dig->dig_encoder = -1; | ||
2200 | } | ||
2201 | radeon_encoder->active_device = 0; | ||
2202 | } | ||
2203 | |||
/* these are handled by the primary encoders */
/* External (bridge) encoders are programmed through their primary internal
 * encoder's callbacks, so the helper hooks below are intentionally empty
 * (mode_fixup accepts every mode unchanged).
 */
static void radeon_atom_ext_prepare(struct drm_encoder *encoder)
{

}

static void radeon_atom_ext_commit(struct drm_encoder *encoder)
{

}

static void
radeon_atom_ext_mode_set(struct drm_encoder *encoder,
			 struct drm_display_mode *mode,
			 struct drm_display_mode *adjusted_mode)
{

}

static void radeon_atom_ext_disable(struct drm_encoder *encoder)
{

}

static void
radeon_atom_ext_dpms(struct drm_encoder *encoder, int mode)
{

}

static bool radeon_atom_ext_mode_fixup(struct drm_encoder *encoder,
				       struct drm_display_mode *mode,
				       struct drm_display_mode *adjusted_mode)
{
	return true;
}

/* helper vtable for external (bridge) encoders */
static const struct drm_encoder_helper_funcs radeon_atom_ext_helper_funcs = {
	.dpms = radeon_atom_ext_dpms,
	.mode_fixup = radeon_atom_ext_mode_fixup,
	.prepare = radeon_atom_ext_prepare,
	.mode_set = radeon_atom_ext_mode_set,
	.commit = radeon_atom_ext_commit,
	.disable = radeon_atom_ext_disable,
	/* no detect for TMDS/LVDS yet */
};
2250 | |||
/* helper vtable for internal digital (TMDS/LVDS/UNIPHY) encoders */
static const struct drm_encoder_helper_funcs radeon_atom_dig_helper_funcs = {
	.dpms = radeon_atom_encoder_dpms,
	.mode_fixup = radeon_atom_mode_fixup,
	.prepare = radeon_atom_encoder_prepare,
	.mode_set = radeon_atom_encoder_mode_set,
	.commit = radeon_atom_encoder_commit,
	.disable = radeon_atom_encoder_disable,
	.detect = radeon_atom_dig_detect,
};

/* helper vtable for analog DAC/TVDAC encoders (no .disable hook —
 * DACs are shut off via dpms)
 */
static const struct drm_encoder_helper_funcs radeon_atom_dac_helper_funcs = {
	.dpms = radeon_atom_encoder_dpms,
	.mode_fixup = radeon_atom_mode_fixup,
	.prepare = radeon_atom_encoder_prepare,
	.mode_set = radeon_atom_encoder_mode_set,
	.commit = radeon_atom_encoder_commit,
	.detect = radeon_atom_dac_detect,
};
2269 | |||
2270 | void radeon_enc_destroy(struct drm_encoder *encoder) | ||
2271 | { | ||
2272 | struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder); | ||
2273 | kfree(radeon_encoder->enc_priv); | ||
2274 | drm_encoder_cleanup(encoder); | ||
2275 | kfree(radeon_encoder); | ||
2276 | } | ||
2277 | |||
/* base encoder vtable shared by all AtomBIOS encoders */
static const struct drm_encoder_funcs radeon_atom_enc_funcs = {
	.destroy = radeon_enc_destroy,
};
2281 | |||
2282 | struct radeon_encoder_atom_dac * | ||
2283 | radeon_atombios_set_dac_info(struct radeon_encoder *radeon_encoder) | ||
2284 | { | ||
2285 | struct drm_device *dev = radeon_encoder->base.dev; | ||
2286 | struct radeon_device *rdev = dev->dev_private; | ||
2287 | struct radeon_encoder_atom_dac *dac = kzalloc(sizeof(struct radeon_encoder_atom_dac), GFP_KERNEL); | ||
2288 | |||
2289 | if (!dac) | ||
2290 | return NULL; | ||
2291 | |||
2292 | dac->tv_std = radeon_atombios_get_tv_info(rdev); | ||
2293 | return dac; | ||
2294 | } | ||
2295 | |||
2296 | struct radeon_encoder_atom_dig * | ||
2297 | radeon_atombios_set_dig_info(struct radeon_encoder *radeon_encoder) | ||
2298 | { | ||
2299 | int encoder_enum = (radeon_encoder->encoder_enum & ENUM_ID_MASK) >> ENUM_ID_SHIFT; | ||
2300 | struct radeon_encoder_atom_dig *dig = kzalloc(sizeof(struct radeon_encoder_atom_dig), GFP_KERNEL); | ||
2301 | |||
2302 | if (!dig) | ||
2303 | return NULL; | ||
2304 | |||
2305 | /* coherent mode by default */ | ||
2306 | dig->coherent_mode = true; | ||
2307 | dig->dig_encoder = -1; | ||
2308 | |||
2309 | if (encoder_enum == 2) | ||
2310 | dig->linkb = true; | ||
2311 | else | ||
2312 | dig->linkb = false; | ||
2313 | |||
2314 | return dig; | ||
2315 | } | ||
2316 | |||
/* Register an AtomBIOS-described encoder with DRM.
 * @encoder_enum:     full object enum from the BIOS tables (id + enum bits)
 * @supported_device: ATOM_DEVICE_* mask this encoder can drive
 * @caps:             encoder capability flags from the BIOS
 *
 * If an encoder with the same enum already exists, its device mask is
 * extended instead of creating a duplicate.
 */
void
radeon_add_atom_encoder(struct drm_device *dev,
			uint32_t encoder_enum,
			uint32_t supported_device,
			u16 caps)
{
	struct radeon_device *rdev = dev->dev_private;
	struct drm_encoder *encoder;
	struct radeon_encoder *radeon_encoder;

	/* see if we already added it */
	list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
		radeon_encoder = to_radeon_encoder(encoder);
		if (radeon_encoder->encoder_enum == encoder_enum) {
			radeon_encoder->devices |= supported_device;
			return;
		}

	}

	/* add a new one */
	radeon_encoder = kzalloc(sizeof(struct radeon_encoder), GFP_KERNEL);
	if (!radeon_encoder)
		return;

	encoder = &radeon_encoder->base;
	/* bitmask of crtcs this encoder may be routed to */
	switch (rdev->num_crtc) {
	case 1:
		encoder->possible_crtcs = 0x1;
		break;
	case 2:
	default:
		encoder->possible_crtcs = 0x3;
		break;
	case 4:
		encoder->possible_crtcs = 0xf;
		break;
	case 6:
		encoder->possible_crtcs = 0x3f;
		break;
	}

	radeon_encoder->enc_priv = NULL;

	radeon_encoder->encoder_enum = encoder_enum;
	radeon_encoder->encoder_id = (encoder_enum & OBJECT_ID_MASK) >> OBJECT_ID_SHIFT;
	radeon_encoder->devices = supported_device;
	radeon_encoder->rmx_type = RMX_OFF;
	radeon_encoder->underscan_type = UNDERSCAN_OFF;
	radeon_encoder->is_ext_encoder = false;
	radeon_encoder->caps = caps;

	/* pick DRM encoder type, private data and helper vtable based on the
	 * BIOS encoder object id and the devices it supports
	 */
	switch (radeon_encoder->encoder_id) {
	case ENCODER_OBJECT_ID_INTERNAL_LVDS:
	case ENCODER_OBJECT_ID_INTERNAL_TMDS1:
	case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_TMDS1:
	case ENCODER_OBJECT_ID_INTERNAL_LVTM1:
		if (radeon_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT)) {
			/* panels get full scaling and LVDS panel info */
			radeon_encoder->rmx_type = RMX_FULL;
			drm_encoder_init(dev, encoder, &radeon_atom_enc_funcs, DRM_MODE_ENCODER_LVDS);
			radeon_encoder->enc_priv = radeon_atombios_get_lvds_info(radeon_encoder);
		} else {
			drm_encoder_init(dev, encoder, &radeon_atom_enc_funcs, DRM_MODE_ENCODER_TMDS);
			radeon_encoder->enc_priv = radeon_atombios_set_dig_info(radeon_encoder);
		}
		drm_encoder_helper_add(encoder, &radeon_atom_dig_helper_funcs);
		break;
	case ENCODER_OBJECT_ID_INTERNAL_DAC1:
		drm_encoder_init(dev, encoder, &radeon_atom_enc_funcs, DRM_MODE_ENCODER_DAC);
		radeon_encoder->enc_priv = radeon_atombios_set_dac_info(radeon_encoder);
		drm_encoder_helper_add(encoder, &radeon_atom_dac_helper_funcs);
		break;
	case ENCODER_OBJECT_ID_INTERNAL_DAC2:
	case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC1:
	case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC2:
		drm_encoder_init(dev, encoder, &radeon_atom_enc_funcs, DRM_MODE_ENCODER_TVDAC);
		radeon_encoder->enc_priv = radeon_atombios_set_dac_info(radeon_encoder);
		drm_encoder_helper_add(encoder, &radeon_atom_dac_helper_funcs);
		break;
	case ENCODER_OBJECT_ID_INTERNAL_DVO1:
	case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DVO1:
	case ENCODER_OBJECT_ID_INTERNAL_DDI:
	case ENCODER_OBJECT_ID_INTERNAL_UNIPHY:
	case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_LVTMA:
	case ENCODER_OBJECT_ID_INTERNAL_UNIPHY1:
	case ENCODER_OBJECT_ID_INTERNAL_UNIPHY2:
		if (radeon_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT)) {
			radeon_encoder->rmx_type = RMX_FULL;
			drm_encoder_init(dev, encoder, &radeon_atom_enc_funcs, DRM_MODE_ENCODER_LVDS);
			radeon_encoder->enc_priv = radeon_atombios_get_lvds_info(radeon_encoder);
		} else if (radeon_encoder->devices & (ATOM_DEVICE_CRT_SUPPORT)) {
			drm_encoder_init(dev, encoder, &radeon_atom_enc_funcs, DRM_MODE_ENCODER_DAC);
			radeon_encoder->enc_priv = radeon_atombios_set_dig_info(radeon_encoder);
		} else {
			drm_encoder_init(dev, encoder, &radeon_atom_enc_funcs, DRM_MODE_ENCODER_TMDS);
			radeon_encoder->enc_priv = radeon_atombios_set_dig_info(radeon_encoder);
		}
		drm_encoder_helper_add(encoder, &radeon_atom_dig_helper_funcs);
		break;
	case ENCODER_OBJECT_ID_SI170B:
	case ENCODER_OBJECT_ID_CH7303:
	case ENCODER_OBJECT_ID_EXTERNAL_SDVOA:
	case ENCODER_OBJECT_ID_EXTERNAL_SDVOB:
	case ENCODER_OBJECT_ID_TITFP513:
	case ENCODER_OBJECT_ID_VT1623:
	case ENCODER_OBJECT_ID_HDMI_SI1930:
	case ENCODER_OBJECT_ID_TRAVIS:
	case ENCODER_OBJECT_ID_NUTMEG:
		/* these are handled by the primary encoders */
		radeon_encoder->is_ext_encoder = true;
		if (radeon_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT))
			drm_encoder_init(dev, encoder, &radeon_atom_enc_funcs, DRM_MODE_ENCODER_LVDS);
		else if (radeon_encoder->devices & (ATOM_DEVICE_CRT_SUPPORT))
			drm_encoder_init(dev, encoder, &radeon_atom_enc_funcs, DRM_MODE_ENCODER_DAC);
		else
			drm_encoder_init(dev, encoder, &radeon_atom_enc_funcs, DRM_MODE_ENCODER_TMDS);
		drm_encoder_helper_add(encoder, &radeon_atom_ext_helper_funcs);
		break;
	}
}
diff --git a/drivers/gpu/drm/radeon/radeon_gart.c b/drivers/gpu/drm/radeon/radeon_gart.c index fdc3a9a54bf8..ba7ab79e12c1 100644 --- a/drivers/gpu/drm/radeon/radeon_gart.c +++ b/drivers/gpu/drm/radeon/radeon_gart.c | |||
@@ -49,27 +49,27 @@ int radeon_gart_table_ram_alloc(struct radeon_device *rdev) | |||
49 | rdev->gart.table_size >> PAGE_SHIFT); | 49 | rdev->gart.table_size >> PAGE_SHIFT); |
50 | } | 50 | } |
51 | #endif | 51 | #endif |
52 | rdev->gart.table.ram.ptr = ptr; | 52 | rdev->gart.ptr = ptr; |
53 | memset((void *)rdev->gart.table.ram.ptr, 0, rdev->gart.table_size); | 53 | memset((void *)rdev->gart.ptr, 0, rdev->gart.table_size); |
54 | return 0; | 54 | return 0; |
55 | } | 55 | } |
56 | 56 | ||
57 | void radeon_gart_table_ram_free(struct radeon_device *rdev) | 57 | void radeon_gart_table_ram_free(struct radeon_device *rdev) |
58 | { | 58 | { |
59 | if (rdev->gart.table.ram.ptr == NULL) { | 59 | if (rdev->gart.ptr == NULL) { |
60 | return; | 60 | return; |
61 | } | 61 | } |
62 | #ifdef CONFIG_X86 | 62 | #ifdef CONFIG_X86 |
63 | if (rdev->family == CHIP_RS400 || rdev->family == CHIP_RS480 || | 63 | if (rdev->family == CHIP_RS400 || rdev->family == CHIP_RS480 || |
64 | rdev->family == CHIP_RS690 || rdev->family == CHIP_RS740) { | 64 | rdev->family == CHIP_RS690 || rdev->family == CHIP_RS740) { |
65 | set_memory_wb((unsigned long)rdev->gart.table.ram.ptr, | 65 | set_memory_wb((unsigned long)rdev->gart.ptr, |
66 | rdev->gart.table_size >> PAGE_SHIFT); | 66 | rdev->gart.table_size >> PAGE_SHIFT); |
67 | } | 67 | } |
68 | #endif | 68 | #endif |
69 | pci_free_consistent(rdev->pdev, rdev->gart.table_size, | 69 | pci_free_consistent(rdev->pdev, rdev->gart.table_size, |
70 | (void *)rdev->gart.table.ram.ptr, | 70 | (void *)rdev->gart.ptr, |
71 | rdev->gart.table_addr); | 71 | rdev->gart.table_addr); |
72 | rdev->gart.table.ram.ptr = NULL; | 72 | rdev->gart.ptr = NULL; |
73 | rdev->gart.table_addr = 0; | 73 | rdev->gart.table_addr = 0; |
74 | } | 74 | } |
75 | 75 | ||
@@ -77,10 +77,10 @@ int radeon_gart_table_vram_alloc(struct radeon_device *rdev) | |||
77 | { | 77 | { |
78 | int r; | 78 | int r; |
79 | 79 | ||
80 | if (rdev->gart.table.vram.robj == NULL) { | 80 | if (rdev->gart.robj == NULL) { |
81 | r = radeon_bo_create(rdev, rdev->gart.table_size, | 81 | r = radeon_bo_create(rdev, rdev->gart.table_size, |
82 | PAGE_SIZE, true, RADEON_GEM_DOMAIN_VRAM, | 82 | PAGE_SIZE, true, RADEON_GEM_DOMAIN_VRAM, |
83 | &rdev->gart.table.vram.robj); | 83 | &rdev->gart.robj); |
84 | if (r) { | 84 | if (r) { |
85 | return r; | 85 | return r; |
86 | } | 86 | } |
@@ -93,38 +93,46 @@ int radeon_gart_table_vram_pin(struct radeon_device *rdev) | |||
93 | uint64_t gpu_addr; | 93 | uint64_t gpu_addr; |
94 | int r; | 94 | int r; |
95 | 95 | ||
96 | r = radeon_bo_reserve(rdev->gart.table.vram.robj, false); | 96 | r = radeon_bo_reserve(rdev->gart.robj, false); |
97 | if (unlikely(r != 0)) | 97 | if (unlikely(r != 0)) |
98 | return r; | 98 | return r; |
99 | r = radeon_bo_pin(rdev->gart.table.vram.robj, | 99 | r = radeon_bo_pin(rdev->gart.robj, |
100 | RADEON_GEM_DOMAIN_VRAM, &gpu_addr); | 100 | RADEON_GEM_DOMAIN_VRAM, &gpu_addr); |
101 | if (r) { | 101 | if (r) { |
102 | radeon_bo_unreserve(rdev->gart.table.vram.robj); | 102 | radeon_bo_unreserve(rdev->gart.robj); |
103 | return r; | 103 | return r; |
104 | } | 104 | } |
105 | r = radeon_bo_kmap(rdev->gart.table.vram.robj, | 105 | r = radeon_bo_kmap(rdev->gart.robj, &rdev->gart.ptr); |
106 | (void **)&rdev->gart.table.vram.ptr); | ||
107 | if (r) | 106 | if (r) |
108 | radeon_bo_unpin(rdev->gart.table.vram.robj); | 107 | radeon_bo_unpin(rdev->gart.robj); |
109 | radeon_bo_unreserve(rdev->gart.table.vram.robj); | 108 | radeon_bo_unreserve(rdev->gart.robj); |
110 | rdev->gart.table_addr = gpu_addr; | 109 | rdev->gart.table_addr = gpu_addr; |
111 | return r; | 110 | return r; |
112 | } | 111 | } |
113 | 112 | ||
114 | void radeon_gart_table_vram_free(struct radeon_device *rdev) | 113 | void radeon_gart_table_vram_unpin(struct radeon_device *rdev) |
115 | { | 114 | { |
116 | int r; | 115 | int r; |
117 | 116 | ||
118 | if (rdev->gart.table.vram.robj == NULL) { | 117 | if (rdev->gart.robj == NULL) { |
119 | return; | 118 | return; |
120 | } | 119 | } |
121 | r = radeon_bo_reserve(rdev->gart.table.vram.robj, false); | 120 | r = radeon_bo_reserve(rdev->gart.robj, false); |
122 | if (likely(r == 0)) { | 121 | if (likely(r == 0)) { |
123 | radeon_bo_kunmap(rdev->gart.table.vram.robj); | 122 | radeon_bo_kunmap(rdev->gart.robj); |
124 | radeon_bo_unpin(rdev->gart.table.vram.robj); | 123 | radeon_bo_unpin(rdev->gart.robj); |
125 | radeon_bo_unreserve(rdev->gart.table.vram.robj); | 124 | radeon_bo_unreserve(rdev->gart.robj); |
125 | rdev->gart.ptr = NULL; | ||
126 | } | 126 | } |
127 | radeon_bo_unref(&rdev->gart.table.vram.robj); | 127 | } |
128 | |||
129 | void radeon_gart_table_vram_free(struct radeon_device *rdev) | ||
130 | { | ||
131 | if (rdev->gart.robj == NULL) { | ||
132 | return; | ||
133 | } | ||
134 | radeon_gart_table_vram_unpin(rdev); | ||
135 | radeon_bo_unref(&rdev->gart.robj); | ||
128 | } | 136 | } |
129 | 137 | ||
130 | 138 | ||
@@ -151,12 +159,14 @@ void radeon_gart_unbind(struct radeon_device *rdev, unsigned offset, | |||
151 | if (rdev->gart.pages[p]) { | 159 | if (rdev->gart.pages[p]) { |
152 | if (!rdev->gart.ttm_alloced[p]) | 160 | if (!rdev->gart.ttm_alloced[p]) |
153 | pci_unmap_page(rdev->pdev, rdev->gart.pages_addr[p], | 161 | pci_unmap_page(rdev->pdev, rdev->gart.pages_addr[p], |
154 | PAGE_SIZE, PCI_DMA_BIDIRECTIONAL); | 162 | PAGE_SIZE, PCI_DMA_BIDIRECTIONAL); |
155 | rdev->gart.pages[p] = NULL; | 163 | rdev->gart.pages[p] = NULL; |
156 | rdev->gart.pages_addr[p] = rdev->dummy_page.addr; | 164 | rdev->gart.pages_addr[p] = rdev->dummy_page.addr; |
157 | page_base = rdev->gart.pages_addr[p]; | 165 | page_base = rdev->gart.pages_addr[p]; |
158 | for (j = 0; j < (PAGE_SIZE / RADEON_GPU_PAGE_SIZE); j++, t++) { | 166 | for (j = 0; j < (PAGE_SIZE / RADEON_GPU_PAGE_SIZE); j++, t++) { |
159 | radeon_gart_set_page(rdev, t, page_base); | 167 | if (rdev->gart.ptr) { |
168 | radeon_gart_set_page(rdev, t, page_base); | ||
169 | } | ||
160 | page_base += RADEON_GPU_PAGE_SIZE; | 170 | page_base += RADEON_GPU_PAGE_SIZE; |
161 | } | 171 | } |
162 | } | 172 | } |
@@ -199,10 +209,12 @@ int radeon_gart_bind(struct radeon_device *rdev, unsigned offset, | |||
199 | } | 209 | } |
200 | } | 210 | } |
201 | rdev->gart.pages[p] = pagelist[i]; | 211 | rdev->gart.pages[p] = pagelist[i]; |
202 | page_base = rdev->gart.pages_addr[p]; | 212 | if (rdev->gart.ptr) { |
203 | for (j = 0; j < (PAGE_SIZE / RADEON_GPU_PAGE_SIZE); j++, t++) { | 213 | page_base = rdev->gart.pages_addr[p]; |
204 | radeon_gart_set_page(rdev, t, page_base); | 214 | for (j = 0; j < (PAGE_SIZE / RADEON_GPU_PAGE_SIZE); j++, t++) { |
205 | page_base += RADEON_GPU_PAGE_SIZE; | 215 | radeon_gart_set_page(rdev, t, page_base); |
216 | page_base += RADEON_GPU_PAGE_SIZE; | ||
217 | } | ||
206 | } | 218 | } |
207 | } | 219 | } |
208 | mb(); | 220 | mb(); |
@@ -215,6 +227,9 @@ void radeon_gart_restore(struct radeon_device *rdev) | |||
215 | int i, j, t; | 227 | int i, j, t; |
216 | u64 page_base; | 228 | u64 page_base; |
217 | 229 | ||
230 | if (!rdev->gart.ptr) { | ||
231 | return; | ||
232 | } | ||
218 | for (i = 0, t = 0; i < rdev->gart.num_cpu_pages; i++) { | 233 | for (i = 0, t = 0; i < rdev->gart.num_cpu_pages; i++) { |
219 | page_base = rdev->gart.pages_addr[i]; | 234 | page_base = rdev->gart.pages_addr[i]; |
220 | for (j = 0; j < (PAGE_SIZE / RADEON_GPU_PAGE_SIZE); j++, t++) { | 235 | for (j = 0; j < (PAGE_SIZE / RADEON_GPU_PAGE_SIZE); j++, t++) { |
diff --git a/drivers/gpu/drm/radeon/radeon_i2c.c b/drivers/gpu/drm/radeon/radeon_i2c.c index e6d110ce2331..7bb1b079f480 100644 --- a/drivers/gpu/drm/radeon/radeon_i2c.c +++ b/drivers/gpu/drm/radeon/radeon_i2c.c | |||
@@ -34,7 +34,7 @@ | |||
34 | * radeon_ddc_probe | 34 | * radeon_ddc_probe |
35 | * | 35 | * |
36 | */ | 36 | */ |
37 | bool radeon_ddc_probe(struct radeon_connector *radeon_connector, bool requires_extended_probe) | 37 | bool radeon_ddc_probe(struct radeon_connector *radeon_connector) |
38 | { | 38 | { |
39 | u8 out = 0x0; | 39 | u8 out = 0x0; |
40 | u8 buf[8]; | 40 | u8 buf[8]; |
@@ -49,15 +49,11 @@ bool radeon_ddc_probe(struct radeon_connector *radeon_connector, bool requires_e | |||
49 | { | 49 | { |
50 | .addr = 0x50, | 50 | .addr = 0x50, |
51 | .flags = I2C_M_RD, | 51 | .flags = I2C_M_RD, |
52 | .len = 1, | 52 | .len = 8, |
53 | .buf = buf, | 53 | .buf = buf, |
54 | } | 54 | } |
55 | }; | 55 | }; |
56 | 56 | ||
57 | /* Read 8 bytes from i2c for extended probe of EDID header */ | ||
58 | if (requires_extended_probe) | ||
59 | msgs[1].len = 8; | ||
60 | |||
61 | /* on hw with routers, select right port */ | 57 | /* on hw with routers, select right port */ |
62 | if (radeon_connector->router.ddc_valid) | 58 | if (radeon_connector->router.ddc_valid) |
63 | radeon_router_select_ddc_port(radeon_connector); | 59 | radeon_router_select_ddc_port(radeon_connector); |
@@ -66,17 +62,15 @@ bool radeon_ddc_probe(struct radeon_connector *radeon_connector, bool requires_e | |||
66 | if (ret != 2) | 62 | if (ret != 2) |
67 | /* Couldn't find an accessible DDC on this connector */ | 63 | /* Couldn't find an accessible DDC on this connector */ |
68 | return false; | 64 | return false; |
69 | if (requires_extended_probe) { | 65 | /* Probe also for valid EDID header |
70 | /* Probe also for valid EDID header | 66 | * EDID header starts with: |
71 | * EDID header starts with: | 67 | * 0x00,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0x00. |
72 | * 0x00,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0x00. | 68 | * Only the first 6 bytes must be valid as |
73 | * Only the first 6 bytes must be valid as | 69 | * drm_edid_block_valid() can fix the last 2 bytes */ |
74 | * drm_edid_block_valid() can fix the last 2 bytes */ | 70 | if (drm_edid_header_is_valid(buf) < 6) { |
75 | if (drm_edid_header_is_valid(buf) < 6) { | 71 | /* Couldn't find an accessible EDID on this |
76 | /* Couldn't find an accessible EDID on this | 72 | * connector */ |
77 | * connector */ | 73 | return false; |
78 | return false; | ||
79 | } | ||
80 | } | 74 | } |
81 | return true; | 75 | return true; |
82 | } | 76 | } |
diff --git a/drivers/gpu/drm/radeon/radeon_irq_kms.c b/drivers/gpu/drm/radeon/radeon_irq_kms.c index 9ec830c77af0..8f86aeb26693 100644 --- a/drivers/gpu/drm/radeon/radeon_irq_kms.c +++ b/drivers/gpu/drm/radeon/radeon_irq_kms.c | |||
@@ -67,10 +67,10 @@ void radeon_driver_irq_preinstall_kms(struct drm_device *dev) | |||
67 | /* Disable *all* interrupts */ | 67 | /* Disable *all* interrupts */ |
68 | rdev->irq.sw_int = false; | 68 | rdev->irq.sw_int = false; |
69 | rdev->irq.gui_idle = false; | 69 | rdev->irq.gui_idle = false; |
70 | for (i = 0; i < rdev->num_crtc; i++) | 70 | for (i = 0; i < RADEON_MAX_HPD_PINS; i++) |
71 | rdev->irq.crtc_vblank_int[i] = false; | ||
72 | for (i = 0; i < 6; i++) { | ||
73 | rdev->irq.hpd[i] = false; | 71 | rdev->irq.hpd[i] = false; |
72 | for (i = 0; i < RADEON_MAX_CRTCS; i++) { | ||
73 | rdev->irq.crtc_vblank_int[i] = false; | ||
74 | rdev->irq.pflip[i] = false; | 74 | rdev->irq.pflip[i] = false; |
75 | } | 75 | } |
76 | radeon_irq_set(rdev); | 76 | radeon_irq_set(rdev); |
@@ -99,15 +99,55 @@ void radeon_driver_irq_uninstall_kms(struct drm_device *dev) | |||
99 | /* Disable *all* interrupts */ | 99 | /* Disable *all* interrupts */ |
100 | rdev->irq.sw_int = false; | 100 | rdev->irq.sw_int = false; |
101 | rdev->irq.gui_idle = false; | 101 | rdev->irq.gui_idle = false; |
102 | for (i = 0; i < rdev->num_crtc; i++) | 102 | for (i = 0; i < RADEON_MAX_HPD_PINS; i++) |
103 | rdev->irq.crtc_vblank_int[i] = false; | ||
104 | for (i = 0; i < 6; i++) { | ||
105 | rdev->irq.hpd[i] = false; | 103 | rdev->irq.hpd[i] = false; |
104 | for (i = 0; i < RADEON_MAX_CRTCS; i++) { | ||
105 | rdev->irq.crtc_vblank_int[i] = false; | ||
106 | rdev->irq.pflip[i] = false; | 106 | rdev->irq.pflip[i] = false; |
107 | } | 107 | } |
108 | radeon_irq_set(rdev); | 108 | radeon_irq_set(rdev); |
109 | } | 109 | } |
110 | 110 | ||
111 | static bool radeon_msi_ok(struct radeon_device *rdev) | ||
112 | { | ||
113 | /* RV370/RV380 was first asic with MSI support */ | ||
114 | if (rdev->family < CHIP_RV380) | ||
115 | return false; | ||
116 | |||
117 | /* MSIs don't work on AGP */ | ||
118 | if (rdev->flags & RADEON_IS_AGP) | ||
119 | return false; | ||
120 | |||
121 | /* force MSI on */ | ||
122 | if (radeon_msi == 1) | ||
123 | return true; | ||
124 | else if (radeon_msi == 0) | ||
125 | return false; | ||
126 | |||
127 | /* Quirks */ | ||
128 | /* HP RS690 only seems to work with MSIs. */ | ||
129 | if ((rdev->pdev->device == 0x791f) && | ||
130 | (rdev->pdev->subsystem_vendor == 0x103c) && | ||
131 | (rdev->pdev->subsystem_device == 0x30c2)) | ||
132 | return true; | ||
133 | |||
134 | /* Dell RS690 only seems to work with MSIs. */ | ||
135 | if ((rdev->pdev->device == 0x791f) && | ||
136 | (rdev->pdev->subsystem_vendor == 0x1028) && | ||
137 | (rdev->pdev->subsystem_device == 0x01fd)) | ||
138 | return true; | ||
139 | |||
140 | if (rdev->flags & RADEON_IS_IGP) { | ||
141 | /* APUs work fine with MSIs */ | ||
142 | if (rdev->family >= CHIP_PALM) | ||
143 | return true; | ||
144 | /* lots of IGPs have problems with MSIs */ | ||
145 | return false; | ||
146 | } | ||
147 | |||
148 | return true; | ||
149 | } | ||
150 | |||
111 | int radeon_irq_kms_init(struct radeon_device *rdev) | 151 | int radeon_irq_kms_init(struct radeon_device *rdev) |
112 | { | 152 | { |
113 | int i; | 153 | int i; |
@@ -124,12 +164,8 @@ int radeon_irq_kms_init(struct radeon_device *rdev) | |||
124 | } | 164 | } |
125 | /* enable msi */ | 165 | /* enable msi */ |
126 | rdev->msi_enabled = 0; | 166 | rdev->msi_enabled = 0; |
127 | /* MSIs don't seem to work reliably on all IGP | 167 | |
128 | * chips. Disable MSI on them for now. | 168 | if (radeon_msi_ok(rdev)) { |
129 | */ | ||
130 | if ((rdev->family >= CHIP_RV380) && | ||
131 | ((!(rdev->flags & RADEON_IS_IGP)) || (rdev->family >= CHIP_PALM)) && | ||
132 | (!(rdev->flags & RADEON_IS_AGP))) { | ||
133 | int ret = pci_enable_msi(rdev->pdev); | 169 | int ret = pci_enable_msi(rdev->pdev); |
134 | if (!ret) { | 170 | if (!ret) { |
135 | rdev->msi_enabled = 1; | 171 | rdev->msi_enabled = 1; |
diff --git a/drivers/gpu/drm/radeon/radeon_legacy_crtc.c b/drivers/gpu/drm/radeon/radeon_legacy_crtc.c index 41a5d48e657b..daadf2111040 100644 --- a/drivers/gpu/drm/radeon/radeon_legacy_crtc.c +++ b/drivers/gpu/drm/radeon/radeon_legacy_crtc.c | |||
@@ -991,12 +991,6 @@ static bool radeon_crtc_mode_fixup(struct drm_crtc *crtc, | |||
991 | struct drm_display_mode *mode, | 991 | struct drm_display_mode *mode, |
992 | struct drm_display_mode *adjusted_mode) | 992 | struct drm_display_mode *adjusted_mode) |
993 | { | 993 | { |
994 | struct drm_device *dev = crtc->dev; | ||
995 | struct radeon_device *rdev = dev->dev_private; | ||
996 | |||
997 | /* adjust pm to upcoming mode change */ | ||
998 | radeon_pm_compute_clocks(rdev); | ||
999 | |||
1000 | if (!radeon_crtc_scaling_mode_fixup(crtc, mode, adjusted_mode)) | 994 | if (!radeon_crtc_scaling_mode_fixup(crtc, mode, adjusted_mode)) |
1001 | return false; | 995 | return false; |
1002 | return true; | 996 | return true; |
diff --git a/drivers/gpu/drm/radeon/radeon_mode.h b/drivers/gpu/drm/radeon/radeon_mode.h index ed0178f03235..2c2e75ef8a37 100644 --- a/drivers/gpu/drm/radeon/radeon_mode.h +++ b/drivers/gpu/drm/radeon/radeon_mode.h | |||
@@ -438,9 +438,6 @@ struct radeon_connector { | |||
438 | struct radeon_i2c_chan *ddc_bus; | 438 | struct radeon_i2c_chan *ddc_bus; |
439 | /* some systems have an hdmi and vga port with a shared ddc line */ | 439 | /* some systems have an hdmi and vga port with a shared ddc line */ |
440 | bool shared_ddc; | 440 | bool shared_ddc; |
441 | /* for some Radeon chip families we apply an additional EDID header | ||
442 | check as part of the DDC probe */ | ||
443 | bool requires_extended_probe; | ||
444 | bool use_digital; | 441 | bool use_digital; |
445 | /* we need to mind the EDID between detect | 442 | /* we need to mind the EDID between detect |
446 | and get modes due to analog/digital/tvencoder */ | 443 | and get modes due to analog/digital/tvencoder */ |
@@ -459,6 +456,8 @@ struct radeon_framebuffer { | |||
459 | struct drm_gem_object *obj; | 456 | struct drm_gem_object *obj; |
460 | }; | 457 | }; |
461 | 458 | ||
459 | #define ENCODER_MODE_IS_DP(em) (((em) == ATOM_ENCODER_MODE_DP) || \ | ||
460 | ((em) == ATOM_ENCODER_MODE_DP_MST)) | ||
462 | 461 | ||
463 | extern enum radeon_tv_std | 462 | extern enum radeon_tv_std |
464 | radeon_combios_get_tv_info(struct radeon_device *rdev); | 463 | radeon_combios_get_tv_info(struct radeon_device *rdev); |
@@ -468,8 +467,8 @@ radeon_atombios_get_tv_info(struct radeon_device *rdev); | |||
468 | extern struct drm_connector * | 467 | extern struct drm_connector * |
469 | radeon_get_connector_for_encoder(struct drm_encoder *encoder); | 468 | radeon_get_connector_for_encoder(struct drm_encoder *encoder); |
470 | 469 | ||
471 | extern bool radeon_encoder_is_dp_bridge(struct drm_encoder *encoder); | 470 | extern u16 radeon_encoder_get_dp_bridge_encoder_id(struct drm_encoder *encoder); |
472 | extern bool radeon_connector_encoder_is_dp_bridge(struct drm_connector *connector); | 471 | extern u16 radeon_connector_encoder_get_dp_bridge_encoder_id(struct drm_connector *connector); |
473 | extern bool radeon_connector_encoder_is_hbr2(struct drm_connector *connector); | 472 | extern bool radeon_connector_encoder_is_hbr2(struct drm_connector *connector); |
474 | extern bool radeon_connector_is_dp12_capable(struct drm_connector *connector); | 473 | extern bool radeon_connector_is_dp12_capable(struct drm_connector *connector); |
475 | 474 | ||
@@ -489,7 +488,7 @@ extern void atombios_dig_transmitter_setup(struct drm_encoder *encoder, | |||
489 | int action, uint8_t lane_num, | 488 | int action, uint8_t lane_num, |
490 | uint8_t lane_set); | 489 | uint8_t lane_set); |
491 | extern void radeon_atom_ext_encoder_setup_ddc(struct drm_encoder *encoder); | 490 | extern void radeon_atom_ext_encoder_setup_ddc(struct drm_encoder *encoder); |
492 | extern struct drm_encoder *radeon_atom_get_external_encoder(struct drm_encoder *encoder); | 491 | extern struct drm_encoder *radeon_get_external_encoder(struct drm_encoder *encoder); |
493 | extern int radeon_dp_i2c_aux_ch(struct i2c_adapter *adapter, int mode, | 492 | extern int radeon_dp_i2c_aux_ch(struct i2c_adapter *adapter, int mode, |
494 | u8 write_byte, u8 *read_byte); | 493 | u8 write_byte, u8 *read_byte); |
495 | 494 | ||
@@ -519,8 +518,7 @@ extern void radeon_i2c_put_byte(struct radeon_i2c_chan *i2c, | |||
519 | u8 val); | 518 | u8 val); |
520 | extern void radeon_router_select_ddc_port(struct radeon_connector *radeon_connector); | 519 | extern void radeon_router_select_ddc_port(struct radeon_connector *radeon_connector); |
521 | extern void radeon_router_select_cd_port(struct radeon_connector *radeon_connector); | 520 | extern void radeon_router_select_cd_port(struct radeon_connector *radeon_connector); |
522 | extern bool radeon_ddc_probe(struct radeon_connector *radeon_connector, | 521 | extern bool radeon_ddc_probe(struct radeon_connector *radeon_connector); |
523 | bool requires_extended_probe); | ||
524 | extern int radeon_ddc_get_modes(struct radeon_connector *radeon_connector); | 522 | extern int radeon_ddc_get_modes(struct radeon_connector *radeon_connector); |
525 | 523 | ||
526 | extern struct drm_encoder *radeon_best_encoder(struct drm_connector *connector); | 524 | extern struct drm_encoder *radeon_best_encoder(struct drm_connector *connector); |
diff --git a/drivers/gpu/drm/radeon/radeon_pm.c b/drivers/gpu/drm/radeon/radeon_pm.c index 6fabe89fa6a1..78a665bd9519 100644 --- a/drivers/gpu/drm/radeon/radeon_pm.c +++ b/drivers/gpu/drm/radeon/radeon_pm.c | |||
@@ -53,6 +53,24 @@ static void radeon_pm_set_clocks(struct radeon_device *rdev); | |||
53 | 53 | ||
54 | #define ACPI_AC_CLASS "ac_adapter" | 54 | #define ACPI_AC_CLASS "ac_adapter" |
55 | 55 | ||
56 | int radeon_pm_get_type_index(struct radeon_device *rdev, | ||
57 | enum radeon_pm_state_type ps_type, | ||
58 | int instance) | ||
59 | { | ||
60 | int i; | ||
61 | int found_instance = -1; | ||
62 | |||
63 | for (i = 0; i < rdev->pm.num_power_states; i++) { | ||
64 | if (rdev->pm.power_state[i].type == ps_type) { | ||
65 | found_instance++; | ||
66 | if (found_instance == instance) | ||
67 | return i; | ||
68 | } | ||
69 | } | ||
70 | /* return default if no match */ | ||
71 | return rdev->pm.default_power_state_index; | ||
72 | } | ||
73 | |||
56 | #ifdef CONFIG_ACPI | 74 | #ifdef CONFIG_ACPI |
57 | static int radeon_acpi_event(struct notifier_block *nb, | 75 | static int radeon_acpi_event(struct notifier_block *nb, |
58 | unsigned long val, | 76 | unsigned long val, |
diff --git a/drivers/gpu/drm/radeon/rs400.c b/drivers/gpu/drm/radeon/rs400.c index 89a6e1ecea8d..06b90c87f8f3 100644 --- a/drivers/gpu/drm/radeon/rs400.c +++ b/drivers/gpu/drm/radeon/rs400.c | |||
@@ -77,7 +77,7 @@ int rs400_gart_init(struct radeon_device *rdev) | |||
77 | { | 77 | { |
78 | int r; | 78 | int r; |
79 | 79 | ||
80 | if (rdev->gart.table.ram.ptr) { | 80 | if (rdev->gart.ptr) { |
81 | WARN(1, "RS400 GART already initialized\n"); | 81 | WARN(1, "RS400 GART already initialized\n"); |
82 | return 0; | 82 | return 0; |
83 | } | 83 | } |
@@ -212,6 +212,7 @@ void rs400_gart_fini(struct radeon_device *rdev) | |||
212 | int rs400_gart_set_page(struct radeon_device *rdev, int i, uint64_t addr) | 212 | int rs400_gart_set_page(struct radeon_device *rdev, int i, uint64_t addr) |
213 | { | 213 | { |
214 | uint32_t entry; | 214 | uint32_t entry; |
215 | u32 *gtt = rdev->gart.ptr; | ||
215 | 216 | ||
216 | if (i < 0 || i > rdev->gart.num_gpu_pages) { | 217 | if (i < 0 || i > rdev->gart.num_gpu_pages) { |
217 | return -EINVAL; | 218 | return -EINVAL; |
@@ -221,7 +222,7 @@ int rs400_gart_set_page(struct radeon_device *rdev, int i, uint64_t addr) | |||
221 | ((upper_32_bits(addr) & 0xff) << 4) | | 222 | ((upper_32_bits(addr) & 0xff) << 4) | |
222 | RS400_PTE_WRITEABLE | RS400_PTE_READABLE; | 223 | RS400_PTE_WRITEABLE | RS400_PTE_READABLE; |
223 | entry = cpu_to_le32(entry); | 224 | entry = cpu_to_le32(entry); |
224 | rdev->gart.table.ram.ptr[i] = entry; | 225 | gtt[i] = entry; |
225 | return 0; | 226 | return 0; |
226 | } | 227 | } |
227 | 228 | ||
diff --git a/drivers/gpu/drm/radeon/rs600.c b/drivers/gpu/drm/radeon/rs600.c index 9320dd6404f6..481b99e89f65 100644 --- a/drivers/gpu/drm/radeon/rs600.c +++ b/drivers/gpu/drm/radeon/rs600.c | |||
@@ -287,6 +287,7 @@ void rs600_hpd_init(struct radeon_device *rdev) | |||
287 | default: | 287 | default: |
288 | break; | 288 | break; |
289 | } | 289 | } |
290 | radeon_hpd_set_polarity(rdev, radeon_connector->hpd.hpd); | ||
290 | } | 291 | } |
291 | if (rdev->irq.installed) | 292 | if (rdev->irq.installed) |
292 | rs600_irq_set(rdev); | 293 | rs600_irq_set(rdev); |
@@ -413,7 +414,7 @@ int rs600_gart_init(struct radeon_device *rdev) | |||
413 | { | 414 | { |
414 | int r; | 415 | int r; |
415 | 416 | ||
416 | if (rdev->gart.table.vram.robj) { | 417 | if (rdev->gart.robj) { |
417 | WARN(1, "RS600 GART already initialized\n"); | 418 | WARN(1, "RS600 GART already initialized\n"); |
418 | return 0; | 419 | return 0; |
419 | } | 420 | } |
@@ -431,7 +432,7 @@ static int rs600_gart_enable(struct radeon_device *rdev) | |||
431 | u32 tmp; | 432 | u32 tmp; |
432 | int r, i; | 433 | int r, i; |
433 | 434 | ||
434 | if (rdev->gart.table.vram.robj == NULL) { | 435 | if (rdev->gart.robj == NULL) { |
435 | dev_err(rdev->dev, "No VRAM object for PCIE GART.\n"); | 436 | dev_err(rdev->dev, "No VRAM object for PCIE GART.\n"); |
436 | return -EINVAL; | 437 | return -EINVAL; |
437 | } | 438 | } |
@@ -494,20 +495,12 @@ static int rs600_gart_enable(struct radeon_device *rdev) | |||
494 | void rs600_gart_disable(struct radeon_device *rdev) | 495 | void rs600_gart_disable(struct radeon_device *rdev) |
495 | { | 496 | { |
496 | u32 tmp; | 497 | u32 tmp; |
497 | int r; | ||
498 | 498 | ||
499 | /* FIXME: disable out of gart access */ | 499 | /* FIXME: disable out of gart access */ |
500 | WREG32_MC(R_000100_MC_PT0_CNTL, 0); | 500 | WREG32_MC(R_000100_MC_PT0_CNTL, 0); |
501 | tmp = RREG32_MC(R_000009_MC_CNTL1); | 501 | tmp = RREG32_MC(R_000009_MC_CNTL1); |
502 | WREG32_MC(R_000009_MC_CNTL1, tmp & C_000009_ENABLE_PAGE_TABLES); | 502 | WREG32_MC(R_000009_MC_CNTL1, tmp & C_000009_ENABLE_PAGE_TABLES); |
503 | if (rdev->gart.table.vram.robj) { | 503 | radeon_gart_table_vram_unpin(rdev); |
504 | r = radeon_bo_reserve(rdev->gart.table.vram.robj, false); | ||
505 | if (r == 0) { | ||
506 | radeon_bo_kunmap(rdev->gart.table.vram.robj); | ||
507 | radeon_bo_unpin(rdev->gart.table.vram.robj); | ||
508 | radeon_bo_unreserve(rdev->gart.table.vram.robj); | ||
509 | } | ||
510 | } | ||
511 | } | 504 | } |
512 | 505 | ||
513 | void rs600_gart_fini(struct radeon_device *rdev) | 506 | void rs600_gart_fini(struct radeon_device *rdev) |
@@ -525,7 +518,7 @@ void rs600_gart_fini(struct radeon_device *rdev) | |||
525 | 518 | ||
526 | int rs600_gart_set_page(struct radeon_device *rdev, int i, uint64_t addr) | 519 | int rs600_gart_set_page(struct radeon_device *rdev, int i, uint64_t addr) |
527 | { | 520 | { |
528 | void __iomem *ptr = (void *)rdev->gart.table.vram.ptr; | 521 | void __iomem *ptr = (void *)rdev->gart.ptr; |
529 | 522 | ||
530 | if (i < 0 || i > rdev->gart.num_gpu_pages) { | 523 | if (i < 0 || i > rdev->gart.num_gpu_pages) { |
531 | return -EINVAL; | 524 | return -EINVAL; |
diff --git a/drivers/gpu/drm/radeon/rv770.c b/drivers/gpu/drm/radeon/rv770.c index 87cc1feee3ac..a983f410ab89 100644 --- a/drivers/gpu/drm/radeon/rv770.c +++ b/drivers/gpu/drm/radeon/rv770.c | |||
@@ -124,7 +124,7 @@ int rv770_pcie_gart_enable(struct radeon_device *rdev) | |||
124 | u32 tmp; | 124 | u32 tmp; |
125 | int r, i; | 125 | int r, i; |
126 | 126 | ||
127 | if (rdev->gart.table.vram.robj == NULL) { | 127 | if (rdev->gart.robj == NULL) { |
128 | dev_err(rdev->dev, "No VRAM object for PCIE GART.\n"); | 128 | dev_err(rdev->dev, "No VRAM object for PCIE GART.\n"); |
129 | return -EINVAL; | 129 | return -EINVAL; |
130 | } | 130 | } |
@@ -171,7 +171,7 @@ int rv770_pcie_gart_enable(struct radeon_device *rdev) | |||
171 | void rv770_pcie_gart_disable(struct radeon_device *rdev) | 171 | void rv770_pcie_gart_disable(struct radeon_device *rdev) |
172 | { | 172 | { |
173 | u32 tmp; | 173 | u32 tmp; |
174 | int i, r; | 174 | int i; |
175 | 175 | ||
176 | /* Disable all tables */ | 176 | /* Disable all tables */ |
177 | for (i = 0; i < 7; i++) | 177 | for (i = 0; i < 7; i++) |
@@ -191,14 +191,7 @@ void rv770_pcie_gart_disable(struct radeon_device *rdev) | |||
191 | WREG32(MC_VM_MB_L1_TLB1_CNTL, tmp); | 191 | WREG32(MC_VM_MB_L1_TLB1_CNTL, tmp); |
192 | WREG32(MC_VM_MB_L1_TLB2_CNTL, tmp); | 192 | WREG32(MC_VM_MB_L1_TLB2_CNTL, tmp); |
193 | WREG32(MC_VM_MB_L1_TLB3_CNTL, tmp); | 193 | WREG32(MC_VM_MB_L1_TLB3_CNTL, tmp); |
194 | if (rdev->gart.table.vram.robj) { | 194 | radeon_gart_table_vram_unpin(rdev); |
195 | r = radeon_bo_reserve(rdev->gart.table.vram.robj, false); | ||
196 | if (likely(r == 0)) { | ||
197 | radeon_bo_kunmap(rdev->gart.table.vram.robj); | ||
198 | radeon_bo_unpin(rdev->gart.table.vram.robj); | ||
199 | radeon_bo_unreserve(rdev->gart.table.vram.robj); | ||
200 | } | ||
201 | } | ||
202 | } | 195 | } |
203 | 196 | ||
204 | void rv770_pcie_gart_fini(struct radeon_device *rdev) | 197 | void rv770_pcie_gart_fini(struct radeon_device *rdev) |
@@ -282,7 +275,7 @@ static void rv770_mc_program(struct radeon_device *rdev) | |||
282 | WREG32(MC_VM_SYSTEM_APERTURE_HIGH_ADDR, | 275 | WREG32(MC_VM_SYSTEM_APERTURE_HIGH_ADDR, |
283 | rdev->mc.vram_end >> 12); | 276 | rdev->mc.vram_end >> 12); |
284 | } | 277 | } |
285 | WREG32(MC_VM_SYSTEM_APERTURE_DEFAULT_ADDR, 0); | 278 | WREG32(MC_VM_SYSTEM_APERTURE_DEFAULT_ADDR, rdev->vram_scratch.gpu_addr >> 12); |
286 | tmp = ((rdev->mc.vram_end >> 24) & 0xFFFF) << 16; | 279 | tmp = ((rdev->mc.vram_end >> 24) & 0xFFFF) << 16; |
287 | tmp |= ((rdev->mc.vram_start >> 24) & 0xFFFF); | 280 | tmp |= ((rdev->mc.vram_start >> 24) & 0xFFFF); |
288 | WREG32(MC_VM_FB_LOCATION, tmp); | 281 | WREG32(MC_VM_FB_LOCATION, tmp); |
@@ -959,54 +952,6 @@ static void rv770_gpu_init(struct radeon_device *rdev) | |||
959 | 952 | ||
960 | } | 953 | } |
961 | 954 | ||
962 | static int rv770_vram_scratch_init(struct radeon_device *rdev) | ||
963 | { | ||
964 | int r; | ||
965 | u64 gpu_addr; | ||
966 | |||
967 | if (rdev->vram_scratch.robj == NULL) { | ||
968 | r = radeon_bo_create(rdev, RADEON_GPU_PAGE_SIZE, | ||
969 | PAGE_SIZE, true, RADEON_GEM_DOMAIN_VRAM, | ||
970 | &rdev->vram_scratch.robj); | ||
971 | if (r) { | ||
972 | return r; | ||
973 | } | ||
974 | } | ||
975 | |||
976 | r = radeon_bo_reserve(rdev->vram_scratch.robj, false); | ||
977 | if (unlikely(r != 0)) | ||
978 | return r; | ||
979 | r = radeon_bo_pin(rdev->vram_scratch.robj, | ||
980 | RADEON_GEM_DOMAIN_VRAM, &gpu_addr); | ||
981 | if (r) { | ||
982 | radeon_bo_unreserve(rdev->vram_scratch.robj); | ||
983 | return r; | ||
984 | } | ||
985 | r = radeon_bo_kmap(rdev->vram_scratch.robj, | ||
986 | (void **)&rdev->vram_scratch.ptr); | ||
987 | if (r) | ||
988 | radeon_bo_unpin(rdev->vram_scratch.robj); | ||
989 | radeon_bo_unreserve(rdev->vram_scratch.robj); | ||
990 | |||
991 | return r; | ||
992 | } | ||
993 | |||
994 | static void rv770_vram_scratch_fini(struct radeon_device *rdev) | ||
995 | { | ||
996 | int r; | ||
997 | |||
998 | if (rdev->vram_scratch.robj == NULL) { | ||
999 | return; | ||
1000 | } | ||
1001 | r = radeon_bo_reserve(rdev->vram_scratch.robj, false); | ||
1002 | if (likely(r == 0)) { | ||
1003 | radeon_bo_kunmap(rdev->vram_scratch.robj); | ||
1004 | radeon_bo_unpin(rdev->vram_scratch.robj); | ||
1005 | radeon_bo_unreserve(rdev->vram_scratch.robj); | ||
1006 | } | ||
1007 | radeon_bo_unref(&rdev->vram_scratch.robj); | ||
1008 | } | ||
1009 | |||
1010 | void r700_vram_gtt_location(struct radeon_device *rdev, struct radeon_mc *mc) | 955 | void r700_vram_gtt_location(struct radeon_device *rdev, struct radeon_mc *mc) |
1011 | { | 956 | { |
1012 | u64 size_bf, size_af; | 957 | u64 size_bf, size_af; |
@@ -1106,6 +1051,10 @@ static int rv770_startup(struct radeon_device *rdev) | |||
1106 | } | 1051 | } |
1107 | } | 1052 | } |
1108 | 1053 | ||
1054 | r = r600_vram_scratch_init(rdev); | ||
1055 | if (r) | ||
1056 | return r; | ||
1057 | |||
1109 | rv770_mc_program(rdev); | 1058 | rv770_mc_program(rdev); |
1110 | if (rdev->flags & RADEON_IS_AGP) { | 1059 | if (rdev->flags & RADEON_IS_AGP) { |
1111 | rv770_agp_enable(rdev); | 1060 | rv770_agp_enable(rdev); |
@@ -1114,9 +1063,7 @@ static int rv770_startup(struct radeon_device *rdev) | |||
1114 | if (r) | 1063 | if (r) |
1115 | return r; | 1064 | return r; |
1116 | } | 1065 | } |
1117 | r = rv770_vram_scratch_init(rdev); | 1066 | |
1118 | if (r) | ||
1119 | return r; | ||
1120 | rv770_gpu_init(rdev); | 1067 | rv770_gpu_init(rdev); |
1121 | r = r600_blit_init(rdev); | 1068 | r = r600_blit_init(rdev); |
1122 | if (r) { | 1069 | if (r) { |
@@ -1316,7 +1263,7 @@ void rv770_fini(struct radeon_device *rdev) | |||
1316 | radeon_ib_pool_fini(rdev); | 1263 | radeon_ib_pool_fini(rdev); |
1317 | radeon_irq_kms_fini(rdev); | 1264 | radeon_irq_kms_fini(rdev); |
1318 | rv770_pcie_gart_fini(rdev); | 1265 | rv770_pcie_gart_fini(rdev); |
1319 | rv770_vram_scratch_fini(rdev); | 1266 | r600_vram_scratch_fini(rdev); |
1320 | radeon_gem_fini(rdev); | 1267 | radeon_gem_fini(rdev); |
1321 | radeon_fence_driver_fini(rdev); | 1268 | radeon_fence_driver_fini(rdev); |
1322 | radeon_agp_fini(rdev); | 1269 | radeon_agp_fini(rdev); |
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c index 1805b8c2a948..dff8fc767152 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c | |||
@@ -104,6 +104,9 @@ | |||
104 | #define DRM_IOCTL_VMW_PRESENT_READBACK \ | 104 | #define DRM_IOCTL_VMW_PRESENT_READBACK \ |
105 | DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_PRESENT_READBACK, \ | 105 | DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_PRESENT_READBACK, \ |
106 | struct drm_vmw_present_readback_arg) | 106 | struct drm_vmw_present_readback_arg) |
107 | #define DRM_IOCTL_VMW_UPDATE_LAYOUT \ | ||
108 | DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_UPDATE_LAYOUT, \ | ||
109 | struct drm_vmw_update_layout_arg) | ||
107 | 110 | ||
108 | /** | 111 | /** |
109 | * The core DRM version of this macro doesn't account for | 112 | * The core DRM version of this macro doesn't account for |
@@ -166,6 +169,9 @@ static struct drm_ioctl_desc vmw_ioctls[] = { | |||
166 | VMW_IOCTL_DEF(VMW_PRESENT_READBACK, | 169 | VMW_IOCTL_DEF(VMW_PRESENT_READBACK, |
167 | vmw_present_readback_ioctl, | 170 | vmw_present_readback_ioctl, |
168 | DRM_MASTER | DRM_AUTH | DRM_UNLOCKED), | 171 | DRM_MASTER | DRM_AUTH | DRM_UNLOCKED), |
172 | VMW_IOCTL_DEF(VMW_UPDATE_LAYOUT, | ||
173 | vmw_kms_update_layout_ioctl, | ||
174 | DRM_MASTER | DRM_UNLOCKED), | ||
169 | }; | 175 | }; |
170 | 176 | ||
171 | static struct pci_device_id vmw_pci_id_list[] = { | 177 | static struct pci_device_id vmw_pci_id_list[] = { |
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h index 30589d0aecd9..8cca91a93bde 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h | |||
@@ -40,9 +40,9 @@ | |||
40 | #include "ttm/ttm_module.h" | 40 | #include "ttm/ttm_module.h" |
41 | #include "vmwgfx_fence.h" | 41 | #include "vmwgfx_fence.h" |
42 | 42 | ||
43 | #define VMWGFX_DRIVER_DATE "20111008" | 43 | #define VMWGFX_DRIVER_DATE "20111025" |
44 | #define VMWGFX_DRIVER_MAJOR 2 | 44 | #define VMWGFX_DRIVER_MAJOR 2 |
45 | #define VMWGFX_DRIVER_MINOR 2 | 45 | #define VMWGFX_DRIVER_MINOR 3 |
46 | #define VMWGFX_DRIVER_PATCHLEVEL 0 | 46 | #define VMWGFX_DRIVER_PATCHLEVEL 0 |
47 | #define VMWGFX_FILE_PAGE_OFFSET 0x00100000 | 47 | #define VMWGFX_FILE_PAGE_OFFSET 0x00100000 |
48 | #define VMWGFX_FIFO_STATIC_SIZE (1024*1024) | 48 | #define VMWGFX_FIFO_STATIC_SIZE (1024*1024) |
@@ -633,6 +633,8 @@ int vmw_kms_readback(struct vmw_private *dev_priv, | |||
633 | struct drm_vmw_fence_rep __user *user_fence_rep, | 633 | struct drm_vmw_fence_rep __user *user_fence_rep, |
634 | struct drm_vmw_rect *clips, | 634 | struct drm_vmw_rect *clips, |
635 | uint32_t num_clips); | 635 | uint32_t num_clips); |
636 | int vmw_kms_update_layout_ioctl(struct drm_device *dev, void *data, | ||
637 | struct drm_file *file_priv); | ||
636 | 638 | ||
637 | /** | 639 | /** |
638 | * Overlay control - vmwgfx_overlay.c | 640 | * Overlay control - vmwgfx_overlay.c |
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c index 8b14dfd513a1..880e285d7578 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c | |||
@@ -105,12 +105,17 @@ int vmw_du_crtc_cursor_set(struct drm_crtc *crtc, struct drm_file *file_priv, | |||
105 | struct vmw_dma_buffer *dmabuf = NULL; | 105 | struct vmw_dma_buffer *dmabuf = NULL; |
106 | int ret; | 106 | int ret; |
107 | 107 | ||
108 | /* A lot of the code assumes this */ | ||
109 | if (handle && (width != 64 || height != 64)) | ||
110 | return -EINVAL; | ||
111 | |||
108 | if (handle) { | 112 | if (handle) { |
109 | ret = vmw_user_surface_lookup_handle(dev_priv, tfile, | 113 | ret = vmw_user_surface_lookup_handle(dev_priv, tfile, |
110 | handle, &surface); | 114 | handle, &surface); |
111 | if (!ret) { | 115 | if (!ret) { |
112 | if (!surface->snooper.image) { | 116 | if (!surface->snooper.image) { |
113 | DRM_ERROR("surface not suitable for cursor\n"); | 117 | DRM_ERROR("surface not suitable for cursor\n"); |
118 | vmw_surface_unreference(&surface); | ||
114 | return -EINVAL; | 119 | return -EINVAL; |
115 | } | 120 | } |
116 | } else { | 121 | } else { |
@@ -176,7 +181,9 @@ err_unreserve: | |||
176 | return 0; | 181 | return 0; |
177 | } | 182 | } |
178 | 183 | ||
179 | vmw_cursor_update_position(dev_priv, true, du->cursor_x, du->cursor_y); | 184 | vmw_cursor_update_position(dev_priv, true, |
185 | du->cursor_x + du->hotspot_x, | ||
186 | du->cursor_y + du->hotspot_y); | ||
180 | 187 | ||
181 | return 0; | 188 | return 0; |
182 | } | 189 | } |
@@ -191,7 +198,8 @@ int vmw_du_crtc_cursor_move(struct drm_crtc *crtc, int x, int y) | |||
191 | du->cursor_y = y + crtc->y; | 198 | du->cursor_y = y + crtc->y; |
192 | 199 | ||
193 | vmw_cursor_update_position(dev_priv, shown, | 200 | vmw_cursor_update_position(dev_priv, shown, |
194 | du->cursor_x, du->cursor_y); | 201 | du->cursor_x + du->hotspot_x, |
202 | du->cursor_y + du->hotspot_y); | ||
195 | 203 | ||
196 | return 0; | 204 | return 0; |
197 | } | 205 | } |
@@ -212,7 +220,7 @@ void vmw_kms_cursor_snoop(struct vmw_surface *srf, | |||
212 | SVGA3dCmdHeader header; | 220 | SVGA3dCmdHeader header; |
213 | SVGA3dCmdSurfaceDMA dma; | 221 | SVGA3dCmdSurfaceDMA dma; |
214 | } *cmd; | 222 | } *cmd; |
215 | int ret; | 223 | int i, ret; |
216 | 224 | ||
217 | cmd = container_of(header, struct vmw_dma_cmd, header); | 225 | cmd = container_of(header, struct vmw_dma_cmd, header); |
218 | 226 | ||
@@ -234,16 +242,19 @@ void vmw_kms_cursor_snoop(struct vmw_surface *srf, | |||
234 | box_count = (cmd->header.size - sizeof(SVGA3dCmdSurfaceDMA)) / | 242 | box_count = (cmd->header.size - sizeof(SVGA3dCmdSurfaceDMA)) / |
235 | sizeof(SVGA3dCopyBox); | 243 | sizeof(SVGA3dCopyBox); |
236 | 244 | ||
237 | if (cmd->dma.guest.pitch != (64 * 4) || | 245 | if (cmd->dma.guest.ptr.offset % PAGE_SIZE || |
238 | cmd->dma.guest.ptr.offset % PAGE_SIZE || | ||
239 | box->x != 0 || box->y != 0 || box->z != 0 || | 246 | box->x != 0 || box->y != 0 || box->z != 0 || |
240 | box->srcx != 0 || box->srcy != 0 || box->srcz != 0 || | 247 | box->srcx != 0 || box->srcy != 0 || box->srcz != 0 || |
241 | box->w != 64 || box->h != 64 || box->d != 1 || | 248 | box->d != 1 || box_count != 1) { |
242 | box_count != 1) { | ||
243 | /* TODO handle none page aligned offsets */ | 249 | /* TODO handle none page aligned offsets */ |
244 | /* TODO handle partial uploads and pitch != 256 */ | 250 | /* TODO handle more dst & src != 0 */ |
245 | /* TODO handle more then one copy (size != 64) */ | 251 | /* TODO handle more then one copy */ |
246 | DRM_ERROR("lazy programmer, can't handle weird stuff\n"); | 252 | DRM_ERROR("Cant snoop dma request for cursor!\n"); |
253 | DRM_ERROR("(%u, %u, %u) (%u, %u, %u) (%ux%ux%u) %u %u\n", | ||
254 | box->srcx, box->srcy, box->srcz, | ||
255 | box->x, box->y, box->z, | ||
256 | box->w, box->h, box->d, box_count, | ||
257 | cmd->dma.guest.ptr.offset); | ||
247 | return; | 258 | return; |
248 | } | 259 | } |
249 | 260 | ||
@@ -262,7 +273,16 @@ void vmw_kms_cursor_snoop(struct vmw_surface *srf, | |||
262 | 273 | ||
263 | virtual = ttm_kmap_obj_virtual(&map, &dummy); | 274 | virtual = ttm_kmap_obj_virtual(&map, &dummy); |
264 | 275 | ||
265 | memcpy(srf->snooper.image, virtual, 64*64*4); | 276 | if (box->w == 64 && cmd->dma.guest.pitch == 64*4) { |
277 | memcpy(srf->snooper.image, virtual, 64*64*4); | ||
278 | } else { | ||
279 | /* Image is unsigned pointer. */ | ||
280 | for (i = 0; i < box->h; i++) | ||
281 | memcpy(srf->snooper.image + i * 64, | ||
282 | virtual + i * cmd->dma.guest.pitch, | ||
283 | box->w * 4); | ||
284 | } | ||
285 | |||
266 | srf->snooper.age++; | 286 | srf->snooper.age++; |
267 | 287 | ||
268 | /* we can't call this function from this function since execbuf has | 288 | /* we can't call this function from this function since execbuf has |
@@ -394,8 +414,9 @@ static int do_surface_dirty_sou(struct vmw_private *dev_priv, | |||
394 | top = clips->y1; | 414 | top = clips->y1; |
395 | bottom = clips->y2; | 415 | bottom = clips->y2; |
396 | 416 | ||
397 | clips_ptr = clips; | 417 | /* skip the first clip rect */ |
398 | for (i = 1; i < num_clips; i++, clips_ptr += inc) { | 418 | for (i = 1, clips_ptr = clips + inc; |
419 | i < num_clips; i++, clips_ptr += inc) { | ||
399 | left = min_t(int, left, (int)clips_ptr->x1); | 420 | left = min_t(int, left, (int)clips_ptr->x1); |
400 | right = max_t(int, right, (int)clips_ptr->x2); | 421 | right = max_t(int, right, (int)clips_ptr->x2); |
401 | top = min_t(int, top, (int)clips_ptr->y1); | 422 | top = min_t(int, top, (int)clips_ptr->y1); |
@@ -994,7 +1015,7 @@ static struct drm_framebuffer *vmw_kms_fb_create(struct drm_device *dev, | |||
994 | required_size = mode_cmd->pitch * mode_cmd->height; | 1015 | required_size = mode_cmd->pitch * mode_cmd->height; |
995 | if (unlikely(required_size > (u64) dev_priv->vram_size)) { | 1016 | if (unlikely(required_size > (u64) dev_priv->vram_size)) { |
996 | DRM_ERROR("VRAM size is too small for requested mode.\n"); | 1017 | DRM_ERROR("VRAM size is too small for requested mode.\n"); |
997 | return NULL; | 1018 | return ERR_PTR(-ENOMEM); |
998 | } | 1019 | } |
999 | 1020 | ||
1000 | /* | 1021 | /* |
@@ -1307,7 +1328,10 @@ int vmw_kms_close(struct vmw_private *dev_priv) | |||
1307 | * drm_encoder_cleanup which takes the lock we deadlock. | 1328 | * drm_encoder_cleanup which takes the lock we deadlock. |
1308 | */ | 1329 | */ |
1309 | drm_mode_config_cleanup(dev_priv->dev); | 1330 | drm_mode_config_cleanup(dev_priv->dev); |
1310 | vmw_kms_close_legacy_display_system(dev_priv); | 1331 | if (dev_priv->sou_priv) |
1332 | vmw_kms_close_screen_object_display(dev_priv); | ||
1333 | else | ||
1334 | vmw_kms_close_legacy_display_system(dev_priv); | ||
1311 | return 0; | 1335 | return 0; |
1312 | } | 1336 | } |
1313 | 1337 | ||
@@ -1517,6 +1541,8 @@ int vmw_du_update_layout(struct vmw_private *dev_priv, unsigned num, | |||
1517 | du->pref_width = rects[du->unit].w; | 1541 | du->pref_width = rects[du->unit].w; |
1518 | du->pref_height = rects[du->unit].h; | 1542 | du->pref_height = rects[du->unit].h; |
1519 | du->pref_active = true; | 1543 | du->pref_active = true; |
1544 | du->gui_x = rects[du->unit].x; | ||
1545 | du->gui_y = rects[du->unit].y; | ||
1520 | } else { | 1546 | } else { |
1521 | du->pref_width = 800; | 1547 | du->pref_width = 800; |
1522 | du->pref_height = 600; | 1548 | du->pref_height = 600; |
@@ -1572,12 +1598,14 @@ vmw_du_connector_detect(struct drm_connector *connector, bool force) | |||
1572 | uint32_t num_displays; | 1598 | uint32_t num_displays; |
1573 | struct drm_device *dev = connector->dev; | 1599 | struct drm_device *dev = connector->dev; |
1574 | struct vmw_private *dev_priv = vmw_priv(dev); | 1600 | struct vmw_private *dev_priv = vmw_priv(dev); |
1601 | struct vmw_display_unit *du = vmw_connector_to_du(connector); | ||
1575 | 1602 | ||
1576 | mutex_lock(&dev_priv->hw_mutex); | 1603 | mutex_lock(&dev_priv->hw_mutex); |
1577 | num_displays = vmw_read(dev_priv, SVGA_REG_NUM_DISPLAYS); | 1604 | num_displays = vmw_read(dev_priv, SVGA_REG_NUM_DISPLAYS); |
1578 | mutex_unlock(&dev_priv->hw_mutex); | 1605 | mutex_unlock(&dev_priv->hw_mutex); |
1579 | 1606 | ||
1580 | return ((vmw_connector_to_du(connector)->unit < num_displays) ? | 1607 | return ((vmw_connector_to_du(connector)->unit < num_displays && |
1608 | du->pref_active) ? | ||
1581 | connector_status_connected : connector_status_disconnected); | 1609 | connector_status_connected : connector_status_disconnected); |
1582 | } | 1610 | } |
1583 | 1611 | ||
@@ -1658,6 +1686,28 @@ static struct drm_display_mode vmw_kms_connector_builtin[] = { | |||
1658 | { DRM_MODE("", 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0) }, | 1686 | { DRM_MODE("", 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0) }, |
1659 | }; | 1687 | }; |
1660 | 1688 | ||
1689 | /** | ||
1690 | * vmw_guess_mode_timing - Provide fake timings for a | ||
1691 | * 60Hz vrefresh mode. | ||
1692 | * | ||
1693 | * @mode - Pointer to a struct drm_display_mode with hdisplay and vdisplay | ||
1694 | * members filled in. | ||
1695 | */ | ||
1696 | static void vmw_guess_mode_timing(struct drm_display_mode *mode) | ||
1697 | { | ||
1698 | mode->hsync_start = mode->hdisplay + 50; | ||
1699 | mode->hsync_end = mode->hsync_start + 50; | ||
1700 | mode->htotal = mode->hsync_end + 50; | ||
1701 | |||
1702 | mode->vsync_start = mode->vdisplay + 50; | ||
1703 | mode->vsync_end = mode->vsync_start + 50; | ||
1704 | mode->vtotal = mode->vsync_end + 50; | ||
1705 | |||
1706 | mode->clock = (u32)mode->htotal * (u32)mode->vtotal / 100 * 6; | ||
1707 | mode->vrefresh = drm_mode_vrefresh(mode); | ||
1708 | } | ||
1709 | |||
1710 | |||
1661 | int vmw_du_connector_fill_modes(struct drm_connector *connector, | 1711 | int vmw_du_connector_fill_modes(struct drm_connector *connector, |
1662 | uint32_t max_width, uint32_t max_height) | 1712 | uint32_t max_width, uint32_t max_height) |
1663 | { | 1713 | { |
@@ -1680,18 +1730,23 @@ int vmw_du_connector_fill_modes(struct drm_connector *connector, | |||
1680 | return 0; | 1730 | return 0; |
1681 | mode->hdisplay = du->pref_width; | 1731 | mode->hdisplay = du->pref_width; |
1682 | mode->vdisplay = du->pref_height; | 1732 | mode->vdisplay = du->pref_height; |
1683 | mode->vrefresh = drm_mode_vrefresh(mode); | 1733 | vmw_guess_mode_timing(mode); |
1734 | |||
1684 | if (vmw_kms_validate_mode_vram(dev_priv, mode->hdisplay * 2, | 1735 | if (vmw_kms_validate_mode_vram(dev_priv, mode->hdisplay * 2, |
1685 | mode->vdisplay)) { | 1736 | mode->vdisplay)) { |
1686 | drm_mode_probed_add(connector, mode); | 1737 | drm_mode_probed_add(connector, mode); |
1738 | } else { | ||
1739 | drm_mode_destroy(dev, mode); | ||
1740 | mode = NULL; | ||
1741 | } | ||
1687 | 1742 | ||
1688 | if (du->pref_mode) { | 1743 | if (du->pref_mode) { |
1689 | list_del_init(&du->pref_mode->head); | 1744 | list_del_init(&du->pref_mode->head); |
1690 | drm_mode_destroy(dev, du->pref_mode); | 1745 | drm_mode_destroy(dev, du->pref_mode); |
1691 | } | ||
1692 | |||
1693 | du->pref_mode = mode; | ||
1694 | } | 1746 | } |
1747 | |||
1748 | /* mode might be null here, this is intended */ | ||
1749 | du->pref_mode = mode; | ||
1695 | } | 1750 | } |
1696 | 1751 | ||
1697 | for (i = 0; vmw_kms_connector_builtin[i].type != 0; i++) { | 1752 | for (i = 0; vmw_kms_connector_builtin[i].type != 0; i++) { |
@@ -1712,6 +1767,10 @@ int vmw_du_connector_fill_modes(struct drm_connector *connector, | |||
1712 | drm_mode_probed_add(connector, mode); | 1767 | drm_mode_probed_add(connector, mode); |
1713 | } | 1768 | } |
1714 | 1769 | ||
1770 | /* Move the prefered mode first, help apps pick the right mode. */ | ||
1771 | if (du->pref_mode) | ||
1772 | list_move(&du->pref_mode->head, &connector->probed_modes); | ||
1773 | |||
1715 | drm_mode_connector_list_update(connector); | 1774 | drm_mode_connector_list_update(connector); |
1716 | 1775 | ||
1717 | return 1; | 1776 | return 1; |
@@ -1723,3 +1782,63 @@ int vmw_du_connector_set_property(struct drm_connector *connector, | |||
1723 | { | 1782 | { |
1724 | return 0; | 1783 | return 0; |
1725 | } | 1784 | } |
1785 | |||
1786 | |||
1787 | int vmw_kms_update_layout_ioctl(struct drm_device *dev, void *data, | ||
1788 | struct drm_file *file_priv) | ||
1789 | { | ||
1790 | struct vmw_private *dev_priv = vmw_priv(dev); | ||
1791 | struct drm_vmw_update_layout_arg *arg = | ||
1792 | (struct drm_vmw_update_layout_arg *)data; | ||
1793 | struct vmw_master *vmaster = vmw_master(file_priv->master); | ||
1794 | void __user *user_rects; | ||
1795 | struct drm_vmw_rect *rects; | ||
1796 | unsigned rects_size; | ||
1797 | int ret; | ||
1798 | int i; | ||
1799 | struct drm_mode_config *mode_config = &dev->mode_config; | ||
1800 | |||
1801 | ret = ttm_read_lock(&vmaster->lock, true); | ||
1802 | if (unlikely(ret != 0)) | ||
1803 | return ret; | ||
1804 | |||
1805 | if (!arg->num_outputs) { | ||
1806 | struct drm_vmw_rect def_rect = {0, 0, 800, 600}; | ||
1807 | vmw_du_update_layout(dev_priv, 1, &def_rect); | ||
1808 | goto out_unlock; | ||
1809 | } | ||
1810 | |||
1811 | rects_size = arg->num_outputs * sizeof(struct drm_vmw_rect); | ||
1812 | rects = kzalloc(rects_size, GFP_KERNEL); | ||
1813 | if (unlikely(!rects)) { | ||
1814 | ret = -ENOMEM; | ||
1815 | goto out_unlock; | ||
1816 | } | ||
1817 | |||
1818 | user_rects = (void __user *)(unsigned long)arg->rects; | ||
1819 | ret = copy_from_user(rects, user_rects, rects_size); | ||
1820 | if (unlikely(ret != 0)) { | ||
1821 | DRM_ERROR("Failed to get rects.\n"); | ||
1822 | ret = -EFAULT; | ||
1823 | goto out_free; | ||
1824 | } | ||
1825 | |||
1826 | for (i = 0; i < arg->num_outputs; ++i) { | ||
1827 | if (rects->x < 0 || | ||
1828 | rects->y < 0 || | ||
1829 | rects->x + rects->w > mode_config->max_width || | ||
1830 | rects->y + rects->h > mode_config->max_height) { | ||
1831 | DRM_ERROR("Invalid GUI layout.\n"); | ||
1832 | ret = -EINVAL; | ||
1833 | goto out_free; | ||
1834 | } | ||
1835 | } | ||
1836 | |||
1837 | vmw_du_update_layout(dev_priv, arg->num_outputs, rects); | ||
1838 | |||
1839 | out_free: | ||
1840 | kfree(rects); | ||
1841 | out_unlock: | ||
1842 | ttm_read_unlock(&vmaster->lock); | ||
1843 | return ret; | ||
1844 | } | ||
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.h b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.h index db0b901f8c3f..af8e6e5bd964 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.h +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.h | |||
@@ -96,6 +96,13 @@ struct vmw_display_unit { | |||
96 | unsigned pref_height; | 96 | unsigned pref_height; |
97 | bool pref_active; | 97 | bool pref_active; |
98 | struct drm_display_mode *pref_mode; | 98 | struct drm_display_mode *pref_mode; |
99 | |||
100 | /* | ||
101 | * Gui positioning | ||
102 | */ | ||
103 | int gui_x; | ||
104 | int gui_y; | ||
105 | bool is_implicit; | ||
99 | }; | 106 | }; |
100 | 107 | ||
101 | #define vmw_crtc_to_du(x) \ | 108 | #define vmw_crtc_to_du(x) \ |
@@ -126,8 +133,7 @@ int vmw_du_connector_fill_modes(struct drm_connector *connector, | |||
126 | int vmw_du_connector_set_property(struct drm_connector *connector, | 133 | int vmw_du_connector_set_property(struct drm_connector *connector, |
127 | struct drm_property *property, | 134 | struct drm_property *property, |
128 | uint64_t val); | 135 | uint64_t val); |
129 | int vmw_du_update_layout(struct vmw_private *dev_priv, unsigned num, | 136 | |
130 | struct drm_vmw_rect *rects); | ||
131 | 137 | ||
132 | /* | 138 | /* |
133 | * Legacy display unit functions - vmwgfx_ldu.c | 139 | * Legacy display unit functions - vmwgfx_ldu.c |
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c b/drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c index 92f56bc594eb..90c5e3928491 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c | |||
@@ -337,13 +337,14 @@ static int vmw_ldu_init(struct vmw_private *dev_priv, unsigned unit) | |||
337 | ldu->base.pref_width = 800; | 337 | ldu->base.pref_width = 800; |
338 | ldu->base.pref_height = 600; | 338 | ldu->base.pref_height = 600; |
339 | ldu->base.pref_mode = NULL; | 339 | ldu->base.pref_mode = NULL; |
340 | ldu->base.is_implicit = true; | ||
340 | 341 | ||
341 | drm_connector_init(dev, connector, &vmw_legacy_connector_funcs, | 342 | drm_connector_init(dev, connector, &vmw_legacy_connector_funcs, |
342 | DRM_MODE_CONNECTOR_LVDS); | 343 | DRM_MODE_CONNECTOR_VIRTUAL); |
343 | connector->status = vmw_du_connector_detect(connector, true); | 344 | connector->status = vmw_du_connector_detect(connector, true); |
344 | 345 | ||
345 | drm_encoder_init(dev, encoder, &vmw_legacy_encoder_funcs, | 346 | drm_encoder_init(dev, encoder, &vmw_legacy_encoder_funcs, |
346 | DRM_MODE_ENCODER_LVDS); | 347 | DRM_MODE_ENCODER_VIRTUAL); |
347 | drm_mode_connector_attach_encoder(connector, encoder); | 348 | drm_mode_connector_attach_encoder(connector, encoder); |
348 | encoder->possible_crtcs = (1 << unit); | 349 | encoder->possible_crtcs = (1 << unit); |
349 | encoder->possible_clones = 0; | 350 | encoder->possible_clones = 0; |
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_scrn.c b/drivers/gpu/drm/vmwgfx/vmwgfx_scrn.c index 477b2a9eb3c2..4defdcf1c72e 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_scrn.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_scrn.c | |||
@@ -36,12 +36,9 @@ | |||
36 | container_of(x, struct vmw_screen_object_unit, base.connector) | 36 | container_of(x, struct vmw_screen_object_unit, base.connector) |
37 | 37 | ||
38 | struct vmw_screen_object_display { | 38 | struct vmw_screen_object_display { |
39 | struct list_head active; | 39 | unsigned num_implicit; |
40 | 40 | ||
41 | unsigned num_active; | 41 | struct vmw_framebuffer *implicit_fb; |
42 | unsigned last_num_active; | ||
43 | |||
44 | struct vmw_framebuffer *fb; | ||
45 | }; | 42 | }; |
46 | 43 | ||
47 | /** | 44 | /** |
@@ -54,13 +51,11 @@ struct vmw_screen_object_unit { | |||
54 | struct vmw_dma_buffer *buffer; /**< Backing store buffer */ | 51 | struct vmw_dma_buffer *buffer; /**< Backing store buffer */ |
55 | 52 | ||
56 | bool defined; | 53 | bool defined; |
57 | 54 | bool active_implicit; | |
58 | struct list_head active; | ||
59 | }; | 55 | }; |
60 | 56 | ||
61 | static void vmw_sou_destroy(struct vmw_screen_object_unit *sou) | 57 | static void vmw_sou_destroy(struct vmw_screen_object_unit *sou) |
62 | { | 58 | { |
63 | list_del_init(&sou->active); | ||
64 | vmw_display_unit_cleanup(&sou->base); | 59 | vmw_display_unit_cleanup(&sou->base); |
65 | kfree(sou); | 60 | kfree(sou); |
66 | } | 61 | } |
@@ -75,58 +70,31 @@ static void vmw_sou_crtc_destroy(struct drm_crtc *crtc) | |||
75 | vmw_sou_destroy(vmw_crtc_to_sou(crtc)); | 70 | vmw_sou_destroy(vmw_crtc_to_sou(crtc)); |
76 | } | 71 | } |
77 | 72 | ||
78 | static int vmw_sou_del_active(struct vmw_private *vmw_priv, | 73 | static void vmw_sou_del_active(struct vmw_private *vmw_priv, |
79 | struct vmw_screen_object_unit *sou) | 74 | struct vmw_screen_object_unit *sou) |
80 | { | 75 | { |
81 | struct vmw_screen_object_display *ld = vmw_priv->sou_priv; | 76 | struct vmw_screen_object_display *ld = vmw_priv->sou_priv; |
82 | if (list_empty(&sou->active)) | ||
83 | return 0; | ||
84 | 77 | ||
85 | /* Must init otherwise list_empty(&sou->active) will not work. */ | 78 | if (sou->active_implicit) { |
86 | list_del_init(&sou->active); | 79 | if (--(ld->num_implicit) == 0) |
87 | if (--(ld->num_active) == 0) { | 80 | ld->implicit_fb = NULL; |
88 | BUG_ON(!ld->fb); | 81 | sou->active_implicit = false; |
89 | if (ld->fb->unpin) | ||
90 | ld->fb->unpin(ld->fb); | ||
91 | ld->fb = NULL; | ||
92 | } | 82 | } |
93 | |||
94 | return 0; | ||
95 | } | 83 | } |
96 | 84 | ||
97 | static int vmw_sou_add_active(struct vmw_private *vmw_priv, | 85 | static void vmw_sou_add_active(struct vmw_private *vmw_priv, |
98 | struct vmw_screen_object_unit *sou, | 86 | struct vmw_screen_object_unit *sou, |
99 | struct vmw_framebuffer *vfb) | 87 | struct vmw_framebuffer *vfb) |
100 | { | 88 | { |
101 | struct vmw_screen_object_display *ld = vmw_priv->sou_priv; | 89 | struct vmw_screen_object_display *ld = vmw_priv->sou_priv; |
102 | struct vmw_screen_object_unit *entry; | ||
103 | struct list_head *at; | ||
104 | |||
105 | BUG_ON(!ld->num_active && ld->fb); | ||
106 | if (vfb != ld->fb) { | ||
107 | if (ld->fb && ld->fb->unpin) | ||
108 | ld->fb->unpin(ld->fb); | ||
109 | if (vfb->pin) | ||
110 | vfb->pin(vfb); | ||
111 | ld->fb = vfb; | ||
112 | } | ||
113 | |||
114 | if (!list_empty(&sou->active)) | ||
115 | return 0; | ||
116 | 90 | ||
117 | at = &ld->active; | 91 | BUG_ON(!ld->num_implicit && ld->implicit_fb); |
118 | list_for_each_entry(entry, &ld->active, active) { | ||
119 | if (entry->base.unit > sou->base.unit) | ||
120 | break; | ||
121 | 92 | ||
122 | at = &entry->active; | 93 | if (!sou->active_implicit && sou->base.is_implicit) { |
94 | ld->implicit_fb = vfb; | ||
95 | sou->active_implicit = true; | ||
96 | ld->num_implicit++; | ||
123 | } | 97 | } |
124 | |||
125 | list_add(&sou->active, at); | ||
126 | |||
127 | ld->num_active++; | ||
128 | |||
129 | return 0; | ||
130 | } | 98 | } |
131 | 99 | ||
132 | /** | 100 | /** |
@@ -164,8 +132,13 @@ static int vmw_sou_fifo_create(struct vmw_private *dev_priv, | |||
164 | (sou->base.unit == 0 ? SVGA_SCREEN_IS_PRIMARY : 0); | 132 | (sou->base.unit == 0 ? SVGA_SCREEN_IS_PRIMARY : 0); |
165 | cmd->obj.size.width = mode->hdisplay; | 133 | cmd->obj.size.width = mode->hdisplay; |
166 | cmd->obj.size.height = mode->vdisplay; | 134 | cmd->obj.size.height = mode->vdisplay; |
167 | cmd->obj.root.x = x; | 135 | if (sou->base.is_implicit) { |
168 | cmd->obj.root.y = y; | 136 | cmd->obj.root.x = x; |
137 | cmd->obj.root.y = y; | ||
138 | } else { | ||
139 | cmd->obj.root.x = sou->base.gui_x; | ||
140 | cmd->obj.root.y = sou->base.gui_y; | ||
141 | } | ||
169 | 142 | ||
170 | /* Ok to assume that buffer is pinned in vram */ | 143 | /* Ok to assume that buffer is pinned in vram */ |
171 | vmw_bo_get_guest_ptr(&sou->buffer->base, &cmd->obj.backingStore.ptr); | 144 | vmw_bo_get_guest_ptr(&sou->buffer->base, &cmd->obj.backingStore.ptr); |
@@ -312,10 +285,11 @@ static int vmw_sou_crtc_set_config(struct drm_mode_set *set) | |||
312 | } | 285 | } |
313 | 286 | ||
314 | /* sou only supports one fb active at the time */ | 287 | /* sou only supports one fb active at the time */ |
315 | if (dev_priv->sou_priv->fb && vfb && | 288 | if (sou->base.is_implicit && |
316 | !(dev_priv->sou_priv->num_active == 1 && | 289 | dev_priv->sou_priv->implicit_fb && vfb && |
317 | !list_empty(&sou->active)) && | 290 | !(dev_priv->sou_priv->num_implicit == 1 && |
318 | dev_priv->sou_priv->fb != vfb) { | 291 | sou->active_implicit) && |
292 | dev_priv->sou_priv->implicit_fb != vfb) { | ||
319 | DRM_ERROR("Multiple framebuffers not supported\n"); | 293 | DRM_ERROR("Multiple framebuffers not supported\n"); |
320 | return -EINVAL; | 294 | return -EINVAL; |
321 | } | 295 | } |
@@ -471,19 +445,20 @@ static int vmw_sou_init(struct vmw_private *dev_priv, unsigned unit) | |||
471 | encoder = &sou->base.encoder; | 445 | encoder = &sou->base.encoder; |
472 | connector = &sou->base.connector; | 446 | connector = &sou->base.connector; |
473 | 447 | ||
474 | INIT_LIST_HEAD(&sou->active); | 448 | sou->active_implicit = false; |
475 | 449 | ||
476 | sou->base.pref_active = (unit == 0); | 450 | sou->base.pref_active = (unit == 0); |
477 | sou->base.pref_width = 800; | 451 | sou->base.pref_width = 800; |
478 | sou->base.pref_height = 600; | 452 | sou->base.pref_height = 600; |
479 | sou->base.pref_mode = NULL; | 453 | sou->base.pref_mode = NULL; |
454 | sou->base.is_implicit = true; | ||
480 | 455 | ||
481 | drm_connector_init(dev, connector, &vmw_legacy_connector_funcs, | 456 | drm_connector_init(dev, connector, &vmw_legacy_connector_funcs, |
482 | DRM_MODE_CONNECTOR_LVDS); | 457 | DRM_MODE_CONNECTOR_VIRTUAL); |
483 | connector->status = vmw_du_connector_detect(connector, true); | 458 | connector->status = vmw_du_connector_detect(connector, true); |
484 | 459 | ||
485 | drm_encoder_init(dev, encoder, &vmw_screen_object_encoder_funcs, | 460 | drm_encoder_init(dev, encoder, &vmw_screen_object_encoder_funcs, |
486 | DRM_MODE_ENCODER_LVDS); | 461 | DRM_MODE_ENCODER_VIRTUAL); |
487 | drm_mode_connector_attach_encoder(connector, encoder); | 462 | drm_mode_connector_attach_encoder(connector, encoder); |
488 | encoder->possible_crtcs = (1 << unit); | 463 | encoder->possible_crtcs = (1 << unit); |
489 | encoder->possible_clones = 0; | 464 | encoder->possible_clones = 0; |
@@ -520,10 +495,8 @@ int vmw_kms_init_screen_object_display(struct vmw_private *dev_priv) | |||
520 | if (unlikely(!dev_priv->sou_priv)) | 495 | if (unlikely(!dev_priv->sou_priv)) |
521 | goto err_no_mem; | 496 | goto err_no_mem; |
522 | 497 | ||
523 | INIT_LIST_HEAD(&dev_priv->sou_priv->active); | 498 | dev_priv->sou_priv->num_implicit = 0; |
524 | dev_priv->sou_priv->num_active = 0; | 499 | dev_priv->sou_priv->implicit_fb = NULL; |
525 | dev_priv->sou_priv->last_num_active = 0; | ||
526 | dev_priv->sou_priv->fb = NULL; | ||
527 | 500 | ||
528 | ret = drm_vblank_init(dev, VMWGFX_NUM_DISPLAY_UNITS); | 501 | ret = drm_vblank_init(dev, VMWGFX_NUM_DISPLAY_UNITS); |
529 | if (unlikely(ret != 0)) | 502 | if (unlikely(ret != 0)) |
@@ -558,9 +531,6 @@ int vmw_kms_close_screen_object_display(struct vmw_private *dev_priv) | |||
558 | 531 | ||
559 | drm_vblank_cleanup(dev); | 532 | drm_vblank_cleanup(dev); |
560 | 533 | ||
561 | if (!list_empty(&dev_priv->sou_priv->active)) | ||
562 | DRM_ERROR("Still have active outputs when unloading driver"); | ||
563 | |||
564 | kfree(dev_priv->sou_priv); | 534 | kfree(dev_priv->sou_priv); |
565 | 535 | ||
566 | return 0; | 536 | return 0; |
diff --git a/drivers/hwspinlock/u8500_hsem.c b/drivers/hwspinlock/u8500_hsem.c index 143461a95ae4..86980fe04117 100644 --- a/drivers/hwspinlock/u8500_hsem.c +++ b/drivers/hwspinlock/u8500_hsem.c | |||
@@ -21,6 +21,7 @@ | |||
21 | * General Public License for more details. | 21 | * General Public License for more details. |
22 | */ | 22 | */ |
23 | 23 | ||
24 | #include <linux/module.h> | ||
24 | #include <linux/delay.h> | 25 | #include <linux/delay.h> |
25 | #include <linux/io.h> | 26 | #include <linux/io.h> |
26 | #include <linux/pm_runtime.h> | 27 | #include <linux/pm_runtime.h> |
@@ -108,10 +109,8 @@ static int __devinit u8500_hsem_probe(struct platform_device *pdev) | |||
108 | return -ENODEV; | 109 | return -ENODEV; |
109 | 110 | ||
110 | io_base = ioremap(res->start, resource_size(res)); | 111 | io_base = ioremap(res->start, resource_size(res)); |
111 | if (!io_base) { | 112 | if (!io_base) |
112 | ret = -ENOMEM; | 113 | return -ENOMEM; |
113 | goto free_state; | ||
114 | } | ||
115 | 114 | ||
116 | /* make sure protocol 1 is selected */ | 115 | /* make sure protocol 1 is selected */ |
117 | val = readl(io_base + HSEM_CTRL_REG); | 116 | val = readl(io_base + HSEM_CTRL_REG); |
diff --git a/drivers/ide/ide-cd.c b/drivers/ide/ide-cd.c index 04b09564bfa9..8126824daccb 100644 --- a/drivers/ide/ide-cd.c +++ b/drivers/ide/ide-cd.c | |||
@@ -43,7 +43,6 @@ | |||
43 | /* For SCSI -> ATAPI command conversion */ | 43 | /* For SCSI -> ATAPI command conversion */ |
44 | #include <scsi/scsi.h> | 44 | #include <scsi/scsi.h> |
45 | 45 | ||
46 | #include <linux/irq.h> | ||
47 | #include <linux/io.h> | 46 | #include <linux/io.h> |
48 | #include <asm/byteorder.h> | 47 | #include <asm/byteorder.h> |
49 | #include <linux/uaccess.h> | 48 | #include <linux/uaccess.h> |
diff --git a/drivers/ide/ide-floppy.c b/drivers/ide/ide-floppy.c index 61fdf544fbd6..3d42043fec51 100644 --- a/drivers/ide/ide-floppy.c +++ b/drivers/ide/ide-floppy.c | |||
@@ -35,7 +35,6 @@ | |||
35 | #include <scsi/scsi_ioctl.h> | 35 | #include <scsi/scsi_ioctl.h> |
36 | 36 | ||
37 | #include <asm/byteorder.h> | 37 | #include <asm/byteorder.h> |
38 | #include <linux/irq.h> | ||
39 | #include <linux/uaccess.h> | 38 | #include <linux/uaccess.h> |
40 | #include <linux/io.h> | 39 | #include <linux/io.h> |
41 | #include <asm/unaligned.h> | 40 | #include <asm/unaligned.h> |
diff --git a/drivers/ide/ide-tape.c b/drivers/ide/ide-tape.c index 7ecb1ade8874..ce8237d36159 100644 --- a/drivers/ide/ide-tape.c +++ b/drivers/ide/ide-tape.c | |||
@@ -41,7 +41,6 @@ | |||
41 | #include <scsi/scsi.h> | 41 | #include <scsi/scsi.h> |
42 | 42 | ||
43 | #include <asm/byteorder.h> | 43 | #include <asm/byteorder.h> |
44 | #include <linux/irq.h> | ||
45 | #include <linux/uaccess.h> | 44 | #include <linux/uaccess.h> |
46 | #include <linux/io.h> | 45 | #include <linux/io.h> |
47 | #include <asm/unaligned.h> | 46 | #include <asm/unaligned.h> |
diff --git a/drivers/idle/intel_idle.c b/drivers/idle/intel_idle.c index 18767f8ab090..5d2f8e13cf0e 100644 --- a/drivers/idle/intel_idle.c +++ b/drivers/idle/intel_idle.c | |||
@@ -82,7 +82,8 @@ static unsigned int mwait_substates; | |||
82 | static unsigned int lapic_timer_reliable_states = (1 << 1); /* Default to only C1 */ | 82 | static unsigned int lapic_timer_reliable_states = (1 << 1); /* Default to only C1 */ |
83 | 83 | ||
84 | static struct cpuidle_device __percpu *intel_idle_cpuidle_devices; | 84 | static struct cpuidle_device __percpu *intel_idle_cpuidle_devices; |
85 | static int intel_idle(struct cpuidle_device *dev, struct cpuidle_state *state); | 85 | static int intel_idle(struct cpuidle_device *dev, |
86 | struct cpuidle_driver *drv, int index); | ||
86 | 87 | ||
87 | static struct cpuidle_state *cpuidle_state_table; | 88 | static struct cpuidle_state *cpuidle_state_table; |
88 | 89 | ||
@@ -110,7 +111,6 @@ static struct cpuidle_state nehalem_cstates[MWAIT_MAX_NUM_CSTATES] = { | |||
110 | { /* MWAIT C1 */ | 111 | { /* MWAIT C1 */ |
111 | .name = "C1-NHM", | 112 | .name = "C1-NHM", |
112 | .desc = "MWAIT 0x00", | 113 | .desc = "MWAIT 0x00", |
113 | .driver_data = (void *) 0x00, | ||
114 | .flags = CPUIDLE_FLAG_TIME_VALID, | 114 | .flags = CPUIDLE_FLAG_TIME_VALID, |
115 | .exit_latency = 3, | 115 | .exit_latency = 3, |
116 | .target_residency = 6, | 116 | .target_residency = 6, |
@@ -118,7 +118,6 @@ static struct cpuidle_state nehalem_cstates[MWAIT_MAX_NUM_CSTATES] = { | |||
118 | { /* MWAIT C2 */ | 118 | { /* MWAIT C2 */ |
119 | .name = "C3-NHM", | 119 | .name = "C3-NHM", |
120 | .desc = "MWAIT 0x10", | 120 | .desc = "MWAIT 0x10", |
121 | .driver_data = (void *) 0x10, | ||
122 | .flags = CPUIDLE_FLAG_TIME_VALID | CPUIDLE_FLAG_TLB_FLUSHED, | 121 | .flags = CPUIDLE_FLAG_TIME_VALID | CPUIDLE_FLAG_TLB_FLUSHED, |
123 | .exit_latency = 20, | 122 | .exit_latency = 20, |
124 | .target_residency = 80, | 123 | .target_residency = 80, |
@@ -126,7 +125,6 @@ static struct cpuidle_state nehalem_cstates[MWAIT_MAX_NUM_CSTATES] = { | |||
126 | { /* MWAIT C3 */ | 125 | { /* MWAIT C3 */ |
127 | .name = "C6-NHM", | 126 | .name = "C6-NHM", |
128 | .desc = "MWAIT 0x20", | 127 | .desc = "MWAIT 0x20", |
129 | .driver_data = (void *) 0x20, | ||
130 | .flags = CPUIDLE_FLAG_TIME_VALID | CPUIDLE_FLAG_TLB_FLUSHED, | 128 | .flags = CPUIDLE_FLAG_TIME_VALID | CPUIDLE_FLAG_TLB_FLUSHED, |
131 | .exit_latency = 200, | 129 | .exit_latency = 200, |
132 | .target_residency = 800, | 130 | .target_residency = 800, |
@@ -138,7 +136,6 @@ static struct cpuidle_state snb_cstates[MWAIT_MAX_NUM_CSTATES] = { | |||
138 | { /* MWAIT C1 */ | 136 | { /* MWAIT C1 */ |
139 | .name = "C1-SNB", | 137 | .name = "C1-SNB", |
140 | .desc = "MWAIT 0x00", | 138 | .desc = "MWAIT 0x00", |
141 | .driver_data = (void *) 0x00, | ||
142 | .flags = CPUIDLE_FLAG_TIME_VALID, | 139 | .flags = CPUIDLE_FLAG_TIME_VALID, |
143 | .exit_latency = 1, | 140 | .exit_latency = 1, |
144 | .target_residency = 1, | 141 | .target_residency = 1, |
@@ -146,7 +143,6 @@ static struct cpuidle_state snb_cstates[MWAIT_MAX_NUM_CSTATES] = { | |||
146 | { /* MWAIT C2 */ | 143 | { /* MWAIT C2 */ |
147 | .name = "C3-SNB", | 144 | .name = "C3-SNB", |
148 | .desc = "MWAIT 0x10", | 145 | .desc = "MWAIT 0x10", |
149 | .driver_data = (void *) 0x10, | ||
150 | .flags = CPUIDLE_FLAG_TIME_VALID | CPUIDLE_FLAG_TLB_FLUSHED, | 146 | .flags = CPUIDLE_FLAG_TIME_VALID | CPUIDLE_FLAG_TLB_FLUSHED, |
151 | .exit_latency = 80, | 147 | .exit_latency = 80, |
152 | .target_residency = 211, | 148 | .target_residency = 211, |
@@ -154,7 +150,6 @@ static struct cpuidle_state snb_cstates[MWAIT_MAX_NUM_CSTATES] = { | |||
154 | { /* MWAIT C3 */ | 150 | { /* MWAIT C3 */ |
155 | .name = "C6-SNB", | 151 | .name = "C6-SNB", |
156 | .desc = "MWAIT 0x20", | 152 | .desc = "MWAIT 0x20", |
157 | .driver_data = (void *) 0x20, | ||
158 | .flags = CPUIDLE_FLAG_TIME_VALID | CPUIDLE_FLAG_TLB_FLUSHED, | 153 | .flags = CPUIDLE_FLAG_TIME_VALID | CPUIDLE_FLAG_TLB_FLUSHED, |
159 | .exit_latency = 104, | 154 | .exit_latency = 104, |
160 | .target_residency = 345, | 155 | .target_residency = 345, |
@@ -162,7 +157,6 @@ static struct cpuidle_state snb_cstates[MWAIT_MAX_NUM_CSTATES] = { | |||
162 | { /* MWAIT C4 */ | 157 | { /* MWAIT C4 */ |
163 | .name = "C7-SNB", | 158 | .name = "C7-SNB", |
164 | .desc = "MWAIT 0x30", | 159 | .desc = "MWAIT 0x30", |
165 | .driver_data = (void *) 0x30, | ||
166 | .flags = CPUIDLE_FLAG_TIME_VALID | CPUIDLE_FLAG_TLB_FLUSHED, | 160 | .flags = CPUIDLE_FLAG_TIME_VALID | CPUIDLE_FLAG_TLB_FLUSHED, |
167 | .exit_latency = 109, | 161 | .exit_latency = 109, |
168 | .target_residency = 345, | 162 | .target_residency = 345, |
@@ -174,7 +168,6 @@ static struct cpuidle_state atom_cstates[MWAIT_MAX_NUM_CSTATES] = { | |||
174 | { /* MWAIT C1 */ | 168 | { /* MWAIT C1 */ |
175 | .name = "C1-ATM", | 169 | .name = "C1-ATM", |
176 | .desc = "MWAIT 0x00", | 170 | .desc = "MWAIT 0x00", |
177 | .driver_data = (void *) 0x00, | ||
178 | .flags = CPUIDLE_FLAG_TIME_VALID, | 171 | .flags = CPUIDLE_FLAG_TIME_VALID, |
179 | .exit_latency = 1, | 172 | .exit_latency = 1, |
180 | .target_residency = 4, | 173 | .target_residency = 4, |
@@ -182,7 +175,6 @@ static struct cpuidle_state atom_cstates[MWAIT_MAX_NUM_CSTATES] = { | |||
182 | { /* MWAIT C2 */ | 175 | { /* MWAIT C2 */ |
183 | .name = "C2-ATM", | 176 | .name = "C2-ATM", |
184 | .desc = "MWAIT 0x10", | 177 | .desc = "MWAIT 0x10", |
185 | .driver_data = (void *) 0x10, | ||
186 | .flags = CPUIDLE_FLAG_TIME_VALID, | 178 | .flags = CPUIDLE_FLAG_TIME_VALID, |
187 | .exit_latency = 20, | 179 | .exit_latency = 20, |
188 | .target_residency = 80, | 180 | .target_residency = 80, |
@@ -191,7 +183,6 @@ static struct cpuidle_state atom_cstates[MWAIT_MAX_NUM_CSTATES] = { | |||
191 | { /* MWAIT C4 */ | 183 | { /* MWAIT C4 */ |
192 | .name = "C4-ATM", | 184 | .name = "C4-ATM", |
193 | .desc = "MWAIT 0x30", | 185 | .desc = "MWAIT 0x30", |
194 | .driver_data = (void *) 0x30, | ||
195 | .flags = CPUIDLE_FLAG_TIME_VALID | CPUIDLE_FLAG_TLB_FLUSHED, | 186 | .flags = CPUIDLE_FLAG_TIME_VALID | CPUIDLE_FLAG_TLB_FLUSHED, |
196 | .exit_latency = 100, | 187 | .exit_latency = 100, |
197 | .target_residency = 400, | 188 | .target_residency = 400, |
@@ -200,23 +191,55 @@ static struct cpuidle_state atom_cstates[MWAIT_MAX_NUM_CSTATES] = { | |||
200 | { /* MWAIT C6 */ | 191 | { /* MWAIT C6 */ |
201 | .name = "C6-ATM", | 192 | .name = "C6-ATM", |
202 | .desc = "MWAIT 0x52", | 193 | .desc = "MWAIT 0x52", |
203 | .driver_data = (void *) 0x52, | ||
204 | .flags = CPUIDLE_FLAG_TIME_VALID | CPUIDLE_FLAG_TLB_FLUSHED, | 194 | .flags = CPUIDLE_FLAG_TIME_VALID | CPUIDLE_FLAG_TLB_FLUSHED, |
205 | .exit_latency = 140, | 195 | .exit_latency = 140, |
206 | .target_residency = 560, | 196 | .target_residency = 560, |
207 | .enter = &intel_idle }, | 197 | .enter = &intel_idle }, |
208 | }; | 198 | }; |
209 | 199 | ||
200 | static int get_driver_data(int cstate) | ||
201 | { | ||
202 | int driver_data; | ||
203 | switch (cstate) { | ||
204 | |||
205 | case 1: /* MWAIT C1 */ | ||
206 | driver_data = 0x00; | ||
207 | break; | ||
208 | case 2: /* MWAIT C2 */ | ||
209 | driver_data = 0x10; | ||
210 | break; | ||
211 | case 3: /* MWAIT C3 */ | ||
212 | driver_data = 0x20; | ||
213 | break; | ||
214 | case 4: /* MWAIT C4 */ | ||
215 | driver_data = 0x30; | ||
216 | break; | ||
217 | case 5: /* MWAIT C5 */ | ||
218 | driver_data = 0x40; | ||
219 | break; | ||
220 | case 6: /* MWAIT C6 */ | ||
221 | driver_data = 0x52; | ||
222 | break; | ||
223 | default: | ||
224 | driver_data = 0x00; | ||
225 | } | ||
226 | return driver_data; | ||
227 | } | ||
228 | |||
210 | /** | 229 | /** |
211 | * intel_idle | 230 | * intel_idle |
212 | * @dev: cpuidle_device | 231 | * @dev: cpuidle_device |
213 | * @state: cpuidle state | 232 | * @drv: cpuidle driver |
233 | * @index: index of cpuidle state | ||
214 | * | 234 | * |
215 | */ | 235 | */ |
216 | static int intel_idle(struct cpuidle_device *dev, struct cpuidle_state *state) | 236 | static int intel_idle(struct cpuidle_device *dev, |
237 | struct cpuidle_driver *drv, int index) | ||
217 | { | 238 | { |
218 | unsigned long ecx = 1; /* break on interrupt flag */ | 239 | unsigned long ecx = 1; /* break on interrupt flag */ |
219 | unsigned long eax = (unsigned long)cpuidle_get_statedata(state); | 240 | struct cpuidle_state *state = &drv->states[index]; |
241 | struct cpuidle_state_usage *state_usage = &dev->states_usage[index]; | ||
242 | unsigned long eax = (unsigned long)cpuidle_get_statedata(state_usage); | ||
220 | unsigned int cstate; | 243 | unsigned int cstate; |
221 | ktime_t kt_before, kt_after; | 244 | ktime_t kt_before, kt_after; |
222 | s64 usec_delta; | 245 | s64 usec_delta; |
@@ -257,7 +280,10 @@ static int intel_idle(struct cpuidle_device *dev, struct cpuidle_state *state) | |||
257 | if (!(lapic_timer_reliable_states & (1 << (cstate)))) | 280 | if (!(lapic_timer_reliable_states & (1 << (cstate)))) |
258 | clockevents_notify(CLOCK_EVT_NOTIFY_BROADCAST_EXIT, &cpu); | 281 | clockevents_notify(CLOCK_EVT_NOTIFY_BROADCAST_EXIT, &cpu); |
259 | 282 | ||
260 | return usec_delta; | 283 | /* Update cpuidle counters */ |
284 | dev->last_residency = (int)usec_delta; | ||
285 | |||
286 | return index; | ||
261 | } | 287 | } |
262 | 288 | ||
263 | static void __setup_broadcast_timer(void *arg) | 289 | static void __setup_broadcast_timer(void *arg) |
@@ -398,6 +424,60 @@ static void intel_idle_cpuidle_devices_uninit(void) | |||
398 | return; | 424 | return; |
399 | } | 425 | } |
400 | /* | 426 | /* |
427 | * intel_idle_cpuidle_driver_init() | ||
428 | * allocate, initialize cpuidle_states | ||
429 | */ | ||
430 | static int intel_idle_cpuidle_driver_init(void) | ||
431 | { | ||
432 | int cstate; | ||
433 | struct cpuidle_driver *drv = &intel_idle_driver; | ||
434 | |||
435 | drv->state_count = 1; | ||
436 | |||
437 | for (cstate = 1; cstate < MWAIT_MAX_NUM_CSTATES; ++cstate) { | ||
438 | int num_substates; | ||
439 | |||
440 | if (cstate > max_cstate) { | ||
441 | printk(PREFIX "max_cstate %d reached\n", | ||
442 | max_cstate); | ||
443 | break; | ||
444 | } | ||
445 | |||
446 | /* does the state exist in CPUID.MWAIT? */ | ||
447 | num_substates = (mwait_substates >> ((cstate) * 4)) | ||
448 | & MWAIT_SUBSTATE_MASK; | ||
449 | if (num_substates == 0) | ||
450 | continue; | ||
451 | /* is the state not enabled? */ | ||
452 | if (cpuidle_state_table[cstate].enter == NULL) { | ||
453 | /* does the driver not know about the state? */ | ||
454 | if (*cpuidle_state_table[cstate].name == '\0') | ||
455 | pr_debug(PREFIX "unaware of model 0x%x" | ||
456 | " MWAIT %d please" | ||
457 | " contact lenb@kernel.org", | ||
458 | boot_cpu_data.x86_model, cstate); | ||
459 | continue; | ||
460 | } | ||
461 | |||
462 | if ((cstate > 2) && | ||
463 | !boot_cpu_has(X86_FEATURE_NONSTOP_TSC)) | ||
464 | mark_tsc_unstable("TSC halts in idle" | ||
465 | " states deeper than C2"); | ||
466 | |||
467 | drv->states[drv->state_count] = /* structure copy */ | ||
468 | cpuidle_state_table[cstate]; | ||
469 | |||
470 | drv->state_count += 1; | ||
471 | } | ||
472 | |||
473 | if (auto_demotion_disable_flags) | ||
474 | smp_call_function(auto_demotion_disable, NULL, 1); | ||
475 | |||
476 | return 0; | ||
477 | } | ||
478 | |||
479 | |||
480 | /* | ||
401 | * intel_idle_cpuidle_devices_init() | 481 | * intel_idle_cpuidle_devices_init() |
402 | * allocate, initialize, register cpuidle_devices | 482 | * allocate, initialize, register cpuidle_devices |
403 | */ | 483 | */ |
@@ -431,22 +511,11 @@ static int intel_idle_cpuidle_devices_init(void) | |||
431 | continue; | 511 | continue; |
432 | /* is the state not enabled? */ | 512 | /* is the state not enabled? */ |
433 | if (cpuidle_state_table[cstate].enter == NULL) { | 513 | if (cpuidle_state_table[cstate].enter == NULL) { |
434 | /* does the driver not know about the state? */ | ||
435 | if (*cpuidle_state_table[cstate].name == '\0') | ||
436 | pr_debug(PREFIX "unaware of model 0x%x" | ||
437 | " MWAIT %d please" | ||
438 | " contact lenb@kernel.org", | ||
439 | boot_cpu_data.x86_model, cstate); | ||
440 | continue; | 514 | continue; |
441 | } | 515 | } |
442 | 516 | ||
443 | if ((cstate > 2) && | 517 | dev->states_usage[dev->state_count].driver_data = |
444 | !boot_cpu_has(X86_FEATURE_NONSTOP_TSC)) | 518 | (void *)get_driver_data(cstate); |
445 | mark_tsc_unstable("TSC halts in idle" | ||
446 | " states deeper than C2"); | ||
447 | |||
448 | dev->states[dev->state_count] = /* structure copy */ | ||
449 | cpuidle_state_table[cstate]; | ||
450 | 519 | ||
451 | dev->state_count += 1; | 520 | dev->state_count += 1; |
452 | } | 521 | } |
@@ -459,8 +528,6 @@ static int intel_idle_cpuidle_devices_init(void) | |||
459 | return -EIO; | 528 | return -EIO; |
460 | } | 529 | } |
461 | } | 530 | } |
462 | if (auto_demotion_disable_flags) | ||
463 | smp_call_function(auto_demotion_disable, NULL, 1); | ||
464 | 531 | ||
465 | return 0; | 532 | return 0; |
466 | } | 533 | } |
@@ -478,6 +545,7 @@ static int __init intel_idle_init(void) | |||
478 | if (retval) | 545 | if (retval) |
479 | return retval; | 546 | return retval; |
480 | 547 | ||
548 | intel_idle_cpuidle_driver_init(); | ||
481 | retval = cpuidle_register_driver(&intel_idle_driver); | 549 | retval = cpuidle_register_driver(&intel_idle_driver); |
482 | if (retval) { | 550 | if (retval) { |
483 | printk(KERN_DEBUG PREFIX "intel_idle yielding to %s", | 551 | printk(KERN_DEBUG PREFIX "intel_idle yielding to %s", |
diff --git a/drivers/iommu/omap-iommu-debug.c b/drivers/iommu/omap-iommu-debug.c index 9c192e79f806..288da5c1499d 100644 --- a/drivers/iommu/omap-iommu-debug.c +++ b/drivers/iommu/omap-iommu-debug.c | |||
@@ -10,6 +10,7 @@ | |||
10 | * published by the Free Software Foundation. | 10 | * published by the Free Software Foundation. |
11 | */ | 11 | */ |
12 | 12 | ||
13 | #include <linux/module.h> | ||
13 | #include <linux/err.h> | 14 | #include <linux/err.h> |
14 | #include <linux/clk.h> | 15 | #include <linux/clk.h> |
15 | #include <linux/io.h> | 16 | #include <linux/io.h> |
diff --git a/drivers/iommu/omap-iovmm.c b/drivers/iommu/omap-iovmm.c index e8fdb8830f69..46be456fcc00 100644 --- a/drivers/iommu/omap-iovmm.c +++ b/drivers/iommu/omap-iovmm.c | |||
@@ -10,6 +10,7 @@ | |||
10 | * published by the Free Software Foundation. | 10 | * published by the Free Software Foundation. |
11 | */ | 11 | */ |
12 | 12 | ||
13 | #include <linux/module.h> | ||
13 | #include <linux/err.h> | 14 | #include <linux/err.h> |
14 | #include <linux/slab.h> | 15 | #include <linux/slab.h> |
15 | #include <linux/vmalloc.h> | 16 | #include <linux/vmalloc.h> |
diff --git a/drivers/macintosh/via-macii.c b/drivers/macintosh/via-macii.c index 817f37a875c9..c9570fcf1cce 100644 --- a/drivers/macintosh/via-macii.c +++ b/drivers/macintosh/via-macii.c | |||
@@ -159,7 +159,7 @@ int macii_init(void) | |||
159 | err = macii_init_via(); | 159 | err = macii_init_via(); |
160 | if (err) goto out; | 160 | if (err) goto out; |
161 | 161 | ||
162 | err = request_irq(IRQ_MAC_ADB, macii_interrupt, IRQ_FLG_LOCK, "ADB", | 162 | err = request_irq(IRQ_MAC_ADB, macii_interrupt, 0, "ADB", |
163 | macii_interrupt); | 163 | macii_interrupt); |
164 | if (err) goto out; | 164 | if (err) goto out; |
165 | 165 | ||
diff --git a/drivers/macintosh/via-maciisi.c b/drivers/macintosh/via-maciisi.c index 9ab5b0c34f0d..34d02a91b29f 100644 --- a/drivers/macintosh/via-maciisi.c +++ b/drivers/macintosh/via-maciisi.c | |||
@@ -122,8 +122,8 @@ maciisi_init(void) | |||
122 | return err; | 122 | return err; |
123 | } | 123 | } |
124 | 124 | ||
125 | if (request_irq(IRQ_MAC_ADB, maciisi_interrupt, IRQ_FLG_LOCK | IRQ_FLG_FAST, | 125 | if (request_irq(IRQ_MAC_ADB, maciisi_interrupt, 0, "ADB", |
126 | "ADB", maciisi_interrupt)) { | 126 | maciisi_interrupt)) { |
127 | printk(KERN_ERR "maciisi_init: can't get irq %d\n", IRQ_MAC_ADB); | 127 | printk(KERN_ERR "maciisi_init: can't get irq %d\n", IRQ_MAC_ADB); |
128 | return -EAGAIN; | 128 | return -EAGAIN; |
129 | } | 129 | } |
diff --git a/drivers/md/dm-bufio.c b/drivers/md/dm-bufio.c index cb246667dd52..0a6806f80ab5 100644 --- a/drivers/md/dm-bufio.c +++ b/drivers/md/dm-bufio.c | |||
@@ -14,6 +14,7 @@ | |||
14 | #include <linux/vmalloc.h> | 14 | #include <linux/vmalloc.h> |
15 | #include <linux/version.h> | 15 | #include <linux/version.h> |
16 | #include <linux/shrinker.h> | 16 | #include <linux/shrinker.h> |
17 | #include <linux/module.h> | ||
17 | 18 | ||
18 | #define DM_MSG_PREFIX "bufio" | 19 | #define DM_MSG_PREFIX "bufio" |
19 | 20 | ||
diff --git a/drivers/md/persistent-data/dm-btree-remove.c b/drivers/md/persistent-data/dm-btree-remove.c index 65fd85ec6514..023fbc2d389e 100644 --- a/drivers/md/persistent-data/dm-btree-remove.c +++ b/drivers/md/persistent-data/dm-btree-remove.c | |||
@@ -8,7 +8,7 @@ | |||
8 | #include "dm-btree-internal.h" | 8 | #include "dm-btree-internal.h" |
9 | #include "dm-transaction-manager.h" | 9 | #include "dm-transaction-manager.h" |
10 | 10 | ||
11 | #include <linux/module.h> | 11 | #include <linux/export.h> |
12 | 12 | ||
13 | /* | 13 | /* |
14 | * Removing an entry from a btree | 14 | * Removing an entry from a btree |
diff --git a/drivers/md/persistent-data/dm-btree.c b/drivers/md/persistent-data/dm-btree.c index e0638be53ea4..bd1e7ffbe26c 100644 --- a/drivers/md/persistent-data/dm-btree.c +++ b/drivers/md/persistent-data/dm-btree.c | |||
@@ -8,7 +8,7 @@ | |||
8 | #include "dm-space-map.h" | 8 | #include "dm-space-map.h" |
9 | #include "dm-transaction-manager.h" | 9 | #include "dm-transaction-manager.h" |
10 | 10 | ||
11 | #include <linux/module.h> | 11 | #include <linux/export.h> |
12 | #include <linux/device-mapper.h> | 12 | #include <linux/device-mapper.h> |
13 | 13 | ||
14 | #define DM_MSG_PREFIX "btree" | 14 | #define DM_MSG_PREFIX "btree" |
diff --git a/drivers/md/persistent-data/dm-space-map-checker.c b/drivers/md/persistent-data/dm-space-map-checker.c index bb44a937fe63..50ed53bf4aa2 100644 --- a/drivers/md/persistent-data/dm-space-map-checker.c +++ b/drivers/md/persistent-data/dm-space-map-checker.c | |||
@@ -7,6 +7,7 @@ | |||
7 | #include "dm-space-map-checker.h" | 7 | #include "dm-space-map-checker.h" |
8 | 8 | ||
9 | #include <linux/device-mapper.h> | 9 | #include <linux/device-mapper.h> |
10 | #include <linux/export.h> | ||
10 | 11 | ||
11 | #ifdef CONFIG_DM_DEBUG_SPACE_MAPS | 12 | #ifdef CONFIG_DM_DEBUG_SPACE_MAPS |
12 | 13 | ||
diff --git a/drivers/md/persistent-data/dm-space-map-disk.c b/drivers/md/persistent-data/dm-space-map-disk.c index aeff7852cf79..fc469ba9f627 100644 --- a/drivers/md/persistent-data/dm-space-map-disk.c +++ b/drivers/md/persistent-data/dm-space-map-disk.c | |||
@@ -12,7 +12,7 @@ | |||
12 | 12 | ||
13 | #include <linux/list.h> | 13 | #include <linux/list.h> |
14 | #include <linux/slab.h> | 14 | #include <linux/slab.h> |
15 | #include <linux/module.h> | 15 | #include <linux/export.h> |
16 | #include <linux/device-mapper.h> | 16 | #include <linux/device-mapper.h> |
17 | 17 | ||
18 | #define DM_MSG_PREFIX "space map disk" | 18 | #define DM_MSG_PREFIX "space map disk" |
diff --git a/drivers/md/persistent-data/dm-transaction-manager.c b/drivers/md/persistent-data/dm-transaction-manager.c index 728e89a3f978..6f8d38747d7f 100644 --- a/drivers/md/persistent-data/dm-transaction-manager.c +++ b/drivers/md/persistent-data/dm-transaction-manager.c | |||
@@ -10,7 +10,7 @@ | |||
10 | #include "dm-space-map-metadata.h" | 10 | #include "dm-space-map-metadata.h" |
11 | #include "dm-persistent-data-internal.h" | 11 | #include "dm-persistent-data-internal.h" |
12 | 12 | ||
13 | #include <linux/module.h> | 13 | #include <linux/export.h> |
14 | #include <linux/slab.h> | 14 | #include <linux/slab.h> |
15 | #include <linux/device-mapper.h> | 15 | #include <linux/device-mapper.h> |
16 | 16 | ||
diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c index 472aedfb07cf..297e26092178 100644 --- a/drivers/md/raid5.c +++ b/drivers/md/raid5.c | |||
@@ -3110,7 +3110,7 @@ static void handle_stripe(struct stripe_head *sh) | |||
3110 | struct r5dev *pdev, *qdev; | 3110 | struct r5dev *pdev, *qdev; |
3111 | 3111 | ||
3112 | clear_bit(STRIPE_HANDLE, &sh->state); | 3112 | clear_bit(STRIPE_HANDLE, &sh->state); |
3113 | if (test_and_set_bit(STRIPE_ACTIVE, &sh->state)) { | 3113 | if (test_and_set_bit_lock(STRIPE_ACTIVE, &sh->state)) { |
3114 | /* already being handled, ensure it gets handled | 3114 | /* already being handled, ensure it gets handled |
3115 | * again when current action finishes */ | 3115 | * again when current action finishes */ |
3116 | set_bit(STRIPE_HANDLE, &sh->state); | 3116 | set_bit(STRIPE_HANDLE, &sh->state); |
@@ -3159,10 +3159,14 @@ static void handle_stripe(struct stripe_head *sh) | |||
3159 | /* check if the array has lost more than max_degraded devices and, | 3159 | /* check if the array has lost more than max_degraded devices and, |
3160 | * if so, some requests might need to be failed. | 3160 | * if so, some requests might need to be failed. |
3161 | */ | 3161 | */ |
3162 | if (s.failed > conf->max_degraded && s.to_read+s.to_write+s.written) | 3162 | if (s.failed > conf->max_degraded) { |
3163 | handle_failed_stripe(conf, sh, &s, disks, &s.return_bi); | 3163 | sh->check_state = 0; |
3164 | if (s.failed > conf->max_degraded && s.syncing) | 3164 | sh->reconstruct_state = 0; |
3165 | handle_failed_sync(conf, sh, &s); | 3165 | if (s.to_read+s.to_write+s.written) |
3166 | handle_failed_stripe(conf, sh, &s, disks, &s.return_bi); | ||
3167 | if (s.syncing) | ||
3168 | handle_failed_sync(conf, sh, &s); | ||
3169 | } | ||
3166 | 3170 | ||
3167 | /* | 3171 | /* |
3168 | * might be able to return some write requests if the parity blocks | 3172 | * might be able to return some write requests if the parity blocks |
@@ -3371,7 +3375,7 @@ finish: | |||
3371 | 3375 | ||
3372 | return_io(s.return_bi); | 3376 | return_io(s.return_bi); |
3373 | 3377 | ||
3374 | clear_bit(STRIPE_ACTIVE, &sh->state); | 3378 | clear_bit_unlock(STRIPE_ACTIVE, &sh->state); |
3375 | } | 3379 | } |
3376 | 3380 | ||
3377 | static void raid5_activate_delayed(struct r5conf *conf) | 3381 | static void raid5_activate_delayed(struct r5conf *conf) |
diff --git a/drivers/media/dvb/dvb-usb/mxl111sf-i2c.c b/drivers/media/dvb/dvb-usb/mxl111sf-i2c.c index 2e8c288258a9..34434557ef65 100644 --- a/drivers/media/dvb/dvb-usb/mxl111sf-i2c.c +++ b/drivers/media/dvb/dvb-usb/mxl111sf-i2c.c | |||
@@ -398,7 +398,6 @@ static int mxl111sf_i2c_readagain(struct mxl111sf_state *state, | |||
398 | u8 i2c_r_data[24]; | 398 | u8 i2c_r_data[24]; |
399 | u8 i = 0; | 399 | u8 i = 0; |
400 | u8 fifo_status = 0; | 400 | u8 fifo_status = 0; |
401 | int ret; | ||
402 | int status = 0; | 401 | int status = 0; |
403 | 402 | ||
404 | mxl_i2c("read %d bytes", count); | 403 | mxl_i2c("read %d bytes", count); |
@@ -418,7 +417,7 @@ static int mxl111sf_i2c_readagain(struct mxl111sf_state *state, | |||
418 | i2c_w_data[4+(i*3)] = 0x00; | 417 | i2c_w_data[4+(i*3)] = 0x00; |
419 | } | 418 | } |
420 | 419 | ||
421 | ret = mxl111sf_i2c_get_data(state, 0, i2c_w_data, i2c_r_data); | 420 | mxl111sf_i2c_get_data(state, 0, i2c_w_data, i2c_r_data); |
422 | 421 | ||
423 | /* Check for I2C NACK status */ | 422 | /* Check for I2C NACK status */ |
424 | if (mxl111sf_i2c_check_status(state) == 1) { | 423 | if (mxl111sf_i2c_check_status(state) == 1) { |
diff --git a/drivers/media/dvb/dvb-usb/mxl111sf-phy.c b/drivers/media/dvb/dvb-usb/mxl111sf-phy.c index 91dc1fc2825b..b741b3a7a325 100644 --- a/drivers/media/dvb/dvb-usb/mxl111sf-phy.c +++ b/drivers/media/dvb/dvb-usb/mxl111sf-phy.c | |||
@@ -296,8 +296,7 @@ int mxl111sf_config_spi(struct mxl111sf_state *state, int onoff) | |||
296 | goto fail; | 296 | goto fail; |
297 | 297 | ||
298 | ret = mxl111sf_write_reg(state, 0x00, 0x00); | 298 | ret = mxl111sf_write_reg(state, 0x00, 0x00); |
299 | if (mxl_fail(ret)) | 299 | mxl_fail(ret); |
300 | goto fail; | ||
301 | fail: | 300 | fail: |
302 | return ret; | 301 | return ret; |
303 | } | 302 | } |
@@ -328,11 +327,13 @@ int mxl111sf_idac_config(struct mxl111sf_state *state, | |||
328 | /* set hysteresis value reg: 0x0B<5:0> */ | 327 | /* set hysteresis value reg: 0x0B<5:0> */ |
329 | ret = mxl111sf_write_reg(state, V6_IDAC_HYSTERESIS_REG, | 328 | ret = mxl111sf_write_reg(state, V6_IDAC_HYSTERESIS_REG, |
330 | (hysteresis_value & 0x3F)); | 329 | (hysteresis_value & 0x3F)); |
330 | mxl_fail(ret); | ||
331 | } | 331 | } |
332 | 332 | ||
333 | ret = mxl111sf_write_reg(state, V6_IDAC_SETTINGS_REG, val); | 333 | ret = mxl111sf_write_reg(state, V6_IDAC_SETTINGS_REG, val); |
334 | mxl_fail(ret); | ||
334 | 335 | ||
335 | return val; | 336 | return ret; |
336 | } | 337 | } |
337 | 338 | ||
338 | /* | 339 | /* |
diff --git a/drivers/media/video/s5k6aa.c b/drivers/media/video/s5k6aa.c index 2446736b7871..0df7f2a41814 100644 --- a/drivers/media/video/s5k6aa.c +++ b/drivers/media/video/s5k6aa.c | |||
@@ -19,6 +19,7 @@ | |||
19 | #include <linux/gpio.h> | 19 | #include <linux/gpio.h> |
20 | #include <linux/i2c.h> | 20 | #include <linux/i2c.h> |
21 | #include <linux/media.h> | 21 | #include <linux/media.h> |
22 | #include <linux/module.h> | ||
22 | #include <linux/regulator/consumer.h> | 23 | #include <linux/regulator/consumer.h> |
23 | #include <linux/slab.h> | 24 | #include <linux/slab.h> |
24 | 25 | ||
diff --git a/drivers/media/video/s5p-mfc/s5p_mfc_dec.c b/drivers/media/video/s5p-mfc/s5p_mfc_dec.c index 725634d9736d..844a4d7797bc 100644 --- a/drivers/media/video/s5p-mfc/s5p_mfc_dec.c +++ b/drivers/media/video/s5p-mfc/s5p_mfc_dec.c | |||
@@ -220,8 +220,8 @@ static int vidioc_querycap(struct file *file, void *priv, | |||
220 | strncpy(cap->card, dev->plat_dev->name, sizeof(cap->card) - 1); | 220 | strncpy(cap->card, dev->plat_dev->name, sizeof(cap->card) - 1); |
221 | cap->bus_info[0] = 0; | 221 | cap->bus_info[0] = 0; |
222 | cap->version = KERNEL_VERSION(1, 0, 0); | 222 | cap->version = KERNEL_VERSION(1, 0, 0); |
223 | cap->capabilities = V4L2_CAP_VIDEO_CAPTURE | V4L2_CAP_VIDEO_OUTPUT | 223 | cap->capabilities = V4L2_CAP_VIDEO_CAPTURE_MPLANE | |
224 | | V4L2_CAP_STREAMING; | 224 | V4L2_CAP_VIDEO_OUTPUT_MPLANE | V4L2_CAP_STREAMING; |
225 | return 0; | 225 | return 0; |
226 | } | 226 | } |
227 | 227 | ||
diff --git a/drivers/media/video/s5p-mfc/s5p_mfc_enc.c b/drivers/media/video/s5p-mfc/s5p_mfc_enc.c index ecef127dbc66..1e8cdb77d4b8 100644 --- a/drivers/media/video/s5p-mfc/s5p_mfc_enc.c +++ b/drivers/media/video/s5p-mfc/s5p_mfc_enc.c | |||
@@ -785,8 +785,8 @@ static int vidioc_querycap(struct file *file, void *priv, | |||
785 | strncpy(cap->card, dev->plat_dev->name, sizeof(cap->card) - 1); | 785 | strncpy(cap->card, dev->plat_dev->name, sizeof(cap->card) - 1); |
786 | cap->bus_info[0] = 0; | 786 | cap->bus_info[0] = 0; |
787 | cap->version = KERNEL_VERSION(1, 0, 0); | 787 | cap->version = KERNEL_VERSION(1, 0, 0); |
788 | cap->capabilities = V4L2_CAP_VIDEO_CAPTURE | 788 | cap->capabilities = V4L2_CAP_VIDEO_CAPTURE_MPLANE |
789 | | V4L2_CAP_VIDEO_OUTPUT | 789 | | V4L2_CAP_VIDEO_OUTPUT_MPLANE |
790 | | V4L2_CAP_STREAMING; | 790 | | V4L2_CAP_STREAMING; |
791 | return 0; | 791 | return 0; |
792 | } | 792 | } |
diff --git a/drivers/media/video/uvc/uvc_ctrl.c b/drivers/media/video/uvc/uvc_ctrl.c index 10c2364f3e8a..254d32688843 100644 --- a/drivers/media/video/uvc/uvc_ctrl.c +++ b/drivers/media/video/uvc/uvc_ctrl.c | |||
@@ -1016,7 +1016,8 @@ int uvc_query_v4l2_menu(struct uvc_video_chain *chain, | |||
1016 | 1016 | ||
1017 | menu_info = &mapping->menu_info[query_menu->index]; | 1017 | menu_info = &mapping->menu_info[query_menu->index]; |
1018 | 1018 | ||
1019 | if (ctrl->info.flags & UVC_CTRL_FLAG_GET_RES) { | 1019 | if (mapping->data_type == UVC_CTRL_DATA_TYPE_BITMASK && |
1020 | (ctrl->info.flags & UVC_CTRL_FLAG_GET_RES)) { | ||
1020 | s32 bitmap; | 1021 | s32 bitmap; |
1021 | 1022 | ||
1022 | if (!ctrl->cached) { | 1023 | if (!ctrl->cached) { |
@@ -1225,7 +1226,8 @@ int uvc_ctrl_set(struct uvc_video_chain *chain, | |||
1225 | /* Valid menu indices are reported by the GET_RES request for | 1226 | /* Valid menu indices are reported by the GET_RES request for |
1226 | * UVC controls that support it. | 1227 | * UVC controls that support it. |
1227 | */ | 1228 | */ |
1228 | if (ctrl->info.flags & UVC_CTRL_FLAG_GET_RES) { | 1229 | if (mapping->data_type == UVC_CTRL_DATA_TYPE_BITMASK && |
1230 | (ctrl->info.flags & UVC_CTRL_FLAG_GET_RES)) { | ||
1229 | if (!ctrl->cached) { | 1231 | if (!ctrl->cached) { |
1230 | ret = uvc_ctrl_populate_cache(chain, ctrl); | 1232 | ret = uvc_ctrl_populate_cache(chain, ctrl); |
1231 | if (ret < 0) | 1233 | if (ret < 0) |
diff --git a/drivers/media/video/v4l2-ctrls.c b/drivers/media/video/v4l2-ctrls.c index f17f92b86a30..0f415dade05a 100644 --- a/drivers/media/video/v4l2-ctrls.c +++ b/drivers/media/video/v4l2-ctrls.c | |||
@@ -821,8 +821,8 @@ static void send_event(struct v4l2_fh *fh, struct v4l2_ctrl *ctrl, u32 changes) | |||
821 | fill_event(&ev, ctrl, changes); | 821 | fill_event(&ev, ctrl, changes); |
822 | 822 | ||
823 | list_for_each_entry(sev, &ctrl->ev_subs, node) | 823 | list_for_each_entry(sev, &ctrl->ev_subs, node) |
824 | if (sev->fh && (sev->fh != fh || | 824 | if (sev->fh != fh || |
825 | (sev->flags & V4L2_EVENT_SUB_FL_ALLOW_FEEDBACK))) | 825 | (sev->flags & V4L2_EVENT_SUB_FL_ALLOW_FEEDBACK)) |
826 | v4l2_event_queue_fh(sev->fh, &ev); | 826 | v4l2_event_queue_fh(sev->fh, &ev); |
827 | } | 827 | } |
828 | 828 | ||
@@ -947,6 +947,7 @@ static void new_to_cur(struct v4l2_fh *fh, struct v4l2_ctrl *ctrl, | |||
947 | if (ctrl->cluster[0]->has_volatiles) | 947 | if (ctrl->cluster[0]->has_volatiles) |
948 | ctrl->flags |= V4L2_CTRL_FLAG_VOLATILE; | 948 | ctrl->flags |= V4L2_CTRL_FLAG_VOLATILE; |
949 | } | 949 | } |
950 | fh = NULL; | ||
950 | } | 951 | } |
951 | if (changed || update_inactive) { | 952 | if (changed || update_inactive) { |
952 | /* If a control was changed that was not one of the controls | 953 | /* If a control was changed that was not one of the controls |
diff --git a/drivers/media/video/v4l2-event.c b/drivers/media/video/v4l2-event.c index 46037f225529..c26ad9637143 100644 --- a/drivers/media/video/v4l2-event.c +++ b/drivers/media/video/v4l2-event.c | |||
@@ -216,6 +216,9 @@ int v4l2_event_subscribe(struct v4l2_fh *fh, | |||
216 | unsigned long flags; | 216 | unsigned long flags; |
217 | unsigned i; | 217 | unsigned i; |
218 | 218 | ||
219 | if (sub->type == V4L2_EVENT_ALL) | ||
220 | return -EINVAL; | ||
221 | |||
219 | if (elems < 1) | 222 | if (elems < 1) |
220 | elems = 1; | 223 | elems = 1; |
221 | if (sub->type == V4L2_EVENT_CTRL) { | 224 | if (sub->type == V4L2_EVENT_CTRL) { |
@@ -283,6 +286,7 @@ int v4l2_event_unsubscribe(struct v4l2_fh *fh, | |||
283 | { | 286 | { |
284 | struct v4l2_subscribed_event *sev; | 287 | struct v4l2_subscribed_event *sev; |
285 | unsigned long flags; | 288 | unsigned long flags; |
289 | int i; | ||
286 | 290 | ||
287 | if (sub->type == V4L2_EVENT_ALL) { | 291 | if (sub->type == V4L2_EVENT_ALL) { |
288 | v4l2_event_unsubscribe_all(fh); | 292 | v4l2_event_unsubscribe_all(fh); |
@@ -293,8 +297,12 @@ int v4l2_event_unsubscribe(struct v4l2_fh *fh, | |||
293 | 297 | ||
294 | sev = v4l2_event_subscribed(fh, sub->type, sub->id); | 298 | sev = v4l2_event_subscribed(fh, sub->type, sub->id); |
295 | if (sev != NULL) { | 299 | if (sev != NULL) { |
300 | /* Remove any pending events for this subscription */ | ||
301 | for (i = 0; i < sev->in_use; i++) { | ||
302 | list_del(&sev->events[sev_pos(sev, i)].list); | ||
303 | fh->navailable--; | ||
304 | } | ||
296 | list_del(&sev->list); | 305 | list_del(&sev->list); |
297 | sev->fh = NULL; | ||
298 | } | 306 | } |
299 | 307 | ||
300 | spin_unlock_irqrestore(&fh->vdev->fh_lock, flags); | 308 | spin_unlock_irqrestore(&fh->vdev->fh_lock, flags); |
diff --git a/drivers/media/video/videobuf2-core.c b/drivers/media/video/videobuf2-core.c index 979e544388cb..95a3f5e82aef 100644 --- a/drivers/media/video/videobuf2-core.c +++ b/drivers/media/video/videobuf2-core.c | |||
@@ -131,6 +131,7 @@ static void __setup_offsets(struct vb2_queue *q, unsigned int n) | |||
131 | continue; | 131 | continue; |
132 | 132 | ||
133 | for (plane = 0; plane < vb->num_planes; ++plane) { | 133 | for (plane = 0; plane < vb->num_planes; ++plane) { |
134 | vb->v4l2_planes[plane].length = q->plane_sizes[plane]; | ||
134 | vb->v4l2_planes[plane].m.mem_offset = off; | 135 | vb->v4l2_planes[plane].m.mem_offset = off; |
135 | 136 | ||
136 | dprintk(3, "Buffer %d, plane %d offset 0x%08lx\n", | 137 | dprintk(3, "Buffer %d, plane %d offset 0x%08lx\n", |
@@ -264,6 +265,7 @@ static void __vb2_queue_free(struct vb2_queue *q, unsigned int buffers) | |||
264 | q->num_buffers -= buffers; | 265 | q->num_buffers -= buffers; |
265 | if (!q->num_buffers) | 266 | if (!q->num_buffers) |
266 | q->memory = 0; | 267 | q->memory = 0; |
268 | INIT_LIST_HEAD(&q->queued_list); | ||
267 | } | 269 | } |
268 | 270 | ||
269 | /** | 271 | /** |
@@ -296,14 +298,14 @@ static bool __buffer_in_use(struct vb2_queue *q, struct vb2_buffer *vb) | |||
296 | { | 298 | { |
297 | unsigned int plane; | 299 | unsigned int plane; |
298 | for (plane = 0; plane < vb->num_planes; ++plane) { | 300 | for (plane = 0; plane < vb->num_planes; ++plane) { |
301 | void *mem_priv = vb->planes[plane].mem_priv; | ||
299 | /* | 302 | /* |
300 | * If num_users() has not been provided, call_memop | 303 | * If num_users() has not been provided, call_memop |
301 | * will return 0, apparently nobody cares about this | 304 | * will return 0, apparently nobody cares about this |
302 | * case anyway. If num_users() returns more than 1, | 305 | * case anyway. If num_users() returns more than 1, |
303 | * we are not the only user of the plane's memory. | 306 | * we are not the only user of the plane's memory. |
304 | */ | 307 | */ |
305 | if (call_memop(q, plane, num_users, | 308 | if (mem_priv && call_memop(q, plane, num_users, mem_priv) > 1) |
306 | vb->planes[plane].mem_priv) > 1) | ||
307 | return true; | 309 | return true; |
308 | } | 310 | } |
309 | return false; | 311 | return false; |
diff --git a/drivers/mfd/ab5500-core.c b/drivers/mfd/ab5500-core.c index 4175544b491b..ec10629a0b0b 100644 --- a/drivers/mfd/ab5500-core.c +++ b/drivers/mfd/ab5500-core.c | |||
@@ -13,6 +13,7 @@ | |||
13 | * TODO: Event handling with irq_chip. Waiting for PRCMU fw support. | 13 | * TODO: Event handling with irq_chip. Waiting for PRCMU fw support. |
14 | */ | 14 | */ |
15 | 15 | ||
16 | #include <linux/module.h> | ||
16 | #include <linux/mutex.h> | 17 | #include <linux/mutex.h> |
17 | #include <linux/err.h> | 18 | #include <linux/err.h> |
18 | #include <linux/platform_device.h> | 19 | #include <linux/platform_device.h> |
diff --git a/drivers/mfd/ab5500-debugfs.c b/drivers/mfd/ab5500-debugfs.c index 6be1fe6b5f9a..43c0ebb81956 100644 --- a/drivers/mfd/ab5500-debugfs.c +++ b/drivers/mfd/ab5500-debugfs.c | |||
@@ -4,6 +4,7 @@ | |||
4 | * Debugfs support for the AB5500 MFD driver | 4 | * Debugfs support for the AB5500 MFD driver |
5 | */ | 5 | */ |
6 | 6 | ||
7 | #include <linux/export.h> | ||
7 | #include <linux/debugfs.h> | 8 | #include <linux/debugfs.h> |
8 | #include <linux/seq_file.h> | 9 | #include <linux/seq_file.h> |
9 | #include <linux/mfd/ab5500/ab5500.h> | 10 | #include <linux/mfd/ab5500/ab5500.h> |
diff --git a/drivers/mmc/host/sdhci-esdhc-imx.c b/drivers/mmc/host/sdhci-esdhc-imx.c index ae57769ba50d..4b976f00ea85 100644 --- a/drivers/mmc/host/sdhci-esdhc-imx.c +++ b/drivers/mmc/host/sdhci-esdhc-imx.c | |||
@@ -32,6 +32,7 @@ | |||
32 | /* VENDOR SPEC register */ | 32 | /* VENDOR SPEC register */ |
33 | #define SDHCI_VENDOR_SPEC 0xC0 | 33 | #define SDHCI_VENDOR_SPEC 0xC0 |
34 | #define SDHCI_VENDOR_SPEC_SDIO_QUIRK 0x00000002 | 34 | #define SDHCI_VENDOR_SPEC_SDIO_QUIRK 0x00000002 |
35 | #define SDHCI_WTMK_LVL 0x44 | ||
35 | #define SDHCI_MIX_CTRL 0x48 | 36 | #define SDHCI_MIX_CTRL 0x48 |
36 | 37 | ||
37 | /* | 38 | /* |
@@ -476,6 +477,13 @@ static int __devinit sdhci_esdhc_imx_probe(struct platform_device *pdev) | |||
476 | if (is_imx53_esdhc(imx_data)) | 477 | if (is_imx53_esdhc(imx_data)) |
477 | imx_data->flags |= ESDHC_FLAG_MULTIBLK_NO_INT; | 478 | imx_data->flags |= ESDHC_FLAG_MULTIBLK_NO_INT; |
478 | 479 | ||
480 | /* | ||
481 | * The imx6q ROM code will change the default watermark level setting | ||
482 | * to something insane. Change it back here. | ||
483 | */ | ||
484 | if (is_imx6q_usdhc(imx_data)) | ||
485 | writel(0x08100810, host->ioaddr + SDHCI_WTMK_LVL); | ||
486 | |||
479 | boarddata = &imx_data->boarddata; | 487 | boarddata = &imx_data->boarddata; |
480 | if (sdhci_esdhc_imx_probe_dt(pdev, boarddata) < 0) { | 488 | if (sdhci_esdhc_imx_probe_dt(pdev, boarddata) < 0) { |
481 | if (!host->mmc->parent->platform_data) { | 489 | if (!host->mmc->parent->platform_data) { |
diff --git a/drivers/mtd/Kconfig b/drivers/mtd/Kconfig index 66b616ebe536..318a869286ab 100644 --- a/drivers/mtd/Kconfig +++ b/drivers/mtd/Kconfig | |||
@@ -12,27 +12,17 @@ menuconfig MTD | |||
12 | 12 | ||
13 | if MTD | 13 | if MTD |
14 | 14 | ||
15 | config MTD_DEBUG | ||
16 | bool "Debugging" | ||
17 | help | ||
18 | This turns on low-level debugging for the entire MTD sub-system. | ||
19 | Normally, you should say 'N'. | ||
20 | |||
21 | config MTD_DEBUG_VERBOSE | ||
22 | int "Debugging verbosity (0 = quiet, 3 = noisy)" | ||
23 | depends on MTD_DEBUG | ||
24 | default "0" | ||
25 | help | ||
26 | Determines the verbosity level of the MTD debugging messages. | ||
27 | |||
28 | config MTD_TESTS | 15 | config MTD_TESTS |
29 | tristate "MTD tests support" | 16 | tristate "MTD tests support (DANGEROUS)" |
30 | depends on m | 17 | depends on m |
31 | help | 18 | help |
32 | This option includes various MTD tests into compilation. The tests | 19 | This option includes various MTD tests into compilation. The tests |
33 | should normally be compiled as kernel modules. The modules perform | 20 | should normally be compiled as kernel modules. The modules perform |
34 | various checks and verifications when loaded. | 21 | various checks and verifications when loaded. |
35 | 22 | ||
23 | WARNING: some of the tests will ERASE entire MTD device which they | ||
24 | test. Do not use these tests unless you really know what you do. | ||
25 | |||
36 | config MTD_REDBOOT_PARTS | 26 | config MTD_REDBOOT_PARTS |
37 | tristate "RedBoot partition table parsing" | 27 | tristate "RedBoot partition table parsing" |
38 | ---help--- | 28 | ---help--- |
@@ -137,7 +127,8 @@ config MTD_AFS_PARTS | |||
137 | 'physmap' map driver (CONFIG_MTD_PHYSMAP) does this, for example. | 127 | 'physmap' map driver (CONFIG_MTD_PHYSMAP) does this, for example. |
138 | 128 | ||
139 | config MTD_OF_PARTS | 129 | config MTD_OF_PARTS |
140 | def_bool y | 130 | tristate "OpenFirmware partitioning information support" |
131 | default Y | ||
141 | depends on OF | 132 | depends on OF |
142 | help | 133 | help |
143 | This provides a partition parsing function which derives | 134 | This provides a partition parsing function which derives |
diff --git a/drivers/mtd/Makefile b/drivers/mtd/Makefile index 39664c4229ff..9aaac3ac89f3 100644 --- a/drivers/mtd/Makefile +++ b/drivers/mtd/Makefile | |||
@@ -5,8 +5,8 @@ | |||
5 | # Core functionality. | 5 | # Core functionality. |
6 | obj-$(CONFIG_MTD) += mtd.o | 6 | obj-$(CONFIG_MTD) += mtd.o |
7 | mtd-y := mtdcore.o mtdsuper.o mtdconcat.o mtdpart.o | 7 | mtd-y := mtdcore.o mtdsuper.o mtdconcat.o mtdpart.o |
8 | mtd-$(CONFIG_MTD_OF_PARTS) += ofpart.o | ||
9 | 8 | ||
9 | obj-$(CONFIG_MTD_OF_PARTS) += ofpart.o | ||
10 | obj-$(CONFIG_MTD_REDBOOT_PARTS) += redboot.o | 10 | obj-$(CONFIG_MTD_REDBOOT_PARTS) += redboot.o |
11 | obj-$(CONFIG_MTD_CMDLINE_PARTS) += cmdlinepart.o | 11 | obj-$(CONFIG_MTD_CMDLINE_PARTS) += cmdlinepart.o |
12 | obj-$(CONFIG_MTD_AFS_PARTS) += afs.o | 12 | obj-$(CONFIG_MTD_AFS_PARTS) += afs.o |
diff --git a/drivers/mtd/afs.c b/drivers/mtd/afs.c index 302372c08b56..89a02f6f65dc 100644 --- a/drivers/mtd/afs.c +++ b/drivers/mtd/afs.c | |||
@@ -162,8 +162,8 @@ afs_read_iis(struct mtd_info *mtd, struct image_info_struct *iis, u_int ptr) | |||
162 | } | 162 | } |
163 | 163 | ||
164 | static int parse_afs_partitions(struct mtd_info *mtd, | 164 | static int parse_afs_partitions(struct mtd_info *mtd, |
165 | struct mtd_partition **pparts, | 165 | struct mtd_partition **pparts, |
166 | unsigned long origin) | 166 | struct mtd_part_parser_data *data) |
167 | { | 167 | { |
168 | struct mtd_partition *parts; | 168 | struct mtd_partition *parts; |
169 | u_int mask, off, idx, sz; | 169 | u_int mask, off, idx, sz; |
diff --git a/drivers/mtd/ar7part.c b/drivers/mtd/ar7part.c index 95949b97de6a..f40ea4547554 100644 --- a/drivers/mtd/ar7part.c +++ b/drivers/mtd/ar7part.c | |||
@@ -47,7 +47,7 @@ struct ar7_bin_rec { | |||
47 | 47 | ||
48 | static int create_mtd_partitions(struct mtd_info *master, | 48 | static int create_mtd_partitions(struct mtd_info *master, |
49 | struct mtd_partition **pparts, | 49 | struct mtd_partition **pparts, |
50 | unsigned long origin) | 50 | struct mtd_part_parser_data *data) |
51 | { | 51 | { |
52 | struct ar7_bin_rec header; | 52 | struct ar7_bin_rec header; |
53 | unsigned int offset; | 53 | unsigned int offset; |
diff --git a/drivers/mtd/chips/cfi_cmdset_0002.c b/drivers/mtd/chips/cfi_cmdset_0002.c index 23175edd5634..8d70895a58d6 100644 --- a/drivers/mtd/chips/cfi_cmdset_0002.c +++ b/drivers/mtd/chips/cfi_cmdset_0002.c | |||
@@ -145,8 +145,7 @@ static void fixup_amd_bootblock(struct mtd_info *mtd) | |||
145 | if (((major << 8) | minor) < 0x3131) { | 145 | if (((major << 8) | minor) < 0x3131) { |
146 | /* CFI version 1.0 => don't trust bootloc */ | 146 | /* CFI version 1.0 => don't trust bootloc */ |
147 | 147 | ||
148 | DEBUG(MTD_DEBUG_LEVEL1, | 148 | pr_debug("%s: JEDEC Vendor ID is 0x%02X Device ID is 0x%02X\n", |
149 | "%s: JEDEC Vendor ID is 0x%02X Device ID is 0x%02X\n", | ||
150 | map->name, cfi->mfr, cfi->id); | 149 | map->name, cfi->mfr, cfi->id); |
151 | 150 | ||
152 | /* AFAICS all 29LV400 with a bottom boot block have a device ID | 151 | /* AFAICS all 29LV400 with a bottom boot block have a device ID |
@@ -166,8 +165,7 @@ static void fixup_amd_bootblock(struct mtd_info *mtd) | |||
166 | * the 8-bit device ID. | 165 | * the 8-bit device ID. |
167 | */ | 166 | */ |
168 | (cfi->mfr == CFI_MFR_MACRONIX)) { | 167 | (cfi->mfr == CFI_MFR_MACRONIX)) { |
169 | DEBUG(MTD_DEBUG_LEVEL1, | 168 | pr_debug("%s: Macronix MX29LV400C with bottom boot block" |
170 | "%s: Macronix MX29LV400C with bottom boot block" | ||
171 | " detected\n", map->name); | 169 | " detected\n", map->name); |
172 | extp->TopBottom = 2; /* bottom boot */ | 170 | extp->TopBottom = 2; /* bottom boot */ |
173 | } else | 171 | } else |
@@ -178,8 +176,7 @@ static void fixup_amd_bootblock(struct mtd_info *mtd) | |||
178 | extp->TopBottom = 2; /* bottom boot */ | 176 | extp->TopBottom = 2; /* bottom boot */ |
179 | } | 177 | } |
180 | 178 | ||
181 | DEBUG(MTD_DEBUG_LEVEL1, | 179 | pr_debug("%s: AMD CFI PRI V%c.%c has no boot block field;" |
182 | "%s: AMD CFI PRI V%c.%c has no boot block field;" | ||
183 | " deduced %s from Device ID\n", map->name, major, minor, | 180 | " deduced %s from Device ID\n", map->name, major, minor, |
184 | extp->TopBottom == 2 ? "bottom" : "top"); | 181 | extp->TopBottom == 2 ? "bottom" : "top"); |
185 | } | 182 | } |
@@ -191,7 +188,7 @@ static void fixup_use_write_buffers(struct mtd_info *mtd) | |||
191 | struct map_info *map = mtd->priv; | 188 | struct map_info *map = mtd->priv; |
192 | struct cfi_private *cfi = map->fldrv_priv; | 189 | struct cfi_private *cfi = map->fldrv_priv; |
193 | if (cfi->cfiq->BufWriteTimeoutTyp) { | 190 | if (cfi->cfiq->BufWriteTimeoutTyp) { |
194 | DEBUG(MTD_DEBUG_LEVEL1, "Using buffer write method\n" ); | 191 | pr_debug("Using buffer write method\n" ); |
195 | mtd->write = cfi_amdstd_write_buffers; | 192 | mtd->write = cfi_amdstd_write_buffers; |
196 | } | 193 | } |
197 | } | 194 | } |
@@ -443,8 +440,8 @@ struct mtd_info *cfi_cmdset_0002(struct map_info *map, int primary) | |||
443 | mtd->writesize = 1; | 440 | mtd->writesize = 1; |
444 | mtd->writebufsize = cfi_interleave(cfi) << cfi->cfiq->MaxBufWriteSize; | 441 | mtd->writebufsize = cfi_interleave(cfi) << cfi->cfiq->MaxBufWriteSize; |
445 | 442 | ||
446 | DEBUG(MTD_DEBUG_LEVEL3, "MTD %s(): write buffer size %d\n", | 443 | pr_debug("MTD %s(): write buffer size %d\n", __func__, |
447 | __func__, mtd->writebufsize); | 444 | mtd->writebufsize); |
448 | 445 | ||
449 | mtd->reboot_notifier.notifier_call = cfi_amdstd_reboot; | 446 | mtd->reboot_notifier.notifier_call = cfi_amdstd_reboot; |
450 | 447 | ||
@@ -1163,7 +1160,7 @@ static int __xipram do_write_oneword(struct map_info *map, struct flchip *chip, | |||
1163 | return ret; | 1160 | return ret; |
1164 | } | 1161 | } |
1165 | 1162 | ||
1166 | DEBUG( MTD_DEBUG_LEVEL3, "MTD %s(): WRITE 0x%.8lx(0x%.8lx)\n", | 1163 | pr_debug("MTD %s(): WRITE 0x%.8lx(0x%.8lx)\n", |
1167 | __func__, adr, datum.x[0] ); | 1164 | __func__, adr, datum.x[0] ); |
1168 | 1165 | ||
1169 | /* | 1166 | /* |
@@ -1174,7 +1171,7 @@ static int __xipram do_write_oneword(struct map_info *map, struct flchip *chip, | |||
1174 | */ | 1171 | */ |
1175 | oldd = map_read(map, adr); | 1172 | oldd = map_read(map, adr); |
1176 | if (map_word_equal(map, oldd, datum)) { | 1173 | if (map_word_equal(map, oldd, datum)) { |
1177 | DEBUG( MTD_DEBUG_LEVEL3, "MTD %s(): NOP\n", | 1174 | pr_debug("MTD %s(): NOP\n", |
1178 | __func__); | 1175 | __func__); |
1179 | goto op_done; | 1176 | goto op_done; |
1180 | } | 1177 | } |
@@ -1400,7 +1397,7 @@ static int __xipram do_write_buffer(struct map_info *map, struct flchip *chip, | |||
1400 | 1397 | ||
1401 | datum = map_word_load(map, buf); | 1398 | datum = map_word_load(map, buf); |
1402 | 1399 | ||
1403 | DEBUG( MTD_DEBUG_LEVEL3, "MTD %s(): WRITE 0x%.8lx(0x%.8lx)\n", | 1400 | pr_debug("MTD %s(): WRITE 0x%.8lx(0x%.8lx)\n", |
1404 | __func__, adr, datum.x[0] ); | 1401 | __func__, adr, datum.x[0] ); |
1405 | 1402 | ||
1406 | XIP_INVAL_CACHED_RANGE(map, adr, len); | 1403 | XIP_INVAL_CACHED_RANGE(map, adr, len); |
@@ -1587,7 +1584,7 @@ static int __xipram do_erase_chip(struct map_info *map, struct flchip *chip) | |||
1587 | return ret; | 1584 | return ret; |
1588 | } | 1585 | } |
1589 | 1586 | ||
1590 | DEBUG( MTD_DEBUG_LEVEL3, "MTD %s(): ERASE 0x%.8lx\n", | 1587 | pr_debug("MTD %s(): ERASE 0x%.8lx\n", |
1591 | __func__, chip->start ); | 1588 | __func__, chip->start ); |
1592 | 1589 | ||
1593 | XIP_INVAL_CACHED_RANGE(map, adr, map->size); | 1590 | XIP_INVAL_CACHED_RANGE(map, adr, map->size); |
@@ -1675,7 +1672,7 @@ static int __xipram do_erase_oneblock(struct map_info *map, struct flchip *chip, | |||
1675 | return ret; | 1672 | return ret; |
1676 | } | 1673 | } |
1677 | 1674 | ||
1678 | DEBUG( MTD_DEBUG_LEVEL3, "MTD %s(): ERASE 0x%.8lx\n", | 1675 | pr_debug("MTD %s(): ERASE 0x%.8lx\n", |
1679 | __func__, adr ); | 1676 | __func__, adr ); |
1680 | 1677 | ||
1681 | XIP_INVAL_CACHED_RANGE(map, adr, len); | 1678 | XIP_INVAL_CACHED_RANGE(map, adr, len); |
@@ -1801,8 +1798,7 @@ static int do_atmel_lock(struct map_info *map, struct flchip *chip, | |||
1801 | goto out_unlock; | 1798 | goto out_unlock; |
1802 | chip->state = FL_LOCKING; | 1799 | chip->state = FL_LOCKING; |
1803 | 1800 | ||
1804 | DEBUG(MTD_DEBUG_LEVEL3, "MTD %s(): LOCK 0x%08lx len %d\n", | 1801 | pr_debug("MTD %s(): LOCK 0x%08lx len %d\n", __func__, adr, len); |
1805 | __func__, adr, len); | ||
1806 | 1802 | ||
1807 | cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi, | 1803 | cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi, |
1808 | cfi->device_type, NULL); | 1804 | cfi->device_type, NULL); |
@@ -1837,8 +1833,7 @@ static int do_atmel_unlock(struct map_info *map, struct flchip *chip, | |||
1837 | goto out_unlock; | 1833 | goto out_unlock; |
1838 | chip->state = FL_UNLOCKING; | 1834 | chip->state = FL_UNLOCKING; |
1839 | 1835 | ||
1840 | DEBUG(MTD_DEBUG_LEVEL3, "MTD %s(): LOCK 0x%08lx len %d\n", | 1836 | pr_debug("MTD %s(): LOCK 0x%08lx len %d\n", __func__, adr, len); |
1841 | __func__, adr, len); | ||
1842 | 1837 | ||
1843 | cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi, | 1838 | cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi, |
1844 | cfi->device_type, NULL); | 1839 | cfi->device_type, NULL); |
diff --git a/drivers/mtd/chips/fwh_lock.h b/drivers/mtd/chips/fwh_lock.h index 5e3cc80128aa..89c6595454a5 100644 --- a/drivers/mtd/chips/fwh_lock.h +++ b/drivers/mtd/chips/fwh_lock.h | |||
@@ -34,8 +34,7 @@ static int fwh_xxlock_oneblock(struct map_info *map, struct flchip *chip, | |||
34 | 34 | ||
35 | /* Refuse the operation if the we cannot look behind the chip */ | 35 | /* Refuse the operation if the we cannot look behind the chip */ |
36 | if (chip->start < 0x400000) { | 36 | if (chip->start < 0x400000) { |
37 | DEBUG( MTD_DEBUG_LEVEL3, | 37 | pr_debug( "MTD %s(): chip->start: %lx wanted >= 0x400000\n", |
38 | "MTD %s(): chip->start: %lx wanted >= 0x400000\n", | ||
39 | __func__, chip->start ); | 38 | __func__, chip->start ); |
40 | return -EIO; | 39 | return -EIO; |
41 | } | 40 | } |
diff --git a/drivers/mtd/chips/jedec_probe.c b/drivers/mtd/chips/jedec_probe.c index ea832ea0e4aa..c443f527a53a 100644 --- a/drivers/mtd/chips/jedec_probe.c +++ b/drivers/mtd/chips/jedec_probe.c | |||
@@ -1914,11 +1914,10 @@ static void jedec_reset(u32 base, struct map_info *map, struct cfi_private *cfi) | |||
1914 | * (oh and incidentaly the jedec spec - 3.5.3.3) the reset | 1914 | * (oh and incidentaly the jedec spec - 3.5.3.3) the reset |
1915 | * sequence is *supposed* to be 0xaa at 0x5555, 0x55 at | 1915 | * sequence is *supposed* to be 0xaa at 0x5555, 0x55 at |
1916 | * 0x2aaa, 0xF0 at 0x5555 this will not affect the AMD chips | 1916 | * 0x2aaa, 0xF0 at 0x5555 this will not affect the AMD chips |
1917 | * as they will ignore the writes and dont care what address | 1917 | * as they will ignore the writes and don't care what address |
1918 | * the F0 is written to */ | 1918 | * the F0 is written to */ |
1919 | if (cfi->addr_unlock1) { | 1919 | if (cfi->addr_unlock1) { |
1920 | DEBUG( MTD_DEBUG_LEVEL3, | 1920 | pr_debug( "reset unlock called %x %x \n", |
1921 | "reset unlock called %x %x \n", | ||
1922 | cfi->addr_unlock1,cfi->addr_unlock2); | 1921 | cfi->addr_unlock1,cfi->addr_unlock2); |
1923 | cfi_send_gen_cmd(0xaa, cfi->addr_unlock1, base, map, cfi, cfi->device_type, NULL); | 1922 | cfi_send_gen_cmd(0xaa, cfi->addr_unlock1, base, map, cfi, cfi->device_type, NULL); |
1924 | cfi_send_gen_cmd(0x55, cfi->addr_unlock2, base, map, cfi, cfi->device_type, NULL); | 1923 | cfi_send_gen_cmd(0x55, cfi->addr_unlock2, base, map, cfi, cfi->device_type, NULL); |
@@ -1941,7 +1940,7 @@ static int cfi_jedec_setup(struct map_info *map, struct cfi_private *cfi, int in | |||
1941 | uint8_t uaddr; | 1940 | uint8_t uaddr; |
1942 | 1941 | ||
1943 | if (!(jedec_table[index].devtypes & cfi->device_type)) { | 1942 | if (!(jedec_table[index].devtypes & cfi->device_type)) { |
1944 | DEBUG(MTD_DEBUG_LEVEL1, "Rejecting potential %s with incompatible %d-bit device type\n", | 1943 | pr_debug("Rejecting potential %s with incompatible %d-bit device type\n", |
1945 | jedec_table[index].name, 4 * (1<<cfi->device_type)); | 1944 | jedec_table[index].name, 4 * (1<<cfi->device_type)); |
1946 | return 0; | 1945 | return 0; |
1947 | } | 1946 | } |
@@ -2021,7 +2020,7 @@ static inline int jedec_match( uint32_t base, | |||
2021 | * there aren't. | 2020 | * there aren't. |
2022 | */ | 2021 | */ |
2023 | if (finfo->dev_id > 0xff) { | 2022 | if (finfo->dev_id > 0xff) { |
2024 | DEBUG( MTD_DEBUG_LEVEL3, "%s(): ID is not 8bit\n", | 2023 | pr_debug("%s(): ID is not 8bit\n", |
2025 | __func__); | 2024 | __func__); |
2026 | goto match_done; | 2025 | goto match_done; |
2027 | } | 2026 | } |
@@ -2045,12 +2044,10 @@ static inline int jedec_match( uint32_t base, | |||
2045 | } | 2044 | } |
2046 | 2045 | ||
2047 | /* the part size must fit in the memory window */ | 2046 | /* the part size must fit in the memory window */ |
2048 | DEBUG( MTD_DEBUG_LEVEL3, | 2047 | pr_debug("MTD %s(): Check fit 0x%.8x + 0x%.8x = 0x%.8x\n", |
2049 | "MTD %s(): Check fit 0x%.8x + 0x%.8x = 0x%.8x\n", | ||
2050 | __func__, base, 1 << finfo->dev_size, base + (1 << finfo->dev_size) ); | 2048 | __func__, base, 1 << finfo->dev_size, base + (1 << finfo->dev_size) ); |
2051 | if ( base + cfi_interleave(cfi) * ( 1 << finfo->dev_size ) > map->size ) { | 2049 | if ( base + cfi_interleave(cfi) * ( 1 << finfo->dev_size ) > map->size ) { |
2052 | DEBUG( MTD_DEBUG_LEVEL3, | 2050 | pr_debug("MTD %s(): 0x%.4x 0x%.4x %dKiB doesn't fit\n", |
2053 | "MTD %s(): 0x%.4x 0x%.4x %dKiB doesn't fit\n", | ||
2054 | __func__, finfo->mfr_id, finfo->dev_id, | 2051 | __func__, finfo->mfr_id, finfo->dev_id, |
2055 | 1 << finfo->dev_size ); | 2052 | 1 << finfo->dev_size ); |
2056 | goto match_done; | 2053 | goto match_done; |
@@ -2061,13 +2058,12 @@ static inline int jedec_match( uint32_t base, | |||
2061 | 2058 | ||
2062 | uaddr = finfo->uaddr; | 2059 | uaddr = finfo->uaddr; |
2063 | 2060 | ||
2064 | DEBUG( MTD_DEBUG_LEVEL3, "MTD %s(): check unlock addrs 0x%.4x 0x%.4x\n", | 2061 | pr_debug("MTD %s(): check unlock addrs 0x%.4x 0x%.4x\n", |
2065 | __func__, cfi->addr_unlock1, cfi->addr_unlock2 ); | 2062 | __func__, cfi->addr_unlock1, cfi->addr_unlock2 ); |
2066 | if ( MTD_UADDR_UNNECESSARY != uaddr && MTD_UADDR_DONT_CARE != uaddr | 2063 | if ( MTD_UADDR_UNNECESSARY != uaddr && MTD_UADDR_DONT_CARE != uaddr |
2067 | && ( unlock_addrs[uaddr].addr1 / cfi->device_type != cfi->addr_unlock1 || | 2064 | && ( unlock_addrs[uaddr].addr1 / cfi->device_type != cfi->addr_unlock1 || |
2068 | unlock_addrs[uaddr].addr2 / cfi->device_type != cfi->addr_unlock2 ) ) { | 2065 | unlock_addrs[uaddr].addr2 / cfi->device_type != cfi->addr_unlock2 ) ) { |
2069 | DEBUG( MTD_DEBUG_LEVEL3, | 2066 | pr_debug("MTD %s(): 0x%.4x 0x%.4x did not match\n", |
2070 | "MTD %s(): 0x%.4x 0x%.4x did not match\n", | ||
2071 | __func__, | 2067 | __func__, |
2072 | unlock_addrs[uaddr].addr1, | 2068 | unlock_addrs[uaddr].addr1, |
2073 | unlock_addrs[uaddr].addr2); | 2069 | unlock_addrs[uaddr].addr2); |
@@ -2083,15 +2079,13 @@ static inline int jedec_match( uint32_t base, | |||
2083 | * FIXME - write a driver that takes all of the chip info as | 2079 | * FIXME - write a driver that takes all of the chip info as |
2084 | * module parameters, doesn't probe but forces a load. | 2080 | * module parameters, doesn't probe but forces a load. |
2085 | */ | 2081 | */ |
2086 | DEBUG( MTD_DEBUG_LEVEL3, | 2082 | pr_debug("MTD %s(): check ID's disappear when not in ID mode\n", |
2087 | "MTD %s(): check ID's disappear when not in ID mode\n", | ||
2088 | __func__ ); | 2083 | __func__ ); |
2089 | jedec_reset( base, map, cfi ); | 2084 | jedec_reset( base, map, cfi ); |
2090 | mfr = jedec_read_mfr( map, base, cfi ); | 2085 | mfr = jedec_read_mfr( map, base, cfi ); |
2091 | id = jedec_read_id( map, base, cfi ); | 2086 | id = jedec_read_id( map, base, cfi ); |
2092 | if ( mfr == cfi->mfr && id == cfi->id ) { | 2087 | if ( mfr == cfi->mfr && id == cfi->id ) { |
2093 | DEBUG( MTD_DEBUG_LEVEL3, | 2088 | pr_debug("MTD %s(): ID 0x%.2x:0x%.2x did not change after reset:\n" |
2094 | "MTD %s(): ID 0x%.2x:0x%.2x did not change after reset:\n" | ||
2095 | "You might need to manually specify JEDEC parameters.\n", | 2089 | "You might need to manually specify JEDEC parameters.\n", |
2096 | __func__, cfi->mfr, cfi->id ); | 2090 | __func__, cfi->mfr, cfi->id ); |
2097 | goto match_done; | 2091 | goto match_done; |
@@ -2104,7 +2098,7 @@ static inline int jedec_match( uint32_t base, | |||
2104 | * Put the device back in ID mode - only need to do this if we | 2098 | * Put the device back in ID mode - only need to do this if we |
2105 | * were truly frobbing a real device. | 2099 | * were truly frobbing a real device. |
2106 | */ | 2100 | */ |
2107 | DEBUG( MTD_DEBUG_LEVEL3, "MTD %s(): return to ID mode\n", __func__ ); | 2101 | pr_debug("MTD %s(): return to ID mode\n", __func__ ); |
2108 | if (cfi->addr_unlock1) { | 2102 | if (cfi->addr_unlock1) { |
2109 | cfi_send_gen_cmd(0xaa, cfi->addr_unlock1, base, map, cfi, cfi->device_type, NULL); | 2103 | cfi_send_gen_cmd(0xaa, cfi->addr_unlock1, base, map, cfi, cfi->device_type, NULL); |
2110 | cfi_send_gen_cmd(0x55, cfi->addr_unlock2, base, map, cfi, cfi->device_type, NULL); | 2104 | cfi_send_gen_cmd(0x55, cfi->addr_unlock2, base, map, cfi, cfi->device_type, NULL); |
@@ -2167,13 +2161,11 @@ static int jedec_probe_chip(struct map_info *map, __u32 base, | |||
2167 | 2161 | ||
2168 | cfi->mfr = jedec_read_mfr(map, base, cfi); | 2162 | cfi->mfr = jedec_read_mfr(map, base, cfi); |
2169 | cfi->id = jedec_read_id(map, base, cfi); | 2163 | cfi->id = jedec_read_id(map, base, cfi); |
2170 | DEBUG(MTD_DEBUG_LEVEL3, | 2164 | pr_debug("Search for id:(%02x %02x) interleave(%d) type(%d)\n", |
2171 | "Search for id:(%02x %02x) interleave(%d) type(%d)\n", | ||
2172 | cfi->mfr, cfi->id, cfi_interleave(cfi), cfi->device_type); | 2165 | cfi->mfr, cfi->id, cfi_interleave(cfi), cfi->device_type); |
2173 | for (i = 0; i < ARRAY_SIZE(jedec_table); i++) { | 2166 | for (i = 0; i < ARRAY_SIZE(jedec_table); i++) { |
2174 | if ( jedec_match( base, map, cfi, &jedec_table[i] ) ) { | 2167 | if ( jedec_match( base, map, cfi, &jedec_table[i] ) ) { |
2175 | DEBUG( MTD_DEBUG_LEVEL3, | 2168 | pr_debug("MTD %s(): matched device 0x%x,0x%x unlock_addrs: 0x%.4x 0x%.4x\n", |
2176 | "MTD %s(): matched device 0x%x,0x%x unlock_addrs: 0x%.4x 0x%.4x\n", | ||
2177 | __func__, cfi->mfr, cfi->id, | 2169 | __func__, cfi->mfr, cfi->id, |
2178 | cfi->addr_unlock1, cfi->addr_unlock2 ); | 2170 | cfi->addr_unlock1, cfi->addr_unlock2 ); |
2179 | if (!cfi_jedec_setup(map, cfi, i)) | 2171 | if (!cfi_jedec_setup(map, cfi, i)) |
diff --git a/drivers/mtd/cmdlinepart.c b/drivers/mtd/cmdlinepart.c index 8cf667da2408..ddf9ec6d9168 100644 --- a/drivers/mtd/cmdlinepart.c +++ b/drivers/mtd/cmdlinepart.c | |||
@@ -189,10 +189,7 @@ static struct mtd_partition * newpart(char *s, | |||
189 | extra_mem_size; | 189 | extra_mem_size; |
190 | parts = kzalloc(alloc_size, GFP_KERNEL); | 190 | parts = kzalloc(alloc_size, GFP_KERNEL); |
191 | if (!parts) | 191 | if (!parts) |
192 | { | ||
193 | printk(KERN_ERR ERRP "out of memory\n"); | ||
194 | return NULL; | 192 | return NULL; |
195 | } | ||
196 | extra_mem = (unsigned char *)(parts + *num_parts); | 193 | extra_mem = (unsigned char *)(parts + *num_parts); |
197 | } | 194 | } |
198 | /* enter this partition (offset will be calculated later if it is zero at this point) */ | 195 | /* enter this partition (offset will be calculated later if it is zero at this point) */ |
@@ -317,8 +314,8 @@ static int mtdpart_setup_real(char *s) | |||
317 | * the first one in the chain if a NULL mtd_id is passed in. | 314 | * the first one in the chain if a NULL mtd_id is passed in. |
318 | */ | 315 | */ |
319 | static int parse_cmdline_partitions(struct mtd_info *master, | 316 | static int parse_cmdline_partitions(struct mtd_info *master, |
320 | struct mtd_partition **pparts, | 317 | struct mtd_partition **pparts, |
321 | unsigned long origin) | 318 | struct mtd_part_parser_data *data) |
322 | { | 319 | { |
323 | unsigned long offset; | 320 | unsigned long offset; |
324 | int i; | 321 | int i; |
diff --git a/drivers/mtd/devices/Kconfig b/drivers/mtd/devices/Kconfig index 35081ce77fbd..283d887f7825 100644 --- a/drivers/mtd/devices/Kconfig +++ b/drivers/mtd/devices/Kconfig | |||
@@ -249,6 +249,16 @@ config MTD_DOC2001PLUS | |||
249 | under "NAND Flash Device Drivers" (currently that driver does not | 249 | under "NAND Flash Device Drivers" (currently that driver does not |
250 | support all Millennium Plus devices). | 250 | support all Millennium Plus devices). |
251 | 251 | ||
252 | config MTD_DOCG3 | ||
253 | tristate "M-Systems Disk-On-Chip G3" | ||
254 | ---help--- | ||
255 | This provides an MTD device driver for the M-Systems DiskOnChip | ||
256 | G3 devices. | ||
257 | |||
258 | The driver provides access to G3 DiskOnChip, distributed by | ||
259 | M-Systems and now Sandisk. The support is very experimental, | ||
260 | and doesn't give access to any write operations. | ||
261 | |||
252 | config MTD_DOCPROBE | 262 | config MTD_DOCPROBE |
253 | tristate | 263 | tristate |
254 | select MTD_DOCECC | 264 | select MTD_DOCECC |
@@ -268,8 +278,7 @@ config MTD_DOCPROBE_ADVANCED | |||
268 | config MTD_DOCPROBE_ADDRESS | 278 | config MTD_DOCPROBE_ADDRESS |
269 | hex "Physical address of DiskOnChip" if MTD_DOCPROBE_ADVANCED | 279 | hex "Physical address of DiskOnChip" if MTD_DOCPROBE_ADVANCED |
270 | depends on MTD_DOCPROBE | 280 | depends on MTD_DOCPROBE |
271 | default "0x0000" if MTD_DOCPROBE_ADVANCED | 281 | default "0x0" |
272 | default "0" if !MTD_DOCPROBE_ADVANCED | ||
273 | ---help--- | 282 | ---help--- |
274 | By default, the probe for DiskOnChip devices will look for a | 283 | By default, the probe for DiskOnChip devices will look for a |
275 | DiskOnChip at every multiple of 0x2000 between 0xC8000 and 0xEE000. | 284 | DiskOnChip at every multiple of 0x2000 between 0xC8000 and 0xEE000. |
diff --git a/drivers/mtd/devices/Makefile b/drivers/mtd/devices/Makefile index f3226b1d38fc..56c7cd462f11 100644 --- a/drivers/mtd/devices/Makefile +++ b/drivers/mtd/devices/Makefile | |||
@@ -5,6 +5,7 @@ | |||
5 | obj-$(CONFIG_MTD_DOC2000) += doc2000.o | 5 | obj-$(CONFIG_MTD_DOC2000) += doc2000.o |
6 | obj-$(CONFIG_MTD_DOC2001) += doc2001.o | 6 | obj-$(CONFIG_MTD_DOC2001) += doc2001.o |
7 | obj-$(CONFIG_MTD_DOC2001PLUS) += doc2001plus.o | 7 | obj-$(CONFIG_MTD_DOC2001PLUS) += doc2001plus.o |
8 | obj-$(CONFIG_MTD_DOCG3) += docg3.o | ||
8 | obj-$(CONFIG_MTD_DOCPROBE) += docprobe.o | 9 | obj-$(CONFIG_MTD_DOCPROBE) += docprobe.o |
9 | obj-$(CONFIG_MTD_DOCECC) += docecc.o | 10 | obj-$(CONFIG_MTD_DOCECC) += docecc.o |
10 | obj-$(CONFIG_MTD_SLRAM) += slram.o | 11 | obj-$(CONFIG_MTD_SLRAM) += slram.o |
@@ -17,3 +18,5 @@ obj-$(CONFIG_MTD_BLOCK2MTD) += block2mtd.o | |||
17 | obj-$(CONFIG_MTD_DATAFLASH) += mtd_dataflash.o | 18 | obj-$(CONFIG_MTD_DATAFLASH) += mtd_dataflash.o |
18 | obj-$(CONFIG_MTD_M25P80) += m25p80.o | 19 | obj-$(CONFIG_MTD_M25P80) += m25p80.o |
19 | obj-$(CONFIG_MTD_SST25L) += sst25l.o | 20 | obj-$(CONFIG_MTD_SST25L) += sst25l.o |
21 | |||
22 | CFLAGS_docg3.o += -I$(src) \ No newline at end of file | ||
diff --git a/drivers/mtd/devices/doc2000.c b/drivers/mtd/devices/doc2000.c index f7fbf6025ef2..e9fad9151219 100644 --- a/drivers/mtd/devices/doc2000.c +++ b/drivers/mtd/devices/doc2000.c | |||
@@ -82,8 +82,7 @@ static int _DoC_WaitReady(struct DiskOnChip *doc) | |||
82 | void __iomem *docptr = doc->virtadr; | 82 | void __iomem *docptr = doc->virtadr; |
83 | unsigned long timeo = jiffies + (HZ * 10); | 83 | unsigned long timeo = jiffies + (HZ * 10); |
84 | 84 | ||
85 | DEBUG(MTD_DEBUG_LEVEL3, | 85 | pr_debug("_DoC_WaitReady called for out-of-line wait\n"); |
86 | "_DoC_WaitReady called for out-of-line wait\n"); | ||
87 | 86 | ||
88 | /* Out-of-line routine to wait for chip response */ | 87 | /* Out-of-line routine to wait for chip response */ |
89 | while (!(ReadDOC(docptr, CDSNControl) & CDSN_CTRL_FR_B)) { | 88 | while (!(ReadDOC(docptr, CDSNControl) & CDSN_CTRL_FR_B)) { |
@@ -92,7 +91,7 @@ static int _DoC_WaitReady(struct DiskOnChip *doc) | |||
92 | DoC_Delay(doc, 2); | 91 | DoC_Delay(doc, 2); |
93 | 92 | ||
94 | if (time_after(jiffies, timeo)) { | 93 | if (time_after(jiffies, timeo)) { |
95 | DEBUG(MTD_DEBUG_LEVEL2, "_DoC_WaitReady timed out.\n"); | 94 | pr_debug("_DoC_WaitReady timed out.\n"); |
96 | return -EIO; | 95 | return -EIO; |
97 | } | 96 | } |
98 | udelay(1); | 97 | udelay(1); |
@@ -323,8 +322,7 @@ static int DoC_IdentChip(struct DiskOnChip *doc, int floor, int chip) | |||
323 | 322 | ||
324 | /* Reset the chip */ | 323 | /* Reset the chip */ |
325 | if (DoC_Command(doc, NAND_CMD_RESET, CDSN_CTRL_WP)) { | 324 | if (DoC_Command(doc, NAND_CMD_RESET, CDSN_CTRL_WP)) { |
326 | DEBUG(MTD_DEBUG_LEVEL2, | 325 | pr_debug("DoC_Command (reset) for %d,%d returned true\n", |
327 | "DoC_Command (reset) for %d,%d returned true\n", | ||
328 | floor, chip); | 326 | floor, chip); |
329 | return 0; | 327 | return 0; |
330 | } | 328 | } |
@@ -332,8 +330,7 @@ static int DoC_IdentChip(struct DiskOnChip *doc, int floor, int chip) | |||
332 | 330 | ||
333 | /* Read the NAND chip ID: 1. Send ReadID command */ | 331 | /* Read the NAND chip ID: 1. Send ReadID command */ |
334 | if (DoC_Command(doc, NAND_CMD_READID, CDSN_CTRL_WP)) { | 332 | if (DoC_Command(doc, NAND_CMD_READID, CDSN_CTRL_WP)) { |
335 | DEBUG(MTD_DEBUG_LEVEL2, | 333 | pr_debug("DoC_Command (ReadID) for %d,%d returned true\n", |
336 | "DoC_Command (ReadID) for %d,%d returned true\n", | ||
337 | floor, chip); | 334 | floor, chip); |
338 | return 0; | 335 | return 0; |
339 | } | 336 | } |
@@ -699,7 +696,7 @@ static int doc_read(struct mtd_info *mtd, loff_t from, size_t len, | |||
699 | #ifdef ECC_DEBUG | 696 | #ifdef ECC_DEBUG |
700 | printk(KERN_ERR "DiskOnChip ECC Error: Read at %lx\n", (long)from); | 697 | printk(KERN_ERR "DiskOnChip ECC Error: Read at %lx\n", (long)from); |
701 | #endif | 698 | #endif |
702 | /* Read the ECC syndrom through the DiskOnChip ECC | 699 | /* Read the ECC syndrome through the DiskOnChip ECC |
703 | logic. These syndrome will be all ZERO when there | 700 | logic. These syndrome will be all ZERO when there |
704 | is no error */ | 701 | is no error */ |
705 | for (i = 0; i < 6; i++) { | 702 | for (i = 0; i < 6; i++) { |
@@ -930,7 +927,7 @@ static int doc_read_oob(struct mtd_info *mtd, loff_t ofs, | |||
930 | uint8_t *buf = ops->oobbuf; | 927 | uint8_t *buf = ops->oobbuf; |
931 | size_t len = ops->len; | 928 | size_t len = ops->len; |
932 | 929 | ||
933 | BUG_ON(ops->mode != MTD_OOB_PLACE); | 930 | BUG_ON(ops->mode != MTD_OPS_PLACE_OOB); |
934 | 931 | ||
935 | ofs += ops->ooboffs; | 932 | ofs += ops->ooboffs; |
936 | 933 | ||
@@ -1094,7 +1091,7 @@ static int doc_write_oob(struct mtd_info *mtd, loff_t ofs, | |||
1094 | struct DiskOnChip *this = mtd->priv; | 1091 | struct DiskOnChip *this = mtd->priv; |
1095 | int ret; | 1092 | int ret; |
1096 | 1093 | ||
1097 | BUG_ON(ops->mode != MTD_OOB_PLACE); | 1094 | BUG_ON(ops->mode != MTD_OPS_PLACE_OOB); |
1098 | 1095 | ||
1099 | mutex_lock(&this->lock); | 1096 | mutex_lock(&this->lock); |
1100 | ret = doc_write_oob_nolock(mtd, ofs + ops->ooboffs, ops->len, | 1097 | ret = doc_write_oob_nolock(mtd, ofs + ops->ooboffs, ops->len, |
diff --git a/drivers/mtd/devices/doc2001.c b/drivers/mtd/devices/doc2001.c index 241192f05bc8..a3f7a27499be 100644 --- a/drivers/mtd/devices/doc2001.c +++ b/drivers/mtd/devices/doc2001.c | |||
@@ -55,15 +55,14 @@ static int _DoC_WaitReady(void __iomem * docptr) | |||
55 | { | 55 | { |
56 | unsigned short c = 0xffff; | 56 | unsigned short c = 0xffff; |
57 | 57 | ||
58 | DEBUG(MTD_DEBUG_LEVEL3, | 58 | pr_debug("_DoC_WaitReady called for out-of-line wait\n"); |
59 | "_DoC_WaitReady called for out-of-line wait\n"); | ||
60 | 59 | ||
61 | /* Out-of-line routine to wait for chip response */ | 60 | /* Out-of-line routine to wait for chip response */ |
62 | while (!(ReadDOC(docptr, CDSNControl) & CDSN_CTRL_FR_B) && --c) | 61 | while (!(ReadDOC(docptr, CDSNControl) & CDSN_CTRL_FR_B) && --c) |
63 | ; | 62 | ; |
64 | 63 | ||
65 | if (c == 0) | 64 | if (c == 0) |
66 | DEBUG(MTD_DEBUG_LEVEL2, "_DoC_WaitReady timed out.\n"); | 65 | pr_debug("_DoC_WaitReady timed out.\n"); |
67 | 66 | ||
68 | return (c == 0); | 67 | return (c == 0); |
69 | } | 68 | } |
@@ -464,7 +463,7 @@ static int doc_read (struct mtd_info *mtd, loff_t from, size_t len, | |||
464 | #ifdef ECC_DEBUG | 463 | #ifdef ECC_DEBUG |
465 | printk("DiskOnChip ECC Error: Read at %lx\n", (long)from); | 464 | printk("DiskOnChip ECC Error: Read at %lx\n", (long)from); |
466 | #endif | 465 | #endif |
467 | /* Read the ECC syndrom through the DiskOnChip ECC logic. | 466 | /* Read the ECC syndrome through the DiskOnChip ECC logic. |
468 | These syndrome will be all ZERO when there is no error */ | 467 | These syndrome will be all ZERO when there is no error */ |
469 | for (i = 0; i < 6; i++) { | 468 | for (i = 0; i < 6; i++) { |
470 | syndrome[i] = ReadDOC(docptr, ECCSyndrome0 + i); | 469 | syndrome[i] = ReadDOC(docptr, ECCSyndrome0 + i); |
@@ -632,7 +631,7 @@ static int doc_read_oob(struct mtd_info *mtd, loff_t ofs, | |||
632 | uint8_t *buf = ops->oobbuf; | 631 | uint8_t *buf = ops->oobbuf; |
633 | size_t len = ops->len; | 632 | size_t len = ops->len; |
634 | 633 | ||
635 | BUG_ON(ops->mode != MTD_OOB_PLACE); | 634 | BUG_ON(ops->mode != MTD_OPS_PLACE_OOB); |
636 | 635 | ||
637 | ofs += ops->ooboffs; | 636 | ofs += ops->ooboffs; |
638 | 637 | ||
@@ -690,7 +689,7 @@ static int doc_write_oob(struct mtd_info *mtd, loff_t ofs, | |||
690 | uint8_t *buf = ops->oobbuf; | 689 | uint8_t *buf = ops->oobbuf; |
691 | size_t len = ops->len; | 690 | size_t len = ops->len; |
692 | 691 | ||
693 | BUG_ON(ops->mode != MTD_OOB_PLACE); | 692 | BUG_ON(ops->mode != MTD_OPS_PLACE_OOB); |
694 | 693 | ||
695 | ofs += ops->ooboffs; | 694 | ofs += ops->ooboffs; |
696 | 695 | ||
diff --git a/drivers/mtd/devices/doc2001plus.c b/drivers/mtd/devices/doc2001plus.c index 09ae0adc3ad0..99351bc3e0ed 100644 --- a/drivers/mtd/devices/doc2001plus.c +++ b/drivers/mtd/devices/doc2001plus.c | |||
@@ -61,15 +61,14 @@ static int _DoC_WaitReady(void __iomem * docptr) | |||
61 | { | 61 | { |
62 | unsigned int c = 0xffff; | 62 | unsigned int c = 0xffff; |
63 | 63 | ||
64 | DEBUG(MTD_DEBUG_LEVEL3, | 64 | pr_debug("_DoC_WaitReady called for out-of-line wait\n"); |
65 | "_DoC_WaitReady called for out-of-line wait\n"); | ||
66 | 65 | ||
67 | /* Out-of-line routine to wait for chip response */ | 66 | /* Out-of-line routine to wait for chip response */ |
68 | while (((ReadDOC(docptr, Mplus_FlashControl) & CDSN_CTRL_FR_B_MASK) != CDSN_CTRL_FR_B_MASK) && --c) | 67 | while (((ReadDOC(docptr, Mplus_FlashControl) & CDSN_CTRL_FR_B_MASK) != CDSN_CTRL_FR_B_MASK) && --c) |
69 | ; | 68 | ; |
70 | 69 | ||
71 | if (c == 0) | 70 | if (c == 0) |
72 | DEBUG(MTD_DEBUG_LEVEL2, "_DoC_WaitReady timed out.\n"); | 71 | pr_debug("_DoC_WaitReady timed out.\n"); |
73 | 72 | ||
74 | return (c == 0); | 73 | return (c == 0); |
75 | } | 74 | } |
@@ -655,7 +654,7 @@ static int doc_read(struct mtd_info *mtd, loff_t from, size_t len, | |||
655 | #ifdef ECC_DEBUG | 654 | #ifdef ECC_DEBUG |
656 | printk("DiskOnChip ECC Error: Read at %lx\n", (long)from); | 655 | printk("DiskOnChip ECC Error: Read at %lx\n", (long)from); |
657 | #endif | 656 | #endif |
658 | /* Read the ECC syndrom through the DiskOnChip ECC logic. | 657 | /* Read the ECC syndrome through the DiskOnChip ECC logic. |
659 | These syndrome will be all ZERO when there is no error */ | 658 | These syndrome will be all ZERO when there is no error */ |
660 | for (i = 0; i < 6; i++) | 659 | for (i = 0; i < 6; i++) |
661 | syndrome[i] = ReadDOC(docptr, Mplus_ECCSyndrome0 + i); | 660 | syndrome[i] = ReadDOC(docptr, Mplus_ECCSyndrome0 + i); |
@@ -835,7 +834,7 @@ static int doc_read_oob(struct mtd_info *mtd, loff_t ofs, | |||
835 | uint8_t *buf = ops->oobbuf; | 834 | uint8_t *buf = ops->oobbuf; |
836 | size_t len = ops->len; | 835 | size_t len = ops->len; |
837 | 836 | ||
838 | BUG_ON(ops->mode != MTD_OOB_PLACE); | 837 | BUG_ON(ops->mode != MTD_OPS_PLACE_OOB); |
839 | 838 | ||
840 | ofs += ops->ooboffs; | 839 | ofs += ops->ooboffs; |
841 | 840 | ||
@@ -920,7 +919,7 @@ static int doc_write_oob(struct mtd_info *mtd, loff_t ofs, | |||
920 | uint8_t *buf = ops->oobbuf; | 919 | uint8_t *buf = ops->oobbuf; |
921 | size_t len = ops->len; | 920 | size_t len = ops->len; |
922 | 921 | ||
923 | BUG_ON(ops->mode != MTD_OOB_PLACE); | 922 | BUG_ON(ops->mode != MTD_OPS_PLACE_OOB); |
924 | 923 | ||
925 | ofs += ops->ooboffs; | 924 | ofs += ops->ooboffs; |
926 | 925 | ||
diff --git a/drivers/mtd/devices/docecc.c b/drivers/mtd/devices/docecc.c index 37ef29a73ee4..4a1c39b6f37d 100644 --- a/drivers/mtd/devices/docecc.c +++ b/drivers/mtd/devices/docecc.c | |||
@@ -2,7 +2,7 @@ | |||
2 | * ECC algorithm for M-systems disk on chip. We use the excellent Reed | 2 | * ECC algorithm for M-systems disk on chip. We use the excellent Reed |
3 | * Solmon code of Phil Karn (karn@ka9q.ampr.org) available under the | 3 | * Solmon code of Phil Karn (karn@ka9q.ampr.org) available under the |
4 | * GNU GPL License. The rest is simply to convert the disk on chip | 4 | * GNU GPL License. The rest is simply to convert the disk on chip |
5 | * syndrom into a standard syndom. | 5 | * syndrome into a standard syndome. |
6 | * | 6 | * |
7 | * Author: Fabrice Bellard (fabrice.bellard@netgem.com) | 7 | * Author: Fabrice Bellard (fabrice.bellard@netgem.com) |
8 | * Copyright (C) 2000 Netgem S.A. | 8 | * Copyright (C) 2000 Netgem S.A. |
diff --git a/drivers/mtd/devices/docg3.c b/drivers/mtd/devices/docg3.c new file mode 100644 index 000000000000..bdcf5df982e8 --- /dev/null +++ b/drivers/mtd/devices/docg3.c | |||
@@ -0,0 +1,1114 @@ | |||
1 | /* | ||
2 | * Handles the M-Systems DiskOnChip G3 chip | ||
3 | * | ||
4 | * Copyright (C) 2011 Robert Jarzmik | ||
5 | * | ||
6 | * This program is free software; you can redistribute it and/or modify | ||
7 | * it under the terms of the GNU General Public License as published by | ||
8 | * the Free Software Foundation; either version 2 of the License, or | ||
9 | * (at your option) any later version. | ||
10 | * | ||
11 | * This program is distributed in the hope that it will be useful, | ||
12 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
13 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
14 | * GNU General Public License for more details. | ||
15 | * | ||
16 | * You should have received a copy of the GNU General Public License | ||
17 | * along with this program; if not, write to the Free Software | ||
18 | * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA | ||
19 | * | ||
20 | */ | ||
21 | |||
22 | #include <linux/kernel.h> | ||
23 | #include <linux/module.h> | ||
24 | #include <linux/errno.h> | ||
25 | #include <linux/platform_device.h> | ||
26 | #include <linux/string.h> | ||
27 | #include <linux/slab.h> | ||
28 | #include <linux/io.h> | ||
29 | #include <linux/delay.h> | ||
30 | #include <linux/mtd/mtd.h> | ||
31 | #include <linux/mtd/partitions.h> | ||
32 | |||
33 | #include <linux/debugfs.h> | ||
34 | #include <linux/seq_file.h> | ||
35 | |||
36 | #define CREATE_TRACE_POINTS | ||
37 | #include "docg3.h" | ||
38 | |||
39 | /* | ||
40 | * This driver handles the DiskOnChip G3 flash memory. | ||
41 | * | ||
42 | * As no specification is available from M-Systems/Sandisk, this drivers lacks | ||
43 | * several functions available on the chip, as : | ||
44 | * - block erase | ||
45 | * - page write | ||
46 | * - IPL write | ||
47 | * - ECC fixing (lack of BCH algorithm understanding) | ||
48 | * - powerdown / powerup | ||
49 | * | ||
50 | * The bus data width (8bits versus 16bits) is not handled (if_cfg flag), and | ||
51 | * the driver assumes a 16bits data bus. | ||
52 | * | ||
53 | * DocG3 relies on 2 ECC algorithms, which are handled in hardware : | ||
54 | * - a 1 byte Hamming code stored in the OOB for each page | ||
55 | * - a 7 bytes BCH code stored in the OOB for each page | ||
56 | * The BCH part is only used for check purpose, no correction is available as | ||
57 | * some information is missing. What is known is that : | ||
58 | * - BCH is in GF(2^14) | ||
59 | * - BCH is over data of 520 bytes (512 page + 7 page_info bytes | ||
60 | * + 1 hamming byte) | ||
61 | * - BCH can correct up to 4 bits (t = 4) | ||
62 | * - BCH syndromes are calculated in hardware, and checked in hardware as well | ||
63 | * | ||
64 | */ | ||
65 | |||
/* Read one byte from the chip IO space and trace the access (width 8). */
static inline u8 doc_readb(struct docg3 *docg3, u16 reg)
{
	u8 val = readb(docg3->base + reg);

	trace_docg3_io(0, 8, reg, (int)val);
	return val;
}
73 | |||
/* Read one 16-bit word from the chip IO space and trace the access (width 16). */
static inline u16 doc_readw(struct docg3 *docg3, u16 reg)
{
	u16 val = readw(docg3->base + reg);

	trace_docg3_io(0, 16, reg, (int)val);
	return val;
}
81 | |||
82 | static inline void doc_writeb(struct docg3 *docg3, u8 val, u16 reg) | ||
83 | { | ||
84 | writeb(val, docg3->base + reg); | ||
85 | trace_docg3_io(1, 16, reg, val); | ||
86 | } | ||
87 | |||
/* Write one 16-bit word to the chip IO space and trace the access (width 16). */
static inline void doc_writew(struct docg3 *docg3, u16 val, u16 reg)
{
	writew(val, docg3->base + reg);
	trace_docg3_io(1, 16, reg, val);
}
93 | |||
/* Issue one flash command cycle (writes the command register). */
static inline void doc_flash_command(struct docg3 *docg3, u8 cmd)
{
	doc_writeb(docg3, cmd, DOC_FLASHCOMMAND);
}
98 | |||
/* Start a predefined flash command sequence (writes the sequence register). */
static inline void doc_flash_sequence(struct docg3 *docg3, u8 seq)
{
	doc_writeb(docg3, seq, DOC_FLASHSEQUENCE);
}
103 | |||
/* Send one address byte for the current flash sequence. */
static inline void doc_flash_address(struct docg3 *docg3, u8 addr)
{
	doc_writeb(docg3, addr, DOC_FLASHADDRESS);
}
108 | |||
109 | static char const *part_probes[] = { "cmdlinepart", "saftlpart", NULL }; | ||
110 | |||
/*
 * Read a byte-wide chip register: the register address is latched into
 * DOC_READADDRESS first, then the value is read back from the same offset.
 */
static int doc_register_readb(struct docg3 *docg3, int reg)
{
	u8 val;

	doc_writew(docg3, reg, DOC_READADDRESS);
	val = doc_readb(docg3, reg);
	doc_vdbg("Read register %04x : %02x\n", reg, val);
	return val;
}
120 | |||
/*
 * Read a word-wide chip register: the register address is latched into
 * DOC_READADDRESS first, then the value is read back from the same offset.
 */
static int doc_register_readw(struct docg3 *docg3, int reg)
{
	u16 val;

	doc_writew(docg3, reg, DOC_READADDRESS);
	val = doc_readw(docg3, reg);
	doc_vdbg("Read register %04x : %04x\n", reg, val);
	return val;
}
130 | |||
131 | /** | ||
132 | * doc_delay - delay docg3 operations | ||
133 | * @docg3: the device | ||
134 | * @nbNOPs: the number of NOPs to issue | ||
135 | * | ||
136 | * As no specification is available, the right timings between chip commands are | ||
137 | * unknown. The only available piece of information are the observed nops on a | ||
138 | * working docg3 chip. | ||
139 | * Therefore, doc_delay relies on a busy loop of NOPs, instead of scheduler | ||
140 | * friendlier msleep() functions or blocking mdelay(). | ||
141 | */ | ||
142 | static void doc_delay(struct docg3 *docg3, int nbNOPs) | ||
143 | { | ||
144 | int i; | ||
145 | |||
146 | doc_dbg("NOP x %d\n", nbNOPs); | ||
147 | for (i = 0; i < nbNOPs; i++) | ||
148 | doc_writeb(docg3, 0, DOC_NOP); | ||
149 | } | ||
150 | |||
151 | static int is_prot_seq_error(struct docg3 *docg3) | ||
152 | { | ||
153 | int ctrl; | ||
154 | |||
155 | ctrl = doc_register_readb(docg3, DOC_FLASHCONTROL); | ||
156 | return ctrl & (DOC_CTRL_PROTECTION_ERROR | DOC_CTRL_SEQUENCE_ERROR); | ||
157 | } | ||
158 | |||
159 | static int doc_is_ready(struct docg3 *docg3) | ||
160 | { | ||
161 | int ctrl; | ||
162 | |||
163 | ctrl = doc_register_readb(docg3, DOC_FLASHCONTROL); | ||
164 | return ctrl & DOC_CTRL_FLASHREADY; | ||
165 | } | ||
166 | |||
/*
 * Poll the flash-ready bit for a bounded number of cycles.
 * Returns 0 when the chip reported ready, -EIO on timeout.
 *
 * NOTE(review): the loop condition post-decrements maxWaitCycles, so if the
 * chip becomes ready only on the very last polling cycle the counter can
 * reach 0 and the function still returns -EIO — presumably harmless given
 * the wide margin, but worth confirming.
 */
static int doc_wait_ready(struct docg3 *docg3)
{
	int maxWaitCycles = 100;

	do {
		doc_delay(docg3, 4);
		cpu_relax();
	} while (!doc_is_ready(docg3) && maxWaitCycles--);
	doc_delay(docg3, 2);
	if (maxWaitCycles > 0)
		return 0;
	else
		return -EIO;
}
181 | |||
/*
 * Issue the flash RESET sequence and wait for the chip to become ready.
 * Returns 0 on success, -EIO if the chip never reported ready.
 */
static int doc_reset_seq(struct docg3 *docg3)
{
	int ret;

	/* 0x10 written to FLASHCONTROL before the reset sequence — magic
	 * value, meaning unknown (no chip specification available) */
	doc_writeb(docg3, 0x10, DOC_FLASHCONTROL);
	doc_flash_sequence(docg3, DOC_SEQ_RESET);
	doc_flash_command(docg3, DOC_CMD_RESET);
	doc_delay(docg3, 2);
	ret = doc_wait_ready(docg3);

	doc_dbg("doc_reset_seq() -> isReady=%s\n", ret ? "false" : "true");
	return ret;
}
195 | |||
/**
 * doc_read_data_area - Read data from data area
 * @docg3: the device
 * @buf: the buffer to fill in
 * @len: the length to read
 * @first: first time read, DOC_READADDRESS should be set
 *
 * Reads bytes from flash data. Handles the single byte / even bytes reads.
 */
static void doc_read_data_area(struct docg3 *docg3, void *buf, int len,
			       int first)
{
	int i, cdr, len4;
	u16 data16, *dst16;
	u8 data8, *dst8;

	doc_dbg("doc_read_data_area(buf=%p, len=%d)\n", buf, len);
	/* Split the transfer: len4 bytes by 16-bit accesses, then the
	 * remaining cdr (0..3) tail bytes one at a time */
	cdr = len & 0x3;
	len4 = len - cdr;

	if (first)
		doc_writew(docg3, DOC_IOSPACE_DATA, DOC_READADDRESS);
	dst16 = buf;
	for (i = 0; i < len4; i += 2) {
		data16 = doc_readw(docg3, DOC_IOSPACE_DATA);
		*dst16 = data16;
		dst16++;
	}

	if (cdr) {
		/* Switch the IO window to one-byte mode for the tail */
		doc_writew(docg3, DOC_IOSPACE_DATA | DOC_READADDR_ONE_BYTE,
			   DOC_READADDRESS);
		doc_delay(docg3, 1);
		dst8 = (u8 *)dst16;
		for (i = 0; i < cdr; i++) {
			data8 = doc_readb(docg3, DOC_IOSPACE_DATA);
			*dst8 = data8;
			dst8++;
		}
	}
}
237 | |||
/**
 * doc_set_reliable_mode - Sets the flash to reliable data mode
 * @docg3: the device
 *
 * The reliable data mode is a bit slower than the fast mode, but less errors
 * occur. Entering the reliable mode cannot be done without entering the fast
 * mode first.
 */
static void doc_set_reliable_mode(struct docg3 *docg3)
{
	doc_dbg("doc_set_reliable_mode()\n");
	doc_flash_sequence(docg3, DOC_SEQ_SET_MODE);
	/* Fast mode must be entered before reliable mode (see above) */
	doc_flash_command(docg3, DOC_CMD_FAST_MODE);
	doc_flash_command(docg3, DOC_CMD_RELIABLE_MODE);
	doc_delay(docg3, 2);
}
254 | |||
/**
 * doc_set_asic_mode - Set the ASIC mode
 * @docg3: the device
 * @mode: the mode
 *
 * The ASIC can work in 3 modes :
 *  - RESET: all registers are zeroed
 *  - NORMAL: receives and handles commands
 *  - POWERDOWN: minimal poweruse, flash parts shut off
 */
static void doc_set_asic_mode(struct docg3 *docg3, u8 mode)
{
	int i;

	/* 12 dummy reads of the IPL IO space — presumably required by the
	 * ASIC before it accepts a mode change (no spec available; TODO
	 * confirm) */
	for (i = 0; i < 12; i++)
		doc_readb(docg3, DOC_IOSPACE_IPL);

	mode |= DOC_ASICMODE_MDWREN;
	doc_dbg("doc_set_asic_mode(%02x)\n", mode);
	/* The mode is confirmed by writing its bitwise complement */
	doc_writeb(docg3, mode, DOC_ASICMODE);
	doc_writeb(docg3, ~mode, DOC_ASICMODECONFIRM);
	doc_delay(docg3, 1);
}
278 | |||
/**
 * doc_set_device_id - Sets the devices id for cascaded G3 chips
 * @docg3: the device
 * @id: the chip to select (amongst 0, 1, 2, 3)
 *
 * There can be 4 cascaded G3 chips. This function selects the one which
 * should be the active one.
 */
static void doc_set_device_id(struct docg3 *docg3, int id)
{
	u8 ctrl;

	doc_dbg("doc_set_device_id(%d)\n", id);
	doc_writeb(docg3, id, DOC_DEVICESELECT);
	ctrl = doc_register_readb(docg3, DOC_FLASHCONTROL);

	/* Clear the violation flag and assert chip enable */
	ctrl &= ~DOC_CTRL_VIOLATION;
	ctrl |= DOC_CTRL_CE;
	doc_writeb(docg3, ctrl, DOC_FLASHCONTROL);
}
299 | |||
300 | /** | ||
301 | * doc_set_extra_page_mode - Change flash page layout | ||
302 | * @docg3: the device | ||
303 | * | ||
304 | * Normally, the flash page is split into the data (512 bytes) and the out of | ||
305 | * band data (16 bytes). For each, 4 more bytes can be accessed, where the wear | ||
306 | * leveling counters are stored. To access this last area of 4 bytes, a special | ||
307 | * mode must be input to the flash ASIC. | ||
308 | * | ||
309 | * Returns 0 if no error occured, -EIO else. | ||
310 | */ | ||
311 | static int doc_set_extra_page_mode(struct docg3 *docg3) | ||
312 | { | ||
313 | int fctrl; | ||
314 | |||
315 | doc_dbg("doc_set_extra_page_mode()\n"); | ||
316 | doc_flash_sequence(docg3, DOC_SEQ_PAGE_SIZE_532); | ||
317 | doc_flash_command(docg3, DOC_CMD_PAGE_SIZE_532); | ||
318 | doc_delay(docg3, 2); | ||
319 | |||
320 | fctrl = doc_register_readb(docg3, DOC_FLASHCONTROL); | ||
321 | if (fctrl & (DOC_CTRL_PROTECTION_ERROR | DOC_CTRL_SEQUENCE_ERROR)) | ||
322 | return -EIO; | ||
323 | else | ||
324 | return 0; | ||
325 | } | ||
326 | |||
/**
 * doc_read_seek - Set both flash planes to the specified block, page for reading
 * @docg3: the device
 * @block0: the first plane block index
 * @block1: the second plane block index
 * @page: the page index within the block
 * @wear: if true, read will occur on the 4 extra bytes of the wear area
 * @ofs: offset in page to read
 *
 * Programs the flash even and odd planes to the specific block and page.
 * Alternatively, programs the flash to the wear area of the specified page.
 *
 * Returns 0 on success, -EIO if the extra (wear) page mode could not be
 * entered.
 */
static int doc_read_seek(struct docg3 *docg3, int block0, int block1, int page,
			 int wear, int ofs)
{
	int sector, ret = 0;

	doc_dbg("doc_seek(blocks=(%d,%d), page=%d, ofs=%d, wear=%d)\n",
		block0, block1, page, ofs, wear);

	/* Plane 1 for data-area offsets, plane 2 for the rest (incl. wear) */
	if (!wear && (ofs < 2 * DOC_LAYOUT_PAGE_SIZE)) {
		doc_flash_sequence(docg3, DOC_SEQ_SET_PLANE1);
		doc_flash_command(docg3, DOC_CMD_READ_PLANE1);
		doc_delay(docg3, 2);
	} else {
		doc_flash_sequence(docg3, DOC_SEQ_SET_PLANE2);
		doc_flash_command(docg3, DOC_CMD_READ_PLANE2);
		doc_delay(docg3, 2);
	}

	doc_set_reliable_mode(docg3);
	if (wear)
		ret = doc_set_extra_page_mode(docg3);
	if (ret)
		goto out;

	/* Program the even plane block/page address, 24 bits, LSB first */
	sector = (block0 << DOC_ADDR_BLOCK_SHIFT) + (page & DOC_ADDR_PAGE_MASK);
	doc_flash_sequence(docg3, DOC_SEQ_READ);
	doc_flash_command(docg3, DOC_CMD_PROG_BLOCK_ADDR);
	doc_delay(docg3, 1);
	doc_flash_address(docg3, sector & 0xff);
	doc_flash_address(docg3, (sector >> 8) & 0xff);
	doc_flash_address(docg3, (sector >> 16) & 0xff);
	doc_delay(docg3, 1);

	/* Program the odd plane block/page address */
	sector = (block1 << DOC_ADDR_BLOCK_SHIFT) + (page & DOC_ADDR_PAGE_MASK);
	doc_flash_command(docg3, DOC_CMD_PROG_BLOCK_ADDR);
	doc_delay(docg3, 1);
	doc_flash_address(docg3, sector & 0xff);
	doc_flash_address(docg3, (sector >> 8) & 0xff);
	doc_flash_address(docg3, (sector >> 16) & 0xff);
	doc_delay(docg3, 2);

out:
	return ret;
}
383 | |||
/**
 * doc_read_page_ecc_init - Initialize hardware ECC engine
 * @docg3: the device
 * @len: the number of bytes covered by the ECC (BCH covered)
 *
 * The function does initialize the hardware ECC engine to compute the Hamming
 * ECC (on 1 byte) and the BCH syndromes (on 7 bytes).
 *
 * Return 0 if succeeded, -EIO on error
 */
static int doc_read_page_ecc_init(struct docg3 *docg3, int len)
{
	doc_writew(docg3, DOC_ECCCONF0_READ_MODE
		   | DOC_ECCCONF0_BCH_ENABLE | DOC_ECCCONF0_HAMMING_ENABLE
		   | (len & DOC_ECCCONF0_DATA_BYTES_MASK),
		   DOC_ECCCONF0);
	doc_delay(docg3, 4);
	doc_register_readb(docg3, DOC_FLASHCONTROL);
	return doc_wait_ready(docg3);
}
404 | |||
/**
 * doc_read_page_prepare - Prepares reading data from a flash page
 * @docg3: the device
 * @block0: the first plane block index on flash memory
 * @block1: the second plane block index on flash memory
 * @page: the page index in the block
 * @offset: the offset in the page (must be a multiple of 4)
 *
 * Prepares the page to be read in the flash memory :
 *  - tell ASIC to map the flash pages
 *  - tell ASIC to be in read mode
 *
 * After a call to this method, a call to doc_read_page_finish is mandatory,
 * to end the read cycle of the flash.
 *
 * Read data from a flash page. The length to be read must be between 0 and
 * (page_size + oob_size + wear_size), ie. 532, and a multiple of 4 (because
 * the extra bytes reading is not implemented).
 *
 * As pages are grouped by 2 (in 2 planes), reading from a page must be done
 * in two steps:
 *  - one read of 512 bytes at offset 0
 *  - one read of 512 bytes at offset 512 + 16
 *
 * Returns 0 if successful, -EINVAL for an out-of-range offset, -EIO if a
 * read error occurred.
 */
static int doc_read_page_prepare(struct docg3 *docg3, int block0, int block1,
				 int page, int offset)
{
	int wear_area = 0, ret = 0;

	doc_dbg("doc_read_page_prepare(blocks=(%d,%d), page=%d, ofsInPage=%d)\n",
		block0, block1, page, offset);
	if (offset >= DOC_LAYOUT_WEAR_OFFSET)
		wear_area = 1;
	if (!wear_area && offset > (DOC_LAYOUT_PAGE_OOB_SIZE * 2))
		return -EINVAL;

	doc_set_device_id(docg3, docg3->device_id);
	ret = doc_reset_seq(docg3);
	if (ret)
		goto err;

	/* Program the flash address block and page */
	ret = doc_read_seek(docg3, block0, block1, page, wear_area, offset);
	if (ret)
		goto err;

	doc_flash_command(docg3, DOC_CMD_READ_ALL_PLANES);
	doc_delay(docg3, 2);
	doc_wait_ready(docg3);

	/* The in-page address is programmed in 32-bit word units (>> 2),
	 * rebased to the second page of the pair when beyond it */
	doc_flash_command(docg3, DOC_CMD_SET_ADDR_READ);
	doc_delay(docg3, 1);
	if (offset >= DOC_LAYOUT_PAGE_SIZE * 2)
		offset -= 2 * DOC_LAYOUT_PAGE_SIZE;
	doc_flash_address(docg3, offset >> 2);
	doc_delay(docg3, 1);
	doc_wait_ready(docg3);

	doc_flash_command(docg3, DOC_CMD_READ_FLASH);

	return 0;
err:
	/* Terminate the aborted read cycle */
	doc_writeb(docg3, 0, DOC_DATAEND);
	doc_delay(docg3, 2);
	return -EIO;
}
473 | |||
/**
 * doc_read_page_getbytes - Reads bytes from a prepared page
 * @docg3: the device
 * @len: the number of bytes to be read (must be a multiple of 4)
 * @buf: the buffer to be filled in
 * @first: 1 if first time read, DOC_READADDRESS should be set
 *
 * Returns the number of bytes read (always @len).
 */
static int doc_read_page_getbytes(struct docg3 *docg3, int len, u_char *buf,
				  int first)
{
	doc_read_data_area(docg3, buf, len, first);
	doc_delay(docg3, 2);
	return len;
}
489 | |||
/**
 * doc_get_hw_bch_syndroms - Get hardware calculated BCH syndromes
 * @docg3: the device
 * @syns: the array of 7 integers where the syndromes will be stored
 */
static void doc_get_hw_bch_syndroms(struct docg3 *docg3, int *syns)
{
	int i;

	for (i = 0; i < DOC_ECC_BCH_SIZE; i++)
		syns[i] = doc_register_readb(docg3, DOC_BCH_SYNDROM(i));
}
502 | |||
/**
 * doc_read_page_finish - Ends reading of a flash page
 * @docg3: the device
 *
 * As a side effect, resets the chip selector to 0. This ensures that after each
 * read operation, the floor 0 is selected. Therefore, if the systems halts, the
 * reboot will boot on floor 0, where the IPL is.
 */
static void doc_read_page_finish(struct docg3 *docg3)
{
	/* Signal end-of-data to close the read cycle */
	doc_writeb(docg3, 0, DOC_DATAEND);
	doc_delay(docg3, 2);
	doc_set_device_id(docg3, 0);
}
517 | |||
518 | /** | ||
519 | * calc_block_sector - Calculate blocks, pages and ofs. | ||
520 | |||
521 | * @from: offset in flash | ||
522 | * @block0: first plane block index calculated | ||
523 | * @block1: second plane block index calculated | ||
524 | * @page: page calculated | ||
525 | * @ofs: offset in page | ||
526 | */ | ||
527 | static void calc_block_sector(loff_t from, int *block0, int *block1, int *page, | ||
528 | int *ofs) | ||
529 | { | ||
530 | uint sector; | ||
531 | |||
532 | sector = from / DOC_LAYOUT_PAGE_SIZE; | ||
533 | *block0 = sector / (DOC_LAYOUT_PAGES_PER_BLOCK * DOC_LAYOUT_NBPLANES) | ||
534 | * DOC_LAYOUT_NBPLANES; | ||
535 | *block1 = *block0 + 1; | ||
536 | *page = sector % (DOC_LAYOUT_PAGES_PER_BLOCK * DOC_LAYOUT_NBPLANES); | ||
537 | *page /= DOC_LAYOUT_NBPLANES; | ||
538 | if (sector % 2) | ||
539 | *ofs = DOC_LAYOUT_PAGE_OOB_SIZE; | ||
540 | else | ||
541 | *ofs = 0; | ||
542 | } | ||
543 | |||
544 | /** | ||
545 | * doc_read - Read bytes from flash | ||
546 | * @mtd: the device | ||
547 | * @from: the offset from first block and first page, in bytes, aligned on page | ||
548 | * size | ||
549 | * @len: the number of bytes to read (must be a multiple of 4) | ||
550 | * @retlen: the number of bytes actually read | ||
551 | * @buf: the filled in buffer | ||
552 | * | ||
553 | * Reads flash memory pages. This function does not read the OOB chunk, but only | ||
554 | * the page data. | ||
555 | * | ||
556 | * Returns 0 if read successfull, of -EIO, -EINVAL if an error occured | ||
557 | */ | ||
558 | static int doc_read(struct mtd_info *mtd, loff_t from, size_t len, | ||
559 | size_t *retlen, u_char *buf) | ||
560 | { | ||
561 | struct docg3 *docg3 = mtd->priv; | ||
562 | int block0, block1, page, readlen, ret, ofs = 0; | ||
563 | int syn[DOC_ECC_BCH_SIZE], eccconf1; | ||
564 | u8 oob[DOC_LAYOUT_OOB_SIZE]; | ||
565 | |||
566 | ret = -EINVAL; | ||
567 | doc_dbg("doc_read(from=%lld, len=%zu, buf=%p)\n", from, len, buf); | ||
568 | if (from % DOC_LAYOUT_PAGE_SIZE) | ||
569 | goto err; | ||
570 | if (len % 4) | ||
571 | goto err; | ||
572 | calc_block_sector(from, &block0, &block1, &page, &ofs); | ||
573 | if (block1 > docg3->max_block) | ||
574 | goto err; | ||
575 | |||
576 | *retlen = 0; | ||
577 | ret = 0; | ||
578 | readlen = min_t(size_t, len, (size_t)DOC_LAYOUT_PAGE_SIZE); | ||
579 | while (!ret && len > 0) { | ||
580 | readlen = min_t(size_t, len, (size_t)DOC_LAYOUT_PAGE_SIZE); | ||
581 | ret = doc_read_page_prepare(docg3, block0, block1, page, ofs); | ||
582 | if (ret < 0) | ||
583 | goto err; | ||
584 | ret = doc_read_page_ecc_init(docg3, DOC_ECC_BCH_COVERED_BYTES); | ||
585 | if (ret < 0) | ||
586 | goto err_in_read; | ||
587 | ret = doc_read_page_getbytes(docg3, readlen, buf, 1); | ||
588 | if (ret < readlen) | ||
589 | goto err_in_read; | ||
590 | ret = doc_read_page_getbytes(docg3, DOC_LAYOUT_OOB_SIZE, | ||
591 | oob, 0); | ||
592 | if (ret < DOC_LAYOUT_OOB_SIZE) | ||
593 | goto err_in_read; | ||
594 | |||
595 | *retlen += readlen; | ||
596 | buf += readlen; | ||
597 | len -= readlen; | ||
598 | |||
599 | ofs ^= DOC_LAYOUT_PAGE_OOB_SIZE; | ||
600 | if (ofs == 0) | ||
601 | page += 2; | ||
602 | if (page > DOC_ADDR_PAGE_MASK) { | ||
603 | page = 0; | ||
604 | block0 += 2; | ||
605 | block1 += 2; | ||
606 | } | ||
607 | |||
608 | /* | ||
609 | * There should be a BCH bitstream fixing algorithm here ... | ||
610 | * By now, a page read failure is triggered by BCH error | ||
611 | */ | ||
612 | doc_get_hw_bch_syndroms(docg3, syn); | ||
613 | eccconf1 = doc_register_readb(docg3, DOC_ECCCONF1); | ||
614 | |||
615 | doc_dbg("OOB - INFO: %02x:%02x:%02x:%02x:%02x:%02x:%02x\n", | ||
616 | oob[0], oob[1], oob[2], oob[3], oob[4], | ||
617 | oob[5], oob[6]); | ||
618 | doc_dbg("OOB - HAMMING: %02x\n", oob[7]); | ||
619 | doc_dbg("OOB - BCH_ECC: %02x:%02x:%02x:%02x:%02x:%02x:%02x\n", | ||
620 | oob[8], oob[9], oob[10], oob[11], oob[12], | ||
621 | oob[13], oob[14]); | ||
622 | doc_dbg("OOB - UNUSED: %02x\n", oob[15]); | ||
623 | doc_dbg("ECC checks: ECCConf1=%x\n", eccconf1); | ||
624 | doc_dbg("ECC BCH syndrom: %02x:%02x:%02x:%02x:%02x:%02x:%02x\n", | ||
625 | syn[0], syn[1], syn[2], syn[3], syn[4], syn[5], syn[6]); | ||
626 | |||
627 | ret = -EBADMSG; | ||
628 | if (block0 >= DOC_LAYOUT_BLOCK_FIRST_DATA) { | ||
629 | if (eccconf1 & DOC_ECCCONF1_BCH_SYNDROM_ERR) | ||
630 | goto err_in_read; | ||
631 | if (is_prot_seq_error(docg3)) | ||
632 | goto err_in_read; | ||
633 | } | ||
634 | doc_read_page_finish(docg3); | ||
635 | } | ||
636 | |||
637 | return 0; | ||
638 | err_in_read: | ||
639 | doc_read_page_finish(docg3); | ||
640 | err: | ||
641 | return ret; | ||
642 | } | ||
643 | |||
644 | /** | ||
645 | * doc_read_oob - Read out of band bytes from flash | ||
646 | * @mtd: the device | ||
647 | * @from: the offset from first block and first page, in bytes, aligned on page | ||
648 | * size | ||
649 | * @ops: the mtd oob structure | ||
650 | * | ||
651 | * Reads flash memory OOB area of pages. | ||
652 | * | ||
653 | * Returns 0 if read successfull, of -EIO, -EINVAL if an error occured | ||
654 | */ | ||
655 | static int doc_read_oob(struct mtd_info *mtd, loff_t from, | ||
656 | struct mtd_oob_ops *ops) | ||
657 | { | ||
658 | struct docg3 *docg3 = mtd->priv; | ||
659 | int block0, block1, page, ofs, ret; | ||
660 | u8 *buf = ops->oobbuf; | ||
661 | size_t len = ops->ooblen; | ||
662 | |||
663 | doc_dbg("doc_read_oob(from=%lld, buf=%p, len=%zu)\n", from, buf, len); | ||
664 | if (len != DOC_LAYOUT_OOB_SIZE) | ||
665 | return -EINVAL; | ||
666 | |||
667 | switch (ops->mode) { | ||
668 | case MTD_OPS_PLACE_OOB: | ||
669 | buf += ops->ooboffs; | ||
670 | break; | ||
671 | default: | ||
672 | break; | ||
673 | } | ||
674 | |||
675 | calc_block_sector(from, &block0, &block1, &page, &ofs); | ||
676 | if (block1 > docg3->max_block) | ||
677 | return -EINVAL; | ||
678 | |||
679 | ret = doc_read_page_prepare(docg3, block0, block1, page, | ||
680 | ofs + DOC_LAYOUT_PAGE_SIZE); | ||
681 | if (!ret) | ||
682 | ret = doc_read_page_ecc_init(docg3, DOC_LAYOUT_OOB_SIZE); | ||
683 | if (!ret) | ||
684 | ret = doc_read_page_getbytes(docg3, DOC_LAYOUT_OOB_SIZE, | ||
685 | buf, 1); | ||
686 | doc_read_page_finish(docg3); | ||
687 | |||
688 | if (ret > 0) | ||
689 | ops->oobretlen = ret; | ||
690 | else | ||
691 | ops->oobretlen = 0; | ||
692 | return (ret > 0) ? 0 : ret; | ||
693 | } | ||
694 | |||
/**
 * doc_reload_bbt - Reload the cached bad block table from flash
 * @docg3: the device
 *
 * Reads the on-flash bad block table (one bit per block) into the
 * preallocated docg3->bbt buffer.
 *
 * Returns 0 on success, or a negative error from page prepare/ECC setup.
 */
static int doc_reload_bbt(struct docg3 *docg3)
{
	int block = DOC_LAYOUT_BLOCK_BBT;
	int ret = 0, nbpages, page;
	u_char *buf = docg3->bbt;

	/* One bit per block => 8 * PAGE_SIZE blocks covered per BBT page */
	nbpages = DIV_ROUND_UP(docg3->max_block + 1, 8 * DOC_LAYOUT_PAGE_SIZE);
	for (page = 0; !ret && (page < nbpages); page++) {
		ret = doc_read_page_prepare(docg3, block, block + 1,
					    page + DOC_LAYOUT_PAGE_BBT, 0);
		if (!ret)
			ret = doc_read_page_ecc_init(docg3,
						     DOC_LAYOUT_PAGE_SIZE);
		/* NOTE(review): getbytes return value is ignored here, so a
		 * short read leaves stale bytes in bbt — confirm intended */
		if (!ret)
			doc_read_page_getbytes(docg3, DOC_LAYOUT_PAGE_SIZE,
					       buf, 1);
		buf += DOC_LAYOUT_PAGE_SIZE;
	}
	doc_read_page_finish(docg3);
	return ret;
}
716 | |||
717 | /** | ||
718 | * doc_block_isbad - Checks whether a block is good or not | ||
719 | * @mtd: the device | ||
720 | * @from: the offset to find the correct block | ||
721 | * | ||
722 | * Returns 1 if block is bad, 0 if block is good | ||
723 | */ | ||
724 | static int doc_block_isbad(struct mtd_info *mtd, loff_t from) | ||
725 | { | ||
726 | struct docg3 *docg3 = mtd->priv; | ||
727 | int block0, block1, page, ofs, is_good; | ||
728 | |||
729 | calc_block_sector(from, &block0, &block1, &page, &ofs); | ||
730 | doc_dbg("doc_block_isbad(from=%lld) => block=(%d,%d), page=%d, ofs=%d\n", | ||
731 | from, block0, block1, page, ofs); | ||
732 | |||
733 | if (block0 < DOC_LAYOUT_BLOCK_FIRST_DATA) | ||
734 | return 0; | ||
735 | if (block1 > docg3->max_block) | ||
736 | return -EINVAL; | ||
737 | |||
738 | is_good = docg3->bbt[block0 >> 3] & (1 << (block0 & 0x7)); | ||
739 | return !is_good; | ||
740 | } | ||
741 | |||
/**
 * doc_get_erase_count - Get block erase count
 * @docg3: the device
 * @from: the offset in which the block is.
 *
 * Get the number of times a block was erased. The number is the maximum of
 * erase times between first and second plane (which should be equal normally).
 *
 * Returns The number of erases, or -EINVAL or -EIO on error.
 */
static int doc_get_erase_count(struct docg3 *docg3, loff_t from)
{
	u8 buf[DOC_LAYOUT_WEAR_SIZE];
	int ret, plane1_erase_count, plane2_erase_count;
	int block0, block1, page, ofs;

	doc_dbg("doc_get_erase_count(from=%lld, buf=%p)\n", from, buf);
	if (from % DOC_LAYOUT_PAGE_SIZE)
		return -EINVAL;
	calc_block_sector(from, &block0, &block1, &page, &ofs);
	if (block1 > docg3->max_block)
		return -EINVAL;

	/* Read the wear area; note no ECC engine setup is done for it */
	ret = doc_reset_seq(docg3);
	if (!ret)
		ret = doc_read_page_prepare(docg3, block0, block1, page,
					    ofs + DOC_LAYOUT_WEAR_OFFSET);
	if (!ret)
		ret = doc_read_page_getbytes(docg3, DOC_LAYOUT_WEAR_SIZE,
					     buf, 1);
	doc_read_page_finish(docg3);

	/* buf is only inspected when ret == 0, i.e. when it was filled */
	if (ret || (buf[0] != DOC_ERASE_MARK) || (buf[2] != DOC_ERASE_MARK))
		return -EIO;
	/*
	 * Counters are stored bit-inverted, spread over bytes 1/4/5 (plane 1)
	 * and 3/6/7 (plane 2). NOTE(review): byte layout inferred from this
	 * code only — confirm against the DiskOnChip G3 datasheet.
	 */
	plane1_erase_count = (u8)(~buf[1]) | ((u8)(~buf[4]) << 8)
			     | ((u8)(~buf[5]) << 16);
	plane2_erase_count = (u8)(~buf[3]) | ((u8)(~buf[6]) << 8)
			     | ((u8)(~buf[7]) << 16);

	return max(plane1_erase_count, plane2_erase_count);
}
783 | |||
/*
 * Debugfs entries: read-only, human readable dumps of chip registers
 */

/* Decode and print the DOC_FLASHCONTROL status register */
static int dbg_flashctrl_show(struct seq_file *s, void *p)
{
	struct docg3 *docg3 = (struct docg3 *)s->private;

	int pos = 0;
	u8 fctrl = doc_register_readb(docg3, DOC_FLASHCONTROL);

	pos += seq_printf(s,
		 "FlashControl : 0x%02x (%s,CE# %s,%s,%s,flash %s)\n",
		 fctrl,
		 fctrl & DOC_CTRL_VIOLATION ? "protocol violation" : "-",
		 fctrl & DOC_CTRL_CE ? "active" : "inactive",
		 fctrl & DOC_CTRL_PROTECTION_ERROR ? "protection error" : "-",
		 fctrl & DOC_CTRL_SEQUENCE_ERROR ? "sequence error" : "-",
		 fctrl & DOC_CTRL_FLASHREADY ? "ready" : "not ready");
	return pos;
}
DEBUGFS_RO_ATTR(flashcontrol, dbg_flashctrl_show);
805 | |||
806 | static int dbg_asicmode_show(struct seq_file *s, void *p) | ||
807 | { | ||
808 | struct docg3 *docg3 = (struct docg3 *)s->private; | ||
809 | |||
810 | int pos = 0; | ||
811 | int pctrl = doc_register_readb(docg3, DOC_ASICMODE); | ||
812 | int mode = pctrl & 0x03; | ||
813 | |||
814 | pos += seq_printf(s, | ||
815 | "%04x : RAM_WE=%d,RSTIN_RESET=%d,BDETCT_RESET=%d,WRITE_ENABLE=%d,POWERDOWN=%d,MODE=%d%d (", | ||
816 | pctrl, | ||
817 | pctrl & DOC_ASICMODE_RAM_WE ? 1 : 0, | ||
818 | pctrl & DOC_ASICMODE_RSTIN_RESET ? 1 : 0, | ||
819 | pctrl & DOC_ASICMODE_BDETCT_RESET ? 1 : 0, | ||
820 | pctrl & DOC_ASICMODE_MDWREN ? 1 : 0, | ||
821 | pctrl & DOC_ASICMODE_POWERDOWN ? 1 : 0, | ||
822 | mode >> 1, mode & 0x1); | ||
823 | |||
824 | switch (mode) { | ||
825 | case DOC_ASICMODE_RESET: | ||
826 | pos += seq_printf(s, "reset"); | ||
827 | break; | ||
828 | case DOC_ASICMODE_NORMAL: | ||
829 | pos += seq_printf(s, "normal"); | ||
830 | break; | ||
831 | case DOC_ASICMODE_POWERDOWN: | ||
832 | pos += seq_printf(s, "powerdown"); | ||
833 | break; | ||
834 | } | ||
835 | pos += seq_printf(s, ")\n"); | ||
836 | return pos; | ||
837 | } | ||
838 | DEBUGFS_RO_ATTR(asic_mode, dbg_asicmode_show); | ||
839 | |||
840 | static int dbg_device_id_show(struct seq_file *s, void *p) | ||
841 | { | ||
842 | struct docg3 *docg3 = (struct docg3 *)s->private; | ||
843 | int pos = 0; | ||
844 | int id = doc_register_readb(docg3, DOC_DEVICESELECT); | ||
845 | |||
846 | pos += seq_printf(s, "DeviceId = %d\n", id); | ||
847 | return pos; | ||
848 | } | ||
849 | DEBUGFS_RO_ATTR(device_id, dbg_device_id_show); | ||
850 | |||
/* Decode and print the protection registers and both DPS status windows */
static int dbg_protection_show(struct seq_file *s, void *p)
{
	struct docg3 *docg3 = (struct docg3 *)s->private;
	int pos = 0;
	int protect = doc_register_readb(docg3, DOC_PROTECTION);
	int dps0 = doc_register_readb(docg3, DOC_DPS0_STATUS);
	int dps0_low = doc_register_readb(docg3, DOC_DPS0_ADDRLOW);
	int dps0_high = doc_register_readb(docg3, DOC_DPS0_ADDRHIGH);
	int dps1 = doc_register_readb(docg3, DOC_DPS1_STATUS);
	int dps1_low = doc_register_readb(docg3, DOC_DPS1_ADDRLOW);
	int dps1_high = doc_register_readb(docg3, DOC_DPS1_ADDRHIGH);

	/* Global protection flags; only the error bit has an explicit
	 * "no error" counterpart in the output */
	pos += seq_printf(s, "Protection = 0x%02x (",
			  protect);
	if (protect & DOC_PROTECT_FOUNDRY_OTP_LOCK)
		pos += seq_printf(s, "FOUNDRY_OTP_LOCK,");
	if (protect & DOC_PROTECT_CUSTOMER_OTP_LOCK)
		pos += seq_printf(s, "CUSTOMER_OTP_LOCK,");
	if (protect & DOC_PROTECT_LOCK_INPUT)
		pos += seq_printf(s, "LOCK_INPUT,");
	if (protect & DOC_PROTECT_STICKY_LOCK)
		pos += seq_printf(s, "STICKY_LOCK,");
	if (protect & DOC_PROTECT_PROTECTION_ENABLED)
		pos += seq_printf(s, "PROTECTION ON,");
	if (protect & DOC_PROTECT_IPL_DOWNLOAD_LOCK)
		pos += seq_printf(s, "IPL_DOWNLOAD_LOCK,");
	if (protect & DOC_PROTECT_PROTECTION_ERROR)
		pos += seq_printf(s, "PROTECT_ERR,");
	else
		pos += seq_printf(s, "NO_PROTECT_ERR");
	pos += seq_printf(s, ")\n");

	/* Per-DPS protected address window plus status bits */
	pos += seq_printf(s, "DPS0 = 0x%02x : "
			 "Protected area [0x%x - 0x%x] : OTP=%d, READ=%d, "
			 "WRITE=%d, HW_LOCK=%d, KEY_OK=%d\n",
			 dps0, dps0_low, dps0_high,
			 !!(dps0 & DOC_DPS_OTP_PROTECTED),
			 !!(dps0 & DOC_DPS_READ_PROTECTED),
			 !!(dps0 & DOC_DPS_WRITE_PROTECTED),
			 !!(dps0 & DOC_DPS_HW_LOCK_ENABLED),
			 !!(dps0 & DOC_DPS_KEY_OK));
	pos += seq_printf(s, "DPS1 = 0x%02x : "
			 "Protected area [0x%x - 0x%x] : OTP=%d, READ=%d, "
			 "WRITE=%d, HW_LOCK=%d, KEY_OK=%d\n",
			 dps1, dps1_low, dps1_high,
			 !!(dps1 & DOC_DPS_OTP_PROTECTED),
			 !!(dps1 & DOC_DPS_READ_PROTECTED),
			 !!(dps1 & DOC_DPS_WRITE_PROTECTED),
			 !!(dps1 & DOC_DPS_HW_LOCK_ENABLED),
			 !!(dps1 & DOC_DPS_KEY_OK));
	return pos;
}
DEBUGFS_RO_ATTR(protection, dbg_protection_show);
904 | |||
905 | static int __init doc_dbg_register(struct docg3 *docg3) | ||
906 | { | ||
907 | struct dentry *root, *entry; | ||
908 | |||
909 | root = debugfs_create_dir("docg3", NULL); | ||
910 | if (!root) | ||
911 | return -ENOMEM; | ||
912 | |||
913 | entry = debugfs_create_file("flashcontrol", S_IRUSR, root, docg3, | ||
914 | &flashcontrol_fops); | ||
915 | if (entry) | ||
916 | entry = debugfs_create_file("asic_mode", S_IRUSR, root, | ||
917 | docg3, &asic_mode_fops); | ||
918 | if (entry) | ||
919 | entry = debugfs_create_file("device_id", S_IRUSR, root, | ||
920 | docg3, &device_id_fops); | ||
921 | if (entry) | ||
922 | entry = debugfs_create_file("protection", S_IRUSR, root, | ||
923 | docg3, &protection_fops); | ||
924 | if (entry) { | ||
925 | docg3->debugfs_root = root; | ||
926 | return 0; | ||
927 | } else { | ||
928 | debugfs_remove_recursive(root); | ||
929 | return -ENOMEM; | ||
930 | } | ||
931 | } | ||
932 | |||
/* Tear down the debugfs hierarchy created by doc_dbg_register() */
static void __exit doc_dbg_unregister(struct docg3 *docg3)
{
	debugfs_remove_recursive(docg3->debugfs_root);
}
937 | |||
/**
 * doc_set_driver_info - Fill the mtd_info structure and docg3 structure
 * @chip_id: The chip ID of the supported chip
 * @mtd: The structure to fill
 */
static void __init doc_set_driver_info(int chip_id, struct mtd_info *mtd)
{
	struct docg3 *docg3 = mtd->priv;
	int cfg;

	/* DOC_CONF_IF_CFG selects 16 bit vs 8 bit register reads */
	cfg = doc_register_readb(docg3, DOC_CONFIGURATION);
	docg3->if_cfg = (cfg & DOC_CONF_IF_CFG ? 1 : 0);

	/* No default case: probe only calls this with a validated chip id */
	switch (chip_id) {
	case DOC_CHIPID_G3:
		mtd->name = "DiskOnChip G3";
		docg3->max_block = 2047;
		break;
	}
	mtd->type = MTD_NANDFLASH;
	/*
	 * Once write methods are added, the correct flags will be set.
	 * mtd->flags = MTD_CAP_NANDFLASH;
	 */
	mtd->flags = MTD_CAP_ROM;
	mtd->size = (docg3->max_block + 1) * DOC_LAYOUT_BLOCK_SIZE;
	mtd->erasesize = DOC_LAYOUT_BLOCK_SIZE * DOC_LAYOUT_NBPLANES;
	mtd->writesize = DOC_LAYOUT_PAGE_SIZE;
	mtd->oobsize = DOC_LAYOUT_OOB_SIZE;
	mtd->owner = THIS_MODULE;
	/* Read-only driver so far: all erase/write entry points stay NULL */
	mtd->erase = NULL;
	mtd->point = NULL;
	mtd->unpoint = NULL;
	mtd->read = doc_read;
	mtd->write = NULL;
	mtd->read_oob = doc_read_oob;
	mtd->write_oob = NULL;
	mtd->sync = NULL;
	mtd->block_isbad = doc_block_isbad;
}
978 | |||
979 | /** | ||
980 | * doc_probe - Probe the IO space for a DiskOnChip G3 chip | ||
981 | * @pdev: platform device | ||
982 | * | ||
983 | * Probes for a G3 chip at the specified IO space in the platform data | ||
984 | * ressources. | ||
985 | * | ||
986 | * Returns 0 on success, -ENOMEM, -ENXIO on error | ||
987 | */ | ||
988 | static int __init docg3_probe(struct platform_device *pdev) | ||
989 | { | ||
990 | struct device *dev = &pdev->dev; | ||
991 | struct docg3 *docg3; | ||
992 | struct mtd_info *mtd; | ||
993 | struct resource *ress; | ||
994 | int ret, bbt_nbpages; | ||
995 | u16 chip_id, chip_id_inv; | ||
996 | |||
997 | ret = -ENOMEM; | ||
998 | docg3 = kzalloc(sizeof(struct docg3), GFP_KERNEL); | ||
999 | if (!docg3) | ||
1000 | goto nomem1; | ||
1001 | mtd = kzalloc(sizeof(struct mtd_info), GFP_KERNEL); | ||
1002 | if (!mtd) | ||
1003 | goto nomem2; | ||
1004 | mtd->priv = docg3; | ||
1005 | |||
1006 | ret = -ENXIO; | ||
1007 | ress = platform_get_resource(pdev, IORESOURCE_MEM, 0); | ||
1008 | if (!ress) { | ||
1009 | dev_err(dev, "No I/O memory resource defined\n"); | ||
1010 | goto noress; | ||
1011 | } | ||
1012 | docg3->base = ioremap(ress->start, DOC_IOSPACE_SIZE); | ||
1013 | |||
1014 | docg3->dev = &pdev->dev; | ||
1015 | docg3->device_id = 0; | ||
1016 | doc_set_device_id(docg3, docg3->device_id); | ||
1017 | doc_set_asic_mode(docg3, DOC_ASICMODE_RESET); | ||
1018 | doc_set_asic_mode(docg3, DOC_ASICMODE_NORMAL); | ||
1019 | |||
1020 | chip_id = doc_register_readw(docg3, DOC_CHIPID); | ||
1021 | chip_id_inv = doc_register_readw(docg3, DOC_CHIPID_INV); | ||
1022 | |||
1023 | ret = -ENODEV; | ||
1024 | if (chip_id != (u16)(~chip_id_inv)) { | ||
1025 | doc_info("No device found at IO addr %p\n", | ||
1026 | (void *)ress->start); | ||
1027 | goto nochipfound; | ||
1028 | } | ||
1029 | |||
1030 | switch (chip_id) { | ||
1031 | case DOC_CHIPID_G3: | ||
1032 | doc_info("Found a G3 DiskOnChip at addr %p\n", | ||
1033 | (void *)ress->start); | ||
1034 | break; | ||
1035 | default: | ||
1036 | doc_err("Chip id %04x is not a DiskOnChip G3 chip\n", chip_id); | ||
1037 | goto nochipfound; | ||
1038 | } | ||
1039 | |||
1040 | doc_set_driver_info(chip_id, mtd); | ||
1041 | platform_set_drvdata(pdev, mtd); | ||
1042 | |||
1043 | ret = -ENOMEM; | ||
1044 | bbt_nbpages = DIV_ROUND_UP(docg3->max_block + 1, | ||
1045 | 8 * DOC_LAYOUT_PAGE_SIZE); | ||
1046 | docg3->bbt = kzalloc(bbt_nbpages * DOC_LAYOUT_PAGE_SIZE, GFP_KERNEL); | ||
1047 | if (!docg3->bbt) | ||
1048 | goto nochipfound; | ||
1049 | doc_reload_bbt(docg3); | ||
1050 | |||
1051 | ret = mtd_device_parse_register(mtd, part_probes, | ||
1052 | NULL, NULL, 0); | ||
1053 | if (ret) | ||
1054 | goto register_error; | ||
1055 | |||
1056 | doc_dbg_register(docg3); | ||
1057 | return 0; | ||
1058 | |||
1059 | register_error: | ||
1060 | kfree(docg3->bbt); | ||
1061 | nochipfound: | ||
1062 | iounmap(docg3->base); | ||
1063 | noress: | ||
1064 | kfree(mtd); | ||
1065 | nomem2: | ||
1066 | kfree(docg3); | ||
1067 | nomem1: | ||
1068 | return ret; | ||
1069 | } | ||
1070 | |||
/**
 * docg3_release - Release the driver
 * @pdev: the platform device
 *
 * Returns 0
 */
static int __exit docg3_release(struct platform_device *pdev)
{
	struct mtd_info *mtd = platform_get_drvdata(pdev);
	struct docg3 *docg3 = mtd->priv;

	doc_dbg_unregister(docg3);
	mtd_device_unregister(mtd);
	iounmap(docg3->base);
	/* Free docg3 (and its bbt) before mtd, which only holds a pointer
	 * to it via mtd->priv */
	kfree(docg3->bbt);
	kfree(docg3);
	kfree(mtd);
	return 0;
}
1090 | |||
/*
 * No .probe member here: the probe routine is passed directly to
 * platform_driver_probe() in docg3_init(), so it can stay in __init memory.
 */
static struct platform_driver g3_driver = {
	.driver		= {
		.name	= "docg3",
		.owner	= THIS_MODULE,
	},
	.remove		= __exit_p(docg3_release),
};
1098 | |||
/* Module entry point: register the driver and probe for present devices */
static int __init docg3_init(void)
{
	return platform_driver_probe(&g3_driver, docg3_probe);
}
module_init(docg3_init);
1104 | |||
1105 | |||
/* Module exit point: unregister the platform driver */
static void __exit docg3_exit(void)
{
	platform_driver_unregister(&g3_driver);
}
module_exit(docg3_exit);
1111 | |||
1112 | MODULE_LICENSE("GPL"); | ||
1113 | MODULE_AUTHOR("Robert Jarzmik <robert.jarzmik@free.fr>"); | ||
1114 | MODULE_DESCRIPTION("MTD driver for DiskOnChip G3"); | ||
diff --git a/drivers/mtd/devices/docg3.h b/drivers/mtd/devices/docg3.h new file mode 100644 index 000000000000..0d407be24594 --- /dev/null +++ b/drivers/mtd/devices/docg3.h | |||
@@ -0,0 +1,297 @@ | |||
1 | /* | ||
2 | * Handles the M-Systems DiskOnChip G3 chip | ||
3 | * | ||
4 | * Copyright (C) 2011 Robert Jarzmik | ||
5 | * | ||
6 | * This program is free software; you can redistribute it and/or modify | ||
7 | * it under the terms of the GNU General Public License as published by | ||
8 | * the Free Software Foundation; either version 2 of the License, or | ||
9 | * (at your option) any later version. | ||
10 | * | ||
11 | * This program is distributed in the hope that it will be useful, | ||
12 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
13 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
14 | * GNU General Public License for more details. | ||
15 | * | ||
16 | * You should have received a copy of the GNU General Public License | ||
17 | * along with this program; if not, write to the Free Software | ||
18 | * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA | ||
19 | * | ||
20 | */ | ||
21 | |||
22 | #ifndef _MTD_DOCG3_H | ||
23 | #define _MTD_DOCG3_H | ||
24 | |||
25 | /* | ||
26 | * Flash memory areas : | ||
27 | * - 0x0000 .. 0x07ff : IPL | ||
28 | * - 0x0800 .. 0x0fff : Data area | ||
29 | * - 0x1000 .. 0x17ff : Registers | ||
30 | * - 0x1800 .. 0x1fff : Unknown | ||
31 | */ | ||
32 | #define DOC_IOSPACE_IPL 0x0000 | ||
33 | #define DOC_IOSPACE_DATA 0x0800 | ||
34 | #define DOC_IOSPACE_SIZE 0x2000 | ||
35 | |||
36 | /* | ||
 * DOC G3 layout and addressing scheme
38 | * A page address for the block "b", plane "P" and page "p": | ||
39 | * address = [bbbb bPpp pppp] | ||
40 | */ | ||
41 | |||
42 | #define DOC_ADDR_PAGE_MASK 0x3f | ||
43 | #define DOC_ADDR_BLOCK_SHIFT 6 | ||
44 | #define DOC_LAYOUT_NBPLANES 2 | ||
45 | #define DOC_LAYOUT_PAGES_PER_BLOCK 64 | ||
46 | #define DOC_LAYOUT_PAGE_SIZE 512 | ||
47 | #define DOC_LAYOUT_OOB_SIZE 16 | ||
48 | #define DOC_LAYOUT_WEAR_SIZE 8 | ||
49 | #define DOC_LAYOUT_PAGE_OOB_SIZE \ | ||
50 | (DOC_LAYOUT_PAGE_SIZE + DOC_LAYOUT_OOB_SIZE) | ||
51 | #define DOC_LAYOUT_WEAR_OFFSET (DOC_LAYOUT_PAGE_OOB_SIZE * 2) | ||
52 | #define DOC_LAYOUT_BLOCK_SIZE \ | ||
53 | (DOC_LAYOUT_PAGES_PER_BLOCK * DOC_LAYOUT_PAGE_SIZE) | ||
54 | #define DOC_ECC_BCH_SIZE 7 | ||
55 | #define DOC_ECC_BCH_COVERED_BYTES \ | ||
56 | (DOC_LAYOUT_PAGE_SIZE + DOC_LAYOUT_OOB_PAGEINFO_SZ + \ | ||
57 | DOC_LAYOUT_OOB_HAMMING_SZ + DOC_LAYOUT_OOB_BCH_SZ) | ||
58 | |||
59 | /* | ||
60 | * Blocks distribution | ||
61 | */ | ||
62 | #define DOC_LAYOUT_BLOCK_BBT 0 | ||
63 | #define DOC_LAYOUT_BLOCK_OTP 0 | ||
64 | #define DOC_LAYOUT_BLOCK_FIRST_DATA 6 | ||
65 | |||
66 | #define DOC_LAYOUT_PAGE_BBT 4 | ||
67 | |||
68 | /* | ||
69 | * Extra page OOB (16 bytes wide) layout | ||
70 | */ | ||
71 | #define DOC_LAYOUT_OOB_PAGEINFO_OFS 0 | ||
72 | #define DOC_LAYOUT_OOB_HAMMING_OFS 7 | ||
73 | #define DOC_LAYOUT_OOB_BCH_OFS 8 | ||
74 | #define DOC_LAYOUT_OOB_UNUSED_OFS 15 | ||
75 | #define DOC_LAYOUT_OOB_PAGEINFO_SZ 7 | ||
76 | #define DOC_LAYOUT_OOB_HAMMING_SZ 1 | ||
77 | #define DOC_LAYOUT_OOB_BCH_SZ 7 | ||
78 | #define DOC_LAYOUT_OOB_UNUSED_SZ 1 | ||
79 | |||
80 | |||
81 | #define DOC_CHIPID_G3 0x200 | ||
82 | #define DOC_ERASE_MARK 0xaa | ||
83 | /* | ||
84 | * Flash registers | ||
85 | */ | ||
86 | #define DOC_CHIPID 0x1000 | ||
87 | #define DOC_TEST 0x1004 | ||
88 | #define DOC_BUSLOCK 0x1006 | ||
89 | #define DOC_ENDIANCONTROL 0x1008 | ||
90 | #define DOC_DEVICESELECT 0x100a | ||
91 | #define DOC_ASICMODE 0x100c | ||
92 | #define DOC_CONFIGURATION 0x100e | ||
93 | #define DOC_INTERRUPTCONTROL 0x1010 | ||
94 | #define DOC_READADDRESS 0x101a | ||
95 | #define DOC_DATAEND 0x101e | ||
96 | #define DOC_INTERRUPTSTATUS 0x1020 | ||
97 | |||
98 | #define DOC_FLASHSEQUENCE 0x1032 | ||
99 | #define DOC_FLASHCOMMAND 0x1034 | ||
100 | #define DOC_FLASHADDRESS 0x1036 | ||
101 | #define DOC_FLASHCONTROL 0x1038 | ||
102 | #define DOC_NOP 0x103e | ||
103 | |||
104 | #define DOC_ECCCONF0 0x1040 | ||
105 | #define DOC_ECCCONF1 0x1042 | ||
106 | #define DOC_ECCPRESET 0x1044 | ||
107 | #define DOC_HAMMINGPARITY 0x1046 | ||
108 | #define DOC_BCH_SYNDROM(idx) (0x1048 + (idx << 1)) | ||
109 | |||
110 | #define DOC_PROTECTION 0x1056 | ||
111 | #define DOC_DPS0_ADDRLOW 0x1060 | ||
112 | #define DOC_DPS0_ADDRHIGH 0x1062 | ||
113 | #define DOC_DPS1_ADDRLOW 0x1064 | ||
114 | #define DOC_DPS1_ADDRHIGH 0x1066 | ||
115 | #define DOC_DPS0_STATUS 0x106c | ||
116 | #define DOC_DPS1_STATUS 0x106e | ||
117 | |||
118 | #define DOC_ASICMODECONFIRM 0x1072 | ||
119 | #define DOC_CHIPID_INV 0x1074 | ||
120 | |||
121 | /* | ||
122 | * Flash sequences | ||
123 | * A sequence is preset before one or more commands are input to the chip. | ||
124 | */ | ||
125 | #define DOC_SEQ_RESET 0x00 | ||
126 | #define DOC_SEQ_PAGE_SIZE_532 0x03 | ||
127 | #define DOC_SEQ_SET_MODE 0x09 | ||
128 | #define DOC_SEQ_READ 0x12 | ||
129 | #define DOC_SEQ_SET_PLANE1 0x0e | ||
130 | #define DOC_SEQ_SET_PLANE2 0x10 | ||
131 | #define DOC_SEQ_PAGE_SETUP 0x1d | ||
132 | |||
133 | /* | ||
134 | * Flash commands | ||
135 | */ | ||
136 | #define DOC_CMD_READ_PLANE1 0x00 | ||
137 | #define DOC_CMD_SET_ADDR_READ 0x05 | ||
138 | #define DOC_CMD_READ_ALL_PLANES 0x30 | ||
139 | #define DOC_CMD_READ_PLANE2 0x50 | ||
140 | #define DOC_CMD_READ_FLASH 0xe0 | ||
141 | #define DOC_CMD_PAGE_SIZE_532 0x3c | ||
142 | |||
143 | #define DOC_CMD_PROG_BLOCK_ADDR 0x60 | ||
144 | #define DOC_CMD_PROG_CYCLE1 0x80 | ||
145 | #define DOC_CMD_PROG_CYCLE2 0x10 | ||
146 | #define DOC_CMD_ERASECYCLE2 0xd0 | ||
147 | |||
148 | #define DOC_CMD_RELIABLE_MODE 0x22 | ||
149 | #define DOC_CMD_FAST_MODE 0xa2 | ||
150 | |||
151 | #define DOC_CMD_RESET 0xff | ||
152 | |||
153 | /* | ||
154 | * Flash register : DOC_FLASHCONTROL | ||
155 | */ | ||
156 | #define DOC_CTRL_VIOLATION 0x20 | ||
157 | #define DOC_CTRL_CE 0x10 | ||
158 | #define DOC_CTRL_UNKNOWN_BITS 0x08 | ||
159 | #define DOC_CTRL_PROTECTION_ERROR 0x04 | ||
160 | #define DOC_CTRL_SEQUENCE_ERROR 0x02 | ||
161 | #define DOC_CTRL_FLASHREADY 0x01 | ||
162 | |||
163 | /* | ||
164 | * Flash register : DOC_ASICMODE | ||
165 | */ | ||
166 | #define DOC_ASICMODE_RESET 0x00 | ||
167 | #define DOC_ASICMODE_NORMAL 0x01 | ||
168 | #define DOC_ASICMODE_POWERDOWN 0x02 | ||
169 | #define DOC_ASICMODE_MDWREN 0x04 | ||
170 | #define DOC_ASICMODE_BDETCT_RESET 0x08 | ||
171 | #define DOC_ASICMODE_RSTIN_RESET 0x10 | ||
172 | #define DOC_ASICMODE_RAM_WE 0x20 | ||
173 | |||
174 | /* | ||
175 | * Flash register : DOC_ECCCONF0 | ||
176 | */ | ||
177 | #define DOC_ECCCONF0_READ_MODE 0x8000 | ||
178 | #define DOC_ECCCONF0_AUTO_ECC_ENABLE 0x4000 | ||
179 | #define DOC_ECCCONF0_HAMMING_ENABLE 0x1000 | ||
180 | #define DOC_ECCCONF0_BCH_ENABLE 0x0800 | ||
181 | #define DOC_ECCCONF0_DATA_BYTES_MASK 0x07ff | ||
182 | |||
183 | /* | ||
184 | * Flash register : DOC_ECCCONF1 | ||
185 | */ | ||
186 | #define DOC_ECCCONF1_BCH_SYNDROM_ERR 0x80 | ||
187 | #define DOC_ECCCONF1_UNKOWN1 0x40 | ||
188 | #define DOC_ECCCONF1_UNKOWN2 0x20 | ||
189 | #define DOC_ECCCONF1_UNKOWN3 0x10 | ||
190 | #define DOC_ECCCONF1_HAMMING_BITS_MASK 0x0f | ||
191 | |||
192 | /* | ||
193 | * Flash register : DOC_PROTECTION | ||
194 | */ | ||
195 | #define DOC_PROTECT_FOUNDRY_OTP_LOCK 0x01 | ||
196 | #define DOC_PROTECT_CUSTOMER_OTP_LOCK 0x02 | ||
197 | #define DOC_PROTECT_LOCK_INPUT 0x04 | ||
198 | #define DOC_PROTECT_STICKY_LOCK 0x08 | ||
199 | #define DOC_PROTECT_PROTECTION_ENABLED 0x10 | ||
200 | #define DOC_PROTECT_IPL_DOWNLOAD_LOCK 0x20 | ||
201 | #define DOC_PROTECT_PROTECTION_ERROR 0x80 | ||
202 | |||
203 | /* | ||
204 | * Flash register : DOC_DPS0_STATUS and DOC_DPS1_STATUS | ||
205 | */ | ||
206 | #define DOC_DPS_OTP_PROTECTED 0x01 | ||
207 | #define DOC_DPS_READ_PROTECTED 0x02 | ||
208 | #define DOC_DPS_WRITE_PROTECTED 0x04 | ||
209 | #define DOC_DPS_HW_LOCK_ENABLED 0x08 | ||
210 | #define DOC_DPS_KEY_OK 0x80 | ||
211 | |||
212 | /* | ||
213 | * Flash register : DOC_CONFIGURATION | ||
214 | */ | ||
215 | #define DOC_CONF_IF_CFG 0x80 | ||
216 | #define DOC_CONF_MAX_ID_MASK 0x30 | ||
217 | #define DOC_CONF_VCCQ_3V 0x01 | ||
218 | |||
219 | /* | ||
220 | * Flash register : DOC_READADDRESS | ||
221 | */ | ||
222 | #define DOC_READADDR_INC 0x8000 | ||
223 | #define DOC_READADDR_ONE_BYTE 0x4000 | ||
224 | #define DOC_READADDR_ADDR_MASK 0x1fff | ||
225 | |||
/**
 * struct docg3 - DiskOnChip driver private data
 * @dev: the device currently under control
 * @base: mapped IO space
 * @device_id: number of the cascaded DoCG3 device (0, 1, 2 or 3)
 * @if_cfg: if true, reads are on 16bits, else reads are on 8bits
 * @max_block: number of the last valid block of the chip (block count - 1)
 * @bbt: bad block table cache
 * @debugfs_root: debugfs root node
 */
struct docg3 {
	struct device *dev;
	void __iomem *base;
	unsigned int device_id:4;
	unsigned int if_cfg:1;
	int max_block;
	u8 *bbt;
	struct dentry *debugfs_root;
};
244 | |||
245 | #define doc_err(fmt, arg...) dev_err(docg3->dev, (fmt), ## arg) | ||
246 | #define doc_info(fmt, arg...) dev_info(docg3->dev, (fmt), ## arg) | ||
247 | #define doc_dbg(fmt, arg...) dev_dbg(docg3->dev, (fmt), ## arg) | ||
248 | #define doc_vdbg(fmt, arg...) dev_vdbg(docg3->dev, (fmt), ## arg) | ||
249 | |||
/*
 * DEBUGFS_RO_ATTR - generate a read-only debugfs attribute
 * @name: base name; expands to "name_open" and the "name_fops" table
 * @show_fct: seq_file show callback that renders the attribute
 */
#define DEBUGFS_RO_ATTR(name, show_fct) \
	static int name##_open(struct inode *inode, struct file *file) \
	{ return single_open(file, show_fct, inode->i_private); }      \
	static const struct file_operations name##_fops = { \
		.owner = THIS_MODULE, \
		.open = name##_open, \
		.llseek = seq_lseek, \
		.read = seq_read, \
		.release = single_release \
	};
260 | #endif | ||
261 | |||
262 | /* | ||
263 | * Trace events part | ||
264 | */ | ||
265 | #undef TRACE_SYSTEM | ||
266 | #define TRACE_SYSTEM docg3 | ||
267 | |||
268 | #if !defined(_MTD_DOCG3_TRACE) || defined(TRACE_HEADER_MULTI_READ) | ||
269 | #define _MTD_DOCG3_TRACE | ||
270 | |||
271 | #include <linux/tracepoint.h> | ||
272 | |||
273 | TRACE_EVENT(docg3_io, | ||
274 | TP_PROTO(int op, int width, u16 reg, int val), | ||
275 | TP_ARGS(op, width, reg, val), | ||
276 | TP_STRUCT__entry( | ||
277 | __field(int, op) | ||
278 | __field(unsigned char, width) | ||
279 | __field(u16, reg) | ||
280 | __field(int, val)), | ||
281 | TP_fast_assign( | ||
282 | __entry->op = op; | ||
283 | __entry->width = width; | ||
284 | __entry->reg = reg; | ||
285 | __entry->val = val;), | ||
286 | TP_printk("docg3: %s%02d reg=%04x, val=%04x", | ||
287 | __entry->op ? "write" : "read", __entry->width, | ||
288 | __entry->reg, __entry->val) | ||
289 | ); | ||
290 | #endif | ||
291 | |||
292 | /* This part must be outside protection */ | ||
293 | #undef TRACE_INCLUDE_PATH | ||
294 | #undef TRACE_INCLUDE_FILE | ||
295 | #define TRACE_INCLUDE_PATH . | ||
296 | #define TRACE_INCLUDE_FILE docg3 | ||
297 | #include <trace/define_trace.h> | ||
diff --git a/drivers/mtd/devices/docprobe.c b/drivers/mtd/devices/docprobe.c index d374603493a7..45116bb30297 100644 --- a/drivers/mtd/devices/docprobe.c +++ b/drivers/mtd/devices/docprobe.c | |||
@@ -50,11 +50,6 @@ | |||
50 | #include <linux/mtd/nand.h> | 50 | #include <linux/mtd/nand.h> |
51 | #include <linux/mtd/doc2000.h> | 51 | #include <linux/mtd/doc2000.h> |
52 | 52 | ||
53 | /* Where to look for the devices? */ | ||
54 | #ifndef CONFIG_MTD_DOCPROBE_ADDRESS | ||
55 | #define CONFIG_MTD_DOCPROBE_ADDRESS 0 | ||
56 | #endif | ||
57 | |||
58 | 53 | ||
59 | static unsigned long doc_config_location = CONFIG_MTD_DOCPROBE_ADDRESS; | 54 | static unsigned long doc_config_location = CONFIG_MTD_DOCPROBE_ADDRESS; |
60 | module_param(doc_config_location, ulong, 0); | 55 | module_param(doc_config_location, ulong, 0); |
diff --git a/drivers/mtd/devices/lart.c b/drivers/mtd/devices/lart.c index 772a0ff89e0f..3a11ea628e58 100644 --- a/drivers/mtd/devices/lart.c +++ b/drivers/mtd/devices/lart.c | |||
@@ -34,9 +34,6 @@ | |||
34 | /* debugging */ | 34 | /* debugging */ |
35 | //#define LART_DEBUG | 35 | //#define LART_DEBUG |
36 | 36 | ||
37 | /* partition support */ | ||
38 | #define HAVE_PARTITIONS | ||
39 | |||
40 | #include <linux/kernel.h> | 37 | #include <linux/kernel.h> |
41 | #include <linux/module.h> | 38 | #include <linux/module.h> |
42 | #include <linux/types.h> | 39 | #include <linux/types.h> |
@@ -44,9 +41,7 @@ | |||
44 | #include <linux/errno.h> | 41 | #include <linux/errno.h> |
45 | #include <linux/string.h> | 42 | #include <linux/string.h> |
46 | #include <linux/mtd/mtd.h> | 43 | #include <linux/mtd/mtd.h> |
47 | #ifdef HAVE_PARTITIONS | ||
48 | #include <linux/mtd/partitions.h> | 44 | #include <linux/mtd/partitions.h> |
49 | #endif | ||
50 | 45 | ||
51 | #ifndef CONFIG_SA1100_LART | 46 | #ifndef CONFIG_SA1100_LART |
52 | #error This is for LART architecture only | 47 | #error This is for LART architecture only |
@@ -598,7 +593,6 @@ static struct mtd_erase_region_info erase_regions[] = { | |||
598 | } | 593 | } |
599 | }; | 594 | }; |
600 | 595 | ||
601 | #ifdef HAVE_PARTITIONS | ||
602 | static struct mtd_partition lart_partitions[] = { | 596 | static struct mtd_partition lart_partitions[] = { |
603 | /* blob */ | 597 | /* blob */ |
604 | { | 598 | { |
@@ -619,7 +613,7 @@ static struct mtd_partition lart_partitions[] = { | |||
619 | .size = INITRD_LEN, /* MTDPART_SIZ_FULL */ | 613 | .size = INITRD_LEN, /* MTDPART_SIZ_FULL */ |
620 | } | 614 | } |
621 | }; | 615 | }; |
622 | #endif | 616 | #define NUM_PARTITIONS ARRAY_SIZE(lart_partitions) |
623 | 617 | ||
624 | static int __init lart_flash_init (void) | 618 | static int __init lart_flash_init (void) |
625 | { | 619 | { |
@@ -668,7 +662,6 @@ static int __init lart_flash_init (void) | |||
668 | result,mtd.eraseregions[result].erasesize,mtd.eraseregions[result].erasesize / 1024, | 662 | result,mtd.eraseregions[result].erasesize,mtd.eraseregions[result].erasesize / 1024, |
669 | result,mtd.eraseregions[result].numblocks); | 663 | result,mtd.eraseregions[result].numblocks); |
670 | 664 | ||
671 | #ifdef HAVE_PARTITIONS | ||
672 | printk ("\npartitions = %d\n", ARRAY_SIZE(lart_partitions)); | 665 | printk ("\npartitions = %d\n", ARRAY_SIZE(lart_partitions)); |
673 | 666 | ||
674 | for (result = 0; result < ARRAY_SIZE(lart_partitions); result++) | 667 | for (result = 0; result < ARRAY_SIZE(lart_partitions); result++) |
@@ -681,25 +674,16 @@ static int __init lart_flash_init (void) | |||
681 | result,lart_partitions[result].offset, | 674 | result,lart_partitions[result].offset, |
682 | result,lart_partitions[result].size,lart_partitions[result].size / 1024); | 675 | result,lart_partitions[result].size,lart_partitions[result].size / 1024); |
683 | #endif | 676 | #endif |
684 | #endif | ||
685 | 677 | ||
686 | #ifndef HAVE_PARTITIONS | ||
687 | result = mtd_device_register(&mtd, NULL, 0); | ||
688 | #else | ||
689 | result = mtd_device_register(&mtd, lart_partitions, | 678 | result = mtd_device_register(&mtd, lart_partitions, |
690 | ARRAY_SIZE(lart_partitions)); | 679 | ARRAY_SIZE(lart_partitions)); |
691 | #endif | ||
692 | 680 | ||
693 | return (result); | 681 | return (result); |
694 | } | 682 | } |
695 | 683 | ||
696 | static void __exit lart_flash_exit (void) | 684 | static void __exit lart_flash_exit (void) |
697 | { | 685 | { |
698 | #ifndef HAVE_PARTITIONS | ||
699 | mtd_device_unregister(&mtd); | ||
700 | #else | ||
701 | mtd_device_unregister(&mtd); | 686 | mtd_device_unregister(&mtd); |
702 | #endif | ||
703 | } | 687 | } |
704 | 688 | ||
705 | module_init (lart_flash_init); | 689 | module_init (lart_flash_init); |
diff --git a/drivers/mtd/devices/m25p80.c b/drivers/mtd/devices/m25p80.c index 35180e475c4c..884904d3f9d2 100644 --- a/drivers/mtd/devices/m25p80.c +++ b/drivers/mtd/devices/m25p80.c | |||
@@ -30,6 +30,7 @@ | |||
30 | #include <linux/mtd/cfi.h> | 30 | #include <linux/mtd/cfi.h> |
31 | #include <linux/mtd/mtd.h> | 31 | #include <linux/mtd/mtd.h> |
32 | #include <linux/mtd/partitions.h> | 32 | #include <linux/mtd/partitions.h> |
33 | #include <linux/of_platform.h> | ||
33 | 34 | ||
34 | #include <linux/spi/spi.h> | 35 | #include <linux/spi/spi.h> |
35 | #include <linux/spi/flash.h> | 36 | #include <linux/spi/flash.h> |
@@ -88,7 +89,6 @@ struct m25p { | |||
88 | struct spi_device *spi; | 89 | struct spi_device *spi; |
89 | struct mutex lock; | 90 | struct mutex lock; |
90 | struct mtd_info mtd; | 91 | struct mtd_info mtd; |
91 | unsigned partitioned:1; | ||
92 | u16 page_size; | 92 | u16 page_size; |
93 | u16 addr_width; | 93 | u16 addr_width; |
94 | u8 erase_opcode; | 94 | u8 erase_opcode; |
@@ -209,9 +209,8 @@ static int wait_till_ready(struct m25p *flash) | |||
209 | */ | 209 | */ |
210 | static int erase_chip(struct m25p *flash) | 210 | static int erase_chip(struct m25p *flash) |
211 | { | 211 | { |
212 | DEBUG(MTD_DEBUG_LEVEL3, "%s: %s %lldKiB\n", | 212 | pr_debug("%s: %s %lldKiB\n", dev_name(&flash->spi->dev), __func__, |
213 | dev_name(&flash->spi->dev), __func__, | 213 | (long long)(flash->mtd.size >> 10)); |
214 | (long long)(flash->mtd.size >> 10)); | ||
215 | 214 | ||
216 | /* Wait until finished previous write command. */ | 215 | /* Wait until finished previous write command. */ |
217 | if (wait_till_ready(flash)) | 216 | if (wait_till_ready(flash)) |
@@ -250,9 +249,8 @@ static int m25p_cmdsz(struct m25p *flash) | |||
250 | */ | 249 | */ |
251 | static int erase_sector(struct m25p *flash, u32 offset) | 250 | static int erase_sector(struct m25p *flash, u32 offset) |
252 | { | 251 | { |
253 | DEBUG(MTD_DEBUG_LEVEL3, "%s: %s %dKiB at 0x%08x\n", | 252 | pr_debug("%s: %s %dKiB at 0x%08x\n", dev_name(&flash->spi->dev), |
254 | dev_name(&flash->spi->dev), __func__, | 253 | __func__, flash->mtd.erasesize / 1024, offset); |
255 | flash->mtd.erasesize / 1024, offset); | ||
256 | 254 | ||
257 | /* Wait until finished previous write command. */ | 255 | /* Wait until finished previous write command. */ |
258 | if (wait_till_ready(flash)) | 256 | if (wait_till_ready(flash)) |
@@ -286,9 +284,9 @@ static int m25p80_erase(struct mtd_info *mtd, struct erase_info *instr) | |||
286 | u32 addr,len; | 284 | u32 addr,len; |
287 | uint32_t rem; | 285 | uint32_t rem; |
288 | 286 | ||
289 | DEBUG(MTD_DEBUG_LEVEL2, "%s: %s %s 0x%llx, len %lld\n", | 287 | pr_debug("%s: %s at 0x%llx, len %lld\n", dev_name(&flash->spi->dev), |
290 | dev_name(&flash->spi->dev), __func__, "at", | 288 | __func__, (long long)instr->addr, |
291 | (long long)instr->addr, (long long)instr->len); | 289 | (long long)instr->len); |
292 | 290 | ||
293 | /* sanity checks */ | 291 | /* sanity checks */ |
294 | if (instr->addr + instr->len > flash->mtd.size) | 292 | if (instr->addr + instr->len > flash->mtd.size) |
@@ -348,9 +346,8 @@ static int m25p80_read(struct mtd_info *mtd, loff_t from, size_t len, | |||
348 | struct spi_transfer t[2]; | 346 | struct spi_transfer t[2]; |
349 | struct spi_message m; | 347 | struct spi_message m; |
350 | 348 | ||
351 | DEBUG(MTD_DEBUG_LEVEL2, "%s: %s %s 0x%08x, len %zd\n", | 349 | pr_debug("%s: %s from 0x%08x, len %zd\n", dev_name(&flash->spi->dev), |
352 | dev_name(&flash->spi->dev), __func__, "from", | 350 | __func__, (u32)from, len); |
353 | (u32)from, len); | ||
354 | 351 | ||
355 | /* sanity checks */ | 352 | /* sanity checks */ |
356 | if (!len) | 353 | if (!len) |
@@ -417,9 +414,8 @@ static int m25p80_write(struct mtd_info *mtd, loff_t to, size_t len, | |||
417 | struct spi_transfer t[2]; | 414 | struct spi_transfer t[2]; |
418 | struct spi_message m; | 415 | struct spi_message m; |
419 | 416 | ||
420 | DEBUG(MTD_DEBUG_LEVEL2, "%s: %s %s 0x%08x, len %zd\n", | 417 | pr_debug("%s: %s to 0x%08x, len %zd\n", dev_name(&flash->spi->dev), |
421 | dev_name(&flash->spi->dev), __func__, "to", | 418 | __func__, (u32)to, len); |
422 | (u32)to, len); | ||
423 | 419 | ||
424 | *retlen = 0; | 420 | *retlen = 0; |
425 | 421 | ||
@@ -510,9 +506,8 @@ static int sst_write(struct mtd_info *mtd, loff_t to, size_t len, | |||
510 | size_t actual; | 506 | size_t actual; |
511 | int cmd_sz, ret; | 507 | int cmd_sz, ret; |
512 | 508 | ||
513 | DEBUG(MTD_DEBUG_LEVEL2, "%s: %s %s 0x%08x, len %zd\n", | 509 | pr_debug("%s: %s to 0x%08x, len %zd\n", dev_name(&flash->spi->dev), |
514 | dev_name(&flash->spi->dev), __func__, "to", | 510 | __func__, (u32)to, len); |
515 | (u32)to, len); | ||
516 | 511 | ||
517 | *retlen = 0; | 512 | *retlen = 0; |
518 | 513 | ||
@@ -661,6 +656,7 @@ static const struct spi_device_id m25p_ids[] = { | |||
661 | { "at25fs040", INFO(0x1f6604, 0, 64 * 1024, 8, SECT_4K) }, | 656 | { "at25fs040", INFO(0x1f6604, 0, 64 * 1024, 8, SECT_4K) }, |
662 | 657 | ||
663 | { "at25df041a", INFO(0x1f4401, 0, 64 * 1024, 8, SECT_4K) }, | 658 | { "at25df041a", INFO(0x1f4401, 0, 64 * 1024, 8, SECT_4K) }, |
659 | { "at25df321a", INFO(0x1f4701, 0, 64 * 1024, 64, SECT_4K) }, | ||
664 | { "at25df641", INFO(0x1f4800, 0, 64 * 1024, 128, SECT_4K) }, | 660 | { "at25df641", INFO(0x1f4800, 0, 64 * 1024, 128, SECT_4K) }, |
665 | 661 | ||
666 | { "at26f004", INFO(0x1f0400, 0, 64 * 1024, 8, SECT_4K) }, | 662 | { "at26f004", INFO(0x1f0400, 0, 64 * 1024, 8, SECT_4K) }, |
@@ -671,6 +667,7 @@ static const struct spi_device_id m25p_ids[] = { | |||
671 | /* EON -- en25xxx */ | 667 | /* EON -- en25xxx */ |
672 | { "en25f32", INFO(0x1c3116, 0, 64 * 1024, 64, SECT_4K) }, | 668 | { "en25f32", INFO(0x1c3116, 0, 64 * 1024, 64, SECT_4K) }, |
673 | { "en25p32", INFO(0x1c2016, 0, 64 * 1024, 64, 0) }, | 669 | { "en25p32", INFO(0x1c2016, 0, 64 * 1024, 64, 0) }, |
670 | { "en25q32b", INFO(0x1c3016, 0, 64 * 1024, 64, 0) }, | ||
674 | { "en25p64", INFO(0x1c2017, 0, 64 * 1024, 128, 0) }, | 671 | { "en25p64", INFO(0x1c2017, 0, 64 * 1024, 128, 0) }, |
675 | 672 | ||
676 | /* Intel/Numonyx -- xxxs33b */ | 673 | /* Intel/Numonyx -- xxxs33b */ |
@@ -788,8 +785,8 @@ static const struct spi_device_id *__devinit jedec_probe(struct spi_device *spi) | |||
788 | */ | 785 | */ |
789 | tmp = spi_write_then_read(spi, &code, 1, id, 5); | 786 | tmp = spi_write_then_read(spi, &code, 1, id, 5); |
790 | if (tmp < 0) { | 787 | if (tmp < 0) { |
791 | DEBUG(MTD_DEBUG_LEVEL0, "%s: error %d reading JEDEC ID\n", | 788 | pr_debug("%s: error %d reading JEDEC ID\n", |
792 | dev_name(&spi->dev), tmp); | 789 | dev_name(&spi->dev), tmp); |
793 | return ERR_PTR(tmp); | 790 | return ERR_PTR(tmp); |
794 | } | 791 | } |
795 | jedec = id[0]; | 792 | jedec = id[0]; |
@@ -825,8 +822,12 @@ static int __devinit m25p_probe(struct spi_device *spi) | |||
825 | struct m25p *flash; | 822 | struct m25p *flash; |
826 | struct flash_info *info; | 823 | struct flash_info *info; |
827 | unsigned i; | 824 | unsigned i; |
828 | struct mtd_partition *parts = NULL; | 825 | struct mtd_part_parser_data ppdata; |
829 | int nr_parts = 0; | 826 | |
827 | #ifdef CONFIG_MTD_OF_PARTS | ||
828 | if (!of_device_is_available(spi->dev.of_node)) | ||
829 | return -ENODEV; | ||
830 | #endif | ||
830 | 831 | ||
831 | /* Platform data helps sort out which chip type we have, as | 832 | /* Platform data helps sort out which chip type we have, as |
832 | * well as how this board partitions it. If we don't have | 833 | * well as how this board partitions it. If we don't have |
@@ -928,6 +929,7 @@ static int __devinit m25p_probe(struct spi_device *spi) | |||
928 | if (info->flags & M25P_NO_ERASE) | 929 | if (info->flags & M25P_NO_ERASE) |
929 | flash->mtd.flags |= MTD_NO_ERASE; | 930 | flash->mtd.flags |= MTD_NO_ERASE; |
930 | 931 | ||
932 | ppdata.of_node = spi->dev.of_node; | ||
931 | flash->mtd.dev.parent = &spi->dev; | 933 | flash->mtd.dev.parent = &spi->dev; |
932 | flash->page_size = info->page_size; | 934 | flash->page_size = info->page_size; |
933 | 935 | ||
@@ -945,8 +947,7 @@ static int __devinit m25p_probe(struct spi_device *spi) | |||
945 | dev_info(&spi->dev, "%s (%lld Kbytes)\n", id->name, | 947 | dev_info(&spi->dev, "%s (%lld Kbytes)\n", id->name, |
946 | (long long)flash->mtd.size >> 10); | 948 | (long long)flash->mtd.size >> 10); |
947 | 949 | ||
948 | DEBUG(MTD_DEBUG_LEVEL2, | 950 | pr_debug("mtd .name = %s, .size = 0x%llx (%lldMiB) " |
949 | "mtd .name = %s, .size = 0x%llx (%lldMiB) " | ||
950 | ".erasesize = 0x%.8x (%uKiB) .numeraseregions = %d\n", | 951 | ".erasesize = 0x%.8x (%uKiB) .numeraseregions = %d\n", |
951 | flash->mtd.name, | 952 | flash->mtd.name, |
952 | (long long)flash->mtd.size, (long long)(flash->mtd.size >> 20), | 953 | (long long)flash->mtd.size, (long long)(flash->mtd.size >> 20), |
@@ -955,8 +956,7 @@ static int __devinit m25p_probe(struct spi_device *spi) | |||
955 | 956 | ||
956 | if (flash->mtd.numeraseregions) | 957 | if (flash->mtd.numeraseregions) |
957 | for (i = 0; i < flash->mtd.numeraseregions; i++) | 958 | for (i = 0; i < flash->mtd.numeraseregions; i++) |
958 | DEBUG(MTD_DEBUG_LEVEL2, | 959 | pr_debug("mtd.eraseregions[%d] = { .offset = 0x%llx, " |
959 | "mtd.eraseregions[%d] = { .offset = 0x%llx, " | ||
960 | ".erasesize = 0x%.8x (%uKiB), " | 960 | ".erasesize = 0x%.8x (%uKiB), " |
961 | ".numblocks = %d }\n", | 961 | ".numblocks = %d }\n", |
962 | i, (long long)flash->mtd.eraseregions[i].offset, | 962 | i, (long long)flash->mtd.eraseregions[i].offset, |
@@ -968,41 +968,9 @@ static int __devinit m25p_probe(struct spi_device *spi) | |||
968 | /* partitions should match sector boundaries; and it may be good to | 968 | /* partitions should match sector boundaries; and it may be good to |
969 | * use readonly partitions for writeprotected sectors (BP2..BP0). | 969 | * use readonly partitions for writeprotected sectors (BP2..BP0). |
970 | */ | 970 | */ |
971 | if (mtd_has_cmdlinepart()) { | 971 | return mtd_device_parse_register(&flash->mtd, NULL, &ppdata, |
972 | static const char *part_probes[] | 972 | data ? data->parts : NULL, |
973 | = { "cmdlinepart", NULL, }; | 973 | data ? data->nr_parts : 0); |
974 | |||
975 | nr_parts = parse_mtd_partitions(&flash->mtd, | ||
976 | part_probes, &parts, 0); | ||
977 | } | ||
978 | |||
979 | if (nr_parts <= 0 && data && data->parts) { | ||
980 | parts = data->parts; | ||
981 | nr_parts = data->nr_parts; | ||
982 | } | ||
983 | |||
984 | #ifdef CONFIG_MTD_OF_PARTS | ||
985 | if (nr_parts <= 0 && spi->dev.of_node) { | ||
986 | nr_parts = of_mtd_parse_partitions(&spi->dev, | ||
987 | spi->dev.of_node, &parts); | ||
988 | } | ||
989 | #endif | ||
990 | |||
991 | if (nr_parts > 0) { | ||
992 | for (i = 0; i < nr_parts; i++) { | ||
993 | DEBUG(MTD_DEBUG_LEVEL2, "partitions[%d] = " | ||
994 | "{.name = %s, .offset = 0x%llx, " | ||
995 | ".size = 0x%llx (%lldKiB) }\n", | ||
996 | i, parts[i].name, | ||
997 | (long long)parts[i].offset, | ||
998 | (long long)parts[i].size, | ||
999 | (long long)(parts[i].size >> 10)); | ||
1000 | } | ||
1001 | flash->partitioned = 1; | ||
1002 | } | ||
1003 | |||
1004 | return mtd_device_register(&flash->mtd, parts, nr_parts) == 1 ? | ||
1005 | -ENODEV : 0; | ||
1006 | } | 974 | } |
1007 | 975 | ||
1008 | 976 | ||
diff --git a/drivers/mtd/devices/mtd_dataflash.c b/drivers/mtd/devices/mtd_dataflash.c index 13749d458a31..d75c7af18a63 100644 --- a/drivers/mtd/devices/mtd_dataflash.c +++ b/drivers/mtd/devices/mtd_dataflash.c | |||
@@ -17,6 +17,8 @@ | |||
17 | #include <linux/mutex.h> | 17 | #include <linux/mutex.h> |
18 | #include <linux/err.h> | 18 | #include <linux/err.h> |
19 | #include <linux/math64.h> | 19 | #include <linux/math64.h> |
20 | #include <linux/of.h> | ||
21 | #include <linux/of_device.h> | ||
20 | 22 | ||
21 | #include <linux/spi/spi.h> | 23 | #include <linux/spi/spi.h> |
22 | #include <linux/spi/flash.h> | 24 | #include <linux/spi/flash.h> |
@@ -24,7 +26,6 @@ | |||
24 | #include <linux/mtd/mtd.h> | 26 | #include <linux/mtd/mtd.h> |
25 | #include <linux/mtd/partitions.h> | 27 | #include <linux/mtd/partitions.h> |
26 | 28 | ||
27 | |||
28 | /* | 29 | /* |
29 | * DataFlash is a kind of SPI flash. Most AT45 chips have two buffers in | 30 | * DataFlash is a kind of SPI flash. Most AT45 chips have two buffers in |
30 | * each chip, which may be used for double buffered I/O; but this driver | 31 | * each chip, which may be used for double buffered I/O; but this driver |
@@ -98,6 +99,16 @@ struct dataflash { | |||
98 | struct mtd_info mtd; | 99 | struct mtd_info mtd; |
99 | }; | 100 | }; |
100 | 101 | ||
102 | #ifdef CONFIG_OF | ||
103 | static const struct of_device_id dataflash_dt_ids[] = { | ||
104 | { .compatible = "atmel,at45", }, | ||
105 | { .compatible = "atmel,dataflash", }, | ||
106 | { /* sentinel */ } | ||
107 | }; | ||
108 | #else | ||
109 | #define dataflash_dt_ids NULL | ||
110 | #endif | ||
111 | |||
101 | /* ......................................................................... */ | 112 | /* ......................................................................... */ |
102 | 113 | ||
103 | /* | 114 | /* |
@@ -122,7 +133,7 @@ static int dataflash_waitready(struct spi_device *spi) | |||
122 | for (;;) { | 133 | for (;;) { |
123 | status = dataflash_status(spi); | 134 | status = dataflash_status(spi); |
124 | if (status < 0) { | 135 | if (status < 0) { |
125 | DEBUG(MTD_DEBUG_LEVEL1, "%s: status %d?\n", | 136 | pr_debug("%s: status %d?\n", |
126 | dev_name(&spi->dev), status); | 137 | dev_name(&spi->dev), status); |
127 | status = 0; | 138 | status = 0; |
128 | } | 139 | } |
@@ -149,7 +160,7 @@ static int dataflash_erase(struct mtd_info *mtd, struct erase_info *instr) | |||
149 | uint8_t *command; | 160 | uint8_t *command; |
150 | uint32_t rem; | 161 | uint32_t rem; |
151 | 162 | ||
152 | DEBUG(MTD_DEBUG_LEVEL2, "%s: erase addr=0x%llx len 0x%llx\n", | 163 | pr_debug("%s: erase addr=0x%llx len 0x%llx\n", |
153 | dev_name(&spi->dev), (long long)instr->addr, | 164 | dev_name(&spi->dev), (long long)instr->addr, |
154 | (long long)instr->len); | 165 | (long long)instr->len); |
155 | 166 | ||
@@ -187,7 +198,7 @@ static int dataflash_erase(struct mtd_info *mtd, struct erase_info *instr) | |||
187 | command[2] = (uint8_t)(pageaddr >> 8); | 198 | command[2] = (uint8_t)(pageaddr >> 8); |
188 | command[3] = 0; | 199 | command[3] = 0; |
189 | 200 | ||
190 | DEBUG(MTD_DEBUG_LEVEL3, "ERASE %s: (%x) %x %x %x [%i]\n", | 201 | pr_debug("ERASE %s: (%x) %x %x %x [%i]\n", |
191 | do_block ? "block" : "page", | 202 | do_block ? "block" : "page", |
192 | command[0], command[1], command[2], command[3], | 203 | command[0], command[1], command[2], command[3], |
193 | pageaddr); | 204 | pageaddr); |
@@ -238,8 +249,8 @@ static int dataflash_read(struct mtd_info *mtd, loff_t from, size_t len, | |||
238 | uint8_t *command; | 249 | uint8_t *command; |
239 | int status; | 250 | int status; |
240 | 251 | ||
241 | DEBUG(MTD_DEBUG_LEVEL2, "%s: read 0x%x..0x%x\n", | 252 | pr_debug("%s: read 0x%x..0x%x\n", dev_name(&priv->spi->dev), |
242 | dev_name(&priv->spi->dev), (unsigned)from, (unsigned)(from + len)); | 253 | (unsigned)from, (unsigned)(from + len)); |
243 | 254 | ||
244 | *retlen = 0; | 255 | *retlen = 0; |
245 | 256 | ||
@@ -255,7 +266,7 @@ static int dataflash_read(struct mtd_info *mtd, loff_t from, size_t len, | |||
255 | 266 | ||
256 | command = priv->command; | 267 | command = priv->command; |
257 | 268 | ||
258 | DEBUG(MTD_DEBUG_LEVEL3, "READ: (%x) %x %x %x\n", | 269 | pr_debug("READ: (%x) %x %x %x\n", |
259 | command[0], command[1], command[2], command[3]); | 270 | command[0], command[1], command[2], command[3]); |
260 | 271 | ||
261 | spi_message_init(&msg); | 272 | spi_message_init(&msg); |
@@ -287,7 +298,7 @@ static int dataflash_read(struct mtd_info *mtd, loff_t from, size_t len, | |||
287 | *retlen = msg.actual_length - 8; | 298 | *retlen = msg.actual_length - 8; |
288 | status = 0; | 299 | status = 0; |
289 | } else | 300 | } else |
290 | DEBUG(MTD_DEBUG_LEVEL1, "%s: read %x..%x --> %d\n", | 301 | pr_debug("%s: read %x..%x --> %d\n", |
291 | dev_name(&priv->spi->dev), | 302 | dev_name(&priv->spi->dev), |
292 | (unsigned)from, (unsigned)(from + len), | 303 | (unsigned)from, (unsigned)(from + len), |
293 | status); | 304 | status); |
@@ -314,7 +325,7 @@ static int dataflash_write(struct mtd_info *mtd, loff_t to, size_t len, | |||
314 | int status = -EINVAL; | 325 | int status = -EINVAL; |
315 | uint8_t *command; | 326 | uint8_t *command; |
316 | 327 | ||
317 | DEBUG(MTD_DEBUG_LEVEL2, "%s: write 0x%x..0x%x\n", | 328 | pr_debug("%s: write 0x%x..0x%x\n", |
318 | dev_name(&spi->dev), (unsigned)to, (unsigned)(to + len)); | 329 | dev_name(&spi->dev), (unsigned)to, (unsigned)(to + len)); |
319 | 330 | ||
320 | *retlen = 0; | 331 | *retlen = 0; |
@@ -340,7 +351,7 @@ static int dataflash_write(struct mtd_info *mtd, loff_t to, size_t len, | |||
340 | 351 | ||
341 | mutex_lock(&priv->lock); | 352 | mutex_lock(&priv->lock); |
342 | while (remaining > 0) { | 353 | while (remaining > 0) { |
343 | DEBUG(MTD_DEBUG_LEVEL3, "write @ %i:%i len=%i\n", | 354 | pr_debug("write @ %i:%i len=%i\n", |
344 | pageaddr, offset, writelen); | 355 | pageaddr, offset, writelen); |
345 | 356 | ||
346 | /* REVISIT: | 357 | /* REVISIT: |
@@ -368,12 +379,12 @@ static int dataflash_write(struct mtd_info *mtd, loff_t to, size_t len, | |||
368 | command[2] = (addr & 0x0000FF00) >> 8; | 379 | command[2] = (addr & 0x0000FF00) >> 8; |
369 | command[3] = 0; | 380 | command[3] = 0; |
370 | 381 | ||
371 | DEBUG(MTD_DEBUG_LEVEL3, "TRANSFER: (%x) %x %x %x\n", | 382 | pr_debug("TRANSFER: (%x) %x %x %x\n", |
372 | command[0], command[1], command[2], command[3]); | 383 | command[0], command[1], command[2], command[3]); |
373 | 384 | ||
374 | status = spi_sync(spi, &msg); | 385 | status = spi_sync(spi, &msg); |
375 | if (status < 0) | 386 | if (status < 0) |
376 | DEBUG(MTD_DEBUG_LEVEL1, "%s: xfer %u -> %d \n", | 387 | pr_debug("%s: xfer %u -> %d\n", |
377 | dev_name(&spi->dev), addr, status); | 388 | dev_name(&spi->dev), addr, status); |
378 | 389 | ||
379 | (void) dataflash_waitready(priv->spi); | 390 | (void) dataflash_waitready(priv->spi); |
@@ -386,7 +397,7 @@ static int dataflash_write(struct mtd_info *mtd, loff_t to, size_t len, | |||
386 | command[2] = (addr & 0x0000FF00) >> 8; | 397 | command[2] = (addr & 0x0000FF00) >> 8; |
387 | command[3] = (addr & 0x000000FF); | 398 | command[3] = (addr & 0x000000FF); |
388 | 399 | ||
389 | DEBUG(MTD_DEBUG_LEVEL3, "PROGRAM: (%x) %x %x %x\n", | 400 | pr_debug("PROGRAM: (%x) %x %x %x\n", |
390 | command[0], command[1], command[2], command[3]); | 401 | command[0], command[1], command[2], command[3]); |
391 | 402 | ||
392 | x[1].tx_buf = writebuf; | 403 | x[1].tx_buf = writebuf; |
@@ -395,7 +406,7 @@ static int dataflash_write(struct mtd_info *mtd, loff_t to, size_t len, | |||
395 | status = spi_sync(spi, &msg); | 406 | status = spi_sync(spi, &msg); |
396 | spi_transfer_del(x + 1); | 407 | spi_transfer_del(x + 1); |
397 | if (status < 0) | 408 | if (status < 0) |
398 | DEBUG(MTD_DEBUG_LEVEL1, "%s: pgm %u/%u -> %d \n", | 409 | pr_debug("%s: pgm %u/%u -> %d\n", |
399 | dev_name(&spi->dev), addr, writelen, status); | 410 | dev_name(&spi->dev), addr, writelen, status); |
400 | 411 | ||
401 | (void) dataflash_waitready(priv->spi); | 412 | (void) dataflash_waitready(priv->spi); |
@@ -410,12 +421,12 @@ static int dataflash_write(struct mtd_info *mtd, loff_t to, size_t len, | |||
410 | command[2] = (addr & 0x0000FF00) >> 8; | 421 | command[2] = (addr & 0x0000FF00) >> 8; |
411 | command[3] = 0; | 422 | command[3] = 0; |
412 | 423 | ||
413 | DEBUG(MTD_DEBUG_LEVEL3, "COMPARE: (%x) %x %x %x\n", | 424 | pr_debug("COMPARE: (%x) %x %x %x\n", |
414 | command[0], command[1], command[2], command[3]); | 425 | command[0], command[1], command[2], command[3]); |
415 | 426 | ||
416 | status = spi_sync(spi, &msg); | 427 | status = spi_sync(spi, &msg); |
417 | if (status < 0) | 428 | if (status < 0) |
418 | DEBUG(MTD_DEBUG_LEVEL1, "%s: compare %u -> %d \n", | 429 | pr_debug("%s: compare %u -> %d\n", |
419 | dev_name(&spi->dev), addr, status); | 430 | dev_name(&spi->dev), addr, status); |
420 | 431 | ||
421 | status = dataflash_waitready(priv->spi); | 432 | status = dataflash_waitready(priv->spi); |
@@ -634,11 +645,10 @@ add_dataflash_otp(struct spi_device *spi, char *name, | |||
634 | { | 645 | { |
635 | struct dataflash *priv; | 646 | struct dataflash *priv; |
636 | struct mtd_info *device; | 647 | struct mtd_info *device; |
648 | struct mtd_part_parser_data ppdata; | ||
637 | struct flash_platform_data *pdata = spi->dev.platform_data; | 649 | struct flash_platform_data *pdata = spi->dev.platform_data; |
638 | char *otp_tag = ""; | 650 | char *otp_tag = ""; |
639 | int err = 0; | 651 | int err = 0; |
640 | struct mtd_partition *parts; | ||
641 | int nr_parts = 0; | ||
642 | 652 | ||
643 | priv = kzalloc(sizeof *priv, GFP_KERNEL); | 653 | priv = kzalloc(sizeof *priv, GFP_KERNEL); |
644 | if (!priv) | 654 | if (!priv) |
@@ -677,28 +687,11 @@ add_dataflash_otp(struct spi_device *spi, char *name, | |||
677 | pagesize, otp_tag); | 687 | pagesize, otp_tag); |
678 | dev_set_drvdata(&spi->dev, priv); | 688 | dev_set_drvdata(&spi->dev, priv); |
679 | 689 | ||
680 | if (mtd_has_cmdlinepart()) { | 690 | ppdata.of_node = spi->dev.of_node; |
681 | static const char *part_probes[] = { "cmdlinepart", NULL, }; | 691 | err = mtd_device_parse_register(device, NULL, &ppdata, |
682 | 692 | pdata ? pdata->parts : NULL, | |
683 | nr_parts = parse_mtd_partitions(device, part_probes, &parts, | 693 | pdata ? pdata->nr_parts : 0); |
684 | 0); | ||
685 | } | ||
686 | 694 | ||
687 | if (nr_parts <= 0 && pdata && pdata->parts) { | ||
688 | parts = pdata->parts; | ||
689 | nr_parts = pdata->nr_parts; | ||
690 | } | ||
691 | |||
692 | if (nr_parts > 0) { | ||
693 | priv->partitioned = 1; | ||
694 | err = mtd_device_register(device, parts, nr_parts); | ||
695 | goto out; | ||
696 | } | ||
697 | |||
698 | if (mtd_device_register(device, NULL, 0) == 1) | ||
699 | err = -ENODEV; | ||
700 | |||
701 | out: | ||
702 | if (!err) | 695 | if (!err) |
703 | return 0; | 696 | return 0; |
704 | 697 | ||
@@ -787,7 +780,7 @@ static struct flash_info *__devinit jedec_probe(struct spi_device *spi) | |||
787 | */ | 780 | */ |
788 | tmp = spi_write_then_read(spi, &code, 1, id, 3); | 781 | tmp = spi_write_then_read(spi, &code, 1, id, 3); |
789 | if (tmp < 0) { | 782 | if (tmp < 0) { |
790 | DEBUG(MTD_DEBUG_LEVEL0, "%s: error %d reading JEDEC ID\n", | 783 | pr_debug("%s: error %d reading JEDEC ID\n", |
791 | dev_name(&spi->dev), tmp); | 784 | dev_name(&spi->dev), tmp); |
792 | return ERR_PTR(tmp); | 785 | return ERR_PTR(tmp); |
793 | } | 786 | } |
@@ -804,7 +797,7 @@ static struct flash_info *__devinit jedec_probe(struct spi_device *spi) | |||
804 | tmp < ARRAY_SIZE(dataflash_data); | 797 | tmp < ARRAY_SIZE(dataflash_data); |
805 | tmp++, info++) { | 798 | tmp++, info++) { |
806 | if (info->jedec_id == jedec) { | 799 | if (info->jedec_id == jedec) { |
807 | DEBUG(MTD_DEBUG_LEVEL1, "%s: OTP, sector protect%s\n", | 800 | pr_debug("%s: OTP, sector protect%s\n", |
808 | dev_name(&spi->dev), | 801 | dev_name(&spi->dev), |
809 | (info->flags & SUP_POW2PS) | 802 | (info->flags & SUP_POW2PS) |
810 | ? ", binary pagesize" : "" | 803 | ? ", binary pagesize" : "" |
@@ -812,8 +805,7 @@ static struct flash_info *__devinit jedec_probe(struct spi_device *spi) | |||
812 | if (info->flags & SUP_POW2PS) { | 805 | if (info->flags & SUP_POW2PS) { |
813 | status = dataflash_status(spi); | 806 | status = dataflash_status(spi); |
814 | if (status < 0) { | 807 | if (status < 0) { |
815 | DEBUG(MTD_DEBUG_LEVEL1, | 808 | pr_debug("%s: status error %d\n", |
816 | "%s: status error %d\n", | ||
817 | dev_name(&spi->dev), status); | 809 | dev_name(&spi->dev), status); |
818 | return ERR_PTR(status); | 810 | return ERR_PTR(status); |
819 | } | 811 | } |
@@ -878,7 +870,7 @@ static int __devinit dataflash_probe(struct spi_device *spi) | |||
878 | */ | 870 | */ |
879 | status = dataflash_status(spi); | 871 | status = dataflash_status(spi); |
880 | if (status <= 0 || status == 0xff) { | 872 | if (status <= 0 || status == 0xff) { |
881 | DEBUG(MTD_DEBUG_LEVEL1, "%s: status error %d\n", | 873 | pr_debug("%s: status error %d\n", |
882 | dev_name(&spi->dev), status); | 874 | dev_name(&spi->dev), status); |
883 | if (status == 0 || status == 0xff) | 875 | if (status == 0 || status == 0xff) |
884 | status = -ENODEV; | 876 | status = -ENODEV; |
@@ -914,14 +906,14 @@ static int __devinit dataflash_probe(struct spi_device *spi) | |||
914 | break; | 906 | break; |
915 | /* obsolete AT45DB1282 not (yet?) supported */ | 907 | /* obsolete AT45DB1282 not (yet?) supported */ |
916 | default: | 908 | default: |
917 | DEBUG(MTD_DEBUG_LEVEL1, "%s: unsupported device (%x)\n", | 909 | pr_debug("%s: unsupported device (%x)\n", dev_name(&spi->dev), |
918 | dev_name(&spi->dev), status & 0x3c); | 910 | status & 0x3c); |
919 | status = -ENODEV; | 911 | status = -ENODEV; |
920 | } | 912 | } |
921 | 913 | ||
922 | if (status < 0) | 914 | if (status < 0) |
923 | DEBUG(MTD_DEBUG_LEVEL1, "%s: add_dataflash --> %d\n", | 915 | pr_debug("%s: add_dataflash --> %d\n", dev_name(&spi->dev), |
924 | dev_name(&spi->dev), status); | 916 | status); |
925 | 917 | ||
926 | return status; | 918 | return status; |
927 | } | 919 | } |
@@ -931,7 +923,7 @@ static int __devexit dataflash_remove(struct spi_device *spi) | |||
931 | struct dataflash *flash = dev_get_drvdata(&spi->dev); | 923 | struct dataflash *flash = dev_get_drvdata(&spi->dev); |
932 | int status; | 924 | int status; |
933 | 925 | ||
934 | DEBUG(MTD_DEBUG_LEVEL1, "%s: remove\n", dev_name(&spi->dev)); | 926 | pr_debug("%s: remove\n", dev_name(&spi->dev)); |
935 | 927 | ||
936 | status = mtd_device_unregister(&flash->mtd); | 928 | status = mtd_device_unregister(&flash->mtd); |
937 | if (status == 0) { | 929 | if (status == 0) { |
@@ -946,6 +938,7 @@ static struct spi_driver dataflash_driver = { | |||
946 | .name = "mtd_dataflash", | 938 | .name = "mtd_dataflash", |
947 | .bus = &spi_bus_type, | 939 | .bus = &spi_bus_type, |
948 | .owner = THIS_MODULE, | 940 | .owner = THIS_MODULE, |
941 | .of_match_table = dataflash_dt_ids, | ||
949 | }, | 942 | }, |
950 | 943 | ||
951 | .probe = dataflash_probe, | 944 | .probe = dataflash_probe, |
diff --git a/drivers/mtd/devices/sst25l.c b/drivers/mtd/devices/sst25l.c index 83e80c65d6e7..d38ef3bffe8d 100644 --- a/drivers/mtd/devices/sst25l.c +++ b/drivers/mtd/devices/sst25l.c | |||
@@ -52,8 +52,6 @@ struct sst25l_flash { | |||
52 | struct spi_device *spi; | 52 | struct spi_device *spi; |
53 | struct mutex lock; | 53 | struct mutex lock; |
54 | struct mtd_info mtd; | 54 | struct mtd_info mtd; |
55 | |||
56 | int partitioned; | ||
57 | }; | 55 | }; |
58 | 56 | ||
59 | struct flash_info { | 57 | struct flash_info { |
@@ -381,8 +379,6 @@ static int __devinit sst25l_probe(struct spi_device *spi) | |||
381 | struct sst25l_flash *flash; | 379 | struct sst25l_flash *flash; |
382 | struct flash_platform_data *data; | 380 | struct flash_platform_data *data; |
383 | int ret, i; | 381 | int ret, i; |
384 | struct mtd_partition *parts = NULL; | ||
385 | int nr_parts = 0; | ||
386 | 382 | ||
387 | flash_info = sst25l_match_device(spi); | 383 | flash_info = sst25l_match_device(spi); |
388 | if (!flash_info) | 384 | if (!flash_info) |
@@ -414,8 +410,7 @@ static int __devinit sst25l_probe(struct spi_device *spi) | |||
414 | dev_info(&spi->dev, "%s (%lld KiB)\n", flash_info->name, | 410 | dev_info(&spi->dev, "%s (%lld KiB)\n", flash_info->name, |
415 | (long long)flash->mtd.size >> 10); | 411 | (long long)flash->mtd.size >> 10); |
416 | 412 | ||
417 | DEBUG(MTD_DEBUG_LEVEL2, | 413 | pr_debug("mtd .name = %s, .size = 0x%llx (%lldMiB) " |
418 | "mtd .name = %s, .size = 0x%llx (%lldMiB) " | ||
419 | ".erasesize = 0x%.8x (%uKiB) .numeraseregions = %d\n", | 414 | ".erasesize = 0x%.8x (%uKiB) .numeraseregions = %d\n", |
420 | flash->mtd.name, | 415 | flash->mtd.name, |
421 | (long long)flash->mtd.size, (long long)(flash->mtd.size >> 20), | 416 | (long long)flash->mtd.size, (long long)(flash->mtd.size >> 20), |
@@ -423,37 +418,10 @@ static int __devinit sst25l_probe(struct spi_device *spi) | |||
423 | flash->mtd.numeraseregions); | 418 | flash->mtd.numeraseregions); |
424 | 419 | ||
425 | 420 | ||
426 | if (mtd_has_cmdlinepart()) { | 421 | ret = mtd_device_parse_register(&flash->mtd, NULL, 0, |
427 | static const char *part_probes[] = {"cmdlinepart", NULL}; | 422 | data ? data->parts : NULL, |
428 | 423 | data ? data->nr_parts : 0); | |
429 | nr_parts = parse_mtd_partitions(&flash->mtd, | 424 | if (ret) { |
430 | part_probes, | ||
431 | &parts, 0); | ||
432 | } | ||
433 | |||
434 | if (nr_parts <= 0 && data && data->parts) { | ||
435 | parts = data->parts; | ||
436 | nr_parts = data->nr_parts; | ||
437 | } | ||
438 | |||
439 | if (nr_parts > 0) { | ||
440 | for (i = 0; i < nr_parts; i++) { | ||
441 | DEBUG(MTD_DEBUG_LEVEL2, "partitions[%d] = " | ||
442 | "{.name = %s, .offset = 0x%llx, " | ||
443 | ".size = 0x%llx (%lldKiB) }\n", | ||
444 | i, parts[i].name, | ||
445 | (long long)parts[i].offset, | ||
446 | (long long)parts[i].size, | ||
447 | (long long)(parts[i].size >> 10)); | ||
448 | } | ||
449 | |||
450 | flash->partitioned = 1; | ||
451 | return mtd_device_register(&flash->mtd, parts, | ||
452 | nr_parts); | ||
453 | } | ||
454 | |||
455 | ret = mtd_device_register(&flash->mtd, NULL, 0); | ||
456 | if (ret == 1) { | ||
457 | kfree(flash); | 425 | kfree(flash); |
458 | dev_set_drvdata(&spi->dev, NULL); | 426 | dev_set_drvdata(&spi->dev, NULL); |
459 | return -ENODEV; | 427 | return -ENODEV; |
diff --git a/drivers/mtd/ftl.c b/drivers/mtd/ftl.c index 037b399df3f1..c7382bb686c6 100644 --- a/drivers/mtd/ftl.c +++ b/drivers/mtd/ftl.c | |||
@@ -339,7 +339,7 @@ static int erase_xfer(partition_t *part, | |||
339 | struct erase_info *erase; | 339 | struct erase_info *erase; |
340 | 340 | ||
341 | xfer = &part->XferInfo[xfernum]; | 341 | xfer = &part->XferInfo[xfernum]; |
342 | DEBUG(1, "ftl_cs: erasing xfer unit at 0x%x\n", xfer->Offset); | 342 | pr_debug("ftl_cs: erasing xfer unit at 0x%x\n", xfer->Offset); |
343 | xfer->state = XFER_ERASING; | 343 | xfer->state = XFER_ERASING; |
344 | 344 | ||
345 | /* Is there a free erase slot? Always in MTD. */ | 345 | /* Is there a free erase slot? Always in MTD. */ |
@@ -415,7 +415,7 @@ static int prepare_xfer(partition_t *part, int i) | |||
415 | xfer = &part->XferInfo[i]; | 415 | xfer = &part->XferInfo[i]; |
416 | xfer->state = XFER_FAILED; | 416 | xfer->state = XFER_FAILED; |
417 | 417 | ||
418 | DEBUG(1, "ftl_cs: preparing xfer unit at 0x%x\n", xfer->Offset); | 418 | pr_debug("ftl_cs: preparing xfer unit at 0x%x\n", xfer->Offset); |
419 | 419 | ||
420 | /* Write the transfer unit header */ | 420 | /* Write the transfer unit header */ |
421 | header = part->header; | 421 | header = part->header; |
@@ -476,7 +476,7 @@ static int copy_erase_unit(partition_t *part, uint16_t srcunit, | |||
476 | 476 | ||
477 | eun = &part->EUNInfo[srcunit]; | 477 | eun = &part->EUNInfo[srcunit]; |
478 | xfer = &part->XferInfo[xferunit]; | 478 | xfer = &part->XferInfo[xferunit]; |
479 | DEBUG(2, "ftl_cs: copying block 0x%x to 0x%x\n", | 479 | pr_debug("ftl_cs: copying block 0x%x to 0x%x\n", |
480 | eun->Offset, xfer->Offset); | 480 | eun->Offset, xfer->Offset); |
481 | 481 | ||
482 | 482 | ||
@@ -598,7 +598,7 @@ static int copy_erase_unit(partition_t *part, uint16_t srcunit, | |||
598 | unit with the fewest erases, and usually pick the data unit with | 598 | unit with the fewest erases, and usually pick the data unit with |
599 | the most deleted blocks. But with a small probability, pick the | 599 | the most deleted blocks. But with a small probability, pick the |
600 | oldest data unit instead. This means that we generally postpone | 600 | oldest data unit instead. This means that we generally postpone |
601 | the next reclaimation as long as possible, but shuffle static | 601 | the next reclamation as long as possible, but shuffle static |
602 | stuff around a bit for wear leveling. | 602 | stuff around a bit for wear leveling. |
603 | 603 | ||
604 | ======================================================================*/ | 604 | ======================================================================*/ |
@@ -609,8 +609,8 @@ static int reclaim_block(partition_t *part) | |||
609 | uint32_t best; | 609 | uint32_t best; |
610 | int queued, ret; | 610 | int queued, ret; |
611 | 611 | ||
612 | DEBUG(0, "ftl_cs: reclaiming space...\n"); | 612 | pr_debug("ftl_cs: reclaiming space...\n"); |
613 | DEBUG(3, "NumTransferUnits == %x\n", part->header.NumTransferUnits); | 613 | pr_debug("NumTransferUnits == %x\n", part->header.NumTransferUnits); |
614 | /* Pick the least erased transfer unit */ | 614 | /* Pick the least erased transfer unit */ |
615 | best = 0xffffffff; xfer = 0xffff; | 615 | best = 0xffffffff; xfer = 0xffff; |
616 | do { | 616 | do { |
@@ -618,22 +618,22 @@ static int reclaim_block(partition_t *part) | |||
618 | for (i = 0; i < part->header.NumTransferUnits; i++) { | 618 | for (i = 0; i < part->header.NumTransferUnits; i++) { |
619 | int n=0; | 619 | int n=0; |
620 | if (part->XferInfo[i].state == XFER_UNKNOWN) { | 620 | if (part->XferInfo[i].state == XFER_UNKNOWN) { |
621 | DEBUG(3,"XferInfo[%d].state == XFER_UNKNOWN\n",i); | 621 | pr_debug("XferInfo[%d].state == XFER_UNKNOWN\n",i); |
622 | n=1; | 622 | n=1; |
623 | erase_xfer(part, i); | 623 | erase_xfer(part, i); |
624 | } | 624 | } |
625 | if (part->XferInfo[i].state == XFER_ERASING) { | 625 | if (part->XferInfo[i].state == XFER_ERASING) { |
626 | DEBUG(3,"XferInfo[%d].state == XFER_ERASING\n",i); | 626 | pr_debug("XferInfo[%d].state == XFER_ERASING\n",i); |
627 | n=1; | 627 | n=1; |
628 | queued = 1; | 628 | queued = 1; |
629 | } | 629 | } |
630 | else if (part->XferInfo[i].state == XFER_ERASED) { | 630 | else if (part->XferInfo[i].state == XFER_ERASED) { |
631 | DEBUG(3,"XferInfo[%d].state == XFER_ERASED\n",i); | 631 | pr_debug("XferInfo[%d].state == XFER_ERASED\n",i); |
632 | n=1; | 632 | n=1; |
633 | prepare_xfer(part, i); | 633 | prepare_xfer(part, i); |
634 | } | 634 | } |
635 | if (part->XferInfo[i].state == XFER_PREPARED) { | 635 | if (part->XferInfo[i].state == XFER_PREPARED) { |
636 | DEBUG(3,"XferInfo[%d].state == XFER_PREPARED\n",i); | 636 | pr_debug("XferInfo[%d].state == XFER_PREPARED\n",i); |
637 | n=1; | 637 | n=1; |
638 | if (part->XferInfo[i].EraseCount <= best) { | 638 | if (part->XferInfo[i].EraseCount <= best) { |
639 | best = part->XferInfo[i].EraseCount; | 639 | best = part->XferInfo[i].EraseCount; |
@@ -641,12 +641,12 @@ static int reclaim_block(partition_t *part) | |||
641 | } | 641 | } |
642 | } | 642 | } |
643 | if (!n) | 643 | if (!n) |
644 | DEBUG(3,"XferInfo[%d].state == %x\n",i, part->XferInfo[i].state); | 644 | pr_debug("XferInfo[%d].state == %x\n",i, part->XferInfo[i].state); |
645 | 645 | ||
646 | } | 646 | } |
647 | if (xfer == 0xffff) { | 647 | if (xfer == 0xffff) { |
648 | if (queued) { | 648 | if (queued) { |
649 | DEBUG(1, "ftl_cs: waiting for transfer " | 649 | pr_debug("ftl_cs: waiting for transfer " |
650 | "unit to be prepared...\n"); | 650 | "unit to be prepared...\n"); |
651 | if (part->mbd.mtd->sync) | 651 | if (part->mbd.mtd->sync) |
652 | part->mbd.mtd->sync(part->mbd.mtd); | 652 | part->mbd.mtd->sync(part->mbd.mtd); |
@@ -656,7 +656,7 @@ static int reclaim_block(partition_t *part) | |||
656 | printk(KERN_NOTICE "ftl_cs: reclaim failed: no " | 656 | printk(KERN_NOTICE "ftl_cs: reclaim failed: no " |
657 | "suitable transfer units!\n"); | 657 | "suitable transfer units!\n"); |
658 | else | 658 | else |
659 | DEBUG(1, "ftl_cs: reclaim failed: no " | 659 | pr_debug("ftl_cs: reclaim failed: no " |
660 | "suitable transfer units!\n"); | 660 | "suitable transfer units!\n"); |
661 | 661 | ||
662 | return -EIO; | 662 | return -EIO; |
@@ -666,7 +666,7 @@ static int reclaim_block(partition_t *part) | |||
666 | 666 | ||
667 | eun = 0; | 667 | eun = 0; |
668 | if ((jiffies % shuffle_freq) == 0) { | 668 | if ((jiffies % shuffle_freq) == 0) { |
669 | DEBUG(1, "ftl_cs: recycling freshest block...\n"); | 669 | pr_debug("ftl_cs: recycling freshest block...\n"); |
670 | best = 0xffffffff; | 670 | best = 0xffffffff; |
671 | for (i = 0; i < part->DataUnits; i++) | 671 | for (i = 0; i < part->DataUnits; i++) |
672 | if (part->EUNInfo[i].EraseCount <= best) { | 672 | if (part->EUNInfo[i].EraseCount <= best) { |
@@ -686,7 +686,7 @@ static int reclaim_block(partition_t *part) | |||
686 | printk(KERN_NOTICE "ftl_cs: reclaim failed: " | 686 | printk(KERN_NOTICE "ftl_cs: reclaim failed: " |
687 | "no free blocks!\n"); | 687 | "no free blocks!\n"); |
688 | else | 688 | else |
689 | DEBUG(1,"ftl_cs: reclaim failed: " | 689 | pr_debug("ftl_cs: reclaim failed: " |
690 | "no free blocks!\n"); | 690 | "no free blocks!\n"); |
691 | 691 | ||
692 | return -EIO; | 692 | return -EIO; |
@@ -771,7 +771,7 @@ static uint32_t find_free(partition_t *part) | |||
771 | printk(KERN_NOTICE "ftl_cs: bad free list!\n"); | 771 | printk(KERN_NOTICE "ftl_cs: bad free list!\n"); |
772 | return 0; | 772 | return 0; |
773 | } | 773 | } |
774 | DEBUG(2, "ftl_cs: found free block at %d in %d\n", blk, eun); | 774 | pr_debug("ftl_cs: found free block at %d in %d\n", blk, eun); |
775 | return blk; | 775 | return blk; |
776 | 776 | ||
777 | } /* find_free */ | 777 | } /* find_free */ |
@@ -791,7 +791,7 @@ static int ftl_read(partition_t *part, caddr_t buffer, | |||
791 | int ret; | 791 | int ret; |
792 | size_t offset, retlen; | 792 | size_t offset, retlen; |
793 | 793 | ||
794 | DEBUG(2, "ftl_cs: ftl_read(0x%p, 0x%lx, %ld)\n", | 794 | pr_debug("ftl_cs: ftl_read(0x%p, 0x%lx, %ld)\n", |
795 | part, sector, nblocks); | 795 | part, sector, nblocks); |
796 | if (!(part->state & FTL_FORMATTED)) { | 796 | if (!(part->state & FTL_FORMATTED)) { |
797 | printk(KERN_NOTICE "ftl_cs: bad partition\n"); | 797 | printk(KERN_NOTICE "ftl_cs: bad partition\n"); |
@@ -840,7 +840,7 @@ static int set_bam_entry(partition_t *part, uint32_t log_addr, | |||
840 | int ret; | 840 | int ret; |
841 | size_t retlen, offset; | 841 | size_t retlen, offset; |
842 | 842 | ||
843 | DEBUG(2, "ftl_cs: set_bam_entry(0x%p, 0x%x, 0x%x)\n", | 843 | pr_debug("ftl_cs: set_bam_entry(0x%p, 0x%x, 0x%x)\n", |
844 | part, log_addr, virt_addr); | 844 | part, log_addr, virt_addr); |
845 | bsize = 1 << part->header.EraseUnitSize; | 845 | bsize = 1 << part->header.EraseUnitSize; |
846 | eun = log_addr / bsize; | 846 | eun = log_addr / bsize; |
@@ -905,7 +905,7 @@ static int ftl_write(partition_t *part, caddr_t buffer, | |||
905 | int ret; | 905 | int ret; |
906 | size_t retlen, offset; | 906 | size_t retlen, offset; |
907 | 907 | ||
908 | DEBUG(2, "ftl_cs: ftl_write(0x%p, %ld, %ld)\n", | 908 | pr_debug("ftl_cs: ftl_write(0x%p, %ld, %ld)\n", |
909 | part, sector, nblocks); | 909 | part, sector, nblocks); |
910 | if (!(part->state & FTL_FORMATTED)) { | 910 | if (!(part->state & FTL_FORMATTED)) { |
911 | printk(KERN_NOTICE "ftl_cs: bad partition\n"); | 911 | printk(KERN_NOTICE "ftl_cs: bad partition\n"); |
@@ -1011,7 +1011,7 @@ static int ftl_discardsect(struct mtd_blktrans_dev *dev, | |||
1011 | partition_t *part = (void *)dev; | 1011 | partition_t *part = (void *)dev; |
1012 | uint32_t bsize = 1 << part->header.EraseUnitSize; | 1012 | uint32_t bsize = 1 << part->header.EraseUnitSize; |
1013 | 1013 | ||
1014 | DEBUG(1, "FTL erase sector %ld for %d sectors\n", | 1014 | pr_debug("FTL erase sector %ld for %d sectors\n", |
1015 | sector, nr_sects); | 1015 | sector, nr_sects); |
1016 | 1016 | ||
1017 | while (nr_sects) { | 1017 | while (nr_sects) { |
diff --git a/drivers/mtd/inftlcore.c b/drivers/mtd/inftlcore.c index d7592e67d048..dd034efd1875 100644 --- a/drivers/mtd/inftlcore.c +++ b/drivers/mtd/inftlcore.c | |||
@@ -63,14 +63,12 @@ static void inftl_add_mtd(struct mtd_blktrans_ops *tr, struct mtd_info *mtd) | |||
63 | return; | 63 | return; |
64 | } | 64 | } |
65 | 65 | ||
66 | DEBUG(MTD_DEBUG_LEVEL3, "INFTL: add_mtd for %s\n", mtd->name); | 66 | pr_debug("INFTL: add_mtd for %s\n", mtd->name); |
67 | 67 | ||
68 | inftl = kzalloc(sizeof(*inftl), GFP_KERNEL); | 68 | inftl = kzalloc(sizeof(*inftl), GFP_KERNEL); |
69 | 69 | ||
70 | if (!inftl) { | 70 | if (!inftl) |
71 | printk(KERN_WARNING "INFTL: Out of memory for data structures\n"); | ||
72 | return; | 71 | return; |
73 | } | ||
74 | 72 | ||
75 | inftl->mbd.mtd = mtd; | 73 | inftl->mbd.mtd = mtd; |
76 | inftl->mbd.devnum = -1; | 74 | inftl->mbd.devnum = -1; |
@@ -133,7 +131,7 @@ static void inftl_remove_dev(struct mtd_blktrans_dev *dev) | |||
133 | { | 131 | { |
134 | struct INFTLrecord *inftl = (void *)dev; | 132 | struct INFTLrecord *inftl = (void *)dev; |
135 | 133 | ||
136 | DEBUG(MTD_DEBUG_LEVEL3, "INFTL: remove_dev (i=%d)\n", dev->devnum); | 134 | pr_debug("INFTL: remove_dev (i=%d)\n", dev->devnum); |
137 | 135 | ||
138 | del_mtd_blktrans_dev(dev); | 136 | del_mtd_blktrans_dev(dev); |
139 | 137 | ||
@@ -154,7 +152,7 @@ int inftl_read_oob(struct mtd_info *mtd, loff_t offs, size_t len, | |||
154 | struct mtd_oob_ops ops; | 152 | struct mtd_oob_ops ops; |
155 | int res; | 153 | int res; |
156 | 154 | ||
157 | ops.mode = MTD_OOB_PLACE; | 155 | ops.mode = MTD_OPS_PLACE_OOB; |
158 | ops.ooboffs = offs & (mtd->writesize - 1); | 156 | ops.ooboffs = offs & (mtd->writesize - 1); |
159 | ops.ooblen = len; | 157 | ops.ooblen = len; |
160 | ops.oobbuf = buf; | 158 | ops.oobbuf = buf; |
@@ -174,7 +172,7 @@ int inftl_write_oob(struct mtd_info *mtd, loff_t offs, size_t len, | |||
174 | struct mtd_oob_ops ops; | 172 | struct mtd_oob_ops ops; |
175 | int res; | 173 | int res; |
176 | 174 | ||
177 | ops.mode = MTD_OOB_PLACE; | 175 | ops.mode = MTD_OPS_PLACE_OOB; |
178 | ops.ooboffs = offs & (mtd->writesize - 1); | 176 | ops.ooboffs = offs & (mtd->writesize - 1); |
179 | ops.ooblen = len; | 177 | ops.ooblen = len; |
180 | ops.oobbuf = buf; | 178 | ops.oobbuf = buf; |
@@ -194,7 +192,7 @@ static int inftl_write(struct mtd_info *mtd, loff_t offs, size_t len, | |||
194 | struct mtd_oob_ops ops; | 192 | struct mtd_oob_ops ops; |
195 | int res; | 193 | int res; |
196 | 194 | ||
197 | ops.mode = MTD_OOB_PLACE; | 195 | ops.mode = MTD_OPS_PLACE_OOB; |
198 | ops.ooboffs = offs; | 196 | ops.ooboffs = offs; |
199 | ops.ooblen = mtd->oobsize; | 197 | ops.ooblen = mtd->oobsize; |
200 | ops.oobbuf = oob; | 198 | ops.oobbuf = oob; |
@@ -215,16 +213,16 @@ static u16 INFTL_findfreeblock(struct INFTLrecord *inftl, int desperate) | |||
215 | u16 pot = inftl->LastFreeEUN; | 213 | u16 pot = inftl->LastFreeEUN; |
216 | int silly = inftl->nb_blocks; | 214 | int silly = inftl->nb_blocks; |
217 | 215 | ||
218 | DEBUG(MTD_DEBUG_LEVEL3, "INFTL: INFTL_findfreeblock(inftl=%p," | 216 | pr_debug("INFTL: INFTL_findfreeblock(inftl=%p,desperate=%d)\n", |
219 | "desperate=%d)\n", inftl, desperate); | 217 | inftl, desperate); |
220 | 218 | ||
221 | /* | 219 | /* |
222 | * Normally, we force a fold to happen before we run out of free | 220 | * Normally, we force a fold to happen before we run out of free |
223 | * blocks completely. | 221 | * blocks completely. |
224 | */ | 222 | */ |
225 | if (!desperate && inftl->numfreeEUNs < 2) { | 223 | if (!desperate && inftl->numfreeEUNs < 2) { |
226 | DEBUG(MTD_DEBUG_LEVEL1, "INFTL: there are too few free " | 224 | pr_debug("INFTL: there are too few free EUNs (%d)\n", |
227 | "EUNs (%d)\n", inftl->numfreeEUNs); | 225 | inftl->numfreeEUNs); |
228 | return BLOCK_NIL; | 226 | return BLOCK_NIL; |
229 | } | 227 | } |
230 | 228 | ||
@@ -259,8 +257,8 @@ static u16 INFTL_foldchain(struct INFTLrecord *inftl, unsigned thisVUC, unsigned | |||
259 | struct inftl_oob oob; | 257 | struct inftl_oob oob; |
260 | size_t retlen; | 258 | size_t retlen; |
261 | 259 | ||
262 | DEBUG(MTD_DEBUG_LEVEL3, "INFTL: INFTL_foldchain(inftl=%p,thisVUC=%d," | 260 | pr_debug("INFTL: INFTL_foldchain(inftl=%p,thisVUC=%d,pending=%d)\n", |
263 | "pending=%d)\n", inftl, thisVUC, pendingblock); | 261 | inftl, thisVUC, pendingblock); |
264 | 262 | ||
265 | memset(BlockMap, 0xff, sizeof(BlockMap)); | 263 | memset(BlockMap, 0xff, sizeof(BlockMap)); |
266 | memset(BlockDeleted, 0, sizeof(BlockDeleted)); | 264 | memset(BlockDeleted, 0, sizeof(BlockDeleted)); |
@@ -323,8 +321,7 @@ static u16 INFTL_foldchain(struct INFTLrecord *inftl, unsigned thisVUC, unsigned | |||
323 | * Chain, and the Erase Unit into which we are supposed to be copying. | 321 | * Chain, and the Erase Unit into which we are supposed to be copying. |
324 | * Go for it. | 322 | * Go for it. |
325 | */ | 323 | */ |
326 | DEBUG(MTD_DEBUG_LEVEL1, "INFTL: folding chain %d into unit %d\n", | 324 | pr_debug("INFTL: folding chain %d into unit %d\n", thisVUC, targetEUN); |
327 | thisVUC, targetEUN); | ||
328 | 325 | ||
329 | for (block = 0; block < inftl->EraseSize/SECTORSIZE ; block++) { | 326 | for (block = 0; block < inftl->EraseSize/SECTORSIZE ; block++) { |
330 | unsigned char movebuf[SECTORSIZE]; | 327 | unsigned char movebuf[SECTORSIZE]; |
@@ -349,14 +346,13 @@ static u16 INFTL_foldchain(struct INFTLrecord *inftl, unsigned thisVUC, unsigned | |||
349 | ret = mtd->read(mtd, (inftl->EraseSize * BlockMap[block]) + | 346 | ret = mtd->read(mtd, (inftl->EraseSize * BlockMap[block]) + |
350 | (block * SECTORSIZE), SECTORSIZE, &retlen, | 347 | (block * SECTORSIZE), SECTORSIZE, &retlen, |
351 | movebuf); | 348 | movebuf); |
352 | if (ret < 0 && ret != -EUCLEAN) { | 349 | if (ret < 0 && !mtd_is_bitflip(ret)) { |
353 | ret = mtd->read(mtd, | 350 | ret = mtd->read(mtd, |
354 | (inftl->EraseSize * BlockMap[block]) + | 351 | (inftl->EraseSize * BlockMap[block]) + |
355 | (block * SECTORSIZE), SECTORSIZE, | 352 | (block * SECTORSIZE), SECTORSIZE, |
356 | &retlen, movebuf); | 353 | &retlen, movebuf); |
357 | if (ret != -EIO) | 354 | if (ret != -EIO) |
358 | DEBUG(MTD_DEBUG_LEVEL1, "INFTL: error went " | 355 | pr_debug("INFTL: error went away on retry?\n"); |
359 | "away on retry?\n"); | ||
360 | } | 356 | } |
361 | memset(&oob, 0xff, sizeof(struct inftl_oob)); | 357 | memset(&oob, 0xff, sizeof(struct inftl_oob)); |
362 | oob.b.Status = oob.b.Status1 = SECTOR_USED; | 358 | oob.b.Status = oob.b.Status1 = SECTOR_USED; |
@@ -372,8 +368,7 @@ static u16 INFTL_foldchain(struct INFTLrecord *inftl, unsigned thisVUC, unsigned | |||
372 | * is important, by doing oldest first if we crash/reboot then it | 368 | * is important, by doing oldest first if we crash/reboot then it |
373 | * it is relatively simple to clean up the mess). | 369 | * it is relatively simple to clean up the mess). |
374 | */ | 370 | */ |
375 | DEBUG(MTD_DEBUG_LEVEL1, "INFTL: want to erase virtual chain %d\n", | 371 | pr_debug("INFTL: want to erase virtual chain %d\n", thisVUC); |
376 | thisVUC); | ||
377 | 372 | ||
378 | for (;;) { | 373 | for (;;) { |
379 | /* Find oldest unit in chain. */ | 374 | /* Find oldest unit in chain. */ |
@@ -421,7 +416,7 @@ static u16 INFTL_makefreeblock(struct INFTLrecord *inftl, unsigned pendingblock) | |||
421 | u16 ChainLength = 0, thislen; | 416 | u16 ChainLength = 0, thislen; |
422 | u16 chain, EUN; | 417 | u16 chain, EUN; |
423 | 418 | ||
424 | DEBUG(MTD_DEBUG_LEVEL3, "INFTL: INFTL_makefreeblock(inftl=%p," | 419 | pr_debug("INFTL: INFTL_makefreeblock(inftl=%p," |
425 | "pending=%d)\n", inftl, pendingblock); | 420 | "pending=%d)\n", inftl, pendingblock); |
426 | 421 | ||
427 | for (chain = 0; chain < inftl->nb_blocks; chain++) { | 422 | for (chain = 0; chain < inftl->nb_blocks; chain++) { |
@@ -484,8 +479,8 @@ static inline u16 INFTL_findwriteunit(struct INFTLrecord *inftl, unsigned block) | |||
484 | size_t retlen; | 479 | size_t retlen; |
485 | int silly, silly2 = 3; | 480 | int silly, silly2 = 3; |
486 | 481 | ||
487 | DEBUG(MTD_DEBUG_LEVEL3, "INFTL: INFTL_findwriteunit(inftl=%p," | 482 | pr_debug("INFTL: INFTL_findwriteunit(inftl=%p,block=%d)\n", |
488 | "block=%d)\n", inftl, block); | 483 | inftl, block); |
489 | 484 | ||
490 | do { | 485 | do { |
491 | /* | 486 | /* |
@@ -501,8 +496,8 @@ static inline u16 INFTL_findwriteunit(struct INFTLrecord *inftl, unsigned block) | |||
501 | blockofs, 8, &retlen, (char *)&bci); | 496 | blockofs, 8, &retlen, (char *)&bci); |
502 | 497 | ||
503 | status = bci.Status | bci.Status1; | 498 | status = bci.Status | bci.Status1; |
504 | DEBUG(MTD_DEBUG_LEVEL3, "INFTL: status of block %d in " | 499 | pr_debug("INFTL: status of block %d in EUN %d is %x\n", |
505 | "EUN %d is %x\n", block , writeEUN, status); | 500 | block , writeEUN, status); |
506 | 501 | ||
507 | switch(status) { | 502 | switch(status) { |
508 | case SECTOR_FREE: | 503 | case SECTOR_FREE: |
@@ -555,9 +550,9 @@ hitused: | |||
555 | * Hopefully we free something, lets try again. | 550 | * Hopefully we free something, lets try again. |
556 | * This time we are desperate... | 551 | * This time we are desperate... |
557 | */ | 552 | */ |
558 | DEBUG(MTD_DEBUG_LEVEL1, "INFTL: using desperate==1 " | 553 | pr_debug("INFTL: using desperate==1 to find free EUN " |
559 | "to find free EUN to accommodate write to " | 554 | "to accommodate write to VUC %d\n", |
560 | "VUC %d\n", thisVUC); | 555 | thisVUC); |
561 | writeEUN = INFTL_findfreeblock(inftl, 1); | 556 | writeEUN = INFTL_findfreeblock(inftl, 1); |
562 | if (writeEUN == BLOCK_NIL) { | 557 | if (writeEUN == BLOCK_NIL) { |
563 | /* | 558 | /* |
@@ -647,7 +642,7 @@ static void INFTL_trydeletechain(struct INFTLrecord *inftl, unsigned thisVUC) | |||
647 | struct inftl_bci bci; | 642 | struct inftl_bci bci; |
648 | size_t retlen; | 643 | size_t retlen; |
649 | 644 | ||
650 | DEBUG(MTD_DEBUG_LEVEL3, "INFTL: INFTL_trydeletechain(inftl=%p," | 645 | pr_debug("INFTL: INFTL_trydeletechain(inftl=%p," |
651 | "thisVUC=%d)\n", inftl, thisVUC); | 646 | "thisVUC=%d)\n", inftl, thisVUC); |
652 | 647 | ||
653 | memset(BlockUsed, 0, sizeof(BlockUsed)); | 648 | memset(BlockUsed, 0, sizeof(BlockUsed)); |
@@ -711,7 +706,7 @@ static void INFTL_trydeletechain(struct INFTLrecord *inftl, unsigned thisVUC) | |||
711 | * For each block in the chain free it and make it available | 706 | * For each block in the chain free it and make it available |
712 | * for future use. Erase from the oldest unit first. | 707 | * for future use. Erase from the oldest unit first. |
713 | */ | 708 | */ |
714 | DEBUG(MTD_DEBUG_LEVEL1, "INFTL: deleting empty VUC %d\n", thisVUC); | 709 | pr_debug("INFTL: deleting empty VUC %d\n", thisVUC); |
715 | 710 | ||
716 | for (;;) { | 711 | for (;;) { |
717 | u16 *prevEUN = &inftl->VUtable[thisVUC]; | 712 | u16 *prevEUN = &inftl->VUtable[thisVUC]; |
@@ -719,7 +714,7 @@ static void INFTL_trydeletechain(struct INFTLrecord *inftl, unsigned thisVUC) | |||
719 | 714 | ||
720 | /* If the chain is all gone already, we're done */ | 715 | /* If the chain is all gone already, we're done */ |
721 | if (thisEUN == BLOCK_NIL) { | 716 | if (thisEUN == BLOCK_NIL) { |
722 | DEBUG(MTD_DEBUG_LEVEL2, "INFTL: Empty VUC %d for deletion was already absent\n", thisEUN); | 717 | pr_debug("INFTL: Empty VUC %d for deletion was already absent\n", thisEUN); |
723 | return; | 718 | return; |
724 | } | 719 | } |
725 | 720 | ||
@@ -731,7 +726,7 @@ static void INFTL_trydeletechain(struct INFTLrecord *inftl, unsigned thisVUC) | |||
731 | thisEUN = *prevEUN; | 726 | thisEUN = *prevEUN; |
732 | } | 727 | } |
733 | 728 | ||
734 | DEBUG(MTD_DEBUG_LEVEL3, "Deleting EUN %d from VUC %d\n", | 729 | pr_debug("Deleting EUN %d from VUC %d\n", |
735 | thisEUN, thisVUC); | 730 | thisEUN, thisVUC); |
736 | 731 | ||
737 | if (INFTL_formatblock(inftl, thisEUN) < 0) { | 732 | if (INFTL_formatblock(inftl, thisEUN) < 0) { |
@@ -767,7 +762,7 @@ static int INFTL_deleteblock(struct INFTLrecord *inftl, unsigned block) | |||
767 | size_t retlen; | 762 | size_t retlen; |
768 | struct inftl_bci bci; | 763 | struct inftl_bci bci; |
769 | 764 | ||
770 | DEBUG(MTD_DEBUG_LEVEL3, "INFTL: INFTL_deleteblock(inftl=%p," | 765 | pr_debug("INFTL: INFTL_deleteblock(inftl=%p," |
771 | "block=%d)\n", inftl, block); | 766 | "block=%d)\n", inftl, block); |
772 | 767 | ||
773 | while (thisEUN < inftl->nb_blocks) { | 768 | while (thisEUN < inftl->nb_blocks) { |
@@ -826,7 +821,7 @@ static int inftl_writeblock(struct mtd_blktrans_dev *mbd, unsigned long block, | |||
826 | struct inftl_oob oob; | 821 | struct inftl_oob oob; |
827 | char *p, *pend; | 822 | char *p, *pend; |
828 | 823 | ||
829 | DEBUG(MTD_DEBUG_LEVEL3, "INFTL: inftl_writeblock(inftl=%p,block=%ld," | 824 | pr_debug("INFTL: inftl_writeblock(inftl=%p,block=%ld," |
830 | "buffer=%p)\n", inftl, block, buffer); | 825 | "buffer=%p)\n", inftl, block, buffer); |
831 | 826 | ||
832 | /* Is block all zero? */ | 827 | /* Is block all zero? */ |
@@ -876,7 +871,7 @@ static int inftl_readblock(struct mtd_blktrans_dev *mbd, unsigned long block, | |||
876 | struct inftl_bci bci; | 871 | struct inftl_bci bci; |
877 | size_t retlen; | 872 | size_t retlen; |
878 | 873 | ||
879 | DEBUG(MTD_DEBUG_LEVEL3, "INFTL: inftl_readblock(inftl=%p,block=%ld," | 874 | pr_debug("INFTL: inftl_readblock(inftl=%p,block=%ld," |
880 | "buffer=%p)\n", inftl, block, buffer); | 875 | "buffer=%p)\n", inftl, block, buffer); |
881 | 876 | ||
882 | while (thisEUN < inftl->nb_blocks) { | 877 | while (thisEUN < inftl->nb_blocks) { |
@@ -922,7 +917,7 @@ foundit: | |||
922 | int ret = mtd->read(mtd, ptr, SECTORSIZE, &retlen, buffer); | 917 | int ret = mtd->read(mtd, ptr, SECTORSIZE, &retlen, buffer); |
923 | 918 | ||
924 | /* Handle corrected bit flips gracefully */ | 919 | /* Handle corrected bit flips gracefully */ |
925 | if (ret < 0 && ret != -EUCLEAN) | 920 | if (ret < 0 && !mtd_is_bitflip(ret)) |
926 | return -EIO; | 921 | return -EIO; |
927 | } | 922 | } |
928 | return 0; | 923 | return 0; |
diff --git a/drivers/mtd/inftlmount.c b/drivers/mtd/inftlmount.c index 104052e774b0..2ff601f816ce 100644 --- a/drivers/mtd/inftlmount.c +++ b/drivers/mtd/inftlmount.c | |||
@@ -53,7 +53,7 @@ static int find_boot_record(struct INFTLrecord *inftl) | |||
53 | struct INFTLPartition *ip; | 53 | struct INFTLPartition *ip; |
54 | size_t retlen; | 54 | size_t retlen; |
55 | 55 | ||
56 | DEBUG(MTD_DEBUG_LEVEL3, "INFTL: find_boot_record(inftl=%p)\n", inftl); | 56 | pr_debug("INFTL: find_boot_record(inftl=%p)\n", inftl); |
57 | 57 | ||
58 | /* | 58 | /* |
59 | * Assume logical EraseSize == physical erasesize for starting the | 59 | * Assume logical EraseSize == physical erasesize for starting the |
@@ -139,24 +139,20 @@ static int find_boot_record(struct INFTLrecord *inftl) | |||
139 | mh->FormatFlags = le32_to_cpu(mh->FormatFlags); | 139 | mh->FormatFlags = le32_to_cpu(mh->FormatFlags); |
140 | mh->PercentUsed = le32_to_cpu(mh->PercentUsed); | 140 | mh->PercentUsed = le32_to_cpu(mh->PercentUsed); |
141 | 141 | ||
142 | #ifdef CONFIG_MTD_DEBUG_VERBOSE | 142 | pr_debug("INFTL: Media Header ->\n" |
143 | if (CONFIG_MTD_DEBUG_VERBOSE >= 2) { | 143 | " bootRecordID = %s\n" |
144 | printk("INFTL: Media Header ->\n" | 144 | " NoOfBootImageBlocks = %d\n" |
145 | " bootRecordID = %s\n" | 145 | " NoOfBinaryPartitions = %d\n" |
146 | " NoOfBootImageBlocks = %d\n" | 146 | " NoOfBDTLPartitions = %d\n" |
147 | " NoOfBinaryPartitions = %d\n" | 147 | " BlockMultiplerBits = %d\n" |
148 | " NoOfBDTLPartitions = %d\n" | 148 | " FormatFlgs = %d\n" |
149 | " BlockMultiplerBits = %d\n" | 149 | " OsakVersion = 0x%x\n" |
150 | " FormatFlgs = %d\n" | 150 | " PercentUsed = %d\n", |
151 | " OsakVersion = 0x%x\n" | 151 | mh->bootRecordID, mh->NoOfBootImageBlocks, |
152 | " PercentUsed = %d\n", | 152 | mh->NoOfBinaryPartitions, |
153 | mh->bootRecordID, mh->NoOfBootImageBlocks, | 153 | mh->NoOfBDTLPartitions, |
154 | mh->NoOfBinaryPartitions, | 154 | mh->BlockMultiplierBits, mh->FormatFlags, |
155 | mh->NoOfBDTLPartitions, | 155 | mh->OsakVersion, mh->PercentUsed); |
156 | mh->BlockMultiplierBits, mh->FormatFlags, | ||
157 | mh->OsakVersion, mh->PercentUsed); | ||
158 | } | ||
159 | #endif | ||
160 | 156 | ||
161 | if (mh->NoOfBDTLPartitions == 0) { | 157 | if (mh->NoOfBDTLPartitions == 0) { |
162 | printk(KERN_WARNING "INFTL: Media Header sanity check " | 158 | printk(KERN_WARNING "INFTL: Media Header sanity check " |
@@ -200,19 +196,15 @@ static int find_boot_record(struct INFTLrecord *inftl) | |||
200 | ip->spareUnits = le32_to_cpu(ip->spareUnits); | 196 | ip->spareUnits = le32_to_cpu(ip->spareUnits); |
201 | ip->Reserved0 = le32_to_cpu(ip->Reserved0); | 197 | ip->Reserved0 = le32_to_cpu(ip->Reserved0); |
202 | 198 | ||
203 | #ifdef CONFIG_MTD_DEBUG_VERBOSE | 199 | pr_debug(" PARTITION[%d] ->\n" |
204 | if (CONFIG_MTD_DEBUG_VERBOSE >= 2) { | 200 | " virtualUnits = %d\n" |
205 | printk(" PARTITION[%d] ->\n" | 201 | " firstUnit = %d\n" |
206 | " virtualUnits = %d\n" | 202 | " lastUnit = %d\n" |
207 | " firstUnit = %d\n" | 203 | " flags = 0x%x\n" |
208 | " lastUnit = %d\n" | 204 | " spareUnits = %d\n", |
209 | " flags = 0x%x\n" | 205 | i, ip->virtualUnits, ip->firstUnit, |
210 | " spareUnits = %d\n", | 206 | ip->lastUnit, ip->flags, |
211 | i, ip->virtualUnits, ip->firstUnit, | 207 | ip->spareUnits); |
212 | ip->lastUnit, ip->flags, | ||
213 | ip->spareUnits); | ||
214 | } | ||
215 | #endif | ||
216 | 208 | ||
217 | if (ip->Reserved0 != ip->firstUnit) { | 209 | if (ip->Reserved0 != ip->firstUnit) { |
218 | struct erase_info *instr = &inftl->instr; | 210 | struct erase_info *instr = &inftl->instr; |
@@ -375,7 +367,7 @@ static int check_free_sectors(struct INFTLrecord *inftl, unsigned int address, | |||
375 | * | 367 | * |
376 | * Return: 0 when succeed, -1 on error. | 368 | * Return: 0 when succeed, -1 on error. |
377 | * | 369 | * |
378 | * ToDo: 1. Is it neceressary to check_free_sector after erasing ?? | 370 | * ToDo: 1. Is it necessary to check_free_sector after erasing ?? |
379 | */ | 371 | */ |
380 | int INFTL_formatblock(struct INFTLrecord *inftl, int block) | 372 | int INFTL_formatblock(struct INFTLrecord *inftl, int block) |
381 | { | 373 | { |
@@ -385,8 +377,7 @@ int INFTL_formatblock(struct INFTLrecord *inftl, int block) | |||
385 | struct mtd_info *mtd = inftl->mbd.mtd; | 377 | struct mtd_info *mtd = inftl->mbd.mtd; |
386 | int physblock; | 378 | int physblock; |
387 | 379 | ||
388 | DEBUG(MTD_DEBUG_LEVEL3, "INFTL: INFTL_formatblock(inftl=%p," | 380 | pr_debug("INFTL: INFTL_formatblock(inftl=%p,block=%d)\n", inftl, block); |
389 | "block=%d)\n", inftl, block); | ||
390 | 381 | ||
391 | memset(instr, 0, sizeof(struct erase_info)); | 382 | memset(instr, 0, sizeof(struct erase_info)); |
392 | 383 | ||
@@ -476,30 +467,30 @@ void INFTL_dumptables(struct INFTLrecord *s) | |||
476 | { | 467 | { |
477 | int i; | 468 | int i; |
478 | 469 | ||
479 | printk("-------------------------------------------" | 470 | pr_debug("-------------------------------------------" |
480 | "----------------------------------\n"); | 471 | "----------------------------------\n"); |
481 | 472 | ||
482 | printk("VUtable[%d] ->", s->nb_blocks); | 473 | pr_debug("VUtable[%d] ->", s->nb_blocks); |
483 | for (i = 0; i < s->nb_blocks; i++) { | 474 | for (i = 0; i < s->nb_blocks; i++) { |
484 | if ((i % 8) == 0) | 475 | if ((i % 8) == 0) |
485 | printk("\n%04x: ", i); | 476 | pr_debug("\n%04x: ", i); |
486 | printk("%04x ", s->VUtable[i]); | 477 | pr_debug("%04x ", s->VUtable[i]); |
487 | } | 478 | } |
488 | 479 | ||
489 | printk("\n-------------------------------------------" | 480 | pr_debug("\n-------------------------------------------" |
490 | "----------------------------------\n"); | 481 | "----------------------------------\n"); |
491 | 482 | ||
492 | printk("PUtable[%d-%d=%d] ->", s->firstEUN, s->lastEUN, s->nb_blocks); | 483 | pr_debug("PUtable[%d-%d=%d] ->", s->firstEUN, s->lastEUN, s->nb_blocks); |
493 | for (i = 0; i <= s->lastEUN; i++) { | 484 | for (i = 0; i <= s->lastEUN; i++) { |
494 | if ((i % 8) == 0) | 485 | if ((i % 8) == 0) |
495 | printk("\n%04x: ", i); | 486 | pr_debug("\n%04x: ", i); |
496 | printk("%04x ", s->PUtable[i]); | 487 | pr_debug("%04x ", s->PUtable[i]); |
497 | } | 488 | } |
498 | 489 | ||
499 | printk("\n-------------------------------------------" | 490 | pr_debug("\n-------------------------------------------" |
500 | "----------------------------------\n"); | 491 | "----------------------------------\n"); |
501 | 492 | ||
502 | printk("INFTL ->\n" | 493 | pr_debug("INFTL ->\n" |
503 | " EraseSize = %d\n" | 494 | " EraseSize = %d\n" |
504 | " h/s/c = %d/%d/%d\n" | 495 | " h/s/c = %d/%d/%d\n" |
505 | " numvunits = %d\n" | 496 | " numvunits = %d\n" |
@@ -513,7 +504,7 @@ void INFTL_dumptables(struct INFTLrecord *s) | |||
513 | s->numvunits, s->firstEUN, s->lastEUN, s->numfreeEUNs, | 504 | s->numvunits, s->firstEUN, s->lastEUN, s->numfreeEUNs, |
514 | s->LastFreeEUN, s->nb_blocks, s->nb_boot_blocks); | 505 | s->LastFreeEUN, s->nb_blocks, s->nb_boot_blocks); |
515 | 506 | ||
516 | printk("\n-------------------------------------------" | 507 | pr_debug("\n-------------------------------------------" |
517 | "----------------------------------\n"); | 508 | "----------------------------------\n"); |
518 | } | 509 | } |
519 | 510 | ||
@@ -521,25 +512,25 @@ void INFTL_dumpVUchains(struct INFTLrecord *s) | |||
521 | { | 512 | { |
522 | int logical, block, i; | 513 | int logical, block, i; |
523 | 514 | ||
524 | printk("-------------------------------------------" | 515 | pr_debug("-------------------------------------------" |
525 | "----------------------------------\n"); | 516 | "----------------------------------\n"); |
526 | 517 | ||
527 | printk("INFTL Virtual Unit Chains:\n"); | 518 | pr_debug("INFTL Virtual Unit Chains:\n"); |
528 | for (logical = 0; logical < s->nb_blocks; logical++) { | 519 | for (logical = 0; logical < s->nb_blocks; logical++) { |
529 | block = s->VUtable[logical]; | 520 | block = s->VUtable[logical]; |
530 | if (block > s->nb_blocks) | 521 | if (block > s->nb_blocks) |
531 | continue; | 522 | continue; |
532 | printk(" LOGICAL %d --> %d ", logical, block); | 523 | pr_debug(" LOGICAL %d --> %d ", logical, block); |
533 | for (i = 0; i < s->nb_blocks; i++) { | 524 | for (i = 0; i < s->nb_blocks; i++) { |
534 | if (s->PUtable[block] == BLOCK_NIL) | 525 | if (s->PUtable[block] == BLOCK_NIL) |
535 | break; | 526 | break; |
536 | block = s->PUtable[block]; | 527 | block = s->PUtable[block]; |
537 | printk("%d ", block); | 528 | pr_debug("%d ", block); |
538 | } | 529 | } |
539 | printk("\n"); | 530 | pr_debug("\n"); |
540 | } | 531 | } |
541 | 532 | ||
542 | printk("-------------------------------------------" | 533 | pr_debug("-------------------------------------------" |
543 | "----------------------------------\n"); | 534 | "----------------------------------\n"); |
544 | } | 535 | } |
545 | 536 | ||
@@ -555,7 +546,7 @@ int INFTL_mount(struct INFTLrecord *s) | |||
555 | int i; | 546 | int i; |
556 | u8 *ANACtable, ANAC; | 547 | u8 *ANACtable, ANAC; |
557 | 548 | ||
558 | DEBUG(MTD_DEBUG_LEVEL3, "INFTL: INFTL_mount(inftl=%p)\n", s); | 549 | pr_debug("INFTL: INFTL_mount(inftl=%p)\n", s); |
559 | 550 | ||
560 | /* Search for INFTL MediaHeader and Spare INFTL Media Header */ | 551 | /* Search for INFTL MediaHeader and Spare INFTL Media Header */ |
561 | if (find_boot_record(s) < 0) { | 552 | if (find_boot_record(s) < 0) { |
@@ -585,7 +576,7 @@ int INFTL_mount(struct INFTLrecord *s) | |||
585 | * NOTEXPLORED state. Then at the end we will try to format it and | 576 | * NOTEXPLORED state. Then at the end we will try to format it and |
586 | * mark it as free. | 577 | * mark it as free. |
587 | */ | 578 | */ |
588 | DEBUG(MTD_DEBUG_LEVEL3, "INFTL: pass 1, explore each unit\n"); | 579 | pr_debug("INFTL: pass 1, explore each unit\n"); |
589 | for (first_block = s->firstEUN; first_block <= s->lastEUN; first_block++) { | 580 | for (first_block = s->firstEUN; first_block <= s->lastEUN; first_block++) { |
590 | if (s->PUtable[first_block] != BLOCK_NOTEXPLORED) | 581 | if (s->PUtable[first_block] != BLOCK_NOTEXPLORED) |
591 | continue; | 582 | continue; |
@@ -717,17 +708,14 @@ int INFTL_mount(struct INFTLrecord *s) | |||
717 | logical_block = BLOCK_NIL; | 708 | logical_block = BLOCK_NIL; |
718 | } | 709 | } |
719 | 710 | ||
720 | #ifdef CONFIG_MTD_DEBUG_VERBOSE | 711 | INFTL_dumptables(s); |
721 | if (CONFIG_MTD_DEBUG_VERBOSE >= 2) | ||
722 | INFTL_dumptables(s); | ||
723 | #endif | ||
724 | 712 | ||
725 | /* | 713 | /* |
726 | * Second pass, check for infinite loops in chains. These are | 714 | * Second pass, check for infinite loops in chains. These are |
727 | * possible because we don't update the previous pointers when | 715 | * possible because we don't update the previous pointers when |
728 | * we fold chains. No big deal, just fix them up in PUtable. | 716 | * we fold chains. No big deal, just fix them up in PUtable. |
729 | */ | 717 | */ |
730 | DEBUG(MTD_DEBUG_LEVEL3, "INFTL: pass 2, validate virtual chains\n"); | 718 | pr_debug("INFTL: pass 2, validate virtual chains\n"); |
731 | for (logical_block = 0; logical_block < s->numvunits; logical_block++) { | 719 | for (logical_block = 0; logical_block < s->numvunits; logical_block++) { |
732 | block = s->VUtable[logical_block]; | 720 | block = s->VUtable[logical_block]; |
733 | last_block = BLOCK_NIL; | 721 | last_block = BLOCK_NIL; |
@@ -772,12 +760,8 @@ int INFTL_mount(struct INFTLrecord *s) | |||
772 | } | 760 | } |
773 | } | 761 | } |
774 | 762 | ||
775 | #ifdef CONFIG_MTD_DEBUG_VERBOSE | 763 | INFTL_dumptables(s); |
776 | if (CONFIG_MTD_DEBUG_VERBOSE >= 2) | 764 | INFTL_dumpVUchains(s); |
777 | INFTL_dumptables(s); | ||
778 | if (CONFIG_MTD_DEBUG_VERBOSE >= 2) | ||
779 | INFTL_dumpVUchains(s); | ||
780 | #endif | ||
781 | 765 | ||
782 | /* | 766 | /* |
783 | * Third pass, format unreferenced blocks and init free block count. | 767 | * Third pass, format unreferenced blocks and init free block count. |
@@ -785,7 +769,7 @@ int INFTL_mount(struct INFTLrecord *s) | |||
785 | s->numfreeEUNs = 0; | 769 | s->numfreeEUNs = 0; |
786 | s->LastFreeEUN = BLOCK_NIL; | 770 | s->LastFreeEUN = BLOCK_NIL; |
787 | 771 | ||
788 | DEBUG(MTD_DEBUG_LEVEL3, "INFTL: pass 3, format unused blocks\n"); | 772 | pr_debug("INFTL: pass 3, format unused blocks\n"); |
789 | for (block = s->firstEUN; block <= s->lastEUN; block++) { | 773 | for (block = s->firstEUN; block <= s->lastEUN; block++) { |
790 | if (s->PUtable[block] == BLOCK_NOTEXPLORED) { | 774 | if (s->PUtable[block] == BLOCK_NOTEXPLORED) { |
791 | printk("INFTL: unreferenced block %d, formatting it\n", | 775 | printk("INFTL: unreferenced block %d, formatting it\n", |
diff --git a/drivers/mtd/maps/Kconfig b/drivers/mtd/maps/Kconfig index c0c328c5b133..8e0c4bf9f7fb 100644 --- a/drivers/mtd/maps/Kconfig +++ b/drivers/mtd/maps/Kconfig | |||
@@ -41,8 +41,6 @@ config MTD_PHYSMAP_START | |||
41 | are mapped on your particular target board. Refer to the | 41 | are mapped on your particular target board. Refer to the |
42 | memory map which should hopefully be in the documentation for | 42 | memory map which should hopefully be in the documentation for |
43 | your board. | 43 | your board. |
44 | Ignore this option if you use run-time physmap configuration | ||
45 | (i.e., run-time calling physmap_configure()). | ||
46 | 44 | ||
47 | config MTD_PHYSMAP_LEN | 45 | config MTD_PHYSMAP_LEN |
48 | hex "Physical length of flash mapping" | 46 | hex "Physical length of flash mapping" |
@@ -55,8 +53,6 @@ config MTD_PHYSMAP_LEN | |||
55 | than the total amount of flash present. Refer to the memory | 53 | than the total amount of flash present. Refer to the memory |
56 | map which should hopefully be in the documentation for your | 54 | map which should hopefully be in the documentation for your |
57 | board. | 55 | board. |
58 | Ignore this option if you use run-time physmap configuration | ||
59 | (i.e., run-time calling physmap_configure()). | ||
60 | 56 | ||
61 | config MTD_PHYSMAP_BANKWIDTH | 57 | config MTD_PHYSMAP_BANKWIDTH |
62 | int "Bank width in octets" | 58 | int "Bank width in octets" |
@@ -67,8 +63,6 @@ config MTD_PHYSMAP_BANKWIDTH | |||
67 | in octets. For example, if you have a data bus width of 32 | 63 | in octets. For example, if you have a data bus width of 32 |
68 | bits, you would set the bus width octet value to 4. This is | 64 | bits, you would set the bus width octet value to 4. This is |
69 | used internally by the CFI drivers. | 65 | used internally by the CFI drivers. |
70 | Ignore this option if you use run-time physmap configuration | ||
71 | (i.e., run-time calling physmap_configure()). | ||
72 | 66 | ||
73 | config MTD_PHYSMAP_OF | 67 | config MTD_PHYSMAP_OF |
74 | tristate "Flash device in physical memory map based on OF description" | 68 | tristate "Flash device in physical memory map based on OF description" |
@@ -260,7 +254,6 @@ config MTD_BCM963XX | |||
260 | config MTD_LANTIQ | 254 | config MTD_LANTIQ |
261 | tristate "Lantiq SoC NOR support" | 255 | tristate "Lantiq SoC NOR support" |
262 | depends on LANTIQ | 256 | depends on LANTIQ |
263 | select MTD_PARTITIONS | ||
264 | help | 257 | help |
265 | Support for NOR flash attached to the Lantiq SoC's External Bus Unit. | 258 | Support for NOR flash attached to the Lantiq SoC's External Bus Unit. |
266 | 259 | ||
@@ -339,10 +332,6 @@ config MTD_SOLUTIONENGINE | |||
339 | This enables access to the flash chips on the Hitachi SolutionEngine and | 332 | This enables access to the flash chips on the Hitachi SolutionEngine and |
340 | similar boards. Say 'Y' if you are building a kernel for such a board. | 333 | similar boards. Say 'Y' if you are building a kernel for such a board. |
341 | 334 | ||
342 | config MTD_ARM_INTEGRATOR | ||
343 | tristate "CFI Flash device mapped on ARM Integrator/P720T" | ||
344 | depends on ARM && MTD_CFI | ||
345 | |||
346 | config MTD_CDB89712 | 335 | config MTD_CDB89712 |
347 | tristate "Cirrus CDB89712 evaluation board mappings" | 336 | tristate "Cirrus CDB89712 evaluation board mappings" |
348 | depends on MTD_CFI && ARCH_CDB89712 | 337 | depends on MTD_CFI && ARCH_CDB89712 |
@@ -398,13 +387,6 @@ config MTD_AUTCPU12 | |||
398 | This enables access to the NV-RAM on autronix autcpu12 board. | 387 | This enables access to the NV-RAM on autronix autcpu12 board. |
399 | If you have such a board, say 'Y'. | 388 | If you have such a board, say 'Y'. |
400 | 389 | ||
401 | config MTD_EDB7312 | ||
402 | tristate "CFI Flash device mapped on EDB7312" | ||
403 | depends on ARCH_EDB7312 && MTD_CFI | ||
404 | help | ||
405 | This enables access to the CFI Flash on the Cogent EDB7312 board. | ||
406 | If you have such a board, say 'Y' here. | ||
407 | |||
408 | config MTD_IMPA7 | 390 | config MTD_IMPA7 |
409 | tristate "JEDEC Flash device mapped on impA7" | 391 | tristate "JEDEC Flash device mapped on impA7" |
410 | depends on ARM && MTD_JEDECPROBE | 392 | depends on ARM && MTD_JEDECPROBE |
@@ -412,14 +394,6 @@ config MTD_IMPA7 | |||
412 | This enables access to the NOR Flash on the impA7 board of | 394 | This enables access to the NOR Flash on the impA7 board of |
413 | implementa GmbH. If you have such a board, say 'Y' here. | 395 | implementa GmbH. If you have such a board, say 'Y' here. |
414 | 396 | ||
415 | config MTD_CEIVA | ||
416 | tristate "JEDEC Flash device mapped on Ceiva/Polaroid PhotoMax Digital Picture Frame" | ||
417 | depends on MTD_JEDECPROBE && ARCH_CEIVA | ||
418 | help | ||
419 | This enables access to the flash chips on the Ceiva/Polaroid | ||
420 | PhotoMax Digital Picture Frame. | ||
421 | If you have such a device, say 'Y'. | ||
422 | |||
423 | config MTD_H720X | 397 | config MTD_H720X |
424 | tristate "Hynix evaluation board mappings" | 398 | tristate "Hynix evaluation board mappings" |
425 | depends on MTD_CFI && ( ARCH_H7201 || ARCH_H7202 ) | 399 | depends on MTD_CFI && ( ARCH_H7201 || ARCH_H7202 ) |
diff --git a/drivers/mtd/maps/Makefile b/drivers/mtd/maps/Makefile index cb48b11affff..45dcb8b14f22 100644 --- a/drivers/mtd/maps/Makefile +++ b/drivers/mtd/maps/Makefile | |||
@@ -19,7 +19,6 @@ obj-$(CONFIG_MTD_CK804XROM) += ck804xrom.o | |||
19 | obj-$(CONFIG_MTD_TSUNAMI) += tsunami_flash.o | 19 | obj-$(CONFIG_MTD_TSUNAMI) += tsunami_flash.o |
20 | obj-$(CONFIG_MTD_PXA2XX) += pxa2xx-flash.o | 20 | obj-$(CONFIG_MTD_PXA2XX) += pxa2xx-flash.o |
21 | obj-$(CONFIG_MTD_MBX860) += mbx860.o | 21 | obj-$(CONFIG_MTD_MBX860) += mbx860.o |
22 | obj-$(CONFIG_MTD_CEIVA) += ceiva.o | ||
23 | obj-$(CONFIG_MTD_OCTAGON) += octagon-5066.o | 22 | obj-$(CONFIG_MTD_OCTAGON) += octagon-5066.o |
24 | obj-$(CONFIG_MTD_PHYSMAP) += physmap.o | 23 | obj-$(CONFIG_MTD_PHYSMAP) += physmap.o |
25 | obj-$(CONFIG_MTD_PHYSMAP_OF) += physmap_of.o | 24 | obj-$(CONFIG_MTD_PHYSMAP_OF) += physmap_of.o |
@@ -40,7 +39,6 @@ obj-$(CONFIG_MTD_DBOX2) += dbox2-flash.o | |||
40 | obj-$(CONFIG_MTD_SOLUTIONENGINE)+= solutionengine.o | 39 | obj-$(CONFIG_MTD_SOLUTIONENGINE)+= solutionengine.o |
41 | obj-$(CONFIG_MTD_PCI) += pci.o | 40 | obj-$(CONFIG_MTD_PCI) += pci.o |
42 | obj-$(CONFIG_MTD_AUTCPU12) += autcpu12-nvram.o | 41 | obj-$(CONFIG_MTD_AUTCPU12) += autcpu12-nvram.o |
43 | obj-$(CONFIG_MTD_EDB7312) += edb7312.o | ||
44 | obj-$(CONFIG_MTD_IMPA7) += impa7.o | 42 | obj-$(CONFIG_MTD_IMPA7) += impa7.o |
45 | obj-$(CONFIG_MTD_FORTUNET) += fortunet.o | 43 | obj-$(CONFIG_MTD_FORTUNET) += fortunet.o |
46 | obj-$(CONFIG_MTD_UCLINUX) += uclinux.o | 44 | obj-$(CONFIG_MTD_UCLINUX) += uclinux.o |
diff --git a/drivers/mtd/maps/bcm963xx-flash.c b/drivers/mtd/maps/bcm963xx-flash.c index 608967fe74c6..736ca10ca9f1 100644 --- a/drivers/mtd/maps/bcm963xx-flash.c +++ b/drivers/mtd/maps/bcm963xx-flash.c | |||
@@ -21,6 +21,7 @@ | |||
21 | #include <linux/init.h> | 21 | #include <linux/init.h> |
22 | #include <linux/kernel.h> | 22 | #include <linux/kernel.h> |
23 | #include <linux/slab.h> | 23 | #include <linux/slab.h> |
24 | #include <linux/module.h> | ||
24 | #include <linux/mtd/map.h> | 25 | #include <linux/mtd/map.h> |
25 | #include <linux/mtd/mtd.h> | 26 | #include <linux/mtd/mtd.h> |
26 | #include <linux/mtd/partitions.h> | 27 | #include <linux/mtd/partitions.h> |
diff --git a/drivers/mtd/maps/bfin-async-flash.c b/drivers/mtd/maps/bfin-async-flash.c index 67815eed2f00..6d6b2b5674ee 100644 --- a/drivers/mtd/maps/bfin-async-flash.c +++ b/drivers/mtd/maps/bfin-async-flash.c | |||
@@ -41,7 +41,6 @@ struct async_state { | |||
41 | uint32_t flash_ambctl0, flash_ambctl1; | 41 | uint32_t flash_ambctl0, flash_ambctl1; |
42 | uint32_t save_ambctl0, save_ambctl1; | 42 | uint32_t save_ambctl0, save_ambctl1; |
43 | unsigned long irq_flags; | 43 | unsigned long irq_flags; |
44 | struct mtd_partition *parts; | ||
45 | }; | 44 | }; |
46 | 45 | ||
47 | static void switch_to_flash(struct async_state *state) | 46 | static void switch_to_flash(struct async_state *state) |
@@ -165,18 +164,8 @@ static int __devinit bfin_flash_probe(struct platform_device *pdev) | |||
165 | return -ENXIO; | 164 | return -ENXIO; |
166 | } | 165 | } |
167 | 166 | ||
168 | ret = parse_mtd_partitions(state->mtd, part_probe_types, &pdata->parts, 0); | 167 | mtd_device_parse_register(state->mtd, part_probe_types, 0, |
169 | if (ret > 0) { | 168 | pdata->parts, pdata->nr_parts); |
170 | pr_devinit(KERN_NOTICE DRIVER_NAME ": Using commandline partition definition\n"); | ||
171 | mtd_device_register(state->mtd, pdata->parts, ret); | ||
172 | state->parts = pdata->parts; | ||
173 | } else if (pdata->nr_parts) { | ||
174 | pr_devinit(KERN_NOTICE DRIVER_NAME ": Using board partition definition\n"); | ||
175 | mtd_device_register(state->mtd, pdata->parts, pdata->nr_parts); | ||
176 | } else { | ||
177 | pr_devinit(KERN_NOTICE DRIVER_NAME ": no partition info available, registering whole flash at once\n"); | ||
178 | mtd_device_register(state->mtd, NULL, 0); | ||
179 | } | ||
180 | 169 | ||
181 | platform_set_drvdata(pdev, state); | 170 | platform_set_drvdata(pdev, state); |
182 | 171 | ||
@@ -188,7 +177,6 @@ static int __devexit bfin_flash_remove(struct platform_device *pdev) | |||
188 | struct async_state *state = platform_get_drvdata(pdev); | 177 | struct async_state *state = platform_get_drvdata(pdev); |
189 | gpio_free(state->enet_flash_pin); | 178 | gpio_free(state->enet_flash_pin); |
190 | mtd_device_unregister(state->mtd); | 179 | mtd_device_unregister(state->mtd); |
191 | kfree(state->parts); | ||
192 | map_destroy(state->mtd); | 180 | map_destroy(state->mtd); |
193 | kfree(state); | 181 | kfree(state); |
194 | return 0; | 182 | return 0; |
diff --git a/drivers/mtd/maps/ceiva.c b/drivers/mtd/maps/ceiva.c deleted file mode 100644 index 06f9c9815720..000000000000 --- a/drivers/mtd/maps/ceiva.c +++ /dev/null | |||
@@ -1,341 +0,0 @@ | |||
1 | /* | ||
2 | * Ceiva flash memory driver. | ||
3 | * Copyright (C) 2002 Rob Scott <rscott@mtrob.fdns.net> | ||
4 | * | ||
5 | * Note: this driver supports jedec compatible devices. Modification | ||
6 | * for CFI compatible devices should be straight forward: change | ||
7 | * jedec_probe to cfi_probe. | ||
8 | * | ||
9 | * Based on: sa1100-flash.c, which has the following copyright: | ||
10 | * Flash memory access on SA11x0 based devices | ||
11 | * | ||
12 | * (C) 2000 Nicolas Pitre <nico@fluxnic.net> | ||
13 | * | ||
14 | */ | ||
15 | |||
16 | #include <linux/module.h> | ||
17 | #include <linux/types.h> | ||
18 | #include <linux/ioport.h> | ||
19 | #include <linux/kernel.h> | ||
20 | #include <linux/init.h> | ||
21 | #include <linux/slab.h> | ||
22 | |||
23 | #include <linux/mtd/mtd.h> | ||
24 | #include <linux/mtd/map.h> | ||
25 | #include <linux/mtd/partitions.h> | ||
26 | #include <linux/mtd/concat.h> | ||
27 | |||
28 | #include <mach/hardware.h> | ||
29 | #include <asm/mach-types.h> | ||
30 | #include <asm/io.h> | ||
31 | #include <asm/sizes.h> | ||
32 | |||
33 | /* | ||
34 | * This isn't complete yet, so... | ||
35 | */ | ||
36 | #define CONFIG_MTD_CEIVA_STATICMAP | ||
37 | |||
38 | #ifdef CONFIG_MTD_CEIVA_STATICMAP | ||
39 | /* | ||
40 | * See include/linux/mtd/partitions.h for definition of the mtd_partition | ||
41 | * structure. | ||
42 | * | ||
43 | * Please note: | ||
44 | * 1. The flash size given should be the largest flash size that can | ||
45 | * be accommodated. | ||
46 | * | ||
47 | * 2. The bus width must defined in clps_setup_flash. | ||
48 | * | ||
49 | * The MTD layer will detect flash chip aliasing and reduce the size of | ||
50 | * the map accordingly. | ||
51 | * | ||
52 | */ | ||
53 | |||
54 | #ifdef CONFIG_ARCH_CEIVA | ||
55 | /* Flash / Partition sizing */ | ||
56 | /* For the 28F8003, we use the block mapping to calcuate the sizes */ | ||
57 | #define MAX_SIZE_KiB (16 + 8 + 8 + 96 + (7*128)) | ||
58 | #define BOOT_PARTITION_SIZE_KiB (16) | ||
59 | #define PARAMS_PARTITION_SIZE_KiB (8) | ||
60 | #define KERNEL_PARTITION_SIZE_KiB (4*128) | ||
61 | /* Use both remaining portion of first flash, and all of second flash */ | ||
62 | #define ROOT_PARTITION_SIZE_KiB (3*128) + (8*128) | ||
63 | |||
64 | static struct mtd_partition ceiva_partitions[] = { | ||
65 | { | ||
66 | .name = "Ceiva BOOT partition", | ||
67 | .size = BOOT_PARTITION_SIZE_KiB*1024, | ||
68 | .offset = 0, | ||
69 | |||
70 | },{ | ||
71 | .name = "Ceiva parameters partition", | ||
72 | .size = PARAMS_PARTITION_SIZE_KiB*1024, | ||
73 | .offset = (16 + 8) * 1024, | ||
74 | },{ | ||
75 | .name = "Ceiva kernel partition", | ||
76 | .size = (KERNEL_PARTITION_SIZE_KiB)*1024, | ||
77 | .offset = 0x20000, | ||
78 | |||
79 | },{ | ||
80 | .name = "Ceiva root filesystem partition", | ||
81 | .offset = MTDPART_OFS_APPEND, | ||
82 | .size = (ROOT_PARTITION_SIZE_KiB)*1024, | ||
83 | } | ||
84 | }; | ||
85 | #endif | ||
86 | |||
87 | static int __init clps_static_partitions(struct mtd_partition **parts) | ||
88 | { | ||
89 | int nb_parts = 0; | ||
90 | |||
91 | #ifdef CONFIG_ARCH_CEIVA | ||
92 | if (machine_is_ceiva()) { | ||
93 | *parts = ceiva_partitions; | ||
94 | nb_parts = ARRAY_SIZE(ceiva_partitions); | ||
95 | } | ||
96 | #endif | ||
97 | return nb_parts; | ||
98 | } | ||
99 | #endif | ||
100 | |||
101 | struct clps_info { | ||
102 | unsigned long base; | ||
103 | unsigned long size; | ||
104 | int width; | ||
105 | void *vbase; | ||
106 | struct map_info *map; | ||
107 | struct mtd_info *mtd; | ||
108 | struct resource *res; | ||
109 | }; | ||
110 | |||
111 | #define NR_SUBMTD 4 | ||
112 | |||
113 | static struct clps_info info[NR_SUBMTD]; | ||
114 | |||
115 | static int __init clps_setup_mtd(struct clps_info *clps, int nr, struct mtd_info **rmtd) | ||
116 | { | ||
117 | struct mtd_info *subdev[nr]; | ||
118 | struct map_info *maps; | ||
119 | int i, found = 0, ret = 0; | ||
120 | |||
121 | /* | ||
122 | * Allocate the map_info structs in one go. | ||
123 | */ | ||
124 | maps = kzalloc(sizeof(struct map_info) * nr, GFP_KERNEL); | ||
125 | if (!maps) | ||
126 | return -ENOMEM; | ||
127 | /* | ||
128 | * Claim and then map the memory regions. | ||
129 | */ | ||
130 | for (i = 0; i < nr; i++) { | ||
131 | if (clps[i].base == (unsigned long)-1) | ||
132 | break; | ||
133 | |||
134 | clps[i].res = request_mem_region(clps[i].base, clps[i].size, "clps flash"); | ||
135 | if (!clps[i].res) { | ||
136 | ret = -EBUSY; | ||
137 | break; | ||
138 | } | ||
139 | |||
140 | clps[i].map = maps + i; | ||
141 | |||
142 | clps[i].map->name = "clps flash"; | ||
143 | clps[i].map->phys = clps[i].base; | ||
144 | |||
145 | clps[i].vbase = ioremap(clps[i].base, clps[i].size); | ||
146 | if (!clps[i].vbase) { | ||
147 | ret = -ENOMEM; | ||
148 | break; | ||
149 | } | ||
150 | |||
151 | clps[i].map->virt = (void __iomem *)clps[i].vbase; | ||
152 | clps[i].map->bankwidth = clps[i].width; | ||
153 | clps[i].map->size = clps[i].size; | ||
154 | |||
155 | simple_map_init(&clps[i].map); | ||
156 | |||
157 | clps[i].mtd = do_map_probe("jedec_probe", clps[i].map); | ||
158 | if (clps[i].mtd == NULL) { | ||
159 | ret = -ENXIO; | ||
160 | break; | ||
161 | } | ||
162 | clps[i].mtd->owner = THIS_MODULE; | ||
163 | subdev[i] = clps[i].mtd; | ||
164 | |||
165 | printk(KERN_INFO "clps flash: JEDEC device at 0x%08lx, %dMiB, " | ||
166 | "%d-bit\n", clps[i].base, clps[i].mtd->size >> 20, | ||
167 | clps[i].width * 8); | ||
168 | found += 1; | ||
169 | } | ||
170 | |||
171 | /* | ||
172 | * ENXIO is special. It means we didn't find a chip when | ||
173 | * we probed. We need to tear down the mapping, free the | ||
174 | * resource and mark it as such. | ||
175 | */ | ||
176 | if (ret == -ENXIO) { | ||
177 | iounmap(clps[i].vbase); | ||
178 | clps[i].vbase = NULL; | ||
179 | release_resource(clps[i].res); | ||
180 | clps[i].res = NULL; | ||
181 | } | ||
182 | |||
183 | /* | ||
184 | * If we found one device, don't bother with concat support. | ||
185 | * If we found multiple devices, use concat if we have it | ||
186 | * available, otherwise fail. | ||
187 | */ | ||
188 | if (ret == 0 || ret == -ENXIO) { | ||
189 | if (found == 1) { | ||
190 | *rmtd = subdev[0]; | ||
191 | ret = 0; | ||
192 | } else if (found > 1) { | ||
193 | /* | ||
194 | * We detected multiple devices. Concatenate | ||
195 | * them together. | ||
196 | */ | ||
197 | *rmtd = mtd_concat_create(subdev, found, | ||
198 | "clps flash"); | ||
199 | if (*rmtd == NULL) | ||
200 | ret = -ENXIO; | ||
201 | } | ||
202 | } | ||
203 | |||
204 | /* | ||
205 | * If we failed, clean up. | ||
206 | */ | ||
207 | if (ret) { | ||
208 | do { | ||
209 | if (clps[i].mtd) | ||
210 | map_destroy(clps[i].mtd); | ||
211 | if (clps[i].vbase) | ||
212 | iounmap(clps[i].vbase); | ||
213 | if (clps[i].res) | ||
214 | release_resource(clps[i].res); | ||
215 | } while (i--); | ||
216 | |||
217 | kfree(maps); | ||
218 | } | ||
219 | |||
220 | return ret; | ||
221 | } | ||
222 | |||
223 | static void __exit clps_destroy_mtd(struct clps_info *clps, struct mtd_info *mtd) | ||
224 | { | ||
225 | int i; | ||
226 | |||
227 | mtd_device_unregister(mtd); | ||
228 | |||
229 | if (mtd != clps[0].mtd) | ||
230 | mtd_concat_destroy(mtd); | ||
231 | |||
232 | for (i = NR_SUBMTD; i >= 0; i--) { | ||
233 | if (clps[i].mtd) | ||
234 | map_destroy(clps[i].mtd); | ||
235 | if (clps[i].vbase) | ||
236 | iounmap(clps[i].vbase); | ||
237 | if (clps[i].res) | ||
238 | release_resource(clps[i].res); | ||
239 | } | ||
240 | kfree(clps[0].map); | ||
241 | } | ||
242 | |||
243 | /* | ||
244 | * We define the memory space, size, and width for the flash memory | ||
245 | * space here. | ||
246 | */ | ||
247 | |||
248 | static int __init clps_setup_flash(void) | ||
249 | { | ||
250 | int nr = 0; | ||
251 | |||
252 | #ifdef CONFIG_ARCH_CEIVA | ||
253 | if (machine_is_ceiva()) { | ||
254 | info[0].base = CS0_PHYS_BASE; | ||
255 | info[0].size = SZ_32M; | ||
256 | info[0].width = CEIVA_FLASH_WIDTH; | ||
257 | info[1].base = CS1_PHYS_BASE; | ||
258 | info[1].size = SZ_32M; | ||
259 | info[1].width = CEIVA_FLASH_WIDTH; | ||
260 | nr = 2; | ||
261 | } | ||
262 | #endif | ||
263 | return nr; | ||
264 | } | ||
265 | |||
266 | static struct mtd_partition *parsed_parts; | ||
267 | static const char *probes[] = { "cmdlinepart", "RedBoot", NULL }; | ||
268 | |||
269 | static void __init clps_locate_partitions(struct mtd_info *mtd) | ||
270 | { | ||
271 | const char *part_type = NULL; | ||
272 | int nr_parts = 0; | ||
273 | do { | ||
274 | /* | ||
275 | * Partition selection stuff. | ||
276 | */ | ||
277 | nr_parts = parse_mtd_partitions(mtd, probes, &parsed_parts, 0); | ||
278 | if (nr_parts > 0) { | ||
279 | part_type = "command line"; | ||
280 | break; | ||
281 | } | ||
282 | #ifdef CONFIG_MTD_CEIVA_STATICMAP | ||
283 | nr_parts = clps_static_partitions(&parsed_parts); | ||
284 | if (nr_parts > 0) { | ||
285 | part_type = "static"; | ||
286 | break; | ||
287 | } | ||
288 | printk("found: %d partitions\n", nr_parts); | ||
289 | #endif | ||
290 | } while (0); | ||
291 | |||
292 | if (nr_parts == 0) { | ||
293 | printk(KERN_NOTICE "clps flash: no partition info " | ||
294 | "available, registering whole flash\n"); | ||
295 | mtd_device_register(mtd, NULL, 0); | ||
296 | } else { | ||
297 | printk(KERN_NOTICE "clps flash: using %s partition " | ||
298 | "definition\n", part_type); | ||
299 | mtd_device_register(mtd, parsed_parts, nr_parts); | ||
300 | } | ||
301 | |||
302 | /* Always succeeds. */ | ||
303 | } | ||
304 | |||
305 | static void __exit clps_destroy_partitions(void) | ||
306 | { | ||
307 | kfree(parsed_parts); | ||
308 | } | ||
309 | |||
310 | static struct mtd_info *mymtd; | ||
311 | |||
312 | static int __init clps_mtd_init(void) | ||
313 | { | ||
314 | int ret; | ||
315 | int nr; | ||
316 | |||
317 | nr = clps_setup_flash(); | ||
318 | if (nr < 0) | ||
319 | return nr; | ||
320 | |||
321 | ret = clps_setup_mtd(info, nr, &mymtd); | ||
322 | if (ret) | ||
323 | return ret; | ||
324 | |||
325 | clps_locate_partitions(mymtd); | ||
326 | |||
327 | return 0; | ||
328 | } | ||
329 | |||
330 | static void __exit clps_mtd_cleanup(void) | ||
331 | { | ||
332 | clps_destroy_mtd(info, mymtd); | ||
333 | clps_destroy_partitions(); | ||
334 | } | ||
335 | |||
336 | module_init(clps_mtd_init); | ||
337 | module_exit(clps_mtd_cleanup); | ||
338 | |||
339 | MODULE_AUTHOR("Rob Scott"); | ||
340 | MODULE_DESCRIPTION("Cirrus Logic JEDEC map driver"); | ||
341 | MODULE_LICENSE("GPL"); | ||
diff --git a/drivers/mtd/maps/dc21285.c b/drivers/mtd/maps/dc21285.c index 7a9e1989c977..f43b365b848c 100644 --- a/drivers/mtd/maps/dc21285.c +++ b/drivers/mtd/maps/dc21285.c | |||
@@ -145,14 +145,10 @@ static struct map_info dc21285_map = { | |||
145 | 145 | ||
146 | 146 | ||
147 | /* Partition stuff */ | 147 | /* Partition stuff */ |
148 | static struct mtd_partition *dc21285_parts; | ||
149 | static const char *probes[] = { "RedBoot", "cmdlinepart", NULL }; | 148 | static const char *probes[] = { "RedBoot", "cmdlinepart", NULL }; |
150 | 149 | ||
151 | static int __init init_dc21285(void) | 150 | static int __init init_dc21285(void) |
152 | { | 151 | { |
153 | |||
154 | int nrparts; | ||
155 | |||
156 | /* Determine bankwidth */ | 152 | /* Determine bankwidth */ |
157 | switch (*CSR_SA110_CNTL & (3<<14)) { | 153 | switch (*CSR_SA110_CNTL & (3<<14)) { |
158 | case SA110_CNTL_ROMWIDTH_8: | 154 | case SA110_CNTL_ROMWIDTH_8: |
@@ -200,8 +196,7 @@ static int __init init_dc21285(void) | |||
200 | 196 | ||
201 | dc21285_mtd->owner = THIS_MODULE; | 197 | dc21285_mtd->owner = THIS_MODULE; |
202 | 198 | ||
203 | nrparts = parse_mtd_partitions(dc21285_mtd, probes, &dc21285_parts, 0); | 199 | mtd_device_parse_register(dc21285_mtd, probes, 0, NULL, 0); |
204 | mtd_device_register(dc21285_mtd, dc21285_parts, nrparts); | ||
205 | 200 | ||
206 | if(machine_is_ebsa285()) { | 201 | if(machine_is_ebsa285()) { |
207 | /* | 202 | /* |
@@ -224,8 +219,6 @@ static int __init init_dc21285(void) | |||
224 | static void __exit cleanup_dc21285(void) | 219 | static void __exit cleanup_dc21285(void) |
225 | { | 220 | { |
226 | mtd_device_unregister(dc21285_mtd); | 221 | mtd_device_unregister(dc21285_mtd); |
227 | if (dc21285_parts) | ||
228 | kfree(dc21285_parts); | ||
229 | map_destroy(dc21285_mtd); | 222 | map_destroy(dc21285_mtd); |
230 | iounmap(dc21285_map.virt); | 223 | iounmap(dc21285_map.virt); |
231 | } | 224 | } |
diff --git a/drivers/mtd/maps/edb7312.c b/drivers/mtd/maps/edb7312.c deleted file mode 100644 index fe42a212bb3e..000000000000 --- a/drivers/mtd/maps/edb7312.c +++ /dev/null | |||
@@ -1,134 +0,0 @@ | |||
1 | /* | ||
2 | * Handle mapping of the NOR flash on Cogent EDB7312 boards | ||
3 | * | ||
4 | * Copyright 2002 SYSGO Real-Time Solutions GmbH | ||
5 | * | ||
6 | * This program is free software; you can redistribute it and/or modify | ||
7 | * it under the terms of the GNU General Public License version 2 as | ||
8 | * published by the Free Software Foundation. | ||
9 | */ | ||
10 | |||
11 | #include <linux/module.h> | ||
12 | #include <linux/types.h> | ||
13 | #include <linux/kernel.h> | ||
14 | #include <linux/init.h> | ||
15 | #include <asm/io.h> | ||
16 | #include <linux/mtd/mtd.h> | ||
17 | #include <linux/mtd/map.h> | ||
18 | #include <linux/mtd/partitions.h> | ||
19 | |||
20 | #define WINDOW_ADDR 0x00000000 /* physical properties of flash */ | ||
21 | #define WINDOW_SIZE 0x01000000 | ||
22 | #define BUSWIDTH 2 | ||
23 | #define FLASH_BLOCKSIZE_MAIN 0x20000 | ||
24 | #define FLASH_NUMBLOCKS_MAIN 128 | ||
25 | /* can be "cfi_probe", "jedec_probe", "map_rom", NULL }; */ | ||
26 | #define PROBETYPES { "cfi_probe", NULL } | ||
27 | |||
28 | #define MSG_PREFIX "EDB7312-NOR:" /* prefix for our printk()'s */ | ||
29 | #define MTDID "edb7312-nor" /* for mtdparts= partitioning */ | ||
30 | |||
31 | static struct mtd_info *mymtd; | ||
32 | |||
33 | struct map_info edb7312nor_map = { | ||
34 | .name = "NOR flash on EDB7312", | ||
35 | .size = WINDOW_SIZE, | ||
36 | .bankwidth = BUSWIDTH, | ||
37 | .phys = WINDOW_ADDR, | ||
38 | }; | ||
39 | |||
40 | /* | ||
41 | * MTD partitioning stuff | ||
42 | */ | ||
43 | static struct mtd_partition static_partitions[3] = | ||
44 | { | ||
45 | { | ||
46 | .name = "ARMboot", | ||
47 | .size = 0x40000, | ||
48 | .offset = 0 | ||
49 | }, | ||
50 | { | ||
51 | .name = "Kernel", | ||
52 | .size = 0x200000, | ||
53 | .offset = 0x40000 | ||
54 | }, | ||
55 | { | ||
56 | .name = "RootFS", | ||
57 | .size = 0xDC0000, | ||
58 | .offset = 0x240000 | ||
59 | }, | ||
60 | }; | ||
61 | |||
62 | static const char *probes[] = { "RedBoot", "cmdlinepart", NULL }; | ||
63 | |||
64 | static int mtd_parts_nb = 0; | ||
65 | static struct mtd_partition *mtd_parts = 0; | ||
66 | |||
67 | static int __init init_edb7312nor(void) | ||
68 | { | ||
69 | static const char *rom_probe_types[] = PROBETYPES; | ||
70 | const char **type; | ||
71 | const char *part_type = 0; | ||
72 | |||
73 | printk(KERN_NOTICE MSG_PREFIX "0x%08x at 0x%08x\n", | ||
74 | WINDOW_SIZE, WINDOW_ADDR); | ||
75 | edb7312nor_map.virt = ioremap(WINDOW_ADDR, WINDOW_SIZE); | ||
76 | |||
77 | if (!edb7312nor_map.virt) { | ||
78 | printk(MSG_PREFIX "failed to ioremap\n"); | ||
79 | return -EIO; | ||
80 | } | ||
81 | |||
82 | simple_map_init(&edb7312nor_map); | ||
83 | |||
84 | mymtd = 0; | ||
85 | type = rom_probe_types; | ||
86 | for(; !mymtd && *type; type++) { | ||
87 | mymtd = do_map_probe(*type, &edb7312nor_map); | ||
88 | } | ||
89 | if (mymtd) { | ||
90 | mymtd->owner = THIS_MODULE; | ||
91 | |||
92 | mtd_parts_nb = parse_mtd_partitions(mymtd, probes, &mtd_parts, MTDID); | ||
93 | if (mtd_parts_nb > 0) | ||
94 | part_type = "detected"; | ||
95 | |||
96 | if (mtd_parts_nb == 0) { | ||
97 | mtd_parts = static_partitions; | ||
98 | mtd_parts_nb = ARRAY_SIZE(static_partitions); | ||
99 | part_type = "static"; | ||
100 | } | ||
101 | |||
102 | if (mtd_parts_nb == 0) | ||
103 | printk(KERN_NOTICE MSG_PREFIX "no partition info available\n"); | ||
104 | else | ||
105 | printk(KERN_NOTICE MSG_PREFIX | ||
106 | "using %s partition definition\n", part_type); | ||
107 | /* Register the whole device first. */ | ||
108 | mtd_device_register(mymtd, NULL, 0); | ||
109 | mtd_device_register(mymtd, mtd_parts, mtd_parts_nb); | ||
110 | return 0; | ||
111 | } | ||
112 | |||
113 | iounmap((void *)edb7312nor_map.virt); | ||
114 | return -ENXIO; | ||
115 | } | ||
116 | |||
117 | static void __exit cleanup_edb7312nor(void) | ||
118 | { | ||
119 | if (mymtd) { | ||
120 | mtd_device_unregister(mymtd); | ||
121 | map_destroy(mymtd); | ||
122 | } | ||
123 | if (edb7312nor_map.virt) { | ||
124 | iounmap((void *)edb7312nor_map.virt); | ||
125 | edb7312nor_map.virt = 0; | ||
126 | } | ||
127 | } | ||
128 | |||
129 | module_init(init_edb7312nor); | ||
130 | module_exit(cleanup_edb7312nor); | ||
131 | |||
132 | MODULE_LICENSE("GPL"); | ||
133 | MODULE_AUTHOR("Marius Groeger <mag@sysgo.de>"); | ||
134 | MODULE_DESCRIPTION("Generic configurable MTD map driver"); | ||
diff --git a/drivers/mtd/maps/gpio-addr-flash.c b/drivers/mtd/maps/gpio-addr-flash.c index 7568c5f8b8ae..1ec66f031c51 100644 --- a/drivers/mtd/maps/gpio-addr-flash.c +++ b/drivers/mtd/maps/gpio-addr-flash.c | |||
@@ -187,7 +187,6 @@ static const char *part_probe_types[] = { "cmdlinepart", "RedBoot", NULL }; | |||
187 | */ | 187 | */ |
188 | static int __devinit gpio_flash_probe(struct platform_device *pdev) | 188 | static int __devinit gpio_flash_probe(struct platform_device *pdev) |
189 | { | 189 | { |
190 | int nr_parts; | ||
191 | size_t i, arr_size; | 190 | size_t i, arr_size; |
192 | struct physmap_flash_data *pdata; | 191 | struct physmap_flash_data *pdata; |
193 | struct resource *memory; | 192 | struct resource *memory; |
@@ -252,20 +251,9 @@ static int __devinit gpio_flash_probe(struct platform_device *pdev) | |||
252 | return -ENXIO; | 251 | return -ENXIO; |
253 | } | 252 | } |
254 | 253 | ||
255 | nr_parts = parse_mtd_partitions(state->mtd, part_probe_types, | ||
256 | &pdata->parts, 0); | ||
257 | if (nr_parts > 0) { | ||
258 | pr_devinit(KERN_NOTICE PFX "Using commandline partition definition\n"); | ||
259 | kfree(pdata->parts); | ||
260 | } else if (pdata->nr_parts) { | ||
261 | pr_devinit(KERN_NOTICE PFX "Using board partition definition\n"); | ||
262 | nr_parts = pdata->nr_parts; | ||
263 | } else { | ||
264 | pr_devinit(KERN_NOTICE PFX "no partition info available, registering whole flash at once\n"); | ||
265 | nr_parts = 0; | ||
266 | } | ||
267 | 254 | ||
268 | mtd_device_register(state->mtd, pdata->parts, nr_parts); | 255 | mtd_device_parse_register(state->mtd, part_probe_types, 0, |
256 | pdata->parts, pdata->nr_parts); | ||
269 | 257 | ||
270 | return 0; | 258 | return 0; |
271 | } | 259 | } |
diff --git a/drivers/mtd/maps/h720x-flash.c b/drivers/mtd/maps/h720x-flash.c index 7f035860a36b..49c14187fc66 100644 --- a/drivers/mtd/maps/h720x-flash.c +++ b/drivers/mtd/maps/h720x-flash.c | |||
@@ -58,18 +58,11 @@ static struct mtd_partition h720x_partitions[] = { | |||
58 | 58 | ||
59 | #define NUM_PARTITIONS ARRAY_SIZE(h720x_partitions) | 59 | #define NUM_PARTITIONS ARRAY_SIZE(h720x_partitions) |
60 | 60 | ||
61 | static int nr_mtd_parts; | ||
62 | static struct mtd_partition *mtd_parts; | ||
63 | static const char *probes[] = { "cmdlinepart", NULL }; | ||
64 | |||
65 | /* | 61 | /* |
66 | * Initialize FLASH support | 62 | * Initialize FLASH support |
67 | */ | 63 | */ |
68 | static int __init h720x_mtd_init(void) | 64 | static int __init h720x_mtd_init(void) |
69 | { | 65 | { |
70 | |||
71 | char *part_type = NULL; | ||
72 | |||
73 | h720x_map.virt = ioremap(h720x_map.phys, h720x_map.size); | 66 | h720x_map.virt = ioremap(h720x_map.phys, h720x_map.size); |
74 | 67 | ||
75 | if (!h720x_map.virt) { | 68 | if (!h720x_map.virt) { |
@@ -92,16 +85,8 @@ static int __init h720x_mtd_init(void) | |||
92 | if (mymtd) { | 85 | if (mymtd) { |
93 | mymtd->owner = THIS_MODULE; | 86 | mymtd->owner = THIS_MODULE; |
94 | 87 | ||
95 | nr_mtd_parts = parse_mtd_partitions(mymtd, probes, &mtd_parts, 0); | 88 | mtd_device_parse_register(mymtd, NULL, 0, |
96 | if (nr_mtd_parts > 0) | 89 | h720x_partitions, NUM_PARTITIONS); |
97 | part_type = "command line"; | ||
98 | if (nr_mtd_parts <= 0) { | ||
99 | mtd_parts = h720x_partitions; | ||
100 | nr_mtd_parts = NUM_PARTITIONS; | ||
101 | part_type = "builtin"; | ||
102 | } | ||
103 | printk(KERN_INFO "Using %s partition table\n", part_type); | ||
104 | mtd_device_register(mymtd, mtd_parts, nr_mtd_parts); | ||
105 | return 0; | 90 | return 0; |
106 | } | 91 | } |
107 | 92 | ||
@@ -120,10 +105,6 @@ static void __exit h720x_mtd_cleanup(void) | |||
120 | map_destroy(mymtd); | 105 | map_destroy(mymtd); |
121 | } | 106 | } |
122 | 107 | ||
123 | /* Free partition info, if commandline partition was used */ | ||
124 | if (mtd_parts && (mtd_parts != h720x_partitions)) | ||
125 | kfree (mtd_parts); | ||
126 | |||
127 | if (h720x_map.virt) { | 108 | if (h720x_map.virt) { |
128 | iounmap((void *)h720x_map.virt); | 109 | iounmap((void *)h720x_map.virt); |
129 | h720x_map.virt = 0; | 110 | h720x_map.virt = 0; |
diff --git a/drivers/mtd/maps/impa7.c b/drivers/mtd/maps/impa7.c index 404a50cbafa0..f47aedb24366 100644 --- a/drivers/mtd/maps/impa7.c +++ b/drivers/mtd/maps/impa7.c | |||
@@ -49,7 +49,7 @@ static struct map_info impa7_map[NUM_FLASHBANKS] = { | |||
49 | /* | 49 | /* |
50 | * MTD partitioning stuff | 50 | * MTD partitioning stuff |
51 | */ | 51 | */ |
52 | static struct mtd_partition static_partitions[] = | 52 | static struct mtd_partition partitions[] = |
53 | { | 53 | { |
54 | { | 54 | { |
55 | .name = "FileSystem", | 55 | .name = "FileSystem", |
@@ -58,16 +58,10 @@ static struct mtd_partition static_partitions[] = | |||
58 | }, | 58 | }, |
59 | }; | 59 | }; |
60 | 60 | ||
61 | static int mtd_parts_nb[NUM_FLASHBANKS]; | ||
62 | static struct mtd_partition *mtd_parts[NUM_FLASHBANKS]; | ||
63 | |||
64 | static const char *probes[] = { "cmdlinepart", NULL }; | ||
65 | |||
66 | static int __init init_impa7(void) | 61 | static int __init init_impa7(void) |
67 | { | 62 | { |
68 | static const char *rom_probe_types[] = PROBETYPES; | 63 | static const char *rom_probe_types[] = PROBETYPES; |
69 | const char **type; | 64 | const char **type; |
70 | const char *part_type = 0; | ||
71 | int i; | 65 | int i; |
72 | static struct { u_long addr; u_long size; } pt[NUM_FLASHBANKS] = { | 66 | static struct { u_long addr; u_long size; } pt[NUM_FLASHBANKS] = { |
73 | { WINDOW_ADDR0, WINDOW_SIZE0 }, | 67 | { WINDOW_ADDR0, WINDOW_SIZE0 }, |
@@ -97,23 +91,9 @@ static int __init init_impa7(void) | |||
97 | if (impa7_mtd[i]) { | 91 | if (impa7_mtd[i]) { |
98 | impa7_mtd[i]->owner = THIS_MODULE; | 92 | impa7_mtd[i]->owner = THIS_MODULE; |
99 | devicesfound++; | 93 | devicesfound++; |
100 | mtd_parts_nb[i] = parse_mtd_partitions(impa7_mtd[i], | 94 | mtd_device_parse_register(impa7_mtd[i], NULL, 0, |
101 | probes, | 95 | partitions, |
102 | &mtd_parts[i], | 96 | ARRAY_SIZE(partitions)); |
103 | 0); | ||
104 | if (mtd_parts_nb[i] > 0) { | ||
105 | part_type = "command line"; | ||
106 | } else { | ||
107 | mtd_parts[i] = static_partitions; | ||
108 | mtd_parts_nb[i] = ARRAY_SIZE(static_partitions); | ||
109 | part_type = "static"; | ||
110 | } | ||
111 | |||
112 | printk(KERN_NOTICE MSG_PREFIX | ||
113 | "using %s partition definition\n", | ||
114 | part_type); | ||
115 | mtd_device_register(impa7_mtd[i], | ||
116 | mtd_parts[i], mtd_parts_nb[i]); | ||
117 | } | 97 | } |
118 | else | 98 | else |
119 | iounmap((void *)impa7_map[i].virt); | 99 | iounmap((void *)impa7_map[i].virt); |
diff --git a/drivers/mtd/maps/intel_vr_nor.c b/drivers/mtd/maps/intel_vr_nor.c index d2f47be8754b..08c239604ee4 100644 --- a/drivers/mtd/maps/intel_vr_nor.c +++ b/drivers/mtd/maps/intel_vr_nor.c | |||
@@ -44,7 +44,6 @@ struct vr_nor_mtd { | |||
44 | void __iomem *csr_base; | 44 | void __iomem *csr_base; |
45 | struct map_info map; | 45 | struct map_info map; |
46 | struct mtd_info *info; | 46 | struct mtd_info *info; |
47 | int nr_parts; | ||
48 | struct pci_dev *dev; | 47 | struct pci_dev *dev; |
49 | }; | 48 | }; |
50 | 49 | ||
@@ -71,13 +70,9 @@ static void __devexit vr_nor_destroy_partitions(struct vr_nor_mtd *p) | |||
71 | 70 | ||
72 | static int __devinit vr_nor_init_partitions(struct vr_nor_mtd *p) | 71 | static int __devinit vr_nor_init_partitions(struct vr_nor_mtd *p) |
73 | { | 72 | { |
74 | struct mtd_partition *parts; | ||
75 | static const char *part_probes[] = { "cmdlinepart", NULL }; | ||
76 | |||
77 | /* register the flash bank */ | 73 | /* register the flash bank */ |
78 | /* partition the flash bank */ | 74 | /* partition the flash bank */ |
79 | p->nr_parts = parse_mtd_partitions(p->info, part_probes, &parts, 0); | 75 | return mtd_device_parse_register(p->info, NULL, 0, NULL, 0); |
80 | return mtd_device_register(p->info, parts, p->nr_parts); | ||
81 | } | 76 | } |
82 | 77 | ||
83 | static void __devexit vr_nor_destroy_mtd_setup(struct vr_nor_mtd *p) | 78 | static void __devexit vr_nor_destroy_mtd_setup(struct vr_nor_mtd *p) |
diff --git a/drivers/mtd/maps/ixp2000.c b/drivers/mtd/maps/ixp2000.c index 1594a802631d..437fcd2f352f 100644 --- a/drivers/mtd/maps/ixp2000.c +++ b/drivers/mtd/maps/ixp2000.c | |||
@@ -38,7 +38,6 @@ | |||
38 | struct ixp2000_flash_info { | 38 | struct ixp2000_flash_info { |
39 | struct mtd_info *mtd; | 39 | struct mtd_info *mtd; |
40 | struct map_info map; | 40 | struct map_info map; |
41 | struct mtd_partition *partitions; | ||
42 | struct resource *res; | 41 | struct resource *res; |
43 | }; | 42 | }; |
44 | 43 | ||
@@ -125,8 +124,6 @@ static int ixp2000_flash_remove(struct platform_device *dev) | |||
125 | if (info->map.map_priv_1) | 124 | if (info->map.map_priv_1) |
126 | iounmap((void *) info->map.map_priv_1); | 125 | iounmap((void *) info->map.map_priv_1); |
127 | 126 | ||
128 | kfree(info->partitions); | ||
129 | |||
130 | if (info->res) { | 127 | if (info->res) { |
131 | release_resource(info->res); | 128 | release_resource(info->res); |
132 | kfree(info->res); | 129 | kfree(info->res); |
@@ -229,13 +226,7 @@ static int ixp2000_flash_probe(struct platform_device *dev) | |||
229 | } | 226 | } |
230 | info->mtd->owner = THIS_MODULE; | 227 | info->mtd->owner = THIS_MODULE; |
231 | 228 | ||
232 | err = parse_mtd_partitions(info->mtd, probes, &info->partitions, 0); | 229 | err = mtd_device_parse_register(info->mtd, probes, 0, NULL, 0); |
233 | if (err > 0) { | ||
234 | err = mtd_device_register(info->mtd, info->partitions, err); | ||
235 | if(err) | ||
236 | dev_err(&dev->dev, "Could not parse partitions\n"); | ||
237 | } | ||
238 | |||
239 | if (err) | 230 | if (err) |
240 | goto Error; | 231 | goto Error; |
241 | 232 | ||
diff --git a/drivers/mtd/maps/ixp4xx.c b/drivers/mtd/maps/ixp4xx.c index 155b21942f47..30409015a3de 100644 --- a/drivers/mtd/maps/ixp4xx.c +++ b/drivers/mtd/maps/ixp4xx.c | |||
@@ -145,7 +145,6 @@ static void ixp4xx_write16(struct map_info *map, map_word d, unsigned long adr) | |||
145 | struct ixp4xx_flash_info { | 145 | struct ixp4xx_flash_info { |
146 | struct mtd_info *mtd; | 146 | struct mtd_info *mtd; |
147 | struct map_info map; | 147 | struct map_info map; |
148 | struct mtd_partition *partitions; | ||
149 | struct resource *res; | 148 | struct resource *res; |
150 | }; | 149 | }; |
151 | 150 | ||
@@ -168,8 +167,6 @@ static int ixp4xx_flash_remove(struct platform_device *dev) | |||
168 | if (info->map.virt) | 167 | if (info->map.virt) |
169 | iounmap(info->map.virt); | 168 | iounmap(info->map.virt); |
170 | 169 | ||
171 | kfree(info->partitions); | ||
172 | |||
173 | if (info->res) { | 170 | if (info->res) { |
174 | release_resource(info->res); | 171 | release_resource(info->res); |
175 | kfree(info->res); | 172 | kfree(info->res); |
@@ -185,8 +182,6 @@ static int ixp4xx_flash_probe(struct platform_device *dev) | |||
185 | { | 182 | { |
186 | struct flash_platform_data *plat = dev->dev.platform_data; | 183 | struct flash_platform_data *plat = dev->dev.platform_data; |
187 | struct ixp4xx_flash_info *info; | 184 | struct ixp4xx_flash_info *info; |
188 | const char *part_type = NULL; | ||
189 | int nr_parts = 0; | ||
190 | int err = -1; | 185 | int err = -1; |
191 | 186 | ||
192 | if (!plat) | 187 | if (!plat) |
@@ -252,28 +247,12 @@ static int ixp4xx_flash_probe(struct platform_device *dev) | |||
252 | /* Use the fast version */ | 247 | /* Use the fast version */ |
253 | info->map.write = ixp4xx_write16; | 248 | info->map.write = ixp4xx_write16; |
254 | 249 | ||
255 | nr_parts = parse_mtd_partitions(info->mtd, probes, &info->partitions, | 250 | err = mtd_device_parse_register(info->mtd, probes, dev->resource->start, |
256 | dev->resource->start); | 251 | plat->parts, plat->nr_parts); |
257 | if (nr_parts > 0) { | 252 | if (err) { |
258 | part_type = "dynamic"; | ||
259 | } else { | ||
260 | info->partitions = plat->parts; | ||
261 | nr_parts = plat->nr_parts; | ||
262 | part_type = "static"; | ||
263 | } | ||
264 | if (nr_parts == 0) | ||
265 | printk(KERN_NOTICE "IXP4xx flash: no partition info " | ||
266 | "available, registering whole flash\n"); | ||
267 | else | ||
268 | printk(KERN_NOTICE "IXP4xx flash: using %s partition " | ||
269 | "definition\n", part_type); | ||
270 | |||
271 | err = mtd_device_register(info->mtd, info->partitions, nr_parts); | ||
272 | if (err) | ||
273 | printk(KERN_ERR "Could not parse partitions\n"); | 253 | printk(KERN_ERR "Could not parse partitions\n"); |
274 | |||
275 | if (err) | ||
276 | goto Error; | 254 | goto Error; |
255 | } | ||
277 | 256 | ||
278 | return 0; | 257 | return 0; |
279 | 258 | ||
diff --git a/drivers/mtd/maps/lantiq-flash.c b/drivers/mtd/maps/lantiq-flash.c index 7e508969239e..4f10e27ada55 100644 --- a/drivers/mtd/maps/lantiq-flash.c +++ b/drivers/mtd/maps/lantiq-flash.c | |||
@@ -107,16 +107,12 @@ ltq_copy_to(struct map_info *map, unsigned long to, | |||
107 | spin_unlock_irqrestore(&ebu_lock, flags); | 107 | spin_unlock_irqrestore(&ebu_lock, flags); |
108 | } | 108 | } |
109 | 109 | ||
110 | static const char const *part_probe_types[] = { "cmdlinepart", NULL }; | ||
111 | |||
112 | static int __init | 110 | static int __init |
113 | ltq_mtd_probe(struct platform_device *pdev) | 111 | ltq_mtd_probe(struct platform_device *pdev) |
114 | { | 112 | { |
115 | struct physmap_flash_data *ltq_mtd_data = dev_get_platdata(&pdev->dev); | 113 | struct physmap_flash_data *ltq_mtd_data = dev_get_platdata(&pdev->dev); |
116 | struct ltq_mtd *ltq_mtd; | 114 | struct ltq_mtd *ltq_mtd; |
117 | struct mtd_partition *parts; | ||
118 | struct resource *res; | 115 | struct resource *res; |
119 | int nr_parts = 0; | ||
120 | struct cfi_private *cfi; | 116 | struct cfi_private *cfi; |
121 | int err; | 117 | int err; |
122 | 118 | ||
@@ -172,17 +168,8 @@ ltq_mtd_probe(struct platform_device *pdev) | |||
172 | cfi->addr_unlock1 ^= 1; | 168 | cfi->addr_unlock1 ^= 1; |
173 | cfi->addr_unlock2 ^= 1; | 169 | cfi->addr_unlock2 ^= 1; |
174 | 170 | ||
175 | nr_parts = parse_mtd_partitions(ltq_mtd->mtd, | 171 | err = mtd_device_parse_register(ltq_mtd->mtd, NULL, 0, |
176 | part_probe_types, &parts, 0); | 172 | ltq_mtd_data->parts, ltq_mtd_data->nr_parts); |
177 | if (nr_parts > 0) { | ||
178 | dev_info(&pdev->dev, | ||
179 | "using %d partitions from cmdline", nr_parts); | ||
180 | } else { | ||
181 | nr_parts = ltq_mtd_data->nr_parts; | ||
182 | parts = ltq_mtd_data->parts; | ||
183 | } | ||
184 | |||
185 | err = mtd_device_register(ltq_mtd->mtd, parts, nr_parts); | ||
186 | if (err) { | 173 | if (err) { |
187 | dev_err(&pdev->dev, "failed to add partitions\n"); | 174 | dev_err(&pdev->dev, "failed to add partitions\n"); |
188 | goto err_destroy; | 175 | goto err_destroy; |
diff --git a/drivers/mtd/maps/latch-addr-flash.c b/drivers/mtd/maps/latch-addr-flash.c index 5936c466e901..119baa7d7477 100644 --- a/drivers/mtd/maps/latch-addr-flash.c +++ b/drivers/mtd/maps/latch-addr-flash.c | |||
@@ -33,9 +33,6 @@ struct latch_addr_flash_info { | |||
33 | /* cache; could be found out of res */ | 33 | /* cache; could be found out of res */ |
34 | unsigned long win_mask; | 34 | unsigned long win_mask; |
35 | 35 | ||
36 | int nr_parts; | ||
37 | struct mtd_partition *parts; | ||
38 | |||
39 | spinlock_t lock; | 36 | spinlock_t lock; |
40 | }; | 37 | }; |
41 | 38 | ||
@@ -97,8 +94,6 @@ static void lf_copy_from(struct map_info *map, void *to, | |||
97 | 94 | ||
98 | static char *rom_probe_types[] = { "cfi_probe", NULL }; | 95 | static char *rom_probe_types[] = { "cfi_probe", NULL }; |
99 | 96 | ||
100 | static char *part_probe_types[] = { "cmdlinepart", NULL }; | ||
101 | |||
102 | static int latch_addr_flash_remove(struct platform_device *dev) | 97 | static int latch_addr_flash_remove(struct platform_device *dev) |
103 | { | 98 | { |
104 | struct latch_addr_flash_info *info; | 99 | struct latch_addr_flash_info *info; |
@@ -112,8 +107,6 @@ static int latch_addr_flash_remove(struct platform_device *dev) | |||
112 | latch_addr_data = dev->dev.platform_data; | 107 | latch_addr_data = dev->dev.platform_data; |
113 | 108 | ||
114 | if (info->mtd != NULL) { | 109 | if (info->mtd != NULL) { |
115 | if (info->nr_parts) | ||
116 | kfree(info->parts); | ||
117 | mtd_device_unregister(info->mtd); | 110 | mtd_device_unregister(info->mtd); |
118 | map_destroy(info->mtd); | 111 | map_destroy(info->mtd); |
119 | } | 112 | } |
@@ -206,21 +199,8 @@ static int __devinit latch_addr_flash_probe(struct platform_device *dev) | |||
206 | } | 199 | } |
207 | info->mtd->owner = THIS_MODULE; | 200 | info->mtd->owner = THIS_MODULE; |
208 | 201 | ||
209 | err = parse_mtd_partitions(info->mtd, (const char **)part_probe_types, | 202 | mtd_device_parse_register(info->mtd, NULL, 0, |
210 | &info->parts, 0); | 203 | latch_addr_data->parts, latch_addr_data->nr_parts); |
211 | if (err > 0) { | ||
212 | mtd_device_register(info->mtd, info->parts, err); | ||
213 | return 0; | ||
214 | } | ||
215 | if (latch_addr_data->nr_parts) { | ||
216 | pr_notice("Using latch-addr-flash partition information\n"); | ||
217 | mtd_device_register(info->mtd, | ||
218 | latch_addr_data->parts, | ||
219 | latch_addr_data->nr_parts); | ||
220 | return 0; | ||
221 | } | ||
222 | |||
223 | mtd_device_register(info->mtd, NULL, 0); | ||
224 | return 0; | 204 | return 0; |
225 | 205 | ||
226 | iounmap: | 206 | iounmap: |
diff --git a/drivers/mtd/maps/pcmciamtd.c b/drivers/mtd/maps/pcmciamtd.c index bbe168b65c26..e8e9fec23553 100644 --- a/drivers/mtd/maps/pcmciamtd.c +++ b/drivers/mtd/maps/pcmciamtd.c | |||
@@ -22,22 +22,6 @@ | |||
22 | #include <linux/mtd/map.h> | 22 | #include <linux/mtd/map.h> |
23 | #include <linux/mtd/mtd.h> | 23 | #include <linux/mtd/mtd.h> |
24 | 24 | ||
25 | #ifdef CONFIG_MTD_DEBUG | ||
26 | static int debug = CONFIG_MTD_DEBUG_VERBOSE; | ||
27 | module_param(debug, int, 0); | ||
28 | MODULE_PARM_DESC(debug, "Set Debug Level 0=quiet, 5=noisy"); | ||
29 | #undef DEBUG | ||
30 | #define DEBUG(n, format, arg...) \ | ||
31 | if (n <= debug) { \ | ||
32 | printk(KERN_DEBUG __FILE__ ":%s(): " format "\n", __func__ , ## arg); \ | ||
33 | } | ||
34 | |||
35 | #else | ||
36 | #undef DEBUG | ||
37 | #define DEBUG(n, arg...) | ||
38 | static const int debug = 0; | ||
39 | #endif | ||
40 | |||
41 | #define info(format, arg...) printk(KERN_INFO "pcmciamtd: " format "\n" , ## arg) | 25 | #define info(format, arg...) printk(KERN_INFO "pcmciamtd: " format "\n" , ## arg) |
42 | 26 | ||
43 | #define DRIVER_DESC "PCMCIA Flash memory card driver" | 27 | #define DRIVER_DESC "PCMCIA Flash memory card driver" |
@@ -105,13 +89,13 @@ static caddr_t remap_window(struct map_info *map, unsigned long to) | |||
105 | int ret; | 89 | int ret; |
106 | 90 | ||
107 | if (!pcmcia_dev_present(dev->p_dev)) { | 91 | if (!pcmcia_dev_present(dev->p_dev)) { |
108 | DEBUG(1, "device removed"); | 92 | pr_debug("device removed\n"); |
109 | return 0; | 93 | return 0; |
110 | } | 94 | } |
111 | 95 | ||
112 | offset = to & ~(dev->win_size-1); | 96 | offset = to & ~(dev->win_size-1); |
113 | if (offset != dev->offset) { | 97 | if (offset != dev->offset) { |
114 | DEBUG(2, "Remapping window from 0x%8.8x to 0x%8.8x", | 98 | pr_debug("Remapping window from 0x%8.8x to 0x%8.8x\n", |
115 | dev->offset, offset); | 99 | dev->offset, offset); |
116 | ret = pcmcia_map_mem_page(dev->p_dev, win, offset); | 100 | ret = pcmcia_map_mem_page(dev->p_dev, win, offset); |
117 | if (ret != 0) | 101 | if (ret != 0) |
@@ -132,7 +116,7 @@ static map_word pcmcia_read8_remap(struct map_info *map, unsigned long ofs) | |||
132 | return d; | 116 | return d; |
133 | 117 | ||
134 | d.x[0] = readb(addr); | 118 | d.x[0] = readb(addr); |
135 | DEBUG(3, "ofs = 0x%08lx (%p) data = 0x%02lx", ofs, addr, d.x[0]); | 119 | pr_debug("ofs = 0x%08lx (%p) data = 0x%02lx\n", ofs, addr, d.x[0]); |
136 | return d; | 120 | return d; |
137 | } | 121 | } |
138 | 122 | ||
@@ -147,7 +131,7 @@ static map_word pcmcia_read16_remap(struct map_info *map, unsigned long ofs) | |||
147 | return d; | 131 | return d; |
148 | 132 | ||
149 | d.x[0] = readw(addr); | 133 | d.x[0] = readw(addr); |
150 | DEBUG(3, "ofs = 0x%08lx (%p) data = 0x%04lx", ofs, addr, d.x[0]); | 134 | pr_debug("ofs = 0x%08lx (%p) data = 0x%04lx\n", ofs, addr, d.x[0]); |
151 | return d; | 135 | return d; |
152 | } | 136 | } |
153 | 137 | ||
@@ -157,7 +141,7 @@ static void pcmcia_copy_from_remap(struct map_info *map, void *to, unsigned long | |||
157 | struct pcmciamtd_dev *dev = (struct pcmciamtd_dev *)map->map_priv_1; | 141 | struct pcmciamtd_dev *dev = (struct pcmciamtd_dev *)map->map_priv_1; |
158 | unsigned long win_size = dev->win_size; | 142 | unsigned long win_size = dev->win_size; |
159 | 143 | ||
160 | DEBUG(3, "to = %p from = %lu len = %zd", to, from, len); | 144 | pr_debug("to = %p from = %lu len = %zd\n", to, from, len); |
161 | while(len) { | 145 | while(len) { |
162 | int toread = win_size - (from & (win_size-1)); | 146 | int toread = win_size - (from & (win_size-1)); |
163 | caddr_t addr; | 147 | caddr_t addr; |
@@ -169,7 +153,7 @@ static void pcmcia_copy_from_remap(struct map_info *map, void *to, unsigned long | |||
169 | if(!addr) | 153 | if(!addr) |
170 | return; | 154 | return; |
171 | 155 | ||
172 | DEBUG(4, "memcpy from %p to %p len = %d", addr, to, toread); | 156 | pr_debug("memcpy from %p to %p len = %d\n", addr, to, toread); |
173 | memcpy_fromio(to, addr, toread); | 157 | memcpy_fromio(to, addr, toread); |
174 | len -= toread; | 158 | len -= toread; |
175 | to += toread; | 159 | to += toread; |
@@ -185,7 +169,7 @@ static void pcmcia_write8_remap(struct map_info *map, map_word d, unsigned long | |||
185 | if(!addr) | 169 | if(!addr) |
186 | return; | 170 | return; |
187 | 171 | ||
188 | DEBUG(3, "adr = 0x%08lx (%p) data = 0x%02lx", adr, addr, d.x[0]); | 172 | pr_debug("adr = 0x%08lx (%p) data = 0x%02lx\n", adr, addr, d.x[0]); |
189 | writeb(d.x[0], addr); | 173 | writeb(d.x[0], addr); |
190 | } | 174 | } |
191 | 175 | ||
@@ -196,7 +180,7 @@ static void pcmcia_write16_remap(struct map_info *map, map_word d, unsigned long | |||
196 | if(!addr) | 180 | if(!addr) |
197 | return; | 181 | return; |
198 | 182 | ||
199 | DEBUG(3, "adr = 0x%08lx (%p) data = 0x%04lx", adr, addr, d.x[0]); | 183 | pr_debug("adr = 0x%08lx (%p) data = 0x%04lx\n", adr, addr, d.x[0]); |
200 | writew(d.x[0], addr); | 184 | writew(d.x[0], addr); |
201 | } | 185 | } |
202 | 186 | ||
@@ -206,7 +190,7 @@ static void pcmcia_copy_to_remap(struct map_info *map, unsigned long to, const v | |||
206 | struct pcmciamtd_dev *dev = (struct pcmciamtd_dev *)map->map_priv_1; | 190 | struct pcmciamtd_dev *dev = (struct pcmciamtd_dev *)map->map_priv_1; |
207 | unsigned long win_size = dev->win_size; | 191 | unsigned long win_size = dev->win_size; |
208 | 192 | ||
209 | DEBUG(3, "to = %lu from = %p len = %zd", to, from, len); | 193 | pr_debug("to = %lu from = %p len = %zd\n", to, from, len); |
210 | while(len) { | 194 | while(len) { |
211 | int towrite = win_size - (to & (win_size-1)); | 195 | int towrite = win_size - (to & (win_size-1)); |
212 | caddr_t addr; | 196 | caddr_t addr; |
@@ -218,7 +202,7 @@ static void pcmcia_copy_to_remap(struct map_info *map, unsigned long to, const v | |||
218 | if(!addr) | 202 | if(!addr) |
219 | return; | 203 | return; |
220 | 204 | ||
221 | DEBUG(4, "memcpy from %p to %p len = %d", from, addr, towrite); | 205 | pr_debug("memcpy from %p to %p len = %d\n", from, addr, towrite); |
222 | memcpy_toio(addr, from, towrite); | 206 | memcpy_toio(addr, from, towrite); |
223 | len -= towrite; | 207 | len -= towrite; |
224 | to += towrite; | 208 | to += towrite; |
@@ -240,7 +224,7 @@ static map_word pcmcia_read8(struct map_info *map, unsigned long ofs) | |||
240 | return d; | 224 | return d; |
241 | 225 | ||
242 | d.x[0] = readb(win_base + ofs); | 226 | d.x[0] = readb(win_base + ofs); |
243 | DEBUG(3, "ofs = 0x%08lx (%p) data = 0x%02lx", | 227 | pr_debug("ofs = 0x%08lx (%p) data = 0x%02lx\n", |
244 | ofs, win_base + ofs, d.x[0]); | 228 | ofs, win_base + ofs, d.x[0]); |
245 | return d; | 229 | return d; |
246 | } | 230 | } |
@@ -255,7 +239,7 @@ static map_word pcmcia_read16(struct map_info *map, unsigned long ofs) | |||
255 | return d; | 239 | return d; |
256 | 240 | ||
257 | d.x[0] = readw(win_base + ofs); | 241 | d.x[0] = readw(win_base + ofs); |
258 | DEBUG(3, "ofs = 0x%08lx (%p) data = 0x%04lx", | 242 | pr_debug("ofs = 0x%08lx (%p) data = 0x%04lx\n", |
259 | ofs, win_base + ofs, d.x[0]); | 243 | ofs, win_base + ofs, d.x[0]); |
260 | return d; | 244 | return d; |
261 | } | 245 | } |
@@ -268,7 +252,7 @@ static void pcmcia_copy_from(struct map_info *map, void *to, unsigned long from, | |||
268 | if(DEV_REMOVED(map)) | 252 | if(DEV_REMOVED(map)) |
269 | return; | 253 | return; |
270 | 254 | ||
271 | DEBUG(3, "to = %p from = %lu len = %zd", to, from, len); | 255 | pr_debug("to = %p from = %lu len = %zd\n", to, from, len); |
272 | memcpy_fromio(to, win_base + from, len); | 256 | memcpy_fromio(to, win_base + from, len); |
273 | } | 257 | } |
274 | 258 | ||
@@ -280,7 +264,7 @@ static void pcmcia_write8(struct map_info *map, map_word d, unsigned long adr) | |||
280 | if(DEV_REMOVED(map)) | 264 | if(DEV_REMOVED(map)) |
281 | return; | 265 | return; |
282 | 266 | ||
283 | DEBUG(3, "adr = 0x%08lx (%p) data = 0x%02lx", | 267 | pr_debug("adr = 0x%08lx (%p) data = 0x%02lx\n", |
284 | adr, win_base + adr, d.x[0]); | 268 | adr, win_base + adr, d.x[0]); |
285 | writeb(d.x[0], win_base + adr); | 269 | writeb(d.x[0], win_base + adr); |
286 | } | 270 | } |
@@ -293,7 +277,7 @@ static void pcmcia_write16(struct map_info *map, map_word d, unsigned long adr) | |||
293 | if(DEV_REMOVED(map)) | 277 | if(DEV_REMOVED(map)) |
294 | return; | 278 | return; |
295 | 279 | ||
296 | DEBUG(3, "adr = 0x%08lx (%p) data = 0x%04lx", | 280 | pr_debug("adr = 0x%08lx (%p) data = 0x%04lx\n", |
297 | adr, win_base + adr, d.x[0]); | 281 | adr, win_base + adr, d.x[0]); |
298 | writew(d.x[0], win_base + adr); | 282 | writew(d.x[0], win_base + adr); |
299 | } | 283 | } |
@@ -306,7 +290,7 @@ static void pcmcia_copy_to(struct map_info *map, unsigned long to, const void *f | |||
306 | if(DEV_REMOVED(map)) | 290 | if(DEV_REMOVED(map)) |
307 | return; | 291 | return; |
308 | 292 | ||
309 | DEBUG(3, "to = %lu from = %p len = %zd", to, from, len); | 293 | pr_debug("to = %lu from = %p len = %zd\n", to, from, len); |
310 | memcpy_toio(win_base + to, from, len); | 294 | memcpy_toio(win_base + to, from, len); |
311 | } | 295 | } |
312 | 296 | ||
@@ -316,7 +300,7 @@ static void pcmciamtd_set_vpp(struct map_info *map, int on) | |||
316 | struct pcmciamtd_dev *dev = (struct pcmciamtd_dev *)map->map_priv_1; | 300 | struct pcmciamtd_dev *dev = (struct pcmciamtd_dev *)map->map_priv_1; |
317 | struct pcmcia_device *link = dev->p_dev; | 301 | struct pcmcia_device *link = dev->p_dev; |
318 | 302 | ||
319 | DEBUG(2, "dev = %p on = %d vpp = %d\n", dev, on, dev->vpp); | 303 | pr_debug("dev = %p on = %d vpp = %d\n\n", dev, on, dev->vpp); |
320 | pcmcia_fixup_vpp(link, on ? dev->vpp : 0); | 304 | pcmcia_fixup_vpp(link, on ? dev->vpp : 0); |
321 | } | 305 | } |
322 | 306 | ||
@@ -325,7 +309,7 @@ static void pcmciamtd_release(struct pcmcia_device *link) | |||
325 | { | 309 | { |
326 | struct pcmciamtd_dev *dev = link->priv; | 310 | struct pcmciamtd_dev *dev = link->priv; |
327 | 311 | ||
328 | DEBUG(3, "link = 0x%p", link); | 312 | pr_debug("link = 0x%p\n", link); |
329 | 313 | ||
330 | if (link->resource[2]->end) { | 314 | if (link->resource[2]->end) { |
331 | if(dev->win_base) { | 315 | if(dev->win_base) { |
@@ -337,7 +321,6 @@ static void pcmciamtd_release(struct pcmcia_device *link) | |||
337 | } | 321 | } |
338 | 322 | ||
339 | 323 | ||
340 | #ifdef CONFIG_MTD_DEBUG | ||
341 | static int pcmciamtd_cistpl_format(struct pcmcia_device *p_dev, | 324 | static int pcmciamtd_cistpl_format(struct pcmcia_device *p_dev, |
342 | tuple_t *tuple, | 325 | tuple_t *tuple, |
343 | void *priv_data) | 326 | void *priv_data) |
@@ -347,7 +330,7 @@ static int pcmciamtd_cistpl_format(struct pcmcia_device *p_dev, | |||
347 | if (!pcmcia_parse_tuple(tuple, &parse)) { | 330 | if (!pcmcia_parse_tuple(tuple, &parse)) { |
348 | cistpl_format_t *t = &parse.format; | 331 | cistpl_format_t *t = &parse.format; |
349 | (void)t; /* Shut up, gcc */ | 332 | (void)t; /* Shut up, gcc */ |
350 | DEBUG(2, "Format type: %u, Error Detection: %u, offset = %u, length =%u", | 333 | pr_debug("Format type: %u, Error Detection: %u, offset = %u, length =%u\n", |
351 | t->type, t->edc, t->offset, t->length); | 334 | t->type, t->edc, t->offset, t->length); |
352 | } | 335 | } |
353 | return -ENOSPC; | 336 | return -ENOSPC; |
@@ -363,12 +346,11 @@ static int pcmciamtd_cistpl_jedec(struct pcmcia_device *p_dev, | |||
363 | if (!pcmcia_parse_tuple(tuple, &parse)) { | 346 | if (!pcmcia_parse_tuple(tuple, &parse)) { |
364 | cistpl_jedec_t *t = &parse.jedec; | 347 | cistpl_jedec_t *t = &parse.jedec; |
365 | for (i = 0; i < t->nid; i++) | 348 | for (i = 0; i < t->nid; i++) |
366 | DEBUG(2, "JEDEC: 0x%02x 0x%02x", | 349 | pr_debug("JEDEC: 0x%02x 0x%02x\n", |
367 | t->id[i].mfr, t->id[i].info); | 350 | t->id[i].mfr, t->id[i].info); |
368 | } | 351 | } |
369 | return -ENOSPC; | 352 | return -ENOSPC; |
370 | } | 353 | } |
371 | #endif | ||
372 | 354 | ||
373 | static int pcmciamtd_cistpl_device(struct pcmcia_device *p_dev, | 355 | static int pcmciamtd_cistpl_device(struct pcmcia_device *p_dev, |
374 | tuple_t *tuple, | 356 | tuple_t *tuple, |
@@ -382,14 +364,14 @@ static int pcmciamtd_cistpl_device(struct pcmcia_device *p_dev, | |||
382 | if (pcmcia_parse_tuple(tuple, &parse)) | 364 | if (pcmcia_parse_tuple(tuple, &parse)) |
383 | return -EINVAL; | 365 | return -EINVAL; |
384 | 366 | ||
385 | DEBUG(2, "Common memory:"); | 367 | pr_debug("Common memory:\n"); |
386 | dev->pcmcia_map.size = t->dev[0].size; | 368 | dev->pcmcia_map.size = t->dev[0].size; |
387 | /* from here on: DEBUG only */ | 369 | /* from here on: DEBUG only */ |
388 | for (i = 0; i < t->ndev; i++) { | 370 | for (i = 0; i < t->ndev; i++) { |
389 | DEBUG(2, "Region %d, type = %u", i, t->dev[i].type); | 371 | pr_debug("Region %d, type = %u\n", i, t->dev[i].type); |
390 | DEBUG(2, "Region %d, wp = %u", i, t->dev[i].wp); | 372 | pr_debug("Region %d, wp = %u\n", i, t->dev[i].wp); |
391 | DEBUG(2, "Region %d, speed = %u ns", i, t->dev[i].speed); | 373 | pr_debug("Region %d, speed = %u ns\n", i, t->dev[i].speed); |
392 | DEBUG(2, "Region %d, size = %u bytes", i, t->dev[i].size); | 374 | pr_debug("Region %d, size = %u bytes\n", i, t->dev[i].size); |
393 | } | 375 | } |
394 | return 0; | 376 | return 0; |
395 | } | 377 | } |
@@ -409,12 +391,12 @@ static int pcmciamtd_cistpl_geo(struct pcmcia_device *p_dev, | |||
409 | dev->pcmcia_map.bankwidth = t->geo[0].buswidth; | 391 | dev->pcmcia_map.bankwidth = t->geo[0].buswidth; |
410 | /* from here on: DEBUG only */ | 392 | /* from here on: DEBUG only */ |
411 | for (i = 0; i < t->ngeo; i++) { | 393 | for (i = 0; i < t->ngeo; i++) { |
412 | DEBUG(2, "region: %d bankwidth = %u", i, t->geo[i].buswidth); | 394 | pr_debug("region: %d bankwidth = %u\n", i, t->geo[i].buswidth); |
413 | DEBUG(2, "region: %d erase_block = %u", i, t->geo[i].erase_block); | 395 | pr_debug("region: %d erase_block = %u\n", i, t->geo[i].erase_block); |
414 | DEBUG(2, "region: %d read_block = %u", i, t->geo[i].read_block); | 396 | pr_debug("region: %d read_block = %u\n", i, t->geo[i].read_block); |
415 | DEBUG(2, "region: %d write_block = %u", i, t->geo[i].write_block); | 397 | pr_debug("region: %d write_block = %u\n", i, t->geo[i].write_block); |
416 | DEBUG(2, "region: %d partition = %u", i, t->geo[i].partition); | 398 | pr_debug("region: %d partition = %u\n", i, t->geo[i].partition); |
417 | DEBUG(2, "region: %d interleave = %u", i, t->geo[i].interleave); | 399 | pr_debug("region: %d interleave = %u\n", i, t->geo[i].interleave); |
418 | } | 400 | } |
419 | return 0; | 401 | return 0; |
420 | } | 402 | } |
@@ -432,13 +414,11 @@ static void card_settings(struct pcmciamtd_dev *dev, struct pcmcia_device *p_dev | |||
432 | if (p_dev->prod_id[i]) | 414 | if (p_dev->prod_id[i]) |
433 | strcat(dev->mtd_name, p_dev->prod_id[i]); | 415 | strcat(dev->mtd_name, p_dev->prod_id[i]); |
434 | } | 416 | } |
435 | DEBUG(2, "Found name: %s", dev->mtd_name); | 417 | pr_debug("Found name: %s\n", dev->mtd_name); |
436 | } | 418 | } |
437 | 419 | ||
438 | #ifdef CONFIG_MTD_DEBUG | ||
439 | pcmcia_loop_tuple(p_dev, CISTPL_FORMAT, pcmciamtd_cistpl_format, NULL); | 420 | pcmcia_loop_tuple(p_dev, CISTPL_FORMAT, pcmciamtd_cistpl_format, NULL); |
440 | pcmcia_loop_tuple(p_dev, CISTPL_JEDEC_C, pcmciamtd_cistpl_jedec, NULL); | 421 | pcmcia_loop_tuple(p_dev, CISTPL_JEDEC_C, pcmciamtd_cistpl_jedec, NULL); |
441 | #endif | ||
442 | pcmcia_loop_tuple(p_dev, CISTPL_DEVICE, pcmciamtd_cistpl_device, dev); | 422 | pcmcia_loop_tuple(p_dev, CISTPL_DEVICE, pcmciamtd_cistpl_device, dev); |
443 | pcmcia_loop_tuple(p_dev, CISTPL_DEVICE_GEO, pcmciamtd_cistpl_geo, dev); | 423 | pcmcia_loop_tuple(p_dev, CISTPL_DEVICE_GEO, pcmciamtd_cistpl_geo, dev); |
444 | 424 | ||
@@ -450,12 +430,12 @@ static void card_settings(struct pcmciamtd_dev *dev, struct pcmcia_device *p_dev | |||
450 | 430 | ||
451 | if(force_size) { | 431 | if(force_size) { |
452 | dev->pcmcia_map.size = force_size << 20; | 432 | dev->pcmcia_map.size = force_size << 20; |
453 | DEBUG(2, "size forced to %dM", force_size); | 433 | pr_debug("size forced to %dM\n", force_size); |
454 | } | 434 | } |
455 | 435 | ||
456 | if(bankwidth) { | 436 | if(bankwidth) { |
457 | dev->pcmcia_map.bankwidth = bankwidth; | 437 | dev->pcmcia_map.bankwidth = bankwidth; |
458 | DEBUG(2, "bankwidth forced to %d", bankwidth); | 438 | pr_debug("bankwidth forced to %d\n", bankwidth); |
459 | } | 439 | } |
460 | 440 | ||
461 | dev->pcmcia_map.name = dev->mtd_name; | 441 | dev->pcmcia_map.name = dev->mtd_name; |
@@ -464,7 +444,7 @@ static void card_settings(struct pcmciamtd_dev *dev, struct pcmcia_device *p_dev | |||
464 | *new_name = 1; | 444 | *new_name = 1; |
465 | } | 445 | } |
466 | 446 | ||
467 | DEBUG(1, "Device: Size: %lu Width:%d Name: %s", | 447 | pr_debug("Device: Size: %lu Width:%d Name: %s\n", |
468 | dev->pcmcia_map.size, | 448 | dev->pcmcia_map.size, |
469 | dev->pcmcia_map.bankwidth << 3, dev->mtd_name); | 449 | dev->pcmcia_map.bankwidth << 3, dev->mtd_name); |
470 | } | 450 | } |
@@ -479,7 +459,7 @@ static int pcmciamtd_config(struct pcmcia_device *link) | |||
479 | static char *probes[] = { "jedec_probe", "cfi_probe" }; | 459 | static char *probes[] = { "jedec_probe", "cfi_probe" }; |
480 | int new_name = 0; | 460 | int new_name = 0; |
481 | 461 | ||
482 | DEBUG(3, "link=0x%p", link); | 462 | pr_debug("link=0x%p\n", link); |
483 | 463 | ||
484 | card_settings(dev, link, &new_name); | 464 | card_settings(dev, link, &new_name); |
485 | 465 | ||
@@ -512,11 +492,11 @@ static int pcmciamtd_config(struct pcmcia_device *link) | |||
512 | 492 | ||
513 | do { | 493 | do { |
514 | int ret; | 494 | int ret; |
515 | DEBUG(2, "requesting window with size = %luKiB memspeed = %d", | 495 | pr_debug("requesting window with size = %luKiB memspeed = %d\n", |
516 | (unsigned long) resource_size(link->resource[2]) >> 10, | 496 | (unsigned long) resource_size(link->resource[2]) >> 10, |
517 | mem_speed); | 497 | mem_speed); |
518 | ret = pcmcia_request_window(link, link->resource[2], mem_speed); | 498 | ret = pcmcia_request_window(link, link->resource[2], mem_speed); |
519 | DEBUG(2, "ret = %d dev->win_size = %d", ret, dev->win_size); | 499 | pr_debug("ret = %d dev->win_size = %d\n", ret, dev->win_size); |
520 | if(ret) { | 500 | if(ret) { |
521 | j++; | 501 | j++; |
522 | link->resource[2]->start = 0; | 502 | link->resource[2]->start = 0; |
@@ -524,21 +504,21 @@ static int pcmciamtd_config(struct pcmcia_device *link) | |||
524 | force_size << 20 : MAX_PCMCIA_ADDR; | 504 | force_size << 20 : MAX_PCMCIA_ADDR; |
525 | link->resource[2]->end >>= j; | 505 | link->resource[2]->end >>= j; |
526 | } else { | 506 | } else { |
527 | DEBUG(2, "Got window of size %luKiB", (unsigned long) | 507 | pr_debug("Got window of size %luKiB\n", (unsigned long) |
528 | resource_size(link->resource[2]) >> 10); | 508 | resource_size(link->resource[2]) >> 10); |
529 | dev->win_size = resource_size(link->resource[2]); | 509 | dev->win_size = resource_size(link->resource[2]); |
530 | break; | 510 | break; |
531 | } | 511 | } |
532 | } while (link->resource[2]->end >= 0x1000); | 512 | } while (link->resource[2]->end >= 0x1000); |
533 | 513 | ||
534 | DEBUG(2, "dev->win_size = %d", dev->win_size); | 514 | pr_debug("dev->win_size = %d\n", dev->win_size); |
535 | 515 | ||
536 | if(!dev->win_size) { | 516 | if(!dev->win_size) { |
537 | dev_err(&dev->p_dev->dev, "Cannot allocate memory window\n"); | 517 | dev_err(&dev->p_dev->dev, "Cannot allocate memory window\n"); |
538 | pcmciamtd_release(link); | 518 | pcmciamtd_release(link); |
539 | return -ENODEV; | 519 | return -ENODEV; |
540 | } | 520 | } |
541 | DEBUG(1, "Allocated a window of %dKiB", dev->win_size >> 10); | 521 | pr_debug("Allocated a window of %dKiB\n", dev->win_size >> 10); |
542 | 522 | ||
543 | /* Get write protect status */ | 523 | /* Get write protect status */ |
544 | dev->win_base = ioremap(link->resource[2]->start, | 524 | dev->win_base = ioremap(link->resource[2]->start, |
@@ -549,7 +529,7 @@ static int pcmciamtd_config(struct pcmcia_device *link) | |||
549 | pcmciamtd_release(link); | 529 | pcmciamtd_release(link); |
550 | return -ENODEV; | 530 | return -ENODEV; |
551 | } | 531 | } |
552 | DEBUG(1, "mapped window dev = %p @ %pR, base = %p", | 532 | pr_debug("mapped window dev = %p @ %pR, base = %p\n", |
553 | dev, link->resource[2], dev->win_base); | 533 | dev, link->resource[2], dev->win_base); |
554 | 534 | ||
555 | dev->offset = 0; | 535 | dev->offset = 0; |
@@ -564,7 +544,7 @@ static int pcmciamtd_config(struct pcmcia_device *link) | |||
564 | } | 544 | } |
565 | 545 | ||
566 | link->config_index = 0; | 546 | link->config_index = 0; |
567 | DEBUG(2, "Setting Configuration"); | 547 | pr_debug("Setting Configuration\n"); |
568 | ret = pcmcia_enable_device(link); | 548 | ret = pcmcia_enable_device(link); |
569 | if (ret != 0) { | 549 | if (ret != 0) { |
570 | if (dev->win_base) { | 550 | if (dev->win_base) { |
@@ -580,17 +560,17 @@ static int pcmciamtd_config(struct pcmcia_device *link) | |||
580 | mtd = do_map_probe("map_rom", &dev->pcmcia_map); | 560 | mtd = do_map_probe("map_rom", &dev->pcmcia_map); |
581 | } else { | 561 | } else { |
582 | for(i = 0; i < ARRAY_SIZE(probes); i++) { | 562 | for(i = 0; i < ARRAY_SIZE(probes); i++) { |
583 | DEBUG(1, "Trying %s", probes[i]); | 563 | pr_debug("Trying %s\n", probes[i]); |
584 | mtd = do_map_probe(probes[i], &dev->pcmcia_map); | 564 | mtd = do_map_probe(probes[i], &dev->pcmcia_map); |
585 | if(mtd) | 565 | if(mtd) |
586 | break; | 566 | break; |
587 | 567 | ||
588 | DEBUG(1, "FAILED: %s", probes[i]); | 568 | pr_debug("FAILED: %s\n", probes[i]); |
589 | } | 569 | } |
590 | } | 570 | } |
591 | 571 | ||
592 | if(!mtd) { | 572 | if(!mtd) { |
593 | DEBUG(1, "Can not find an MTD"); | 573 | pr_debug("Can not find an MTD\n"); |
594 | pcmciamtd_release(link); | 574 | pcmciamtd_release(link); |
595 | return -ENODEV; | 575 | return -ENODEV; |
596 | } | 576 | } |
@@ -617,7 +597,7 @@ static int pcmciamtd_config(struct pcmcia_device *link) | |||
617 | /* If the memory found is fits completely into the mapped PCMCIA window, | 597 | /* If the memory found is fits completely into the mapped PCMCIA window, |
618 | use the faster non-remapping read/write functions */ | 598 | use the faster non-remapping read/write functions */ |
619 | if(mtd->size <= dev->win_size) { | 599 | if(mtd->size <= dev->win_size) { |
620 | DEBUG(1, "Using non remapping memory functions"); | 600 | pr_debug("Using non remapping memory functions\n"); |
621 | dev->pcmcia_map.map_priv_2 = (unsigned long)dev->win_base; | 601 | dev->pcmcia_map.map_priv_2 = (unsigned long)dev->win_base; |
622 | if (dev->pcmcia_map.bankwidth == 1) { | 602 | if (dev->pcmcia_map.bankwidth == 1) { |
623 | dev->pcmcia_map.read = pcmcia_read8; | 603 | dev->pcmcia_map.read = pcmcia_read8; |
@@ -645,7 +625,7 @@ static int pcmciamtd_config(struct pcmcia_device *link) | |||
645 | 625 | ||
646 | static int pcmciamtd_suspend(struct pcmcia_device *dev) | 626 | static int pcmciamtd_suspend(struct pcmcia_device *dev) |
647 | { | 627 | { |
648 | DEBUG(2, "EVENT_PM_RESUME"); | 628 | pr_debug("EVENT_PM_RESUME\n"); |
649 | 629 | ||
650 | /* get_lock(link); */ | 630 | /* get_lock(link); */ |
651 | 631 | ||
@@ -654,7 +634,7 @@ static int pcmciamtd_suspend(struct pcmcia_device *dev) | |||
654 | 634 | ||
655 | static int pcmciamtd_resume(struct pcmcia_device *dev) | 635 | static int pcmciamtd_resume(struct pcmcia_device *dev) |
656 | { | 636 | { |
657 | DEBUG(2, "EVENT_PM_SUSPEND"); | 637 | pr_debug("EVENT_PM_SUSPEND\n"); |
658 | 638 | ||
659 | /* free_lock(link); */ | 639 | /* free_lock(link); */ |
660 | 640 | ||
@@ -666,7 +646,7 @@ static void pcmciamtd_detach(struct pcmcia_device *link) | |||
666 | { | 646 | { |
667 | struct pcmciamtd_dev *dev = link->priv; | 647 | struct pcmciamtd_dev *dev = link->priv; |
668 | 648 | ||
669 | DEBUG(3, "link=0x%p", link); | 649 | pr_debug("link=0x%p\n", link); |
670 | 650 | ||
671 | if(dev->mtd_info) { | 651 | if(dev->mtd_info) { |
672 | mtd_device_unregister(dev->mtd_info); | 652 | mtd_device_unregister(dev->mtd_info); |
@@ -686,7 +666,7 @@ static int pcmciamtd_probe(struct pcmcia_device *link) | |||
686 | /* Create new memory card device */ | 666 | /* Create new memory card device */ |
687 | dev = kzalloc(sizeof(*dev), GFP_KERNEL); | 667 | dev = kzalloc(sizeof(*dev), GFP_KERNEL); |
688 | if (!dev) return -ENOMEM; | 668 | if (!dev) return -ENOMEM; |
689 | DEBUG(1, "dev=0x%p", dev); | 669 | pr_debug("dev=0x%p\n", dev); |
690 | 670 | ||
691 | dev->p_dev = link; | 671 | dev->p_dev = link; |
692 | link->priv = dev; | 672 | link->priv = dev; |
@@ -755,7 +735,7 @@ static int __init init_pcmciamtd(void) | |||
755 | 735 | ||
756 | static void __exit exit_pcmciamtd(void) | 736 | static void __exit exit_pcmciamtd(void) |
757 | { | 737 | { |
758 | DEBUG(1, DRIVER_DESC " unloading"); | 738 | pr_debug(DRIVER_DESC " unloading"); |
759 | pcmcia_unregister_driver(&pcmciamtd_driver); | 739 | pcmcia_unregister_driver(&pcmciamtd_driver); |
760 | } | 740 | } |
761 | 741 | ||
diff --git a/drivers/mtd/maps/physmap.c b/drivers/mtd/maps/physmap.c index f64cee4a3bfb..66e8200079c2 100644 --- a/drivers/mtd/maps/physmap.c +++ b/drivers/mtd/maps/physmap.c | |||
@@ -27,8 +27,6 @@ struct physmap_flash_info { | |||
27 | struct mtd_info *mtd[MAX_RESOURCES]; | 27 | struct mtd_info *mtd[MAX_RESOURCES]; |
28 | struct mtd_info *cmtd; | 28 | struct mtd_info *cmtd; |
29 | struct map_info map[MAX_RESOURCES]; | 29 | struct map_info map[MAX_RESOURCES]; |
30 | int nr_parts; | ||
31 | struct mtd_partition *parts; | ||
32 | }; | 30 | }; |
33 | 31 | ||
34 | static int physmap_flash_remove(struct platform_device *dev) | 32 | static int physmap_flash_remove(struct platform_device *dev) |
@@ -46,8 +44,6 @@ static int physmap_flash_remove(struct platform_device *dev) | |||
46 | 44 | ||
47 | if (info->cmtd) { | 45 | if (info->cmtd) { |
48 | mtd_device_unregister(info->cmtd); | 46 | mtd_device_unregister(info->cmtd); |
49 | if (info->nr_parts) | ||
50 | kfree(info->parts); | ||
51 | if (info->cmtd != info->mtd[0]) | 47 | if (info->cmtd != info->mtd[0]) |
52 | mtd_concat_destroy(info->cmtd); | 48 | mtd_concat_destroy(info->cmtd); |
53 | } | 49 | } |
@@ -175,23 +171,8 @@ static int physmap_flash_probe(struct platform_device *dev) | |||
175 | if (err) | 171 | if (err) |
176 | goto err_out; | 172 | goto err_out; |
177 | 173 | ||
178 | err = parse_mtd_partitions(info->cmtd, part_probe_types, | 174 | mtd_device_parse_register(info->cmtd, part_probe_types, 0, |
179 | &info->parts, 0); | 175 | physmap_data->parts, physmap_data->nr_parts); |
180 | if (err > 0) { | ||
181 | mtd_device_register(info->cmtd, info->parts, err); | ||
182 | info->nr_parts = err; | ||
183 | return 0; | ||
184 | } | ||
185 | |||
186 | if (physmap_data->nr_parts) { | ||
187 | printk(KERN_NOTICE "Using physmap partition information\n"); | ||
188 | mtd_device_register(info->cmtd, physmap_data->parts, | ||
189 | physmap_data->nr_parts); | ||
190 | return 0; | ||
191 | } | ||
192 | |||
193 | mtd_device_register(info->cmtd, NULL, 0); | ||
194 | |||
195 | return 0; | 176 | return 0; |
196 | 177 | ||
197 | err_out: | 178 | err_out: |
@@ -245,21 +226,6 @@ static struct platform_device physmap_flash = { | |||
245 | .num_resources = 1, | 226 | .num_resources = 1, |
246 | .resource = &physmap_flash_resource, | 227 | .resource = &physmap_flash_resource, |
247 | }; | 228 | }; |
248 | |||
249 | void physmap_configure(unsigned long addr, unsigned long size, | ||
250 | int bankwidth, void (*set_vpp)(struct map_info *, int)) | ||
251 | { | ||
252 | physmap_flash_resource.start = addr; | ||
253 | physmap_flash_resource.end = addr + size - 1; | ||
254 | physmap_flash_data.width = bankwidth; | ||
255 | physmap_flash_data.set_vpp = set_vpp; | ||
256 | } | ||
257 | |||
258 | void physmap_set_partitions(struct mtd_partition *parts, int num_parts) | ||
259 | { | ||
260 | physmap_flash_data.nr_parts = num_parts; | ||
261 | physmap_flash_data.parts = parts; | ||
262 | } | ||
263 | #endif | 229 | #endif |
264 | 230 | ||
265 | static int __init physmap_init(void) | 231 | static int __init physmap_init(void) |
diff --git a/drivers/mtd/maps/physmap_of.c b/drivers/mtd/maps/physmap_of.c index d251d1db129b..7d65f9d3e690 100644 --- a/drivers/mtd/maps/physmap_of.c +++ b/drivers/mtd/maps/physmap_of.c | |||
@@ -34,58 +34,10 @@ struct of_flash_list { | |||
34 | 34 | ||
35 | struct of_flash { | 35 | struct of_flash { |
36 | struct mtd_info *cmtd; | 36 | struct mtd_info *cmtd; |
37 | struct mtd_partition *parts; | ||
38 | int list_size; /* number of elements in of_flash_list */ | 37 | int list_size; /* number of elements in of_flash_list */ |
39 | struct of_flash_list list[0]; | 38 | struct of_flash_list list[0]; |
40 | }; | 39 | }; |
41 | 40 | ||
42 | #define OF_FLASH_PARTS(info) ((info)->parts) | ||
43 | static int parse_obsolete_partitions(struct platform_device *dev, | ||
44 | struct of_flash *info, | ||
45 | struct device_node *dp) | ||
46 | { | ||
47 | int i, plen, nr_parts; | ||
48 | const struct { | ||
49 | __be32 offset, len; | ||
50 | } *part; | ||
51 | const char *names; | ||
52 | |||
53 | part = of_get_property(dp, "partitions", &plen); | ||
54 | if (!part) | ||
55 | return 0; /* No partitions found */ | ||
56 | |||
57 | dev_warn(&dev->dev, "Device tree uses obsolete partition map binding\n"); | ||
58 | |||
59 | nr_parts = plen / sizeof(part[0]); | ||
60 | |||
61 | info->parts = kzalloc(nr_parts * sizeof(*info->parts), GFP_KERNEL); | ||
62 | if (!info->parts) | ||
63 | return -ENOMEM; | ||
64 | |||
65 | names = of_get_property(dp, "partition-names", &plen); | ||
66 | |||
67 | for (i = 0; i < nr_parts; i++) { | ||
68 | info->parts[i].offset = be32_to_cpu(part->offset); | ||
69 | info->parts[i].size = be32_to_cpu(part->len) & ~1; | ||
70 | if (be32_to_cpu(part->len) & 1) /* bit 0 set signifies read only partition */ | ||
71 | info->parts[i].mask_flags = MTD_WRITEABLE; | ||
72 | |||
73 | if (names && (plen > 0)) { | ||
74 | int len = strlen(names) + 1; | ||
75 | |||
76 | info->parts[i].name = (char *)names; | ||
77 | plen -= len; | ||
78 | names += len; | ||
79 | } else { | ||
80 | info->parts[i].name = "unnamed"; | ||
81 | } | ||
82 | |||
83 | part++; | ||
84 | } | ||
85 | |||
86 | return nr_parts; | ||
87 | } | ||
88 | |||
89 | static int of_flash_remove(struct platform_device *dev) | 41 | static int of_flash_remove(struct platform_device *dev) |
90 | { | 42 | { |
91 | struct of_flash *info; | 43 | struct of_flash *info; |
@@ -101,11 +53,8 @@ static int of_flash_remove(struct platform_device *dev) | |||
101 | mtd_concat_destroy(info->cmtd); | 53 | mtd_concat_destroy(info->cmtd); |
102 | } | 54 | } |
103 | 55 | ||
104 | if (info->cmtd) { | 56 | if (info->cmtd) |
105 | if (OF_FLASH_PARTS(info)) | ||
106 | kfree(OF_FLASH_PARTS(info)); | ||
107 | mtd_device_unregister(info->cmtd); | 57 | mtd_device_unregister(info->cmtd); |
108 | } | ||
109 | 58 | ||
110 | for (i = 0; i < info->list_size; i++) { | 59 | for (i = 0; i < info->list_size; i++) { |
111 | if (info->list[i].mtd) | 60 | if (info->list[i].mtd) |
@@ -165,7 +114,8 @@ static struct mtd_info * __devinit obsolete_probe(struct platform_device *dev, | |||
165 | specifies the list of partition probers to use. If none is given then the | 114 | specifies the list of partition probers to use. If none is given then the |
166 | default is use. These take precedence over other device tree | 115 | default is use. These take precedence over other device tree |
167 | information. */ | 116 | information. */ |
168 | static const char *part_probe_types_def[] = { "cmdlinepart", "RedBoot", NULL }; | 117 | static const char *part_probe_types_def[] = { "cmdlinepart", "RedBoot", |
118 | "ofpart", "ofoldpart", NULL }; | ||
169 | static const char ** __devinit of_get_probes(struct device_node *dp) | 119 | static const char ** __devinit of_get_probes(struct device_node *dp) |
170 | { | 120 | { |
171 | const char *cp; | 121 | const char *cp; |
@@ -218,6 +168,7 @@ static int __devinit of_flash_probe(struct platform_device *dev) | |||
218 | int reg_tuple_size; | 168 | int reg_tuple_size; |
219 | struct mtd_info **mtd_list = NULL; | 169 | struct mtd_info **mtd_list = NULL; |
220 | resource_size_t res_size; | 170 | resource_size_t res_size; |
171 | struct mtd_part_parser_data ppdata; | ||
221 | 172 | ||
222 | match = of_match_device(of_flash_match, &dev->dev); | 173 | match = of_match_device(of_flash_match, &dev->dev); |
223 | if (!match) | 174 | if (!match) |
@@ -331,29 +282,12 @@ static int __devinit of_flash_probe(struct platform_device *dev) | |||
331 | if (err) | 282 | if (err) |
332 | goto err_out; | 283 | goto err_out; |
333 | 284 | ||
285 | ppdata.of_node = dp; | ||
334 | part_probe_types = of_get_probes(dp); | 286 | part_probe_types = of_get_probes(dp); |
335 | err = parse_mtd_partitions(info->cmtd, part_probe_types, | 287 | mtd_device_parse_register(info->cmtd, part_probe_types, &ppdata, |
336 | &info->parts, 0); | 288 | NULL, 0); |
337 | if (err < 0) { | ||
338 | of_free_probes(part_probe_types); | ||
339 | goto err_out; | ||
340 | } | ||
341 | of_free_probes(part_probe_types); | 289 | of_free_probes(part_probe_types); |
342 | 290 | ||
343 | if (err == 0) { | ||
344 | err = of_mtd_parse_partitions(&dev->dev, dp, &info->parts); | ||
345 | if (err < 0) | ||
346 | goto err_out; | ||
347 | } | ||
348 | |||
349 | if (err == 0) { | ||
350 | err = parse_obsolete_partitions(dev, info, dp); | ||
351 | if (err < 0) | ||
352 | goto err_out; | ||
353 | } | ||
354 | |||
355 | mtd_device_register(info->cmtd, info->parts, err); | ||
356 | |||
357 | kfree(mtd_list); | 291 | kfree(mtd_list); |
358 | 292 | ||
359 | return 0; | 293 | return 0; |
diff --git a/drivers/mtd/maps/plat-ram.c b/drivers/mtd/maps/plat-ram.c index 9ca1eccba4bc..94f553489725 100644 --- a/drivers/mtd/maps/plat-ram.c +++ b/drivers/mtd/maps/plat-ram.c | |||
@@ -44,8 +44,6 @@ struct platram_info { | |||
44 | struct device *dev; | 44 | struct device *dev; |
45 | struct mtd_info *mtd; | 45 | struct mtd_info *mtd; |
46 | struct map_info map; | 46 | struct map_info map; |
47 | struct mtd_partition *partitions; | ||
48 | bool free_partitions; | ||
49 | struct resource *area; | 47 | struct resource *area; |
50 | struct platdata_mtd_ram *pdata; | 48 | struct platdata_mtd_ram *pdata; |
51 | }; | 49 | }; |
@@ -95,10 +93,6 @@ static int platram_remove(struct platform_device *pdev) | |||
95 | 93 | ||
96 | if (info->mtd) { | 94 | if (info->mtd) { |
97 | mtd_device_unregister(info->mtd); | 95 | mtd_device_unregister(info->mtd); |
98 | if (info->partitions) { | ||
99 | if (info->free_partitions) | ||
100 | kfree(info->partitions); | ||
101 | } | ||
102 | map_destroy(info->mtd); | 96 | map_destroy(info->mtd); |
103 | } | 97 | } |
104 | 98 | ||
@@ -228,21 +222,8 @@ static int platram_probe(struct platform_device *pdev) | |||
228 | /* check to see if there are any available partitions, or wether | 222 | /* check to see if there are any available partitions, or wether |
229 | * to add this device whole */ | 223 | * to add this device whole */ |
230 | 224 | ||
231 | if (!pdata->nr_partitions) { | 225 | err = mtd_device_parse_register(info->mtd, pdata->probes, 0, |
232 | /* try to probe using the supplied probe type */ | 226 | pdata->partitions, pdata->nr_partitions); |
233 | if (pdata->probes) { | ||
234 | err = parse_mtd_partitions(info->mtd, pdata->probes, | ||
235 | &info->partitions, 0); | ||
236 | info->free_partitions = 1; | ||
237 | if (err > 0) | ||
238 | err = mtd_device_register(info->mtd, | ||
239 | info->partitions, err); | ||
240 | } | ||
241 | } | ||
242 | /* use the static mapping */ | ||
243 | else | ||
244 | err = mtd_device_register(info->mtd, pdata->partitions, | ||
245 | pdata->nr_partitions); | ||
246 | if (!err) | 227 | if (!err) |
247 | dev_info(&pdev->dev, "registered mtd device\n"); | 228 | dev_info(&pdev->dev, "registered mtd device\n"); |
248 | 229 | ||
diff --git a/drivers/mtd/maps/pxa2xx-flash.c b/drivers/mtd/maps/pxa2xx-flash.c index 7ae137d4b998..411a17df9fc1 100644 --- a/drivers/mtd/maps/pxa2xx-flash.c +++ b/drivers/mtd/maps/pxa2xx-flash.c | |||
@@ -41,8 +41,6 @@ static void pxa2xx_map_inval_cache(struct map_info *map, unsigned long from, | |||
41 | } | 41 | } |
42 | 42 | ||
43 | struct pxa2xx_flash_info { | 43 | struct pxa2xx_flash_info { |
44 | struct mtd_partition *parts; | ||
45 | int nr_parts; | ||
46 | struct mtd_info *mtd; | 44 | struct mtd_info *mtd; |
47 | struct map_info map; | 45 | struct map_info map; |
48 | }; | 46 | }; |
@@ -55,9 +53,7 @@ static int __devinit pxa2xx_flash_probe(struct platform_device *pdev) | |||
55 | { | 53 | { |
56 | struct flash_platform_data *flash = pdev->dev.platform_data; | 54 | struct flash_platform_data *flash = pdev->dev.platform_data; |
57 | struct pxa2xx_flash_info *info; | 55 | struct pxa2xx_flash_info *info; |
58 | struct mtd_partition *parts; | ||
59 | struct resource *res; | 56 | struct resource *res; |
60 | int ret = 0; | ||
61 | 57 | ||
62 | res = platform_get_resource(pdev, IORESOURCE_MEM, 0); | 58 | res = platform_get_resource(pdev, IORESOURCE_MEM, 0); |
63 | if (!res) | 59 | if (!res) |
@@ -71,8 +67,6 @@ static int __devinit pxa2xx_flash_probe(struct platform_device *pdev) | |||
71 | info->map.bankwidth = flash->width; | 67 | info->map.bankwidth = flash->width; |
72 | info->map.phys = res->start; | 68 | info->map.phys = res->start; |
73 | info->map.size = resource_size(res); | 69 | info->map.size = resource_size(res); |
74 | info->parts = flash->parts; | ||
75 | info->nr_parts = flash->nr_parts; | ||
76 | 70 | ||
77 | info->map.virt = ioremap(info->map.phys, info->map.size); | 71 | info->map.virt = ioremap(info->map.phys, info->map.size); |
78 | if (!info->map.virt) { | 72 | if (!info->map.virt) { |
@@ -104,18 +98,7 @@ static int __devinit pxa2xx_flash_probe(struct platform_device *pdev) | |||
104 | } | 98 | } |
105 | info->mtd->owner = THIS_MODULE; | 99 | info->mtd->owner = THIS_MODULE; |
106 | 100 | ||
107 | ret = parse_mtd_partitions(info->mtd, probes, &parts, 0); | 101 | mtd_device_parse_register(info->mtd, probes, 0, NULL, 0); |
108 | |||
109 | if (ret > 0) { | ||
110 | info->nr_parts = ret; | ||
111 | info->parts = parts; | ||
112 | } | ||
113 | |||
114 | if (!info->nr_parts) | ||
115 | printk("Registering %s as whole device\n", | ||
116 | info->map.name); | ||
117 | |||
118 | mtd_device_register(info->mtd, info->parts, info->nr_parts); | ||
119 | 102 | ||
120 | platform_set_drvdata(pdev, info); | 103 | platform_set_drvdata(pdev, info); |
121 | return 0; | 104 | return 0; |
@@ -133,7 +116,6 @@ static int __devexit pxa2xx_flash_remove(struct platform_device *dev) | |||
133 | iounmap(info->map.virt); | 116 | iounmap(info->map.virt); |
134 | if (info->map.cached) | 117 | if (info->map.cached) |
135 | iounmap(info->map.cached); | 118 | iounmap(info->map.cached); |
136 | kfree(info->parts); | ||
137 | kfree(info); | 119 | kfree(info); |
138 | return 0; | 120 | return 0; |
139 | } | 121 | } |
diff --git a/drivers/mtd/maps/rbtx4939-flash.c b/drivers/mtd/maps/rbtx4939-flash.c index 761fb459d2c7..0237f197fd12 100644 --- a/drivers/mtd/maps/rbtx4939-flash.c +++ b/drivers/mtd/maps/rbtx4939-flash.c | |||
@@ -25,8 +25,6 @@ | |||
25 | struct rbtx4939_flash_info { | 25 | struct rbtx4939_flash_info { |
26 | struct mtd_info *mtd; | 26 | struct mtd_info *mtd; |
27 | struct map_info map; | 27 | struct map_info map; |
28 | int nr_parts; | ||
29 | struct mtd_partition *parts; | ||
30 | }; | 28 | }; |
31 | 29 | ||
32 | static int rbtx4939_flash_remove(struct platform_device *dev) | 30 | static int rbtx4939_flash_remove(struct platform_device *dev) |
@@ -41,8 +39,6 @@ static int rbtx4939_flash_remove(struct platform_device *dev) | |||
41 | if (info->mtd) { | 39 | if (info->mtd) { |
42 | struct rbtx4939_flash_data *pdata = dev->dev.platform_data; | 40 | struct rbtx4939_flash_data *pdata = dev->dev.platform_data; |
43 | 41 | ||
44 | if (info->nr_parts) | ||
45 | kfree(info->parts); | ||
46 | mtd_device_unregister(info->mtd); | 42 | mtd_device_unregister(info->mtd); |
47 | map_destroy(info->mtd); | 43 | map_destroy(info->mtd); |
48 | } | 44 | } |
@@ -50,7 +46,6 @@ static int rbtx4939_flash_remove(struct platform_device *dev) | |||
50 | } | 46 | } |
51 | 47 | ||
52 | static const char *rom_probe_types[] = { "cfi_probe", "jedec_probe", NULL }; | 48 | static const char *rom_probe_types[] = { "cfi_probe", "jedec_probe", NULL }; |
53 | static const char *part_probe_types[] = { "cmdlinepart", NULL }; | ||
54 | 49 | ||
55 | static int rbtx4939_flash_probe(struct platform_device *dev) | 50 | static int rbtx4939_flash_probe(struct platform_device *dev) |
56 | { | 51 | { |
@@ -107,22 +102,11 @@ static int rbtx4939_flash_probe(struct platform_device *dev) | |||
107 | info->mtd->owner = THIS_MODULE; | 102 | info->mtd->owner = THIS_MODULE; |
108 | if (err) | 103 | if (err) |
109 | goto err_out; | 104 | goto err_out; |
105 | err = mtd_device_parse_register(info->mtd, NULL, 0, | ||
106 | pdata->parts, pdata->nr_parts); | ||
110 | 107 | ||
111 | err = parse_mtd_partitions(info->mtd, part_probe_types, | 108 | if (err) |
112 | &info->parts, 0); | 109 | goto err_out; |
113 | if (err > 0) { | ||
114 | mtd_device_register(info->mtd, info->parts, err); | ||
115 | info->nr_parts = err; | ||
116 | return 0; | ||
117 | } | ||
118 | |||
119 | if (pdata->nr_parts) { | ||
120 | pr_notice("Using rbtx4939 partition information\n"); | ||
121 | mtd_device_register(info->mtd, pdata->parts, pdata->nr_parts); | ||
122 | return 0; | ||
123 | } | ||
124 | |||
125 | mtd_device_register(info->mtd, NULL, 0); | ||
126 | return 0; | 110 | return 0; |
127 | 111 | ||
128 | err_out: | 112 | err_out: |
diff --git a/drivers/mtd/maps/sa1100-flash.c b/drivers/mtd/maps/sa1100-flash.c index a9b5e0e5c4c5..fa9c0a9670cd 100644 --- a/drivers/mtd/maps/sa1100-flash.c +++ b/drivers/mtd/maps/sa1100-flash.c | |||
@@ -131,10 +131,8 @@ struct sa_subdev_info { | |||
131 | }; | 131 | }; |
132 | 132 | ||
133 | struct sa_info { | 133 | struct sa_info { |
134 | struct mtd_partition *parts; | ||
135 | struct mtd_info *mtd; | 134 | struct mtd_info *mtd; |
136 | int num_subdev; | 135 | int num_subdev; |
137 | unsigned int nr_parts; | ||
138 | struct sa_subdev_info subdev[0]; | 136 | struct sa_subdev_info subdev[0]; |
139 | }; | 137 | }; |
140 | 138 | ||
@@ -231,8 +229,6 @@ static void sa1100_destroy(struct sa_info *info, struct flash_platform_data *pla | |||
231 | mtd_concat_destroy(info->mtd); | 229 | mtd_concat_destroy(info->mtd); |
232 | } | 230 | } |
233 | 231 | ||
234 | kfree(info->parts); | ||
235 | |||
236 | for (i = info->num_subdev - 1; i >= 0; i--) | 232 | for (i = info->num_subdev - 1; i >= 0; i--) |
237 | sa1100_destroy_subdev(&info->subdev[i]); | 233 | sa1100_destroy_subdev(&info->subdev[i]); |
238 | kfree(info); | 234 | kfree(info); |
@@ -341,10 +337,8 @@ static const char *part_probes[] = { "cmdlinepart", "RedBoot", NULL }; | |||
341 | static int __devinit sa1100_mtd_probe(struct platform_device *pdev) | 337 | static int __devinit sa1100_mtd_probe(struct platform_device *pdev) |
342 | { | 338 | { |
343 | struct flash_platform_data *plat = pdev->dev.platform_data; | 339 | struct flash_platform_data *plat = pdev->dev.platform_data; |
344 | struct mtd_partition *parts; | ||
345 | const char *part_type = NULL; | ||
346 | struct sa_info *info; | 340 | struct sa_info *info; |
347 | int err, nr_parts = 0; | 341 | int err; |
348 | 342 | ||
349 | if (!plat) | 343 | if (!plat) |
350 | return -ENODEV; | 344 | return -ENODEV; |
@@ -358,26 +352,8 @@ static int __devinit sa1100_mtd_probe(struct platform_device *pdev) | |||
358 | /* | 352 | /* |
359 | * Partition selection stuff. | 353 | * Partition selection stuff. |
360 | */ | 354 | */ |
361 | nr_parts = parse_mtd_partitions(info->mtd, part_probes, &parts, 0); | 355 | mtd_device_parse_register(info->mtd, part_probes, 0, |
362 | if (nr_parts > 0) { | 356 | plat->parts, plat->nr_parts); |
363 | info->parts = parts; | ||
364 | part_type = "dynamic"; | ||
365 | } else { | ||
366 | parts = plat->parts; | ||
367 | nr_parts = plat->nr_parts; | ||
368 | part_type = "static"; | ||
369 | } | ||
370 | |||
371 | if (nr_parts == 0) | ||
372 | printk(KERN_NOTICE "SA1100 flash: no partition info " | ||
373 | "available, registering whole flash\n"); | ||
374 | else | ||
375 | printk(KERN_NOTICE "SA1100 flash: using %s partition " | ||
376 | "definition\n", part_type); | ||
377 | |||
378 | mtd_device_register(info->mtd, parts, nr_parts); | ||
379 | |||
380 | info->nr_parts = nr_parts; | ||
381 | 357 | ||
382 | platform_set_drvdata(pdev, info); | 358 | platform_set_drvdata(pdev, info); |
383 | err = 0; | 359 | err = 0; |
diff --git a/drivers/mtd/maps/solutionengine.c b/drivers/mtd/maps/solutionengine.c index cbf6bade9354..496c40704aff 100644 --- a/drivers/mtd/maps/solutionengine.c +++ b/drivers/mtd/maps/solutionengine.c | |||
@@ -19,8 +19,6 @@ | |||
19 | static struct mtd_info *flash_mtd; | 19 | static struct mtd_info *flash_mtd; |
20 | static struct mtd_info *eprom_mtd; | 20 | static struct mtd_info *eprom_mtd; |
21 | 21 | ||
22 | static struct mtd_partition *parsed_parts; | ||
23 | |||
24 | struct map_info soleng_eprom_map = { | 22 | struct map_info soleng_eprom_map = { |
25 | .name = "Solution Engine EPROM", | 23 | .name = "Solution Engine EPROM", |
26 | .size = 0x400000, | 24 | .size = 0x400000, |
@@ -51,12 +49,14 @@ static struct mtd_partition superh_se_partitions[] = { | |||
51 | .size = MTDPART_SIZ_FULL, | 49 | .size = MTDPART_SIZ_FULL, |
52 | } | 50 | } |
53 | }; | 51 | }; |
52 | #define NUM_PARTITIONS ARRAY_SIZE(superh_se_partitions) | ||
53 | #else | ||
54 | #define superh_se_partitions NULL | ||
55 | #define NUM_PARTITIONS 0 | ||
54 | #endif /* CONFIG_MTD_SUPERH_RESERVE */ | 56 | #endif /* CONFIG_MTD_SUPERH_RESERVE */ |
55 | 57 | ||
56 | static int __init init_soleng_maps(void) | 58 | static int __init init_soleng_maps(void) |
57 | { | 59 | { |
58 | int nr_parts = 0; | ||
59 | |||
60 | /* First probe at offset 0 */ | 60 | /* First probe at offset 0 */ |
61 | soleng_flash_map.phys = 0; | 61 | soleng_flash_map.phys = 0; |
62 | soleng_flash_map.virt = (void __iomem *)P2SEGADDR(0); | 62 | soleng_flash_map.virt = (void __iomem *)P2SEGADDR(0); |
@@ -92,21 +92,8 @@ static int __init init_soleng_maps(void) | |||
92 | mtd_device_register(eprom_mtd, NULL, 0); | 92 | mtd_device_register(eprom_mtd, NULL, 0); |
93 | } | 93 | } |
94 | 94 | ||
95 | nr_parts = parse_mtd_partitions(flash_mtd, probes, &parsed_parts, 0); | 95 | mtd_device_parse_register(flash_mtd, probes, 0, |
96 | 96 | superh_se_partitions, NUM_PARTITIONS); | |
97 | #ifdef CONFIG_MTD_SUPERH_RESERVE | ||
98 | if (nr_parts <= 0) { | ||
99 | printk(KERN_NOTICE "Using configured partition at 0x%08x.\n", | ||
100 | CONFIG_MTD_SUPERH_RESERVE); | ||
101 | parsed_parts = superh_se_partitions; | ||
102 | nr_parts = sizeof(superh_se_partitions)/sizeof(*parsed_parts); | ||
103 | } | ||
104 | #endif /* CONFIG_MTD_SUPERH_RESERVE */ | ||
105 | |||
106 | if (nr_parts > 0) | ||
107 | mtd_device_register(flash_mtd, parsed_parts, nr_parts); | ||
108 | else | ||
109 | mtd_device_register(flash_mtd, NULL, 0); | ||
110 | 97 | ||
111 | return 0; | 98 | return 0; |
112 | } | 99 | } |
@@ -118,10 +105,7 @@ static void __exit cleanup_soleng_maps(void) | |||
118 | map_destroy(eprom_mtd); | 105 | map_destroy(eprom_mtd); |
119 | } | 106 | } |
120 | 107 | ||
121 | if (parsed_parts) | 108 | mtd_device_unregister(flash_mtd); |
122 | mtd_device_unregister(flash_mtd); | ||
123 | else | ||
124 | mtd_device_unregister(flash_mtd); | ||
125 | map_destroy(flash_mtd); | 109 | map_destroy(flash_mtd); |
126 | } | 110 | } |
127 | 111 | ||
diff --git a/drivers/mtd/maps/wr_sbc82xx_flash.c b/drivers/mtd/maps/wr_sbc82xx_flash.c index 901ce968efae..aa7e0cb2893c 100644 --- a/drivers/mtd/maps/wr_sbc82xx_flash.c +++ b/drivers/mtd/maps/wr_sbc82xx_flash.c | |||
@@ -20,7 +20,6 @@ | |||
20 | #include <asm/immap_cpm2.h> | 20 | #include <asm/immap_cpm2.h> |
21 | 21 | ||
22 | static struct mtd_info *sbcmtd[3]; | 22 | static struct mtd_info *sbcmtd[3]; |
23 | static struct mtd_partition *sbcmtd_parts[3]; | ||
24 | 23 | ||
25 | struct map_info sbc82xx_flash_map[3] = { | 24 | struct map_info sbc82xx_flash_map[3] = { |
26 | {.name = "Boot flash"}, | 25 | {.name = "Boot flash"}, |
@@ -101,6 +100,7 @@ static int __init init_sbc82xx_flash(void) | |||
101 | for (i=0; i<3; i++) { | 100 | for (i=0; i<3; i++) { |
102 | int8_t flashcs[3] = { 0, 6, 1 }; | 101 | int8_t flashcs[3] = { 0, 6, 1 }; |
103 | int nr_parts; | 102 | int nr_parts; |
103 | struct mtd_partition *defparts; | ||
104 | 104 | ||
105 | printk(KERN_NOTICE "PowerQUICC II %s (%ld MiB on CS%d", | 105 | printk(KERN_NOTICE "PowerQUICC II %s (%ld MiB on CS%d", |
106 | sbc82xx_flash_map[i].name, | 106 | sbc82xx_flash_map[i].name, |
@@ -113,7 +113,8 @@ static int __init init_sbc82xx_flash(void) | |||
113 | } | 113 | } |
114 | printk(" at %08lx)\n", sbc82xx_flash_map[i].phys); | 114 | printk(" at %08lx)\n", sbc82xx_flash_map[i].phys); |
115 | 115 | ||
116 | sbc82xx_flash_map[i].virt = ioremap(sbc82xx_flash_map[i].phys, sbc82xx_flash_map[i].size); | 116 | sbc82xx_flash_map[i].virt = ioremap(sbc82xx_flash_map[i].phys, |
117 | sbc82xx_flash_map[i].size); | ||
117 | 118 | ||
118 | if (!sbc82xx_flash_map[i].virt) { | 119 | if (!sbc82xx_flash_map[i].virt) { |
119 | printk("Failed to ioremap\n"); | 120 | printk("Failed to ioremap\n"); |
@@ -129,24 +130,20 @@ static int __init init_sbc82xx_flash(void) | |||
129 | 130 | ||
130 | sbcmtd[i]->owner = THIS_MODULE; | 131 | sbcmtd[i]->owner = THIS_MODULE; |
131 | 132 | ||
132 | nr_parts = parse_mtd_partitions(sbcmtd[i], part_probes, | ||
133 | &sbcmtd_parts[i], 0); | ||
134 | if (nr_parts > 0) { | ||
135 | mtd_device_register(sbcmtd[i], sbcmtd_parts[i], | ||
136 | nr_parts); | ||
137 | continue; | ||
138 | } | ||
139 | |||
140 | /* No partitioning detected. Use default */ | 133 | /* No partitioning detected. Use default */ |
141 | if (i == 2) { | 134 | if (i == 2) { |
142 | mtd_device_register(sbcmtd[i], NULL, 0); | 135 | defparts = NULL; |
136 | nr_parts = 0; | ||
143 | } else if (i == bigflash) { | 137 | } else if (i == bigflash) { |
144 | mtd_device_register(sbcmtd[i], bigflash_parts, | 138 | defparts = bigflash_parts; |
145 | ARRAY_SIZE(bigflash_parts)); | 139 | nr_parts = ARRAY_SIZE(bigflash_parts); |
146 | } else { | 140 | } else { |
147 | mtd_device_register(sbcmtd[i], smallflash_parts, | 141 | defparts = smallflash_parts; |
148 | ARRAY_SIZE(smallflash_parts)); | 142 | nr_parts = ARRAY_SIZE(smallflash_parts); |
149 | } | 143 | } |
144 | |||
145 | mtd_device_parse_register(sbcmtd[i], part_probes, 0, | ||
146 | defparts, nr_parts); | ||
150 | } | 147 | } |
151 | return 0; | 148 | return 0; |
152 | } | 149 | } |
@@ -159,12 +156,8 @@ static void __exit cleanup_sbc82xx_flash(void) | |||
159 | if (!sbcmtd[i]) | 156 | if (!sbcmtd[i]) |
160 | continue; | 157 | continue; |
161 | 158 | ||
162 | if (i<2 || sbcmtd_parts[i]) | 159 | mtd_device_unregister(sbcmtd[i]); |
163 | mtd_device_unregister(sbcmtd[i]); | ||
164 | else | ||
165 | mtd_device_unregister(sbcmtd[i]); | ||
166 | 160 | ||
167 | kfree(sbcmtd_parts[i]); | ||
168 | map_destroy(sbcmtd[i]); | 161 | map_destroy(sbcmtd[i]); |
169 | 162 | ||
170 | iounmap((void *)sbc82xx_flash_map[i].virt); | 163 | iounmap((void *)sbc82xx_flash_map[i].virt); |
diff --git a/drivers/mtd/mtd_blkdevs.c b/drivers/mtd/mtd_blkdevs.c index ca385697446e..ed8b5e744b12 100644 --- a/drivers/mtd/mtd_blkdevs.c +++ b/drivers/mtd/mtd_blkdevs.c | |||
@@ -426,6 +426,8 @@ int add_mtd_blktrans_dev(struct mtd_blktrans_dev *new) | |||
426 | new->rq->queuedata = new; | 426 | new->rq->queuedata = new; |
427 | blk_queue_logical_block_size(new->rq, tr->blksize); | 427 | blk_queue_logical_block_size(new->rq, tr->blksize); |
428 | 428 | ||
429 | queue_flag_set_unlocked(QUEUE_FLAG_NONROT, new->rq); | ||
430 | |||
429 | if (tr->discard) { | 431 | if (tr->discard) { |
430 | queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, new->rq); | 432 | queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, new->rq); |
431 | new->rq->limits.max_discard_sectors = UINT_MAX; | 433 | new->rq->limits.max_discard_sectors = UINT_MAX; |
diff --git a/drivers/mtd/mtdblock.c b/drivers/mtd/mtdblock.c index 3326615ad66b..7c1dc908a174 100644 --- a/drivers/mtd/mtdblock.c +++ b/drivers/mtd/mtdblock.c | |||
@@ -44,7 +44,7 @@ struct mtdblk_dev { | |||
44 | enum { STATE_EMPTY, STATE_CLEAN, STATE_DIRTY } cache_state; | 44 | enum { STATE_EMPTY, STATE_CLEAN, STATE_DIRTY } cache_state; |
45 | }; | 45 | }; |
46 | 46 | ||
47 | static struct mutex mtdblks_lock; | 47 | static DEFINE_MUTEX(mtdblks_lock); |
48 | 48 | ||
49 | /* | 49 | /* |
50 | * Cache stuff... | 50 | * Cache stuff... |
@@ -119,7 +119,7 @@ static int write_cached_data (struct mtdblk_dev *mtdblk) | |||
119 | if (mtdblk->cache_state != STATE_DIRTY) | 119 | if (mtdblk->cache_state != STATE_DIRTY) |
120 | return 0; | 120 | return 0; |
121 | 121 | ||
122 | DEBUG(MTD_DEBUG_LEVEL2, "mtdblock: writing cached data for \"%s\" " | 122 | pr_debug("mtdblock: writing cached data for \"%s\" " |
123 | "at 0x%lx, size 0x%x\n", mtd->name, | 123 | "at 0x%lx, size 0x%x\n", mtd->name, |
124 | mtdblk->cache_offset, mtdblk->cache_size); | 124 | mtdblk->cache_offset, mtdblk->cache_size); |
125 | 125 | ||
@@ -148,7 +148,7 @@ static int do_cached_write (struct mtdblk_dev *mtdblk, unsigned long pos, | |||
148 | size_t retlen; | 148 | size_t retlen; |
149 | int ret; | 149 | int ret; |
150 | 150 | ||
151 | DEBUG(MTD_DEBUG_LEVEL2, "mtdblock: write on \"%s\" at 0x%lx, size 0x%x\n", | 151 | pr_debug("mtdblock: write on \"%s\" at 0x%lx, size 0x%x\n", |
152 | mtd->name, pos, len); | 152 | mtd->name, pos, len); |
153 | 153 | ||
154 | if (!sect_size) | 154 | if (!sect_size) |
@@ -218,7 +218,7 @@ static int do_cached_read (struct mtdblk_dev *mtdblk, unsigned long pos, | |||
218 | size_t retlen; | 218 | size_t retlen; |
219 | int ret; | 219 | int ret; |
220 | 220 | ||
221 | DEBUG(MTD_DEBUG_LEVEL2, "mtdblock: read on \"%s\" at 0x%lx, size 0x%x\n", | 221 | pr_debug("mtdblock: read on \"%s\" at 0x%lx, size 0x%x\n", |
222 | mtd->name, pos, len); | 222 | mtd->name, pos, len); |
223 | 223 | ||
224 | if (!sect_size) | 224 | if (!sect_size) |
@@ -283,7 +283,7 @@ static int mtdblock_open(struct mtd_blktrans_dev *mbd) | |||
283 | { | 283 | { |
284 | struct mtdblk_dev *mtdblk = container_of(mbd, struct mtdblk_dev, mbd); | 284 | struct mtdblk_dev *mtdblk = container_of(mbd, struct mtdblk_dev, mbd); |
285 | 285 | ||
286 | DEBUG(MTD_DEBUG_LEVEL1,"mtdblock_open\n"); | 286 | pr_debug("mtdblock_open\n"); |
287 | 287 | ||
288 | mutex_lock(&mtdblks_lock); | 288 | mutex_lock(&mtdblks_lock); |
289 | if (mtdblk->count) { | 289 | if (mtdblk->count) { |
@@ -303,7 +303,7 @@ static int mtdblock_open(struct mtd_blktrans_dev *mbd) | |||
303 | 303 | ||
304 | mutex_unlock(&mtdblks_lock); | 304 | mutex_unlock(&mtdblks_lock); |
305 | 305 | ||
306 | DEBUG(MTD_DEBUG_LEVEL1, "ok\n"); | 306 | pr_debug("ok\n"); |
307 | 307 | ||
308 | return 0; | 308 | return 0; |
309 | } | 309 | } |
@@ -312,7 +312,7 @@ static int mtdblock_release(struct mtd_blktrans_dev *mbd) | |||
312 | { | 312 | { |
313 | struct mtdblk_dev *mtdblk = container_of(mbd, struct mtdblk_dev, mbd); | 313 | struct mtdblk_dev *mtdblk = container_of(mbd, struct mtdblk_dev, mbd); |
314 | 314 | ||
315 | DEBUG(MTD_DEBUG_LEVEL1, "mtdblock_release\n"); | 315 | pr_debug("mtdblock_release\n"); |
316 | 316 | ||
317 | mutex_lock(&mtdblks_lock); | 317 | mutex_lock(&mtdblks_lock); |
318 | 318 | ||
@@ -329,7 +329,7 @@ static int mtdblock_release(struct mtd_blktrans_dev *mbd) | |||
329 | 329 | ||
330 | mutex_unlock(&mtdblks_lock); | 330 | mutex_unlock(&mtdblks_lock); |
331 | 331 | ||
332 | DEBUG(MTD_DEBUG_LEVEL1, "ok\n"); | 332 | pr_debug("ok\n"); |
333 | 333 | ||
334 | return 0; | 334 | return 0; |
335 | } | 335 | } |
@@ -389,8 +389,6 @@ static struct mtd_blktrans_ops mtdblock_tr = { | |||
389 | 389 | ||
390 | static int __init init_mtdblock(void) | 390 | static int __init init_mtdblock(void) |
391 | { | 391 | { |
392 | mutex_init(&mtdblks_lock); | ||
393 | |||
394 | return register_mtd_blktrans(&mtdblock_tr); | 392 | return register_mtd_blktrans(&mtdblock_tr); |
395 | } | 393 | } |
396 | 394 | ||
diff --git a/drivers/mtd/mtdchar.c b/drivers/mtd/mtdchar.c index 61086ea3cc6b..e7dc732ddabc 100644 --- a/drivers/mtd/mtdchar.c +++ b/drivers/mtd/mtdchar.c | |||
@@ -43,7 +43,7 @@ static struct vfsmount *mtd_inode_mnt __read_mostly; | |||
43 | 43 | ||
44 | /* | 44 | /* |
45 | * Data structure to hold the pointer to the mtd device as well | 45 | * Data structure to hold the pointer to the mtd device as well |
46 | * as mode information ofr various use cases. | 46 | * as mode information of various use cases. |
47 | */ | 47 | */ |
48 | struct mtd_file_info { | 48 | struct mtd_file_info { |
49 | struct mtd_info *mtd; | 49 | struct mtd_info *mtd; |
@@ -86,7 +86,7 @@ static int mtd_open(struct inode *inode, struct file *file) | |||
86 | struct mtd_file_info *mfi; | 86 | struct mtd_file_info *mfi; |
87 | struct inode *mtd_ino; | 87 | struct inode *mtd_ino; |
88 | 88 | ||
89 | DEBUG(MTD_DEBUG_LEVEL0, "MTD_open\n"); | 89 | pr_debug("MTD_open\n"); |
90 | 90 | ||
91 | /* You can't open the RO devices RW */ | 91 | /* You can't open the RO devices RW */ |
92 | if ((file->f_mode & FMODE_WRITE) && (minor & 1)) | 92 | if ((file->f_mode & FMODE_WRITE) && (minor & 1)) |
@@ -151,7 +151,7 @@ static int mtd_close(struct inode *inode, struct file *file) | |||
151 | struct mtd_file_info *mfi = file->private_data; | 151 | struct mtd_file_info *mfi = file->private_data; |
152 | struct mtd_info *mtd = mfi->mtd; | 152 | struct mtd_info *mtd = mfi->mtd; |
153 | 153 | ||
154 | DEBUG(MTD_DEBUG_LEVEL0, "MTD_close\n"); | 154 | pr_debug("MTD_close\n"); |
155 | 155 | ||
156 | /* Only sync if opened RW */ | 156 | /* Only sync if opened RW */ |
157 | if ((file->f_mode & FMODE_WRITE) && mtd->sync) | 157 | if ((file->f_mode & FMODE_WRITE) && mtd->sync) |
@@ -195,7 +195,7 @@ static ssize_t mtd_read(struct file *file, char __user *buf, size_t count,loff_t | |||
195 | size_t size = count; | 195 | size_t size = count; |
196 | char *kbuf; | 196 | char *kbuf; |
197 | 197 | ||
198 | DEBUG(MTD_DEBUG_LEVEL0,"MTD_read\n"); | 198 | pr_debug("MTD_read\n"); |
199 | 199 | ||
200 | if (*ppos + count > mtd->size) | 200 | if (*ppos + count > mtd->size) |
201 | count = mtd->size - *ppos; | 201 | count = mtd->size - *ppos; |
@@ -211,17 +211,17 @@ static ssize_t mtd_read(struct file *file, char __user *buf, size_t count,loff_t | |||
211 | len = min_t(size_t, count, size); | 211 | len = min_t(size_t, count, size); |
212 | 212 | ||
213 | switch (mfi->mode) { | 213 | switch (mfi->mode) { |
214 | case MTD_MODE_OTP_FACTORY: | 214 | case MTD_FILE_MODE_OTP_FACTORY: |
215 | ret = mtd->read_fact_prot_reg(mtd, *ppos, len, &retlen, kbuf); | 215 | ret = mtd->read_fact_prot_reg(mtd, *ppos, len, &retlen, kbuf); |
216 | break; | 216 | break; |
217 | case MTD_MODE_OTP_USER: | 217 | case MTD_FILE_MODE_OTP_USER: |
218 | ret = mtd->read_user_prot_reg(mtd, *ppos, len, &retlen, kbuf); | 218 | ret = mtd->read_user_prot_reg(mtd, *ppos, len, &retlen, kbuf); |
219 | break; | 219 | break; |
220 | case MTD_MODE_RAW: | 220 | case MTD_FILE_MODE_RAW: |
221 | { | 221 | { |
222 | struct mtd_oob_ops ops; | 222 | struct mtd_oob_ops ops; |
223 | 223 | ||
224 | ops.mode = MTD_OOB_RAW; | 224 | ops.mode = MTD_OPS_RAW; |
225 | ops.datbuf = kbuf; | 225 | ops.datbuf = kbuf; |
226 | ops.oobbuf = NULL; | 226 | ops.oobbuf = NULL; |
227 | ops.len = len; | 227 | ops.len = len; |
@@ -233,16 +233,16 @@ static ssize_t mtd_read(struct file *file, char __user *buf, size_t count,loff_t | |||
233 | default: | 233 | default: |
234 | ret = mtd->read(mtd, *ppos, len, &retlen, kbuf); | 234 | ret = mtd->read(mtd, *ppos, len, &retlen, kbuf); |
235 | } | 235 | } |
236 | /* Nand returns -EBADMSG on ecc errors, but it returns | 236 | /* Nand returns -EBADMSG on ECC errors, but it returns |
237 | * the data. For our userspace tools it is important | 237 | * the data. For our userspace tools it is important |
238 | * to dump areas with ecc errors ! | 238 | * to dump areas with ECC errors! |
239 | * For kernel internal usage it also might return -EUCLEAN | 239 | * For kernel internal usage it also might return -EUCLEAN |
240 | * to signal the caller that a bitflip has occurred and has | 240 | * to signal the caller that a bitflip has occurred and has |
241 | * been corrected by the ECC algorithm. | 241 | * been corrected by the ECC algorithm. |
242 | * Userspace software which accesses NAND this way | 242 | * Userspace software which accesses NAND this way |
243 | * must be aware of the fact that it deals with NAND | 243 | * must be aware of the fact that it deals with NAND |
244 | */ | 244 | */ |
245 | if (!ret || (ret == -EUCLEAN) || (ret == -EBADMSG)) { | 245 | if (!ret || mtd_is_bitflip_or_eccerr(ret)) { |
246 | *ppos += retlen; | 246 | *ppos += retlen; |
247 | if (copy_to_user(buf, kbuf, retlen)) { | 247 | if (copy_to_user(buf, kbuf, retlen)) { |
248 | kfree(kbuf); | 248 | kfree(kbuf); |
@@ -278,7 +278,7 @@ static ssize_t mtd_write(struct file *file, const char __user *buf, size_t count | |||
278 | int ret=0; | 278 | int ret=0; |
279 | int len; | 279 | int len; |
280 | 280 | ||
281 | DEBUG(MTD_DEBUG_LEVEL0,"MTD_write\n"); | 281 | pr_debug("MTD_write\n"); |
282 | 282 | ||
283 | if (*ppos == mtd->size) | 283 | if (*ppos == mtd->size) |
284 | return -ENOSPC; | 284 | return -ENOSPC; |
@@ -302,10 +302,10 @@ static ssize_t mtd_write(struct file *file, const char __user *buf, size_t count | |||
302 | } | 302 | } |
303 | 303 | ||
304 | switch (mfi->mode) { | 304 | switch (mfi->mode) { |
305 | case MTD_MODE_OTP_FACTORY: | 305 | case MTD_FILE_MODE_OTP_FACTORY: |
306 | ret = -EROFS; | 306 | ret = -EROFS; |
307 | break; | 307 | break; |
308 | case MTD_MODE_OTP_USER: | 308 | case MTD_FILE_MODE_OTP_USER: |
309 | if (!mtd->write_user_prot_reg) { | 309 | if (!mtd->write_user_prot_reg) { |
310 | ret = -EOPNOTSUPP; | 310 | ret = -EOPNOTSUPP; |
311 | break; | 311 | break; |
@@ -313,13 +313,14 @@ static ssize_t mtd_write(struct file *file, const char __user *buf, size_t count | |||
313 | ret = mtd->write_user_prot_reg(mtd, *ppos, len, &retlen, kbuf); | 313 | ret = mtd->write_user_prot_reg(mtd, *ppos, len, &retlen, kbuf); |
314 | break; | 314 | break; |
315 | 315 | ||
316 | case MTD_MODE_RAW: | 316 | case MTD_FILE_MODE_RAW: |
317 | { | 317 | { |
318 | struct mtd_oob_ops ops; | 318 | struct mtd_oob_ops ops; |
319 | 319 | ||
320 | ops.mode = MTD_OOB_RAW; | 320 | ops.mode = MTD_OPS_RAW; |
321 | ops.datbuf = kbuf; | 321 | ops.datbuf = kbuf; |
322 | ops.oobbuf = NULL; | 322 | ops.oobbuf = NULL; |
323 | ops.ooboffs = 0; | ||
323 | ops.len = len; | 324 | ops.len = len; |
324 | 325 | ||
325 | ret = mtd->write_oob(mtd, *ppos, &ops); | 326 | ret = mtd->write_oob(mtd, *ppos, &ops); |
@@ -367,13 +368,13 @@ static int otp_select_filemode(struct mtd_file_info *mfi, int mode) | |||
367 | if (!mtd->read_fact_prot_reg) | 368 | if (!mtd->read_fact_prot_reg) |
368 | ret = -EOPNOTSUPP; | 369 | ret = -EOPNOTSUPP; |
369 | else | 370 | else |
370 | mfi->mode = MTD_MODE_OTP_FACTORY; | 371 | mfi->mode = MTD_FILE_MODE_OTP_FACTORY; |
371 | break; | 372 | break; |
372 | case MTD_OTP_USER: | 373 | case MTD_OTP_USER: |
373 | if (!mtd->read_fact_prot_reg) | 374 | if (!mtd->read_fact_prot_reg) |
374 | ret = -EOPNOTSUPP; | 375 | ret = -EOPNOTSUPP; |
375 | else | 376 | else |
376 | mfi->mode = MTD_MODE_OTP_USER; | 377 | mfi->mode = MTD_FILE_MODE_OTP_USER; |
377 | break; | 378 | break; |
378 | default: | 379 | default: |
379 | ret = -EINVAL; | 380 | ret = -EINVAL; |
@@ -390,6 +391,7 @@ static int mtd_do_writeoob(struct file *file, struct mtd_info *mtd, | |||
390 | uint64_t start, uint32_t length, void __user *ptr, | 391 | uint64_t start, uint32_t length, void __user *ptr, |
391 | uint32_t __user *retp) | 392 | uint32_t __user *retp) |
392 | { | 393 | { |
394 | struct mtd_file_info *mfi = file->private_data; | ||
393 | struct mtd_oob_ops ops; | 395 | struct mtd_oob_ops ops; |
394 | uint32_t retlen; | 396 | uint32_t retlen; |
395 | int ret = 0; | 397 | int ret = 0; |
@@ -409,9 +411,10 @@ static int mtd_do_writeoob(struct file *file, struct mtd_info *mtd, | |||
409 | return ret; | 411 | return ret; |
410 | 412 | ||
411 | ops.ooblen = length; | 413 | ops.ooblen = length; |
412 | ops.ooboffs = start & (mtd->oobsize - 1); | 414 | ops.ooboffs = start & (mtd->writesize - 1); |
413 | ops.datbuf = NULL; | 415 | ops.datbuf = NULL; |
414 | ops.mode = MTD_OOB_PLACE; | 416 | ops.mode = (mfi->mode == MTD_FILE_MODE_RAW) ? MTD_OPS_RAW : |
417 | MTD_OPS_PLACE_OOB; | ||
415 | 418 | ||
416 | if (ops.ooboffs && ops.ooblen > (mtd->oobsize - ops.ooboffs)) | 419 | if (ops.ooboffs && ops.ooblen > (mtd->oobsize - ops.ooboffs)) |
417 | return -EINVAL; | 420 | return -EINVAL; |
@@ -420,7 +423,7 @@ static int mtd_do_writeoob(struct file *file, struct mtd_info *mtd, | |||
420 | if (IS_ERR(ops.oobbuf)) | 423 | if (IS_ERR(ops.oobbuf)) |
421 | return PTR_ERR(ops.oobbuf); | 424 | return PTR_ERR(ops.oobbuf); |
422 | 425 | ||
423 | start &= ~((uint64_t)mtd->oobsize - 1); | 426 | start &= ~((uint64_t)mtd->writesize - 1); |
424 | ret = mtd->write_oob(mtd, start, &ops); | 427 | ret = mtd->write_oob(mtd, start, &ops); |
425 | 428 | ||
426 | if (ops.oobretlen > 0xFFFFFFFFU) | 429 | if (ops.oobretlen > 0xFFFFFFFFU) |
@@ -433,9 +436,11 @@ static int mtd_do_writeoob(struct file *file, struct mtd_info *mtd, | |||
433 | return ret; | 436 | return ret; |
434 | } | 437 | } |
435 | 438 | ||
436 | static int mtd_do_readoob(struct mtd_info *mtd, uint64_t start, | 439 | static int mtd_do_readoob(struct file *file, struct mtd_info *mtd, |
437 | uint32_t length, void __user *ptr, uint32_t __user *retp) | 440 | uint64_t start, uint32_t length, void __user *ptr, |
441 | uint32_t __user *retp) | ||
438 | { | 442 | { |
443 | struct mtd_file_info *mfi = file->private_data; | ||
439 | struct mtd_oob_ops ops; | 444 | struct mtd_oob_ops ops; |
440 | int ret = 0; | 445 | int ret = 0; |
441 | 446 | ||
@@ -451,9 +456,10 @@ static int mtd_do_readoob(struct mtd_info *mtd, uint64_t start, | |||
451 | return ret; | 456 | return ret; |
452 | 457 | ||
453 | ops.ooblen = length; | 458 | ops.ooblen = length; |
454 | ops.ooboffs = start & (mtd->oobsize - 1); | 459 | ops.ooboffs = start & (mtd->writesize - 1); |
455 | ops.datbuf = NULL; | 460 | ops.datbuf = NULL; |
456 | ops.mode = MTD_OOB_PLACE; | 461 | ops.mode = (mfi->mode == MTD_FILE_MODE_RAW) ? MTD_OPS_RAW : |
462 | MTD_OPS_PLACE_OOB; | ||
457 | 463 | ||
458 | if (ops.ooboffs && ops.ooblen > (mtd->oobsize - ops.ooboffs)) | 464 | if (ops.ooboffs && ops.ooblen > (mtd->oobsize - ops.ooboffs)) |
459 | return -EINVAL; | 465 | return -EINVAL; |
@@ -462,7 +468,7 @@ static int mtd_do_readoob(struct mtd_info *mtd, uint64_t start, | |||
462 | if (!ops.oobbuf) | 468 | if (!ops.oobbuf) |
463 | return -ENOMEM; | 469 | return -ENOMEM; |
464 | 470 | ||
465 | start &= ~((uint64_t)mtd->oobsize - 1); | 471 | start &= ~((uint64_t)mtd->writesize - 1); |
466 | ret = mtd->read_oob(mtd, start, &ops); | 472 | ret = mtd->read_oob(mtd, start, &ops); |
467 | 473 | ||
468 | if (put_user(ops.oobretlen, retp)) | 474 | if (put_user(ops.oobretlen, retp)) |
@@ -472,13 +478,29 @@ static int mtd_do_readoob(struct mtd_info *mtd, uint64_t start, | |||
472 | ret = -EFAULT; | 478 | ret = -EFAULT; |
473 | 479 | ||
474 | kfree(ops.oobbuf); | 480 | kfree(ops.oobbuf); |
481 | |||
482 | /* | ||
483 | * NAND returns -EBADMSG on ECC errors, but it returns the OOB | ||
484 | * data. For our userspace tools it is important to dump areas | ||
485 | * with ECC errors! | ||
486 | * For kernel internal usage it also might return -EUCLEAN | ||
487 | * to signal the caller that a bitflip has occured and has | ||
488 | * been corrected by the ECC algorithm. | ||
489 | * | ||
490 | * Note: currently the standard NAND function, nand_read_oob_std, | ||
491 | * does not calculate ECC for the OOB area, so do not rely on | ||
492 | * this behavior unless you have replaced it with your own. | ||
493 | */ | ||
494 | if (mtd_is_bitflip_or_eccerr(ret)) | ||
495 | return 0; | ||
496 | |||
475 | return ret; | 497 | return ret; |
476 | } | 498 | } |
477 | 499 | ||
478 | /* | 500 | /* |
479 | * Copies (and truncates, if necessary) data from the larger struct, | 501 | * Copies (and truncates, if necessary) data from the larger struct, |
480 | * nand_ecclayout, to the smaller, deprecated layout struct, | 502 | * nand_ecclayout, to the smaller, deprecated layout struct, |
481 | * nand_ecclayout_user. This is necessary only to suppport the deprecated | 503 | * nand_ecclayout_user. This is necessary only to support the deprecated |
482 | * API ioctl ECCGETLAYOUT while allowing all new functionality to use | 504 | * API ioctl ECCGETLAYOUT while allowing all new functionality to use |
483 | * nand_ecclayout flexibly (i.e. the struct may change size in new | 505 | * nand_ecclayout flexibly (i.e. the struct may change size in new |
484 | * releases without requiring major rewrites). | 506 | * releases without requiring major rewrites). |
@@ -544,6 +566,55 @@ static int mtd_blkpg_ioctl(struct mtd_info *mtd, | |||
544 | } | 566 | } |
545 | } | 567 | } |
546 | 568 | ||
569 | static int mtd_write_ioctl(struct mtd_info *mtd, | ||
570 | struct mtd_write_req __user *argp) | ||
571 | { | ||
572 | struct mtd_write_req req; | ||
573 | struct mtd_oob_ops ops; | ||
574 | void __user *usr_data, *usr_oob; | ||
575 | int ret; | ||
576 | |||
577 | if (copy_from_user(&req, argp, sizeof(req)) || | ||
578 | !access_ok(VERIFY_READ, req.usr_data, req.len) || | ||
579 | !access_ok(VERIFY_READ, req.usr_oob, req.ooblen)) | ||
580 | return -EFAULT; | ||
581 | if (!mtd->write_oob) | ||
582 | return -EOPNOTSUPP; | ||
583 | |||
584 | ops.mode = req.mode; | ||
585 | ops.len = (size_t)req.len; | ||
586 | ops.ooblen = (size_t)req.ooblen; | ||
587 | ops.ooboffs = 0; | ||
588 | |||
589 | usr_data = (void __user *)(uintptr_t)req.usr_data; | ||
590 | usr_oob = (void __user *)(uintptr_t)req.usr_oob; | ||
591 | |||
592 | if (req.usr_data) { | ||
593 | ops.datbuf = memdup_user(usr_data, ops.len); | ||
594 | if (IS_ERR(ops.datbuf)) | ||
595 | return PTR_ERR(ops.datbuf); | ||
596 | } else { | ||
597 | ops.datbuf = NULL; | ||
598 | } | ||
599 | |||
600 | if (req.usr_oob) { | ||
601 | ops.oobbuf = memdup_user(usr_oob, ops.ooblen); | ||
602 | if (IS_ERR(ops.oobbuf)) { | ||
603 | kfree(ops.datbuf); | ||
604 | return PTR_ERR(ops.oobbuf); | ||
605 | } | ||
606 | } else { | ||
607 | ops.oobbuf = NULL; | ||
608 | } | ||
609 | |||
610 | ret = mtd->write_oob(mtd, (loff_t)req.start, &ops); | ||
611 | |||
612 | kfree(ops.datbuf); | ||
613 | kfree(ops.oobbuf); | ||
614 | |||
615 | return ret; | ||
616 | } | ||
617 | |||
547 | static int mtd_ioctl(struct file *file, u_int cmd, u_long arg) | 618 | static int mtd_ioctl(struct file *file, u_int cmd, u_long arg) |
548 | { | 619 | { |
549 | struct mtd_file_info *mfi = file->private_data; | 620 | struct mtd_file_info *mfi = file->private_data; |
@@ -553,7 +624,7 @@ static int mtd_ioctl(struct file *file, u_int cmd, u_long arg) | |||
553 | u_long size; | 624 | u_long size; |
554 | struct mtd_info_user info; | 625 | struct mtd_info_user info; |
555 | 626 | ||
556 | DEBUG(MTD_DEBUG_LEVEL0, "MTD_ioctl\n"); | 627 | pr_debug("MTD_ioctl\n"); |
557 | 628 | ||
558 | size = (cmd & IOCSIZE_MASK) >> IOCSIZE_SHIFT; | 629 | size = (cmd & IOCSIZE_MASK) >> IOCSIZE_SHIFT; |
559 | if (cmd & IOC_IN) { | 630 | if (cmd & IOC_IN) { |
@@ -601,8 +672,8 @@ static int mtd_ioctl(struct file *file, u_int cmd, u_long arg) | |||
601 | info.erasesize = mtd->erasesize; | 672 | info.erasesize = mtd->erasesize; |
602 | info.writesize = mtd->writesize; | 673 | info.writesize = mtd->writesize; |
603 | info.oobsize = mtd->oobsize; | 674 | info.oobsize = mtd->oobsize; |
604 | /* The below fields are obsolete */ | 675 | /* The below field is obsolete */ |
605 | info.ecctype = -1; | 676 | info.padding = 0; |
606 | if (copy_to_user(argp, &info, sizeof(struct mtd_info_user))) | 677 | if (copy_to_user(argp, &info, sizeof(struct mtd_info_user))) |
607 | return -EFAULT; | 678 | return -EFAULT; |
608 | break; | 679 | break; |
@@ -698,7 +769,7 @@ static int mtd_ioctl(struct file *file, u_int cmd, u_long arg) | |||
698 | if (copy_from_user(&buf, argp, sizeof(buf))) | 769 | if (copy_from_user(&buf, argp, sizeof(buf))) |
699 | ret = -EFAULT; | 770 | ret = -EFAULT; |
700 | else | 771 | else |
701 | ret = mtd_do_readoob(mtd, buf.start, buf.length, | 772 | ret = mtd_do_readoob(file, mtd, buf.start, buf.length, |
702 | buf.ptr, &buf_user->start); | 773 | buf.ptr, &buf_user->start); |
703 | break; | 774 | break; |
704 | } | 775 | } |
@@ -725,12 +796,19 @@ static int mtd_ioctl(struct file *file, u_int cmd, u_long arg) | |||
725 | if (copy_from_user(&buf, argp, sizeof(buf))) | 796 | if (copy_from_user(&buf, argp, sizeof(buf))) |
726 | ret = -EFAULT; | 797 | ret = -EFAULT; |
727 | else | 798 | else |
728 | ret = mtd_do_readoob(mtd, buf.start, buf.length, | 799 | ret = mtd_do_readoob(file, mtd, buf.start, buf.length, |
729 | (void __user *)(uintptr_t)buf.usr_ptr, | 800 | (void __user *)(uintptr_t)buf.usr_ptr, |
730 | &buf_user->length); | 801 | &buf_user->length); |
731 | break; | 802 | break; |
732 | } | 803 | } |
733 | 804 | ||
805 | case MEMWRITE: | ||
806 | { | ||
807 | ret = mtd_write_ioctl(mtd, | ||
808 | (struct mtd_write_req __user *)arg); | ||
809 | break; | ||
810 | } | ||
811 | |||
734 | case MEMLOCK: | 812 | case MEMLOCK: |
735 | { | 813 | { |
736 | struct erase_info_user einfo; | 814 | struct erase_info_user einfo; |
@@ -827,7 +905,7 @@ static int mtd_ioctl(struct file *file, u_int cmd, u_long arg) | |||
827 | if (copy_from_user(&mode, argp, sizeof(int))) | 905 | if (copy_from_user(&mode, argp, sizeof(int))) |
828 | return -EFAULT; | 906 | return -EFAULT; |
829 | 907 | ||
830 | mfi->mode = MTD_MODE_NORMAL; | 908 | mfi->mode = MTD_FILE_MODE_NORMAL; |
831 | 909 | ||
832 | ret = otp_select_filemode(mfi, mode); | 910 | ret = otp_select_filemode(mfi, mode); |
833 | 911 | ||
@@ -843,11 +921,11 @@ static int mtd_ioctl(struct file *file, u_int cmd, u_long arg) | |||
843 | return -ENOMEM; | 921 | return -ENOMEM; |
844 | ret = -EOPNOTSUPP; | 922 | ret = -EOPNOTSUPP; |
845 | switch (mfi->mode) { | 923 | switch (mfi->mode) { |
846 | case MTD_MODE_OTP_FACTORY: | 924 | case MTD_FILE_MODE_OTP_FACTORY: |
847 | if (mtd->get_fact_prot_info) | 925 | if (mtd->get_fact_prot_info) |
848 | ret = mtd->get_fact_prot_info(mtd, buf, 4096); | 926 | ret = mtd->get_fact_prot_info(mtd, buf, 4096); |
849 | break; | 927 | break; |
850 | case MTD_MODE_OTP_USER: | 928 | case MTD_FILE_MODE_OTP_USER: |
851 | if (mtd->get_user_prot_info) | 929 | if (mtd->get_user_prot_info) |
852 | ret = mtd->get_user_prot_info(mtd, buf, 4096); | 930 | ret = mtd->get_user_prot_info(mtd, buf, 4096); |
853 | break; | 931 | break; |
@@ -871,7 +949,7 @@ static int mtd_ioctl(struct file *file, u_int cmd, u_long arg) | |||
871 | { | 949 | { |
872 | struct otp_info oinfo; | 950 | struct otp_info oinfo; |
873 | 951 | ||
874 | if (mfi->mode != MTD_MODE_OTP_USER) | 952 | if (mfi->mode != MTD_FILE_MODE_OTP_USER) |
875 | return -EINVAL; | 953 | return -EINVAL; |
876 | if (copy_from_user(&oinfo, argp, sizeof(oinfo))) | 954 | if (copy_from_user(&oinfo, argp, sizeof(oinfo))) |
877 | return -EFAULT; | 955 | return -EFAULT; |
@@ -882,7 +960,7 @@ static int mtd_ioctl(struct file *file, u_int cmd, u_long arg) | |||
882 | } | 960 | } |
883 | #endif | 961 | #endif |
884 | 962 | ||
885 | /* This ioctl is being deprecated - it truncates the ecc layout */ | 963 | /* This ioctl is being deprecated - it truncates the ECC layout */ |
886 | case ECCGETLAYOUT: | 964 | case ECCGETLAYOUT: |
887 | { | 965 | { |
888 | struct nand_ecclayout_user *usrlay; | 966 | struct nand_ecclayout_user *usrlay; |
@@ -915,17 +993,17 @@ static int mtd_ioctl(struct file *file, u_int cmd, u_long arg) | |||
915 | mfi->mode = 0; | 993 | mfi->mode = 0; |
916 | 994 | ||
917 | switch(arg) { | 995 | switch(arg) { |
918 | case MTD_MODE_OTP_FACTORY: | 996 | case MTD_FILE_MODE_OTP_FACTORY: |
919 | case MTD_MODE_OTP_USER: | 997 | case MTD_FILE_MODE_OTP_USER: |
920 | ret = otp_select_filemode(mfi, arg); | 998 | ret = otp_select_filemode(mfi, arg); |
921 | break; | 999 | break; |
922 | 1000 | ||
923 | case MTD_MODE_RAW: | 1001 | case MTD_FILE_MODE_RAW: |
924 | if (!mtd->read_oob || !mtd->write_oob) | 1002 | if (!mtd->read_oob || !mtd->write_oob) |
925 | return -EOPNOTSUPP; | 1003 | return -EOPNOTSUPP; |
926 | mfi->mode = arg; | 1004 | mfi->mode = arg; |
927 | 1005 | ||
928 | case MTD_MODE_NORMAL: | 1006 | case MTD_FILE_MODE_NORMAL: |
929 | break; | 1007 | break; |
930 | default: | 1008 | default: |
931 | ret = -EINVAL; | 1009 | ret = -EINVAL; |
@@ -1011,7 +1089,7 @@ static long mtd_compat_ioctl(struct file *file, unsigned int cmd, | |||
1011 | if (copy_from_user(&buf, argp, sizeof(buf))) | 1089 | if (copy_from_user(&buf, argp, sizeof(buf))) |
1012 | ret = -EFAULT; | 1090 | ret = -EFAULT; |
1013 | else | 1091 | else |
1014 | ret = mtd_do_readoob(mtd, buf.start, | 1092 | ret = mtd_do_readoob(file, mtd, buf.start, |
1015 | buf.length, compat_ptr(buf.ptr), | 1093 | buf.length, compat_ptr(buf.ptr), |
1016 | &buf_user->start); | 1094 | &buf_user->start); |
1017 | break; | 1095 | break; |
diff --git a/drivers/mtd/mtdconcat.c b/drivers/mtd/mtdconcat.c index e601672a5305..6df4d4d4eb92 100644 --- a/drivers/mtd/mtdconcat.c +++ b/drivers/mtd/mtdconcat.c | |||
@@ -95,10 +95,10 @@ concat_read(struct mtd_info *mtd, loff_t from, size_t len, | |||
95 | 95 | ||
96 | /* Save information about bitflips! */ | 96 | /* Save information about bitflips! */ |
97 | if (unlikely(err)) { | 97 | if (unlikely(err)) { |
98 | if (err == -EBADMSG) { | 98 | if (mtd_is_eccerr(err)) { |
99 | mtd->ecc_stats.failed++; | 99 | mtd->ecc_stats.failed++; |
100 | ret = err; | 100 | ret = err; |
101 | } else if (err == -EUCLEAN) { | 101 | } else if (mtd_is_bitflip(err)) { |
102 | mtd->ecc_stats.corrected++; | 102 | mtd->ecc_stats.corrected++; |
103 | /* Do not overwrite -EBADMSG !! */ | 103 | /* Do not overwrite -EBADMSG !! */ |
104 | if (!ret) | 104 | if (!ret) |
@@ -279,10 +279,10 @@ concat_read_oob(struct mtd_info *mtd, loff_t from, struct mtd_oob_ops *ops) | |||
279 | 279 | ||
280 | /* Save information about bitflips! */ | 280 | /* Save information about bitflips! */ |
281 | if (unlikely(err)) { | 281 | if (unlikely(err)) { |
282 | if (err == -EBADMSG) { | 282 | if (mtd_is_eccerr(err)) { |
283 | mtd->ecc_stats.failed++; | 283 | mtd->ecc_stats.failed++; |
284 | ret = err; | 284 | ret = err; |
285 | } else if (err == -EUCLEAN) { | 285 | } else if (mtd_is_bitflip(err)) { |
286 | mtd->ecc_stats.corrected++; | 286 | mtd->ecc_stats.corrected++; |
287 | /* Do not overwrite -EBADMSG !! */ | 287 | /* Do not overwrite -EBADMSG !! */ |
288 | if (!ret) | 288 | if (!ret) |
@@ -770,7 +770,7 @@ struct mtd_info *mtd_concat_create(struct mtd_info *subdev[], /* subdevices to c | |||
770 | 770 | ||
771 | /* | 771 | /* |
772 | * Set up the new "super" device's MTD object structure, check for | 772 | * Set up the new "super" device's MTD object structure, check for |
773 | * incompatibilites between the subdevices. | 773 | * incompatibilities between the subdevices. |
774 | */ | 774 | */ |
775 | concat->mtd.type = subdev[0]->type; | 775 | concat->mtd.type = subdev[0]->type; |
776 | concat->mtd.flags = subdev[0]->flags; | 776 | concat->mtd.flags = subdev[0]->flags; |
diff --git a/drivers/mtd/mtdcore.c b/drivers/mtd/mtdcore.c index c510aff289a8..b01993ea260e 100644 --- a/drivers/mtd/mtdcore.c +++ b/drivers/mtd/mtdcore.c | |||
@@ -362,7 +362,7 @@ int add_mtd_device(struct mtd_info *mtd) | |||
362 | MTD_DEVT(i) + 1, | 362 | MTD_DEVT(i) + 1, |
363 | NULL, "mtd%dro", i); | 363 | NULL, "mtd%dro", i); |
364 | 364 | ||
365 | DEBUG(0, "mtd: Giving out device %d to %s\n", i, mtd->name); | 365 | pr_debug("mtd: Giving out device %d to %s\n", i, mtd->name); |
366 | /* No need to get a refcount on the module containing | 366 | /* No need to get a refcount on the module containing |
367 | the notifier, since we hold the mtd_table_mutex */ | 367 | the notifier, since we hold the mtd_table_mutex */ |
368 | list_for_each_entry(not, &mtd_notifiers, list) | 368 | list_for_each_entry(not, &mtd_notifiers, list) |
@@ -429,27 +429,63 @@ out_error: | |||
429 | } | 429 | } |
430 | 430 | ||
431 | /** | 431 | /** |
432 | * mtd_device_register - register an MTD device. | 432 | * mtd_device_parse_register - parse partitions and register an MTD device. |
433 | * | 433 | * |
434 | * @master: the MTD device to register | 434 | * @mtd: the MTD device to register |
435 | * @parts: the partitions to register - only valid if nr_parts > 0 | 435 | * @types: the list of MTD partition probes to try, see |
436 | * @nr_parts: the number of partitions in parts. If zero then the full MTD | 436 | * 'parse_mtd_partitions()' for more information |
437 | * device is registered | 437 | * @parser_data: MTD partition parser-specific data |
438 | * @parts: fallback partition information to register, if parsing fails; | ||
439 | * only valid if %nr_parts > %0 | ||
440 | * @nr_parts: the number of partitions in parts, if zero then the full | ||
441 | * MTD device is registered if no partition info is found | ||
438 | * | 442 | * |
439 | * Register an MTD device with the system and optionally, a number of | 443 | * This function aggregates MTD partitions parsing (done by |
440 | * partitions. If nr_parts is 0 then the whole device is registered, otherwise | 444 | * 'parse_mtd_partitions()') and MTD device and partitions registering. It |
441 | * only the partitions are registered. To register both the full device *and* | 445 | * basically follows the most common pattern found in many MTD drivers: |
442 | * the partitions, call mtd_device_register() twice, once with nr_parts == 0 | 446 | * |
443 | * and once equal to the number of partitions. | 447 | * * It first tries to probe partitions on MTD device @mtd using parsers |
448 | * specified in @types (if @types is %NULL, then the default list of parsers | ||
449 | * is used, see 'parse_mtd_partitions()' for more information). If none are | ||
450 | * found this functions tries to fallback to information specified in | ||
451 | * @parts/@nr_parts. | ||
452 | * * If any partitioning info was found, this function registers the found | ||
453 | * partitions. | ||
454 | * * If no partitions were found this function just registers the MTD device | ||
455 | * @mtd and exits. | ||
456 | * | ||
457 | * Returns zero in case of success and a negative error code in case of failure. | ||
444 | */ | 458 | */ |
445 | int mtd_device_register(struct mtd_info *master, | 459 | int mtd_device_parse_register(struct mtd_info *mtd, const char **types, |
446 | const struct mtd_partition *parts, | 460 | struct mtd_part_parser_data *parser_data, |
447 | int nr_parts) | 461 | const struct mtd_partition *parts, |
462 | int nr_parts) | ||
448 | { | 463 | { |
449 | return parts ? add_mtd_partitions(master, parts, nr_parts) : | 464 | int err; |
450 | add_mtd_device(master); | 465 | struct mtd_partition *real_parts; |
466 | |||
467 | err = parse_mtd_partitions(mtd, types, &real_parts, parser_data); | ||
468 | if (err <= 0 && nr_parts && parts) { | ||
469 | real_parts = kmemdup(parts, sizeof(*parts) * nr_parts, | ||
470 | GFP_KERNEL); | ||
471 | if (!real_parts) | ||
472 | err = -ENOMEM; | ||
473 | else | ||
474 | err = nr_parts; | ||
475 | } | ||
476 | |||
477 | if (err > 0) { | ||
478 | err = add_mtd_partitions(mtd, real_parts, err); | ||
479 | kfree(real_parts); | ||
480 | } else if (err == 0) { | ||
481 | err = add_mtd_device(mtd); | ||
482 | if (err == 1) | ||
483 | err = -ENODEV; | ||
484 | } | ||
485 | |||
486 | return err; | ||
451 | } | 487 | } |
452 | EXPORT_SYMBOL_GPL(mtd_device_register); | 488 | EXPORT_SYMBOL_GPL(mtd_device_parse_register); |
453 | 489 | ||
454 | /** | 490 | /** |
455 | * mtd_device_unregister - unregister an existing MTD device. | 491 | * mtd_device_unregister - unregister an existing MTD device. |
diff --git a/drivers/mtd/mtdcore.h b/drivers/mtd/mtdcore.h index 0ed6126b4c1f..961a38408542 100644 --- a/drivers/mtd/mtdcore.h +++ b/drivers/mtd/mtdcore.h | |||
@@ -15,6 +15,9 @@ extern int del_mtd_device(struct mtd_info *mtd); | |||
15 | extern int add_mtd_partitions(struct mtd_info *, const struct mtd_partition *, | 15 | extern int add_mtd_partitions(struct mtd_info *, const struct mtd_partition *, |
16 | int); | 16 | int); |
17 | extern int del_mtd_partitions(struct mtd_info *); | 17 | extern int del_mtd_partitions(struct mtd_info *); |
18 | extern int parse_mtd_partitions(struct mtd_info *master, const char **types, | ||
19 | struct mtd_partition **pparts, | ||
20 | struct mtd_part_parser_data *data); | ||
18 | 21 | ||
19 | #define mtd_for_each_device(mtd) \ | 22 | #define mtd_for_each_device(mtd) \ |
20 | for ((mtd) = __mtd_next_device(0); \ | 23 | for ((mtd) = __mtd_next_device(0); \ |
diff --git a/drivers/mtd/mtdoops.c b/drivers/mtd/mtdoops.c index e3e40f440323..1e2fa6236705 100644 --- a/drivers/mtd/mtdoops.c +++ b/drivers/mtd/mtdoops.c | |||
@@ -258,7 +258,7 @@ static void find_next_position(struct mtdoops_context *cxt) | |||
258 | ret = mtd->read(mtd, page * record_size, MTDOOPS_HEADER_SIZE, | 258 | ret = mtd->read(mtd, page * record_size, MTDOOPS_HEADER_SIZE, |
259 | &retlen, (u_char *) &count[0]); | 259 | &retlen, (u_char *) &count[0]); |
260 | if (retlen != MTDOOPS_HEADER_SIZE || | 260 | if (retlen != MTDOOPS_HEADER_SIZE || |
261 | (ret < 0 && ret != -EUCLEAN)) { | 261 | (ret < 0 && !mtd_is_bitflip(ret))) { |
262 | printk(KERN_ERR "mtdoops: read failure at %ld (%td of %d read), err %d\n", | 262 | printk(KERN_ERR "mtdoops: read failure at %ld (%td of %d read), err %d\n", |
263 | page * record_size, retlen, | 263 | page * record_size, retlen, |
264 | MTDOOPS_HEADER_SIZE, ret); | 264 | MTDOOPS_HEADER_SIZE, ret); |
diff --git a/drivers/mtd/mtdpart.c b/drivers/mtd/mtdpart.c index 630be3e7da04..a0bd2de4752b 100644 --- a/drivers/mtd/mtdpart.c +++ b/drivers/mtd/mtdpart.c | |||
@@ -73,9 +73,9 @@ static int part_read(struct mtd_info *mtd, loff_t from, size_t len, | |||
73 | res = part->master->read(part->master, from + part->offset, | 73 | res = part->master->read(part->master, from + part->offset, |
74 | len, retlen, buf); | 74 | len, retlen, buf); |
75 | if (unlikely(res)) { | 75 | if (unlikely(res)) { |
76 | if (res == -EUCLEAN) | 76 | if (mtd_is_bitflip(res)) |
77 | mtd->ecc_stats.corrected += part->master->ecc_stats.corrected - stats.corrected; | 77 | mtd->ecc_stats.corrected += part->master->ecc_stats.corrected - stats.corrected; |
78 | if (res == -EBADMSG) | 78 | if (mtd_is_eccerr(res)) |
79 | mtd->ecc_stats.failed += part->master->ecc_stats.failed - stats.failed; | 79 | mtd->ecc_stats.failed += part->master->ecc_stats.failed - stats.failed; |
80 | } | 80 | } |
81 | return res; | 81 | return res; |
@@ -130,7 +130,7 @@ static int part_read_oob(struct mtd_info *mtd, loff_t from, | |||
130 | if (ops->oobbuf) { | 130 | if (ops->oobbuf) { |
131 | size_t len, pages; | 131 | size_t len, pages; |
132 | 132 | ||
133 | if (ops->mode == MTD_OOB_AUTO) | 133 | if (ops->mode == MTD_OPS_AUTO_OOB) |
134 | len = mtd->oobavail; | 134 | len = mtd->oobavail; |
135 | else | 135 | else |
136 | len = mtd->oobsize; | 136 | len = mtd->oobsize; |
@@ -142,9 +142,9 @@ static int part_read_oob(struct mtd_info *mtd, loff_t from, | |||
142 | 142 | ||
143 | res = part->master->read_oob(part->master, from + part->offset, ops); | 143 | res = part->master->read_oob(part->master, from + part->offset, ops); |
144 | if (unlikely(res)) { | 144 | if (unlikely(res)) { |
145 | if (res == -EUCLEAN) | 145 | if (mtd_is_bitflip(res)) |
146 | mtd->ecc_stats.corrected++; | 146 | mtd->ecc_stats.corrected++; |
147 | if (res == -EBADMSG) | 147 | if (mtd_is_eccerr(res)) |
148 | mtd->ecc_stats.failed++; | 148 | mtd->ecc_stats.failed++; |
149 | } | 149 | } |
150 | return res; | 150 | return res; |
@@ -479,6 +479,19 @@ static struct mtd_part *allocate_partition(struct mtd_info *master, | |||
479 | (unsigned long long)cur_offset, (unsigned long long)slave->offset); | 479 | (unsigned long long)cur_offset, (unsigned long long)slave->offset); |
480 | } | 480 | } |
481 | } | 481 | } |
482 | if (slave->offset == MTDPART_OFS_RETAIN) { | ||
483 | slave->offset = cur_offset; | ||
484 | if (master->size - slave->offset >= slave->mtd.size) { | ||
485 | slave->mtd.size = master->size - slave->offset | ||
486 | - slave->mtd.size; | ||
487 | } else { | ||
488 | printk(KERN_ERR "mtd partition \"%s\" doesn't have enough space: %#llx < %#llx, disabled\n", | ||
489 | part->name, master->size - slave->offset, | ||
490 | slave->mtd.size); | ||
491 | /* register to preserve ordering */ | ||
492 | goto out_register; | ||
493 | } | ||
494 | } | ||
482 | if (slave->mtd.size == MTDPART_SIZ_FULL) | 495 | if (slave->mtd.size == MTDPART_SIZ_FULL) |
483 | slave->mtd.size = master->size - slave->offset; | 496 | slave->mtd.size = master->size - slave->offset; |
484 | 497 | ||
@@ -693,6 +706,8 @@ static struct mtd_part_parser *get_partition_parser(const char *name) | |||
693 | return ret; | 706 | return ret; |
694 | } | 707 | } |
695 | 708 | ||
709 | #define put_partition_parser(p) do { module_put((p)->owner); } while (0) | ||
710 | |||
696 | int register_mtd_parser(struct mtd_part_parser *p) | 711 | int register_mtd_parser(struct mtd_part_parser *p) |
697 | { | 712 | { |
698 | spin_lock(&part_parser_lock); | 713 | spin_lock(&part_parser_lock); |
@@ -712,19 +727,51 @@ int deregister_mtd_parser(struct mtd_part_parser *p) | |||
712 | } | 727 | } |
713 | EXPORT_SYMBOL_GPL(deregister_mtd_parser); | 728 | EXPORT_SYMBOL_GPL(deregister_mtd_parser); |
714 | 729 | ||
730 | /* | ||
731 | * Do not forget to update 'parse_mtd_partitions()' kerneldoc comment if you | ||
732 | * are changing this array! | ||
733 | */ | ||
734 | static const char *default_mtd_part_types[] = { | ||
735 | "cmdlinepart", | ||
736 | "ofpart", | ||
737 | NULL | ||
738 | }; | ||
739 | |||
740 | /** | ||
741 | * parse_mtd_partitions - parse MTD partitions | ||
742 | * @master: the master partition (describes whole MTD device) | ||
743 | * @types: names of partition parsers to try or %NULL | ||
744 | * @pparts: array of partitions found is returned here | ||
745 | * @data: MTD partition parser-specific data | ||
746 | * | ||
747 | * This function tries to find partition on MTD device @master. It uses MTD | ||
748 | * partition parsers, specified in @types. However, if @types is %NULL, then | ||
749 | * the default list of parsers is used. The default list contains only the | ||
750 | * "cmdlinepart" and "ofpart" parsers ATM. | ||
751 | * | ||
752 | * This function may return: | ||
753 | * o a negative error code in case of failure | ||
754 | * o zero if no partitions were found | ||
755 | * o a positive number of found partitions, in which case on exit @pparts will | ||
756 | * point to an array containing this number of &struct mtd_info objects. | ||
757 | */ | ||
715 | int parse_mtd_partitions(struct mtd_info *master, const char **types, | 758 | int parse_mtd_partitions(struct mtd_info *master, const char **types, |
716 | struct mtd_partition **pparts, unsigned long origin) | 759 | struct mtd_partition **pparts, |
760 | struct mtd_part_parser_data *data) | ||
717 | { | 761 | { |
718 | struct mtd_part_parser *parser; | 762 | struct mtd_part_parser *parser; |
719 | int ret = 0; | 763 | int ret = 0; |
720 | 764 | ||
765 | if (!types) | ||
766 | types = default_mtd_part_types; | ||
767 | |||
721 | for ( ; ret <= 0 && *types; types++) { | 768 | for ( ; ret <= 0 && *types; types++) { |
722 | parser = get_partition_parser(*types); | 769 | parser = get_partition_parser(*types); |
723 | if (!parser && !request_module("%s", *types)) | 770 | if (!parser && !request_module("%s", *types)) |
724 | parser = get_partition_parser(*types); | 771 | parser = get_partition_parser(*types); |
725 | if (!parser) | 772 | if (!parser) |
726 | continue; | 773 | continue; |
727 | ret = (*parser->parse_fn)(master, pparts, origin); | 774 | ret = (*parser->parse_fn)(master, pparts, data); |
728 | if (ret > 0) { | 775 | if (ret > 0) { |
729 | printk(KERN_NOTICE "%d %s partitions found on MTD device %s\n", | 776 | printk(KERN_NOTICE "%d %s partitions found on MTD device %s\n", |
730 | ret, parser->name, master->name); | 777 | ret, parser->name, master->name); |
@@ -733,7 +780,6 @@ int parse_mtd_partitions(struct mtd_info *master, const char **types, | |||
733 | } | 780 | } |
734 | return ret; | 781 | return ret; |
735 | } | 782 | } |
736 | EXPORT_SYMBOL_GPL(parse_mtd_partitions); | ||
737 | 783 | ||
738 | int mtd_is_partition(struct mtd_info *mtd) | 784 | int mtd_is_partition(struct mtd_info *mtd) |
739 | { | 785 | { |
diff --git a/drivers/mtd/mtdsuper.c b/drivers/mtd/mtdsuper.c index 89f8e66448ab..a90bfe79916d 100644 --- a/drivers/mtd/mtdsuper.c +++ b/drivers/mtd/mtdsuper.c | |||
@@ -27,12 +27,12 @@ static int get_sb_mtd_compare(struct super_block *sb, void *_mtd) | |||
27 | struct mtd_info *mtd = _mtd; | 27 | struct mtd_info *mtd = _mtd; |
28 | 28 | ||
29 | if (sb->s_mtd == mtd) { | 29 | if (sb->s_mtd == mtd) { |
30 | DEBUG(2, "MTDSB: Match on device %d (\"%s\")\n", | 30 | pr_debug("MTDSB: Match on device %d (\"%s\")\n", |
31 | mtd->index, mtd->name); | 31 | mtd->index, mtd->name); |
32 | return 1; | 32 | return 1; |
33 | } | 33 | } |
34 | 34 | ||
35 | DEBUG(2, "MTDSB: No match, device %d (\"%s\"), device %d (\"%s\")\n", | 35 | pr_debug("MTDSB: No match, device %d (\"%s\"), device %d (\"%s\")\n", |
36 | sb->s_mtd->index, sb->s_mtd->name, mtd->index, mtd->name); | 36 | sb->s_mtd->index, sb->s_mtd->name, mtd->index, mtd->name); |
37 | return 0; | 37 | return 0; |
38 | } | 38 | } |
@@ -71,7 +71,7 @@ static struct dentry *mount_mtd_aux(struct file_system_type *fs_type, int flags, | |||
71 | goto already_mounted; | 71 | goto already_mounted; |
72 | 72 | ||
73 | /* fresh new superblock */ | 73 | /* fresh new superblock */ |
74 | DEBUG(1, "MTDSB: New superblock for device %d (\"%s\")\n", | 74 | pr_debug("MTDSB: New superblock for device %d (\"%s\")\n", |
75 | mtd->index, mtd->name); | 75 | mtd->index, mtd->name); |
76 | 76 | ||
77 | sb->s_flags = flags; | 77 | sb->s_flags = flags; |
@@ -88,7 +88,7 @@ static struct dentry *mount_mtd_aux(struct file_system_type *fs_type, int flags, | |||
88 | 88 | ||
89 | /* new mountpoint for an already mounted superblock */ | 89 | /* new mountpoint for an already mounted superblock */ |
90 | already_mounted: | 90 | already_mounted: |
91 | DEBUG(1, "MTDSB: Device %d (\"%s\") is already mounted\n", | 91 | pr_debug("MTDSB: Device %d (\"%s\") is already mounted\n", |
92 | mtd->index, mtd->name); | 92 | mtd->index, mtd->name); |
93 | put_mtd_device(mtd); | 93 | put_mtd_device(mtd); |
94 | return dget(sb->s_root); | 94 | return dget(sb->s_root); |
@@ -109,7 +109,7 @@ static struct dentry *mount_mtd_nr(struct file_system_type *fs_type, int flags, | |||
109 | 109 | ||
110 | mtd = get_mtd_device(NULL, mtdnr); | 110 | mtd = get_mtd_device(NULL, mtdnr); |
111 | if (IS_ERR(mtd)) { | 111 | if (IS_ERR(mtd)) { |
112 | DEBUG(0, "MTDSB: Device #%u doesn't appear to exist\n", mtdnr); | 112 | pr_debug("MTDSB: Device #%u doesn't appear to exist\n", mtdnr); |
113 | return ERR_CAST(mtd); | 113 | return ERR_CAST(mtd); |
114 | } | 114 | } |
115 | 115 | ||
@@ -132,7 +132,7 @@ struct dentry *mount_mtd(struct file_system_type *fs_type, int flags, | |||
132 | if (!dev_name) | 132 | if (!dev_name) |
133 | return ERR_PTR(-EINVAL); | 133 | return ERR_PTR(-EINVAL); |
134 | 134 | ||
135 | DEBUG(2, "MTDSB: dev_name \"%s\"\n", dev_name); | 135 | pr_debug("MTDSB: dev_name \"%s\"\n", dev_name); |
136 | 136 | ||
137 | /* the preferred way of mounting in future; especially when | 137 | /* the preferred way of mounting in future; especially when |
138 | * CONFIG_BLOCK=n - we specify the underlying MTD device by number or | 138 | * CONFIG_BLOCK=n - we specify the underlying MTD device by number or |
@@ -143,7 +143,7 @@ struct dentry *mount_mtd(struct file_system_type *fs_type, int flags, | |||
143 | struct mtd_info *mtd; | 143 | struct mtd_info *mtd; |
144 | 144 | ||
145 | /* mount by MTD device name */ | 145 | /* mount by MTD device name */ |
146 | DEBUG(1, "MTDSB: mtd:%%s, name \"%s\"\n", | 146 | pr_debug("MTDSB: mtd:%%s, name \"%s\"\n", |
147 | dev_name + 4); | 147 | dev_name + 4); |
148 | 148 | ||
149 | mtd = get_mtd_device_nm(dev_name + 4); | 149 | mtd = get_mtd_device_nm(dev_name + 4); |
@@ -164,7 +164,7 @@ struct dentry *mount_mtd(struct file_system_type *fs_type, int flags, | |||
164 | mtdnr = simple_strtoul(dev_name + 3, &endptr, 0); | 164 | mtdnr = simple_strtoul(dev_name + 3, &endptr, 0); |
165 | if (!*endptr) { | 165 | if (!*endptr) { |
166 | /* It was a valid number */ | 166 | /* It was a valid number */ |
167 | DEBUG(1, "MTDSB: mtd%%d, mtdnr %d\n", | 167 | pr_debug("MTDSB: mtd%%d, mtdnr %d\n", |
168 | mtdnr); | 168 | mtdnr); |
169 | return mount_mtd_nr(fs_type, flags, | 169 | return mount_mtd_nr(fs_type, flags, |
170 | dev_name, data, | 170 | dev_name, data, |
@@ -180,10 +180,10 @@ struct dentry *mount_mtd(struct file_system_type *fs_type, int flags, | |||
180 | bdev = lookup_bdev(dev_name); | 180 | bdev = lookup_bdev(dev_name); |
181 | if (IS_ERR(bdev)) { | 181 | if (IS_ERR(bdev)) { |
182 | ret = PTR_ERR(bdev); | 182 | ret = PTR_ERR(bdev); |
183 | DEBUG(1, "MTDSB: lookup_bdev() returned %d\n", ret); | 183 | pr_debug("MTDSB: lookup_bdev() returned %d\n", ret); |
184 | return ERR_PTR(ret); | 184 | return ERR_PTR(ret); |
185 | } | 185 | } |
186 | DEBUG(1, "MTDSB: lookup_bdev() returned 0\n"); | 186 | pr_debug("MTDSB: lookup_bdev() returned 0\n"); |
187 | 187 | ||
188 | ret = -EINVAL; | 188 | ret = -EINVAL; |
189 | 189 | ||
diff --git a/drivers/mtd/mtdswap.c b/drivers/mtd/mtdswap.c index fd7885327611..bd9590c723e4 100644 --- a/drivers/mtd/mtdswap.c +++ b/drivers/mtd/mtdswap.c | |||
@@ -86,7 +86,7 @@ struct swap_eb { | |||
86 | unsigned int flags; | 86 | unsigned int flags; |
87 | unsigned int active_count; | 87 | unsigned int active_count; |
88 | unsigned int erase_count; | 88 | unsigned int erase_count; |
89 | unsigned int pad; /* speeds up pointer decremtnt */ | 89 | unsigned int pad; /* speeds up pointer decrement */ |
90 | }; | 90 | }; |
91 | 91 | ||
92 | #define MTDSWAP_ECNT_MIN(rbroot) (rb_entry(rb_first(rbroot), struct swap_eb, \ | 92 | #define MTDSWAP_ECNT_MIN(rbroot) (rb_entry(rb_first(rbroot), struct swap_eb, \ |
@@ -314,7 +314,7 @@ static int mtdswap_read_oob(struct mtdswap_dev *d, loff_t from, | |||
314 | { | 314 | { |
315 | int ret = d->mtd->read_oob(d->mtd, from, ops); | 315 | int ret = d->mtd->read_oob(d->mtd, from, ops); |
316 | 316 | ||
317 | if (ret == -EUCLEAN) | 317 | if (mtd_is_bitflip(ret)) |
318 | return ret; | 318 | return ret; |
319 | 319 | ||
320 | if (ret) { | 320 | if (ret) { |
@@ -350,11 +350,11 @@ static int mtdswap_read_markers(struct mtdswap_dev *d, struct swap_eb *eb) | |||
350 | ops.oobbuf = d->oob_buf; | 350 | ops.oobbuf = d->oob_buf; |
351 | ops.ooboffs = 0; | 351 | ops.ooboffs = 0; |
352 | ops.datbuf = NULL; | 352 | ops.datbuf = NULL; |
353 | ops.mode = MTD_OOB_AUTO; | 353 | ops.mode = MTD_OPS_AUTO_OOB; |
354 | 354 | ||
355 | ret = mtdswap_read_oob(d, offset, &ops); | 355 | ret = mtdswap_read_oob(d, offset, &ops); |
356 | 356 | ||
357 | if (ret && ret != -EUCLEAN) | 357 | if (ret && !mtd_is_bitflip(ret)) |
358 | return ret; | 358 | return ret; |
359 | 359 | ||
360 | data = (struct mtdswap_oobdata *)d->oob_buf; | 360 | data = (struct mtdswap_oobdata *)d->oob_buf; |
@@ -363,7 +363,7 @@ static int mtdswap_read_markers(struct mtdswap_dev *d, struct swap_eb *eb) | |||
363 | 363 | ||
364 | if (le16_to_cpu(data->magic) == MTDSWAP_MAGIC_CLEAN) { | 364 | if (le16_to_cpu(data->magic) == MTDSWAP_MAGIC_CLEAN) { |
365 | eb->erase_count = le32_to_cpu(data->count); | 365 | eb->erase_count = le32_to_cpu(data->count); |
366 | if (ret == -EUCLEAN) | 366 | if (mtd_is_bitflip(ret)) |
367 | ret = MTDSWAP_SCANNED_BITFLIP; | 367 | ret = MTDSWAP_SCANNED_BITFLIP; |
368 | else { | 368 | else { |
369 | if (le16_to_cpu(data2->magic) == MTDSWAP_MAGIC_DIRTY) | 369 | if (le16_to_cpu(data2->magic) == MTDSWAP_MAGIC_DIRTY) |
@@ -389,7 +389,7 @@ static int mtdswap_write_marker(struct mtdswap_dev *d, struct swap_eb *eb, | |||
389 | 389 | ||
390 | ops.ooboffs = 0; | 390 | ops.ooboffs = 0; |
391 | ops.oobbuf = (uint8_t *)&n; | 391 | ops.oobbuf = (uint8_t *)&n; |
392 | ops.mode = MTD_OOB_AUTO; | 392 | ops.mode = MTD_OPS_AUTO_OOB; |
393 | ops.datbuf = NULL; | 393 | ops.datbuf = NULL; |
394 | 394 | ||
395 | if (marker == MTDSWAP_TYPE_CLEAN) { | 395 | if (marker == MTDSWAP_TYPE_CLEAN) { |
@@ -408,7 +408,7 @@ static int mtdswap_write_marker(struct mtdswap_dev *d, struct swap_eb *eb, | |||
408 | if (ret) { | 408 | if (ret) { |
409 | dev_warn(d->dev, "Write OOB failed for block at %08llx " | 409 | dev_warn(d->dev, "Write OOB failed for block at %08llx " |
410 | "error %d\n", offset, ret); | 410 | "error %d\n", offset, ret); |
411 | if (ret == -EIO || ret == -EBADMSG) | 411 | if (ret == -EIO || mtd_is_eccerr(ret)) |
412 | mtdswap_handle_write_error(d, eb); | 412 | mtdswap_handle_write_error(d, eb); |
413 | return ret; | 413 | return ret; |
414 | } | 414 | } |
@@ -628,7 +628,7 @@ static int mtdswap_map_free_block(struct mtdswap_dev *d, unsigned int page, | |||
628 | TREE_COUNT(d, CLEAN)--; | 628 | TREE_COUNT(d, CLEAN)--; |
629 | 629 | ||
630 | ret = mtdswap_write_marker(d, eb, MTDSWAP_TYPE_DIRTY); | 630 | ret = mtdswap_write_marker(d, eb, MTDSWAP_TYPE_DIRTY); |
631 | } while (ret == -EIO || ret == -EBADMSG); | 631 | } while (ret == -EIO || mtd_is_eccerr(ret)); |
632 | 632 | ||
633 | if (ret) | 633 | if (ret) |
634 | return ret; | 634 | return ret; |
@@ -678,7 +678,7 @@ retry: | |||
678 | ret = mtdswap_map_free_block(d, page, bp); | 678 | ret = mtdswap_map_free_block(d, page, bp); |
679 | eb = d->eb_data + (*bp / d->pages_per_eblk); | 679 | eb = d->eb_data + (*bp / d->pages_per_eblk); |
680 | 680 | ||
681 | if (ret == -EIO || ret == -EBADMSG) { | 681 | if (ret == -EIO || mtd_is_eccerr(ret)) { |
682 | d->curr_write = NULL; | 682 | d->curr_write = NULL; |
683 | eb->active_count--; | 683 | eb->active_count--; |
684 | d->revmap[*bp] = PAGE_UNDEF; | 684 | d->revmap[*bp] = PAGE_UNDEF; |
@@ -690,7 +690,7 @@ retry: | |||
690 | 690 | ||
691 | writepos = (loff_t)*bp << PAGE_SHIFT; | 691 | writepos = (loff_t)*bp << PAGE_SHIFT; |
692 | ret = mtd->write(mtd, writepos, PAGE_SIZE, &retlen, buf); | 692 | ret = mtd->write(mtd, writepos, PAGE_SIZE, &retlen, buf); |
693 | if (ret == -EIO || ret == -EBADMSG) { | 693 | if (ret == -EIO || mtd_is_eccerr(ret)) { |
694 | d->curr_write_pos--; | 694 | d->curr_write_pos--; |
695 | eb->active_count--; | 695 | eb->active_count--; |
696 | d->revmap[*bp] = PAGE_UNDEF; | 696 | d->revmap[*bp] = PAGE_UNDEF; |
@@ -738,7 +738,7 @@ static int mtdswap_move_block(struct mtdswap_dev *d, unsigned int oldblock, | |||
738 | retry: | 738 | retry: |
739 | ret = mtd->read(mtd, readpos, PAGE_SIZE, &retlen, d->page_buf); | 739 | ret = mtd->read(mtd, readpos, PAGE_SIZE, &retlen, d->page_buf); |
740 | 740 | ||
741 | if (ret < 0 && ret != -EUCLEAN) { | 741 | if (ret < 0 && !mtd_is_bitflip(ret)) { |
742 | oldeb = d->eb_data + oldblock / d->pages_per_eblk; | 742 | oldeb = d->eb_data + oldblock / d->pages_per_eblk; |
743 | oldeb->flags |= EBLOCK_READERR; | 743 | oldeb->flags |= EBLOCK_READERR; |
744 | 744 | ||
@@ -931,7 +931,7 @@ static unsigned int mtdswap_eblk_passes(struct mtdswap_dev *d, | |||
931 | struct mtd_oob_ops ops; | 931 | struct mtd_oob_ops ops; |
932 | int ret; | 932 | int ret; |
933 | 933 | ||
934 | ops.mode = MTD_OOB_AUTO; | 934 | ops.mode = MTD_OPS_AUTO_OOB; |
935 | ops.len = mtd->writesize; | 935 | ops.len = mtd->writesize; |
936 | ops.ooblen = mtd->ecclayout->oobavail; | 936 | ops.ooblen = mtd->ecclayout->oobavail; |
937 | ops.ooboffs = 0; | 937 | ops.ooboffs = 0; |
@@ -1016,7 +1016,7 @@ static int mtdswap_gc(struct mtdswap_dev *d, unsigned int background) | |||
1016 | 1016 | ||
1017 | if (ret == 0) | 1017 | if (ret == 0) |
1018 | mtdswap_rb_add(d, eb, MTDSWAP_CLEAN); | 1018 | mtdswap_rb_add(d, eb, MTDSWAP_CLEAN); |
1019 | else if (ret != -EIO && ret != -EBADMSG) | 1019 | else if (ret != -EIO && !mtd_is_eccerr(ret)) |
1020 | mtdswap_rb_add(d, eb, MTDSWAP_DIRTY); | 1020 | mtdswap_rb_add(d, eb, MTDSWAP_DIRTY); |
1021 | 1021 | ||
1022 | return 0; | 1022 | return 0; |
@@ -1164,7 +1164,7 @@ retry: | |||
1164 | ret = mtd->read(mtd, readpos, PAGE_SIZE, &retlen, buf); | 1164 | ret = mtd->read(mtd, readpos, PAGE_SIZE, &retlen, buf); |
1165 | 1165 | ||
1166 | d->mtd_read_count++; | 1166 | d->mtd_read_count++; |
1167 | if (ret == -EUCLEAN) { | 1167 | if (mtd_is_bitflip(ret)) { |
1168 | eb->flags |= EBLOCK_BITFLIP; | 1168 | eb->flags |= EBLOCK_BITFLIP; |
1169 | mtdswap_rb_add(d, eb, MTDSWAP_BITFLIP); | 1169 | mtdswap_rb_add(d, eb, MTDSWAP_BITFLIP); |
1170 | ret = 0; | 1170 | ret = 0; |
@@ -1374,11 +1374,10 @@ static int mtdswap_init(struct mtdswap_dev *d, unsigned int eblocks, | |||
1374 | goto revmap_fail; | 1374 | goto revmap_fail; |
1375 | 1375 | ||
1376 | eblk_bytes = sizeof(struct swap_eb)*d->eblks; | 1376 | eblk_bytes = sizeof(struct swap_eb)*d->eblks; |
1377 | d->eb_data = vmalloc(eblk_bytes); | 1377 | d->eb_data = vzalloc(eblk_bytes); |
1378 | if (!d->eb_data) | 1378 | if (!d->eb_data) |
1379 | goto eb_data_fail; | 1379 | goto eb_data_fail; |
1380 | 1380 | ||
1381 | memset(d->eb_data, 0, eblk_bytes); | ||
1382 | for (i = 0; i < pages; i++) | 1381 | for (i = 0; i < pages; i++) |
1383 | d->page_data[i] = BLOCK_UNDEF; | 1382 | d->page_data[i] = BLOCK_UNDEF; |
1384 | 1383 | ||
diff --git a/drivers/mtd/nand/Kconfig b/drivers/mtd/nand/Kconfig index dbfa0f7fb464..cce7b70824c3 100644 --- a/drivers/mtd/nand/Kconfig +++ b/drivers/mtd/nand/Kconfig | |||
@@ -83,16 +83,9 @@ config MTD_NAND_DENALI_SCRATCH_REG_ADDR | |||
83 | scratch register here to enable this feature. On Intel Moorestown | 83 | scratch register here to enable this feature. On Intel Moorestown |
84 | boards, the scratch register is at 0xFF108018. | 84 | boards, the scratch register is at 0xFF108018. |
85 | 85 | ||
86 | config MTD_NAND_EDB7312 | ||
87 | tristate "Support for Cirrus Logic EBD7312 evaluation board" | ||
88 | depends on ARCH_EDB7312 | ||
89 | help | ||
90 | This enables the driver for the Cirrus Logic EBD7312 evaluation | ||
91 | board to access the onboard NAND Flash. | ||
92 | |||
93 | config MTD_NAND_H1900 | 86 | config MTD_NAND_H1900 |
94 | tristate "iPAQ H1900 flash" | 87 | tristate "iPAQ H1900 flash" |
95 | depends on ARCH_PXA | 88 | depends on ARCH_PXA && BROKEN |
96 | help | 89 | help |
97 | This enables the driver for the iPAQ h1900 flash. | 90 | This enables the driver for the iPAQ h1900 flash. |
98 | 91 | ||
@@ -116,10 +109,11 @@ config MTD_NAND_AMS_DELTA | |||
116 | Support for NAND flash on Amstrad E3 (Delta). | 109 | Support for NAND flash on Amstrad E3 (Delta). |
117 | 110 | ||
118 | config MTD_NAND_OMAP2 | 111 | config MTD_NAND_OMAP2 |
119 | tristate "NAND Flash device on OMAP2 and OMAP3" | 112 | tristate "NAND Flash device on OMAP2, OMAP3 and OMAP4" |
120 | depends on ARM && (ARCH_OMAP2 || ARCH_OMAP3) | 113 | depends on ARM && (ARCH_OMAP2 || ARCH_OMAP3 || ARCH_OMAP4) |
121 | help | 114 | help |
122 | Support for NAND flash on Texas Instruments OMAP2 and OMAP3 platforms. | 115 | Support for NAND flash on Texas Instruments OMAP2, OMAP3 and OMAP4 |
116 | platforms. | ||
123 | 117 | ||
124 | config MTD_NAND_IDS | 118 | config MTD_NAND_IDS |
125 | tristate | 119 | tristate |
@@ -423,6 +417,19 @@ config MTD_NAND_NANDSIM | |||
423 | The simulator may simulate various NAND flash chips for the | 417 | The simulator may simulate various NAND flash chips for the |
424 | MTD nand layer. | 418 | MTD nand layer. |
425 | 419 | ||
420 | config MTD_NAND_GPMI_NAND | ||
421 | bool "GPMI NAND Flash Controller driver" | ||
422 | depends on MTD_NAND && (SOC_IMX23 || SOC_IMX28) | ||
423 | select MTD_PARTITIONS | ||
424 | select MTD_CMDLINE_PARTS | ||
425 | help | ||
426 | Enables NAND Flash support for IMX23 or IMX28. | ||
427 | The GPMI controller is very powerful, with the help of BCH | ||
428 | module, it can do the hardware ECC. The GPMI supports several | ||
429 | NAND flashs at the same time. The GPMI may conflicts with other | ||
430 | block, such as SD card. So pay attention to it when you enable | ||
431 | the GPMI. | ||
432 | |||
426 | config MTD_NAND_PLATFORM | 433 | config MTD_NAND_PLATFORM |
427 | tristate "Support for generic platform NAND driver" | 434 | tristate "Support for generic platform NAND driver" |
428 | help | 435 | help |
diff --git a/drivers/mtd/nand/Makefile b/drivers/mtd/nand/Makefile index 5745d831168e..618f4ba23699 100644 --- a/drivers/mtd/nand/Makefile +++ b/drivers/mtd/nand/Makefile | |||
@@ -13,7 +13,6 @@ obj-$(CONFIG_MTD_NAND_SPIA) += spia.o | |||
13 | obj-$(CONFIG_MTD_NAND_AMS_DELTA) += ams-delta.o | 13 | obj-$(CONFIG_MTD_NAND_AMS_DELTA) += ams-delta.o |
14 | obj-$(CONFIG_MTD_NAND_AUTCPU12) += autcpu12.o | 14 | obj-$(CONFIG_MTD_NAND_AUTCPU12) += autcpu12.o |
15 | obj-$(CONFIG_MTD_NAND_DENALI) += denali.o | 15 | obj-$(CONFIG_MTD_NAND_DENALI) += denali.o |
16 | obj-$(CONFIG_MTD_NAND_EDB7312) += edb7312.o | ||
17 | obj-$(CONFIG_MTD_NAND_AU1550) += au1550nd.o | 16 | obj-$(CONFIG_MTD_NAND_AU1550) += au1550nd.o |
18 | obj-$(CONFIG_MTD_NAND_BF5XX) += bf5xx_nand.o | 17 | obj-$(CONFIG_MTD_NAND_BF5XX) += bf5xx_nand.o |
19 | obj-$(CONFIG_MTD_NAND_PPCHAMELEONEVB) += ppchameleonevb.o | 18 | obj-$(CONFIG_MTD_NAND_PPCHAMELEONEVB) += ppchameleonevb.o |
@@ -49,5 +48,6 @@ obj-$(CONFIG_MTD_NAND_BCM_UMI) += bcm_umi_nand.o nand_bcm_umi.o | |||
49 | obj-$(CONFIG_MTD_NAND_MPC5121_NFC) += mpc5121_nfc.o | 48 | obj-$(CONFIG_MTD_NAND_MPC5121_NFC) += mpc5121_nfc.o |
50 | obj-$(CONFIG_MTD_NAND_RICOH) += r852.o | 49 | obj-$(CONFIG_MTD_NAND_RICOH) += r852.o |
51 | obj-$(CONFIG_MTD_NAND_JZ4740) += jz4740_nand.o | 50 | obj-$(CONFIG_MTD_NAND_JZ4740) += jz4740_nand.o |
51 | obj-$(CONFIG_MTD_NAND_GPMI_NAND) += gpmi-nand/ | ||
52 | 52 | ||
53 | nand-objs := nand_base.o nand_bbt.o | 53 | nand-objs := nand_base.o nand_bbt.o |
diff --git a/drivers/mtd/nand/atmel_nand.c b/drivers/mtd/nand/atmel_nand.c index 55da20ccc7a8..23e5d77c39fc 100644 --- a/drivers/mtd/nand/atmel_nand.c +++ b/drivers/mtd/nand/atmel_nand.c | |||
@@ -161,37 +161,6 @@ static int atmel_nand_device_ready(struct mtd_info *mtd) | |||
161 | !!host->board->rdy_pin_active_low; | 161 | !!host->board->rdy_pin_active_low; |
162 | } | 162 | } |
163 | 163 | ||
164 | /* | ||
165 | * Minimal-overhead PIO for data access. | ||
166 | */ | ||
167 | static void atmel_read_buf8(struct mtd_info *mtd, u8 *buf, int len) | ||
168 | { | ||
169 | struct nand_chip *nand_chip = mtd->priv; | ||
170 | |||
171 | __raw_readsb(nand_chip->IO_ADDR_R, buf, len); | ||
172 | } | ||
173 | |||
174 | static void atmel_read_buf16(struct mtd_info *mtd, u8 *buf, int len) | ||
175 | { | ||
176 | struct nand_chip *nand_chip = mtd->priv; | ||
177 | |||
178 | __raw_readsw(nand_chip->IO_ADDR_R, buf, len / 2); | ||
179 | } | ||
180 | |||
181 | static void atmel_write_buf8(struct mtd_info *mtd, const u8 *buf, int len) | ||
182 | { | ||
183 | struct nand_chip *nand_chip = mtd->priv; | ||
184 | |||
185 | __raw_writesb(nand_chip->IO_ADDR_W, buf, len); | ||
186 | } | ||
187 | |||
188 | static void atmel_write_buf16(struct mtd_info *mtd, const u8 *buf, int len) | ||
189 | { | ||
190 | struct nand_chip *nand_chip = mtd->priv; | ||
191 | |||
192 | __raw_writesw(nand_chip->IO_ADDR_W, buf, len / 2); | ||
193 | } | ||
194 | |||
195 | static void dma_complete_func(void *completion) | 164 | static void dma_complete_func(void *completion) |
196 | { | 165 | { |
197 | complete(completion); | 166 | complete(completion); |
@@ -266,33 +235,27 @@ err_buf: | |||
266 | static void atmel_read_buf(struct mtd_info *mtd, u8 *buf, int len) | 235 | static void atmel_read_buf(struct mtd_info *mtd, u8 *buf, int len) |
267 | { | 236 | { |
268 | struct nand_chip *chip = mtd->priv; | 237 | struct nand_chip *chip = mtd->priv; |
269 | struct atmel_nand_host *host = chip->priv; | ||
270 | 238 | ||
271 | if (use_dma && len > mtd->oobsize) | 239 | if (use_dma && len > mtd->oobsize) |
272 | /* only use DMA for bigger than oob size: better performances */ | 240 | /* only use DMA for bigger than oob size: better performances */ |
273 | if (atmel_nand_dma_op(mtd, buf, len, 1) == 0) | 241 | if (atmel_nand_dma_op(mtd, buf, len, 1) == 0) |
274 | return; | 242 | return; |
275 | 243 | ||
276 | if (host->board->bus_width_16) | 244 | /* if no DMA operation possible, use PIO */ |
277 | atmel_read_buf16(mtd, buf, len); | 245 | memcpy_fromio(buf, chip->IO_ADDR_R, len); |
278 | else | ||
279 | atmel_read_buf8(mtd, buf, len); | ||
280 | } | 246 | } |
281 | 247 | ||
282 | static void atmel_write_buf(struct mtd_info *mtd, const u8 *buf, int len) | 248 | static void atmel_write_buf(struct mtd_info *mtd, const u8 *buf, int len) |
283 | { | 249 | { |
284 | struct nand_chip *chip = mtd->priv; | 250 | struct nand_chip *chip = mtd->priv; |
285 | struct atmel_nand_host *host = chip->priv; | ||
286 | 251 | ||
287 | if (use_dma && len > mtd->oobsize) | 252 | if (use_dma && len > mtd->oobsize) |
288 | /* only use DMA for bigger than oob size: better performances */ | 253 | /* only use DMA for bigger than oob size: better performances */ |
289 | if (atmel_nand_dma_op(mtd, (void *)buf, len, 0) == 0) | 254 | if (atmel_nand_dma_op(mtd, (void *)buf, len, 0) == 0) |
290 | return; | 255 | return; |
291 | 256 | ||
292 | if (host->board->bus_width_16) | 257 | /* if no DMA operation possible, use PIO */ |
293 | atmel_write_buf16(mtd, buf, len); | 258 | memcpy_toio(chip->IO_ADDR_W, buf, len); |
294 | else | ||
295 | atmel_write_buf8(mtd, buf, len); | ||
296 | } | 259 | } |
297 | 260 | ||
298 | /* | 261 | /* |
@@ -481,10 +444,6 @@ static void atmel_nand_hwctl(struct mtd_info *mtd, int mode) | |||
481 | } | 444 | } |
482 | } | 445 | } |
483 | 446 | ||
484 | #ifdef CONFIG_MTD_CMDLINE_PARTS | ||
485 | static const char *part_probes[] = { "cmdlinepart", NULL }; | ||
486 | #endif | ||
487 | |||
488 | /* | 447 | /* |
489 | * Probe for the NAND device. | 448 | * Probe for the NAND device. |
490 | */ | 449 | */ |
@@ -496,8 +455,6 @@ static int __init atmel_nand_probe(struct platform_device *pdev) | |||
496 | struct resource *regs; | 455 | struct resource *regs; |
497 | struct resource *mem; | 456 | struct resource *mem; |
498 | int res; | 457 | int res; |
499 | struct mtd_partition *partitions = NULL; | ||
500 | int num_partitions = 0; | ||
501 | 458 | ||
502 | mem = platform_get_resource(pdev, IORESOURCE_MEM, 0); | 459 | mem = platform_get_resource(pdev, IORESOURCE_MEM, 0); |
503 | if (!mem) { | 460 | if (!mem) { |
@@ -583,7 +540,7 @@ static int __init atmel_nand_probe(struct platform_device *pdev) | |||
583 | 540 | ||
584 | if (on_flash_bbt) { | 541 | if (on_flash_bbt) { |
585 | printk(KERN_INFO "atmel_nand: Use On Flash BBT\n"); | 542 | printk(KERN_INFO "atmel_nand: Use On Flash BBT\n"); |
586 | nand_chip->options |= NAND_USE_FLASH_BBT; | 543 | nand_chip->bbt_options |= NAND_BBT_USE_FLASH; |
587 | } | 544 | } |
588 | 545 | ||
589 | if (!cpu_has_dma()) | 546 | if (!cpu_has_dma()) |
@@ -594,7 +551,7 @@ static int __init atmel_nand_probe(struct platform_device *pdev) | |||
594 | 551 | ||
595 | dma_cap_zero(mask); | 552 | dma_cap_zero(mask); |
596 | dma_cap_set(DMA_MEMCPY, mask); | 553 | dma_cap_set(DMA_MEMCPY, mask); |
597 | host->dma_chan = dma_request_channel(mask, 0, NULL); | 554 | host->dma_chan = dma_request_channel(mask, NULL, NULL); |
598 | if (!host->dma_chan) { | 555 | if (!host->dma_chan) { |
599 | dev_err(host->dev, "Failed to request DMA channel\n"); | 556 | dev_err(host->dev, "Failed to request DMA channel\n"); |
600 | use_dma = 0; | 557 | use_dma = 0; |
@@ -655,27 +612,12 @@ static int __init atmel_nand_probe(struct platform_device *pdev) | |||
655 | goto err_scan_tail; | 612 | goto err_scan_tail; |
656 | } | 613 | } |
657 | 614 | ||
658 | #ifdef CONFIG_MTD_CMDLINE_PARTS | ||
659 | mtd->name = "atmel_nand"; | 615 | mtd->name = "atmel_nand"; |
660 | num_partitions = parse_mtd_partitions(mtd, part_probes, | 616 | res = mtd_device_parse_register(mtd, NULL, 0, |
661 | &partitions, 0); | 617 | host->board->parts, host->board->num_parts); |
662 | #endif | ||
663 | if (num_partitions <= 0 && host->board->partition_info) | ||
664 | partitions = host->board->partition_info(mtd->size, | ||
665 | &num_partitions); | ||
666 | |||
667 | if ((!partitions) || (num_partitions == 0)) { | ||
668 | printk(KERN_ERR "atmel_nand: No partitions defined, or unsupported device.\n"); | ||
669 | res = -ENXIO; | ||
670 | goto err_no_partitions; | ||
671 | } | ||
672 | |||
673 | res = mtd_device_register(mtd, partitions, num_partitions); | ||
674 | if (!res) | 618 | if (!res) |
675 | return res; | 619 | return res; |
676 | 620 | ||
677 | err_no_partitions: | ||
678 | nand_release(mtd); | ||
679 | err_scan_tail: | 621 | err_scan_tail: |
680 | err_scan_ident: | 622 | err_scan_ident: |
681 | err_no_card: | 623 | err_no_card: |
diff --git a/drivers/mtd/nand/au1550nd.c b/drivers/mtd/nand/au1550nd.c index fa5736b9286c..7dd3700f2303 100644 --- a/drivers/mtd/nand/au1550nd.c +++ b/drivers/mtd/nand/au1550nd.c | |||
@@ -52,7 +52,7 @@ static const struct mtd_partition partition_info[] = { | |||
52 | * au_read_byte - read one byte from the chip | 52 | * au_read_byte - read one byte from the chip |
53 | * @mtd: MTD device structure | 53 | * @mtd: MTD device structure |
54 | * | 54 | * |
55 | * read function for 8bit buswith | 55 | * read function for 8bit buswidth |
56 | */ | 56 | */ |
57 | static u_char au_read_byte(struct mtd_info *mtd) | 57 | static u_char au_read_byte(struct mtd_info *mtd) |
58 | { | 58 | { |
@@ -67,7 +67,7 @@ static u_char au_read_byte(struct mtd_info *mtd) | |||
67 | * @mtd: MTD device structure | 67 | * @mtd: MTD device structure |
68 | * @byte: pointer to data byte to write | 68 | * @byte: pointer to data byte to write |
69 | * | 69 | * |
70 | * write function for 8it buswith | 70 | * write function for 8it buswidth |
71 | */ | 71 | */ |
72 | static void au_write_byte(struct mtd_info *mtd, u_char byte) | 72 | static void au_write_byte(struct mtd_info *mtd, u_char byte) |
73 | { | 73 | { |
@@ -77,11 +77,10 @@ static void au_write_byte(struct mtd_info *mtd, u_char byte) | |||
77 | } | 77 | } |
78 | 78 | ||
79 | /** | 79 | /** |
80 | * au_read_byte16 - read one byte endianess aware from the chip | 80 | * au_read_byte16 - read one byte endianness aware from the chip |
81 | * @mtd: MTD device structure | 81 | * @mtd: MTD device structure |
82 | * | 82 | * |
83 | * read function for 16bit buswith with | 83 | * read function for 16bit buswidth with endianness conversion |
84 | * endianess conversion | ||
85 | */ | 84 | */ |
86 | static u_char au_read_byte16(struct mtd_info *mtd) | 85 | static u_char au_read_byte16(struct mtd_info *mtd) |
87 | { | 86 | { |
@@ -92,12 +91,11 @@ static u_char au_read_byte16(struct mtd_info *mtd) | |||
92 | } | 91 | } |
93 | 92 | ||
94 | /** | 93 | /** |
95 | * au_write_byte16 - write one byte endianess aware to the chip | 94 | * au_write_byte16 - write one byte endianness aware to the chip |
96 | * @mtd: MTD device structure | 95 | * @mtd: MTD device structure |
97 | * @byte: pointer to data byte to write | 96 | * @byte: pointer to data byte to write |
98 | * | 97 | * |
99 | * write function for 16bit buswith with | 98 | * write function for 16bit buswidth with endianness conversion |
100 | * endianess conversion | ||
101 | */ | 99 | */ |
102 | static void au_write_byte16(struct mtd_info *mtd, u_char byte) | 100 | static void au_write_byte16(struct mtd_info *mtd, u_char byte) |
103 | { | 101 | { |
@@ -110,8 +108,7 @@ static void au_write_byte16(struct mtd_info *mtd, u_char byte) | |||
110 | * au_read_word - read one word from the chip | 108 | * au_read_word - read one word from the chip |
111 | * @mtd: MTD device structure | 109 | * @mtd: MTD device structure |
112 | * | 110 | * |
113 | * read function for 16bit buswith without | 111 | * read function for 16bit buswidth without endianness conversion |
114 | * endianess conversion | ||
115 | */ | 112 | */ |
116 | static u16 au_read_word(struct mtd_info *mtd) | 113 | static u16 au_read_word(struct mtd_info *mtd) |
117 | { | 114 | { |
@@ -127,7 +124,7 @@ static u16 au_read_word(struct mtd_info *mtd) | |||
127 | * @buf: data buffer | 124 | * @buf: data buffer |
128 | * @len: number of bytes to write | 125 | * @len: number of bytes to write |
129 | * | 126 | * |
130 | * write function for 8bit buswith | 127 | * write function for 8bit buswidth |
131 | */ | 128 | */ |
132 | static void au_write_buf(struct mtd_info *mtd, const u_char *buf, int len) | 129 | static void au_write_buf(struct mtd_info *mtd, const u_char *buf, int len) |
133 | { | 130 | { |
@@ -146,7 +143,7 @@ static void au_write_buf(struct mtd_info *mtd, const u_char *buf, int len) | |||
146 | * @buf: buffer to store date | 143 | * @buf: buffer to store date |
147 | * @len: number of bytes to read | 144 | * @len: number of bytes to read |
148 | * | 145 | * |
149 | * read function for 8bit buswith | 146 | * read function for 8bit buswidth |
150 | */ | 147 | */ |
151 | static void au_read_buf(struct mtd_info *mtd, u_char *buf, int len) | 148 | static void au_read_buf(struct mtd_info *mtd, u_char *buf, int len) |
152 | { | 149 | { |
@@ -165,7 +162,7 @@ static void au_read_buf(struct mtd_info *mtd, u_char *buf, int len) | |||
165 | * @buf: buffer containing the data to compare | 162 | * @buf: buffer containing the data to compare |
166 | * @len: number of bytes to compare | 163 | * @len: number of bytes to compare |
167 | * | 164 | * |
168 | * verify function for 8bit buswith | 165 | * verify function for 8bit buswidth |
169 | */ | 166 | */ |
170 | static int au_verify_buf(struct mtd_info *mtd, const u_char *buf, int len) | 167 | static int au_verify_buf(struct mtd_info *mtd, const u_char *buf, int len) |
171 | { | 168 | { |
@@ -187,7 +184,7 @@ static int au_verify_buf(struct mtd_info *mtd, const u_char *buf, int len) | |||
187 | * @buf: data buffer | 184 | * @buf: data buffer |
188 | * @len: number of bytes to write | 185 | * @len: number of bytes to write |
189 | * | 186 | * |
190 | * write function for 16bit buswith | 187 | * write function for 16bit buswidth |
191 | */ | 188 | */ |
192 | static void au_write_buf16(struct mtd_info *mtd, const u_char *buf, int len) | 189 | static void au_write_buf16(struct mtd_info *mtd, const u_char *buf, int len) |
193 | { | 190 | { |
@@ -209,7 +206,7 @@ static void au_write_buf16(struct mtd_info *mtd, const u_char *buf, int len) | |||
209 | * @buf: buffer to store date | 206 | * @buf: buffer to store date |
210 | * @len: number of bytes to read | 207 | * @len: number of bytes to read |
211 | * | 208 | * |
212 | * read function for 16bit buswith | 209 | * read function for 16bit buswidth |
213 | */ | 210 | */ |
214 | static void au_read_buf16(struct mtd_info *mtd, u_char *buf, int len) | 211 | static void au_read_buf16(struct mtd_info *mtd, u_char *buf, int len) |
215 | { | 212 | { |
@@ -230,7 +227,7 @@ static void au_read_buf16(struct mtd_info *mtd, u_char *buf, int len) | |||
230 | * @buf: buffer containing the data to compare | 227 | * @buf: buffer containing the data to compare |
231 | * @len: number of bytes to compare | 228 | * @len: number of bytes to compare |
232 | * | 229 | * |
233 | * verify function for 16bit buswith | 230 | * verify function for 16bit buswidth |
234 | */ | 231 | */ |
235 | static int au_verify_buf16(struct mtd_info *mtd, const u_char *buf, int len) | 232 | static int au_verify_buf16(struct mtd_info *mtd, const u_char *buf, int len) |
236 | { | 233 | { |
diff --git a/drivers/mtd/nand/autcpu12.c b/drivers/mtd/nand/autcpu12.c index eddc9a224985..2e42ec2e8ff4 100644 --- a/drivers/mtd/nand/autcpu12.c +++ b/drivers/mtd/nand/autcpu12.c | |||
@@ -172,9 +172,9 @@ static int __init autcpu12_init(void) | |||
172 | 172 | ||
173 | /* Enable the following for a flash based bad block table */ | 173 | /* Enable the following for a flash based bad block table */ |
174 | /* | 174 | /* |
175 | this->options = NAND_USE_FLASH_BBT; | 175 | this->bbt_options = NAND_BBT_USE_FLASH; |
176 | */ | 176 | */ |
177 | this->options = NAND_USE_FLASH_BBT; | 177 | this->bbt_options = NAND_BBT_USE_FLASH; |
178 | 178 | ||
179 | /* Scan to find existence of the device */ | 179 | /* Scan to find existence of the device */ |
180 | if (nand_scan(autcpu12_mtd, 1)) { | 180 | if (nand_scan(autcpu12_mtd, 1)) { |
diff --git a/drivers/mtd/nand/bcm_umi_nand.c b/drivers/mtd/nand/bcm_umi_nand.c index 8c569e454dc5..46b58d672847 100644 --- a/drivers/mtd/nand/bcm_umi_nand.c +++ b/drivers/mtd/nand/bcm_umi_nand.c | |||
@@ -52,8 +52,6 @@ | |||
52 | static const __devinitconst char gBanner[] = KERN_INFO \ | 52 | static const __devinitconst char gBanner[] = KERN_INFO \ |
53 | "BCM UMI MTD NAND Driver: 1.00\n"; | 53 | "BCM UMI MTD NAND Driver: 1.00\n"; |
54 | 54 | ||
55 | const char *part_probes[] = { "cmdlinepart", NULL }; | ||
56 | |||
57 | #if NAND_ECC_BCH | 55 | #if NAND_ECC_BCH |
58 | static uint8_t scan_ff_pattern[] = { 0xff }; | 56 | static uint8_t scan_ff_pattern[] = { 0xff }; |
59 | 57 | ||
@@ -376,16 +374,18 @@ static int __devinit bcm_umi_nand_probe(struct platform_device *pdev) | |||
376 | 374 | ||
377 | r = platform_get_resource(pdev, IORESOURCE_MEM, 0); | 375 | r = platform_get_resource(pdev, IORESOURCE_MEM, 0); |
378 | 376 | ||
379 | if (!r) | 377 | if (!r) { |
380 | return -ENXIO; | 378 | err = -ENXIO; |
379 | goto out_free; | ||
380 | } | ||
381 | 381 | ||
382 | /* map physical address */ | 382 | /* map physical address */ |
383 | bcm_umi_io_base = ioremap(r->start, resource_size(r)); | 383 | bcm_umi_io_base = ioremap(r->start, resource_size(r)); |
384 | 384 | ||
385 | if (!bcm_umi_io_base) { | 385 | if (!bcm_umi_io_base) { |
386 | printk(KERN_ERR "ioremap to access BCM UMI NAND chip failed\n"); | 386 | printk(KERN_ERR "ioremap to access BCM UMI NAND chip failed\n"); |
387 | kfree(board_mtd); | 387 | err = -EIO; |
388 | return -EIO; | 388 | goto out_free; |
389 | } | 389 | } |
390 | 390 | ||
391 | /* Get pointer to private data */ | 391 | /* Get pointer to private data */ |
@@ -401,9 +401,8 @@ static int __devinit bcm_umi_nand_probe(struct platform_device *pdev) | |||
401 | /* Initialize the NAND hardware. */ | 401 | /* Initialize the NAND hardware. */ |
402 | if (bcm_umi_nand_inithw() < 0) { | 402 | if (bcm_umi_nand_inithw() < 0) { |
403 | printk(KERN_ERR "BCM UMI NAND chip could not be initialized\n"); | 403 | printk(KERN_ERR "BCM UMI NAND chip could not be initialized\n"); |
404 | iounmap(bcm_umi_io_base); | 404 | err = -EIO; |
405 | kfree(board_mtd); | 405 | goto out_unmap; |
406 | return -EIO; | ||
407 | } | 406 | } |
408 | 407 | ||
409 | /* Set address of NAND IO lines */ | 408 | /* Set address of NAND IO lines */ |
@@ -436,7 +435,7 @@ static int __devinit bcm_umi_nand_probe(struct platform_device *pdev) | |||
436 | #if USE_DMA | 435 | #if USE_DMA |
437 | err = nand_dma_init(); | 436 | err = nand_dma_init(); |
438 | if (err != 0) | 437 | if (err != 0) |
439 | return err; | 438 | goto out_unmap; |
440 | #endif | 439 | #endif |
441 | 440 | ||
442 | /* Figure out the size of the device that we have. | 441 | /* Figure out the size of the device that we have. |
@@ -447,9 +446,7 @@ static int __devinit bcm_umi_nand_probe(struct platform_device *pdev) | |||
447 | err = nand_scan_ident(board_mtd, 1, NULL); | 446 | err = nand_scan_ident(board_mtd, 1, NULL); |
448 | if (err) { | 447 | if (err) { |
449 | printk(KERN_ERR "nand_scan failed: %d\n", err); | 448 | printk(KERN_ERR "nand_scan failed: %d\n", err); |
450 | iounmap(bcm_umi_io_base); | 449 | goto out_unmap; |
451 | kfree(board_mtd); | ||
452 | return err; | ||
453 | } | 450 | } |
454 | 451 | ||
455 | /* Now that we know the nand size, we can setup the ECC layout */ | 452 | /* Now that we know the nand size, we can setup the ECC layout */ |
@@ -468,13 +465,14 @@ static int __devinit bcm_umi_nand_probe(struct platform_device *pdev) | |||
468 | { | 465 | { |
469 | printk(KERN_ERR "NAND - Unrecognized pagesize: %d\n", | 466 | printk(KERN_ERR "NAND - Unrecognized pagesize: %d\n", |
470 | board_mtd->writesize); | 467 | board_mtd->writesize); |
471 | return -EINVAL; | 468 | err = -EINVAL; |
469 | goto out_unmap; | ||
472 | } | 470 | } |
473 | } | 471 | } |
474 | 472 | ||
475 | #if NAND_ECC_BCH | 473 | #if NAND_ECC_BCH |
476 | if (board_mtd->writesize > 512) { | 474 | if (board_mtd->writesize > 512) { |
477 | if (this->options & NAND_USE_FLASH_BBT) | 475 | if (this->bbt_options & NAND_BBT_USE_FLASH) |
478 | largepage_bbt.options = NAND_BBT_SCAN2NDPAGE; | 476 | largepage_bbt.options = NAND_BBT_SCAN2NDPAGE; |
479 | this->badblock_pattern = &largepage_bbt; | 477 | this->badblock_pattern = &largepage_bbt; |
480 | } | 478 | } |
@@ -485,33 +483,20 @@ static int __devinit bcm_umi_nand_probe(struct platform_device *pdev) | |||
485 | err = nand_scan_tail(board_mtd); | 483 | err = nand_scan_tail(board_mtd); |
486 | if (err) { | 484 | if (err) { |
487 | printk(KERN_ERR "nand_scan failed: %d\n", err); | 485 | printk(KERN_ERR "nand_scan failed: %d\n", err); |
488 | iounmap(bcm_umi_io_base); | 486 | goto out_unmap; |
489 | kfree(board_mtd); | ||
490 | return err; | ||
491 | } | 487 | } |
492 | 488 | ||
493 | /* Register the partitions */ | 489 | /* Register the partitions */ |
494 | { | 490 | board_mtd->name = "bcm_umi-nand"; |
495 | int nr_partitions; | 491 | mtd_device_parse_register(board_mtd, NULL, 0, NULL, 0); |
496 | struct mtd_partition *partition_info; | ||
497 | |||
498 | board_mtd->name = "bcm_umi-nand"; | ||
499 | nr_partitions = | ||
500 | parse_mtd_partitions(board_mtd, part_probes, | ||
501 | &partition_info, 0); | ||
502 | |||
503 | if (nr_partitions <= 0) { | ||
504 | printk(KERN_ERR "BCM UMI NAND: Too few partitions - %d\n", | ||
505 | nr_partitions); | ||
506 | iounmap(bcm_umi_io_base); | ||
507 | kfree(board_mtd); | ||
508 | return -EIO; | ||
509 | } | ||
510 | mtd_device_register(board_mtd, partition_info, nr_partitions); | ||
511 | } | ||
512 | 492 | ||
513 | /* Return happy */ | 493 | /* Return happy */ |
514 | return 0; | 494 | return 0; |
495 | out_unmap: | ||
496 | iounmap(bcm_umi_io_base); | ||
497 | out_free: | ||
498 | kfree(board_mtd); | ||
499 | return err; | ||
515 | } | 500 | } |
516 | 501 | ||
517 | static int bcm_umi_nand_remove(struct platform_device *pdev) | 502 | static int bcm_umi_nand_remove(struct platform_device *pdev) |
diff --git a/drivers/mtd/nand/cafe_nand.c b/drivers/mtd/nand/cafe_nand.c index 7c8df837d3b8..72d3f23490c5 100644 --- a/drivers/mtd/nand/cafe_nand.c +++ b/drivers/mtd/nand/cafe_nand.c | |||
@@ -58,7 +58,6 @@ | |||
58 | 58 | ||
59 | struct cafe_priv { | 59 | struct cafe_priv { |
60 | struct nand_chip nand; | 60 | struct nand_chip nand; |
61 | struct mtd_partition *parts; | ||
62 | struct pci_dev *pdev; | 61 | struct pci_dev *pdev; |
63 | void __iomem *mmio; | 62 | void __iomem *mmio; |
64 | struct rs_control *rs; | 63 | struct rs_control *rs; |
@@ -372,7 +371,7 @@ static int cafe_nand_read_oob(struct mtd_info *mtd, struct nand_chip *chip, | |||
372 | return 1; | 371 | return 1; |
373 | } | 372 | } |
374 | /** | 373 | /** |
375 | * cafe_nand_read_page_syndrome - {REPLACABLE] hardware ecc syndrom based page read | 374 | * cafe_nand_read_page_syndrome - [REPLACEABLE] hardware ecc syndrome based page read |
376 | * @mtd: mtd info structure | 375 | * @mtd: mtd info structure |
377 | * @chip: nand chip info structure | 376 | * @chip: nand chip info structure |
378 | * @buf: buffer to store read data | 377 | * @buf: buffer to store read data |
@@ -631,8 +630,6 @@ static int __devinit cafe_nand_probe(struct pci_dev *pdev, | |||
631 | struct cafe_priv *cafe; | 630 | struct cafe_priv *cafe; |
632 | uint32_t ctrl; | 631 | uint32_t ctrl; |
633 | int err = 0; | 632 | int err = 0; |
634 | struct mtd_partition *parts; | ||
635 | int nr_parts; | ||
636 | 633 | ||
637 | /* Very old versions shared the same PCI ident for all three | 634 | /* Very old versions shared the same PCI ident for all three |
638 | functions on the chip. Verify the class too... */ | 635 | functions on the chip. Verify the class too... */ |
@@ -687,7 +684,8 @@ static int __devinit cafe_nand_probe(struct pci_dev *pdev, | |||
687 | cafe->nand.chip_delay = 0; | 684 | cafe->nand.chip_delay = 0; |
688 | 685 | ||
689 | /* Enable the following for a flash based bad block table */ | 686 | /* Enable the following for a flash based bad block table */ |
690 | cafe->nand.options = NAND_USE_FLASH_BBT | NAND_NO_AUTOINCR | NAND_OWN_BUFFERS; | 687 | cafe->nand.bbt_options = NAND_BBT_USE_FLASH; |
688 | cafe->nand.options = NAND_NO_AUTOINCR | NAND_OWN_BUFFERS; | ||
691 | 689 | ||
692 | if (skipbbt) { | 690 | if (skipbbt) { |
693 | cafe->nand.options |= NAND_SKIP_BBTSCAN; | 691 | cafe->nand.options |= NAND_SKIP_BBTSCAN; |
@@ -800,18 +798,9 @@ static int __devinit cafe_nand_probe(struct pci_dev *pdev, | |||
800 | 798 | ||
801 | pci_set_drvdata(pdev, mtd); | 799 | pci_set_drvdata(pdev, mtd); |
802 | 800 | ||
803 | /* We register the whole device first, separate from the partitions */ | ||
804 | mtd_device_register(mtd, NULL, 0); | ||
805 | |||
806 | #ifdef CONFIG_MTD_CMDLINE_PARTS | ||
807 | mtd->name = "cafe_nand"; | 801 | mtd->name = "cafe_nand"; |
808 | #endif | 802 | mtd_device_parse_register(mtd, part_probes, 0, NULL, 0); |
809 | nr_parts = parse_mtd_partitions(mtd, part_probes, &parts, 0); | 803 | |
810 | if (nr_parts > 0) { | ||
811 | cafe->parts = parts; | ||
812 | dev_info(&cafe->pdev->dev, "%d partitions found\n", nr_parts); | ||
813 | mtd_device_register(mtd, parts, nr_parts); | ||
814 | } | ||
815 | goto out; | 804 | goto out; |
816 | 805 | ||
817 | out_irq: | 806 | out_irq: |
diff --git a/drivers/mtd/nand/cmx270_nand.c b/drivers/mtd/nand/cmx270_nand.c index be33b0f4634d..737ef9a04fdb 100644 --- a/drivers/mtd/nand/cmx270_nand.c +++ b/drivers/mtd/nand/cmx270_nand.c | |||
@@ -51,8 +51,6 @@ static struct mtd_partition partition_info[] = { | |||
51 | }; | 51 | }; |
52 | #define NUM_PARTITIONS (ARRAY_SIZE(partition_info)) | 52 | #define NUM_PARTITIONS (ARRAY_SIZE(partition_info)) |
53 | 53 | ||
54 | const char *part_probes[] = { "cmdlinepart", NULL }; | ||
55 | |||
56 | static u_char cmx270_read_byte(struct mtd_info *mtd) | 54 | static u_char cmx270_read_byte(struct mtd_info *mtd) |
57 | { | 55 | { |
58 | struct nand_chip *this = mtd->priv; | 56 | struct nand_chip *this = mtd->priv; |
@@ -152,9 +150,6 @@ static int cmx270_device_ready(struct mtd_info *mtd) | |||
152 | static int __init cmx270_init(void) | 150 | static int __init cmx270_init(void) |
153 | { | 151 | { |
154 | struct nand_chip *this; | 152 | struct nand_chip *this; |
155 | const char *part_type; | ||
156 | struct mtd_partition *mtd_parts; | ||
157 | int mtd_parts_nb = 0; | ||
158 | int ret; | 153 | int ret; |
159 | 154 | ||
160 | if (!(machine_is_armcore() && cpu_is_pxa27x())) | 155 | if (!(machine_is_armcore() && cpu_is_pxa27x())) |
@@ -223,23 +218,9 @@ static int __init cmx270_init(void) | |||
223 | goto err_scan; | 218 | goto err_scan; |
224 | } | 219 | } |
225 | 220 | ||
226 | #ifdef CONFIG_MTD_CMDLINE_PARTS | ||
227 | mtd_parts_nb = parse_mtd_partitions(cmx270_nand_mtd, part_probes, | ||
228 | &mtd_parts, 0); | ||
229 | if (mtd_parts_nb > 0) | ||
230 | part_type = "command line"; | ||
231 | else | ||
232 | mtd_parts_nb = 0; | ||
233 | #endif | ||
234 | if (!mtd_parts_nb) { | ||
235 | mtd_parts = partition_info; | ||
236 | mtd_parts_nb = NUM_PARTITIONS; | ||
237 | part_type = "static"; | ||
238 | } | ||
239 | |||
240 | /* Register the partitions */ | 221 | /* Register the partitions */ |
241 | pr_notice("Using %s partition definition\n", part_type); | 222 | ret = mtd_device_parse_register(cmx270_nand_mtd, NULL, 0, |
242 | ret = mtd_device_register(cmx270_nand_mtd, mtd_parts, mtd_parts_nb); | 223 | partition_info, NUM_PARTITIONS); |
243 | if (ret) | 224 | if (ret) |
244 | goto err_scan; | 225 | goto err_scan; |
245 | 226 | ||
diff --git a/drivers/mtd/nand/cs553x_nand.c b/drivers/mtd/nand/cs553x_nand.c index f59ad1f2d5db..414afa793563 100644 --- a/drivers/mtd/nand/cs553x_nand.c +++ b/drivers/mtd/nand/cs553x_nand.c | |||
@@ -239,7 +239,8 @@ static int __init cs553x_init_one(int cs, int mmio, unsigned long adr) | |||
239 | this->ecc.correct = nand_correct_data; | 239 | this->ecc.correct = nand_correct_data; |
240 | 240 | ||
241 | /* Enable the following for a flash based bad block table */ | 241 | /* Enable the following for a flash based bad block table */ |
242 | this->options = NAND_USE_FLASH_BBT | NAND_NO_AUTOINCR; | 242 | this->bbt_options = NAND_BBT_USE_FLASH; |
243 | this->options = NAND_NO_AUTOINCR; | ||
243 | 244 | ||
244 | /* Scan to find existence of the device */ | 245 | /* Scan to find existence of the device */ |
245 | if (nand_scan(new_mtd, 1)) { | 246 | if (nand_scan(new_mtd, 1)) { |
@@ -277,15 +278,11 @@ static int is_geode(void) | |||
277 | return 0; | 278 | return 0; |
278 | } | 279 | } |
279 | 280 | ||
280 | static const char *part_probes[] = { "cmdlinepart", NULL }; | ||
281 | |||
282 | static int __init cs553x_init(void) | 281 | static int __init cs553x_init(void) |
283 | { | 282 | { |
284 | int err = -ENXIO; | 283 | int err = -ENXIO; |
285 | int i; | 284 | int i; |
286 | uint64_t val; | 285 | uint64_t val; |
287 | int mtd_parts_nb = 0; | ||
288 | struct mtd_partition *mtd_parts = NULL; | ||
289 | 286 | ||
290 | /* If the CPU isn't a Geode GX or LX, abort */ | 287 | /* If the CPU isn't a Geode GX or LX, abort */ |
291 | if (!is_geode()) | 288 | if (!is_geode()) |
@@ -315,13 +312,9 @@ static int __init cs553x_init(void) | |||
315 | do mtdconcat etc. if we want to. */ | 312 | do mtdconcat etc. if we want to. */ |
316 | for (i = 0; i < NR_CS553X_CONTROLLERS; i++) { | 313 | for (i = 0; i < NR_CS553X_CONTROLLERS; i++) { |
317 | if (cs553x_mtd[i]) { | 314 | if (cs553x_mtd[i]) { |
318 | |||
319 | /* If any devices registered, return success. Else the last error. */ | 315 | /* If any devices registered, return success. Else the last error. */ |
320 | mtd_parts_nb = parse_mtd_partitions(cs553x_mtd[i], part_probes, &mtd_parts, 0); | 316 | mtd_device_parse_register(cs553x_mtd[i], NULL, 0, |
321 | if (mtd_parts_nb > 0) | 317 | NULL, 0); |
322 | printk(KERN_NOTICE "Using command line partition definition\n"); | ||
323 | mtd_device_register(cs553x_mtd[i], mtd_parts, | ||
324 | mtd_parts_nb); | ||
325 | err = 0; | 318 | err = 0; |
326 | } | 319 | } |
327 | } | 320 | } |
diff --git a/drivers/mtd/nand/davinci_nand.c b/drivers/mtd/nand/davinci_nand.c index 1f34951ae1a7..c153e1f77f90 100644 --- a/drivers/mtd/nand/davinci_nand.c +++ b/drivers/mtd/nand/davinci_nand.c | |||
@@ -57,7 +57,6 @@ struct davinci_nand_info { | |||
57 | 57 | ||
58 | struct device *dev; | 58 | struct device *dev; |
59 | struct clk *clk; | 59 | struct clk *clk; |
60 | bool partitioned; | ||
61 | 60 | ||
62 | bool is_readmode; | 61 | bool is_readmode; |
63 | 62 | ||
@@ -530,8 +529,6 @@ static int __init nand_davinci_probe(struct platform_device *pdev) | |||
530 | int ret; | 529 | int ret; |
531 | uint32_t val; | 530 | uint32_t val; |
532 | nand_ecc_modes_t ecc_mode; | 531 | nand_ecc_modes_t ecc_mode; |
533 | struct mtd_partition *mtd_parts = NULL; | ||
534 | int mtd_parts_nb = 0; | ||
535 | 532 | ||
536 | /* insist on board-specific configuration */ | 533 | /* insist on board-specific configuration */ |
537 | if (!pdata) | 534 | if (!pdata) |
@@ -581,7 +578,9 @@ static int __init nand_davinci_probe(struct platform_device *pdev) | |||
581 | info->chip.chip_delay = 0; | 578 | info->chip.chip_delay = 0; |
582 | info->chip.select_chip = nand_davinci_select_chip; | 579 | info->chip.select_chip = nand_davinci_select_chip; |
583 | 580 | ||
584 | /* options such as NAND_USE_FLASH_BBT or 16-bit widths */ | 581 | /* options such as NAND_BBT_USE_FLASH */ |
582 | info->chip.bbt_options = pdata->bbt_options; | ||
583 | /* options such as 16-bit widths */ | ||
585 | info->chip.options = pdata->options; | 584 | info->chip.options = pdata->options; |
586 | info->chip.bbt_td = pdata->bbt_td; | 585 | info->chip.bbt_td = pdata->bbt_td; |
587 | info->chip.bbt_md = pdata->bbt_md; | 586 | info->chip.bbt_md = pdata->bbt_md; |
@@ -751,33 +750,8 @@ syndrome_done: | |||
751 | if (ret < 0) | 750 | if (ret < 0) |
752 | goto err_scan; | 751 | goto err_scan; |
753 | 752 | ||
754 | if (mtd_has_cmdlinepart()) { | 753 | ret = mtd_device_parse_register(&info->mtd, NULL, 0, |
755 | static const char *probes[] __initconst = { | 754 | pdata->parts, pdata->nr_parts); |
756 | "cmdlinepart", NULL | ||
757 | }; | ||
758 | |||
759 | mtd_parts_nb = parse_mtd_partitions(&info->mtd, probes, | ||
760 | &mtd_parts, 0); | ||
761 | } | ||
762 | |||
763 | if (mtd_parts_nb <= 0) { | ||
764 | mtd_parts = pdata->parts; | ||
765 | mtd_parts_nb = pdata->nr_parts; | ||
766 | } | ||
767 | |||
768 | /* Register any partitions */ | ||
769 | if (mtd_parts_nb > 0) { | ||
770 | ret = mtd_device_register(&info->mtd, mtd_parts, | ||
771 | mtd_parts_nb); | ||
772 | if (ret == 0) | ||
773 | info->partitioned = true; | ||
774 | } | ||
775 | |||
776 | /* If there's no partition info, just package the whole chip | ||
777 | * as a single MTD device. | ||
778 | */ | ||
779 | if (!info->partitioned) | ||
780 | ret = mtd_device_register(&info->mtd, NULL, 0) ? -ENODEV : 0; | ||
781 | 755 | ||
782 | if (ret < 0) | 756 | if (ret < 0) |
783 | goto err_scan; | 757 | goto err_scan; |
@@ -816,9 +790,6 @@ err_nomem: | |||
816 | static int __exit nand_davinci_remove(struct platform_device *pdev) | 790 | static int __exit nand_davinci_remove(struct platform_device *pdev) |
817 | { | 791 | { |
818 | struct davinci_nand_info *info = platform_get_drvdata(pdev); | 792 | struct davinci_nand_info *info = platform_get_drvdata(pdev); |
819 | int status; | ||
820 | |||
821 | status = mtd_device_unregister(&info->mtd); | ||
822 | 793 | ||
823 | spin_lock_irq(&davinci_nand_lock); | 794 | spin_lock_irq(&davinci_nand_lock); |
824 | if (info->chip.ecc.mode == NAND_ECC_HW_SYNDROME) | 795 | if (info->chip.ecc.mode == NAND_ECC_HW_SYNDROME) |
diff --git a/drivers/mtd/nand/denali.c b/drivers/mtd/nand/denali.c index d5276218945f..3984d488f9ab 100644 --- a/drivers/mtd/nand/denali.c +++ b/drivers/mtd/nand/denali.c | |||
@@ -1346,6 +1346,7 @@ static void denali_hw_init(struct denali_nand_info *denali) | |||
1346 | * */ | 1346 | * */ |
1347 | denali->bbtskipbytes = ioread32(denali->flash_reg + | 1347 | denali->bbtskipbytes = ioread32(denali->flash_reg + |
1348 | SPARE_AREA_SKIP_BYTES); | 1348 | SPARE_AREA_SKIP_BYTES); |
1349 | detect_max_banks(denali); | ||
1349 | denali_nand_reset(denali); | 1350 | denali_nand_reset(denali); |
1350 | iowrite32(0x0F, denali->flash_reg + RB_PIN_ENABLED); | 1351 | iowrite32(0x0F, denali->flash_reg + RB_PIN_ENABLED); |
1351 | iowrite32(CHIP_EN_DONT_CARE__FLAG, | 1352 | iowrite32(CHIP_EN_DONT_CARE__FLAG, |
@@ -1356,7 +1357,6 @@ static void denali_hw_init(struct denali_nand_info *denali) | |||
1356 | /* Should set value for these registers when init */ | 1357 | /* Should set value for these registers when init */ |
1357 | iowrite32(0, denali->flash_reg + TWO_ROW_ADDR_CYCLES); | 1358 | iowrite32(0, denali->flash_reg + TWO_ROW_ADDR_CYCLES); |
1358 | iowrite32(1, denali->flash_reg + ECC_ENABLE); | 1359 | iowrite32(1, denali->flash_reg + ECC_ENABLE); |
1359 | detect_max_banks(denali); | ||
1360 | denali_nand_timing_set(denali); | 1360 | denali_nand_timing_set(denali); |
1361 | denali_irq_init(denali); | 1361 | denali_irq_init(denali); |
1362 | } | 1362 | } |
@@ -1577,7 +1577,8 @@ static int denali_pci_probe(struct pci_dev *dev, const struct pci_device_id *id) | |||
1577 | denali->nand.bbt_md = &bbt_mirror_descr; | 1577 | denali->nand.bbt_md = &bbt_mirror_descr; |
1578 | 1578 | ||
1579 | /* skip the scan for now until we have OOB read and write support */ | 1579 | /* skip the scan for now until we have OOB read and write support */ |
1580 | denali->nand.options |= NAND_USE_FLASH_BBT | NAND_SKIP_BBTSCAN; | 1580 | denali->nand.bbt_options |= NAND_BBT_USE_FLASH; |
1581 | denali->nand.options |= NAND_SKIP_BBTSCAN; | ||
1581 | denali->nand.ecc.mode = NAND_ECC_HW_SYNDROME; | 1582 | denali->nand.ecc.mode = NAND_ECC_HW_SYNDROME; |
1582 | 1583 | ||
1583 | /* Denali Controller only support 15bit and 8bit ECC in MRST, | 1584 | /* Denali Controller only support 15bit and 8bit ECC in MRST, |
@@ -1676,7 +1677,6 @@ static void denali_pci_remove(struct pci_dev *dev) | |||
1676 | struct denali_nand_info *denali = pci_get_drvdata(dev); | 1677 | struct denali_nand_info *denali = pci_get_drvdata(dev); |
1677 | 1678 | ||
1678 | nand_release(&denali->mtd); | 1679 | nand_release(&denali->mtd); |
1679 | mtd_device_unregister(&denali->mtd); | ||
1680 | 1680 | ||
1681 | denali_irq_cleanup(dev->irq, denali); | 1681 | denali_irq_cleanup(dev->irq, denali); |
1682 | 1682 | ||
diff --git a/drivers/mtd/nand/diskonchip.c b/drivers/mtd/nand/diskonchip.c index e1b84cb90f0d..5780dbab6113 100644 --- a/drivers/mtd/nand/diskonchip.c +++ b/drivers/mtd/nand/diskonchip.c | |||
@@ -133,7 +133,7 @@ static struct rs_control *rs_decoder; | |||
133 | 133 | ||
134 | /* | 134 | /* |
135 | * The HW decoder in the DoC ASIC's provides us a error syndrome, | 135 | * The HW decoder in the DoC ASIC's provides us a error syndrome, |
136 | * which we must convert to a standard syndrom usable by the generic | 136 | * which we must convert to a standard syndrome usable by the generic |
137 | * Reed-Solomon library code. | 137 | * Reed-Solomon library code. |
138 | * | 138 | * |
139 | * Fabrice Bellard figured this out in the old docecc code. I added | 139 | * Fabrice Bellard figured this out in the old docecc code. I added |
@@ -154,7 +154,7 @@ static int doc_ecc_decode(struct rs_control *rs, uint8_t *data, uint8_t *ecc) | |||
154 | ds[3] = ((ecc[3] & 0xc0) >> 6) | ((ecc[0] & 0xff) << 2); | 154 | ds[3] = ((ecc[3] & 0xc0) >> 6) | ((ecc[0] & 0xff) << 2); |
155 | parity = ecc[1]; | 155 | parity = ecc[1]; |
156 | 156 | ||
157 | /* Initialize the syndrom buffer */ | 157 | /* Initialize the syndrome buffer */ |
158 | for (i = 0; i < NROOTS; i++) | 158 | for (i = 0; i < NROOTS; i++) |
159 | s[i] = ds[0]; | 159 | s[i] = ds[0]; |
160 | /* | 160 | /* |
@@ -1032,7 +1032,7 @@ static int doc200x_correct_data(struct mtd_info *mtd, u_char *dat, | |||
1032 | WriteDOC(DOC_ECC_DIS, docptr, Mplus_ECCConf); | 1032 | WriteDOC(DOC_ECC_DIS, docptr, Mplus_ECCConf); |
1033 | else | 1033 | else |
1034 | WriteDOC(DOC_ECC_DIS, docptr, ECCConf); | 1034 | WriteDOC(DOC_ECC_DIS, docptr, ECCConf); |
1035 | if (no_ecc_failures && (ret == -EBADMSG)) { | 1035 | if (no_ecc_failures && mtd_is_eccerr(ret)) { |
1036 | printk(KERN_ERR "suppressing ECC failure\n"); | 1036 | printk(KERN_ERR "suppressing ECC failure\n"); |
1037 | ret = 0; | 1037 | ret = 0; |
1038 | } | 1038 | } |
@@ -1653,7 +1653,7 @@ static int __init doc_probe(unsigned long physadr) | |||
1653 | nand->ecc.mode = NAND_ECC_HW_SYNDROME; | 1653 | nand->ecc.mode = NAND_ECC_HW_SYNDROME; |
1654 | nand->ecc.size = 512; | 1654 | nand->ecc.size = 512; |
1655 | nand->ecc.bytes = 6; | 1655 | nand->ecc.bytes = 6; |
1656 | nand->options = NAND_USE_FLASH_BBT; | 1656 | nand->bbt_options = NAND_BBT_USE_FLASH; |
1657 | 1657 | ||
1658 | doc->physadr = physadr; | 1658 | doc->physadr = physadr; |
1659 | doc->virtadr = virtadr; | 1659 | doc->virtadr = virtadr; |
diff --git a/drivers/mtd/nand/edb7312.c b/drivers/mtd/nand/edb7312.c deleted file mode 100644 index 8400d0f6dada..000000000000 --- a/drivers/mtd/nand/edb7312.c +++ /dev/null | |||
@@ -1,203 +0,0 @@ | |||
1 | /* | ||
2 | * drivers/mtd/nand/edb7312.c | ||
3 | * | ||
4 | * Copyright (C) 2002 Marius Gröger (mag@sysgo.de) | ||
5 | * | ||
6 | * Derived from drivers/mtd/nand/autcpu12.c | ||
7 | * Copyright (c) 2001 Thomas Gleixner (gleixner@autronix.de) | ||
8 | * | ||
9 | * This program is free software; you can redistribute it and/or modify | ||
10 | * it under the terms of the GNU General Public License version 2 as | ||
11 | * published by the Free Software Foundation. | ||
12 | * | ||
13 | * Overview: | ||
14 | * This is a device driver for the NAND flash device found on the | ||
15 | * CLEP7312 board which utilizes the Toshiba TC58V64AFT part. This is | ||
16 | * a 64Mibit (8MiB x 8 bits) NAND flash device. | ||
17 | */ | ||
18 | |||
19 | #include <linux/slab.h> | ||
20 | #include <linux/module.h> | ||
21 | #include <linux/init.h> | ||
22 | #include <linux/mtd/mtd.h> | ||
23 | #include <linux/mtd/nand.h> | ||
24 | #include <linux/mtd/partitions.h> | ||
25 | #include <asm/io.h> | ||
26 | #include <mach/hardware.h> /* for CLPS7111_VIRT_BASE */ | ||
27 | #include <asm/sizes.h> | ||
28 | #include <asm/hardware/clps7111.h> | ||
29 | |||
30 | /* | ||
31 | * MTD structure for EDB7312 board | ||
32 | */ | ||
33 | static struct mtd_info *ep7312_mtd = NULL; | ||
34 | |||
35 | /* | ||
36 | * Values specific to the EDB7312 board (used with EP7312 processor) | ||
37 | */ | ||
38 | #define EP7312_FIO_PBASE 0x10000000 /* Phys address of flash */ | ||
39 | #define EP7312_PXDR 0x0001 /* | ||
40 | * IO offset to Port B data register | ||
41 | * where the CLE, ALE and NCE pins | ||
42 | * are wired to. | ||
43 | */ | ||
44 | #define EP7312_PXDDR 0x0041 /* | ||
45 | * IO offset to Port B data direction | ||
46 | * register so we can control the IO | ||
47 | * lines. | ||
48 | */ | ||
49 | |||
50 | /* | ||
51 | * Module stuff | ||
52 | */ | ||
53 | |||
54 | static unsigned long ep7312_fio_pbase = EP7312_FIO_PBASE; | ||
55 | static void __iomem *ep7312_pxdr = (void __iomem *)EP7312_PXDR; | ||
56 | static void __iomem *ep7312_pxddr = (void __iomem *)EP7312_PXDDR; | ||
57 | |||
58 | /* | ||
59 | * Define static partitions for flash device | ||
60 | */ | ||
61 | static struct mtd_partition partition_info[] = { | ||
62 | {.name = "EP7312 Nand Flash", | ||
63 | .offset = 0, | ||
64 | .size = 8 * 1024 * 1024} | ||
65 | }; | ||
66 | |||
67 | #define NUM_PARTITIONS 1 | ||
68 | |||
69 | /* | ||
70 | * hardware specific access to control-lines | ||
71 | * | ||
72 | * NAND_NCE: bit 0 -> bit 6 (bit 7 = 1) | ||
73 | * NAND_CLE: bit 1 -> bit 4 | ||
74 | * NAND_ALE: bit 2 -> bit 5 | ||
75 | */ | ||
76 | static void ep7312_hwcontrol(struct mtd_info *mtd, int cmd, unsigned int ctrl) | ||
77 | { | ||
78 | struct nand_chip *chip = mtd->priv; | ||
79 | |||
80 | if (ctrl & NAND_CTRL_CHANGE) { | ||
81 | unsigned char bits = 0x80; | ||
82 | |||
83 | bits |= (ctrl & (NAND_CLE | NAND_ALE)) << 3; | ||
84 | bits |= (ctrl & NAND_NCE) ? 0x00 : 0x40; | ||
85 | |||
86 | clps_writeb((clps_readb(ep7312_pxdr) & 0xF0) | bits, | ||
87 | ep7312_pxdr); | ||
88 | } | ||
89 | if (cmd != NAND_CMD_NONE) | ||
90 | writeb(cmd, chip->IO_ADDR_W); | ||
91 | } | ||
92 | |||
93 | /* | ||
94 | * read device ready pin | ||
95 | */ | ||
96 | static int ep7312_device_ready(struct mtd_info *mtd) | ||
97 | { | ||
98 | return 1; | ||
99 | } | ||
100 | |||
101 | const char *part_probes[] = { "cmdlinepart", NULL }; | ||
102 | |||
103 | /* | ||
104 | * Main initialization routine | ||
105 | */ | ||
106 | static int __init ep7312_init(void) | ||
107 | { | ||
108 | struct nand_chip *this; | ||
109 | const char *part_type = 0; | ||
110 | int mtd_parts_nb = 0; | ||
111 | struct mtd_partition *mtd_parts = 0; | ||
112 | void __iomem *ep7312_fio_base; | ||
113 | |||
114 | /* Allocate memory for MTD device structure and private data */ | ||
115 | ep7312_mtd = kmalloc(sizeof(struct mtd_info) + sizeof(struct nand_chip), GFP_KERNEL); | ||
116 | if (!ep7312_mtd) { | ||
117 | printk("Unable to allocate EDB7312 NAND MTD device structure.\n"); | ||
118 | return -ENOMEM; | ||
119 | } | ||
120 | |||
121 | /* map physical address */ | ||
122 | ep7312_fio_base = ioremap(ep7312_fio_pbase, SZ_1K); | ||
123 | if (!ep7312_fio_base) { | ||
124 | printk("ioremap EDB7312 NAND flash failed\n"); | ||
125 | kfree(ep7312_mtd); | ||
126 | return -EIO; | ||
127 | } | ||
128 | |||
129 | /* Get pointer to private data */ | ||
130 | this = (struct nand_chip *)(&ep7312_mtd[1]); | ||
131 | |||
132 | /* Initialize structures */ | ||
133 | memset(ep7312_mtd, 0, sizeof(struct mtd_info)); | ||
134 | memset(this, 0, sizeof(struct nand_chip)); | ||
135 | |||
136 | /* Link the private data with the MTD structure */ | ||
137 | ep7312_mtd->priv = this; | ||
138 | ep7312_mtd->owner = THIS_MODULE; | ||
139 | |||
140 | /* | ||
141 | * Set GPIO Port B control register so that the pins are configured | ||
142 | * to be outputs for controlling the NAND flash. | ||
143 | */ | ||
144 | clps_writeb(0xf0, ep7312_pxddr); | ||
145 | |||
146 | /* insert callbacks */ | ||
147 | this->IO_ADDR_R = ep7312_fio_base; | ||
148 | this->IO_ADDR_W = ep7312_fio_base; | ||
149 | this->cmd_ctrl = ep7312_hwcontrol; | ||
150 | this->dev_ready = ep7312_device_ready; | ||
151 | /* 15 us command delay time */ | ||
152 | this->chip_delay = 15; | ||
153 | |||
154 | /* Scan to find existence of the device */ | ||
155 | if (nand_scan(ep7312_mtd, 1)) { | ||
156 | iounmap((void *)ep7312_fio_base); | ||
157 | kfree(ep7312_mtd); | ||
158 | return -ENXIO; | ||
159 | } | ||
160 | ep7312_mtd->name = "edb7312-nand"; | ||
161 | mtd_parts_nb = parse_mtd_partitions(ep7312_mtd, part_probes, &mtd_parts, 0); | ||
162 | if (mtd_parts_nb > 0) | ||
163 | part_type = "command line"; | ||
164 | else | ||
165 | mtd_parts_nb = 0; | ||
166 | if (mtd_parts_nb == 0) { | ||
167 | mtd_parts = partition_info; | ||
168 | mtd_parts_nb = NUM_PARTITIONS; | ||
169 | part_type = "static"; | ||
170 | } | ||
171 | |||
172 | /* Register the partitions */ | ||
173 | printk(KERN_NOTICE "Using %s partition definition\n", part_type); | ||
174 | mtd_device_register(ep7312_mtd, mtd_parts, mtd_parts_nb); | ||
175 | |||
176 | /* Return happy */ | ||
177 | return 0; | ||
178 | } | ||
179 | |||
180 | module_init(ep7312_init); | ||
181 | |||
182 | /* | ||
183 | * Clean up routine | ||
184 | */ | ||
185 | static void __exit ep7312_cleanup(void) | ||
186 | { | ||
187 | struct nand_chip *this = (struct nand_chip *)&ep7312_mtd[1]; | ||
188 | |||
189 | /* Release resources, unregister device */ | ||
190 | nand_release(ap7312_mtd); | ||
191 | |||
192 | /* Release io resource */ | ||
193 | iounmap(this->IO_ADDR_R); | ||
194 | |||
195 | /* Free the MTD device structure */ | ||
196 | kfree(ep7312_mtd); | ||
197 | } | ||
198 | |||
199 | module_exit(ep7312_cleanup); | ||
200 | |||
201 | MODULE_LICENSE("GPL"); | ||
202 | MODULE_AUTHOR("Marius Groeger <mag@sysgo.de>"); | ||
203 | MODULE_DESCRIPTION("MTD map driver for Cogent EDB7312 board"); | ||
diff --git a/drivers/mtd/nand/fsl_elbc_nand.c b/drivers/mtd/nand/fsl_elbc_nand.c index 33d8aad8bba5..eedd8ee2c9ac 100644 --- a/drivers/mtd/nand/fsl_elbc_nand.c +++ b/drivers/mtd/nand/fsl_elbc_nand.c | |||
@@ -75,7 +75,6 @@ struct fsl_elbc_fcm_ctrl { | |||
75 | unsigned int use_mdr; /* Non zero if the MDR is to be set */ | 75 | unsigned int use_mdr; /* Non zero if the MDR is to be set */ |
76 | unsigned int oob; /* Non zero if operating on OOB data */ | 76 | unsigned int oob; /* Non zero if operating on OOB data */ |
77 | unsigned int counter; /* counter for the initializations */ | 77 | unsigned int counter; /* counter for the initializations */ |
78 | char *oob_poi; /* Place to write ECC after read back */ | ||
79 | }; | 78 | }; |
80 | 79 | ||
81 | /* These map to the positions used by the FCM hardware ECC generator */ | 80 | /* These map to the positions used by the FCM hardware ECC generator */ |
@@ -244,6 +243,25 @@ static int fsl_elbc_run_command(struct mtd_info *mtd) | |||
244 | return -EIO; | 243 | return -EIO; |
245 | } | 244 | } |
246 | 245 | ||
246 | if (chip->ecc.mode != NAND_ECC_HW) | ||
247 | return 0; | ||
248 | |||
249 | if (elbc_fcm_ctrl->read_bytes == mtd->writesize + mtd->oobsize) { | ||
250 | uint32_t lteccr = in_be32(&lbc->lteccr); | ||
251 | /* | ||
252 | * if command was a full page read and the ELBC | ||
253 | * has the LTECCR register, then bits 12-15 (ppc order) of | ||
254 | * LTECCR indicates which 512 byte sub-pages had fixed errors. | ||
255 | * bits 28-31 are uncorrectable errors, marked elsewhere. | ||
256 | * for small page nand only 1 bit is used. | ||
257 | * if the ELBC doesn't have the lteccr register it reads 0 | ||
258 | */ | ||
259 | if (lteccr & 0x000F000F) | ||
260 | out_be32(&lbc->lteccr, 0x000F000F); /* clear lteccr */ | ||
261 | if (lteccr & 0x000F0000) | ||
262 | mtd->ecc_stats.corrected++; | ||
263 | } | ||
264 | |||
247 | return 0; | 265 | return 0; |
248 | } | 266 | } |
249 | 267 | ||
@@ -435,7 +453,6 @@ static void fsl_elbc_cmdfunc(struct mtd_info *mtd, unsigned int command, | |||
435 | 453 | ||
436 | /* PAGEPROG reuses all of the setup from SEQIN and adds the length */ | 454 | /* PAGEPROG reuses all of the setup from SEQIN and adds the length */ |
437 | case NAND_CMD_PAGEPROG: { | 455 | case NAND_CMD_PAGEPROG: { |
438 | int full_page; | ||
439 | dev_vdbg(priv->dev, | 456 | dev_vdbg(priv->dev, |
440 | "fsl_elbc_cmdfunc: NAND_CMD_PAGEPROG " | 457 | "fsl_elbc_cmdfunc: NAND_CMD_PAGEPROG " |
441 | "writing %d bytes.\n", elbc_fcm_ctrl->index); | 458 | "writing %d bytes.\n", elbc_fcm_ctrl->index); |
@@ -445,34 +462,12 @@ static void fsl_elbc_cmdfunc(struct mtd_info *mtd, unsigned int command, | |||
445 | * write so the HW generates the ECC. | 462 | * write so the HW generates the ECC. |
446 | */ | 463 | */ |
447 | if (elbc_fcm_ctrl->oob || elbc_fcm_ctrl->column != 0 || | 464 | if (elbc_fcm_ctrl->oob || elbc_fcm_ctrl->column != 0 || |
448 | elbc_fcm_ctrl->index != mtd->writesize + mtd->oobsize) { | 465 | elbc_fcm_ctrl->index != mtd->writesize + mtd->oobsize) |
449 | out_be32(&lbc->fbcr, elbc_fcm_ctrl->index); | 466 | out_be32(&lbc->fbcr, elbc_fcm_ctrl->index); |
450 | full_page = 0; | 467 | else |
451 | } else { | ||
452 | out_be32(&lbc->fbcr, 0); | 468 | out_be32(&lbc->fbcr, 0); |
453 | full_page = 1; | ||
454 | } | ||
455 | 469 | ||
456 | fsl_elbc_run_command(mtd); | 470 | fsl_elbc_run_command(mtd); |
457 | |||
458 | /* Read back the page in order to fill in the ECC for the | ||
459 | * caller. Is this really needed? | ||
460 | */ | ||
461 | if (full_page && elbc_fcm_ctrl->oob_poi) { | ||
462 | out_be32(&lbc->fbcr, 3); | ||
463 | set_addr(mtd, 6, page_addr, 1); | ||
464 | |||
465 | elbc_fcm_ctrl->read_bytes = mtd->writesize + 9; | ||
466 | |||
467 | fsl_elbc_do_read(chip, 1); | ||
468 | fsl_elbc_run_command(mtd); | ||
469 | |||
470 | memcpy_fromio(elbc_fcm_ctrl->oob_poi + 6, | ||
471 | &elbc_fcm_ctrl->addr[elbc_fcm_ctrl->index], 3); | ||
472 | elbc_fcm_ctrl->index += 3; | ||
473 | } | ||
474 | |||
475 | elbc_fcm_ctrl->oob_poi = NULL; | ||
476 | return; | 471 | return; |
477 | } | 472 | } |
478 | 473 | ||
@@ -752,13 +747,8 @@ static void fsl_elbc_write_page(struct mtd_info *mtd, | |||
752 | struct nand_chip *chip, | 747 | struct nand_chip *chip, |
753 | const uint8_t *buf) | 748 | const uint8_t *buf) |
754 | { | 749 | { |
755 | struct fsl_elbc_mtd *priv = chip->priv; | ||
756 | struct fsl_elbc_fcm_ctrl *elbc_fcm_ctrl = priv->ctrl->nand; | ||
757 | |||
758 | fsl_elbc_write_buf(mtd, buf, mtd->writesize); | 750 | fsl_elbc_write_buf(mtd, buf, mtd->writesize); |
759 | fsl_elbc_write_buf(mtd, chip->oob_poi, mtd->oobsize); | 751 | fsl_elbc_write_buf(mtd, chip->oob_poi, mtd->oobsize); |
760 | |||
761 | elbc_fcm_ctrl->oob_poi = chip->oob_poi; | ||
762 | } | 752 | } |
763 | 753 | ||
764 | static int fsl_elbc_chip_init(struct fsl_elbc_mtd *priv) | 754 | static int fsl_elbc_chip_init(struct fsl_elbc_mtd *priv) |
@@ -791,8 +781,8 @@ static int fsl_elbc_chip_init(struct fsl_elbc_mtd *priv) | |||
791 | chip->bbt_md = &bbt_mirror_descr; | 781 | chip->bbt_md = &bbt_mirror_descr; |
792 | 782 | ||
793 | /* set up nand options */ | 783 | /* set up nand options */ |
794 | chip->options = NAND_NO_READRDY | NAND_NO_AUTOINCR | | 784 | chip->options = NAND_NO_READRDY | NAND_NO_AUTOINCR; |
795 | NAND_USE_FLASH_BBT; | 785 | chip->bbt_options = NAND_BBT_USE_FLASH; |
796 | 786 | ||
797 | chip->controller = &elbc_fcm_ctrl->controller; | 787 | chip->controller = &elbc_fcm_ctrl->controller; |
798 | chip->priv = priv; | 788 | chip->priv = priv; |
@@ -829,7 +819,6 @@ static int fsl_elbc_chip_remove(struct fsl_elbc_mtd *priv) | |||
829 | 819 | ||
830 | elbc_fcm_ctrl->chips[priv->bank] = NULL; | 820 | elbc_fcm_ctrl->chips[priv->bank] = NULL; |
831 | kfree(priv); | 821 | kfree(priv); |
832 | kfree(elbc_fcm_ctrl); | ||
833 | return 0; | 822 | return 0; |
834 | } | 823 | } |
835 | 824 | ||
@@ -842,13 +831,14 @@ static int __devinit fsl_elbc_nand_probe(struct platform_device *pdev) | |||
842 | struct resource res; | 831 | struct resource res; |
843 | struct fsl_elbc_fcm_ctrl *elbc_fcm_ctrl; | 832 | struct fsl_elbc_fcm_ctrl *elbc_fcm_ctrl; |
844 | static const char *part_probe_types[] | 833 | static const char *part_probe_types[] |
845 | = { "cmdlinepart", "RedBoot", NULL }; | 834 | = { "cmdlinepart", "RedBoot", "ofpart", NULL }; |
846 | struct mtd_partition *parts; | ||
847 | int ret; | 835 | int ret; |
848 | int bank; | 836 | int bank; |
849 | struct device *dev; | 837 | struct device *dev; |
850 | struct device_node *node = pdev->dev.of_node; | 838 | struct device_node *node = pdev->dev.of_node; |
839 | struct mtd_part_parser_data ppdata; | ||
851 | 840 | ||
841 | ppdata.of_node = pdev->dev.of_node; | ||
852 | if (!fsl_lbc_ctrl_dev || !fsl_lbc_ctrl_dev->regs) | 842 | if (!fsl_lbc_ctrl_dev || !fsl_lbc_ctrl_dev->regs) |
853 | return -ENODEV; | 843 | return -ENODEV; |
854 | lbc = fsl_lbc_ctrl_dev->regs; | 844 | lbc = fsl_lbc_ctrl_dev->regs; |
@@ -934,17 +924,8 @@ static int __devinit fsl_elbc_nand_probe(struct platform_device *pdev) | |||
934 | 924 | ||
935 | /* First look for RedBoot table or partitions on the command | 925 | /* First look for RedBoot table or partitions on the command |
936 | * line, these take precedence over device tree information */ | 926 | * line, these take precedence over device tree information */ |
937 | ret = parse_mtd_partitions(&priv->mtd, part_probe_types, &parts, 0); | 927 | mtd_device_parse_register(&priv->mtd, part_probe_types, &ppdata, |
938 | if (ret < 0) | 928 | NULL, 0); |
939 | goto err; | ||
940 | |||
941 | if (ret == 0) { | ||
942 | ret = of_mtd_parse_partitions(priv->dev, node, &parts); | ||
943 | if (ret < 0) | ||
944 | goto err; | ||
945 | } | ||
946 | |||
947 | mtd_device_register(&priv->mtd, parts, ret); | ||
948 | 929 | ||
949 | printk(KERN_INFO "eLBC NAND device at 0x%llx, bank %d\n", | 930 | printk(KERN_INFO "eLBC NAND device at 0x%llx, bank %d\n", |
950 | (unsigned long long)res.start, priv->bank); | 931 | (unsigned long long)res.start, priv->bank); |
diff --git a/drivers/mtd/nand/fsl_upm.c b/drivers/mtd/nand/fsl_upm.c index 23752fd5bc59..b4f3cc9f32fb 100644 --- a/drivers/mtd/nand/fsl_upm.c +++ b/drivers/mtd/nand/fsl_upm.c | |||
@@ -158,7 +158,7 @@ static int __devinit fun_chip_init(struct fsl_upm_nand *fun, | |||
158 | { | 158 | { |
159 | int ret; | 159 | int ret; |
160 | struct device_node *flash_np; | 160 | struct device_node *flash_np; |
161 | static const char *part_types[] = { "cmdlinepart", NULL, }; | 161 | struct mtd_part_parser_data ppdata; |
162 | 162 | ||
163 | fun->chip.IO_ADDR_R = fun->io_base; | 163 | fun->chip.IO_ADDR_R = fun->io_base; |
164 | fun->chip.IO_ADDR_W = fun->io_base; | 164 | fun->chip.IO_ADDR_W = fun->io_base; |
@@ -192,18 +192,12 @@ static int __devinit fun_chip_init(struct fsl_upm_nand *fun, | |||
192 | if (ret) | 192 | if (ret) |
193 | goto err; | 193 | goto err; |
194 | 194 | ||
195 | ret = parse_mtd_partitions(&fun->mtd, part_types, &fun->parts, 0); | 195 | ppdata.of_node = flash_np; |
196 | 196 | ret = mtd_device_parse_register(&fun->mtd, NULL, &ppdata, NULL, 0); | |
197 | #ifdef CONFIG_MTD_OF_PARTS | ||
198 | if (ret == 0) { | ||
199 | ret = of_mtd_parse_partitions(fun->dev, flash_np, &fun->parts); | ||
200 | if (ret < 0) | ||
201 | goto err; | ||
202 | } | ||
203 | #endif | ||
204 | ret = mtd_device_register(&fun->mtd, fun->parts, ret); | ||
205 | err: | 197 | err: |
206 | of_node_put(flash_np); | 198 | of_node_put(flash_np); |
199 | if (ret) | ||
200 | kfree(fun->mtd.name); | ||
207 | return ret; | 201 | return ret; |
208 | } | 202 | } |
209 | 203 | ||
diff --git a/drivers/mtd/nand/fsmc_nand.c b/drivers/mtd/nand/fsmc_nand.c index e9b275ac381c..e53b76064133 100644 --- a/drivers/mtd/nand/fsmc_nand.c +++ b/drivers/mtd/nand/fsmc_nand.c | |||
@@ -146,7 +146,7 @@ static struct mtd_partition partition_info_16KB_blk[] = { | |||
146 | { | 146 | { |
147 | .name = "Root File System", | 147 | .name = "Root File System", |
148 | .offset = 0x460000, | 148 | .offset = 0x460000, |
149 | .size = 0, | 149 | .size = MTDPART_SIZ_FULL, |
150 | }, | 150 | }, |
151 | }; | 151 | }; |
152 | 152 | ||
@@ -173,13 +173,10 @@ static struct mtd_partition partition_info_128KB_blk[] = { | |||
173 | { | 173 | { |
174 | .name = "Root File System", | 174 | .name = "Root File System", |
175 | .offset = 0x800000, | 175 | .offset = 0x800000, |
176 | .size = 0, | 176 | .size = MTDPART_SIZ_FULL, |
177 | }, | 177 | }, |
178 | }; | 178 | }; |
179 | 179 | ||
180 | #ifdef CONFIG_MTD_CMDLINE_PARTS | ||
181 | const char *part_probes[] = { "cmdlinepart", NULL }; | ||
182 | #endif | ||
183 | 180 | ||
184 | /** | 181 | /** |
185 | * struct fsmc_nand_data - structure for FSMC NAND device state | 182 | * struct fsmc_nand_data - structure for FSMC NAND device state |
@@ -187,8 +184,6 @@ const char *part_probes[] = { "cmdlinepart", NULL }; | |||
187 | * @pid: Part ID on the AMBA PrimeCell format | 184 | * @pid: Part ID on the AMBA PrimeCell format |
188 | * @mtd: MTD info for a NAND flash. | 185 | * @mtd: MTD info for a NAND flash. |
189 | * @nand: Chip related info for a NAND flash. | 186 | * @nand: Chip related info for a NAND flash. |
190 | * @partitions: Partition info for a NAND Flash. | ||
191 | * @nr_partitions: Total number of partition of a NAND flash. | ||
192 | * | 187 | * |
193 | * @ecc_place: ECC placing locations in oobfree type format. | 188 | * @ecc_place: ECC placing locations in oobfree type format. |
194 | * @bank: Bank number for probed device. | 189 | * @bank: Bank number for probed device. |
@@ -203,8 +198,6 @@ struct fsmc_nand_data { | |||
203 | u32 pid; | 198 | u32 pid; |
204 | struct mtd_info mtd; | 199 | struct mtd_info mtd; |
205 | struct nand_chip nand; | 200 | struct nand_chip nand; |
206 | struct mtd_partition *partitions; | ||
207 | unsigned int nr_partitions; | ||
208 | 201 | ||
209 | struct fsmc_eccplace *ecc_place; | 202 | struct fsmc_eccplace *ecc_place; |
210 | unsigned int bank; | 203 | unsigned int bank; |
@@ -716,65 +709,17 @@ static int __init fsmc_nand_probe(struct platform_device *pdev) | |||
716 | * platform data, | 709 | * platform data, |
717 | * default partition information present in driver. | 710 | * default partition information present in driver. |
718 | */ | 711 | */ |
719 | #ifdef CONFIG_MTD_CMDLINE_PARTS | ||
720 | /* | 712 | /* |
721 | * Check if partition info passed via command line | 713 | * Check for partition info passed |
722 | */ | 714 | */ |
723 | host->mtd.name = "nand"; | 715 | host->mtd.name = "nand"; |
724 | host->nr_partitions = parse_mtd_partitions(&host->mtd, part_probes, | 716 | ret = mtd_device_parse_register(&host->mtd, NULL, 0, |
725 | &host->partitions, 0); | 717 | host->mtd.size <= 0x04000000 ? |
726 | if (host->nr_partitions <= 0) { | 718 | partition_info_16KB_blk : |
727 | #endif | 719 | partition_info_128KB_blk, |
728 | /* | 720 | host->mtd.size <= 0x04000000 ? |
729 | * Check if partition info passed via command line | 721 | ARRAY_SIZE(partition_info_16KB_blk) : |
730 | */ | 722 | ARRAY_SIZE(partition_info_128KB_blk)); |
731 | if (pdata->partitions) { | ||
732 | host->partitions = pdata->partitions; | ||
733 | host->nr_partitions = pdata->nr_partitions; | ||
734 | } else { | ||
735 | struct mtd_partition *partition; | ||
736 | int i; | ||
737 | |||
738 | /* Select the default partitions info */ | ||
739 | switch (host->mtd.size) { | ||
740 | case 0x01000000: | ||
741 | case 0x02000000: | ||
742 | case 0x04000000: | ||
743 | host->partitions = partition_info_16KB_blk; | ||
744 | host->nr_partitions = | ||
745 | sizeof(partition_info_16KB_blk) / | ||
746 | sizeof(struct mtd_partition); | ||
747 | break; | ||
748 | case 0x08000000: | ||
749 | case 0x10000000: | ||
750 | case 0x20000000: | ||
751 | case 0x40000000: | ||
752 | host->partitions = partition_info_128KB_blk; | ||
753 | host->nr_partitions = | ||
754 | sizeof(partition_info_128KB_blk) / | ||
755 | sizeof(struct mtd_partition); | ||
756 | break; | ||
757 | default: | ||
758 | ret = -ENXIO; | ||
759 | pr_err("Unsupported NAND size\n"); | ||
760 | goto err_probe; | ||
761 | } | ||
762 | |||
763 | partition = host->partitions; | ||
764 | for (i = 0; i < host->nr_partitions; i++, partition++) { | ||
765 | if (partition->size == 0) { | ||
766 | partition->size = host->mtd.size - | ||
767 | partition->offset; | ||
768 | break; | ||
769 | } | ||
770 | } | ||
771 | } | ||
772 | #ifdef CONFIG_MTD_CMDLINE_PARTS | ||
773 | } | ||
774 | #endif | ||
775 | |||
776 | ret = mtd_device_register(&host->mtd, host->partitions, | ||
777 | host->nr_partitions); | ||
778 | if (ret) | 723 | if (ret) |
779 | goto err_probe; | 724 | goto err_probe; |
780 | 725 | ||
@@ -822,7 +767,7 @@ static int fsmc_nand_remove(struct platform_device *pdev) | |||
822 | platform_set_drvdata(pdev, NULL); | 767 | platform_set_drvdata(pdev, NULL); |
823 | 768 | ||
824 | if (host) { | 769 | if (host) { |
825 | mtd_device_unregister(&host->mtd); | 770 | nand_release(&host->mtd); |
826 | clk_disable(host->clk); | 771 | clk_disable(host->clk); |
827 | clk_put(host->clk); | 772 | clk_put(host->clk); |
828 | 773 | ||
diff --git a/drivers/mtd/nand/gpmi-nand/Makefile b/drivers/mtd/nand/gpmi-nand/Makefile new file mode 100644 index 000000000000..3a462487c35e --- /dev/null +++ b/drivers/mtd/nand/gpmi-nand/Makefile | |||
@@ -0,0 +1,3 @@ | |||
1 | obj-$(CONFIG_MTD_NAND_GPMI_NAND) += gpmi_nand.o | ||
2 | gpmi_nand-objs += gpmi-nand.o | ||
3 | gpmi_nand-objs += gpmi-lib.o | ||
diff --git a/drivers/mtd/nand/gpmi-nand/bch-regs.h b/drivers/mtd/nand/gpmi-nand/bch-regs.h new file mode 100644 index 000000000000..4effb8c579db --- /dev/null +++ b/drivers/mtd/nand/gpmi-nand/bch-regs.h | |||
@@ -0,0 +1,84 @@ | |||
1 | /* | ||
2 | * Freescale GPMI NAND Flash Driver | ||
3 | * | ||
4 | * Copyright 2008-2011 Freescale Semiconductor, Inc. | ||
5 | * Copyright 2008 Embedded Alley Solutions, Inc. | ||
6 | * | ||
7 | * This program is free software; you can redistribute it and/or modify | ||
8 | * it under the terms of the GNU General Public License as published by | ||
9 | * the Free Software Foundation; either version 2 of the License, or | ||
10 | * (at your option) any later version. | ||
11 | * | ||
12 | * This program is distributed in the hope that it will be useful, | ||
13 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
14 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
15 | * GNU General Public License for more details. | ||
16 | * | ||
17 | * You should have received a copy of the GNU General Public License along | ||
18 | * with this program; if not, write to the Free Software Foundation, Inc., | ||
19 | * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. | ||
20 | */ | ||
21 | #ifndef __GPMI_NAND_BCH_REGS_H | ||
22 | #define __GPMI_NAND_BCH_REGS_H | ||
23 | |||
24 | #define HW_BCH_CTRL 0x00000000 | ||
25 | #define HW_BCH_CTRL_SET 0x00000004 | ||
26 | #define HW_BCH_CTRL_CLR 0x00000008 | ||
27 | #define HW_BCH_CTRL_TOG 0x0000000c | ||
28 | |||
29 | #define BM_BCH_CTRL_COMPLETE_IRQ_EN (1 << 8) | ||
30 | #define BM_BCH_CTRL_COMPLETE_IRQ (1 << 0) | ||
31 | |||
32 | #define HW_BCH_STATUS0 0x00000010 | ||
33 | #define HW_BCH_MODE 0x00000020 | ||
34 | #define HW_BCH_ENCODEPTR 0x00000030 | ||
35 | #define HW_BCH_DATAPTR 0x00000040 | ||
36 | #define HW_BCH_METAPTR 0x00000050 | ||
37 | #define HW_BCH_LAYOUTSELECT 0x00000070 | ||
38 | |||
39 | #define HW_BCH_FLASH0LAYOUT0 0x00000080 | ||
40 | |||
41 | #define BP_BCH_FLASH0LAYOUT0_NBLOCKS 24 | ||
42 | #define BM_BCH_FLASH0LAYOUT0_NBLOCKS (0xff << BP_BCH_FLASH0LAYOUT0_NBLOCKS) | ||
43 | #define BF_BCH_FLASH0LAYOUT0_NBLOCKS(v) \ | ||
44 | (((v) << BP_BCH_FLASH0LAYOUT0_NBLOCKS) & BM_BCH_FLASH0LAYOUT0_NBLOCKS) | ||
45 | |||
46 | #define BP_BCH_FLASH0LAYOUT0_META_SIZE 16 | ||
47 | #define BM_BCH_FLASH0LAYOUT0_META_SIZE (0xff << BP_BCH_FLASH0LAYOUT0_META_SIZE) | ||
48 | #define BF_BCH_FLASH0LAYOUT0_META_SIZE(v) \ | ||
49 | (((v) << BP_BCH_FLASH0LAYOUT0_META_SIZE)\ | ||
50 | & BM_BCH_FLASH0LAYOUT0_META_SIZE) | ||
51 | |||
52 | #define BP_BCH_FLASH0LAYOUT0_ECC0 12 | ||
53 | #define BM_BCH_FLASH0LAYOUT0_ECC0 (0xf << BP_BCH_FLASH0LAYOUT0_ECC0) | ||
54 | #define BF_BCH_FLASH0LAYOUT0_ECC0(v) \ | ||
55 | (((v) << BP_BCH_FLASH0LAYOUT0_ECC0) & BM_BCH_FLASH0LAYOUT0_ECC0) | ||
56 | |||
57 | #define BP_BCH_FLASH0LAYOUT0_DATA0_SIZE 0 | ||
58 | #define BM_BCH_FLASH0LAYOUT0_DATA0_SIZE \ | ||
59 | (0xfff << BP_BCH_FLASH0LAYOUT0_DATA0_SIZE) | ||
60 | #define BF_BCH_FLASH0LAYOUT0_DATA0_SIZE(v) \ | ||
61 | (((v) << BP_BCH_FLASH0LAYOUT0_DATA0_SIZE)\ | ||
62 | & BM_BCH_FLASH0LAYOUT0_DATA0_SIZE) | ||
63 | |||
64 | #define HW_BCH_FLASH0LAYOUT1 0x00000090 | ||
65 | |||
66 | #define BP_BCH_FLASH0LAYOUT1_PAGE_SIZE 16 | ||
67 | #define BM_BCH_FLASH0LAYOUT1_PAGE_SIZE \ | ||
68 | (0xffff << BP_BCH_FLASH0LAYOUT1_PAGE_SIZE) | ||
69 | #define BF_BCH_FLASH0LAYOUT1_PAGE_SIZE(v) \ | ||
70 | (((v) << BP_BCH_FLASH0LAYOUT1_PAGE_SIZE) \ | ||
71 | & BM_BCH_FLASH0LAYOUT1_PAGE_SIZE) | ||
72 | |||
73 | #define BP_BCH_FLASH0LAYOUT1_ECCN 12 | ||
74 | #define BM_BCH_FLASH0LAYOUT1_ECCN (0xf << BP_BCH_FLASH0LAYOUT1_ECCN) | ||
75 | #define BF_BCH_FLASH0LAYOUT1_ECCN(v) \ | ||
76 | (((v) << BP_BCH_FLASH0LAYOUT1_ECCN) & BM_BCH_FLASH0LAYOUT1_ECCN) | ||
77 | |||
78 | #define BP_BCH_FLASH0LAYOUT1_DATAN_SIZE 0 | ||
79 | #define BM_BCH_FLASH0LAYOUT1_DATAN_SIZE \ | ||
80 | (0xfff << BP_BCH_FLASH0LAYOUT1_DATAN_SIZE) | ||
81 | #define BF_BCH_FLASH0LAYOUT1_DATAN_SIZE(v) \ | ||
82 | (((v) << BP_BCH_FLASH0LAYOUT1_DATAN_SIZE) \ | ||
83 | & BM_BCH_FLASH0LAYOUT1_DATAN_SIZE) | ||
84 | #endif | ||
diff --git a/drivers/mtd/nand/gpmi-nand/gpmi-lib.c b/drivers/mtd/nand/gpmi-nand/gpmi-lib.c new file mode 100644 index 000000000000..de4db7604a3f --- /dev/null +++ b/drivers/mtd/nand/gpmi-nand/gpmi-lib.c | |||
@@ -0,0 +1,1057 @@ | |||
1 | /* | ||
2 | * Freescale GPMI NAND Flash Driver | ||
3 | * | ||
4 | * Copyright (C) 2008-2011 Freescale Semiconductor, Inc. | ||
5 | * Copyright (C) 2008 Embedded Alley Solutions, Inc. | ||
6 | * | ||
7 | * This program is free software; you can redistribute it and/or modify | ||
8 | * it under the terms of the GNU General Public License as published by | ||
9 | * the Free Software Foundation; either version 2 of the License, or | ||
10 | * (at your option) any later version. | ||
11 | * | ||
12 | * This program is distributed in the hope that it will be useful, | ||
13 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
14 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
15 | * GNU General Public License for more details. | ||
16 | * | ||
17 | * You should have received a copy of the GNU General Public License along | ||
18 | * with this program; if not, write to the Free Software Foundation, Inc., | ||
19 | * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. | ||
20 | */ | ||
21 | #include <linux/mtd/gpmi-nand.h> | ||
22 | #include <linux/delay.h> | ||
23 | #include <linux/clk.h> | ||
24 | #include <mach/mxs.h> | ||
25 | |||
26 | #include "gpmi-nand.h" | ||
27 | #include "gpmi-regs.h" | ||
28 | #include "bch-regs.h" | ||
29 | |||
/*
 * Default limits used by the timing computation. The maximum cycle counts
 * are derived from the widths of the corresponding register fields;
 * clock_frequency_in_hz is filled in at run time (see gpmi_begin()).
 */
struct timing_threshod timing_default_threshold = {
	.max_data_setup_cycles       = (BM_GPMI_TIMING0_DATA_SETUP >>
						BP_GPMI_TIMING0_DATA_SETUP),
	.internal_data_setup_in_ns   = 0,
	.max_sample_delay_factor     = (BM_GPMI_CTRL1_RDN_DELAY >>
						BP_GPMI_CTRL1_RDN_DELAY),
	.max_dll_clock_period_in_ns  = 32,
	.max_dll_delay_in_ns         = 16,
};
39 | |||
40 | /* | ||
41 | * Clear the bit and poll it cleared. This is usually called with | ||
42 | * a reset address and mask being either SFTRST(bit 31) or CLKGATE | ||
43 | * (bit 30). | ||
44 | */ | ||
45 | static int clear_poll_bit(void __iomem *addr, u32 mask) | ||
46 | { | ||
47 | int timeout = 0x400; | ||
48 | |||
49 | /* clear the bit */ | ||
50 | __mxs_clrl(mask, addr); | ||
51 | |||
52 | /* | ||
53 | * SFTRST needs 3 GPMI clocks to settle, the reference manual | ||
54 | * recommends to wait 1us. | ||
55 | */ | ||
56 | udelay(1); | ||
57 | |||
58 | /* poll the bit becoming clear */ | ||
59 | while ((readl(addr) & mask) && --timeout) | ||
60 | /* nothing */; | ||
61 | |||
62 | return !timeout; | ||
63 | } | ||
64 | |||
/*
 * CLKGATE (bit 30) and SFTRST (bit 31) are common to all mxs register
 * blocks. Use unsigned constants: left-shifting 1 into the sign bit of
 * a signed int is undefined behavior in C.
 */
#define MODULE_CLKGATE (1U << 30)
#define MODULE_SFTRST (1U << 31)
67 | /* | ||
68 | * The current mxs_reset_block() will do two things: | ||
69 | * [1] enable the module. | ||
70 | * [2] reset the module. | ||
71 | * | ||
72 | * In most of the cases, it's ok. But there is a hardware bug in the BCH block. | ||
73 | * If you try to soft reset the BCH block, it becomes unusable until | ||
74 | * the next hard reset. This case occurs in the NAND boot mode. When the board | ||
75 | * boots by NAND, the ROM of the chip will initialize the BCH blocks itself. | ||
76 | * So If the driver tries to reset the BCH again, the BCH will not work anymore. | ||
77 | * You will see a DMA timeout in this case. | ||
78 | * | ||
79 | * To avoid this bug, just add a new parameter `just_enable` for | ||
80 | * the mxs_reset_block(), and rewrite it here. | ||
81 | */ | ||
82 | int gpmi_reset_block(void __iomem *reset_addr, bool just_enable) | ||
83 | { | ||
84 | int ret; | ||
85 | int timeout = 0x400; | ||
86 | |||
87 | /* clear and poll SFTRST */ | ||
88 | ret = clear_poll_bit(reset_addr, MODULE_SFTRST); | ||
89 | if (unlikely(ret)) | ||
90 | goto error; | ||
91 | |||
92 | /* clear CLKGATE */ | ||
93 | __mxs_clrl(MODULE_CLKGATE, reset_addr); | ||
94 | |||
95 | if (!just_enable) { | ||
96 | /* set SFTRST to reset the block */ | ||
97 | __mxs_setl(MODULE_SFTRST, reset_addr); | ||
98 | udelay(1); | ||
99 | |||
100 | /* poll CLKGATE becoming set */ | ||
101 | while ((!(readl(reset_addr) & MODULE_CLKGATE)) && --timeout) | ||
102 | /* nothing */; | ||
103 | if (unlikely(!timeout)) | ||
104 | goto error; | ||
105 | } | ||
106 | |||
107 | /* clear and poll SFTRST */ | ||
108 | ret = clear_poll_bit(reset_addr, MODULE_SFTRST); | ||
109 | if (unlikely(ret)) | ||
110 | goto error; | ||
111 | |||
112 | /* clear and poll CLKGATE */ | ||
113 | ret = clear_poll_bit(reset_addr, MODULE_CLKGATE); | ||
114 | if (unlikely(ret)) | ||
115 | goto error; | ||
116 | |||
117 | return 0; | ||
118 | |||
119 | error: | ||
120 | pr_err("%s(%p): module reset timeout\n", __func__, reset_addr); | ||
121 | return -ETIMEDOUT; | ||
122 | } | ||
123 | |||
124 | int gpmi_init(struct gpmi_nand_data *this) | ||
125 | { | ||
126 | struct resources *r = &this->resources; | ||
127 | int ret; | ||
128 | |||
129 | ret = clk_enable(r->clock); | ||
130 | if (ret) | ||
131 | goto err_out; | ||
132 | ret = gpmi_reset_block(r->gpmi_regs, false); | ||
133 | if (ret) | ||
134 | goto err_out; | ||
135 | |||
136 | /* Choose NAND mode. */ | ||
137 | writel(BM_GPMI_CTRL1_GPMI_MODE, r->gpmi_regs + HW_GPMI_CTRL1_CLR); | ||
138 | |||
139 | /* Set the IRQ polarity. */ | ||
140 | writel(BM_GPMI_CTRL1_ATA_IRQRDY_POLARITY, | ||
141 | r->gpmi_regs + HW_GPMI_CTRL1_SET); | ||
142 | |||
143 | /* Disable Write-Protection. */ | ||
144 | writel(BM_GPMI_CTRL1_DEV_RESET, r->gpmi_regs + HW_GPMI_CTRL1_SET); | ||
145 | |||
146 | /* Select BCH ECC. */ | ||
147 | writel(BM_GPMI_CTRL1_BCH_MODE, r->gpmi_regs + HW_GPMI_CTRL1_SET); | ||
148 | |||
149 | clk_disable(r->clock); | ||
150 | return 0; | ||
151 | err_out: | ||
152 | return ret; | ||
153 | } | ||
154 | |||
155 | /* This function is very useful. It is called only when the bug occur. */ | ||
156 | void gpmi_dump_info(struct gpmi_nand_data *this) | ||
157 | { | ||
158 | struct resources *r = &this->resources; | ||
159 | struct bch_geometry *geo = &this->bch_geometry; | ||
160 | u32 reg; | ||
161 | int i; | ||
162 | |||
163 | pr_err("Show GPMI registers :\n"); | ||
164 | for (i = 0; i <= HW_GPMI_DEBUG / 0x10 + 1; i++) { | ||
165 | reg = readl(r->gpmi_regs + i * 0x10); | ||
166 | pr_err("offset 0x%.3x : 0x%.8x\n", i * 0x10, reg); | ||
167 | } | ||
168 | |||
169 | /* start to print out the BCH info */ | ||
170 | pr_err("BCH Geometry :\n"); | ||
171 | pr_err("GF length : %u\n", geo->gf_len); | ||
172 | pr_err("ECC Strength : %u\n", geo->ecc_strength); | ||
173 | pr_err("Page Size in Bytes : %u\n", geo->page_size); | ||
174 | pr_err("Metadata Size in Bytes : %u\n", geo->metadata_size); | ||
175 | pr_err("ECC Chunk Size in Bytes: %u\n", geo->ecc_chunk_size); | ||
176 | pr_err("ECC Chunk Count : %u\n", geo->ecc_chunk_count); | ||
177 | pr_err("Payload Size in Bytes : %u\n", geo->payload_size); | ||
178 | pr_err("Auxiliary Size in Bytes: %u\n", geo->auxiliary_size); | ||
179 | pr_err("Auxiliary Status Offset: %u\n", geo->auxiliary_status_offset); | ||
180 | pr_err("Block Mark Byte Offset : %u\n", geo->block_mark_byte_offset); | ||
181 | pr_err("Block Mark Bit Offset : %u\n", geo->block_mark_bit_offset); | ||
182 | } | ||
183 | |||
184 | /* Configures the geometry for BCH. */ | ||
185 | int bch_set_geometry(struct gpmi_nand_data *this) | ||
186 | { | ||
187 | struct resources *r = &this->resources; | ||
188 | struct bch_geometry *bch_geo = &this->bch_geometry; | ||
189 | unsigned int block_count; | ||
190 | unsigned int block_size; | ||
191 | unsigned int metadata_size; | ||
192 | unsigned int ecc_strength; | ||
193 | unsigned int page_size; | ||
194 | int ret; | ||
195 | |||
196 | if (common_nfc_set_geometry(this)) | ||
197 | return !0; | ||
198 | |||
199 | block_count = bch_geo->ecc_chunk_count - 1; | ||
200 | block_size = bch_geo->ecc_chunk_size; | ||
201 | metadata_size = bch_geo->metadata_size; | ||
202 | ecc_strength = bch_geo->ecc_strength >> 1; | ||
203 | page_size = bch_geo->page_size; | ||
204 | |||
205 | ret = clk_enable(r->clock); | ||
206 | if (ret) | ||
207 | goto err_out; | ||
208 | |||
209 | ret = gpmi_reset_block(r->bch_regs, true); | ||
210 | if (ret) | ||
211 | goto err_out; | ||
212 | |||
213 | /* Configure layout 0. */ | ||
214 | writel(BF_BCH_FLASH0LAYOUT0_NBLOCKS(block_count) | ||
215 | | BF_BCH_FLASH0LAYOUT0_META_SIZE(metadata_size) | ||
216 | | BF_BCH_FLASH0LAYOUT0_ECC0(ecc_strength) | ||
217 | | BF_BCH_FLASH0LAYOUT0_DATA0_SIZE(block_size), | ||
218 | r->bch_regs + HW_BCH_FLASH0LAYOUT0); | ||
219 | |||
220 | writel(BF_BCH_FLASH0LAYOUT1_PAGE_SIZE(page_size) | ||
221 | | BF_BCH_FLASH0LAYOUT1_ECCN(ecc_strength) | ||
222 | | BF_BCH_FLASH0LAYOUT1_DATAN_SIZE(block_size), | ||
223 | r->bch_regs + HW_BCH_FLASH0LAYOUT1); | ||
224 | |||
225 | /* Set *all* chip selects to use layout 0. */ | ||
226 | writel(0, r->bch_regs + HW_BCH_LAYOUTSELECT); | ||
227 | |||
228 | /* Enable interrupts. */ | ||
229 | writel(BM_BCH_CTRL_COMPLETE_IRQ_EN, | ||
230 | r->bch_regs + HW_BCH_CTRL_SET); | ||
231 | |||
232 | clk_disable(r->clock); | ||
233 | return 0; | ||
234 | err_out: | ||
235 | return ret; | ||
236 | } | ||
237 | |||
/*
 * Convert a duration in nanoseconds to clock cycles, rounding up,
 * and never returning fewer than 'min' cycles.
 */
static unsigned int ns_to_cycles(unsigned int time,
			unsigned int period, unsigned int min)
{
	unsigned int cycles = (time + period - 1) / period;

	return cycles > min ? cycles : min;
}
247 | |||
/*
 * gpmi_nfc_compute_hardware_timing - translate NAND timings to NFC settings.
 *
 * Converts the abstract timing in this->timing (relaxed when multiple chips
 * share the bus) into the concrete values the controller needs: setup/hold
 * cycle counts and the DLL sample-delay configuration, stored in @hw.
 *
 * Two algorithms are used: a simple one when only basic timing numbers are
 * known, and a data-eye computation when tREA/tRLOH/tRHOH are all available.
 * All results are clamped to the limits in timing_default_threshold.
 *
 * Always returns 0.
 */
static int gpmi_nfc_compute_hardware_timing(struct gpmi_nand_data *this,
					struct gpmi_nfc_hardware_timing *hw)
{
	struct gpmi_nand_platform_data *pdata = this->pdata;
	struct timing_threshod *nfc = &timing_default_threshold;
	struct nand_chip *nand = &this->nand;
	struct nand_timing target = this->timing;
	bool improved_timing_is_available;
	unsigned long clock_frequency_in_hz;
	unsigned int clock_period_in_ns;
	bool dll_use_half_periods;
	unsigned int dll_delay_shift;
	unsigned int max_sample_delay_in_ns;
	unsigned int address_setup_in_cycles;
	unsigned int data_setup_in_ns;
	unsigned int data_setup_in_cycles;
	unsigned int data_hold_in_cycles;
	int ideal_sample_delay_in_ns;
	unsigned int sample_delay_factor;
	int tEYE;
	unsigned int min_prop_delay_in_ns = pdata->min_prop_delay_in_ns;
	unsigned int max_prop_delay_in_ns = pdata->max_prop_delay_in_ns;

	/*
	 * If there are multiple chips, we need to relax the timings to allow
	 * for signal distortion due to higher capacitance.
	 */
	if (nand->numchips > 2) {
		target.data_setup_in_ns    += 10;
		target.data_hold_in_ns     += 10;
		target.address_setup_in_ns += 10;
	} else if (nand->numchips > 1) {
		target.data_setup_in_ns    += 5;
		target.data_hold_in_ns     += 5;
		target.address_setup_in_ns += 5;
	}

	/* Check if improved timing information is available. */
	improved_timing_is_available =
		(target.tREA_in_ns  >= 0) &&
		(target.tRLOH_in_ns >= 0) &&
		(target.tRHOH_in_ns >= 0) ;

	/* Inspect the clock (filled in by gpmi_begin() before we run). */
	clock_frequency_in_hz = nfc->clock_frequency_in_hz;
	clock_period_in_ns = 1000000000 / clock_frequency_in_hz;

	/*
	 * The NFC quantizes setup and hold parameters in terms of clock cycles.
	 * Here, we quantize the setup and hold timing parameters to the
	 * next-highest clock period to make sure we apply at least the
	 * specified times.
	 *
	 * For data setup and data hold, the hardware interprets a value of zero
	 * as the largest possible delay. This is not what's intended by a zero
	 * in the input parameter, so we impose a minimum of one cycle.
	 */
	data_setup_in_cycles    = ns_to_cycles(target.data_setup_in_ns,
						clock_period_in_ns, 1);
	data_hold_in_cycles     = ns_to_cycles(target.data_hold_in_ns,
						clock_period_in_ns, 1);
	address_setup_in_cycles = ns_to_cycles(target.address_setup_in_ns,
						clock_period_in_ns, 0);

	/*
	 * The clock's period affects the sample delay in a number of ways:
	 *
	 * (1) The NFC HAL tells us the maximum clock period the sample delay
	 *     DLL can tolerate. If the clock period is greater than half that
	 *     maximum, we must configure the DLL to be driven by half periods.
	 *
	 * (2) We need to convert from an ideal sample delay, in ns, to a
	 *     "sample delay factor," which the NFC uses. This factor depends on
	 *     whether we're driving the DLL with full or half periods.
	 *     Paraphrasing the reference manual:
	 *
	 *         AD = SDF x 0.125 x RP
	 *
	 * where:
	 *
	 *     AD  is the applied delay, in ns.
	 *     SDF is the sample delay factor, which is dimensionless.
	 *     RP  is the reference period, in ns, which is a full clock period
	 *         if the DLL is being driven by full periods, or half that if
	 *         the DLL is being driven by half periods.
	 *
	 * Let's re-arrange this in a way that's more useful to us:
	 *
	 *                        8
	 *         SDF  =  AD x ----
	 *                       RP
	 *
	 * The reference period is either the clock period or half that, so this
	 * is:
	 *
	 *                        8           AD x DDF
	 *         SDF  =  AD x -----   =   --------
	 *                      f x P           P
	 *
	 * where:
	 *
	 *     f   is 1 or 1/2, depending on how we're driving the DLL.
	 *     P   is the clock period.
	 *     DDF is the DLL Delay Factor, a dimensionless value that
	 *         incorporates all the constants in the conversion.
	 *
	 * DDF will be either 8 or 16, both of which are powers of two. We can
	 * reduce the cost of this conversion by using bit shifts instead of
	 * multiplication or division. Thus:
	 *
	 *                 AD << DDS
	 *         SDF  =  ---------
	 *                     P
	 *
	 *     or
	 *
	 *         AD  =  (SDF >> DDS) x P
	 *
	 * where:
	 *
	 *     DDS is the DLL Delay Shift, the logarithm to base 2 of the DDF.
	 */
	if (clock_period_in_ns > (nfc->max_dll_clock_period_in_ns >> 1)) {
		dll_use_half_periods = true;
		dll_delay_shift      = 3 + 1;
	} else {
		dll_use_half_periods = false;
		dll_delay_shift      = 3;
	}

	/*
	 * Compute the maximum sample delay the NFC allows, under current
	 * conditions. If the clock is running too slowly, no sample delay is
	 * possible.
	 */
	if (clock_period_in_ns > nfc->max_dll_clock_period_in_ns)
		max_sample_delay_in_ns = 0;
	else {
		/*
		 * Compute the delay implied by the largest sample delay factor
		 * the NFC allows.
		 */
		max_sample_delay_in_ns =
			(nfc->max_sample_delay_factor * clock_period_in_ns) >>
							dll_delay_shift;

		/*
		 * Check if the implied sample delay larger than the NFC
		 * actually allows.
		 */
		if (max_sample_delay_in_ns > nfc->max_dll_delay_in_ns)
			max_sample_delay_in_ns = nfc->max_dll_delay_in_ns;
	}

	/*
	 * Check if improved timing information is available. If not, we have to
	 * use a less-sophisticated algorithm.
	 */
	if (!improved_timing_is_available) {
		/*
		 * Fold the read setup time required by the NFC into the ideal
		 * sample delay.
		 */
		ideal_sample_delay_in_ns = target.gpmi_sample_delay_in_ns +
						nfc->internal_data_setup_in_ns;

		/*
		 * The ideal sample delay may be greater than the maximum
		 * allowed by the NFC. If so, we can trade off sample delay time
		 * for more data setup time.
		 *
		 * In each iteration of the following loop, we add a cycle to
		 * the data setup time and subtract a corresponding amount from
		 * the sample delay until we've satisified the constraints or
		 * can't do any better.
		 */
		while ((ideal_sample_delay_in_ns > max_sample_delay_in_ns) &&
			(data_setup_in_cycles < nfc->max_data_setup_cycles)) {

			data_setup_in_cycles++;
			ideal_sample_delay_in_ns -= clock_period_in_ns;

			if (ideal_sample_delay_in_ns < 0)
				ideal_sample_delay_in_ns = 0;

		}

		/*
		 * Compute the sample delay factor that corresponds most closely
		 * to the ideal sample delay. If the result is too large for the
		 * NFC, use the maximum value.
		 *
		 * Notice that we use the ns_to_cycles function to compute the
		 * sample delay factor. We do this because the form of the
		 * computation is the same as that for calculating cycles.
		 */
		sample_delay_factor =
			ns_to_cycles(
				ideal_sample_delay_in_ns << dll_delay_shift,
						clock_period_in_ns, 0);

		if (sample_delay_factor > nfc->max_sample_delay_factor)
			sample_delay_factor = nfc->max_sample_delay_factor;

		/* Skip to the part where we return our results. */
		goto return_results;
	}

	/*
	 * If control arrives here, we have more detailed timing information,
	 * so we can use a better algorithm.
	 */

	/*
	 * Fold the read setup time required by the NFC into the maximum
	 * propagation delay.
	 */
	max_prop_delay_in_ns += nfc->internal_data_setup_in_ns;

	/*
	 * Earlier, we computed the number of clock cycles required to satisfy
	 * the data setup time. Now, we need to know the actual nanoseconds.
	 */
	data_setup_in_ns = clock_period_in_ns * data_setup_in_cycles;

	/*
	 * Compute tEYE, the width of the data eye when reading from the NAND
	 * Flash. The eye width is fundamentally determined by the data setup
	 * time, perturbed by propagation delays and some characteristics of the
	 * NAND Flash device.
	 *
	 * start of the eye = max_prop_delay + tREA
	 * end of the eye   = min_prop_delay + tRHOH + data_setup
	 */
	tEYE = (int)min_prop_delay_in_ns + (int)target.tRHOH_in_ns +
						(int)data_setup_in_ns;

	tEYE -= (int)max_prop_delay_in_ns + (int)target.tREA_in_ns;

	/*
	 * The eye must be open. If it's not, we can try to open it by
	 * increasing its main forcer, the data setup time.
	 *
	 * In each iteration of the following loop, we increase the data setup
	 * time by a single clock cycle. We do this until either the eye is
	 * open or we run into NFC limits.
	 */
	while ((tEYE <= 0) &&
		(data_setup_in_cycles < nfc->max_data_setup_cycles)) {
		/* Give a cycle to data setup. */
		data_setup_in_cycles++;
		/* Synchronize the data setup time with the cycles. */
		data_setup_in_ns += clock_period_in_ns;
		/* Adjust tEYE accordingly. */
		tEYE += clock_period_in_ns;
	}

	/*
	 * When control arrives here, the eye is open. The ideal time to sample
	 * the data is in the center of the eye:
	 *
	 *     end of the eye + start of the eye
	 *     ---------------------------------  -  data_setup
	 *                    2
	 *
	 * After some algebra, this simplifies to the code immediately below.
	 */
	ideal_sample_delay_in_ns =
		((int)max_prop_delay_in_ns +
			(int)target.tREA_in_ns +
				(int)min_prop_delay_in_ns +
					(int)target.tRHOH_in_ns -
						(int)data_setup_in_ns) >> 1;

	/*
	 * The following figure illustrates some aspects of a NAND Flash read:
	 *
	 *
	 *           __                   _____________________________________
	 * RDN         \_________________/
	 *
	 *                                         <---- tEYE ----->
	 *                                        /-----------------\
	 * Read Data ----------------------------<                   >---------
	 *                                        \-----------------/
	 *           ^                 ^                 ^              ^
	 *           |                 |                 |              |
	 *           |<--Data Setup -->|<--Delay Time -->|              |
	 *           |                 |                 |              |
	 *           |                 |                                |
	 *           |                 |<--   Quantized Delay Time   -->|
	 *           |                 |                                |
	 *
	 *
	 * We have some issues we must now address:
	 *
	 * (1) The *ideal* sample delay time must not be negative. If it is, we
	 *     jam it to zero.
	 *
	 * (2) The *ideal* sample delay time must not be greater than that
	 *     allowed by the NFC. If it is, we can increase the data setup
	 *     time, which will reduce the delay between the end of the data
	 *     setup and the center of the eye. It will also make the eye
	 *     larger, which might help with the next issue...
	 *
	 * (3) The *quantized* sample delay time must not fall either before the
	 *     eye opens or after it closes (the latter is the problem
	 *     illustrated in the above figure).
	 */

	/* Jam a negative ideal sample delay to zero. */
	if (ideal_sample_delay_in_ns < 0)
		ideal_sample_delay_in_ns = 0;

	/*
	 * Extend the data setup as needed to reduce the ideal sample delay
	 * below the maximum permitted by the NFC.
	 */
	while ((ideal_sample_delay_in_ns > max_sample_delay_in_ns) &&
		(data_setup_in_cycles < nfc->max_data_setup_cycles)) {

		/* Give a cycle to data setup. */
		data_setup_in_cycles++;
		/* Synchronize the data setup time with the cycles. */
		data_setup_in_ns += clock_period_in_ns;
		/* Adjust tEYE accordingly. */
		tEYE += clock_period_in_ns;

		/*
		 * Decrease the ideal sample delay by one half cycle, to keep it
		 * in the middle of the eye.
		 */
		ideal_sample_delay_in_ns -= (clock_period_in_ns >> 1);

		/* Jam a negative ideal sample delay to zero. */
		if (ideal_sample_delay_in_ns < 0)
			ideal_sample_delay_in_ns = 0;
	}

	/*
	 * Compute the sample delay factor that corresponds to the ideal sample
	 * delay. If the result is too large, then use the maximum allowed
	 * value.
	 *
	 * Notice that we use the ns_to_cycles function to compute the sample
	 * delay factor. We do this because the form of the computation is the
	 * same as that for calculating cycles.
	 */
	sample_delay_factor =
		ns_to_cycles(ideal_sample_delay_in_ns << dll_delay_shift,
						clock_period_in_ns, 0);

	if (sample_delay_factor > nfc->max_sample_delay_factor)
		sample_delay_factor = nfc->max_sample_delay_factor;

	/*
	 * These macros conveniently encapsulate a computation we'll use to
	 * continuously evaluate whether or not the data sample delay is inside
	 * the eye.
	 */
#define IDEAL_DELAY  ((int) ideal_sample_delay_in_ns)

#define QUANTIZED_DELAY  \
	((int) ((sample_delay_factor * clock_period_in_ns) >> \
						dll_delay_shift))

#define DELAY_ERROR  (abs(QUANTIZED_DELAY - IDEAL_DELAY))

#define SAMPLE_IS_NOT_WITHIN_THE_EYE  (DELAY_ERROR > (tEYE >> 1))

	/*
	 * While the quantized sample time falls outside the eye, reduce the
	 * sample delay or extend the data setup to move the sampling point back
	 * toward the eye. Do not allow the number of data setup cycles to
	 * exceed the maximum allowed by the NFC.
	 */
	while (SAMPLE_IS_NOT_WITHIN_THE_EYE &&
		(data_setup_in_cycles < nfc->max_data_setup_cycles)) {
		/*
		 * If control arrives here, the quantized sample delay falls
		 * outside the eye. Check if it's before the eye opens, or after
		 * the eye closes.
		 */
		if (QUANTIZED_DELAY > IDEAL_DELAY) {
			/*
			 * If control arrives here, the quantized sample delay
			 * falls after the eye closes. Decrease the quantized
			 * delay time and then go back to re-evaluate.
			 */
			if (sample_delay_factor != 0)
				sample_delay_factor--;
			continue;
		}

		/*
		 * If control arrives here, the quantized sample delay falls
		 * before the eye opens. Shift the sample point by increasing
		 * data setup time. This will also make the eye larger.
		 */

		/* Give a cycle to data setup. */
		data_setup_in_cycles++;
		/* Synchronize the data setup time with the cycles. */
		data_setup_in_ns += clock_period_in_ns;
		/* Adjust tEYE accordingly. */
		tEYE += clock_period_in_ns;

		/*
		 * Decrease the ideal sample delay by one half cycle, to keep it
		 * in the middle of the eye.
		 */
		ideal_sample_delay_in_ns -= (clock_period_in_ns >> 1);

		/* ...and one less period for the delay time. */
		ideal_sample_delay_in_ns -= clock_period_in_ns;

		/* Jam a negative ideal sample delay to zero. */
		if (ideal_sample_delay_in_ns < 0)
			ideal_sample_delay_in_ns = 0;

		/*
		 * We have a new ideal sample delay, so re-compute the quantized
		 * delay.
		 */
		sample_delay_factor =
			ns_to_cycles(
				ideal_sample_delay_in_ns << dll_delay_shift,
						clock_period_in_ns, 0);

		if (sample_delay_factor > nfc->max_sample_delay_factor)
			sample_delay_factor = nfc->max_sample_delay_factor;
	}

	/* Control arrives here when we're ready to return our results. */
return_results:
	hw->data_setup_in_cycles    = data_setup_in_cycles;
	hw->data_hold_in_cycles     = data_hold_in_cycles;
	hw->address_setup_in_cycles = address_setup_in_cycles;
	hw->use_half_periods        = dll_use_half_periods;
	hw->sample_delay_factor     = sample_delay_factor;

	/* Return success. */
	return 0;
}
693 | |||
/*
 * gpmi_begin - prepare the GPMI for a run of I/O operations.
 *
 * Enables the GPMI clock, programs the ready/busy timeout and the computed
 * setup/hold timings, and (when a sample delay is required) configures and
 * settles the read-data DLL.
 *
 * NOTE: on success the clock is deliberately left enabled; gpmi_end()
 * disables it again.
 */
void gpmi_begin(struct gpmi_nand_data *this)
{
	struct resources *r = &this->resources;
	struct timing_threshod *nfc = &timing_default_threshold;
	unsigned char *gpmi_regs = r->gpmi_regs;
	unsigned int clock_period_in_ns;
	uint32_t reg;
	unsigned int dll_wait_time_in_us;
	struct gpmi_nfc_hardware_timing hw;
	int ret;

	/* Enable the clock. */
	ret = clk_enable(r->clock);
	if (ret) {
		pr_err("We failed in enable the clk\n");
		goto err_out;
	}

	/* set ready/busy timeout */
	writel(0x500 << BP_GPMI_TIMING1_BUSY_TIMEOUT,
		gpmi_regs + HW_GPMI_TIMING1);

	/* Get the timing information we need. */
	nfc->clock_frequency_in_hz = clk_get_rate(r->clock);
	clock_period_in_ns = 1000000000 / nfc->clock_frequency_in_hz;

	gpmi_nfc_compute_hardware_timing(this, &hw);

	/* Set up all the simple timing parameters. */
	reg = BF_GPMI_TIMING0_ADDRESS_SETUP(hw.address_setup_in_cycles) |
		BF_GPMI_TIMING0_DATA_HOLD(hw.data_hold_in_cycles)       |
		BF_GPMI_TIMING0_DATA_SETUP(hw.data_setup_in_cycles)     ;

	writel(reg, gpmi_regs + HW_GPMI_TIMING0);

	/*
	 * DLL_ENABLE must be set to 0 when setting RDN_DELAY or HALF_PERIOD.
	 */
	writel(BM_GPMI_CTRL1_DLL_ENABLE, gpmi_regs + HW_GPMI_CTRL1_CLR);

	/* Clear out the DLL control fields. */
	writel(BM_GPMI_CTRL1_RDN_DELAY, gpmi_regs + HW_GPMI_CTRL1_CLR);
	writel(BM_GPMI_CTRL1_HALF_PERIOD, gpmi_regs + HW_GPMI_CTRL1_CLR);

	/*
	 * If no sample delay is called for, return immediately (with the
	 * clock still running, as on every successful path).
	 */
	if (!hw.sample_delay_factor)
		return;

	/* Configure the HALF_PERIOD flag. */
	if (hw.use_half_periods)
		writel(BM_GPMI_CTRL1_HALF_PERIOD,
			gpmi_regs + HW_GPMI_CTRL1_SET);

	/* Set the delay factor. */
	writel(BF_GPMI_CTRL1_RDN_DELAY(hw.sample_delay_factor),
		gpmi_regs + HW_GPMI_CTRL1_SET);

	/* Enable the DLL. */
	writel(BM_GPMI_CTRL1_DLL_ENABLE, gpmi_regs + HW_GPMI_CTRL1_SET);

	/*
	 * After we enable the GPMI DLL, we have to wait 64 clock cycles before
	 * we can use the GPMI.
	 *
	 * Calculate the amount of time we need to wait, in microseconds.
	 */
	dll_wait_time_in_us = (clock_period_in_ns * 64) / 1000;

	if (!dll_wait_time_in_us)
		dll_wait_time_in_us = 1;

	/* Wait for the DLL to settle. */
	udelay(dll_wait_time_in_us);

err_out:
	return;
}
772 | |||
773 | void gpmi_end(struct gpmi_nand_data *this) | ||
774 | { | ||
775 | struct resources *r = &this->resources; | ||
776 | clk_disable(r->clock); | ||
777 | } | ||
778 | |||
779 | /* Clears a BCH interrupt. */ | ||
780 | void gpmi_clear_bch(struct gpmi_nand_data *this) | ||
781 | { | ||
782 | struct resources *r = &this->resources; | ||
783 | writel(BM_BCH_CTRL_COMPLETE_IRQ, r->bch_regs + HW_BCH_CTRL_CLR); | ||
784 | } | ||
785 | |||
786 | /* Returns the Ready/Busy status of the given chip. */ | ||
787 | int gpmi_is_ready(struct gpmi_nand_data *this, unsigned chip) | ||
788 | { | ||
789 | struct resources *r = &this->resources; | ||
790 | uint32_t mask = 0; | ||
791 | uint32_t reg = 0; | ||
792 | |||
793 | if (GPMI_IS_MX23(this)) { | ||
794 | mask = MX23_BM_GPMI_DEBUG_READY0 << chip; | ||
795 | reg = readl(r->gpmi_regs + HW_GPMI_DEBUG); | ||
796 | } else if (GPMI_IS_MX28(this)) { | ||
797 | mask = MX28_BF_GPMI_STAT_READY_BUSY(1 << chip); | ||
798 | reg = readl(r->gpmi_regs + HW_GPMI_STAT); | ||
799 | } else | ||
800 | pr_err("unknow arch.\n"); | ||
801 | return reg & mask; | ||
802 | } | ||
803 | |||
804 | static inline void set_dma_type(struct gpmi_nand_data *this, | ||
805 | enum dma_ops_type type) | ||
806 | { | ||
807 | this->last_dma_type = this->dma_type; | ||
808 | this->dma_type = type; | ||
809 | } | ||
810 | |||
/*
 * gpmi_send_command - send the command/address bytes in this->cmd_buffer.
 *
 * Chains two DMA descriptors: first a PIO descriptor that programs CTRL0
 * for a CLE write of this->command_length bytes, then a slave-sg
 * descriptor that streams the command buffer itself.
 *
 * Returns whatever start_dma_without_bch_irq() returns, or -1 if
 * descriptor preparation fails.
 */
int gpmi_send_command(struct gpmi_nand_data *this)
{
	struct dma_chan *channel = get_dma_chan(this);
	struct dma_async_tx_descriptor *desc;
	struct scatterlist *sgl;
	int chip = this->current_chip;
	u32 pio[3];

	/* [1] send out the PIO words */
	pio[0] = BF_GPMI_CTRL0_COMMAND_MODE(BV_GPMI_CTRL0_COMMAND_MODE__WRITE)
		| BM_GPMI_CTRL0_WORD_LENGTH
		| BF_GPMI_CTRL0_CS(chip, this)
		| BF_GPMI_CTRL0_LOCK_CS(LOCK_CS_ENABLE, this)
		| BF_GPMI_CTRL0_ADDRESS(BV_GPMI_CTRL0_ADDRESS__NAND_CLE)
		| BM_GPMI_CTRL0_ADDRESS_INCREMENT
		| BF_GPMI_CTRL0_XFER_COUNT(this->command_length);
	pio[1] = pio[2] = 0;
	/*
	 * NOTE(review): the cast below appears to rely on the mxs-dma driver
	 * interpreting a DMA_NONE "scatterlist" as an array of raw PIO words
	 * -- confirm against the dmaengine implementation before changing.
	 */
	desc = channel->device->device_prep_slave_sg(channel,
					(struct scatterlist *)pio,
					ARRAY_SIZE(pio), DMA_NONE, 0);
	if (!desc) {
		pr_err("step 1 error\n");
		return -1;
	}

	/* [2] send out the COMMAND + ADDRESS string stored in @buffer */
	sgl = &this->cmd_sgl;

	sg_init_one(sgl, this->cmd_buffer, this->command_length);
	dma_map_sg(this->dev, sgl, 1, DMA_TO_DEVICE);
	desc = channel->device->device_prep_slave_sg(channel,
					sgl, 1, DMA_TO_DEVICE, 1);
	if (!desc) {
		pr_err("step 2 error\n");
		return -1;
	}

	/* [3] submit the DMA */
	set_dma_type(this, DMA_FOR_COMMAND);
	return start_dma_without_bch_irq(this, desc);
}
852 | |||
/*
 * Write this->upper_len raw (non-ECC) bytes from this->upper_buf to the
 * NAND chip.  Stage 1 is a PIO transfer configuring the GPMI for a data
 * write; stage 2 streams the data mapped by prepare_data_dma(), which
 * may be the caller's buffer or the driver's bounce buffer.
 */
int gpmi_send_data(struct gpmi_nand_data *this)
{
	struct dma_async_tx_descriptor *desc;
	struct dma_chan *channel = get_dma_chan(this);
	int chip = this->current_chip;
	uint32_t command_mode;
	uint32_t address;
	u32 pio[2];

	/* [1] PIO */
	command_mode = BV_GPMI_CTRL0_COMMAND_MODE__WRITE;
	address = BV_GPMI_CTRL0_ADDRESS__NAND_DATA;

	pio[0] = BF_GPMI_CTRL0_COMMAND_MODE(command_mode)
		| BM_GPMI_CTRL0_WORD_LENGTH
		| BF_GPMI_CTRL0_CS(chip, this)
		| BF_GPMI_CTRL0_LOCK_CS(LOCK_CS_ENABLE, this)
		| BF_GPMI_CTRL0_ADDRESS(address)
		| BF_GPMI_CTRL0_XFER_COUNT(this->upper_len);
	pio[1] = 0;
	desc = channel->device->device_prep_slave_sg(channel,
					(struct scatterlist *)pio,
					ARRAY_SIZE(pio), DMA_NONE, 0);
	if (!desc) {
		pr_err("step 1 error\n");
		return -1;
	}

	/* [2] send DMA request */
	prepare_data_dma(this, DMA_TO_DEVICE);
	desc = channel->device->device_prep_slave_sg(channel, &this->data_sgl,
						1, DMA_TO_DEVICE, 1);
	if (!desc) {
		pr_err("step 2 error\n");
		return -1;
	}
	/* [3] submit the DMA */
	set_dma_type(this, DMA_FOR_WRITE_DATA);
	return start_dma_without_bch_irq(this, desc);
}
893 | |||
/*
 * Read this->upper_len raw (non-ECC) bytes from the NAND chip into the
 * buffer mapped by prepare_data_dma().  Same two-stage PIO + data DMA
 * pattern as gpmi_send_data(), but in the READ direction.
 */
int gpmi_read_data(struct gpmi_nand_data *this)
{
	struct dma_async_tx_descriptor *desc;
	struct dma_chan *channel = get_dma_chan(this);
	int chip = this->current_chip;
	u32 pio[2];

	/* [1] : send PIO */
	pio[0] = BF_GPMI_CTRL0_COMMAND_MODE(BV_GPMI_CTRL0_COMMAND_MODE__READ)
		| BM_GPMI_CTRL0_WORD_LENGTH
		| BF_GPMI_CTRL0_CS(chip, this)
		| BF_GPMI_CTRL0_LOCK_CS(LOCK_CS_ENABLE, this)
		| BF_GPMI_CTRL0_ADDRESS(BV_GPMI_CTRL0_ADDRESS__NAND_DATA)
		| BF_GPMI_CTRL0_XFER_COUNT(this->upper_len);
	pio[1] = 0;
	desc = channel->device->device_prep_slave_sg(channel,
					(struct scatterlist *)pio,
					ARRAY_SIZE(pio), DMA_NONE, 0);
	if (!desc) {
		pr_err("step 1 error\n");
		return -1;
	}

	/* [2] : send DMA request */
	prepare_data_dma(this, DMA_FROM_DEVICE);
	desc = channel->device->device_prep_slave_sg(channel, &this->data_sgl,
						1, DMA_FROM_DEVICE, 1);
	if (!desc) {
		pr_err("step 2 error\n");
		return -1;
	}

	/* [3] : submit the DMA */
	set_dma_type(this, DMA_FOR_READ_DATA);
	return start_dma_with_bch_irq(this, desc);
}
930 | |||
/*
 * Write one full page (payload + auxiliary/metadata) with BCH encoding.
 *
 * The GPMI is given the payload and auxiliary buffer bus addresses in
 * the PIO words; the BCH block generates the ECC on the fly, so there is
 * no separate data DMA stage.  Completion is signalled by the BCH IRQ.
 */
int gpmi_send_page(struct gpmi_nand_data *this,
			dma_addr_t payload, dma_addr_t auxiliary)
{
	struct bch_geometry *geo = &this->bch_geometry;
	uint32_t command_mode;
	uint32_t address;
	uint32_t ecc_command;
	uint32_t buffer_mask;
	struct dma_async_tx_descriptor *desc;
	struct dma_chan *channel = get_dma_chan(this);
	int chip = this->current_chip;
	u32 pio[6];

	/* A DMA descriptor that does an ECC page write (BCH encode). */
	command_mode = BV_GPMI_CTRL0_COMMAND_MODE__WRITE;
	address = BV_GPMI_CTRL0_ADDRESS__NAND_DATA;
	ecc_command = BV_GPMI_ECCCTRL_ECC_CMD__BCH_ENCODE;
	buffer_mask = BV_GPMI_ECCCTRL_BUFFER_MASK__BCH_PAGE |
				BV_GPMI_ECCCTRL_BUFFER_MASK__BCH_AUXONLY;

	/* XFER_COUNT is 0: the BCH block drives the transfer length. */
	pio[0] = BF_GPMI_CTRL0_COMMAND_MODE(command_mode)
		| BM_GPMI_CTRL0_WORD_LENGTH
		| BF_GPMI_CTRL0_CS(chip, this)
		| BF_GPMI_CTRL0_LOCK_CS(LOCK_CS_ENABLE, this)
		| BF_GPMI_CTRL0_ADDRESS(address)
		| BF_GPMI_CTRL0_XFER_COUNT(0);
	pio[1] = 0;
	pio[2] = BM_GPMI_ECCCTRL_ENABLE_ECC
		| BF_GPMI_ECCCTRL_ECC_CMD(ecc_command)
		| BF_GPMI_ECCCTRL_BUFFER_MASK(buffer_mask);
	pio[3] = geo->page_size;
	pio[4] = payload;
	pio[5] = auxiliary;

	desc = channel->device->device_prep_slave_sg(channel,
					(struct scatterlist *)pio,
					ARRAY_SIZE(pio), DMA_NONE, 0);
	if (!desc) {
		pr_err("step 2 error\n");
		return -1;
	}
	set_dma_type(this, DMA_FOR_WRITE_ECC_PAGE);
	return start_dma_with_bch_irq(this, desc);
}
975 | |||
/*
 * Read one full page with BCH decoding into @payload/@auxiliary.
 *
 * Three chained descriptors: wait for chip ready, read through the BCH
 * block, then a final wait-for-ready that lets the BCH pipeline drain.
 * Completion is signalled by the BCH IRQ (start_dma_with_bch_irq()).
 */
int gpmi_read_page(struct gpmi_nand_data *this,
				dma_addr_t payload, dma_addr_t auxiliary)
{
	struct bch_geometry *geo = &this->bch_geometry;
	uint32_t command_mode;
	uint32_t address;
	uint32_t ecc_command;
	uint32_t buffer_mask;
	struct dma_async_tx_descriptor *desc;
	struct dma_chan *channel = get_dma_chan(this);
	int chip = this->current_chip;
	u32 pio[6];

	/* [1] Wait for the chip to report ready. */
	command_mode = BV_GPMI_CTRL0_COMMAND_MODE__WAIT_FOR_READY;
	address = BV_GPMI_CTRL0_ADDRESS__NAND_DATA;

	pio[0] = BF_GPMI_CTRL0_COMMAND_MODE(command_mode)
		| BM_GPMI_CTRL0_WORD_LENGTH
		| BF_GPMI_CTRL0_CS(chip, this)
		| BF_GPMI_CTRL0_LOCK_CS(LOCK_CS_ENABLE, this)
		| BF_GPMI_CTRL0_ADDRESS(address)
		| BF_GPMI_CTRL0_XFER_COUNT(0);
	pio[1] = 0;
	/* Only the first two PIO words are used for this stage. */
	desc = channel->device->device_prep_slave_sg(channel,
				(struct scatterlist *)pio, 2, DMA_NONE, 0);
	if (!desc) {
		pr_err("step 1 error\n");
		return -1;
	}

	/* [2] Enable the BCH block and read. */
	command_mode = BV_GPMI_CTRL0_COMMAND_MODE__READ;
	address = BV_GPMI_CTRL0_ADDRESS__NAND_DATA;
	ecc_command = BV_GPMI_ECCCTRL_ECC_CMD__BCH_DECODE;
	buffer_mask = BV_GPMI_ECCCTRL_BUFFER_MASK__BCH_PAGE
			| BV_GPMI_ECCCTRL_BUFFER_MASK__BCH_AUXONLY;

	pio[0] = BF_GPMI_CTRL0_COMMAND_MODE(command_mode)
		| BM_GPMI_CTRL0_WORD_LENGTH
		| BF_GPMI_CTRL0_CS(chip, this)
		| BF_GPMI_CTRL0_LOCK_CS(LOCK_CS_ENABLE, this)
		| BF_GPMI_CTRL0_ADDRESS(address)
		| BF_GPMI_CTRL0_XFER_COUNT(geo->page_size);

	pio[1] = 0;
	pio[2] = BM_GPMI_ECCCTRL_ENABLE_ECC
		| BF_GPMI_ECCCTRL_ECC_CMD(ecc_command)
		| BF_GPMI_ECCCTRL_BUFFER_MASK(buffer_mask);
	pio[3] = geo->page_size;
	pio[4] = payload;
	pio[5] = auxiliary;
	desc = channel->device->device_prep_slave_sg(channel,
					(struct scatterlist *)pio,
					ARRAY_SIZE(pio), DMA_NONE, 1);
	if (!desc) {
		pr_err("step 2 error\n");
		return -1;
	}

	/* [3] Disable the BCH block */
	command_mode = BV_GPMI_CTRL0_COMMAND_MODE__WAIT_FOR_READY;
	address = BV_GPMI_CTRL0_ADDRESS__NAND_DATA;

	pio[0] = BF_GPMI_CTRL0_COMMAND_MODE(command_mode)
		| BM_GPMI_CTRL0_WORD_LENGTH
		| BF_GPMI_CTRL0_CS(chip, this)
		| BF_GPMI_CTRL0_LOCK_CS(LOCK_CS_ENABLE, this)
		| BF_GPMI_CTRL0_ADDRESS(address)
		| BF_GPMI_CTRL0_XFER_COUNT(geo->page_size);
	pio[1] = 0;
	desc = channel->device->device_prep_slave_sg(channel,
				(struct scatterlist *)pio, 2, DMA_NONE, 1);
	if (!desc) {
		pr_err("step 3 error\n");
		return -1;
	}

	/* [4] submit the DMA */
	set_dma_type(this, DMA_FOR_READ_ECC_PAGE);
	return start_dma_with_bch_irq(this, desc);
}
diff --git a/drivers/mtd/nand/gpmi-nand/gpmi-nand.c b/drivers/mtd/nand/gpmi-nand/gpmi-nand.c new file mode 100644 index 000000000000..071b63420f0e --- /dev/null +++ b/drivers/mtd/nand/gpmi-nand/gpmi-nand.c | |||
@@ -0,0 +1,1619 @@ | |||
1 | /* | ||
2 | * Freescale GPMI NAND Flash Driver | ||
3 | * | ||
4 | * Copyright (C) 2010-2011 Freescale Semiconductor, Inc. | ||
5 | * Copyright (C) 2008 Embedded Alley Solutions, Inc. | ||
6 | * | ||
7 | * This program is free software; you can redistribute it and/or modify | ||
8 | * it under the terms of the GNU General Public License as published by | ||
9 | * the Free Software Foundation; either version 2 of the License, or | ||
10 | * (at your option) any later version. | ||
11 | * | ||
12 | * This program is distributed in the hope that it will be useful, | ||
13 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
14 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
15 | * GNU General Public License for more details. | ||
16 | * | ||
17 | * You should have received a copy of the GNU General Public License along | ||
18 | * with this program; if not, write to the Free Software Foundation, Inc., | ||
19 | * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. | ||
20 | */ | ||
21 | #include <linux/clk.h> | ||
22 | #include <linux/slab.h> | ||
23 | #include <linux/interrupt.h> | ||
24 | #include <linux/mtd/gpmi-nand.h> | ||
25 | #include <linux/mtd/partitions.h> | ||
26 | |||
27 | #include "gpmi-nand.h" | ||
28 | |||
/* Add our own bad-block-table descriptor: a single 0xff pattern byte. */
static uint8_t scan_ff_pattern[] = { 0xff };
static struct nand_bbt_descr gpmi_bbt_descr = {
	.options	= 0,
	.offs		= 0,
	.len		= 1,
	.pattern	= scan_ff_pattern
};

/*
 * We will use all the (page + OOB): the hardware ECC covers the whole
 * page, so no OOB bytes are exposed as free or as ECC positions.
 */
static struct nand_ecclayout gpmi_hw_ecclayout = {
	.eccbytes = 0,
	.eccpos = { 0, },
	.oobfree = { {.offset = 0, .length = 0} }
};
44 | |||
/*
 * BCH interrupt handler: acknowledge the hardware IRQ and wake whoever
 * is blocked in start_dma_with_bch_irq() on bch_done.
 */
static irqreturn_t bch_irq(int irq, void *cookie)
{
	struct gpmi_nand_data *this = cookie;

	gpmi_clear_bch(this);
	complete(&this->bch_done);
	return IRQ_HANDLED;
}
53 | |||
54 | /* | ||
55 | * Calculate the ECC strength by hand: | ||
56 | * E : The ECC strength. | ||
57 | * G : the length of Galois Field. | ||
58 | * N : The chunk count of per page. | ||
59 | * O : the oobsize of the NAND chip. | ||
60 | * M : the metasize of per page. | ||
61 | * | ||
62 | * The formula is : | ||
63 | * E * G * N | ||
64 | * ------------ <= (O - M) | ||
65 | * 8 | ||
66 | * | ||
67 | * So, we get E by: | ||
68 | * (O - M) * 8 | ||
69 | * E <= ------------- | ||
70 | * G * N | ||
71 | */ | ||
72 | static inline int get_ecc_strength(struct gpmi_nand_data *this) | ||
73 | { | ||
74 | struct bch_geometry *geo = &this->bch_geometry; | ||
75 | struct mtd_info *mtd = &this->mtd; | ||
76 | int ecc_strength; | ||
77 | |||
78 | ecc_strength = ((mtd->oobsize - geo->metadata_size) * 8) | ||
79 | / (geo->gf_len * geo->ecc_chunk_count); | ||
80 | |||
81 | /* We need the minor even number. */ | ||
82 | return round_down(ecc_strength, 2); | ||
83 | } | ||
84 | |||
/*
 * Compute the BCH geometry (chunk size/count, ECC strength, buffer
 * sizes) from the chip's page and OOB sizes, and - when block-mark
 * swapping is enabled - the position of the physical bad-block mark
 * within the ECC-based view of the page.  Returns 0 or -EINVAL.
 */
int common_nfc_set_geometry(struct gpmi_nand_data *this)
{
	struct bch_geometry *geo = &this->bch_geometry;
	struct mtd_info *mtd = &this->mtd;
	unsigned int metadata_size;
	unsigned int status_size;
	unsigned int block_mark_bit_offset;

	/*
	 * The size of the metadata can be changed, though we set it to 10
	 * bytes now. But it can't be too large, because we have to save
	 * enough space for BCH.
	 */
	geo->metadata_size = 10;

	/* The default for the length of Galois Field. */
	geo->gf_len = 13;

	/* Default chunk size; grow it until it is at least the OOB size. */
	geo->ecc_chunk_size = 512;
	while (geo->ecc_chunk_size < mtd->oobsize)
		geo->ecc_chunk_size *= 2; /* keep C >= O */

	geo->ecc_chunk_count = mtd->writesize / geo->ecc_chunk_size;

	/* We use the same ECC strength for all chunks. */
	geo->ecc_strength = get_ecc_strength(this);
	if (!geo->ecc_strength) {
		pr_err("We get a wrong ECC strength.\n");
		return -EINVAL;
	}

	geo->page_size = mtd->writesize + mtd->oobsize;
	geo->payload_size = mtd->writesize;

	/*
	 * The auxiliary buffer contains the metadata and the ECC status. The
	 * metadata is padded to the nearest 32-bit boundary. The ECC status
	 * contains one byte for every ECC chunk, and is also padded to the
	 * nearest 32-bit boundary.
	 */
	metadata_size = ALIGN(geo->metadata_size, 4);
	status_size   = ALIGN(geo->ecc_chunk_count, 4);

	geo->auxiliary_size = metadata_size + status_size;
	geo->auxiliary_status_offset = metadata_size;

	if (!this->swap_block_mark)
		return 0;

	/*
	 * We need to compute the byte and bit offsets of
	 * the physical block mark within the ECC-based view of the page.
	 *
	 * NAND chip with 2K page shows below:
	 *                                             (Block Mark)
	 *                                                   |      |
	 *                                                   |  D   |
	 *                                                   |<---->|
	 *                                                   V      V
	 *    +---+----------+-+----------+-+----------+-+----------+-+
	 *    | M |   data   |E|   data   |E|   data   |E|   data   |E|
	 *    +---+----------+-+----------+-+----------+-+----------+-+
	 *
	 * The position of block mark moves forward in the ECC-based view
	 * of page, and the delta is:
	 *
	 *                   E * G * (N - 1)
	 *             D = (---------------- + M)
	 *                          8
	 *
	 * With the formula to compute the ECC strength, and the condition
	 *       : C >= O         (C is the ecc chunk size)
	 *
	 * It's easy to deduce to the following result:
	 *
	 *         E * G       (O - M)      C - M         C - M
	 *      ----------- <= ------- <=  --------  <  ---------
	 *           8            N           N          (N - 1)
	 *
	 *  So, we get:
	 *
	 *                   E * G * (N - 1)
	 *             D = (---------------- + M) < C
	 *                          8
	 *
	 *  The above inequality means the position of block mark
	 *  within the ECC-based view of the page is still in the data chunk,
	 *  and it's NOT in the ECC bits of the chunk.
	 *
	 *  Use the following to compute the bit position of the
	 *  physical block mark within the ECC-based view of the page:
	 *          (page_size - D) * 8
	 *
	 *  --Huang Shijie
	 */
	block_mark_bit_offset = mtd->writesize * 8 -
		(geo->ecc_strength * geo->gf_len * (geo->ecc_chunk_count - 1)
				+ geo->metadata_size * 8);

	geo->block_mark_byte_offset = block_mark_bit_offset / 8;
	geo->block_mark_bit_offset  = block_mark_bit_offset % 8;
	return 0;
}
189 | |||
190 | struct dma_chan *get_dma_chan(struct gpmi_nand_data *this) | ||
191 | { | ||
192 | int chipnr = this->current_chip; | ||
193 | |||
194 | return this->dma_chans[chipnr]; | ||
195 | } | ||
196 | |||
/*
 * Map this->upper_buf for DMA, falling back to the driver's bounce
 * buffer (data_buffer_dma) when the upper layer's buffer cannot be
 * mapped directly.  direct_dma_map_ok records which path was taken so
 * dma_irq_callback() knows whether a copy-back is needed on reads.
 */
void prepare_data_dma(struct gpmi_nand_data *this, enum dma_data_direction dr)
{
	struct scatterlist *sgl = &this->data_sgl;
	int ret;

	this->direct_dma_map_ok = true;

	/* first try to map the upper buffer directly */
	sg_init_one(sgl, this->upper_buf, this->upper_len);
	ret = dma_map_sg(this->dev, sgl, 1, dr);
	if (ret == 0) {
		/* We have to use our own DMA buffer. */
		sg_init_one(sgl, this->data_buffer_dma, PAGE_SIZE);

		/* For writes, the data must be staged into the bounce buffer. */
		if (dr == DMA_TO_DEVICE)
			memcpy(this->data_buffer_dma, this->upper_buf,
				this->upper_len);

		ret = dma_map_sg(this->dev, sgl, 1, dr);
		if (ret == 0)
			pr_err("map failed.\n");

		this->direct_dma_map_ok = false;
	}
}
223 | |||
/*
 * This will be called after the DMA operation is finished.
 *
 * Wakes the waiter on dma_done, then unmaps (and for bounced reads,
 * copies back) the buffers according to the operation type recorded by
 * set_dma_type().  ECC page operations are finished by the BCH IRQ.
 *
 * NOTE(review): complete() fires before the unmap/copy-back below; the
 * waiter appears serialized by waiting on the BCH IRQ or issuing the
 * next command, but confirm there is no window where the upper layer
 * reads the buffer before the copy-back runs.
 */
static void dma_irq_callback(void *param)
{
	struct gpmi_nand_data *this = param;
	struct completion *dma_c = &this->dma_done;

	complete(dma_c);

	switch (this->dma_type) {
	case DMA_FOR_COMMAND:
		dma_unmap_sg(this->dev, &this->cmd_sgl, 1, DMA_TO_DEVICE);
		break;

	case DMA_FOR_READ_DATA:
		dma_unmap_sg(this->dev, &this->data_sgl, 1, DMA_FROM_DEVICE);
		/* Bounced read: copy the data back to the upper buffer. */
		if (this->direct_dma_map_ok == false)
			memcpy(this->upper_buf, this->data_buffer_dma,
				this->upper_len);
		break;

	case DMA_FOR_WRITE_DATA:
		dma_unmap_sg(this->dev, &this->data_sgl, 1, DMA_TO_DEVICE);
		break;

	case DMA_FOR_READ_ECC_PAGE:
	case DMA_FOR_WRITE_ECC_PAGE:
		/* We have to wait the BCH interrupt to finish. */
		break;

	default:
		pr_err("in wrong DMA operation.\n");
	}
}
257 | |||
/*
 * Submit @desc and block until the DMA completion fires (signalled by
 * dma_irq_callback()), with a one-second timeout.  Returns 0 on success
 * or -ETIMEDOUT.
 */
int start_dma_without_bch_irq(struct gpmi_nand_data *this,
				struct dma_async_tx_descriptor *desc)
{
	struct completion *dma_c = &this->dma_done;
	int err;

	init_completion(dma_c);

	desc->callback		= dma_irq_callback;
	desc->callback_param	= this;
	dmaengine_submit(desc);

	/* Wait for the interrupt from the DMA block. */
	err = wait_for_completion_timeout(dma_c, msecs_to_jiffies(1000));
	if (!err) {
		pr_err("DMA timeout, last DMA :%d\n", this->last_dma_type);
		gpmi_dump_info(this);
		return -ETIMEDOUT;
	}
	return 0;
}
279 | |||
280 | /* | ||
281 | * This function is used in BCH reading or BCH writing pages. | ||
282 | * It will wait for the BCH interrupt as long as ONE second. | ||
283 | * Actually, we must wait for two interrupts : | ||
284 | * [1] firstly the DMA interrupt and | ||
285 | * [2] secondly the BCH interrupt. | ||
286 | */ | ||
287 | int start_dma_with_bch_irq(struct gpmi_nand_data *this, | ||
288 | struct dma_async_tx_descriptor *desc) | ||
289 | { | ||
290 | struct completion *bch_c = &this->bch_done; | ||
291 | int err; | ||
292 | |||
293 | /* Prepare to receive an interrupt from the BCH block. */ | ||
294 | init_completion(bch_c); | ||
295 | |||
296 | /* start the DMA */ | ||
297 | start_dma_without_bch_irq(this, desc); | ||
298 | |||
299 | /* Wait for the interrupt from the BCH block. */ | ||
300 | err = wait_for_completion_timeout(bch_c, msecs_to_jiffies(1000)); | ||
301 | if (!err) { | ||
302 | pr_err("BCH timeout, last DMA :%d\n", this->last_dma_type); | ||
303 | gpmi_dump_info(this); | ||
304 | return -ETIMEDOUT; | ||
305 | } | ||
306 | return 0; | ||
307 | } | ||
308 | |||
309 | static int __devinit | ||
310 | acquire_register_block(struct gpmi_nand_data *this, const char *res_name) | ||
311 | { | ||
312 | struct platform_device *pdev = this->pdev; | ||
313 | struct resources *res = &this->resources; | ||
314 | struct resource *r; | ||
315 | void *p; | ||
316 | |||
317 | r = platform_get_resource_byname(pdev, IORESOURCE_MEM, res_name); | ||
318 | if (!r) { | ||
319 | pr_err("Can't get resource for %s\n", res_name); | ||
320 | return -ENXIO; | ||
321 | } | ||
322 | |||
323 | p = ioremap(r->start, resource_size(r)); | ||
324 | if (!p) { | ||
325 | pr_err("Can't remap %s\n", res_name); | ||
326 | return -ENOMEM; | ||
327 | } | ||
328 | |||
329 | if (!strcmp(res_name, GPMI_NAND_GPMI_REGS_ADDR_RES_NAME)) | ||
330 | res->gpmi_regs = p; | ||
331 | else if (!strcmp(res_name, GPMI_NAND_BCH_REGS_ADDR_RES_NAME)) | ||
332 | res->bch_regs = p; | ||
333 | else | ||
334 | pr_err("unknown resource name : %s\n", res_name); | ||
335 | |||
336 | return 0; | ||
337 | } | ||
338 | |||
339 | static void release_register_block(struct gpmi_nand_data *this) | ||
340 | { | ||
341 | struct resources *res = &this->resources; | ||
342 | if (res->gpmi_regs) | ||
343 | iounmap(res->gpmi_regs); | ||
344 | if (res->bch_regs) | ||
345 | iounmap(res->bch_regs); | ||
346 | res->gpmi_regs = NULL; | ||
347 | res->bch_regs = NULL; | ||
348 | } | ||
349 | |||
/*
 * Request the BCH interrupt and record its range in the resources
 * structure for release_bch_irq().  Returns 0 or a negative error.
 */
static int __devinit
acquire_bch_irq(struct gpmi_nand_data *this, irq_handler_t irq_h)
{
	struct platform_device *pdev = this->pdev;
	struct resources *res = &this->resources;
	const char *res_name = GPMI_NAND_BCH_INTERRUPT_RES_NAME;
	struct resource *r;
	int err;

	r = platform_get_resource_byname(pdev, IORESOURCE_IRQ, res_name);
	if (!r) {
		pr_err("Can't get resource for %s\n", res_name);
		return -ENXIO;
	}

	/* Only r->start is requested here; the whole range is recorded. */
	err = request_irq(r->start, irq_h, 0, res_name, this);
	if (err) {
		pr_err("Can't own %s\n", res_name);
		return err;
	}

	res->bch_low_interrupt = r->start;
	res->bch_high_interrupt = r->end;
	return 0;
}
375 | |||
376 | static void release_bch_irq(struct gpmi_nand_data *this) | ||
377 | { | ||
378 | struct resources *res = &this->resources; | ||
379 | int i = res->bch_low_interrupt; | ||
380 | |||
381 | for (; i <= res->bch_high_interrupt; i++) | ||
382 | free_irq(i, this); | ||
383 | } | ||
384 | |||
/*
 * dma_request_channel() filter: accept only APBH channels whose id lies
 * within the platform resource range stashed in this->private by
 * acquire_dma_channels().  On a match, hand the channel our mxs_dma
 * data (including the per-channel IRQ) via chan->private.
 */
static bool gpmi_dma_filter(struct dma_chan *chan, void *param)
{
	struct gpmi_nand_data *this = param;
	struct resource *r = this->private;

	if (!mxs_dma_is_apbh(chan))
		return false;
	/*
	 * only catch the GPMI dma channels :
	 *	for mx23 :	MX23_DMA_GPMI0 ~ MX23_DMA_GPMI3
	 *		(These four channels share the same IRQ!)
	 *
	 *	for mx28 :	MX28_DMA_GPMI0 ~ MX28_DMA_GPMI7
	 *		(These eight channels share the same IRQ!)
	 */
	if (r->start <= chan->chan_id && chan->chan_id <= r->end) {
		chan->private = &this->dma_data;
		return true;
	}
	return false;
}
406 | |||
407 | static void release_dma_channels(struct gpmi_nand_data *this) | ||
408 | { | ||
409 | unsigned int i; | ||
410 | for (i = 0; i < DMA_CHANS; i++) | ||
411 | if (this->dma_chans[i]) { | ||
412 | dma_release_channel(this->dma_chans[i]); | ||
413 | this->dma_chans[i] = NULL; | ||
414 | } | ||
415 | } | ||
416 | |||
/*
 * Acquire one DMA channel per chip select (capped by max_chip_count),
 * wiring each channel to its interrupt from the platform resources.
 * Returns 0 on success; on failure every channel acquired so far is
 * released.
 */
static int __devinit acquire_dma_channels(struct gpmi_nand_data *this)
{
	struct platform_device *pdev = this->pdev;
	struct gpmi_nand_platform_data *pdata = this->pdata;
	struct resources *res = &this->resources;
	struct resource *r, *r_dma;
	unsigned int i;

	r = platform_get_resource_byname(pdev, IORESOURCE_DMA,
					GPMI_NAND_DMA_CHANNELS_RES_NAME);
	r_dma = platform_get_resource_byname(pdev, IORESOURCE_IRQ,
					GPMI_NAND_DMA_INTERRUPT_RES_NAME);
	if (!r || !r_dma) {
		pr_err("Can't get resource for DMA\n");
		return -ENXIO;
	}

	/* used in gpmi_dma_filter() */
	this->private = r;

	for (i = r->start; i <= r->end; i++) {
		struct dma_chan *dma_chan;
		dma_cap_mask_t mask;

		/* Don't acquire more channels than there are chips. */
		if (i - r->start >= pdata->max_chip_count)
			break;

		dma_cap_zero(mask);
		dma_cap_set(DMA_SLAVE, mask);

		/* get the DMA interrupt */
		if (r_dma->start == r_dma->end) {
			/* only register the first. */
			if (i == r->start)
				this->dma_data.chan_irq = r_dma->start;
			else
				this->dma_data.chan_irq = NO_IRQ;
		} else
			this->dma_data.chan_irq = r_dma->start + (i - r->start);

		dma_chan = dma_request_channel(mask, gpmi_dma_filter, this);
		if (!dma_chan)
			goto acquire_err;

		/* fill the first empty item */
		this->dma_chans[i - r->start] = dma_chan;
	}

	/*
	 * NOTE(review): after the loop, @i is one past the last acquired
	 * channel, so dma_high_channel is exclusive while dma_low_channel
	 * is inclusive -- confirm consumers expect this asymmetry.
	 */
	res->dma_low_channel = r->start;
	res->dma_high_channel = i;
	return 0;

acquire_err:
	pr_err("Can't acquire DMA channel %u\n", i);
	release_dma_channels(this);
	return -EINVAL;
}
474 | |||
/*
 * Acquire everything the driver needs: both register windows, the BCH
 * interrupt, the DMA channels, and the clock.  On failure each goto
 * label unwinds exactly the resources acquired before the failing step;
 * release_register_block() is safe even when only one (or neither)
 * window was mapped, since it checks each pointer.
 */
static int __devinit acquire_resources(struct gpmi_nand_data *this)
{
	struct resources *res = &this->resources;
	int ret;

	ret = acquire_register_block(this, GPMI_NAND_GPMI_REGS_ADDR_RES_NAME);
	if (ret)
		goto exit_regs;

	ret = acquire_register_block(this, GPMI_NAND_BCH_REGS_ADDR_RES_NAME);
	if (ret)
		goto exit_regs;

	ret = acquire_bch_irq(this, bch_irq);
	if (ret)
		goto exit_regs;

	ret = acquire_dma_channels(this);
	if (ret)
		goto exit_dma_channels;

	res->clock = clk_get(&this->pdev->dev, NULL);
	if (IS_ERR(res->clock)) {
		pr_err("can not get the clock\n");
		ret = -ENOENT;
		goto exit_clock;
	}
	return 0;

exit_clock:
	release_dma_channels(this);
exit_dma_channels:
	release_bch_irq(this);
exit_regs:
	release_register_block(this);
	return ret;
}
512 | |||
513 | static void release_resources(struct gpmi_nand_data *this) | ||
514 | { | ||
515 | struct resources *r = &this->resources; | ||
516 | |||
517 | clk_put(r->clock); | ||
518 | release_register_block(this); | ||
519 | release_bch_irq(this); | ||
520 | release_dma_channels(this); | ||
521 | } | ||
522 | |||
523 | static int __devinit init_hardware(struct gpmi_nand_data *this) | ||
524 | { | ||
525 | int ret; | ||
526 | |||
527 | /* | ||
528 | * This structure contains the "safe" GPMI timing that should succeed | ||
529 | * with any NAND Flash device | ||
530 | * (although, with less-than-optimal performance). | ||
531 | */ | ||
532 | struct nand_timing safe_timing = { | ||
533 | .data_setup_in_ns = 80, | ||
534 | .data_hold_in_ns = 60, | ||
535 | .address_setup_in_ns = 25, | ||
536 | .gpmi_sample_delay_in_ns = 6, | ||
537 | .tREA_in_ns = -1, | ||
538 | .tRLOH_in_ns = -1, | ||
539 | .tRHOH_in_ns = -1, | ||
540 | }; | ||
541 | |||
542 | /* Initialize the hardwares. */ | ||
543 | ret = gpmi_init(this); | ||
544 | if (ret) | ||
545 | return ret; | ||
546 | |||
547 | this->timing = safe_timing; | ||
548 | return 0; | ||
549 | } | ||
550 | |||
/*
 * Choose the buffer a page read will DMA into: map @destination
 * directly when possible, otherwise fall back to the alternate
 * (pre-mapped) buffer.  The chosen virtual/bus addresses are returned
 * through @use_virt/@use_phys and direct_dma_map_ok records which path
 * was taken for read_page_end()/read_page_swap_end().
 *
 * NOTE(review): the @alt_size < @length check runs only on the
 * dma_map_single() failure path; a non-virt_addr_valid() destination
 * falls straight to map_failed without it -- confirm callers guarantee
 * the alternate buffer is large enough in that case.
 */
static int read_page_prepare(struct gpmi_nand_data *this,
			void *destination, unsigned length,
			void *alt_virt, dma_addr_t alt_phys, unsigned alt_size,
			void **use_virt, dma_addr_t *use_phys)
{
	struct device *dev = this->dev;

	if (virt_addr_valid(destination)) {
		dma_addr_t dest_phys;

		dest_phys = dma_map_single(dev, destination,
						length, DMA_FROM_DEVICE);
		if (dma_mapping_error(dev, dest_phys)) {
			if (alt_size < length) {
				pr_err("Alternate buffer is too small\n");
				return -ENOMEM;
			}
			goto map_failed;
		}
		*use_virt = destination;
		*use_phys = dest_phys;
		this->direct_dma_map_ok = true;
		return 0;
	}

map_failed:
	*use_virt = alt_virt;
	*use_phys = alt_phys;
	this->direct_dma_map_ok = false;
	return 0;
}
582 | |||
/*
 * Undo read_page_prepare(): unmap the destination buffer, but only when
 * it was mapped directly (the alternate buffer needs no unmapping).
 */
static inline void read_page_end(struct gpmi_nand_data *this,
			void *destination, unsigned length,
			void *alt_virt, dma_addr_t alt_phys, unsigned alt_size,
			void *used_virt, dma_addr_t used_phys)
{
	if (this->direct_dma_map_ok)
		dma_unmap_single(this->dev, used_phys, length, DMA_FROM_DEVICE);
}
591 | |||
/*
 * Final stage of a page read: if the DMA went through the alternate
 * buffer, copy the data into the caller's destination.
 */
static inline void read_page_swap_end(struct gpmi_nand_data *this,
			void *destination, unsigned length,
			void *alt_virt, dma_addr_t alt_phys, unsigned alt_size,
			void *used_virt, dma_addr_t used_phys)
{
	if (!this->direct_dma_map_ok)
		memcpy(destination, alt_virt, length);
}
600 | |||
/*
 * Choose the buffer a page write will DMA from: map @source directly
 * when possible, otherwise copy it into the alternate (pre-mapped)
 * buffer.  The chosen virtual/bus addresses are returned through
 * @use_virt/@use_phys; send_page_end() decides whether to unmap by
 * comparing the used pointer against @source.
 */
static int send_page_prepare(struct gpmi_nand_data *this,
			const void *source, unsigned length,
			void *alt_virt, dma_addr_t alt_phys, unsigned alt_size,
			const void **use_virt, dma_addr_t *use_phys)
{
	struct device *dev = this->dev;

	if (virt_addr_valid(source)) {
		dma_addr_t source_phys;

		source_phys = dma_map_single(dev, (void *)source, length,
						DMA_TO_DEVICE);
		if (dma_mapping_error(dev, source_phys)) {
			if (alt_size < length) {
				pr_err("Alternate buffer is too small\n");
				return -ENOMEM;
			}
			goto map_failed;
		}
		*use_virt = source;
		*use_phys = source_phys;
		return 0;
	}
map_failed:
	/*
	 * Copy the content of the source buffer into the alternate
	 * buffer and set up the return values accordingly.
	 */
	memcpy(alt_virt, source, length);

	*use_virt = alt_virt;
	*use_phys = alt_phys;
	return 0;
}
635 | |||
636 | static void send_page_end(struct gpmi_nand_data *this, | ||
637 | const void *source, unsigned length, | ||
638 | void *alt_virt, dma_addr_t alt_phys, unsigned alt_size, | ||
639 | const void *used_virt, dma_addr_t used_phys) | ||
640 | { | ||
641 | struct device *dev = this->dev; | ||
642 | if (used_virt == source) | ||
643 | dma_unmap_single(dev, used_phys, length, DMA_TO_DEVICE); | ||
644 | } | ||
645 | |||
646 | static void gpmi_free_dma_buffer(struct gpmi_nand_data *this) | ||
647 | { | ||
648 | struct device *dev = this->dev; | ||
649 | |||
650 | if (this->page_buffer_virt && virt_addr_valid(this->page_buffer_virt)) | ||
651 | dma_free_coherent(dev, this->page_buffer_size, | ||
652 | this->page_buffer_virt, | ||
653 | this->page_buffer_phys); | ||
654 | kfree(this->cmd_buffer); | ||
655 | kfree(this->data_buffer_dma); | ||
656 | |||
657 | this->cmd_buffer = NULL; | ||
658 | this->data_buffer_dma = NULL; | ||
659 | this->page_buffer_virt = NULL; | ||
660 | this->page_buffer_size = 0; | ||
661 | } | ||
662 | |||
663 | /* Allocate the DMA buffers */ | ||
664 | static int gpmi_alloc_dma_buffer(struct gpmi_nand_data *this) | ||
665 | { | ||
666 | struct bch_geometry *geo = &this->bch_geometry; | ||
667 | struct device *dev = this->dev; | ||
668 | |||
669 | /* [1] Allocate a command buffer. PAGE_SIZE is enough. */ | ||
670 | this->cmd_buffer = kzalloc(PAGE_SIZE, GFP_DMA); | ||
671 | if (this->cmd_buffer == NULL) | ||
672 | goto error_alloc; | ||
673 | |||
674 | /* [2] Allocate a read/write data buffer. PAGE_SIZE is enough. */ | ||
675 | this->data_buffer_dma = kzalloc(PAGE_SIZE, GFP_DMA); | ||
676 | if (this->data_buffer_dma == NULL) | ||
677 | goto error_alloc; | ||
678 | |||
679 | /* | ||
680 | * [3] Allocate the page buffer. | ||
681 | * | ||
682 | * Both the payload buffer and the auxiliary buffer must appear on | ||
683 | * 32-bit boundaries. We presume the size of the payload buffer is a | ||
684 | * power of two and is much larger than four, which guarantees the | ||
685 | * auxiliary buffer will appear on a 32-bit boundary. | ||
686 | */ | ||
687 | this->page_buffer_size = geo->payload_size + geo->auxiliary_size; | ||
688 | this->page_buffer_virt = dma_alloc_coherent(dev, this->page_buffer_size, | ||
689 | &this->page_buffer_phys, GFP_DMA); | ||
690 | if (!this->page_buffer_virt) | ||
691 | goto error_alloc; | ||
692 | |||
693 | |||
694 | /* Slice up the page buffer. */ | ||
695 | this->payload_virt = this->page_buffer_virt; | ||
696 | this->payload_phys = this->page_buffer_phys; | ||
697 | this->auxiliary_virt = this->payload_virt + geo->payload_size; | ||
698 | this->auxiliary_phys = this->payload_phys + geo->payload_size; | ||
699 | return 0; | ||
700 | |||
701 | error_alloc: | ||
702 | gpmi_free_dma_buffer(this); | ||
703 | pr_err("allocate DMA buffer ret!!\n"); | ||
704 | return -ENOMEM; | ||
705 | } | ||
706 | |||
/*
 * Implements the nand_chip cmd_ctrl hook: queue command/address bytes and
 * fire the whole series as a single DMA operation once MTD deasserts both
 * latch enables.
 */
static void gpmi_cmd_ctrl(struct mtd_info *mtd, int data, unsigned int ctrl)
{
	struct nand_chip *chip = mtd->priv;
	struct gpmi_nand_data *this = chip->priv;
	int ret;

	/*
	 * Every operation begins with a command byte and a series of zero or
	 * more address bytes. These are distinguished by either the Address
	 * Latch Enable (ALE) or Command Latch Enable (CLE) signals being
	 * asserted. When MTD is ready to execute the command, it will deassert
	 * both latch enables.
	 *
	 * Rather than run a separate DMA operation for every single byte, we
	 * queue them up and run a single DMA operation for the entire series
	 * of command and data bytes. NAND_CMD_NONE means the END of the queue.
	 */
	if ((ctrl & (NAND_ALE | NAND_CLE))) {
		/* A latch is asserted: accumulate the byte (if any). */
		if (data != NAND_CMD_NONE)
			this->cmd_buffer[this->command_length++] = data;
		return;
	}

	/* Latches deasserted with nothing queued: nothing to send. */
	if (!this->command_length)
		return;

	ret = gpmi_send_command(this);
	if (ret)
		pr_err("Chip: %u, Error %d\n", this->current_chip, ret);

	/* Reset the queue for the next command series. */
	this->command_length = 0;
}
739 | |||
740 | static int gpmi_dev_ready(struct mtd_info *mtd) | ||
741 | { | ||
742 | struct nand_chip *chip = mtd->priv; | ||
743 | struct gpmi_nand_data *this = chip->priv; | ||
744 | |||
745 | return gpmi_is_ready(this, this->current_chip); | ||
746 | } | ||
747 | |||
748 | static void gpmi_select_chip(struct mtd_info *mtd, int chipnr) | ||
749 | { | ||
750 | struct nand_chip *chip = mtd->priv; | ||
751 | struct gpmi_nand_data *this = chip->priv; | ||
752 | |||
753 | if ((this->current_chip < 0) && (chipnr >= 0)) | ||
754 | gpmi_begin(this); | ||
755 | else if ((this->current_chip >= 0) && (chipnr < 0)) | ||
756 | gpmi_end(this); | ||
757 | |||
758 | this->current_chip = chipnr; | ||
759 | } | ||
760 | |||
761 | static void gpmi_read_buf(struct mtd_info *mtd, uint8_t *buf, int len) | ||
762 | { | ||
763 | struct nand_chip *chip = mtd->priv; | ||
764 | struct gpmi_nand_data *this = chip->priv; | ||
765 | |||
766 | pr_debug("len is %d\n", len); | ||
767 | this->upper_buf = buf; | ||
768 | this->upper_len = len; | ||
769 | |||
770 | gpmi_read_data(this); | ||
771 | } | ||
772 | |||
773 | static void gpmi_write_buf(struct mtd_info *mtd, const uint8_t *buf, int len) | ||
774 | { | ||
775 | struct nand_chip *chip = mtd->priv; | ||
776 | struct gpmi_nand_data *this = chip->priv; | ||
777 | |||
778 | pr_debug("len is %d\n", len); | ||
779 | this->upper_buf = (uint8_t *)buf; | ||
780 | this->upper_len = len; | ||
781 | |||
782 | gpmi_send_data(this); | ||
783 | } | ||
784 | |||
785 | static uint8_t gpmi_read_byte(struct mtd_info *mtd) | ||
786 | { | ||
787 | struct nand_chip *chip = mtd->priv; | ||
788 | struct gpmi_nand_data *this = chip->priv; | ||
789 | uint8_t *buf = this->data_buffer_dma; | ||
790 | |||
791 | gpmi_read_buf(mtd, buf, 1); | ||
792 | return buf[0]; | ||
793 | } | ||
794 | |||
795 | /* | ||
796 | * Handles block mark swapping. | ||
797 | * It can be called in swapping the block mark, or swapping it back, | ||
798 | * because the the operations are the same. | ||
799 | */ | ||
800 | static void block_mark_swapping(struct gpmi_nand_data *this, | ||
801 | void *payload, void *auxiliary) | ||
802 | { | ||
803 | struct bch_geometry *nfc_geo = &this->bch_geometry; | ||
804 | unsigned char *p; | ||
805 | unsigned char *a; | ||
806 | unsigned int bit; | ||
807 | unsigned char mask; | ||
808 | unsigned char from_data; | ||
809 | unsigned char from_oob; | ||
810 | |||
811 | if (!this->swap_block_mark) | ||
812 | return; | ||
813 | |||
814 | /* | ||
815 | * If control arrives here, we're swapping. Make some convenience | ||
816 | * variables. | ||
817 | */ | ||
818 | bit = nfc_geo->block_mark_bit_offset; | ||
819 | p = payload + nfc_geo->block_mark_byte_offset; | ||
820 | a = auxiliary; | ||
821 | |||
822 | /* | ||
823 | * Get the byte from the data area that overlays the block mark. Since | ||
824 | * the ECC engine applies its own view to the bits in the page, the | ||
825 | * physical block mark won't (in general) appear on a byte boundary in | ||
826 | * the data. | ||
827 | */ | ||
828 | from_data = (p[0] >> bit) | (p[1] << (8 - bit)); | ||
829 | |||
830 | /* Get the byte from the OOB. */ | ||
831 | from_oob = a[0]; | ||
832 | |||
833 | /* Swap them. */ | ||
834 | a[0] = from_data; | ||
835 | |||
836 | mask = (0x1 << bit) - 1; | ||
837 | p[0] = (p[0] & mask) | (from_oob << bit); | ||
838 | |||
839 | mask = ~0 << bit; | ||
840 | p[1] = (p[1] & mask) | (from_oob >> (8 - bit)); | ||
841 | } | ||
842 | |||
/*
 * ecc.read_page implementation: ECC-based read of one page.
 *
 * Prepares a DMA-able payload buffer (possibly a bounce buffer), runs the
 * BCH read, undoes block mark swapping, folds the per-chunk ECC status
 * bytes into MTD's statistics, and delivers the OOB per the policy
 * documented at gpmi_ecc_read_oob(). Returns 0 or a negative error code.
 */
static int gpmi_ecc_read_page(struct mtd_info *mtd, struct nand_chip *chip,
				uint8_t *buf, int page)
{
	struct gpmi_nand_data *this = chip->priv;
	struct bch_geometry *nfc_geo = &this->bch_geometry;
	void *payload_virt;
	dma_addr_t payload_phys;
	void *auxiliary_virt;
	dma_addr_t auxiliary_phys;
	unsigned int i;
	unsigned char *status;
	unsigned int failed;
	unsigned int corrected;
	int ret;

	pr_debug("page number is : %d\n", page);
	ret = read_page_prepare(this, buf, mtd->writesize,
					this->payload_virt, this->payload_phys,
					nfc_geo->payload_size,
					&payload_virt, &payload_phys);
	if (ret) {
		pr_err("Inadequate DMA buffer\n");
		ret = -ENOMEM;
		return ret;
	}
	/* The auxiliary (metadata + status) area always uses our own buffer. */
	auxiliary_virt = this->auxiliary_virt;
	auxiliary_phys = this->auxiliary_phys;

	/* go! */
	ret = gpmi_read_page(this, payload_phys, auxiliary_phys);
	/* Unmap/finish the payload buffer regardless of the read result. */
	read_page_end(this, buf, mtd->writesize,
			this->payload_virt, this->payload_phys,
			nfc_geo->payload_size,
			payload_virt, payload_phys);
	if (ret) {
		pr_err("Error in ECC-based read: %d\n", ret);
		goto exit_nfc;
	}

	/* handle the block mark swapping */
	block_mark_swapping(this, payload_virt, auxiliary_virt);

	/* Loop over status bytes, accumulating ECC status. */
	failed = 0;
	corrected = 0;
	status = auxiliary_virt + nfc_geo->auxiliary_status_offset;

	for (i = 0; i < nfc_geo->ecc_chunk_count; i++, status++) {
		if ((*status == STATUS_GOOD) || (*status == STATUS_ERASED))
			continue;

		if (*status == STATUS_UNCORRECTABLE) {
			failed++;
			continue;
		}
		/* Any other status value is the number of corrected bits. */
		corrected += *status;
	}

	/*
	 * Propagate ECC status to the owning MTD only when failed or
	 * corrected times nearly reaches our ECC correction threshold.
	 */
	if (failed || corrected >= (nfc_geo->ecc_strength - 1)) {
		mtd->ecc_stats.failed += failed;
		mtd->ecc_stats.corrected += corrected;
	}

	/*
	 * It's time to deliver the OOB bytes. See gpmi_ecc_read_oob() for
	 * details about our policy for delivering the OOB.
	 *
	 * We fill the caller's buffer with set bits, and then copy the block
	 * mark to the caller's buffer. Note that, if block mark swapping was
	 * necessary, it has already been done, so we can rely on the first
	 * byte of the auxiliary buffer to contain the block mark.
	 */
	memset(chip->oob_poi, ~0, mtd->oobsize);
	chip->oob_poi[0] = ((uint8_t *) auxiliary_virt)[0];

	read_page_swap_end(this, buf, mtd->writesize,
			this->payload_virt, this->payload_phys,
			nfc_geo->payload_size,
			payload_virt, payload_phys);
exit_nfc:
	return ret;
}
929 | |||
/*
 * ecc.write_page implementation: ECC-based write of one page.
 *
 * With block mark swapping we must copy the caller's buffers into our own
 * (the caller's data may not be modified) and swap before sending.
 * Without swapping we try to DMA straight from the caller's buffers via
 * send_page_prepare()/send_page_end().
 */
static void gpmi_ecc_write_page(struct mtd_info *mtd,
				struct nand_chip *chip, const uint8_t *buf)
{
	struct gpmi_nand_data *this = chip->priv;
	struct bch_geometry *nfc_geo = &this->bch_geometry;
	const void *payload_virt;
	dma_addr_t payload_phys;
	const void *auxiliary_virt;
	dma_addr_t auxiliary_phys;
	int ret;

	pr_debug("ecc write page.\n");
	if (this->swap_block_mark) {
		/*
		 * If control arrives here, we're doing block mark swapping.
		 * Since we can't modify the caller's buffers, we must copy them
		 * into our own.
		 */
		memcpy(this->payload_virt, buf, mtd->writesize);
		payload_virt = this->payload_virt;
		payload_phys = this->payload_phys;

		memcpy(this->auxiliary_virt, chip->oob_poi,
				nfc_geo->auxiliary_size);
		auxiliary_virt = this->auxiliary_virt;
		auxiliary_phys = this->auxiliary_phys;

		/* Handle block mark swapping. */
		block_mark_swapping(this,
				(void *) payload_virt, (void *) auxiliary_virt);
	} else {
		/*
		 * If control arrives here, we're not doing block mark swapping,
		 * so we can try to use the caller's buffers.
		 */
		ret = send_page_prepare(this,
				buf, mtd->writesize,
				this->payload_virt, this->payload_phys,
				nfc_geo->payload_size,
				&payload_virt, &payload_phys);
		if (ret) {
			pr_err("Inadequate payload DMA buffer\n");
			return;
		}

		ret = send_page_prepare(this,
				chip->oob_poi, mtd->oobsize,
				this->auxiliary_virt, this->auxiliary_phys,
				nfc_geo->auxiliary_size,
				&auxiliary_virt, &auxiliary_phys);
		if (ret) {
			pr_err("Inadequate auxiliary DMA buffer\n");
			goto exit_auxiliary;
		}
	}

	/* Ask the NFC. */
	ret = gpmi_send_page(this, payload_phys, auxiliary_phys);
	if (ret)
		pr_err("Error in ECC-based write: %d\n", ret);

	if (!this->swap_block_mark) {
		/*
		 * Undo the direct mappings made above. NOTE: the
		 * exit_auxiliary label sits INSIDE this conditional; the
		 * goto above is taken only on the !swap_block_mark path and
		 * deliberately skips the auxiliary unmap when auxiliary
		 * preparation failed, unwinding the payload mapping only.
		 */
		send_page_end(this, chip->oob_poi, mtd->oobsize,
				this->auxiliary_virt, this->auxiliary_phys,
				nfc_geo->auxiliary_size,
				auxiliary_virt, auxiliary_phys);
exit_auxiliary:
		send_page_end(this, buf, mtd->writesize,
				this->payload_virt, this->payload_phys,
				nfc_geo->payload_size,
				payload_virt, payload_phys);
	}
}
1003 | |||
1004 | /* | ||
1005 | * There are several places in this driver where we have to handle the OOB and | ||
1006 | * block marks. This is the function where things are the most complicated, so | ||
1007 | * this is where we try to explain it all. All the other places refer back to | ||
1008 | * here. | ||
1009 | * | ||
1010 | * These are the rules, in order of decreasing importance: | ||
1011 | * | ||
1012 | * 1) Nothing the caller does can be allowed to imperil the block mark. | ||
1013 | * | ||
1014 | * 2) In read operations, the first byte of the OOB we return must reflect the | ||
1015 | * true state of the block mark, no matter where that block mark appears in | ||
1016 | * the physical page. | ||
1017 | * | ||
1018 | * 3) ECC-based read operations return an OOB full of set bits (since we never | ||
1019 | * allow ECC-based writes to the OOB, it doesn't matter what ECC-based reads | ||
1020 | * return). | ||
1021 | * | ||
1022 | * 4) "Raw" read operations return a direct view of the physical bytes in the | ||
1023 | * page, using the conventional definition of which bytes are data and which | ||
1024 | * are OOB. This gives the caller a way to see the actual, physical bytes | ||
1025 | * in the page, without the distortions applied by our ECC engine. | ||
1026 | * | ||
1027 | * | ||
1028 | * What we do for this specific read operation depends on two questions: | ||
1029 | * | ||
1030 | * 1) Are we doing a "raw" read, or an ECC-based read? | ||
1031 | * | ||
1032 | * 2) Are we using block mark swapping or transcription? | ||
1033 | * | ||
1034 | * There are four cases, illustrated by the following Karnaugh map: | ||
1035 | * | ||
1036 | * | Raw | ECC-based | | ||
1037 | * -------------+-------------------------+-------------------------+ | ||
1038 | * | Read the conventional | | | ||
1039 | * | OOB at the end of the | | | ||
1040 | * Swapping | page and return it. It | | | ||
1041 | * | contains exactly what | | | ||
1042 | * | we want. | Read the block mark and | | ||
1043 | * -------------+-------------------------+ return it in a buffer | | ||
1044 | * | Read the conventional | full of set bits. | | ||
1045 | * | OOB at the end of the | | | ||
1046 | * | page and also the block | | | ||
1047 | * Transcribing | mark in the metadata. | | | ||
1048 | * | Copy the block mark | | | ||
1049 | * | into the first byte of | | | ||
1050 | * | the OOB. | | | ||
1051 | * -------------+-------------------------+-------------------------+ | ||
1052 | * | ||
1053 | * Note that we break rule #4 in the Transcribing/Raw case because we're not | ||
1054 | * giving an accurate view of the actual, physical bytes in the page (we're | ||
1055 | * overwriting the block mark). That's OK because it's more important to follow | ||
1056 | * rule #2. | ||
1057 | * | ||
1058 | * It turns out that knowing whether we want an "ECC-based" or "raw" read is not | ||
1059 | * easy. When reading a page, for example, the NAND Flash MTD code calls our | ||
1060 | * ecc.read_page or ecc.read_page_raw function. Thus, the fact that MTD wants an | ||
1061 | * ECC-based or raw view of the page is implicit in which function it calls | ||
1062 | * (there is a similar pair of ECC-based/raw functions for writing). | ||
1063 | * | ||
1064 | * Since MTD assumes the OOB is not covered by ECC, there is no pair of | ||
 * ECC-based/raw functions for reading or writing the OOB. The fact that the
1066 | * caller wants an ECC-based or raw view of the page is not propagated down to | ||
1067 | * this driver. | ||
1068 | */ | ||
1069 | static int gpmi_ecc_read_oob(struct mtd_info *mtd, struct nand_chip *chip, | ||
1070 | int page, int sndcmd) | ||
1071 | { | ||
1072 | struct gpmi_nand_data *this = chip->priv; | ||
1073 | |||
1074 | pr_debug("page number is %d\n", page); | ||
1075 | /* clear the OOB buffer */ | ||
1076 | memset(chip->oob_poi, ~0, mtd->oobsize); | ||
1077 | |||
1078 | /* Read out the conventional OOB. */ | ||
1079 | chip->cmdfunc(mtd, NAND_CMD_READ0, mtd->writesize, page); | ||
1080 | chip->read_buf(mtd, chip->oob_poi, mtd->oobsize); | ||
1081 | |||
1082 | /* | ||
1083 | * Now, we want to make sure the block mark is correct. In the | ||
1084 | * Swapping/Raw case, we already have it. Otherwise, we need to | ||
1085 | * explicitly read it. | ||
1086 | */ | ||
1087 | if (!this->swap_block_mark) { | ||
1088 | /* Read the block mark into the first byte of the OOB buffer. */ | ||
1089 | chip->cmdfunc(mtd, NAND_CMD_READ0, 0, page); | ||
1090 | chip->oob_poi[0] = chip->read_byte(mtd); | ||
1091 | } | ||
1092 | |||
1093 | /* | ||
1094 | * Return true, indicating that the next call to this function must send | ||
1095 | * a command. | ||
1096 | */ | ||
1097 | return true; | ||
1098 | } | ||
1099 | |||
1100 | static int | ||
1101 | gpmi_ecc_write_oob(struct mtd_info *mtd, struct nand_chip *chip, int page) | ||
1102 | { | ||
1103 | /* | ||
1104 | * The BCH will use all the (page + oob). | ||
1105 | * Our gpmi_hw_ecclayout can only prohibit the JFFS2 to write the oob. | ||
1106 | * But it can not stop some ioctls such MEMWRITEOOB which uses | ||
1107 | * MTD_OPS_PLACE_OOB. So We have to implement this function to prohibit | ||
1108 | * these ioctls too. | ||
1109 | */ | ||
1110 | return -EPERM; | ||
1111 | } | ||
1112 | |||
/*
 * nand_chip block_markbad hook: mark the block containing 'ofs' bad.
 *
 * Updates the in-memory BBT (two bits per block), then either updates the
 * flash-based BBT or writes a zero marker byte directly to the medium.
 * The marker column depends on whether block mark swapping is in use.
 * Returns 0 on success or -EIO when the program operation fails.
 */
static int gpmi_block_markbad(struct mtd_info *mtd, loff_t ofs)
{
	struct nand_chip *chip = mtd->priv;
	struct gpmi_nand_data *this = chip->priv;
	int block, ret = 0;
	uint8_t *block_mark;
	int column, page, status, chipnr;

	/* Get block number */
	block = (int)(ofs >> chip->bbt_erase_shift);
	/* Two BBT bits per block: set the low bit of this block's pair. */
	if (chip->bbt)
		chip->bbt[block >> 2] |= 0x01 << ((block & 0x03) << 1);

	/* Do we have a flash based bad block table ? */
	if (chip->options & NAND_BBT_USE_FLASH)
		ret = nand_update_bbt(mtd, ofs);
	else {
		chipnr = (int)(ofs >> chip->chip_shift);
		chip->select_chip(mtd, chipnr);

		/* Swapping puts the mark in the conventional OOB position. */
		column = this->swap_block_mark ? mtd->writesize : 0;

		/* Write the block mark. */
		block_mark = this->data_buffer_dma;
		block_mark[0] = 0; /* bad block marker */

		/* Shift to get page */
		page = (int)(ofs >> chip->page_shift);

		chip->cmdfunc(mtd, NAND_CMD_SEQIN, column, page);
		chip->write_buf(mtd, block_mark, 1);
		chip->cmdfunc(mtd, NAND_CMD_PAGEPROG, -1, -1);

		status = chip->waitfunc(mtd, chip);
		if (status & NAND_STATUS_FAIL)
			ret = -EIO;

		chip->select_chip(mtd, -1);
	}
	if (!ret)
		mtd->ecc_stats.badblocks++;

	return ret;
}
1157 | |||
1158 | static int __devinit nand_boot_set_geometry(struct gpmi_nand_data *this) | ||
1159 | { | ||
1160 | struct boot_rom_geometry *geometry = &this->rom_geometry; | ||
1161 | |||
1162 | /* | ||
1163 | * Set the boot block stride size. | ||
1164 | * | ||
1165 | * In principle, we should be reading this from the OTP bits, since | ||
1166 | * that's where the ROM is going to get it. In fact, we don't have any | ||
1167 | * way to read the OTP bits, so we go with the default and hope for the | ||
1168 | * best. | ||
1169 | */ | ||
1170 | geometry->stride_size_in_pages = 64; | ||
1171 | |||
1172 | /* | ||
1173 | * Set the search area stride exponent. | ||
1174 | * | ||
1175 | * In principle, we should be reading this from the OTP bits, since | ||
1176 | * that's where the ROM is going to get it. In fact, we don't have any | ||
1177 | * way to read the OTP bits, so we go with the default and hope for the | ||
1178 | * best. | ||
1179 | */ | ||
1180 | geometry->search_area_stride_exponent = 2; | ||
1181 | return 0; | ||
1182 | } | ||
1183 | |||
/* Marker string written/scanned by the mx23 transcription-stamp helpers. */
static const char *fingerprint = "STMP";
1185 | static int __devinit mx23_check_transcription_stamp(struct gpmi_nand_data *this) | ||
1186 | { | ||
1187 | struct boot_rom_geometry *rom_geo = &this->rom_geometry; | ||
1188 | struct device *dev = this->dev; | ||
1189 | struct mtd_info *mtd = &this->mtd; | ||
1190 | struct nand_chip *chip = &this->nand; | ||
1191 | unsigned int search_area_size_in_strides; | ||
1192 | unsigned int stride; | ||
1193 | unsigned int page; | ||
1194 | loff_t byte; | ||
1195 | uint8_t *buffer = chip->buffers->databuf; | ||
1196 | int saved_chip_number; | ||
1197 | int found_an_ncb_fingerprint = false; | ||
1198 | |||
1199 | /* Compute the number of strides in a search area. */ | ||
1200 | search_area_size_in_strides = 1 << rom_geo->search_area_stride_exponent; | ||
1201 | |||
1202 | saved_chip_number = this->current_chip; | ||
1203 | chip->select_chip(mtd, 0); | ||
1204 | |||
1205 | /* | ||
1206 | * Loop through the first search area, looking for the NCB fingerprint. | ||
1207 | */ | ||
1208 | dev_dbg(dev, "Scanning for an NCB fingerprint...\n"); | ||
1209 | |||
1210 | for (stride = 0; stride < search_area_size_in_strides; stride++) { | ||
1211 | /* Compute the page and byte addresses. */ | ||
1212 | page = stride * rom_geo->stride_size_in_pages; | ||
1213 | byte = page * mtd->writesize; | ||
1214 | |||
1215 | dev_dbg(dev, "Looking for a fingerprint in page 0x%x\n", page); | ||
1216 | |||
1217 | /* | ||
1218 | * Read the NCB fingerprint. The fingerprint is four bytes long | ||
1219 | * and starts in the 12th byte of the page. | ||
1220 | */ | ||
1221 | chip->cmdfunc(mtd, NAND_CMD_READ0, 12, page); | ||
1222 | chip->read_buf(mtd, buffer, strlen(fingerprint)); | ||
1223 | |||
1224 | /* Look for the fingerprint. */ | ||
1225 | if (!memcmp(buffer, fingerprint, strlen(fingerprint))) { | ||
1226 | found_an_ncb_fingerprint = true; | ||
1227 | break; | ||
1228 | } | ||
1229 | |||
1230 | } | ||
1231 | |||
1232 | chip->select_chip(mtd, saved_chip_number); | ||
1233 | |||
1234 | if (found_an_ncb_fingerprint) | ||
1235 | dev_dbg(dev, "\tFound a fingerprint\n"); | ||
1236 | else | ||
1237 | dev_dbg(dev, "\tNo fingerprint found\n"); | ||
1238 | return found_an_ncb_fingerprint; | ||
1239 | } | ||
1240 | |||
1241 | /* Writes a transcription stamp. */ | ||
1242 | static int __devinit mx23_write_transcription_stamp(struct gpmi_nand_data *this) | ||
1243 | { | ||
1244 | struct device *dev = this->dev; | ||
1245 | struct boot_rom_geometry *rom_geo = &this->rom_geometry; | ||
1246 | struct mtd_info *mtd = &this->mtd; | ||
1247 | struct nand_chip *chip = &this->nand; | ||
1248 | unsigned int block_size_in_pages; | ||
1249 | unsigned int search_area_size_in_strides; | ||
1250 | unsigned int search_area_size_in_pages; | ||
1251 | unsigned int search_area_size_in_blocks; | ||
1252 | unsigned int block; | ||
1253 | unsigned int stride; | ||
1254 | unsigned int page; | ||
1255 | loff_t byte; | ||
1256 | uint8_t *buffer = chip->buffers->databuf; | ||
1257 | int saved_chip_number; | ||
1258 | int status; | ||
1259 | |||
1260 | /* Compute the search area geometry. */ | ||
1261 | block_size_in_pages = mtd->erasesize / mtd->writesize; | ||
1262 | search_area_size_in_strides = 1 << rom_geo->search_area_stride_exponent; | ||
1263 | search_area_size_in_pages = search_area_size_in_strides * | ||
1264 | rom_geo->stride_size_in_pages; | ||
1265 | search_area_size_in_blocks = | ||
1266 | (search_area_size_in_pages + (block_size_in_pages - 1)) / | ||
1267 | block_size_in_pages; | ||
1268 | |||
1269 | dev_dbg(dev, "Search Area Geometry :\n"); | ||
1270 | dev_dbg(dev, "\tin Blocks : %u\n", search_area_size_in_blocks); | ||
1271 | dev_dbg(dev, "\tin Strides: %u\n", search_area_size_in_strides); | ||
1272 | dev_dbg(dev, "\tin Pages : %u\n", search_area_size_in_pages); | ||
1273 | |||
1274 | /* Select chip 0. */ | ||
1275 | saved_chip_number = this->current_chip; | ||
1276 | chip->select_chip(mtd, 0); | ||
1277 | |||
1278 | /* Loop over blocks in the first search area, erasing them. */ | ||
1279 | dev_dbg(dev, "Erasing the search area...\n"); | ||
1280 | |||
1281 | for (block = 0; block < search_area_size_in_blocks; block++) { | ||
1282 | /* Compute the page address. */ | ||
1283 | page = block * block_size_in_pages; | ||
1284 | |||
1285 | /* Erase this block. */ | ||
1286 | dev_dbg(dev, "\tErasing block 0x%x\n", block); | ||
1287 | chip->cmdfunc(mtd, NAND_CMD_ERASE1, -1, page); | ||
1288 | chip->cmdfunc(mtd, NAND_CMD_ERASE2, -1, -1); | ||
1289 | |||
1290 | /* Wait for the erase to finish. */ | ||
1291 | status = chip->waitfunc(mtd, chip); | ||
1292 | if (status & NAND_STATUS_FAIL) | ||
1293 | dev_err(dev, "[%s] Erase failed.\n", __func__); | ||
1294 | } | ||
1295 | |||
1296 | /* Write the NCB fingerprint into the page buffer. */ | ||
1297 | memset(buffer, ~0, mtd->writesize); | ||
1298 | memset(chip->oob_poi, ~0, mtd->oobsize); | ||
1299 | memcpy(buffer + 12, fingerprint, strlen(fingerprint)); | ||
1300 | |||
1301 | /* Loop through the first search area, writing NCB fingerprints. */ | ||
1302 | dev_dbg(dev, "Writing NCB fingerprints...\n"); | ||
1303 | for (stride = 0; stride < search_area_size_in_strides; stride++) { | ||
1304 | /* Compute the page and byte addresses. */ | ||
1305 | page = stride * rom_geo->stride_size_in_pages; | ||
1306 | byte = page * mtd->writesize; | ||
1307 | |||
1308 | /* Write the first page of the current stride. */ | ||
1309 | dev_dbg(dev, "Writing an NCB fingerprint in page 0x%x\n", page); | ||
1310 | chip->cmdfunc(mtd, NAND_CMD_SEQIN, 0x00, page); | ||
1311 | chip->ecc.write_page_raw(mtd, chip, buffer); | ||
1312 | chip->cmdfunc(mtd, NAND_CMD_PAGEPROG, -1, -1); | ||
1313 | |||
1314 | /* Wait for the write to finish. */ | ||
1315 | status = chip->waitfunc(mtd, chip); | ||
1316 | if (status & NAND_STATUS_FAIL) | ||
1317 | dev_err(dev, "[%s] Write failed.\n", __func__); | ||
1318 | } | ||
1319 | |||
1320 | /* Deselect chip 0. */ | ||
1321 | chip->select_chip(mtd, saved_chip_number); | ||
1322 | return 0; | ||
1323 | } | ||
1324 | |||
/*
 * i.MX23 boot initialization: block mark transcription.
 *
 * The MX23 cannot use block mark swapping, so conventional bad block
 * marks must be transcribed into the metadata location. If a
 * transcription stamp is already present, nothing needs doing; otherwise
 * every block's conventional mark is read and, if bad, re-marked via
 * chip->block_markbad() (which writes to the transcribed location), and
 * finally the stamp is written. Always returns 0.
 */
static int __devinit mx23_boot_init(struct gpmi_nand_data *this)
{
	struct device *dev = this->dev;
	struct nand_chip *chip = &this->nand;
	struct mtd_info *mtd = &this->mtd;
	unsigned int block_count;
	unsigned int block;
	int     chipnr;
	int     page;
	loff_t  byte;
	uint8_t block_mark;
	int     ret = 0;

	/*
	 * If control arrives here, we can't use block mark swapping, which
	 * means we're forced to use transcription. First, scan for the
	 * transcription stamp. If we find it, then we don't have to do
	 * anything -- the block marks are already transcribed.
	 */
	if (mx23_check_transcription_stamp(this))
		return 0;

	/*
	 * If control arrives here, we couldn't find a transcription stamp,
	 * so we presume the block marks are in the conventional location.
	 */
	dev_dbg(dev, "Transcribing bad block marks...\n");

	/* Compute the number of blocks in the entire medium. */
	block_count = chip->chipsize >> chip->phys_erase_shift;

	/*
	 * Loop over all the blocks in the medium, transcribing block marks as
	 * we go.
	 */
	for (block = 0; block < block_count; block++) {
		/*
		 * Compute the chip, page and byte addresses for this block's
		 * conventional mark.
		 */
		chipnr = block >> (chip->chip_shift - chip->phys_erase_shift);
		page = block << (chip->phys_erase_shift - chip->page_shift);
		byte = block <<  chip->phys_erase_shift;

		/* Send the command to read the conventional block mark. */
		chip->select_chip(mtd, chipnr);
		chip->cmdfunc(mtd, NAND_CMD_READ0, mtd->writesize, page);
		block_mark = chip->read_byte(mtd);
		chip->select_chip(mtd, -1);

		/*
		 * Check if the block is marked bad. If so, we need to mark it
		 * again, but this time the result will be a mark in the
		 * location where we transcribe block marks.
		 */
		if (block_mark != 0xff) {
			dev_dbg(dev, "Transcribing mark in block %u\n", block);
			ret = chip->block_markbad(mtd, byte);
			if (ret)
				dev_err(dev, "Failed to mark block bad with "
							"ret %d\n", ret);
		}
	}

	/* Write the stamp that indicates we've transcribed the block marks. */
	mx23_write_transcription_stamp(this);
	return 0;
}
1393 | |||
1394 | static int __devinit nand_boot_init(struct gpmi_nand_data *this) | ||
1395 | { | ||
1396 | nand_boot_set_geometry(this); | ||
1397 | |||
1398 | /* This is ROM arch-specific initilization before the BBT scanning. */ | ||
1399 | if (GPMI_IS_MX23(this)) | ||
1400 | return mx23_boot_init(this); | ||
1401 | return 0; | ||
1402 | } | ||
1403 | |||
1404 | static int __devinit gpmi_set_geometry(struct gpmi_nand_data *this) | ||
1405 | { | ||
1406 | int ret; | ||
1407 | |||
1408 | /* Free the temporary DMA memory for reading ID. */ | ||
1409 | gpmi_free_dma_buffer(this); | ||
1410 | |||
1411 | /* Set up the NFC geometry which is used by BCH. */ | ||
1412 | ret = bch_set_geometry(this); | ||
1413 | if (ret) { | ||
1414 | pr_err("set geometry ret : %d\n", ret); | ||
1415 | return ret; | ||
1416 | } | ||
1417 | |||
1418 | /* Alloc the new DMA buffers according to the pagesize and oobsize */ | ||
1419 | return gpmi_alloc_dma_buffer(this); | ||
1420 | } | ||
1421 | |||
1422 | static int gpmi_pre_bbt_scan(struct gpmi_nand_data *this) | ||
1423 | { | ||
1424 | int ret; | ||
1425 | |||
1426 | /* Set up swap_block_mark, must be set before the gpmi_set_geometry() */ | ||
1427 | if (GPMI_IS_MX23(this)) | ||
1428 | this->swap_block_mark = false; | ||
1429 | else | ||
1430 | this->swap_block_mark = true; | ||
1431 | |||
1432 | /* Set up the medium geometry */ | ||
1433 | ret = gpmi_set_geometry(this); | ||
1434 | if (ret) | ||
1435 | return ret; | ||
1436 | |||
1437 | /* NAND boot init, depends on the gpmi_set_geometry(). */ | ||
1438 | return nand_boot_init(this); | ||
1439 | } | ||
1440 | |||
1441 | static int gpmi_scan_bbt(struct mtd_info *mtd) | ||
1442 | { | ||
1443 | struct nand_chip *chip = mtd->priv; | ||
1444 | struct gpmi_nand_data *this = chip->priv; | ||
1445 | int ret; | ||
1446 | |||
1447 | /* Prepare for the BBT scan. */ | ||
1448 | ret = gpmi_pre_bbt_scan(this); | ||
1449 | if (ret) | ||
1450 | return ret; | ||
1451 | |||
1452 | /* use the default BBT implementation */ | ||
1453 | return nand_default_bbt(mtd); | ||
1454 | } | ||
1455 | |||
1456 | void gpmi_nfc_exit(struct gpmi_nand_data *this) | ||
1457 | { | ||
1458 | nand_release(&this->mtd); | ||
1459 | gpmi_free_dma_buffer(this); | ||
1460 | } | ||
1461 | |||
1462 | static int __devinit gpmi_nfc_init(struct gpmi_nand_data *this) | ||
1463 | { | ||
1464 | struct gpmi_nand_platform_data *pdata = this->pdata; | ||
1465 | struct mtd_info *mtd = &this->mtd; | ||
1466 | struct nand_chip *chip = &this->nand; | ||
1467 | int ret; | ||
1468 | |||
1469 | /* init current chip */ | ||
1470 | this->current_chip = -1; | ||
1471 | |||
1472 | /* init the MTD data structures */ | ||
1473 | mtd->priv = chip; | ||
1474 | mtd->name = "gpmi-nand"; | ||
1475 | mtd->owner = THIS_MODULE; | ||
1476 | |||
1477 | /* init the nand_chip{}, we don't support a 16-bit NAND Flash bus. */ | ||
1478 | chip->priv = this; | ||
1479 | chip->select_chip = gpmi_select_chip; | ||
1480 | chip->cmd_ctrl = gpmi_cmd_ctrl; | ||
1481 | chip->dev_ready = gpmi_dev_ready; | ||
1482 | chip->read_byte = gpmi_read_byte; | ||
1483 | chip->read_buf = gpmi_read_buf; | ||
1484 | chip->write_buf = gpmi_write_buf; | ||
1485 | chip->ecc.read_page = gpmi_ecc_read_page; | ||
1486 | chip->ecc.write_page = gpmi_ecc_write_page; | ||
1487 | chip->ecc.read_oob = gpmi_ecc_read_oob; | ||
1488 | chip->ecc.write_oob = gpmi_ecc_write_oob; | ||
1489 | chip->scan_bbt = gpmi_scan_bbt; | ||
1490 | chip->badblock_pattern = &gpmi_bbt_descr; | ||
1491 | chip->block_markbad = gpmi_block_markbad; | ||
1492 | chip->options |= NAND_NO_SUBPAGE_WRITE; | ||
1493 | chip->ecc.mode = NAND_ECC_HW; | ||
1494 | chip->ecc.size = 1; | ||
1495 | chip->ecc.layout = &gpmi_hw_ecclayout; | ||
1496 | |||
1497 | /* Allocate a temporary DMA buffer for reading ID in the nand_scan() */ | ||
1498 | this->bch_geometry.payload_size = 1024; | ||
1499 | this->bch_geometry.auxiliary_size = 128; | ||
1500 | ret = gpmi_alloc_dma_buffer(this); | ||
1501 | if (ret) | ||
1502 | goto err_out; | ||
1503 | |||
1504 | ret = nand_scan(mtd, pdata->max_chip_count); | ||
1505 | if (ret) { | ||
1506 | pr_err("Chip scan failed\n"); | ||
1507 | goto err_out; | ||
1508 | } | ||
1509 | |||
1510 | ret = mtd_device_parse_register(mtd, NULL, NULL, | ||
1511 | pdata->partitions, pdata->partition_count); | ||
1512 | if (ret) | ||
1513 | goto err_out; | ||
1514 | return 0; | ||
1515 | |||
1516 | err_out: | ||
1517 | gpmi_nfc_exit(this); | ||
1518 | return ret; | ||
1519 | } | ||
1520 | |||
1521 | static int __devinit gpmi_nand_probe(struct platform_device *pdev) | ||
1522 | { | ||
1523 | struct gpmi_nand_platform_data *pdata = pdev->dev.platform_data; | ||
1524 | struct gpmi_nand_data *this; | ||
1525 | int ret; | ||
1526 | |||
1527 | this = kzalloc(sizeof(*this), GFP_KERNEL); | ||
1528 | if (!this) { | ||
1529 | pr_err("Failed to allocate per-device memory\n"); | ||
1530 | return -ENOMEM; | ||
1531 | } | ||
1532 | |||
1533 | platform_set_drvdata(pdev, this); | ||
1534 | this->pdev = pdev; | ||
1535 | this->dev = &pdev->dev; | ||
1536 | this->pdata = pdata; | ||
1537 | |||
1538 | if (pdata->platform_init) { | ||
1539 | ret = pdata->platform_init(); | ||
1540 | if (ret) | ||
1541 | goto platform_init_error; | ||
1542 | } | ||
1543 | |||
1544 | ret = acquire_resources(this); | ||
1545 | if (ret) | ||
1546 | goto exit_acquire_resources; | ||
1547 | |||
1548 | ret = init_hardware(this); | ||
1549 | if (ret) | ||
1550 | goto exit_nfc_init; | ||
1551 | |||
1552 | ret = gpmi_nfc_init(this); | ||
1553 | if (ret) | ||
1554 | goto exit_nfc_init; | ||
1555 | |||
1556 | return 0; | ||
1557 | |||
1558 | exit_nfc_init: | ||
1559 | release_resources(this); | ||
1560 | platform_init_error: | ||
1561 | exit_acquire_resources: | ||
1562 | platform_set_drvdata(pdev, NULL); | ||
1563 | kfree(this); | ||
1564 | return ret; | ||
1565 | } | ||
1566 | |||
1567 | static int __exit gpmi_nand_remove(struct platform_device *pdev) | ||
1568 | { | ||
1569 | struct gpmi_nand_data *this = platform_get_drvdata(pdev); | ||
1570 | |||
1571 | gpmi_nfc_exit(this); | ||
1572 | release_resources(this); | ||
1573 | platform_set_drvdata(pdev, NULL); | ||
1574 | kfree(this); | ||
1575 | return 0; | ||
1576 | } | ||
1577 | |||
1578 | static const struct platform_device_id gpmi_ids[] = { | ||
1579 | { | ||
1580 | .name = "imx23-gpmi-nand", | ||
1581 | .driver_data = IS_MX23, | ||
1582 | }, { | ||
1583 | .name = "imx28-gpmi-nand", | ||
1584 | .driver_data = IS_MX28, | ||
1585 | }, {}, | ||
1586 | }; | ||
1587 | |||
1588 | static struct platform_driver gpmi_nand_driver = { | ||
1589 | .driver = { | ||
1590 | .name = "gpmi-nand", | ||
1591 | }, | ||
1592 | .probe = gpmi_nand_probe, | ||
1593 | .remove = __exit_p(gpmi_nand_remove), | ||
1594 | .id_table = gpmi_ids, | ||
1595 | }; | ||
1596 | |||
1597 | static int __init gpmi_nand_init(void) | ||
1598 | { | ||
1599 | int err; | ||
1600 | |||
1601 | err = platform_driver_register(&gpmi_nand_driver); | ||
1602 | if (err == 0) | ||
1603 | printk(KERN_INFO "GPMI NAND driver registered. (IMX)\n"); | ||
1604 | else | ||
1605 | pr_err("i.MX GPMI NAND driver registration failed\n"); | ||
1606 | return err; | ||
1607 | } | ||
1608 | |||
1609 | static void __exit gpmi_nand_exit(void) | ||
1610 | { | ||
1611 | platform_driver_unregister(&gpmi_nand_driver); | ||
1612 | } | ||
1613 | |||
1614 | module_init(gpmi_nand_init); | ||
1615 | module_exit(gpmi_nand_exit); | ||
1616 | |||
1617 | MODULE_AUTHOR("Freescale Semiconductor, Inc."); | ||
1618 | MODULE_DESCRIPTION("i.MX GPMI NAND Flash Controller Driver"); | ||
1619 | MODULE_LICENSE("GPL"); | ||
diff --git a/drivers/mtd/nand/gpmi-nand/gpmi-nand.h b/drivers/mtd/nand/gpmi-nand/gpmi-nand.h new file mode 100644 index 000000000000..e023bccb7781 --- /dev/null +++ b/drivers/mtd/nand/gpmi-nand/gpmi-nand.h | |||
@@ -0,0 +1,273 @@ | |||
1 | /* | ||
2 | * Freescale GPMI NAND Flash Driver | ||
3 | * | ||
4 | * Copyright (C) 2010-2011 Freescale Semiconductor, Inc. | ||
5 | * Copyright (C) 2008 Embedded Alley Solutions, Inc. | ||
6 | * | ||
7 | * This program is free software; you can redistribute it and/or modify | ||
8 | * it under the terms of the GNU General Public License as published by | ||
9 | * the Free Software Foundation; either version 2 of the License, or | ||
10 | * (at your option) any later version. | ||
11 | * | ||
12 | * This program is distributed in the hope that it will be useful, | ||
13 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
14 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
15 | * GNU General Public License for more details. | ||
16 | */ | ||
17 | #ifndef __DRIVERS_MTD_NAND_GPMI_NAND_H | ||
18 | #define __DRIVERS_MTD_NAND_GPMI_NAND_H | ||
19 | |||
20 | #include <linux/mtd/nand.h> | ||
21 | #include <linux/platform_device.h> | ||
22 | #include <linux/dma-mapping.h> | ||
23 | #include <mach/dma.h> | ||
24 | |||
25 | struct resources { | ||
26 | void *gpmi_regs; | ||
27 | void *bch_regs; | ||
28 | unsigned int bch_low_interrupt; | ||
29 | unsigned int bch_high_interrupt; | ||
30 | unsigned int dma_low_channel; | ||
31 | unsigned int dma_high_channel; | ||
32 | struct clk *clock; | ||
33 | }; | ||
34 | |||
35 | /** | ||
36 | * struct bch_geometry - BCH geometry description. | ||
37 | * @gf_len: The length of Galois Field. (e.g., 13 or 14) | ||
38 | * @ecc_strength: A number that describes the strength of the ECC | ||
39 | * algorithm. | ||
40 | * @page_size: The size, in bytes, of a physical page, including | ||
41 | * both data and OOB. | ||
42 | * @metadata_size: The size, in bytes, of the metadata. | ||
43 | * @ecc_chunk_size: The size, in bytes, of a single ECC chunk. Note | ||
44 | * the first chunk in the page includes both data and | ||
45 | * metadata, so it's a bit larger than this value. | ||
46 | * @ecc_chunk_count: The number of ECC chunks in the page, | ||
47 | * @payload_size: The size, in bytes, of the payload buffer. | ||
48 | * @auxiliary_size: The size, in bytes, of the auxiliary buffer. | ||
49 | * @auxiliary_status_offset: The offset into the auxiliary buffer at which | ||
50 | * the ECC status appears. | ||
51 | * @block_mark_byte_offset: The byte offset in the ECC-based page view at | ||
52 | * which the underlying physical block mark appears. | ||
53 | * @block_mark_bit_offset: The bit offset into the ECC-based page view at | ||
54 | * which the underlying physical block mark appears. | ||
55 | */ | ||
56 | struct bch_geometry { | ||
57 | unsigned int gf_len; | ||
58 | unsigned int ecc_strength; | ||
59 | unsigned int page_size; | ||
60 | unsigned int metadata_size; | ||
61 | unsigned int ecc_chunk_size; | ||
62 | unsigned int ecc_chunk_count; | ||
63 | unsigned int payload_size; | ||
64 | unsigned int auxiliary_size; | ||
65 | unsigned int auxiliary_status_offset; | ||
66 | unsigned int block_mark_byte_offset; | ||
67 | unsigned int block_mark_bit_offset; | ||
68 | }; | ||
69 | |||
70 | /** | ||
71 | * struct boot_rom_geometry - Boot ROM geometry description. | ||
72 | * @stride_size_in_pages: The size of a boot block stride, in pages. | ||
73 | * @search_area_stride_exponent: The logarithm to base 2 of the size of a | ||
74 | * search area in boot block strides. | ||
75 | */ | ||
76 | struct boot_rom_geometry { | ||
77 | unsigned int stride_size_in_pages; | ||
78 | unsigned int search_area_stride_exponent; | ||
79 | }; | ||
80 | |||
81 | /* DMA operations types */ | ||
82 | enum dma_ops_type { | ||
83 | DMA_FOR_COMMAND = 1, | ||
84 | DMA_FOR_READ_DATA, | ||
85 | DMA_FOR_WRITE_DATA, | ||
86 | DMA_FOR_READ_ECC_PAGE, | ||
87 | DMA_FOR_WRITE_ECC_PAGE | ||
88 | }; | ||
89 | |||
90 | /** | ||
91 | * struct nand_timing - Fundamental timing attributes for NAND. | ||
92 | * @data_setup_in_ns: The data setup time, in nanoseconds. Usually the | ||
93 | * maximum of tDS and tWP. A negative value | ||
94 | * indicates this characteristic isn't known. | ||
95 | * @data_hold_in_ns: The data hold time, in nanoseconds. Usually the | ||
96 | * maximum of tDH, tWH and tREH. A negative value | ||
97 | * indicates this characteristic isn't known. | ||
98 | * @address_setup_in_ns: The address setup time, in nanoseconds. Usually | ||
99 | * the maximum of tCLS, tCS and tALS. A negative | ||
100 | * value indicates this characteristic isn't known. | ||
101 | * @gpmi_sample_delay_in_ns: A GPMI-specific timing parameter. A negative value | ||
102 | * indicates this characteristic isn't known. | ||
103 | * @tREA_in_ns: tREA, in nanoseconds, from the data sheet. A | ||
104 | * negative value indicates this characteristic isn't | ||
105 | * known. | ||
106 | * @tRLOH_in_ns: tRLOH, in nanoseconds, from the data sheet. A | ||
107 | * negative value indicates this characteristic isn't | ||
108 | * known. | ||
109 | * @tRHOH_in_ns: tRHOH, in nanoseconds, from the data sheet. A | ||
110 | * negative value indicates this characteristic isn't | ||
111 | * known. | ||
112 | */ | ||
113 | struct nand_timing { | ||
114 | int8_t data_setup_in_ns; | ||
115 | int8_t data_hold_in_ns; | ||
116 | int8_t address_setup_in_ns; | ||
117 | int8_t gpmi_sample_delay_in_ns; | ||
118 | int8_t tREA_in_ns; | ||
119 | int8_t tRLOH_in_ns; | ||
120 | int8_t tRHOH_in_ns; | ||
121 | }; | ||
122 | |||
123 | struct gpmi_nand_data { | ||
124 | /* System Interface */ | ||
125 | struct device *dev; | ||
126 | struct platform_device *pdev; | ||
127 | struct gpmi_nand_platform_data *pdata; | ||
128 | |||
129 | /* Resources */ | ||
130 | struct resources resources; | ||
131 | |||
132 | /* Flash Hardware */ | ||
133 | struct nand_timing timing; | ||
134 | |||
135 | /* BCH */ | ||
136 | struct bch_geometry bch_geometry; | ||
137 | struct completion bch_done; | ||
138 | |||
139 | /* NAND Boot issue */ | ||
140 | bool swap_block_mark; | ||
141 | struct boot_rom_geometry rom_geometry; | ||
142 | |||
143 | /* MTD / NAND */ | ||
144 | struct nand_chip nand; | ||
145 | struct mtd_info mtd; | ||
146 | |||
147 | /* General-use Variables */ | ||
148 | int current_chip; | ||
149 | unsigned int command_length; | ||
150 | |||
151 | /* passed from upper layer */ | ||
152 | uint8_t *upper_buf; | ||
153 | int upper_len; | ||
154 | |||
155 | /* for DMA operations */ | ||
156 | bool direct_dma_map_ok; | ||
157 | |||
158 | struct scatterlist cmd_sgl; | ||
159 | char *cmd_buffer; | ||
160 | |||
161 | struct scatterlist data_sgl; | ||
162 | char *data_buffer_dma; | ||
163 | |||
164 | void *page_buffer_virt; | ||
165 | dma_addr_t page_buffer_phys; | ||
166 | unsigned int page_buffer_size; | ||
167 | |||
168 | void *payload_virt; | ||
169 | dma_addr_t payload_phys; | ||
170 | |||
171 | void *auxiliary_virt; | ||
172 | dma_addr_t auxiliary_phys; | ||
173 | |||
174 | /* DMA channels */ | ||
175 | #define DMA_CHANS 8 | ||
176 | struct dma_chan *dma_chans[DMA_CHANS]; | ||
177 | struct mxs_dma_data dma_data; | ||
178 | enum dma_ops_type last_dma_type; | ||
179 | enum dma_ops_type dma_type; | ||
180 | struct completion dma_done; | ||
181 | |||
182 | /* private */ | ||
183 | void *private; | ||
184 | }; | ||
185 | |||
186 | /** | ||
187 | * struct gpmi_nfc_hardware_timing - GPMI hardware timing parameters. | ||
188 | * @data_setup_in_cycles: The data setup time, in cycles. | ||
189 | * @data_hold_in_cycles: The data hold time, in cycles. | ||
190 | * @address_setup_in_cycles: The address setup time, in cycles. | ||
191 | * @use_half_periods: Indicates the clock is running slowly, so the | ||
192 | * NFC DLL should use half-periods. | ||
193 | * @sample_delay_factor: The sample delay factor. | ||
194 | */ | ||
195 | struct gpmi_nfc_hardware_timing { | ||
196 | uint8_t data_setup_in_cycles; | ||
197 | uint8_t data_hold_in_cycles; | ||
198 | uint8_t address_setup_in_cycles; | ||
199 | bool use_half_periods; | ||
200 | uint8_t sample_delay_factor; | ||
201 | }; | ||
202 | |||
203 | /** | ||
204 | * struct timing_threshod - Timing threshold | ||
205 | * @max_data_setup_cycles: The maximum number of data setup cycles that | ||
206 | * can be expressed in the hardware. | ||
207 | * @internal_data_setup_in_ns: The time, in ns, that the NFC hardware requires | ||
208 | * for data read internal setup. In the Reference | ||
209 | * Manual, see the chapter "High-Speed NAND | ||
210 | * Timing" for more details. | ||
211 | * @max_sample_delay_factor: The maximum sample delay factor that can be | ||
212 | * expressed in the hardware. | ||
213 | * @max_dll_clock_period_in_ns: The maximum period of the GPMI clock that the | ||
214 | * sample delay DLL hardware can possibly work | ||
215 | * with (the DLL is unusable with longer periods). | ||
216 | * If the full-cycle period is greater than HALF | ||
217 | * this value, the DLL must be configured to use | ||
218 | * half-periods. | ||
219 | * @max_dll_delay_in_ns: The maximum amount of delay, in ns, that the | ||
220 | * DLL can implement. | ||
221 | * @clock_frequency_in_hz: The clock frequency, in Hz, during the current | ||
222 | * I/O transaction. If no I/O transaction is in | ||
223 | * progress, this is the clock frequency during | ||
224 | * the most recent I/O transaction. | ||
225 | */ | ||
226 | struct timing_threshod { | ||
227 | const unsigned int max_chip_count; | ||
228 | const unsigned int max_data_setup_cycles; | ||
229 | const unsigned int internal_data_setup_in_ns; | ||
230 | const unsigned int max_sample_delay_factor; | ||
231 | const unsigned int max_dll_clock_period_in_ns; | ||
232 | const unsigned int max_dll_delay_in_ns; | ||
233 | unsigned long clock_frequency_in_hz; | ||
234 | |||
235 | }; | ||
236 | |||
237 | /* Common Services */ | ||
238 | extern int common_nfc_set_geometry(struct gpmi_nand_data *); | ||
239 | extern struct dma_chan *get_dma_chan(struct gpmi_nand_data *); | ||
240 | extern void prepare_data_dma(struct gpmi_nand_data *, | ||
241 | enum dma_data_direction dr); | ||
242 | extern int start_dma_without_bch_irq(struct gpmi_nand_data *, | ||
243 | struct dma_async_tx_descriptor *); | ||
244 | extern int start_dma_with_bch_irq(struct gpmi_nand_data *, | ||
245 | struct dma_async_tx_descriptor *); | ||
246 | |||
247 | /* GPMI-NAND helper function library */ | ||
248 | extern int gpmi_init(struct gpmi_nand_data *); | ||
249 | extern void gpmi_clear_bch(struct gpmi_nand_data *); | ||
250 | extern void gpmi_dump_info(struct gpmi_nand_data *); | ||
251 | extern int bch_set_geometry(struct gpmi_nand_data *); | ||
252 | extern int gpmi_is_ready(struct gpmi_nand_data *, unsigned chip); | ||
253 | extern int gpmi_send_command(struct gpmi_nand_data *); | ||
254 | extern void gpmi_begin(struct gpmi_nand_data *); | ||
255 | extern void gpmi_end(struct gpmi_nand_data *); | ||
256 | extern int gpmi_read_data(struct gpmi_nand_data *); | ||
257 | extern int gpmi_send_data(struct gpmi_nand_data *); | ||
258 | extern int gpmi_send_page(struct gpmi_nand_data *, | ||
259 | dma_addr_t payload, dma_addr_t auxiliary); | ||
260 | extern int gpmi_read_page(struct gpmi_nand_data *, | ||
261 | dma_addr_t payload, dma_addr_t auxiliary); | ||
262 | |||
263 | /* BCH : Status Block Completion Codes */ | ||
264 | #define STATUS_GOOD 0x00 | ||
265 | #define STATUS_ERASED 0xff | ||
266 | #define STATUS_UNCORRECTABLE 0xfe | ||
267 | |||
268 | /* Use the platform_id to distinguish different Archs. */ | ||
269 | #define IS_MX23 0x1 | ||
270 | #define IS_MX28 0x2 | ||
271 | #define GPMI_IS_MX23(x) ((x)->pdev->id_entry->driver_data == IS_MX23) | ||
272 | #define GPMI_IS_MX28(x) ((x)->pdev->id_entry->driver_data == IS_MX28) | ||
273 | #endif | ||
diff --git a/drivers/mtd/nand/gpmi-nand/gpmi-regs.h b/drivers/mtd/nand/gpmi-nand/gpmi-regs.h new file mode 100644 index 000000000000..83431240e2f2 --- /dev/null +++ b/drivers/mtd/nand/gpmi-nand/gpmi-regs.h | |||
@@ -0,0 +1,172 @@ | |||
1 | /* | ||
2 | * Freescale GPMI NAND Flash Driver | ||
3 | * | ||
4 | * Copyright 2008-2011 Freescale Semiconductor, Inc. | ||
5 | * Copyright 2008 Embedded Alley Solutions, Inc. | ||
6 | * | ||
7 | * This program is free software; you can redistribute it and/or modify | ||
8 | * it under the terms of the GNU General Public License as published by | ||
9 | * the Free Software Foundation; either version 2 of the License, or | ||
10 | * (at your option) any later version. | ||
11 | * | ||
12 | * This program is distributed in the hope that it will be useful, | ||
13 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
14 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
15 | * GNU General Public License for more details. | ||
16 | * | ||
17 | * You should have received a copy of the GNU General Public License along | ||
18 | * with this program; if not, write to the Free Software Foundation, Inc., | ||
19 | * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. | ||
20 | */ | ||
21 | #ifndef __GPMI_NAND_GPMI_REGS_H | ||
22 | #define __GPMI_NAND_GPMI_REGS_H | ||
23 | |||
24 | #define HW_GPMI_CTRL0 0x00000000 | ||
25 | #define HW_GPMI_CTRL0_SET 0x00000004 | ||
26 | #define HW_GPMI_CTRL0_CLR 0x00000008 | ||
27 | #define HW_GPMI_CTRL0_TOG 0x0000000c | ||
28 | |||
29 | #define BP_GPMI_CTRL0_COMMAND_MODE 24 | ||
30 | #define BM_GPMI_CTRL0_COMMAND_MODE (3 << BP_GPMI_CTRL0_COMMAND_MODE) | ||
31 | #define BF_GPMI_CTRL0_COMMAND_MODE(v) \ | ||
32 | (((v) << BP_GPMI_CTRL0_COMMAND_MODE) & BM_GPMI_CTRL0_COMMAND_MODE) | ||
33 | #define BV_GPMI_CTRL0_COMMAND_MODE__WRITE 0x0 | ||
34 | #define BV_GPMI_CTRL0_COMMAND_MODE__READ 0x1 | ||
35 | #define BV_GPMI_CTRL0_COMMAND_MODE__READ_AND_COMPARE 0x2 | ||
36 | #define BV_GPMI_CTRL0_COMMAND_MODE__WAIT_FOR_READY 0x3 | ||
37 | |||
38 | #define BM_GPMI_CTRL0_WORD_LENGTH (1 << 23) | ||
39 | #define BV_GPMI_CTRL0_WORD_LENGTH__16_BIT 0x0 | ||
40 | #define BV_GPMI_CTRL0_WORD_LENGTH__8_BIT 0x1 | ||
41 | |||
42 | /* | ||
43 | * Difference in LOCK_CS between imx23 and imx28 : | ||
44 | * This bit may impact the _POWER_ consumption. So some chips | ||
45 | * do not set it. | ||
46 | */ | ||
47 | #define MX23_BP_GPMI_CTRL0_LOCK_CS 22 | ||
48 | #define MX28_BP_GPMI_CTRL0_LOCK_CS 27 | ||
49 | #define LOCK_CS_ENABLE 0x1 | ||
50 | #define BF_GPMI_CTRL0_LOCK_CS(v, x) 0x0 | ||
51 | |||
52 | /* Difference in CS between imx23 and imx28 */ | ||
53 | #define BP_GPMI_CTRL0_CS 20 | ||
54 | #define MX23_BM_GPMI_CTRL0_CS (3 << BP_GPMI_CTRL0_CS) | ||
55 | #define MX28_BM_GPMI_CTRL0_CS (7 << BP_GPMI_CTRL0_CS) | ||
56 | #define BF_GPMI_CTRL0_CS(v, x) (((v) << BP_GPMI_CTRL0_CS) & \ | ||
57 | (GPMI_IS_MX23((x)) \ | ||
58 | ? MX23_BM_GPMI_CTRL0_CS \ | ||
59 | : MX28_BM_GPMI_CTRL0_CS)) | ||
60 | |||
61 | #define BP_GPMI_CTRL0_ADDRESS 17 | ||
62 | #define BM_GPMI_CTRL0_ADDRESS (3 << BP_GPMI_CTRL0_ADDRESS) | ||
63 | #define BF_GPMI_CTRL0_ADDRESS(v) \ | ||
64 | (((v) << BP_GPMI_CTRL0_ADDRESS) & BM_GPMI_CTRL0_ADDRESS) | ||
65 | #define BV_GPMI_CTRL0_ADDRESS__NAND_DATA 0x0 | ||
66 | #define BV_GPMI_CTRL0_ADDRESS__NAND_CLE 0x1 | ||
67 | #define BV_GPMI_CTRL0_ADDRESS__NAND_ALE 0x2 | ||
68 | |||
69 | #define BM_GPMI_CTRL0_ADDRESS_INCREMENT (1 << 16) | ||
70 | #define BV_GPMI_CTRL0_ADDRESS_INCREMENT__DISABLED 0x0 | ||
71 | #define BV_GPMI_CTRL0_ADDRESS_INCREMENT__ENABLED 0x1 | ||
72 | |||
73 | #define BP_GPMI_CTRL0_XFER_COUNT 0 | ||
74 | #define BM_GPMI_CTRL0_XFER_COUNT (0xffff << BP_GPMI_CTRL0_XFER_COUNT) | ||
75 | #define BF_GPMI_CTRL0_XFER_COUNT(v) \ | ||
76 | (((v) << BP_GPMI_CTRL0_XFER_COUNT) & BM_GPMI_CTRL0_XFER_COUNT) | ||
77 | |||
78 | #define HW_GPMI_COMPARE 0x00000010 | ||
79 | |||
80 | #define HW_GPMI_ECCCTRL 0x00000020 | ||
81 | #define HW_GPMI_ECCCTRL_SET 0x00000024 | ||
82 | #define HW_GPMI_ECCCTRL_CLR 0x00000028 | ||
83 | #define HW_GPMI_ECCCTRL_TOG 0x0000002c | ||
84 | |||
85 | #define BP_GPMI_ECCCTRL_ECC_CMD 13 | ||
86 | #define BM_GPMI_ECCCTRL_ECC_CMD (3 << BP_GPMI_ECCCTRL_ECC_CMD) | ||
87 | #define BF_GPMI_ECCCTRL_ECC_CMD(v) \ | ||
88 | (((v) << BP_GPMI_ECCCTRL_ECC_CMD) & BM_GPMI_ECCCTRL_ECC_CMD) | ||
89 | #define BV_GPMI_ECCCTRL_ECC_CMD__BCH_DECODE 0x0 | ||
90 | #define BV_GPMI_ECCCTRL_ECC_CMD__BCH_ENCODE 0x1 | ||
91 | |||
92 | #define BM_GPMI_ECCCTRL_ENABLE_ECC (1 << 12) | ||
93 | #define BV_GPMI_ECCCTRL_ENABLE_ECC__ENABLE 0x1 | ||
94 | #define BV_GPMI_ECCCTRL_ENABLE_ECC__DISABLE 0x0 | ||
95 | |||
96 | #define BP_GPMI_ECCCTRL_BUFFER_MASK 0 | ||
97 | #define BM_GPMI_ECCCTRL_BUFFER_MASK (0x1ff << BP_GPMI_ECCCTRL_BUFFER_MASK) | ||
98 | #define BF_GPMI_ECCCTRL_BUFFER_MASK(v) \ | ||
99 | (((v) << BP_GPMI_ECCCTRL_BUFFER_MASK) & BM_GPMI_ECCCTRL_BUFFER_MASK) | ||
100 | #define BV_GPMI_ECCCTRL_BUFFER_MASK__BCH_AUXONLY 0x100 | ||
101 | #define BV_GPMI_ECCCTRL_BUFFER_MASK__BCH_PAGE 0x1FF | ||
102 | |||
103 | #define HW_GPMI_ECCCOUNT 0x00000030 | ||
104 | #define HW_GPMI_PAYLOAD 0x00000040 | ||
105 | #define HW_GPMI_AUXILIARY 0x00000050 | ||
106 | #define HW_GPMI_CTRL1 0x00000060 | ||
107 | #define HW_GPMI_CTRL1_SET 0x00000064 | ||
108 | #define HW_GPMI_CTRL1_CLR 0x00000068 | ||
109 | #define HW_GPMI_CTRL1_TOG 0x0000006c | ||
110 | |||
111 | #define BM_GPMI_CTRL1_BCH_MODE (1 << 18) | ||
112 | |||
113 | #define BP_GPMI_CTRL1_DLL_ENABLE 17 | ||
114 | #define BM_GPMI_CTRL1_DLL_ENABLE (1 << BP_GPMI_CTRL1_DLL_ENABLE) | ||
115 | |||
116 | #define BP_GPMI_CTRL1_HALF_PERIOD 16 | ||
117 | #define BM_GPMI_CTRL1_HALF_PERIOD (1 << BP_GPMI_CTRL1_HALF_PERIOD) | ||
118 | |||
119 | #define BP_GPMI_CTRL1_RDN_DELAY 12 | ||
120 | #define BM_GPMI_CTRL1_RDN_DELAY (0xf << BP_GPMI_CTRL1_RDN_DELAY) | ||
121 | #define BF_GPMI_CTRL1_RDN_DELAY(v) \ | ||
122 | (((v) << BP_GPMI_CTRL1_RDN_DELAY) & BM_GPMI_CTRL1_RDN_DELAY) | ||
123 | |||
124 | #define BM_GPMI_CTRL1_DEV_RESET (1 << 3) | ||
125 | #define BV_GPMI_CTRL1_DEV_RESET__ENABLED 0x0 | ||
126 | #define BV_GPMI_CTRL1_DEV_RESET__DISABLED 0x1 | ||
127 | |||
128 | #define BM_GPMI_CTRL1_ATA_IRQRDY_POLARITY (1 << 2) | ||
129 | #define BV_GPMI_CTRL1_ATA_IRQRDY_POLARITY__ACTIVELOW 0x0 | ||
130 | #define BV_GPMI_CTRL1_ATA_IRQRDY_POLARITY__ACTIVEHIGH 0x1 | ||
131 | |||
132 | #define BM_GPMI_CTRL1_CAMERA_MODE (1 << 1) | ||
133 | #define BV_GPMI_CTRL1_GPMI_MODE__NAND 0x0 | ||
134 | #define BV_GPMI_CTRL1_GPMI_MODE__ATA 0x1 | ||
135 | |||
136 | #define BM_GPMI_CTRL1_GPMI_MODE (1 << 0) | ||
137 | |||
138 | #define HW_GPMI_TIMING0 0x00000070 | ||
139 | |||
140 | #define BP_GPMI_TIMING0_ADDRESS_SETUP 16 | ||
141 | #define BM_GPMI_TIMING0_ADDRESS_SETUP (0xff << BP_GPMI_TIMING0_ADDRESS_SETUP) | ||
142 | #define BF_GPMI_TIMING0_ADDRESS_SETUP(v) \ | ||
143 | (((v) << BP_GPMI_TIMING0_ADDRESS_SETUP) & BM_GPMI_TIMING0_ADDRESS_SETUP) | ||
144 | |||
145 | #define BP_GPMI_TIMING0_DATA_HOLD 8 | ||
146 | #define BM_GPMI_TIMING0_DATA_HOLD (0xff << BP_GPMI_TIMING0_DATA_HOLD) | ||
147 | #define BF_GPMI_TIMING0_DATA_HOLD(v) \ | ||
148 | (((v) << BP_GPMI_TIMING0_DATA_HOLD) & BM_GPMI_TIMING0_DATA_HOLD) | ||
149 | |||
150 | #define BP_GPMI_TIMING0_DATA_SETUP 0 | ||
151 | #define BM_GPMI_TIMING0_DATA_SETUP (0xff << BP_GPMI_TIMING0_DATA_SETUP) | ||
152 | #define BF_GPMI_TIMING0_DATA_SETUP(v) \ | ||
153 | (((v) << BP_GPMI_TIMING0_DATA_SETUP) & BM_GPMI_TIMING0_DATA_SETUP) | ||
154 | |||
155 | #define HW_GPMI_TIMING1 0x00000080 | ||
156 | #define BP_GPMI_TIMING1_BUSY_TIMEOUT 16 | ||
157 | |||
158 | #define HW_GPMI_TIMING2 0x00000090 | ||
159 | #define HW_GPMI_DATA 0x000000a0 | ||
160 | |||
161 | /* MX28 uses this to detect READY. */ | ||
162 | #define HW_GPMI_STAT 0x000000b0 | ||
163 | #define MX28_BP_GPMI_STAT_READY_BUSY 24 | ||
164 | #define MX28_BM_GPMI_STAT_READY_BUSY (0xff << MX28_BP_GPMI_STAT_READY_BUSY) | ||
165 | #define MX28_BF_GPMI_STAT_READY_BUSY(v) \ | ||
166 | (((v) << MX28_BP_GPMI_STAT_READY_BUSY) & MX28_BM_GPMI_STAT_READY_BUSY) | ||
167 | |||
168 | /* MX23 uses this to detect READY. */ | ||
169 | #define HW_GPMI_DEBUG 0x000000c0 | ||
170 | #define MX23_BP_GPMI_DEBUG_READY0 28 | ||
171 | #define MX23_BM_GPMI_DEBUG_READY0 (1 << MX23_BP_GPMI_DEBUG_READY0) | ||
172 | #endif | ||
diff --git a/drivers/mtd/nand/h1910.c b/drivers/mtd/nand/h1910.c index 02a03e67109c..5dc6f0d92f1a 100644 --- a/drivers/mtd/nand/h1910.c +++ b/drivers/mtd/nand/h1910.c | |||
@@ -81,9 +81,6 @@ static int h1910_device_ready(struct mtd_info *mtd) | |||
81 | static int __init h1910_init(void) | 81 | static int __init h1910_init(void) |
82 | { | 82 | { |
83 | struct nand_chip *this; | 83 | struct nand_chip *this; |
84 | const char *part_type = 0; | ||
85 | int mtd_parts_nb = 0; | ||
86 | struct mtd_partition *mtd_parts = 0; | ||
87 | void __iomem *nandaddr; | 84 | void __iomem *nandaddr; |
88 | 85 | ||
89 | if (!machine_is_h1900()) | 86 | if (!machine_is_h1900()) |
@@ -136,22 +133,10 @@ static int __init h1910_init(void) | |||
136 | iounmap((void *)nandaddr); | 133 | iounmap((void *)nandaddr); |
137 | return -ENXIO; | 134 | return -ENXIO; |
138 | } | 135 | } |
139 | #ifdef CONFIG_MTD_CMDLINE_PARTS | ||
140 | mtd_parts_nb = parse_cmdline_partitions(h1910_nand_mtd, &mtd_parts, "h1910-nand"); | ||
141 | if (mtd_parts_nb > 0) | ||
142 | part_type = "command line"; | ||
143 | else | ||
144 | mtd_parts_nb = 0; | ||
145 | #endif | ||
146 | if (mtd_parts_nb == 0) { | ||
147 | mtd_parts = partition_info; | ||
148 | mtd_parts_nb = NUM_PARTITIONS; | ||
149 | part_type = "static"; | ||
150 | } | ||
151 | 136 | ||
152 | /* Register the partitions */ | 137 | /* Register the partitions */ |
153 | printk(KERN_NOTICE "Using %s partition definition\n", part_type); | 138 | mtd_device_parse_register(h1910_nand_mtd, NULL, 0, |
154 | mtd_device_register(h1910_nand_mtd, mtd_parts, mtd_parts_nb); | 139 | partition_info, NUM_PARTITIONS); |
155 | 140 | ||
156 | /* Return happy */ | 141 | /* Return happy */ |
157 | return 0; | 142 | return 0; |
diff --git a/drivers/mtd/nand/jz4740_nand.c b/drivers/mtd/nand/jz4740_nand.c index 6e813daed068..e2664073a89b 100644 --- a/drivers/mtd/nand/jz4740_nand.c +++ b/drivers/mtd/nand/jz4740_nand.c | |||
@@ -251,10 +251,6 @@ static int jz_nand_correct_ecc_rs(struct mtd_info *mtd, uint8_t *dat, | |||
251 | return 0; | 251 | return 0; |
252 | } | 252 | } |
253 | 253 | ||
254 | #ifdef CONFIG_MTD_CMDLINE_PARTS | ||
255 | static const char *part_probes[] = {"cmdline", NULL}; | ||
256 | #endif | ||
257 | |||
258 | static int jz_nand_ioremap_resource(struct platform_device *pdev, | 254 | static int jz_nand_ioremap_resource(struct platform_device *pdev, |
259 | const char *name, struct resource **res, void __iomem **base) | 255 | const char *name, struct resource **res, void __iomem **base) |
260 | { | 256 | { |
@@ -299,8 +295,6 @@ static int __devinit jz_nand_probe(struct platform_device *pdev) | |||
299 | struct nand_chip *chip; | 295 | struct nand_chip *chip; |
300 | struct mtd_info *mtd; | 296 | struct mtd_info *mtd; |
301 | struct jz_nand_platform_data *pdata = pdev->dev.platform_data; | 297 | struct jz_nand_platform_data *pdata = pdev->dev.platform_data; |
302 | struct mtd_partition *partition_info; | ||
303 | int num_partitions = 0; | ||
304 | 298 | ||
305 | nand = kzalloc(sizeof(*nand), GFP_KERNEL); | 299 | nand = kzalloc(sizeof(*nand), GFP_KERNEL); |
306 | if (!nand) { | 300 | if (!nand) { |
@@ -373,15 +367,9 @@ static int __devinit jz_nand_probe(struct platform_device *pdev) | |||
373 | goto err_gpio_free; | 367 | goto err_gpio_free; |
374 | } | 368 | } |
375 | 369 | ||
376 | #ifdef CONFIG_MTD_CMDLINE_PARTS | 370 | ret = mtd_device_parse_register(mtd, NULL, 0, |
377 | num_partitions = parse_mtd_partitions(mtd, part_probes, | 371 | pdata ? pdata->partitions : NULL, |
378 | &partition_info, 0); | 372 | pdata ? pdata->num_partitions : 0); |
379 | #endif | ||
380 | if (num_partitions <= 0 && pdata) { | ||
381 | num_partitions = pdata->num_partitions; | ||
382 | partition_info = pdata->partitions; | ||
383 | } | ||
384 | ret = mtd_device_register(mtd, partition_info, num_partitions); | ||
385 | 373 | ||
386 | if (ret) { | 374 | if (ret) { |
387 | dev_err(&pdev->dev, "Failed to add mtd device\n"); | 375 | dev_err(&pdev->dev, "Failed to add mtd device\n"); |
diff --git a/drivers/mtd/nand/mpc5121_nfc.c b/drivers/mtd/nand/mpc5121_nfc.c index eb1fbac63eb6..5ede64706346 100644 --- a/drivers/mtd/nand/mpc5121_nfc.c +++ b/drivers/mtd/nand/mpc5121_nfc.c | |||
@@ -131,8 +131,6 @@ struct mpc5121_nfc_prv { | |||
131 | 131 | ||
132 | static void mpc5121_nfc_done(struct mtd_info *mtd); | 132 | static void mpc5121_nfc_done(struct mtd_info *mtd); |
133 | 133 | ||
134 | static const char *mpc5121_nfc_pprobes[] = { "cmdlinepart", NULL }; | ||
135 | |||
136 | /* Read NFC register */ | 134 | /* Read NFC register */ |
137 | static inline u16 nfc_read(struct mtd_info *mtd, uint reg) | 135 | static inline u16 nfc_read(struct mtd_info *mtd, uint reg) |
138 | { | 136 | { |
@@ -656,13 +654,13 @@ static int __devinit mpc5121_nfc_probe(struct platform_device *op) | |||
656 | struct mpc5121_nfc_prv *prv; | 654 | struct mpc5121_nfc_prv *prv; |
657 | struct resource res; | 655 | struct resource res; |
658 | struct mtd_info *mtd; | 656 | struct mtd_info *mtd; |
659 | struct mtd_partition *parts; | ||
660 | struct nand_chip *chip; | 657 | struct nand_chip *chip; |
661 | unsigned long regs_paddr, regs_size; | 658 | unsigned long regs_paddr, regs_size; |
662 | const __be32 *chips_no; | 659 | const __be32 *chips_no; |
663 | int resettime = 0; | 660 | int resettime = 0; |
664 | int retval = 0; | 661 | int retval = 0; |
665 | int rev, len; | 662 | int rev, len; |
663 | struct mtd_part_parser_data ppdata; | ||
666 | 664 | ||
667 | /* | 665 | /* |
668 | * Check SoC revision. This driver supports only NFC | 666 | * Check SoC revision. This driver supports only NFC |
@@ -727,6 +725,7 @@ static int __devinit mpc5121_nfc_probe(struct platform_device *op) | |||
727 | } | 725 | } |
728 | 726 | ||
729 | mtd->name = "MPC5121 NAND"; | 727 | mtd->name = "MPC5121 NAND"; |
728 | ppdata.of_node = dn; | ||
730 | chip->dev_ready = mpc5121_nfc_dev_ready; | 729 | chip->dev_ready = mpc5121_nfc_dev_ready; |
731 | chip->cmdfunc = mpc5121_nfc_command; | 730 | chip->cmdfunc = mpc5121_nfc_command; |
732 | chip->read_byte = mpc5121_nfc_read_byte; | 731 | chip->read_byte = mpc5121_nfc_read_byte; |
@@ -735,7 +734,8 @@ static int __devinit mpc5121_nfc_probe(struct platform_device *op) | |||
735 | chip->write_buf = mpc5121_nfc_write_buf; | 734 | chip->write_buf = mpc5121_nfc_write_buf; |
736 | chip->verify_buf = mpc5121_nfc_verify_buf; | 735 | chip->verify_buf = mpc5121_nfc_verify_buf; |
737 | chip->select_chip = mpc5121_nfc_select_chip; | 736 | chip->select_chip = mpc5121_nfc_select_chip; |
738 | chip->options = NAND_NO_AUTOINCR | NAND_USE_FLASH_BBT; | 737 | chip->options = NAND_NO_AUTOINCR; |
738 | chip->bbt_options = NAND_BBT_USE_FLASH; | ||
739 | chip->ecc.mode = NAND_ECC_SOFT; | 739 | chip->ecc.mode = NAND_ECC_SOFT; |
740 | 740 | ||
741 | /* Support external chip-select logic on ADS5121 board */ | 741 | /* Support external chip-select logic on ADS5121 board */ |
@@ -837,19 +837,7 @@ static int __devinit mpc5121_nfc_probe(struct platform_device *op) | |||
837 | dev_set_drvdata(dev, mtd); | 837 | dev_set_drvdata(dev, mtd); |
838 | 838 | ||
839 | /* Register device in MTD */ | 839 | /* Register device in MTD */ |
840 | retval = parse_mtd_partitions(mtd, mpc5121_nfc_pprobes, &parts, 0); | 840 | retval = mtd_device_parse_register(mtd, NULL, &ppdata, NULL, 0); |
841 | #ifdef CONFIG_MTD_OF_PARTS | ||
842 | if (retval == 0) | ||
843 | retval = of_mtd_parse_partitions(dev, dn, &parts); | ||
844 | #endif | ||
845 | if (retval < 0) { | ||
846 | dev_err(dev, "Error parsing MTD partitions!\n"); | ||
847 | devm_free_irq(dev, prv->irq, mtd); | ||
848 | retval = -EINVAL; | ||
849 | goto error; | ||
850 | } | ||
851 | |||
852 | retval = mtd_device_register(mtd, parts, retval); | ||
853 | if (retval) { | 841 | if (retval) { |
854 | dev_err(dev, "Error adding MTD device!\n"); | 842 | dev_err(dev, "Error adding MTD device!\n"); |
855 | devm_free_irq(dev, prv->irq, mtd); | 843 | devm_free_irq(dev, prv->irq, mtd); |
diff --git a/drivers/mtd/nand/mxc_nand.c b/drivers/mtd/nand/mxc_nand.c index 90df34c4d26c..74a43b818d0e 100644 --- a/drivers/mtd/nand/mxc_nand.c +++ b/drivers/mtd/nand/mxc_nand.c | |||
@@ -41,7 +41,7 @@ | |||
41 | 41 | ||
42 | #define nfc_is_v21() (cpu_is_mx25() || cpu_is_mx35()) | 42 | #define nfc_is_v21() (cpu_is_mx25() || cpu_is_mx35()) |
43 | #define nfc_is_v1() (cpu_is_mx31() || cpu_is_mx27() || cpu_is_mx21()) | 43 | #define nfc_is_v1() (cpu_is_mx31() || cpu_is_mx27() || cpu_is_mx21()) |
44 | #define nfc_is_v3_2() cpu_is_mx51() | 44 | #define nfc_is_v3_2() (cpu_is_mx51() || cpu_is_mx53()) |
45 | #define nfc_is_v3() nfc_is_v3_2() | 45 | #define nfc_is_v3() nfc_is_v3_2() |
46 | 46 | ||
47 | /* Addresses for NFC registers */ | 47 | /* Addresses for NFC registers */ |
@@ -143,7 +143,6 @@ | |||
143 | struct mxc_nand_host { | 143 | struct mxc_nand_host { |
144 | struct mtd_info mtd; | 144 | struct mtd_info mtd; |
145 | struct nand_chip nand; | 145 | struct nand_chip nand; |
146 | struct mtd_partition *parts; | ||
147 | struct device *dev; | 146 | struct device *dev; |
148 | 147 | ||
149 | void *spare0; | 148 | void *spare0; |
@@ -350,8 +349,7 @@ static void wait_op_done(struct mxc_nand_host *host, int useirq) | |||
350 | udelay(1); | 349 | udelay(1); |
351 | } | 350 | } |
352 | if (max_retries < 0) | 351 | if (max_retries < 0) |
353 | DEBUG(MTD_DEBUG_LEVEL0, "%s: INT not set\n", | 352 | pr_debug("%s: INT not set\n", __func__); |
354 | __func__); | ||
355 | } | 353 | } |
356 | } | 354 | } |
357 | 355 | ||
@@ -371,7 +369,7 @@ static void send_cmd_v3(struct mxc_nand_host *host, uint16_t cmd, int useirq) | |||
371 | * waits for completion. */ | 369 | * waits for completion. */ |
372 | static void send_cmd_v1_v2(struct mxc_nand_host *host, uint16_t cmd, int useirq) | 370 | static void send_cmd_v1_v2(struct mxc_nand_host *host, uint16_t cmd, int useirq) |
373 | { | 371 | { |
374 | DEBUG(MTD_DEBUG_LEVEL3, "send_cmd(host, 0x%x, %d)\n", cmd, useirq); | 372 | pr_debug("send_cmd(host, 0x%x, %d)\n", cmd, useirq); |
375 | 373 | ||
376 | writew(cmd, NFC_V1_V2_FLASH_CMD); | 374 | writew(cmd, NFC_V1_V2_FLASH_CMD); |
377 | writew(NFC_CMD, NFC_V1_V2_CONFIG2); | 375 | writew(NFC_CMD, NFC_V1_V2_CONFIG2); |
@@ -387,8 +385,7 @@ static void send_cmd_v1_v2(struct mxc_nand_host *host, uint16_t cmd, int useirq) | |||
387 | udelay(1); | 385 | udelay(1); |
388 | } | 386 | } |
389 | if (max_retries < 0) | 387 | if (max_retries < 0) |
390 | DEBUG(MTD_DEBUG_LEVEL0, "%s: RESET failed\n", | 388 | pr_debug("%s: RESET failed\n", __func__); |
391 | __func__); | ||
392 | } else { | 389 | } else { |
393 | /* Wait for operation to complete */ | 390 | /* Wait for operation to complete */ |
394 | wait_op_done(host, useirq); | 391 | wait_op_done(host, useirq); |
@@ -411,7 +408,7 @@ static void send_addr_v3(struct mxc_nand_host *host, uint16_t addr, int islast) | |||
411 | * a NAND command. */ | 408 | * a NAND command. */ |
412 | static void send_addr_v1_v2(struct mxc_nand_host *host, uint16_t addr, int islast) | 409 | static void send_addr_v1_v2(struct mxc_nand_host *host, uint16_t addr, int islast) |
413 | { | 410 | { |
414 | DEBUG(MTD_DEBUG_LEVEL3, "send_addr(host, 0x%x %d)\n", addr, islast); | 411 | pr_debug("send_addr(host, 0x%x %d)\n", addr, islast); |
415 | 412 | ||
416 | writew(addr, NFC_V1_V2_FLASH_ADDR); | 413 | writew(addr, NFC_V1_V2_FLASH_ADDR); |
417 | writew(NFC_ADDR, NFC_V1_V2_CONFIG2); | 414 | writew(NFC_ADDR, NFC_V1_V2_CONFIG2); |
@@ -561,8 +558,7 @@ static int mxc_nand_correct_data_v1(struct mtd_info *mtd, u_char *dat, | |||
561 | uint16_t ecc_status = readw(NFC_V1_V2_ECC_STATUS_RESULT); | 558 | uint16_t ecc_status = readw(NFC_V1_V2_ECC_STATUS_RESULT); |
562 | 559 | ||
563 | if (((ecc_status & 0x3) == 2) || ((ecc_status >> 2) == 2)) { | 560 | if (((ecc_status & 0x3) == 2) || ((ecc_status >> 2) == 2)) { |
564 | DEBUG(MTD_DEBUG_LEVEL0, | 561 | pr_debug("MXC_NAND: HWECC uncorrectable 2-bit ECC error\n"); |
565 | "MXC_NAND: HWECC uncorrectable 2-bit ECC error\n"); | ||
566 | return -1; | 562 | return -1; |
567 | } | 563 | } |
568 | 564 | ||
@@ -849,7 +845,7 @@ static void preset_v1_v2(struct mtd_info *mtd) | |||
849 | writew(0xffff, NFC_V21_UNLOCKEND_BLKADDR3); | 845 | writew(0xffff, NFC_V21_UNLOCKEND_BLKADDR3); |
850 | } else if (nfc_is_v1()) { | 846 | } else if (nfc_is_v1()) { |
851 | writew(0x0, NFC_V1_UNLOCKSTART_BLKADDR); | 847 | writew(0x0, NFC_V1_UNLOCKSTART_BLKADDR); |
852 | writew(0x4000, NFC_V1_UNLOCKEND_BLKADDR); | 848 | writew(0xffff, NFC_V1_UNLOCKEND_BLKADDR); |
853 | } else | 849 | } else |
854 | BUG(); | 850 | BUG(); |
855 | 851 | ||
@@ -932,8 +928,7 @@ static void mxc_nand_command(struct mtd_info *mtd, unsigned command, | |||
932 | struct nand_chip *nand_chip = mtd->priv; | 928 | struct nand_chip *nand_chip = mtd->priv; |
933 | struct mxc_nand_host *host = nand_chip->priv; | 929 | struct mxc_nand_host *host = nand_chip->priv; |
934 | 930 | ||
935 | DEBUG(MTD_DEBUG_LEVEL3, | 931 | pr_debug("mxc_nand_command (cmd = 0x%x, col = 0x%x, page = 0x%x)\n", |
936 | "mxc_nand_command (cmd = 0x%x, col = 0x%x, page = 0x%x)\n", | ||
937 | command, column, page_addr); | 932 | command, column, page_addr); |
938 | 933 | ||
939 | /* Reset command state information */ | 934 | /* Reset command state information */ |
@@ -1044,7 +1039,7 @@ static int __init mxcnd_probe(struct platform_device *pdev) | |||
1044 | struct mxc_nand_platform_data *pdata = pdev->dev.platform_data; | 1039 | struct mxc_nand_platform_data *pdata = pdev->dev.platform_data; |
1045 | struct mxc_nand_host *host; | 1040 | struct mxc_nand_host *host; |
1046 | struct resource *res; | 1041 | struct resource *res; |
1047 | int err = 0, __maybe_unused nr_parts = 0; | 1042 | int err = 0; |
1048 | struct nand_ecclayout *oob_smallpage, *oob_largepage; | 1043 | struct nand_ecclayout *oob_smallpage, *oob_largepage; |
1049 | 1044 | ||
1050 | /* Allocate memory for MTD device structure and private data */ | 1045 | /* Allocate memory for MTD device structure and private data */ |
@@ -1179,7 +1174,7 @@ static int __init mxcnd_probe(struct platform_device *pdev) | |||
1179 | this->bbt_td = &bbt_main_descr; | 1174 | this->bbt_td = &bbt_main_descr; |
1180 | this->bbt_md = &bbt_mirror_descr; | 1175 | this->bbt_md = &bbt_mirror_descr; |
1181 | /* update flash based bbt */ | 1176 | /* update flash based bbt */ |
1182 | this->options |= NAND_USE_FLASH_BBT; | 1177 | this->bbt_options |= NAND_BBT_USE_FLASH; |
1183 | } | 1178 | } |
1184 | 1179 | ||
1185 | init_completion(&host->op_completion); | 1180 | init_completion(&host->op_completion); |
@@ -1231,16 +1226,8 @@ static int __init mxcnd_probe(struct platform_device *pdev) | |||
1231 | } | 1226 | } |
1232 | 1227 | ||
1233 | /* Register the partitions */ | 1228 | /* Register the partitions */ |
1234 | nr_parts = | 1229 | mtd_device_parse_register(mtd, part_probes, 0, |
1235 | parse_mtd_partitions(mtd, part_probes, &host->parts, 0); | 1230 | pdata->parts, pdata->nr_parts); |
1236 | if (nr_parts > 0) | ||
1237 | mtd_device_register(mtd, host->parts, nr_parts); | ||
1238 | else if (pdata->parts) | ||
1239 | mtd_device_register(mtd, pdata->parts, pdata->nr_parts); | ||
1240 | else { | ||
1241 | pr_info("Registering %s as whole device\n", mtd->name); | ||
1242 | mtd_device_register(mtd, NULL, 0); | ||
1243 | } | ||
1244 | 1231 | ||
1245 | platform_set_drvdata(pdev, host); | 1232 | platform_set_drvdata(pdev, host); |
1246 | 1233 | ||
diff --git a/drivers/mtd/nand/nand_base.c b/drivers/mtd/nand/nand_base.c index a46e9bb847bd..3ed9c5e4d34e 100644 --- a/drivers/mtd/nand/nand_base.c +++ b/drivers/mtd/nand/nand_base.c | |||
@@ -21,7 +21,7 @@ | |||
21 | * TODO: | 21 | * TODO: |
22 | * Enable cached programming for 2k page size chips | 22 | * Enable cached programming for 2k page size chips |
23 | * Check, if mtd->ecctype should be set to MTD_ECC_HW | 23 | * Check, if mtd->ecctype should be set to MTD_ECC_HW |
24 | * if we have HW ecc support. | 24 | * if we have HW ECC support. |
25 | * The AG-AND chips have nice features for speed improvement, | 25 | * The AG-AND chips have nice features for speed improvement, |
26 | * which are not supported yet. Read / program 4 pages in one go. | 26 | * which are not supported yet. Read / program 4 pages in one go. |
27 | * BBT table is not serialized, has to be fixed | 27 | * BBT table is not serialized, has to be fixed |
@@ -113,21 +113,19 @@ static int check_offs_len(struct mtd_info *mtd, | |||
113 | 113 | ||
114 | /* Start address must align on block boundary */ | 114 | /* Start address must align on block boundary */ |
115 | if (ofs & ((1 << chip->phys_erase_shift) - 1)) { | 115 | if (ofs & ((1 << chip->phys_erase_shift) - 1)) { |
116 | DEBUG(MTD_DEBUG_LEVEL0, "%s: Unaligned address\n", __func__); | 116 | pr_debug("%s: unaligned address\n", __func__); |
117 | ret = -EINVAL; | 117 | ret = -EINVAL; |
118 | } | 118 | } |
119 | 119 | ||
120 | /* Length must align on block boundary */ | 120 | /* Length must align on block boundary */ |
121 | if (len & ((1 << chip->phys_erase_shift) - 1)) { | 121 | if (len & ((1 << chip->phys_erase_shift) - 1)) { |
122 | DEBUG(MTD_DEBUG_LEVEL0, "%s: Length not block aligned\n", | 122 | pr_debug("%s: length not block aligned\n", __func__); |
123 | __func__); | ||
124 | ret = -EINVAL; | 123 | ret = -EINVAL; |
125 | } | 124 | } |
126 | 125 | ||
127 | /* Do not allow past end of device */ | 126 | /* Do not allow past end of device */ |
128 | if (ofs + len > mtd->size) { | 127 | if (ofs + len > mtd->size) { |
129 | DEBUG(MTD_DEBUG_LEVEL0, "%s: Past end of device\n", | 128 | pr_debug("%s: past end of device\n", __func__); |
130 | __func__); | ||
131 | ret = -EINVAL; | 129 | ret = -EINVAL; |
132 | } | 130 | } |
133 | 131 | ||
@@ -136,9 +134,9 @@ static int check_offs_len(struct mtd_info *mtd, | |||
136 | 134 | ||
137 | /** | 135 | /** |
138 | * nand_release_device - [GENERIC] release chip | 136 | * nand_release_device - [GENERIC] release chip |
139 | * @mtd: MTD device structure | 137 | * @mtd: MTD device structure |
140 | * | 138 | * |
141 | * Deselect, release chip lock and wake up anyone waiting on the device | 139 | * Deselect, release chip lock and wake up anyone waiting on the device. |
142 | */ | 140 | */ |
143 | static void nand_release_device(struct mtd_info *mtd) | 141 | static void nand_release_device(struct mtd_info *mtd) |
144 | { | 142 | { |
@@ -157,9 +155,9 @@ static void nand_release_device(struct mtd_info *mtd) | |||
157 | 155 | ||
158 | /** | 156 | /** |
159 | * nand_read_byte - [DEFAULT] read one byte from the chip | 157 | * nand_read_byte - [DEFAULT] read one byte from the chip |
160 | * @mtd: MTD device structure | 158 | * @mtd: MTD device structure |
161 | * | 159 | * |
162 | * Default read function for 8bit buswith | 160 | * Default read function for 8bit buswidth |
163 | */ | 161 | */ |
164 | static uint8_t nand_read_byte(struct mtd_info *mtd) | 162 | static uint8_t nand_read_byte(struct mtd_info *mtd) |
165 | { | 163 | { |
@@ -169,10 +167,11 @@ static uint8_t nand_read_byte(struct mtd_info *mtd) | |||
169 | 167 | ||
170 | /** | 168 | /** |
171 | * nand_read_byte16 - [DEFAULT] read one byte endianess aware from the chip | 169 | * nand_read_byte16 - [DEFAULT] read one byte endianess aware from the chip |
172 | * @mtd: MTD device structure | 170 | * nand_read_byte16 - [DEFAULT] read one byte endianness aware from the chip |
171 | * @mtd: MTD device structure | ||
172 | * | ||
173 | * Default read function for 16bit buswidth with endianness conversion. | ||
173 | * | 174 | * |
174 | * Default read function for 16bit buswith with | ||
175 | * endianess conversion | ||
176 | */ | 175 | */ |
177 | static uint8_t nand_read_byte16(struct mtd_info *mtd) | 176 | static uint8_t nand_read_byte16(struct mtd_info *mtd) |
178 | { | 177 | { |
@@ -182,10 +181,9 @@ static uint8_t nand_read_byte16(struct mtd_info *mtd) | |||
182 | 181 | ||
183 | /** | 182 | /** |
184 | * nand_read_word - [DEFAULT] read one word from the chip | 183 | * nand_read_word - [DEFAULT] read one word from the chip |
185 | * @mtd: MTD device structure | 184 | * @mtd: MTD device structure |
186 | * | 185 | * |
187 | * Default read function for 16bit buswith without | 186 | * Default read function for 16bit buswidth without endianness conversion. |
188 | * endianess conversion | ||
189 | */ | 187 | */ |
190 | static u16 nand_read_word(struct mtd_info *mtd) | 188 | static u16 nand_read_word(struct mtd_info *mtd) |
191 | { | 189 | { |
@@ -195,8 +193,8 @@ static u16 nand_read_word(struct mtd_info *mtd) | |||
195 | 193 | ||
196 | /** | 194 | /** |
197 | * nand_select_chip - [DEFAULT] control CE line | 195 | * nand_select_chip - [DEFAULT] control CE line |
198 | * @mtd: MTD device structure | 196 | * @mtd: MTD device structure |
199 | * @chipnr: chipnumber to select, -1 for deselect | 197 | * @chipnr: chipnumber to select, -1 for deselect |
200 | * | 198 | * |
201 | * Default select function for 1 chip devices. | 199 | * Default select function for 1 chip devices. |
202 | */ | 200 | */ |
@@ -218,11 +216,11 @@ static void nand_select_chip(struct mtd_info *mtd, int chipnr) | |||
218 | 216 | ||
219 | /** | 217 | /** |
220 | * nand_write_buf - [DEFAULT] write buffer to chip | 218 | * nand_write_buf - [DEFAULT] write buffer to chip |
221 | * @mtd: MTD device structure | 219 | * @mtd: MTD device structure |
222 | * @buf: data buffer | 220 | * @buf: data buffer |
223 | * @len: number of bytes to write | 221 | * @len: number of bytes to write |
224 | * | 222 | * |
225 | * Default write function for 8bit buswith | 223 | * Default write function for 8bit buswidth. |
226 | */ | 224 | */ |
227 | static void nand_write_buf(struct mtd_info *mtd, const uint8_t *buf, int len) | 225 | static void nand_write_buf(struct mtd_info *mtd, const uint8_t *buf, int len) |
228 | { | 226 | { |
@@ -235,11 +233,11 @@ static void nand_write_buf(struct mtd_info *mtd, const uint8_t *buf, int len) | |||
235 | 233 | ||
236 | /** | 234 | /** |
237 | * nand_read_buf - [DEFAULT] read chip data into buffer | 235 | * nand_read_buf - [DEFAULT] read chip data into buffer |
238 | * @mtd: MTD device structure | 236 | * @mtd: MTD device structure |
239 | * @buf: buffer to store date | 237 | * @buf: buffer to store date |
240 | * @len: number of bytes to read | 238 | * @len: number of bytes to read |
241 | * | 239 | * |
242 | * Default read function for 8bit buswith | 240 | * Default read function for 8bit buswidth. |
243 | */ | 241 | */ |
244 | static void nand_read_buf(struct mtd_info *mtd, uint8_t *buf, int len) | 242 | static void nand_read_buf(struct mtd_info *mtd, uint8_t *buf, int len) |
245 | { | 243 | { |
@@ -252,11 +250,11 @@ static void nand_read_buf(struct mtd_info *mtd, uint8_t *buf, int len) | |||
252 | 250 | ||
253 | /** | 251 | /** |
254 | * nand_verify_buf - [DEFAULT] Verify chip data against buffer | 252 | * nand_verify_buf - [DEFAULT] Verify chip data against buffer |
255 | * @mtd: MTD device structure | 253 | * @mtd: MTD device structure |
256 | * @buf: buffer containing the data to compare | 254 | * @buf: buffer containing the data to compare |
257 | * @len: number of bytes to compare | 255 | * @len: number of bytes to compare |
258 | * | 256 | * |
259 | * Default verify function for 8bit buswith | 257 | * Default verify function for 8bit buswidth. |
260 | */ | 258 | */ |
261 | static int nand_verify_buf(struct mtd_info *mtd, const uint8_t *buf, int len) | 259 | static int nand_verify_buf(struct mtd_info *mtd, const uint8_t *buf, int len) |
262 | { | 260 | { |
@@ -271,11 +269,11 @@ static int nand_verify_buf(struct mtd_info *mtd, const uint8_t *buf, int len) | |||
271 | 269 | ||
272 | /** | 270 | /** |
273 | * nand_write_buf16 - [DEFAULT] write buffer to chip | 271 | * nand_write_buf16 - [DEFAULT] write buffer to chip |
274 | * @mtd: MTD device structure | 272 | * @mtd: MTD device structure |
275 | * @buf: data buffer | 273 | * @buf: data buffer |
276 | * @len: number of bytes to write | 274 | * @len: number of bytes to write |
277 | * | 275 | * |
278 | * Default write function for 16bit buswith | 276 | * Default write function for 16bit buswidth. |
279 | */ | 277 | */ |
280 | static void nand_write_buf16(struct mtd_info *mtd, const uint8_t *buf, int len) | 278 | static void nand_write_buf16(struct mtd_info *mtd, const uint8_t *buf, int len) |
281 | { | 279 | { |
@@ -291,11 +289,11 @@ static void nand_write_buf16(struct mtd_info *mtd, const uint8_t *buf, int len) | |||
291 | 289 | ||
292 | /** | 290 | /** |
293 | * nand_read_buf16 - [DEFAULT] read chip data into buffer | 291 | * nand_read_buf16 - [DEFAULT] read chip data into buffer |
294 | * @mtd: MTD device structure | 292 | * @mtd: MTD device structure |
295 | * @buf: buffer to store date | 293 | * @buf: buffer to store date |
296 | * @len: number of bytes to read | 294 | * @len: number of bytes to read |
297 | * | 295 | * |
298 | * Default read function for 16bit buswith | 296 | * Default read function for 16bit buswidth. |
299 | */ | 297 | */ |
300 | static void nand_read_buf16(struct mtd_info *mtd, uint8_t *buf, int len) | 298 | static void nand_read_buf16(struct mtd_info *mtd, uint8_t *buf, int len) |
301 | { | 299 | { |
@@ -310,11 +308,11 @@ static void nand_read_buf16(struct mtd_info *mtd, uint8_t *buf, int len) | |||
310 | 308 | ||
311 | /** | 309 | /** |
312 | * nand_verify_buf16 - [DEFAULT] Verify chip data against buffer | 310 | * nand_verify_buf16 - [DEFAULT] Verify chip data against buffer |
313 | * @mtd: MTD device structure | 311 | * @mtd: MTD device structure |
314 | * @buf: buffer containing the data to compare | 312 | * @buf: buffer containing the data to compare |
315 | * @len: number of bytes to compare | 313 | * @len: number of bytes to compare |
316 | * | 314 | * |
317 | * Default verify function for 16bit buswith | 315 | * Default verify function for 16bit buswidth. |
318 | */ | 316 | */ |
319 | static int nand_verify_buf16(struct mtd_info *mtd, const uint8_t *buf, int len) | 317 | static int nand_verify_buf16(struct mtd_info *mtd, const uint8_t *buf, int len) |
320 | { | 318 | { |
@@ -332,9 +330,9 @@ static int nand_verify_buf16(struct mtd_info *mtd, const uint8_t *buf, int len) | |||
332 | 330 | ||
333 | /** | 331 | /** |
334 | * nand_block_bad - [DEFAULT] Read bad block marker from the chip | 332 | * nand_block_bad - [DEFAULT] Read bad block marker from the chip |
335 | * @mtd: MTD device structure | 333 | * @mtd: MTD device structure |
336 | * @ofs: offset from device start | 334 | * @ofs: offset from device start |
337 | * @getchip: 0, if the chip is already selected | 335 | * @getchip: 0, if the chip is already selected |
338 | * | 336 | * |
339 | * Check, if the block is bad. | 337 | * Check, if the block is bad. |
340 | */ | 338 | */ |
@@ -344,7 +342,7 @@ static int nand_block_bad(struct mtd_info *mtd, loff_t ofs, int getchip) | |||
344 | struct nand_chip *chip = mtd->priv; | 342 | struct nand_chip *chip = mtd->priv; |
345 | u16 bad; | 343 | u16 bad; |
346 | 344 | ||
347 | if (chip->options & NAND_BBT_SCANLASTPAGE) | 345 | if (chip->bbt_options & NAND_BBT_SCANLASTPAGE) |
348 | ofs += mtd->erasesize - mtd->writesize; | 346 | ofs += mtd->erasesize - mtd->writesize; |
349 | 347 | ||
350 | page = (int)(ofs >> chip->page_shift) & chip->pagemask; | 348 | page = (int)(ofs >> chip->page_shift) & chip->pagemask; |
@@ -384,11 +382,11 @@ static int nand_block_bad(struct mtd_info *mtd, loff_t ofs, int getchip) | |||
384 | 382 | ||
385 | /** | 383 | /** |
386 | * nand_default_block_markbad - [DEFAULT] mark a block bad | 384 | * nand_default_block_markbad - [DEFAULT] mark a block bad |
387 | * @mtd: MTD device structure | 385 | * @mtd: MTD device structure |
388 | * @ofs: offset from device start | 386 | * @ofs: offset from device start |
389 | * | 387 | * |
390 | * This is the default implementation, which can be overridden by | 388 | * This is the default implementation, which can be overridden by a hardware |
391 | * a hardware specific driver. | 389 | * specific driver. |
392 | */ | 390 | */ |
393 | static int nand_default_block_markbad(struct mtd_info *mtd, loff_t ofs) | 391 | static int nand_default_block_markbad(struct mtd_info *mtd, loff_t ofs) |
394 | { | 392 | { |
@@ -396,7 +394,7 @@ static int nand_default_block_markbad(struct mtd_info *mtd, loff_t ofs) | |||
396 | uint8_t buf[2] = { 0, 0 }; | 394 | uint8_t buf[2] = { 0, 0 }; |
397 | int block, ret, i = 0; | 395 | int block, ret, i = 0; |
398 | 396 | ||
399 | if (chip->options & NAND_BBT_SCANLASTPAGE) | 397 | if (chip->bbt_options & NAND_BBT_SCANLASTPAGE) |
400 | ofs += mtd->erasesize - mtd->writesize; | 398 | ofs += mtd->erasesize - mtd->writesize; |
401 | 399 | ||
402 | /* Get block number */ | 400 | /* Get block number */ |
@@ -404,33 +402,31 @@ static int nand_default_block_markbad(struct mtd_info *mtd, loff_t ofs) | |||
404 | if (chip->bbt) | 402 | if (chip->bbt) |
405 | chip->bbt[block >> 2] |= 0x01 << ((block & 0x03) << 1); | 403 | chip->bbt[block >> 2] |= 0x01 << ((block & 0x03) << 1); |
406 | 404 | ||
407 | /* Do we have a flash based bad block table ? */ | 405 | /* Do we have a flash based bad block table? */ |
408 | if (chip->options & NAND_USE_FLASH_BBT) | 406 | if (chip->bbt_options & NAND_BBT_USE_FLASH) |
409 | ret = nand_update_bbt(mtd, ofs); | 407 | ret = nand_update_bbt(mtd, ofs); |
410 | else { | 408 | else { |
409 | struct mtd_oob_ops ops; | ||
410 | |||
411 | nand_get_device(chip, mtd, FL_WRITING); | 411 | nand_get_device(chip, mtd, FL_WRITING); |
412 | 412 | ||
413 | /* Write to first two pages and to byte 1 and 6 if necessary. | 413 | /* |
414 | * If we write to more than one location, the first error | 414 | * Write to first two pages if necessary. If we write to more |
415 | * encountered quits the procedure. We write two bytes per | 415 | * than one location, the first error encountered quits the |
416 | * location, so we dont have to mess with 16 bit access. | 416 | * procedure. We write two bytes per location, so we dont have |
417 | * to mess with 16 bit access. | ||
417 | */ | 418 | */ |
419 | ops.len = ops.ooblen = 2; | ||
420 | ops.datbuf = NULL; | ||
421 | ops.oobbuf = buf; | ||
422 | ops.ooboffs = chip->badblockpos & ~0x01; | ||
423 | ops.mode = MTD_OPS_PLACE_OOB; | ||
418 | do { | 424 | do { |
419 | chip->ops.len = chip->ops.ooblen = 2; | 425 | ret = nand_do_write_oob(mtd, ofs, &ops); |
420 | chip->ops.datbuf = NULL; | ||
421 | chip->ops.oobbuf = buf; | ||
422 | chip->ops.ooboffs = chip->badblockpos & ~0x01; | ||
423 | |||
424 | ret = nand_do_write_oob(mtd, ofs, &chip->ops); | ||
425 | 426 | ||
426 | if (!ret && (chip->options & NAND_BBT_SCANBYTE1AND6)) { | ||
427 | chip->ops.ooboffs = NAND_SMALL_BADBLOCK_POS | ||
428 | & ~0x01; | ||
429 | ret = nand_do_write_oob(mtd, ofs, &chip->ops); | ||
430 | } | ||
431 | i++; | 427 | i++; |
432 | ofs += mtd->writesize; | 428 | ofs += mtd->writesize; |
433 | } while (!ret && (chip->options & NAND_BBT_SCAN2NDPAGE) && | 429 | } while (!ret && (chip->bbt_options & NAND_BBT_SCAN2NDPAGE) && |
434 | i < 2); | 430 | i < 2); |
435 | 431 | ||
436 | nand_release_device(mtd); | 432 | nand_release_device(mtd); |
@@ -443,16 +439,16 @@ static int nand_default_block_markbad(struct mtd_info *mtd, loff_t ofs) | |||
443 | 439 | ||
444 | /** | 440 | /** |
445 | * nand_check_wp - [GENERIC] check if the chip is write protected | 441 | * nand_check_wp - [GENERIC] check if the chip is write protected |
446 | * @mtd: MTD device structure | 442 | * @mtd: MTD device structure |
447 | * Check, if the device is write protected | ||
448 | * | 443 | * |
449 | * The function expects, that the device is already selected | 444 | * Check, if the device is write protected. The function expects, that the |
445 | * device is already selected. | ||
450 | */ | 446 | */ |
451 | static int nand_check_wp(struct mtd_info *mtd) | 447 | static int nand_check_wp(struct mtd_info *mtd) |
452 | { | 448 | { |
453 | struct nand_chip *chip = mtd->priv; | 449 | struct nand_chip *chip = mtd->priv; |
454 | 450 | ||
455 | /* broken xD cards report WP despite being writable */ | 451 | /* Broken xD cards report WP despite being writable */ |
456 | if (chip->options & NAND_BROKEN_XD) | 452 | if (chip->options & NAND_BROKEN_XD) |
457 | return 0; | 453 | return 0; |
458 | 454 | ||
@@ -463,10 +459,10 @@ static int nand_check_wp(struct mtd_info *mtd) | |||
463 | 459 | ||
464 | /** | 460 | /** |
465 | * nand_block_checkbad - [GENERIC] Check if a block is marked bad | 461 | * nand_block_checkbad - [GENERIC] Check if a block is marked bad |
466 | * @mtd: MTD device structure | 462 | * @mtd: MTD device structure |
467 | * @ofs: offset from device start | 463 | * @ofs: offset from device start |
468 | * @getchip: 0, if the chip is already selected | 464 | * @getchip: 0, if the chip is already selected |
469 | * @allowbbt: 1, if its allowed to access the bbt area | 465 | * @allowbbt: 1, if its allowed to access the bbt area |
470 | * | 466 | * |
471 | * Check, if the block is bad. Either by reading the bad block table or | 467 | * Check, if the block is bad. Either by reading the bad block table or |
472 | * calling of the scan function. | 468 | * calling of the scan function. |
@@ -485,8 +481,8 @@ static int nand_block_checkbad(struct mtd_info *mtd, loff_t ofs, int getchip, | |||
485 | 481 | ||
486 | /** | 482 | /** |
487 | * panic_nand_wait_ready - [GENERIC] Wait for the ready pin after commands. | 483 | * panic_nand_wait_ready - [GENERIC] Wait for the ready pin after commands. |
488 | * @mtd: MTD device structure | 484 | * @mtd: MTD device structure |
489 | * @timeo: Timeout | 485 | * @timeo: Timeout |
490 | * | 486 | * |
491 | * Helper function for nand_wait_ready used when needing to wait in interrupt | 487 | * Helper function for nand_wait_ready used when needing to wait in interrupt |
492 | * context. | 488 | * context. |
@@ -505,10 +501,7 @@ static void panic_nand_wait_ready(struct mtd_info *mtd, unsigned long timeo) | |||
505 | } | 501 | } |
506 | } | 502 | } |
507 | 503 | ||
508 | /* | 504 | /* Wait for the ready pin, after a command. The timeout is caught later. */ |
509 | * Wait for the ready pin, after a command | ||
510 | * The timeout is catched later. | ||
511 | */ | ||
512 | void nand_wait_ready(struct mtd_info *mtd) | 505 | void nand_wait_ready(struct mtd_info *mtd) |
513 | { | 506 | { |
514 | struct nand_chip *chip = mtd->priv; | 507 | struct nand_chip *chip = mtd->priv; |
@@ -519,7 +512,7 @@ void nand_wait_ready(struct mtd_info *mtd) | |||
519 | return panic_nand_wait_ready(mtd, 400); | 512 | return panic_nand_wait_ready(mtd, 400); |
520 | 513 | ||
521 | led_trigger_event(nand_led_trigger, LED_FULL); | 514 | led_trigger_event(nand_led_trigger, LED_FULL); |
522 | /* wait until command is processed or timeout occures */ | 515 | /* Wait until command is processed or timeout occurs */ |
523 | do { | 516 | do { |
524 | if (chip->dev_ready(mtd)) | 517 | if (chip->dev_ready(mtd)) |
525 | break; | 518 | break; |
@@ -531,13 +524,13 @@ EXPORT_SYMBOL_GPL(nand_wait_ready); | |||
531 | 524 | ||
532 | /** | 525 | /** |
533 | * nand_command - [DEFAULT] Send command to NAND device | 526 | * nand_command - [DEFAULT] Send command to NAND device |
534 | * @mtd: MTD device structure | 527 | * @mtd: MTD device structure |
535 | * @command: the command to be sent | 528 | * @command: the command to be sent |
536 | * @column: the column address for this command, -1 if none | 529 | * @column: the column address for this command, -1 if none |
537 | * @page_addr: the page address for this command, -1 if none | 530 | * @page_addr: the page address for this command, -1 if none |
538 | * | 531 | * |
539 | * Send command to NAND device. This function is used for small page | 532 | * Send command to NAND device. This function is used for small page devices |
540 | * devices (256/512 Bytes per page) | 533 | * (256/512 Bytes per page). |
541 | */ | 534 | */ |
542 | static void nand_command(struct mtd_info *mtd, unsigned int command, | 535 | static void nand_command(struct mtd_info *mtd, unsigned int command, |
543 | int column, int page_addr) | 536 | int column, int page_addr) |
@@ -545,9 +538,7 @@ static void nand_command(struct mtd_info *mtd, unsigned int command, | |||
545 | register struct nand_chip *chip = mtd->priv; | 538 | register struct nand_chip *chip = mtd->priv; |
546 | int ctrl = NAND_CTRL_CLE | NAND_CTRL_CHANGE; | 539 | int ctrl = NAND_CTRL_CLE | NAND_CTRL_CHANGE; |
547 | 540 | ||
548 | /* | 541 | /* Write out the command to the device */ |
549 | * Write out the command to the device. | ||
550 | */ | ||
551 | if (command == NAND_CMD_SEQIN) { | 542 | if (command == NAND_CMD_SEQIN) { |
552 | int readcmd; | 543 | int readcmd; |
553 | 544 | ||
@@ -567,9 +558,7 @@ static void nand_command(struct mtd_info *mtd, unsigned int command, | |||
567 | } | 558 | } |
568 | chip->cmd_ctrl(mtd, command, ctrl); | 559 | chip->cmd_ctrl(mtd, command, ctrl); |
569 | 560 | ||
570 | /* | 561 | /* Address cycle, when necessary */ |
571 | * Address cycle, when necessary | ||
572 | */ | ||
573 | ctrl = NAND_CTRL_ALE | NAND_CTRL_CHANGE; | 562 | ctrl = NAND_CTRL_ALE | NAND_CTRL_CHANGE; |
574 | /* Serially input address */ | 563 | /* Serially input address */ |
575 | if (column != -1) { | 564 | if (column != -1) { |
@@ -590,8 +579,8 @@ static void nand_command(struct mtd_info *mtd, unsigned int command, | |||
590 | chip->cmd_ctrl(mtd, NAND_CMD_NONE, NAND_NCE | NAND_CTRL_CHANGE); | 579 | chip->cmd_ctrl(mtd, NAND_CMD_NONE, NAND_NCE | NAND_CTRL_CHANGE); |
591 | 580 | ||
592 | /* | 581 | /* |
593 | * program and erase have their own busy handlers | 582 | * Program and erase have their own busy handlers status and sequential |
594 | * status and sequential in needs no delay | 583 | * in needs no delay |
595 | */ | 584 | */ |
596 | switch (command) { | 585 | switch (command) { |
597 | 586 | ||
@@ -625,8 +614,10 @@ static void nand_command(struct mtd_info *mtd, unsigned int command, | |||
625 | return; | 614 | return; |
626 | } | 615 | } |
627 | } | 616 | } |
628 | /* Apply this short delay always to ensure that we do wait tWB in | 617 | /* |
629 | * any case on any machine. */ | 618 | * Apply this short delay always to ensure that we do wait tWB in |
619 | * any case on any machine. | ||
620 | */ | ||
630 | ndelay(100); | 621 | ndelay(100); |
631 | 622 | ||
632 | nand_wait_ready(mtd); | 623 | nand_wait_ready(mtd); |
@@ -634,14 +625,14 @@ static void nand_command(struct mtd_info *mtd, unsigned int command, | |||
634 | 625 | ||
635 | /** | 626 | /** |
636 | * nand_command_lp - [DEFAULT] Send command to NAND large page device | 627 | * nand_command_lp - [DEFAULT] Send command to NAND large page device |
637 | * @mtd: MTD device structure | 628 | * @mtd: MTD device structure |
638 | * @command: the command to be sent | 629 | * @command: the command to be sent |
639 | * @column: the column address for this command, -1 if none | 630 | * @column: the column address for this command, -1 if none |
640 | * @page_addr: the page address for this command, -1 if none | 631 | * @page_addr: the page address for this command, -1 if none |
641 | * | 632 | * |
642 | * Send command to NAND device. This is the version for the new large page | 633 | * Send command to NAND device. This is the version for the new large page |
643 | * devices We dont have the separate regions as we have in the small page | 634 | * devices. We don't have the separate regions as we have in the small page |
644 | * devices. We must emulate NAND_CMD_READOOB to keep the code compatible. | 635 | * devices. We must emulate NAND_CMD_READOOB to keep the code compatible. |
645 | */ | 636 | */ |
646 | static void nand_command_lp(struct mtd_info *mtd, unsigned int command, | 637 | static void nand_command_lp(struct mtd_info *mtd, unsigned int command, |
647 | int column, int page_addr) | 638 | int column, int page_addr) |
@@ -683,8 +674,8 @@ static void nand_command_lp(struct mtd_info *mtd, unsigned int command, | |||
683 | chip->cmd_ctrl(mtd, NAND_CMD_NONE, NAND_NCE | NAND_CTRL_CHANGE); | 674 | chip->cmd_ctrl(mtd, NAND_CMD_NONE, NAND_NCE | NAND_CTRL_CHANGE); |
684 | 675 | ||
685 | /* | 676 | /* |
686 | * program and erase have their own busy handlers | 677 | * Program and erase have their own busy handlers status, sequential |
687 | * status, sequential in, and deplete1 need no delay | 678 | * in, and deplete1 need no delay. |
688 | */ | 679 | */ |
689 | switch (command) { | 680 | switch (command) { |
690 | 681 | ||
@@ -698,14 +689,12 @@ static void nand_command_lp(struct mtd_info *mtd, unsigned int command, | |||
698 | case NAND_CMD_DEPLETE1: | 689 | case NAND_CMD_DEPLETE1: |
699 | return; | 690 | return; |
700 | 691 | ||
701 | /* | ||
702 | * read error status commands require only a short delay | ||
703 | */ | ||
704 | case NAND_CMD_STATUS_ERROR: | 692 | case NAND_CMD_STATUS_ERROR: |
705 | case NAND_CMD_STATUS_ERROR0: | 693 | case NAND_CMD_STATUS_ERROR0: |
706 | case NAND_CMD_STATUS_ERROR1: | 694 | case NAND_CMD_STATUS_ERROR1: |
707 | case NAND_CMD_STATUS_ERROR2: | 695 | case NAND_CMD_STATUS_ERROR2: |
708 | case NAND_CMD_STATUS_ERROR3: | 696 | case NAND_CMD_STATUS_ERROR3: |
697 | /* Read error status commands require only a short delay */ | ||
709 | udelay(chip->chip_delay); | 698 | udelay(chip->chip_delay); |
710 | return; | 699 | return; |
711 | 700 | ||
@@ -739,7 +728,7 @@ static void nand_command_lp(struct mtd_info *mtd, unsigned int command, | |||
739 | default: | 728 | default: |
740 | /* | 729 | /* |
741 | * If we don't have access to the busy pin, we apply the given | 730 | * If we don't have access to the busy pin, we apply the given |
742 | * command delay | 731 | * command delay. |
743 | */ | 732 | */ |
744 | if (!chip->dev_ready) { | 733 | if (!chip->dev_ready) { |
745 | udelay(chip->chip_delay); | 734 | udelay(chip->chip_delay); |
@@ -747,8 +736,10 @@ static void nand_command_lp(struct mtd_info *mtd, unsigned int command, | |||
747 | } | 736 | } |
748 | } | 737 | } |
749 | 738 | ||
750 | /* Apply this short delay always to ensure that we do wait tWB in | 739 | /* |
751 | * any case on any machine. */ | 740 | * Apply this short delay always to ensure that we do wait tWB in |
741 | * any case on any machine. | ||
742 | */ | ||
752 | ndelay(100); | 743 | ndelay(100); |
753 | 744 | ||
754 | nand_wait_ready(mtd); | 745 | nand_wait_ready(mtd); |
@@ -756,25 +747,25 @@ static void nand_command_lp(struct mtd_info *mtd, unsigned int command, | |||
756 | 747 | ||
757 | /** | 748 | /** |
758 | * panic_nand_get_device - [GENERIC] Get chip for selected access | 749 | * panic_nand_get_device - [GENERIC] Get chip for selected access |
759 | * @chip: the nand chip descriptor | 750 | * @chip: the nand chip descriptor |
760 | * @mtd: MTD device structure | 751 | * @mtd: MTD device structure |
761 | * @new_state: the state which is requested | 752 | * @new_state: the state which is requested |
762 | * | 753 | * |
763 | * Used when in panic, no locks are taken. | 754 | * Used when in panic, no locks are taken. |
764 | */ | 755 | */ |
765 | static void panic_nand_get_device(struct nand_chip *chip, | 756 | static void panic_nand_get_device(struct nand_chip *chip, |
766 | struct mtd_info *mtd, int new_state) | 757 | struct mtd_info *mtd, int new_state) |
767 | { | 758 | { |
768 | /* Hardware controller shared among independend devices */ | 759 | /* Hardware controller shared among independent devices */ |
769 | chip->controller->active = chip; | 760 | chip->controller->active = chip; |
770 | chip->state = new_state; | 761 | chip->state = new_state; |
771 | } | 762 | } |
772 | 763 | ||
773 | /** | 764 | /** |
774 | * nand_get_device - [GENERIC] Get chip for selected access | 765 | * nand_get_device - [GENERIC] Get chip for selected access |
775 | * @chip: the nand chip descriptor | 766 | * @chip: the nand chip descriptor |
776 | * @mtd: MTD device structure | 767 | * @mtd: MTD device structure |
777 | * @new_state: the state which is requested | 768 | * @new_state: the state which is requested |
778 | * | 769 | * |
779 | * Get the device and lock it for exclusive access | 770 | * Get the device and lock it for exclusive access |
780 | */ | 771 | */ |
@@ -812,10 +803,10 @@ retry: | |||
812 | } | 803 | } |
813 | 804 | ||
814 | /** | 805 | /** |
815 | * panic_nand_wait - [GENERIC] wait until the command is done | 806 | * panic_nand_wait - [GENERIC] wait until the command is done |
816 | * @mtd: MTD device structure | 807 | * @mtd: MTD device structure |
817 | * @chip: NAND chip structure | 808 | * @chip: NAND chip structure |
818 | * @timeo: Timeout | 809 | * @timeo: timeout |
819 | * | 810 | * |
820 | * Wait for command done. This is a helper function for nand_wait used when | 811 | * Wait for command done. This is a helper function for nand_wait used when |
821 | * we are in interrupt context. May happen when in panic and trying to write | 812 | * we are in interrupt context. May happen when in panic and trying to write |
@@ -838,13 +829,13 @@ static void panic_nand_wait(struct mtd_info *mtd, struct nand_chip *chip, | |||
838 | } | 829 | } |
839 | 830 | ||
840 | /** | 831 | /** |
841 | * nand_wait - [DEFAULT] wait until the command is done | 832 | * nand_wait - [DEFAULT] wait until the command is done |
842 | * @mtd: MTD device structure | 833 | * @mtd: MTD device structure |
843 | * @chip: NAND chip structure | 834 | * @chip: NAND chip structure |
844 | * | 835 | * |
845 | * Wait for command done. This applies to erase and program only | 836 | * Wait for command done. This applies to erase and program only. Erase can |
846 | * Erase can take up to 400ms and program up to 20ms according to | 837 | * take up to 400ms and program up to 20ms according to general NAND and |
847 | * general NAND and SmartMedia specs | 838 | * SmartMedia specs. |
848 | */ | 839 | */ |
849 | static int nand_wait(struct mtd_info *mtd, struct nand_chip *chip) | 840 | static int nand_wait(struct mtd_info *mtd, struct nand_chip *chip) |
850 | { | 841 | { |
@@ -859,8 +850,10 @@ static int nand_wait(struct mtd_info *mtd, struct nand_chip *chip) | |||
859 | 850 | ||
860 | led_trigger_event(nand_led_trigger, LED_FULL); | 851 | led_trigger_event(nand_led_trigger, LED_FULL); |
861 | 852 | ||
862 | /* Apply this short delay always to ensure that we do wait tWB in | 853 | /* |
863 | * any case on any machine. */ | 854 | * Apply this short delay always to ensure that we do wait tWB in any |
855 | * case on any machine. | ||
856 | */ | ||
864 | ndelay(100); | 857 | ndelay(100); |
865 | 858 | ||
866 | if ((state == FL_ERASING) && (chip->options & NAND_IS_AND)) | 859 | if ((state == FL_ERASING) && (chip->options & NAND_IS_AND)) |
@@ -890,16 +883,15 @@ static int nand_wait(struct mtd_info *mtd, struct nand_chip *chip) | |||
890 | 883 | ||
891 | /** | 884 | /** |
892 | * __nand_unlock - [REPLACEABLE] unlocks specified locked blocks | 885 | * __nand_unlock - [REPLACEABLE] unlocks specified locked blocks |
893 | * | ||
894 | * @mtd: mtd info | 886 | * @mtd: mtd info |
895 | * @ofs: offset to start unlock from | 887 | * @ofs: offset to start unlock from |
896 | * @len: length to unlock | 888 | * @len: length to unlock |
897 | * @invert: when = 0, unlock the range of blocks within the lower and | 889 | * @invert: when = 0, unlock the range of blocks within the lower and |
898 | * upper boundary address | 890 | * upper boundary address |
899 | * when = 1, unlock the range of blocks outside the boundaries | 891 | * when = 1, unlock the range of blocks outside the boundaries |
900 | * of the lower and upper boundary address | 892 | * of the lower and upper boundary address |
901 | * | 893 | * |
902 | * return - unlock status | 894 | * Returs unlock status. |
903 | */ | 895 | */ |
904 | static int __nand_unlock(struct mtd_info *mtd, loff_t ofs, | 896 | static int __nand_unlock(struct mtd_info *mtd, loff_t ofs, |
905 | uint64_t len, int invert) | 897 | uint64_t len, int invert) |
@@ -919,10 +911,9 @@ static int __nand_unlock(struct mtd_info *mtd, loff_t ofs, | |||
919 | 911 | ||
920 | /* Call wait ready function */ | 912 | /* Call wait ready function */ |
921 | status = chip->waitfunc(mtd, chip); | 913 | status = chip->waitfunc(mtd, chip); |
922 | udelay(1000); | ||
923 | /* See if device thinks it succeeded */ | 914 | /* See if device thinks it succeeded */ |
924 | if (status & 0x01) { | 915 | if (status & 0x01) { |
925 | DEBUG(MTD_DEBUG_LEVEL0, "%s: Error status = 0x%08x\n", | 916 | pr_debug("%s: error status = 0x%08x\n", |
926 | __func__, status); | 917 | __func__, status); |
927 | ret = -EIO; | 918 | ret = -EIO; |
928 | } | 919 | } |
@@ -932,12 +923,11 @@ static int __nand_unlock(struct mtd_info *mtd, loff_t ofs, | |||
932 | 923 | ||
933 | /** | 924 | /** |
934 | * nand_unlock - [REPLACEABLE] unlocks specified locked blocks | 925 | * nand_unlock - [REPLACEABLE] unlocks specified locked blocks |
935 | * | ||
936 | * @mtd: mtd info | 926 | * @mtd: mtd info |
937 | * @ofs: offset to start unlock from | 927 | * @ofs: offset to start unlock from |
938 | * @len: length to unlock | 928 | * @len: length to unlock |
939 | * | 929 | * |
940 | * return - unlock status | 930 | * Returns unlock status. |
941 | */ | 931 | */ |
942 | int nand_unlock(struct mtd_info *mtd, loff_t ofs, uint64_t len) | 932 | int nand_unlock(struct mtd_info *mtd, loff_t ofs, uint64_t len) |
943 | { | 933 | { |
@@ -945,7 +935,7 @@ int nand_unlock(struct mtd_info *mtd, loff_t ofs, uint64_t len) | |||
945 | int chipnr; | 935 | int chipnr; |
946 | struct nand_chip *chip = mtd->priv; | 936 | struct nand_chip *chip = mtd->priv; |
947 | 937 | ||
948 | DEBUG(MTD_DEBUG_LEVEL3, "%s: start = 0x%012llx, len = %llu\n", | 938 | pr_debug("%s: start = 0x%012llx, len = %llu\n", |
949 | __func__, (unsigned long long)ofs, len); | 939 | __func__, (unsigned long long)ofs, len); |
950 | 940 | ||
951 | if (check_offs_len(mtd, ofs, len)) | 941 | if (check_offs_len(mtd, ofs, len)) |
@@ -964,7 +954,7 @@ int nand_unlock(struct mtd_info *mtd, loff_t ofs, uint64_t len) | |||
964 | 954 | ||
965 | /* Check, if it is write protected */ | 955 | /* Check, if it is write protected */ |
966 | if (nand_check_wp(mtd)) { | 956 | if (nand_check_wp(mtd)) { |
967 | DEBUG(MTD_DEBUG_LEVEL0, "%s: Device is write protected!!!\n", | 957 | pr_debug("%s: device is write protected!\n", |
968 | __func__); | 958 | __func__); |
969 | ret = -EIO; | 959 | ret = -EIO; |
970 | goto out; | 960 | goto out; |
@@ -981,18 +971,16 @@ EXPORT_SYMBOL(nand_unlock); | |||
981 | 971 | ||
982 | /** | 972 | /** |
983 | * nand_lock - [REPLACEABLE] locks all blocks present in the device | 973 | * nand_lock - [REPLACEABLE] locks all blocks present in the device |
984 | * | ||
985 | * @mtd: mtd info | 974 | * @mtd: mtd info |
986 | * @ofs: offset to start unlock from | 975 | * @ofs: offset to start unlock from |
987 | * @len: length to unlock | 976 | * @len: length to unlock |
988 | * | 977 | * |
989 | * return - lock status | 978 | * This feature is not supported in many NAND parts. 'Micron' NAND parts do |
979 | * have this feature, but it allows only to lock all blocks, not for specified | ||
980 | * range for block. Implementing 'lock' feature by making use of 'unlock', for | ||
981 | * now. | ||
990 | * | 982 | * |
991 | * This feature is not supported in many NAND parts. 'Micron' NAND parts | 983 | * Returns lock status. |
992 | * do have this feature, but it allows only to lock all blocks, not for | ||
993 | * specified range for block. | ||
994 | * | ||
995 | * Implementing 'lock' feature by making use of 'unlock', for now. | ||
996 | */ | 984 | */ |
997 | int nand_lock(struct mtd_info *mtd, loff_t ofs, uint64_t len) | 985 | int nand_lock(struct mtd_info *mtd, loff_t ofs, uint64_t len) |
998 | { | 986 | { |
@@ -1000,7 +988,7 @@ int nand_lock(struct mtd_info *mtd, loff_t ofs, uint64_t len) | |||
1000 | int chipnr, status, page; | 988 | int chipnr, status, page; |
1001 | struct nand_chip *chip = mtd->priv; | 989 | struct nand_chip *chip = mtd->priv; |
1002 | 990 | ||
1003 | DEBUG(MTD_DEBUG_LEVEL3, "%s: start = 0x%012llx, len = %llu\n", | 991 | pr_debug("%s: start = 0x%012llx, len = %llu\n", |
1004 | __func__, (unsigned long long)ofs, len); | 992 | __func__, (unsigned long long)ofs, len); |
1005 | 993 | ||
1006 | if (check_offs_len(mtd, ofs, len)) | 994 | if (check_offs_len(mtd, ofs, len)) |
@@ -1015,7 +1003,7 @@ int nand_lock(struct mtd_info *mtd, loff_t ofs, uint64_t len) | |||
1015 | 1003 | ||
1016 | /* Check, if it is write protected */ | 1004 | /* Check, if it is write protected */ |
1017 | if (nand_check_wp(mtd)) { | 1005 | if (nand_check_wp(mtd)) { |
1018 | DEBUG(MTD_DEBUG_LEVEL0, "%s: Device is write protected!!!\n", | 1006 | pr_debug("%s: device is write protected!\n", |
1019 | __func__); | 1007 | __func__); |
1020 | status = MTD_ERASE_FAILED; | 1008 | status = MTD_ERASE_FAILED; |
1021 | ret = -EIO; | 1009 | ret = -EIO; |
@@ -1028,10 +1016,9 @@ int nand_lock(struct mtd_info *mtd, loff_t ofs, uint64_t len) | |||
1028 | 1016 | ||
1029 | /* Call wait ready function */ | 1017 | /* Call wait ready function */ |
1030 | status = chip->waitfunc(mtd, chip); | 1018 | status = chip->waitfunc(mtd, chip); |
1031 | udelay(1000); | ||
1032 | /* See if device thinks it succeeded */ | 1019 | /* See if device thinks it succeeded */ |
1033 | if (status & 0x01) { | 1020 | if (status & 0x01) { |
1034 | DEBUG(MTD_DEBUG_LEVEL0, "%s: Error status = 0x%08x\n", | 1021 | pr_debug("%s: error status = 0x%08x\n", |
1035 | __func__, status); | 1022 | __func__, status); |
1036 | ret = -EIO; | 1023 | ret = -EIO; |
1037 | goto out; | 1024 | goto out; |
@@ -1047,13 +1034,13 @@ out: | |||
1047 | EXPORT_SYMBOL(nand_lock); | 1034 | EXPORT_SYMBOL(nand_lock); |
1048 | 1035 | ||
1049 | /** | 1036 | /** |
1050 | * nand_read_page_raw - [Intern] read raw page data without ecc | 1037 | * nand_read_page_raw - [INTERN] read raw page data without ecc |
1051 | * @mtd: mtd info structure | 1038 | * @mtd: mtd info structure |
1052 | * @chip: nand chip info structure | 1039 | * @chip: nand chip info structure |
1053 | * @buf: buffer to store read data | 1040 | * @buf: buffer to store read data |
1054 | * @page: page number to read | 1041 | * @page: page number to read |
1055 | * | 1042 | * |
1056 | * Not for syndrome calculating ecc controllers, which use a special oob layout | 1043 | * Not for syndrome calculating ECC controllers, which use a special oob layout. |
1057 | */ | 1044 | */ |
1058 | static int nand_read_page_raw(struct mtd_info *mtd, struct nand_chip *chip, | 1045 | static int nand_read_page_raw(struct mtd_info *mtd, struct nand_chip *chip, |
1059 | uint8_t *buf, int page) | 1046 | uint8_t *buf, int page) |
@@ -1064,11 +1051,11 @@ static int nand_read_page_raw(struct mtd_info *mtd, struct nand_chip *chip, | |||
1064 | } | 1051 | } |
1065 | 1052 | ||
1066 | /** | 1053 | /** |
1067 | * nand_read_page_raw_syndrome - [Intern] read raw page data without ecc | 1054 | * nand_read_page_raw_syndrome - [INTERN] read raw page data without ecc |
1068 | * @mtd: mtd info structure | 1055 | * @mtd: mtd info structure |
1069 | * @chip: nand chip info structure | 1056 | * @chip: nand chip info structure |
1070 | * @buf: buffer to store read data | 1057 | * @buf: buffer to store read data |
1071 | * @page: page number to read | 1058 | * @page: page number to read |
1072 | * | 1059 | * |
1073 | * We need a special oob layout and handling even when OOB isn't used. | 1060 | * We need a special oob layout and handling even when OOB isn't used. |
1074 | */ | 1061 | */ |
@@ -1107,11 +1094,11 @@ static int nand_read_page_raw_syndrome(struct mtd_info *mtd, | |||
1107 | } | 1094 | } |
1108 | 1095 | ||
1109 | /** | 1096 | /** |
1110 | * nand_read_page_swecc - [REPLACABLE] software ecc based page read function | 1097 | * nand_read_page_swecc - [REPLACEABLE] software ECC based page read function |
1111 | * @mtd: mtd info structure | 1098 | * @mtd: mtd info structure |
1112 | * @chip: nand chip info structure | 1099 | * @chip: nand chip info structure |
1113 | * @buf: buffer to store read data | 1100 | * @buf: buffer to store read data |
1114 | * @page: page number to read | 1101 | * @page: page number to read |
1115 | */ | 1102 | */ |
1116 | static int nand_read_page_swecc(struct mtd_info *mtd, struct nand_chip *chip, | 1103 | static int nand_read_page_swecc(struct mtd_info *mtd, struct nand_chip *chip, |
1117 | uint8_t *buf, int page) | 1104 | uint8_t *buf, int page) |
@@ -1148,12 +1135,12 @@ static int nand_read_page_swecc(struct mtd_info *mtd, struct nand_chip *chip, | |||
1148 | } | 1135 | } |
1149 | 1136 | ||
1150 | /** | 1137 | /** |
1151 | * nand_read_subpage - [REPLACABLE] software ecc based sub-page read function | 1138 | * nand_read_subpage - [REPLACEABLE] software ECC based sub-page read function |
1152 | * @mtd: mtd info structure | 1139 | * @mtd: mtd info structure |
1153 | * @chip: nand chip info structure | 1140 | * @chip: nand chip info structure |
1154 | * @data_offs: offset of requested data within the page | 1141 | * @data_offs: offset of requested data within the page |
1155 | * @readlen: data length | 1142 | * @readlen: data length |
1156 | * @bufpoi: buffer to store read data | 1143 | * @bufpoi: buffer to store read data |
1157 | */ | 1144 | */ |
1158 | static int nand_read_subpage(struct mtd_info *mtd, struct nand_chip *chip, | 1145 | static int nand_read_subpage(struct mtd_info *mtd, struct nand_chip *chip, |
1159 | uint32_t data_offs, uint32_t readlen, uint8_t *bufpoi) | 1146 | uint32_t data_offs, uint32_t readlen, uint8_t *bufpoi) |
@@ -1166,12 +1153,12 @@ static int nand_read_subpage(struct mtd_info *mtd, struct nand_chip *chip, | |||
1166 | int busw = (chip->options & NAND_BUSWIDTH_16) ? 2 : 1; | 1153 | int busw = (chip->options & NAND_BUSWIDTH_16) ? 2 : 1; |
1167 | int index = 0; | 1154 | int index = 0; |
1168 | 1155 | ||
1169 | /* Column address wihin the page aligned to ECC size (256bytes). */ | 1156 | /* Column address within the page aligned to ECC size (256bytes) */ |
1170 | start_step = data_offs / chip->ecc.size; | 1157 | start_step = data_offs / chip->ecc.size; |
1171 | end_step = (data_offs + readlen - 1) / chip->ecc.size; | 1158 | end_step = (data_offs + readlen - 1) / chip->ecc.size; |
1172 | num_steps = end_step - start_step + 1; | 1159 | num_steps = end_step - start_step + 1; |
1173 | 1160 | ||
1174 | /* Data size aligned to ECC ecc.size*/ | 1161 | /* Data size aligned to ECC ecc.size */ |
1175 | datafrag_len = num_steps * chip->ecc.size; | 1162 | datafrag_len = num_steps * chip->ecc.size; |
1176 | eccfrag_len = num_steps * chip->ecc.bytes; | 1163 | eccfrag_len = num_steps * chip->ecc.bytes; |
1177 | 1164 | ||
@@ -1183,13 +1170,14 @@ static int nand_read_subpage(struct mtd_info *mtd, struct nand_chip *chip, | |||
1183 | p = bufpoi + data_col_addr; | 1170 | p = bufpoi + data_col_addr; |
1184 | chip->read_buf(mtd, p, datafrag_len); | 1171 | chip->read_buf(mtd, p, datafrag_len); |
1185 | 1172 | ||
1186 | /* Calculate ECC */ | 1173 | /* Calculate ECC */ |
1187 | for (i = 0; i < eccfrag_len ; i += chip->ecc.bytes, p += chip->ecc.size) | 1174 | for (i = 0; i < eccfrag_len ; i += chip->ecc.bytes, p += chip->ecc.size) |
1188 | chip->ecc.calculate(mtd, p, &chip->buffers->ecccalc[i]); | 1175 | chip->ecc.calculate(mtd, p, &chip->buffers->ecccalc[i]); |
1189 | 1176 | ||
1190 | /* The performance is faster if to position offsets | 1177 | /* |
1191 | according to ecc.pos. Let make sure here that | 1178 | * The performance is faster if we position offsets according to |
1192 | there are no gaps in ecc positions */ | 1179 | * ecc.pos. Let's make sure that there are no gaps in ECC positions. |
1180 | */ | ||
1193 | for (i = 0; i < eccfrag_len - 1; i++) { | 1181 | for (i = 0; i < eccfrag_len - 1; i++) { |
1194 | if (eccpos[i + start_step * chip->ecc.bytes] + 1 != | 1182 | if (eccpos[i + start_step * chip->ecc.bytes] + 1 != |
1195 | eccpos[i + start_step * chip->ecc.bytes + 1]) { | 1183 | eccpos[i + start_step * chip->ecc.bytes + 1]) { |
@@ -1201,8 +1189,10 @@ static int nand_read_subpage(struct mtd_info *mtd, struct nand_chip *chip, | |||
1201 | chip->cmdfunc(mtd, NAND_CMD_RNDOUT, mtd->writesize, -1); | 1189 | chip->cmdfunc(mtd, NAND_CMD_RNDOUT, mtd->writesize, -1); |
1202 | chip->read_buf(mtd, chip->oob_poi, mtd->oobsize); | 1190 | chip->read_buf(mtd, chip->oob_poi, mtd->oobsize); |
1203 | } else { | 1191 | } else { |
1204 | /* send the command to read the particular ecc bytes */ | 1192 | /* |
1205 | /* take care about buswidth alignment in read_buf */ | 1193 | * Send the command to read the particular ECC bytes take care |
1194 | * about buswidth alignment in read_buf. | ||
1195 | */ | ||
1206 | index = start_step * chip->ecc.bytes; | 1196 | index = start_step * chip->ecc.bytes; |
1207 | 1197 | ||
1208 | aligned_pos = eccpos[index] & ~(busw - 1); | 1198 | aligned_pos = eccpos[index] & ~(busw - 1); |
@@ -1235,13 +1225,13 @@ static int nand_read_subpage(struct mtd_info *mtd, struct nand_chip *chip, | |||
1235 | } | 1225 | } |
1236 | 1226 | ||
1237 | /** | 1227 | /** |
1238 | * nand_read_page_hwecc - [REPLACABLE] hardware ecc based page read function | 1228 | * nand_read_page_hwecc - [REPLACEABLE] hardware ECC based page read function |
1239 | * @mtd: mtd info structure | 1229 | * @mtd: mtd info structure |
1240 | * @chip: nand chip info structure | 1230 | * @chip: nand chip info structure |
1241 | * @buf: buffer to store read data | 1231 | * @buf: buffer to store read data |
1242 | * @page: page number to read | 1232 | * @page: page number to read |
1243 | * | 1233 | * |
1244 | * Not for syndrome calculating ecc controllers which need a special oob layout | 1234 | * Not for syndrome calculating ECC controllers which need a special oob layout. |
1245 | */ | 1235 | */ |
1246 | static int nand_read_page_hwecc(struct mtd_info *mtd, struct nand_chip *chip, | 1236 | static int nand_read_page_hwecc(struct mtd_info *mtd, struct nand_chip *chip, |
1247 | uint8_t *buf, int page) | 1237 | uint8_t *buf, int page) |
@@ -1280,18 +1270,17 @@ static int nand_read_page_hwecc(struct mtd_info *mtd, struct nand_chip *chip, | |||
1280 | } | 1270 | } |
1281 | 1271 | ||
1282 | /** | 1272 | /** |
1283 | * nand_read_page_hwecc_oob_first - [REPLACABLE] hw ecc, read oob first | 1273 | * nand_read_page_hwecc_oob_first - [REPLACEABLE] hw ecc, read oob first |
1284 | * @mtd: mtd info structure | 1274 | * @mtd: mtd info structure |
1285 | * @chip: nand chip info structure | 1275 | * @chip: nand chip info structure |
1286 | * @buf: buffer to store read data | 1276 | * @buf: buffer to store read data |
1287 | * @page: page number to read | 1277 | * @page: page number to read |
1288 | * | 1278 | * |
1289 | * Hardware ECC for large page chips, require OOB to be read first. | 1279 | * Hardware ECC for large page chips, require OOB to be read first. For this |
1290 | * For this ECC mode, the write_page method is re-used from ECC_HW. | 1280 | * ECC mode, the write_page method is re-used from ECC_HW. These methods |
1291 | * These methods read/write ECC from the OOB area, unlike the | 1281 | * read/write ECC from the OOB area, unlike the ECC_HW_SYNDROME support with |
1292 | * ECC_HW_SYNDROME support with multiple ECC steps, follows the | 1282 | * multiple ECC steps, follows the "infix ECC" scheme and reads/writes ECC from |
1293 | * "infix ECC" scheme and reads/writes ECC from the data area, by | 1283 | * the data area, by overwriting the NAND manufacturer bad block markings. |
1294 | * overwriting the NAND manufacturer bad block markings. | ||
1295 | */ | 1284 | */ |
1296 | static int nand_read_page_hwecc_oob_first(struct mtd_info *mtd, | 1285 | static int nand_read_page_hwecc_oob_first(struct mtd_info *mtd, |
1297 | struct nand_chip *chip, uint8_t *buf, int page) | 1286 | struct nand_chip *chip, uint8_t *buf, int page) |
@@ -1329,14 +1318,14 @@ static int nand_read_page_hwecc_oob_first(struct mtd_info *mtd, | |||
1329 | } | 1318 | } |
1330 | 1319 | ||
1331 | /** | 1320 | /** |
1332 | * nand_read_page_syndrome - [REPLACABLE] hardware ecc syndrom based page read | 1321 | * nand_read_page_syndrome - [REPLACEABLE] hardware ECC syndrome based page read |
1333 | * @mtd: mtd info structure | 1322 | * @mtd: mtd info structure |
1334 | * @chip: nand chip info structure | 1323 | * @chip: nand chip info structure |
1335 | * @buf: buffer to store read data | 1324 | * @buf: buffer to store read data |
1336 | * @page: page number to read | 1325 | * @page: page number to read |
1337 | * | 1326 | * |
1338 | * The hw generator calculates the error syndrome automatically. Therefor | 1327 | * The hw generator calculates the error syndrome automatically. Therefore we |
1339 | * we need a special oob layout and handling. | 1328 | * need a special oob layout and handling. |
1340 | */ | 1329 | */ |
1341 | static int nand_read_page_syndrome(struct mtd_info *mtd, struct nand_chip *chip, | 1330 | static int nand_read_page_syndrome(struct mtd_info *mtd, struct nand_chip *chip, |
1342 | uint8_t *buf, int page) | 1331 | uint8_t *buf, int page) |
@@ -1384,29 +1373,29 @@ static int nand_read_page_syndrome(struct mtd_info *mtd, struct nand_chip *chip, | |||
1384 | } | 1373 | } |
1385 | 1374 | ||
1386 | /** | 1375 | /** |
1387 | * nand_transfer_oob - [Internal] Transfer oob to client buffer | 1376 | * nand_transfer_oob - [INTERN] Transfer oob to client buffer |
1388 | * @chip: nand chip structure | 1377 | * @chip: nand chip structure |
1389 | * @oob: oob destination address | 1378 | * @oob: oob destination address |
1390 | * @ops: oob ops structure | 1379 | * @ops: oob ops structure |
1391 | * @len: size of oob to transfer | 1380 | * @len: size of oob to transfer |
1392 | */ | 1381 | */ |
1393 | static uint8_t *nand_transfer_oob(struct nand_chip *chip, uint8_t *oob, | 1382 | static uint8_t *nand_transfer_oob(struct nand_chip *chip, uint8_t *oob, |
1394 | struct mtd_oob_ops *ops, size_t len) | 1383 | struct mtd_oob_ops *ops, size_t len) |
1395 | { | 1384 | { |
1396 | switch (ops->mode) { | 1385 | switch (ops->mode) { |
1397 | 1386 | ||
1398 | case MTD_OOB_PLACE: | 1387 | case MTD_OPS_PLACE_OOB: |
1399 | case MTD_OOB_RAW: | 1388 | case MTD_OPS_RAW: |
1400 | memcpy(oob, chip->oob_poi + ops->ooboffs, len); | 1389 | memcpy(oob, chip->oob_poi + ops->ooboffs, len); |
1401 | return oob + len; | 1390 | return oob + len; |
1402 | 1391 | ||
1403 | case MTD_OOB_AUTO: { | 1392 | case MTD_OPS_AUTO_OOB: { |
1404 | struct nand_oobfree *free = chip->ecc.layout->oobfree; | 1393 | struct nand_oobfree *free = chip->ecc.layout->oobfree; |
1405 | uint32_t boffs = 0, roffs = ops->ooboffs; | 1394 | uint32_t boffs = 0, roffs = ops->ooboffs; |
1406 | size_t bytes = 0; | 1395 | size_t bytes = 0; |
1407 | 1396 | ||
1408 | for (; free->length && len; free++, len -= bytes) { | 1397 | for (; free->length && len; free++, len -= bytes) { |
1409 | /* Read request not from offset 0 ? */ | 1398 | /* Read request not from offset 0? */ |
1410 | if (unlikely(roffs)) { | 1399 | if (unlikely(roffs)) { |
1411 | if (roffs >= free->length) { | 1400 | if (roffs >= free->length) { |
1412 | roffs -= free->length; | 1401 | roffs -= free->length; |
@@ -1432,11 +1421,10 @@ static uint8_t *nand_transfer_oob(struct nand_chip *chip, uint8_t *oob, | |||
1432 | } | 1421 | } |
1433 | 1422 | ||
1434 | /** | 1423 | /** |
1435 | * nand_do_read_ops - [Internal] Read data with ECC | 1424 | * nand_do_read_ops - [INTERN] Read data with ECC |
1436 | * | 1425 | * @mtd: MTD device structure |
1437 | * @mtd: MTD device structure | 1426 | * @from: offset to read from |
1438 | * @from: offset to read from | 1427 | * @ops: oob ops structure |
1439 | * @ops: oob ops structure | ||
1440 | * | 1428 | * |
1441 | * Internal function. Called with chip held. | 1429 | * Internal function. Called with chip held. |
1442 | */ | 1430 | */ |
@@ -1451,7 +1439,7 @@ static int nand_do_read_ops(struct mtd_info *mtd, loff_t from, | |||
1451 | int ret = 0; | 1439 | int ret = 0; |
1452 | uint32_t readlen = ops->len; | 1440 | uint32_t readlen = ops->len; |
1453 | uint32_t oobreadlen = ops->ooblen; | 1441 | uint32_t oobreadlen = ops->ooblen; |
1454 | uint32_t max_oobsize = ops->mode == MTD_OOB_AUTO ? | 1442 | uint32_t max_oobsize = ops->mode == MTD_OPS_AUTO_OOB ? |
1455 | mtd->oobavail : mtd->oobsize; | 1443 | mtd->oobavail : mtd->oobsize; |
1456 | 1444 | ||
1457 | uint8_t *bufpoi, *oob, *buf; | 1445 | uint8_t *bufpoi, *oob, *buf; |
@@ -1473,7 +1461,7 @@ static int nand_do_read_ops(struct mtd_info *mtd, loff_t from, | |||
1473 | bytes = min(mtd->writesize - col, readlen); | 1461 | bytes = min(mtd->writesize - col, readlen); |
1474 | aligned = (bytes == mtd->writesize); | 1462 | aligned = (bytes == mtd->writesize); |
1475 | 1463 | ||
1476 | /* Is the current page in the buffer ? */ | 1464 | /* Is the current page in the buffer? */ |
1477 | if (realpage != chip->pagebuf || oob) { | 1465 | if (realpage != chip->pagebuf || oob) { |
1478 | bufpoi = aligned ? buf : chip->buffers->databuf; | 1466 | bufpoi = aligned ? buf : chip->buffers->databuf; |
1479 | 1467 | ||
@@ -1483,7 +1471,7 @@ static int nand_do_read_ops(struct mtd_info *mtd, loff_t from, | |||
1483 | } | 1471 | } |
1484 | 1472 | ||
1485 | /* Now read the page into the buffer */ | 1473 | /* Now read the page into the buffer */ |
1486 | if (unlikely(ops->mode == MTD_OOB_RAW)) | 1474 | if (unlikely(ops->mode == MTD_OPS_RAW)) |
1487 | ret = chip->ecc.read_page_raw(mtd, chip, | 1475 | ret = chip->ecc.read_page_raw(mtd, chip, |
1488 | bufpoi, page); | 1476 | bufpoi, page); |
1489 | else if (!aligned && NAND_SUBPAGE_READ(chip) && !oob) | 1477 | else if (!aligned && NAND_SUBPAGE_READ(chip) && !oob) |
@@ -1492,14 +1480,22 @@ static int nand_do_read_ops(struct mtd_info *mtd, loff_t from, | |||
1492 | else | 1480 | else |
1493 | ret = chip->ecc.read_page(mtd, chip, bufpoi, | 1481 | ret = chip->ecc.read_page(mtd, chip, bufpoi, |
1494 | page); | 1482 | page); |
1495 | if (ret < 0) | 1483 | if (ret < 0) { |
1484 | if (!aligned) | ||
1485 | /* Invalidate page cache */ | ||
1486 | chip->pagebuf = -1; | ||
1496 | break; | 1487 | break; |
1488 | } | ||
1497 | 1489 | ||
1498 | /* Transfer not aligned data */ | 1490 | /* Transfer not aligned data */ |
1499 | if (!aligned) { | 1491 | if (!aligned) { |
1500 | if (!NAND_SUBPAGE_READ(chip) && !oob && | 1492 | if (!NAND_SUBPAGE_READ(chip) && !oob && |
1501 | !(mtd->ecc_stats.failed - stats.failed)) | 1493 | !(mtd->ecc_stats.failed - stats.failed) && |
1494 | (ops->mode != MTD_OPS_RAW)) | ||
1502 | chip->pagebuf = realpage; | 1495 | chip->pagebuf = realpage; |
1496 | else | ||
1497 | /* Invalidate page cache */ | ||
1498 | chip->pagebuf = -1; | ||
1503 | memcpy(buf, chip->buffers->databuf + col, bytes); | 1499 | memcpy(buf, chip->buffers->databuf + col, bytes); |
1504 | } | 1500 | } |
1505 | 1501 | ||
@@ -1539,7 +1535,7 @@ static int nand_do_read_ops(struct mtd_info *mtd, loff_t from, | |||
1539 | if (!readlen) | 1535 | if (!readlen) |
1540 | break; | 1536 | break; |
1541 | 1537 | ||
1542 | /* For subsequent reads align to page boundary. */ | 1538 | /* For subsequent reads align to page boundary */ |
1543 | col = 0; | 1539 | col = 0; |
1544 | /* Increment page address */ | 1540 | /* Increment page address */ |
1545 | realpage++; | 1541 | realpage++; |
@@ -1552,8 +1548,9 @@ static int nand_do_read_ops(struct mtd_info *mtd, loff_t from, | |||
1552 | chip->select_chip(mtd, chipnr); | 1548 | chip->select_chip(mtd, chipnr); |
1553 | } | 1549 | } |
1554 | 1550 | ||
1555 | /* Check, if the chip supports auto page increment | 1551 | /* |
1556 | * or if we have hit a block boundary. | 1552 | * Check, if the chip supports auto page increment or if we |
1553 | * have hit a block boundary. | ||
1557 | */ | 1554 | */ |
1558 | if (!NAND_CANAUTOINCR(chip) || !(page & blkcheck)) | 1555 | if (!NAND_CANAUTOINCR(chip) || !(page & blkcheck)) |
1559 | sndcmd = 1; | 1556 | sndcmd = 1; |
@@ -1574,18 +1571,19 @@ static int nand_do_read_ops(struct mtd_info *mtd, loff_t from, | |||
1574 | 1571 | ||
1575 | /** | 1572 | /** |
1576 | * nand_read - [MTD Interface] MTD compatibility function for nand_do_read_ecc | 1573 | * nand_read - [MTD Interface] MTD compatibility function for nand_do_read_ecc |
1577 | * @mtd: MTD device structure | 1574 | * @mtd: MTD device structure |
1578 | * @from: offset to read from | 1575 | * @from: offset to read from |
1579 | * @len: number of bytes to read | 1576 | * @len: number of bytes to read |
1580 | * @retlen: pointer to variable to store the number of read bytes | 1577 | * @retlen: pointer to variable to store the number of read bytes |
1581 | * @buf: the databuffer to put data | 1578 | * @buf: the databuffer to put data |
1582 | * | 1579 | * |
1583 | * Get hold of the chip and call nand_do_read | 1580 | * Get hold of the chip and call nand_do_read. |
1584 | */ | 1581 | */ |
1585 | static int nand_read(struct mtd_info *mtd, loff_t from, size_t len, | 1582 | static int nand_read(struct mtd_info *mtd, loff_t from, size_t len, |
1586 | size_t *retlen, uint8_t *buf) | 1583 | size_t *retlen, uint8_t *buf) |
1587 | { | 1584 | { |
1588 | struct nand_chip *chip = mtd->priv; | 1585 | struct nand_chip *chip = mtd->priv; |
1586 | struct mtd_oob_ops ops; | ||
1589 | int ret; | 1587 | int ret; |
1590 | 1588 | ||
1591 | /* Do not allow reads past end of device */ | 1589 | /* Do not allow reads past end of device */ |
@@ -1596,13 +1594,14 @@ static int nand_read(struct mtd_info *mtd, loff_t from, size_t len, | |||
1596 | 1594 | ||
1597 | nand_get_device(chip, mtd, FL_READING); | 1595 | nand_get_device(chip, mtd, FL_READING); |
1598 | 1596 | ||
1599 | chip->ops.len = len; | 1597 | ops.len = len; |
1600 | chip->ops.datbuf = buf; | 1598 | ops.datbuf = buf; |
1601 | chip->ops.oobbuf = NULL; | 1599 | ops.oobbuf = NULL; |
1600 | ops.mode = 0; | ||
1602 | 1601 | ||
1603 | ret = nand_do_read_ops(mtd, from, &chip->ops); | 1602 | ret = nand_do_read_ops(mtd, from, &ops); |
1604 | 1603 | ||
1605 | *retlen = chip->ops.retlen; | 1604 | *retlen = ops.retlen; |
1606 | 1605 | ||
1607 | nand_release_device(mtd); | 1606 | nand_release_device(mtd); |
1608 | 1607 | ||
@@ -1610,11 +1609,11 @@ static int nand_read(struct mtd_info *mtd, loff_t from, size_t len, | |||
1610 | } | 1609 | } |
1611 | 1610 | ||
1612 | /** | 1611 | /** |
1613 | * nand_read_oob_std - [REPLACABLE] the most common OOB data read function | 1612 | * nand_read_oob_std - [REPLACEABLE] the most common OOB data read function |
1614 | * @mtd: mtd info structure | 1613 | * @mtd: mtd info structure |
1615 | * @chip: nand chip info structure | 1614 | * @chip: nand chip info structure |
1616 | * @page: page number to read | 1615 | * @page: page number to read |
1617 | * @sndcmd: flag whether to issue read command or not | 1616 | * @sndcmd: flag whether to issue read command or not |
1618 | */ | 1617 | */ |
1619 | static int nand_read_oob_std(struct mtd_info *mtd, struct nand_chip *chip, | 1618 | static int nand_read_oob_std(struct mtd_info *mtd, struct nand_chip *chip, |
1620 | int page, int sndcmd) | 1619 | int page, int sndcmd) |
@@ -1628,12 +1627,12 @@ static int nand_read_oob_std(struct mtd_info *mtd, struct nand_chip *chip, | |||
1628 | } | 1627 | } |
1629 | 1628 | ||
1630 | /** | 1629 | /** |
1631 | * nand_read_oob_syndrome - [REPLACABLE] OOB data read function for HW ECC | 1630 | * nand_read_oob_syndrome - [REPLACEABLE] OOB data read function for HW ECC |
1632 | * with syndromes | 1631 | * with syndromes |
1633 | * @mtd: mtd info structure | 1632 | * @mtd: mtd info structure |
1634 | * @chip: nand chip info structure | 1633 | * @chip: nand chip info structure |
1635 | * @page: page number to read | 1634 | * @page: page number to read |
1636 | * @sndcmd: flag whether to issue read command or not | 1635 | * @sndcmd: flag whether to issue read command or not |
1637 | */ | 1636 | */ |
1638 | static int nand_read_oob_syndrome(struct mtd_info *mtd, struct nand_chip *chip, | 1637 | static int nand_read_oob_syndrome(struct mtd_info *mtd, struct nand_chip *chip, |
1639 | int page, int sndcmd) | 1638 | int page, int sndcmd) |
@@ -1667,10 +1666,10 @@ static int nand_read_oob_syndrome(struct mtd_info *mtd, struct nand_chip *chip, | |||
1667 | } | 1666 | } |
1668 | 1667 | ||
1669 | /** | 1668 | /** |
1670 | * nand_write_oob_std - [REPLACABLE] the most common OOB data write function | 1669 | * nand_write_oob_std - [REPLACEABLE] the most common OOB data write function |
1671 | * @mtd: mtd info structure | 1670 | * @mtd: mtd info structure |
1672 | * @chip: nand chip info structure | 1671 | * @chip: nand chip info structure |
1673 | * @page: page number to write | 1672 | * @page: page number to write |
1674 | */ | 1673 | */ |
1675 | static int nand_write_oob_std(struct mtd_info *mtd, struct nand_chip *chip, | 1674 | static int nand_write_oob_std(struct mtd_info *mtd, struct nand_chip *chip, |
1676 | int page) | 1675 | int page) |
@@ -1690,11 +1689,11 @@ static int nand_write_oob_std(struct mtd_info *mtd, struct nand_chip *chip, | |||
1690 | } | 1689 | } |
1691 | 1690 | ||
1692 | /** | 1691 | /** |
1693 | * nand_write_oob_syndrome - [REPLACABLE] OOB data write function for HW ECC | 1692 | * nand_write_oob_syndrome - [REPLACEABLE] OOB data write function for HW ECC |
1694 | * with syndrome - only for large page flash ! | 1693 | * with syndrome - only for large page flash |
1695 | * @mtd: mtd info structure | 1694 | * @mtd: mtd info structure |
1696 | * @chip: nand chip info structure | 1695 | * @chip: nand chip info structure |
1697 | * @page: page number to write | 1696 | * @page: page number to write |
1698 | */ | 1697 | */ |
1699 | static int nand_write_oob_syndrome(struct mtd_info *mtd, | 1698 | static int nand_write_oob_syndrome(struct mtd_info *mtd, |
1700 | struct nand_chip *chip, int page) | 1699 | struct nand_chip *chip, int page) |
@@ -1749,34 +1748,37 @@ static int nand_write_oob_syndrome(struct mtd_info *mtd, | |||
1749 | } | 1748 | } |
1750 | 1749 | ||
1751 | /** | 1750 | /** |
1752 | * nand_do_read_oob - [Intern] NAND read out-of-band | 1751 | * nand_do_read_oob - [INTERN] NAND read out-of-band |
1753 | * @mtd: MTD device structure | 1752 | * @mtd: MTD device structure |
1754 | * @from: offset to read from | 1753 | * @from: offset to read from |
1755 | * @ops: oob operations description structure | 1754 | * @ops: oob operations description structure |
1756 | * | 1755 | * |
1757 | * NAND read out-of-band data from the spare area | 1756 | * NAND read out-of-band data from the spare area. |
1758 | */ | 1757 | */ |
1759 | static int nand_do_read_oob(struct mtd_info *mtd, loff_t from, | 1758 | static int nand_do_read_oob(struct mtd_info *mtd, loff_t from, |
1760 | struct mtd_oob_ops *ops) | 1759 | struct mtd_oob_ops *ops) |
1761 | { | 1760 | { |
1762 | int page, realpage, chipnr, sndcmd = 1; | 1761 | int page, realpage, chipnr, sndcmd = 1; |
1763 | struct nand_chip *chip = mtd->priv; | 1762 | struct nand_chip *chip = mtd->priv; |
1763 | struct mtd_ecc_stats stats; | ||
1764 | int blkcheck = (1 << (chip->phys_erase_shift - chip->page_shift)) - 1; | 1764 | int blkcheck = (1 << (chip->phys_erase_shift - chip->page_shift)) - 1; |
1765 | int readlen = ops->ooblen; | 1765 | int readlen = ops->ooblen; |
1766 | int len; | 1766 | int len; |
1767 | uint8_t *buf = ops->oobbuf; | 1767 | uint8_t *buf = ops->oobbuf; |
1768 | 1768 | ||
1769 | DEBUG(MTD_DEBUG_LEVEL3, "%s: from = 0x%08Lx, len = %i\n", | 1769 | pr_debug("%s: from = 0x%08Lx, len = %i\n", |
1770 | __func__, (unsigned long long)from, readlen); | 1770 | __func__, (unsigned long long)from, readlen); |
1771 | 1771 | ||
1772 | if (ops->mode == MTD_OOB_AUTO) | 1772 | stats = mtd->ecc_stats; |
1773 | |||
1774 | if (ops->mode == MTD_OPS_AUTO_OOB) | ||
1773 | len = chip->ecc.layout->oobavail; | 1775 | len = chip->ecc.layout->oobavail; |
1774 | else | 1776 | else |
1775 | len = mtd->oobsize; | 1777 | len = mtd->oobsize; |
1776 | 1778 | ||
1777 | if (unlikely(ops->ooboffs >= len)) { | 1779 | if (unlikely(ops->ooboffs >= len)) { |
1778 | DEBUG(MTD_DEBUG_LEVEL0, "%s: Attempt to start read " | 1780 | pr_debug("%s: attempt to start read outside oob\n", |
1779 | "outside oob\n", __func__); | 1781 | __func__); |
1780 | return -EINVAL; | 1782 | return -EINVAL; |
1781 | } | 1783 | } |
1782 | 1784 | ||
@@ -1784,8 +1786,8 @@ static int nand_do_read_oob(struct mtd_info *mtd, loff_t from, | |||
1784 | if (unlikely(from >= mtd->size || | 1786 | if (unlikely(from >= mtd->size || |
1785 | ops->ooboffs + readlen > ((mtd->size >> chip->page_shift) - | 1787 | ops->ooboffs + readlen > ((mtd->size >> chip->page_shift) - |
1786 | (from >> chip->page_shift)) * len)) { | 1788 | (from >> chip->page_shift)) * len)) { |
1787 | DEBUG(MTD_DEBUG_LEVEL0, "%s: Attempt read beyond end " | 1789 | pr_debug("%s: attempt to read beyond end of device\n", |
1788 | "of device\n", __func__); | 1790 | __func__); |
1789 | return -EINVAL; | 1791 | return -EINVAL; |
1790 | } | 1792 | } |
1791 | 1793 | ||
@@ -1797,7 +1799,10 @@ static int nand_do_read_oob(struct mtd_info *mtd, loff_t from, | |||
1797 | page = realpage & chip->pagemask; | 1799 | page = realpage & chip->pagemask; |
1798 | 1800 | ||
1799 | while (1) { | 1801 | while (1) { |
1800 | sndcmd = chip->ecc.read_oob(mtd, chip, page, sndcmd); | 1802 | if (ops->mode == MTD_OPS_RAW) |
1803 | sndcmd = chip->ecc.read_oob_raw(mtd, chip, page, sndcmd); | ||
1804 | else | ||
1805 | sndcmd = chip->ecc.read_oob(mtd, chip, page, sndcmd); | ||
1801 | 1806 | ||
1802 | len = min(len, readlen); | 1807 | len = min(len, readlen); |
1803 | buf = nand_transfer_oob(chip, buf, ops, len); | 1808 | buf = nand_transfer_oob(chip, buf, ops, len); |
@@ -1830,24 +1835,29 @@ static int nand_do_read_oob(struct mtd_info *mtd, loff_t from, | |||
1830 | chip->select_chip(mtd, chipnr); | 1835 | chip->select_chip(mtd, chipnr); |
1831 | } | 1836 | } |
1832 | 1837 | ||
1833 | /* Check, if the chip supports auto page increment | 1838 | /* |
1834 | * or if we have hit a block boundary. | 1839 | * Check, if the chip supports auto page increment or if we |
1840 | * have hit a block boundary. | ||
1835 | */ | 1841 | */ |
1836 | if (!NAND_CANAUTOINCR(chip) || !(page & blkcheck)) | 1842 | if (!NAND_CANAUTOINCR(chip) || !(page & blkcheck)) |
1837 | sndcmd = 1; | 1843 | sndcmd = 1; |
1838 | } | 1844 | } |
1839 | 1845 | ||
1840 | ops->oobretlen = ops->ooblen; | 1846 | ops->oobretlen = ops->ooblen; |
1841 | return 0; | 1847 | |
1848 | if (mtd->ecc_stats.failed - stats.failed) | ||
1849 | return -EBADMSG; | ||
1850 | |||
1851 | return mtd->ecc_stats.corrected - stats.corrected ? -EUCLEAN : 0; | ||
1842 | } | 1852 | } |
1843 | 1853 | ||
1844 | /** | 1854 | /** |
1845 | * nand_read_oob - [MTD Interface] NAND read data and/or out-of-band | 1855 | * nand_read_oob - [MTD Interface] NAND read data and/or out-of-band |
1846 | * @mtd: MTD device structure | 1856 | * @mtd: MTD device structure |
1847 | * @from: offset to read from | 1857 | * @from: offset to read from |
1848 | * @ops: oob operation description structure | 1858 | * @ops: oob operation description structure |
1849 | * | 1859 | * |
1850 | * NAND read data and/or out-of-band data | 1860 | * NAND read data and/or out-of-band data. |
1851 | */ | 1861 | */ |
1852 | static int nand_read_oob(struct mtd_info *mtd, loff_t from, | 1862 | static int nand_read_oob(struct mtd_info *mtd, loff_t from, |
1853 | struct mtd_oob_ops *ops) | 1863 | struct mtd_oob_ops *ops) |
@@ -1859,17 +1869,17 @@ static int nand_read_oob(struct mtd_info *mtd, loff_t from, | |||
1859 | 1869 | ||
1860 | /* Do not allow reads past end of device */ | 1870 | /* Do not allow reads past end of device */ |
1861 | if (ops->datbuf && (from + ops->len) > mtd->size) { | 1871 | if (ops->datbuf && (from + ops->len) > mtd->size) { |
1862 | DEBUG(MTD_DEBUG_LEVEL0, "%s: Attempt read " | 1872 | pr_debug("%s: attempt to read beyond end of device\n", |
1863 | "beyond end of device\n", __func__); | 1873 | __func__); |
1864 | return -EINVAL; | 1874 | return -EINVAL; |
1865 | } | 1875 | } |
1866 | 1876 | ||
1867 | nand_get_device(chip, mtd, FL_READING); | 1877 | nand_get_device(chip, mtd, FL_READING); |
1868 | 1878 | ||
1869 | switch (ops->mode) { | 1879 | switch (ops->mode) { |
1870 | case MTD_OOB_PLACE: | 1880 | case MTD_OPS_PLACE_OOB: |
1871 | case MTD_OOB_AUTO: | 1881 | case MTD_OPS_AUTO_OOB: |
1872 | case MTD_OOB_RAW: | 1882 | case MTD_OPS_RAW: |
1873 | break; | 1883 | break; |
1874 | 1884 | ||
1875 | default: | 1885 | default: |
@@ -1888,12 +1898,12 @@ out: | |||
1888 | 1898 | ||
1889 | 1899 | ||
1890 | /** | 1900 | /** |
1891 | * nand_write_page_raw - [Intern] raw page write function | 1901 | * nand_write_page_raw - [INTERN] raw page write function |
1892 | * @mtd: mtd info structure | 1902 | * @mtd: mtd info structure |
1893 | * @chip: nand chip info structure | 1903 | * @chip: nand chip info structure |
1894 | * @buf: data buffer | 1904 | * @buf: data buffer |
1895 | * | 1905 | * |
1896 | * Not for syndrome calculating ecc controllers, which use a special oob layout | 1906 | * Not for syndrome calculating ECC controllers, which use a special oob layout. |
1897 | */ | 1907 | */ |
1898 | static void nand_write_page_raw(struct mtd_info *mtd, struct nand_chip *chip, | 1908 | static void nand_write_page_raw(struct mtd_info *mtd, struct nand_chip *chip, |
1899 | const uint8_t *buf) | 1909 | const uint8_t *buf) |
@@ -1903,10 +1913,10 @@ static void nand_write_page_raw(struct mtd_info *mtd, struct nand_chip *chip, | |||
1903 | } | 1913 | } |
1904 | 1914 | ||
1905 | /** | 1915 | /** |
1906 | * nand_write_page_raw_syndrome - [Intern] raw page write function | 1916 | * nand_write_page_raw_syndrome - [INTERN] raw page write function |
1907 | * @mtd: mtd info structure | 1917 | * @mtd: mtd info structure |
1908 | * @chip: nand chip info structure | 1918 | * @chip: nand chip info structure |
1909 | * @buf: data buffer | 1919 | * @buf: data buffer |
1910 | * | 1920 | * |
1911 | * We need a special oob layout and handling even when ECC isn't checked. | 1921 | * We need a special oob layout and handling even when ECC isn't checked. |
1912 | */ | 1922 | */ |
@@ -1942,10 +1952,10 @@ static void nand_write_page_raw_syndrome(struct mtd_info *mtd, | |||
1942 | chip->write_buf(mtd, oob, size); | 1952 | chip->write_buf(mtd, oob, size); |
1943 | } | 1953 | } |
1944 | /** | 1954 | /** |
1945 | * nand_write_page_swecc - [REPLACABLE] software ecc based page write function | 1955 | * nand_write_page_swecc - [REPLACEABLE] software ECC based page write function |
1946 | * @mtd: mtd info structure | 1956 | * @mtd: mtd info structure |
1947 | * @chip: nand chip info structure | 1957 | * @chip: nand chip info structure |
1948 | * @buf: data buffer | 1958 | * @buf: data buffer |
1949 | */ | 1959 | */ |
1950 | static void nand_write_page_swecc(struct mtd_info *mtd, struct nand_chip *chip, | 1960 | static void nand_write_page_swecc(struct mtd_info *mtd, struct nand_chip *chip, |
1951 | const uint8_t *buf) | 1961 | const uint8_t *buf) |
@@ -1957,7 +1967,7 @@ static void nand_write_page_swecc(struct mtd_info *mtd, struct nand_chip *chip, | |||
1957 | const uint8_t *p = buf; | 1967 | const uint8_t *p = buf; |
1958 | uint32_t *eccpos = chip->ecc.layout->eccpos; | 1968 | uint32_t *eccpos = chip->ecc.layout->eccpos; |
1959 | 1969 | ||
1960 | /* Software ecc calculation */ | 1970 | /* Software ECC calculation */ |
1961 | for (i = 0; eccsteps; eccsteps--, i += eccbytes, p += eccsize) | 1971 | for (i = 0; eccsteps; eccsteps--, i += eccbytes, p += eccsize) |
1962 | chip->ecc.calculate(mtd, p, &ecc_calc[i]); | 1972 | chip->ecc.calculate(mtd, p, &ecc_calc[i]); |
1963 | 1973 | ||
@@ -1968,10 +1978,10 @@ static void nand_write_page_swecc(struct mtd_info *mtd, struct nand_chip *chip, | |||
1968 | } | 1978 | } |
1969 | 1979 | ||
1970 | /** | 1980 | /** |
1971 | * nand_write_page_hwecc - [REPLACABLE] hardware ecc based page write function | 1981 | * nand_write_page_hwecc - [REPLACEABLE] hardware ECC based page write function |
1972 | * @mtd: mtd info structure | 1982 | * @mtd: mtd info structure |
1973 | * @chip: nand chip info structure | 1983 | * @chip: nand chip info structure |
1974 | * @buf: data buffer | 1984 | * @buf: data buffer |
1975 | */ | 1985 | */ |
1976 | static void nand_write_page_hwecc(struct mtd_info *mtd, struct nand_chip *chip, | 1986 | static void nand_write_page_hwecc(struct mtd_info *mtd, struct nand_chip *chip, |
1977 | const uint8_t *buf) | 1987 | const uint8_t *buf) |
@@ -1996,13 +2006,13 @@ static void nand_write_page_hwecc(struct mtd_info *mtd, struct nand_chip *chip, | |||
1996 | } | 2006 | } |
1997 | 2007 | ||
1998 | /** | 2008 | /** |
1999 | * nand_write_page_syndrome - [REPLACABLE] hardware ecc syndrom based page write | 2009 | * nand_write_page_syndrome - [REPLACEABLE] hardware ECC syndrome based page write |
2000 | * @mtd: mtd info structure | 2010 | * @mtd: mtd info structure |
2001 | * @chip: nand chip info structure | 2011 | * @chip: nand chip info structure |
2002 | * @buf: data buffer | 2012 | * @buf: data buffer |
2003 | * | 2013 | * |
2004 | * The hw generator calculates the error syndrome automatically. Therefor | 2014 | * The hw generator calculates the error syndrome automatically. Therefore we |
2005 | * we need a special oob layout and handling. | 2015 | * need a special oob layout and handling. |
2006 | */ | 2016 | */ |
2007 | static void nand_write_page_syndrome(struct mtd_info *mtd, | 2017 | static void nand_write_page_syndrome(struct mtd_info *mtd, |
2008 | struct nand_chip *chip, const uint8_t *buf) | 2018 | struct nand_chip *chip, const uint8_t *buf) |
@@ -2041,12 +2051,12 @@ static void nand_write_page_syndrome(struct mtd_info *mtd, | |||
2041 | 2051 | ||
2042 | /** | 2052 | /** |
2043 | * nand_write_page - [REPLACEABLE] write one page | 2053 | * nand_write_page - [REPLACEABLE] write one page |
2044 | * @mtd: MTD device structure | 2054 | * @mtd: MTD device structure |
2045 | * @chip: NAND chip descriptor | 2055 | * @chip: NAND chip descriptor |
2046 | * @buf: the data to write | 2056 | * @buf: the data to write |
2047 | * @page: page number to write | 2057 | * @page: page number to write |
2048 | * @cached: cached programming | 2058 | * @cached: cached programming |
2049 | * @raw: use _raw version of write_page | 2059 | * @raw: use _raw version of write_page |
2050 | */ | 2060 | */ |
2051 | static int nand_write_page(struct mtd_info *mtd, struct nand_chip *chip, | 2061 | static int nand_write_page(struct mtd_info *mtd, struct nand_chip *chip, |
2052 | const uint8_t *buf, int page, int cached, int raw) | 2062 | const uint8_t *buf, int page, int cached, int raw) |
@@ -2061,8 +2071,8 @@ static int nand_write_page(struct mtd_info *mtd, struct nand_chip *chip, | |||
2061 | chip->ecc.write_page(mtd, chip, buf); | 2071 | chip->ecc.write_page(mtd, chip, buf); |
2062 | 2072 | ||
2063 | /* | 2073 | /* |
2064 | * Cached progamming disabled for now, Not sure if its worth the | 2074 | * Cached progamming disabled for now. Not sure if it's worth the |
2065 | * trouble. The speed gain is not very impressive. (2.3->2.6Mib/s) | 2075 | * trouble. The speed gain is not very impressive. (2.3->2.6Mib/s). |
2066 | */ | 2076 | */ |
2067 | cached = 0; | 2077 | cached = 0; |
2068 | 2078 | ||
@@ -2072,7 +2082,7 @@ static int nand_write_page(struct mtd_info *mtd, struct nand_chip *chip, | |||
2072 | status = chip->waitfunc(mtd, chip); | 2082 | status = chip->waitfunc(mtd, chip); |
2073 | /* | 2083 | /* |
2074 | * See if operation failed and additional status checks are | 2084 | * See if operation failed and additional status checks are |
2075 | * available | 2085 | * available. |
2076 | */ | 2086 | */ |
2077 | if ((status & NAND_STATUS_FAIL) && (chip->errstat)) | 2087 | if ((status & NAND_STATUS_FAIL) && (chip->errstat)) |
2078 | status = chip->errstat(mtd, chip, FL_WRITING, status, | 2088 | status = chip->errstat(mtd, chip, FL_WRITING, status, |
@@ -2096,29 +2106,37 @@ static int nand_write_page(struct mtd_info *mtd, struct nand_chip *chip, | |||
2096 | } | 2106 | } |
2097 | 2107 | ||
2098 | /** | 2108 | /** |
2099 | * nand_fill_oob - [Internal] Transfer client buffer to oob | 2109 | * nand_fill_oob - [INTERN] Transfer client buffer to oob |
2100 | * @chip: nand chip structure | 2110 | * @mtd: MTD device structure |
2101 | * @oob: oob data buffer | 2111 | * @oob: oob data buffer |
2102 | * @len: oob data write length | 2112 | * @len: oob data write length |
2103 | * @ops: oob ops structure | 2113 | * @ops: oob ops structure |
2104 | */ | 2114 | */ |
2105 | static uint8_t *nand_fill_oob(struct nand_chip *chip, uint8_t *oob, size_t len, | 2115 | static uint8_t *nand_fill_oob(struct mtd_info *mtd, uint8_t *oob, size_t len, |
2106 | struct mtd_oob_ops *ops) | 2116 | struct mtd_oob_ops *ops) |
2107 | { | 2117 | { |
2118 | struct nand_chip *chip = mtd->priv; | ||
2119 | |||
2120 | /* | ||
2121 | * Initialise to all 0xFF, to avoid the possibility of left over OOB | ||
2122 | * data from a previous OOB read. | ||
2123 | */ | ||
2124 | memset(chip->oob_poi, 0xff, mtd->oobsize); | ||
2125 | |||
2108 | switch (ops->mode) { | 2126 | switch (ops->mode) { |
2109 | 2127 | ||
2110 | case MTD_OOB_PLACE: | 2128 | case MTD_OPS_PLACE_OOB: |
2111 | case MTD_OOB_RAW: | 2129 | case MTD_OPS_RAW: |
2112 | memcpy(chip->oob_poi + ops->ooboffs, oob, len); | 2130 | memcpy(chip->oob_poi + ops->ooboffs, oob, len); |
2113 | return oob + len; | 2131 | return oob + len; |
2114 | 2132 | ||
2115 | case MTD_OOB_AUTO: { | 2133 | case MTD_OPS_AUTO_OOB: { |
2116 | struct nand_oobfree *free = chip->ecc.layout->oobfree; | 2134 | struct nand_oobfree *free = chip->ecc.layout->oobfree; |
2117 | uint32_t boffs = 0, woffs = ops->ooboffs; | 2135 | uint32_t boffs = 0, woffs = ops->ooboffs; |
2118 | size_t bytes = 0; | 2136 | size_t bytes = 0; |
2119 | 2137 | ||
2120 | for (; free->length && len; free++, len -= bytes) { | 2138 | for (; free->length && len; free++, len -= bytes) { |
2121 | /* Write request not from offset 0 ? */ | 2139 | /* Write request not from offset 0? */ |
2122 | if (unlikely(woffs)) { | 2140 | if (unlikely(woffs)) { |
2123 | if (woffs >= free->length) { | 2141 | if (woffs >= free->length) { |
2124 | woffs -= free->length; | 2142 | woffs -= free->length; |
@@ -2146,12 +2164,12 @@ static uint8_t *nand_fill_oob(struct nand_chip *chip, uint8_t *oob, size_t len, | |||
2146 | #define NOTALIGNED(x) ((x & (chip->subpagesize - 1)) != 0) | 2164 | #define NOTALIGNED(x) ((x & (chip->subpagesize - 1)) != 0) |
2147 | 2165 | ||
2148 | /** | 2166 | /** |
2149 | * nand_do_write_ops - [Internal] NAND write with ECC | 2167 | * nand_do_write_ops - [INTERN] NAND write with ECC |
2150 | * @mtd: MTD device structure | 2168 | * @mtd: MTD device structure |
2151 | * @to: offset to write to | 2169 | * @to: offset to write to |
2152 | * @ops: oob operations description structure | 2170 | * @ops: oob operations description structure |
2153 | * | 2171 | * |
2154 | * NAND write with ECC | 2172 | * NAND write with ECC. |
2155 | */ | 2173 | */ |
2156 | static int nand_do_write_ops(struct mtd_info *mtd, loff_t to, | 2174 | static int nand_do_write_ops(struct mtd_info *mtd, loff_t to, |
2157 | struct mtd_oob_ops *ops) | 2175 | struct mtd_oob_ops *ops) |
@@ -2161,7 +2179,7 @@ static int nand_do_write_ops(struct mtd_info *mtd, loff_t to, | |||
2161 | uint32_t writelen = ops->len; | 2179 | uint32_t writelen = ops->len; |
2162 | 2180 | ||
2163 | uint32_t oobwritelen = ops->ooblen; | 2181 | uint32_t oobwritelen = ops->ooblen; |
2164 | uint32_t oobmaxlen = ops->mode == MTD_OOB_AUTO ? | 2182 | uint32_t oobmaxlen = ops->mode == MTD_OPS_AUTO_OOB ? |
2165 | mtd->oobavail : mtd->oobsize; | 2183 | mtd->oobavail : mtd->oobsize; |
2166 | 2184 | ||
2167 | uint8_t *oob = ops->oobbuf; | 2185 | uint8_t *oob = ops->oobbuf; |
@@ -2172,10 +2190,10 @@ static int nand_do_write_ops(struct mtd_info *mtd, loff_t to, | |||
2172 | if (!writelen) | 2190 | if (!writelen) |
2173 | return 0; | 2191 | return 0; |
2174 | 2192 | ||
2175 | /* reject writes, which are not page aligned */ | 2193 | /* Reject writes, which are not page aligned */ |
2176 | if (NOTALIGNED(to) || NOTALIGNED(ops->len)) { | 2194 | if (NOTALIGNED(to) || NOTALIGNED(ops->len)) { |
2177 | printk(KERN_NOTICE "%s: Attempt to write not " | 2195 | pr_notice("%s: attempt to write non page aligned data\n", |
2178 | "page aligned data\n", __func__); | 2196 | __func__); |
2179 | return -EINVAL; | 2197 | return -EINVAL; |
2180 | } | 2198 | } |
2181 | 2199 | ||
@@ -2201,10 +2219,6 @@ static int nand_do_write_ops(struct mtd_info *mtd, loff_t to, | |||
2201 | (chip->pagebuf << chip->page_shift) < (to + ops->len)) | 2219 | (chip->pagebuf << chip->page_shift) < (to + ops->len)) |
2202 | chip->pagebuf = -1; | 2220 | chip->pagebuf = -1; |
2203 | 2221 | ||
2204 | /* If we're not given explicit OOB data, let it be 0xFF */ | ||
2205 | if (likely(!oob)) | ||
2206 | memset(chip->oob_poi, 0xff, mtd->oobsize); | ||
2207 | |||
2208 | /* Don't allow multipage oob writes with offset */ | 2222 | /* Don't allow multipage oob writes with offset */ |
2209 | if (oob && ops->ooboffs && (ops->ooboffs + ops->ooblen > oobmaxlen)) | 2223 | if (oob && ops->ooboffs && (ops->ooboffs + ops->ooblen > oobmaxlen)) |
2210 | return -EINVAL; | 2224 | return -EINVAL; |
@@ -2214,7 +2228,7 @@ static int nand_do_write_ops(struct mtd_info *mtd, loff_t to, | |||
2214 | int cached = writelen > bytes && page != blockmask; | 2228 | int cached = writelen > bytes && page != blockmask; |
2215 | uint8_t *wbuf = buf; | 2229 | uint8_t *wbuf = buf; |
2216 | 2230 | ||
2217 | /* Partial page write ? */ | 2231 | /* Partial page write? */ |
2218 | if (unlikely(column || writelen < (mtd->writesize - 1))) { | 2232 | if (unlikely(column || writelen < (mtd->writesize - 1))) { |
2219 | cached = 0; | 2233 | cached = 0; |
2220 | bytes = min_t(int, bytes - column, (int) writelen); | 2234 | bytes = min_t(int, bytes - column, (int) writelen); |
@@ -2226,12 +2240,15 @@ static int nand_do_write_ops(struct mtd_info *mtd, loff_t to, | |||
2226 | 2240 | ||
2227 | if (unlikely(oob)) { | 2241 | if (unlikely(oob)) { |
2228 | size_t len = min(oobwritelen, oobmaxlen); | 2242 | size_t len = min(oobwritelen, oobmaxlen); |
2229 | oob = nand_fill_oob(chip, oob, len, ops); | 2243 | oob = nand_fill_oob(mtd, oob, len, ops); |
2230 | oobwritelen -= len; | 2244 | oobwritelen -= len; |
2245 | } else { | ||
2246 | /* We still need to erase leftover OOB data */ | ||
2247 | memset(chip->oob_poi, 0xff, mtd->oobsize); | ||
2231 | } | 2248 | } |
2232 | 2249 | ||
2233 | ret = chip->write_page(mtd, chip, wbuf, page, cached, | 2250 | ret = chip->write_page(mtd, chip, wbuf, page, cached, |
2234 | (ops->mode == MTD_OOB_RAW)); | 2251 | (ops->mode == MTD_OPS_RAW)); |
2235 | if (ret) | 2252 | if (ret) |
2236 | break; | 2253 | break; |
2237 | 2254 | ||
@@ -2260,11 +2277,11 @@ static int nand_do_write_ops(struct mtd_info *mtd, loff_t to, | |||
2260 | 2277 | ||
2261 | /** | 2278 | /** |
2262 | * panic_nand_write - [MTD Interface] NAND write with ECC | 2279 | * panic_nand_write - [MTD Interface] NAND write with ECC |
2263 | * @mtd: MTD device structure | 2280 | * @mtd: MTD device structure |
2264 | * @to: offset to write to | 2281 | * @to: offset to write to |
2265 | * @len: number of bytes to write | 2282 | * @len: number of bytes to write |
2266 | * @retlen: pointer to variable to store the number of written bytes | 2283 | * @retlen: pointer to variable to store the number of written bytes |
2267 | * @buf: the data to write | 2284 | * @buf: the data to write |
2268 | * | 2285 | * |
2269 | * NAND write with ECC. Used when performing writes in interrupt context, this | 2286 | * NAND write with ECC. Used when performing writes in interrupt context, this |
2270 | * may for example be called by mtdoops when writing an oops while in panic. | 2287 | * may for example be called by mtdoops when writing an oops while in panic. |
@@ -2273,6 +2290,7 @@ static int panic_nand_write(struct mtd_info *mtd, loff_t to, size_t len, | |||
2273 | size_t *retlen, const uint8_t *buf) | 2290 | size_t *retlen, const uint8_t *buf) |
2274 | { | 2291 | { |
2275 | struct nand_chip *chip = mtd->priv; | 2292 | struct nand_chip *chip = mtd->priv; |
2293 | struct mtd_oob_ops ops; | ||
2276 | int ret; | 2294 | int ret; |
2277 | 2295 | ||
2278 | /* Do not allow reads past end of device */ | 2296 | /* Do not allow reads past end of device */ |
@@ -2281,36 +2299,38 @@ static int panic_nand_write(struct mtd_info *mtd, loff_t to, size_t len, | |||
2281 | if (!len) | 2299 | if (!len) |
2282 | return 0; | 2300 | return 0; |
2283 | 2301 | ||
2284 | /* Wait for the device to get ready. */ | 2302 | /* Wait for the device to get ready */ |
2285 | panic_nand_wait(mtd, chip, 400); | 2303 | panic_nand_wait(mtd, chip, 400); |
2286 | 2304 | ||
2287 | /* Grab the device. */ | 2305 | /* Grab the device */ |
2288 | panic_nand_get_device(chip, mtd, FL_WRITING); | 2306 | panic_nand_get_device(chip, mtd, FL_WRITING); |
2289 | 2307 | ||
2290 | chip->ops.len = len; | 2308 | ops.len = len; |
2291 | chip->ops.datbuf = (uint8_t *)buf; | 2309 | ops.datbuf = (uint8_t *)buf; |
2292 | chip->ops.oobbuf = NULL; | 2310 | ops.oobbuf = NULL; |
2311 | ops.mode = 0; | ||
2293 | 2312 | ||
2294 | ret = nand_do_write_ops(mtd, to, &chip->ops); | 2313 | ret = nand_do_write_ops(mtd, to, &ops); |
2295 | 2314 | ||
2296 | *retlen = chip->ops.retlen; | 2315 | *retlen = ops.retlen; |
2297 | return ret; | 2316 | return ret; |
2298 | } | 2317 | } |
2299 | 2318 | ||
2300 | /** | 2319 | /** |
2301 | * nand_write - [MTD Interface] NAND write with ECC | 2320 | * nand_write - [MTD Interface] NAND write with ECC |
2302 | * @mtd: MTD device structure | 2321 | * @mtd: MTD device structure |
2303 | * @to: offset to write to | 2322 | * @to: offset to write to |
2304 | * @len: number of bytes to write | 2323 | * @len: number of bytes to write |
2305 | * @retlen: pointer to variable to store the number of written bytes | 2324 | * @retlen: pointer to variable to store the number of written bytes |
2306 | * @buf: the data to write | 2325 | * @buf: the data to write |
2307 | * | 2326 | * |
2308 | * NAND write with ECC | 2327 | * NAND write with ECC. |
2309 | */ | 2328 | */ |
2310 | static int nand_write(struct mtd_info *mtd, loff_t to, size_t len, | 2329 | static int nand_write(struct mtd_info *mtd, loff_t to, size_t len, |
2311 | size_t *retlen, const uint8_t *buf) | 2330 | size_t *retlen, const uint8_t *buf) |
2312 | { | 2331 | { |
2313 | struct nand_chip *chip = mtd->priv; | 2332 | struct nand_chip *chip = mtd->priv; |
2333 | struct mtd_oob_ops ops; | ||
2314 | int ret; | 2334 | int ret; |
2315 | 2335 | ||
2316 | /* Do not allow reads past end of device */ | 2336 | /* Do not allow reads past end of device */ |
@@ -2321,13 +2341,14 @@ static int nand_write(struct mtd_info *mtd, loff_t to, size_t len, | |||
2321 | 2341 | ||
2322 | nand_get_device(chip, mtd, FL_WRITING); | 2342 | nand_get_device(chip, mtd, FL_WRITING); |
2323 | 2343 | ||
2324 | chip->ops.len = len; | 2344 | ops.len = len; |
2325 | chip->ops.datbuf = (uint8_t *)buf; | 2345 | ops.datbuf = (uint8_t *)buf; |
2326 | chip->ops.oobbuf = NULL; | 2346 | ops.oobbuf = NULL; |
2347 | ops.mode = 0; | ||
2327 | 2348 | ||
2328 | ret = nand_do_write_ops(mtd, to, &chip->ops); | 2349 | ret = nand_do_write_ops(mtd, to, &ops); |
2329 | 2350 | ||
2330 | *retlen = chip->ops.retlen; | 2351 | *retlen = ops.retlen; |
2331 | 2352 | ||
2332 | nand_release_device(mtd); | 2353 | nand_release_device(mtd); |
2333 | 2354 | ||
@@ -2336,11 +2357,11 @@ static int nand_write(struct mtd_info *mtd, loff_t to, size_t len, | |||
2336 | 2357 | ||
2337 | /** | 2358 | /** |
2338 | * nand_do_write_oob - [MTD Interface] NAND write out-of-band | 2359 | * nand_do_write_oob - [MTD Interface] NAND write out-of-band |
2339 | * @mtd: MTD device structure | 2360 | * @mtd: MTD device structure |
2340 | * @to: offset to write to | 2361 | * @to: offset to write to |
2341 | * @ops: oob operation description structure | 2362 | * @ops: oob operation description structure |
2342 | * | 2363 | * |
2343 | * NAND write out-of-band | 2364 | * NAND write out-of-band. |
2344 | */ | 2365 | */ |
2345 | static int nand_do_write_oob(struct mtd_info *mtd, loff_t to, | 2366 | static int nand_do_write_oob(struct mtd_info *mtd, loff_t to, |
2346 | struct mtd_oob_ops *ops) | 2367 | struct mtd_oob_ops *ops) |
@@ -2348,24 +2369,24 @@ static int nand_do_write_oob(struct mtd_info *mtd, loff_t to, | |||
2348 | int chipnr, page, status, len; | 2369 | int chipnr, page, status, len; |
2349 | struct nand_chip *chip = mtd->priv; | 2370 | struct nand_chip *chip = mtd->priv; |
2350 | 2371 | ||
2351 | DEBUG(MTD_DEBUG_LEVEL3, "%s: to = 0x%08x, len = %i\n", | 2372 | pr_debug("%s: to = 0x%08x, len = %i\n", |
2352 | __func__, (unsigned int)to, (int)ops->ooblen); | 2373 | __func__, (unsigned int)to, (int)ops->ooblen); |
2353 | 2374 | ||
2354 | if (ops->mode == MTD_OOB_AUTO) | 2375 | if (ops->mode == MTD_OPS_AUTO_OOB) |
2355 | len = chip->ecc.layout->oobavail; | 2376 | len = chip->ecc.layout->oobavail; |
2356 | else | 2377 | else |
2357 | len = mtd->oobsize; | 2378 | len = mtd->oobsize; |
2358 | 2379 | ||
2359 | /* Do not allow write past end of page */ | 2380 | /* Do not allow write past end of page */ |
2360 | if ((ops->ooboffs + ops->ooblen) > len) { | 2381 | if ((ops->ooboffs + ops->ooblen) > len) { |
2361 | DEBUG(MTD_DEBUG_LEVEL0, "%s: Attempt to write " | 2382 | pr_debug("%s: attempt to write past end of page\n", |
2362 | "past end of page\n", __func__); | 2383 | __func__); |
2363 | return -EINVAL; | 2384 | return -EINVAL; |
2364 | } | 2385 | } |
2365 | 2386 | ||
2366 | if (unlikely(ops->ooboffs >= len)) { | 2387 | if (unlikely(ops->ooboffs >= len)) { |
2367 | DEBUG(MTD_DEBUG_LEVEL0, "%s: Attempt to start " | 2388 | pr_debug("%s: attempt to start write outside oob\n", |
2368 | "write outside oob\n", __func__); | 2389 | __func__); |
2369 | return -EINVAL; | 2390 | return -EINVAL; |
2370 | } | 2391 | } |
2371 | 2392 | ||
@@ -2374,8 +2395,8 @@ static int nand_do_write_oob(struct mtd_info *mtd, loff_t to, | |||
2374 | ops->ooboffs + ops->ooblen > | 2395 | ops->ooboffs + ops->ooblen > |
2375 | ((mtd->size >> chip->page_shift) - | 2396 | ((mtd->size >> chip->page_shift) - |
2376 | (to >> chip->page_shift)) * len)) { | 2397 | (to >> chip->page_shift)) * len)) { |
2377 | DEBUG(MTD_DEBUG_LEVEL0, "%s: Attempt write beyond " | 2398 | pr_debug("%s: attempt to write beyond end of device\n", |
2378 | "end of device\n", __func__); | 2399 | __func__); |
2379 | return -EINVAL; | 2400 | return -EINVAL; |
2380 | } | 2401 | } |
2381 | 2402 | ||
@@ -2401,10 +2422,12 @@ static int nand_do_write_oob(struct mtd_info *mtd, loff_t to, | |||
2401 | if (page == chip->pagebuf) | 2422 | if (page == chip->pagebuf) |
2402 | chip->pagebuf = -1; | 2423 | chip->pagebuf = -1; |
2403 | 2424 | ||
2404 | memset(chip->oob_poi, 0xff, mtd->oobsize); | 2425 | nand_fill_oob(mtd, ops->oobbuf, ops->ooblen, ops); |
2405 | nand_fill_oob(chip, ops->oobbuf, ops->ooblen, ops); | 2426 | |
2406 | status = chip->ecc.write_oob(mtd, chip, page & chip->pagemask); | 2427 | if (ops->mode == MTD_OPS_RAW) |
2407 | memset(chip->oob_poi, 0xff, mtd->oobsize); | 2428 | status = chip->ecc.write_oob_raw(mtd, chip, page & chip->pagemask); |
2429 | else | ||
2430 | status = chip->ecc.write_oob(mtd, chip, page & chip->pagemask); | ||
2408 | 2431 | ||
2409 | if (status) | 2432 | if (status) |
2410 | return status; | 2433 | return status; |
@@ -2416,9 +2439,9 @@ static int nand_do_write_oob(struct mtd_info *mtd, loff_t to, | |||
2416 | 2439 | ||
2417 | /** | 2440 | /** |
2418 | * nand_write_oob - [MTD Interface] NAND write data and/or out-of-band | 2441 | * nand_write_oob - [MTD Interface] NAND write data and/or out-of-band |
2419 | * @mtd: MTD device structure | 2442 | * @mtd: MTD device structure |
2420 | * @to: offset to write to | 2443 | * @to: offset to write to |
2421 | * @ops: oob operation description structure | 2444 | * @ops: oob operation description structure |
2422 | */ | 2445 | */ |
2423 | static int nand_write_oob(struct mtd_info *mtd, loff_t to, | 2446 | static int nand_write_oob(struct mtd_info *mtd, loff_t to, |
2424 | struct mtd_oob_ops *ops) | 2447 | struct mtd_oob_ops *ops) |
@@ -2430,17 +2453,17 @@ static int nand_write_oob(struct mtd_info *mtd, loff_t to, | |||
2430 | 2453 | ||
2431 | /* Do not allow writes past end of device */ | 2454 | /* Do not allow writes past end of device */ |
2432 | if (ops->datbuf && (to + ops->len) > mtd->size) { | 2455 | if (ops->datbuf && (to + ops->len) > mtd->size) { |
2433 | DEBUG(MTD_DEBUG_LEVEL0, "%s: Attempt write beyond " | 2456 | pr_debug("%s: attempt to write beyond end of device\n", |
2434 | "end of device\n", __func__); | 2457 | __func__); |
2435 | return -EINVAL; | 2458 | return -EINVAL; |
2436 | } | 2459 | } |
2437 | 2460 | ||
2438 | nand_get_device(chip, mtd, FL_WRITING); | 2461 | nand_get_device(chip, mtd, FL_WRITING); |
2439 | 2462 | ||
2440 | switch (ops->mode) { | 2463 | switch (ops->mode) { |
2441 | case MTD_OOB_PLACE: | 2464 | case MTD_OPS_PLACE_OOB: |
2442 | case MTD_OOB_AUTO: | 2465 | case MTD_OPS_AUTO_OOB: |
2443 | case MTD_OOB_RAW: | 2466 | case MTD_OPS_RAW: |
2444 | break; | 2467 | break; |
2445 | 2468 | ||
2446 | default: | 2469 | default: |
@@ -2458,11 +2481,11 @@ out: | |||
2458 | } | 2481 | } |
2459 | 2482 | ||
2460 | /** | 2483 | /** |
2461 | * single_erease_cmd - [GENERIC] NAND standard block erase command function | 2484 | * single_erase_cmd - [GENERIC] NAND standard block erase command function |
2462 | * @mtd: MTD device structure | 2485 | * @mtd: MTD device structure |
2463 | * @page: the page address of the block which will be erased | 2486 | * @page: the page address of the block which will be erased |
2464 | * | 2487 | * |
2465 | * Standard erase command for NAND chips | 2488 | * Standard erase command for NAND chips. |
2466 | */ | 2489 | */ |
2467 | static void single_erase_cmd(struct mtd_info *mtd, int page) | 2490 | static void single_erase_cmd(struct mtd_info *mtd, int page) |
2468 | { | 2491 | { |
@@ -2473,12 +2496,11 @@ static void single_erase_cmd(struct mtd_info *mtd, int page) | |||
2473 | } | 2496 | } |
2474 | 2497 | ||
2475 | /** | 2498 | /** |
2476 | * multi_erease_cmd - [GENERIC] AND specific block erase command function | 2499 | * multi_erase_cmd - [GENERIC] AND specific block erase command function |
2477 | * @mtd: MTD device structure | 2500 | * @mtd: MTD device structure |
2478 | * @page: the page address of the block which will be erased | 2501 | * @page: the page address of the block which will be erased |
2479 | * | 2502 | * |
2480 | * AND multi block erase command function | 2503 | * AND multi block erase command function. Erase 4 consecutive blocks. |
2481 | * Erase 4 consecutive blocks | ||
2482 | */ | 2504 | */ |
2483 | static void multi_erase_cmd(struct mtd_info *mtd, int page) | 2505 | static void multi_erase_cmd(struct mtd_info *mtd, int page) |
2484 | { | 2506 | { |
@@ -2493,10 +2515,10 @@ static void multi_erase_cmd(struct mtd_info *mtd, int page) | |||
2493 | 2515 | ||
2494 | /** | 2516 | /** |
2495 | * nand_erase - [MTD Interface] erase block(s) | 2517 | * nand_erase - [MTD Interface] erase block(s) |
2496 | * @mtd: MTD device structure | 2518 | * @mtd: MTD device structure |
2497 | * @instr: erase instruction | 2519 | * @instr: erase instruction |
2498 | * | 2520 | * |
2499 | * Erase one ore more blocks | 2521 | * Erase one ore more blocks. |
2500 | */ | 2522 | */ |
2501 | static int nand_erase(struct mtd_info *mtd, struct erase_info *instr) | 2523 | static int nand_erase(struct mtd_info *mtd, struct erase_info *instr) |
2502 | { | 2524 | { |
@@ -2505,12 +2527,12 @@ static int nand_erase(struct mtd_info *mtd, struct erase_info *instr) | |||
2505 | 2527 | ||
2506 | #define BBT_PAGE_MASK 0xffffff3f | 2528 | #define BBT_PAGE_MASK 0xffffff3f |
2507 | /** | 2529 | /** |
2508 | * nand_erase_nand - [Internal] erase block(s) | 2530 | * nand_erase_nand - [INTERN] erase block(s) |
2509 | * @mtd: MTD device structure | 2531 | * @mtd: MTD device structure |
2510 | * @instr: erase instruction | 2532 | * @instr: erase instruction |
2511 | * @allowbbt: allow erasing the bbt area | 2533 | * @allowbbt: allow erasing the bbt area |
2512 | * | 2534 | * |
2513 | * Erase one ore more blocks | 2535 | * Erase one ore more blocks. |
2514 | */ | 2536 | */ |
2515 | int nand_erase_nand(struct mtd_info *mtd, struct erase_info *instr, | 2537 | int nand_erase_nand(struct mtd_info *mtd, struct erase_info *instr, |
2516 | int allowbbt) | 2538 | int allowbbt) |
@@ -2521,9 +2543,9 @@ int nand_erase_nand(struct mtd_info *mtd, struct erase_info *instr, | |||
2521 | unsigned int bbt_masked_page = 0xffffffff; | 2543 | unsigned int bbt_masked_page = 0xffffffff; |
2522 | loff_t len; | 2544 | loff_t len; |
2523 | 2545 | ||
2524 | DEBUG(MTD_DEBUG_LEVEL3, "%s: start = 0x%012llx, len = %llu\n", | 2546 | pr_debug("%s: start = 0x%012llx, len = %llu\n", |
2525 | __func__, (unsigned long long)instr->addr, | 2547 | __func__, (unsigned long long)instr->addr, |
2526 | (unsigned long long)instr->len); | 2548 | (unsigned long long)instr->len); |
2527 | 2549 | ||
2528 | if (check_offs_len(mtd, instr->addr, instr->len)) | 2550 | if (check_offs_len(mtd, instr->addr, instr->len)) |
2529 | return -EINVAL; | 2551 | return -EINVAL; |
@@ -2545,8 +2567,8 @@ int nand_erase_nand(struct mtd_info *mtd, struct erase_info *instr, | |||
2545 | 2567 | ||
2546 | /* Check, if it is write protected */ | 2568 | /* Check, if it is write protected */ |
2547 | if (nand_check_wp(mtd)) { | 2569 | if (nand_check_wp(mtd)) { |
2548 | DEBUG(MTD_DEBUG_LEVEL0, "%s: Device is write protected!!!\n", | 2570 | pr_debug("%s: device is write protected!\n", |
2549 | __func__); | 2571 | __func__); |
2550 | instr->state = MTD_ERASE_FAILED; | 2572 | instr->state = MTD_ERASE_FAILED; |
2551 | goto erase_exit; | 2573 | goto erase_exit; |
2552 | } | 2574 | } |
@@ -2555,7 +2577,7 @@ int nand_erase_nand(struct mtd_info *mtd, struct erase_info *instr, | |||
2555 | * If BBT requires refresh, set the BBT page mask to see if the BBT | 2577 | * If BBT requires refresh, set the BBT page mask to see if the BBT |
2556 | * should be rewritten. Otherwise the mask is set to 0xffffffff which | 2578 | * should be rewritten. Otherwise the mask is set to 0xffffffff which |
2557 | * can not be matched. This is also done when the bbt is actually | 2579 | * can not be matched. This is also done when the bbt is actually |
2558 | * erased to avoid recusrsive updates | 2580 | * erased to avoid recursive updates. |
2559 | */ | 2581 | */ |
2560 | if (chip->options & BBT_AUTO_REFRESH && !allowbbt) | 2582 | if (chip->options & BBT_AUTO_REFRESH && !allowbbt) |
2561 | bbt_masked_page = chip->bbt_td->pages[chipnr] & BBT_PAGE_MASK; | 2583 | bbt_masked_page = chip->bbt_td->pages[chipnr] & BBT_PAGE_MASK; |
@@ -2566,20 +2588,18 @@ int nand_erase_nand(struct mtd_info *mtd, struct erase_info *instr, | |||
2566 | instr->state = MTD_ERASING; | 2588 | instr->state = MTD_ERASING; |
2567 | 2589 | ||
2568 | while (len) { | 2590 | while (len) { |
2569 | /* | 2591 | /* Heck if we have a bad block, we do not erase bad blocks! */ |
2570 | * heck if we have a bad block, we do not erase bad blocks ! | ||
2571 | */ | ||
2572 | if (nand_block_checkbad(mtd, ((loff_t) page) << | 2592 | if (nand_block_checkbad(mtd, ((loff_t) page) << |
2573 | chip->page_shift, 0, allowbbt)) { | 2593 | chip->page_shift, 0, allowbbt)) { |
2574 | printk(KERN_WARNING "%s: attempt to erase a bad block " | 2594 | pr_warn("%s: attempt to erase a bad block at page 0x%08x\n", |
2575 | "at page 0x%08x\n", __func__, page); | 2595 | __func__, page); |
2576 | instr->state = MTD_ERASE_FAILED; | 2596 | instr->state = MTD_ERASE_FAILED; |
2577 | goto erase_exit; | 2597 | goto erase_exit; |
2578 | } | 2598 | } |
2579 | 2599 | ||
2580 | /* | 2600 | /* |
2581 | * Invalidate the page cache, if we erase the block which | 2601 | * Invalidate the page cache, if we erase the block which |
2582 | * contains the current cached page | 2602 | * contains the current cached page. |
2583 | */ | 2603 | */ |
2584 | if (page <= chip->pagebuf && chip->pagebuf < | 2604 | if (page <= chip->pagebuf && chip->pagebuf < |
2585 | (page + pages_per_block)) | 2605 | (page + pages_per_block)) |
@@ -2599,8 +2619,8 @@ int nand_erase_nand(struct mtd_info *mtd, struct erase_info *instr, | |||
2599 | 2619 | ||
2600 | /* See if block erase succeeded */ | 2620 | /* See if block erase succeeded */ |
2601 | if (status & NAND_STATUS_FAIL) { | 2621 | if (status & NAND_STATUS_FAIL) { |
2602 | DEBUG(MTD_DEBUG_LEVEL0, "%s: Failed erase, " | 2622 | pr_debug("%s: failed erase, page 0x%08x\n", |
2603 | "page 0x%08x\n", __func__, page); | 2623 | __func__, page); |
2604 | instr->state = MTD_ERASE_FAILED; | 2624 | instr->state = MTD_ERASE_FAILED; |
2605 | instr->fail_addr = | 2625 | instr->fail_addr = |
2606 | ((loff_t)page << chip->page_shift); | 2626 | ((loff_t)page << chip->page_shift); |
@@ -2609,7 +2629,7 @@ int nand_erase_nand(struct mtd_info *mtd, struct erase_info *instr, | |||
2609 | 2629 | ||
2610 | /* | 2630 | /* |
2611 | * If BBT requires refresh, set the BBT rewrite flag to the | 2631 | * If BBT requires refresh, set the BBT rewrite flag to the |
2612 | * page being erased | 2632 | * page being erased. |
2613 | */ | 2633 | */ |
2614 | if (bbt_masked_page != 0xffffffff && | 2634 | if (bbt_masked_page != 0xffffffff && |
2615 | (page & BBT_PAGE_MASK) == bbt_masked_page) | 2635 | (page & BBT_PAGE_MASK) == bbt_masked_page) |
@@ -2628,7 +2648,7 @@ int nand_erase_nand(struct mtd_info *mtd, struct erase_info *instr, | |||
2628 | 2648 | ||
2629 | /* | 2649 | /* |
2630 | * If BBT requires refresh and BBT-PERCHIP, set the BBT | 2650 | * If BBT requires refresh and BBT-PERCHIP, set the BBT |
2631 | * page mask to see if this BBT should be rewritten | 2651 | * page mask to see if this BBT should be rewritten. |
2632 | */ | 2652 | */ |
2633 | if (bbt_masked_page != 0xffffffff && | 2653 | if (bbt_masked_page != 0xffffffff && |
2634 | (chip->bbt_td->options & NAND_BBT_PERCHIP)) | 2654 | (chip->bbt_td->options & NAND_BBT_PERCHIP)) |
@@ -2651,7 +2671,7 @@ erase_exit: | |||
2651 | 2671 | ||
2652 | /* | 2672 | /* |
2653 | * If BBT requires refresh and erase was successful, rewrite any | 2673 | * If BBT requires refresh and erase was successful, rewrite any |
2654 | * selected bad block tables | 2674 | * selected bad block tables. |
2655 | */ | 2675 | */ |
2656 | if (bbt_masked_page == 0xffffffff || ret) | 2676 | if (bbt_masked_page == 0xffffffff || ret) |
2657 | return ret; | 2677 | return ret; |
@@ -2659,10 +2679,10 @@ erase_exit: | |||
2659 | for (chipnr = 0; chipnr < chip->numchips; chipnr++) { | 2679 | for (chipnr = 0; chipnr < chip->numchips; chipnr++) { |
2660 | if (!rewrite_bbt[chipnr]) | 2680 | if (!rewrite_bbt[chipnr]) |
2661 | continue; | 2681 | continue; |
2662 | /* update the BBT for chip */ | 2682 | /* Update the BBT for chip */ |
2663 | DEBUG(MTD_DEBUG_LEVEL0, "%s: nand_update_bbt " | 2683 | pr_debug("%s: nand_update_bbt (%d:0x%0llx 0x%0x)\n", |
2664 | "(%d:0x%0llx 0x%0x)\n", __func__, chipnr, | 2684 | __func__, chipnr, rewrite_bbt[chipnr], |
2665 | rewrite_bbt[chipnr], chip->bbt_td->pages[chipnr]); | 2685 | chip->bbt_td->pages[chipnr]); |
2666 | nand_update_bbt(mtd, rewrite_bbt[chipnr]); | 2686 | nand_update_bbt(mtd, rewrite_bbt[chipnr]); |
2667 | } | 2687 | } |
2668 | 2688 | ||
@@ -2672,15 +2692,15 @@ erase_exit: | |||
2672 | 2692 | ||
2673 | /** | 2693 | /** |
2674 | * nand_sync - [MTD Interface] sync | 2694 | * nand_sync - [MTD Interface] sync |
2675 | * @mtd: MTD device structure | 2695 | * @mtd: MTD device structure |
2676 | * | 2696 | * |
2677 | * Sync is actually a wait for chip ready function | 2697 | * Sync is actually a wait for chip ready function. |
2678 | */ | 2698 | */ |
2679 | static void nand_sync(struct mtd_info *mtd) | 2699 | static void nand_sync(struct mtd_info *mtd) |
2680 | { | 2700 | { |
2681 | struct nand_chip *chip = mtd->priv; | 2701 | struct nand_chip *chip = mtd->priv; |
2682 | 2702 | ||
2683 | DEBUG(MTD_DEBUG_LEVEL3, "%s: called\n", __func__); | 2703 | pr_debug("%s: called\n", __func__); |
2684 | 2704 | ||
2685 | /* Grab the lock and see if the device is available */ | 2705 | /* Grab the lock and see if the device is available */ |
2686 | nand_get_device(chip, mtd, FL_SYNCING); | 2706 | nand_get_device(chip, mtd, FL_SYNCING); |
@@ -2690,8 +2710,8 @@ static void nand_sync(struct mtd_info *mtd) | |||
2690 | 2710 | ||
2691 | /** | 2711 | /** |
2692 | * nand_block_isbad - [MTD Interface] Check if block at offset is bad | 2712 | * nand_block_isbad - [MTD Interface] Check if block at offset is bad |
2693 | * @mtd: MTD device structure | 2713 | * @mtd: MTD device structure |
2694 | * @offs: offset relative to mtd start | 2714 | * @offs: offset relative to mtd start |
2695 | */ | 2715 | */ |
2696 | static int nand_block_isbad(struct mtd_info *mtd, loff_t offs) | 2716 | static int nand_block_isbad(struct mtd_info *mtd, loff_t offs) |
2697 | { | 2717 | { |
@@ -2704,8 +2724,8 @@ static int nand_block_isbad(struct mtd_info *mtd, loff_t offs) | |||
2704 | 2724 | ||
2705 | /** | 2725 | /** |
2706 | * nand_block_markbad - [MTD Interface] Mark block at the given offset as bad | 2726 | * nand_block_markbad - [MTD Interface] Mark block at the given offset as bad |
2707 | * @mtd: MTD device structure | 2727 | * @mtd: MTD device structure |
2708 | * @ofs: offset relative to mtd start | 2728 | * @ofs: offset relative to mtd start |
2709 | */ | 2729 | */ |
2710 | static int nand_block_markbad(struct mtd_info *mtd, loff_t ofs) | 2730 | static int nand_block_markbad(struct mtd_info *mtd, loff_t ofs) |
2711 | { | 2731 | { |
@@ -2714,7 +2734,7 @@ static int nand_block_markbad(struct mtd_info *mtd, loff_t ofs) | |||
2714 | 2734 | ||
2715 | ret = nand_block_isbad(mtd, ofs); | 2735 | ret = nand_block_isbad(mtd, ofs); |
2716 | if (ret) { | 2736 | if (ret) { |
2717 | /* If it was bad already, return success and do nothing. */ | 2737 | /* If it was bad already, return success and do nothing */ |
2718 | if (ret > 0) | 2738 | if (ret > 0) |
2719 | return 0; | 2739 | return 0; |
2720 | return ret; | 2740 | return ret; |
@@ -2725,7 +2745,7 @@ static int nand_block_markbad(struct mtd_info *mtd, loff_t ofs) | |||
2725 | 2745 | ||
2726 | /** | 2746 | /** |
2727 | * nand_suspend - [MTD Interface] Suspend the NAND flash | 2747 | * nand_suspend - [MTD Interface] Suspend the NAND flash |
2728 | * @mtd: MTD device structure | 2748 | * @mtd: MTD device structure |
2729 | */ | 2749 | */ |
2730 | static int nand_suspend(struct mtd_info *mtd) | 2750 | static int nand_suspend(struct mtd_info *mtd) |
2731 | { | 2751 | { |
@@ -2736,7 +2756,7 @@ static int nand_suspend(struct mtd_info *mtd) | |||
2736 | 2756 | ||
2737 | /** | 2757 | /** |
2738 | * nand_resume - [MTD Interface] Resume the NAND flash | 2758 | * nand_resume - [MTD Interface] Resume the NAND flash |
2739 | * @mtd: MTD device structure | 2759 | * @mtd: MTD device structure |
2740 | */ | 2760 | */ |
2741 | static void nand_resume(struct mtd_info *mtd) | 2761 | static void nand_resume(struct mtd_info *mtd) |
2742 | { | 2762 | { |
@@ -2745,13 +2765,11 @@ static void nand_resume(struct mtd_info *mtd) | |||
2745 | if (chip->state == FL_PM_SUSPENDED) | 2765 | if (chip->state == FL_PM_SUSPENDED) |
2746 | nand_release_device(mtd); | 2766 | nand_release_device(mtd); |
2747 | else | 2767 | else |
2748 | printk(KERN_ERR "%s called for a chip which is not " | 2768 | pr_err("%s called for a chip which is not in suspended state\n", |
2749 | "in suspended state\n", __func__); | 2769 | __func__); |
2750 | } | 2770 | } |
2751 | 2771 | ||
2752 | /* | 2772 | /* Set default functions */ |
2753 | * Set default functions | ||
2754 | */ | ||
2755 | static void nand_set_defaults(struct nand_chip *chip, int busw) | 2773 | static void nand_set_defaults(struct nand_chip *chip, int busw) |
2756 | { | 2774 | { |
2757 | /* check for proper chip_delay setup, set 20us if not */ | 2775 | /* check for proper chip_delay setup, set 20us if not */ |
@@ -2793,23 +2811,21 @@ static void nand_set_defaults(struct nand_chip *chip, int busw) | |||
2793 | 2811 | ||
2794 | } | 2812 | } |
2795 | 2813 | ||
2796 | /* | 2814 | /* Sanitize ONFI strings so we can safely print them */ |
2797 | * sanitize ONFI strings so we can safely print them | ||
2798 | */ | ||
2799 | static void sanitize_string(uint8_t *s, size_t len) | 2815 | static void sanitize_string(uint8_t *s, size_t len) |
2800 | { | 2816 | { |
2801 | ssize_t i; | 2817 | ssize_t i; |
2802 | 2818 | ||
2803 | /* null terminate */ | 2819 | /* Null terminate */ |
2804 | s[len - 1] = 0; | 2820 | s[len - 1] = 0; |
2805 | 2821 | ||
2806 | /* remove non printable chars */ | 2822 | /* Remove non printable chars */ |
2807 | for (i = 0; i < len - 1; i++) { | 2823 | for (i = 0; i < len - 1; i++) { |
2808 | if (s[i] < ' ' || s[i] > 127) | 2824 | if (s[i] < ' ' || s[i] > 127) |
2809 | s[i] = '?'; | 2825 | s[i] = '?'; |
2810 | } | 2826 | } |
2811 | 2827 | ||
2812 | /* remove trailing spaces */ | 2828 | /* Remove trailing spaces */ |
2813 | strim(s); | 2829 | strim(s); |
2814 | } | 2830 | } |
2815 | 2831 | ||
@@ -2826,28 +2842,28 @@ static u16 onfi_crc16(u16 crc, u8 const *p, size_t len) | |||
2826 | } | 2842 | } |
2827 | 2843 | ||
2828 | /* | 2844 | /* |
2829 | * Check if the NAND chip is ONFI compliant, returns 1 if it is, 0 otherwise | 2845 | * Check if the NAND chip is ONFI compliant, returns 1 if it is, 0 otherwise. |
2830 | */ | 2846 | */ |
2831 | static int nand_flash_detect_onfi(struct mtd_info *mtd, struct nand_chip *chip, | 2847 | static int nand_flash_detect_onfi(struct mtd_info *mtd, struct nand_chip *chip, |
2832 | int busw) | 2848 | int *busw) |
2833 | { | 2849 | { |
2834 | struct nand_onfi_params *p = &chip->onfi_params; | 2850 | struct nand_onfi_params *p = &chip->onfi_params; |
2835 | int i; | 2851 | int i; |
2836 | int val; | 2852 | int val; |
2837 | 2853 | ||
2838 | /* try ONFI for unknow chip or LP */ | 2854 | /* Try ONFI for unknown chip or LP */ |
2839 | chip->cmdfunc(mtd, NAND_CMD_READID, 0x20, -1); | 2855 | chip->cmdfunc(mtd, NAND_CMD_READID, 0x20, -1); |
2840 | if (chip->read_byte(mtd) != 'O' || chip->read_byte(mtd) != 'N' || | 2856 | if (chip->read_byte(mtd) != 'O' || chip->read_byte(mtd) != 'N' || |
2841 | chip->read_byte(mtd) != 'F' || chip->read_byte(mtd) != 'I') | 2857 | chip->read_byte(mtd) != 'F' || chip->read_byte(mtd) != 'I') |
2842 | return 0; | 2858 | return 0; |
2843 | 2859 | ||
2844 | printk(KERN_INFO "ONFI flash detected\n"); | 2860 | pr_info("ONFI flash detected\n"); |
2845 | chip->cmdfunc(mtd, NAND_CMD_PARAM, 0, -1); | 2861 | chip->cmdfunc(mtd, NAND_CMD_PARAM, 0, -1); |
2846 | for (i = 0; i < 3; i++) { | 2862 | for (i = 0; i < 3; i++) { |
2847 | chip->read_buf(mtd, (uint8_t *)p, sizeof(*p)); | 2863 | chip->read_buf(mtd, (uint8_t *)p, sizeof(*p)); |
2848 | if (onfi_crc16(ONFI_CRC_BASE, (uint8_t *)p, 254) == | 2864 | if (onfi_crc16(ONFI_CRC_BASE, (uint8_t *)p, 254) == |
2849 | le16_to_cpu(p->crc)) { | 2865 | le16_to_cpu(p->crc)) { |
2850 | printk(KERN_INFO "ONFI param page %d valid\n", i); | 2866 | pr_info("ONFI param page %d valid\n", i); |
2851 | break; | 2867 | break; |
2852 | } | 2868 | } |
2853 | } | 2869 | } |
@@ -2855,7 +2871,7 @@ static int nand_flash_detect_onfi(struct mtd_info *mtd, struct nand_chip *chip, | |||
2855 | if (i == 3) | 2871 | if (i == 3) |
2856 | return 0; | 2872 | return 0; |
2857 | 2873 | ||
2858 | /* check version */ | 2874 | /* Check version */ |
2859 | val = le16_to_cpu(p->revision); | 2875 | val = le16_to_cpu(p->revision); |
2860 | if (val & (1 << 5)) | 2876 | if (val & (1 << 5)) |
2861 | chip->onfi_version = 23; | 2877 | chip->onfi_version = 23; |
@@ -2871,8 +2887,7 @@ static int nand_flash_detect_onfi(struct mtd_info *mtd, struct nand_chip *chip, | |||
2871 | chip->onfi_version = 0; | 2887 | chip->onfi_version = 0; |
2872 | 2888 | ||
2873 | if (!chip->onfi_version) { | 2889 | if (!chip->onfi_version) { |
2874 | printk(KERN_INFO "%s: unsupported ONFI version: %d\n", | 2890 | pr_info("%s: unsupported ONFI version: %d\n", __func__, val); |
2875 | __func__, val); | ||
2876 | return 0; | 2891 | return 0; |
2877 | } | 2892 | } |
2878 | 2893 | ||
@@ -2884,9 +2899,9 @@ static int nand_flash_detect_onfi(struct mtd_info *mtd, struct nand_chip *chip, | |||
2884 | mtd->erasesize = le32_to_cpu(p->pages_per_block) * mtd->writesize; | 2899 | mtd->erasesize = le32_to_cpu(p->pages_per_block) * mtd->writesize; |
2885 | mtd->oobsize = le16_to_cpu(p->spare_bytes_per_page); | 2900 | mtd->oobsize = le16_to_cpu(p->spare_bytes_per_page); |
2886 | chip->chipsize = (uint64_t)le32_to_cpu(p->blocks_per_lun) * mtd->erasesize; | 2901 | chip->chipsize = (uint64_t)le32_to_cpu(p->blocks_per_lun) * mtd->erasesize; |
2887 | busw = 0; | 2902 | *busw = 0; |
2888 | if (le16_to_cpu(p->features) & 1) | 2903 | if (le16_to_cpu(p->features) & 1) |
2889 | busw = NAND_BUSWIDTH_16; | 2904 | *busw = NAND_BUSWIDTH_16; |
2890 | 2905 | ||
2891 | chip->options &= ~NAND_CHIPOPTIONS_MSK; | 2906 | chip->options &= ~NAND_CHIPOPTIONS_MSK; |
2892 | chip->options |= (NAND_NO_READRDY | | 2907 | chip->options |= (NAND_NO_READRDY | |
@@ -2896,7 +2911,7 @@ static int nand_flash_detect_onfi(struct mtd_info *mtd, struct nand_chip *chip, | |||
2896 | } | 2911 | } |
2897 | 2912 | ||
2898 | /* | 2913 | /* |
2899 | * Get the flash and manufacturer id and lookup if the type is supported | 2914 | * Get the flash and manufacturer id and lookup if the type is supported. |
2900 | */ | 2915 | */ |
2901 | static struct nand_flash_dev *nand_get_flash_type(struct mtd_info *mtd, | 2916 | static struct nand_flash_dev *nand_get_flash_type(struct mtd_info *mtd, |
2902 | struct nand_chip *chip, | 2917 | struct nand_chip *chip, |
@@ -2913,7 +2928,7 @@ static struct nand_flash_dev *nand_get_flash_type(struct mtd_info *mtd, | |||
2913 | 2928 | ||
2914 | /* | 2929 | /* |
2915 | * Reset the chip, required by some chips (e.g. Micron MT29FxGxxxxx) | 2930 | * Reset the chip, required by some chips (e.g. Micron MT29FxGxxxxx) |
2916 | * after power-up | 2931 | * after power-up. |
2917 | */ | 2932 | */ |
2918 | chip->cmdfunc(mtd, NAND_CMD_RESET, -1, -1); | 2933 | chip->cmdfunc(mtd, NAND_CMD_RESET, -1, -1); |
2919 | 2934 | ||
@@ -2924,7 +2939,8 @@ static struct nand_flash_dev *nand_get_flash_type(struct mtd_info *mtd, | |||
2924 | *maf_id = chip->read_byte(mtd); | 2939 | *maf_id = chip->read_byte(mtd); |
2925 | *dev_id = chip->read_byte(mtd); | 2940 | *dev_id = chip->read_byte(mtd); |
2926 | 2941 | ||
2927 | /* Try again to make sure, as some systems the bus-hold or other | 2942 | /* |
2943 | * Try again to make sure, as some systems the bus-hold or other | ||
2928 | * interface concerns can cause random data which looks like a | 2944 | * interface concerns can cause random data which looks like a |
2929 | * possibly credible NAND flash to appear. If the two results do | 2945 | * possibly credible NAND flash to appear. If the two results do |
2930 | * not match, ignore the device completely. | 2946 | * not match, ignore the device completely. |
@@ -2936,9 +2952,9 @@ static struct nand_flash_dev *nand_get_flash_type(struct mtd_info *mtd, | |||
2936 | id_data[i] = chip->read_byte(mtd); | 2952 | id_data[i] = chip->read_byte(mtd); |
2937 | 2953 | ||
2938 | if (id_data[0] != *maf_id || id_data[1] != *dev_id) { | 2954 | if (id_data[0] != *maf_id || id_data[1] != *dev_id) { |
2939 | printk(KERN_INFO "%s: second ID read did not match " | 2955 | pr_info("%s: second ID read did not match " |
2940 | "%02x,%02x against %02x,%02x\n", __func__, | 2956 | "%02x,%02x against %02x,%02x\n", __func__, |
2941 | *maf_id, *dev_id, id_data[0], id_data[1]); | 2957 | *maf_id, *dev_id, id_data[0], id_data[1]); |
2942 | return ERR_PTR(-ENODEV); | 2958 | return ERR_PTR(-ENODEV); |
2943 | } | 2959 | } |
2944 | 2960 | ||
@@ -2952,7 +2968,7 @@ static struct nand_flash_dev *nand_get_flash_type(struct mtd_info *mtd, | |||
2952 | chip->onfi_version = 0; | 2968 | chip->onfi_version = 0; |
2953 | if (!type->name || !type->pagesize) { | 2969 | if (!type->name || !type->pagesize) { |
2954 | /* Check is chip is ONFI compliant */ | 2970 | /* Check is chip is ONFI compliant */ |
2955 | ret = nand_flash_detect_onfi(mtd, chip, busw); | 2971 | ret = nand_flash_detect_onfi(mtd, chip, &busw); |
2956 | if (ret) | 2972 | if (ret) |
2957 | goto ident_done; | 2973 | goto ident_done; |
2958 | } | 2974 | } |
@@ -2973,7 +2989,7 @@ static struct nand_flash_dev *nand_get_flash_type(struct mtd_info *mtd, | |||
2973 | chip->chipsize = (uint64_t)type->chipsize << 20; | 2989 | chip->chipsize = (uint64_t)type->chipsize << 20; |
2974 | 2990 | ||
2975 | if (!type->pagesize && chip->init_size) { | 2991 | if (!type->pagesize && chip->init_size) { |
2976 | /* set the pagesize, oobsize, erasesize by the driver*/ | 2992 | /* Set the pagesize, oobsize, erasesize by the driver */ |
2977 | busw = chip->init_size(mtd, chip, id_data); | 2993 | busw = chip->init_size(mtd, chip, id_data); |
2978 | } else if (!type->pagesize) { | 2994 | } else if (!type->pagesize) { |
2979 | int extid; | 2995 | int extid; |
@@ -3033,7 +3049,7 @@ static struct nand_flash_dev *nand_get_flash_type(struct mtd_info *mtd, | |||
3033 | } | 3049 | } |
3034 | } else { | 3050 | } else { |
3035 | /* | 3051 | /* |
3036 | * Old devices have chip data hardcoded in the device id table | 3052 | * Old devices have chip data hardcoded in the device id table. |
3037 | */ | 3053 | */ |
3038 | mtd->erasesize = type->erasesize; | 3054 | mtd->erasesize = type->erasesize; |
3039 | mtd->writesize = type->pagesize; | 3055 | mtd->writesize = type->pagesize; |
@@ -3043,7 +3059,7 @@ static struct nand_flash_dev *nand_get_flash_type(struct mtd_info *mtd, | |||
3043 | /* | 3059 | /* |
3044 | * Check for Spansion/AMD ID + repeating 5th, 6th byte since | 3060 | * Check for Spansion/AMD ID + repeating 5th, 6th byte since |
3045 | * some Spansion chips have erasesize that conflicts with size | 3061 | * some Spansion chips have erasesize that conflicts with size |
3046 | * listed in nand_ids table | 3062 | * listed in nand_ids table. |
3047 | * Data sheet (5 byte ID): Spansion S30ML-P ORNAND (p.39) | 3063 | * Data sheet (5 byte ID): Spansion S30ML-P ORNAND (p.39) |
3048 | */ | 3064 | */ |
3049 | if (*maf_id == NAND_MFR_AMD && id_data[4] != 0x00 && | 3065 | if (*maf_id == NAND_MFR_AMD && id_data[4] != 0x00 && |
@@ -3057,15 +3073,16 @@ static struct nand_flash_dev *nand_get_flash_type(struct mtd_info *mtd, | |||
3057 | chip->options &= ~NAND_CHIPOPTIONS_MSK; | 3073 | chip->options &= ~NAND_CHIPOPTIONS_MSK; |
3058 | chip->options |= type->options & NAND_CHIPOPTIONS_MSK; | 3074 | chip->options |= type->options & NAND_CHIPOPTIONS_MSK; |
3059 | 3075 | ||
3060 | /* Check if chip is a not a samsung device. Do not clear the | 3076 | /* |
3061 | * options for chips which are not having an extended id. | 3077 | * Check if chip is not a Samsung device. Do not clear the |
3078 | * options for chips which do not have an extended id. | ||
3062 | */ | 3079 | */ |
3063 | if (*maf_id != NAND_MFR_SAMSUNG && !type->pagesize) | 3080 | if (*maf_id != NAND_MFR_SAMSUNG && !type->pagesize) |
3064 | chip->options &= ~NAND_SAMSUNG_LP_OPTIONS; | 3081 | chip->options &= ~NAND_SAMSUNG_LP_OPTIONS; |
3065 | ident_done: | 3082 | ident_done: |
3066 | 3083 | ||
3067 | /* | 3084 | /* |
3068 | * Set chip as a default. Board drivers can override it, if necessary | 3085 | * Set chip as a default. Board drivers can override it, if necessary. |
3069 | */ | 3086 | */ |
3070 | chip->options |= NAND_NO_AUTOINCR; | 3087 | chip->options |= NAND_NO_AUTOINCR; |
3071 | 3088 | ||
@@ -3077,21 +3094,21 @@ ident_done: | |||
3077 | 3094 | ||
3078 | /* | 3095 | /* |
3079 | * Check, if buswidth is correct. Hardware drivers should set | 3096 | * Check, if buswidth is correct. Hardware drivers should set |
3080 | * chip correct ! | 3097 | * chip correct! |
3081 | */ | 3098 | */ |
3082 | if (busw != (chip->options & NAND_BUSWIDTH_16)) { | 3099 | if (busw != (chip->options & NAND_BUSWIDTH_16)) { |
3083 | printk(KERN_INFO "NAND device: Manufacturer ID:" | 3100 | pr_info("NAND device: Manufacturer ID:" |
3084 | " 0x%02x, Chip ID: 0x%02x (%s %s)\n", *maf_id, | 3101 | " 0x%02x, Chip ID: 0x%02x (%s %s)\n", *maf_id, |
3085 | *dev_id, nand_manuf_ids[maf_idx].name, mtd->name); | 3102 | *dev_id, nand_manuf_ids[maf_idx].name, mtd->name); |
3086 | printk(KERN_WARNING "NAND bus width %d instead %d bit\n", | 3103 | pr_warn("NAND bus width %d instead %d bit\n", |
3087 | (chip->options & NAND_BUSWIDTH_16) ? 16 : 8, | 3104 | (chip->options & NAND_BUSWIDTH_16) ? 16 : 8, |
3088 | busw ? 16 : 8); | 3105 | busw ? 16 : 8); |
3089 | return ERR_PTR(-EINVAL); | 3106 | return ERR_PTR(-EINVAL); |
3090 | } | 3107 | } |
3091 | 3108 | ||
3092 | /* Calculate the address shift from the page size */ | 3109 | /* Calculate the address shift from the page size */ |
3093 | chip->page_shift = ffs(mtd->writesize) - 1; | 3110 | chip->page_shift = ffs(mtd->writesize) - 1; |
3094 | /* Convert chipsize to number of pages per chip -1. */ | 3111 | /* Convert chipsize to number of pages per chip -1 */ |
3095 | chip->pagemask = (chip->chipsize >> chip->page_shift) - 1; | 3112 | chip->pagemask = (chip->chipsize >> chip->page_shift) - 1; |
3096 | 3113 | ||
3097 | chip->bbt_erase_shift = chip->phys_erase_shift = | 3114 | chip->bbt_erase_shift = chip->phys_erase_shift = |
@@ -3121,7 +3138,7 @@ ident_done: | |||
3121 | if ((chip->cellinfo & NAND_CI_CELLTYPE_MSK) && | 3138 | if ((chip->cellinfo & NAND_CI_CELLTYPE_MSK) && |
3122 | (*maf_id == NAND_MFR_SAMSUNG || | 3139 | (*maf_id == NAND_MFR_SAMSUNG || |
3123 | *maf_id == NAND_MFR_HYNIX)) | 3140 | *maf_id == NAND_MFR_HYNIX)) |
3124 | chip->options |= NAND_BBT_SCANLASTPAGE; | 3141 | chip->bbt_options |= NAND_BBT_SCANLASTPAGE; |
3125 | else if ((!(chip->cellinfo & NAND_CI_CELLTYPE_MSK) && | 3142 | else if ((!(chip->cellinfo & NAND_CI_CELLTYPE_MSK) && |
3126 | (*maf_id == NAND_MFR_SAMSUNG || | 3143 | (*maf_id == NAND_MFR_SAMSUNG || |
3127 | *maf_id == NAND_MFR_HYNIX || | 3144 | *maf_id == NAND_MFR_HYNIX || |
@@ -3129,17 +3146,7 @@ ident_done: | |||
3129 | *maf_id == NAND_MFR_AMD)) || | 3146 | *maf_id == NAND_MFR_AMD)) || |
3130 | (mtd->writesize == 2048 && | 3147 | (mtd->writesize == 2048 && |
3131 | *maf_id == NAND_MFR_MICRON)) | 3148 | *maf_id == NAND_MFR_MICRON)) |
3132 | chip->options |= NAND_BBT_SCAN2NDPAGE; | 3149 | chip->bbt_options |= NAND_BBT_SCAN2NDPAGE; |
3133 | |||
3134 | /* | ||
3135 | * Numonyx/ST 2K pages, x8 bus use BOTH byte 1 and 6 | ||
3136 | */ | ||
3137 | if (!(busw & NAND_BUSWIDTH_16) && | ||
3138 | *maf_id == NAND_MFR_STMICRO && | ||
3139 | mtd->writesize == 2048) { | ||
3140 | chip->options |= NAND_BBT_SCANBYTE1AND6; | ||
3141 | chip->badblockpos = 0; | ||
3142 | } | ||
3143 | 3150 | ||
3144 | /* Check for AND chips with 4 page planes */ | 3151 | /* Check for AND chips with 4 page planes */ |
3145 | if (chip->options & NAND_4PAGE_ARRAY) | 3152 | if (chip->options & NAND_4PAGE_ARRAY) |
@@ -3147,12 +3154,11 @@ ident_done: | |||
3147 | else | 3154 | else |
3148 | chip->erase_cmd = single_erase_cmd; | 3155 | chip->erase_cmd = single_erase_cmd; |
3149 | 3156 | ||
3150 | /* Do not replace user supplied command function ! */ | 3157 | /* Do not replace user supplied command function! */ |
3151 | if (mtd->writesize > 512 && chip->cmdfunc == nand_command) | 3158 | if (mtd->writesize > 512 && chip->cmdfunc == nand_command) |
3152 | chip->cmdfunc = nand_command_lp; | 3159 | chip->cmdfunc = nand_command_lp; |
3153 | 3160 | ||
3154 | /* TODO onfi flash name */ | 3161 | pr_info("NAND device: Manufacturer ID:" |
3155 | printk(KERN_INFO "NAND device: Manufacturer ID:" | ||
3156 | " 0x%02x, Chip ID: 0x%02x (%s %s)\n", *maf_id, *dev_id, | 3162 | " 0x%02x, Chip ID: 0x%02x (%s %s)\n", *maf_id, *dev_id, |
3157 | nand_manuf_ids[maf_idx].name, | 3163 | nand_manuf_ids[maf_idx].name, |
3158 | chip->onfi_version ? chip->onfi_params.model : type->name); | 3164 | chip->onfi_version ? chip->onfi_params.model : type->name); |
@@ -3162,12 +3168,12 @@ ident_done: | |||
3162 | 3168 | ||
3163 | /** | 3169 | /** |
3164 | * nand_scan_ident - [NAND Interface] Scan for the NAND device | 3170 | * nand_scan_ident - [NAND Interface] Scan for the NAND device |
3165 | * @mtd: MTD device structure | 3171 | * @mtd: MTD device structure |
3166 | * @maxchips: Number of chips to scan for | 3172 | * @maxchips: number of chips to scan for |
3167 | * @table: Alternative NAND ID table | 3173 | * @table: alternative NAND ID table |
3168 | * | 3174 | * |
3169 | * This is the first phase of the normal nand_scan() function. It | 3175 | * This is the first phase of the normal nand_scan() function. It reads the |
3170 | * reads the flash ID and sets up MTD fields accordingly. | 3176 | * flash ID and sets up MTD fields accordingly. |
3171 | * | 3177 | * |
3172 | * The mtd->owner field must be set to the module of the caller. | 3178 | * The mtd->owner field must be set to the module of the caller. |
3173 | */ | 3179 | */ |
@@ -3189,7 +3195,7 @@ int nand_scan_ident(struct mtd_info *mtd, int maxchips, | |||
3189 | 3195 | ||
3190 | if (IS_ERR(type)) { | 3196 | if (IS_ERR(type)) { |
3191 | if (!(chip->options & NAND_SCAN_SILENT_NODEV)) | 3197 | if (!(chip->options & NAND_SCAN_SILENT_NODEV)) |
3192 | printk(KERN_WARNING "No NAND device found.\n"); | 3198 | pr_warn("No NAND device found\n"); |
3193 | chip->select_chip(mtd, -1); | 3199 | chip->select_chip(mtd, -1); |
3194 | return PTR_ERR(type); | 3200 | return PTR_ERR(type); |
3195 | } | 3201 | } |
@@ -3207,7 +3213,7 @@ int nand_scan_ident(struct mtd_info *mtd, int maxchips, | |||
3207 | break; | 3213 | break; |
3208 | } | 3214 | } |
3209 | if (i > 1) | 3215 | if (i > 1) |
3210 | printk(KERN_INFO "%d NAND chips detected\n", i); | 3216 | pr_info("%d NAND chips detected\n", i); |
3211 | 3217 | ||
3212 | /* Store the number of chips and calc total size for mtd */ | 3218 | /* Store the number of chips and calc total size for mtd */ |
3213 | chip->numchips = i; | 3219 | chip->numchips = i; |
@@ -3220,11 +3226,11 @@ EXPORT_SYMBOL(nand_scan_ident); | |||
3220 | 3226 | ||
3221 | /** | 3227 | /** |
3222 | * nand_scan_tail - [NAND Interface] Scan for the NAND device | 3228 | * nand_scan_tail - [NAND Interface] Scan for the NAND device |
3223 | * @mtd: MTD device structure | 3229 | * @mtd: MTD device structure |
3224 | * | 3230 | * |
3225 | * This is the second phase of the normal nand_scan() function. It | 3231 | * This is the second phase of the normal nand_scan() function. It fills out |
3226 | * fills out all the uninitialized function pointers with the defaults | 3232 | * all the uninitialized function pointers with the defaults and scans for a |
3227 | * and scans for a bad block table if appropriate. | 3233 | * bad block table if appropriate. |
3228 | */ | 3234 | */ |
3229 | int nand_scan_tail(struct mtd_info *mtd) | 3235 | int nand_scan_tail(struct mtd_info *mtd) |
3230 | { | 3236 | { |
@@ -3240,7 +3246,7 @@ int nand_scan_tail(struct mtd_info *mtd) | |||
3240 | chip->oob_poi = chip->buffers->databuf + mtd->writesize; | 3246 | chip->oob_poi = chip->buffers->databuf + mtd->writesize; |
3241 | 3247 | ||
3242 | /* | 3248 | /* |
3243 | * If no default placement scheme is given, select an appropriate one | 3249 | * If no default placement scheme is given, select an appropriate one. |
3244 | */ | 3250 | */ |
3245 | if (!chip->ecc.layout && (chip->ecc.mode != NAND_ECC_SOFT_BCH)) { | 3251 | if (!chip->ecc.layout && (chip->ecc.mode != NAND_ECC_SOFT_BCH)) { |
3246 | switch (mtd->oobsize) { | 3252 | switch (mtd->oobsize) { |
@@ -3257,8 +3263,8 @@ int nand_scan_tail(struct mtd_info *mtd) | |||
3257 | chip->ecc.layout = &nand_oob_128; | 3263 | chip->ecc.layout = &nand_oob_128; |
3258 | break; | 3264 | break; |
3259 | default: | 3265 | default: |
3260 | printk(KERN_WARNING "No oob scheme defined for " | 3266 | pr_warn("No oob scheme defined for oobsize %d\n", |
3261 | "oobsize %d\n", mtd->oobsize); | 3267 | mtd->oobsize); |
3262 | BUG(); | 3268 | BUG(); |
3263 | } | 3269 | } |
3264 | } | 3270 | } |
@@ -3267,7 +3273,7 @@ int nand_scan_tail(struct mtd_info *mtd) | |||
3267 | chip->write_page = nand_write_page; | 3273 | chip->write_page = nand_write_page; |
3268 | 3274 | ||
3269 | /* | 3275 | /* |
3270 | * check ECC mode, default to software if 3byte/512byte hardware ECC is | 3276 | * Check ECC mode, default to software if 3byte/512byte hardware ECC is |
3271 | * selected and we have 256 byte pagesize fallback to software ECC | 3277 | * selected and we have 256 byte pagesize fallback to software ECC |
3272 | */ | 3278 | */ |
3273 | 3279 | ||
@@ -3276,15 +3282,15 @@ int nand_scan_tail(struct mtd_info *mtd) | |||
3276 | /* Similar to NAND_ECC_HW, but a separate read_page handle */ | 3282 | /* Similar to NAND_ECC_HW, but a separate read_page handle */ |
3277 | if (!chip->ecc.calculate || !chip->ecc.correct || | 3283 | if (!chip->ecc.calculate || !chip->ecc.correct || |
3278 | !chip->ecc.hwctl) { | 3284 | !chip->ecc.hwctl) { |
3279 | printk(KERN_WARNING "No ECC functions supplied; " | 3285 | pr_warn("No ECC functions supplied; " |
3280 | "Hardware ECC not possible\n"); | 3286 | "hardware ECC not possible\n"); |
3281 | BUG(); | 3287 | BUG(); |
3282 | } | 3288 | } |
3283 | if (!chip->ecc.read_page) | 3289 | if (!chip->ecc.read_page) |
3284 | chip->ecc.read_page = nand_read_page_hwecc_oob_first; | 3290 | chip->ecc.read_page = nand_read_page_hwecc_oob_first; |
3285 | 3291 | ||
3286 | case NAND_ECC_HW: | 3292 | case NAND_ECC_HW: |
3287 | /* Use standard hwecc read page function ? */ | 3293 | /* Use standard hwecc read page function? */ |
3288 | if (!chip->ecc.read_page) | 3294 | if (!chip->ecc.read_page) |
3289 | chip->ecc.read_page = nand_read_page_hwecc; | 3295 | chip->ecc.read_page = nand_read_page_hwecc; |
3290 | if (!chip->ecc.write_page) | 3296 | if (!chip->ecc.write_page) |
@@ -3305,11 +3311,11 @@ int nand_scan_tail(struct mtd_info *mtd) | |||
3305 | chip->ecc.read_page == nand_read_page_hwecc || | 3311 | chip->ecc.read_page == nand_read_page_hwecc || |
3306 | !chip->ecc.write_page || | 3312 | !chip->ecc.write_page || |
3307 | chip->ecc.write_page == nand_write_page_hwecc)) { | 3313 | chip->ecc.write_page == nand_write_page_hwecc)) { |
3308 | printk(KERN_WARNING "No ECC functions supplied; " | 3314 | pr_warn("No ECC functions supplied; " |
3309 | "Hardware ECC not possible\n"); | 3315 | "hardware ECC not possible\n"); |
3310 | BUG(); | 3316 | BUG(); |
3311 | } | 3317 | } |
3312 | /* Use standard syndrome read/write page function ? */ | 3318 | /* Use standard syndrome read/write page function? */ |
3313 | if (!chip->ecc.read_page) | 3319 | if (!chip->ecc.read_page) |
3314 | chip->ecc.read_page = nand_read_page_syndrome; | 3320 | chip->ecc.read_page = nand_read_page_syndrome; |
3315 | if (!chip->ecc.write_page) | 3321 | if (!chip->ecc.write_page) |
@@ -3325,9 +3331,9 @@ int nand_scan_tail(struct mtd_info *mtd) | |||
3325 | 3331 | ||
3326 | if (mtd->writesize >= chip->ecc.size) | 3332 | if (mtd->writesize >= chip->ecc.size) |
3327 | break; | 3333 | break; |
3328 | printk(KERN_WARNING "%d byte HW ECC not possible on " | 3334 | pr_warn("%d byte HW ECC not possible on " |
3329 | "%d byte page size, fallback to SW ECC\n", | 3335 | "%d byte page size, fallback to SW ECC\n", |
3330 | chip->ecc.size, mtd->writesize); | 3336 | chip->ecc.size, mtd->writesize); |
3331 | chip->ecc.mode = NAND_ECC_SOFT; | 3337 | chip->ecc.mode = NAND_ECC_SOFT; |
3332 | 3338 | ||
3333 | case NAND_ECC_SOFT: | 3339 | case NAND_ECC_SOFT: |
@@ -3347,7 +3353,7 @@ int nand_scan_tail(struct mtd_info *mtd) | |||
3347 | 3353 | ||
3348 | case NAND_ECC_SOFT_BCH: | 3354 | case NAND_ECC_SOFT_BCH: |
3349 | if (!mtd_nand_has_bch()) { | 3355 | if (!mtd_nand_has_bch()) { |
3350 | printk(KERN_WARNING "CONFIG_MTD_ECC_BCH not enabled\n"); | 3356 | pr_warn("CONFIG_MTD_ECC_BCH not enabled\n"); |
3351 | BUG(); | 3357 | BUG(); |
3352 | } | 3358 | } |
3353 | chip->ecc.calculate = nand_bch_calculate_ecc; | 3359 | chip->ecc.calculate = nand_bch_calculate_ecc; |
@@ -3362,8 +3368,8 @@ int nand_scan_tail(struct mtd_info *mtd) | |||
3362 | /* | 3368 | /* |
3363 | * Board driver should supply ecc.size and ecc.bytes values to | 3369 | * Board driver should supply ecc.size and ecc.bytes values to |
3364 | * select how many bits are correctable; see nand_bch_init() | 3370 | * select how many bits are correctable; see nand_bch_init() |
3365 | * for details. | 3371 | * for details. Otherwise, default to 4 bits for large page |
3366 | * Otherwise, default to 4 bits for large page devices | 3372 | * devices. |
3367 | */ | 3373 | */ |
3368 | if (!chip->ecc.size && (mtd->oobsize >= 64)) { | 3374 | if (!chip->ecc.size && (mtd->oobsize >= 64)) { |
3369 | chip->ecc.size = 512; | 3375 | chip->ecc.size = 512; |
@@ -3374,14 +3380,14 @@ int nand_scan_tail(struct mtd_info *mtd) | |||
3374 | chip->ecc.bytes, | 3380 | chip->ecc.bytes, |
3375 | &chip->ecc.layout); | 3381 | &chip->ecc.layout); |
3376 | if (!chip->ecc.priv) { | 3382 | if (!chip->ecc.priv) { |
3377 | printk(KERN_WARNING "BCH ECC initialization failed!\n"); | 3383 | pr_warn("BCH ECC initialization failed!\n"); |
3378 | BUG(); | 3384 | BUG(); |
3379 | } | 3385 | } |
3380 | break; | 3386 | break; |
3381 | 3387 | ||
3382 | case NAND_ECC_NONE: | 3388 | case NAND_ECC_NONE: |
3383 | printk(KERN_WARNING "NAND_ECC_NONE selected by board driver. " | 3389 | pr_warn("NAND_ECC_NONE selected by board driver. " |
3384 | "This is not recommended !!\n"); | 3390 | "This is not recommended!\n"); |
3385 | chip->ecc.read_page = nand_read_page_raw; | 3391 | chip->ecc.read_page = nand_read_page_raw; |
3386 | chip->ecc.write_page = nand_write_page_raw; | 3392 | chip->ecc.write_page = nand_write_page_raw; |
3387 | chip->ecc.read_oob = nand_read_oob_std; | 3393 | chip->ecc.read_oob = nand_read_oob_std; |
@@ -3393,14 +3399,19 @@ int nand_scan_tail(struct mtd_info *mtd) | |||
3393 | break; | 3399 | break; |
3394 | 3400 | ||
3395 | default: | 3401 | default: |
3396 | printk(KERN_WARNING "Invalid NAND_ECC_MODE %d\n", | 3402 | pr_warn("Invalid NAND_ECC_MODE %d\n", chip->ecc.mode); |
3397 | chip->ecc.mode); | ||
3398 | BUG(); | 3403 | BUG(); |
3399 | } | 3404 | } |
3400 | 3405 | ||
3406 | /* For many systems, the standard OOB write also works for raw */ | ||
3407 | if (!chip->ecc.read_oob_raw) | ||
3408 | chip->ecc.read_oob_raw = chip->ecc.read_oob; | ||
3409 | if (!chip->ecc.write_oob_raw) | ||
3410 | chip->ecc.write_oob_raw = chip->ecc.write_oob; | ||
3411 | |||
3401 | /* | 3412 | /* |
3402 | * The number of bytes available for a client to place data into | 3413 | * The number of bytes available for a client to place data into |
3403 | * the out of band area | 3414 | * the out of band area. |
3404 | */ | 3415 | */ |
3405 | chip->ecc.layout->oobavail = 0; | 3416 | chip->ecc.layout->oobavail = 0; |
3406 | for (i = 0; chip->ecc.layout->oobfree[i].length | 3417 | for (i = 0; chip->ecc.layout->oobfree[i].length |
@@ -3411,19 +3422,16 @@ int nand_scan_tail(struct mtd_info *mtd) | |||
3411 | 3422 | ||
3412 | /* | 3423 | /* |
3413 | * Set the number of read / write steps for one page depending on ECC | 3424 | * Set the number of read / write steps for one page depending on ECC |
3414 | * mode | 3425 | * mode. |
3415 | */ | 3426 | */ |
3416 | chip->ecc.steps = mtd->writesize / chip->ecc.size; | 3427 | chip->ecc.steps = mtd->writesize / chip->ecc.size; |
3417 | if (chip->ecc.steps * chip->ecc.size != mtd->writesize) { | 3428 | if (chip->ecc.steps * chip->ecc.size != mtd->writesize) { |
3418 | printk(KERN_WARNING "Invalid ecc parameters\n"); | 3429 | pr_warn("Invalid ECC parameters\n"); |
3419 | BUG(); | 3430 | BUG(); |
3420 | } | 3431 | } |
3421 | chip->ecc.total = chip->ecc.steps * chip->ecc.bytes; | 3432 | chip->ecc.total = chip->ecc.steps * chip->ecc.bytes; |
3422 | 3433 | ||
3423 | /* | 3434 | /* Allow subpage writes up to ecc.steps. Not possible for MLC flash */ |
3424 | * Allow subpage writes up to ecc.steps. Not possible for MLC | ||
3425 | * FLASH. | ||
3426 | */ | ||
3427 | if (!(chip->options & NAND_NO_SUBPAGE_WRITE) && | 3435 | if (!(chip->options & NAND_NO_SUBPAGE_WRITE) && |
3428 | !(chip->cellinfo & NAND_CI_CELLTYPE_MSK)) { | 3436 | !(chip->cellinfo & NAND_CI_CELLTYPE_MSK)) { |
3429 | switch (chip->ecc.steps) { | 3437 | switch (chip->ecc.steps) { |
@@ -3481,9 +3489,11 @@ int nand_scan_tail(struct mtd_info *mtd) | |||
3481 | } | 3489 | } |
3482 | EXPORT_SYMBOL(nand_scan_tail); | 3490 | EXPORT_SYMBOL(nand_scan_tail); |
3483 | 3491 | ||
3484 | /* is_module_text_address() isn't exported, and it's mostly a pointless | 3492 | /* |
3493 | * is_module_text_address() isn't exported, and it's mostly a pointless | ||
3485 | * test if this is a module _anyway_ -- they'd have to try _really_ hard | 3494 | * test if this is a module _anyway_ -- they'd have to try _really_ hard |
3486 | * to call us from in-kernel code if the core NAND support is modular. */ | 3495 | * to call us from in-kernel code if the core NAND support is modular. |
3496 | */ | ||
3487 | #ifdef MODULE | 3497 | #ifdef MODULE |
3488 | #define caller_is_module() (1) | 3498 | #define caller_is_module() (1) |
3489 | #else | 3499 | #else |
@@ -3493,15 +3503,13 @@ EXPORT_SYMBOL(nand_scan_tail); | |||
3493 | 3503 | ||
3494 | /** | 3504 | /** |
3495 | * nand_scan - [NAND Interface] Scan for the NAND device | 3505 | * nand_scan - [NAND Interface] Scan for the NAND device |
3496 | * @mtd: MTD device structure | 3506 | * @mtd: MTD device structure |
3497 | * @maxchips: Number of chips to scan for | 3507 | * @maxchips: number of chips to scan for |
3498 | * | ||
3499 | * This fills out all the uninitialized function pointers | ||
3500 | * with the defaults. | ||
3501 | * The flash ID is read and the mtd/chip structures are | ||
3502 | * filled with the appropriate values. | ||
3503 | * The mtd->owner field must be set to the module of the caller | ||
3504 | * | 3508 | * |
3509 | * This fills out all the uninitialized function pointers with the defaults. | ||
3510 | * The flash ID is read and the mtd/chip structures are filled with the | ||
3511 | * appropriate values. The mtd->owner field must be set to the module of the | ||
3512 | * caller. | ||
3505 | */ | 3513 | */ |
3506 | int nand_scan(struct mtd_info *mtd, int maxchips) | 3514 | int nand_scan(struct mtd_info *mtd, int maxchips) |
3507 | { | 3515 | { |
@@ -3509,8 +3517,7 @@ int nand_scan(struct mtd_info *mtd, int maxchips) | |||
3509 | 3517 | ||
3510 | /* Many callers got this wrong, so check for it for a while... */ | 3518 | /* Many callers got this wrong, so check for it for a while... */ |
3511 | if (!mtd->owner && caller_is_module()) { | 3519 | if (!mtd->owner && caller_is_module()) { |
3512 | printk(KERN_CRIT "%s called with NULL mtd->owner!\n", | 3520 | pr_crit("%s called with NULL mtd->owner!\n", __func__); |
3513 | __func__); | ||
3514 | BUG(); | 3521 | BUG(); |
3515 | } | 3522 | } |
3516 | 3523 | ||
@@ -3523,8 +3530,8 @@ EXPORT_SYMBOL(nand_scan); | |||
3523 | 3530 | ||
3524 | /** | 3531 | /** |
3525 | * nand_release - [NAND Interface] Free resources held by the NAND device | 3532 | * nand_release - [NAND Interface] Free resources held by the NAND device |
3526 | * @mtd: MTD device structure | 3533 | * @mtd: MTD device structure |
3527 | */ | 3534 | */ |
3528 | void nand_release(struct mtd_info *mtd) | 3535 | void nand_release(struct mtd_info *mtd) |
3529 | { | 3536 | { |
3530 | struct nand_chip *chip = mtd->priv; | 3537 | struct nand_chip *chip = mtd->priv; |
diff --git a/drivers/mtd/nand/nand_bbt.c b/drivers/mtd/nand/nand_bbt.c index 4165857752ca..69148ae3bf58 100644 --- a/drivers/mtd/nand/nand_bbt.c +++ b/drivers/mtd/nand/nand_bbt.c | |||
@@ -14,7 +14,7 @@ | |||
14 | * | 14 | * |
15 | * When nand_scan_bbt is called, then it tries to find the bad block table | 15 | * When nand_scan_bbt is called, then it tries to find the bad block table |
16 | * depending on the options in the BBT descriptor(s). If no flash based BBT | 16 | * depending on the options in the BBT descriptor(s). If no flash based BBT |
17 | * (NAND_USE_FLASH_BBT) is specified then the device is scanned for factory | 17 | * (NAND_BBT_USE_FLASH) is specified then the device is scanned for factory |
18 | * marked good / bad blocks. This information is used to create a memory BBT. | 18 | * marked good / bad blocks. This information is used to create a memory BBT. |
19 | * Once a new bad block is discovered then the "factory" information is updated | 19 | * Once a new bad block is discovered then the "factory" information is updated |
20 | * on the device. | 20 | * on the device. |
@@ -36,9 +36,9 @@ | |||
36 | * The table is marked in the OOB area with an ident pattern and a version | 36 | * The table is marked in the OOB area with an ident pattern and a version |
37 | * number which indicates which of both tables is more up to date. If the NAND | 37 | * number which indicates which of both tables is more up to date. If the NAND |
38 | * controller needs the complete OOB area for the ECC information then the | 38 | * controller needs the complete OOB area for the ECC information then the |
39 | * option NAND_USE_FLASH_BBT_NO_OOB should be used: it moves the ident pattern | 39 | * option NAND_BBT_NO_OOB should be used (along with NAND_BBT_USE_FLASH, of |
40 | * and the version byte into the data area and the OOB area will remain | 40 | * course): it moves the ident pattern and the version byte into the data area |
41 | * untouched. | 41 | * and the OOB area will remain untouched. |
42 | * | 42 | * |
43 | * The table uses 2 bits per block | 43 | * The table uses 2 bits per block |
44 | * 11b: block is good | 44 | * 11b: block is good |
@@ -81,17 +81,15 @@ static int check_pattern_no_oob(uint8_t *buf, struct nand_bbt_descr *td) | |||
81 | 81 | ||
82 | /** | 82 | /** |
83 | * check_pattern - [GENERIC] check if a pattern is in the buffer | 83 | * check_pattern - [GENERIC] check if a pattern is in the buffer |
84 | * @buf: the buffer to search | 84 | * @buf: the buffer to search |
85 | * @len: the length of buffer to search | 85 | * @len: the length of buffer to search |
86 | * @paglen: the pagelength | 86 | * @paglen: the pagelength |
87 | * @td: search pattern descriptor | 87 | * @td: search pattern descriptor |
88 | * | 88 | * |
89 | * Check for a pattern at the given place. Used to search bad block | 89 | * Check for a pattern at the given place. Used to search bad block tables and |
90 | * tables and good / bad block identifiers. | 90 | * good / bad block identifiers. If the SCAN_EMPTY option is set then check, if |
91 | * If the SCAN_EMPTY option is set then check, if all bytes except the | 91 | * all bytes except the pattern area contain 0xff. |
92 | * pattern area contain 0xff | 92 | */ |
93 | * | ||
94 | */ | ||
95 | static int check_pattern(uint8_t *buf, int len, int paglen, struct nand_bbt_descr *td) | 93 | static int check_pattern(uint8_t *buf, int len, int paglen, struct nand_bbt_descr *td) |
96 | { | 94 | { |
97 | int i, end = 0; | 95 | int i, end = 0; |
@@ -110,32 +108,8 @@ static int check_pattern(uint8_t *buf, int len, int paglen, struct nand_bbt_desc | |||
110 | p += end; | 108 | p += end; |
111 | 109 | ||
112 | /* Compare the pattern */ | 110 | /* Compare the pattern */ |
113 | for (i = 0; i < td->len; i++) { | 111 | if (memcmp(p, td->pattern, td->len)) |
114 | if (p[i] != td->pattern[i]) | 112 | return -1; |
115 | return -1; | ||
116 | } | ||
117 | |||
118 | /* Check both positions 1 and 6 for pattern? */ | ||
119 | if (td->options & NAND_BBT_SCANBYTE1AND6) { | ||
120 | if (td->options & NAND_BBT_SCANEMPTY) { | ||
121 | p += td->len; | ||
122 | end += NAND_SMALL_BADBLOCK_POS - td->offs; | ||
123 | /* Check region between positions 1 and 6 */ | ||
124 | for (i = 0; i < NAND_SMALL_BADBLOCK_POS - td->offs - td->len; | ||
125 | i++) { | ||
126 | if (*p++ != 0xff) | ||
127 | return -1; | ||
128 | } | ||
129 | } | ||
130 | else { | ||
131 | p += NAND_SMALL_BADBLOCK_POS - td->offs; | ||
132 | } | ||
133 | /* Compare the pattern */ | ||
134 | for (i = 0; i < td->len; i++) { | ||
135 | if (p[i] != td->pattern[i]) | ||
136 | return -1; | ||
137 | } | ||
138 | } | ||
139 | 113 | ||
140 | if (td->options & NAND_BBT_SCANEMPTY) { | 114 | if (td->options & NAND_BBT_SCANEMPTY) { |
141 | p += td->len; | 115 | p += td->len; |
@@ -150,14 +124,13 @@ static int check_pattern(uint8_t *buf, int len, int paglen, struct nand_bbt_desc | |||
150 | 124 | ||
151 | /** | 125 | /** |
152 | * check_short_pattern - [GENERIC] check if a pattern is in the buffer | 126 | * check_short_pattern - [GENERIC] check if a pattern is in the buffer |
153 | * @buf: the buffer to search | 127 | * @buf: the buffer to search |
154 | * @td: search pattern descriptor | 128 | * @td: search pattern descriptor |
155 | * | ||
156 | * Check for a pattern at the given place. Used to search bad block | ||
157 | * tables and good / bad block identifiers. Same as check_pattern, but | ||
158 | * no optional empty check | ||
159 | * | 129 | * |
160 | */ | 130 | * Check for a pattern at the given place. Used to search bad block tables and |
131 | * good / bad block identifiers. Same as check_pattern, but no optional empty | ||
132 | * check. | ||
133 | */ | ||
161 | static int check_short_pattern(uint8_t *buf, struct nand_bbt_descr *td) | 134 | static int check_short_pattern(uint8_t *buf, struct nand_bbt_descr *td) |
162 | { | 135 | { |
163 | int i; | 136 | int i; |
@@ -168,21 +141,14 @@ static int check_short_pattern(uint8_t *buf, struct nand_bbt_descr *td) | |||
168 | if (p[td->offs + i] != td->pattern[i]) | 141 | if (p[td->offs + i] != td->pattern[i]) |
169 | return -1; | 142 | return -1; |
170 | } | 143 | } |
171 | /* Need to check location 1 AND 6? */ | ||
172 | if (td->options & NAND_BBT_SCANBYTE1AND6) { | ||
173 | for (i = 0; i < td->len; i++) { | ||
174 | if (p[NAND_SMALL_BADBLOCK_POS + i] != td->pattern[i]) | ||
175 | return -1; | ||
176 | } | ||
177 | } | ||
178 | return 0; | 144 | return 0; |
179 | } | 145 | } |
180 | 146 | ||
181 | /** | 147 | /** |
182 | * add_marker_len - compute the length of the marker in data area | 148 | * add_marker_len - compute the length of the marker in data area |
183 | * @td: BBT descriptor used for computation | 149 | * @td: BBT descriptor used for computation |
184 | * | 150 | * |
185 | * The length will be 0 if the markeris located in OOB area. | 151 | * The length will be 0 if the marker is located in OOB area. |
186 | */ | 152 | */ |
187 | static u32 add_marker_len(struct nand_bbt_descr *td) | 153 | static u32 add_marker_len(struct nand_bbt_descr *td) |
188 | { | 154 | { |
@@ -199,34 +165,33 @@ static u32 add_marker_len(struct nand_bbt_descr *td) | |||
199 | 165 | ||
200 | /** | 166 | /** |
201 | * read_bbt - [GENERIC] Read the bad block table starting from page | 167 | * read_bbt - [GENERIC] Read the bad block table starting from page |
202 | * @mtd: MTD device structure | 168 | * @mtd: MTD device structure |
203 | * @buf: temporary buffer | 169 | * @buf: temporary buffer |
204 | * @page: the starting page | 170 | * @page: the starting page |
205 | * @num: the number of bbt descriptors to read | 171 | * @num: the number of bbt descriptors to read |
206 | * @td: the bbt describtion table | 172 | * @td: the bbt describtion table |
207 | * @offs: offset in the memory table | 173 | * @offs: offset in the memory table |
208 | * | 174 | * |
209 | * Read the bad block table starting from page. | 175 | * Read the bad block table starting from page. |
210 | * | ||
211 | */ | 176 | */ |
212 | static int read_bbt(struct mtd_info *mtd, uint8_t *buf, int page, int num, | 177 | static int read_bbt(struct mtd_info *mtd, uint8_t *buf, int page, int num, |
213 | struct nand_bbt_descr *td, int offs) | 178 | struct nand_bbt_descr *td, int offs) |
214 | { | 179 | { |
215 | int res, i, j, act = 0; | 180 | int res, ret = 0, i, j, act = 0; |
216 | struct nand_chip *this = mtd->priv; | 181 | struct nand_chip *this = mtd->priv; |
217 | size_t retlen, len, totlen; | 182 | size_t retlen, len, totlen; |
218 | loff_t from; | 183 | loff_t from; |
219 | int bits = td->options & NAND_BBT_NRBITS_MSK; | 184 | int bits = td->options & NAND_BBT_NRBITS_MSK; |
220 | uint8_t msk = (uint8_t) ((1 << bits) - 1); | 185 | uint8_t msk = (uint8_t)((1 << bits) - 1); |
221 | u32 marker_len; | 186 | u32 marker_len; |
222 | int reserved_block_code = td->reserved_block_code; | 187 | int reserved_block_code = td->reserved_block_code; |
223 | 188 | ||
224 | totlen = (num * bits) >> 3; | 189 | totlen = (num * bits) >> 3; |
225 | marker_len = add_marker_len(td); | 190 | marker_len = add_marker_len(td); |
226 | from = ((loff_t) page) << this->page_shift; | 191 | from = ((loff_t)page) << this->page_shift; |
227 | 192 | ||
228 | while (totlen) { | 193 | while (totlen) { |
229 | len = min(totlen, (size_t) (1 << this->bbt_erase_shift)); | 194 | len = min(totlen, (size_t)(1 << this->bbt_erase_shift)); |
230 | if (marker_len) { | 195 | if (marker_len) { |
231 | /* | 196 | /* |
232 | * In case the BBT marker is not in the OOB area it | 197 | * In case the BBT marker is not in the OOB area it |
@@ -238,11 +203,18 @@ static int read_bbt(struct mtd_info *mtd, uint8_t *buf, int page, int num, | |||
238 | } | 203 | } |
239 | res = mtd->read(mtd, from, len, &retlen, buf); | 204 | res = mtd->read(mtd, from, len, &retlen, buf); |
240 | if (res < 0) { | 205 | if (res < 0) { |
241 | if (retlen != len) { | 206 | if (mtd_is_eccerr(res)) { |
242 | printk(KERN_INFO "nand_bbt: Error reading bad block table\n"); | 207 | pr_info("nand_bbt: ECC error in BBT at " |
208 | "0x%012llx\n", from & ~mtd->writesize); | ||
209 | return res; | ||
210 | } else if (mtd_is_bitflip(res)) { | ||
211 | pr_info("nand_bbt: corrected error in BBT at " | ||
212 | "0x%012llx\n", from & ~mtd->writesize); | ||
213 | ret = res; | ||
214 | } else { | ||
215 | pr_info("nand_bbt: error reading BBT\n"); | ||
243 | return res; | 216 | return res; |
244 | } | 217 | } |
245 | printk(KERN_WARNING "nand_bbt: ECC error while reading bad block table\n"); | ||
246 | } | 218 | } |
247 | 219 | ||
248 | /* Analyse data */ | 220 | /* Analyse data */ |
@@ -253,17 +225,19 @@ static int read_bbt(struct mtd_info *mtd, uint8_t *buf, int page, int num, | |||
253 | if (tmp == msk) | 225 | if (tmp == msk) |
254 | continue; | 226 | continue; |
255 | if (reserved_block_code && (tmp == reserved_block_code)) { | 227 | if (reserved_block_code && (tmp == reserved_block_code)) { |
256 | printk(KERN_DEBUG "nand_read_bbt: Reserved block at 0x%012llx\n", | 228 | pr_info("nand_read_bbt: reserved block at 0x%012llx\n", |
257 | (loff_t)((offs << 2) + (act >> 1)) << this->bbt_erase_shift); | 229 | (loff_t)((offs << 2) + (act >> 1)) << this->bbt_erase_shift); |
258 | this->bbt[offs + (act >> 3)] |= 0x2 << (act & 0x06); | 230 | this->bbt[offs + (act >> 3)] |= 0x2 << (act & 0x06); |
259 | mtd->ecc_stats.bbtblocks++; | 231 | mtd->ecc_stats.bbtblocks++; |
260 | continue; | 232 | continue; |
261 | } | 233 | } |
262 | /* Leave it for now, if its matured we can move this | 234 | /* |
263 | * message to MTD_DEBUG_LEVEL0 */ | 235 | * Leave it for now, if it's matured we can |
264 | printk(KERN_DEBUG "nand_read_bbt: Bad block at 0x%012llx\n", | 236 | * move this message to pr_debug. |
265 | (loff_t)((offs << 2) + (act >> 1)) << this->bbt_erase_shift); | 237 | */ |
266 | /* Factory marked bad or worn out ? */ | 238 | pr_info("nand_read_bbt: bad block at 0x%012llx\n", |
239 | (loff_t)((offs << 2) + (act >> 1)) << this->bbt_erase_shift); | ||
240 | /* Factory marked bad or worn out? */ | ||
267 | if (tmp == 0) | 241 | if (tmp == 0) |
268 | this->bbt[offs + (act >> 3)] |= 0x3 << (act & 0x06); | 242 | this->bbt[offs + (act >> 3)] |= 0x3 << (act & 0x06); |
269 | else | 243 | else |
@@ -274,20 +248,20 @@ static int read_bbt(struct mtd_info *mtd, uint8_t *buf, int page, int num, | |||
274 | totlen -= len; | 248 | totlen -= len; |
275 | from += len; | 249 | from += len; |
276 | } | 250 | } |
277 | return 0; | 251 | return ret; |
278 | } | 252 | } |
279 | 253 | ||
280 | /** | 254 | /** |
281 | * read_abs_bbt - [GENERIC] Read the bad block table starting at a given page | 255 | * read_abs_bbt - [GENERIC] Read the bad block table starting at a given page |
282 | * @mtd: MTD device structure | 256 | * @mtd: MTD device structure |
283 | * @buf: temporary buffer | 257 | * @buf: temporary buffer |
284 | * @td: descriptor for the bad block table | 258 | * @td: descriptor for the bad block table |
285 | * @chip: read the table for a specific chip, -1 read all chips. | 259 | * @chip: read the table for a specific chip, -1 read all chips; applies only if |
286 | * Applies only if NAND_BBT_PERCHIP option is set | 260 | * NAND_BBT_PERCHIP option is set |
287 | * | 261 | * |
288 | * Read the bad block table for all chips starting at a given page | 262 | * Read the bad block table for all chips starting at a given page. We assume |
289 | * We assume that the bbt bits are in consecutive order. | 263 | * that the bbt bits are in consecutive order. |
290 | */ | 264 | */ |
291 | static int read_abs_bbt(struct mtd_info *mtd, uint8_t *buf, struct nand_bbt_descr *td, int chip) | 265 | static int read_abs_bbt(struct mtd_info *mtd, uint8_t *buf, struct nand_bbt_descr *td, int chip) |
292 | { | 266 | { |
293 | struct nand_chip *this = mtd->priv; | 267 | struct nand_chip *this = mtd->priv; |
@@ -313,9 +287,7 @@ static int read_abs_bbt(struct mtd_info *mtd, uint8_t *buf, struct nand_bbt_desc | |||
313 | return 0; | 287 | return 0; |
314 | } | 288 | } |
315 | 289 | ||
316 | /* | 290 | /* BBT marker is in the first page, no OOB */ |
317 | * BBT marker is in the first page, no OOB. | ||
318 | */ | ||
319 | static int scan_read_raw_data(struct mtd_info *mtd, uint8_t *buf, loff_t offs, | 291 | static int scan_read_raw_data(struct mtd_info *mtd, uint8_t *buf, loff_t offs, |
320 | struct nand_bbt_descr *td) | 292 | struct nand_bbt_descr *td) |
321 | { | 293 | { |
@@ -329,35 +301,26 @@ static int scan_read_raw_data(struct mtd_info *mtd, uint8_t *buf, loff_t offs, | |||
329 | return mtd->read(mtd, offs, len, &retlen, buf); | 301 | return mtd->read(mtd, offs, len, &retlen, buf); |
330 | } | 302 | } |
331 | 303 | ||
332 | /* | 304 | /* Scan read raw data from flash */ |
333 | * Scan read raw data from flash | ||
334 | */ | ||
335 | static int scan_read_raw_oob(struct mtd_info *mtd, uint8_t *buf, loff_t offs, | 305 | static int scan_read_raw_oob(struct mtd_info *mtd, uint8_t *buf, loff_t offs, |
336 | size_t len) | 306 | size_t len) |
337 | { | 307 | { |
338 | struct mtd_oob_ops ops; | 308 | struct mtd_oob_ops ops; |
339 | int res; | 309 | int res; |
340 | 310 | ||
341 | ops.mode = MTD_OOB_RAW; | 311 | ops.mode = MTD_OPS_RAW; |
342 | ops.ooboffs = 0; | 312 | ops.ooboffs = 0; |
343 | ops.ooblen = mtd->oobsize; | 313 | ops.ooblen = mtd->oobsize; |
344 | 314 | ||
345 | |||
346 | while (len > 0) { | 315 | while (len > 0) { |
347 | if (len <= mtd->writesize) { | 316 | ops.datbuf = buf; |
348 | ops.oobbuf = buf + len; | 317 | ops.len = min(len, (size_t)mtd->writesize); |
349 | ops.datbuf = buf; | 318 | ops.oobbuf = buf + ops.len; |
350 | ops.len = len; | ||
351 | return mtd->read_oob(mtd, offs, &ops); | ||
352 | } else { | ||
353 | ops.oobbuf = buf + mtd->writesize; | ||
354 | ops.datbuf = buf; | ||
355 | ops.len = mtd->writesize; | ||
356 | res = mtd->read_oob(mtd, offs, &ops); | ||
357 | 319 | ||
358 | if (res) | 320 | res = mtd->read_oob(mtd, offs, &ops); |
359 | return res; | 321 | |
360 | } | 322 | if (res) |
323 | return res; | ||
361 | 324 | ||
362 | buf += mtd->oobsize + mtd->writesize; | 325 | buf += mtd->oobsize + mtd->writesize; |
363 | len -= mtd->writesize; | 326 | len -= mtd->writesize; |
@@ -374,15 +337,13 @@ static int scan_read_raw(struct mtd_info *mtd, uint8_t *buf, loff_t offs, | |||
374 | return scan_read_raw_oob(mtd, buf, offs, len); | 337 | return scan_read_raw_oob(mtd, buf, offs, len); |
375 | } | 338 | } |
376 | 339 | ||
377 | /* | 340 | /* Scan write data with oob to flash */ |
378 | * Scan write data with oob to flash | ||
379 | */ | ||
380 | static int scan_write_bbt(struct mtd_info *mtd, loff_t offs, size_t len, | 341 | static int scan_write_bbt(struct mtd_info *mtd, loff_t offs, size_t len, |
381 | uint8_t *buf, uint8_t *oob) | 342 | uint8_t *buf, uint8_t *oob) |
382 | { | 343 | { |
383 | struct mtd_oob_ops ops; | 344 | struct mtd_oob_ops ops; |
384 | 345 | ||
385 | ops.mode = MTD_OOB_PLACE; | 346 | ops.mode = MTD_OPS_PLACE_OOB; |
386 | ops.ooboffs = 0; | 347 | ops.ooboffs = 0; |
387 | ops.ooblen = mtd->oobsize; | 348 | ops.ooblen = mtd->oobsize; |
388 | ops.datbuf = buf; | 349 | ops.datbuf = buf; |
@@ -403,15 +364,14 @@ static u32 bbt_get_ver_offs(struct mtd_info *mtd, struct nand_bbt_descr *td) | |||
403 | 364 | ||
404 | /** | 365 | /** |
405 | * read_abs_bbts - [GENERIC] Read the bad block table(s) for all chips starting at a given page | 366 | * read_abs_bbts - [GENERIC] Read the bad block table(s) for all chips starting at a given page |
406 | * @mtd: MTD device structure | 367 | * @mtd: MTD device structure |
407 | * @buf: temporary buffer | 368 | * @buf: temporary buffer |
408 | * @td: descriptor for the bad block table | 369 | * @td: descriptor for the bad block table |
409 | * @md: descriptor for the bad block table mirror | 370 | * @md: descriptor for the bad block table mirror |
410 | * | 371 | * |
411 | * Read the bad block table(s) for all chips starting at a given page | 372 | * Read the bad block table(s) for all chips starting at a given page. We |
412 | * We assume that the bbt bits are in consecutive order. | 373 | * assume that the bbt bits are in consecutive order. |
413 | * | 374 | */ |
414 | */ | ||
415 | static int read_abs_bbts(struct mtd_info *mtd, uint8_t *buf, | 375 | static int read_abs_bbts(struct mtd_info *mtd, uint8_t *buf, |
416 | struct nand_bbt_descr *td, struct nand_bbt_descr *md) | 376 | struct nand_bbt_descr *td, struct nand_bbt_descr *md) |
417 | { | 377 | { |
@@ -422,8 +382,8 @@ static int read_abs_bbts(struct mtd_info *mtd, uint8_t *buf, | |||
422 | scan_read_raw(mtd, buf, (loff_t)td->pages[0] << this->page_shift, | 382 | scan_read_raw(mtd, buf, (loff_t)td->pages[0] << this->page_shift, |
423 | mtd->writesize, td); | 383 | mtd->writesize, td); |
424 | td->version[0] = buf[bbt_get_ver_offs(mtd, td)]; | 384 | td->version[0] = buf[bbt_get_ver_offs(mtd, td)]; |
425 | printk(KERN_DEBUG "Bad block table at page %d, version 0x%02X\n", | 385 | pr_info("Bad block table at page %d, version 0x%02X\n", |
426 | td->pages[0], td->version[0]); | 386 | td->pages[0], td->version[0]); |
427 | } | 387 | } |
428 | 388 | ||
429 | /* Read the mirror version, if available */ | 389 | /* Read the mirror version, if available */ |
@@ -431,15 +391,13 @@ static int read_abs_bbts(struct mtd_info *mtd, uint8_t *buf, | |||
431 | scan_read_raw(mtd, buf, (loff_t)md->pages[0] << this->page_shift, | 391 | scan_read_raw(mtd, buf, (loff_t)md->pages[0] << this->page_shift, |
432 | mtd->writesize, td); | 392 | mtd->writesize, td); |
433 | md->version[0] = buf[bbt_get_ver_offs(mtd, md)]; | 393 | md->version[0] = buf[bbt_get_ver_offs(mtd, md)]; |
434 | printk(KERN_DEBUG "Bad block table at page %d, version 0x%02X\n", | 394 | pr_info("Bad block table at page %d, version 0x%02X\n", |
435 | md->pages[0], md->version[0]); | 395 | md->pages[0], md->version[0]); |
436 | } | 396 | } |
437 | return 1; | 397 | return 1; |
438 | } | 398 | } |
439 | 399 | ||
440 | /* | 400 | /* Scan a given block full */ |
441 | * Scan a given block full | ||
442 | */ | ||
443 | static int scan_block_full(struct mtd_info *mtd, struct nand_bbt_descr *bd, | 401 | static int scan_block_full(struct mtd_info *mtd, struct nand_bbt_descr *bd, |
444 | loff_t offs, uint8_t *buf, size_t readlen, | 402 | loff_t offs, uint8_t *buf, size_t readlen, |
445 | int scanlen, int len) | 403 | int scanlen, int len) |
@@ -447,7 +405,8 @@ static int scan_block_full(struct mtd_info *mtd, struct nand_bbt_descr *bd, | |||
447 | int ret, j; | 405 | int ret, j; |
448 | 406 | ||
449 | ret = scan_read_raw_oob(mtd, buf, offs, readlen); | 407 | ret = scan_read_raw_oob(mtd, buf, offs, readlen); |
450 | if (ret) | 408 | /* Ignore ECC errors when checking for BBM */ |
409 | if (ret && !mtd_is_bitflip_or_eccerr(ret)) | ||
451 | return ret; | 410 | return ret; |
452 | 411 | ||
453 | for (j = 0; j < len; j++, buf += scanlen) { | 412 | for (j = 0; j < len; j++, buf += scanlen) { |
@@ -457,9 +416,7 @@ static int scan_block_full(struct mtd_info *mtd, struct nand_bbt_descr *bd, | |||
457 | return 0; | 416 | return 0; |
458 | } | 417 | } |
459 | 418 | ||
460 | /* | 419 | /* Scan a given block partially */ |
461 | * Scan a given block partially | ||
462 | */ | ||
463 | static int scan_block_fast(struct mtd_info *mtd, struct nand_bbt_descr *bd, | 420 | static int scan_block_fast(struct mtd_info *mtd, struct nand_bbt_descr *bd, |
464 | loff_t offs, uint8_t *buf, int len) | 421 | loff_t offs, uint8_t *buf, int len) |
465 | { | 422 | { |
@@ -470,16 +427,16 @@ static int scan_block_fast(struct mtd_info *mtd, struct nand_bbt_descr *bd, | |||
470 | ops.oobbuf = buf; | 427 | ops.oobbuf = buf; |
471 | ops.ooboffs = 0; | 428 | ops.ooboffs = 0; |
472 | ops.datbuf = NULL; | 429 | ops.datbuf = NULL; |
473 | ops.mode = MTD_OOB_PLACE; | 430 | ops.mode = MTD_OPS_PLACE_OOB; |
474 | 431 | ||
475 | for (j = 0; j < len; j++) { | 432 | for (j = 0; j < len; j++) { |
476 | /* | 433 | /* |
477 | * Read the full oob until read_oob is fixed to | 434 | * Read the full oob until read_oob is fixed to handle single |
478 | * handle single byte reads for 16 bit | 435 | * byte reads for 16 bit buswidth. |
479 | * buswidth | ||
480 | */ | 436 | */ |
481 | ret = mtd->read_oob(mtd, offs, &ops); | 437 | ret = mtd->read_oob(mtd, offs, &ops); |
482 | if (ret) | 438 | /* Ignore ECC errors when checking for BBM */ |
439 | if (ret && !mtd_is_bitflip_or_eccerr(ret)) | ||
483 | return ret; | 440 | return ret; |
484 | 441 | ||
485 | if (check_short_pattern(buf, bd)) | 442 | if (check_short_pattern(buf, bd)) |
@@ -492,14 +449,14 @@ static int scan_block_fast(struct mtd_info *mtd, struct nand_bbt_descr *bd, | |||
492 | 449 | ||
493 | /** | 450 | /** |
494 | * create_bbt - [GENERIC] Create a bad block table by scanning the device | 451 | * create_bbt - [GENERIC] Create a bad block table by scanning the device |
495 | * @mtd: MTD device structure | 452 | * @mtd: MTD device structure |
496 | * @buf: temporary buffer | 453 | * @buf: temporary buffer |
497 | * @bd: descriptor for the good/bad block search pattern | 454 | * @bd: descriptor for the good/bad block search pattern |
498 | * @chip: create the table for a specific chip, -1 read all chips. | 455 | * @chip: create the table for a specific chip, -1 read all chips; applies only |
499 | * Applies only if NAND_BBT_PERCHIP option is set | 456 | * if NAND_BBT_PERCHIP option is set |
500 | * | 457 | * |
501 | * Create a bad block table by scanning the device | 458 | * Create a bad block table by scanning the device for the given good/bad block |
502 | * for the given good/bad block identify pattern | 459 | * identify pattern. |
503 | */ | 460 | */ |
504 | static int create_bbt(struct mtd_info *mtd, uint8_t *buf, | 461 | static int create_bbt(struct mtd_info *mtd, uint8_t *buf, |
505 | struct nand_bbt_descr *bd, int chip) | 462 | struct nand_bbt_descr *bd, int chip) |
@@ -510,7 +467,7 @@ static int create_bbt(struct mtd_info *mtd, uint8_t *buf, | |||
510 | loff_t from; | 467 | loff_t from; |
511 | size_t readlen; | 468 | size_t readlen; |
512 | 469 | ||
513 | printk(KERN_INFO "Scanning device for bad blocks\n"); | 470 | pr_info("Scanning device for bad blocks\n"); |
514 | 471 | ||
515 | if (bd->options & NAND_BBT_SCANALLPAGES) | 472 | if (bd->options & NAND_BBT_SCANALLPAGES) |
516 | len = 1 << (this->bbt_erase_shift - this->page_shift); | 473 | len = 1 << (this->bbt_erase_shift - this->page_shift); |
@@ -530,14 +487,16 @@ static int create_bbt(struct mtd_info *mtd, uint8_t *buf, | |||
530 | } | 487 | } |
531 | 488 | ||
532 | if (chip == -1) { | 489 | if (chip == -1) { |
533 | /* Note that numblocks is 2 * (real numblocks) here, see i+=2 | 490 | /* |
534 | * below as it makes shifting and masking less painful */ | 491 | * Note that numblocks is 2 * (real numblocks) here, see i+=2 |
492 | * below as it makes shifting and masking less painful | ||
493 | */ | ||
535 | numblocks = mtd->size >> (this->bbt_erase_shift - 1); | 494 | numblocks = mtd->size >> (this->bbt_erase_shift - 1); |
536 | startblock = 0; | 495 | startblock = 0; |
537 | from = 0; | 496 | from = 0; |
538 | } else { | 497 | } else { |
539 | if (chip >= this->numchips) { | 498 | if (chip >= this->numchips) { |
540 | printk(KERN_WARNING "create_bbt(): chipnr (%d) > available chips (%d)\n", | 499 | pr_warn("create_bbt(): chipnr (%d) > available chips (%d)\n", |
541 | chip + 1, this->numchips); | 500 | chip + 1, this->numchips); |
542 | return -EINVAL; | 501 | return -EINVAL; |
543 | } | 502 | } |
@@ -547,7 +506,7 @@ static int create_bbt(struct mtd_info *mtd, uint8_t *buf, | |||
547 | from = (loff_t)startblock << (this->bbt_erase_shift - 1); | 506 | from = (loff_t)startblock << (this->bbt_erase_shift - 1); |
548 | } | 507 | } |
549 | 508 | ||
550 | if (this->options & NAND_BBT_SCANLASTPAGE) | 509 | if (this->bbt_options & NAND_BBT_SCANLASTPAGE) |
551 | from += mtd->erasesize - (mtd->writesize * len); | 510 | from += mtd->erasesize - (mtd->writesize * len); |
552 | 511 | ||
553 | for (i = startblock; i < numblocks;) { | 512 | for (i = startblock; i < numblocks;) { |
@@ -566,8 +525,8 @@ static int create_bbt(struct mtd_info *mtd, uint8_t *buf, | |||
566 | 525 | ||
567 | if (ret) { | 526 | if (ret) { |
568 | this->bbt[i >> 3] |= 0x03 << (i & 0x6); | 527 | this->bbt[i >> 3] |= 0x03 << (i & 0x6); |
569 | printk(KERN_WARNING "Bad eraseblock %d at 0x%012llx\n", | 528 | pr_warn("Bad eraseblock %d at 0x%012llx\n", |
570 | i >> 1, (unsigned long long)from); | 529 | i >> 1, (unsigned long long)from); |
571 | mtd->ecc_stats.badblocks++; | 530 | mtd->ecc_stats.badblocks++; |
572 | } | 531 | } |
573 | 532 | ||
@@ -579,20 +538,18 @@ static int create_bbt(struct mtd_info *mtd, uint8_t *buf, | |||
579 | 538 | ||
580 | /** | 539 | /** |
581 | * search_bbt - [GENERIC] scan the device for a specific bad block table | 540 | * search_bbt - [GENERIC] scan the device for a specific bad block table |
582 | * @mtd: MTD device structure | 541 | * @mtd: MTD device structure |
583 | * @buf: temporary buffer | 542 | * @buf: temporary buffer |
584 | * @td: descriptor for the bad block table | 543 | * @td: descriptor for the bad block table |
585 | * | 544 | * |
586 | * Read the bad block table by searching for a given ident pattern. | 545 | * Read the bad block table by searching for a given ident pattern. Search is |
587 | * Search is preformed either from the beginning up or from the end of | 546 | * preformed either from the beginning up or from the end of the device |
588 | * the device downwards. The search starts always at the start of a | 547 | * downwards. The search starts always at the start of a block. If the option |
589 | * block. | 548 | * NAND_BBT_PERCHIP is given, each chip is searched for a bbt, which contains |
590 | * If the option NAND_BBT_PERCHIP is given, each chip is searched | 549 | * the bad block information of this chip. This is necessary to provide support |
591 | * for a bbt, which contains the bad block information of this chip. | 550 | * for certain DOC devices. |
592 | * This is necessary to provide support for certain DOC devices. | ||
593 | * | 551 | * |
594 | * The bbt ident pattern resides in the oob area of the first page | 552 | * The bbt ident pattern resides in the oob area of the first page in a block. |
595 | * in a block. | ||
596 | */ | 553 | */ |
597 | static int search_bbt(struct mtd_info *mtd, uint8_t *buf, struct nand_bbt_descr *td) | 554 | static int search_bbt(struct mtd_info *mtd, uint8_t *buf, struct nand_bbt_descr *td) |
598 | { | 555 | { |
@@ -603,7 +560,7 @@ static int search_bbt(struct mtd_info *mtd, uint8_t *buf, struct nand_bbt_descr | |||
603 | int bbtblocks; | 560 | int bbtblocks; |
604 | int blocktopage = this->bbt_erase_shift - this->page_shift; | 561 | int blocktopage = this->bbt_erase_shift - this->page_shift; |
605 | 562 | ||
606 | /* Search direction top -> down ? */ | 563 | /* Search direction top -> down? */ |
607 | if (td->options & NAND_BBT_LASTBLOCK) { | 564 | if (td->options & NAND_BBT_LASTBLOCK) { |
608 | startblock = (mtd->size >> this->bbt_erase_shift) - 1; | 565 | startblock = (mtd->size >> this->bbt_erase_shift) - 1; |
609 | dir = -1; | 566 | dir = -1; |
@@ -612,7 +569,7 @@ static int search_bbt(struct mtd_info *mtd, uint8_t *buf, struct nand_bbt_descr | |||
612 | dir = 1; | 569 | dir = 1; |
613 | } | 570 | } |
614 | 571 | ||
615 | /* Do we have a bbt per chip ? */ | 572 | /* Do we have a bbt per chip? */ |
616 | if (td->options & NAND_BBT_PERCHIP) { | 573 | if (td->options & NAND_BBT_PERCHIP) { |
617 | chips = this->numchips; | 574 | chips = this->numchips; |
618 | bbtblocks = this->chipsize >> this->bbt_erase_shift; | 575 | bbtblocks = this->chipsize >> this->bbt_erase_shift; |
@@ -651,23 +608,23 @@ static int search_bbt(struct mtd_info *mtd, uint8_t *buf, struct nand_bbt_descr | |||
651 | /* Check, if we found a bbt for each requested chip */ | 608 | /* Check, if we found a bbt for each requested chip */ |
652 | for (i = 0; i < chips; i++) { | 609 | for (i = 0; i < chips; i++) { |
653 | if (td->pages[i] == -1) | 610 | if (td->pages[i] == -1) |
654 | printk(KERN_WARNING "Bad block table not found for chip %d\n", i); | 611 | pr_warn("Bad block table not found for chip %d\n", i); |
655 | else | 612 | else |
656 | printk(KERN_DEBUG "Bad block table found at page %d, version 0x%02X\n", td->pages[i], | 613 | pr_info("Bad block table found at page %d, version " |
657 | td->version[i]); | 614 | "0x%02X\n", td->pages[i], td->version[i]); |
658 | } | 615 | } |
659 | return 0; | 616 | return 0; |
660 | } | 617 | } |
661 | 618 | ||
662 | /** | 619 | /** |
663 | * search_read_bbts - [GENERIC] scan the device for bad block table(s) | 620 | * search_read_bbts - [GENERIC] scan the device for bad block table(s) |
664 | * @mtd: MTD device structure | 621 | * @mtd: MTD device structure |
665 | * @buf: temporary buffer | 622 | * @buf: temporary buffer |
666 | * @td: descriptor for the bad block table | 623 | * @td: descriptor for the bad block table |
667 | * @md: descriptor for the bad block table mirror | 624 | * @md: descriptor for the bad block table mirror |
668 | * | 625 | * |
669 | * Search and read the bad block table(s) | 626 | * Search and read the bad block table(s). |
670 | */ | 627 | */ |
671 | static int search_read_bbts(struct mtd_info *mtd, uint8_t * buf, struct nand_bbt_descr *td, struct nand_bbt_descr *md) | 628 | static int search_read_bbts(struct mtd_info *mtd, uint8_t * buf, struct nand_bbt_descr *td, struct nand_bbt_descr *md) |
672 | { | 629 | { |
673 | /* Search the primary table */ | 630 | /* Search the primary table */ |
@@ -683,16 +640,14 @@ static int search_read_bbts(struct mtd_info *mtd, uint8_t * buf, struct nand_bbt | |||
683 | 640 | ||
684 | /** | 641 | /** |
685 | * write_bbt - [GENERIC] (Re)write the bad block table | 642 | * write_bbt - [GENERIC] (Re)write the bad block table |
643 | * @mtd: MTD device structure | ||
644 | * @buf: temporary buffer | ||
645 | * @td: descriptor for the bad block table | ||
646 | * @md: descriptor for the bad block table mirror | ||
647 | * @chipsel: selector for a specific chip, -1 for all | ||
686 | * | 648 | * |
687 | * @mtd: MTD device structure | 649 | * (Re)write the bad block table. |
688 | * @buf: temporary buffer | 650 | */ |
689 | * @td: descriptor for the bad block table | ||
690 | * @md: descriptor for the bad block table mirror | ||
691 | * @chipsel: selector for a specific chip, -1 for all | ||
692 | * | ||
693 | * (Re)write the bad block table | ||
694 | * | ||
695 | */ | ||
696 | static int write_bbt(struct mtd_info *mtd, uint8_t *buf, | 651 | static int write_bbt(struct mtd_info *mtd, uint8_t *buf, |
697 | struct nand_bbt_descr *td, struct nand_bbt_descr *md, | 652 | struct nand_bbt_descr *td, struct nand_bbt_descr *md, |
698 | int chipsel) | 653 | int chipsel) |
@@ -711,14 +666,14 @@ static int write_bbt(struct mtd_info *mtd, uint8_t *buf, | |||
711 | ops.ooblen = mtd->oobsize; | 666 | ops.ooblen = mtd->oobsize; |
712 | ops.ooboffs = 0; | 667 | ops.ooboffs = 0; |
713 | ops.datbuf = NULL; | 668 | ops.datbuf = NULL; |
714 | ops.mode = MTD_OOB_PLACE; | 669 | ops.mode = MTD_OPS_PLACE_OOB; |
715 | 670 | ||
716 | if (!rcode) | 671 | if (!rcode) |
717 | rcode = 0xff; | 672 | rcode = 0xff; |
718 | /* Write bad block table per chip rather than per device ? */ | 673 | /* Write bad block table per chip rather than per device? */ |
719 | if (td->options & NAND_BBT_PERCHIP) { | 674 | if (td->options & NAND_BBT_PERCHIP) { |
720 | numblocks = (int)(this->chipsize >> this->bbt_erase_shift); | 675 | numblocks = (int)(this->chipsize >> this->bbt_erase_shift); |
721 | /* Full device write or specific chip ? */ | 676 | /* Full device write or specific chip? */ |
722 | if (chipsel == -1) { | 677 | if (chipsel == -1) { |
723 | nrchips = this->numchips; | 678 | nrchips = this->numchips; |
724 | } else { | 679 | } else { |
@@ -732,8 +687,8 @@ static int write_bbt(struct mtd_info *mtd, uint8_t *buf, | |||
732 | 687 | ||
733 | /* Loop through the chips */ | 688 | /* Loop through the chips */ |
734 | for (; chip < nrchips; chip++) { | 689 | for (; chip < nrchips; chip++) { |
735 | 690 | /* | |
736 | /* There was already a version of the table, reuse the page | 691 | * There was already a version of the table, reuse the page |
737 | * This applies for absolute placement too, as we have the | 692 | * This applies for absolute placement too, as we have the |
738 | * page nr. in td->pages. | 693 | * page nr. in td->pages. |
739 | */ | 694 | */ |
@@ -742,8 +697,10 @@ static int write_bbt(struct mtd_info *mtd, uint8_t *buf, | |||
742 | goto write; | 697 | goto write; |
743 | } | 698 | } |
744 | 699 | ||
745 | /* Automatic placement of the bad block table */ | 700 | /* |
746 | /* Search direction top -> down ? */ | 701 | * Automatic placement of the bad block table. Search direction |
702 | * top -> down? | ||
703 | */ | ||
747 | if (td->options & NAND_BBT_LASTBLOCK) { | 704 | if (td->options & NAND_BBT_LASTBLOCK) { |
748 | startblock = numblocks * (chip + 1) - 1; | 705 | startblock = numblocks * (chip + 1) - 1; |
749 | dir = -1; | 706 | dir = -1; |
@@ -767,7 +724,7 @@ static int write_bbt(struct mtd_info *mtd, uint8_t *buf, | |||
767 | if (!md || md->pages[chip] != page) | 724 | if (!md || md->pages[chip] != page) |
768 | goto write; | 725 | goto write; |
769 | } | 726 | } |
770 | printk(KERN_ERR "No space left to write bad block table\n"); | 727 | pr_err("No space left to write bad block table\n"); |
771 | return -ENOSPC; | 728 | return -ENOSPC; |
772 | write: | 729 | write: |
773 | 730 | ||
@@ -792,24 +749,22 @@ static int write_bbt(struct mtd_info *mtd, uint8_t *buf, | |||
792 | 749 | ||
793 | bbtoffs = chip * (numblocks >> 2); | 750 | bbtoffs = chip * (numblocks >> 2); |
794 | 751 | ||
795 | to = ((loff_t) page) << this->page_shift; | 752 | to = ((loff_t)page) << this->page_shift; |
796 | 753 | ||
797 | /* Must we save the block contents ? */ | 754 | /* Must we save the block contents? */ |
798 | if (td->options & NAND_BBT_SAVECONTENT) { | 755 | if (td->options & NAND_BBT_SAVECONTENT) { |
799 | /* Make it block aligned */ | 756 | /* Make it block aligned */ |
800 | to &= ~((loff_t) ((1 << this->bbt_erase_shift) - 1)); | 757 | to &= ~((loff_t)((1 << this->bbt_erase_shift) - 1)); |
801 | len = 1 << this->bbt_erase_shift; | 758 | len = 1 << this->bbt_erase_shift; |
802 | res = mtd->read(mtd, to, len, &retlen, buf); | 759 | res = mtd->read(mtd, to, len, &retlen, buf); |
803 | if (res < 0) { | 760 | if (res < 0) { |
804 | if (retlen != len) { | 761 | if (retlen != len) { |
805 | printk(KERN_INFO "nand_bbt: Error " | 762 | pr_info("nand_bbt: error reading block " |
806 | "reading block for writing " | 763 | "for writing the bad block table\n"); |
807 | "the bad block table\n"); | ||
808 | return res; | 764 | return res; |
809 | } | 765 | } |
810 | printk(KERN_WARNING "nand_bbt: ECC error " | 766 | pr_warn("nand_bbt: ECC error while reading " |
811 | "while reading block for writing " | 767 | "block for writing bad block table\n"); |
812 | "bad block table\n"); | ||
813 | } | 768 | } |
814 | /* Read oob data */ | 769 | /* Read oob data */ |
815 | ops.ooblen = (len >> this->page_shift) * mtd->oobsize; | 770 | ops.ooblen = (len >> this->page_shift) * mtd->oobsize; |
@@ -822,19 +777,19 @@ static int write_bbt(struct mtd_info *mtd, uint8_t *buf, | |||
822 | pageoffs = page - (int)(to >> this->page_shift); | 777 | pageoffs = page - (int)(to >> this->page_shift); |
823 | offs = pageoffs << this->page_shift; | 778 | offs = pageoffs << this->page_shift; |
824 | /* Preset the bbt area with 0xff */ | 779 | /* Preset the bbt area with 0xff */ |
825 | memset(&buf[offs], 0xff, (size_t) (numblocks >> sft)); | 780 | memset(&buf[offs], 0xff, (size_t)(numblocks >> sft)); |
826 | ooboffs = len + (pageoffs * mtd->oobsize); | 781 | ooboffs = len + (pageoffs * mtd->oobsize); |
827 | 782 | ||
828 | } else if (td->options & NAND_BBT_NO_OOB) { | 783 | } else if (td->options & NAND_BBT_NO_OOB) { |
829 | ooboffs = 0; | 784 | ooboffs = 0; |
830 | offs = td->len; | 785 | offs = td->len; |
831 | /* the version byte */ | 786 | /* The version byte */ |
832 | if (td->options & NAND_BBT_VERSION) | 787 | if (td->options & NAND_BBT_VERSION) |
833 | offs++; | 788 | offs++; |
834 | /* Calc length */ | 789 | /* Calc length */ |
835 | len = (size_t) (numblocks >> sft); | 790 | len = (size_t)(numblocks >> sft); |
836 | len += offs; | 791 | len += offs; |
837 | /* Make it page aligned ! */ | 792 | /* Make it page aligned! */ |
838 | len = ALIGN(len, mtd->writesize); | 793 | len = ALIGN(len, mtd->writesize); |
839 | /* Preset the buffer with 0xff */ | 794 | /* Preset the buffer with 0xff */ |
840 | memset(buf, 0xff, len); | 795 | memset(buf, 0xff, len); |
@@ -842,8 +797,8 @@ static int write_bbt(struct mtd_info *mtd, uint8_t *buf, | |||
842 | memcpy(buf, td->pattern, td->len); | 797 | memcpy(buf, td->pattern, td->len); |
843 | } else { | 798 | } else { |
844 | /* Calc length */ | 799 | /* Calc length */ |
845 | len = (size_t) (numblocks >> sft); | 800 | len = (size_t)(numblocks >> sft); |
846 | /* Make it page aligned ! */ | 801 | /* Make it page aligned! */ |
847 | len = ALIGN(len, mtd->writesize); | 802 | len = ALIGN(len, mtd->writesize); |
848 | /* Preset the buffer with 0xff */ | 803 | /* Preset the buffer with 0xff */ |
849 | memset(buf, 0xff, len + | 804 | memset(buf, 0xff, len + |
@@ -857,13 +812,13 @@ static int write_bbt(struct mtd_info *mtd, uint8_t *buf, | |||
857 | if (td->options & NAND_BBT_VERSION) | 812 | if (td->options & NAND_BBT_VERSION) |
858 | buf[ooboffs + td->veroffs] = td->version[chip]; | 813 | buf[ooboffs + td->veroffs] = td->version[chip]; |
859 | 814 | ||
860 | /* walk through the memory table */ | 815 | /* Walk through the memory table */ |
861 | for (i = 0; i < numblocks;) { | 816 | for (i = 0; i < numblocks;) { |
862 | uint8_t dat; | 817 | uint8_t dat; |
863 | dat = this->bbt[bbtoffs + (i >> 2)]; | 818 | dat = this->bbt[bbtoffs + (i >> 2)]; |
864 | for (j = 0; j < 4; j++, i++) { | 819 | for (j = 0; j < 4; j++, i++) { |
865 | int sftcnt = (i << (3 - sft)) & sftmsk; | 820 | int sftcnt = (i << (3 - sft)) & sftmsk; |
866 | /* Do not store the reserved bbt blocks ! */ | 821 | /* Do not store the reserved bbt blocks! */ |
867 | buf[offs + (i >> sft)] &= | 822 | buf[offs + (i >> sft)] &= |
868 | ~(msk[dat & 0x03] << sftcnt); | 823 | ~(msk[dat & 0x03] << sftcnt); |
869 | dat >>= 2; | 824 | dat >>= 2; |
@@ -884,8 +839,8 @@ static int write_bbt(struct mtd_info *mtd, uint8_t *buf, | |||
884 | if (res < 0) | 839 | if (res < 0) |
885 | goto outerr; | 840 | goto outerr; |
886 | 841 | ||
887 | printk(KERN_DEBUG "Bad block table written to 0x%012llx, version " | 842 | pr_info("Bad block table written to 0x%012llx, version 0x%02X\n", |
888 | "0x%02X\n", (unsigned long long)to, td->version[chip]); | 843 | (unsigned long long)to, td->version[chip]); |
889 | 844 | ||
890 | /* Mark it as used */ | 845 | /* Mark it as used */ |
891 | td->pages[chip] = page; | 846 | td->pages[chip] = page; |
@@ -893,19 +848,18 @@ static int write_bbt(struct mtd_info *mtd, uint8_t *buf, | |||
893 | return 0; | 848 | return 0; |
894 | 849 | ||
895 | outerr: | 850 | outerr: |
896 | printk(KERN_WARNING | 851 | pr_warn("nand_bbt: error while writing bad block table %d\n", res); |
897 | "nand_bbt: Error while writing bad block table %d\n", res); | ||
898 | return res; | 852 | return res; |
899 | } | 853 | } |
900 | 854 | ||
901 | /** | 855 | /** |
902 | * nand_memory_bbt - [GENERIC] create a memory based bad block table | 856 | * nand_memory_bbt - [GENERIC] create a memory based bad block table |
903 | * @mtd: MTD device structure | 857 | * @mtd: MTD device structure |
904 | * @bd: descriptor for the good/bad block search pattern | 858 | * @bd: descriptor for the good/bad block search pattern |
905 | * | 859 | * |
906 | * The function creates a memory based bbt by scanning the device | 860 | * The function creates a memory based bbt by scanning the device for |
907 | * for manufacturer / software marked good / bad blocks | 861 | * manufacturer / software marked good / bad blocks. |
908 | */ | 862 | */ |
909 | static inline int nand_memory_bbt(struct mtd_info *mtd, struct nand_bbt_descr *bd) | 863 | static inline int nand_memory_bbt(struct mtd_info *mtd, struct nand_bbt_descr *bd) |
910 | { | 864 | { |
911 | struct nand_chip *this = mtd->priv; | 865 | struct nand_chip *this = mtd->priv; |
@@ -916,25 +870,24 @@ static inline int nand_memory_bbt(struct mtd_info *mtd, struct nand_bbt_descr *b | |||
916 | 870 | ||
917 | /** | 871 | /** |
918 | * check_create - [GENERIC] create and write bbt(s) if necessary | 872 | * check_create - [GENERIC] create and write bbt(s) if necessary |
919 | * @mtd: MTD device structure | 873 | * @mtd: MTD device structure |
920 | * @buf: temporary buffer | 874 | * @buf: temporary buffer |
921 | * @bd: descriptor for the good/bad block search pattern | 875 | * @bd: descriptor for the good/bad block search pattern |
922 | * | 876 | * |
923 | * The function checks the results of the previous call to read_bbt | 877 | * The function checks the results of the previous call to read_bbt and creates |
924 | * and creates / updates the bbt(s) if necessary | 878 | * / updates the bbt(s) if necessary. Creation is necessary if no bbt was found |
925 | * Creation is necessary if no bbt was found for the chip/device | 879 | * for the chip/device. Update is necessary if one of the tables is missing or |
926 | * Update is necessary if one of the tables is missing or the | 880 | * the version nr. of one table is less than the other. |
927 | * version nr. of one table is less than the other | 881 | */ |
928 | */ | ||
929 | static int check_create(struct mtd_info *mtd, uint8_t *buf, struct nand_bbt_descr *bd) | 882 | static int check_create(struct mtd_info *mtd, uint8_t *buf, struct nand_bbt_descr *bd) |
930 | { | 883 | { |
931 | int i, chips, writeops, chipsel, res; | 884 | int i, chips, writeops, create, chipsel, res, res2; |
932 | struct nand_chip *this = mtd->priv; | 885 | struct nand_chip *this = mtd->priv; |
933 | struct nand_bbt_descr *td = this->bbt_td; | 886 | struct nand_bbt_descr *td = this->bbt_td; |
934 | struct nand_bbt_descr *md = this->bbt_md; | 887 | struct nand_bbt_descr *md = this->bbt_md; |
935 | struct nand_bbt_descr *rd, *rd2; | 888 | struct nand_bbt_descr *rd, *rd2; |
936 | 889 | ||
937 | /* Do we have a bbt per chip ? */ | 890 | /* Do we have a bbt per chip? */ |
938 | if (td->options & NAND_BBT_PERCHIP) | 891 | if (td->options & NAND_BBT_PERCHIP) |
939 | chips = this->numchips; | 892 | chips = this->numchips; |
940 | else | 893 | else |
@@ -942,86 +895,98 @@ static int check_create(struct mtd_info *mtd, uint8_t *buf, struct nand_bbt_desc | |||
942 | 895 | ||
943 | for (i = 0; i < chips; i++) { | 896 | for (i = 0; i < chips; i++) { |
944 | writeops = 0; | 897 | writeops = 0; |
898 | create = 0; | ||
945 | rd = NULL; | 899 | rd = NULL; |
946 | rd2 = NULL; | 900 | rd2 = NULL; |
947 | /* Per chip or per device ? */ | 901 | res = res2 = 0; |
902 | /* Per chip or per device? */ | ||
948 | chipsel = (td->options & NAND_BBT_PERCHIP) ? i : -1; | 903 | chipsel = (td->options & NAND_BBT_PERCHIP) ? i : -1; |
949 | /* Mirrored table available ? */ | 904 | /* Mirrored table available? */ |
950 | if (md) { | 905 | if (md) { |
951 | if (td->pages[i] == -1 && md->pages[i] == -1) { | 906 | if (td->pages[i] == -1 && md->pages[i] == -1) { |
907 | create = 1; | ||
952 | writeops = 0x03; | 908 | writeops = 0x03; |
953 | goto create; | 909 | } else if (td->pages[i] == -1) { |
954 | } | ||
955 | |||
956 | if (td->pages[i] == -1) { | ||
957 | rd = md; | 910 | rd = md; |
958 | td->version[i] = md->version[i]; | 911 | writeops = 0x01; |
959 | writeops = 1; | 912 | } else if (md->pages[i] == -1) { |
960 | goto writecheck; | ||
961 | } | ||
962 | |||
963 | if (md->pages[i] == -1) { | ||
964 | rd = td; | 913 | rd = td; |
965 | md->version[i] = td->version[i]; | 914 | writeops = 0x02; |
966 | writeops = 2; | 915 | } else if (td->version[i] == md->version[i]) { |
967 | goto writecheck; | ||
968 | } | ||
969 | |||
970 | if (td->version[i] == md->version[i]) { | ||
971 | rd = td; | 916 | rd = td; |
972 | if (!(td->options & NAND_BBT_VERSION)) | 917 | if (!(td->options & NAND_BBT_VERSION)) |
973 | rd2 = md; | 918 | rd2 = md; |
974 | goto writecheck; | 919 | } else if (((int8_t)(td->version[i] - md->version[i])) > 0) { |
975 | } | ||
976 | |||
977 | if (((int8_t) (td->version[i] - md->version[i])) > 0) { | ||
978 | rd = td; | 920 | rd = td; |
979 | md->version[i] = td->version[i]; | 921 | writeops = 0x02; |
980 | writeops = 2; | ||
981 | } else { | 922 | } else { |
982 | rd = md; | 923 | rd = md; |
983 | td->version[i] = md->version[i]; | 924 | writeops = 0x01; |
984 | writeops = 1; | ||
985 | } | 925 | } |
986 | |||
987 | goto writecheck; | ||
988 | |||
989 | } else { | 926 | } else { |
990 | if (td->pages[i] == -1) { | 927 | if (td->pages[i] == -1) { |
928 | create = 1; | ||
991 | writeops = 0x01; | 929 | writeops = 0x01; |
992 | goto create; | 930 | } else { |
931 | rd = td; | ||
993 | } | 932 | } |
994 | rd = td; | ||
995 | goto writecheck; | ||
996 | } | 933 | } |
997 | create: | ||
998 | /* Create the bad block table by scanning the device ? */ | ||
999 | if (!(td->options & NAND_BBT_CREATE)) | ||
1000 | continue; | ||
1001 | 934 | ||
1002 | /* Create the table in memory by scanning the chip(s) */ | 935 | if (create) { |
1003 | if (!(this->options & NAND_CREATE_EMPTY_BBT)) | 936 | /* Create the bad block table by scanning the device? */ |
1004 | create_bbt(mtd, buf, bd, chipsel); | 937 | if (!(td->options & NAND_BBT_CREATE)) |
1005 | 938 | continue; | |
1006 | td->version[i] = 1; | 939 | |
1007 | if (md) | 940 | /* Create the table in memory by scanning the chip(s) */ |
1008 | md->version[i] = 1; | 941 | if (!(this->bbt_options & NAND_BBT_CREATE_EMPTY)) |
1009 | writecheck: | 942 | create_bbt(mtd, buf, bd, chipsel); |
1010 | /* read back first ? */ | 943 | |
1011 | if (rd) | 944 | td->version[i] = 1; |
1012 | read_abs_bbt(mtd, buf, rd, chipsel); | 945 | if (md) |
1013 | /* If they weren't versioned, read both. */ | 946 | md->version[i] = 1; |
1014 | if (rd2) | 947 | } |
1015 | read_abs_bbt(mtd, buf, rd2, chipsel); | 948 | |
1016 | 949 | /* Read back first? */ | |
1017 | /* Write the bad block table to the device ? */ | 950 | if (rd) { |
951 | res = read_abs_bbt(mtd, buf, rd, chipsel); | ||
952 | if (mtd_is_eccerr(res)) { | ||
953 | /* Mark table as invalid */ | ||
954 | rd->pages[i] = -1; | ||
955 | rd->version[i] = 0; | ||
956 | i--; | ||
957 | continue; | ||
958 | } | ||
959 | } | ||
960 | /* If they weren't versioned, read both */ | ||
961 | if (rd2) { | ||
962 | res2 = read_abs_bbt(mtd, buf, rd2, chipsel); | ||
963 | if (mtd_is_eccerr(res2)) { | ||
964 | /* Mark table as invalid */ | ||
965 | rd2->pages[i] = -1; | ||
966 | rd2->version[i] = 0; | ||
967 | i--; | ||
968 | continue; | ||
969 | } | ||
970 | } | ||
971 | |||
972 | /* Scrub the flash table(s)? */ | ||
973 | if (mtd_is_bitflip(res) || mtd_is_bitflip(res2)) | ||
974 | writeops = 0x03; | ||
975 | |||
976 | /* Update version numbers before writing */ | ||
977 | if (md) { | ||
978 | td->version[i] = max(td->version[i], md->version[i]); | ||
979 | md->version[i] = td->version[i]; | ||
980 | } | ||
981 | |||
982 | /* Write the bad block table to the device? */ | ||
1018 | if ((writeops & 0x01) && (td->options & NAND_BBT_WRITE)) { | 983 | if ((writeops & 0x01) && (td->options & NAND_BBT_WRITE)) { |
1019 | res = write_bbt(mtd, buf, td, md, chipsel); | 984 | res = write_bbt(mtd, buf, td, md, chipsel); |
1020 | if (res < 0) | 985 | if (res < 0) |
1021 | return res; | 986 | return res; |
1022 | } | 987 | } |
1023 | 988 | ||
1024 | /* Write the mirror bad block table to the device ? */ | 989 | /* Write the mirror bad block table to the device? */ |
1025 | if ((writeops & 0x02) && md && (md->options & NAND_BBT_WRITE)) { | 990 | if ((writeops & 0x02) && md && (md->options & NAND_BBT_WRITE)) { |
1026 | res = write_bbt(mtd, buf, md, td, chipsel); | 991 | res = write_bbt(mtd, buf, md, td, chipsel); |
1027 | if (res < 0) | 992 | if (res < 0) |
@@ -1033,20 +998,19 @@ static int check_create(struct mtd_info *mtd, uint8_t *buf, struct nand_bbt_desc | |||
1033 | 998 | ||
1034 | /** | 999 | /** |
1035 | * mark_bbt_regions - [GENERIC] mark the bad block table regions | 1000 | * mark_bbt_regions - [GENERIC] mark the bad block table regions |
1036 | * @mtd: MTD device structure | 1001 | * @mtd: MTD device structure |
1037 | * @td: bad block table descriptor | 1002 | * @td: bad block table descriptor |
1038 | * | 1003 | * |
1039 | * The bad block table regions are marked as "bad" to prevent | 1004 | * The bad block table regions are marked as "bad" to prevent accidental |
1040 | * accidental erasures / writes. The regions are identified by | 1005 | * erasures / writes. The regions are identified by the mark 0x02. |
1041 | * the mark 0x02. | 1006 | */ |
1042 | */ | ||
1043 | static void mark_bbt_region(struct mtd_info *mtd, struct nand_bbt_descr *td) | 1007 | static void mark_bbt_region(struct mtd_info *mtd, struct nand_bbt_descr *td) |
1044 | { | 1008 | { |
1045 | struct nand_chip *this = mtd->priv; | 1009 | struct nand_chip *this = mtd->priv; |
1046 | int i, j, chips, block, nrblocks, update; | 1010 | int i, j, chips, block, nrblocks, update; |
1047 | uint8_t oldval, newval; | 1011 | uint8_t oldval, newval; |
1048 | 1012 | ||
1049 | /* Do we have a bbt per chip ? */ | 1013 | /* Do we have a bbt per chip? */ |
1050 | if (td->options & NAND_BBT_PERCHIP) { | 1014 | if (td->options & NAND_BBT_PERCHIP) { |
1051 | chips = this->numchips; | 1015 | chips = this->numchips; |
1052 | nrblocks = (int)(this->chipsize >> this->bbt_erase_shift); | 1016 | nrblocks = (int)(this->chipsize >> this->bbt_erase_shift); |
@@ -1083,9 +1047,11 @@ static void mark_bbt_region(struct mtd_info *mtd, struct nand_bbt_descr *td) | |||
1083 | update = 1; | 1047 | update = 1; |
1084 | block += 2; | 1048 | block += 2; |
1085 | } | 1049 | } |
1086 | /* If we want reserved blocks to be recorded to flash, and some | 1050 | /* |
1087 | new ones have been marked, then we need to update the stored | 1051 | * If we want reserved blocks to be recorded to flash, and some |
1088 | bbts. This should only happen once. */ | 1052 | * new ones have been marked, then we need to update the stored |
1053 | * bbts. This should only happen once. | ||
1054 | */ | ||
1089 | if (update && td->reserved_block_code) | 1055 | if (update && td->reserved_block_code) |
1090 | nand_update_bbt(mtd, (loff_t)(block - 2) << (this->bbt_erase_shift - 1)); | 1056 | nand_update_bbt(mtd, (loff_t)(block - 2) << (this->bbt_erase_shift - 1)); |
1091 | } | 1057 | } |
@@ -1093,8 +1059,8 @@ static void mark_bbt_region(struct mtd_info *mtd, struct nand_bbt_descr *td) | |||
1093 | 1059 | ||
1094 | /** | 1060 | /** |
1095 | * verify_bbt_descr - verify the bad block description | 1061 | * verify_bbt_descr - verify the bad block description |
1096 | * @mtd: MTD device structure | 1062 | * @mtd: MTD device structure |
1097 | * @bd: the table to verify | 1063 | * @bd: the table to verify |
1098 | * | 1064 | * |
1099 | * This functions performs a few sanity checks on the bad block description | 1065 | * This functions performs a few sanity checks on the bad block description |
1100 | * table. | 1066 | * table. |
@@ -1112,16 +1078,16 @@ static void verify_bbt_descr(struct mtd_info *mtd, struct nand_bbt_descr *bd) | |||
1112 | pattern_len = bd->len; | 1078 | pattern_len = bd->len; |
1113 | bits = bd->options & NAND_BBT_NRBITS_MSK; | 1079 | bits = bd->options & NAND_BBT_NRBITS_MSK; |
1114 | 1080 | ||
1115 | BUG_ON((this->options & NAND_USE_FLASH_BBT_NO_OOB) && | 1081 | BUG_ON((this->bbt_options & NAND_BBT_NO_OOB) && |
1116 | !(this->options & NAND_USE_FLASH_BBT)); | 1082 | !(this->bbt_options & NAND_BBT_USE_FLASH)); |
1117 | BUG_ON(!bits); | 1083 | BUG_ON(!bits); |
1118 | 1084 | ||
1119 | if (bd->options & NAND_BBT_VERSION) | 1085 | if (bd->options & NAND_BBT_VERSION) |
1120 | pattern_len++; | 1086 | pattern_len++; |
1121 | 1087 | ||
1122 | if (bd->options & NAND_BBT_NO_OOB) { | 1088 | if (bd->options & NAND_BBT_NO_OOB) { |
1123 | BUG_ON(!(this->options & NAND_USE_FLASH_BBT)); | 1089 | BUG_ON(!(this->bbt_options & NAND_BBT_USE_FLASH)); |
1124 | BUG_ON(!(this->options & NAND_USE_FLASH_BBT_NO_OOB)); | 1090 | BUG_ON(!(this->bbt_options & NAND_BBT_NO_OOB)); |
1125 | BUG_ON(bd->offs); | 1091 | BUG_ON(bd->offs); |
1126 | if (bd->options & NAND_BBT_VERSION) | 1092 | if (bd->options & NAND_BBT_VERSION) |
1127 | BUG_ON(bd->veroffs != bd->len); | 1093 | BUG_ON(bd->veroffs != bd->len); |
@@ -1141,18 +1107,16 @@ static void verify_bbt_descr(struct mtd_info *mtd, struct nand_bbt_descr *bd) | |||
1141 | 1107 | ||
1142 | /** | 1108 | /** |
1143 | * nand_scan_bbt - [NAND Interface] scan, find, read and maybe create bad block table(s) | 1109 | * nand_scan_bbt - [NAND Interface] scan, find, read and maybe create bad block table(s) |
1144 | * @mtd: MTD device structure | 1110 | * @mtd: MTD device structure |
1145 | * @bd: descriptor for the good/bad block search pattern | 1111 | * @bd: descriptor for the good/bad block search pattern |
1146 | * | ||
1147 | * The function checks, if a bad block table(s) is/are already | ||
1148 | * available. If not it scans the device for manufacturer | ||
1149 | * marked good / bad blocks and writes the bad block table(s) to | ||
1150 | * the selected place. | ||
1151 | * | 1112 | * |
1152 | * The bad block table memory is allocated here. It must be freed | 1113 | * The function checks, if a bad block table(s) is/are already available. If |
1153 | * by calling the nand_free_bbt function. | 1114 | * not it scans the device for manufacturer marked good / bad blocks and writes |
1115 | * the bad block table(s) to the selected place. | ||
1154 | * | 1116 | * |
1155 | */ | 1117 | * The bad block table memory is allocated here. It must be freed by calling |
1118 | * the nand_free_bbt function. | ||
1119 | */ | ||
1156 | int nand_scan_bbt(struct mtd_info *mtd, struct nand_bbt_descr *bd) | 1120 | int nand_scan_bbt(struct mtd_info *mtd, struct nand_bbt_descr *bd) |
1157 | { | 1121 | { |
1158 | struct nand_chip *this = mtd->priv; | 1122 | struct nand_chip *this = mtd->priv; |
@@ -1162,19 +1126,21 @@ int nand_scan_bbt(struct mtd_info *mtd, struct nand_bbt_descr *bd) | |||
1162 | struct nand_bbt_descr *md = this->bbt_md; | 1126 | struct nand_bbt_descr *md = this->bbt_md; |
1163 | 1127 | ||
1164 | len = mtd->size >> (this->bbt_erase_shift + 2); | 1128 | len = mtd->size >> (this->bbt_erase_shift + 2); |
1165 | /* Allocate memory (2bit per block) and clear the memory bad block table */ | 1129 | /* |
1130 | * Allocate memory (2bit per block) and clear the memory bad block | ||
1131 | * table. | ||
1132 | */ | ||
1166 | this->bbt = kzalloc(len, GFP_KERNEL); | 1133 | this->bbt = kzalloc(len, GFP_KERNEL); |
1167 | if (!this->bbt) { | 1134 | if (!this->bbt) |
1168 | printk(KERN_ERR "nand_scan_bbt: Out of memory\n"); | ||
1169 | return -ENOMEM; | 1135 | return -ENOMEM; |
1170 | } | ||
1171 | 1136 | ||
1172 | /* If no primary table decriptor is given, scan the device | 1137 | /* |
1173 | * to build a memory based bad block table | 1138 | * If no primary table decriptor is given, scan the device to build a |
1139 | * memory based bad block table. | ||
1174 | */ | 1140 | */ |
1175 | if (!td) { | 1141 | if (!td) { |
1176 | if ((res = nand_memory_bbt(mtd, bd))) { | 1142 | if ((res = nand_memory_bbt(mtd, bd))) { |
1177 | printk(KERN_ERR "nand_bbt: Can't scan flash and build the RAM-based BBT\n"); | 1143 | pr_err("nand_bbt: can't scan flash and build the RAM-based BBT\n"); |
1178 | kfree(this->bbt); | 1144 | kfree(this->bbt); |
1179 | this->bbt = NULL; | 1145 | this->bbt = NULL; |
1180 | } | 1146 | } |
@@ -1188,13 +1154,12 @@ int nand_scan_bbt(struct mtd_info *mtd, struct nand_bbt_descr *bd) | |||
1188 | len += (len >> this->page_shift) * mtd->oobsize; | 1154 | len += (len >> this->page_shift) * mtd->oobsize; |
1189 | buf = vmalloc(len); | 1155 | buf = vmalloc(len); |
1190 | if (!buf) { | 1156 | if (!buf) { |
1191 | printk(KERN_ERR "nand_bbt: Out of memory\n"); | ||
1192 | kfree(this->bbt); | 1157 | kfree(this->bbt); |
1193 | this->bbt = NULL; | 1158 | this->bbt = NULL; |
1194 | return -ENOMEM; | 1159 | return -ENOMEM; |
1195 | } | 1160 | } |
1196 | 1161 | ||
1197 | /* Is the bbt at a given page ? */ | 1162 | /* Is the bbt at a given page? */ |
1198 | if (td->options & NAND_BBT_ABSPAGE) { | 1163 | if (td->options & NAND_BBT_ABSPAGE) { |
1199 | res = read_abs_bbts(mtd, buf, td, md); | 1164 | res = read_abs_bbts(mtd, buf, td, md); |
1200 | } else { | 1165 | } else { |
@@ -1216,15 +1181,15 @@ int nand_scan_bbt(struct mtd_info *mtd, struct nand_bbt_descr *bd) | |||
1216 | 1181 | ||
1217 | /** | 1182 | /** |
1218 | * nand_update_bbt - [NAND Interface] update bad block table(s) | 1183 | * nand_update_bbt - [NAND Interface] update bad block table(s) |
1219 | * @mtd: MTD device structure | 1184 | * @mtd: MTD device structure |
1220 | * @offs: the offset of the newly marked block | 1185 | * @offs: the offset of the newly marked block |
1221 | * | 1186 | * |
1222 | * The function updates the bad block table(s) | 1187 | * The function updates the bad block table(s). |
1223 | */ | 1188 | */ |
1224 | int nand_update_bbt(struct mtd_info *mtd, loff_t offs) | 1189 | int nand_update_bbt(struct mtd_info *mtd, loff_t offs) |
1225 | { | 1190 | { |
1226 | struct nand_chip *this = mtd->priv; | 1191 | struct nand_chip *this = mtd->priv; |
1227 | int len, res = 0, writeops = 0; | 1192 | int len, res = 0; |
1228 | int chip, chipsel; | 1193 | int chip, chipsel; |
1229 | uint8_t *buf; | 1194 | uint8_t *buf; |
1230 | struct nand_bbt_descr *td = this->bbt_td; | 1195 | struct nand_bbt_descr *td = this->bbt_td; |
@@ -1237,14 +1202,10 @@ int nand_update_bbt(struct mtd_info *mtd, loff_t offs) | |||
1237 | len = (1 << this->bbt_erase_shift); | 1202 | len = (1 << this->bbt_erase_shift); |
1238 | len += (len >> this->page_shift) * mtd->oobsize; | 1203 | len += (len >> this->page_shift) * mtd->oobsize; |
1239 | buf = kmalloc(len, GFP_KERNEL); | 1204 | buf = kmalloc(len, GFP_KERNEL); |
1240 | if (!buf) { | 1205 | if (!buf) |
1241 | printk(KERN_ERR "nand_update_bbt: Out of memory\n"); | ||
1242 | return -ENOMEM; | 1206 | return -ENOMEM; |
1243 | } | ||
1244 | |||
1245 | writeops = md != NULL ? 0x03 : 0x01; | ||
1246 | 1207 | ||
1247 | /* Do we have a bbt per chip ? */ | 1208 | /* Do we have a bbt per chip? */ |
1248 | if (td->options & NAND_BBT_PERCHIP) { | 1209 | if (td->options & NAND_BBT_PERCHIP) { |
1249 | chip = (int)(offs >> this->chip_shift); | 1210 | chip = (int)(offs >> this->chip_shift); |
1250 | chipsel = chip; | 1211 | chipsel = chip; |
@@ -1257,14 +1218,14 @@ int nand_update_bbt(struct mtd_info *mtd, loff_t offs) | |||
1257 | if (md) | 1218 | if (md) |
1258 | md->version[chip]++; | 1219 | md->version[chip]++; |
1259 | 1220 | ||
1260 | /* Write the bad block table to the device ? */ | 1221 | /* Write the bad block table to the device? */ |
1261 | if ((writeops & 0x01) && (td->options & NAND_BBT_WRITE)) { | 1222 | if (td->options & NAND_BBT_WRITE) { |
1262 | res = write_bbt(mtd, buf, td, md, chipsel); | 1223 | res = write_bbt(mtd, buf, td, md, chipsel); |
1263 | if (res < 0) | 1224 | if (res < 0) |
1264 | goto out; | 1225 | goto out; |
1265 | } | 1226 | } |
1266 | /* Write the mirror bad block table to the device ? */ | 1227 | /* Write the mirror bad block table to the device? */ |
1267 | if ((writeops & 0x02) && md && (md->options & NAND_BBT_WRITE)) { | 1228 | if (md && (md->options & NAND_BBT_WRITE)) { |
1268 | res = write_bbt(mtd, buf, md, td, chipsel); | 1229 | res = write_bbt(mtd, buf, md, td, chipsel); |
1269 | } | 1230 | } |
1270 | 1231 | ||
@@ -1273,8 +1234,10 @@ int nand_update_bbt(struct mtd_info *mtd, loff_t offs) | |||
1273 | return res; | 1234 | return res; |
1274 | } | 1235 | } |
1275 | 1236 | ||
1276 | /* Define some generic bad / good block scan pattern which are used | 1237 | /* |
1277 | * while scanning a device for factory marked good / bad blocks. */ | 1238 | * Define some generic bad / good block scan pattern which are used |
1239 | * while scanning a device for factory marked good / bad blocks. | ||
1240 | */ | ||
1278 | static uint8_t scan_ff_pattern[] = { 0xff, 0xff }; | 1241 | static uint8_t scan_ff_pattern[] = { 0xff, 0xff }; |
1279 | 1242 | ||
1280 | static uint8_t scan_agand_pattern[] = { 0x1C, 0x71, 0xC7, 0x1C, 0x71, 0xC7 }; | 1243 | static uint8_t scan_agand_pattern[] = { 0x1C, 0x71, 0xC7, 0x1C, 0x71, 0xC7 }; |
@@ -1286,8 +1249,7 @@ static struct nand_bbt_descr agand_flashbased = { | |||
1286 | .pattern = scan_agand_pattern | 1249 | .pattern = scan_agand_pattern |
1287 | }; | 1250 | }; |
1288 | 1251 | ||
1289 | /* Generic flash bbt decriptors | 1252 | /* Generic flash bbt descriptors */ |
1290 | */ | ||
1291 | static uint8_t bbt_pattern[] = {'B', 'b', 't', '0' }; | 1253 | static uint8_t bbt_pattern[] = {'B', 'b', 't', '0' }; |
1292 | static uint8_t mirror_pattern[] = {'1', 't', 'b', 'B' }; | 1254 | static uint8_t mirror_pattern[] = {'1', 't', 'b', 'B' }; |
1293 | 1255 | ||
@@ -1331,31 +1293,27 @@ static struct nand_bbt_descr bbt_mirror_no_bbt_descr = { | |||
1331 | .pattern = mirror_pattern | 1293 | .pattern = mirror_pattern |
1332 | }; | 1294 | }; |
1333 | 1295 | ||
1334 | #define BBT_SCAN_OPTIONS (NAND_BBT_SCANLASTPAGE | NAND_BBT_SCAN2NDPAGE | \ | 1296 | #define BADBLOCK_SCAN_MASK (~NAND_BBT_NO_OOB) |
1335 | NAND_BBT_SCANBYTE1AND6) | ||
1336 | /** | 1297 | /** |
1337 | * nand_create_default_bbt_descr - [Internal] Creates a BBT descriptor structure | 1298 | * nand_create_badblock_pattern - [INTERN] Creates a BBT descriptor structure |
1338 | * @this: NAND chip to create descriptor for | 1299 | * @this: NAND chip to create descriptor for |
1339 | * | 1300 | * |
1340 | * This function allocates and initializes a nand_bbt_descr for BBM detection | 1301 | * This function allocates and initializes a nand_bbt_descr for BBM detection |
1341 | * based on the properties of "this". The new descriptor is stored in | 1302 | * based on the properties of @this. The new descriptor is stored in |
1342 | * this->badblock_pattern. Thus, this->badblock_pattern should be NULL when | 1303 | * this->badblock_pattern. Thus, this->badblock_pattern should be NULL when |
1343 | * passed to this function. | 1304 | * passed to this function. |
1344 | * | ||
1345 | */ | 1305 | */ |
1346 | static int nand_create_default_bbt_descr(struct nand_chip *this) | 1306 | static int nand_create_badblock_pattern(struct nand_chip *this) |
1347 | { | 1307 | { |
1348 | struct nand_bbt_descr *bd; | 1308 | struct nand_bbt_descr *bd; |
1349 | if (this->badblock_pattern) { | 1309 | if (this->badblock_pattern) { |
1350 | printk(KERN_WARNING "BBT descr already allocated; not replacing.\n"); | 1310 | pr_warn("Bad block pattern already allocated; not replacing\n"); |
1351 | return -EINVAL; | 1311 | return -EINVAL; |
1352 | } | 1312 | } |
1353 | bd = kzalloc(sizeof(*bd), GFP_KERNEL); | 1313 | bd = kzalloc(sizeof(*bd), GFP_KERNEL); |
1354 | if (!bd) { | 1314 | if (!bd) |
1355 | printk(KERN_ERR "nand_create_default_bbt_descr: Out of memory\n"); | ||
1356 | return -ENOMEM; | 1315 | return -ENOMEM; |
1357 | } | 1316 | bd->options = this->bbt_options & BADBLOCK_SCAN_MASK; |
1358 | bd->options = this->options & BBT_SCAN_OPTIONS; | ||
1359 | bd->offs = this->badblockpos; | 1317 | bd->offs = this->badblockpos; |
1360 | bd->len = (this->options & NAND_BUSWIDTH_16) ? 2 : 1; | 1318 | bd->len = (this->options & NAND_BUSWIDTH_16) ? 2 : 1; |
1361 | bd->pattern = scan_ff_pattern; | 1319 | bd->pattern = scan_ff_pattern; |
@@ -1366,22 +1324,20 @@ static int nand_create_default_bbt_descr(struct nand_chip *this) | |||
1366 | 1324 | ||
1367 | /** | 1325 | /** |
1368 | * nand_default_bbt - [NAND Interface] Select a default bad block table for the device | 1326 | * nand_default_bbt - [NAND Interface] Select a default bad block table for the device |
1369 | * @mtd: MTD device structure | 1327 | * @mtd: MTD device structure |
1370 | * | ||
1371 | * This function selects the default bad block table | ||
1372 | * support for the device and calls the nand_scan_bbt function | ||
1373 | * | 1328 | * |
1374 | */ | 1329 | * This function selects the default bad block table support for the device and |
1330 | * calls the nand_scan_bbt function. | ||
1331 | */ | ||
1375 | int nand_default_bbt(struct mtd_info *mtd) | 1332 | int nand_default_bbt(struct mtd_info *mtd) |
1376 | { | 1333 | { |
1377 | struct nand_chip *this = mtd->priv; | 1334 | struct nand_chip *this = mtd->priv; |
1378 | 1335 | ||
1379 | /* Default for AG-AND. We must use a flash based | 1336 | /* |
1380 | * bad block table as the devices have factory marked | 1337 | * Default for AG-AND. We must use a flash based bad block table as the |
1381 | * _good_ blocks. Erasing those blocks leads to loss | 1338 | * devices have factory marked _good_ blocks. Erasing those blocks |
1382 | * of the good / bad information, so we _must_ store | 1339 | * leads to loss of the good / bad information, so we _must_ store this |
1383 | * this information in a good / bad table during | 1340 | * information in a good / bad table during startup. |
1384 | * startup | ||
1385 | */ | 1341 | */ |
1386 | if (this->options & NAND_IS_AND) { | 1342 | if (this->options & NAND_IS_AND) { |
1387 | /* Use the default pattern descriptors */ | 1343 | /* Use the default pattern descriptors */ |
@@ -1389,15 +1345,15 @@ int nand_default_bbt(struct mtd_info *mtd) | |||
1389 | this->bbt_td = &bbt_main_descr; | 1345 | this->bbt_td = &bbt_main_descr; |
1390 | this->bbt_md = &bbt_mirror_descr; | 1346 | this->bbt_md = &bbt_mirror_descr; |
1391 | } | 1347 | } |
1392 | this->options |= NAND_USE_FLASH_BBT; | 1348 | this->bbt_options |= NAND_BBT_USE_FLASH; |
1393 | return nand_scan_bbt(mtd, &agand_flashbased); | 1349 | return nand_scan_bbt(mtd, &agand_flashbased); |
1394 | } | 1350 | } |
1395 | 1351 | ||
1396 | /* Is a flash based bad block table requested ? */ | 1352 | /* Is a flash based bad block table requested? */ |
1397 | if (this->options & NAND_USE_FLASH_BBT) { | 1353 | if (this->bbt_options & NAND_BBT_USE_FLASH) { |
1398 | /* Use the default pattern descriptors */ | 1354 | /* Use the default pattern descriptors */ |
1399 | if (!this->bbt_td) { | 1355 | if (!this->bbt_td) { |
1400 | if (this->options & NAND_USE_FLASH_BBT_NO_OOB) { | 1356 | if (this->bbt_options & NAND_BBT_NO_OOB) { |
1401 | this->bbt_td = &bbt_main_no_bbt_descr; | 1357 | this->bbt_td = &bbt_main_no_bbt_descr; |
1402 | this->bbt_md = &bbt_mirror_no_bbt_descr; | 1358 | this->bbt_md = &bbt_mirror_no_bbt_descr; |
1403 | } else { | 1359 | } else { |
@@ -1411,18 +1367,17 @@ int nand_default_bbt(struct mtd_info *mtd) | |||
1411 | } | 1367 | } |
1412 | 1368 | ||
1413 | if (!this->badblock_pattern) | 1369 | if (!this->badblock_pattern) |
1414 | nand_create_default_bbt_descr(this); | 1370 | nand_create_badblock_pattern(this); |
1415 | 1371 | ||
1416 | return nand_scan_bbt(mtd, this->badblock_pattern); | 1372 | return nand_scan_bbt(mtd, this->badblock_pattern); |
1417 | } | 1373 | } |
1418 | 1374 | ||
1419 | /** | 1375 | /** |
1420 | * nand_isbad_bbt - [NAND Interface] Check if a block is bad | 1376 | * nand_isbad_bbt - [NAND Interface] Check if a block is bad |
1421 | * @mtd: MTD device structure | 1377 | * @mtd: MTD device structure |
1422 | * @offs: offset in the device | 1378 | * @offs: offset in the device |
1423 | * @allowbbt: allow access to bad block table region | 1379 | * @allowbbt: allow access to bad block table region |
1424 | * | 1380 | */ |
1425 | */ | ||
1426 | int nand_isbad_bbt(struct mtd_info *mtd, loff_t offs, int allowbbt) | 1381 | int nand_isbad_bbt(struct mtd_info *mtd, loff_t offs, int allowbbt) |
1427 | { | 1382 | { |
1428 | struct nand_chip *this = mtd->priv; | 1383 | struct nand_chip *this = mtd->priv; |
@@ -1433,8 +1388,9 @@ int nand_isbad_bbt(struct mtd_info *mtd, loff_t offs, int allowbbt) | |||
1433 | block = (int)(offs >> (this->bbt_erase_shift - 1)); | 1388 | block = (int)(offs >> (this->bbt_erase_shift - 1)); |
1434 | res = (this->bbt[block >> 3] >> (block & 0x06)) & 0x03; | 1389 | res = (this->bbt[block >> 3] >> (block & 0x06)) & 0x03; |
1435 | 1390 | ||
1436 | DEBUG(MTD_DEBUG_LEVEL2, "nand_isbad_bbt(): bbt info for offs 0x%08x: (block %d) 0x%02x\n", | 1391 | pr_debug("nand_isbad_bbt(): bbt info for offs 0x%08x: " |
1437 | (unsigned int)offs, block >> 1, res); | 1392 | "(block %d) 0x%02x\n", |
1393 | (unsigned int)offs, block >> 1, res); | ||
1438 | 1394 | ||
1439 | switch ((int)res) { | 1395 | switch ((int)res) { |
1440 | case 0x00: | 1396 | case 0x00: |
diff --git a/drivers/mtd/nand/nand_bch.c b/drivers/mtd/nand/nand_bch.c index 0f931e757116..3803e0bba23b 100644 --- a/drivers/mtd/nand/nand_bch.c +++ b/drivers/mtd/nand/nand_bch.c | |||
@@ -93,8 +93,8 @@ int nand_bch_correct_data(struct mtd_info *mtd, unsigned char *buf, | |||
93 | buf[errloc[i] >> 3] ^= (1 << (errloc[i] & 7)); | 93 | buf[errloc[i] >> 3] ^= (1 << (errloc[i] & 7)); |
94 | /* else error in ecc, no action needed */ | 94 | /* else error in ecc, no action needed */ |
95 | 95 | ||
96 | DEBUG(MTD_DEBUG_LEVEL0, "%s: corrected bitflip %u\n", | 96 | pr_debug("%s: corrected bitflip %u\n", __func__, |
97 | __func__, errloc[i]); | 97 | errloc[i]); |
98 | } | 98 | } |
99 | } else if (count < 0) { | 99 | } else if (count < 0) { |
100 | printk(KERN_ERR "ecc unrecoverable error\n"); | 100 | printk(KERN_ERR "ecc unrecoverable error\n"); |
diff --git a/drivers/mtd/nand/nand_ecc.c b/drivers/mtd/nand/nand_ecc.c index 271b8e735e8f..b7cfe0d37121 100644 --- a/drivers/mtd/nand/nand_ecc.c +++ b/drivers/mtd/nand/nand_ecc.c | |||
@@ -110,7 +110,7 @@ static const char bitsperbyte[256] = { | |||
110 | 110 | ||
111 | /* | 111 | /* |
112 | * addressbits is a lookup table to filter out the bits from the xor-ed | 112 | * addressbits is a lookup table to filter out the bits from the xor-ed |
113 | * ecc data that identify the faulty location. | 113 | * ECC data that identify the faulty location. |
114 | * this is only used for repairing parity | 114 | * this is only used for repairing parity |
115 | * see the comments in nand_correct_data for more details | 115 | * see the comments in nand_correct_data for more details |
116 | */ | 116 | */ |
@@ -153,7 +153,7 @@ static const char addressbits[256] = { | |||
153 | * __nand_calculate_ecc - [NAND Interface] Calculate 3-byte ECC for 256/512-byte | 153 | * __nand_calculate_ecc - [NAND Interface] Calculate 3-byte ECC for 256/512-byte |
154 | * block | 154 | * block |
155 | * @buf: input buffer with raw data | 155 | * @buf: input buffer with raw data |
156 | * @eccsize: data bytes per ecc step (256 or 512) | 156 | * @eccsize: data bytes per ECC step (256 or 512) |
157 | * @code: output buffer with ECC | 157 | * @code: output buffer with ECC |
158 | */ | 158 | */ |
159 | void __nand_calculate_ecc(const unsigned char *buf, unsigned int eccsize, | 159 | void __nand_calculate_ecc(const unsigned char *buf, unsigned int eccsize, |
@@ -348,7 +348,7 @@ void __nand_calculate_ecc(const unsigned char *buf, unsigned int eccsize, | |||
348 | rp17 = (par ^ rp16) & 0xff; | 348 | rp17 = (par ^ rp16) & 0xff; |
349 | 349 | ||
350 | /* | 350 | /* |
351 | * Finally calculate the ecc bits. | 351 | * Finally calculate the ECC bits. |
352 | * Again here it might seem that there are performance optimisations | 352 | * Again here it might seem that there are performance optimisations |
353 | * possible, but benchmarks showed that on the system this is developed | 353 | * possible, but benchmarks showed that on the system this is developed |
354 | * the code below is the fastest | 354 | * the code below is the fastest |
@@ -436,7 +436,7 @@ EXPORT_SYMBOL(nand_calculate_ecc); | |||
436 | * @buf: raw data read from the chip | 436 | * @buf: raw data read from the chip |
437 | * @read_ecc: ECC from the chip | 437 | * @read_ecc: ECC from the chip |
438 | * @calc_ecc: the ECC calculated from raw data | 438 | * @calc_ecc: the ECC calculated from raw data |
439 | * @eccsize: data bytes per ecc step (256 or 512) | 439 | * @eccsize: data bytes per ECC step (256 or 512) |
440 | * | 440 | * |
441 | * Detect and correct a 1 bit error for eccsize byte block | 441 | * Detect and correct a 1 bit error for eccsize byte block |
442 | */ | 442 | */ |
@@ -505,7 +505,7 @@ int __nand_correct_data(unsigned char *buf, | |||
505 | } | 505 | } |
506 | /* count nr of bits; use table lookup, faster than calculating it */ | 506 | /* count nr of bits; use table lookup, faster than calculating it */ |
507 | if ((bitsperbyte[b0] + bitsperbyte[b1] + bitsperbyte[b2]) == 1) | 507 | if ((bitsperbyte[b0] + bitsperbyte[b1] + bitsperbyte[b2]) == 1) |
508 | return 1; /* error in ecc data; no action needed */ | 508 | return 1; /* error in ECC data; no action needed */ |
509 | 509 | ||
510 | printk(KERN_ERR "uncorrectable error : "); | 510 | printk(KERN_ERR "uncorrectable error : "); |
511 | return -1; | 511 | return -1; |
diff --git a/drivers/mtd/nand/nandsim.c b/drivers/mtd/nand/nandsim.c index 357e8c5252a8..34c03be77301 100644 --- a/drivers/mtd/nand/nandsim.c +++ b/drivers/mtd/nand/nandsim.c | |||
@@ -2273,9 +2273,9 @@ static int __init ns_init_module(void) | |||
2273 | 2273 | ||
2274 | switch (bbt) { | 2274 | switch (bbt) { |
2275 | case 2: | 2275 | case 2: |
2276 | chip->options |= NAND_USE_FLASH_BBT_NO_OOB; | 2276 | chip->bbt_options |= NAND_BBT_NO_OOB; |
2277 | case 1: | 2277 | case 1: |
2278 | chip->options |= NAND_USE_FLASH_BBT; | 2278 | chip->bbt_options |= NAND_BBT_USE_FLASH; |
2279 | case 0: | 2279 | case 0: |
2280 | break; | 2280 | break; |
2281 | default: | 2281 | default: |
diff --git a/drivers/mtd/nand/ndfc.c b/drivers/mtd/nand/ndfc.c index ea2dea8a9c88..ee1713907b92 100644 --- a/drivers/mtd/nand/ndfc.c +++ b/drivers/mtd/nand/ndfc.c | |||
@@ -42,7 +42,6 @@ struct ndfc_controller { | |||
42 | struct nand_chip chip; | 42 | struct nand_chip chip; |
43 | int chip_select; | 43 | int chip_select; |
44 | struct nand_hw_control ndfc_control; | 44 | struct nand_hw_control ndfc_control; |
45 | struct mtd_partition *parts; | ||
46 | }; | 45 | }; |
47 | 46 | ||
48 | static struct ndfc_controller ndfc_ctrl[NDFC_MAX_CS]; | 47 | static struct ndfc_controller ndfc_ctrl[NDFC_MAX_CS]; |
@@ -159,13 +158,9 @@ static int ndfc_verify_buf(struct mtd_info *mtd, const uint8_t *buf, int len) | |||
159 | static int ndfc_chip_init(struct ndfc_controller *ndfc, | 158 | static int ndfc_chip_init(struct ndfc_controller *ndfc, |
160 | struct device_node *node) | 159 | struct device_node *node) |
161 | { | 160 | { |
162 | #ifdef CONFIG_MTD_CMDLINE_PARTS | ||
163 | static const char *part_types[] = { "cmdlinepart", NULL }; | ||
164 | #else | ||
165 | static const char *part_types[] = { NULL }; | ||
166 | #endif | ||
167 | struct device_node *flash_np; | 161 | struct device_node *flash_np; |
168 | struct nand_chip *chip = &ndfc->chip; | 162 | struct nand_chip *chip = &ndfc->chip; |
163 | struct mtd_part_parser_data ppdata; | ||
169 | int ret; | 164 | int ret; |
170 | 165 | ||
171 | chip->IO_ADDR_R = ndfc->ndfcbase + NDFC_DATA; | 166 | chip->IO_ADDR_R = ndfc->ndfcbase + NDFC_DATA; |
@@ -193,6 +188,7 @@ static int ndfc_chip_init(struct ndfc_controller *ndfc, | |||
193 | if (!flash_np) | 188 | if (!flash_np) |
194 | return -ENODEV; | 189 | return -ENODEV; |
195 | 190 | ||
191 | ppdata.of_node = flash_np; | ||
196 | ndfc->mtd.name = kasprintf(GFP_KERNEL, "%s.%s", | 192 | ndfc->mtd.name = kasprintf(GFP_KERNEL, "%s.%s", |
197 | dev_name(&ndfc->ofdev->dev), flash_np->name); | 193 | dev_name(&ndfc->ofdev->dev), flash_np->name); |
198 | if (!ndfc->mtd.name) { | 194 | if (!ndfc->mtd.name) { |
@@ -204,18 +200,7 @@ static int ndfc_chip_init(struct ndfc_controller *ndfc, | |||
204 | if (ret) | 200 | if (ret) |
205 | goto err; | 201 | goto err; |
206 | 202 | ||
207 | ret = parse_mtd_partitions(&ndfc->mtd, part_types, &ndfc->parts, 0); | 203 | ret = mtd_device_parse_register(&ndfc->mtd, NULL, &ppdata, NULL, 0); |
208 | if (ret < 0) | ||
209 | goto err; | ||
210 | |||
211 | if (ret == 0) { | ||
212 | ret = of_mtd_parse_partitions(&ndfc->ofdev->dev, flash_np, | ||
213 | &ndfc->parts); | ||
214 | if (ret < 0) | ||
215 | goto err; | ||
216 | } | ||
217 | |||
218 | ret = mtd_device_register(&ndfc->mtd, ndfc->parts, ret); | ||
219 | 204 | ||
220 | err: | 205 | err: |
221 | of_node_put(flash_np); | 206 | of_node_put(flash_np); |
@@ -288,6 +273,7 @@ static int __devexit ndfc_remove(struct platform_device *ofdev) | |||
288 | struct ndfc_controller *ndfc = dev_get_drvdata(&ofdev->dev); | 273 | struct ndfc_controller *ndfc = dev_get_drvdata(&ofdev->dev); |
289 | 274 | ||
290 | nand_release(&ndfc->mtd); | 275 | nand_release(&ndfc->mtd); |
276 | kfree(ndfc->mtd.name); | ||
291 | 277 | ||
292 | return 0; | 278 | return 0; |
293 | } | 279 | } |
diff --git a/drivers/mtd/nand/nomadik_nand.c b/drivers/mtd/nand/nomadik_nand.c index b6a5c86ab31e..b463ecfb4c1a 100644 --- a/drivers/mtd/nand/nomadik_nand.c +++ b/drivers/mtd/nand/nomadik_nand.c | |||
@@ -187,6 +187,7 @@ static int nomadik_nand_remove(struct platform_device *pdev) | |||
187 | pdata->exit(); | 187 | pdata->exit(); |
188 | 188 | ||
189 | if (host) { | 189 | if (host) { |
190 | nand_release(&host->mtd); | ||
190 | iounmap(host->cmd_va); | 191 | iounmap(host->cmd_va); |
191 | iounmap(host->data_va); | 192 | iounmap(host->data_va); |
192 | iounmap(host->addr_va); | 193 | iounmap(host->addr_va); |
diff --git a/drivers/mtd/nand/nuc900_nand.c b/drivers/mtd/nand/nuc900_nand.c index 9c30a0b03171..fa8faedfad6e 100644 --- a/drivers/mtd/nand/nuc900_nand.c +++ b/drivers/mtd/nand/nuc900_nand.c | |||
@@ -339,6 +339,7 @@ static int __devexit nuc900_nand_remove(struct platform_device *pdev) | |||
339 | struct nuc900_nand *nuc900_nand = platform_get_drvdata(pdev); | 339 | struct nuc900_nand *nuc900_nand = platform_get_drvdata(pdev); |
340 | struct resource *res; | 340 | struct resource *res; |
341 | 341 | ||
342 | nand_release(&nuc900_nand->mtd); | ||
342 | iounmap(nuc900_nand->reg); | 343 | iounmap(nuc900_nand->reg); |
343 | 344 | ||
344 | res = platform_get_resource(pdev, IORESOURCE_MEM, 0); | 345 | res = platform_get_resource(pdev, IORESOURCE_MEM, 0); |
diff --git a/drivers/mtd/nand/omap2.c b/drivers/mtd/nand/omap2.c index ec22a5aab038..f745f00f3167 100644 --- a/drivers/mtd/nand/omap2.c +++ b/drivers/mtd/nand/omap2.c | |||
@@ -95,8 +95,6 @@ | |||
95 | #define P4e_s(a) (TF(a & NAND_Ecc_P4e) << 0) | 95 | #define P4e_s(a) (TF(a & NAND_Ecc_P4e) << 0) |
96 | #define P4o_s(a) (TF(a & NAND_Ecc_P4o) << 1) | 96 | #define P4o_s(a) (TF(a & NAND_Ecc_P4o) << 1) |
97 | 97 | ||
98 | static const char *part_probes[] = { "cmdlinepart", NULL }; | ||
99 | |||
100 | /* oob info generated runtime depending on ecc algorithm and layout selected */ | 98 | /* oob info generated runtime depending on ecc algorithm and layout selected */ |
101 | static struct nand_ecclayout omap_oobinfo; | 99 | static struct nand_ecclayout omap_oobinfo; |
102 | /* Define some generic bad / good block scan pattern which are used | 100 | /* Define some generic bad / good block scan pattern which are used |
@@ -115,7 +113,6 @@ struct omap_nand_info { | |||
115 | struct nand_hw_control controller; | 113 | struct nand_hw_control controller; |
116 | struct omap_nand_platform_data *pdata; | 114 | struct omap_nand_platform_data *pdata; |
117 | struct mtd_info mtd; | 115 | struct mtd_info mtd; |
118 | struct mtd_partition *parts; | ||
119 | struct nand_chip nand; | 116 | struct nand_chip nand; |
120 | struct platform_device *pdev; | 117 | struct platform_device *pdev; |
121 | 118 | ||
@@ -745,12 +742,12 @@ static int omap_compare_ecc(u8 *ecc_data1, /* read from NAND memory */ | |||
745 | 742 | ||
746 | case 1: | 743 | case 1: |
747 | /* Uncorrectable error */ | 744 | /* Uncorrectable error */ |
748 | DEBUG(MTD_DEBUG_LEVEL0, "ECC UNCORRECTED_ERROR 1\n"); | 745 | pr_debug("ECC UNCORRECTED_ERROR 1\n"); |
749 | return -1; | 746 | return -1; |
750 | 747 | ||
751 | case 11: | 748 | case 11: |
752 | /* UN-Correctable error */ | 749 | /* UN-Correctable error */ |
753 | DEBUG(MTD_DEBUG_LEVEL0, "ECC UNCORRECTED_ERROR B\n"); | 750 | pr_debug("ECC UNCORRECTED_ERROR B\n"); |
754 | return -1; | 751 | return -1; |
755 | 752 | ||
756 | case 12: | 753 | case 12: |
@@ -767,8 +764,8 @@ static int omap_compare_ecc(u8 *ecc_data1, /* read from NAND memory */ | |||
767 | 764 | ||
768 | find_bit = (ecc_bit[5] << 2) + (ecc_bit[3] << 1) + ecc_bit[1]; | 765 | find_bit = (ecc_bit[5] << 2) + (ecc_bit[3] << 1) + ecc_bit[1]; |
769 | 766 | ||
770 | DEBUG(MTD_DEBUG_LEVEL0, "Correcting single bit ECC error at " | 767 | pr_debug("Correcting single bit ECC error at offset: " |
771 | "offset: %d, bit: %d\n", find_byte, find_bit); | 768 | "%d, bit: %d\n", find_byte, find_bit); |
772 | 769 | ||
773 | page_data[find_byte] ^= (1 << find_bit); | 770 | page_data[find_byte] ^= (1 << find_bit); |
774 | 771 | ||
@@ -780,7 +777,7 @@ static int omap_compare_ecc(u8 *ecc_data1, /* read from NAND memory */ | |||
780 | ecc_data2[2] == 0) | 777 | ecc_data2[2] == 0) |
781 | return 0; | 778 | return 0; |
782 | } | 779 | } |
783 | DEBUG(MTD_DEBUG_LEVEL0, "UNCORRECTED_ERROR default\n"); | 780 | pr_debug("UNCORRECTED_ERROR default\n"); |
784 | return -1; | 781 | return -1; |
785 | } | 782 | } |
786 | } | 783 | } |
@@ -1104,13 +1101,8 @@ static int __devinit omap_nand_probe(struct platform_device *pdev) | |||
1104 | goto out_release_mem_region; | 1101 | goto out_release_mem_region; |
1105 | } | 1102 | } |
1106 | 1103 | ||
1107 | err = parse_mtd_partitions(&info->mtd, part_probes, &info->parts, 0); | 1104 | mtd_device_parse_register(&info->mtd, NULL, 0, |
1108 | if (err > 0) | 1105 | pdata->parts, pdata->nr_parts); |
1109 | mtd_device_register(&info->mtd, info->parts, err); | ||
1110 | else if (pdata->parts) | ||
1111 | mtd_device_register(&info->mtd, pdata->parts, pdata->nr_parts); | ||
1112 | else | ||
1113 | mtd_device_register(&info->mtd, NULL, 0); | ||
1114 | 1106 | ||
1115 | platform_set_drvdata(pdev, &info->mtd); | 1107 | platform_set_drvdata(pdev, &info->mtd); |
1116 | 1108 | ||
diff --git a/drivers/mtd/nand/orion_nand.c b/drivers/mtd/nand/orion_nand.c index 7794d0680f91..29f505adaf84 100644 --- a/drivers/mtd/nand/orion_nand.c +++ b/drivers/mtd/nand/orion_nand.c | |||
@@ -21,8 +21,6 @@ | |||
21 | #include <mach/hardware.h> | 21 | #include <mach/hardware.h> |
22 | #include <plat/orion_nand.h> | 22 | #include <plat/orion_nand.h> |
23 | 23 | ||
24 | static const char *part_probes[] = { "cmdlinepart", NULL }; | ||
25 | |||
26 | static void orion_nand_cmd_ctrl(struct mtd_info *mtd, int cmd, unsigned int ctrl) | 24 | static void orion_nand_cmd_ctrl(struct mtd_info *mtd, int cmd, unsigned int ctrl) |
27 | { | 25 | { |
28 | struct nand_chip *nc = mtd->priv; | 26 | struct nand_chip *nc = mtd->priv; |
@@ -81,8 +79,6 @@ static int __init orion_nand_probe(struct platform_device *pdev) | |||
81 | struct resource *res; | 79 | struct resource *res; |
82 | void __iomem *io_base; | 80 | void __iomem *io_base; |
83 | int ret = 0; | 81 | int ret = 0; |
84 | struct mtd_partition *partitions = NULL; | ||
85 | int num_part = 0; | ||
86 | 82 | ||
87 | nc = kzalloc(sizeof(struct nand_chip) + sizeof(struct mtd_info), GFP_KERNEL); | 83 | nc = kzalloc(sizeof(struct nand_chip) + sizeof(struct mtd_info), GFP_KERNEL); |
88 | if (!nc) { | 84 | if (!nc) { |
@@ -132,17 +128,9 @@ static int __init orion_nand_probe(struct platform_device *pdev) | |||
132 | goto no_dev; | 128 | goto no_dev; |
133 | } | 129 | } |
134 | 130 | ||
135 | #ifdef CONFIG_MTD_CMDLINE_PARTS | ||
136 | mtd->name = "orion_nand"; | 131 | mtd->name = "orion_nand"; |
137 | num_part = parse_mtd_partitions(mtd, part_probes, &partitions, 0); | 132 | ret = mtd_device_parse_register(mtd, NULL, 0, |
138 | #endif | 133 | board->parts, board->nr_parts); |
139 | /* If cmdline partitions have been passed, let them be used */ | ||
140 | if (num_part <= 0) { | ||
141 | num_part = board->nr_parts; | ||
142 | partitions = board->parts; | ||
143 | } | ||
144 | |||
145 | ret = mtd_device_register(mtd, partitions, num_part); | ||
146 | if (ret) { | 134 | if (ret) { |
147 | nand_release(mtd); | 135 | nand_release(mtd); |
148 | goto no_dev; | 136 | goto no_dev; |
diff --git a/drivers/mtd/nand/pasemi_nand.c b/drivers/mtd/nand/pasemi_nand.c index b1aa41b8a4eb..a97264ececdb 100644 --- a/drivers/mtd/nand/pasemi_nand.c +++ b/drivers/mtd/nand/pasemi_nand.c | |||
@@ -155,7 +155,8 @@ static int __devinit pasemi_nand_probe(struct platform_device *ofdev) | |||
155 | chip->ecc.mode = NAND_ECC_SOFT; | 155 | chip->ecc.mode = NAND_ECC_SOFT; |
156 | 156 | ||
157 | /* Enable the following for a flash based bad block table */ | 157 | /* Enable the following for a flash based bad block table */ |
158 | chip->options = NAND_USE_FLASH_BBT | NAND_NO_AUTOINCR; | 158 | chip->options = NAND_NO_AUTOINCR; |
159 | chip->bbt_options = NAND_BBT_USE_FLASH; | ||
159 | 160 | ||
160 | /* Scan to find existence of the device */ | 161 | /* Scan to find existence of the device */ |
161 | if (nand_scan(pasemi_nand_mtd, 1)) { | 162 | if (nand_scan(pasemi_nand_mtd, 1)) { |
diff --git a/drivers/mtd/nand/plat_nand.c b/drivers/mtd/nand/plat_nand.c index 633c04bf76f6..ea8e1234e0e2 100644 --- a/drivers/mtd/nand/plat_nand.c +++ b/drivers/mtd/nand/plat_nand.c | |||
@@ -21,8 +21,6 @@ struct plat_nand_data { | |||
21 | struct nand_chip chip; | 21 | struct nand_chip chip; |
22 | struct mtd_info mtd; | 22 | struct mtd_info mtd; |
23 | void __iomem *io_base; | 23 | void __iomem *io_base; |
24 | int nr_parts; | ||
25 | struct mtd_partition *parts; | ||
26 | }; | 24 | }; |
27 | 25 | ||
28 | /* | 26 | /* |
@@ -79,6 +77,7 @@ static int __devinit plat_nand_probe(struct platform_device *pdev) | |||
79 | data->chip.read_buf = pdata->ctrl.read_buf; | 77 | data->chip.read_buf = pdata->ctrl.read_buf; |
80 | data->chip.chip_delay = pdata->chip.chip_delay; | 78 | data->chip.chip_delay = pdata->chip.chip_delay; |
81 | data->chip.options |= pdata->chip.options; | 79 | data->chip.options |= pdata->chip.options; |
80 | data->chip.bbt_options |= pdata->chip.bbt_options; | ||
82 | 81 | ||
83 | data->chip.ecc.hwctl = pdata->ctrl.hwcontrol; | 82 | data->chip.ecc.hwctl = pdata->ctrl.hwcontrol; |
84 | data->chip.ecc.layout = pdata->chip.ecclayout; | 83 | data->chip.ecc.layout = pdata->chip.ecclayout; |
@@ -99,23 +98,9 @@ static int __devinit plat_nand_probe(struct platform_device *pdev) | |||
99 | goto out; | 98 | goto out; |
100 | } | 99 | } |
101 | 100 | ||
102 | if (pdata->chip.part_probe_types) { | 101 | err = mtd_device_parse_register(&data->mtd, |
103 | err = parse_mtd_partitions(&data->mtd, | 102 | pdata->chip.part_probe_types, 0, |
104 | pdata->chip.part_probe_types, | 103 | pdata->chip.partitions, pdata->chip.nr_partitions); |
105 | &data->parts, 0); | ||
106 | if (err > 0) { | ||
107 | mtd_device_register(&data->mtd, data->parts, err); | ||
108 | return 0; | ||
109 | } | ||
110 | } | ||
111 | if (pdata->chip.set_parts) | ||
112 | pdata->chip.set_parts(data->mtd.size, &pdata->chip); | ||
113 | if (pdata->chip.partitions) { | ||
114 | data->parts = pdata->chip.partitions; | ||
115 | err = mtd_device_register(&data->mtd, data->parts, | ||
116 | pdata->chip.nr_partitions); | ||
117 | } else | ||
118 | err = mtd_device_register(&data->mtd, NULL, 0); | ||
119 | 104 | ||
120 | if (!err) | 105 | if (!err) |
121 | return err; | 106 | return err; |
@@ -145,8 +130,6 @@ static int __devexit plat_nand_remove(struct platform_device *pdev) | |||
145 | res = platform_get_resource(pdev, IORESOURCE_MEM, 0); | 130 | res = platform_get_resource(pdev, IORESOURCE_MEM, 0); |
146 | 131 | ||
147 | nand_release(&data->mtd); | 132 | nand_release(&data->mtd); |
148 | if (data->parts && data->parts != pdata->chip.partitions) | ||
149 | kfree(data->parts); | ||
150 | if (pdata->ctrl.remove) | 133 | if (pdata->ctrl.remove) |
151 | pdata->ctrl.remove(pdev); | 134 | pdata->ctrl.remove(pdev); |
152 | iounmap(data->io_base); | 135 | iounmap(data->io_base); |
diff --git a/drivers/mtd/nand/ppchameleonevb.c b/drivers/mtd/nand/ppchameleonevb.c index 3bbb796b451c..7e52af51a198 100644 --- a/drivers/mtd/nand/ppchameleonevb.c +++ b/drivers/mtd/nand/ppchameleonevb.c | |||
@@ -99,8 +99,6 @@ static struct mtd_partition partition_info_evb[] = { | |||
99 | 99 | ||
100 | #define NUM_PARTITIONS 1 | 100 | #define NUM_PARTITIONS 1 |
101 | 101 | ||
102 | extern int parse_cmdline_partitions(struct mtd_info *master, struct mtd_partition **pparts, const char *mtd_id); | ||
103 | |||
104 | /* | 102 | /* |
105 | * hardware specific access to control-lines | 103 | * hardware specific access to control-lines |
106 | */ | 104 | */ |
@@ -187,18 +185,12 @@ static int ppchameleonevb_device_ready(struct mtd_info *minfo) | |||
187 | } | 185 | } |
188 | #endif | 186 | #endif |
189 | 187 | ||
190 | const char *part_probes[] = { "cmdlinepart", NULL }; | ||
191 | const char *part_probes_evb[] = { "cmdlinepart", NULL }; | ||
192 | |||
193 | /* | 188 | /* |
194 | * Main initialization routine | 189 | * Main initialization routine |
195 | */ | 190 | */ |
196 | static int __init ppchameleonevb_init(void) | 191 | static int __init ppchameleonevb_init(void) |
197 | { | 192 | { |
198 | struct nand_chip *this; | 193 | struct nand_chip *this; |
199 | const char *part_type = 0; | ||
200 | int mtd_parts_nb = 0; | ||
201 | struct mtd_partition *mtd_parts = 0; | ||
202 | void __iomem *ppchameleon_fio_base; | 194 | void __iomem *ppchameleon_fio_base; |
203 | void __iomem *ppchameleonevb_fio_base; | 195 | void __iomem *ppchameleonevb_fio_base; |
204 | 196 | ||
@@ -281,24 +273,13 @@ static int __init ppchameleonevb_init(void) | |||
281 | #endif | 273 | #endif |
282 | 274 | ||
283 | ppchameleon_mtd->name = "ppchameleon-nand"; | 275 | ppchameleon_mtd->name = "ppchameleon-nand"; |
284 | mtd_parts_nb = parse_mtd_partitions(ppchameleon_mtd, part_probes, &mtd_parts, 0); | ||
285 | if (mtd_parts_nb > 0) | ||
286 | part_type = "command line"; | ||
287 | else | ||
288 | mtd_parts_nb = 0; | ||
289 | |||
290 | if (mtd_parts_nb == 0) { | ||
291 | if (ppchameleon_mtd->size == NAND_SMALL_SIZE) | ||
292 | mtd_parts = partition_info_me; | ||
293 | else | ||
294 | mtd_parts = partition_info_hi; | ||
295 | mtd_parts_nb = NUM_PARTITIONS; | ||
296 | part_type = "static"; | ||
297 | } | ||
298 | 276 | ||
299 | /* Register the partitions */ | 277 | /* Register the partitions */ |
300 | printk(KERN_NOTICE "Using %s partition definition\n", part_type); | 278 | mtd_device_parse_register(ppchameleon_mtd, NULL, 0, |
301 | mtd_device_register(ppchameleon_mtd, mtd_parts, mtd_parts_nb); | 279 | ppchameleon_mtd->size == NAND_SMALL_SIZE ? |
280 | partition_info_me : | ||
281 | partition_info_hi, | ||
282 | NUM_PARTITIONS); | ||
302 | 283 | ||
303 | nand_evb_init: | 284 | nand_evb_init: |
304 | /**************************** | 285 | /**************************** |
@@ -382,21 +363,13 @@ static int __init ppchameleonevb_init(void) | |||
382 | } | 363 | } |
383 | 364 | ||
384 | ppchameleonevb_mtd->name = NAND_EVB_MTD_NAME; | 365 | ppchameleonevb_mtd->name = NAND_EVB_MTD_NAME; |
385 | mtd_parts_nb = parse_mtd_partitions(ppchameleonevb_mtd, part_probes_evb, &mtd_parts, 0); | ||
386 | if (mtd_parts_nb > 0) | ||
387 | part_type = "command line"; | ||
388 | else | ||
389 | mtd_parts_nb = 0; | ||
390 | |||
391 | if (mtd_parts_nb == 0) { | ||
392 | mtd_parts = partition_info_evb; | ||
393 | mtd_parts_nb = NUM_PARTITIONS; | ||
394 | part_type = "static"; | ||
395 | } | ||
396 | 366 | ||
397 | /* Register the partitions */ | 367 | /* Register the partitions */ |
398 | printk(KERN_NOTICE "Using %s partition definition\n", part_type); | 368 | mtd_device_parse_register(ppchameleonevb_mtd, NULL, 0, |
399 | mtd_device_register(ppchameleonevb_mtd, mtd_parts, mtd_parts_nb); | 369 | partition_info_evb, |
370 | NUM_PARTITIONS); |
400 | 373 | ||
401 | /* Return happy */ | 374 | /* Return happy */ |
402 | return 0; | 375 | return 0; |
diff --git a/drivers/mtd/nand/pxa3xx_nand.c b/drivers/mtd/nand/pxa3xx_nand.c index 1fb3b3a80581..9eb7f879969e 100644 --- a/drivers/mtd/nand/pxa3xx_nand.c +++ b/drivers/mtd/nand/pxa3xx_nand.c | |||
@@ -110,6 +110,7 @@ enum { | |||
110 | 110 | ||
111 | enum { | 111 | enum { |
112 | STATE_IDLE = 0, | 112 | STATE_IDLE = 0, |
113 | STATE_PREPARED, | ||
113 | STATE_CMD_HANDLE, | 114 | STATE_CMD_HANDLE, |
114 | STATE_DMA_READING, | 115 | STATE_DMA_READING, |
115 | STATE_DMA_WRITING, | 116 | STATE_DMA_WRITING, |
@@ -120,21 +121,40 @@ enum { | |||
120 | STATE_READY, | 121 | STATE_READY, |
121 | }; | 122 | }; |
122 | 123 | ||
123 | struct pxa3xx_nand_info { | 124 | struct pxa3xx_nand_host { |
124 | struct nand_chip nand_chip; | 125 | struct nand_chip chip; |
126 | struct pxa3xx_nand_cmdset *cmdset; | ||
127 | struct mtd_info *mtd; | ||
128 | void *info_data; | ||
129 | |||
130 | /* page size of attached chip */ | ||
131 | unsigned int page_size; | ||
132 | int use_ecc; | ||
133 | int cs; | ||
125 | 134 | ||
135 | /* calculated from pxa3xx_nand_flash data */ | ||
136 | unsigned int col_addr_cycles; | ||
137 | unsigned int row_addr_cycles; | ||
138 | size_t read_id_bytes; | ||
139 | |||
140 | /* cached register value */ | ||
141 | uint32_t reg_ndcr; | ||
142 | uint32_t ndtr0cs0; | ||
143 | uint32_t ndtr1cs0; | ||
144 | }; | ||
145 | |||
146 | struct pxa3xx_nand_info { | ||
126 | struct nand_hw_control controller; | 147 | struct nand_hw_control controller; |
127 | struct platform_device *pdev; | 148 | struct platform_device *pdev; |
128 | struct pxa3xx_nand_cmdset *cmdset; | ||
129 | 149 | ||
130 | struct clk *clk; | 150 | struct clk *clk; |
131 | void __iomem *mmio_base; | 151 | void __iomem *mmio_base; |
132 | unsigned long mmio_phys; | 152 | unsigned long mmio_phys; |
153 | struct completion cmd_complete; | ||
133 | 154 | ||
134 | unsigned int buf_start; | 155 | unsigned int buf_start; |
135 | unsigned int buf_count; | 156 | unsigned int buf_count; |
136 | 157 | ||
137 | struct mtd_info *mtd; | ||
138 | /* DMA information */ | 158 | /* DMA information */ |
139 | int drcmr_dat; | 159 | int drcmr_dat; |
140 | int drcmr_cmd; | 160 | int drcmr_cmd; |
@@ -142,44 +162,27 @@ struct pxa3xx_nand_info { | |||
142 | unsigned char *data_buff; | 162 | unsigned char *data_buff; |
143 | unsigned char *oob_buff; | 163 | unsigned char *oob_buff; |
144 | dma_addr_t data_buff_phys; | 164 | dma_addr_t data_buff_phys; |
145 | size_t data_buff_size; | ||
146 | int data_dma_ch; | 165 | int data_dma_ch; |
147 | struct pxa_dma_desc *data_desc; | 166 | struct pxa_dma_desc *data_desc; |
148 | dma_addr_t data_desc_addr; | 167 | dma_addr_t data_desc_addr; |
149 | 168 | ||
150 | uint32_t reg_ndcr; | 169 | struct pxa3xx_nand_host *host[NUM_CHIP_SELECT]; |
151 | |||
152 | /* saved column/page_addr during CMD_SEQIN */ | ||
153 | int seqin_column; | ||
154 | int seqin_page_addr; | ||
155 | |||
156 | /* relate to the command */ | ||
157 | unsigned int state; | 170 | unsigned int state; |
158 | 171 | ||
172 | int cs; | ||
159 | int use_ecc; /* use HW ECC ? */ | 173 | int use_ecc; /* use HW ECC ? */ |
160 | int use_dma; /* use DMA ? */ | 174 | int use_dma; /* use DMA ? */ |
161 | int is_ready; | 175 | int is_ready; |
162 | 176 | ||
163 | unsigned int page_size; /* page size of attached chip */ | 177 | unsigned int page_size; /* page size of attached chip */ |
164 | unsigned int data_size; /* data size in FIFO */ | 178 | unsigned int data_size; /* data size in FIFO */ |
179 | unsigned int oob_size; | ||
165 | int retcode; | 180 | int retcode; |
166 | struct completion cmd_complete; | ||
167 | 181 | ||
168 | /* generated NDCBx register values */ | 182 | /* generated NDCBx register values */ |
169 | uint32_t ndcb0; | 183 | uint32_t ndcb0; |
170 | uint32_t ndcb1; | 184 | uint32_t ndcb1; |
171 | uint32_t ndcb2; | 185 | uint32_t ndcb2; |
172 | |||
173 | /* timing calcuted from setting */ | ||
174 | uint32_t ndtr0cs0; | ||
175 | uint32_t ndtr1cs0; | ||
176 | |||
177 | /* calculated from pxa3xx_nand_flash data */ | ||
178 | size_t oob_size; | ||
179 | size_t read_id_bytes; | ||
180 | |||
181 | unsigned int col_addr_cycles; | ||
182 | unsigned int row_addr_cycles; | ||
183 | }; | 186 | }; |
184 | 187 | ||
185 | static int use_dma = 1; | 188 | static int use_dma = 1; |
@@ -225,7 +228,7 @@ static struct pxa3xx_nand_flash builtin_flash_types[] = { | |||
225 | /* Define a default flash type setting serve as flash detecting only */ | 228 | /* Define a default flash type setting serve as flash detecting only */ |
226 | #define DEFAULT_FLASH_TYPE (&builtin_flash_types[0]) | 229 | #define DEFAULT_FLASH_TYPE (&builtin_flash_types[0]) |
227 | 230 | ||
228 | const char *mtd_names[] = {"pxa3xx_nand-0", NULL}; | 231 | const char *mtd_names[] = {"pxa3xx_nand-0", "pxa3xx_nand-1", NULL}; |
229 | 232 | ||
230 | #define NDTR0_tCH(c) (min((c), 7) << 19) | 233 | #define NDTR0_tCH(c) (min((c), 7) << 19) |
231 | #define NDTR0_tCS(c) (min((c), 7) << 16) | 234 | #define NDTR0_tCS(c) (min((c), 7) << 16) |
@@ -241,9 +244,10 @@ const char *mtd_names[] = {"pxa3xx_nand-0", NULL}; | |||
241 | /* convert nano-seconds to nand flash controller clock cycles */ | 244 | /* convert nano-seconds to nand flash controller clock cycles */ |
242 | #define ns2cycle(ns, clk) (int)((ns) * (clk / 1000000) / 1000) | 245 | #define ns2cycle(ns, clk) (int)((ns) * (clk / 1000000) / 1000) |
243 | 246 | ||
244 | static void pxa3xx_nand_set_timing(struct pxa3xx_nand_info *info, | 247 | static void pxa3xx_nand_set_timing(struct pxa3xx_nand_host *host, |
245 | const struct pxa3xx_nand_timing *t) | 248 | const struct pxa3xx_nand_timing *t) |
246 | { | 249 | { |
250 | struct pxa3xx_nand_info *info = host->info_data; | ||
247 | unsigned long nand_clk = clk_get_rate(info->clk); | 251 | unsigned long nand_clk = clk_get_rate(info->clk); |
248 | uint32_t ndtr0, ndtr1; | 252 | uint32_t ndtr0, ndtr1; |
249 | 253 | ||
@@ -258,23 +262,24 @@ static void pxa3xx_nand_set_timing(struct pxa3xx_nand_info *info, | |||
258 | NDTR1_tWHR(ns2cycle(t->tWHR, nand_clk)) | | 262 | NDTR1_tWHR(ns2cycle(t->tWHR, nand_clk)) | |
259 | NDTR1_tAR(ns2cycle(t->tAR, nand_clk)); | 263 | NDTR1_tAR(ns2cycle(t->tAR, nand_clk)); |
260 | 264 | ||
261 | info->ndtr0cs0 = ndtr0; | 265 | host->ndtr0cs0 = ndtr0; |
262 | info->ndtr1cs0 = ndtr1; | 266 | host->ndtr1cs0 = ndtr1; |
263 | nand_writel(info, NDTR0CS0, ndtr0); | 267 | nand_writel(info, NDTR0CS0, ndtr0); |
264 | nand_writel(info, NDTR1CS0, ndtr1); | 268 | nand_writel(info, NDTR1CS0, ndtr1); |
265 | } | 269 | } |
266 | 270 | ||
267 | static void pxa3xx_set_datasize(struct pxa3xx_nand_info *info) | 271 | static void pxa3xx_set_datasize(struct pxa3xx_nand_info *info) |
268 | { | 272 | { |
269 | int oob_enable = info->reg_ndcr & NDCR_SPARE_EN; | 273 | struct pxa3xx_nand_host *host = info->host[info->cs]; |
274 | int oob_enable = host->reg_ndcr & NDCR_SPARE_EN; | ||
270 | 275 | ||
271 | info->data_size = info->page_size; | 276 | info->data_size = host->page_size; |
272 | if (!oob_enable) { | 277 | if (!oob_enable) { |
273 | info->oob_size = 0; | 278 | info->oob_size = 0; |
274 | return; | 279 | return; |
275 | } | 280 | } |
276 | 281 | ||
277 | switch (info->page_size) { | 282 | switch (host->page_size) { |
278 | case 2048: | 283 | case 2048: |
279 | info->oob_size = (info->use_ecc) ? 40 : 64; | 284 | info->oob_size = (info->use_ecc) ? 40 : 64; |
280 | break; | 285 | break; |
@@ -292,9 +297,10 @@ static void pxa3xx_set_datasize(struct pxa3xx_nand_info *info) | |||
292 | */ | 297 | */ |
293 | static void pxa3xx_nand_start(struct pxa3xx_nand_info *info) | 298 | static void pxa3xx_nand_start(struct pxa3xx_nand_info *info) |
294 | { | 299 | { |
300 | struct pxa3xx_nand_host *host = info->host[info->cs]; | ||
295 | uint32_t ndcr; | 301 | uint32_t ndcr; |
296 | 302 | ||
297 | ndcr = info->reg_ndcr; | 303 | ndcr = host->reg_ndcr; |
298 | ndcr |= info->use_ecc ? NDCR_ECC_EN : 0; | 304 | ndcr |= info->use_ecc ? NDCR_ECC_EN : 0; |
299 | ndcr |= info->use_dma ? NDCR_DMA_EN : 0; | 305 | ndcr |= info->use_dma ? NDCR_DMA_EN : 0; |
300 | ndcr |= NDCR_ND_RUN; | 306 | ndcr |= NDCR_ND_RUN; |
@@ -359,7 +365,7 @@ static void handle_data_pio(struct pxa3xx_nand_info *info) | |||
359 | DIV_ROUND_UP(info->oob_size, 4)); | 365 | DIV_ROUND_UP(info->oob_size, 4)); |
360 | break; | 366 | break; |
361 | default: | 367 | default: |
362 | printk(KERN_ERR "%s: invalid state %d\n", __func__, | 368 | dev_err(&info->pdev->dev, "%s: invalid state %d\n", __func__, |
363 | info->state); | 369 | info->state); |
364 | BUG(); | 370 | BUG(); |
365 | } | 371 | } |
@@ -385,7 +391,7 @@ static void start_data_dma(struct pxa3xx_nand_info *info) | |||
385 | desc->dcmd |= DCMD_INCTRGADDR | DCMD_FLOWSRC; | 391 | desc->dcmd |= DCMD_INCTRGADDR | DCMD_FLOWSRC; |
386 | break; | 392 | break; |
387 | default: | 393 | default: |
388 | printk(KERN_ERR "%s: invalid state %d\n", __func__, | 394 | dev_err(&info->pdev->dev, "%s: invalid state %d\n", __func__, |
389 | info->state); | 395 | info->state); |
390 | BUG(); | 396 | BUG(); |
391 | } | 397 | } |
@@ -416,6 +422,15 @@ static irqreturn_t pxa3xx_nand_irq(int irq, void *devid) | |||
416 | { | 422 | { |
417 | struct pxa3xx_nand_info *info = devid; | 423 | struct pxa3xx_nand_info *info = devid; |
418 | unsigned int status, is_completed = 0; | 424 | unsigned int status, is_completed = 0; |
425 | unsigned int ready, cmd_done; | ||
426 | |||
427 | if (info->cs == 0) { | ||
428 | ready = NDSR_FLASH_RDY; | ||
429 | cmd_done = NDSR_CS0_CMDD; | ||
430 | } else { | ||
431 | ready = NDSR_RDY; | ||
432 | cmd_done = NDSR_CS1_CMDD; | ||
433 | } | ||
419 | 434 | ||
420 | status = nand_readl(info, NDSR); | 435 | status = nand_readl(info, NDSR); |
421 | 436 | ||
@@ -437,11 +452,11 @@ static irqreturn_t pxa3xx_nand_irq(int irq, void *devid) | |||
437 | handle_data_pio(info); | 452 | handle_data_pio(info); |
438 | } | 453 | } |
439 | } | 454 | } |
440 | if (status & NDSR_CS0_CMDD) { | 455 | if (status & cmd_done) { |
441 | info->state = STATE_CMD_DONE; | 456 | info->state = STATE_CMD_DONE; |
442 | is_completed = 1; | 457 | is_completed = 1; |
443 | } | 458 | } |
444 | if (status & NDSR_FLASH_RDY) { | 459 | if (status & ready) { |
445 | info->is_ready = 1; | 460 | info->is_ready = 1; |
446 | info->state = STATE_READY; | 461 | info->state = STATE_READY; |
447 | } | 462 | } |
@@ -463,12 +478,6 @@ NORMAL_IRQ_EXIT: | |||
463 | return IRQ_HANDLED; | 478 | return IRQ_HANDLED; |
464 | } | 479 | } |
465 | 480 | ||
466 | static int pxa3xx_nand_dev_ready(struct mtd_info *mtd) | ||
467 | { | ||
468 | struct pxa3xx_nand_info *info = mtd->priv; | ||
469 | return (nand_readl(info, NDSR) & NDSR_RDY) ? 1 : 0; | ||
470 | } | ||
471 | |||
472 | static inline int is_buf_blank(uint8_t *buf, size_t len) | 481 | static inline int is_buf_blank(uint8_t *buf, size_t len) |
473 | { | 482 | { |
474 | for (; len > 0; len--) | 483 | for (; len > 0; len--) |
@@ -481,10 +490,12 @@ static int prepare_command_pool(struct pxa3xx_nand_info *info, int command, | |||
481 | uint16_t column, int page_addr) | 490 | uint16_t column, int page_addr) |
482 | { | 491 | { |
483 | uint16_t cmd; | 492 | uint16_t cmd; |
484 | int addr_cycle, exec_cmd, ndcb0; | 493 | int addr_cycle, exec_cmd; |
485 | struct mtd_info *mtd = info->mtd; | 494 | struct pxa3xx_nand_host *host; |
495 | struct mtd_info *mtd; | ||
486 | 496 | ||
487 | ndcb0 = 0; | 497 | host = info->host[info->cs]; |
498 | mtd = host->mtd; | ||
488 | addr_cycle = 0; | 499 | addr_cycle = 0; |
489 | exec_cmd = 1; | 500 | exec_cmd = 1; |
490 | 501 | ||
@@ -495,6 +506,10 @@ static int prepare_command_pool(struct pxa3xx_nand_info *info, int command, | |||
495 | info->use_ecc = 0; | 506 | info->use_ecc = 0; |
496 | info->is_ready = 0; | 507 | info->is_ready = 0; |
497 | info->retcode = ERR_NONE; | 508 | info->retcode = ERR_NONE; |
509 | if (info->cs != 0) | ||
510 | info->ndcb0 = NDCB0_CSEL; | ||
511 | else | ||
512 | info->ndcb0 = 0; | ||
498 | 513 | ||
499 | switch (command) { | 514 | switch (command) { |
500 | case NAND_CMD_READ0: | 515 | case NAND_CMD_READ0: |
@@ -512,20 +527,19 @@ static int prepare_command_pool(struct pxa3xx_nand_info *info, int command, | |||
512 | break; | 527 | break; |
513 | } | 528 | } |
514 | 529 | ||
515 | info->ndcb0 = ndcb0; | 530 | addr_cycle = NDCB0_ADDR_CYC(host->row_addr_cycles |
516 | addr_cycle = NDCB0_ADDR_CYC(info->row_addr_cycles | 531 | + host->col_addr_cycles); |
517 | + info->col_addr_cycles); | ||
518 | 532 | ||
519 | switch (command) { | 533 | switch (command) { |
520 | case NAND_CMD_READOOB: | 534 | case NAND_CMD_READOOB: |
521 | case NAND_CMD_READ0: | 535 | case NAND_CMD_READ0: |
522 | cmd = info->cmdset->read1; | 536 | cmd = host->cmdset->read1; |
523 | if (command == NAND_CMD_READOOB) | 537 | if (command == NAND_CMD_READOOB) |
524 | info->buf_start = mtd->writesize + column; | 538 | info->buf_start = mtd->writesize + column; |
525 | else | 539 | else |
526 | info->buf_start = column; | 540 | info->buf_start = column; |
527 | 541 | ||
528 | if (unlikely(info->page_size < PAGE_CHUNK_SIZE)) | 542 | if (unlikely(host->page_size < PAGE_CHUNK_SIZE)) |
529 | info->ndcb0 |= NDCB0_CMD_TYPE(0) | 543 | info->ndcb0 |= NDCB0_CMD_TYPE(0) |
530 | | addr_cycle | 544 | | addr_cycle |
531 | | (cmd & NDCB0_CMD1_MASK); | 545 | | (cmd & NDCB0_CMD1_MASK); |
@@ -537,7 +551,7 @@ static int prepare_command_pool(struct pxa3xx_nand_info *info, int command, | |||
537 | 551 | ||
538 | case NAND_CMD_SEQIN: | 552 | case NAND_CMD_SEQIN: |
539 | /* small page addr setting */ | 553 | /* small page addr setting */ |
540 | if (unlikely(info->page_size < PAGE_CHUNK_SIZE)) { | 554 | if (unlikely(host->page_size < PAGE_CHUNK_SIZE)) { |
541 | info->ndcb1 = ((page_addr & 0xFFFFFF) << 8) | 555 | info->ndcb1 = ((page_addr & 0xFFFFFF) << 8) |
542 | | (column & 0xFF); | 556 | | (column & 0xFF); |
543 | 557 | ||
@@ -564,7 +578,7 @@ static int prepare_command_pool(struct pxa3xx_nand_info *info, int command, | |||
564 | break; | 578 | break; |
565 | } | 579 | } |
566 | 580 | ||
567 | cmd = info->cmdset->program; | 581 | cmd = host->cmdset->program; |
568 | info->ndcb0 |= NDCB0_CMD_TYPE(0x1) | 582 | info->ndcb0 |= NDCB0_CMD_TYPE(0x1) |
569 | | NDCB0_AUTO_RS | 583 | | NDCB0_AUTO_RS |
570 | | NDCB0_ST_ROW_EN | 584 | | NDCB0_ST_ROW_EN |
@@ -574,8 +588,8 @@ static int prepare_command_pool(struct pxa3xx_nand_info *info, int command, | |||
574 | break; | 588 | break; |
575 | 589 | ||
576 | case NAND_CMD_READID: | 590 | case NAND_CMD_READID: |
577 | cmd = info->cmdset->read_id; | 591 | cmd = host->cmdset->read_id; |
578 | info->buf_count = info->read_id_bytes; | 592 | info->buf_count = host->read_id_bytes; |
579 | info->ndcb0 |= NDCB0_CMD_TYPE(3) | 593 | info->ndcb0 |= NDCB0_CMD_TYPE(3) |
580 | | NDCB0_ADDR_CYC(1) | 594 | | NDCB0_ADDR_CYC(1) |
581 | | cmd; | 595 | | cmd; |
@@ -583,7 +597,7 @@ static int prepare_command_pool(struct pxa3xx_nand_info *info, int command, | |||
583 | info->data_size = 8; | 597 | info->data_size = 8; |
584 | break; | 598 | break; |
585 | case NAND_CMD_STATUS: | 599 | case NAND_CMD_STATUS: |
586 | cmd = info->cmdset->read_status; | 600 | cmd = host->cmdset->read_status; |
587 | info->buf_count = 1; | 601 | info->buf_count = 1; |
588 | info->ndcb0 |= NDCB0_CMD_TYPE(4) | 602 | info->ndcb0 |= NDCB0_CMD_TYPE(4) |
589 | | NDCB0_ADDR_CYC(1) | 603 | | NDCB0_ADDR_CYC(1) |
@@ -593,7 +607,7 @@ static int prepare_command_pool(struct pxa3xx_nand_info *info, int command, | |||
593 | break; | 607 | break; |
594 | 608 | ||
595 | case NAND_CMD_ERASE1: | 609 | case NAND_CMD_ERASE1: |
596 | cmd = info->cmdset->erase; | 610 | cmd = host->cmdset->erase; |
597 | info->ndcb0 |= NDCB0_CMD_TYPE(2) | 611 | info->ndcb0 |= NDCB0_CMD_TYPE(2) |
598 | | NDCB0_AUTO_RS | 612 | | NDCB0_AUTO_RS |
599 | | NDCB0_ADDR_CYC(3) | 613 | | NDCB0_ADDR_CYC(3) |
@@ -604,7 +618,7 @@ static int prepare_command_pool(struct pxa3xx_nand_info *info, int command, | |||
604 | 618 | ||
605 | break; | 619 | break; |
606 | case NAND_CMD_RESET: | 620 | case NAND_CMD_RESET: |
607 | cmd = info->cmdset->reset; | 621 | cmd = host->cmdset->reset; |
608 | info->ndcb0 |= NDCB0_CMD_TYPE(5) | 622 | info->ndcb0 |= NDCB0_CMD_TYPE(5) |
609 | | cmd; | 623 | | cmd; |
610 | 624 | ||
@@ -616,8 +630,8 @@ static int prepare_command_pool(struct pxa3xx_nand_info *info, int command, | |||
616 | 630 | ||
617 | default: | 631 | default: |
618 | exec_cmd = 0; | 632 | exec_cmd = 0; |
619 | printk(KERN_ERR "pxa3xx-nand: non-supported" | 633 | dev_err(&info->pdev->dev, "non-supported command %x\n", |
620 | " command %x\n", command); | 634 | command); |
621 | break; | 635 | break; |
622 | } | 636 | } |
623 | 637 | ||
@@ -627,7 +641,8 @@ static int prepare_command_pool(struct pxa3xx_nand_info *info, int command, | |||
627 | static void pxa3xx_nand_cmdfunc(struct mtd_info *mtd, unsigned command, | 641 | static void pxa3xx_nand_cmdfunc(struct mtd_info *mtd, unsigned command, |
628 | int column, int page_addr) | 642 | int column, int page_addr) |
629 | { | 643 | { |
630 | struct pxa3xx_nand_info *info = mtd->priv; | 644 | struct pxa3xx_nand_host *host = mtd->priv; |
645 | struct pxa3xx_nand_info *info = host->info_data; | ||
631 | int ret, exec_cmd; | 646 | int ret, exec_cmd; |
632 | 647 | ||
633 | /* | 648 | /* |
@@ -635,9 +650,21 @@ static void pxa3xx_nand_cmdfunc(struct mtd_info *mtd, unsigned command, | |||
635 | * "byte" address into a "word" address appropriate | 650 | * "byte" address into a "word" address appropriate |
636 | * for indexing a word-oriented device | 651 | * for indexing a word-oriented device |
637 | */ | 652 | */ |
638 | if (info->reg_ndcr & NDCR_DWIDTH_M) | 653 | if (host->reg_ndcr & NDCR_DWIDTH_M) |
639 | column /= 2; | 654 | column /= 2; |
640 | 655 | ||
656 | /* | ||
657 | * There may be different NAND chip hooked to | ||
658 | * different chip select, so check whether | ||
659 | * chip select has been changed, if yes, reset the timing | ||
660 | */ | ||
661 | if (info->cs != host->cs) { | ||
662 | info->cs = host->cs; | ||
663 | nand_writel(info, NDTR0CS0, host->ndtr0cs0); | ||
664 | nand_writel(info, NDTR1CS0, host->ndtr1cs0); | ||
665 | } | ||
666 | |||
667 | info->state = STATE_PREPARED; | ||
641 | exec_cmd = prepare_command_pool(info, command, column, page_addr); | 668 | exec_cmd = prepare_command_pool(info, command, column, page_addr); |
642 | if (exec_cmd) { | 669 | if (exec_cmd) { |
643 | init_completion(&info->cmd_complete); | 670 | init_completion(&info->cmd_complete); |
@@ -646,12 +673,12 @@ static void pxa3xx_nand_cmdfunc(struct mtd_info *mtd, unsigned command, | |||
646 | ret = wait_for_completion_timeout(&info->cmd_complete, | 673 | ret = wait_for_completion_timeout(&info->cmd_complete, |
647 | CHIP_DELAY_TIMEOUT); | 674 | CHIP_DELAY_TIMEOUT); |
648 | if (!ret) { | 675 | if (!ret) { |
649 | printk(KERN_ERR "Wait time out!!!\n"); | 676 | dev_err(&info->pdev->dev, "Wait time out!!!\n"); |
650 | /* Stop State Machine for next command cycle */ | 677 | /* Stop State Machine for next command cycle */ |
651 | pxa3xx_nand_stop(info); | 678 | pxa3xx_nand_stop(info); |
652 | } | 679 | } |
653 | info->state = STATE_IDLE; | ||
654 | } | 680 | } |
681 | info->state = STATE_IDLE; | ||
655 | } | 682 | } |
656 | 683 | ||
657 | static void pxa3xx_nand_write_page_hwecc(struct mtd_info *mtd, | 684 | static void pxa3xx_nand_write_page_hwecc(struct mtd_info *mtd, |
@@ -664,7 +691,8 @@ static void pxa3xx_nand_write_page_hwecc(struct mtd_info *mtd, | |||
664 | static int pxa3xx_nand_read_page_hwecc(struct mtd_info *mtd, | 691 | static int pxa3xx_nand_read_page_hwecc(struct mtd_info *mtd, |
665 | struct nand_chip *chip, uint8_t *buf, int page) | 692 | struct nand_chip *chip, uint8_t *buf, int page) |
666 | { | 693 | { |
667 | struct pxa3xx_nand_info *info = mtd->priv; | 694 | struct pxa3xx_nand_host *host = mtd->priv; |
695 | struct pxa3xx_nand_info *info = host->info_data; | ||
668 | 696 | ||
669 | chip->read_buf(mtd, buf, mtd->writesize); | 697 | chip->read_buf(mtd, buf, mtd->writesize); |
670 | chip->read_buf(mtd, chip->oob_poi, mtd->oobsize); | 698 | chip->read_buf(mtd, chip->oob_poi, mtd->oobsize); |
@@ -685,6 +713,8 @@ static int pxa3xx_nand_read_page_hwecc(struct mtd_info *mtd, | |||
685 | * OOB, ignore such double bit errors | 713 | * OOB, ignore such double bit errors |
686 | */ | 714 | */ |
687 | if (is_buf_blank(buf, mtd->writesize)) | 715 | if (is_buf_blank(buf, mtd->writesize)) |
716 | info->retcode = ERR_NONE; | ||
717 | else | ||
688 | mtd->ecc_stats.failed++; | 718 | mtd->ecc_stats.failed++; |
689 | } | 719 | } |
690 | 720 | ||
@@ -693,7 +723,8 @@ static int pxa3xx_nand_read_page_hwecc(struct mtd_info *mtd, | |||
693 | 723 | ||
694 | static uint8_t pxa3xx_nand_read_byte(struct mtd_info *mtd) | 724 | static uint8_t pxa3xx_nand_read_byte(struct mtd_info *mtd) |
695 | { | 725 | { |
696 | struct pxa3xx_nand_info *info = mtd->priv; | 726 | struct pxa3xx_nand_host *host = mtd->priv; |
727 | struct pxa3xx_nand_info *info = host->info_data; | ||
697 | char retval = 0xFF; | 728 | char retval = 0xFF; |
698 | 729 | ||
699 | if (info->buf_start < info->buf_count) | 730 | if (info->buf_start < info->buf_count) |
@@ -705,7 +736,8 @@ static uint8_t pxa3xx_nand_read_byte(struct mtd_info *mtd) | |||
705 | 736 | ||
706 | static u16 pxa3xx_nand_read_word(struct mtd_info *mtd) | 737 | static u16 pxa3xx_nand_read_word(struct mtd_info *mtd) |
707 | { | 738 | { |
708 | struct pxa3xx_nand_info *info = mtd->priv; | 739 | struct pxa3xx_nand_host *host = mtd->priv; |
740 | struct pxa3xx_nand_info *info = host->info_data; | ||
709 | u16 retval = 0xFFFF; | 741 | u16 retval = 0xFFFF; |
710 | 742 | ||
711 | if (!(info->buf_start & 0x01) && info->buf_start < info->buf_count) { | 743 | if (!(info->buf_start & 0x01) && info->buf_start < info->buf_count) { |
@@ -717,7 +749,8 @@ static u16 pxa3xx_nand_read_word(struct mtd_info *mtd) | |||
717 | 749 | ||
718 | static void pxa3xx_nand_read_buf(struct mtd_info *mtd, uint8_t *buf, int len) | 750 | static void pxa3xx_nand_read_buf(struct mtd_info *mtd, uint8_t *buf, int len) |
719 | { | 751 | { |
720 | struct pxa3xx_nand_info *info = mtd->priv; | 752 | struct pxa3xx_nand_host *host = mtd->priv; |
753 | struct pxa3xx_nand_info *info = host->info_data; | ||
721 | int real_len = min_t(size_t, len, info->buf_count - info->buf_start); | 754 | int real_len = min_t(size_t, len, info->buf_count - info->buf_start); |
722 | 755 | ||
723 | memcpy(buf, info->data_buff + info->buf_start, real_len); | 756 | memcpy(buf, info->data_buff + info->buf_start, real_len); |
@@ -727,7 +760,8 @@ static void pxa3xx_nand_read_buf(struct mtd_info *mtd, uint8_t *buf, int len) | |||
727 | static void pxa3xx_nand_write_buf(struct mtd_info *mtd, | 760 | static void pxa3xx_nand_write_buf(struct mtd_info *mtd, |
728 | const uint8_t *buf, int len) | 761 | const uint8_t *buf, int len) |
729 | { | 762 | { |
730 | struct pxa3xx_nand_info *info = mtd->priv; | 763 | struct pxa3xx_nand_host *host = mtd->priv; |
764 | struct pxa3xx_nand_info *info = host->info_data; | ||
731 | int real_len = min_t(size_t, len, info->buf_count - info->buf_start); | 765 | int real_len = min_t(size_t, len, info->buf_count - info->buf_start); |
732 | 766 | ||
733 | memcpy(info->data_buff + info->buf_start, buf, real_len); | 767 | memcpy(info->data_buff + info->buf_start, buf, real_len); |
@@ -747,7 +781,8 @@ static void pxa3xx_nand_select_chip(struct mtd_info *mtd, int chip) | |||
747 | 781 | ||
748 | static int pxa3xx_nand_waitfunc(struct mtd_info *mtd, struct nand_chip *this) | 782 | static int pxa3xx_nand_waitfunc(struct mtd_info *mtd, struct nand_chip *this) |
749 | { | 783 | { |
750 | struct pxa3xx_nand_info *info = mtd->priv; | 784 | struct pxa3xx_nand_host *host = mtd->priv; |
785 | struct pxa3xx_nand_info *info = host->info_data; | ||
751 | 786 | ||
752 | /* pxa3xx_nand_send_command has waited for command complete */ | 787 | /* pxa3xx_nand_send_command has waited for command complete */ |
753 | if (this->state == FL_WRITING || this->state == FL_ERASING) { | 788 | if (this->state == FL_WRITING || this->state == FL_ERASING) { |
@@ -770,54 +805,70 @@ static int pxa3xx_nand_config_flash(struct pxa3xx_nand_info *info, | |||
770 | { | 805 | { |
771 | struct platform_device *pdev = info->pdev; | 806 | struct platform_device *pdev = info->pdev; |
772 | struct pxa3xx_nand_platform_data *pdata = pdev->dev.platform_data; | 807 | struct pxa3xx_nand_platform_data *pdata = pdev->dev.platform_data; |
808 | struct pxa3xx_nand_host *host = info->host[info->cs]; | ||
773 | uint32_t ndcr = 0x0; /* enable all interrupts */ | 809 | uint32_t ndcr = 0x0; /* enable all interrupts */ |
774 | 810 | ||
775 | if (f->page_size != 2048 && f->page_size != 512) | 811 | if (f->page_size != 2048 && f->page_size != 512) { |
812 | dev_err(&pdev->dev, "Current only support 2048 and 512 size\n"); | ||
776 | return -EINVAL; | 813 | return -EINVAL; |
814 | } | ||
777 | 815 | ||
778 | if (f->flash_width != 16 && f->flash_width != 8) | 816 | if (f->flash_width != 16 && f->flash_width != 8) { |
817 | dev_err(&pdev->dev, "Only support 8bit and 16 bit!\n"); | ||
779 | return -EINVAL; | 818 | return -EINVAL; |
819 | } | ||
780 | 820 | ||
781 | /* calculate flash information */ | 821 | /* calculate flash information */ |
782 | info->cmdset = &default_cmdset; | 822 | host->cmdset = &default_cmdset; |
783 | info->page_size = f->page_size; | 823 | host->page_size = f->page_size; |
784 | info->read_id_bytes = (f->page_size == 2048) ? 4 : 2; | 824 | host->read_id_bytes = (f->page_size == 2048) ? 4 : 2; |
785 | 825 | ||
786 | /* calculate addressing information */ | 826 | /* calculate addressing information */ |
787 | info->col_addr_cycles = (f->page_size == 2048) ? 2 : 1; | 827 | host->col_addr_cycles = (f->page_size == 2048) ? 2 : 1; |
788 | 828 | ||
789 | if (f->num_blocks * f->page_per_block > 65536) | 829 | if (f->num_blocks * f->page_per_block > 65536) |
790 | info->row_addr_cycles = 3; | 830 | host->row_addr_cycles = 3; |
791 | else | 831 | else |
792 | info->row_addr_cycles = 2; | 832 | host->row_addr_cycles = 2; |
793 | 833 | ||
794 | ndcr |= (pdata->enable_arbiter) ? NDCR_ND_ARB_EN : 0; | 834 | ndcr |= (pdata->enable_arbiter) ? NDCR_ND_ARB_EN : 0; |
795 | ndcr |= (info->col_addr_cycles == 2) ? NDCR_RA_START : 0; | 835 | ndcr |= (host->col_addr_cycles == 2) ? NDCR_RA_START : 0; |
796 | ndcr |= (f->page_per_block == 64) ? NDCR_PG_PER_BLK : 0; | 836 | ndcr |= (f->page_per_block == 64) ? NDCR_PG_PER_BLK : 0; |
797 | ndcr |= (f->page_size == 2048) ? NDCR_PAGE_SZ : 0; | 837 | ndcr |= (f->page_size == 2048) ? NDCR_PAGE_SZ : 0; |
798 | ndcr |= (f->flash_width == 16) ? NDCR_DWIDTH_M : 0; | 838 | ndcr |= (f->flash_width == 16) ? NDCR_DWIDTH_M : 0; |
799 | ndcr |= (f->dfc_width == 16) ? NDCR_DWIDTH_C : 0; | 839 | ndcr |= (f->dfc_width == 16) ? NDCR_DWIDTH_C : 0; |
800 | 840 | ||
801 | ndcr |= NDCR_RD_ID_CNT(info->read_id_bytes); | 841 | ndcr |= NDCR_RD_ID_CNT(host->read_id_bytes); |
802 | ndcr |= NDCR_SPARE_EN; /* enable spare by default */ | 842 | ndcr |= NDCR_SPARE_EN; /* enable spare by default */ |
803 | 843 | ||
804 | info->reg_ndcr = ndcr; | 844 | host->reg_ndcr = ndcr; |
805 | 845 | ||
806 | pxa3xx_nand_set_timing(info, f->timing); | 846 | pxa3xx_nand_set_timing(host, f->timing); |
807 | return 0; | 847 | return 0; |
808 | } | 848 | } |
809 | 849 | ||
810 | static int pxa3xx_nand_detect_config(struct pxa3xx_nand_info *info) | 850 | static int pxa3xx_nand_detect_config(struct pxa3xx_nand_info *info) |
811 | { | 851 | { |
852 | /* | ||
853 | * We set 0 by hard coding here, for we don't support keep_config | ||
854 | * when there is more than one chip attached to the controller | ||
855 | */ | ||
856 | struct pxa3xx_nand_host *host = info->host[0]; | ||
812 | uint32_t ndcr = nand_readl(info, NDCR); | 857 | uint32_t ndcr = nand_readl(info, NDCR); |
813 | info->page_size = ndcr & NDCR_PAGE_SZ ? 2048 : 512; | ||
814 | /* set info fields needed to read id */ | ||
815 | info->read_id_bytes = (info->page_size == 2048) ? 4 : 2; | ||
816 | info->reg_ndcr = ndcr; | ||
817 | info->cmdset = &default_cmdset; | ||
818 | 858 | ||
819 | info->ndtr0cs0 = nand_readl(info, NDTR0CS0); | 859 | if (ndcr & NDCR_PAGE_SZ) { |
820 | info->ndtr1cs0 = nand_readl(info, NDTR1CS0); | 860 | host->page_size = 2048; |
861 | host->read_id_bytes = 4; | ||
862 | } else { | ||
863 | host->page_size = 512; | ||
864 | host->read_id_bytes = 2; | ||
865 | } | ||
866 | |||
867 | host->reg_ndcr = ndcr & ~NDCR_INT_MASK; | ||
868 | host->cmdset = &default_cmdset; | ||
869 | |||
870 | host->ndtr0cs0 = nand_readl(info, NDTR0CS0); | ||
871 | host->ndtr1cs0 = nand_readl(info, NDTR1CS0); | ||
821 | 872 | ||
822 | return 0; | 873 | return 0; |
823 | } | 874 | } |
@@ -847,7 +898,6 @@ static int pxa3xx_nand_init_buff(struct pxa3xx_nand_info *info) | |||
847 | return -ENOMEM; | 898 | return -ENOMEM; |
848 | } | 899 | } |
849 | 900 | ||
850 | info->data_buff_size = MAX_BUFF_SIZE; | ||
851 | info->data_desc = (void *)info->data_buff + data_desc_offset; | 901 | info->data_desc = (void *)info->data_buff + data_desc_offset; |
852 | info->data_desc_addr = info->data_buff_phys + data_desc_offset; | 902 | info->data_desc_addr = info->data_buff_phys + data_desc_offset; |
853 | 903 | ||
@@ -855,7 +905,7 @@ static int pxa3xx_nand_init_buff(struct pxa3xx_nand_info *info) | |||
855 | pxa3xx_nand_data_dma_irq, info); | 905 | pxa3xx_nand_data_dma_irq, info); |
856 | if (info->data_dma_ch < 0) { | 906 | if (info->data_dma_ch < 0) { |
857 | dev_err(&pdev->dev, "failed to request data dma\n"); | 907 | dev_err(&pdev->dev, "failed to request data dma\n"); |
858 | dma_free_coherent(&pdev->dev, info->data_buff_size, | 908 | dma_free_coherent(&pdev->dev, MAX_BUFF_SIZE, |
859 | info->data_buff, info->data_buff_phys); | 909 | info->data_buff, info->data_buff_phys); |
860 | return info->data_dma_ch; | 910 | return info->data_dma_ch; |
861 | } | 911 | } |
@@ -865,24 +915,28 @@ static int pxa3xx_nand_init_buff(struct pxa3xx_nand_info *info) | |||
865 | 915 | ||
866 | static int pxa3xx_nand_sensing(struct pxa3xx_nand_info *info) | 916 | static int pxa3xx_nand_sensing(struct pxa3xx_nand_info *info) |
867 | { | 917 | { |
868 | struct mtd_info *mtd = info->mtd; | 918 | struct mtd_info *mtd; |
869 | struct nand_chip *chip = mtd->priv; | 919 | int ret; |
870 | 920 | mtd = info->host[info->cs]->mtd; | |
871 | /* use the common timing to make a try */ | 921 | /* use the common timing to make a try */ |
872 | pxa3xx_nand_config_flash(info, &builtin_flash_types[0]); | 922 | ret = pxa3xx_nand_config_flash(info, &builtin_flash_types[0]); |
873 | chip->cmdfunc(mtd, NAND_CMD_RESET, 0, 0); | 923 | if (ret) |
924 | return ret; | ||
925 | |||
926 | pxa3xx_nand_cmdfunc(mtd, NAND_CMD_RESET, 0, 0); | ||
874 | if (info->is_ready) | 927 | if (info->is_ready) |
875 | return 1; | ||
876 | else | ||
877 | return 0; | 928 | return 0; |
929 | |||
930 | return -ENODEV; | ||
878 | } | 931 | } |
879 | 932 | ||
880 | static int pxa3xx_nand_scan(struct mtd_info *mtd) | 933 | static int pxa3xx_nand_scan(struct mtd_info *mtd) |
881 | { | 934 | { |
882 | struct pxa3xx_nand_info *info = mtd->priv; | 935 | struct pxa3xx_nand_host *host = mtd->priv; |
936 | struct pxa3xx_nand_info *info = host->info_data; | ||
883 | struct platform_device *pdev = info->pdev; | 937 | struct platform_device *pdev = info->pdev; |
884 | struct pxa3xx_nand_platform_data *pdata = pdev->dev.platform_data; | 938 | struct pxa3xx_nand_platform_data *pdata = pdev->dev.platform_data; |
885 | struct nand_flash_dev pxa3xx_flash_ids[2] = { {NULL,}, {NULL,} }; | 939 | struct nand_flash_dev pxa3xx_flash_ids[2], *def = NULL; |
886 | const struct pxa3xx_nand_flash *f = NULL; | 940 | const struct pxa3xx_nand_flash *f = NULL; |
887 | struct nand_chip *chip = mtd->priv; | 941 | struct nand_chip *chip = mtd->priv; |
888 | uint32_t id = -1; | 942 | uint32_t id = -1; |
@@ -893,22 +947,20 @@ static int pxa3xx_nand_scan(struct mtd_info *mtd) | |||
893 | goto KEEP_CONFIG; | 947 | goto KEEP_CONFIG; |
894 | 948 | ||
895 | ret = pxa3xx_nand_sensing(info); | 949 | ret = pxa3xx_nand_sensing(info); |
896 | if (!ret) { | 950 | if (ret) { |
897 | kfree(mtd); | 951 | dev_info(&info->pdev->dev, "There is no chip on cs %d!\n", |
898 | info->mtd = NULL; | 952 | info->cs); |
899 | printk(KERN_INFO "There is no nand chip on cs 0!\n"); | ||
900 | 953 | ||
901 | return -EINVAL; | 954 | return ret; |
902 | } | 955 | } |
903 | 956 | ||
904 | chip->cmdfunc(mtd, NAND_CMD_READID, 0, 0); | 957 | chip->cmdfunc(mtd, NAND_CMD_READID, 0, 0); |
905 | id = *((uint16_t *)(info->data_buff)); | 958 | id = *((uint16_t *)(info->data_buff)); |
906 | if (id != 0) | 959 | if (id != 0) |
907 | printk(KERN_INFO "Detect a flash id %x\n", id); | 960 | dev_info(&info->pdev->dev, "Detect a flash id %x\n", id); |
908 | else { | 961 | else { |
909 | kfree(mtd); | 962 | dev_warn(&info->pdev->dev, |
910 | info->mtd = NULL; | 963 | "Read out ID 0, potential timing set wrong!!\n"); |
911 | printk(KERN_WARNING "Read out ID 0, potential timing set wrong!!\n"); | ||
912 | 964 | ||
913 | return -EINVAL; | 965 | return -EINVAL; |
914 | } | 966 | } |
@@ -926,14 +978,17 @@ static int pxa3xx_nand_scan(struct mtd_info *mtd) | |||
926 | } | 978 | } |
927 | 979 | ||
928 | if (i >= (ARRAY_SIZE(builtin_flash_types) + pdata->num_flash - 1)) { | 980 | if (i >= (ARRAY_SIZE(builtin_flash_types) + pdata->num_flash - 1)) { |
929 | kfree(mtd); | 981 | dev_err(&info->pdev->dev, "ERROR!! flash not defined!!!\n"); |
930 | info->mtd = NULL; | ||
931 | printk(KERN_ERR "ERROR!! flash not defined!!!\n"); | ||
932 | 982 | ||
933 | return -EINVAL; | 983 | return -EINVAL; |
934 | } | 984 | } |
935 | 985 | ||
936 | pxa3xx_nand_config_flash(info, f); | 986 | ret = pxa3xx_nand_config_flash(info, f); |
987 | if (ret) { | ||
988 | dev_err(&info->pdev->dev, "ERROR! Configure failed\n"); | ||
989 | return ret; | ||
990 | } | ||
991 | |||
937 | pxa3xx_flash_ids[0].name = f->name; | 992 | pxa3xx_flash_ids[0].name = f->name; |
938 | pxa3xx_flash_ids[0].id = (f->chip_id >> 8) & 0xffff; | 993 | pxa3xx_flash_ids[0].id = (f->chip_id >> 8) & 0xffff; |
939 | pxa3xx_flash_ids[0].pagesize = f->page_size; | 994 | pxa3xx_flash_ids[0].pagesize = f->page_size; |
@@ -942,62 +997,78 @@ static int pxa3xx_nand_scan(struct mtd_info *mtd) | |||
942 | pxa3xx_flash_ids[0].erasesize = f->page_size * f->page_per_block; | 997 | pxa3xx_flash_ids[0].erasesize = f->page_size * f->page_per_block; |
943 | if (f->flash_width == 16) | 998 | if (f->flash_width == 16) |
944 | pxa3xx_flash_ids[0].options = NAND_BUSWIDTH_16; | 999 | pxa3xx_flash_ids[0].options = NAND_BUSWIDTH_16; |
1000 | pxa3xx_flash_ids[1].name = NULL; | ||
1001 | def = pxa3xx_flash_ids; | ||
945 | KEEP_CONFIG: | 1002 | KEEP_CONFIG: |
946 | if (nand_scan_ident(mtd, 1, pxa3xx_flash_ids)) | 1003 | chip->ecc.mode = NAND_ECC_HW; |
1004 | chip->ecc.size = host->page_size; | ||
1005 | |||
1006 | chip->options = NAND_NO_AUTOINCR; | ||
1007 | chip->options |= NAND_NO_READRDY; | ||
1008 | if (host->reg_ndcr & NDCR_DWIDTH_M) | ||
1009 | chip->options |= NAND_BUSWIDTH_16; | ||
1010 | |||
1011 | if (nand_scan_ident(mtd, 1, def)) | ||
947 | return -ENODEV; | 1012 | return -ENODEV; |
948 | /* calculate addressing information */ | 1013 | /* calculate addressing information */ |
949 | info->col_addr_cycles = (mtd->writesize >= 2048) ? 2 : 1; | 1014 | if (mtd->writesize >= 2048) |
1015 | host->col_addr_cycles = 2; | ||
1016 | else | ||
1017 | host->col_addr_cycles = 1; | ||
1018 | |||
950 | info->oob_buff = info->data_buff + mtd->writesize; | 1019 | info->oob_buff = info->data_buff + mtd->writesize; |
951 | if ((mtd->size >> chip->page_shift) > 65536) | 1020 | if ((mtd->size >> chip->page_shift) > 65536) |
952 | info->row_addr_cycles = 3; | 1021 | host->row_addr_cycles = 3; |
953 | else | 1022 | else |
954 | info->row_addr_cycles = 2; | 1023 | host->row_addr_cycles = 2; |
955 | mtd->name = mtd_names[0]; | ||
956 | chip->ecc.mode = NAND_ECC_HW; | ||
957 | chip->ecc.size = f->page_size; | ||
958 | |||
959 | chip->options = (f->flash_width == 16) ? NAND_BUSWIDTH_16 : 0; | ||
960 | chip->options |= NAND_NO_AUTOINCR; | ||
961 | chip->options |= NAND_NO_READRDY; | ||
962 | 1024 | ||
1025 | mtd->name = mtd_names[0]; | ||
963 | return nand_scan_tail(mtd); | 1026 | return nand_scan_tail(mtd); |
964 | } | 1027 | } |
965 | 1028 | ||
966 | static | 1029 | static int alloc_nand_resource(struct platform_device *pdev) |
967 | struct pxa3xx_nand_info *alloc_nand_resource(struct platform_device *pdev) | ||
968 | { | 1030 | { |
1031 | struct pxa3xx_nand_platform_data *pdata; | ||
969 | struct pxa3xx_nand_info *info; | 1032 | struct pxa3xx_nand_info *info; |
1033 | struct pxa3xx_nand_host *host; | ||
970 | struct nand_chip *chip; | 1034 | struct nand_chip *chip; |
971 | struct mtd_info *mtd; | 1035 | struct mtd_info *mtd; |
972 | struct resource *r; | 1036 | struct resource *r; |
973 | int ret, irq; | 1037 | int ret, irq, cs; |
974 | 1038 | ||
975 | mtd = kzalloc(sizeof(struct mtd_info) + sizeof(struct pxa3xx_nand_info), | 1039 | pdata = pdev->dev.platform_data; |
976 | GFP_KERNEL); | 1040 | info = kzalloc(sizeof(*info) + (sizeof(*mtd) + |
977 | if (!mtd) { | 1041 | sizeof(*host)) * pdata->num_cs, GFP_KERNEL); |
1042 | if (!info) { | ||
978 | dev_err(&pdev->dev, "failed to allocate memory\n"); | 1043 | dev_err(&pdev->dev, "failed to allocate memory\n"); |
979 | return NULL; | 1044 | return -ENOMEM; |
980 | } | 1045 | } |
981 | 1046 | ||
982 | info = (struct pxa3xx_nand_info *)(&mtd[1]); | ||
983 | chip = (struct nand_chip *)(&mtd[1]); | ||
984 | info->pdev = pdev; | 1047 | info->pdev = pdev; |
985 | info->mtd = mtd; | 1048 | for (cs = 0; cs < pdata->num_cs; cs++) { |
986 | mtd->priv = info; | 1049 | mtd = (struct mtd_info *)((unsigned int)&info[1] + |
987 | mtd->owner = THIS_MODULE; | 1050 | (sizeof(*mtd) + sizeof(*host)) * cs); |
988 | 1051 | chip = (struct nand_chip *)(&mtd[1]); | |
989 | chip->ecc.read_page = pxa3xx_nand_read_page_hwecc; | 1052 | host = (struct pxa3xx_nand_host *)chip; |
990 | chip->ecc.write_page = pxa3xx_nand_write_page_hwecc; | 1053 | info->host[cs] = host; |
991 | chip->controller = &info->controller; | 1054 | host->mtd = mtd; |
992 | chip->waitfunc = pxa3xx_nand_waitfunc; | 1055 | host->cs = cs; |
993 | chip->select_chip = pxa3xx_nand_select_chip; | 1056 | host->info_data = info; |
994 | chip->dev_ready = pxa3xx_nand_dev_ready; | 1057 | mtd->priv = host; |
995 | chip->cmdfunc = pxa3xx_nand_cmdfunc; | 1058 | mtd->owner = THIS_MODULE; |
996 | chip->read_word = pxa3xx_nand_read_word; | 1059 | |
997 | chip->read_byte = pxa3xx_nand_read_byte; | 1060 | chip->ecc.read_page = pxa3xx_nand_read_page_hwecc; |
998 | chip->read_buf = pxa3xx_nand_read_buf; | 1061 | chip->ecc.write_page = pxa3xx_nand_write_page_hwecc; |
999 | chip->write_buf = pxa3xx_nand_write_buf; | 1062 | chip->controller = &info->controller; |
1000 | chip->verify_buf = pxa3xx_nand_verify_buf; | 1063 | chip->waitfunc = pxa3xx_nand_waitfunc; |
1064 | chip->select_chip = pxa3xx_nand_select_chip; | ||
1065 | chip->cmdfunc = pxa3xx_nand_cmdfunc; | ||
1066 | chip->read_word = pxa3xx_nand_read_word; | ||
1067 | chip->read_byte = pxa3xx_nand_read_byte; | ||
1068 | chip->read_buf = pxa3xx_nand_read_buf; | ||
1069 | chip->write_buf = pxa3xx_nand_write_buf; | ||
1070 | chip->verify_buf = pxa3xx_nand_verify_buf; | ||
1071 | } | ||
1001 | 1072 | ||
1002 | spin_lock_init(&chip->controller->lock); | 1073 | spin_lock_init(&chip->controller->lock); |
1003 | init_waitqueue_head(&chip->controller->wq); | 1074 | init_waitqueue_head(&chip->controller->wq); |
@@ -1070,13 +1141,13 @@ struct pxa3xx_nand_info *alloc_nand_resource(struct platform_device *pdev) | |||
1070 | 1141 | ||
1071 | platform_set_drvdata(pdev, info); | 1142 | platform_set_drvdata(pdev, info); |
1072 | 1143 | ||
1073 | return info; | 1144 | return 0; |
1074 | 1145 | ||
1075 | fail_free_buf: | 1146 | fail_free_buf: |
1076 | free_irq(irq, info); | 1147 | free_irq(irq, info); |
1077 | if (use_dma) { | 1148 | if (use_dma) { |
1078 | pxa_free_dma(info->data_dma_ch); | 1149 | pxa_free_dma(info->data_dma_ch); |
1079 | dma_free_coherent(&pdev->dev, info->data_buff_size, | 1150 | dma_free_coherent(&pdev->dev, MAX_BUFF_SIZE, |
1080 | info->data_buff, info->data_buff_phys); | 1151 | info->data_buff, info->data_buff_phys); |
1081 | } else | 1152 | } else |
1082 | kfree(info->data_buff); | 1153 | kfree(info->data_buff); |
@@ -1088,17 +1159,21 @@ fail_put_clk: | |||
1088 | clk_disable(info->clk); | 1159 | clk_disable(info->clk); |
1089 | clk_put(info->clk); | 1160 | clk_put(info->clk); |
1090 | fail_free_mtd: | 1161 | fail_free_mtd: |
1091 | kfree(mtd); | 1162 | kfree(info); |
1092 | return NULL; | 1163 | return ret; |
1093 | } | 1164 | } |
1094 | 1165 | ||
1095 | static int pxa3xx_nand_remove(struct platform_device *pdev) | 1166 | static int pxa3xx_nand_remove(struct platform_device *pdev) |
1096 | { | 1167 | { |
1097 | struct pxa3xx_nand_info *info = platform_get_drvdata(pdev); | 1168 | struct pxa3xx_nand_info *info = platform_get_drvdata(pdev); |
1098 | struct mtd_info *mtd = info->mtd; | 1169 | struct pxa3xx_nand_platform_data *pdata; |
1099 | struct resource *r; | 1170 | struct resource *r; |
1100 | int irq; | 1171 | int irq, cs; |
1101 | 1172 | ||
1173 | if (!info) | ||
1174 | return 0; | ||
1175 | |||
1176 | pdata = pdev->dev.platform_data; | ||
1102 | platform_set_drvdata(pdev, NULL); | 1177 | platform_set_drvdata(pdev, NULL); |
1103 | 1178 | ||
1104 | irq = platform_get_irq(pdev, 0); | 1179 | irq = platform_get_irq(pdev, 0); |
@@ -1106,7 +1181,7 @@ static int pxa3xx_nand_remove(struct platform_device *pdev) | |||
1106 | free_irq(irq, info); | 1181 | free_irq(irq, info); |
1107 | if (use_dma) { | 1182 | if (use_dma) { |
1108 | pxa_free_dma(info->data_dma_ch); | 1183 | pxa_free_dma(info->data_dma_ch); |
1109 | dma_free_writecombine(&pdev->dev, info->data_buff_size, | 1184 | dma_free_writecombine(&pdev->dev, MAX_BUFF_SIZE, |
1110 | info->data_buff, info->data_buff_phys); | 1185 | info->data_buff, info->data_buff_phys); |
1111 | } else | 1186 | } else |
1112 | kfree(info->data_buff); | 1187 | kfree(info->data_buff); |
@@ -1118,10 +1193,9 @@ static int pxa3xx_nand_remove(struct platform_device *pdev) | |||
1118 | clk_disable(info->clk); | 1193 | clk_disable(info->clk); |
1119 | clk_put(info->clk); | 1194 | clk_put(info->clk); |
1120 | 1195 | ||
1121 | if (mtd) { | 1196 | for (cs = 0; cs < pdata->num_cs; cs++) |
1122 | mtd_device_unregister(mtd); | 1197 | nand_release(info->host[cs]->mtd); |
1123 | kfree(mtd); | 1198 | kfree(info); |
1124 | } | ||
1125 | return 0; | 1199 | return 0; |
1126 | } | 1200 | } |
1127 | 1201 | ||
@@ -1129,6 +1203,7 @@ static int pxa3xx_nand_probe(struct platform_device *pdev) | |||
1129 | { | 1203 | { |
1130 | struct pxa3xx_nand_platform_data *pdata; | 1204 | struct pxa3xx_nand_platform_data *pdata; |
1131 | struct pxa3xx_nand_info *info; | 1205 | struct pxa3xx_nand_info *info; |
1206 | int ret, cs, probe_success; | ||
1132 | 1207 | ||
1133 | pdata = pdev->dev.platform_data; | 1208 | pdata = pdev->dev.platform_data; |
1134 | if (!pdata) { | 1209 | if (!pdata) { |
@@ -1136,52 +1211,88 @@ static int pxa3xx_nand_probe(struct platform_device *pdev) | |||
1136 | return -ENODEV; | 1211 | return -ENODEV; |
1137 | } | 1212 | } |
1138 | 1213 | ||
1139 | info = alloc_nand_resource(pdev); | 1214 | ret = alloc_nand_resource(pdev); |
1140 | if (info == NULL) | 1215 | if (ret) { |
1141 | return -ENOMEM; | 1216 | dev_err(&pdev->dev, "alloc nand resource failed\n"); |
1142 | 1217 | return ret; | |
1143 | if (pxa3xx_nand_scan(info->mtd)) { | ||
1144 | dev_err(&pdev->dev, "failed to scan nand\n"); | ||
1145 | pxa3xx_nand_remove(pdev); | ||
1146 | return -ENODEV; | ||
1147 | } | 1218 | } |
1148 | 1219 | ||
1149 | if (mtd_has_cmdlinepart()) { | 1220 | info = platform_get_drvdata(pdev); |
1150 | const char *probes[] = { "cmdlinepart", NULL }; | 1221 | probe_success = 0; |
1151 | struct mtd_partition *parts; | 1222 | for (cs = 0; cs < pdata->num_cs; cs++) { |
1152 | int nr_parts; | 1223 | info->cs = cs; |
1224 | ret = pxa3xx_nand_scan(info->host[cs]->mtd); | ||
1225 | if (ret) { | ||
1226 | dev_warn(&pdev->dev, "failed to scan nand at cs %d\n", | ||
1227 | cs); | ||
1228 | continue; | ||
1229 | } | ||
1153 | 1230 | ||
1154 | nr_parts = parse_mtd_partitions(info->mtd, probes, &parts, 0); | 1231 | ret = mtd_device_parse_register(info->host[cs]->mtd, NULL, 0, |
1232 | pdata->parts[cs], pdata->nr_parts[cs]); | ||
1233 | if (!ret) | ||
1234 | probe_success = 1; | ||
1235 | } | ||
1155 | 1236 | ||
1156 | if (nr_parts) | 1237 | if (!probe_success) { |
1157 | return mtd_device_register(info->mtd, parts, nr_parts); | 1238 | pxa3xx_nand_remove(pdev); |
1239 | return -ENODEV; | ||
1158 | } | 1240 | } |
1159 | 1241 | ||
1160 | return mtd_device_register(info->mtd, pdata->parts, pdata->nr_parts); | 1242 | return 0; |
1161 | } | 1243 | } |
1162 | 1244 | ||
1163 | #ifdef CONFIG_PM | 1245 | #ifdef CONFIG_PM |
1164 | static int pxa3xx_nand_suspend(struct platform_device *pdev, pm_message_t state) | 1246 | static int pxa3xx_nand_suspend(struct platform_device *pdev, pm_message_t state) |
1165 | { | 1247 | { |
1166 | struct pxa3xx_nand_info *info = platform_get_drvdata(pdev); | 1248 | struct pxa3xx_nand_info *info = platform_get_drvdata(pdev); |
1167 | struct mtd_info *mtd = info->mtd; | 1249 | struct pxa3xx_nand_platform_data *pdata; |
1250 | struct mtd_info *mtd; | ||
1251 | int cs; | ||
1168 | 1252 | ||
1253 | pdata = pdev->dev.platform_data; | ||
1169 | if (info->state) { | 1254 | if (info->state) { |
1170 | dev_err(&pdev->dev, "driver busy, state = %d\n", info->state); | 1255 | dev_err(&pdev->dev, "driver busy, state = %d\n", info->state); |
1171 | return -EAGAIN; | 1256 | return -EAGAIN; |
1172 | } | 1257 | } |
1173 | 1258 | ||
1259 | for (cs = 0; cs < pdata->num_cs; cs++) { | ||
1260 | mtd = info->host[cs]->mtd; | ||
1261 | mtd->suspend(mtd); | ||
1262 | } | ||
1263 | |||
1174 | return 0; | 1264 | return 0; |
1175 | } | 1265 | } |
1176 | 1266 | ||
1177 | static int pxa3xx_nand_resume(struct platform_device *pdev) | 1267 | static int pxa3xx_nand_resume(struct platform_device *pdev) |
1178 | { | 1268 | { |
1179 | struct pxa3xx_nand_info *info = platform_get_drvdata(pdev); | 1269 | struct pxa3xx_nand_info *info = platform_get_drvdata(pdev); |
1180 | struct mtd_info *mtd = info->mtd; | 1270 | struct pxa3xx_nand_platform_data *pdata; |
1271 | struct mtd_info *mtd; | ||
1272 | int cs; | ||
1181 | 1273 | ||
1182 | nand_writel(info, NDTR0CS0, info->ndtr0cs0); | 1274 | pdata = pdev->dev.platform_data; |
1183 | nand_writel(info, NDTR1CS0, info->ndtr1cs0); | 1275 | /* We don't want to handle interrupt without calling mtd routine */ |
1184 | clk_enable(info->clk); | 1276 | disable_int(info, NDCR_INT_MASK); |
1277 | |||
1278 | /* | ||
1279 | * Directly set the chip select to a invalid value, | ||
1280 | * then the driver would reset the timing according | ||
1281 | * to current chip select at the beginning of cmdfunc | ||
1282 | */ | ||
1283 | info->cs = 0xff; | ||
1284 | |||
1285 | /* | ||
1286 | * As the spec says, the NDSR would be updated to 0x1800 when | ||
1287 | * doing the nand_clk disable/enable. | ||
1288 | * To prevent it damaging state machine of the driver, clear | ||
1289 | * all status before resume | ||
1290 | */ | ||
1291 | nand_writel(info, NDSR, NDSR_MASK); | ||
1292 | for (cs = 0; cs < pdata->num_cs; cs++) { | ||
1293 | mtd = info->host[cs]->mtd; | ||
1294 | mtd->resume(mtd); | ||
1295 | } | ||
1185 | 1296 | ||
1186 | return 0; | 1297 | return 0; |
1187 | } | 1298 | } |
diff --git a/drivers/mtd/nand/r852.c b/drivers/mtd/nand/r852.c index cae2e013c986..f20f393bfda6 100644 --- a/drivers/mtd/nand/r852.c +++ b/drivers/mtd/nand/r852.c | |||
@@ -1027,7 +1027,7 @@ void r852_shutdown(struct pci_dev *pci_dev) | |||
1027 | } | 1027 | } |
1028 | 1028 | ||
1029 | #ifdef CONFIG_PM | 1029 | #ifdef CONFIG_PM |
1030 | int r852_suspend(struct device *device) | 1030 | static int r852_suspend(struct device *device) |
1031 | { | 1031 | { |
1032 | struct r852_device *dev = pci_get_drvdata(to_pci_dev(device)); | 1032 | struct r852_device *dev = pci_get_drvdata(to_pci_dev(device)); |
1033 | 1033 | ||
@@ -1048,7 +1048,7 @@ int r852_suspend(struct device *device) | |||
1048 | return 0; | 1048 | return 0; |
1049 | } | 1049 | } |
1050 | 1050 | ||
1051 | int r852_resume(struct device *device) | 1051 | static int r852_resume(struct device *device) |
1052 | { | 1052 | { |
1053 | struct r852_device *dev = pci_get_drvdata(to_pci_dev(device)); | 1053 | struct r852_device *dev = pci_get_drvdata(to_pci_dev(device)); |
1054 | 1054 | ||
@@ -1092,7 +1092,7 @@ static const struct pci_device_id r852_pci_id_tbl[] = { | |||
1092 | 1092 | ||
1093 | MODULE_DEVICE_TABLE(pci, r852_pci_id_tbl); | 1093 | MODULE_DEVICE_TABLE(pci, r852_pci_id_tbl); |
1094 | 1094 | ||
1095 | SIMPLE_DEV_PM_OPS(r852_pm_ops, r852_suspend, r852_resume); | 1095 | static SIMPLE_DEV_PM_OPS(r852_pm_ops, r852_suspend, r852_resume); |
1096 | 1096 | ||
1097 | static struct pci_driver r852_pci_driver = { | 1097 | static struct pci_driver r852_pci_driver = { |
1098 | .name = DRV_NAME, | 1098 | .name = DRV_NAME, |
diff --git a/drivers/mtd/nand/rtc_from4.c b/drivers/mtd/nand/rtc_from4.c index c9f9127ff770..f309addc2fa0 100644 --- a/drivers/mtd/nand/rtc_from4.c +++ b/drivers/mtd/nand/rtc_from4.c | |||
@@ -351,7 +351,7 @@ static int rtc_from4_correct_data(struct mtd_info *mtd, const u_char *buf, u_cha | |||
351 | return 0; | 351 | return 0; |
352 | } | 352 | } |
353 | 353 | ||
354 | /* Read the syndrom pattern from the FPGA and correct the bitorder */ | 354 | /* Read the syndrome pattern from the FPGA and correct the bitorder */ |
355 | rs_ecc = (volatile unsigned short *)(rtc_from4_fio_base + RTC_FROM4_RS_ECC); | 355 | rs_ecc = (volatile unsigned short *)(rtc_from4_fio_base + RTC_FROM4_RS_ECC); |
356 | for (i = 0; i < 8; i++) { | 356 | for (i = 0; i < 8; i++) { |
357 | ecc[i] = bitrev8(*rs_ecc); | 357 | ecc[i] = bitrev8(*rs_ecc); |
@@ -380,7 +380,7 @@ static int rtc_from4_correct_data(struct mtd_info *mtd, const u_char *buf, u_cha | |||
380 | /* Let the library code do its magic. */ | 380 | /* Let the library code do its magic. */ |
381 | res = decode_rs8(rs_decoder, (uint8_t *) buf, par, 512, syn, 0, NULL, 0xff, NULL); | 381 | res = decode_rs8(rs_decoder, (uint8_t *) buf, par, 512, syn, 0, NULL, 0xff, NULL); |
382 | if (res > 0) { | 382 | if (res > 0) { |
383 | DEBUG(MTD_DEBUG_LEVEL0, "rtc_from4_correct_data: " "ECC corrected %d errors on read\n", res); | 383 | pr_debug("rtc_from4_correct_data: " "ECC corrected %d errors on read\n", res); |
384 | } | 384 | } |
385 | return res; | 385 | return res; |
386 | } | 386 | } |
@@ -444,7 +444,6 @@ static int rtc_from4_errstat(struct mtd_info *mtd, struct nand_chip *this, | |||
444 | len = mtd->writesize; | 444 | len = mtd->writesize; |
445 | buf = kmalloc(len, GFP_KERNEL); | 445 | buf = kmalloc(len, GFP_KERNEL); |
446 | if (!buf) { | 446 | if (!buf) { |
447 | printk(KERN_ERR "rtc_from4_errstat: Out of memory!\n"); | ||
448 | er_stat = 1; | 447 | er_stat = 1; |
449 | goto out; | 448 | goto out; |
450 | } | 449 | } |
diff --git a/drivers/mtd/nand/s3c2410.c b/drivers/mtd/nand/s3c2410.c index 4405468f196b..868685db6712 100644 --- a/drivers/mtd/nand/s3c2410.c +++ b/drivers/mtd/nand/s3c2410.c | |||
@@ -723,7 +723,7 @@ static int s3c24xx_nand_remove(struct platform_device *pdev) | |||
723 | 723 | ||
724 | /* free the common resources */ | 724 | /* free the common resources */ |
725 | 725 | ||
726 | if (info->clk != NULL && !IS_ERR(info->clk)) { | 726 | if (!IS_ERR(info->clk)) { |
727 | s3c2410_nand_clk_set_state(info, CLOCK_DISABLE); | 727 | s3c2410_nand_clk_set_state(info, CLOCK_DISABLE); |
728 | clk_put(info->clk); | 728 | clk_put(info->clk); |
729 | } | 729 | } |
@@ -744,26 +744,15 @@ static int s3c24xx_nand_remove(struct platform_device *pdev) | |||
744 | return 0; | 744 | return 0; |
745 | } | 745 | } |
746 | 746 | ||
747 | const char *part_probes[] = { "cmdlinepart", NULL }; | ||
748 | static int s3c2410_nand_add_partition(struct s3c2410_nand_info *info, | 747 | static int s3c2410_nand_add_partition(struct s3c2410_nand_info *info, |
749 | struct s3c2410_nand_mtd *mtd, | 748 | struct s3c2410_nand_mtd *mtd, |
750 | struct s3c2410_nand_set *set) | 749 | struct s3c2410_nand_set *set) |
751 | { | 750 | { |
752 | struct mtd_partition *part_info; | 751 | if (set) |
753 | int nr_part = 0; | 752 | mtd->mtd.name = set->name; |
754 | 753 | ||
755 | if (set == NULL) | 754 | return mtd_device_parse_register(&mtd->mtd, NULL, 0, |
756 | return mtd_device_register(&mtd->mtd, NULL, 0); | 755 | set->partitions, set->nr_partitions); |
757 | |||
758 | mtd->mtd.name = set->name; | ||
759 | nr_part = parse_mtd_partitions(&mtd->mtd, part_probes, &part_info, 0); | ||
760 | |||
761 | if (nr_part <= 0 && set->nr_partitions > 0) { | ||
762 | nr_part = set->nr_partitions; | ||
763 | part_info = set->partitions; | ||
764 | } | ||
765 | |||
766 | return mtd_device_register(&mtd->mtd, part_info, nr_part); | ||
767 | } | 756 | } |
768 | 757 | ||
769 | /** | 758 | /** |
@@ -880,8 +869,10 @@ static void s3c2410_nand_init_chip(struct s3c2410_nand_info *info, | |||
880 | /* If you use u-boot BBT creation code, specifying this flag will | 869 | /* If you use u-boot BBT creation code, specifying this flag will |
881 | * let the kernel fish out the BBT from the NAND, and also skip the | 870 | * let the kernel fish out the BBT from the NAND, and also skip the |
882 | * full NAND scan that can take 1/2s or so. Little things... */ | 871 | * full NAND scan that can take 1/2s or so. Little things... */ |
883 | if (set->flash_bbt) | 872 | if (set->flash_bbt) { |
884 | chip->options |= NAND_USE_FLASH_BBT | NAND_SKIP_BBTSCAN; | 873 | chip->bbt_options |= NAND_BBT_USE_FLASH; |
874 | chip->options |= NAND_SKIP_BBTSCAN; | ||
875 | } | ||
885 | } | 876 | } |
886 | 877 | ||
887 | /** | 878 | /** |
diff --git a/drivers/mtd/nand/sharpsl.c b/drivers/mtd/nand/sharpsl.c index 19e24ed089ea..619d2a504788 100644 --- a/drivers/mtd/nand/sharpsl.c +++ b/drivers/mtd/nand/sharpsl.c | |||
@@ -103,16 +103,12 @@ static int sharpsl_nand_calculate_ecc(struct mtd_info *mtd, const u_char * dat, | |||
103 | return readb(sharpsl->io + ECCCNTR) != 0; | 103 | return readb(sharpsl->io + ECCCNTR) != 0; |
104 | } | 104 | } |
105 | 105 | ||
106 | static const char *part_probes[] = { "cmdlinepart", NULL }; | ||
107 | |||
108 | /* | 106 | /* |
109 | * Main initialization routine | 107 | * Main initialization routine |
110 | */ | 108 | */ |
111 | static int __devinit sharpsl_nand_probe(struct platform_device *pdev) | 109 | static int __devinit sharpsl_nand_probe(struct platform_device *pdev) |
112 | { | 110 | { |
113 | struct nand_chip *this; | 111 | struct nand_chip *this; |
114 | struct mtd_partition *sharpsl_partition_info; | ||
115 | int nr_partitions; | ||
116 | struct resource *r; | 112 | struct resource *r; |
117 | int err = 0; | 113 | int err = 0; |
118 | struct sharpsl_nand *sharpsl; | 114 | struct sharpsl_nand *sharpsl; |
@@ -184,14 +180,9 @@ static int __devinit sharpsl_nand_probe(struct platform_device *pdev) | |||
184 | 180 | ||
185 | /* Register the partitions */ | 181 | /* Register the partitions */ |
186 | sharpsl->mtd.name = "sharpsl-nand"; | 182 | sharpsl->mtd.name = "sharpsl-nand"; |
187 | nr_partitions = parse_mtd_partitions(&sharpsl->mtd, part_probes, &sharpsl_partition_info, 0); | ||
188 | if (nr_partitions <= 0) { | ||
189 | nr_partitions = data->nr_partitions; | ||
190 | sharpsl_partition_info = data->partitions; | ||
191 | } | ||
192 | 183 | ||
193 | err = mtd_device_register(&sharpsl->mtd, sharpsl_partition_info, | 184 | err = mtd_device_parse_register(&sharpsl->mtd, NULL, 0, |
194 | nr_partitions); | 185 | data->partitions, data->nr_partitions); |
195 | if (err) | 186 | if (err) |
196 | goto err_add; | 187 | goto err_add; |
197 | 188 | ||
diff --git a/drivers/mtd/nand/sm_common.c b/drivers/mtd/nand/sm_common.c index 43469715b3fa..32ae5af7444f 100644 --- a/drivers/mtd/nand/sm_common.c +++ b/drivers/mtd/nand/sm_common.c | |||
@@ -48,7 +48,7 @@ static int sm_block_markbad(struct mtd_info *mtd, loff_t ofs) | |||
48 | 48 | ||
49 | /* As long as this function is called on erase block boundaries | 49 | /* As long as this function is called on erase block boundaries |
50 | it will work correctly for 256 byte nand */ | 50 | it will work correctly for 256 byte nand */ |
51 | ops.mode = MTD_OOB_PLACE; | 51 | ops.mode = MTD_OPS_PLACE_OOB; |
52 | ops.ooboffs = 0; | 52 | ops.ooboffs = 0; |
53 | ops.ooblen = mtd->oobsize; | 53 | ops.ooblen = mtd->oobsize; |
54 | ops.oobbuf = (void *)&oob; | 54 | ops.oobbuf = (void *)&oob; |
diff --git a/drivers/mtd/nand/socrates_nand.c b/drivers/mtd/nand/socrates_nand.c index ca2d0555729e..0fb24f9c2327 100644 --- a/drivers/mtd/nand/socrates_nand.c +++ b/drivers/mtd/nand/socrates_nand.c | |||
@@ -155,8 +155,6 @@ static int socrates_nand_device_ready(struct mtd_info *mtd) | |||
155 | return 1; | 155 | return 1; |
156 | } | 156 | } |
157 | 157 | ||
158 | static const char *part_probes[] = { "cmdlinepart", NULL }; | ||
159 | |||
160 | /* | 158 | /* |
161 | * Probe for the NAND device. | 159 | * Probe for the NAND device. |
162 | */ | 160 | */ |
@@ -166,8 +164,7 @@ static int __devinit socrates_nand_probe(struct platform_device *ofdev) | |||
166 | struct mtd_info *mtd; | 164 | struct mtd_info *mtd; |
167 | struct nand_chip *nand_chip; | 165 | struct nand_chip *nand_chip; |
168 | int res; | 166 | int res; |
169 | struct mtd_partition *partitions = NULL; | 167 | struct mtd_part_parser_data ppdata; |
170 | int num_partitions = 0; | ||
171 | 168 | ||
172 | /* Allocate memory for the device structure (and zero it) */ | 169 | /* Allocate memory for the device structure (and zero it) */ |
173 | host = kzalloc(sizeof(struct socrates_nand_host), GFP_KERNEL); | 170 | host = kzalloc(sizeof(struct socrates_nand_host), GFP_KERNEL); |
@@ -193,6 +190,7 @@ static int __devinit socrates_nand_probe(struct platform_device *ofdev) | |||
193 | mtd->name = "socrates_nand"; | 190 | mtd->name = "socrates_nand"; |
194 | mtd->owner = THIS_MODULE; | 191 | mtd->owner = THIS_MODULE; |
195 | mtd->dev.parent = &ofdev->dev; | 192 | mtd->dev.parent = &ofdev->dev; |
193 | ppdata.of_node = ofdev->dev.of_node; | ||
196 | 194 | ||
197 | /*should never be accessed directly */ | 195 | /*should never be accessed directly */ |
198 | nand_chip->IO_ADDR_R = (void *)0xdeadbeef; | 196 | nand_chip->IO_ADDR_R = (void *)0xdeadbeef; |
@@ -225,30 +223,10 @@ static int __devinit socrates_nand_probe(struct platform_device *ofdev) | |||
225 | goto out; | 223 | goto out; |
226 | } | 224 | } |
227 | 225 | ||
228 | #ifdef CONFIG_MTD_CMDLINE_PARTS | 226 | res = mtd_device_parse_register(mtd, NULL, &ppdata, NULL, 0); |
229 | num_partitions = parse_mtd_partitions(mtd, part_probes, | ||
230 | &partitions, 0); | ||
231 | if (num_partitions < 0) { | ||
232 | res = num_partitions; | ||
233 | goto release; | ||
234 | } | ||
235 | #endif | ||
236 | |||
237 | if (num_partitions == 0) { | ||
238 | num_partitions = of_mtd_parse_partitions(&ofdev->dev, | ||
239 | ofdev->dev.of_node, | ||
240 | &partitions); | ||
241 | if (num_partitions < 0) { | ||
242 | res = num_partitions; | ||
243 | goto release; | ||
244 | } | ||
245 | } | ||
246 | |||
247 | res = mtd_device_register(mtd, partitions, num_partitions); | ||
248 | if (!res) | 227 | if (!res) |
249 | return res; | 228 | return res; |
250 | 229 | ||
251 | release: | ||
252 | nand_release(mtd); | 230 | nand_release(mtd); |
253 | 231 | ||
254 | out: | 232 | out: |
diff --git a/drivers/mtd/nand/tmio_nand.c b/drivers/mtd/nand/tmio_nand.c index 11e8371b5683..beebd95f7690 100644 --- a/drivers/mtd/nand/tmio_nand.c +++ b/drivers/mtd/nand/tmio_nand.c | |||
@@ -121,9 +121,6 @@ struct tmio_nand { | |||
121 | 121 | ||
122 | #define mtd_to_tmio(m) container_of(m, struct tmio_nand, mtd) | 122 | #define mtd_to_tmio(m) container_of(m, struct tmio_nand, mtd) |
123 | 123 | ||
124 | #ifdef CONFIG_MTD_CMDLINE_PARTS | ||
125 | static const char *part_probes[] = { "cmdlinepart", NULL }; | ||
126 | #endif | ||
127 | 124 | ||
128 | /*--------------------------------------------------------------------------*/ | 125 | /*--------------------------------------------------------------------------*/ |
129 | 126 | ||
@@ -381,8 +378,6 @@ static int tmio_probe(struct platform_device *dev) | |||
381 | struct tmio_nand *tmio; | 378 | struct tmio_nand *tmio; |
382 | struct mtd_info *mtd; | 379 | struct mtd_info *mtd; |
383 | struct nand_chip *nand_chip; | 380 | struct nand_chip *nand_chip; |
384 | struct mtd_partition *parts; | ||
385 | int nbparts = 0; | ||
386 | int retval; | 381 | int retval; |
387 | 382 | ||
388 | if (data == NULL) | 383 | if (data == NULL) |
@@ -461,15 +456,9 @@ static int tmio_probe(struct platform_device *dev) | |||
461 | goto err_scan; | 456 | goto err_scan; |
462 | } | 457 | } |
463 | /* Register the partitions */ | 458 | /* Register the partitions */ |
464 | #ifdef CONFIG_MTD_CMDLINE_PARTS | 459 | retval = mtd_device_parse_register(mtd, NULL, 0, |
465 | nbparts = parse_mtd_partitions(mtd, part_probes, &parts, 0); | 460 | data ? data->partition : NULL, |
466 | #endif | 461 | data ? data->num_partitions : 0); |
467 | if (nbparts <= 0 && data) { | ||
468 | parts = data->partition; | ||
469 | nbparts = data->num_partitions; | ||
470 | } | ||
471 | |||
472 | retval = mtd_device_register(mtd, parts, nbparts); | ||
473 | if (!retval) | 462 | if (!retval) |
474 | return retval; | 463 | return retval; |
475 | 464 | ||
diff --git a/drivers/mtd/nand/txx9ndfmc.c b/drivers/mtd/nand/txx9ndfmc.c index bfba4e39a6c5..ace46fdaef58 100644 --- a/drivers/mtd/nand/txx9ndfmc.c +++ b/drivers/mtd/nand/txx9ndfmc.c | |||
@@ -74,7 +74,6 @@ struct txx9ndfmc_drvdata { | |||
74 | unsigned char hold; /* in gbusclock */ | 74 | unsigned char hold; /* in gbusclock */ |
75 | unsigned char spw; /* in gbusclock */ | 75 | unsigned char spw; /* in gbusclock */ |
76 | struct nand_hw_control hw_control; | 76 | struct nand_hw_control hw_control; |
77 | struct mtd_partition *parts[MAX_TXX9NDFMC_DEV]; | ||
78 | }; | 77 | }; |
79 | 78 | ||
80 | static struct platform_device *mtd_to_platdev(struct mtd_info *mtd) | 79 | static struct platform_device *mtd_to_platdev(struct mtd_info *mtd) |
@@ -287,7 +286,6 @@ static int txx9ndfmc_nand_scan(struct mtd_info *mtd) | |||
287 | static int __init txx9ndfmc_probe(struct platform_device *dev) | 286 | static int __init txx9ndfmc_probe(struct platform_device *dev) |
288 | { | 287 | { |
289 | struct txx9ndfmc_platform_data *plat = dev->dev.platform_data; | 288 | struct txx9ndfmc_platform_data *plat = dev->dev.platform_data; |
290 | static const char *probes[] = { "cmdlinepart", NULL }; | ||
291 | int hold, spw; | 289 | int hold, spw; |
292 | int i; | 290 | int i; |
293 | struct txx9ndfmc_drvdata *drvdata; | 291 | struct txx9ndfmc_drvdata *drvdata; |
@@ -333,7 +331,6 @@ static int __init txx9ndfmc_probe(struct platform_device *dev) | |||
333 | struct txx9ndfmc_priv *txx9_priv; | 331 | struct txx9ndfmc_priv *txx9_priv; |
334 | struct nand_chip *chip; | 332 | struct nand_chip *chip; |
335 | struct mtd_info *mtd; | 333 | struct mtd_info *mtd; |
336 | int nr_parts; | ||
337 | 334 | ||
338 | if (!(plat->ch_mask & (1 << i))) | 335 | if (!(plat->ch_mask & (1 << i))) |
339 | continue; | 336 | continue; |
@@ -393,9 +390,7 @@ static int __init txx9ndfmc_probe(struct platform_device *dev) | |||
393 | } | 390 | } |
394 | mtd->name = txx9_priv->mtdname; | 391 | mtd->name = txx9_priv->mtdname; |
395 | 392 | ||
396 | nr_parts = parse_mtd_partitions(mtd, probes, | 393 | mtd_device_parse_register(mtd, NULL, 0, NULL, 0); |
397 | &drvdata->parts[i], 0); | ||
398 | mtd_device_register(mtd, drvdata->parts[i], nr_parts); | ||
399 | drvdata->mtds[i] = mtd; | 394 | drvdata->mtds[i] = mtd; |
400 | } | 395 | } |
401 | 396 | ||
@@ -421,7 +416,6 @@ static int __exit txx9ndfmc_remove(struct platform_device *dev) | |||
421 | txx9_priv = chip->priv; | 416 | txx9_priv = chip->priv; |
422 | 417 | ||
423 | nand_release(mtd); | 418 | nand_release(mtd); |
424 | kfree(drvdata->parts[i]); | ||
425 | kfree(txx9_priv->mtdname); | 419 | kfree(txx9_priv->mtdname); |
426 | kfree(txx9_priv); | 420 | kfree(txx9_priv); |
427 | } | 421 | } |
diff --git a/drivers/mtd/nftlcore.c b/drivers/mtd/nftlcore.c index b155666acfbe..cda77b562ad4 100644 --- a/drivers/mtd/nftlcore.c +++ b/drivers/mtd/nftlcore.c | |||
@@ -63,14 +63,12 @@ static void nftl_add_mtd(struct mtd_blktrans_ops *tr, struct mtd_info *mtd) | |||
63 | return; | 63 | return; |
64 | } | 64 | } |
65 | 65 | ||
66 | DEBUG(MTD_DEBUG_LEVEL1, "NFTL: add_mtd for %s\n", mtd->name); | 66 | pr_debug("NFTL: add_mtd for %s\n", mtd->name); |
67 | 67 | ||
68 | nftl = kzalloc(sizeof(struct NFTLrecord), GFP_KERNEL); | 68 | nftl = kzalloc(sizeof(struct NFTLrecord), GFP_KERNEL); |
69 | 69 | ||
70 | if (!nftl) { | 70 | if (!nftl) |
71 | printk(KERN_WARNING "NFTL: out of memory for data structures\n"); | ||
72 | return; | 71 | return; |
73 | } | ||
74 | 72 | ||
75 | nftl->mbd.mtd = mtd; | 73 | nftl->mbd.mtd = mtd; |
76 | nftl->mbd.devnum = -1; | 74 | nftl->mbd.devnum = -1; |
@@ -132,7 +130,7 @@ static void nftl_remove_dev(struct mtd_blktrans_dev *dev) | |||
132 | { | 130 | { |
133 | struct NFTLrecord *nftl = (void *)dev; | 131 | struct NFTLrecord *nftl = (void *)dev; |
134 | 132 | ||
135 | DEBUG(MTD_DEBUG_LEVEL1, "NFTL: remove_dev (i=%d)\n", dev->devnum); | 133 | pr_debug("NFTL: remove_dev (i=%d)\n", dev->devnum); |
136 | 134 | ||
137 | del_mtd_blktrans_dev(dev); | 135 | del_mtd_blktrans_dev(dev); |
138 | kfree(nftl->ReplUnitTable); | 136 | kfree(nftl->ReplUnitTable); |
@@ -149,7 +147,7 @@ int nftl_read_oob(struct mtd_info *mtd, loff_t offs, size_t len, | |||
149 | struct mtd_oob_ops ops; | 147 | struct mtd_oob_ops ops; |
150 | int res; | 148 | int res; |
151 | 149 | ||
152 | ops.mode = MTD_OOB_PLACE; | 150 | ops.mode = MTD_OPS_PLACE_OOB; |
153 | ops.ooboffs = offs & mask; | 151 | ops.ooboffs = offs & mask; |
154 | ops.ooblen = len; | 152 | ops.ooblen = len; |
155 | ops.oobbuf = buf; | 153 | ops.oobbuf = buf; |
@@ -170,7 +168,7 @@ int nftl_write_oob(struct mtd_info *mtd, loff_t offs, size_t len, | |||
170 | struct mtd_oob_ops ops; | 168 | struct mtd_oob_ops ops; |
171 | int res; | 169 | int res; |
172 | 170 | ||
173 | ops.mode = MTD_OOB_PLACE; | 171 | ops.mode = MTD_OPS_PLACE_OOB; |
174 | ops.ooboffs = offs & mask; | 172 | ops.ooboffs = offs & mask; |
175 | ops.ooblen = len; | 173 | ops.ooblen = len; |
176 | ops.oobbuf = buf; | 174 | ops.oobbuf = buf; |
@@ -193,7 +191,7 @@ static int nftl_write(struct mtd_info *mtd, loff_t offs, size_t len, | |||
193 | struct mtd_oob_ops ops; | 191 | struct mtd_oob_ops ops; |
194 | int res; | 192 | int res; |
195 | 193 | ||
196 | ops.mode = MTD_OOB_PLACE; | 194 | ops.mode = MTD_OPS_PLACE_OOB; |
197 | ops.ooboffs = offs & mask; | 195 | ops.ooboffs = offs & mask; |
198 | ops.ooblen = mtd->oobsize; | 196 | ops.ooblen = mtd->oobsize; |
199 | ops.oobbuf = oob; | 197 | ops.oobbuf = oob; |
@@ -220,7 +218,7 @@ static u16 NFTL_findfreeblock(struct NFTLrecord *nftl, int desperate ) | |||
220 | 218 | ||
221 | /* Normally, we force a fold to happen before we run out of free blocks completely */ | 219 | /* Normally, we force a fold to happen before we run out of free blocks completely */ |
222 | if (!desperate && nftl->numfreeEUNs < 2) { | 220 | if (!desperate && nftl->numfreeEUNs < 2) { |
223 | DEBUG(MTD_DEBUG_LEVEL1, "NFTL_findfreeblock: there are too few free EUNs\n"); | 221 | pr_debug("NFTL_findfreeblock: there are too few free EUNs\n"); |
224 | return BLOCK_NIL; | 222 | return BLOCK_NIL; |
225 | } | 223 | } |
226 | 224 | ||
@@ -291,8 +289,7 @@ static u16 NFTL_foldchain (struct NFTLrecord *nftl, unsigned thisVUC, unsigned p | |||
291 | if (block == 2) { | 289 | if (block == 2) { |
292 | foldmark = oob.u.c.FoldMark | oob.u.c.FoldMark1; | 290 | foldmark = oob.u.c.FoldMark | oob.u.c.FoldMark1; |
293 | if (foldmark == FOLD_MARK_IN_PROGRESS) { | 291 | if (foldmark == FOLD_MARK_IN_PROGRESS) { |
294 | DEBUG(MTD_DEBUG_LEVEL1, | 292 | pr_debug("Write Inhibited on EUN %d\n", thisEUN); |
295 | "Write Inhibited on EUN %d\n", thisEUN); | ||
296 | inplace = 0; | 293 | inplace = 0; |
297 | } else { | 294 | } else { |
298 | /* There's no other reason not to do inplace, | 295 | /* There's no other reason not to do inplace, |
@@ -357,7 +354,7 @@ static u16 NFTL_foldchain (struct NFTLrecord *nftl, unsigned thisVUC, unsigned p | |||
357 | if (BlockLastState[block] != SECTOR_FREE && | 354 | if (BlockLastState[block] != SECTOR_FREE && |
358 | BlockMap[block] != BLOCK_NIL && | 355 | BlockMap[block] != BLOCK_NIL && |
359 | BlockMap[block] != targetEUN) { | 356 | BlockMap[block] != targetEUN) { |
360 | DEBUG(MTD_DEBUG_LEVEL1, "Setting inplace to 0. VUC %d, " | 357 | pr_debug("Setting inplace to 0. VUC %d, " |
361 | "block %d was %x lastEUN, " | 358 | "block %d was %x lastEUN, " |
362 | "and is in EUN %d (%s) %d\n", | 359 | "and is in EUN %d (%s) %d\n", |
363 | thisVUC, block, BlockLastState[block], | 360 | thisVUC, block, BlockLastState[block], |
@@ -373,14 +370,14 @@ static u16 NFTL_foldchain (struct NFTLrecord *nftl, unsigned thisVUC, unsigned p | |||
373 | pendingblock < ((thisVUC + 1)* (nftl->EraseSize / 512)) && | 370 | pendingblock < ((thisVUC + 1)* (nftl->EraseSize / 512)) && |
374 | BlockLastState[pendingblock - (thisVUC * (nftl->EraseSize / 512))] != | 371 | BlockLastState[pendingblock - (thisVUC * (nftl->EraseSize / 512))] != |
375 | SECTOR_FREE) { | 372 | SECTOR_FREE) { |
376 | DEBUG(MTD_DEBUG_LEVEL1, "Pending write not free in EUN %d. " | 373 | pr_debug("Pending write not free in EUN %d. " |
377 | "Folding out of place.\n", targetEUN); | 374 | "Folding out of place.\n", targetEUN); |
378 | inplace = 0; | 375 | inplace = 0; |
379 | } | 376 | } |
380 | } | 377 | } |
381 | 378 | ||
382 | if (!inplace) { | 379 | if (!inplace) { |
383 | DEBUG(MTD_DEBUG_LEVEL1, "Cannot fold Virtual Unit Chain %d in place. " | 380 | pr_debug("Cannot fold Virtual Unit Chain %d in place. " |
384 | "Trying out-of-place\n", thisVUC); | 381 | "Trying out-of-place\n", thisVUC); |
385 | /* We need to find a targetEUN to fold into. */ | 382 | /* We need to find a targetEUN to fold into. */ |
386 | targetEUN = NFTL_findfreeblock(nftl, 1); | 383 | targetEUN = NFTL_findfreeblock(nftl, 1); |
@@ -410,7 +407,7 @@ static u16 NFTL_foldchain (struct NFTLrecord *nftl, unsigned thisVUC, unsigned p | |||
410 | and the Erase Unit into which we are supposed to be copying. | 407 | and the Erase Unit into which we are supposed to be copying. |
411 | Go for it. | 408 | Go for it. |
412 | */ | 409 | */ |
413 | DEBUG(MTD_DEBUG_LEVEL1,"Folding chain %d into unit %d\n", thisVUC, targetEUN); | 410 | pr_debug("Folding chain %d into unit %d\n", thisVUC, targetEUN); |
414 | for (block = 0; block < nftl->EraseSize / 512 ; block++) { | 411 | for (block = 0; block < nftl->EraseSize / 512 ; block++) { |
415 | unsigned char movebuf[512]; | 412 | unsigned char movebuf[512]; |
416 | int ret; | 413 | int ret; |
@@ -428,7 +425,7 @@ static u16 NFTL_foldchain (struct NFTLrecord *nftl, unsigned thisVUC, unsigned p | |||
428 | 425 | ||
429 | ret = mtd->read(mtd, (nftl->EraseSize * BlockMap[block]) + (block * 512), | 426 | ret = mtd->read(mtd, (nftl->EraseSize * BlockMap[block]) + (block * 512), |
430 | 512, &retlen, movebuf); | 427 | 512, &retlen, movebuf); |
431 | if (ret < 0 && ret != -EUCLEAN) { | 428 | if (ret < 0 && !mtd_is_bitflip(ret)) { |
432 | ret = mtd->read(mtd, (nftl->EraseSize * BlockMap[block]) | 429 | ret = mtd->read(mtd, (nftl->EraseSize * BlockMap[block]) |
433 | + (block * 512), 512, &retlen, | 430 | + (block * 512), 512, &retlen, |
434 | movebuf); | 431 | movebuf); |
@@ -457,7 +454,7 @@ static u16 NFTL_foldchain (struct NFTLrecord *nftl, unsigned thisVUC, unsigned p | |||
457 | has duplicate chains, we need to free one of the chains because it's not necessary any more. | 454 | has duplicate chains, we need to free one of the chains because it's not necessary any more. |
458 | */ | 455 | */ |
459 | thisEUN = nftl->EUNtable[thisVUC]; | 456 | thisEUN = nftl->EUNtable[thisVUC]; |
460 | DEBUG(MTD_DEBUG_LEVEL1,"Want to erase\n"); | 457 | pr_debug("Want to erase\n"); |
461 | 458 | ||
462 | /* For each block in the old chain (except the targetEUN of course), | 459 | /* For each block in the old chain (except the targetEUN of course), |
463 | free it and make it available for future use */ | 460 | free it and make it available for future use */ |
@@ -570,7 +567,7 @@ static inline u16 NFTL_findwriteunit(struct NFTLrecord *nftl, unsigned block) | |||
570 | (writeEUN * nftl->EraseSize) + blockofs, | 567 | (writeEUN * nftl->EraseSize) + blockofs, |
571 | 8, &retlen, (char *)&bci); | 568 | 8, &retlen, (char *)&bci); |
572 | 569 | ||
573 | DEBUG(MTD_DEBUG_LEVEL2, "Status of block %d in EUN %d is %x\n", | 570 | pr_debug("Status of block %d in EUN %d is %x\n", |
574 | block , writeEUN, le16_to_cpu(bci.Status)); | 571 | block , writeEUN, le16_to_cpu(bci.Status)); |
575 | 572 | ||
576 | status = bci.Status | bci.Status1; | 573 | status = bci.Status | bci.Status1; |
@@ -623,7 +620,7 @@ static inline u16 NFTL_findwriteunit(struct NFTLrecord *nftl, unsigned block) | |||
623 | but they are reserved for when we're | 620 | but they are reserved for when we're |
624 | desperate. Well, now we're desperate. | 621 | desperate. Well, now we're desperate. |
625 | */ | 622 | */ |
626 | DEBUG(MTD_DEBUG_LEVEL1, "Using desperate==1 to find free EUN to accommodate write to VUC %d\n", thisVUC); | 623 | pr_debug("Using desperate==1 to find free EUN to accommodate write to VUC %d\n", thisVUC); |
627 | writeEUN = NFTL_findfreeblock(nftl, 1); | 624 | writeEUN = NFTL_findfreeblock(nftl, 1); |
628 | } | 625 | } |
629 | if (writeEUN == BLOCK_NIL) { | 626 | if (writeEUN == BLOCK_NIL) { |
@@ -776,7 +773,7 @@ static int nftl_readblock(struct mtd_blktrans_dev *mbd, unsigned long block, | |||
776 | size_t retlen; | 773 | size_t retlen; |
777 | int res = mtd->read(mtd, ptr, 512, &retlen, buffer); | 774 | int res = mtd->read(mtd, ptr, 512, &retlen, buffer); |
778 | 775 | ||
779 | if (res < 0 && res != -EUCLEAN) | 776 | if (res < 0 && !mtd_is_bitflip(res)) |
780 | return -EIO; | 777 | return -EIO; |
781 | } | 778 | } |
782 | return 0; | 779 | return 0; |
diff --git a/drivers/mtd/nftlmount.c b/drivers/mtd/nftlmount.c index e3cd1ffad2f6..ac4092591aea 100644 --- a/drivers/mtd/nftlmount.c +++ b/drivers/mtd/nftlmount.c | |||
@@ -32,7 +32,7 @@ | |||
32 | 32 | ||
33 | /* find_boot_record: Find the NFTL Media Header and its Spare copy which contains the | 33 | /* find_boot_record: Find the NFTL Media Header and its Spare copy which contains the |
34 | * various device information of the NFTL partition and Bad Unit Table. Update | 34 | * various device information of the NFTL partition and Bad Unit Table. Update |
35 | * the ReplUnitTable[] table accroding to the Bad Unit Table. ReplUnitTable[] | 35 | * the ReplUnitTable[] table according to the Bad Unit Table. ReplUnitTable[] |
36 | * is used for management of Erase Unit in other routines in nftl.c and nftlmount.c | 36 | * is used for management of Erase Unit in other routines in nftl.c and nftlmount.c |
37 | */ | 37 | */ |
38 | static int find_boot_record(struct NFTLrecord *nftl) | 38 | static int find_boot_record(struct NFTLrecord *nftl) |
@@ -297,7 +297,7 @@ static int check_free_sectors(struct NFTLrecord *nftl, unsigned int address, int | |||
297 | * | 297 | * |
298 | * Return: 0 when succeed, -1 on error. | 298 | * Return: 0 when succeed, -1 on error. |
299 | * | 299 | * |
300 | * ToDo: 1. Is it neceressary to check_free_sector after erasing ?? | 300 | * ToDo: 1. Is it necessary to check_free_sector after erasing ?? |
301 | */ | 301 | */ |
302 | int NFTL_formatblock(struct NFTLrecord *nftl, int block) | 302 | int NFTL_formatblock(struct NFTLrecord *nftl, int block) |
303 | { | 303 | { |
@@ -337,7 +337,7 @@ int NFTL_formatblock(struct NFTLrecord *nftl, int block) | |||
337 | nb_erases = le32_to_cpu(uci.WearInfo); | 337 | nb_erases = le32_to_cpu(uci.WearInfo); |
338 | nb_erases++; | 338 | nb_erases++; |
339 | 339 | ||
340 | /* wrap (almost impossible with current flashs) or free block */ | 340 | /* wrap (almost impossible with current flash) or free block */ |
341 | if (nb_erases == 0) | 341 | if (nb_erases == 0) |
342 | nb_erases = 1; | 342 | nb_erases = 1; |
343 | 343 | ||
@@ -363,10 +363,10 @@ fail: | |||
363 | * Mark as 'IGNORE' each incorrect sector. This check is only done if the chain | 363 | * Mark as 'IGNORE' each incorrect sector. This check is only done if the chain |
364 | * was being folded when NFTL was interrupted. | 364 | * was being folded when NFTL was interrupted. |
365 | * | 365 | * |
366 | * The check_free_sectors in this function is neceressary. There is a possible | 366 | * The check_free_sectors in this function is necessary. There is a possible |
367 | * situation that after writing the Data area, the Block Control Information is | 367 | * situation that after writing the Data area, the Block Control Information is |
368 | * not updated according (due to power failure or something) which leaves the block | 368 | * not updated according (due to power failure or something) which leaves the block |
369 | * in an umconsistent state. So we have to check if a block is really FREE in this | 369 | * in an inconsistent state. So we have to check if a block is really FREE in this |
370 | * case. */ | 370 | * case. */ |
371 | static void check_sectors_in_chain(struct NFTLrecord *nftl, unsigned int first_block) | 371 | static void check_sectors_in_chain(struct NFTLrecord *nftl, unsigned int first_block) |
372 | { | 372 | { |
@@ -428,7 +428,7 @@ static int calc_chain_length(struct NFTLrecord *nftl, unsigned int first_block) | |||
428 | 428 | ||
429 | for (;;) { | 429 | for (;;) { |
430 | length++; | 430 | length++; |
431 | /* avoid infinite loops, although this is guaranted not to | 431 | /* avoid infinite loops, although this is guaranteed not to |
432 | happen because of the previous checks */ | 432 | happen because of the previous checks */ |
433 | if (length >= nftl->nb_blocks) { | 433 | if (length >= nftl->nb_blocks) { |
434 | printk("nftl: length too long %d !\n", length); | 434 | printk("nftl: length too long %d !\n", length); |
@@ -447,11 +447,11 @@ static int calc_chain_length(struct NFTLrecord *nftl, unsigned int first_block) | |||
447 | /* format_chain: Format an invalid Virtual Unit chain. It frees all the Erase Units in a | 447 | /* format_chain: Format an invalid Virtual Unit chain. It frees all the Erase Units in a |
448 | * Virtual Unit Chain, i.e. all the units are disconnected. | 448 | * Virtual Unit Chain, i.e. all the units are disconnected. |
449 | * | 449 | * |
450 | * It is not stricly correct to begin from the first block of the chain because | 450 | * It is not strictly correct to begin from the first block of the chain because |
451 | * if we stop the code, we may see again a valid chain if there was a first_block | 451 | * if we stop the code, we may see again a valid chain if there was a first_block |
452 | * flag in a block inside it. But is it really a problem ? | 452 | * flag in a block inside it. But is it really a problem ? |
453 | * | 453 | * |
454 | * FixMe: Figure out what the last statesment means. What if power failure when we are | 454 | * FixMe: Figure out what the last statement means. What if power failure when we are |
455 | * in the for (;;) loop formatting blocks ?? | 455 | * in the for (;;) loop formatting blocks ?? |
456 | */ | 456 | */ |
457 | static void format_chain(struct NFTLrecord *nftl, unsigned int first_block) | 457 | static void format_chain(struct NFTLrecord *nftl, unsigned int first_block) |
@@ -485,7 +485,7 @@ static void format_chain(struct NFTLrecord *nftl, unsigned int first_block) | |||
485 | * totally free (only 0xff). | 485 | * totally free (only 0xff). |
486 | * | 486 | * |
487 | * Definition: Free Erase Unit -- A properly erased/formatted Free Erase Unit should have meet the | 487 | * Definition: Free Erase Unit -- A properly erased/formatted Free Erase Unit should have meet the |
488 | * following critia: | 488 | * following criteria: |
489 | * 1. */ | 489 | * 1. */ |
490 | static int check_and_mark_free_block(struct NFTLrecord *nftl, int block) | 490 | static int check_and_mark_free_block(struct NFTLrecord *nftl, int block) |
491 | { | 491 | { |
@@ -502,7 +502,7 @@ static int check_and_mark_free_block(struct NFTLrecord *nftl, int block) | |||
502 | erase_mark = le16_to_cpu ((h1.EraseMark | h1.EraseMark1)); | 502 | erase_mark = le16_to_cpu ((h1.EraseMark | h1.EraseMark1)); |
503 | if (erase_mark != ERASE_MARK) { | 503 | if (erase_mark != ERASE_MARK) { |
504 | /* if no erase mark, the block must be totally free. This is | 504 | /* if no erase mark, the block must be totally free. This is |
505 | possible in two cases : empty filsystem or interrupted erase (very unlikely) */ | 505 | possible in two cases : empty filesystem or interrupted erase (very unlikely) */ |
506 | if (check_free_sectors (nftl, block * nftl->EraseSize, nftl->EraseSize, 1) != 0) | 506 | if (check_free_sectors (nftl, block * nftl->EraseSize, nftl->EraseSize, 1) != 0) |
507 | return -1; | 507 | return -1; |
508 | 508 | ||
@@ -544,7 +544,7 @@ static int check_and_mark_free_block(struct NFTLrecord *nftl, int block) | |||
544 | /* get_fold_mark: Read fold mark from Unit Control Information #2, we use FOLD_MARK_IN_PROGRESS | 544 | /* get_fold_mark: Read fold mark from Unit Control Information #2, we use FOLD_MARK_IN_PROGRESS |
545 | * to indicate that we are in the progression of a Virtual Unit Chain folding. If the UCI #2 | 545 | * to indicate that we are in the progression of a Virtual Unit Chain folding. If the UCI #2 |
546 | * is FOLD_MARK_IN_PROGRESS when mounting the NFTL, the (previous) folding process is interrupted | 546 | * is FOLD_MARK_IN_PROGRESS when mounting the NFTL, the (previous) folding process is interrupted |
547 | * for some reason. A clean up/check of the VUC is neceressary in this case. | 547 | * for some reason. A clean up/check of the VUC is necessary in this case. |
548 | * | 548 | * |
549 | * WARNING: return 0 if read error | 549 | * WARNING: return 0 if read error |
550 | */ | 550 | */ |
@@ -657,7 +657,7 @@ int NFTL_mount(struct NFTLrecord *s) | |||
657 | printk("Block %d: incorrect logical block: %d expected: %d\n", | 657 | printk("Block %d: incorrect logical block: %d expected: %d\n", |
658 | block, logical_block, first_logical_block); | 658 | block, logical_block, first_logical_block); |
659 | /* the chain is incorrect : we must format it, | 659 | /* the chain is incorrect : we must format it, |
660 | but we need to read it completly */ | 660 | but we need to read it completely */ |
661 | do_format_chain = 1; | 661 | do_format_chain = 1; |
662 | } | 662 | } |
663 | if (is_first_block) { | 663 | if (is_first_block) { |
@@ -669,7 +669,7 @@ int NFTL_mount(struct NFTLrecord *s) | |||
669 | printk("Block %d: incorrectly marked as first block in chain\n", | 669 | printk("Block %d: incorrectly marked as first block in chain\n", |
670 | block); | 670 | block); |
671 | /* the chain is incorrect : we must format it, | 671 | /* the chain is incorrect : we must format it, |
672 | but we need to read it completly */ | 672 | but we need to read it completely */ |
673 | do_format_chain = 1; | 673 | do_format_chain = 1; |
674 | } else { | 674 | } else { |
675 | printk("Block %d: folding in progress - ignoring first block flag\n", | 675 | printk("Block %d: folding in progress - ignoring first block flag\n", |
diff --git a/drivers/mtd/ofpart.c b/drivers/mtd/ofpart.c index a996718fa6b0..64be8f0848b0 100644 --- a/drivers/mtd/ofpart.c +++ b/drivers/mtd/ofpart.c | |||
@@ -20,14 +20,23 @@ | |||
20 | #include <linux/slab.h> | 20 | #include <linux/slab.h> |
21 | #include <linux/mtd/partitions.h> | 21 | #include <linux/mtd/partitions.h> |
22 | 22 | ||
23 | int __devinit of_mtd_parse_partitions(struct device *dev, | 23 | static int parse_ofpart_partitions(struct mtd_info *master, |
24 | struct device_node *node, | 24 | struct mtd_partition **pparts, |
25 | struct mtd_partition **pparts) | 25 | struct mtd_part_parser_data *data) |
26 | { | 26 | { |
27 | struct device_node *node; | ||
27 | const char *partname; | 28 | const char *partname; |
28 | struct device_node *pp; | 29 | struct device_node *pp; |
29 | int nr_parts, i; | 30 | int nr_parts, i; |
30 | 31 | ||
32 | |||
33 | if (!data) | ||
34 | return 0; | ||
35 | |||
36 | node = data->of_node; | ||
37 | if (!node) | ||
38 | return 0; | ||
39 | |||
31 | /* First count the subnodes */ | 40 | /* First count the subnodes */ |
32 | pp = NULL; | 41 | pp = NULL; |
33 | nr_parts = 0; | 42 | nr_parts = 0; |
@@ -69,7 +78,7 @@ int __devinit of_mtd_parse_partitions(struct device *dev, | |||
69 | 78 | ||
70 | if (!i) { | 79 | if (!i) { |
71 | of_node_put(pp); | 80 | of_node_put(pp); |
72 | dev_err(dev, "No valid partition found on %s\n", node->full_name); | 81 | pr_err("No valid partition found on %s\n", node->full_name); |
73 | kfree(*pparts); | 82 | kfree(*pparts); |
74 | *pparts = NULL; | 83 | *pparts = NULL; |
75 | return -EINVAL; | 84 | return -EINVAL; |
@@ -77,6 +86,99 @@ int __devinit of_mtd_parse_partitions(struct device *dev, | |||
77 | 86 | ||
78 | return nr_parts; | 87 | return nr_parts; |
79 | } | 88 | } |
80 | EXPORT_SYMBOL(of_mtd_parse_partitions); | 89 | |
90 | static struct mtd_part_parser ofpart_parser = { | ||
91 | .owner = THIS_MODULE, | ||
92 | .parse_fn = parse_ofpart_partitions, | ||
93 | .name = "ofpart", | ||
94 | }; | ||
95 | |||
96 | static int parse_ofoldpart_partitions(struct mtd_info *master, | ||
97 | struct mtd_partition **pparts, | ||
98 | struct mtd_part_parser_data *data) | ||
99 | { | ||
100 | struct device_node *dp; | ||
101 | int i, plen, nr_parts; | ||
102 | const struct { | ||
103 | __be32 offset, len; | ||
104 | } *part; | ||
105 | const char *names; | ||
106 | |||
107 | if (!data) | ||
108 | return 0; | ||
109 | |||
110 | dp = data->of_node; | ||
111 | if (!dp) | ||
112 | return 0; | ||
113 | |||
114 | part = of_get_property(dp, "partitions", &plen); | ||
115 | if (!part) | ||
116 | return 0; /* No partitions found */ | ||
117 | |||
118 | pr_warning("Device tree uses obsolete partition map binding: %s\n", | ||
119 | dp->full_name); | ||
120 | |||
121 | nr_parts = plen / sizeof(part[0]); | ||
122 | |||
123 | *pparts = kzalloc(nr_parts * sizeof(*(*pparts)), GFP_KERNEL); | ||
124 | if (!pparts) | ||
125 | return -ENOMEM; | ||
126 | |||
127 | names = of_get_property(dp, "partition-names", &plen); | ||
128 | |||
129 | for (i = 0; i < nr_parts; i++) { | ||
130 | (*pparts)[i].offset = be32_to_cpu(part->offset); | ||
131 | (*pparts)[i].size = be32_to_cpu(part->len) & ~1; | ||
132 | /* bit 0 set signifies read only partition */ | ||
133 | if (be32_to_cpu(part->len) & 1) | ||
134 | (*pparts)[i].mask_flags = MTD_WRITEABLE; | ||
135 | |||
136 | if (names && (plen > 0)) { | ||
137 | int len = strlen(names) + 1; | ||
138 | |||
139 | (*pparts)[i].name = (char *)names; | ||
140 | plen -= len; | ||
141 | names += len; | ||
142 | } else { | ||
143 | (*pparts)[i].name = "unnamed"; | ||
144 | } | ||
145 | |||
146 | part++; | ||
147 | } | ||
148 | |||
149 | return nr_parts; | ||
150 | } | ||
151 | |||
152 | static struct mtd_part_parser ofoldpart_parser = { | ||
153 | .owner = THIS_MODULE, | ||
154 | .parse_fn = parse_ofoldpart_partitions, | ||
155 | .name = "ofoldpart", | ||
156 | }; | ||
157 | |||
158 | static int __init ofpart_parser_init(void) | ||
159 | { | ||
160 | int rc; | ||
161 | rc = register_mtd_parser(&ofpart_parser); | ||
162 | if (rc) | ||
163 | goto out; | ||
164 | |||
165 | rc = register_mtd_parser(&ofoldpart_parser); | ||
166 | if (!rc) | ||
167 | return 0; | ||
168 | |||
169 | deregister_mtd_parser(&ofoldpart_parser); | ||
170 | out: | ||
171 | return rc; | ||
172 | } | ||
173 | |||
174 | module_init(ofpart_parser_init); | ||
81 | 175 | ||
82 | MODULE_LICENSE("GPL"); | 176 | MODULE_LICENSE("GPL"); |
177 | MODULE_DESCRIPTION("Parser for MTD partitioning information in device tree"); | ||
178 | MODULE_AUTHOR("Vitaly Wool, David Gibson"); | ||
179 | /* | ||
180 | * When MTD core cannot find the requested parser, it tries to load the module | ||
181 | * with the same name. Since we provide the ofoldpart parser, we should have | ||
182 | * the corresponding alias. | ||
183 | */ | ||
184 | MODULE_ALIAS("ofoldpart"); | ||
diff --git a/drivers/mtd/onenand/generic.c b/drivers/mtd/onenand/generic.c index 2d70d354d846..7813095264a5 100644 --- a/drivers/mtd/onenand/generic.c +++ b/drivers/mtd/onenand/generic.c | |||
@@ -30,11 +30,8 @@ | |||
30 | */ | 30 | */ |
31 | #define DRIVER_NAME "onenand-flash" | 31 | #define DRIVER_NAME "onenand-flash" |
32 | 32 | ||
33 | static const char *part_probes[] = { "cmdlinepart", NULL, }; | ||
34 | |||
35 | struct onenand_info { | 33 | struct onenand_info { |
36 | struct mtd_info mtd; | 34 | struct mtd_info mtd; |
37 | struct mtd_partition *parts; | ||
38 | struct onenand_chip onenand; | 35 | struct onenand_chip onenand; |
39 | }; | 36 | }; |
40 | 37 | ||
@@ -73,13 +70,9 @@ static int __devinit generic_onenand_probe(struct platform_device *pdev) | |||
73 | goto out_iounmap; | 70 | goto out_iounmap; |
74 | } | 71 | } |
75 | 72 | ||
76 | err = parse_mtd_partitions(&info->mtd, part_probes, &info->parts, 0); | 73 | err = mtd_device_parse_register(&info->mtd, NULL, 0, |
77 | if (err > 0) | 74 | pdata ? pdata->parts : NULL, |
78 | mtd_device_register(&info->mtd, info->parts, err); | 75 | pdata ? pdata->nr_parts : 0); |
79 | else if (err <= 0 && pdata && pdata->parts) | ||
80 | mtd_device_register(&info->mtd, pdata->parts, pdata->nr_parts); | ||
81 | else | ||
82 | err = mtd_device_register(&info->mtd, NULL, 0); | ||
83 | 76 | ||
84 | platform_set_drvdata(pdev, info); | 77 | platform_set_drvdata(pdev, info); |
85 | 78 | ||
@@ -104,7 +97,6 @@ static int __devexit generic_onenand_remove(struct platform_device *pdev) | |||
104 | platform_set_drvdata(pdev, NULL); | 97 | platform_set_drvdata(pdev, NULL); |
105 | 98 | ||
106 | if (info) { | 99 | if (info) { |
107 | mtd_device_unregister(&info->mtd); | ||
108 | onenand_release(&info->mtd); | 100 | onenand_release(&info->mtd); |
109 | release_mem_region(res->start, size); | 101 | release_mem_region(res->start, size); |
110 | iounmap(info->onenand.base); | 102 | iounmap(info->onenand.base); |
diff --git a/drivers/mtd/onenand/omap2.c b/drivers/mtd/onenand/omap2.c index 6a1d6d9a2df9..7e9ea6852b67 100644 --- a/drivers/mtd/onenand/omap2.c +++ b/drivers/mtd/onenand/omap2.c | |||
@@ -57,7 +57,6 @@ struct omap2_onenand { | |||
57 | unsigned long phys_base; | 57 | unsigned long phys_base; |
58 | int gpio_irq; | 58 | int gpio_irq; |
59 | struct mtd_info mtd; | 59 | struct mtd_info mtd; |
60 | struct mtd_partition *parts; | ||
61 | struct onenand_chip onenand; | 60 | struct onenand_chip onenand; |
62 | struct completion irq_done; | 61 | struct completion irq_done; |
63 | struct completion dma_done; | 62 | struct completion dma_done; |
@@ -67,8 +66,6 @@ struct omap2_onenand { | |||
67 | struct regulator *regulator; | 66 | struct regulator *regulator; |
68 | }; | 67 | }; |
69 | 68 | ||
70 | static const char *part_probes[] = { "cmdlinepart", NULL, }; | ||
71 | |||
72 | static void omap2_onenand_dma_cb(int lch, u16 ch_status, void *data) | 69 | static void omap2_onenand_dma_cb(int lch, u16 ch_status, void *data) |
73 | { | 70 | { |
74 | struct omap2_onenand *c = data; | 71 | struct omap2_onenand *c = data; |
@@ -741,6 +738,7 @@ static int __devinit omap2_onenand_probe(struct platform_device *pdev) | |||
741 | c->regulator = regulator_get(&pdev->dev, "vonenand"); | 738 | c->regulator = regulator_get(&pdev->dev, "vonenand"); |
742 | if (IS_ERR(c->regulator)) { | 739 | if (IS_ERR(c->regulator)) { |
743 | dev_err(&pdev->dev, "Failed to get regulator\n"); | 740 | dev_err(&pdev->dev, "Failed to get regulator\n"); |
741 | r = PTR_ERR(c->regulator); | ||
744 | goto err_release_dma; | 742 | goto err_release_dma; |
745 | } | 743 | } |
746 | c->onenand.enable = omap2_onenand_enable; | 744 | c->onenand.enable = omap2_onenand_enable; |
@@ -753,13 +751,9 @@ static int __devinit omap2_onenand_probe(struct platform_device *pdev) | |||
753 | if ((r = onenand_scan(&c->mtd, 1)) < 0) | 751 | if ((r = onenand_scan(&c->mtd, 1)) < 0) |
754 | goto err_release_regulator; | 752 | goto err_release_regulator; |
755 | 753 | ||
756 | r = parse_mtd_partitions(&c->mtd, part_probes, &c->parts, 0); | 754 | r = mtd_device_parse_register(&c->mtd, NULL, 0, |
757 | if (r > 0) | 755 | pdata ? pdata->parts : NULL, |
758 | r = mtd_device_register(&c->mtd, c->parts, r); | 756 | pdata ? pdata->nr_parts : 0); |
759 | else if (pdata->parts != NULL) | ||
760 | r = mtd_device_register(&c->mtd, pdata->parts, pdata->nr_parts); | ||
761 | else | ||
762 | r = mtd_device_register(&c->mtd, NULL, 0); | ||
763 | if (r) | 757 | if (r) |
764 | goto err_release_onenand; | 758 | goto err_release_onenand; |
765 | 759 | ||
@@ -786,7 +780,6 @@ err_release_mem_region: | |||
786 | err_free_cs: | 780 | err_free_cs: |
787 | gpmc_cs_free(c->gpmc_cs); | 781 | gpmc_cs_free(c->gpmc_cs); |
788 | err_kfree: | 782 | err_kfree: |
789 | kfree(c->parts); | ||
790 | kfree(c); | 783 | kfree(c); |
791 | 784 | ||
792 | return r; | 785 | return r; |
@@ -809,7 +802,6 @@ static int __devexit omap2_onenand_remove(struct platform_device *pdev) | |||
809 | iounmap(c->onenand.base); | 802 | iounmap(c->onenand.base); |
810 | release_mem_region(c->phys_base, ONENAND_IO_SIZE); | 803 | release_mem_region(c->phys_base, ONENAND_IO_SIZE); |
811 | gpmc_cs_free(c->gpmc_cs); | 804 | gpmc_cs_free(c->gpmc_cs); |
812 | kfree(c->parts); | ||
813 | kfree(c); | 805 | kfree(c); |
814 | 806 | ||
815 | return 0; | 807 | return 0; |
diff --git a/drivers/mtd/onenand/onenand_base.c b/drivers/mtd/onenand/onenand_base.c index ac9e959802a7..a8394730b4b6 100644 --- a/drivers/mtd/onenand/onenand_base.c +++ b/drivers/mtd/onenand/onenand_base.c | |||
@@ -1015,7 +1015,7 @@ static void onenand_release_device(struct mtd_info *mtd) | |||
1015 | } | 1015 | } |
1016 | 1016 | ||
1017 | /** | 1017 | /** |
1018 | * onenand_transfer_auto_oob - [Internal] oob auto-placement transfer | 1018 | * onenand_transfer_auto_oob - [INTERN] oob auto-placement transfer |
1019 | * @param mtd MTD device structure | 1019 | * @param mtd MTD device structure |
1020 | * @param buf destination address | 1020 | * @param buf destination address |
1021 | * @param column oob offset to read from | 1021 | * @param column oob offset to read from |
@@ -1079,7 +1079,7 @@ static int onenand_recover_lsb(struct mtd_info *mtd, loff_t addr, int status) | |||
1079 | return status; | 1079 | return status; |
1080 | 1080 | ||
1081 | /* check if we failed due to uncorrectable error */ | 1081 | /* check if we failed due to uncorrectable error */ |
1082 | if (status != -EBADMSG && status != ONENAND_BBT_READ_ECC_ERROR) | 1082 | if (!mtd_is_eccerr(status) && status != ONENAND_BBT_READ_ECC_ERROR) |
1083 | return status; | 1083 | return status; |
1084 | 1084 | ||
1085 | /* check if address lies in MLC region */ | 1085 | /* check if address lies in MLC region */ |
@@ -1122,10 +1122,10 @@ static int onenand_mlc_read_ops_nolock(struct mtd_info *mtd, loff_t from, | |||
1122 | int ret = 0; | 1122 | int ret = 0; |
1123 | int writesize = this->writesize; | 1123 | int writesize = this->writesize; |
1124 | 1124 | ||
1125 | DEBUG(MTD_DEBUG_LEVEL3, "%s: from = 0x%08x, len = %i\n", | 1125 | pr_debug("%s: from = 0x%08x, len = %i\n", __func__, (unsigned int)from, |
1126 | __func__, (unsigned int) from, (int) len); | 1126 | (int)len); |
1127 | 1127 | ||
1128 | if (ops->mode == MTD_OOB_AUTO) | 1128 | if (ops->mode == MTD_OPS_AUTO_OOB) |
1129 | oobsize = this->ecclayout->oobavail; | 1129 | oobsize = this->ecclayout->oobavail; |
1130 | else | 1130 | else |
1131 | oobsize = mtd->oobsize; | 1131 | oobsize = mtd->oobsize; |
@@ -1159,7 +1159,7 @@ static int onenand_mlc_read_ops_nolock(struct mtd_info *mtd, loff_t from, | |||
1159 | if (unlikely(ret)) | 1159 | if (unlikely(ret)) |
1160 | ret = onenand_recover_lsb(mtd, from, ret); | 1160 | ret = onenand_recover_lsb(mtd, from, ret); |
1161 | onenand_update_bufferram(mtd, from, !ret); | 1161 | onenand_update_bufferram(mtd, from, !ret); |
1162 | if (ret == -EBADMSG) | 1162 | if (mtd_is_eccerr(ret)) |
1163 | ret = 0; | 1163 | ret = 0; |
1164 | if (ret) | 1164 | if (ret) |
1165 | break; | 1165 | break; |
@@ -1170,7 +1170,7 @@ static int onenand_mlc_read_ops_nolock(struct mtd_info *mtd, loff_t from, | |||
1170 | thisooblen = oobsize - oobcolumn; | 1170 | thisooblen = oobsize - oobcolumn; |
1171 | thisooblen = min_t(int, thisooblen, ooblen - oobread); | 1171 | thisooblen = min_t(int, thisooblen, ooblen - oobread); |
1172 | 1172 | ||
1173 | if (ops->mode == MTD_OOB_AUTO) | 1173 | if (ops->mode == MTD_OPS_AUTO_OOB) |
1174 | onenand_transfer_auto_oob(mtd, oobbuf, oobcolumn, thisooblen); | 1174 | onenand_transfer_auto_oob(mtd, oobbuf, oobcolumn, thisooblen); |
1175 | else | 1175 | else |
1176 | this->read_bufferram(mtd, ONENAND_SPARERAM, oobbuf, oobcolumn, thisooblen); | 1176 | this->read_bufferram(mtd, ONENAND_SPARERAM, oobbuf, oobcolumn, thisooblen); |
@@ -1226,10 +1226,10 @@ static int onenand_read_ops_nolock(struct mtd_info *mtd, loff_t from, | |||
1226 | int ret = 0, boundary = 0; | 1226 | int ret = 0, boundary = 0; |
1227 | int writesize = this->writesize; | 1227 | int writesize = this->writesize; |
1228 | 1228 | ||
1229 | DEBUG(MTD_DEBUG_LEVEL3, "%s: from = 0x%08x, len = %i\n", | 1229 | pr_debug("%s: from = 0x%08x, len = %i\n", __func__, (unsigned int)from, |
1230 | __func__, (unsigned int) from, (int) len); | 1230 | (int)len); |
1231 | 1231 | ||
1232 | if (ops->mode == MTD_OOB_AUTO) | 1232 | if (ops->mode == MTD_OPS_AUTO_OOB) |
1233 | oobsize = this->ecclayout->oobavail; | 1233 | oobsize = this->ecclayout->oobavail; |
1234 | else | 1234 | else |
1235 | oobsize = mtd->oobsize; | 1235 | oobsize = mtd->oobsize; |
@@ -1255,7 +1255,7 @@ static int onenand_read_ops_nolock(struct mtd_info *mtd, loff_t from, | |||
1255 | this->command(mtd, ONENAND_CMD_READ, from, writesize); | 1255 | this->command(mtd, ONENAND_CMD_READ, from, writesize); |
1256 | ret = this->wait(mtd, FL_READING); | 1256 | ret = this->wait(mtd, FL_READING); |
1257 | onenand_update_bufferram(mtd, from, !ret); | 1257 | onenand_update_bufferram(mtd, from, !ret); |
1258 | if (ret == -EBADMSG) | 1258 | if (mtd_is_eccerr(ret)) |
1259 | ret = 0; | 1259 | ret = 0; |
1260 | } | 1260 | } |
1261 | } | 1261 | } |
@@ -1291,7 +1291,7 @@ static int onenand_read_ops_nolock(struct mtd_info *mtd, loff_t from, | |||
1291 | thisooblen = oobsize - oobcolumn; | 1291 | thisooblen = oobsize - oobcolumn; |
1292 | thisooblen = min_t(int, thisooblen, ooblen - oobread); | 1292 | thisooblen = min_t(int, thisooblen, ooblen - oobread); |
1293 | 1293 | ||
1294 | if (ops->mode == MTD_OOB_AUTO) | 1294 | if (ops->mode == MTD_OPS_AUTO_OOB) |
1295 | onenand_transfer_auto_oob(mtd, oobbuf, oobcolumn, thisooblen); | 1295 | onenand_transfer_auto_oob(mtd, oobbuf, oobcolumn, thisooblen); |
1296 | else | 1296 | else |
1297 | this->read_bufferram(mtd, ONENAND_SPARERAM, oobbuf, oobcolumn, thisooblen); | 1297 | this->read_bufferram(mtd, ONENAND_SPARERAM, oobbuf, oobcolumn, thisooblen); |
@@ -1315,7 +1315,7 @@ static int onenand_read_ops_nolock(struct mtd_info *mtd, loff_t from, | |||
1315 | /* Now wait for load */ | 1315 | /* Now wait for load */ |
1316 | ret = this->wait(mtd, FL_READING); | 1316 | ret = this->wait(mtd, FL_READING); |
1317 | onenand_update_bufferram(mtd, from, !ret); | 1317 | onenand_update_bufferram(mtd, from, !ret); |
1318 | if (ret == -EBADMSG) | 1318 | if (mtd_is_eccerr(ret)) |
1319 | ret = 0; | 1319 | ret = 0; |
1320 | } | 1320 | } |
1321 | 1321 | ||
@@ -1351,19 +1351,19 @@ static int onenand_read_oob_nolock(struct mtd_info *mtd, loff_t from, | |||
1351 | struct mtd_ecc_stats stats; | 1351 | struct mtd_ecc_stats stats; |
1352 | int read = 0, thislen, column, oobsize; | 1352 | int read = 0, thislen, column, oobsize; |
1353 | size_t len = ops->ooblen; | 1353 | size_t len = ops->ooblen; |
1354 | mtd_oob_mode_t mode = ops->mode; | 1354 | unsigned int mode = ops->mode; |
1355 | u_char *buf = ops->oobbuf; | 1355 | u_char *buf = ops->oobbuf; |
1356 | int ret = 0, readcmd; | 1356 | int ret = 0, readcmd; |
1357 | 1357 | ||
1358 | from += ops->ooboffs; | 1358 | from += ops->ooboffs; |
1359 | 1359 | ||
1360 | DEBUG(MTD_DEBUG_LEVEL3, "%s: from = 0x%08x, len = %i\n", | 1360 | pr_debug("%s: from = 0x%08x, len = %i\n", __func__, (unsigned int)from, |
1361 | __func__, (unsigned int) from, (int) len); | 1361 | (int)len); |
1362 | 1362 | ||
1363 | /* Initialize return length value */ | 1363 | /* Initialize return length value */ |
1364 | ops->oobretlen = 0; | 1364 | ops->oobretlen = 0; |
1365 | 1365 | ||
1366 | if (mode == MTD_OOB_AUTO) | 1366 | if (mode == MTD_OPS_AUTO_OOB) |
1367 | oobsize = this->ecclayout->oobavail; | 1367 | oobsize = this->ecclayout->oobavail; |
1368 | else | 1368 | else |
1369 | oobsize = mtd->oobsize; | 1369 | oobsize = mtd->oobsize; |
@@ -1403,13 +1403,13 @@ static int onenand_read_oob_nolock(struct mtd_info *mtd, loff_t from, | |||
1403 | if (unlikely(ret)) | 1403 | if (unlikely(ret)) |
1404 | ret = onenand_recover_lsb(mtd, from, ret); | 1404 | ret = onenand_recover_lsb(mtd, from, ret); |
1405 | 1405 | ||
1406 | if (ret && ret != -EBADMSG) { | 1406 | if (ret && !mtd_is_eccerr(ret)) { |
1407 | printk(KERN_ERR "%s: read failed = 0x%x\n", | 1407 | printk(KERN_ERR "%s: read failed = 0x%x\n", |
1408 | __func__, ret); | 1408 | __func__, ret); |
1409 | break; | 1409 | break; |
1410 | } | 1410 | } |
1411 | 1411 | ||
1412 | if (mode == MTD_OOB_AUTO) | 1412 | if (mode == MTD_OPS_AUTO_OOB) |
1413 | onenand_transfer_auto_oob(mtd, buf, column, thislen); | 1413 | onenand_transfer_auto_oob(mtd, buf, column, thislen); |
1414 | else | 1414 | else |
1415 | this->read_bufferram(mtd, ONENAND_SPARERAM, buf, column, thislen); | 1415 | this->read_bufferram(mtd, ONENAND_SPARERAM, buf, column, thislen); |
@@ -1487,10 +1487,10 @@ static int onenand_read_oob(struct mtd_info *mtd, loff_t from, | |||
1487 | int ret; | 1487 | int ret; |
1488 | 1488 | ||
1489 | switch (ops->mode) { | 1489 | switch (ops->mode) { |
1490 | case MTD_OOB_PLACE: | 1490 | case MTD_OPS_PLACE_OOB: |
1491 | case MTD_OOB_AUTO: | 1491 | case MTD_OPS_AUTO_OOB: |
1492 | break; | 1492 | break; |
1493 | case MTD_OOB_RAW: | 1493 | case MTD_OPS_RAW: |
1494 | /* Not implemented yet */ | 1494 | /* Not implemented yet */ |
1495 | default: | 1495 | default: |
1496 | return -EINVAL; | 1496 | return -EINVAL; |
@@ -1576,8 +1576,8 @@ int onenand_bbt_read_oob(struct mtd_info *mtd, loff_t from, | |||
1576 | size_t len = ops->ooblen; | 1576 | size_t len = ops->ooblen; |
1577 | u_char *buf = ops->oobbuf; | 1577 | u_char *buf = ops->oobbuf; |
1578 | 1578 | ||
1579 | DEBUG(MTD_DEBUG_LEVEL3, "%s: from = 0x%08x, len = %zi\n", | 1579 | pr_debug("%s: from = 0x%08x, len = %zi\n", __func__, (unsigned int)from, |
1580 | __func__, (unsigned int) from, len); | 1580 | len); |
1581 | 1581 | ||
1582 | /* Initialize return value */ | 1582 | /* Initialize return value */ |
1583 | ops->oobretlen = 0; | 1583 | ops->oobretlen = 0; |
@@ -1750,8 +1750,8 @@ static int onenand_panic_write(struct mtd_info *mtd, loff_t to, size_t len, | |||
1750 | /* Wait for any existing operation to clear */ | 1750 | /* Wait for any existing operation to clear */ |
1751 | onenand_panic_wait(mtd); | 1751 | onenand_panic_wait(mtd); |
1752 | 1752 | ||
1753 | DEBUG(MTD_DEBUG_LEVEL3, "%s: to = 0x%08x, len = %i\n", | 1753 | pr_debug("%s: to = 0x%08x, len = %i\n", __func__, (unsigned int)to, |
1754 | __func__, (unsigned int) to, (int) len); | 1754 | (int)len); |
1755 | 1755 | ||
1756 | /* Initialize retlen, in case of early exit */ | 1756 | /* Initialize retlen, in case of early exit */ |
1757 | *retlen = 0; | 1757 | *retlen = 0; |
@@ -1821,7 +1821,7 @@ static int onenand_panic_write(struct mtd_info *mtd, loff_t to, size_t len, | |||
1821 | } | 1821 | } |
1822 | 1822 | ||
1823 | /** | 1823 | /** |
1824 | * onenand_fill_auto_oob - [Internal] oob auto-placement transfer | 1824 | * onenand_fill_auto_oob - [INTERN] oob auto-placement transfer |
1825 | * @param mtd MTD device structure | 1825 | * @param mtd MTD device structure |
1826 | * @param oob_buf oob buffer | 1826 | * @param oob_buf oob buffer |
1827 | * @param buf source address | 1827 | * @param buf source address |
@@ -1883,8 +1883,8 @@ static int onenand_write_ops_nolock(struct mtd_info *mtd, loff_t to, | |||
1883 | u_char *oobbuf; | 1883 | u_char *oobbuf; |
1884 | int ret = 0, cmd; | 1884 | int ret = 0, cmd; |
1885 | 1885 | ||
1886 | DEBUG(MTD_DEBUG_LEVEL3, "%s: to = 0x%08x, len = %i\n", | 1886 | pr_debug("%s: to = 0x%08x, len = %i\n", __func__, (unsigned int)to, |
1887 | __func__, (unsigned int) to, (int) len); | 1887 | (int)len); |
1888 | 1888 | ||
1889 | /* Initialize retlen, in case of early exit */ | 1889 | /* Initialize retlen, in case of early exit */ |
1890 | ops->retlen = 0; | 1890 | ops->retlen = 0; |
@@ -1908,7 +1908,7 @@ static int onenand_write_ops_nolock(struct mtd_info *mtd, loff_t to, | |||
1908 | if (!len) | 1908 | if (!len) |
1909 | return 0; | 1909 | return 0; |
1910 | 1910 | ||
1911 | if (ops->mode == MTD_OOB_AUTO) | 1911 | if (ops->mode == MTD_OPS_AUTO_OOB) |
1912 | oobsize = this->ecclayout->oobavail; | 1912 | oobsize = this->ecclayout->oobavail; |
1913 | else | 1913 | else |
1914 | oobsize = mtd->oobsize; | 1914 | oobsize = mtd->oobsize; |
@@ -1945,7 +1945,7 @@ static int onenand_write_ops_nolock(struct mtd_info *mtd, loff_t to, | |||
1945 | /* We send data to spare ram with oobsize | 1945 | /* We send data to spare ram with oobsize |
1946 | * to prevent byte access */ | 1946 | * to prevent byte access */ |
1947 | memset(oobbuf, 0xff, mtd->oobsize); | 1947 | memset(oobbuf, 0xff, mtd->oobsize); |
1948 | if (ops->mode == MTD_OOB_AUTO) | 1948 | if (ops->mode == MTD_OPS_AUTO_OOB) |
1949 | onenand_fill_auto_oob(mtd, oobbuf, oob, oobcolumn, thisooblen); | 1949 | onenand_fill_auto_oob(mtd, oobbuf, oob, oobcolumn, thisooblen); |
1950 | else | 1950 | else |
1951 | memcpy(oobbuf + oobcolumn, oob, thisooblen); | 1951 | memcpy(oobbuf + oobcolumn, oob, thisooblen); |
@@ -2055,7 +2055,7 @@ static int onenand_write_ops_nolock(struct mtd_info *mtd, loff_t to, | |||
2055 | 2055 | ||
2056 | 2056 | ||
2057 | /** | 2057 | /** |
2058 | * onenand_write_oob_nolock - [Internal] OneNAND write out-of-band | 2058 | * onenand_write_oob_nolock - [INTERN] OneNAND write out-of-band |
2059 | * @param mtd MTD device structure | 2059 | * @param mtd MTD device structure |
2060 | * @param to offset to write to | 2060 | * @param to offset to write to |
2061 | * @param len number of bytes to write | 2061 | * @param len number of bytes to write |
@@ -2074,17 +2074,17 @@ static int onenand_write_oob_nolock(struct mtd_info *mtd, loff_t to, | |||
2074 | u_char *oobbuf; | 2074 | u_char *oobbuf; |
2075 | size_t len = ops->ooblen; | 2075 | size_t len = ops->ooblen; |
2076 | const u_char *buf = ops->oobbuf; | 2076 | const u_char *buf = ops->oobbuf; |
2077 | mtd_oob_mode_t mode = ops->mode; | 2077 | unsigned int mode = ops->mode; |
2078 | 2078 | ||
2079 | to += ops->ooboffs; | 2079 | to += ops->ooboffs; |
2080 | 2080 | ||
2081 | DEBUG(MTD_DEBUG_LEVEL3, "%s: to = 0x%08x, len = %i\n", | 2081 | pr_debug("%s: to = 0x%08x, len = %i\n", __func__, (unsigned int)to, |
2082 | __func__, (unsigned int) to, (int) len); | 2082 | (int)len); |
2083 | 2083 | ||
2084 | /* Initialize retlen, in case of early exit */ | 2084 | /* Initialize retlen, in case of early exit */ |
2085 | ops->oobretlen = 0; | 2085 | ops->oobretlen = 0; |
2086 | 2086 | ||
2087 | if (mode == MTD_OOB_AUTO) | 2087 | if (mode == MTD_OPS_AUTO_OOB) |
2088 | oobsize = this->ecclayout->oobavail; | 2088 | oobsize = this->ecclayout->oobavail; |
2089 | else | 2089 | else |
2090 | oobsize = mtd->oobsize; | 2090 | oobsize = mtd->oobsize; |
@@ -2128,7 +2128,7 @@ static int onenand_write_oob_nolock(struct mtd_info *mtd, loff_t to, | |||
2128 | /* We send data to spare ram with oobsize | 2128 | /* We send data to spare ram with oobsize |
2129 | * to prevent byte access */ | 2129 | * to prevent byte access */ |
2130 | memset(oobbuf, 0xff, mtd->oobsize); | 2130 | memset(oobbuf, 0xff, mtd->oobsize); |
2131 | if (mode == MTD_OOB_AUTO) | 2131 | if (mode == MTD_OPS_AUTO_OOB) |
2132 | onenand_fill_auto_oob(mtd, oobbuf, buf, column, thislen); | 2132 | onenand_fill_auto_oob(mtd, oobbuf, buf, column, thislen); |
2133 | else | 2133 | else |
2134 | memcpy(oobbuf + column, buf, thislen); | 2134 | memcpy(oobbuf + column, buf, thislen); |
@@ -2217,10 +2217,10 @@ static int onenand_write_oob(struct mtd_info *mtd, loff_t to, | |||
2217 | int ret; | 2217 | int ret; |
2218 | 2218 | ||
2219 | switch (ops->mode) { | 2219 | switch (ops->mode) { |
2220 | case MTD_OOB_PLACE: | 2220 | case MTD_OPS_PLACE_OOB: |
2221 | case MTD_OOB_AUTO: | 2221 | case MTD_OPS_AUTO_OOB: |
2222 | break; | 2222 | break; |
2223 | case MTD_OOB_RAW: | 2223 | case MTD_OPS_RAW: |
2224 | /* Not implemented yet */ | 2224 | /* Not implemented yet */ |
2225 | default: | 2225 | default: |
2226 | return -EINVAL; | 2226 | return -EINVAL; |
@@ -2281,7 +2281,7 @@ static int onenand_multiblock_erase_verify(struct mtd_info *mtd, | |||
2281 | } | 2281 | } |
2282 | 2282 | ||
2283 | /** | 2283 | /** |
2284 | * onenand_multiblock_erase - [Internal] erase block(s) using multiblock erase | 2284 | * onenand_multiblock_erase - [INTERN] erase block(s) using multiblock erase |
2285 | * @param mtd MTD device structure | 2285 | * @param mtd MTD device structure |
2286 | * @param instr erase instruction | 2286 | * @param instr erase instruction |
2287 | * @param region erase region | 2287 | * @param region erase region |
@@ -2397,7 +2397,7 @@ static int onenand_multiblock_erase(struct mtd_info *mtd, | |||
2397 | 2397 | ||
2398 | 2398 | ||
2399 | /** | 2399 | /** |
2400 | * onenand_block_by_block_erase - [Internal] erase block(s) using regular erase | 2400 | * onenand_block_by_block_erase - [INTERN] erase block(s) using regular erase |
2401 | * @param mtd MTD device structure | 2401 | * @param mtd MTD device structure |
2402 | * @param instr erase instruction | 2402 | * @param instr erase instruction |
2403 | * @param region erase region | 2403 | * @param region erase region |
@@ -2489,8 +2489,9 @@ static int onenand_erase(struct mtd_info *mtd, struct erase_info *instr) | |||
2489 | struct mtd_erase_region_info *region = NULL; | 2489 | struct mtd_erase_region_info *region = NULL; |
2490 | loff_t region_offset = 0; | 2490 | loff_t region_offset = 0; |
2491 | 2491 | ||
2492 | DEBUG(MTD_DEBUG_LEVEL3, "%s: start=0x%012llx, len=%llu\n", __func__, | 2492 | pr_debug("%s: start=0x%012llx, len=%llu\n", __func__, |
2493 | (unsigned long long) instr->addr, (unsigned long long) instr->len); | 2493 | (unsigned long long)instr->addr, |
2494 | (unsigned long long)instr->len); | ||
2494 | 2495 | ||
2495 | /* Do not allow erase past end of device */ | 2496 | /* Do not allow erase past end of device */ |
2496 | if (unlikely((len + addr) > mtd->size)) { | 2497 | if (unlikely((len + addr) > mtd->size)) { |
@@ -2558,7 +2559,7 @@ static int onenand_erase(struct mtd_info *mtd, struct erase_info *instr) | |||
2558 | */ | 2559 | */ |
2559 | static void onenand_sync(struct mtd_info *mtd) | 2560 | static void onenand_sync(struct mtd_info *mtd) |
2560 | { | 2561 | { |
2561 | DEBUG(MTD_DEBUG_LEVEL3, "%s: called\n", __func__); | 2562 | pr_debug("%s: called\n", __func__); |
2562 | 2563 | ||
2563 | /* Grab the lock and see if the device is available */ | 2564 | /* Grab the lock and see if the device is available */ |
2564 | onenand_get_device(mtd, FL_SYNCING); | 2565 | onenand_get_device(mtd, FL_SYNCING); |
@@ -2602,7 +2603,7 @@ static int onenand_default_block_markbad(struct mtd_info *mtd, loff_t ofs) | |||
2602 | struct bbm_info *bbm = this->bbm; | 2603 | struct bbm_info *bbm = this->bbm; |
2603 | u_char buf[2] = {0, 0}; | 2604 | u_char buf[2] = {0, 0}; |
2604 | struct mtd_oob_ops ops = { | 2605 | struct mtd_oob_ops ops = { |
2605 | .mode = MTD_OOB_PLACE, | 2606 | .mode = MTD_OPS_PLACE_OOB, |
2606 | .ooblen = 2, | 2607 | .ooblen = 2, |
2607 | .oobbuf = buf, | 2608 | .oobbuf = buf, |
2608 | .ooboffs = 0, | 2609 | .ooboffs = 0, |
@@ -2922,7 +2923,7 @@ static int onenand_otp_command(struct mtd_info *mtd, int cmd, loff_t addr, | |||
2922 | } | 2923 | } |
2923 | 2924 | ||
2924 | /** | 2925 | /** |
2925 | * onenand_otp_write_oob_nolock - [Internal] OneNAND write out-of-band, specific to OTP | 2926 | * onenand_otp_write_oob_nolock - [INTERN] OneNAND write out-of-band, specific to OTP |
2926 | * @param mtd MTD device structure | 2927 | * @param mtd MTD device structure |
2927 | * @param to offset to write to | 2928 | * @param to offset to write to |
2928 | * @param len number of bytes to write | 2929 | * @param len number of bytes to write |
@@ -3170,7 +3171,7 @@ static int do_otp_lock(struct mtd_info *mtd, loff_t from, size_t len, | |||
3170 | this->command(mtd, ONENAND_CMD_RESET, 0, 0); | 3171 | this->command(mtd, ONENAND_CMD_RESET, 0, 0); |
3171 | this->wait(mtd, FL_RESETING); | 3172 | this->wait(mtd, FL_RESETING); |
3172 | } else { | 3173 | } else { |
3173 | ops.mode = MTD_OOB_PLACE; | 3174 | ops.mode = MTD_OPS_PLACE_OOB; |
3174 | ops.ooblen = len; | 3175 | ops.ooblen = len; |
3175 | ops.oobbuf = buf; | 3176 | ops.oobbuf = buf; |
3176 | ops.ooboffs = 0; | 3177 | ops.ooboffs = 0; |
@@ -3429,6 +3430,19 @@ static void onenand_check_features(struct mtd_info *mtd) | |||
3429 | else if (numbufs == 1) { | 3430 | else if (numbufs == 1) { |
3430 | this->options |= ONENAND_HAS_4KB_PAGE; | 3431 | this->options |= ONENAND_HAS_4KB_PAGE; |
3431 | this->options |= ONENAND_HAS_CACHE_PROGRAM; | 3432 | this->options |= ONENAND_HAS_CACHE_PROGRAM; |
3433 | /* | ||
3434 | * There are two different 4KiB pagesize chips | ||
3435 | * and no way to detect it by H/W config values. | ||
3436 | * | ||
3437 | * To detect the correct NOP for each chips, | ||
3438 | * It should check the version ID as workaround. | ||
3439 | * | ||
3440 | * Now it has as following | ||
3441 | * KFM4G16Q4M has NOP 4 with version ID 0x0131 | ||
3442 | * KFM4G16Q5M has NOP 1 with versoin ID 0x013e | ||
3443 | */ | ||
3444 | if ((this->version_id & 0xf) == 0xe) | ||
3445 | this->options |= ONENAND_HAS_NOP_1; | ||
3432 | } | 3446 | } |
3433 | 3447 | ||
3434 | case ONENAND_DEVICE_DENSITY_2Gb: | 3448 | case ONENAND_DEVICE_DENSITY_2Gb: |
@@ -3663,7 +3677,7 @@ static int flexonenand_check_blocks_erased(struct mtd_info *mtd, int start, int | |||
3663 | int i, ret; | 3677 | int i, ret; |
3664 | int block; | 3678 | int block; |
3665 | struct mtd_oob_ops ops = { | 3679 | struct mtd_oob_ops ops = { |
3666 | .mode = MTD_OOB_PLACE, | 3680 | .mode = MTD_OPS_PLACE_OOB, |
3667 | .ooboffs = 0, | 3681 | .ooboffs = 0, |
3668 | .ooblen = mtd->oobsize, | 3682 | .ooblen = mtd->oobsize, |
3669 | .datbuf = NULL, | 3683 | .datbuf = NULL, |
@@ -4054,6 +4068,8 @@ int onenand_scan(struct mtd_info *mtd, int maxchips) | |||
4054 | this->ecclayout = &onenand_oob_128; | 4068 | this->ecclayout = &onenand_oob_128; |
4055 | mtd->subpage_sft = 2; | 4069 | mtd->subpage_sft = 2; |
4056 | } | 4070 | } |
4071 | if (ONENAND_IS_NOP_1(this)) | ||
4072 | mtd->subpage_sft = 0; | ||
4057 | break; | 4073 | break; |
4058 | case 64: | 4074 | case 64: |
4059 | this->ecclayout = &onenand_oob_64; | 4075 | this->ecclayout = &onenand_oob_64; |
diff --git a/drivers/mtd/onenand/onenand_bbt.c b/drivers/mtd/onenand/onenand_bbt.c index b2d7fc5ea25d..66fe3b7e7851 100644 --- a/drivers/mtd/onenand/onenand_bbt.c +++ b/drivers/mtd/onenand/onenand_bbt.c | |||
@@ -81,7 +81,7 @@ static int create_bbt(struct mtd_info *mtd, uint8_t *buf, struct nand_bbt_descr | |||
81 | startblock = 0; | 81 | startblock = 0; |
82 | from = 0; | 82 | from = 0; |
83 | 83 | ||
84 | ops.mode = MTD_OOB_PLACE; | 84 | ops.mode = MTD_OPS_PLACE_OOB; |
85 | ops.ooblen = readlen; | 85 | ops.ooblen = readlen; |
86 | ops.oobbuf = buf; | 86 | ops.oobbuf = buf; |
87 | ops.len = ops.ooboffs = ops.retlen = ops.oobretlen = 0; | 87 | ops.len = ops.ooboffs = ops.retlen = ops.oobretlen = 0; |
@@ -154,7 +154,7 @@ static int onenand_isbad_bbt(struct mtd_info *mtd, loff_t offs, int allowbbt) | |||
154 | block = (int) (onenand_block(this, offs) << 1); | 154 | block = (int) (onenand_block(this, offs) << 1); |
155 | res = (bbm->bbt[block >> 3] >> (block & 0x06)) & 0x03; | 155 | res = (bbm->bbt[block >> 3] >> (block & 0x06)) & 0x03; |
156 | 156 | ||
157 | DEBUG(MTD_DEBUG_LEVEL2, "onenand_isbad_bbt: bbt info for offs 0x%08x: (block %d) 0x%02x\n", | 157 | pr_debug("onenand_isbad_bbt: bbt info for offs 0x%08x: (block %d) 0x%02x\n", |
158 | (unsigned int) offs, block >> 1, res); | 158 | (unsigned int) offs, block >> 1, res); |
159 | 159 | ||
160 | switch ((int) res) { | 160 | switch ((int) res) { |
@@ -189,10 +189,8 @@ int onenand_scan_bbt(struct mtd_info *mtd, struct nand_bbt_descr *bd) | |||
189 | len = this->chipsize >> (this->erase_shift + 2); | 189 | len = this->chipsize >> (this->erase_shift + 2); |
190 | /* Allocate memory (2bit per block) and clear the memory bad block table */ | 190 | /* Allocate memory (2bit per block) and clear the memory bad block table */ |
191 | bbm->bbt = kzalloc(len, GFP_KERNEL); | 191 | bbm->bbt = kzalloc(len, GFP_KERNEL); |
192 | if (!bbm->bbt) { | 192 | if (!bbm->bbt) |
193 | printk(KERN_ERR "onenand_scan_bbt: Out of memory\n"); | ||
194 | return -ENOMEM; | 193 | return -ENOMEM; |
195 | } | ||
196 | 194 | ||
197 | /* Set the bad block position */ | 195 | /* Set the bad block position */ |
198 | bbm->badblockpos = ONENAND_BADBLOCK_POS; | 196 | bbm->badblockpos = ONENAND_BADBLOCK_POS; |
diff --git a/drivers/mtd/onenand/samsung.c b/drivers/mtd/onenand/samsung.c index 3306b5b3c736..5474547eafc2 100644 --- a/drivers/mtd/onenand/samsung.c +++ b/drivers/mtd/onenand/samsung.c | |||
@@ -147,7 +147,6 @@ struct s3c_onenand { | |||
147 | struct resource *dma_res; | 147 | struct resource *dma_res; |
148 | unsigned long phys_base; | 148 | unsigned long phys_base; |
149 | struct completion complete; | 149 | struct completion complete; |
150 | struct mtd_partition *parts; | ||
151 | }; | 150 | }; |
152 | 151 | ||
153 | #define CMD_MAP_00(dev, addr) (dev->cmd_map(MAP_00, ((addr) << 1))) | 152 | #define CMD_MAP_00(dev, addr) (dev->cmd_map(MAP_00, ((addr) << 1))) |
@@ -157,8 +156,6 @@ struct s3c_onenand { | |||
157 | 156 | ||
158 | static struct s3c_onenand *onenand; | 157 | static struct s3c_onenand *onenand; |
159 | 158 | ||
160 | static const char *part_probes[] = { "cmdlinepart", NULL, }; | ||
161 | |||
162 | static inline int s3c_read_reg(int offset) | 159 | static inline int s3c_read_reg(int offset) |
163 | { | 160 | { |
164 | return readl(onenand->base + offset); | 161 | return readl(onenand->base + offset); |
@@ -1017,13 +1014,9 @@ static int s3c_onenand_probe(struct platform_device *pdev) | |||
1017 | if (s3c_read_reg(MEM_CFG_OFFSET) & ONENAND_SYS_CFG1_SYNC_READ) | 1014 | if (s3c_read_reg(MEM_CFG_OFFSET) & ONENAND_SYS_CFG1_SYNC_READ) |
1018 | dev_info(&onenand->pdev->dev, "OneNAND Sync. Burst Read enabled\n"); | 1015 | dev_info(&onenand->pdev->dev, "OneNAND Sync. Burst Read enabled\n"); |
1019 | 1016 | ||
1020 | err = parse_mtd_partitions(mtd, part_probes, &onenand->parts, 0); | 1017 | err = mtd_device_parse_register(mtd, NULL, 0, |
1021 | if (err > 0) | 1018 | pdata ? pdata->parts : NULL, |
1022 | mtd_device_register(mtd, onenand->parts, err); | 1019 | pdata ? pdata->nr_parts : 0); |
1023 | else if (err <= 0 && pdata && pdata->parts) | ||
1024 | mtd_device_register(mtd, pdata->parts, pdata->nr_parts); | ||
1025 | else | ||
1026 | err = mtd_device_register(mtd, NULL, 0); | ||
1027 | 1020 | ||
1028 | platform_set_drvdata(pdev, mtd); | 1021 | platform_set_drvdata(pdev, mtd); |
1029 | 1022 | ||
diff --git a/drivers/mtd/redboot.c b/drivers/mtd/redboot.c index 84b4dda023f4..e366b1d84ead 100644 --- a/drivers/mtd/redboot.c +++ b/drivers/mtd/redboot.c | |||
@@ -57,8 +57,8 @@ static inline int redboot_checksum(struct fis_image_desc *img) | |||
57 | } | 57 | } |
58 | 58 | ||
59 | static int parse_redboot_partitions(struct mtd_info *master, | 59 | static int parse_redboot_partitions(struct mtd_info *master, |
60 | struct mtd_partition **pparts, | 60 | struct mtd_partition **pparts, |
61 | unsigned long fis_origin) | 61 | struct mtd_part_parser_data *data) |
62 | { | 62 | { |
63 | int nrparts = 0; | 63 | int nrparts = 0; |
64 | struct fis_image_desc *buf; | 64 | struct fis_image_desc *buf; |
@@ -198,11 +198,10 @@ static int parse_redboot_partitions(struct mtd_info *master, | |||
198 | goto out; | 198 | goto out; |
199 | } | 199 | } |
200 | new_fl->img = &buf[i]; | 200 | new_fl->img = &buf[i]; |
201 | if (fis_origin) { | 201 | if (data && data->origin) |
202 | buf[i].flash_base -= fis_origin; | 202 | buf[i].flash_base -= data->origin; |
203 | } else { | 203 | else |
204 | buf[i].flash_base &= master->size-1; | 204 | buf[i].flash_base &= master->size-1; |
205 | } | ||
206 | 205 | ||
207 | /* I'm sure the JFFS2 code has done me permanent damage. | 206 | /* I'm sure the JFFS2 code has done me permanent damage. |
208 | * I now think the following is _normal_ | 207 | * I now think the following is _normal_ |
@@ -298,6 +297,9 @@ static struct mtd_part_parser redboot_parser = { | |||
298 | .name = "RedBoot", | 297 | .name = "RedBoot", |
299 | }; | 298 | }; |
300 | 299 | ||
300 | /* mtd parsers will request the module by parser name */ | ||
301 | MODULE_ALIAS("RedBoot"); | ||
302 | |||
301 | static int __init redboot_parser_init(void) | 303 | static int __init redboot_parser_init(void) |
302 | { | 304 | { |
303 | return register_mtd_parser(&redboot_parser); | 305 | return register_mtd_parser(&redboot_parser); |
diff --git a/drivers/mtd/sm_ftl.c b/drivers/mtd/sm_ftl.c index ed3d6cd2c6dc..fddb714e323c 100644 --- a/drivers/mtd/sm_ftl.c +++ b/drivers/mtd/sm_ftl.c | |||
@@ -34,7 +34,7 @@ module_param(debug, int, S_IRUGO | S_IWUSR); | |||
34 | MODULE_PARM_DESC(debug, "Debug level (0-2)"); | 34 | MODULE_PARM_DESC(debug, "Debug level (0-2)"); |
35 | 35 | ||
36 | 36 | ||
37 | /* ------------------- sysfs attributtes ---------------------------------- */ | 37 | /* ------------------- sysfs attributes ---------------------------------- */ |
38 | struct sm_sysfs_attribute { | 38 | struct sm_sysfs_attribute { |
39 | struct device_attribute dev_attr; | 39 | struct device_attribute dev_attr; |
40 | char *data; | 40 | char *data; |
@@ -138,7 +138,7 @@ static int sm_get_lba(uint8_t *lba) | |||
138 | if ((lba[0] & 0xF8) != 0x10) | 138 | if ((lba[0] & 0xF8) != 0x10) |
139 | return -2; | 139 | return -2; |
140 | 140 | ||
141 | /* check parity - endianess doesn't matter */ | 141 | /* check parity - endianness doesn't matter */ |
142 | if (hweight16(*(uint16_t *)lba) & 1) | 142 | if (hweight16(*(uint16_t *)lba) & 1) |
143 | return -2; | 143 | return -2; |
144 | 144 | ||
@@ -147,7 +147,7 @@ static int sm_get_lba(uint8_t *lba) | |||
147 | 147 | ||
148 | 148 | ||
149 | /* | 149 | /* |
150 | * Read LBA asscociated with block | 150 | * Read LBA associated with block |
151 | * returns -1, if block is erased | 151 | * returns -1, if block is erased |
152 | * returns -2 if error happens | 152 | * returns -2 if error happens |
153 | */ | 153 | */ |
@@ -252,11 +252,11 @@ static int sm_read_sector(struct sm_ftl *ftl, | |||
252 | return 0; | 252 | return 0; |
253 | } | 253 | } |
254 | 254 | ||
255 | /* User might not need the oob, but we do for data vertification */ | 255 | /* User might not need the oob, but we do for data verification */ |
256 | if (!oob) | 256 | if (!oob) |
257 | oob = &tmp_oob; | 257 | oob = &tmp_oob; |
258 | 258 | ||
259 | ops.mode = ftl->smallpagenand ? MTD_OOB_RAW : MTD_OOB_PLACE; | 259 | ops.mode = ftl->smallpagenand ? MTD_OPS_RAW : MTD_OPS_PLACE_OOB; |
260 | ops.ooboffs = 0; | 260 | ops.ooboffs = 0; |
261 | ops.ooblen = SM_OOB_SIZE; | 261 | ops.ooblen = SM_OOB_SIZE; |
262 | ops.oobbuf = (void *)oob; | 262 | ops.oobbuf = (void *)oob; |
@@ -276,12 +276,12 @@ again: | |||
276 | return ret; | 276 | return ret; |
277 | } | 277 | } |
278 | 278 | ||
279 | /* Unfortunelly, oob read will _always_ succeed, | 279 | /* Unfortunately, oob read will _always_ succeed, |
280 | despite card removal..... */ | 280 | despite card removal..... */ |
281 | ret = mtd->read_oob(mtd, sm_mkoffset(ftl, zone, block, boffset), &ops); | 281 | ret = mtd->read_oob(mtd, sm_mkoffset(ftl, zone, block, boffset), &ops); |
282 | 282 | ||
283 | /* Test for unknown errors */ | 283 | /* Test for unknown errors */ |
284 | if (ret != 0 && ret != -EUCLEAN && ret != -EBADMSG) { | 284 | if (ret != 0 && !mtd_is_bitflip_or_eccerr(ret)) { |
285 | dbg("read of block %d at zone %d, failed due to error (%d)", | 285 | dbg("read of block %d at zone %d, failed due to error (%d)", |
286 | block, zone, ret); | 286 | block, zone, ret); |
287 | goto again; | 287 | goto again; |
@@ -306,7 +306,7 @@ again: | |||
306 | } | 306 | } |
307 | 307 | ||
308 | /* Test ECC*/ | 308 | /* Test ECC*/ |
309 | if (ret == -EBADMSG || | 309 | if (mtd_is_eccerr(ret) || |
310 | (ftl->smallpagenand && sm_correct_sector(buffer, oob))) { | 310 | (ftl->smallpagenand && sm_correct_sector(buffer, oob))) { |
311 | 311 | ||
312 | dbg("read of block %d at zone %d, failed due to ECC error", | 312 | dbg("read of block %d at zone %d, failed due to ECC error", |
@@ -336,7 +336,7 @@ static int sm_write_sector(struct sm_ftl *ftl, | |||
336 | if (ftl->unstable) | 336 | if (ftl->unstable) |
337 | return -EIO; | 337 | return -EIO; |
338 | 338 | ||
339 | ops.mode = ftl->smallpagenand ? MTD_OOB_RAW : MTD_OOB_PLACE; | 339 | ops.mode = ftl->smallpagenand ? MTD_OPS_RAW : MTD_OPS_PLACE_OOB; |
340 | ops.len = SM_SECTOR_SIZE; | 340 | ops.len = SM_SECTOR_SIZE; |
341 | ops.datbuf = buffer; | 341 | ops.datbuf = buffer; |
342 | ops.ooboffs = 0; | 342 | ops.ooboffs = 0; |
@@ -447,14 +447,14 @@ static void sm_mark_block_bad(struct sm_ftl *ftl, int zone, int block) | |||
447 | 447 | ||
448 | /* We aren't checking the return value, because we don't care */ | 448 | /* We aren't checking the return value, because we don't care */ |
449 | /* This also fails on fake xD cards, but I guess these won't expose | 449 | /* This also fails on fake xD cards, but I guess these won't expose |
450 | any bad blocks till fail completly */ | 450 | any bad blocks till fail completely */ |
451 | for (boffset = 0; boffset < ftl->block_size; boffset += SM_SECTOR_SIZE) | 451 | for (boffset = 0; boffset < ftl->block_size; boffset += SM_SECTOR_SIZE) |
452 | sm_write_sector(ftl, zone, block, boffset, NULL, &oob); | 452 | sm_write_sector(ftl, zone, block, boffset, NULL, &oob); |
453 | } | 453 | } |
454 | 454 | ||
455 | /* | 455 | /* |
456 | * Erase a block within a zone | 456 | * Erase a block within a zone |
457 | * If erase succedes, it updates free block fifo, otherwise marks block as bad | 457 | * If erase succeeds, it updates free block fifo, otherwise marks block as bad |
458 | */ | 458 | */ |
459 | static int sm_erase_block(struct sm_ftl *ftl, int zone_num, uint16_t block, | 459 | static int sm_erase_block(struct sm_ftl *ftl, int zone_num, uint16_t block, |
460 | int put_free) | 460 | int put_free) |
@@ -510,7 +510,7 @@ static void sm_erase_callback(struct erase_info *self) | |||
510 | complete(&ftl->erase_completion); | 510 | complete(&ftl->erase_completion); |
511 | } | 511 | } |
512 | 512 | ||
513 | /* Throughtly test that block is valid. */ | 513 | /* Thoroughly test that block is valid. */ |
514 | static int sm_check_block(struct sm_ftl *ftl, int zone, int block) | 514 | static int sm_check_block(struct sm_ftl *ftl, int zone, int block) |
515 | { | 515 | { |
516 | int boffset; | 516 | int boffset; |
@@ -526,7 +526,7 @@ static int sm_check_block(struct sm_ftl *ftl, int zone, int block) | |||
526 | for (boffset = 0; boffset < ftl->block_size; | 526 | for (boffset = 0; boffset < ftl->block_size; |
527 | boffset += SM_SECTOR_SIZE) { | 527 | boffset += SM_SECTOR_SIZE) { |
528 | 528 | ||
529 | /* This shoudn't happen anyway */ | 529 | /* This shouldn't happen anyway */ |
530 | if (sm_read_sector(ftl, zone, block, boffset, NULL, &oob)) | 530 | if (sm_read_sector(ftl, zone, block, boffset, NULL, &oob)) |
531 | return -2; | 531 | return -2; |
532 | 532 | ||
diff --git a/drivers/mtd/ssfdc.c b/drivers/mtd/ssfdc.c index 5cd189793332..976e3d28b962 100644 --- a/drivers/mtd/ssfdc.c +++ b/drivers/mtd/ssfdc.c | |||
@@ -135,8 +135,7 @@ static int get_valid_cis_sector(struct mtd_info *mtd) | |||
135 | /* Found */ | 135 | /* Found */ |
136 | cis_sector = (int)(offset >> SECTOR_SHIFT); | 136 | cis_sector = (int)(offset >> SECTOR_SHIFT); |
137 | } else { | 137 | } else { |
138 | DEBUG(MTD_DEBUG_LEVEL1, | 138 | pr_debug("SSFDC_RO: CIS/IDI sector not found" |
139 | "SSFDC_RO: CIS/IDI sector not found" | ||
140 | " on %s (mtd%d)\n", mtd->name, | 139 | " on %s (mtd%d)\n", mtd->name, |
141 | mtd->index); | 140 | mtd->index); |
142 | } | 141 | } |
@@ -170,7 +169,7 @@ static int read_raw_oob(struct mtd_info *mtd, loff_t offs, uint8_t *buf) | |||
170 | struct mtd_oob_ops ops; | 169 | struct mtd_oob_ops ops; |
171 | int ret; | 170 | int ret; |
172 | 171 | ||
173 | ops.mode = MTD_OOB_RAW; | 172 | ops.mode = MTD_OPS_RAW; |
174 | ops.ooboffs = 0; | 173 | ops.ooboffs = 0; |
175 | ops.ooblen = OOB_SIZE; | 174 | ops.ooblen = OOB_SIZE; |
176 | ops.oobbuf = buf; | 175 | ops.oobbuf = buf; |
@@ -221,8 +220,7 @@ static int get_logical_address(uint8_t *oob_buf) | |||
221 | block_address >>= 1; | 220 | block_address >>= 1; |
222 | 221 | ||
223 | if (get_parity(block_address, 10) != parity) { | 222 | if (get_parity(block_address, 10) != parity) { |
224 | DEBUG(MTD_DEBUG_LEVEL0, | 223 | pr_debug("SSFDC_RO: logical address field%d" |
225 | "SSFDC_RO: logical address field%d" | ||
226 | "parity error(0x%04X)\n", j+1, | 224 | "parity error(0x%04X)\n", j+1, |
227 | block_address); | 225 | block_address); |
228 | } else { | 226 | } else { |
@@ -235,7 +233,7 @@ static int get_logical_address(uint8_t *oob_buf) | |||
235 | if (!ok) | 233 | if (!ok) |
236 | block_address = -2; | 234 | block_address = -2; |
237 | 235 | ||
238 | DEBUG(MTD_DEBUG_LEVEL3, "SSFDC_RO: get_logical_address() %d\n", | 236 | pr_debug("SSFDC_RO: get_logical_address() %d\n", |
239 | block_address); | 237 | block_address); |
240 | 238 | ||
241 | return block_address; | 239 | return block_address; |
@@ -249,7 +247,7 @@ static int build_logical_block_map(struct ssfdcr_record *ssfdc) | |||
249 | int ret, block_address, phys_block; | 247 | int ret, block_address, phys_block; |
250 | struct mtd_info *mtd = ssfdc->mbd.mtd; | 248 | struct mtd_info *mtd = ssfdc->mbd.mtd; |
251 | 249 | ||
252 | DEBUG(MTD_DEBUG_LEVEL1, "SSFDC_RO: build_block_map() nblks=%d (%luK)\n", | 250 | pr_debug("SSFDC_RO: build_block_map() nblks=%d (%luK)\n", |
253 | ssfdc->map_len, | 251 | ssfdc->map_len, |
254 | (unsigned long)ssfdc->map_len * ssfdc->erase_size / 1024); | 252 | (unsigned long)ssfdc->map_len * ssfdc->erase_size / 1024); |
255 | 253 | ||
@@ -262,8 +260,7 @@ static int build_logical_block_map(struct ssfdcr_record *ssfdc) | |||
262 | 260 | ||
263 | ret = read_raw_oob(mtd, offset, oob_buf); | 261 | ret = read_raw_oob(mtd, offset, oob_buf); |
264 | if (ret < 0) { | 262 | if (ret < 0) { |
265 | DEBUG(MTD_DEBUG_LEVEL0, | 263 | pr_debug("SSFDC_RO: mtd read_oob() failed at %lu\n", |
266 | "SSFDC_RO: mtd read_oob() failed at %lu\n", | ||
267 | offset); | 264 | offset); |
268 | return -1; | 265 | return -1; |
269 | } | 266 | } |
@@ -279,8 +276,7 @@ static int build_logical_block_map(struct ssfdcr_record *ssfdc) | |||
279 | ssfdc->logic_block_map[block_address] = | 276 | ssfdc->logic_block_map[block_address] = |
280 | (unsigned short)phys_block; | 277 | (unsigned short)phys_block; |
281 | 278 | ||
282 | DEBUG(MTD_DEBUG_LEVEL2, | 279 | pr_debug("SSFDC_RO: build_block_map() phys_block=%d," |
283 | "SSFDC_RO: build_block_map() phys_block=%d," | ||
284 | "logic_block_addr=%d, zone=%d\n", | 280 | "logic_block_addr=%d, zone=%d\n", |
285 | phys_block, block_address, zone_index); | 281 | phys_block, block_address, zone_index); |
286 | } | 282 | } |
@@ -304,11 +300,8 @@ static void ssfdcr_add_mtd(struct mtd_blktrans_ops *tr, struct mtd_info *mtd) | |||
304 | return; | 300 | return; |
305 | 301 | ||
306 | ssfdc = kzalloc(sizeof(struct ssfdcr_record), GFP_KERNEL); | 302 | ssfdc = kzalloc(sizeof(struct ssfdcr_record), GFP_KERNEL); |
307 | if (!ssfdc) { | 303 | if (!ssfdc) |
308 | printk(KERN_WARNING | ||
309 | "SSFDC_RO: out of memory for data structures\n"); | ||
310 | return; | 304 | return; |
311 | } | ||
312 | 305 | ||
313 | ssfdc->mbd.mtd = mtd; | 306 | ssfdc->mbd.mtd = mtd; |
314 | ssfdc->mbd.devnum = -1; | 307 | ssfdc->mbd.devnum = -1; |
@@ -319,8 +312,7 @@ static void ssfdcr_add_mtd(struct mtd_blktrans_ops *tr, struct mtd_info *mtd) | |||
319 | ssfdc->erase_size = mtd->erasesize; | 312 | ssfdc->erase_size = mtd->erasesize; |
320 | ssfdc->map_len = (u32)mtd->size / mtd->erasesize; | 313 | ssfdc->map_len = (u32)mtd->size / mtd->erasesize; |
321 | 314 | ||
322 | DEBUG(MTD_DEBUG_LEVEL1, | 315 | pr_debug("SSFDC_RO: cis_block=%d,erase_size=%d,map_len=%d,n_zones=%d\n", |
323 | "SSFDC_RO: cis_block=%d,erase_size=%d,map_len=%d,n_zones=%d\n", | ||
324 | ssfdc->cis_block, ssfdc->erase_size, ssfdc->map_len, | 316 | ssfdc->cis_block, ssfdc->erase_size, ssfdc->map_len, |
325 | DIV_ROUND_UP(ssfdc->map_len, MAX_PHYS_BLK_PER_ZONE)); | 317 | DIV_ROUND_UP(ssfdc->map_len, MAX_PHYS_BLK_PER_ZONE)); |
326 | 318 | ||
@@ -331,7 +323,7 @@ static void ssfdcr_add_mtd(struct mtd_blktrans_ops *tr, struct mtd_info *mtd) | |||
331 | ssfdc->cylinders = (unsigned short)(((u32)mtd->size >> SECTOR_SHIFT) / | 323 | ssfdc->cylinders = (unsigned short)(((u32)mtd->size >> SECTOR_SHIFT) / |
332 | ((long)ssfdc->sectors * (long)ssfdc->heads)); | 324 | ((long)ssfdc->sectors * (long)ssfdc->heads)); |
333 | 325 | ||
334 | DEBUG(MTD_DEBUG_LEVEL1, "SSFDC_RO: using C:%d H:%d S:%d == %ld sects\n", | 326 | pr_debug("SSFDC_RO: using C:%d H:%d S:%d == %ld sects\n", |
335 | ssfdc->cylinders, ssfdc->heads , ssfdc->sectors, | 327 | ssfdc->cylinders, ssfdc->heads , ssfdc->sectors, |
336 | (long)ssfdc->cylinders * (long)ssfdc->heads * | 328 | (long)ssfdc->cylinders * (long)ssfdc->heads * |
337 | (long)ssfdc->sectors); | 329 | (long)ssfdc->sectors); |
@@ -342,11 +334,8 @@ static void ssfdcr_add_mtd(struct mtd_blktrans_ops *tr, struct mtd_info *mtd) | |||
342 | /* Allocate logical block map */ | 334 | /* Allocate logical block map */ |
343 | ssfdc->logic_block_map = kmalloc(sizeof(ssfdc->logic_block_map[0]) * | 335 | ssfdc->logic_block_map = kmalloc(sizeof(ssfdc->logic_block_map[0]) * |
344 | ssfdc->map_len, GFP_KERNEL); | 336 | ssfdc->map_len, GFP_KERNEL); |
345 | if (!ssfdc->logic_block_map) { | 337 | if (!ssfdc->logic_block_map) |
346 | printk(KERN_WARNING | ||
347 | "SSFDC_RO: out of memory for data structures\n"); | ||
348 | goto out_err; | 338 | goto out_err; |
349 | } | ||
350 | memset(ssfdc->logic_block_map, 0xff, sizeof(ssfdc->logic_block_map[0]) * | 339 | memset(ssfdc->logic_block_map, 0xff, sizeof(ssfdc->logic_block_map[0]) * |
351 | ssfdc->map_len); | 340 | ssfdc->map_len); |
352 | 341 | ||
@@ -371,7 +360,7 @@ static void ssfdcr_remove_dev(struct mtd_blktrans_dev *dev) | |||
371 | { | 360 | { |
372 | struct ssfdcr_record *ssfdc = (struct ssfdcr_record *)dev; | 361 | struct ssfdcr_record *ssfdc = (struct ssfdcr_record *)dev; |
373 | 362 | ||
374 | DEBUG(MTD_DEBUG_LEVEL1, "SSFDC_RO: remove_dev (i=%d)\n", dev->devnum); | 363 | pr_debug("SSFDC_RO: remove_dev (i=%d)\n", dev->devnum); |
375 | 364 | ||
376 | del_mtd_blktrans_dev(dev); | 365 | del_mtd_blktrans_dev(dev); |
377 | kfree(ssfdc->logic_block_map); | 366 | kfree(ssfdc->logic_block_map); |
@@ -387,8 +376,7 @@ static int ssfdcr_readsect(struct mtd_blktrans_dev *dev, | |||
387 | offset = (int)(logic_sect_no % sectors_per_block); | 376 | offset = (int)(logic_sect_no % sectors_per_block); |
388 | block_address = (int)(logic_sect_no / sectors_per_block); | 377 | block_address = (int)(logic_sect_no / sectors_per_block); |
389 | 378 | ||
390 | DEBUG(MTD_DEBUG_LEVEL3, | 379 | pr_debug("SSFDC_RO: ssfdcr_readsect(%lu) sec_per_blk=%d, ofst=%d," |
391 | "SSFDC_RO: ssfdcr_readsect(%lu) sec_per_blk=%d, ofst=%d," | ||
392 | " block_addr=%d\n", logic_sect_no, sectors_per_block, offset, | 380 | " block_addr=%d\n", logic_sect_no, sectors_per_block, offset, |
393 | block_address); | 381 | block_address); |
394 | 382 | ||
@@ -397,8 +385,7 @@ static int ssfdcr_readsect(struct mtd_blktrans_dev *dev, | |||
397 | 385 | ||
398 | block_address = ssfdc->logic_block_map[block_address]; | 386 | block_address = ssfdc->logic_block_map[block_address]; |
399 | 387 | ||
400 | DEBUG(MTD_DEBUG_LEVEL3, | 388 | pr_debug("SSFDC_RO: ssfdcr_readsect() phys_block_addr=%d\n", |
401 | "SSFDC_RO: ssfdcr_readsect() phys_block_addr=%d\n", | ||
402 | block_address); | 389 | block_address); |
403 | 390 | ||
404 | if (block_address < 0xffff) { | 391 | if (block_address < 0xffff) { |
@@ -407,8 +394,7 @@ static int ssfdcr_readsect(struct mtd_blktrans_dev *dev, | |||
407 | sect_no = (unsigned long)block_address * sectors_per_block + | 394 | sect_no = (unsigned long)block_address * sectors_per_block + |
408 | offset; | 395 | offset; |
409 | 396 | ||
410 | DEBUG(MTD_DEBUG_LEVEL3, | 397 | pr_debug("SSFDC_RO: ssfdcr_readsect() phys_sect_no=%lu\n", |
411 | "SSFDC_RO: ssfdcr_readsect() phys_sect_no=%lu\n", | ||
412 | sect_no); | 398 | sect_no); |
413 | 399 | ||
414 | if (read_physical_sector(ssfdc->mbd.mtd, buf, sect_no) < 0) | 400 | if (read_physical_sector(ssfdc->mbd.mtd, buf, sect_no) < 0) |
@@ -424,7 +410,7 @@ static int ssfdcr_getgeo(struct mtd_blktrans_dev *dev, struct hd_geometry *geo) | |||
424 | { | 410 | { |
425 | struct ssfdcr_record *ssfdc = (struct ssfdcr_record *)dev; | 411 | struct ssfdcr_record *ssfdc = (struct ssfdcr_record *)dev; |
426 | 412 | ||
427 | DEBUG(MTD_DEBUG_LEVEL1, "SSFDC_RO: ssfdcr_getgeo() C=%d, H=%d, S=%d\n", | 413 | pr_debug("SSFDC_RO: ssfdcr_getgeo() C=%d, H=%d, S=%d\n", |
428 | ssfdc->cylinders, ssfdc->heads, ssfdc->sectors); | 414 | ssfdc->cylinders, ssfdc->heads, ssfdc->sectors); |
429 | 415 | ||
430 | geo->heads = ssfdc->heads; | 416 | geo->heads = ssfdc->heads; |
diff --git a/drivers/mtd/tests/mtd_oobtest.c b/drivers/mtd/tests/mtd_oobtest.c index dec92ae6111a..933f7e5f32d3 100644 --- a/drivers/mtd/tests/mtd_oobtest.c +++ b/drivers/mtd/tests/mtd_oobtest.c | |||
@@ -30,7 +30,7 @@ | |||
30 | 30 | ||
31 | #define PRINT_PREF KERN_INFO "mtd_oobtest: " | 31 | #define PRINT_PREF KERN_INFO "mtd_oobtest: " |
32 | 32 | ||
33 | static int dev; | 33 | static int dev = -EINVAL; |
34 | module_param(dev, int, S_IRUGO); | 34 | module_param(dev, int, S_IRUGO); |
35 | MODULE_PARM_DESC(dev, "MTD device number to use"); | 35 | MODULE_PARM_DESC(dev, "MTD device number to use"); |
36 | 36 | ||
@@ -131,7 +131,7 @@ static int write_eraseblock(int ebnum) | |||
131 | 131 | ||
132 | for (i = 0; i < pgcnt; ++i, addr += mtd->writesize) { | 132 | for (i = 0; i < pgcnt; ++i, addr += mtd->writesize) { |
133 | set_random_data(writebuf, use_len); | 133 | set_random_data(writebuf, use_len); |
134 | ops.mode = MTD_OOB_AUTO; | 134 | ops.mode = MTD_OPS_AUTO_OOB; |
135 | ops.len = 0; | 135 | ops.len = 0; |
136 | ops.retlen = 0; | 136 | ops.retlen = 0; |
137 | ops.ooblen = use_len; | 137 | ops.ooblen = use_len; |
@@ -184,7 +184,7 @@ static int verify_eraseblock(int ebnum) | |||
184 | 184 | ||
185 | for (i = 0; i < pgcnt; ++i, addr += mtd->writesize) { | 185 | for (i = 0; i < pgcnt; ++i, addr += mtd->writesize) { |
186 | set_random_data(writebuf, use_len); | 186 | set_random_data(writebuf, use_len); |
187 | ops.mode = MTD_OOB_AUTO; | 187 | ops.mode = MTD_OPS_AUTO_OOB; |
188 | ops.len = 0; | 188 | ops.len = 0; |
189 | ops.retlen = 0; | 189 | ops.retlen = 0; |
190 | ops.ooblen = use_len; | 190 | ops.ooblen = use_len; |
@@ -211,7 +211,7 @@ static int verify_eraseblock(int ebnum) | |||
211 | if (use_offset != 0 || use_len < mtd->ecclayout->oobavail) { | 211 | if (use_offset != 0 || use_len < mtd->ecclayout->oobavail) { |
212 | int k; | 212 | int k; |
213 | 213 | ||
214 | ops.mode = MTD_OOB_AUTO; | 214 | ops.mode = MTD_OPS_AUTO_OOB; |
215 | ops.len = 0; | 215 | ops.len = 0; |
216 | ops.retlen = 0; | 216 | ops.retlen = 0; |
217 | ops.ooblen = mtd->ecclayout->oobavail; | 217 | ops.ooblen = mtd->ecclayout->oobavail; |
@@ -276,7 +276,7 @@ static int verify_eraseblock_in_one_go(int ebnum) | |||
276 | size_t len = mtd->ecclayout->oobavail * pgcnt; | 276 | size_t len = mtd->ecclayout->oobavail * pgcnt; |
277 | 277 | ||
278 | set_random_data(writebuf, len); | 278 | set_random_data(writebuf, len); |
279 | ops.mode = MTD_OOB_AUTO; | 279 | ops.mode = MTD_OPS_AUTO_OOB; |
280 | ops.len = 0; | 280 | ops.len = 0; |
281 | ops.retlen = 0; | 281 | ops.retlen = 0; |
282 | ops.ooblen = len; | 282 | ops.ooblen = len; |
@@ -366,6 +366,13 @@ static int __init mtd_oobtest_init(void) | |||
366 | 366 | ||
367 | printk(KERN_INFO "\n"); | 367 | printk(KERN_INFO "\n"); |
368 | printk(KERN_INFO "=================================================\n"); | 368 | printk(KERN_INFO "=================================================\n"); |
369 | |||
370 | if (dev < 0) { | ||
371 | printk(PRINT_PREF "Please specify a valid mtd-device via module paramter\n"); | ||
372 | printk(KERN_CRIT "CAREFUL: This test wipes all data on the specified MTD device!\n"); | ||
373 | return -EINVAL; | ||
374 | } | ||
375 | |||
369 | printk(PRINT_PREF "MTD device: %d\n", dev); | 376 | printk(PRINT_PREF "MTD device: %d\n", dev); |
370 | 377 | ||
371 | mtd = get_mtd_device(NULL, dev); | 378 | mtd = get_mtd_device(NULL, dev); |
@@ -507,7 +514,7 @@ static int __init mtd_oobtest_init(void) | |||
507 | addr0 += mtd->erasesize; | 514 | addr0 += mtd->erasesize; |
508 | 515 | ||
509 | /* Attempt to write off end of OOB */ | 516 | /* Attempt to write off end of OOB */ |
510 | ops.mode = MTD_OOB_AUTO; | 517 | ops.mode = MTD_OPS_AUTO_OOB; |
511 | ops.len = 0; | 518 | ops.len = 0; |
512 | ops.retlen = 0; | 519 | ops.retlen = 0; |
513 | ops.ooblen = 1; | 520 | ops.ooblen = 1; |
@@ -527,7 +534,7 @@ static int __init mtd_oobtest_init(void) | |||
527 | } | 534 | } |
528 | 535 | ||
529 | /* Attempt to read off end of OOB */ | 536 | /* Attempt to read off end of OOB */ |
530 | ops.mode = MTD_OOB_AUTO; | 537 | ops.mode = MTD_OPS_AUTO_OOB; |
531 | ops.len = 0; | 538 | ops.len = 0; |
532 | ops.retlen = 0; | 539 | ops.retlen = 0; |
533 | ops.ooblen = 1; | 540 | ops.ooblen = 1; |
@@ -551,7 +558,7 @@ static int __init mtd_oobtest_init(void) | |||
551 | "block is bad\n"); | 558 | "block is bad\n"); |
552 | else { | 559 | else { |
553 | /* Attempt to write off end of device */ | 560 | /* Attempt to write off end of device */ |
554 | ops.mode = MTD_OOB_AUTO; | 561 | ops.mode = MTD_OPS_AUTO_OOB; |
555 | ops.len = 0; | 562 | ops.len = 0; |
556 | ops.retlen = 0; | 563 | ops.retlen = 0; |
557 | ops.ooblen = mtd->ecclayout->oobavail + 1; | 564 | ops.ooblen = mtd->ecclayout->oobavail + 1; |
@@ -571,7 +578,7 @@ static int __init mtd_oobtest_init(void) | |||
571 | } | 578 | } |
572 | 579 | ||
573 | /* Attempt to read off end of device */ | 580 | /* Attempt to read off end of device */ |
574 | ops.mode = MTD_OOB_AUTO; | 581 | ops.mode = MTD_OPS_AUTO_OOB; |
575 | ops.len = 0; | 582 | ops.len = 0; |
576 | ops.retlen = 0; | 583 | ops.retlen = 0; |
577 | ops.ooblen = mtd->ecclayout->oobavail + 1; | 584 | ops.ooblen = mtd->ecclayout->oobavail + 1; |
@@ -595,7 +602,7 @@ static int __init mtd_oobtest_init(void) | |||
595 | goto out; | 602 | goto out; |
596 | 603 | ||
597 | /* Attempt to write off end of device */ | 604 | /* Attempt to write off end of device */ |
598 | ops.mode = MTD_OOB_AUTO; | 605 | ops.mode = MTD_OPS_AUTO_OOB; |
599 | ops.len = 0; | 606 | ops.len = 0; |
600 | ops.retlen = 0; | 607 | ops.retlen = 0; |
601 | ops.ooblen = mtd->ecclayout->oobavail; | 608 | ops.ooblen = mtd->ecclayout->oobavail; |
@@ -615,7 +622,7 @@ static int __init mtd_oobtest_init(void) | |||
615 | } | 622 | } |
616 | 623 | ||
617 | /* Attempt to read off end of device */ | 624 | /* Attempt to read off end of device */ |
618 | ops.mode = MTD_OOB_AUTO; | 625 | ops.mode = MTD_OPS_AUTO_OOB; |
619 | ops.len = 0; | 626 | ops.len = 0; |
620 | ops.retlen = 0; | 627 | ops.retlen = 0; |
621 | ops.ooblen = mtd->ecclayout->oobavail; | 628 | ops.ooblen = mtd->ecclayout->oobavail; |
@@ -655,7 +662,7 @@ static int __init mtd_oobtest_init(void) | |||
655 | addr = (i + 1) * mtd->erasesize - mtd->writesize; | 662 | addr = (i + 1) * mtd->erasesize - mtd->writesize; |
656 | for (pg = 0; pg < cnt; ++pg) { | 663 | for (pg = 0; pg < cnt; ++pg) { |
657 | set_random_data(writebuf, sz); | 664 | set_random_data(writebuf, sz); |
658 | ops.mode = MTD_OOB_AUTO; | 665 | ops.mode = MTD_OPS_AUTO_OOB; |
659 | ops.len = 0; | 666 | ops.len = 0; |
660 | ops.retlen = 0; | 667 | ops.retlen = 0; |
661 | ops.ooblen = sz; | 668 | ops.ooblen = sz; |
@@ -683,7 +690,7 @@ static int __init mtd_oobtest_init(void) | |||
683 | continue; | 690 | continue; |
684 | set_random_data(writebuf, mtd->ecclayout->oobavail * 2); | 691 | set_random_data(writebuf, mtd->ecclayout->oobavail * 2); |
685 | addr = (i + 1) * mtd->erasesize - mtd->writesize; | 692 | addr = (i + 1) * mtd->erasesize - mtd->writesize; |
686 | ops.mode = MTD_OOB_AUTO; | 693 | ops.mode = MTD_OPS_AUTO_OOB; |
687 | ops.len = 0; | 694 | ops.len = 0; |
688 | ops.retlen = 0; | 695 | ops.retlen = 0; |
689 | ops.ooblen = mtd->ecclayout->oobavail * 2; | 696 | ops.ooblen = mtd->ecclayout->oobavail * 2; |
diff --git a/drivers/mtd/tests/mtd_pagetest.c b/drivers/mtd/tests/mtd_pagetest.c index 00b937e38c1d..afafb6935fd0 100644 --- a/drivers/mtd/tests/mtd_pagetest.c +++ b/drivers/mtd/tests/mtd_pagetest.c | |||
@@ -30,7 +30,7 @@ | |||
30 | 30 | ||
31 | #define PRINT_PREF KERN_INFO "mtd_pagetest: " | 31 | #define PRINT_PREF KERN_INFO "mtd_pagetest: " |
32 | 32 | ||
33 | static int dev; | 33 | static int dev = -EINVAL; |
34 | module_param(dev, int, S_IRUGO); | 34 | module_param(dev, int, S_IRUGO); |
35 | MODULE_PARM_DESC(dev, "MTD device number to use"); | 35 | MODULE_PARM_DESC(dev, "MTD device number to use"); |
36 | 36 | ||
@@ -128,7 +128,7 @@ static int verify_eraseblock(int ebnum) | |||
128 | for (j = 0; j < pgcnt - 1; ++j, addr += pgsize) { | 128 | for (j = 0; j < pgcnt - 1; ++j, addr += pgsize) { |
129 | /* Do a read to set the internal dataRAMs to different data */ | 129 | /* Do a read to set the internal dataRAMs to different data */ |
130 | err = mtd->read(mtd, addr0, bufsize, &read, twopages); | 130 | err = mtd->read(mtd, addr0, bufsize, &read, twopages); |
131 | if (err == -EUCLEAN) | 131 | if (mtd_is_bitflip(err)) |
132 | err = 0; | 132 | err = 0; |
133 | if (err || read != bufsize) { | 133 | if (err || read != bufsize) { |
134 | printk(PRINT_PREF "error: read failed at %#llx\n", | 134 | printk(PRINT_PREF "error: read failed at %#llx\n", |
@@ -136,7 +136,7 @@ static int verify_eraseblock(int ebnum) | |||
136 | return err; | 136 | return err; |
137 | } | 137 | } |
138 | err = mtd->read(mtd, addrn - bufsize, bufsize, &read, twopages); | 138 | err = mtd->read(mtd, addrn - bufsize, bufsize, &read, twopages); |
139 | if (err == -EUCLEAN) | 139 | if (mtd_is_bitflip(err)) |
140 | err = 0; | 140 | err = 0; |
141 | if (err || read != bufsize) { | 141 | if (err || read != bufsize) { |
142 | printk(PRINT_PREF "error: read failed at %#llx\n", | 142 | printk(PRINT_PREF "error: read failed at %#llx\n", |
@@ -146,7 +146,7 @@ static int verify_eraseblock(int ebnum) | |||
146 | memset(twopages, 0, bufsize); | 146 | memset(twopages, 0, bufsize); |
147 | read = 0; | 147 | read = 0; |
148 | err = mtd->read(mtd, addr, bufsize, &read, twopages); | 148 | err = mtd->read(mtd, addr, bufsize, &read, twopages); |
149 | if (err == -EUCLEAN) | 149 | if (mtd_is_bitflip(err)) |
150 | err = 0; | 150 | err = 0; |
151 | if (err || read != bufsize) { | 151 | if (err || read != bufsize) { |
152 | printk(PRINT_PREF "error: read failed at %#llx\n", | 152 | printk(PRINT_PREF "error: read failed at %#llx\n", |
@@ -164,7 +164,7 @@ static int verify_eraseblock(int ebnum) | |||
164 | unsigned long oldnext = next; | 164 | unsigned long oldnext = next; |
165 | /* Do a read to set the internal dataRAMs to different data */ | 165 | /* Do a read to set the internal dataRAMs to different data */ |
166 | err = mtd->read(mtd, addr0, bufsize, &read, twopages); | 166 | err = mtd->read(mtd, addr0, bufsize, &read, twopages); |
167 | if (err == -EUCLEAN) | 167 | if (mtd_is_bitflip(err)) |
168 | err = 0; | 168 | err = 0; |
169 | if (err || read != bufsize) { | 169 | if (err || read != bufsize) { |
170 | printk(PRINT_PREF "error: read failed at %#llx\n", | 170 | printk(PRINT_PREF "error: read failed at %#llx\n", |
@@ -172,7 +172,7 @@ static int verify_eraseblock(int ebnum) | |||
172 | return err; | 172 | return err; |
173 | } | 173 | } |
174 | err = mtd->read(mtd, addrn - bufsize, bufsize, &read, twopages); | 174 | err = mtd->read(mtd, addrn - bufsize, bufsize, &read, twopages); |
175 | if (err == -EUCLEAN) | 175 | if (mtd_is_bitflip(err)) |
176 | err = 0; | 176 | err = 0; |
177 | if (err || read != bufsize) { | 177 | if (err || read != bufsize) { |
178 | printk(PRINT_PREF "error: read failed at %#llx\n", | 178 | printk(PRINT_PREF "error: read failed at %#llx\n", |
@@ -182,7 +182,7 @@ static int verify_eraseblock(int ebnum) | |||
182 | memset(twopages, 0, bufsize); | 182 | memset(twopages, 0, bufsize); |
183 | read = 0; | 183 | read = 0; |
184 | err = mtd->read(mtd, addr, bufsize, &read, twopages); | 184 | err = mtd->read(mtd, addr, bufsize, &read, twopages); |
185 | if (err == -EUCLEAN) | 185 | if (mtd_is_bitflip(err)) |
186 | err = 0; | 186 | err = 0; |
187 | if (err || read != bufsize) { | 187 | if (err || read != bufsize) { |
188 | printk(PRINT_PREF "error: read failed at %#llx\n", | 188 | printk(PRINT_PREF "error: read failed at %#llx\n", |
@@ -231,7 +231,7 @@ static int crosstest(void) | |||
231 | read = 0; | 231 | read = 0; |
232 | addr = addrn - pgsize - pgsize; | 232 | addr = addrn - pgsize - pgsize; |
233 | err = mtd->read(mtd, addr, pgsize, &read, pp1); | 233 | err = mtd->read(mtd, addr, pgsize, &read, pp1); |
234 | if (err == -EUCLEAN) | 234 | if (mtd_is_bitflip(err)) |
235 | err = 0; | 235 | err = 0; |
236 | if (err || read != pgsize) { | 236 | if (err || read != pgsize) { |
237 | printk(PRINT_PREF "error: read failed at %#llx\n", | 237 | printk(PRINT_PREF "error: read failed at %#llx\n", |
@@ -244,7 +244,7 @@ static int crosstest(void) | |||
244 | read = 0; | 244 | read = 0; |
245 | addr = addrn - pgsize - pgsize - pgsize; | 245 | addr = addrn - pgsize - pgsize - pgsize; |
246 | err = mtd->read(mtd, addr, pgsize, &read, pp1); | 246 | err = mtd->read(mtd, addr, pgsize, &read, pp1); |
247 | if (err == -EUCLEAN) | 247 | if (mtd_is_bitflip(err)) |
248 | err = 0; | 248 | err = 0; |
249 | if (err || read != pgsize) { | 249 | if (err || read != pgsize) { |
250 | printk(PRINT_PREF "error: read failed at %#llx\n", | 250 | printk(PRINT_PREF "error: read failed at %#llx\n", |
@@ -258,7 +258,7 @@ static int crosstest(void) | |||
258 | addr = addr0; | 258 | addr = addr0; |
259 | printk(PRINT_PREF "reading page at %#llx\n", (long long)addr); | 259 | printk(PRINT_PREF "reading page at %#llx\n", (long long)addr); |
260 | err = mtd->read(mtd, addr, pgsize, &read, pp2); | 260 | err = mtd->read(mtd, addr, pgsize, &read, pp2); |
261 | if (err == -EUCLEAN) | 261 | if (mtd_is_bitflip(err)) |
262 | err = 0; | 262 | err = 0; |
263 | if (err || read != pgsize) { | 263 | if (err || read != pgsize) { |
264 | printk(PRINT_PREF "error: read failed at %#llx\n", | 264 | printk(PRINT_PREF "error: read failed at %#llx\n", |
@@ -272,7 +272,7 @@ static int crosstest(void) | |||
272 | addr = addrn - pgsize; | 272 | addr = addrn - pgsize; |
273 | printk(PRINT_PREF "reading page at %#llx\n", (long long)addr); | 273 | printk(PRINT_PREF "reading page at %#llx\n", (long long)addr); |
274 | err = mtd->read(mtd, addr, pgsize, &read, pp3); | 274 | err = mtd->read(mtd, addr, pgsize, &read, pp3); |
275 | if (err == -EUCLEAN) | 275 | if (mtd_is_bitflip(err)) |
276 | err = 0; | 276 | err = 0; |
277 | if (err || read != pgsize) { | 277 | if (err || read != pgsize) { |
278 | printk(PRINT_PREF "error: read failed at %#llx\n", | 278 | printk(PRINT_PREF "error: read failed at %#llx\n", |
@@ -286,7 +286,7 @@ static int crosstest(void) | |||
286 | addr = addr0; | 286 | addr = addr0; |
287 | printk(PRINT_PREF "reading page at %#llx\n", (long long)addr); | 287 | printk(PRINT_PREF "reading page at %#llx\n", (long long)addr); |
288 | err = mtd->read(mtd, addr, pgsize, &read, pp4); | 288 | err = mtd->read(mtd, addr, pgsize, &read, pp4); |
289 | if (err == -EUCLEAN) | 289 | if (mtd_is_bitflip(err)) |
290 | err = 0; | 290 | err = 0; |
291 | if (err || read != pgsize) { | 291 | if (err || read != pgsize) { |
292 | printk(PRINT_PREF "error: read failed at %#llx\n", | 292 | printk(PRINT_PREF "error: read failed at %#llx\n", |
@@ -345,7 +345,7 @@ static int erasecrosstest(void) | |||
345 | printk(PRINT_PREF "reading 1st page of block %d\n", ebnum); | 345 | printk(PRINT_PREF "reading 1st page of block %d\n", ebnum); |
346 | memset(readbuf, 0, pgsize); | 346 | memset(readbuf, 0, pgsize); |
347 | err = mtd->read(mtd, addr0, pgsize, &read, readbuf); | 347 | err = mtd->read(mtd, addr0, pgsize, &read, readbuf); |
348 | if (err == -EUCLEAN) | 348 | if (mtd_is_bitflip(err)) |
349 | err = 0; | 349 | err = 0; |
350 | if (err || read != pgsize) { | 350 | if (err || read != pgsize) { |
351 | printk(PRINT_PREF "error: read failed at %#llx\n", | 351 | printk(PRINT_PREF "error: read failed at %#llx\n", |
@@ -383,7 +383,7 @@ static int erasecrosstest(void) | |||
383 | printk(PRINT_PREF "reading 1st page of block %d\n", ebnum); | 383 | printk(PRINT_PREF "reading 1st page of block %d\n", ebnum); |
384 | memset(readbuf, 0, pgsize); | 384 | memset(readbuf, 0, pgsize); |
385 | err = mtd->read(mtd, addr0, pgsize, &read, readbuf); | 385 | err = mtd->read(mtd, addr0, pgsize, &read, readbuf); |
386 | if (err == -EUCLEAN) | 386 | if (mtd_is_bitflip(err)) |
387 | err = 0; | 387 | err = 0; |
388 | if (err || read != pgsize) { | 388 | if (err || read != pgsize) { |
389 | printk(PRINT_PREF "error: read failed at %#llx\n", | 389 | printk(PRINT_PREF "error: read failed at %#llx\n", |
@@ -439,7 +439,7 @@ static int erasetest(void) | |||
439 | 439 | ||
440 | printk(PRINT_PREF "reading 1st page of block %d\n", ebnum); | 440 | printk(PRINT_PREF "reading 1st page of block %d\n", ebnum); |
441 | err = mtd->read(mtd, addr0, pgsize, &read, twopages); | 441 | err = mtd->read(mtd, addr0, pgsize, &read, twopages); |
442 | if (err == -EUCLEAN) | 442 | if (mtd_is_bitflip(err)) |
443 | err = 0; | 443 | err = 0; |
444 | if (err || read != pgsize) { | 444 | if (err || read != pgsize) { |
445 | printk(PRINT_PREF "error: read failed at %#llx\n", | 445 | printk(PRINT_PREF "error: read failed at %#llx\n", |
@@ -504,6 +504,13 @@ static int __init mtd_pagetest_init(void) | |||
504 | 504 | ||
505 | printk(KERN_INFO "\n"); | 505 | printk(KERN_INFO "\n"); |
506 | printk(KERN_INFO "=================================================\n"); | 506 | printk(KERN_INFO "=================================================\n"); |
507 | |||
508 | if (dev < 0) { | ||
509 | printk(PRINT_PREF "Please specify a valid mtd-device via module paramter\n"); | ||
510 | printk(KERN_CRIT "CAREFUL: This test wipes all data on the specified MTD device!\n"); | ||
511 | return -EINVAL; | ||
512 | } | ||
513 | |||
507 | printk(PRINT_PREF "MTD device: %d\n", dev); | 514 | printk(PRINT_PREF "MTD device: %d\n", dev); |
508 | 515 | ||
509 | mtd = get_mtd_device(NULL, dev); | 516 | mtd = get_mtd_device(NULL, dev); |
diff --git a/drivers/mtd/tests/mtd_readtest.c b/drivers/mtd/tests/mtd_readtest.c index afe71aa15c4b..550fe51225a7 100644 --- a/drivers/mtd/tests/mtd_readtest.c +++ b/drivers/mtd/tests/mtd_readtest.c | |||
@@ -29,7 +29,7 @@ | |||
29 | 29 | ||
30 | #define PRINT_PREF KERN_INFO "mtd_readtest: " | 30 | #define PRINT_PREF KERN_INFO "mtd_readtest: " |
31 | 31 | ||
32 | static int dev; | 32 | static int dev = -EINVAL; |
33 | module_param(dev, int, S_IRUGO); | 33 | module_param(dev, int, S_IRUGO); |
34 | MODULE_PARM_DESC(dev, "MTD device number to use"); | 34 | MODULE_PARM_DESC(dev, "MTD device number to use"); |
35 | 35 | ||
@@ -66,7 +66,7 @@ static int read_eraseblock_by_page(int ebnum) | |||
66 | if (mtd->oobsize) { | 66 | if (mtd->oobsize) { |
67 | struct mtd_oob_ops ops; | 67 | struct mtd_oob_ops ops; |
68 | 68 | ||
69 | ops.mode = MTD_OOB_PLACE; | 69 | ops.mode = MTD_OPS_PLACE_OOB; |
70 | ops.len = 0; | 70 | ops.len = 0; |
71 | ops.retlen = 0; | 71 | ops.retlen = 0; |
72 | ops.ooblen = mtd->oobsize; | 72 | ops.ooblen = mtd->oobsize; |
@@ -75,7 +75,8 @@ static int read_eraseblock_by_page(int ebnum) | |||
75 | ops.datbuf = NULL; | 75 | ops.datbuf = NULL; |
76 | ops.oobbuf = oobbuf; | 76 | ops.oobbuf = oobbuf; |
77 | ret = mtd->read_oob(mtd, addr, &ops); | 77 | ret = mtd->read_oob(mtd, addr, &ops); |
78 | if (ret || ops.oobretlen != mtd->oobsize) { | 78 | if ((ret && !mtd_is_bitflip(ret)) || |
79 | ops.oobretlen != mtd->oobsize) { | ||
79 | printk(PRINT_PREF "error: read oob failed at " | 80 | printk(PRINT_PREF "error: read oob failed at " |
80 | "%#llx\n", (long long)addr); | 81 | "%#llx\n", (long long)addr); |
81 | if (!err) | 82 | if (!err) |
@@ -169,6 +170,12 @@ static int __init mtd_readtest_init(void) | |||
169 | 170 | ||
170 | printk(KERN_INFO "\n"); | 171 | printk(KERN_INFO "\n"); |
171 | printk(KERN_INFO "=================================================\n"); | 172 | printk(KERN_INFO "=================================================\n"); |
173 | |||
174 | if (dev < 0) { | ||
175 | printk(PRINT_PREF "Please specify a valid mtd-device via module paramter\n"); | ||
176 | return -EINVAL; | ||
177 | } | ||
178 | |||
172 | printk(PRINT_PREF "MTD device: %d\n", dev); | 179 | printk(PRINT_PREF "MTD device: %d\n", dev); |
173 | 180 | ||
174 | mtd = get_mtd_device(NULL, dev); | 181 | mtd = get_mtd_device(NULL, dev); |
diff --git a/drivers/mtd/tests/mtd_speedtest.c b/drivers/mtd/tests/mtd_speedtest.c index 627d4e2466a3..493b367bdd35 100644 --- a/drivers/mtd/tests/mtd_speedtest.c +++ b/drivers/mtd/tests/mtd_speedtest.c | |||
@@ -29,7 +29,7 @@ | |||
29 | 29 | ||
30 | #define PRINT_PREF KERN_INFO "mtd_speedtest: " | 30 | #define PRINT_PREF KERN_INFO "mtd_speedtest: " |
31 | 31 | ||
32 | static int dev; | 32 | static int dev = -EINVAL; |
33 | module_param(dev, int, S_IRUGO); | 33 | module_param(dev, int, S_IRUGO); |
34 | MODULE_PARM_DESC(dev, "MTD device number to use"); | 34 | MODULE_PARM_DESC(dev, "MTD device number to use"); |
35 | 35 | ||
@@ -216,7 +216,7 @@ static int read_eraseblock(int ebnum) | |||
216 | 216 | ||
217 | err = mtd->read(mtd, addr, mtd->erasesize, &read, iobuf); | 217 | err = mtd->read(mtd, addr, mtd->erasesize, &read, iobuf); |
218 | /* Ignore corrected ECC errors */ | 218 | /* Ignore corrected ECC errors */ |
219 | if (err == -EUCLEAN) | 219 | if (mtd_is_bitflip(err)) |
220 | err = 0; | 220 | err = 0; |
221 | if (err || read != mtd->erasesize) { | 221 | if (err || read != mtd->erasesize) { |
222 | printk(PRINT_PREF "error: read failed at %#llx\n", addr); | 222 | printk(PRINT_PREF "error: read failed at %#llx\n", addr); |
@@ -237,7 +237,7 @@ static int read_eraseblock_by_page(int ebnum) | |||
237 | for (i = 0; i < pgcnt; i++) { | 237 | for (i = 0; i < pgcnt; i++) { |
238 | err = mtd->read(mtd, addr, pgsize, &read, buf); | 238 | err = mtd->read(mtd, addr, pgsize, &read, buf); |
239 | /* Ignore corrected ECC errors */ | 239 | /* Ignore corrected ECC errors */ |
240 | if (err == -EUCLEAN) | 240 | if (mtd_is_bitflip(err)) |
241 | err = 0; | 241 | err = 0; |
242 | if (err || read != pgsize) { | 242 | if (err || read != pgsize) { |
243 | printk(PRINT_PREF "error: read failed at %#llx\n", | 243 | printk(PRINT_PREF "error: read failed at %#llx\n", |
@@ -263,7 +263,7 @@ static int read_eraseblock_by_2pages(int ebnum) | |||
263 | for (i = 0; i < n; i++) { | 263 | for (i = 0; i < n; i++) { |
264 | err = mtd->read(mtd, addr, sz, &read, buf); | 264 | err = mtd->read(mtd, addr, sz, &read, buf); |
265 | /* Ignore corrected ECC errors */ | 265 | /* Ignore corrected ECC errors */ |
266 | if (err == -EUCLEAN) | 266 | if (mtd_is_bitflip(err)) |
267 | err = 0; | 267 | err = 0; |
268 | if (err || read != sz) { | 268 | if (err || read != sz) { |
269 | printk(PRINT_PREF "error: read failed at %#llx\n", | 269 | printk(PRINT_PREF "error: read failed at %#llx\n", |
@@ -278,7 +278,7 @@ static int read_eraseblock_by_2pages(int ebnum) | |||
278 | if (pgcnt % 2) { | 278 | if (pgcnt % 2) { |
279 | err = mtd->read(mtd, addr, pgsize, &read, buf); | 279 | err = mtd->read(mtd, addr, pgsize, &read, buf); |
280 | /* Ignore corrected ECC errors */ | 280 | /* Ignore corrected ECC errors */ |
281 | if (err == -EUCLEAN) | 281 | if (mtd_is_bitflip(err)) |
282 | err = 0; | 282 | err = 0; |
283 | if (err || read != pgsize) { | 283 | if (err || read != pgsize) { |
284 | printk(PRINT_PREF "error: read failed at %#llx\n", | 284 | printk(PRINT_PREF "error: read failed at %#llx\n", |
@@ -361,6 +361,13 @@ static int __init mtd_speedtest_init(void) | |||
361 | 361 | ||
362 | printk(KERN_INFO "\n"); | 362 | printk(KERN_INFO "\n"); |
363 | printk(KERN_INFO "=================================================\n"); | 363 | printk(KERN_INFO "=================================================\n"); |
364 | |||
365 | if (dev < 0) { | ||
366 | printk(PRINT_PREF "Please specify a valid mtd-device via module paramter\n"); | ||
367 | printk(KERN_CRIT "CAREFUL: This test wipes all data on the specified MTD device!\n"); | ||
368 | return -EINVAL; | ||
369 | } | ||
370 | |||
364 | if (count) | 371 | if (count) |
365 | printk(PRINT_PREF "MTD device: %d count: %d\n", dev, count); | 372 | printk(PRINT_PREF "MTD device: %d count: %d\n", dev, count); |
366 | else | 373 | else |
diff --git a/drivers/mtd/tests/mtd_stresstest.c b/drivers/mtd/tests/mtd_stresstest.c index 531625fc9259..52ffd9120e0d 100644 --- a/drivers/mtd/tests/mtd_stresstest.c +++ b/drivers/mtd/tests/mtd_stresstest.c | |||
@@ -30,7 +30,7 @@ | |||
30 | 30 | ||
31 | #define PRINT_PREF KERN_INFO "mtd_stresstest: " | 31 | #define PRINT_PREF KERN_INFO "mtd_stresstest: " |
32 | 32 | ||
33 | static int dev; | 33 | static int dev = -EINVAL; |
34 | module_param(dev, int, S_IRUGO); | 34 | module_param(dev, int, S_IRUGO); |
35 | MODULE_PARM_DESC(dev, "MTD device number to use"); | 35 | MODULE_PARM_DESC(dev, "MTD device number to use"); |
36 | 36 | ||
@@ -154,7 +154,7 @@ static int do_read(void) | |||
154 | } | 154 | } |
155 | addr = eb * mtd->erasesize + offs; | 155 | addr = eb * mtd->erasesize + offs; |
156 | err = mtd->read(mtd, addr, len, &read, readbuf); | 156 | err = mtd->read(mtd, addr, len, &read, readbuf); |
157 | if (err == -EUCLEAN) | 157 | if (mtd_is_bitflip(err)) |
158 | err = 0; | 158 | err = 0; |
159 | if (unlikely(err || read != len)) { | 159 | if (unlikely(err || read != len)) { |
160 | printk(PRINT_PREF "error: read failed at 0x%llx\n", | 160 | printk(PRINT_PREF "error: read failed at 0x%llx\n", |
@@ -250,6 +250,13 @@ static int __init mtd_stresstest_init(void) | |||
250 | 250 | ||
251 | printk(KERN_INFO "\n"); | 251 | printk(KERN_INFO "\n"); |
252 | printk(KERN_INFO "=================================================\n"); | 252 | printk(KERN_INFO "=================================================\n"); |
253 | |||
254 | if (dev < 0) { | ||
255 | printk(PRINT_PREF "Please specify a valid mtd-device via module paramter\n"); | ||
256 | printk(KERN_CRIT "CAREFUL: This test wipes all data on the specified MTD device!\n"); | ||
257 | return -EINVAL; | ||
258 | } | ||
259 | |||
253 | printk(PRINT_PREF "MTD device: %d\n", dev); | 260 | printk(PRINT_PREF "MTD device: %d\n", dev); |
254 | 261 | ||
255 | mtd = get_mtd_device(NULL, dev); | 262 | mtd = get_mtd_device(NULL, dev); |
diff --git a/drivers/mtd/tests/mtd_subpagetest.c b/drivers/mtd/tests/mtd_subpagetest.c index 334eae53a3db..1a05bfac4eee 100644 --- a/drivers/mtd/tests/mtd_subpagetest.c +++ b/drivers/mtd/tests/mtd_subpagetest.c | |||
@@ -29,7 +29,7 @@ | |||
29 | 29 | ||
30 | #define PRINT_PREF KERN_INFO "mtd_subpagetest: " | 30 | #define PRINT_PREF KERN_INFO "mtd_subpagetest: " |
31 | 31 | ||
32 | static int dev; | 32 | static int dev = -EINVAL; |
33 | module_param(dev, int, S_IRUGO); | 33 | module_param(dev, int, S_IRUGO); |
34 | MODULE_PARM_DESC(dev, "MTD device number to use"); | 34 | MODULE_PARM_DESC(dev, "MTD device number to use"); |
35 | 35 | ||
@@ -198,7 +198,7 @@ static int verify_eraseblock(int ebnum) | |||
198 | read = 0; | 198 | read = 0; |
199 | err = mtd->read(mtd, addr, subpgsize, &read, readbuf); | 199 | err = mtd->read(mtd, addr, subpgsize, &read, readbuf); |
200 | if (unlikely(err || read != subpgsize)) { | 200 | if (unlikely(err || read != subpgsize)) { |
201 | if (err == -EUCLEAN && read == subpgsize) { | 201 | if (mtd_is_bitflip(err) && read == subpgsize) { |
202 | printk(PRINT_PREF "ECC correction at %#llx\n", | 202 | printk(PRINT_PREF "ECC correction at %#llx\n", |
203 | (long long)addr); | 203 | (long long)addr); |
204 | err = 0; | 204 | err = 0; |
@@ -226,7 +226,7 @@ static int verify_eraseblock(int ebnum) | |||
226 | read = 0; | 226 | read = 0; |
227 | err = mtd->read(mtd, addr, subpgsize, &read, readbuf); | 227 | err = mtd->read(mtd, addr, subpgsize, &read, readbuf); |
228 | if (unlikely(err || read != subpgsize)) { | 228 | if (unlikely(err || read != subpgsize)) { |
229 | if (err == -EUCLEAN && read == subpgsize) { | 229 | if (mtd_is_bitflip(err) && read == subpgsize) { |
230 | printk(PRINT_PREF "ECC correction at %#llx\n", | 230 | printk(PRINT_PREF "ECC correction at %#llx\n", |
231 | (long long)addr); | 231 | (long long)addr); |
232 | err = 0; | 232 | err = 0; |
@@ -264,7 +264,7 @@ static int verify_eraseblock2(int ebnum) | |||
264 | read = 0; | 264 | read = 0; |
265 | err = mtd->read(mtd, addr, subpgsize * k, &read, readbuf); | 265 | err = mtd->read(mtd, addr, subpgsize * k, &read, readbuf); |
266 | if (unlikely(err || read != subpgsize * k)) { | 266 | if (unlikely(err || read != subpgsize * k)) { |
267 | if (err == -EUCLEAN && read == subpgsize * k) { | 267 | if (mtd_is_bitflip(err) && read == subpgsize * k) { |
268 | printk(PRINT_PREF "ECC correction at %#llx\n", | 268 | printk(PRINT_PREF "ECC correction at %#llx\n", |
269 | (long long)addr); | 269 | (long long)addr); |
270 | err = 0; | 270 | err = 0; |
@@ -298,7 +298,7 @@ static int verify_eraseblock_ff(int ebnum) | |||
298 | read = 0; | 298 | read = 0; |
299 | err = mtd->read(mtd, addr, subpgsize, &read, readbuf); | 299 | err = mtd->read(mtd, addr, subpgsize, &read, readbuf); |
300 | if (unlikely(err || read != subpgsize)) { | 300 | if (unlikely(err || read != subpgsize)) { |
301 | if (err == -EUCLEAN && read == subpgsize) { | 301 | if (mtd_is_bitflip(err) && read == subpgsize) { |
302 | printk(PRINT_PREF "ECC correction at %#llx\n", | 302 | printk(PRINT_PREF "ECC correction at %#llx\n", |
303 | (long long)addr); | 303 | (long long)addr); |
304 | err = 0; | 304 | err = 0; |
@@ -379,6 +379,13 @@ static int __init mtd_subpagetest_init(void) | |||
379 | 379 | ||
380 | printk(KERN_INFO "\n"); | 380 | printk(KERN_INFO "\n"); |
381 | printk(KERN_INFO "=================================================\n"); | 381 | printk(KERN_INFO "=================================================\n"); |
382 | |||
383 | if (dev < 0) { | ||
384 | printk(PRINT_PREF "Please specify a valid mtd-device via module paramter\n"); | ||
385 | printk(KERN_CRIT "CAREFUL: This test wipes all data on the specified MTD device!\n"); | ||
386 | return -EINVAL; | ||
387 | } | ||
388 | |||
382 | printk(PRINT_PREF "MTD device: %d\n", dev); | 389 | printk(PRINT_PREF "MTD device: %d\n", dev); |
383 | 390 | ||
384 | mtd = get_mtd_device(NULL, dev); | 391 | mtd = get_mtd_device(NULL, dev); |
diff --git a/drivers/mtd/tests/mtd_torturetest.c b/drivers/mtd/tests/mtd_torturetest.c index 5c6c3d248901..03ab649a6964 100644 --- a/drivers/mtd/tests/mtd_torturetest.c +++ b/drivers/mtd/tests/mtd_torturetest.c | |||
@@ -46,7 +46,7 @@ static int pgcnt; | |||
46 | module_param(pgcnt, int, S_IRUGO); | 46 | module_param(pgcnt, int, S_IRUGO); |
47 | MODULE_PARM_DESC(pgcnt, "number of pages per eraseblock to torture (0 => all)"); | 47 | MODULE_PARM_DESC(pgcnt, "number of pages per eraseblock to torture (0 => all)"); |
48 | 48 | ||
49 | static int dev; | 49 | static int dev = -EINVAL; |
50 | module_param(dev, int, S_IRUGO); | 50 | module_param(dev, int, S_IRUGO); |
51 | MODULE_PARM_DESC(dev, "MTD device number to use"); | 51 | MODULE_PARM_DESC(dev, "MTD device number to use"); |
52 | 52 | ||
@@ -138,7 +138,7 @@ static inline int check_eraseblock(int ebnum, unsigned char *buf) | |||
138 | 138 | ||
139 | retry: | 139 | retry: |
140 | err = mtd->read(mtd, addr, len, &read, check_buf); | 140 | err = mtd->read(mtd, addr, len, &read, check_buf); |
141 | if (err == -EUCLEAN) | 141 | if (mtd_is_bitflip(err)) |
142 | printk(PRINT_PREF "single bit flip occurred at EB %d " | 142 | printk(PRINT_PREF "single bit flip occurred at EB %d " |
143 | "MTD reported that it was fixed.\n", ebnum); | 143 | "MTD reported that it was fixed.\n", ebnum); |
144 | else if (err) { | 144 | else if (err) { |
@@ -213,6 +213,13 @@ static int __init tort_init(void) | |||
213 | printk(KERN_INFO "=================================================\n"); | 213 | printk(KERN_INFO "=================================================\n"); |
214 | printk(PRINT_PREF "Warning: this program is trying to wear out your " | 214 | printk(PRINT_PREF "Warning: this program is trying to wear out your " |
215 | "flash, stop it if this is not wanted.\n"); | 215 | "flash, stop it if this is not wanted.\n"); |
216 | |||
217 | if (dev < 0) { | ||
218 | printk(PRINT_PREF "Please specify a valid mtd-device via module paramter\n"); | ||
219 | printk(KERN_CRIT "CAREFUL: This test wipes all data on the specified MTD device!\n"); | ||
220 | return -EINVAL; | ||
221 | } | ||
222 | |||
216 | printk(PRINT_PREF "MTD device: %d\n", dev); | 223 | printk(PRINT_PREF "MTD device: %d\n", dev); |
217 | printk(PRINT_PREF "torture %d eraseblocks (%d-%d) of mtd%d\n", | 224 | printk(PRINT_PREF "torture %d eraseblocks (%d-%d) of mtd%d\n", |
218 | ebcnt, eb, eb + ebcnt - 1, dev); | 225 | ebcnt, eb, eb + ebcnt - 1, dev); |
diff --git a/drivers/mtd/ubi/eba.c b/drivers/mtd/ubi/eba.c index 4be671815014..fb7f19b62d91 100644 --- a/drivers/mtd/ubi/eba.c +++ b/drivers/mtd/ubi/eba.c | |||
@@ -443,7 +443,7 @@ retry: | |||
443 | if (err == UBI_IO_BITFLIPS) { | 443 | if (err == UBI_IO_BITFLIPS) { |
444 | scrub = 1; | 444 | scrub = 1; |
445 | err = 0; | 445 | err = 0; |
446 | } else if (err == -EBADMSG) { | 446 | } else if (mtd_is_eccerr(err)) { |
447 | if (vol->vol_type == UBI_DYNAMIC_VOLUME) | 447 | if (vol->vol_type == UBI_DYNAMIC_VOLUME) |
448 | goto out_unlock; | 448 | goto out_unlock; |
449 | scrub = 1; | 449 | scrub = 1; |
diff --git a/drivers/mtd/ubi/io.c b/drivers/mtd/ubi/io.c index 6ba55c235873..f20b6f22f240 100644 --- a/drivers/mtd/ubi/io.c +++ b/drivers/mtd/ubi/io.c | |||
@@ -172,9 +172,9 @@ int ubi_io_read(const struct ubi_device *ubi, void *buf, int pnum, int offset, | |||
172 | retry: | 172 | retry: |
173 | err = ubi->mtd->read(ubi->mtd, addr, len, &read, buf); | 173 | err = ubi->mtd->read(ubi->mtd, addr, len, &read, buf); |
174 | if (err) { | 174 | if (err) { |
175 | const char *errstr = (err == -EBADMSG) ? " (ECC error)" : ""; | 175 | const char *errstr = mtd_is_eccerr(err) ? " (ECC error)" : ""; |
176 | 176 | ||
177 | if (err == -EUCLEAN) { | 177 | if (mtd_is_bitflip(err)) { |
178 | /* | 178 | /* |
179 | * -EUCLEAN is reported if there was a bit-flip which | 179 | * -EUCLEAN is reported if there was a bit-flip which |
180 | * was corrected, so this is harmless. | 180 | * was corrected, so this is harmless. |
@@ -205,7 +205,7 @@ retry: | |||
205 | * all the requested data. But some buggy drivers might do | 205 | * all the requested data. But some buggy drivers might do |
206 | * this, so we change it to -EIO. | 206 | * this, so we change it to -EIO. |
207 | */ | 207 | */ |
208 | if (read != len && err == -EBADMSG) { | 208 | if (read != len && mtd_is_eccerr(err)) { |
209 | ubi_assert(0); | 209 | ubi_assert(0); |
210 | err = -EIO; | 210 | err = -EIO; |
211 | } | 211 | } |
@@ -469,7 +469,7 @@ static int torture_peb(struct ubi_device *ubi, int pnum) | |||
469 | 469 | ||
470 | out: | 470 | out: |
471 | mutex_unlock(&ubi->buf_mutex); | 471 | mutex_unlock(&ubi->buf_mutex); |
472 | if (err == UBI_IO_BITFLIPS || err == -EBADMSG) { | 472 | if (err == UBI_IO_BITFLIPS || mtd_is_eccerr(err)) { |
473 | /* | 473 | /* |
474 | * If a bit-flip or data integrity error was detected, the test | 474 | * If a bit-flip or data integrity error was detected, the test |
475 | * has not passed because it happened on a freshly erased | 475 | * has not passed because it happened on a freshly erased |
@@ -760,7 +760,7 @@ int ubi_io_read_ec_hdr(struct ubi_device *ubi, int pnum, | |||
760 | 760 | ||
761 | read_err = ubi_io_read(ubi, ec_hdr, pnum, 0, UBI_EC_HDR_SIZE); | 761 | read_err = ubi_io_read(ubi, ec_hdr, pnum, 0, UBI_EC_HDR_SIZE); |
762 | if (read_err) { | 762 | if (read_err) { |
763 | if (read_err != UBI_IO_BITFLIPS && read_err != -EBADMSG) | 763 | if (read_err != UBI_IO_BITFLIPS && !mtd_is_eccerr(read_err)) |
764 | return read_err; | 764 | return read_err; |
765 | 765 | ||
766 | /* | 766 | /* |
@@ -776,7 +776,7 @@ int ubi_io_read_ec_hdr(struct ubi_device *ubi, int pnum, | |||
776 | 776 | ||
777 | magic = be32_to_cpu(ec_hdr->magic); | 777 | magic = be32_to_cpu(ec_hdr->magic); |
778 | if (magic != UBI_EC_HDR_MAGIC) { | 778 | if (magic != UBI_EC_HDR_MAGIC) { |
779 | if (read_err == -EBADMSG) | 779 | if (mtd_is_eccerr(read_err)) |
780 | return UBI_IO_BAD_HDR_EBADMSG; | 780 | return UBI_IO_BAD_HDR_EBADMSG; |
781 | 781 | ||
782 | /* | 782 | /* |
@@ -1032,12 +1032,12 @@ int ubi_io_read_vid_hdr(struct ubi_device *ubi, int pnum, | |||
1032 | p = (char *)vid_hdr - ubi->vid_hdr_shift; | 1032 | p = (char *)vid_hdr - ubi->vid_hdr_shift; |
1033 | read_err = ubi_io_read(ubi, p, pnum, ubi->vid_hdr_aloffset, | 1033 | read_err = ubi_io_read(ubi, p, pnum, ubi->vid_hdr_aloffset, |
1034 | ubi->vid_hdr_alsize); | 1034 | ubi->vid_hdr_alsize); |
1035 | if (read_err && read_err != UBI_IO_BITFLIPS && read_err != -EBADMSG) | 1035 | if (read_err && read_err != UBI_IO_BITFLIPS && !mtd_is_eccerr(read_err)) |
1036 | return read_err; | 1036 | return read_err; |
1037 | 1037 | ||
1038 | magic = be32_to_cpu(vid_hdr->magic); | 1038 | magic = be32_to_cpu(vid_hdr->magic); |
1039 | if (magic != UBI_VID_HDR_MAGIC) { | 1039 | if (magic != UBI_VID_HDR_MAGIC) { |
1040 | if (read_err == -EBADMSG) | 1040 | if (mtd_is_eccerr(read_err)) |
1041 | return UBI_IO_BAD_HDR_EBADMSG; | 1041 | return UBI_IO_BAD_HDR_EBADMSG; |
1042 | 1042 | ||
1043 | if (ubi_check_pattern(vid_hdr, 0xFF, UBI_VID_HDR_SIZE)) { | 1043 | if (ubi_check_pattern(vid_hdr, 0xFF, UBI_VID_HDR_SIZE)) { |
@@ -1219,7 +1219,7 @@ static int paranoid_check_peb_ec_hdr(const struct ubi_device *ubi, int pnum) | |||
1219 | return -ENOMEM; | 1219 | return -ENOMEM; |
1220 | 1220 | ||
1221 | err = ubi_io_read(ubi, ec_hdr, pnum, 0, UBI_EC_HDR_SIZE); | 1221 | err = ubi_io_read(ubi, ec_hdr, pnum, 0, UBI_EC_HDR_SIZE); |
1222 | if (err && err != UBI_IO_BITFLIPS && err != -EBADMSG) | 1222 | if (err && err != UBI_IO_BITFLIPS && !mtd_is_eccerr(err)) |
1223 | goto exit; | 1223 | goto exit; |
1224 | 1224 | ||
1225 | crc = crc32(UBI_CRC32_INIT, ec_hdr, UBI_EC_HDR_SIZE_CRC); | 1225 | crc = crc32(UBI_CRC32_INIT, ec_hdr, UBI_EC_HDR_SIZE_CRC); |
@@ -1306,7 +1306,7 @@ static int paranoid_check_peb_vid_hdr(const struct ubi_device *ubi, int pnum) | |||
1306 | p = (char *)vid_hdr - ubi->vid_hdr_shift; | 1306 | p = (char *)vid_hdr - ubi->vid_hdr_shift; |
1307 | err = ubi_io_read(ubi, p, pnum, ubi->vid_hdr_aloffset, | 1307 | err = ubi_io_read(ubi, p, pnum, ubi->vid_hdr_aloffset, |
1308 | ubi->vid_hdr_alsize); | 1308 | ubi->vid_hdr_alsize); |
1309 | if (err && err != UBI_IO_BITFLIPS && err != -EBADMSG) | 1309 | if (err && err != UBI_IO_BITFLIPS && !mtd_is_eccerr(err)) |
1310 | goto exit; | 1310 | goto exit; |
1311 | 1311 | ||
1312 | crc = crc32(UBI_CRC32_INIT, vid_hdr, UBI_EC_HDR_SIZE_CRC); | 1312 | crc = crc32(UBI_CRC32_INIT, vid_hdr, UBI_EC_HDR_SIZE_CRC); |
@@ -1358,7 +1358,7 @@ int ubi_dbg_check_write(struct ubi_device *ubi, const void *buf, int pnum, | |||
1358 | } | 1358 | } |
1359 | 1359 | ||
1360 | err = ubi->mtd->read(ubi->mtd, addr, len, &read, buf1); | 1360 | err = ubi->mtd->read(ubi->mtd, addr, len, &read, buf1); |
1361 | if (err && err != -EUCLEAN) | 1361 | if (err && !mtd_is_bitflip(err)) |
1362 | goto out_free; | 1362 | goto out_free; |
1363 | 1363 | ||
1364 | for (i = 0; i < len; i++) { | 1364 | for (i = 0; i < len; i++) { |
@@ -1422,7 +1422,7 @@ int ubi_dbg_check_all_ff(struct ubi_device *ubi, int pnum, int offset, int len) | |||
1422 | } | 1422 | } |
1423 | 1423 | ||
1424 | err = ubi->mtd->read(ubi->mtd, addr, len, &read, buf); | 1424 | err = ubi->mtd->read(ubi->mtd, addr, len, &read, buf); |
1425 | if (err && err != -EUCLEAN) { | 1425 | if (err && !mtd_is_bitflip(err)) { |
1426 | ubi_err("error %d while reading %d bytes from PEB %d:%d, " | 1426 | ubi_err("error %d while reading %d bytes from PEB %d:%d, " |
1427 | "read %zd bytes", err, len, pnum, offset, read); | 1427 | "read %zd bytes", err, len, pnum, offset, read); |
1428 | goto error; | 1428 | goto error; |
diff --git a/drivers/mtd/ubi/kapi.c b/drivers/mtd/ubi/kapi.c index d39716e5b204..1a35fc5e3b40 100644 --- a/drivers/mtd/ubi/kapi.c +++ b/drivers/mtd/ubi/kapi.c | |||
@@ -410,7 +410,7 @@ int ubi_leb_read(struct ubi_volume_desc *desc, int lnum, char *buf, int offset, | |||
410 | return 0; | 410 | return 0; |
411 | 411 | ||
412 | err = ubi_eba_read_leb(ubi, vol, lnum, buf, offset, len, check); | 412 | err = ubi_eba_read_leb(ubi, vol, lnum, buf, offset, len, check); |
413 | if (err && err == -EBADMSG && vol->vol_type == UBI_STATIC_VOLUME) { | 413 | if (err && mtd_is_eccerr(err) && vol->vol_type == UBI_STATIC_VOLUME) { |
414 | ubi_warn("mark volume %d as corrupted", vol_id); | 414 | ubi_warn("mark volume %d as corrupted", vol_id); |
415 | vol->corrupted = 1; | 415 | vol->corrupted = 1; |
416 | } | 416 | } |
diff --git a/drivers/mtd/ubi/misc.c b/drivers/mtd/ubi/misc.c index ff2a65c37f69..f6a7d7ac4b98 100644 --- a/drivers/mtd/ubi/misc.c +++ b/drivers/mtd/ubi/misc.c | |||
@@ -81,7 +81,7 @@ int ubi_check_volume(struct ubi_device *ubi, int vol_id) | |||
81 | 81 | ||
82 | err = ubi_eba_read_leb(ubi, vol, i, buf, 0, size, 1); | 82 | err = ubi_eba_read_leb(ubi, vol, i, buf, 0, size, 1); |
83 | if (err) { | 83 | if (err) { |
84 | if (err == -EBADMSG) | 84 | if (mtd_is_eccerr(err)) |
85 | err = 1; | 85 | err = 1; |
86 | break; | 86 | break; |
87 | } | 87 | } |
diff --git a/drivers/mtd/ubi/scan.c b/drivers/mtd/ubi/scan.c index a3a198f9b98d..0cb17d936b5a 100644 --- a/drivers/mtd/ubi/scan.c +++ b/drivers/mtd/ubi/scan.c | |||
@@ -395,7 +395,7 @@ static int compare_lebs(struct ubi_device *ubi, const struct ubi_scan_leb *seb, | |||
395 | } | 395 | } |
396 | 396 | ||
397 | err = ubi_io_read_data(ubi, buf, pnum, 0, len); | 397 | err = ubi_io_read_data(ubi, buf, pnum, 0, len); |
398 | if (err && err != UBI_IO_BITFLIPS && err != -EBADMSG) | 398 | if (err && err != UBI_IO_BITFLIPS && !mtd_is_eccerr(err)) |
399 | goto out_free_buf; | 399 | goto out_free_buf; |
400 | 400 | ||
401 | data_crc = be32_to_cpu(vid_hdr->data_crc); | 401 | data_crc = be32_to_cpu(vid_hdr->data_crc); |
@@ -793,7 +793,7 @@ static int check_corruption(struct ubi_device *ubi, struct ubi_vid_hdr *vid_hdr, | |||
793 | 793 | ||
794 | err = ubi_io_read(ubi, ubi->peb_buf1, pnum, ubi->leb_start, | 794 | err = ubi_io_read(ubi, ubi->peb_buf1, pnum, ubi->leb_start, |
795 | ubi->leb_size); | 795 | ubi->leb_size); |
796 | if (err == UBI_IO_BITFLIPS || err == -EBADMSG) { | 796 | if (err == UBI_IO_BITFLIPS || mtd_is_eccerr(err)) { |
797 | /* | 797 | /* |
798 | * Bit-flips or integrity errors while reading the data area. | 798 | * Bit-flips or integrity errors while reading the data area. |
799 | * It is difficult to say for sure what type of corruption is | 799 | * It is difficult to say for sure what type of corruption is |
diff --git a/drivers/mtd/ubi/vtbl.c b/drivers/mtd/ubi/vtbl.c index 4b50a3029b84..9ad18da1891d 100644 --- a/drivers/mtd/ubi/vtbl.c +++ b/drivers/mtd/ubi/vtbl.c | |||
@@ -423,7 +423,7 @@ static struct ubi_vtbl_record *process_lvol(struct ubi_device *ubi, | |||
423 | 423 | ||
424 | err = ubi_io_read_data(ubi, leb[seb->lnum], seb->pnum, 0, | 424 | err = ubi_io_read_data(ubi, leb[seb->lnum], seb->pnum, 0, |
425 | ubi->vtbl_size); | 425 | ubi->vtbl_size); |
426 | if (err == UBI_IO_BITFLIPS || err == -EBADMSG) | 426 | if (err == UBI_IO_BITFLIPS || mtd_is_eccerr(err)) |
427 | /* | 427 | /* |
428 | * Scrub the PEB later. Note, -EBADMSG indicates an | 428 | * Scrub the PEB later. Note, -EBADMSG indicates an |
429 | * uncorrectable ECC error, but we have our own CRC and | 429 | * uncorrectable ECC error, but we have our own CRC and |
diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c index b2b9109b6712..b0c577256487 100644 --- a/drivers/net/bonding/bond_main.c +++ b/drivers/net/bonding/bond_main.c | |||
@@ -560,8 +560,8 @@ static int bond_update_speed_duplex(struct slave *slave) | |||
560 | u32 slave_speed; | 560 | u32 slave_speed; |
561 | int res; | 561 | int res; |
562 | 562 | ||
563 | slave->speed = -1; | 563 | slave->speed = SPEED_UNKNOWN; |
564 | slave->duplex = -1; | 564 | slave->duplex = DUPLEX_UNKNOWN; |
565 | 565 | ||
566 | res = __ethtool_get_settings(slave_dev, &ecmd); | 566 | res = __ethtool_get_settings(slave_dev, &ecmd); |
567 | if (res < 0) | 567 | if (res < 0) |
diff --git a/drivers/net/bonding/bond_procfs.c b/drivers/net/bonding/bond_procfs.c index 2acf0b080169..ad284baafe87 100644 --- a/drivers/net/bonding/bond_procfs.c +++ b/drivers/net/bonding/bond_procfs.c | |||
@@ -158,12 +158,12 @@ static void bond_info_show_slave(struct seq_file *seq, | |||
158 | seq_printf(seq, "\nSlave Interface: %s\n", slave->dev->name); | 158 | seq_printf(seq, "\nSlave Interface: %s\n", slave->dev->name); |
159 | seq_printf(seq, "MII Status: %s\n", | 159 | seq_printf(seq, "MII Status: %s\n", |
160 | (slave->link == BOND_LINK_UP) ? "up" : "down"); | 160 | (slave->link == BOND_LINK_UP) ? "up" : "down"); |
161 | if (slave->speed == -1) | 161 | if (slave->speed == SPEED_UNKNOWN) |
162 | seq_printf(seq, "Speed: %s\n", "Unknown"); | 162 | seq_printf(seq, "Speed: %s\n", "Unknown"); |
163 | else | 163 | else |
164 | seq_printf(seq, "Speed: %d Mbps\n", slave->speed); | 164 | seq_printf(seq, "Speed: %d Mbps\n", slave->speed); |
165 | 165 | ||
166 | if (slave->duplex == -1) | 166 | if (slave->duplex == DUPLEX_UNKNOWN) |
167 | seq_printf(seq, "Duplex: %s\n", "Unknown"); | 167 | seq_printf(seq, "Duplex: %s\n", "Unknown"); |
168 | else | 168 | else |
169 | seq_printf(seq, "Duplex: %s\n", slave->duplex ? "full" : "half"); | 169 | seq_printf(seq, "Duplex: %s\n", slave->duplex ? "full" : "half"); |
diff --git a/drivers/net/ethernet/Makefile b/drivers/net/ethernet/Makefile index be5dde040261..94b7f287d6c5 100644 --- a/drivers/net/ethernet/Makefile +++ b/drivers/net/ethernet/Makefile | |||
@@ -10,7 +10,7 @@ obj-$(CONFIG_NET_VENDOR_ALTEON) += alteon/ | |||
10 | obj-$(CONFIG_NET_VENDOR_AMD) += amd/ | 10 | obj-$(CONFIG_NET_VENDOR_AMD) += amd/ |
11 | obj-$(CONFIG_NET_VENDOR_APPLE) += apple/ | 11 | obj-$(CONFIG_NET_VENDOR_APPLE) += apple/ |
12 | obj-$(CONFIG_NET_VENDOR_ATHEROS) += atheros/ | 12 | obj-$(CONFIG_NET_VENDOR_ATHEROS) += atheros/ |
13 | obj-$(CONFIG_NET_ATMEL) += cadence/ | 13 | obj-$(CONFIG_NET_CADENCE) += cadence/ |
14 | obj-$(CONFIG_NET_BFIN) += adi/ | 14 | obj-$(CONFIG_NET_BFIN) += adi/ |
15 | obj-$(CONFIG_NET_VENDOR_BROADCOM) += broadcom/ | 15 | obj-$(CONFIG_NET_VENDOR_BROADCOM) += broadcom/ |
16 | obj-$(CONFIG_NET_VENDOR_BROCADE) += brocade/ | 16 | obj-$(CONFIG_NET_VENDOR_BROCADE) += brocade/ |
diff --git a/drivers/net/ethernet/broadcom/tg3.c b/drivers/net/ethernet/broadcom/tg3.c index 161cbbb4814a..bf4074167d6a 100644 --- a/drivers/net/ethernet/broadcom/tg3.c +++ b/drivers/net/ethernet/broadcom/tg3.c | |||
@@ -89,10 +89,10 @@ static inline void _tg3_flag_clear(enum TG3_FLAGS flag, unsigned long *bits) | |||
89 | 89 | ||
90 | #define DRV_MODULE_NAME "tg3" | 90 | #define DRV_MODULE_NAME "tg3" |
91 | #define TG3_MAJ_NUM 3 | 91 | #define TG3_MAJ_NUM 3 |
92 | #define TG3_MIN_NUM 120 | 92 | #define TG3_MIN_NUM 121 |
93 | #define DRV_MODULE_VERSION \ | 93 | #define DRV_MODULE_VERSION \ |
94 | __stringify(TG3_MAJ_NUM) "." __stringify(TG3_MIN_NUM) | 94 | __stringify(TG3_MAJ_NUM) "." __stringify(TG3_MIN_NUM) |
95 | #define DRV_MODULE_RELDATE "August 18, 2011" | 95 | #define DRV_MODULE_RELDATE "November 2, 2011" |
96 | 96 | ||
97 | #define RESET_KIND_SHUTDOWN 0 | 97 | #define RESET_KIND_SHUTDOWN 0 |
98 | #define RESET_KIND_INIT 1 | 98 | #define RESET_KIND_INIT 1 |
@@ -628,19 +628,23 @@ static void tg3_ape_lock_init(struct tg3 *tp) | |||
628 | regbase = TG3_APE_PER_LOCK_GRANT; | 628 | regbase = TG3_APE_PER_LOCK_GRANT; |
629 | 629 | ||
630 | /* Make sure the driver hasn't any stale locks. */ | 630 | /* Make sure the driver hasn't any stale locks. */ |
631 | for (i = 0; i < 8; i++) { | 631 | for (i = TG3_APE_LOCK_PHY0; i <= TG3_APE_LOCK_GPIO; i++) { |
632 | if (i == TG3_APE_LOCK_GPIO) | 632 | switch (i) { |
633 | continue; | 633 | case TG3_APE_LOCK_PHY0: |
634 | tg3_ape_write32(tp, regbase + 4 * i, APE_LOCK_GRANT_DRIVER); | 634 | case TG3_APE_LOCK_PHY1: |
635 | case TG3_APE_LOCK_PHY2: | ||
636 | case TG3_APE_LOCK_PHY3: | ||
637 | bit = APE_LOCK_GRANT_DRIVER; | ||
638 | break; | ||
639 | default: | ||
640 | if (!tp->pci_fn) | ||
641 | bit = APE_LOCK_GRANT_DRIVER; | ||
642 | else | ||
643 | bit = 1 << tp->pci_fn; | ||
644 | } | ||
645 | tg3_ape_write32(tp, regbase + 4 * i, bit); | ||
635 | } | 646 | } |
636 | 647 | ||
637 | /* Clear the correct bit of the GPIO lock too. */ | ||
638 | if (!tp->pci_fn) | ||
639 | bit = APE_LOCK_GRANT_DRIVER; | ||
640 | else | ||
641 | bit = 1 << tp->pci_fn; | ||
642 | |||
643 | tg3_ape_write32(tp, regbase + 4 * TG3_APE_LOCK_GPIO, bit); | ||
644 | } | 648 | } |
645 | 649 | ||
646 | static int tg3_ape_lock(struct tg3 *tp, int locknum) | 650 | static int tg3_ape_lock(struct tg3 *tp, int locknum) |
@@ -658,6 +662,10 @@ static int tg3_ape_lock(struct tg3 *tp, int locknum) | |||
658 | return 0; | 662 | return 0; |
659 | case TG3_APE_LOCK_GRC: | 663 | case TG3_APE_LOCK_GRC: |
660 | case TG3_APE_LOCK_MEM: | 664 | case TG3_APE_LOCK_MEM: |
665 | if (!tp->pci_fn) | ||
666 | bit = APE_LOCK_REQ_DRIVER; | ||
667 | else | ||
668 | bit = 1 << tp->pci_fn; | ||
661 | break; | 669 | break; |
662 | default: | 670 | default: |
663 | return -EINVAL; | 671 | return -EINVAL; |
@@ -673,11 +681,6 @@ static int tg3_ape_lock(struct tg3 *tp, int locknum) | |||
673 | 681 | ||
674 | off = 4 * locknum; | 682 | off = 4 * locknum; |
675 | 683 | ||
676 | if (locknum != TG3_APE_LOCK_GPIO || !tp->pci_fn) | ||
677 | bit = APE_LOCK_REQ_DRIVER; | ||
678 | else | ||
679 | bit = 1 << tp->pci_fn; | ||
680 | |||
681 | tg3_ape_write32(tp, req + off, bit); | 684 | tg3_ape_write32(tp, req + off, bit); |
682 | 685 | ||
683 | /* Wait for up to 1 millisecond to acquire lock. */ | 686 | /* Wait for up to 1 millisecond to acquire lock. */ |
@@ -710,6 +713,10 @@ static void tg3_ape_unlock(struct tg3 *tp, int locknum) | |||
710 | return; | 713 | return; |
711 | case TG3_APE_LOCK_GRC: | 714 | case TG3_APE_LOCK_GRC: |
712 | case TG3_APE_LOCK_MEM: | 715 | case TG3_APE_LOCK_MEM: |
716 | if (!tp->pci_fn) | ||
717 | bit = APE_LOCK_GRANT_DRIVER; | ||
718 | else | ||
719 | bit = 1 << tp->pci_fn; | ||
713 | break; | 720 | break; |
714 | default: | 721 | default: |
715 | return; | 722 | return; |
@@ -720,11 +727,6 @@ static void tg3_ape_unlock(struct tg3 *tp, int locknum) | |||
720 | else | 727 | else |
721 | gnt = TG3_APE_PER_LOCK_GRANT; | 728 | gnt = TG3_APE_PER_LOCK_GRANT; |
722 | 729 | ||
723 | if (locknum != TG3_APE_LOCK_GPIO || !tp->pci_fn) | ||
724 | bit = APE_LOCK_GRANT_DRIVER; | ||
725 | else | ||
726 | bit = 1 << tp->pci_fn; | ||
727 | |||
728 | tg3_ape_write32(tp, gnt + 4 * locknum, bit); | 730 | tg3_ape_write32(tp, gnt + 4 * locknum, bit); |
729 | } | 731 | } |
730 | 732 | ||
@@ -5927,6 +5929,18 @@ static int tg3_poll_work(struct tg3_napi *tnapi, int work_done, int budget) | |||
5927 | return work_done; | 5929 | return work_done; |
5928 | } | 5930 | } |
5929 | 5931 | ||
5932 | static inline void tg3_reset_task_schedule(struct tg3 *tp) | ||
5933 | { | ||
5934 | if (!test_and_set_bit(TG3_FLAG_RESET_TASK_PENDING, tp->tg3_flags)) | ||
5935 | schedule_work(&tp->reset_task); | ||
5936 | } | ||
5937 | |||
5938 | static inline void tg3_reset_task_cancel(struct tg3 *tp) | ||
5939 | { | ||
5940 | cancel_work_sync(&tp->reset_task); | ||
5941 | tg3_flag_clear(tp, RESET_TASK_PENDING); | ||
5942 | } | ||
5943 | |||
5930 | static int tg3_poll_msix(struct napi_struct *napi, int budget) | 5944 | static int tg3_poll_msix(struct napi_struct *napi, int budget) |
5931 | { | 5945 | { |
5932 | struct tg3_napi *tnapi = container_of(napi, struct tg3_napi, napi); | 5946 | struct tg3_napi *tnapi = container_of(napi, struct tg3_napi, napi); |
@@ -5967,7 +5981,7 @@ static int tg3_poll_msix(struct napi_struct *napi, int budget) | |||
5967 | tx_recovery: | 5981 | tx_recovery: |
5968 | /* work_done is guaranteed to be less than budget. */ | 5982 | /* work_done is guaranteed to be less than budget. */ |
5969 | napi_complete(napi); | 5983 | napi_complete(napi); |
5970 | schedule_work(&tp->reset_task); | 5984 | tg3_reset_task_schedule(tp); |
5971 | return work_done; | 5985 | return work_done; |
5972 | } | 5986 | } |
5973 | 5987 | ||
@@ -6002,7 +6016,7 @@ static void tg3_process_error(struct tg3 *tp) | |||
6002 | tg3_dump_state(tp); | 6016 | tg3_dump_state(tp); |
6003 | 6017 | ||
6004 | tg3_flag_set(tp, ERROR_PROCESSED); | 6018 | tg3_flag_set(tp, ERROR_PROCESSED); |
6005 | schedule_work(&tp->reset_task); | 6019 | tg3_reset_task_schedule(tp); |
6006 | } | 6020 | } |
6007 | 6021 | ||
6008 | static int tg3_poll(struct napi_struct *napi, int budget) | 6022 | static int tg3_poll(struct napi_struct *napi, int budget) |
@@ -6049,7 +6063,7 @@ static int tg3_poll(struct napi_struct *napi, int budget) | |||
6049 | tx_recovery: | 6063 | tx_recovery: |
6050 | /* work_done is guaranteed to be less than budget. */ | 6064 | /* work_done is guaranteed to be less than budget. */ |
6051 | napi_complete(napi); | 6065 | napi_complete(napi); |
6052 | schedule_work(&tp->reset_task); | 6066 | tg3_reset_task_schedule(tp); |
6053 | return work_done; | 6067 | return work_done; |
6054 | } | 6068 | } |
6055 | 6069 | ||
@@ -6338,11 +6352,11 @@ static void tg3_reset_task(struct work_struct *work) | |||
6338 | { | 6352 | { |
6339 | struct tg3 *tp = container_of(work, struct tg3, reset_task); | 6353 | struct tg3 *tp = container_of(work, struct tg3, reset_task); |
6340 | int err; | 6354 | int err; |
6341 | unsigned int restart_timer; | ||
6342 | 6355 | ||
6343 | tg3_full_lock(tp, 0); | 6356 | tg3_full_lock(tp, 0); |
6344 | 6357 | ||
6345 | if (!netif_running(tp->dev)) { | 6358 | if (!netif_running(tp->dev)) { |
6359 | tg3_flag_clear(tp, RESET_TASK_PENDING); | ||
6346 | tg3_full_unlock(tp); | 6360 | tg3_full_unlock(tp); |
6347 | return; | 6361 | return; |
6348 | } | 6362 | } |
@@ -6355,9 +6369,6 @@ static void tg3_reset_task(struct work_struct *work) | |||
6355 | 6369 | ||
6356 | tg3_full_lock(tp, 1); | 6370 | tg3_full_lock(tp, 1); |
6357 | 6371 | ||
6358 | restart_timer = tg3_flag(tp, RESTART_TIMER); | ||
6359 | tg3_flag_clear(tp, RESTART_TIMER); | ||
6360 | |||
6361 | if (tg3_flag(tp, TX_RECOVERY_PENDING)) { | 6372 | if (tg3_flag(tp, TX_RECOVERY_PENDING)) { |
6362 | tp->write32_tx_mbox = tg3_write32_tx_mbox; | 6373 | tp->write32_tx_mbox = tg3_write32_tx_mbox; |
6363 | tp->write32_rx_mbox = tg3_write_flush_reg32; | 6374 | tp->write32_rx_mbox = tg3_write_flush_reg32; |
@@ -6372,14 +6383,13 @@ static void tg3_reset_task(struct work_struct *work) | |||
6372 | 6383 | ||
6373 | tg3_netif_start(tp); | 6384 | tg3_netif_start(tp); |
6374 | 6385 | ||
6375 | if (restart_timer) | ||
6376 | mod_timer(&tp->timer, jiffies + 1); | ||
6377 | |||
6378 | out: | 6386 | out: |
6379 | tg3_full_unlock(tp); | 6387 | tg3_full_unlock(tp); |
6380 | 6388 | ||
6381 | if (!err) | 6389 | if (!err) |
6382 | tg3_phy_start(tp); | 6390 | tg3_phy_start(tp); |
6391 | |||
6392 | tg3_flag_clear(tp, RESET_TASK_PENDING); | ||
6383 | } | 6393 | } |
6384 | 6394 | ||
6385 | static void tg3_tx_timeout(struct net_device *dev) | 6395 | static void tg3_tx_timeout(struct net_device *dev) |
@@ -6391,7 +6401,7 @@ static void tg3_tx_timeout(struct net_device *dev) | |||
6391 | tg3_dump_state(tp); | 6401 | tg3_dump_state(tp); |
6392 | } | 6402 | } |
6393 | 6403 | ||
6394 | schedule_work(&tp->reset_task); | 6404 | tg3_reset_task_schedule(tp); |
6395 | } | 6405 | } |
6396 | 6406 | ||
6397 | /* Test for DMA buffers crossing any 4GB boundaries: 4G, 8G, etc */ | 6407 | /* Test for DMA buffers crossing any 4GB boundaries: 4G, 8G, etc */ |
@@ -6442,31 +6452,26 @@ static bool tg3_tx_frag_set(struct tg3_napi *tnapi, u32 *entry, u32 *budget, | |||
6442 | hwbug = 1; | 6452 | hwbug = 1; |
6443 | 6453 | ||
6444 | if (tg3_flag(tp, 4K_FIFO_LIMIT)) { | 6454 | if (tg3_flag(tp, 4K_FIFO_LIMIT)) { |
6455 | u32 prvidx = *entry; | ||
6445 | u32 tmp_flag = flags & ~TXD_FLAG_END; | 6456 | u32 tmp_flag = flags & ~TXD_FLAG_END; |
6446 | while (len > TG3_TX_BD_DMA_MAX) { | 6457 | while (len > TG3_TX_BD_DMA_MAX && *budget) { |
6447 | u32 frag_len = TG3_TX_BD_DMA_MAX; | 6458 | u32 frag_len = TG3_TX_BD_DMA_MAX; |
6448 | len -= TG3_TX_BD_DMA_MAX; | 6459 | len -= TG3_TX_BD_DMA_MAX; |
6449 | 6460 | ||
6450 | if (len) { | 6461 | /* Avoid the 8byte DMA problem */ |
6451 | tnapi->tx_buffers[*entry].fragmented = true; | 6462 | if (len <= 8) { |
6452 | /* Avoid the 8byte DMA problem */ | 6463 | len += TG3_TX_BD_DMA_MAX / 2; |
6453 | if (len <= 8) { | 6464 | frag_len = TG3_TX_BD_DMA_MAX / 2; |
6454 | len += TG3_TX_BD_DMA_MAX / 2; | ||
6455 | frag_len = TG3_TX_BD_DMA_MAX / 2; | ||
6456 | } | ||
6457 | } else | ||
6458 | tmp_flag = flags; | ||
6459 | |||
6460 | if (*budget) { | ||
6461 | tg3_tx_set_bd(&tnapi->tx_ring[*entry], map, | ||
6462 | frag_len, tmp_flag, mss, vlan); | ||
6463 | (*budget)--; | ||
6464 | *entry = NEXT_TX(*entry); | ||
6465 | } else { | ||
6466 | hwbug = 1; | ||
6467 | break; | ||
6468 | } | 6465 | } |
6469 | 6466 | ||
6467 | tnapi->tx_buffers[*entry].fragmented = true; | ||
6468 | |||
6469 | tg3_tx_set_bd(&tnapi->tx_ring[*entry], map, | ||
6470 | frag_len, tmp_flag, mss, vlan); | ||
6471 | *budget -= 1; | ||
6472 | prvidx = *entry; | ||
6473 | *entry = NEXT_TX(*entry); | ||
6474 | |||
6470 | map += frag_len; | 6475 | map += frag_len; |
6471 | } | 6476 | } |
6472 | 6477 | ||
@@ -6474,10 +6479,11 @@ static bool tg3_tx_frag_set(struct tg3_napi *tnapi, u32 *entry, u32 *budget, | |||
6474 | if (*budget) { | 6479 | if (*budget) { |
6475 | tg3_tx_set_bd(&tnapi->tx_ring[*entry], map, | 6480 | tg3_tx_set_bd(&tnapi->tx_ring[*entry], map, |
6476 | len, flags, mss, vlan); | 6481 | len, flags, mss, vlan); |
6477 | (*budget)--; | 6482 | *budget -= 1; |
6478 | *entry = NEXT_TX(*entry); | 6483 | *entry = NEXT_TX(*entry); |
6479 | } else { | 6484 | } else { |
6480 | hwbug = 1; | 6485 | hwbug = 1; |
6486 | tnapi->tx_buffers[prvidx].fragmented = false; | ||
6481 | } | 6487 | } |
6482 | } | 6488 | } |
6483 | } else { | 6489 | } else { |
@@ -6509,7 +6515,7 @@ static void tg3_tx_skb_unmap(struct tg3_napi *tnapi, u32 entry, int last) | |||
6509 | txb = &tnapi->tx_buffers[entry]; | 6515 | txb = &tnapi->tx_buffers[entry]; |
6510 | } | 6516 | } |
6511 | 6517 | ||
6512 | for (i = 0; i < last; i++) { | 6518 | for (i = 0; i <= last; i++) { |
6513 | const skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; | 6519 | const skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; |
6514 | 6520 | ||
6515 | entry = NEXT_TX(entry); | 6521 | entry = NEXT_TX(entry); |
@@ -6559,6 +6565,8 @@ static int tigon3_dma_hwbug_workaround(struct tg3_napi *tnapi, | |||
6559 | dev_kfree_skb(new_skb); | 6565 | dev_kfree_skb(new_skb); |
6560 | ret = -1; | 6566 | ret = -1; |
6561 | } else { | 6567 | } else { |
6568 | u32 save_entry = *entry; | ||
6569 | |||
6562 | base_flags |= TXD_FLAG_END; | 6570 | base_flags |= TXD_FLAG_END; |
6563 | 6571 | ||
6564 | tnapi->tx_buffers[*entry].skb = new_skb; | 6572 | tnapi->tx_buffers[*entry].skb = new_skb; |
@@ -6568,7 +6576,7 @@ static int tigon3_dma_hwbug_workaround(struct tg3_napi *tnapi, | |||
6568 | if (tg3_tx_frag_set(tnapi, entry, budget, new_addr, | 6576 | if (tg3_tx_frag_set(tnapi, entry, budget, new_addr, |
6569 | new_skb->len, base_flags, | 6577 | new_skb->len, base_flags, |
6570 | mss, vlan)) { | 6578 | mss, vlan)) { |
6571 | tg3_tx_skb_unmap(tnapi, *entry, 0); | 6579 | tg3_tx_skb_unmap(tnapi, save_entry, -1); |
6572 | dev_kfree_skb(new_skb); | 6580 | dev_kfree_skb(new_skb); |
6573 | ret = -1; | 6581 | ret = -1; |
6574 | } | 6582 | } |
@@ -6758,11 +6766,10 @@ static netdev_tx_t tg3_start_xmit(struct sk_buff *skb, struct net_device *dev) | |||
6758 | 6766 | ||
6759 | if (tg3_tx_frag_set(tnapi, &entry, &budget, mapping, len, base_flags | | 6767 | if (tg3_tx_frag_set(tnapi, &entry, &budget, mapping, len, base_flags | |
6760 | ((skb_shinfo(skb)->nr_frags == 0) ? TXD_FLAG_END : 0), | 6768 | ((skb_shinfo(skb)->nr_frags == 0) ? TXD_FLAG_END : 0), |
6761 | mss, vlan)) | 6769 | mss, vlan)) { |
6762 | would_hit_hwbug = 1; | 6770 | would_hit_hwbug = 1; |
6763 | |||
6764 | /* Now loop through additional data fragments, and queue them. */ | 6771 | /* Now loop through additional data fragments, and queue them. */ |
6765 | if (skb_shinfo(skb)->nr_frags > 0) { | 6772 | } else if (skb_shinfo(skb)->nr_frags > 0) { |
6766 | u32 tmp_mss = mss; | 6773 | u32 tmp_mss = mss; |
6767 | 6774 | ||
6768 | if (!tg3_flag(tp, HW_TSO_1) && | 6775 | if (!tg3_flag(tp, HW_TSO_1) && |
@@ -6784,11 +6791,14 @@ static netdev_tx_t tg3_start_xmit(struct sk_buff *skb, struct net_device *dev) | |||
6784 | if (dma_mapping_error(&tp->pdev->dev, mapping)) | 6791 | if (dma_mapping_error(&tp->pdev->dev, mapping)) |
6785 | goto dma_error; | 6792 | goto dma_error; |
6786 | 6793 | ||
6787 | if (tg3_tx_frag_set(tnapi, &entry, &budget, mapping, | 6794 | if (!budget || |
6795 | tg3_tx_frag_set(tnapi, &entry, &budget, mapping, | ||
6788 | len, base_flags | | 6796 | len, base_flags | |
6789 | ((i == last) ? TXD_FLAG_END : 0), | 6797 | ((i == last) ? TXD_FLAG_END : 0), |
6790 | tmp_mss, vlan)) | 6798 | tmp_mss, vlan)) { |
6791 | would_hit_hwbug = 1; | 6799 | would_hit_hwbug = 1; |
6800 | break; | ||
6801 | } | ||
6792 | } | 6802 | } |
6793 | } | 6803 | } |
6794 | 6804 | ||
@@ -6828,7 +6838,7 @@ static netdev_tx_t tg3_start_xmit(struct sk_buff *skb, struct net_device *dev) | |||
6828 | return NETDEV_TX_OK; | 6838 | return NETDEV_TX_OK; |
6829 | 6839 | ||
6830 | dma_error: | 6840 | dma_error: |
6831 | tg3_tx_skb_unmap(tnapi, tnapi->tx_prod, i); | 6841 | tg3_tx_skb_unmap(tnapi, tnapi->tx_prod, --i); |
6832 | tnapi->tx_buffers[tnapi->tx_prod].skb = NULL; | 6842 | tnapi->tx_buffers[tnapi->tx_prod].skb = NULL; |
6833 | drop: | 6843 | drop: |
6834 | dev_kfree_skb(skb); | 6844 | dev_kfree_skb(skb); |
@@ -7281,7 +7291,8 @@ static void tg3_free_rings(struct tg3 *tp) | |||
7281 | if (!skb) | 7291 | if (!skb) |
7282 | continue; | 7292 | continue; |
7283 | 7293 | ||
7284 | tg3_tx_skb_unmap(tnapi, i, skb_shinfo(skb)->nr_frags); | 7294 | tg3_tx_skb_unmap(tnapi, i, |
7295 | skb_shinfo(skb)->nr_frags - 1); | ||
7285 | 7296 | ||
7286 | dev_kfree_skb_any(skb); | 7297 | dev_kfree_skb_any(skb); |
7287 | } | 7298 | } |
@@ -9200,7 +9211,7 @@ static void tg3_timer(unsigned long __opaque) | |||
9200 | { | 9211 | { |
9201 | struct tg3 *tp = (struct tg3 *) __opaque; | 9212 | struct tg3 *tp = (struct tg3 *) __opaque; |
9202 | 9213 | ||
9203 | if (tp->irq_sync) | 9214 | if (tp->irq_sync || tg3_flag(tp, RESET_TASK_PENDING)) |
9204 | goto restart_timer; | 9215 | goto restart_timer; |
9205 | 9216 | ||
9206 | spin_lock(&tp->lock); | 9217 | spin_lock(&tp->lock); |
@@ -9223,10 +9234,9 @@ static void tg3_timer(unsigned long __opaque) | |||
9223 | } | 9234 | } |
9224 | 9235 | ||
9225 | if (!(tr32(WDMAC_MODE) & WDMAC_MODE_ENABLE)) { | 9236 | if (!(tr32(WDMAC_MODE) & WDMAC_MODE_ENABLE)) { |
9226 | tg3_flag_set(tp, RESTART_TIMER); | ||
9227 | spin_unlock(&tp->lock); | 9237 | spin_unlock(&tp->lock); |
9228 | schedule_work(&tp->reset_task); | 9238 | tg3_reset_task_schedule(tp); |
9229 | return; | 9239 | goto restart_timer; |
9230 | } | 9240 | } |
9231 | } | 9241 | } |
9232 | 9242 | ||
@@ -9674,15 +9684,14 @@ static int tg3_open(struct net_device *dev) | |||
9674 | struct tg3_napi *tnapi = &tp->napi[i]; | 9684 | struct tg3_napi *tnapi = &tp->napi[i]; |
9675 | err = tg3_request_irq(tp, i); | 9685 | err = tg3_request_irq(tp, i); |
9676 | if (err) { | 9686 | if (err) { |
9677 | for (i--; i >= 0; i--) | 9687 | for (i--; i >= 0; i--) { |
9688 | tnapi = &tp->napi[i]; | ||
9678 | free_irq(tnapi->irq_vec, tnapi); | 9689 | free_irq(tnapi->irq_vec, tnapi); |
9679 | break; | 9690 | } |
9691 | goto err_out2; | ||
9680 | } | 9692 | } |
9681 | } | 9693 | } |
9682 | 9694 | ||
9683 | if (err) | ||
9684 | goto err_out2; | ||
9685 | |||
9686 | tg3_full_lock(tp, 0); | 9695 | tg3_full_lock(tp, 0); |
9687 | 9696 | ||
9688 | err = tg3_init_hw(tp, 1); | 9697 | err = tg3_init_hw(tp, 1); |
@@ -9783,7 +9792,7 @@ static int tg3_close(struct net_device *dev) | |||
9783 | struct tg3 *tp = netdev_priv(dev); | 9792 | struct tg3 *tp = netdev_priv(dev); |
9784 | 9793 | ||
9785 | tg3_napi_disable(tp); | 9794 | tg3_napi_disable(tp); |
9786 | cancel_work_sync(&tp->reset_task); | 9795 | tg3_reset_task_cancel(tp); |
9787 | 9796 | ||
9788 | netif_tx_stop_all_queues(dev); | 9797 | netif_tx_stop_all_queues(dev); |
9789 | 9798 | ||
@@ -11520,7 +11529,7 @@ static int tg3_run_loopback(struct tg3 *tp, u32 pktsz, bool tso_loopback) | |||
11520 | break; | 11529 | break; |
11521 | } | 11530 | } |
11522 | 11531 | ||
11523 | tg3_tx_skb_unmap(tnapi, tnapi->tx_prod - 1, 0); | 11532 | tg3_tx_skb_unmap(tnapi, tnapi->tx_prod - 1, -1); |
11524 | dev_kfree_skb(skb); | 11533 | dev_kfree_skb(skb); |
11525 | 11534 | ||
11526 | if (tx_idx != tnapi->tx_prod) | 11535 | if (tx_idx != tnapi->tx_prod) |
@@ -14228,12 +14237,30 @@ static int __devinit tg3_get_invariants(struct tg3 *tp) | |||
14228 | val = tr32(MEMARB_MODE); | 14237 | val = tr32(MEMARB_MODE); |
14229 | tw32(MEMARB_MODE, val | MEMARB_MODE_ENABLE); | 14238 | tw32(MEMARB_MODE, val | MEMARB_MODE_ENABLE); |
14230 | 14239 | ||
14231 | if (tg3_flag(tp, PCIX_MODE)) { | 14240 | tp->pci_fn = PCI_FUNC(tp->pdev->devfn) & 3; |
14232 | pci_read_config_dword(tp->pdev, | 14241 | if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 || |
14233 | tp->pcix_cap + PCI_X_STATUS, &val); | 14242 | tg3_flag(tp, 5780_CLASS)) { |
14234 | tp->pci_fn = val & 0x7; | 14243 | if (tg3_flag(tp, PCIX_MODE)) { |
14235 | } else { | 14244 | pci_read_config_dword(tp->pdev, |
14236 | tp->pci_fn = PCI_FUNC(tp->pdev->devfn) & 3; | 14245 | tp->pcix_cap + PCI_X_STATUS, |
14246 | &val); | ||
14247 | tp->pci_fn = val & 0x7; | ||
14248 | } | ||
14249 | } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717) { | ||
14250 | tg3_read_mem(tp, NIC_SRAM_CPMU_STATUS, &val); | ||
14251 | if ((val & NIC_SRAM_CPMUSTAT_SIG_MSK) == | ||
14252 | NIC_SRAM_CPMUSTAT_SIG) { | ||
14253 | tp->pci_fn = val & TG3_CPMU_STATUS_FMSK_5717; | ||
14254 | tp->pci_fn = tp->pci_fn ? 1 : 0; | ||
14255 | } | ||
14256 | } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 || | ||
14257 | GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) { | ||
14258 | tg3_read_mem(tp, NIC_SRAM_CPMU_STATUS, &val); | ||
14259 | if ((val & NIC_SRAM_CPMUSTAT_SIG_MSK) == | ||
14260 | NIC_SRAM_CPMUSTAT_SIG) { | ||
14261 | tp->pci_fn = (val & TG3_CPMU_STATUS_FMSK_5719) >> | ||
14262 | TG3_CPMU_STATUS_FSHFT_5719; | ||
14263 | } | ||
14237 | } | 14264 | } |
14238 | 14265 | ||
14239 | /* Get eeprom hw config before calling tg3_set_power_state(). | 14266 | /* Get eeprom hw config before calling tg3_set_power_state(). |
@@ -15665,7 +15692,7 @@ static void __devexit tg3_remove_one(struct pci_dev *pdev) | |||
15665 | if (tp->fw) | 15692 | if (tp->fw) |
15666 | release_firmware(tp->fw); | 15693 | release_firmware(tp->fw); |
15667 | 15694 | ||
15668 | cancel_work_sync(&tp->reset_task); | 15695 | tg3_reset_task_cancel(tp); |
15669 | 15696 | ||
15670 | if (tg3_flag(tp, USE_PHYLIB)) { | 15697 | if (tg3_flag(tp, USE_PHYLIB)) { |
15671 | tg3_phy_fini(tp); | 15698 | tg3_phy_fini(tp); |
@@ -15699,7 +15726,7 @@ static int tg3_suspend(struct device *device) | |||
15699 | if (!netif_running(dev)) | 15726 | if (!netif_running(dev)) |
15700 | return 0; | 15727 | return 0; |
15701 | 15728 | ||
15702 | flush_work_sync(&tp->reset_task); | 15729 | tg3_reset_task_cancel(tp); |
15703 | tg3_phy_stop(tp); | 15730 | tg3_phy_stop(tp); |
15704 | tg3_netif_stop(tp); | 15731 | tg3_netif_stop(tp); |
15705 | 15732 | ||
@@ -15812,12 +15839,10 @@ static pci_ers_result_t tg3_io_error_detected(struct pci_dev *pdev, | |||
15812 | tg3_netif_stop(tp); | 15839 | tg3_netif_stop(tp); |
15813 | 15840 | ||
15814 | del_timer_sync(&tp->timer); | 15841 | del_timer_sync(&tp->timer); |
15815 | tg3_flag_clear(tp, RESTART_TIMER); | ||
15816 | 15842 | ||
15817 | /* Want to make sure that the reset task doesn't run */ | 15843 | /* Want to make sure that the reset task doesn't run */ |
15818 | cancel_work_sync(&tp->reset_task); | 15844 | tg3_reset_task_cancel(tp); |
15819 | tg3_flag_clear(tp, TX_RECOVERY_PENDING); | 15845 | tg3_flag_clear(tp, TX_RECOVERY_PENDING); |
15820 | tg3_flag_clear(tp, RESTART_TIMER); | ||
15821 | 15846 | ||
15822 | netif_device_detach(netdev); | 15847 | netif_device_detach(netdev); |
15823 | 15848 | ||
diff --git a/drivers/net/ethernet/broadcom/tg3.h b/drivers/net/ethernet/broadcom/tg3.h index f32f288134c7..94b4bd049a33 100644 --- a/drivers/net/ethernet/broadcom/tg3.h +++ b/drivers/net/ethernet/broadcom/tg3.h | |||
@@ -1095,6 +1095,11 @@ | |||
1095 | #define TG3_CPMU_CLCK_ORIDE 0x00003624 | 1095 | #define TG3_CPMU_CLCK_ORIDE 0x00003624 |
1096 | #define CPMU_CLCK_ORIDE_MAC_ORIDE_EN 0x80000000 | 1096 | #define CPMU_CLCK_ORIDE_MAC_ORIDE_EN 0x80000000 |
1097 | 1097 | ||
1098 | #define TG3_CPMU_STATUS 0x0000362c | ||
1099 | #define TG3_CPMU_STATUS_FMSK_5717 0x20000000 | ||
1100 | #define TG3_CPMU_STATUS_FMSK_5719 0xc0000000 | ||
1101 | #define TG3_CPMU_STATUS_FSHFT_5719 30 | ||
1102 | |||
1098 | #define TG3_CPMU_CLCK_STAT 0x00003630 | 1103 | #define TG3_CPMU_CLCK_STAT 0x00003630 |
1099 | #define CPMU_CLCK_STAT_MAC_CLCK_MASK 0x001f0000 | 1104 | #define CPMU_CLCK_STAT_MAC_CLCK_MASK 0x001f0000 |
1100 | #define CPMU_CLCK_STAT_MAC_CLCK_62_5 0x00000000 | 1105 | #define CPMU_CLCK_STAT_MAC_CLCK_62_5 0x00000000 |
@@ -2128,6 +2133,10 @@ | |||
2128 | #define NIC_SRAM_RGMII_EXT_IBND_RX_EN 0x00000008 | 2133 | #define NIC_SRAM_RGMII_EXT_IBND_RX_EN 0x00000008 |
2129 | #define NIC_SRAM_RGMII_EXT_IBND_TX_EN 0x00000010 | 2134 | #define NIC_SRAM_RGMII_EXT_IBND_TX_EN 0x00000010 |
2130 | 2135 | ||
2136 | #define NIC_SRAM_CPMU_STATUS 0x00000e00 | ||
2137 | #define NIC_SRAM_CPMUSTAT_SIG 0x0000362c | ||
2138 | #define NIC_SRAM_CPMUSTAT_SIG_MSK 0x0000ffff | ||
2139 | |||
2131 | #define NIC_SRAM_RX_MINI_BUFFER_DESC 0x00001000 | 2140 | #define NIC_SRAM_RX_MINI_BUFFER_DESC 0x00001000 |
2132 | 2141 | ||
2133 | #define NIC_SRAM_DMA_DESC_POOL_BASE 0x00002000 | 2142 | #define NIC_SRAM_DMA_DESC_POOL_BASE 0x00002000 |
@@ -2344,9 +2353,13 @@ | |||
2344 | #define APE_PER_LOCK_GRANT_DRIVER 0x00001000 | 2353 | #define APE_PER_LOCK_GRANT_DRIVER 0x00001000 |
2345 | 2354 | ||
2346 | /* APE convenience enumerations. */ | 2355 | /* APE convenience enumerations. */ |
2347 | #define TG3_APE_LOCK_GRC 1 | 2356 | #define TG3_APE_LOCK_PHY0 0 |
2348 | #define TG3_APE_LOCK_MEM 4 | 2357 | #define TG3_APE_LOCK_GRC 1 |
2349 | #define TG3_APE_LOCK_GPIO 7 | 2358 | #define TG3_APE_LOCK_PHY1 2 |
2359 | #define TG3_APE_LOCK_PHY2 3 | ||
2360 | #define TG3_APE_LOCK_MEM 4 | ||
2361 | #define TG3_APE_LOCK_PHY3 5 | ||
2362 | #define TG3_APE_LOCK_GPIO 7 | ||
2350 | 2363 | ||
2351 | #define TG3_EEPROM_SB_F1R2_MBA_OFF 0x10 | 2364 | #define TG3_EEPROM_SB_F1R2_MBA_OFF 0x10 |
2352 | 2365 | ||
@@ -2866,7 +2879,6 @@ enum TG3_FLAGS { | |||
2866 | TG3_FLAG_JUMBO_CAPABLE, | 2879 | TG3_FLAG_JUMBO_CAPABLE, |
2867 | TG3_FLAG_CHIP_RESETTING, | 2880 | TG3_FLAG_CHIP_RESETTING, |
2868 | TG3_FLAG_INIT_COMPLETE, | 2881 | TG3_FLAG_INIT_COMPLETE, |
2869 | TG3_FLAG_RESTART_TIMER, | ||
2870 | TG3_FLAG_TSO_BUG, | 2882 | TG3_FLAG_TSO_BUG, |
2871 | TG3_FLAG_IS_5788, | 2883 | TG3_FLAG_IS_5788, |
2872 | TG3_FLAG_MAX_RXPEND_64, | 2884 | TG3_FLAG_MAX_RXPEND_64, |
@@ -2909,6 +2921,7 @@ enum TG3_FLAGS { | |||
2909 | TG3_FLAG_APE_HAS_NCSI, | 2921 | TG3_FLAG_APE_HAS_NCSI, |
2910 | TG3_FLAG_5717_PLUS, | 2922 | TG3_FLAG_5717_PLUS, |
2911 | TG3_FLAG_4K_FIFO_LIMIT, | 2923 | TG3_FLAG_4K_FIFO_LIMIT, |
2924 | TG3_FLAG_RESET_TASK_PENDING, | ||
2912 | 2925 | ||
2913 | /* Add new flags before this comment and TG3_FLAG_NUMBER_OF_FLAGS */ | 2926 | /* Add new flags before this comment and TG3_FLAG_NUMBER_OF_FLAGS */ |
2914 | TG3_FLAG_NUMBER_OF_FLAGS, /* Last entry in enum TG3_FLAGS */ | 2927 | TG3_FLAG_NUMBER_OF_FLAGS, /* Last entry in enum TG3_FLAGS */ |
diff --git a/drivers/net/ethernet/cadence/Kconfig b/drivers/net/ethernet/cadence/Kconfig index 98849a1fc749..a2e150059bc7 100644 --- a/drivers/net/ethernet/cadence/Kconfig +++ b/drivers/net/ethernet/cadence/Kconfig | |||
@@ -5,8 +5,8 @@ | |||
5 | config HAVE_NET_MACB | 5 | config HAVE_NET_MACB |
6 | bool | 6 | bool |
7 | 7 | ||
8 | config NET_ATMEL | 8 | config NET_CADENCE |
9 | bool "Atmel devices" | 9 | bool "Cadence devices" |
10 | depends on HAVE_NET_MACB || (ARM && ARCH_AT91RM9200) | 10 | depends on HAVE_NET_MACB || (ARM && ARCH_AT91RM9200) |
11 | ---help--- | 11 | ---help--- |
12 | If you have a network (Ethernet) card belonging to this class, say Y. | 12 | If you have a network (Ethernet) card belonging to this class, say Y. |
@@ -20,7 +20,7 @@ config NET_ATMEL | |||
20 | the remaining Atmel network card questions. If you say Y, you will be | 20 | the remaining Atmel network card questions. If you say Y, you will be |
21 | asked for your specific card in the following questions. | 21 | asked for your specific card in the following questions. |
22 | 22 | ||
23 | if NET_ATMEL | 23 | if NET_CADENCE |
24 | 24 | ||
25 | config ARM_AT91_ETHER | 25 | config ARM_AT91_ETHER |
26 | tristate "AT91RM9200 Ethernet support" | 26 | tristate "AT91RM9200 Ethernet support" |
@@ -32,14 +32,16 @@ config ARM_AT91_ETHER | |||
32 | ethernet support, then you should always answer Y to this. | 32 | ethernet support, then you should always answer Y to this. |
33 | 33 | ||
34 | config MACB | 34 | config MACB |
35 | tristate "Atmel MACB support" | 35 | tristate "Cadence MACB/GEM support" |
36 | depends on HAVE_NET_MACB | 36 | depends on HAVE_NET_MACB |
37 | select PHYLIB | 37 | select PHYLIB |
38 | ---help--- | 38 | ---help--- |
39 | The Atmel MACB ethernet interface is found on many AT32 and AT91 | 39 | The Cadence MACB ethernet interface is found on many Atmel AT32 and |
40 | parts. Say Y to include support for the MACB chip. | 40 | AT91 parts. This driver also supports the Cadence GEM (Gigabit |
41 | Ethernet MAC found in some ARM SoC devices). Note: the Gigabit mode | ||
42 | is not yet supported. Say Y to include support for the MACB/GEM chip. | ||
41 | 43 | ||
42 | To compile this driver as a module, choose M here: the module | 44 | To compile this driver as a module, choose M here: the module |
43 | will be called macb. | 45 | will be called macb. |
44 | 46 | ||
45 | endif # NET_ATMEL | 47 | endif # NET_CADENCE |
diff --git a/drivers/net/ethernet/cadence/at91_ether.c b/drivers/net/ethernet/cadence/at91_ether.c index 56624d303487..dfeb46cb3f74 100644 --- a/drivers/net/ethernet/cadence/at91_ether.c +++ b/drivers/net/ethernet/cadence/at91_ether.c | |||
@@ -26,6 +26,7 @@ | |||
26 | #include <linux/skbuff.h> | 26 | #include <linux/skbuff.h> |
27 | #include <linux/dma-mapping.h> | 27 | #include <linux/dma-mapping.h> |
28 | #include <linux/ethtool.h> | 28 | #include <linux/ethtool.h> |
29 | #include <linux/platform_data/macb.h> | ||
29 | #include <linux/platform_device.h> | 30 | #include <linux/platform_device.h> |
30 | #include <linux/clk.h> | 31 | #include <linux/clk.h> |
31 | #include <linux/gfp.h> | 32 | #include <linux/gfp.h> |
@@ -984,7 +985,7 @@ static const struct net_device_ops at91ether_netdev_ops = { | |||
984 | static int __init at91ether_setup(unsigned long phy_type, unsigned short phy_address, | 985 | static int __init at91ether_setup(unsigned long phy_type, unsigned short phy_address, |
985 | struct platform_device *pdev, struct clk *ether_clk) | 986 | struct platform_device *pdev, struct clk *ether_clk) |
986 | { | 987 | { |
987 | struct at91_eth_data *board_data = pdev->dev.platform_data; | 988 | struct macb_platform_data *board_data = pdev->dev.platform_data; |
988 | struct net_device *dev; | 989 | struct net_device *dev; |
989 | struct at91_private *lp; | 990 | struct at91_private *lp; |
990 | unsigned int val; | 991 | unsigned int val; |
diff --git a/drivers/net/ethernet/cadence/at91_ether.h b/drivers/net/ethernet/cadence/at91_ether.h index 353f4dab62be..3725fbb0defe 100644 --- a/drivers/net/ethernet/cadence/at91_ether.h +++ b/drivers/net/ethernet/cadence/at91_ether.h | |||
@@ -85,7 +85,9 @@ struct recv_desc_bufs | |||
85 | struct at91_private | 85 | struct at91_private |
86 | { | 86 | { |
87 | struct mii_if_info mii; /* ethtool support */ | 87 | struct mii_if_info mii; /* ethtool support */ |
88 | struct at91_eth_data board_data; /* board-specific configuration */ | 88 | struct macb_platform_data board_data; /* board-specific |
89 | * configuration (shared with | ||
90 | * macb for common data */ | ||
89 | struct clk *ether_clk; /* clock */ | 91 | struct clk *ether_clk; /* clock */ |
90 | 92 | ||
91 | /* PHY */ | 93 | /* PHY */ |
diff --git a/drivers/net/ethernet/cadence/macb.c b/drivers/net/ethernet/cadence/macb.c index a437b46e5490..64d61461bdc7 100644 --- a/drivers/net/ethernet/cadence/macb.c +++ b/drivers/net/ethernet/cadence/macb.c | |||
@@ -1,5 +1,5 @@ | |||
1 | /* | 1 | /* |
2 | * Atmel MACB Ethernet Controller driver | 2 | * Cadence MACB/GEM Ethernet Controller driver |
3 | * | 3 | * |
4 | * Copyright (C) 2004-2006 Atmel Corporation | 4 | * Copyright (C) 2004-2006 Atmel Corporation |
5 | * | 5 | * |
@@ -8,6 +8,7 @@ | |||
8 | * published by the Free Software Foundation. | 8 | * published by the Free Software Foundation. |
9 | */ | 9 | */ |
10 | 10 | ||
11 | #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt | ||
11 | #include <linux/clk.h> | 12 | #include <linux/clk.h> |
12 | #include <linux/module.h> | 13 | #include <linux/module.h> |
13 | #include <linux/moduleparam.h> | 14 | #include <linux/moduleparam.h> |
@@ -19,12 +20,10 @@ | |||
19 | #include <linux/netdevice.h> | 20 | #include <linux/netdevice.h> |
20 | #include <linux/etherdevice.h> | 21 | #include <linux/etherdevice.h> |
21 | #include <linux/dma-mapping.h> | 22 | #include <linux/dma-mapping.h> |
23 | #include <linux/platform_data/macb.h> | ||
22 | #include <linux/platform_device.h> | 24 | #include <linux/platform_device.h> |
23 | #include <linux/phy.h> | 25 | #include <linux/phy.h> |
24 | 26 | ||
25 | #include <mach/board.h> | ||
26 | #include <mach/cpu.h> | ||
27 | |||
28 | #include "macb.h" | 27 | #include "macb.h" |
29 | 28 | ||
30 | #define RX_BUFFER_SIZE 128 | 29 | #define RX_BUFFER_SIZE 128 |
@@ -60,9 +59,9 @@ static void __macb_set_hwaddr(struct macb *bp) | |||
60 | u16 top; | 59 | u16 top; |
61 | 60 | ||
62 | bottom = cpu_to_le32(*((u32 *)bp->dev->dev_addr)); | 61 | bottom = cpu_to_le32(*((u32 *)bp->dev->dev_addr)); |
63 | macb_writel(bp, SA1B, bottom); | 62 | macb_or_gem_writel(bp, SA1B, bottom); |
64 | top = cpu_to_le16(*((u16 *)(bp->dev->dev_addr + 4))); | 63 | top = cpu_to_le16(*((u16 *)(bp->dev->dev_addr + 4))); |
65 | macb_writel(bp, SA1T, top); | 64 | macb_or_gem_writel(bp, SA1T, top); |
66 | } | 65 | } |
67 | 66 | ||
68 | static void __init macb_get_hwaddr(struct macb *bp) | 67 | static void __init macb_get_hwaddr(struct macb *bp) |
@@ -71,8 +70,8 @@ static void __init macb_get_hwaddr(struct macb *bp) | |||
71 | u16 top; | 70 | u16 top; |
72 | u8 addr[6]; | 71 | u8 addr[6]; |
73 | 72 | ||
74 | bottom = macb_readl(bp, SA1B); | 73 | bottom = macb_or_gem_readl(bp, SA1B); |
75 | top = macb_readl(bp, SA1T); | 74 | top = macb_or_gem_readl(bp, SA1T); |
76 | 75 | ||
77 | addr[0] = bottom & 0xff; | 76 | addr[0] = bottom & 0xff; |
78 | addr[1] = (bottom >> 8) & 0xff; | 77 | addr[1] = (bottom >> 8) & 0xff; |
@@ -84,7 +83,7 @@ static void __init macb_get_hwaddr(struct macb *bp) | |||
84 | if (is_valid_ether_addr(addr)) { | 83 | if (is_valid_ether_addr(addr)) { |
85 | memcpy(bp->dev->dev_addr, addr, sizeof(addr)); | 84 | memcpy(bp->dev->dev_addr, addr, sizeof(addr)); |
86 | } else { | 85 | } else { |
87 | dev_info(&bp->pdev->dev, "invalid hw address, using random\n"); | 86 | netdev_info(bp->dev, "invalid hw address, using random\n"); |
88 | random_ether_addr(bp->dev->dev_addr); | 87 | random_ether_addr(bp->dev->dev_addr); |
89 | } | 88 | } |
90 | } | 89 | } |
@@ -178,11 +177,12 @@ static void macb_handle_link_change(struct net_device *dev) | |||
178 | 177 | ||
179 | if (status_change) { | 178 | if (status_change) { |
180 | if (phydev->link) | 179 | if (phydev->link) |
181 | printk(KERN_INFO "%s: link up (%d/%s)\n", | 180 | netdev_info(dev, "link up (%d/%s)\n", |
182 | dev->name, phydev->speed, | 181 | phydev->speed, |
183 | DUPLEX_FULL == phydev->duplex ? "Full":"Half"); | 182 | phydev->duplex == DUPLEX_FULL ? |
183 | "Full" : "Half"); | ||
184 | else | 184 | else |
185 | printk(KERN_INFO "%s: link down\n", dev->name); | 185 | netdev_info(dev, "link down\n"); |
186 | } | 186 | } |
187 | } | 187 | } |
188 | 188 | ||
@@ -191,12 +191,12 @@ static int macb_mii_probe(struct net_device *dev) | |||
191 | { | 191 | { |
192 | struct macb *bp = netdev_priv(dev); | 192 | struct macb *bp = netdev_priv(dev); |
193 | struct phy_device *phydev; | 193 | struct phy_device *phydev; |
194 | struct eth_platform_data *pdata; | 194 | struct macb_platform_data *pdata; |
195 | int ret; | 195 | int ret; |
196 | 196 | ||
197 | phydev = phy_find_first(bp->mii_bus); | 197 | phydev = phy_find_first(bp->mii_bus); |
198 | if (!phydev) { | 198 | if (!phydev) { |
199 | printk (KERN_ERR "%s: no PHY found\n", dev->name); | 199 | netdev_err(dev, "no PHY found\n"); |
200 | return -1; | 200 | return -1; |
201 | } | 201 | } |
202 | 202 | ||
@@ -209,7 +209,7 @@ static int macb_mii_probe(struct net_device *dev) | |||
209 | PHY_INTERFACE_MODE_RMII : | 209 | PHY_INTERFACE_MODE_RMII : |
210 | PHY_INTERFACE_MODE_MII); | 210 | PHY_INTERFACE_MODE_MII); |
211 | if (ret) { | 211 | if (ret) { |
212 | printk(KERN_ERR "%s: Could not attach to PHY\n", dev->name); | 212 | netdev_err(dev, "Could not attach to PHY\n"); |
213 | return ret; | 213 | return ret; |
214 | } | 214 | } |
215 | 215 | ||
@@ -228,7 +228,7 @@ static int macb_mii_probe(struct net_device *dev) | |||
228 | 228 | ||
229 | static int macb_mii_init(struct macb *bp) | 229 | static int macb_mii_init(struct macb *bp) |
230 | { | 230 | { |
231 | struct eth_platform_data *pdata; | 231 | struct macb_platform_data *pdata; |
232 | int err = -ENXIO, i; | 232 | int err = -ENXIO, i; |
233 | 233 | ||
234 | /* Enable management port */ | 234 | /* Enable management port */ |
@@ -285,8 +285,8 @@ err_out: | |||
285 | static void macb_update_stats(struct macb *bp) | 285 | static void macb_update_stats(struct macb *bp) |
286 | { | 286 | { |
287 | u32 __iomem *reg = bp->regs + MACB_PFR; | 287 | u32 __iomem *reg = bp->regs + MACB_PFR; |
288 | u32 *p = &bp->hw_stats.rx_pause_frames; | 288 | u32 *p = &bp->hw_stats.macb.rx_pause_frames; |
289 | u32 *end = &bp->hw_stats.tx_pause_frames + 1; | 289 | u32 *end = &bp->hw_stats.macb.tx_pause_frames + 1; |
290 | 290 | ||
291 | WARN_ON((unsigned long)(end - p - 1) != (MACB_TPF - MACB_PFR) / 4); | 291 | WARN_ON((unsigned long)(end - p - 1) != (MACB_TPF - MACB_PFR) / 4); |
292 | 292 | ||
@@ -303,14 +303,13 @@ static void macb_tx(struct macb *bp) | |||
303 | status = macb_readl(bp, TSR); | 303 | status = macb_readl(bp, TSR); |
304 | macb_writel(bp, TSR, status); | 304 | macb_writel(bp, TSR, status); |
305 | 305 | ||
306 | dev_dbg(&bp->pdev->dev, "macb_tx status = %02lx\n", | 306 | netdev_dbg(bp->dev, "macb_tx status = %02lx\n", (unsigned long)status); |
307 | (unsigned long)status); | ||
308 | 307 | ||
309 | if (status & (MACB_BIT(UND) | MACB_BIT(TSR_RLE))) { | 308 | if (status & (MACB_BIT(UND) | MACB_BIT(TSR_RLE))) { |
310 | int i; | 309 | int i; |
311 | printk(KERN_ERR "%s: TX %s, resetting buffers\n", | 310 | netdev_err(bp->dev, "TX %s, resetting buffers\n", |
312 | bp->dev->name, status & MACB_BIT(UND) ? | 311 | status & MACB_BIT(UND) ? |
313 | "underrun" : "retry limit exceeded"); | 312 | "underrun" : "retry limit exceeded"); |
314 | 313 | ||
315 | /* Transfer ongoing, disable transmitter, to avoid confusion */ | 314 | /* Transfer ongoing, disable transmitter, to avoid confusion */ |
316 | if (status & MACB_BIT(TGO)) | 315 | if (status & MACB_BIT(TGO)) |
@@ -369,8 +368,8 @@ static void macb_tx(struct macb *bp) | |||
369 | if (!(bufstat & MACB_BIT(TX_USED))) | 368 | if (!(bufstat & MACB_BIT(TX_USED))) |
370 | break; | 369 | break; |
371 | 370 | ||
372 | dev_dbg(&bp->pdev->dev, "skb %u (data %p) TX complete\n", | 371 | netdev_dbg(bp->dev, "skb %u (data %p) TX complete\n", |
373 | tail, skb->data); | 372 | tail, skb->data); |
374 | dma_unmap_single(&bp->pdev->dev, rp->mapping, skb->len, | 373 | dma_unmap_single(&bp->pdev->dev, rp->mapping, skb->len, |
375 | DMA_TO_DEVICE); | 374 | DMA_TO_DEVICE); |
376 | bp->stats.tx_packets++; | 375 | bp->stats.tx_packets++; |
@@ -395,8 +394,8 @@ static int macb_rx_frame(struct macb *bp, unsigned int first_frag, | |||
395 | 394 | ||
396 | len = MACB_BFEXT(RX_FRMLEN, bp->rx_ring[last_frag].ctrl); | 395 | len = MACB_BFEXT(RX_FRMLEN, bp->rx_ring[last_frag].ctrl); |
397 | 396 | ||
398 | dev_dbg(&bp->pdev->dev, "macb_rx_frame frags %u - %u (len %u)\n", | 397 | netdev_dbg(bp->dev, "macb_rx_frame frags %u - %u (len %u)\n", |
399 | first_frag, last_frag, len); | 398 | first_frag, last_frag, len); |
400 | 399 | ||
401 | skb = dev_alloc_skb(len + RX_OFFSET); | 400 | skb = dev_alloc_skb(len + RX_OFFSET); |
402 | if (!skb) { | 401 | if (!skb) { |
@@ -437,8 +436,8 @@ static int macb_rx_frame(struct macb *bp, unsigned int first_frag, | |||
437 | 436 | ||
438 | bp->stats.rx_packets++; | 437 | bp->stats.rx_packets++; |
439 | bp->stats.rx_bytes += len; | 438 | bp->stats.rx_bytes += len; |
440 | dev_dbg(&bp->pdev->dev, "received skb of length %u, csum: %08x\n", | 439 | netdev_dbg(bp->dev, "received skb of length %u, csum: %08x\n", |
441 | skb->len, skb->csum); | 440 | skb->len, skb->csum); |
442 | netif_receive_skb(skb); | 441 | netif_receive_skb(skb); |
443 | 442 | ||
444 | return 0; | 443 | return 0; |
@@ -515,8 +514,8 @@ static int macb_poll(struct napi_struct *napi, int budget) | |||
515 | 514 | ||
516 | work_done = 0; | 515 | work_done = 0; |
517 | 516 | ||
518 | dev_dbg(&bp->pdev->dev, "poll: status = %08lx, budget = %d\n", | 517 | netdev_dbg(bp->dev, "poll: status = %08lx, budget = %d\n", |
519 | (unsigned long)status, budget); | 518 | (unsigned long)status, budget); |
520 | 519 | ||
521 | work_done = macb_rx(bp, budget); | 520 | work_done = macb_rx(bp, budget); |
522 | if (work_done < budget) { | 521 | if (work_done < budget) { |
@@ -565,8 +564,7 @@ static irqreturn_t macb_interrupt(int irq, void *dev_id) | |||
565 | macb_writel(bp, IDR, MACB_RX_INT_FLAGS); | 564 | macb_writel(bp, IDR, MACB_RX_INT_FLAGS); |
566 | 565 | ||
567 | if (napi_schedule_prep(&bp->napi)) { | 566 | if (napi_schedule_prep(&bp->napi)) { |
568 | dev_dbg(&bp->pdev->dev, | 567 | netdev_dbg(bp->dev, "scheduling RX softirq\n"); |
569 | "scheduling RX softirq\n"); | ||
570 | __napi_schedule(&bp->napi); | 568 | __napi_schedule(&bp->napi); |
571 | } | 569 | } |
572 | } | 570 | } |
@@ -582,16 +580,19 @@ static irqreturn_t macb_interrupt(int irq, void *dev_id) | |||
582 | 580 | ||
583 | if (status & MACB_BIT(ISR_ROVR)) { | 581 | if (status & MACB_BIT(ISR_ROVR)) { |
584 | /* We missed at least one packet */ | 582 | /* We missed at least one packet */ |
585 | bp->hw_stats.rx_overruns++; | 583 | if (macb_is_gem(bp)) |
584 | bp->hw_stats.gem.rx_overruns++; | ||
585 | else | ||
586 | bp->hw_stats.macb.rx_overruns++; | ||
586 | } | 587 | } |
587 | 588 | ||
588 | if (status & MACB_BIT(HRESP)) { | 589 | if (status & MACB_BIT(HRESP)) { |
589 | /* | 590 | /* |
590 | * TODO: Reset the hardware, and maybe move the printk | 591 | * TODO: Reset the hardware, and maybe move the |
591 | * to a lower-priority context as well (work queue?) | 592 | * netdev_err to a lower-priority context as well |
593 | * (work queue?) | ||
592 | */ | 594 | */ |
593 | printk(KERN_ERR "%s: DMA bus error: HRESP not OK\n", | 595 | netdev_err(dev, "DMA bus error: HRESP not OK\n"); |
594 | dev->name); | ||
595 | } | 596 | } |
596 | 597 | ||
597 | status = macb_readl(bp, ISR); | 598 | status = macb_readl(bp, ISR); |
@@ -626,16 +627,12 @@ static int macb_start_xmit(struct sk_buff *skb, struct net_device *dev) | |||
626 | unsigned long flags; | 627 | unsigned long flags; |
627 | 628 | ||
628 | #ifdef DEBUG | 629 | #ifdef DEBUG |
629 | int i; | 630 | netdev_dbg(bp->dev, |
630 | dev_dbg(&bp->pdev->dev, | 631 | "start_xmit: len %u head %p data %p tail %p end %p\n", |
631 | "start_xmit: len %u head %p data %p tail %p end %p\n", | 632 | skb->len, skb->head, skb->data, |
632 | skb->len, skb->head, skb->data, | 633 | skb_tail_pointer(skb), skb_end_pointer(skb)); |
633 | skb_tail_pointer(skb), skb_end_pointer(skb)); | 634 | print_hex_dump(KERN_DEBUG, "data: ", DUMP_PREFIX_OFFSET, 16, 1, |
634 | dev_dbg(&bp->pdev->dev, | 635 | skb->data, 16, true); |
635 | "data:"); | ||
636 | for (i = 0; i < 16; i++) | ||
637 | printk(" %02x", (unsigned int)skb->data[i]); | ||
638 | printk("\n"); | ||
639 | #endif | 636 | #endif |
640 | 637 | ||
641 | len = skb->len; | 638 | len = skb->len; |
@@ -645,21 +642,20 @@ static int macb_start_xmit(struct sk_buff *skb, struct net_device *dev) | |||
645 | if (TX_BUFFS_AVAIL(bp) < 1) { | 642 | if (TX_BUFFS_AVAIL(bp) < 1) { |
646 | netif_stop_queue(dev); | 643 | netif_stop_queue(dev); |
647 | spin_unlock_irqrestore(&bp->lock, flags); | 644 | spin_unlock_irqrestore(&bp->lock, flags); |
648 | dev_err(&bp->pdev->dev, | 645 | netdev_err(bp->dev, "BUG! Tx Ring full when queue awake!\n"); |
649 | "BUG! Tx Ring full when queue awake!\n"); | 646 | netdev_dbg(bp->dev, "tx_head = %u, tx_tail = %u\n", |
650 | dev_dbg(&bp->pdev->dev, "tx_head = %u, tx_tail = %u\n", | 647 | bp->tx_head, bp->tx_tail); |
651 | bp->tx_head, bp->tx_tail); | ||
652 | return NETDEV_TX_BUSY; | 648 | return NETDEV_TX_BUSY; |
653 | } | 649 | } |
654 | 650 | ||
655 | entry = bp->tx_head; | 651 | entry = bp->tx_head; |
656 | dev_dbg(&bp->pdev->dev, "Allocated ring entry %u\n", entry); | 652 | netdev_dbg(bp->dev, "Allocated ring entry %u\n", entry); |
657 | mapping = dma_map_single(&bp->pdev->dev, skb->data, | 653 | mapping = dma_map_single(&bp->pdev->dev, skb->data, |
658 | len, DMA_TO_DEVICE); | 654 | len, DMA_TO_DEVICE); |
659 | bp->tx_skb[entry].skb = skb; | 655 | bp->tx_skb[entry].skb = skb; |
660 | bp->tx_skb[entry].mapping = mapping; | 656 | bp->tx_skb[entry].mapping = mapping; |
661 | dev_dbg(&bp->pdev->dev, "Mapped skb data %p to DMA addr %08lx\n", | 657 | netdev_dbg(bp->dev, "Mapped skb data %p to DMA addr %08lx\n", |
662 | skb->data, (unsigned long)mapping); | 658 | skb->data, (unsigned long)mapping); |
663 | 659 | ||
664 | ctrl = MACB_BF(TX_FRMLEN, len); | 660 | ctrl = MACB_BF(TX_FRMLEN, len); |
665 | ctrl |= MACB_BIT(TX_LAST); | 661 | ctrl |= MACB_BIT(TX_LAST); |
@@ -723,27 +719,27 @@ static int macb_alloc_consistent(struct macb *bp) | |||
723 | &bp->rx_ring_dma, GFP_KERNEL); | 719 | &bp->rx_ring_dma, GFP_KERNEL); |
724 | if (!bp->rx_ring) | 720 | if (!bp->rx_ring) |
725 | goto out_err; | 721 | goto out_err; |
726 | dev_dbg(&bp->pdev->dev, | 722 | netdev_dbg(bp->dev, |
727 | "Allocated RX ring of %d bytes at %08lx (mapped %p)\n", | 723 | "Allocated RX ring of %d bytes at %08lx (mapped %p)\n", |
728 | size, (unsigned long)bp->rx_ring_dma, bp->rx_ring); | 724 | size, (unsigned long)bp->rx_ring_dma, bp->rx_ring); |
729 | 725 | ||
730 | size = TX_RING_BYTES; | 726 | size = TX_RING_BYTES; |
731 | bp->tx_ring = dma_alloc_coherent(&bp->pdev->dev, size, | 727 | bp->tx_ring = dma_alloc_coherent(&bp->pdev->dev, size, |
732 | &bp->tx_ring_dma, GFP_KERNEL); | 728 | &bp->tx_ring_dma, GFP_KERNEL); |
733 | if (!bp->tx_ring) | 729 | if (!bp->tx_ring) |
734 | goto out_err; | 730 | goto out_err; |
735 | dev_dbg(&bp->pdev->dev, | 731 | netdev_dbg(bp->dev, |
736 | "Allocated TX ring of %d bytes at %08lx (mapped %p)\n", | 732 | "Allocated TX ring of %d bytes at %08lx (mapped %p)\n", |
737 | size, (unsigned long)bp->tx_ring_dma, bp->tx_ring); | 733 | size, (unsigned long)bp->tx_ring_dma, bp->tx_ring); |
738 | 734 | ||
739 | size = RX_RING_SIZE * RX_BUFFER_SIZE; | 735 | size = RX_RING_SIZE * RX_BUFFER_SIZE; |
740 | bp->rx_buffers = dma_alloc_coherent(&bp->pdev->dev, size, | 736 | bp->rx_buffers = dma_alloc_coherent(&bp->pdev->dev, size, |
741 | &bp->rx_buffers_dma, GFP_KERNEL); | 737 | &bp->rx_buffers_dma, GFP_KERNEL); |
742 | if (!bp->rx_buffers) | 738 | if (!bp->rx_buffers) |
743 | goto out_err; | 739 | goto out_err; |
744 | dev_dbg(&bp->pdev->dev, | 740 | netdev_dbg(bp->dev, |
745 | "Allocated RX buffers of %d bytes at %08lx (mapped %p)\n", | 741 | "Allocated RX buffers of %d bytes at %08lx (mapped %p)\n", |
746 | size, (unsigned long)bp->rx_buffers_dma, bp->rx_buffers); | 742 | size, (unsigned long)bp->rx_buffers_dma, bp->rx_buffers); |
747 | 743 | ||
748 | return 0; | 744 | return 0; |
749 | 745 | ||
@@ -797,6 +793,84 @@ static void macb_reset_hw(struct macb *bp) | |||
797 | macb_readl(bp, ISR); | 793 | macb_readl(bp, ISR); |
798 | } | 794 | } |
799 | 795 | ||
796 | static u32 gem_mdc_clk_div(struct macb *bp) | ||
797 | { | ||
798 | u32 config; | ||
799 | unsigned long pclk_hz = clk_get_rate(bp->pclk); | ||
800 | |||
801 | if (pclk_hz <= 20000000) | ||
802 | config = GEM_BF(CLK, GEM_CLK_DIV8); | ||
803 | else if (pclk_hz <= 40000000) | ||
804 | config = GEM_BF(CLK, GEM_CLK_DIV16); | ||
805 | else if (pclk_hz <= 80000000) | ||
806 | config = GEM_BF(CLK, GEM_CLK_DIV32); | ||
807 | else if (pclk_hz <= 120000000) | ||
808 | config = GEM_BF(CLK, GEM_CLK_DIV48); | ||
809 | else if (pclk_hz <= 160000000) | ||
810 | config = GEM_BF(CLK, GEM_CLK_DIV64); | ||
811 | else | ||
812 | config = GEM_BF(CLK, GEM_CLK_DIV96); | ||
813 | |||
814 | return config; | ||
815 | } | ||
816 | |||
817 | static u32 macb_mdc_clk_div(struct macb *bp) | ||
818 | { | ||
819 | u32 config; | ||
820 | unsigned long pclk_hz; | ||
821 | |||
822 | if (macb_is_gem(bp)) | ||
823 | return gem_mdc_clk_div(bp); | ||
824 | |||
825 | pclk_hz = clk_get_rate(bp->pclk); | ||
826 | if (pclk_hz <= 20000000) | ||
827 | config = MACB_BF(CLK, MACB_CLK_DIV8); | ||
828 | else if (pclk_hz <= 40000000) | ||
829 | config = MACB_BF(CLK, MACB_CLK_DIV16); | ||
830 | else if (pclk_hz <= 80000000) | ||
831 | config = MACB_BF(CLK, MACB_CLK_DIV32); | ||
832 | else | ||
833 | config = MACB_BF(CLK, MACB_CLK_DIV64); | ||
834 | |||
835 | return config; | ||
836 | } | ||
837 | |||
838 | /* | ||
839 | * Get the DMA bus width field of the network configuration register that we | ||
840 | * should program. We find the width from decoding the design configuration | ||
841 | * register to find the maximum supported data bus width. | ||
842 | */ | ||
843 | static u32 macb_dbw(struct macb *bp) | ||
844 | { | ||
845 | if (!macb_is_gem(bp)) | ||
846 | return 0; | ||
847 | |||
848 | switch (GEM_BFEXT(DBWDEF, gem_readl(bp, DCFG1))) { | ||
849 | case 4: | ||
850 | return GEM_BF(DBW, GEM_DBW128); | ||
851 | case 2: | ||
852 | return GEM_BF(DBW, GEM_DBW64); | ||
853 | case 1: | ||
854 | default: | ||
855 | return GEM_BF(DBW, GEM_DBW32); | ||
856 | } | ||
857 | } | ||
858 | |||
859 | /* | ||
860 | * Configure the receive DMA engine to use the correct receive buffer size. | ||
861 | * This is a configurable parameter for GEM. | ||
862 | */ | ||
863 | static void macb_configure_dma(struct macb *bp) | ||
864 | { | ||
865 | u32 dmacfg; | ||
866 | |||
867 | if (macb_is_gem(bp)) { | ||
868 | dmacfg = gem_readl(bp, DMACFG) & ~GEM_BF(RXBS, -1L); | ||
869 | dmacfg |= GEM_BF(RXBS, RX_BUFFER_SIZE / 64); | ||
870 | gem_writel(bp, DMACFG, dmacfg); | ||
871 | } | ||
872 | } | ||
873 | |||
800 | static void macb_init_hw(struct macb *bp) | 874 | static void macb_init_hw(struct macb *bp) |
801 | { | 875 | { |
802 | u32 config; | 876 | u32 config; |
@@ -804,7 +878,7 @@ static void macb_init_hw(struct macb *bp) | |||
804 | macb_reset_hw(bp); | 878 | macb_reset_hw(bp); |
805 | __macb_set_hwaddr(bp); | 879 | __macb_set_hwaddr(bp); |
806 | 880 | ||
807 | config = macb_readl(bp, NCFGR) & MACB_BF(CLK, -1L); | 881 | config = macb_mdc_clk_div(bp); |
808 | config |= MACB_BIT(PAE); /* PAuse Enable */ | 882 | config |= MACB_BIT(PAE); /* PAuse Enable */ |
809 | config |= MACB_BIT(DRFCS); /* Discard Rx FCS */ | 883 | config |= MACB_BIT(DRFCS); /* Discard Rx FCS */ |
810 | config |= MACB_BIT(BIG); /* Receive oversized frames */ | 884 | config |= MACB_BIT(BIG); /* Receive oversized frames */ |
@@ -812,8 +886,11 @@ static void macb_init_hw(struct macb *bp) | |||
812 | config |= MACB_BIT(CAF); /* Copy All Frames */ | 886 | config |= MACB_BIT(CAF); /* Copy All Frames */ |
813 | if (!(bp->dev->flags & IFF_BROADCAST)) | 887 | if (!(bp->dev->flags & IFF_BROADCAST)) |
814 | config |= MACB_BIT(NBC); /* No BroadCast */ | 888 | config |= MACB_BIT(NBC); /* No BroadCast */ |
889 | config |= macb_dbw(bp); | ||
815 | macb_writel(bp, NCFGR, config); | 890 | macb_writel(bp, NCFGR, config); |
816 | 891 | ||
892 | macb_configure_dma(bp); | ||
893 | |||
817 | /* Initialize TX and RX buffers */ | 894 | /* Initialize TX and RX buffers */ |
818 | macb_writel(bp, RBQP, bp->rx_ring_dma); | 895 | macb_writel(bp, RBQP, bp->rx_ring_dma); |
819 | macb_writel(bp, TBQP, bp->tx_ring_dma); | 896 | macb_writel(bp, TBQP, bp->tx_ring_dma); |
@@ -909,8 +986,8 @@ static void macb_sethashtable(struct net_device *dev) | |||
909 | mc_filter[bitnr >> 5] |= 1 << (bitnr & 31); | 986 | mc_filter[bitnr >> 5] |= 1 << (bitnr & 31); |
910 | } | 987 | } |
911 | 988 | ||
912 | macb_writel(bp, HRB, mc_filter[0]); | 989 | macb_or_gem_writel(bp, HRB, mc_filter[0]); |
913 | macb_writel(bp, HRT, mc_filter[1]); | 990 | macb_or_gem_writel(bp, HRT, mc_filter[1]); |
914 | } | 991 | } |
915 | 992 | ||
916 | /* | 993 | /* |
@@ -932,8 +1009,8 @@ static void macb_set_rx_mode(struct net_device *dev) | |||
932 | 1009 | ||
933 | if (dev->flags & IFF_ALLMULTI) { | 1010 | if (dev->flags & IFF_ALLMULTI) { |
934 | /* Enable all multicast mode */ | 1011 | /* Enable all multicast mode */ |
935 | macb_writel(bp, HRB, -1); | 1012 | macb_or_gem_writel(bp, HRB, -1); |
936 | macb_writel(bp, HRT, -1); | 1013 | macb_or_gem_writel(bp, HRT, -1); |
937 | cfg |= MACB_BIT(NCFGR_MTI); | 1014 | cfg |= MACB_BIT(NCFGR_MTI); |
938 | } else if (!netdev_mc_empty(dev)) { | 1015 | } else if (!netdev_mc_empty(dev)) { |
939 | /* Enable specific multicasts */ | 1016 | /* Enable specific multicasts */ |
@@ -941,8 +1018,8 @@ static void macb_set_rx_mode(struct net_device *dev) | |||
941 | cfg |= MACB_BIT(NCFGR_MTI); | 1018 | cfg |= MACB_BIT(NCFGR_MTI); |
942 | } else if (dev->flags & (~IFF_ALLMULTI)) { | 1019 | } else if (dev->flags & (~IFF_ALLMULTI)) { |
943 | /* Disable all multicast mode */ | 1020 | /* Disable all multicast mode */ |
944 | macb_writel(bp, HRB, 0); | 1021 | macb_or_gem_writel(bp, HRB, 0); |
945 | macb_writel(bp, HRT, 0); | 1022 | macb_or_gem_writel(bp, HRT, 0); |
946 | cfg &= ~MACB_BIT(NCFGR_MTI); | 1023 | cfg &= ~MACB_BIT(NCFGR_MTI); |
947 | } | 1024 | } |
948 | 1025 | ||
@@ -954,7 +1031,7 @@ static int macb_open(struct net_device *dev) | |||
954 | struct macb *bp = netdev_priv(dev); | 1031 | struct macb *bp = netdev_priv(dev); |
955 | int err; | 1032 | int err; |
956 | 1033 | ||
957 | dev_dbg(&bp->pdev->dev, "open\n"); | 1034 | netdev_dbg(bp->dev, "open\n"); |
958 | 1035 | ||
959 | /* if the phy is not yet register, retry later*/ | 1036 | /* if the phy is not yet register, retry later*/ |
960 | if (!bp->phy_dev) | 1037 | if (!bp->phy_dev) |
@@ -965,9 +1042,8 @@ static int macb_open(struct net_device *dev) | |||
965 | 1042 | ||
966 | err = macb_alloc_consistent(bp); | 1043 | err = macb_alloc_consistent(bp); |
967 | if (err) { | 1044 | if (err) { |
968 | printk(KERN_ERR | 1045 | netdev_err(dev, "Unable to allocate DMA memory (error %d)\n", |
969 | "%s: Unable to allocate DMA memory (error %d)\n", | 1046 | err); |
970 | dev->name, err); | ||
971 | return err; | 1047 | return err; |
972 | } | 1048 | } |
973 | 1049 | ||
@@ -1005,11 +1081,62 @@ static int macb_close(struct net_device *dev) | |||
1005 | return 0; | 1081 | return 0; |
1006 | } | 1082 | } |
1007 | 1083 | ||
1084 | static void gem_update_stats(struct macb *bp) | ||
1085 | { | ||
1086 | u32 __iomem *reg = bp->regs + GEM_OTX; | ||
1087 | u32 *p = &bp->hw_stats.gem.tx_octets_31_0; | ||
1088 | u32 *end = &bp->hw_stats.gem.rx_udp_checksum_errors + 1; | ||
1089 | |||
1090 | for (; p < end; p++, reg++) | ||
1091 | *p += __raw_readl(reg); | ||
1092 | } | ||
1093 | |||
1094 | static struct net_device_stats *gem_get_stats(struct macb *bp) | ||
1095 | { | ||
1096 | struct gem_stats *hwstat = &bp->hw_stats.gem; | ||
1097 | struct net_device_stats *nstat = &bp->stats; | ||
1098 | |||
1099 | gem_update_stats(bp); | ||
1100 | |||
1101 | nstat->rx_errors = (hwstat->rx_frame_check_sequence_errors + | ||
1102 | hwstat->rx_alignment_errors + | ||
1103 | hwstat->rx_resource_errors + | ||
1104 | hwstat->rx_overruns + | ||
1105 | hwstat->rx_oversize_frames + | ||
1106 | hwstat->rx_jabbers + | ||
1107 | hwstat->rx_undersized_frames + | ||
1108 | hwstat->rx_length_field_frame_errors); | ||
1109 | nstat->tx_errors = (hwstat->tx_late_collisions + | ||
1110 | hwstat->tx_excessive_collisions + | ||
1111 | hwstat->tx_underrun + | ||
1112 | hwstat->tx_carrier_sense_errors); | ||
1113 | nstat->multicast = hwstat->rx_multicast_frames; | ||
1114 | nstat->collisions = (hwstat->tx_single_collision_frames + | ||
1115 | hwstat->tx_multiple_collision_frames + | ||
1116 | hwstat->tx_excessive_collisions); | ||
1117 | nstat->rx_length_errors = (hwstat->rx_oversize_frames + | ||
1118 | hwstat->rx_jabbers + | ||
1119 | hwstat->rx_undersized_frames + | ||
1120 | hwstat->rx_length_field_frame_errors); | ||
1121 | nstat->rx_over_errors = hwstat->rx_resource_errors; | ||
1122 | nstat->rx_crc_errors = hwstat->rx_frame_check_sequence_errors; | ||
1123 | nstat->rx_frame_errors = hwstat->rx_alignment_errors; | ||
1124 | nstat->rx_fifo_errors = hwstat->rx_overruns; | ||
1125 | nstat->tx_aborted_errors = hwstat->tx_excessive_collisions; | ||
1126 | nstat->tx_carrier_errors = hwstat->tx_carrier_sense_errors; | ||
1127 | nstat->tx_fifo_errors = hwstat->tx_underrun; | ||
1128 | |||
1129 | return nstat; | ||
1130 | } | ||
1131 | |||
1008 | static struct net_device_stats *macb_get_stats(struct net_device *dev) | 1132 | static struct net_device_stats *macb_get_stats(struct net_device *dev) |
1009 | { | 1133 | { |
1010 | struct macb *bp = netdev_priv(dev); | 1134 | struct macb *bp = netdev_priv(dev); |
1011 | struct net_device_stats *nstat = &bp->stats; | 1135 | struct net_device_stats *nstat = &bp->stats; |
1012 | struct macb_stats *hwstat = &bp->hw_stats; | 1136 | struct macb_stats *hwstat = &bp->hw_stats.macb; |
1137 | |||
1138 | if (macb_is_gem(bp)) | ||
1139 | return gem_get_stats(bp); | ||
1013 | 1140 | ||
1014 | /* read stats from hardware */ | 1141 | /* read stats from hardware */ |
1015 | macb_update_stats(bp); | 1142 | macb_update_stats(bp); |
@@ -1119,12 +1246,11 @@ static const struct net_device_ops macb_netdev_ops = { | |||
1119 | 1246 | ||
1120 | static int __init macb_probe(struct platform_device *pdev) | 1247 | static int __init macb_probe(struct platform_device *pdev) |
1121 | { | 1248 | { |
1122 | struct eth_platform_data *pdata; | 1249 | struct macb_platform_data *pdata; |
1123 | struct resource *regs; | 1250 | struct resource *regs; |
1124 | struct net_device *dev; | 1251 | struct net_device *dev; |
1125 | struct macb *bp; | 1252 | struct macb *bp; |
1126 | struct phy_device *phydev; | 1253 | struct phy_device *phydev; |
1127 | unsigned long pclk_hz; | ||
1128 | u32 config; | 1254 | u32 config; |
1129 | int err = -ENXIO; | 1255 | int err = -ENXIO; |
1130 | 1256 | ||
@@ -1152,28 +1278,19 @@ static int __init macb_probe(struct platform_device *pdev) | |||
1152 | 1278 | ||
1153 | spin_lock_init(&bp->lock); | 1279 | spin_lock_init(&bp->lock); |
1154 | 1280 | ||
1155 | #if defined(CONFIG_ARCH_AT91) | 1281 | bp->pclk = clk_get(&pdev->dev, "pclk"); |
1156 | bp->pclk = clk_get(&pdev->dev, "macb_clk"); | ||
1157 | if (IS_ERR(bp->pclk)) { | 1282 | if (IS_ERR(bp->pclk)) { |
1158 | dev_err(&pdev->dev, "failed to get macb_clk\n"); | 1283 | dev_err(&pdev->dev, "failed to get macb_clk\n"); |
1159 | goto err_out_free_dev; | 1284 | goto err_out_free_dev; |
1160 | } | 1285 | } |
1161 | clk_enable(bp->pclk); | 1286 | clk_enable(bp->pclk); |
1162 | #else | 1287 | |
1163 | bp->pclk = clk_get(&pdev->dev, "pclk"); | ||
1164 | if (IS_ERR(bp->pclk)) { | ||
1165 | dev_err(&pdev->dev, "failed to get pclk\n"); | ||
1166 | goto err_out_free_dev; | ||
1167 | } | ||
1168 | bp->hclk = clk_get(&pdev->dev, "hclk"); | 1288 | bp->hclk = clk_get(&pdev->dev, "hclk"); |
1169 | if (IS_ERR(bp->hclk)) { | 1289 | if (IS_ERR(bp->hclk)) { |
1170 | dev_err(&pdev->dev, "failed to get hclk\n"); | 1290 | dev_err(&pdev->dev, "failed to get hclk\n"); |
1171 | goto err_out_put_pclk; | 1291 | goto err_out_put_pclk; |
1172 | } | 1292 | } |
1173 | |||
1174 | clk_enable(bp->pclk); | ||
1175 | clk_enable(bp->hclk); | 1293 | clk_enable(bp->hclk); |
1176 | #endif | ||
1177 | 1294 | ||
1178 | bp->regs = ioremap(regs->start, resource_size(regs)); | 1295 | bp->regs = ioremap(regs->start, resource_size(regs)); |
1179 | if (!bp->regs) { | 1296 | if (!bp->regs) { |
@@ -1185,9 +1302,8 @@ static int __init macb_probe(struct platform_device *pdev) | |||
1185 | dev->irq = platform_get_irq(pdev, 0); | 1302 | dev->irq = platform_get_irq(pdev, 0); |
1186 | err = request_irq(dev->irq, macb_interrupt, 0, dev->name, dev); | 1303 | err = request_irq(dev->irq, macb_interrupt, 0, dev->name, dev); |
1187 | if (err) { | 1304 | if (err) { |
1188 | printk(KERN_ERR | 1305 | dev_err(&pdev->dev, "Unable to request IRQ %d (error %d)\n", |
1189 | "%s: Unable to request IRQ %d (error %d)\n", | 1306 | dev->irq, err); |
1190 | dev->name, dev->irq, err); | ||
1191 | goto err_out_iounmap; | 1307 | goto err_out_iounmap; |
1192 | } | 1308 | } |
1193 | 1309 | ||
@@ -1198,15 +1314,8 @@ static int __init macb_probe(struct platform_device *pdev) | |||
1198 | dev->base_addr = regs->start; | 1314 | dev->base_addr = regs->start; |
1199 | 1315 | ||
1200 | /* Set MII management clock divider */ | 1316 | /* Set MII management clock divider */ |
1201 | pclk_hz = clk_get_rate(bp->pclk); | 1317 | config = macb_mdc_clk_div(bp); |
1202 | if (pclk_hz <= 20000000) | 1318 | config |= macb_dbw(bp); |
1203 | config = MACB_BF(CLK, MACB_CLK_DIV8); | ||
1204 | else if (pclk_hz <= 40000000) | ||
1205 | config = MACB_BF(CLK, MACB_CLK_DIV16); | ||
1206 | else if (pclk_hz <= 80000000) | ||
1207 | config = MACB_BF(CLK, MACB_CLK_DIV32); | ||
1208 | else | ||
1209 | config = MACB_BF(CLK, MACB_CLK_DIV64); | ||
1210 | macb_writel(bp, NCFGR, config); | 1319 | macb_writel(bp, NCFGR, config); |
1211 | 1320 | ||
1212 | macb_get_hwaddr(bp); | 1321 | macb_get_hwaddr(bp); |
@@ -1214,15 +1323,16 @@ static int __init macb_probe(struct platform_device *pdev) | |||
1214 | 1323 | ||
1215 | if (pdata && pdata->is_rmii) | 1324 | if (pdata && pdata->is_rmii) |
1216 | #if defined(CONFIG_ARCH_AT91) | 1325 | #if defined(CONFIG_ARCH_AT91) |
1217 | macb_writel(bp, USRIO, (MACB_BIT(RMII) | MACB_BIT(CLKEN)) ); | 1326 | macb_or_gem_writel(bp, USRIO, (MACB_BIT(RMII) | |
1327 | MACB_BIT(CLKEN))); | ||
1218 | #else | 1328 | #else |
1219 | macb_writel(bp, USRIO, 0); | 1329 | macb_or_gem_writel(bp, USRIO, 0); |
1220 | #endif | 1330 | #endif |
1221 | else | 1331 | else |
1222 | #if defined(CONFIG_ARCH_AT91) | 1332 | #if defined(CONFIG_ARCH_AT91) |
1223 | macb_writel(bp, USRIO, MACB_BIT(CLKEN)); | 1333 | macb_or_gem_writel(bp, USRIO, MACB_BIT(CLKEN)); |
1224 | #else | 1334 | #else |
1225 | macb_writel(bp, USRIO, MACB_BIT(MII)); | 1335 | macb_or_gem_writel(bp, USRIO, MACB_BIT(MII)); |
1226 | #endif | 1336 | #endif |
1227 | 1337 | ||
1228 | bp->tx_pending = DEF_TX_RING_PENDING; | 1338 | bp->tx_pending = DEF_TX_RING_PENDING; |
@@ -1239,13 +1349,13 @@ static int __init macb_probe(struct platform_device *pdev) | |||
1239 | 1349 | ||
1240 | platform_set_drvdata(pdev, dev); | 1350 | platform_set_drvdata(pdev, dev); |
1241 | 1351 | ||
1242 | printk(KERN_INFO "%s: Atmel MACB at 0x%08lx irq %d (%pM)\n", | 1352 | netdev_info(dev, "Cadence %s at 0x%08lx irq %d (%pM)\n", |
1243 | dev->name, dev->base_addr, dev->irq, dev->dev_addr); | 1353 | macb_is_gem(bp) ? "GEM" : "MACB", dev->base_addr, |
1354 | dev->irq, dev->dev_addr); | ||
1244 | 1355 | ||
1245 | phydev = bp->phy_dev; | 1356 | phydev = bp->phy_dev; |
1246 | printk(KERN_INFO "%s: attached PHY driver [%s] " | 1357 | netdev_info(dev, "attached PHY driver [%s] (mii_bus:phy_addr=%s, irq=%d)\n", |
1247 | "(mii_bus:phy_addr=%s, irq=%d)\n", dev->name, | 1358 | phydev->drv->name, dev_name(&phydev->dev), phydev->irq); |
1248 | phydev->drv->name, dev_name(&phydev->dev), phydev->irq); | ||
1249 | 1359 | ||
1250 | return 0; | 1360 | return 0; |
1251 | 1361 | ||
@@ -1256,14 +1366,10 @@ err_out_free_irq: | |||
1256 | err_out_iounmap: | 1366 | err_out_iounmap: |
1257 | iounmap(bp->regs); | 1367 | iounmap(bp->regs); |
1258 | err_out_disable_clocks: | 1368 | err_out_disable_clocks: |
1259 | #ifndef CONFIG_ARCH_AT91 | ||
1260 | clk_disable(bp->hclk); | 1369 | clk_disable(bp->hclk); |
1261 | clk_put(bp->hclk); | 1370 | clk_put(bp->hclk); |
1262 | #endif | ||
1263 | clk_disable(bp->pclk); | 1371 | clk_disable(bp->pclk); |
1264 | #ifndef CONFIG_ARCH_AT91 | ||
1265 | err_out_put_pclk: | 1372 | err_out_put_pclk: |
1266 | #endif | ||
1267 | clk_put(bp->pclk); | 1373 | clk_put(bp->pclk); |
1268 | err_out_free_dev: | 1374 | err_out_free_dev: |
1269 | free_netdev(dev); | 1375 | free_netdev(dev); |
@@ -1289,10 +1395,8 @@ static int __exit macb_remove(struct platform_device *pdev) | |||
1289 | unregister_netdev(dev); | 1395 | unregister_netdev(dev); |
1290 | free_irq(dev->irq, dev); | 1396 | free_irq(dev->irq, dev); |
1291 | iounmap(bp->regs); | 1397 | iounmap(bp->regs); |
1292 | #ifndef CONFIG_ARCH_AT91 | ||
1293 | clk_disable(bp->hclk); | 1398 | clk_disable(bp->hclk); |
1294 | clk_put(bp->hclk); | 1399 | clk_put(bp->hclk); |
1295 | #endif | ||
1296 | clk_disable(bp->pclk); | 1400 | clk_disable(bp->pclk); |
1297 | clk_put(bp->pclk); | 1401 | clk_put(bp->pclk); |
1298 | free_netdev(dev); | 1402 | free_netdev(dev); |
@@ -1310,9 +1414,7 @@ static int macb_suspend(struct platform_device *pdev, pm_message_t state) | |||
1310 | 1414 | ||
1311 | netif_device_detach(netdev); | 1415 | netif_device_detach(netdev); |
1312 | 1416 | ||
1313 | #ifndef CONFIG_ARCH_AT91 | ||
1314 | clk_disable(bp->hclk); | 1417 | clk_disable(bp->hclk); |
1315 | #endif | ||
1316 | clk_disable(bp->pclk); | 1418 | clk_disable(bp->pclk); |
1317 | 1419 | ||
1318 | return 0; | 1420 | return 0; |
@@ -1324,9 +1426,7 @@ static int macb_resume(struct platform_device *pdev) | |||
1324 | struct macb *bp = netdev_priv(netdev); | 1426 | struct macb *bp = netdev_priv(netdev); |
1325 | 1427 | ||
1326 | clk_enable(bp->pclk); | 1428 | clk_enable(bp->pclk); |
1327 | #ifndef CONFIG_ARCH_AT91 | ||
1328 | clk_enable(bp->hclk); | 1429 | clk_enable(bp->hclk); |
1329 | #endif | ||
1330 | 1430 | ||
1331 | netif_device_attach(netdev); | 1431 | netif_device_attach(netdev); |
1332 | 1432 | ||
@@ -1361,6 +1461,6 @@ module_init(macb_init); | |||
1361 | module_exit(macb_exit); | 1461 | module_exit(macb_exit); |
1362 | 1462 | ||
1363 | MODULE_LICENSE("GPL"); | 1463 | MODULE_LICENSE("GPL"); |
1364 | MODULE_DESCRIPTION("Atmel MACB Ethernet driver"); | 1464 | MODULE_DESCRIPTION("Cadence MACB/GEM Ethernet driver"); |
1365 | MODULE_AUTHOR("Haavard Skinnemoen (Atmel)"); | 1465 | MODULE_AUTHOR("Haavard Skinnemoen (Atmel)"); |
1366 | MODULE_ALIAS("platform:macb"); | 1466 | MODULE_ALIAS("platform:macb"); |
diff --git a/drivers/net/ethernet/cadence/macb.h b/drivers/net/ethernet/cadence/macb.h index d3212f6db703..193107884a5a 100644 --- a/drivers/net/ethernet/cadence/macb.h +++ b/drivers/net/ethernet/cadence/macb.h | |||
@@ -59,6 +59,24 @@ | |||
59 | #define MACB_TPQ 0x00bc | 59 | #define MACB_TPQ 0x00bc |
60 | #define MACB_USRIO 0x00c0 | 60 | #define MACB_USRIO 0x00c0 |
61 | #define MACB_WOL 0x00c4 | 61 | #define MACB_WOL 0x00c4 |
62 | #define MACB_MID 0x00fc | ||
63 | |||
64 | /* GEM register offsets. */ | ||
65 | #define GEM_NCFGR 0x0004 | ||
66 | #define GEM_USRIO 0x000c | ||
67 | #define GEM_DMACFG 0x0010 | ||
68 | #define GEM_HRB 0x0080 | ||
69 | #define GEM_HRT 0x0084 | ||
70 | #define GEM_SA1B 0x0088 | ||
71 | #define GEM_SA1T 0x008C | ||
72 | #define GEM_OTX 0x0100 | ||
73 | #define GEM_DCFG1 0x0280 | ||
74 | #define GEM_DCFG2 0x0284 | ||
75 | #define GEM_DCFG3 0x0288 | ||
76 | #define GEM_DCFG4 0x028c | ||
77 | #define GEM_DCFG5 0x0290 | ||
78 | #define GEM_DCFG6 0x0294 | ||
79 | #define GEM_DCFG7 0x0298 | ||
62 | 80 | ||
63 | /* Bitfields in NCR */ | 81 | /* Bitfields in NCR */ |
64 | #define MACB_LB_OFFSET 0 | 82 | #define MACB_LB_OFFSET 0 |
@@ -126,6 +144,21 @@ | |||
126 | #define MACB_IRXFCS_OFFSET 19 | 144 | #define MACB_IRXFCS_OFFSET 19 |
127 | #define MACB_IRXFCS_SIZE 1 | 145 | #define MACB_IRXFCS_SIZE 1 |
128 | 146 | ||
147 | /* GEM specific NCFGR bitfields. */ | ||
148 | #define GEM_CLK_OFFSET 18 | ||
149 | #define GEM_CLK_SIZE 3 | ||
150 | #define GEM_DBW_OFFSET 21 | ||
151 | #define GEM_DBW_SIZE 2 | ||
152 | |||
153 | /* Constants for data bus width. */ | ||
154 | #define GEM_DBW32 0 | ||
155 | #define GEM_DBW64 1 | ||
156 | #define GEM_DBW128 2 | ||
157 | |||
158 | /* Bitfields in DMACFG. */ | ||
159 | #define GEM_RXBS_OFFSET 16 | ||
160 | #define GEM_RXBS_SIZE 8 | ||
161 | |||
129 | /* Bitfields in NSR */ | 162 | /* Bitfields in NSR */ |
130 | #define MACB_NSR_LINK_OFFSET 0 | 163 | #define MACB_NSR_LINK_OFFSET 0 |
131 | #define MACB_NSR_LINK_SIZE 1 | 164 | #define MACB_NSR_LINK_SIZE 1 |
@@ -228,12 +261,30 @@ | |||
228 | #define MACB_WOL_MTI_OFFSET 19 | 261 | #define MACB_WOL_MTI_OFFSET 19 |
229 | #define MACB_WOL_MTI_SIZE 1 | 262 | #define MACB_WOL_MTI_SIZE 1 |
230 | 263 | ||
264 | /* Bitfields in MID */ | ||
265 | #define MACB_IDNUM_OFFSET 16 | ||
266 | #define MACB_IDNUM_SIZE 16 | ||
267 | #define MACB_REV_OFFSET 0 | ||
268 | #define MACB_REV_SIZE 16 | ||
269 | |||
270 | /* Bitfields in DCFG1. */ | ||
271 | #define GEM_DBWDEF_OFFSET 25 | ||
272 | #define GEM_DBWDEF_SIZE 3 | ||
273 | |||
231 | /* Constants for CLK */ | 274 | /* Constants for CLK */ |
232 | #define MACB_CLK_DIV8 0 | 275 | #define MACB_CLK_DIV8 0 |
233 | #define MACB_CLK_DIV16 1 | 276 | #define MACB_CLK_DIV16 1 |
234 | #define MACB_CLK_DIV32 2 | 277 | #define MACB_CLK_DIV32 2 |
235 | #define MACB_CLK_DIV64 3 | 278 | #define MACB_CLK_DIV64 3 |
236 | 279 | ||
280 | /* GEM specific constants for CLK. */ | ||
281 | #define GEM_CLK_DIV8 0 | ||
282 | #define GEM_CLK_DIV16 1 | ||
283 | #define GEM_CLK_DIV32 2 | ||
284 | #define GEM_CLK_DIV48 3 | ||
285 | #define GEM_CLK_DIV64 4 | ||
286 | #define GEM_CLK_DIV96 5 | ||
287 | |||
237 | /* Constants for MAN register */ | 288 | /* Constants for MAN register */ |
238 | #define MACB_MAN_SOF 1 | 289 | #define MACB_MAN_SOF 1 |
239 | #define MACB_MAN_WRITE 1 | 290 | #define MACB_MAN_WRITE 1 |
@@ -254,11 +305,52 @@ | |||
254 | << MACB_##name##_OFFSET)) \ | 305 | << MACB_##name##_OFFSET)) \ |
255 | | MACB_BF(name,value)) | 306 | | MACB_BF(name,value)) |
256 | 307 | ||
308 | #define GEM_BIT(name) \ | ||
309 | (1 << GEM_##name##_OFFSET) | ||
310 | #define GEM_BF(name, value) \ | ||
311 | (((value) & ((1 << GEM_##name##_SIZE) - 1)) \ | ||
312 | << GEM_##name##_OFFSET) | ||
313 | #define GEM_BFEXT(name, value)\ | ||
314 | (((value) >> GEM_##name##_OFFSET) \ | ||
315 | & ((1 << GEM_##name##_SIZE) - 1)) | ||
316 | #define GEM_BFINS(name, value, old) \ | ||
317 | (((old) & ~(((1 << GEM_##name##_SIZE) - 1) \ | ||
318 | << GEM_##name##_OFFSET)) \ | ||
319 | | GEM_BF(name, value)) | ||
320 | |||
257 | /* Register access macros */ | 321 | /* Register access macros */ |
258 | #define macb_readl(port,reg) \ | 322 | #define macb_readl(port,reg) \ |
259 | __raw_readl((port)->regs + MACB_##reg) | 323 | __raw_readl((port)->regs + MACB_##reg) |
260 | #define macb_writel(port,reg,value) \ | 324 | #define macb_writel(port,reg,value) \ |
261 | __raw_writel((value), (port)->regs + MACB_##reg) | 325 | __raw_writel((value), (port)->regs + MACB_##reg) |
326 | #define gem_readl(port, reg) \ | ||
327 | __raw_readl((port)->regs + GEM_##reg) | ||
328 | #define gem_writel(port, reg, value) \ | ||
329 | __raw_writel((value), (port)->regs + GEM_##reg) | ||
330 | |||
331 | /* | ||
332 | * Conditional GEM/MACB macros. These perform the operation to the correct | ||
333 | * register dependent on whether the device is a GEM or a MACB. For registers | ||
334 | * and bitfields that are common across both devices, use macb_{read,write}l | ||
335 | * to avoid the cost of the conditional. | ||
336 | */ | ||
337 | #define macb_or_gem_writel(__bp, __reg, __value) \ | ||
338 | ({ \ | ||
339 | if (macb_is_gem((__bp))) \ | ||
340 | gem_writel((__bp), __reg, __value); \ | ||
341 | else \ | ||
342 | macb_writel((__bp), __reg, __value); \ | ||
343 | }) | ||
344 | |||
345 | #define macb_or_gem_readl(__bp, __reg) \ | ||
346 | ({ \ | ||
347 | u32 __v; \ | ||
348 | if (macb_is_gem((__bp))) \ | ||
349 | __v = gem_readl((__bp), __reg); \ | ||
350 | else \ | ||
351 | __v = macb_readl((__bp), __reg); \ | ||
352 | __v; \ | ||
353 | }) | ||
262 | 354 | ||
263 | struct dma_desc { | 355 | struct dma_desc { |
264 | u32 addr; | 356 | u32 addr; |
@@ -358,6 +450,54 @@ struct macb_stats { | |||
358 | u32 tx_pause_frames; | 450 | u32 tx_pause_frames; |
359 | }; | 451 | }; |
360 | 452 | ||
453 | struct gem_stats { | ||
454 | u32 tx_octets_31_0; | ||
455 | u32 tx_octets_47_32; | ||
456 | u32 tx_frames; | ||
457 | u32 tx_broadcast_frames; | ||
458 | u32 tx_multicast_frames; | ||
459 | u32 tx_pause_frames; | ||
460 | u32 tx_64_byte_frames; | ||
461 | u32 tx_65_127_byte_frames; | ||
462 | u32 tx_128_255_byte_frames; | ||
463 | u32 tx_256_511_byte_frames; | ||
464 | u32 tx_512_1023_byte_frames; | ||
465 | u32 tx_1024_1518_byte_frames; | ||
466 | u32 tx_greater_than_1518_byte_frames; | ||
467 | u32 tx_underrun; | ||
468 | u32 tx_single_collision_frames; | ||
469 | u32 tx_multiple_collision_frames; | ||
470 | u32 tx_excessive_collisions; | ||
471 | u32 tx_late_collisions; | ||
472 | u32 tx_deferred_frames; | ||
473 | u32 tx_carrier_sense_errors; | ||
474 | u32 rx_octets_31_0; | ||
475 | u32 rx_octets_47_32; | ||
476 | u32 rx_frames; | ||
477 | u32 rx_broadcast_frames; | ||
478 | u32 rx_multicast_frames; | ||
479 | u32 rx_pause_frames; | ||
480 | u32 rx_64_byte_frames; | ||
481 | u32 rx_65_127_byte_frames; | ||
482 | u32 rx_128_255_byte_frames; | ||
483 | u32 rx_256_511_byte_frames; | ||
484 | u32 rx_512_1023_byte_frames; | ||
485 | u32 rx_1024_1518_byte_frames; | ||
486 | u32 rx_greater_than_1518_byte_frames; | ||
487 | u32 rx_undersized_frames; | ||
488 | u32 rx_oversize_frames; | ||
489 | u32 rx_jabbers; | ||
490 | u32 rx_frame_check_sequence_errors; | ||
491 | u32 rx_length_field_frame_errors; | ||
492 | u32 rx_symbol_errors; | ||
493 | u32 rx_alignment_errors; | ||
494 | u32 rx_resource_errors; | ||
495 | u32 rx_overruns; | ||
496 | u32 rx_ip_header_checksum_errors; | ||
497 | u32 rx_tcp_checksum_errors; | ||
498 | u32 rx_udp_checksum_errors; | ||
499 | }; | ||
500 | |||
361 | struct macb { | 501 | struct macb { |
362 | void __iomem *regs; | 502 | void __iomem *regs; |
363 | 503 | ||
@@ -376,7 +516,10 @@ struct macb { | |||
376 | struct net_device *dev; | 516 | struct net_device *dev; |
377 | struct napi_struct napi; | 517 | struct napi_struct napi; |
378 | struct net_device_stats stats; | 518 | struct net_device_stats stats; |
379 | struct macb_stats hw_stats; | 519 | union { |
520 | struct macb_stats macb; | ||
521 | struct gem_stats gem; | ||
522 | } hw_stats; | ||
380 | 523 | ||
381 | dma_addr_t rx_ring_dma; | 524 | dma_addr_t rx_ring_dma; |
382 | dma_addr_t tx_ring_dma; | 525 | dma_addr_t tx_ring_dma; |
@@ -391,4 +534,9 @@ struct macb { | |||
391 | unsigned int duplex; | 534 | unsigned int duplex; |
392 | }; | 535 | }; |
393 | 536 | ||
537 | static inline bool macb_is_gem(struct macb *bp) | ||
538 | { | ||
539 | return MACB_BFEXT(IDNUM, macb_readl(bp, MID)) == 0x2; | ||
540 | } | ||
541 | |||
394 | #endif /* _MACB_H */ | 542 | #endif /* _MACB_H */ |
diff --git a/drivers/net/ethernet/freescale/Kconfig b/drivers/net/ethernet/freescale/Kconfig index 1cf671643d1f..c520cfd3b298 100644 --- a/drivers/net/ethernet/freescale/Kconfig +++ b/drivers/net/ethernet/freescale/Kconfig | |||
@@ -7,8 +7,7 @@ config NET_VENDOR_FREESCALE | |||
7 | default y | 7 | default y |
8 | depends on FSL_SOC || QUICC_ENGINE || CPM1 || CPM2 || PPC_MPC512x || \ | 8 | depends on FSL_SOC || QUICC_ENGINE || CPM1 || CPM2 || PPC_MPC512x || \ |
9 | M523x || M527x || M5272 || M528x || M520x || M532x || \ | 9 | M523x || M527x || M5272 || M528x || M520x || M532x || \ |
10 | ARCH_MXC || ARCH_MXS || \ | 10 | ARCH_MXC || ARCH_MXS || (PPC_MPC52xx && PPC_BESTCOMM) |
11 | (PPC_MPC52xx && PPC_BESTCOMM) | ||
12 | ---help--- | 11 | ---help--- |
13 | If you have a network (Ethernet) card belonging to this class, say Y | 12 | If you have a network (Ethernet) card belonging to this class, say Y |
14 | and read the Ethernet-HOWTO, available from | 13 | and read the Ethernet-HOWTO, available from |
diff --git a/drivers/net/ethernet/intel/Kconfig b/drivers/net/ethernet/intel/Kconfig index 61029dc7fa6f..76213162fbe3 100644 --- a/drivers/net/ethernet/intel/Kconfig +++ b/drivers/net/ethernet/intel/Kconfig | |||
@@ -5,7 +5,11 @@ | |||
5 | config NET_VENDOR_INTEL | 5 | config NET_VENDOR_INTEL |
6 | bool "Intel devices" | 6 | bool "Intel devices" |
7 | default y | 7 | default y |
8 | depends on PCI || PCI_MSI | 8 | depends on PCI || PCI_MSI || ISA || ISA_DMA_API || ARM || \ |
9 | ARCH_ACORN || MCA || MCA_LEGACY || SNI_RM || SUN3 || \ | ||
10 | GSC || BVME6000 || MVME16x || ARCH_ENP2611 || \ | ||
11 | (ARM && ARCH_IXP4XX && IXP4XX_NPE && IXP4XX_QMGR) || \ | ||
12 | EXPERIMENTAL | ||
9 | ---help--- | 13 | ---help--- |
10 | If you have a network (Ethernet) card belonging to this class, say Y | 14 | If you have a network (Ethernet) card belonging to this class, say Y |
11 | and read the Ethernet-HOWTO, available from | 15 | and read the Ethernet-HOWTO, available from |
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c index db95731863d7..00fcd39ad666 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c | |||
@@ -442,12 +442,14 @@ static int ixgbe_set_vf_macvlan(struct ixgbe_adapter *adapter, | |||
442 | 442 | ||
443 | int ixgbe_check_vf_assignment(struct ixgbe_adapter *adapter) | 443 | int ixgbe_check_vf_assignment(struct ixgbe_adapter *adapter) |
444 | { | 444 | { |
445 | #ifdef CONFIG_PCI_IOV | ||
445 | int i; | 446 | int i; |
446 | for (i = 0; i < adapter->num_vfs; i++) { | 447 | for (i = 0; i < adapter->num_vfs; i++) { |
447 | if (adapter->vfinfo[i].vfdev->dev_flags & | 448 | if (adapter->vfinfo[i].vfdev->dev_flags & |
448 | PCI_DEV_FLAGS_ASSIGNED) | 449 | PCI_DEV_FLAGS_ASSIGNED) |
449 | return true; | 450 | return true; |
450 | } | 451 | } |
452 | #endif | ||
451 | return false; | 453 | return false; |
452 | } | 454 | } |
453 | 455 | ||
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.h index 4a5d8897faab..df04f1a3857c 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.h +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.h | |||
@@ -42,11 +42,11 @@ int ixgbe_ndo_set_vf_spoofchk(struct net_device *netdev, int vf, bool setting); | |||
42 | int ixgbe_ndo_get_vf_config(struct net_device *netdev, | 42 | int ixgbe_ndo_get_vf_config(struct net_device *netdev, |
43 | int vf, struct ifla_vf_info *ivi); | 43 | int vf, struct ifla_vf_info *ivi); |
44 | void ixgbe_check_vf_rate_limit(struct ixgbe_adapter *adapter); | 44 | void ixgbe_check_vf_rate_limit(struct ixgbe_adapter *adapter); |
45 | #ifdef CONFIG_PCI_IOV | ||
46 | void ixgbe_disable_sriov(struct ixgbe_adapter *adapter); | 45 | void ixgbe_disable_sriov(struct ixgbe_adapter *adapter); |
46 | int ixgbe_check_vf_assignment(struct ixgbe_adapter *adapter); | ||
47 | #ifdef CONFIG_PCI_IOV | ||
47 | void ixgbe_enable_sriov(struct ixgbe_adapter *adapter, | 48 | void ixgbe_enable_sriov(struct ixgbe_adapter *adapter, |
48 | const struct ixgbe_info *ii); | 49 | const struct ixgbe_info *ii); |
49 | int ixgbe_check_vf_assignment(struct ixgbe_adapter *adapter); | ||
50 | #endif | 50 | #endif |
51 | 51 | ||
52 | 52 | ||
diff --git a/drivers/net/ethernet/marvell/sky2.c b/drivers/net/ethernet/marvell/sky2.c index cbd026f3bc57..fdc6c394c683 100644 --- a/drivers/net/ethernet/marvell/sky2.c +++ b/drivers/net/ethernet/marvell/sky2.c | |||
@@ -366,17 +366,6 @@ static void sky2_phy_init(struct sky2_hw *hw, unsigned port) | |||
366 | gm_phy_write(hw, port, PHY_MARV_FE_SPEC_2, spec); | 366 | gm_phy_write(hw, port, PHY_MARV_FE_SPEC_2, spec); |
367 | } | 367 | } |
368 | } else { | 368 | } else { |
369 | if (hw->chip_id >= CHIP_ID_YUKON_OPT) { | ||
370 | u16 ctrl2 = gm_phy_read(hw, port, PHY_MARV_EXT_CTRL_2); | ||
371 | |||
372 | /* enable PHY Reverse Auto-Negotiation */ | ||
373 | ctrl2 |= 1u << 13; | ||
374 | |||
375 | /* Write PHY changes (SW-reset must follow) */ | ||
376 | gm_phy_write(hw, port, PHY_MARV_EXT_CTRL_2, ctrl2); | ||
377 | } | ||
378 | |||
379 | |||
380 | /* disable energy detect */ | 369 | /* disable energy detect */ |
381 | ctrl &= ~PHY_M_PC_EN_DET_MSK; | 370 | ctrl &= ~PHY_M_PC_EN_DET_MSK; |
382 | 371 | ||
diff --git a/drivers/net/ethernet/natsemi/Kconfig b/drivers/net/ethernet/natsemi/Kconfig index 4a6b9fd073b6..eb836f770f50 100644 --- a/drivers/net/ethernet/natsemi/Kconfig +++ b/drivers/net/ethernet/natsemi/Kconfig | |||
@@ -5,7 +5,10 @@ | |||
5 | config NET_VENDOR_NATSEMI | 5 | config NET_VENDOR_NATSEMI |
6 | bool "National Semi-conductor devices" | 6 | bool "National Semi-conductor devices" |
7 | default y | 7 | default y |
8 | depends on MCA || MAC || MACH_JAZZ || PCI || XTENSA_PLATFORM_XT2000 | 8 | depends on AMIGA_PCMCIA || ARM || EISA || EXPERIMENTAL || H8300 || \ |
9 | ISA || M32R || MAC || MACH_JAZZ || MACH_TX49XX || MCA || \ | ||
10 | MCA_LEGACY || MIPS || PCI || PCMCIA || SUPERH || \ | ||
11 | XTENSA_PLATFORM_XT2000 || ZORRO | ||
9 | ---help--- | 12 | ---help--- |
10 | If you have a network (Ethernet) card belonging to this class, say Y | 13 | If you have a network (Ethernet) card belonging to this class, say Y |
11 | and read the Ethernet-HOWTO, available from | 14 | and read the Ethernet-HOWTO, available from |
diff --git a/drivers/net/ethernet/nvidia/forcedeth.c b/drivers/net/ethernet/nvidia/forcedeth.c index 1e37eb98c4e2..1dca57013cb2 100644 --- a/drivers/net/ethernet/nvidia/forcedeth.c +++ b/drivers/net/ethernet/nvidia/forcedeth.c | |||
@@ -1682,6 +1682,7 @@ static void nv_get_hw_stats(struct net_device *dev) | |||
1682 | np->estats.tx_pause += readl(base + NvRegTxPause); | 1682 | np->estats.tx_pause += readl(base + NvRegTxPause); |
1683 | np->estats.rx_pause += readl(base + NvRegRxPause); | 1683 | np->estats.rx_pause += readl(base + NvRegRxPause); |
1684 | np->estats.rx_drop_frame += readl(base + NvRegRxDropFrame); | 1684 | np->estats.rx_drop_frame += readl(base + NvRegRxDropFrame); |
1685 | np->estats.rx_errors_total += np->estats.rx_drop_frame; | ||
1685 | } | 1686 | } |
1686 | 1687 | ||
1687 | if (np->driver_data & DEV_HAS_STATISTICS_V3) { | 1688 | if (np->driver_data & DEV_HAS_STATISTICS_V3) { |
@@ -1706,11 +1707,14 @@ static struct net_device_stats *nv_get_stats(struct net_device *dev) | |||
1706 | nv_get_hw_stats(dev); | 1707 | nv_get_hw_stats(dev); |
1707 | 1708 | ||
1708 | /* copy to net_device stats */ | 1709 | /* copy to net_device stats */ |
1710 | dev->stats.tx_packets = np->estats.tx_packets; | ||
1711 | dev->stats.rx_bytes = np->estats.rx_bytes; | ||
1709 | dev->stats.tx_bytes = np->estats.tx_bytes; | 1712 | dev->stats.tx_bytes = np->estats.tx_bytes; |
1710 | dev->stats.tx_fifo_errors = np->estats.tx_fifo_errors; | 1713 | dev->stats.tx_fifo_errors = np->estats.tx_fifo_errors; |
1711 | dev->stats.tx_carrier_errors = np->estats.tx_carrier_errors; | 1714 | dev->stats.tx_carrier_errors = np->estats.tx_carrier_errors; |
1712 | dev->stats.rx_crc_errors = np->estats.rx_crc_errors; | 1715 | dev->stats.rx_crc_errors = np->estats.rx_crc_errors; |
1713 | dev->stats.rx_over_errors = np->estats.rx_over_errors; | 1716 | dev->stats.rx_over_errors = np->estats.rx_over_errors; |
1717 | dev->stats.rx_fifo_errors = np->estats.rx_drop_frame; | ||
1714 | dev->stats.rx_errors = np->estats.rx_errors_total; | 1718 | dev->stats.rx_errors = np->estats.rx_errors_total; |
1715 | dev->stats.tx_errors = np->estats.tx_errors_total; | 1719 | dev->stats.tx_errors = np->estats.tx_errors_total; |
1716 | } | 1720 | } |
@@ -2099,10 +2103,10 @@ static netdev_tx_t nv_start_xmit(struct sk_buff *skb, struct net_device *dev) | |||
2099 | 2103 | ||
2100 | /* add fragments to entries count */ | 2104 | /* add fragments to entries count */ |
2101 | for (i = 0; i < fragments; i++) { | 2105 | for (i = 0; i < fragments; i++) { |
2102 | u32 size = skb_frag_size(&skb_shinfo(skb)->frags[i]); | 2106 | u32 frag_size = skb_frag_size(&skb_shinfo(skb)->frags[i]); |
2103 | 2107 | ||
2104 | entries += (size >> NV_TX2_TSO_MAX_SHIFT) + | 2108 | entries += (frag_size >> NV_TX2_TSO_MAX_SHIFT) + |
2105 | ((size & (NV_TX2_TSO_MAX_SIZE-1)) ? 1 : 0); | 2109 | ((frag_size & (NV_TX2_TSO_MAX_SIZE-1)) ? 1 : 0); |
2106 | } | 2110 | } |
2107 | 2111 | ||
2108 | spin_lock_irqsave(&np->lock, flags); | 2112 | spin_lock_irqsave(&np->lock, flags); |
@@ -2141,13 +2145,13 @@ static netdev_tx_t nv_start_xmit(struct sk_buff *skb, struct net_device *dev) | |||
2141 | /* setup the fragments */ | 2145 | /* setup the fragments */ |
2142 | for (i = 0; i < fragments; i++) { | 2146 | for (i = 0; i < fragments; i++) { |
2143 | const skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; | 2147 | const skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; |
2144 | u32 size = skb_frag_size(frag); | 2148 | u32 frag_size = skb_frag_size(frag); |
2145 | offset = 0; | 2149 | offset = 0; |
2146 | 2150 | ||
2147 | do { | 2151 | do { |
2148 | prev_tx = put_tx; | 2152 | prev_tx = put_tx; |
2149 | prev_tx_ctx = np->put_tx_ctx; | 2153 | prev_tx_ctx = np->put_tx_ctx; |
2150 | bcnt = (size > NV_TX2_TSO_MAX_SIZE) ? NV_TX2_TSO_MAX_SIZE : size; | 2154 | bcnt = (frag_size > NV_TX2_TSO_MAX_SIZE) ? NV_TX2_TSO_MAX_SIZE : frag_size; |
2151 | np->put_tx_ctx->dma = skb_frag_dma_map( | 2155 | np->put_tx_ctx->dma = skb_frag_dma_map( |
2152 | &np->pci_dev->dev, | 2156 | &np->pci_dev->dev, |
2153 | frag, offset, | 2157 | frag, offset, |
@@ -2159,12 +2163,12 @@ static netdev_tx_t nv_start_xmit(struct sk_buff *skb, struct net_device *dev) | |||
2159 | put_tx->flaglen = cpu_to_le32((bcnt-1) | tx_flags); | 2163 | put_tx->flaglen = cpu_to_le32((bcnt-1) | tx_flags); |
2160 | 2164 | ||
2161 | offset += bcnt; | 2165 | offset += bcnt; |
2162 | size -= bcnt; | 2166 | frag_size -= bcnt; |
2163 | if (unlikely(put_tx++ == np->last_tx.orig)) | 2167 | if (unlikely(put_tx++ == np->last_tx.orig)) |
2164 | put_tx = np->first_tx.orig; | 2168 | put_tx = np->first_tx.orig; |
2165 | if (unlikely(np->put_tx_ctx++ == np->last_tx_ctx)) | 2169 | if (unlikely(np->put_tx_ctx++ == np->last_tx_ctx)) |
2166 | np->put_tx_ctx = np->first_tx_ctx; | 2170 | np->put_tx_ctx = np->first_tx_ctx; |
2167 | } while (size); | 2171 | } while (frag_size); |
2168 | } | 2172 | } |
2169 | 2173 | ||
2170 | /* set last fragment flag */ | 2174 | /* set last fragment flag */ |
@@ -2213,10 +2217,10 @@ static netdev_tx_t nv_start_xmit_optimized(struct sk_buff *skb, | |||
2213 | 2217 | ||
2214 | /* add fragments to entries count */ | 2218 | /* add fragments to entries count */ |
2215 | for (i = 0; i < fragments; i++) { | 2219 | for (i = 0; i < fragments; i++) { |
2216 | u32 size = skb_frag_size(&skb_shinfo(skb)->frags[i]); | 2220 | u32 frag_size = skb_frag_size(&skb_shinfo(skb)->frags[i]); |
2217 | 2221 | ||
2218 | entries += (size >> NV_TX2_TSO_MAX_SHIFT) + | 2222 | entries += (frag_size >> NV_TX2_TSO_MAX_SHIFT) + |
2219 | ((size & (NV_TX2_TSO_MAX_SIZE-1)) ? 1 : 0); | 2223 | ((frag_size & (NV_TX2_TSO_MAX_SIZE-1)) ? 1 : 0); |
2220 | } | 2224 | } |
2221 | 2225 | ||
2222 | spin_lock_irqsave(&np->lock, flags); | 2226 | spin_lock_irqsave(&np->lock, flags); |
@@ -2257,13 +2261,13 @@ static netdev_tx_t nv_start_xmit_optimized(struct sk_buff *skb, | |||
2257 | /* setup the fragments */ | 2261 | /* setup the fragments */ |
2258 | for (i = 0; i < fragments; i++) { | 2262 | for (i = 0; i < fragments; i++) { |
2259 | skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; | 2263 | skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; |
2260 | u32 size = skb_frag_size(frag); | 2264 | u32 frag_size = skb_frag_size(frag); |
2261 | offset = 0; | 2265 | offset = 0; |
2262 | 2266 | ||
2263 | do { | 2267 | do { |
2264 | prev_tx = put_tx; | 2268 | prev_tx = put_tx; |
2265 | prev_tx_ctx = np->put_tx_ctx; | 2269 | prev_tx_ctx = np->put_tx_ctx; |
2266 | bcnt = (size > NV_TX2_TSO_MAX_SIZE) ? NV_TX2_TSO_MAX_SIZE : size; | 2270 | bcnt = (frag_size > NV_TX2_TSO_MAX_SIZE) ? NV_TX2_TSO_MAX_SIZE : frag_size; |
2267 | np->put_tx_ctx->dma = skb_frag_dma_map( | 2271 | np->put_tx_ctx->dma = skb_frag_dma_map( |
2268 | &np->pci_dev->dev, | 2272 | &np->pci_dev->dev, |
2269 | frag, offset, | 2273 | frag, offset, |
@@ -2276,12 +2280,12 @@ static netdev_tx_t nv_start_xmit_optimized(struct sk_buff *skb, | |||
2276 | put_tx->flaglen = cpu_to_le32((bcnt-1) | tx_flags); | 2280 | put_tx->flaglen = cpu_to_le32((bcnt-1) | tx_flags); |
2277 | 2281 | ||
2278 | offset += bcnt; | 2282 | offset += bcnt; |
2279 | size -= bcnt; | 2283 | frag_size -= bcnt; |
2280 | if (unlikely(put_tx++ == np->last_tx.ex)) | 2284 | if (unlikely(put_tx++ == np->last_tx.ex)) |
2281 | put_tx = np->first_tx.ex; | 2285 | put_tx = np->first_tx.ex; |
2282 | if (unlikely(np->put_tx_ctx++ == np->last_tx_ctx)) | 2286 | if (unlikely(np->put_tx_ctx++ == np->last_tx_ctx)) |
2283 | np->put_tx_ctx = np->first_tx_ctx; | 2287 | np->put_tx_ctx = np->first_tx_ctx; |
2284 | } while (size); | 2288 | } while (frag_size); |
2285 | } | 2289 | } |
2286 | 2290 | ||
2287 | /* set last fragment flag */ | 2291 | /* set last fragment flag */ |
@@ -2374,16 +2378,8 @@ static int nv_tx_done(struct net_device *dev, int limit) | |||
2374 | if (np->desc_ver == DESC_VER_1) { | 2378 | if (np->desc_ver == DESC_VER_1) { |
2375 | if (flags & NV_TX_LASTPACKET) { | 2379 | if (flags & NV_TX_LASTPACKET) { |
2376 | if (flags & NV_TX_ERROR) { | 2380 | if (flags & NV_TX_ERROR) { |
2377 | if (flags & NV_TX_UNDERFLOW) | ||
2378 | dev->stats.tx_fifo_errors++; | ||
2379 | if (flags & NV_TX_CARRIERLOST) | ||
2380 | dev->stats.tx_carrier_errors++; | ||
2381 | if ((flags & NV_TX_RETRYERROR) && !(flags & NV_TX_RETRYCOUNT_MASK)) | 2381 | if ((flags & NV_TX_RETRYERROR) && !(flags & NV_TX_RETRYCOUNT_MASK)) |
2382 | nv_legacybackoff_reseed(dev); | 2382 | nv_legacybackoff_reseed(dev); |
2383 | dev->stats.tx_errors++; | ||
2384 | } else { | ||
2385 | dev->stats.tx_packets++; | ||
2386 | dev->stats.tx_bytes += np->get_tx_ctx->skb->len; | ||
2387 | } | 2383 | } |
2388 | dev_kfree_skb_any(np->get_tx_ctx->skb); | 2384 | dev_kfree_skb_any(np->get_tx_ctx->skb); |
2389 | np->get_tx_ctx->skb = NULL; | 2385 | np->get_tx_ctx->skb = NULL; |
@@ -2392,16 +2388,8 @@ static int nv_tx_done(struct net_device *dev, int limit) | |||
2392 | } else { | 2388 | } else { |
2393 | if (flags & NV_TX2_LASTPACKET) { | 2389 | if (flags & NV_TX2_LASTPACKET) { |
2394 | if (flags & NV_TX2_ERROR) { | 2390 | if (flags & NV_TX2_ERROR) { |
2395 | if (flags & NV_TX2_UNDERFLOW) | ||
2396 | dev->stats.tx_fifo_errors++; | ||
2397 | if (flags & NV_TX2_CARRIERLOST) | ||
2398 | dev->stats.tx_carrier_errors++; | ||
2399 | if ((flags & NV_TX2_RETRYERROR) && !(flags & NV_TX2_RETRYCOUNT_MASK)) | 2391 | if ((flags & NV_TX2_RETRYERROR) && !(flags & NV_TX2_RETRYCOUNT_MASK)) |
2400 | nv_legacybackoff_reseed(dev); | 2392 | nv_legacybackoff_reseed(dev); |
2401 | dev->stats.tx_errors++; | ||
2402 | } else { | ||
2403 | dev->stats.tx_packets++; | ||
2404 | dev->stats.tx_bytes += np->get_tx_ctx->skb->len; | ||
2405 | } | 2393 | } |
2406 | dev_kfree_skb_any(np->get_tx_ctx->skb); | 2394 | dev_kfree_skb_any(np->get_tx_ctx->skb); |
2407 | np->get_tx_ctx->skb = NULL; | 2395 | np->get_tx_ctx->skb = NULL; |
@@ -2434,9 +2422,7 @@ static int nv_tx_done_optimized(struct net_device *dev, int limit) | |||
2434 | nv_unmap_txskb(np, np->get_tx_ctx); | 2422 | nv_unmap_txskb(np, np->get_tx_ctx); |
2435 | 2423 | ||
2436 | if (flags & NV_TX2_LASTPACKET) { | 2424 | if (flags & NV_TX2_LASTPACKET) { |
2437 | if (!(flags & NV_TX2_ERROR)) | 2425 | if (flags & NV_TX2_ERROR) { |
2438 | dev->stats.tx_packets++; | ||
2439 | else { | ||
2440 | if ((flags & NV_TX2_RETRYERROR) && !(flags & NV_TX2_RETRYCOUNT_MASK)) { | 2426 | if ((flags & NV_TX2_RETRYERROR) && !(flags & NV_TX2_RETRYCOUNT_MASK)) { |
2441 | if (np->driver_data & DEV_HAS_GEAR_MODE) | 2427 | if (np->driver_data & DEV_HAS_GEAR_MODE) |
2442 | nv_gear_backoff_reseed(dev); | 2428 | nv_gear_backoff_reseed(dev); |
@@ -2636,7 +2622,6 @@ static int nv_rx_process(struct net_device *dev, int limit) | |||
2636 | if ((flags & NV_RX_ERROR_MASK) == NV_RX_ERROR4) { | 2622 | if ((flags & NV_RX_ERROR_MASK) == NV_RX_ERROR4) { |
2637 | len = nv_getlen(dev, skb->data, len); | 2623 | len = nv_getlen(dev, skb->data, len); |
2638 | if (len < 0) { | 2624 | if (len < 0) { |
2639 | dev->stats.rx_errors++; | ||
2640 | dev_kfree_skb(skb); | 2625 | dev_kfree_skb(skb); |
2641 | goto next_pkt; | 2626 | goto next_pkt; |
2642 | } | 2627 | } |
@@ -2650,11 +2635,6 @@ static int nv_rx_process(struct net_device *dev, int limit) | |||
2650 | else { | 2635 | else { |
2651 | if (flags & NV_RX_MISSEDFRAME) | 2636 | if (flags & NV_RX_MISSEDFRAME) |
2652 | dev->stats.rx_missed_errors++; | 2637 | dev->stats.rx_missed_errors++; |
2653 | if (flags & NV_RX_CRCERR) | ||
2654 | dev->stats.rx_crc_errors++; | ||
2655 | if (flags & NV_RX_OVERFLOW) | ||
2656 | dev->stats.rx_over_errors++; | ||
2657 | dev->stats.rx_errors++; | ||
2658 | dev_kfree_skb(skb); | 2638 | dev_kfree_skb(skb); |
2659 | goto next_pkt; | 2639 | goto next_pkt; |
2660 | } | 2640 | } |
@@ -2670,7 +2650,6 @@ static int nv_rx_process(struct net_device *dev, int limit) | |||
2670 | if ((flags & NV_RX2_ERROR_MASK) == NV_RX2_ERROR4) { | 2650 | if ((flags & NV_RX2_ERROR_MASK) == NV_RX2_ERROR4) { |
2671 | len = nv_getlen(dev, skb->data, len); | 2651 | len = nv_getlen(dev, skb->data, len); |
2672 | if (len < 0) { | 2652 | if (len < 0) { |
2673 | dev->stats.rx_errors++; | ||
2674 | dev_kfree_skb(skb); | 2653 | dev_kfree_skb(skb); |
2675 | goto next_pkt; | 2654 | goto next_pkt; |
2676 | } | 2655 | } |
@@ -2682,11 +2661,6 @@ static int nv_rx_process(struct net_device *dev, int limit) | |||
2682 | } | 2661 | } |
2683 | /* the rest are hard errors */ | 2662 | /* the rest are hard errors */ |
2684 | else { | 2663 | else { |
2685 | if (flags & NV_RX2_CRCERR) | ||
2686 | dev->stats.rx_crc_errors++; | ||
2687 | if (flags & NV_RX2_OVERFLOW) | ||
2688 | dev->stats.rx_over_errors++; | ||
2689 | dev->stats.rx_errors++; | ||
2690 | dev_kfree_skb(skb); | 2664 | dev_kfree_skb(skb); |
2691 | goto next_pkt; | 2665 | goto next_pkt; |
2692 | } | 2666 | } |
@@ -2704,7 +2678,6 @@ static int nv_rx_process(struct net_device *dev, int limit) | |||
2704 | skb->protocol = eth_type_trans(skb, dev); | 2678 | skb->protocol = eth_type_trans(skb, dev); |
2705 | napi_gro_receive(&np->napi, skb); | 2679 | napi_gro_receive(&np->napi, skb); |
2706 | dev->stats.rx_packets++; | 2680 | dev->stats.rx_packets++; |
2707 | dev->stats.rx_bytes += len; | ||
2708 | next_pkt: | 2681 | next_pkt: |
2709 | if (unlikely(np->get_rx.orig++ == np->last_rx.orig)) | 2682 | if (unlikely(np->get_rx.orig++ == np->last_rx.orig)) |
2710 | np->get_rx.orig = np->first_rx.orig; | 2683 | np->get_rx.orig = np->first_rx.orig; |
@@ -2787,9 +2760,7 @@ static int nv_rx_process_optimized(struct net_device *dev, int limit) | |||
2787 | __vlan_hwaccel_put_tag(skb, vid); | 2760 | __vlan_hwaccel_put_tag(skb, vid); |
2788 | } | 2761 | } |
2789 | napi_gro_receive(&np->napi, skb); | 2762 | napi_gro_receive(&np->napi, skb); |
2790 | |||
2791 | dev->stats.rx_packets++; | 2763 | dev->stats.rx_packets++; |
2792 | dev->stats.rx_bytes += len; | ||
2793 | } else { | 2764 | } else { |
2794 | dev_kfree_skb(skb); | 2765 | dev_kfree_skb(skb); |
2795 | } | 2766 | } |
@@ -2962,11 +2933,11 @@ static void nv_set_multicast(struct net_device *dev) | |||
2962 | struct netdev_hw_addr *ha; | 2933 | struct netdev_hw_addr *ha; |
2963 | 2934 | ||
2964 | netdev_for_each_mc_addr(ha, dev) { | 2935 | netdev_for_each_mc_addr(ha, dev) { |
2965 | unsigned char *addr = ha->addr; | 2936 | unsigned char *hw_addr = ha->addr; |
2966 | u32 a, b; | 2937 | u32 a, b; |
2967 | 2938 | ||
2968 | a = le32_to_cpu(*(__le32 *) addr); | 2939 | a = le32_to_cpu(*(__le32 *) hw_addr); |
2969 | b = le16_to_cpu(*(__le16 *) (&addr[4])); | 2940 | b = le16_to_cpu(*(__le16 *) (&hw_addr[4])); |
2970 | alwaysOn[0] &= a; | 2941 | alwaysOn[0] &= a; |
2971 | alwaysOff[0] &= ~a; | 2942 | alwaysOff[0] &= ~a; |
2972 | alwaysOn[1] &= b; | 2943 | alwaysOn[1] &= b; |
@@ -3398,7 +3369,8 @@ static irqreturn_t nv_nic_irq_tx(int foo, void *data) | |||
3398 | 3369 | ||
3399 | for (i = 0;; i++) { | 3370 | for (i = 0;; i++) { |
3400 | events = readl(base + NvRegMSIXIrqStatus) & NVREG_IRQ_TX_ALL; | 3371 | events = readl(base + NvRegMSIXIrqStatus) & NVREG_IRQ_TX_ALL; |
3401 | writel(NVREG_IRQ_TX_ALL, base + NvRegMSIXIrqStatus); | 3372 | writel(events, base + NvRegMSIXIrqStatus); |
3373 | netdev_dbg(dev, "tx irq events: %08x\n", events); | ||
3402 | if (!(events & np->irqmask)) | 3374 | if (!(events & np->irqmask)) |
3403 | break; | 3375 | break; |
3404 | 3376 | ||
@@ -3509,7 +3481,8 @@ static irqreturn_t nv_nic_irq_rx(int foo, void *data) | |||
3509 | 3481 | ||
3510 | for (i = 0;; i++) { | 3482 | for (i = 0;; i++) { |
3511 | events = readl(base + NvRegMSIXIrqStatus) & NVREG_IRQ_RX_ALL; | 3483 | events = readl(base + NvRegMSIXIrqStatus) & NVREG_IRQ_RX_ALL; |
3512 | writel(NVREG_IRQ_RX_ALL, base + NvRegMSIXIrqStatus); | 3484 | writel(events, base + NvRegMSIXIrqStatus); |
3485 | netdev_dbg(dev, "rx irq events: %08x\n", events); | ||
3513 | if (!(events & np->irqmask)) | 3486 | if (!(events & np->irqmask)) |
3514 | break; | 3487 | break; |
3515 | 3488 | ||
@@ -3553,7 +3526,8 @@ static irqreturn_t nv_nic_irq_other(int foo, void *data) | |||
3553 | 3526 | ||
3554 | for (i = 0;; i++) { | 3527 | for (i = 0;; i++) { |
3555 | events = readl(base + NvRegMSIXIrqStatus) & NVREG_IRQ_OTHER; | 3528 | events = readl(base + NvRegMSIXIrqStatus) & NVREG_IRQ_OTHER; |
3556 | writel(NVREG_IRQ_OTHER, base + NvRegMSIXIrqStatus); | 3529 | writel(events, base + NvRegMSIXIrqStatus); |
3530 | netdev_dbg(dev, "irq events: %08x\n", events); | ||
3557 | if (!(events & np->irqmask)) | 3531 | if (!(events & np->irqmask)) |
3558 | break; | 3532 | break; |
3559 | 3533 | ||
@@ -3617,10 +3591,10 @@ static irqreturn_t nv_nic_irq_test(int foo, void *data) | |||
3617 | 3591 | ||
3618 | if (!(np->msi_flags & NV_MSI_X_ENABLED)) { | 3592 | if (!(np->msi_flags & NV_MSI_X_ENABLED)) { |
3619 | events = readl(base + NvRegIrqStatus) & NVREG_IRQSTAT_MASK; | 3593 | events = readl(base + NvRegIrqStatus) & NVREG_IRQSTAT_MASK; |
3620 | writel(NVREG_IRQ_TIMER, base + NvRegIrqStatus); | 3594 | writel(events & NVREG_IRQ_TIMER, base + NvRegIrqStatus); |
3621 | } else { | 3595 | } else { |
3622 | events = readl(base + NvRegMSIXIrqStatus) & NVREG_IRQSTAT_MASK; | 3596 | events = readl(base + NvRegMSIXIrqStatus) & NVREG_IRQSTAT_MASK; |
3623 | writel(NVREG_IRQ_TIMER, base + NvRegMSIXIrqStatus); | 3597 | writel(events & NVREG_IRQ_TIMER, base + NvRegMSIXIrqStatus); |
3624 | } | 3598 | } |
3625 | pci_push(base); | 3599 | pci_push(base); |
3626 | if (!(events & NVREG_IRQ_TIMER)) | 3600 | if (!(events & NVREG_IRQ_TIMER)) |
@@ -4566,7 +4540,7 @@ static void nv_get_ethtool_stats(struct net_device *dev, struct ethtool_stats *e | |||
4566 | struct fe_priv *np = netdev_priv(dev); | 4540 | struct fe_priv *np = netdev_priv(dev); |
4567 | 4541 | ||
4568 | /* update stats */ | 4542 | /* update stats */ |
4569 | nv_do_stats_poll((unsigned long)dev); | 4543 | nv_get_hw_stats(dev); |
4570 | 4544 | ||
4571 | memcpy(buffer, &np->estats, nv_get_sset_count(dev, ETH_SS_STATS)*sizeof(u64)); | 4545 | memcpy(buffer, &np->estats, nv_get_sset_count(dev, ETH_SS_STATS)*sizeof(u64)); |
4572 | } | 4546 | } |
diff --git a/drivers/net/macvlan.c b/drivers/net/macvlan.c index a3ce3d4561ed..74134970b709 100644 --- a/drivers/net/macvlan.c +++ b/drivers/net/macvlan.c | |||
@@ -192,6 +192,13 @@ static rx_handler_result_t macvlan_handle_frame(struct sk_buff **pskb) | |||
192 | */ | 192 | */ |
193 | macvlan_broadcast(skb, port, src->dev, | 193 | macvlan_broadcast(skb, port, src->dev, |
194 | MACVLAN_MODE_VEPA); | 194 | MACVLAN_MODE_VEPA); |
195 | else { | ||
196 | /* forward to original port. */ | ||
197 | vlan = src; | ||
198 | ret = macvlan_broadcast_one(skb, vlan, eth, 0); | ||
199 | goto out; | ||
200 | } | ||
201 | |||
195 | return RX_HANDLER_PASS; | 202 | return RX_HANDLER_PASS; |
196 | } | 203 | } |
197 | 204 | ||
diff --git a/drivers/net/usb/usbnet.c b/drivers/net/usb/usbnet.c index 7d6082160bcc..fae0fbd8bc88 100644 --- a/drivers/net/usb/usbnet.c +++ b/drivers/net/usb/usbnet.c | |||
@@ -1057,7 +1057,8 @@ netdev_tx_t usbnet_start_xmit (struct sk_buff *skb, | |||
1057 | unsigned long flags; | 1057 | unsigned long flags; |
1058 | int retval; | 1058 | int retval; |
1059 | 1059 | ||
1060 | skb_tx_timestamp(skb); | 1060 | if (skb) |
1061 | skb_tx_timestamp(skb); | ||
1061 | 1062 | ||
1062 | // some devices want funky USB-level framing, for | 1063 | // some devices want funky USB-level framing, for |
1063 | // win32 driver (usually) and/or hardware quirks | 1064 | // win32 driver (usually) and/or hardware quirks |
diff --git a/drivers/net/wireless/ath/ath9k/ar9002_calib.c b/drivers/net/wireless/ath/ath9k/ar9002_calib.c index e0ab0657cc3a..88279e325dca 100644 --- a/drivers/net/wireless/ath/ath9k/ar9002_calib.c +++ b/drivers/net/wireless/ath/ath9k/ar9002_calib.c | |||
@@ -868,10 +868,6 @@ static bool ar9002_hw_init_cal(struct ath_hw *ah, struct ath9k_channel *chan) | |||
868 | /* Do PA Calibration */ | 868 | /* Do PA Calibration */ |
869 | ar9002_hw_pa_cal(ah, true); | 869 | ar9002_hw_pa_cal(ah, true); |
870 | 870 | ||
871 | /* Do NF Calibration after DC offset and other calibrations */ | ||
872 | ath9k_hw_loadnf(ah, chan); | ||
873 | ath9k_hw_start_nfcal(ah, true); | ||
874 | |||
875 | if (ah->caldata) | 871 | if (ah->caldata) |
876 | ah->caldata->nfcal_pending = true; | 872 | ah->caldata->nfcal_pending = true; |
877 | 873 | ||
diff --git a/drivers/net/wireless/ath/ath9k/ar9003_calib.c b/drivers/net/wireless/ath/ath9k/ar9003_calib.c index 16851cb109a6..12a730dcb500 100644 --- a/drivers/net/wireless/ath/ath9k/ar9003_calib.c +++ b/drivers/net/wireless/ath/ath9k/ar9003_calib.c | |||
@@ -908,12 +908,15 @@ static bool ar9003_hw_rtt_restore(struct ath_hw *ah, struct ath9k_channel *chan) | |||
908 | int i; | 908 | int i; |
909 | bool restore; | 909 | bool restore; |
910 | 910 | ||
911 | if (!(ah->caps.hw_caps & ATH9K_HW_CAP_RTT) || !ah->caldata) | 911 | if (!ah->caldata) |
912 | return false; | 912 | return false; |
913 | 913 | ||
914 | hist = &ah->caldata->rtt_hist; | 914 | hist = &ah->caldata->rtt_hist; |
915 | if (!hist->num_readings) | ||
916 | return false; | ||
917 | |||
915 | ar9003_hw_rtt_enable(ah); | 918 | ar9003_hw_rtt_enable(ah); |
916 | ar9003_hw_rtt_set_mask(ah, 0x10); | 919 | ar9003_hw_rtt_set_mask(ah, 0x00); |
917 | for (i = 0; i < AR9300_MAX_CHAINS; i++) { | 920 | for (i = 0; i < AR9300_MAX_CHAINS; i++) { |
918 | if (!(ah->rxchainmask & (1 << i))) | 921 | if (!(ah->rxchainmask & (1 << i))) |
919 | continue; | 922 | continue; |
@@ -1070,6 +1073,7 @@ skip_tx_iqcal: | |||
1070 | if (is_reusable && (hist->num_readings < RTT_HIST_MAX)) { | 1073 | if (is_reusable && (hist->num_readings < RTT_HIST_MAX)) { |
1071 | u32 *table; | 1074 | u32 *table; |
1072 | 1075 | ||
1076 | hist->num_readings++; | ||
1073 | for (i = 0; i < AR9300_MAX_CHAINS; i++) { | 1077 | for (i = 0; i < AR9300_MAX_CHAINS; i++) { |
1074 | if (!(ah->rxchainmask & (1 << i))) | 1078 | if (!(ah->rxchainmask & (1 << i))) |
1075 | continue; | 1079 | continue; |
@@ -1081,9 +1085,6 @@ skip_tx_iqcal: | |||
1081 | ar9003_hw_rtt_disable(ah); | 1085 | ar9003_hw_rtt_disable(ah); |
1082 | } | 1086 | } |
1083 | 1087 | ||
1084 | ath9k_hw_loadnf(ah, chan); | ||
1085 | ath9k_hw_start_nfcal(ah, true); | ||
1086 | |||
1087 | /* Initialize list pointers */ | 1088 | /* Initialize list pointers */ |
1088 | ah->cal_list = ah->cal_list_last = ah->cal_list_curr = NULL; | 1089 | ah->cal_list = ah->cal_list_last = ah->cal_list_curr = NULL; |
1089 | ah->supp_cals = IQ_MISMATCH_CAL; | 1090 | ah->supp_cals = IQ_MISMATCH_CAL; |
diff --git a/drivers/net/wireless/ath/ath9k/ar9003_phy.h b/drivers/net/wireless/ath/ath9k/ar9003_phy.h index 2f4023e66081..4114fe752c6b 100644 --- a/drivers/net/wireless/ath/ath9k/ar9003_phy.h +++ b/drivers/net/wireless/ath/ath9k/ar9003_phy.h | |||
@@ -572,14 +572,14 @@ | |||
572 | 572 | ||
573 | #define AR_PHY_TXGAIN_TABLE (AR_SM_BASE + 0x300) | 573 | #define AR_PHY_TXGAIN_TABLE (AR_SM_BASE + 0x300) |
574 | 574 | ||
575 | #define AR_PHY_TX_IQCAL_CONTROL_0 (AR_SM_BASE + AR_SREV_9485(ah) ? \ | 575 | #define AR_PHY_TX_IQCAL_CONTROL_0 (AR_SM_BASE + (AR_SREV_9485(ah) ? \ |
576 | 0x3c4 : 0x444) | 576 | 0x3c4 : 0x444)) |
577 | #define AR_PHY_TX_IQCAL_CONTROL_1 (AR_SM_BASE + AR_SREV_9485(ah) ? \ | 577 | #define AR_PHY_TX_IQCAL_CONTROL_1 (AR_SM_BASE + (AR_SREV_9485(ah) ? \ |
578 | 0x3c8 : 0x448) | 578 | 0x3c8 : 0x448)) |
579 | #define AR_PHY_TX_IQCAL_START (AR_SM_BASE + AR_SREV_9485(ah) ? \ | 579 | #define AR_PHY_TX_IQCAL_START (AR_SM_BASE + (AR_SREV_9485(ah) ? \ |
580 | 0x3c4 : 0x440) | 580 | 0x3c4 : 0x440)) |
581 | #define AR_PHY_TX_IQCAL_STATUS_B0 (AR_SM_BASE + AR_SREV_9485(ah) ? \ | 581 | #define AR_PHY_TX_IQCAL_STATUS_B0 (AR_SM_BASE + (AR_SREV_9485(ah) ? \ |
582 | 0x3f0 : 0x48c) | 582 | 0x3f0 : 0x48c)) |
583 | #define AR_PHY_TX_IQCAL_CORR_COEFF_B0(_i) (AR_SM_BASE + \ | 583 | #define AR_PHY_TX_IQCAL_CORR_COEFF_B0(_i) (AR_SM_BASE + \ |
584 | (AR_SREV_9485(ah) ? \ | 584 | (AR_SREV_9485(ah) ? \ |
585 | 0x3d0 : 0x450) + ((_i) << 2)) | 585 | 0x3d0 : 0x450) + ((_i) << 2)) |
@@ -651,7 +651,7 @@ | |||
651 | #define AR_SWITCH_TABLE_ALL_S (0) | 651 | #define AR_SWITCH_TABLE_ALL_S (0) |
652 | 652 | ||
653 | #define AR_PHY_65NM_CH0_THERM (AR_SREV_9300(ah) ? 0x16290 :\ | 653 | #define AR_PHY_65NM_CH0_THERM (AR_SREV_9300(ah) ? 0x16290 :\ |
654 | (AR_SREV_9485(ah) ? 0x1628c : 0x16294)) | 654 | (AR_SREV_9462(ah) ? 0x16294 : 0x1628c)) |
655 | 655 | ||
656 | #define AR_PHY_65NM_CH0_THERM_LOCAL 0x80000000 | 656 | #define AR_PHY_65NM_CH0_THERM_LOCAL 0x80000000 |
657 | #define AR_PHY_65NM_CH0_THERM_LOCAL_S 31 | 657 | #define AR_PHY_65NM_CH0_THERM_LOCAL_S 31 |
@@ -668,12 +668,12 @@ | |||
668 | #define AR_PHY_65NM_CH2_RXTX2 0x16904 | 668 | #define AR_PHY_65NM_CH2_RXTX2 0x16904 |
669 | 669 | ||
670 | #define AR_CH0_TOP2 (AR_SREV_9300(ah) ? 0x1628c : \ | 670 | #define AR_CH0_TOP2 (AR_SREV_9300(ah) ? 0x1628c : \ |
671 | (AR_SREV_9485(ah) ? 0x16284 : 0x16290)) | 671 | (AR_SREV_9462(ah) ? 0x16290 : 0x16284)) |
672 | #define AR_CH0_TOP2_XPABIASLVL 0xf000 | 672 | #define AR_CH0_TOP2_XPABIASLVL 0xf000 |
673 | #define AR_CH0_TOP2_XPABIASLVL_S 12 | 673 | #define AR_CH0_TOP2_XPABIASLVL_S 12 |
674 | 674 | ||
675 | #define AR_CH0_XTAL (AR_SREV_9300(ah) ? 0x16294 : \ | 675 | #define AR_CH0_XTAL (AR_SREV_9300(ah) ? 0x16294 : \ |
676 | (AR_SREV_9485(ah) ? 0x16290 : 0x16298)) | 676 | (AR_SREV_9462(ah) ? 0x16298 : 0x16290)) |
677 | #define AR_CH0_XTAL_CAPINDAC 0x7f000000 | 677 | #define AR_CH0_XTAL_CAPINDAC 0x7f000000 |
678 | #define AR_CH0_XTAL_CAPINDAC_S 24 | 678 | #define AR_CH0_XTAL_CAPINDAC_S 24 |
679 | #define AR_CH0_XTAL_CAPOUTDAC 0x00fe0000 | 679 | #define AR_CH0_XTAL_CAPOUTDAC 0x00fe0000 |
@@ -908,8 +908,8 @@ | |||
908 | #define AR_PHY_TPC_5_B1 (AR_SM1_BASE + 0x208) | 908 | #define AR_PHY_TPC_5_B1 (AR_SM1_BASE + 0x208) |
909 | #define AR_PHY_TPC_6_B1 (AR_SM1_BASE + 0x20c) | 909 | #define AR_PHY_TPC_6_B1 (AR_SM1_BASE + 0x20c) |
910 | #define AR_PHY_TPC_11_B1 (AR_SM1_BASE + 0x220) | 910 | #define AR_PHY_TPC_11_B1 (AR_SM1_BASE + 0x220) |
911 | #define AR_PHY_PDADC_TAB_1 (AR_SM1_BASE + (AR_SREV_AR9300(ah) ? \ | 911 | #define AR_PHY_PDADC_TAB_1 (AR_SM1_BASE + (AR_SREV_AR9462(ah) ? \ |
912 | 0x240 : 0x280)) | 912 | 0x280 : 0x240)) |
913 | #define AR_PHY_TPC_19_B1 (AR_SM1_BASE + 0x240) | 913 | #define AR_PHY_TPC_19_B1 (AR_SM1_BASE + 0x240) |
914 | #define AR_PHY_TPC_19_B1_ALPHA_THERM 0xff | 914 | #define AR_PHY_TPC_19_B1_ALPHA_THERM 0xff |
915 | #define AR_PHY_TPC_19_B1_ALPHA_THERM_S 0 | 915 | #define AR_PHY_TPC_19_B1_ALPHA_THERM_S 0 |
@@ -931,10 +931,10 @@ | |||
931 | #define AR_PHY_AIC_SRAM_ADDR_B1 (AR_SM1_BASE + 0x5f0) | 931 | #define AR_PHY_AIC_SRAM_ADDR_B1 (AR_SM1_BASE + 0x5f0) |
932 | #define AR_PHY_AIC_SRAM_DATA_B1 (AR_SM1_BASE + 0x5f4) | 932 | #define AR_PHY_AIC_SRAM_DATA_B1 (AR_SM1_BASE + 0x5f4) |
933 | 933 | ||
934 | #define AR_PHY_RTT_TABLE_SW_INTF_B(i) (0x384 + (i) ? \ | 934 | #define AR_PHY_RTT_TABLE_SW_INTF_B(i) (0x384 + ((i) ? \ |
935 | AR_SM1_BASE : AR_SM_BASE) | 935 | AR_SM1_BASE : AR_SM_BASE)) |
936 | #define AR_PHY_RTT_TABLE_SW_INTF_1_B(i) (0x388 + (i) ? \ | 936 | #define AR_PHY_RTT_TABLE_SW_INTF_1_B(i) (0x388 + ((i) ? \ |
937 | AR_SM1_BASE : AR_SM_BASE) | 937 | AR_SM1_BASE : AR_SM_BASE)) |
938 | /* | 938 | /* |
939 | * Channel 2 Register Map | 939 | * Channel 2 Register Map |
940 | */ | 940 | */ |
diff --git a/drivers/net/wireless/ath/ath9k/ar9485_initvals.h b/drivers/net/wireless/ath/ath9k/ar9485_initvals.h index 611ea6ce8508..d16d029f81a9 100644 --- a/drivers/net/wireless/ath/ath9k/ar9485_initvals.h +++ b/drivers/net/wireless/ath/ath9k/ar9485_initvals.h | |||
@@ -521,7 +521,7 @@ static const u32 ar9485_1_1_radio_postamble[][2] = { | |||
521 | {0x000160ac, 0x24611800}, | 521 | {0x000160ac, 0x24611800}, |
522 | {0x000160b0, 0x03284f3e}, | 522 | {0x000160b0, 0x03284f3e}, |
523 | {0x0001610c, 0x00170000}, | 523 | {0x0001610c, 0x00170000}, |
524 | {0x00016140, 0x10804008}, | 524 | {0x00016140, 0x50804008}, |
525 | }; | 525 | }; |
526 | 526 | ||
527 | static const u32 ar9485_1_1_mac_postamble[][5] = { | 527 | static const u32 ar9485_1_1_mac_postamble[][5] = { |
@@ -603,7 +603,7 @@ static const u32 ar9485_1_1_radio_core[][2] = { | |||
603 | 603 | ||
604 | static const u32 ar9485_1_1_pcie_phy_pll_on_clkreq_enable_L1[][2] = { | 604 | static const u32 ar9485_1_1_pcie_phy_pll_on_clkreq_enable_L1[][2] = { |
605 | /* Addr allmodes */ | 605 | /* Addr allmodes */ |
606 | {0x00018c00, 0x10052e5e}, | 606 | {0x00018c00, 0x18052e5e}, |
607 | {0x00018c04, 0x000801d8}, | 607 | {0x00018c04, 0x000801d8}, |
608 | {0x00018c08, 0x0000080c}, | 608 | {0x00018c08, 0x0000080c}, |
609 | }; | 609 | }; |
@@ -776,7 +776,7 @@ static const u32 ar9485_modes_green_ob_db_tx_gain_1_1[][5] = { | |||
776 | 776 | ||
777 | static const u32 ar9485_1_1_pcie_phy_clkreq_disable_L1[][2] = { | 777 | static const u32 ar9485_1_1_pcie_phy_clkreq_disable_L1[][2] = { |
778 | /* Addr allmodes */ | 778 | /* Addr allmodes */ |
779 | {0x00018c00, 0x10013e5e}, | 779 | {0x00018c00, 0x18013e5e}, |
780 | {0x00018c04, 0x000801d8}, | 780 | {0x00018c04, 0x000801d8}, |
781 | {0x00018c08, 0x0000080c}, | 781 | {0x00018c08, 0x0000080c}, |
782 | }; | 782 | }; |
@@ -882,7 +882,7 @@ static const u32 ar9485_fast_clock_1_1_baseband_postamble[][3] = { | |||
882 | 882 | ||
883 | static const u32 ar9485_1_1_pcie_phy_pll_on_clkreq_disable_L1[][2] = { | 883 | static const u32 ar9485_1_1_pcie_phy_pll_on_clkreq_disable_L1[][2] = { |
884 | /* Addr allmodes */ | 884 | /* Addr allmodes */ |
885 | {0x00018c00, 0x10012e5e}, | 885 | {0x00018c00, 0x18012e5e}, |
886 | {0x00018c04, 0x000801d8}, | 886 | {0x00018c04, 0x000801d8}, |
887 | {0x00018c08, 0x0000080c}, | 887 | {0x00018c08, 0x0000080c}, |
888 | }; | 888 | }; |
@@ -1021,7 +1021,7 @@ static const u32 ar9485_common_rx_gain_1_1[][2] = { | |||
1021 | 1021 | ||
1022 | static const u32 ar9485_1_1_pcie_phy_clkreq_enable_L1[][2] = { | 1022 | static const u32 ar9485_1_1_pcie_phy_clkreq_enable_L1[][2] = { |
1023 | /* Addr allmodes */ | 1023 | /* Addr allmodes */ |
1024 | {0x00018c00, 0x10053e5e}, | 1024 | {0x00018c00, 0x18053e5e}, |
1025 | {0x00018c04, 0x000801d8}, | 1025 | {0x00018c04, 0x000801d8}, |
1026 | {0x00018c08, 0x0000080c}, | 1026 | {0x00018c08, 0x0000080c}, |
1027 | }; | 1027 | }; |
diff --git a/drivers/net/wireless/ath/ath9k/hw.c b/drivers/net/wireless/ath/ath9k/hw.c index 4952ad8c4e8c..2f91acccb7db 100644 --- a/drivers/net/wireless/ath/ath9k/hw.c +++ b/drivers/net/wireless/ath/ath9k/hw.c | |||
@@ -1725,6 +1725,9 @@ int ath9k_hw_reset(struct ath_hw *ah, struct ath9k_channel *chan, | |||
1725 | if (!ath9k_hw_init_cal(ah, chan)) | 1725 | if (!ath9k_hw_init_cal(ah, chan)) |
1726 | return -EIO; | 1726 | return -EIO; |
1727 | 1727 | ||
1728 | ath9k_hw_loadnf(ah, chan); | ||
1729 | ath9k_hw_start_nfcal(ah, true); | ||
1730 | |||
1728 | ENABLE_REGWRITE_BUFFER(ah); | 1731 | ENABLE_REGWRITE_BUFFER(ah); |
1729 | 1732 | ||
1730 | ath9k_hw_restore_chainmask(ah); | 1733 | ath9k_hw_restore_chainmask(ah); |
diff --git a/drivers/net/wireless/ath/carl9170/tx.c b/drivers/net/wireless/ath/carl9170/tx.c index d20946939cd8..59472e1605cd 100644 --- a/drivers/net/wireless/ath/carl9170/tx.c +++ b/drivers/net/wireless/ath/carl9170/tx.c | |||
@@ -296,7 +296,8 @@ static void carl9170_tx_release(struct kref *ref) | |||
296 | super = (void *)skb->data; | 296 | super = (void *)skb->data; |
297 | txinfo->status.ampdu_len = super->s.rix; | 297 | txinfo->status.ampdu_len = super->s.rix; |
298 | txinfo->status.ampdu_ack_len = super->s.cnt; | 298 | txinfo->status.ampdu_ack_len = super->s.cnt; |
299 | } else if (txinfo->flags & IEEE80211_TX_STAT_ACK) { | 299 | } else if ((txinfo->flags & IEEE80211_TX_STAT_ACK) && |
300 | !(txinfo->flags & IEEE80211_TX_CTL_REQ_TX_STATUS)) { | ||
300 | /* | 301 | /* |
301 | * drop redundant tx_status reports: | 302 | * drop redundant tx_status reports: |
302 | * | 303 | * |
@@ -308,15 +309,17 @@ static void carl9170_tx_release(struct kref *ref) | |||
308 | * | 309 | * |
309 | * 3. minstrel_ht is picky, it only accepts | 310 | * 3. minstrel_ht is picky, it only accepts |
310 | * reports of frames with the TX_STATUS_AMPDU flag. | 311 | * reports of frames with the TX_STATUS_AMPDU flag. |
312 | * | ||
313 | * 4. mac80211 is not particularly interested in | ||
314 | * feedback either [CTL_REQ_TX_STATUS not set] | ||
311 | */ | 315 | */ |
312 | 316 | ||
313 | dev_kfree_skb_any(skb); | 317 | dev_kfree_skb_any(skb); |
314 | return; | 318 | return; |
315 | } else { | 319 | } else { |
316 | /* | 320 | /* |
317 | * Frame has failed, but we want to keep it in | 321 | * Either the frame transmission has failed or |
318 | * case it was lost due to a power-state | 322 | * mac80211 requested tx status. |
319 | * transition. | ||
320 | */ | 323 | */ |
321 | } | 324 | } |
322 | } | 325 | } |
diff --git a/drivers/net/wireless/b43/xmit.c b/drivers/net/wireless/b43/xmit.c index c73e8600d218..58ea0e5fabfd 100644 --- a/drivers/net/wireless/b43/xmit.c +++ b/drivers/net/wireless/b43/xmit.c | |||
@@ -827,7 +827,6 @@ void b43_rx(struct b43_wldev *dev, struct sk_buff *skb, const void *_rxhdr) | |||
827 | #endif | 827 | #endif |
828 | return; | 828 | return; |
829 | drop: | 829 | drop: |
830 | b43dbg(dev->wl, "RX: Packet dropped\n"); | ||
831 | dev_kfree_skb_any(skb); | 830 | dev_kfree_skb_any(skb); |
832 | } | 831 | } |
833 | 832 | ||
diff --git a/drivers/net/wireless/iwlwifi/iwl-core.c b/drivers/net/wireless/iwlwifi/iwl-core.c index b247a56d5135..001fdf140abb 100644 --- a/drivers/net/wireless/iwlwifi/iwl-core.c +++ b/drivers/net/wireless/iwlwifi/iwl-core.c | |||
@@ -1755,16 +1755,6 @@ static inline int iwl_check_stuck_queue(struct iwl_priv *priv, int txq) | |||
1755 | { | 1755 | { |
1756 | if (iwl_trans_check_stuck_queue(trans(priv), txq)) { | 1756 | if (iwl_trans_check_stuck_queue(trans(priv), txq)) { |
1757 | int ret; | 1757 | int ret; |
1758 | if (txq == priv->shrd->cmd_queue) { | ||
1759 | /* | ||
1760 | * validate command queue still working | ||
1761 | * by sending "ECHO" command | ||
1762 | */ | ||
1763 | if (!iwl_cmd_echo_test(priv)) | ||
1764 | return 0; | ||
1765 | else | ||
1766 | IWL_DEBUG_HC(priv, "echo testing fail\n"); | ||
1767 | } | ||
1768 | ret = iwl_force_reset(priv, IWL_FW_RESET, false); | 1758 | ret = iwl_force_reset(priv, IWL_FW_RESET, false); |
1769 | return (ret == -EAGAIN) ? 0 : 1; | 1759 | return (ret == -EAGAIN) ? 0 : 1; |
1770 | } | 1760 | } |
diff --git a/drivers/net/wireless/iwlwifi/iwl-pci.c b/drivers/net/wireless/iwlwifi/iwl-pci.c index f0c623ade3ff..1800029911ad 100644 --- a/drivers/net/wireless/iwlwifi/iwl-pci.c +++ b/drivers/net/wireless/iwlwifi/iwl-pci.c | |||
@@ -446,10 +446,9 @@ static int iwl_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent) | |||
446 | pci_write_config_byte(pdev, PCI_CFG_RETRY_TIMEOUT, 0x00); | 446 | pci_write_config_byte(pdev, PCI_CFG_RETRY_TIMEOUT, 0x00); |
447 | 447 | ||
448 | err = pci_enable_msi(pdev); | 448 | err = pci_enable_msi(pdev); |
449 | if (err) { | 449 | if (err) |
450 | dev_printk(KERN_ERR, &pdev->dev, "pci_enable_msi failed"); | 450 | dev_printk(KERN_ERR, &pdev->dev, |
451 | goto out_iounmap; | 451 | "pci_enable_msi failed(0X%x)", err); |
452 | } | ||
453 | 452 | ||
454 | /* TODO: Move this away, not needed if not MSI */ | 453 | /* TODO: Move this away, not needed if not MSI */ |
455 | /* enable rfkill interrupt: hw bug w/a */ | 454 | /* enable rfkill interrupt: hw bug w/a */ |
@@ -470,7 +469,6 @@ static int iwl_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent) | |||
470 | 469 | ||
471 | out_disable_msi: | 470 | out_disable_msi: |
472 | pci_disable_msi(pdev); | 471 | pci_disable_msi(pdev); |
473 | out_iounmap: | ||
474 | pci_iounmap(pdev, pci_bus->hw_base); | 472 | pci_iounmap(pdev, pci_bus->hw_base); |
475 | out_pci_release_regions: | 473 | out_pci_release_regions: |
476 | pci_set_drvdata(pdev, NULL); | 474 | pci_set_drvdata(pdev, NULL); |
diff --git a/drivers/net/wireless/iwlwifi/iwl-trans-pcie.c b/drivers/net/wireless/iwlwifi/iwl-trans-pcie.c index 8e8c75c997ee..da3411057afc 100644 --- a/drivers/net/wireless/iwlwifi/iwl-trans-pcie.c +++ b/drivers/net/wireless/iwlwifi/iwl-trans-pcie.c | |||
@@ -407,6 +407,7 @@ static void iwl_tx_queue_unmap(struct iwl_trans *trans, int txq_id) | |||
407 | struct iwl_queue *q = &txq->q; | 407 | struct iwl_queue *q = &txq->q; |
408 | enum dma_data_direction dma_dir; | 408 | enum dma_data_direction dma_dir; |
409 | unsigned long flags; | 409 | unsigned long flags; |
410 | spinlock_t *lock; | ||
410 | 411 | ||
411 | if (!q->n_bd) | 412 | if (!q->n_bd) |
412 | return; | 413 | return; |
@@ -414,19 +415,22 @@ static void iwl_tx_queue_unmap(struct iwl_trans *trans, int txq_id) | |||
414 | /* In the command queue, all the TBs are mapped as BIDI | 415 | /* In the command queue, all the TBs are mapped as BIDI |
415 | * so unmap them as such. | 416 | * so unmap them as such. |
416 | */ | 417 | */ |
417 | if (txq_id == trans->shrd->cmd_queue) | 418 | if (txq_id == trans->shrd->cmd_queue) { |
418 | dma_dir = DMA_BIDIRECTIONAL; | 419 | dma_dir = DMA_BIDIRECTIONAL; |
419 | else | 420 | lock = &trans->hcmd_lock; |
421 | } else { | ||
420 | dma_dir = DMA_TO_DEVICE; | 422 | dma_dir = DMA_TO_DEVICE; |
423 | lock = &trans->shrd->sta_lock; | ||
424 | } | ||
421 | 425 | ||
422 | spin_lock_irqsave(&trans->shrd->sta_lock, flags); | 426 | spin_lock_irqsave(lock, flags); |
423 | while (q->write_ptr != q->read_ptr) { | 427 | while (q->write_ptr != q->read_ptr) { |
424 | /* The read_ptr needs to bound by q->n_window */ | 428 | /* The read_ptr needs to bound by q->n_window */ |
425 | iwlagn_txq_free_tfd(trans, txq, get_cmd_index(q, q->read_ptr), | 429 | iwlagn_txq_free_tfd(trans, txq, get_cmd_index(q, q->read_ptr), |
426 | dma_dir); | 430 | dma_dir); |
427 | q->read_ptr = iwl_queue_inc_wrap(q->read_ptr, q->n_bd); | 431 | q->read_ptr = iwl_queue_inc_wrap(q->read_ptr, q->n_bd); |
428 | } | 432 | } |
429 | spin_unlock_irqrestore(&trans->shrd->sta_lock, flags); | 433 | spin_unlock_irqrestore(lock, flags); |
430 | } | 434 | } |
431 | 435 | ||
432 | /** | 436 | /** |
diff --git a/drivers/net/wireless/libertas/cfg.c b/drivers/net/wireless/libertas/cfg.c index ff6378276ff0..4fcd653bddc4 100644 --- a/drivers/net/wireless/libertas/cfg.c +++ b/drivers/net/wireless/libertas/cfg.c | |||
@@ -728,15 +728,9 @@ static void lbs_scan_worker(struct work_struct *work) | |||
728 | le16_to_cpu(scan_cmd->hdr.size), | 728 | le16_to_cpu(scan_cmd->hdr.size), |
729 | lbs_ret_scan, 0); | 729 | lbs_ret_scan, 0); |
730 | 730 | ||
731 | if (priv->scan_channel >= priv->scan_req->n_channels) { | 731 | if (priv->scan_channel >= priv->scan_req->n_channels) |
732 | /* Mark scan done */ | 732 | /* Mark scan done */ |
733 | if (priv->internal_scan) | 733 | lbs_scan_done(priv); |
734 | kfree(priv->scan_req); | ||
735 | else | ||
736 | cfg80211_scan_done(priv->scan_req, false); | ||
737 | |||
738 | priv->scan_req = NULL; | ||
739 | } | ||
740 | 734 | ||
741 | /* Restart network */ | 735 | /* Restart network */ |
742 | if (carrier) | 736 | if (carrier) |
@@ -774,6 +768,21 @@ static void _internal_start_scan(struct lbs_private *priv, bool internal, | |||
774 | lbs_deb_leave(LBS_DEB_CFG80211); | 768 | lbs_deb_leave(LBS_DEB_CFG80211); |
775 | } | 769 | } |
776 | 770 | ||
771 | /* | ||
772 | * Clean up priv->scan_req. Should be used to handle the allocation details. | ||
773 | */ | ||
774 | void lbs_scan_done(struct lbs_private *priv) | ||
775 | { | ||
776 | WARN_ON(!priv->scan_req); | ||
777 | |||
778 | if (priv->internal_scan) | ||
779 | kfree(priv->scan_req); | ||
780 | else | ||
781 | cfg80211_scan_done(priv->scan_req, false); | ||
782 | |||
783 | priv->scan_req = NULL; | ||
784 | } | ||
785 | |||
777 | static int lbs_cfg_scan(struct wiphy *wiphy, | 786 | static int lbs_cfg_scan(struct wiphy *wiphy, |
778 | struct net_device *dev, | 787 | struct net_device *dev, |
779 | struct cfg80211_scan_request *request) | 788 | struct cfg80211_scan_request *request) |
diff --git a/drivers/net/wireless/libertas/cfg.h b/drivers/net/wireless/libertas/cfg.h index a02ee151710e..558168ce634d 100644 --- a/drivers/net/wireless/libertas/cfg.h +++ b/drivers/net/wireless/libertas/cfg.h | |||
@@ -16,6 +16,7 @@ int lbs_reg_notifier(struct wiphy *wiphy, | |||
16 | void lbs_send_disconnect_notification(struct lbs_private *priv); | 16 | void lbs_send_disconnect_notification(struct lbs_private *priv); |
17 | void lbs_send_mic_failureevent(struct lbs_private *priv, u32 event); | 17 | void lbs_send_mic_failureevent(struct lbs_private *priv, u32 event); |
18 | 18 | ||
19 | void lbs_scan_done(struct lbs_private *priv); | ||
19 | void lbs_scan_deinit(struct lbs_private *priv); | 20 | void lbs_scan_deinit(struct lbs_private *priv); |
20 | int lbs_disconnect(struct lbs_private *priv, u16 reason); | 21 | int lbs_disconnect(struct lbs_private *priv, u16 reason); |
21 | 22 | ||
diff --git a/drivers/net/wireless/libertas/main.c b/drivers/net/wireless/libertas/main.c index 4ae99a40dbf7..957681dede17 100644 --- a/drivers/net/wireless/libertas/main.c +++ b/drivers/net/wireless/libertas/main.c | |||
@@ -255,10 +255,8 @@ static int lbs_eth_stop(struct net_device *dev) | |||
255 | 255 | ||
256 | lbs_update_mcast(priv); | 256 | lbs_update_mcast(priv); |
257 | cancel_delayed_work_sync(&priv->scan_work); | 257 | cancel_delayed_work_sync(&priv->scan_work); |
258 | if (priv->scan_req) { | 258 | if (priv->scan_req) |
259 | cfg80211_scan_done(priv->scan_req, false); | 259 | lbs_scan_done(priv); |
260 | priv->scan_req = NULL; | ||
261 | } | ||
262 | 260 | ||
263 | netif_carrier_off(priv->dev); | 261 | netif_carrier_off(priv->dev); |
264 | 262 | ||
diff --git a/drivers/pinctrl/Kconfig b/drivers/pinctrl/Kconfig index ef566443f945..e17e2f8001d2 100644 --- a/drivers/pinctrl/Kconfig +++ b/drivers/pinctrl/Kconfig | |||
@@ -2,23 +2,17 @@ | |||
2 | # PINCTRL infrastructure and drivers | 2 | # PINCTRL infrastructure and drivers |
3 | # | 3 | # |
4 | 4 | ||
5 | menuconfig PINCTRL | 5 | config PINCTRL |
6 | bool "PINCTRL Support" | 6 | bool |
7 | depends on EXPERIMENTAL | 7 | depends on EXPERIMENTAL |
8 | help | ||
9 | This enables the PINCTRL subsystem for controlling pins | ||
10 | on chip packages, for example multiplexing pins on primarily | ||
11 | PGA and BGA packages for systems on chip. | ||
12 | |||
13 | If unsure, say N. | ||
14 | 8 | ||
15 | if PINCTRL | 9 | if PINCTRL |
16 | 10 | ||
11 | menu "Pin controllers" | ||
12 | depends on PINCTRL | ||
13 | |||
17 | config PINMUX | 14 | config PINMUX |
18 | bool "Support pinmux controllers" | 15 | bool "Support pinmux controllers" |
19 | help | ||
20 | Say Y here if you want the pincontrol subsystem to handle pin | ||
21 | multiplexing drivers. | ||
22 | 16 | ||
23 | config DEBUG_PINCTRL | 17 | config DEBUG_PINCTRL |
24 | bool "Debug PINCTRL calls" | 18 | bool "Debug PINCTRL calls" |
@@ -30,14 +24,12 @@ config PINMUX_SIRF | |||
30 | bool "CSR SiRFprimaII pinmux driver" | 24 | bool "CSR SiRFprimaII pinmux driver" |
31 | depends on ARCH_PRIMA2 | 25 | depends on ARCH_PRIMA2 |
32 | select PINMUX | 26 | select PINMUX |
33 | help | ||
34 | Say Y here to enable the SiRFprimaII pinmux driver | ||
35 | 27 | ||
36 | config PINMUX_U300 | 28 | config PINMUX_U300 |
37 | bool "U300 pinmux driver" | 29 | bool "U300 pinmux driver" |
38 | depends on ARCH_U300 | 30 | depends on ARCH_U300 |
39 | select PINMUX | 31 | select PINMUX |
40 | help | 32 | |
41 | Say Y here to enable the U300 pinmux driver | 33 | endmenu |
42 | 34 | ||
43 | endif | 35 | endif |
diff --git a/drivers/platform/x86/Kconfig b/drivers/platform/x86/Kconfig index f4e3d82379d7..7f43cf86d776 100644 --- a/drivers/platform/x86/Kconfig +++ b/drivers/platform/x86/Kconfig | |||
@@ -83,8 +83,10 @@ config DELL_LAPTOP | |||
83 | depends on EXPERIMENTAL | 83 | depends on EXPERIMENTAL |
84 | depends on BACKLIGHT_CLASS_DEVICE | 84 | depends on BACKLIGHT_CLASS_DEVICE |
85 | depends on RFKILL || RFKILL = n | 85 | depends on RFKILL || RFKILL = n |
86 | depends on POWER_SUPPLY | ||
87 | depends on SERIO_I8042 | 86 | depends on SERIO_I8042 |
87 | select POWER_SUPPLY | ||
88 | select LEDS_CLASS | ||
89 | select NEW_LEDS | ||
88 | default n | 90 | default n |
89 | ---help--- | 91 | ---help--- |
90 | This driver adds support for rfkill and backlight control to Dell | 92 | This driver adds support for rfkill and backlight control to Dell |
diff --git a/drivers/pnp/pnpacpi/rsparser.c b/drivers/pnp/pnpacpi/rsparser.c index bbf3edd85beb..5be4a392a3ae 100644 --- a/drivers/pnp/pnpacpi/rsparser.c +++ b/drivers/pnp/pnpacpi/rsparser.c | |||
@@ -509,15 +509,12 @@ static __init void pnpacpi_parse_dma_option(struct pnp_dev *dev, | |||
509 | struct acpi_resource_dma *p) | 509 | struct acpi_resource_dma *p) |
510 | { | 510 | { |
511 | int i; | 511 | int i; |
512 | unsigned char map = 0, flags = 0; | 512 | unsigned char map = 0, flags; |
513 | |||
514 | if (p->channel_count == 0) | ||
515 | flags |= IORESOURCE_DISABLED; | ||
516 | 513 | ||
517 | for (i = 0; i < p->channel_count; i++) | 514 | for (i = 0; i < p->channel_count; i++) |
518 | map |= 1 << p->channels[i]; | 515 | map |= 1 << p->channels[i]; |
519 | 516 | ||
520 | flags |= dma_flags(dev, p->type, p->bus_master, p->transfer); | 517 | flags = dma_flags(dev, p->type, p->bus_master, p->transfer); |
521 | pnp_register_dma_resource(dev, option_flags, map, flags); | 518 | pnp_register_dma_resource(dev, option_flags, map, flags); |
522 | } | 519 | } |
523 | 520 | ||
@@ -527,17 +524,14 @@ static __init void pnpacpi_parse_irq_option(struct pnp_dev *dev, | |||
527 | { | 524 | { |
528 | int i; | 525 | int i; |
529 | pnp_irq_mask_t map; | 526 | pnp_irq_mask_t map; |
530 | unsigned char flags = 0; | 527 | unsigned char flags; |
531 | |||
532 | if (p->interrupt_count == 0) | ||
533 | flags |= IORESOURCE_DISABLED; | ||
534 | 528 | ||
535 | bitmap_zero(map.bits, PNP_IRQ_NR); | 529 | bitmap_zero(map.bits, PNP_IRQ_NR); |
536 | for (i = 0; i < p->interrupt_count; i++) | 530 | for (i = 0; i < p->interrupt_count; i++) |
537 | if (p->interrupts[i]) | 531 | if (p->interrupts[i]) |
538 | __set_bit(p->interrupts[i], map.bits); | 532 | __set_bit(p->interrupts[i], map.bits); |
539 | 533 | ||
540 | flags |= irq_flags(p->triggering, p->polarity, p->sharable); | 534 | flags = irq_flags(p->triggering, p->polarity, p->sharable); |
541 | pnp_register_irq_resource(dev, option_flags, &map, flags); | 535 | pnp_register_irq_resource(dev, option_flags, &map, flags); |
542 | } | 536 | } |
543 | 537 | ||
@@ -547,10 +541,7 @@ static __init void pnpacpi_parse_ext_irq_option(struct pnp_dev *dev, | |||
547 | { | 541 | { |
548 | int i; | 542 | int i; |
549 | pnp_irq_mask_t map; | 543 | pnp_irq_mask_t map; |
550 | unsigned char flags = 0; | 544 | unsigned char flags; |
551 | |||
552 | if (p->interrupt_count == 0) | ||
553 | flags |= IORESOURCE_DISABLED; | ||
554 | 545 | ||
555 | bitmap_zero(map.bits, PNP_IRQ_NR); | 546 | bitmap_zero(map.bits, PNP_IRQ_NR); |
556 | for (i = 0; i < p->interrupt_count; i++) { | 547 | for (i = 0; i < p->interrupt_count; i++) { |
@@ -564,7 +555,7 @@ static __init void pnpacpi_parse_ext_irq_option(struct pnp_dev *dev, | |||
564 | } | 555 | } |
565 | } | 556 | } |
566 | 557 | ||
567 | flags |= irq_flags(p->triggering, p->polarity, p->sharable); | 558 | flags = irq_flags(p->triggering, p->polarity, p->sharable); |
568 | pnp_register_irq_resource(dev, option_flags, &map, flags); | 559 | pnp_register_irq_resource(dev, option_flags, &map, flags); |
569 | } | 560 | } |
570 | 561 | ||
@@ -574,11 +565,8 @@ static __init void pnpacpi_parse_port_option(struct pnp_dev *dev, | |||
574 | { | 565 | { |
575 | unsigned char flags = 0; | 566 | unsigned char flags = 0; |
576 | 567 | ||
577 | if (io->address_length == 0) | ||
578 | flags |= IORESOURCE_DISABLED; | ||
579 | |||
580 | if (io->io_decode == ACPI_DECODE_16) | 568 | if (io->io_decode == ACPI_DECODE_16) |
581 | flags |= IORESOURCE_IO_16BIT_ADDR; | 569 | flags = IORESOURCE_IO_16BIT_ADDR; |
582 | pnp_register_port_resource(dev, option_flags, io->minimum, io->maximum, | 570 | pnp_register_port_resource(dev, option_flags, io->minimum, io->maximum, |
583 | io->alignment, io->address_length, flags); | 571 | io->alignment, io->address_length, flags); |
584 | } | 572 | } |
@@ -587,13 +575,8 @@ static __init void pnpacpi_parse_fixed_port_option(struct pnp_dev *dev, | |||
587 | unsigned int option_flags, | 575 | unsigned int option_flags, |
588 | struct acpi_resource_fixed_io *io) | 576 | struct acpi_resource_fixed_io *io) |
589 | { | 577 | { |
590 | unsigned char flags = 0; | ||
591 | |||
592 | if (io->address_length == 0) | ||
593 | flags |= IORESOURCE_DISABLED; | ||
594 | |||
595 | pnp_register_port_resource(dev, option_flags, io->address, io->address, | 578 | pnp_register_port_resource(dev, option_flags, io->address, io->address, |
596 | 0, io->address_length, flags | IORESOURCE_IO_FIXED); | 579 | 0, io->address_length, IORESOURCE_IO_FIXED); |
597 | } | 580 | } |
598 | 581 | ||
599 | static __init void pnpacpi_parse_mem24_option(struct pnp_dev *dev, | 582 | static __init void pnpacpi_parse_mem24_option(struct pnp_dev *dev, |
@@ -602,11 +585,8 @@ static __init void pnpacpi_parse_mem24_option(struct pnp_dev *dev, | |||
602 | { | 585 | { |
603 | unsigned char flags = 0; | 586 | unsigned char flags = 0; |
604 | 587 | ||
605 | if (p->address_length == 0) | ||
606 | flags |= IORESOURCE_DISABLED; | ||
607 | |||
608 | if (p->write_protect == ACPI_READ_WRITE_MEMORY) | 588 | if (p->write_protect == ACPI_READ_WRITE_MEMORY) |
609 | flags |= IORESOURCE_MEM_WRITEABLE; | 589 | flags = IORESOURCE_MEM_WRITEABLE; |
610 | pnp_register_mem_resource(dev, option_flags, p->minimum, p->maximum, | 590 | pnp_register_mem_resource(dev, option_flags, p->minimum, p->maximum, |
611 | p->alignment, p->address_length, flags); | 591 | p->alignment, p->address_length, flags); |
612 | } | 592 | } |
@@ -617,11 +597,8 @@ static __init void pnpacpi_parse_mem32_option(struct pnp_dev *dev, | |||
617 | { | 597 | { |
618 | unsigned char flags = 0; | 598 | unsigned char flags = 0; |
619 | 599 | ||
620 | if (p->address_length == 0) | ||
621 | flags |= IORESOURCE_DISABLED; | ||
622 | |||
623 | if (p->write_protect == ACPI_READ_WRITE_MEMORY) | 600 | if (p->write_protect == ACPI_READ_WRITE_MEMORY) |
624 | flags |= IORESOURCE_MEM_WRITEABLE; | 601 | flags = IORESOURCE_MEM_WRITEABLE; |
625 | pnp_register_mem_resource(dev, option_flags, p->minimum, p->maximum, | 602 | pnp_register_mem_resource(dev, option_flags, p->minimum, p->maximum, |
626 | p->alignment, p->address_length, flags); | 603 | p->alignment, p->address_length, flags); |
627 | } | 604 | } |
@@ -632,11 +609,8 @@ static __init void pnpacpi_parse_fixed_mem32_option(struct pnp_dev *dev, | |||
632 | { | 609 | { |
633 | unsigned char flags = 0; | 610 | unsigned char flags = 0; |
634 | 611 | ||
635 | if (p->address_length == 0) | ||
636 | flags |= IORESOURCE_DISABLED; | ||
637 | |||
638 | if (p->write_protect == ACPI_READ_WRITE_MEMORY) | 612 | if (p->write_protect == ACPI_READ_WRITE_MEMORY) |
639 | flags |= IORESOURCE_MEM_WRITEABLE; | 613 | flags = IORESOURCE_MEM_WRITEABLE; |
640 | pnp_register_mem_resource(dev, option_flags, p->address, p->address, | 614 | pnp_register_mem_resource(dev, option_flags, p->address, p->address, |
641 | 0, p->address_length, flags); | 615 | 0, p->address_length, flags); |
642 | } | 616 | } |
@@ -656,19 +630,16 @@ static __init void pnpacpi_parse_address_option(struct pnp_dev *dev, | |||
656 | return; | 630 | return; |
657 | } | 631 | } |
658 | 632 | ||
659 | if (p->address_length == 0) | ||
660 | flags |= IORESOURCE_DISABLED; | ||
661 | |||
662 | if (p->resource_type == ACPI_MEMORY_RANGE) { | 633 | if (p->resource_type == ACPI_MEMORY_RANGE) { |
663 | if (p->info.mem.write_protect == ACPI_READ_WRITE_MEMORY) | 634 | if (p->info.mem.write_protect == ACPI_READ_WRITE_MEMORY) |
664 | flags |= IORESOURCE_MEM_WRITEABLE; | 635 | flags = IORESOURCE_MEM_WRITEABLE; |
665 | pnp_register_mem_resource(dev, option_flags, p->minimum, | 636 | pnp_register_mem_resource(dev, option_flags, p->minimum, |
666 | p->minimum, 0, p->address_length, | 637 | p->minimum, 0, p->address_length, |
667 | flags); | 638 | flags); |
668 | } else if (p->resource_type == ACPI_IO_RANGE) | 639 | } else if (p->resource_type == ACPI_IO_RANGE) |
669 | pnp_register_port_resource(dev, option_flags, p->minimum, | 640 | pnp_register_port_resource(dev, option_flags, p->minimum, |
670 | p->minimum, 0, p->address_length, | 641 | p->minimum, 0, p->address_length, |
671 | flags | IORESOURCE_IO_FIXED); | 642 | IORESOURCE_IO_FIXED); |
672 | } | 643 | } |
673 | 644 | ||
674 | static __init void pnpacpi_parse_ext_address_option(struct pnp_dev *dev, | 645 | static __init void pnpacpi_parse_ext_address_option(struct pnp_dev *dev, |
@@ -678,19 +649,16 @@ static __init void pnpacpi_parse_ext_address_option(struct pnp_dev *dev, | |||
678 | struct acpi_resource_extended_address64 *p = &r->data.ext_address64; | 649 | struct acpi_resource_extended_address64 *p = &r->data.ext_address64; |
679 | unsigned char flags = 0; | 650 | unsigned char flags = 0; |
680 | 651 | ||
681 | if (p->address_length == 0) | ||
682 | flags |= IORESOURCE_DISABLED; | ||
683 | |||
684 | if (p->resource_type == ACPI_MEMORY_RANGE) { | 652 | if (p->resource_type == ACPI_MEMORY_RANGE) { |
685 | if (p->info.mem.write_protect == ACPI_READ_WRITE_MEMORY) | 653 | if (p->info.mem.write_protect == ACPI_READ_WRITE_MEMORY) |
686 | flags |= IORESOURCE_MEM_WRITEABLE; | 654 | flags = IORESOURCE_MEM_WRITEABLE; |
687 | pnp_register_mem_resource(dev, option_flags, p->minimum, | 655 | pnp_register_mem_resource(dev, option_flags, p->minimum, |
688 | p->minimum, 0, p->address_length, | 656 | p->minimum, 0, p->address_length, |
689 | flags); | 657 | flags); |
690 | } else if (p->resource_type == ACPI_IO_RANGE) | 658 | } else if (p->resource_type == ACPI_IO_RANGE) |
691 | pnp_register_port_resource(dev, option_flags, p->minimum, | 659 | pnp_register_port_resource(dev, option_flags, p->minimum, |
692 | p->minimum, 0, p->address_length, | 660 | p->minimum, 0, p->address_length, |
693 | flags | IORESOURCE_IO_FIXED); | 661 | IORESOURCE_IO_FIXED); |
694 | } | 662 | } |
695 | 663 | ||
696 | struct acpipnp_parse_option_s { | 664 | struct acpipnp_parse_option_s { |
diff --git a/drivers/ps3/ps3-vuart.c b/drivers/ps3/ps3-vuart.c index d9fb729535a1..fb7300837fee 100644 --- a/drivers/ps3/ps3-vuart.c +++ b/drivers/ps3/ps3-vuart.c | |||
@@ -952,7 +952,7 @@ static int ps3_vuart_bus_interrupt_get(void) | |||
952 | } | 952 | } |
953 | 953 | ||
954 | result = request_irq(vuart_bus_priv.virq, ps3_vuart_irq_handler, | 954 | result = request_irq(vuart_bus_priv.virq, ps3_vuart_irq_handler, |
955 | IRQF_DISABLED, "vuart", &vuart_bus_priv); | 955 | 0, "vuart", &vuart_bus_priv); |
956 | 956 | ||
957 | if (result) { | 957 | if (result) { |
958 | pr_debug("%s:%d: request_irq failed (%d)\n", | 958 | pr_debug("%s:%d: request_irq failed (%d)\n", |
diff --git a/drivers/ps3/ps3stor_lib.c b/drivers/ps3/ps3stor_lib.c index cc328dec946b..8c3f5adf1bc6 100644 --- a/drivers/ps3/ps3stor_lib.c +++ b/drivers/ps3/ps3stor_lib.c | |||
@@ -167,7 +167,7 @@ int ps3stor_setup(struct ps3_storage_device *dev, irq_handler_t handler) | |||
167 | goto fail_close_device; | 167 | goto fail_close_device; |
168 | } | 168 | } |
169 | 169 | ||
170 | error = request_irq(dev->irq, handler, IRQF_DISABLED, | 170 | error = request_irq(dev->irq, handler, 0, |
171 | dev->sbd.core.driver->name, dev); | 171 | dev->sbd.core.driver->name, dev); |
172 | if (error) { | 172 | if (error) { |
173 | dev_err(&dev->sbd.core, "%s:%u: request_irq failed %d\n", | 173 | dev_err(&dev->sbd.core, "%s:%u: request_irq failed %d\n", |
diff --git a/drivers/rtc/rtc-mrst.c b/drivers/rtc/rtc-mrst.c index d33544802a2e..bb21f443fb70 100644 --- a/drivers/rtc/rtc-mrst.c +++ b/drivers/rtc/rtc-mrst.c | |||
@@ -76,12 +76,15 @@ static inline unsigned char vrtc_is_updating(void) | |||
76 | /* | 76 | /* |
77 | * rtc_time's year contains the increment over 1900, but vRTC's YEAR | 77 | * rtc_time's year contains the increment over 1900, but vRTC's YEAR |
78 | * register can't be programmed to value larger than 0x64, so vRTC | 78 | * register can't be programmed to value larger than 0x64, so vRTC |
79 | * driver chose to use 1960 (1970 is UNIX time start point) as the base, | 79 | * driver chose to use 1972 (1970 is UNIX time start point) as the base, |
80 | * and does the translation at read/write time. | 80 | * and does the translation at read/write time. |
81 | * | 81 | * |
82 | * Why not just use 1970 as the offset? it's because using 1960 will | 82 | * Why not just use 1970 as the offset? it's because using 1972 will |
83 | * make it consistent in leap year setting for both vrtc and low-level | 83 | * make it consistent in leap year setting for both vrtc and low-level |
84 | * physical rtc devices. | 84 | * physical rtc devices. Then why not use 1960 as the offset? If we use |
85 | * 1960, for a device's first use, its YEAR register is 0 and the system | ||
86 | * year will be parsed as 1960 which is not a valid UNIX time and will | ||
87 | * cause many applications to fail mysteriously. | ||
85 | */ | 88 | */ |
86 | static int mrst_read_time(struct device *dev, struct rtc_time *time) | 89 | static int mrst_read_time(struct device *dev, struct rtc_time *time) |
87 | { | 90 | { |
@@ -99,10 +102,10 @@ static int mrst_read_time(struct device *dev, struct rtc_time *time) | |||
99 | time->tm_year = vrtc_cmos_read(RTC_YEAR); | 102 | time->tm_year = vrtc_cmos_read(RTC_YEAR); |
100 | spin_unlock_irqrestore(&rtc_lock, flags); | 103 | spin_unlock_irqrestore(&rtc_lock, flags); |
101 | 104 | ||
102 | /* Adjust for the 1960/1900 */ | 105 | /* Adjust for the 1972/1900 */ |
103 | time->tm_year += 60; | 106 | time->tm_year += 72; |
104 | time->tm_mon--; | 107 | time->tm_mon--; |
105 | return RTC_24H; | 108 | return rtc_valid_tm(time); |
106 | } | 109 | } |
107 | 110 | ||
108 | static int mrst_set_time(struct device *dev, struct rtc_time *time) | 111 | static int mrst_set_time(struct device *dev, struct rtc_time *time) |
@@ -119,9 +122,9 @@ static int mrst_set_time(struct device *dev, struct rtc_time *time) | |||
119 | min = time->tm_min; | 122 | min = time->tm_min; |
120 | sec = time->tm_sec; | 123 | sec = time->tm_sec; |
121 | 124 | ||
122 | if (yrs < 70 || yrs > 138) | 125 | if (yrs < 72 || yrs > 138) |
123 | return -EINVAL; | 126 | return -EINVAL; |
124 | yrs -= 60; | 127 | yrs -= 72; |
125 | 128 | ||
126 | spin_lock_irqsave(&rtc_lock, flags); | 129 | spin_lock_irqsave(&rtc_lock, flags); |
127 | 130 | ||
diff --git a/drivers/sh/Makefile b/drivers/sh/Makefile index 24e6cec0ae8d..67e272ab1623 100644 --- a/drivers/sh/Makefile +++ b/drivers/sh/Makefile | |||
@@ -7,3 +7,11 @@ obj-$(CONFIG_HAVE_CLK) += clk/ | |||
7 | obj-$(CONFIG_MAPLE) += maple/ | 7 | obj-$(CONFIG_MAPLE) += maple/ |
8 | obj-$(CONFIG_SUPERHYWAY) += superhyway/ | 8 | obj-$(CONFIG_SUPERHYWAY) += superhyway/ |
9 | obj-$(CONFIG_GENERIC_GPIO) += pfc.o | 9 | obj-$(CONFIG_GENERIC_GPIO) += pfc.o |
10 | |||
11 | # | ||
12 | # For the moment we only use this framework for ARM-based SH/R-Mobile | ||
13 | # platforms and generic SH. SH-based SH-Mobile platforms are still using | ||
14 | # an older framework that is pending up-porting, at which point this | ||
15 | # special casing can go away. | ||
16 | # | ||
17 | obj-$(CONFIG_SUPERH)$(CONFIG_ARCH_SHMOBILE) += pm_runtime.o | ||
diff --git a/drivers/sh/clk/core.c b/drivers/sh/clk/core.c index dc8d022c07a1..db257a35e71a 100644 --- a/drivers/sh/clk/core.c +++ b/drivers/sh/clk/core.c | |||
@@ -25,7 +25,6 @@ | |||
25 | #include <linux/seq_file.h> | 25 | #include <linux/seq_file.h> |
26 | #include <linux/err.h> | 26 | #include <linux/err.h> |
27 | #include <linux/io.h> | 27 | #include <linux/io.h> |
28 | #include <linux/debugfs.h> | ||
29 | #include <linux/cpufreq.h> | 28 | #include <linux/cpufreq.h> |
30 | #include <linux/clk.h> | 29 | #include <linux/clk.h> |
31 | #include <linux/sh_clk.h> | 30 | #include <linux/sh_clk.h> |
@@ -173,6 +172,26 @@ long clk_rate_div_range_round(struct clk *clk, unsigned int div_min, | |||
173 | return clk_rate_round_helper(&div_range_round); | 172 | return clk_rate_round_helper(&div_range_round); |
174 | } | 173 | } |
175 | 174 | ||
175 | static long clk_rate_mult_range_iter(unsigned int pos, | ||
176 | struct clk_rate_round_data *rounder) | ||
177 | { | ||
178 | return clk_get_rate(rounder->arg) * pos; | ||
179 | } | ||
180 | |||
181 | long clk_rate_mult_range_round(struct clk *clk, unsigned int mult_min, | ||
182 | unsigned int mult_max, unsigned long rate) | ||
183 | { | ||
184 | struct clk_rate_round_data mult_range_round = { | ||
185 | .min = mult_min, | ||
186 | .max = mult_max, | ||
187 | .func = clk_rate_mult_range_iter, | ||
188 | .arg = clk_get_parent(clk), | ||
189 | .rate = rate, | ||
190 | }; | ||
191 | |||
192 | return clk_rate_round_helper(&mult_range_round); | ||
193 | } | ||
194 | |||
176 | int clk_rate_table_find(struct clk *clk, | 195 | int clk_rate_table_find(struct clk *clk, |
177 | struct cpufreq_frequency_table *freq_table, | 196 | struct cpufreq_frequency_table *freq_table, |
178 | unsigned long rate) | 197 | unsigned long rate) |
@@ -205,9 +224,6 @@ int clk_reparent(struct clk *child, struct clk *parent) | |||
205 | list_add(&child->sibling, &parent->children); | 224 | list_add(&child->sibling, &parent->children); |
206 | child->parent = parent; | 225 | child->parent = parent; |
207 | 226 | ||
208 | /* now do the debugfs renaming to reattach the child | ||
209 | to the proper parent */ | ||
210 | |||
211 | return 0; | 227 | return 0; |
212 | } | 228 | } |
213 | 229 | ||
@@ -665,89 +681,6 @@ static int __init clk_syscore_init(void) | |||
665 | subsys_initcall(clk_syscore_init); | 681 | subsys_initcall(clk_syscore_init); |
666 | #endif | 682 | #endif |
667 | 683 | ||
668 | /* | ||
669 | * debugfs support to trace clock tree hierarchy and attributes | ||
670 | */ | ||
671 | static struct dentry *clk_debugfs_root; | ||
672 | |||
673 | static int clk_debugfs_register_one(struct clk *c) | ||
674 | { | ||
675 | int err; | ||
676 | struct dentry *d; | ||
677 | struct clk *pa = c->parent; | ||
678 | char s[255]; | ||
679 | char *p = s; | ||
680 | |||
681 | p += sprintf(p, "%p", c); | ||
682 | d = debugfs_create_dir(s, pa ? pa->dentry : clk_debugfs_root); | ||
683 | if (!d) | ||
684 | return -ENOMEM; | ||
685 | c->dentry = d; | ||
686 | |||
687 | d = debugfs_create_u8("usecount", S_IRUGO, c->dentry, (u8 *)&c->usecount); | ||
688 | if (!d) { | ||
689 | err = -ENOMEM; | ||
690 | goto err_out; | ||
691 | } | ||
692 | d = debugfs_create_u32("rate", S_IRUGO, c->dentry, (u32 *)&c->rate); | ||
693 | if (!d) { | ||
694 | err = -ENOMEM; | ||
695 | goto err_out; | ||
696 | } | ||
697 | d = debugfs_create_x32("flags", S_IRUGO, c->dentry, (u32 *)&c->flags); | ||
698 | if (!d) { | ||
699 | err = -ENOMEM; | ||
700 | goto err_out; | ||
701 | } | ||
702 | return 0; | ||
703 | |||
704 | err_out: | ||
705 | debugfs_remove_recursive(c->dentry); | ||
706 | return err; | ||
707 | } | ||
708 | |||
709 | static int clk_debugfs_register(struct clk *c) | ||
710 | { | ||
711 | int err; | ||
712 | struct clk *pa = c->parent; | ||
713 | |||
714 | if (pa && !pa->dentry) { | ||
715 | err = clk_debugfs_register(pa); | ||
716 | if (err) | ||
717 | return err; | ||
718 | } | ||
719 | |||
720 | if (!c->dentry) { | ||
721 | err = clk_debugfs_register_one(c); | ||
722 | if (err) | ||
723 | return err; | ||
724 | } | ||
725 | return 0; | ||
726 | } | ||
727 | |||
728 | static int __init clk_debugfs_init(void) | ||
729 | { | ||
730 | struct clk *c; | ||
731 | struct dentry *d; | ||
732 | int err; | ||
733 | |||
734 | d = debugfs_create_dir("clock", NULL); | ||
735 | if (!d) | ||
736 | return -ENOMEM; | ||
737 | clk_debugfs_root = d; | ||
738 | |||
739 | list_for_each_entry(c, &clock_list, node) { | ||
740 | err = clk_debugfs_register(c); | ||
741 | if (err) | ||
742 | goto err_out; | ||
743 | } | ||
744 | return 0; | ||
745 | err_out: | ||
746 | debugfs_remove_recursive(clk_debugfs_root); | ||
747 | return err; | ||
748 | } | ||
749 | late_initcall(clk_debugfs_init); | ||
750 | |||
751 | static int __init clk_late_init(void) | 684 | static int __init clk_late_init(void) |
752 | { | 685 | { |
753 | unsigned long flags; | 686 | unsigned long flags; |
diff --git a/drivers/sh/pm_runtime.c b/drivers/sh/pm_runtime.c new file mode 100644 index 000000000000..afe9282629b9 --- /dev/null +++ b/drivers/sh/pm_runtime.c | |||
@@ -0,0 +1,65 @@ | |||
1 | /* | ||
2 | * Runtime PM support code | ||
3 | * | ||
4 | * Copyright (C) 2009-2010 Magnus Damm | ||
5 | * | ||
6 | * This file is subject to the terms and conditions of the GNU General Public | ||
7 | * License. See the file "COPYING" in the main directory of this archive | ||
8 | * for more details. | ||
9 | */ | ||
10 | |||
11 | #include <linux/init.h> | ||
12 | #include <linux/kernel.h> | ||
13 | #include <linux/io.h> | ||
14 | #include <linux/pm_runtime.h> | ||
15 | #include <linux/pm_domain.h> | ||
16 | #include <linux/pm_clock.h> | ||
17 | #include <linux/platform_device.h> | ||
18 | #include <linux/clk.h> | ||
19 | #include <linux/sh_clk.h> | ||
20 | #include <linux/bitmap.h> | ||
21 | #include <linux/slab.h> | ||
22 | |||
23 | #ifdef CONFIG_PM_RUNTIME | ||
24 | |||
25 | static int default_platform_runtime_idle(struct device *dev) | ||
26 | { | ||
27 | /* suspend synchronously to disable clocks immediately */ | ||
28 | return pm_runtime_suspend(dev); | ||
29 | } | ||
30 | |||
31 | static struct dev_pm_domain default_pm_domain = { | ||
32 | .ops = { | ||
33 | .runtime_suspend = pm_clk_suspend, | ||
34 | .runtime_resume = pm_clk_resume, | ||
35 | .runtime_idle = default_platform_runtime_idle, | ||
36 | USE_PLATFORM_PM_SLEEP_OPS | ||
37 | }, | ||
38 | }; | ||
39 | |||
40 | #define DEFAULT_PM_DOMAIN_PTR (&default_pm_domain) | ||
41 | |||
42 | #else | ||
43 | |||
44 | #define DEFAULT_PM_DOMAIN_PTR NULL | ||
45 | |||
46 | #endif /* CONFIG_PM_RUNTIME */ | ||
47 | |||
48 | static struct pm_clk_notifier_block platform_bus_notifier = { | ||
49 | .pm_domain = DEFAULT_PM_DOMAIN_PTR, | ||
50 | .con_ids = { NULL, }, | ||
51 | }; | ||
52 | |||
53 | static int __init sh_pm_runtime_init(void) | ||
54 | { | ||
55 | pm_clk_add_notifier(&platform_bus_type, &platform_bus_notifier); | ||
56 | return 0; | ||
57 | } | ||
58 | core_initcall(sh_pm_runtime_init); | ||
59 | |||
60 | static int __init sh_pm_runtime_late_init(void) | ||
61 | { | ||
62 | pm_genpd_poweroff_unused(); | ||
63 | return 0; | ||
64 | } | ||
65 | late_initcall(sh_pm_runtime_late_init); | ||
diff --git a/drivers/spi/spi-atmel.c b/drivers/spi/spi-atmel.c index 79665e2e6ec5..16d6a839c7fa 100644 --- a/drivers/spi/spi-atmel.c +++ b/drivers/spi/spi-atmel.c | |||
@@ -907,7 +907,7 @@ static void atmel_spi_cleanup(struct spi_device *spi) | |||
907 | 907 | ||
908 | /*-------------------------------------------------------------------------*/ | 908 | /*-------------------------------------------------------------------------*/ |
909 | 909 | ||
910 | static int __init atmel_spi_probe(struct platform_device *pdev) | 910 | static int __devinit atmel_spi_probe(struct platform_device *pdev) |
911 | { | 911 | { |
912 | struct resource *regs; | 912 | struct resource *regs; |
913 | int irq; | 913 | int irq; |
@@ -1003,7 +1003,7 @@ out_free: | |||
1003 | return ret; | 1003 | return ret; |
1004 | } | 1004 | } |
1005 | 1005 | ||
1006 | static int __exit atmel_spi_remove(struct platform_device *pdev) | 1006 | static int __devexit atmel_spi_remove(struct platform_device *pdev) |
1007 | { | 1007 | { |
1008 | struct spi_master *master = platform_get_drvdata(pdev); | 1008 | struct spi_master *master = platform_get_drvdata(pdev); |
1009 | struct atmel_spi *as = spi_master_get_devdata(master); | 1009 | struct atmel_spi *as = spi_master_get_devdata(master); |
@@ -1072,6 +1072,7 @@ static struct platform_driver atmel_spi_driver = { | |||
1072 | }, | 1072 | }, |
1073 | .suspend = atmel_spi_suspend, | 1073 | .suspend = atmel_spi_suspend, |
1074 | .resume = atmel_spi_resume, | 1074 | .resume = atmel_spi_resume, |
1075 | .probe = atmel_spi_probe, | ||
1075 | .remove = __exit_p(atmel_spi_remove), | 1076 | .remove = __exit_p(atmel_spi_remove), |
1076 | }; | 1077 | }; |
1077 | module_platform_driver(atmel_spi_driver); | 1078 | module_platform_driver(atmel_spi_driver); |
diff --git a/drivers/staging/spectra/lld_mtd.c b/drivers/staging/spectra/lld_mtd.c index 2bd34662beb5..a9c309a167c2 100644 --- a/drivers/staging/spectra/lld_mtd.c +++ b/drivers/staging/spectra/lld_mtd.c | |||
@@ -340,7 +340,7 @@ u16 mtd_Read_Page_Main_Spare(u8 *read_data, u32 Block, | |||
340 | struct mtd_oob_ops ops; | 340 | struct mtd_oob_ops ops; |
341 | int ret; | 341 | int ret; |
342 | 342 | ||
343 | ops.mode = MTD_OOB_AUTO; | 343 | ops.mode = MTD_OPS_AUTO_OOB; |
344 | ops.datbuf = read_data; | 344 | ops.datbuf = read_data; |
345 | ops.len = DeviceInfo.wPageDataSize; | 345 | ops.len = DeviceInfo.wPageDataSize; |
346 | ops.oobbuf = read_data + DeviceInfo.wPageDataSize + BTSIG_OFFSET; | 346 | ops.oobbuf = read_data + DeviceInfo.wPageDataSize + BTSIG_OFFSET; |
@@ -400,7 +400,7 @@ u16 mtd_Write_Page_Main_Spare(u8 *write_data, u32 Block, | |||
400 | struct mtd_oob_ops ops; | 400 | struct mtd_oob_ops ops; |
401 | int ret; | 401 | int ret; |
402 | 402 | ||
403 | ops.mode = MTD_OOB_AUTO; | 403 | ops.mode = MTD_OPS_AUTO_OOB; |
404 | ops.datbuf = write_data; | 404 | ops.datbuf = write_data; |
405 | ops.len = DeviceInfo.wPageDataSize; | 405 | ops.len = DeviceInfo.wPageDataSize; |
406 | ops.oobbuf = write_data + DeviceInfo.wPageDataSize + BTSIG_OFFSET; | 406 | ops.oobbuf = write_data + DeviceInfo.wPageDataSize + BTSIG_OFFSET; |
@@ -473,7 +473,7 @@ u16 mtd_Read_Page_Spare(u8 *read_data, u32 Block, | |||
473 | struct mtd_oob_ops ops; | 473 | struct mtd_oob_ops ops; |
474 | int ret; | 474 | int ret; |
475 | 475 | ||
476 | ops.mode = MTD_OOB_AUTO; | 476 | ops.mode = MTD_OPS_AUTO_OOB; |
477 | ops.datbuf = NULL; | 477 | ops.datbuf = NULL; |
478 | ops.len = 0; | 478 | ops.len = 0; |
479 | ops.oobbuf = read_data; | 479 | ops.oobbuf = read_data; |
diff --git a/drivers/thermal/thermal_sys.c b/drivers/thermal/thermal_sys.c index 708f8e92771a..dd9a5743fa99 100644 --- a/drivers/thermal/thermal_sys.c +++ b/drivers/thermal/thermal_sys.c | |||
@@ -678,10 +678,10 @@ static void thermal_zone_device_set_polling(struct thermal_zone_device *tz, | |||
678 | return; | 678 | return; |
679 | 679 | ||
680 | if (delay > 1000) | 680 | if (delay > 1000) |
681 | schedule_delayed_work(&(tz->poll_queue), | 681 | queue_delayed_work(system_freezable_wq, &(tz->poll_queue), |
682 | round_jiffies(msecs_to_jiffies(delay))); | 682 | round_jiffies(msecs_to_jiffies(delay))); |
683 | else | 683 | else |
684 | schedule_delayed_work(&(tz->poll_queue), | 684 | queue_delayed_work(system_freezable_wq, &(tz->poll_queue), |
685 | msecs_to_jiffies(delay)); | 685 | msecs_to_jiffies(delay)); |
686 | } | 686 | } |
687 | 687 | ||
diff --git a/drivers/tty/n_gsm.c b/drivers/tty/n_gsm.c index 4cb0d0a3e57b..fc7bbba585ce 100644 --- a/drivers/tty/n_gsm.c +++ b/drivers/tty/n_gsm.c | |||
@@ -66,14 +66,16 @@ | |||
66 | static int debug; | 66 | static int debug; |
67 | module_param(debug, int, 0600); | 67 | module_param(debug, int, 0600); |
68 | 68 | ||
69 | #define T1 (HZ/10) | 69 | /* Defaults: these are from the specification */ |
70 | #define T2 (HZ/3) | 70 | |
71 | #define N2 3 | 71 | #define T1 10 /* 100mS */ |
72 | #define T2 34 /* 333mS */ | ||
73 | #define N2 3 /* Retry 3 times */ | ||
72 | 74 | ||
73 | /* Use long timers for testing at low speed with debug on */ | 75 | /* Use long timers for testing at low speed with debug on */ |
74 | #ifdef DEBUG_TIMING | 76 | #ifdef DEBUG_TIMING |
75 | #define T1 HZ | 77 | #define T1 100 |
76 | #define T2 (2 * HZ) | 78 | #define T2 200 |
77 | #endif | 79 | #endif |
78 | 80 | ||
79 | /* | 81 | /* |
diff --git a/drivers/tty/serial/sh-sci.c b/drivers/tty/serial/sh-sci.c index 1945c70539c2..aff9d612dff0 100644 --- a/drivers/tty/serial/sh-sci.c +++ b/drivers/tty/serial/sh-sci.c | |||
@@ -207,6 +207,25 @@ static struct plat_sci_reg sci_regmap[SCIx_NR_REGTYPES][SCIx_NR_REGS] = { | |||
207 | }, | 207 | }, |
208 | 208 | ||
209 | /* | 209 | /* |
210 | * Common SH-2(A) SCIF definitions for ports with FIFO data | ||
211 | * count registers. | ||
212 | */ | ||
213 | [SCIx_SH2_SCIF_FIFODATA_REGTYPE] = { | ||
214 | [SCSMR] = { 0x00, 16 }, | ||
215 | [SCBRR] = { 0x04, 8 }, | ||
216 | [SCSCR] = { 0x08, 16 }, | ||
217 | [SCxTDR] = { 0x0c, 8 }, | ||
218 | [SCxSR] = { 0x10, 16 }, | ||
219 | [SCxRDR] = { 0x14, 8 }, | ||
220 | [SCFCR] = { 0x18, 16 }, | ||
221 | [SCFDR] = { 0x1c, 16 }, | ||
222 | [SCTFDR] = sci_reg_invalid, | ||
223 | [SCRFDR] = sci_reg_invalid, | ||
224 | [SCSPTR] = { 0x20, 16 }, | ||
225 | [SCLSR] = { 0x24, 16 }, | ||
226 | }, | ||
227 | |||
228 | /* | ||
210 | * Common SH-3 SCIF definitions. | 229 | * Common SH-3 SCIF definitions. |
211 | */ | 230 | */ |
212 | [SCIx_SH3_SCIF_REGTYPE] = { | 231 | [SCIx_SH3_SCIF_REGTYPE] = { |
diff --git a/drivers/virtio/virtio_pci.c b/drivers/virtio/virtio_pci.c index 79a31e5b4b68..3d1bf41e8892 100644 --- a/drivers/virtio/virtio_pci.c +++ b/drivers/virtio/virtio_pci.c | |||
@@ -594,11 +594,11 @@ static struct virtio_config_ops virtio_pci_config_ops = { | |||
594 | 594 | ||
595 | static void virtio_pci_release_dev(struct device *_d) | 595 | static void virtio_pci_release_dev(struct device *_d) |
596 | { | 596 | { |
597 | struct virtio_device *dev = container_of(_d, struct virtio_device, | 597 | /* |
598 | dev); | 598 | * No need for a release method as we allocate/free |
599 | struct virtio_pci_device *vp_dev = to_vp_device(dev); | 599 | * all devices together with the pci devices. |
600 | 600 | * Provide an empty one to avoid getting a warning from core. | |
601 | kfree(vp_dev); | 601 | */ |
602 | } | 602 | } |
603 | 603 | ||
604 | /* the PCI probing function */ | 604 | /* the PCI probing function */ |
@@ -686,6 +686,7 @@ static void __devexit virtio_pci_remove(struct pci_dev *pci_dev) | |||
686 | pci_iounmap(pci_dev, vp_dev->ioaddr); | 686 | pci_iounmap(pci_dev, vp_dev->ioaddr); |
687 | pci_release_regions(pci_dev); | 687 | pci_release_regions(pci_dev); |
688 | pci_disable_device(pci_dev); | 688 | pci_disable_device(pci_dev); |
689 | kfree(vp_dev); | ||
689 | } | 690 | } |
690 | 691 | ||
691 | #ifdef CONFIG_PM | 692 | #ifdef CONFIG_PM |