Diffstat (limited to 'drivers')
-rw-r--r--  drivers/acpi/ec.c              22
-rw-r--r--  drivers/acpi/internal.h         5
-rw-r--r--  drivers/acpi/processor_idle.c  17
-rw-r--r--  drivers/acpi/sleep.c           57
-rw-r--r--  drivers/char/Kconfig            1
-rw-r--r--  drivers/misc/vmware_balloon.c  18
-rw-r--r--  drivers/mmc/host/omap.c         1
-rw-r--r--  drivers/rtc/rtc-s3c.c           9
-rw-r--r--  drivers/sfi/sfi_core.c          4
-rw-r--r--  drivers/usb/gadget/f_audio.c    4
-rw-r--r--  drivers/video/Kconfig           6
-rw-r--r--  drivers/video/fb_defio.c       52
12 files changed, 103 insertions(+), 93 deletions(-)
diff --git a/drivers/acpi/ec.c b/drivers/acpi/ec.c
index e61d4f8e62a5..5f2027d782e8 100644
--- a/drivers/acpi/ec.c
+++ b/drivers/acpi/ec.c
@@ -79,7 +79,7 @@ enum {
 	EC_FLAGS_GPE_STORM,		/* GPE storm detected */
 	EC_FLAGS_HANDLERS_INSTALLED,	/* Handlers for GPE and
 					 * OpReg are installed */
-	EC_FLAGS_FROZEN,		/* Transactions are suspended */
+	EC_FLAGS_BLOCKED,		/* Transactions are blocked */
 };
 
 /* If we find an EC via the ECDT, we need to keep a ptr to its context */
@@ -293,7 +293,7 @@ static int acpi_ec_transaction(struct acpi_ec *ec, struct transaction *t)
 	if (t->rdata)
 		memset(t->rdata, 0, t->rlen);
 	mutex_lock(&ec->lock);
-	if (test_bit(EC_FLAGS_FROZEN, &ec->flags)) {
+	if (test_bit(EC_FLAGS_BLOCKED, &ec->flags)) {
 		status = -EINVAL;
 		goto unlock;
 	}
@@ -459,7 +459,7 @@ int ec_transaction(u8 command,
 
 EXPORT_SYMBOL(ec_transaction);
 
-void acpi_ec_suspend_transactions(void)
+void acpi_ec_block_transactions(void)
 {
 	struct acpi_ec *ec = first_ec;
 
@@ -468,11 +468,11 @@ void acpi_ec_suspend_transactions(void)
 
 	mutex_lock(&ec->lock);
 	/* Prevent transactions from being carried out */
-	set_bit(EC_FLAGS_FROZEN, &ec->flags);
+	set_bit(EC_FLAGS_BLOCKED, &ec->flags);
 	mutex_unlock(&ec->lock);
 }
 
-void acpi_ec_resume_transactions(void)
+void acpi_ec_unblock_transactions(void)
 {
 	struct acpi_ec *ec = first_ec;
 
@@ -481,10 +481,20 @@ void acpi_ec_resume_transactions(void)
 
 	mutex_lock(&ec->lock);
 	/* Allow transactions to be carried out again */
-	clear_bit(EC_FLAGS_FROZEN, &ec->flags);
+	clear_bit(EC_FLAGS_BLOCKED, &ec->flags);
 	mutex_unlock(&ec->lock);
 }
 
+void acpi_ec_unblock_transactions_early(void)
+{
+	/*
+	 * Allow transactions to happen again (this function is called from
+	 * atomic context during wakeup, so we don't need to acquire the mutex).
+	 */
+	if (first_ec)
+		clear_bit(EC_FLAGS_BLOCKED, &first_ec->flags);
+}
+
 static int acpi_ec_query_unlocked(struct acpi_ec *ec, u8 * data)
 {
 	int result;
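Note: the block/unblock helpers take ec->lock, but the new acpi_ec_unblock_transactions_early() deliberately does not: it runs during wakeup with interrupts still disabled, where sleeping on a mutex is forbidden, and clear_bit() is atomic anyway. A condensed sketch of the resulting call order (illustrative only; the real call sites are in the sleep.c hunks below):

	acpi_ec_block_transactions();		/* process context: takes ec->lock */
	/* ... platform enters the sleep state; wakeup runs with IRQs off ... */
	acpi_ec_unblock_transactions_early();	/* atomic context: lockless clear_bit() */
	/* ... later, back in full process context ... */
	acpi_ec_unblock_transactions();		/* idempotent: takes ec->lock again */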
diff --git a/drivers/acpi/internal.h b/drivers/acpi/internal.h
index e28411367239..f8f190ec066e 100644
--- a/drivers/acpi/internal.h
+++ b/drivers/acpi/internal.h
@@ -49,8 +49,9 @@ void acpi_early_processor_set_pdc(void);
 int acpi_ec_init(void);
 int acpi_ec_ecdt_probe(void);
 int acpi_boot_ec_enable(void);
-void acpi_ec_suspend_transactions(void);
-void acpi_ec_resume_transactions(void);
+void acpi_ec_block_transactions(void);
+void acpi_ec_unblock_transactions(void);
+void acpi_ec_unblock_transactions_early(void);
 
 /*--------------------------------------------------------------------------
 					Suspend/Resume
diff --git a/drivers/acpi/processor_idle.c b/drivers/acpi/processor_idle.c
index 2e8c27d48f2b..b1b385692f46 100644
--- a/drivers/acpi/processor_idle.c
+++ b/drivers/acpi/processor_idle.c
@@ -80,7 +80,7 @@ module_param(nocst, uint, 0000);
 static unsigned int latency_factor __read_mostly = 2;
 module_param(latency_factor, uint, 0644);
 
-static s64 us_to_pm_timer_ticks(s64 t)
+static u64 us_to_pm_timer_ticks(s64 t)
 {
 	return div64_u64(t * PM_TIMER_FREQUENCY, 1000000);
 }
@@ -731,10 +731,10 @@ static int acpi_processor_power_seq_show(struct seq_file *seq, void *offset)
 
 		seq_puts(seq, "demotion[--] ");
 
-		seq_printf(seq, "latency[%03d] usage[%08d] duration[%020llu]\n",
+		seq_printf(seq, "latency[%03d] usage[%08d] duration[%020Lu]\n",
 			   pr->power.states[i].latency,
 			   pr->power.states[i].usage,
-			   (unsigned long long)pr->power.states[i].time);
+			   us_to_pm_timer_ticks(pr->power.states[i].time));
 	}
 
       end:
@@ -861,7 +861,6 @@ static int acpi_idle_enter_simple(struct cpuidle_device *dev,
 	ktime_t kt1, kt2;
 	s64 idle_time_ns;
 	s64 idle_time;
-	s64 sleep_ticks = 0;
 
 	pr = __get_cpu_var(processors);
@@ -906,8 +905,6 @@ static int acpi_idle_enter_simple(struct cpuidle_device *dev,
 	idle_time = idle_time_ns;
 	do_div(idle_time, NSEC_PER_USEC);
 
-	sleep_ticks = us_to_pm_timer_ticks(idle_time);
-
 	/* Tell the scheduler how much we idled: */
 	sched_clock_idle_wakeup_event(idle_time_ns);
 
@@ -918,7 +915,7 @@ static int acpi_idle_enter_simple(struct cpuidle_device *dev,
 	cx->usage++;
 
 	lapic_timer_state_broadcast(pr, cx, 0);
-	cx->time += sleep_ticks;
+	cx->time += idle_time;
 	return idle_time;
 }
 
@@ -940,7 +937,6 @@ static int acpi_idle_enter_bm(struct cpuidle_device *dev,
 	ktime_t kt1, kt2;
 	s64 idle_time_ns;
 	s64 idle_time;
-	s64 sleep_ticks = 0;
 
 
 	pr = __get_cpu_var(processors);
@@ -1022,11 +1018,10 @@ static int acpi_idle_enter_bm(struct cpuidle_device *dev,
 		spin_unlock(&c3_lock);
 	}
 	kt2 = ktime_get_real();
-	idle_time_ns = ktime_to_us(ktime_sub(kt2, kt1));
+	idle_time_ns = ktime_to_ns(ktime_sub(kt2, kt1));
 	idle_time = idle_time_ns;
 	do_div(idle_time, NSEC_PER_USEC);
 
-	sleep_ticks = us_to_pm_timer_ticks(idle_time);
 	/* Tell the scheduler how much we idled: */
 	sched_clock_idle_wakeup_event(idle_time_ns);
 
@@ -1037,7 +1032,7 @@ static int acpi_idle_enter_bm(struct cpuidle_device *dev,
 	cx->usage++;
 
 	lapic_timer_state_broadcast(pr, cx, 0);
-	cx->time += sleep_ticks;
+	cx->time += idle_time;
 	return idle_time;
 }
 
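Note: after this change cx->time accumulates microseconds directly, and the conversion to PM timer ticks happens once, at display time in acpi_processor_power_seq_show(). A quick userspace sanity check of that conversion; the local PM_TIMER_FREQUENCY value below assumes the ACPI PM timer's standard 3.579545 MHz rate:

	#include <stdio.h>
	#include <stdint.h>

	#define PM_TIMER_FREQUENCY 3579545	/* assumed: ACPI PM timer, 3.579545 MHz */

	/* Mirrors us_to_pm_timer_ticks() above, minus the kernel's div64_u64(). */
	static uint64_t us_to_pm_timer_ticks(int64_t t)
	{
		return (uint64_t)(t * PM_TIMER_FREQUENCY) / 1000000;
	}

	int main(void)
	{
		/* 1000 us of idle time comes out as 3579 PM timer ticks. */
		printf("%llu\n", (unsigned long long)us_to_pm_timer_ticks(1000));
		return 0;
	}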
diff --git a/drivers/acpi/sleep.c b/drivers/acpi/sleep.c
index 4ab2275b4461..3fb4bdea7e06 100644
--- a/drivers/acpi/sleep.c
+++ b/drivers/acpi/sleep.c
@@ -94,11 +94,13 @@ void __init acpi_old_suspend_ordering(void)
 }
 
 /**
- * acpi_pm_disable_gpes - Disable the GPEs.
+ * acpi_pm_freeze - Disable the GPEs and suspend EC transactions.
  */
-static int acpi_pm_disable_gpes(void)
+static int acpi_pm_freeze(void)
 {
 	acpi_disable_all_gpes();
+	acpi_os_wait_events_complete(NULL);
+	acpi_ec_block_transactions();
 	return 0;
 }
 
@@ -126,7 +128,8 @@ static int acpi_pm_prepare(void)
 	int error = __acpi_pm_prepare();
 
 	if (!error)
-		acpi_disable_all_gpes();
+		acpi_pm_freeze();
+
 	return error;
 }
 
@@ -256,6 +259,8 @@ static int acpi_suspend_enter(suspend_state_t pm_state)
 	 * acpi_leave_sleep_state will reenable specific GPEs later
 	 */
 	acpi_disable_all_gpes();
+	/* Allow EC transactions to happen. */
+	acpi_ec_unblock_transactions_early();
 
 	local_irq_restore(flags);
 	printk(KERN_DEBUG "Back to C!\n");
@@ -267,6 +272,12 @@ static int acpi_suspend_enter(suspend_state_t pm_state)
 	return ACPI_SUCCESS(status) ? 0 : -EFAULT;
 }
 
+static void acpi_suspend_finish(void)
+{
+	acpi_ec_unblock_transactions();
+	acpi_pm_finish();
+}
+
 static int acpi_suspend_state_valid(suspend_state_t pm_state)
 {
 	u32 acpi_state;
@@ -288,7 +299,7 @@ static struct platform_suspend_ops acpi_suspend_ops = {
 	.begin = acpi_suspend_begin,
 	.prepare_late = acpi_pm_prepare,
 	.enter = acpi_suspend_enter,
-	.wake = acpi_pm_finish,
+	.wake = acpi_suspend_finish,
 	.end = acpi_pm_end,
 };
 
@@ -314,9 +325,9 @@ static int acpi_suspend_begin_old(suspend_state_t pm_state)
 static struct platform_suspend_ops acpi_suspend_ops_old = {
 	.valid = acpi_suspend_state_valid,
 	.begin = acpi_suspend_begin_old,
-	.prepare_late = acpi_pm_disable_gpes,
+	.prepare_late = acpi_pm_freeze,
 	.enter = acpi_suspend_enter,
-	.wake = acpi_pm_finish,
+	.wake = acpi_suspend_finish,
 	.end = acpi_pm_end,
 	.recover = acpi_pm_finish,
 };
@@ -433,6 +444,7 @@ static int acpi_hibernation_enter(void)
 static void acpi_hibernation_finish(void)
 {
 	hibernate_nvs_free();
+	acpi_ec_unblock_transactions();
 	acpi_pm_finish();
 }
 
@@ -453,19 +465,13 @@ static void acpi_hibernation_leave(void)
 	}
 	/* Restore the NVS memory area */
 	hibernate_nvs_restore();
+	/* Allow EC transactions to happen. */
+	acpi_ec_unblock_transactions_early();
 }
 
-static int acpi_pm_pre_restore(void)
-{
-	acpi_disable_all_gpes();
-	acpi_os_wait_events_complete(NULL);
-	acpi_ec_suspend_transactions();
-	return 0;
-}
-
-static void acpi_pm_restore_cleanup(void)
+static void acpi_pm_thaw(void)
 {
-	acpi_ec_resume_transactions();
+	acpi_ec_unblock_transactions();
 	acpi_enable_all_runtime_gpes();
 }
 
@@ -477,8 +483,8 @@ static struct platform_hibernation_ops acpi_hibernation_ops = {
 	.prepare = acpi_pm_prepare,
 	.enter = acpi_hibernation_enter,
 	.leave = acpi_hibernation_leave,
-	.pre_restore = acpi_pm_pre_restore,
-	.restore_cleanup = acpi_pm_restore_cleanup,
+	.pre_restore = acpi_pm_freeze,
+	.restore_cleanup = acpi_pm_thaw,
 };
 
 /**
@@ -510,12 +516,9 @@ static int acpi_hibernation_begin_old(void)
 
 static int acpi_hibernation_pre_snapshot_old(void)
 {
-	int error = acpi_pm_disable_gpes();
-
-	if (!error)
-		hibernate_nvs_save();
-
-	return error;
+	acpi_pm_freeze();
+	hibernate_nvs_save();
+	return 0;
 }
 
 /*
@@ -527,11 +530,11 @@ static struct platform_hibernation_ops acpi_hibernation_ops_old = {
 	.end = acpi_pm_end,
 	.pre_snapshot = acpi_hibernation_pre_snapshot_old,
 	.finish = acpi_hibernation_finish,
-	.prepare = acpi_pm_disable_gpes,
+	.prepare = acpi_pm_freeze,
 	.enter = acpi_hibernation_enter,
 	.leave = acpi_hibernation_leave,
-	.pre_restore = acpi_pm_pre_restore,
-	.restore_cleanup = acpi_pm_restore_cleanup,
+	.pre_restore = acpi_pm_freeze,
+	.restore_cleanup = acpi_pm_thaw,
 	.recover = acpi_pm_finish,
 };
 #endif /* CONFIG_HIBERNATION */
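Note: acpi_pm_freeze() and acpi_pm_thaw() now form a symmetric pair shared by suspend, hibernation, and restore. The internal ordering matters; sketched as comments (a restatement of the code above plus my reading of the rationale, not stated in the patch itself):

	/* Freeze: 1) acpi_disable_all_gpes()            - no new SCI-driven events
	 *         2) acpi_os_wait_events_complete(NULL) - drain handlers in flight
	 *         3) acpi_ec_block_transactions()       - only now is it safe to
	 *            block, so no handler can race a new EC transaction in
	 * Thaw:   unblock the EC first, then re-enable runtime GPEs, so GPE
	 *         handlers that fire immediately can already reach the EC
	 */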
diff --git a/drivers/char/Kconfig b/drivers/char/Kconfig
index f09fc0e2062d..7cfcc629a7fd 100644
--- a/drivers/char/Kconfig
+++ b/drivers/char/Kconfig
@@ -1123,6 +1123,7 @@ source "drivers/s390/char/Kconfig"
 
 config RAMOOPS
 	tristate "Log panic/oops to a RAM buffer"
+	depends on HAS_IOMEM
 	default n
 	help
 	  This enables panic and oops messages to be logged to a circular
diff --git a/drivers/misc/vmware_balloon.c b/drivers/misc/vmware_balloon.c
index db9cd0240c6f..2a1e804a71aa 100644
--- a/drivers/misc/vmware_balloon.c
+++ b/drivers/misc/vmware_balloon.c
@@ -45,7 +45,7 @@
 
 MODULE_AUTHOR("VMware, Inc.");
 MODULE_DESCRIPTION("VMware Memory Control (Balloon) Driver");
-MODULE_VERSION("1.2.1.0-K");
+MODULE_VERSION("1.2.1.1-k");
 MODULE_ALIAS("dmi:*:svnVMware*:*");
 MODULE_ALIAS("vmware_vmmemctl");
 MODULE_LICENSE("GPL");
@@ -101,6 +101,8 @@ MODULE_LICENSE("GPL");
 /* Maximum number of page allocations without yielding processor */
 #define VMW_BALLOON_YIELD_THRESHOLD	1024
 
+/* Maximum number of refused pages we accumulate during inflation cycle */
+#define VMW_BALLOON_MAX_REFUSED		16
 
 /*
  * Hypervisor communication port definitions.
@@ -183,6 +185,7 @@ struct vmballoon {
 
 	/* transient list of non-balloonable pages */
 	struct list_head refused_pages;
+	unsigned int n_refused_pages;
 
 	/* balloon size in pages */
 	unsigned int size;
@@ -428,14 +431,21 @@ static int vmballoon_reserve_page(struct vmballoon *b, bool can_sleep)
 		/* inform monitor */
 		locked = vmballoon_send_lock_page(b, page_to_pfn(page));
 		if (!locked) {
+			STATS_INC(b->stats.refused_alloc);
+
 			if (b->reset_required) {
 				__free_page(page);
 				return -EIO;
 			}
 
-			/* place on list of non-balloonable pages, retry allocation */
+			/*
+			 * Place page on the list of non-balloonable pages
+			 * and retry allocation, unless we already accumulated
+			 * too many of them, in which case take a breather.
+			 */
 			list_add(&page->lru, &b->refused_pages);
-			STATS_INC(b->stats.refused_alloc);
+			if (++b->n_refused_pages >= VMW_BALLOON_MAX_REFUSED)
+				return -EIO;
 		}
 	} while (!locked);
 
@@ -483,6 +493,8 @@ static void vmballoon_release_refused_pages(struct vmballoon *b)
 		__free_page(page);
 		STATS_INC(b->stats.refused_free);
 	}
+
+	b->n_refused_pages = 0;
 }
 
 /*
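Note: the n_refused_pages counter turns a potentially unbounded retry loop into a bounded one: after VMW_BALLOON_MAX_REFUSED refusals in one inflation cycle the driver backs off with -EIO, and the counter resets when the refused pages are released. A stripped-down sketch of the pattern, with hypothetical names:

	#include <stdbool.h>

	#define MAX_REFUSED 16	/* mirrors VMW_BALLOON_MAX_REFUSED */

	/* Retry an operation the other side may refuse, but give up for this
	 * cycle once too many refusals pile up, instead of spinning forever. */
	static int reserve_with_bounded_retries(bool (*try_reserve)(void))
	{
		unsigned int refused = 0;

		for (;;) {
			if (try_reserve())
				return 0;
			if (++refused >= MAX_REFUSED)
				return -1;	/* back off; caller retries next cycle */
		}
	}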
diff --git a/drivers/mmc/host/omap.c b/drivers/mmc/host/omap.c
index 2b281680e320..d98ddcfac5e5 100644
--- a/drivers/mmc/host/omap.c
+++ b/drivers/mmc/host/omap.c
@@ -1157,7 +1157,6 @@ static void mmc_omap_start_request(struct mmc_omap_host *host,
 	mmc_omap_start_command(host, req->cmd);
 	if (host->dma_in_use)
 		omap_start_dma(host->dma_ch);
-	BUG_ON(irqs_disabled());
 }
 
 static void mmc_omap_request(struct mmc_host *mmc, struct mmc_request *req)
diff --git a/drivers/rtc/rtc-s3c.c b/drivers/rtc/rtc-s3c.c
index e5972b2c17b7..70b68d35f969 100644
--- a/drivers/rtc/rtc-s3c.c
+++ b/drivers/rtc/rtc-s3c.c
@@ -495,8 +495,6 @@ static int __devinit s3c_rtc_probe(struct platform_device *pdev)
 	pr_debug("s3c2410_rtc: RTCCON=%02x\n",
 		 readb(s3c_rtc_base + S3C2410_RTCCON));
 
-	s3c_rtc_setfreq(&pdev->dev, 1);
-
 	device_init_wakeup(&pdev->dev, 1);
 
 	/* register RTC and exit */
@@ -510,14 +508,17 @@ static int __devinit s3c_rtc_probe(struct platform_device *pdev)
 		goto err_nortc;
 	}
 
+	s3c_rtc_cpu_type = platform_get_device_id(pdev)->driver_data;
+
 	if (s3c_rtc_cpu_type == TYPE_S3C64XX)
 		rtc->max_user_freq = 32768;
 	else
 		rtc->max_user_freq = 128;
 
-	s3c_rtc_cpu_type = platform_get_device_id(pdev)->driver_data;
-
 	platform_set_drvdata(pdev, rtc);
+
+	s3c_rtc_setfreq(&pdev->dev, 1);
+
 	return 0;
 
 err_nortc:
diff --git a/drivers/sfi/sfi_core.c b/drivers/sfi/sfi_core.c
index 005195958647..ceba593dc84f 100644
--- a/drivers/sfi/sfi_core.c
+++ b/drivers/sfi/sfi_core.c
@@ -441,8 +441,10 @@ struct sfi_table_attr __init *sfi_sysfs_install_table(u64 pa)
 
 	ret = sysfs_create_bin_file(tables_kobj,
 				&tbl_attr->attr);
-	if (ret)
+	if (ret) {
 		kfree(tbl_attr);
+		tbl_attr = NULL;
+	}
 
 	sfi_unmap_table(th);
 	return tbl_attr;
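Note: the bug here was a dangling return: on sysfs_create_bin_file() failure the old code freed tbl_attr yet still returned it to the caller. The fix NULLs the pointer so failure is unambiguous. The same free-then-NULL idiom in a generic, hypothetical form (register_thing() and struct thing stand in for whatever can fail after allocation):

	struct thing *make_thing(void)
	{
		struct thing *t = kzalloc(sizeof(*t), GFP_KERNEL);

		if (t && register_thing(t)) {	/* registration failed */
			kfree(t);
			t = NULL;		/* never hand back freed memory */
		}
		return t;			/* NULL cleanly signals failure */
	}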
diff --git a/drivers/usb/gadget/f_audio.c b/drivers/usb/gadget/f_audio.c
index 43bf44514c41..b91115f84b13 100644
--- a/drivers/usb/gadget/f_audio.c
+++ b/drivers/usb/gadget/f_audio.c
@@ -101,7 +101,7 @@ static struct uac_feature_unit_descriptor_0 feature_unit_desc = {
 static struct usb_audio_control mute_control = {
 	.list = LIST_HEAD_INIT(mute_control.list),
 	.name = "Mute Control",
-	.type = UAC_MUTE_CONTROL,
+	.type = UAC_FU_MUTE,
 	/* Todo: add real Mute control code */
 	.set = generic_set_cmd,
 	.get = generic_get_cmd,
@@ -110,7 +110,7 @@ static struct usb_audio_control mute_control = {
 static struct usb_audio_control volume_control = {
 	.list = LIST_HEAD_INIT(volume_control.list),
 	.name = "Volume Control",
-	.type = UAC_VOLUME_CONTROL,
+	.type = UAC_FU_VOLUME,
 	/* Todo: add real Volume control code */
 	.set = generic_set_cmd,
 	.get = generic_get_cmd,
diff --git a/drivers/video/Kconfig b/drivers/video/Kconfig
index 1e6fec487973..3d94a1471724 100644
--- a/drivers/video/Kconfig
+++ b/drivers/video/Kconfig
@@ -8,6 +8,9 @@ menu "Graphics support"
 config HAVE_FB_ATMEL
 	bool
 
+config HAVE_FB_IMX
+	bool
+
 source "drivers/char/agp/Kconfig"
 
 source "drivers/gpu/vga/Kconfig"
@@ -400,9 +403,6 @@ config FB_SA1100
 	  If you plan to use the LCD display with your SA-1100 system, say
 	  Y here.
 
-config HAVE_FB_IMX
-	bool
-
 config FB_IMX
 	tristate "Motorola i.MX LCD support"
 	depends on FB && (HAVE_FB_IMX || ARCH_MX1 || ARCH_MX2)
diff --git a/drivers/video/fb_defio.c b/drivers/video/fb_defio.c
index 073c9b408cf7..6b93ef93cb12 100644
--- a/drivers/video/fb_defio.c
+++ b/drivers/video/fb_defio.c
@@ -100,6 +100,16 @@ static int fb_deferred_io_mkwrite(struct vm_area_struct *vma,
 	/* protect against the workqueue changing the page list */
 	mutex_lock(&fbdefio->lock);
 
+	/*
+	 * We want the page to remain locked from ->page_mkwrite until
+	 * the PTE is marked dirty to avoid page_mkclean() being called
+	 * before the PTE is updated, which would leave the page ignored
+	 * by defio.
+	 * Do this by locking the page here and informing the caller
+	 * about it with VM_FAULT_LOCKED.
+	 */
+	lock_page(page);
+
 	/* we loop through the pagelist before adding in order
 	to keep the pagelist sorted */
 	list_for_each_entry(cur, &fbdefio->pagelist, lru) {
@@ -121,7 +131,7 @@ page_already_added:
 
 	/* come back after delay to process the deferred IO */
 	schedule_delayed_work(&info->deferred_work, fbdefio->delay);
-	return 0;
+	return VM_FAULT_LOCKED;
 }
 
 static const struct vm_operations_struct fb_deferred_io_vm_ops = {
@@ -155,41 +165,25 @@ static void fb_deferred_io_work(struct work_struct *work)
 {
 	struct fb_info *info = container_of(work, struct fb_info,
 						deferred_work.work);
+	struct list_head *node, *next;
+	struct page *cur;
 	struct fb_deferred_io *fbdefio = info->fbdefio;
-	struct page *page, *tmp_page;
-	struct list_head *node, *tmp_node;
-	struct list_head non_dirty;
-
-	INIT_LIST_HEAD(&non_dirty);
 
 	/* here we mkclean the pages, then do all deferred IO */
 	mutex_lock(&fbdefio->lock);
-	list_for_each_entry_safe(page, tmp_page, &fbdefio->pagelist, lru) {
-		lock_page(page);
-		/*
-		 * The workqueue callback can be triggered after a
-		 * ->page_mkwrite() call but before the PTE has been marked
-		 * dirty. In this case page_mkclean() won't "rearm" the page.
-		 *
-		 * To avoid this, remove those "non-dirty" pages from the
-		 * pagelist before calling the driver's callback, then add
-		 * them back to get processed on the next work iteration.
-		 * At that time, their PTEs will hopefully be dirty for real.
-		 */
-		if (!page_mkclean(page))
-			list_move_tail(&page->lru, &non_dirty);
-		unlock_page(page);
+	list_for_each_entry(cur, &fbdefio->pagelist, lru) {
+		lock_page(cur);
+		page_mkclean(cur);
+		unlock_page(cur);
 	}
 
 	/* driver's callback with pagelist */
 	fbdefio->deferred_io(info, &fbdefio->pagelist);
 
-	/* clear the list... */
-	list_for_each_safe(node, tmp_node, &fbdefio->pagelist) {
+	/* clear the list */
+	list_for_each_safe(node, next, &fbdefio->pagelist) {
 		list_del(node);
 	}
-	/* ... and add back the "non-dirty" pages to the list */
-	list_splice_tail(&non_dirty, &fbdefio->pagelist);
 	mutex_unlock(&fbdefio->lock);
 }
 
@@ -218,7 +212,6 @@ EXPORT_SYMBOL_GPL(fb_deferred_io_open);
 void fb_deferred_io_cleanup(struct fb_info *info)
 {
 	struct fb_deferred_io *fbdefio = info->fbdefio;
-	struct list_head *node, *tmp_node;
 	struct page *page;
 	int i;
 
@@ -226,13 +219,6 @@ void fb_deferred_io_cleanup(struct fb_info *info)
 	cancel_delayed_work(&info->deferred_work);
 	flush_scheduled_work();
 
-	/* the list may have still some non-dirty pages at this point */
-	mutex_lock(&fbdefio->lock);
-	list_for_each_safe(node, tmp_node, &fbdefio->pagelist) {
-		list_del(node);
-	}
-	mutex_unlock(&fbdefio->lock);
-
 	/* clear out the mapping that we setup */
 	for (i = 0 ; i < info->fix.smem_len; i += PAGE_SIZE) {
 		page = fb_deferred_io_page(info, i);
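Note: this change restores the VM_FAULT_LOCKED contract: ->page_mkwrite returns with the page still locked, and the mm core marks the PTE dirty before unlocking, so the worker's page_mkclean() can never observe a written page whose PTE is still clean. A minimal sketch of a handler following that contract (illustrative only; error handling elided, and the page_mkwrite signature of this kernel era is assumed):

	static int example_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf)
	{
		struct page *page = vmf->page;

		lock_page(page);	/* hold the lock until the PTE is dirtied */
		/* ... record the page for deferred I/O, schedule the worker ... */
		return VM_FAULT_LOCKED;	/* mm core dirties the PTE, then unlocks */
	}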