Diffstat (limited to 'drivers')
38 files changed, 1046 insertions, 253 deletions
diff --git a/drivers/acpi/processor_idle.c b/drivers/acpi/processor_idle.c
index 6c6751b1405b..8206fc1ecc58 100644
--- a/drivers/acpi/processor_idle.c
+++ b/drivers/acpi/processor_idle.c
@@ -39,6 +39,17 @@ | |||
39 | #include <linux/moduleparam.h> | 39 | #include <linux/moduleparam.h> |
40 | #include <linux/sched.h> /* need_resched() */ | 40 | #include <linux/sched.h> /* need_resched() */ |
41 | #include <linux/latency.h> | 41 | #include <linux/latency.h> |
42 | #include <linux/clockchips.h> | ||
43 | |||
44 | /* | ||
45 | * Include the apic definitions for x86 to have the APIC timer related defines | ||
46 | * available also for UP (on SMP it gets magically included via linux/smp.h). | ||
47 | * asm/acpi.h is not an option, as it would require more include magic. Also | ||
48 | * creating an empty asm-ia64/apic.h would just trade pest vs. cholera. | ||
49 | */ | ||
50 | #ifdef CONFIG_X86 | ||
51 | #include <asm/apic.h> | ||
52 | #endif | ||
42 | 53 | ||
43 | #include <asm/io.h> | 54 | #include <asm/io.h> |
44 | #include <asm/uaccess.h> | 55 | #include <asm/uaccess.h> |
@@ -238,6 +249,81 @@ static void acpi_cstate_enter(struct acpi_processor_cx *cstate) | |||
238 | } | 249 | } |
239 | } | 250 | } |
240 | 251 | ||
252 | #ifdef ARCH_APICTIMER_STOPS_ON_C3 | ||
253 | |||
254 | /* | ||
255 | * Some BIOS implementations switch to C3 in the published C2 state. | ||
256 | * This seems to be a common problem on AMD boxen, but other vendors | ||
257 | * are affected too. We pick the most conservative approach: we assume | ||
258 | * that the local APIC stops in both C2 and C3. | ||
259 | */ | ||
260 | static void acpi_timer_check_state(int state, struct acpi_processor *pr, | ||
261 | struct acpi_processor_cx *cx) | ||
262 | { | ||
263 | struct acpi_processor_power *pwr = &pr->power; | ||
264 | |||
265 | /* | ||
266 | * Check, if one of the previous states already marked the lapic | ||
267 | * unstable | ||
268 | */ | ||
269 | if (pwr->timer_broadcast_on_state < state) | ||
270 | return; | ||
271 | |||
272 | if (cx->type >= ACPI_STATE_C2) | ||
273 | pr->power.timer_broadcast_on_state = state; | ||
274 | } | ||
275 | |||
276 | static void acpi_propagate_timer_broadcast(struct acpi_processor *pr) | ||
277 | { | ||
278 | #ifdef CONFIG_GENERIC_CLOCKEVENTS | ||
279 | unsigned long reason; | ||
280 | |||
281 | reason = pr->power.timer_broadcast_on_state < INT_MAX ? | ||
282 | CLOCK_EVT_NOTIFY_BROADCAST_ON : CLOCK_EVT_NOTIFY_BROADCAST_OFF; | ||
283 | |||
284 | clockevents_notify(reason, &pr->id); | ||
285 | #else | ||
286 | cpumask_t mask = cpumask_of_cpu(pr->id); | ||
287 | |||
288 | if (pr->power.timer_broadcast_on_state < INT_MAX) | ||
289 | on_each_cpu(switch_APIC_timer_to_ipi, &mask, 1, 1); | ||
290 | else | ||
291 | on_each_cpu(switch_ipi_to_APIC_timer, &mask, 1, 1); | ||
292 | #endif | ||
293 | } | ||
294 | |||
295 | /* Power(C) State timer broadcast control */ | ||
296 | static void acpi_state_timer_broadcast(struct acpi_processor *pr, | ||
297 | struct acpi_processor_cx *cx, | ||
298 | int broadcast) | ||
299 | { | ||
300 | #ifdef CONFIG_GENERIC_CLOCKEVENTS | ||
301 | |||
302 | int state = cx - pr->power.states; | ||
303 | |||
304 | if (state >= pr->power.timer_broadcast_on_state) { | ||
305 | unsigned long reason; | ||
306 | |||
307 | reason = broadcast ? CLOCK_EVT_NOTIFY_BROADCAST_ENTER : | ||
308 | CLOCK_EVT_NOTIFY_BROADCAST_EXIT; | ||
309 | clockevents_notify(reason, &pr->id); | ||
310 | } | ||
311 | #endif | ||
312 | } | ||
313 | |||
314 | #else | ||
315 | |||
316 | static void acpi_timer_check_state(int state, struct acpi_processor *pr, | ||
317 | struct acpi_processor_cx *cstate) { } | ||
318 | static void acpi_propagate_timer_broadcast(struct acpi_processor *pr) { } | ||
319 | static void acpi_state_timer_broadcast(struct acpi_processor *pr, | ||
320 | struct acpi_processor_cx *cx, | ||
321 | int broadcast) | ||
322 | { | ||
323 | } | ||
324 | |||
325 | #endif | ||
326 | |||
241 | static void acpi_processor_idle(void) | 327 | static void acpi_processor_idle(void) |
242 | { | 328 | { |
243 | struct acpi_processor *pr = NULL; | 329 | struct acpi_processor *pr = NULL; |
@@ -382,6 +468,7 @@ static void acpi_processor_idle(void) | |||
382 | /* Get start time (ticks) */ | 468 | /* Get start time (ticks) */ |
383 | t1 = inl(acpi_gbl_FADT.xpm_timer_block.address); | 469 | t1 = inl(acpi_gbl_FADT.xpm_timer_block.address); |
384 | /* Invoke C2 */ | 470 | /* Invoke C2 */ |
471 | acpi_state_timer_broadcast(pr, cx, 1); | ||
385 | acpi_cstate_enter(cx); | 472 | acpi_cstate_enter(cx); |
386 | /* Get end time (ticks) */ | 473 | /* Get end time (ticks) */ |
387 | t2 = inl(acpi_gbl_FADT.xpm_timer_block.address); | 474 | t2 = inl(acpi_gbl_FADT.xpm_timer_block.address); |
@@ -396,6 +483,7 @@ static void acpi_processor_idle(void) | |||
396 | /* Compute time (ticks) that we were actually asleep */ | 483 | /* Compute time (ticks) that we were actually asleep */ |
397 | sleep_ticks = | 484 | sleep_ticks = |
398 | ticks_elapsed(t1, t2) - cx->latency_ticks - C2_OVERHEAD; | 485 | ticks_elapsed(t1, t2) - cx->latency_ticks - C2_OVERHEAD; |
486 | acpi_state_timer_broadcast(pr, cx, 0); | ||
399 | break; | 487 | break; |
400 | 488 | ||
401 | case ACPI_STATE_C3: | 489 | case ACPI_STATE_C3: |
@@ -417,6 +505,7 @@ static void acpi_processor_idle(void) | |||
417 | /* Get start time (ticks) */ | 505 | /* Get start time (ticks) */ |
418 | t1 = inl(acpi_gbl_FADT.xpm_timer_block.address); | 506 | t1 = inl(acpi_gbl_FADT.xpm_timer_block.address); |
419 | /* Invoke C3 */ | 507 | /* Invoke C3 */ |
508 | acpi_state_timer_broadcast(pr, cx, 1); | ||
420 | acpi_cstate_enter(cx); | 509 | acpi_cstate_enter(cx); |
421 | /* Get end time (ticks) */ | 510 | /* Get end time (ticks) */ |
422 | t2 = inl(acpi_gbl_FADT.xpm_timer_block.address); | 511 | t2 = inl(acpi_gbl_FADT.xpm_timer_block.address); |
@@ -436,6 +525,7 @@ static void acpi_processor_idle(void) | |||
436 | /* Compute time (ticks) that we were actually asleep */ | 525 | /* Compute time (ticks) that we were actually asleep */ |
437 | sleep_ticks = | 526 | sleep_ticks = |
438 | ticks_elapsed(t1, t2) - cx->latency_ticks - C3_OVERHEAD; | 527 | ticks_elapsed(t1, t2) - cx->latency_ticks - C3_OVERHEAD; |
528 | acpi_state_timer_broadcast(pr, cx, 0); | ||
439 | break; | 529 | break; |
440 | 530 | ||
441 | default: | 531 | default: |
@@ -904,11 +994,7 @@ static int acpi_processor_power_verify(struct acpi_processor *pr) | |||
904 | unsigned int i; | 994 | unsigned int i; |
905 | unsigned int working = 0; | 995 | unsigned int working = 0; |
906 | 996 | ||
907 | #ifdef ARCH_APICTIMER_STOPS_ON_C3 | 997 | pr->power.timer_broadcast_on_state = INT_MAX; |
908 | int timer_broadcast = 0; | ||
909 | cpumask_t mask = cpumask_of_cpu(pr->id); | ||
910 | on_each_cpu(switch_ipi_to_APIC_timer, &mask, 1, 1); | ||
911 | #endif | ||
912 | 998 | ||
913 | for (i = 1; i < ACPI_PROCESSOR_MAX_POWER; i++) { | 999 | for (i = 1; i < ACPI_PROCESSOR_MAX_POWER; i++) { |
914 | struct acpi_processor_cx *cx = &pr->power.states[i]; | 1000 | struct acpi_processor_cx *cx = &pr->power.states[i]; |
@@ -920,21 +1006,14 @@ static int acpi_processor_power_verify(struct acpi_processor *pr) | |||
920 | 1006 | ||
921 | case ACPI_STATE_C2: | 1007 | case ACPI_STATE_C2: |
922 | acpi_processor_power_verify_c2(cx); | 1008 | acpi_processor_power_verify_c2(cx); |
923 | #ifdef ARCH_APICTIMER_STOPS_ON_C3 | 1009 | if (cx->valid) |
924 | /* Some AMD systems fake C3 as C2, but still | 1010 | acpi_timer_check_state(i, pr, cx); |
925 | have timer troubles */ | ||
926 | if (cx->valid && | ||
927 | boot_cpu_data.x86_vendor == X86_VENDOR_AMD) | ||
928 | timer_broadcast++; | ||
929 | #endif | ||
930 | break; | 1011 | break; |
931 | 1012 | ||
932 | case ACPI_STATE_C3: | 1013 | case ACPI_STATE_C3: |
933 | acpi_processor_power_verify_c3(pr, cx); | 1014 | acpi_processor_power_verify_c3(pr, cx); |
934 | #ifdef ARCH_APICTIMER_STOPS_ON_C3 | ||
935 | if (cx->valid) | 1015 | if (cx->valid) |
936 | timer_broadcast++; | 1016 | acpi_timer_check_state(i, pr, cx); |
937 | #endif | ||
938 | break; | 1017 | break; |
939 | } | 1018 | } |
940 | 1019 | ||
@@ -942,10 +1021,7 @@ static int acpi_processor_power_verify(struct acpi_processor *pr) | |||
942 | working++; | 1021 | working++; |
943 | } | 1022 | } |
944 | 1023 | ||
945 | #ifdef ARCH_APICTIMER_STOPS_ON_C3 | 1024 | acpi_propagate_timer_broadcast(pr); |
946 | if (timer_broadcast) | ||
947 | on_each_cpu(switch_APIC_timer_to_ipi, &mask, 1, 1); | ||
948 | #endif | ||
949 | 1025 | ||
950 | return (working); | 1026 | return (working); |
951 | } | 1027 | } |
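
The three helpers added above only do bookkeeping: they record the lowest C-state index whose entry stops the local APIC timer (conservatively including C2), and every state at or beyond that index then goes through the broadcast path. A minimal user-space sketch of that bookkeeping, in plain C rather than kernel code (the state table and function names here are illustrative, not taken from the patch):

#include <limits.h>
#include <stdio.h>

/* Illustrative stand-ins for the ACPI C-state types used in the patch. */
enum { ACPI_STATE_C1 = 1, ACPI_STATE_C2 = 2, ACPI_STATE_C3 = 3 };

struct cx { int type; int valid; };

/* Lowest state index whose entry needs the broadcast timer;
 * INT_MAX means the local APIC timer is usable in every state. */
static int timer_broadcast_on_state = INT_MAX;

static void timer_check_state(int state, const struct cx *cx)
{
	if (timer_broadcast_on_state < state)
		return;			/* an earlier state already marked it */
	if (cx->valid && cx->type >= ACPI_STATE_C2)
		timer_broadcast_on_state = state;
}

static int needs_broadcast(int state)
{
	return state >= timer_broadcast_on_state;
}

int main(void)
{
	struct cx states[] = { { 0, 0 }, { ACPI_STATE_C1, 1 },
			       { ACPI_STATE_C2, 1 }, { ACPI_STATE_C3, 1 } };
	int i;

	for (i = 1; i < 4; i++)
		timer_check_state(i, &states[i]);
	printf("broadcast from state %d; C1 needs it: %d, C3 needs it: %d\n",
	       timer_broadcast_on_state, needs_broadcast(1), needs_broadcast(3));
	return 0;
}

In the patch itself that threshold is consumed by acpi_state_timer_broadcast(), which wraps the C2/C3 entry points with CLOCK_EVT_NOTIFY_BROADCAST_ENTER and _EXIT notifications, as the hunks above show.
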
diff --git a/drivers/char/agp/Makefile b/drivers/char/agp/Makefile
index 3e581603d0a8..a0d04a23dacd 100644
--- a/drivers/char/agp/Makefile
+++ b/drivers/char/agp/Makefile
@@ -1,6 +1,7 @@ | |||
1 | agpgart-y := backend.o frontend.o generic.o isoch.o | 1 | agpgart-y := backend.o frontend.o generic.o isoch.o |
2 | 2 | ||
3 | obj-$(CONFIG_AGP) += agpgart.o | 3 | obj-$(CONFIG_AGP) += agpgart.o |
4 | obj-$(CONFIG_COMPAT) += compat_ioctl.o | ||
4 | obj-$(CONFIG_AGP_ALI) += ali-agp.o | 5 | obj-$(CONFIG_AGP_ALI) += ali-agp.o |
5 | obj-$(CONFIG_AGP_ATI) += ati-agp.o | 6 | obj-$(CONFIG_AGP_ATI) += ati-agp.o |
6 | obj-$(CONFIG_AGP_AMD) += amd-k7-agp.o | 7 | obj-$(CONFIG_AGP_AMD) += amd-k7-agp.o |
diff --git a/drivers/char/agp/agp.h b/drivers/char/agp/agp.h
index 1d59e2a5b9aa..9bd68d9f0f59 100644
--- a/drivers/char/agp/agp.h
+++ b/drivers/char/agp/agp.h
@@ -114,6 +114,7 @@ struct agp_bridge_driver { | |||
114 | void (*free_by_type)(struct agp_memory *); | 114 | void (*free_by_type)(struct agp_memory *); |
115 | void *(*agp_alloc_page)(struct agp_bridge_data *); | 115 | void *(*agp_alloc_page)(struct agp_bridge_data *); |
116 | void (*agp_destroy_page)(void *); | 116 | void (*agp_destroy_page)(void *); |
117 | int (*agp_type_to_mask_type) (struct agp_bridge_data *, int); | ||
117 | }; | 118 | }; |
118 | 119 | ||
119 | struct agp_bridge_data { | 120 | struct agp_bridge_data { |
@@ -218,6 +219,7 @@ struct agp_bridge_data { | |||
218 | #define I810_PTE_MAIN_UNCACHED 0x00000000 | 219 | #define I810_PTE_MAIN_UNCACHED 0x00000000 |
219 | #define I810_PTE_LOCAL 0x00000002 | 220 | #define I810_PTE_LOCAL 0x00000002 |
220 | #define I810_PTE_VALID 0x00000001 | 221 | #define I810_PTE_VALID 0x00000001 |
222 | #define I830_PTE_SYSTEM_CACHED 0x00000006 | ||
221 | #define I810_SMRAM_MISCC 0x70 | 223 | #define I810_SMRAM_MISCC 0x70 |
222 | #define I810_GFX_MEM_WIN_SIZE 0x00010000 | 224 | #define I810_GFX_MEM_WIN_SIZE 0x00010000 |
223 | #define I810_GFX_MEM_WIN_32M 0x00010000 | 225 | #define I810_GFX_MEM_WIN_32M 0x00010000 |
@@ -270,8 +272,16 @@ void global_cache_flush(void); | |||
270 | void get_agp_version(struct agp_bridge_data *bridge); | 272 | void get_agp_version(struct agp_bridge_data *bridge); |
271 | unsigned long agp_generic_mask_memory(struct agp_bridge_data *bridge, | 273 | unsigned long agp_generic_mask_memory(struct agp_bridge_data *bridge, |
272 | unsigned long addr, int type); | 274 | unsigned long addr, int type); |
275 | int agp_generic_type_to_mask_type(struct agp_bridge_data *bridge, | ||
276 | int type); | ||
273 | struct agp_bridge_data *agp_generic_find_bridge(struct pci_dev *pdev); | 277 | struct agp_bridge_data *agp_generic_find_bridge(struct pci_dev *pdev); |
274 | 278 | ||
279 | /* generic functions for user-populated AGP memory types */ | ||
280 | struct agp_memory *agp_generic_alloc_user(size_t page_count, int type); | ||
281 | void agp_alloc_page_array(size_t size, struct agp_memory *mem); | ||
282 | void agp_free_page_array(struct agp_memory *mem); | ||
283 | |||
284 | |||
275 | /* generic routines for agp>=3 */ | 285 | /* generic routines for agp>=3 */ |
276 | int agp3_generic_fetch_size(void); | 286 | int agp3_generic_fetch_size(void); |
277 | void agp3_generic_tlbflush(struct agp_memory *mem); | 287 | void agp3_generic_tlbflush(struct agp_memory *mem); |
@@ -288,6 +298,8 @@ extern struct aper_size_info_16 agp3_generic_sizes[]; | |||
288 | extern int agp_off; | 298 | extern int agp_off; |
289 | extern int agp_try_unsupported_boot; | 299 | extern int agp_try_unsupported_boot; |
290 | 300 | ||
301 | long compat_agp_ioctl(struct file *file, unsigned int cmd, unsigned long arg); | ||
302 | |||
291 | /* Chipset independant registers (from AGP Spec) */ | 303 | /* Chipset independant registers (from AGP Spec) */ |
292 | #define AGP_APBASE 0x10 | 304 | #define AGP_APBASE 0x10 |
293 | 305 | ||
diff --git a/drivers/char/agp/ali-agp.c b/drivers/char/agp/ali-agp.c
index 5a31ec7c62fc..98177a93076f 100644
--- a/drivers/char/agp/ali-agp.c
+++ b/drivers/char/agp/ali-agp.c
@@ -214,6 +214,7 @@ static struct agp_bridge_driver ali_generic_bridge = { | |||
214 | .free_by_type = agp_generic_free_by_type, | 214 | .free_by_type = agp_generic_free_by_type, |
215 | .agp_alloc_page = agp_generic_alloc_page, | 215 | .agp_alloc_page = agp_generic_alloc_page, |
216 | .agp_destroy_page = ali_destroy_page, | 216 | .agp_destroy_page = ali_destroy_page, |
217 | .agp_type_to_mask_type = agp_generic_type_to_mask_type, | ||
217 | }; | 218 | }; |
218 | 219 | ||
219 | static struct agp_bridge_driver ali_m1541_bridge = { | 220 | static struct agp_bridge_driver ali_m1541_bridge = { |
@@ -237,6 +238,7 @@ static struct agp_bridge_driver ali_m1541_bridge = { | |||
237 | .free_by_type = agp_generic_free_by_type, | 238 | .free_by_type = agp_generic_free_by_type, |
238 | .agp_alloc_page = m1541_alloc_page, | 239 | .agp_alloc_page = m1541_alloc_page, |
239 | .agp_destroy_page = m1541_destroy_page, | 240 | .agp_destroy_page = m1541_destroy_page, |
241 | .agp_type_to_mask_type = agp_generic_type_to_mask_type, | ||
240 | }; | 242 | }; |
241 | 243 | ||
242 | 244 | ||
diff --git a/drivers/char/agp/alpha-agp.c b/drivers/char/agp/alpha-agp.c
index b4e00a343da9..b0acf41c0db9 100644
--- a/drivers/char/agp/alpha-agp.c
+++ b/drivers/char/agp/alpha-agp.c
@@ -91,6 +91,9 @@ static int alpha_core_agp_insert_memory(struct agp_memory *mem, off_t pg_start, | |||
91 | int num_entries, status; | 91 | int num_entries, status; |
92 | void *temp; | 92 | void *temp; |
93 | 93 | ||
94 | if (type >= AGP_USER_TYPES || mem->type >= AGP_USER_TYPES) | ||
95 | return -EINVAL; | ||
96 | |||
94 | temp = agp_bridge->current_size; | 97 | temp = agp_bridge->current_size; |
95 | num_entries = A_SIZE_FIX(temp)->num_entries; | 98 | num_entries = A_SIZE_FIX(temp)->num_entries; |
96 | if ((pg_start + mem->page_count) > num_entries) | 99 | if ((pg_start + mem->page_count) > num_entries) |
@@ -142,6 +145,7 @@ struct agp_bridge_driver alpha_core_agp_driver = { | |||
142 | .free_by_type = agp_generic_free_by_type, | 145 | .free_by_type = agp_generic_free_by_type, |
143 | .agp_alloc_page = agp_generic_alloc_page, | 146 | .agp_alloc_page = agp_generic_alloc_page, |
144 | .agp_destroy_page = agp_generic_destroy_page, | 147 | .agp_destroy_page = agp_generic_destroy_page, |
148 | .agp_type_to_mask_type = agp_generic_type_to_mask_type, | ||
145 | }; | 149 | }; |
146 | 150 | ||
147 | struct agp_bridge_data *alpha_bridge; | 151 | struct agp_bridge_data *alpha_bridge; |
diff --git a/drivers/char/agp/amd-k7-agp.c b/drivers/char/agp/amd-k7-agp.c
index c85c8cadb6df..3d8d448bf394 100644
--- a/drivers/char/agp/amd-k7-agp.c
+++ b/drivers/char/agp/amd-k7-agp.c
@@ -381,6 +381,7 @@ static struct agp_bridge_driver amd_irongate_driver = { | |||
381 | .free_by_type = agp_generic_free_by_type, | 381 | .free_by_type = agp_generic_free_by_type, |
382 | .agp_alloc_page = agp_generic_alloc_page, | 382 | .agp_alloc_page = agp_generic_alloc_page, |
383 | .agp_destroy_page = agp_generic_destroy_page, | 383 | .agp_destroy_page = agp_generic_destroy_page, |
384 | .agp_type_to_mask_type = agp_generic_type_to_mask_type, | ||
384 | }; | 385 | }; |
385 | 386 | ||
386 | static struct agp_device_ids amd_agp_device_ids[] __devinitdata = | 387 | static struct agp_device_ids amd_agp_device_ids[] __devinitdata = |
diff --git a/drivers/char/agp/amd64-agp.c b/drivers/char/agp/amd64-agp.c
index 93d2209fee4c..636d984ed4a6 100644
--- a/drivers/char/agp/amd64-agp.c
+++ b/drivers/char/agp/amd64-agp.c
@@ -62,12 +62,18 @@ static int amd64_insert_memory(struct agp_memory *mem, off_t pg_start, int type) | |||
62 | { | 62 | { |
63 | int i, j, num_entries; | 63 | int i, j, num_entries; |
64 | long long tmp; | 64 | long long tmp; |
65 | int mask_type; | ||
66 | struct agp_bridge_data *bridge = mem->bridge; | ||
65 | u32 pte; | 67 | u32 pte; |
66 | 68 | ||
67 | num_entries = agp_num_entries(); | 69 | num_entries = agp_num_entries(); |
68 | 70 | ||
69 | if (type != 0 || mem->type != 0) | 71 | if (type != mem->type) |
70 | return -EINVAL; | 72 | return -EINVAL; |
73 | mask_type = bridge->driver->agp_type_to_mask_type(bridge, type); | ||
74 | if (mask_type != 0) | ||
75 | return -EINVAL; | ||
76 | |||
71 | 77 | ||
72 | /* Make sure we can fit the range in the gatt table. */ | 78 | /* Make sure we can fit the range in the gatt table. */ |
73 | /* FIXME: could wrap */ | 79 | /* FIXME: could wrap */ |
@@ -90,7 +96,7 @@ static int amd64_insert_memory(struct agp_memory *mem, off_t pg_start, int type) | |||
90 | 96 | ||
91 | for (i = 0, j = pg_start; i < mem->page_count; i++, j++) { | 97 | for (i = 0, j = pg_start; i < mem->page_count; i++, j++) { |
92 | tmp = agp_bridge->driver->mask_memory(agp_bridge, | 98 | tmp = agp_bridge->driver->mask_memory(agp_bridge, |
93 | mem->memory[i], mem->type); | 99 | mem->memory[i], mask_type); |
94 | 100 | ||
95 | BUG_ON(tmp & 0xffffff0000000ffcULL); | 101 | BUG_ON(tmp & 0xffffff0000000ffcULL); |
96 | pte = (tmp & 0x000000ff00000000ULL) >> 28; | 102 | pte = (tmp & 0x000000ff00000000ULL) >> 28; |
@@ -247,6 +253,7 @@ static struct agp_bridge_driver amd_8151_driver = { | |||
247 | .free_by_type = agp_generic_free_by_type, | 253 | .free_by_type = agp_generic_free_by_type, |
248 | .agp_alloc_page = agp_generic_alloc_page, | 254 | .agp_alloc_page = agp_generic_alloc_page, |
249 | .agp_destroy_page = agp_generic_destroy_page, | 255 | .agp_destroy_page = agp_generic_destroy_page, |
256 | .agp_type_to_mask_type = agp_generic_type_to_mask_type, | ||
250 | }; | 257 | }; |
251 | 258 | ||
252 | /* Some basic sanity checks for the aperture. */ | 259 | /* Some basic sanity checks for the aperture. */ |
diff --git a/drivers/char/agp/ati-agp.c b/drivers/char/agp/ati-agp.c
index 9987dc2e0c3f..77c9ad68fba9 100644
--- a/drivers/char/agp/ati-agp.c
+++ b/drivers/char/agp/ati-agp.c
@@ -431,6 +431,7 @@ static struct agp_bridge_driver ati_generic_bridge = { | |||
431 | .free_by_type = agp_generic_free_by_type, | 431 | .free_by_type = agp_generic_free_by_type, |
432 | .agp_alloc_page = agp_generic_alloc_page, | 432 | .agp_alloc_page = agp_generic_alloc_page, |
433 | .agp_destroy_page = agp_generic_destroy_page, | 433 | .agp_destroy_page = agp_generic_destroy_page, |
434 | .agp_type_to_mask_type = agp_generic_type_to_mask_type, | ||
434 | }; | 435 | }; |
435 | 436 | ||
436 | 437 | ||
diff --git a/drivers/char/agp/backend.c b/drivers/char/agp/backend.c
index d59e037ddd12..ebdd6dd66edb 100644
--- a/drivers/char/agp/backend.c
+++ b/drivers/char/agp/backend.c
@@ -43,7 +43,7 @@ | |||
43 | * fix some real stupidity. It's only by chance we can bump | 43 | * fix some real stupidity. It's only by chance we can bump |
44 | * past 0.99 at all due to some boolean logic error. */ | 44 | * past 0.99 at all due to some boolean logic error. */ |
45 | #define AGPGART_VERSION_MAJOR 0 | 45 | #define AGPGART_VERSION_MAJOR 0 |
46 | #define AGPGART_VERSION_MINOR 101 | 46 | #define AGPGART_VERSION_MINOR 102 |
47 | static const struct agp_version agp_current_version = | 47 | static const struct agp_version agp_current_version = |
48 | { | 48 | { |
49 | .major = AGPGART_VERSION_MAJOR, | 49 | .major = AGPGART_VERSION_MAJOR, |
diff --git a/drivers/char/agp/compat_ioctl.c b/drivers/char/agp/compat_ioctl.c
new file mode 100644
index 000000000000..fcb4b1bf0d4e
--- /dev/null
+++ b/drivers/char/agp/compat_ioctl.c
@@ -0,0 +1,282 @@ | |||
1 | /* | ||
2 | * AGPGART driver frontend compatibility ioctls | ||
3 | * Copyright (C) 2004 Silicon Graphics, Inc. | ||
4 | * Copyright (C) 2002-2003 Dave Jones | ||
5 | * Copyright (C) 1999 Jeff Hartmann | ||
6 | * Copyright (C) 1999 Precision Insight, Inc. | ||
7 | * Copyright (C) 1999 Xi Graphics, Inc. | ||
8 | * | ||
9 | * Permission is hereby granted, free of charge, to any person obtaining a | ||
10 | * copy of this software and associated documentation files (the "Software"), | ||
11 | * to deal in the Software without restriction, including without limitation | ||
12 | * the rights to use, copy, modify, merge, publish, distribute, sublicense, | ||
13 | * and/or sell copies of the Software, and to permit persons to whom the | ||
14 | * Software is furnished to do so, subject to the following conditions: | ||
15 | * | ||
16 | * The above copyright notice and this permission notice shall be included | ||
17 | * in all copies or substantial portions of the Software. | ||
18 | * | ||
19 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS | ||
20 | * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, | ||
21 | * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL | ||
22 | * JEFF HARTMANN, OR ANY OTHER CONTRIBUTORS BE LIABLE FOR ANY CLAIM, | ||
23 | * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR | ||
24 | * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE | ||
25 | * OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. | ||
26 | * | ||
27 | */ | ||
28 | |||
29 | #include <linux/kernel.h> | ||
30 | #include <linux/pci.h> | ||
31 | #include <linux/agpgart.h> | ||
32 | #include <asm/uaccess.h> | ||
33 | #include "agp.h" | ||
34 | #include "compat_ioctl.h" | ||
35 | |||
36 | static int compat_agpioc_info_wrap(struct agp_file_private *priv, void __user *arg) | ||
37 | { | ||
38 | struct agp_info32 userinfo; | ||
39 | struct agp_kern_info kerninfo; | ||
40 | |||
41 | agp_copy_info(agp_bridge, &kerninfo); | ||
42 | |||
43 | userinfo.version.major = kerninfo.version.major; | ||
44 | userinfo.version.minor = kerninfo.version.minor; | ||
45 | userinfo.bridge_id = kerninfo.device->vendor | | ||
46 | (kerninfo.device->device << 16); | ||
47 | userinfo.agp_mode = kerninfo.mode; | ||
48 | userinfo.aper_base = (compat_long_t)kerninfo.aper_base; | ||
49 | userinfo.aper_size = kerninfo.aper_size; | ||
50 | userinfo.pg_total = userinfo.pg_system = kerninfo.max_memory; | ||
51 | userinfo.pg_used = kerninfo.current_memory; | ||
52 | |||
53 | if (copy_to_user(arg, &userinfo, sizeof(userinfo))) | ||
54 | return -EFAULT; | ||
55 | |||
56 | return 0; | ||
57 | } | ||
58 | |||
59 | static int compat_agpioc_reserve_wrap(struct agp_file_private *priv, void __user *arg) | ||
60 | { | ||
61 | struct agp_region32 ureserve; | ||
62 | struct agp_region kreserve; | ||
63 | struct agp_client *client; | ||
64 | struct agp_file_private *client_priv; | ||
65 | |||
66 | DBG(""); | ||
67 | if (copy_from_user(&ureserve, arg, sizeof(ureserve))) | ||
68 | return -EFAULT; | ||
69 | |||
70 | if ((unsigned) ureserve.seg_count >= ~0U/sizeof(struct agp_segment32)) | ||
71 | return -EFAULT; | ||
72 | |||
73 | kreserve.pid = ureserve.pid; | ||
74 | kreserve.seg_count = ureserve.seg_count; | ||
75 | |||
76 | client = agp_find_client_by_pid(kreserve.pid); | ||
77 | |||
78 | if (kreserve.seg_count == 0) { | ||
79 | /* remove a client */ | ||
80 | client_priv = agp_find_private(kreserve.pid); | ||
81 | |||
82 | if (client_priv != NULL) { | ||
83 | set_bit(AGP_FF_IS_CLIENT, &client_priv->access_flags); | ||
84 | set_bit(AGP_FF_IS_VALID, &client_priv->access_flags); | ||
85 | } | ||
86 | if (client == NULL) { | ||
87 | /* client is already removed */ | ||
88 | return 0; | ||
89 | } | ||
90 | return agp_remove_client(kreserve.pid); | ||
91 | } else { | ||
92 | struct agp_segment32 *usegment; | ||
93 | struct agp_segment *ksegment; | ||
94 | int seg; | ||
95 | |||
96 | if (ureserve.seg_count >= 16384) | ||
97 | return -EINVAL; | ||
98 | |||
99 | usegment = kmalloc(sizeof(*usegment) * ureserve.seg_count, GFP_KERNEL); | ||
100 | if (!usegment) | ||
101 | return -ENOMEM; | ||
102 | |||
103 | ksegment = kmalloc(sizeof(*ksegment) * kreserve.seg_count, GFP_KERNEL); | ||
104 | if (!ksegment) { | ||
105 | kfree(usegment); | ||
106 | return -ENOMEM; | ||
107 | } | ||
108 | |||
109 | if (copy_from_user(usegment, (void __user *) ureserve.seg_list, | ||
110 | sizeof(*usegment) * ureserve.seg_count)) { | ||
111 | kfree(usegment); | ||
112 | kfree(ksegment); | ||
113 | return -EFAULT; | ||
114 | } | ||
115 | |||
116 | for (seg = 0; seg < ureserve.seg_count; seg++) { | ||
117 | ksegment[seg].pg_start = usegment[seg].pg_start; | ||
118 | ksegment[seg].pg_count = usegment[seg].pg_count; | ||
119 | ksegment[seg].prot = usegment[seg].prot; | ||
120 | } | ||
121 | |||
122 | kfree(usegment); | ||
123 | kreserve.seg_list = ksegment; | ||
124 | |||
125 | if (client == NULL) { | ||
126 | /* Create the client and add the segment */ | ||
127 | client = agp_create_client(kreserve.pid); | ||
128 | |||
129 | if (client == NULL) { | ||
130 | kfree(ksegment); | ||
131 | return -ENOMEM; | ||
132 | } | ||
133 | client_priv = agp_find_private(kreserve.pid); | ||
134 | |||
135 | if (client_priv != NULL) { | ||
136 | set_bit(AGP_FF_IS_CLIENT, &client_priv->access_flags); | ||
137 | set_bit(AGP_FF_IS_VALID, &client_priv->access_flags); | ||
138 | } | ||
139 | } | ||
140 | return agp_create_segment(client, &kreserve); | ||
141 | } | ||
142 | /* Will never really happen */ | ||
143 | return -EINVAL; | ||
144 | } | ||
145 | |||
146 | static int compat_agpioc_allocate_wrap(struct agp_file_private *priv, void __user *arg) | ||
147 | { | ||
148 | struct agp_memory *memory; | ||
149 | struct agp_allocate32 alloc; | ||
150 | |||
151 | DBG(""); | ||
152 | if (copy_from_user(&alloc, arg, sizeof(alloc))) | ||
153 | return -EFAULT; | ||
154 | |||
155 | memory = agp_allocate_memory_wrap(alloc.pg_count, alloc.type); | ||
156 | |||
157 | if (memory == NULL) | ||
158 | return -ENOMEM; | ||
159 | |||
160 | alloc.key = memory->key; | ||
161 | alloc.physical = memory->physical; | ||
162 | |||
163 | if (copy_to_user(arg, &alloc, sizeof(alloc))) { | ||
164 | agp_free_memory_wrap(memory); | ||
165 | return -EFAULT; | ||
166 | } | ||
167 | return 0; | ||
168 | } | ||
169 | |||
170 | static int compat_agpioc_bind_wrap(struct agp_file_private *priv, void __user *arg) | ||
171 | { | ||
172 | struct agp_bind32 bind_info; | ||
173 | struct agp_memory *memory; | ||
174 | |||
175 | DBG(""); | ||
176 | if (copy_from_user(&bind_info, arg, sizeof(bind_info))) | ||
177 | return -EFAULT; | ||
178 | |||
179 | memory = agp_find_mem_by_key(bind_info.key); | ||
180 | |||
181 | if (memory == NULL) | ||
182 | return -EINVAL; | ||
183 | |||
184 | return agp_bind_memory(memory, bind_info.pg_start); | ||
185 | } | ||
186 | |||
187 | static int compat_agpioc_unbind_wrap(struct agp_file_private *priv, void __user *arg) | ||
188 | { | ||
189 | struct agp_memory *memory; | ||
190 | struct agp_unbind32 unbind; | ||
191 | |||
192 | DBG(""); | ||
193 | if (copy_from_user(&unbind, arg, sizeof(unbind))) | ||
194 | return -EFAULT; | ||
195 | |||
196 | memory = agp_find_mem_by_key(unbind.key); | ||
197 | |||
198 | if (memory == NULL) | ||
199 | return -EINVAL; | ||
200 | |||
201 | return agp_unbind_memory(memory); | ||
202 | } | ||
203 | |||
204 | long compat_agp_ioctl(struct file *file, unsigned int cmd, unsigned long arg) | ||
205 | { | ||
206 | struct agp_file_private *curr_priv = file->private_data; | ||
207 | int ret_val = -ENOTTY; | ||
208 | |||
209 | mutex_lock(&(agp_fe.agp_mutex)); | ||
210 | |||
211 | if ((agp_fe.current_controller == NULL) && | ||
212 | (cmd != AGPIOC_ACQUIRE32)) { | ||
213 | ret_val = -EINVAL; | ||
214 | goto ioctl_out; | ||
215 | } | ||
216 | if ((agp_fe.backend_acquired != TRUE) && | ||
217 | (cmd != AGPIOC_ACQUIRE32)) { | ||
218 | ret_val = -EBUSY; | ||
219 | goto ioctl_out; | ||
220 | } | ||
221 | if (cmd != AGPIOC_ACQUIRE32) { | ||
222 | if (!(test_bit(AGP_FF_IS_CONTROLLER, &curr_priv->access_flags))) { | ||
223 | ret_val = -EPERM; | ||
224 | goto ioctl_out; | ||
225 | } | ||
226 | /* Use the original pid of the controller, | ||
227 | * in case it's threaded */ | ||
228 | |||
229 | if (agp_fe.current_controller->pid != curr_priv->my_pid) { | ||
230 | ret_val = -EBUSY; | ||
231 | goto ioctl_out; | ||
232 | } | ||
233 | } | ||
234 | |||
235 | switch (cmd) { | ||
236 | case AGPIOC_INFO32: | ||
237 | ret_val = compat_agpioc_info_wrap(curr_priv, (void __user *) arg); | ||
238 | break; | ||
239 | |||
240 | case AGPIOC_ACQUIRE32: | ||
241 | ret_val = agpioc_acquire_wrap(curr_priv); | ||
242 | break; | ||
243 | |||
244 | case AGPIOC_RELEASE32: | ||
245 | ret_val = agpioc_release_wrap(curr_priv); | ||
246 | break; | ||
247 | |||
248 | case AGPIOC_SETUP32: | ||
249 | ret_val = agpioc_setup_wrap(curr_priv, (void __user *) arg); | ||
250 | break; | ||
251 | |||
252 | case AGPIOC_RESERVE32: | ||
253 | ret_val = compat_agpioc_reserve_wrap(curr_priv, (void __user *) arg); | ||
254 | break; | ||
255 | |||
256 | case AGPIOC_PROTECT32: | ||
257 | ret_val = agpioc_protect_wrap(curr_priv); | ||
258 | break; | ||
259 | |||
260 | case AGPIOC_ALLOCATE32: | ||
261 | ret_val = compat_agpioc_allocate_wrap(curr_priv, (void __user *) arg); | ||
262 | break; | ||
263 | |||
264 | case AGPIOC_DEALLOCATE32: | ||
265 | ret_val = agpioc_deallocate_wrap(curr_priv, (int) arg); | ||
266 | break; | ||
267 | |||
268 | case AGPIOC_BIND32: | ||
269 | ret_val = compat_agpioc_bind_wrap(curr_priv, (void __user *) arg); | ||
270 | break; | ||
271 | |||
272 | case AGPIOC_UNBIND32: | ||
273 | ret_val = compat_agpioc_unbind_wrap(curr_priv, (void __user *) arg); | ||
274 | break; | ||
275 | } | ||
276 | |||
277 | ioctl_out: | ||
278 | DBG("ioctl returns %d\n", ret_val); | ||
279 | mutex_unlock(&(agp_fe.agp_mutex)); | ||
280 | return ret_val; | ||
281 | } | ||
282 | |||
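
compat_agpioc_reserve_wrap() above shows the usual compat pattern: copy the packed 32-bit segment array in from userspace, reject absurd counts before doing any size arithmetic, then widen each field individually into the native layout instead of copying structs whose sizes differ. A small stand-alone sketch of that widening step, with made-up struct names and user-space malloc() in place of kmalloc():

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

/* Packed 32-bit userland layout vs. the wider native layout. */
struct seg32 { uint32_t pg_start; uint32_t pg_count; int32_t prot; };
struct seg   { long pg_start; unsigned long pg_count; int prot; };

/* Widen an array of 32-bit segments; NULL on overflow or allocation failure. */
static struct seg *widen_segments(const struct seg32 *in, size_t count)
{
	struct seg *out;
	size_t i;

	if (count >= SIZE_MAX / sizeof(*out))	/* mirrors the ~0U / sizeof() check */
		return NULL;
	out = malloc(count * sizeof(*out));
	if (!out)
		return NULL;
	for (i = 0; i < count; i++) {		/* field by field, never a raw copy */
		out[i].pg_start = in[i].pg_start;
		out[i].pg_count = in[i].pg_count;
		out[i].prot     = in[i].prot;
	}
	return out;
}

int main(void)
{
	struct seg32 one = { .pg_start = 16, .pg_count = 4, .prot = 3 };
	struct seg *native = widen_segments(&one, 1);

	if (native)
		printf("pg_start=%ld pg_count=%lu prot=%d\n",
		       native->pg_start, native->pg_count, native->prot);
	free(native);
	return 0;
}
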
diff --git a/drivers/char/agp/compat_ioctl.h b/drivers/char/agp/compat_ioctl.h
new file mode 100644
index 000000000000..71939d637236
--- /dev/null
+++ b/drivers/char/agp/compat_ioctl.h
@@ -0,0 +1,105 @@ | |||
1 | /* | ||
2 | * Copyright (C) 1999 Jeff Hartmann | ||
3 | * Copyright (C) 1999 Precision Insight, Inc. | ||
4 | * Copyright (C) 1999 Xi Graphics, Inc. | ||
5 | * | ||
6 | * Permission is hereby granted, free of charge, to any person obtaining a | ||
7 | * copy of this software and associated documentation files (the "Software"), | ||
8 | * to deal in the Software without restriction, including without limitation | ||
9 | * the rights to use, copy, modify, merge, publish, distribute, sublicense, | ||
10 | * and/or sell copies of the Software, and to permit persons to whom the | ||
11 | * Software is furnished to do so, subject to the following conditions: | ||
12 | * | ||
13 | * The above copyright notice and this permission notice shall be included | ||
14 | * in all copies or substantial portions of the Software. | ||
15 | * | ||
16 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS | ||
17 | * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, | ||
18 | * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL | ||
19 | * JEFF HARTMANN, OR ANY OTHER CONTRIBUTORS BE LIABLE FOR ANY CLAIM, | ||
20 | * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR | ||
21 | * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE | ||
22 | * OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. | ||
23 | * | ||
24 | */ | ||
25 | |||
26 | #ifndef _AGP_COMPAT_IOCTL_H | ||
27 | #define _AGP_COMPAT_IOCTL_H | ||
28 | |||
29 | #include <linux/compat.h> | ||
30 | #include <linux/agpgart.h> | ||
31 | |||
32 | #define AGPIOC_INFO32 _IOR (AGPIOC_BASE, 0, compat_uptr_t) | ||
33 | #define AGPIOC_ACQUIRE32 _IO (AGPIOC_BASE, 1) | ||
34 | #define AGPIOC_RELEASE32 _IO (AGPIOC_BASE, 2) | ||
35 | #define AGPIOC_SETUP32 _IOW (AGPIOC_BASE, 3, compat_uptr_t) | ||
36 | #define AGPIOC_RESERVE32 _IOW (AGPIOC_BASE, 4, compat_uptr_t) | ||
37 | #define AGPIOC_PROTECT32 _IOW (AGPIOC_BASE, 5, compat_uptr_t) | ||
38 | #define AGPIOC_ALLOCATE32 _IOWR(AGPIOC_BASE, 6, compat_uptr_t) | ||
39 | #define AGPIOC_DEALLOCATE32 _IOW (AGPIOC_BASE, 7, compat_int_t) | ||
40 | #define AGPIOC_BIND32 _IOW (AGPIOC_BASE, 8, compat_uptr_t) | ||
41 | #define AGPIOC_UNBIND32 _IOW (AGPIOC_BASE, 9, compat_uptr_t) | ||
42 | |||
43 | struct agp_info32 { | ||
44 | struct agp_version version; /* version of the driver */ | ||
45 | u32 bridge_id; /* bridge vendor/device */ | ||
46 | u32 agp_mode; /* mode info of bridge */ | ||
47 | compat_long_t aper_base; /* base of aperture */ | ||
48 | compat_size_t aper_size; /* size of aperture */ | ||
49 | compat_size_t pg_total; /* max pages (swap + system) */ | ||
50 | compat_size_t pg_system; /* max pages (system) */ | ||
51 | compat_size_t pg_used; /* current pages used */ | ||
52 | }; | ||
53 | |||
54 | /* | ||
55 | * The "prot" down below needs still a "sleep" flag somehow ... | ||
56 | */ | ||
57 | struct agp_segment32 { | ||
58 | compat_off_t pg_start; /* starting page to populate */ | ||
59 | compat_size_t pg_count; /* number of pages */ | ||
60 | compat_int_t prot; /* prot flags for mmap */ | ||
61 | }; | ||
62 | |||
63 | struct agp_region32 { | ||
64 | compat_pid_t pid; /* pid of process */ | ||
65 | compat_size_t seg_count; /* number of segments */ | ||
66 | struct agp_segment32 *seg_list; | ||
67 | }; | ||
68 | |||
69 | struct agp_allocate32 { | ||
70 | compat_int_t key; /* tag of allocation */ | ||
71 | compat_size_t pg_count; /* number of pages */ | ||
72 | u32 type; /* 0 == normal, other devspec */ | ||
73 | u32 physical; /* device specific (some devices | ||
74 | * need a phys address of the | ||
75 | * actual page behind the gatt | ||
76 | * table) */ | ||
77 | }; | ||
78 | |||
79 | struct agp_bind32 { | ||
80 | compat_int_t key; /* tag of allocation */ | ||
81 | compat_off_t pg_start; /* starting page to populate */ | ||
82 | }; | ||
83 | |||
84 | struct agp_unbind32 { | ||
85 | compat_int_t key; /* tag of allocation */ | ||
86 | u32 priority; /* priority for paging out */ | ||
87 | }; | ||
88 | |||
89 | extern struct agp_front_data agp_fe; | ||
90 | |||
91 | int agpioc_acquire_wrap(struct agp_file_private *priv); | ||
92 | int agpioc_release_wrap(struct agp_file_private *priv); | ||
93 | int agpioc_protect_wrap(struct agp_file_private *priv); | ||
94 | int agpioc_setup_wrap(struct agp_file_private *priv, void __user *arg); | ||
95 | int agpioc_deallocate_wrap(struct agp_file_private *priv, int arg); | ||
96 | struct agp_file_private *agp_find_private(pid_t pid); | ||
97 | struct agp_client *agp_create_client(pid_t id); | ||
98 | int agp_remove_client(pid_t id); | ||
99 | int agp_create_segment(struct agp_client *client, struct agp_region *region); | ||
100 | void agp_free_memory_wrap(struct agp_memory *memory); | ||
101 | struct agp_memory *agp_allocate_memory_wrap(size_t pg_count, u32 type); | ||
102 | struct agp_memory *agp_find_mem_by_key(int key); | ||
103 | struct agp_client *agp_find_client_by_pid(pid_t id); | ||
104 | |||
105 | #endif /* _AGP_COMPAT_H */ | ||
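
The separate AGPIOC_*32 numbers exist because _IO/_IOR/_IOW fold the size of the argument type into the command value; the stock agpgart.h commands are, as far as I can tell, defined with pointer-typed arguments, so a 32-bit process computes different numbers than a 64-bit kernel. A quick user-space check of that size effect (Linux ioctl macros assumed; the uint32_t/uint64_t arguments are stand-ins for a compat pointer and a native pointer):

#include <stdint.h>
#include <stdio.h>
#include <sys/ioctl.h>

#define AGPIOC_BASE 'A'		/* same base character as linux/agpgart.h */

int main(void)
{
	/* Same direction, base and number; only sizeof(arg) differs. */
	unsigned int cmd32 = _IOR(AGPIOC_BASE, 0, uint32_t);	/* 4-byte compat pointer */
	unsigned int cmd64 = _IOR(AGPIOC_BASE, 0, uint64_t);	/* 8-byte native pointer */

	printf("32-bit cmd 0x%08x != 64-bit cmd 0x%08x\n", cmd32, cmd64);
	return 0;
}

Commands that take no argument, such as AGPIOC_ACQUIRE32, encode size 0 on both sides, which is why the compat handler can reuse the native agpioc_acquire_wrap() and agpioc_release_wrap() directly.
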
diff --git a/drivers/char/agp/efficeon-agp.c b/drivers/char/agp/efficeon-agp.c
index 30f730ff81c1..658cb1a72d2c 100644
--- a/drivers/char/agp/efficeon-agp.c
+++ b/drivers/char/agp/efficeon-agp.c
@@ -335,6 +335,7 @@ static struct agp_bridge_driver efficeon_driver = { | |||
335 | .free_by_type = agp_generic_free_by_type, | 335 | .free_by_type = agp_generic_free_by_type, |
336 | .agp_alloc_page = agp_generic_alloc_page, | 336 | .agp_alloc_page = agp_generic_alloc_page, |
337 | .agp_destroy_page = agp_generic_destroy_page, | 337 | .agp_destroy_page = agp_generic_destroy_page, |
338 | .agp_type_to_mask_type = agp_generic_type_to_mask_type, | ||
338 | }; | 339 | }; |
339 | 340 | ||
340 | static int __devinit agp_efficeon_probe(struct pci_dev *pdev, | 341 | static int __devinit agp_efficeon_probe(struct pci_dev *pdev, |
diff --git a/drivers/char/agp/frontend.c b/drivers/char/agp/frontend.c
index 0f2ed2aa2d81..679d7f972439 100644
--- a/drivers/char/agp/frontend.c
+++ b/drivers/char/agp/frontend.c
@@ -41,9 +41,9 @@ | |||
41 | #include <asm/pgtable.h> | 41 | #include <asm/pgtable.h> |
42 | #include "agp.h" | 42 | #include "agp.h" |
43 | 43 | ||
44 | static struct agp_front_data agp_fe; | 44 | struct agp_front_data agp_fe; |
45 | 45 | ||
46 | static struct agp_memory *agp_find_mem_by_key(int key) | 46 | struct agp_memory *agp_find_mem_by_key(int key) |
47 | { | 47 | { |
48 | struct agp_memory *curr; | 48 | struct agp_memory *curr; |
49 | 49 | ||
@@ -159,7 +159,7 @@ static pgprot_t agp_convert_mmap_flags(int prot) | |||
159 | return vm_get_page_prot(prot_bits); | 159 | return vm_get_page_prot(prot_bits); |
160 | } | 160 | } |
161 | 161 | ||
162 | static int agp_create_segment(struct agp_client *client, struct agp_region *region) | 162 | int agp_create_segment(struct agp_client *client, struct agp_region *region) |
163 | { | 163 | { |
164 | struct agp_segment_priv **ret_seg; | 164 | struct agp_segment_priv **ret_seg; |
165 | struct agp_segment_priv *seg; | 165 | struct agp_segment_priv *seg; |
@@ -211,7 +211,7 @@ static void agp_insert_into_pool(struct agp_memory * temp) | |||
211 | 211 | ||
212 | /* File private list routines */ | 212 | /* File private list routines */ |
213 | 213 | ||
214 | static struct agp_file_private *agp_find_private(pid_t pid) | 214 | struct agp_file_private *agp_find_private(pid_t pid) |
215 | { | 215 | { |
216 | struct agp_file_private *curr; | 216 | struct agp_file_private *curr; |
217 | 217 | ||
@@ -266,13 +266,13 @@ static void agp_remove_file_private(struct agp_file_private * priv) | |||
266 | * Wrappers for agp_free_memory & agp_allocate_memory | 266 | * Wrappers for agp_free_memory & agp_allocate_memory |
267 | * These make sure that internal lists are kept updated. | 267 | * These make sure that internal lists are kept updated. |
268 | */ | 268 | */ |
269 | static void agp_free_memory_wrap(struct agp_memory *memory) | 269 | void agp_free_memory_wrap(struct agp_memory *memory) |
270 | { | 270 | { |
271 | agp_remove_from_pool(memory); | 271 | agp_remove_from_pool(memory); |
272 | agp_free_memory(memory); | 272 | agp_free_memory(memory); |
273 | } | 273 | } |
274 | 274 | ||
275 | static struct agp_memory *agp_allocate_memory_wrap(size_t pg_count, u32 type) | 275 | struct agp_memory *agp_allocate_memory_wrap(size_t pg_count, u32 type) |
276 | { | 276 | { |
277 | struct agp_memory *memory; | 277 | struct agp_memory *memory; |
278 | 278 | ||
@@ -484,7 +484,7 @@ static struct agp_controller *agp_find_controller_for_client(pid_t id) | |||
484 | return NULL; | 484 | return NULL; |
485 | } | 485 | } |
486 | 486 | ||
487 | static struct agp_client *agp_find_client_by_pid(pid_t id) | 487 | struct agp_client *agp_find_client_by_pid(pid_t id) |
488 | { | 488 | { |
489 | struct agp_client *temp; | 489 | struct agp_client *temp; |
490 | 490 | ||
@@ -509,7 +509,7 @@ static void agp_insert_client(struct agp_client *client) | |||
509 | agp_fe.current_controller->num_clients++; | 509 | agp_fe.current_controller->num_clients++; |
510 | } | 510 | } |
511 | 511 | ||
512 | static struct agp_client *agp_create_client(pid_t id) | 512 | struct agp_client *agp_create_client(pid_t id) |
513 | { | 513 | { |
514 | struct agp_client *new_client; | 514 | struct agp_client *new_client; |
515 | 515 | ||
@@ -522,7 +522,7 @@ static struct agp_client *agp_create_client(pid_t id) | |||
522 | return new_client; | 522 | return new_client; |
523 | } | 523 | } |
524 | 524 | ||
525 | static int agp_remove_client(pid_t id) | 525 | int agp_remove_client(pid_t id) |
526 | { | 526 | { |
527 | struct agp_client *client; | 527 | struct agp_client *client; |
528 | struct agp_client *prev_client; | 528 | struct agp_client *prev_client; |
@@ -746,7 +746,7 @@ static int agpioc_info_wrap(struct agp_file_private *priv, void __user *arg) | |||
746 | return 0; | 746 | return 0; |
747 | } | 747 | } |
748 | 748 | ||
749 | static int agpioc_acquire_wrap(struct agp_file_private *priv) | 749 | int agpioc_acquire_wrap(struct agp_file_private *priv) |
750 | { | 750 | { |
751 | struct agp_controller *controller; | 751 | struct agp_controller *controller; |
752 | 752 | ||
@@ -789,14 +789,14 @@ static int agpioc_acquire_wrap(struct agp_file_private *priv) | |||
789 | return 0; | 789 | return 0; |
790 | } | 790 | } |
791 | 791 | ||
792 | static int agpioc_release_wrap(struct agp_file_private *priv) | 792 | int agpioc_release_wrap(struct agp_file_private *priv) |
793 | { | 793 | { |
794 | DBG(""); | 794 | DBG(""); |
795 | agp_controller_release_current(agp_fe.current_controller, priv); | 795 | agp_controller_release_current(agp_fe.current_controller, priv); |
796 | return 0; | 796 | return 0; |
797 | } | 797 | } |
798 | 798 | ||
799 | static int agpioc_setup_wrap(struct agp_file_private *priv, void __user *arg) | 799 | int agpioc_setup_wrap(struct agp_file_private *priv, void __user *arg) |
800 | { | 800 | { |
801 | struct agp_setup mode; | 801 | struct agp_setup mode; |
802 | 802 | ||
@@ -876,7 +876,7 @@ static int agpioc_reserve_wrap(struct agp_file_private *priv, void __user *arg) | |||
876 | return -EINVAL; | 876 | return -EINVAL; |
877 | } | 877 | } |
878 | 878 | ||
879 | static int agpioc_protect_wrap(struct agp_file_private *priv) | 879 | int agpioc_protect_wrap(struct agp_file_private *priv) |
880 | { | 880 | { |
881 | DBG(""); | 881 | DBG(""); |
882 | /* This function is not currently implemented */ | 882 | /* This function is not currently implemented */ |
@@ -892,6 +892,9 @@ static int agpioc_allocate_wrap(struct agp_file_private *priv, void __user *arg) | |||
892 | if (copy_from_user(&alloc, arg, sizeof(struct agp_allocate))) | 892 | if (copy_from_user(&alloc, arg, sizeof(struct agp_allocate))) |
893 | return -EFAULT; | 893 | return -EFAULT; |
894 | 894 | ||
895 | if (alloc.type >= AGP_USER_TYPES) | ||
896 | return -EINVAL; | ||
897 | |||
895 | memory = agp_allocate_memory_wrap(alloc.pg_count, alloc.type); | 898 | memory = agp_allocate_memory_wrap(alloc.pg_count, alloc.type); |
896 | 899 | ||
897 | if (memory == NULL) | 900 | if (memory == NULL) |
@@ -907,7 +910,7 @@ static int agpioc_allocate_wrap(struct agp_file_private *priv, void __user *arg) | |||
907 | return 0; | 910 | return 0; |
908 | } | 911 | } |
909 | 912 | ||
910 | static int agpioc_deallocate_wrap(struct agp_file_private *priv, int arg) | 913 | int agpioc_deallocate_wrap(struct agp_file_private *priv, int arg) |
911 | { | 914 | { |
912 | struct agp_memory *memory; | 915 | struct agp_memory *memory; |
913 | 916 | ||
@@ -1043,6 +1046,9 @@ static const struct file_operations agp_fops = | |||
1043 | .read = agp_read, | 1046 | .read = agp_read, |
1044 | .write = agp_write, | 1047 | .write = agp_write, |
1045 | .ioctl = agp_ioctl, | 1048 | .ioctl = agp_ioctl, |
1049 | #ifdef CONFIG_COMPAT | ||
1050 | .compat_ioctl = compat_agp_ioctl, | ||
1051 | #endif | ||
1046 | .mmap = agp_mmap, | 1052 | .mmap = agp_mmap, |
1047 | .open = agp_open, | 1053 | .open = agp_open, |
1048 | .release = agp_release, | 1054 | .release = agp_release, |
diff --git a/drivers/char/agp/generic.c b/drivers/char/agp/generic.c
index 3491d6f84bc6..7923337c3d26 100644
--- a/drivers/char/agp/generic.c
+++ b/drivers/char/agp/generic.c
@@ -101,6 +101,63 @@ static int agp_get_key(void) | |||
101 | return -1; | 101 | return -1; |
102 | } | 102 | } |
103 | 103 | ||
104 | /* | ||
105 | * Use kmalloc if possible for the page list. Otherwise fall back to | ||
106 | * vmalloc. This speeds things up and also saves memory for small AGP | ||
107 | * regions. | ||
108 | */ | ||
109 | |||
110 | void agp_alloc_page_array(size_t size, struct agp_memory *mem) | ||
111 | { | ||
112 | mem->memory = NULL; | ||
113 | mem->vmalloc_flag = 0; | ||
114 | |||
115 | if (size <= 2*PAGE_SIZE) | ||
116 | mem->memory = kmalloc(size, GFP_KERNEL | __GFP_NORETRY); | ||
117 | if (mem->memory == NULL) { | ||
118 | mem->memory = vmalloc(size); | ||
119 | mem->vmalloc_flag = 1; | ||
120 | } | ||
121 | } | ||
122 | EXPORT_SYMBOL(agp_alloc_page_array); | ||
123 | |||
124 | void agp_free_page_array(struct agp_memory *mem) | ||
125 | { | ||
126 | if (mem->vmalloc_flag) { | ||
127 | vfree(mem->memory); | ||
128 | } else { | ||
129 | kfree(mem->memory); | ||
130 | } | ||
131 | } | ||
132 | EXPORT_SYMBOL(agp_free_page_array); | ||
133 | |||
134 | |||
135 | static struct agp_memory *agp_create_user_memory(unsigned long num_agp_pages) | ||
136 | { | ||
137 | struct agp_memory *new; | ||
138 | unsigned long alloc_size = num_agp_pages*sizeof(struct page *); | ||
139 | |||
140 | new = kzalloc(sizeof(struct agp_memory), GFP_KERNEL); | ||
141 | if (new == NULL) | ||
142 | return NULL; | ||
143 | |||
144 | new->key = agp_get_key(); | ||
145 | |||
146 | if (new->key < 0) { | ||
147 | kfree(new); | ||
148 | return NULL; | ||
149 | } | ||
150 | |||
151 | agp_alloc_page_array(alloc_size, new); | ||
152 | |||
153 | if (new->memory == NULL) { | ||
154 | agp_free_key(new->key); | ||
155 | kfree(new); | ||
156 | return NULL; | ||
157 | } | ||
158 | new->num_scratch_pages = 0; | ||
159 | return new; | ||
160 | } | ||
104 | 161 | ||
105 | struct agp_memory *agp_create_memory(int scratch_pages) | 162 | struct agp_memory *agp_create_memory(int scratch_pages) |
106 | { | 163 | { |
@@ -116,7 +173,8 @@ struct agp_memory *agp_create_memory(int scratch_pages) | |||
116 | kfree(new); | 173 | kfree(new); |
117 | return NULL; | 174 | return NULL; |
118 | } | 175 | } |
119 | new->memory = vmalloc(PAGE_SIZE * scratch_pages); | 176 | |
177 | agp_alloc_page_array(PAGE_SIZE * scratch_pages, new); | ||
120 | 178 | ||
121 | if (new->memory == NULL) { | 179 | if (new->memory == NULL) { |
122 | agp_free_key(new->key); | 180 | agp_free_key(new->key); |
@@ -124,6 +182,7 @@ struct agp_memory *agp_create_memory(int scratch_pages) | |||
124 | return NULL; | 182 | return NULL; |
125 | } | 183 | } |
126 | new->num_scratch_pages = scratch_pages; | 184 | new->num_scratch_pages = scratch_pages; |
185 | new->type = AGP_NORMAL_MEMORY; | ||
127 | return new; | 186 | return new; |
128 | } | 187 | } |
129 | EXPORT_SYMBOL(agp_create_memory); | 188 | EXPORT_SYMBOL(agp_create_memory); |
@@ -146,6 +205,11 @@ void agp_free_memory(struct agp_memory *curr) | |||
146 | if (curr->is_bound == TRUE) | 205 | if (curr->is_bound == TRUE) |
147 | agp_unbind_memory(curr); | 206 | agp_unbind_memory(curr); |
148 | 207 | ||
208 | if (curr->type >= AGP_USER_TYPES) { | ||
209 | agp_generic_free_by_type(curr); | ||
210 | return; | ||
211 | } | ||
212 | |||
149 | if (curr->type != 0) { | 213 | if (curr->type != 0) { |
150 | curr->bridge->driver->free_by_type(curr); | 214 | curr->bridge->driver->free_by_type(curr); |
151 | return; | 215 | return; |
@@ -157,7 +221,7 @@ void agp_free_memory(struct agp_memory *curr) | |||
157 | flush_agp_mappings(); | 221 | flush_agp_mappings(); |
158 | } | 222 | } |
159 | agp_free_key(curr->key); | 223 | agp_free_key(curr->key); |
160 | vfree(curr->memory); | 224 | agp_free_page_array(curr); |
161 | kfree(curr); | 225 | kfree(curr); |
162 | } | 226 | } |
163 | EXPORT_SYMBOL(agp_free_memory); | 227 | EXPORT_SYMBOL(agp_free_memory); |
@@ -188,6 +252,13 @@ struct agp_memory *agp_allocate_memory(struct agp_bridge_data *bridge, | |||
188 | if ((atomic_read(&bridge->current_memory_agp) + page_count) > bridge->max_memory_agp) | 252 | if ((atomic_read(&bridge->current_memory_agp) + page_count) > bridge->max_memory_agp) |
189 | return NULL; | 253 | return NULL; |
190 | 254 | ||
255 | if (type >= AGP_USER_TYPES) { | ||
256 | new = agp_generic_alloc_user(page_count, type); | ||
257 | if (new) | ||
258 | new->bridge = bridge; | ||
259 | return new; | ||
260 | } | ||
261 | |||
191 | if (type != 0) { | 262 | if (type != 0) { |
192 | new = bridge->driver->alloc_by_type(page_count, type); | 263 | new = bridge->driver->alloc_by_type(page_count, type); |
193 | if (new) | 264 | if (new) |
@@ -960,6 +1031,7 @@ int agp_generic_insert_memory(struct agp_memory * mem, off_t pg_start, int type) | |||
960 | off_t j; | 1031 | off_t j; |
961 | void *temp; | 1032 | void *temp; |
962 | struct agp_bridge_data *bridge; | 1033 | struct agp_bridge_data *bridge; |
1034 | int mask_type; | ||
963 | 1035 | ||
964 | bridge = mem->bridge; | 1036 | bridge = mem->bridge; |
965 | if (!bridge) | 1037 | if (!bridge) |
@@ -995,7 +1067,11 @@ int agp_generic_insert_memory(struct agp_memory * mem, off_t pg_start, int type) | |||
995 | num_entries -= agp_memory_reserved/PAGE_SIZE; | 1067 | num_entries -= agp_memory_reserved/PAGE_SIZE; |
996 | if (num_entries < 0) num_entries = 0; | 1068 | if (num_entries < 0) num_entries = 0; |
997 | 1069 | ||
998 | if (type != 0 || mem->type != 0) { | 1070 | if (type != mem->type) |
1071 | return -EINVAL; | ||
1072 | |||
1073 | mask_type = bridge->driver->agp_type_to_mask_type(bridge, type); | ||
1074 | if (mask_type != 0) { | ||
999 | /* The generic routines know nothing of memory types */ | 1075 | /* The generic routines know nothing of memory types */ |
1000 | return -EINVAL; | 1076 | return -EINVAL; |
1001 | } | 1077 | } |
@@ -1018,7 +1094,8 @@ int agp_generic_insert_memory(struct agp_memory * mem, off_t pg_start, int type) | |||
1018 | } | 1094 | } |
1019 | 1095 | ||
1020 | for (i = 0, j = pg_start; i < mem->page_count; i++, j++) { | 1096 | for (i = 0, j = pg_start; i < mem->page_count; i++, j++) { |
1021 | writel(bridge->driver->mask_memory(bridge, mem->memory[i], mem->type), bridge->gatt_table+j); | 1097 | writel(bridge->driver->mask_memory(bridge, mem->memory[i], mask_type), |
1098 | bridge->gatt_table+j); | ||
1022 | } | 1099 | } |
1023 | readl(bridge->gatt_table+j-1); /* PCI Posting. */ | 1100 | readl(bridge->gatt_table+j-1); /* PCI Posting. */ |
1024 | 1101 | ||
@@ -1032,6 +1109,7 @@ int agp_generic_remove_memory(struct agp_memory *mem, off_t pg_start, int type) | |||
1032 | { | 1109 | { |
1033 | size_t i; | 1110 | size_t i; |
1034 | struct agp_bridge_data *bridge; | 1111 | struct agp_bridge_data *bridge; |
1112 | int mask_type; | ||
1035 | 1113 | ||
1036 | bridge = mem->bridge; | 1114 | bridge = mem->bridge; |
1037 | if (!bridge) | 1115 | if (!bridge) |
@@ -1040,7 +1118,11 @@ int agp_generic_remove_memory(struct agp_memory *mem, off_t pg_start, int type) | |||
1040 | if (mem->page_count == 0) | 1118 | if (mem->page_count == 0) |
1041 | return 0; | 1119 | return 0; |
1042 | 1120 | ||
1043 | if (type != 0 || mem->type != 0) { | 1121 | if (type != mem->type) |
1122 | return -EINVAL; | ||
1123 | |||
1124 | mask_type = bridge->driver->agp_type_to_mask_type(bridge, type); | ||
1125 | if (mask_type != 0) { | ||
1044 | /* The generic routines know nothing of memory types */ | 1126 | /* The generic routines know nothing of memory types */ |
1045 | return -EINVAL; | 1127 | return -EINVAL; |
1046 | } | 1128 | } |
@@ -1056,22 +1138,40 @@ int agp_generic_remove_memory(struct agp_memory *mem, off_t pg_start, int type) | |||
1056 | } | 1138 | } |
1057 | EXPORT_SYMBOL(agp_generic_remove_memory); | 1139 | EXPORT_SYMBOL(agp_generic_remove_memory); |
1058 | 1140 | ||
1059 | |||
1060 | struct agp_memory *agp_generic_alloc_by_type(size_t page_count, int type) | 1141 | struct agp_memory *agp_generic_alloc_by_type(size_t page_count, int type) |
1061 | { | 1142 | { |
1062 | return NULL; | 1143 | return NULL; |
1063 | } | 1144 | } |
1064 | EXPORT_SYMBOL(agp_generic_alloc_by_type); | 1145 | EXPORT_SYMBOL(agp_generic_alloc_by_type); |
1065 | 1146 | ||
1066 | |||
1067 | void agp_generic_free_by_type(struct agp_memory *curr) | 1147 | void agp_generic_free_by_type(struct agp_memory *curr) |
1068 | { | 1148 | { |
1069 | vfree(curr->memory); | 1149 | agp_free_page_array(curr); |
1070 | agp_free_key(curr->key); | 1150 | agp_free_key(curr->key); |
1071 | kfree(curr); | 1151 | kfree(curr); |
1072 | } | 1152 | } |
1073 | EXPORT_SYMBOL(agp_generic_free_by_type); | 1153 | EXPORT_SYMBOL(agp_generic_free_by_type); |
1074 | 1154 | ||
1155 | struct agp_memory *agp_generic_alloc_user(size_t page_count, int type) | ||
1156 | { | ||
1157 | struct agp_memory *new; | ||
1158 | int i; | ||
1159 | int pages; | ||
1160 | |||
1161 | pages = (page_count + ENTRIES_PER_PAGE - 1) / ENTRIES_PER_PAGE; | ||
1162 | new = agp_create_user_memory(page_count); | ||
1163 | if (new == NULL) | ||
1164 | return NULL; | ||
1165 | |||
1166 | for (i = 0; i < page_count; i++) | ||
1167 | new->memory[i] = 0; | ||
1168 | new->page_count = 0; | ||
1169 | new->type = type; | ||
1170 | new->num_scratch_pages = pages; | ||
1171 | |||
1172 | return new; | ||
1173 | } | ||
1174 | EXPORT_SYMBOL(agp_generic_alloc_user); | ||
1075 | 1175 | ||
1076 | /* | 1176 | /* |
1077 | * Basic Page Allocation Routines - | 1177 | * Basic Page Allocation Routines - |
@@ -1165,6 +1265,15 @@ unsigned long agp_generic_mask_memory(struct agp_bridge_data *bridge, | |||
1165 | } | 1265 | } |
1166 | EXPORT_SYMBOL(agp_generic_mask_memory); | 1266 | EXPORT_SYMBOL(agp_generic_mask_memory); |
1167 | 1267 | ||
1268 | int agp_generic_type_to_mask_type(struct agp_bridge_data *bridge, | ||
1269 | int type) | ||
1270 | { | ||
1271 | if (type >= AGP_USER_TYPES) | ||
1272 | return 0; | ||
1273 | return type; | ||
1274 | } | ||
1275 | EXPORT_SYMBOL(agp_generic_type_to_mask_type); | ||
1276 | |||
1168 | /* | 1277 | /* |
1169 | * These functions are implemented according to the AGPv3 spec, | 1278 | * These functions are implemented according to the AGPv3 spec, |
1170 | * which covers implementation details that had previously been | 1279 | * which covers implementation details that had previously been |
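
The new agp_type_to_mask_type() hook separates the type a caller passes in (now including the AGP_USER_* range) from the index a chipset driver uses to pick a GATT mask: the generic version above collapses all user types to mask 0, while the i830 variant added later in this patch maps cached user memory to its own PTE flags. A compact sketch of that dispatch, with the constant values treated as illustrative rather than copied from the headers:

#include <stdio.h>

/* Illustrative values; the real constants live in the AGP headers. */
#define AGP_USER_TYPES		(1 << 16)
#define AGP_USER_CACHED_MEMORY	(AGP_USER_TYPES + 1)
#define INTEL_AGP_CACHED_MEMORY	3

struct bridge {
	int (*type_to_mask_type)(struct bridge *br, int type);
};

/* Generic: user-visible types carry no chipset meaning, so mask type 0. */
static int generic_type_to_mask_type(struct bridge *br, int type)
{
	if (type >= AGP_USER_TYPES)
		return 0;
	return type;
}

/* i830-style: cached user memory maps to a dedicated PTE flag set. */
static int i830_type_to_mask_type(struct bridge *br, int type)
{
	if (type < AGP_USER_TYPES)
		return type;
	else if (type == AGP_USER_CACHED_MEMORY)
		return INTEL_AGP_CACHED_MEMORY;
	else
		return 0;
}

int main(void)
{
	struct bridge generic = { generic_type_to_mask_type };
	struct bridge i830 = { i830_type_to_mask_type };

	printf("generic(cached)=%d i830(cached)=%d\n",
	       generic.type_to_mask_type(&generic, AGP_USER_CACHED_MEMORY),
	       i830.type_to_mask_type(&i830, AGP_USER_CACHED_MEMORY));
	return 0;
}
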
diff --git a/drivers/char/agp/hp-agp.c b/drivers/char/agp/hp-agp.c
index 907fb66ec4a9..847deabf7f9b 100644
--- a/drivers/char/agp/hp-agp.c
+++ b/drivers/char/agp/hp-agp.c
@@ -438,6 +438,7 @@ struct agp_bridge_driver hp_zx1_driver = { | |||
438 | .free_by_type = agp_generic_free_by_type, | 438 | .free_by_type = agp_generic_free_by_type, |
439 | .agp_alloc_page = agp_generic_alloc_page, | 439 | .agp_alloc_page = agp_generic_alloc_page, |
440 | .agp_destroy_page = agp_generic_destroy_page, | 440 | .agp_destroy_page = agp_generic_destroy_page, |
441 | .agp_type_to_mask_type = agp_generic_type_to_mask_type, | ||
441 | .cant_use_aperture = 1, | 442 | .cant_use_aperture = 1, |
442 | }; | 443 | }; |
443 | 444 | ||
diff --git a/drivers/char/agp/i460-agp.c b/drivers/char/agp/i460-agp.c
index 91769443d8fe..3e7618653abd 100644
--- a/drivers/char/agp/i460-agp.c
+++ b/drivers/char/agp/i460-agp.c
@@ -293,6 +293,9 @@ static int i460_insert_memory_small_io_page (struct agp_memory *mem, | |||
293 | pr_debug("i460_insert_memory_small_io_page(mem=%p, pg_start=%ld, type=%d, paddr0=0x%lx)\n", | 293 | pr_debug("i460_insert_memory_small_io_page(mem=%p, pg_start=%ld, type=%d, paddr0=0x%lx)\n", |
294 | mem, pg_start, type, mem->memory[0]); | 294 | mem, pg_start, type, mem->memory[0]); |
295 | 295 | ||
296 | if (type >= AGP_USER_TYPES || mem->type >= AGP_USER_TYPES) | ||
297 | return -EINVAL; | ||
298 | |||
296 | io_pg_start = I460_IOPAGES_PER_KPAGE * pg_start; | 299 | io_pg_start = I460_IOPAGES_PER_KPAGE * pg_start; |
297 | 300 | ||
298 | temp = agp_bridge->current_size; | 301 | temp = agp_bridge->current_size; |
@@ -396,6 +399,9 @@ static int i460_insert_memory_large_io_page (struct agp_memory *mem, | |||
396 | struct lp_desc *start, *end, *lp; | 399 | struct lp_desc *start, *end, *lp; |
397 | void *temp; | 400 | void *temp; |
398 | 401 | ||
402 | if (type >= AGP_USER_TYPES || mem->type >= AGP_USER_TYPES) | ||
403 | return -EINVAL; | ||
404 | |||
399 | temp = agp_bridge->current_size; | 405 | temp = agp_bridge->current_size; |
400 | num_entries = A_SIZE_8(temp)->num_entries; | 406 | num_entries = A_SIZE_8(temp)->num_entries; |
401 | 407 | ||
@@ -572,6 +578,7 @@ struct agp_bridge_driver intel_i460_driver = { | |||
572 | #endif | 578 | #endif |
573 | .alloc_by_type = agp_generic_alloc_by_type, | 579 | .alloc_by_type = agp_generic_alloc_by_type, |
574 | .free_by_type = agp_generic_free_by_type, | 580 | .free_by_type = agp_generic_free_by_type, |
581 | .agp_type_to_mask_type = agp_generic_type_to_mask_type, | ||
575 | .cant_use_aperture = 1, | 582 | .cant_use_aperture = 1, |
576 | }; | 583 | }; |
577 | 584 | ||
diff --git a/drivers/char/agp/intel-agp.c b/drivers/char/agp/intel-agp.c
index a3011de51f7c..06b0bb6d982f 100644
--- a/drivers/char/agp/intel-agp.c
+++ b/drivers/char/agp/intel-agp.c
@@ -5,6 +5,7 @@ | |||
5 | #include <linux/module.h> | 5 | #include <linux/module.h> |
6 | #include <linux/pci.h> | 6 | #include <linux/pci.h> |
7 | #include <linux/init.h> | 7 | #include <linux/init.h> |
8 | #include <linux/kernel.h> | ||
8 | #include <linux/pagemap.h> | 9 | #include <linux/pagemap.h> |
9 | #include <linux/agp_backend.h> | 10 | #include <linux/agp_backend.h> |
10 | #include "agp.h" | 11 | #include "agp.h" |
@@ -24,6 +25,9 @@ | |||
24 | agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_82965G_HB) | 25 | agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_82965G_HB) |
25 | 26 | ||
26 | 27 | ||
28 | extern int agp_memory_reserved; | ||
29 | |||
30 | |||
27 | /* Intel 815 register */ | 31 | /* Intel 815 register */ |
28 | #define INTEL_815_APCONT 0x51 | 32 | #define INTEL_815_APCONT 0x51 |
29 | #define INTEL_815_ATTBASE_MASK ~0x1FFFFFFF | 33 | #define INTEL_815_ATTBASE_MASK ~0x1FFFFFFF |
@@ -68,12 +72,15 @@ static struct aper_size_info_fixed intel_i810_sizes[] = | |||
68 | 72 | ||
69 | #define AGP_DCACHE_MEMORY 1 | 73 | #define AGP_DCACHE_MEMORY 1 |
70 | #define AGP_PHYS_MEMORY 2 | 74 | #define AGP_PHYS_MEMORY 2 |
75 | #define INTEL_AGP_CACHED_MEMORY 3 | ||
71 | 76 | ||
72 | static struct gatt_mask intel_i810_masks[] = | 77 | static struct gatt_mask intel_i810_masks[] = |
73 | { | 78 | { |
74 | {.mask = I810_PTE_VALID, .type = 0}, | 79 | {.mask = I810_PTE_VALID, .type = 0}, |
75 | {.mask = (I810_PTE_VALID | I810_PTE_LOCAL), .type = AGP_DCACHE_MEMORY}, | 80 | {.mask = (I810_PTE_VALID | I810_PTE_LOCAL), .type = AGP_DCACHE_MEMORY}, |
76 | {.mask = I810_PTE_VALID, .type = 0} | 81 | {.mask = I810_PTE_VALID, .type = 0}, |
82 | {.mask = I810_PTE_VALID | I830_PTE_SYSTEM_CACHED, | ||
83 | .type = INTEL_AGP_CACHED_MEMORY} | ||
77 | }; | 84 | }; |
78 | 85 | ||
79 | static struct _intel_i810_private { | 86 | static struct _intel_i810_private { |
@@ -117,13 +124,15 @@ static int intel_i810_configure(void) | |||
117 | 124 | ||
118 | current_size = A_SIZE_FIX(agp_bridge->current_size); | 125 | current_size = A_SIZE_FIX(agp_bridge->current_size); |
119 | 126 | ||
120 | pci_read_config_dword(intel_i810_private.i810_dev, I810_MMADDR, &temp); | ||
121 | temp &= 0xfff80000; | ||
122 | |||
123 | intel_i810_private.registers = ioremap(temp, 128 * 4096); | ||
124 | if (!intel_i810_private.registers) { | 127 | if (!intel_i810_private.registers) { |
125 | printk(KERN_ERR PFX "Unable to remap memory.\n"); | 128 | pci_read_config_dword(intel_i810_private.i810_dev, I810_MMADDR, &temp); |
126 | return -ENOMEM; | 129 | temp &= 0xfff80000; |
130 | |||
131 | intel_i810_private.registers = ioremap(temp, 128 * 4096); | ||
132 | if (!intel_i810_private.registers) { | ||
133 | printk(KERN_ERR PFX "Unable to remap memory.\n"); | ||
134 | return -ENOMEM; | ||
135 | } | ||
127 | } | 136 | } |
128 | 137 | ||
129 | if ((readl(intel_i810_private.registers+I810_DRAM_CTL) | 138 | if ((readl(intel_i810_private.registers+I810_DRAM_CTL) |
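The reworked intel_i810_configure() now maps the MMIO window only when it has not been mapped yet, so a repeat call (for example after resume) no longer leaks the previous ioremap(). A userspace analogue of that map-once guard, with malloc() standing in for ioremap() and the structure names invented for the sketch:

#include <stdio.h>
#include <stdlib.h>

struct i810_private {
        void *registers;        /* stands in for the ioremap()ed register window */
};

/* Configure is idempotent: only the first call creates the mapping,
 * later calls reuse it instead of leaking a fresh one. */
static int configure(struct i810_private *priv)
{
        if (!priv->registers) {
                priv->registers = malloc(128 * 4096);
                if (!priv->registers)
                        return -1;      /* -ENOMEM in the driver */
        }
        /* ... program the hardware through priv->registers ... */
        return 0;
}

int main(void)
{
        struct i810_private priv = { NULL };

        configure(&priv);
        configure(&priv);       /* second call keeps the first mapping */
        free(priv.registers);
        return 0;
}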
@@ -201,62 +210,79 @@ static void i8xx_destroy_pages(void *addr) | |||
201 | atomic_dec(&agp_bridge->current_memory_agp); | 210 | atomic_dec(&agp_bridge->current_memory_agp); |
202 | } | 211 | } |
203 | 212 | ||
213 | static int intel_i830_type_to_mask_type(struct agp_bridge_data *bridge, | ||
214 | int type) | ||
215 | { | ||
216 | if (type < AGP_USER_TYPES) | ||
217 | return type; | ||
218 | else if (type == AGP_USER_CACHED_MEMORY) | ||
219 | return INTEL_AGP_CACHED_MEMORY; | ||
220 | else | ||
221 | return 0; | ||
222 | } | ||
223 | |||
204 | static int intel_i810_insert_entries(struct agp_memory *mem, off_t pg_start, | 224 | static int intel_i810_insert_entries(struct agp_memory *mem, off_t pg_start, |
205 | int type) | 225 | int type) |
206 | { | 226 | { |
207 | int i, j, num_entries; | 227 | int i, j, num_entries; |
208 | void *temp; | 228 | void *temp; |
229 | int ret = -EINVAL; | ||
230 | int mask_type; | ||
209 | 231 | ||
210 | if (mem->page_count == 0) | 232 | if (mem->page_count == 0) |
211 | return 0; | 233 | goto out; |
212 | 234 | ||
213 | temp = agp_bridge->current_size; | 235 | temp = agp_bridge->current_size; |
214 | num_entries = A_SIZE_FIX(temp)->num_entries; | 236 | num_entries = A_SIZE_FIX(temp)->num_entries; |
215 | 237 | ||
216 | if ((pg_start + mem->page_count) > num_entries) | 238 | if ((pg_start + mem->page_count) > num_entries) |
217 | return -EINVAL; | 239 | goto out_err; |
218 | 240 | ||
219 | for (j = pg_start; j < (pg_start + mem->page_count); j++) { | ||
220 | if (!PGE_EMPTY(agp_bridge, readl(agp_bridge->gatt_table+j))) | ||
221 | return -EBUSY; | ||
222 | } | ||
223 | 241 | ||
224 | if (type != 0 || mem->type != 0) { | 242 | for (j = pg_start; j < (pg_start + mem->page_count); j++) { |
225 | if ((type == AGP_DCACHE_MEMORY) && (mem->type == AGP_DCACHE_MEMORY)) { | 243 | if (!PGE_EMPTY(agp_bridge, readl(agp_bridge->gatt_table+j))) { |
226 | /* special insert */ | 244 | ret = -EBUSY; |
227 | if (!mem->is_flushed) { | 245 | goto out_err; |
228 | global_cache_flush(); | ||
229 | mem->is_flushed = TRUE; | ||
230 | } | ||
231 | |||
232 | for (i = pg_start; i < (pg_start + mem->page_count); i++) { | ||
233 | writel((i*4096)|I810_PTE_LOCAL|I810_PTE_VALID, intel_i810_private.registers+I810_PTE_BASE+(i*4)); | ||
234 | } | ||
235 | readl(intel_i810_private.registers+I810_PTE_BASE+((i-1)*4)); /* PCI Posting. */ | ||
236 | |||
237 | agp_bridge->driver->tlb_flush(mem); | ||
238 | return 0; | ||
239 | } | 246 | } |
240 | if ((type == AGP_PHYS_MEMORY) && (mem->type == AGP_PHYS_MEMORY)) | ||
241 | goto insert; | ||
242 | return -EINVAL; | ||
243 | } | 247 | } |
244 | 248 | ||
245 | insert: | 249 | if (type != mem->type) |
246 | if (!mem->is_flushed) { | 250 | goto out_err; |
247 | global_cache_flush(); | ||
248 | mem->is_flushed = TRUE; | ||
249 | } | ||
250 | 251 | ||
251 | for (i = 0, j = pg_start; i < mem->page_count; i++, j++) { | 252 | mask_type = agp_bridge->driver->agp_type_to_mask_type(agp_bridge, type); |
252 | writel(agp_bridge->driver->mask_memory(agp_bridge, | 253 | |
253 | mem->memory[i], mem->type), | 254 | switch (mask_type) { |
254 | intel_i810_private.registers+I810_PTE_BASE+(j*4)); | 255 | case AGP_DCACHE_MEMORY: |
256 | if (!mem->is_flushed) | ||
257 | global_cache_flush(); | ||
258 | for (i = pg_start; i < (pg_start + mem->page_count); i++) { | ||
259 | writel((i*4096)|I810_PTE_LOCAL|I810_PTE_VALID, | ||
260 | intel_i810_private.registers+I810_PTE_BASE+(i*4)); | ||
261 | } | ||
262 | readl(intel_i810_private.registers+I810_PTE_BASE+((i-1)*4)); | ||
263 | break; | ||
264 | case AGP_PHYS_MEMORY: | ||
265 | case AGP_NORMAL_MEMORY: | ||
266 | if (!mem->is_flushed) | ||
267 | global_cache_flush(); | ||
268 | for (i = 0, j = pg_start; i < mem->page_count; i++, j++) { | ||
269 | writel(agp_bridge->driver->mask_memory(agp_bridge, | ||
270 | mem->memory[i], | ||
271 | mask_type), | ||
272 | intel_i810_private.registers+I810_PTE_BASE+(j*4)); | ||
273 | } | ||
274 | readl(intel_i810_private.registers+I810_PTE_BASE+((j-1)*4)); | ||
275 | break; | ||
276 | default: | ||
277 | goto out_err; | ||
255 | } | 278 | } |
256 | readl(intel_i810_private.registers+I810_PTE_BASE+((j-1)*4)); /* PCI Posting. */ | ||
257 | 279 | ||
258 | agp_bridge->driver->tlb_flush(mem); | 280 | agp_bridge->driver->tlb_flush(mem); |
259 | return 0; | 281 | out: |
282 | ret = 0; | ||
283 | out_err: | ||
284 | mem->is_flushed = 1; | ||
285 | return ret; | ||
260 | } | 286 | } |
261 | 287 | ||
262 | static int intel_i810_remove_entries(struct agp_memory *mem, off_t pg_start, | 288 | static int intel_i810_remove_entries(struct agp_memory *mem, off_t pg_start, |
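The rewritten i810 insert path first checks that the caller's type matches the memory's type, asks the bridge's agp_type_to_mask_type hook for a mask index once, and then dispatches on that index through a switch with a single shared exit that marks the memory flushed. A compact userspace sketch of that control flow (the enum values and the translation below are illustrative, not the driver's real constants):

#include <stdio.h>

enum { NORMAL_MEMORY, DCACHE_MEMORY, PHYS_MEMORY, NUM_DRIVER_TYPES };

/* Stand-in for bridge->driver->agp_type_to_mask_type(). */
static int type_to_mask_type(int type)
{
        return (type >= NUM_DRIVER_TYPES) ? 0 : type;
}

static int insert_entries(int type, int mem_type)
{
        int ret = -1;                   /* -EINVAL in the driver */
        int mask_type;

        if (type != mem_type)
                goto out_err;

        mask_type = type_to_mask_type(type);
        switch (mask_type) {
        case DCACHE_MEMORY:
                printf("write local (on-chip) PTEs\n");
                break;
        case PHYS_MEMORY:
        case NORMAL_MEMORY:
                printf("write masked physical addresses into the GTT\n");
                break;
        default:
                goto out_err;
        }
        ret = 0;
out_err:
        /* the driver marks mem->is_flushed on the way out in either case */
        return ret;
}

int main(void)
{
        printf("ret=%d\n", insert_entries(DCACHE_MEMORY, DCACHE_MEMORY));
        printf("ret=%d\n", insert_entries(PHYS_MEMORY, NORMAL_MEMORY)); /* mismatch -> error */
        return 0;
}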
@@ -337,12 +363,11 @@ static struct agp_memory *intel_i810_alloc_by_type(size_t pg_count, int type) | |||
337 | new->type = AGP_DCACHE_MEMORY; | 363 | new->type = AGP_DCACHE_MEMORY; |
338 | new->page_count = pg_count; | 364 | new->page_count = pg_count; |
339 | new->num_scratch_pages = 0; | 365 | new->num_scratch_pages = 0; |
340 | vfree(new->memory); | 366 | agp_free_page_array(new); |
341 | return new; | 367 | return new; |
342 | } | 368 | } |
343 | if (type == AGP_PHYS_MEMORY) | 369 | if (type == AGP_PHYS_MEMORY) |
344 | return alloc_agpphysmem_i8xx(pg_count, type); | 370 | return alloc_agpphysmem_i8xx(pg_count, type); |
345 | |||
346 | return NULL; | 371 | return NULL; |
347 | } | 372 | } |
348 | 373 | ||
@@ -357,7 +382,7 @@ static void intel_i810_free_by_type(struct agp_memory *curr) | |||
357 | gart_to_virt(curr->memory[0])); | 382 | gart_to_virt(curr->memory[0])); |
358 | global_flush_tlb(); | 383 | global_flush_tlb(); |
359 | } | 384 | } |
360 | vfree(curr->memory); | 385 | agp_free_page_array(curr); |
361 | } | 386 | } |
362 | kfree(curr); | 387 | kfree(curr); |
363 | } | 388 | } |
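Replacing the bare vfree() with agp_free_page_array() keeps the free path paired with however the page-pointer array was allocated. Assuming the helper mirrors an allocator that uses kmalloc() for small arrays and vmalloc() for large ones, the pairing looks roughly like the sketch below; the threshold and function names are assumptions for illustration, not the helper's actual implementation:

#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>

/* Hypothetical allocator: small arrays from the slab, big ones from vmalloc. */
static void *alloc_page_array(size_t bytes)
{
        if (bytes <= PAGE_SIZE)
                return kmalloc(bytes, GFP_KERNEL);
        return vmalloc(bytes);
}

/* The matching free must make the same size decision, which is why a
 * single helper is safer than an open-coded vfree() at every call site. */
static void free_page_array(void *array, size_t bytes)
{
        if (!array)
                return;
        if (bytes <= PAGE_SIZE)
                kfree(array);
        else
                vfree(array);
}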
@@ -619,9 +644,11 @@ static int intel_i830_insert_entries(struct agp_memory *mem,off_t pg_start, int | |||
619 | { | 644 | { |
620 | int i,j,num_entries; | 645 | int i,j,num_entries; |
621 | void *temp; | 646 | void *temp; |
647 | int ret = -EINVAL; | ||
648 | int mask_type; | ||
622 | 649 | ||
623 | if (mem->page_count == 0) | 650 | if (mem->page_count == 0) |
624 | return 0; | 651 | goto out; |
625 | 652 | ||
626 | temp = agp_bridge->current_size; | 653 | temp = agp_bridge->current_size; |
627 | num_entries = A_SIZE_FIX(temp)->num_entries; | 654 | num_entries = A_SIZE_FIX(temp)->num_entries; |
@@ -631,34 +658,41 @@ static int intel_i830_insert_entries(struct agp_memory *mem,off_t pg_start, int | |||
631 | pg_start,intel_i830_private.gtt_entries); | 658 | pg_start,intel_i830_private.gtt_entries); |
632 | 659 | ||
633 | printk (KERN_INFO PFX "Trying to insert into local/stolen memory\n"); | 660 | printk (KERN_INFO PFX "Trying to insert into local/stolen memory\n"); |
634 | return -EINVAL; | 661 | goto out_err; |
635 | } | 662 | } |
636 | 663 | ||
637 | if ((pg_start + mem->page_count) > num_entries) | 664 | if ((pg_start + mem->page_count) > num_entries) |
638 | return -EINVAL; | 665 | goto out_err; |
639 | 666 | ||
640 | /* The i830 can't check the GTT for entries since it's read only, | 667 | /* The i830 can't check the GTT for entries since it's read only, |
641 | * depend on the caller to make the correct offset decisions. | 668 | * depend on the caller to make the correct offset decisions. |
642 | */ | 669 | */ |
643 | 670 | ||
644 | if ((type != 0 && type != AGP_PHYS_MEMORY) || | 671 | if (type != mem->type) |
645 | (mem->type != 0 && mem->type != AGP_PHYS_MEMORY)) | 672 | goto out_err; |
646 | return -EINVAL; | 673 | |
674 | mask_type = agp_bridge->driver->agp_type_to_mask_type(agp_bridge, type); | ||
647 | 675 | ||
648 | if (!mem->is_flushed) { | 676 | if (mask_type != 0 && mask_type != AGP_PHYS_MEMORY && |
677 | mask_type != INTEL_AGP_CACHED_MEMORY) | ||
678 | goto out_err; | ||
679 | |||
680 | if (!mem->is_flushed) | ||
649 | global_cache_flush(); | 681 | global_cache_flush(); |
650 | mem->is_flushed = TRUE; | ||
651 | } | ||
652 | 682 | ||
653 | for (i = 0, j = pg_start; i < mem->page_count; i++, j++) { | 683 | for (i = 0, j = pg_start; i < mem->page_count; i++, j++) { |
654 | writel(agp_bridge->driver->mask_memory(agp_bridge, | 684 | writel(agp_bridge->driver->mask_memory(agp_bridge, |
655 | mem->memory[i], mem->type), | 685 | mem->memory[i], mask_type), |
656 | intel_i830_private.registers+I810_PTE_BASE+(j*4)); | 686 | intel_i830_private.registers+I810_PTE_BASE+(j*4)); |
657 | } | 687 | } |
658 | readl(intel_i830_private.registers+I810_PTE_BASE+((j-1)*4)); | 688 | readl(intel_i830_private.registers+I810_PTE_BASE+((j-1)*4)); |
659 | |||
660 | agp_bridge->driver->tlb_flush(mem); | 689 | agp_bridge->driver->tlb_flush(mem); |
661 | return 0; | 690 | |
691 | out: | ||
692 | ret = 0; | ||
693 | out_err: | ||
694 | mem->is_flushed = 1; | ||
695 | return ret; | ||
662 | } | 696 | } |
663 | 697 | ||
664 | static int intel_i830_remove_entries(struct agp_memory *mem,off_t pg_start, | 698 | static int intel_i830_remove_entries(struct agp_memory *mem,off_t pg_start, |
@@ -687,7 +721,6 @@ static struct agp_memory *intel_i830_alloc_by_type(size_t pg_count,int type) | |||
687 | { | 721 | { |
688 | if (type == AGP_PHYS_MEMORY) | 722 | if (type == AGP_PHYS_MEMORY) |
689 | return alloc_agpphysmem_i8xx(pg_count, type); | 723 | return alloc_agpphysmem_i8xx(pg_count, type); |
690 | |||
691 | /* always return NULL for other allocation types for now */ | 724 | /* always return NULL for other allocation types for now */ |
692 | return NULL; | 725 | return NULL; |
693 | } | 726 | } |
@@ -734,9 +767,11 @@ static int intel_i915_insert_entries(struct agp_memory *mem,off_t pg_start, | |||
734 | { | 767 | { |
735 | int i,j,num_entries; | 768 | int i,j,num_entries; |
736 | void *temp; | 769 | void *temp; |
770 | int ret = -EINVAL; | ||
771 | int mask_type; | ||
737 | 772 | ||
738 | if (mem->page_count == 0) | 773 | if (mem->page_count == 0) |
739 | return 0; | 774 | goto out; |
740 | 775 | ||
741 | temp = agp_bridge->current_size; | 776 | temp = agp_bridge->current_size; |
742 | num_entries = A_SIZE_FIX(temp)->num_entries; | 777 | num_entries = A_SIZE_FIX(temp)->num_entries; |
@@ -746,33 +781,41 @@ static int intel_i915_insert_entries(struct agp_memory *mem,off_t pg_start, | |||
746 | pg_start,intel_i830_private.gtt_entries); | 781 | pg_start,intel_i830_private.gtt_entries); |
747 | 782 | ||
748 | printk (KERN_INFO PFX "Trying to insert into local/stolen memory\n"); | 783 | printk (KERN_INFO PFX "Trying to insert into local/stolen memory\n"); |
749 | return -EINVAL; | 784 | goto out_err; |
750 | } | 785 | } |
751 | 786 | ||
752 | if ((pg_start + mem->page_count) > num_entries) | 787 | if ((pg_start + mem->page_count) > num_entries) |
753 | return -EINVAL; | 788 | goto out_err; |
754 | 789 | ||
755 | /* The i830 can't check the GTT for entries since it's read only, | 790 | /* The i915 can't check the GTT for entries since it's read only, |
756 | * depend on the caller to make the correct offset decisions. | 791 | * depend on the caller to make the correct offset decisions. |
757 | */ | 792 | */ |
758 | 793 | ||
759 | if ((type != 0 && type != AGP_PHYS_MEMORY) || | 794 | if (type != mem->type) |
760 | (mem->type != 0 && mem->type != AGP_PHYS_MEMORY)) | 795 | goto out_err; |
761 | return -EINVAL; | 796 | |
797 | mask_type = agp_bridge->driver->agp_type_to_mask_type(agp_bridge, type); | ||
762 | 798 | ||
763 | if (!mem->is_flushed) { | 799 | if (mask_type != 0 && mask_type != AGP_PHYS_MEMORY && |
800 | mask_type != INTEL_AGP_CACHED_MEMORY) | ||
801 | goto out_err; | ||
802 | |||
803 | if (!mem->is_flushed) | ||
764 | global_cache_flush(); | 804 | global_cache_flush(); |
765 | mem->is_flushed = TRUE; | ||
766 | } | ||
767 | 805 | ||
768 | for (i = 0, j = pg_start; i < mem->page_count; i++, j++) { | 806 | for (i = 0, j = pg_start; i < mem->page_count; i++, j++) { |
769 | writel(agp_bridge->driver->mask_memory(agp_bridge, | 807 | writel(agp_bridge->driver->mask_memory(agp_bridge, |
770 | mem->memory[i], mem->type), intel_i830_private.gtt+j); | 808 | mem->memory[i], mask_type), intel_i830_private.gtt+j); |
771 | } | 809 | } |
772 | readl(intel_i830_private.gtt+j-1); | ||
773 | 810 | ||
811 | readl(intel_i830_private.gtt+j-1); | ||
774 | agp_bridge->driver->tlb_flush(mem); | 812 | agp_bridge->driver->tlb_flush(mem); |
775 | return 0; | 813 | |
814 | out: | ||
815 | ret = 0; | ||
816 | out_err: | ||
817 | mem->is_flushed = 1; | ||
818 | return ret; | ||
776 | } | 819 | } |
777 | 820 | ||
778 | static int intel_i915_remove_entries(struct agp_memory *mem,off_t pg_start, | 821 | static int intel_i915_remove_entries(struct agp_memory *mem,off_t pg_start, |
@@ -803,7 +846,7 @@ static int intel_i915_remove_entries(struct agp_memory *mem,off_t pg_start, | |||
803 | */ | 846 | */ |
804 | static int intel_i9xx_fetch_size(void) | 847 | static int intel_i9xx_fetch_size(void) |
805 | { | 848 | { |
806 | int num_sizes = sizeof(intel_i830_sizes) / sizeof(*intel_i830_sizes); | 849 | int num_sizes = ARRAY_SIZE(intel_i830_sizes); |
807 | int aper_size; /* size in megabytes */ | 850 | int aper_size; /* size in megabytes */ |
808 | int i; | 851 | int i; |
809 | 852 | ||
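The open-coded sizeof division becomes ARRAY_SIZE(), which is what the new #include <linux/kernel.h> at the top of the file provides. Outside the kernel the same macro is a one-liner:

#include <stdio.h>

/* Same expansion as the kernel macro, minus its compile-time check that
 * the argument really is an array and not a pointer. */
#define ARRAY_SIZE(arr) (sizeof(arr) / sizeof((arr)[0]))

int main(void)
{
        int aperture_sizes[] = { 128, 256, 512 };

        printf("%zu entries\n", ARRAY_SIZE(aperture_sizes));    /* prints 3 */
        return 0;
}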
@@ -1384,6 +1427,7 @@ static struct agp_bridge_driver intel_generic_driver = { | |||
1384 | .free_by_type = agp_generic_free_by_type, | 1427 | .free_by_type = agp_generic_free_by_type, |
1385 | .agp_alloc_page = agp_generic_alloc_page, | 1428 | .agp_alloc_page = agp_generic_alloc_page, |
1386 | .agp_destroy_page = agp_generic_destroy_page, | 1429 | .agp_destroy_page = agp_generic_destroy_page, |
1430 | .agp_type_to_mask_type = agp_generic_type_to_mask_type, | ||
1387 | }; | 1431 | }; |
1388 | 1432 | ||
1389 | static struct agp_bridge_driver intel_810_driver = { | 1433 | static struct agp_bridge_driver intel_810_driver = { |
@@ -1408,6 +1452,7 @@ static struct agp_bridge_driver intel_810_driver = { | |||
1408 | .free_by_type = intel_i810_free_by_type, | 1452 | .free_by_type = intel_i810_free_by_type, |
1409 | .agp_alloc_page = agp_generic_alloc_page, | 1453 | .agp_alloc_page = agp_generic_alloc_page, |
1410 | .agp_destroy_page = agp_generic_destroy_page, | 1454 | .agp_destroy_page = agp_generic_destroy_page, |
1455 | .agp_type_to_mask_type = agp_generic_type_to_mask_type, | ||
1411 | }; | 1456 | }; |
1412 | 1457 | ||
1413 | static struct agp_bridge_driver intel_815_driver = { | 1458 | static struct agp_bridge_driver intel_815_driver = { |
@@ -1431,6 +1476,7 @@ static struct agp_bridge_driver intel_815_driver = { | |||
1431 | .free_by_type = agp_generic_free_by_type, | 1476 | .free_by_type = agp_generic_free_by_type, |
1432 | .agp_alloc_page = agp_generic_alloc_page, | 1477 | .agp_alloc_page = agp_generic_alloc_page, |
1433 | .agp_destroy_page = agp_generic_destroy_page, | 1478 | .agp_destroy_page = agp_generic_destroy_page, |
1479 | .agp_type_to_mask_type = agp_generic_type_to_mask_type, | ||
1434 | }; | 1480 | }; |
1435 | 1481 | ||
1436 | static struct agp_bridge_driver intel_830_driver = { | 1482 | static struct agp_bridge_driver intel_830_driver = { |
@@ -1455,6 +1501,7 @@ static struct agp_bridge_driver intel_830_driver = { | |||
1455 | .free_by_type = intel_i810_free_by_type, | 1501 | .free_by_type = intel_i810_free_by_type, |
1456 | .agp_alloc_page = agp_generic_alloc_page, | 1502 | .agp_alloc_page = agp_generic_alloc_page, |
1457 | .agp_destroy_page = agp_generic_destroy_page, | 1503 | .agp_destroy_page = agp_generic_destroy_page, |
1504 | .agp_type_to_mask_type = intel_i830_type_to_mask_type, | ||
1458 | }; | 1505 | }; |
1459 | 1506 | ||
1460 | static struct agp_bridge_driver intel_820_driver = { | 1507 | static struct agp_bridge_driver intel_820_driver = { |
@@ -1478,6 +1525,7 @@ static struct agp_bridge_driver intel_820_driver = { | |||
1478 | .free_by_type = agp_generic_free_by_type, | 1525 | .free_by_type = agp_generic_free_by_type, |
1479 | .agp_alloc_page = agp_generic_alloc_page, | 1526 | .agp_alloc_page = agp_generic_alloc_page, |
1480 | .agp_destroy_page = agp_generic_destroy_page, | 1527 | .agp_destroy_page = agp_generic_destroy_page, |
1528 | .agp_type_to_mask_type = agp_generic_type_to_mask_type, | ||
1481 | }; | 1529 | }; |
1482 | 1530 | ||
1483 | static struct agp_bridge_driver intel_830mp_driver = { | 1531 | static struct agp_bridge_driver intel_830mp_driver = { |
@@ -1501,6 +1549,7 @@ static struct agp_bridge_driver intel_830mp_driver = { | |||
1501 | .free_by_type = agp_generic_free_by_type, | 1549 | .free_by_type = agp_generic_free_by_type, |
1502 | .agp_alloc_page = agp_generic_alloc_page, | 1550 | .agp_alloc_page = agp_generic_alloc_page, |
1503 | .agp_destroy_page = agp_generic_destroy_page, | 1551 | .agp_destroy_page = agp_generic_destroy_page, |
1552 | .agp_type_to_mask_type = agp_generic_type_to_mask_type, | ||
1504 | }; | 1553 | }; |
1505 | 1554 | ||
1506 | static struct agp_bridge_driver intel_840_driver = { | 1555 | static struct agp_bridge_driver intel_840_driver = { |
@@ -1524,6 +1573,7 @@ static struct agp_bridge_driver intel_840_driver = { | |||
1524 | .free_by_type = agp_generic_free_by_type, | 1573 | .free_by_type = agp_generic_free_by_type, |
1525 | .agp_alloc_page = agp_generic_alloc_page, | 1574 | .agp_alloc_page = agp_generic_alloc_page, |
1526 | .agp_destroy_page = agp_generic_destroy_page, | 1575 | .agp_destroy_page = agp_generic_destroy_page, |
1576 | .agp_type_to_mask_type = agp_generic_type_to_mask_type, | ||
1527 | }; | 1577 | }; |
1528 | 1578 | ||
1529 | static struct agp_bridge_driver intel_845_driver = { | 1579 | static struct agp_bridge_driver intel_845_driver = { |
@@ -1547,6 +1597,7 @@ static struct agp_bridge_driver intel_845_driver = { | |||
1547 | .free_by_type = agp_generic_free_by_type, | 1597 | .free_by_type = agp_generic_free_by_type, |
1548 | .agp_alloc_page = agp_generic_alloc_page, | 1598 | .agp_alloc_page = agp_generic_alloc_page, |
1549 | .agp_destroy_page = agp_generic_destroy_page, | 1599 | .agp_destroy_page = agp_generic_destroy_page, |
1600 | .agp_type_to_mask_type = agp_generic_type_to_mask_type, | ||
1550 | }; | 1601 | }; |
1551 | 1602 | ||
1552 | static struct agp_bridge_driver intel_850_driver = { | 1603 | static struct agp_bridge_driver intel_850_driver = { |
@@ -1570,6 +1621,7 @@ static struct agp_bridge_driver intel_850_driver = { | |||
1570 | .free_by_type = agp_generic_free_by_type, | 1621 | .free_by_type = agp_generic_free_by_type, |
1571 | .agp_alloc_page = agp_generic_alloc_page, | 1622 | .agp_alloc_page = agp_generic_alloc_page, |
1572 | .agp_destroy_page = agp_generic_destroy_page, | 1623 | .agp_destroy_page = agp_generic_destroy_page, |
1624 | .agp_type_to_mask_type = agp_generic_type_to_mask_type, | ||
1573 | }; | 1625 | }; |
1574 | 1626 | ||
1575 | static struct agp_bridge_driver intel_860_driver = { | 1627 | static struct agp_bridge_driver intel_860_driver = { |
@@ -1593,6 +1645,7 @@ static struct agp_bridge_driver intel_860_driver = { | |||
1593 | .free_by_type = agp_generic_free_by_type, | 1645 | .free_by_type = agp_generic_free_by_type, |
1594 | .agp_alloc_page = agp_generic_alloc_page, | 1646 | .agp_alloc_page = agp_generic_alloc_page, |
1595 | .agp_destroy_page = agp_generic_destroy_page, | 1647 | .agp_destroy_page = agp_generic_destroy_page, |
1648 | .agp_type_to_mask_type = agp_generic_type_to_mask_type, | ||
1596 | }; | 1649 | }; |
1597 | 1650 | ||
1598 | static struct agp_bridge_driver intel_915_driver = { | 1651 | static struct agp_bridge_driver intel_915_driver = { |
@@ -1617,6 +1670,7 @@ static struct agp_bridge_driver intel_915_driver = { | |||
1617 | .free_by_type = intel_i810_free_by_type, | 1670 | .free_by_type = intel_i810_free_by_type, |
1618 | .agp_alloc_page = agp_generic_alloc_page, | 1671 | .agp_alloc_page = agp_generic_alloc_page, |
1619 | .agp_destroy_page = agp_generic_destroy_page, | 1672 | .agp_destroy_page = agp_generic_destroy_page, |
1673 | .agp_type_to_mask_type = intel_i830_type_to_mask_type, | ||
1620 | }; | 1674 | }; |
1621 | 1675 | ||
1622 | static struct agp_bridge_driver intel_i965_driver = { | 1676 | static struct agp_bridge_driver intel_i965_driver = { |
@@ -1641,6 +1695,7 @@ static struct agp_bridge_driver intel_i965_driver = { | |||
1641 | .free_by_type = intel_i810_free_by_type, | 1695 | .free_by_type = intel_i810_free_by_type, |
1642 | .agp_alloc_page = agp_generic_alloc_page, | 1696 | .agp_alloc_page = agp_generic_alloc_page, |
1643 | .agp_destroy_page = agp_generic_destroy_page, | 1697 | .agp_destroy_page = agp_generic_destroy_page, |
1698 | .agp_type_to_mask_type = intel_i830_type_to_mask_type, | ||
1644 | }; | 1699 | }; |
1645 | 1700 | ||
1646 | static struct agp_bridge_driver intel_7505_driver = { | 1701 | static struct agp_bridge_driver intel_7505_driver = { |
@@ -1664,6 +1719,7 @@ static struct agp_bridge_driver intel_7505_driver = { | |||
1664 | .free_by_type = agp_generic_free_by_type, | 1719 | .free_by_type = agp_generic_free_by_type, |
1665 | .agp_alloc_page = agp_generic_alloc_page, | 1720 | .agp_alloc_page = agp_generic_alloc_page, |
1666 | .agp_destroy_page = agp_generic_destroy_page, | 1721 | .agp_destroy_page = agp_generic_destroy_page, |
1722 | .agp_type_to_mask_type = agp_generic_type_to_mask_type, | ||
1667 | }; | 1723 | }; |
1668 | 1724 | ||
1669 | static int find_i810(u16 device) | 1725 | static int find_i810(u16 device) |
diff --git a/drivers/char/agp/nvidia-agp.c b/drivers/char/agp/nvidia-agp.c index df7f37b2739a..2563286b2fcf 100644 --- a/drivers/char/agp/nvidia-agp.c +++ b/drivers/char/agp/nvidia-agp.c | |||
@@ -310,6 +310,7 @@ static struct agp_bridge_driver nvidia_driver = { | |||
310 | .free_by_type = agp_generic_free_by_type, | 310 | .free_by_type = agp_generic_free_by_type, |
311 | .agp_alloc_page = agp_generic_alloc_page, | 311 | .agp_alloc_page = agp_generic_alloc_page, |
312 | .agp_destroy_page = agp_generic_destroy_page, | 312 | .agp_destroy_page = agp_generic_destroy_page, |
313 | .agp_type_to_mask_type = agp_generic_type_to_mask_type, | ||
313 | }; | 314 | }; |
314 | 315 | ||
315 | static int __devinit agp_nvidia_probe(struct pci_dev *pdev, | 316 | static int __devinit agp_nvidia_probe(struct pci_dev *pdev, |
diff --git a/drivers/char/agp/parisc-agp.c b/drivers/char/agp/parisc-agp.c index 17c50b0f83f0..b7b4590673ae 100644 --- a/drivers/char/agp/parisc-agp.c +++ b/drivers/char/agp/parisc-agp.c | |||
@@ -228,6 +228,7 @@ struct agp_bridge_driver parisc_agp_driver = { | |||
228 | .free_by_type = agp_generic_free_by_type, | 228 | .free_by_type = agp_generic_free_by_type, |
229 | .agp_alloc_page = agp_generic_alloc_page, | 229 | .agp_alloc_page = agp_generic_alloc_page, |
230 | .agp_destroy_page = agp_generic_destroy_page, | 230 | .agp_destroy_page = agp_generic_destroy_page, |
231 | .agp_type_to_mask_type = agp_generic_type_to_mask_type, | ||
231 | .cant_use_aperture = 1, | 232 | .cant_use_aperture = 1, |
232 | }; | 233 | }; |
233 | 234 | ||
diff --git a/drivers/char/agp/sgi-agp.c b/drivers/char/agp/sgi-agp.c index 902648db7efa..92d1dc45b9be 100644 --- a/drivers/char/agp/sgi-agp.c +++ b/drivers/char/agp/sgi-agp.c | |||
@@ -265,6 +265,7 @@ struct agp_bridge_driver sgi_tioca_driver = { | |||
265 | .free_by_type = agp_generic_free_by_type, | 265 | .free_by_type = agp_generic_free_by_type, |
266 | .agp_alloc_page = sgi_tioca_alloc_page, | 266 | .agp_alloc_page = sgi_tioca_alloc_page, |
267 | .agp_destroy_page = agp_generic_destroy_page, | 267 | .agp_destroy_page = agp_generic_destroy_page, |
268 | .agp_type_to_mask_type = agp_generic_type_to_mask_type, | ||
268 | .cant_use_aperture = 1, | 269 | .cant_use_aperture = 1, |
269 | .needs_scratch_page = 0, | 270 | .needs_scratch_page = 0, |
270 | .num_aperture_sizes = 1, | 271 | .num_aperture_sizes = 1, |
diff --git a/drivers/char/agp/sis-agp.c b/drivers/char/agp/sis-agp.c index a00fd48a6f05..60342b708152 100644 --- a/drivers/char/agp/sis-agp.c +++ b/drivers/char/agp/sis-agp.c | |||
@@ -140,6 +140,7 @@ static struct agp_bridge_driver sis_driver = { | |||
140 | .free_by_type = agp_generic_free_by_type, | 140 | .free_by_type = agp_generic_free_by_type, |
141 | .agp_alloc_page = agp_generic_alloc_page, | 141 | .agp_alloc_page = agp_generic_alloc_page, |
142 | .agp_destroy_page = agp_generic_destroy_page, | 142 | .agp_destroy_page = agp_generic_destroy_page, |
143 | .agp_type_to_mask_type = agp_generic_type_to_mask_type, | ||
143 | }; | 144 | }; |
144 | 145 | ||
145 | static struct agp_device_ids sis_agp_device_ids[] __devinitdata = | 146 | static struct agp_device_ids sis_agp_device_ids[] __devinitdata = |
diff --git a/drivers/char/agp/sworks-agp.c b/drivers/char/agp/sworks-agp.c index 4f2d7d99902f..9f5ae7714f85 100644 --- a/drivers/char/agp/sworks-agp.c +++ b/drivers/char/agp/sworks-agp.c | |||
@@ -444,6 +444,7 @@ static struct agp_bridge_driver sworks_driver = { | |||
444 | .free_by_type = agp_generic_free_by_type, | 444 | .free_by_type = agp_generic_free_by_type, |
445 | .agp_alloc_page = agp_generic_alloc_page, | 445 | .agp_alloc_page = agp_generic_alloc_page, |
446 | .agp_destroy_page = agp_generic_destroy_page, | 446 | .agp_destroy_page = agp_generic_destroy_page, |
447 | .agp_type_to_mask_type = agp_generic_type_to_mask_type, | ||
447 | }; | 448 | }; |
448 | 449 | ||
449 | static int __devinit agp_serverworks_probe(struct pci_dev *pdev, | 450 | static int __devinit agp_serverworks_probe(struct pci_dev *pdev, |
diff --git a/drivers/char/agp/uninorth-agp.c b/drivers/char/agp/uninorth-agp.c index dffc19382f7e..6c45702e542c 100644 --- a/drivers/char/agp/uninorth-agp.c +++ b/drivers/char/agp/uninorth-agp.c | |||
@@ -510,6 +510,7 @@ struct agp_bridge_driver uninorth_agp_driver = { | |||
510 | .free_by_type = agp_generic_free_by_type, | 510 | .free_by_type = agp_generic_free_by_type, |
511 | .agp_alloc_page = agp_generic_alloc_page, | 511 | .agp_alloc_page = agp_generic_alloc_page, |
512 | .agp_destroy_page = agp_generic_destroy_page, | 512 | .agp_destroy_page = agp_generic_destroy_page, |
513 | .agp_type_to_mask_type = agp_generic_type_to_mask_type, | ||
513 | .cant_use_aperture = 1, | 514 | .cant_use_aperture = 1, |
514 | }; | 515 | }; |
515 | 516 | ||
@@ -534,6 +535,7 @@ struct agp_bridge_driver u3_agp_driver = { | |||
534 | .free_by_type = agp_generic_free_by_type, | 535 | .free_by_type = agp_generic_free_by_type, |
535 | .agp_alloc_page = agp_generic_alloc_page, | 536 | .agp_alloc_page = agp_generic_alloc_page, |
536 | .agp_destroy_page = agp_generic_destroy_page, | 537 | .agp_destroy_page = agp_generic_destroy_page, |
538 | .agp_type_to_mask_type = agp_generic_type_to_mask_type, | ||
537 | .cant_use_aperture = 1, | 539 | .cant_use_aperture = 1, |
538 | .needs_scratch_page = 1, | 540 | .needs_scratch_page = 1, |
539 | }; | 541 | }; |
diff --git a/drivers/char/agp/via-agp.c b/drivers/char/agp/via-agp.c index 2ded7a280d7f..2e7c04370cd9 100644 --- a/drivers/char/agp/via-agp.c +++ b/drivers/char/agp/via-agp.c | |||
@@ -191,6 +191,7 @@ static struct agp_bridge_driver via_agp3_driver = { | |||
191 | .free_by_type = agp_generic_free_by_type, | 191 | .free_by_type = agp_generic_free_by_type, |
192 | .agp_alloc_page = agp_generic_alloc_page, | 192 | .agp_alloc_page = agp_generic_alloc_page, |
193 | .agp_destroy_page = agp_generic_destroy_page, | 193 | .agp_destroy_page = agp_generic_destroy_page, |
194 | .agp_type_to_mask_type = agp_generic_type_to_mask_type, | ||
194 | }; | 195 | }; |
195 | 196 | ||
196 | static struct agp_bridge_driver via_driver = { | 197 | static struct agp_bridge_driver via_driver = { |
@@ -214,6 +215,7 @@ static struct agp_bridge_driver via_driver = { | |||
214 | .free_by_type = agp_generic_free_by_type, | 215 | .free_by_type = agp_generic_free_by_type, |
215 | .agp_alloc_page = agp_generic_alloc_page, | 216 | .agp_alloc_page = agp_generic_alloc_page, |
216 | .agp_destroy_page = agp_generic_destroy_page, | 217 | .agp_destroy_page = agp_generic_destroy_page, |
218 | .agp_type_to_mask_type = agp_generic_type_to_mask_type, | ||
217 | }; | 219 | }; |
218 | 220 | ||
219 | static struct agp_device_ids via_agp_device_ids[] __devinitdata = | 221 | static struct agp_device_ids via_agp_device_ids[] __devinitdata = |
diff --git a/drivers/char/hangcheck-timer.c b/drivers/char/hangcheck-timer.c index 1aa93a752a9c..ae76a9ffe89f 100644 --- a/drivers/char/hangcheck-timer.c +++ b/drivers/char/hangcheck-timer.c | |||
@@ -117,7 +117,7 @@ __setup("hcheck_reboot", hangcheck_parse_reboot); | |||
117 | __setup("hcheck_dump_tasks", hangcheck_parse_dump_tasks); | 117 | __setup("hcheck_dump_tasks", hangcheck_parse_dump_tasks); |
118 | #endif /* not MODULE */ | 118 | #endif /* not MODULE */ |
119 | 119 | ||
120 | #if defined(CONFIG_X86_64) || defined(CONFIG_S390) | 120 | #if defined(CONFIG_S390) |
121 | # define HAVE_MONOTONIC | 121 | # define HAVE_MONOTONIC |
122 | # define TIMER_FREQ 1000000000ULL | 122 | # define TIMER_FREQ 1000000000ULL |
123 | #elif defined(CONFIG_IA64) | 123 | #elif defined(CONFIG_IA64) |
diff --git a/drivers/char/sysrq.c b/drivers/char/sysrq.c index be73c80d699d..1d8c4ae61551 100644 --- a/drivers/char/sysrq.c +++ b/drivers/char/sysrq.c | |||
@@ -36,6 +36,7 @@ | |||
36 | #include <linux/workqueue.h> | 36 | #include <linux/workqueue.h> |
37 | #include <linux/kexec.h> | 37 | #include <linux/kexec.h> |
38 | #include <linux/irq.h> | 38 | #include <linux/irq.h> |
39 | #include <linux/hrtimer.h> | ||
39 | 40 | ||
40 | #include <asm/ptrace.h> | 41 | #include <asm/ptrace.h> |
41 | #include <asm/irq_regs.h> | 42 | #include <asm/irq_regs.h> |
@@ -158,6 +159,17 @@ static struct sysrq_key_op sysrq_sync_op = { | |||
158 | .enable_mask = SYSRQ_ENABLE_SYNC, | 159 | .enable_mask = SYSRQ_ENABLE_SYNC, |
159 | }; | 160 | }; |
160 | 161 | ||
162 | static void sysrq_handle_show_timers(int key, struct tty_struct *tty) | ||
163 | { | ||
164 | sysrq_timer_list_show(); | ||
165 | } | ||
166 | |||
167 | static struct sysrq_key_op sysrq_show_timers_op = { | ||
168 | .handler = sysrq_handle_show_timers, | ||
169 | .help_msg = "show-all-timers(Q)", | ||
170 | .action_msg = "Show Pending Timers", | ||
171 | }; | ||
172 | |||
161 | static void sysrq_handle_mountro(int key, struct tty_struct *tty) | 173 | static void sysrq_handle_mountro(int key, struct tty_struct *tty) |
162 | { | 174 | { |
163 | emergency_remount(); | 175 | emergency_remount(); |
@@ -335,7 +347,7 @@ static struct sysrq_key_op *sysrq_key_table[36] = { | |||
335 | /* o: This will often be registered as 'Off' at init time */ | 347 | /* o: This will often be registered as 'Off' at init time */ |
336 | NULL, /* o */ | 348 | NULL, /* o */ |
337 | &sysrq_showregs_op, /* p */ | 349 | &sysrq_showregs_op, /* p */ |
338 | NULL, /* q */ | 350 | &sysrq_show_timers_op, /* q */ |
339 | &sysrq_unraw_op, /* r */ | 351 | &sysrq_unraw_op, /* r */ |
340 | &sysrq_sync_op, /* s */ | 352 | &sysrq_sync_op, /* s */ |
341 | &sysrq_showstate_op, /* t */ | 353 | &sysrq_showstate_op, /* t */ |
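The new entry fills the previously empty 'q' slot of sysrq_key_table, so Alt-SysRq-Q (or "echo q > /proc/sysrq-trigger" from userspace) dumps the pending timers. Built-in keys are wired up through the static table as above; a module would register its own key at runtime, roughly like the sketch below, which assumes the register_sysrq_key()/unregister_sysrq_key() helpers exported by sysrq.c and uses a made-up handler and key:

#include <linux/init.h>
#include <linux/module.h>
#include <linux/sysrq.h>
#include <linux/tty.h>

/* Hypothetical extra SysRq key provided by a module. */
static void sysrq_handle_hello(int key, struct tty_struct *tty)
{
        printk(KERN_INFO "sysrq: hello from key '%c'\n", key);
}

static struct sysrq_key_op sysrq_hello_op = {
        .handler        = sysrq_handle_hello,
        .help_msg       = "hello(Y)",
        .action_msg     = "Say Hello",
};

static int __init hello_sysrq_init(void)
{
        return register_sysrq_key('y', &sysrq_hello_op);
}

static void __exit hello_sysrq_exit(void)
{
        unregister_sysrq_key('y', &sysrq_hello_op);
}

module_init(hello_sysrq_init);
module_exit(hello_sysrq_exit);
MODULE_LICENSE("GPL");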
diff --git a/drivers/clocksource/acpi_pm.c b/drivers/clocksource/acpi_pm.c index b6bcdbbf57b3..ccaa6a39cb4b 100644 --- a/drivers/clocksource/acpi_pm.c +++ b/drivers/clocksource/acpi_pm.c | |||
@@ -16,15 +16,13 @@ | |||
16 | * This file is licensed under the GPL v2. | 16 | * This file is licensed under the GPL v2. |
17 | */ | 17 | */ |
18 | 18 | ||
19 | #include <linux/acpi_pmtmr.h> | ||
19 | #include <linux/clocksource.h> | 20 | #include <linux/clocksource.h> |
20 | #include <linux/errno.h> | 21 | #include <linux/errno.h> |
21 | #include <linux/init.h> | 22 | #include <linux/init.h> |
22 | #include <linux/pci.h> | 23 | #include <linux/pci.h> |
23 | #include <asm/io.h> | 24 | #include <asm/io.h> |
24 | 25 | ||
25 | /* Number of PMTMR ticks expected during calibration run */ | ||
26 | #define PMTMR_TICKS_PER_SEC 3579545 | ||
27 | |||
28 | /* | 26 | /* |
29 | * The I/O port the PMTMR resides at. | 27 | * The I/O port the PMTMR resides at. |
30 | * The location is detected during setup_arch(), | 28 | * The location is detected during setup_arch(), |
@@ -32,15 +30,13 @@ | |||
32 | */ | 30 | */ |
33 | u32 pmtmr_ioport __read_mostly; | 31 | u32 pmtmr_ioport __read_mostly; |
34 | 32 | ||
35 | #define ACPI_PM_MASK CLOCKSOURCE_MASK(24) /* limit it to 24 bits */ | ||
36 | |||
37 | static inline u32 read_pmtmr(void) | 33 | static inline u32 read_pmtmr(void) |
38 | { | 34 | { |
39 | /* mask the output to 24 bits */ | 35 | /* mask the output to 24 bits */ |
40 | return inl(pmtmr_ioport) & ACPI_PM_MASK; | 36 | return inl(pmtmr_ioport) & ACPI_PM_MASK; |
41 | } | 37 | } |
42 | 38 | ||
43 | static cycle_t acpi_pm_read_verified(void) | 39 | u32 acpi_pm_read_verified(void) |
44 | { | 40 | { |
45 | u32 v1 = 0, v2 = 0, v3 = 0; | 41 | u32 v1 = 0, v2 = 0, v3 = 0; |
46 | 42 | ||
@@ -57,7 +53,12 @@ static cycle_t acpi_pm_read_verified(void) | |||
57 | } while (unlikely((v1 > v2 && v1 < v3) || (v2 > v3 && v2 < v1) | 53 | } while (unlikely((v1 > v2 && v1 < v3) || (v2 > v3 && v2 < v1) |
58 | || (v3 > v1 && v3 < v2))); | 54 | || (v3 > v1 && v3 < v2))); |
59 | 55 | ||
60 | return (cycle_t)v2; | 56 | return v2; |
57 | } | ||
58 | |||
59 | static cycle_t acpi_pm_read_slow(void) | ||
60 | { | ||
61 | return (cycle_t)acpi_pm_read_verified(); | ||
61 | } | 62 | } |
62 | 63 | ||
63 | static cycle_t acpi_pm_read(void) | 64 | static cycle_t acpi_pm_read(void) |
@@ -72,7 +73,8 @@ static struct clocksource clocksource_acpi_pm = { | |||
72 | .mask = (cycle_t)ACPI_PM_MASK, | 73 | .mask = (cycle_t)ACPI_PM_MASK, |
73 | .mult = 0, /*to be calculated*/ | 74 | .mult = 0, /*to be calculated*/ |
74 | .shift = 22, | 75 | .shift = 22, |
75 | .is_continuous = 1, | 76 | .flags = CLOCK_SOURCE_IS_CONTINUOUS, |
77 | |||
76 | }; | 78 | }; |
77 | 79 | ||
78 | 80 | ||
@@ -87,7 +89,7 @@ __setup("acpi_pm_good", acpi_pm_good_setup); | |||
87 | 89 | ||
88 | static inline void acpi_pm_need_workaround(void) | 90 | static inline void acpi_pm_need_workaround(void) |
89 | { | 91 | { |
90 | clocksource_acpi_pm.read = acpi_pm_read_verified; | 92 | clocksource_acpi_pm.read = acpi_pm_read_slow; |
91 | clocksource_acpi_pm.rating = 110; | 93 | clocksource_acpi_pm.rating = 110; |
92 | } | 94 | } |
93 | 95 | ||
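acpi_pm_read_verified() is turned into an exported u32 helper, and the clocksource keeps a thin acpi_pm_read_slow() wrapper that casts the result to cycle_t. The verification itself samples the 24-bit timer three times and retries until the readings are mutually consistent, which filters out the occasional glitched value some chipsets return. A self-contained userspace sketch of that filter, with read_raw() standing in for the inl() port read and deliberately injecting glitches:

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

#define PM_MASK 0xffffffu               /* the ACPI PM timer is 24 bits wide */

/* Stand-in for inl(pmtmr_ioport): a slowly advancing counter that now and
 * then returns a corrupted sample, mimicking the chipset erratum. */
static uint32_t read_raw(void)
{
        static uint32_t t;

        t += 3;
        if (rand() % 16 == 0)
                return (t ^ 0x800000) & PM_MASK;        /* glitched bit 23 */
        return t & PM_MASK;
}

/* Take three samples and retry while they are mutually inconsistent; the
 * middle sample is returned only once the triple looks monotonic. */
static uint32_t read_verified(void)
{
        uint32_t v1, v2, v3;

        do {
                v1 = read_raw();
                v2 = read_raw();
                v3 = read_raw();
        } while ((v1 > v2 && v1 < v3) || (v2 > v3 && v2 < v1)
                 || (v3 > v1 && v3 < v2));

        return v2;
}

int main(void)
{
        for (int i = 0; i < 5; i++)
                printf("%06x\n", (unsigned)read_verified());
        return 0;
}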
diff --git a/drivers/clocksource/cyclone.c b/drivers/clocksource/cyclone.c index bf4d3d50d1c4..4f3925ceb360 100644 --- a/drivers/clocksource/cyclone.c +++ b/drivers/clocksource/cyclone.c | |||
@@ -31,7 +31,7 @@ static struct clocksource clocksource_cyclone = { | |||
31 | .mask = CYCLONE_TIMER_MASK, | 31 | .mask = CYCLONE_TIMER_MASK, |
32 | .mult = 10, | 32 | .mult = 10, |
33 | .shift = 0, | 33 | .shift = 0, |
34 | .is_continuous = 1, | 34 | .flags = CLOCK_SOURCE_IS_CONTINUOUS, |
35 | }; | 35 | }; |
36 | 36 | ||
37 | static int __init init_cyclone_clocksource(void) | 37 | static int __init init_cyclone_clocksource(void) |
diff --git a/drivers/clocksource/scx200_hrt.c b/drivers/clocksource/scx200_hrt.c index 22915cc46ba7..b92da677aa5d 100644 --- a/drivers/clocksource/scx200_hrt.c +++ b/drivers/clocksource/scx200_hrt.c | |||
@@ -57,7 +57,7 @@ static struct clocksource cs_hrt = { | |||
57 | .rating = 250, | 57 | .rating = 250, |
58 | .read = read_hrt, | 58 | .read = read_hrt, |
59 | .mask = CLOCKSOURCE_MASK(32), | 59 | .mask = CLOCKSOURCE_MASK(32), |
60 | .is_continuous = 1, | 60 | .flags = CLOCK_SOURCE_IS_CONTINUOUS, |
61 | /* mult, shift are set based on mhz27 flag */ | 61 | /* mult, shift are set based on mhz27 flag */ |
62 | }; | 62 | }; |
63 | 63 | ||
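Across the clocksource drivers the single is_continuous bit becomes a flags bitfield, so further properties can be expressed without adding more fields. A minimal registration after the conversion might look like the sketch below; the clocksource itself is hypothetical and the rating, shift, and frequency are placeholders:

#include <linux/clocksource.h>
#include <linux/init.h>
#include <linux/module.h>

static cycle_t example_read(void)
{
        return 0;       /* would read the hardware counter here */
}

static struct clocksource cs_example = {
        .name   = "example",
        .rating = 100,
        .read   = example_read,
        .mask   = CLOCKSOURCE_MASK(32),
        .shift  = 20,
        .flags  = CLOCK_SOURCE_IS_CONTINUOUS,
};

static int __init example_clocksource_init(void)
{
        /* mult is derived from the counter frequency at init time */
        cs_example.mult = clocksource_hz2mult(1000000, cs_example.shift);
        return clocksource_register(&cs_example);
}
module_init(example_clocksource_init);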
diff --git a/drivers/cpufreq/Kconfig b/drivers/cpufreq/Kconfig index 491779af8d55..d155e81b5c97 100644 --- a/drivers/cpufreq/Kconfig +++ b/drivers/cpufreq/Kconfig | |||
@@ -16,7 +16,7 @@ config CPU_FREQ | |||
16 | if CPU_FREQ | 16 | if CPU_FREQ |
17 | 17 | ||
18 | config CPU_FREQ_TABLE | 18 | config CPU_FREQ_TABLE |
19 | def_tristate m | 19 | tristate |
20 | 20 | ||
21 | config CPU_FREQ_DEBUG | 21 | config CPU_FREQ_DEBUG |
22 | bool "Enable CPUfreq debugging" | 22 | bool "Enable CPUfreq debugging" |
diff --git a/drivers/cpufreq/cpufreq.c b/drivers/cpufreq/cpufreq.c index a45cc89e387a..f52facc570f5 100644 --- a/drivers/cpufreq/cpufreq.c +++ b/drivers/cpufreq/cpufreq.c | |||
@@ -41,8 +41,67 @@ static struct cpufreq_driver *cpufreq_driver; | |||
41 | static struct cpufreq_policy *cpufreq_cpu_data[NR_CPUS]; | 41 | static struct cpufreq_policy *cpufreq_cpu_data[NR_CPUS]; |
42 | static DEFINE_SPINLOCK(cpufreq_driver_lock); | 42 | static DEFINE_SPINLOCK(cpufreq_driver_lock); |
43 | 43 | ||
44 | /* | ||
45 | * cpu_policy_rwsem is a per CPU reader-writer semaphore designed to cure | ||
46 | * all cpufreq/hotplug/workqueue/etc related lock issues. | ||
47 | * | ||
48 | * The rules for this semaphore: | ||
49 | * - Any routine that wants to read from the policy structure will | ||
50 | * do a down_read on this semaphore. | ||
51 | * - Any routine that will write to the policy structure and/or may take away | ||
52 | * the policy altogether (e.g. CPU hotplug), will hold this lock in write | ||
53 | * mode before doing so. | ||
54 | * | ||
55 | * Additional rules: | ||
56 | * - All holders of the lock should check to make sure that the CPU they | ||
57 | * are concerned with is online after they get the lock. | ||
58 | * - Governor routines that can be called in cpufreq hotplug path should not | ||
59 | * take this sem as top level hotplug notifier handler takes this. | ||
60 | */ | ||
61 | static DEFINE_PER_CPU(int, policy_cpu); | ||
62 | static DEFINE_PER_CPU(struct rw_semaphore, cpu_policy_rwsem); | ||
63 | |||
64 | #define lock_policy_rwsem(mode, cpu) \ | ||
65 | int lock_policy_rwsem_##mode \ | ||
66 | (int cpu) \ | ||
67 | { \ | ||
68 | int policy_cpu = per_cpu(policy_cpu, cpu); \ | ||
69 | BUG_ON(policy_cpu == -1); \ | ||
70 | down_##mode(&per_cpu(cpu_policy_rwsem, policy_cpu)); \ | ||
71 | if (unlikely(!cpu_online(cpu))) { \ | ||
72 | up_##mode(&per_cpu(cpu_policy_rwsem, policy_cpu)); \ | ||
73 | return -1; \ | ||
74 | } \ | ||
75 | \ | ||
76 | return 0; \ | ||
77 | } | ||
78 | |||
79 | lock_policy_rwsem(read, cpu); | ||
80 | EXPORT_SYMBOL_GPL(lock_policy_rwsem_read); | ||
81 | |||
82 | lock_policy_rwsem(write, cpu); | ||
83 | EXPORT_SYMBOL_GPL(lock_policy_rwsem_write); | ||
84 | |||
85 | void unlock_policy_rwsem_read(int cpu) | ||
86 | { | ||
87 | int policy_cpu = per_cpu(policy_cpu, cpu); | ||
88 | BUG_ON(policy_cpu == -1); | ||
89 | up_read(&per_cpu(cpu_policy_rwsem, policy_cpu)); | ||
90 | } | ||
91 | EXPORT_SYMBOL_GPL(unlock_policy_rwsem_read); | ||
92 | |||
93 | void unlock_policy_rwsem_write(int cpu) | ||
94 | { | ||
95 | int policy_cpu = per_cpu(policy_cpu, cpu); | ||
96 | BUG_ON(policy_cpu == -1); | ||
97 | up_write(&per_cpu(cpu_policy_rwsem, policy_cpu)); | ||
98 | } | ||
99 | EXPORT_SYMBOL_GPL(unlock_policy_rwsem_write); | ||
100 | |||
101 | |||
44 | /* internal prototypes */ | 102 | /* internal prototypes */ |
45 | static int __cpufreq_governor(struct cpufreq_policy *policy, unsigned int event); | 103 | static int __cpufreq_governor(struct cpufreq_policy *policy, unsigned int event); |
104 | static unsigned int __cpufreq_get(unsigned int cpu); | ||
46 | static void handle_update(struct work_struct *work); | 105 | static void handle_update(struct work_struct *work); |
47 | 106 | ||
48 | /** | 107 | /** |
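The comment block above spells out the caller contract for the new per-CPU policy rwsem: readers take it shared, writers take it exclusive, and every acquirer re-checks that the CPU is still online after getting the lock, since a hotplug writer may have torn the policy down while the acquirer slept. The lock_policy_rwsem() macro then stamps out the read and write variants. A userspace analogue of the same pattern using pthread rwlocks; the online map and policy ownership below are simulated data, not kernel state:

#include <pthread.h>
#include <stdio.h>

#define NR_CPUS 4

static pthread_rwlock_t cpu_policy_rwsem[NR_CPUS] = {
        PTHREAD_RWLOCK_INITIALIZER, PTHREAD_RWLOCK_INITIALIZER,
        PTHREAD_RWLOCK_INITIALIZER, PTHREAD_RWLOCK_INITIALIZER,
};
static int cpu_online_map[NR_CPUS] = { 1, 1, 1, 0 };
static int policy_cpu[NR_CPUS] = { 0, 0, 2, 2 };        /* CPUs sharing a policy share a lock */

/* Mirrors lock_policy_rwsem_read(): take the lock belonging to the policy
 * owner, then re-check that the CPU is still online; if it went away while
 * we waited for the lock, back out and report failure. */
static int lock_policy_read(int cpu)
{
        int owner = policy_cpu[cpu];

        pthread_rwlock_rdlock(&cpu_policy_rwsem[owner]);
        if (!cpu_online_map[cpu]) {
                pthread_rwlock_unlock(&cpu_policy_rwsem[owner]);
                return -1;
        }
        return 0;
}

static void unlock_policy_read(int cpu)
{
        pthread_rwlock_unlock(&cpu_policy_rwsem[policy_cpu[cpu]]);
}

int main(void)
{
        if (lock_policy_read(1) == 0) {
                printf("cpu1 policy read-locked\n");
                unlock_policy_read(1);
        }
        if (lock_policy_read(3) != 0)
                printf("cpu3 is offline, lock refused\n");
        return 0;
}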
@@ -415,12 +474,8 @@ static ssize_t store_##file_name \ | |||
415 | if (ret != 1) \ | 474 | if (ret != 1) \ |
416 | return -EINVAL; \ | 475 | return -EINVAL; \ |
417 | \ | 476 | \ |
418 | lock_cpu_hotplug(); \ | ||
419 | mutex_lock(&policy->lock); \ | ||
420 | ret = __cpufreq_set_policy(policy, &new_policy); \ | 477 | ret = __cpufreq_set_policy(policy, &new_policy); \ |
421 | policy->user_policy.object = policy->object; \ | 478 | policy->user_policy.object = policy->object; \ |
422 | mutex_unlock(&policy->lock); \ | ||
423 | unlock_cpu_hotplug(); \ | ||
424 | \ | 479 | \ |
425 | return ret ? ret : count; \ | 480 | return ret ? ret : count; \ |
426 | } | 481 | } |
@@ -434,7 +489,7 @@ store_one(scaling_max_freq,max); | |||
434 | static ssize_t show_cpuinfo_cur_freq (struct cpufreq_policy * policy, | 489 | static ssize_t show_cpuinfo_cur_freq (struct cpufreq_policy * policy, |
435 | char *buf) | 490 | char *buf) |
436 | { | 491 | { |
437 | unsigned int cur_freq = cpufreq_get(policy->cpu); | 492 | unsigned int cur_freq = __cpufreq_get(policy->cpu); |
438 | if (!cur_freq) | 493 | if (!cur_freq) |
439 | return sprintf(buf, "<unknown>"); | 494 | return sprintf(buf, "<unknown>"); |
440 | return sprintf(buf, "%u\n", cur_freq); | 495 | return sprintf(buf, "%u\n", cur_freq); |
@@ -479,18 +534,12 @@ static ssize_t store_scaling_governor (struct cpufreq_policy * policy, | |||
479 | &new_policy.governor)) | 534 | &new_policy.governor)) |
480 | return -EINVAL; | 535 | return -EINVAL; |
481 | 536 | ||
482 | lock_cpu_hotplug(); | ||
483 | |||
484 | /* Do not use cpufreq_set_policy here or the user_policy.max | 537 | /* Do not use cpufreq_set_policy here or the user_policy.max |
485 | will be wrongly overridden */ | 538 | will be wrongly overridden */ |
486 | mutex_lock(&policy->lock); | ||
487 | ret = __cpufreq_set_policy(policy, &new_policy); | 539 | ret = __cpufreq_set_policy(policy, &new_policy); |
488 | 540 | ||
489 | policy->user_policy.policy = policy->policy; | 541 | policy->user_policy.policy = policy->policy; |
490 | policy->user_policy.governor = policy->governor; | 542 | policy->user_policy.governor = policy->governor; |
491 | mutex_unlock(&policy->lock); | ||
492 | |||
493 | unlock_cpu_hotplug(); | ||
494 | 543 | ||
495 | if (ret) | 544 | if (ret) |
496 | return ret; | 545 | return ret; |
@@ -595,11 +644,17 @@ static ssize_t show(struct kobject * kobj, struct attribute * attr ,char * buf) | |||
595 | policy = cpufreq_cpu_get(policy->cpu); | 644 | policy = cpufreq_cpu_get(policy->cpu); |
596 | if (!policy) | 645 | if (!policy) |
597 | return -EINVAL; | 646 | return -EINVAL; |
647 | |||
648 | if (lock_policy_rwsem_read(policy->cpu) < 0) | ||
649 | return -EINVAL; | ||
650 | |||
598 | if (fattr->show) | 651 | if (fattr->show) |
599 | ret = fattr->show(policy, buf); | 652 | ret = fattr->show(policy, buf); |
600 | else | 653 | else |
601 | ret = -EIO; | 654 | ret = -EIO; |
602 | 655 | ||
656 | unlock_policy_rwsem_read(policy->cpu); | ||
657 | |||
603 | cpufreq_cpu_put(policy); | 658 | cpufreq_cpu_put(policy); |
604 | return ret; | 659 | return ret; |
605 | } | 660 | } |
@@ -613,11 +668,17 @@ static ssize_t store(struct kobject * kobj, struct attribute * attr, | |||
613 | policy = cpufreq_cpu_get(policy->cpu); | 668 | policy = cpufreq_cpu_get(policy->cpu); |
614 | if (!policy) | 669 | if (!policy) |
615 | return -EINVAL; | 670 | return -EINVAL; |
671 | |||
672 | if (lock_policy_rwsem_write(policy->cpu) < 0) | ||
673 | return -EINVAL; | ||
674 | |||
616 | if (fattr->store) | 675 | if (fattr->store) |
617 | ret = fattr->store(policy, buf, count); | 676 | ret = fattr->store(policy, buf, count); |
618 | else | 677 | else |
619 | ret = -EIO; | 678 | ret = -EIO; |
620 | 679 | ||
680 | unlock_policy_rwsem_write(policy->cpu); | ||
681 | |||
621 | cpufreq_cpu_put(policy); | 682 | cpufreq_cpu_put(policy); |
622 | return ret; | 683 | return ret; |
623 | } | 684 | } |
@@ -691,8 +752,10 @@ static int cpufreq_add_dev (struct sys_device * sys_dev) | |||
691 | policy->cpu = cpu; | 752 | policy->cpu = cpu; |
692 | policy->cpus = cpumask_of_cpu(cpu); | 753 | policy->cpus = cpumask_of_cpu(cpu); |
693 | 754 | ||
694 | mutex_init(&policy->lock); | 755 | /* Initially set CPU itself as the policy_cpu */ |
695 | mutex_lock(&policy->lock); | 756 | per_cpu(policy_cpu, cpu) = cpu; |
757 | lock_policy_rwsem_write(cpu); | ||
758 | |||
696 | init_completion(&policy->kobj_unregister); | 759 | init_completion(&policy->kobj_unregister); |
697 | INIT_WORK(&policy->update, handle_update); | 760 | INIT_WORK(&policy->update, handle_update); |
698 | 761 | ||
@@ -702,7 +765,7 @@ static int cpufreq_add_dev (struct sys_device * sys_dev) | |||
702 | ret = cpufreq_driver->init(policy); | 765 | ret = cpufreq_driver->init(policy); |
703 | if (ret) { | 766 | if (ret) { |
704 | dprintk("initialization failed\n"); | 767 | dprintk("initialization failed\n"); |
705 | mutex_unlock(&policy->lock); | 768 | unlock_policy_rwsem_write(cpu); |
706 | goto err_out; | 769 | goto err_out; |
707 | } | 770 | } |
708 | 771 | ||
@@ -716,6 +779,14 @@ static int cpufreq_add_dev (struct sys_device * sys_dev) | |||
716 | */ | 779 | */ |
717 | managed_policy = cpufreq_cpu_get(j); | 780 | managed_policy = cpufreq_cpu_get(j); |
718 | if (unlikely(managed_policy)) { | 781 | if (unlikely(managed_policy)) { |
782 | |||
783 | /* Set proper policy_cpu */ | ||
784 | unlock_policy_rwsem_write(cpu); | ||
785 | per_cpu(policy_cpu, cpu) = managed_policy->cpu; | ||
786 | |||
787 | if (lock_policy_rwsem_write(cpu) < 0) | ||
788 | goto err_out_driver_exit; | ||
789 | |||
719 | spin_lock_irqsave(&cpufreq_driver_lock, flags); | 790 | spin_lock_irqsave(&cpufreq_driver_lock, flags); |
720 | managed_policy->cpus = policy->cpus; | 791 | managed_policy->cpus = policy->cpus; |
721 | cpufreq_cpu_data[cpu] = managed_policy; | 792 | cpufreq_cpu_data[cpu] = managed_policy; |
@@ -726,13 +797,13 @@ static int cpufreq_add_dev (struct sys_device * sys_dev) | |||
726 | &managed_policy->kobj, | 797 | &managed_policy->kobj, |
727 | "cpufreq"); | 798 | "cpufreq"); |
728 | if (ret) { | 799 | if (ret) { |
729 | mutex_unlock(&policy->lock); | 800 | unlock_policy_rwsem_write(cpu); |
730 | goto err_out_driver_exit; | 801 | goto err_out_driver_exit; |
731 | } | 802 | } |
732 | 803 | ||
733 | cpufreq_debug_enable_ratelimit(); | 804 | cpufreq_debug_enable_ratelimit(); |
734 | mutex_unlock(&policy->lock); | ||
735 | ret = 0; | 805 | ret = 0; |
806 | unlock_policy_rwsem_write(cpu); | ||
736 | goto err_out_driver_exit; /* call driver->exit() */ | 807 | goto err_out_driver_exit; /* call driver->exit() */ |
737 | } | 808 | } |
738 | } | 809 | } |
@@ -746,7 +817,7 @@ static int cpufreq_add_dev (struct sys_device * sys_dev) | |||
746 | 817 | ||
747 | ret = kobject_register(&policy->kobj); | 818 | ret = kobject_register(&policy->kobj); |
748 | if (ret) { | 819 | if (ret) { |
749 | mutex_unlock(&policy->lock); | 820 | unlock_policy_rwsem_write(cpu); |
750 | goto err_out_driver_exit; | 821 | goto err_out_driver_exit; |
751 | } | 822 | } |
752 | /* set up files for this cpu device */ | 823 | /* set up files for this cpu device */ |
@@ -761,8 +832,10 @@ static int cpufreq_add_dev (struct sys_device * sys_dev) | |||
761 | sysfs_create_file(&policy->kobj, &scaling_cur_freq.attr); | 832 | sysfs_create_file(&policy->kobj, &scaling_cur_freq.attr); |
762 | 833 | ||
763 | spin_lock_irqsave(&cpufreq_driver_lock, flags); | 834 | spin_lock_irqsave(&cpufreq_driver_lock, flags); |
764 | for_each_cpu_mask(j, policy->cpus) | 835 | for_each_cpu_mask(j, policy->cpus) { |
765 | cpufreq_cpu_data[j] = policy; | 836 | cpufreq_cpu_data[j] = policy; |
837 | per_cpu(policy_cpu, j) = policy->cpu; | ||
838 | } | ||
766 | spin_unlock_irqrestore(&cpufreq_driver_lock, flags); | 839 | spin_unlock_irqrestore(&cpufreq_driver_lock, flags); |
767 | 840 | ||
768 | /* symlink affected CPUs */ | 841 | /* symlink affected CPUs */ |
@@ -778,14 +851,14 @@ static int cpufreq_add_dev (struct sys_device * sys_dev) | |||
778 | ret = sysfs_create_link(&cpu_sys_dev->kobj, &policy->kobj, | 851 | ret = sysfs_create_link(&cpu_sys_dev->kobj, &policy->kobj, |
779 | "cpufreq"); | 852 | "cpufreq"); |
780 | if (ret) { | 853 | if (ret) { |
781 | mutex_unlock(&policy->lock); | 854 | unlock_policy_rwsem_write(cpu); |
782 | goto err_out_unregister; | 855 | goto err_out_unregister; |
783 | } | 856 | } |
784 | } | 857 | } |
785 | 858 | ||
786 | policy->governor = NULL; /* to assure that the starting sequence is | 859 | policy->governor = NULL; /* to assure that the starting sequence is |
787 | * run in cpufreq_set_policy */ | 860 | * run in cpufreq_set_policy */ |
788 | mutex_unlock(&policy->lock); | 861 | unlock_policy_rwsem_write(cpu); |
789 | 862 | ||
790 | /* set default policy */ | 863 | /* set default policy */ |
791 | ret = cpufreq_set_policy(&new_policy); | 864 | ret = cpufreq_set_policy(&new_policy); |
@@ -826,11 +899,13 @@ module_out: | |||
826 | 899 | ||
827 | 900 | ||
828 | /** | 901 | /** |
829 | * cpufreq_remove_dev - remove a CPU device | 902 | * __cpufreq_remove_dev - remove a CPU device |
830 | * | 903 | * |
831 | * Removes the cpufreq interface for a CPU device. | 904 | * Removes the cpufreq interface for a CPU device. |
905 | * Caller should already have policy_rwsem in write mode for this CPU. | ||
906 | * This routine releases the rwsem before returning. | ||
832 | */ | 907 | */ |
833 | static int cpufreq_remove_dev (struct sys_device * sys_dev) | 908 | static int __cpufreq_remove_dev (struct sys_device * sys_dev) |
834 | { | 909 | { |
835 | unsigned int cpu = sys_dev->id; | 910 | unsigned int cpu = sys_dev->id; |
836 | unsigned long flags; | 911 | unsigned long flags; |
@@ -849,6 +924,7 @@ static int cpufreq_remove_dev (struct sys_device * sys_dev) | |||
849 | if (!data) { | 924 | if (!data) { |
850 | spin_unlock_irqrestore(&cpufreq_driver_lock, flags); | 925 | spin_unlock_irqrestore(&cpufreq_driver_lock, flags); |
851 | cpufreq_debug_enable_ratelimit(); | 926 | cpufreq_debug_enable_ratelimit(); |
927 | unlock_policy_rwsem_write(cpu); | ||
852 | return -EINVAL; | 928 | return -EINVAL; |
853 | } | 929 | } |
854 | cpufreq_cpu_data[cpu] = NULL; | 930 | cpufreq_cpu_data[cpu] = NULL; |
@@ -865,6 +941,7 @@ static int cpufreq_remove_dev (struct sys_device * sys_dev) | |||
865 | sysfs_remove_link(&sys_dev->kobj, "cpufreq"); | 941 | sysfs_remove_link(&sys_dev->kobj, "cpufreq"); |
866 | cpufreq_cpu_put(data); | 942 | cpufreq_cpu_put(data); |
867 | cpufreq_debug_enable_ratelimit(); | 943 | cpufreq_debug_enable_ratelimit(); |
944 | unlock_policy_rwsem_write(cpu); | ||
868 | return 0; | 945 | return 0; |
869 | } | 946 | } |
870 | #endif | 947 | #endif |
@@ -873,6 +950,7 @@ static int cpufreq_remove_dev (struct sys_device * sys_dev) | |||
873 | if (!kobject_get(&data->kobj)) { | 950 | if (!kobject_get(&data->kobj)) { |
874 | spin_unlock_irqrestore(&cpufreq_driver_lock, flags); | 951 | spin_unlock_irqrestore(&cpufreq_driver_lock, flags); |
875 | cpufreq_debug_enable_ratelimit(); | 952 | cpufreq_debug_enable_ratelimit(); |
953 | unlock_policy_rwsem_write(cpu); | ||
876 | return -EFAULT; | 954 | return -EFAULT; |
877 | } | 955 | } |
878 | 956 | ||
@@ -906,10 +984,10 @@ static int cpufreq_remove_dev (struct sys_device * sys_dev) | |||
906 | spin_unlock_irqrestore(&cpufreq_driver_lock, flags); | 984 | spin_unlock_irqrestore(&cpufreq_driver_lock, flags); |
907 | #endif | 985 | #endif |
908 | 986 | ||
909 | mutex_lock(&data->lock); | ||
910 | if (cpufreq_driver->target) | 987 | if (cpufreq_driver->target) |
911 | __cpufreq_governor(data, CPUFREQ_GOV_STOP); | 988 | __cpufreq_governor(data, CPUFREQ_GOV_STOP); |
912 | mutex_unlock(&data->lock); | 989 | |
990 | unlock_policy_rwsem_write(cpu); | ||
913 | 991 | ||
914 | kobject_unregister(&data->kobj); | 992 | kobject_unregister(&data->kobj); |
915 | 993 | ||
@@ -933,6 +1011,18 @@ static int cpufreq_remove_dev (struct sys_device * sys_dev) | |||
933 | } | 1011 | } |
934 | 1012 | ||
935 | 1013 | ||
1014 | static int cpufreq_remove_dev (struct sys_device * sys_dev) | ||
1015 | { | ||
1016 | unsigned int cpu = sys_dev->id; | ||
1017 | int retval; | ||
1018 | if (unlikely(lock_policy_rwsem_write(cpu))) | ||
1019 | BUG(); | ||
1020 | |||
1021 | retval = __cpufreq_remove_dev(sys_dev); | ||
1022 | return retval; | ||
1023 | } | ||
1024 | |||
1025 | |||
936 | static void handle_update(struct work_struct *work) | 1026 | static void handle_update(struct work_struct *work) |
937 | { | 1027 | { |
938 | struct cpufreq_policy *policy = | 1028 | struct cpufreq_policy *policy = |
@@ -980,9 +1070,12 @@ unsigned int cpufreq_quick_get(unsigned int cpu) | |||
980 | unsigned int ret_freq = 0; | 1070 | unsigned int ret_freq = 0; |
981 | 1071 | ||
982 | if (policy) { | 1072 | if (policy) { |
983 | mutex_lock(&policy->lock); | 1073 | if (unlikely(lock_policy_rwsem_read(cpu))) |
1074 | return ret_freq; | ||
1075 | |||
984 | ret_freq = policy->cur; | 1076 | ret_freq = policy->cur; |
985 | mutex_unlock(&policy->lock); | 1077 | |
1078 | unlock_policy_rwsem_read(cpu); | ||
986 | cpufreq_cpu_put(policy); | 1079 | cpufreq_cpu_put(policy); |
987 | } | 1080 | } |
988 | 1081 | ||
@@ -991,24 +1084,13 @@ unsigned int cpufreq_quick_get(unsigned int cpu) | |||
991 | EXPORT_SYMBOL(cpufreq_quick_get); | 1084 | EXPORT_SYMBOL(cpufreq_quick_get); |
992 | 1085 | ||
993 | 1086 | ||
994 | /** | 1087 | static unsigned int __cpufreq_get(unsigned int cpu) |
995 | * cpufreq_get - get the current CPU frequency (in kHz) | ||
996 | * @cpu: CPU number | ||
997 | * | ||
998 | * Get the CPU current (static) CPU frequency | ||
999 | */ | ||
1000 | unsigned int cpufreq_get(unsigned int cpu) | ||
1001 | { | 1088 | { |
1002 | struct cpufreq_policy *policy = cpufreq_cpu_get(cpu); | 1089 | struct cpufreq_policy *policy = cpufreq_cpu_data[cpu]; |
1003 | unsigned int ret_freq = 0; | 1090 | unsigned int ret_freq = 0; |
1004 | 1091 | ||
1005 | if (!policy) | ||
1006 | return 0; | ||
1007 | |||
1008 | if (!cpufreq_driver->get) | 1092 | if (!cpufreq_driver->get) |
1009 | goto out; | 1093 | return (ret_freq); |
1010 | |||
1011 | mutex_lock(&policy->lock); | ||
1012 | 1094 | ||
1013 | ret_freq = cpufreq_driver->get(cpu); | 1095 | ret_freq = cpufreq_driver->get(cpu); |
1014 | 1096 | ||
@@ -1022,11 +1104,33 @@ unsigned int cpufreq_get(unsigned int cpu) | |||
1022 | } | 1104 | } |
1023 | } | 1105 | } |
1024 | 1106 | ||
1025 | mutex_unlock(&policy->lock); | 1107 | return (ret_freq); |
1108 | } | ||
1026 | 1109 | ||
1027 | out: | 1110 | /** |
1028 | cpufreq_cpu_put(policy); | 1111 | * cpufreq_get - get the current CPU frequency (in kHz) |
1112 | * @cpu: CPU number | ||
1113 | * | ||
1114 | * Get the CPU current (static) CPU frequency | ||
1115 | */ | ||
1116 | unsigned int cpufreq_get(unsigned int cpu) | ||
1117 | { | ||
1118 | unsigned int ret_freq = 0; | ||
1119 | struct cpufreq_policy *policy = cpufreq_cpu_get(cpu); | ||
1120 | |||
1121 | if (!policy) | ||
1122 | goto out; | ||
1123 | |||
1124 | if (unlikely(lock_policy_rwsem_read(cpu))) | ||
1125 | goto out_policy; | ||
1126 | |||
1127 | ret_freq = __cpufreq_get(cpu); | ||
1029 | 1128 | ||
1129 | unlock_policy_rwsem_read(cpu); | ||
1130 | |||
1131 | out_policy: | ||
1132 | cpufreq_cpu_put(policy); | ||
1133 | out: | ||
1030 | return (ret_freq); | 1134 | return (ret_freq); |
1031 | } | 1135 | } |
1032 | EXPORT_SYMBOL(cpufreq_get); | 1136 | EXPORT_SYMBOL(cpufreq_get); |
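The two hunks above split cpufreq_get() into a lock-free __cpufreq_get() worker and a public wrapper that takes the per-policy rwsem for reading before calling it, so paths that already hold the lock can reuse the worker directly. A minimal user-space sketch of that locked-wrapper/unlocked-worker split, assuming hypothetical names (struct policy, policy_get(), __policy_get()) that are not in the kernel sources; build with cc -pthread:

	#include <pthread.h>
	#include <stdio.h>

	/* Hypothetical stand-in for the per-policy data and its rwsem. */
	struct policy {
		pthread_rwlock_t lock;
		unsigned int cur;	/* cached current frequency, kHz */
	};

	/* Worker: assumes the caller already holds policy->lock. */
	static unsigned int __policy_get(struct policy *p)
	{
		return p->cur;		/* the kernel would query the driver here */
	}

	/* Public entry point: takes the read lock, then reuses the worker. */
	static unsigned int policy_get(struct policy *p)
	{
		unsigned int freq = 0;

		if (pthread_rwlock_rdlock(&p->lock) != 0)
			return 0;	/* mirrors the return-0-on-lock-failure path */
		freq = __policy_get(p);
		pthread_rwlock_unlock(&p->lock);
		return freq;
	}

	int main(void)
	{
		struct policy p = { .lock = PTHREAD_RWLOCK_INITIALIZER, .cur = 800000 };

		printf("%u kHz\n", policy_get(&p));
		return 0;
	}

The same shape reappears below in cpufreq_driver_target(), which now takes the write side of the rwsem around __cpufreq_driver_target() instead of lock_cpu_hotplug() plus the old per-policy mutex.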
@@ -1278,7 +1382,6 @@ EXPORT_SYMBOL(cpufreq_unregister_notifier); | |||
1278 | *********************************************************************/ | 1382 | *********************************************************************/ |
1279 | 1383 | ||
1280 | 1384 | ||
1281 | /* Must be called with lock_cpu_hotplug held */ | ||
1282 | int __cpufreq_driver_target(struct cpufreq_policy *policy, | 1385 | int __cpufreq_driver_target(struct cpufreq_policy *policy, |
1283 | unsigned int target_freq, | 1386 | unsigned int target_freq, |
1284 | unsigned int relation) | 1387 | unsigned int relation) |
@@ -1304,20 +1407,19 @@ int cpufreq_driver_target(struct cpufreq_policy *policy, | |||
1304 | if (!policy) | 1407 | if (!policy) |
1305 | return -EINVAL; | 1408 | return -EINVAL; |
1306 | 1409 | ||
1307 | lock_cpu_hotplug(); | 1410 | if (unlikely(lock_policy_rwsem_write(policy->cpu))) |
1308 | mutex_lock(&policy->lock); | 1411 | return -EINVAL; |
1309 | 1412 | ||
1310 | ret = __cpufreq_driver_target(policy, target_freq, relation); | 1413 | ret = __cpufreq_driver_target(policy, target_freq, relation); |
1311 | 1414 | ||
1312 | mutex_unlock(&policy->lock); | 1415 | unlock_policy_rwsem_write(policy->cpu); |
1313 | unlock_cpu_hotplug(); | ||
1314 | 1416 | ||
1315 | cpufreq_cpu_put(policy); | 1417 | cpufreq_cpu_put(policy); |
1316 | return ret; | 1418 | return ret; |
1317 | } | 1419 | } |
1318 | EXPORT_SYMBOL_GPL(cpufreq_driver_target); | 1420 | EXPORT_SYMBOL_GPL(cpufreq_driver_target); |
1319 | 1421 | ||
1320 | int cpufreq_driver_getavg(struct cpufreq_policy *policy) | 1422 | int __cpufreq_driver_getavg(struct cpufreq_policy *policy) |
1321 | { | 1423 | { |
1322 | int ret = 0; | 1424 | int ret = 0; |
1323 | 1425 | ||
@@ -1325,20 +1427,15 @@ int cpufreq_driver_getavg(struct cpufreq_policy *policy) | |||
1325 | if (!policy) | 1427 | if (!policy) |
1326 | return -EINVAL; | 1428 | return -EINVAL; |
1327 | 1429 | ||
1328 | mutex_lock(&policy->lock); | ||
1329 | |||
1330 | if (cpu_online(policy->cpu) && cpufreq_driver->getavg) | 1430 | if (cpu_online(policy->cpu) && cpufreq_driver->getavg) |
1331 | ret = cpufreq_driver->getavg(policy->cpu); | 1431 | ret = cpufreq_driver->getavg(policy->cpu); |
1332 | 1432 | ||
1333 | mutex_unlock(&policy->lock); | ||
1334 | |||
1335 | cpufreq_cpu_put(policy); | 1433 | cpufreq_cpu_put(policy); |
1336 | return ret; | 1434 | return ret; |
1337 | } | 1435 | } |
1338 | EXPORT_SYMBOL_GPL(cpufreq_driver_getavg); | 1436 | EXPORT_SYMBOL_GPL(__cpufreq_driver_getavg); |
1339 | 1437 | ||
1340 | /* | 1438 | /* |
1341 | * Locking: Must be called with the lock_cpu_hotplug() lock held | ||
1342 | * when "event" is CPUFREQ_GOV_LIMITS | 1439 | * when "event" is CPUFREQ_GOV_LIMITS |
1343 | */ | 1440 | */ |
1344 | 1441 | ||
@@ -1420,9 +1517,7 @@ int cpufreq_get_policy(struct cpufreq_policy *policy, unsigned int cpu) | |||
1420 | if (!cpu_policy) | 1517 | if (!cpu_policy) |
1421 | return -EINVAL; | 1518 | return -EINVAL; |
1422 | 1519 | ||
1423 | mutex_lock(&cpu_policy->lock); | ||
1424 | memcpy(policy, cpu_policy, sizeof(struct cpufreq_policy)); | 1520 | memcpy(policy, cpu_policy, sizeof(struct cpufreq_policy)); |
1425 | mutex_unlock(&cpu_policy->lock); | ||
1426 | 1521 | ||
1427 | cpufreq_cpu_put(cpu_policy); | 1522 | cpufreq_cpu_put(cpu_policy); |
1428 | return 0; | 1523 | return 0; |
@@ -1433,7 +1528,6 @@ EXPORT_SYMBOL(cpufreq_get_policy); | |||
1433 | /* | 1528 | /* |
1434 | * data : current policy. | 1529 | * data : current policy. |
1435 | * policy : policy to be set. | 1530 | * policy : policy to be set. |
1436 | * Locking: Must be called with the lock_cpu_hotplug() lock held | ||
1437 | */ | 1531 | */ |
1438 | static int __cpufreq_set_policy(struct cpufreq_policy *data, | 1532 | static int __cpufreq_set_policy(struct cpufreq_policy *data, |
1439 | struct cpufreq_policy *policy) | 1533 | struct cpufreq_policy *policy) |
@@ -1539,10 +1633,9 @@ int cpufreq_set_policy(struct cpufreq_policy *policy) | |||
1539 | if (!data) | 1633 | if (!data) |
1540 | return -EINVAL; | 1634 | return -EINVAL; |
1541 | 1635 | ||
1542 | lock_cpu_hotplug(); | 1636 | if (unlikely(lock_policy_rwsem_write(policy->cpu))) |
1637 | return -EINVAL; | ||
1543 | 1638 | ||
1544 | /* lock this CPU */ | ||
1545 | mutex_lock(&data->lock); | ||
1546 | 1639 | ||
1547 | ret = __cpufreq_set_policy(data, policy); | 1640 | ret = __cpufreq_set_policy(data, policy); |
1548 | data->user_policy.min = data->min; | 1641 | data->user_policy.min = data->min; |
@@ -1550,9 +1643,8 @@ int cpufreq_set_policy(struct cpufreq_policy *policy) | |||
1550 | data->user_policy.policy = data->policy; | 1643 | data->user_policy.policy = data->policy; |
1551 | data->user_policy.governor = data->governor; | 1644 | data->user_policy.governor = data->governor; |
1552 | 1645 | ||
1553 | mutex_unlock(&data->lock); | 1646 | unlock_policy_rwsem_write(policy->cpu); |
1554 | 1647 | ||
1555 | unlock_cpu_hotplug(); | ||
1556 | cpufreq_cpu_put(data); | 1648 | cpufreq_cpu_put(data); |
1557 | 1649 | ||
1558 | return ret; | 1650 | return ret; |
@@ -1576,8 +1668,8 @@ int cpufreq_update_policy(unsigned int cpu) | |||
1576 | if (!data) | 1668 | if (!data) |
1577 | return -ENODEV; | 1669 | return -ENODEV; |
1578 | 1670 | ||
1579 | lock_cpu_hotplug(); | 1671 | if (unlikely(lock_policy_rwsem_write(cpu))) |
1580 | mutex_lock(&data->lock); | 1672 | return -EINVAL; |
1581 | 1673 | ||
1582 | dprintk("updating policy for CPU %u\n", cpu); | 1674 | dprintk("updating policy for CPU %u\n", cpu); |
1583 | memcpy(&policy, data, sizeof(struct cpufreq_policy)); | 1675 | memcpy(&policy, data, sizeof(struct cpufreq_policy)); |
@@ -1602,8 +1694,8 @@ int cpufreq_update_policy(unsigned int cpu) | |||
1602 | 1694 | ||
1603 | ret = __cpufreq_set_policy(data, &policy); | 1695 | ret = __cpufreq_set_policy(data, &policy); |
1604 | 1696 | ||
1605 | mutex_unlock(&data->lock); | 1697 | unlock_policy_rwsem_write(cpu); |
1606 | unlock_cpu_hotplug(); | 1698 | |
1607 | cpufreq_cpu_put(data); | 1699 | cpufreq_cpu_put(data); |
1608 | return ret; | 1700 | return ret; |
1609 | } | 1701 | } |
@@ -1613,31 +1705,28 @@ static int cpufreq_cpu_callback(struct notifier_block *nfb, | |||
1613 | unsigned long action, void *hcpu) | 1705 | unsigned long action, void *hcpu) |
1614 | { | 1706 | { |
1615 | unsigned int cpu = (unsigned long)hcpu; | 1707 | unsigned int cpu = (unsigned long)hcpu; |
1616 | struct cpufreq_policy *policy; | ||
1617 | struct sys_device *sys_dev; | 1708 | struct sys_device *sys_dev; |
1709 | struct cpufreq_policy *policy; | ||
1618 | 1710 | ||
1619 | sys_dev = get_cpu_sysdev(cpu); | 1711 | sys_dev = get_cpu_sysdev(cpu); |
1620 | |||
1621 | if (sys_dev) { | 1712 | if (sys_dev) { |
1622 | switch (action) { | 1713 | switch (action) { |
1623 | case CPU_ONLINE: | 1714 | case CPU_ONLINE: |
1624 | cpufreq_add_dev(sys_dev); | 1715 | cpufreq_add_dev(sys_dev); |
1625 | break; | 1716 | break; |
1626 | case CPU_DOWN_PREPARE: | 1717 | case CPU_DOWN_PREPARE: |
1627 | /* | 1718 | if (unlikely(lock_policy_rwsem_write(cpu))) |
1628 | * We attempt to put this cpu in lowest frequency | 1719 | BUG(); |
1629 | * possible before going down. This will permit | 1720 | |
1630 | * hardware-managed P-State to switch other related | ||
1631 | * threads to min or higher speeds if possible. | ||
1632 | */ | ||
1633 | policy = cpufreq_cpu_data[cpu]; | 1721 | policy = cpufreq_cpu_data[cpu]; |
1634 | if (policy) { | 1722 | if (policy) { |
1635 | cpufreq_driver_target(policy, policy->min, | 1723 | __cpufreq_driver_target(policy, policy->min, |
1636 | CPUFREQ_RELATION_H); | 1724 | CPUFREQ_RELATION_H); |
1637 | } | 1725 | } |
1726 | __cpufreq_remove_dev(sys_dev); | ||
1638 | break; | 1727 | break; |
1639 | case CPU_DEAD: | 1728 | case CPU_DOWN_FAILED: |
1640 | cpufreq_remove_dev(sys_dev); | 1729 | cpufreq_add_dev(sys_dev); |
1641 | break; | 1730 | break; |
1642 | } | 1731 | } |
1643 | } | 1732 | } |
@@ -1751,3 +1840,16 @@ int cpufreq_unregister_driver(struct cpufreq_driver *driver) | |||
1751 | return 0; | 1840 | return 0; |
1752 | } | 1841 | } |
1753 | EXPORT_SYMBOL_GPL(cpufreq_unregister_driver); | 1842 | EXPORT_SYMBOL_GPL(cpufreq_unregister_driver); |
1843 | |||
1844 | static int __init cpufreq_core_init(void) | ||
1845 | { | ||
1846 | int cpu; | ||
1847 | |||
1848 | for_each_possible_cpu(cpu) { | ||
1849 | per_cpu(policy_cpu, cpu) = -1; | ||
1850 | init_rwsem(&per_cpu(cpu_policy_rwsem, cpu)); | ||
1851 | } | ||
1852 | return 0; | ||
1853 | } | ||
1854 | |||
1855 | core_initcall(cpufreq_core_init); | ||
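cpufreq_core_init() above initialises one rwsem per possible CPU at core_initcall time, so every lock exists before any policy is registered or any CPU is hotplugged. A rough user-space analogue of that allocate-all-locks-up-front pattern, with sysconf() standing in for for_each_possible_cpu() and all names invented for the example; build with cc -pthread:

	#include <pthread.h>
	#include <stdio.h>
	#include <stdlib.h>
	#include <unistd.h>

	static pthread_rwlock_t *cpu_policy_lock;	/* one lock per CPU */
	static int *policy_cpu;				/* owning CPU per policy, -1 = none */
	static long ncpus;

	/* Analogue of cpufreq_core_init(): run once, before any per-CPU work. */
	static int core_init(void)
	{
		long cpu;

		ncpus = sysconf(_SC_NPROCESSORS_CONF);
		if (ncpus < 1)
			return -1;
		cpu_policy_lock = calloc(ncpus, sizeof(*cpu_policy_lock));
		policy_cpu = calloc(ncpus, sizeof(*policy_cpu));
		if (!cpu_policy_lock || !policy_cpu)
			return -1;

		for (cpu = 0; cpu < ncpus; cpu++) {
			policy_cpu[cpu] = -1;		/* no policy attached yet */
			pthread_rwlock_init(&cpu_policy_lock[cpu], NULL);
		}
		return 0;
	}

	int main(void)
	{
		if (core_init())
			return 1;
		printf("initialised %ld per-CPU locks\n", ncpus);
		return 0;
	}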
diff --git a/drivers/cpufreq/cpufreq_conservative.c b/drivers/cpufreq/cpufreq_conservative.c index 05d6c22ba07c..26f440ccc3fb 100644 --- a/drivers/cpufreq/cpufreq_conservative.c +++ b/drivers/cpufreq/cpufreq_conservative.c | |||
@@ -429,14 +429,12 @@ static void dbs_check_cpu(int cpu) | |||
429 | static void do_dbs_timer(struct work_struct *work) | 429 | static void do_dbs_timer(struct work_struct *work) |
430 | { | 430 | { |
431 | int i; | 431 | int i; |
432 | lock_cpu_hotplug(); | ||
433 | mutex_lock(&dbs_mutex); | 432 | mutex_lock(&dbs_mutex); |
434 | for_each_online_cpu(i) | 433 | for_each_online_cpu(i) |
435 | dbs_check_cpu(i); | 434 | dbs_check_cpu(i); |
436 | schedule_delayed_work(&dbs_work, | 435 | schedule_delayed_work(&dbs_work, |
437 | usecs_to_jiffies(dbs_tuners_ins.sampling_rate)); | 436 | usecs_to_jiffies(dbs_tuners_ins.sampling_rate)); |
438 | mutex_unlock(&dbs_mutex); | 437 | mutex_unlock(&dbs_mutex); |
439 | unlock_cpu_hotplug(); | ||
440 | } | 438 | } |
441 | 439 | ||
442 | static inline void dbs_timer_init(void) | 440 | static inline void dbs_timer_init(void) |
diff --git a/drivers/cpufreq/cpufreq_ondemand.c b/drivers/cpufreq/cpufreq_ondemand.c index f697449327c6..d60bcb9d14cc 100644 --- a/drivers/cpufreq/cpufreq_ondemand.c +++ b/drivers/cpufreq/cpufreq_ondemand.c | |||
@@ -52,19 +52,20 @@ static unsigned int def_sampling_rate; | |||
52 | static void do_dbs_timer(struct work_struct *work); | 52 | static void do_dbs_timer(struct work_struct *work); |
53 | 53 | ||
54 | /* Sampling types */ | 54 | /* Sampling types */ |
55 | enum dbs_sample {DBS_NORMAL_SAMPLE, DBS_SUB_SAMPLE}; | 55 | enum {DBS_NORMAL_SAMPLE, DBS_SUB_SAMPLE}; |
56 | 56 | ||
57 | struct cpu_dbs_info_s { | 57 | struct cpu_dbs_info_s { |
58 | cputime64_t prev_cpu_idle; | 58 | cputime64_t prev_cpu_idle; |
59 | cputime64_t prev_cpu_wall; | 59 | cputime64_t prev_cpu_wall; |
60 | struct cpufreq_policy *cur_policy; | 60 | struct cpufreq_policy *cur_policy; |
61 | struct delayed_work work; | 61 | struct delayed_work work; |
62 | enum dbs_sample sample_type; | ||
63 | unsigned int enable; | ||
64 | struct cpufreq_frequency_table *freq_table; | 62 | struct cpufreq_frequency_table *freq_table; |
65 | unsigned int freq_lo; | 63 | unsigned int freq_lo; |
66 | unsigned int freq_lo_jiffies; | 64 | unsigned int freq_lo_jiffies; |
67 | unsigned int freq_hi_jiffies; | 65 | unsigned int freq_hi_jiffies; |
66 | int cpu; | ||
67 | unsigned int enable:1, | ||
68 | sample_type:1; | ||
68 | }; | 69 | }; |
69 | static DEFINE_PER_CPU(struct cpu_dbs_info_s, cpu_dbs_info); | 70 | static DEFINE_PER_CPU(struct cpu_dbs_info_s, cpu_dbs_info); |
70 | 71 | ||
@@ -402,7 +403,7 @@ static void dbs_check_cpu(struct cpu_dbs_info_s *this_dbs_info) | |||
402 | if (load < (dbs_tuners_ins.up_threshold - 10)) { | 403 | if (load < (dbs_tuners_ins.up_threshold - 10)) { |
403 | unsigned int freq_next, freq_cur; | 404 | unsigned int freq_next, freq_cur; |
404 | 405 | ||
405 | freq_cur = cpufreq_driver_getavg(policy); | 406 | freq_cur = __cpufreq_driver_getavg(policy); |
406 | if (!freq_cur) | 407 | if (!freq_cur) |
407 | freq_cur = policy->cur; | 408 | freq_cur = policy->cur; |
408 | 409 | ||
@@ -423,9 +424,11 @@ static void dbs_check_cpu(struct cpu_dbs_info_s *this_dbs_info) | |||
423 | 424 | ||
424 | static void do_dbs_timer(struct work_struct *work) | 425 | static void do_dbs_timer(struct work_struct *work) |
425 | { | 426 | { |
426 | unsigned int cpu = smp_processor_id(); | 427 | struct cpu_dbs_info_s *dbs_info = |
427 | struct cpu_dbs_info_s *dbs_info = &per_cpu(cpu_dbs_info, cpu); | 428 | container_of(work, struct cpu_dbs_info_s, work.work); |
428 | enum dbs_sample sample_type = dbs_info->sample_type; | 429 | unsigned int cpu = dbs_info->cpu; |
430 | int sample_type = dbs_info->sample_type; | ||
431 | |||
429 | /* We want all CPUs to do sampling nearly on same jiffy */ | 432 | /* We want all CPUs to do sampling nearly on same jiffy */ |
430 | int delay = usecs_to_jiffies(dbs_tuners_ins.sampling_rate); | 433 | int delay = usecs_to_jiffies(dbs_tuners_ins.sampling_rate); |
431 | 434 | ||
@@ -434,15 +437,19 @@ static void do_dbs_timer(struct work_struct *work) | |||
434 | 437 | ||
435 | delay -= jiffies % delay; | 438 | delay -= jiffies % delay; |
436 | 439 | ||
437 | if (!dbs_info->enable) | 440 | if (lock_policy_rwsem_write(cpu) < 0) |
441 | return; | ||
442 | |||
443 | if (!dbs_info->enable) { | ||
444 | unlock_policy_rwsem_write(cpu); | ||
438 | return; | 445 | return; |
446 | } | ||
447 | |||
439 | /* Common NORMAL_SAMPLE setup */ | 448 | /* Common NORMAL_SAMPLE setup */ |
440 | dbs_info->sample_type = DBS_NORMAL_SAMPLE; | 449 | dbs_info->sample_type = DBS_NORMAL_SAMPLE; |
441 | if (!dbs_tuners_ins.powersave_bias || | 450 | if (!dbs_tuners_ins.powersave_bias || |
442 | sample_type == DBS_NORMAL_SAMPLE) { | 451 | sample_type == DBS_NORMAL_SAMPLE) { |
443 | lock_cpu_hotplug(); | ||
444 | dbs_check_cpu(dbs_info); | 452 | dbs_check_cpu(dbs_info); |
445 | unlock_cpu_hotplug(); | ||
446 | if (dbs_info->freq_lo) { | 453 | if (dbs_info->freq_lo) { |
447 | /* Setup timer for SUB_SAMPLE */ | 454 | /* Setup timer for SUB_SAMPLE */ |
448 | dbs_info->sample_type = DBS_SUB_SAMPLE; | 455 | dbs_info->sample_type = DBS_SUB_SAMPLE; |
@@ -454,26 +461,27 @@ static void do_dbs_timer(struct work_struct *work) | |||
454 | CPUFREQ_RELATION_H); | 461 | CPUFREQ_RELATION_H); |
455 | } | 462 | } |
456 | queue_delayed_work_on(cpu, kondemand_wq, &dbs_info->work, delay); | 463 | queue_delayed_work_on(cpu, kondemand_wq, &dbs_info->work, delay); |
464 | unlock_policy_rwsem_write(cpu); | ||
457 | } | 465 | } |
458 | 466 | ||
459 | static inline void dbs_timer_init(unsigned int cpu) | 467 | static inline void dbs_timer_init(struct cpu_dbs_info_s *dbs_info) |
460 | { | 468 | { |
461 | struct cpu_dbs_info_s *dbs_info = &per_cpu(cpu_dbs_info, cpu); | ||
462 | /* We want all CPUs to do sampling nearly on same jiffy */ | 469 | /* We want all CPUs to do sampling nearly on same jiffy */ |
463 | int delay = usecs_to_jiffies(dbs_tuners_ins.sampling_rate); | 470 | int delay = usecs_to_jiffies(dbs_tuners_ins.sampling_rate); |
464 | delay -= jiffies % delay; | 471 | delay -= jiffies % delay; |
465 | 472 | ||
473 | dbs_info->enable = 1; | ||
466 | ondemand_powersave_bias_init(); | 474 | ondemand_powersave_bias_init(); |
467 | INIT_DELAYED_WORK_NAR(&dbs_info->work, do_dbs_timer); | ||
468 | dbs_info->sample_type = DBS_NORMAL_SAMPLE; | 475 | dbs_info->sample_type = DBS_NORMAL_SAMPLE; |
469 | queue_delayed_work_on(cpu, kondemand_wq, &dbs_info->work, delay); | 476 | INIT_DELAYED_WORK_NAR(&dbs_info->work, do_dbs_timer); |
477 | queue_delayed_work_on(dbs_info->cpu, kondemand_wq, &dbs_info->work, | ||
478 | delay); | ||
470 | } | 479 | } |
471 | 480 | ||
472 | static inline void dbs_timer_exit(struct cpu_dbs_info_s *dbs_info) | 481 | static inline void dbs_timer_exit(struct cpu_dbs_info_s *dbs_info) |
473 | { | 482 | { |
474 | dbs_info->enable = 0; | 483 | dbs_info->enable = 0; |
475 | cancel_delayed_work(&dbs_info->work); | 484 | cancel_delayed_work(&dbs_info->work); |
476 | flush_workqueue(kondemand_wq); | ||
477 | } | 485 | } |
478 | 486 | ||
479 | static int cpufreq_governor_dbs(struct cpufreq_policy *policy, | 487 | static int cpufreq_governor_dbs(struct cpufreq_policy *policy, |
@@ -502,21 +510,9 @@ static int cpufreq_governor_dbs(struct cpufreq_policy *policy, | |||
502 | 510 | ||
503 | mutex_lock(&dbs_mutex); | 511 | mutex_lock(&dbs_mutex); |
504 | dbs_enable++; | 512 | dbs_enable++; |
505 | if (dbs_enable == 1) { | ||
506 | kondemand_wq = create_workqueue("kondemand"); | ||
507 | if (!kondemand_wq) { | ||
508 | printk(KERN_ERR | ||
509 | "Creation of kondemand failed\n"); | ||
510 | dbs_enable--; | ||
511 | mutex_unlock(&dbs_mutex); | ||
512 | return -ENOSPC; | ||
513 | } | ||
514 | } | ||
515 | 513 | ||
516 | rc = sysfs_create_group(&policy->kobj, &dbs_attr_group); | 514 | rc = sysfs_create_group(&policy->kobj, &dbs_attr_group); |
517 | if (rc) { | 515 | if (rc) { |
518 | if (dbs_enable == 1) | ||
519 | destroy_workqueue(kondemand_wq); | ||
520 | dbs_enable--; | 516 | dbs_enable--; |
521 | mutex_unlock(&dbs_mutex); | 517 | mutex_unlock(&dbs_mutex); |
522 | return rc; | 518 | return rc; |
@@ -530,7 +526,7 @@ static int cpufreq_governor_dbs(struct cpufreq_policy *policy, | |||
530 | j_dbs_info->prev_cpu_idle = get_cpu_idle_time(j); | 526 | j_dbs_info->prev_cpu_idle = get_cpu_idle_time(j); |
531 | j_dbs_info->prev_cpu_wall = get_jiffies_64(); | 527 | j_dbs_info->prev_cpu_wall = get_jiffies_64(); |
532 | } | 528 | } |
533 | this_dbs_info->enable = 1; | 529 | this_dbs_info->cpu = cpu; |
534 | /* | 530 | /* |
535 | * Start the timerschedule work, when this governor | 531 | * Start the timerschedule work, when this governor |
536 | * is used for first time | 532 | * is used for first time |
@@ -550,7 +546,7 @@ static int cpufreq_governor_dbs(struct cpufreq_policy *policy, | |||
550 | 546 | ||
551 | dbs_tuners_ins.sampling_rate = def_sampling_rate; | 547 | dbs_tuners_ins.sampling_rate = def_sampling_rate; |
552 | } | 548 | } |
553 | dbs_timer_init(policy->cpu); | 549 | dbs_timer_init(this_dbs_info); |
554 | 550 | ||
555 | mutex_unlock(&dbs_mutex); | 551 | mutex_unlock(&dbs_mutex); |
556 | break; | 552 | break; |
@@ -560,9 +556,6 @@ static int cpufreq_governor_dbs(struct cpufreq_policy *policy, | |||
560 | dbs_timer_exit(this_dbs_info); | 556 | dbs_timer_exit(this_dbs_info); |
561 | sysfs_remove_group(&policy->kobj, &dbs_attr_group); | 557 | sysfs_remove_group(&policy->kobj, &dbs_attr_group); |
562 | dbs_enable--; | 558 | dbs_enable--; |
563 | if (dbs_enable == 0) | ||
564 | destroy_workqueue(kondemand_wq); | ||
565 | |||
566 | mutex_unlock(&dbs_mutex); | 559 | mutex_unlock(&dbs_mutex); |
567 | 560 | ||
568 | break; | 561 | break; |
@@ -591,12 +584,18 @@ static struct cpufreq_governor cpufreq_gov_dbs = { | |||
591 | 584 | ||
592 | static int __init cpufreq_gov_dbs_init(void) | 585 | static int __init cpufreq_gov_dbs_init(void) |
593 | { | 586 | { |
587 | kondemand_wq = create_workqueue("kondemand"); | ||
588 | if (!kondemand_wq) { | ||
589 | printk(KERN_ERR "Creation of kondemand failed\n"); | ||
590 | return -EFAULT; | ||
591 | } | ||
594 | return cpufreq_register_governor(&cpufreq_gov_dbs); | 592 | return cpufreq_register_governor(&cpufreq_gov_dbs); |
595 | } | 593 | } |
596 | 594 | ||
597 | static void __exit cpufreq_gov_dbs_exit(void) | 595 | static void __exit cpufreq_gov_dbs_exit(void) |
598 | { | 596 | { |
599 | cpufreq_unregister_governor(&cpufreq_gov_dbs); | 597 | cpufreq_unregister_governor(&cpufreq_gov_dbs); |
598 | destroy_workqueue(kondemand_wq); | ||
600 | } | 599 | } |
601 | 600 | ||
602 | 601 | ||
@@ -608,3 +607,4 @@ MODULE_LICENSE("GPL"); | |||
608 | 607 | ||
609 | module_init(cpufreq_gov_dbs_init); | 608 | module_init(cpufreq_gov_dbs_init); |
610 | module_exit(cpufreq_gov_dbs_exit); | 609 | module_exit(cpufreq_gov_dbs_exit); |
610 | |||
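The ondemand rework stops deriving per-CPU state from smp_processor_id() inside the work handler and instead recovers it from the work item itself via container_of(), then requeues on dbs_info->cpu explicitly. A small stand-alone C illustration of the container_of() idiom; the macro below is a simplified form of the kernel's, and the structs are invented for the example:

	#include <stddef.h>
	#include <stdio.h>

	/* Simplified kernel-style macro: member pointer -> enclosing struct. */
	#define container_of(ptr, type, member) \
		((type *)((char *)(ptr) - offsetof(type, member)))

	struct work {			/* stand-in for struct delayed_work */
		int pending;
	};

	struct cpu_state {		/* stand-in for struct cpu_dbs_info_s */
		int cpu;
		struct work work;
	};

	/* The callback only receives the embedded work pointer... */
	static void timer_fn(struct work *w)
	{
		/* ...and recovers its owning per-CPU state without smp_processor_id(). */
		struct cpu_state *st = container_of(w, struct cpu_state, work);

		printf("work ran for cpu %d\n", st->cpu);
	}

	int main(void)
	{
		struct cpu_state st = { .cpu = 3 };

		timer_fn(&st.work);
		return 0;
	}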
diff --git a/drivers/cpufreq/cpufreq_stats.c b/drivers/cpufreq/cpufreq_stats.c index 91ad342a6051..d1c7cac9316c 100644 --- a/drivers/cpufreq/cpufreq_stats.c +++ b/drivers/cpufreq/cpufreq_stats.c | |||
@@ -370,12 +370,10 @@ __exit cpufreq_stats_exit(void) | |||
370 | cpufreq_unregister_notifier(¬ifier_trans_block, | 370 | cpufreq_unregister_notifier(¬ifier_trans_block, |
371 | CPUFREQ_TRANSITION_NOTIFIER); | 371 | CPUFREQ_TRANSITION_NOTIFIER); |
372 | unregister_hotcpu_notifier(&cpufreq_stat_cpu_notifier); | 372 | unregister_hotcpu_notifier(&cpufreq_stat_cpu_notifier); |
373 | lock_cpu_hotplug(); | ||
374 | for_each_online_cpu(cpu) { | 373 | for_each_online_cpu(cpu) { |
375 | cpufreq_stat_cpu_callback(&cpufreq_stat_cpu_notifier, | 374 | cpufreq_stat_cpu_callback(&cpufreq_stat_cpu_notifier, |
376 | CPU_DEAD, (void *)(long)cpu); | 375 | CPU_DEAD, (void *)(long)cpu); |
377 | } | 376 | } |
378 | unlock_cpu_hotplug(); | ||
379 | } | 377 | } |
380 | 378 | ||
381 | MODULE_AUTHOR ("Zou Nan hai <nanhai.zou@intel.com>"); | 379 | MODULE_AUTHOR ("Zou Nan hai <nanhai.zou@intel.com>"); |
diff --git a/drivers/cpufreq/cpufreq_userspace.c b/drivers/cpufreq/cpufreq_userspace.c index 2a4eb0bfaf30..860345c7799a 100644 --- a/drivers/cpufreq/cpufreq_userspace.c +++ b/drivers/cpufreq/cpufreq_userspace.c | |||
@@ -71,7 +71,6 @@ static int cpufreq_set(unsigned int freq, struct cpufreq_policy *policy) | |||
71 | 71 | ||
72 | dprintk("cpufreq_set for cpu %u, freq %u kHz\n", policy->cpu, freq); | 72 | dprintk("cpufreq_set for cpu %u, freq %u kHz\n", policy->cpu, freq); |
73 | 73 | ||
74 | lock_cpu_hotplug(); | ||
75 | mutex_lock(&userspace_mutex); | 74 | mutex_lock(&userspace_mutex); |
76 | if (!cpu_is_managed[policy->cpu]) | 75 | if (!cpu_is_managed[policy->cpu]) |
77 | goto err; | 76 | goto err; |
@@ -94,7 +93,6 @@ static int cpufreq_set(unsigned int freq, struct cpufreq_policy *policy) | |||
94 | 93 | ||
95 | err: | 94 | err: |
96 | mutex_unlock(&userspace_mutex); | 95 | mutex_unlock(&userspace_mutex); |
97 | unlock_cpu_hotplug(); | ||
98 | return ret; | 96 | return ret; |
99 | } | 97 | } |
100 | 98 | ||
diff --git a/drivers/input/touchscreen/ads7846.c b/drivers/input/touchscreen/ads7846.c index cd251efda410..0a26e0663542 100644 --- a/drivers/input/touchscreen/ads7846.c +++ b/drivers/input/touchscreen/ads7846.c | |||
@@ -546,7 +546,7 @@ static void ads7846_rx(void *ads) | |||
546 | ts->spi->dev.bus_id, ts->tc.ignore, Rt); | 546 | ts->spi->dev.bus_id, ts->tc.ignore, Rt); |
547 | #endif | 547 | #endif |
548 | hrtimer_start(&ts->timer, ktime_set(0, TS_POLL_PERIOD), | 548 | hrtimer_start(&ts->timer, ktime_set(0, TS_POLL_PERIOD), |
549 | HRTIMER_REL); | 549 | HRTIMER_MODE_REL); |
550 | return; | 550 | return; |
551 | } | 551 | } |
552 | 552 | ||
@@ -578,7 +578,8 @@ static void ads7846_rx(void *ads) | |||
578 | #endif | 578 | #endif |
579 | } | 579 | } |
580 | 580 | ||
581 | hrtimer_start(&ts->timer, ktime_set(0, TS_POLL_PERIOD), HRTIMER_REL); | 581 | hrtimer_start(&ts->timer, ktime_set(0, TS_POLL_PERIOD), |
582 | HRTIMER_MODE_REL); | ||
582 | } | 583 | } |
583 | 584 | ||
584 | static int ads7846_debounce(void *ads, int data_idx, int *val) | 585 | static int ads7846_debounce(void *ads, int data_idx, int *val) |
@@ -667,7 +668,7 @@ static void ads7846_rx_val(void *ads) | |||
667 | status); | 668 | status); |
668 | } | 669 | } |
669 | 670 | ||
670 | static int ads7846_timer(struct hrtimer *handle) | 671 | static enum hrtimer_restart ads7846_timer(struct hrtimer *handle) |
671 | { | 672 | { |
672 | struct ads7846 *ts = container_of(handle, struct ads7846, timer); | 673 | struct ads7846 *ts = container_of(handle, struct ads7846, timer); |
673 | int status = 0; | 674 | int status = 0; |
@@ -724,7 +725,7 @@ static irqreturn_t ads7846_irq(int irq, void *handle) | |||
724 | disable_irq(ts->spi->irq); | 725 | disable_irq(ts->spi->irq); |
725 | ts->pending = 1; | 726 | ts->pending = 1; |
726 | hrtimer_start(&ts->timer, ktime_set(0, TS_POLL_DELAY), | 727 | hrtimer_start(&ts->timer, ktime_set(0, TS_POLL_DELAY), |
727 | HRTIMER_REL); | 728 | HRTIMER_MODE_REL); |
728 | } | 729 | } |
729 | } | 730 | } |
730 | spin_unlock_irqrestore(&ts->lock, flags); | 731 | spin_unlock_irqrestore(&ts->lock, flags); |
@@ -862,7 +863,7 @@ static int __devinit ads7846_probe(struct spi_device *spi) | |||
862 | ts->spi = spi; | 863 | ts->spi = spi; |
863 | ts->input = input_dev; | 864 | ts->input = input_dev; |
864 | 865 | ||
865 | hrtimer_init(&ts->timer, CLOCK_MONOTONIC, HRTIMER_REL); | 866 | hrtimer_init(&ts->timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL); |
866 | ts->timer.function = ads7846_timer; | 867 | ts->timer.function = ads7846_timer; |
867 | 868 | ||
868 | spin_lock_init(&ts->lock); | 869 | spin_lock_init(&ts->lock); |
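The ads7846 hunks follow the hrtimer interface rename: the relative-mode flag is now HRTIMER_MODE_REL and the callback returns enum hrtimer_restart rather than int. A kernel-style sketch of the resulting callback shape, illustrative only (the real ads7846_timer() does SPI work and rearms itself with hrtimer_start(), as the hunks above show):

	#include <linux/hrtimer.h>
	#include <linux/ktime.h>

	static struct hrtimer poll_timer;

	/* New-style callback: the restart policy is signalled via the return value. */
	static enum hrtimer_restart poll_timer_fn(struct hrtimer *t)
	{
		/* do the periodic work here, then decide whether to rearm */
		return HRTIMER_NORESTART;	/* ads7846 rearms manually instead */
	}

	static void poll_timer_setup(void)
	{
		hrtimer_init(&poll_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
		poll_timer.function = poll_timer_fn;
		/* 1 ms relative timeout, matching the HRTIMER_MODE_REL usage above */
		hrtimer_start(&poll_timer, ktime_set(0, 1 * 1000 * 1000),
			      HRTIMER_MODE_REL);
	}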
diff --git a/drivers/isdn/gigaset/Makefile b/drivers/isdn/gigaset/Makefile index 835b806a9de7..077e297d8c72 100644 --- a/drivers/isdn/gigaset/Makefile +++ b/drivers/isdn/gigaset/Makefile | |||
@@ -5,4 +5,4 @@ ser_gigaset-y := ser-gigaset.o asyncdata.o | |||
5 | 5 | ||
6 | obj-$(CONFIG_GIGASET_M105) += usb_gigaset.o gigaset.o | 6 | obj-$(CONFIG_GIGASET_M105) += usb_gigaset.o gigaset.o |
7 | obj-$(CONFIG_GIGASET_BASE) += bas_gigaset.o gigaset.o | 7 | obj-$(CONFIG_GIGASET_BASE) += bas_gigaset.o gigaset.o |
8 | obj-$(CONFIG_GIGASET_M105) += ser_gigaset.o gigaset.o | 8 | obj-$(CONFIG_GIGASET_M101) += ser_gigaset.o gigaset.o |
diff --git a/drivers/video/s3c2410fb.c b/drivers/video/s3c2410fb.c index ccef56d0c157..ed3426062a8b 100644 --- a/drivers/video/s3c2410fb.c +++ b/drivers/video/s3c2410fb.c | |||
@@ -791,6 +791,8 @@ static int __init s3c2410fb_probe(struct platform_device *pdev) | |||
791 | 791 | ||
792 | info = fbinfo->par; | 792 | info = fbinfo->par; |
793 | info->fb = fbinfo; | 793 | info->fb = fbinfo; |
794 | info->dev = &pdev->dev; | ||
795 | |||
794 | platform_set_drvdata(pdev, fbinfo); | 796 | platform_set_drvdata(pdev, fbinfo); |
795 | 797 | ||
796 | dprintk("devinit\n"); | 798 | dprintk("devinit\n"); |