about summary refs log tree commit diff stats
diff options
context:
space:
mode:
author: Thomas Gleixner <tglx@linutronix.de> 2008-01-30 07:34:07 -0500
committer: Ingo Molnar <mingo@elte.hu> 2008-01-30 07:34:07 -0500
commitd7c8f21a8cad0228c7c5ce2bb6dbd95d1ee49d13 (patch)
treed1e305bec62022a0bec82a3499a372c2c7c40583
parentd1028a154c65d7fadd1b2d0276c077014d401ec7 (diff)
x86: cpa: move flush to cpa
The set_memory_* and set_pages_* family of APIs currently requires the callers to do a global tlb flush after the function call; forgetting this is a very nasty deathtrap. This patch moves the global tlb flush into each of the callers.

Signed-off-by: Arjan van de Ven <arjan@linux.intel.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
-rw-r--r--arch/x86/kernel/pci-gart_64.c1
-rw-r--r--arch/x86/mm/init_32.c14
-rw-r--r--arch/x86/mm/init_64.c10
-rw-r--r--arch/x86/mm/ioremap.c2
-rw-r--r--arch/x86/mm/pageattr.c137
-rw-r--r--drivers/char/agp/ali-agp.c2
-rw-r--r--drivers/char/agp/i460-agp.c2
-rw-r--r--drivers/char/agp/intel-agp.c5
-rw-r--r--drivers/video/vermilion/vermilion.c6
-rw-r--r--include/asm-x86/agp.h6
-rw-r--r--include/asm-x86/cacheflush.h1
-rw-r--r--sound/pci/intel8x0.c1
12 files changed, 72 insertions, 115 deletions
diff --git a/arch/x86/kernel/pci-gart_64.c b/arch/x86/kernel/pci-gart_64.c
index 8860c6eba8ab..4d5cc7181982 100644
--- a/arch/x86/kernel/pci-gart_64.c
+++ b/arch/x86/kernel/pci-gart_64.c
@@ -572,7 +572,6 @@ static __init int init_k8_gatt(struct agp_kern_info *info)
572 panic("Cannot allocate GATT table"); 572 panic("Cannot allocate GATT table");
573 if (set_memory_uc((unsigned long)gatt, gatt_size >> PAGE_SHIFT)) 573 if (set_memory_uc((unsigned long)gatt, gatt_size >> PAGE_SHIFT))
574 panic("Could not set GART PTEs to uncacheable pages"); 574 panic("Could not set GART PTEs to uncacheable pages");
575 global_flush_tlb();
576 575
577 memset(gatt, 0, gatt_size); 576 memset(gatt, 0, gatt_size);
578 agp_gatt_table = gatt; 577 agp_gatt_table = gatt;
diff --git a/arch/x86/mm/init_32.c b/arch/x86/mm/init_32.c
index f7b941c3b2c3..0d3369b900e9 100644
--- a/arch/x86/mm/init_32.c
+++ b/arch/x86/mm/init_32.c
@@ -752,15 +752,11 @@ void mark_rodata_ro(void)
752 printk("Write protecting the kernel text: %luk\n", size >> 10); 752 printk("Write protecting the kernel text: %luk\n", size >> 10);
753 753
754#ifdef CONFIG_CPA_DEBUG 754#ifdef CONFIG_CPA_DEBUG
755 global_flush_tlb();
756
757 printk("Testing CPA: Reverting %lx-%lx\n", start, start+size); 755 printk("Testing CPA: Reverting %lx-%lx\n", start, start+size);
758 set_pages_rw(virt_to_page(start), size>>PAGE_SHIFT); 756 set_pages_rw(virt_to_page(start), size>>PAGE_SHIFT);
759 global_flush_tlb();
760 757
761 printk("Testing CPA: write protecting again\n"); 758 printk("Testing CPA: write protecting again\n");
762 set_pages_ro(virt_to_page(start), size>>PAGE_SHIFT); 759 set_pages_ro(virt_to_page(start), size>>PAGE_SHIFT);
763 global_flush_tlb();
764#endif 760#endif
765 } 761 }
766#endif 762#endif
@@ -770,22 +766,12 @@ void mark_rodata_ro(void)
770 printk("Write protecting the kernel read-only data: %luk\n", 766 printk("Write protecting the kernel read-only data: %luk\n",
771 size >> 10); 767 size >> 10);
772 768
773 /*
774 * set_pages_*() requires a global_flush_tlb() call after it.
775 * We do this after the printk so that if something went wrong in the
776 * change, the printk gets out at least to give a better debug hint
777 * of who is the culprit.
778 */
779 global_flush_tlb();
780
781#ifdef CONFIG_CPA_DEBUG 769#ifdef CONFIG_CPA_DEBUG
782 printk("Testing CPA: undo %lx-%lx\n", start, start + size); 770 printk("Testing CPA: undo %lx-%lx\n", start, start + size);
783 set_pages_rw(virt_to_page(start), size >> PAGE_SHIFT); 771 set_pages_rw(virt_to_page(start), size >> PAGE_SHIFT);
784 global_flush_tlb();
785 772
786 printk("Testing CPA: write protecting again\n"); 773 printk("Testing CPA: write protecting again\n");
787 set_pages_ro(virt_to_page(start), size >> PAGE_SHIFT); 774 set_pages_ro(virt_to_page(start), size >> PAGE_SHIFT);
788 global_flush_tlb();
789#endif 775#endif
790} 776}
791#endif 777#endif
diff --git a/arch/x86/mm/init_64.c b/arch/x86/mm/init_64.c
index 4757be7b5e55..9b69fa54a831 100644
--- a/arch/x86/mm/init_64.c
+++ b/arch/x86/mm/init_64.c
@@ -610,22 +610,12 @@ void mark_rodata_ro(void)
610 printk(KERN_INFO "Write protecting the kernel read-only data: %luk\n", 610 printk(KERN_INFO "Write protecting the kernel read-only data: %luk\n",
611 (end - start) >> 10); 611 (end - start) >> 10);
612 612
613 /*
614 * set_memory_*() requires a global_flush_tlb() call after it.
615 * We do this after the printk so that if something went wrong in the
616 * change, the printk gets out at least to give a better debug hint
617 * of who is the culprit.
618 */
619 global_flush_tlb();
620
621#ifdef CONFIG_CPA_DEBUG 613#ifdef CONFIG_CPA_DEBUG
622 printk("Testing CPA: undo %lx-%lx\n", start, end); 614 printk("Testing CPA: undo %lx-%lx\n", start, end);
623 set_memory_rw(start, (end-start) >> PAGE_SHIFT); 615 set_memory_rw(start, (end-start) >> PAGE_SHIFT);
624 global_flush_tlb();
625 616
626 printk("Testing CPA: again\n"); 617 printk("Testing CPA: again\n");
627 set_memory_ro(start, (end-start) >> PAGE_SHIFT); 618 set_memory_ro(start, (end-start) >> PAGE_SHIFT);
628 global_flush_tlb();
629#endif 619#endif
630} 620}
631#endif 621#endif
diff --git a/arch/x86/mm/ioremap.c b/arch/x86/mm/ioremap.c
index b86f66fa5185..6a9a1418bc98 100644
--- a/arch/x86/mm/ioremap.c
+++ b/arch/x86/mm/ioremap.c
@@ -96,8 +96,6 @@ static int ioremap_change_attr(unsigned long paddr, unsigned long size,
96 err = set_memory_wb(vaddr, nrpages); 96 err = set_memory_wb(vaddr, nrpages);
97 break; 97 break;
98 } 98 }
99 if (!err)
100 global_flush_tlb();
101 99
102 return err; 100 return err;
103} 101}
diff --git a/arch/x86/mm/pageattr.c b/arch/x86/mm/pageattr.c
index e4d2b6930e61..a2d747c06147 100644
--- a/arch/x86/mm/pageattr.c
+++ b/arch/x86/mm/pageattr.c
@@ -23,6 +23,36 @@ within(unsigned long addr, unsigned long start, unsigned long end)
23} 23}
24 24
25/* 25/*
26 * Flushing functions
27 */
28void clflush_cache_range(void *addr, int size)
29{
30 int i;
31
32 for (i = 0; i < size; i += boot_cpu_data.x86_clflush_size)
33 clflush(addr+i);
34}
35
36static void flush_kernel_map(void *arg)
37{
38 /*
39 * Flush all to work around Errata in early athlons regarding
40 * large page flushing.
41 */
42 __flush_tlb_all();
43
44 if (boot_cpu_data.x86_model >= 4)
45 wbinvd();
46}
47
48static void global_flush_tlb(void)
49{
50 BUG_ON(irqs_disabled());
51
52 on_each_cpu(flush_kernel_map, NULL, 1, 1);
53}
54
55/*
26 * Certain areas of memory on x86 require very specific protection flags, 56 * Certain areas of memory on x86 require very specific protection flags,
27 * for example the BIOS area or kernel text. Callers don't always get this 57 * for example the BIOS area or kernel text. Callers don't always get this
28 * right (again, ioremap() on BIOS memory is not uncommon) so this function 58 * right (again, ioremap() on BIOS memory is not uncommon) so this function
@@ -328,149 +358,124 @@ static int change_page_attr_clear(unsigned long addr, int numpages,
328 358
329int set_memory_uc(unsigned long addr, int numpages) 359int set_memory_uc(unsigned long addr, int numpages)
330{ 360{
331 pgprot_t uncached; 361 int err;
332 362
333 pgprot_val(uncached) = _PAGE_PCD | _PAGE_PWT; 363 err = change_page_attr_set(addr, numpages,
334 return change_page_attr_set(addr, numpages, uncached); 364 __pgprot(_PAGE_PCD | _PAGE_PWT));
365 global_flush_tlb();
366 return err;
335} 367}
336EXPORT_SYMBOL(set_memory_uc); 368EXPORT_SYMBOL(set_memory_uc);
337 369
338int set_memory_wb(unsigned long addr, int numpages) 370int set_memory_wb(unsigned long addr, int numpages)
339{ 371{
340 pgprot_t uncached; 372 int err;
341 373
342 pgprot_val(uncached) = _PAGE_PCD | _PAGE_PWT; 374 err = change_page_attr_clear(addr, numpages,
343 return change_page_attr_clear(addr, numpages, uncached); 375 __pgprot(_PAGE_PCD | _PAGE_PWT));
376 global_flush_tlb();
377 return err;
344} 378}
345EXPORT_SYMBOL(set_memory_wb); 379EXPORT_SYMBOL(set_memory_wb);
346 380
347int set_memory_x(unsigned long addr, int numpages) 381int set_memory_x(unsigned long addr, int numpages)
348{ 382{
349 pgprot_t nx; 383 int err;
350 384
351 pgprot_val(nx) = _PAGE_NX; 385 err = change_page_attr_clear(addr, numpages,
352 return change_page_attr_clear(addr, numpages, nx); 386 __pgprot(_PAGE_NX));
387 global_flush_tlb();
388 return err;
353} 389}
354EXPORT_SYMBOL(set_memory_x); 390EXPORT_SYMBOL(set_memory_x);
355 391
356int set_memory_nx(unsigned long addr, int numpages) 392int set_memory_nx(unsigned long addr, int numpages)
357{ 393{
358 pgprot_t nx; 394 int err;
359 395
360 pgprot_val(nx) = _PAGE_NX; 396 err = change_page_attr_set(addr, numpages,
361 return change_page_attr_set(addr, numpages, nx); 397 __pgprot(_PAGE_NX));
398 global_flush_tlb();
399 return err;
362} 400}
363EXPORT_SYMBOL(set_memory_nx); 401EXPORT_SYMBOL(set_memory_nx);
364 402
365int set_memory_ro(unsigned long addr, int numpages) 403int set_memory_ro(unsigned long addr, int numpages)
366{ 404{
367 pgprot_t rw; 405 int err;
368 406
369 pgprot_val(rw) = _PAGE_RW; 407 err = change_page_attr_clear(addr, numpages,
370 return change_page_attr_clear(addr, numpages, rw); 408 __pgprot(_PAGE_RW));
409 global_flush_tlb();
410 return err;
371} 411}
372 412
373int set_memory_rw(unsigned long addr, int numpages) 413int set_memory_rw(unsigned long addr, int numpages)
374{ 414{
375 pgprot_t rw; 415 int err;
376 416
377 pgprot_val(rw) = _PAGE_RW; 417 err = change_page_attr_set(addr, numpages,
378 return change_page_attr_set(addr, numpages, rw); 418 __pgprot(_PAGE_RW));
419 global_flush_tlb();
420 return err;
379} 421}
380 422
381int set_memory_np(unsigned long addr, int numpages) 423int set_memory_np(unsigned long addr, int numpages)
382{ 424{
383 pgprot_t present; 425 int err;
384 426
385 pgprot_val(present) = _PAGE_PRESENT; 427 err = change_page_attr_clear(addr, numpages,
386 return change_page_attr_clear(addr, numpages, present); 428 __pgprot(_PAGE_PRESENT));
429 global_flush_tlb();
430 return err;
387} 431}
388 432
389int set_pages_uc(struct page *page, int numpages) 433int set_pages_uc(struct page *page, int numpages)
390{ 434{
391 unsigned long addr = (unsigned long)page_address(page); 435 unsigned long addr = (unsigned long)page_address(page);
392 pgprot_t uncached;
393 436
394 pgprot_val(uncached) = _PAGE_PCD | _PAGE_PWT; 437 return set_memory_uc(addr, numpages);
395 return change_page_attr_set(addr, numpages, uncached);
396} 438}
397EXPORT_SYMBOL(set_pages_uc); 439EXPORT_SYMBOL(set_pages_uc);
398 440
399int set_pages_wb(struct page *page, int numpages) 441int set_pages_wb(struct page *page, int numpages)
400{ 442{
401 unsigned long addr = (unsigned long)page_address(page); 443 unsigned long addr = (unsigned long)page_address(page);
402 pgprot_t uncached;
403 444
404 pgprot_val(uncached) = _PAGE_PCD | _PAGE_PWT; 445 return set_memory_wb(addr, numpages);
405 return change_page_attr_clear(addr, numpages, uncached);
406} 446}
407EXPORT_SYMBOL(set_pages_wb); 447EXPORT_SYMBOL(set_pages_wb);
408 448
409int set_pages_x(struct page *page, int numpages) 449int set_pages_x(struct page *page, int numpages)
410{ 450{
411 unsigned long addr = (unsigned long)page_address(page); 451 unsigned long addr = (unsigned long)page_address(page);
412 pgprot_t nx;
413 452
414 pgprot_val(nx) = _PAGE_NX; 453 return set_memory_x(addr, numpages);
415 return change_page_attr_clear(addr, numpages, nx);
416} 454}
417EXPORT_SYMBOL(set_pages_x); 455EXPORT_SYMBOL(set_pages_x);
418 456
419int set_pages_nx(struct page *page, int numpages) 457int set_pages_nx(struct page *page, int numpages)
420{ 458{
421 unsigned long addr = (unsigned long)page_address(page); 459 unsigned long addr = (unsigned long)page_address(page);
422 pgprot_t nx;
423 460
424 pgprot_val(nx) = _PAGE_NX; 461 return set_memory_nx(addr, numpages);
425 return change_page_attr_set(addr, numpages, nx);
426} 462}
427EXPORT_SYMBOL(set_pages_nx); 463EXPORT_SYMBOL(set_pages_nx);
428 464
429int set_pages_ro(struct page *page, int numpages) 465int set_pages_ro(struct page *page, int numpages)
430{ 466{
431 unsigned long addr = (unsigned long)page_address(page); 467 unsigned long addr = (unsigned long)page_address(page);
432 pgprot_t rw;
433 468
434 pgprot_val(rw) = _PAGE_RW; 469 return set_memory_ro(addr, numpages);
435 return change_page_attr_clear(addr, numpages, rw);
436} 470}
437 471
438int set_pages_rw(struct page *page, int numpages) 472int set_pages_rw(struct page *page, int numpages)
439{ 473{
440 unsigned long addr = (unsigned long)page_address(page); 474 unsigned long addr = (unsigned long)page_address(page);
441 pgprot_t rw;
442
443 pgprot_val(rw) = _PAGE_RW;
444 return change_page_attr_set(addr, numpages, rw);
445}
446
447void clflush_cache_range(void *addr, int size)
448{
449 int i;
450
451 for (i = 0; i < size; i += boot_cpu_data.x86_clflush_size)
452 clflush(addr+i);
453}
454 475
455static void flush_kernel_map(void *arg) 476 return set_memory_rw(addr, numpages);
456{
457 /*
458 * Flush all to work around Errata in early athlons regarding
459 * large page flushing.
460 */
461 __flush_tlb_all();
462
463 if (boot_cpu_data.x86_model >= 4)
464 wbinvd();
465} 477}
466 478
467void global_flush_tlb(void)
468{
469 BUG_ON(irqs_disabled());
470
471 on_each_cpu(flush_kernel_map, NULL, 1, 1);
472}
473EXPORT_SYMBOL(global_flush_tlb);
474 479
475#ifdef CONFIG_DEBUG_PAGEALLOC 480#ifdef CONFIG_DEBUG_PAGEALLOC
476 481
diff --git a/drivers/char/agp/ali-agp.c b/drivers/char/agp/ali-agp.c
index aa5ddb716ffb..1ffb381130c3 100644
--- a/drivers/char/agp/ali-agp.c
+++ b/drivers/char/agp/ali-agp.c
@@ -145,7 +145,6 @@ static void *m1541_alloc_page(struct agp_bridge_data *bridge)
145 void *addr = agp_generic_alloc_page(agp_bridge); 145 void *addr = agp_generic_alloc_page(agp_bridge);
146 u32 temp; 146 u32 temp;
147 147
148 global_flush_tlb();
149 if (!addr) 148 if (!addr)
150 return NULL; 149 return NULL;
151 150
@@ -162,7 +161,6 @@ static void ali_destroy_page(void * addr, int flags)
162 if (flags & AGP_PAGE_DESTROY_UNMAP) { 161 if (flags & AGP_PAGE_DESTROY_UNMAP) {
163 global_cache_flush(); /* is this really needed? --hch */ 162 global_cache_flush(); /* is this really needed? --hch */
164 agp_generic_destroy_page(addr, flags); 163 agp_generic_destroy_page(addr, flags);
165 global_flush_tlb();
166 } else 164 } else
167 agp_generic_destroy_page(addr, flags); 165 agp_generic_destroy_page(addr, flags);
168 } 166 }
diff --git a/drivers/char/agp/i460-agp.c b/drivers/char/agp/i460-agp.c
index e72a83e2bad5..76f581c85a7d 100644
--- a/drivers/char/agp/i460-agp.c
+++ b/drivers/char/agp/i460-agp.c
@@ -527,7 +527,6 @@ static void *i460_alloc_page (struct agp_bridge_data *bridge)
527 527
528 if (I460_IO_PAGE_SHIFT <= PAGE_SHIFT) { 528 if (I460_IO_PAGE_SHIFT <= PAGE_SHIFT) {
529 page = agp_generic_alloc_page(agp_bridge); 529 page = agp_generic_alloc_page(agp_bridge);
530 global_flush_tlb();
531 } else 530 } else
532 /* Returning NULL would cause problems */ 531 /* Returning NULL would cause problems */
533 /* AK: really dubious code. */ 532 /* AK: really dubious code. */
@@ -539,7 +538,6 @@ static void i460_destroy_page (void *page, int flags)
539{ 538{
540 if (I460_IO_PAGE_SHIFT <= PAGE_SHIFT) { 539 if (I460_IO_PAGE_SHIFT <= PAGE_SHIFT) {
541 agp_generic_destroy_page(page, flags); 540 agp_generic_destroy_page(page, flags);
542 global_flush_tlb();
543 } 541 }
544} 542}
545 543
diff --git a/drivers/char/agp/intel-agp.c b/drivers/char/agp/intel-agp.c
index c03a7143928f..189efb6ef970 100644
--- a/drivers/char/agp/intel-agp.c
+++ b/drivers/char/agp/intel-agp.c
@@ -212,11 +212,9 @@ static void *i8xx_alloc_pages(void)
212 212
213 if (set_pages_uc(page, 4) < 0) { 213 if (set_pages_uc(page, 4) < 0) {
214 set_pages_wb(page, 4); 214 set_pages_wb(page, 4);
215 global_flush_tlb();
216 __free_pages(page, 2); 215 __free_pages(page, 2);
217 return NULL; 216 return NULL;
218 } 217 }
219 global_flush_tlb();
220 get_page(page); 218 get_page(page);
221 atomic_inc(&agp_bridge->current_memory_agp); 219 atomic_inc(&agp_bridge->current_memory_agp);
222 return page_address(page); 220 return page_address(page);
@@ -231,7 +229,6 @@ static void i8xx_destroy_pages(void *addr)
231 229
232 page = virt_to_page(addr); 230 page = virt_to_page(addr);
233 set_pages_wb(page, 4); 231 set_pages_wb(page, 4);
234 global_flush_tlb();
235 put_page(page); 232 put_page(page);
236 __free_pages(page, 2); 233 __free_pages(page, 2);
237 atomic_dec(&agp_bridge->current_memory_agp); 234 atomic_dec(&agp_bridge->current_memory_agp);
@@ -341,7 +338,6 @@ static struct agp_memory *alloc_agpphysmem_i8xx(size_t pg_count, int type)
341 338
342 switch (pg_count) { 339 switch (pg_count) {
343 case 1: addr = agp_bridge->driver->agp_alloc_page(agp_bridge); 340 case 1: addr = agp_bridge->driver->agp_alloc_page(agp_bridge);
344 global_flush_tlb();
345 break; 341 break;
346 case 4: 342 case 4:
347 /* kludge to get 4 physical pages for ARGB cursor */ 343 /* kludge to get 4 physical pages for ARGB cursor */
@@ -404,7 +400,6 @@ static void intel_i810_free_by_type(struct agp_memory *curr)
404 else { 400 else {
405 agp_bridge->driver->agp_destroy_page(gart_to_virt(curr->memory[0]), 401 agp_bridge->driver->agp_destroy_page(gart_to_virt(curr->memory[0]),
406 AGP_PAGE_DESTROY_UNMAP); 402 AGP_PAGE_DESTROY_UNMAP);
407 global_flush_tlb();
408 agp_bridge->driver->agp_destroy_page(gart_to_virt(curr->memory[0]), 403 agp_bridge->driver->agp_destroy_page(gart_to_virt(curr->memory[0]),
409 AGP_PAGE_DESTROY_FREE); 404 AGP_PAGE_DESTROY_FREE);
410 } 405 }
diff --git a/drivers/video/vermilion/vermilion.c b/drivers/video/vermilion/vermilion.c
index fb72778dee48..1c656667b937 100644
--- a/drivers/video/vermilion/vermilion.c
+++ b/drivers/video/vermilion/vermilion.c
@@ -124,13 +124,8 @@ static int vmlfb_alloc_vram_area(struct vram_area *va, unsigned max_order,
124 /* 124 /*
125 * Change caching policy of the linear kernel map to avoid 125 * Change caching policy of the linear kernel map to avoid
126 * mapping type conflicts with user-space mappings. 126 * mapping type conflicts with user-space mappings.
127 * The first global_flush_tlb() is really only there to do a global
128 * wbinvd().
129 */ 127 */
130
131 global_flush_tlb();
132 set_pages_uc(virt_to_page(va->logical), va->size >> PAGE_SHIFT); 128 set_pages_uc(virt_to_page(va->logical), va->size >> PAGE_SHIFT);
133 global_flush_tlb();
134 129
135 printk(KERN_DEBUG MODULE_NAME 130 printk(KERN_DEBUG MODULE_NAME
136 ": Allocated %ld bytes vram area at 0x%08lx\n", 131 ": Allocated %ld bytes vram area at 0x%08lx\n",
@@ -156,7 +151,6 @@ static void vmlfb_free_vram_area(struct vram_area *va)
156 151
157 set_pages_wb(virt_to_page(va->logical), 152 set_pages_wb(virt_to_page(va->logical),
158 va->size >> PAGE_SHIFT); 153 va->size >> PAGE_SHIFT);
159 global_flush_tlb();
160 154
161 /* 155 /*
162 * Decrease the usage count on the pages we've used 156 * Decrease the usage count on the pages we've used
diff --git a/include/asm-x86/agp.h b/include/asm-x86/agp.h
index f6df72561832..0c309b9a5217 100644
--- a/include/asm-x86/agp.h
+++ b/include/asm-x86/agp.h
@@ -12,13 +12,9 @@
12 * page. This avoids data corruption on some CPUs. 12 * page. This avoids data corruption on some CPUs.
13 */ 13 */
14 14
15/*
16 * Caller's responsibility to call global_flush_tlb() for performance
17 * reasons
18 */
19#define map_page_into_agp(page) set_pages_uc(page, 1) 15#define map_page_into_agp(page) set_pages_uc(page, 1)
20#define unmap_page_from_agp(page) set_pages_wb(page, 1) 16#define unmap_page_from_agp(page) set_pages_wb(page, 1)
21#define flush_agp_mappings() global_flush_tlb() 17#define flush_agp_mappings() do { } while (0)
22 18
23/* 19/*
24 * Could use CLFLUSH here if the cpu supports it. But then it would 20 * Could use CLFLUSH here if the cpu supports it. But then it would
diff --git a/include/asm-x86/cacheflush.h b/include/asm-x86/cacheflush.h
index d15ff359d3e3..157da0206ccc 100644
--- a/include/asm-x86/cacheflush.h
+++ b/include/asm-x86/cacheflush.h
@@ -24,7 +24,6 @@
24#define copy_from_user_page(vma, page, vaddr, dst, src, len) \ 24#define copy_from_user_page(vma, page, vaddr, dst, src, len) \
25 memcpy(dst, src, len) 25 memcpy(dst, src, len)
26 26
27void global_flush_tlb(void);
28int __deprecated_for_modules change_page_attr(struct page *page, int numpages, 27int __deprecated_for_modules change_page_attr(struct page *page, int numpages,
29 pgprot_t prot); 28 pgprot_t prot);
30 29
diff --git a/sound/pci/intel8x0.c b/sound/pci/intel8x0.c
index e5650905296e..4bb97646a67a 100644
--- a/sound/pci/intel8x0.c
+++ b/sound/pci/intel8x0.c
@@ -715,7 +715,6 @@ static void fill_nocache(void *buf, int size, int nocache)
715 set_pages_uc(virt_to_page(buf), size); 715 set_pages_uc(virt_to_page(buf), size);
716 else 716 else
717 set_pages_wb(virt_to_page(buf), size); 717 set_pages_wb(virt_to_page(buf), size);
718 global_flush_tlb();
719} 718}
720#else 719#else
721#define fill_nocache(buf, size, nocache) do { ; } while (0) 720#define fill_nocache(buf, size, nocache) do { ; } while (0)