author    Linus Torvalds <torvalds@linux-foundation.org>  2009-01-06 20:10:04 -0500
committer Linus Torvalds <torvalds@linux-foundation.org>  2009-01-06 20:10:04 -0500
commit    f94181da7192f4ed8ccb1b633ea4ce56954df130 (patch)
tree      2e28785f2df447573a11fbdd611dc19eb3fcb794
parent    932adbed6d99cc373fc3433d701b3a594fea872c (diff)
parent    fdbc0450df12cc9cb397f3497db4b0cad7c1a8ff (diff)
Merge branch 'core-fixes-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip

* 'core-fixes-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip:
  rcu: fix rcutorture bug
  rcu: eliminate synchronize_rcu_xxx macro
  rcu: make treercu safe for suspend and resume
  rcu: fix rcutree grace-period-latency bug on small systems
  futex: catch certain asymmetric (get|put)_futex_key calls
  futex: make futex_(get|put)_key() calls symmetric
  locking, percpu counters: introduce separate lock classes
  swiotlb: clean up EXPORT_SYMBOL usage
  swiotlb: remove unnecessary declaration
  swiotlb: replace architecture-specific swiotlb.h with linux/swiotlb.h
  swiotlb: add support for systems with highmem
  swiotlb: store phys address in io_tlb_orig_addr array
  swiotlb: add hwdev to swiotlb_phys_to_bus() / swiotlb_sg_to_bus()
-rw-r--r--  arch/ia64/include/asm/swiotlb.h    39
-rw-r--r--  arch/x86/include/asm/swiotlb.h     38
-rw-r--r--  arch/x86/kernel/pci-swiotlb_64.c    2
-rw-r--r--  include/linux/percpu_counter.h     14
-rw-r--r--  include/linux/rcupdate.h           12
-rw-r--r--  include/linux/swiotlb.h             3
-rw-r--r--  kernel/futex.c                     72
-rw-r--r--  kernel/rcupdate.c                  11
-rw-r--r--  kernel/rcupreempt.c                11
-rw-r--r--  kernel/rcutorture.c                18
-rw-r--r--  kernel/rcutree.c                   13
-rw-r--r--  lib/percpu_counter.c               18
-rw-r--r--  lib/proportions.c                   6
-rw-r--r--  lib/swiotlb.c                     237
-rw-r--r--  mm/backing-dev.c                    2

15 files changed, 195 insertions, 301 deletions
diff --git a/arch/ia64/include/asm/swiotlb.h b/arch/ia64/include/asm/swiotlb.h
index fb79423834d0..dcbaea7ce128 100644
--- a/arch/ia64/include/asm/swiotlb.h
+++ b/arch/ia64/include/asm/swiotlb.h
@@ -2,44 +2,7 @@
 #define ASM_IA64__SWIOTLB_H
 
 #include <linux/dma-mapping.h>
-
-/* SWIOTLB interface */
-
-extern dma_addr_t swiotlb_map_single(struct device *hwdev, void *ptr,
-				     size_t size, int dir);
-extern void *swiotlb_alloc_coherent(struct device *hwdev, size_t size,
-				    dma_addr_t *dma_handle, gfp_t flags);
-extern void swiotlb_unmap_single(struct device *hwdev, dma_addr_t dev_addr,
-				 size_t size, int dir);
-extern void swiotlb_sync_single_for_cpu(struct device *hwdev,
-					dma_addr_t dev_addr,
-					size_t size, int dir);
-extern void swiotlb_sync_single_for_device(struct device *hwdev,
-					   dma_addr_t dev_addr,
-					   size_t size, int dir);
-extern void swiotlb_sync_single_range_for_cpu(struct device *hwdev,
-					      dma_addr_t dev_addr,
-					      unsigned long offset,
-					      size_t size, int dir);
-extern void swiotlb_sync_single_range_for_device(struct device *hwdev,
-						 dma_addr_t dev_addr,
-						 unsigned long offset,
-						 size_t size, int dir);
-extern void swiotlb_sync_sg_for_cpu(struct device *hwdev,
-				    struct scatterlist *sg, int nelems,
-				    int dir);
-extern void swiotlb_sync_sg_for_device(struct device *hwdev,
-				       struct scatterlist *sg, int nelems,
-				       int dir);
-extern int swiotlb_map_sg(struct device *hwdev, struct scatterlist *sg,
-			  int nents, int direction);
-extern void swiotlb_unmap_sg(struct device *hwdev, struct scatterlist *sg,
-			     int nents, int direction);
-extern int swiotlb_dma_mapping_error(struct device *hwdev, dma_addr_t dma_addr);
-extern void swiotlb_free_coherent(struct device *hwdev, size_t size,
-				  void *vaddr, dma_addr_t dma_handle);
-extern int swiotlb_dma_supported(struct device *hwdev, u64 mask);
-extern void swiotlb_init(void);
+#include <linux/swiotlb.h>
 
 extern int swiotlb_force;
 
diff --git a/arch/x86/include/asm/swiotlb.h b/arch/x86/include/asm/swiotlb.h
index 51fb2c76ad74..b9e4e20174fb 100644
--- a/arch/x86/include/asm/swiotlb.h
+++ b/arch/x86/include/asm/swiotlb.h
@@ -1,46 +1,10 @@
 #ifndef _ASM_X86_SWIOTLB_H
 #define _ASM_X86_SWIOTLB_H
 
-#include <asm/dma-mapping.h>
+#include <linux/swiotlb.h>
 
 /* SWIOTLB interface */
 
-extern dma_addr_t swiotlb_map_single(struct device *hwdev, void *ptr,
-				     size_t size, int dir);
-extern void *swiotlb_alloc_coherent(struct device *hwdev, size_t size,
-				    dma_addr_t *dma_handle, gfp_t flags);
-extern void swiotlb_unmap_single(struct device *hwdev, dma_addr_t dev_addr,
-				 size_t size, int dir);
-extern void swiotlb_sync_single_for_cpu(struct device *hwdev,
-					dma_addr_t dev_addr,
-					size_t size, int dir);
-extern void swiotlb_sync_single_for_device(struct device *hwdev,
-					   dma_addr_t dev_addr,
-					   size_t size, int dir);
-extern void swiotlb_sync_single_range_for_cpu(struct device *hwdev,
-					      dma_addr_t dev_addr,
-					      unsigned long offset,
-					      size_t size, int dir);
-extern void swiotlb_sync_single_range_for_device(struct device *hwdev,
-						 dma_addr_t dev_addr,
-						 unsigned long offset,
-						 size_t size, int dir);
-extern void swiotlb_sync_sg_for_cpu(struct device *hwdev,
-				    struct scatterlist *sg, int nelems,
-				    int dir);
-extern void swiotlb_sync_sg_for_device(struct device *hwdev,
-				       struct scatterlist *sg, int nelems,
-				       int dir);
-extern int swiotlb_map_sg(struct device *hwdev, struct scatterlist *sg,
-			  int nents, int direction);
-extern void swiotlb_unmap_sg(struct device *hwdev, struct scatterlist *sg,
-			     int nents, int direction);
-extern int swiotlb_dma_mapping_error(struct device *hwdev, dma_addr_t dma_addr);
-extern void swiotlb_free_coherent(struct device *hwdev, size_t size,
-				  void *vaddr, dma_addr_t dma_handle);
-extern int swiotlb_dma_supported(struct device *hwdev, u64 mask);
-extern void swiotlb_init(void);
-
 extern int swiotlb_force;
 
 #ifdef CONFIG_SWIOTLB
diff --git a/arch/x86/kernel/pci-swiotlb_64.c b/arch/x86/kernel/pci-swiotlb_64.c
index 8cba3749a511..d59c91747665 100644
--- a/arch/x86/kernel/pci-swiotlb_64.c
+++ b/arch/x86/kernel/pci-swiotlb_64.c
@@ -23,7 +23,7 @@ void *swiotlb_alloc(unsigned order, unsigned long nslabs)
 	return (void *)__get_free_pages(GFP_DMA | __GFP_NOWARN, order);
 }
 
-dma_addr_t swiotlb_phys_to_bus(phys_addr_t paddr)
+dma_addr_t swiotlb_phys_to_bus(struct device *hwdev, phys_addr_t paddr)
 {
 	return paddr;
 }
diff --git a/include/linux/percpu_counter.h b/include/linux/percpu_counter.h
index 99de7a31bab8..a7684a513994 100644
--- a/include/linux/percpu_counter.h
+++ b/include/linux/percpu_counter.h
@@ -26,8 +26,16 @@ struct percpu_counter {
 
 extern int percpu_counter_batch;
 
-int percpu_counter_init(struct percpu_counter *fbc, s64 amount);
-int percpu_counter_init_irq(struct percpu_counter *fbc, s64 amount);
+int __percpu_counter_init(struct percpu_counter *fbc, s64 amount,
+			  struct lock_class_key *key);
+
+#define percpu_counter_init(fbc, value)					\
+	({								\
+		static struct lock_class_key __key;			\
+									\
+		__percpu_counter_init(fbc, value, &__key);		\
+	})
+
 void percpu_counter_destroy(struct percpu_counter *fbc);
 void percpu_counter_set(struct percpu_counter *fbc, s64 amount);
 void __percpu_counter_add(struct percpu_counter *fbc, s64 amount, s32 batch);
@@ -81,8 +89,6 @@ static inline int percpu_counter_init(struct percpu_counter *fbc, s64 amount)
 	return 0;
 }
 
-#define percpu_counter_init_irq percpu_counter_init
-
 static inline void percpu_counter_destroy(struct percpu_counter *fbc)
 {
 }
diff --git a/include/linux/rcupdate.h b/include/linux/rcupdate.h
index 1168fbcea8d4..921340a7b71c 100644
--- a/include/linux/rcupdate.h
+++ b/include/linux/rcupdate.h
@@ -204,18 +204,6 @@ struct rcu_synchronize {
 
 extern void wakeme_after_rcu(struct rcu_head *head);
 
-#define synchronize_rcu_xxx(name, func) \
-void name(void) \
-{ \
-	struct rcu_synchronize rcu; \
-	\
-	init_completion(&rcu.completion); \
-	/* Will wake me after RCU finished. */ \
-	func(&rcu.head, wakeme_after_rcu); \
-	/* Wait for it. */ \
-	wait_for_completion(&rcu.completion); \
-}
-
 /**
  * synchronize_sched - block until all CPUs have exited any non-preemptive
  * kernel code sequences.
diff --git a/include/linux/swiotlb.h b/include/linux/swiotlb.h
index 325af1de0351..dedd3c0cfe30 100644
--- a/include/linux/swiotlb.h
+++ b/include/linux/swiotlb.h
@@ -27,7 +27,8 @@ swiotlb_init(void);
 extern void *swiotlb_alloc_boot(size_t bytes, unsigned long nslabs);
 extern void *swiotlb_alloc(unsigned order, unsigned long nslabs);
 
-extern dma_addr_t swiotlb_phys_to_bus(phys_addr_t address);
+extern dma_addr_t swiotlb_phys_to_bus(struct device *hwdev,
+				      phys_addr_t address);
 extern phys_addr_t swiotlb_bus_to_phys(dma_addr_t address);
 
 extern int swiotlb_arch_range_needs_mapping(void *ptr, size_t size);
diff --git a/kernel/futex.c b/kernel/futex.c
index 7c6cbabe52b3..002aa189eb09 100644
--- a/kernel/futex.c
+++ b/kernel/futex.c
@@ -170,8 +170,11 @@ static void get_futex_key_refs(union futex_key *key)
  */
 static void drop_futex_key_refs(union futex_key *key)
 {
-	if (!key->both.ptr)
+	if (!key->both.ptr) {
+		/* If we're here then we tried to put a key we failed to get */
+		WARN_ON_ONCE(1);
 		return;
+	}
 
 	switch (key->both.offset & (FUT_OFF_INODE|FUT_OFF_MMSHARED)) {
 	case FUT_OFF_INODE:
@@ -730,8 +733,8 @@ static int futex_wake(u32 __user *uaddr, int fshared, int nr_wake, u32 bitset)
 	}
 
 	spin_unlock(&hb->lock);
-out:
 	put_futex_key(fshared, &key);
+out:
 	return ret;
 }
 
@@ -755,7 +758,7 @@ retryfull:
 		goto out;
 	ret = get_futex_key(uaddr2, fshared, &key2);
 	if (unlikely(ret != 0))
-		goto out;
+		goto out_put_key1;
 
 	hb1 = hash_futex(&key1);
 	hb2 = hash_futex(&key2);
@@ -777,12 +780,12 @@ retry:
 		 * but we might get them from range checking
 		 */
 		ret = op_ret;
-		goto out;
+		goto out_put_keys;
 #endif
 
 		if (unlikely(op_ret != -EFAULT)) {
 			ret = op_ret;
-			goto out;
+			goto out_put_keys;
 		}
 
 		/*
@@ -796,7 +799,7 @@ retry:
 			ret = futex_handle_fault((unsigned long)uaddr2,
 						 attempt);
 			if (ret)
-				goto out;
+				goto out_put_keys;
 			goto retry;
 		}
 
@@ -834,10 +837,11 @@ retry:
 	spin_unlock(&hb1->lock);
 	if (hb1 != hb2)
 		spin_unlock(&hb2->lock);
-out:
+out_put_keys:
 	put_futex_key(fshared, &key2);
+out_put_key1:
 	put_futex_key(fshared, &key1);
-
+out:
 	return ret;
 }
 
@@ -854,13 +858,13 @@ static int futex_requeue(u32 __user *uaddr1, int fshared, u32 __user *uaddr2,
 	struct futex_q *this, *next;
 	int ret, drop_count = 0;
 
- retry:
+retry:
 	ret = get_futex_key(uaddr1, fshared, &key1);
 	if (unlikely(ret != 0))
 		goto out;
 	ret = get_futex_key(uaddr2, fshared, &key2);
 	if (unlikely(ret != 0))
-		goto out;
+		goto out_put_key1;
 
 	hb1 = hash_futex(&key1);
 	hb2 = hash_futex(&key2);
@@ -882,7 +886,7 @@ static int futex_requeue(u32 __user *uaddr1, int fshared, u32 __user *uaddr2,
 			if (!ret)
 				goto retry;
 
-			return ret;
+			goto out_put_keys;
 		}
 		if (curval != *cmpval) {
 			ret = -EAGAIN;
@@ -927,9 +931,11 @@ out_unlock:
 	while (--drop_count >= 0)
 		drop_futex_key_refs(&key1);
 
-out:
+out_put_keys:
 	put_futex_key(fshared, &key2);
+out_put_key1:
 	put_futex_key(fshared, &key1);
+out:
 	return ret;
 }
 
@@ -990,7 +996,7 @@ static int unqueue_me(struct futex_q *q)
 	int ret = 0;
 
 	/* In the common case we don't take the spinlock, which is nice. */
- retry:
+retry:
 	lock_ptr = q->lock_ptr;
 	barrier();
 	if (lock_ptr != NULL) {
@@ -1172,11 +1178,11 @@ static int futex_wait(u32 __user *uaddr, int fshared,
 
 	q.pi_state = NULL;
 	q.bitset = bitset;
- retry:
+retry:
 	q.key = FUTEX_KEY_INIT;
 	ret = get_futex_key(uaddr, fshared, &q.key);
 	if (unlikely(ret != 0))
-		goto out_release_sem;
+		goto out;
 
 	hb = queue_lock(&q);
 
@@ -1204,6 +1210,7 @@ static int futex_wait(u32 __user *uaddr, int fshared,
 
 	if (unlikely(ret)) {
 		queue_unlock(&q, hb);
+		put_futex_key(fshared, &q.key);
 
 		ret = get_user(uval, uaddr);
 
@@ -1213,7 +1220,7 @@ static int futex_wait(u32 __user *uaddr, int fshared,
 	}
 	ret = -EWOULDBLOCK;
 	if (uval != val)
-		goto out_unlock_release_sem;
+		goto out_unlock_put_key;
 
 	/* Only actually queue if *uaddr contained val. */
 	queue_me(&q, hb);
@@ -1305,11 +1312,11 @@ static int futex_wait(u32 __user *uaddr, int fshared,
 		return -ERESTART_RESTARTBLOCK;
 	}
 
- out_unlock_release_sem:
+out_unlock_put_key:
 	queue_unlock(&q, hb);
-
- out_release_sem:
 	put_futex_key(fshared, &q.key);
+
+out:
 	return ret;
 }
 
@@ -1358,16 +1365,16 @@ static int futex_lock_pi(u32 __user *uaddr, int fshared,
 	}
 
 	q.pi_state = NULL;
- retry:
+retry:
 	q.key = FUTEX_KEY_INIT;
 	ret = get_futex_key(uaddr, fshared, &q.key);
 	if (unlikely(ret != 0))
-		goto out_release_sem;
+		goto out;
 
- retry_unlocked:
+retry_unlocked:
 	hb = queue_lock(&q);
 
- retry_locked:
+retry_locked:
 	ret = lock_taken = 0;
 
 	/*
@@ -1388,14 +1395,14 @@ static int futex_lock_pi(u32 __user *uaddr, int fshared,
 	 */
 	if (unlikely((curval & FUTEX_TID_MASK) == task_pid_vnr(current))) {
 		ret = -EDEADLK;
-		goto out_unlock_release_sem;
+		goto out_unlock_put_key;
 	}
 
 	/*
 	 * Surprise - we got the lock. Just return to userspace:
 	 */
 	if (unlikely(!curval))
-		goto out_unlock_release_sem;
+		goto out_unlock_put_key;
 
 	uval = curval;
 
@@ -1431,7 +1438,7 @@ static int futex_lock_pi(u32 __user *uaddr, int fshared,
 	 * We took the lock due to owner died take over.
 	 */
 	if (unlikely(lock_taken))
-		goto out_unlock_release_sem;
+		goto out_unlock_put_key;
 
 	/*
 	 * We dont have the lock. Look up the PI state (or create it if
@@ -1470,7 +1477,7 @@ static int futex_lock_pi(u32 __user *uaddr, int fshared,
 			goto retry_locked;
 		}
 		default:
-			goto out_unlock_release_sem;
+			goto out_unlock_put_key;
 		}
 	}
 
@@ -1561,16 +1568,17 @@ static int futex_lock_pi(u32 __user *uaddr, int fshared,
 	destroy_hrtimer_on_stack(&to->timer);
 	return ret != -EINTR ? ret : -ERESTARTNOINTR;
 
- out_unlock_release_sem:
+out_unlock_put_key:
 	queue_unlock(&q, hb);
 
- out_release_sem:
+out_put_key:
 	put_futex_key(fshared, &q.key);
+out:
 	if (to)
 		destroy_hrtimer_on_stack(&to->timer);
 	return ret;
 
- uaddr_faulted:
+uaddr_faulted:
 	/*
 	 * We have to r/w *(int __user *)uaddr, and we have to modify it
 	 * atomically. Therefore, if we continue to fault after get_user()
@@ -1583,7 +1591,7 @@ static int futex_lock_pi(u32 __user *uaddr, int fshared,
 	if (attempt++) {
 		ret = futex_handle_fault((unsigned long)uaddr, attempt);
 		if (ret)
-			goto out_release_sem;
+			goto out_put_key;
 		goto retry_unlocked;
 	}
 
@@ -1675,9 +1683,9 @@ retry_unlocked:
 
 out_unlock:
 	spin_unlock(&hb->lock);
-out:
 	put_futex_key(fshared, &key);
 
+out:
 	return ret;
 
 pi_faulted:
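
The futex hunks above all converge on one idiom: every successful
get_futex_key() is paired with exactly one put_futex_key(), so the error
paths form a ladder of labels that unwinds only what was actually taken,
and the success path falls through the same ladder. A self-contained
sketch of the pattern; resource_get()/resource_put() and do_work() are
illustrative stand-ins for the futex key helpers:

	#include <stdio.h>

	static int resource_get(const char *name)
	{
		printf("get %s\n", name);
		return 0;		/* 0 on success, like get_futex_key() */
	}

	static void resource_put(const char *name)
	{
		printf("put %s\n", name);
	}

	static int do_work(void)
	{
		return 0;		/* may fail with both keys held */
	}

	static int pair_op(void)
	{
		int ret;

		ret = resource_get("key1");
		if (ret != 0)
			goto out;		/* nothing held yet */
		ret = resource_get("key2");
		if (ret != 0)
			goto out_put_key1;	/* only key1 held */

		ret = do_work();
		if (ret != 0)
			goto out_put_keys;	/* both held */

		/* success falls through the same unwind ladder */
	out_put_keys:
		resource_put("key2");
	out_put_key1:
		resource_put("key1");
	out:
		return ret;
	}

	int main(void)
	{
		return pair_op();
	}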
diff --git a/kernel/rcupdate.c b/kernel/rcupdate.c
index ad63af8b2521..d92a76a881aa 100644
--- a/kernel/rcupdate.c
+++ b/kernel/rcupdate.c
@@ -77,8 +77,15 @@ void wakeme_after_rcu(struct rcu_head *head)
  * sections are delimited by rcu_read_lock() and rcu_read_unlock(),
  * and may be nested.
  */
-void synchronize_rcu(void);	/* Makes kernel-doc tools happy */
-synchronize_rcu_xxx(synchronize_rcu, call_rcu)
+void synchronize_rcu(void)
+{
+	struct rcu_synchronize rcu;
+	init_completion(&rcu.completion);
+	/* Will wake me after RCU finished. */
+	call_rcu(&rcu.head, wakeme_after_rcu);
+	/* Wait for it. */
+	wait_for_completion(&rcu.completion);
+}
 EXPORT_SYMBOL_GPL(synchronize_rcu);
 
 static void rcu_barrier_callback(struct rcu_head *notused)
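
The open-coded synchronize_rcu() above is the standard completion pattern:
queue a callback that fires after a grace period, then block on a
completion that the callback signals. Modeled in userspace with pthreads,
as a sketch of the pattern only -- init_completion(), complete() and
wait_for_completion() are re-implemented here, not the kernel primitives:

	#include <pthread.h>
	#include <stdio.h>

	struct completion {
		pthread_mutex_t lock;
		pthread_cond_t cond;
		int done;
	};

	static void init_completion(struct completion *c)
	{
		pthread_mutex_init(&c->lock, NULL);
		pthread_cond_init(&c->cond, NULL);
		c->done = 0;
	}

	static void complete(struct completion *c)
	{
		pthread_mutex_lock(&c->lock);
		c->done = 1;
		pthread_cond_signal(&c->cond);
		pthread_mutex_unlock(&c->lock);
	}

	static void wait_for_completion(struct completion *c)
	{
		pthread_mutex_lock(&c->lock);
		while (!c->done)
			pthread_cond_wait(&c->cond, &c->lock);
		pthread_mutex_unlock(&c->lock);
	}

	static void *grace_period(void *arg)
	{
		complete(arg);		/* stands in for wakeme_after_rcu() */
		return NULL;
	}

	int main(void)
	{
		struct completion rcu;
		pthread_t t;

		init_completion(&rcu);
		pthread_create(&t, NULL, grace_period, &rcu);
		wait_for_completion(&rcu);	/* blocks until callback runs */
		pthread_join(t, NULL);
		puts("grace period elapsed");
		return 0;
	}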
diff --git a/kernel/rcupreempt.c b/kernel/rcupreempt.c
index f9dc8f3720f6..33cfc50781f9 100644
--- a/kernel/rcupreempt.c
+++ b/kernel/rcupreempt.c
@@ -1177,7 +1177,16 @@ EXPORT_SYMBOL_GPL(call_rcu_sched);
  * in -rt this does -not- necessarily result in all currently executing
  * interrupt -handlers- having completed.
  */
-synchronize_rcu_xxx(__synchronize_sched, call_rcu_sched)
+void __synchronize_sched(void)
+{
+	struct rcu_synchronize rcu;
+
+	init_completion(&rcu.completion);
+	/* Will wake me after RCU finished. */
+	call_rcu_sched(&rcu.head, wakeme_after_rcu);
+	/* Wait for it. */
+	wait_for_completion(&rcu.completion);
+}
 EXPORT_SYMBOL_GPL(__synchronize_sched);
 
 /*
diff --git a/kernel/rcutorture.c b/kernel/rcutorture.c
index 3245b40952c6..1cff28db56b6 100644
--- a/kernel/rcutorture.c
+++ b/kernel/rcutorture.c
@@ -136,7 +136,7 @@ static int stutter_pause_test = 0;
 #endif
 int rcutorture_runnable = RCUTORTURE_RUNNABLE_INIT;
 
-#define FULLSTOP_SIGNALED 1	/* Bail due to signal. */
+#define FULLSTOP_SHUTDOWN 1	/* Bail due to system shutdown/panic. */
 #define FULLSTOP_CLEANUP  2	/* Orderly shutdown. */
 static int fullstop;		/* stop generating callbacks at test end. */
 DEFINE_MUTEX(fullstop_mutex);	/* protect fullstop transitions and */
@@ -151,12 +151,10 @@ rcutorture_shutdown_notify(struct notifier_block *unused1,
 {
 	if (fullstop)
 		return NOTIFY_DONE;
-	if (signal_pending(current)) {
-		mutex_lock(&fullstop_mutex);
-		if (!ACCESS_ONCE(fullstop))
-			fullstop = FULLSTOP_SIGNALED;
-		mutex_unlock(&fullstop_mutex);
-	}
+	mutex_lock(&fullstop_mutex);
+	if (!fullstop)
+		fullstop = FULLSTOP_SHUTDOWN;
+	mutex_unlock(&fullstop_mutex);
 	return NOTIFY_DONE;
 }
 
@@ -624,7 +622,7 @@ rcu_torture_writer(void *arg)
 		rcu_stutter_wait();
 	} while (!kthread_should_stop() && !fullstop);
 	VERBOSE_PRINTK_STRING("rcu_torture_writer task stopping");
-	while (!kthread_should_stop() && fullstop != FULLSTOP_SIGNALED)
+	while (!kthread_should_stop() && fullstop != FULLSTOP_SHUTDOWN)
 		schedule_timeout_uninterruptible(1);
 	return 0;
 }
@@ -649,7 +647,7 @@ rcu_torture_fakewriter(void *arg)
 	} while (!kthread_should_stop() && !fullstop);
 
 	VERBOSE_PRINTK_STRING("rcu_torture_fakewriter task stopping");
-	while (!kthread_should_stop() && fullstop != FULLSTOP_SIGNALED)
+	while (!kthread_should_stop() && fullstop != FULLSTOP_SHUTDOWN)
 		schedule_timeout_uninterruptible(1);
 	return 0;
 }
@@ -759,7 +757,7 @@ rcu_torture_reader(void *arg)
 	VERBOSE_PRINTK_STRING("rcu_torture_reader task stopping");
 	if (irqreader && cur_ops->irqcapable)
 		del_timer_sync(&t);
-	while (!kthread_should_stop() && fullstop != FULLSTOP_SIGNALED)
+	while (!kthread_should_stop() && fullstop != FULLSTOP_SHUTDOWN)
 		schedule_timeout_uninterruptible(1);
 	return 0;
 }
diff --git a/kernel/rcutree.c b/kernel/rcutree.c
index a342b032112c..f2d8638e6c60 100644
--- a/kernel/rcutree.c
+++ b/kernel/rcutree.c
@@ -79,7 +79,10 @@ struct rcu_state rcu_bh_state = RCU_STATE_INITIALIZER(rcu_bh_state);
 DEFINE_PER_CPU(struct rcu_data, rcu_bh_data);
 
 #ifdef CONFIG_NO_HZ
-DEFINE_PER_CPU(struct rcu_dynticks, rcu_dynticks);
+DEFINE_PER_CPU(struct rcu_dynticks, rcu_dynticks) = {
+	.dynticks_nesting = 1,
+	.dynticks = 1,
+};
 #endif /* #ifdef CONFIG_NO_HZ */
 
 static int blimit = 10;		/* Maximum callbacks per softirq. */
@@ -572,6 +575,7 @@ rcu_start_gp(struct rcu_state *rsp, unsigned long flags)
 	/* Special-case the common single-level case. */
 	if (NUM_RCU_NODES == 1) {
 		rnp->qsmask = rnp->qsmaskinit;
+		rsp->signaled = RCU_SIGNAL_INIT; /* force_quiescent_state OK. */
 		spin_unlock_irqrestore(&rnp->lock, flags);
 		return;
 	}
@@ -1379,13 +1383,6 @@ rcu_init_percpu_data(int cpu, struct rcu_state *rsp)
 
 static void __cpuinit rcu_online_cpu(int cpu)
 {
-#ifdef CONFIG_NO_HZ
-	struct rcu_dynticks *rdtp = &per_cpu(rcu_dynticks, cpu);
-
-	rdtp->dynticks_nesting = 1;
-	rdtp->dynticks |= 1;	/* need consecutive #s even for hotplug. */
-	rdtp->dynticks_nmi = (rdtp->dynticks_nmi + 1) & ~0x1;
-#endif /* #ifdef CONFIG_NO_HZ */
 	rcu_init_percpu_data(cpu, &rcu_state);
 	rcu_init_percpu_data(cpu, &rcu_bh_state);
 	open_softirq(RCU_SOFTIRQ, rcu_process_callbacks);
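
The static initializer above matters because rcu_online_cpu() no longer
touches the dynticks state at hotplug time: the counter must start odd,
meaning "this CPU is not in dynticks-idle", and keep consistent values
across suspend/resume. A tiny sketch of that parity convention; the
values are illustrative, and the kernel flips them in its irq/NMI
entry and exit hooks:

	#include <stdio.h>

	int main(void)
	{
		int dynticks = 1;	/* boot/online state: active (odd) */

		printf("active: %s\n", (dynticks & 1) ? "yes" : "no");
		dynticks++;		/* enter dynticks-idle -> even */
		printf("active: %s\n", (dynticks & 1) ? "yes" : "no");
		dynticks++;		/* leave idle -> odd again */
		printf("active: %s\n", (dynticks & 1) ? "yes" : "no");
		return 0;
	}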
diff --git a/lib/percpu_counter.c b/lib/percpu_counter.c
index a60bd8046095..aeaa6d734447 100644
--- a/lib/percpu_counter.c
+++ b/lib/percpu_counter.c
@@ -66,11 +66,11 @@ s64 __percpu_counter_sum(struct percpu_counter *fbc)
 }
 EXPORT_SYMBOL(__percpu_counter_sum);
 
-static struct lock_class_key percpu_counter_irqsafe;
-
-int percpu_counter_init(struct percpu_counter *fbc, s64 amount)
+int __percpu_counter_init(struct percpu_counter *fbc, s64 amount,
+			  struct lock_class_key *key)
 {
 	spin_lock_init(&fbc->lock);
+	lockdep_set_class(&fbc->lock, key);
 	fbc->count = amount;
 	fbc->counters = alloc_percpu(s32);
 	if (!fbc->counters)
@@ -82,17 +82,7 @@ int percpu_counter_init(struct percpu_counter *fbc, s64 amount)
 #endif
 	return 0;
 }
-EXPORT_SYMBOL(percpu_counter_init);
-
-int percpu_counter_init_irq(struct percpu_counter *fbc, s64 amount)
-{
-	int err;
-
-	err = percpu_counter_init(fbc, amount);
-	if (!err)
-		lockdep_set_class(&fbc->lock, &percpu_counter_irqsafe);
-	return err;
-}
+EXPORT_SYMBOL(__percpu_counter_init);
 
 void percpu_counter_destroy(struct percpu_counter *fbc)
 {
diff --git a/lib/proportions.c b/lib/proportions.c
index 3fda810faf0d..d50746a79de2 100644
--- a/lib/proportions.c
+++ b/lib/proportions.c
@@ -83,11 +83,11 @@ int prop_descriptor_init(struct prop_descriptor *pd, int shift)
 	pd->index = 0;
 	pd->pg[0].shift = shift;
 	mutex_init(&pd->mutex);
-	err = percpu_counter_init_irq(&pd->pg[0].events, 0);
+	err = percpu_counter_init(&pd->pg[0].events, 0);
 	if (err)
 		goto out;
 
-	err = percpu_counter_init_irq(&pd->pg[1].events, 0);
+	err = percpu_counter_init(&pd->pg[1].events, 0);
 	if (err)
 		percpu_counter_destroy(&pd->pg[0].events);
 
@@ -193,7 +193,7 @@ int prop_local_init_percpu(struct prop_local_percpu *pl)
 	spin_lock_init(&pl->lock);
 	pl->shift = 0;
 	pl->period = 0;
-	return percpu_counter_init_irq(&pl->events, 0);
+	return percpu_counter_init(&pl->events, 0);
 }
 
 void prop_local_destroy_percpu(struct prop_local_percpu *pl)
diff --git a/lib/swiotlb.c b/lib/swiotlb.c
index c2a4e6401456..1f991acc2a05 100644
--- a/lib/swiotlb.c
+++ b/lib/swiotlb.c
@@ -14,6 +14,7 @@
  * 04/07/.. ak		Better overflow handling. Assorted fixes.
  * 05/09/10 linville	Add support for syncing ranges, support syncing for
  *			DMA_BIDIRECTIONAL mappings, miscellaneous cleanup.
+ * 08/12/11 beckyb	Add highmem support
  */
 
 #include <linux/cache.h>
@@ -21,8 +22,9 @@
 #include <linux/mm.h>
 #include <linux/module.h>
 #include <linux/spinlock.h>
-#include <linux/swiotlb.h>
 #include <linux/string.h>
+#include <linux/swiotlb.h>
+#include <linux/pfn.h>
 #include <linux/types.h>
 #include <linux/ctype.h>
 #include <linux/highmem.h>
@@ -88,10 +90,7 @@ static unsigned int io_tlb_index;
  * We need to save away the original address corresponding to a mapped entry
  * for the sync operations.
  */
-static struct swiotlb_phys_addr {
-	struct page *page;
-	unsigned int offset;
-} *io_tlb_orig_addr;
+static phys_addr_t *io_tlb_orig_addr;
 
 /*
  * Protect the above data structures in the map and unmap calls
@@ -125,7 +124,7 @@ void * __weak swiotlb_alloc(unsigned order, unsigned long nslabs)
 	return (void *)__get_free_pages(GFP_DMA | __GFP_NOWARN, order);
 }
 
-dma_addr_t __weak swiotlb_phys_to_bus(phys_addr_t paddr)
+dma_addr_t __weak swiotlb_phys_to_bus(struct device *hwdev, phys_addr_t paddr)
 {
 	return paddr;
 }
@@ -135,9 +134,10 @@ phys_addr_t __weak swiotlb_bus_to_phys(dma_addr_t baddr)
 	return baddr;
 }
 
-static dma_addr_t swiotlb_virt_to_bus(volatile void *address)
+static dma_addr_t swiotlb_virt_to_bus(struct device *hwdev,
+				      volatile void *address)
 {
-	return swiotlb_phys_to_bus(virt_to_phys(address));
+	return swiotlb_phys_to_bus(hwdev, virt_to_phys(address));
 }
 
 static void *swiotlb_bus_to_virt(dma_addr_t address)
@@ -150,35 +150,18 @@ int __weak swiotlb_arch_range_needs_mapping(void *ptr, size_t size)
 	return 0;
 }
 
-static dma_addr_t swiotlb_sg_to_bus(struct scatterlist *sg)
-{
-	return swiotlb_phys_to_bus(page_to_phys(sg_page(sg)) + sg->offset);
-}
-
 static void swiotlb_print_info(unsigned long bytes)
 {
 	phys_addr_t pstart, pend;
-	dma_addr_t bstart, bend;
 
 	pstart = virt_to_phys(io_tlb_start);
 	pend = virt_to_phys(io_tlb_end);
 
-	bstart = swiotlb_phys_to_bus(pstart);
-	bend = swiotlb_phys_to_bus(pend);
-
 	printk(KERN_INFO "Placing %luMB software IO TLB between %p - %p\n",
 	       bytes >> 20, io_tlb_start, io_tlb_end);
-	if (pstart != bstart || pend != bend)
-		printk(KERN_INFO "software IO TLB at phys %#llx - %#llx"
-		       " bus %#llx - %#llx\n",
-		       (unsigned long long)pstart,
-		       (unsigned long long)pend,
-		       (unsigned long long)bstart,
-		       (unsigned long long)bend);
-	else
-		printk(KERN_INFO "software IO TLB at phys %#llx - %#llx\n",
-		       (unsigned long long)pstart,
-		       (unsigned long long)pend);
+	printk(KERN_INFO "software IO TLB at phys %#llx - %#llx\n",
+	       (unsigned long long)pstart,
+	       (unsigned long long)pend);
 }
 
 /*
@@ -214,7 +197,7 @@ swiotlb_init_with_default_size(size_t default_size)
 	for (i = 0; i < io_tlb_nslabs; i++)
 		io_tlb_list[i] = IO_TLB_SEGSIZE - OFFSET(i, IO_TLB_SEGSIZE);
 	io_tlb_index = 0;
-	io_tlb_orig_addr = alloc_bootmem(io_tlb_nslabs * sizeof(struct swiotlb_phys_addr));
+	io_tlb_orig_addr = alloc_bootmem(io_tlb_nslabs * sizeof(phys_addr_t));
 
 	/*
 	 * Get the overflow emergency buffer
@@ -288,12 +271,14 @@ swiotlb_late_init_with_default_size(size_t default_size)
 		io_tlb_list[i] = IO_TLB_SEGSIZE - OFFSET(i, IO_TLB_SEGSIZE);
 	io_tlb_index = 0;
 
-	io_tlb_orig_addr = (struct swiotlb_phys_addr *)__get_free_pages(GFP_KERNEL,
-	                           get_order(io_tlb_nslabs * sizeof(struct swiotlb_phys_addr)));
+	io_tlb_orig_addr = (phys_addr_t *)
+		__get_free_pages(GFP_KERNEL,
+				 get_order(io_tlb_nslabs *
+					   sizeof(phys_addr_t)));
 	if (!io_tlb_orig_addr)
 		goto cleanup3;
 
-	memset(io_tlb_orig_addr, 0, io_tlb_nslabs * sizeof(struct swiotlb_phys_addr));
+	memset(io_tlb_orig_addr, 0, io_tlb_nslabs * sizeof(phys_addr_t));
 
 	/*
 	 * Get the overflow emergency buffer
@@ -308,8 +293,8 @@ swiotlb_late_init_with_default_size(size_t default_size)
 	return 0;
 
 cleanup4:
-	free_pages((unsigned long)io_tlb_orig_addr, get_order(io_tlb_nslabs *
-	                                                      sizeof(char *)));
+	free_pages((unsigned long)io_tlb_orig_addr,
+		   get_order(io_tlb_nslabs * sizeof(phys_addr_t)));
 	io_tlb_orig_addr = NULL;
 cleanup3:
 	free_pages((unsigned long)io_tlb_list, get_order(io_tlb_nslabs *
@@ -340,51 +325,44 @@ static int is_swiotlb_buffer(char *addr)
 	return addr >= io_tlb_start && addr < io_tlb_end;
 }
 
-static struct swiotlb_phys_addr swiotlb_bus_to_phys_addr(char *dma_addr)
-{
-	int index = (dma_addr - io_tlb_start) >> IO_TLB_SHIFT;
-	struct swiotlb_phys_addr buffer = io_tlb_orig_addr[index];
-	buffer.offset += (long)dma_addr & ((1 << IO_TLB_SHIFT) - 1);
-	buffer.page += buffer.offset >> PAGE_SHIFT;
-	buffer.offset &= PAGE_SIZE - 1;
-	return buffer;
-}
-
-static void
-__sync_single(struct swiotlb_phys_addr buffer, char *dma_addr, size_t size, int dir)
-{
-	if (PageHighMem(buffer.page)) {
-		size_t len, bytes;
-		char *dev, *host, *kmp;
-
-		len = size;
-		while (len != 0) {
-			unsigned long flags;
-
-			bytes = len;
-			if ((bytes + buffer.offset) > PAGE_SIZE)
-				bytes = PAGE_SIZE - buffer.offset;
-			local_irq_save(flags); /* protects KM_BOUNCE_READ */
-			kmp  = kmap_atomic(buffer.page, KM_BOUNCE_READ);
-			dev  = dma_addr + size - len;
-			host = kmp + buffer.offset;
-			if (dir == DMA_FROM_DEVICE)
-				memcpy(host, dev, bytes);
-			else
-				memcpy(dev, host, bytes);
-			kunmap_atomic(kmp, KM_BOUNCE_READ);
-			local_irq_restore(flags);
-			len -= bytes;
-			buffer.page++;
-			buffer.offset = 0;
-		}
-	} else {
-		void *v = page_address(buffer.page) + buffer.offset;
-
-		if (dir == DMA_TO_DEVICE)
-			memcpy(dma_addr, v, size);
-		else
-			memcpy(v, dma_addr, size);
-	}
-}
+/*
+ * Bounce: copy the swiotlb buffer back to the original dma location
+ */
+static void swiotlb_bounce(phys_addr_t phys, char *dma_addr, size_t size,
+			   enum dma_data_direction dir)
+{
+	unsigned long pfn = PFN_DOWN(phys);
+
+	if (PageHighMem(pfn_to_page(pfn))) {
+		/* The buffer does not have a mapping.  Map it in and copy */
+		unsigned int offset = phys & ~PAGE_MASK;
+		char *buffer;
+		unsigned int sz = 0;
+		unsigned long flags;
+
+		while (size) {
+			sz = min(PAGE_SIZE - offset, size);
+
+			local_irq_save(flags);
+			buffer = kmap_atomic(pfn_to_page(pfn),
+					     KM_BOUNCE_READ);
+			if (dir == DMA_TO_DEVICE)
+				memcpy(dma_addr, buffer + offset, sz);
+			else
+				memcpy(buffer + offset, dma_addr, sz);
+			kunmap_atomic(buffer, KM_BOUNCE_READ);
+			local_irq_restore(flags);
+
+			size -= sz;
+			pfn++;
+			dma_addr += sz;
+			offset = 0;
+		}
+	} else {
+		if (dir == DMA_TO_DEVICE)
+			memcpy(dma_addr, phys_to_virt(phys), size);
+		else
+			memcpy(phys_to_virt(phys), dma_addr, size);
+	}
+}
 
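
The highmem branch of swiotlb_bounce() above must kmap one page at a
time, so the copy is chunked as min(PAGE_SIZE - offset, remaining) and
the offset drops to zero after the first page. The chunking arithmetic
in isolation, as a runnable userspace sketch; PAGE_SIZE is fixed at 4096
here and walk_pages() is a stand-in, not kernel code:

	#include <stdio.h>
	#include <stddef.h>

	#define PAGE_SIZE 4096UL

	static void walk_pages(unsigned long phys, size_t size)
	{
		unsigned long pfn = phys / PAGE_SIZE;
		unsigned long offset = phys % PAGE_SIZE;

		while (size) {
			size_t sz = PAGE_SIZE - offset;

			if (sz > size)
				sz = size;
			printf("copy %zu bytes from pfn %lu offset %lu\n",
			       sz, pfn, offset);
			size -= sz;
			pfn++;
			offset = 0;	/* later pages start at byte 0 */
		}
	}

	int main(void)
	{
		walk_pages(0x12345, 10000);	/* starts mid-page, spans 3 pages */
		return 0;
	}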
@@ -392,7 +370,7 @@ __sync_single(struct swiotlb_phys_addr buffer, char *dma_addr, size_t size, int
  * Allocates bounce buffer and returns its kernel virtual address.
  */
 static void *
-map_single(struct device *hwdev, struct swiotlb_phys_addr buffer, size_t size, int dir)
+map_single(struct device *hwdev, phys_addr_t phys, size_t size, int dir)
 {
 	unsigned long flags;
 	char *dma_addr;
@@ -402,10 +380,9 @@ map_single(struct device *hwdev, struct swiotlb_phys_addr buffer, size_t size, i
 	unsigned long mask;
 	unsigned long offset_slots;
 	unsigned long max_slots;
-	struct swiotlb_phys_addr slot_buf;
 
 	mask = dma_get_seg_boundary(hwdev);
-	start_dma_addr = swiotlb_virt_to_bus(io_tlb_start) & mask;
+	start_dma_addr = swiotlb_virt_to_bus(hwdev, io_tlb_start) & mask;
 
 	offset_slots = ALIGN(start_dma_addr, 1 << IO_TLB_SHIFT) >> IO_TLB_SHIFT;
 
@@ -487,15 +464,10 @@ found:
 	 * This is needed when we sync the memory.  Then we sync the buffer if
 	 * needed.
 	 */
-	slot_buf = buffer;
-	for (i = 0; i < nslots; i++) {
-		slot_buf.page += slot_buf.offset >> PAGE_SHIFT;
-		slot_buf.offset &= PAGE_SIZE - 1;
-		io_tlb_orig_addr[index+i] = slot_buf;
-		slot_buf.offset += 1 << IO_TLB_SHIFT;
-	}
+	for (i = 0; i < nslots; i++)
+		io_tlb_orig_addr[index+i] = phys + (i << IO_TLB_SHIFT);
 	if (dir == DMA_TO_DEVICE || dir == DMA_BIDIRECTIONAL)
-		__sync_single(buffer, dma_addr, size, DMA_TO_DEVICE);
+		swiotlb_bounce(phys, dma_addr, size, DMA_TO_DEVICE);
 
 	return dma_addr;
 }
@@ -509,17 +481,13 @@ unmap_single(struct device *hwdev, char *dma_addr, size_t size, int dir)
 	unsigned long flags;
 	int i, count, nslots = ALIGN(size, 1 << IO_TLB_SHIFT) >> IO_TLB_SHIFT;
 	int index = (dma_addr - io_tlb_start) >> IO_TLB_SHIFT;
-	struct swiotlb_phys_addr buffer = swiotlb_bus_to_phys_addr(dma_addr);
+	phys_addr_t phys = io_tlb_orig_addr[index];
 
 	/*
 	 * First, sync the memory before unmapping the entry
 	 */
-	if ((dir == DMA_FROM_DEVICE) || (dir == DMA_BIDIRECTIONAL))
-		/*
-		 * bounce... copy the data back into the original buffer * and
-		 * delete the bounce buffer.
-		 */
-		__sync_single(buffer, dma_addr, size, DMA_FROM_DEVICE);
+	if (phys && ((dir == DMA_FROM_DEVICE) || (dir == DMA_BIDIRECTIONAL)))
+		swiotlb_bounce(phys, dma_addr, size, DMA_FROM_DEVICE);
 
 	/*
 	 * Return the buffer to the free list by setting the corresponding
@@ -551,18 +519,21 @@ static void
 sync_single(struct device *hwdev, char *dma_addr, size_t size,
 	    int dir, int target)
 {
-	struct swiotlb_phys_addr buffer = swiotlb_bus_to_phys_addr(dma_addr);
+	int index = (dma_addr - io_tlb_start) >> IO_TLB_SHIFT;
+	phys_addr_t phys = io_tlb_orig_addr[index];
+
+	phys += ((unsigned long)dma_addr & ((1 << IO_TLB_SHIFT) - 1));
 
 	switch (target) {
 	case SYNC_FOR_CPU:
 		if (likely(dir == DMA_FROM_DEVICE || dir == DMA_BIDIRECTIONAL))
-			__sync_single(buffer, dma_addr, size, DMA_FROM_DEVICE);
+			swiotlb_bounce(phys, dma_addr, size, DMA_FROM_DEVICE);
 		else
 			BUG_ON(dir != DMA_TO_DEVICE);
 		break;
 	case SYNC_FOR_DEVICE:
 		if (likely(dir == DMA_TO_DEVICE || dir == DMA_BIDIRECTIONAL))
-			__sync_single(buffer, dma_addr, size, DMA_TO_DEVICE);
+			swiotlb_bounce(phys, dma_addr, size, DMA_TO_DEVICE);
 		else
 			BUG_ON(dir != DMA_FROM_DEVICE);
 		break;
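
sync_single() above recovers the original physical address in two steps:
the slot index (dma_addr - io_tlb_start) >> IO_TLB_SHIFT selects the phys
saved by map_single(), and the low IO_TLB_SHIFT bits of the bounce address
are added back as the intra-slot offset. A runnable sketch of that
decomposition with the kernel's 2KB slots; the table and addresses are
illustrative:

	#include <stdio.h>

	#define IO_TLB_SHIFT 11			/* 2KB bounce slots */

	int main(void)
	{
		unsigned long io_tlb_start = 0x100000;	/* slot-aligned base */
		/* as filled by map_single(): phys + (i << IO_TLB_SHIFT) */
		unsigned long orig_addr[4] = { 0x5000, 0x5800, 0x6000, 0x6800 };

		/* a bounce address 0x123 bytes into slot 1 */
		unsigned long dma_addr = io_tlb_start + (1 << IO_TLB_SHIFT) + 0x123;

		int index = (dma_addr - io_tlb_start) >> IO_TLB_SHIFT;
		unsigned long phys = orig_addr[index];

		phys += dma_addr & ((1 << IO_TLB_SHIFT) - 1);
		printf("slot %d, original phys %#lx\n", index, phys);
		return 0;
	}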
@@ -584,7 +555,9 @@ swiotlb_alloc_coherent(struct device *hwdev, size_t size,
 		dma_mask = hwdev->coherent_dma_mask;
 
 	ret = (void *)__get_free_pages(flags, order);
-	if (ret && !is_buffer_dma_capable(dma_mask, swiotlb_virt_to_bus(ret), size)) {
+	if (ret &&
+	    !is_buffer_dma_capable(dma_mask, swiotlb_virt_to_bus(hwdev, ret),
+				   size)) {
 		/*
 		 * The allocated memory isn't reachable by the device.
 		 * Fall back on swiotlb_map_single().
@@ -599,16 +572,13 @@ swiotlb_alloc_coherent(struct device *hwdev, size_t size,
 		 * swiotlb_map_single(), which will grab memory from
 		 * the lowest available address range.
 		 */
-		struct swiotlb_phys_addr buffer;
-		buffer.page = virt_to_page(NULL);
-		buffer.offset = 0;
-		ret = map_single(hwdev, buffer, size, DMA_FROM_DEVICE);
+		ret = map_single(hwdev, 0, size, DMA_FROM_DEVICE);
 		if (!ret)
 			return NULL;
 	}
 
 	memset(ret, 0, size);
-	dev_addr = swiotlb_virt_to_bus(ret);
+	dev_addr = swiotlb_virt_to_bus(hwdev, ret);
 
 	/* Confirm address can be DMA'd by device */
 	if (!is_buffer_dma_capable(dma_mask, dev_addr, size)) {
@@ -623,6 +593,7 @@ swiotlb_alloc_coherent(struct device *hwdev, size_t size,
 	*dma_handle = dev_addr;
 	return ret;
 }
+EXPORT_SYMBOL(swiotlb_alloc_coherent);
 
 void
 swiotlb_free_coherent(struct device *hwdev, size_t size, void *vaddr,
@@ -635,6 +606,7 @@ swiotlb_free_coherent(struct device *hwdev, size_t size, void *vaddr,
 	/* DMA_TO_DEVICE to avoid memcpy in unmap_single */
 	unmap_single(hwdev, vaddr, size, DMA_TO_DEVICE);
 }
+EXPORT_SYMBOL(swiotlb_free_coherent);
 
 static void
 swiotlb_full(struct device *dev, size_t size, int dir, int do_panic)
@@ -668,9 +640,8 @@ dma_addr_t
 swiotlb_map_single_attrs(struct device *hwdev, void *ptr, size_t size,
 			 int dir, struct dma_attrs *attrs)
 {
-	dma_addr_t dev_addr = swiotlb_virt_to_bus(ptr);
+	dma_addr_t dev_addr = swiotlb_virt_to_bus(hwdev, ptr);
 	void *map;
-	struct swiotlb_phys_addr buffer;
 
 	BUG_ON(dir == DMA_NONE);
 	/*
@@ -685,15 +656,13 @@ swiotlb_map_single_attrs(struct device *hwdev, void *ptr, size_t size,
 	/*
 	 * Oh well, have to allocate and map a bounce buffer.
 	 */
-	buffer.page   = virt_to_page(ptr);
-	buffer.offset = (unsigned long)ptr & ~PAGE_MASK;
-	map = map_single(hwdev, buffer, size, dir);
+	map = map_single(hwdev, virt_to_phys(ptr), size, dir);
 	if (!map) {
 		swiotlb_full(hwdev, size, dir, 1);
 		map = io_tlb_overflow_buffer;
 	}
 
-	dev_addr = swiotlb_virt_to_bus(map);
+	dev_addr = swiotlb_virt_to_bus(hwdev, map);
 
 	/*
 	 * Ensure that the address returned is DMA'ble
@@ -710,6 +679,7 @@ swiotlb_map_single(struct device *hwdev, void *ptr, size_t size, int dir)
 {
 	return swiotlb_map_single_attrs(hwdev, ptr, size, dir, NULL);
 }
+EXPORT_SYMBOL(swiotlb_map_single);
 
 /*
  * Unmap a single streaming mode DMA translation.  The dma_addr and size must
@@ -739,6 +709,8 @@ swiotlb_unmap_single(struct device *hwdev, dma_addr_t dev_addr, size_t size,
 {
 	return swiotlb_unmap_single_attrs(hwdev, dev_addr, size, dir, NULL);
 }
+EXPORT_SYMBOL(swiotlb_unmap_single);
+
 /*
  * Make physical memory consistent for a single streaming mode DMA translation
  * after a transfer.
@@ -768,6 +740,7 @@ swiotlb_sync_single_for_cpu(struct device *hwdev, dma_addr_t dev_addr,
 {
 	swiotlb_sync_single(hwdev, dev_addr, size, dir, SYNC_FOR_CPU);
 }
+EXPORT_SYMBOL(swiotlb_sync_single_for_cpu);
 
 void
 swiotlb_sync_single_for_device(struct device *hwdev, dma_addr_t dev_addr,
@@ -775,6 +748,7 @@ swiotlb_sync_single_for_device(struct device *hwdev, dma_addr_t dev_addr,
 {
 	swiotlb_sync_single(hwdev, dev_addr, size, dir, SYNC_FOR_DEVICE);
 }
+EXPORT_SYMBOL(swiotlb_sync_single_for_device);
 
 /*
  * Same as above, but for a sub-range of the mapping.
@@ -800,6 +774,7 @@ swiotlb_sync_single_range_for_cpu(struct device *hwdev, dma_addr_t dev_addr,
 	swiotlb_sync_single_range(hwdev, dev_addr, offset, size, dir,
 				  SYNC_FOR_CPU);
 }
+EXPORT_SYMBOL_GPL(swiotlb_sync_single_range_for_cpu);
 
 void
 swiotlb_sync_single_range_for_device(struct device *hwdev, dma_addr_t dev_addr,
@@ -808,9 +783,8 @@ swiotlb_sync_single_range_for_device(struct device *hwdev, dma_addr_t dev_addr,
 	swiotlb_sync_single_range(hwdev, dev_addr, offset, size, dir,
 				  SYNC_FOR_DEVICE);
 }
+EXPORT_SYMBOL_GPL(swiotlb_sync_single_range_for_device);
 
-void swiotlb_unmap_sg_attrs(struct device *, struct scatterlist *, int, int,
-			    struct dma_attrs *);
 /*
  * Map a set of buffers described by scatterlist in streaming mode for DMA.
  * This is the scatter-gather version of the above swiotlb_map_single
@@ -832,20 +806,18 @@ swiotlb_map_sg_attrs(struct device *hwdev, struct scatterlist *sgl, int nelems,
 		    int dir, struct dma_attrs *attrs)
 {
 	struct scatterlist *sg;
-	struct swiotlb_phys_addr buffer;
-	dma_addr_t dev_addr;
 	int i;
 
 	BUG_ON(dir == DMA_NONE);
 
 	for_each_sg(sgl, sg, nelems, i) {
-		dev_addr = swiotlb_sg_to_bus(sg);
-		if (range_needs_mapping(sg_virt(sg), sg->length) ||
+		void *addr = sg_virt(sg);
+		dma_addr_t dev_addr = swiotlb_virt_to_bus(hwdev, addr);
+
+		if (range_needs_mapping(addr, sg->length) ||
 		    address_needs_mapping(hwdev, dev_addr, sg->length)) {
-			void *map;
-			buffer.page   = sg_page(sg);
-			buffer.offset = sg->offset;
-			map = map_single(hwdev, buffer, sg->length, dir);
+			void *map = map_single(hwdev, sg_phys(sg),
+					       sg->length, dir);
 			if (!map) {
 				/* Don't panic here, we expect map_sg users
 				   to do proper error handling. */
@@ -855,7 +827,7 @@ swiotlb_map_sg_attrs(struct device *hwdev, struct scatterlist *sgl, int nelems,
 				sgl[0].dma_length = 0;
 				return 0;
 			}
-			sg->dma_address = swiotlb_virt_to_bus(map);
+			sg->dma_address = swiotlb_virt_to_bus(hwdev, map);
 		} else
 			sg->dma_address = dev_addr;
 		sg->dma_length = sg->length;
@@ -870,6 +842,7 @@ swiotlb_map_sg(struct device *hwdev, struct scatterlist *sgl, int nelems,
 {
 	return swiotlb_map_sg_attrs(hwdev, sgl, nelems, dir, NULL);
 }
+EXPORT_SYMBOL(swiotlb_map_sg);
 
 /*
  * Unmap a set of streaming mode DMA translations.  Again, cpu read rules
@@ -885,11 +858,11 @@ swiotlb_unmap_sg_attrs(struct device *hwdev, struct scatterlist *sgl,
 	BUG_ON(dir == DMA_NONE);
 
 	for_each_sg(sgl, sg, nelems, i) {
-		if (sg->dma_address != swiotlb_sg_to_bus(sg))
+		if (sg->dma_address != swiotlb_virt_to_bus(hwdev, sg_virt(sg)))
 			unmap_single(hwdev, swiotlb_bus_to_virt(sg->dma_address),
 				     sg->dma_length, dir);
 		else if (dir == DMA_FROM_DEVICE)
-			dma_mark_clean(swiotlb_bus_to_virt(sg->dma_address), sg->dma_length);
+			dma_mark_clean(sg_virt(sg), sg->dma_length);
 	}
 }
 EXPORT_SYMBOL(swiotlb_unmap_sg_attrs);
@@ -900,6 +873,7 @@ swiotlb_unmap_sg(struct device *hwdev, struct scatterlist *sgl, int nelems,
 {
 	return swiotlb_unmap_sg_attrs(hwdev, sgl, nelems, dir, NULL);
 }
+EXPORT_SYMBOL(swiotlb_unmap_sg);
 
 /*
  * Make physical memory consistent for a set of streaming mode DMA translations
@@ -918,11 +892,11 @@ swiotlb_sync_sg(struct device *hwdev, struct scatterlist *sgl,
 	BUG_ON(dir == DMA_NONE);
 
 	for_each_sg(sgl, sg, nelems, i) {
-		if (sg->dma_address != swiotlb_sg_to_bus(sg))
+		if (sg->dma_address != swiotlb_virt_to_bus(hwdev, sg_virt(sg)))
 			sync_single(hwdev, swiotlb_bus_to_virt(sg->dma_address),
 				    sg->dma_length, dir, target);
 		else if (dir == DMA_FROM_DEVICE)
-			dma_mark_clean(swiotlb_bus_to_virt(sg->dma_address), sg->dma_length);
+			dma_mark_clean(sg_virt(sg), sg->dma_length);
 	}
 }
 
@@ -932,6 +906,7 @@ swiotlb_sync_sg_for_cpu(struct device *hwdev, struct scatterlist *sg,
 {
 	swiotlb_sync_sg(hwdev, sg, nelems, dir, SYNC_FOR_CPU);
 }
+EXPORT_SYMBOL(swiotlb_sync_sg_for_cpu);
 
 void
 swiotlb_sync_sg_for_device(struct device *hwdev, struct scatterlist *sg,
@@ -939,12 +914,14 @@ swiotlb_sync_sg_for_device(struct device *hwdev, struct scatterlist *sg,
 {
 	swiotlb_sync_sg(hwdev, sg, nelems, dir, SYNC_FOR_DEVICE);
 }
+EXPORT_SYMBOL(swiotlb_sync_sg_for_device);
 
 int
 swiotlb_dma_mapping_error(struct device *hwdev, dma_addr_t dma_addr)
 {
-	return (dma_addr == swiotlb_virt_to_bus(io_tlb_overflow_buffer));
+	return (dma_addr == swiotlb_virt_to_bus(hwdev, io_tlb_overflow_buffer));
 }
+EXPORT_SYMBOL(swiotlb_dma_mapping_error);
 
 /*
  * Return whether the given device DMA address mask can be supported
@@ -955,20 +932,6 @@ swiotlb_dma_mapping_error(struct device *hwdev, dma_addr_t dma_addr)
 int
 swiotlb_dma_supported(struct device *hwdev, u64 mask)
 {
-	return swiotlb_virt_to_bus(io_tlb_end - 1) <= mask;
+	return swiotlb_virt_to_bus(hwdev, io_tlb_end - 1) <= mask;
 }
-
-EXPORT_SYMBOL(swiotlb_map_single);
-EXPORT_SYMBOL(swiotlb_unmap_single);
-EXPORT_SYMBOL(swiotlb_map_sg);
-EXPORT_SYMBOL(swiotlb_unmap_sg);
-EXPORT_SYMBOL(swiotlb_sync_single_for_cpu);
-EXPORT_SYMBOL(swiotlb_sync_single_for_device);
-EXPORT_SYMBOL_GPL(swiotlb_sync_single_range_for_cpu);
-EXPORT_SYMBOL_GPL(swiotlb_sync_single_range_for_device);
-EXPORT_SYMBOL(swiotlb_sync_sg_for_cpu);
-EXPORT_SYMBOL(swiotlb_sync_sg_for_device);
-EXPORT_SYMBOL(swiotlb_dma_mapping_error);
-EXPORT_SYMBOL(swiotlb_alloc_coherent);
-EXPORT_SYMBOL(swiotlb_free_coherent);
 EXPORT_SYMBOL(swiotlb_dma_supported);
diff --git a/mm/backing-dev.c b/mm/backing-dev.c
index 6f80beddd8a4..8e8587444132 100644
--- a/mm/backing-dev.c
+++ b/mm/backing-dev.c
@@ -223,7 +223,7 @@ int bdi_init(struct backing_dev_info *bdi)
 	bdi->max_prop_frac = PROP_FRAC_BASE;
 
 	for (i = 0; i < NR_BDI_STAT_ITEMS; i++) {
-		err = percpu_counter_init_irq(&bdi->bdi_stat[i], 0);
+		err = percpu_counter_init(&bdi->bdi_stat[i], 0);
 		if (err)
 			goto err;
 	}